spatstat/0000755000176200001440000000000013624534362012125 5ustar liggesusersspatstat/NAMESPACE0000644000176200001440000030654713624152174013360 0ustar liggesusers# spatstat NAMESPACE file import(stats,graphics,grDevices,utils,methods) import(spatstat.utils,spatstat.data) import(polyclip,goftest) import(Matrix,nlme,rpart) importFrom(deldir, deldir,duplicatedxy,tile.list) importFrom(abind,abind) importFrom(tensor,tensor) importFrom(mgcv, gam,gam.control,anova.gam,formula.gam,predict.gam, print.gam,summary.gam,vcov.gam,s) # .... load dynamic library ..... # (native routines are now registered in init.c) useDynLib(spatstat, .registration=TRUE) # Do not edit the following. # It is generated automatically. # .................................................. # load dynamic library # (native routines are now registered in init.c) # .................................................. useDynLib(spatstat, .registration=TRUE) # .................................................. # Automatically-generated list of documented objects # .................................................. 
export("accumulateStatus") export("acedist.noshow") export("acedist.show") export("active.interactions") export("adaptcoef") export("adaptive.density") export("add.texture") export("addvar") export("adjust.ratfv") export("affine") export("affine.distfun") export("affine.im") export("affine.layered") export("affine.linim") export("affine.linnet") export("affine.lpp") export("affine.msr") export("affine.owin") export("affine.ppp") export("affine.psp") export("affine.tess") export("affinexy") export("affinexypolygon") export("AIC.dppm") export("AIC.kppm") export("AIC.mppm") export("AIC.ppm") export("allElementsIdentical") export("allstats") export("alltypes") export("ang2rad") export("angles.psp") export("anova.lppm") export("anova.mppm") export("anova.ppm") export("anova.slrm") export("anycrossing.psp") export("anyDuplicated.ppp") export("anyDuplicated.ppx") export("anylapply") export("[<-.anylist") export("[.anylist") export("anylist") export("anyNA.im") export("anyNA.sparse3Darray") export("aperm.sparse3Darray") export("append.psp") export("ApplyConnected") export("applynbd") export("applyPolyclipArgs") export("applySparseEntries") export("apply.ssf") export("applytolayers") export("area") export("area.default") export("areadelta2") export("areaGain") export("areaGain.diri") export("areaGain.grid") export("AreaInter") export("areaLoss") export("areaLoss.diri") export("areaLoss.grid") export("areaLoss.poly") export("area.owin") export("as.anylist") export("as.array.im") export("as.array.sparse3Darray") export("as.box3") export("as.boxx") export("as.breakpts") export("as.character.unitname") export("as.data.frame.bw.optim") export("as.data.frame.envelope") export("as.data.frame.fv") export("as.data.frame.hyperframe") export("as.data.frame.im") export("as.data.frame.linfun") export("as.data.frame.linim") export("as.data.frame.lintess") export("as.data.frame.owin") export("as.data.frame.ppp") export("as.data.frame.ppx") export("as.data.frame.psp") 
export("as.data.frame.tess") export("as.double.im") export("as.function.fv") export("as.function.im") export("as.function.leverage.ppm") export("as.function.linfun") export("as.function.owin") export("as.function.rhohat") export("as.function.ssf") export("as.function.tess") export("as.fv") export("as.fv.bw.optim") export("as.fv.data.frame") export("as.fv.dppm") export("as.fv.fasp") export("as.fv.fv") export("as.fv.kppm") export("as.fv.matrix") export("as.fv.minconfit") export("as.hyperframe") export("as.hyperframe.anylist") export("as.hyperframe.data.frame") export("as.hyperframe.default") export("as.hyperframe.hyperframe") export("as.hyperframe.listof") export("as.hyperframe.ppx") export("as.im") export("as.im.data.frame") export("as.im.default") export("as.im.densityfun") export("as.im.distfun") export("as.im.expression") export("as.im.function") export("as.im.funxy") export("as.im.im") export("as.im.leverage.ppm") export("as.im.linim") export("as.imlist") export("as.im.matrix") export("as.im.nnfun") export("as.im.owin") export("as.im.ppp") export("as.im.scan.test") export("as.im.Smoothfun") export("as.im.ssf") export("as.im.tess") export("as.interact") export("as.interact.fii") export("as.interact.interact") export("as.interact.ppm") export("as.layered") export("as.layered.default") export("as.layered.listof") export("as.layered.msr") export("as.layered.ppp") export("as.layered.solist") export("as.layered.splitppp") export("as.linfun") export("as.linfun.linfun") export("as.linfun.linim") export("as.linfun.lintess") export("as.linim") export("as.linim.default") export("as.linim.linfun") export("as.linim.linim") export("as.linnet") export("as.linnet.linfun") export("as.linnet.linim") export("as.linnet.linnet") export("as.linnet.lintess") export("as.linnet.lpp") export("as.linnet.lppm") export("as.linnet.psp") export("as.list.hyperframe") export("as.listof") export("as.lpp") export("as.mask") export("as.mask.psp") export("as.matrix.im") export("as.matrix.owin") 
export("as.matrix.ppx") export("as.owin") export("as.owin.boxx") export("as.owin.data.frame") export("as.owin.default") export("as.owin.distfun") export("as.owin.dppm") export("as.owin.funxy") export("as.owin.im") export("as.owin.influence.ppm") export("as.owin.kppm") export("as.owin.layered") export("as.owin.leverage.ppm") export("as.owin.linfun") export("as.owin.linnet") export("as.owin.lintess") export("as.owin.lpp") export("as.owin.lppm") export("as.owin.msr") export("as.owin.nnfun") export("as.owin.owin") export("as.owin.ppm") export("as.owin.ppp") export("as.owin.psp") export("as.owin.quad") export("as.owin.quadratcount") export("as.owin.quadrattest") export("as.owin.rmhmodel") export("as.owin.tess") export("as.polygonal") export("as.ppm") export("as.ppm.dppm") export("as.ppm.kppm") export("as.ppm.lppm") export("as.ppm.ppm") export("as.ppm.profilepl") export("as.ppm.rppm") export("as.ppp") export("as.ppp.data.frame") export("as.ppp.default") export("as.ppp.influence.ppm") export("as.ppplist") export("as.ppp.lpp") export("as.ppp.matrix") export("as.ppp.ppp") export("as.ppp.psp") export("as.ppp.quad") export("as.ppp.ssf") export("as.psp") export("as.psp.data.frame") export("as.psp.default") export("as.psp.linnet") export("as.psp.lpp") export("as.psp.matrix") export("as.psp.owin") export("as.psp.psp") export("as.rectangle") export("assemble.plot.objects") export("as.solist") export("as.sparse3Darray") export("as.tess") export("as.tess.im") export("as.tess.list") export("as.tess.owin") export("as.tess.quadratcount") export("as.tess.quadrattest") export("as.tess.tess") export("as.unitname") export("AsymmDistance.psp") export("auc") export("auc.kppm") export("auc.lpp") export("auc.lppm") export("auc.ppm") export("auc.ppp") export("augment.msr") export("avenndist") export("BadGey") export("bandwidth.is.infinite") export("BartCalc") export("bbEngine") export("bc") export("bc.ppm") export("bdist.pixels") export("bdist.points") export("bdist.tiles") export("bdry.mask") 
export("beachcolourmap") export("beachcolours") export("beginner") export("begins") export("berman.test") export("bermantestCalc") export("bermantestEngine") export("berman.test.lpp") export("berman.test.lppm") export("berman.test.ppm") export("berman.test.ppp") export("bilinearform") export("bind.fv") export("bind.ratfv") export("bind.sparse3Darray") export("bits.envelope") export("bits.test") export("blankcoefnames") export("blur") export("border") export("boundingbox") export("bounding.box3") export("boundingbox.default") export("boundingbox.im") export("boundingbox.linnet") export("boundingbox.lpp") export("boundingbox.owin") export("boundingbox.ppp") export("boundingbox.psp") export("boundingbox.solist") export("bounding.box.xy") export("boundingcentre") export("boundingcentre.owin") export("boundingcentre.ppp") export("boundingcircle") export("boundingcircle.owin") export("boundingcircle.ppp") export("boundingradius") export("boundingradius.linnet") export("boundingradius.owin") export("boundingradius.ppp") export("box3") export("boxx") export("branchlabelfun") export("break.holes") export("breakpts") export("breakpts.from.r") export("bt.frame") export("bugfixes") export("bw.abram") export("bw.CvL") export("bw.diggle") export("bw.frac") export("bw.lppl") export("bw.optim") export("bw.pcf") export("bw.ppl") export("bw.relrisk") export("bw.scott") export("bw.scott.iso") export("bw.smoothppp") export("bw.stoyan") export("bw.voronoi") export("by.im") export("by.ppp") export("calc.DR") export("calc.NNIR") export("calc.SAVE") export("calc.SIR") export("calc.TSE") export("cannot.update") export("cartesian") export("cauchy.estK") export("cauchy.estpcf") export("cbind.fv") export("cbind.hyperframe") export("CDF") export("CDF.density") export("cdf.test") export("cdf.test.lpp") export("cdf.test.lppm") export("cdf.test.mppm") export("cdf.test.ppm") export("cdf.test.ppp") export("cdf.test.slrm") export("cellmiddles") export("censtimeCDFest") export("centroid.owin") 
export("change.default.expand") export("check.anySparseVector") export("check.arc") export("checkbigmatrix") export("checkfields") export("check.finespacing") export("check.hist.lengths") export("check.mat.mul") export("check.separable") export("checksolve") export("check.testfun") export("chop.linnet") export("chop.tess") export("circdensity") export("circticks") export("circumradius") export("circumradius.linnet") export("circumradius.owin") export("circumradius.ppp") export("circunion") export("clarkevans") export("clarkevansCalc") export("clarkevans.test") export("clear.simplepanel") export("clickbox") export("clickdist") export("clickjoin") export("clicklpp") export("clickpoly") export("clickppp") export("clip.infline") export("clippoly.psp") export("clip.psp") export("cliprect.psp") export("closepaircounts") export("closepairs") export("closepairs.pp3") export("closepairs.ppp") export("closethresh") export("closetriples") export("closing") export("closing.owin") export("closing.ppp") export("closing.psp") export("clusterfield") export("clusterfield.character") export("clusterfield.function") export("clusterfield.kppm") export("clusterfit") export("clusterkernel") export("clusterkernel.character") export("clusterkernel.kppm") export("clusterradius") export("clusterradius.character") export("clusterradius.kppm") export("clusterset") export("cobble.xy") export("codetime") export("coef.dppm") export("coef.fii") export("coef.kppm") export("coef.lppm") export("coef.mppm") export("coef.ppm") export("coef.slrm") export("coef.summary.fii") export("coef.summary.kppm") export("coef.summary.ppm") export("coef.vblogit") export("coerce.marks.numeric") export("col2hex") export("col.args.to.grey") export("collapse.anylist") export("collapse.fv") export("colourmap") export("colouroutputs<-") export("colouroutputs") export("commonGrid") export("commonPolyclipArgs") export("compareFit") export("compatible") export("compatible.fasp") export("compatible.fv") 
export("compatible.im") export("compatible.rat") export("compatible.unitname") export("compileCDF") export("compileK") export("compilepcf") export("complementarycolour") export("complement.owin") export("Complex.im") export("Complex.imlist") export("Complex.linim") export("Complex.sparse3Darray") export("concatxy") export("Concom") export("conform.imagelist") export("conform.ratfv") export("connected") export("connected.im") export("connected.linnet") export("connected.lpp") export("connected.owin") export("connected.pp3") export("connected.ppp") export("connected.tess") export("contour.funxy") export("contour.im") export("contour.imlist") export("contour.leverage.ppm") export("contour.listof") export("contour.objsurf") export("contour.ssf") export("convexhull") export("convexhull.xy") export("convexify") export("convolve.im") export("coords<-") export("coords") export("coords<-.ppp") export("coords.ppp") export("coords<-.ppx") export("coords.ppx") export("coords.quad") export("corners") export("countends") export("countingweights") export("covering") export("CressieReadName") export("CressieReadStatistic") export("CressieReadSymbol") export("crossdist") export("crossdist.default") export("crossdist.lpp") export("crossdist.pp3") export("crossdist.ppp") export("crossdist.ppx") export("crossdist.psp") export("crossing.linnet") export("crossing.psp") export("crosspaircounts") export("crosspairquad") export("crosspairs") export("crosspairs.pp3") export("crosspairs.ppp") export("cut.im") export("cut.lpp") export("cutoff2Dkernel") export("cut.ppp") export("CVforPCF") export("damaged.ppm") export("datagen.rpoisppOnLines") export("datagen.runifpointOnLines") export("datagen.runifpoisppOnLines") export("data.lppm") export("data.mppm") export("data.ppm") export("dclf.progress") export("dclf.sigtrace") export("dclf.test") export("default.clipwindow") export("default.dummy") export("default.expand") export("default.linnet.tolerance") export("default.ntile") 
export("default.n.tiling") export("default.rmhcontrol") export("delaunay") export("delaunayDistance") export("delaunayNetwork") export("deletebranch") export("deletebranch.linnet") export("deletebranch.lpp") export("deltametric") export("deltasuffstat") export("densityAdaptiveKernel") export("densityAdaptiveKernel.ppp") export("densitycrossEngine") export("densityfun") export("densityfun.ppp") export("density.linnet") export("density.lpp") export("densitypointsEngine") export("density.ppp") export("density.ppplist") export("density.psp") export("densityQuick.lpp") export("density.splitppp") export("density.splitppx") export("densityVoronoi") export("densityVoronoi.lpp") export("densityVoronoi.ppp") export("deriv.fv") export("detpointprocfamilyfun") export("deviance.lppm") export("deviance.ppm") export("Deviation") export("dfbetas.ppm") export("dfbetas.ppmInfluence") export("dffit") export("dffit.ppm") export("dflt.redraw") export("dg.envelope") export("dg.progress") export("dg.sigtrace") export("dg.test") export("diagnose.ppm") export("diagnose.ppm.engine") export("[.diagramobj") export("diagramobj") export("diameter") export("diameter.box3") export("diameter.boxx") export("diameter.linnet") export("diameter.owin") export("digestCovariates") export("DiggleGatesStibbard") export("DiggleGratton") export("digital.volume") export("dilated.areas") export("dilation") export("dilationAny") export("dilation.owin") export("dilation.ppp") export("dilation.psp") export("dim.detpointprocfamily") export("dim.fasp") export("dimhat") export("dim.hyperframe") export("dim.im") export("dim.msr") export("dimnames<-.fasp") export("dimnames.fasp") export("dimnames<-.hyperframe") export("dimnames.hyperframe") export("dimnames.msr") export("dimnames<-.sparse3Darray") export("dimnames.sparse3Darray") export("dim.owin") export("dim<-.sparse3Darray") export("dim.sparse3Darray") export("dirichlet") export("dirichletAreas") export("dirichletEdges") export("dirichletNetwork") 
export("dirichletVertices") export("dirichletWeights") export("disc") export("discpartarea") export("discretise") export("discs") export("dist2dpath") export("distcdf") export("distfun") export("distfun.lpp") export("distfun.owin") export("distfun.ppp") export("distfun.psp") export("distmap") export("distmap.owin") export("distmap.ppp") export("distmap.psp") export("distributecbind") export("divide.linnet") export("dkernel") export("dknn") export("dmixpois") export("do.as.im") export("do.call.plotfun") export("DoCountCrossEnds") export("DoCountEnds") export("do.istat") export("domain") export("domain.distfun") export("domain.dppm") export("domain.funxy") export("domain.im") export("domain.im") export("domain.influence.ppm") export("domain.kppm") export("domain.layered") export("domain.leverage.ppm") export("domain.linfun") export("domain.lintess") export("domain.lpp") export("domain.lpp") export("domain.lppm") export("domain.msr") export("domain.nnfun") export("domain.pp3") export("domain.ppm") export("domain.ppp") export("domain.ppx") export("domain.psp") export("domain.quad") export("domain.quadratcount") export("domain.quadrattest") export("domain.rmhmodel") export("domain.tess") export("doMultiStraussHard") export("dppapproxkernel") export("dppapproxpcf") export("dppBessel") export("dppCauchy") export("dppeigen") export("dppGauss") export("dppkernel") export("dppm") export("dppMatern") export("dppmFixAlgorithm") export("dppmFixIntensity") export("dppparbounds") export("dppPowerExp") export("dppspecden") export("dppspecdenrange") export("drawSignedPoly") export("dummify") export("dummy.ppm") export("duplicated.ppp") export("duplicated.ppx") export("edge.Ripley") export("edges") export("edges2triangles") export("edges2vees") export("edge.Trans") export("edit.hyperframe") export("edit.im") export("edit.ppp") export("edit.psp") export("eem") export("effectfun") export("ellipse") export("Emark") export("emend") export("emend.lppm") export("emend.ppm") 
export("emptywindow") export("endpoints.psp") export("EntriesToSparse") export("envelope") export("envelopeArray") export("envelopeEngine") export("envelope.envelope") export("envelope.hasenvelope") export("envelope.kppm") export("envelope.lpp") export("envelope.lppm") export("envelope.matrix") export("envelope.pp3") export("envelope.ppm") export("envelope.ppp") export("envelopeProgressData") export("envelopeTest") export("equalpairs") export("equalpairs.quad") export("equalsfun.quad") export("equals.quad") export("eroded.areas") export("eroded.volumes") export("eroded.volumes.box3") export("eroded.volumes.boxx") export("erodemask") export("erosion") export("erosionAny") export("erosion.owin") export("erosion.ppp") export("erosion.psp") export("evalCovar") export("evalCovariate") export("evalCovar.lppm") export("evalCovar.ppm") export("eval.fasp") export("eval.fv") export("eval.im") export("evalInteraction") export("evalInterEngine") export("eval.linim") export("evalPairPotential") export("evalSparse3Dentrywise") export("evaluate2Dkernel") export("even.breaks.owin") export("ewcdf") export("exactdt") export("exactMPLEstrauss") export("exactPdt") export("existsSpatstatVariable") export("expand.owin") export("expandSparse") export("expandSpecialLists") export("expandwinPerfect") export("ExpSmoothLog") export("extractAIC.dppm") export("extractAIC.kppm") export("extractAIC.lppm") export("extractAIC.mppm") export("extractAIC.ppm") export("extractAIC.slrm") export("extractAtomicQtests") export("extractbranch") export("extractbranch.linnet") export("extractbranch.lpp") export("extrapolate.psp") export("f3Cengine") export("f3engine") export("F3est") export("fakemaintitle") export("family.vblogit") export("fardist") export("fardist.owin") export("fardist.ppp") export("[.fasp") export("fasp") export("FDMKERNEL") export("Fest") export("fft2D") export("fftwAvailable") export("Fhazard") export("fii") export("Fiksel") export("fill.coefs") export("fillNA") 
export("findbestlegendpos") export("findcbind") export("findCovariate") export("Finhom") export("fitin") export("fitin.ppm") export("fitin.profilepl") export("fitted.dppm") export("fitted.kppm") export("fitted.lppm") export("fitted.mppm") export("fitted.ppm") export("fitted.rppm") export("fitted.slrm") export("fixef.mppm") export("flatfname") export("flipxy") export("flipxy.distfun") export("flipxy.im") export("flipxy.infline") export("flipxy.layered") export("flipxy.msr") export("flipxy.owin") export("flipxypolygon") export("flipxy.ppp") export("flipxy.psp") export("flipxy.tess") export("FmultiInhom") export("foo") export("forbid.logi") export("FormatFaspFormulae") export("format.numberwithunit") export("formula<-") export("formula.dppm") export("formula<-.fv") export("formula.fv") export("formula.kppm") export("formula.lppm") export("formula.ppm") export("formula.slrm") export("fourierbasis") export("fourierbasisraw") export("Frame<-") export("Frame") export("framebottomleft") export("Frame<-.default") export("Frame.default") export("Frame<-.im") export("Frame<-.owin") export("Frame<-.ppp") export("fryplot") export("frypoints") export("fullIndexSequence") export("funxy") export("[<-.fv") export("[.fv") export("$<-.fv") export("fv") export("fvexprmap") export("fvlabelmap") export("fvlabels<-") export("fvlabels") export("fvlegend") export("fvnames<-") export("fvnames") export("g3Cengine") export("g3engine") export("G3est") export("gammabreaks") export("gauss.hermite") export("Gcom") export("Gcross") export("Gdot") export("Gest") export("getCall.mppm") export("getdataname") export("getfields") export("getglmdata") export("getglmfit") export("getglmsubset") export("getlambda.lpp") export("getlastshift") export("getppmdatasubset") export("getppmOriginalCovariates") export("getRandomFieldsModelGen") export("getSpatstatVariable") export("getSumFun") export("Geyer") export("geyercounts") export("geyerdelta2") export("Gfox") export("Ginhom") export("GLMpredict") 
export("Gmulti") export("GmultiInhom") export("good.correction.K") export("Gres") export("grid1index") export("gridcenters") export("gridcentres") export("gridindex") export("gridweights") export("grokIndexVector") export("grow.box3") export("grow.boxx") export("grow.mask") export("grow.rectangle") export("grow.simplepanel") export("hackglmmPQL") export("Halton") export("Hammersley") export("handle.r.b.args") export("handle.rshift.args") export("Hardcore") export("harmonic") export("harmonise") export("harmonise.fv") export("harmonise.im") export("harmonise.msr") export("harmonise.owin") export("harmonise.unitname") export("harmonize") export("harmonize.fv") export("harmonize.im") export("harmonize.owin") export("harmonize.unitname") export("has.close") export("has.close.default") export("has.close.pp3") export("has.close.ppp") export("hasenvelope") export("hasglmfit") export("has.offset") export("has.offset.term") export("head.hyperframe") export("head.ppp") export("head.ppx") export("head.psp") export("head.tess") export("HermiteCoefs") export("Hest") export("hexagon") export("hexgrid") export("hextess") export("hierarchicalordering") export("HierHard") export("hiermat") export("hierpair.family") export("HierStrauss") export("HierStraussHard") export("hist.funxy") export("hist.im") export("ho.engine") export("hopskel") export("hopskel.test") export("hsvim") export("hsvNA") export("Hybrid") export("hybrid.family") export("[<-.hyperframe") export("[.hyperframe") export("$<-.hyperframe") export("$.hyperframe") export("hyperframe") export("IdenticalRowPair") export("IdenticalRows") export("identify.lpp") export("identify.ppp") export("identify.psp") export("idorempty") export("idw") export("Iest") export("illegal.iformula") export("[<-.im") export("[.im") export("im") export("image.im") export("image.imlist") export("image.listof") export("image.objsurf") export("image.ssf") export("im.apply") export("imcov") export("implemented.for.K") export("impliedcoefficients") 
export("impliedpresence") export("improve.kppm") export("incircle") export("increment.fv") export("infline") export("[.influence.ppm") export("influence.ppm") export("influence.ppmInfluence") export("inforder.family") export("inpoint") export("inradius") export("insertVertices") export("inside3Darray") export("inside.arc") export("inside.boxx") export("inside.owin") export("instantiate.interact") export("integral") export("integral.im") export("integral.influence.ppm") export("integral.leverage.ppm") export("integral.linfun") export("integral.linim") export("integral.msr") export("integral.ssf") export("intensity") export("intensity.detpointprocfamily") export("intensity.dppm") export("intensity.lpp") export("intensity.ppm") export("intensity.ppp") export("intensity.ppx") export("intensity.psp") export("intensity.quadratcount") export("intensity.splitppp") export("interactionfamilyname") export("intermaker") export("interp.colourmap") export("interp.colours") export("interp.im") export("interpretAsOrigin") export("intersect.lintess") export("intersect.owin") export("intersect.tess") export("intX.owin") export("intX.xypolygon") export("intY.owin") export("intY.xypolygon") export("invokeColourmapRule") export("invoke.symbolmap") export("iplot") export("iplot.default") export("iplot.layered") export("iplot.linnet") export("iplot.lpp") export("iplot.ppp") export("ippm") export("is.atomicQtest") export("is.cadlag") export("is.col.argname") export("is.colour") export("is.connected") export("is.connected.default") export("is.connected.linnet") export("is.connected.ppp") export("is.convex") export("is.data") export("is.dppm") export("is.empty") export("is.empty.default") export("is.empty.owin") export("is.empty.ppp") export("is.empty.psp") export("is.expandable") export("is.expandable.ppm") export("is.expandable.rmhmodel") export("is.fv") export("is.grey") export("is.hybrid") export("is.hybrid.interact") export("is.hybrid.ppm") export("is.hyperframe") export("is.im") 
export("is.imlist") export("is.infline") export("is.interact") export("is.kppm") export("is.linim") export("is.lpp") export("is.lppm") export("is.marked") export("is.marked.default") export("is.marked.lppm") export("is.marked.msr") export("is.marked.ppm") export("is.marked.ppp") export("is.marked.psp") export("is.marked.quad") export("is.mask") export("is.mppm") export("is.multitype") export("is.multitype.default") export("is.multitype.lpp") export("is.multitype.lppm") export("is.multitype.msr") export("is.multitype.ppm") export("is.multitype.ppp") export("is.multitype.quad") export("is.owin") export("is.poisson") export("is.poisson.interact") export("is.poisson.kppm") export("is.poisson.lppm") export("is.poisson.mppm") export("is.poisson.ppm") export("is.poisson.rmhmodel") export("is.poisson.slrm") export("is.polygonal") export("is.pp3") export("is.ppm") export("is.ppp") export("is.ppplist") export("is.ppx") export("is.psp") export("is.quad") export("is.rectangle") export("isRelevantZero") export("is.scov") export("is.slrm") export("is.sob") export("is.solist") export("is.stationary") export("is.stationary.detpointprocfamily") export("is.stationary.dppm") export("is.stationary.kppm") export("is.stationary.lppm") export("is.stationary.ppm") export("is.stationary.rmhmodel") export("is.stationary.slrm") export("is.subset.owin") export("istat") export("is.tess") export("is.vanilla") export("Jcross") export("Jdot") export("Jest") export("Jfox") export("Jinhom") export("Jmulti") export("joinVertices") export("k3engine") export("K3est") export("kaplan.meier") export("Kborder.engine") export("Kcom") export("Kcross") export("Kcross.inhom") export("Kdot") export("Kdot.inhom") export("kernel.factor") export("kernel.moment") export("kernel.squint") export("Kest") export("Kest.fft") export("killinteraction") export("Kinhom") export("Kmark") export("Kmeasure") export("Kmodel") export("Kmodel.detpointprocfamily") export("Kmodel.dppm") export("Kmodel.kppm") export("Kmodel.ppm") 
export("km.rs") export("km.rs.opt") export("Kmulti") export("Kmulti.inhom") export("Knone.engine") export("Kount") export("Kpcf.kppm") export("kppm") export("kppmComLik") export("kppm.formula") export("kppmMinCon") export("kppmPalmLik") export("kppm.ppp") export("kppm.quad") export("kraever") export("kraeverRandomFields") export("Krect.engine") export("Kres") export("Kscaled") export("Ksector") export("Kwtsum") export("labels.dppm") export("labels.kppm") export("labels.ppm") export("labels.slrm") export("LambertW") export("laslett") export("latest.news") export("[<-.layered") export("[.layered") export("[[<-.layered") export("layered") export("layerplotargs<-") export("layerplotargs") export("layout.boxes") export("Lcross") export("Lcross.inhom") export("Ldot") export("Ldot.inhom") export("ldtEngine") export("lengths.psp") export("LennardJones") export("Lest") export("levelsAsFactor") export("levelset") export("levels<-.im") export("levels.im") export("leverage") export("[.leverage.ppm") export("leverage.ppm") export("leverage.ppmInfluence") export("lgcp.estK") export("lgcp.estpcf") export("lineardirichlet") export("lineardisc") export("linearK") export("linearKcross") export("linearKcross.inhom") export("linearKdot") export("linearKdot.inhom") export("linearKengine") export("linearKinhom") export("linearKmulti") export("linearKmultiEngine") export("linearKmulti.inhom") export("linearmarkconnect") export("linearmarkequal") export("linearpcf") export("linearpcfcross") export("linearpcfcross.inhom") export("linearpcfdot") export("linearpcfdot.inhom") export("linearpcfengine") export("linearpcfinhom") export("linearpcfmulti") export("linearPCFmultiEngine") export("linearpcfmulti.inhom") export("lineartileindex") export("linequad") export("linfun") export("Linhom") export("[<-.linim") export("[.linim") export("linim") export("[.linnet") export("linnet") export("lintess") export("[<-.listof") export("listof") export("lixellate") export("local2lpp") export("localK") 
export("localKcross") export("localKcross.inhom") export("localKdot") export("localKengine") export("localKinhom") export("localKmultiEngine") export("localL") export("localLcross") export("localLcross.inhom") export("localLdot") export("localLinhom") export("localpcf") export("localpcfengine") export("localpcfinhom") export("[.localpcfmatrix") export("localpcfmatrix") export("logicalIndex") export("logi.dummy") export("logi.engine") export("logLik.dppm") export("logLik.kppm") export("logLik.lppm") export("logLik.mppm") export("logLik.ppm") export("logLik.slrm") export("logLik.vblogit") export("lohboot") export("lookup2DkernelInfo") export("lookup.im") export("looVoronoiLPP") export("[.lpp") export("lpp") export("lppm") export("lppm.formula") export("lppm.lpp") export("Lscaled") export("lurking") export("lurking.mppm") export("lurking.ppm") export("lurking.ppp") export("lut") export("mad.progress") export("mad.sigtrace") export("mad.test") export("majorminorversion") export("make.even.breaks") export("makefvlabel") export("makeLinnetTolerance") export("makeunitname") export("mapSparseEntries") export("marginSums") export("markappend") export("markappendop") export("markcbind") export("markconnect") export("markcorr") export("markcorrint") export("markcrosscorr") export("markformat") export("markformat.default") export("markformat.ppp") export("markformat.ppx") export("markformat.psp") export("markmarkscatter") export("markmean") export("markreplicateop") export("marks<-") export("marks") export("mark.scale.default") export("marks.default") export("marks<-.lintess") export("marks.lintess") export("marks<-.lpp") export("markspace.integral") export("marks<-.ppp") export("marks.ppp") export("marks<-.ppx") export("marks.ppx") export("marks<-.psp") export("marks.psp") export("marks.quad") export("marks<-.ssf") export("marks.ssf") export("markstat") export("marks<-.tess") export("marks.tess") export("marksubset") export("marksubsetop") export("marktable") 
export("markvaluetype") export("markvar") export("markvario") export("mask2df") export("maskLaslett") export("match2DkernelName") export("matchingdist") export("match.kernel") export("matclust.estK") export("matclust.estpcf") export("Math.im") export("Math.imlist") export("Math.linim") export("Math.sparse3Darray") export("matrixinvsqrt") export("matrixpower") export("matrixsqrt") export("maxflow") export("max.fv") export("maxnndist") export("max.ssf") export("mctest.progress") export("mctest.sigtrace") export("mctestSigtraceEngine") export("mean.im") export("mean.leverage.ppm") export("mean.linim") export("meanlistfv") export("meanX.owin") export("meanY.owin") export("measureContinuous") export("measureDiscrete") export("measureNegative") export("measurePositive") export("measureVariation") export("median.im") export("median.linim") export("mergeLevels") export("midpoints.psp") export("mincontrast") export("min.fv") export("MinimalTess") export("MinkowskiSum") export("minnndist") export("min.ssf") export("miplot") export("model.covariates") export("model.depends") export("model.frame.dppm") export("modelFrameGam") export("model.frame.kppm") export("model.frame.lppm") export("model.frame.ppm") export("model.images") export("model.images.dppm") export("model.images.kppm") export("model.images.lppm") export("model.images.ppm") export("model.images.slrm") export("model.is.additive") export("model.matrix.dppm") export("model.matrix.ippm") export("model.matrix.kppm") export("model.matrix.lppm") export("model.matrix.mppm") export("model.matrix.ppm") export("model.matrix.slrm") export("model.se.image") export("mpl.engine") export("mpl.get.covariates") export("mpl.prepare") export("mpl.usable") export("mppm") export("[.msr") export("msr") export("MultiHard") export("MultiPair.checkmatrix") export("multiplicity") export("multiplicity.data.frame") export("multiplicity.default") export("multiplicityNumeric") export("multiplicity.ppp") export("multiplicity.ppx") 
export("multiply.only.finite.entries") export("MultiStrauss") export("MultiStraussHard") export("na.handle.im") export("names<-.fv") export("names<-.hyperframe") export("names.hyperframe") export("nearest.neighbour") export("nearest.pixel") export("nearest.raster.point") export("nearestsegment") export("nearest.valid.pixel") export("nearestValue") export("nestsplit") export("newformula") export("newstyle.coeff.handling") export("nnclean") export("nncleanEngine") export("nnclean.pp3") export("nnclean.ppp") export("nncorr") export("nncross") export("nncross.default") export("nncross.lpp") export("nncross.pp3") export("nncross.ppp") export("nncross.ppx") export("nndcumfun") export("nndensity") export("nndensity.ppp") export("nndist") export("nndist.default") export("nndist.lpp") export("nndist.pp3") export("nndist.ppp") export("nndist.ppx") export("nndist.psp") export("nnfromvertex") export("nnfun") export("nnfun.lpp") export("nnfun.ppp") export("nnfun.psp") export("nnmap") export("nnmark") export("nnmean") export("nnorient") export("nnvario") export("nnwhich") export("nnwhich.default") export("nnwhich.lpp") export("nnwhich.pp3") export("nnwhich.ppp") export("nnwhich.ppx") export("nobjects") export("nobjects.lintess") export("nobjects.ppp") export("nobjects.ppx") export("nobjects.psp") export("nobjects.tess") export("nobs.dppm") export("nobs.kppm") export("nobs.lppm") export("nobs.mppm") export("nobs.ppm") export("no.trend.ppm") export("npfun") export("npoints") export("npoints.pp3") export("npoints.ppp") export("npoints.ppx") export("n.quad") export("nsegments") export("nsegments.linnet") export("nsegments.lpp") export("nsegments.psp") export("numberwithunit") export("numeric.columns") export("nvertices") export("nvertices.default") export("nvertices.linnet") export("nvertices.owin") export("objsurf") export("objsurf.dppm") export("objsurfEngine") export("objsurf.kppm") export("objsurf.minconfit") export("onearrow") export("onecolumn") export("opening") 
export("opening.owin") export("opening.ppp") export("opening.psp") export("Ops.im") export("Ops.imlist") export("Ops.linim") export("Ops.msr") export("Ops.sparse3Darray") export("optimStatus") export("Ord") export("ord.family") export("OrdThresh") export("outdated.interact") export("overlap.owin") export("oversize.quad") export("[.owin") export("owin") export("owin2polypath") export("owinpoly2mask") export("owinpolycheck") export("packupNNdata") export("padimage") export("pairdist") export("pairdist.default") export("pairdist.lpp") export("pairdist.pp3") export("pairdist.ppp") export("pairdist.ppx") export("pairdist.psp") export("pairorient") export("PairPiece") export("PairPotentialType") export("pairsat.family") export("pairs.im") export("pairs.linim") export("pairs.listof") export("pairs.solist") export("Pairwise") export("pairwise.family") export("paletteindex") export("panel.contour") export("panel.histogram") export("panel.image") export("parameters") export("parameters.dppm") export("parameters.fii") export("parameters.interact") export("parameters.kppm") export("parameters.ppm") export("parameters.profilepl") export("param.quad") export("parbreak") export("parres") export("partialModelMatrix") export("pcf") export("pcf3engine") export("pcf3est") export("pcfcross") export("pcfcross.inhom") export("pcfdot") export("pcfdot.inhom") export("pcf.fasp") export("pcf.fv") export("pcfinhom") export("pcfmodel") export("pcfmodel.detpointprocfamily") export("pcfmodel.dppm") export("pcfmodel.kppm") export("pcfmodel.ppm") export("pcfmodel.zclustermodel") export("pcfmulti") export("pcfmulti.inhom") export("pcf.ppp") export("PDEdensityLPP") export("Penttinen") export("perimeter") export("periodify") export("periodify.owin") export("periodify.ppp") export("periodify.psp") export("perspContour") export("persp.funxy") export("persp.im") export("persp.leverage.ppm") export("perspLines") export("persp.objsurf") export("perspPoints") export("perspSegments") 
export("pickoption") export("pixelcentres") export("pixellate") export("pixellate.linnet") export("pixellate.owin") export("pixellate.ppp") export("pixellate.psp") export("pixelquad") export("pkernel") export("pknn") export("plan.legend.layout") export("plot3Dpoints") export("plot.addvar") export("plot.anylist") export("plot.barplotdata") export("plot.bermantest") export("plot.bw.frac") export("plot.bw.optim") export("plot.cdftest") export("plot.colourmap") export("plot.diagppm") export("plot.dppm") export("plotEachLayer") export("plot.envelope") export("ploterodeimage") export("ploterodewin") export("plot.fasp") export("plot.fii") export("plot.foo") export("plot.funxy") export("plot.fv") export("plot.hyperframe") export("plot.im") export("plot.imlist") export("plot.indicfun") export("plot.infline") export("plot.influence.ppm") export("plot.kppm") export("plot.laslett") export("plot.layered") export("plot.leverage.ppm") export("plot.linfun") export("plot.linim") export("plot.linnet") export("plot.lintess") export("plot.listof") export("plot.localpcfmatrix") export("plot.lpp") export("plot.lppm") export("plot.lurk") export("plot.minconfit") export("plot.mppm") export("plot.msr") export("plot.objsurf") export("plot.onearrow") export("plot.owin") export("plot.parres") export("plot.plotpairsim") export("plot.plotppm") export("plotPolygonBdry") export("plot.pp3") export("plot.ppm") export("plot.ppp") export("plot.pppmatching") export("plot.ppx") export("plot.profilepl") export("plot.psp") export("plot.qqppm") export("plot.quad") export("plot.quadratcount") export("plot.quadrattest") export("plot.rho2hat") export("plot.rhohat") export("plot.rppm") export("plot.scan.test") export("plot.slrm") export("plot.solist") export("plot.spatialcdf") export("plot.splitppp") export("plot.ssf") export("plot.studpermutest") export("plot.symbolmap") export("plot.tess") export("plot.textstring") export("plot.texturemap") export("plotWidthMap") export("plot.yardstick") export("pmixpois") 
export("pointgrid") export("pointsAlongNetwork") export("points.lpp") export("pointsOnLines") export("pointweights") export("PoisSaddle") export("PoisSaddleArea") export("PoisSaddleGeyer") export("PoisSaddlePairwise") export("Poisson") export("polartess") export("polyLaslett") export("polynom") export("polytileareaEngine") export("pool") export("pool.anylist") export("pool.envelope") export("pool.fasp") export("pool.fv") export("pool.quadrattest") export("pool.rat") export("positiveIndex") export("[.pp3") export("pp3") export("ppllengine") export("ppm") export("ppmCovariates") export("ppm.default") export("ppmDerivatives") export("ppm.formula") export("ppmInfluence") export("ppmInfluenceEngine") export("PPMmodelmatrix") export("ppm.ppp") export("ppm.quad") export("[<-.ppp") export("[.ppp") export("ppp") export("pppdist") export("pppdist.mat") export("pppdist.prohorov") export("pppmatching") export("ppsubset") export("PPversion") export("[.ppx") export("ppx") export("predict.dppm") export("predict.kppm") export("predict.lppm") export("predict.mppm") export("predict.ppm") export("predict.profilepl") export("predict.rho2hat") export("predict.rhohat") export("predict.rppm") export("predict.slrm") export("predict.vblogit") export("predict.zclustermodel") export("prefixfv") export("prepareTitle") export("print.addvar") export("print.anylist") export("print.autoexec") export("print.box3") export("print.boxx") export("print.bt.frame") export("print.bugtable") export("print.bw.frac") export("print.bw.optim") export("print.colourmap") export("print.densityfun") export("print.detpointprocfamily") export("print.detpointprocfamilyfun") export("print.diagppm") export("print.distfun") export("print.dppm") export("print.envelope") export("print.ewcdf") export("print.fasp") export("print.fii") export("print.funxy") export("print.fv") export("print.fvfun") export("print.hasenvelope") export("print.hierarchicalordering") export("print.hyperframe") export("print.im") 
export("print.indicfun") export("print.infline") export("print.influence.ppm") export("print.interact") export("print.intermaker") export("print.isf") export("print.kppm") export("print.laslett") export("print.layered") export("print.leverage.ppm") export("print.linfun") export("print.linim") export("print.linnet") export("print.lintess") export("print.localpcfmatrix") export("print.lpp") export("print.lppm") export("print.lurk") export("print.lut") export("print.minconfit") export("print.mppm") export("print.msr") export("print.nnfun") export("print.numberwithunit") export("print.objsurf") export("print.onearrow") export("print.owin") export("print.parres") export("print.plotpairsim") export("print.plotppm") export("print.pp3") export("print.ppm") export("print.ppp") export("print.pppmatching") export("print.ppx") export("print.profilepl") export("print.psp") export("print.qqppm") export("print.quad") export("print.quadrattest") export("print.rat") export("print.rho2hat") export("print.rhohat") export("print.rmhcontrol") export("print.rmhexpand") export("print.rmhInfoList") export("print.rmhmodel") export("print.rmhstart") export("print.rppm") export("print.simplepanel") export("print.slrm") export("print.Smoothfun") export("print.solist") export("print.sparse3Darray") export("print.splitppp") export("print.splitppx") export("print.ssf") export("printStatus") export("printStatusList") export("print.summary.distfun") export("print.summary.dppm") export("print.summary.fii") export("print.summary.funxy") export("print.summary.hyperframe") export("print.summary.im") export("print.summary.kppm") export("print.summary.linim") export("print.summary.linnet") export("print.summary.lintess") export("print.summary.listof") export("print.summary.logiquad") export("print.summary.lpp") export("print.summary.lut") export("print.summary.mppm") export("print.summary.owin") export("print.summary.pp3") export("print.summary.ppm") export("print.summary.ppp") 
export("print.summary.psp") export("print.summary.quad") export("print.summary.rmhexpand") export("print.summary.solist") export("print.summary.splitppp") export("print.summary.splitppx") export("print.summary.ssf") export("print.summary.unitname") export("print.symbolmap") export("print.tess") export("print.textstring") export("print.texturemap") export("print.timed") export("print.unitname") export("print.vblogit") export("print.yardstick") export("print.zclustermodel") export("profilepl") export("progressreport") export("project2segment") export("project2set") export("project3Dhom") export("project.ppm") export("prune.rppm") export("pseudoR2") export("pseudoR2.lppm") export("pseudoR2.ppm") export("psib") export("psib.kppm") export("[.psp") export("psp") export("psst") export("psstA") export("psstG") export("putlastshift") export("putSpatstatVariable") export("qkdeEngine") export("qkernel") export("qknn") export("qmixpois") export("qqplot.ppm") export("QQversion") export("qtPrepareCoordinate") export("[.quad") export("quad") export("quadBlockSizes") export("quadform") export("quad.mppm") export("quad.ppm") export("quadratcount") export("quadratcount.ppp") export("quadratcount.splitppp") export("quadratresample") export("quadrats") export("quadrat.test") export("quadrat.testEngine") export("quadrat.test.mppm") export("quadrat.test.ppm") export("quadrat.test.ppp") export("quadrat.test.quadratcount") export("quadrat.test.splitppp") export("quadscheme") export("quadscheme.logi") export("quadscheme.replicated") export("quadscheme.spatial") export("quantess") export("quantess.im") export("quantess.owin") export("quantess.ppp") export("quantile.density") export("quantile.ewcdf") export("quantile.im") export("quantile.linim") export("rags") export("ragsAreaInter") export("ragsMultiHard") export("RandomFieldsSafe") export("ranef.mppm") export("range.fv") export("range.ssf") export("rasterfilter") export("rastersample") export("raster.x") export("rasterx.im") 
export("rasterx.mask") export("raster.xy") export("rasterxy.im") export("rasterxy.mask") export("raster.y") export("rastery.im") export("rastery.mask") export("[.rat") export("rat") export("ratfv") export("rbindCompatibleDataFrames") export("rbind.hyperframe") export("rCauchy") export("rcell") export("rcelllpp") export("rcellnumber") export("rDGS") export("rDiggleGratton") export("rdpp") export("reach") export("reach.detpointprocfamily") export("reach.dppm") export("reach.fii") export("reach.interact") export("reach.kppm") export("reach.ppm") export("reach.rmhmodel") export("rebadge.as.crossfun") export("rebadge.as.dotfun") export("rebadge.fv") export("rebound") export("rebound.im") export("rebound.owin") export("rebound.ppp") export("rebound.psp") export("recognise.spatstat.type") export("reconcile.fv") export("rectcontact") export("rectdistmap") export("rectquadrat.breaks") export("rectquadrat.countEngine") export("redraw.simplepanel") export("reduced.sample") export("reduceformula") export("reflect") export("reflect.default") export("reflect.distfun") export("reflect.im") export("reflect.infline") export("reflect.layered") export("reflect.tess") export("regularpolygon") export("reheat") export("reincarnate.interact") export("RelevantDeviation") export("RelevantEmpty") export("RelevantNA") export("RelevantZero") export("relevel.im") export("relevel.ppp") export("relevel.ppx") export("reload.or.compute") export("relrisk") export("relrisk.ppm") export("relrisk.ppp") export("rename.fv") export("repair.image.xycoords") export("repairNetwork") export("repair.old.factor.image") export("replacementIndex") export("representativeRows") export("repul") export("repul.dppm") export("requireversion") export("resampleNetworkDataFrame") export("rescale") export("rescale.distfun") export("rescale.im") export("rescale.layered") export("rescale.linnet") export("rescale.lpp") export("rescale.msr") export("rescale.owin") export("rescale.ppp") export("rescale.psp") 
export("rescale.unitname") export("rescue.rectangle") export("reset.spatstat.options") export("resid1panel") export("resid1plot") export("resid4plot") export("residuals.dppm") export("residuals.kppm") export("residuals.mppm") export("residuals.ppm") export("resolve.2D.kernel") export("resolveEinfo") export("resolve.foxall.window") export("resolve.lambda") export("resolve.lambda.cross") export("resolve.vargamma.shape") export("restrict.mask") export("reversePolyclipArgs") export("rex") export("rGaussPoisson") export("rgb2hex") export("rgb2hsva") export("rgbim") export("rgbNA") export("rHardcore") export("rho2hat") export("rhohat") export("rhohatCalc") export("rhohatEngine") export("rhohat.lpp") export("rhohat.lppm") export("rhohat.ppm") export("rhohat.ppp") export("rhohat.quad") export("ripras") export("rjitter") export("rkernel") export("rknn") export("rlabel") export("rLGCP") export("rlinegrid") export("rlpp") export("rMatClust") export("rMaternI") export("rMaternII") export("rMaternInhibition") export("rmax.Rigid") export("rmax.Ripley") export("rmax.rule") export("rmax.Trans") export("rmh") export("rmhcontrol") export("rmhcontrol.default") export("rmhcontrol.list") export("rmhcontrol.rmhcontrol") export("rmh.default") export("rmhEngine") export("rmhexpand") export("RmhExpandRule") export("rmhmodel") export("rmhmodel.default") export("rmhmodel.list") export("rmhmodel.ppm") export("rmhmodel.rmhmodel") export("rmh.ppm") export("rmhResolveControl") export("rmhResolveExpansion") export("rmhResolveTypes") export("rmhsnoop") export("rmhSnoopEnv") export("rmhstart") export("rmhstart.default") export("rmhstart.list") export("rmhstart.rmhstart") export("rmixpois") export("rMosaicField") export("rMosaicSet") export("rmpoint") export("rmpoint.I.allim") export("rmpoispp") export("rNeymanScott") export("rnoise") export("roc") export("rocData") export("roc.kppm") export("roc.lpp") export("roc.lppm") export("rocModel") export("roc.ppm") export("roc.ppp") export("rose") 
export("roseContinuous") export("rose.default") export("rose.density") export("rose.fv") export("rose.histogram") export("rotate") export("rotate.distfun") export("rotate.im") export("rotate.infline") export("rotate.layered") export("rotate.linnet") export("rotate.lpp") export("rotate.msr") export("rotate.owin") export("rotate.ppp") export("rotate.psp") export("rotate.tess") export("rotmean") export("rotxy") export("rotxypolygon") export("rounding") export("rounding.default") export("rounding.pp3") export("rounding.ppp") export("rounding.ppx") export("round.pp3") export("round.ppp") export("round.ppx") export("row.names<-.hyperframe") export("row.names.hyperframe") export("rPenttinen") export("rpoint") export("rpoint.multi") export("rpoisline") export("rpoislinetess") export("rpoislpp") export("rpoispp") export("rpoispp3") export("rpoisppOnLines") export("rpoisppx") export("rPoissonCluster") export("rppm") export("rQuasi") export("rshift") export("rshift.ppp") export("rshift.psp") export("rshift.splitppp") export("rSSI") export("rstrat") export("rStrauss") export("rStraussHard") export("rSwitzerlpp") export("rsyst") export("rtemper") export("rthin") export("rthinclumps") export("rThomas") export("ruletextline") export("runifdisc") export("runiflpp") export("runifpoint") export("runifpoint3") export("runifpointOnLines") export("runifpointx") export("runifpoispp") export("runifpoisppOnLines") export("runifrect") export("run.simplepanel") export("rVarGamma") export("safedeldir") export("safelookup") export("samecolour") export("SatPiece") export("Saturated") export("scalardilate") export("scalardilate.breakpts") export("scalardilate.default") export("scalardilate.diagramobj") export("scalardilate.distfun") export("scalardilate.im") export("scalardilate.layered") export("scalardilate.linim") export("scalardilate.linnet") export("scalardilate.lpp") export("scalardilate.msr") export("scalardilate.owin") export("scalardilate.ppp") export("scalardilate.psp") 
export("scalardilate.tess") export("scaletointerval") export("scaletointerval.default") export("scaletointerval.im") export("scanBinomLRTS") export("scanLRTS") export("scanmeasure") export("scanmeasure.im") export("scanmeasure.ppp") export("scanPoisLRTS") export("scanpp") export("scan.test") export("sdr") export("sdr.ppp") export("sdrPredict") export("second.moment.calc") export("second.moment.engine") export("segregation.test") export("segregation.test.ppp") export("selfcrossing.psp") export("selfcut.psp") export("sessionLibs") export("setcov") export("setmarks") export("setminus.owin") export("sewpcf") export("sewsmod") export("sharpen") export("sharpen.ppp") export("shift") export("shift.diagramobj") export("shift.distfun") export("shift.im") export("shift.infline") export("shift.influence.ppm") export("shift.layered") export("shift.leverage.ppm") export("shift.linim") export("shift.linnet") export("shift.lpp") export("shift.msr") export("shift.owin") export("shift.ppp") export("shift.psp") export("shift.quadratcount") export("shift.quadrattest") export("shift.tess") export("shiftxy") export("shiftxypolygon") export("shortside") export("shortside.box3") export("shortside.boxx") export("shortside.owin") export("sidelengths") export("sidelengths.box3") export("sidelengths.boxx") export("sidelengths.owin") export("signalStatus") export("simplepanel") export("simplify.owin") export("simulate.detpointprocfamily") export("simulate.dppm") export("simulate.kppm") export("simulate.lppm") export("simulate.mppm") export("simulate.ppm") export("simulate.profilepl") export("simulate.rhohat") export("simulate.slrm") export("simulationresult") export("simulrecipe") export("slrAssemblePixelData") export("slrm") export("slr.prepare") export("Smooth") export("smoothcrossEngine") export("Smoothfun") export("Smoothfun.ppp") export("Smooth.fv") export("Smooth.im") export("Smooth.influence.ppm") export("Smooth.leverage.ppm") export("Smooth.msr") export("smoothpointsEngine") 
export("Smooth.ppp") export("Smooth.solist") export("Smooth.ssf") export("smudge") export("Softcore") export("solapply") export("[<-.solist") export("[.solist") export("solist") export("solutionset") export("sortalongsegment") export("sort.im") export("[<-.sparse3Darray") export("[.sparse3Darray") export("sparse3Darray") export("SparseEntries") export("SparseIndices") export("sparseVectorCumul") export("spatdim") export("spatialcdf") export("spatialCDFframe") export("spatialCDFtest") export("spatialCDFtestCalc") export("spatstatClusterModelInfo") export("spatstat.deldir.setopt") export("spatstatDiagnostic") export("spatstatDPPModelInfo") export("spatstat.options") export("spatstatRmhInfo") export("spatstat.xy.coords") export("sp.foundclass") export("sp.foundclasses") export("sphere.volume") export("splitHybridInteraction") export("split<-.hyperframe") export("split.hyperframe") export("split.im") export("split.msr") export("[<-.splitppp") export("[.splitppp") export("split<-.ppp") export("split.ppp") export("[<-.splitppx") export("[.splitppx") export("split.ppx") export("spokes") export("square") export("[.ssf") export("ssf") export("stieltjes") export("stienen") export("stienenSet") export("store.versionstring.spatstat") export("stratrand") export("Strauss") export("strausscounts") export("StraussHard") export("str.hyperframe") export("strictIndexSequence") export("studpermu.test") export("subfits") export("subfits.new") export("subfits.old") export("subset.hyperframe") export("subset.lpp") export("subset.pp3") export("subset.ppp") export("subset.ppx") export("subset.psp") export("subspaceDistance") export("suffloc") export("suffstat") export("suffstat.generic") export("suffstat.poisson") export("summarise.trend") export("summary.anylist") export("summary.distfun") export("summary.dppm") export("summary.envelope") export("summary.fii") export("summary.funxy") export("summary.hyperframe") export("summary.im") export("Summary.im") export("Summary.imlist") 
export("summary.kppm") export("summary.linfun") export("summary.linim") export("Summary.linim") export("summary.linnet") export("summary.lintess") export("summary.listof") export("summary.logiquad") export("summary.lpp") export("summary.lppm") export("summary.lut") export("summary.mppm") export("summary.msr") export("summary.owin") export("summary.pp3") export("summary.ppm") export("summary.ppp") export("summary.pppmatching") export("summary.ppx") export("summary.profilepl") export("summary.psp") export("summary.quad") export("summary.rmhexpand") export("summary.solist") export("Summary.sparse3Darray") export("summary.splitppp") export("summary.splitppx") export("summary.ssf") export("summary.unitname") export("summary.vblogit") export("sumouter") export("sumsymouter") export("sumsymouterSparse") export("superimpose") export("superimpose.default") export("superimpose.lpp") export("superimposeMarks") export("superimpose.ppp") export("superimpose.ppplist") export("superimpose.psp") export("superimpose.splitppp") export("symbolmap") export("symbolmapdomain") export("symbolmaptype") export("tail.hyperframe") export("tail.ppp") export("tail.ppx") export("tail.psp") export("tail.tess") export("tenseur") export("tensor1x1") export("terms.dppm") export("terms.kppm") export("terms.lppm") export("terms.mppm") export("terms.ppm") export("terms.slrm") export("[<-.tess") export("[.tess") export("tess") export("test.crossing.psp") export("test.selfcrossing.psp") export("text.lpp") export("text.ppp") export("text.psp") export("textstring") export("texturemap") export("textureplot") export("thinjump") export("thinNetwork") export("thomas.estK") export("thomas.estpcf") export("tile.areas") export("tilecentroids") export("tileindex") export("tile.lengths") export("tilenames<-") export("tilenames") export("tilenames<-.lintess") export("tilenames.lintess") export("tilenames<-.tess") export("tilenames.tess") export("tiles") export("tiles.empty") export("timed") export("timeTaken") 
export("to.grey") export("to.opaque") export("to.saturated") export("totalVariation") export("to.transparent") export("transect.im") export("transmat") export("treebranchlabels") export("treeprune") export("trianglediameters") export("triangulate.owin") export("trim.mask") export("trim.rectangle") export("triplet.family") export("Triplets") export("Tstat") export("tweak.closepairs") export("tweak.coefs") export("tweak.colourmap") export("tweak.fv.entry") export("tweak.ratfv.entry") export("twostage.envelope") export("twostage.test") export("unionOfSparseIndices") export("union.owin") export("union.quad") export("uniquemap") export("uniquemap.data.frame") export("uniquemap.default") export("uniquemap.lpp") export("uniquemap.matrix") export("uniquemap.ppp") export("uniquemap.ppx") export("unique.ppp") export("unique.ppx") export("unitname<-") export("unitname") export("unitname<-.box3") export("unitname.box3") export("unitname<-.boxx") export("unitname.boxx") export("unitname<-.default") export("unitname.default") export("unitname<-.dppm") export("unitname.dppm") export("unitname<-.im") export("unitname.im") export("unitname<-.kppm") export("unitname.kppm") export("unitname<-.linnet") export("unitname.linnet") export("unitname<-.lpp") export("unitname.lpp") export("unitname<-.minconfit") export("unitname.minconfit") export("unitname<-.msr") export("unitname.msr") export("unitname<-.owin") export("unitname.owin") export("unitname<-.pp3") export("unitname.pp3") export("unitname<-.ppm") export("unitname.ppm") export("unitname<-.ppp") export("unitname.ppp") export("unitname<-.ppx") export("unitname.ppx") export("unitname<-.psp") export("unitname.psp") export("unitname<-.quad") export("unitname.quad") export("unitname<-.slrm") export("unitname.slrm") export("unitname<-.tess") export("unitname.tess") export("unit.square") export("unmark") export("unmark.lintess") export("unmark.lpp") export("unmark.ppp") export("unmark.ppx") export("unmark.psp") export("unmark.splitppp") 
export("unmark.ssf") export("unmark.tess") export("unnormdensity") export("unstackFilter") export("unstack.layered") export("unstack.lintess") export("unstack.lpp") export("unstack.msr") export("unstack.ppp") export("unstack.psp") export("unstack.solist") export("unstack.tess") export("update.detpointprocfamily") export("update.im") export("update.interact") export("update.ippm") export("update.kppm") export("update.lppm") export("update.msr") export("update.ppm") export("update.rmhcontrol") export("update.rmhstart") export("update.slrm") export("update.symbolmap") export("valid") export("validate2Dkernel") export("validate.angles") export("validate.lpp.coords") export("validate.mask") export("validate.quad") export("validate.weights") export("valid.detpointprocfamily") export("valid.lppm") export("valid.ppm") export("validradius") export("vanilla.fv") export("varblock") export("varcount") export("varcountEngine") export("vargamma.estK") export("vargamma.estpcf") export("vcov.kppm") export("vcov.lppm") export("vcov.mppm") export("vcov.ppm") export("vcov.slrm") export("vdCorput") export("venn.tess") export("verifyclass") export("versioncurrency.spatstat") export("versionstring.interact") export("versionstring.ppm") export("versionstring.spatstat") export("vertexdegree") export("vertices") export("vertices.linnet") export("vertices.owin") export("veryunique") export("Vmark") export("vnnFind") export("volume") export("volume.box3") export("volume.boxx") export("volume.linnet") export("volume.owin") export("warn.once") export("waxlyrical") export("weightedclosepairs") export("weighted.median") export("weighted.quantile") export("weighted.var") export("where.max") export("where.min") export("whichhalfplane") export("which.max.im") export("whist") export("will.expand") export("Window<-") export("Window") export("Window.distfun") export("Window.dppm") export("Window.funxy") export("Window<-.im") export("Window.im") export("Window.influence.ppm") export("Window.kppm") 
export("Window.layered") export("Window.leverage.ppm") export("Window<-.linnet") export("Window.linnet") export("Window.lintess") export("Window<-.lpp") export("Window.lpp") export("Window.lppm") export("Window.msr") export("Window.nnfun") export("Window.ppm") export("Window<-.ppp") export("Window.ppp") export("Window<-.psp") export("Window.psp") export("Window.quad") export("Window.quadratcount") export("Window.quadrattest") export("Window.rmhmodel") export("windows.mppm") export("Window.tess") export("with.fv") export("with.hyperframe") export("with.msr") export("with.ssf") export("w.quad") export("X2testEngine") export("x.quad") export("xtfrm.im") export("xy.grid") export("xypolygon2psp") export("xypolyselfint") export("yardstick") export("y.quad") export("zapsmall.im") export("zclustermodel") export("ZeroValue") export("ZeroValue.im") # ....... Special cases ........... export("%(-)%") export("%(+)%") export("%mapp%") export("%mark%") export("%mrep%") export("%msub%") export("%unit%") S3method("Complex", "im") S3method("Complex", "imlist") S3method("Complex", "linim") S3method("Complex", "sparse3Darray") S3method("Math", "im") S3method("Math", "imlist") S3method("Math", "linim") S3method("Math", "sparse3Darray") S3method("mean", "im") S3method("median", "im") S3method("Ops", "im") S3method("Ops", "imlist") S3method("Ops", "linim") S3method("Ops", "msr") S3method("Ops", "sparse3Darray") S3method("Summary", "im") S3method("Summary", "imlist") S3method("Summary", "linim") S3method("Summary", "sparse3Darray") # ....... End of special cases ... # ......................................... # Automatically generated list of S3 methods # ......................................... 
S3method("affine", "distfun") S3method("affine", "im") S3method("affine", "layered") S3method("affine", "linim") S3method("affine", "linnet") S3method("affine", "lpp") S3method("affine", "msr") S3method("affine", "owin") S3method("affine", "ppp") S3method("affine", "psp") S3method("affine", "tess") S3method("AIC", "dppm") S3method("AIC", "kppm") S3method("AIC", "mppm") S3method("AIC", "ppm") S3method("anova", "lppm") S3method("anova", "mppm") S3method("anova", "ppm") S3method("anova", "slrm") S3method("anyDuplicated", "ppp") S3method("anyDuplicated", "ppx") S3method("[", "anylist") S3method("anyNA", "im") S3method("anyNA", "sparse3Darray") S3method("aperm", "sparse3Darray") S3method("area", "default") S3method("area", "owin") S3method("as.array", "im") S3method("as.array", "sparse3Darray") S3method("as.character", "unitname") S3method("as.data.frame", "bw.optim") S3method("as.data.frame", "envelope") S3method("as.data.frame", "fv") S3method("as.data.frame", "hyperframe") S3method("as.data.frame", "im") S3method("as.data.frame", "linfun") S3method("as.data.frame", "linim") S3method("as.data.frame", "lintess") S3method("as.data.frame", "owin") S3method("as.data.frame", "ppp") S3method("as.data.frame", "ppx") S3method("as.data.frame", "psp") S3method("as.data.frame", "tess") S3method("as.double", "im") S3method("as.function", "fv") S3method("as.function", "im") S3method("as.function", "leverage.ppm") S3method("as.function", "linfun") S3method("as.function", "owin") S3method("as.function", "rhohat") S3method("as.function", "ssf") S3method("as.function", "tess") S3method("as.fv", "bw.optim") S3method("as.fv", "data.frame") S3method("as.fv", "dppm") S3method("as.fv", "fasp") S3method("as.fv", "fv") S3method("as.fv", "kppm") S3method("as.fv", "matrix") S3method("as.fv", "minconfit") S3method("as.hyperframe", "anylist") S3method("as.hyperframe", "data.frame") S3method("as.hyperframe", "default") S3method("as.hyperframe", "hyperframe") S3method("as.hyperframe", "listof") 
S3method("as.hyperframe", "ppx") S3method("as.im", "data.frame") S3method("as.im", "default") S3method("as.im", "densityfun") S3method("as.im", "distfun") S3method("as.im", "expression") S3method("as.im", "function") S3method("as.im", "funxy") S3method("as.im", "im") S3method("as.im", "leverage.ppm") S3method("as.im", "linim") S3method("as.im", "matrix") S3method("as.im", "nnfun") S3method("as.im", "owin") S3method("as.im", "ppp") S3method("as.im", "scan.test") S3method("as.im", "Smoothfun") S3method("as.im", "ssf") S3method("as.im", "tess") S3method("as.interact", "fii") S3method("as.interact", "interact") S3method("as.interact", "ppm") S3method("as.layered", "default") S3method("as.layered", "listof") S3method("as.layered", "msr") S3method("as.layered", "ppp") S3method("as.layered", "solist") S3method("as.layered", "splitppp") S3method("as.linfun", "linfun") S3method("as.linfun", "linim") S3method("as.linfun", "lintess") S3method("as.linim", "default") S3method("as.linim", "linfun") S3method("as.linim", "linim") S3method("as.linnet", "linfun") S3method("as.linnet", "linim") S3method("as.linnet", "linnet") S3method("as.linnet", "lintess") S3method("as.linnet", "lpp") S3method("as.linnet", "lppm") S3method("as.linnet", "psp") S3method("as.list", "hyperframe") S3method("as.matrix", "im") S3method("as.matrix", "owin") S3method("as.matrix", "ppx") S3method("as.owin", "boxx") S3method("as.owin", "data.frame") S3method("as.owin", "default") S3method("as.owin", "distfun") S3method("as.owin", "dppm") S3method("as.owin", "funxy") S3method("as.owin", "im") S3method("as.owin", "influence.ppm") S3method("as.owin", "kppm") S3method("as.owin", "layered") S3method("as.owin", "leverage.ppm") S3method("as.owin", "linfun") S3method("as.owin", "linnet") S3method("as.owin", "lintess") S3method("as.owin", "lpp") S3method("as.owin", "lppm") S3method("as.owin", "msr") S3method("as.owin", "nnfun") S3method("as.owin", "owin") S3method("as.owin", "ppm") S3method("as.owin", "ppp") 
S3method("as.owin", "psp") S3method("as.owin", "quad") S3method("as.owin", "quadratcount") S3method("as.owin", "quadrattest") S3method("as.owin", "rmhmodel") S3method("as.owin", "tess") S3method("as.ppm", "dppm") S3method("as.ppm", "kppm") S3method("as.ppm", "lppm") S3method("as.ppm", "ppm") S3method("as.ppm", "profilepl") S3method("as.ppm", "rppm") S3method("as.ppp", "data.frame") S3method("as.ppp", "default") S3method("as.ppp", "influence.ppm") S3method("as.ppp", "lpp") S3method("as.ppp", "matrix") S3method("as.ppp", "ppp") S3method("as.ppp", "psp") S3method("as.ppp", "quad") S3method("as.ppp", "ssf") S3method("as.psp", "data.frame") S3method("as.psp", "default") S3method("as.psp", "linnet") S3method("as.psp", "lpp") S3method("as.psp", "matrix") S3method("as.psp", "owin") S3method("as.psp", "psp") S3method("as.tess", "im") S3method("as.tess", "list") S3method("as.tess", "owin") S3method("as.tess", "quadratcount") S3method("as.tess", "quadrattest") S3method("as.tess", "tess") S3method("auc", "kppm") S3method("auc", "lpp") S3method("auc", "lppm") S3method("auc", "ppm") S3method("auc", "ppp") S3method("bc", "ppm") S3method("berman.test", "lpp") S3method("berman.test", "lppm") S3method("berman.test", "ppm") S3method("berman.test", "ppp") S3method("boundingbox", "default") S3method("boundingbox", "im") S3method("boundingbox", "linnet") S3method("boundingbox", "lpp") S3method("boundingbox", "owin") S3method("boundingbox", "ppp") S3method("boundingbox", "psp") S3method("boundingbox", "solist") S3method("boundingcentre", "owin") S3method("boundingcentre", "ppp") S3method("boundingcircle", "owin") S3method("boundingcircle", "ppp") S3method("boundingradius", "linnet") S3method("boundingradius", "owin") S3method("boundingradius", "ppp") S3method("by", "im") S3method("by", "ppp") S3method("cbind", "fv") S3method("cbind", "hyperframe") S3method("CDF", "density") S3method("cdf.test", "lpp") S3method("cdf.test", "lppm") S3method("cdf.test", "mppm") S3method("cdf.test", "ppm") 
S3method("cdf.test", "ppp") S3method("cdf.test", "slrm") S3method("circumradius", "linnet") S3method("circumradius", "owin") S3method("circumradius", "ppp") S3method("closepairs", "pp3") S3method("closepairs", "ppp") S3method("closing", "owin") S3method("closing", "ppp") S3method("closing", "psp") S3method("clusterfield", "character") S3method("clusterfield", "function") S3method("clusterfield", "kppm") S3method("clusterkernel", "character") S3method("clusterkernel", "kppm") S3method("clusterradius", "character") S3method("clusterradius", "kppm") S3method("coef", "dppm") S3method("coef", "fii") S3method("coef", "kppm") S3method("coef", "lppm") S3method("coef", "mppm") S3method("coef", "ppm") S3method("coef", "slrm") S3method("coef", "summary.fii") S3method("coef", "summary.kppm") S3method("coef", "summary.ppm") S3method("coef", "vblogit") S3method("collapse", "anylist") S3method("collapse", "fv") S3method("compatible", "fasp") S3method("compatible", "fv") S3method("compatible", "im") S3method("compatible", "rat") S3method("compatible", "unitname") S3method("connected", "im") S3method("connected", "linnet") S3method("connected", "lpp") S3method("connected", "owin") S3method("connected", "pp3") S3method("connected", "ppp") S3method("connected", "tess") S3method("contour", "funxy") S3method("contour", "im") S3method("contour", "imlist") S3method("contour", "leverage.ppm") S3method("contour", "listof") S3method("contour", "objsurf") S3method("contour", "ssf") S3method("coords", "ppp") S3method("coords", "ppx") S3method("coords", "quad") S3method("crossdist", "default") S3method("crossdist", "lpp") S3method("crossdist", "pp3") S3method("crossdist", "ppp") S3method("crossdist", "ppx") S3method("crossdist", "psp") S3method("crosspairs", "pp3") S3method("crosspairs", "ppp") S3method("cut", "im") S3method("cut", "lpp") S3method("cut", "ppp") S3method("deletebranch", "linnet") S3method("deletebranch", "lpp") S3method("densityAdaptiveKernel", "ppp") S3method("densityfun", 
"ppp") S3method("density", "linnet") S3method("density", "lpp") S3method("density", "ppp") S3method("density", "ppplist") S3method("density", "psp") S3method("density", "splitppp") S3method("density", "splitppx") S3method("densityVoronoi", "lpp") S3method("densityVoronoi", "ppp") S3method("deriv", "fv") S3method("deviance", "lppm") S3method("deviance", "ppm") S3method("dfbetas", "ppm") S3method("dfbetas", "ppmInfluence") S3method("dffit", "ppm") S3method("[", "diagramobj") S3method("diameter", "box3") S3method("diameter", "boxx") S3method("diameter", "linnet") S3method("diameter", "owin") S3method("dilation", "owin") S3method("dilation", "ppp") S3method("dilation", "psp") S3method("dim", "detpointprocfamily") S3method("dim", "fasp") S3method("dim", "hyperframe") S3method("dim", "im") S3method("dim", "msr") S3method("dimnames", "fasp") S3method("dimnames", "hyperframe") S3method("dimnames", "msr") S3method("dimnames", "sparse3Darray") S3method("dim", "owin") S3method("dim", "sparse3Darray") S3method("distfun", "lpp") S3method("distfun", "owin") S3method("distfun", "ppp") S3method("distfun", "psp") S3method("distmap", "owin") S3method("distmap", "ppp") S3method("distmap", "psp") S3method("domain", "distfun") S3method("domain", "dppm") S3method("domain", "funxy") S3method("domain", "im") S3method("domain", "im") S3method("domain", "influence.ppm") S3method("domain", "kppm") S3method("domain", "layered") S3method("domain", "leverage.ppm") S3method("domain", "linfun") S3method("domain", "lintess") S3method("domain", "lpp") S3method("domain", "lpp") S3method("domain", "lppm") S3method("domain", "msr") S3method("domain", "nnfun") S3method("domain", "pp3") S3method("domain", "ppm") S3method("domain", "ppp") S3method("domain", "ppx") S3method("domain", "psp") S3method("domain", "quad") S3method("domain", "quadratcount") S3method("domain", "quadrattest") S3method("domain", "rmhmodel") S3method("domain", "tess") S3method("duplicated", "ppp") S3method("duplicated", "ppx") 
S3method("edit", "hyperframe") S3method("edit", "im") S3method("edit", "ppp") S3method("edit", "psp") S3method("emend", "lppm") S3method("emend", "ppm") S3method("envelope", "envelope") S3method("envelope", "hasenvelope") S3method("envelope", "kppm") S3method("envelope", "lpp") S3method("envelope", "lppm") S3method("envelope", "matrix") S3method("envelope", "pp3") S3method("envelope", "ppm") S3method("envelope", "ppp") S3method("eroded.volumes", "box3") S3method("eroded.volumes", "boxx") S3method("erosion", "owin") S3method("erosion", "ppp") S3method("erosion", "psp") S3method("evalCovar", "lppm") S3method("evalCovar", "ppm") S3method("extractAIC", "dppm") S3method("extractAIC", "kppm") S3method("extractAIC", "lppm") S3method("extractAIC", "mppm") S3method("extractAIC", "ppm") S3method("extractAIC", "slrm") S3method("extractbranch", "linnet") S3method("extractbranch", "lpp") S3method("family", "vblogit") S3method("fardist", "owin") S3method("fardist", "ppp") S3method("[", "fasp") S3method("fitin", "ppm") S3method("fitin", "profilepl") S3method("fitted", "dppm") S3method("fitted", "kppm") S3method("fitted", "lppm") S3method("fitted", "mppm") S3method("fitted", "ppm") S3method("fitted", "rppm") S3method("fitted", "slrm") S3method("fixef", "mppm") S3method("flipxy", "distfun") S3method("flipxy", "im") S3method("flipxy", "infline") S3method("flipxy", "layered") S3method("flipxy", "msr") S3method("flipxy", "owin") S3method("flipxy", "ppp") S3method("flipxy", "psp") S3method("flipxy", "tess") S3method("format", "numberwithunit") S3method("formula", "dppm") S3method("formula", "fv") S3method("formula", "kppm") S3method("formula", "lppm") S3method("formula", "ppm") S3method("formula", "slrm") S3method("Frame", "default") S3method("[", "fv") S3method("getCall", "mppm") S3method("harmonise", "fv") S3method("harmonise", "im") S3method("harmonise", "msr") S3method("harmonise", "owin") S3method("harmonise", "unitname") S3method("harmonize", "fv") S3method("harmonize", "im") 
S3method("harmonize", "owin") S3method("harmonize", "unitname") S3method("has.close", "default") S3method("has.close", "pp3") S3method("has.close", "ppp") S3method("head", "hyperframe") S3method("head", "ppp") S3method("head", "ppx") S3method("head", "psp") S3method("head", "tess") S3method("hist", "funxy") S3method("hist", "im") S3method("[", "hyperframe") S3method("$", "hyperframe") S3method("identify", "lpp") S3method("identify", "ppp") S3method("identify", "psp") S3method("[", "im") S3method("image", "im") S3method("image", "imlist") S3method("image", "listof") S3method("image", "objsurf") S3method("image", "ssf") S3method("[", "influence.ppm") S3method("influence", "ppm") S3method("influence", "ppmInfluence") S3method("integral", "im") S3method("integral", "influence.ppm") S3method("integral", "leverage.ppm") S3method("integral", "linfun") S3method("integral", "linim") S3method("integral", "msr") S3method("integral", "ssf") S3method("intensity", "detpointprocfamily") S3method("intensity", "dppm") S3method("intensity", "lpp") S3method("intensity", "ppm") S3method("intensity", "ppp") S3method("intensity", "ppx") S3method("intensity", "psp") S3method("intensity", "quadratcount") S3method("intensity", "splitppp") S3method("iplot", "default") S3method("iplot", "layered") S3method("iplot", "linnet") S3method("iplot", "lpp") S3method("iplot", "ppp") S3method("is.connected", "default") S3method("is.connected", "linnet") S3method("is.connected", "ppp") S3method("is.empty", "default") S3method("is.empty", "owin") S3method("is.empty", "ppp") S3method("is.empty", "psp") S3method("is.expandable", "ppm") S3method("is.expandable", "rmhmodel") S3method("is.hybrid", "interact") S3method("is.hybrid", "ppm") S3method("is.marked", "default") S3method("is.marked", "lppm") S3method("is.marked", "msr") S3method("is.marked", "ppm") S3method("is.marked", "ppp") S3method("is.marked", "psp") S3method("is.marked", "quad") S3method("is.multitype", "default") S3method("is.multitype", 
"lpp") S3method("is.multitype", "lppm") S3method("is.multitype", "msr") S3method("is.multitype", "ppm") S3method("is.multitype", "ppp") S3method("is.multitype", "quad") S3method("is.poisson", "interact") S3method("is.poisson", "kppm") S3method("is.poisson", "lppm") S3method("is.poisson", "mppm") S3method("is.poisson", "ppm") S3method("is.poisson", "rmhmodel") S3method("is.poisson", "slrm") S3method("is.stationary", "detpointprocfamily") S3method("is.stationary", "dppm") S3method("is.stationary", "kppm") S3method("is.stationary", "lppm") S3method("is.stationary", "ppm") S3method("is.stationary", "rmhmodel") S3method("is.stationary", "slrm") S3method("Kmodel", "detpointprocfamily") S3method("Kmodel", "dppm") S3method("Kmodel", "kppm") S3method("Kmodel", "ppm") S3method("kppm", "formula") S3method("kppm", "ppp") S3method("kppm", "quad") S3method("labels", "dppm") S3method("labels", "kppm") S3method("labels", "ppm") S3method("labels", "slrm") S3method("[", "layered") S3method("levels", "im") S3method("[", "leverage.ppm") S3method("leverage", "ppm") S3method("leverage", "ppmInfluence") S3method("[", "linim") S3method("[", "linnet") S3method("[", "localpcfmatrix") S3method("logLik", "dppm") S3method("logLik", "kppm") S3method("logLik", "lppm") S3method("logLik", "mppm") S3method("logLik", "ppm") S3method("logLik", "slrm") S3method("logLik", "vblogit") S3method("[", "lpp") S3method("lppm", "formula") S3method("lppm", "lpp") S3method("lurking", "mppm") S3method("lurking", "ppm") S3method("lurking", "ppp") S3method("markformat", "default") S3method("markformat", "ppp") S3method("markformat", "ppx") S3method("markformat", "psp") S3method("marks", "default") S3method("marks", "lintess") S3method("marks", "ppp") S3method("marks", "ppx") S3method("marks", "psp") S3method("marks", "quad") S3method("marks", "ssf") S3method("marks", "tess") S3method("max", "fv") S3method("max", "ssf") S3method("mean", "leverage.ppm") S3method("mean", "linim") S3method("median", "linim") 
S3method("min", "fv") S3method("min", "ssf") S3method("model.frame", "dppm") S3method("model.frame", "kppm") S3method("model.frame", "lppm") S3method("model.frame", "ppm") S3method("model.images", "dppm") S3method("model.images", "kppm") S3method("model.images", "lppm") S3method("model.images", "ppm") S3method("model.images", "slrm") S3method("model.matrix", "dppm") S3method("model.matrix", "ippm") S3method("model.matrix", "kppm") S3method("model.matrix", "lppm") S3method("model.matrix", "mppm") S3method("model.matrix", "ppm") S3method("model.matrix", "slrm") S3method("[", "msr") S3method("multiplicity", "data.frame") S3method("multiplicity", "default") S3method("multiplicity", "ppp") S3method("multiplicity", "ppx") S3method("names", "hyperframe") S3method("nnclean", "pp3") S3method("nnclean", "ppp") S3method("nncross", "default") S3method("nncross", "lpp") S3method("nncross", "pp3") S3method("nncross", "ppp") S3method("nncross", "ppx") S3method("nndensity", "ppp") S3method("nndist", "default") S3method("nndist", "lpp") S3method("nndist", "pp3") S3method("nndist", "ppp") S3method("nndist", "ppx") S3method("nndist", "psp") S3method("nnfun", "lpp") S3method("nnfun", "ppp") S3method("nnfun", "psp") S3method("nnwhich", "default") S3method("nnwhich", "lpp") S3method("nnwhich", "pp3") S3method("nnwhich", "ppp") S3method("nnwhich", "ppx") S3method("nobjects", "lintess") S3method("nobjects", "ppp") S3method("nobjects", "ppx") S3method("nobjects", "psp") S3method("nobjects", "tess") S3method("nobs", "dppm") S3method("nobs", "kppm") S3method("nobs", "lppm") S3method("nobs", "mppm") S3method("nobs", "ppm") S3method("npoints", "pp3") S3method("npoints", "ppp") S3method("npoints", "ppx") S3method("nsegments", "linnet") S3method("nsegments", "lpp") S3method("nsegments", "psp") S3method("nvertices", "default") S3method("nvertices", "linnet") S3method("nvertices", "owin") S3method("objsurf", "dppm") S3method("objsurf", "kppm") S3method("objsurf", "minconfit") S3method("opening", 
"owin") S3method("opening", "ppp") S3method("opening", "psp") S3method("[", "owin") S3method("pairdist", "default") S3method("pairdist", "lpp") S3method("pairdist", "pp3") S3method("pairdist", "ppp") S3method("pairdist", "ppx") S3method("pairdist", "psp") S3method("pairs", "im") S3method("pairs", "linim") S3method("pairs", "listof") S3method("pairs", "solist") S3method("parameters", "dppm") S3method("parameters", "fii") S3method("parameters", "interact") S3method("parameters", "kppm") S3method("parameters", "ppm") S3method("parameters", "profilepl") S3method("pcf", "fasp") S3method("pcf", "fv") S3method("pcfmodel", "detpointprocfamily") S3method("pcfmodel", "dppm") S3method("pcfmodel", "kppm") S3method("pcfmodel", "ppm") S3method("pcfmodel", "zclustermodel") S3method("pcf", "ppp") S3method("periodify", "owin") S3method("periodify", "ppp") S3method("periodify", "psp") S3method("persp", "funxy") S3method("persp", "im") S3method("persp", "leverage.ppm") S3method("persp", "objsurf") S3method("pixellate", "linnet") S3method("pixellate", "owin") S3method("pixellate", "ppp") S3method("pixellate", "psp") S3method("plot", "addvar") S3method("plot", "anylist") S3method("plot", "barplotdata") S3method("plot", "bermantest") S3method("plot", "bw.frac") S3method("plot", "bw.optim") S3method("plot", "cdftest") S3method("plot", "colourmap") S3method("plot", "diagppm") S3method("plot", "dppm") S3method("plot", "envelope") S3method("plot", "fasp") S3method("plot", "fii") S3method("plot", "foo") S3method("plot", "funxy") S3method("plot", "fv") S3method("plot", "hyperframe") S3method("plot", "im") S3method("plot", "imlist") S3method("plot", "indicfun") S3method("plot", "infline") S3method("plot", "influence.ppm") S3method("plot", "kppm") S3method("plot", "laslett") S3method("plot", "layered") S3method("plot", "leverage.ppm") S3method("plot", "linfun") S3method("plot", "linim") S3method("plot", "linnet") S3method("plot", "lintess") S3method("plot", "listof") S3method("plot", 
"localpcfmatrix") S3method("plot", "lpp") S3method("plot", "lppm") S3method("plot", "lurk") S3method("plot", "minconfit") S3method("plot", "mppm") S3method("plot", "msr") S3method("plot", "objsurf") S3method("plot", "onearrow") S3method("plot", "owin") S3method("plot", "parres") S3method("plot", "plotpairsim") S3method("plot", "plotppm") S3method("plot", "pp3") S3method("plot", "ppm") S3method("plot", "ppp") S3method("plot", "pppmatching") S3method("plot", "ppx") S3method("plot", "profilepl") S3method("plot", "psp") S3method("plot", "qqppm") S3method("plot", "quad") S3method("plot", "quadratcount") S3method("plot", "quadrattest") S3method("plot", "rho2hat") S3method("plot", "rhohat") S3method("plot", "rppm") S3method("plot", "scan.test") S3method("plot", "slrm") S3method("plot", "solist") S3method("plot", "spatialcdf") S3method("plot", "splitppp") S3method("plot", "ssf") S3method("plot", "studpermutest") S3method("plot", "symbolmap") S3method("plot", "tess") S3method("plot", "textstring") S3method("plot", "texturemap") S3method("plot", "yardstick") S3method("points", "lpp") S3method("pool", "anylist") S3method("pool", "envelope") S3method("pool", "fasp") S3method("pool", "fv") S3method("pool", "quadrattest") S3method("pool", "rat") S3method("[", "pp3") S3method("ppm", "default") S3method("ppm", "formula") S3method("ppm", "ppp") S3method("ppm", "quad") S3method("[", "ppp") S3method("[", "ppx") S3method("predict", "dppm") S3method("predict", "kppm") S3method("predict", "lppm") S3method("predict", "mppm") S3method("predict", "ppm") S3method("predict", "profilepl") S3method("predict", "rho2hat") S3method("predict", "rhohat") S3method("predict", "rppm") S3method("predict", "slrm") S3method("predict", "vblogit") S3method("predict", "zclustermodel") S3method("print", "addvar") S3method("print", "anylist") S3method("print", "autoexec") S3method("print", "box3") S3method("print", "boxx") S3method("print", "bt.frame") S3method("print", "bugtable") S3method("print", 
"bw.frac") S3method("print", "bw.optim") S3method("print", "colourmap") S3method("print", "densityfun") S3method("print", "detpointprocfamily") S3method("print", "detpointprocfamilyfun") S3method("print", "diagppm") S3method("print", "distfun") S3method("print", "dppm") S3method("print", "envelope") S3method("print", "ewcdf") S3method("print", "fasp") S3method("print", "fii") S3method("print", "funxy") S3method("print", "fv") S3method("print", "fvfun") S3method("print", "hasenvelope") S3method("print", "hierarchicalordering") S3method("print", "hyperframe") S3method("print", "im") S3method("print", "indicfun") S3method("print", "infline") S3method("print", "influence.ppm") S3method("print", "interact") S3method("print", "intermaker") S3method("print", "isf") S3method("print", "kppm") S3method("print", "laslett") S3method("print", "layered") S3method("print", "leverage.ppm") S3method("print", "linfun") S3method("print", "linim") S3method("print", "linnet") S3method("print", "lintess") S3method("print", "localpcfmatrix") S3method("print", "lpp") S3method("print", "lppm") S3method("print", "lurk") S3method("print", "lut") S3method("print", "minconfit") S3method("print", "mppm") S3method("print", "msr") S3method("print", "nnfun") S3method("print", "numberwithunit") S3method("print", "objsurf") S3method("print", "onearrow") S3method("print", "owin") S3method("print", "parres") S3method("print", "plotpairsim") S3method("print", "plotppm") S3method("print", "pp3") S3method("print", "ppm") S3method("print", "ppp") S3method("print", "pppmatching") S3method("print", "ppx") S3method("print", "profilepl") S3method("print", "psp") S3method("print", "qqppm") S3method("print", "quad") S3method("print", "quadrattest") S3method("print", "rat") S3method("print", "rho2hat") S3method("print", "rhohat") S3method("print", "rmhcontrol") S3method("print", "rmhexpand") S3method("print", "rmhInfoList") S3method("print", "rmhmodel") S3method("print", "rmhstart") S3method("print", "rppm") 
S3method("print", "simplepanel") S3method("print", "slrm") S3method("print", "Smoothfun") S3method("print", "solist") S3method("print", "sparse3Darray") S3method("print", "splitppp") S3method("print", "splitppx") S3method("print", "ssf") S3method("print", "summary.distfun") S3method("print", "summary.dppm") S3method("print", "summary.fii") S3method("print", "summary.funxy") S3method("print", "summary.hyperframe") S3method("print", "summary.im") S3method("print", "summary.kppm") S3method("print", "summary.linim") S3method("print", "summary.linnet") S3method("print", "summary.lintess") S3method("print", "summary.listof") S3method("print", "summary.logiquad") S3method("print", "summary.lpp") S3method("print", "summary.lut") S3method("print", "summary.mppm") S3method("print", "summary.owin") S3method("print", "summary.pp3") S3method("print", "summary.ppm") S3method("print", "summary.ppp") S3method("print", "summary.psp") S3method("print", "summary.quad") S3method("print", "summary.rmhexpand") S3method("print", "summary.solist") S3method("print", "summary.splitppp") S3method("print", "summary.splitppx") S3method("print", "summary.ssf") S3method("print", "summary.unitname") S3method("print", "symbolmap") S3method("print", "tess") S3method("print", "textstring") S3method("print", "texturemap") S3method("print", "timed") S3method("print", "unitname") S3method("print", "vblogit") S3method("print", "yardstick") S3method("print", "zclustermodel") S3method("prune", "rppm") S3method("pseudoR2", "lppm") S3method("pseudoR2", "ppm") S3method("psib", "kppm") S3method("[", "psp") S3method("[", "quad") S3method("quadratcount", "ppp") S3method("quadratcount", "splitppp") S3method("quadrat.test", "mppm") S3method("quadrat.test", "ppm") S3method("quadrat.test", "ppp") S3method("quadrat.test", "quadratcount") S3method("quadrat.test", "splitppp") S3method("quantess", "im") S3method("quantess", "owin") S3method("quantess", "ppp") S3method("quantile", "density") S3method("quantile", 
"ewcdf") S3method("quantile", "im") S3method("quantile", "linim") S3method("ranef", "mppm") S3method("range", "fv") S3method("range", "ssf") S3method("[", "rat") S3method("rbind", "hyperframe") S3method("reach", "detpointprocfamily") S3method("reach", "dppm") S3method("reach", "fii") S3method("reach", "interact") S3method("reach", "kppm") S3method("reach", "ppm") S3method("reach", "rmhmodel") S3method("rebound", "im") S3method("rebound", "owin") S3method("rebound", "ppp") S3method("rebound", "psp") S3method("reflect", "default") S3method("reflect", "distfun") S3method("reflect", "im") S3method("reflect", "infline") S3method("reflect", "layered") S3method("reflect", "tess") S3method("relevel", "im") S3method("relevel", "ppp") S3method("relevel", "ppx") S3method("relrisk", "ppm") S3method("relrisk", "ppp") S3method("repul", "dppm") S3method("rescale", "distfun") S3method("rescale", "im") S3method("rescale", "layered") S3method("rescale", "linnet") S3method("rescale", "lpp") S3method("rescale", "msr") S3method("rescale", "owin") S3method("rescale", "ppp") S3method("rescale", "psp") S3method("rescale", "unitname") S3method("residuals", "dppm") S3method("residuals", "kppm") S3method("residuals", "mppm") S3method("residuals", "ppm") S3method("rhohat", "lpp") S3method("rhohat", "lppm") S3method("rhohat", "ppm") S3method("rhohat", "ppp") S3method("rhohat", "quad") S3method("rmhcontrol", "default") S3method("rmhcontrol", "list") S3method("rmhcontrol", "rmhcontrol") S3method("rmh", "default") S3method("rmhmodel", "default") S3method("rmhmodel", "list") S3method("rmhmodel", "ppm") S3method("rmhmodel", "rmhmodel") S3method("rmh", "ppm") S3method("rmhstart", "default") S3method("rmhstart", "list") S3method("rmhstart", "rmhstart") S3method("roc", "kppm") S3method("roc", "lpp") S3method("roc", "lppm") S3method("roc", "ppm") S3method("roc", "ppp") S3method("rose", "default") S3method("rose", "density") S3method("rose", "fv") S3method("rose", "histogram") S3method("rotate", 
"distfun") S3method("rotate", "im") S3method("rotate", "infline") S3method("rotate", "layered") S3method("rotate", "linnet") S3method("rotate", "lpp") S3method("rotate", "msr") S3method("rotate", "owin") S3method("rotate", "ppp") S3method("rotate", "psp") S3method("rotate", "tess") S3method("rounding", "default") S3method("rounding", "pp3") S3method("rounding", "ppp") S3method("rounding", "ppx") S3method("round", "pp3") S3method("round", "ppp") S3method("round", "ppx") S3method("row.names", "hyperframe") S3method("rshift", "ppp") S3method("rshift", "psp") S3method("rshift", "splitppp") S3method("scalardilate", "breakpts") S3method("scalardilate", "default") S3method("scalardilate", "diagramobj") S3method("scalardilate", "distfun") S3method("scalardilate", "im") S3method("scalardilate", "layered") S3method("scalardilate", "linim") S3method("scalardilate", "linnet") S3method("scalardilate", "lpp") S3method("scalardilate", "msr") S3method("scalardilate", "owin") S3method("scalardilate", "ppp") S3method("scalardilate", "psp") S3method("scalardilate", "tess") S3method("scaletointerval", "default") S3method("scaletointerval", "im") S3method("scanmeasure", "im") S3method("scanmeasure", "ppp") S3method("sdr", "ppp") S3method("segregation.test", "ppp") S3method("sharpen", "ppp") S3method("shift", "diagramobj") S3method("shift", "distfun") S3method("shift", "im") S3method("shift", "infline") S3method("shift", "influence.ppm") S3method("shift", "layered") S3method("shift", "leverage.ppm") S3method("shift", "linim") S3method("shift", "linnet") S3method("shift", "lpp") S3method("shift", "msr") S3method("shift", "owin") S3method("shift", "ppp") S3method("shift", "psp") S3method("shift", "quadratcount") S3method("shift", "quadrattest") S3method("shift", "tess") S3method("shortside", "box3") S3method("shortside", "boxx") S3method("shortside", "owin") S3method("sidelengths", "box3") S3method("sidelengths", "boxx") S3method("sidelengths", "owin") S3method("simulate", 
"detpointprocfamily") S3method("simulate", "dppm") S3method("simulate", "kppm") S3method("simulate", "lppm") S3method("simulate", "mppm") S3method("simulate", "ppm") S3method("simulate", "profilepl") S3method("simulate", "rhohat") S3method("simulate", "slrm") S3method("Smoothfun", "ppp") S3method("Smooth", "fv") S3method("Smooth", "im") S3method("Smooth", "influence.ppm") S3method("Smooth", "leverage.ppm") S3method("Smooth", "msr") S3method("Smooth", "ppp") S3method("Smooth", "solist") S3method("Smooth", "ssf") S3method("[", "solist") S3method("sort", "im") S3method("[", "sparse3Darray") S3method("split", "hyperframe") S3method("split", "im") S3method("split", "msr") S3method("[", "splitppp") S3method("split", "ppp") S3method("split", "ppx") S3method("[", "splitppx") S3method("[", "ssf") S3method("str", "hyperframe") S3method("subset", "hyperframe") S3method("subset", "lpp") S3method("subset", "pp3") S3method("subset", "ppp") S3method("subset", "ppx") S3method("subset", "psp") S3method("summary", "anylist") S3method("summary", "distfun") S3method("summary", "dppm") S3method("summary", "envelope") S3method("summary", "fii") S3method("summary", "funxy") S3method("summary", "hyperframe") S3method("summary", "im") S3method("summary", "kppm") S3method("summary", "linfun") S3method("summary", "linim") S3method("summary", "linnet") S3method("summary", "lintess") S3method("summary", "listof") S3method("summary", "logiquad") S3method("summary", "lpp") S3method("summary", "lppm") S3method("summary", "lut") S3method("summary", "mppm") S3method("summary", "msr") S3method("summary", "owin") S3method("summary", "pp3") S3method("summary", "ppm") S3method("summary", "ppp") S3method("summary", "pppmatching") S3method("summary", "ppx") S3method("summary", "profilepl") S3method("summary", "psp") S3method("summary", "quad") S3method("summary", "rmhexpand") S3method("summary", "solist") S3method("summary", "splitppp") S3method("summary", "splitppx") S3method("summary", "ssf") 
S3method("summary", "unitname") S3method("summary", "vblogit") S3method("superimpose", "default") S3method("superimpose", "lpp") S3method("superimpose", "ppp") S3method("superimpose", "ppplist") S3method("superimpose", "psp") S3method("superimpose", "splitppp") S3method("tail", "hyperframe") S3method("tail", "ppp") S3method("tail", "ppx") S3method("tail", "psp") S3method("tail", "tess") S3method("terms", "dppm") S3method("terms", "kppm") S3method("terms", "lppm") S3method("terms", "mppm") S3method("terms", "ppm") S3method("terms", "slrm") S3method("[", "tess") S3method("text", "lpp") S3method("text", "ppp") S3method("text", "psp") S3method("tilenames", "lintess") S3method("tilenames", "tess") S3method("uniquemap", "data.frame") S3method("uniquemap", "default") S3method("uniquemap", "lpp") S3method("uniquemap", "matrix") S3method("uniquemap", "ppp") S3method("uniquemap", "ppx") S3method("unique", "ppp") S3method("unique", "ppx") S3method("unitname", "box3") S3method("unitname", "boxx") S3method("unitname", "default") S3method("unitname", "dppm") S3method("unitname", "im") S3method("unitname", "kppm") S3method("unitname", "linnet") S3method("unitname", "lpp") S3method("unitname", "minconfit") S3method("unitname", "msr") S3method("unitname", "owin") S3method("unitname", "pp3") S3method("unitname", "ppm") S3method("unitname", "ppp") S3method("unitname", "ppx") S3method("unitname", "psp") S3method("unitname", "quad") S3method("unitname", "slrm") S3method("unitname", "tess") S3method("unmark", "lintess") S3method("unmark", "lpp") S3method("unmark", "ppp") S3method("unmark", "ppx") S3method("unmark", "psp") S3method("unmark", "splitppp") S3method("unmark", "ssf") S3method("unmark", "tess") S3method("unstack", "layered") S3method("unstack", "lintess") S3method("unstack", "lpp") S3method("unstack", "msr") S3method("unstack", "ppp") S3method("unstack", "psp") S3method("unstack", "solist") S3method("unstack", "tess") S3method("update", "detpointprocfamily") S3method("update", 
"im") S3method("update", "interact") S3method("update", "ippm") S3method("update", "kppm") S3method("update", "lppm") S3method("update", "msr") S3method("update", "ppm") S3method("update", "rmhcontrol") S3method("update", "rmhstart") S3method("update", "slrm") S3method("update", "symbolmap") S3method("valid", "detpointprocfamily") S3method("valid", "lppm") S3method("valid", "ppm") S3method("vcov", "kppm") S3method("vcov", "lppm") S3method("vcov", "mppm") S3method("vcov", "ppm") S3method("vcov", "slrm") S3method("vertices", "linnet") S3method("vertices", "owin") S3method("volume", "box3") S3method("volume", "boxx") S3method("volume", "linnet") S3method("volume", "owin") S3method("Window", "distfun") S3method("Window", "dppm") S3method("Window", "funxy") S3method("Window", "im") S3method("Window", "influence.ppm") S3method("Window", "kppm") S3method("Window", "layered") S3method("Window", "leverage.ppm") S3method("Window", "linnet") S3method("Window", "lintess") S3method("Window", "lpp") S3method("Window", "lppm") S3method("Window", "msr") S3method("Window", "nnfun") S3method("Window", "ppm") S3method("Window", "ppp") S3method("Window", "psp") S3method("Window", "quad") S3method("Window", "quadratcount") S3method("Window", "quadrattest") S3method("Window", "rmhmodel") S3method("Window", "tess") S3method("with", "fv") S3method("with", "hyperframe") S3method("with", "msr") S3method("with", "ssf") S3method("xtfrm", "im") S3method("ZeroValue", "im") # ......................................... # Assignment methods # ......................................... 
S3method("[<-", "anylist") S3method("coords<-", "ppp") S3method("coords<-", "ppx") S3method("dimnames<-", "fasp") S3method("dimnames<-", "hyperframe") S3method("dimnames<-", "sparse3Darray") S3method("dim<-", "sparse3Darray") S3method("formula<-", "fv") S3method("Frame<-", "default") S3method("Frame<-", "im") S3method("Frame<-", "owin") S3method("Frame<-", "ppp") S3method("[<-", "fv") S3method("$<-", "fv") S3method("[<-", "hyperframe") S3method("$<-", "hyperframe") S3method("[<-", "im") S3method("[<-", "layered") S3method("[[<-", "layered") S3method("levels<-", "im") S3method("[<-", "linim") S3method("[<-", "listof") S3method("marks<-", "lintess") S3method("marks<-", "lpp") S3method("marks<-", "ppp") S3method("marks<-", "ppx") S3method("marks<-", "psp") S3method("marks<-", "ssf") S3method("marks<-", "tess") S3method("names<-", "fv") S3method("names<-", "hyperframe") S3method("[<-", "ppp") S3method("row.names<-", "hyperframe") S3method("[<-", "solist") S3method("[<-", "sparse3Darray") S3method("split<-", "hyperframe") S3method("[<-", "splitppp") S3method("split<-", "ppp") S3method("[<-", "splitppx") S3method("[<-", "tess") S3method("tilenames<-", "lintess") S3method("tilenames<-", "tess") S3method("unitname<-", "box3") S3method("unitname<-", "boxx") S3method("unitname<-", "default") S3method("unitname<-", "dppm") S3method("unitname<-", "im") S3method("unitname<-", "kppm") S3method("unitname<-", "linnet") S3method("unitname<-", "lpp") S3method("unitname<-", "minconfit") S3method("unitname<-", "msr") S3method("unitname<-", "owin") S3method("unitname<-", "pp3") S3method("unitname<-", "ppm") S3method("unitname<-", "ppp") S3method("unitname<-", "ppx") S3method("unitname<-", "psp") S3method("unitname<-", "quad") S3method("unitname<-", "slrm") S3method("unitname<-", "tess") S3method("Window<-", "im") S3method("Window<-", "linnet") S3method("Window<-", "lpp") S3method("Window<-", "ppp") S3method("Window<-", "psp") # ......................................... 
# End of methods # ......................................... spatstat/demo/0000755000176200001440000000000013115225157013043 5ustar liggesusersspatstat/demo/spatstat.R0000755000176200001440000005411313442413652015042 0ustar liggesusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE)) oldoptions <- options(warn=-1) fanfare <- function(stuff) { plot(c(0,1),c(0,1),type="n",axes=FALSE, xlab="", ylab="") text(0.5,0.5, stuff, cex=2.5) } par(mar=c(1,1,2,1)+0.1) fanfare("Spatstat demonstration") fanfare("I. Types of data") plot(swedishpines, main="Point pattern") plot(demopat, cols=c("green", "blue"), main="Multitype point pattern") plot(longleaf, fg="blue", main="Marked point pattern") plot(finpines, main="Point pattern with multivariate marks") a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a, main="Line segment pattern") marks(a) <- sample(letters[1:4], 20, replace=TRUE) plot(a, main="Multitype line segment pattern") marks(a) <- runif(20) plot(a, main="Marked line segment pattern") plot(owin(), main="Rectangular window") plot(letterR, main="Polygonal window") plot(as.mask(letterR), main="Binary mask window") Z <- as.im(function(x,y){ sqrt((x - 1)^2 + (y-1)^2)}, square(2)) plot(Z, main="Pixel image") X <- runifpoint(42) plot(dirichlet(X), main="Tessellation") plot(rpoispp3(100), main="Three-dimensional point pattern") plot(simplenet, main="Linear network (linnet)") X <- rpoislpp(20, simplenet) plot(X, main="Point pattern on linear network (lpp)", show.window=FALSE) V <- as.linim(function(x,y,seg,tp){x^2-y^2}, L=simplenet) plot(V, main="Pixel image on a linear network") fanfare("II. 
Graphics") plot(letterR, col="green", border="red", lwd=2, main="Polygonal window with colour fill") plot(letterR, hatch=TRUE, spacing=0.15, angle=30, main="Polygonal window with line shading") plot(letterR, hatch=TRUE, hatchargs=list(texture=8, spacing=0.12), main="Polygonal window with texture fill") plot(amacrine, chars=c(1,16), main="plot(X, chars = c(1,16))") plot(amacrine, cols=c("red","blue"), chars=16, main="plot(X, cols=c(\"red\", \"blue\"))") opa <- par(mfrow=c(1,2)) plot(longleaf, markscale=0.03, main="markscale=0.03") plot(longleaf, markscale=0.09, main="markscale=0.09") par(opa) plot(longleaf, pch=21, cex=1, bg=colourmap(terrain.colors(128), range=c(0,80)), main="colourmap for numeric mark values") Z <- as.im(function(x,y) { r <- sqrt(x^2+y^2); r * exp(-r) }, owin(c(-5,5),c(-5,5))) plot(Z, main="pixel image: image plot") plot(Z, main="pixel image: image plot (heat colours)", col=heat.colors(256)) plot(Z, main="pixel image: logarithmic colour map", log=TRUE, col=rainbow(128, end=5/6)) contour(Z, main="pixel image: contour plot", axes=FALSE) plot(Z, main="pixel image: image + contour plot") contour(Z, add=TRUE) persp(Z, colmap=terrain.colors(128), shade=0.3, phi=30,theta=100, main="pixel image: perspective plot") ct <- colourmap(rainbow(20), breaks=seq(-1,1,length=21)) plot(ct, main="Colour map for real numbers") ca <- colourmap(rainbow(8), inputs=letters[1:8]) plot(ca, main="Colour map for discrete values") Z <- as.im(nnfun(runifpoint(8))) plot(Z, main="colour image for discrete values") textureplot(Z, main="texture plot for discrete values") W <- owin(c(1,5),c(0,4.5)) Lout <- scaletointerval(distmap(rebound.owin(letterR, W))) Lin <- scaletointerval(distmap(complement.owin(letterR, W))) L <- scaletointerval(eval.im(Lin-Lout)) D <- scaletointerval(density(runifpoint(30, W), adjust=0.3)) X <- scaletointerval(as.im(function(x,y){ x }, W=W)) plot(listof(L=L, D=D, X=X), main="Multiple images") pairs(L, D, X, main="Multiple images: pairs plot") persp(L, 
colin=D, theta=-24, phi=35, box=FALSE, apron=TRUE, main="Two images:\nperspective + colours", shade=0.4, ltheta=225, lphi=10) plot(rgbim(D,X,L,maxColorValue=1), valuesAreColours=TRUE, main="Three images: RGB display") plot(hsvim(D,L,X), valuesAreColours=TRUE, main="Three images: HSV display") V <- as.linim(function(x,y,seg,tp){(y/1000)^2-(x/1000)^3}, L=domain(chicago)) plot(V, main="Pixel image on a linear network (colour plot)") plot(V, style="w", main="Pixel image on a linear network (width plot)") fanfare("III. Conversion between types") W <- as.owin(chorley) plot(W, "window W") plot(as.mask(W)) plot(as.mask(W, dimyx=1000)) plot(as.im(W, value=3)) plot(as.im(W, value=3, na.replace=0), ribbon=TRUE) plot(as.im(function(x,y) {x^2 + y}, W=square(1)), main="as.im(function(x,y){x^2+y})") V <- delaunay(runifpoint(12)) plot(V, main="Tessellation V") plot(as.im(V, dimyx=256), main="as.im(V)") plot(as.owin(V)) X <- swedishpines plot(X, "point pattern X") plot(as.im(X), col=c("white","red"), ribbon=FALSE, xlab="", ylab="") plot(as.owin(X), add=TRUE) fanfare("IV. Subsetting and splitting data") plot(X, "point pattern X") subset <- 1:20 plot(X[subset], main="subset operation: X[subset]") subwindow <- owin(poly=list(x=c(0,96,96,40,40),y=c(0,0,100,100,50))) plot(X[subwindow], main="subset operation: X[subwindow]") plot(lansing, "Lansing Woods data") plot(split(lansing), main="split operation: split(X)", mar.panel=c(0,0,2,0), hsep=1, pch=3) plot(longleaf, main="Longleaf Pines data") plot(cut(longleaf, breaks=3), main=c("cut operation", "cut(longleaf, breaks=3)")) Z <- dirichlet(runifpoint(16)) X <- runifpoint(100) plot(cut(X,Z), main="points cut by tessellation", leg.side="left") plot(Z, add=TRUE) plot(split(X, Z), main="points split by tessellation", mar.panel=c(0,0,2,2), hsep=1) W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) plot(split(X,Y), main="image split by tessellation") fanfare("V. 
Tessellations") plot(quadrats(square(1), nx=5, ny=3)) plot(hextess(square(1), 0.07)) plot(quantess(letterR, "x", 7)) plot(polartess(letterR, nangular=6, radii=(0:4)/2, origin=c(2.8, 1.5)), do.col=TRUE) plot(delaunay(cells)) plot(cells, add=TRUE) plot(dirichlet(cells)) plot(cells, add=TRUE) plot(rpoislinetess(2.5), do.col=TRUE) fanfare("VI. Exploratory data analysis") par(mar=c(3,3,3,2)+0.1) plot(swedishpines, main="Quadrat counts", pch="+") tab <- quadratcount(swedishpines, 4) plot(tab, add=TRUE, lty=2, cex=2, col="blue") par(mar=c(5,3,3,2)+0.1) plot(swedishpines, main="", pch="+") title(main=expression(chi^2 * " test"), cex.main=2) tes <- quadrat.test(swedishpines, 3) tes plot(tes, add=TRUE, col="red", cex=1.5, lty=2, lwd=3) title(sub=paste("p-value =", signif(tes$p.value,3)), cex.sub=1.4) par(mar=c(4,4,3,2)+0.1) tesk <- cdf.test(nztrees, "x") tesk plot(tesk) mur <- lapply(murchison, rescale, s=1000) mur <- lapply(mur, "unitname<-", value="km") X <- mur$gold D <- distfun(mur$faults) plot(X, main="Murchison gold deposits", cols="blue") plot(mur$faults, add=TRUE, col="red") rh <- rhohat(X,D) plot(rh, main="Smoothed rate estimate", xlab="Distance to nearest fault (km)", legend=FALSE) plot(predict(rh), main="predict(rhohat(X,D))") Z <- density(cells, 0.07) plot(Z, main="Kernel smoothed intensity of point pattern") plot(cells, add=TRUE) plot(redwood, main="Redwood data") te <- scan.test(redwood, 0.1, method="poisson") plot(te, main=c("Scan Statistic for redwood data", paste("p-value =", signif(te$p.value,3)))) plot(redwood, add=TRUE) te X <- unique(unmark(shapley)) plot(X, "Shapley galaxy concentration", pch=".") coco <-colourmap(rev(rainbow(128, end=2/3)), range=c(0,1)) pa <- function(i, ...) 
{ if(i == 1) list(chars=c(".", "+"), cols=1:2) else list(size=0.5, pch=16, col=coco) } plot(nnclean(X, k=17), panel.args=pa, mar.panel=c(0,1,1,0), nrows=2, main="Byers-Raftery nearest neighbour cleaning", cex.title=1.2) Y <- sharpen(X, sigma=0.5, edgecorrect=TRUE) plot(Y, main="Choi-Hall data sharpening", pch=".") owpa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X, 'm')") plot(clusterset(X, 'marks', fast=TRUE), add=TRUE, chars=c("o", "+"), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, 'domain', exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(owpa) D <- density(a, sigma=0.05) plot(D, main="Kernel smoothed intensity of line segment pattern") plot(a, add=TRUE) X <- runifpoint(42) plot(dirichlet(X)) plot(X, add=TRUE) plot(delaunay(X)) plot(X, add=TRUE) parsave <- par(mfrow=c(1,1), mar=0.2+c(0,1,3,1)) plot(listof("Longleaf Pines data"=longleaf, "Nearest mark"=nnmark(longleaf), "Kernel smoothing of marks"=Smooth(longleaf,10), "Inverse distance weighted\nsmoothing of marks"=idw(longleaf)), equal.scales=TRUE, halign=TRUE, valign=TRUE, main="", mar.panel=0.2+c(0,0,2,2)) par(parsave) fryplot(cells, main=c("Fry plot","cells data"), pch="+") miplot(longleaf, main="Morishita Index plot", pch=16, col="blue") plot(swedishpines, main="Swedish Pines data") K <- Kest(swedishpines) plot(K, main="K function for Swedish Pines", legendmath=TRUE) en <- envelope(swedishpines, fun=Kest, nsim=10, correction="translate") plot(en, main="Envelopes of K function based on CSR", shade=c("hi", "lo")) pc <- pcf(swedishpines) plot(pc, main="Pair correlation function") plot(swedishpines, main="nearest neighbours") m <- nnwhich(swedishpines) b <- swedishpines[m] arrows(swedishpines$x, swedishpines$y, b$x, b$y, angle=12, length=0.1, col="red") plot(swedishpines %mark% nndist(swedishpines), markscale=1, main="Stienen diagram", legend=FALSE, 
fg="blue") plot(Gest(swedishpines), main=c("Nearest neighbour distance function G", "Gest(swedishpines)"), legendmath=TRUE) Z <- distmap(swedishpines, dimyx=512) plot(swedishpines$window, main="Distance map") plot(Z, add=TRUE) points(swedishpines) plot(Fest(swedishpines), main=c("Empty space function F", "Fest(swedishpines)"), legendmath=TRUE) W <- rebound.owin(letterR, square(5)) plot(distmap(W), main="Distance map") plot(W, add=TRUE) a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) contour(distmap(a), main="Distance map") plot(a, add=TRUE,col="red") plot(Jest(swedishpines), main=c("J-function", "J(r)=(1-G(r))/(1-F(r))")) X <- swedishpines X <- X[sample(1:npoints(X))] Z <- nnfun(X) plot(as.owin(X), main="Nearest neighbour map") plot(Z, add=TRUE) points(X) plot(allstats(swedishpines)) Fig4b <- residualspaper$Fig4b plot(Fig4b, main="Inhomogeneous point pattern") plot(Kinhom(Fig4b), main="Inhomogeneous K-function") plot(pcfinhom(Fig4b, stoyan=0.1), main="Inhomogeneous pair correlation") plot(Ginhom(Fig4b, sigma=0.06), main="Inhomogeneous G-function") plot(Jinhom(Fig4b, sigma=0.06), main="Inhomogeneous J-function") X <- unmark(bronzefilter) plot(X, "Bronze filter data") lam <- predict(ppm(X ~x)) plot(Kscaled(X, lam), xlim=c(0, 1.5), main="Locally-scaled K function") plot(urkiola) plot(split(urkiola), cex=0.5) plot(density(split(urkiola))) contour(density(split(urkiola)), panel.begin=as.owin(urkiola)) plot(relrisk(urkiola), main="Relative risk (cross-validated)") plot(bramblecanes) br <- rescale(bramblecanes) plot(alltypes(br, "K"), mar.panel=c(4,5,2,2)+0.1) ama <- rescale(amacrine) plot(alltypes(ama, Lcross, envelope=TRUE, nsim=9), . 
- r ~ r, ylim=c(-25, 5)) ponderosa.extra$plotit(main="Ponderosa Pines") L <- localL(ponderosa) pL <- plot(L, lty=1, col=1, legend=FALSE, main=c("neighbourhood density functions", "for Ponderosa Pines"), cex.main=0.8) parsave <- par(mfrow=c(1,2)) ponderosa.extra$plotit() par(pty="s") plot(L, iso007 ~ r, main="point B") par(mar=0.2+c(1,1,3,1)) ponderosa.extra$plotit() L12 <- localL(ponderosa, rvalue=12) P12 <- ponderosa %mark% L12 Z12 <- Smooth(P12, sigma=5, dimyx=128) plot(Z12, col=topo.colors(128), main=c("smoothed", "neighbourhood density"), cex.main=0.8) contour(Z12, add=TRUE) points(ponderosa, pch=16, cex=0.5) plot(amacrine, main="Amacrine cells data", cex.main=0.8) par(pty="s") mkc <- markcorr(amacrine, correction="translate", method="density", kernel="epanechnikov") plot(mkc, main="Mark correlation function", legend=FALSE, cex.main=0.8) par(parsave) par(mar=0.2+c(4,4,3,1)) plot(alltypes(amacrine, markconnect), title="Mark connection functions for amacrine cells") parsave <- par(mfrow=c(1,2)) parspruce2 <- par(mar=0.2+c(0,2,2,0)) plot(spruces, cex.main=0.8, markscale=10) par(pty="s", mar=0.2+c(2,3,2,0)) plot(markcorr(spruces), main="Mark correlation", legendpos="bottomright") par(parspruce2) plot(spruces, cex.main=0.8, markscale=10) par(pty="s", mar=0.2+c(2,3,2,0)) plot(markvario(spruces), main="Mark variogram", legendpos="topright") par(parsave) plot(listof("Emark(spruces)"=Emark(spruces), "Vmark(spruces)"=Vmark(spruces)), main="Independence diagnostics", ylim.covers=0, legendpos="bottom") par3 <- par(mfrow=c(1,2)) X <- rpoispp3(100) plot(X, main="3D point pattern X") plot(K3est(X), main="K-function in 3D") plot(X, main="3D point pattern X") plot(G3est(X), main="G-function in 3D", legendpos="bottomright") par(par3) par(mfrow=c(1,3)) X <- unmark(chicago) plot(X, col="green", cols="red", pch=16, main="Chicago Street Crimes", cex.main=0.75, show.window=FALSE) plot(linearK(X, correction="none"), main="Network K-function", cex.main=0.75) plot(linearK(X, 
correction="Ang"), main="Corrected K-function", cex.main=0.75) par(mfrow=c(1,1)) fanfare("VII. Model-fitting") parsave <- par(mar=0.2+c(1,1,3,2)) plot(japanesepines) fit <- ppm(japanesepines ~1) print(fit) fit <- ppm(japanesepines ~polynom(x,y,2)) print(fit) plot(fit, how="image", se=FALSE, main=c("Inhomogeneous Poisson model", "fit by maximum likelihood", "Fitted intensity")) plot(fit, how="image", trend=FALSE, main=c("Standard error", "of fitted intensity")) plot(leverage(fit)) plot(influence(fit)) plot(mur$gold, main="Murchison gold deposits", cols="blue") plot(mur$faults, add=TRUE, col="red") fit <- ppm(mur$gold ~D, covariates=list(D=distfun(mur$faults))) par(mar=0.2+c(4,4,4,2)) plot(parres(fit, "D"), main="Partial residuals from loglinear Poisson model", xlab="Distance to nearest fault (km)", ylab="log intensity of gold", legend=FALSE) legend("bottomleft", legend=c("partial residual", "loglinear fit"), col=c(1,4), lty=c(1,4)) par(mar=rep(0.2, 4), mfrow=c(1,1)) fitT <- kppm(redwood ~1, clusters="Thomas") simT <- simulate(fitT)[[1]] plot(listof(redwood, simT), main.panel=c("Redwood", "simulation from\nfitted Thomas model"), main="", mar.panel=0.2, equal.scales=TRUE) mop <- par(mfrow=c(1,2), pty="s", mar=rep(4.4, 4)) plot(fitT, xname=c("Thomas model", "minimum contrast fit"), pause=FALSE) par(mop) oop <- par(pty="s", mar=0.2+c(4,4,4,2)) os <- objsurf(fitT) plot(os, main="Minimum contrast objective function", col=terrain.colors(128)) contour(os, add=TRUE) par(oop) parra <- par(mfrow=c(1,2), mar=0.2+c(3,3,4,2)) plot(swedishpines) fit <- ppm(swedishpines ~1, Strauss(r=7)) print(fit) plot(fit, how="image", main=c("Strauss model", "fit by maximum pseudolikelihood", "Conditional intensity plot")) # fitted interaction plot(swedishpines) fit <- ppm(swedishpines ~1, PairPiece(c(3,5,7,9,11,13))) plot(fitin(fit), legend=FALSE, main=c("Pairwise interaction model", "fit by maximum pseudolikelihood")) # simulation par(mfrow=c(1,1), mar=0.5+c(0,0,2,0)) Xsim <- rmh(model=fit, 
start=list(n.start=80), control=list(nrep=100)) plot(listof(swedishpines, Xsim), main="", main.panel=c("Swedish Pines", "Simulation from\nfitted Strauss model"), mar.panel=c(0,0,3,0),hsep=1,equal.scales=TRUE) # model compensator par(parra) par(mar=0.2+c(4,4,3,1)) plot(swedishpines) fit <- ppm(swedishpines ~1, Strauss(r=7)) plot(Kcom(fit), cbind(iso, icom, pois) ~ r, legend=FALSE, main="model compensators") legend("topleft", legend=c("empirical K function", "Strauss model compensator of K", "Poisson theoretical K"), lty=1:3, col=1:3, inset=0.05) par(parsave) # Multitype data dpat <- rescale(demopat, 8) unitname(dpat) <- c("mile", "miles") dpat plot(dpat, cols=c("red", "blue")) fit <- ppm(dpat ~marks + polynom(x,y,2), Poisson()) plot(fit, trend=TRUE, se=TRUE) # Linear network data plot(spiders) fit <- lppm(spiders ~ polynom(x,y,2)) anova(fit, test="Chi") (fit <- step(fit)) lam <- predict(fit) plot(lam, main="Point process model on linear network", ribscale=1000) plot(spiders, add=TRUE, pch=16, show.network=FALSE) fanfare("VIII. 
Simulation") plot(letterR, main="Poisson random points") lambda <- 10/area.owin(letterR) points(rpoispp(lambda, win=letterR)) points(rpoispp(9 * lambda, win=letterR)) points(rpoispp(90 * lambda, win=letterR)) plot(rpoispp(100)) plot(rpoispp(function(x,y){1000 * exp(-3*x)}, 1000), main="rpoispp(function)") plot(rMaternII(200, 0.05)) plot(rSSI(0.05, 200)) plot(rThomas(10, 0.2, 5)) plot(rMatClust(10, 0.05, 4)) plot(rCauchy(30, 0.01, 5)) plot(rVarGamma(30, 2, 0.02, 5)) plot(rGaussPoisson(30, 0.05, 0.5)) if(require(RandomFields) && RandomFieldsSafe()) { X <- rLGCP("exp", 4, var=0.2, scale=0.1) plot(attr(X, "Lambda"), main="log-Gaussian Cox process") plot(X, add=TRUE, pch=16) } plot(rStrauss(200, 0.3, 0.07)) plot(rDiggleGratton(200,0.03,0.08)) plot(rDGS(300, 0.05)) plot(redwood, main="random thinning - rthin()") points(rthin(redwood, 0.5), col="green", cex=1.4) plot(rcell(nx=15)) plot(rsyst(nx=5)) abline(h=(1:4)/5, lty=2) abline(v=(1:4)/5, lty=2) plot(rstrat(nx=5)) abline(h=(1:4)/5, lty=2) abline(v=(1:4)/5, lty=2) X <- rsyst(nx=10) plot(rjitter(X, 0.02)) Xg <- rmh(list(cif="geyer", par=list(beta=1.25, gamma=1.6, r=0.2, sat=4.5), w=c(0,10,0,10)), control=list(nrep=1e4), start=list(n.start=200)) plot(Xg, main=paste("Geyer saturation process\n", "rmh() with cif=\"geyer\"")) L <- as.psp(matrix(runif(20), 5, 4), window=square(1)) plot(L, main="runifpointOnLines(30, L)") plot(runifpointOnLines(30, L), add=TRUE, pch="+") plot(L, main="rpoisppOnLines(3, L)") plot(rpoisppOnLines(3, L), add=TRUE, pch="+") plot(runiflpp(20, simplenet)) plot(rpoislpp(5, simplenet)) plot(rpoisline(10)) plot(rlinegrid(30, 0.1)) spatstat.options(npixel=256) X <- dirichlet(runifpoint(30)) plot(rMosaicSet(X, 0.4), col="green", border=NA) plot(X, add=TRUE) plot(rMosaicField(X, runif)) plot(rMosaicSet(rpoislinetess(3), 0.5), col="green", border=NA, main="Switzer's random set") spatstat.options(npixel=100) plot(Halton(512, c(2,3)), main="quasirandom pattern") plot(Halton(16384, c(2,3)), main="quasirandom 
pattern", pch=".") fanfare("IX. Geometry") A <- letterR B <- shift(letterR, c(0.2,0.1)) plot(boundingbox(A,B), main="shift", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") B <- rotate(letterR, 0.2) plot(boundingbox(A,B), main="rotate", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") mat <- matrix(c(1.1, 0, 0.3, 1), 2, 2) B <- affine(letterR, mat=mat, vec=c(0.2,-0.1)) plot(boundingbox(A,B), main="affine", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") par1x2 <- par(mfrow=c(1,2)) L <- rpoisline(10, owin(c(1.5,4.5),c(0.2,3.6))) plot(L, main="Line segment pattern") plot(L$window, main="L[window]", type="n") plot(L[letterR], add=TRUE) plot(letterR, add=TRUE, border="red") par(par1x2) a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a, main="Self-crossing points") plot(selfcrossing.psp(a), add=TRUE, col="red") a <- as.psp(matrix(runif(20), 5, 4), window=square(1)) b <- rstrat(square(1), 5) plot(a, lwd=3, col="green", main="project points to segments") plot(b, add=TRUE, col="red", pch=16) v <- project2segment(b, a) Xproj <- v$Xproj plot(Xproj, add=TRUE, pch=16) arrows(b$x, b$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red") plot(a, main="pointsOnLines(L)") plot(pointsOnLines(a, np=100), add=TRUE, pch="+") parry <- par(mfrow=c(1,3), mar=0.3+c(1,1,3,1)) X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8)) plot(X, cex.main=0.75) plot(letterR, cex.main=0.75) plot(intersect.tess(X, letterR), cex.main=0.75) X <- dirichlet(runifpoint(10)) plot(X) L <- infline(0.3,0.5) plot(owin(), main="L", cex.main=0.75) plot(L, col="red", lwd=2, cex.main=0.75) plot(chop.tess(X,L), cex.main=0.75) par(parry) W <- chorley$window plot(W, main="simplify.owin") WS <- simplify.owin(W, 2) plot(WS, add=TRUE, border="green") nopa <- par(mfrow=c(2,2)) Rbox <- grow.rectangle(as.rectangle(letterR), 0.3) v <- erode.owin(letterR, 0.25) plot(Rbox, type="n", main="erode.owin", cex.main=0.75) plot(letterR, add=TRUE, col="red", 
cex.main=0.75) plot(v, add=TRUE, col="blue") v <- dilate.owin(letterR, 0.25) plot(Rbox, type="n", main="dilate.owin", cex.main=0.75) plot(v, add=TRUE, col="blue") plot(letterR, add=TRUE, col="red") v <- closing.owin(letterR, 0.3) plot(Rbox, type="n", main="closing.owin", cex.main=0.75) plot(v, add=TRUE, col="blue") plot(letterR, add=TRUE, col="red") v <- opening.owin(letterR, 0.3) plot(Rbox, type="n", main="opening.owin", cex.main=0.75) plot(letterR, add=TRUE, col="red") plot(v, add=TRUE, col="blue") par(nopa) laslett(heather$fine, main="Laslett's Transform") fanfare("X. Operations on pixel images") Z <- distmap(swedishpines, dimyx=512) plot(Z, main="An image Z") plot(levelset(Z, 4)) plot(cut(Z, 5)) plot(eval.im(sqrt(Z) - 3)) plot(solutionset(abs(Z - 6) <= 1)) nopa <- par(mfrow=c(1,2)) plot(Z) segments(0,0,96,100,lwd=2) plot(transect.im(Z)) par(nopa) d <- distmap(cells, dimyx=256) W <- levelset(d, 0.06) nopa <- par(mfrow=c(1,2)) plot(W) plot(connected(W)) par(nopa) Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) fanfare("XI. 
Programming tools") showoffK <- function(Y, current, ..., fullpicture,rad) { plot(fullpicture, main=c("Animation using `applynbd'", "explaining the K function")) points(Y, cex=2) u <- current points(u[1],u[2],pch="+",cex=3) theta <- seq(0,2*pi,length=100) polygon(u[1]+ rad * cos(theta),u[2]+rad*sin(theta)) text(u[1]+rad/3,u[2]+rad/2,Y$n,cex=3) if(runif(1) < 0.2) Sys.sleep(runif(1, max=0.4)) return(npoints(Y)) } par(ask=FALSE) applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE) par(oldpar) options(oldoptions) spatstat/demo/data.R0000755000176200001440000000614013115273007014100 0ustar liggesusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE)) oldoptions <- options(warn=-1) plot(amacrine) plot(anemones, markscale=1) ants.extra$plotit() plot(austates) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) plot(betacells) plot(bramblecanes, cols=1:3) plot(split(bramblecanes)) plot(bronzefilter,markscale=2) plot(cells) plot(chicago, main="Chicago Street Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) chorley.extra$plotit() plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") plot(clmfires.extra$clmcov200, main="Covariates for forest fires") plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) plot(demopat) plot(finpines, main="Finnish pines") wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) plot(gordon, main="People in Gordon Square", pch=16) plot(gorillas, which.marks=1, 
chars=c(1,3), cols=2:3, main="Gorilla nest sites") plot(hamster, cols=c(2,4)) plot(heather) plot(humberside) plot(hyytiala, cols=2:5) plot(japanesepines) plot(lansing) plot(split(lansing)) plot(longleaf) plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) plot(murchison, main="Murchison data") plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") plot(nbfires, use.marks=FALSE, pch=".") plot(split(nbfires), use.marks=FALSE, chars=".") plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) plot(osteo[1:10,], tick.marks=FALSE, xlab="", ylab="", zlab="") plot(paracou, cols=2:3, chars=c(16,3)) ponderosa.extra$plotit() pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") rm(pyr) plot(redwood) plot(redwood3, add=TRUE, pch=20) redwoodfull.extra$plotit() plot(residualspaper$Fig1) plot(residualspaper$Fig4a) plot(residualspaper$Fig4b) plot(residualspaper$Fig4c) shapley.extra$plotit(main="Shapley") plot(simdat) plot(spiders, pch=16, show.window=FALSE) plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) plot(spruces, maxsize=min(nndist(spruces))) plot(swedishpines) plot(urkiola, cex=0.5, cols=2:3) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) plot(waterstriders) par(oldpar) options(oldoptions) spatstat/demo/sumfun.R0000644000176200001440000000775213421320306014506 0ustar liggesusers## demonstration of all summary functions opa <- par(mfrow=c(1,1), mar=c(0,0,1,0)+0.2) ## Ripley's K-function plot(swedishpines) par(mar=c(4,4,2,1)+0.2) plot(Kest(swedishpines)) ## Besag's transformation 
plot(Lest(swedishpines)) ## pair correlation function plot(pcf(swedishpines)) par(mfrow=c(2,3), mar=c(0,0,1,0)+0.2) ## Showing the utility of the K-function plot(cells) plot(nztrees) plot(redwood) par(mar=c(4,4,2,1)+0.2) plot(Kest(cells)) plot(Kest(nztrees)) plot(Kest(redwood)) ## Showing the utility of the pair correlation function par(mar=c(0,0,1,0)+0.2) plot(cells) plot(nztrees) plot(redwood) par(mar=c(4,4,2,1)+0.2) plot(pcf(cells)) plot(pcf(nztrees)) plot(pcf(redwood)) ## par(mfrow=c(1,1)) ## Analogues for inhomogeneous patterns ## Reweighted K-function plot(japanesepines) fit <- ppm(japanesepines, ~polynom(x,y,2)) plot(predict(fit)) plot(Kinhom(japanesepines, fit)) plot(pcfinhom(japanesepines, fit)) plot(Linhom(japanesepines)) ## Rescaled K-function plot(unmark(bronzefilter)) plot(Kscaled(bronzefilter)) fit <- ppm(unmark(bronzefilter), ~x) plot(predict(fit)) plot(unmark(bronzefilter), add=TRUE) plot(Kscaled(bronzefilter, fit)) plot(Lscaled(bronzefilter, fit)) ## Local indicators of spatial association plot(localL(swedishpines)) plot(localK(swedishpines)) ## anisotropic plot(Ksector(redwood, 0, 90)) plot(Rf <- pairorient(redwood, 0.05, 0.15)) rose(Rf, main="Rose diagram of pair orientation distribution") plot(deriv(Rf, spar=0.6, Dperiodic=TRUE)) rose(nnorient(redwood)) ## par(mfrow=c(2,3), mar=rep(0.2, 4)) ## Empty space function F plot(cells) plot(nztrees) plot(redwood) par(mar=c(4,4,2,1)+0.2) plot(Fest(cells)) plot(Fest(nztrees)) plot(Fest(redwood)) ## Nearest neighbour distance function G par(mar=rep(0.2, 4)) plot(cells) plot(nztrees) plot(redwood) par(mar=c(4,4,2,1)+0.2) plot(Gest(cells)) plot(Gest(nztrees)) plot(Gest(redwood)) ## J-function par(mar=rep(0.2, 4)) plot(cells) plot(nztrees) plot(redwood) par(mar=c(4,4,2,1)+0.2) plot(Jest(cells)) plot(Jest(nztrees)) plot(Jest(redwood)) par(mfrow=c(1,1), mar=c(4,4,2,1)+0.2) ## versions for inhomogeneous patterns plot(Finhom(japanesepines)) plot(Ginhom(japanesepines)) plot(Jinhom(japanesepines)) ## Display 
F,G,J,K plot(allstats(swedishpines)) ## Multitype patterns plot(amacrine) plot(Kcross(amacrine)) plot(Kdot(amacrine)) I <- (marks(amacrine) == "on") J <- (marks(amacrine) == "off") plot(Kmulti(amacrine, I, J)) plot(alltypes(amacrine, "K")) plot(Lcross(amacrine)) plot(Ldot(amacrine)) plot(pcfcross(amacrine)) plot(pcfdot(amacrine)) plot(pcfmulti(amacrine, I, J)) plot(Gcross(amacrine)) plot(Gdot(amacrine)) plot(Gmulti(amacrine, I, J)) plot(alltypes(amacrine, "G")) plot(Jcross(amacrine)) plot(Jdot(amacrine)) plot(Jmulti(amacrine,I,J)) plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, "F")) plot(Iest(amacrine)) plot(markconnect(amacrine)) ## Multitype, inhomogeneous plot(Kcross.inhom(amacrine)) plot(Kdot.inhom(amacrine)) plot(Kmulti.inhom(amacrine, I, J)) plot(Lcross.inhom(amacrine)) plot(Ldot.inhom(amacrine)) plot(pcfcross.inhom(amacrine)) plot(pcfdot.inhom(amacrine)) plot(pcfmulti.inhom(amacrine, I, J)) ## Numerical marks plot(markcorr(longleaf)) plot(markvario(longleaf)) plot(Emark(longleaf)) plot(Vmark(longleaf)) ## Linear networks plot(chicago) plot(linearK(chicago)) plot(linearKcross(chicago)) plot(linearKdot(chicago)) plot(linearpcf(chicago)) plot(linearpcfcross(chicago)) plot(linearpcfdot(chicago)) lam <- rep(intensity(unmark(chicago)), npoints(chicago)) A <- split(chicago)$assault B <- split(chicago)$burglary lamA <- rep(intensity(A), npoints(A)) lamB <- rep(intensity(B), npoints(B)) plot(linearKinhom(chicago, lam)) plot(linearKcross.inhom(chicago, "assault", "burglary", lamA, lamB)) plot(linearKdot.inhom(chicago, "assault", lamA, lam)) plot(linearpcfinhom(chicago, lam)) plot(linearpcfcross.inhom(chicago, "assault", "burglary", lamA, lamB)) plot(linearpcfdot.inhom(chicago, "assault", lamA, lam)) plot(linearmarkconnect(chicago)) plot(linearmarkequal(chicago)) rm(I,J,fit) par(opa) spatstat/demo/00Index0000755000176200001440000000032413115273007014174 0ustar liggesusersspatstat Demonstration of spatstat library diagnose Demonstration of diagnostic capabilities 
for models in spatstat data Datasets in spatstat sumfun Demonstration of nonparametric summary functions in spatstat spatstat/demo/diagnose.R0000755000176200001440000001230013115273007014753 0ustar liggesusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && (.Device %in% c("X11", "GTK", "windows", "Macintosh"))) par(mfrow=c(1,1)) oldoptions <- options(warn = -1) # ####################################################### # X <- rpoispp(function(x,y) { 1000 * exp(- 4 * x)}, 1000) plot(X, main="Inhomogeneous Poisson pattern") fit.hom <- ppm(X ~1, Poisson()) fit.inhom <- ppm(X ~x, Poisson()) diagnose.ppm(fit.inhom, which="marks", type="Pearson", main=c("Mark plot", "Circles for positive residual mass", "Colour for negative residual density")) par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="marks", main=c("Wrong model", "(homogeneous Poisson)", "raw residuals")) diagnose.ppm(fit.inhom, which="marks", main=c("Right model", "(inhomogeneous Poisson)", "raw residuals")) par(mfrow=c(1,1)) diagnose.ppm(fit.inhom, which="smooth", main="Smoothed residual field") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="smooth", main=c("Wrong model", "(homogeneous Poisson)", "Smoothed residual field")) diagnose.ppm(fit.inhom, which="smooth", main=c("Right model", "(inhomogeneous Poisson)", "Smoothed residual field")) par(mfrow=c(1,1)) diagnose.ppm(fit.inhom, which="x") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="x", main=c("Wrong model", "(homogeneous Poisson)", "lurking variable plot for x")) diagnose.ppm(fit.inhom, which="x", main=c("Right model", "(inhomogeneous Poisson)", "lurking variable plot for x")) par(mfrow=c(1,1)) diagnose.ppm(fit.hom, type="Pearson",main="standard diagnostic plots") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, main=c("Wrong model", "(homogeneous Poisson)")) diagnose.ppm(fit.inhom, main=c("Right model", "(inhomogeneous Poisson)")) par(mfrow=c(1,1)) # 
####################################################### # LEVERAGE/INFLUENCE plot(leverage(fit.inhom)) plot(influence(fit.inhom)) plot(dfbetas(fit.inhom)) # ####################################################### # COMPENSATORS ## Takes a long time... CF <- compareFit(listof(hom=fit.hom, inhom=fit.inhom), Kcom, same="iso", different="icom") plot(CF, main="model compensators", legend=FALSE) legend("topleft", legend=c("empirical K function", "compensator of CSR", "compensator of inhomogeneous Poisson"), lty=1:3, col=1:3) # ####################################################### # Q - Q PLOTS # qqplot.ppm(fit.hom, 40) #conclusion: homogeneous Poisson model is not correct title(main="Q-Q plot of smoothed residuals") qqplot.ppm(fit.inhom, 40) # TAKES A WHILE... title(main=c("Right model", "(inhomogeneous Poisson)", "Q-Q plot of smoothed residuals")) # conclusion: fitted inhomogeneous Poisson model looks OK # ####################################################### # plot(cells) fitPoisson <- ppm(cells ~1, Poisson()) diagnose.ppm(fitPoisson, main=c("CSR fitted to cells data", "Raw residuals", "No suggestion of departure from CSR")) diagnose.ppm(fitPoisson, type="pearson", main=c("CSR fitted to cells data", "Pearson residuals", "No suggestion of departure from CSR")) # These diagnostic plots do NOT show evidence of departure from uniform Poisson plot(Kcom(fitPoisson), cbind(iso, icom) ~ r) plot(Gcom(fitPoisson), cbind(han, hcom) ~ r) # K compensator DOES show strong evidence of departure from uniform Poisson qqplot.ppm(fitPoisson, 40) title(main=c("CSR fitted to cells data", "Q-Q plot of smoothed raw residuals", "Strong suggestion of departure from CSR")) # Q-Q plot DOES show strong evidence of departure from uniform Poisson. 
# fitStrauss <- ppm(cells ~1, Strauss(r=0.1)) diagnose.ppm(fitStrauss, main=c("Strauss model fitted to cells data", "Raw residuals")) diagnose.ppm(fitStrauss, type="pearson", main=c("Strauss model fitted to cells data", "Pearson residuals")) plot(Kcom(fitStrauss), cbind(iso, icom) ~ r) plot(Gcom(fitStrauss), cbind(han, hcom) ~ r) # next line takes a LOOONG time ... qqplot.ppm(fitStrauss, 40, type="pearson") title(main=c("Strauss model fitted to cells data", "Q-Q plot of smoothed Pearson residuals", "Suggests adequate fit")) # Conclusion: Strauss model seems OK # ####################################################### # plot(nztrees) fit <- ppm(nztrees ~1, Poisson()) diagnose.ppm(fit, type="pearson") title(main=c("CSR fitted to NZ trees", "Pearson residuals")) diagnose.ppm(fit, type="pearson", cumulative=FALSE) title(main=c("CSR fitted to NZ trees", "Pearson residuals (non-cumulative)")) lurking(fit, expression(x), type="pearson", cumulative=FALSE, splineargs=list(spar=0.3)) # Sharp peak at right is suspicious qqplot.ppm(fit, 40, type="pearson") title(main=c("CSR fitted to NZ trees", "Q-Q plot of smoothed Pearson residuals")) # Slight suggestion of departure from Poisson at top right of pattern. par(oldpar) options(oldoptions) spatstat/man/0000755000176200001440000000000013616220274012673 5ustar liggesusersspatstat/man/as.ppm.Rd0000644000176200001440000000365013333543262014365 0ustar liggesusers\name{as.ppm} \alias{as.ppm} \alias{as.ppm.ppm} \alias{as.ppm.profilepl} \alias{as.ppm.kppm} \alias{as.ppm.dppm} \title{Extract Fitted Point Process Model} \description{ Extracts the fitted point process model from some kind of fitted model. } \usage{ as.ppm(object) \method{as.ppm}{ppm}(object) \method{as.ppm}{profilepl}(object) \method{as.ppm}{kppm}(object) \method{as.ppm}{dppm}(object) } \arguments{ \item{object}{An object that includes a fitted Poisson or Gibbs point process model. 
An object of class \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} or \code{"dppm"} or possibly other classes. } } \details{ The function \code{as.ppm} extracts the fitted point process model (of class \code{"ppm"}) from a suitable object. The function \code{as.ppm} is generic, with methods for the classes \code{"ppm"}, \code{"profilepl"}, \code{"kppm"} and \code{"dppm"}, and possibly for other classes. For the class \code{"profilepl"} of models fitted by maximum profile pseudolikelihood, the method \code{as.ppm.profilepl} extracts the fitted point process model (with the optimal values of the irregular parameters). For the class \code{"kppm"} of models fitted by minimum contrast (or Palm or composite likelihood) using Waagepetersen's two-step estimation procedure (see \code{\link{kppm}}), the method \code{as.ppm.kppm} extracts the Poisson point process model that is fitted in the first stage of the procedure. The behaviour for the class \code{"dppm"} is analogous to the \code{"kppm"} case above. } \value{ An object of class \code{"ppm"}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{ppm}}, \code{\link{profilepl}}. } \examples{ # fit a model by profile maximum pseudolikelihood rvals <- data.frame(r=(1:10)/100) pfit <- profilepl(rvals, Strauss, cells, ~1) # extract the fitted model fit <- as.ppm(pfit) } \keyword{spatial} \keyword{models} spatstat/man/plot.envelope.Rd0000644000176200001440000000273113333543264015762 0ustar liggesusers\name{plot.envelope} \alias{plot.envelope} \title{Plot a Simulation Envelope} \description{ Plot method for the class \code{"envelope"}. } \usage{ \method{plot}{envelope}(x, \dots, main) } \arguments{ \item{x}{ An object of class \code{"envelope"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{main}{Main title for plot.} \item{\dots}{ Extra arguments passed to \code{\link{plot.fv}}. 
} } \value{ Either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"envelope"} of simulation envelopes. Objects of this class are created by the command \code{\link{envelope}}. This plot method is currently identical to \code{\link{plot.fv}}. Its default behaviour is to shade the region between the upper and lower envelopes in a light grey colour. To suppress the shading and plot the upper and lower envelopes as curves, set \code{shade=NULL}. To change the colour of the shading, use the argument \code{shadecol} which is passed to \code{\link{plot.fv}}. See \code{\link{plot.fv}} for further information on how to control the plot. } \examples{ data(cells) E <- envelope(cells, Kest, nsim=19) plot(E) plot(E, sqrt(./pi) ~ r) } \seealso{ \code{\link{envelope}}, \code{\link{plot.fv}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/reduced.sample.Rd0000644000176200001440000000640013333543264016060 0ustar liggesusers\name{reduced.sample} \alias{reduced.sample} \title{Reduced Sample Estimator using Histogram Data} \description{ Compute the Reduced Sample estimator of a survival time distribution function, from histogram data } \usage{ reduced.sample(nco, cen, ncc, show=FALSE, uppercen=0) } \arguments{ \item{nco}{vector of counts giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{cen}{vector of counts giving the histogram of censoring times } \item{ncc}{vector of counts giving the histogram of censoring times for the uncensored observations only } \item{uppercen}{ number of censoring times greater than the rightmost histogram breakpoint (if there are any) } \item{show}{Logical value controlling the amount of detail returned by the function value (see below) } } \value{ If \code{show = FALSE}, a numeric vector giving the values of the reduced sample estimator. 
If \code{show=TRUE}, a list with three components which are vectors of equal length, \item{rs}{Reduced sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{numerator}{numerator of the reduced sample estimator } \item{denominator}{denominator of the reduced sample estimator } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the reduced sample estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{cen} of all censoring times \eqn{C_i}{C[i]}. That is, \code{cen[k]} counts the number of values \eqn{C_i}{C[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}, and the histogram of all censoring times for which the survival time is uncensored, i.e. those \eqn{C_i}{C[i]} such that \eqn{D_i=1}{D[i]=1}. These three histograms are the arguments passed to \code{reduced.sample}. The return value \code{rs} is the reduced-sample estimator of the distribution function \eqn{F(t)}. Specifically, \code{rs[k]} is the reduced sample estimate of \code{F(breaks[k+1])}. The value is exact, i.e. the use of histograms does not introduce any approximation error. 
Note that, for the results to be valid, either the histogram breaks must span the censoring times, or the number of censoring times that do not fall in a histogram cell must have been counted in \code{uppercen}. } \seealso{ \code{\link{kaplan.meier}}, \code{\link{km.rs}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rMaternII.Rd0000644000176200001440000000532313571674202015023 0ustar liggesusers\name{rMaternII} \alias{rMaternII} \title{Simulate Matern Model II} \description{ Generate a random point pattern, a simulated realisation of the \Matern Model II inhibition process. } \usage{ rMaternII(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Alternatively a higher-dimensional box of class \code{"box3"} or \code{"boxx"}. } \item{stationary}{ Logical. Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is normally an object of class \code{"ppp"}, but may be of class \code{"pp3"} or \code{"ppx"} depending on the window. } \details{ This algorithm generates one or more realisations of \Matern's Model II inhibition process inside the window \code{win}. 
The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. Then each proposal point is marked by an ``arrival time'', a number uniformly distributed in \eqn{[0,1]} independently of other variables. A proposal point is deleted if it lies within \code{r} units' distance of another proposal point \emph{that has an earlier arrival time}. Otherwise it is retained. The retained points constitute \Matern's Model II. The difference between \Matern's Model I and II is the italicised statement above. Model II has a higher intensity for the same parameter values. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rMaternI}} } \examples{ X <- rMaternII(20, 0.05) Y <- rMaternII(20, 0.05, stationary=FALSE) } \author{ \adrian , Ute Hahn, \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/anyNA.im.Rd0000644000176200001440000000167313333543262014604 0ustar liggesusers\name{anyNA.im} \alias{anyNA.im} \title{ Check Whether Image Contains NA Values } \description{ Checks whether any pixel values in a pixel image are \code{NA} (meaning that the pixel lies outside the domain of definition of the image). } \usage{ \method{anyNA}{im}(x, recursive = FALSE) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}). } \item{recursive}{ Ignored. } } \details{ The function \code{\link{anyNA}} is generic: \code{anyNA(x)} is a faster alternative to \code{any(is.na(x))}. This function \code{anyNA.im} is a method for the generic \code{anyNA} defined for pixel images. It returns the value \code{TRUE} if any of the pixel values in \code{x} are \code{NA}, and otherwise returns \code{FALSE}. 
} \value{ A single logical value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{im.object}} } \examples{ anyNA(as.im(letterR)) } \keyword{spatial} \keyword{methods} spatstat/man/pcf.ppp.Rd0000644000176200001440000002376313430740476014550 0ustar liggesusers\name{pcf.ppp} \alias{pcf.ppp} \title{Pair Correlation Function of Point Pattern} \description{ Estimates the pair correlation function of a point pattern using kernel methods. } \usage{ \method{pcf}{ppp}(X, \dots, r = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor = c("r", "d"), var.approx = FALSE, domain=NULL, ratio=FALSE, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see Details. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see Details. } \item{correction}{ Edge correction. A character vector specifying the choice (or choices) of edge correction. See Details. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } \item{var.approx}{ Logical value indicating whether to compute an analytic approximation to the variance of the estimated pair correlation. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } \item{ratio}{ Logical. 
If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g(r)} estimated by Ripley isotropic correction } \item{v}{vector of approximate values of the variance of the estimate of \eqn{g(r)} } as required. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. The return value also has an attribute \code{"bw"} giving the smoothing bandwidth that was used. } \details{ The pair correlation function \eqn{g(r)} is a summary of the dependence between points in a spatial point process. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda^2 g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda^2 * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity of the point process. For a completely random (uniform Poisson) process, \eqn{p(r) = \lambda^2 \,{\rm d}x \, {\rm d}y}{p(r) = lambda^2 dx dy} so \eqn{g(r) = 1}. 
Formally, the pair correlation function of a stationary point process is defined by \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. This routine computes an estimate of \eqn{g(r)} by kernel smoothing. \itemize{ \item If \code{divisor="r"} (the default), then the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \itemize{ \item If \code{correction="translate"} or \code{correction="translation"} then the translation correction is used. For \code{divisor="r"} the translation-corrected estimate is given in equation (15.15), page 284 of Stoyan and Stoyan (1994). \item If \code{correction="Ripley"} or \code{correction="isotropic"} then Ripley's isotropic edge correction is used. For \code{divisor="r"} the isotropic-corrected estimate is given in equation (15.18), page 285 of Stoyan and Stoyan (1994). \item If \code{correction="none"} then no edge correction is used, that is, an uncorrected estimate is computed. } Multiple corrections can be selected. The default is \code{correction=c("translate", "Ripley")}. 
Alternatively \code{correction="all"} selects all options; \code{correction="best"} selects the option which has the best statistical performance; \code{correction="good"} selects the option which is the best compromise between statistical performance and speed of computation. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density.default}}. The default is the Epanechnikov kernel, recommended by Stoyan and Stoyan (1994, page 285). The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. Stoyan and Stoyan (1994, page 285) recommend using the Epanechnikov kernel with support \eqn{[-h,h]} chosen by the rule of thumb \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. See equation (15.16). If \code{bw} is missing or \code{NULL}, then this rule of thumb will be applied. The argument \code{stoyan} determines the value of \eqn{c}. The smoothing bandwidth that was used in the calculation is returned as an attribute of the final result. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. If it is specified, \code{r} must be a vector of increasing numbers starting from \code{r[1] = 0}, and \code{max(r)} must not exceed half the diameter of the window. If the argument \code{domain} is given, estimation will be restricted to this region. That is, the estimate of \eqn{g(r)} will be based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. 
The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. To compute a confidence band for the true value of the pair correlation function, use \code{\link{lohboot}}. If \code{var.approx = TRUE}, the variance of the estimate of the pair correlation will also be calculated using an analytic approximation (Illian et al, 2008, page 234) which is valid for stationary point processes which are not too clustered. This calculation is not yet implemented when the argument \code{domain} is given. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. } \references{ Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{density.default}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{lohboot}}. 
} \examples{ X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p, main="pair correlation function for X") # indicates inhibition at distances r < 0.3 pd <- pcf(X, divisor="d") # compare estimates plot(p, cbind(iso, theo) ~ r, col=c("blue", "red"), ylim.covers=0, main="", lwd=c(2,1), lty=c(1,3), legend=FALSE) plot(pd, iso ~ r, col="green", lwd=2, add=TRUE) legend("center", col=c("blue", "green"), lty=1, lwd=2, legend=c("divisor=r","divisor=d")) # calculate approximate variance and show POINTWISE confidence bands pv <- pcf(X, var.approx=TRUE) plot(pv, cbind(iso, iso+2*sqrt(v), iso-2*sqrt(v)) ~ r) } \author{ \spatstatAuthors and Martin Hazelton. } \keyword{spatial} \keyword{nonparametric} spatstat/man/nnwhich.lpp.Rd0000644000176200001440000000346713333543263015426 0ustar liggesusers\name{nnwhich.lpp} \alias{nnwhich.lpp} \title{ Identify Nearest Neighbours on a Linear Network } \description{ Given a pattern of points on a linear network, identify the nearest neighbour for each point, measured by the shortest path in the network. } \usage{ \method{nnwhich}{lpp}(X, ..., k=1, method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{k}{ Integer, or integer vector. The algorithm will find the \code{k}th nearest neighbour. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function finds the nearest neighbour of each point (i.e. for each point it identifies the nearest other point) measuring distance by the shortest path in the network. If \code{method="C"} the task is performed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. The result is \code{NA} if the \code{k}th nearest neighbour does not exist. 
This can occur if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. } \value{ An integer vector, of length equal to the number of points in \code{X}, identifying the nearest neighbour of each point. If \code{nnwhich(X)[2] = 4} then the nearest neighbour of point 2 is point 4. Alternatively a matrix with one row for each point in \code{X} and one column for each entry of \code{k}. } \author{ \adrian } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(10, simplenet) nnwhich(X) nnwhich(X, k=2) } \keyword{spatial} spatstat/man/rmpoispp.Rd0000644000176200001440000001743113333543264015044 0ustar liggesusers\name{rmpoispp} \alias{rmpoispp} \title{Generate Multitype Poisson Point Pattern} \description{ Generate a random point pattern, a realisation of the (homogeneous or inhomogeneous) multitype Poisson process. } \usage{ rmpoispp(lambda, lmax=NULL, win, types, \dots, nsim=1, drop=TRUE, warnwin=!missing(win)) } \arguments{ \item{lambda}{ Intensity of the multitype Poisson process. Either a single positive number, a vector, a \code{function(x,y,m, \dots)}, a pixel image, a list of functions \code{function(x,y, \dots)}, or a list of pixel images. } \item{lmax}{ An upper bound for the value of \code{lambda}. May be omitted } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image or list of images. } \item{types}{ All the possible types for the multitype pattern. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{warnwin}{ Logical value specifying whether to issue a warning when \code{win} is ignored. 
} } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is multitype (it carries a vector of marks which is a factor). } \details{ This function generates a realisation of the marked Poisson point process with intensity \code{lambda}. Note that the intensity function \eqn{\lambda(x,y,m)}{lambda(x,y,m)} is the average number of points \bold{of type m} per unit area near the location \eqn{(x,y)}. Thus a marked point process with a constant intensity of 10 and three possible types will have an average of 30 points per unit area, with 10 points of each type on average. The intensity function may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform marked Poisson process inside the window \code{win} with intensity \code{lambda} for each type. The total intensity of points of all types is \code{lambda * length(types)}. The argument \code{types} must be given and determines the possible types in the multitype pattern. } \item{vector:}{ If \code{lambda} is a numeric vector, then this algorithm generates a realisation of the stationary marked Poisson process inside the window \code{win} with intensity \code{lambda[i]} for points of type \code{types[i]}. The total intensity of points of all types is \code{sum(lambda)}. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } \item{function:}{ If \code{lambda} is a function, the process has intensity \code{lambda(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. The function \code{lambda} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels equal to \code{types}.) 
The value \code{lmax}, if present, must be an upper bound on the values of \code{lambda(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{lambda} is a list of functions, the process has intensity \code{lambda[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. The function \code{lambda[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{lmax}, if given, must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } \item{pixel image:}{ If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the intensity at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{lambda} for the pixel nearest to \code{(x,y)}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{lambda} is a list of pixel images, then the image \code{lambda[[i]]} determines the intensity of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{names(lambda)}, or if that is null, \code{1:length(lambda)}. } } If \code{lmax} is missing, an approximate upper bound will be calculated. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson point process of intensity \code{lmax} for points of each type \code{m}, then randomly deletes or retains each point independently, with retention probability \eqn{p(x,y,m) = \lambda(x,y,m)/\mbox{lmax}}{p(x,y,m) = lambda(x,y,m)/lmax}. 
} \seealso{ \code{\link{rpoispp}} for unmarked Poisson point process; \code{\link{rmpoint}} for a fixed number of random marked points; \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform bivariate Poisson process with total intensity 100 in unit square pp <- rmpoispp(50, types=c("a","b")) # stationary bivariate Poisson process with intensity A = 30, B = 70 pp <- rmpoispp(c(30,70), types=c("A","B")) pp <- rmpoispp(c(30,70)) # works in any window data(letterR) pp <- rmpoispp(c(30,70), win=letterR, types=c("A","B")) # inhomogeneous lambda(x,y,m) # note argument 'm' is a factor lam <- function(x,y,m) { 50 * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B")) # extra arguments lam <- function(x,y,m,scal) { scal * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B"), scal=50) # list of functions lambda[[i]](x,y) lams <- list(function(x,y){50 * x^2}, function(x,y){20 * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B")) pp <- rmpoispp(lams, win=letterR) # functions with extra arguments lams <- list(function(x,y,scal){5 * scal * x^2}, function(x,y, scal){2 * scal * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B"), scal=10) pp <- rmpoispp(lams, win=letterR, scal=10) # florid example lams <- list(function(x,y){ 100*exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend , function(x,y){ 100*exp(-0.6*x+0.5*y) } # log linear trend ) X <- rmpoispp(lams, win=unit.square(), types=c("on", "off")) # pixel image Z <- as.im(function(x,y){30 * (x^2 + y^3)}, letterR) pp <- rmpoispp(Z, types=c("A","B")) # list of pixel images ZZ <- list( as.im(function(x,y){20 * (x^2 + y^3)}, letterR), as.im(function(x,y){40 * (x^3 + y^2)}, letterR)) pp <- rmpoispp(ZZ, types=c("A","B")) pp <- rmpoispp(ZZ) # randomising an existing point pattern rmpoispp(intensity(amacrine), win=Window(amacrine)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} 
spatstat/man/edges.Rd0000644000176200001440000000202213333543263014247 0ustar liggesusers\name{edges} \alias{edges} \title{ Extract Boundary Edges of a Window. } \description{ Extracts the boundary edges of a window and returns them as a line segment pattern. } \usage{ edges(x, \dots, window = NULL, check = FALSE) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), or data acceptable to \code{\link{as.owin}}, specifying the window whose boundary is to be extracted. } \item{\dots}{ Ignored. } \item{window}{ Window to contain the resulting line segments. Defaults to \code{as.rectangle(x)}. } \item{check}{ Logical. Whether to check the validity of the resulting segment pattern. } } \details{ The boundary edges of the window \code{x} will be extracted as a line segment pattern. } \value{ A line segment pattern (object of class \code{"psp"}). } \seealso{ \code{\link{perimeter}} for calculating the total length of the boundary. } \examples{ edges(square(1)) edges(letterR) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/sdr.Rd0000644000176200001440000000712013333543264013755 0ustar liggesusers\name{sdr} \alias{sdr} \alias{sdr.ppp} \title{ Sufficient Dimension Reduction } \description{ Given a point pattern and a set of predictors, find a minimal set of new predictors, each constructed as a linear combination of the original predictors. } \usage{ sdr(X, covariates, \dots) \method{sdr}{ppp}(X, covariates, method = c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1 = 1, Dim2 = 1, predict=FALSE, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{covariates}{ A list of pixel images (objects of class \code{"im"}) to serve as predictor variables. } \item{method}{ Character string indicating which method to use. See Details. } \item{Dim1}{ Dimension of the first order Central Intensity Subspace (applicable when \code{method} is \code{"DR"}, \code{"NNIR"}, \code{"SAVE"} or \code{"TSE"}). 
} \item{Dim2}{ Dimension of the second order Central Intensity Subspace (applicable when \code{method="TSE"}). } \item{predict}{ Logical value indicating whether to compute the new predictors as well. } \item{\dots}{ Additional arguments (ignored by \code{sdr.ppp}). } } \details{ Given a point pattern \eqn{X} and predictor variables \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}, Sufficient Dimension Reduction methods (Guan and Wang, 2010) attempt to find a minimal set of new predictor variables, each constructed by taking a linear combination of the original predictors, which explain the dependence of \eqn{X} on \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}. The methods do not assume any particular form of dependence of the point pattern on the predictors. The predictors are assumed to be Gaussian random fields. Available methods are: \tabular{ll}{ \code{method="DR"} \tab directional regression \cr \code{method="NNIR"} \tab nearest neighbour inverse regression \cr \code{method="SAVE"} \tab sliced average variance estimation \cr \code{method="SIR"} \tab sliced inverse regression \cr \code{method="TSE"} \tab two-step estimation \cr } The result includes a matrix \code{B} whose columns are estimates of the basis vectors of the space of new predictors. That is, the \code{j}th column of \code{B} expresses the \code{j}th new predictor as a linear combination of the original predictors. If \code{predict=TRUE}, the new predictors are also evaluated. They can also be evaluated using \code{\link{sdrPredict}}. } \value{ A list with components \code{B, M} or \code{B, M1, M2} where \code{B} is a matrix whose columns are estimates of the basis vectors for the space, and \code{M} or \code{M1,M2} are matrices containing estimates of the kernel. If \code{predict=TRUE}, the result also includes a component \code{Y} which is a list of pixel images giving the values of the new predictors. 
} \examples{ A <- sdr(bei, bei.extra, predict=TRUE) A Y1 <- A$Y[[1]] plot(Y1) points(bei, pch=".", cex=2) # investigate likely form of dependence plot(rhohat(bei, Y1)) } \seealso{ \code{\link{sdrPredict}} to compute the new predictors from the coefficient matrix. \code{\link{dimhat}} to estimate the subspace dimension. \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{spatial} \keyword{multivariate} spatstat/man/pppmatching.object.Rd0000644000176200001440000000667513515521657016766 0ustar liggesusers\name{pppmatching.object} \alias{pppmatching.object} %DoNotExport \title{Class of Point Matchings} \description{ A class \code{"pppmatching"} to represent a matching of two planar point patterns. Optionally includes information about the construction of the matching and its associated distance between the point patterns. } \details{ This class represents a (possibly weighted and incomplete) matching between two planar point patterns (objects of class \code{"ppp"}). A matching can be thought of as a bipartite weighted graph where the vertices are given by the two point patterns and edges of positive weights are drawn each time a point of the first point pattern is "matched" with a point of the second point pattern. 
If \code{m} is an object of type \code{pppmatching}, it contains the following elements \tabular{ll}{ \code{pp1, pp2} \tab the two point patterns to be matched (vertices) \cr \code{matrix} \tab a matrix specifying which points are matched \cr \tab and with what weights (edges) \cr \code{type} \tab (optional) a character string for the type of \cr \tab the matching (one of \code{"spa"}, \code{"ace"} or \code{"mat"}) \cr \code{cutoff} \tab (optional) cutoff value for interpoint distances \cr \code{q} \tab (optional) the order for taking averages of \cr \tab interpoint distances \cr \code{distance} \tab (optional) the distance associated with the matching } The element \code{matrix} is a "generalized adjacency matrix". The numbers of rows and columns match the cardinalities of the first and second point patterns, respectively. The \code{[i,j]}-th entry is positive if the \code{i}-th point of \code{X} and the \code{j}-th point of \code{Y} are matched (zero otherwise) and its value then gives the corresponding weight of the match. For an unweighted matching all the weights are set to \eqn{1}. The optional elements are for saving details about matchings in the context of optimal point matching techniques. \code{type} can be one of \code{"spa"} (for "subpattern assignment"), \code{"ace"} (for "assignment only if cardinalities differ") or \code{"mat"} (for "mass transfer"). \code{cutoff} is a positive numerical value that specifies the maximal interpoint distance and \code{q} is a value in \eqn{[1,\infty]}{[1,Inf]} that gives the order of the average applied to the interpoint distances. See the help files for \code{\link{pppdist}} and \code{\link{matchingdist}} for detailed information about these elements. Objects of class \code{"pppmatching"} may be created by the function \code{\link{pppmatching}}, and are most commonly obtained as output of the function \code{\link{pppdist}}. There are methods \code{plot}, \code{print} and \code{summary} for this class. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{matchingdist}}, \code{\link{pppmatching}}, \code{\link{plot.pppmatching}} } \examples{ # a random complete unweighted matching X <- runifpoint(10) Y <- runifpoint(10) am <- r2dtable(1, rep(1,10), rep(1,10))[[1]] # generates a random permutation matrix m <- pppmatching(X, Y, am) summary(m) m$matrix \dontrun{ plot(m) } # an optimal complete unweighted matching m2 <- pppdist(X,Y) summary(m2) m2$matrix \dontrun{ plot(m2) } } \keyword{spatial} \keyword{attribute} spatstat/man/interp.colourmap.Rd0000644000176200001440000000261613606253523016472 0ustar liggesusers\name{interp.colourmap} \alias{interp.colourmap} \title{ Interpolate smoothly between specified colours } \description{ Given a colourmap object which maps numbers to colours, this function interpolates smoothly between the colours, yielding a new colour map. } \usage{ interp.colourmap(m, n = 512) } \arguments{ \item{m}{ A colour map (object of class \code{"colourmap"}). } \item{n}{ Number of colour steps to be created in the new colour map. } } \details{ Given a colourmap object \code{m}, which maps numerical values to colours, this function interpolates the mapping, yielding a new colour map. This makes it easy to build a colour map that has smooth gradation between different colours or shades. First specify a small vector of numbers \code{x} which should be mapped to specific colours \code{y}. Use \code{m <- colourmap(y, inputs=x)} to create a colourmap that represents this simple mapping. Then apply \code{interp.colourmap(m)} to obtain a smooth transition between these points. } \value{ Another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{tweak.colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. 
} \examples{ co <- colourmap(inputs=c(0, 0.5, 1), c("black", "red", "white")) plot(interp.colourmap(co)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} spatstat/man/perspPoints.Rd0000644000176200001440000000457413333543264015525 0ustar liggesusers\name{perspPoints} \alias{perspPoints} \alias{perspSegments} \alias{perspLines} \alias{perspContour} \title{ Draw Points or Lines on a Surface Viewed in Perspective } \description{ After a surface has been plotted in a perspective view using \code{\link{persp.im}}, these functions can be used to draw points or lines on the surface. } \usage{ perspPoints(x, y=NULL, \dots, Z, M) perspLines(x, y = NULL, \dots, Z, M) perspSegments(x0, y0 = NULL, x1 = NULL, y1 = NULL, \dots, Z, M) perspContour(Z, M, \dots, nlevels=10, levels=pretty(range(Z), nlevels)) } \arguments{ \item{x,y}{ Spatial coordinates, acceptable to \code{\link[grDevices]{xy.coords}}, for the points or lines on the horizontal plane. } \item{Z}{ Pixel image (object of class \code{"im"}) specifying the surface heights. } \item{M}{ Projection matrix returned from \code{\link{persp.im}} when \code{Z} was plotted. } \item{\dots}{ Graphical arguments passed to \code{\link[graphics]{points}}, \code{\link[graphics]{lines}} or \code{\link[graphics]{segments}} to control the drawing. } \item{x0,y0,x1,y1}{ Spatial coordinates of the line segments, on the horizontal plane. Alternatively \code{x0} can be a line segment pattern (object of class \code{"psp"}) and \code{y0,x1,y1} can be \code{NULL}. } \item{nlevels}{Number of contour levels} \item{levels}{Vector of heights of contours.} } \details{ After a surface has been plotted in a perspective view, these functions can be used to draw points or lines on the surface. The user should already have called \code{\link{persp.im}} in the form \code{M <- persp(Z, visible=TRUE, ...)} to display the perspective view of the surface \code{Z}. 
Only points and lines which are visible from the viewer's standpoint will be drawn. } \value{ Same as the return value from \code{\link[graphics]{points}} or \code{\link[graphics]{segments}}. } \seealso{ \code{\link{persp.im}} } \examples{ M <- persp(bei.extra$elev, colmap=terrain.colors(128), apron=TRUE, theta=-30, phi=20, zlab="Elevation", main="", expand=6, visible=TRUE, shade=0.3) perspContour(bei.extra$elev, M=M, col="pink", nlevels=12) perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3, col="chartreuse") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} spatstat/man/clickppp.Rd0000644000176200001440000000542413333543263014776 0ustar liggesusers\name{clickppp} \alias{clickppp} \title{Interactively Add Points} \description{ Allows the user to create a point pattern by point-and-click in the display. } \usage{ clickppp(n=NULL, win=square(1), types=NULL, \dots, add=FALSE, main=NULL, hook=NULL) } \arguments{ \item{n}{ Number of points to be added (if this is predetermined). } \item{win}{ Window in which to create the point pattern. An object of class \code{"owin"}. } \item{types}{ Vector of types, when creating a multitype point pattern. } \item{\dots}{ Optional extra arguments to be passed to \code{\link[graphics]{locator}} to control the display. } \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{main}{ Main heading for plot. } \item{hook}{For internal use only. Do not use this argument.} } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This function allows the user to create a point pattern by interactively clicking on the screen display. First the window \code{win} is plotted on the current screen device. Then the user is prompted to point the mouse at any desired locations and click the left mouse button to add each point. 
Interactive input stops after \code{n} clicks (if \code{n} was given) or when the middle mouse button is pressed. The return value is a point pattern containing the locations of all the clicked points inside the original window \code{win}, provided that all of the clicked locations were inside this window. Otherwise, the window is expanded to a box large enough to contain all the points (as well as containing the original window). If the argument \code{types} is given, then a multitype point pattern will be created. The user is prompted to input the locations of points of type \code{type[i]}, for each successive index \code{i}. (If the argument \code{n} was given, there will be \code{n} points of \emph{each} type.) The return value is a multitype point pattern. This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. } \seealso{ \code{\link{identify.ppp}}, \code{\link[graphics]{locator}}, \code{\link{clickpoly}}, \code{\link{clickbox}}, \code{\link{clickdist}} } \author{Original by Dominic Schuhmacher. Adapted by \adrian and \rolf. } \keyword{spatial} \keyword{iplot} spatstat/man/Extract.ssf.Rd0000644000176200001440000000145613333543263015376 0ustar liggesusers\name{[.ssf} \alias{[.ssf} \title{ Subset of spatially sampled function } \description{ Extract a subset of the data for a spatially sampled function. } \usage{ \method{[}{ssf}(x, i, j, ..., drop) } \arguments{ \item{x}{ Object of class \code{"ssf"}. } \item{i}{ Subset index applying to the locations where the function is sampled. } \item{j}{ Subset index applying to the columns (variables) measured at each location. 
} \item{\dots, drop}{ Ignored. } } \details{ This is the subset operator for the class \code{"ssf"}. } \value{ Another object of class \code{"ssf"}. } \author{ \adrian. } \seealso{ \code{\link{ssf}}, \code{\link{with.ssf}} } \examples{ f <- ssf(cells, data.frame(d=nndist(cells), i=1:42)) f f[1:10,] f[ ,1] } \keyword{spatial} \keyword{manip} spatstat/man/fitted.mppm.Rd0000644000176200001440000000530413333543265015417 0ustar liggesusers\name{fitted.mppm} \alias{fitted.mppm} \title{Fitted Conditional Intensity for Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns, compute the fitted conditional intensity of the model at the points of each data pattern, or at the points of the quadrature schemes used to fit the model. } \usage{ \method{fitted}{mppm}(object, ..., type = "lambda", dataonly = FALSE) } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{type}{ Type of fitted values: either \code{"trend"} for the spatial trend, or \code{"lambda"} or \code{"cif"} for the conditional intensity. } \item{dataonly}{ If \code{TRUE}, fitted values are computed only for the points of the data point patterns. If \code{FALSE}, fitted values are computed for the points of the quadrature schemes used to fit the model. } } \details{ This function evaluates the conditional intensity \eqn{\hat\lambda(u,x)}{lambdahat(u,x)} or spatial trend \eqn{\hat{b}(u)}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, for each of the original point patterns \eqn{x} to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature schemes used to fit the model in \code{\link{mppm}}. They include the data points (the points of the original point pattern datasets) and other ``dummy'' points in the window of observation. 
Use \code{\link{predict.mppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \value{ A list of vectors (one for each row of the original hyperframe, i.e. one vector for each of the original point patterns) containing the values of the fitted conditional intensity or (if \code{type="trend"}) the fitted spatial trend. Entries in these vectors correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{\link{quad.mppm}(object)}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \examples{ model <- mppm(Bugs ~ x, data=hyperframe(Bugs=waterstriders), interaction=Strauss(7)) cifs <- fitted(model) } \seealso{ \code{\link{mppm}}, \code{\link{predict.mppm}} } \keyword{spatial} \keyword{models} spatstat/man/anova.ppm.Rd0000644000176200001440000001456713333543262015077 0ustar liggesusers\name{anova.ppm} \alias{anova.ppm} \title{ANOVA for Fitted Point Process Models} \description{ Performs analysis of deviance for one or more fitted point process models. } \usage{ \method{anova}{ppm}(object, \dots, test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) } \arguments{ \item{object}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Optional. Additional objects of class \code{"ppm"}. } \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. } \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. 
} \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } \item{fine}{ Logical value, passed to \code{\link{vcov.ppm}}, indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of variance terms. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link[stats]{anova}} for fitted point process models (objects of class \code{"ppm"}, usually generated by the model-fitting function \code{\link{ppm}}). If the fitted models are all Poisson point processes, then by default, this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"} or \code{test="LRT"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. If \code{test="Rao"} or \code{test="score"}, the \emph{score test} (Rao, 1948) is performed instead. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley et al (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment. } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. 
For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This implies that the models were fitted using different quadrature schemes (see \code{\link{quadscheme}}) and/or with different edge corrections or different values of the border edge correction distance \code{rbord}. To ensure that models are comparable, check the following: \itemize{ \item the models must all have been fitted to the same point pattern dataset, in the same window. \item all models must have been fitted by the same fitting method as specified by the argument \code{method} in \code{\link{ppm}}. \item If some of the models depend on covariates, then they should all have been fitted using the same list of covariates, and using \code{allcovar=TRUE} to ensure that the same quadrature scheme is used. \item all models must have been fitted using the same edge correction as specified by the arguments \code{correction} and \code{rbord}. If you did not specify the value of \code{rbord}, then it may have taken a different value for different models. The default value of \code{rbord} is equal to zero for a Poisson model, and otherwise equals the reach (interaction distance) of the interaction term (see \code{\link{reach}}). To ensure that the models are comparable, set \code{rbord} to equal the maximum reach of the interactions that you are fitting. 
} } } } \seealso{ \code{\link{ppm}}, \code{\link{vcov.ppm}} } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \examples{ mod0 <- ppm(swedishpines ~1) modx <- ppm(swedishpines ~x) # Likelihood ratio test anova(mod0, modx, test="Chi") # Score test anova(mod0, modx, test="Rao") # Single argument modxy <- ppm(swedishpines ~x + y) anova(modxy, test="Chi") # Adjusted composite likelihood ratio test modP <- ppm(swedishpines ~1, rbord=9) modS <- ppm(swedishpines ~1, Strauss(9)) anova(modP, modS, test="Chi") } \references{ Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. Rao, C.R. (1948) Large sample tests of statistical hypotheses concerning several parameters with applications to problems of estimation. \emph{Proceedings of the Cambridge Philosophical Society} \bold{44}, 50--57. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/G3est.Rd0000644000176200001440000000701213333543262014150 0ustar liggesusers\name{G3est} \Rdversion{1.1} \alias{G3est} \title{ Nearest Neighbour Distance Distribution Function of a Three-Dimensional Point Pattern } \description{ Estimates the nearest-neighbour distance distribution function \eqn{G_3(r)}{G3(r)} from a three-dimensional point pattern. 
} \usage{ G3est(X, ..., rmax = NULL, nrval = 128, correction = c("rs", "km", "Hanisch")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the nearest-neighbour function is \deqn{ G_3(r) = P(d^\ast(x,\Phi) \le r \mid x \in \Phi) }{ G3(r) = P(d*(x,Phi) <= r | x in Phi) } the cumulative distribution function of the distance \eqn{d^\ast(x,\Phi)}{d*(x,Phi)} from a typical point \eqn{x} in \eqn{\Phi}{Phi} to its nearest neighbour, i.e. to the nearest \emph{other} point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The nearest neighbour function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. For each data point, the distance to the nearest neighbour is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{G_3(r)}{G3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"Hanisch"}:}{ the three-dimensional generalisation of the Hanisch estimator (Hanisch, 1984). } } Alternatively \code{correction="all"} selects all options. 
} \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A large value of \code{nrval} is required in order to avoid discretisation effects (due to the use of histograms in the calculation). } \seealso{ \code{\link{F3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} } \examples{ X <- rpoispp3(42) Z <- G3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat/man/dirichletWeights.Rd0000644000176200001440000000356613333543263016500 0ustar liggesusers\name{dirichletWeights} \alias{dirichletWeights} \title{Compute Quadrature Weights Based on Dirichlet Tessellation} \description{ Computes quadrature weights for a given set of points, using the areas of tiles in the Dirichlet tessellation. } \usage{ dirichletWeights(X, window=NULL, exact=TRUE, \dots) } \arguments{ \item{X}{Data defining a point pattern.} \item{window}{Default window for the point pattern} \item{exact}{Logical value. If \code{TRUE}, compute exact areas using the package \code{deldir}. If \code{FALSE}, compute approximate areas using a pixel raster. } \item{\dots}{ Ignored. } } \value{ Vector of nonnegative weights for each point in \code{X}. 
} \details{ This function computes a set of quadrature weights for a given pattern of points (typically comprising both ``data'' and ``dummy'' points). See \code{\link{quad.object}} for an explanation of quadrature weights and quadrature schemes. The weights are computed using the Dirichlet tessellation. First \code{X} and (optionally) \code{window} are converted into a point pattern object. Then the Dirichlet tessellation of the points of \code{X} is computed. The weight attached to a point of \code{X} is the area of its Dirichlet tile (inside the window \code{Window(X)}). If \code{exact=TRUE} the Dirichlet tessellation is computed exactly by the Lee-Schachter algorithm using the package \code{deldir}. Otherwise a pixel raster approximation is constructed and the areas are approximations to the true weights. In all cases the sum of the weights is equal to the area of the window. } \seealso{ \code{\link{quad.object}}, \code{\link{gridweights}} } \examples{ Q <- quadscheme(runifpoispp(10)) X <- as.ppp(Q) # data and dummy points together w <- dirichletWeights(X, exact=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/rQuasi.Rd0000644000176200001440000000233313333543264014432 0ustar liggesusers\name{rQuasi} \alias{rQuasi} \title{ Generate Quasirandom Point Pattern in Given Window } \description{ Generates a quasirandom pattern of points in any two-dimensional window. } \usage{ rQuasi(n, W, type = c("Halton", "Hammersley"), ...) } \arguments{ \item{n}{ Maximum number of points to be generated. } \item{W}{ Window (object of class \code{"owin"}) in which to generate the points. } \item{type}{ String identifying the quasirandom generator. } \item{\dots}{ Arguments passed to the quasirandom generator. } } \details{ This function generates a quasirandom point pattern, using the quasirandom sequence generator \code{\link{Halton}} or \code{\link{Hammersley}} as specified. 
If \code{W} is a rectangle, exactly \code{n} points will be generated. If \code{W} is not a rectangle, \code{n} points will be generated in the containing rectangle \code{as.rectangle(W)}, and only the points lying inside \code{W} will be retained. } \value{ Point pattern (object of class \code{"ppp"}) inside the window \code{W}. } \seealso{ \code{\link{Halton}} } \examples{ plot(rQuasi(256, letterR)) } \author{\adrian , \rolf and \ege. } \keyword{spatial} \keyword{datagen} spatstat/man/quantile.density.Rd0000644000176200001440000000451113333543264016466 0ustar liggesusers\name{quantile.density} \alias{quantile.density} \title{ Quantiles of a Density Estimate } \description{ Given a kernel estimate of a probability density, compute quantiles. } \usage{ \method{quantile}{density}(x, probs = seq(0, 1, 0.25), names = TRUE, \dots, warn = TRUE) } \arguments{ \item{x}{ Object of class \code{"density"} computed by a method for \code{\link[stats]{density}} } \item{probs}{ Numeric vector of probabilities for which the quantiles are required. } \item{names}{ Logical value indicating whether to attach names (based on \code{probs}) to the result. } \item{\dots}{ Ignored. } \item{warn}{ Logical value indicating whether to issue a warning if the density estimate \code{x} had to be renormalised because it was computed in a restricted interval. } } \details{ This function calculates quantiles of the probability distribution whose probability density has been estimated and stored in the object \code{x}. The object \code{x} must belong to the class \code{"density"}, and would typically have been obtained from a call to the function \code{\link[stats]{density}}. The probability density is first normalised so that the total probability is equal to 1. A warning is issued if the density estimate was restricted to an interval (i.e. if \code{x} was created by a call to \code{\link[stats]{density}} which included either of the arguments \code{from} and \code{to}). 
Next, the density estimate is numerically integrated to obtain an estimate of the cumulative distribution function \eqn{F(x)}. Then for each desired probability \eqn{p}, the algorithm finds the corresponding quantile \eqn{q}. The quantile \eqn{q} corresponding to probability \eqn{p} satisfies \eqn{F(q) = p} up to the resolution of the grid of values contained in \code{x}. The quantile is computed from the right, that is, \eqn{q} is the smallest available value of \eqn{x} such that \eqn{F(x) \ge p}{F(x) >= p}. } \value{ A numeric vector containing the quantiles. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link[stats]{quantile}}, \code{\link{quantile.ewcdf}}, \code{\link{quantile.im}}, \code{\link{CDF}}. } \examples{ dd <- density(runif(10)) quantile(dd) } \keyword{methods} \keyword{univar} \keyword{nonparametric} spatstat/man/rppm.Rd0000644000176200001440000000365113333543264014150 0ustar liggesusers\name{rppm} \alias{rppm} \title{ Recursively Partitioned Point Process Model } \description{ Fits a recursive partition model to point pattern data. } \usage{ rppm(\dots, rpargs=list()) } \arguments{ \item{\dots}{ Arguments passed to \code{\link{ppm}} specifying the point pattern data and the explanatory covariates. } \item{rpargs}{ Optional list of arguments passed to \code{\link[rpart]{rpart}} controlling the recursive partitioning procedure. } } \details{ This function attempts to find a simple rule for predicting low and high intensity regions of points in a point pattern, using explanatory covariates. The arguments \code{\dots} specify the point pattern data and explanatory covariates in the same way as they would be in the function \code{\link{ppm}}. The recursive partitioning algorithm \code{\link[rpart]{rpart}} is then used to find a partitioning rule. } \value{ An object of class \code{"rppm"}. There are methods for \code{print}, \code{plot}, \code{fitted}, \code{predict} and \code{prune} for this class. } \references{ Breiman, L., Friedman, J. 
H., Olshen, R. A., and Stone, C. J. (1984) \emph{Classification and Regression Trees}. Wadsworth. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.rppm}}, \code{\link{predict.rppm}}, \code{\link{prune.rppm}}. } \examples{ # New Zealand trees data: trees planted along border # Use covariates 'x', 'y' nzfit <- rppm(nztrees ~ x + y) nzfit prune(nzfit, cp=0.035) # Murchison gold data: numeric and logical covariates mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # mfit <- rppm(gold ~ dfault + greenstone, data=mur) mfit # Gorillas data: factor covariates # (symbol '.' indicates 'all variables') gfit <- rppm(unmark(gorillas) ~ . , data=gorillas.extra) gfit } \keyword{spatial} \keyword{models} spatstat/man/anova.slrm.Rd0000644000176200001440000000307013333543262015243 0ustar liggesusers\name{anova.slrm} \Rdversion{1.1} \alias{anova.slrm} \title{ Analysis of Deviance for Spatial Logistic Regression Models } \description{ Performs Analysis of Deviance for two or more fitted Spatial Logistic Regression models. } \usage{ \method{anova}{slrm}(object, ..., test = NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ additional objects of the same type (optional). } \item{test}{ a character string, (partially) matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, indicating the reference distribution that should be used to compute \eqn{p}-values. } } \details{ This is a method for \code{\link[stats]{anova}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided \eqn{p}-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. 
} \value{ An object of class \code{"anova"}, inheriting from class \code{"data.frame"}, representing the analysis of deviance table. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit0 <- slrm(X ~ 1) fit1 <- slrm(X ~ x+y) anova(fit0, fit1, test="Chi") } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/hopskel.Rd0000644000176200001440000000625013333543263014634 0ustar liggesusers\name{hopskel} \alias{hopskel} \alias{hopskel.test} \title{Hopkins-Skellam Test} \description{ Perform the Hopkins-Skellam test of Complete Spatial Randomness, or simply calculate the test statistic. } \usage{ hopskel(X) hopskel.test(X, \dots, alternative=c("two.sided", "less", "greater", "clustered", "regular"), method=c("asymptotic", "MonteCarlo"), nsim=999) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{method}{ Method of performing the test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } \item{\dots}{Ignored.} } \details{ Hopkins and Skellam (1954) proposed a test of Complete Spatial Randomness based on comparing nearest-neighbour distances with point-event distances. If the point pattern \code{X} contains \code{n} points, we first compute the nearest-neighbour distances \eqn{P_1, \ldots, P_n}{P[1], ..., P[n]} so that \eqn{P_i}{P[i]} is the distance from the \eqn{i}th data point to the nearest other data point. Then we generate another completely random pattern \code{U} with the same number \code{n} of points, and compute for each point of \code{U} the distance to the nearest point of \code{X}, giving distances \eqn{I_1, \ldots, I_n}{I[1], ..., I[n]}. 
The test statistic is \deqn{ A = \frac{\sum_i P_i^2}{\sum_i I_i^2} }{ A = (sum[i] P[i]^2) / (sum[i] I[i]^2) } The null distribution of \eqn{A} is roughly an \eqn{F} distribution with shape parameters \eqn{(2n,2n)}. (This is equivalent to using the test statistic \eqn{H=A/(1+A)} and referring \eqn{H} to the Beta distribution with parameters \eqn{(n,n)}). The function \code{hopskel} calculates the Hopkins-Skellam test statistic \eqn{A}, and returns its numeric value. This can be used as a simple summary of spatial pattern: the value \eqn{A=1} is consistent with Complete Spatial Randomness, while values \eqn{A < 1} are consistent with spatial clustering, and values \eqn{A > 1} are consistent with spatial regularity. The function \code{hopskel.test} performs the test. If \code{method="asymptotic"} (the default), the test statistic \eqn{A} is referred to the \eqn{F} distribution. If \code{method="MonteCarlo"}, a Monte Carlo test is performed using \code{nsim} simulated point patterns. } \value{ The value of \code{hopskel} is a single number. The value of \code{hopskel.test} is an object of class \code{"htest"} representing the outcome of the test. It can be printed. } \references{ Hopkins, B. and Skellam, J.G. (1954) A new method of determining the type of distribution of plant individuals. \emph{Annals of Botany} \bold{18}, 213--227. } \seealso{ \code{\link{clarkevans}}, \code{\link{clarkevans.test}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ hopskel(redwood) hopskel.test(redwood, alternative="clustered") } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat/man/Gcom.Rd0000644000176200001440000002247613571674202014066 0ustar liggesusers\name{Gcom} \Rdversion{1.1} \alias{Gcom} \title{ Model Compensator of Nearest Neighbour Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the nearest neighbour distance distribution function \eqn{G} based on the fitted model (as well as the usual nonparametric estimates of \eqn{G} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{G} functions serves as a diagnostic for the model. } \usage{ Gcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "Hanisch"), conditional = !is.poisson(object), restrict=FALSE, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection="border", truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{G(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Edge correction(s) to be employed in calculating the compensator. Options are \code{"border"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. 
A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{\dots}{ Extra arguments passed to \code{\link{ppm}}. } \item{ppmcorrection}{ The \code{correction} argument to \code{\link{ppm}}. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes different estimates of the nearest neighbour distance distribution function \eqn{G} of the dataset, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). 
In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{G} function. It then also computes the \emph{model-compensated} \eqn{G} function. The different functions are returned as columns in a data frame (of class \code{"fv"}). The interpretation of the columns is as follows (ignoring edge corrections): \describe{ \item{\code{bord}:}{ the nonparametric border-correction estimate of \eqn{G(r)}, \deqn{ \hat G(r) = \frac{\sum_i I\{ d_i \le r\} I\{ b_i > r \}}{\sum_i I\{ b_i > r\}} }{ G(r) = (sum[i] I(d[i] <= r) I(b[i] > r))/(sum[i] I(b[i] > r)) } where \eqn{d_i}{d[i]} is the distance from the \eqn{i}-th data point to its nearest neighbour, and \eqn{b_i}{b[i]} is the distance from the \eqn{i}-th data point to the boundary of the window \eqn{W}. } \item{\code{bcom}:}{ the model compensator of the border-correction estimate \deqn{ {\bf C}\, \hat G(r) = \frac{\int \lambda(u,x) I\{ b(u) > r\} I\{ d(u,x) \le r\}}{ 1 + \sum_i I\{ b_i > r\} } }{ C G(r) = (integral[u] lambda(u,x) I(b(u) > r) I( d(u,x) <= r ))/(1 + sum[i] I(b[i] > r)) } where \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{d(u,x)} denotes the distance from \eqn{u} to the nearest point in \eqn{x}, while \eqn{b(u)} denotes the distance from \eqn{u} to the boundary of the window\eqn{W}. 
} \item{\code{han}:}{ the nonparametric Hanisch estimate of \eqn{G(r)} \deqn{ \hat G(r) = \frac{D(r)}{D(\infty)} }{ G(r) = D(r)/D(infty) } where \deqn{ D(r) = \sum_i \frac{ I\{x_i \in W_{\ominus d_i}\} I\{d_i \le r\} }{ \mbox{area}(W_{\ominus d_i}) } }{ D(r) = sum[i] I(x[i] in W[-r]) I(d[i] <= r)/area(W[-d[i]]) } in which \eqn{W_{\ominus r}}{W[-r]} denotes the erosion of the window \eqn{W} by a distance \eqn{r}. } \item{\code{hcom}:}{ the corresponding model-compensated function \deqn{ {\bf C} \, G(r) = \int_W \frac{ \lambda(u,x) I(u \in W_{\ominus d(u)}) I(d(u) \le r) }{ \hat D(\infty) \mbox{area}(W_{\ominus d(u)}) + 1 } }{ C G(r) = integral[u] lambda(u,x) I(u in W[-d(u)]) I(d(u) <= r)/ (1 + D(infty) area(W[-d(u)])) } where \eqn{d(u) = d(u, x)} is the (`empty space') distance from location \eqn{u} to the nearest point of \eqn{x}. } } If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix E of Baddeley, Rubak and \Moller (2011). See also \code{\link{spatstat.options}('eroded.intensity')}. Thus, by default, the reweighting estimator is computed for non-Poisson models. The border-corrected and Hanisch-corrected estimates of \eqn{G(r)} are approximately unbiased estimates of the \eqn{G}-function, assuming the point process is stationary. 
The model-compensated functions are unbiased estimates \emph{of the mean value of the corresponding nonparametric estimate}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric and model-compensated estimates is approximately zero. To compute the difference between the nonparametric and model-compensated functions, use \code{\link{Gres}}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Gest}}, \code{\link{Gres}}. Alternative functions: \code{\link{Kcom}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model fitting: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gcom(fit0) G0 plot(G0) # uniform Poisson is clearly not correct # Hanisch estimates only plot(Gcom(fit0), cbind(han, hcom) ~ r) fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gcom(fit1), cbind(han, hcom) ~ r) # Try adjusting interaction distance fit2 <- update(fit1, Strauss(0.10)) plot(Gcom(fit2), cbind(han, hcom) ~ r) G3 <- Gcom(cells, interaction=Strauss(0.12)) plot(G3, cbind(han, hcom) ~ r) } \keyword{spatial} \keyword{models} spatstat/man/thinNetwork.Rd0000644000176200001440000000521513555737613015515 0ustar liggesusers\name{thinNetwork} \alias{thinNetwork} \title{ Remove Vertices or Segments from a Linear Network } \description{ Delete some vertices and/or segments from a linear network or related object. 
} \usage{ thinNetwork(X, retainvertices, retainedges) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}), or a point pattern on a linear network (object of class \code{"lpp"}). } \item{retainvertices}{ Optional. Subset index specifying which vertices should be retained (not deleted). } \item{retainedges}{ Optional. Subset index specifying which edges (segments) should be retained (not deleted). } } \details{ This function deletes some of the vertices and edges (segments) in the linear network. The arguments \code{retainvertices} and \code{retainedges} can be any kind of subset index: a vector of positive integers specifying which vertices/edges should be retained; a vector of negative integers specifying which vertices/edges should be deleted; or a logical vector specifying whether each vertex/edge should be retained (\code{TRUE}) or deleted (\code{FALSE}). Vertices are indexed in the same sequence as in \code{vertices(as.linnet(X))}. Segments are indexed in the same sequence as in \code{as.psp(as.linnet(X))}. The argument \code{retainedges} has higher precedence than \code{retainvertices} in the sense that: \itemize{ \item If \code{retainedges} is given, then any vertex which is an endpoint of a retained edge will also be retained. \item If \code{retainvertices} is given and \code{retainedges} is \bold{missing}, then any segment joining two retained vertices will also be retained. \item Thus, when both \code{retainvertices} and \code{retainedges} are given, it is possible that more vertices will be retained than those specified by \code{retainvertices}. } After the network has been altered, other consequential changes will occur, including renumbering of the segments and vertices. If \code{X} is a point pattern on a linear network, then data points will be deleted if they lie on a deleted edge. } \value{ An object of the same kind as \code{X}. } \author{ \adrian and Suman Rakshit. 
} \seealso{ \code{\link{linnet}} to make a network; \code{\link{connected.linnet}} to extract connected components. \code{\link{repairNetwork}}. } \examples{ L <- simplenet plot(L, main="thinNetwork(L, retainedges=c(-3, -5))") text(midpoints.psp(as.psp(L)), labels=1:nsegments(L), pos=3) Lsub <- thinNetwork(L, retainedges=c(-3, -5)) plot(Lsub, add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat/man/as.function.owin.Rd0000644000176200001440000000225313333543262016367 0ustar liggesusers\name{as.function.owin} \alias{as.function.owin} \title{ Convert Window to Indicator Function } \description{ Converts a spatial window to a function of the \eqn{x} and \eqn{y} coordinates returning the value 1 inside the window and 0 outside. } \usage{ \method{as.function}{owin}(x, \dots) } \arguments{ \item{x}{ A window (object of class \code{"owin"}). } \item{\dots}{ Ignored. } } \details{ This command converts a spatial window (object of class \code{"owin"}) to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This is the indicator function of the window: it returns the value 1 for locations inside the window, and returns 0 for values outside the window. } \value{ A function in the \R language with arguments \code{x,y}. It also belongs to the class \code{"indicfun"} which has methods for \code{plot} and \code{print}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.im.owin}} } \examples{ W <- Window(humberside) f <- as.function(W) f f(5000, 4500) f(123456, 78910) X <- runifpoint(5, Frame(humberside)) f(X) plot(f) } \keyword{spatial} \keyword{manip} spatstat/man/simplepanel.Rd0000644000176200001440000002016513333543264015502 0ustar liggesusers\name{simplepanel} \alias{simplepanel} \alias{grow.simplepanel} \title{Simple Point-and-Click Interface Panels} \description{ These functions enable the user to create a simple, robust, point-and-click interface to any \R code. 
} \usage{ simplepanel(title, B, boxes, clicks, redraws=NULL, exit = NULL, env) grow.simplepanel(P, side = c("right", "left", "top", "bottom"), len = NULL, new.clicks, new.redraws=NULL, \dots, aspect) } \arguments{ \item{title}{ Character string giving the title of the interface panel. } \item{B}{ Bounding box of the panel coordinates. A rectangular window (object of class \code{"owin"}) } \item{boxes}{ A list of rectangular windows (objects of class \code{"owin"}) specifying the placement of the buttons and other interactive components of the panel. } \item{clicks}{ A list of \R functions, of the same length as \code{boxes}, specifying the operations to be performed when each button is clicked. Entries can also be \code{NULL} indicating that no action should occur. See Details. } \item{redraws}{ Optional list of \R functions, of the same length as \code{boxes}, specifying how to redraw each button. Entries can also be \code{NULL} indicating a simple default. See Details. } \item{exit}{ An \R function specifying actions to be taken when the interactive panel terminates. } \item{env}{ An \code{environment} that will be passed as an argument to all the functions in \code{clicks}, \code{redraws} and \code{exit}. } \item{P}{ An existing interaction panel (object of class \code{"simplepanel"}). } \item{side}{ Character string identifying which side of the panel \code{P} should be grown to accommodate the new buttons. } \item{len}{ Optional. Thickness of the new panel area that should be grown to accommodate the new buttons. A single number in the same units as the coordinate system of \code{P}. } \item{new.clicks}{ List of \R functions defining the operations to be performed when each of the new buttons is clicked. } \item{new.redraws}{ Optional. List of \R functions, of the same length as \code{new.clicks}, defining how to redraw each of the new buttons. } \item{\dots}{ Arguments passed to \code{\link{layout.boxes}} to determine the layout of the new buttons. 
} \item{aspect}{ Optional. Aspect ratio (height/width) of the new buttons. } } \details{ These functions enable the user to create a simple, robust, point-and-click interface to any \R code. The functions \code{simplepanel} and \code{grow.simplepanel} create an object of class \code{"simplepanel"}. Such an object defines the graphics to be displayed and the actions to be performed when the user interacts with the panel. The panel is activated by calling \code{\link{run.simplepanel}}. The function \code{simplepanel} creates a panel object from basic data. The function \code{grow.simplepanel} modifies an existing panel object \code{P} by growing an additional row or column of buttons. For \code{simplepanel}, \itemize{ \item The spatial layout of the panel is determined by the rectangles \code{B} and \code{boxes}. \item The argument \code{clicks} must be a list of functions specifying the action to be taken when each button is clicked (or \code{NULL} to indicate that no action should be taken). The list entries should have names (but there are sensible defaults). Each function should be of the form \code{function(env, xy)} where \code{env} is an \code{environment} that may contain shared data, and \code{xy} gives the coordinates of the mouse click, in the format \code{list(x, y)}. The function returns \code{TRUE} if the panel should continue running, and \code{FALSE} if the panel should terminate. \item The argument \code{redraws}, if given, must be a list of functions specifying the action to be taken when each button is to be redrawn. Each function should be of the form \code{function(button, name, env)} where \code{button} is a rectangle specifying the location of the button in the current coordinate system; \code{name} is a character string giving the name of the button; and \code{env} is the \code{environment} that may contain shared data. The function returns \code{TRUE} if the panel should continue running, and \code{FALSE} if the panel should terminate. 
If \code{redraws} is not given (or if one of the entries in \code{redraws} is \code{NULL}), the default action is to draw a pink rectangle showing the button position, draw the name of the button in the middle of this rectangle, and return \code{TRUE}. \item The argument \code{exit}, if given, must be a function specifying the action to be taken when the panel terminates. (Termination occurs when one of the \code{clicks} functions returns \code{FALSE}). The \code{exit} function should be of the form \code{function(env)} where \code{env} is the \code{environment} that may contain shared data. Its return value will be used as the return value of \code{\link{run.simplepanel}}. \item The argument \code{env} should be an \R environment. The panel buttons will have access to this environment, and will be able to read and write data in it. This mechanism is used to exchange data between the panel and other \R code. } For \code{grow.simplepanel}, \itemize{ \item the spatial layout of the new boxes is determined by the arguments \code{side}, \code{len}, \code{aspect} and by the additional \code{\dots} arguments passed to \code{\link{layout.boxes}}. \item the argument \code{new.clicks} should have the same format as \code{clicks}. It implicitly specifies the number of new buttons to be added, and the actions to be performed when they are clicked. \item the optional argument \code{new.redraws}, if given, should have the same format as \code{redraws}. It specifies the actions to be performed when the new buttons are clicked. } } \value{ An object of class \code{"simplepanel"}. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{run.simplepanel}}, \code{\link{layout.boxes}} } \examples{ # make boxes (alternatively use layout.boxes()) Bminus <- square(1) Bvalue <- shift(Bminus, c(1.2, 0)) Bplus <- shift(Bvalue, c(1.2, 0)) Bdone <- shift(Bplus, c(1.2, 0)) myboxes <- list(Bminus, Bvalue, Bplus, Bdone) myB <- do.call(boundingbox,myboxes) # make environment containing an integer count myenv <- new.env() assign("answer", 0, envir=myenv) # what to do when finished: return the count. myexit <- function(e) { return(get("answer", envir=e)) } # button clicks # decrement the count Cminus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans - 1, envir=e) return(TRUE) } # display the count (clicking does nothing) Cvalue <- function(...) { TRUE } # increment the count Cplus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans + 1, envir=e) return(TRUE) } # 'Clear' button Cclear <- function(e, xy) { assign("answer", 0, envir=e) return(TRUE) } # quit button Cdone <- function(e, xy) { return(FALSE) } myclicks <- list("-"=Cminus, value=Cvalue, "+"=Cplus, done=Cdone) # redraw the button that displays the current value of the count Rvalue <- function(button, nam, e) { plot(button, add=TRUE) ans <- get("answer", envir=e) text(centroid.owin(button), labels=ans) return(TRUE) } # make the panel P <- simplepanel("Counter", B=myB, boxes=myboxes, clicks=myclicks, redraws = list(NULL, Rvalue, NULL, NULL), exit=myexit, env=myenv) # print it P # show what it looks like redraw.simplepanel(P) # ( type run.simplepanel(P) to run the panel interactively ) # add another button to right Pplus <- grow.simplepanel(P, "right", new.clicks=list(clear=Cclear)) } \keyword{iplot} \keyword{utilities} spatstat/man/edit.ppp.Rd0000644000176200001440000000312113333543263014704 0ustar liggesusers\name{edit.ppp} \alias{edit.ppp} \alias{edit.psp} \alias{edit.im} \title{ Invoke Text Editor on Spatial Data } \description{ Invokes a text editor allowing the user to inspect and change 
entries in a spatial dataset. } \usage{ \method{edit}{ppp}(name, \dots) \method{edit}{psp}(name, \dots) \method{edit}{im}(name, \dots) } \arguments{ \item{name}{ A spatial dataset (object of class \code{"ppp"}, \code{"psp"} or \code{"im"}). } \item{\dots}{ Other arguments passed to \code{\link[utils]{edit.data.frame}}. } } \details{ The function \code{\link[utils]{edit}} is generic. These functions are methods for spatial objects of class \code{"ppp"}, \code{"psp"} and \code{"im"}. The spatial dataset \code{name} is converted to a data frame or array, and the text editor is invoked. The user can change the values of spatial coordinates or marks of the points in a point pattern, or the coordinates or marks of the segments in a segment pattern, or the pixel values in an image. The names of the columns of marks can also be edited. If \code{name} is a pixel image, it is converted to a matrix and displayed in the same spatial orientation as if the image had been plotted. Note that the original object \code{name} is not changed; the function returns the edited dataset. } \value{ Object of the same kind as \code{name} containing the edited data. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link[utils]{edit.data.frame}}, \code{\link{edit.hyperframe}} } \examples{ if(interactive()) Z <- edit(cells) } \keyword{spatial} \keyword{manip} spatstat/man/project2segment.Rd0000644000176200001440000000441413333543264016303 0ustar liggesusers\name{project2segment} \alias{project2segment} \title{Move Point To Nearest Line} \description{ Given a point pattern and a line segment pattern, this function moves each point to the closest location on a line segment. 
} \usage{ project2segment(X, Y) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{Y}{A line segment pattern (object of class \code{"psp"}).} } \details{ For each point \code{x} in the point pattern \code{X}, this function finds the closest line segment \code{y} in the line segment pattern \code{Y}. It then `projects' the point \code{x} onto the line segment \code{y} by finding the position \code{z} along \code{y} which is closest to \code{x}. This position \code{z} is returned, along with supplementary information. } \value{ A list with the following components. Each component has length equal to the number of points in \code{X}, and its entries correspond to the points of \code{X}. \item{Xproj }{ Point pattern (object of class \code{"ppp"}) containing the projected points. } \item{mapXY }{ Integer vector identifying the nearest segment to each point. } \item{d}{ Numeric vector of distances from each point of \code{X} to the corresponding projected point. } \item{tp}{ Numeric vector giving the scaled parametric coordinate \eqn{0 \le t_p \le 1}{0 <= tp <= 1} of the position of the projected point along the segment. } For example suppose \code{mapXY[2] = 5} and \code{tp[2] = 0.33}. Then \code{Y[5]} is the line segment lying closest to \code{X[2]}. The projection of the point \code{X[2]} onto the segment \code{Y[5]} is the point \code{Xproj[2]}, which lies one-third of the way between the first and second endpoints of the line segment \code{Y[5]}. } \author{ \adrian and \rolf } \seealso{ \code{\link{nearestsegment}} for a faster way to determine which segment is closest to each point. 
} \examples{ X <- rstrat(square(1), 5) Y <- as.psp(matrix(runif(20), 5, 4), window=owin()) plot(Y, lwd=3, col="green") plot(X, add=TRUE, col="red", pch=16) v <- project2segment(X,Y) Xproj <- v$Xproj plot(Xproj, add=TRUE, pch=16) arrows(X$x, X$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red") } \keyword{spatial} \keyword{math} spatstat/man/with.fv.Rd0000644000176200001440000000745213333543265014563 0ustar liggesusers\name{with.fv} \alias{with.fv} \title{Evaluate an Expression in a Function Table} \description{ Evaluate an R expression in a function value table (object of class \code{"fv"}). } \usage{ \method{with}{fv}(data, expr, ..., fun = NULL, enclos=NULL) } \arguments{ \item{data}{A function value table (object of class \code{"fv"}) in which the expression will be evaluated. } \item{expr}{The expression to be evaluated. An \R language expression, which may involve the names of columns in \code{data}, the special abbreviations \code{.}, \code{.x} and \code{.y}, and global constants or functions. } \item{\dots}{Ignored.} \item{fun}{Logical value, specifying whether the result should be interpreted as another function (\code{fun=TRUE}) or simply returned as a numeric vector or array (\code{fun=FALSE}). See Details. } \item{enclos}{ An environment in which to search for variables that are not found in \code{data}. Defaults to \code{\link{parent.frame}()}. } } \details{ This is a method for the generic command \code{\link{with}} for an object of class \code{"fv"} (function value table). An object of class \code{"fv"} is a convenient way of storing and plotting several different estimates of the same function. It is effectively a data frame with extra attributes. See \code{\link{fv.object}} for further explanation. This command makes it possible to perform computations that involve different estimates of the same function. 
For example we use it to compute the arithmetic difference between two different edge-corrected estimates of the \eqn{K} function of a point pattern. The argument \code{expr} should be an \R language expression. The expression may involve \itemize{ \item the name of any column in \code{data}, referring to one of the estimates of the function; \item the symbol \code{.} which stands for all the available estimates of the function; \item the symbol \code{.y} which stands for the recommended estimate of the function (in an \code{"fv"} object, one of the estimates is always identified as the recommended estimate); \item the symbol \code{.x} which stands for the argument of the function; \item global constants or functions. } See the Examples. The expression should be capable of handling vectors and matrices. The interpretation of the argument \code{fun} is as follows: \itemize{ \item If \code{fun=FALSE}, the result of evaluating the expression \code{expr} will be returned as a numeric vector, matrix or data frame. \item If \code{fun=TRUE}, then the result of evaluating \code{expr} will be interpreted as containing the values of a new function. The return value will be an object of class \code{"fv"}. (This can only happen if the result has the right dimensions.) \item The default is \code{fun=TRUE} if the result of evaluating \code{expr} has more than one column, and \code{fun=FALSE} otherwise. } To perform calculations involving \emph{several} objects of class \code{"fv"}, use \code{\link{eval.fv}}. } \value{ A function value table (object of class \code{"fv"}) or a numeric vector or data frame. 
} \seealso{ \code{\link{with}}, \code{\link{fv.object}}, \code{\link{eval.fv}}, \code{\link{Kest}} } \examples{ # compute 4 estimates of the K function X <- rpoispp(42) K <- Kest(X) plot(K) # derive 4 estimates of the L function L(r) = sqrt(K(r)/pi) L <- with(K, sqrt(./pi)) plot(L) # compute 4 estimates of V(r) = L(r)/r V <- with(L, ./.x) plot(V) # compute the maximum absolute difference between # the isotropic and translation correction estimates of K(r) D <- with(K, max(abs(iso - trans))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/tiles.empty.Rd0000644000176200001440000000247513333543264015452 0ustar liggesusers\name{tiles.empty} \alias{tiles.empty} \title{Check For Empty Tiles in a Tessellation} \description{ Checks whether each tile in a tessellation is empty or non-empty. } \usage{ tiles.empty(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. It is possible for some tiles of a tessellation to be empty. For example, this can happen when the tessellation \code{x} is obtained by restricting another tessellation \code{y} to a smaller spatial domain \code{w}. The function \code{tiles.empty} checks whether each tile is empty or non-empty. The result is a logical vector, with entries equal to \code{TRUE} when the corresponding tile is empty. Results are given in the same order as the tiles would be listed by \code{tiles(x)}. } \value{ A logical vector. 
} \seealso{ \code{\link{tess}}, \code{\link{tiles}}, \code{\link{tilenames}}, \code{\link{tile.areas}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tiles.empty(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tiles.empty(E) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/Kmark.Rd0000644000176200001440000001422613333543262014235 0ustar liggesusers\name{Kmark} \alias{Kmark} \alias{markcorrint} \title{Mark-Weighted K Function} \description{ Estimates the mark-weighted \eqn{K} function of a marked point pattern. } \usage{ Kmark(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) markcorrint(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. 
} \item{returnL}{ Compute the analogue of the K-function if \code{returnL=FALSE} or the analogue of the L-function if \code{returnL=TRUE}. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } } \details{ The functions \code{Kmark} and \code{markcorrint} are identical. (Eventually \code{markcorrint} will be deprecated.) The \emph{mark-weighted \eqn{K} function} \eqn{K_f(r)}{K[f](r)} of a marked point process (Penttinen et al, 1992) is a generalisation of Ripley's \eqn{K} function, in which the contribution from each pair of points is weighted by a function of their marks. If the marks of the two points are \eqn{m_1, m_2}{m1, m2} then the weight is proportional to \eqn{f(m_1, m_2)}{f(m1, m2)} where \eqn{f} is a specified \emph{test function}. The mark-weighted \eqn{K} function is defined so that \deqn{ \lambda K_f(r) = \frac{C_f(r)}{E[ f(M_1, M_2) ]} }{ lambda * K_f(r) = C[f](r)/E[f(M1, M2)] } where \deqn{ C_f(r) = E \left[ \sum_{x \in X} f(m(u), m(x)) 1{0 < ||u - x|| \le r} \; \big| \; u \in X \right] }{ C[f](r) = E[ sum[x in X] f(m(u), m(x)) 1(0 < d(u,x) <= r) | u in X] } for any spatial location \eqn{u} taken to be a typical point of the point process \eqn{X}. Here \eqn{||u-x||}{d(u,x)} is the euclidean distance between \eqn{u} and \eqn{x}, so that the sum is taken over all random points \eqn{x} that lie within a distance \eqn{r} of the point \eqn{u}. The function \eqn{C_f(r)}{C[f](r)} is the \emph{unnormalised} mark-weighted \eqn{K} function. To obtain \eqn{K_f(r)}{K[f](r)} we standardise \eqn{C_f(r)}{C[f](r)} by dividing by \eqn{E[f(M_1,M_2)]}{E[f(M1,M2)]}, the expected value of \eqn{f(M_1,M_2)}{f(M1,M2)} when \eqn{M_1}{M1} and \eqn{M_2}{M2} are independent random marks with the same distribution as the marks in the point process. Under the hypothesis of random labelling, the mark-weighted \eqn{K} function is equal to Ripley's \eqn{K} function, \eqn{K_f(r) = K(r)}{K[f](r) = K(r)}. 
The mark-weighted \eqn{K} function is sometimes called the \emph{mark correlation integral} because it is related to the mark correlation function \eqn{k_f(r)}{k[f](r)} and the pair correlation function \eqn{g(r)} by \deqn{ K_f(r) = 2 \pi \int_0^r s k_f(s) \, g(s) \, {\rm d}s }{ K[f](r) = 2 * pi * integral[0,r] (s * k[f](s) * g(s) ) ds } See \code{\link{markcorr}} for a definition of the mark correlation function. Given a marked point pattern \code{X}, this command computes edge-corrected estimates of the mark-weighted \eqn{K} function. If \code{returnL=FALSE} then the estimated function \eqn{K_f(r)}{K[f](r)} is returned; otherwise the function \deqn{ L_f(r) = \sqrt{K_f(r)/\pi} }{ L[f](r) = sqrt(K[f](r)/pi) } is returned. } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation integral \eqn{K_f(r)}{K[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_f(r)}{K[f](r)} when the marks attached to different points are independent, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark-weighted \eqn{K} function \eqn{K_f(r)}{K[f](r)} obtained by the edge corrections named (if \code{returnL=FALSE}). } \references{ Penttinen, A., Stoyan, D. and Henttonen, H. M. (1992) Marked point processes in forest statistics. \emph{Forest Science} \bold{38} (1992) 806-824. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical analysis and modelling of spatial point patterns}. Chichester: John Wiley. } \seealso{ \code{\link{markcorr}} to estimate the mark correlation function. 
} \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- Kmark(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) Xc <- Kmark(X) plot(Xc) # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' M <- Kmark(amacrine, function(m1,m2) {m1==m2}, correction="translate") plot(M) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/closetriples.Rd0000644000176200001440000000160513333543263015676 0ustar liggesusers\name{closetriples} \alias{closetriples} \title{ Close Triples of Points } \description{ Low-level function to find all close triples of points. } \usage{ closetriples(X, rmax) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"pp3"}). } \item{rmax}{ Maximum distance between each pair of points in a triple. } } \details{ This low-level function finds all triples of points in a point pattern in which each pair lies closer than \code{rmax}. } \value{ A data frame with columns \code{i,j,k} giving the indices of the points in each triple, and a column \code{diam} giving the diameter (maximum pairwise distance) in the triple. } \author{ \spatstatAuthors. } \seealso{ \code{\link{closepairs}}, \code{\link{Tstat}}. } \examples{ closetriples(redwoodfull, 0.02) closetriples(redwoodfull, 0.005) } \keyword{spatial} \keyword{math} spatstat/man/linearpcfcross.inhom.Rd0000644000176200001440000001075713623712063017322 0ustar liggesusers\name{linearpcfcross.inhom} \alias{linearpcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. 
} \usage{ linearpcfcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-\eqn{j} pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. 
If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdaJ} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link{linearpcf}}, \code{\link{pcfcross.inhom}}. 
} \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } g <- linearpcfcross.inhom(chicago, "assault", "robbery", lamI, lamJ) \dontrun{ fit <- lppm(chicago, ~marks + x) linearpcfcross.inhom(chicago, "assault", "robbery", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/cauchy.estpcf.Rd0000644000176200001440000001354413571674202015734 0ustar liggesusers\name{cauchy.estpcf} \alias{cauchy.estpcf} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ cauchy.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. 
The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical pair correlation function of the Cauchy process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{\kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{\mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{\lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{\lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{\lambda} cannot be estimated, and the parameter \eqn{\mu}{\mu} will be returned as \code{NA}. 
The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure internally uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. 
Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{vargamma.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/convexhull.Rd0000644000176200001440000000153413333543263015356 0ustar liggesusers\name{convexhull} \alias{convexhull} \title{Convex Hull} \description{ Computes the convex hull of a spatial object. } \usage{ convexhull(x) } \arguments{ \item{x}{ a window (object of class \code{"owin"}), a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), or an object that can be converted to a window by \code{\link{as.owin}}. } } \value{ A window (an object of class \code{"owin"}). } \details{ This function computes the convex hull of the spatial object \code{x}. } \seealso{ \code{\link{owin}}, \code{\link{convexhull.xy}}, \code{\link{is.convex}} } \examples{ data(demopat) W <- Window(demopat) plot(convexhull(W), col="lightblue", border=NA) plot(W, add=TRUE, lwd=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/densityAdaptiveKernel.Rd0000644000176200001440000001313713441712766017475 0ustar liggesusers\name{densityAdaptiveKernel} \alias{densityAdaptiveKernel} \alias{densityAdaptiveKernel.ppp} \title{Adaptive Kernel Estimate of Intensity of Point Pattern} \description{ Computes an adaptive estimate of the intensity function of a point pattern using a variable-bandwidth smoothing kernel. } \usage{ densityAdaptiveKernel(X, \dots) \method{densityAdaptiveKernel}{ppp}(X, bw, \dots, weights=NULL, at=c("pixels", "points"), edge=TRUE, ngroups) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). 
} \item{bw}{ Numeric vector of smoothing bandwidths for each point in \code{X}, or a pixel image giving the smoothing bandwidth at each spatial location, or a spatial function of class \code{"funxy"} giving the smoothing bandwidth at each location. The default is to compute bandwidths using \code{\link{bw.abram}}. } \item{\dots}{ Arguments passed to \code{\link{bw.abram}} to compute the smoothing bandwidths if \code{bw} is missing, or passed to \code{\link[spatstat]{as.mask}} to control the spatial resolution of the result. } \item{weights}{ Optional vector of numeric weights for the points of \code{X}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{edge}{ Logical value indicating whether to perform edge correction. } \item{ngroups}{ Number of groups into which the bandwidth values should be partitioned and discretised. } } \details{ This function computes a spatially-adaptive kernel estimate of the spatially-varying intensity from the point pattern \code{X} using the partitioning technique of Davies and Baddeley (2018). The argument \code{bw} specifies the smoothing bandwidths to be applied to each of the points in \code{X}. It may be a numeric vector of bandwidth values, or a pixel image or function yielding the bandwidth values. If the points of \code{X} are \eqn{x_1,\ldots,x_n}{x[1], ..., x[n]} and the corresponding bandwidths are \eqn{\sigma_1,\ldots,\sigma_n}{\sigma[1], ..., \sigma[n]} then the adaptive kernel estimate of intensity at a location \eqn{u} is \deqn{ \hat\lambda(u) = \sum_{i=1}^n k(u, x_i, \sigma_i) }{ \lambda(u) = sum[i] k(u, x[i], \sigma[i]) } where \eqn{k(u, v, \sigma)} is the value at \eqn{u} of the (possibly edge-corrected) smoothing kernel with bandwidth \eqn{\sigma} induced by a data point at \eqn{v}. 
Exact computation of the estimate above can be time-consuming: it takes \eqn{n} times longer than fixed-bandwidth smoothing. The partitioning method of Davies and Baddeley (2018) accelerates this computation by partitioning the range of bandwidths into \code{ngroups} intervals, correspondingly subdividing the points of the pattern \code{X} into \code{ngroups} sub-patterns according to bandwidth, and applying fixed-bandwidth smoothing to each sub-pattern. The default value of \code{ngroups} is the integer part of the square root of the number of points in \code{X}, so that the computation time is only about \eqn{\sqrt{n}}{sqrt(n)} times slower than fixed-bandwidth smoothing. Any positive value of \code{ngroups} can be specified by the user. Specifying \code{ngroups=Inf} enforces exact computation of the estimate without partitioning. Specifying \code{ngroups=1} is the same as fixed-bandwidth smoothing with bandwidth \code{sigma=median(bw)}. } \section{Bandwidths and Bandwidth Selection}{ The function \code{densityAdaptiveKernel} computes one adaptive estimate of the intensity, determined by the smoothing bandwidth values \code{bw}. Typically the bandwidth values are computed by first computing a pilot estimate of the intensity, then using \code{\link{bw.abram}} to compute the vector of bandwidths according to Abramson's rule. This involves specifying a global bandwidth \code{h0}. The default bandwidths may work well in many contexts, but for optimal bandwidth selection, this calculation should be performed repeatedly with different values of \code{h0} to optimise the value of \code{h0}. This can be computationally demanding; we recommend the function \code{multiscale.density} in the \pkg{sparr} package which supports much faster bandwidth selection, using the FFT method of Davies and Baddeley (2018). } \value{ If \code{at="pixels"} (the default), the result is a pixel image. 
If \code{at="points"}, the result is a numeric vector with one entry for each data point in \code{X}. } \author{ \adrian and Tilman Davies. } \references{ Davies, T.M. and Baddeley, A. (2018) Fast computation of spatially adaptive kernel estimates. \emph{Statistics and Computing}, \bold{28}(4), 937-956.\cr Hall, P. and Marron, J.S. (1988) Variable window width kernel density estimates of probability densities. \emph{Probability Theory and Related Fields}, \bold{80}, 37-49.\cr Silverman, B.W. (1986) \emph{Density Estimation for Statistics and Data Analysis}. Chapman and Hall, New York. } \seealso{ \code{\link{density.ppp}}, \code{\link{adaptive.density}}, \code{\link{densityVoronoi}}, \code{\link{im.object}}. See the function \code{bivariate.density} in the \pkg{sparr} package for a more flexible implementation, and \code{multiscale.density} for an implementation that is more efficient for bandwidth selection. } \examples{ Z <- densityAdaptiveKernel(redwood, h0=0.1) plot(Z, main="Adaptive kernel estimate") points(redwood, col="white") } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/nobjects.Rd0000644000176200001440000000246113536734342015003 0ustar liggesusers\name{nobjects} \alias{nobjects} \alias{nobjects.ppp} \alias{nobjects.ppx} \alias{nobjects.psp} \alias{nobjects.tess} \alias{nobjects.lintess} \title{ Count Number of Geometrical Objects in a Spatial Dataset } \description{ A generic function to count the number of geometrical objects in a spatial dataset. } \usage{ nobjects(x) \method{nobjects}{ppp}(x) \method{nobjects}{ppx}(x) \method{nobjects}{psp}(x) \method{nobjects}{tess}(x) \method{nobjects}{lintess}(x) } \arguments{ \item{x}{A dataset.} } \details{ The generic function \code{nobjects} counts the number of geometrical objects in the spatial dataset \code{x}. The methods for point patterns (classes \code{"ppp"} and \code{"ppx"}, embracing \code{"pp3"} and \code{"lpp"}) count the number of points in the pattern. 
The method for line segment patterns (class \code{"psp"}) counts the number of line segments in the pattern. The method for tessellations (class \code{"tess"} or \code{"lintess"}) counts the number of tiles of the tessellation. } \value{ A single integer. } \author{ \spatstatAuthors } \seealso{ \code{\link{npoints}} } \examples{ nobjects(redwood) nobjects(edges(letterR)) nobjects(dirichlet(cells)) nobjects(lineardirichlet(runiflpp(5, simplenet))) } \keyword{spatial} \keyword{manip} spatstat/man/linnet.Rd0000644000176200001440000000625013333543263014460 0ustar liggesusers\name{linnet} \alias{linnet} \title{ Create a Linear Network } \description{ Creates an object of class \code{"linnet"} representing a network of line segments. } \usage{ linnet(vertices, m, edges, sparse=FALSE, warn=TRUE) } \arguments{ \item{vertices}{ Point pattern (object of class \code{"ppp"}) specifying the vertices of the network. } \item{m}{ Adjacency matrix. A matrix or sparse matrix of logical values equal to \code{TRUE} when the corresponding vertices are joined by a line. (Specify either \code{m} or \code{edges}.) } \item{edges}{ Edge list. A two-column matrix of integers, specifying all pairs of vertices that should be joined by an edge. (Specify either \code{m} or \code{edges}.) } \item{sparse}{ Optional. Logical value indicating whether to use a sparse matrix representation of the network. See Details. } \item{warn}{ Logical value indicating whether to issue a warning if the resulting network is not connected. } } \details{ An object of class \code{"linnet"} represents a network of straight line segments in two dimensions. The function \code{linnet} creates such an object from the minimal information: the spatial location of each vertex (endpoint, crossing point or meeting point of lines) and information about which vertices are joined by an edge. 
If \code{sparse=FALSE} (the default), the algorithm will compute and store various properties of the network, including the adjacency matrix \code{m} and a matrix giving the shortest-path distances between each pair of vertices in the network. This is more efficient for small datasets. However it can require large amounts of memory and can take a long time to execute. If \code{sparse=TRUE}, then the shortest-path distances will not be computed, and the network adjacency matrix \code{m} will be stored as a sparse matrix. This saves a lot of time and memory when creating the linear network. If the argument \code{edges} is given, then it will also determine the \emph{ordering} of the line segments when they are stored or extracted. For example, \code{edges[i,]} corresponds to \code{as.psp(L)[i]}. } \value{ Object of class \code{"linnet"} representing the linear network. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link[spatstat.data]{simplenet}} for an example of a linear network. \code{\link[spatstat:methods.linnet]{methods.linnet}} for methods applicable to \code{linnet} objects. Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}}, \code{\link{joinVertices}}, \code{\link{connected.linnet}}, \code{\link{lixellate}}. \code{\link{delaunayNetwork}} for the Delaunay triangulation as a network. \code{\link{ppp}}, \code{\link{psp}}. 
} \examples{ # letter 'A' specified by adjacency matrix v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) m <- matrix(FALSE, 5,5) for(i in 1:4) m[i,i+1] <- TRUE m[2,4] <- TRUE m <- m | t(m) letterA <- linnet(v, m) plot(letterA) # letter 'A' specified by edge list edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) } \keyword{spatial} spatstat/man/plot.lppm.Rd0000644000176200001440000000250613333543264015115 0ustar liggesusers\name{plot.lppm} \alias{plot.lppm} \title{ Plot a Fitted Point Process Model on a Linear Network } \description{ Plots the fitted intensity of a point process model on a linear network. } \usage{ \method{plot}{lppm}(x, ..., type="trend") } \arguments{ \item{x}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to \code{\link{plot.linim}} to control the plot. } \item{type}{ Character string (either \code{"trend"} or \code{"cif"}) determining whether to plot the fitted first order trend or the conditional intensity. } } \details{ This function is the plot method for the class \code{"lppm"}. It computes the fitted intensity of the point process model, and displays it using \code{\link{plot.linim}}. The default is to display intensity values as colours. Alternatively if the argument \code{style="width"} is given, intensity values are displayed as the widths of thick lines drawn over the network. } \value{ Null. } \author{ \adrian } \seealso{ \code{\link{lppm}}, \code{\link{plot.linim}}, \code{\link{methods.lppm}}, \code{\link{predict.lppm}}. 
} \examples{ X <- runiflpp(10, simplenet) fit <- lppm(X ~x) plot(fit) plot(fit, style="width") } \keyword{spatial} \keyword{models} spatstat/man/plot.mppm.Rd0000644000176200001440000000463313333543265015122 0ustar liggesusers\name{plot.mppm} \alias{plot.mppm} \title{plot a Fitted Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns by \code{\link{mppm}}, compute spatial trend or conditional intensity surface of the model, in a form suitable for plotting, and (optionally) plot this surface. } \usage{ \method{plot}{mppm}(x, \dots, trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp")) } \arguments{ \item{x}{ A point process model fitted to multiple point patterns, typically obtained from the model-fitting algorithm \code{\link{mppm}}. An object of class \code{"mppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} or \code{\link{plot.anylist}} controlling the plot. } \item{trend}{ Logical value indicating whether to plot the fitted trend. } \item{cif}{ Logical value indicating whether to plot the fitted conditional intensity. } \item{se}{ Logical value indicating whether to plot the standard error of the fitted trend. } \item{how}{ Single character string indicating the style of plot to be performed. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"mppm"} of point process models fitted to multiple point patterns (see \code{\link{mppm}}). It invokes \code{\link{subfits}} to compute the fitted model for each individual point pattern dataset, then calls \code{\link{plot.ppm}} to plot these individual models. These individual plots are displayed using \code{\link{plot.anylist}}, which generates either a series of separate plot frames or an array of plot panels on a single page. } \seealso{ \code{\link{plot.ppm}}, \code{\link{mppm}}, \code{\link{plot.listof}} } \references{ Baddeley, A., Rubak, E. and Turner, R. 
(2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \examples{ # Synthetic data from known model n <- 9 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~Z + U + V, data=H) plot(fit) } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/is.multitype.ppp.Rd0000644000176200001440000000435313333543263016435 0ustar liggesusers\name{is.multitype.ppp} \alias{is.multitype.ppp} \alias{is.multitype.lpp} \title{Test Whether A Point Pattern is Multitype} \description{ Tests whether a point pattern has ``marks'' attached to the points which classify the points into several types. } \usage{ \method{is.multitype}{ppp}(X, na.action="warn", \dots) \method{is.multitype}{lpp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a multitype point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. This function tests whether the point pattern \code{X} contains or involves marked points, \bold{and} that the marks are a factor. It is a method for the generic function \code{\link{is.multitype}}. 
For example, the \code{\link[spatstat.data]{amacrine}} dataset is multitype (there are two types of cells, on and off), but the \code{\link[spatstat.data]{longleaf}} dataset is \emph{not} multitype (the marks are real numbers). The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppm}} } \examples{ is.multitype(cells) #FALSE - no marks is.multitype(longleaf) #FALSE - real valued marks is.multitype(amacrine) #TRUE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/affine.lpp.Rd0000644000176200001440000000512613333543262015211 0ustar liggesusers\name{affine.lpp} \alias{affine.lpp} \alias{shift.lpp} \alias{rotate.lpp} \alias{rescale.lpp} \alias{scalardilate.lpp} \title{Apply Geometrical Transformations to Point Pattern on a Linear Network} \description{ Apply geometrical transformations to a point pattern on a linear network. } \usage{ \method{affine}{lpp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{lpp}(X, vec=c(0,0), \dots, origin=NULL) \method{rotate}{lpp}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{lpp}(X, f, \dots) \method{rescale}{lpp}(X, s, unitname) } \arguments{ \item{X}{Point pattern on a linear network (object of class \code{"lpp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } \item{origin}{ Character string determining a location that will be shifted to the origin. 
Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. A value acceptable to the function \code{\link{unitname<-}} } } \value{ Another point pattern on a linear network (object of class \code{"lpp"}) representing the result of applying the geometrical transformation. } \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"lpp"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{lpp}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. } \examples{ X <- rpoislpp(2, simplenet) U <- rotate(X, pi) V <- shift(X, c(0.1, 0.2)) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear, vec=c(0, 1)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/linearKcross.Rd0000644000176200001440000000564613623712063015634 0ustar liggesusers\name{linearKcross} \alias{linearKcross} \title{ Multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. 
} \usage{ linearKcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. 
\emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ data(chicago) K <- linearKcross(chicago, "assault", "robbery") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/linim.Rd0000644000176200001440000000601013333543263014271 0ustar liggesusers\name{linim} \alias{linim} \title{ Create Pixel Image on Linear Network } \description{ Creates an object of class \code{"linim"} that represents a pixel image on a linear network. } \usage{ linim(L, Z, \dots, restrict=TRUE, df=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{Z}{ Pixel image (object of class \code{"im"}). } \item{\dots}{Ignored.} \item{restrict}{ Advanced use only. Logical value indicating whether to ensure that all pixels in \code{Z} which do not lie on the network \code{L} have pixel value \code{NA}. This condition must be satisfied, but if you set \code{restrict=FALSE} it will not be checked, and the code will run faster. } \item{df}{ Advanced use only. Data frame giving full details of the mapping between the pixels of \code{Z} and the lines of \code{L}. See Details. } } \details{ This command creates an object of class \code{"linim"} that represents a pixel image defined on a linear network. Typically such objects are used to represent the result of smoothing or model-fitting on the network. Most users will not need to call \code{linim} directly. The argument \code{L} is a linear network (object of class \code{"linnet"}). It gives the exact spatial locations of the line segments of the network, and their connectivity. 
The argument \code{Z} is a pixel image object of class \code{"im"} that gives a pixellated approximation of the function values. For increased efficiency, advanced users may specify the optional argument \code{df}. This is a data frame giving the precomputed mapping between the pixels of \code{Z} and the line segments of \code{L}. It should have columns named \code{xc, yc} containing the coordinates of the pixel centres, \code{x,y} containing the projections of these pixel centres onto the linear network, \code{mapXY} identifying the line segment on which each projected point lies, and \code{tp} giving the parametric position of \code{(x,y)} along the segment. } \value{ Object of class \code{"linim"} that also inherits the class \code{"im"}. There is a special method for plotting this class. } \author{ \adrian } \seealso{ \code{\link{plot.linim}}, \code{\link{linnet}}, \code{\link{eval.linim}}, \code{\link{Math.linim}}, \code{\link{im}}. } \examples{ Z <- as.im(function(x,y) {x-y}, Frame(simplenet)) X <- linim(simplenet, Z) X } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} spatstat/man/branchlabelfun.Rd0000644000176200001440000000302013333543262016124 0ustar liggesusers\name{branchlabelfun} \alias{branchlabelfun} \title{ Tree Branch Membership Labelling Function } \description{ Creates a function which returns the tree branch membership label for any location on a linear network. 
} \usage{ branchlabelfun(L, root = 1) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). The network must have no loops. } \item{root}{ Root of the tree. An integer index identifying which point in \code{vertices(L)} is the root of the tree. } } \details{ The linear network \code{L} must be an acyclic graph (i.e. must not contain any loops) so that it can be interpreted as a tree. The result of \code{f <- branchlabelfun(L, root)} is a function \code{f} which gives, for each location on the linear network \code{L}, the tree branch label at that location. Tree branch labels are explained in \code{\link{treebranchlabels}}. The result \code{f} also belongs to the class \code{"linfun"}. It can be called using several different kinds of data, as explained in the help for \code{\link{linfun}}. The values of the function are character strings. } \value{ A function (of class \code{"linfun"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{treebranchlabels}}, \code{\link{linfun}} } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) # make function f <- branchlabelfun(L, 1) plot(f) X <- runiflpp(5, L) f(X) } \keyword{spatial} \keyword{math} spatstat/man/print.ppm.Rd0000644000176200001440000000303013333543264015110 0ustar liggesusers\name{print.ppm} \alias{print.ppm} \title{Print a Fitted Point Process Model} \description{ Default \code{print} method for a fitted point process model. } \usage{ \method{print}{ppm}(x,\dots, what=c("all", "model", "trend", "interaction", "se", "errors")) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fittingg algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{what}{ Character vector (partially-matched) indicating what information should be printed. } \item{\dots}{Ignored.} } \value{ none. } \details{ This is the \code{print} method for the class \code{"ppm"}. 
It prints information about the fitted model in a sensible format. The argument \code{what} makes it possible to print only some of the information. If \code{what} is missing, then by default, standard errors for the estimated coefficients of the model will be printed only if the model is a Poisson point process. To print the standard errors for a non-Poisson model, call \code{print.ppm} with the argument \code{what} given explicitly, or reset the default rule by typing \code{spatstat.options(print.ppm.SE="always")}. } \seealso{ \code{\link{ppm.object}} for details of the class \code{"ppm"}. \code{\link{ppm}} for generating these objects. \code{\link{plot.ppm}}, \code{\link{predict.ppm}} } \examples{ \dontrun{ m <- ppm(cells, ~1, Strauss(0.05)) m } } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} \keyword{models} spatstat/man/intensity.psp.Rd0000644000176200001440000000477313333543263016026 0ustar liggesusers\name{intensity.psp} \alias{intensity.psp} \title{ Empirical Intensity of Line Segment Pattern } \description{ Computes the average total length of segments per unit area in a spatial pattern of line segments. } \usage{ \method{intensity}{psp}(X, ..., weights=NULL) } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{weights}{ Optional. Numeric vector of weights attached to the segments of \code{X}. Alternatively, an \code{expression} which can be evaluated to give a vector of weights. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a line segment pattern (object of class \code{"psp"}), i.e. the average total segment length per unit area. If the segment pattern is multitype, the intensities of the different types are computed separately. Note that the intensity will be computed as the length per area in units per square unit, based on the unit of length for \code{X}, given by \code{unitname(X)}. 
If the unit of length is a strange multiple of a standard unit, like \code{5.7 metres}, then it can be converted to the standard unit using \code{\link{rescale}}. See the Examples. If \code{weights} are given, then the intensity is computed as the total \emph{weight times length} per square unit. The argument \code{weights} should be a numeric vector of weights for each point of \code{X} (weights may be negative or zero). Alternatively \code{weights} can be an \code{expression} which will be evaluated for the dataset to yield a vector of weights. The expression may involve the Cartesian coordinates \eqn{x,y} of the points, and the marks of the points, if any. Variable names permitted in the expression include \code{x0}, \code{x1}, \code{y0}, \code{y1} for the coordinates of the segment endpoint, the name \code{marks} if \code{X} has a single column of marks, the names of any columns of marks if \code{X} has a data frame of marks, and the names of constants or functions that exist in the global environment. See the Examples. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). } \seealso{ \code{\link{intensity}} } \examples{ S <- as.psp(simplenet) intensity(S) intensity(S, weights=runif(nsegments(S))) intensity(S, weights=expression((x0+x1)/2)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/rStraussHard.Rd0000644000176200001440000000720113602545270015610 0ustar liggesusers\name{rStraussHard} \alias{rStraussHard} \title{Perfect Simulation of the Strauss-Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss-Hardcore process, using a perfect simulation algorithm. } \usage{ rStraussHard(beta, gamma = 1, R = 0, H = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). 
} \item{R}{ interaction radius (a non-negative number). } \item{H}{ hard core distance (a non-negative number smaller than \code{R}). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Strauss-Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss-Hardcore process is described in \code{\link{StraussHard}}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). A limitation of the perfect simulation algorithm is that the interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1}. To simulate a Strauss-hardcore process with \eqn{\gamma > 1}{gamma > 1}, use \code{\link{rmh}}. There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. 
} \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ Kasper Klitgaard Berthelsen and \adrian } \examples{ Z <- rStraussHard(100,0.7,0.05,0.02) Y <- rStraussHard(100,0.7,0.05,0.01, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{StraussHard}}. \code{\link{rHardcore}}, \code{\link{rStrauss}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/shift.psp.Rd0000644000176200001440000000474113442350577015115 0ustar liggesusers\name{shift.psp} \alias{shift.psp} \title{Apply Vector Translation To Line Segment Pattern} \description{ Applies a vector shift to a line segment pattern. } \usage{ \method{shift}{psp}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Line Segment pattern (object of class \code{"psp"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{ Location that will be shifted to the origin. Either a numeric vector of length 2 giving the location, or a point pattern containing only one point, or a list with two entries named \code{x} and \code{y}, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). 
} } \value{ Another line segment pattern (of class \code{"psp"}) representing the result of applying the vector shift. } \details{ The line segment pattern, and its window, are translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, the argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the coordinate origin \eqn{(0,0)}. The argument \code{origin} should be either a numeric vector of length 2 giving the spatial coordinates of a location, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin, and so on. } \seealso{ \code{\link{shift}}, \code{\link{shift.owin}}, \code{\link{shift.ppp}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, col="red") Y <- shift(X, c(0.05,0.05)) plot(Y, add=TRUE, col="blue") shift(Y, origin="mid") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/mean.im.Rd0000644000176200001440000000446613333543263014522 0ustar liggesusers\name{mean.im} %DontDeclareMethods \alias{mean.im} \alias{median.im} \title{Mean and Median of Pixel Values in an Image} \description{ Calculates the mean or median of the pixel values in a pixel image. 
} %NAMESPACE S3method("mean", "im") %NAMESPACE S3method("median", "im") \usage{ ## S3 method for class 'im' ## mean(x, trim=0, na.rm=TRUE, ...) ## S3 method for class 'im' ## median(x, na.rm=TRUE) [R < 3.4.0] ## median(x, na.rm=TRUE, ...) [R >= 3.4.0] } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{na.rm}{ Logical value indicating whether \code{NA} values should be stripped before the computation proceeds. } \item{trim}{ The fraction (0 to 0.5) of pixel values to be trimmed from each end of their range, before the mean is computed. } \item{\dots}{ Ignored. } } \details{ These functions calculate the mean and median of the pixel values in the image \code{x}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. The function \code{mean.im} is a method for the generic function \code{\link[base]{mean}} for the class \code{"im"}. Similarly \code{median.im} is a method for the generic \code{\link[stats]{median}}. If the image \code{x} is logical-valued, the mean value of \code{x} is the fraction of pixels that have the value \code{TRUE}. The median is not defined. If the image \code{x} is factor-valued, then the mean of \code{x} is the mean of the integer codes of the pixel values. The median is are not defined. Other mathematical operations on images are supported by \code{\link{Math.im}}, \code{\link{Summary.im}} and \code{\link{Complex.im}}. Other information about an image can be obtained using \code{\link{summary.im}} or \code{\link{quantile.im}}. } \value{ A single number. } \seealso{ \code{\link{Math.im}} for other operations. Generics and default methods: \code{\link[base]{mean}}, \code{\link[stats]{median}}. \code{\link{quantile.im}}, \code{\link{anyNA.im}}, \code{\link{im.object}}, \code{\link{summary.im}}. } \examples{ X <- as.im(function(x,y) {x^2}, unit.square()) mean(X) median(X) mean(X, trim=0.05) } \author{ \spatstatAuthors and Kassel Hingee. 
} \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/erosion.Rd0000644000176200001440000000626613333543263014654 0ustar liggesusers\name{erosion} \alias{erosion} \alias{erosion.owin} \alias{erosion.ppp} \alias{erosion.psp} \title{Morphological Erosion by a Disc} \description{ Perform morphological erosion of a window, a line segment pattern or a point pattern by a disc. } \usage{ erosion(w, r, \dots) \method{erosion}{owin}(w, r, shrink.frame=TRUE, \dots, strict=FALSE, polygonal=NULL) \method{erosion}{ppp}(w, r,\dots) \method{erosion}{psp}(w, r,\dots) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of erosion.} \item{shrink.frame}{logical: if \code{TRUE}, erode the bounding rectangle as well.} \item{\dots}{extra arguments to \code{\link{as.mask}} controlling the pixel resolution, if pixel approximation is used.} \item{strict}{Logical flag determining the fate of boundary pixels, if pixel approximation is used. See details.} \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the eroded region (or \code{NULL} if this region is empty). If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x \in W}{x in W} such that the distance from \eqn{x} to the boundary of \eqn{W} is greater than or equal to \eqn{r}. In other words it is the result of trimming a margin of width \eqn{r} off the set \eqn{W}. If \code{polygonal=TRUE} then a polygonal approximation to the erosion is computed. 
If \code{polygonal=FALSE} then a pixel approximation to the erosion is computed from the distance map of \code{w}. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to control the pixel resolution. The erosion consists of all pixels whose distance from the boundary of \code{w} is strictly greater than \code{r} (if \code{strict=TRUE}) or is greater than or equal to \code{r} (if \code{strict=FALSE}). When \code{w} is a window, the default (when \code{polygonal=NULL}) is to compute a polygonal approximation if \code{w} is a rectangle or polygonal window, and to compute a pixel approximation if \code{w} is a window of type \code{"mask"}. If \code{shrink.frame} is false, the resulting window is given the same outer, bounding rectangle as the original window \code{w}. If \code{shrink.frame} is true, the original bounding rectangle is also eroded by the same distance \code{r}. To simply compute the area of the eroded window, use \code{\link{eroded.areas}}. } \seealso{ \code{\link{dilation}} for the opposite operation. \code{\link{erosionAny}} for morphological erosion using any shape. \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{eroded.areas}} } \examples{ plot(letterR, main="erosion(letterR, 0.2)") plot(erosion(letterR, 0.2), add=TRUE, col="red") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/nncross.pp3.Rd0000644000176200001440000001456213333543263015362 0ustar liggesusers\name{nncross.pp3} \alias{nncross.pp3} \title{Nearest Neighbours Between Two Patterns in 3D} \description{ Given two point patterns \code{X} and \code{Y} in three dimensions, finds the nearest neighbour in \code{Y} of each point of \code{X}. 
} \usage{ \method{nncross}{pp3}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, sortby=c("range", "var", "x", "y", "z"), is.sorted.X = FALSE, is.sorted.Y = FALSE) } \arguments{ \item{X,Y}{Point patterns in three dimensions (objects of class \code{"pp3"}).} \item{iX, iY}{Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{sortby}{ Determines which coordinate to use to sort the point patterns. See Details. } \item{is.sorted.X, is.sorted.Y}{ Logical values attesting whether the point patterns \code{X} and \code{Y} have been sorted. See Details. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} in three dimensions, this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. 
If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts both the point patterns \code{X} and \code{Y} into increasing order of the \eqn{x} coordinate, or both into increasing order of the \eqn{y} coordinate, or both into increasing order of the \eqn{z} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the largest range of values (according to the frame of the enclosing window of \code{Y}). If \code{sortby = "var"}), sorting will occur on the coordinate that has the greater variance (in the pattern \code{Y}). Setting \code{sortby="x"} or \code{sortby = "y"} or \code{sortby = "z"} will specify that sorting should occur on the \eqn{x}, \eqn{y} or \eqn{z} coordinate, respectively. 
If the point pattern \code{X} is already sorted, then the corresponding argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. Similarly if \code{Y} is already sorted, then \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. If both \code{X} and \code{Y} are sorted \emph{on the same coordinate axis} then both \code{is.sorted.X} and \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. 
} \examples{ # two different point patterns X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) Y <- pp3(runif(20), runif(20), runif(20), box3(c(0,1))) N <- nncross(X,Y)$which N <- nncross(X,Y, what="which") #faster # note that length(N) = 10 # k-nearest neighbours N3 <- nncross(X, Y, k=1:3) # two patterns with some points in common Z <- pp3(runif(20), runif(20), runif(20), box3(c(0,1))) X <- Z[1:15] Y <- Z[10:20] iX <- 1:15 iY <- 10:20 N <- nncross(X,Y, iX, iY, what="which") } \author{ \adrian , \rolf , and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/model.matrix.ppm.Rd0000644000176200001440000001042213333543263016361 0ustar liggesusers\name{model.matrix.ppm} \alias{model.matrix.ppm} \alias{model.matrix.kppm} \alias{model.matrix.dppm} \alias{model.matrix.lppm} \alias{model.matrix.ippm} \title{Extract Design Matrix from Point Process Model} \description{ Given a point process model that has been fitted to spatial point pattern data, this function extracts the design matrix of the model. } \usage{ \method{model.matrix}{ppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{kppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{dppm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE) \method{model.matrix}{lppm}(object, data=model.frame(object, na.action=NULL), \dots, keepNA=TRUE) \method{model.matrix}{ippm}(object, data=model.frame(object, na.action=NULL), \dots, Q=NULL, keepNA=TRUE, irregular=FALSE) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"ippm"} or \code{"lppm"}. } \item{data}{ A model frame, containing the data required for the Berman-Turner device. } \item{Q}{ A point pattern (class \code{"ppp"}) or quadrature scheme (class \code{"quad"}) specifying new locations where the covariates should be computed. 
} \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{irregular}{ Logical value indicating whether to include the irregular score components. } } \details{ These commands are methods for the generic function \code{\link{model.matrix}}. They extract the design matrix of a spatial point process model (class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}). More precisely, this command extracts the design matrix of the generalised linear model associated with a spatial point process model. The \code{object} must be a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}) fitted to spatial point pattern data. Such objects are produced by the model-fitting functions \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}} and \code{\link{lppm}}. The methods \code{model.matrix.ppm}, \code{model.matrix.kppm}, \code{model.matrix.dppm} and \code{model.matrix.lppm} extract the model matrix for the GLM. The result is a matrix, with one row for every quadrature point in the fitting procedure, and one column for every constructed covariate in the design matrix. If there are \code{NA} values in the covariates, the argument \code{keepNA} determines whether to retain or delete the corresponding rows of the model matrix. The default \code{keepNA=TRUE} is to retain them. Note that this differs from the default behaviour of many other methods for \code{model.matrix}, which typically delete rows containing \code{NA}. The quadrature points themselves can be extracted using \code{\link{quad.ppm}}. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. Rows of the matrix correspond to quadrature points in the fitting procedure (provided \code{keepNA=TRUE}). } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{lppm}}, \code{\link{ippm}}, \code{\link{ppm.object}}, \code{\link{quad.ppm}}, \code{\link{residuals.ppm}} } \examples{ fit <- ppm(cells ~ x) head(model.matrix(fit)) model.matrix(fit, Q=runifpoint(5)) kfit <- kppm(redwood ~ x, "Thomas") m <- model.matrix(kfit) } \keyword{spatial} \keyword{models} spatstat/man/requireversion.Rd0000644000176200001440000000232413616014552016245 0ustar liggesusers\name{requireversion} \alias{requireversion} \title{ Require a Specific Version of a Package } \description{ Checks that the version number of a specified package is greater than or equal to the specified version number. For use in stand-alone \R scripts. } \usage{ requireversion(pkg, ver, fatal=TRUE) } \arguments{ \item{pkg}{ Package name. } \item{ver}{ Character string containing version number. } \item{fatal}{ Logical value indicating whether an error should occur when the package version is less than \code{ver}. } } \details{ This function checks whether the installed version of the package \code{pkg} is greater than or equal to \code{ver}. By default, an error occurs if this condition is not met. It is useful in stand-alone \R scripts, which often require a particular version of a package in order to work correctly. \bold{This function should not be used inside a package}: for that purpose, the dependence on packages and versions should be specified in the package description file. } \value{ A logical value. 
} \author{ \adrian } \examples{ requireversion(spatstat, "1.42-0") requireversion(spatstat, "999.999-999", fatal=FALSE) } \keyword{environment} spatstat/man/dppPowerExp.Rd0000644000176200001440000000247713571674202015455 0ustar liggesusers\name{dppPowerExp} \alias{dppPowerExp} \title{Power Exponential Spectral Determinantal Point Process Model} \description{Function generating an instance of the Power Exponential Spectral determinantal point process model.} \usage{dppPowerExp(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The Power Exponential Spectral DPP is defined in (Lavancier, \Moller and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. } \examples{ m <- dppPowerExp(lambda=100, alpha=.01, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}} } spatstat/man/delaunay.Rd0000644000176200001440000000237213333543263014772 0ustar liggesusers\name{delaunay} \alias{delaunay} \title{Delaunay Triangulation of Point Pattern} \description{ Computes the Delaunay triangulation of a spatial point pattern. } \usage{ delaunay(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ The Delaunay triangulation of a spatial point pattern \code{X} is defined as follows. 
First the Dirichlet/Voronoi tessellation of \code{X} is computed;
} \item{\dots}{ Arguments passed to methods. } } \details{ The function \code{pixellate} converts a geometrical object \code{x} into a pixel image, by measuring the \emph{amount} of \code{x} that is inside each pixel. If \code{x} is a point pattern, \code{pixellate(x)} counts the number of points of \code{x} falling in each pixel. If \code{x} is a window, \code{pixellate(x)} measures the area of intersection of each pixel with the window. The function \code{pixellate} is generic, with methods for point patterns (\code{\link{pixellate.ppp}}), windows (\code{\link{pixellate.owin}}), and line segment patterns (\code{\link{pixellate.psp}}), See the separate documentation for these methods. The related function \code{\link{as.im}} also converts \code{x} into a pixel image, but typically measures only the presence or absence of \code{x} inside each pixel. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate.ppp}}, \code{\link{pixellate.owin}}, \code{\link{pixellate.psp}}, \code{\link{pixellate.linnet}}, \code{\link{as.im}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/pairdist.default.Rd0000644000176200001440000000670413357000546016433 0ustar liggesusers\name{pairdist.default} \alias{pairdist.default} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of points in a set of points in two dimensional space } \usage{ \method{pairdist}{default}(X, Y=NULL, \dots, period=NULL, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Arguments specifying the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored. } \item{period}{ Optional. Dimensions for periodic edge correction. } \item{method}{ String specifying which method of calculation to use. 
Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ Given the coordinates of a set of points in two dimensional space, this function computes the Euclidean distances between all pairs of points, and returns the matrix of distances. It is a method for the generic function \code{pairdist}. Note: If only pairwise distances within some threshold value are needed the low-level function \code{\link{closepairs}} may be much faster to use. The arguments \code{X} and \code{Y} must determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. For typical input the result is numerically equivalent to (but computationally faster than) \code{as.matrix(dist(x))} where \code{x = cbind(X, Y)}, but that command is useful for calculating all pairwise distances between points in \eqn{k}-dimensional space when \code{x} has \eqn{k} columns. Alternatively if \code{period} is given, then the distances will be computed in the `periodic' sense (also known as `torus' distance). The points will be treated as if they are in a rectangle of width \code{period[1]} and height \code{period[2]}. Opposite edges of the rectangle are regarded as equivalent. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). The argument \code{method} is not normally used. 
It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is somewhat faster. } \seealso{ \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}}, \code{\link{closepairs}} } \examples{ x <- runif(100) y <- runif(100) d <- pairdist(x, y) d <- pairdist(cbind(x,y)) d <- pairdist(x, y, period=c(1,1)) d <- pairdist(x, y, squared=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/Lcross.Rd0000644000176200001440000000563713551505532014443 0ustar liggesusers\name{Lcross} \alias{Lcross} \title{Multitype L-function (cross-type)} \description{ Calculates an estimate of the cross-type L-function for a multitype point pattern. } \usage{ Lcross(X, i, j, ..., from, to, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{correction,\dots}{ Arguments passed to \code{\link{Kcross}}. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. 
} } \details{ The cross-type L-function is a transformation of the cross-type K-function, \deqn{L_{ij}(r) = \sqrt{\frac{K_{ij}(r)}{\pi}}}{Lij(r) = sqrt(Kij(r)/pi)} where \eqn{K_{ij}(r)}{Kij(r)} is the cross-type K-function from type \code{i} to type \code{j}. See \code{\link{Kcross}} for information about the cross-type K-function. The command \code{Lcross} first calls \code{\link{Kcross}} to compute the estimate of the cross-type K-function, and then applies the square root transformation. For a marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the L-function is \eqn{L_{ij}(r) = r}{Lij(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{ij}}{Lij} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{ij}}{Lij} has been estimated } \item{theo}{the theoretical value \eqn{L_{ij}(r) = r}{Lij(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}}{Lij} obtained by the edge corrections named. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Ldot}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Lcross(amacrine, "off", "on") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/pcf.fv.Rd0000644000176200001440000001134213333543264014350 0ustar liggesusers\name{pcf.fv} \alias{pcf.fv} \title{Pair Correlation Function obtained from K Function} \description{ Estimates the pair correlation function of a point pattern, given an estimate of the K function. } \usage{ \method{pcf}{fv}(X, \dots, method="c") } \arguments{ \item{X}{ An estimate of the \eqn{K} function or one of its variants. An object of class \code{"fv"}. } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function. Essentially a data frame containing (at least) the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{pcf}{vector of values of \eqn{g(r)} } } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. 
We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an estimate of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivative. It is a method for the generic function \code{\link{pcf}} for the class \code{"fv"}. The argument \code{X} should be an estimated \eqn{K} function, given as a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). This object should be the value returned by \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{Kmulti}} or \code{\link{Kinhom}}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Y(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. 
Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # univariate point pattern X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } K <- Kest(X) p <- pcf.fv(K, spar=0.5, method="b") plot(p, main="pair correlation function for simdat") # indicates inhibition at distances r < 0.3 } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/density.psp.Rd0000644000176200001440000000676513333543263015462 0ustar liggesusers\name{density.psp} \alias{density.psp} \alias{density.linnet} \title{Kernel Smoothing of Line Segment Pattern or Linear Network} \description{ Compute a kernel smoothed intensity function from a line segment pattern or a linear network. } \usage{ \method{density}{psp}(x, sigma, \dots, edge=TRUE, method=c("FFT", "C", "interpreted"), at=NULL) \method{density}{linnet}(x, \dots) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}) or linear network (object of class \code{"linnet"}) to be smoothed. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Extra arguments, including arguments passed to \code{\link{as.mask}} to determine the resolution of the resulting image. } \item{edge}{ Logical flag indicating whether to apply edge correction. 
} \item{method}{ Character string (partially matched) specifying the method of computation. Option \code{"FFT"} is the fastest, while \code{"C"} is the most accurate. } \item{at}{ Optional. An object specifying the locations where density values should be computed. Either a window (object of class \code{"owin"}) or a point pattern (object of class \code{"ppp"} or \code{"lpp"}). } } \value{ A pixel image (object of class \code{"im"}) or a numeric vector. } \details{ These are methods for the generic function \code{\link{density}} for the classes \code{"psp"} (line segment patterns) and \code{"linnet"} (linear networks). If \code{x} is a linear network, it is first converted to a line segment pattern. A kernel estimate of the intensity of the line segment pattern is computed. The result is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with the line segments. The result is computed as follows: \itemize{ \item if \code{method="FFT"} (the default), the line segments are discretised using \code{\link{pixellate.psp}}, then the Fast Fourier Transform is used to calculate the convolution. This method is the fastest, but is slightly less accurate. Accuracy can be improved by increasing pixel resolution. \item if \code{method="C"} the exact value of the convolution at the centre of each pixel is computed analytically using \code{C} code; \item if \code{method="interpreted"}, the exact value of the convolution at the centre of each pixel is computed analytically using \code{R} code. This method is the slowest. } If \code{edge=TRUE} this result is adjusted for edge effects by dividing it by the convolution of the same Gaussian kernel with the observation window. If the argument \code{at} is given, then it specifies the locations where density values should be computed. 
\itemize{ \item If \code{at} is a window, then the window is converted to a binary mask using the arguments \code{\dots}, and density values are computed at the centre of each pixel in this mask. The result is a pixel image. \item If \code{at} is a point pattern, then density values are computed at each point location, and the result is a numeric vector. } } \seealso{ \code{\link{psp.object}}, \code{\link{im.object}}, \code{\link{density}} } \examples{ L <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) D <- density(L, sigma=0.03) plot(D, main="density(L)") plot(L, add=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/dmixpois.Rd0000644000176200001440000000512313333543263015021 0ustar liggesusers\name{dmixpois} \alias{dmixpois} \alias{pmixpois} \alias{qmixpois} \alias{rmixpois} \title{ Mixed Poisson Distribution } \description{ Density, distribution function, quantile function and random generation for a mixture of Poisson distributions. } \usage{ dmixpois(x, mu, sd, invlink = exp, GHorder = 5) pmixpois(q, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5) qmixpois(p, mu, sd, invlink = exp, lower.tail = TRUE, GHorder = 5) rmixpois(n, mu, sd, invlink = exp) } \arguments{ \item{x}{vector of (non-negative integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of random values to return.} \item{mu}{ Mean of the linear predictor. A single numeric value. } \item{sd}{ Standard deviation of the linear predictor. A single numeric value. } \item{invlink}{ Inverse link function. A function in the \R language, used to transform the linear predictor into the parameter \code{lambda} of the Poisson distribution. } \item{lower.tail}{ Logical. If \code{TRUE} (the default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}. } \item{GHorder}{ Number of quadrature points in the Gauss-Hermite quadrature approximation. A small positive integer. 
} } \details{ These functions are analogous to \code{\link{dpois}} \code{\link{ppois}}, \code{\link{qpois}} and \code{\link{rpois}} except that they apply to a mixture of Poisson distributions. In effect, the Poisson mean parameter \code{lambda} is randomised by setting \code{lambda = invlink(Z)} where \code{Z} has a Gaussian \eqn{N(\mu,\sigma^2)}{N(\mu, \sigma^2)} distribution. The default is \code{invlink=exp} which means that \code{lambda} is lognormal. Set \code{invlink=I} to assume that \code{lambda} is approximately Normal. For \code{dmixpois}, \code{pmixpois} and \code{qmixpois}, the probability distribution is approximated using Gauss-Hermite quadrature. For \code{rmixpois}, the deviates are simulated exactly. } \value{ Numeric vector: \code{dmixpois} gives probability masses, \code{ppois} gives cumulative probabilities, \code{qpois} gives (non-negative integer) quantiles, and \code{rpois} generates (non-negative integer) random deviates. } \seealso{ \code{\link{dpois}}, \code{\link{gauss.hermite}}. } \examples{ dmixpois(7, 10, 1, invlink = I) dpois(7, 10) pmixpois(7, log(10), 0.2) ppois(7, 10) qmixpois(0.95, log(10), 0.2) qpois(0.95, 10) x <- rmixpois(100, log(10), log(1.2)) mean(x) var(x) } \author{\adrian , \rolf and \ege } \keyword{distribution} spatstat/man/pairdist.Rd0000644000176200001440000000257713333543263015016 0ustar liggesusers\name{pairdist} \alias{pairdist} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of `things' in a dataset } \usage{ pairdist(X, \dots) } \arguments{ \item{X}{ Object specifying the locations of a set of `things' (such as a set of points or a set of line segments). } \item{\dots}{ Further arguments depending on the method. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the `things' numbered \code{i} and \code{j}. 
} \details{ Given a dataset \code{X} and \code{Y} (representing either a point pattern or a line segment pattern) \code{pairdist} computes the distance between each pair of `things' in the dataset, and returns a matrix containing these distances. The function \code{pairdist} is generic, with methods for point patterns (objects of class \code{"ppp"}), line segment patterns (objects of class \code{"psp"}) and a default method. See the documentation for \code{\link{pairdist.ppp}}, \code{\link{pairdist.psp}} or \code{\link{pairdist.default}} for details. } \seealso{ \code{\link{pairdist.ppp}}, \code{\link{pairdist.psp}}, \code{\link{pairdist.default}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/ppmInfluence.Rd0000644000176200001440000000607713333543264015624 0ustar liggesusers\name{ppmInfluence} \alias{ppmInfluence} \title{ Leverage and Influence Measures for Spatial Point Process Model } \description{ Calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. } \usage{ ppmInfluence(fit, what = c("leverage", "influence", "dfbetas"), \dots, iScore = NULL, iHessian = NULL, iArgs = NULL, drop = FALSE, fitname = NULL) } \arguments{ \item{fit}{ A fitted point process model of class \code{"ppm"}. } \item{what}{ Character vector specifying which quantities are to be calculated. Default is to calculate all quantities. } \item{\dots}{ Ignored. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. 
} \item{fitname}{ Optional character string name for the fitted model \code{fit}. } } \details{ This function calculates all the leverage and influence measures described in \code{\link{influence.ppm}}, \code{\link{leverage.ppm}} and \code{\link{dfbetas.ppm}}. When analysing large datasets, the user can call \code{ppmInfluence} to perform the calculations efficiently, then extract the leverage and influence values as desired. For example the leverage can be extracted either as \code{result$leverage} or \code{leverage(result)}. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ A list containing the leverage and influence measures specified by \code{what}. The result also belongs to the class \code{"ppmInfluence"}. 
} \author{ \adrian } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~ x+y) fI <- ppmInfluence(fit) fitlev <- fI$leverage fitlev <- leverage(fI) fitinf <- fI$influence fitinf <- influence(fI) fitdfb <- fI$dfbetas fitdfb <- dfbetas(fI) } \keyword{spatial} \keyword{models} spatstat/man/reach.Rd0000644000176200001440000001206013552031337014242 0ustar liggesusers\name{reach} \alias{reach} \alias{reach.ppm} \alias{reach.interact} \alias{reach.fii} \alias{reach.rmhmodel} \title{Interaction Distance of a Point Process} \description{ Computes the interaction distance of a point process. } \usage{ reach(x, \dots) \method{reach}{ppm}(x, \dots, epsilon=0) \method{reach}{interact}(x, \dots) \method{reach}{rmhmodel}(x, \dots) \method{reach}{fii}(x, \dots, epsilon) } \arguments{ \item{x}{Either a fitted point process model (object of class \code{"ppm"}), an interpoint interaction (object of class \code{"interact"}), a fitted interpoint interaction (object of class \code{"fii"}) or a point process model for simulation (object of class \code{"rmhmodel"}). } \item{epsilon}{ Numerical threshold below which interaction is treated as zero. See details. } \item{\dots}{ Other arguments are ignored. } } \value{ The interaction distance, or \code{NA} if this cannot be computed from the information given. } \details{ The `interaction distance' or `interaction range' of a point process model of class \code{"ppm"} is the smallest distance \eqn{D} such that any two points in the process which are separated by a distance greater than \eqn{D} do not interact with each other. For example, the interaction range of a Strauss process (see \code{\link{Strauss}}) with parameters \eqn{\beta,\gamma,r}{beta,gamma,r} is equal to \eqn{r}, unless \eqn{\gamma=1}{gamma=1} in which case the model is Poisson and the interaction range is \eqn{0}. The interaction range of a Poisson process is zero. 
The interaction range of the Ord threshold process (see \code{\link{OrdThresh}}) is infinite, since two points \emph{may} interact at any distance apart. The function \code{reach(x)} is generic, with methods for the case where \code{x} is \itemize{ \item a fitted point process model (object of class \code{"ppm"}, usually obtained from the model-fitting function \code{\link{ppm}}); \item an interpoint interaction structure (object of class \code{"interact"}), created by one of the functions \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}} or \code{\link{Ord}}; \item a fitted interpoint interaction (object of class \code{"fii"}) extracted from a fitted point process model by the command \code{\link{fitin}}; \item a point process model for simulation (object of class \code{"rmhmodel"}), usually obtained from \code{\link{rmhmodel}}. } When \code{x} is an \code{"interact"} object, \code{reach(x)} returns the maximum possible interaction range for any point process model with interaction structure given by \code{x}. For example, \code{reach(Strauss(0.2))} returns \code{0.2}. When \code{x} is a \code{"ppm"} object, \code{reach(x)} returns the interaction range for the point process model represented by \code{x}. For example, a fitted Strauss process model with parameters \code{beta,gamma,r} will return either \code{0} or \code{r}, depending on whether the fitted interaction parameter \code{gamma} is equal or not equal to 1. For some point process models, such as the soft core process (see \code{\link{Softcore}}), the interaction distance is infinite, because the interaction terms are positive for all pairs of points. 
A practical solution is to compute the distance at which the interaction contribution from a pair of points falls below a threshold \code{epsilon}, on the scale of the log conditional intensity. This is done by setting the argument \code{epsilon} to a positive value. } \section{Other types of models}{ Methods for \code{reach} are also defined for point process models of class \code{"kppm"} and \code{"dppm"}. Their technical definition is different from this one. See \code{\link{reach.kppm}} and \code{\link{reach.dppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{rmhmodel}} See \code{\link{reach.kppm}} and \code{\link{reach.dppm}} for other types of point process models. } \examples{ reach(Poisson()) # returns 0 reach(Strauss(r=7)) # returns 7 fit <- ppm(swedishpines ~ 1, Strauss(r=7)) reach(fit) # returns 7 reach(OrdThresh(42)) # returns Inf reach(MultiStrauss(matrix(c(1,3,3,1),2,2))) # returns 3 } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/methods.lppm.Rd0000644000176200001440000000421613333543263015601 0ustar liggesusers\name{methods.lppm} \alias{methods.lppm} %DoNotExport \alias{coef.lppm} \alias{emend.lppm} \alias{extractAIC.lppm} \alias{formula.lppm} \alias{logLik.lppm} \alias{deviance.lppm} \alias{nobs.lppm} \alias{print.lppm} \alias{summary.lppm} \alias{terms.lppm} \alias{update.lppm} \alias{valid.lppm} \alias{vcov.lppm} \alias{as.linnet.lppm} \title{ Methods for Fitted Point Process Models on a Linear Network } \description{ These are methods for the class \code{"lppm"} of fitted point process models on a linear network. } \usage{ \method{coef}{lppm}(object, ...) 
\method{emend}{lppm}(object, \dots) \method{extractAIC}{lppm}(fit, ...) \method{formula}{lppm}(x, ...) \method{logLik}{lppm}(object, ...) \method{deviance}{lppm}(object, ...) \method{nobs}{lppm}(object, ...) \method{print}{lppm}(x, ...) \method{summary}{lppm}(object, ...) \method{terms}{lppm}(x, ...) \method{update}{lppm}(object, ...) \method{valid}{lppm}(object, ...) \method{vcov}{lppm}(object, ...) \method{as.linnet}{lppm}(X, ...) } \arguments{ \item{object,fit,x,X}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to other methods, usually the method for the class \code{"ppm"}. } } \details{ These are methods for the generic commands \code{\link[stats]{coef}}, \code{\link{emend}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{formula}}, \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{nobs}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{terms}}, \code{\link[stats]{update}}, \code{\link{valid}} and \code{\link[stats]{vcov}} for the class \code{"lppm"}. } \value{ See the default methods. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{lppm}}, \code{\link{plot.lppm}}. } \examples{ X <- runiflpp(15, simplenet) fit <- lppm(X ~ x) print(fit) coef(fit) formula(fit) terms(fit) logLik(fit) deviance(fit) nobs(fit) extractAIC(fit) update(fit, ~1) valid(fit) vcov(fit) } \keyword{spatial} \keyword{models} spatstat/man/predict.ppm.Rd0000644000176200001440000003640213333543264015417 0ustar liggesusers\name{predict.ppm} \alias{predict.ppm} \title{Prediction from a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, evaluate the spatial trend or the conditional intensity of the model at new locations. 
} \usage{ \method{predict}{ppm}(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ignore.hardcore=FALSE, \dots, dimyx=NULL, eps=NULL, new.coef=NULL, check=TRUE, repair=TRUE) } \arguments{ \item{object}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"} (see \code{\link{ppm.object}}). } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Incompatible with \code{locations}. Equivalent to \code{dimyx}.) } \item{locations}{ Optional. Data giving the exact \eqn{x,y} coordinates (and marks, if required) of locations at which predictions should be computed. Either a point pattern, or a data frame with columns named \code{x} and \code{y}, or a binary image mask, or a pixel image. (Incompatible with \code{ngrid}, \code{dimyx} and \code{eps}). } \item{covariates}{ Values of external covariates required by the model. Either a data frame or a list of images. See Details. } \item{type}{ Character string. Indicates which property of the fitted model should be predicted. Options are \code{"trend"} for the spatial trend, \code{"cif"} or \code{"lambda"} for the conditional intensity, \code{"intensity"} for the intensity, and \code{"count"} for the total number of points in \code{window}. } \item{se}{ Logical value indicating whether to calculate standard errors as well. 
} \item{interval}{ String (partially matched) indicating whether to produce estimates (\code{interval="none"}, the default) or a confidence interval (\code{interval="confidence"}) or a prediction interval (\code{interval="prediction"}). } \item{level}{ Coverage probability for the confidence or prediction interval. } \item{X}{ Optional. A point pattern (object of class \code{"ppp"}) to be taken as the data point pattern when calculating the conditional intensity. The default is to use the original data to which the model was fitted. } \item{correction}{ Name of the edge correction to be used in calculating the conditional intensity. Options include \code{"border"} and \code{"none"}. Other options may include \code{"periodic"}, \code{"isotropic"} and \code{"translate"} depending on the model. The default correction is the one that was used to fit \code{object}. } \item{ignore.hardcore}{ Advanced use only. Logical value specifying whether to compute only the finite part of the interaction potential (effectively removing any hard core interaction terms). } \item{\dots}{ Ignored. } \item{dimyx}{ Equivalent to \code{ngrid}. } \item{eps}{ Width and height of pixels in the prediction grid. A numerical value, or numeric vector of length 2. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } } \value{ \emph{If \code{total} is given:} a numeric vector or matrix. 
\emph{If \code{locations} is given and is a data frame:} a vector of predicted values for the spatial locations (and marks, if required) given in \code{locations}. \emph{If \code{ngrid} is given, or if \code{locations} is given and is a binary image mask or a pixel image:} If \code{object} is an unmarked point process, the result is a pixel image object (of class \code{"im"}, see \code{\link{im.object}}) containing the predictions. If \code{object} is a multitype point process, the result is a list of pixel images, containing the predictions for each type at the same grid of locations. The ``predicted values'' are either values of the spatial trend (if \code{type="trend"}), values of the conditional intensity (if \code{type="cif"} or \code{type="lambda"}), values of the intensity (if \code{type="intensity"}) or numbers of points (if \code{type="count"}). If \code{se=TRUE}, then the result is a list with two entries, the first being the predicted values in the format described above, and the second being the standard errors in the same format. } \details{ This function computes properties of a fitted spatial point process model (object of class \code{"ppm"}). For a Poisson point process it can compute the fitted intensity function, or the expected number of points in a region. For a Gibbs point process it can compute the spatial trend (first order potential), conditional intensity, and approximate intensity of the process. Point estimates, standard errors, confidence intervals and prediction intervals are available. Given a point pattern dataset, we may fit a point process model to the data using the model-fitting algorithm \code{\link{ppm}}. This returns an object of class \code{"ppm"} representing the fitted point process model (see \code{\link{ppm.object}}). The parameter estimates in this fitted model can be read off simply by printing the \code{ppm} object. 
The spatial trend, conditional intensity and intensity of the fitted model are evaluated using this function \code{predict.ppm}. The default action is to create a rectangular grid of points in the observation window of the data point pattern, and evaluate the spatial trend at these locations. The argument \code{type} specifies the values that are desired: \describe{ \item{If \code{type="trend"}:}{ the ``spatial trend'' of the fitted model is evaluated at each required spatial location \eqn{u}. See below. } \item{If \code{type="cif"}:}{ the conditional intensity \eqn{\lambda(u, X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location \eqn{u}, with respect to the data point pattern \eqn{X}. } \item{If \code{type="intensity"}:}{ the intensity \eqn{\lambda(u)}{lambda(u)} of the fitted model is evaluated at each required spatial location \eqn{u}. } \item{If \code{type="count"}:}{ the expected total number of points (or the expected number of points falling in \code{window}) is evaluated. If \code{window} is a tessellation, the expected number of points in each tile of the tessellation is evaluated. } } The spatial trend, conditional intensity, and intensity are all equivalent if the fitted model is a Poisson point process. However, if the model is not a Poisson process, then they are all different. The ``spatial trend'' is the (exponentiated) first order potential, and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}, while the intensity is a smaller value.] The default is to compute an estimate of the desired quantity. If \code{interval="confidence"} or \code{interval="prediction"}, the estimate is replaced by a confidence interval or prediction interval. If \code{se=TRUE}, then a standard error is also calculated, and is returned together with the (point or interval) estimate. 
The spatial locations where predictions are required, are determined by the (incompatible) arguments \code{ngrid} and \code{locations}. \itemize{ \item If the argument \code{ngrid} is present, then predictions are performed at a rectangular grid of locations in the window \code{window}. The result of prediction will be a pixel image or images. \item If \code{locations} is present, then predictions will be performed at the spatial locations given by this dataset. These may be an arbitrary list of spatial locations, or they may be a rectangular grid. The result of prediction will be either a numeric vector or a pixel image or images. \item If neither \code{ngrid} nor \code{locations} is given, then \code{ngrid} is assumed. The value of \code{ngrid} defaults to \code{\link{spatstat.options}("npixel")}, which is initialised to 128 when \pkg{spatstat} is loaded. } The argument \code{locations} may be a point pattern, a data frame or a list specifying arbitrary locations; or it may be a binary image mask (an object of class \code{"owin"} with type \code{"mask"}) or a pixel image (object of class \code{"im"}) specifying (a subset of) a rectangular grid of locations. \itemize{ \item If \code{locations} is a point pattern (object of class \code{"ppp"}), then prediction will be performed at the points of the point pattern. The result of prediction will be a vector of predicted values, one value for each point. If the model is a marked point process, then \code{locations} should be a marked point pattern, with marks of the same kind as the model; prediction will be performed at these marked points. The result of prediction will be a vector of predicted values, one value for each (marked) point. \item If \code{locations} is a data frame or list, then it must contain vectors \code{locations$x} and \code{locations$y} specifying the \eqn{x,y} coordinates of the prediction locations. 
Additionally, if the model is a marked point process, then \code{locations} must also contain a factor \code{locations$marks} specifying the marks of the prediction locations. These vectors must have equal length. The result of prediction will be a vector of predicted values, of the same length. \item If \code{locations} is a binary image mask, then prediction will be performed at each pixel in this binary image where the pixel value is \code{TRUE} (in other words, at each pixel that is inside the window). If the fitted model is an unmarked point process, then the result of prediction will be an image. If the fitted model is a marked point process, then prediction will be performed for each possible value of the mark at each such location, and the result of prediction will be a list of images, one for each mark value. \item If \code{locations} is a pixel image (object of class \code{"im"}), then prediction will be performed at each pixel in this image where the pixel value is defined (i.e.\ where the pixel value is not \code{NA}). } The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The format and use of \code{covariates} are analogous to those of the argument of the same name in \code{\link{ppm}}. It is either a data frame or a list of images. \itemize{ \item If \code{covariates} is a list of images, then the names of the entries should correspond to the names of covariates in the model formula \code{trend}. Each entry in the list must be an image object (of class \code{"im"}, see \code{\link{im.object}}). The software will look up the pixel values of each image at the quadrature points. \item If \code{covariates} is a data frame, then the \code{i}th row of \code{covariates} is assumed to contain covariate data for the \code{i}th location. 
When \code{locations} is a data frame, this just means that each row of \code{covariates} contains the covariate data for the location specified in the corresponding row of \code{locations}. When \code{locations} is a binary image mask, the row \code{covariates[i,]} must correspond to the location \code{x[i],y[i]} where \code{x = as.vector(raster.x(locations))} and \code{y = as.vector(raster.y(locations))}. } Note that if you only want to use prediction in order to generate a plot of the predicted values, it may be easier to use \code{\link{plot.ppm}} which calls this function and plots the results. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{plot.ppm}}, \code{\link{print.ppm}}, \code{\link{fitted.ppm}}, \code{\link{spatstat.options}} } \section{Warnings}{ The current implementation invokes \code{\link{predict.glm}} so that \bold{prediction is wrong} if the trend formula in \code{object} involves terms in \code{ns()}, \code{bs()} or \code{poly()}. This is a weakness of \code{\link{predict.glm}} itself! Error messages may be very opaque, as they tend to come from deep in the workings of \code{\link{predict.glm}}. If you are passing the \code{covariates} argument and the function crashes, it is advisable to start by checking that all the conditions listed above are satisfied. 
} \examples{ \testonly{op <- spatstat.options(npixel=32)} m <- ppm(cells ~ polynom(x,y,2), Strauss(0.05)) trend <- predict(m, type="trend") \dontrun{ image(trend) points(cells) } cif <- predict(m, type="cif") \dontrun{ persp(cif) } mj <- ppm(japanesepines ~ harmonic(x,y,2)) se <- predict(mj, se=TRUE) # image of standard error ci <- predict(mj, interval="c") # two images, confidence interval # prediction interval for total number of points predict(mj, type="count", interval="p") # prediction intervals for counts in tiles predict(mj, window=quadrats(japanesepines, 3), type="count", interval="p") # prediction at arbitrary locations predict(mj, locations=data.frame(x=0.3, y=0.4)) X <- runifpoint(5, Window(japanesepines)) predict(mj, locations=X, se=TRUE) # multitype rr <- matrix(0.06, 2, 2) ma <- ppm(amacrine ~ marks, MultiStrauss(rr)) Z <- predict(ma) Z <- predict(ma, type="cif") predict(ma, locations=data.frame(x=0.8, y=0.5,marks="on"), type="cif") \testonly{spatstat.options(op)} } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/suffstat.Rd0000644000176200001440000001072313333543264015027 0ustar liggesusers\name{suffstat} \alias{suffstat} \title{Sufficient Statistic of Point Process Model} \description{ The canonical sufficient statistic of a point process model is evaluated for a given point pattern. } \usage{ suffstat(model, X=data.ppm(model)) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}). } \item{X}{ A point pattern (object of class \code{"ppp"}). } } \value{ A numeric vector of sufficient statistics. The entries correspond to the model coefficients \code{coef(model)}. } \details{ The canonical sufficient statistic of \code{model} is evaluated for the point pattern \code{X}. This computation is useful for various Monte Carlo methods. 
Here \code{model} should be a point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}), typically obtained from the model-fitting function \code{\link{ppm}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). Every point process model fitted by \code{\link{ppm}} has a probability density of the form \deqn{f(x) = Z(\theta) \exp(\theta^T S(x))}{f(x) = Z(theta) exp(theta * S(x))} where \eqn{x} denotes a typical realisation (i.e. a point pattern), \eqn{\theta}{theta} is the vector of model coefficients, \eqn{Z(\theta)}{Z(theta)} is a normalising constant, and \eqn{S(x)} is a function of the realisation \eqn{x}, called the ``canonical sufficient statistic'' of the model. For example, the stationary Poisson process has canonical sufficient statistic \eqn{S(x)=n(x)}, the number of points in \eqn{x}. The stationary Strauss process with interaction range \eqn{r} (and fitted with no edge correction) has canonical sufficient statistic \eqn{S(x)=(n(x),s(x))} where \eqn{s(x)} is the number of pairs of points in \eqn{x} which are closer than a distance \eqn{r} to each other. \code{suffstat(model, X)} returns the value of \eqn{S(x)}, where \eqn{S} is the canonical sufficient statistic associated with \code{model}, evaluated when \eqn{x} is the given point pattern \code{X}. The result is a numeric vector, with entries which correspond to the entries of the coefficient vector \code{coef(model)}. The sufficient statistic \eqn{S} does not depend on the fitted coefficients of the model. However it does depend on the irregular parameters which are fixed in the original call to \code{\link{ppm}}, for example, the interaction range \code{r} of the Strauss process. The sufficient statistic also depends on the edge correction that was used to fit the model. 
For example in a Strauss process, \itemize{ \item If the model is fitted with \code{correction="none"}, the sufficient statistic is \eqn{S(x) = (n(x), s(x))} where \eqn{n(x)} is the number of points and \eqn{s(x)} is the number of pairs of points which are closer than \eqn{r} units apart. \item If the model is fitted with \code{correction="periodic"}, the sufficient statistic is the same as above, except that distances are measured in the periodic sense. \item If the model is fitted with \code{correction="translate"}, then \eqn{n(x)} is unchanged but \eqn{s(x)} is replaced by a weighted sum (the sum of the translation correction weights for all pairs of points which are closer than \eqn{r} units apart). \item If the model is fitted with \code{correction="border"} (the default), then points lying less than \eqn{r} units from the boundary of the observation window are treated as fixed. Thus \eqn{n(x)} is replaced by the number \eqn{n_r(x)}{n[r](x)} of points lying at least \eqn{r} units from the boundary of the observation window, and \eqn{s(x)} is replaced by the number \eqn{s_r(x)}{s[r](x)} of pairs of points, which are closer than \eqn{r} units apart, and at least one of which lies more than \eqn{r} units from the boundary of the observation window. } Non-finite values of the sufficient statistic (\code{NA} or \code{-Inf}) may be returned if the point pattern \code{X} is not a possible realisation of the model (i.e. if \code{X} has zero probability of occurring under \code{model} for all values of the canonical coefficients \eqn{\theta}{theta}). } \seealso{ \code{\link{ppm}} } \examples{ fitS <- ppm(swedishpines~1, Strauss(7)) suffstat(fitS) X <- rpoispp(intensity(swedishpines), win=Window(swedishpines)) suffstat(fitS, X) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{models} spatstat/man/summary.solist.Rd0000644000176200001440000000155513333543264016204 0ustar liggesusers\name{summary.solist} \alias{summary.solist} \title{Summary of a List of Spatial Objects} \description{ Prints a useful summary of each entry in a list of two-dimensional spatial objects. } \usage{ \method{summary}{solist}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"solist"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"solist"} is effectively a list of two-dimensional spatial datasets. See \code{\link{solist}}. This function extracts a useful summary of each of the datasets. } \seealso{ \code{\link{solist}}, \code{\link{summary}}, \code{\link{plot.solist}} } \examples{ x <- solist(cells, japanesepines, redwood) summary(x) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{methods} spatstat/man/logLik.slrm.Rd0000644000176200001440000000324013333543263015360 0ustar liggesusers\name{logLik.slrm} \Rdversion{1.1} \alias{logLik.slrm} \title{ Loglikelihood of Spatial Logistic Regression } \description{ Computes the (maximised) loglikelihood of a fitted Spatial Logistic Regression model. } \usage{ \method{logLik}{slrm}(object, ..., adjust = TRUE) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } \item{adjust}{ Logical value indicating whether to adjust the loglikelihood of the model to make it comparable with a point process likelihood. See Details. } } \details{ This is a method for \code{\link[stats]{logLik}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It computes the log-likelihood of a fitted spatial logistic regression model. 
If \code{adjust=FALSE}, the loglikelihood is computed using the standard formula for the loglikelihood of a logistic regression model for a finite set of (pixel) observations. If \code{adjust=TRUE} then the loglikelihood is adjusted so that it is approximately comparable with the likelihood of a point process in continuous space, by subtracting the value \eqn{n \log(a)}{n * log(a)} where \eqn{n} is the number of points in the original point pattern dataset, and \eqn{a} is the area of one pixel. } \value{ A numerical value. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) logLik(fit) logLik(fit, adjust=FALSE) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/dffit.ppm.Rd0000644000176200001440000000355613512337523015063 0ustar liggesusers\name{dffit.ppm} \alias{dffit} \alias{dffit.ppm} \title{ Case Deletion Effect Measure of Fitted Model } \description{ Computes the case deletion effect measure \code{DFFIT} for a fitted model. } \usage{ dffit(object, \dots) \method{dffit}{ppm}(object, \dots, collapse = FALSE, dfb = NULL) } \arguments{ \item{object}{ A fitted model, such as a point process model (object of class \code{"ppm"}). } \item{\dots}{ Additional arguments passed to \code{\link{dfbetas.ppm}}. } \item{collapse}{ Logical value specifying whether to collapse the vector-valued measure to a scalar-valued measure by adding all the components. } \item{dfb}{ Optional. The result of \code{dfbetas(object)}, if it has already been computed. } } \details{ The case deletion effect measure \code{DFFIT} is a model diagnostic traditionally used for regression models. In that context, \code{DFFIT[i,j]} is the negative change, in the value of the \code{j}th term in the linear predictor, that would occur if the \code{i}th data value was deleted. It is closely related to the diagnostic \code{DFBETA}. 
For a spatial point process model, \code{dffit} computes the analogous spatial case deletion diagnostic, described in Baddeley, Rubak and Turner (2019). } \value{ A measure (object of class \code{"msr"}). } \references{ Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{dfbetas.ppm}} } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) \testonly{fit <- ppm(X ~x+y, nd=16)} plot(dffit(fit)) plot(dffit(fit, collapse=TRUE)) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat/man/plot.fv.Rd0000644000176200001440000002113213333543264014554 0ustar liggesusers\name{plot.fv} \alias{plot.fv} \title{Plot Function Values} \description{ Plot method for the class \code{"fv"}. } \usage{ \method{plot}{fv}(x, fmla, \dots, subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{fmla}{ an R language formula determining which variables or expressions are plotted. Either a formula object, or a string that can be parsed as a formula. See Details. } \item{subset}{ (optional) subset of rows of the data frame that will be plotted. } \item{lty}{ (optional) numeric vector of values of the graphical parameter \code{lty} controlling the line style of each plot. } \item{col}{ (optional) numeric vector of values of the graphical parameter \code{col} controlling the colour of each plot. 
} \item{lwd}{ (optional) numeric vector of values of the graphical parameter \code{lwd} controlling the line width of each plot. } \item{xlim}{ (optional) range of x axis } \item{ylim}{ (optional) range of y axis } \item{xlab}{ (optional) label for x axis } \item{ylab}{ (optional) label for y axis } \item{\dots}{ Extra arguments passed to \code{plot.default}. } \item{ylim.covers}{ Optional vector of \eqn{y} values that must be included in the \eqn{y} axis. For example \code{ylim.covers=0} will ensure that the \eqn{y} axis includes the origin. } \item{legend}{ Logical flag or \code{NULL}. If \code{legend=TRUE}, the algorithm plots a legend in the top left corner of the plot, explaining the meaning of the different line types and colours. } \item{legendpos}{ The position of the legend. Either a character string keyword (see \code{\link[graphics]{legend}} for keyword options) or a pair of coordinates in the format \code{list(x,y)}. Alternatively if \code{legendpos="float"}, a location will be selected inside the plot region, avoiding the graphics. } \item{legendavoid}{ Whether to avoid collisions between the legend and the graphics. Logical value. If \code{TRUE}, the code will check for collisions between the legend box and the graphics, and will override \code{legendpos} if a collision occurs. If \code{FALSE}, the value of \code{legendpos} is always respected. } \item{legendmath}{ Logical. If \code{TRUE}, the legend will display the mathematical notation for each curve. If \code{FALSE}, the legend text is the identifier (column name) for each curve. } \item{legendargs}{ Named list containing additional arguments to be passed to \code{\link{legend}} controlling the appearance of the legend. } \item{shade}{ A character vector giving the names of two columns of \code{x}, or another type of index that identifies two columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. 
The object \code{x} may or may not contain two columns which are designated as boundaries for shading; they are identified by \code{fvnames(x, ".s")}. The default is to shade between these two curves if they exist. To suppress this behaviour, set \code{shade=NULL}. } \item{shadecol}{ The colour to be used in the \code{shade} plot. A character string or an integer specifying a colour. } \item{add}{ Logical. Whether the plot should be added to an existing plot } \item{log}{ A character string which contains \code{"x"} if the x axis is to be logarithmic, \code{"y"} if the y axis is to be logarithmic and \code{"xy"} or \code{"yx"} if both axes are to be logarithmic. } \item{mathfont}{ Character string. The font to be used for mathematical expressions in the axis labels and the legend. } \item{limitsonly}{ Logical. If \code{FALSE}, plotting is performed normally. If \code{TRUE}, no plotting is performed at all; just the \eqn{x} and \eqn{y} limits of the plot are computed and returned. } } \value{ Invisible: either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"fv"}. The use of the argument \code{fmla} is like \code{plot.formula}, but offers some extra functionality. The left and right hand sides of \code{fmla} are evaluated, and the results are plotted against each other (the left side on the \eqn{y} axis against the right side on the \eqn{x} axis). The left and right hand sides of \code{fmla} may be the names of columns of the data frame \code{x}, or expressions involving these names. If a variable in \code{fmla} is not the name of a column of \code{x}, the algorithm will search for an object of this name in the environment where \code{plot.fv} was called, and then in the enclosing environment, and so on. 
Multiple curves may be specified by a single formula of the form \code{cbind(y1,y2,\dots,yn) ~ x}, where \code{x,y1,y2,\dots,yn} are expressions involving the variables in the data frame. Each of the variables \code{y1,y2,\dots,yn} in turn will be plotted against \code{x}. See the examples. Convenient abbreviations which can be used in the formula are \itemize{ \item the symbol \code{.} which represents all the columns in the data frame that will be plotted by default; \item the symbol \code{.x} which represents the function argument; \item the symbol \code{.y} which represents the recommended value of the function. } For further information, see \code{\link{fvnames}}. The value returned by this plot function indicates the meaning of the line types and colours in the plot. It can be used to make a suitable legend for the plot if you want to do this by hand. See the examples. The argument \code{shade} can be used to display critical bands or confidence intervals. If it is not \code{NULL}, then it should be a subset index for the columns of \code{x}, that identifies exactly 2 columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. See the Examples. The default values of \code{lty}, \code{col} and \code{lwd} can be changed using \code{\link{spatstat.options}("plot.fv")}. Use \code{type = "n"} to create the plot region and draw the axes without plotting any data. Use \code{limitsonly=TRUE} to suppress all plotting and just compute the \eqn{x} and \eqn{y} limits. This can be used to calculate common \eqn{x} and \eqn{y} scales for several plots. To change the kind of parenthesis enclosing the explanatory text about the unit of length, use \code{\link{spatstat.options}('units.paren')} } \examples{ K <- Kest(cells) # K is an object of class "fv" plot(K, iso ~ r) # plots iso against r plot(K, sqrt(iso/pi) ~ r) # plots sqrt(iso/pi) against r plot(K, cbind(iso,theo) ~ r) # plots iso against r AND theo against r plot(K, . 
~ r) # plots all available estimates of K against r plot(K, sqrt(./pi) ~ r) # plots all estimates of L-function # L(r) = sqrt(K(r)/pi) plot(K, cbind(iso,theo) ~ r, col=c(2,3)) # plots iso against r in colour 2 # and theo against r in colour 3 plot(K, iso ~ r, subset=quote(r < 0.2)) # plots iso against r for r < 0.2 # Can't remember the names of the columns? No problem.. plot(K, sqrt(./pi) ~ .x) # making a legend by hand v <- plot(K, . ~ r, legend=FALSE) legend("topleft", legend=v$meaning, lty=v$lty, col=v$col) # significance bands KE <- envelope(cells, Kest, nsim=19) plot(KE, shade=c("hi", "lo")) # how to display two functions on a common scale Kr <- Kest(redwood) a <- plot(K, limitsonly=TRUE) b <- plot(Kr, limitsonly=TRUE) xlim <- range(a$xlim, b$xlim) ylim <- range(a$ylim, b$ylim) opa <- par(mfrow=c(1,2)) plot(K, xlim=xlim, ylim=ylim) plot(Kr, xlim=xlim, ylim=ylim) par(opa) } \seealso{ \code{\link{fv.object}}, \code{\link{Kest}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/pcfmulti.Rd0000644000176200001440000001144213333543264015012 0ustar liggesusers\name{pcfmulti} \alias{pcfmulti} \title{ Marked pair correlation function } \description{ For a marked point pattern, estimate the multitype pair correlation function using kernel methods. } \usage{ pcfmulti(X, I, J, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), Iname = "points satisfying condition I", Jname = "points satisfying condition J") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. } \item{\dots}{ Ignored. 
} \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. } \item{Iname,Jname}{ Optional. Character strings describing the members of the subsets \code{I} and \code{J}. } } \details{ This is a generalisation of \code{\link{pcfcross}} to arbitrary collections of points. The algorithm measures the distance from each data point in subset \code{I} to each data point in subset \code{J}, excluding identical pairs of points. The distances are kernel-smoothed and renormalised to form a pair correlation function. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The arguments \code{I} and \code{J} specify two subsets of the point pattern \code{X}. 
They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pcfcross}}, \code{\link{pcfdot}}, \code{\link{pcf.ppp}}. 
} \examples{ adult <- (marks(longleaf) >= 30) juvenile <- !adult p <- pcfmulti(longleaf, adult, juvenile) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/owin.object.Rd0000644000176200001440000000706713333543263015417 0ustar liggesusers\name{owin.object} \alias{owin.object} %DoNotExport \title{Class owin} \description{ A class \code{owin} to define the ``observation window'' of a point pattern } \details{ In the \pkg{spatstat} library, a point pattern dataset must include information about the window or region in which the pattern was observed. A window is described by an object of class \code{"owin"}. Windows of arbitrary shape are supported. An object of class \code{"owin"} has one of three types: \tabular{ll}{ \code{"rectangle"}: \tab a rectangle in the two-dimensional plane with edges parallel to the axes \cr \code{"polygonal"}: \tab a region whose boundary is a polygon or several polygons. The region may have holes and may consist of several disconnected pieces. \cr \code{"mask"}: \tab a binary image (a logical matrix) set to \code{TRUE} for pixels inside the window and \code{FALSE} outside the window. } Objects of class \code{"owin"} may be created by the function \code{\link{owin}} and converted from other types of data by the function \code{\link{as.owin}}. They may be manipulated by the functions \code{\link{as.rectangle}}, \code{\link{as.mask}}, \code{\link{complement.owin}}, \code{\link{rotate}}, \code{\link{shift}}, \code{\link{affine}}, \code{\link{erosion}}, \code{\link{dilation}}, \code{\link{opening}} and \code{\link{closing}}. Geometrical calculations available for windows include \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{diameter.owin}}, \code{\link{boundingbox}}, \code{\link{eroded.areas}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}}, and \code{even.breaks.owin}. 
The mapping between continuous coordinates and pixel raster indices is facilitated by the functions \code{\link{raster.x}}, \code{\link{raster.y}} and \code{\link{nearest.raster.point}}. There is a \code{plot} method for window objects, \code{\link{plot.owin}}. This may be useful if you wish to plot a point pattern's window without the points for graphical purposes. There are also methods for \code{summary} and \code{print}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.rectangle}}, \code{\link{as.mask}}, \code{\link{summary.owin}}, \code{\link{print.owin}}, \code{\link{complement.owin}}, \code{\link{erosion}}, \code{\link{dilation}}, \code{\link{opening}}, \code{\link{closing}}, \code{\link{affine.owin}}, \code{\link{shift.owin}}, \code{\link{rotate.owin}}, \code{\link{raster.x}}, \code{\link{raster.y}}, \code{\link{nearest.raster.point}}, \code{\link{plot.owin}}, \code{\link{area.owin}}, \code{\link{boundingbox}}, \code{\link{diameter}}, \code{\link{eroded.areas}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}} } \section{Warnings}{ In a window of type \code{"mask"}, the row index corresponds to increasing \eqn{y} coordinate, and the column index corresponds to increasing \eqn{x} coordinate. 
} \examples{ w <- owin() w <- owin(c(0,1), c(0,1)) # the unit square w <- owin(c(0,1), c(0,2)) \dontrun{ if(FALSE) { plot(w) # plots edges of a box 1 unit x 2 units v <- locator() # click on points in the plot window # to be the vertices of a polygon # traversed in anticlockwise order u <- owin(c(0,1), c(0,2), poly=v) plot(u) # plots polygonal boundary using polygon() plot(as.mask(u, eps=0.02)) # plots discrete pixel approximation to polygon } } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/lohboot.Rd0000644000176200001440000002035713503565562014646 0ustar liggesusers\name{lohboot} \alias{lohboot} \title{Bootstrap Confidence Bands for Summary Function} \description{ Computes a bootstrap confidence band for a summary function of a point process. } \usage{ lohboot(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom", "Kcross", "Lcross", "Kdot", "Ldot", "Kcross.inhom", "Lcross.inhom"), \dots, block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{fun}{ Name of the summary function for which confidence intervals are desired: one of the strings \code{"pcf"}, \code{"Kest"}, \code{"Lest"}, \code{"pcfinhom"}, \code{"Kinhom"} \code{"Linhom"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Kdot"}, \code{"Ldot"}, \code{"Kcross.inhom"} or \code{"Lcross.inhom"}. Alternatively, the function itself; it must be one of the functions listed here. } \item{\dots}{ Arguments passed to the corresponding local version of the summary function (see Details). } \item{block}{ Logical value indicating whether to use Loh's block bootstrap as originally proposed. Default is \code{FALSE} for consistency with older code. See Details. } \item{global}{ Logical. If \code{FALSE} (the default), pointwise confidence intervals are constructed. If \code{TRUE}, a global (simultaneous) confidence band is constructed. 
} \item{basicboot}{ Logical value indicating whether to use the so-called basic bootstrap confidence interval. See Details. } \item{Vcorrection}{ Logical value indicating whether to use a variance correction when \code{fun="Kest"} or \code{fun="Kinhom"}. See Details. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } \item{nx,ny}{ Integers. If \code{block=TRUE}, divide the window into \code{nx*ny} rectangles. } \item{nsim}{ Number of bootstrap simulations. } \item{type}{ Integer. Type of quantiles. Argument passed to \code{\link[stats]{quantile.default}} controlling the way the quantiles are calculated. } } \value{ A function value table (object of class \code{"fv"}) containing columns giving the estimate of the summary function, the upper and lower limits of the bootstrap confidence interval, and the theoretical value of the summary function for a Poisson process. } \details{ This algorithm computes confidence bands for the true value of the summary function \code{fun} using the bootstrap method of Loh (2008) and a modification described in Baddeley, Rubak, Turner (2015). If \code{fun="pcf"}, for example, the algorithm computes a pointwise \code{(100 * confidence)}\% confidence interval for the true value of the pair correlation function for the point process, normally estimated by \code{\link{pcf}}. It starts by computing the array of \emph{local} pair correlation functions, \code{\link{localpcf}}, of the data pattern \code{X}. This array consists of the contributions to the estimate of the pair correlation function from each data point. If \code{block=FALSE}, these contributions are resampled \code{nsim} times with replacement as described in Baddeley, Rubak, Turner (2015); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. If \code{block=TRUE}, the calculation is performed as originally proposed by Loh (2008, 2010). 
The (bounding box of the) window is divided into \eqn{nx * ny} rectangles (blocks). The average contribution of a block is obtained by averaging the contribution of each point included in the block. Then, the average contributions on each block are resampled \code{nsim} times with replacement as described in Loh (2008) and Loh (2010); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. Notice that for non-rectangular windows any blocks not fully contained in the window are discarded before doing the resampling, so the effective number of blocks may be substantially smaller than \eqn{nx * ny} in this case. The pointwise \code{alpha/2} and \code{1 - alpha/2} quantiles of these functions are computed, where \code{alpha = 1 - confidence}. The average of the local functions is also computed as an estimate of the pair correlation function. There are several ways to define a bootstrap confidence interval. If \code{basicboot=TRUE}, the so-called basic confidence bootstrap interval is used as described in Loh (2008). It has been noticed in Loh (2010) that when the intensity of the point process is unknown, the bootstrap error estimate is larger than it should be. When the \eqn{K} function is used, an adjustment procedure has been proposed in Loh (2010) that is used if \code{Vcorrection=TRUE}. In this case, the basic confidence bootstrap interval is implicitly used. 
To control the estimation algorithm, use the arguments \code{\dots}, which are passed to the local version of the summary function, as shown below: \tabular{ll}{ \bold{fun} \tab \bold{local version} \cr \code{\link{pcf}} \tab \code{\link{localpcf}} \cr \code{\link{Kest}} \tab \code{\link{localK}} \cr \code{\link{Lest}} \tab \code{\link{localL}} \cr \code{\link{pcfinhom}} \tab \code{\link{localpcfinhom}} \cr \code{\link{Kinhom}} \tab \code{\link{localKinhom}} \cr \code{\link{Linhom}} \tab \code{\link{localLinhom}} \cr \code{\link{Kcross}} \tab \code{\link{localKcross}} \cr \code{\link{Lcross}} \tab \code{\link{localLcross}} \cr \code{\link{Kdot}} \tab \code{\link{localKdot}} \cr \code{\link{Ldot}} \tab \code{\link{localLdot}} \cr \code{\link{Kcross.inhom}} \tab \code{\link{localKcross.inhom}} \cr \code{\link{Lcross.inhom}} \tab \code{\link{localLcross.inhom}} } For \code{fun="Lest"}, the calculations are first performed as if \code{fun="Kest"}, and then the square-root transformation is applied to obtain the \eqn{L}-function. Similarly for \code{fun="Linhom", "Lcross", "Ldot", "Lcross.inhom"}. Note that the confidence bands computed by \code{lohboot(fun="pcf")} may not contain the estimate of the pair correlation function computed by \code{\link{pcf}}, because of differences between the algorithm parameters (such as the choice of edge correction) in \code{\link{localpcf}} and \code{\link{pcf}}. If you are using \code{lohboot}, the appropriate point estimate of the pair correlation itself is the pointwise mean of the local estimates, which is provided in the result of \code{lohboot} and is shown in the default plot. If the confidence bands seem unbelievably narrow, this may occur because the point pattern has a hard core (the true pair correlation function is zero for certain values of distance) or because of an optical illusion when the function is steeply sloping (remember the width of the confidence bands should be measured \emph{vertically}). 
An alternative to \code{lohboot} is \code{\link{varblock}}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Loh, J.M. (2008) A valid and fast spatial bootstrap for correlation functions. \emph{The Astrophysical Journal}, \bold{681}, 726--734. Loh, J.M. (2010) Bootstrapping an inhomogeneous point process. \emph{Journal of Statistical Planning and Inference}, \bold{140}, 734--749. } \seealso{ Summary functions \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{Kinhom}}, \code{\link{pcfinhom}}, \code{\link{localK}}, \code{\link{localpcf}}, \code{\link{localKinhom}}, \code{\link{localpcfinhom}}, \code{\link{localKcross}}, \code{\link{localKdot}}, \code{\link{localLcross}}, \code{\link{localLdot}}. \code{\link{localKcross.inhom}}, \code{\link{localLcross.inhom}}. See \code{\link{varblock}} for an alternative bootstrap technique. } \examples{ p <- lohboot(simdat, stoyan=0.5) g <- lohboot(simdat, stoyan=0.5, block=TRUE) g plot(g) } \author{ \spatstatAuthors and Christophe Biscio. } \keyword{spatial} \keyword{nonparametric} spatstat/man/rags.Rd0000644000176200001440000000337313333543264014127 0ustar liggesusers\name{rags} \alias{rags} \title{ Alternating Gibbs Sampler for Multitype Point Processes } \description{ Simulate a realisation of a point process model using the alternating Gibbs sampler. } \usage{ rags(model, \dots, ncycles = 100) } \arguments{ \item{model}{ Data specifying some kind of point process model. } \item{\dots}{ Additional arguments passed to other code. } \item{ncycles}{ Number of cycles of the alternating Gibbs sampler that should be performed. } } \details{ The Alternating Gibbs Sampler for a multitype point process is an iterative simulation procedure. 
Each step of the sampler updates the pattern of points of a particular type \code{i}, by drawing a realisation from the conditional distribution of points of type \code{i} given the points of all other types. Successive steps of the sampler update the points of type 1, then type 2, type 3, and so on. This is an experimental implementation which currently works only for multitype hard core processes (see \code{\link{MultiHard}}) in which there is no interaction between points of the same type. The argument \code{model} should be an object describing a point process model. At the moment, the only permitted format for \code{model} is of the form \code{list(beta, hradii)} where \code{beta} gives the first order trend and \code{hradii} is the matrix of interaction radii. See \code{\link{ragsMultiHard}} for full details. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{ragsMultiHard}}, \code{\link{ragsAreaInter}} } \examples{ mo <- list(beta=c(30, 20), hradii = 0.05 * matrix(c(0,1,1,0), 2, 2)) rags(mo, ncycles=10) } \keyword{spatial} \keyword{datagen} spatstat/man/imcov.Rd0000644000176200001440000000334613333543263014307 0ustar liggesusers\name{imcov} \alias{imcov} \title{Spatial Covariance of a Pixel Image} \description{ Computes the unnormalised spatial covariance function of a pixel image. } \usage{ imcov(X, Y=X) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{Y}{ Optional. Another pixel image. } } \value{ A pixel image (an object of class \code{"im"}) representing the spatial covariance function of \code{X}, or the cross-covariance of \code{X} and \code{Y}. 
} \details{ The (uncentred, unnormalised) \emph{spatial covariance function} of a pixel image \eqn{X} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as \deqn{ C(v) = \int X(u)X(u-v)\, {\rm d}u }{ C(v) = integral of X(u) * X(u-v) du } where the integral is over all spatial locations \eqn{u}, and where \eqn{X(u)} denotes the pixel value at location \eqn{u}. This command computes a discretised approximation to the spatial covariance function, using the Fast Fourier Transform. The return value is another pixel image (object of class \code{"im"}) whose greyscale values are values of the spatial covariance function. If the argument \code{Y} is present, then \code{imcov(X,Y)} computes the set \emph{cross-covariance} function \eqn{C(u)} defined as \deqn{ C(v) = \int X(u)Y(u-v)\, {\rm d}u. }{ C(v) = integral of X(u) * Y(u-v) du. } Note that \code{imcov(X,Y)} is equivalent to \code{convolve.im(X,Y,reflectY=TRUE)}. } \seealso{ \code{\link{setcov}}, \code{\link{convolve.im}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ X <- as.im(square(1)) v <- imcov(X) plot(v) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/methods.linfun.Rd0000644000176200001440000000473413333543263016131 0ustar liggesusers\name{methods.linfun} \Rdversion{1.1} \alias{methods.linfun} %DoNotExport \alias{print.linfun} \alias{summary.linfun} \alias{plot.linfun} \alias{as.data.frame.linfun} \alias{as.owin.linfun} \alias{as.function.linfun} \title{ Methods for Functions on Linear Network } \description{ Methods for the class \code{"linfun"} of functions on a linear network. } \usage{ \method{print}{linfun}(x, \dots) \method{summary}{linfun}(object, \dots) \method{plot}{linfun}(x, \dots, L=NULL, main) \method{as.data.frame}{linfun}(x, \dots) \method{as.owin}{linfun}(W, \dots) \method{as.function}{linfun}(x, \dots) } \arguments{ \item{x,object,W}{ A function on a linear network (object of class \code{"linfun"}). 
} \item{L}{A linear network} \item{\dots}{ Extra arguments passed to \code{\link{as.linim}}, \code{\link{plot.linim}}, \code{\link{plot.im}} or \code{\link{print.default}}, or arguments passed to \code{x} if it is a function. } \item{main}{Main title for plot.} } \details{ These are methods for the generic functions \code{\link{plot}}, \code{\link{print}}, \code{\link{summary}} \code{\link{as.data.frame}} and \code{\link{as.function}}, and for the \pkg{spatstat} generic function \code{\link{as.owin}}. An object of class \code{"linfun"} represents a mathematical function that could be evaluated at any location on a linear network. It is essentially an \R \code{function} with some extra attributes. The method \code{as.owin.linfun} extracts the two-dimensional spatial window containing the linear network. The method \code{plot.linfun} first converts the function to a pixel image using \code{\link{as.linim.linfun}}, then plots the image using \code{\link{plot.linim}}. Note that a \code{linfun} function may have additional arguments, other than those which specify the location on the network (see \code{\link{linfun}}). These additional arguments may be passed to \code{plot.linfun}. } \value{ For \code{print.linfun} and \code{summary.linfun} the result is \code{NULL}. For \code{plot.linfun} the result is the same as for \code{\link{plot.linim}}. For the conversion methods, the result is an object of the required type: \code{as.owin.linfun} returns an object of class \code{"owin"}, and so on. 
} \examples{ X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) as.function(f) as.owin(f) head(as.data.frame(f)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/connected.lpp.Rd0000644000176200001440000000530413333543263015722 0ustar liggesusers\name{connected.lpp} \alias{connected.lpp} \title{ Connected Components of a Point Pattern on a Linear Network } \description{ Finds the topologically-connected components of a point pattern on a linear network, when all pairs of points closer than a threshold distance are joined. } \usage{ \method{connected}{lpp}(X, R=Inf, \dots, dismantle=TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{R}{ Threshold distance. Pairs of points will be joined together if they are closer than \code{R} units apart, measured by the shortest path in the network. The default \code{R=Inf} implies that points will be joined together if they are mutually connected by any path in the network. } \item{dismantle}{ Logical. If \code{TRUE} (the default), the network itself will be divided into its path-connected components using \code{\link{connected.linnet}}. } \item{\dots}{ Ignored. } } \details{ The function \code{connected} is generic. This is the method for point patterns on a linear network (objects of class \code{"lpp"}). It divides the point pattern \code{X} into one or more groups of points. If \code{R=Inf} (the default), then \code{X} is divided into groups such that any pair of points in the same group can be joined by a path in the network. If \code{R} is a finite number, then two points of \code{X} are declared to be \emph{R-close} if they lie closer than \code{R} units apart, measured by the length of the shortest path in the network. Two points are \emph{R-connected} if they can be reached by a series of steps between R-close pairs of points of \code{X}. Then \code{X} is divided into groups such that any pair of points in the same group is R-connected. 
If \code{dismantle=TRUE} (the default) the algorithm first checks whether the network is connected (i.e. whether any pair of vertices can be joined by a path in the network), and if not, the network is decomposed into its connected components. } \value{ A point pattern (of class \code{"lpp"}) with marks indicating the grouping, or a list of such point patterns. } \author{ \adrian. } \seealso{ \code{\link{thinNetwork}} } \examples{ ## behaviour like connected.ppp U <- runiflpp(20, simplenet) plot(connected(U, 0.15, dismantle=FALSE)) ## behaviour like connected.owin ## remove some edges from a network to make it disconnected plot(simplenet, col="grey", main="", lty=2) A <- thinNetwork(simplenet, retainedges=-c(3,5)) plot(A, add=TRUE, lwd=2) X <- runiflpp(10, A) ## find the connected components cX <- connected(X) plot(cX[[1]], add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat/man/Extract.fv.Rd0000644000176200001440000000613613333543263015216 0ustar liggesusers\name{Extract.fv} \alias{[.fv} \alias{[<-.fv} \alias{$<-.fv} \title{Extract or Replace Subset of Function Values} \description{ Extract or replace a subset of an object of class \code{"fv"}. } \usage{ \method{[}{fv}(x, i, j, \dots, drop=FALSE) \method{[}{fv}(x, i, j) <- value \method{$}{fv}(x, name) <- value } \arguments{ \item{x}{ a function value object, of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame. } \item{i}{ any appropriate subset index. Selects a subset of the rows of the data frame, i.e. a subset of the domain of the function(s) represented by \code{x}. } \item{j}{ any appropriate subset index for the columns of the data frame. Selects some of the functions present in \code{x}. } \item{name}{ the name of a column of the data frame. } \item{\dots}{ Ignored. } \item{drop}{ Logical. If \code{TRUE}, the result is a data frame or vector containing the selected rows and columns of data. 
If \code{FALSE} (the default), the result is another object of class \code{"fv"}. } \item{value}{ Replacement value for the column or columns selected by \code{name} or \code{j}. } } \value{ The result of \code{[.fv} with \code{drop=TRUE} is a data frame or vector. Otherwise, the result is another object of class \code{"fv"}. } \details{ These functions extract a designated subset of an object of class \code{"fv"}, or replace the designated subset with other data, or delete the designated subset. The subset is specified by the row index \code{i} and column index \code{j}, or by the column name \code{name}. Either \code{i} or \code{j} may be missing, or both may be missing. The function \code{[.fv} is a method for the generic operator \code{\link{[}} for the class \code{"fv"}. It extracts the designated subset of \code{x}, and returns it as another object of class \code{"fv"} (if \code{drop=FALSE}) or as a data frame or vector (if \code{drop=TRUE}). The function \code{[<-.fv} is a method for the generic operator \code{\link{[<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated subset of \code{x} will be deleted from \code{x}. Otherwise, the designated subset of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. The function \code{$<-.fv} is a method for the generic operator \code{\link{$<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated column of \code{x} will be deleted from \code{x}. Otherwise, the designated column of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. 
} \seealso{ \code{\link{fv.object}} } \examples{ K <- Kest(cells) # discard the estimates of K(r) for r > 0.1 Ksub <- K[K$r <= 0.1, ] # extract the border method estimates bor <- K[ , "border", drop=TRUE] # or equivalently bor <- K$border # remove the border-method estimates K$border <- NULL K } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/rshift.splitppp.Rd0000644000176200001440000000412613333543264016341 0ustar liggesusers\name{rshift.splitppp} \alias{rshift.splitppp} \title{Randomly Shift a List of Point Patterns} \description{ Randomly shifts each point pattern in a list of point patterns. } \usage{ \method{rshift}{splitppp}(X, \dots, which=seq_along(X)) } \arguments{ \item{X}{ An object of class \code{"splitppp"}. Basically a list of point patterns. } \item{\dots}{ Parameters controlling the generation of the random shift vector and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{which}{ Optional. Identifies which patterns will be shifted, while other patterns are not shifted. Any valid subset index for \code{X}. } } \value{ Another object of class \code{"splitppp"}. } \details{ This operation applies a random shift to each of the point patterns in the list \code{X}. The function \code{\link{rshift}} is generic. This function \code{rshift.splitppp} is the method for objects of class \code{"splitppp"}, which are essentially lists of point patterns, created by the function \code{\link{split.ppp}}. By default, every pattern in the list \code{X} will be shifted. The argument \code{which} indicates that only some of the patterns should be shifted, while other groups should be left unchanged. \code{which} can be any valid subset index for \code{X}. Each point pattern in the list \code{X} (or each pattern in \code{X[which]}) is shifted by a random displacement vector. The shifting is performed by \code{\link{rshift.ppp}}. See the help page for \code{\link{rshift.ppp}} for details of the other arguments. 
} \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ data(amacrine) Y <- split(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(Y) # shift "on" points and leave "off" points fixed X <- rshift(Y, which="on") # maximum displacement distance 0.1 units X <- rshift(Y, radius=0.1) # shift with erosion X <- rshift(Y, radius=0.1, edge="erode") } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/bw.lppl.Rd0000644000176200001440000000755513602545270014555 0ustar liggesusers\name{bw.lppl} \alias{bw.lppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density on a Linear Network } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity on a linear network. } \usage{ bw.lppl(X, \dots, srange=NULL, ns=16, sigma=NULL, weights=NULL, distance="euclidean", shortcut=FALSE, warn=TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{weights}{ Optional. Numeric vector of weights for the points of \code{X}. Argument passed to \code{\link{density.lpp}}. } \item{distance}{ Argument passed to \code{\link{density.lpp}} controlling the type of kernel estimator. } \item{\dots}{ Additional arguments passed to \code{\link{density.lpp}}. } \item{shortcut}{ Logical value indicating whether to speed up the calculation by omitting the integral term in the cross-validation criterion. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the maximum of the cross-validation criterion occurs at one of the ends of the search interval. 
The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) cross-validation criterion as a function of \code{sigma}.
} \examples{ if(interactive()) { b <- bw.lppl(spiders) plot(b, main="Likelihood cross validation for spiders") plot(density(spiders, b, distance="e")) } else { b1 <- bw.lppl(spiders, ns=2) b2 <- bw.lppl(spiders, ns=2, shortcut=TRUE) } } \references{ Loader, C. (1999) \emph{Local Regression and Likelihood}. Springer, New York. } \author{ Suman Rakshit. Tweaks by \adrian. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/pairorient.Rd0000644000176200001440000000777013416566724015364 0ustar liggesusers\name{pairorient} \alias{pairorient} \title{ Point Pair Orientation Distribution } \description{ Computes the distribution of the orientation of vectors joining pairs of points at a particular range of distances. } \usage{ pairorient(X, r1, r2, \dots, cumulative=FALSE, correction, ratio = FALSE, unit=c("degree", "radian"), domain=NULL) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{r1,r2}{ Minimum and maximum values of distance to be considered. } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"isotropic"}, \code{"translate"}, \code{"border"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. The default is to compute all edge corrections except \code{"none"}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. 
The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers all pairs of points in the pattern \code{X} that lie more than \code{r1} and less than \code{r2} units apart. The \emph{direction} of the arrow joining the points is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the orientations is calculated using \code{\link{circdensity}}. If \code{cumulative=TRUE}, then the cumulative distribution function of these directions is calculated. This is the function \eqn{O_{r1,r2}(\phi)}{O[r1,r2](phi)} defined in Stoyan and Stoyan (1994), equation (14.53), page 271. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. See the help for \code{\link{Kest}} for details of edge corrections, and explanation of the options available. The choice \code{correction="none"} is not recommended; it is included for demonstration purposes only. The default is to compute all corrections except \code{"none"}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Stoyan, D. and Stoyan, H. 
(1994) \emph{Fractals, Random Shapes and Point Fields: Methods of Geometrical Statistics}. John Wiley and Sons.
The result must be either a matrix with the same dimensions as its input, or an array with its first two dimensions the same as its input (the latter case corresponds to a vector-valued potential). If irregular parameters are present, then the second argument to \code{pot} should be a vector of the same type as \code{par} giving those parameter values. The values returned by \code{pot} may be finite numeric values, or \code{-Inf} indicating a hard core (that is, the corresponding interpoint distance is forbidden). We define \eqn{h(d) = 0} if \eqn{\mbox{pot}(d) = -\infty}{pot(d) = -Inf}. Thus, a potential value of minus infinity is \emph{always} interpreted as corresponding to \eqn{h(d) = 0}, regardless of the sign and magnitude of \eqn{\theta}{theta}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ #This is the same as StraussHard(r=0.7,h=0.05) strpot <- function(d,par) { r <- par$r h <- par$h value <- (d <= r) value[d < h] <- -Inf value } mySH <- Pairwise(strpot, "StraussHard process", list(r=0.7,h=0.05), c("interaction distance r", "hard core distance h")) data(cells) ppm(cells, ~ 1, mySH, correction="isotropic") # Fiksel (1984) double exponential interaction # see Stoyan, Kendall, Mecke 1987 p 161 fikspot <- function(d, par) { r <- par$r h <- par$h zeta <- par$zeta value <- exp(-zeta * d) value[d < h] <- -Inf value[d > r] <- 0 value } Fiksel <- Pairwise(fikspot, "Fiksel double exponential process", list(r=3.5, h=1, zeta=1), c("interaction distance r", "hard core distance h", "exponential coefficient zeta")) data(spruces) fit <- ppm(unmark(spruces), ~1, Fiksel, rbord=3.5) fit plot(fitin(fit), xlim=c(0,4)) coef(fit) # corresponding values obtained by Fiksel (1984) were -1.9 and -6.0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/valid.ppm.Rd0000644000176200001440000000532513333543264015064 0ustar liggesusers\name{valid.ppm} \alias{valid.ppm} \title{ Check Whether Point Process Model 
is Valid } \description{ Determines whether a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ \method{valid}{ppm}(object, warn=TRUE, \dots) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{warn}{ Logical value indicating whether to issue a warning if the validity of the model cannot be checked (due to unavailability of the required code). } \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{valid}} for Poisson and Gibbs point process models (class \code{"ppm"}). The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, \code{\link{ppm}} does not check whether the fitted model actually exists as a point process. This checking is done by \code{valid.ppm}. Unlike a regression model, which is well-defined for any values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{valid.ppm} checks whether the fitted model \code{object} specifies a well-defined point process. It returns \code{TRUE} if the model is well-defined. 
Another possible reason for invalid models is that the data may not be adequate for estimation of the model parameters. In this case, some of the fitted coefficients could be \code{NA} or infinite values. If this happens then \code{valid.ppm} returns \code{FALSE}. Use the function \code{\link{project.ppm}} to force the fitted model to be valid. } \value{ A logical value, or \code{NA}. } \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{project.ppm}} } \examples{ fit1 <- ppm(cells, ~1, Strauss(0.1)) valid(fit1) fit2 <- ppm(redwood, ~1, Strauss(0.1)) valid(fit2) } \keyword{spatial} \keyword{models} spatstat/man/Lest.Rd0000644000176200001440000000551113551505532014074 0ustar liggesusers\name{Lest} \alias{Lest} \title{L-function} \description{ Calculates an estimate of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Lest(X, ..., correction) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kest}} to control the estimation procedure. } } \details{ This command computes an estimate of the \eqn{L}-function for the spatial point pattern \code{X}. The \eqn{L}-function is a transformation of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the \eqn{K}-function. See \code{\link{Kest}} for information about Ripley's \eqn{K}-function. The transformation to \eqn{L} was proposed by Besag (1977). The command \code{Lest} first calls \code{\link{Kest}} to compute the estimate of the \eqn{K}-function, and then applies the square root transformation. For a completely random (uniform Poisson) point pattern, the theoretical value of the \eqn{L}-function is \eqn{L(r) = r}. 
The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L(r)} is more appropriate for use in simulation envelopes and hypothesis tests. See \code{\link{Kest}} for the list of arguments. } \section{Variance approximations}{ If the argument \code{var.approx=TRUE} is given, the return value includes columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat L(r)}{Lest(r)} under CSR. These are obtained by the delta method from the variance approximations described in \code{\link{Kest}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(cells) L <- Lest(cells) plot(L, main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/unitname.Rd0000644000176200001440000000671213333543264015013 0ustar liggesusers\name{unitname} \alias{unitname} \alias{unitname.dppm} \alias{unitname.im} \alias{unitname.kppm} \alias{unitname.minconfit} \alias{unitname.owin} \alias{unitname.ppp} \alias{unitname.ppm} \alias{unitname.psp} \alias{unitname.quad} \alias{unitname.slrm} \alias{unitname.tess} \alias{unitname<-} \alias{unitname<-.dppm} \alias{unitname<-.im} \alias{unitname<-.kppm} \alias{unitname<-.minconfit} \alias{unitname<-.owin} \alias{unitname<-.ppp} \alias{unitname<-.ppm} \alias{unitname<-.psp} \alias{unitname<-.quad} \alias{unitname<-.slrm} \alias{unitname<-.tess} \title{Name for Unit of Length} \description{ Inspect or change the name of the unit of length in a spatial dataset. } \usage{ unitname(x) \method{unitname}{dppm}(x) \method{unitname}{im}(x) \method{unitname}{kppm}(x) \method{unitname}{minconfit}(x) \method{unitname}{owin}(x) \method{unitname}{ppm}(x) \method{unitname}{ppp}(x) \method{unitname}{psp}(x) \method{unitname}{quad}(x) \method{unitname}{slrm}(x) \method{unitname}{tess}(x) unitname(x) <- value \method{unitname}{dppm}(x) <- value \method{unitname}{im}(x) <- value \method{unitname}{kppm}(x) <- value \method{unitname}{minconfit}(x) <- value \method{unitname}{owin}(x) <- value \method{unitname}{ppm}(x) <- value \method{unitname}{ppp}(x) <- value \method{unitname}{psp}(x) <- value \method{unitname}{quad}(x) <- value \method{unitname}{slrm}(x) <- value \method{unitname}{tess}(x) <- value } \arguments{ \item{x}{A spatial dataset. 
Either a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}), a tessellation (object of class \code{"tess"}), a quadrature scheme (object of class \code{"quad"}), or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"dppm"} or \code{"minconfit"}). } \item{value}{ Name of the unit of length. See Details. } } \details{ Spatial datasets in the \pkg{spatstat} package may include the name of the unit of length. This name is used when printing or plotting the dataset, and in some other applications. \code{unitname(x)} extracts this name, and \code{unitname(x) <- value} sets the name to \code{value}. A valid name is either \itemize{ \item a single character string \item a vector of two character strings giving the singular and plural forms of the unit name \item a list of length 3, containing two character strings giving the singular and plural forms of the basic unit, and a number specifying the multiple of this unit. } Note that re-setting the name of the unit of length \emph{does not} affect the numerical values in \code{x}. It changes only the string containing the name of the unit of length. To rescale the numerical values, use \code{\link{rescale}}. } \value{ The return value of \code{unitname} is an object of class \code{"unitname"} containing the name of the unit of length in \code{x}. There are methods for \code{print}, \code{summary}, \code{as.character}, \code{\link{rescale}} and \code{\link{compatible}}. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{rescale}}, \code{\link{owin}}, \code{\link{ppp}} } \examples{ X <- runifpoint(20) # if the unit of length is 1 metre: unitname(X) <- c("metre", "metres") # if the unit of length is 6 inches: unitname(X) <- list("inch", "inches", 6) } \keyword{spatial} \keyword{manip} spatstat/man/superimpose.Rd0000644000176200001440000001667513333543264015557 0ustar liggesusers\name{superimpose} \alias{superimpose} \alias{superimpose.ppp} \alias{superimpose.splitppp} \alias{superimpose.ppplist} \alias{superimpose.psp} \alias{superimpose.default} \title{Superimpose Several Geometric Patterns} \description{ Superimpose any number of point patterns or line segment patterns. } \usage{ superimpose(\dots) \method{superimpose}{ppp}(\dots, W=NULL, check=TRUE) \method{superimpose}{psp}(\dots, W=NULL, check=TRUE) \method{superimpose}{splitppp}(\dots, W=NULL, check=TRUE) \method{superimpose}{ppplist}(\dots, W=NULL, check=TRUE) \method{superimpose}{default}(\dots) } \arguments{ \item{\dots}{ Any number of arguments, each of which represents either a point pattern or a line segment pattern or a list of point patterns. } \item{W}{ Optional. Data determining the window for the resulting pattern. Either a window (object of class \code{"owin"}, or something acceptable to \code{\link{as.owin}}), or a function which returns a window, or one of the strings \code{"convex"}, \code{"rectangle"}, \code{"bbox"} or \code{"none"}. } \item{check}{ Logical value (passed to \code{\link{ppp}} or \code{\link{psp}} as appropriate) determining whether to check the geometrical validity of the resulting pattern. } } \value{ For \code{superimpose.ppp}, a point pattern (object of class \code{"ppp"}). For \code{superimpose.default}, either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)}. For \code{superimpose.psp}, a line segment pattern (object of class \code{"psp"}). 
If \code{W} is a window (object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}) then this determines the window for the superimposed pattern.
For \code{superimpose.psp}, all arguments \code{\dots} are line segment patterns (objects of class \code{"psp"}); their observation windows are extracted; the union of these windows is computed; and this union is taken to be the window for the superimposed pattern. For \code{superimpose.ppp} and \code{superimpose.default}, the arguments \code{\dots} are inspected, and any arguments which are point patterns (objects of class \code{"ppp"}) are selected; their observation windows are extracted, and the union of these windows is taken to be the window for the superimposed point pattern. For \code{superimpose.default} if none of the arguments is of class \code{"ppp"} then no window is computed and the result of \code{superimpose} is a \code{list(x,y)}. \item If \code{W} is one of the strings \code{"convex"}, \code{"rectangle"} or \code{"bbox"} then a window for the superimposed pattern is computed from the coordinates of the points or the line segments as follows. \describe{ \item{\code{"bbox"}:}{the bounding box of the points or line segments (see \code{\link{bounding.box.xy}});} \item{\code{"convex"}:}{the Ripley-Rasson estimator of a convex window (see \code{\link{ripras}});} \item{\code{"rectangle"}:}{the Ripley-Rasson estimator of a rectangular window (using \code{\link{ripras}} with argument \code{shape="rectangle"}).} } \item If \code{W} is a function, then this function is used to compute a window for the superimposed pattern from the coordinates of the points or the line segments. The function should accept input of the form \code{list(x,y)} and is expected to return an object of class \code{"owin"}. Examples of such functions are \code{\link{ripras}} and \code{\link{bounding.box.xy}}. } The arguments \code{\dots} may be \emph{marked} patterns. The marks of each component pattern must have the same format. Numeric and character marks may be ``mixed''. If there is such mixing then the numeric marks are coerced to character in the combining process. 
If the mark structures are all data frames, then these data frames must have the same number of columns and identical column names. If the arguments \code{\dots} are given in the form \code{name=value}, then the \code{name}s will be used as an extra column of marks attached to the elements of the corresponding patterns. } \seealso{ \code{\link{superimpose.lpp}}, \code{\link{concatxy}}, \code{\link{quadscheme}}. } \examples{ # superimposing point patterns p1 <- runifrect(30) p2 <- runifrect(42) s1 <- superimpose(p1,p2) # Unmarked pattern. p3 <- list(x=rnorm(20),y=rnorm(20)) s2 <- superimpose(p3,p2,p1) # Default method gets called. s2a <- superimpose(p1,p2,p3) # Same as s2 except for order of points. s3 <- superimpose(clyde=p1,irving=p2) # Marked pattern; marks a factor # with levels "clyde" and "irving"; # warning given. marks(p1) <- factor(sample(LETTERS[1:3],30,TRUE)) marks(p2) <- factor(sample(LETTERS[1:3],42,TRUE)) s5 <- superimpose(clyde=p1,irving=p2) # Marked pattern with extra column marks(p2) <- data.frame(a=marks(p2),b=runif(42)) s6 <- try(superimpose(p1,p2)) # Gives an error. marks(p1) <- data.frame(a=marks(p1),b=1:30) s7 <- superimpose(p1,p2) # O.K. # how to make a 2-type point pattern with types "a" and "b" u <- superimpose(a = rpoispp(10), b = rpoispp(20)) # how to make a 2-type point pattern with types 1 and 2 u <- superimpose("1" = rpoispp(10), "2" = rpoispp(20)) # superimposing line segment patterns X <- rpoisline(10) Y <- as.psp(matrix(runif(40), 10, 4), window=owin()) Z <- superimpose(X, Y) # being unreasonable \dontrun{ if(FALSE) { crud <- try(superimpose(p1,p2,X,Y)) # Gives an error, of course! 
} } } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/is.lpp.Rd0000644000176200001440000000107113333543263014370 0ustar liggesusers\name{is.lpp} \alias{is.lpp} \title{Test Whether An Object Is A Point Pattern on a Linear Network} \description{ Checks whether its argument is a point pattern on a linear network (object of class \code{"lpp"}). } \usage{ is.lpp(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a point pattern object of class \code{"lpp"}. } \value{ \code{TRUE} if \code{x} is a point pattern of class \code{"lpp"}, otherwise \code{FALSE}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/rotmean.Rd0000644000176200001440000000507013333543264014634 0ustar liggesusers\name{rotmean} \alias{rotmean} \title{ Rotational Average of a Pixel Image } \description{ Compute the average pixel value over all rotations of the image about the origin, as a function of distance from the origin. } \usage{ rotmean(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im")) } \arguments{ \item{X}{ A pixel image. } \item{\dots}{ Ignored. } \item{origin}{ Optional. Origin about which the rotations should be performed. Either a numeric vector or a character string as described in the help for \code{\link{shift.owin}}. } \item{padzero}{ Logical. If \code{TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. If \code{FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. } \item{Xname}{ Optional name for \code{X} to be used in the function labels. } \item{result}{ Character string specifying the kind of result required: either a function object or a pixel image. } } \details{ This command computes, for each possible distance \eqn{r}, the average pixel value of the pixels lying at distance \eqn{r} from the origin. Kernel smoothing is used to obtain a smooth function of \eqn{r}. 
If \code{result="fv"} (the default) the result is a function object of class \code{"fv"} giving the mean pixel value of \code{X} as a function of distance from the origin. If \code{result="im"} the result is a pixel image, with the same dimensions as \code{X}, giving the mean value of \code{X} over all pixels lying at the same distance from the origin as the current pixel. If \code{padzero=TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. The rotational mean at a given distance \eqn{r} is the average value of the image \code{X} over the \emph{entire} circle of radius \eqn{r}, including zero values outside the window if the circle lies partly outside the window. If \code{padzero=FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. The rotational mean is the average of the \code{X} values over the \emph{subset} of the circle of radius \eqn{r} that lies entirely inside the window. } \value{ An object of class \code{"fv"} or \code{"im"}. } \author{\adrian \rolf and \ege } \examples{ if(interactive()) { Z <- setcov(square(1)) plot(rotmean(Z)) plot(rotmean(Z, result="im")) } else { Z <- setcov(square(1), dimyx=32) f <- rotmean(Z) } } \keyword{spatial} \keyword{math} spatstat/man/methods.distfun.Rd0000644000176200001440000000412113333543263016300 0ustar liggesusers\name{methods.distfun} \alias{methods.distfun} %DoNotExport \alias{shift.distfun} \alias{rotate.distfun} \alias{scalardilate.distfun} \alias{affine.distfun} \alias{flipxy.distfun} \alias{reflect.distfun} \alias{rescale.distfun} \Rdversion{1.1} \title{ Geometrical Operations for Distance Functions } \description{ Methods for objects of the class \code{"distfun"}. 
} \usage{ \method{shift}{distfun}(X, \dots) \method{rotate}{distfun}(X, \dots) \method{scalardilate}{distfun}(X, \dots) \method{affine}{distfun}(X, \dots) \method{flipxy}{distfun}(X) \method{reflect}{distfun}(X) \method{rescale}{distfun}(X, s, unitname) } \arguments{ \item{X}{ Object of class \code{"distfun"} representing the distance function of a spatial object. } \item{\dots}{ Arguments passed to the next method for the geometrical operation. See Details. } \item{s, unitname}{ Arguments passed to the next method for \code{\link{rescale}}. } } \details{ These are methods for the generic functions \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{affine}}, \code{\link{flipxy}} and \code{\link{reflect}} which perform geometrical operations on spatial objects, and for the generic \code{\link{rescale}} which changes the unit of length. The argument \code{X} should be an object of class \code{"distfun"} representing the distance function of a spatial object \code{Y}. Objects of class \code{"distfun"} are created by \code{\link{distfun}}. The methods apply the specified geometrical transformation to the original object \code{Y}, producing a new object \code{Z} of the same type as \code{Y}. They then create a new \code{distfun} object representing the distance function of \code{Z}. } \value{ Another object of class \code{"distfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{distfun}}, \code{\link{methods.funxy}}. } \examples{ (f <- distfun(letterR)) plot(f) flipxy(f) shift(f, origin="midpoint") plot(rotate(f, angle=pi/2)) (g <- distfun(lansing)) rescale(g) } \keyword{spatial} \keyword{methods} spatstat/man/fvnames.Rd0000644000176200001440000000517113333543263014627 0ustar liggesusers\name{fvnames} \alias{fvnames} \alias{fvnames<-} \title{ Abbreviations for Groups of Columns in Function Value Table } \description{ Groups of columns in a function value table (object of class \code{"fv"}) identified by standard abbreviations. 
} \usage{ fvnames(X, a = ".") fvnames(X, a = ".") <- value } \arguments{ \item{X}{ Function value table (object of class \code{"fv"}). See \code{\link{fv.object}}. } \item{a}{ One of the standard abbreviations listed below. } \item{value}{ Character vector containing names of columns of \code{X}. } } \details{ An object of class \code{"fv"} represents a table of values of a function, usually a summary function for spatial data such as the \eqn{K}-function, for which several different statistical estimators may be available. The different estimates are stored as columns of the table. Auxiliary information carried in the object \code{X} specifies some columns or groups of columns of this table that should be used for particular purposes. For convenience these groups can be referred to by standard abbreviations which are recognised by various functions in the \pkg{spatstat} package, such as \code{\link{plot.fv}}. These abbreviations are: \tabular{ll}{ \code{".x"} \tab the function argument \cr \code{".y"} \tab the recommended value of the function \cr \code{"."} \tab all function values to be plotted by default \cr \tab (in order of plotting) \cr \code{".s"} \tab the upper and lower limits of shading \cr \tab (for envelopes and confidence intervals)\cr \code{".a"} \tab all function values (in column order) } The command \code{fvnames(X, a)} expands the abbreviation \code{a} and returns a character vector containing the names of the columns. The assignment \code{fvnames(X, a) <- value} changes the definition of the abbreviation \code{a} to the character string \code{value} (which should be the name of another column of \code{X}). The column names of \code{X} are not changed. Note that \code{fvnames(x, ".")} lists the columns of values that will be plotted by default, in the order that they would be plotted, not in order of the column position. The order in which curves are plotted affects the colours and line styles associated with the curves. 
} \value{ For \code{fvnames}, a character vector. For \code{fvnames<-}, the updated object. } \author{\adrian and \rolf } \seealso{ \code{\link{fv.object}}, \code{\link{plot.fv}} } \examples{ K <- Kest(cells) fvnames(K, ".y") fvnames(K, ".y") <- "trans" } \keyword{spatial} \keyword{manip} spatstat/man/densityVoronoi.lpp.Rd0000644000176200001440000000662213427753643017030 0ustar liggesusers\name{densityVoronoi.lpp} \alias{densityVoronoi.lpp} \title{Intensity Estimate of Point Pattern on Linear Network Using Voronoi-Dirichlet Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern on a linear network, using the Dirichlet-Voronoi tessellation on the network. } \usage{ \method{densityVoronoi}{lpp}(X, f = 1, \dots, nrep = 1, verbose = TRUE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{f}{ Fraction (between 0 and 1 inclusive) of the data points that will be used to build a tessellation for the intensity estimate. } \item{\dots}{ Arguments passed to \code{\link{linim}} determining the pixel resolution of the result. } \item{nrep}{ Number of independent repetitions of the randomised procedure. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ This function is an alternative to \code{\link{density.lpp}}. It computes an estimate of the intensity function of a point pattern dataset on a linear network. The result is a pixel image on the network, giving the estimated intensity. This function is a method for the generic \code{\link{densityVoronoi}} for the class \code{"lpp"} of point patterns on a linear network. 
If \code{f=1} (the default), the Voronoi estimate (Barr and Schoenberg, 2010) is computed: the point pattern \code{X} is used to construct a Voronoi/Dirichlet tessellation on the network (see \code{\link{lineardirichlet}}); the lengths of the Dirichlet tiles are computed; the estimated intensity in each tile is the reciprocal of the tile length. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{f=0}, the intensity estimate at every location is equal to the average intensity (number of points divided by network length). The result is a pixel image of intensity estimates which are constant. If \code{f} is strictly between 0 and 1, the smoothed Voronoi estimate (Moradi et al, 2019) is computed. The dataset \code{X} is randomly thinned by deleting or retaining each point independently, with probability \code{f} of retaining a point. The thinned pattern is used to construct a Dirichlet tessellation and form the Voronoi estimate, which is then adjusted by a factor \code{1/f}. This procedure is repeated \code{nrep} times and the results are averaged to obtain the smoothed Voronoi estimate. The value \code{f} can be chosen automatically by bandwidth selection using \code{\link{bw.voronoi}}. } \value{ Pixel image on a linear network (object of class \code{"linim"}). } \references{ Barr, C.D. and Schoenberg, F.P. (2010) On the Voronoi estimator for the intensity of an inhomogeneous planar Poisson process. \emph{Biometrika} \bold{97}, 977--984. Moradi, M., Cronie, O., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators. \emph{Statistics and Computing}, in press. } \author{ \spatstatAuthors and Mehdi Moradi. } \seealso{ \code{\link{densityVoronoi}} is the generic, with a method for class \code{"ppp"}. \code{\link{lineardirichlet}} computes the Dirichlet-Voronoi tessellation on a network. \code{\link{bw.voronoi}} performs bandwidth selection of the fraction \code{f}. See also \code{\link{density.lpp}}. 
} \examples{ nr <- if(interactive()) 100 else 3 plot(densityVoronoi(spiders, 0.1, nrep=nr)) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/connected.Rd0000644000176200001440000001021713333543263015127 0ustar liggesusers\name{connected} \Rdversion{1.1} \alias{connected} \alias{connected.im} \alias{connected.owin} \title{ Connected components } \description{ Finds the topologically-connected components of a spatial object, such as the connected clumps of pixels in a binary image. } \usage{ connected(X, \dots) \method{connected}{owin}(X, \dots, method="C") \method{connected}{im}(X, \dots, background = NA, method="C") } \arguments{ \item{X}{ A spatial object such as a pixel image (object of class \code{"im"}) or a window (object of class \code{"owin"}). } \item{background}{ Optional. Treat pixels with this value as being part of the background. } \item{method}{ String indicating the algorithm to be used. Either \code{"C"} or \code{"interpreted"}. See Details. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } } \details{ The function \code{connected} is generic, with methods for pixel images (class \code{"im"}) and windows (class \code{"owin"}) described here. There are also methods for tessellations (\code{\link{connected.tess}}), point patterns (\code{\link{connected.ppp}} and \code{\link{connected.lpp}}), and linear networks (\code{\link{connected.linnet}}). The functions described here compute the connected component transform (Rosenfeld and Pfalz, 1966) of a binary image or binary mask. The argument \code{X} is first converted into a pixel image with logical values. Then the algorithm identifies the connected components (topologically-connected clumps of pixels) in the foreground. Two pixels belong to the same connected component if they have the value \code{TRUE} and if they are neighbours (in the 8-connected sense). This rule is applied repeatedly until it terminates. 
Then each connected component contains all the pixels that can be reached by stepping from neighbour to neighbour. If \code{method="C"}, the computation is performed by a compiled C language implementation of the classical algorithm of Rosenfeld and Pfalz (1966). If \code{method="interpreted"}, the computation is performed by an \R implementation of the algorithm of Park et al (2000). The result is a factor-valued image, with levels that correspond to the connected components. The Examples show how to extract each connected component as a separate window object. } \value{ A pixel image (object of class \code{"im"}) with factor values. The levels of the factor correspond to the connected components. } \references{ Park, J.-M., Looney, C.G. and Chen, H.-C. (2000) Fast connected component labeling algorithm using a divide and conquer technique. Pages 373-376 in S.Y. Shin (ed) \emph{Computers and Their Applications:} Proceedings of the ISCA 15th International Conference on Computers and Their Applications, March 29-31, 2000, New Orleans, Louisiana USA. ISCA 2000, ISBN 1-880843-32-3. Rosenfeld, A. and Pfalz, J.L. (1966) Sequential operations in digital processing. \emph{Journal of the Association for Computing Machinery} \bold{13} 471-494. } \seealso{ \code{\link{connected.ppp}}, \code{\link{connected.tess}}, \code{\link{connected.lpp}}, \code{\link{connected.linnet}}, \code{\link{im.object}}, \code{\link{tess}} } \section{Warnings}{ It may be hard to distinguish different components in the default plot because the colours of nearby components may be very similar. See the Examples for a randomised colour map. The algorithm for \code{method="interpreted"} can be very slow for large images (or images where the connected components include a large number of pixels). 
} \examples{ d <- distmap(cells, dimyx=256) X <- levelset(d, 0.07) plot(X) Z <- connected(X) plot(Z) # or equivalently Z <- connected(d <= 0.07) # number of components nc <- length(levels(Z)) # plot with randomised colour map plot(Z, col=hsv(h=sample(seq(0,1,length=nc), nc))) # how to extract the components as a list of windows W <- tiles(tess(image=Z)) } \author{ Original \R code by Julian Burgos, University of Washington. Adapted for \pkg{spatstat} by \adrian and \rolf. } \keyword{spatial} \keyword{math} spatstat/man/pairdist.pp3.Rd0000644000176200001440000000365113333543263015511 0ustar liggesusers\name{pairdist.pp3} \alias{pairdist.pp3} \title{Pairwise distances in Three Dimensions} \description{ Computes the matrix of distances between all pairs of points in a three-dimensional point pattern. } \usage{ \method{pairdist}{pp3}(X, \dots, periodic=FALSE, squared=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a three-dimensional point pattern \code{X} (an object of class \code{"pp3"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a box, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite faces of the box are regarded as equivalent. This is meaningless if the window is not a box. 
If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). } \seealso{ \code{\link{pairdist}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{K3est}} } \examples{ X <- runifpoint3(20) d <- pairdist(X) d <- pairdist(X, periodic=TRUE) d <- pairdist(X, squared=TRUE) } \author{ \adrian based on two-dimensional code by Pavel Grabarnik. } \keyword{spatial} \keyword{math} spatstat/man/methods.lpp.Rd0000644000176200001440000000500113333543263015415 0ustar liggesusers\name{methods.lpp} \alias{methods.lpp} %DoNotExport \Rdversion{1.1} \alias{as.ppp.lpp} \alias{as.psp.lpp} \alias{marks<-.lpp} \alias{nsegments.lpp} \alias{print.lpp} \alias{print.summary.lpp} \alias{summary.lpp} \alias{unitname.lpp} \alias{unitname<-.lpp} \alias{unmark.lpp} \title{ Methods for Point Patterns on a Linear Network } \description{ These are methods specifically for the class \code{"lpp"} of point patterns on linear networks. } \usage{ \method{as.ppp}{lpp}(X, ..., fatal=TRUE) \method{as.psp}{lpp}(x, ..., fatal=TRUE) \method{marks}{lpp}(x, ...) <- value \method{nsegments}{lpp}(x) \method{print}{lpp}(x, ...) \method{print}{summary.lpp}(x, ...) \method{summary}{lpp}(object, ...) \method{unitname}{lpp}(x) \method{unitname}{lpp}(x) <- value \method{unmark}{lpp}(X) } \arguments{ \item{x,X,object}{ An object of class \code{"lpp"} representing a point pattern on a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ Replacement value for the \code{marks} or \code{unitname} of \code{x}. See Details. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). 
} } \details{ These are methods for the generic functions \code{\link{as.ppp}}, \code{\link{as.psp}}, \code{\link{marks<-}}, \code{\link{nsegments}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}} and \code{\link{unmark}} for objects of the class \code{"lpp"}. For \code{"marks<-.lpp"} the replacement \code{value} should be either \code{NULL}, or a vector of length equal to the number of points in \code{x}, or a data frame with one row for each point in \code{x}. For \code{"unitname<-.lpp"} the replacement \code{value} should be a valid name for the unit of length, as described in \code{\link{unitname}}. } \section{Other methods}{ An object of class \code{"lpp"} also inherits the class \code{"ppx"} for which many other methods are available. See \code{\link[spatstat:methods.ppx]{methods.ppx}}. } \value{ See the documentation on the corresponding generic function. } \author{ \adrian } \seealso{ \code{\link{lpp}}, \code{\link{intensity.lpp}}, \code{\link[spatstat:methods.ppx]{methods.ppx}} } \examples{ X <- runiflpp(10, simplenet) unitname(X) <- c("furlong", "furlongs") X summary(X) summary(chicago) nsegments(X) Y <- as.ppp(X) } \keyword{spatial} \keyword{methods} spatstat/man/scalardilate.Rd0000644000176200001440000000503513433753433015622 0ustar liggesusers\name{scalardilate} \alias{scalardilate} \alias{scalardilate.im} \alias{scalardilate.owin} \alias{scalardilate.ppp} \alias{scalardilate.psp} \alias{scalardilate.default} \title{Apply Scalar Dilation} \description{ Applies scalar dilation to a plane geometrical object, such as a point pattern or a window, relative to a specified origin. 
} \usage{ scalardilate(X, f, \dots) \method{scalardilate}{im}(X, f, \dots, origin=NULL) \method{scalardilate}{owin}(X, f, \dots, origin=NULL) \method{scalardilate}{ppp}(X, f, \dots, origin=NULL) \method{scalardilate}{psp}(X, f, \dots, origin=NULL) \method{scalardilate}{default}(X, f, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}), a pixel image (class \code{"im"}) and so on. } \item{f}{ Scalar dilation factor. A finite number greater than zero. } \item{\dots}{Ignored by the methods.} \item{origin}{ Origin for the scalar dilation. Either a vector of 2 numbers, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). } } \value{ Another object of the same type, representing the result of applying the scalar dilation. } \details{ This command performs scalar dilation of the object \code{X} by the factor \code{f} relative to the origin specified by \code{origin}. The function \code{scalardilate} is generic, with methods for windows (class \code{"owin"}), point patterns (class \code{"ppp"}), pixel images (class \code{"im"}), line segment patterns (class \code{"psp"}) and a default method. If the argument \code{origin} is not given, then every spatial coordinate is multiplied by the factor \code{f}. If \code{origin} is given, then scalar dilation is performed relative to the specified origin. Effectively, \code{X} is shifted so that \code{origin} is moved to \code{c(0,0)}, then scalar dilation is performed, then the result is shifted so that \code{c(0,0)} is moved to \code{origin}. This command is a special case of an affine transformation: see \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{shift}} } \examples{ plot(letterR) plot(scalardilate(letterR, 0.7, origin="left"), col="red", add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/as.data.frame.ppp.Rd0000644000176200001440000000201513333543262016363 0ustar liggesusers\name{as.data.frame.ppp} \alias{as.data.frame.ppp} \title{Coerce Point Pattern to a Data Frame} \description{ Extracts the coordinates of the points in a point pattern, and their marks if any, and returns them in a data frame. } \usage{ \method{as.data.frame}{ppp}(x, row.names = NULL, ...) } \arguments{ \item{x}{Point pattern (object of class \code{"ppp"}).} \item{row.names}{Optional character vector of row names.} \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class \code{"ppp"} of point patterns. It extracts the coordinates of the points in the point pattern, and returns them as columns named \code{x} and \code{y} in a data frame. If the points were marked, the marks are returned as a column named \code{marks} with the same type as in the point pattern dataset. } \value{ A data frame. } \examples{ data(amacrine) df <- as.data.frame(amacrine) df[1:5,] } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/compatible.fasp.Rd0000644000176200001440000000212313333543263016231 0ustar liggesusers\name{compatible.fasp} \alias{compatible.fasp} \title{Test Whether Function Arrays Are Compatible} \description{ Tests whether two or more function arrays (class \code{"fasp"}) are compatible. } \usage{ \method{compatible}{fasp}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function arrays (objects of class \code{"fasp"}).} } \details{ An object of class \code{"fasp"} can be regarded as an array of functions. Such objects are returned by the command \code{\link{alltypes}}. 
This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. The function arrays are compatible if the arrays have the same dimensions, and the corresponding elements in each cell of the array are compatible as defined by \code{\link{compatible.fv}}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fasp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/harmonise.fv.Rd0000644000176200001440000000575413333543263015576 0ustar liggesusers\name{harmonise.fv} \alias{harmonise.fv} \alias{harmonize.fv} \title{Make Function Tables Compatible} \description{ Convert several objects of class \code{"fv"} to the same values of the function argument. } \usage{ \method{harmonise}{fv}(\dots, strict=FALSE) \method{harmonize}{fv}(\dots, strict=FALSE) } \arguments{ \item{\dots}{ Any number of function tables (objects of class \code{"fv"}). } \item{strict}{ Logical. If \code{TRUE}, a column of data will be deleted if columns of the same name do not appear in every object. } } \details{ A function value table (object of class \code{"fv"}) is essentially a data frame giving the values of a function \eqn{f(x)} (or several alternative estimates of this value) at equally-spaced values of the function argument \eqn{x}. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"fv"}. This command makes any number of \code{"fv"} objects compatible, in the loose sense that they have the same sequence of values of \eqn{x}. They can then be combined by \code{\link{cbind.fv}}, but not necessarily by \code{\link{eval.fv}}. All arguments \code{\dots} must be function value tables (objects of class \code{"fv"}). 
The result will be a list, of length equal to the number of arguments \code{\dots}, containing new versions of each of these functions, converted to a common sequence of \eqn{x} values. If the arguments were named (\code{name=value}) then the return value also carries these names. The range of \eqn{x} values in the resulting functions will be the intersection of the ranges of \eqn{x} values in the original functions. The spacing of \eqn{x} values in the resulting functions will be the finest (narrowest) of the spacings of the \eqn{x} values in the original functions. Function values are interpolated using \code{\link[stats]{approxfun}}. If \code{strict=TRUE}, each column of data will be retained only if a column of the same name appears in all of the arguments \code{\dots}. This ensures that the resulting objects are strictly compatible in the sense of \code{\link{compatible.fv}}, and can be combined using \code{\link{eval.fv}} or \code{\link{collapse.fv}}. If \code{strict=FALSE} (the default), this does not occur, and then the resulting objects are \bold{not} guaranteed to be compatible in the sense of \code{\link{compatible.fv}}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are objects of class \code{"fv"}. If the arguments were named (\code{name=value}) then the return value also carries these names. } \author{\adrian , \rolf and \ege. 
} \examples{ H <- harmonise(K=Kest(cells), G=Gest(cells)) H \dontrun{ ## generates a warning about duplicated columns try(cbind(H$K, H$G)) } } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}}, \code{\link{eval.fv}}, \code{\link{compatible.fv}} } \keyword{spatial} \keyword{manip} spatstat/man/gauss.hermite.Rd0000644000176200001440000000322213333543263015741 0ustar liggesusers\name{gauss.hermite} \alias{gauss.hermite} \title{ Gauss-Hermite Quadrature Approximation to Expectation for Normal Distribution } \description{ Calculates an approximation to the expected value of any function of a normally-distributed random variable, using Gauss-Hermite quadrature. } \usage{ gauss.hermite(f, mu = 0, sd = 1, ..., order = 5) } \arguments{ \item{f}{ The function whose moment should be approximated. } \item{mu}{ Mean of the normal distribution. } \item{sd}{ Standard deviation of the normal distribution. } \item{\dots}{ Additional arguments passed to \code{f}. } \item{order}{ Number of quadrature points in the Gauss-Hermite quadrature approximation. A small positive integer. } } \details{ This algorithm calculates the approximate expected value of \code{f(Z)} when \code{Z} is a normally-distributed random variable with mean \code{mu} and standard deviation \code{sd}. The expected value is an integral with respect to the Gaussian density; this integral is approximated using Gauss-Hermite quadrature. The argument \code{f} should be a function in the \R language whose first argument is the variable \code{Z}. Additional arguments may be passed through \code{\dots}. The value returned by \code{f} may be a single numeric value, a vector, or a matrix. The values returned by \code{f} for different values of \code{Z} must have compatible dimensions. The result is a weighted average of several values of \code{f}. } \value{ Numeric value, vector or matrix. } \author{\adrian , \rolf and \ege. 
} \examples{ gauss.hermite(function(x) x^2, 3, 1) } \keyword{math} spatstat/man/pairs.linim.Rd0000644000176200001440000000402113333543263015406 0ustar liggesusers\name{pairs.linim} \alias{pairs.linim} \title{ Scatterplot Matrix for Pixel Images on a Linear Network } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images on a linear network. } \usage{ \method{pairs}{linim}(..., plot=TRUE, eps=NULL) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image on a linear network (object of class \code{"linim"}), a pixel image (object of class \code{"im"}), or a named argument to be passed to \code{\link{pairs.default}}. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } \item{eps}{ Optional. Spacing between sample points on the network. A positive number. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images on a linear network. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be a pixel image on a linear network (object of class \code{"linim"}). They should be defined on the \bold{same} linear network, but may have different pixel resolutions. First the pixel values of each image are extracted at a set of sample points equally-spaced across the network. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. 
} \seealso{ \code{\link{pairs.default}}, \code{\link{pairs.im}} } \examples{ fit <- lppm(chicago ~ marks * (x+y)) lam <- predict(fit) do.call(pairs, lam) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} spatstat/man/tile.areas.Rd0000644000176200001440000000175413333543264015223 0ustar liggesusers\name{tile.areas} \alias{tile.areas} \title{Compute Areas of Tiles in a Tessellation} \description{ Computes the area of each tile in a tessellation. } \usage{ tile.areas(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. This command computes the area of each of the tiles that make up the tessellation \code{x}. The result is a numeric vector in the same order as the tiles would be listed by \code{tiles(x)}. } \value{ A numeric vector. } \seealso{ \code{\link{tess}}, \code{\link{tiles}}, \code{\link{tilenames}}, \code{\link{tiles.empty}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tile.areas(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tile.areas(E) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/unstack.msr.Rd0000644000176200001440000000223613333543264015440 0ustar liggesusers\name{unstack.msr} \alias{unstack.msr} \title{ Separate a Vector Measure into its Scalar Components } \description{ Converts a vector-valued measure into a list of scalar-valued measures. } \usage{ \method{unstack}{msr}(x, \dots) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic \code{\link[utils]{unstack}} for the class \code{"msr"} of measures. If \code{x} is a vector-valued measure, then \code{y <- unstack(x)} is a list of scalar-valued measures defined by the components of \code{x}. 
The \code{j}th entry of the list, \code{y[[j]]}, is equivalent to the \code{j}th component of the vector measure \code{x}. If \code{x} is a scalar-valued measure, then the result is a list consisting of one entry, which is \code{x}. } \value{ A list of measures, of class \code{"solist"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.ppp}} \code{\link{split.msr}}. } \examples{ fit <- ppm(cells ~ x) m <- residuals(fit, type="score") m unstack(m) } \keyword{spatial} \keyword{manip} spatstat/man/transect.im.Rd0000644000176200001440000000351613333543264015421 0ustar liggesusers\name{transect.im} \alias{transect.im} \title{ Pixel Values Along a Transect } \description{ Extract the pixel values of a pixel image at each point along a linear transect. } \usage{ transect.im(X, ..., from="bottomleft", to="topright", click=FALSE, add=FALSE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{\dots}{ Ignored. } \item{from,to}{ Optional. Start point and end point of the transect. Pairs of \eqn{(x,y)} coordinates in a format acceptable to \code{\link{xy.coords}}, or keywords \code{"bottom"}, \code{"left"}, \code{"top"}, \code{"right"}, \code{"bottomleft"} etc. } \item{click}{ Optional. Logical value. If \code{TRUE}, the linear transect is determined interactively by the user, who clicks two points on the current plot. } \item{add}{ Logical. If \code{click=TRUE}, this argument determines whether to perform interactive tasks on the current plot (\code{add=TRUE}) or to start by plotting \code{X} (\code{add=FALSE}). } } \details{ The pixel values of the image \code{X} along a line segment will be extracted. The result is a function table (\code{"fv"} object) which can be plotted directly. If \code{click=TRUE}, then the user is prompted to click two points on the plot of \code{X}. These endpoints define the transect. Otherwise, the transect is defined by the endpoints \code{from} and \code{to}. 
The default is a diagonal transect from bottom left to top right of the frame. } \value{ An object of class \code{"fv"} which can be plotted. } \author{ \adrian and \rolf } \seealso{ \code{\link{im}} } \examples{ Z <- density(redwood) plot(transect.im(Z)) \dontrun{ if(FALSE) { plot(transect.im(Z, click=TRUE)) } } } \keyword{spatial} \keyword{manip} \keyword{iplot} spatstat/man/Extract.msr.Rd0000644000176200001440000000230013333543263015371 0ustar liggesusers\name{Extract.msr} \alias{[.msr} \title{Extract Subset of Signed or Vector Measure} \description{ Extract a subset of a signed measure or vector-valued measure. } \usage{ \method{[}{msr}(x, i, j, \dots) } \arguments{ \item{x}{ A signed or vector measure. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or any type of index that applies to a matrix. } \item{j}{ Subset index selecting the vector coordinates to be extracted, if \code{x} is a vector-valued measure. } \item{\dots}{Ignored.} } \value{ An object of class \code{"msr"}. } \details{ This operator extracts a subset of the data which determines the signed measure or vector-valued measure \code{x}. The result is another measure. } \seealso{ \code{\link{msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") rp[square(0.5)] rs[ , 2:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/as.data.frame.psp.Rd0000644000176200001440000000253413333543262016374 0ustar liggesusers\name{as.data.frame.psp} \alias{as.data.frame.psp} \title{Coerce Line Segment Pattern to a Data Frame} \description{ Extracts the coordinates of the endpoints in a line segment pattern, and their marks if any, and returns them in a data frame. 
} \usage{ \method{as.data.frame}{psp}(x, row.names = NULL, ...) } \arguments{ \item{x}{Line segment pattern (object of class \code{"psp"}).} \item{row.names}{Optional character vector of row names.} \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class \code{"psp"} of line segment patterns. It extracts the coordinates of the endpoints of the line segments, and returns them as columns named \code{x0}, \code{y0}, \code{x1} and \code{y1} in a data frame. If the line segments were marked, the marks are appended as an extra column or columns to the data frame which is returned. If the marks are a vector then a single column named \code{marks} is appended. in the data frame, with the same type as in the line segment pattern dataset. If the marks are a data frame, then the columns of this data frame are appended (retaining their names). } \value{ A data frame with 4 or 5 columns. } \examples{ data(copper) df <- as.data.frame(copper$Lines) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Extract.linim.Rd0000644000176200001440000000402113333543263015702 0ustar liggesusers\name{Extract.linim} \alias{[.linim} \title{Extract Subset of Pixel Image on Linear Network} \description{ Extract a subset of a pixel image on a linear network. } \usage{ \method{[}{linim}(x, i, \dots, drop=TRUE) } \arguments{ \item{x}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{i}{ Spatial window defining the subregion. Either a spatial window (an object of class \code{"owin"}), or a logical-valued pixel image, or any type of index that applies to a matrix, or a point pattern (an object of class \code{"lpp"} or \code{"ppp"}), or something that can be converted to a point pattern by \code{\link{as.lpp}} (using the network on which \code{x} is defined). 
} \item{\dots}{Additional arguments passed to \code{[.im}.} \item{drop}{Logical value indicating whether \code{NA} values should be omitted from the result.} } \value{ Another pixel image on a linear network (object of class \code{"linim"}) or a vector of pixel values. } \details{ This function is a method for the subset operator \code{"["} for pixel images on linear networks (objects of class \code{"linim"}). The pixel image \code{x} will be restricted to the domain specified by \code{i}. Pixels outside the domain of \code{x} are assigned the value \code{NA}; if \code{drop=TRUE} (the default) such \code{NA} values are deleted from the result; if \code{drop=FALSE}, then \code{NA} values are retained. If \code{i} is a window (or a logical-valued pixel image) then \code{x[i]} is another pixel image of class \code{"linim"}, representing the restriction of \code{x} to the spatial domain specified by \code{i}. If \code{i} is a point pattern, then \code{x[i]} is the vector of pixel values of \code{x} at the locations specified by \code{i}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y){x}, W=M) Y <- linim(simplenet, Z) X <- runiflpp(4, simplenet) Y[X] Y[square(c(0.3, 0.6))] } \author{ \adrian } \keyword{spatial} \keyword{manip} spatstat/man/uniquemap.ppp.Rd0000644000176200001440000000276513471207115015774 0ustar liggesusers\name{uniquemap.ppp} \alias{uniquemap} \alias{uniquemap.ppp} \alias{uniquemap.lpp} \alias{uniquemap.ppx} \title{ Map Duplicate Entries to Unique Entries } \description{ Determine whether points in a point pattern are duplicated, choose a unique representative for each set of duplicates, and map the duplicates to the unique representative. } \usage{ uniquemap(x) \method{uniquemap}{ppp}(x) \method{uniquemap}{lpp}(x) \method{uniquemap}{ppx}(x) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). 
} } \details{ The function \code{uniquemap} is generic, with methods for point patterns and data frames. This function determines whether any points of \code{x} are duplicated, and constructs a mapping of the indices of \code{x} so that all duplicates are mapped to a unique representative index. The result is an integer vector \code{u} such that \code{u[j] = i} if the points \code{x[i]} and \code{x[j]} are identical and point \code{i} has been chosen as the unique representative. The entry \code{u[i] = i} means either that point \code{i} is unique, or that it has been chosen as the unique representative of its equivalence class. } \value{ An integer vector. } \author{ \spatstatAuthors. } \seealso{ \code{\link{unique.ppp}}, \code{\link{duplicated.ppp}}, \code{\link{uniquemap.default}} } \examples{ Y <- runifpoint(4) X <- Y[c(1,2,3,4,2,1)] uniquemap(X) } \keyword{spatial} \keyword{methods} spatstat/man/dg.envelope.Rd0000644000176200001440000001023013501357607015370 0ustar liggesusers\name{dg.envelope} \alias{dg.envelope} \title{ Global Envelopes for Dao-Genton Test } \description{ Computes the global envelopes corresponding to the Dao-Genton test of goodness-of-fit. } \usage{ dg.envelope(X, \dots, nsim = 19, nsimsub=nsim-1, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. 
} \item{nsim}{ Number of simulated patterns to be generated in the primary experiment. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. } } \details{ Computes global simulation envelopes corresponding to the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test. The envelopes are described in Baddeley et al (2015). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. 
The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.envelope}} in this case. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{dg.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- dg.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- dg.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- dg.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/mincontrast.Rd0000644000176200001440000001436513571674202015540 0ustar liggesusers\name{mincontrast} \alias{mincontrast} \title{Method of Minimum Contrast} \description{ A general low-level algorithm for fitting theoretical point process models to point pattern data by the Method of Minimum Contrast. } \usage{ mincontrast(observed, theoretical, startpar, \dots, ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), adjustment=NULL) } \arguments{ \item{observed}{ Summary statistic, computed for the data. An object of class \code{"fv"}. } \item{theoretical}{ An R language function that calculates the theoretical expected value of the summary statistic, given the model parameters. See Details. 
} \item{startpar}{ Vector of initial values of the parameters of the point process model (passed to \code{theoretical}). } \item{\dots}{ Additional arguments passed to the function \code{theoretical} and to the optimisation algorithm \code{\link[stats]{optim}}. } \item{ctrl}{ Optional. List of arguments controlling the optimisation. See Details. } \item{fvlab}{ Optional. List containing some labels for the return value. See Details. } \item{explain}{ Optional. List containing strings that give a human-readable description of the model, the data and the summary statistic. } \item{adjustment}{ Internal use only. } } \details{ This function is a general algorithm for fitting point process models by the Method of Minimum Contrast. If you want to fit the Thomas process, see \code{\link{thomas.estK}}. If you want to fit a log-Gaussian Cox process, see \code{\link{lgcp.estK}}. If you want to fit the \Matern cluster process, see \code{\link{matclust.estK}}. The Method of Minimum Contrast (Diggle and Gratton, 1984) is a general technique for fitting a point process model to point pattern data. First a summary function (typically the \eqn{K} function) is computed from the data point pattern. Second, the theoretical expected value of this summary statistic under the point process model is derived (if possible, as an algebraic expression involving the parameters of the model) or estimated from simulations of the model. Then the model is fitted by finding the optimal parameter values for the model to give the closest match between the theoretical and empirical curves. The argument \code{observed} should be an object of class \code{"fv"} (see \code{\link{fv.object}}) containing the values of a summary statistic computed from the data point pattern. Usually this is the function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives. The argument \code{theoretical} should be a user-supplied function that computes the theoretical expected value of the summary statistic. 
It must have an argument named \code{par} that will be the vector of parameter values for the model (the length and format of this vector are determined by the starting values in \code{startpar}). The function \code{theoretical} should also expect a second argument (the first argument other than \code{par}) containing values of the distance \eqn{r} for which the theoretical value of the summary statistic \eqn{K(r)} should be computed. The value returned by \code{theoretical} should be a vector of the same length as the given vector of \eqn{r} values. The argument \code{ctrl} determines the contrast criterion (the objective function that will be minimised). The algorithm minimises the criterion \deqn{ D(\theta)= \int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}} |\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r }{ D(theta) = integral from rmin to rmax of abs(Fhat(r)^q - F(theta,r)^q)^p } where \eqn{\theta}{theta} is the vector of parameters of the model, \eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the theoretical expected value of the summary statistic, and \eqn{p,q} are two exponents. The default is \code{q = 1/4}, \code{p=2} so that the contrast criterion is the integrated squared difference between the fourth roots of the two functions (Waagepetersen, 2007). The other arguments just make things print nicely. The argument \code{fvlab} contains labels for the component \code{fit} of the return value. The argument \code{explain} contains human-readable strings describing the data, the model and the summary statistic. The \code{"..."} argument of \code{mincontrast} can be used to pass extra arguments to the function \code{theoretical} and/or to the optimisation function \code{\link[stats]{optim}}. In this case, the function \code{theoretical} should also have a \code{"..."} argument and should ignore it (so that it ignores arguments intended for \code{\link[stats]{optim}}). 
} \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } \item{opt }{The return value from the optimizer \code{\link{optim}}.} \item{crtl }{The control parameters of the algorithm.} \item{info }{List of explanatory strings.} } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007). An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Rasmus Waagepetersen \email{rw@math.auc.dk}, adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{thomas.estK}}, } \keyword{spatial} \keyword{models} spatstat/man/foo.Rd0000644000176200001440000000241513333543263013751 0ustar liggesusers\name{foo} \alias{foo} \alias{plot.foo} \title{ Foo is Not a Real Name } \description{ The name \code{foo} is not a real name: it is a place holder, used to represent the name of any desired thing. The functions defined here simply print an explanation of the placeholder name \code{foo}. } \usage{ foo() \method{plot}{foo}(x, \dots) } \arguments{ \item{x}{Ignored.} \item{\dots}{Ignored.} } \details{ The name \code{foo} is used by computer scientists as a \emph{place holder}, to represent the name of any desired object or function. 
It is not the name of an actual object or function; it serves only as an example, to explain a concept. However, many users misinterpret this convention, and actually type the command \code{foo} or \code{foo()}. Then they email the package author to inform them that \code{foo} is not defined. To avoid this correspondence, we have now defined an object called \code{foo}. The function \code{foo()} prints a message explaining that \code{foo} is not really the name of a variable. The function can be executed simply by typing \code{foo} without parentheses. } \value{ Null. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{beginner}} } \examples{ foo } \keyword{documentation} spatstat/man/selfcrossing.psp.Rd0000644000176200001440000000200013333543264016457 0ustar liggesusers\name{selfcrossing.psp} \alias{selfcrossing.psp} \title{Crossing Points in a Line Segment Pattern} \description{ Finds any crossing points between the line segments in a line segment pattern. } \usage{ selfcrossing.psp(A) } \arguments{ \item{A}{ Line segment pattern (object of class \code{"psp"}). } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function finds any crossing points between different line segments in the line segment pattern \code{A}. A crossing point occurs whenever one of the line segments in \code{A} intersects another line segment in \code{A}, at a nonzero angle of intersection. } \seealso{ \code{\link{crossing.psp}}, \code{\link{psp.object}}, \code{\link{ppp.object}}. 
} \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a, col="green", main="selfcrossing.psp") P <- selfcrossing.psp(a) plot(P, add=TRUE, col="red") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/area.owin.Rd0000644000176200001440000000323213431146326015045 0ustar liggesusers\name{area.owin} \alias{area} \alias{area.owin} \alias{area.default} \alias{volume.owin} \title{Area of a Window} \description{ Computes the area of a window } \usage{ area(w) \method{area}{owin}(w) \method{area}{default}(w) \method{volume}{owin}(x) } \arguments{ \item{w}{A window, whose area will be computed. This should be an object of class \code{\link{owin}}, or can be given in any format acceptable to \code{\link{as.owin}()}. } \item{x}{Object of class \code{\link{owin}}} } \value{ A numerical value giving the area of the window. } \details{ If the window \code{w} is of type \code{"rectangle"} or \code{"polygonal"}, the area of this rectangular window is computed by analytic geometry. If \code{w} is of type \code{"mask"} the area of the discrete raster approximation of the window is computed by summing the binary image values and adjusting for pixel size. The function \code{volume.owin} is identical to \code{area.owin} except for the argument name. It is a method for the generic function \code{volume}. 
} \seealso{ \code{\link{perimeter}}, \code{\link{diameter.owin}}, \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ w <- unit.square() area(w) # returns 1.00000 k <- 6 theta <- 2 * pi * (0:(k-1))/k co <- cos(theta) si <- sin(theta) mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si)) area(mas) # returns approx area of k-gon mas <- as.mask(square(2), eps=0.01) X <- raster.x(mas) Y <- raster.y(mas) mas$m <- ((X - 1)^2 + (Y - 1)^2 <= 1) area(mas) # returns 3.14 approx } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/quadscheme.logi.Rd0000644000176200001440000001230413333543264016235 0ustar liggesusers\name{quadscheme.logi} \alias{quadscheme.logi} \title{Generate a Logistic Regression Quadrature Scheme from a Point Pattern} \description{ Generates a logistic regression quadrature scheme (an object of class \code{"logiquad"} inheriting from \code{"quad"}) from point patterns of data and dummy points. } \usage{ quadscheme.logi(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, \dots) } \arguments{ \item{data}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{dummy}{ The pattern of dummy points for the quadrature. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. If missing a sensible default is generated. } \item{dummytype}{ The name of the type of dummy points to use when \code{"dummy"} is missing. Currently available options are: \code{"stratrand"} (default), \code{"binomial"}, \code{"poisson"}, \code{"grid"} and \code{"transgrid"}. } \item{nd}{ Integer, or integer vector of length 2 controlling the intensity of dummy points when \code{"dummy"} is missing. } \item{mark.repeat}{ Repeating the dummy points for each level of a marked data pattern when \code{"dummy"} is missing. (See details.) } \item{\dots}{ Ignored. 
} } \value{ An object of class \code{"logiquad"} inheriting from \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is the primary method for producing a quadrature schemes for use by \code{\link{ppm}} when the logistic regression approximation (Baddeley et al. 2013) to the pseudolikelihood of the model is applied (i.e. when \code{method="logi"} in \code{\link{ppm}}). The function \code{\link{ppm}} fits a point process model to an observed point pattern. When used with the option \code{method="logi"} it requires a quadrature scheme consisting of the original data point pattern and an additional pattern of dummy points. Such quadrature schemes are represented by objects of class \code{"logiquad"}. Quadrature schemes are created by the function \code{quadscheme.logi}. The arguments \code{data} and \code{dummy} specify the data and dummy points, respectively. There is a sensible default for the dummy points. Alternatively the dummy points may be specified arbitrarily and given in any format recognised by \code{\link{as.ppp}}. The quadrature region is the region over which we are integrating, and approximating integrals by finite sums. If \code{dummy} is a point pattern object (class \code{"ppp"}) then the quadrature region is taken to be \code{Window(dummy)}. If \code{dummy} is just a list of \eqn{x, y} coordinates then the quadrature region defaults to the observation window of the data pattern, \code{Window(data)}. If \code{dummy} is missing, then a pattern of dummy points will be generated, taking account of the optional arguments \code{dummytype}, \code{nd}, and \code{mark.repeat}. 
The currently accepted values for \code{dummytype} are: \itemize{ \item \code{"grid"} where the frame of the window is divided into a \code{nd * nd} or \code{nd[1] * nd[2]} regular grid of tiles and the centers constitutes the dummy points. \item \code{"transgrid"} where a regular grid as above is translated by a random vector. \item \code{"stratrand"} where each point of a regular grid as above is randomly translated within its tile. \item \code{"binomial"} where \code{nd * nd} or \code{nd[1] * nd[2]} points are generated uniformly in the frame of the window. \code{"poisson"} where a homogeneous Poisson point process with intensity \code{nd * nd} or \code{nd[1] * nd[2]} is generated within the frame of observation window. } Then if the window is not rectangular, any dummy points lying outside it are deleted. If \code{data} is a multitype point pattern the dummy points should also be marked (with the same levels of the marks as \code{data}). If \code{dummy} is missing and the dummy pattern is generated by \code{quadscheme.logi} the default behaviour is to attach a uniformly distributed mark (from the levels of the marks) to each dummy point. Alternatively, if \code{mark.repeat=TRUE} each dummy point is repeated as many times as there are levels of the marks with a distinct mark value attached to it. Finally, each point (data and dummy) is assigned the weight 1. The weights are never used and only appear to be compatible with the class \code{"quad"} from which the \code{"logiquad"} object inherits. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. } \seealso{ \code{\link{ppm}}, \code{\link{as.ppp}} } \examples{ data(simdat) Q <- quadscheme.logi(simdat) } \author{\adrian , \rolf and \ege . 
} \keyword{spatial} \keyword{datagen} spatstat/man/dppkernel.Rd0000644000176200001440000000121513333543263015147 0ustar liggesusers\name{dppkernel} \alias{dppkernel} \title{Extract Kernel from Determinantal Point Process Model Object} \description{ Returns the kernel of a determinantal point process model as a function of one argument \code{x}. } \usage{dppkernel(model, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Arguments passed to \code{\link{dppapproxkernel}} if the exact kernel is unknown} } \value{A function} \author{ \adrian \rolf and \ege } \examples{ kernelMatern <- dppkernel(dppMatern(lambda = 100, alpha=.01, nu=1, d=2)) plot(kernelMatern, xlim = c(0,0.1)) } \keyword{spatial} \keyword{models} spatstat/man/anova.mppm.Rd0000644000176200001440000001152613421320305015231 0ustar liggesusers\name{anova.mppm} \alias{anova.mppm} \title{ANOVA for Fitted Point Process Models for Replicated Patterns} \description{ Performs analysis of deviance for one or more point process models fitted to replicated point pattern data. } \usage{ \method{anova}{mppm}(object, \dots, test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) } \arguments{ \item{object}{ Object of class \code{"mppm"} representing a point process model that was fitted to replicated point patterns. } \item{\dots}{ Optional. Additional objects of class \code{"mppm"}. } \item{test}{ Type of hypothesis test to perform. A character string, partially matching one of \code{"Chisq"}, \code{"LRT"}, \code{"Rao"}, \code{"score"}, \code{"F"} or \code{"Cp"}, or \code{NULL} indicating that no test should be performed. } \item{adjust}{ Logical value indicating whether to correct the pseudolikelihood ratio when some of the models are not Poisson processes. 
} \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}) of the variance of the fitted coefficients of each model. Relevant only when some of the models are not Poisson and \code{adjust=TRUE}. } \item{warn}{ Logical value indicating whether to issue warnings if problems arise. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for comparing several fitted point process models of class \code{"mppm"}, usually generated by the model-fitting function \code{\link{mppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, the `deviance' differences in this table are 'pseudo-deviances' equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). It is not valid to compare these values to the chi-squared distribution. In this case, if \code{adjust=TRUE} (the default), the pseudo-deviances will be adjusted using the method of Pace et al (2011) and Baddeley, Turner and Rubak (2015) so that the chi-squared test is valid. It is strongly advisable to perform this adjustment. The argument \code{test} determines which hypothesis test, if any, will be performed to compare the models. The argument \code{test} should be a character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, or \code{NULL}. 
The first option \code{"Chisq"} gives the likelihood ratio test based on the asymptotic chi-squared distribution of the deviance difference. The meaning of the other options is explained in \code{\link{anova.glm}}. For random effects models, only \code{"Chisq"} is available, and again gives the likelihood ratio test. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) #' test for loglinear trend in x coordinate mod0 <- mppm(X~1, data=H, Poisson()) modx <- mppm(X~x, data=H, Poisson()) anova(mod0, modx, test="Chi") # not significant anova(modx, test="Chi") # not significant #' test for inhibition mod0S <- mppm(X~1, data=H, Strauss(2)) anova(mod0, mod0S, test="Chi") # significant! #' test for trend after accounting for inhibition modxS <- mppm(X~x, data=H, Strauss(2)) anova(mod0S, modxS, test="Chi") # not significant } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. 
} \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/colouroutputs.Rd0000644000176200001440000000321113333543263016130 0ustar liggesusers\name{colouroutputs} \alias{colouroutputs} \alias{colouroutputs<-} \title{ Extract or Assign Colour Values in a Colour Map } \description{ Extract the colour values in a colour map, or assign new colour values. } \usage{ colouroutputs(x) colouroutputs(x) <- value } \arguments{ \item{x}{ A colour map (object of class \code{"colourmap"}). } \item{value}{ A vector of values that can be interpreted as colours. } } \details{ An object of class \code{"colourmap"} is effectively a function that maps its inputs (numbers or factor levels) to colour values. The command \code{colouroutputs(x)} extracts the colour values in the colour map \code{x}. The assignment \code{colouroutputs(x) <- value} replaces the colour values in the colour map \code{x} by the entries in \code{value}. The replacement vector \code{value} should have the same length as \code{colouroutputs(x)}, and its entries should be interpretable as colours. To change only some of the colour values in a colour map, it may be easier to use \code{\link{tweak.colourmap}}. } \value{ The result of \code{colouroutputs} is a character vector of colour values. The result of the assignment \code{colouroutputs(x) <- value} is another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link{tweak.colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. } \examples{ m <- colourmap(rainbow(5), range=c(0,1)) m # reverse order of colours colouroutputs(m) <- rev(colouroutputs(m)) m } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{color} spatstat/man/as.layered.Rd0000644000176200001440000000375713333543262015226 0ustar liggesusers\name{as.layered} \alias{as.layered} \alias{as.layered.default} \alias{as.layered.ppp} \alias{as.layered.splitppp} \alias{as.layered.solist} \alias{as.layered.listof} \alias{as.layered.msr} \title{Convert Data To Layered Object} \description{ Converts spatial data into a layered object. } \usage{ as.layered(X) \method{as.layered}{default}(X) \method{as.layered}{ppp}(X) \method{as.layered}{splitppp}(X) \method{as.layered}{solist}(X) \method{as.layered}{listof}(X) \method{as.layered}{msr}(X) } \arguments{ \item{X}{ Some kind of spatial data. } } \value{ An object of class \code{"layered"} (see \code{\link{layered}}). } \details{ This function converts the object \code{X} into an object of class \code{"layered"}. The argument \code{X} should contain some kind of spatial data such as a point pattern, window, or pixel image. If \code{X} is a simple object then it will be converted into a \code{layered} object containing only one layer which is equivalent to \code{X}. If \code{X} can be interpreted as consisting of multiple layers of data, then the result will be a \code{layered} object consisting of these separate layers of data. \itemize{ \item if \code{X} is a list of class \code{"listof"} or \code{"solist"}, then \code{as.layered(X)} consists of several layers, one for each entry in the list \code{X}; \item if \code{X} is a multitype point pattern, then \code{as.layered(X)} consists of several layers, each containing the sub-pattern consisting of points of one type; \item if \code{X} is a vector-valued measure, then \code{as.layered(X)} consists of several layers, each containing a scalar-valued measure. 
Here \eqn{k(t)} is the \bold{standard form} of the kernel, which has support \eqn{[-1,1]} and standard deviation \eqn{\sigma = 1/c}{sigma = 1/c} where \code{c = kernel.factor(kernel)}.
vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{G} function should be evaluated.
Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity. } \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result. } } \details{ This command computes estimates of the inhomogeneous \eqn{G}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the nearest-neighbour distance distribution function \eqn{G} for homogeneous point patterns computed by \code{\link{Gest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{G} function is computed using the border correction, equation (7) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. 
The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Finhom}}, \code{\link{Jinhom}}, \code{\link{Gest}} } \examples{ \dontrun{ plot(Ginhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Ginhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.linim.Rd0000644000176200001440000001443713616267207015267 0ustar liggesusers\name{plot.linim} \alias{plot.linim} \title{ Plot Pixel Image on Linear Network } \description{ Given a pixel image on a linear network, the pixel values are displayed either as colours or as line widths. } \usage{ \method{plot}{linim}(x, ..., style = c("colour", "width"), scale, adjust = 1, fatten = 0, negative.args = list(col=2), legend=TRUE, leg.side=c("right", "left", "bottom", "top"), leg.sep=0.1, leg.wid=0.1, leg.args=list(), leg.scale=1, zlim, box=FALSE, do.plot=TRUE) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"linim"}. } \item{\dots}{ Extra graphical parameters, passed to \code{\link{plot.im}} if \code{style="colour"}, or to \code{\link[graphics]{polygon}} if \code{style="width"}. } \item{style}{ Character string (partially matched) specifying the type of plot. See Details. } \item{scale}{ Physical scale factor for representing the pixel values as line widths. 
} \item{adjust}{ Adjustment factor for the conversion of pixel value to line width, when \code{style="width"}. } \item{fatten}{ Distance by which the line segments should be thickened, when \code{style="colour"}. } \item{negative.args}{ A list of arguments to be passed to \code{\link[graphics]{polygon}} specifying how to plot negative values of \code{x} when \code{style="width"}. } \item{legend}{ Logical value indicating whether to plot a legend (colour ribbon or scale bar). } \item{leg.side}{ Character string (partially matched) indicating where to display the legend relative to the main image. } \item{leg.sep}{ Factor controlling the space between the legend and the image. } \item{leg.wid}{ Factor controlling the width of the legend. } \item{leg.scale}{ Rescaling factor for annotations on the legend. The values on the numerical scale printed beside the legend will be multiplied by this rescaling factor. } \item{leg.args}{ List of additional arguments passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} or \code{\link[graphics]{text.default}} to control the display of the legend. These may override the \code{\dots} arguments. } \item{zlim}{ The range of numerical values that should be mapped. A numeric vector of length 2. Defaults to the range of values of \code{x}. } \item{box}{ Logical value indicating whether to draw a bounding box. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \details{ This is the \code{plot} method for objects of class \code{"linim"}. Such an object represents a pixel image defined on a linear network. If \code{style="colour"} (the default) then the pixel values of \code{x} are plotted as colours, using \code{\link{plot.im}}. The mapping from pixel values to colours is determined by any additional arguments \code{\dots} which are passed to \code{\link{plot.im}}. 
If \code{style="width"} then the pixel values of \code{x} are used to determine the widths of thick lines centred on the line segments of the linear network. The mapping from pixel values to line widths is determined by the arguments \code{scale} and \code{adjust}. The plotting of colours and borders of the lines is controlled by the additional arguments \code{\dots} which are passed to \code{\link[graphics]{polygon}}. A different set of colours and borders can be assigned to negative pixel values by passing a list of arguments in \code{negative.args} as shown in the Examples. A legend is displayed alongside the plot if \code{legend=TRUE} (the default). The legend displays the relationship between pixel values and colours (if \code{style="colour"}) or between pixel values and line widths (if \code{style="width"}). The plotting of the legend itself is controlled by the arguments \code{leg.side}, \code{leg.sep}, \code{leg.wid}, \code{leg.scale} and the list of arguments \code{leg.args}, which are described above. If \code{style="colour"}, these arguments are mapped to the arguments \code{ribside}, \code{ribsep}, \code{ribwid}, \code{ribscale} and \code{ribargs} respectively, which are passed to \code{\link{plot.im}}. } \section{Thin lines}{ When \code{style="colour"} it often appears that the lines are drawn too thin. This occurs because \code{x} is a pixel image, in which the only pixels that have a defined value are those which lie directly over the network. To make the lines appear thicker in the plot, use the argument \code{fatten}. The domain of the image will be expanded by a distance equal to \code{fatten/2} in every direction using \code{\link{dilation.owin}}; the pixel values will be extrapolated to this expanded domain using \code{\link{nearestValue}}. This may improve the visual appearance of the plot. } \value{ If \code{style="colour"}, the result is an object of class \code{"colourmap"} specifying the colour map used. 
If \code{style="width"}, the result is a numeric value \code{v} giving the physical scale: one unit of pixel value is represented as \code{v} physical units on the plot. The result also has an attribute \code{"bbox"} giving a bounding box for the plot. The bounding box includes the ribbon or scale bar, if present, but not the main title. } \author{ \adrian } \seealso{ \code{\link{linim}}, \code{\link{plot.im}}, \code{\link[graphics]{polygon}} } \references{ Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. } \examples{ X <- linfun(function(x,y,seg,tp){y^2+x}, simplenet) X <- as.linim(X) plot(X, main="Colour represents function value") plot(X, fatten=0.02, main="fattened") plot(X, style="width", main="Width proportional to function value") # signed values f <- linfun(function(x,y,seg,tp){y-x}, simplenet) plot(f, style="w", main="Negative values in red") plot(f, style="w", negative.args=list(density=10), main="Negative values are hatched") } \keyword{spatial} spatstat/man/PairPiece.Rd0000644000176200001440000000746213333543262015035 0ustar liggesusers\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{vector of jump points for the potential function} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. 
\deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x[1],\ldots,x[n]) = alpha . product { b(x[i]) } . product { h(x[i], x[j]) } }
\seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{rmh.ppm}} }
} } \details{ These functions rescale a dataset \code{x} so that its values range exactly between the limits \code{from} and \code{to}. The method for pixel images (objects of class \code{"im"}) applies this scaling to the pixel values of \code{x}. Rescaling cannot be performed if the values in \code{x} are not interpretable as numeric, or if the values in \code{x} are all equal. } \value{ An object of the same type as \code{x}. } \seealso{ \code{\link{scale}} } \examples{ X <- as.im(function(x,y) {x+y+3}, unit.square()) summary(X) Y <- scaletointerval(X) summary(Y) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/Jinhom.Rd0000644000176200001440000001523313434220172014405 0ustar liggesusers\name{Jinhom} \alias{Jinhom} \title{ Inhomogeneous J-function } \description{ Estimates the inhomogeneous \eqn{J} function of a non-stationary point pattern. } \usage{ Jinhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{J} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. 
vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{J} function should be evaluated.
The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}.
The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Finhom}}, \code{\link{Jest}} } \examples{ \dontrun{ plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Jinhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairsat.family.Rd0000644000176200001440000000435213333543263016113 0ustar liggesusers\name{pairsat.family} \alias{pairsat.family} \title{Saturated Pairwise Interaction Point Process Family} \description{ An object describing the Saturated Pairwise Interaction family of point process models } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the ``saturated pairwise interaction'' family of point process models. If you need to create a specific interaction model for use in spatial pattern analysis, use the function \code{\link{Saturated}()} or the two existing implementations of models in this family, \code{\link{Geyer}()} and \code{\link{SatPiece}()}. 
Geyer (1999) introduced the ``saturation process'', a modification of the Strauss process in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{c}. This model is implemented in the function \code{\link{Geyer}()}. The present class \code{pairsat.family} is the extension of this saturation idea to all pairwise interactions. Note that the resulting models are no longer pairwise interaction processes - they have interactions of infinite order. \code{pairsat.family} is an object of class \code{"isf"} containing a function \code{pairwise$eval} for evaluating the sufficient statistics of any saturated pairwise interaction point process model in which the original pair potentials take an exponential family form. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{Geyer}} to create the Geyer saturation process. \code{\link{SatPiece}} to create a saturated process with piecewise constant pair potential. \code{\link{Saturated}} to create a more general saturation model. Other families: \code{\link{inforder.family}}, \code{\link{ord.family}}, \code{\link{pairwise.family}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/Iest.Rd0000644000176200001440000001234713333543262014076 0ustar liggesusers\name{Iest} \alias{Iest} \title{Estimate the I-function} \description{ Estimates the summary function \eqn{I(r)} for a multitype point pattern. } \usage{ Iest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{I(r)} will be computed. 
An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector of values for the argument \eqn{r} at which \eqn{I(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Vector of character strings specifying the edge correction(s) to be used by \code{\link{Jest}}. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{I} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{I(r)} computed from the border-corrected estimates of \eqn{J} functions} \item{km}{the spatial Kaplan-Meier estimator of \eqn{I(r)} computed from the Kaplan-Meier estimates of \eqn{J} functions} \item{han}{the Hanisch-style estimator of \eqn{I(r)} computed from the Hanisch-style estimates of \eqn{J} functions} \item{un}{the uncorrected estimate of \eqn{I(r)} computed from the uncorrected estimates of \eqn{J} } \item{theo}{the theoretical value of \eqn{I(r)} for a stationary Poisson process: identically equal to \eqn{0} } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{I} function summarises the dependence between types in a multitype point process (Van Lieshout and Baddeley, 1999) It is based on the concept of the \eqn{J} function for an unmarked point process (Van Lieshout and Baddeley, 1996). See \code{\link{Jest}} for information about the \eqn{J} function. 
The \eqn{I} function is defined as \deqn{ % I(r) = \sum_{i=1}^m p_i J_{ii}(r) % - J_{\bullet\bullet}(r)}{ % I(r) = (sum p[i] Jii(r)) - J(r) } where \eqn{J_{\bullet\bullet}}{J} is the \eqn{J} function for the entire point process ignoring the marks, while \eqn{J_{ii}}{Jii} is the \eqn{J} function for the process consisting of points of type \eqn{i} only, and \eqn{p_i}{p[i]} is the proportion of points which are of type \eqn{i}. The \eqn{I} function is designed to measure dependence between points of different types, even if the points are not Poisson. Let \eqn{X} be a stationary multitype point process, and write \eqn{X_i}{X[i]} for the process of points of type \eqn{i}. If the processes \eqn{X_i}{X[i]} are independent of each other, then the \eqn{I}-function is identically equal to \eqn{0}. Deviations \eqn{I(r) < 1} or \eqn{I(r) > 1} typically indicate negative and positive association, respectively, between types. See Van Lieshout and Baddeley (1999) for further information. An estimate of \eqn{I} derived from a multitype spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{I(r)} is compared against the constant function \eqn{0}. Deviations \eqn{I(r) < 1} or \eqn{I(r) > 1} may suggest negative and positive association, respectively. This algorithm estimates the \eqn{I}-function from the multitype point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial marked point process in the plane, observed through a bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. It must be a multitype point pattern (it must have a \code{marks} vector which is a \code{factor}). 
The function \code{\link{Jest}} is called to compute estimates of the \eqn{J} functions in the formula above. In fact three different estimates are computed using different edge corrections. See \code{\link{Jest}} for information. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jest}} } \examples{ data(amacrine) Ic <- Iest(amacrine) plot(Ic, main="Amacrine Cells data") # values are below I= 0, suggesting negative association # between 'on' and 'off' cells. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.marked.Rd0000644000176200001440000000251213333543263015041 0ustar liggesusers\name{is.marked} \alias{is.marked} \title{Test Whether Marks Are Present} \description{ Generic function to test whether a given object (usually a point pattern or something related to a point pattern) has ``marks'' attached to the points. } \usage{ is.marked(X, \dots) } \arguments{ \item{X}{ Object to be inspected } \item{\dots}{ Other arguments. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is marked. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. Other objects related to point patterns, such as point process models, may involve marked points. This function tests whether the object \code{X} contains or involves marked points. 
It is generic; methods are provided for point patterns (objects of class \code{"ppp"}) and point process models (objects of class \code{"ppm"}). } \seealso{ \code{\link{is.marked.ppp}}, \code{\link{is.marked.ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/affine.linnet.Rd0000644000176200001440000000505513333543262015710 0ustar liggesusers\name{affine.linnet} \alias{affine.linnet} \alias{shift.linnet} \alias{rotate.linnet} \alias{rescale.linnet} \alias{scalardilate.linnet} \title{Apply Geometrical Transformations to a Linear Network} \description{ Apply geometrical transformations to a linear network. } \usage{ \method{affine}{linnet}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{linnet}(X, vec=c(0,0), \dots, origin=NULL) \method{rotate}{linnet}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{linnet}(X, f, \dots) \method{rescale}{linnet}(X, s, unitname) } \arguments{ \item{X}{Linear network (object of class \code{"linnet"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } \item{origin}{ Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. A value acceptable to the function \code{\link{unitname<-}} } } \value{ Another linear network (of class \code{"linnet"}) representing the result of applying the geometrical transformation. 
} \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"linnet"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{linnet}} and \code{\link{as.linnet}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. } \examples{ U <- rotate(simplenet, pi) stretch <- diag(c(2,3)) Y <- affine(simplenet, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(simplenet, mat=shear, vec=c(0, 1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/predict.rppm.Rd0000644000176200001440000000511413333543264015575 0ustar liggesusers\name{predict.rppm} \alias{fitted.rppm} \alias{predict.rppm} \title{ Make Predictions From a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, compute the predicted intensity of the model. } \usage{ \method{predict}{rppm}(object, \dots) \method{fitted}{rppm}(object, \dots) } \arguments{ \item{object}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Optional arguments passed to \code{\link{predict.ppm}} to specify the locations where prediction is required. (Ignored by \code{fitted.rppm}) } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. They compute the fitted intensity of a point process model. The argument \code{object} should be a fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. 
The \code{fitted} method computes the fitted intensity at the original data points, yielding a numeric vector with one entry for each data point. The \code{predict} method computes the fitted intensity at any locations. By default, predictions are calculated at a regular grid of spatial locations, and the result is a pixel image giving the predicted intensity values at these locations. Alternatively, predictions can be performed at other locations, or a finer grid of locations, or only at certain specified locations, using additional arguments \code{\dots} which will be interpreted by \code{\link{predict.ppm}}. Common arguments are \code{ngrid} to increase the grid resolution, \code{window} to specify the prediction region, and \code{locations} to specify the exact locations of predictions. See \code{\link{predict.ppm}} for details of these arguments. Predictions are computed by evaluating the explanatory covariates at each desired location, and applying the recursive partitioning rule to each set of covariate values. } \value{ The result of \code{fitted.rppm} is a numeric vector. The result of \code{predict.rppm} is a pixel image, a list of pixel images, or a numeric vector. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}} } \examples{ fit <- rppm(unmark(gorillas) ~ vegetation, data=gorillas.extra) plot(predict(fit)) lambdaX <- fitted(fit) lambdaX[1:5] # Mondriaan pictures plot(predict(rppm(redwoodfull ~ x + y))) points(redwoodfull) } \keyword{spatial} \keyword{models} spatstat/man/rthinclumps.Rd0000644000176200001440000000401713525477112015540 0ustar liggesusers\name{rthinclumps} \alias{rthinclumps} \title{Random Thinning of Clumps} \description{ Finds the topologically-connected clumps of a spatial region and randomly deletes some of the clumps. } \usage{ rthinclumps(W, p, \dots) } \arguments{ \item{W}{ Window (object of class \code{"owin"} or pixel image (object of class \code{"im"}). 
} \item{p}{ Probability of \emph{retaining} each clump. A single number between 0 and 1. } \item{\dots}{ Additional arguments passed to \code{\link{connected.im}} or \code{\link{connected.owin}} to determine the connected clumps. } } \details{ The argument \code{W} specifies a region of space, typically consisting of several clumps that are not connected to each other. The algorithm randomly deletes or retains each clump. The fate of each clump is independent of other clumps. If \code{W} is a spatial window (class \code{"owin"}) then it will be divided into clumps using \code{\link{connected.owin}}. Each clump will either be retained (with probability \code{p}) or deleted in its entirety (with probability \code{1-p}). If \code{W} is a pixel image (class \code{"im"}) then its domain will be divided into clumps using \code{\link{connected.im}}. The default behaviour depends on the type of pixel values. If the pixel values are logical, then the spatial region will be taken to consist of all pixels whose value is \code{TRUE}. Otherwise, the spatial region is taken to consist of all pixels whose value is defined (i.e. not equal to \code{NA}). This behaviour can be changed using the argument \code{background} passed to \code{\link{connected.im}}. The result is a window comprising all the clumps that were retained. } \value{ Window (object of class \code{"owin"}). } \author{ \adrian. } \seealso{ \code{\link{rthin}} for thinning other kinds of objects. } \examples{ A <- (distmap(cells) < 0.06) opa <- par(mfrow=c(1,2)) plot(A) plot(rthinclumps(A, 0.5)) par(opa) } \keyword{spatial} \keyword{datagen} \keyword{manip} spatstat/man/pppmatching.Rd0000644000176200001440000000504313333543264015501 0ustar liggesusers\name{pppmatching} \alias{pppmatching} \title{Create a Point Matching} \description{ Creates an object of class \code{"pppmatching"} representing a matching of two planar point patterns (objects of class \code{"ppp"}). 
} \usage{ pppmatching(X, Y, am, type = NULL, cutoff = NULL, q = NULL, mdist = NULL) } \arguments{ \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).} \item{am}{ An \code{npoints(X)} by \code{npoints(Y)} matrix with entries \eqn{\geq 0}{>= 0} that specifies which points are matched and with what weight; alternatively, an object that can be coerced to this form by \code{as.matrix}. } \item{type}{ A character string giving the type of the matching. One of \code{"spa"}, \code{"ace"} or \code{"mat"}, or \code{NULL} for a generic or unknown matching. } \item{cutoff, q}{ Numerical values specifying the cutoff value \eqn{> 0} for interpoint distances and the order \eqn{q \in [1,\infty]}{q in [0,Inf]} of the average that is applied to them. \code{NULL} if not applicable or unknown. } \item{mdist}{ Numerical value for the distance to be associated with the matching. } } \details{ The argument \code{am} is interpreted as a "generalized adjacency matrix": if the \code{[i,j]}-th entry is positive, then the \code{i}-th point of \code{X} and the \code{j}-th point of \code{Y} are matched and the value of the entry gives the corresponding weight of the match. For an unweighted matching all the weights should be set to \eqn{1}. The remaining arguments are optional and allow to save additional information about the matching. See the help files for \code{\link{pppdist}} and \code{\link{matchingdist}} for details on the meaning of these parameters. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppmatching.object}} \code{\link{matchingdist}} } \examples{ # a random unweighted complete matching X <- runifpoint(10) Y <- runifpoint(10) am <- r2dtable(1, rep(1,10), rep(1,10))[[1]] # generates a random permutation matrix m <- pppmatching(X, Y, am) summary(m) m$matrix plot(m) # a random weighted complete matching X <- runifpoint(7) Y <- runifpoint(7) am <- r2dtable(1, rep(10,7), rep(10,7))[[1]]/10 # generates a random doubly stochastic matrix m2 <- pppmatching(X, Y, am) summary(m2) m2$matrix plot(m2) m3 <- pppmatching(X, Y, am, "ace") m4 <- pppmatching(X, Y, am, "mat") } \keyword{spatial} \keyword{datagen} spatstat/man/eval.linim.Rd0000644000176200001440000000572313357016145015231 0ustar liggesusers\name{eval.linim} \alias{eval.linim} \title{Evaluate Expression Involving Pixel Images on Linear Network} \description{ Evaluates any expression involving one or more pixel images on a linear network, and returns a pixel image on the same linear network. } \usage{ eval.linim(expr, envir, harmonize=TRUE, warn=TRUE) } \arguments{ \item{expr}{An expression in the \R language, involving the names of objects of class \code{"linim"}.} \item{envir}{Optional. The environment in which to evaluate the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } \item{warn}{ Logical. Whether to issue a warning if the pixel grids were inconsistent. } } \details{ This function a wrapper to make it easier to perform pixel-by-pixel calculations. It is one of several functions whose names begin with \code{eval} which work on objects of different types. This particular function is designed to work with objects of class \code{"linim"} which represent pixel images on a linear network. Suppose \code{X} is a pixel image on a linear network (object of class \code{"linim"}. 
Then \code{eval.linim(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image on the same linear network. Suppose \code{X} and \code{Y} are two pixel images on the same linear network, with compatible pixel dimensions. Then \code{eval.linim(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image on the same linear network. In general, \code{expr} can be any expression in the R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.linim} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised. There must be at least one linear pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=FALSE}, images that are incompatible will cause an error. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible; if \code{warn=TRUE}, a warning will be issued. } \value{ An image object of class \code{"linim"}. 
} \seealso{ \code{\link{eval.im}}, \code{\link{linim}} } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X Y <- linfun(function(x,y,seg,tp){y^2+x}, simplenet) Y <- as.linim(Y) eval.linim(X + 3) eval.linim(X - Y) eval.linim(abs(X - Y)) Z <- eval.linim(sin(X * pi) + Y) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/spokes.Rd0000644000176200001440000000536613333543264014503 0ustar liggesusers\name{spokes} \alias{spokes} \title{Spokes pattern of dummy points} \description{ Generates a pattern of dummy points in a window, given a data point pattern. The dummy points lie on the radii of circles emanating from each data point. } \usage{ spokes(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault = 1) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates of data points, or a list with components \code{x} and \code{y}, or a point pattern (an object of class \code{ppp}). } \item{y}{ Vector of \eqn{y} coordinates of data points. Ignored unless \code{x} is a vector. } \item{nrad}{ Number of radii emanating from each data point. } \item{nper}{ Number of dummy points per radius. } \item{fctr}{ Scale factor. Length of largest spoke radius is \code{fctr * M} where \code{M} is the mean nearest neighbour distance for the data points. } \item{Mdefault}{ Value of \code{M} to be used if \code{x} has length 1. } } \value{ If argument \code{x} is a point pattern, a point pattern with window equal to that of \code{x}. Otherwise a list with two components \code{x} and \code{y}. In either case the components \code{x} and \code{y} of the value are numeric vectors giving the coordinates of the dummy points. } \details{ This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}). Given the data points, the function creates a collection of \code{nrad * nper * length(x)} dummy points. 
Around each data point \code{(x[i],y[i])} there are \code{nrad * nper} dummy points, lying on \code{nrad} radii emanating from \code{(x[i],y[i])}, with \code{nper} dummy points equally spaced along each radius. The (equal) spacing of dummy points along each radius is controlled by the factor \code{fctr}. The distance from a data point to the furthest of its associated dummy points is \code{fctr * M} where \code{M} is the mean nearest neighbour distance for the data points. If there is only one data point the nearest neighbour distance is infinite, so the value \code{Mdefault} will be used in place of \code{M}. If \code{x} is a point pattern, then the value returned is also a point pattern, which is clipped to the window of \code{x}. Hence there may be fewer than \code{nrad * nper * length(x)} dummy points in the pattern returned. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{gridcentres}}, \code{\link{stratrand}} } \examples{ dat <- runifrect(10) dum <- spokes(dat$x, dat$y, 5, 3, 0.7) plot(dum) Q <- quadscheme(dat, dum, method="dirichlet") plot(Q, tiles=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/update.detpointprocfamily.Rd0000644000176200001440000000106113333543264020360 0ustar liggesusers\name{update.detpointprocfamily} \alias{update.detpointprocfamily} \title{Set Parameter Values in a Determinantal Point Process Model} \description{ Set parameter values in a determinantal point process model object. } \usage{ \method{update}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{object of class \code{"detpointprocfamily"}.} \item{\dots}{ arguments of the form \code{tag=value} specifying the parameters values to set. 
} } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/rPenttinen.Rd0000644000176200001440000001013613602545270015312 0ustar liggesusers\name{rPenttinen} \alias{rPenttinen} \title{Perfect Simulation of the Penttinen Process} \description{ Generate a random pattern of points, a simulated realisation of the Penttinen process, using a perfect simulation algorithm. } \usage{ rPenttinen(beta, gamma=1, R, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ Interaction strength parameter (a number between 0 and 1). } \item{R}{ disc radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Penttinen point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance $d$. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{R} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. 
The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \Jyvaskyla Studies in Computer Science, Economics and Statistics \bold{7}, University of \Jyvaskyla, Finland. } \author{ \adrian, based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rPenttinen(50, 0.5, 0.02) Z <- rPenttinen(50, 0.5, 0.01, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{Penttinen}}. 
\code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}. } \keyword{spatial} \keyword{datagen} spatstat/man/plot.influence.ppm.Rd0000644000176200001440000000474113333543264016713 0ustar liggesusers\name{plot.influence.ppm} \alias{plot.influence.ppm} \title{ Plot Influence Measure } \description{ Plots an influence measure that has been computed by \code{\link{influence.ppm}}. } \usage{ \method{plot}{influence.ppm}(x, ..., multiplot=TRUE) } \arguments{ \item{x}{ Influence measure (object of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plotting. } \item{multiplot}{ Logical value indicating whether it is permissible to plot more than one panel. This happens if the original point process model is multitype. } } \details{ This is the plot method for objects of class \code{"influence.ppm"}. These objects are computed by the command \code{\link{influence.ppm}}. For a point process model fitted by maximum likelihood or maximum pseudolikelihood (the default), influence values are associated with the data points. The display shows circles centred at the data points with radii proportional to the influence values. If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one such display for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data points regardless of type. For a model fitted by logistic composite likelihood (\code{method="logi"} in \code{\link{ppm}}) influence values are associated with the data points and also with the dummy points used to fit the model. The display consist of two panels, for the data points and dummy points respectively, showing circles with radii proportional to the influence values. 
If the original data were a multitype point pattern, then if \code{multiplot=TRUE} (the default), there is one pair of panels for each possible type of point, while if \code{multiplot=FALSE} there is a single plot combining all data and dummy points regardless of type. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. } \value{ None. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat/man/distmap.ppp.Rd0000644000176200001440000000420113333543263015420 0ustar liggesusers\name{distmap.ppp} \alias{distmap.ppp} \title{ Distance Map of Point Pattern } \description{ Computes the distance from each pixel to the nearest point in the given point pattern. } \usage{ \method{distmap}{ppp}(X, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has attributes \code{"index"} and \code{"bdry"} which are also pixel images. } \details{ The ``distance map'' of a point pattern \eqn{X} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}. This function computes the distance map of the point pattern \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest point of the pattern \code{X}. 
Additionally, the return value has two attributes, \code{"index"} and \code{"bdry"}, which are also pixel images. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. The grey values in \code{"index"} are integers identifying which point of \code{X} is closest. This is a method for the generic function \code{\link{distmap}}. Note that this function gives the distance from the \emph{centre of each pixel} to the nearest data point. To compute the exact distance from a given spatial location to the nearest data point in \code{X}, use \code{\link{distfun}} or \code{\link{nncross}}. } \seealso{ Generic function \code{\link{distmap}} and other methods \code{\link{distmap.psp}}, \code{\link{distmap.owin}}. Generic function \code{\link{distfun}}. Nearest neighbour distance \code{\link{nncross}} } \examples{ data(cells) U <- distmap(cells) \dontrun{ plot(U) plot(attr(U, "bdry")) plot(attr(U, "index")) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/K3est.Rd0000644000176200001440000000662513333543262014165 0ustar liggesusers\name{K3est} \Rdversion{1.1} \alias{K3est} \title{ K-function of a Three-Dimensional Point Pattern } \description{ Estimates the \eqn{K}-function from a three-dimensional point pattern. } \usage{ K3est(X, \dots, rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), ratio=FALSE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{ratio}{ Logical. 
If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the three-dimensional \eqn{K} function is \deqn{ K_3(r) = \frac 1 \lambda E(N(\Phi, x, r) \mid x \in \Phi) }{ K3(r) = (1/lambda) E(N(Phi,x,r) | x in Phi) } where \eqn{\lambda}{lambda} is the intensity of the process (the expected number of points per unit volume) and \eqn{N(\Phi,x,r)}{N(Phi,x,r)} is the number of points of \eqn{\Phi}{Phi}, other than \eqn{x} itself, which fall within a distance \eqn{r} of \eqn{x}. This is the three-dimensional generalisation of Ripley's \eqn{K} function for two-dimensional point processes (Ripley, 1977). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is renormalised to give the estimate of \eqn{K_3(r)}{K3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. 
\emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link{F3est}}, \code{\link{G3est}}, \code{\link{pcf3est}} } \examples{ X <- rpoispp3(42) Z <- K3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat/man/Extract.solist.Rd0000644000176200001440000000306013333543263016111 0ustar liggesusers\name{Extract.solist} \alias{[.solist} \alias{[<-.solist} \title{Extract or Replace Subset of a List of Spatial Objects} \description{ Extract or replace some entries in a list of spatial objects, or extract a designated sub-region in each object. } \usage{ \method{[}{solist}(x, i, \dots) \method{[}{solist}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"solist"} representing a list of two-dimensional spatial objects. } \item{i}{ Subset index. Any valid subset index for vectors in the usual \R sense, or a window (object of class \code{"owin"}). } \item{value}{ Replacement value for the subset. } \item{\dots}{Ignored.} } \value{ Another object of the same class as \code{x}. } \details{ These are methods for extracting and replacing subsets for the class \code{"solist"}. The argument \code{x} should be an object of class \code{"solist"} representing a list of two-dimensional spatial objects. See \code{\link{solist}}. For the subset method, the subset index \code{i} can be either a vector index (specifying some elements of the list) or a spatial window (specifying a spatial sub-region). For the replacement method, \code{i} must be a vector index: the designated elements will be replaced. 
} \seealso{ \code{\link{solist}}, \code{\link{plot.solist}}, \code{\link{summary.solist}} } \examples{ x <- solist(japanesepines, cells, redwood) x[2:3] x[square(0.5)] x[1] <- list(finpines) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/corners.Rd0000644000176200001440000000173113333543263014641 0ustar liggesusers\name{corners} \alias{corners} \title{Corners of a rectangle} \description{ Returns the four corners of a rectangle } \usage{ corners(window) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors of length 4 giving the coordinates of the four corner points of the (bounding rectangle of the) window. } \details{ This trivial function is occasionally convenient. If \code{window} is of type \code{"rectangle"} this returns the four corners of the window itself; otherwise, it returns the corners of the bounding rectangle of the window. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}} } \examples{ w <- unit.square() corners(w) # returns list(x=c(0,1,0,1),y=c(0,0,1,1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/summary.distfun.Rd0000644000176200001440000000343213333543264016337 0ustar liggesusers\name{summary.distfun} \alias{summary.distfun} \alias{summary.funxy} \title{ Summarizing a Function of Spatial Location } \description{ Prints a useful summary of a function of spatial location. } \usage{ \method{summary}{distfun}(object, \dots) \method{summary}{funxy}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"distfun"} or \code{"funxy"} representing a function of spatial coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution used to compute the summary. 
} } \details{ These are the \code{\link[base]{summary}} methods for the classes \code{"funxy"} and \code{"distfun"}. An object of class \code{"funxy"} represents a function of spatial location, defined in a particular region of space. This includes objects of the special class \code{"distfun"} which represent distance functions. The \code{summary} method computes a summary of the function values. The function is evaluated on a grid of locations using \code{\link{as.im}} and numerical values at these locations are summarised using \code{\link{summary.im}}. The pixel resolution for the grid of locations is determined by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. } \value{ For \code{summary.funxy} the result is an object of class \code{"summary.funxy"}. For \code{summary.distfun} the result is an object of class \code{"summary.distfun"}. There are \code{print} methods for these classes. } \author{ \spatstatAuthors. } \seealso{ \code{\link{distfun}}, \code{\link{funxy}} } \examples{ f <- function(x,y) { x^2 + y^2 - 1} g <- funxy(f, square(2)) summary(g) summary(distfun(cells)) summary(distfun(cells), dimyx=256) } \keyword{spatial} \keyword{methods} spatstat/man/is.marked.ppp.Rd0000644000176200001440000000340413333543263015640 0ustar liggesusers\name{is.marked.ppp} \alias{is.marked.ppp} \title{Test Whether A Point Pattern is Marked} \description{ Tests whether a point pattern has ``marks'' attached to the points. } \usage{ \method{is.marked}{ppp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a marked point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. 
For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. This function tests whether the point pattern \code{X} contains or involves marked points. It is a method for the generic function \code{\link{is.marked}}. The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppm}} } \examples{ data(cells) is.marked(cells) #FALSE data(longleaf) is.marked(longleaf) #TRUE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Hybrid.Rd0000644000176200001440000000633313547301023014403 0ustar liggesusers\name{Hybrid} \alias{Hybrid} \title{ Hybrid Interaction Point Process Model } \description{ Creates an instance of a hybrid point process model which can then be fitted to point pattern data. } \usage{ Hybrid(...) } \arguments{ \item{\dots}{ Two or more interactions (objects of class \code{"interact"}) or objects which can be converted to interactions. See Details. } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The \emph{hybrid} of two point processes, with probability densities \eqn{f(x)} and \eqn{g(x)} respectively, is the point process with probability density \deqn{h(x) = c \, f(x) \, g(x)}{h(x) = c * f(x) * g(x)} where \eqn{c} is a normalising constant. 
Equivalently, the hybrid of two point processes with conditional intensities \eqn{\lambda(u,x)}{lambda(u,x)} and \eqn{\kappa(u,x)}{kappa(u,x)} is the point process with conditional intensity \deqn{ \phi(u,x) = \lambda(u,x) \, \kappa(u,x). }{ phi(u,x) = lambda(u,x) * kappa(u,x). } The hybrid of \eqn{m > 3} point processes is defined in a similar way. The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of a hybrid interaction is yielded by the function \code{Hybrid()}. The arguments \code{\dots} will be interpreted as interpoint interactions (objects of class \code{"interact"}) and the result will be the hybrid of these interactions. Each argument must either be an interpoint interaction (object of class \code{"interact"}), or a point process model (object of class \code{"ppm"}) from which the interpoint interaction will be extracted. The arguments \code{\dots} may also be given in the form \code{name=value}. This is purely cosmetic: it can be used to attach simple mnemonic names to the component interactions, and makes the printed output from \code{\link{print.ppm}} neater. } \value{ An object of class \code{"interact"} describing an interpoint interaction structure. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. 
\url{http://www.jstatsoft.org/v55/i11/} } \seealso{ \code{\link{ppm}} } \examples{ Hybrid(Strauss(0.1), Geyer(0.2, 3)) Hybrid(Ha=Hardcore(0.05), St=Strauss(0.1), Ge=Geyer(0.2, 3)) fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) fit ctr <- rmhcontrol(nrep=5e4, expand=1) plot(simulate(fit, control=ctr)) # hybrid components can be models (including hybrid models) Hybrid(fit, S=Softcore(0.5)) # plot.fii only works if every component is a pairwise interaction data(swedishpines) fit2 <- ppm(swedishpines, ~1, Hybrid(DG=DiggleGratton(2,10), S=Strauss(5))) plot(fitin(fit2)) plot(fitin(fit2), separate=TRUE, mar.panel=rep(4,4)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/eem.Rd0000644000176200001440000000520313333543263013732 0ustar liggesusers\name{eem} \alias{eem} \title{ Exponential Energy Marks } \description{ Given a point process model fitted to a point pattern, compute the Stoyan-Grabarnik diagnostic ``exponential energy marks'' for the data points. } \usage{ eem(fit, check=TRUE) } \arguments{ \item{fit}{ The fitted point process model. An object of class \code{"ppm"}. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } } \value{ A vector containing the values of the exponential energy mark for each point in the pattern. } \details{ Stoyan and Grabarnik (1991) proposed a diagnostic tool for point process models fitted to spatial point pattern data. Each point \eqn{x_i}{x[i]} of the data pattern \eqn{X} is given a `mark' or `weight' \deqn{m_i = \frac 1 {\hat\lambda(x_i,X)}}{m[i] = 1/\lambda(x[i],X)} where \eqn{\hat\lambda(x_i,X)}{\lambda(x[i],X)} is the conditional intensity of the fitted model. 
If the fitted model is correct, then the sum of these marks for all points in a region \eqn{B} has expected value equal to the area of \eqn{B}. The argument \code{fit} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This fitted model object contains complete information about the original data pattern and the model that was fitted to it. The value returned by \code{eem} is the vector of weights \eqn{m_i}{m[i]} associated with the points \eqn{x_i}{x[i]} of the original data pattern. The original data pattern (in corresponding order) can be extracted from \code{fit} using \code{\link{data.ppm}}. The function \code{\link{diagnose.ppm}} produces a set of sensible diagnostic plots based on these weights. } \references{ Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. } \seealso{ \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{data.ppm}}, \code{\link{residuals.ppm}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~x, Strauss(r=0.15)) ee <- eem(fit) sum(ee)/area(Window(cells)) # should be about 1 if model is correct Y <- setmarks(cells, ee) plot(Y, main="Cells data\n Exponential energy marks") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/sdrPredict.Rd0000644000176200001440000000236413333543264015275 0ustar liggesusers\name{sdrPredict} \alias{sdrPredict} \title{ Compute Predictors from Sufficient Dimension Reduction } \description{ Given the result of a Sufficient Dimension Reduction method, compute the new predictors. } \usage{ sdrPredict(covariates, B) } \arguments{ \item{covariates}{ A list of pixel images (objects of class \code{"im"}). } \item{B}{ Either a matrix of coefficients for the covariates, or the result of a call to \code{\link{sdr}}. 
} } \details{ This function assumes that \code{\link{sdr}} has already been used to find a minimal set of predictors based on the \code{covariates}. The argument \code{B} should be either the result of \code{\link{sdr}} or the coefficient matrix returned as one of the results of \code{\link{sdr}}. The columns of this matrix define linear combinations of the \code{covariates}. This function evaluates those linear combinations, and returns a list of pixel images containing the new predictors. } \value{ A list of pixel images (objects of class \code{"im"}) with one entry for each column of \code{B}. } \author{ \adrian } \seealso{ \code{\link{sdr}} } \examples{ A <- sdr(bei, bei.extra) Y <- sdrPredict(bei.extra, A) Y } \keyword{spatial} \keyword{nonparametric} spatstat/man/affine.ppp.Rd0000644000176200001440000000310313333543262015206 0ustar liggesusers\name{affine.ppp} \alias{affine.ppp} \title{Apply Affine Transformation To Point Pattern} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a point pattern. } \usage{ \method{affine}{ppp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } } \value{ Another point pattern (of class \code{"ppp"}) representing the result of applying the affine transformation. } \details{ The point pattern, and its window, are subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and are then translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.owin}}, \code{\link{affine.psp}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(cells) # shear transformation X <- affine(cells, matrix(c(1,0,0.6,1),ncol=2)) \dontrun{ plot(X) # rescale y coordinates by factor 1.3 plot(affine(cells, diag(c(1,1.3)))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/pool.rat.Rd0000644000176200001440000000655713333543264014740 0ustar liggesusers\name{pool.rat} \alias{pool.rat} \title{ Pool Data from Several Ratio Objects } \description{ Pool the data from several ratio objects (objects of class \code{"rat"}) and compute a pooled estimate. } \usage{ \method{pool}{rat}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"rat"}. } \item{weights}{ Numeric vector of weights. } \item{relabel}{ Logical value indicating whether the result should be relabelled to show that it was obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"rat"} of ratio objects. It is used to combine several estimates of the same quantity when each estimate is a ratio. Each of the arguments \code{\dots} must be an object of class \code{"rat"} representing a ratio object (basically a numerator and a denominator; see \code{\link{rat}}). We assume that these ratios are all estimates of the same quantity. If the objects are called \eqn{R_1, \ldots, R_n}{R[1], \dots, R[n]} and if \eqn{R_i}{R[i]} has numerator \eqn{Y_i}{Y[i]} and denominator \eqn{X_i}{X[i]}, so that notionally \eqn{R_i = Y_i/X_i}{R[i] = Y[i]/X[i]}, then the pooled estimate is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i Y_i}{\sum_i X_i}. }{ R = (Y[1]+\dots+Y[n])/(X[1]+\dots+X[n]). 
The standard error of \eqn{R} is computed using the delta method as described in Baddeley \emph{et al.} (1993) or Cochran (1977, pp 154, 161). If the argument \code{weights} is given, it should be a numeric vector of length equal to the number of objects to be pooled. The pooled estimator is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i w_i Y_i}{\sum_i w_i X_i} }{ R = (w[1] * Y[1]+\dots+ w[n] * Y[n])/(w[1] * X[1]+\dots+w[n] * X[n]) } where \eqn{w_i}{w[i]} is the \code{i}th weight. This calculation is implemented only for certain classes of objects where the arithmetic can be performed. This calculation is currently implemented only for objects which also belong to the class \code{"fv"} (function value tables). For example, if \code{\link{Kest}} is called with argument \code{ratio=TRUE}, the result is a suitable object (belonging to the classes \code{"rat"} and \code{"fv"}). Warnings or errors will be issued if the ratio objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of the same class as the input. } \seealso{ \code{\link{rat}}, \code{\link{pool}}, \code{\link{pool.fv}}, \code{\link{Kest}} } \examples{ K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K3 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K <- pool(K1, K2, K3) plot(K, pooliso ~ r, shade=c("hiiso", "loiso")) } \references{ Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Cochran, W.G. (1977) \emph{Sampling techniques}, 3rd edition. New York: John Wiley and Sons. } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/lpp.Rd0000644000176200001440000000727313333543263013770 0ustar liggesusers\name{lpp} \alias{lpp} \title{ Create Point Pattern on Linear Network } \description{ Creates an object of class \code{"lpp"} that represents a point pattern on a linear network. } \usage{ lpp(X, L, \dots) } \arguments{ \item{X}{ Locations of the points. A matrix or data frame of coordinates, or a point pattern object (of class \code{"ppp"}) or other data acceptable to \code{\link{as.ppp}}. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{ Ignored. } } \details{ This command creates an object of class \code{"lpp"} that represents a point pattern on a linear network. Normally \code{X} is a point pattern. The points of \code{X} should lie on the lines of \code{L}. Alternatively \code{X} may be a matrix or data frame containing at least two columns. \itemize{ \item Usually the first two columns of \code{X} will be interpreted as spatial coordinates, and any remaining columns as marks. \item An exception occurs if \code{X} is a data frame with columns named \code{x}, \code{y}, \code{seg} and \code{tp}. Then \code{x} and \code{y} will be interpreted as spatial coordinates, and \code{seg} and \code{tp} as local coordinates, with \code{seg} indicating which line segment of \code{L} the point lies on, and \code{tp} indicating how far along the segment the point lies (normalised to 1). Any remaining columns will be interpreted as marks. \item Another exception occurs if \code{X} is a data frame with columns named \code{seg} and \code{tp}. Then \code{seg} and \code{tp} will be interpreted as local coordinates, as above, and the spatial coordinates \code{x,y} will be computed from them. Any remaining columns will be interpreted as marks. } If \code{X} is missing or \code{NULL}, the result is an empty point pattern (i.e. containing no points). 
} \section{Note on changed format}{ The internal format of \code{"lpp"} objects was changed in \pkg{spatstat} version \code{1.28-0}. Objects in the old format are still handled correctly, but computations are faster in the new format. To convert an object \code{X} from the old format to the new format, use \code{X <- lpp(as.ppp(X), as.linnet(X))}. } \value{ An object of class \code{"lpp"}. Also inherits the class \code{"ppx"}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ Installed datasets which are \code{"lpp"} objects: \code{\link[spatstat.data]{chicago}}, \code{\link[spatstat.data]{dendrite}}, \code{\link[spatstat.data]{spiders}}. See \code{\link{as.lpp}} for converting data to an \code{lpp} object. See \code{\link{methods.lpp}} and \code{\link{methods.ppx}} for other methods applicable to \code{lpp} objects. Calculations on an \code{lpp} object: \code{\link{intensity.lpp}}, \code{\link{distfun.lpp}}, \code{\link{nndist.lpp}}, \code{\link{nnwhich.lpp}}, \code{\link{nncross.lpp}}, \code{\link{nnfun.lpp}}. Summary functions: \code{\link{linearK}}, \code{\link{linearKinhom}}, \code{\link{linearpcf}}, \code{\link{linearKdot}}, \code{\link{linearKcross}}, \code{\link{linearmarkconnect}}, etc. Random point patterns on a linear network can be generated by \code{\link{rpoislpp}} or \code{\link{runiflpp}}. See \code{\link{linnet}} for linear networks. } \examples{ # letter 'A' v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) # points on letter A xx <- list(x=c(-1.5,0,0.5,1.5), y=c(1.5,3,4.5,1.5)) X <- lpp(xx, letterA) plot(X) X summary(X) # empty pattern lpp(L=letterA) } \keyword{spatial} spatstat/man/update.symbolmap.Rd0000644000176200001440000000226113333543264016452 0ustar liggesusers\name{update.symbolmap} \alias{update.symbolmap} \title{ Update a Graphics Symbol Map. } \description{ This command updates the \code{object} using the arguments given. 
} \usage{ \method{update}{symbolmap}(object, \dots) } \arguments{ \item{object}{ Graphics symbol map (object of class \code{"symbolmap"}). } \item{\dots}{ Additional or replacement arguments to \code{\link{symbolmap}}. } } \details{ This is a method for the generic function \code{\link[stats]{update}} for the class \code{"symbolmap"} of graphics symbol maps. It updates the \code{object} using the parameters given in the extra arguments \code{\dots}. The extra arguments must be given in the form \code{name=value} and must be recognisable to \code{\link{symbolmap}}. They override any parameters of the same name in \code{object}. } \value{ Another object of class \code{"symbolmap"}. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{symbolmap}} to create a graphics symbol map. } \examples{ g <- symbolmap(size=function(x) x/50) g update(g, range=c(0,1)) update(g, size=42) update(g, shape="squares", range=c(0,1)) } \keyword{spatial} \keyword{hplot} spatstat/man/Jcross.Rd0000644000176200001440000001604613454351175014441 0ustar liggesusers\name{Jcross} \alias{Jcross} \title{ Multitype J Function (i-to-j) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and of type \eqn{j}. } \usage{ Jcross(X, i, j, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{ij}(r)}{Jij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. 
A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{ij}(r)}{Jij(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{ij}(r)}{Jij(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{ij}(r)}{1 - Gij(r)} and \eqn{1 - F_{j}(r)}{1 - Fj(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{ij}(r)}{Jij(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gcross}} and \code{\link{Fest}} for the point pattern. 
} \details{ This function \code{Jcross} and its companions \code{\link{Jdot}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``type \eqn{i} to type \eqn{j}'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{ij}(r) = \frac{1 - G_{ij}(r)}{1 - F_{j}(r)}}{Jij(r) = (1 - Gij(r))/(1-Fj(r))} where \eqn{G_{ij}(r)}{Gij(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest point of type \eqn{j}, and \eqn{F_{j}(r)}{Fj(r)} is the distribution function of the distance from a fixed point in space to the nearest point of type \eqn{j} in the pattern. An estimate of \eqn{J_{ij}(r)}{Jij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points is independent of the subprocess of points of type \eqn{j}, then \eqn{J_{ij}(r) \equiv 1}{Jij(r) = 1}. Hence deviations of the empirical estimate of \eqn{J_{ij}}{Jij} from the value 1 may suggest dependence between types. This algorithm estimates \eqn{J_{ij}(r)}{Jij(r)} from the point pattern \code{X}. 
It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jdot}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30)] } Jhm <- Jcross(woods, "hickory", "maple") # diagnostic plot for independence between hickories and maples plot(Jhm) # synthetic example with two types "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jcross(pp) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/intensity.quadratcount.Rd0000644000176200001440000000424213333543263017725 0ustar liggesusers\name{intensity.quadratcount} \alias{intensity.quadratcount} \title{ Intensity Estimates Using Quadrat Counts } \description{ Uses quadrat count data to estimate the intensity of a point pattern in each tile of a tessellation, assuming the intensity is constant in each tile. } \usage{ \method{intensity}{quadratcount}(X, ..., image=FALSE) } \arguments{ \item{X}{ An object of class \code{"quadratcount"}. } \item{image}{ Logical value specifying whether to return a table of estimated intensities (the default) or a pixel image of the estimated intensity (\code{image=TRUE}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the resolution of the pixel image, if \code{image=TRUE}. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes an estimate of the intensity of a point pattern from its quadrat counts. The argument \code{X} should be an object of class \code{"quadratcount"}. It would have been obtained by applying the function \code{\link{quadratcount}} to a point pattern (object of class \code{"ppp"}). It contains the counts of the numbers of points of the point pattern falling in each tile of a tessellation. Using this information, \code{intensity.quadratcount} divides the quadrat counts by the tile areas, yielding the average density of points per unit area in each tile of the tessellation. If \code{image=FALSE} (the default), these intensity values are returned in a contingency table. Cells of the contingency table correspond to tiles of the tessellation. If \code{image=TRUE}, the estimated intensity function is returned as a pixel image. For each pixel, the pixel value is the estimated intensity in the tile which contains that pixel. } \value{ If \code{image=FALSE} (the default), a contingency table. 
If \code{image=TRUE}, a pixel image (object of class \code{"im"}). } \seealso{ \code{\link{intensity}}, \code{\link{quadratcount}} } \examples{ qa <- quadratcount(swedishpines, 4,3) qa intensity(qa) plot(intensity(qa, image=TRUE)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairdist.ppp.Rd0000644000176200001440000000465113333543263015607 0ustar liggesusers\name{pairdist.ppp} \alias{pairdist.ppp} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of points in a point pattern. } \usage{ \method{pairdist}{ppp}(X, \dots, periodic=FALSE, method="C", squared=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a point pattern \code{X} (an object of class \code{"ppp"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a rectangle, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. 
The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is somewhat faster. } \seealso{ \code{\link{pairdist}}, \code{\link{pairdist.default}}, \code{\link{pairdist.psp}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \examples{ data(cells) d <- pairdist(cells) d <- pairdist(cells, periodic=TRUE) d <- pairdist(cells, squared=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/test.crossing.psp.Rd0000644000176200001440000000237013333543264016575 0ustar liggesusers\name{test.crossing.psp} \alias{test.crossing.psp} \alias{test.selfcrossing.psp} \title{ Check Whether Segments Cross } \description{ Determine whether there is a crossing (intersection) between each pair of line segments. } \usage{ test.crossing.psp(A, B) test.selfcrossing.psp(A) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } } \details{ These functions decide whether the given line segments intersect each other. If \code{A} and \code{B} are two spatial patterns of line segments, \code{test.crossing.psp(A, B)} returns a logical matrix in which the entry on row \code{i}, column \code{j} is equal to \code{TRUE} if segment \code{A[i]} has an intersection with segment \code{B[j]}. If \code{A} is a pattern of line segments, \code{test.selfcrossing.psp(A)} returns a symmetric logical matrix in which the entry on row \code{i}, column \code{j} is equal to \code{TRUE} if segment \code{A[i]} has an intersection with segment \code{A[j]}. } \value{ A logical matrix. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{psp}} } \examples{ B <- edges(letterR) A <- rpoisline(5, Frame(B)) MA <- test.selfcrossing.psp(A) MAB <- test.crossing.psp(A, B) } \keyword{spatial} \keyword{math} spatstat/man/plot.studpermutest.Rd0000644000176200001440000000716513545612360017102 0ustar liggesusers\name{plot.studpermutest} \alias{plot.studpermutest} \title{ Plot a Studentised Permutation Test } \description{ Plot the result of the studentised permutation test. } \usage{ \method{plot}{studpermutest}(x, fmla, \dots, lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if (meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if (meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox = FALSE, add = FALSE) } \arguments{ \item{x}{ An object of class \code{"studpermutest"} generated by \code{\link{studpermu.test}} and representing the result of a studentised permutation test for spatial point pattern data. } \item{fmla}{ Plot formula used in \code{\link{plot.fv}}. } \item{\dots}{ Additional graphical arguments passed to \code{\link{plot.fv}}. } \item{lty,col,lwd}{ Line type, colour, and line width of the curves plotting the summary function for each point pattern in the original data. Either a single value or a vector of length equal to the number of point patterns. } \item{lty.theo,col.theo,lwd.theo}{ Line type, colour, and line width of the curve representing the theoretical value of the summary function. } \item{lty.mean,col.mean,lwd.mean}{ Line type, colour, and line width (as a multiple of \code{lwd}) of the curve representing the group mean of the summary function. } \item{separately}{ Logical value indicating whether to plot each group of data in a separate panel. } \item{meanonly}{ Logical value indicating whether to plot only the group means of the summary function. 
} \item{main}{ Character string giving a main title for the plot. } \item{xlim,ylim}{ Numeric vectors of length 2 giving the limits for the \eqn{x} and \eqn{y} coordinates of the plot or plots. } \item{ylab}{ Character string or expression to be used for the label on the \eqn{y} axis. } \item{legend}{ Logical value indicating whether to plot a legend explaining the meaning of each curve. } \item{legendpos}{ Position of legend. See \code{\link{plot.fv}}. } \item{lbox}{ Logical value indicating whether to plot a box around the plot. } \item{add}{ Logical value indicating whether the plot should be added to the existing plot (\code{add=TRUE}) or whether a new frame should be created (\code{add=FALSE}, the default). } } \details{ This is the \code{plot} method for objects of class \code{"studpermutest"} which represent the result of a studentised permutation test applied to several point patterns. The test is performed by \code{\link{studpermu.test}}. The plot shows the summary functions for each point pattern, coloured according to group. Optionally it can show the different groups in separate plot panels, or show only the group means in a single panel. } \value{ Null. } \author{ Ute Hahn. Modified for \code{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{studpermu.test}} } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) plot(testpyramidal) plot(testpyramidal, meanonly=TRUE) plot(testpyramidal, col.theo=8, lwd.theo=4, lty.theo=1) plot(testpyramidal, . 
~ pi * r^2) op <- par(mfrow=c(1,3)) plot(testpyramidal, separately=TRUE) plot(testpyramidal, separately=TRUE, col=2, lty=1, lwd.mean=2, col.mean=4) par(op) } \keyword{hplot} \keyword{htest} spatstat/man/vargamma.estpcf.Rd0000644000176200001440000001521613333543264016250 0ustar liggesusers\name{vargamma.estpcf} \alias{vargamma.estpcf} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ vargamma.estpcf(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical pair correlation function of the model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of spatstat instead of specifying \code{nu} (called \code{nu.ker} at that time) the user could specify \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. 
This syntax is still supported but not recommended for consistency across the package. In that case exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{scale} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. 
} } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ u <- vargamma.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/nncross.Rd0000644000176200001440000001602213333543263014652 0ustar liggesusers\name{nncross} \alias{nncross} \alias{nncross.ppp} \alias{nncross.default} \title{Nearest Neighbours Between Two Patterns} \description{ Given two point patterns \code{X} and \code{Y}, finds the nearest neighbour in \code{Y} of each point of \code{X}. Alternatively \code{Y} may be a line segment pattern. } \usage{ nncross(X, Y, \dots) \method{nncross}{ppp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, sortby=c("range", "var", "x", "y"), is.sorted.X = FALSE, is.sorted.Y = FALSE) \method{nncross}{default}(X, Y, \dots) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{Y}{Either a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}).} \item{iX, iY}{Optional identifiers, applicable only in the case where \code{Y} is a point pattern, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. 
Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{sortby}{ Determines which coordinate to use to sort the point patterns. See Details. } \item{is.sorted.X, is.sorted.Y}{ Logical values attesting whether the point patterns \code{X} and \code{Y} have been sorted. See Details. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. Alternatively if \code{X} is a point pattern and \code{Y} is a line segment pattern, the function finds the nearest line segment to each point of \code{X}, and computes the distance. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. 
To find the nearest neighbour in \code{X} of each point in \code{Y}, where \code{Y} is a point pattern, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts the point patterns \code{X} and \code{Y} into increasing order of the \eqn{x} coordinate or increasing order of the \eqn{y} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the larger range of values (according to the frame of the enclosing window of \code{Y}). If \code{sortby = "var"}, sorting will occur on the coordinate that has the greater variance (in the pattern \code{Y}). Setting \code{sortby="x"} or \code{sortby = "y"} will specify that sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively. If the point pattern \code{X} is already sorted, then the corresponding argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. 
Similarly if \code{Y} is already sorted, then \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. If both \code{X} and \code{Y} are sorted \emph{on the same coordinate axis} then both \code{is.sorted.X} and \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. 
} \examples{ # two different point patterns X <- runifpoint(15) Y <- runifpoint(20) N <- nncross(X,Y)$which # note that length(N) = 15 plot(superimpose(X=X,Y=Y), main="nncross", cols=c("red","blue")) arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15) # third-nearest neighbour NXY <- nncross(X, Y, k=3) NXY[1:3,] # second and third nearest neighbours NXY <- nncross(X, Y, k=2:3) NXY[1:3,] # two patterns with some points in common Z <- runifpoint(50) X <- Z[1:30] Y <- Z[20:50] iX <- 1:30 iY <- 20:50 N <- nncross(X,Y, iX, iY)$which N <- nncross(X,Y, iX, iY, what="which") #faster plot(superimpose(X=X, Y=Y), main="nncross", cols=c("red","blue")) arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15) # point pattern and line segment pattern X <- runifpoint(15) Y <- rpoisline(10) N <- nncross(X,Y) } \author{ \adrian , \rolf , and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/rSwitzerlpp.Rd0000644000176200001440000000504713557001576015543 0ustar liggesusers\name{rSwitzerlpp} \alias{rSwitzerlpp} \title{ Switzer-type Point Process on Linear Network } \description{ Generate a realisation of the Switzer-type point process on a linear network. } \usage{ rSwitzerlpp(L, lambdacut, rintens = rexp, \dots, cuts=c("points", "lines")) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{lambdacut}{ Intensity of Poisson process of breakpoints. } \item{rintens}{ Optional. Random variable generator used to generate the random intensity in each component. } \item{\dots}{ Additional arguments to \code{rintens}. } \item{cuts}{ String (partially matched) specifying the type of random cuts to be generated. } } \details{ This function generates simulated realisations of the Switzer-type point process on a network, as described in Baddeley et al (2017). 
The linear network is first divided into pieces by a random mechanism: \itemize{ \item if \code{cuts="points"}, a Poisson process of breakpoints with intensity \code{lambdacut} is generated on the network, and these breakpoints separate the network into connected pieces. \item if \code{cuts="lines"}, a Poisson line process in the plane with intensity \code{lambdacut} is generated; these lines divide space into tiles; the network is divided into subsets associated with the tiles. Each subset may not be a connected sub-network. } In each piece of the network, a random intensity is generated using the random variable generator \code{rintens} (the default is a negative exponential random variable with rate 1). Given the intensity value, a Poisson process is generated with the specified intensity. The intensity of the final process is determined by the mean of the values generated by \code{rintens}. If \code{rintens=rexp} (the default), then the parameter \code{rate} specifies the inverse of the intensity. } \value{ Point pattern on a linear network (object of class \code{"lpp"}) with an attribute \code{"breaks"} containing the breakpoints (if \code{cuts="points"}) or the random lines (if \code{cuts="lines"}). } \author{ \adrian. } \seealso{ \code{\link{rcelllpp}} } \references{ Baddeley, A., Nair, G., Rakshit, S. and McSwiggan, G. (2017) \sQuote{Stationary} point processes are uncommon on linear networks. \emph{STAT} \bold{6}, {68--78}. } \examples{ plot(rSwitzerlpp(domain(spiders), 0.01, rate=100)) plot(rSwitzerlpp(domain(spiders), 0.0005, rate=100, cuts="l")) } \keyword{spatial} \keyword{datagen} spatstat/man/round.ppp.Rd0000644000176200001440000000204313333543264015111 0ustar liggesusers\name{round.ppp} \alias{round.ppp} \alias{round.pp3} \alias{round.ppx} \title{ Apply Numerical Rounding to Spatial Coordinates } \description{ Apply numerical rounding to the spatial coordinates of a point pattern. 
} \usage{ \method{round}{ppp}(x, digits = 0) \method{round}{pp3}(x, digits = 0) \method{round}{ppx}(x, digits = 0) } \arguments{ \item{x}{ A spatial point pattern in any dimension (object of class \code{"ppp"}, \code{"pp3"} or \code{"ppx"}). } \item{digits}{ integer indicating the number of decimal places. } } \details{ These functions are methods for the generic function \code{\link[base]{round}}. They apply numerical rounding to the spatial coordinates of the point pattern \code{x}. } \value{ A point pattern object, of the same class as \code{x}. } \author{ \adrian and \rolf } \seealso{ \code{\link{rounding}} to determine whether numbers have been rounded. \code{\link[base]{round}} in the Base package. } \examples{ round(cells, 1) } \keyword{spatial} \keyword{manip} spatstat/man/rlinegrid.Rd0000644000176200001440000000155713333543264015154 0ustar liggesusers\name{rlinegrid} \alias{rlinegrid} \title{Generate grid of parallel lines with random displacement} \description{ Generates a grid of parallel lines, equally spaced, inside the specified window. } \usage{ rlinegrid(angle = 45, spacing = 0.1, win = owin()) } \arguments{ \item{angle}{Common orientation of the lines, in degrees anticlockwise from the x axis. } \item{spacing}{Spacing between successive lines.} \item{win}{Window in which to generate the lines. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \details{ The grid is randomly displaced from the origin. } \value{ A line segment pattern (object of class \code{"psp"}). 
} \seealso{ \code{\link{psp}}, \code{\link{rpoisline}} } \examples{ plot(rlinegrid(30, 0.05)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/nndist.pp3.Rd0000644000176200001440000000601113333543263015162 0ustar liggesusers\name{nndist.pp3} \alias{nndist.pp3} \title{Nearest neighbour distances in three dimensions} \description{ Computes the distance from each point to its nearest neighbour in a three-dimensional point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ \method{nndist}{pp3}(X, \dots, k=1) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. } \details{ This function computes the Euclidean distance from each point in a three-dimensional point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic; this function \code{nndist.pp3} is the method for the class \code{"pp3"}. The argument \code{k} may be a single integer, or an integer vector. 
If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To use the nearest neighbour distances for statistical inference, it is often advisable to use the edge-corrected empirical distribution, computed by \code{\link{G3est}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). } \seealso{ \code{\link{nndist}}, \code{\link{pairdist}}, \code{\link{G3est}}, \code{\link{nnwhich}} } \examples{ X <- runifpoint3(40) # nearest neighbours d <- nndist(X) # second nearest neighbours d2 <- nndist(X, k=2) # first, second and third nearest d1to3 <- nndist(X, k=1:3) } \author{ \adrian based on code for two dimensions by Pavel Grabarnik } \keyword{spatial} \keyword{math} spatstat/man/grow.rectangle.Rd0000644000176200001440000000325513333543263016112 0ustar liggesusers\name{grow.rectangle} \alias{grow.rectangle} \title{Add margins to rectangle} \description{ Adds a margin to a rectangle. } \usage{ grow.rectangle(W, xmargin=0, ymargin=xmargin, fraction=NULL) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). Must be of type \code{"rectangle"}. } \item{xmargin}{Width of horizontal margin to be added. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at left and right. } \item{ymargin}{Height of vertical margin to be added. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at bottom and top. 
} \item{fraction}{ Fraction of width and height to be added. A number greater than zero, or a numeric vector of length 2 indicating different fractions of width and of height, respectively. Incompatible with specifying \code{xmargin} and \code{ymargin}. } } \value{ Another object of class \code{"owin"} representing the window after margins are added. } \details{ This is a simple convenience function to add a margin of specified width and height on each side of a rectangular window. Unequal margins can also be added. } \seealso{ \code{\link{trim.rectangle}}, \code{\link{dilation}}, \code{\link{erosion}}, \code{\link{owin.object}} } \examples{ w <- square(10) # add a margin of width 1 on all four sides square12 <- grow.rectangle(w, 1) # add margin of width 3 on the right side # and margin of height 4 on top. v <- grow.rectangle(w, c(0,3), c(0,4)) # grow by 5 percent on all sides grow.rectangle(w, fraction=0.05) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/distmap.Rd0000644000176200001440000000273313333543263014632 0ustar liggesusers\name{distmap} \alias{distmap} \title{ Distance Map } \description{ Compute the distance map of an object, and return it as a pixel image. Generic. } \usage{ distmap(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose grey scale values are the values of the distance map. } \details{ The \dQuote{distance map} of a set of points \eqn{A} is the function \eqn{f} whose value \code{f(x)} is defined for any two-dimensional location \eqn{x} as the shortest distance from \eqn{x} to \eqn{A}. 
This function computes the distance map of the set \code{X} and returns the distance map as a pixel image. This is generic. Methods are provided for point patterns (\code{\link{distmap.ppp}}), line segment patterns (\code{\link{distmap.psp}}) and windows (\code{\link{distmap.owin}}). } \seealso{ \code{\link{distmap.ppp}}, \code{\link{distmap.psp}}, \code{\link{distmap.owin}}, \code{\link{distfun}} } \examples{ data(cells) U <- distmap(cells) data(letterR) V <- distmap(letterR) \dontrun{ plot(U) plot(V) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/DiggleGratton.Rd0000644000176200001440000000544513333543262015725 0ustar liggesusers\name{DiggleGratton} \alias{DiggleGratton} \title{Diggle-Gratton model} \description{ Creates an instance of the Diggle-Gratton pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ DiggleGratton(delta=NA, rho) } \arguments{ \item{delta}{lower threshold \eqn{\delta}{\delta}} \item{rho}{upper threshold \eqn{\rho}{\rho}} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - \delta)/(\rho - \delta))^\kappa, { } \delta \le t \le \rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < \delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > \rho}. Here \eqn{\delta}{\delta}, \eqn{\rho}{\rho} and \eqn{\kappa}{\kappa} are parameters. Note that we use the symbol \eqn{\kappa}{\kappa} where Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987) use \eqn{\beta}{\beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{\beta} for an intensity parameter. 
The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{\delta \le \rho}. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. The strength of inhibition increases with \eqn{\kappa}{\kappa}. For \eqn{\kappa=0}{\kappa=0} the model is a hard core process with hard core radius \eqn{\delta}{\delta}. For \eqn{\kappa=\infty}{\kappa=Inf} the model is a hard core process with hard core radius \eqn{\rho}{\rho}. The irregular parameters \eqn{\delta, \rho}{\delta, \rho} must be given in the call to \code{DiggleGratton}, while the regular parameter \eqn{\kappa}{\kappa} will be estimated. If the lower threshold \code{delta} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{delta} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}} } \examples{ ppm(cells ~1, DiggleGratton(0.05, 0.1)) } \references{ Diggle, P.J., Gates, D.J. and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/Gest.Rd0000644000176200001440000002134413333543262014071 0ustar liggesusers\name{Gest} \alias{Gest} \alias{nearest.neighbour} \title{ Nearest Neighbour Distance Function G } \description{ Estimates the nearest neighbour distance distribution function \eqn{G(r)} from a point pattern in a window of arbitrary shape. 
} \usage{ Gest(X, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han"), domain=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{G(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{G(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{G(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing some or all of the following columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{G(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G(r)}, i.e. the empirical distribution of the distances from each point in the pattern \code{X} to the nearest other point of the pattern } \item{han}{the Hanisch correction estimator of \eqn{G(r)} } \item{theo}{the theoretical value of \eqn{G(r)} for a stationary Poisson process of the same estimated intensity. 
} } \details{ The nearest neighbour distance distribution function (also called the ``\emph{event-to-event}'' or ``\emph{inter-event}'' distribution) of a point process \eqn{X} is the cumulative distribution function \eqn{G} of the distance from a typical random point of \eqn{X} to the nearest other point of \eqn{X}. An estimate of \eqn{G} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{G} is a useful statistic summarising one aspect of the ``clustering'' of points. For inferential purposes, the estimate of \eqn{G} is usually compared to the true value of \eqn{G} for a completely random (Poisson) point process, which is \deqn{G(r) = 1 - e^{ - \lambda \pi r^2} }{% G(r) = 1 - exp( - lambda * pi * r^2)} where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{G} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the nearest neighbour distance distribution function \eqn{G} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{G} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). 
The edge corrections implemented here are the border method or ``\emph{reduced sample}'' estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Hanisch estimator (Hanisch, 1984). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G(r)}. The hazard rate is defined as the derivative \deqn{\lambda(r) = - \frac{d}{dr} \log (1 - G(r))}{% lambda(r) = - (d/dr) log(1 - G(r))} This estimate should be used with caution as \eqn{G} is not necessarily differentiable. If the argument \code{domain} is given, the estimate of \eqn{G(r)} will be based only on the nearest neighbour distances measured from points falling inside \code{domain} (although their nearest neighbours may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G}. However it is sometimes useful. It can be returned by the algorithm, by selecting \code{correction="none"}. 
Care should be taken not to use the uncorrected empirical \eqn{G} as if it were an unbiased estimator of \eqn{G}. To simply compute the nearest neighbour distance for each point in the pattern, use \code{\link{nndist}}. To determine which point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest-neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The function \eqn{G} does not necessarily have a density. Any valid c.d.f. may appear as the nearest neighbour distance distribution function of a stationary point process. The reduced sample estimator of \eqn{G} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G} is always nondecreasing but its maximum value may be less than \eqn{1}. 
} \seealso{ \code{\link{nndist}}, \code{\link{nnwhich}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) G <- Gest(cells) plot(G) # P-P style plot plot(G, cbind(km,theo) ~ theo) # the empirical G is below the Poisson G, # indicating an inhibited pattern \dontrun{ plot(G, . ~ r) plot(G, . ~ theo) plot(G, asin(sqrt(.)) ~ asin(sqrt(theo))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rotate.ppp.Rd0000644000176200001440000000241313333543264015261 0ustar liggesusers\name{rotate.ppp} \alias{rotate.ppp} \title{Rotate a Point Pattern} \description{ Rotates a point pattern } \usage{ \method{rotate}{ppp}(X, angle=pi/2, \dots, centre=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{angle}{Angle of rotation.} \item{\dots}{ Arguments passed to \code{\link{rotate.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"ppp"} representing the rotated point pattern. } \details{ The points of the pattern, and the window of observation, are rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the pattern 90 degrees anticlockwise. If the points carry marks, these are preserved. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{rotate.owin}} } \examples{ data(cells) X <- rotate(cells, pi/3) \dontrun{ plot(X) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/closepairs.pp3.Rd0000644000176200001440000001031213376755257016045 0ustar liggesusers\name{closepairs.pp3} \alias{closepairs.pp3} \alias{crosspairs.pp3} \title{ Close Pairs of Points in 3 Dimensions } \description{ Low-level functions to find all close pairs of points in three-dimensional point patterns. } \usage{ \method{closepairs}{pp3}(X, rmax, twice=TRUE, what=c("all", "indices", "ijd"), distinct=TRUE, neat=TRUE, \dots) \method{crosspairs}{pp3}(X, Y, rmax, what=c("all", "indices", "ijd"), \dots) } \arguments{ \item{X,Y}{ Point patterns in three dimensions (objects of class \code{"pp3"}). } \item{rmax}{ Maximum distance between pairs of points to be counted as close pairs. } \item{twice}{ Logical value indicating whether all ordered pairs of close points should be returned. If \code{twice=TRUE}, each pair will appear twice in the output, as \code{(i,j)} and again as \code{(j,i)}. If \code{twice=FALSE}, then each pair will appear only once, as the pair \code{(i,j)} such that \code{i < j}. } \item{what}{ String specifying the data to be returned for each close pair of points. If \code{what="all"} (the default) then the returned information includes the indices \code{i,j} of each pair, their \code{x,y,z} coordinates, and the distance between them. If \code{what="indices"} then only the indices \code{i,j} are returned. If \code{what="ijd"} then the indices \code{i,j} and the distance \code{d} are returned. } \item{distinct}{ Logical value indicating whether to return only the pairs of points with different indices \code{i} and \code{j} (\code{distinct=TRUE}, the default) or to also include the pairs where \code{i=j} (\code{distinct=FALSE}). } \item{neat}{ Logical value indicating whether to ensure that \code{i < j} in each output pair, when \code{twice=FALSE}. 
\code{crosspairs(X,Y,rmax)} identifies all pairs of neighbours \code{(X[i], Y[j])} between the patterns \code{X} and \code{Y}, and returns them.
} \value{ A list with components \code{i} and \code{j}, and possibly other components as described under Details. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{closepairs}} } \examples{ X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) Y <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) a <- closepairs(X, 0.1) b <- crosspairs(X, Y, 0.1) } \keyword{spatial} \keyword{math} spatstat/man/mergeLevels.Rd0000644000176200001440000000376013333543263015444 0ustar liggesusers\name{mergeLevels} \alias{mergeLevels} \title{ Merge Levels of a Factor } \description{ Specified levels of the factor will be merged into a single level. } \usage{ mergeLevels(.f, \dots) } \arguments{ \item{.f}{ A factor (or a factor-valued pixel image or a point pattern with factor-valued marks). } \item{\dots}{ List of \code{name=value} pairs, where \code{name} is the new merged level, and \code{value} is the vector of old levels that will be merged. } } \details{ This utility function takes a factor \code{.f} and merges specified levels of the factor. The grouping is specified by the arguments \code{\dots} which must each be given in the form \code{new=old}, where \code{new} is the name for the new merged level, and \code{old} is a character vector containing the old levels that are to be merged. The result is a new factor (or factor-valued object), in which the levels listed in \code{old} have been replaced by a single level \code{new}. An argument of the form \code{name=character(0)} or \code{name=NULL} is interpreted to mean that all other levels of the old factor should be mapped to \code{name}. } \value{ Another factor of the same length as \code{.f} (or object of the same kind as \code{.f}). } \section{Tips for manipulating factor levels}{ To remove unused levels from a factor \code{f}, just type \code{f <- factor(f)}. To change the ordering of levels in a factor, use \code{\link[base]{factor}(f, levels=l)} or \code{\link[stats]{relevel}(f, ref)}. 
The value returned from \code{split.hyperframe} is a list of hyperframes containing the values for the groups.
The components of the list are named by the levels of \code{f} (after converting to a factor, or if already a factor and \code{drop = TRUE}, dropping unused levels). The replacement method \code{split<-.hyperframe} returns a new hyperframe \code{x} for which \code{split(x,f)} equals \code{value}. } \author{\adrian , \rolf and \ege } \seealso{ \code{\link{hyperframe}}, \code{\link{[.hyperframe}} } \examples{ split(pyramidal, pyramidal$group) } \keyword{spatial} \keyword{manip} spatstat/man/ppx.Rd0000644000176200001440000000621313333543264013776 0ustar liggesusers\name{ppx} \Rdversion{1.1} \alias{ppx} \title{ Multidimensional Space-Time Point Pattern } \description{ Creates a multidimensional space-time point pattern with any kind of coordinates and marks. } \usage{ ppx(data, domain=NULL, coord.type=NULL, simplify=FALSE) } \arguments{ \item{data}{ The coordinates and marks of the points. A \code{data.frame} or \code{hyperframe}. } \item{domain}{ Optional. The space-time domain containing the points. An object in some appropriate format, or \code{NULL}. } \item{coord.type}{ Character vector specifying how each column of \code{data} should be interpreted: as a spatial coordinate, a temporal coordinate, a local coordinate or a mark. Entries are partially matched to the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and \code{"mark"}. } \item{simplify}{ Logical value indicating whether to simplify the result in special cases. If \code{simplify=TRUE}, a two-dimensional point pattern will be returned as an object of class \code{"ppp"}, and a three-dimensional point pattern will be returned as an object of class \code{"pp3"}. If \code{simplify=FALSE} (the default) then the result is always an object of class \code{"ppx"}. } } \details{ An object of class \code{"ppx"} represents a marked point pattern in multidimensional space and/or time. 
There may be any number of spatial coordinates, any number of temporal coordinates, any number of local coordinates, and any number of mark variables. The individual marks may be atomic (numeric values, factor values, etc) or objects of any kind. The argument \code{data} should contain the coordinates and marks of the points. It should be a \code{data.frame} or more generally a \code{hyperframe} (see \code{\link{hyperframe}}) with one row of data for each point. Each column of \code{data} is either a spatial coordinate, a temporal coordinate, a local coordinate, or a mark variable. The argument \code{coord.type} determines how each column is interpreted. It should be a character vector, of length equal to the number of columns of \code{data}. It should contain strings that partially match the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and \code{"mark"}. (The first letters will be sufficient.) By default (if \code{coord.type} is missing or \code{NULL}), columns of numerical data are assumed to represent spatial coordinates, while other columns are assumed to be marks. } \value{ Usually an object of class \code{"ppx"}. If \code{simplify=TRUE} the result may be an object of class \code{"ppp"} or \code{"pp3"}. } \author{\adrian and \rolf } \seealso{ \code{\link{pp3}}, \code{\link{print.ppx}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m","m")) X val <- 20 * runif(4) E <- lapply(val, function(s) { rpoispp(s) }) hf <- hyperframe(t=val, e=as.listof(E)) Z <- ppx(data=hf, domain=c(0,1)) Z } \keyword{spatial} \keyword{datagen} spatstat/man/is.convex.Rd0000644000176200001440000000201213333543263015073 0ustar liggesusers\name{is.convex} \alias{is.convex} \title{Test Whether a Window is Convex} \description{ Determines whether a window is convex. } \usage{ is.convex(x) } \arguments{ \item{x}{ Window (object of class \code{"owin"}). 
} } \value{ Logical value, equal to \code{TRUE} if \code{x} is convex. } \details{ If \code{x} is a rectangle, the result is TRUE. If \code{x} is polygonal, the result is TRUE if \code{x} consists of a single polygon and this polygon is equal to the minimal convex hull of its vertices computed by \code{\link[grDevices]{chull}}. If \code{x} is a mask, the algorithm first extracts all boundary pixels of \code{x} using \code{\link{vertices}}. Then it computes the (polygonal) convex hull \eqn{K} of the boundary pixels. The result is TRUE if every boundary pixel lies within one pixel diameter of an edge of \eqn{K}. } \seealso{ \code{\link{owin}}, \code{\link{convexhull.xy}}, \code{\link{vertices}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/unique.ppp.Rd0000644000176200001440000000274113333543264015275 0ustar liggesusers\name{unique.ppp} \alias{unique.ppp} \alias{unique.ppx} \title{Extract Unique Points from a Spatial Point Pattern} \description{ Removes any points that are identical to other points in a spatial point pattern. } \usage{ \method{unique}{ppp}(x, \dots, warn=FALSE) \method{unique}{ppx}(x, \dots, warn=FALSE) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{duplicated.ppp}} or \code{\link{duplicated.data.frame}}. } \item{warn}{ Logical. If \code{TRUE}, issue a warning message if any duplicated points were found. } } \value{ Another point pattern object. } \details{ These are methods for the generic function \code{unique} for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}, or class \code{"ppx"}). This function removes duplicate points in \code{x}, and returns a point pattern. Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, \emph{and} their marks are the same (if they carry marks). 
This is the default rule: see \code{\link{duplicated.ppp}} for other options. } \seealso{ \code{\link{ppp.object}}, \code{\link{duplicated.ppp}}, \code{\link{multiplicity.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) unique(X) unique(X, rule="deldir") } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/plot.pppmatching.Rd0000644000176200001440000000434113515521657016462 0ustar liggesusers\name{plot.pppmatching} \alias{plot.pppmatching} \title{ Plot a Point Matching } \description{ Plot an object of class \code{"pppmatching"} which represents a matching of two planar point patterns. } \usage{ \method{plot}{pppmatching}(x, addmatch = NULL, main = NULL, \dots, adjust = 1) } \arguments{ \item{x}{ Point pattern matching object (class \code{"pppmatching"}) to be plotted. } \item{addmatch}{ Optional. A matrix indicating additional pairs of points that should be matched. See Details. } \item{main}{ Main title for the plot. } \item{\dots}{ Additional arguments passed to other plot methods. } \item{adjust}{ Adjustment factor for the widths of line segments. A positive number. } } \details{ The object \code{x} represents a matching found between two point patterns \code{X} and \code{Y}. The matching may be incomplete. See \code{\link{pppmatching.object}} for further description. This function plots the matching by drawing the two point patterns \code{X} and \code{Y} as red and blue dots respectively, and drawing line segments between each pair of matched points. The width of the line segments is proportional to the strength of matching. The proportionality constant can be adjusted using the argument \code{adjust}. Additional graphics arguments \code{\dots} control the plotting of the window (and are passed to \code{\link{plot.owin}}) and the plotting of the line segments (and are passed to \code{\link{plot.psp}}, \code{\link{plot.linim}} and ultimately to the base graphics function \code{\link[graphics]{polygon}}). 
If \code{addmatch[i,j] > 0} then a light grey line segment will be drawn between \code{X[i]} and \code{Y[j]}.
The set covariance is computed using the Fast Fourier Transform, unless \code{W} is a rectangle, when an exact formula is used. If the argument \code{V} is present, then \code{setcov(W,V)} computes the set \emph{cross-covariance} function \eqn{C(x)} defined for each vector \eqn{x} as the area of the intersection between \eqn{W} and \eqn{V+x}. } \seealso{ \code{\link{imcov}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ w <- owin(c(0,1),c(0,1)) v <- setcov(w) plot(v) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Kmodel.kppm.Rd0000644000176200001440000000365513333543262015355 0ustar liggesusers\name{Kmodel.kppm} \alias{Kmodel.kppm} \alias{pcfmodel.kppm} \title{K Function or Pair Correlation Function of Cluster Model or Cox model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a cluster point process model or Cox point process model. } \usage{ \method{Kmodel}{kppm}(model, \dots) \method{pcfmodel}{kppm}(model, \dots) } \arguments{ \item{model}{ A fitted cluster point process model (object of class \code{"kppm"}) typically obtained from the model-fitting algorithm \code{\link{kppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. In particular this is possible for a fitted cluster point process model (object of class \code{"kppm"} obtained from \code{\link{kppm}}). The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"kppm"}. The return value is a \code{function} in the \R language, which takes one argument \code{r}. 
Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{kppm}} to fit cluster models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. } \examples{ data(redwood) fit <- kppm(redwood, ~x, "MatClust") K <- Kmodel(fit) K(c(0.1, 0.2)) curve(K(x), from=0, to=0.25) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/distmap.owin.Rd0000644000176200001440000000556413333543263015612 0ustar liggesusers\name{distmap.owin} \alias{distmap.owin} \title{Distance Map of Window} \description{ Computes the distance from each pixel to the nearest point in the given window. } \usage{ \method{distmap}{owin}(X, \dots, discretise=FALSE, invert=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to control pixel resolution. } \item{discretise}{ Logical flag controlling the choice of algorithm when \code{X} is a polygonal window. See Details. } \item{invert}{ If \code{TRUE}, compute the distance transform of the complement of the window. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has an attribute \code{"bdry"} which is a pixel image. } \details{ The ``distance map'' of a window \eqn{W} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{W}. This function computes the distance map of the window \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest pixel in \code{X}. 
Additionally, the return value has an attribute \code{"bdry"} which is also a pixel image. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. If \code{X} is a binary pixel mask, the distance values computed are not the usual Euclidean distances. Instead the distance between two pixels is measured by the length of the shortest path connecting the two pixels. A path is a series of steps between neighbouring pixels (each pixel has 8 neighbours). This is the standard `distance transform' algorithm of image processing (Rosenfeld and Kak, 1968; Borgefors, 1986). If \code{X} is a polygonal window, then exact Euclidean distances will be computed if \code{discretise=FALSE}. If \code{discretise=TRUE} then the window will first be converted to a binary pixel mask and the discrete path distances will be computed. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. This function is a method for the generic \code{\link{distmap}}. } \seealso{ \code{\link{distmap}}, \code{\link{distmap.ppp}}, \code{\link{distmap.psp}} } \examples{ data(letterR) U <- distmap(letterR) \dontrun{ plot(U) plot(attr(U, "bdry")) } } \references{ Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Rosenfeld, A. and Pfalz, J.L. Distance functions on digital pictures. \emph{Pattern Recognition} \bold{1} (1968) 33-61. } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/concatxy.Rd0000644000176200001440000000225513333543263015020 0ustar liggesusers\name{concatxy} \alias{concatxy} \title{Concatenate x,y Coordinate Vectors} \description{ Concatenate any number of pairs of \code{x} and \code{y} coordinate vectors. } \usage{ concatxy(\dots) } \arguments{ \item{\dots}{ Any number of arguments, each of which is a structure containing elements \code{x} and \code{y}. 
} } \value{ A list with two components \code{x} and \code{y}, which are the concatenations of all the corresponding \code{x} and \code{y} vectors in the argument list. } \details{ This function can be used to superimpose two or more point patterns of unmarked points (but see also \code{\link{superimpose}} which is recommended). It assumes that each of the arguments in \code{\dots} is a structure containing (at least) the elements \code{x} and \code{y}. It concatenates all the \code{x} elements into a vector \code{x}, and similarly for \code{y}, and returns these concatenated vectors. } \seealso{ \code{\link{superimpose}}, \code{\link{quadscheme}} } \examples{ dat <- runifrect(30) xy <- list(x=runif(10),y=runif(10)) new <- concatxy(dat, xy) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/intensity.lpp.Rd0000644000176200001440000000203713333543263016006 0ustar liggesusers\name{intensity.lpp} \alias{intensity.lpp} \title{ Empirical Intensity of Point Pattern on Linear Network } \description{ Computes the average number of points per unit length in a point pattern on a linear network. } \usage{ \method{intensity}{lpp}(X, ...) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}} It computes the empirical intensity of a point pattern on a linear network (object of class \code{"lpp"}), i.e. the average density of points per unit length. If the point pattern is multitype, the intensities of the different types are computed separately. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). 
} \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ intensity(chicago) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmhmodel.Rd0000644000176200001440000000573213430245337015001 0ustar liggesusers\name{rmhmodel} \alias{rmhmodel} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ rmhmodel(...) } \arguments{ \item{\dots}{Arguments specifying the point process model in some format. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. The algorithm requires the model to be specified in a particular format: an object of class \code{"rmhmodel"}. The function \code{\link{rmhmodel}} takes a description of a point process model in some other format, and converts it into an object of class \code{"rmhmodel"}. It also checks that the parameters of the model are valid. The function \code{\link{rmhmodel}} is generic, with methods for \describe{ \item{fitted point process models:}{ an object of class \code{"ppm"}, obtained by a call to the model-fitting function \code{\link{ppm}}. See \code{\link{rmhmodel.ppm}}. } \item{lists:}{ a list of parameter values in a certain format. See \code{\link{rmhmodel.list}}. } \item{default:}{ parameter values specified as separate arguments to \code{\dots}. See \code{\link{rmhmodel.default}}. } } } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. 
\emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmhmodel.ppm}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.list}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{Triplets}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} \code{\link{Penttinen}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/intensity.ppp.Rd0000644000176200001440000000536013333543263016014 0ustar liggesusers\name{intensity.ppp} \alias{intensity.ppp} \alias{intensity.splitppp} \title{ Empirical Intensity of Point Pattern } \description{ Computes the average number of points per unit area in a point pattern dataset. } \usage{ \method{intensity}{ppp}(X, ..., weights=NULL) \method{intensity}{splitppp}(X, ..., weights=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{weights}{ Optional. Numeric vector of weights attached to the points of \code{X}. Alternatively, an \code{expression} which can be evaluated to give a vector of weights. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a point pattern (object of class \code{"ppp"}), i.e. the average density of points per unit area. 
If the point pattern is multitype, the intensities of the different types are computed separately. Note that the intensity will be computed as the number of points per square unit, based on the unit of length for \code{X}, given by \code{unitname(X)}. If the unit of length is a strange multiple of a standard unit, like \code{5.7 metres}, then it can be converted to the standard unit using \code{\link{rescale}}. See the Examples. If \code{weights} are given, then the intensity is computed as the total \emph{weight} per square unit. The argument \code{weights} should be a numeric vector of weights for each point of \code{X} (weights may be negative or zero). Alternatively \code{weights} can be an \code{expression} which will be evaluated for the dataset to yield a vector of weights. The expression may involve the Cartesian coordinates \eqn{x,y} of the points, and the marks of the points, if any. Variable names permitted in the expression include \code{x} and \code{y}, the name \code{marks} if \code{X} has a single column of marks, the names of any columns of marks if \code{X} has a data frame of marks, and the names of constants or functions that exist in the global environment. See the Examples. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). 
} \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppm}} } \examples{ japanesepines intensity(japanesepines) unitname(japanesepines) intensity(rescale(japanesepines)) intensity(amacrine) intensity(split(amacrine)) # numeric vector of weights volumes <- with(marks(finpines), (pi/4) * height * diameter^2) intensity(finpines, weights=volumes) # expression for weights intensity(finpines, weights=expression((pi/4) * height * diameter^2)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/idw.Rd0000644000176200001440000001157013406057617013757 0ustar liggesusers\name{idw} \alias{idw} \title{Inverse-distance weighted smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations using inverse-distance weighting. } \usage{ idw(X, power=2, at=c("pixels", "points"), ..., se=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{power}{Numeric. Power of distance used in the weighting.} \item{at}{ Character string specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). String is partially matched. } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution of the result.} \item{se}{ Logical value specifying whether to calculate a standard error. } } \details{ This function performs spatial smoothing of numeric values observed at a set of irregular locations. Smoothing is performed by inverse distance weighting. 
If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is \deqn{ g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i} }{ g(u) = (sum of w[i] * v[i])/(sum of w[i]) } where the weights are the inverse \eqn{p}-th powers of distance, \deqn{ w_i = \frac 1 {d(u,x_i)^p} }{ w[i] = 1/d(u,x[i])^p } where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])} is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame. Then the smoothing procedure is applied to each column of marks. If \code{at="pixels"} (the default), the smoothed mark value is calculated at a grid of pixels, and the result is a pixel image. The arguments \code{\dots} control the pixel resolution. See \code{\link{as.mask}}. If \code{at="points"}, the smoothed mark values are calculated at the data points only, using a leave-one-out rule (the mark value at a data point is excluded when calculating the smoothed value for that point). An estimate of standard error is also calculated, if \code{se=TRUE}. The calculation assumes that the data point locations are fixed, that is, the standard error only takes into account the variability in the mark values, and not the variability due to randomness of the data point locations. An alternative to inverse-distance weighting is kernel smoothing, which is performed by \code{\link{Smooth.ppp}}. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. 
\item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } If \code{se=TRUE}, then the result is a list with two entries named \code{estimate} and \code{SE}, which each have the format described above. } \seealso{ \code{\link{density.ppp}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{Smooth.ppp}} for kernel smoothing and \code{\link{nnmark}} for nearest-neighbour interpolation. To perform other kinds of interpolation, see also the \code{akima} package. } \examples{ # data frame of marks: trees marked by diameter and height plot(idw(finpines)) idw(finpines, at="points")[1:5,] plot(idw(finpines, se=TRUE)$SE) idw(finpines, at="points", se=TRUE)$SE[1:5, ] } \references{ Shepard, D. (1968) A two-dimensional interpolation function for irregularly-spaced data. \emph{Proceedings of the 1968 ACM National Conference}, 1968, pages 517--524. DOI: 10.1145/800186.810616 } \author{ \spatstatAuthors. Variance calculation by Andrew P Wheeler with modifications by Adrian Baddeley. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/zclustermodel.Rd0000644000176200001440000000176413333543265016072 0ustar liggesusers\name{zclustermodel} \alias{zclustermodel} \title{ Cluster Point Process Model } \description{ Experimental code. Creates an object representing a cluster point process model. 
Typically used for theoretical calculations about such a model. } \usage{ zclustermodel(name = "Thomas", \dots, mu, kappa, scale) } \arguments{ \item{name}{ Name of the cluster process. One of \code{"Thomas"}, \code{"MatClust"}, \code{"VarGamma"} or \code{"Cauchy"}. } \item{\dots}{ Other arguments needed for the model. } \item{mu}{ Mean cluster size. A single number, or a pixel image. } \item{kappa}{ Parent intensity. A single number. } \item{scale}{ Cluster scale parameter of the model. } } \details{ Experimental. } \value{ Object of the experimental class \code{"zclustermodel"}. } \author{ \adrian } \seealso{ \code{\link{methods.zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) } \keyword{spatial} \keyword{models} spatstat/man/methods.box3.Rd0000644000176200001440000000254113333543263015503 0ustar liggesusers\name{methods.box3} \Rdversion{1.1} \alias{methods.box3} %DoNotExport \alias{print.box3} \alias{unitname.box3} \alias{unitname<-.box3} \title{ Methods for Three-Dimensional Box } \description{ Methods for class \code{"box3"}. } \usage{ \method{print}{box3}(x, ...) \method{unitname}{box3}(x) \method{unitname}{box3}(x) <- value } \arguments{ \item{x}{ Object of class \code{"box3"} representing a three-dimensional box. } \item{\dots}{ Other arguments passed to \code{print.default}. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}} and \code{\link{unitname}} for the class \code{"box3"} of three-dimensional boxes. The \code{print} method prints a description of the box, while the \code{unitname} method extracts the name of the unit of length in which the box coordinates are expressed. } \value{ For \code{print.box3} the value is \code{NULL}. For \code{unitname.box3} an object of class \code{"units"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{box3}}, \code{\link{print}}, \code{\link{unitname}} } \examples{ X <- box3(c(0,10),c(0,10),c(0,5), unitname=c("metre", "metres")) X unitname(X) # Northern European usage unitname(X) <- "meter" } \keyword{spatial} \keyword{methods} spatstat/man/as.tess.Rd0000644000176200001440000000476013333543262014552 0ustar liggesusers\name{as.tess} \alias{as.tess} \alias{as.tess.tess} \alias{as.tess.im} \alias{as.tess.owin} \alias{as.tess.quadratcount} \alias{as.tess.quadrattest} \alias{as.tess.list} \title{Convert Data To Tessellation} \description{ Converts data specifying a tessellation, in any of several formats, into an object of class \code{"tess"}. } \usage{ as.tess(X) \method{as.tess}{tess}(X) \method{as.tess}{im}(X) \method{as.tess}{owin}(X) \method{as.tess}{quadratcount}(X) \method{as.tess}{quadrattest}(X) \method{as.tess}{list}(X) } \arguments{ \item{X}{Data to be converted to a tessellation.} } \value{ An object of class \code{"tess"} specifying a tessellation. } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. This function converts data in any of several formats into an object of class \code{"tess"} for use by the \pkg{spatstat} package. The argument \code{X} may be \itemize{ \item an object of class \code{"tess"}. The object will be stripped of any extraneous attributes and returned. \item a pixel image (object of class \code{"im"}) with pixel values that are logical or factor values. Each level of the factor will determine a tile of the tessellation. \item a window (object of class \code{"owin"}). The result will be a tessellation consisting of a single tile. \item a set of quadrat counts (object of class \code{"quadratcount"}) returned by the command \code{\link{quadratcount}}. 
The quadrats used to generate the counts will be extracted and returned as a tessellation. \item a quadrat test (object of class \code{"quadrattest"}) returned by the command \code{\link{quadrat.test}}. The quadrats used to perform the test will be extracted and returned as a tessellation. \item a list of windows (objects of class \code{"owin"}) giving the tiles of the tessellation. } The function \code{as.tess} is generic, with methods for various classes, as listed above. } \seealso{ \code{\link{tess}} } \examples{ # pixel image v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] as.tess(v) # quadrat counts data(nztrees) qNZ <- quadratcount(nztrees, nx=4, ny=3) as.tess(qNZ) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Kmulti.Rd0000644000176200001440000001700513333543262014433 0ustar liggesusers\name{Kmulti} \alias{Kmulti} \title{ Marked K-Function } \description{ For a marked point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}. } \usage{ Kmulti(X, I, J, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. 
} \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The function \code{Kmulti} generalises \code{\link{Kest}} (for unmarked point patterns) and \code{\link{Kdot}} and \code{\link{Kcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. The multitype \eqn{K} function is defined so that \eqn{\lambda_J K_{IJ}(r)}{lambda[J] KIJ(r)} equals the expected number of additional random points of \eqn{X_J}{X[J]} within a distance \eqn{r} of a typical point of \eqn{X_I}{X[I]}. 
Here \eqn{\lambda_J}{lambda[J]} is the intensity of \eqn{X_J}{X[J]} i.e. the expected number of points of \eqn{X_J}{X[J]} per unit area. The function \eqn{K_{IJ}}{KIJ} is determined by the second order moment properties of \eqn{X}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). 
This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular and polygonal windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{K_{IJ}}{KIJ} is not necessarily differentiable. The border correction (reduced sample) estimator of \eqn{K_{IJ}}{KIJ} used here is pointwise approximately unbiased, but need not be a nondecreasing function of \eqn{r}, while the true \eqn{K_{IJ}}{KIJ} must be nondecreasing. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # Longleaf Pine data: marks represent diameter trees <- longleaf \testonly{ trees <- trees[seq(1,npoints(trees), by=50), ] } K <- Kmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(K) # functions determining subsets f1 <- function(X) { marks(X) <= 15 } f2 <- function(X) { marks(X) >= 15 } K <- Kmulti(trees, f1, f2) \testonly{ rm(trees) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/dg.progress.Rd0000644000176200001440000001472513333543263015432 0ustar liggesusers\name{dg.progress} \alias{dg.progress} \title{ Progress Plot of Dao-Genton Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Dao-Genton test for a spatial point pattern. } \usage{ dg.progress(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, nrank = 1, alpha, leaveout=1, interpolate = FALSE, rmin=0, savefuns = FALSE, savepatterns = FALSE, verbose=TRUE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{alternative} to specify one-sided or two-sided envelopes. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. 
The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{alpha}{ Optional. The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{savefuns}{ Logical value indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical value indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. 
The command \code{dg.progress} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the test acceptance region (grey shading). If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. 
} \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{dg.test}}, \code{\link{dclf.progress}} } \examples{ ns <- if(interactive()) 19 else 5 plot(dg.progress(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat/man/BadGey.Rd0000644000176200001440000001141413547301023014311 0ustar liggesusers\name{BadGey} \alias{BadGey} \title{Hybrid Geyer Point Process Model} \description{ Creates an instance of the Baddeley-Geyer point process model, defined as a hybrid of several Geyer interactions. The model can then be fitted to point pattern data. } \usage{ BadGey(r, sat) } \arguments{ \item{r}{vector of interaction radii} \item{sat}{ vector of saturation parameters, or a single common value of saturation parameter } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is Baddeley's generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to a process with multiple interaction distances. 
The BadGey point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], \ldots, r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],\ldots,s[k]}, intensity parameter \eqn{\beta}{\beta} and interaction parameters \eqn{\gamma_1,\ldots,gamma_k}{\gamma[1], \ldots, \gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots gamma_k^{v_k(x_i,X)} }{ \beta \gamma[1]^v(1, x_i, X) \ldots \gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x[i], X) = min(s[j], t(j, x[i], X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie within a distance \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. \code{BadGey} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{BadGey()}. See the examples below. The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters that are applied to the point counts \eqn{t_j(x_i, X)}{t(j,x[i],X)}. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} is applied to the count of points within a distance \code{r[1]}, and \code{sat[2]} to the count of points within a distance \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every count. 
Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x[i], X) = t(j, x[i], X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{\gamma} obtained from \code{\link{BadGey}} have a complicated relationship to the interaction parameters \eqn{\gamma}{\gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{SatPiece}}, \code{\link{Hybrid}} } \section{Hybrids}{ A \sQuote{hybrid} interaction is one which is built by combining several different interactions (Baddeley et al, 2013). The \code{BadGey} interaction can be described as a hybrid of several \code{\link{Geyer}} interactions. The \code{\link{Hybrid}} command can be used to build hybrids of any interactions. If the \code{\link{Hybrid}} operator is applied to several \code{\link{Geyer}} models, the result is equivalent to a \code{BadGey} model. This can be useful for incremental model selection. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. 
\url{http://www.jstatsoft.org/v55/i11/} } \examples{ BadGey(c(0.1,0.2), c(1,1)) # prints a sensible description of itself BadGey(c(0.1,0.2), 1) # fit a stationary Baddeley-Geyer model ppm(cells ~1, BadGey(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend \dontrun{ ppm(cells ~polynom(x,y,3), BadGey(c(0.07, 0.1, 0.13), 2)) } } \author{ \adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat/man/connected.ppp.Rd0000644000176200001440000000401513333543263015724 0ustar liggesusers\name{connected.ppp} \Rdversion{1.1} \alias{connected.ppp} \alias{connected.pp3} \title{ Connected Components of a Point Pattern } \description{ Finds the topologically-connected components of a point pattern, when all pairs of points closer than a threshold distance are joined. } \usage{ \method{connected}{ppp}(X, R, \dots) \method{connected}{pp3}(X, R, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"pp3"}). } \item{R}{ Threshold distance. Pairs of points closer than \code{R} units apart will be joined together. } \item{\dots}{ Other arguments, not recognised by these methods. } } \details{ This function can be used to identify clumps of points in a point pattern. The function \code{connected} is generic. This file documents the methods for point patterns in dimension two or three (objects of class \code{"ppp"} or \code{"pp3"}). The point pattern \code{X} is first converted into an abstract graph by joining every pair of points that lie closer than \code{R} units apart. Then the connected components of this graph are identified. Two points in \code{X} belong to the same connected component if they can be reached by a series of steps between points of \code{X}, each step being shorter than \code{R} units in length. The result is a vector of labels for the points of \code{X} where all the points in a connected component have the same label. 
} \value{ A point pattern, equivalent to \code{X} except that the points have factor-valued marks, with levels corresponding to the connected components. } \seealso{ \code{\link{connected.im}}, \code{\link{im.object}}, \code{\link{tess}} } \examples{ Y <- connected(redwoodfull, 0.1) if(interactive()) { plot(Y, cols=1:length(levels(marks(Y))), main="connected(redwoodfull, 0.1)") } X <- osteo$pts[[1]] Z <- connected(X, 32) if(interactive()) { plot(Z, col=marks(Z), main="") } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/diameter.boxx.Rd0000644000176200001440000000346113333543263015741 0ustar liggesusers\name{diameter.boxx} \Rdversion{1.1} \alias{diameter.boxx} \alias{volume.boxx} \alias{shortside.boxx} \alias{sidelengths.boxx} \alias{eroded.volumes.boxx} \title{ Geometrical Calculations for Multi-Dimensional Box } \description{ Calculates the volume, diameter, shortest side, side lengths, or eroded volume of a multi-dimensional box. } \usage{ \method{diameter}{boxx}(x) \method{volume}{boxx}(x) \method{shortside}{boxx}(x) \method{sidelengths}{boxx}(x) \method{eroded.volumes}{boxx}(x, r) } \arguments{ \item{x}{ Multi-dimensional box (object of class \code{"boxx"}). } \item{r}{ Numeric value or vector of numeric values for which eroded volumes should be calculated. } } \details{ \code{diameter.boxx}, \code{volume.boxx} and \code{shortside.boxx} compute the diameter, volume and shortest side length of the box. \code{sidelengths.boxx} returns the lengths of each side of the box. \code{eroded.volumes.boxx} computes, for each entry \code{r[i]}, the volume of the smaller box obtained by removing a slab of thickness \code{r[i]} from each face of the box. This smaller box is the subset consisting of points that lie at least \code{r[i]} units away from the boundary of the box. } \value{ For \code{diameter.boxx}, \code{shortside.boxx} and \code{volume.boxx}, a single numeric value. 
For \code{sidelengths.boxx}, a numeric vector of length equal to the number of spatial dimensions. For \code{eroded.volumes.boxx}, a numeric vector of the same length as \code{r}. } \author{\adrian and \rolf } \seealso{ \code{\link{boxx}} } \examples{ X <- boxx(c(0,10),c(0,10),c(0,5),c(0,2)) diameter(X) volume(X) shortside(X) sidelengths(X) hd <- shortside(X)/2 eroded.volumes(X, seq(0,hd, length=10)) } \keyword{spatial} \keyword{math} spatstat/man/miplot.Rd0000644000176200001440000000405513333543263014474 0ustar liggesusers\name{miplot} \alias{miplot} \title{Morisita Index Plot} \description{ Displays the Morisita Index Plot of a spatial point pattern. } \usage{ miplot(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} } \details{ Morisita (1959) defined an index of spatial aggregation for a spatial point pattern based on quadrat counts. The spatial domain of the point pattern is first divided into \eqn{Q} subsets (quadrats) of equal size and shape. The numbers of points falling in each quadrat are counted. Then the Morisita Index is computed as \deqn{ \mbox{MI} = Q \frac{\sum_{i=1}^Q n_i (n_i - 1)}{N(N-1)} }{ MI = Q * sum(n[i] (n[i]-1))/(N(N-1)) } where \eqn{n_i}{n[i]} is the number of points falling in the \eqn{i}-th quadrat, and \eqn{N} is the total number of points. If the pattern is completely random, \code{MI} should be approximately equal to 1. Values of \code{MI} greater than 1 suggest clustering. The \emph{Morisita Index plot} is a plot of the Morisita Index \code{MI} against the linear dimension of the quadrats. The point pattern dataset is divided into \eqn{2 \times 2}{2 * 2} quadrats, then \eqn{3 \times 3}{3 * 3} quadrats, etc, and the Morisita Index is computed each time. This plot is an attempt to discern different scales of dependence in the point pattern data. } \value{ None. } \references{ M. 
Morisita (1959) Measuring of the dispersion of individuals and analysis of the distributional patterns. Memoirs of the Faculty of Science, Kyushu University, Series E: Biology. \bold{2}: 215--235.
A value acceptable to the function \code{\link{unitname<-}} } } \details{ These are methods for the generic functions \code{\link{shift}}, \code{\link{rotate}}, \code{\link{reflect}}, \code{\link{affine}}, \code{\link{rescale}}, \code{\link{scalardilate}} and \code{\link{flipxy}} for the class of layered objects. A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. See \code{\link{layered}}. } \value{ Another object of class \code{"layered"}. } \author{\adrian and \rolf } \seealso{ \code{\link{layered}} } \examples{ L <- layered(letterR, runifpoint(20, letterR)) plot(L) plot(rotate(L, pi/4)) } \keyword{spatial} \keyword{methods} spatstat/man/model.matrix.slrm.Rd0000644000176200001440000000325113333543263016544 0ustar liggesusers\name{model.matrix.slrm} \alias{model.matrix.slrm} \title{Extract Design Matrix from Spatial Logistic Regression Model} \description{ This function extracts the design matrix of a spatial logistic regression model. } \usage{ \method{model.matrix}{slrm}(object, ..., keepNA=TRUE) } \arguments{ \item{object}{ A fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{keepNA}{ Logical. Determines whether rows containing \code{NA} values will be deleted or retained. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a spatial logistic regression. The \code{object} must be a fitted spatial logistic regression (object of class \code{"slrm"}). Such objects are produced by the model-fitting function \code{\link{slrm}}. Usually the result is a matrix with one column for every constructed covariate in the model, and one row for every pixel in the grid used to fit the model. 
If \code{object} was fitted using split pixels (by calling \code{\link{slrm}} using the argument \code{splitby}) then the matrix has one row for every pixel or half-pixel. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. } \author{\adrian and \rolf } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{slrm}}. } \examples{ fit <- slrm(japanesepines ~x) head(model.matrix(fit)) # matrix with two columns: '(Intercept)' and 'x' } \keyword{spatial} \keyword{models} spatstat/man/tweak.colourmap.Rd0000644000176200001440000000277213333543264016310 0ustar liggesusers\name{tweak.colourmap} \alias{tweak.colourmap} \title{ Change Colour Values in a Colour Map } \description{ Assign new colour values to some of the entries in a colour map. } \usage{ tweak.colourmap(m, col, ..., inputs=NULL, range=NULL) } \arguments{ \item{m}{ A colour map (object of class \code{"colourmap"}). } \item{inputs}{ Input values to the colour map, to be assigned new colours. Incompatible with \code{range}. } \item{range}{ Numeric vector of length 2 specifying a range of numerical values which should be assigned a new colour. Incompatible with \code{inputs}. } \item{col}{ Replacement colours for the specified \code{inputs} or the specified \code{range} of values. } \item{\dots}{Other arguments are ignored.} } \details{ This function changes the colour map \code{m} by assigning new colours to each of the input values specified by \code{inputs}, or by assigning a single new colour to the range of input values specified by \code{range}. The modified colour map is returned. } \value{ Another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link{colouroutputs}}, \code{\link[spatstat:colourtools]{colourtools}}. 
} \examples{ co <- colourmap(rainbow(32), range=c(0,1)) plot(tweak.colourmap(co, inputs=c(0.5, 0.6), "white")) plot(tweak.colourmap(co, range=c(0.5,0.6), "white")) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{color} spatstat/man/rshift.ppp.Rd0000644000176200001440000001606113333543264015266 0ustar liggesusers\name{rshift.ppp} \alias{rshift.ppp} \title{Randomly Shift a Point Pattern} \description{ Randomly shifts the points of a point pattern. } \usage{ \method{rshift}{ppp}(X, \dots, which=NULL, group) } \arguments{ \item{X}{Point pattern to be subjected to a random shift. An object of class \code{"ppp"} } \item{\dots}{ Arguments that determine the random shift. See Details. } \item{group}{ Optional. Factor specifying a grouping of the points of \code{X}, or \code{NULL} indicating that all points belong to the same group. Each group will be shifted together, and separately from other groups. By default, points in a marked point pattern are grouped according to their mark values, while points in an unmarked point pattern are treated as a single group. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This operation randomly shifts the locations of the points in a point pattern. The function \code{rshift} is generic. This function \code{rshift.ppp} is the method for point patterns. The most common use of this function is to shift the points in a multitype point pattern. By default, points of the same type are shifted in parallel (i.e. points of a common type are shifted by a common displacement vector), and independently of other types. This is useful for testing the hypothesis of independence of types (the null hypothesis that the sub-patterns of points of each type are independent point processes). 
In general the points of \code{X} are divided into groups, then the points within a group are shifted by a common random displacement vector. Different groups of points are shifted independently. The grouping is determined as follows: \itemize{ \item If the argument \code{group} is present, then this determines the grouping. \item Otherwise, if \code{X} is a multitype point pattern, the marks determine the grouping. \item Otherwise, all points belong to a single group. } The argument \code{group} should be a factor, of length equal to the number of points in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all points of \code{X} belong to a single group. By default, every group of points will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} (for example, a vector of types in a multitype pattern) indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data points are shifted, is generated at random. Parameters that control the randomisation and the handling of edge effects are passed through the \code{\dots} argument. They are \describe{ \item{radius,width,height}{ Parameters of the random shift vector. } \item{edge}{ String indicating how to deal with edges of the pattern. Options are \code{"torus"}, \code{"erode"} and \code{"none"}. } \item{clip}{ Optional. Window to which the final point pattern should be clipped. } } If the window is a rectangle, the \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. 
Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted point lies outside the window of \code{X}. Options are: \describe{ \item{"none":}{ Points shifted outside the window of \code{X} simply disappear. } \item{"torus":}{ Toroidal or periodic boundary. Treat opposite edges of the window as identical, so that a point which disappears off the right-hand edge will re-appear at the left-hand edge. This is called a ``toroidal shift'' because it makes the rectangle topologically equivalent to the surface of a torus (doughnut). The window must be a rectangle. Toroidal shifts are undefined if the window is non-rectangular. } \item{"erode":}{ Clip the point pattern to a smaller window. 
If the random displacements are generated by a radial mechanism (see above), then the window of \code{X} is eroded by a distance equal to the value of the argument \code{radius}, using \code{\link{erosion}}. If the random displacements are generated by a rectangular mechanism, then the window of \code{X} is (if it is not rectangular) eroded by a distance \code{max(height,width)} using \code{\link{erosion}}; or (if it is rectangular) trimmed by a margin of width \code{width} at the left and right sides and trimmed by a margin of height \code{height} at the top and bottom. The rationale for this is that the clipping window is the largest window for which edge effects can be ignored. } } The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.psp}} } \examples{ data(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(amacrine) # shift "on" points and leave "off" points fixed X <- rshift(amacrine, which="on") # shift all points simultaneously X <- rshift(amacrine, group=NULL) # maximum displacement distance 0.1 units X <- rshift(amacrine, radius=0.1) # shift with erosion X <- rshift(amacrine, radius=0.1, edge="erode") } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/OrdThresh.Rd0000644000176200001440000000336313333543262015072 0ustar liggesusers\name{OrdThresh} \alias{OrdThresh} \title{Ord's Interaction model} \description{ Creates an instance of Ord's point process model which can then be fitted to point pattern data. } \usage{ OrdThresh(r) } \arguments{ \item{r}{Positive number giving the threshold value for Ord's model.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. 
Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. The function \eqn{g} is simply \eqn{g(a) = 1} if \eqn{a \ge r}{a >= r} and \eqn{g(a) = \gamma < 1}{g(a) = gamma < 1} if \eqn{a < r}{a < r}, where \eqn{r} is called the threshold value. This function creates an instance of Ord's model with a given value of \eqn{r}. It can then be fitted to point process data using \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/boundingcircle.Rd0000644000176200001440000000423413333543262016155 0ustar liggesusers\name{boundingcircle} \alias{boundingradius} \alias{boundingradius.owin} \alias{boundingradius.ppp} \alias{boundingcentre} \alias{boundingcircle} \alias{boundingcentre.owin} \alias{boundingcircle.owin} \alias{boundingcentre.ppp} \alias{boundingcircle.ppp} \title{ Smallest Enclosing Circle } \description{ Find the smallest circle enclosing a spatial window or other object. Return its radius, or the location of its centre, or the circle itself. 
The result of \code{boundingcircle} is a window representing the bounding circle.
} \usage{ stienen(X, \dots, bg = "grey", border = list(bg = NULL)) stienenSet(X, edge=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} to control the plot. } \item{bg}{ Fill colour for circles. } \item{border}{ Either a list of arguments passed to \code{\link{plot.ppp}} to control the display of circles at the border of the diagram, or the value \code{FALSE} indicating that the border circles should not be plotted. } \item{edge}{ Logical value indicating whether to include the circles at the border of the diagram. } } \details{ The Stienen diagram of a point pattern (Stienen, 1982) is formed by drawing a circle around each point of the pattern, with diameter equal to the nearest-neighbour distance for that point. These circles do not overlap. If two points are nearest neighbours of each other, then the corresponding circles touch. \code{stienenSet(X)} computes the union of these circles and returns it as a window (object of class \code{"owin"}). \code{stienen(X)} generates a plot of the Stienen diagram of the point pattern \code{X}. By default, circles are shaded in grey if they lie inside the window of \code{X}, and are not shaded otherwise. } \value{ The plotting function \code{stienen} returns \code{NULL}. The return value of \code{stienenSet} is a window (object of class \code{"owin"}). } \references{ Stienen, H. (1982) \emph{Die Vergroeberung von Karbiden in reinen Eisen-Kohlenstoff Staehlen}. Dissertation, RWTH Aachen. 
} \seealso{ \code{\link{nndist}}, \code{\link{plot.ppp}} } \examples{ Y <- stienenSet(cells) stienen(redwood) stienen(redwood, border=list(bg=NULL, lwd=2, cols="red")) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} \keyword{manip} spatstat/man/DiggleGatesStibbard.Rd0000644000176200001440000000504413333543262017020 0ustar liggesusers\name{DiggleGatesStibbard} \alias{DiggleGatesStibbard} \title{Diggle-Gates-Stibbard Point Process Model} \description{ Creates an instance of the Diggle-Gates-Stibbard point process model which can then be fitted to point pattern data. } \usage{ DiggleGatesStibbard(rho) } \arguments{ \item{rho}{Interaction range} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Diggle-Gates-Stibbard process with interaction range \code{rho}. } \details{ Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((\pi * d)/(2 * \rho)) } for \eqn{d < \rho}{d < \rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d \ge \rho}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Diggle-Gates-Stibbard pairwise interaction is yielded by the function \code{DiggleGatesStibbard()}. See the examples below. Note that this model does not have any regular parameters (as explained in the section on Interaction Parameters in the help file for \code{\link{ppm}}). The parameter \eqn{\rho} is not estimated by \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{DiggleGratton}}, \code{\link{rDGS}}, \code{\link{ppm.object}} } \references{ Baddeley, A. 
Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Geyer, C.J. and Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
} \item{v}{ A numeric vector of length 3, giving the RGB values of a single colour, or a 3-column matrix giving the RGB values of several colours. Alternatively a vector of length 4 or a matrix with 4 columns, giving the RGB and alpha (transparency) values. } \item{red,green,blue,alpha}{ Arguments acceptable to \code{\link[grDevices]{rgb}} determining the red, green, blue channels and optionally the alpha (transparency) channel. Note that \code{red} can also be a matrix with 3 \bold{rows} giving the RGB values, or a matrix with 4 rows giving RGB and alpha values. } \item{maxColorValue}{ Number giving the maximum possible value for the entries in \code{v} or \code{red,green,blue,alpha}. } \item{weights}{ Numeric vector of length 3 giving relative weights for the red, green, and blue channels respectively. } \item{transparent}{ Logical value indicating whether transparent colours should be converted to transparent grey values (\code{transparent=TRUE}) or converted to opaque grey values (\code{transparent=FALSE}, the default). } \item{fraction}{ Transparency fraction. Numerical value or vector of values between 0 and 1, giving the opaqueness of a colour. A fully opaque colour has \code{fraction=1}. } \item{length.out}{ Integer. Length of desired sequence. } \item{s}{ Saturation value (between 0 and 1). } } \details{ \code{is.colour(x)} can be applied to any kind of data \code{x} and returns \code{TRUE} if \code{x} can be interpreted as a colour or colours. The remaining functions expect data that can be interpreted as colours. \code{col2hex} converts colours specified in any format into their hexadecimal character codes. \code{rgb2hex} converts RGB colour values into their hexadecimal character codes. It is a very minor extension to \code{\link[grDevices]{rgb}}. Arguments to \code{rgb2hex} should be similar to arguments to \code{\link[grDevices]{rgb}}. \code{rgb2hsva} converts RGB colour values into HSV colour values including the alpha (transparency) channel. 
It is an extension of \code{\link[grDevices]{rgb2hsv}}. Arguments to \code{rgb2hsva} should be similar to arguments to \code{\link[grDevices]{rgb2hsv}}. \code{paletteindex} checks whether the colour or colours specified by \code{x} are available in the default palette returned by \code{\link[grDevices]{palette}()}. If so, it returns the index or indices of the colours in the palette. If not, it returns \code{NA}. \code{samecolour} decides whether two colours \code{x} and \code{y} are equivalent. \code{is.grey} determines whether each entry of \code{x} is a greyscale colour, and returns a logical vector. \code{to.grey} converts the colour data in \code{x} to greyscale colours. Alternatively \code{x} can be an object of class \code{"colourmap"} and \code{to.grey(x)} is the modified colour map. \code{to.opaque} converts the colours in \code{x} to opaque (non-transparent) colours, and \code{to.transparent} converts them to transparent colours with a specified transparency value. Note that \code{to.transparent(x,1)} is equivalent to \code{to.opaque(x)}. For \code{to.grey}, \code{to.opaque} and \code{to.transparent}, if all the data in \code{x} specifies colours from the standard palette, and if the result would be equivalent to \code{x}, then the result is identical to \code{x}. \code{to.saturated} converts each colour in \code{x} to its fully-saturated equivalent. For example, pink is mapped to red. Shades of grey are converted to black; white is unchanged. \code{complementarycolour} replaces each colour by its complementary colour in RGB space (the colour obtained by replacing RGB values \code{(r, g, b)} by \code{(255-r, 255-g, 255-b)}). The transparency value is not changed. Alternatively \code{x} can be an object of class \code{"colourmap"} and \code{complementarycolour(x)} is the modified colour map. \code{interp.colours} interpolates between each successive pair of colours in a sequence of colours, to generate a more finely-spaced sequence. 
It uses linear interpolation in HSV space (with hue represented as a two-dimensional unit vector). } \section{Warning}{ \code{paletteindex("green")} returns \code{NA} because the green colour in the default palette is called \code{"green3"}. } \value{ For \code{col2hex} and \code{rgb2hex} a character vector containing hexadecimal colour codes. For \code{to.grey}, \code{to.opaque} and \code{to.transparent}, either a character vector containing hexadecimal colour codes, or a value identical to the input \code{x}. For \code{rgb2hsva}, a matrix with 3 or 4 rows containing HSV colour values. For \code{paletteindex}, an integer vector, possibly containing \code{NA} values. For \code{samecolour} and \code{is.grey}, a logical value or logical vector. } \author{\adrian and \rolf } \seealso{ \code{\link[grDevices]{col2rgb}}, \code{\link[grDevices]{rgb2hsv}}, \code{\link[grDevices]{palette}}. See also the class of colour map objects in the \pkg{spatstat} package: \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link{tweak.colourmap}}. } \examples{ samecolour("grey", "gray") paletteindex("grey") col2hex("orange") to.grey("orange") to.saturated("orange") complementarycolour("orange") is.grey("lightgrey") is.grey(8) to.transparent("orange", 0.5) to.opaque("red") interp.colours(c("orange", "red", "violet"), 5) } \keyword{color} spatstat/man/project2set.Rd0000644000176200001440000000272013333543264015432 0ustar liggesusers\name{project2set} \alias{project2set} \title{ Find Nearest Point in a Region } \description{ For each data point in a point pattern \code{X}, find the nearest location in a given spatial region \code{W}. } \usage{ project2set(X, W, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution. 
} } \details{ The window \code{W} is first discretised as a binary mask using \code{\link{as.mask}}. For each data point \code{X[i]} in the point pattern \code{X}, the algorithm finds the nearest pixel in \code{W}. The result is a point pattern \code{Y} containing these nearest points, that is, \code{Y[i]} is the nearest point in \code{W} to the point \code{X[i]}. } \value{ A point pattern (object of class \code{"ppp"}) with the same number of points as \code{X} in the window \code{W}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{project2segment}}, \code{\link{nncross}} } \examples{ He <- heather$fine[owin(c(2.8, 7.4), c(4.0, 7.8))] plot(He, main="project2set") X <- runifpoint(4, erosion(complement.owin(He), 0.2)) points(X, col="red") Y <- project2set(X, He) points(Y, col="green") arrows(X$x, X$y, Y$x, Y$y, angle=15, length=0.2) } \keyword{spatial} \keyword{math} spatstat/man/cbind.hyperframe.Rd0000644000176200001440000000324613333543263016411 0ustar liggesusers\name{cbind.hyperframe} \alias{cbind.hyperframe} \alias{rbind.hyperframe} \title{ Combine Hyperframes by Rows or by Columns } \description{ Methods for \code{cbind} and \code{rbind} for hyperframes. } \usage{ \method{cbind}{hyperframe}(...) \method{rbind}{hyperframe}(...) } \arguments{ \item{\dots}{ Any number of hyperframes (objects of class \code{\link{hyperframe}}). } } \details{ These are methods for \code{\link{cbind}} and \code{\link{rbind}} for hyperframes. Note that \emph{all} the arguments must be hyperframes (because of the peculiar dispatch rules of \code{\link{cbind}} and \code{\link{rbind}}). To combine a hyperframe with a data frame, one should either convert the data frame to a hyperframe using \code{\link{as.hyperframe}}, or explicitly invoke the function \code{cbind.hyperframe} or \code{rbind.hyperframe}. 
In other words: if \code{h} is a hyperframe and \code{d} is a data frame, the result of \code{cbind(h,d)} will be the same as \code{cbind(as.data.frame(h), d)}, so that all hypercolumns of \code{h} will be deleted (and a warning will be issued). To combine \code{h} with \code{d} so that all columns of \code{h} are retained, type either \code{cbind(h, as.hyperframe(d))} or \code{cbind.hyperframe(h,d)}. } \value{ Another hyperframe. } \author{\adrian and \rolf } \seealso{ \code{\link{hyperframe}}, \code{\link{as.hyperframe}} } \examples{ lambda <- runif(5, min=10, max=30) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) g <- hyperframe(id=letters[1:5], Y=rev(X)) gh <- cbind(h, g) hh <- rbind(h, h) } \keyword{spatial} \keyword{manip} spatstat/man/sessionLibs.Rd0000644000176200001440000000153413333543264015465 0ustar liggesusers\name{sessionLibs} \alias{sessionLibs} \title{ Print Names and Version Numbers of Libraries Loaded } \description{ Prints the names and version numbers of libraries currently loaded by the user. } \usage{ sessionLibs() } \details{ This function prints a list of the libraries loaded by the user in the current session, giving just their name and version number. It obtains this information from \code{\link[utils]{sessionInfo}}. This function is not needed in an interactive \R session because the package startup messages will usually provide this information. Its main use is in an \code{\link{Sweave}} script, where it is needed because the package startup messages are not printed. } \value{ Null. } \examples{ sessionLibs() } \author{ \adrian and \rolf. } \seealso{ \code{\link[utils]{sessionInfo}} } \keyword{data} spatstat/man/interp.im.Rd0000644000176200001440000000370713333543263015100 0ustar liggesusers\name{interp.im} \alias{interp.im} \title{Interpolate a Pixel Image} \description{ Interpolates the values of a pixel image at any desired location in the frame. 
} \usage{ interp.im(Z, x, y=NULL, bilinear=FALSE) } \arguments{ \item{Z}{ Pixel image (object of class \code{"im"}) with numeric or integer values. } \item{x,y}{ Vectors of Cartesian coordinates. Alternatively \code{x} can be a point pattern and \code{y} can be missing. } \item{bilinear}{ Logical value specifying the choice of interpolation rule. If \code{bilinear=TRUE} then a bilinear interpolation rule is used. If \code{bilinear=FALSE} (the default) then a slightly biased rule is used; this rule is consistent with earlier versions of \pkg{spatstat}. } } \details{ A value at each location \code{(x[i],y[i])} will be interpolated using the pixel values of \code{Z} at the four surrounding pixel centres, by simple bilinear interpolation. At the boundary (where \code{(x[i],y[i])} is not surrounded by four pixel centres) the value at the nearest pixel is taken. The arguments \code{x,y} can be anything acceptable to \code{\link[grDevices]{xy.coords}}. } \value{ Vector of interpolated values, with \code{NA} for points that lie outside the domain of the image. } \examples{ opa <- par(mfrow=c(1,2)) # coarse image V <- as.im(function(x,y) { x^2 + y }, owin(), dimyx=10) plot(V, main="coarse image", col=terrain.colors(256)) # lookup value at location (0.5,0.5) V[list(x=0.5,y=0.5)] # interpolated value at location (0.5,0.5) interp.im(V, 0.5, 0.5) interp.im(V, 0.5, 0.5, bilinear=TRUE) # true value is 0.75 # how to obtain an interpolated image at a desired resolution U <- as.im(interp.im, W=owin(), Z=V, dimyx=256) plot(U, main="interpolated image", col=terrain.colors(256)) par(opa) } \author{ \adrian and \rolf, with a contribution from an anonymous user. 
An influence object (of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}.
} \examples{ fit <- ppm(cells, ~x) infl <- influence(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) infl[b] infl[1:5] marks(as.ppp(infl))[1:3] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/coords.Rd0000644000176200001440000000506113417031500014444 0ustar liggesusers\name{coords} \Rdversion{1.1} \alias{coords} \alias{coords.ppp} \alias{coords.ppx} \alias{coords.quad} \alias{coords<-} \alias{coords<-.ppp} \alias{coords<-.ppx} \title{ Extract or Change Coordinates of a Spatial or Spatiotemporal Point Pattern } \description{ Given any kind of spatial or space-time point pattern, this function extracts the (space and/or time and/or local) coordinates of the points and returns them as a data frame. } \usage{ coords(x, ...) \method{coords}{ppp}(x, ...) \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) coords(x, ...) <- value \method{coords}{ppp}(x, ...) <- value \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) <- value \method{coords}{quad}(x, ...) } \arguments{ \item{x}{ A point pattern: either a two-dimensional point pattern (object of class \code{"ppp"}), a three-dimensional point pattern (object of class \code{"pp3"}), or a general multidimensional space-time point pattern (object of class \code{"ppx"}) or a quadrature scheme (object of class \code{"quad"}). } \item{\dots}{ Further arguments passed to methods. } \item{spatial,temporal,local}{ Logical values indicating whether to extract spatial, temporal and local coordinates, respectively. The default is to return all such coordinates. (Only relevant to \code{ppx} objects). } \item{value}{ New values of the coordinates. A numeric vector with one entry for each point in \code{x}, or a numeric matrix or data frame with one row for each point in \code{x}. } } \details{ The function \code{coords} extracts the coordinates from a point pattern. The function \code{coords<-} replaces the coordinates of the point pattern with new values. 
Both functions \code{coords} and \code{coords<-} are generic, with methods for the classes \code{"ppp"} and \code{"ppx"}.
A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"good"} or \code{"best"}.
Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern.
then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007).
See \code{\link{ppp.object}} for details of this class.
The result is determined to be \code{TRUE} if \code{x} inherits from \code{"ppp"}, i.e. if \code{x} has \code{"ppp"} amongst its classes. } \value{ \code{TRUE} if \code{x} is a point pattern, otherwise \code{FALSE}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/bw.ppl.Rd0000644000176200001440000000723313544333571014376 0ustar liggesusers\name{bw.ppl} \alias{bw.ppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.ppl(X, \dots, srange=NULL, ns=16, sigma=NULL, weights=NULL, shortcut=FALSE, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{weights}{ Optional. Numeric vector of weights for the points of \code{X}. Argument passed to \code{\link{density.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}}. } \item{shortcut}{ Logical value indicating whether to speed up the calculation by omitting the integral term in the cross-validation criterion. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the maximum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. 
The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) cross-validation criterion as a function of \code{sigma}.
If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}).
This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{by}.
If there are no points (if \code{X} has length zero) a numeric vector of length zero is returned. If there is only one point (if \code{X} has length 1), then the nearest neighbour is undefined, and a value of \code{NA} is returned.
} \seealso{ \code{\link{nndist}}, \code{\link{nncross}} } \examples{ data(cells) plot(cells) m <- nnwhich(cells) m2 <- nnwhich(cells, k=2) # plot nearest neighbour links b <- cells[m] arrows(cells$x, cells$y, b$x, b$y, angle=15, length=0.15, col="red") # find points which are the neighbour of their neighbour self <- (m[m] == seq(m)) # plot them A <- cells[self] B <- cells[m[self]] plot(cells) segments(A$x, A$y, B$x, B$y) # nearest neighbours of each type head(nnwhich(ants, by=marks(ants))) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/text.ppp.Rd0000644000176200001440000000332413333543264014751 0ustar liggesusers\name{text.ppp} \alias{text.ppp} \alias{text.lpp} \alias{text.psp} \title{ Add Text Labels to Spatial Pattern } \description{ Plots a text label at the location of each point in a spatial point pattern, or each object in a spatial pattern of objects. } \usage{ \method{text}{ppp}(x, \dots) \method{text}{lpp}(x, \dots) \method{text}{psp}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"}), a point pattern on a linear network (class \code{"lpp"}) or a spatial pattern of line segments (class \code{"psp"}). } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{text.default}}. } } \details{ These functions are methods for the generic \code{\link{text}}. A text label is added to the existing plot, at the location of each point in the point pattern \code{x}, or near the location of the midpoint of each segment in the segment pattern \code{x}. Additional arguments \code{\dots} are passed to \code{\link[graphics]{text.default}} and may be used to control the placement of the labels relative to the point locations, and the size and colour of the labels. By default, the labels are the serial numbers 1 to \code{n}, where \code{n} is the number of points or segments in \code{x}. 
character string specifying which parameter is the dimension of the state space in this model family (if any).
Alternatively a positive integer specifying the dimension. } \item{name}{ character string giving the name of the model family used for printing. } \item{isotropic}{ logical value indicating whether or not the model is isotropic. } \item{range}{ function determining the interaction range of the model. May be set to \code{NULL}. See Examples. } \item{parbounds}{ function determining the bounds for each model parameter when all other parameters are fixed. May be set to \code{NULL}. See Examples. } \item{specdenrange}{ function specifying the range of the spectral density if it is finite (only the case for very few models). May be set to \code{NULL}. } \item{startpar}{ function determining starting values for parameters in any estimation algorithm. May be set to \code{NULL}. See Examples. } \item{\dots}{ Additional arguments for inclusion in the returned model object. These are not checked in any way. } } \details{ A determinantal point process family is specified either in terms of a kernel (a positive semi-definite function, i.e. a covariance function) or a spectral density, or preferably both. One of these can be \code{NULL} if it is unknown, but not both. When both are supplied they must have the same arguments. The first argument gives the values at which the function should be evaluated. In general the function should accept an \eqn{n} by \eqn{d} matrix or \code{data.frame} specifying \eqn{n (>=0)} points in dimension \eqn{d}. If the model is isotropic it only needs to accept a non-negative valued numeric of length \eqn{n}. (In fact there is currently almost no support for non-isotropic models, so it is recommended not to specify such a model.) The name of this argument could be chosen freely, but \eqn{x} is recommended. The remaining arguments are the parameters of the model. If one of these is an intensity parameter the name should be mentioned in the argument \code{intensity}. 
If one of these specifies the dimension of the model it should be mentioned in the argument \code{dim}. The kernel and spectral density is with respect to a specific set of basis functions, which is typically the Fourier basis. However this can be changed to any user-supplied basis in the argument \code{basis}. If such an alternative is supplied it must be the name of a function expecting the same arguments as \code{\link{fourierbasis}} and returning the results in the same form as \code{\link{fourierbasis}}. If supplied, the arguments of convkernel must obey the following: first argument should be like the first argument of kernel and/or specden (see above). The second argument (preferably called \code{k}) should be the positive integer specifying how many times the auto-convolution is done (i.e. the \eqn{k} in \eqn{k}-fold auto-convolution). The remaining arguments must agree with the arguments of \code{kernel} and/or \code{specden} (see above). If supplied, the arguments of \code{Kfun} should be like the arguments of \code{kernel} and \code{specden} (see above). 
} \author{ \adrian \rolf and \ege } \examples{ ## Example of how to define the Gauss family exGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ lambda*exp(-(x/alpha)^2) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" \%in\% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" \%in\% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2] rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) exGauss m <- exGauss(lambda=100, alpha=.05, d=2) m } \keyword{spatial} \keyword{models} spatstat/man/flipxy.Rd0000644000176200001440000000214513333543263014501 0ustar liggesusers\name{flipxy} \alias{flipxy} \alias{flipxy.owin} \alias{flipxy.ppp} \alias{flipxy.psp} \alias{flipxy.im} \title{Exchange X and Y Coordinates} \description{ Exchanges the \eqn{x} and \eqn{y} coordinates in a spatial dataset. } \usage{ flipxy(X) \method{flipxy}{owin}(X) \method{flipxy}{ppp}(X) \method{flipxy}{psp}(X) \method{flipxy}{im}(X) } \arguments{ \item{X}{Spatial dataset. 
An object of class \code{"owin"}, \code{"ppp"}, \code{"psp"} or \code{"im"}. } } \value{ Another object of the same type, representing the result of swapping the \eqn{x} and \eqn{y} coordinates. } \details{ This function swaps the \eqn{x} and \eqn{y} coordinates of a spatial dataset. This could also be performed using the command \code{\link{affine}}, but \code{flipxy} is faster. The function \code{\link{flipxy}} is generic, with methods for the classes of objects listed above. } \seealso{ \code{\link{affine}}, \code{\link{reflect}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(cells) X <- flipxy(cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/kaplan.meier.Rd0000644000176200001440000000604513333543263015537 0ustar liggesusers\name{kaplan.meier} \alias{kaplan.meier} \title{Kaplan-Meier Estimator using Histogram Data} \description{ Compute the Kaplan-Meier estimator of a survival time distribution function, from histogram data } \usage{ kaplan.meier(obs, nco, breaks, upperobs=0) } \arguments{ \item{obs}{vector of \eqn{n} integers giving the histogram of all observations (censored or uncensored survival times) } \item{nco}{vector of \eqn{n} integers giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{breaks}{Vector of \eqn{n+1} breakpoints which were used to form both histograms. } \item{upperobs}{ Number of observations beyond the rightmost breakpoint, if any. } } \value{ A list with two elements: \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{lambda}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } These are numeric vectors of length \eqn{n}. } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. 
Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{obs} of all observed times \eqn{\tilde T_i}{T*[i]}. That is, \code{obs[k]} counts the number of values \eqn{\tilde T_i}{T*[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}. These two histograms are the arguments passed to \code{kaplan.meier}. The vectors \code{km} and \code{lambda} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. The histogram breaks must include \eqn{0}. If the histogram breaks do not span the range of the observations, it is important to count how many survival times \eqn{\tilde T_i}{T*[i]} exceed the rightmost breakpoint, and give this as the value \code{upperobs}. 
} \seealso{ \code{\link{reduced.sample}}, \code{\link{km.rs}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/predict.dppm.Rd0000644000176200001440000000275313333543264015565 0ustar liggesusers\name{predict.dppm} \alias{predict.dppm} \alias{fitted.dppm} \title{Prediction from a Fitted Determinantal Point Process Model} \description{ Given a fitted determinantal point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{dppm}(object, ...) \method{predict}{dppm}(object, ...) } \arguments{ \item{object}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link{fitted}} and \code{\link{predict}}. The argument \code{object} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.dppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.dppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss()) predict(fit) } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/affine.owin.Rd0000644000176200001440000000330313333543262015365 0ustar liggesusers\name{affine.owin} \alias{affine.owin} \title{Apply Affine Transformation To Window} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a window. 
} \usage{ \method{affine}{owin}(X, mat=diag(c(1,1)), vec=c(0,0), \dots, rescue=TRUE) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{rescue}{ Logical. If \code{TRUE}, the transformed window will be processed by \code{\link{rescue.rectangle}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the pixel resolution of the transformed window, if \code{X} is a binary pixel mask. } } \value{ Another window (of class \code{"owin"}) representing the result of applying the affine transformation. } \details{ The window is subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and then the result is translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. } \seealso{ \code{\link{affine}}, \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.im}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ # shear transformation shear <- matrix(c(1,0,0.6,1),ncol=2) X <- affine(owin(), shear) \dontrun{ plot(X) } data(letterR) affine(letterR, shear, c(0, 0.5)) affine(as.mask(letterR), shear, c(0, 0.5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/rMatClust.Rd0000644000176200001440000001541613571674202015113 0ustar liggesusers\name{rMatClust} \alias{rMatClust} \title{Simulate Matern Cluster Process} \description{ Generate a random point pattern, a simulated realisation of the \Matern Cluster Process. } \usage{ rMatClust(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. 
} \item{scale}{ Radius parameter of the clusters. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{Numeric. Size of window expansion for generation of parent points. Defaults to \code{scale} which is the cluster radius. } \item{\dots}{Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of \Matern's cluster process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. 
Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being placed independently and uniformly inside a disc of radius \code{scale} centred on the parent point. The resulting point pattern is a realisation of the classical \dQuote{stationary \Matern cluster process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the \Matern cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu/(pi * scale^2)} inside the disc of radius \code{scale} centred on the parent point, and zero intensity outside this disc. Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) placed independently and uniformly in the disc of radius \code{scale} centred on the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. 
In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{expand} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the \Matern cluster process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{scale}. The \Matern cluster process model with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}}. Currently it is not possible to fit the \Matern cluster process model with inhomogeneous parents. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \examples{ # homogeneous X <- rMatClust(10, 0.05, 4) # inhomogeneous ff <- function(x,y){ 4 * exp(2 * abs(x) - 1) } Z <- as.im(ff, owin()) Y <- rMatClust(10, 0.05, Z) YY <- rMatClust(ff, 0.05, 3) } \references{ \Matern, B. (1960) \emph{Spatial Variation}. Meddelanden \ifelse{latex}{\out{fr\r{a}n}}{fraan} Statens Skogsforskningsinstitut, volume 59, number 5. Statens Skogsforskningsinstitut, Sweden. \Matern, B. (1986) \emph{Spatial Variation}. Lecture Notes in Statistics 36, Springer-Verlag, New York. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/fryplot.Rd0000644000176200001440000001315313333543263014666 0ustar liggesusers\name{fryplot} \alias{fryplot} \alias{frypoints} \title{Fry Plot of Point Pattern} \description{ Displays the Fry plot (Patterson plot) of a spatial point pattern. } \usage{ fryplot(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) frypoints(X, from=NULL, to=NULL, dmax=Inf) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} \item{width}{Optional parameter indicating the width of a box for a zoomed-in view of the Fry plot near the origin.} \item{from,to}{ Optional. Subset indices specifying which points of \code{X} will be considered when forming the vectors (drawn from each point of \code{from}, to each point of \code{to}.) } \item{axes}{ Logical value indicating whether to draw axes, crossing at the origin. } \item{dmax}{ Maximum distance between points. Pairs at greater distances do not contribute to the result. The default means there is no maximum distance. } } \details{ The function \code{fryplot} generates a Fry plot (or Patterson plot); \code{frypoints} returns the points of the Fry plot as a point pattern dataset. Fry (1979) and Hanna and Fry (1979) introduced a manual graphical method for investigating features of a spatial point pattern of mineral deposits. A transparent sheet, marked with an origin or centre point, is placed over the point pattern. The transparent sheet is shifted so that the origin lies over one of the data points, and the positions of all the \emph{other} data points are copied onto the transparent sheet. This procedure is repeated for each data point in turn. The resulting plot (the Fry plot) is a pattern of \eqn{n(n-1)} points, where \eqn{n} is the original number of data points. 
This procedure was previously proposed by Patterson (1934, 1935) for studying inter-atomic distances in crystals, and is also known as a Patterson plot. The function \code{fryplot} generates the Fry/Patterson plot. Standard graphical parameters such as \code{main}, \code{pch}, \code{lwd}, \code{col}, \code{bg}, \code{cex} can be used to control the appearance of the plot. To zoom in (to view only a subset of the Fry plot at higher magnification), use the argument \code{width} to specify the width of a rectangular field of view centred at the origin, or the standard graphical arguments \code{xlim} and \code{ylim} to specify another rectangular field of view. (The actual field of view may be slightly larger, depending on the graphics device.) The function \code{frypoints} returns the points of the Fry plot as a point pattern object. There may be a large number of points in this pattern, so this function should be used only if further analysis of the Fry plot is required. Fry plots are particularly useful for recognising anisotropy in regular point patterns. A void around the origin in the Fry plot suggests regularity (inhibition between points) and the shape of the void gives a clue to anisotropy in the pattern. Fry plots are also useful for detecting periodicity or rounding of the spatial coordinates. In mathematical terms, the Fry plot of a point pattern \code{X} is simply a plot of the vectors \code{X[i] - X[j]} connecting all pairs of distinct points in \code{X}. The Fry plot is related to the \eqn{K} function (see \code{\link{Kest}}) and the reduced second moment measure (see \code{\link{Kmeasure}}). For example, the number of points in the Fry plot lying within a circle of given radius is an unnormalised and uncorrected version of the \eqn{K} function. The Fry plot has a similar appearance to the plot of the reduced second moment measure \code{\link{Kmeasure}} when the smoothing parameter \code{sigma} is very small. 
The Fry plot does not adjust for the effect of the size and shape of the sampling window. The density of points in the Fry plot tapers off near the edges of the plot. This is an edge effect, a consequence of the bounded sampling window. In geological applications this is usually not important, because interest is focused on the behaviour near the origin where edge effects can be ignored. To correct for the edge effect, use \code{\link{Kmeasure}} or \code{\link{Kest}} or its relatives. } \value{ \code{fryplot} returns \code{NULL}. \code{frypoints} returns a point pattern (object of class \code{"ppp"}). } \references{ Fry, N. (1979) Random point distributions and strain measurement in rocks. \emph{Tectonophysics} \bold{60}, 89--105. Hanna, S.S. and Fry, N. (1979) A comparison of methods of strain determination in rocks from southwest Dyfed (Pembrokeshire) and adjacent areas. \emph{Journal of Structural Geology} \bold{1}, 155--162. Patterson, A.L. (1934) A Fourier series method for the determination of the component of inter-atomic distances in crystals. \emph{Physics Reviews} \bold{46}, 372--376. Patterson, A.L. (1935) A direct method for the determination of the components of inter-atomic distances in crystals. \emph{Zeitschrift fuer Krystallographie} \bold{90}, 517--554. } \seealso{ \code{\link{Kmeasure}}, \code{\link{Kest}} } \examples{ ## unmarked data fryplot(cells) Y <- frypoints(cells) ## numerical marks fryplot(longleaf, width=4, axes=TRUE) ## multitype points fryplot(amacrine, width=0.2, from=(marks(amacrine) == "on"), chars=c(3,16), cols=2:3, main="Fry plot centred at an On-cell") points(0,0) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/nndist.Rd0000644000176200001440000001644413604234565014477 0ustar liggesusers\name{nndist} \alias{nndist} \alias{nndist.ppp} \alias{nndist.default} \title{Nearest neighbour distances} \description{ Computes the distance from each point to its nearest neighbour in a point pattern. 
Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ nndist(X, \dots) \method{nndist}{ppp}(X, \dots, k=1, by=NULL, method="C") \method{nndist}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C") } \arguments{ \item{X,Y}{ Arguments specifying the locations of a set of points. For \code{nndist.ppp}, the argument \code{X} should be a point pattern (object of class \code{"ppp"}). For \code{nndist.default}, typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. Alternatively \code{X} can be a three-dimensional point pattern (class \code{"pp3"}), a higher-dimensional point pattern (class \code{"ppx"}), a point pattern on a linear network (class \code{"lpp"}), or a spatial pattern of line segments (class \code{"psp"}). } \item{\dots}{ Ignored by \code{nndist.ppp} and \code{nndist.default}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will compute the distance to the nearest point in each group. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. 
If the argument \code{by} is given, then the result is a data frame containing the distances described above, from each point of \code{X}, to the nearest point in each subset of \code{X} defined by the factor \code{by}. } \details{ This function computes the Euclidean distance from each point in a point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic, with a method for point patterns (objects of class \code{"ppp"}), and a default method for coordinate vectors. There are also methods for line segment patterns, \code{\link{nndist.psp}}, three-dimensional point patterns, \code{\link{nndist.pp3}}, higher-dimensional point patterns, \code{\link{nndist.ppx}} and point patterns on a linear network, \code{\link{nndist.lpp}}; these are described in their own help files. Type \code{methods(nndist)} to see all available methods. The method for planar point patterns \code{nndist.ppp} expects a single point pattern argument \code{X} and returns the vector of its nearest neighbour distances. The default method expects that \code{X} and \code{Y} will determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If the argument \code{by} is given, it should be a \code{factor}, of length equal to the number of points in \code{X}. This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{X}. 
The algorithm will then compute, for each point of \code{X}, the distance to the nearest neighbour \emph{in each subset}. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by two to three orders of magnitude and uses much less memory. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To use the nearest neighbour distances for statistical inference, it is often advisable to use the edge-corrected empirical distribution, computed by \code{\link{Gest}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Nearest neighbours of each type}{ If \code{X} is a multitype point pattern and \code{by=marks(X)}, then the algorithm will compute, for each point of \code{X}, the distance to the nearest neighbour of each type. See the Examples. To find the minimum distance from \emph{any} point of type \code{i} to the nearest point of type \code{j}, for all combinations of \code{i} and \code{j}, use \code{\link{minnndist}}, or the \R function \code{\link[stats]{aggregate}} as suggested in the Examples. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). } \seealso{ \code{\link{nndist.psp}}, \code{\link{nndist.pp3}}, \code{\link{nndist.ppx}}, \code{\link{nndist.lpp}}, \code{\link{pairdist}}, \code{\link{Gest}}, \code{\link{nnwhich}}, \code{\link{nncross}}, \code{\link{minnndist}}, \code{\link{maxnndist}}. 
} \examples{ data(cells) # nearest neighbours d <- nndist(cells) # second nearest neighbours d2 <- nndist(cells, k=2) # first, second and third nearest d1to3 <- nndist(cells, k=1:3) x <- runif(100) y <- runif(100) d <- nndist(x, y) # Stienen diagram plot(cells \%mark\% nndist(cells), markscale=1) # distance to nearest neighbour of each type nnda <- nndist(ants, by=marks(ants)) head(nnda) # For nest number 1, the nearest Cataglyphis nest is 87.32125 units away # minimum distance between each pair of types minnndist(ants, by=marks(ants)) # Use of 'aggregate': # _minimum_ distance between each pair of types aggregate(nnda, by=list(from=marks(ants)), min) # _mean_ nearest neighbour distances aggregate(nnda, by=list(from=marks(ants)), mean) # The mean distance from a Messor nest to # the nearest Cataglyphis nest is 59.02549 units } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian. } \keyword{spatial} \keyword{math} spatstat/man/as.function.tess.Rd0000644000176200001440000000305413333543262016371 0ustar liggesusers\name{as.function.tess} \alias{as.function.tess} \title{ Convert a Tessellation to a Function } \description{ Convert a tessellation into a function of the \eqn{x} and \eqn{y} coordinates. The default function values are factor levels specifying which tile of the tessellation contains the point \eqn{(x,y)}. } \usage{ \method{as.function}{tess}(x,\dots,values=NULL) } \arguments{ \item{x}{ A tessellation (object of class \code{"tess"}). } \item{values}{ Optional. A vector giving the values of the function for each tile of \code{x}. } \item{\dots}{ Ignored. } } \details{ This command converts a tessellation (object of class \code{"tess"}) to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. The corresponding function values are factor levels identifying which tile of the tessellation contains each point. Values are \code{NA} if the corresponding point lies outside the tessellation. 
If the argument \code{values} is given, then it determines the value of the function in each tile of \code{x}. } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \spatstatAuthors } \seealso{ \code{\link{tileindex}} for the low-level calculation of tile index. \code{\link{cut.ppp}} and \code{\link{split.ppp}} to divide up the points of a point pattern according to a tessellation. } \examples{ X <- runifpoint(7) V <- dirichlet(X) f <- as.function(V) f(0.1, 0.4) plot(f) } \keyword{spatial} \keyword{manip} spatstat/man/coef.mppm.Rd0000644000176200001440000000643713333543265015064 0ustar liggesusers\name{coef.mppm} \alias{coef.mppm} \title{ Coefficients of Point Process Model Fitted to Multiple Point Patterns } \description{ Given a point process model fitted to a list of point patterns, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{mppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"mppm"}) } \item{\dots}{ Ignored. } } \value{ Either a vector containing the fitted coefficients, or a data frame containing the fitted coefficients for each point pattern. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}). This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{\theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ \lambda(u,x) = \exp(\theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. 
For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. If the fitted model includes random effects (i.e. if the argument \code{random} was specified in the call to \code{\link{mppm}}), then the fitted coefficients are different for each point pattern in the original data, so \code{coef(object)} is a data frame with one row for each point pattern, and one column for each parameter. Use \code{\link{fixef.mppm}} to extract the vector of fixed effect coefficients, and \code{\link{ranef.mppm}} to extract the random effect coefficients at each level. Use \code{\link{print.mppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{fixef.mppm}} and \code{\link{ranef.mppm}} for the fixed and random effect coefficients in a model that includes random effects. \code{\link{print.mppm}}, \code{\link{mppm}} } \examples{ H <- hyperframe(X=waterstriders) fit.Poisson <- mppm(X ~ 1, H) coef(fit.Poisson) # The single entry "(Intercept)" # is the log of the fitted intensity of the Poisson process fit.Strauss <- mppm(X~1, H, Strauss(7)) coef(fit.Strauss) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) # Tweak data to exaggerate differences H$X[[1]] <- rthin(H$X[[1]], 0.3) # Model with random effects fitran <- mppm(X ~ 1, H, random=~1|id) coef(fitran) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/matrixpower.Rd0000644000176200001440000000315113333543263015545 0ustar liggesusers\name{matrixpower} \alias{matrixpower} \alias{matrixsqrt} \alias{matrixinvsqrt} \title{ Power of a Matrix } \description{ Evaluate a specified power of a matrix. } \usage{ matrixpower(x, power, complexOK = TRUE) matrixsqrt(x, complexOK = TRUE) matrixinvsqrt(x, complexOK = TRUE) } \arguments{ \item{x}{ A square matrix containing numeric or complex values. } \item{power}{ A numeric value giving the power (exponent) to which \code{x} should be raised. } \item{complexOK}{ Logical value indicating whether the result is allowed to be complex. } } \details{ These functions raise the matrix \code{x} to the desired power: \code{matrixsqrt} takes the square root, \code{matrixinvsqrt} takes the inverse square root, and \code{matrixpower} takes the specified power of \code{x}. Up to numerical error, \code{matrixpower(x, 2)} should be equivalent to \code{x \%*\% x}, and \code{matrixpower(x, -1)} should be equivalent to \code{solve(x)}, the inverse of \code{x}. The square root \code{y <- matrixsqrt(x)} should satisfy \code{y \%*\% y = x}. The inverse square root \code{z <- matrixinvsqrt(x)} should satisfy \code{z \%*\% z = solve(x)}. Computations are performed using the eigen decomposition (\code{\link{eigen}}). } \value{ A matrix of the same size as \code{x} containing numeric or complex values. } \author{ \adrian. 
} \seealso{ \code{\link[base]{eigen}}, \code{\link[base]{svd}} } \examples{ x <- matrix(c(10,2,2,1), 2, 2) y <- matrixsqrt(x) y y \%*\% y z <- matrixinvsqrt(x) z \%*\% y matrixpower(x, 0.1) } \keyword{algebra} \keyword{array} spatstat/man/model.frame.ppm.Rd0000644000176200001440000000433513333543263016155 0ustar liggesusers\name{model.frame.ppm} \alias{model.frame.ppm} \alias{model.frame.kppm} \alias{model.frame.dppm} \alias{model.frame.lppm} \title{ Extract the Variables in a Point Process Model } \description{ Given a fitted point process model, this function returns a data frame containing all the variables needed to fit the model using the Berman-Turner device. } \usage{ \method{model.frame}{ppm}(formula, ...) \method{model.frame}{kppm}(formula, ...) \method{model.frame}{dppm}(formula, ...) \method{model.frame}{lppm}(formula, ...) } \arguments{ \item{formula}{ A fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{model.frame.glm}}. } } \details{ The function \code{\link{model.frame}} is generic. These functions are method for \code{\link{model.frame}} for fitted point process models (objects of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"} or \code{"lppm"}). The first argument should be a fitted point process model; it has to be named \code{formula} for consistency with the generic function. The result is a data frame containing all the variables used in fitting the model. The data frame has one row for each quadrature point used in fitting the model. The quadrature scheme can be extracted using \code{\link{quad.ppm}}. } \value{ A \code{data.frame} containing all the variables used in the fitted model, plus additional variables specified in \code{\dots}. It has an additional attribute \code{"terms"} containing information about the model formula. For details see \code{\link{model.frame.glm}}. } \references{ Baddeley, A. and Turner, R. 
(2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{dppm}}, \code{\link{lppm}}, \code{\link{model.frame}}, \code{\link{model.matrix.ppm}} } \examples{ fit <- ppm(cells ~ x) mf <- model.frame(fit) kfit <- kppm(redwood ~ x, "Thomas") kmf <- model.frame(kfit) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/plot.lpp.Rd0000644000176200001440000000617613333543264014747 0ustar liggesusers\name{plot.lpp} \alias{plot.lpp} \title{ Plot Point Pattern on Linear Network } \description{ Plots a point pattern on a linear network. Plot method for the class \code{"lpp"} of point patterns on a linear network. } \usage{ \method{plot}{lpp}(x, \dots, main, add = FALSE, use.marks=TRUE, which.marks=NULL, show.all = !add, show.window=FALSE, show.network=TRUE, do.plot = TRUE, multiplot=TRUE) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link{plot.linnet}} or \code{\link{plot.ppp}}. } \item{main}{ Main title for plot. } \item{add}{ Logical value indicating whether the plot is to be added to the existing plot (\code{add=TRUE}) or whether a new plot should be initialised (\code{add=FALSE}, the default). } \item{use.marks}{ logical flag; if \code{TRUE}, plot points using a different plotting symbol for each mark; if \code{FALSE}, only the locations of the points will be plotted, using \code{\link{points}()}. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character or integer vector identifying one or more columns of marks. If \code{add=FALSE} then the default is to plot all columns of marks, in a series of separate plots. 
If \code{add=TRUE} then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the window containing the network. } \item{show.window}{ Logical value indicating whether to plot the window containing the network. Overrides \code{show.all}. } \item{show.network}{ Logical value indicating whether to plot the network. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value giving permission to display multiple plots. } } \details{ The linear network is plotted by \code{\link{plot.linnet}}, then the points are plotted by \code{\link{plot.ppp}}. Commonly-used arguments include: \itemize{ \item \code{col} and \code{lwd} for the colour and width of lines in the linear network \item \code{cols} for the colour or colours of the points \item \code{chars} for the plot characters representing different types of points \item \code{legend} and \code{leg.side} to control the graphics legend } Note that the linear network will be plotted even when \code{add=TRUE}, unless \code{show.network=FALSE}. } \value{ (Invisible) object of class \code{"symbolmap"} giving the correspondence between mark values and plotting characters. } \seealso{ \code{\link{lpp}}. See \code{\link{plot.ppp}} for options for representing the points. See also \code{\link{points.lpp}}, \code{\link{text.lpp}}. } \examples{ plot(chicago, cols=1:6) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/Jdot.Rd0000644000176200001440000001640413454351175014074 0ustar liggesusers\name{Jdot} \alias{Jdot} \title{ Multitype J Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between the type \eqn{i} points and the points of any type. 
} \usage{ Jdot(X, i, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}, currently the Kaplan-Meier estimator. 
} \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{i\bullet}(r)}{1 - Gi.(r)} and \eqn{1 - F_{\bullet}(r)}{1 - F.(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{i\bullet}(r)}{Ji.(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gdot}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jdot} and its companions \code{\link{Jcross}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) 
The ``type \eqn{i} to any type'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van lieshout and Baddeley (1999). It is defined by \deqn{J_{i\bullet}(r) = \frac{1 - G_{i\bullet}(r)}{1 - F_{\bullet}(r)}}{Ji.(r) = (1 - Gi.(r))/(1-F.(r))} where \eqn{G_{i\bullet}(r)}{Gi.(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest other point of the pattern, and \eqn{F_{\bullet}(r)}{F.(r)} is the distribution function of the distance from a fixed point in space to the nearest point of the pattern. An estimate of \eqn{J_{i\bullet}(r)}{Ji.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the pattern is a marked Poisson point process, then \eqn{J_{i\bullet}(r) \equiv 1}{Ji.(r) = 1}. If the subprocess of type \eqn{i} points is independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{J_{i\bullet}(r)}{Ji.(r)} equals \eqn{J_{ii}(r)}{Jii(r)}, the ordinary \eqn{J} function (see \code{\link{Jest}} and Van Lieshout and Baddeley (1996)) of the points of type \eqn{i}. Hence deviations from zero of the empirical estimate of \eqn{J_{i\bullet} - J_{ii}}{Ji.-Jii} may suggest dependence between types. This algorithm estimates \eqn{J_{i\bullet}(r)}{Ji.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. 
The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jcross}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30), ] } Jh. <- Jdot(woods, "hickory") plot(Jh.) # diagnostic plot for independence between hickories and other trees Jhh <- Jest(split(woods)$hickory) plot(Jhh, add=TRUE, legendpos="bottom") \dontrun{ # synthetic example with two marks "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jdot(pp, "a") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/LennardJones.Rd0000644000176200001440000001200213433412422015532 0ustar liggesusers\name{LennardJones} \alias{LennardJones} \title{The Lennard-Jones Potential} \description{ Creates the Lennard-Jones pairwise interaction structure which can then be fitted to point pattern data. } \usage{ LennardJones(sigma0=NA) } \value{ An object of class \code{"interact"} describing the Lennard-Jones interpoint interaction structure. } \arguments{ \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. 
} } \details{ In a pairwise interaction point process with the Lennard-Jones pair potential (Lennard-Jones, 1924) each pair of points in the point pattern, a distance \eqn{d} apart, contributes a factor \deqn{ v(d) = \exp \left\{ - 4\epsilon \left[ \left( \frac{\sigma}{d} \right)^{12} - \left( \frac{\sigma}{d} \right)^6 \right] \right\} }{ v(d) = exp( - 4 * epsilon * ((sigma/d)^12 - (sigma/d)^6)) } to the probability density, where \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} are positive parameters to be estimated. See \bold{Examples} for a plot of this expression. This potential causes very strong inhibition between points at short range, and attraction between points at medium range. The parameter \eqn{\sigma}{sigma} is called the \emph{characteristic diameter} and controls the scale of interaction. The parameter \eqn{\epsilon}{epsilon} is called the \emph{well depth} and determines the strength of attraction. The potential switches from inhibition to attraction at \eqn{d=\sigma}{d=sigma}. The maximum value of the pair potential is \eqn{\exp(\epsilon)}{exp(epsilon)} occuring at distance \eqn{d = 2^{1/6} \sigma}{d = 2^(1/6) * sigma}. Interaction is usually considered to be negligible for distances \eqn{d > 2.5 \sigma \max\{1,\epsilon^{1/6}\}}{d > 2.5 * sigma * max(1, epsilon^(1/6))}. This potential is used to model interactions between uncharged molecules in statistical physics. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Lennard-Jones pairwise interaction is yielded by the function \code{LennardJones()}. See the examples below. } \section{Rescaling}{ To avoid numerical instability, the interpoint distances \code{d} are rescaled when fitting the model. Distances are rescaled by dividing by \code{sigma0}. 
In the formula for \eqn{v(d)} above, the interpoint distance \eqn{d} will be replaced by \code{d/sigma0}. The rescaling happens automatically by default. If the argument \code{sigma0} is missing or \code{NA} (the default), then \code{sigma0} is taken to be the minimum nearest-neighbour distance in the data point pattern (in the call to \code{\link{ppm}}). If the argument \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. The ``canonical regular parameters'' estimated by \code{\link{ppm}} are \eqn{\theta_1 = 4 \epsilon (\sigma/\sigma_0)^{12}}{theta1 = 4 * epsilon * (sigma/sigma0)^12} and \eqn{\theta_2 = 4 \epsilon (\sigma/\sigma_0)^6}{theta2 = 4 * epsilon * (sigma/sigma0)^6}. } \section{Warnings and Errors}{ Fitting the Lennard-Jones model is extremely unstable, because of the strong dependence between the functions \eqn{d^{-12}}{d^(-12)} and \eqn{d^{-6}}{d^(-6)}. The fitting algorithm often fails to converge. Try increasing the number of iterations of the GLM fitting algorithm, by setting \code{gcontrol=list(maxit=1e3)} in the call to \code{\link{ppm}}. Errors are likely to occur if this model is fitted to a point pattern dataset which does not exhibit both short-range inhibition and medium-range attraction between points. The values of the parameters \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} may be \code{NA} (because the fitted canonical parameters have opposite sign, which usually occurs when the pattern is completely random). An absence of warnings does not mean that the fitted model is sensible. A negative value of \eqn{\epsilon}{epsilon} may be obtained (usually when the pattern is strongly clustered); this does not correspond to a valid point process model, but the software does not issue a warning. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ badfit <- ppm(cells ~1, LennardJones(), rbord=0.1) badfit fit <- ppm(unmark(longleaf) ~1, LennardJones(), rbord=1) fit plot(fitin(fit)) # Note the Longleaf Pines coordinates are rounded to the nearest decimetre # (multiple of 0.1 metres) so the apparent inhibition may be an artefact } \references{ Lennard-Jones, J.E. (1924) On the determination of molecular fields. \emph{Proc Royal Soc London A} \bold{106}, 463--477. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/is.rectangle.Rd0000644000176200001440000000152113333543263015541 0ustar liggesusers\name{is.rectangle} \alias{is.rectangle} \alias{is.polygonal} \alias{is.mask} \title{Determine Type of Window} \description{ Determine whether a window is a rectangle, a polygonal region, or a binary mask. } \usage{ is.rectangle(w) is.polygonal(w) is.mask(w) } \arguments{ \item{w}{ Window to be inspected. An object of class \code{"owin"}. } } \value{ Logical value, equal to \code{TRUE} if \code{w} is a window of the specified type. } \details{ These simple functions determine whether a window \code{w} (object of class \code{"owin"}) is a rectangle (\code{is.rectangle(w) = TRUE}), a domain with polygonal boundary (\code{is.polygonal(w) = TRUE}), or a binary pixel mask (\code{is.mask(w) = TRUE}). } \seealso{ \code{\link{owin}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/expand.owin.Rd0000644000176200001440000000232313333543263015416 0ustar liggesusers\name{expand.owin} \alias{expand.owin} \title{Apply Expansion Rule} \description{ Applies an expansion rule to a window. } \usage{ expand.owin(W, \dots) } \arguments{ \item{W}{A window.} \item{\dots}{ Arguments passed to \code{\link{rmhexpand}} to determine an expansion rule. } } \value{ A window (object of class \code{"owin"}). } \details{ The argument \code{W} should be a window (an object of class \code{"owin"}). 
This command applies the expansion rule specified by the arguments \code{\dots} to the window \code{W}, yielding another window. The arguments \code{\dots} are passed to \code{\link{rmhexpand}} to determine the expansion rule. For other transformations of the scale, location and orientation of a window, see \code{\link{shift}}, \code{\link{affine}} and \code{\link{rotate}}. } \seealso{ \code{\link{rmhexpand}} about expansion rules. \code{\link{shift}}, \code{\link{rotate}}, \code{\link{affine}} for other types of manipulation. } \examples{ expand.owin(square(1), 9) expand.owin(square(1), distance=0.5) expand.owin(letterR, length=2) expand.owin(letterR, distance=0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/measureContinuous.Rd0000644000176200001440000000246213333543263016720 0ustar liggesusers\name{measureContinuous} \alias{measureContinuous} \alias{measureDiscrete} \title{ Discrete and Continuous Components of a Measure } \description{ Given a measure \code{A} (object of class \code{"msr"}) these functions find the discrete and continuous parts of \code{A}. } \usage{ measureDiscrete(x) measureContinuous(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measureDiscrete} and \code{measureContinuous} return the discrete and continuous components, respectively, of a measure. If \code{x} is a measure, then \code{measureDiscrete(x)} is a measure consisting only of the discrete (atomic) component of \code{x}, and \code{measureContinuous(x)} is a measure consisting only of the continuous (diffuse) component of \code{x}. } \value{ Another measure (object of class \code{"msr"}) on the same spatial domain. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. 
} \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measurePositive}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp measureDiscrete(rp) measureContinuous(rp) } \keyword{spatial} \keyword{math} spatstat/man/iplot.Rd0000644000176200001440000000470313333543263014317 0ustar liggesusers\name{iplot} \alias{iplot} \alias{iplot.ppp} \alias{iplot.layered} \alias{iplot.linnet} \alias{iplot.lpp} \alias{iplot.default} \title{Point and Click Interface for Displaying Spatial Data} \description{ Plot spatial data with interactive (point-and-click) control over the plot. } \usage{ iplot(x, ...) \method{iplot}{ppp}(x, ..., xname) \method{iplot}{linnet}(x, ..., xname) \method{iplot}{lpp}(x, ..., xname) \method{iplot}{layered}(x, ..., xname, visible) \method{iplot}{default}(x, ..., xname) } \arguments{ \item{x}{ The spatial object to be plotted. An object of class \code{"ppp"}, \code{"psp"}, \code{"im"}, \code{"owin"}, \code{"linnet"}, \code{"lpp"} or \code{"layered"}. } \item{\dots}{Ignored.} \item{xname}{ Optional. Character string to use as the title of the dataset. } \item{visible}{ Optional. Logical vector indicating which layers of \code{x} should initially be turned on (visible). } } \value{ \code{NULL}. } \details{ The function \code{iplot} generates a plot of the spatial dataset \code{x} and allows interactive control over the appearance of the plot using a point-and-click interface. The function \code{iplot} is generic, with methods for for point patterns (\code{\link{iplot.ppp}}), layered objects (\code{\link{iplot.layered}}) and a default method. The default method will handle objects of class \code{"psp"}, \code{"im"} and \code{"owin"} at least. A new popup window is launched. The spatial dataset \code{x} is displayed in the middle of the window using the appropriate \code{plot} method. 
The left side of the window contains buttons and sliders allowing the user to change the plot parameters. The right side of the window contains navigation controls for zooming (changing magnification), panning (shifting the field of view relative to the data), redrawing and exiting. If the user clicks in the area where the point pattern is displayed, the field of view will be re-centred at the point that was clicked. } \seealso{ \code{\link{istat}} } \section{Package Dependence}{ This function requires the package \pkg{rpanel} to be loaded. } \examples{ if(interactive() && require(rpanel)) { iplot(cells) iplot(amacrine) iplot(lansing) L <- layered(D=distmap(cells), P=cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) iplot(L) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/unnormdensity.Rd0000644000176200001440000000435113333543264016106 0ustar liggesusers\name{unnormdensity} \alias{unnormdensity} \title{ Weighted kernel smoother } \description{ An unnormalised version of kernel density estimation where the weights are not required to sum to 1. The weights may be positive, negative or zero. } \usage{ unnormdensity(x, ..., weights = NULL) } \arguments{ \item{x}{ Numeric vector of data } \item{\dots}{ Arguments passed to \code{\link{density.default}}. Arguments must be \emph{named}. }` \item{weights}{ Optional numeric vector of weights for the data. } } \details{ This is an alternative to the standard \R kernel density estimation function \code{\link{density.default}}. The standard \code{\link{density.default}} requires the \code{weights} to be nonnegative numbers that add up to 1, and returns a probability density (a function that integrates to 1). This function \code{unnormdensity} does not impose any requirement on the \code{weights} except that they be finite. Individual weights may be positive, negative or zero. The result is a function that does not necessarily integrate to 1 and may be negative. 
The result is the convolution of the kernel \eqn{k} with the weighted data, \deqn{ f(x) = \sum_i w_i k(x- x_i) }{ f(x) = sum of w[i] * k(x - x[i]) } where \eqn{x_i}{x[i]} are the data points and \eqn{w_i}{w[i]} are the weights. The algorithm first selects the kernel bandwidth by applying \code{\link{density.default}} to the data \code{x} with normalised, positive weight vector \code{w = abs(weights)/sum(abs(weights))} and extracting the selected bandwidth. Then the result is computed by applying applying \code{\link{density.default}} to \code{x} twice using the normalised positive and negative parts of the weights. Note that the arguments \code{\dots} must be passed by name, i.e. in the form (\code{name=value}). Arguments that do not match an argument of \code{\link{density.default}} will be ignored \emph{silently}. } \value{ Object of class \code{"density"} as described in \code{\link{density.default}}. } \author{\adrian and \rolf } \seealso{ \code{\link{density.default}} } \examples{ d <- unnormdensity(1:3, weights=c(-1,0,1)) if(interactive()) plot(d) } \keyword{smooth} spatstat/man/nndensity.Rd0000644000176200001440000000550113333543263015200 0ustar liggesusers\name{nndensity.ppp} \alias{nndensity} \alias{nndensity.ppp} \title{ Estimate Intensity of Point Pattern Using Nearest Neighbour Distances } \description{ Estimates the intensity of a point pattern using the distance from each spatial location to the \code{k}th nearest data point. } \usage{ nndensity(x, ...) \method{nndensity}{ppp}(x, k, ..., verbose = TRUE) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}) or some other spatial object. } \item{k}{ Integer. The distance to the \code{k}th nearest data point will be computed. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{nnmap}} and \code{\link{as.mask}} controlling the pixel resolution. } \item{verbose}{ Logical. If \code{TRUE}, print the value of \code{k} when it is automatically selected. 
If \code{FALSE}, remain silent. } } \details{ This function computes a quick estimate of the intensity of the point process that generated the point pattern \code{x}. For each spatial location \eqn{s}, let \eqn{d(s)} be the distance from \eqn{s} to the \eqn{k}-th nearest point in the dataset \code{x}. If the data came from a homogeneous Poisson process with intensity \eqn{\lambda}{lambda}, then \eqn{\pi d(s)^2}{pi * d(s)^2} would follow a negative exponential distribution with mean \eqn{1/\lambda}{1/lambda}, and the maximum likelihood estimate of \eqn{\lambda}{lambda} would be \eqn{1/(\pi d(s)^2)}{1/(pi * d(s)^2)}. This is the estimate computed by \code{nndensity}, apart from an edge effect correction. This estimator of intensity is relatively fast to compute, and is spatially adaptive (so that it can handle wide variation in the intensity function). However, it implicitly assumes the points are independent, so it does not perform well if the pattern is strongly clustered or strongly inhibited. The value of \code{k} should be greater than 1 in order to avoid infinite peaks in the intensity estimate around each data point. The default value of \code{k} is the square root of the number of points in \code{x}, which seems to work well in many cases. The window of \code{x} is digitised using \code{\link{as.mask}} and the values \eqn{d(s)} are computed using \code{\link{nnmap}}. To control the pixel resolution, see \code{\link{as.mask}}. } \value{ A pixel image (object of class \code{"im"}) giving the estimated intensity of the point process at each spatial location. Pixel values are intensities (number of points per unit area). } \references{ NEED REFERENCES. TRY CRESSIE } \seealso{ \code{\link{density.ppp}}, \code{\link{intensity}} for alternative estimates of point process intensity. 
} \examples{ plot(nndensity(swedishpines)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rDiggleGratton.Rd0000644000176200001440000001054413602545270016103 0ustar liggesusers\name{rDiggleGratton} \alias{rDiggleGratton} \title{Perfect Simulation of the Diggle-Gratton Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gratton process, using a perfect simulation algorithm. } \usage{ rDiggleGratton(beta, delta, rho, kappa=1, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{delta}{ hard core distance (a non-negative number). } \item{rho}{ interaction range (a number greater than \code{delta}). } \item{kappa}{ interaction exponent (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Diggle-Gratton point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. 
Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - delta)/(rho - delta))^kappa, { } delta <= t <= rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}. Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa} are parameters. Note that we use the symbol \eqn{\kappa}{kappa} where Diggle and Gratton (1984) use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{delta <= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Diggle, P.J. and Gratton, R.J. 
(1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rDiggleGratton(50, 0.02, 0.07) Z <- rDiggleGratton(50, 0.02, 0.07, 2, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGratton}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/as.polygonal.Rd0000644000176200001440000000362213333543262015574 0ustar liggesusers\name{as.polygonal} \Rdversion{1.1} \alias{as.polygonal} \title{ Convert a Window to a Polygonal Window } \description{ Given a window \code{W} of any geometric type (rectangular, polygonal or binary mask), this function returns a polygonal window that represents the same spatial domain. } \usage{ as.polygonal(W, repair=FALSE) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{repair}{ Logical value indicating whether to check the validity of the polygon data and repair it, if \code{W} is already a polygonal window. } } \details{ Given a window \code{W} of any geometric type (rectangular, polygonal or binary mask), this function returns a polygonal window that represents the same spatial domain. If \code{W} is a rectangle, it is converted to a polygon with 4 vertices. If \code{W} is already polygonal, it is returned unchanged, by default. However if \code{repair=TRUE} then the validity of the polygonal coordinates will be checked (for example to check the boundary is not self-intersecting) and repaired if necessary, so that the result could be different from \code{W}. 
If \code{W} is a binary mask, then each pixel in the mask is replaced by a small square or rectangle, and the union of these squares or rectangles is computed. The result is a polygonal window that has only horizontal and vertical edges. (Use \code{\link{simplify.owin}} to remove the staircase appearance, if desired). } \value{ A polygonal window (object of class \code{"owin"} and of type \code{"polygonal"}). } \author{ \spatstatAuthors } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.mask}}, \code{\link{simplify.owin}} } \examples{ data(letterR) m <- as.mask(letterR, dimyx=32) p <- as.polygonal(m) if(interactive()) { plot(m) plot(p, add=TRUE, lwd=2) } } \keyword{spatial} \keyword{manip} spatstat/man/perimeter.Rd0000644000176200001440000000221613333543264015162 0ustar liggesusers\name{perimeter} \Rdversion{1.1} \alias{perimeter} \title{ Perimeter Length of Window } \description{ Computes the perimeter length of a window } \usage{ perimeter(w) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) or data that can be converted to a window by \code{\link{as.owin}}. } } \details{ This function computes the perimeter (length of the boundary) of the window \code{w}. If \code{w} is a rectangle or a polygonal window, the perimeter is the sum of the lengths of the edges of \code{w}. If \code{w} is a mask, it is first converted to a polygonal window using \code{\link{as.polygonal}}, then staircase edges are removed using \code{\link{simplify.owin}}, and the perimeter of the resulting polygon is computed. } \value{ A numeric value giving the perimeter length of the window. 
} \seealso{ \code{\link{area.owin}} \code{\link{diameter.owin}}, \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ perimeter(square(3)) data(letterR) perimeter(letterR) if(interactive()) print(perimeter(as.mask(letterR))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/prune.rppm.Rd0000644000176200001440000000255013333543264015275 0ustar liggesusers\name{prune.rppm} \alias{prune.rppm} \title{ Prune a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, apply pruning to reduce the complexity of the partition tree. } \usage{ \method{prune}{rppm}(tree, \dots) } \arguments{ \item{tree}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{\dots}{ Arguments passed to \code{\link[rpart]{prune.rpart}} to control the pruning procedure. } } \details{ This is a method for the generic function \code{\link[rpart]{prune}} for the class \code{"rppm"}. An object of this class is a point process model, fitted to point pattern data by recursive partitioning, by the function \code{\link{rppm}}. The recursive partition tree will be pruned using \code{\link[rpart]{prune.rpart}}. The result is another object of class \code{"rppm"}. } \value{ Object of class \code{"rppm"}. } \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}}, \code{\link{plot.rppm}}, \code{\link{predict.rppm}}. 
} \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) fit <- rppm(gold ~ dfault + greenstone, data=mur) fit prune(fit, cp=0.1) } \keyword{spatial} \keyword{models} spatstat/man/linearmarkconnect.Rd0000644000176200001440000000561413623712063016667 0ustar liggesusers\name{linearmarkconnect} \alias{linearmarkconnect} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkconnect(X, i, j, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{markconnect}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. 
If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (2014) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkequal}}, \code{\link{markconnect}}. } \examples{ pab <- linearmarkconnect(chicago, "assault", "burglary") \dontrun{ plot(alltypes(chicago, linearmarkconnect)) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/clip.infline.Rd0000644000176200001440000000226013333543263015536 0ustar liggesusers\name{clip.infline} \alias{clip.infline} \title{Intersect Infinite Straight Lines with a Window} \description{ Take the intersection between a set of infinite straight lines and a window, yielding a set of line segments. } \usage{ clip.infline(L, win) } \arguments{ \item{L}{ Object of class \code{"infline"} specifying a set of infinite straight lines in the plane. } \item{win}{ Window (object of class \code{"owin"}). } } \details{ This function computes the intersection between a set of infinite straight lines in the plane (stored in an object \code{L} of class \code{"infline"} created by the function \code{\link{infline}}) and a window \code{win}. The result is a pattern of line segments. 
Each line segment carries a mark indicating which line it belongs to. } \value{ A line segment pattern (object of class \code{"psp"}) with a single column of marks. } \author{ \adrian and \rolf. } \seealso{ \code{\link{infline}},\code{\link{psp}}. To divide a window into pieces using infinite lines, use \code{\link{chop.tess}}. } \examples{ L <- infline(p=1:3, theta=pi/4) W <- square(4) clip.infline(L, W) } \keyword{spatial} \keyword{math} spatstat/man/simulate.dppm.Rd0000644000176200001440000001126013571674202015750 0ustar liggesusers\name{simulate.dppm} \alias{simulate.dppm} \alias{simulate.detpointprocfamily} \title{Simulation of Determinantal Point Process Model} \description{ Generates simulated realisations from a determinantal point process model. } \usage{ \method{simulate}{dppm}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) \method{simulate}{detpointprocfamily}(object, nsim = 1, seed = NULL, \dots, W = NULL, trunc = 0.99, correction = "periodic", rbord = reach(object)) } \arguments{ \item{object}{ Determinantal point process model. An object of class \code{"detpointprocfamily"} or \code{"dppm"}. } \item{nsim}{Number of simulated realisations.} \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Arguments passed on to \code{\link{rdpp}}.} \item{W}{ Object specifying the window of simulation (defaults to a unit box if nothing else is sensible -- see Details). Can be any single argument acceptable to \code{\link{as.boxx}} (e.g. an \code{"owin"}, \code{"box3"} or \code{"boxx"} object). } \item{trunc}{ Numeric value specifying how the model truncation is preformed. See Details. } \item{correction}{ Character string specifying the type of correction to use. The options are "periodic" (default) and "border". 
See Details. } \item{rbord}{ Numeric value specifying the extent of the border correction if this correction is used. See Details. } } \details{ These functions are methods for the generic function \code{\link[stats]{simulate}} for the classes \code{"detpointprocfamily"} and \code{"dppm"} of determinantal point process models. The return value is a list of \code{nsim} point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. The exact simulation of a determinantal point process model involves an infinite series, which typically has no analytical solution. In the implementation a truncation is performed. The truncation \code{trunc} can be specified either directly as a positive integer or as a fraction between 0 and 1. In the latter case the truncation is chosen such that the expected number of points in a simulation is \code{trunc} times the theoretical expected number of points in the model. The default is 0.99. The window of the returned point pattern(s) can be specified via the argument \code{W}. For a fitted model (of class \code{"dppm"}) it defaults to the observation window of the data used to fit the model. For inhomogeneous models it defaults to the window of the intensity image. Otherwise it defaults to a unit box. For non-rectangular windows simulation is done in the containing rectangle and then restricted to the window. For inhomogeneous models a stationary model is first simulated using the maximum intensity and then the result is obtained by thinning. The default is to use periodic edge correction for simulation such that opposite edges are glued together. If border correction is used then the simulation is done in an extended window. 
Edge effects are theoretically completely removed by doubling the size of the window in each spatial dimension, but for practical purposes much less extension may be sufficient. The numeric \code{rbord} determines the extent of the extra space added to the window. } \value{ A list of length \code{nsim} containing simulated point patterns. If the patterns are two-dimensional, then they are objects of class \code{"ppp"}, and the list has class \code{"solist"}. Otherwise, the patterns are objects of class \code{"ppx"} and the list has class \code{"anylist"}. The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \references{ Lavancier, F. \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{rdpp}}, \code{\link[stats]{simulate}} } \examples{ model <- dppGauss(lambda=100, alpha=.05, d=2) simulate(model, 2) } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat/man/affine.Rd0000644000176200001440000000224413333543262014415 0ustar liggesusers\name{affine} \alias{affine} \title{Apply Affine Transformation} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a plane geometrical object, such as a point pattern or a window. } \usage{ affine(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}) or a pixel image (object of class \code{"im"}). } \item{\dots}{Arguments determining the affine transformation.} } \value{ Another object of the same type, representing the result of applying the affine transformation. } \details{ This is generic. 
Methods are provided for point patterns (\code{\link{affine.ppp}}) and windows (\code{\link{affine.owin}}). } \seealso{ \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.owin}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{reflect}}, \code{\link{rotate}}, \code{\link{shift}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/runiflpp.Rd0000644000176200001440000000272113333543264015026 0ustar liggesusers\name{runiflpp} \alias{runiflpp} \title{ Uniform Random Points on a Linear Network } \description{ Generates \eqn{n} random points, independently and uniformly distributed, on a linear network. } \usage{ runiflpp(n, L, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer, or a vector of integers specifying the number of points of each type. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ This function uses \code{\link{runifpointOnLines}} to generate the random points. } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. 
} \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{rlpp}} for non-uniform random points; \code{\link{rpoislpp}} for Poisson point process; \code{\link{lpp}}, \code{\link{linnet}} } \examples{ data(simplenet) X <- runiflpp(10, simplenet) plot(X) # marked Z <- runiflpp(c(a=10, b=3), simplenet) } \keyword{spatial} \keyword{datagen} spatstat/man/vargamma.estK.Rd0000644000176200001440000001470113333543264015670 0ustar liggesusers\name{vargamma.estK} \alias{vargamma.estK} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast. } \usage{ vargamma.estK(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical \eqn{K} function of the model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). In previous versions of spatstat instead of specifying \code{nu} (called \code{nu.ker} at that time) the user could specify \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. This syntax is still supported but not recommended for consistency across the package. 
In that case exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{eta} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. 
(2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{cauchy.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ \testonly{ u <- vargamma.estK(redwood, startpar=c(kappa=15, eta=0.075)) } if(interactive()) { u <- vargamma.estK(redwood) u plot(u) } } \keyword{spatial} \keyword{models} spatstat/man/Extract.owin.Rd0000644000176200001440000000217613333543263015557 0ustar liggesusers\name{Extract.owin} \alias{[.owin} \title{Extract Subset of Window} \description{ Extract a subset of a window. } \usage{ \method{[}{owin}(x, i, \dots) } \arguments{ \item{x}{ A spatial window (object of class \code{"owin"}). } \item{i}{ Object defining the subregion. Either a spatial window, or a pixel image with logical values. } \item{\dots}{Ignored.} } \value{ Another spatial window (object of class \code{"owin"}). } \details{ This function computes the intersection between the window \code{x} and the domain specified by \code{i}, using \code{\link{intersect.owin}}. This function is a method for the subset operator \code{"["} for spatial windows (objects of class \code{"owin"}). It is provided mainly for completeness. The index \code{i} may be either a window, or a pixel image with logical values (the \code{TRUE} values of the image specify the spatial domain). 
} \seealso{ \code{\link{intersect.owin}} } \examples{ W <- owin(c(2.5, 3.2), c(1.4, 2.9)) plot(letterR) plot(letterR[W], add=TRUE, col="red") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/lgcp.estK.Rd0000644000176200001440000002141313571674202015021 0ustar liggesusers\name{lgcp.estK} \alias{lgcp.estK} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast. } \usage{ lgcp.estK(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the K function of the point pattern. The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. 
} \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical \eqn{K} function of the LGCP model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\Moller and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The \eqn{K}-function of the LGCP is \deqn{ K(r) = \int_0^r 2\pi s \exp(C(s)) \, {\rm d}s. }{ K(r) = integral from 0 to r of (2 * pi * s * exp(C(s))) ds. } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). } The covariance function \eqn{C(r)} is parametrised in the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(-r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm as the values \code{var} and \code{scale} respectively. 
The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } To determine the template model, the string \code{"modelname"} will be prefixed by \code{"RM"} and the code will search for a function of this name in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the \Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)} corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. 
The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \note{ This function is considerably slower than \code{\link{lgcp.estpcf}} because of the computation time required for the integral in the \eqn{K}-function. Computation can be accelerated, at the cost of less accurate results, by setting \code{spatstat.options(fastK.lgcp=TRUE)}. } \references{ \Moller, J, Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Rasmus Waagepetersen \email{rw@math.auc.dk}. Adapted for \pkg{spatstat} by \adrian Further modifications by Rasmus Waagepetersen and Shen Guochun, and by \ege. } \seealso{ \code{\link{lgcp.estpcf}} for alternative method of fitting LGCP. \code{\link{matclust.estK}}, \code{\link{thomas.estK}} for other models. 
\code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{Kest}} for the \eqn{K} function. } \examples{ if(interactive()) { u <- lgcp.estK(redwood) } else { # slightly faster - better starting point u <- lgcp.estK(redwood, c(var=1, scale=0.1)) } u plot(u) \testonly{ if(require(RandomFields)) { K <- Kest(redwood, r=seq(0, 0.1, length=9)) op <- spatstat.options(fastK.lgcp=TRUE) lgcp.estK(K, covmodel=list(model="matern", nu=0.3), control=list(maxit=2)) spatstat.options(op) } } if(FALSE) { ## takes several minutes! lgcp.estK(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat/man/qqplot.ppm.Rd0000644000176200001440000003564113571674202015312 0ustar liggesusers\name{qqplot.ppm} \alias{qqplot.ppm} \title{ Q-Q Plot of Residuals from Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce a Q-Q plot based on residuals from the model. } \usage{ qqplot.ppm(fit, nsim=100, expr=NULL, \dots, type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE, envir.expr) } \arguments{ \item{fit}{ The fitted point process model, which is to be assessed using the Q-Q plot. An object of class \code{"ppm"}. Smoothed residuals obtained from this fitted model will provide the ``data'' quantiles for the Q-Q plot. } \item{nsim}{ The number of simulations from the ``reference'' point process model. } \item{expr}{ Determines the simulation mechanism which provides the ``theoretical'' quantiles for the Q-Q plot. See Details. 
} \item{\dots}{ Arguments passed to \code{\link{diagnose.ppm}} influencing the computation of residuals. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{style}{ Character string controlling the type of Q-Q plot. Options are \code{"classical"} and \code{"mean"}. See Details. } \item{fast}{ Logical flag controlling the speed and accuracy of computation. Use \code{fast=TRUE} for interactive use and \code{fast=FALSE} for publication standard plots. See Details. } \item{verbose}{ Logical flag controlling whether the algorithm prints progress reports during long computations. } \item{plot.it}{ Logical flag controlling whether the function produces a plot or simply returns a value (silently). } \item{dimyx}{ Dimensions of the pixel grid on which the smoothed residual field will be calculated. A vector of two integers. } \item{nrep}{ If \code{control} is absent, then \code{nrep} gives the number of iterations of the Metropolis-Hastings algorithm that should be used to generate one simulation of the fitted point process. } \item{control}{ List of parameters controlling the Metropolis-Hastings algorithm \code{\link{rmh}} which generates each simulated realisation from the model (unless the model is Poisson). This list becomes the argument \code{control} of \code{\link{rmh.default}}. It overrides \code{nrep}. } \item{saveall}{ Logical flag indicating whether to save all the intermediate calculations. } \item{monochrome}{ Logical flag indicating whether the plot should be in black and white (\code{monochrome=TRUE}), or in colour (\code{monochrome=FALSE}). } \item{limcol}{ String. The colour to be used when plotting the 95\% limit curves. 
} \item{maxerr}{ Maximum number of failures tolerated while generating simulated realisations. See Details. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{fit}, if it is found to be damaged. } \item{envir.expr}{ Optional. An environment in which the expression \code{expr} should be evaluated. } } \value{ An object of class \code{"qqppm"} containing the information needed to reproduce the Q-Q plot. Entries \code{x} and \code{y} are numeric vectors containing quantiles of the simulations and of the data, respectively. } \details{ This function generates a Q-Q plot of the residuals from a fitted point process model. It is an addendum to the suite of diagnostic plots produced by the function \code{\link{diagnose.ppm}}, kept separate because it is computationally intensive. The quantiles of the theoretical distribution are estimated by simulation. In classical statistics, a Q-Q plot of residuals is a useful diagnostic for checking the distributional assumptions. Analogously, in spatial statistics, a Q-Q plot of the (smoothed) residuals from a fitted point process model is a useful way to check the interpoint interaction part of the model (Baddeley et al, 2005). The systematic part of the model (spatial trend, covariate effects, etc) is assessed using other plots made by \code{\link{diagnose.ppm}}. The argument \code{fit} represents the fitted point process model. It must be an object of class \code{"ppm"} (typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). Residuals will be computed for this fitted model using \code{\link{residuals.ppm}}, and the residuals will be kernel-smoothed to produce a ``residual field''. 
The values of this residual field will provide the ``data'' quantiles for the Q-Q plot. The argument \code{expr} is not usually specified. It provides a way to modify the ``theoretical'' or ``reference'' quantiles for the Q-Q plot. In normal usage we set \code{expr=NULL}. The default is to generate \code{nsim} simulated realisations of the fitted model \code{fit}, re-fit this model to each of the simulated patterns, evaluate the residuals from these fitted models, and use the kernel-smoothed residual field from these fitted models as a sample from the reference distribution for the Q-Q plot. In advanced use, \code{expr} may be an \code{expression}. It will be re-evaluated \code{nsim} times, and should include random computations so that the results are not identical each time. The result of evaluating \code{expr} should be either a point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}). If the value is a point pattern, then the original fitted model \code{fit} will be fitted to this new point pattern using \code{\link{update.ppm}}, to yield another fitted model. Smoothed residuals obtained from these \code{nsim} fitted models will yield the ``theoretical'' quantiles for the Q-Q plot. Alternatively \code{expr} can be a list of point patterns, or an \code{envelope} object that contains a list of point patterns (typically generated by calling \code{\link{envelope}} with \code{savepatterns=TRUE}). These point patterns will be used as the simulated patterns. Simulation is performed (if \code{expr=NULL}) using the Metropolis-Hastings algorithm \code{\link{rmh}}. Each simulated realisation is the result of running the Metropolis-Hastings algorithm from an independent random starting state each time. The iterative and termination behaviour of the Metropolis-Hastings algorithm are governed by the argument \code{control}. See \code{\link{rmhcontrol}} for information about this argument. 
As a shortcut, the argument \code{nrep} determines the number of Metropolis-Hastings iterations used to generate each simulated realisation, if \code{control} is absent. By default, simulations are generated in an expanded window. Use the argument \code{control} to change this, as explained in the section on \emph{Warning messages}. The argument \code{type} selects the type of residual or weight that will be computed. For options, see \code{\link{diagnose.ppm}}. The argument \code{style} determines the type of Q-Q plot. It is highly recommended to use the default, \code{style="mean"}. \describe{ \item{\code{style="classical"}}{ The quantiles of the residual field for the data (on the \eqn{y} axis) are plotted against the quantiles of the \bold{pooled} simulations (on the \eqn{x} axis). This plot is biased, and therefore difficult to interpret, because of strong autocorrelations in the residual field and the large differences in sample size. } \item{\code{style="mean"}}{ The order statistics of the residual field for the data are plotted against the sample means, over the \code{nsim} simulations, of the corresponding order statistics of the residual field for the simulated datasets. Dotted lines show the 2.5 and 97.5 percentiles, over the \code{nsim} simulations, of each order statistic. } } The argument \code{fast} is a simple way to control the accuracy and speed of computation. If \code{fast=FALSE}, the residual field is computed on a fine grid of pixels (by default 100 by 100 pixels, see below) and the Q-Q plot is based on the complete set of order statistics (usually 10,000 quantiles). If \code{fast=TRUE}, the residual field is computed on a coarse grid (at most 40 by 40 pixels) and the Q-Q plot is based on the \emph{percentiles} only. This is about 7 times faster. It is recommended to use \code{fast=TRUE} for interactive data analysis and \code{fast=FALSE} for definitive plots for publication. 
The argument \code{dimyx} gives full control over the resolution of the pixel grid used to calculate the smoothed residuals. Its interpretation is the same as the argument \code{dimyx} to the function \code{\link{as.mask}}. Note that \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{dimyx} is not present, then the default pixel grid dimensions are controlled by \code{spatstat.options("npixel")}. Since the computation is so time-consuming, \code{qqplot.ppm} returns a list containing all the data necessary to re-display the Q-Q plot. It is advisable to assign the result of \code{qqplot.ppm} to something (or use \code{.Last.value} if you forgot to.) The return value is an object of class \code{"qqppm"}. There are methods for \code{\link{plot.qqppm}} and \code{\link{print.qqppm}}. See the Examples. The argument \code{saveall} is usually set to \code{FALSE}. If \code{saveall=TRUE}, then the intermediate results of calculation for each simulated realisation are saved and returned. The return value includes a 3-dimensional array \code{sim} containing the smoothed residual field images for each of the \code{nsim} realisations. When \code{saveall=TRUE}, the return value is an object of very large size, and should not be saved on disk. Errors may occur during the simulation process, because random data are generated. For example: \itemize{ \item one of the simulated patterns may be empty. \item one of the simulated patterns may cause an error in the code that fits the point process model. \item the user-supplied argument \code{expr} may have a bug. } Empty point patterns do not cause a problem for the code, but they are reported. Other problems that would lead to a crash are trapped; the offending simulated data are discarded, and the simulation is retried. 
The argument \code{maxerr} determines the maximum number of times that such errors will be tolerated (mainly as a safeguard against an infinite loop). } \section{Side Effects}{ Produces a Q-Q plot if \code{plot.it} is TRUE. } \section{Warning messages}{ A warning message will be issued if any of the simulations trapped an error (a potential crash). A warning message will be issued if all, or many, of the simulated point patterns are empty. This usually indicates a problem with the simulation procedure. The default behaviour of \code{qqplot.ppm} is to simulate patterns on an expanded window (specified through the argument \code{control}) in order to avoid edge effects. The model's trend is extrapolated over this expanded window. If the trend is strongly inhomogeneous, the extrapolated trend may have very large (or even infinite) values. This can cause the simulation algorithm to produce empty patterns. The only way to suppress this problem entirely is to prohibit the expansion of the window, by setting the \code{control} argument to something like \code{control=list(nrep=1e6, expand=1)}. Here \code{expand=1} means there will be no expansion. See \code{\link{rmhcontrol}} for more information about the argument \code{control}. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten} \bold{151}, 95--100. 
} \seealso{ \code{\link{diagnose.ppm}}, \code{\link{lurking}}, \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}} } \examples{ data(cells) fit <- ppm(cells, ~1, Poisson()) diagnose.ppm(fit) # no suggestion of departure from stationarity \dontrun{qqplot.ppm(fit, 80) # strong evidence of non-Poisson interaction} \testonly{qqplot.ppm(fit, 4)} \dontrun{ diagnose.ppm(fit, type="pearson") qqplot.ppm(fit, type="pearson") } \testonly{qqplot.ppm(fit, 4, type="pearson")} ########################################### ## oops, I need the plot coordinates mypreciousdata <- .Last.value \dontrun{mypreciousdata <- qqplot.ppm(fit, type="pearson")} \testonly{mypreciousdata <- qqplot.ppm(fit, 4, type="pearson")} plot(mypreciousdata) ###################################################### # Q-Q plots based on fixed n # The above QQ plots used simulations from the (fitted) Poisson process. # But I want to simulate conditional on n, instead of Poisson # Do this by setting rmhcontrol(p=1) fixit <- list(p=1) \dontrun{qqplot.ppm(fit, 100, control=fixit)} \testonly{qqplot.ppm(fit, 4, control=fixit)} ###################################################### # Inhomogeneous Poisson data X <- rpoispp(function(x,y){1000 * exp(-3*x)}, 1000) plot(X) # Inhomogeneous Poisson model fit <- ppm(X, ~x, Poisson()) \dontrun{qqplot.ppm(fit, 100)} \testonly{qqplot.ppm(fit, 4)} # conclusion: fitted inhomogeneous Poisson model looks OK ###################################################### # Advanced use of 'expr' argument # # set the initial conditions in Metropolis-Hastings algorithm # expr <- expression(rmh(fit, start=list(n.start=42), verbose=FALSE)) \dontrun{qqplot.ppm(fit, 100, expr)} \testonly{qqplot.ppm(fit, 4, expr)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/plot.listof.Rd0000644000176200001440000002157513333543264015454 0ustar liggesusers\name{plot.listof} 
\alias{plot.listof} \title{Plot a List of Things} \description{ Plots a list of things } \usage{ \method{plot}{listof}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep=0, vsep=0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, panel.vpad=0.2, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE) } \arguments{ \item{x}{ An object of the class \code{"listof"}. Essentially a list of objects. } \item{\dots}{ Arguments passed to \code{\link{plot}} when generating each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, giving the headings for each of the objects. } \item{mar.panel}{ Size of the margins outside each plot panel. A numeric vector of length 4 giving the bottom, left, top, and right margins in that order. (Alternatively the vector may have length 1 or 2 and will be replicated to length 4). See the section on \emph{Spacing between plots}. } \item{hsep,vsep}{ Additional horizontal and vertical separation between plot panels, expressed in the same units as \code{mar.panel}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Optional. Function that determines different plot arguments for different panels. See Details. } \item{panel.begin.args}{ Optional. List of additional arguments for \code{panel.begin} when it is a function. 
} \item{panel.end.args}{ Optional. List of additional arguments for \code{panel.end} when it is a function. } \item{panel.vpad}{ Amount of extra vertical space that should be allowed for the title of each panel, if a title will be displayed. Expressed as a fraction of the height of the panel. Applies only when \code{equal.scales=FALSE} (the default) and requires that the height of each panel can be determined. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } \item{equal.scales}{ Logical value indicating whether the components should be plotted at (approximately) the same physical scale. } \item{halign,valign}{ Logical values indicating whether panels in a column should be aligned to the same \eqn{x} coordinate system (\code{halign=TRUE}) and whether panels in a row should be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}). These are applicable only if \code{equal.scales=TRUE}. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"listof"}. An object of class \code{"listof"} (defined in the base R package) represents a list of objects, all belonging to a common class. The base R package defines a method for printing these objects, \code{\link[base]{print.listof}}, but does not define a method for \code{plot}. So here we have provided a method for \code{plot}. In the \pkg{spatstat} package, various functions produce an object of class \code{"listof"}, essentially a list of spatial objects of the same kind. These objects can be plotted in a nice arrangement using \code{plot.listof}. See the Examples. 
The argument \code{panel.args} determines extra graphics parameters for each panel. It should be a function that will be called as \code{panel.args(i)} where \code{i} is the panel number. Its return value should be a list of graphics parameters that can be passed to the relevant \code{plot} method. These parameters override any parameters specified in the \code{\dots} arguments. The arguments \code{panel.begin} and \code{panel.end} determine graphics that will be plotted before and after each panel is plotted. They may be objects of some class that can be plotted with the generic \code{plot} command. Alternatively they may be functions that will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel number and \code{y = x[[i]]}. If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. If \code{equal.scales=FALSE} (the default), then the plot panels will have equal height on the plot device (unless there is only one column of panels, in which case they will have equal width on the plot device). This means that the objects are plotted at different physical scales, by default. If \code{equal.scales=TRUE}, then the dimensions of the plot panels on the plot device will be proportional to the spatial dimensions of the corresponding components of \code{x}. This means that the objects will be plotted at \emph{approximately} equal physical scales. If these objects have very different spatial sizes, the plot command could fail (when it tries to plot the smaller objects at a tiny scale), with an error message that the figure margins are too large. 
The objects will be plotted at \emph{exactly} equal physical scales, and \emph{exactly} aligned on the device, under the following conditions: \itemize{ \item every component of \code{x} is a spatial object whose position can be shifted by \code{\link{shift}}; \item \code{panel.begin} and \code{panel.end} are either \code{NULL} or they are spatial objects whose position can be shifted by \code{\link{shift}}; \item \code{adorn.left}, \code{adorn.right}, \code{adorn.top} and \code{adorn.bottom} are all \code{NULL}. } Another special case is when every component of \code{x} is an object of class \code{"fv"} representing a function. If \code{equal.scales=TRUE} then all these functions will be plotted with the same axis scales (i.e. with the same \code{xlim} and the same \code{ylim}). } \section{Spacing between plots}{ The spacing between individual plots is controlled by the parameters \code{mar.panel}, \code{hsep} and \code{vsep}. If \code{equal.scales=FALSE}, the plot panels are logically separate plots. The margins for each panel are determined by the argument \code{mar.panel} which becomes the graphics parameter \code{mar} described in the help file for \code{\link{par}}. One unit of \code{mar} corresponds to one line of text in the margin. If \code{hsep} or \code{vsep} are present, \code{mar.panel} is augmented by \code{c(vsep, hsep, vsep, hsep)/2}. If \code{equal.scales=TRUE}, all the plot panels are drawn in the same coordinate system which represents a physical scale. The unit of measurement for \code{mar.panel[1,3]} is one-sixth of the greatest height of any object plotted in the same row of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the greatest width of any object plotted in the same column of panels. If \code{hsep} or \code{vsep} are present, they are interpreted in the same units as \code{mar.panel[2]} and \code{mar.panel[1]} respectively. 
} \seealso{ \code{\link[base]{print.listof}}, \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, this generally means that one of the objects had a much smaller physical scale than the others. Ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ # Intensity estimate of multitype point pattern plot(D <- density(split(amacrine))) plot(D, main="", equal.ribbon=TRUE, panel.end=function(i,y,...){contour(y, ...)}) # list of 3D point patterns ape1 <- osteo[osteo$shortid==4, "pts", drop=TRUE] class(ape1) plot(ape1, main.panel="", mar.panel=0.1, hsep=0.7, vsep=1, cex=1.5, pch=21, bg='white') } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/km.rs.Rd0000644000176200001440000000610313333543263014216 0ustar liggesusers\name{km.rs} \alias{km.rs} \title{Kaplan-Meier and Reduced Sample Estimator using Histograms} \description{ Compute the Kaplan-Meier and Reduced Sample estimators of a survival time distribution function, using histogram techniques } \usage{ km.rs(o, cc, d, breaks) } \arguments{ \item{o}{vector of observed survival times } \item{cc}{vector of censoring times } \item{d}{vector of non-censoring indicators } \item{breaks}{Vector of breakpoints to be used to form histograms. } } \value{ A list with five elements \item{rs}{Reduced-sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{hazard}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } \item{r}{values of \eqn{t} for which \eqn{F(t)} is estimated } \item{breaks}{the breakpoints vector } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. 
Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. The arguments to this function are vectors \code{o}, \code{cc}, \code{d} of observed values of \eqn{\tilde T_i}{T*[i]}, \eqn{C_i}{C[i]} and \eqn{D_i}{D[i]} respectively. The function computes histograms and forms the reduced-sample and Kaplan-Meier estimates of \eqn{F(t)} by invoking the functions \code{\link{kaplan.meier}} and \code{\link{reduced.sample}}. This is efficient if the lengths of \code{o}, \code{cc}, \code{d} (i.e. the number of observations) is large. The vectors \code{km} and \code{hazard} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. This approximation is exact only if the survival times are discrete and the histogram breaks are fine enough to ensure that each interval \code{(breaks[k],breaks[k+1])} contains only one possible value of the survival time. The vector \code{rs} is the reduced-sample estimator, \code{rs[k]} being the reduced sample estimate of \code{F(breaks[k+1])}. This value is exact, i.e. the use of histograms does not introduce any approximation error in the reduced-sample estimator. 
} \seealso{ \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Jest.Rd0000644000176200001440000002246213333543262014076 0ustar liggesusers\name{Jest} \alias{Jest} \title{Estimate the J-function} \description{ Estimates the summary function \eqn{J(r)} for a point pattern in a window of arbitrary shape. } \usage{ Jest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{J(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{vector of values for the argument \eqn{r} at which \eqn{J(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Character string specifying the choice of edge correction(s) in \code{\link{Fest}} and \code{\link{Gest}}. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{J} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J(r)} computed from the border-corrected estimates of \eqn{F} and \eqn{G} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J(r)} computed from the Kaplan-Meier estimates of \eqn{F} and \eqn{G} } \item{han}{the Hanisch-style estimator of \eqn{J(r)} computed from the Hanisch estimate of \eqn{G} and the Chiu-Stoyan estimate of \eqn{F} } \item{un}{the uncorrected estimate of \eqn{J(r)} computed from the uncorrected estimates of \eqn{F} and \eqn{G} } \item{theo}{the theoretical value of \eqn{J(r)} for a stationary Poisson process: identically equal to \eqn{1} } The data frame also has \bold{attributes} \item{F}{ the output of \code{\link{Fest}} for this point pattern, containing three estimates of the empty space function \eqn{F(r)} and an estimate of its hazard function } \item{G}{ the output of \code{\link{Gest}} for this point pattern, containing three estimates of the nearest neighbour distance distribution function \eqn{G(r)} and an estimate of its hazard function } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{J} function (Van Lieshout and Baddeley, 1996) of a stationary point process is defined as \deqn{J(r) = \frac{1-G(r)}{1-F(r)} }{ % J(r) = (1-G(r))/(1-F(r))} where \eqn{G(r)} is the nearest neighbour distance distribution function of the point process (see \code{\link{Gest}}) and \eqn{F(r)} is its empty space function (see \code{\link{Fest}}). For a completely random (uniform Poisson) point process, the \eqn{J}-function is identically equal to \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} typically indicate spatial clustering or spatial regularity, respectively. The \eqn{J}-function is one of the few characteristics that can be computed explicitly for a wide range of point processes. 
See Van Lieshout and Baddeley (1996), Baddeley et al (2000), Thonnes and Van Lieshout (1999) for further information. An estimate of \eqn{J} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{J(r)} is compared against the constant function \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} may suggest spatial clustering or spatial regularity, respectively. This algorithm estimates the \eqn{J}-function from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The functions \code{\link{Fest}} and \code{\link{Gest}} are called to compute estimates of \eqn{F(r)} and \eqn{G(r)} respectively. These estimates are then combined by simply taking the ratio \eqn{J(r) = (1-G(r))/(1-F(r))}. In fact several different estimates are computed using different edge corrections (Baddeley, 1998). The Kaplan-Meier estimate (returned as \code{km}) is the ratio \code{J = (1-G)/(1-F)} of the Kaplan-Meier estimates of \eqn{1-F} and \eqn{1-G} computed by \code{\link{Fest}} and \code{\link{Gest}} respectively. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"km"}. The Hanisch-style estimate (returned as \code{han}) is the ratio \code{J = (1-G)/(1-F)} where \code{F} is the Chiu-Stoyan estimate of \eqn{F} and \code{G} is the Hanisch estimate of \eqn{G}. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"cs"} or \code{"han"}. 
The reduced-sample or border corrected estimate (returned as \code{rs}) is the same ratio \code{J = (1-G)/(1-F)} of the border corrected estimates. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"rs"} or \code{"border"}. These edge-corrected estimators are slightly biased for \eqn{J}, since they are ratios of approximately unbiased estimators. The logarithm of the Kaplan-Meier estimate is exactly unbiased for \eqn{\log J}{log J}. The uncorrected estimate (returned as \code{un} and computed only if \code{correction} includes \code{"none"}) is the ratio \code{J = (1-G)/(1-F)} of the uncorrected (``raw'') estimates of the survival functions of \eqn{F} and \eqn{G}, which are the empirical distribution functions of the empty space distances \code{Fest(X,\dots)$raw} and of the nearest neighbour distances \code{Gest(X,\dots)$raw}. The uncorrected estimates of \eqn{F} and \eqn{G} are severely biased. However the uncorrected estimate of \eqn{J} is approximately unbiased (if the process is close to Poisson); it is insensitive to edge effects, and should be used when edge effects are severe (see Baddeley et al, 2000). The algorithm for \code{\link{Fest}} uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. See \code{\link{Fest}} for details. First-time users are strongly advised not to specify these arguments. Note that the value returned by \code{Jest} includes the output of \code{\link{Fest}} and \code{\link{Gest}} as attributes (see the last example below). If the user is intending to compute the \code{F,G} and \code{J} functions for the point pattern, it is only necessary to call \code{Jest}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Baddeley, A.J. and Gill, R.D. 
The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263--292. Baddeley, A., Kerscher, M., Schladitz, K. and Scott, B.T. Estimating the \emph{J} function without edge correction. \emph{Statistica Neerlandica} \bold{54} (2000) 315--328. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D., Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Thonnes, E. and Van Lieshout, M.N.M. A comparative study on the power of Van Lieshout and Baddeley's J-function. \emph{Biometrical Journal} \bold{41} (1999) 721--734. Van Lieshout, M.N.M. and Baddeley, A.J. A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50} (1996) 344--361. 
} \seealso{ \code{\link{Jinhom}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) J <- Jest(cells, 0.01) plot(J, main="cells data") # values are far above J = 1, indicating regular pattern data(redwood) J <- Jest(redwood, 0.01, legendpos="center") plot(J, main="redwood data") # values are below J = 1, indicating clustered pattern } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/ppp.object.Rd0000644000176200001440000000771513333543264015243 0ustar liggesusers\name{ppp.object} \alias{ppp.object} %DoNotExport \title{Class of Point Patterns} \description{ A class \code{"ppp"} to represent a two-dimensional point pattern. Includes information about the window in which the pattern was observed. Optionally includes marks. } \details{ This class represents a two-dimensional point pattern dataset. It specifies \itemize{ \item the locations of the points \item the window in which the pattern was observed \item optionally, ``marks'' attached to each point (extra information such as a type label). } If \code{X} is an object of type \code{ppp}, it contains the following elements: \tabular{ll}{ \code{x} \tab vector of \eqn{x} coordinates of data points \cr \code{y} \tab vector of \eqn{y} coordinates of data points \cr \code{n} \tab number of points \cr \code{window} \tab window of observation \cr \tab (an object of class \code{\link{owin}}) \cr \code{marks} \tab optional vector or data frame of marks } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"ppp"} may be created by the function \code{\link{ppp}} and converted from other types of data by the function \code{\link{as.ppp}}. Note that you must always specify the window of observation; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. 
Standard point pattern datasets provided with the package include \code{\link[spatstat.data]{amacrine}}, \code{\link[spatstat.data]{betacells}}, \code{\link[spatstat.data]{bramblecanes}}, \code{\link[spatstat.data]{cells}}, \code{\link[spatstat.data]{demopat}}, \code{\link[spatstat.data]{ganglia}}, \code{\link[spatstat.data]{lansing}}, \code{\link[spatstat.data]{longleaf}}, \code{\link[spatstat.data]{nztrees}}, \code{\link[spatstat.data]{redwood}}, \code{\link[spatstat.data]{simdat}} and \code{\link[spatstat.data]{swedishpines}}. Point patterns may be scanned from your own data files by \code{\link{scanpp}} or by using \code{\link{read.table}} and \code{\link{as.ppp}}. They may be manipulated by the functions \code{\link{[.ppp}} and \code{\link{superimpose}}. Point pattern objects can be plotted just by typing \code{plot(X)} which invokes the \code{plot} method for point pattern objects, \code{\link{plot.ppp}}. See \code{\link{plot.ppp}} for further information. There are also methods for \code{summary} and \code{print} for point patterns. Use \code{summary(X)} to see a useful description of the data. Patterns may be generated at random by \code{\link{runifpoint}}, \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rNeymanScott}}, \code{\link{rMatClust}}, and \code{\link{rThomas}}. Most functions which are intended to operate on a window (of class \code{\link{owin}}) will, if presented with a \code{\link{ppp}} object instead, automatically extract the window information from the point pattern. } \seealso{ \code{\link{owin}}, \code{\link{ppp}}, \code{\link{as.ppp}}, \code{\link{[.ppp}} } \section{Warnings}{ The internal representation of marks is likely to change in the next release of this package. 
} \examples{ x <- runif(100) y <- runif(100) X <- ppp(x, y, c(0,1),c(0,1)) X \dontrun{plot(X)} mar <- sample(1:3, 100, replace=TRUE) mm <- ppp(x, y, c(0,1), c(0,1), marks=mar) \dontrun{plot(mm)} # points with mark equal to 2 ss <- mm[ mm$marks == 2 , ] \dontrun{plot(ss)} # left half of pattern 'mm' lu <- owin(c(0,0.5),c(0,1)) mmleft <- mm[ , lu] \dontrun{plot(mmleft)} \dontrun{ if(FALSE) { # input data from file qq <- scanpp("my.table", unit.square()) # interactively build a point pattern plot(unit.square()) X <- as.ppp(locator(10), unit.square()) plot(X) } } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/segregation.test.Rd0000644000176200001440000000557313333543264016464 0ustar liggesusers\name{segregation.test} \alias{segregation.test} \alias{segregation.test.ppp} \title{ Test of Spatial Segregation of Types } \description{ Performs a Monte Carlo test of spatial segregation of the types in a multitype point pattern. } \usage{ segregation.test(X, \dots) \method{segregation.test}{ppp}(X, \dots, nsim = 19, permute = TRUE, verbose = TRUE, Xname) } \arguments{ \item{X}{ Multitype point pattern (object of class \code{"ppp"} with factor-valued marks). } \item{\dots}{ Additional arguments passed to \code{\link{relrisk.ppp}} to control the smoothing parameter or bandwidth selection. } \item{nsim}{ Number of simulations for the Monte Carlo test. } \item{permute}{ Argument passed to \code{\link{rlabel}}. If \code{TRUE} (the default), randomisation is performed by randomly permuting the labels of \code{X}. If \code{FALSE}, randomisation is performed by resampling the labels with replacement. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{Xname}{ Optional character string giving the name of the dataset \code{X}. } } \details{ The Monte Carlo test of spatial segregation of types, proposed by Kelsall and Diggle (1995) and Diggle et al (2005), is applied to the point pattern \code{X}. 
The test statistic is \deqn{ T = \sum_i \sum_m \left( \widehat p(m \mid x_i) - \overline p_m \right)^2 }{ T = sum[i] sum[m] (phat(m | x[i]) - pbar[m])^2 } where \eqn{\widehat p(m \mid x_i)}{phat(m | x[i])} is the leave-one-out kernel smoothing estimate of the probability that the \eqn{i}-th data point has type \eqn{m}, and \eqn{\overline p_m}{pbar[m]} is the average fraction of data points which are of type \eqn{m}. The statistic \eqn{T} is evaluated for the data and for \code{nsim} randomised versions of \code{X}, generated by randomly permuting or resampling the marks. Note that, by default, automatic bandwidth selection will be performed separately for each randomised pattern. This computation can be very time-consuming but is necessary for the test to be valid in most conditions. A short-cut is to specify the value of the smoothing bandwidth \code{sigma} as shown in the examples. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. Diggle, P.J., Zheng, P. and Durr, P. (2005) Non-parametric estimation of spatial segregation in a multivariate point process: bovine tuberculosis in Cornwall, UK. \emph{Applied Statistics} \bold{54}, 645--658. } \seealso{ \code{\link{relrisk}} } \examples{ segregation.test(hyytiala, 5) if(interactive()) segregation.test(hyytiala, hmin=0.05) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{htest} spatstat/man/distfun.lpp.Rd0000644000176200001440000000500613333543263015433 0ustar liggesusers\name{distfun.lpp} \Rdversion{1.1} \alias{distfun.lpp} \title{ Distance Map on Linear Network } \description{ Compute the distance function of a point pattern on a linear network. } \usage{ \method{distfun}{lpp}(X, ..., k=1) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{k}{ An integer. 
The distance to the \code{k}th nearest point will be computed. } \item{\dots}{ Extra arguments are ignored. } } \details{ On a linear network \eqn{L}, the \dQuote{geodesic distance function} of a set of points \eqn{A} in \eqn{L} is the mathematical function \eqn{f} such that, for any location \eqn{s} on \eqn{L}, the function value \code{f(s)} is the shortest-path distance from \eqn{s} to \eqn{A}. The command \code{distfun.lpp} is a method for the generic command \code{\link{distfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- distfun(X)} returns a \emph{function} in the \R language that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. More efficiently \code{f} can be called in the form \code{v <- f(x, y, seg, tp)} where \code{seg} and \code{tp} are the local coordinates on the network. It can also be called as \code{v <- f(x)} where \code{x} is a point pattern on the same linear network. The function \code{f} obtained from \code{f <- distfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To identify \emph{which} point is the nearest neighbour, see \code{\link{nnfun.lpp}}. 
} \examples{ data(letterR) X <- runiflpp(3, simplenet) f <- distfun(X) f plot(f) # using a distfun as a covariate in a point process model: Y <- runiflpp(4, simplenet) fit <- lppm(Y ~D, covariates=list(D=f)) f(Y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/varblock.Rd0000644000176200001440000001050213333543264014766 0ustar liggesusers\name{varblock} \alias{varblock} \title{ Estimate Variance of Summary Statistic by Subdivision } \description{ This command estimates the variance of any summary statistic (such as the \eqn{K}-function) by spatial subdivision of a single point pattern dataset. } \usage{ varblock(X, fun = Kest, blocks = quadrats(X, nx = nx, ny = ny), \dots, nx = 3, ny = nx, confidence=0.95) } \arguments{ \item{X}{ Point pattern dataset (object of class \code{"ppp"}). } \item{fun}{ Function that computes the summary statistic. } \item{blocks}{ Optional. A tessellation that specifies the division of the space into blocks. } \item{\dots}{ Arguments passed to \code{fun}. } \item{nx,ny}{ Optional. Number of rectangular blocks in the \eqn{x} and \eqn{y} directions. Incompatible with \code{blocks}. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } } \details{ This command computes an estimate of the variance of the summary statistic \code{fun(X)} from a single point pattern dataset \code{X} using a subdivision method. It can be used to plot \bold{confidence intervals} for the true value of a summary function such as the \eqn{K}-function. The window containing \code{X} is divided into pieces by an \code{nx * ny} array of rectangles (or is divided into pieces of more general shape, according to the argument \code{blocks} if it is present). The summary statistic \code{fun} is applied to each of the corresponding sub-patterns of \code{X} as described below. Then the pointwise sample mean, sample variance and sample standard deviation of these summary statistics are computed. 
Then pointwise confidence intervals are computed, for the specified level of confidence, defaulting to 95 percent. The variance is estimated by equation (4.21) of Diggle (2003, page 52). This assumes that the point pattern \code{X} is stationary. For further details see Diggle (2003, pp 52--53). The estimate of the summary statistic from each block is computed as follows. For most functions \code{fun}, the estimate from block \code{B} is computed by finding the subset of \code{X} consisting of points that fall inside \code{B}, and applying \code{fun} to these points, by calling \code{fun(X[B])}. However if \code{fun} is the \eqn{K}-function \code{\link{Kest}}, or any function which has an argument called \code{domain}, the estimate for each block \code{B} is computed by calling \code{fun(X, domain=B)}. In the case of the \eqn{K}-function this means that the estimate from block \code{B} is computed by counting pairs of points in which the \emph{first} point lies in \code{B}, while the second point may lie anywhere. } \section{Errors}{ If the blocks are too small, there may be insufficient data in some blocks, and the function \code{fun} may report an error. If this happens, you need to take larger blocks. An error message about incompatibility may occur. The different function estimates may be incompatible in some cases, for example, because they use different default edge corrections (typically because the tiles of the tessellation are not the same kind of geometric object as the window of \code{X}, or because the default edge correction depends on the number of points). To prevent this, specify the choice of edge correction, in the \code{correction} argument to \code{fun}, if it has one. An alternative to \code{varblock} is Loh's mark bootstrap \code{\link{lohboot}}. 
} \value{ A function value table (object of class \code{"fv"}) that contains the result of \code{fun(X)} as well as the sample mean, sample variance and sample standard deviation of the block estimates, together with the upper and lower two-standard-deviation confidence limits. } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \adrian and \rolf } \seealso{ \code{\link{tess}}, \code{\link{quadrats}} for basic manipulation. \code{\link{lohboot}} for an alternative bootstrap technique. } \examples{ v <- varblock(amacrine, Kest, nx=4, ny=2) v <- varblock(amacrine, Kcross, nx=4, ny=2) if(interactive()) plot(v, iso ~ r, shade=c("hiiso", "loiso")) } \keyword{nonparametric} \keyword{spatial} spatstat/man/clickjoin.Rd0000644000176200001440000000432513333543263015135 0ustar liggesusers\name{clickjoin} \alias{clickjoin} \title{ Interactively join vertices on a plot } \description{ Given a point pattern representing a set of vertices, this command gives a point-and-click interface allowing the user to join pairs of selected vertices by edges. } \usage{ clickjoin(X, \dots, add = TRUE, m = NULL, join = TRUE) } \arguments{ \item{X}{ Point pattern of vertices. An object of class \code{"ppp"}. } \item{\dots}{ Arguments passed to \code{\link{segments}} to control the plotting of the new edges. } \item{add}{ Logical. Whether the point pattern \code{X} should be added to the existing plot (\code{add=TRUE}) or a new plot should be created (\code{add=FALSE}). } \item{m}{ Optional. Logical matrix specifying an initial set of edges. There is an edge between vertices \code{i} and \code{j} if \code{m[i,j] = TRUE}. } \item{join}{ Optional. If \code{TRUE}, then each user click will join a pair of vertices. If \code{FALSE}, then each user click will delete an existing edge. This is only relevant if \code{m} is supplied. 
} } \details{ This function makes it easier for the user to create a linear network or a planar graph, given a set of vertices. The function first displays the point pattern \code{X}, then repeatedly prompts the user to click on a pair of points in \code{X}. Each selected pair of points will be joined by an edge. The function returns a logical matrix which has entries equal to \code{TRUE} for each pair of vertices joined by an edge. The selection of points is performed using \code{\link{identify.ppp}} which typically expects the user to click the left mouse button. This point-and-click interaction continues until the user terminates it, by pressing the middle mouse button, or pressing the right mouse button and selecting \code{stop}. The return value can be used in \code{\link{linnet}} to create a linear network. } \value{ Logical matrix \code{m} with value \code{m[i,j] = TRUE} for every pair of vertices \code{X[i]} and \code{X[j]} that should be joined by an edge. } \author{ \adrian. } \seealso{ \code{\link{linnet}}, \code{\link{clickppp}} } \keyword{spatial} \keyword{datagen} spatstat/man/simulate.kppm.Rd0000644000176200001440000000602013444120760015747 0ustar liggesusers\name{simulate.kppm} \alias{simulate.kppm} \title{Simulate a Fitted Cluster Point Process Model} \description{ Generates simulated realisations from a fitted cluster point process model. } \usage{ \method{simulate}{kppm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, retry=10, drop=FALSE) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{ Additional arguments passed to the relevant random generator. See Details. 
} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{retry}{ Number of times to repeat the simulation if it fails (e.g. because of insufficient memory). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"kppm"} of fitted cluster point process models. Simulations are performed by \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}} or \code{\link{rLGCP}} depending on the model. Additional arguments \code{\dots} are passed to the relevant function performing the simulation. For example the argument \code{saveLambda} is recognised by all of the simulation functions. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. 
} \examples{ fit <- kppm(redwood ~1, "Thomas") simulate(fit, 2) fitx <- kppm(redwood ~x, "Thomas") simulate(fitx, 2) } \seealso{ \code{\link{kppm}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rLGCP}}, \code{\link{simulate.ppm}}, \code{\link[stats]{simulate}} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/Gmulti.Rd0000644000176200001440000001722713333543262014435 0ustar liggesusers\name{Gmulti} \alias{Gmulti} \title{ Marked Nearest Neighbour Distance Function } \description{ For a marked point pattern, estimate the distribution of the distance from a typical point in subset \code{I} to the nearest point of subset \eqn{J}. } \usage{ Gmulti(X, I, J, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{G_{IJ}(r)}{GIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. } \item{J}{Subset of points in \code{X} to which distances are measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{IJ}(r)}{GIJ(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{IJ}(r)}{GIJ(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{IJ}(r)}{GIJ(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{IJ}(r)}{GIJ(r)} for a marked Poisson process with the same estimated intensity } } \details{ The function \code{Gmulti} generalises \code{\link{Gest}} (for unmarked point patterns) and \code{\link{Gdot}} and \code{\link{Gcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. This function computes an estimate of the cumulative distribution function \eqn{G_{IJ}(r)}{GIJ(r)} of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. 
If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. This algorithm estimates the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{IJ}(r)}{GIJ(r)}. This estimate should be used with caution as \eqn{G_{IJ}(r)}{GIJ(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{IJ}}{GIJ}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. 
Care should be taken not to use the uncorrected empirical \eqn{G_{IJ}}{GIJ} as if it were an unbiased estimator of \eqn{G_{IJ}}{GIJ}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{G_{IJ}}{GIJ} does not necessarily have a density. The reduced sample estimator of \eqn{G_{IJ}}{GIJ} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{IJ}}{GIJ} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gdot}}, \code{\link{Gest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1, npoints(trees), by=50), ] } Gm <- Gmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Gm) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/rescale.Rd0000644000176200001440000000510213333543264014601 0ustar liggesusers\name{rescale} \alias{rescale} \title{Convert dataset to another unit of length} \description{ Converts between different units of length in a spatial dataset, such as a point pattern or a window. } \usage{ rescale(X, s, unitname) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another object of the same type, representing the same data, but expressed in the new units. } \details{ This is generic. Methods are provided for many spatial objects. The spatial coordinates in the dataset \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. The name of the unit of length will also be adjusted. The result is an object of the same type, representing the same data, but expressed in the new units. For example if \code{X} is a dataset giving coordinates in metres, then \code{rescale(X,1000)} will take the new unit of length to be 1000 metres. To do this, it will divide the old coordinate values by 1000 to obtain coordinates expressed in kilometres, and change the name of the unit of length from \code{"metres"} to \code{"1000 metres"}. If \code{unitname} is given, it will be taken as the new name of the unit of length. It should be a valid name for the unit of length, as described in the help for \code{\link{unitname}}. For example if \code{X} is a dataset giving coordinates in metres, \code{rescale(X, 1000, "km")} will divide the coordinate values by 1000 to obtain coordinates in kilometres, and the unit name will be changed to \code{"km"}. 
} \section{Note}{ The result of this operation is equivalent to the original dataset. If you want to actually change the coordinates by a linear transformation, producing a dataset that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ Available methods: \code{\link{rescale.im}}, \code{\link{rescale.layered}}, \code{\link{rescale.linnet}}, \code{\link{rescale.lpp}}, \code{\link{rescale.owin}}, \code{\link{rescale.ppp}}, \code{\link{rescale.psp}} and \code{\link{rescale.unitname}}. Other generics: \code{\link{unitname}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/intersect.owin.Rd0000644000176200001440000000613413333543263016143 0ustar liggesusers\name{intersect.owin} \alias{intersect.owin} \alias{union.owin} \alias{setminus.owin} \title{Intersection, Union or Set Subtraction of Windows} \description{ Yields the intersection, union or set subtraction of windows. } \usage{ intersect.owin(\dots, fatal=FALSE, p) union.owin(\dots, p) setminus.owin(A, B, \dots, p) } \arguments{ \item{A,B}{Windows (objects of class \code{"owin"}).} \item{\dots}{ Windows, or arguments passed to \code{\link{as.mask}} to control the discretisation. } \item{fatal}{Logical. Determines what happens if the intersection is empty. } \item{p}{ Optional list of parameters passed to \code{\link[polyclip]{polyclip}} to control the accuracy of polygon geometry. } } \value{ A window (object of class \code{"owin"}) or possibly \code{NULL}. } \details{ The function \code{intersect.owin} computes the intersection between the windows given in \code{\dots}, while \code{union.owin} computes their union. The function \code{setminus.owin} computes the intersection of \code{A} with the complement of \code{B}. 
For \code{intersect.owin} and \code{union.owin}, the arguments \code{\dots} must be either \itemize{ \item window objects of class \code{"owin"}, \item data that can be coerced to this class by \code{\link{as.owin}}), \item lists of windows, of class \code{"solist"}, \item named arguments of \code{\link{as.mask}} to control the discretisation if required. } For \code{setminus.owin}, the arguments \code{\dots} must be named arguments of \code{\link{as.mask}}. If the intersection is empty, then if \code{fatal=FALSE} the result is an empty window or \code{NULL}, while if \code{fatal=TRUE} an error occurs. } \author{ \spatstatAuthors. } \seealso{ \code{\link{is.subset.owin}}, \code{\link{overlap.owin}}, \code{\link{is.empty}}, \code{\link{boundingbox}}, \code{\link{owin.object}} } \examples{ # rectangles u <- unit.square() v <- owin(c(0.5,3.5), c(0.4,2.5)) # polygon data(letterR) # mask m <- as.mask(letterR) # two rectangles intersect.owin(u, v) union.owin(u,v) setminus.owin(u,v) # polygon and rectangle intersect.owin(letterR, v) union.owin(letterR,v) setminus.owin(letterR,v) # mask and rectangle intersect.owin(m, v) union.owin(m,v) setminus.owin(m,v) # mask and polygon p <- rotate(v, 0.2) intersect.owin(m, p) union.owin(m,p) setminus.owin(m,p) # two polygons A <- letterR B <- rotate(letterR, 0.2) plot(boundingbox(A,B), main="intersection") w <- intersect.owin(A, B) plot(w, add=TRUE, col="lightblue") plot(A, add=TRUE) plot(B, add=TRUE) plot(boundingbox(A,B), main="union") w <- union.owin(A,B) plot(w, add=TRUE, col="lightblue") plot(A, add=TRUE) plot(B, add=TRUE) plot(boundingbox(A,B), main="set minus") w <- setminus.owin(A,B) plot(w, add=TRUE, col="lightblue") plot(A, add=TRUE) plot(B, add=TRUE) # intersection and union of three windows C <- shift(B, c(0.2, 0.3)) plot(union.owin(A,B,C)) plot(intersect.owin(A,B,C)) } \keyword{spatial} \keyword{math} spatstat/man/plot.leverage.ppm.Rd0000644000176200001440000001025013333543264016525 0ustar 
liggesusers\name{plot.leverage.ppm} \alias{plot.leverage.ppm} \alias{contour.leverage.ppm} \alias{persp.leverage.ppm} \title{ Plot Leverage Function } \description{ Generate a pixel image plot, or a contour plot, or a perspective plot, of a leverage function that has been computed by \code{\link{leverage.ppm}}. } \usage{ \method{plot}{leverage.ppm}(x, \dots, what=c("smooth", "nearest", "exact"), showcut=TRUE, args.cut=list(drawlabels=FALSE), multiplot=TRUE) \method{contour}{leverage.ppm}(x, \dots, what=c("smooth", "nearest"), showcut=TRUE, args.cut=list(col=3, lwd=3, drawlabels=FALSE), multiplot=TRUE) \method{persp}{leverage.ppm}(x, \dots, what=c("smooth", "nearest"), main, zlab="leverage") } \arguments{ \item{x}{ Leverage function (object of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{\dots}{ Arguments passed to \code{\link{plot.im}} or \code{\link{contour.im}} or \code{\link{persp.im}} controlling the plot. } \item{what}{ Character string (partially matched) specifying the values to be plotted. See Details. } \item{showcut}{ Logical. If \code{TRUE}, a contour line is plotted at the level equal to the theoretical mean of the leverage. } \item{args.cut}{ Optional list of arguments passed to \code{\link[graphics]{contour.default}} to control the plotting of the contour line for the mean leverage. } \item{multiplot}{ Logical value indicating whether it is permissible to display several plot panels. } \item{main}{ Optional main title. A character string or character vector. } \item{zlab}{ Label for the \eqn{z} axis. A character string. } } \details{ These functions are the \code{plot}, \code{contour} and \code{persp} methods for objects of class \code{"leverage.ppm"}. Such objects are computed by the command \code{\link{leverage.ppm}}. The \code{plot} method displays the leverage function as a colour pixel image using \code{\link{plot.im}}, and draws a single contour line at the mean leverage value using \code{\link{contour.default}}. 
Use the argument \code{clipwin} to restrict the plot to a subset of the full data. The \code{contour} method displays the leverage function as a contour plot, and also draws a single contour line at the mean leverage value, using \code{\link{contour.im}}. The \code{persp} method displays the leverage function as a surface in perspective view, using \code{\link{persp.im}}. Since the exact values of leverage are computed only at a finite set of quadrature locations, there are several options for these plots: \describe{ \item{\code{what="smooth"}:}{ (the default) an image plot showing a smooth function, obtained by applying kernel smoothing to the exact leverage values; } \item{\code{what="nearest"}:}{ an image plot showing a piecewise-constant function, obtained by taking the exact leverage value at the nearest quadrature point; } \item{\code{what="exact"}:}{ a symbol plot showing the exact values of leverage as circles, centred at the quadrature points, with diameters proportional to leverage. } } The pixel images are already contained in the object \code{x} and were computed by \code{\link{leverage.ppm}}; the resolution of these images is controlled by arguments to \code{\link{leverage.ppm}}. } \value{ Same as for \code{\link{plot.im}}, \code{\link{contour.im}} and \code{\link{persp.im}} respectively. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lef <- leverage(fit) plot(lef) contour(lef) persp(lef) } \keyword{spatial} \keyword{models} spatstat/man/plot.laslett.Rd0000644000176200001440000000324213333543264015613 0ustar liggesusers\name{plot.laslett} \alias{plot.laslett} \title{ Plot Laslett Transform } \description{ Plot the result of Laslett's Transform. 
} \usage{ \method{plot}{laslett}(x, \dots, Xpars = list(box = TRUE, col = "grey"), pointpars = list(pch = 3, cols = "blue"), rectpars = list(lty = 3, border = "green")) } \arguments{ \item{x}{ Object of class \code{"laslett"} produced by \code{\link{laslett}} representing the result of Laslett's transform. } \item{\dots}{ Additional plot arguments passed to \code{\link{plot.solist}}. } \item{Xpars}{ A list of plot arguments passed to \code{\link{plot.owin}} or \code{\link{plot.im}} to display the original region \code{X} before transformation. } \item{pointpars}{ A list of plot arguments passed to \code{\link{plot.ppp}} to display the tangent points. } \item{rectpars}{ A list of plot arguments passed to \code{\link{plot.owin}} to display the maximal rectangle. } } \details{ This is the \code{plot} method for the class \code{"laslett"}. The function \code{\link{laslett}} applies Laslett's Transform to a spatial region \code{X} and returns an object of class \code{"laslett"} representing the result of the transformation. The result is plotted by this method. The plot function \code{\link{plot.solist}} is used to align the before-and-after pictures. See \code{\link{plot.solist}} for further options to control the plot. } \value{ None. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{laslett}} } \examples{ b <- laslett(heather$coarse, plotit=FALSE) plot(b, main="Heather Data") } \keyword{spatial} \keyword{hplot} spatstat/man/Fiksel.Rd0000644000176200001440000000743213333543262014406 0ustar liggesusers\name{Fiksel} \alias{Fiksel} \title{The Fiksel Interaction} \description{ Creates an instance of Fiksel's double exponential pairwise interaction point process model, which can then be fitted to point pattern data. 
} \usage{ Fiksel(r, hc=NA, kappa) } \arguments{ \item{r}{The interaction radius of the Fiksel model} \item{hc}{The hard core distance} \item{kappa}{The rate parameter} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Fiksel process with interaction radius \eqn{r}, hard core distance \code{hc} and rate parameter \code{kappa}. } \details{ Fiksel (1984) introduced a pairwise interaction point process with the following interaction function \eqn{c}. For two points \eqn{u} and \eqn{v} separated by a distance \eqn{d=||u-v||}, the interaction \eqn{c(u,v)} is equal to \eqn{0} if \eqn{d < h}, equal to \eqn{1} if \eqn{d > r}, and equal to \deqn{ \exp(a \exp(-\kappa d))}{exp(a * exp(-kappa * d))} if \eqn{h \le d \le r}{h <= d <= r}, where \eqn{h,r,\kappa,a}{h,r,kappa,a} are parameters. A graph of this interaction function is shown in the Examples. The interpretation of the parameters is as follows. \itemize{ \item \eqn{h} is the hard core distance: distinct points are not permitted to come closer than a distance \eqn{h} apart. \item \eqn{r} is the interaction range: points further than this distance do not interact. \item \eqn{\kappa}{kappa} is the rate or slope parameter, controlling the decay of the interaction as distance increases. \item \eqn{a} is the interaction strength parameter, controlling the strength and type of interaction. If \eqn{a} is zero, the process is Poisson. If \code{a} is positive, the process is clustered. If \code{a} is negative, the process is inhibited (regular). } The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Fiksel pairwise interaction is yielded by the function \code{Fiksel()}. See the examples below. 
The parameters \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} must be fixed and given in the call to \code{Fiksel}, while the canonical parameter \eqn{a} is estimated by \code{\link{ppm}()}. To estimate \eqn{h}, \eqn{r} and\eqn{\kappa}{kappa} it is possible to use \code{\link{profilepl}}. The maximum likelihood estimator of\eqn{h} is the minimum interpoint distance. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. See also Stoyan, Kendall and Mecke (1987) page 161. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{StraussHard}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Electronische Informationsverabeitung und Kybernetika} \bold{20}, 270--278. Stoyan, D, Kendall, W.S. and Mecke, J. (1987) \emph{Stochastic geometry and its applications}. Wiley. } \examples{ Fiksel(r=1,hc=0.02, kappa=2) # prints a sensible description of itself data(spruces) X <- unmark(spruces) fit <- ppm(X ~ 1, Fiksel(r=3.5, kappa=1)) plot(fitin(fit)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/Hardcore.Rd0000644000176200001440000000522313333543262014714 0ustar liggesusers\name{Hardcore} \alias{Hardcore} \title{The Hard Core Point Process Model} \description{ Creates an instance of the hard core point process model which can then be fitted to point pattern data. 
} \usage{ Hardcore(hc=NA) } \arguments{ \item{hc}{The hard core distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hard core process with hard core distance \code{hc}. } \details{ A hard core process with hard core distance \eqn{h} and abundance parameter \eqn{\beta}{beta} is a pairwise interaction point process in which distinct points are not allowed to come closer than a distance \eqn{h} apart. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{\alpha}{alpha} is the normalising constant. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hard core process pairwise interaction is yielded by the function \code{Hardcore()}. See the examples below. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiHard}}, \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. 
} \examples{ Hardcore(0.02) # prints a sensible description of itself \dontrun{ ppm(cells, ~1, Hardcore(0.05)) # fit the stationary hard core process to `cells' } # estimate hard core radius from data ppm(cells, ~1, Hardcore()) ppm(cells, ~1, Hardcore) ppm(cells, ~ polynom(x,y,3), Hardcore(0.05)) # fit a nonstationary hard core process # with log-cubic polynomial trend } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/persp.im.Rd0000644000176200001440000001357313377123425014734 0ustar liggesusers\name{persp.im} \alias{persp.im} \title{Perspective Plot of Pixel Image} \description{ Displays a perspective plot of a pixel image. } \usage{ \method{persp}{im}(x, \dots, colmap=NULL, colin=x, apron=FALSE, visible=FALSE) } \arguments{ \item{x}{ The pixel image to be plotted as a surface. An object of class \code{"im"} (see \code{\link{im.object}}). } \item{\dots}{ Extra arguments passed to \code{\link{persp.default}} to control the display. } \item{colmap}{ Optional data controlling the colour map. See Details. } \item{colin}{ Optional. Colour input. Another pixel image (of the same dimensions as \code{x}) containing the values that will be mapped to colours. } \item{apron}{ Logical. If \code{TRUE}, a grey apron is placed around the sides of the perspective plot. } \item{visible}{ Logical value indicating whether to compute which pixels of \code{x} are visible in the perspective view. See Details. } } \value{ (invisibly) the 3D transformation matrix returned by \code{\link{persp.default}}, together with an attribute \code{"expand"} which gives the relative scale of the \eqn{z} coordinate. If argument \code{visible=TRUE} was given, the return value also has an attribute \code{"visible"} which is a pixel image, compatible with \code{x}, with logical values which are \emph{TRUE} when the corresponding pixel is visible in the perspective view, and \code{FALSE} when it is obscured. 
} \details{ This is the \code{persp} method for the class \code{"im"}. The pixel image \code{x} must have real or integer values. These values are treated as heights of a surface, and the surface is displayed as a perspective plot on the current plot device, using equal scales on the \code{x} and \code{y} axes. The optional argument \code{colmap} gives an easy way to display different altitudes in different colours (if this is what you want). \itemize{ \item If \code{colmap} is a colour map (object of class \code{"colourmap"}, created by the function \code{\link{colourmap}}) then this colour map will be used to associate altitudes with colours. \item If \code{colmap} is a character vector, then the range of altitudes in the perspective plot will be divided into \code{length(colmap)} intervals, and those parts of the surface which lie in a particular altitude range will be assigned the corresponding colour from \code{colmap}. \item If \code{colmap} is a function in the \R language of the form \code{function(n, ...)}, this function will be called with an appropriate value of \code{n} to generate a character vector of \code{n} colours. Examples of such functions are \code{\link[grDevices]{heat.colors}}, \code{\link[grDevices]{terrain.colors}}, \code{\link[grDevices]{topo.colors}} and \code{\link[grDevices]{cm.colors}}. \item If \code{colmap} is a function in the \R language of the form \code{function(range, ...)} then it will be called with \code{range} equal to the range of altitudes, to determine the colour values or colour map. Examples of such functions are \code{\link{beachcolours}} and \code{\link{beachcolourmap}}. \item If \code{colmap} is a list with entries \code{breaks} and \code{col}, then \code{colmap$breaks} determines the breakpoints of the altitude intervals, and \code{colmap$col} provides the corresponding colours. 
} Alternatively, if the argument \code{colin} (\emph{colour input}) is present, then the colour map \code{colmap} will be applied to the pixel values of \code{colin} instead of the pixel values of \code{x}. The result is a perspective view of a surface with heights determined by \code{x} and colours determined by \code{colin} (mapped by \code{colmap}). If \code{apron=TRUE}, vertical surface is drawn around the boundary of the perspective plot, so that the terrain appears to have been cut out of a solid material. If colour data were supplied, then the apron is coloured light grey. Graphical parameters controlling the perspective plot are passed through the \code{...} arguments directly to the function \code{\link{persp.default}}. See the examples in \code{\link{persp.default}} or in \code{demo(persp)}. The vertical scale is controlled by the argument \code{expand}: setting \code{expand=1} will interpret the pixel values as being in the same units as the spatial coordinates \eqn{x} and \eqn{y} and represent them at the same scale. If \code{visible=TRUE}, the algorithm also computes whether each pixel in \code{x} is visible in the perspective view. In order to be visible, a pixel must not be obscured by another pixel which lies in front of it (as seen from the viewing direction), and the three-dimensional vector normal to the surface must be pointing toward the viewer. The return value of \code{persp.im} then has an attribute \code{"visible"} which is a pixel image, compatible with \code{x}, with pixel value equal to \code{TRUE} if the corresponding pixel in \code{x} is visible, and \code{FALSE} if it is not visible. } \seealso{ \code{\link{perspPoints}}, \code{\link{perspLines}} for drawing additional points or lines \emph{on the surface}. \code{\link[grDevices]{trans3d}} for mapping arbitrary \eqn{(x,y,z)} coordinate locations to the plotting coordinates. 
\code{\link{im.object}}, \code{\link{plot.im}}, \code{\link{contour.im}} } \examples{ # an image Z <- setcov(owin()) persp(Z, colmap=terrain.colors(128)) co <- colourmap(range=c(0,1), col=rainbow(128)) persp(Z, colmap=co, axes=FALSE, shade=0.3) ## Terrain elevation persp(bei.extra$elev, colmap=terrain.colors(128), apron=TRUE, theta=-30, phi=20, zlab="Elevation", main="", ticktype="detailed", expand=6) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/uniquemap.default.Rd0000644000176200001440000000347113556254467016634 0ustar liggesusers\name{uniquemap.default} \alias{uniquemap.default} \alias{uniquemap.data.frame} \alias{uniquemap.matrix} \title{ Map Duplicate Entries to Unique Entries } \description{ Determine whether entries in a vector (or rows in a matrix or data frame) are duplicated, choose a unique representative for each set of duplicates, and map the duplicates to the unique representative. } \usage{ \method{uniquemap}{default}(x) \method{uniquemap}{data.frame}(x) \method{uniquemap}{matrix}(x) } \arguments{ \item{x}{ A vector, data frame or matrix, or another type of data. } } \details{ The function \code{\link{uniquemap}} is generic, with methods for point patterns, data frames, and a default method. The default method expects a vector. It determines whether any entries of the vector \code{x} are duplicated, and constructs a mapping of the indices of \code{x} so that all duplicates are mapped to a unique representative index. The result is an integer vector \code{u} such that \code{u[j] = i} if the entries \code{x[i]} and \code{x[j]} are identical and point \code{i} has been chosen as the unique representative. The entry \code{u[i] = i} means either that point \code{i} is unique, or that it has been chosen as the unique representative of its equivalence class. 
The method for \code{data.frame} determines whether any rows of the data frame \code{x} are duplicated, and constructs a mapping of the row indices so that all duplicate rows are mapped to a unique representative row. } \value{ An integer vector. } \author{ \spatstatAuthors. } \seealso{ \code{\link{uniquemap}}, \code{\link{uniquemap.ppp}} } \examples{ x <- c(3, 5, 2, 4, 2, 3) uniquemap(x) df <- data.frame(A=x, B=42) uniquemap(df) z <- cbind(x, 10-x) uniquemap(z) } \keyword{spatial} \keyword{methods} spatstat/man/pixellate.owin.Rd0000644000176200001440000000451013333543264016127 0ustar liggesusers\name{pixellate.owin} \Rdversion{1.1} \alias{pixellate.owin} \title{ Convert Window to Pixel Image } \description{ Convert a window to a pixel image by measuring the area of intersection between the window and each pixel in a raster. } \usage{ \method{pixellate}{owin}(x, W = NULL, ..., DivideByPixelArea=FALSE) } \arguments{ \item{x}{ Window (object of class \code{"owin"}) to be converted. } \item{W}{ Optional. Window determining the pixel raster on which the conversion should occur. } \item{\dots}{ Optional. Extra arguments passed to \code{\link{as.mask}} to determine the pixel raster. } \item{DivideByPixelArea}{ Logical value, indicating whether the resulting pixel values should be divided by the pixel area. } } \details{ This is a method for the generic function \code{pixellate}. It converts a window \code{x} into a pixel image, by measuring the \emph{amount} of \code{x} that is inside each pixel. (The related function \code{\link{as.im}} also converts \code{x} into a pixel image, but records only the presence or absence of \code{x} in each pixel.) The pixel raster for the conversion is determined by the argument \code{W} and the extra arguments \code{\dots}. \itemize{ \item If \code{W} is given, and it is a binary mask (a window of type \code{"mask"}) then it determines the pixel raster. 
\item If \code{W} is given, but it is not a binary mask (it is a window of another type) then it will be converted to a binary mask using \code{as.mask(W, \dots)}. \item If \code{W} is not given, it defaults to \code{as.mask(as.rectangle(x), \dots)} } In the second and third cases it would be common to use the argument \code{dimyx} to control the number of pixels. See the Examples. The algorithm then computes the area of intersection of each pixel with the window. The result is a pixel image with pixel entries equal to these intersection areas. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate.ppp}}, \code{\link{pixellate}}, \code{\link{as.im}} } \examples{ data(letterR) plot(pixellate(letterR, dimyx=15)) W <- grow.rectangle(as.rectangle(letterR), 0.2) plot(pixellate(letterR, W, dimyx=15)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/solist.Rd0000644000176200001440000000641613333543264014511 0ustar liggesusers\name{solist} \alias{solist} \title{ List of Two-Dimensional Spatial Objects } \description{ Make a list of two-dimensional spatial objects. } \usage{ solist(\dots, check=TRUE, promote=TRUE, demote=FALSE, .NameBase) } \arguments{ \item{\dots}{ Any number of objects, each representing a two-dimensional spatial dataset. } \item{check}{ Logical value. If \code{TRUE}, check that each of the objects is a 2D spatial object. } \item{promote}{ Logical value. If \code{TRUE}, test whether all objects belong to the \emph{same} class, and if so, promote the list of objects to the appropriate class of list. } \item{demote}{ Logical value determining what should happen if any of the objects is not a 2D spatial object: if \code{demote=FALSE} (the default), a fatal error occurs; if \code{demote=TRUE}, a list of class \code{"anylist"} is returned. } \item{.NameBase}{ Optional. Character string. 
If the \code{\dots} arguments have no names, then the entries of the resulting list will be given names that start with \code{.NameBase}. } } \details{ This command creates an object of class \code{"solist"} (spatial object list) which represents a list of two-dimensional spatial datasets. The datasets do not necessarily belong to the same class. Typically the intention is that the datasets in the list should be treated in the same way, for example, they should be plotted side-by-side. The \pkg{spatstat} package provides a plotting function, \code{\link{plot.solist}}, and many other functions for this class. In the \pkg{spatstat} package, various functions produce an object of class \code{"solist"}. For example, when a point pattern is split into several point patterns by \code{\link{split.ppp}}, or an image is split into several images by \code{\link{split.im}}, the result is of class \code{"solist"}. If \code{check=TRUE} then the code will check whether all objects in \code{\dots} belong to the classes of two-dimensional spatial objects defined in the \pkg{spatstat} package. They do not have to belong to the \emph{same} class. Set \code{check=FALSE} for efficiency, but only if you are sure that all the objects are valid. If some of the objects in \code{\dots} are not two-dimensional spatial objects, the action taken depends on the argument \code{demote}. If \code{demote=TRUE}, the result will belong to the more general class \code{"anylist"} instead of \code{"solist"}. If \code{demote=FALSE} (the default), an error occurs. If \code{promote=TRUE} then the code will check whether all the objects \code{\dots} belong to the same class. If they are all point patterns (class \code{"ppp"}), the result will also belong to the class \code{"ppplist"}. If they are all pixel images (class \code{"im"}), the result will also belong to the class \code{"imlist"}. Use \code{\link{as.solist}} to convert a list to a \code{"solist"}. 
} \value{ A list, usually belonging to the class \code{"solist"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.solist}}, \code{\link{anylist}}, \code{\link{solapply}} } \examples{ solist(cells, density(cells)) solist(cells, japanesepines, redwood, .NameBase="Pattern") } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/plot.rppm.Rd0000644000176200001440000000425413333543264015125 0ustar liggesusers\name{plot.rppm} \alias{plot.rppm} \title{ Plot a Recursively Partitioned Point Process Model } \description{ Given a model which has been fitted to point pattern data by recursive partitioning, plot the partition tree or the fitted intensity. } \usage{ \method{plot}{rppm}(x, \dots, what = c("tree", "spatial"), treeplot=NULL) } \arguments{ \item{x}{ Fitted point process model of class \code{"rppm"} produced by the function \code{\link{rppm}}. } \item{what}{ Character string (partially matched) specifying whether to plot the partition tree or the fitted intensity. } \item{\dots}{ Arguments passed to \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}} (if \code{what="tree"}) or passed to \code{\link{plot.im}} (if \code{what="spatial"}) controlling the appearance of the plot. } \item{treeplot}{ Optional. A function to be used to plot and label the partition tree, replacing the two functions \code{\link[rpart]{plot.rpart}} and \code{\link[rpart]{text.rpart}}. } } \details{ If \code{what="tree"} (the default), the partition tree will be plotted using \code{\link[rpart]{plot.rpart}}, and labelled using \code{\link[rpart]{text.rpart}}. If the argument \code{treeplot} is given, then plotting and labelling will be performed by \code{treeplot} instead. A good choice is the function \code{prp} in package \pkg{rpart.plot}. If \code{what="spatial"}, the predicted intensity will be computed using \code{\link{predict.rppm}}, and this intensity will be plotted as an image using \code{\link{plot.im}}. 
} \value{ If \code{what="tree"}, a list containing \code{x} and \code{y} coordinates of the plotted nodes of the tree. If \code{what="spatial"}, the return value of \code{\link{plot.im}}. } \author{ \spatstatAuthors } \seealso{ \code{\link{rppm}} } \examples{ # Murchison gold data mur <- solapply(murchison, rescale, s=1000, unitname="km") mur$dfault <- distfun(mur$faults) # fit <- rppm(gold ~ dfault + greenstone, data=mur) # opa <- par(mfrow=c(1,2)) plot(fit) plot(fit, what="spatial") par(opa) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/MultiHard.Rd0000644000176200001440000000555013333543262015061 0ustar liggesusers\name{MultiHard} \alias{MultiHard} \title{The Multitype Hard Core Point Process Model} \description{ Creates an instance of the multitype hard core point process model which can then be fitted to point pattern data. } \usage{ MultiHard(hradii, types=NULL) } \arguments{ \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype hard core process with hard core radii \eqn{hradii[i,j]}. } \details{ This is a multitype version of the hard core process. A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The matrix \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. 
A value of \code{NA} indicates that no distance constraint should be applied for this combination of types. Note that only the hardcore radii are specified in \code{MultiHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}. See \code{\link{ragsMultiHard}} and \code{\link{rmh}} for simulation. } \examples{ h <- matrix(c(1,2,2,1), nrow=2,ncol=2) # prints a sensible description of itself MultiHard(h) # Fit the stationary multitype hardcore process to `amacrine' # with hard core operating only between cells of the same type. h <- 0.02 * matrix(c(1, NA, NA, 1), nrow=2,ncol=2) ppm(amacrine ~1, MultiHard(h)) } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype hard core model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiHard(types=NULL, hradii)}. The new code attempts to handle the old syntax as well. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat/man/intensity.Rd0000644000176200001440000000247113333543263015216 0ustar liggesusers\name{intensity} \alias{intensity} \title{ Intensity of a Dataset or a Model } \description{ Generic function for computing the intensity of a spatial dataset or spatial point process model. } \usage{ intensity(X, ...) } \arguments{ \item{X}{ A spatial dataset or a spatial point process model. } \item{\dots}{ Further arguments depending on the class of \code{X}. 
} } \details{ This is a generic function for computing the intensity of a spatial dataset or spatial point process model. There are methods for point patterns (objects of class \code{"ppp"}) and fitted point process models (objects of class \code{"ppm"}). The empirical intensity of a dataset is the average density (the average amount of \sQuote{stuff} per unit area or volume). The empirical intensity of a point pattern is computed by the method \code{\link{intensity.ppp}}. The theoretical intensity of a stochastic model is the expected density (expected amount of \sQuote{stuff} per unit area or volume). The theoretical intensity of a fitted point process model is computed by the method \code{\link{intensity.ppm}}. } \value{ Usually a numeric value or vector. } \seealso{ \code{\link{intensity.ppp}}, \code{\link{intensity.ppm}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/rpoispp.Rd0000644000176200001440000001373613333543264014673 0ustar liggesusers\name{rpoispp} \alias{rpoispp} \title{Generate Poisson Point Pattern} \description{ Generate a random point pattern using the (homogeneous or inhomogeneous) Poisson process. Includes CSR (complete spatial randomness). } \usage{ rpoispp(lambda, lmax=NULL, win=owin(), \dots, nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. Either a single positive number, a \code{function(x,y, \dots)}, or a pixel image. } \item{lmax}{ Optional. An upper bound for the value of \code{lambda(x,y)}, if \code{lambda} is a function. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{ex}{ Optional. A point pattern to use as the example. If \code{ex} is given and \code{lambda,lmax,win} are missing, then \code{lambda} and \code{win} will be calculated from the point pattern \code{ex}. } \item{warnwin}{ Logical value specifying whether to issue a warning when \code{win} is ignored (which occurs when \code{lambda} is an image and \code{win} is present). } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform Poisson process (also known as Complete Spatial Randomness, CSR) inside the window \code{win} with intensity \code{lambda} (points per unit area). If \code{lambda} is a function, then this algorithm generates a realisation of the inhomogeneous Poisson process with intensity function \code{lambda(x,y,\dots)} at spatial location \code{(x,y)} inside the window \code{win}. The function \code{lambda} must work correctly with vectors \code{x} and \code{y}. If \code{lmax} is given, it must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. That is, we must have \code{lambda(x,y,\dots) <= lmax} for all locations \code{(x,y)}. If this is not true then the results of the algorithm will be incorrect. If \code{lmax} is missing or \code{NULL}, an approximate upper bound is computed by finding the maximum value of \code{lambda(x,y,\dots)} on a grid of locations \code{(x,y)} inside the window \code{win}, and adding a safety margin equal to 5 percent of the range of \code{lambda} values. This can be computationally intensive, so it is advisable to specify \code{lmax} if possible. 
If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), this algorithm generates a realisation of the inhomogeneous Poisson process with intensity equal to the pixel values of the image. (The value of the intensity function at an arbitrary location is the pixel value of the nearest pixel.) The argument \code{win} is ignored; the window of the pixel image is used instead. It will be converted to a rectangle if possible, using \code{\link{rescue.rectangle}}. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax}, then randomly deletes or retains each point, independently of other points, with retention probability \eqn{p(x,y) = \lambda(x,y)/\mbox{lmax}}{p(x,y) = lambda(x,y)/lmax}. For \emph{marked} point patterns, use \code{\link{rmpoispp}}. } \section{Warning}{ Note that \code{lambda} is the \bold{intensity}, that is, the expected number of points \bold{per unit area}. The total number of points in the simulated pattern will be random with expected value \code{mu = lambda * a} where \code{a} is the area of the window \code{win}. } \section{Reproducibility}{ The simulation algorithm, for the case where \code{lambda} is a pixel image, was changed in \pkg{spatstat} version \code{1.42-3}. Set \code{spatstat.options(fastpois=FALSE)} to use the previous, slower algorithm, if it is desired to reproduce results obtained with earlier versions. 
} \seealso{ \code{\link{rmpoispp}} for Poisson \emph{marked} point patterns, \code{\link{runifpoint}} for a fixed number of independent uniform random points; \code{\link{rpoint}}, \code{\link{rmpoint}} for a fixed number of independent random points with any distribution; \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rStrauss}}, \code{\link{rstrat}} for random point processes with spatial inhibition or regularity; \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rcell}} for random point processes exhibiting clustering; \code{\link{rmh.default}} for Gibbs processes. See also \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform Poisson process with intensity 100 in the unit square pp <- rpoispp(100) # uniform Poisson process with intensity 1 in a 10 x 10 square pp <- rpoispp(1, win=owin(c(0,10),c(0,10))) # plots should look similar ! # inhomogeneous Poisson process in unit square # with intensity lambda(x,y) = 100 * exp(-3*x) # Intensity is bounded by 100 pp <- rpoispp(function(x,y) {100 * exp(-3*x)}, 100) # How to tune the coefficient of x lamb <- function(x,y,a) { 100 * exp( - a * x)} pp <- rpoispp(lamb, 100, a=3) # pixel image Z <- as.im(function(x,y){100 * sqrt(x+y)}, unit.square()) pp <- rpoispp(Z) # randomising an existing point pattern rpoispp(intensity(cells), win=Window(cells)) rpoispp(ex=cells) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/rPoissonCluster.Rd0000644000176200001440000001121213333543264016340 0ustar liggesusers\name{rPoissonCluster} \alias{rPoissonCluster} \title{Simulate Poisson Cluster Process} \description{ Generate a random point pattern, a realisation of the general Poisson cluster process. } \usage{ rPoissonCluster(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. 
A single positive number, a function, or a pixel image. } \item{expand}{ Size of the expansion of the simulation window for generating parent points. A single non-negative number. } \item{rcluster}{ A function which generates random clusters. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster} } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of the point pattern: see Details. } \details{ This algorithm generates a realisation of the general Poisson cluster process, with the cluster mechanism given by the function \code{rcluster}. First, the algorithm generates a Poisson point process of ``parent'' points with intensity \code{kappa} in an expanded window as explained below.. Here \code{kappa} may be a single positive number, a function \code{kappa(x, y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points, created by calling the function \code{rcluster}. These clusters are combined together to yield a single point pattern, and the restriction of this pattern to the window \code{win} is then returned as the result of \code{rPoissonCluster}. 
The expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{expand} in each direction. The size of the expansion is saved in the attribute \code{"expand"} and may be extracted by \code{attr(X, "expand")} where \code{X} is the generated point pattern. The function \code{rcluster} should expect to be called as \code{rcluster(xp[i],yp[i],\dots)} for each parent point at a location \code{(xp[i],yp[i])}. The return value of \code{rcluster} should be a list with elements \code{x,y} which are vectors of equal length giving the absolute \eqn{x} and \code{y} coordinates of the points in the cluster. If the return value of \code{rcluster} is a point pattern (object of class \code{"ppp"}) then it may have marks. The result of \code{rPoissonCluster} will then be a marked point pattern. If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rPoissonCluster} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. (If these data are not required, it is more efficient to set \code{saveparents=FALSE}.) } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}. 
} \examples{ # each cluster consist of 10 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rPoissonCluster(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rPoissonCluster(15,0.1,nclust2, radius=0.1, n=5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/compatible.im.Rd0000644000176200001440000000212413333543263015706 0ustar liggesusers\name{compatible.im} \alias{compatible.im} \title{Test Whether Pixel Images Are Compatible} \description{ Tests whether two or more pixel image objects have compatible dimensions. } \usage{ \method{compatible}{im}(A, B, \dots, tol=1e-6) } \arguments{ \item{A,B,\dots}{Two or more pixel images (objects of class \code{"im"}).} \item{tol}{Tolerance factor} } \details{ This function tests whether the pixel images \code{A} and \code{B} (and any additional images \code{\dots}) have compatible pixel dimensions. They are compatible if they have the same number of rows and columns, the same physical pixel dimensions, and occupy the same rectangle in the plane. The argument \code{tol} specifies the maximum tolerated error in the pixel coordinates, expressed as a fraction of the dimensions of a single pixel. } \value{ Logical value: \code{TRUE} if the images are compatible, and \code{FALSE} if they are not. 
} \seealso{ \code{\link{eval.im}}, \code{\link{harmonise.im}}, \code{\link{commonGrid}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Kest.fft.Rd0000644000176200001440000000615013333543262014651 0ustar liggesusers\name{Kest.fft} \alias{Kest.fft} \title{K-function using FFT} \description{ Estimates the reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape, using the Fast Fourier Transform. } \usage{ Kest.fft(X, sigma, r=NULL, \dots, breaks=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ Standard deviation of the isotropic Gaussian smoothing kernel. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the spatial resolution for the FFT calculation. } \item{breaks}{ This argument is for internal use only. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{border}{the estimates of \eqn{K(r)} for these values of \eqn{r} } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } } \details{ This is an alternative to the function \code{\link{Kest}} for estimating the \eqn{K} function. It may be useful for very large patterns of points. Whereas \code{\link{Kest}} computes the distance between each pair of points analytically, this function discretises the point pattern onto a rectangular pixel raster and applies Fast Fourier Transform techniques to estimate \eqn{K(t)}. The hard work is done by the function \code{\link{Kmeasure}}. 
The result is an approximation whose accuracy depends on the resolution of the pixel raster. The resolution is controlled by the arguments \code{\dots}, or by setting the parameter \code{npixel} in \code{\link{spatstat.options}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kmeasure}}, \code{\link{spatstat.options}} } \examples{ pp <- runifpoint(10000) \testonly{ op <- spatstat.options(npixel=125) } Kpp <- Kest.fft(pp, 0.01) plot(Kpp) \testonly{spatstat.options(op)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/methods.zclustermodel.Rd0000644000176200001440000000261413333543265017527 0ustar liggesusers\name{methods.zclustermodel} \alias{methods.zclustermodel} % DoNotExport \alias{pcfmodel.zclustermodel} \alias{predict.zclustermodel} \alias{print.zclustermodel} \title{ Methods for Cluster Models } \description{ Methods for the experimental class of cluster models. } \usage{ \method{pcfmodel}{zclustermodel}(model, \dots) \method{predict}{zclustermodel}(object, \dots, locations, type = "intensity", ngrid = NULL) \method{print}{zclustermodel}(x, \dots) } \arguments{ \item{model,object,x}{ Object of class \code{"zclustermodel"}. } \item{\dots}{ Arguments passed to other methods. 
} \item{locations}{ Locations where prediction should be performed. A window or a point pattern. } \item{type}{ Currently must equal \code{"intensity"}. } \item{ngrid}{ Pixel grid dimensions for prediction, if \code{locations} is a rectangle or polygon. } } \details{ Experimental. } \value{ Same as for other methods. } \author{ \adrian } \seealso{ \code{\link{zclustermodel}} } \examples{ m <- zclustermodel("Thomas", kappa=10, mu=5, scale=0.1) m2 <- zclustermodel("VarGamma", kappa=10, mu=10, scale=0.1, nu=0.7) m m2 g <- pcfmodel(m) g(0.2) g2 <- pcfmodel(m2) g2(1) Z <- predict(m, locations=square(2)) Z2 <- predict(m2, locations=square(1)) varcount(m, square(1)) varcount(m2, square(1)) } \keyword{spatial} \keyword{models} spatstat/man/dilation.Rd0000644000176200001440000000550313333543263014772 0ustar liggesusers\name{dilation} \alias{dilation} \alias{dilation.owin} \alias{dilation.ppp} \alias{dilation.psp} \title{Morphological Dilation} \description{ Perform morphological dilation of a window, a line segment pattern or a point pattern } \usage{ dilation(w, r, \dots) \method{dilation}{owin}(w, r, \dots, polygonal=NULL, tight=TRUE) \method{dilation}{ppp}(w, r, \dots, polygonal=TRUE, tight=TRUE) \method{dilation}{psp}(w, r, \dots, polygonal=TRUE, tight=TRUE) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of dilation.} \item{\dots}{extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if the pixel approximation is used; or passed to \code{\link{disc}} if the polygonal approximation is used. } \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the dilation (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). 
} \item{tight}{ Logical flag indicating whether the bounding frame of the window should be taken as the smallest rectangle enclosing the dilated region (\code{tight=TRUE}), or should be the dilation of the bounding frame of \code{w} (\code{tight=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the dilated region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological dilation of a set \eqn{W} by a distance \eqn{r > 0} is the set consisting of all points lying at most \eqn{r} units away from \eqn{W}. Effectively, dilation adds a margin of width \eqn{r} onto the set \eqn{W}. If \code{polygonal=TRUE} then a polygonal approximation to the dilation is computed. If \code{polygonal=FALSE} then a pixel approximation to the dilation is computed from the distance map of \code{w}. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to control the pixel resolution. When \code{w} is a window, the default (when \code{polygonal=NULL}) is to compute a polygonal approximation if \code{w} is a rectangle or polygonal window, and to compute a pixel approximation if \code{w} is a window of type \code{"mask"}. } \seealso{ \code{\link{erosion}} for the opposite operation. \code{\link{dilationAny}} for morphological dilation using any shape. 
\code{\link{owin}}, \code{\link{as.owin}} } \examples{ plot(dilation(redwood, 0.05)) points(redwood) plot(dilation(letterR, 0.2)) plot(letterR, add=TRUE, lwd=2, border="red") X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(dilation(X, 0.1)) plot(X, add=TRUE, col="red") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/marks.Rd0000644000176200001440000000652013333543263014304 0ustar liggesusers\name{marks} \alias{marks} \alias{marks.ppp} \alias{marks.ppx} \alias{marks<-} \alias{marks<-.ppp} \alias{marks<-.ppx} \alias{setmarks} \alias{\%mark\%} %DoNotExport %NAMESPACE export("%mark%") \title{Marks of a Point Pattern} \description{ Extract or change the marks attached to a point pattern dataset. } \usage{ marks(x, \dots) \method{marks}{ppp}(x, \dots, dfok=TRUE, drop=TRUE) \method{marks}{ppx}(x, \dots, drop=TRUE) marks(x, \dots) <- value \method{marks}{ppp}(x, \dots, dfok=TRUE, drop=TRUE) <- value \method{marks}{ppx}(x, \dots) <- value setmarks(x, value) x \%mark\% value } \arguments{ \item{x}{ Point pattern dataset (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Ignored. } \item{dfok}{ Logical. If \code{FALSE}, data frames of marks are not permitted and will generate an error. } \item{drop}{ Logical. If \code{TRUE}, a data frame consisting of a single column of marks will be converted to a vector or factor. } \item{value}{ Replacement value. A vector, data frame or hyperframe of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor, data frame or hyperframe, containing the mark values attached to the points of \code{x}. For \code{marks(x) <- value}, the result is the updated point pattern \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). For \code{setmarks(x,value)} and \code{x \%mark\% value}, the return value is the point pattern obtained by replacing the marks of \code{x} by \code{value}. 
} \details{ These functions extract or change the marks attached to the points of the point pattern \code{x}. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The expression \code{setmarks(x,value)} or equivalently \code{x \%mark\% value} returns a point pattern obtained by replacing the marks of \code{x} by \code{value}, but does not change the dataset \code{x} itself. For point patterns in two-dimensional space (objects of class \code{"ppp"}) the marks can be a vector, a factor, or a data frame. For general point patterns (objects of class "ppx") the marks can be a vector, a factor, a data frame or a hyperframe. For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of points in \code{x}, or a data frame or hyperframe with as many rows as there are points in \code{x}. If \code{value} is a single value, or a data frame or hyperframe with one row, then it will be replicated so that the same marks will be attached to each point. To remove marks, use \code{marks(x) <- NULL} or \code{\link{unmark}(x)}. Use \code{\link{ppp}} or \code{\link{ppx}} to create point patterns in more general situations. } \seealso{ \code{\link{ppp.object}}, \code{\link{ppx}}, \code{\link{unmark}}, \code{\link{hyperframe}} } \examples{ X <- amacrine # extract marks m <- marks(X) # recode the mark values "off", "on" as 0, 1 marks(X) <- as.integer(m == "on") } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/bind.fv.Rd0000644000176200001440000000675213333543262014523 0ustar liggesusers\name{bind.fv} \alias{bind.fv} \alias{cbind.fv} \title{ Combine Function Value Tables } \description{ Advanced Use Only. Combine objects of class \code{"fv"}, or glue extra columns of data onto an existing \code{"fv"} object. } \usage{ \method{cbind}{fv}(...) 
bind.fv(x, y, labl = NULL, desc = NULL, preferred = NULL, clip=FALSE) } \arguments{ \item{\dots}{ Any number of arguments, which are objects of class \code{"fv"}. } \item{x}{ An object of class \code{"fv"}. } \item{y}{ Either a data frame or an object of class \code{"fv"}. } \item{labl}{ Plot labels (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{desc}{ Descriptions (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{preferred}{ Character string specifying the column which is to be the new recommended value of the function. } \item{clip}{ Logical value indicating whether each object must have exactly the same domain, that is, the same sequence of values of the function argument (\code{clip=FALSE}, the default) or whether objects with different domains are permissible and will be restricted to a common domain (\code{clip=TRUE}). } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. The function \code{cbind.fv} is a method for the generic \R function \code{\link{cbind}}. It combines any number of objects of class \code{"fv"} into a single object of class \code{"fv"}. The objects must be compatible, in the sense that they have identical values of the function argument. The function \code{bind.fv} is a lower level utility which glues additional columns onto an existing object \code{x} of class \code{"fv"}. It has two modes of use: \itemize{ \item If the additional dataset \code{y} is an object of class \code{"fv"}, then \code{x} and \code{y} must be compatible as described above. Then the columns of \code{y} that contain function values will be appended to the object \code{x}. \item Alternatively if \code{y} is a data frame, then \code{y} must have the same number of rows as \code{x}. All columns of \code{y} will be appended to \code{x}. 
} The arguments \code{labl} and \code{desc} provide plot labels and description strings (as described in \code{\link{fv}}) for the \emph{new} columns. If \code{y} is an object of class \code{"fv"} then \code{labl} and \code{desc} are optional, and default to the relevant entries in the object \code{y}. If \code{y} is a data frame then \code{labl} and \code{desc} must be provided. } \value{ An object of class \code{"fv"}. } \author{ \spatstatAuthors. } \examples{ data(cells) K1 <- Kest(cells, correction="border") K2 <- Kest(cells, correction="iso") # remove column 'theo' to avoid duplication K2 <- K2[, names(K2) != "theo"] cbind(K1, K2) bind.fv(K1, K2, preferred="iso") # constrain border estimate to be monotonically increasing bm <- cumsum(c(0, pmax(0, diff(K1$border)))) bind.fv(K1, data.frame(bmono=bm), "\%s[bmo](r)", "monotone border-corrected estimate of \%s", "bmono") } \seealso{ \code{\link{fv}}, \code{\link{with.fv}}. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \keyword{spatial} \keyword{attribute} spatstat/man/linearmarkequal.Rd0000644000176200001440000000422313623712063016340 0ustar liggesusers\name{linearmarkequal} \alias{linearmarkequal} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkequal(X, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. 
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is the mark equality function for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (2014) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkconnect}}, \code{\link{markconnect}}. } \examples{ if(interactive()) { X <- chicago } else { m <- sample(factor(c("A","B")), 20, replace=TRUE) X <- runiflpp(20, simplenet) \%mark\% m } p <- linearmarkequal(X) } \author{\adrian} \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.slrm.Rd0000644000176200001440000000234013333543264015116 0ustar liggesusers\name{plot.slrm} \Rdversion{1.1} \alias{plot.slrm} \title{ Plot a Fitted Spatial Logistic Regression } \description{ Plots a fitted Spatial Logistic Regression model. } \usage{ \method{plot}{slrm}(x, ..., type = "intensity") } \arguments{ \item{x}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Extra arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. 
} } \details{ This is a method for \code{\link{plot}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). This function plots the result of \code{\link{predict.slrm}}. } \value{ None. } \seealso{ \code{\link{slrm}}, \code{\link{predict.slrm}}, \code{\link{plot.im}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fit <- slrm(X ~ Z) plot(fit) plot(fit, type="link") } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/varcount.Rd0000644000176200001440000000656613333543264015043 0ustar liggesusers\name{varcount} \alias{varcount} \title{ Predicted Variance of the Number of Points } \description{ Given a fitted point process model, calculate the predicted variance of the number of points in a nominated set \code{B}. } \usage{ varcount(model, B, \dots, dimyx = NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). } \item{B}{ A window (object of class \code{"owin"} specifying the region in which the points are counted. Alternatively a pixel image (object of class \code{"im"}) or a function of spatial coordinates specifying a numerical weight for each random point. } \item{\dots}{ Additional arguments passed to \code{B} when it is a function. } \item{dimyx}{ Spatial resolution for the calculations. Argument passed to \code{\link{as.mask}}. } } \details{ This command calculates the variance of the number of points falling in a specified window \code{B} according to the \code{model}. It can also calculate the variance of a sum of weights attached to each random point. The \code{model} should be a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). 
\itemize{ \item{ If \code{B} is a window, this command calculates the variance of the number of points falling in \code{B}, according to the fitted \code{model}. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then \code{B} should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a pixel image, this command calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling in the domain of the image. If the \code{model} depends on spatial covariates other than the Cartesian coordinates, then the domain of the pixel image, \code{as.owin(B)}, should be a subset of the domain in which these covariates are defined. } \item{ If \code{B} is a \code{function(x,y)} or \code{function(x,y,...)} this command calculates the variance of \eqn{T = \sum_i B(x_i)}{T = sum[i] B(x[i])}, the sum of the values of \code{B} over all random points falling inside the window \code{W=as.owin(model)}, the window in which the original data were observed. } } The variance calculation involves the intensity and the pair correlation function of the model. The calculation is exact (up to discretisation error) for models of class \code{"kppm"} and \code{"dppm"}, and for Poisson point process models of class \code{"ppm"}. For Gibbs point process models of class \code{"ppm"} the calculation depends on the Poisson-saddlepoint approximations to the intensity and pair correlation function, which are rough approximations. The approximation is not yet implemented for some Gibbs models. } \value{ A single number. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{predict.ppm}}, \code{\link{predict.kppm}}, \code{\link{predict.dppm}} } \examples{ fitT <- kppm(redwood ~ 1, "Thomas") B <- owin(c(0, 0.5), c(-0.5, 0)) varcount(fitT, B) fitS <- ppm(swedishpines ~ 1, Strauss(9)) BS <- square(50) varcount(fitS, BS) } \keyword{spatial} \keyword{models} spatstat/man/square.Rd0000644000176200001440000000266613333543264014477 0ustar liggesusers\name{square} \alias{square} \alias{unit.square} \title{Square Window} \description{ Creates a square window } \usage{ square(r=1, unitname=NULL) unit.square() } \arguments{ \item{r}{Numeric. The side length of the square, or a vector giving the minimum and maximum coordinate values. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying a window. } \details{ If \code{r} is a number, \code{square(r)} is a shortcut for creating a window object representing the square \eqn{[0,r] \times [0,r]}{[0,r] * [0,r]}. It is equivalent to the command \code{owin(c(0,r),c(0,r))}. If \code{r} is a vector of length 2, then \code{square(r)} creates the square with \code{x} and \code{y} coordinates ranging from \code{r[1]} to \code{r[2]}. \code{unit.square} creates the unit square \eqn{[0,1] \times [0,1]}{[0,1] * [0,1]}. It is equivalent to \code{square(1)} or \code{square()} or \code{owin(c(0,1),c(0,1))}. These commands are included for convenience, and to improve the readability of some code. 
} \seealso{ \code{\link{owin.object}}, \code{\link{owin}} } \examples{ W <- square(10) W <- square(c(-1,1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Kmodel.ppm.Rd0000644000176200001440000000450113333543262015171 0ustar liggesusers\name{Kmodel.ppm} \alias{Kmodel.ppm} \alias{pcfmodel.ppm} \title{K Function or Pair Correlation Function of Gibbs Point Process model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a fitted Gibbs point process model. } \usage{ \method{Kmodel}{ppm}(model, \dots) \method{pcfmodel}{ppm}(model, \dots) } \arguments{ \item{model}{ A fitted Poisson or Gibbs point process model (object of class \code{"ppm"}) typically obtained from the model-fitting algorithm \code{\link{ppm}}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ This function computes an \emph{approximation} to the \eqn{K} function or the pair correlation function of a Gibbs point process. The functions \code{\link{Kmodel}} and \code{\link{pcfmodel}} are generic. The functions documented here are the methods for the class \code{"ppm"}. The approximation is only available for stationary pairwise-interaction models. It uses the second order Poisson-saddlepoint approximation (Baddeley and Nair, 2012b) which is a combination of the Poisson-Boltzmann-Emden and Percus-Yevick approximations. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{ppm}} to fit Gibbs models. \code{\link{Kmodel}} for the generic functions. \code{\link{Kmodel.kppm}} for the method for cluster/Cox processes. 
} \examples{ fit <- ppm(swedishpines, ~1, Strauss(8)) p <- pcfmodel(fit) K <- Kmodel(fit) p(6) K(8) curve(K(x), from=0, to=15) } \references{ Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. doi: 10.1002/sta4.5 } \author{\adrian and Gopalan Nair. } \keyword{spatial} \keyword{models} spatstat/man/rmh.Rd0000644000176200001440000000565313333543264013764 0ustar liggesusers\name{rmh} \alias{rmh} \title{Simulate point patterns using the Metropolis-Hastings algorithm.} \description{ Generic function for running the Metropolis-Hastings algorithm to produce simulated realisations of a point process model. } \usage{rmh(model, \dots)} \arguments{ \item{model}{The point process model to be simulated. } \item{\dots}{Further arguments controlling the simulation. } } \details{ The Metropolis-Hastings algorithm can be used to generate simulated realisations from a wide range of spatial point processes. For caveats, see below. The function \code{rmh} is generic; it has methods \code{\link{rmh.ppm}} (for objects of class \code{"ppm"}) and \code{\link{rmh.default}} (the default). The actual implementation of the Metropolis-Hastings algorithm is contained in \code{\link{rmh.default}}. For details of its use, see \code{\link{rmh.ppm}} or \code{\link{rmh.default}}. [If the model is a Poisson process, then Metropolis-Hastings is not used; the Poisson model is generated directly using \code{\link{rpoispp}} or \code{\link{rmpoispp}}.] In brief, the Metropolis-Hastings algorithm is a Markov Chain, whose states are spatial point patterns, and whose limiting distribution is the desired point process. After running the algorithm for a very large number of iterations, we may regard the state of the algorithm as a realisation from the desired point process. 
However, there are difficulties in deciding whether the algorithm has run for ``long enough''. The convergence of the algorithm may indeed be extremely slow. No guarantees of convergence are given! While it is fashionable to decry the Metropolis-Hastings algorithm for its poor convergence and other properties, it has the advantage of being easy to implement for a wide range of models. } \section{Warning}{ As of version 1.22-1 of \code{spatstat} a subtle change was made to \code{rmh.default()}. We had noticed that the results produced were sometimes not ``scalable'' in that two models, differing in effect only by the units in which distances are measured and starting from the same seed, gave different results. This was traced to an idiosyncrasy of floating point arithmetic. The code of \code{rmh.default()} has been changed so that the results produced by \code{rmh} are now scalable. The downside of this is that code which users previously ran may now give results which are different from what they formerly were. In order to recover former behaviour (so that previous results can be reproduced) set \code{spatstat.options(scalable=FALSE)}. See the last example in the help for \code{\link{rmh.default}}. } \value{ A point pattern, in the form of an object of class \code{"ppp"}. See \code{\link{rmh.default}} for details. } \seealso{ \code{\link{rmh.default}} } \examples{ # See examples in rmh.default and rmh.ppm } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/reflect.Rd0000644000176200001440000000223413333543264014612 0ustar liggesusers\name{reflect} \alias{reflect} \alias{reflect.im} \alias{reflect.default} \title{Reflect In Origin} \description{ Reflects a geometrical object through the origin. 
} \usage{ reflect(X) \method{reflect}{im}(X) \method{reflect}{default}(X) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} } \value{ Another object of the same type, representing the result of reflection. } \details{ The object \code{X} is reflected through the origin. That is, each point in \code{X} with coordinates \eqn{(x,y)} is mapped to the position \eqn{(-x, -y)}. This is equivalent to applying the affine transformation with matrix \code{diag(c(-1,-1))}. It is also equivalent to rotation about the origin by 180 degrees. The command \code{reflect} is generic, with a method for pixel images and a default method. } \seealso{ \code{\link{affine}}, \code{\link{flipxy}} } \examples{ plot(reflect(as.im(letterR))) plot(reflect(letterR), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/laslett.Rd0000644000176200001440000001356713420233047014641 0ustar liggesusers\name{laslett} \alias{laslett} \title{ Laslett's Transform } \description{ Apply Laslett's Transform to a spatial region, returning the original and transformed regions, and the original and transformed positions of the lower tangent points. This is a diagnostic for the Boolean model. } \usage{ laslett(X, \dots, verbose = FALSE, plotit = TRUE, discretise = FALSE, type=c("lower", "upper", "left", "right")) } \arguments{ \item{X}{ Spatial region to be transformed. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{\dots}{ Graphics arguments to control the plot (passed to \code{\link{plot.laslett}} when \code{plotit=TRUE}) or arguments determining the pixel resolution (passed to \code{\link{as.mask}}). } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{plotit}{ Logical value indicating whether to plot the result. 
} \item{discretise}{ Logical value indicating whether polygonal windows should first be converted to pixel masks before the Laslett transform is computed. This should be set to \code{TRUE} for very complicated polygons. } \item{type}{ Type of tangent points to be detected. This also determines the direction of contraction in the set transformation. Default is \code{type="lower"}. } } \details{ This function finds the lower tangent points of the spatial region \code{X}, then applies Laslett's Transform to the space, and records the transformed positions of the lower tangent points. Laslett's transform is a diagnostic for the Boolean Model. A test of the Boolean model can be performed by applying a test of CSR to the transformed tangent points. See the Examples. The rationale is that, if the region \code{X} was generated by a Boolean model with convex grains, then the lower tangent points of \code{X}, when subjected to Laslett's transform, become a Poisson point process (Cressie, 1993, section 9.3.5; Molchanov, 1997; Barbour and Schmidt, 2001). Intuitively, Laslett's transform is a way to account for the fact that tangent points of \code{X} cannot occur \emph{inside} \code{X}. It treats the interior of \code{X} as empty space, and collapses this empty space so that only the \emph{exterior} of \code{X} remains. In this collapsed space, the tangent points are completely random. Formally, Laslett's transform is a random (i.e. data-dependent) spatial transformation which maps each spatial location \eqn{(x,y)} to a new location \eqn{(x',y)} at the same height \eqn{y}. The transformation is defined so that \eqn{x'} is the total \emph{uncovered} length of the line segment from \eqn{(0,y)} to \eqn{(x,y)}, that is, the total length of the parts of this segment that fall outside the region \code{X}. In more colourful terms, suppose we use an abacus to display a pixellated version of \code{X}. Each wire of the abacus represents one horizontal line in the pixel image. 
Each pixel lying \emph{outside} the region \code{X} is represented by a bead of the abacus; pixels \emph{inside} \code{X} are represented by the absence of a bead. Next we find any beads which are lower tangent points of \code{X}, and paint them green. Then Laslett's Transform is applied by pushing all beads to the left, as far as possible. The final locations of all the beads provide a new spatial region, inside which is the point pattern of tangent points (marked by the green-painted beads). If \code{plotit=TRUE} (the default), a before-and-after plot is generated, showing the region \code{X} and the tangent points before and after the transformation. This plot can also be generated by calling \code{plot(a)} where \code{a} is the object returned by the function \code{laslett}. If the argument \code{type} is given, then this determines the type of tangents that will be detected, and also the direction of contraction in Laslett's transform. The computation is performed by first rotating \code{X}, applying Laslett's transform for lower tangent points, then rotating back. There are separate algorithms for polygonal windows and pixellated windows (binary masks). The polygonal algorithm may be slow for very complicated polygons. If this happens, setting \code{discretise=TRUE} will convert the polygonal window to a binary mask and invoke the pixel raster algorithm. } \value{ A list, which also belongs to the class \code{"laslett"} so that it can immediately be printed and plotted. 
The list elements are: \describe{ \item{oldX:}{the original dataset \code{X};} \item{TanOld:}{a point pattern, whose window is \code{Frame(X)}, containing the lower tangent points of \code{X};} \item{TanNew:}{a point pattern, whose window is the Laslett transform of \code{Frame(X)}, and which contains the Laslett-transformed positions of the tangent points;} \item{Rect:}{a rectangular window, which is the largest rectangle lying inside the transformed set;} \item{df:}{a data frame giving the locations of the tangent points before and after transformation. } \item{type:}{character string specifying the type of tangents.} } } \references{ Barbour, A.D. and Schmidt, V. (2001) On Laslett's Transform for the Boolean Model. \emph{Advances in Applied Probability} \bold{33}(1), 1--5. Cressie, N.A.C. (1993) \emph{Statistics for spatial data}, second edition. John Wiley and Sons. Molchanov, I. (1997) \emph{Statistics of the Boolean Model for Practitioners and Mathematicians}. Wiley. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{plot.laslett}} } \examples{ a <- laslett(heather$coarse) with(a, clarkevans.test(TanNew[Rect], correction="D", nsim=39)) X <- discs(runifpoint(15) \%mark\% 0.2, npoly=16) b <- laslett(X, type="left") b } \keyword{spatial} \keyword{manip} spatstat/man/Softcore.Rd0000644000176200001440000001321313333543262014747 0ustar liggesusers\name{Softcore} \alias{Softcore} \title{The Soft Core Point Process Model} \description{ Creates an instance of the Soft Core point process model which can then be fitted to point pattern data. } \usage{ Softcore(kappa, sigma0=NA) } \arguments{ \item{kappa}{The exponent \eqn{\kappa}{kappa} of the Soft Core interaction} \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Soft Core process with exponent \eqn{\kappa}{kappa}. 
} \details{ The (stationary) Soft Core point process with parameters \eqn{\beta}{beta} and \eqn{\sigma}{sigma} and exponent \eqn{\kappa}{kappa} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points contributes a factor \deqn{ \exp \left\{ - \left( \frac{\sigma}{d} \right)^{2/\kappa} \right\} }{ exp( - (sigma/d)^(2/kappa) ) } to the density, where \eqn{d} is the distance between the two points. See the Examples for a plot of this interaction curve. Thus the process has probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \exp \left\{ - \sum_{i < j} \left( \frac{\sigma}{||x_i-x_j||} \right)^{2/\kappa} \right\} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) exp( - sum (sigma/||x[i]-x[j]||)^(2/kappa)) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{\alpha}{alpha} is the normalising constant, and the sum on the right hand side is over all unordered pairs of points of the pattern. This model describes an ``ordered'' or ``inhibitive'' process, with the strength of inhibition decreasing smoothly with distance. The interaction is controlled by the parameters \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}. \itemize{ \item The \emph{spatial scale} of interaction is controlled by the parameter \eqn{\sigma}{sigma}, which is a positive real number interpreted as a distance, expressed in the same units of distance as the spatial data. The parameter \eqn{\sigma}{sigma} is the distance at which the pair potential reaches the threshold value 0.37. \item The \emph{shape} of the interaction function is controlled by the exponent \eqn{\kappa}{kappa} which is a dimensionless number in the range \eqn{(0,1)}, with larger values corresponding to a flatter shape (or a more gradual decay rate). The process is well-defined only for \eqn{\kappa}{kappa} in \eqn{(0,1)}. 
The limit of the model as \eqn{\kappa \to 0}{kappa -> 0} is the hard core process with hard core distance \eqn{h=\sigma}{h=sigma}. \item The \dQuote{strength} of the interaction is determined by both of the parameters \eqn{\sigma}{sigma} and \eqn{\kappa}{kappa}. The larger the value of \eqn{\kappa}{kappa}, the wider the range of distances over which the interaction has an effect. If \eqn{\sigma}{sigma} is very small, the interaction is very weak for all practical purposes (theoretically if \eqn{\sigma = 0}{sigma = 0} the model reduces to the Poisson point process). } The nonstationary Soft Core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Soft Core process pairwise interaction is yielded by the function \code{Softcore()}. See the examples below. The main argument is the exponent \code{kappa}. When \code{kappa} is fixed, the model becomes an exponential family with canonical parameters \eqn{\log \beta}{log(beta)} and \deqn{ \log \gamma = \frac{2}{\kappa} \log\sigma }{ log(gamma) = (2/kappa) log(sigma) } The canonical parameters are estimated by \code{\link{ppm}()}, not fixed in \code{Softcore()}. The optional argument \code{sigma0} can be used to improve numerical stability. If \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Ogata, Y, and Tanemura, M. (1981). Estimation of interaction potentials of spatial point patterns through the maximum likelihood procedure. 
\emph{Annals of the Institute of Statistical Mathematics}, B \bold{33}, 315--338. Ogata, Y, and Tanemura, M. (1984). Likelihood analysis of spatial point patterns. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 496--518. } \examples{ # fit the stationary Soft Core process to `cells' fit5 <- ppm(cells ~1, Softcore(kappa=0.5), correction="isotropic") # study shape of interaction and explore effect of parameters fit2 <- update(fit5, Softcore(kappa=0.2)) fit8 <- update(fit5, Softcore(kappa=0.8)) plot(fitin(fit2), xlim=c(0, 0.4), main="Pair potential (sigma = 0.1)", xlab=expression(d), ylab=expression(h(d)), legend=FALSE) plot(fitin(fit5), add=TRUE, col=4) plot(fitin(fit8), add=TRUE, col=3) legend("bottomright", col=c(1,4,3), lty=1, legend=expression(kappa==0.2, kappa==0.5, kappa==0.8)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat/man/bw.scott.Rd0000644000176200001440000000673113515321576014741 0ustar liggesusers\name{bw.scott} \alias{bw.scott} \alias{bw.scott.iso} \title{ Scott's Rule for Bandwidth Selection for Kernel Density } \description{ Use Scott's rule of thumb to determine the smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.scott(X, isotropic=FALSE, d=NULL) bw.scott.iso(X) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{isotropic}{ Logical value indicating whether to compute a single bandwidth for an isotropic Gaussian kernel (\code{isotropic=TRUE}) or separate bandwidths for each coordinate axis (\code{isotropic=FALSE}, the default). } \item{d}{ Advanced use only. An integer value that should be used in Scott's formula instead of the true number of spatial dimensions. } } \details{ These functions select a bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}} or \code{\link{density.lpp}} or other appropriate functions. 
They can be applied to a point pattern belonging to any class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}. The bandwidth \eqn{\sigma}{sigma} is computed by the rule of thumb of Scott (1992, page 152, equation 6.42). The bandwidth is proportional to \eqn{n^{-1/(d+4)}}{n^(-1/(d+4))} where \eqn{n} is the number of points and \eqn{d} is the number of spatial dimensions. This rule is very fast to compute. It typically produces a larger bandwidth than \code{\link{bw.diggle}}. It is useful for estimating gradual trend. If \code{isotropic=FALSE} (the default), \code{bw.scott} provides a separate bandwidth for each coordinate axis, and the result of the function is a vector, of length equal to the number of coordinates. If \code{isotropic=TRUE}, a single bandwidth value is computed and the result is a single numeric value. \code{bw.scott.iso(X)} is equivalent to \code{bw.scott(X, isotropic=TRUE)}. The default value of \eqn{d} is as follows: \tabular{ll}{ \bold{class} \tab \bold{dimension} \cr \code{"ppp"} \tab 2 \cr \code{"lpp"} \tab 1 \cr \code{"pp3"} \tab 3 \cr \code{"ppx"} \tab number of spatial coordinates } The use of \code{d=1} for point patterns on a linear network (class \code{"lpp"}) was proposed by McSwiggan et al (2016) and Rakshit et al (2019). } \value{ A numerical value giving the selected bandwidth, or a numerical vector giving the selected bandwidths for each coordinate. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ hickory <- split(lansing)[["hickory"]] b <- bw.scott(hickory) b \donttest{ plot(density(hickory, b)) } bw.scott.iso(hickory) bw.scott(chicago) bw.scott(osteo$pts[[1]]) } \references{ Scott, D.W. (1992) \emph{Multivariate Density Estimation. Theory, Practice and Visualization}. New York: Wiley. McSwiggan, G., Baddeley, A. and Nair, G. (2016) Kernel density estimation on a linear network. 
\emph{Scandinavian Journal of Statistics} \bold{44} (2) 324--345. Rakshit, S., Davies, T., Moradi, M., McSwiggan, G., Nair, G., Mateu, J. and Baddeley, A. (2019) Fast kernel smoothing of point patterns on a large network using 2D convolution. \emph{International Statistical Review}. In press. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/ppm.ppp.Rd0000644000176200001440000010643313333543264014566 0ustar liggesusers\name{ppm.ppp} \alias{ppm.ppp} \alias{ppm.quad} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Point Pattern Data } \description{ Fits a point process model to an observed point pattern. } \usage{ \method{ppm}{ppp}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) \method{ppm}{quad}(Q, trend=~1, interaction=Poisson(), \dots, covariates=data, data=NULL, covfunargs = list(), subset, clipwin, correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) } \arguments{ \item{Q}{ A data point pattern (of class \code{"ppp"}) to which the model will be fitted, or a quadrature scheme (of class \code{"quad"}) containing this pattern. } \item{trend}{ An \R formula object specifying the spatial trend to be fitted. The default formula, \code{~1}, indicates the model is stationary and no trend is to be fitted. 
} \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{Ignored.} \item{data,covariates}{ The values of any spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the likelihood or pseudolikelihood should be restricted. See Details. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } \item{clipwin}{ Optional. A spatial window (object of class \code{"owin"}) to which data will be restricted, before model-fitting is performed. See Details. } \item{covfunargs}{ A named list containing the values of any additional arguments required by covariate functions. } \item{correction}{ The name of the edge correction to be used. The default is \code{"border"} indicating the border correction. Other possibilities may include \code{"Ripley"}, \code{"isotropic"}, \code{"periodic"}, \code{"translate"} and \code{"none"}, depending on the \code{interaction}. } \item{rbord}{ If \code{correction = "border"} this argument specifies the distance by which the window should be eroded for the border correction. } \item{use.gam}{ Logical flag; if \code{TRUE} then computations are performed using \code{gam} instead of \code{\link{glm}}. } \item{method}{ The method used to fit the model. 
Options are \code{"mpl"} for the method of Maximum PseudoLikelihood, \code{"logi"} for the Logistic Likelihood method, \code{"VBlogi"} for the Variational Bayes Logistic Likelihood method, and \code{"ho"} for the Huang-Ogata approximate maximum likelihood method. } \item{forcefit}{ Logical flag for internal use. If \code{forcefit=FALSE}, some trivial models will be fitted by a shortcut. If \code{forcefit=TRUE}, the generic fitting method will always be used. } \item{emend,project}{ (These are equivalent: \code{project} is an older name for \code{emend}.) Logical value. Setting \code{emend=TRUE} will ensure that the fitted model is always a valid point process by applying \code{\link{emend.ppm}}. } \item{prior.mean}{ Optional vector of prior means for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{prior.var}{ Optional prior variance covariance matrix for canonical parameters (for \code{method="VBlogi"}). See Details. } \item{nd}{ Optional. Integer or pair of integers. The dimension of the grid of dummy points (\code{nd * nd} or \code{nd[1] * nd[2]}) used to evaluate the integral in the pseudolikelihood. Incompatible with \code{eps}. } \item{eps}{ Optional. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{gcontrol}{ Optional. List of parameters passed to \code{\link{glm.control}} (or passed to \code{\link{gam.control}} if \code{use.gam=TRUE}) controlling the model-fitting algorithm. 
} \item{nsim}{ Number of simulated realisations to generate (for \code{method="ho"}) } \item{nrmh}{ Number of Metropolis-Hastings iterations for each simulated realisation (for \code{method="ho"}) } \item{start,control}{ Arguments passed to \code{\link{rmh}} controlling the behaviour of the Metropolis-Hastings algorithm (for \code{method="ho"}) } \item{verb}{ Logical flag indicating whether to print progress reports (for \code{method="ho"}) } \item{callstring}{ Internal use only. } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ \bold{NOTE:} This help page describes the \bold{old syntax} of the function \code{ppm}, described in many older documents. This old syntax is still supported. However, if you are learning about \code{ppm} for the first time, we recommend you use the \bold{new syntax} described in the help file for \code{\link{ppm}}. This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. \describe{ \item{basic use:}{ In basic use, \code{Q} is a point pattern dataset (an object of class \code{"ppp"}) to which we wish to fit a model. The syntax of \code{ppm()} is closely analogous to the \R functions \code{\link{glm}} and \code{gam}. The analogy is: \tabular{ll}{ \bold{glm} \tab \bold{ppm} \cr \code{formula} \tab \code{trend} \cr \code{family} \tab \code{interaction} } The point process model to be fitted is specified by the arguments \code{trend} and \code{interaction} which are respectively analogous to the \code{formula} and \code{family} arguments of glm(). Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the argument \code{trend}. 
This is an \R formula object, which may be expressed in terms of the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, or the variables in \code{covariates} (if supplied), or both. It specifies the \bold{logarithm} of the first order potential of the process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If \code{trend} is absent or equal to the default, \code{~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The models currently available are: \GibbsInteractionsList. See the examples below. It is also possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. 
} \item{Models with covariates:}{ To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should be supplied in the argument \code{covariates}. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. Typically the argument \code{covariates} is a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. } \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. 
The function arguments do not appear in the trend formula. See the Examples. If \code{covariates} is a list, the list entries should have names corresponding to the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{covariates}. If \code{covariates} is a data frame, \code{Q} must be a quadrature scheme (see under Quadrature Schemes below). Then \code{covariates} must have as many rows as there are points in \code{Q}. The \eqn{i}th row of \code{covariates} should contain the values of spatial variables which have been observed at the \eqn{i}th point of \code{Q}. } \item{Quadrature schemes:}{ In advanced use, \code{Q} may be a `quadrature scheme'. This was originally just a technicality but it has turned out to have practical uses, as we explain below. Quadrature schemes are required for our implementation of the method of maximum pseudolikelihood. The definition of the pseudolikelihood involves an integral over the spatial window containing the data. In practice this integral must be approximated by a finite sum over a set of quadrature points. We use the technique of Baddeley and Turner (2000), a generalisation of the Berman-Turner (1992) device. In this technique the quadrature points for the numerical approximation include all the data points (points of the observed point pattern) as well as additional `dummy' points. Quadrature schemes are also required for the method of maximum logistic likelihood, which combines the data points with additional `dummy' points. A quadrature scheme is an object of class \code{"quad"} (see \code{\link{quad.object}}) which specifies both the data point pattern and the dummy points for the quadrature scheme, as well as the quadrature weights associated with these points. 
If \code{Q} is simply a point pattern (of class \code{"ppp"}, see \code{\link{ppp.object}}) then it is interpreted as specifying the data points only; a set of dummy points specified by \code{\link{default.dummy}()} is added, and the default weighting rule is invoked to compute the quadrature weights. Finer quadrature schemes (i.e. those with more dummy points) generally yield a better approximation, at the expense of higher computational load. An easy way to fit models using a finer quadrature scheme is to let \code{Q} be the original point pattern data, and use the argument \code{nd} to determine the number of dummy points in the quadrature scheme. Complete control over the quadrature scheme is possible. See \code{\link{quadscheme}} for an overview. Use \code{quadscheme(X, D, method="dirichlet")} to compute quadrature weights based on the Dirichlet tessellation, or \code{quadscheme(X, D, method="grid")} to compute quadrature weights by counting points in grid squares, where \code{X} and \code{D} are the patterns of data points and dummy points respectively. Alternatively use \code{\link{pixelquad}} to make a quadrature scheme with a dummy point at every pixel in a pixel image. A practical advantage of quadrature schemes arises when we want to fit a model involving covariates (e.g. soil pH). Suppose we have only been able to observe the covariates at a small number of locations. Suppose \code{cov.dat} is a data frame containing the values of the covariates at the data points (i.e.\ \code{cov.dat[i,]} contains the observations for the \code{i}th data point) and \code{cov.dum} is another data frame (with the same columns as \code{cov.dat}) containing the covariate values at another set of points whose locations are given by the point pattern \code{Y}. Then setting \code{Q = quadscheme(X,Y)} combines the data points and dummy points into a quadrature scheme, and \code{covariates = rbind(cov.dat, cov.dum)} combines the covariate data frames. 
We can then fit the model by calling \code{ppm(Q, ..., covariates)}. } \item{Model-fitting technique:}{ There are several choices for the technique used to fit the model. \describe{ \item{method="mpl"}{ (the default): the model will be fitted by maximising the pseudolikelihood (Besag, 1975) using the Berman-Turner computational approximation (Berman and Turner, 1992; Baddeley and Turner, 2000). Maximum pseudolikelihood is equivalent to maximum likelihood if the model is a Poisson process. Maximum pseudolikelihood is biased if the interpoint interaction is very strong, unless there is a large number of dummy points. The default settings for \code{method='mpl'} specify a moderately large number of dummy points, striking a compromise between speed and accuracy. } \item{method="logi":}{ the model will be fitted by maximising the logistic likelihood (Baddeley et al, 2014). This technique is roughly equivalent in speed to maximum pseudolikelihood, but is believed to be less biased. Because it is less biased, the default settings for \code{method='logi'} specify a relatively small number of dummy points, so that this method is the fastest, in practice. } \item{method="VBlogi":}{ the model will be fitted in a Bayesian setup by maximising the posterior probability density for the canonical model parameters. This uses the variational Bayes approximation to the posterior derived from the logistic likelihood as described in Rajala (2014). The prior is assumed to be multivariate Gaussian with mean vector \code{prior.mean} and variance-covariance matrix \code{prior.var}. } \item{method="ho":}{ the model will be fitted by applying the approximate maximum likelihood method of Huang and Ogata (1999). See below. The Huang-Ogata method is slower than the other options, but has better statistical properties. } } Note that \code{method='logi'}, \code{method='VBlogi'} and \code{method='ho'} involve randomisation, so that the results are subject to random variation. 
} \item{Huang-Ogata method:}{ If \code{method="ho"} then the model will be fitted using the Huang-Ogata (1999) approximate maximum likelihood method. First the model is fitted by maximum pseudolikelihood as described above, yielding an initial estimate of the parameter vector \eqn{\theta_0}{theta0}. From this initial model, \code{nsim} simulated realisations are generated. The score and Fisher information of the model at \eqn{\theta=\theta_0}{theta=theta0} are estimated from the simulated realisations. Then one step of the Fisher scoring algorithm is taken, yielding an updated estimate \eqn{\theta_1}{theta1}. The corresponding model is returned. Simulated realisations are generated using \code{\link{rmh}}. The iterative behaviour of the Metropolis-Hastings algorithm is controlled by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. As a shortcut, the argument \code{nrmh} determines the number of Metropolis-Hastings iterations run to produce one simulated realisation (if \code{control} is absent). Also if \code{start} is absent or equal to \code{NULL}, it defaults to \code{list(n.start=N)} where \code{N} is the number of points in the data point pattern. } \item{Edge correction}{ Edge correction should be applied to the sufficient statistics of the model, to reduce bias. The argument \code{correction} is the name of an edge correction method. The default \code{correction="border"} specifies the border correction, in which the quadrature window (the domain of integration of the pseudolikelihood) is obtained by trimming off a margin of width \code{rbord} from the observation window of the data pattern. Not all edge corrections are implemented (or implementable) for arbitrary windows. 
Other options depend on the argument \code{interaction}, but these generally include \code{correction="periodic"} (the periodic or toroidal edge correction in which opposite edges of a rectangular window are identified) and \code{correction="translate"} (the translation correction, see Baddeley 1998 and Baddeley and Turner 2000). For pairwise interaction models there is also Ripley's isotropic correction, identified by \code{correction="isotropic"} or \code{"Ripley"}. } \item{Subsetting}{ The arguments \code{subset} and \code{clipwin} specify that the model should be fitted to a restricted subset of the available data. These arguments are equivalent for Poisson point process models, but different for Gibbs models. If \code{clipwin} is specified, then all the available data will be restricted to this spatial region, and data outside this region will be discarded, before the model is fitted. If \code{subset} is specified, then no data are deleted, but the domain of integration of the likelihood or pseudolikelihood is restricted to the \code{subset}. For Poisson models, these two arguments have the same effect; but for a Gibbs model, interactions between points inside and outside the \code{subset} are taken into account, while interactions between points inside and outside the \code{clipwin} are ignored. } } } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters:}{ Other parameters are called \emph{irregular}. 
} } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells, ~1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. Besag, J. Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24} (1975) 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62} (1994) 99-117. Huang, F. and Ogata, Y. Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8} (1999) 510-530. Jensen, J.L. and \Moller, J. Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1} (1991) 445--461. Jensen, J.L. and Kuensch, H.R. On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46} (1994) 475-486. Rajala T. (2014) \emph{A note on Bayesian logistic regression for spatial exponential family Gibbs point processes}, Preprint on ArXiv.org. \url{http://arxiv.org/abs/1411.0539} } \seealso{ \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{emend.ppm}} for ensuring the fitted model is a valid point process. } \section{Error and Warning Messages}{ Some common error messages and warning messages are listed below, with explanations. 
\describe{ \item{\dQuote{System is computationally singular}}{ The Fisher information matrix of the fitted model has a determinant close to zero, so that the matrix cannot be inverted, and the software cannot calculate standard errors or confidence intervals. This error is usually reported when the model is printed, because the \code{print} method calculates standard errors for the fitted parameters. Singularity usually occurs because the spatial coordinates in the original data were very large numbers (e.g. expressed in metres) so that the fitted coefficients were very small numbers. The simple remedy is to \bold{rescale the data}, for example, to convert from metres to kilometres by \code{X <- \link{rescale}(X, 1000)}, then re-fit the model. Singularity can also occur if the covariate values are very large numbers, or if the covariates are approximately collinear. } \item{\dQuote{Covariate values were NA or undefined at X\% (M out of N) of the quadrature points}}{ The covariate data (typically a pixel image) did not provide values of the covariate at some of the spatial locations in the observation window of the point pattern. This means that the spatial domain of the pixel image does not completely cover the observation window of the point pattern. If the percentage is small, this warning can be ignored - typically it happens because of rounding effects which cause the pixel image to be one-pixel-width narrower than the observation window. However if more than a few percent of covariate values are undefined, it would be prudent to check that the pixel images are correct, and are correctly registered in their spatial relation to the observation window. } \item{\dQuote{Model is unidentifiable}}{ It is not possible to estimate all the model parameters from this dataset. The error message gives a further explanation, such as \dQuote{data pattern is empty}. Choose a simpler model, or check the data. 
} \item{\dQuote{N data points are illegal (zero conditional intensity)}}{ In a Gibbs model (i.e. with interaction between points), the conditional intensity may be zero at some spatial locations, indicating that the model forbids the presence of a point at these locations. However if the conditional intensity is zero \emph{at a data point}, this means that the model is inconsistent with the data. Modify the interaction parameters so that the data point is not illegal (e.g. reduce the value of the hard core radius) or choose a different interaction. } } } \section{Warnings}{ The implementation of the Huang-Ogata method is experimental; several bugs were fixed in \pkg{spatstat} 1.19-0. See the comments above about the possible inefficiency and bias of the maximum pseudolikelihood estimator. The accuracy of the Berman-Turner approximation to the pseudolikelihood depends on the number of dummy points used in the quadrature scheme. The number of dummy points should at least equal the number of data points. The parameter values of the fitted model do not necessarily determine a valid point process. Some of the point process models are only defined when the parameter values lie in a certain subset. For example the Strauss process only exists when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to \eqn{1}, corresponding to a value of \code{ppm()$theta[2]} less than or equal to \code{0}. By default (if \code{emend=FALSE}) the algorithm maximises the pseudolikelihood without constraining the parameters, and does not apply any checks for sanity after fitting the model. This is because the fitted parameter value could be useful information for data analysis. To constrain the parameters to ensure that the model is a valid point process, set \code{emend=TRUE}. See also the functions \code{\link{valid.ppm}} and \code{\link{emend.ppm}}. 
The \code{trend} formula should not use any variable names beginning with the prefixes \code{.mpl} or \code{Interaction} as these names are reserved for internal use. The data frame \code{covariates} should have as many rows as there are points in \code{Q}. It should not contain variables called \code{x}, \code{y} or \code{marks} as these names are reserved for the Cartesian coordinates and the marks. If the model formula involves one of the functions \code{poly()}, \code{bs()} or \code{ns()} (e.g. applied to spatial coordinates \code{x} and \code{y}), the fitted coefficients can be misleading. The resulting fit is not to the raw spatial variates (\code{x}, \code{x^2}, \code{x*y}, etc.) but to a transformation of these variates. The transformation is implemented by \code{poly()} in order to achieve better numerical stability. However the resulting coefficients are appropriate for use with the transformed variates, not with the raw variates. This affects the interpretation of the constant term in the fitted model, \code{logbeta}. Conventionally, \eqn{\beta}{beta} is the background intensity, i.e. the value taken by the conditional intensity function when all predictors (including spatial or ``trend'' predictors) are set equal to \eqn{0}. However the coefficient actually produced is the value that the log conditional intensity takes when all the predictors, including the \emph{transformed} spatial predictors, are set equal to \code{0}, which is not the same thing. Worse still, the result of \code{\link{predict.ppm}} can be completely wrong if the trend formula contains one of the functions \code{poly()}, \code{bs()} or \code{ns()}. This is a weakness of the underlying function \code{\link{predict.glm}}. If you wish to fit a polynomial trend, we offer an alternative to \code{\link{poly}()}, namely \code{polynom()}, which avoids the difficulty induced by transformations. It is completely analogous to \code{poly} except that it does not orthonormalise. 
The resulting coefficient estimates then have their natural interpretation and can be predicted correctly. Numerical stability may be compromised. Values of the maximised pseudolikelihood are not comparable if they have been obtained with different values of \code{rbord}. } \examples{ # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees) ppm(nztrees ~ 1) \dontrun{ Q <- quadscheme(nztrees) ppm(Q) # equivalent. } \dontrun{ ppm(nztrees, nd=128) } \testonly{ ppm(nztrees, nd=16) } fit1 <- ppm(nztrees, ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) \dontrun{ ppm(nztrees, ~ polynom(x,2)) } \testonly{ ppm(nztrees, ~ polynom(x,2), nd=16) } # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) \dontrun{ library(splines) ppm(nztrees, ~ bs(x,df=3)) } # WARNING: do not use predict.ppm() on this result # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 \dontrun{ ppm(nztrees, ~1, Strauss(r=10), rbord=10) } \testonly{ ppm(nztrees, ~1, Strauss(r=10), rbord=10, nd=16) } # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 \dontrun{ ppm(nztrees, ~ x, Strauss(13), correction="periodic") } \testonly{ ppm(nztrees, ~ x, Strauss(13), correction="periodic", nd=16) } # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. 
# Compare Maximum Pseudolikelihood, Huang-Ogata and VB fits: \dontrun{ppm(swedishpines, ~1, Strauss(9))} \dontrun{ppm(swedishpines, ~1, Strauss(9), method="ho")} \testonly{ppm(swedishpines, ~1, Strauss(9), method="ho", nd=16, nsim=8)} ppm(swedishpines, ~1, Strauss(9), method="VBlogi") # COVARIATES # X <- rpoispp(42) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X, ~ y + Z, covariates=list(Z=weirdfunction)) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X, ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q, ~ y + Z, covariates=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X, ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) # COVARIATE: inside/outside window b <- owin(c(0.1, 0.6), c(0.1, 0.9)) ppm(X, ~w, covariates=list(w=b)) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species \dontrun{ppm(lansing, ~ marks, Poisson())} \testonly{ ama <- amacrine[square(0.7)] a <- ppm(ama, ~ marks, Poisson(), nd=16) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \dontrun{ppm(lansing, ~ marks * polynom(x,y,3), Poisson())} \testonly{b <- ppm(ama, ~ marks * polynom(x,y,2), Poisson(), nd=16)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/Concom.Rd0000644000176200001440000001327413571674202014413 0ustar liggesusers\name{Concom} \alias{Concom} \title{The Connected Component Process Model} \description{ Creates an instance of the Connected Component point process model which can then be fitted to point pattern data. 
} \usage{ Concom(r) } \arguments{ \item{r}{Threshold distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the connected component process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the connected component process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the connected component interaction is yielded by the function \code{Concom()}. See the examples below. In \bold{standard form}, the connected component process (Baddeley and \Moller, 1989) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{\kappa} and interaction parameter \eqn{\gamma}{\gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-C(x)} }{ f(x[1],\ldots,x[n]) = \alpha . \kappa^n(x) . \gamma^(-C(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{C(x)} is defined below. Here \eqn{\alpha}{\alpha} is a normalising constant. To define the term \code{C(x)}, suppose that we construct a planar graph by drawing an edge between each pair of points \eqn{x_i,x_j}{x[i],x[j]} which are less than \eqn{r} units apart. Two points belong to the same connected component of this graph if they are joined by a path in the graph. Then \eqn{C(x)} is the number of connected components of the graph. The interaction parameter \eqn{\gamma}{\gamma} can be any positive number. If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{\kappa}. 
If \eqn{\gamma < 1}{\gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered. Thus, a connected-component interaction process can be used to model either clustered or regular point patterns. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{-U(x)} }{ f(x_1,\ldots,x_n) = \alpha . \beta^n(x) \gamma^(-U(x)) } where \eqn{\beta}{\beta} is the new intensity parameter and \eqn{U(x) = C(x) - n(x)} is the interaction potential. In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{\beta} to the probability density (so the first order trend is \eqn{\beta}{\beta}). The quantity \eqn{U(x)} is a true interaction potential, in the sense that \eqn{U(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together. When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-U(x)} increases by zero or a positive integer. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is a positive integer \eqn{k} if there are \eqn{k} different connected components of \eqn{x} that lie close to \eqn{u}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{\beta * \eta^\delta} to the probability density, where \eqn{\delta}{\delta} is the increase in potential. If desired, the original parameter \eqn{\kappa}{\kappa} can be recovered from the canonical parameter by \eqn{\kappa = \beta\gamma}{\kappa = \beta * \gamma}. The \emph{nonstationary} connected component process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{\beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{Concom()} is the threshold distance \code{r}. 
When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(\beta)} and \eqn{\log(\gamma)}{log(\gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Concom()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \section{Edge correction}{ The interaction distance of this process is infinite. There are no well-established procedures for edge correction for fitting such models, and accordingly the model-fitting function \code{\link{ppm}} will give an error message saying that the user must specify an edge correction. A reasonable solution is to use the border correction at the same distance \code{r}, as shown in the Examples. } \examples{ # prints a sensible description of itself Concom(r=0.1) # Fit the stationary connected component process to redwood data ppm(redwood, ~1, Concom(r=0.07), rbord=0.07) # Fit the stationary connected component process to `cells' data ppm(cells, ~1, Concom(r=0.06), rbord=0.06) # eta=0 indicates hard core process. # Fit a nonstationary connected component model # with log-cubic polynomial trend \dontrun{ ppm(swedishpines, ~polynom(x/10,y/10,3), Concom(r=7), rbord=7) } } \references{ Baddeley, A.J. and \Moller, J. (1989) Nearest-neighbour Markov point processes and random sets. \emph{International Statistical Review} \bold{57}, 89--121. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/polynom.Rd0000644000176200001440000000324713333543264014670 0ustar liggesusers\name{polynom} \alias{polynom} \title{ Polynomial in One or Two Variables } \description{ This function is used to represent a polynomial term in a model formula. It computes the homogeneous terms in the polynomial of degree \code{n} in one variable \code{x} or two variables \code{x,y}. } \usage{ polynom(x, \dots) } \arguments{ \item{x}{ A numerical vector. 
} \item{\dots}{ Either a single integer \code{n} specifying the degree of the polynomial, or two arguments \code{y,n} giving another vector of data \code{y} and the degree of the polynomial. } } \details{ This function is typically used inside a model formula in order to specify the most general possible polynomial of order \code{n} involving one numerical variable \code{x} or two numerical variables \code{x,y}. It is equivalent to \code{\link[stats]{poly}(, raw=TRUE)}. If only one numerical vector argument \code{x} is given, the function computes the vectors \code{x^k} for \code{k = 1, 2, \dots, n}. These vectors are combined into a matrix with \code{n} columns. If two numerical vector arguments \code{x,y} are given, the function computes the vectors \code{x^k * y^m} for \code{k >= 0} and \code{m >= 0} satisfying \code{0 < k + m <= n}. These vectors are combined into a matrix with one column for each homogeneous term. } \value{ A numeric matrix, with rows corresponding to the entries of \code{x}, and columns corresponding to the terms in the polynomial. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{poly}}, \code{\link{harmonic}} } \examples{ x <- 1:4 y <- 10 * (0:3) polynom(x, 3) polynom(x, y, 3) } \keyword{arith} spatstat/man/SatPiece.Rd0000644000176200001440000001076613333543262014672 0ustar liggesusers\name{SatPiece} \alias{SatPiece} \title{Piecewise Constant Saturated Pairwise Interaction Point Process Model} \description{ Creates an instance of a saturated pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ SatPiece(r, sat) } \arguments{ \item{r}{vector of jump points for the potential function} \item{sat}{ vector of saturation values, or a single saturation value } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. 
} \details{ This is a generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to the case of multiple interaction distances. It can also be described as the saturated analogue of a pairwise interaction process with piecewise-constant pair potential, described in \code{\link{PairPiece}}. The saturated point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], ..., r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],...,s[k]}, intensity parameter \eqn{\beta}{beta} and interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], ..., gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x_i, X) ... gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x_i, X) = min(s[j], t(j, x_i, X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie at a distance between \eqn{r_{j-1}}{r[j-1]} and \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. We take \eqn{r_0 = 0}{r[0] = 0} so that \eqn{t_1(x_i,X)}{t(1, x[i], X)} is the number of points of \eqn{X} that lie within a distance \eqn{r_1}{r[1]} of the point \eqn{x_i}{x[i]}. \code{SatPiece} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{SatPiece()}. See the examples below. Simulation of this point process model is not yet implemented. This model is not locally stable (the conditional intensity is unbounded). The argument \code{r} specifies the vector of interaction distances. 
The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} corresponds to the distance range from \code{0} to \code{r[1]}, and \code{sat[2]} to the distance range from \code{r[1]} to \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every distance range. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x_i, X) = t(j, x_i, X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{SatPiece}} are the square roots of the parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{BadGey}}. 
} \examples{ SatPiece(c(0.1,0.2), c(1,1)) # prints a sensible description of itself SatPiece(c(0.1,0.2), 1) data(cells) ppm(cells, ~1, SatPiece(c(0.07, 0.1, 0.13), 2)) # fit a stationary piecewise constant Saturated pairwise interaction process \dontrun{ ppm(cells, ~polynom(x,y,3), SatPiece(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend } } \author{\adrian and \rolf in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat/man/pcfdot.inhom.Rd0000644000176200001440000001144513333543264015562 0ustar liggesusers\name{pcfdot.inhom} \alias{pcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Type-i-To-Any-Type) } \description{ Estimates the inhomogeneous multitype pair correlation function (from type \eqn{i} to any type) for a multitype point pattern. } \usage{ pcfdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. 
Values of the estimated intensity function of the point pattern \code{X}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdadot} is estimated by kernel smoothing. } } \details{ The inhomogeneous multitype (type \eqn{i} to any type) pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} is a summary of the dependence between different types of points in a multitype spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and another point of any type at location \eqn{y}, where \eqn{x} and \eqn{y} are separated by a distance \eqn{r}, is equal to \deqn{ p(r) = \lambda_i(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda(y) * g(r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}, and where \eqn{\lambda}{lambda} is the intensity function of the points of all types. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda(y)}{p(r) = lambda[i](x) * lambda(y)} so \eqn{g_{i\bullet}(r) = 1}{g[i.](r) = 1}. 
The command \code{pcfdot.inhom} estimates the inhomogeneous multitype pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdadot} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{i\bullet}(r)}{g[i.](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfdot}}, \code{\link{pcfcross.inhom}} } \examples{ data(amacrine) plot(pcfdot.inhom(amacrine, "on", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Saturated.Rd0000644000176200001440000000136513333543262015124 0ustar liggesusers\name{Saturated} \alias{Saturated} \title{Saturated Pairwise Interaction model} \description{ Experimental. } \usage{ Saturated(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is experimental. It constructs a member of the ``saturated pairwise'' family \code{\link{pairsat.family}}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{ppm.object}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/dppspecdenrange.Rd0000644000176200001440000000111113333543263016320 0ustar liggesusers\name{dppspecdenrange} \alias{dppspecdenrange} \title{Range of Spectral Density of a Determinantal Point Process Model} \description{ Computes the range of the spectral density of a determinantal point process model. } \usage{dppspecdenrange(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{Numeric value (possibly \code{Inf}).} \author{ \adrian \rolf and \ege } \examples{ m <- dppBessel(lambda=100, alpha=0.05, sigma=1, d=2) dppspecdenrange(m) } \seealso{ \code{\link{dppspecden}} } \keyword{spatial} \keyword{models} spatstat/man/Tstat.Rd0000644000176200001440000000562513333543262014272 0ustar liggesusers\name{Tstat} \alias{Tstat} \title{ Third order summary statistic } \description{ Computes the third order summary statistic \eqn{T(r)} of a spatial point pattern. } \usage{ Tstat(X, ..., r = NULL, rmax = NULL, correction = c("border", "translate"), ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{T(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{T(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{rmax}{ Optional. Numeric. The maximum value of \eqn{r} for which \eqn{T(r)} should be estimated. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"translate"}, \code{"translation"}, or \code{"best"}. It specifies the edge correction(s) to be applied. 
Alternatively \code{correction="all"} selects all options. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical. If \code{TRUE}, an estimate of the computation time is printed. } } \details{ This command calculates the third-order summary statistic \eqn{T(r)} for a spatial point patterns, defined by Schladitz and Baddeley (2000). The definition of \eqn{T(r)} is similar to the definition of Ripley's \eqn{K} function \eqn{K(r)}, except that \eqn{K(r)} counts pairs of points while \eqn{T(r)} counts triples of points. Essentially \eqn{T(r)} is a rescaled cumulative distribution function of the diameters of triangles in the point pattern. The diameter of a triangle is the length of its longest side. } \section{Computation time}{ If the number of points is large, the algorithm can take a very long time to inspect all possible triangles. A rough estimate of the total computation time will be printed at the beginning of the calculation. If this estimate seems very large, stop the calculation using the user interrupt signal, and call \code{Tstat} again, using \code{rmax} to restrict the range of \code{r} values, thus reducing the number of triangles to be inspected. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Schladitz, K. and Baddeley, A. (2000) A third order point process characteristic. \emph{Scandinavian Journal of Statistics} \bold{27} (2000) 657--671. 
} \seealso{ \code{\link{Kest}} } \examples{ plot(Tstat(redwood)) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmhcontrol.Rd0000644000176200001440000003345313553462425015367 0ustar liggesusers\name{rmhcontrol} \alias{rmhcontrol} \alias{rmhcontrol.default} \title{Set Control Parameters for Metropolis-Hastings Algorithm.} \description{ Sets up a list of parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ rmhcontrol(\dots) \method{rmhcontrol}{default}(\dots, p=0.9, q=0.5, nrep=5e5, expand=NULL, periodic=NULL, ptypes=NULL, x.cond=NULL, fixall=FALSE, nverb=0, nsave=NULL, nburn=nsave, track=FALSE, pstage=c("block", "start")) } \arguments{ \item{\dots}{Arguments passed to methods.} \item{p}{Probability of proposing a shift (as against a birth/death).} \item{q}{Conditional probability of proposing a death given that a birth or death will be proposed.} \item{nrep}{Total number of steps (proposals) of Metropolis-Hastings algorithm that should be run.} \item{expand}{ Simulation window or expansion rule. Either a window (object of class \code{"owin"}) or a numerical expansion factor, specifying that simulations are to be performed in a domain other than the original data window, then clipped to the original data window. This argument is passed to \code{\link{rmhexpand}}. A numerical expansion factor can be in several formats: see \code{\link{rmhexpand}}. } \item{periodic}{ Logical value (or \code{NULL}) indicating whether to simulate ``periodically'', i.e. identifying opposite edges of the rectangular simulation window. 
A \code{NULL} value means ``undecided.'' } \item{ptypes}{For multitype point processes, the distribution of the mark attached to a new random point (when a birth is proposed)} \item{x.cond}{Conditioning points for conditional simulation.} \item{fixall}{(Logical) for multitype point processes, whether to fix the number of points of each type.} \item{nverb}{Progress reports will be printed every \code{nverb} iterations} \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } \item{pstage}{ Character string specifying when to generate proposal points. Either \code{"start"} or \code{"block"}. } } \value{ An object of class \code{"rmhcontrol"}, which is essentially a list of parameter values for the algorithm. There is a \code{print} method for this class, which prints a sensible description of the parameters chosen. } \details{ The Metropolis-Hastings algorithm, implemented as \code{\link{rmh}}, generates simulated realisations of point process models. The function \code{rmhcontrol} sets up a list of parameters which control the iterative behaviour and termination of the Metropolis-Hastings algorithm, for use in a subsequent call to \code{\link{rmh}}. It also checks that the parameters are valid. (A separate function \code{\link{rmhstart}} determines the initial state of the algorithm, and \code{\link{rmhmodel}} determines the model to be simulated.) The parameters are as follows: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. If \eqn{p = 1} then the algorithm only alters existing points, so the number of points never changes, i.e. we are simulating conditionally upon the number of points. 
The number of points is determined by the initial state (specified by \code{\link{rmhstart}}). If \eqn{p=1} and \code{fixall=TRUE} and the model is a multitype point process model, then the algorithm only shifts the locations of existing points and does not alter their marks (types). This is equivalent to simulating conditionally upon the number of points of each type. These numbers are again specified by the initial state. If \eqn{p = 1} then no expansion of the simulation window is allowed (see \code{expand} below). The default value of \code{p} can be changed by setting the parameter \code{rmh.p} in \code{\link{spatstat.options}}. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that a shift is not proposed. This is of course ignored if \code{p} is equal to 1. The default value of \code{q} can be changed by setting the parameter \code{rmh.q} in \code{\link{spatstat.options}}. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. The default value of \code{nrep} can be changed by setting the parameter \code{rmh.nrep} in \code{\link{spatstat.options}}. } \item{expand}{ Either a number or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a domain other than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. This would often be done in order to approximate the simulation of a stationary process (Geyer, 1999) or more generally a process existing in the whole plane, rather than just in the window \code{w}. If \code{expand} is a window object, it is taken as the larger domain in which simulation is performed. If \code{expand} is numeric, it is interpreted as an expansion factor or expansion distance for determining the simulation domain from the data window. 
It should be a \emph{named} scalar, such as \code{expand=c(area=2)}, \code{expand=c(distance=0.1)}, \code{expand=c(length=1.2)}. See \code{\link{rmhexpand}()} for more details. If the name is omitted, it defaults to \code{area}. Expansion is not permitted if the number of points has been fixed by setting \code{p = 1} or if the starting configuration has been specified via the argument \code{x.start} in \code{\link{rmhstart}}. If \code{expand} is \code{NULL}, this is interpreted to mean \dQuote{not yet decided}. An expansion rule will be determined at a later stage, using appropriate defaults. See \code{\link{rmhexpand}}. } \item{periodic}{A logical value (or \code{NULL}) determining whether to simulate \dQuote{periodically}. If \code{periodic} is \code{TRUE}, and if the simulation window is a rectangle, then the simulation algorithm effectively identifies opposite edges of the rectangle. Points near the right-hand edge of the rectangle are deemed to be close to points near the left-hand edge. Periodic simulation usually gives a better approximation to a stationary point process. For periodic simulation, the simulation window must be a rectangle. (The simulation window is determined by \code{expand} as described above.) The value \code{NULL} means \sQuote{undecided}. The decision is postponed until \code{\link{rmh}} is called. Depending on the point process model to be simulated, \code{rmh} will then set \code{periodic=TRUE} if the simulation window is expanded \emph{and} the expanded simulation window is rectangular; otherwise \code{periodic=FALSE}. Note that \code{periodic=TRUE} is only permitted when the simulation window (i.e. the expanded window) is rectangular. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. Defaults to a vector each of whose entries is \eqn{1/nt} where \eqn{nt} is the number of types for the process. 
Convergence of the simulation algorithm should be improved if \code{ptypes} is close to the relative frequencies of the types which will result from the simulation. } \item{x.cond}{ If this argument is given, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the location of the fixed points as well as the type of conditioning. It should be either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)} or a \code{data.frame}. See the section on Conditional Simulation. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. Meaningful only if a marked process is being simulated, and if \eqn{p = 1}. A warning message is given if \code{fixall} is set equal to \code{TRUE} when it is not meaningful. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{nsave,nburn}{ If these integers are given, then the current state of the simulation algorithm (i.e. the current random point pattern) will be saved every \code{nsave} iterations, starting from iteration \code{nburn}. (Alternatively \code{nsave} can be a vector, specifying different numbers of iterations between each successive save. This vector will be recycled until the end of the simulations.) } \item{track}{ Logical flag indicating whether to save the transition history of the simulations (i.e. information specifying what type of proposal was made, and whether it was accepted or rejected, for each iteration). } \item{pstage}{ Character string specifying the stage of the algorithm at which the randomised proposal points should be generated. If \code{pstage="start"} or if \code{nsave=0}, the entire sequence of \code{nrep} random proposal points is generated at the start of the algorithm. 
This is the original behaviour of the code, and should be used in order to maintain consistency with older versions of \pkg{spatstat}. If \code{pstage="block"} and \code{nsave > 0}, then a set of \code{nsave} random proposal points will be generated before each block of \code{nsave} iterations. This is much more efficient. The default is \code{pstage="block"}. } } } \section{Conditional Simulation}{ For a Gibbs point process \eqn{X}, the Metropolis-Hastings algorithm easily accommodates several kinds of conditional simulation: \describe{ \item{conditioning on the total number of points:}{ We fix the total number of points \eqn{N(X)} to be equal to \eqn{n}. We simulate from the conditional distribution of \eqn{X} given \eqn{N(X) = n}. } \item{conditioning on the number of points of each type:}{ In a multitype point process, where \eqn{Y_j}{Y[[j]]} denotes the process of points of type \eqn{j}, we fix the number \eqn{N(Y_j)}{N(Y[[j]])} of points of type \eqn{j} to be equal to \eqn{n_j}{n[j]}, for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. We simulate from the conditional distribution of \eqn{X} given \eqn{N(Y_j)=n_j}{N(Y[[j]]) = n[j]} for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. } \item{conditioning on the realisation in a subwindow:}{ We require that the point process \eqn{X} should, within a specified sub-window \eqn{V}, coincide with a specified point pattern \eqn{y}. We simulate from the conditional distribution of \eqn{X} given \eqn{X \cap V = y}{(X intersect V) = y}. } \item{Palm conditioning:}{ We require that the point process \eqn{X} include a specified list of points \eqn{y}. We simulate from the point process with probability density \eqn{g(x) = c f(x \cup y)}{g(x) = c * f(x union y)} where \eqn{f} is the probability density of the original process \eqn{X}, and \eqn{c} is a normalising constant. } } To achieve each of these types of conditioning we do as follows: \describe{ \item{conditioning on the total number of points:}{ Set \code{p=1}. 
The number of points is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the number of points of each type:}{ Set \code{p=1} and \code{fixall=TRUE}. The number of points of each type is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the realisation in a subwindow:}{ Set \code{x.cond} to be a point pattern (object of class \code{"ppp"}). Its window \code{V=Window(x.cond)} becomes the conditioning subwindow \eqn{V}. } \item{Palm conditioning:}{ Set \code{x.cond} to be a \code{list(x,y)} or \code{data.frame} with two columns containing the coordinates of the points, or a \code{list(x,y,marks)} or \code{data.frame} with three columns containing the coordinates and marks of the points. } } The arguments \code{x.cond}, \code{p} and \code{fixall} can be combined. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhstart}}, \code{\link{rmhexpand}}, \code{\link{spatstat.options}} } \examples{ # parameters given as named arguments c1 <- rmhcontrol(p=0.3,periodic=TRUE,nrep=1e6,nverb=1e5) # parameters given as a list liz <- list(p=0.9, nrep=1e4) c2 <- rmhcontrol(liz) # parameters given in rmhcontrol object c3 <- rmhcontrol(c1) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{datagen} spatstat/man/rstrat.Rd0000644000176200001440000000401113333543264014500 0ustar liggesusers\name{rstrat} \alias{rstrat} \title{Simulate Stratified Random Point Pattern} \description{ Generates a ``stratified random'' pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points independently in each tile. } \usage{ rstrat(win=square(1), nx, ny=nx, k = 1, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each column. } \item{ny}{Number of tiles in each row. } \item{k}{Number of random points to generate in each tile. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a random pattern of points in a ``stratified random'' sampling design. It can be useful for generating random spatial sampling points. The bounding rectangle of \code{win} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. Some of these grid points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. 
} \seealso{ \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rstrat(nx=10) plot(X) # polygonal boundary data(letterR) X <- rstrat(letterR, 5, 10, k=3) plot(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/pixellate.psp.Rd0000644000176200001440000000514313333543264015760 0ustar liggesusers\name{pixellate.psp} \alias{pixellate.psp} \title{ Convert Line Segment Pattern to Pixel Image } \description{ Converts a line segment pattern to a pixel image by measuring the length or number of lines intersecting each pixel. } \usage{ \method{pixellate}{psp}(x, W=NULL, ..., weights = NULL, what=c("length", "number"), DivideByPixelArea=FALSE) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}). } \item{W}{ Optional window (object of class \code{"owin"}) determining the pixel resolution. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{weights}{ Optional vector of weights associated with each line segment. } \item{what}{ String (partially matched) indicating whether to compute the total length of intersection (\code{what="length"}, the default) or the total number of segments intersecting each pixel (\code{what="number"}). } \item{DivideByPixelArea}{ Logical value, indicating whether the resulting pixel values should be divided by the pixel area. } } \details{ This function converts a line segment pattern to a pixel image by computing, for each pixel, the total length of intersection between the pixel and the line segments. Alternatively it can count the number of line segments intersecting each pixel. This is a method for the generic function \code{\link{pixellate}} for the class of line segment patterns. The pixel raster is determined by \code{W} and the optional arguments \code{\dots}. If \code{W} is missing or \code{NULL}, it defaults to the window containing \code{x}. 
Then \code{W} is converted to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. If \code{weights} are given, then the length of the intersection between line segment \code{i} and pixel \code{j} is multiplied by \code{weights[i]} before the lengths are summed for each pixel. } \value{ A pixel image (object of class \code{"im"}) with numeric values. } \seealso{ \code{\link{pixellate}}, \code{\link{as.mask}}, \code{\link{as.mask.psp}}. Use \code{\link{as.mask.psp}} if you only want to know which pixels are intersected by lines. } \examples{ X <- psp(runif(10),runif(10), runif(10), runif(10), window=owin()) plot(pixellate(X)) plot(X, add=TRUE) sum(lengths.psp(X)) sum(pixellate(X)) plot(pixellate(X, what="n")) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/is.dppm.Rd0000644000176200001440000000062113333543263014535 0ustar liggesusers\name{is.dppm} \alias{is.dppm} \title{Recognise Fitted Determinantal Point Process Models} \description{Check that an object inherits the class dppm} \usage{is.dppm(x)} \arguments{ \item{x}{Any object.} } \value{A single logical value.} \author{\ege , \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/Strauss.Rd0000644000176200001440000000600013333543262014623 0ustar liggesusers\name{Strauss} \alias{Strauss} \title{The Strauss Point Process Model} \description{ Creates an instance of the Strauss point process model which can then be fitted to point pattern data. } \usage{ Strauss(r) } \arguments{ \item{r}{The interaction radius of the Strauss process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Strauss process with interaction radius \eqn{r}. 
} \details{ The (stationary) Strauss process with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss process pairwise interaction is yielded by the function \code{Strauss()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Strauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. 
\emph{Biometrika} \bold{63}, 357--360. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ Strauss(r=0.1) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, Strauss(r=0.07)) # fit the stationary Strauss process to `cells' } ppm(cells, ~polynom(x,y,3), Strauss(r=0.07)) # fit a nonstationary Strauss process with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/compareFit.Rd0000644000176200001440000001007013571674202015255 0ustar liggesusers\name{compareFit} \alias{compareFit} \title{ Residual Diagnostics for Multiple Fitted Models } \description{ Compares several fitted point process models using the same residual diagnostic. } \usage{ compareFit(object, Fun, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = NULL, modelnames = NULL, same = NULL, different = NULL) } \arguments{ \item{object}{ Object or objects to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), or a list of these objects. } \item{Fun}{ Diagnostic function to be computed for each model. One of the functions \code{Kcom}, \code{Kres}, \code{Gcom}, \code{Gres}, \code{psst}, \code{psstA} or \code{psstG} or a string containing one of these names. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Extra arguments passed to \code{Fun}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern or list of point patterns. See \code{\link{ppm}} for details. 
Each of these arguments can be a list, specifying different \code{trend}, \code{interaction} and/or \code{rbord} values to be used to generate different fitted models. } \item{modelnames}{ Character vector. Short descriptive names for the different models. } \item{same,different}{ Character strings or character vectors passed to \code{\link{collapse.fv}} to determine the format of the output. } } \details{ This is a convenient way to collect diagnostic information for several different point process models fitted to the same point pattern dataset, or for point process models of the same form fitted to several different datasets, etc. The first argument, \code{object}, is usually a list of fitted point process models (objects of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a list of point patterns (objects of class \code{"ppp"}). In that case, point process models will be fitted to each of the point pattern datasets, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. Alternatively \code{object} can be a single point pattern (object of class \code{"ppp"}) and one or more of the arguments \code{trend}, \code{interaction} or \code{rbord} can be a list. In this case, point process models will be fitted to the same point pattern dataset, using each of the model specifications listed. The diagnostic function \code{Fun} will be applied to each of the point process models. The results will be collected into a single function value table. The \code{modelnames} are used to label the results from each fitted model. } \value{ Function value table (object of class \code{"fv"}). } \author{ \ege, \adrian and Jesper \Moller. 
} \seealso{ \code{\link{ppm}}, \code{\link{Kcom}}, \code{\link{Kres}}, \code{\link{Gcom}}, \code{\link{Gres}}, \code{\link{psst}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{collapse.fv}} } \examples{ nd <- 40 \testonly{ nd <- 10 } ilist <- list(Poisson(), Geyer(7, 2), Strauss(7)) iname <- c("Poisson", "Geyer", "Strauss") \testonly{ ilist <- ilist[c(1,3)] iname <- iname[c(1,3)] } K <- compareFit(swedishpines, Kcom, interaction=ilist, rbord=9, correction="translate", same="trans", different="tcom", modelnames=iname, nd=nd) K } \keyword{spatial} \keyword{models} spatstat/man/rshift.psp.Rd0000644000176200001440000001044113333543264015265 0ustar liggesusers\name{rshift.psp} \alias{rshift.psp} \title{Randomly Shift a Line Segment Pattern} \description{ Randomly shifts the segments in a line segment pattern. } \usage{ \method{rshift}{psp}(X, \dots, group=NULL, which=NULL) } \arguments{ \item{X}{Line segment pattern to be subjected to a random shift. An object of class \code{"psp"}. } \item{\dots}{ Arguments controlling the randomisation and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{group}{ Optional. Factor specifying a grouping of the line segments of \code{X}, or \code{NULL} indicating that all line segments belong to the same group. Each group will be shifted together, and separately from other groups. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. } } \value{ A line segment pattern (object of class \code{"psp"}). } \details{ This operation randomly shifts the locations of the line segments in a line segment pattern. The function \code{rshift} is generic. This function \code{rshift.psp} is the method for line segment patterns. The line segments of \code{X} are first divided into groups, then the line segments within a group are shifted by a common random displacement vector. 
Different groups of line segments are shifted independently. If the argument \code{group} is present, then this determines the grouping. Otherwise, all line segments belong to a single group. The argument \code{group} should be a factor, of length equal to the number of line segments in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all line segments of \code{X} belong to a single group. By default, every group of line segments will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data line segments are shifted, is generated at random. The \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) 
} \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random line segment inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted line segment lies partially or completely outside the window of \code{X}. Currently the only option is \code{"erode"} which specifies that the segments will be clipped to a smaller window. The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- rshift(X, radius=0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/subset.hyperframe.Rd0000644000176200001440000000454513333543264016643 0ustar liggesusers\name{subset.hyperframe} \alias{subset.hyperframe} \title{ Subset of Hyperframe Satisfying A Condition } \description{ Given a hyperframe, return the subset specified by imposing a condition on each row, and optionally by choosing only some of the columns. } \usage{ \method{subset}{hyperframe}(x, subset, select, \dots) } \arguments{ \item{x}{ A hyperframe pattern (object of class \code{"hyperframe"}. } \item{subset}{ Logical expression indicating which points are to be kept. The expression may involve the names of columns of \code{x} and will be evaluated by \code{\link{with.hyperframe}}. } \item{select}{ Expression indicating which columns of marks should be kept. } \item{\dots}{ Arguments passed to \code{\link{[.hyperframe}} such as \code{drop} and \code{strip}. } } \details{ This is a method for the generic function \code{\link{subset}}. 
It extracts the subset of rows of \code{x} that satisfy the logical expression \code{subset}, and retains only the columns of \code{x} that are specified by the expression \code{select}. The result is always a hyperframe. The argument \code{subset} determines the subset of rows that will be extracted. It should be a logical expression. It may involve the names of columns of \code{x}. The default is to keep all points. The argument \code{select} determines which columns of \code{x} will be retained. It should be an expression involving the names of columns (which will be interpreted as integers representing the positions of these columns). For example if there are columns named \code{A} to \code{Z}, then \code{select=D:F} is a valid expression and means that columns \code{D}, \code{E} and \code{F} will be retained. Similarly \code{select=-(A:C)} is valid and means that columns \code{A} to \code{C} will be deleted. The default is to retain all columns. Setting \code{subset=FALSE} will remove all the rows. Setting \code{select=FALSE} will remove all the columns. The result is always a hyperframe. } \value{ A hyperframe. } \author{\adrian , \rolf and \ege } \seealso{ \code{\link[base]{subset}}, \code{\link{[.hyperframe}} } \examples{ a <- subset(flu, virustype=="wt") aa <- subset(flu, minnndist(pattern) > 10) aaa <- subset(flu, virustype=="wt", select = -pattern) } \keyword{spatial} \keyword{manip} spatstat/man/crossing.linnet.Rd0000644000176200001440000000200213333543263016275 0ustar liggesusers\name{crossing.linnet} \alias{crossing.linnet} \title{ Crossing Points between Linear Network and Other Lines } \description{ Find all the crossing-points between a linear network and another pattern of lines or line segments. } \usage{ crossing.linnet(X, Y) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}). } \item{Y}{ A linear network, or a spatial pattern of line segments (class \code{"psp"}) or infinite lines (class \code{"infline"}). 
} } \details{ All crossing-points between \code{X} and \code{Y} are determined. The result is a point pattern on the network \code{X}. } \value{ Point pattern on a linear network (object of class \code{"lpp"}). } \author{ \adrian. } \seealso{ \code{\link{crossing.psp}} % \code{\link{chop.linnet}} } \examples{ plot(simplenet, main="") L <- infline(p=runif(3), theta=runif(3, max=pi/2)) plot(L, col="red") Y <- crossing.linnet(simplenet, L) plot(Y, add=TRUE, cols="blue") } \keyword{spatial} \keyword{manip} spatstat/man/markmarkscatter.Rd0000644000176200001440000000411113401202457016345 0ustar liggesusers\name{markmarkscatter} \alias{markmarkscatter} \title{ Mark-Mark Scatter Plot } \description{ Generates the mark-mark scatter plot of a point pattern. } \usage{ markmarkscatter(X, rmax, \dots, col = NULL, symap = NULL, transform=I, jit=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}, \code{"pp3"}, \code{"lpp"} or \code{"ppx"}) with numeric marks. } \item{rmax}{ Maximum distance between pairs of points which contribute to the plot. } \item{\dots}{ Additional arguments passed to \code{\link{plot.ppp}} to control the scatterplot. } \item{transform}{ Optional. A function which should be applied to the mark values. } \item{jit}{ Logical value indicating whether mark values should be randomly perturbed using \code{\link[base]{jitter}}. } \item{col}{ Optional. A vector of colour values, or a \code{\link{colourmap}} to be used to portray the pairwise distance values. Ignored if \code{symap} is given. } \item{symap}{ Optional. A \code{\link{symbolmap}} to be used to portray the pairwise distance values. Overrides \code{col}. } } \details{ The mark-mark scatter plot (Ballani et al, 2019) is a scatterplot of the mark values of all pairs of distinct points in \code{X} which are closer than the distance \code{rmax}. The dots in the scatterplot are coloured according to the pairwise distance between the two spatial points. 
The plot is augmented by three curves explained by Ballani et al (2019). If the marks only take a few different values, then it is usually appropriate to apply random perturbation (jitter) to the mark values, by setting \code{jit=TRUE}. } \value{ Null. } \references{ Ballani, F., Pommerening, A. and Stoyan, D. (2019) Mark-mark scatterplots improve pattern analysis in spatial plant ecology. \emph{Ecological Informatics} \bold{49}, 13--21. } \author{ Adrian Baddeley (coded from the description in Ballani et al.) } \examples{ markmarkscatter(longleaf, 10) markmarkscatter(spruces, 10, jit=TRUE) } \keyword{spatial} \keyword{hplot} spatstat/man/reach.dppm.Rd0000644000176200001440000000212513571674202015207 0ustar liggesusers\name{reach.dppm} \alias{reach.dppm} \alias{reach.detpointprocfamily} \title{Range of Interaction for a Determinantal Point Process Model} \description{ Returns the range of interaction for a determinantal point process model. } \usage{ \method{reach}{dppm}(x, \dots) \method{reach}{detpointprocfamily}(x, \dots) } \arguments{ \item{x}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a determinantal point process model may defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. For example in the \Matern model this defaults to finding \eqn{R} such that \eqn{g(R)=0.99}. } \value{Numeric} \author{ \spatstatAuthors. 
} \examples{ reach(dppMatern(lambda=100, alpha=.01, nu=1, d=2)) } \keyword{spatial} \keyword{models} spatstat/man/bdist.points.Rd0000644000176200001440000000241113333543262015601 0ustar liggesusers\name{bdist.points} \alias{bdist.points} \title{Distance to Boundary of Window} \description{ Computes the distances from each point of a point pattern to the boundary of the window. } \usage{ bdist.points(X) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} } \value{ A numeric vector, giving the distances from each point of the pattern to the boundary of the window. } \details{ This function computes, for each point \eqn{x_i}{x[i]} in the point pattern \code{X}, the shortest distance \eqn{d(x_i, W^c)}{dist(x[i], W')} from \eqn{x_i}{x[i]} to the boundary of the window \eqn{W} of observation. If the window \code{Window(X)} is of type \code{"rectangle"} or \code{"polygonal"}, then these distances are computed by analytic geometry and are exact, up to rounding errors. If the window is of type \code{"mask"} then the distances are computed using the real-valued distance transform, which is an approximation with maximum error equal to the width of one pixel in the mask. } \seealso{ \code{\link{bdist.pixels}}, \code{\link{bdist.tiles}}, \code{\link{ppp.object}}, \code{\link{erosion}} } \examples{ data(cells) d <- bdist.points(cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Kcross.Rd0000644000176200001440000001714413333543262014436 0ustar liggesusers\name{Kcross} \alias{Kcross} \title{ Multitype K Function (Cross-type) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. 
} \usage{ Kcross(X, i, j, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE, from, to ) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kcross} and its companions \code{\link{Kdot}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. 
The ``cross-type'' (type \eqn{i} to type \eqn{j}) \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda_j K_{ij}(r)}{lambda[j] Kij(r)} equals the expected number of additional random points of type \eqn{j} within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda_j}{lambda[j]} is the intensity of the type \eqn{j} points, i.e. the expected number of points of type \eqn{j} per unit area. The function \eqn{K_{ij}}{Kij} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{ij}(r)}{Kij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}(r)}{Kij(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{K_{ij}(r)}{Kij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kcross}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. 
John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # amacrine cells data K01 <- Kcross(amacrine, "off", "on") plot(K01) \testonly{ K01 <- Kcross(amacrine, "off", "on", ratio=TRUE) } \dontrun{ K10 <- Kcross(amacrine, "on", "off") # synthetic example: point pattern with marks 0 and 1 pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) K <- Kcross(pp, "0", "1") K <- Kcross(pp, 0, 1) # equivalent } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/methods.funxy.Rd0000644000176200001440000000343113333543263016000 0ustar liggesusers\name{methods.funxy} \alias{methods.funxy} %DoNotExport \alias{contour.funxy} \alias{persp.funxy} \alias{plot.funxy} \Rdversion{1.1} \title{ Methods for Spatial Functions } \description{ Methods for objects of the class \code{"funxy"}. 
} \usage{ \method{contour}{funxy}(x, \dots) \method{persp}{funxy}(x, \dots) \method{plot}{funxy}(x, \dots) } \arguments{ \item{x}{ Object of class \code{"funxy"} representing a function of \eqn{x,y} coordinates. } \item{\dots}{ Named arguments controlling the plot. See Details. } } \details{ These are methods for the generic functions \code{\link{plot}}, \code{\link{contour}} and \code{\link{persp}} for the class \code{"funxy"} of spatial functions. Objects of class \code{"funxy"} are created, for example, by the commands \code{\link{distfun}} and \code{\link{funxy}}. The \code{plot}, \code{contour} and \code{persp} methods first convert \code{x} to a pixel image object using \code{\link{as.im}}, then display it using \code{\link{plot.im}}, \code{\link{contour.im}} or \code{\link{persp.im}}. Additional arguments \code{\dots} are either passed to \code{\link{as.im.function}} to control the spatial resolution of the pixel image, or passed to \code{\link{contour.im}}, \code{\link{persp.im}} or \code{\link{plot.im}} to control the appearance of the plot. } \value{ \code{NULL}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{funxy}}, \code{\link{distfun}}, \code{\link{as.im}}, \code{\link{plot.im}}, \code{\link{persp.im}}, \code{\link{contour.im}}, \code{\link{spatstat.options}} } \examples{ f <- distfun(letterR) contour(f) B <- owin(c(1,5), c(-1, 4)) contour(f, W=B) persp(f, W=B, theta=40, phi=40, border=NA, shade=0.7) } \keyword{spatial} \keyword{methods} spatstat/man/convexify.Rd0000644000176200001440000000334213333543263015200 0ustar liggesusers\name{convexify} \alias{convexify} \title{ Weil's Convexifying Operation } \description{ Converts the window \code{W} into a convex set by rearranging the edges, preserving spatial orientation of each edge. } \usage{ convexify(W, eps) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{eps}{ Optional. Minimum edge length of polygonal approximation, if \code{W} is not a polygon. 
} } \details{ Weil (1995) defined a convexification operation for windows \eqn{W} that belong to the convex ring (that is, for any \eqn{W} which is a finite union of convex sets). Note that this is \bold{not} the same as the convex hull. The convexified set \eqn{f(W)} has the same total boundary length as \eqn{W} and the same distribution of orientations of the boundary. If \eqn{W} is a polygonal set, then the convexification \eqn{f(W)} is obtained by rearranging all the edges of \eqn{W} in order of their spatial orientation. The argument \code{W} must be a window. If it is not already a polygonal window, it is first converted to one, using \code{\link{simplify.owin}}. The edges are sorted in increasing order of angular orientation and reassembled into a convex polygon. } \value{ A window (object of class \code{"owin"}). } \references{ Weil, W. (1995) The estimation of mean particle shape and mean particle number in overlapping particle systems in the plane. \emph{Advances in Applied Probability} \bold{27}, 102--119. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{convexhull}} for the convex hull of a window. } \examples{ opa <- par(mfrow=c(1,2)) plot(letterR) plot(convexify(letterR)) par(opa) } \keyword{spatial} \keyword{utilities} spatstat/man/relrisk.ppm.Rd0000644000176200001440000001716113346155530015440 0ustar liggesusers\name{relrisk.ppm} \alias{relrisk.ppm} \title{ Parametric Estimate of Spatially-Varying Relative Risk } \description{ Given a point process model fitted to a multitype point pattern, this function computes the fitted spatially-varying probability of each type of point, or the ratios of such probabilities, according to the fitted model. Optionally the standard errors of the estimates are also computed. 
} \usage{ \method{relrisk}{ppm}(X, \dots, at = c("pixels", "points"), relative = FALSE, se = FALSE, casecontrol = TRUE, control = 1, case, ngrid = NULL, window = NULL) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{at}{ String specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Applies only when \code{at="pixels"}.) } \item{window}{ Optional. A window (object of class \code{"owin"}) \emph{delimiting} the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. (Applies only when \code{at="pixels"}.) 
} } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppm} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. 
An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Probabilities and risks are computed from the fitted intensity of the model, using \code{\link{predict.ppm}}. If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, using \code{\link{vcov.ppm}}. } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. 
} The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \author{ \spatstatAuthors. } \seealso{ There is another method \code{\link{relrisk.ppp}} for point pattern datasets which computes \emph{nonparametric} estimates of relative risk by kernel smoothing. See also \code{\link{relrisk}}, \code{\link{relrisk.ppp}}, \code{\link{ppm}} } \examples{ fit <- ppm(chorley ~ marks * (x+y)) rr <- relrisk(fit, relative=TRUE, control="lung", se=TRUE) plot(rr$estimate) plot(rr$SE) rrX <- relrisk(fit, at="points", relative=TRUE, control="lung") } \keyword{spatial} \keyword{models} spatstat/man/AreaInter.Rd0000644000176200001440000001737713333543262015054 0ustar liggesusers\name{AreaInter} \alias{AreaInter} \title{The Area Interaction Point Process Model} \description{ Creates an instance of the Area Interaction point process model (Widom-Rowlinson penetrable spheres model) which can then be fitted to point pattern data. } \usage{ AreaInter(r) } \arguments{ \item{r}{The radius of the discs in the area interaction process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the area-interaction process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the Widom-Rowlinson penetrable sphere model or area-interaction process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. 
The appropriate description of the area interaction structure is yielded by the function \code{AreaInter()}. See the examples below. In \bold{standard form}, the area-interaction process (Widom and Rowlinson, 1970; Baddeley and Van Lieshout, 1995) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{\kappa} and interaction parameter \eqn{\gamma}{\gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-A(x)} }{ f(x[1],\ldots,x[n]) = \alpha . \kappa^n(x) . \gamma^(-A(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{A(x)} is the area of the region formed by the union of discs of radius \eqn{r} centred at the points \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]}. Here \eqn{\alpha}{\alpha} is a normalising constant. The interaction parameter \eqn{\gamma}{\gamma} can be any positive number. If \eqn{\gamma = 1}{\gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{\kappa}. If \eqn{\gamma < 1}{\gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{\gamma > 1} the process is clustered. Thus, an area interaction process can be used to model either clustered or regular point patterns. Two points interact if the distance between them is less than \eqn{2r}{2 * r}. The standard form of the model, shown above, is a little complicated to interpret in practical applications. For example, each isolated point of the pattern \eqn{x} contributes a factor \eqn{\kappa \gamma^{-\pi r^2}}{\kappa * \gamma^(-\pi * r^2)} to the probability density. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical scale-free form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \eta^{-C(x)} }{ f(x_1,\ldots,x_n) = \alpha . 
\beta^n(x) \eta^(-C(x)) } where \eqn{\beta}{\beta} is the new intensity parameter, \eqn{\eta}{\eta} is the new interaction parameter, and \eqn{C(x) = B(x) - n(x)} is the interaction potential. Here \deqn{ B(x) = \frac{A(x)}{\pi r^2} }{ B(x) = A(x)/(\pi * r^2) } is the normalised area (so that the discs have unit area). In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{\beta} to the probability density (so the first order trend is \eqn{\beta}{\beta}). The quantity \eqn{C(x)} is a true interaction potential, in the sense that \eqn{C(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together (closer than \eqn{2r}{2*r} units apart). When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-C(x)} increases by a value between 0 and 1. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is 1 if the disc of radius \eqn{r} centred at \eqn{u} is completely contained in the union of discs of radius \eqn{r} centred at the data points \eqn{x_i}{x[i]}. Thus, the increase in potential is a measure of how close the new point \eqn{u} is to the existing pattern \eqn{x}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{\beta * \eta^\delta} to the probability density, where \eqn{\delta}{\delta} is the increase in potential. The old parameters \eqn{\kappa,\gamma}{\kappa,\gamma} of the standard form are related to the new parameters \eqn{\beta,\eta}{\beta,\eta} of the canonical scale-free form, by \deqn{ \beta = \kappa \gamma^{-\pi r^2} = \kappa /\eta }{ \beta = \kappa * \gamma^(-\pi * r^2)= \kappa / \eta } and \deqn{ \eta = \gamma^{\pi r^2} }{ \eta = \gamma^(\pi * r^2) } provided \eqn{\gamma}{\gamma} and \eqn{\kappa}{\kappa} are positive and finite. In the canonical scale-free form, the parameter \eqn{\eta}{\eta} can take any nonnegative value. 
The value \eqn{\eta = 1}{\eta = 1} again corresponds to a Poisson process, with intensity \eqn{\beta}{\beta}. If \eqn{\eta < 1}{\eta < 1} then the process is regular, while if \eqn{\eta > 1}{\eta > 1} the process is clustered. The value \eqn{\eta = 0}{\eta = 0} corresponds to a hard core process with hard core radius \eqn{r} (interaction distance \eqn{2r}). The \emph{nonstationary} area interaction process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{\beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{AreaInter()} is the disc radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(\beta)} and \eqn{\log(\eta)}{log(\eta)} are estimated by \code{\link{ppm}()}, not fixed in \code{AreaInter()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} \code{\link{ragsAreaInter}} and \code{\link{rmh}} for simulation of area-interaction models. } \section{Warnings}{ The interaction distance of this process is equal to \code{2 * r}. Two discs of radius \code{r} overlap if their centres are closer than \code{2 * r} units apart. The estimate of the interaction parameter \eqn{\eta}{\eta} is unreliable if the interaction radius \code{r} is too small or too large. In these situations the model is approximately Poisson so that \eqn{\eta}{\eta} is unidentifiable. As a rule of thumb, one can inspect the empty space function of the data, computed by \code{\link{Fest}}. The value \eqn{F(r)} of the empty space function at the interaction radius \code{r} should be between 0.2 and 0.8. 
} \examples{ \testonly{op <- spatstat.options(ngrid.disc=8)} # prints a sensible description of itself AreaInter(r=0.1) # Note the reach is twice the radius reach(AreaInter(r=1)) # Fit the stationary area interaction process to Swedish Pines data data(swedishpines) ppm(swedishpines, ~1, AreaInter(r=7)) # Fit the stationary area interaction process to `cells' data(cells) ppm(cells, ~1, AreaInter(r=0.06)) # eta=0 indicates hard core process. # Fit a nonstationary area interaction with log-cubic polynomial trend \dontrun{ ppm(swedishpines, ~polynom(x/10,y/10,3), AreaInter(r=7)) } \testonly{spatstat.options(op)} } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684. } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/print.owin.Rd0000644000176200001440000000155713333543264015304 0ustar liggesusers\name{print.owin} \alias{print.owin} \title{Print Brief Details of a Spatial Window} \description{ Prints a very brief description of a window object. } \usage{ \method{print}{owin}(x, \dots, prefix="window: ") } \arguments{ \item{x}{Window (object of class \code{"owin"}).} \item{\dots}{Ignored.} \item{prefix}{Character string to be printed at the start of the output.} } \details{ A very brief description of the window \code{x} is printed. This is a method for the generic function \code{\link{print}}. 
} \seealso{ \code{\link{print}}, \code{\link{print.ppp}}, \code{\link{summary.owin}} } \examples{ owin() # the unit square data(demopat) W <- Window(demopat) W # just says it is polygonal as.mask(W) # just says it is a binary image } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/ranef.mppm.Rd0000644000176200001440000000321313333543265015230 0ustar liggesusers\name{ranef.mppm} \alias{ranef.mppm} \title{ Extract Random Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the fixed effects of the model. A method for \code{ranef}. } \usage{ \method{ranef}{mppm}(object, \dots) } \arguments{ \item{object}{ A fitted point process model (an object of class \code{"mppm"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[nlme]{ranef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}). This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the coefficients of the random effects of the model. } \value{ A data frame, or list of data frames, as described in the help for \code{\link[nlme]{ranef.lme}}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. 
} \seealso{ \code{\link{fixef.mppm}}, \code{\link{coef.mppm}} } \examples{ H <- hyperframe(Y = waterstriders) # Tweak data to exaggerate differences H$Y[[1]] <- rthin(H$Y[[1]], 0.3) m1 <- mppm(Y ~ id, data=H, Strauss(7)) ranef(m1) m2 <- mppm(Y ~ 1, random=~1|id, data=H, Strauss(7)) ranef(m2) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/tilenames.Rd0000644000176200001440000000240613536625731015155 0ustar liggesusers\name{tilenames} \alias{tilenames} \alias{tilenames<-} \alias{tilenames.tess} \alias{tilenames<-.tess} \alias{tilenames.lintess} \alias{tilenames<-.lintess} \title{Names of Tiles in a Tessellation} \description{ Extract or Change the Names of the Tiles in a Tessellation. } \usage{ tilenames(x) tilenames(x) <- value \method{tilenames}{tess}(x) \method{tilenames}{lintess}(x) \method{tilenames}{tess}(x) <- value \method{tilenames}{lintess}(x) <- value } \arguments{ \item{x}{ A tessellation (object of class \code{"tess"}) or a tessellation on a linear network (object of class \code{"lintess"}). } \item{value}{Character vector giving new names for the tiles.} } \details{ These functions extract or change the names of the tiles that make up the tessellation \code{x}. If the tessellation is a regular grid, the tile names cannot be changed. } \value{ \code{tilenames} returns a character vector. } \seealso{ \code{\link{tess}}, \code{\link{lintess}}, \code{\link{tiles}} } \examples{ D <- dirichlet(runifpoint(10)) tilenames(D) tilenames(D) <- paste("Cell", 1:10) tilenames(D) B <- lineardirichlet(runiflpp(5, simplenet)) tilenames(B) tilenames(B) <- letters[1:5] } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/as.matrix.owin.Rd0000644000176200001440000000220213333543262016040 0ustar liggesusers\name{as.matrix.owin} \alias{as.matrix.owin} \title{Convert Pixel Image to Matrix} \description{ Converts a pixel image to a matrix. } \usage{ \method{as.matrix}{owin}(x, ...) 
} \arguments{ \item{x}{A window (object of class \code{"owin"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution.} } \details{ The function \code{as.matrix.owin} converts a window to a logical matrux. It first converts the window \code{x} into a binary pixel mask using \code{\link{as.mask}}. It then extracts the pixel entries as a logical matrix. The resulting matrix has entries that are \code{TRUE} if the corresponding pixel is inside the window, and \code{FALSE} if it is outside. The function \code{as.matrix} is generic. The function \code{as.matrix.owin} is the method for windows (objects of class \code{"owin"}). Use \code{\link{as.im}} to convert a window to a pixel image. } \value{ A logical matrix. } \examples{ m <- as.matrix(letterR) } \seealso{ \code{\link{as.matrix.im}}, \code{\link{as.im}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/linequad.Rd0000644000176200001440000000440413445127226014771 0ustar liggesusers\name{linequad} \alias{linequad} \title{ Quadrature Scheme on a Linear Network } \description{ Generates a quadrature scheme (an object of class \code{"quad"}) on a linear network. } \usage{ linequad(X, Y, \dots, eps = NULL, nd = 1000, random = FALSE) } \arguments{ \item{X}{ Data points. An object of class \code{"lpp"} or \code{"ppp"}. } \item{Y}{ Line segments on which the points of \code{X} lie. An object of class \code{"psp"}. Required only when \code{X} is a \code{"ppp"} object. } \item{\dots}{ Ignored. } \item{eps}{ Optional. Spacing between successive dummy points along each segment. (This is the maximum spacing; some spacings will be shorter.) } \item{nd}{ Optional. Total number of dummy points to be generated. (Actual number may be larger.) } \item{random}{ Logical value indicating whether the sequence of dummy points should start at a randomly-chosen position along each segment. 
} } \details{ This command generates a quadrature scheme (object of class \code{"quad"}) from a pattern of points on a linear network. Normally the user does not need to call \code{linequad} explicitly. It is invoked by \pkg{spatstat} functions when needed. A quadrature scheme is required by \code{\link{lppm}} in order to fit point process models to point pattern data on a linear network. A quadrature scheme is also used by \code{\link{rhohat.lpp}} and other functions. In order to create the quadrature scheme, dummy points are placed along each line segment of the network. The dummy points are evenly-spaced with spacing \code{eps}. The default is \code{eps = totlen/nd} where \code{totlen} is the total length of all line segments in the network. Every line segment of the network will contain at least one dummy point. Consequently the actual number of dummy points generated will typically be greater than \code{nd}, especially when \code{nd} is small. If \code{eps} is specified, the number of dummy points will be greater than \code{totlen/eps}, especially when \code{eps} is large. } \value{ A quadrature scheme (object of class \code{"quad"}). } \author{ \adrian, Greg McSwiggan and Suman Rakshit. } \seealso{ \code{\link{lppm}} } \keyword{datagen} \keyword{spatial} spatstat/man/formula.ppm.Rd0000644000176200001440000000326213333543263015427 0ustar liggesusers\name{formula.ppm} \alias{formula.ppm} \alias{terms.ppm} \title{ Model Formulae for Gibbs Point Process Models } \description{ Extract the trend formula, or the terms in the trend formula, in a fitted Gibbs point process model. } \usage{ \method{formula}{ppm}(x, \dots) \method{terms}{ppm}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"ppm"}, representing a fitted point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{formula}} and \code{\link{terms}} for the class \code{"ppm"}. 
An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{formula.ppm} extracts the trend formula from the fitted model \code{x} (the formula originally specified as the argument \code{trend} to \code{\link{ppm}}). The method \code{terms.ppm} extracts the individual terms in the trend formula. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{extractAIC.ppm}}, \code{\link{fitted.ppm}}, \code{\link{logLik.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \examples{ data(cells) fit <- ppm(cells, ~x) formula(fit) terms(fit) } \keyword{spatial} \keyword{methods} spatstat/man/stieltjes.Rd0000644000176200001440000000434313333543264015177 0ustar liggesusers\name{stieltjes} \alias{stieltjes} \title{Compute Integral of Function Against Cumulative Distribution} \description{ Computes the Stieltjes integral of a function \eqn{f} with respect to a function \eqn{M}. } \usage{ stieltjes(f, M, ...) } \arguments{ \item{f}{ The integrand. A function in the \R language. } \item{M}{ The cumulative function against which \code{f} will be integrated. An object of class \code{"fv"} or \code{"stepfun"}. } \item{\dots}{ Additional arguments passed to \code{f}. } } \details{ This command computes the Stieltjes integral \deqn{I = \int f(x) dM(x)}{I = integral f(x) dM(x)} of a real-valued function \eqn{f(x)} with respect to a nondecreasing function \eqn{M(x)}. One common use of the Stieltjes integral is to find the mean value of a random variable from its cumulative distribution function \eqn{F(x)}. 
The mean value is the Stieltjes integral of \eqn{f(x)=x} with respect to \eqn{F(x)}. The argument \code{f} should be a \code{function} in the \R language. It should accept a numeric vector argument \code{x} and should return a numeric vector of the same length. The argument \code{M} should be either a step function (object of class \code{"stepfun"}) or a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). Objects of class \code{"stepfun"} are returned by \code{\link[stats]{ecdf}}, \code{\link{ewcdf}}, \code{\link{spatialcdf}} and other utilities. Objects of class \code{"fv"} are returned by the commands \code{\link{Kest}}, \code{\link{Gest}}, etc. } \value{ A list containing the value of the Stieltjes integral computed using each of the versions of the function \code{M}. } \seealso{ \code{\link{fv.object}}, \code{\link{Gest}} } \examples{ # estimate cdf of nearest neighbour distance in redwood data G <- Gest(redwood) # compute estimate of mean nearest neighbour distance stieltjes(function(x){x}, G) # estimated probability of a distance in the interval [0.1,0.2] stieltjes(function(x,a,b){ (x >= a) & (x <= b)}, G, a=0.1, b=0.2) # stepfun example H <- spatialcdf(bei.extra$elev, normalise=TRUE) stieltjes(function(x){x}, H) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/valid.detpointprocfamily.Rd0000644000176200001440000000121413333543264020175 0ustar liggesusers\name{valid.detpointprocfamily} \alias{valid.detpointprocfamily} \title{Check Validity of a Determinantal Point Process Model} \description{ Checks the validity of a determinantal point process model. 
} \usage{ \method{valid}{detpointprocfamily}(object, \dots) } \arguments{ \item{object}{Model of class \code{"detpointprocfamily"}.} \item{\dots}{Ignored.} } \value{Logical} \author{ \adrian \rolf and \ege } \examples{ model1 <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) valid(model1) model2 <- dppMatern(lambda=100, alpha=1, nu=1, d=2) valid(model2) } \seealso{ \code{\link{valid}} } \keyword{spatial} \keyword{models} spatstat/man/as.matrix.im.Rd0000644000176200001440000000232513333543262015477 0ustar liggesusers\name{as.matrix.im} \alias{as.matrix.im} \alias{as.array.im} \title{Convert Pixel Image to Matrix or Array} \description{ Converts a pixel image to a matrix or an array. } \usage{ \method{as.matrix}{im}(x, ...) \method{as.array}{im}(x, ...) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{See below.} } \details{ The function \code{as.matrix.im} converts the pixel image \code{x} into a matrix containing the pixel values. It is handy when you want to extract a summary of the pixel values. See the Examples. The function \code{as.array.im} converts the pixel image to an array. By default this is a three-dimensional array of dimension \eqn{n} by \eqn{m} by \eqn{1}. If the extra arguments \code{\dots} are given, they will be passed to \code{\link{array}}, and they may change the dimensions of the array. } \value{ A matrix or array. } \seealso{ \code{\link{as.matrix.owin}} } \examples{ # artificial image Z <- setcov(square(1)) M <- as.matrix(Z) median(M) \dontrun{ # plot the cumulative distribution function of pixel values plot(ecdf(as.matrix(Z))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/quad.ppm.Rd0000644000176200001440000000625013333543264014715 0ustar liggesusers\name{quad.ppm} \alias{quad.ppm} \title{Extract Quadrature Scheme Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the quadrature scheme used to fit the model. 
} \usage{ quad.ppm(object, drop=FALSE, clip=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{clip}{ Logical value determining whether to erode the window, if \code{object} was fitted using the border correction. See Details. } } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{quad.ppm} extracts the quadrature scheme. A typical use of this function would be to inspect the quadrature scheme (points and weights) to gauge the accuracy of the approximation to the exact pseudolikelihood. Some quadrature points may not have been used in fitting the model. This happens if the border correction is used, and in other cases (e.g. when the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused quadrature points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. The quadrature scheme has a \emph{window}, which by default is set to equal the window of the original data. However this window may be larger than the actual domain of integration of the pseudolikelihood or composite likelihood that was used to fit the model. 
If \code{clip=TRUE} then the window of the quadrature scheme is set to the actual domain of integration. This option only has an effect when the model was fitted using the border correction; then the window is obtained by eroding the original data window by the border correction distance. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. See \code{\link{quad.object}} for a list of all operations that can be performed on objects of class \code{"quad"}. This function can also be applied to objects of class \code{"kppm"} and \code{"lppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{quad.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~1, Strauss(r=0.1)) Q <- quad.ppm(fit) \dontrun{plot(Q)} npoints(Q$data) npoints(Q$dummy) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/exactMPLEstrauss.Rd0000644000176200001440000001034013604315233016364 0ustar liggesusers\name{exactMPLEstrauss} \alias{exactMPLEstrauss} \title{ Exact Maximum Pseudolikelihood Estimate for Stationary Strauss Process } \description{ Computes, to very high accuracy, the Maximum Pseudolikelihood Estimates of the parameters of a stationary Strauss point process. } \usage{ exactMPLEstrauss(X, R, ngrid = 2048, plotit = FALSE, project=TRUE) } \arguments{ \item{X}{ Data to which the Strauss process will be fitted. A point pattern dataset (object of class \code{"ppp"}). } \item{R}{ Interaction radius of the Strauss process. A non-negative number. } \item{ngrid}{ Grid size for calculation of integrals. An integer, giving the number of grid points in the \eqn{x} and \eqn{y} directions. } \item{plotit}{ Logical. If \code{TRUE}, the log pseudolikelihood is plotted on the current device. } \item{project}{ Logical. If \code{TRUE} (the default), the parameter \eqn{\gamma}{gamma} is constrained to lie in the interval \eqn{[0,1]}. If \code{FALSE}, this constraint is not applied. 
} } \details{ This function is intended mainly for technical investigation of algorithm performance. Its practical use is quite limited. It fits the stationary Strauss point process model to the point pattern dataset \code{X} by maximum pseudolikelihood (with the border edge correction) using an algorithm with very high accuracy. This algorithm is more accurate than the \emph{default} behaviour of the model-fitting function \code{\link{ppm}} because the discretisation is much finer. Ripley (1988) and Baddeley and Turner (2000) derived the log pseudolikelihood for the stationary Strauss process, and eliminated the parameter \eqn{\beta}{beta}, obtaining an exact formula for the partial log pseudolikelihood as a function of the interaction parameter \eqn{\gamma}{gamma} only. The algorithm evaluates this expression to a high degree of accuracy, using numerical integration on a \code{ngrid * ngrid} lattice, uses \code{\link[stats]{optim}} to maximise the log pseudolikelihood with respect to \eqn{\gamma}{gamma}, and finally recovers \eqn{\beta}{beta}. The result is a vector of length 2, containing the fitted coefficients \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}. These values correspond to the entries that would be obtained with \code{coef(ppm(X, ~1, Strauss(R)))}. The fitted coefficients are typically accurate to within \eqn{10^{-6}}{10^(-6)} as shown in Baddeley and Turner (2013). Note however that (by default) \code{exactMPLEstrauss} constrains the parameter \eqn{\gamma}{gamma} to lie in the interval \eqn{[0,1]} in which the point process is well defined (Kelly and Ripley, 1976) whereas \code{\link{ppm}} does not constrain the value of \eqn{\gamma}{gamma} (by default). This behaviour is controlled by the argument \code{project} to \code{\link{ppm}} and \code{exactMPLEstrauss}. The default for \code{\link{ppm}} is \code{project=FALSE}, while the default for \code{exactMPLEstrauss} is \code{project=TRUE}. } \value{ Vector of length 2. 
} \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A. and Turner, R. (2013) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{2012}. {doi: 10.1080/00949655.2012.755976} Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}. Cambridge University Press. } \author{ \adrian and \rolf } \seealso{ \code{\link{ppm}} } \examples{ if(interactive()) { rc <- 0.09 exactMPLEstrauss(cells, rc, plotit=TRUE) coef(ppm(cells ~1, Strauss(rc))) coef(ppm(cells ~1, Strauss(rc), nd=128)) rr <- 0.04 exactMPLEstrauss(redwood, rr) exactMPLEstrauss(redwood, rr, project=FALSE) coef(ppm(redwood ~1, Strauss(rr))) } else { rc <- 0.09 exactMPLEstrauss(cells, rc, ngrid=64, plotit=TRUE) exactMPLEstrauss(cells, rc, ngrid=64, project=FALSE) } } \keyword{spatial} \keyword{models} spatstat/man/residuals.mppm.Rd0000644000176200001440000000476213571674202016142 0ustar liggesusers\name{residuals.mppm} \alias{residuals.mppm} \title{Residuals for Point Process Model Fitted to Multiple Point Patterns} \description{ Given a point process model fitted to multiple point patterns, compute residuals for each pattern. } \usage{ \method{residuals}{mppm}(object, type = "raw", ..., fittedvalues = fitted.mppm(object)) } \arguments{ \item{object}{Fitted point process model (object of class \code{"mppm"}).} \item{\dots}{Ignored.} \item{type}{Type of residuals: either \code{"raw"}, \code{"pearson"} or \code{"inverse"}. Partially matched.} \item{fittedvalues}{Advanced use only. Fitted values of the model to be used in the calculation. } } \details{ Baddeley et al (2005) defined residuals for the fit of a point process model to spatial point pattern data. 
For an explanation of these residuals, see the help file for \code{\link{residuals.ppm}}. This function computes the residuals for a point process model fitted to \emph{multiple} point patterns. The \code{object} should be an object of class \code{"mppm"} obtained from \code{\link{mppm}}. The return value is a list. The number of entries in the list equals the number of point patterns in the original data. Each entry in the list has the same format as the output of \code{\link{residuals.ppm}}. That is, each entry in the list is a signed measure (object of class \code{"msr"}) giving the residual measure for the corresponding point pattern. } \value{ A list of signed measures (objects of class \code{"msr"}) giving the residual measure for each of the original point patterns. See Details. } \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) r <- residuals(fit) # compute total residual for each point pattern rtot <- sapply(r, integral.msr) # standardise the total residuals areas <- sapply(windows.mppm(fit), area.owin) rtot/sqrt(areas) } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{mppm}}, \code{\link{residuals.mppm}} } \keyword{spatial} \keyword{models} spatstat/man/will.expand.Rd0000644000176200001440000000171413333543265015416 0ustar liggesusers\name{will.expand} \alias{will.expand} \title{ Test Expansion Rule } \description{ Determines whether an expansion rule will actually expand the window or not. } \usage{ will.expand(x) } \arguments{ \item{x}{ Expansion rule. An object of class \code{"rmhexpand"}. 
} } \details{ An object of class \code{"rmhexpand"} describes a rule for expanding a simulation window. See \code{\link{rmhexpand}} for details. One possible expansion rule is to do nothing, i.e. not to expand the window. This command inspects the expansion rule \code{x} and determines whether it will or will not actually expand the window. It returns \code{TRUE} if the window will be expanded. } \value{ Logical value. } \author{\adrian and \rolf } \seealso{ \code{\link{rmhexpand}}, \code{\link{expand.owin}} } \examples{ x <- rmhexpand(distance=0.2) y <- rmhexpand(area=1) will.expand(x) will.expand(y) } \keyword{spatial} \keyword{manip} spatstat/man/is.marked.ppm.Rd0000644000176200001440000000443413333543263015641 0ustar liggesusers\name{is.marked.ppm} \alias{is.marked.ppm} \alias{is.marked.lppm} \title{Test Whether A Point Process Model is Marked} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points. } \usage{ \method{is.marked}{ppm}(X, \dots) \method{is.marked}{lppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. Alternatively, a model of class \code{"lppm"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a marked point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. 
This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a marked point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model ignores the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.marked(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.marked(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.marked(fit3) # FALSE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/owin.Rd0000644000176200001440000001616513333543263014151 0ustar liggesusers\name{owin} \alias{owin} \title{Create a Window} \description{ Creates an object of class \code{"owin"} representing an observation window in the two-dimensional plane } \usage{ owin(xrange=c(0,1), yrange=c(0,1), ..., poly=NULL, mask=NULL, unitname=NULL, xy=NULL) } \arguments{ \item{xrange}{\eqn{x} coordinate limits of enclosing box} \item{yrange}{\eqn{y} coordinate limits of enclosing box} \item{\dots}{Ignored.} \item{poly}{ Optional. Polygonal boundary of window. Incompatible with \code{mask}. } \item{mask}{ Optional. Logical matrix giving binary image of window. Incompatible with \code{poly}. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{xy}{ Optional. List with components \code{x} and \code{y} specifying the pixel coordinates for \code{mask}. 
} } \value{ An object of class \code{"owin"} describing a window in the two-dimensional plane. } \details{ In the \pkg{spatstat} library, a point pattern dataset must include information about the window of observation. This is represented by an object of class \code{"owin"}. See \code{\link{owin.object}} for an overview. To create a window in its own right, users would normally invoke \code{owin}, although sometimes \code{\link{as.owin}} may be convenient. A window may be rectangular, polygonal, or a mask (a binary image). \itemize{ \item \bold{rectangular windows:} If only \code{xrange} and \code{yrange} are given, then the window will be rectangular, with its \eqn{x} and \eqn{y} coordinate dimensions given by these two arguments (which must be vectors of length 2). If no arguments are given at all, the default is the unit square with dimensions \code{xrange=c(0,1)} and \code{yrange=c(0,1)}. \item \bold{polygonal windows:} If \code{poly} is given, then the window will be polygonal. \itemize{ \item \emph{single polygon:} If \code{poly} is a matrix or data frame with two columns, or a structure with two component vectors \code{x} and \code{y} of equal length, then these values are interpreted as the cartesian coordinates of the vertices of a polygon circumscribing the window. The vertices must be listed \emph{anticlockwise}. No vertex should be repeated (i.e. do not repeat the first vertex). \item \emph{multiple polygons or holes:} If \code{poly} is a list, each entry \code{poly[[i]]} of which is a matrix or data frame with two columns or a structure with two component vectors \code{x} and \code{y} of equal length, then the successive list members \code{poly[[i]]} are interpreted as separate polygons which together make up the boundary of the window. The vertices of each polygon must be listed \emph{anticlockwise} if the polygon is part of the external boundary, but \emph{clockwise} if the polygon is the boundary of a hole in the window. 
Again, do not repeat any vertex. } \item \bold{binary masks:} If \code{mask} is given, then the window will be a binary image. \itemize{ \item \emph{Specified by logical matrix:} Normally the argument \code{mask} should be a logical matrix such that \code{mask[i,j]} is \code{TRUE} if the point \code{(x[j],y[i])} belongs to the window, and \code{FALSE} if it does not. Note carefully that rows of \code{mask} correspond to the \eqn{y} coordinate, and columns to the \eqn{x} coordinate. Here \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y} coordinates equally spaced over \code{xrange} and \code{yrange} respectively. The pixel coordinate vectors \code{x} and \code{y} may be specified explicitly using the argument \code{xy}, which should be a list containing components \code{x} and \code{y}. Alternatively there is a sensible default. \item \emph{Specified by list of pixel coordinates:} Alternatively the argument \code{mask} can be a data frame with 2 or 3 columns. If it has 2 columns, it is expected to contain the spatial coordinates of all the pixels which are inside the window. If it has 3 columns, it should contain the spatial coordinates \eqn{(x,y)} of every pixel in the grid, and the logical value associated with each pixel. The pixels may be listed in any order. } } To create a window which is mathematically defined by inequalities in the Cartesian coordinates, use \code{\link{raster.x}()} and \code{\link{raster.y}()} as in the examples below. Functions \code{\link{square}} and \code{\link{disc}} will create square and circular windows, respectively. } \section{Validity of polygon data}{ Polygon data may contain geometrical inconsistencies such as self-intersections and overlaps. These inconsistencies must be removed to prevent problems in other \pkg{spatstat} functions. By default, polygon data will be repaired automatically using polygon-clipping code. The repair process may change the number of vertices in a polygon and the number of polygon components. 
To disable the repair process, set \code{spatstat.options(fixpolygons=FALSE)}. } \seealso{ \code{\link{owin.object}}, \code{\link{as.owin}}, \code{\link{complement.owin}}, \code{\link{ppp.object}}, \code{\link{ppp}} \code{\link{square}}, \code{\link{hexagon}}, \code{\link{regularpolygon}}, \code{\link{disc}}, \code{\link{ellipse}}. } \examples{ w <- owin() w <- owin(c(0,1), c(0,1)) # the unit square w <- owin(c(10,20), c(10,30), unitname=c("foot","feet")) # a rectangle of dimensions 10 x 20 feet # with lower left corner at (10,10) # polygon (diamond shape) w <- owin(poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1))) w <- owin(c(0,1), c(0,2), poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1))) # polygon with hole ho <- owin(poly=list(list(x=c(0,1,1,0), y=c(0,0,1,1)), list(x=c(0.6,0.4,0.4,0.6), y=c(0.2,0.2,0.4,0.4)))) w <- owin(c(-1,1), c(-1,1), mask=matrix(TRUE, 100,100)) # 100 x 100 image, all TRUE X <- raster.x(w) Y <- raster.y(w) wm <- owin(w$xrange, w$yrange, mask=(X^2 + Y^2 <= 1)) # discrete approximation to the unit disc \dontrun{ if(FALSE) { plot(c(0,1),c(0,1),type="n") bdry <- locator() # click the vertices of a polygon (anticlockwise) } } \testonly{ bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) } w <- owin(poly=bdry) \dontrun{plot(w)} \dontrun{ im <- as.logical(matrix(scan("myfile"), nrow=128, ncol=128)) # read in an arbitrary 128 x 128 digital image from text file rim <- im[, 128:1] # Assuming it was given in row-major order in the file # i.e. scanning left-to-right in rows from top-to-bottom, # the use of matrix() has effectively transposed rows & columns, # so to convert it to our format just reverse the column order. w <- owin(mask=rim) plot(w) # display it to check! 
} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/lixellate.Rd0000644000176200001440000000467613333543263015164 0ustar liggesusers\name{lixellate} \alias{lixellate} \title{ Subdivide Segments of a Network } \description{ Each line segment of a linear network will be divided into several shorter segments (line elements or lixels). } \usage{ lixellate(X, \dots, nsplit, eps, sparse = TRUE) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}) or a point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } \item{nsplit}{ Number of pieces into which \emph{each} line segment of \code{X} should be divided. Either a single integer, or an integer vector with one entry for each line segment in \code{X}. Incompatible with \code{eps}. } \item{eps}{ Maximum length of the resulting pieces of line segment. A single numeric value. Incompatible with \code{nsplit}. } \item{sparse}{ Optional. Logical value specifying whether the resulting linear network should be represented using a sparse matrix. If \code{sparse=NULL}, then the representation will be the same as in \code{X}. } } \details{ Each line segment in \code{X} will be subdivided into equal pieces. The result is an object of the same kind as \code{X}, representing the same data as \code{X} except that the segments have been subdivided. Splitting is controlled by the arguments \code{nsplit} and \code{eps}, exactly one of which should be given. If \code{nsplit} is given, it specifies the number of pieces into which \emph{each} line segment of \code{X} should be divided. It should be either a single integer, or an integer vector of length equal to the number of line segments in \code{X}. If \code{eps} is given, it specifies the maximum length of any resulting piece of line segment. It is strongly advisable to use \code{sparse=TRUE} (the default) to limit the computation time. 
If \code{X} is a point pattern (class \code{"lpp"}) then the spatial coordinates and marks of each data point are unchanged, but the local coordinates will change, because they are adjusted to map them to the new subdivided network. } \value{ Object of the same kind as \code{X}. } \author{ Greg McSwiggan, \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{lpp}}. } \examples{ A <- lixellate(simplenet, nsplit=4) plot(A, main="lixellate(simplenet, nsplit=4)") points(vertices(A), pch=16) spiders lixellate(spiders, nsplit=3) } \keyword{spatial} \keyword{manip} spatstat/man/psib.Rd0000644000176200001440000000305513377650213014126 0ustar liggesusers\name{psib} \alias{psib} \alias{psib.kppm} \title{ Sibling Probability of Cluster Point Process } \description{ Computes the sibling probability of a cluster point process model. } \usage{ psib(object) \method{psib}{kppm}(object) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } } \details{ In a Poisson cluster process, two points are called \emph{siblings} if they belong to the same cluster, that is, if they had the same parent point. If two points of the process are separated by a distance \eqn{r}, the probability that they are siblings is \eqn{p(r) = 1 - 1/g(r)} where \eqn{g} is the pair correlation function of the process. The value \eqn{p(0) = 1 - 1/g(0)} is the probability that, if two points of the process are situated very close to each other, they came from the same cluster. This probability is an index of the strength of clustering, with high values suggesting strong clustering. This concept was proposed in Baddeley, Rubak and Turner (2015, page 479) and Baddeley (2017). } \value{ A single number. } \references{ Baddeley, A. (2017) Local composite likelihood for spatial point processes. \emph{Spatial Statistics} \bold{22}, 261--295. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. 
Chapman and Hall/CRC Press. } \author{ \adrian. } \seealso{ \code{\link[spatstat]{kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") psib(fit) } \keyword{spatial} \keyword{models} spatstat/man/pcfdot.Rd0000644000176200001440000001400413333543264014443 0ustar liggesusers\name{pcfdot} \alias{pcfdot} \title{Multitype pair correlation function (i-to-any)} \description{ Calculates an estimate of the multitype pair correlation function (from points of type \code{i} to points of any type) for a multitype point pattern. } \usage{ pcfdot(X, i, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot-type pair correlation function \eqn{g_{i\bullet}(r)}{gdot[i](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \details{ This is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. 
For two locations \eqn{x} and \eqn{y} separated by a nonzero distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of any type at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda * gdot[i](r) dx dy } where \eqn{\lambda}{lambda} is the intensity of all points, and \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda}{p(r) = lambda[i] * lambda} so \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}. For a stationary multitype point process, the type-\code{i}-to-any-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i\bullet}(r) = \frac{K_{i\bullet}^\prime(r)}{2\pi r} }{ g(r) = Kdot[i]'(r)/ ( 2 * pi * r) } where \eqn{K_{i\bullet}^\prime}{Kdot[i]'(r)} is the derivative of the type-\code{i}-to-any-type \eqn{K} function \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. of the point process. See \code{\link{Kdot}} for information about \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. The command \code{pcfdot} computes a kernel estimate of the multitype pair correlation function from points of type \eqn{i} to points of any type. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. 
} There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285). That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the unmarked point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfcross}} computes the corresponding analogue of \code{\link{Kcross}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}}{gdot[i]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = r} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i,j}}{g[i,j]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. 
Multitype pair correlation \code{\link{pcfcross}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kdot}} } \examples{ data(amacrine) p <- pcfdot(amacrine, "on") p <- pcfdot(amacrine, "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/lgcp.estpcf.Rd0000644000176200001440000002031713571674202015401 0ustar liggesusers\name{lgcp.estpcf} \alias{lgcp.estpcf} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ lgcp.estpcf(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits a log-Gaussian Cox point process (LGCP) model to a point pattern dataset by the Method of Minimum Contrast, using the estimated pair correlation function of the point pattern. 
The shape of the covariance of the LGCP must be specified: the default is the exponential covariance function, but other covariance models can be selected. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical pair correlation function of the LGCP model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (\Moller and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The theoretical pair correlation function of the LGCP is \deqn{ g(r) = \exp(C(s)) }{ g(r) = exp(C(s)) } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). 
} The covariance function \eqn{C(r)} takes the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(-r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. This should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is a string identifying the template model as explained below, and \code{\dots} are optional arguments of the form \code{tag=value} giving the values of parameters controlling the \emph{shape} of the template model. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } To determine the template model, the string \code{"modelname"} will be prefixed by \code{"RM"} and the code will search for a function of this name in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the \Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)} corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. 
Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian with modifications by Shen Guochun and Rasmus Waagepetersen \email{rw@math.auc.dk} and \ege. } \seealso{ \code{\link{lgcp.estK}} for alternative method of fitting LGCP. 
\code{\link{matclust.estpcf}}, \code{\link{thomas.estpcf}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{pcf}} for the pair correlation function. } \examples{ data(redwood) u <- lgcp.estpcf(redwood, c(var=1, scale=0.1)) u plot(u) if(require(RandomFields)) { lgcp.estpcf(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat/man/rgbim.Rd0000644000176200001440000000461313333543264014271 0ustar liggesusers\name{rgbim} \alias{rgbim} \alias{hsvim} \title{Create Colour-Valued Pixel Image} \description{ Creates an object of class \code{"im"} representing a two-dimensional pixel image whose pixel values are colours. } \usage{ rgbim(R, G, B, A, maxColorValue=255, autoscale=FALSE) hsvim(H, S, V, A, autoscale=FALSE) } \arguments{ \item{R,G,B}{ Pixel images (objects of class \code{"im"}) or constants giving the red, green, and blue components of a colour, respectively. } \item{A}{ Optional. Pixel image or constant value giving the alpha (transparency) component of a colour. } \item{maxColorValue}{ Maximum colour channel value for \code{R,G,B,A}. } \item{H,S,V}{ Pixel images (objects of class \code{"im"}) or constants giving the hue, saturation, and value components of a colour, respectively. } \item{autoscale}{ Logical. If \code{TRUE}, input values are automatically rescaled to fit the permitted range. RGB values are scaled to lie between 0 and \code{maxColorValue}. HSV values are scaled to lie between 0 and 1. } } \details{ These functions take three pixel images, with real or integer pixel values, and create a single pixel image whose pixel values are colours recognisable to \R. Some of the arguments may be constant numeric values, but at least one of the arguments must be a pixel image. 
The image arguments should be compatible (in array dimension and in spatial position). \code{rgbim} calls \code{\link{rgb}} to compute the colours, while \code{hsvim} calls \code{\link{hsv}}. See the help for the relevant function for more information about the meaning of the colour channels. } \seealso{ \code{\link{im.object}}, \code{\link{rgb}}, \code{\link{hsv}}. See \code{\link[spatstat:colourtools]{colourtools}} for additional colour tools. } \examples{ \testonly{ op <- spatstat.options(npixel=32) } # create three images with values in [0,1] X <- setcov(owin()) X <- eval.im(pmin(1,X)) M <- Window(X) Y <- as.im(function(x,y){(x+1)/2}, W=M) Z <- as.im(function(x,y){(y+1)/2}, W=M) # convert RGB <- rgbim(X, Y, Z, maxColorValue=1) HSV <- hsvim(X, Y, Z) opa <- par(mfrow=c(1,2)) plot(RGB, valuesAreColours=TRUE) plot(HSV, valuesAreColours=TRUE) par(opa) \testonly{ spatstat.options(op) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} \keyword{datagen} spatstat/man/methods.rho2hat.Rd0000644000176200001440000000412413333543263016176 0ustar liggesusers\name{methods.rho2hat} \alias{methods.rho2hat} %DoNotExport \alias{predict.rho2hat} \alias{print.rho2hat} \alias{plot.rho2hat} \title{ Methods for Intensity Functions of Two Spatial Covariates } \description{ These are methods for the class \code{"rho2hat"}. } \usage{ \method{plot}{rho2hat}(x, \dots, do.points=FALSE) \method{print}{rho2hat}(x, \dots) \method{predict}{rho2hat}(object, \dots, relative=FALSE) } \arguments{ \item{x,object}{ An object of class \code{"rho2hat"}. } \item{\dots}{ Arguments passed to other methods. } \item{do.points}{ Logical value indicating whether to plot the observed values of the covariates at the data points. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. 
} } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link{predict}} and \code{\link{plot}} for the class \code{"rho2hat"}. An object of class \code{"rho2hat"} is an estimate of the intensity of a point process, as a function of two given spatial covariates. See \code{\link{rho2hat}}. The method \code{plot.rho2hat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. In this plot the two axes represent possible values of the two covariates. The method \code{predict.rho2hat} computes a pixel image of the intensity \eqn{\rho(Z_1(u), Z_2(u))}{rho(Z1(u), Z2(u))} at each spatial location \eqn{u}, where \eqn{Z_1(u)}{Z1(u)} and \eqn{Z_2(u)}{Z2(u)} are the two spatial covariates. } \value{ For \code{predict.rho2hat} the value is a pixel image (object of class \code{"im"}). For other functions, the value is \code{NULL}. } \author{ \adrian } \seealso{ \code{\link{rho2hat}} } \examples{ r2 <- with(bei.extra, rho2hat(bei, elev, grad)) r2 plot(r2) plot(predict(r2)) } \keyword{spatial} \keyword{methods} spatstat/man/dppapproxkernel.Rd0000644000176200001440000000120613333543263016401 0ustar liggesusers\name{dppapproxkernel} \alias{dppapproxkernel} \title{Approximate Determinantal Point Process Kernel} \description{ Returns an approximation to the kernel of a determinantal point process, as a function of one argument \eqn{x}. } \usage{dppapproxkernel(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}. 
} \item{W}{Optional window -- undocumented at the moment.} } \value{A function} \author{ \adrian \rolf and \ege } spatstat/man/crossdist.Rd0000644000176200001440000000252413333543263015204 0ustar liggesusers\name{crossdist} \alias{crossdist} \title{Pairwise distances} \description{ Computes the distances between pairs of `things' taken from two different datasets. } \usage{ crossdist(X, Y, \dots) } \arguments{ \item{X,Y}{ Two objects of the same class. } \item{\dots}{ Additional arguments depending on the method. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th thing in the first dataset to the \code{j}-th thing in the second dataset. } \details{ Given two datasets \code{X} and \code{Y} (representing either two point patterns or two line segment patterns) \code{crossdist} computes the Euclidean distance from each thing in the first dataset to each thing in the second dataset, and returns a matrix containing these distances. The function \code{crossdist} is generic, with methods for point patterns (objects of class \code{"ppp"}), line segment patterns (objects of class \code{"psp"}), and a default method. See the documentation for \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}} or \code{\link{crossdist.default}} for further details. } \seealso{ \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}}, \code{\link{crossdist.default}}, \code{\link{pairdist}}, \code{\link{nndist}} } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/as.boxx.Rd0000644000176200001440000000217413333543262014551 0ustar liggesusers\name{as.boxx} \alias{as.boxx} \title{Convert Data to Multi-Dimensional Box} \description{Interprets data as the dimensions of a multi-dimensional box.} \usage{ as.boxx(\dots, warn.owin = TRUE) } \arguments{ \item{\dots}{ Data that can be interpreted as giving the dimensions of a multi-dimensional box. See Details. 
} \item{warn.owin}{ Logical value indicating whether to print a warning if a non-rectangular window (object of class \code{"owin"}) is supplied. } } \details{ Either a single argument should be provided which is one of the following: \itemize{ \item an object of class \code{"boxx"} \item an object of class \code{"box3"} \item an object of class \code{"owin"} \item a numeric vector of even length, specifying the corners of the box. See Examples } or a list of arguments acceptable to \code{\link{boxx}}. } \value{A \code{"boxx"} object.} \author{ \adrian \rolf and \ege } \examples{ # Convert unit square to two dimensional box. W <- owin() as.boxx(W) # Make three dimensional box [0,1]x[0,1]x[0,1] from numeric vector as.boxx(c(0,1,0,1,0,1)) } spatstat/man/Kmodel.dppm.Rd0000644000176200001440000000200313333543262015330 0ustar liggesusers\name{Kmodel.dppm} \alias{Kmodel.detpointprocfamily} \alias{pcfmodel.detpointprocfamily} \alias{Kmodel.dppm} \alias{pcfmodel.dppm} \title{ K-function or Pair Correlation Function of a Determinantal Point Process Model } \description{Returns the theoretical \eqn{K}-function or theoretical pair correlation function of a determinantal point process model as a function of one argument \eqn{r}. 
} \usage{ \method{Kmodel}{dppm}(model, \dots) \method{pcfmodel}{dppm}(model, \dots) \method{Kmodel}{detpointprocfamily}(model, \dots) \method{pcfmodel}{detpointprocfamily}(model, \dots) } \arguments{ \item{model}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.} \item{\dots}{Ignored (not quite true -- there is some undocumented internal use)} } \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) KMatern <- Kmodel(model) pcfMatern <- pcfmodel(model) plot(KMatern, xlim = c(0,0.05)) plot(pcfMatern, xlim = c(0,0.05)) } spatstat/man/rlpp.Rd0000644000176200001440000000450413333543264014145 0ustar liggesusers\name{rlpp} \alias{rlpp} \title{ Random Points on a Linear Network } \description{ Generates \eqn{n} independent random points on a linear network with a specified probability density. } \usage{ rlpp(n, f, \dots, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer giving the number of points, or an integer vector giving the numbers of points of each type. } \item{f}{ Probability density (not necessarily normalised). A pixel image on a linear network (object of class \code{"linim"}) or a function on a linear network (object of class \code{"linfun"}). Alternatively, \code{f} can be a list of functions or pixel images, giving the densities of points of each type. } \item{\dots}{ Additional arguments passed to \code{f} if it is a function or a list of functions. } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ The linear network \code{L}, on which the points will be generated, is determined by the argument \code{f}. 
If \code{f} is a function, it is converted to a pixel image on the linear network, using any additional function arguments \code{\dots}. If \code{n} is a single integer and \code{f} is a function or pixel image, then independent random points are generated on \code{L} with probability density proportional to \code{f}. If \code{n} is an integer vector and \code{f} is a list of functions or pixel images, where \code{n} and \code{f} have the same length, then independent random points of several types are generated on \code{L}, with \code{n[i]} points of type \code{i} having probability density proportional to \code{f[[i]]}. } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. } \author{ \adrian } \seealso{ \code{\link{runiflpp}} } \examples{ g <- function(x, y, seg, tp) { exp(x + 3*y) } f <- linfun(g, simplenet) rlpp(20, f) plot(rlpp(20, f, nsim=3)) } \keyword{spatial} \keyword{datagen} spatstat/man/plot.psp.Rd0000644000176200001440000001262013557702266014754 0ustar liggesusers\name{plot.psp} \alias{plot.psp} \title{plot a Spatial Line Segment Pattern} \description{ Plot a two-dimensional line segment pattern } \usage{ \method{plot}{psp}(x, \dots, main, add=FALSE, show.all=!add, show.window=show.all, which.marks=1, style=c("colour", "width", "none"), col=NULL, ribbon=show.all, ribsep=0.15, ribwid=0.05, ribn=1024, do.plot=TRUE) } \arguments{ \item{x}{ The line segment pattern to be plotted. An object of class \code{"psp"}, or data which can be converted into this format by \code{\link{as.psp}()}. } \item{\dots}{ extra arguments that will be passed to the plotting functions \code{\link{segments}} (to plot the segments) and \code{\link{plot.owin}} (to plot the observation window). } \item{main}{ Character string giving a title for the plot. } \item{add}{ Logical. 
If \code{TRUE}, the current plot is not erased; the segments are plotted on top of the current plot, and the window is not plotted (by default). } \item{show.all}{ Logical value specifying whether to plot everything including the window, main title, and colour ribbon. } \item{show.window}{ Logical value specifying whether to plot the window. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character string or an integer. Defaults to \code{1} indicating the first column of marks. } \item{style}{ Character string specifying how to represent the mark value of each segment. If \code{style="colour"} (the default) segments are coloured according to their mark value. If \code{style="width"}, segments are drawn with a width proportional to their mark value. If \code{style="none"} the mark values are ignored. } \item{col}{ Colour information. If \code{style="width"} or \code{style="none"}, then \code{col} should be a single value, interpretable as a colour; the line segments will be plotted using this colour. If \code{style="colour"} and \code{x} has marks, then the mark values will be mapped to colours using the information in \code{col}, which should be a colour map (object of class \code{"colourmap"}) or a vector of colour values. } \item{ribbon}{ Logical flag indicating whether to display a ribbon showing the colour map (in which mark values are associated with colours) when \code{style="colour"}. } \item{ribsep}{ Factor controlling the space between the ribbon and the image. } \item{ribwid}{ Factor controlling the width of the ribbon. } \item{ribn}{ Number of different values to display in the ribbon. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \value{ (Invisibly) a colour map object specifying the association between marks and colours, if any. The return value also has an attribute \code{"bbox"} giving a bounding box for the plot. 
} \details{ This is the \code{plot} method for line segment pattern datasets (of class \code{"psp"}, see \code{\link{psp.object}}). It plots both the observation window \code{Window(x)} and the line segments themselves. Plotting of the window \code{Window(x)} is performed by \code{\link{plot.owin}}. This plot may be modified through the \code{...} arguments. Plotting of the segments themselves is performed by the standard R function \code{\link{segments}}. Its plotting behaviour may also be modified through the \code{...} arguments. If the segments do not have marks (i.e. if \code{marks(x) = NULL}) then There are three different styles of plotting which apply when the segments have marks (i.e. when \code{marks(x)} is not null): \describe{ \item{\code{style="colour"} (the default):}{ Segments are plotted with different colours depending on their mark values. The colour map, associating mark values with colours, is determined by the argument \code{col}. The colour map will be displayed as a vertical colour ribbon to the right of the plot, if \code{ribbon=TRUE} (the default). } \item{\code{style="width"}:}{ Segments are plotted with different widths depending on their mark values. The width map, associating mark values with line widths, is determined by \code{leg.scale} (see \code{\link{plot.linim}}). The width map will be displayed as a vertical stack of lines to the right of the plot, if \code{legend=TRUE} (the default). } \item{\code{style="none"}:}{ Mark information is ignored. } } If \code{marks(x)} is a data frame, the default is to use the first column of \code{marks(x)} to determine the colours. To specify another column, use the argument \code{which.marks}. 
} \seealso{ \code{\link{psp.object}}, \code{\link{plot}}, \code{\link{par}}, \code{\link{plot.owin}}, \code{\link{text.psp}}, \code{\link{symbols}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) plot(X) plot(X, lwd=3) lettuce <- sample(letters[1:4], 20, replace=TRUE) marks(X) <- data.frame(A=1:20, B=factor(lettuce)) plot(X) plot(X, which.marks="B") plot(X, style="width", col="grey") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/Kest.Rd0000644000176200001440000003166513333543262014104 0ustar liggesusers\name{Kest} \alias{Kest} \title{K-function} \description{ Estimates Ripley's reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape. } \usage{ Kest(X, \dots, r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. If necessary, specify \code{rmax}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"rigid"}, \code{"none"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{nlarge}{ Optional. Efficiency threshold. 
If the number of points exceeds \code{nlarge}, then only the border correction will be computed (by default), using a fast algorithm. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } \item{var.approx}{Logical. If \code{TRUE}, the approximate variance of \eqn{\hat K(r)}{Kest(r)} under CSR will also be computed. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K(r)} obtained by the edge corrections named. If \code{var.approx=TRUE} then the return value also has columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat K(r)}{Kest(r)} under CSR. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The \eqn{K} function (variously called ``Ripley's K-function'' and the ``reduced second moment function'') of a stationary point process \eqn{X} is defined so that \eqn{\lambda K(r)}{lambda K(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical random point of \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. 
the expected number of points of \eqn{X} per unit area. The \eqn{K} function is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1977, 1988). In exploratory analyses, the estimate of \eqn{K} is a useful statistic summarising aspects of inter-point ``dependence'' and ``clustering''. For inferential purposes, the estimate of \eqn{K} is usually compared to the true value of \eqn{K} for a completely random (Poisson) point process, which is \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Deviations between the empirical and theoretical \eqn{K} curves may suggest spatial clustering or spatial regularity. This routine \code{Kest} estimates the \eqn{K} function of a stationary point process, given observation of the process inside a known, bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{K} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented for rectangular and polygonal windows (not for binary masks). } \item{translate/translation}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } \item{rigid}{Rigid motion correction (Ohser and Stoyan, 1981). 
Implemented for all window geometries, but slow for complex windows. } \item{none}{ Uncorrected estimate. An estimate of the K function \emph{without} edge correction. (i.e. setting \eqn{e_{ij} = 1}{e[i,j] = 1} in the equation below. This estimate is \bold{biased} and should not be used for data analysis, \emph{unless} you have an extremely large point pattern (more than 100,000 points). } \item{best}{ Selects the best edge correction that is available for the geometry of the window. Currently this is Ripley's isotropic correction for a rectangular or polygonal window, and the translation correction for masks. } \item{good}{ Selects the best edge correction that can be computed in a reasonable time. This is the same as \code{"best"} for datasets with fewer than 3000 points; otherwise the selected edge correction is \code{"border"}, unless there are more than 100,000 points, when it is \code{"none"}. } } The estimates of \eqn{K(r)} are of the form \deqn{ \hat K(r) = \frac a {n (n-1) } \sum_i \sum_j I(d_{ij}\le r) e_{ij} }{ Kest(r) = (a/(n * (n-1))) * sum[i,j] I(d[i,j] <= r) e[i,j]) } where \eqn{a} is the area of the window, \eqn{n} is the number of data points, and the sum is taken over all ordered pairs of points \eqn{i} and \eqn{j} in \code{X}. Here \eqn{d_{ij}}{d[i,j]} is the distance between the two points, and \eqn{I(d_{ij} \le r)}{I(d[i,j] <= r)} is the indicator that equals 1 if the distance is less than or equal to \eqn{r}. The term \eqn{e_{ij}}{e[i,j]} is the edge correction weight (which depends on the choice of edge correction listed above). Note that this estimator assumes the process is stationary (spatially homogeneous). For inhomogeneous point patterns, see \code{\link{Kinhom}}. If the point pattern \code{X} contains more than about 3000 points, the isotropic and translation edge corrections can be computationally prohibitive. 
The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. If \code{X} contains more than about 100,000 points, even the border correction is time-consuming. You may want to consider setting \code{correction="none"} in this case. There is an even faster algorithm for the uncorrected estimate. Approximations to the variance of \eqn{\hat K(r)}{Kest(r)} are available, for the case of the isotropic edge correction estimator, \bold{assuming complete spatial randomness} (Ripley, 1988; Lotwick and Silverman, 1982; Diggle, 2003, pp 51-53). If \code{var.approx=TRUE}, then the result of \code{Kest} also has a column named \code{rip} giving values of Ripley's (1988) approximation to \eqn{\mbox{var}(\hat K(r))}{var(Kest(r))}, and (if the window is a rectangle) a column named \code{ls} giving values of Lotwick and Silverman's (1982) approximation. If the argument \code{domain} is given, the calculations will be restricted to a subset of the data. In the formula for \eqn{K(r)} above, the \emph{first} point \eqn{i} will be restricted to lie inside \code{domain}. The result is an approximately unbiased estimate of \eqn{K(r)} based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The estimator \code{Kest} ignores marks. 
Its counterparts for multitype point patterns are \code{\link{Kcross}}, \code{\link{Kdot}}, and for general marked point patterns see \code{\link{Kmulti}}. Some writers, particularly Stoyan (1994, 1995) advocate the use of the ``pair correlation function'' \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}. See \code{\link{pcf}} on how to estimate this function. } \section{Envelopes, significance bands and confidence intervals}{ To compute simulation envelopes for the \eqn{K}-function under CSR, use \code{\link{envelope}}. To compute a confidence interval for the true \eqn{K}-function, use \code{\link{varblock}} or \code{\link{lohboot}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ohser, J. and Stoyan, D. (1981) On the second-order and orientation analysis of planar stationary point processes. \emph{Biometrical Journal} \bold{23}, 523--533. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \section{Warnings}{ The estimator of \eqn{K(r)} is approximately unbiased for each fixed \eqn{r}. Bias increases with \eqn{r} and depends on the window geometry. For a rectangular window it is prudent to restrict the \eqn{r} values to a maximum of \eqn{1/4} of the smaller side length of the rectangle. Bias may become appreciable for point patterns consisting of fewer than 15 points. While \eqn{K(r)} is always a non-decreasing function, the estimator of \eqn{K} is not guaranteed to be non-decreasing. This is rarely a problem in practice. } \seealso{ \code{\link{localK}} to extract individual summands in the \eqn{K} function. \code{\link{pcf}} for the pair correlation. \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} for alternative summary functions. \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kmulti}} for counterparts of the \eqn{K} function for multitype point patterns. \code{\link{reduced.sample}} for the calculation of reduced sample estimators. } \examples{ X <- runifpoint(50) K <- Kest(X) K <- Kest(cells, correction="isotropic") plot(K) plot(K, main="K function for cells") # plot the L function plot(K, sqrt(iso/pi) ~ r) plot(K, sqrt(./pi) ~ r, ylab="L(r)", main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/update.kppm.Rd0000644000176200001440000000450213333543264015416 0ustar liggesusers\name{update.kppm} \alias{update.kppm} \title{Update a Fitted Cluster Point Process Model} \description{ \code{update} method for class \code{"kppm"}. } \usage{ \method{update}{kppm}(object, \dots, evaluate=TRUE) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}, obtained from \code{\link{kppm}}. } \item{\dots}{ Arguments passed to \code{\link{kppm}}. 
} \item{evaluate}{ Logical value indicating whether to return the updated fitted model (\code{evaluate=TRUE}, the default) or just the updated call to \code{kppm} (\code{evaluate=FALSE}). } } \details{ \code{object} should be a fitted cluster point process model, obtained from the model-fitting function \code{\link{kppm}}. The model will be updated according to the new arguments provided. If the argument \code{trend} is provided, it determines the intensity in the updated model. It should be an \R formula (with or without a left hand side). It may include the symbols \code{+} or \code{-} to specify addition or deletion of terms in the current model formula, as shown in the Examples below. The symbol \code{.} refers to the current contents of the formula. The intensity in the updated model is determined by the argument \code{trend} if it is provided, or otherwise by any unnamed argument that is a formula, or otherwise by the formula of the original model, \code{formula(object)}. The spatial point pattern data to which the new model is fitted is determined by the left hand side of the updated model formula, if this is present. Otherwise it is determined by the argument \code{X} if it is provided, or otherwise by any unnamed argument that is a point pattern or a quadrature scheme. The model is refitted using \code{\link{kppm}}. } \value{ Another fitted cluster point process model (object of class \code{"kppm"}. } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{methods.kppm}}, \code{\link{vcov.kppm}} } \examples{ fit <- kppm(redwood ~1, "Thomas") fitx <- update(fit, ~ . 
+ x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/Extract.quad.Rd0000644000176200001440000000221413333543263015526 0ustar liggesusers\name{Extract.quad} \alias{[.quad} \title{Subset of Quadrature Scheme} \description{ Extract a subset of a quadrature scheme. } \usage{ \method{[}{quad}(x, ...) } \arguments{ \item{x}{ A quadrature scheme (object of class \code{"quad"}). } \item{\dots}{ Arguments passed to \code{\link{[.ppp}} to determine the subset. } } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ This function extracts a designated subset of a quadrature scheme. The function \code{[.quad} is a method for \code{\link{[}} for the class \code{"quad"}. It extracts a designated subset of a quadrature scheme. The subset to be extracted is determined by the arguments \code{\dots} which are interpreted by \code{\link{[.ppp}}. Thus it is possible to take the subset consisting of all quadrature points that lie inside a given region, or a subset of quadrature points identified by numeric indices. } \seealso{ \code{\link{quad.object}}, \code{\link{[.ppp}}. } \examples{ Q <- quadscheme(nztrees) W <- owin(c(0,148),c(0,95)) # a subwindow Q[W] } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Jmulti.Rd0000644000176200001440000001364313333543262014436 0ustar liggesusers\name{Jmulti} \alias{Jmulti} \title{ Marked J Function } \description{ For a marked point pattern, estimate the multitype \eqn{J} function summarising dependence between the points in subset \eqn{I} and those in subset \eqn{J}. } \usage{ Jmulti(X, I, J, eps=NULL, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed. It must be a marked point pattern. 
See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. See Details. } \item{J}{Subset of points in \code{X} to which distances are measured. See Details. } \item{eps}{A positive number. The pixel resolution of the discrete approximation to Euclidean distance (see \code{\link{Jest}}). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{IJ}(r)}{J[IJ](r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{un}{the uncorrected estimate of \eqn{J_{IJ}(r)}{J[IJ](r)}, formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{IJ}(r)}{1 - G[IJ](r)} and \eqn{1 - F_{J}(r)}{1 - F[J](r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. 
} \item{theo}{the theoretical value of \eqn{J_{IJ}(r)}{J[IJ](r)} for a marked Poisson process with the same estimated intensity, namely 1. } } \details{ The function \code{Jmulti} generalises \code{\link{Jest}} (for unmarked point patterns) and \code{\link{Jdot}} and \code{\link{Jcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. Define \deqn{J_{IJ}(r) = \frac{1 - G_{IJ}(r)}{1 - F_J(r)}}{ J[IJ](r) = (1 - G[IJ](r))/(1 - F[J](r))} where \eqn{F_J(r)}{F[J](r)} is the cumulative distribution function of the distance from a fixed location to the nearest point of \eqn{X_J}{X[J]}, and \eqn{G_{IJ}(r)}{GJ(r)} is the distribution function of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. It is assumed that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jcross}}, \code{\link{Jdot}}, \code{\link{Jest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1,npoints(trees), by=50)] } Jm <- Jmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Jm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/compileK.Rd0000644000176200001440000001017313333543263014731 0ustar liggesusers\name{compileK} \alias{compileK} \alias{compilepcf} \title{ Generic Calculation of K Function and Pair Correlation Function } \description{ Low-level functions which calculate the estimated \eqn{K} function and estimated pair correlation function (or any similar functions) from a matrix of pairwise distances and optional weights. 
} \usage{ compileK(D, r, weights = NULL, denom = 1, check = TRUE, ratio = FALSE, fname = "K") compilepcf(D, r, weights = NULL, denom = 1, check = TRUE, endcorrect = TRUE, ratio=FALSE, \dots, fname = "g") } \arguments{ \item{D}{ A square matrix giving the distances between all pairs of points. } \item{r}{ An equally spaced, finely spaced sequence of distance values. } \item{weights}{ Optional numerical weights for the pairwise distances. A numeric matrix with the same dimensions as \code{D}. If absent, the weights are taken to equal 1. } \item{denom}{ Denominator for the estimator. A single number, or a numeric vector with the same length as \code{r}. See Details. } \item{check}{ Logical value specifying whether to check that \code{D} is a valid matrix of pairwise distances. } \item{ratio}{ Logical value indicating whether to store ratio information. See Details. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{density.default}} controlling the kernel smoothing. } \item{endcorrect}{ Logical value indicating whether to apply End Correction of the pair correlation estimate at \code{r=0}. } \item{fname}{ Character string giving the name of the function being estimated. } } \details{ These low-level functions construct estimates of the \eqn{K} function or pair correlation function, or any similar functions, given only the matrix of pairwise distances and optional weights associated with these distances. These functions are useful for code development and for teaching, because they perform a common task, and do the housekeeping required to make an object of class \code{"fv"} that represents the estimated function. However, they are not very efficient. 
\code{compileK} calculates the weighted estimate of the \eqn{K} function, \deqn{ \hat K(r) = (1/v(r)) \sum_i \sum_j 1\{ d_{ij} \le r\} w_{ij} }{ K(r) = (1/v(r)) \sum[i] \sum[j] 1(d[i,j] \le r) w[i,j] } and \code{compilepcf} calculates the weighted estimate of the pair correlation function, \deqn{ \hat g(r) = (1/v(r)) \sum_i \sum_j \kappa( d_{ij} - r ) w_{ij} }{ g(r) = (1/v(r)) \sum[i] \sum[j] kappa(d[i,j] - r) w[i,j] } where \eqn{d_{ij}}{d[i,j]} is the distance between spatial points \eqn{i} and \eqn{j}, with corresponding weight \eqn{w_{ij}}{w[i,j]}, and \eqn{v(r)} is a specified denominator. Here \eqn{\kappa}{kappa} is a fixed-bandwidth smoothing kernel. For a point pattern in two dimensions, the usual denominator \eqn{v(r)} is constant for the \eqn{K} function, and proportional to \eqn{r} for the pair correlation function. See the Examples. The result is an object of class \code{"fv"} representing the estimated function. This object has only one column of function values. Additional columns (such as a column giving the theoretical value) must be added by the user, with the aid of \code{\link{bind.fv}}. If \code{ratio=TRUE}, the result also belongs to class \code{"rat"} and has attributes containing the numerator and denominator of the function estimate. This allows function estimates from several datasets to be pooled using \code{\link{pool}}. } \value{ An object of class \code{"fv"} representing the estimated function. } \author{ \adrian } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} for definitions of the \eqn{K} function and pair correlation function. \code{\link{bind.fv}} to add more columns. 
} \examples{ X <- japanesepines D <- pairdist(X) Wt <- edge.Ripley(X, D) lambda <- intensity(X) a <- (npoints(X)-1) * lambda r <- seq(0, 0.25, by=0.01) K <- compileK(D=D, r=r, weights=Wt, denom=a) g <- compilepcf(D=D, r=r, weights=Wt, denom= a * 2 * pi * r) } \keyword{spatial} \keyword{nonparametric} spatstat/man/crossing.psp.Rd0000644000176200001440000000425213333543263015617 0ustar liggesusers\name{crossing.psp} \alias{crossing.psp} \title{Crossing Points of Two Line Segment Patterns} \description{ Finds any crossing points between two line segment patterns. } \usage{ crossing.psp(A,B,fatal=TRUE,details=FALSE) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } \item{details}{ Logical value indicating whether to return additional information. See below. } \item{fatal}{ Logical value indicating what to do if the windows of \code{A} and \code{B} do not overlap. See Details. } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function finds any crossing points between the line segment patterns \code{A} and \code{B}. A crossing point occurs whenever one of the line segments in \code{A} intersects one of the line segments in \code{B}, at a nonzero angle of intersection. The result is a point pattern consisting of all the intersection points. If \code{details=TRUE}, additional information is computed, specifying where each intersection point came from. The resulting point pattern has a data frame of marks, with columns named \code{iA, jB, tA, tB}. The marks \code{iA} and \code{jB} are the indices of the line segments in \code{A} and \code{B}, respectively, which produced each intersection point. The marks \code{tA} and \code{tB} are numbers between 0 and 1 specifying the position of the intersection point along the original segments. 
If the windows \code{Window(A)} and \code{Window(B)} do not overlap, then an error will be reported if \code{fatal=TRUE}, while if \code{fatal=FALSE} an error will not occur and the result will be \code{NULL}. } \seealso{ \code{\link{selfcrossing.psp}}, \code{\link{psp.object}}, \code{\link{ppp.object}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a, col="green", main="crossing.psp") plot(b, add=TRUE, col="blue") P <- crossing.psp(a,b) plot(P, add=TRUE, col="red") as.data.frame(crossing.psp(a,b,details=TRUE)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/quantile.ewcdf.Rd0000644000176200001440000000406013333543264016076 0ustar liggesusers\name{quantile.ewcdf} \alias{quantile.ewcdf} \title{ Quantiles of Weighted Empirical Cumulative Distribution Function } \description{ Compute quantiles of a weighted empirical cumulative distribution function. } \usage{ \method{quantile}{ewcdf}(x, probs = seq(0, 1, 0.25), names = TRUE, \dots, normalise = TRUE, type=1) } \arguments{ \item{x}{ A weighted empirical cumulative distribution function (object of class \code{"ewcdf"}, produced by \code{\link{ewcdf}}) for which the quantiles are desired. } \item{probs}{ probabilities for which the quantiles are desired. A numeric vector of values between 0 and 1. } \item{names}{ Logical. If \code{TRUE}, the resulting vector of quantiles is annotated with names corresponding to \code{probs}. } \item{\dots}{ Ignored. } \item{normalise}{ Logical value indicating whether \code{x} should first be normalised so that it ranges between 0 and 1. } \item{type}{ Integer specifying the type of quantile to be calculated, as explained in \code{\link[stats]{quantile.default}}. Only types 1 and 2 are currently implemented. 
} } \details{ This is a method for the generic \code{\link[stats]{quantile}} function for the class \code{ewcdf} of empirical weighted cumulative distribution functions. The quantile for a probability \code{p} is computed as the right-continuous inverse of the cumulative distribution function \code{x} (assuming \code{type=1}, the default). If \code{normalise=TRUE} (the default), the weighted cumulative function \code{x} is first normalised to have total mass \code{1} so that it can be interpreted as a cumulative probability distribution function. } \value{ Numeric vector of quantiles, of the same length as \code{probs}. } \seealso{ \code{\link{ewcdf}}, \code{\link[stats]{quantile}} } \examples{ z <- rnorm(50) w <- runif(50) Fun <- ewcdf(z, w) quantile(Fun, c(0.95,0.99)) } \author{ \spatstatAuthors and Kevin Ummel. } \keyword{spatial} \keyword{nonparametric} spatstat/man/Extract.splitppp.Rd0000644000176200001440000000252213333543263016451 0ustar liggesusers\name{Extract.splitppp} \alias{[.splitppp} \alias{[<-.splitppp} \title{Extract or Replace Sub-Patterns} \description{ Extract or replace some of the sub-patterns in a split point pattern. } \usage{ \method{[}{splitppp}(x, ...) \method{[}{splitppp}(x, ...) <- value } \arguments{ \item{x}{ An object of class \code{"splitppp"}, representing a point pattern separated into a list of sub-patterns. } \item{\dots}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. A list of point patterns. } } \value{ Another object of class \code{"splitppp"}. } \details{ These are subset methods for the class \code{"splitppp"}. The argument \code{x} should be an object of class \code{"splitppp"}, representing a point pattern that has been separated into a list of sub-patterns. It is created by \code{\link{split.ppp}}. The methods extract or replace a designated subset of the list \code{x}, and return an object of class \code{"splitppp"}. 
} \seealso{ \code{\link{split.ppp}}, \code{\link{plot.splitppp}}, \code{\link{summary.splitppp}} } \examples{ data(amacrine) # multitype point pattern y <- split(amacrine) y[1] y["off"] y[1] <- list(runifpoint(42, Window(amacrine))) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/matclust.estpcf.Rd0000644000176200001440000001476113571674202016316 0ustar liggesusers\name{matclust.estpcf} \alias{matclust.estpcf} \title{Fit the \Matern Cluster Point Process by Minimum Contrast Using Pair Correlation} \description{ Fits the \Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ matclust.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the \Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the \Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the \Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the \Matern Cluster point process to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical pair correlation function of the \Matern Cluster process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The \Matern Cluster point process is described in \Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. 
The theoretical pair correlation function of the \Matern Cluster process is \deqn{ g(r) = 1 + \frac 1 {4\pi R \kappa r} h(\frac{r}{2R}) }{ g(r) = 1 + h(r/(2*R))/(4 * pi * R * kappa * r) } where the radius R is the parameter \code{scale} and \deqn{ h(z) = \frac {16} \pi [ z \mbox{arccos}(z) - z^2 \sqrt{1 - z^2} ] }{ h(z) = (16/pi) * (z * arccos(z) - z^2 * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 0} for \eqn{z > 1}. The theoretical intensity of the \Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The \Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous \Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. 
There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{matclust.estK}}, \code{\link{thomas.estpcf}}, \code{\link{thomas.estK}}, \code{\link{lgcp.estK}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estpcf(redwood, c(kappa=10, R=0.1)) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/fitin.Rd0000644000176200001440000000472313333543263014303 0ustar liggesusers\name{fitin.ppm} \alias{fitin} \alias{fitin.ppm} \alias{fitin.profilepl} \title{Extract the Interaction from a Fitted Point Process Model} \description{ Given a point process model that has been fitted to point pattern data, this function extracts the interpoint interaction part of the model as a separate object. } \usage{ fitin(object) \method{fitin}{ppm}(object) \method{fitin}{profilepl}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"} or \code{"profilepl"}). } } \details{ An object of class \code{"ppm"} describes a fitted point process model. It contains information about the original data to which the model was fitted, the spatial trend that was fitted, the interpoint interaction that was fitted, and other data. 
See \code{\link{ppm.object}}) for details of this class. The function \code{fitin} extracts from this model the information about the fitted interpoint interaction only. The information is organised as an object of class \code{"fii"} (fitted interpoint interaction). This object can be printed or plotted. Users may find this a convenient way to plot the fitted interpoint interaction term, as shown in the Examples. For a pairwise interaction, the plot of the fitted interaction shows the pair interaction function (the contribution to the probability density from a pair of points as a function of the distance between them). For a higher-order interaction, the plot shows the strongest interaction (the value most different from 1) that could ever arise at the given distance. The fitted interaction coefficients can also be extracted from this object using \code{\link{coef}}. } \value{ An object of class \code{"fii"} representing the fitted interpoint interaction. This object can be printed and plotted. } \author{ \spatstatAuthors. } \seealso{ Methods for handling fitted interactions: \code{\link{methods.fii}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}}. Background: \code{\link{ppm}}, \code{\link{ppm.object}}. } \examples{ # unmarked model <- ppm(swedishpines ~1, PairPiece(seq(3,19,by=4))) f <- fitin(model) f plot(f) # extract fitted interaction coefficients coef(f) # multitype # fit the stationary multitype Strauss process to `amacrine' r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2) model <- ppm(amacrine ~1, MultiStrauss(r)) f <- fitin(model) f plot(f) } \keyword{spatial} \keyword{models} spatstat/man/rectdistmap.Rd0000644000176200001440000000213313333543264015503 0ustar liggesusers\name{rectdistmap} \alias{rectdistmap} \title{ Distance Map Using Rectangular Distance Metric } \description{ Computes the distance map of a spatial region based on the rectangular distance metric. 
} \usage{ rectdistmap(X, asp = 1, npasses=1, verbose=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}). } \item{asp}{ Aspect ratio for the metric. See Details. } \item{npasses}{ Experimental. } \item{verbose}{ Logical value indicating whether to print trace information. } } \details{ This function computes the distance map of the spatial region \code{X} using the rectangular distance metric with aspect ratio \code{asp}. This metric is defined so that the set of all points lying at most 1 unit away from the origin (according to the metric) form a rectangle of width 1 and height \code{asp}. } \value{ A pixel image (object of class \code{"im"}). } \author{ \adrian } \seealso{ \code{\link{distmap}} } \examples{ V <- letterR Frame(V) <- grow.rectangle(Frame(V), 0.5) plot(rectdistmap(V)) } \keyword{spatial} \keyword{math} spatstat/man/as.linnet.linim.Rd0000644000176200001440000000316113333543262016166 0ustar liggesusers\name{as.linnet.linim} \alias{as.linnet.lpp} \alias{as.linnet.linim} \alias{as.linnet.linfun} \alias{as.linnet.lintess} \title{ Extract Linear Network from Data on a Linear Network } \description{ Given some kind of data on a linear network, the command \code{as.linnet} extracts the linear network itself. } \usage{ \method{as.linnet}{linim}(X, \dots) \method{as.linnet}{linfun}(X, \dots) \method{as.linnet}{lintess}(X, \dots) \method{as.linnet}{lpp}(X, \dots, fatal=TRUE, sparse) } \arguments{ \item{X}{ Data on a linear network. A point pattern (class \code{"lpp"}), pixel image (class \code{"linim"}), function (class \code{"linfun"}) or tessellation (class \code{"lintess"}) on a linear network. } \item{\dots}{ Ignored. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. 
Default is to keep the same representation as in \code{X}. } } \details{ These are methods for the generic \code{\link{as.linnet}} for various classes. The network on which the data are defined is extracted. } \value{ A linear network (object of class \code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{methods.linnet}}. } \examples{ # make some data xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet) as.linnet(xcoord) X <- as.linim(xcoord) as.linnet(X) } \keyword{spatial} \keyword{manip} spatstat/man/ragsMultiHard.Rd0000644000176200001440000000543213333543264015737 0ustar liggesusers\name{ragsMultiHard} \alias{ragsMultiHard} \title{ Alternating Gibbs Sampler for Multitype Hard Core Process } \description{ Generate a realisation of the multitype hard core point process using the alternating Gibbs sampler. } \usage{ ragsMultiHard(beta, hradii, \dots, types=NULL, bmax = NULL, periodic=FALSE, ncycles = 100) } \arguments{ \item{beta}{ First order trend. A numeric vector, a pixel image, a function, a list of functions, or a list of pixel images. } \item{hradii}{ Matrix of hard core radii between each pair of types. Diagonal entries should be \code{0} or \code{NA}. } \item{types}{ Vector of all possible types for the multitype point pattern. } \item{\dots}{ Arguments passed to \code{\link{rmpoispp}} when generating random points. } \item{bmax}{ Optional upper bound on \code{beta}. } \item{periodic}{ Logical value indicating whether to measure distances in the periodic sense, so that opposite sides of the (rectangular) window are treated as identical. } \item{ncycles}{ Number of cycles of the sampler to be performed. } } \details{ The Alternating Gibbs Sampler for a multitype point process is an iterative simulation procedure. 
Each step of the sampler updates the pattern of points of a particular type \code{i}, by drawing a realisation from the conditional distribution of points of type \code{i} given the points of all other types. Successive steps of the sampler update the points of type 1, then type 2, type 3, and so on. This is an experimental implementation which currently works only for multitype hard core processes (see \code{\link{MultiHard}}) in which there is no interaction between points of the same type, and for the area-interaction process (see \code{\link{ragsAreaInter}}). The argument \code{beta} gives the first order trend for each possible type of point. It may be a single number, a numeric vector, a \code{function(x,y)}, a pixel image, a list of functions, a \code{function(x,y,m)}, or a list of pixel images. The argument \code{hradii} is the matrix of hard core radii between each pair of possible types of points. Two points of types \code{i} and \code{j} respectively are forbidden to lie closer than a distance \code{hradii[i,j]} apart. The diagonal of this matrix must contain \code{NA} or \code{0} values, indicating that there is no hard core constraint applying between points of the same type. } \value{ A point pattern (object of class \code{"ppp"}). } \author{ \adrian } \seealso{ \code{\link{rags}}, \code{\link{ragsAreaInter}} } \examples{ b <- c(30,20) h <- 0.05 * matrix(c(0,1,1,0), 2, 2) ragsMultiHard(b, h, ncycles=10) ragsMultiHard(b, h, ncycles=5, periodic=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/bw.relrisk.Rd0000644000176200001440000000726713544333571015265 0ustar liggesusers\name{bw.relrisk} \alias{bw.relrisk} \title{ Cross Validated Bandwidth Selection for Relative Risk Estimation } \description{ Uses cross-validation to select a smoothing bandwidth for the estimation of relative risk. 
} \usage{ bw.relrisk(X, method = "likelihood", nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{method}{ Character string determining the cross-validation method. Current options are \code{"likelihood"}, \code{"leastsquares"} or \code{"weightedleastsquares"}. } \item{nh}{ Number of trial values of smoothing bandwidth \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwidth \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth for the nonparametric estimation of relative risk using \code{\link{relrisk}}. Consider the indicators \eqn{y_{ij}}{y[i,j]} which equal \eqn{1} when data point \eqn{x_i}{x[i]} belongs to type \eqn{j}, and equal \eqn{0} otherwise. For a particular value of smoothing bandwidth, let \eqn{\hat p_j(u)}{p*[j](u)} be the estimated probabilities that a point at location \eqn{u} will belong to type \eqn{j}. Then the bandwidth is chosen to minimise either the negative likelihood, the squared error, or the approximately standardised squared error, of the indicators \eqn{y_{ij}}{y[i,j]} relative to the fitted values \eqn{\hat p_j(x_i)}{p*[j](x[i])}. See Diggle (2003) or Baddeley et al (2015). The result is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. 
There is a sensible default, based on multiples of Stoyan's rule of thumb \code{\link{bw.stoyan}}. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{relrisk}}, \code{\link{bw.stoyan}} } \examples{ data(urkiola) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.relrisk(urkiola) b plot(b) b <- bw.relrisk(urkiola, hmax=20) plot(b) \testonly{spatstat.options(op)} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/markvario.Rd0000644000176200001440000000721413613547037015167 0ustar liggesusers\name{markvario} \alias{markvario} \title{Mark Variogram} \description{ Estimate the mark variogram of a marked point pattern. } \usage{ markvario(X, correction = c("isotropic", "Ripley", "translate"), r = NULL, method = "density", ..., normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. It must have marks which are numeric. 
} \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} should be evaluated. There is a sensible default. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Other arguments passed to \code{\link{markcorr}}, or passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{If \code{TRUE}, normalise the variogram by dividing it by the estimated mark variance. } } \details{ The mark variogram \eqn{\gamma(r)}{gamma(r)} of a marked point process \eqn{X} is a measure of the dependence between the marks of two points of the process a distance \eqn{r} apart. It is informally defined as \deqn{ \gamma(r) = E[\frac 1 2 (M_1 - M_2)^2] }{ gamma(r) = E[(1/2) * (M1 - M2)^2 ] } where \eqn{E[ ]} denotes expectation and \eqn{M_1,M_2}{M1,M2} are the marks attached to two points of the process a distance \eqn{r} apart. The mark variogram of a marked point process is analogous, but \bold{not equivalent}, to the variogram of a random field in geostatistics. See Waelder and Stoyan (1996). } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} has been estimated } \item{theo}{the theoretical value of \eqn{\gamma(r)}{gamma(r)} when the marks attached to different points are independent; equal to the sample variance of the marks } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{\gamma(r)}{gamma(r)} obtained by the edge corrections named. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Mase, S. (1996) The threshold method for estimating annual rainfall. \emph{Annals of the Institute of Statistical Mathematics} \bold{48} (1996) 201-213. Waelder, O. and Stoyan, D. (1996) On variograms in point process statistics. \emph{Biometrical Journal} \bold{38} (1996) 895-905. } \seealso{ Mark correlation function \code{\link{markcorr}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ # Longleaf Pine data # marks represent tree diameter data(longleaf) # Subset of this large pattern swcorner <- owin(c(0,100),c(0,100)) sub <- longleaf[ , swcorner] # mark correlation function mv <- markvario(sub) plot(mv) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/envelope.envelope.Rd0000644000176200001440000000656113333543263016625 0ustar liggesusers\name{envelope.envelope} \alias{envelope.envelope} \title{ Recompute Envelopes } \description{ Given a simulation envelope (object of class \code{"envelope"}), compute another envelope from the same simulation data using different parameters. 
} \usage{ \method{envelope}{envelope}(Y, fun = NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) } \arguments{ \item{Y}{ A simulation envelope (object of class \code{"envelope"}). } \item{fun}{ Optional. Summary function to be applied to the simulated point patterns. } \item{\dots,transform,global,VARIANCE}{ Parameters controlling the type of envelope that is re-computed. See \code{\link{envelope}}. } } \details{ This function can be used to re-compute a simulation envelope from previously simulated data, using different parameter settings for the envelope: for example, a different significance level, or a global envelope instead of a pointwise envelope. The function \code{\link{envelope}} is generic. This is the method for the class \code{"envelope"}. The argument \code{Y} should be a simulation envelope (object of class \code{"envelope"}) produced by any of the methods for \code{\link{envelope}}. Additionally, \code{Y} must contain either \itemize{ \item the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savepatterns=TRUE}); \item the summary functions of the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savefuns=TRUE}). } If the argument \code{fun} is given, it should be a summary function that can be applied to the simulated point patterns that were used to create \code{Y}. The envelope of the summary function \code{fun} for these point patterns will be computed using the parameters specified in \code{\dots}. If \code{fun} is not given, then: \itemize{ \item If \code{Y} contains the summary functions that were used to compute the original envelope, then the new envelope will be computed from these original summary functions. \item Otherwise, if \code{Y} contains the simulated point patterns,
then the \eqn{K} function \code{\link{Kest}} will be applied to each of these simulated point patterns, and the new envelope will be based on the \eqn{K} functions. } The new envelope will be computed using the parameters specified in \code{\dots}. See \code{\link{envelope}} for a full list of envelope parameters. Frequently-used parameters include \code{nrank} and \code{nsim} (to change the number of simulations used and the significance level of the envelope), \code{global} (to change from pointwise to global envelopes) and \code{VARIANCE} (to compute the envelopes from the sample moments instead of the ranks). } \value{ An envelope (object of class \code{"envelope"}). } \seealso{ \code{\link{envelope}} } \examples{ E <- envelope(cells, Kest, nsim=19, savefuns=TRUE, savepatterns=TRUE) E2 <- envelope(E, nrank=2) Eg <- envelope(E, global=TRUE) EG <- envelope(E, Gest) EL <- envelope(E, transform=expression(sqrt(./pi))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/MinkowskiSum.Rd0000644000176200001440000000730413333543262015627 0ustar liggesusers\name{MinkowskiSum} \alias{MinkowskiSum} \alias{\%(+)\%} %DoNotExport %NAMESPACE export("%(+)%") \alias{dilationAny} \title{Minkowski Sum of Windows} \description{ Compute the Minkowski sum of two spatial windows. } \usage{ MinkowskiSum(A, B) A \%(+)\% B dilationAny(A, B) } \arguments{ \item{A,B}{ Windows (objects of class \code{"owin"}), point patterns (objects of class \code{"ppp"}) or line segment patterns (objects of class \code{"psp"}) in any combination. } } \value{ A window (object of class \code{"owin"}) except that if \code{A} is a point pattern, then the result is an object of the same type as \code{B} (and vice versa). } \details{ The operator \code{A \%(+)\% B} and function \code{MinkowskiSum(A,B)} are synonymous: they both compute the Minkowski sum of the windows \code{A} and \code{B}.
The function \code{dilationAny} computes the Minkowski dilation \code{A \%(+)\% reflect(B)}. The Minkowski sum of two spatial regions \eqn{A} and \eqn{B} is another region, formed by taking all possible pairs of points, one in \eqn{A} and one in \eqn{B}, and adding them as vectors. The Minkowski Sum \eqn{A \oplus B}{A \%(+)\% B} is the set of all points \eqn{a+b} where \eqn{a} is in \eqn{A} and \eqn{b} is in \eqn{B}. A few common facts about the Minkowski sum are: \itemize{ \item The sum is symmetric: \eqn{A \oplus B = B \oplus A}{A \%(+)\% B = B \%(+)\% A}. \item If \eqn{B} is a single point, then \eqn{A \oplus B}{A \%(+)\% B} is a shifted copy of \eqn{A}. \item If \eqn{A} is a square of side length \eqn{a}, and \eqn{B} is a square of side length \eqn{b}, with sides that are parallel to the coordinate axes, then \eqn{A \oplus B}{A \%(+)\% B} is a square of side length \eqn{a+b}. \item If \eqn{A} and \eqn{B} are discs of radius \eqn{r} and \eqn{s} respectively, then \eqn{A \oplus B}{A \%(+)\% B} is a disc of radius \eqn{r+s}. \item If \eqn{B} is a disc of radius \eqn{r} centred at the origin, then \eqn{A \oplus B}{A \%(+)\% B} is equivalent to the \emph{morphological dilation} of \eqn{A} by distance \eqn{r}. See \code{\link{dilation}}. } The Minkowski dilation is the closely-related region \eqn{A \oplus (-B)}{A \%(+)\% (-B)} where \eqn{(-B)} is the reflection of \eqn{B} through the origin. The Minkowski dilation is the set of all vectors \eqn{z} such that, if \eqn{B} is shifted by \eqn{z}, the resulting set \eqn{B+z} has nonempty intersection with \eqn{A}. The algorithm currently computes the result as a polygonal window using the \pkg{polyclip} library. It will be quite slow if applied to binary mask windows. The arguments \code{A} and \code{B} can also be point patterns or line segment patterns. These are interpreted as spatial regions, the Minkowski sum is computed, and the result is returned as an object of the most appropriate type.
The Minkowski sum of two point patterns is another point pattern. The Minkowski sum of a point pattern and a line segment pattern is another line segment pattern. } \seealso{ \code{\link{dilation}}, \code{\link{erosionAny}} } \examples{ B <- square(0.2) RplusB <- letterR \%(+)\% B opa <- par(mfrow=c(1,2)) FR <- grow.rectangle(Frame(letterR), 0.3) plot(FR, main="") plot(letterR, add=TRUE, lwd=2, hatch=TRUE, hatchargs=list(texture=5)) plot(shift(B, vec=c(3.675, 3)), add=TRUE, border="red", lwd=2) plot(FR, main="") plot(letterR, add=TRUE, lwd=2, hatch=TRUE, hatchargs=list(texture=5)) plot(RplusB, add=TRUE, border="blue", lwd=2, hatch=TRUE, hatchargs=list(col="blue")) par(opa) plot(cells \%(+)\% square(0.1)) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/rmhexpand.Rd0000644000176200001440000001273213333543264015160 0ustar liggesusers\name{rmhexpand} \alias{rmhexpand} \title{ Specify Simulation Window or Expansion Rule } \description{ Specify a spatial domain in which point process simulations will be performed. Alternatively, specify a rule which will be used to determine the simulation window. } \usage{ rmhexpand(x = NULL, ..., area = NULL, length = NULL, distance = NULL) } \arguments{ \item{x}{ Any kind of data determining the simulation window or the expansion rule. A window (object of class \code{"owin"}) specifying the simulation window, a numerical value specifying an expansion factor or expansion distance, a list containing one numerical value, an object of class \code{"rmhexpand"}, or \code{NULL}. } \item{\dots}{ Ignored. } \item{area}{ Area expansion factor. Incompatible with other arguments. } \item{length}{ Length expansion factor. Incompatible with other arguments. } \item{distance}{ Expansion distance (buffer width). Incompatible with other arguments. 
} } \details{ In the Metropolis-Hastings algorithm \code{\link{rmh}} for simulating spatial point processes, simulations are usually carried out on a spatial domain that is larger than the original window of the point process model, then subsequently clipped to the original window. The command \code{rmhexpand} can be used to specify the simulation window, or to specify a rule which will later be used to determine the simulation window from data. The arguments are all incompatible: at most one of them should be given. If the first argument \code{x} is given, it may be any of the following: \itemize{ \item a window (object of class \code{"owin"}) specifying the simulation window. \item an object of class \code{"rmhexpand"} specifying the expansion rule. \item a single numerical value, without attributes. This will be interpreted as the value of the argument \code{area}. \item either \code{c(area=v)} or \code{list(area=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{area}. \item either \code{c(length=v)} or \code{list(length=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{length}. \item either \code{c(distance=v)} or \code{list(distance=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{distance}. \item \code{NULL}, meaning that the expansion rule is not yet determined. } If one of the arguments \code{area}, \code{length} or \code{distance} is given, then the simulation window is determined from the original data window as follows. \describe{ \item{area}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{area} should be a numerical value, greater than or equal to 1. It specifies the area expansion factor, i.e. 
the ratio of the area of the simulation window to the area of the original point process window's bounding box. } \item{length}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{length} should be a numerical value, greater than or equal to 1. It specifies the length expansion factor, i.e. the ratio of the width (height) of the simulation window to the width (height) of the original point process window's bounding box. } \item{distance}{ The argument \code{distance} should be a numerical value, greater than or equal to 0. It specifies the width of a buffer region around the original data window. If the original data window is a rectangle, then this window is extended by a margin of width equal to \code{distance} around all sides of the original rectangle. The result is a rectangle. If the original data window is not a rectangle, then morphological dilation is applied using \code{\link{dilation.owin}} so that a margin or buffer of width equal to \code{distance} is created around all sides of the original window. The result is a non-rectangular window, typically of a different shape. } } } \section{Undetermined expansion}{ If \code{expand=NULL}, this is interpreted to mean that the expansion rule is \dQuote{not yet decided}. Expansion will be decided later, by the simulation algorithm \code{\link{rmh}}. If the model cannot be expanded (for example if the covariate data in the model are not available on a larger domain) then expansion will not occur. If the model can be expanded, then if the point process model has a finite interaction range \code{r}, the default is \code{rmhexpand(distance=2*r)}, and otherwise \code{rmhexpand(area=2)}. } \value{ An object of class \code{"rmhexpand"} specifying the expansion rule. There is a \code{print} method for this class. } \author{\adrian and \rolf } \seealso{ \code{\link{expand.owin}} to apply the rule to a window. 
\code{\link{will.expand}} to test whether expansion will occur. \code{\link{rmh}}, \code{\link{rmhcontrol}} for background details. } \examples{ rmhexpand() rmhexpand(2) rmhexpand(1) rmhexpand(length=1.5) rmhexpand(distance=0.1) rmhexpand(letterR) } \keyword{spatial} \keyword{datagen} spatstat/man/as.function.fv.Rd0000644000176200001440000000741313333543262016031 0ustar liggesusers\name{as.function.fv} \alias{as.function.fv} \alias{as.function.rhohat} \title{ Convert Function Value Table to Function } \description{ Converts an object of class \code{"fv"} to an \R language function. } \usage{ \method{as.function}{fv}(x, ..., value=".y", extrapolate=FALSE) \method{as.function}{rhohat}(x, ..., value=".y", extrapolate=TRUE) } \arguments{ \item{x}{ Object of class \code{"fv"} or \code{"rhohat"}. } \item{\dots}{ Ignored. } \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. } } \details{ A function value table (object of class \code{"fv"}) is a convenient way of storing and plotting several different estimates of the same function. Objects of this class are returned by many commands in \pkg{spatstat}, such as \code{\link{Kest}} which returns an estimate of Ripley's \eqn{K}-function for a point pattern dataset. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.fv}. It converts an object \code{x} of class \code{"fv"} to an \R function \code{f}. If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. 
Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). To apply different rules to the left and right extremes, use \code{extrapolate=c(TRUE,FALSE)} and so on. Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the original argument of the function table). If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}. The command \code{as.function.fv} is a method for the generic command \code{\link{as.function}}. } \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table.
} \author{ \adrian and \rolf } \seealso{ \code{\link{fv}}, \code{\link{fv.object}}, \code{\link{fvnames}}, \code{\link{plot.fv}}, \code{\link{Kest}} } \examples{ K <- Kest(cells) f <- as.function(K) f f(0.1) g <- as.function(K, value=c("iso", "trans")) g g(0.1, "trans") } \keyword{spatial} \keyword{methods} spatstat/man/pool.fv.Rd0000644000176200001440000000317213333543264014553 0ustar liggesusers\name{pool.fv} \alias{pool.fv} \title{Pool Several Functions} \description{ Combine several summary functions into a single function. } \usage{ \method{pool}{fv}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"fv"}. } \item{weights}{ Optional numeric vector of weights for the functions. } \item{relabel}{ Logical value indicating whether the columns of the resulting function should be labelled to show that they were obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fv"} of summary functions. It is used to combine several estimates of the same function into a single function. Each of the arguments \code{\dots} must be an object of class \code{"fv"}. They must be compatible, in that they are estimates of the same function, and were computed using the same options. The sample mean and sample variance of the corresponding estimates will be computed. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pool}}, \code{\link{pool.anylist}}, \code{\link{pool.rat}} } \examples{ K <- lapply(waterstriders, Kest, correction="iso") Kall <- pool(K[[1]], K[[2]], K[[3]]) Kall <- pool(as.anylist(K)) plot(Kall, cbind(pooliso, pooltheo) ~ r, shade=c("loiso", "hiiso"), main="Pooled K function of waterstriders") } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/intensity.dppm.Rd0000644000176200001440000000133213333543263016150 0ustar liggesusers\name{intensity.dppm} \alias{intensity.dppm} \alias{intensity.detpointprocfamily} \title{Intensity of Determinantal Point Process Model} \description{Extracts the intensity of a determinantal point process model.} \usage{ \method{intensity}{detpointprocfamily}(X, \dots) \method{intensity}{dppm}(X, \dots) } \arguments{ \item{X}{ A determinantal point process model (object of class \code{"detpointprocfamily"} or \code{"dppm"}). } \item{\dots}{Ignored.} } \value{ A numeric value (if the model is stationary), a pixel image (if the model is non-stationary) or \code{NA} if the intensity is unknown for the model. } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/predict.slrm.Rd0000644000176200001440000000536713333543264015606 0ustar liggesusers\name{predict.slrm} \Rdversion{1.1} \alias{predict.slrm} \title{ Predicted or Fitted Values from Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel, or the fitted point process intensity, or the values of the linear predictor in each pixel. } \usage{ \method{predict}{slrm}(object, ..., type = "intensity", newdata=NULL, window=NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Optional arguments passed to \code{\link{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } \item{newdata}{ Optional. List containing new covariate values for the prediction. See Details. } \item{window}{ Optional. New window in which to predict. An object of class \code{"owin"}. 
} } \details{ This is a method for \code{\link[stats]{predict}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The argument \code{type} determines which quantity is computed. If \code{type="intensity"}, the value of the point process intensity is computed at each pixel. If \code{type="probabilities"}, the probability of the presence of a random point in each pixel is computed. If \code{type="link"}, the value of the linear predictor is computed at each pixel. If \code{newdata = NULL} (the default), the algorithm computes fitted values of the model (based on the data that was originally used to fit the model \code{object}). If \code{newdata} is given, the algorithm computes predicted values of the model, using the new values of the covariates provided by \code{newdata}. The argument \code{newdata} should be a list; names of entries in the list should correspond to variables appearing in the model formula of the \code{object}. Each list entry may be a pixel image or a single numeric value. } \value{ A pixel image (object of class \code{"im"}) containing the predicted values for each pixel.
} \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(predict(fit)) data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fitc <- slrm(X ~ Z) pc <- predict(fitc) Znew <- distmap(copper$Lines)[copper$SouthWindow] pcnew <- predict(fitc, newdata=list(Z=Znew)) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/nncorr.Rd0000644000176200001440000002000313421506143014452 0ustar liggesusers\name{nncorr} \alias{nncorr} \alias{nnmean} \alias{nnvario} \title{Nearest-Neighbour Correlation Indices of Marked Point Pattern} \description{ Computes nearest-neighbour correlation indices of a marked point pattern, including the nearest-neighbour mark product index (default case of \code{nncorr}), the nearest-neighbour mark index (\code{nnmean}), and the nearest-neighbour variogram index (\code{nnvario}). } \usage{ nncorr(X, f = function(m1, m2) { m1 * m2 }, k = 1, \dots, use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL, na.action="warn") nnmean(X, k=1, na.action="warn") nnvario(X, k=1, na.action="warn") } \arguments{ \item{X}{ The observed point pattern. An object of class \code{"ppp"}. } \item{f}{ Function \eqn{f} used in the definition of the nearest neighbour correlation. There is a sensible default that depends on the type of marks of \code{X}. } \item{k}{ Integer. The \code{k}-th nearest neighbour of each point will be used. } \item{\dots}{ Extra arguments passed to \code{f}. } \item{use,method}{ Arguments passed to the standard correlation function \code{\link{cor}}. } \item{denominator}{ Internal use only. } \item{na.action}{ Character string (passed to \code{\link{is.marked.ppp}}) specifying what to do if the marks contain \code{NA} values. 
} } \details{ The nearest neighbour correlation index \eqn{\bar n_f}{nbar} of a marked point process \eqn{X} is a number measuring the dependence between the mark of a typical point and the mark of its nearest neighbour. The command \code{nncorr} computes the nearest neighbour correlation index based on any test function \code{f} provided by the user. The default behaviour of \code{nncorr} is to compute the nearest neighbour mark product index. The commands \code{nnmean} and \code{nnvario} are convenient abbreviations for other special choices of \code{f}. In the default case, \code{nncorr(X)} computes three different versions of the nearest-neighbour correlation index: the unnormalised, normalised, and classical correlations. \describe{ \item{unnormalised:}{ The \bold{unnormalised} nearest neighbour correlation (Stoyan and Stoyan, 1994, section 14.7) is defined as \deqn{\bar n_f = E[f(M, M^\ast)]}{nbar[f] = E[f(M, M*)]} where \eqn{E[]} denotes mean value, \eqn{M} is the mark attached to a typical point of the point process, and \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour (i.e. the nearest other point of the point process). Here \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2).} For example, in the second case, the unnormalised nearest neighbour correlation \eqn{\bar n_f}{nbar[f]} equals the proportion of points in the pattern which have the same mark as their nearest neighbour. Note that \eqn{\bar n_f}{nbar[f]} is not a ``correlation'' in the usual statistical sense. It can take values greater than 1. 
} \item{normalised:}{ We can define a \bold{normalised} nearest neighbour correlation by \deqn{\bar m_f = \frac{E[f(M,M^\ast)]}{E[f(M,M')]}}{mbar[f] = E[f(M,M*)]/E[f(M,M')]} where again \eqn{M} is the mark attached to a typical point, \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour, and \eqn{M'} is an independent copy of \eqn{M} with the same distribution. This normalisation is also not a ``correlation'' in the usual statistical sense, but is normalised so that the value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{\bar m_f = 1}{mbar[f] = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. } \item{classical:}{ Finally if the marks of \code{X} are real numbers, we can also compute the \bold{classical} correlation, that is, the correlation coefficient of the two random variables \eqn{M} and \eqn{M^\ast}{M*}. The classical correlation has a value between \eqn{-1} and \eqn{1}. Values close to \eqn{-1} or \eqn{1} indicate strong dependence between the marks. } } In the default case where \code{f} is not given, \code{nncorr(X)} computes \itemize{ \item If the marks of \code{X} are real numbers, the unnormalised and normalised versions of the nearest-neighbour product index \eqn{E[M \, M^\ast]}{E[M * M*]}, and the classical correlation between \eqn{M} and \eqn{M^\ast}{M*}. \item If the marks of \code{X} are factor valued, the unnormalised and normalised versions of the nearest-neighbour equality index \eqn{P[M = M^\ast]}{P[M = M*]}. } The wrapper functions \code{nnmean} and \code{nnvario} compute the correlation indices for two special choices of the function \eqn{f(m_1,m_2)}{f(m1,m2)}. They are defined only when the marks are numeric. \itemize{ \item \code{nnmean} computes the correlation indices for \eqn{f(m_1,m_2) = m_1}{f(m1,m2) = m1}. 
The unnormalised index is simply the mean value of the mark of the neighbour of a typical point, \eqn{E[M^\ast]}{E[M*]}, while the normalised index is \eqn{E[M^\ast]/E[M]}{E[M*]/E[M]}, the ratio of the mean mark of the neighbour of a typical point to the mean mark of a typical point. \item \code{nnvario} computes the correlation indices for \eqn{f(m_1,m_2) = (1/2) (m_1-m_2)^2}{f(m1,m2) = (1/2) * (m1-m2)^2}. } The argument \code{X} must be a point pattern (object of class \code{"ppp"}) and must be a marked point pattern. (The marks may be a data frame, containing several columns of mark variables; each column is treated separately.) If the argument \code{f} is given, it must be a function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative. The arguments \code{use} and \code{method} control the calculation of the classical correlation using \code{\link{cor}}, as explained in the help file for \code{\link{cor}}. Other arguments may be passed to \code{f} through the \code{...} argument. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated using the \sQuote{border method} edge correction. } \value{ Labelled vector of length 2 or 3 containing the unnormalised and normalised nearest neighbour correlations, and the classical correlation if appropriate. Alternatively a matrix with 2 or 3 rows, containing this information for each mark variable. 
} \examples{ nnmean(finpines) nnvario(finpines) nncorr(finpines) # heights of neighbouring trees are slightly negatively correlated nncorr(amacrine) # neighbouring cells are usually of different type } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/lintess.Rd0000644000176200001440000000636013543023021014640 0ustar liggesusers\name{lintess} \alias{lintess} \title{ Tessellation on a Linear Network } \description{ Create a tessellation on a linear network. } \usage{ lintess(L, df, marks=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{df}{ Data frame of local coordinates for the pieces that make up the tiles of the tessellation. See Details. } \item{marks}{ Vector or data frame of marks associated with the tiles of the tessellation. } } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. The data frame \code{df} should have columns named \code{seg}, \code{t0}, \code{t1} and \code{tile}. Any additional columns will be ignored. Each row of the data frame specifies one sub-segment of the network and allocates it to a particular tile. The \code{seg} column specifies which line segment of the network contains the sub-segment. Values of \code{seg} are integer indices for the segments in \code{as.psp(L)}. The \code{t0} and \code{t1} columns specify the start and end points of the sub-segment. They should be numeric values between 0 and 1 inclusive, where the values 0 and 1 represent the network vertices that are joined by this network segment. The \code{tile} column specifies which tile of the tessellation includes this sub-segment.
It will be coerced to a factor and its levels will be the names of the tiles. If \code{df} is missing or \code{NULL}, the result is a tessellation with only one tile, consisting of the entire network \code{L}. Additional data called \emph{marks} may be associated with each tile of the tessellation. The argument \code{marks} should be a vector with one entry for each tile (that is, one entry for each level of \code{df$tile}) or a data frame with one row for each tile. In general \code{df} and \code{marks} will have different numbers of rows. } \value{ An object of class \code{"lintess"}. There are methods for \code{print}, \code{plot} and \code{summary} for this object. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{linnet}} for linear networks. \code{\link{plot.lintess}} for plotting. \code{\link{divide.linnet}} to make a tessellation demarcated by given points. \code{\link{lineardirichlet}} to create the Dirichlet-Voronoi tessellation from a point pattern on a linear network. \code{\link{as.linfun.lintess}}, \code{\link{as.linnet.lintess}} and \code{\link{as.linim}} to convert to other classes. \code{\link{tile.lengths}} to compute the length of each tile in the tessellation. The undocumented methods \code{Window.lintess} and \code{as.owin.lintess} extract the spatial window. 
} \examples{ # tessellation consisting of one tile for each existing segment ns <- nsegments(simplenet) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) u <- lintess(simplenet, df) u plot(u) S <- as.psp(simplenet) marks(u) <- data.frame(len=lengths.psp(S), ang=angles.psp(S)) u plot(u) } \keyword{spatial} \keyword{datagen} spatstat/man/clusterkernel.Rd0000644000176200001440000000231713333543263016051 0ustar liggesusers\name{clusterkernel} \alias{clusterkernel} \alias{clusterkernel.character} \alias{clusterkernel.kppm} \title{ Extract Cluster Offspring Kernel } \description{ Given a cluster point process model, this command returns the probability density of the cluster offspring. } \usage{ clusterkernel(model, \dots) \method{clusterkernel}{kppm}(model, \dots) \method{clusterkernel}{character}(model, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } } \details{ Given a specification of a cluster point process model, this command returns a \code{function(x,y)} giving the two-dimensional probability density of the cluster offspring points assuming a cluster parent located at the origin. } \value{ A function in the \R\ language with arguments \code{x,y,\dots}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{clusterfield}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "MatClust") f <- clusterkernel(fit) f(0.1, 0.2) } \keyword{spatial} spatstat/man/coef.ppm.Rd0000644000176200001440000000370613333543263014701 0ustar liggesusers\name{coef.ppm} \alias{coef.ppm} \title{ Coefficients of Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, extract the coefficients of the fitted model. A method for \code{coef}. 
} \usage{ \method{coef}{ppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } } \value{ A vector containing the fitted coefficients. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{\theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ \lambda(u,x) = exp(\theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. Use \code{\link{print.ppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{print.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ data(cells) poi <- ppm(cells, ~1, Poisson()) coef(poi) # This is the log of the fitted intensity of the Poisson process stra <- ppm(cells, ~1, Strauss(r=0.07)) coef(stra) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/with.msr.Rd0000644000176200001440000000536313333543265014750 0ustar liggesusers\name{with.msr} \alias{with.msr} \title{Evaluate Expression Involving Components of a Measure} \description{ An expression involving the names of components of a measure is evaluated. 
} \usage{ \method{with}{msr}(data, expr, \dots) } \arguments{ \item{data}{ A measure (object of class \code{"msr"}). } \item{expr}{ An expression to be evaluated. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[base]{with}} for the class \code{"msr"}. The argument \code{data} should be an object of class \code{"msr"} representing a measure (a function which assigns a value to each subset of two-dimensional space). This function can be used to extract the components of the measure, or to perform more complicated manipulations of the components. The argument \code{expr} should be an un-evaluated expression in the \R language. The expression may involve any of the variable names listed below with their corresponding meanings. \tabular{ll}{ \code{qlocations} \tab (point pattern) all quadrature locations \cr \code{qweights} \tab (numeric) all quadrature weights \cr \code{density} \tab (numeric) density value at each quadrature point \cr \code{discrete} \tab (numeric) discrete mass at each quadrature point \cr \code{continuous} \tab (numeric) increment of continuous component \cr \code{increment} \tab (numeric) increment of measure \cr \code{is.atom} \tab (logical) whether quadrature point is an atom \cr \code{atoms} \tab (point pattern) locations of atoms \cr \code{atommass} \tab (numeric) massess of atoms } The measure is the sum of discrete and continuous components. The discrete component assigns non-zero mass to several points called atoms. The continuous component has a density which should be integrated over a region to determine the value for that region. An object of class \code{"msr"} approximates the continuous component by a sum over quadrature points. The quadrature points are chosen so that they include the atoms of the measure. 
In the list above, we have \code{increment = continuous + discrete}, \code{continuous = density * qweights}, \code{is.atom = (discrete > 0)}, \code{atoms = qlocations[is.atom]} and \code{atommass = discrete[is.atom]}. } \value{ The result of evaluating the expression could be an object of any kind. } \author{ \spatstatAuthors. } \seealso{ \code{\link{msr}}, \code{\link{split.msr}}, \code{\link{measureContinuous}}, \code{\link{measurePositive}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") with(rp, atoms) with(rp, qlocations \%mark\% continuous) } \keyword{spatial} \keyword{manip} spatstat/man/plot.quadratcount.Rd0000644000176200001440000000441013333543264016653 0ustar liggesusers\name{plot.quadratcount} \alias{plot.quadratcount} \title{ Plot Quadrat Counts } \description{ Given a table of quadrat counts for a spatial point pattern, plot the quadrats which were used, and display the quadrat count as text in the centre of each quadrat. } \usage{ \method{plot}{quadratcount}(x, \dots, add = FALSE, entries = as.vector(t(as.table(x))), dx = 0, dy = 0, show.tiles = TRUE, textargs = list()) } \arguments{ \item{x}{ Object of class \code{"quadratcount"} produced by the function \code{\link{quadratcount}}. } \item{\dots}{ Additional arguments passed to \code{\link{plot.tess}} to plot the quadrats. } \item{add}{ Logical. Whether to add the graphics to an existing plot. } \item{entries}{ Vector of numbers to be plotted in each quadrat. The default is to plot the quadrat counts. } \item{dx,dy}{ Horizontal and vertical displacement of text relative to centroid of quadrat. } \item{show.tiles}{ Logical value indicating whether to plot the quadrats. } \item{textargs}{ List containing extra arguments passed to \code{\link[graphics]{text.default}} to control the annotation. } } \details{ This is the plot method for the objects of class \code{"quadratcount"} that are produced by the function \code{\link{quadratcount}}. 
Given a spatial point pattern, \code{\link{quadratcount}} divides the observation window into disjoint tiles or quadrats, counts the number of points in each quadrat, and stores the result as a contingency table which also belongs to the class \code{"quadratcount"}. First the quadrats are plotted (provided \code{show.tiles=TRUE}, the default). This display can be controlled by passing additional arguments \code{\dots} to \code{\link{plot.tess}}. Then the quadrat counts are printed using \code{\link[graphics]{text.default}}. This display can be controlled using the arguments \code{dx,dy} and \code{textargs}. } \value{ Null. } \seealso{ \code{\link{quadratcount}}, \code{\link{plot.tess}}, \code{\link[graphics]{text.default}}, \code{\link{plot.quadrattest}}. } \examples{ plot(quadratcount(swedishpines, 5)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/clusterset.Rd0000644000176200001440000001131013333543263015355 0ustar liggesusers\name{clusterset} \alias{clusterset} \title{ Allard-Fraley Estimator of Cluster Feature } \description{ Detect high-density features in a spatial point pattern using the (unrestricted) Allard-Fraley estimator. } \usage{ clusterset(X, what=c("marks", "domain"), \dots, verbose=TRUE, fast=FALSE, exact=!fast) } \arguments{ \item{X}{ A dimensional spatial point pattern (object of class \code{"ppp"}). } \item{what}{ Character string or character vector specifying the type of result. See Details. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical. If \code{FALSE} (the default), the Dirichlet tile areas will be computed exactly using polygonal geometry, so that the optimal choice of tiles will be computed exactly. If \code{TRUE}, the Dirichlet tile areas will be approximated using pixel counting, so the optimal choice will be approximate. } \item{exact}{ Logical. If \code{TRUE}, the Allard-Fraley estimator of the domain will be computed exactly using polygonal geometry. 
If \code{FALSE}, the Allard-Fraley estimator of the domain will be approximated by a binary pixel mask. The default is initially set to \code{FALSE}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution if \code{exact=FALSE}. } } \details{ Allard and Fraley (1997) developed a technique for recognising features of high density in a spatial point pattern in the presence of random clutter. This algorithm computes the \emph{unrestricted} Allard-Fraley estimator. The Dirichlet (Voronoi) tessellation of the point pattern \code{X} is computed. The smallest \code{m} Dirichlet cells are selected, where the number \code{m} is determined by a maximum likelihood criterion. \itemize{ \item If \code{fast=FALSE} (the default), the areas of the tiles of the Dirichlet tessellation will be computed exactly using polygonal geometry. This ensures that the optimal selection of tiles is computed exactly. \item If \code{fast=TRUE}, the Dirichlet tile areas will be approximated by counting pixels. This is faster, and is usually correct (depending on the pixel resolution, which is controlled by the arguments \code{\dots}). } The type of result depends on the character vector \code{what}. \itemize{ \item If \code{what="marks"} the result is the point pattern \code{X} with a vector of marks labelling each point with a value \code{yes} or \code{no} depending on whether the corresponding Dirichlet cell is selected by the Allard-Fraley estimator. In other words each point of \code{X} is labelled as either a cluster point or a non-cluster point. \item If \code{what="domain"}, the result is the Allard-Fraley estimator of the cluster feature set, which is the union of all the selected Dirichlet cells, represented as a window (object of class \code{"owin"}). \item If \code{what=c("marks", "domain")} the result is a list containing both of the results described above. } Computation of the Allard-Fraley set estimator depends on the argument \code{exact}. 
\itemize{ \item If \code{exact=TRUE} (the default), the Allard-Fraley set estimator will be computed exactly using polygonal geometry. The result is a polygonal window. \item If \code{exact=FALSE}, the Allard-Fraley set estimator will be approximated by a binary pixel mask. This is faster than the exact computation. The result is a binary mask. } } \value{ If \code{what="marks"}, a multitype point pattern (object of class \code{"ppp"}). If \code{what="domain"}, a window (object of class \code{"owin"}). If \code{what=c("marks", "domain")} (the default), a list consisting of a multitype point pattern and a window. } \references{ Allard, D. and Fraley, C. (1997) Nonparametric maximum likelihood estimation of features in spatial point processes using Voronoi tessellation. \emph{Journal of the American Statistical Association} \bold{92}, 1485--1493. } \author{ \adrian and \rolf } \seealso{ \code{\link{nnclean}}, \code{\link{sharpen}} } \examples{ opa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X, 'm')") plot(clusterset(X, "marks", fast=TRUE), add=TRUE, chars=c(1, 3), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, "domain", exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(opa) } \keyword{spatial} \keyword{classif} spatstat/man/methods.kppm.Rd0000644000176200001440000000310013333543263015567 0ustar liggesusers\name{methods.kppm} \alias{methods.kppm} %DoNotExport \alias{coef.kppm} \alias{formula.kppm} \alias{print.kppm} \alias{terms.kppm} \alias{labels.kppm} \title{ Methods for Cluster Point Process Models } \description{ These are methods for the class \code{"kppm"}. } \usage{ \method{coef}{kppm}(object, \dots) \method{formula}{kppm}(x, \dots) \method{print}{kppm}(x, ...) 
\method{terms}{kppm}(x, \dots) \method{labels}{kppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"kppm"}, representing a fitted cluster point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted cluster point process model. It is obtained from \code{\link{kppm}}. The method \code{coef.kppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the clustering parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link{as.ppm.kppm}}. } \examples{ data(redwood) fit <- kppm(redwood ~ x, "MatClust") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/diameter.Rd0000644000176200001440000000176413333543263014766 0ustar liggesusers\name{diameter} \alias{diameter} \title{Diameter of an Object} \description{ Computes the diameter of an object such as a two-dimensional window or three-dimensional box. } \usage{ diameter(x) } \arguments{ \item{x}{ A window or other object whose diameter will be computed. } } \value{ The numerical value of the diameter of the object. } \details{ This function computes the diameter of an object such as a two-dimensional window or a three-dimensional box. The diameter is the maximum distance between any two points in the object. 
The function \code{diameter} is generic, with methods for the class \code{"owin"} (two-dimensional windows), \code{"box3"} (three-dimensional boxes), \code{"boxx"} (multi-dimensional boxes) and \code{"linnet"} (linear networks). } \seealso{ \code{\link{diameter.owin}}, \code{\link{diameter.box3}}, \code{\link{diameter.boxx}}, \code{\link{diameter.linnet}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/clusterfield.Rd0000644000176200001440000000674113571674202015663 0ustar liggesusers\name{clusterfield} \alias{clusterfield} \alias{clusterfield.character} \alias{clusterfield.function} \alias{clusterfield.kppm} \title{Field of clusters} \description{ Calculate the superposition of cluster kernels at the location of a point pattern. } \usage{ clusterfield(model, locations = NULL, \dots) \method{clusterfield}{character}(model, locations = NULL, \dots) \method{clusterfield}{function}(model, locations = NULL, \dots, mu = NULL) \method{clusterfield}{kppm}(model, locations = NULL, \dots) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster model (object of class \code{"kppm"}), a character string specifying the type of cluster model, or a function defining the cluster kernel. See Details. } \item{locations}{ A point pattern giving the locations of the kernels. Defaults to the centroid of the observation window for the \code{"kppm"} method and to the center of a unit square otherwise. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} or the cluster kernel. See Details. } \item{mu}{ Mean number of offspring per cluster. A single number or a pixel image. } } \details{ The actual calculations are preformed by \code{\link{density.ppp}} and \code{\dots} arguments are passed thereto for control over the pixel resolution etc. (These arguments are then passed on to \code{\link{pixellate.ppp}} and \code{\link{as.mask}}.) 
For the function method the given kernel function should accept vectors of x and y coordinates as its first two arguments. Any additional arguments may be passed through the \code{\dots}. The function method also accepts the optional parameter \code{mu} (defaulting to 1) specifying the mean number of points per cluster (as a numeric) or the inhomogeneous reference cluster intensity (as an \code{"im"} object or a \code{function(x,y)}). The interpretation of \code{mu} is as explained in the simulation functions referenced in the See Also section below. For the character method \code{model} must be one of: \code{model="Thomas"} for the Thomas process, \code{model="MatClust"} for the \Matern cluster process, \code{model="Cauchy"} for the Neyman-Scott cluster process with Cauchy kernel, or \code{model="VarGamma"} for the Neyman-Scott cluster process with Variance Gamma kernel. For all these models the parameter \code{scale} is required and passed through \code{\dots} as well as the parameter \code{nu} when \code{model="VarGamma"}. This method calls \code{clusterfield.function} so the parameter \code{mu} may also be passed through \code{\dots} and will be interpreted as explained above. The kppm method extracts the relevant information from the fitted model (including \code{mu}) and calls \code{clusterfield.function}. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{density.ppp}} and \code{\link{kppm}} Simulation algorithms for cluster models: \code{\link{rCauchy}} \code{\link{rMatClust}} \code{\link{rThomas}} \code{\link{rVarGamma}} } \examples{ # method for fitted model fit <- kppm(redwood~1, "Thomas") clusterfield(fit, eps = 0.01) # method for functions kernel <- function(x,y,scal) { r <- sqrt(x^2 + y^2) ifelse(r > 0, dgamma(r, shape=5, scale=scal)/(2 * pi * r), 0) } X <- runifpoint(10) clusterfield(kernel, X, scal=0.05) } \author{\adrian , \rolf and \ege . 
} \keyword{spatial} spatstat/man/Extract.im.Rd0000644000176200001440000001731213333543263015206 0ustar liggesusers\name{Extract.im} \alias{[.im} \title{Extract Subset of Image} \description{ Extract a subset or subregion of a pixel image. } \usage{ \method{[}{im}(x, i, j, \dots, drop=TRUE, tight=FALSE, raster=NULL, rescue=is.owin(i)) } \arguments{ \item{x}{ A two-dimensional pixel image. An object of class \code{"im"}. } \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a linear network (object of class \code{"linnet"}) or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is a spatial object. } \item{\dots}{Ignored.} \item{drop}{ Logical value. Locations in \code{w} that lie outside the spatial domain of the image \code{x} return a pixel value of \code{NA} if \code{drop=FALSE}, and are omitted if \code{drop=TRUE}. } \item{tight}{ Logical value. If \code{tight=TRUE}, and if the result of the subset operation is an image, the image will be trimmed to the smallest possible rectangle. } \item{raster}{ Optional. An object of class \code{"owin"} or \code{"im"} determining a pixel grid. } \item{rescue}{ Logical value indicating whether rectangular blocks of data should always be returned as pixel images. } } \value{ Either a pixel image or a vector of pixel values. See Details. } \details{ This function extracts a subset of the pixel values in a pixel image. (To reassign the pixel values, see \code{\link{[<-.im}}). The image \code{x} must be an object of class \code{"im"} representing a pixel image defined inside a rectangle in two-dimensional space (see \code{\link{im.object}}). 
The subset to be extracted is determined by the arguments \code{i,j} according to the following rules (which are checked in this order): \enumerate{ \item \code{i} is a spatial object such as a window, a pixel image with logical values, a linear network, or a point pattern; or \item \code{i,j} are indices for the matrix \code{as.matrix(x)}; or \item \code{i} can be converted to a point pattern by \code{\link{as.ppp}(i, W=Window(x))}, and \code{i} is not a matrix. } If \code{i} is a spatial window (an object of class \code{"owin"}), the values of the image inside this window are extracted (after first clipping the window to the spatial domain of the image if necessary). If \code{i} is a linear network (object of class \code{"linnet"}), the values of the image on this network are extracted. If \code{i} is a pixel image with logical values, it is interpreted as a spatial window (with \code{TRUE} values inside the window and \code{FALSE} outside). If \code{i} is a point pattern (an object of class \code{"ppp"}), then the values of the pixel image at the points of this pattern are extracted. This is a simple way to read the pixel values at a given spatial location. At locations outside the spatial domain of the image, the pixel value is undefined, and is taken to be \code{NA}. The logical argument \code{drop} determines whether such \code{NA} values will be returned or omitted. It also influences the format of the return value. If \code{i} is a point pattern (or something that can be converted to a point pattern), then \code{X[i, drop=FALSE]} is a numeric vector containing the pixel values at each of the points of the pattern. Its length is equal to the number of points in the pattern \code{i}. It may contain \code{NA}s corresponding to points which lie outside the spatial domain of the image \code{x}. By contrast, \code{X[i]} or \code{X[i, drop=TRUE]} contains only those pixel values which are not \code{NA}. It may be shorter. 
If \code{i} is a spatial window then \code{X[i, drop=FALSE]} is another pixel image of the same dimensions as \code{X} obtained by setting all pixels outside the window \code{i} to have value \code{NA}. When the result is displayed by \code{\link{plot.im}} the effect is that the pixel image \code{x} is clipped to the window \code{i}. If \code{i} is a linear network (object of class \code{"linnet"}) then \code{X[i, drop=FALSE]} is another pixel image of the same dimensions as \code{X} obtained by restricting the pixel image \code{X} to the linear network. The result also belongs to the class \code{"linim"} (pixel image on a linear network). If \code{i} is a spatial window then \code{X[i, drop=TRUE]} is either: \itemize{ \item a numeric vector containing the pixel values for all pixels that lie inside the window \code{i}. This happens if \code{i} is \emph{not} a rectangle (i.e. \code{i$type != "rectangle"}) or if \code{rescue=FALSE}. \item a pixel image. This happens only if \code{i} is a rectangle (\code{i$type = "rectangle"}) and \code{rescue=TRUE} (the default). } If the optional argument \code{raster} is given, then it should be a binary image mask or a pixel image. Then \code{x} will first be converted to an image defined on the pixel grid implied by \code{raster}, before the subset operation is carried out. In particular, \code{x[i, raster=i, drop=FALSE]} will return an image defined on the same pixel array as the object \code{i}. If \code{i} does not satisfy any of the conditions above, then the algorithm attempts to interpret \code{i} and \code{j} as indices for the matrix \code{as.matrix(x)}. Either \code{i} or \code{j} may be missing or blank. The result is usually a vector or matrix of pixel values. Exceptionally the result is a pixel image if \code{i,j} determines a rectangular subset of the pixel grid, and if the user specifies \code{rescue=TRUE}. 
Finally, if none of the above conditions is met, the object \code{i} may also be a data frame or list of \code{x,y} coordinates which will be converted to a point pattern, taking the observation window to be \code{Window(x)}. Then the pixel values at these points will be extracted as a vector. } \section{Warnings}{ If you have a 2-column matrix containing the \eqn{x,y} coordinates of point locations, then to prevent this being interpreted as an array index, you should convert it to a \code{data.frame} or to a point pattern. If \code{W} is a window or a pixel image, then \code{x[W, drop=FALSE]} will return an image defined on the same pixel array as the original image \code{x}. If you want to obtain an image whose pixel dimensions agree with those of \code{W}, use the \code{raster} argument, \code{x[W, raster=W, drop=FALSE]}. } \seealso{ \code{\link{im.object}}, \code{\link{[<-.im}}, \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}}, \code{\link{plot.im}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) # a rectangular subset W <- owin(c(0,0.5),c(0.2,0.8)) Y <- X[W] plot(Y) # a polygonal subset R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7)) plot(X[R, drop=FALSE]) plot(X[R, drop=FALSE, tight=TRUE]) # a point pattern P <- rpoispp(20) Y <- X[P] # look up a specified location X[list(x=0.1,y=0.2)] # 10 x 10 pixel array X <- as.im(function(x,y) { x + y }, owin(c(-1,1),c(-1,1)), dimyx=10) # 100 x 100 W <- as.mask(disc(1, c(0,0)), dimyx=100) # 10 x 10 raster X[W,drop=FALSE] # 100 x 100 raster X[W, raster=W, drop=FALSE] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/mppm.Rd0000644000176200001440000002506013572424117014141 0ustar liggesusers\name{mppm} \alias{mppm} \title{Fit Point Process Model to Several Point Patterns} \description{ Fits a Gibbs point process model to several point patterns simultaneously. 
} \usage{ mppm(formula, data, interaction=Poisson(), ..., iformula=NULL, random=NULL, weights=NULL, use.gam = FALSE, reltol.pql=1e-3, gcontrol=list()) } \arguments{ \item{formula}{ A formula describing the systematic part of the model. Variables in the formula are names of columns in \code{data}. } \item{data}{ A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}) containing the point pattern responses and the explanatory variables. } \item{interaction}{ Interpoint interaction(s) appearing in the model. Either an object of class \code{"interact"} describing the point process interaction structure, or a hyperframe (with the same number of rows as \code{data}) whose entries are objects of class \code{"interact"}. } \item{\dots}{Arguments passed to \code{\link{ppm}} controlling the fitting procedure. } \item{iformula}{ Optional. A formula (with no left hand side) describing the interaction to be applied to each case. Each variable name in the formula should either be the name of a column in the hyperframe \code{interaction}, or the name of a column in the hyperframe \code{data} that is a vector or factor. } \item{random}{ Optional. A formula (with no left hand side) describing a random effect. Variable names in the formula may be any of the column names of \code{data} and \code{interaction}. The formula must be recognisable to \code{\link{lme}}. } \item{weights}{ Optional. Numeric vector of case weights for each row of \code{data}. } \item{use.gam}{Logical flag indicating whether to fit the model using \code{\link[mgcv]{gam}} or \code{\link[stats]{glm}}. } \item{reltol.pql}{ Relative tolerance for successive steps in the penalised quasi-likelihood algorithm, used when the model includes random effects. The algorithm terminates when the root mean square of the relative change in coefficients is less than \code{reltol.pql}. } \item{gcontrol}{ List of arguments to control the fitting algorithm. 
Arguments are passed to \code{\link[stats]{glm.control}} or \code{\link[mgcv]{gam.control}} or \code{\link[nlme]{lmeControl}} depending on the kind of model being fitted. If the model has random effects, the arguments are passed to \code{\link[nlme]{lmeControl}}. Otherwise, if \code{use.gam=TRUE} the arguments are passed to \code{\link[mgcv]{gam.control}}, and if \code{use.gam=FALSE} (the default) they are passed to \code{\link[stats]{glm.control}}. } } \details{ This function fits a common point process model to a dataset containing several different point patterns. It extends the capabilities of the function \code{\link{ppm}} to deal with data such as \itemize{ \item replicated observations of spatial point patterns \item two groups of spatial point patterns \item a designed experiment in which the response from each unit is a point pattern. } The syntax of this function is similar to that of standard \R model-fitting functions like \code{\link{lm}} and \code{\link{glm}}. The first argument \code{formula} is an \R formula describing the systematic part of the model. The second argument \code{data} contains the responses and the explanatory variables. Other arguments determine the stochastic structure of the model. Schematically, the data are regarded as the results of a designed experiment involving \eqn{n} experimental units. Each unit has a \sQuote{response}, and optionally some \sQuote{explanatory variables} (covariates) describing the experimental conditions for that unit. In this context, \emph{the response from each unit is a point pattern}. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a spatial covariate. A \sQuote{spatial} covariate is a quantity that depends on spatial location, for example, the soil acidity or altitude at each location. 
For the purposes of \code{mppm}, a spatial covariate must be stored as a pixel image (object of class \code{"im"}) which gives the values of the covariate at a fine grid of locations. The argument \code{data} is a hyperframe (a generalisation of a data frame, see \code{\link{hyperframe}}). This is like a data frame except that the entries can be objects of any class. The hyperframe has one row for each experimental unit, and one column for each variable (response or explanatory variable). The \code{formula} should be an \R formula. The left hand side of \code{formula} determines the \sQuote{response} variable. This should be a single name, which should correspond to a column in \code{data}. The right hand side of \code{formula} determines the spatial trend of the model. It specifies the linear predictor, and effectively represents the \bold{logarithm} of the spatial trend. Variables in the formula must be the names of columns of \code{data}, or one of the reserved names \describe{ \item{x,y}{Cartesian coordinates of location} \item{marks}{Mark attached to point} \item{id}{which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } } The column of responses in \code{data} must consist of point patterns (objects of class \code{"ppp"}). The individual point pattern responses can be defined in different spatial windows. If some of the point patterns are marked, then they must all be marked, and must have the same type of marks. The scope of models that can be fitted to each pattern is the same as the scope of \code{\link{ppm}}, that is, Gibbs point processes with interaction terms that belong to a specified list, including for example the Poisson process, Strauss process, Geyer's saturation model, and piecewise constant pairwise interaction models. Additionally, it is possible to include random effects as explained in the section on Random Effects below. 
The stochastic part of the model is determined by the arguments \code{interaction} and (optionally) \code{iformula}. \itemize{ \item In the simplest case, \code{interaction} is an object of class \code{"interact"}, determining the interpoint interaction structure of the point process model, for all experimental units. \item Alternatively, \code{interaction} may be a hyperframe, whose entries are objects of class \code{"interact"}. It should have the same number of rows as \code{data}. \itemize{ \item If \code{interaction} consists of only one column, then the entry in row \code{i} is taken to be the interpoint interaction for the \code{i}th experimental unit (corresponding to the \code{i}th row of \code{data}). \item If \code{interaction} has more than one column, then the argument \code{iformula} is also required. Each row of \code{interaction} determines several interpoint interaction structures that might be applied to the corresponding row of \code{data}. The choice of interaction is determined by \code{iformula}; this should be an \R formula, without a left hand side. For example if \code{interaction} has two columns called \code{A} and \code{B} then \code{iformula = ~B} indicates that the interpoint interactions are taken from the second column. } } Variables in \code{iformula} typically refer to column names of \code{interaction}. They can also be names of columns in \code{data}, but only for columns of numeric, logical or factor values. For example \code{iformula = ~B * group} (where \code{group} is a column of \code{data} that contains a factor) causes the model with interpoint interaction \code{B} to be fitted with different interaction parameters for each level of \code{group}. } \section{Random Effects}{ It is also possible to include random effects in the trend term. The argument \code{random} is a formula, with no left-hand side, that specifies the structure of the random effects. 
The formula should be recognisable to \code{\link{lme}} (see the description of the argument \code{random} for \code{\link{lme}}). The names in the formula \code{random} may be any of the covariates supplied by \code{data}. Additionally the formula may involve the name \code{id}, which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern in the list \code{X}. } \value{ An object of class \code{"mppm"} representing the fitted model. There are methods for \code{print}, \code{summary}, \code{coef}, \code{AIC}, \code{anova}, \code{fitted}, \code{fixef}, \code{logLik}, \code{plot}, \code{predict}, \code{ranef}, \code{residuals}, \code{summary}, \code{terms} and \code{vcov} for this class. The default methods for \code{\link[stats]{update}} and \code{\link[stats]{formula}} also work on this class. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Bell, M. and Grunwald, G. (2004) Mixed models for the analysis of replicated spatial point patterns. \emph{Biostatistics} \bold{5}, 633--648. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. 
} \seealso{ \code{\link{ppm}}, \code{\link{print.mppm}}, \code{\link{summary.mppm}}, \code{\link{coef.mppm}} } \examples{ # Waterstriders data H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H) mppm(Y ~ 1, data=H, Strauss(7)) mppm(Y ~ id, data=H) mppm(Y ~ x, data=H) # Synthetic data from known model n <- 10 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1), M=factor(letters[1 + (1:n) \%\% 3])) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~Z + U + V, data=H) } \keyword{spatial} \keyword{models} spatstat/man/rectcontact.Rd0000644000176200001440000000302313333543264015474 0ustar liggesusers\name{rectcontact} \alias{rectcontact} \title{ Contact Distribution Function using Rectangular Structuring Element } \description{ Computes an estimate of the contact distribution function of a set, using a rectangular structuring element. } \usage{ rectcontact(X, \dots, asp = 1, npasses=4, eps = NULL, r = NULL, breaks = NULL, correction = c("rs", "km")) } \arguments{ \item{X}{ Logical-valued image. The \code{TRUE} values in the image determine the spatial region whose contact distribution function should be estimated. } \item{\dots}{ Ignored. } \item{asp}{ Aspect ratio for the rectangular metric. A single positive number. See \code{\link{rectdistmap}} for explanation. } \item{npasses}{ Number of passes to perform in the distance algorithm. A positive integer. See \code{\link{rectdistmap}} for explanation. } \item{eps}{ Pixel size, if the image should be converted to a finer grid. } \item{r}{ Optional vector of distance values. Do Not Use This. } \item{breaks}{ Do Not Use This. } \item{correction}{ Character vector specifying the edge correction. } } \details{ An estimate of the contact distribution function of the spatial region defined by the \code{TRUE} pixels of \code{X} is computed, with distances measured in the rectangular metric with aspect ratio \code{asp}, as calculated by \code{\link{rectdistmap}}. The edge corrections specified by \code{correction} are applied. } \value{ Object of class \code{"fv"}. } \author{ \adrian. 
} \seealso{ \code{\link{Hest}} } \examples{ ## make an image which is TRUE/FALSE inside/outside the letter R V <- letterR Frame(V) <- grow.rectangle(Frame(V), 0.5) Z <- as.im(V, value=TRUE, na.replace=FALSE) ## analyse plot(rectcontact(Z)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/Extract.ppp.Rd0000644000176200001440000001437713333543263015410 0ustar liggesusers\name{Extract.ppp} \alias{[.ppp} \alias{[<-.ppp} \title{Extract or Replace Subset of Point Pattern} \description{ Extract or replace a subset of a point pattern. Extraction of a subset has the effect of thinning the points and/or trimming the window. } \usage{ \method{[}{ppp}(x, i, j, drop=FALSE, \dots, clip=FALSE) \method{[}{ppp}(x, i, j) <- value } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{i}{ Subset index. Either a valid subset index in the usual \R sense, indicating which points should be retained, or a window (an object of class \code{"owin"}) delineating a subset of the original observation window, or a pixel image with logical values defining a subset of the original observation window. } \item{value}{ Replacement value for the subset. A point pattern. } \item{j}{ Redundant. Included for backward compatibility. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{clip}{ Logical value indicating how to form the window of the resulting point pattern, when \code{i} is a window. If \code{clip=FALSE} (the default), the result has window equal to \code{i}. If \code{clip=TRUE}, the resulting window is the intersection between the window of \code{x} and the window \code{i}. } \item{\dots}{ Ignored. This argument is required for compatibility with the generic function. } } \value{ A point pattern (of class \code{"ppp"}). } \details{ These functions extract a designated subset of a point pattern, or replace the designated subset with another point pattern. 
The function \code{[.ppp} is a method for \code{\link{[}} for the class \code{"ppp"}. It extracts a designated subset of a point pattern, either by ``\emph{thinning}'' (retaining/deleting some points of a point pattern) or ``\emph{trimming}'' (reducing the window of observation to a smaller subregion and retaining only those points which lie in the subregion) or both. The pattern will be ``thinned'' if \code{i} is a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points for which the corresponding entry of \code{i} is \code{TRUE} will be retained, and the others will be deleted. The pattern will be ``trimmed'' if \code{i} is an object of class \code{"owin"} specifying a window of observation. The points of \code{x} lying inside the new window \code{i} will be retained. Alternatively \code{i} may be a pixel image (object of class \code{"im"}) with logical values; the pixels with the value \code{TRUE} will be interpreted as a window. The argument \code{drop} determines whether to remove unused levels of a factor, if the point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame in which some of the columns are factors. The function \code{[<-.ppp} is a method for \code{\link{[<-}} for the class \code{"ppp"}. It replaces the designated subset with the point pattern \code{value}. The subset of \code{x} to be replaced is designated by the argument \code{i} as above. The replacement point pattern \code{value} must lie inside the window of the original pattern \code{x}. The ordering of points in \code{x} will be preserved if the replacement pattern \code{value} has the same number of points as the subset to be replaced. Otherwise the ordering is unpredictable. 
If the original pattern \code{x} has marks, then the replacement pattern \code{value} must also have marks, of the same type. Use the function \code{\link{unmark}} to remove marks from a marked point pattern. Use the function \code{\link{split.ppp}} to select those points in a marked point pattern which have a specified mark. } \seealso{ \code{\link{subset.ppp}}. \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{unmark}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \section{Warnings}{ The function does not check whether \code{i} is a subset of \code{Window(x)}. Nor does it check whether \code{value} lies inside \code{Window(x)}. } \examples{ # Longleaf pines data lon <- longleaf \dontrun{ plot(lon) } \testonly{lon <- lon[seq(1,npoints(lon),by=10)]} # adult trees defined to have diameter at least 30 cm longadult <- subset(lon, marks >= 30) \dontrun{ plot(longadult) } # note that the marks are still retained. # Use unmark(longadult) to remove the marks # New Zealand trees data \dontrun{ plot(nztrees) # plot shows a line of trees at the far right abline(v=148, lty=2) # cut along this line } nzw <- owin(c(0,148),c(0,95)) # the subwindow # trim dataset to this subwindow nzsub <- nztrees[nzw] \dontrun{ plot(nzsub) } # Redwood data \dontrun{ plot(redwood) } # Random thinning: delete 60\% of data retain <- (runif(npoints(redwood)) < 0.4) thinred <- redwood[retain] \dontrun{ plot(thinred) } # Scramble 60\% of data X <- redwood modif <- (runif(npoints(X)) < 0.6) X[modif] <- runifpoint(ex=X[modif]) # Lansing woods data - multitype points lan <- lansing \testonly{ lan <- lan[seq(1, npoints(lan), length=100)] } # Hickory trees hicks <- split(lansing)$hickory # Trees in subwindow win <- owin(c(0.3, 0.6),c(0.2, 0.5)) lsub <- lan[win] # Scramble the locations of trees in subwindow, retaining their marks lan[win] <- runifpoint(ex=lsub) \%mark\% marks(lsub) # Extract oaks only oaknames <- c("redoak", "whiteoak", "blackoak") oak <- lan[marks(lan) \%in\% oaknames, 
drop=TRUE] oak <- subset(lan, marks \%in\% oaknames, drop=TRUE) # To clip or not to clip X <- runifpoint(25, letterR) B <- owin(c(2.2, 3.9), c(2, 3.5)) opa <- par(mfrow=c(1,2)) plot(X, main="X[B]") plot(X[B], border="red", cols="red", add=TRUE, show.all=TRUE, main="") plot(X, main="X[B, clip=TRUE]") plot(B, add=TRUE, lty=2) plot(X[B, clip=TRUE], border="blue", cols="blue", add=TRUE, show.all=TRUE, main="") par(opa) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/split.im.Rd0000644000176200001440000000400413333543264014722 0ustar liggesusers\name{split.im} \alias{split.im} \title{Divide Image Into Sub-images} \description{ Divides a pixel image into several sub-images according to the value of a factor, or according to the tiles of a tessellation. } \usage{ \method{split}{im}(x, f, ..., drop = FALSE) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{f}{ Splitting criterion. Either a tessellation (object of class \code{"tess"}) or a pixel image with factor values. } \item{\dots}{Ignored.} \item{drop}{Logical value determining whether each subset should be returned as a pixel image (\code{drop=FALSE}) or as a one-dimensional vector of pixel values (\code{drop=TRUE}). } } \details{ This is a method for the generic function \code{\link{split}} for the class of pixel images. The image \code{x} will be divided into subsets determined by the data \code{f}. The result is a list of these subsets. The splitting criterion may be either \itemize{ \item a tessellation (object of class \code{"tess"}). Each tile of the tessellation delineates a subset of the spatial domain. \item a pixel image (object of class \code{"im"}) with factor values. The levels of the factor determine subsets of the spatial domain. } If \code{drop=FALSE} (the default), the result is a list of pixel images, each one a subset of the pixel image \code{x}, obtained by restricting the pixel domain to one of the subsets. 
If \code{drop=TRUE}, then the pixel values are returned as numeric vectors. } \value{ If \code{drop=FALSE}, a list of pixel images (objects of class \code{"im"}). It is also of class \code{"solist"} so that it can be plotted immediately. If \code{drop=TRUE}, a list of numeric vectors. } \seealso{ \code{\link{by.im}}, \code{\link{tess}}, \code{\link{im}} } \examples{ W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) plot(split(X,Y)) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/bugfixes.Rd0000644000176200001440000000526513623712063015006 0ustar liggesusers\name{bugfixes} \alias{bugfixes} \title{ List Recent Bug Fixes } \description{ List all bug fixes in a package, starting from a certain date or version of the package. Fixes are sorted alphabetically by the name of the affected function. The default is to list bug fixes in the latest version of the \pkg{spatstat} package. } \usage{ bugfixes(sinceversion = NULL, sincedate = NULL, package = "spatstat", show = TRUE) } \arguments{ \item{sinceversion}{ Earliest version of \code{package} for which bugs should be listed. The default is the current installed version. } \item{sincedate}{ Earliest release date of \code{package} for which bugs should be listed. A character string or a date-time object. } \item{package}{ Character string. The name of the package for which bugs are to be listed. } \item{show}{ Logical value indicating whether to display the bug table on the terminal. } } \details{ Bug reports are extracted from the NEWS file of the specified \code{package}. Only those after a specified date, or after a specified version of the package, are retained. The bug reports are then sorted alphabetically, so that all bugs affecting a particular function are listed consecutively. Finally the table of bug reports is displayed (if \code{show=TRUE}) and returned invisibly. 
The argument \code{sinceversion} should be a character string like \code{"1.2-3"}. The default is the current installed version of the package. The argument \code{sincedate} should be a character string like \code{"2015-05-27"}, or a date-time object. If \code{sinceversion="all"} or \code{sincedate="all"} then all recorded bugs will be listed. If \code{package="spatstat"} (the default) then \code{sinceversion="book"} and \code{sincedate="book"} are interpreted to mean \code{sinceversion="1.42-1"}, which gives all bugs reported after publication of the book by Baddeley, Rubak and Turner (2015). Typing \code{bugfixes} without parentheses will display a table of all bugs that were fixed in the current installed version of \pkg{spatstat}. } \value{ A data frame, belonging to the class \code{"bugtable"}, which has its own print method. } \author{ \adrian. } \seealso{ \code{\link{latest.news}}, \code{\link[utils]{news}}. } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \examples{ bugfixes ## show all bugs reported after publication of the spatstat book if(interactive()) bugfixes(sinceversion="1.42-1") ## equivalent to bugfixes(sinceversion="book") } \keyword{documentation} spatstat/man/plot.layered.Rd0000644000176200001440000000706613333543264015600 0ustar liggesusers\name{plot.layered} \alias{plot.layered} \title{ Layered Plot } \description{ Generates a layered plot. The plot method for objects of class \code{"layered"}. } \usage{ \method{plot}{layered}(x, ..., which = NULL, plotargs = NULL, add=FALSE, show.all=!add, main=NULL, do.plot=TRUE) } \arguments{ \item{x}{ An object of class \code{"layered"} created by the function \code{\link{layered}}. } \item{\dots}{ Arguments to be passed to the \code{plot} method for \emph{every} layer. } \item{which}{ Subset index specifying which layers should be plotted. 
} \item{plotargs}{ Arguments to be passed to the \code{plot} methods for individual layers. A list of lists of arguments of the form \code{name=value}. } \item{add}{Logical value indicating whether to add the graphics to an existing plot. } \item{show.all}{ Logical value indicating whether the \emph{first} layer should be displayed in full (including the main title, bounding window, coordinate axes, colour ribbon, and so on). } \item{main}{Main title for the plot} \item{do.plot}{Logical value indicating whether to actually do the plotting.} } \details{ Layering is a simple mechanism for controlling a high-level plot that is composed of several successive plots, for example, a background and a foreground plot. The layering mechanism makes it easier to plot, to switch on or off the plotting of each individual layer, to control the plotting arguments that are passed to each layer, and to zoom in on a subregion. The layers of data to be plotted should first be converted into a single object of class \code{"layered"} using the function \code{\link{layered}}. Then the layers can be plotted using the method \code{plot.layered}. To zoom in on a subregion, apply the subset operator \code{\link{[.layered}} to \code{x} before plotting. Graphics parameters for each layer are determined by (in order of precedence) \code{\dots}, \code{plotargs}, and \code{\link{layerplotargs}(x)}. The graphics parameters may also include the special argument \code{.plot} specifying (the name of) a function which will be used to perform the plotting instead of the generic \code{plot}. The argument \code{show.all} is recognised by many plot methods in \pkg{spatstat}. It determines whether a plot is drawn with all its additional components such as the main title, bounding window, coordinate axes, colour ribbons and legends. The default is \code{TRUE} for new plots and \code{FALSE} for added plots. In \code{plot.layered}, the argument \code{show.all} applies only to the \bold{first} layer. 
The subsequent layers are plotted with \code{show.all=FALSE}. To override this, that is, if you really want to draw all the components of \bold{all} layers of \code{x}, insert the argument \code{show.all=TRUE} in each entry of \code{plotargs} or \code{\link{layerplotargs}(x)}. } \value{ (Invisibly) a list containing the return values from the plot commands for each layer. This list has an attribute \code{"bbox"} giving a bounding box for the entire plot. } \author{\adrian and \rolf } \seealso{ \code{\link{layered}}, \code{\link{layerplotargs}}, \code{\link{[.layered}}, \code{\link{plot}}. } \examples{ data(cells) D <- distmap(cells) L <- layered(D, cells) plot(L) plot(L, which = 2) plot(L, plotargs=list(list(ribbon=FALSE), list(pch=3, cols="white"))) # plot a subregion plot(L[, square(0.5)]) } \keyword{spatial} \keyword{hplot} spatstat/man/boundingbox.Rd0000644000176200001440000000526513333543262015511 0ustar liggesusers\name{boundingbox} \alias{boundingbox} \alias{boundingbox.default} \alias{boundingbox.im} \alias{boundingbox.owin} \alias{boundingbox.ppp} \alias{boundingbox.psp} \alias{boundingbox.lpp} \alias{boundingbox.linnet} \alias{boundingbox.solist} \title{ Bounding Box of a Window, Image, or Point Pattern } \description{ Find the smallest rectangle containing a given window(s), image(s) or point pattern(s). } \usage{ boundingbox(\dots) \method{boundingbox}{default}(\dots) \method{boundingbox}{im}(\dots) \method{boundingbox}{owin}(\dots) \method{boundingbox}{ppp}(\dots) \method{boundingbox}{psp}(\dots) \method{boundingbox}{lpp}(\dots) \method{boundingbox}{linnet}(\dots) \method{boundingbox}{solist}(\dots) } \arguments{ \item{\dots}{One or more windows (objects of class \code{"owin"}), pixel images (objects of class \code{"im"}) or point patterns (objects of class \code{"ppp"} or \code{"lpp"}) or line segment patterns (objects of class \code{"psp"}) or linear networks (objects of class \code{"linnet"}) or any combination of such objects. 
Alternatively, the argument may be a list of such objects, of class \code{"solist"}. } } \details{ This function finds the smallest rectangle (with sides parallel to the coordinate axes) that contains all the given objects. For a window (object of class \code{"owin"}), the bounding box is the smallest rectangle that contains all the vertices of the window (this is generally smaller than the enclosing frame, which is returned by \code{\link{as.rectangle}}). For a point pattern (object of class \code{"ppp"} or \code{"lpp"}), the bounding box is the smallest rectangle that contains all the points of the pattern. This is usually smaller than the bounding box of the window of the point pattern. For a line segment pattern (object of class \code{"psp"}) or a linear network (object of class \code{"linnet"}), the bounding box is the smallest rectangle that contains all endpoints of line segments. For a pixel image (object of class \code{"im"}), the image will be converted to a window using \code{\link{as.owin}}, and the bounding box of this window is obtained. If the argument is a list of several objects, then this function finds the smallest rectangle that contains all the bounding boxes of the objects. } \value{ A window (object of class \code{"owin"}) representing the bounding box: the smallest rectangle containing all the given objects. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.rectangle}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) r <- boundingbox(w) # returns rectangle [1,3] x [2,7] w2 <- unit.square() r <- boundingbox(w, w2) # returns rectangle [0,3] x [0,7] } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{utilities} spatstat/man/beachcolours.Rd0000644000176200001440000000542413571674202015644 0ustar liggesusers\name{beachcolours} \alias{beachcolours} \alias{beachcolourmap} \title{ Create Colour Scheme for a Range of Numbers } \description{ Given a range of numerical values, this command creates a colour scheme that would be appropriate if the numbers were altitudes (elevation above or below sea level). 
} \usage{ beachcolours(range, sealevel = 0, monochrome = FALSE, ncolours = if (monochrome) 16 else 64, nbeach = 1) beachcolourmap(range, ...) } \arguments{ \item{range}{ Range of numerical values to be mapped. A numeric vector of length 2. } \item{sealevel}{ Value that should be treated as zero. A single number, lying between \code{range[1]} and \code{range[2]}. } \item{monochrome}{ Logical. If \code{TRUE} then a greyscale colour map is constructed. } \item{ncolours}{ Number of distinct colours to use. } \item{nbeach}{ Number of colours that will be yellow. } \item{\dots}{Arguments passed to \code{beachcolours}.} } \details{ Given a range of numerical values, these commands create a colour scheme that would be appropriate if the numbers were altitudes (elevation above or below sea level). Numerical values close to zero are portrayed in green (representing the waterline). Negative values are blue (representing water) and positive values are yellow to red (representing land). At least, these are the colours of land and sea in Western Australia. This colour scheme was proposed by Baddeley et al (2005). The function \code{beachcolours} returns these colours as a character vector, while \code{beachcolourmap} returns a colourmap object. The argument \code{range} should be a numeric vector of length 2 giving a range of numerical values. The argument \code{sealevel} specifies the height value that will be treated as zero, and mapped to the colour green. A vector of \code{ncolours} colours will be created, of which \code{nbeach} colours will be green. The argument \code{monochrome} is included for convenience when preparing publications. If \code{monochrome=TRUE} the colour map will be a simple grey scale containing \code{ncolours} shades from black to white. } \value{ For \code{beachcolours}, a character vector of length \code{ncolours} specifying colour values. For \code{beachcolourmap}, a colour map (object of class \code{"colourmap"}). 
} \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. } \seealso{ \code{\link{colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. } \examples{ plot(beachcolourmap(c(-2,2))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} spatstat/man/Extract.anylist.Rd0000644000176200001440000000226613333543263016266 0ustar liggesusers\name{Extract.anylist} \alias{[.anylist} \alias{[<-.anylist} \title{Extract or Replace Subset of a List of Things} \description{ Extract or replace a subset of a list of things. } \usage{ \method{[}{anylist}(x, i, \dots) \method{[}{anylist}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"anylist"} representing a list of things. } \item{i}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. } \item{\dots}{Ignored.} } \value{ Another object of class \code{"anylist"}. } \details{ These are the methods for extracting and replacing subsets for the class \code{"anylist"}. The argument \code{x} should be an object of class \code{"anylist"} representing a list of things. See \code{\link{anylist}}. The method replaces a designated subset of \code{x}, and returns an object of class \code{"anylist"}. } \seealso{ \code{\link{anylist}}, \code{\link{plot.anylist}}, \code{\link{summary.anylist}} } \examples{ x <- anylist(A=runif(10), B=runif(10), C=runif(10)) x[1] <- list(A=rnorm(10)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/nncross.ppx.Rd0000644000176200001440000001025713553255473015473 0ustar liggesusers\name{nncross.ppx} \alias{nncross.ppx} \title{Nearest Neighbours Between Two Patterns in Any Dimensions} \description{ Given two point patterns \code{X} and \code{Y} in many dimensional space, finds the nearest neighbour in \code{Y} of each point of \code{X}. 
} \usage{ \method{nncross}{ppx}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1) } \arguments{ \item{X,Y}{ Point patterns in any number of spatial dimensions (objects of class \code{"ppx"}). } \item{iX, iY}{Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} in \eqn{m}-dimensional space, this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. 
To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. } \examples{ B <- boxx(c(0,1), c(0,1), c(0,1), c(0,1)) ## two different point patterns X <- runifpointx(5, B) Y <- runifpointx(10, B) nncross(X,Y) N23 <- nncross(X,Y, k=2:3) ## two patterns with some points in common Z <- runifpointx(20, B) X <- Z[1:15] Y <- Z[10:20] iX <- 1:15 iY <- 10:20 N <- nncross(X,Y, iX, iY, what="which") N4 <- nncross(X,Y, iX, iY, k=4) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{math} spatstat/man/psp.Rd0000644000176200001440000000756213474140142013773 0ustar liggesusers\name{psp} \alias{psp} \title{Create a Line Segment Pattern} \description{ Creates an object of class \code{"psp"} representing a line segment pattern in the two-dimensional plane. } \usage{ psp(x0,y0, x1, y1, window, marks=NULL, check=spatstat.options("checksegments")) } \arguments{ \item{x0}{Vector of \eqn{x} coordinates of first endpoint of each segment} \item{y0}{Vector of \eqn{y} coordinates of first endpoint of each segment} \item{x1}{Vector of \eqn{x} coordinates of second endpoint of each segment} \item{y1}{Vector of \eqn{y} coordinates of second endpoint of each segment} \item{window}{window of observation, an object of class \code{"owin"}} \item{marks}{(optional) vector or data frame of mark values} \item{check}{Logical value indicating whether to check that the line segments lie inside the window.} } \value{ An object of class \code{"psp"} describing a line segment pattern in the two-dimensional plane (see \code{\link{psp.object}}). } \details{ In the \pkg{spatstat} library, a spatial pattern of line segments is described by an object of class \code{"psp"}. This function creates such objects. The vectors \code{x0}, \code{y0}, \code{x1} and \code{y1} must be numeric vectors of equal length. They are interpreted as the cartesian coordinates of the endpoints of the line segments. A line segment pattern is assumed to have been observed within a specific region of the plane called the observation window. An object of class \code{"psp"} representing a point pattern contains information specifying the observation window. This window must always be specified when creating a point pattern dataset; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. The argument \code{window} must be an object of class \code{"owin"}. 
It is a full description of the window geometry, and could have been obtained from \code{\link{owin}} or \code{\link{as.owin}}, or by just extracting the observation window of another dataset, or by manipulating such windows. See \code{\link{owin}} or the Examples below. The optional argument \code{marks} is given if the line segment pattern is marked, i.e. if each line segment carries additional information. For example, line segments which are classified into two or more different types, or colours, may be regarded as having a mark which identifies which colour they are. The object \code{marks} must be a vector of the same length as \code{x0}, or a data frame with number of rows equal to the length of \code{x0}. The interpretation is that \code{marks[i]} or \code{marks[i,]} is the mark attached to the \eqn{i}th line segment. If the marks are real numbers then \code{marks} should be a numeric vector, while if the marks takes only a finite number of possible values (e.g. colours or types) then \code{marks} should be a \code{factor}. See \code{\link{psp.object}} for a description of the class \code{"psp"}. Users would normally invoke \code{psp} to create a line segment pattern, and the function \code{\link{as.psp}} to convert data in another format into a line segment pattern. } \seealso{ \code{\link{psp.object}}, \code{\link{as.psp}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.owin}}. Function for extracting information from a segment pattern: \code{\link{marks.psp}}, \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{lengths.psp}} \code{\link{angles.psp}}, \code{\link{endpoints.psp}} Convert line segments to infinite lines: \code{\link{extrapolate.psp}}. } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) m <- data.frame(A=1:10, B=letters[1:10]) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) } \author{\adrian and \rolf. 
} \keyword{spatial} \keyword{datagen} spatstat/man/diameter.owin.Rd0000644000176200001440000000206413333543263015733 0ustar liggesusers\name{diameter.owin} \alias{diameter.owin} \title{Diameter of a Window} \description{ Computes the diameter of a window. } \usage{ \method{diameter}{owin}(x) } \arguments{ \item{x}{ A window whose diameter will be computed. } } \value{ The numerical value of the diameter of the window. } \details{ This function computes the diameter of a window of arbitrary shape, i.e. the maximum distance between any two points in the window. The argument \code{x} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The function \code{diameter} is generic. This function is the method for the class \code{"owin"}. } \seealso{ \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{edges}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) diameter(w) # returns sqrt(2) data(letterR) diameter(letterR) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/F3est.Rd0000644000176200001440000001227513333543262014156 0ustar liggesusers\name{F3est} \Rdversion{1.1} \alias{F3est} \title{ Empty Space Function of a Three-Dimensional Point Pattern } \description{ Estimates the empty space function \eqn{F_3(r)}{F3(r)} from a three-dimensional point pattern. } \usage{ F3est(X, ..., rmax = NULL, nrval = 128, vside = NULL, correction = c("rs", "km", "cs"), sphere = c("fudge", "ideal", "digital")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. 
} \item{vside}{ Optional. Side length of the voxels in the discrete approximation. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{sphere}{ Optional. Character string specifying how to calculate the theoretical value of \eqn{F_3(r)}{F3(r)} for a Poisson process. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the empty space function is \deqn{ F_3(r) = P(d(0,\Phi) \le r) }{ F3(r) = P(d(0,Phi) <= r) } where \eqn{d(0,\Phi)}{d(0,Phi)} denotes the distance from a fixed origin \eqn{0} to the nearest point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The empty space function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. The box containing the point pattern is discretised into cubic voxels of side length \code{vside}. The distance function \eqn{d(u,\Phi)}{d(u,Phi)} is computed for every voxel centre point \eqn{u} using a three-dimensional version of the distance transform algorithm (Borgefors, 1986). The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{F_3(r)}{F3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"cs"}:}{ the three-dimensional generalisation of the Chiu-Stoyan or Hanisch estimator (Chiu and Stoyan, 1998). } } Alternatively \code{correction="all"} selects all options. The result includes a column \code{theo} giving the theoretical value of \eqn{F_3(r)}{F3(r)} for a uniform Poisson process (Complete Spatial Randomness). 
This value depends on the volume of the sphere of radius \code{r} measured in the discretised distance metric. The argument \code{sphere} determines how this will be calculated. \itemize{ \item If \code{sphere="ideal"} the calculation will use the volume of an ideal sphere of radius \eqn{r} namely \eqn{(4/3) \pi r^3}{(4/3) * pi * r^3}. This is not recommended because the theoretical values of \eqn{F_3(r)}{F3(r)} are inaccurate. \item If \code{sphere="fudge"} then the volume of the ideal sphere will be multiplied by 0.78, which gives the approximate volume of the sphere in the discretised distance metric. \item If \code{sphere="digital"} then the volume of the sphere in the discretised distance metric is computed exactly using another distance transform. This takes longer to compute, but is exact. } } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42} (1993) 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Borgefors, G. (1986) Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34}, 344--371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A small value of \code{vside} and a large value of \code{nrval} are required for reasonable accuracy. The default value of \code{vside} ensures that the total number of voxels is \code{2^22} or about 4 million. To change the default number of voxels, see \code{\link{spatstat.options}("nvoxel")}. 
} \seealso{ \code{\link{G3est}}, \code{\link{K3est}}, \code{\link{pcf3est}}. } \examples{ \testonly{op <- spatstat.options(nvoxel=2^18)} X <- rpoispp3(42) Z <- F3est(X) if(interactive()) plot(Z) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{nonparametric} spatstat/man/runifpointx.Rd0000644000176200001440000000245713333543264015562 0ustar liggesusers\name{runifpointx} \alias{runifpointx} \title{ Generate N Uniform Random Points in Any Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in any number of spatial dimensions. } \usage{ runifpointx(n, domain, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern (an object of class \code{"ppx"}). If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns. } \details{ This function generates a pattern of \code{n} independent random points, uniformly distributed in the multi-dimensional box \code{domain}. } \seealso{ \code{\link{rpoisppx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- runifpointx(50, w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/kernel.factor.Rd0000644000176200001440000000265213333543263015726 0ustar liggesusers\name{kernel.factor} \alias{kernel.factor} \title{Scale factor for density kernel} \description{ Returns a scale factor for the kernels used in density estimation for numerical data. } \usage{ kernel.factor(kernel = "gaussian") } \arguments{ \item{kernel}{ String name of the kernel. 
Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. This function computes a scale constant for the kernel. For the Gaussian kernel, this constant is equal to 1. Otherwise, the constant \eqn{c} is such that the kernel with standard deviation \eqn{1} is supported on the interval \eqn{[-c,c]}. For more information about these kernels, see \code{\link[stats]{density.default}}. } \value{ A single number. } \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.moment}}, \code{\link{kernel.squint}} } \examples{ kernel.factor("rect") # bandwidth for Epanechnikov kernel with half-width h=1 h <- 1 bw <- h/kernel.factor("epa") } \author{\adrian and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/cauchy.estK.Rd0000644000176200001440000001302213571674202015345 0ustar liggesusers\name{cauchy.estK} \alias{cauchy.estK} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast. } \usage{ cauchy.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. 
} \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical \eqn{K} function of the \Matern Cluster process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{\kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{\mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{\lambda}. 
Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{\lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{\lambda} cannot be estimated, and the parameter \eqn{\mu}{\mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure uses the parameter \code{eta2}, which is equivalent to \code{4 * scale^2} where \code{scale} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. 
\emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{vargamma.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estK(redwood) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/pcfcross.inhom.Rd0000644000176200001440000001147013333543264016123 0ustar liggesusers\name{pcfcross.inhom} \alias{pcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-Type) } \description{ Estimates the inhomogeneous cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross.inhom(X, i, j, lambdaI = NULL, lambdaJ = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. 
Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity function of the points of type \code{j}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{ij}(r)}{g[i,j](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by kernel smoothing. } } \details{ The inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} is a summary of the dependence between two types of points in a multitype spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points, of types \eqn{i} and \eqn{j} respectively, at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda_i(x) lambda_j(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda[j](y) * g(r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}. 
For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda_j(y)}{p(r) = lambda[i](x) * lambda[j](y)} so \eqn{g_{ij}(r) = 1}{g[i,j](r) = 1}. The command \code{pcfcross.inhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdaJ} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{ij}(r)}{g[i,j](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfcross}}, \code{\link{pcfdot.inhom}} } \examples{ data(amacrine) plot(pcfcross.inhom(amacrine, "on", "off", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/vcov.kppm.Rd0000644000176200001440000000557113333543264015120 0ustar liggesusers\name{vcov.kppm} \alias{vcov.kppm} \title{Variance-Covariance Matrix for a Fitted Cluster Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted cluster point process model. } \usage{ \method{vcov}{kppm}(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 0.01, verbose = TRUE) } \arguments{ \item{object}{ A fitted cluster point process model (an object of class \code{"kppm"}.) } \item{\dots}{ Ignored. 
} \item{what}{ Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } \item{fast}{ Logical specifying whether tapering (using sparse matrices from \pkg{Matrix}) should be used to speed up calculations. Warning: This is expected to underestimate the true asymptotic variances/covariances. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. Only used when \code{fast=TRUE}. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function when fast option (\code{fast=TRUE}) is used. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Only used when \code{fast=TRUE}. Ignored if \code{rmax} is provided. } \item{verbose}{ Logical value indicating whether to print progress reports during very long calculations. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical (regression) parameters in the cluster point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. The result is an \code{n * n} matrix where \code{n = length(coef(model))}. To calculate a confidence interval for a regression parameter, use \code{\link[stats]{confint}} as shown in the examples. } \value{ A square matrix. } \references{ Waagepetersen, R. (2007) Estimating functions for inhomogeneous spatial point processes with incomplete covariate data. \emph{Biometrika} \bold{95}, 351--363. } \author{ Abdollah Jalilian and Rasmus Waagepetersen. Ported to \pkg{spatstat} by \adrian and \ege. 
} \seealso{ \code{\link{kppm}}, \code{\link{vcov}}, \code{\link{vcov.ppm}} } \examples{ fit <- kppm(redwood ~ x + y) vcov(fit) vcov(fit, what="corr") # confidence interval confint(fit) # cross-check the confidence interval by hand: sd <- sqrt(diag(vcov(fit))) t(coef(fit) + 1.96 * outer(sd, c(lower=-1, upper=1))) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/lurking.Rd0000644000176200001440000003041513571674202014644 0ustar liggesusers\name{lurking} \alias{lurking} \alias{lurking.ppp} \alias{lurking.ppm} \title{Lurking Variable Plot} \description{ Plot spatial point process residuals against a covariate } \usage{ lurking(object, \dots) \method{lurking}{ppm}(object, covariate, type="eem", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) \method{lurking}{ppp}(object, covariate, type="eem", cumulative=TRUE, \dots, plot.it = TRUE, plot.sd = is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. Alternatively, \code{object} may be a point pattern (object of class \code{"ppp"}). } \item{covariate}{ The covariate against which residuals should be plotted. Either a numeric vector, a pixel image, or an \code{expression}. See \emph{Details} below. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. 
} \item{cumulative}{ Logical flag indicating whether to plot a cumulative sum of marks (\code{cumulative=TRUE}) or the derivative of this sum, a marginal density of the smoothed residual field (\code{cumulative=FALSE}). } \item{\dots}{ Arguments passed to \code{\link{plot.default}} and \code{\link{lines}} to control the plot behaviour. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{plot.sd}{ Logical value indicating whether error bounds should be added to plot. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{clipwindow}{ If not \code{NULL} this argument indicates that residuals shall only be computed inside a subregion of the window containing the original point pattern data. Then \code{clipwindow} should be a window object of class \code{"owin"}. } \item{rv}{ Usually absent. If this argument is present, the point process residuals will not be calculated from the fitted model \code{object}, but will instead be taken directly from \code{rv}. } \item{envelope}{ Logical value indicating whether to compute simulation envelopes for the plot. Alternatively \code{envelope} may be a list of point patterns to use for computing the simulation envelopes, or an object of class \code{"envelope"} containing simulated point patterns. } \item{nsim}{ Number of simulated point patterns to be generated to produce the simulation envelope, if \code{envelope=TRUE}. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{typename}{ Usually absent. If this argument is present, it should be a string, and will be used (in the axis labels of plots) to describe the type of residuals. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. 
} \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{check}{ Logical flag indicating whether the integrity of the data structure in \code{object} should be checked. } \item{verbose}{ Logical value indicating whether to print progress reports during Monte Carlo simulation. } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{splineargs}{ A list of arguments passed to \code{smooth.spline} for the estimation of the derivatives in the case \code{cumulative=FALSE}. } \item{internal}{ Internal use only. } } \value{ The (invisible) return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. This object is a list containing two dataframes \code{empirical} and \code{theoretical}. The first dataframe \code{empirical} contains columns \code{covariate} and \code{value} giving the coordinates of the lurking variable plot. The second dataframe \code{theoretical} contains columns \code{covariate}, \code{mean} and \code{sd} giving the coordinates of the plot of the theoretical mean and standard deviation. } \details{ This function generates a \sQuote{lurking variable} plot for a fitted point process model. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model, in particular, to reveal that the point pattern depends on the covariate. The function \code{lurking} is generic, with methods for \code{ppm} and \code{ppp} documented here, and possibly other methods. The argument \code{object} would usually be a fitted point process model (object of class \code{"ppm"}) produced by the model-fitting algorithm \code{\link{ppm}}). 
If \code{object} is a point pattern (object of class \code{"ppp"}) then the model is taken to be the uniform Poisson process (Complete Spatial Randomness) fitted to this point pattern. First the residuals from the fitted model (Baddeley et al, 2004) are computed at each quadrature point, or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991) are computed at each data point. The argument \code{type} selects the type of residual or weight. See \code{\link{diagnose.ppm}} for options and explanation. A lurking variable plot for point processes (Baddeley et al, 2004) displays either the cumulative sum of residuals/weights (if \code{cumulative = TRUE}) or a kernel-weighted average of the residuals/weights (if \code{cumulative = FALSE}) plotted against the covariate. The empirical plot (solid lines) is shown together with its expected value assuming the model is true (dashed lines) and optionally also the pointwise two-standard-deviation limits (grey shading). To be more precise, let \eqn{Z(u)} denote the value of the covariate at a spatial location \eqn{u}. \itemize{ \item If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z}, where \eqn{H(z)} is the sum of the residuals over all quadrature points where the covariate takes a value less than or equal to \eqn{z}, or the sum of the exponential energy weights over all data points where the covariate takes a value less than or equal to \eqn{z}. \item If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z}, where \eqn{h(z)} is the derivative of \eqn{H(z)}, computed approximately by spline smoothing. } For the point process residuals \eqn{E(H(z)) = 0}, while for the exponential energy weights \eqn{E(H(z)) = } area of the subset of the window satisfying \eqn{Z(u) <= z}{Z(u) \le z}. If the empirical and theoretical curves deviate substantially from one another, the interpretation is that the fitted model does not correctly account for dependence on the covariate. 
The correct form (of the spatial trend part of the model) may be suggested by the shape of the plot. If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{H(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. The argument \code{covariate} is either a numeric vector, a pixel image, or an R language expression. If it is a numeric vector, it is assumed to contain the values of the covariate for each of the quadrature points in the fitted model. The quadrature points can be extracted by \code{\link{quad.ppm}(object)}. If \code{covariate} is a pixel image, it is assumed to contain the values of the covariate at each location in the window. The values of this image at the quadrature points will be extracted. Alternatively, if \code{covariate} is an \code{expression}, it will be evaluated in the same environment as the model formula used in fitting the model \code{object}. It must yield a vector of the same length as the number of quadrature points. The expression may contain the terms \code{x} and \code{y} representing the cartesian coordinates, and may also contain other variables that were available when the model was fitted. Certain variable names are reserved words; see \code{\link{ppm}}. 
Note that lurking variable plots for the \eqn{x} and \eqn{y} coordinates are also generated by \code{\link{diagnose.ppm}}, amongst other types of diagnostic plots. This function is more general in that it enables the user to plot the residuals against any chosen covariate that may have been present. For advanced use, even the values of the residuals/weights can be altered. If the argument \code{rv} is present, the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from the object \code{rv}. If \code{type = "eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector with length equal to the number of data points in the original point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, \code{rv} should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. } \seealso{ \code{\link{residuals.ppm}}, \code{\link{diagnose.ppm}}, \code{\link{residuals.ppm}}, \code{\link{qqplot.ppm}}, \code{\link{eem}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2006) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. } \examples{ (a <- lurking(nztrees, expression(x), type="raw")) fit <- ppm(nztrees ~x, Poisson(), nd=128) (b <- lurking(fit, expression(x), type="raw")) lurking(fit, expression(x), type="raw", cumulative=FALSE) } \author{ \adrian and \rolf. 
} \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/with.ssf.Rd0000644000176200001440000000301613333543265014733 0ustar liggesusers\name{with.ssf} \alias{with.ssf} \alias{apply.ssf} \title{ Evaluate Expression in a Spatially Sampled Function } \description{ Given a spatially sampled function, evaluate an expression involving the function values. } \usage{ apply.ssf(X, \dots) \method{with}{ssf}(data, \dots) } \arguments{ \item{X, data}{ A spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link{with.default}} or \code{\link{apply}} specifying what to compute. } } \details{ An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. It contains a data frame which provides the function values at the sample points. In \code{with.ssf}, the expression specified by \code{\dots} will be evaluated in this dataframe. In \code{apply.ssf}, the dataframe will be subjected to the \code{\link{apply}} operator using the additional arguments \code{\dots}. If the result of evaluation is a data frame with one row for each data point, or a numeric vector with one entry for each data point, then the result will be an object of class \code{"ssf"} containing this information. Otherwise, the result will be a numeric vector. } \value{ An object of class \code{"ssf"} or a numeric vector. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, data.frame(d=nndist(cells), i=1:npoints(cells))) with(a, i/d) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/eval.fasp.Rd0000644000176200001440000000570613333543263015053 0ustar liggesusers\name{eval.fasp} \alias{eval.fasp} \title{Evaluate Expression Involving Function Arrays} \description{ Evaluates any expression involving one or more function arrays (\code{fasp} objects) and returns another function array. 
} \usage{ eval.fasp(expr, envir, dotonly=TRUE) } \arguments{ \item{expr}{ An expression involving the names of objects of class \code{"fasp"}. } \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fasp"} objects to be used in the expression. } \item{dotonly}{Logical. Passed to \code{\link{eval.fv}}.} } \details{ This is a wrapper to make it easier to perform pointwise calculations with the arrays of summary functions used in spatial statistics. A function array (object of class \code{"fasp"}) can be regarded as a matrix whose entries are functions. Objects of this kind are returned by the command \code{\link{alltypes}}. Suppose \code{X} is an object of class \code{"fasp"}. Then \code{eval.fasp(X+3)} effectively adds 3 to the value of every function in the array \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fasp"} which are compatible (for example the arrays must have the same dimensions). Then \code{eval.fasp(X + Y)} will add the corresponding functions in each cell of the arrays \code{X} and \code{Y}, and return the resulting array of functions. Suppose \code{X} is an object of class \code{"fasp"} and \code{f} is an object of class \code{"fv"}. Then \code{eval.fasp(X + f)} will add the function \code{f} to the functions in each cell of the array \code{X}, and return the resulting array of functions. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fasp"} or \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fasp} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fasp"}. The expression is then evaluated for each cell of the array using \code{\link{eval.fv}}. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fasp"} in the expression. 
All such objects must be compatible. } \value{ Another object of class \code{"fasp"}. } \seealso{ \code{\link{fasp.object}}, \code{\link{Kest}} } \examples{ # manipulating the K function K <- alltypes(amacrine, "K") # expressions involving a fasp object eval.fasp(K + 3) L <- eval.fasp(sqrt(K/pi)) # expression involving two fasp objects D <- eval.fasp(K - L) # subtracting the unmarked K function from the cross-type K functions K0 <- Kest(unmark(amacrine)) DK <- eval.fasp(K - K0) ## Use of 'envir' S <- eval.fasp(1-G, list(G=alltypes(amacrine, "G"))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/as.linim.Rd0000644000176200001440000000575113333543262014705 0ustar liggesusers\name{as.linim} \alias{as.linim} \alias{as.linim.linim} \alias{as.linim.linfun} \alias{as.linim.default} \title{Convert to Pixel Image on Linear Network} \description{ Converts various kinds of data to a pixel image on a linear network. } \usage{ as.linim(X, \dots) \method{as.linim}{linim}(X, \dots) \method{as.linim}{default}(X, L, \dots, eps = NULL, dimyx = NULL, xy = NULL, delta=NULL) \method{as.linim}{linfun}(X, L=domain(X), \dots, eps = NULL, dimyx = NULL, xy = NULL, delta=NULL) } \arguments{ \item{X}{ Data to be converted to a pixel image on a linear network. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function. } \item{eps,dimyx,xy}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution. } \item{delta}{ Optional. Numeric value giving the approximate distance (in coordinate units) between successive sample points along each segment of the network. } } \details{ This function converts the data \code{X} into a pixel image on a linear network, an object of class \code{"linim"} (see \code{\link{linim}}). 
The argument \code{X} may be any of the following: \itemize{ \item a function on a linear network, an object of class \code{"linfun"}. \item a pixel image on a linear network, an object of class \code{"linim"}. \item a pixel image, an object of class \code{"im"}. \item any type of data acceptable to \code{\link{as.im}}, such as a function, numeric value, or window. } First \code{X} is converted to a pixel image object \code{Y} (object of class \code{"im"}). The conversion is performed by \code{\link{as.im}}. The arguments \code{eps}, \code{dimyx} and \code{xy} determine the pixel resolution. Next \code{Y} is converted to a pixel image on a linear network using \code{\link{linim}}. The argument \code{L} determines the linear network. If \code{L} is missing or \code{NULL}, then \code{X} should be an object of class \code{"linim"}, and \code{L} defaults to the linear network on which \code{X} is defined. In addition to converting the function to a pixel image, the algorithm also generates a fine grid of sample points evenly spaced along each segment of the network (with spacing at most \code{delta} coordinate units). The function values at these sample points are stored in the resulting object as a data frame (the argument \code{df} of \code{\link{linim}}). This mechanism allows greater accuracy for some calculations (such as \code{\link{integral.linim}}). } \value{ An image object on a linear network; an object of class \code{"linim"}. } \seealso{ \code{\link{as.im}} } \examples{ f <- function(x,y){ x + y } plot(as.linim(f, simplenet)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/Extract.psp.Rd0000644000176200001440000000633613333543263015407 0ustar liggesusers\name{Extract.psp} \alias{[.psp} \title{Extract Subset of Line Segment Pattern} \description{ Extract a subset of a line segment pattern. } \usage{ \method{[}{psp}(x, i, j, drop, \dots, fragments=TRUE) } \arguments{ \item{x}{ A two-dimensional line segment pattern. 
An object of class \code{"psp"}. } \item{i}{ Subset index. Either a valid subset index in the usual \R sense, indicating which segments should be retained, or a window (an object of class \code{"owin"}) delineating a subset of the original observation window. } \item{j}{ Redundant - included for backward compatibility. } \item{drop}{ Ignored. Required for compatibility with generic function. } \item{\dots}{ Ignored. } \item{fragments}{ Logical value indicating whether to retain all pieces of line segments that intersect the new window (\code{fragments=TRUE}, the default) or to retain only those line segments that lie entirely inside the new window (\code{fragments=FALSE}). } } \value{ A line segment pattern (of class \code{"psp"}). } \details{ These functions extract a designated subset of a line segment pattern. The function \code{[.psp} is a method for \code{\link{[}} for the class \code{"psp"}. It extracts a designated subset of a line segment pattern, either by ``\emph{thinning}'' (retaining/deleting some line segments of a line segment pattern) or ``\emph{trimming}'' (reducing the window of observation to a smaller subregion and clipping the line segments to this boundary) or both. The pattern will be ``thinned'' if \code{subset} is specified. The line segments designated by \code{subset} will be retained. Here \code{subset} can be a numeric vector of positive indices (identifying the line segments to be retained), a numeric vector of negative indices (identifying the line segments to be deleted) or a logical vector of length equal to the number of line segments in the line segment pattern \code{x}. In the latter case, the line segments for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The pattern will be ``trimmed'' if \code{window} is specified. This should be an object of class \code{\link{owin}} specifying a window of observation to which the line segment pattern \code{x} will be trimmed. 
Line segments of \code{x} lying inside the new \code{window} will be retained unchanged. Line segments lying partially inside the new \code{window} and partially outside it will, by default, be clipped so that they lie entirely inside the window; but if \code{fragments=FALSE}, such segments will be removed. Both ``thinning'' and ``trimming'' can be performed together. } \seealso{ \code{\link{psp.object}}, \code{\link{owin.object}} } \examples{ a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a) # thinning id <- sample(c(TRUE, FALSE), 20, replace=TRUE) b <- a[id] plot(b, add=TRUE, lwd=3) # trimming plot(a) w <- owin(c(0.1,0.7), c(0.2, 0.8)) b <- a[w] plot(b, add=TRUE, col="red", lwd=2) plot(w, add=TRUE) u <- a[w, fragments=FALSE] plot(u, add=TRUE, col="blue", lwd=3) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/rjitter.Rd0000644000176200001440000000427413333543264014657 0ustar liggesusers\name{rjitter} \alias{rjitter} \title{Random Perturbation of a Point Pattern} \description{ Applies independent random displacements to each point in a point pattern. } \usage{ rjitter(X, radius, retry=TRUE, giveup = 10000, \dots, nsim=1, drop=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{radius}{ Scale of perturbations. A positive numerical value. The displacement vectors will be uniformly distributed in a circle of this radius. There is a sensible default. } \item{retry}{ What to do when a perturbed point lies outside the window of the original point pattern. If \code{retry=FALSE}, the point will be lost; if \code{retry=TRUE}, the algorithm will try again. } \item{giveup}{ Maximum number of unsuccessful attempts. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. 
} } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}, in the same window as \code{X}. } \details{ Each of the points in the point pattern \code{X} is subjected to an independent random displacement. The displacement vectors are uniformly distributed in a circle of radius \code{radius}. If a displaced point lies outside the window, then if \code{retry=FALSE} the point will be lost. However if \code{retry=TRUE}, the algorithm will try again: each time a perturbed point lies outside the window, the algorithm will reject it and generate another proposed perturbation of the original point, until one lies inside the window, or until \code{giveup} unsuccessful attempts have been made. In the latter case, any unresolved points will be included without any perturbation. The return value will always be a point pattern with the same number of points as \code{X}. } \examples{ X <- rsyst(owin(), 10, 10) Y <- rjitter(X, 0.02) plot(Y) Z <- rjitter(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/pcf.Rd0000644000176200001440000000714113333543264013740 0ustar liggesusers\name{pcf} \alias{pcf} \title{Pair Correlation Function} \description{ Estimate the pair correlation function. } \usage{ pcf(X, \dots) } \arguments{ \item{X}{ Either the observed data point pattern, or an estimate of its \eqn{K} function, or an array of multitype \eqn{K} functions (see Details). } \item{\dots}{ Other arguments passed to the appropriate method. } } \value{ Either a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function, or a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. 
} \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} either directly from a point pattern, or indirectly from an estimate of \eqn{K(r)} or one of its variants. This function is generic, with methods for the classes \code{"ppp"}, \code{"fv"} and \code{"fasp"}. If \code{X} is a point pattern (object of class \code{"ppp"}) then the pair correlation function is estimated using a traditional kernel smoothing method (Stoyan and Stoyan, 1994). See \code{\link{pcf.ppp}} for details. If \code{X} is a function value table (object of class \code{"fv"}), then it is assumed to contain estimates of the \eqn{K} function or one of its variants (typically obtained from \code{\link{Kest}} or \code{\link{Kinhom}}). This routine computes an estimate of \eqn{g(r)} using smoothing splines to approximate the derivative. See \code{\link{pcf.fv}} for details. If \code{X} is a function value array (object of class \code{"fasp"}), then it is assumed to contain estimates of several \eqn{K} functions (typically obtained from \code{\link{Kmulti}} or \code{\link{alltypes}}). 
This routine computes an estimate of \eqn{g(r)} for each cell in the array, using smoothing splines to approximate the derivatives. See \code{\link{pcf.fasp}} for details. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcf.fv}}, \code{\link{pcf.fasp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}} } \examples{ # ppp object X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p) # fv object K <- Kest(X) p2 <- pcf(K, spar=0.8, method="b") plot(p2) # multitype pattern; fasp object amaK <- alltypes(amacrine, "K") amap <- pcf(amaK, spar=1, method="b") plot(amap) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/append.psp.Rd0000644000176200001440000000226713564435130015242 0ustar liggesusers\name{append.psp} \alias{append.psp} \title{Combine Two Line Segment Patterns} \description{ Combine two line segment patterns into a single pattern. } \usage{ append.psp(A, B) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } } \value{ Another line segment pattern (object of class \code{"psp"}). } \details{ This function is used to superimpose two line segment patterns \code{A} and \code{B}. The two patterns must have \bold{identical} windows. If one pattern has marks, then the other must also have marks of the same type. If the marks are data frames then the number of columns of these data frames, and the names of the columns must be identical. (To combine two point patterns, see \code{superimpose}). If one of the arguments is \code{NULL}, it will be ignored and the other argument will be returned. 
} \seealso{ \code{\link{psp}}, \code{\link{as.psp}}, \code{\link{superimpose}}, } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- psp(runif(5), runif(5), runif(5), runif(5), window=owin()) append.psp(X,Y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/integral.linim.Rd0000644000176200001440000000311313333543263016076 0ustar liggesusers\name{integral.linim} \alias{integral.linim} \alias{integral.linfun} \title{ Integral on a Linear Network } \description{ Computes the integral (total value) of a function or pixel image over a linear network. } \usage{ \method{integral}{linim}(f, domain=NULL, ...) \method{integral}{linfun}(f, domain=NULL, ..., delta) } \arguments{ \item{f}{ A pixel image on a linear network (class \code{"linim"}) or a function on a linear network (class \code{"linfun"}). } \item{domain}{ Optional window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. } \item{delta}{ Optional. The step length (in coordinate units) for computing the approximate integral. A single positive number. } } \details{ The integral (total value of the function over the network) is calculated. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. } \value{ A single numeric or complex value (or a vector of such values if \code{domain} is a tessellation). } \seealso{ \code{\link{linim}}, \code{\link{integral.im}} } \examples{ # make some data xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet) integral(xcoord) X <- as.linim(xcoord) integral(X) # integrals inside each tile of a tessellation A <- quadrats(Frame(simplenet), 3) integral(X, A) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{math} spatstat/man/rHardcore.Rd0000644000176200001440000000666213602545270015106 0ustar liggesusers\name{rHardcore} \alias{rHardcore} \title{Perfect Simulation of the Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Hardcore process, using a perfect simulation algorithm. } \usage{ rHardcore(beta, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{R}{ hard core distance (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Hardcore process is a model for strong spatial inhibition. Two points of the process are forbidden to lie closer than \code{R} units apart. The Hardcore process is the special case of the Strauss process (see \code{\link{rStrauss}}) with interaction parameter \eqn{\gamma}{gamma} equal to zero. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). 
This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ \adrian based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rHardcore(0.05,1.5,square(141.4)) Z <- rHardcore(100,0.05, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{Hardcore}}, \code{\link{rStrauss}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}. \code{\link{rDGS}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/gridweights.Rd0000644000176200001440000000436213333543263015511 0ustar liggesusers\name{gridweights} \alias{gridweights} \title{Compute Quadrature Weights Based on Grid Counts} \description{ Computes quadrature weights for a given set of points, using the ``counting weights'' for a grid of rectangular tiles. 
} \usage{ gridweights(X, ntile, \dots, window=NULL, verbose=FALSE, npix=NULL, areas=NULL) } \arguments{ \item{X}{Data defining a point pattern.} \item{ntile}{Number of tiles in each row and column of the rectangular grid. An integer vector of length 1 or 2. } \item{\dots}{Ignored.} \item{window}{Default window for the point pattern} \item{verbose}{Logical flag. If \code{TRUE}, information will be printed about the computation of the grid weights. } \item{npix}{Dimensions of pixel grid to use when computing a digital approximation to the tile areas. } \item{areas}{Vector of areas of the tiles, if they are already known.} } \value{ Vector of nonnegative weights for each point in \code{X}. } \details{ This function computes a set of quadrature weights for a given pattern of points (typically comprising both ``data'' and `dummy'' points). See \code{\link{quad.object}} for an explanation of quadrature weights and quadrature schemes. The weights are computed by the ``counting weights'' rule based on a regular grid of rectangular tiles. First \code{X} and (optionally) \code{window} are converted into a point pattern object. Then the bounding rectangle of the window of the point pattern is divided into a regular \code{ntile[1] * ntile[2]} grid of rectangular tiles. The weight attached to a point of \code{X} is the area of the tile in which it lies, divided by the number of points of \code{X} lying in that tile. For non-rectangular windows the tile areas are currently calculated by approximating the window as a binary mask. The accuracy of this approximation is controlled by \code{npix}, which becomes the argument \code{dimyx} of \code{\link{as.mask}}. 
} \seealso{ \code{\link{quad.object}}, \code{\link{dirichletWeights}} } \examples{ Q <- quadscheme(runifpoispp(10)) X <- as.ppp(Q) # data and dummy points together w <- gridweights(X, 10) w <- gridweights(X, c(10, 10)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/quantess.Rd0000644000176200001440000001070013443162256015026 0ustar liggesusers\name{quantess} \alias{quantess} \alias{quantess.owin} \alias{quantess.ppp} \alias{quantess.im} \title{Quantile Tessellation} \description{ Divide space into tiles which contain equal amounts of stuff. } \usage{ quantess(M, Z, n, \dots) \method{quantess}{owin}(M, Z, n, \dots, type=2, origin=c(0,0), eps=NULL) \method{quantess}{ppp}(M, Z, n, \dots, type=2, origin=c(0,0), eps=NULL) \method{quantess}{im}(M, Z, n, \dots, type=2, origin=c(0,0)) } \arguments{ \item{M}{ A spatial object (such as a window, point pattern or pixel image) determining the weight or amount of stuff at each location. } \item{Z}{ A spatial covariate (a pixel image or a \code{function(x,y)}) or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates \eqn{x} or \eqn{y}, or one of the strings \code{"rad"} or \code{"ang"} indicating polar coordinates. The range of values of \code{Z} will be broken into \code{n} bands containing equal amounts of stuff. } \item{n}{ Number of bands. A positive integer. } \item{type}{ Integer specifying the rule for calculating quantiles. Passed to \code{\link[stats]{quantile.default}}. } \item{\dots}{ Additional arguments passed to \code{\link{quadrats}} or \code{\link{tess}} defining another tessellation which should be intersected with the quantile tessellation. } \item{origin}{ Location of the origin of polar coordinates, if \code{Z="rad"} or \code{Z="ang"}. 
Either a numeric vector of length 2 giving the location, or a point pattern containing only one point, or a list with two entries named \code{x} and \code{y}, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). } \item{eps}{ Optional. The size of pixels in the approximation which is used to compute the quantiles. A positive numeric value, or vector of two positive numeric values. } } \details{ A \emph{quantile tessellation} is a division of space into pieces which contain equal amounts of stuff. The function \code{quantess} computes a quantile tessellation and returns the tessellation itself. The function \code{quantess} is generic, with methods for windows (class \code{"owin"}), point patterns (\code{"ppp"}) and pixel images (\code{"im"}). The first argument \code{M} (for mass) specifies the spatial distribution of stuff that is to be divided. If \code{M} is a window, the \emph{area} of the window is to be divided into \code{n} equal pieces. If \code{M} is a point pattern, the \emph{number of points} in the pattern is to be divided into \code{n} equal parts, as far as possible. If \code{M} is a pixel image, the pixel values are interpreted as weights, and the \emph{total weight} is to be divided into \code{n} equal parts. The second argument \code{Z} is a spatial covariate. The range of values of \code{Z} will be divided into \code{n} bands, each containing the same total weight. That is, we determine the quantiles of \code{Z} with weights given by \code{M}. For convenience, additional arguments \code{\dots} can be given, to further subdivide the tiles of the tessellation. These arguments should be recognised by one of the functions \code{\link{quadrats}} or \code{\link{tess}}. The tessellation determined by these arguments is intersected with the quantile tessellation. 
The result of \code{quantess} is a tessellation of \code{as.owin(M)} determined by the quantiles of \code{Z}. } \value{ A tessellation (object of class \code{"tess"}). } \author{ Original idea by Ute Hahn. Implemented in \code{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{tess}}, \code{\link{quadrats}}, \code{\link{quantile}}, \code{\link{tilenames}} } \examples{ plot(quantess(letterR, "x", 5)) plot(quantess(bronzefilter, "x", 6)) points(unmark(bronzefilter)) plot(quantess(letterR, "rad", 7, origin=c(2.8, 1.5))) plot(quantess(letterR, "ang", 7, origin=c(2.8, 1.5))) opa <- par(mar=c(0,0,2,5)) A <- quantess(Window(bei), bei.extra$elev, 4) plot(A, ribargs=list(las=1)) B <- quantess(bei, bei.extra$elev, 4) tilenames(B) <- paste(spatstat.utils::ordinal(1:4), "quartile") plot(B, ribargs=list(las=1)) points(bei, pch=".", cex=2, col="white") par(opa) } \keyword{spatial} \keyword{manip} spatstat/man/dppMatern.Rd0000644000176200001440000000243613571674202015125 0ustar liggesusers\name{dppMatern} \alias{dppMatern} \title{Whittle-Matern Determinantal Point Process Model} \description{ Function generating an instance of the Whittle-\Matern determinantal point process model } \usage{dppMatern(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details. } } \details{ The Whittle-\Matern DPP is defined in (Lavancier, \Moller and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \Moller, J. and Rubak, E. 
(2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877. } \examples{ m <- dppMatern(lambda=100, alpha=.02, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppPowerExp}} } spatstat/man/selfcut.psp.Rd0000644000176200001440000000303013424530024015426 0ustar liggesusers\name{selfcut.psp} \alias{selfcut.psp} \title{Cut Line Segments Where They Intersect} \description{ Finds any crossing points between the line segments in a line segment pattern, and cuts the segments into pieces at these crossing-points. } \usage{ selfcut.psp(A, \dots, eps) } \arguments{ \item{A}{ Line segment pattern (object of class \code{"psp"}). } \item{eps}{ Optional. Smallest permissible length of the resulting line segments. There is a sensible default. } \item{\dots}{Ignored.} } \details{ This function finds any crossing points between different line segments in the line segment pattern \code{A}, and cuts the line segments into pieces at these intersection points. A crossing point occurs whenever one of the line segments in \code{A} intersects another line segment in \code{A}, at a nonzero angle of intersection. } \value{ Another line segment pattern (object of class \code{"psp"}) in the same window as \code{A} with the same kind of marks as \code{A}. The result also has an attribute \code{"camefrom"} indicating the provenance of each segment in the result. For example \code{camefrom[3]=2} means that the third segment in the result is a piece of the second segment of \code{A}. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{selfcrossing.psp}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) Y <- selfcut.psp(X) n <- nsegments(Y) plot(Y \%mark\% factor(sample(seq_len(n), n, replace=TRUE))) } \keyword{spatial} \keyword{manip} spatstat/man/pool.envelope.Rd0000644000176200001440000000544413333543264015761 0ustar liggesusers\name{pool.envelope} \alias{pool.envelope} \title{ Pool Data from Several Envelopes } \description{ Pool the simulation data from several simulation envelopes (objects of class \code{"envelope"}) and compute a new envelope. } \usage{ \method{pool}{envelope}(..., savefuns=FALSE, savepatterns=FALSE) } \arguments{ \item{\dots}{ Objects of class \code{"envelope"}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"envelope"} of simulation envelopes. It is used to combine the simulation data from several simulation envelopes and to compute an envelope based on the combined data. Each of the arguments \code{\dots} must be an object of class \code{"envelope"}. These envelopes must be compatible, in that they are envelopes for the same function, and were computed using the same options. \itemize{ \item In normal use, each envelope object will have been created by running the command \code{\link{envelope}} with the argument \code{savefuns=TRUE}. This ensures that each object contains the simulated data (summary function values for the simulated point patterns) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new envelope is computed from the combined set of simulations. \item Alternatively, if each envelope object was created by running \code{\link{envelope}} with \code{VARIANCE=TRUE}, then the saved functions are not required. 
The sample means and sample variances from each envelope will be pooled. A new envelope is computed from the pooled mean and variance. } Warnings or errors will be issued if the envelope objects \code{\dots} appear to be incompatible. Apart from these basic checks, the code is not smart enough to decide whether it is sensible to pool the data. To modify the envelope parameters or the type of envelope that is computed, first pool the envelope data using \code{pool.envelope}, then use \code{\link{envelope.envelope}} to modify the envelope parameters. } \value{ An object of class \code{"envelope"}. } \seealso{ \code{\link{envelope}}, \code{\link{envelope.envelope}}, \code{\link{pool}}, \code{\link{pool.fasp}} } \examples{ E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE) E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE) pool(E1, E2) V1 <- envelope(E1, VARIANCE=TRUE) V2 <- envelope(E2, VARIANCE=TRUE) pool(V1, V2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/as.linnet.psp.Rd0000644000176200001440000000367013333543262015665 0ustar liggesusers\name{as.linnet.psp} \alias{as.linnet.psp} \title{ Convert Line Segment Pattern to Linear Network } \description{ Converts a line segment pattern to a linear network. } \usage{ \method{as.linnet}{psp}(X, \dots, eps, sparse=FALSE) } \arguments{ \item{X}{ Line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{eps}{ Optional. Distance threshold. If two segment endpoints are closer than \code{eps} units apart, they will be treated as the same point, and will become a single vertex in the linear network. } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. } } \details{ This command converts any collection of line segments into a linear network by guessing the connectivity of the network, using the distance threshold \code{eps}. 
If any segments in \code{X} cross over each other, they are first cut into pieces using \code{\link{selfcut.psp}}. Then any pair of segment endpoints lying closer than \code{eps} units apart, is treated as a single vertex. The linear network is then constructed using \code{\link{linnet}}. It would be wise to check the result by plotting the degree of each vertex, as shown in the Examples. If \code{X} has marks, then these are stored in the resulting linear network \code{Y <- as.linnet(X)}, and can be extracted as \code{marks(as.psp(Y))} or \code{marks(Y$lines)}. } \value{ A linear network (object of class \code{"linnet"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{linnet}}, \code{\link{selfcut.psp}}, \code{\link{methods.linnet}}. } \examples{ # make some data A <- psp(0.09, 0.55, 0.79, 0.80, window=owin()) B <- superimpose(A, as.psp(simplenet)) # convert to a linear network D <- as.linnet(B) # check validity D plot(D) text(vertices(D), labels=vertexdegree(D)) } \keyword{spatial} \keyword{manip} spatstat/man/hierpair.family.Rd0000644000176200001440000000172413333543263016253 0ustar liggesusers\name{hierpair.family} \alias{hierpair.family} \title{Hierarchical Pairwise Interaction Process Family} \description{ An object describing the family of all hierarchical pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the hierarchical pairwise interaction family of point process models. Anyway, \code{hierpair.family} is an object of class \code{"isf"} containing a function \code{hierpair.family$eval} for evaluating the sufficient statistics of any hierarchical pairwise interaction point process model taking an exponential family form. } \seealso{ Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Hierarchical Strauss interaction: \code{\link{HierStrauss}}. 
} \author{\adrian \rolf and \ege. } \keyword{spatial} \keyword{models} spatstat/man/methods.slrm.Rd0000644000176200001440000000331113333543263015601 0ustar liggesusers\name{methods.slrm} \alias{methods.slrm} %DoNotExport \alias{formula.slrm} \alias{update.slrm} \alias{print.slrm} \alias{terms.slrm} \alias{labels.slrm} \title{ Methods for Spatial Logistic Regression Models } \description{ These are methods for the class \code{"slrm"}. } \usage{ \method{formula}{slrm}(x, \dots) \method{print}{slrm}(x, ...) \method{terms}{slrm}(x, \dots) \method{labels}{slrm}(object, \dots) \method{update}{slrm}(object, ..., evaluate = TRUE, env = parent.frame()) } \arguments{ \item{x,object}{ An object of class \code{"slrm"}, representing a fitted spatial logistic regression model. } \item{\dots}{ Arguments passed to other methods. } \item{evaluate}{ Logical value. If \code{TRUE}, evaluate the updated call to \code{slrm}, so that the model is refitted; if \code{FALSE}, simply return the updated call. } \item{env}{ Optional environment in which the model should be updated. } } \details{ These functions are methods for the generic commands \code{\link{formula}}, \code{\link{update}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"slrm"}. An object of class \code{"slrm"} represents a fitted spatial logistic regression model. It is obtained from \code{\link{slrm}}. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{simulate.slrm}}, \code{\link{vcov.slrm}}, \code{\link{coef.slrm}}. 
} \examples{ data(redwood) fit <- slrm(redwood ~ x) coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/runifpoint3.Rd0000644000176200001440000000233513333543264015450 0ustar liggesusers\name{runifpoint3} \alias{runifpoint3} \title{ Generate N Uniform Random Points in Three Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in three dimensions. } \usage{ runifpoint3(n, domain = box3(), nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in three dimensions (an object of class \code{"pp3"}). If \code{nsim > 1}, a list of such point patterns. } \details{ This function generates \code{n} independent random points, uniformly distributed in the three-dimensional box \code{domain}. } \seealso{ \code{\link{rpoispp3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- runifpoint3(50) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/crossdist.ppp.Rd0000644000176200001440000000474213333543263016006 0ustar liggesusers\name{crossdist.ppp} \alias{crossdist.ppp} \title{Pairwise distances between two different point patterns} \description{ Computes the distances between pairs of points taken from two different point patterns. } \usage{ \method{crossdist}{ppp}(X, Y, \dots, periodic=FALSE, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. 
} \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for point patterns (objects of class \code{"ppp"}). This function expects two point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. Alternatively if \code{periodic=TRUE}, then provided the windows containing \code{X} and \code{Y} are identical and are rectangular, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by a factor of 4. 
} \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.default}}, \code{\link{crossdist.psp}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ data(cells) d <- crossdist(cells, runifpoint(6)) d <- crossdist(cells, runifpoint(6), periodic=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/markcrosscorr.Rd0000644000176200001440000000730113333543263016057 0ustar liggesusers\name{markcrosscorr} \alias{markcrosscorr} \title{ Mark Cross-Correlation Function } \description{ Given a spatial point pattern with several columns of marks, this function computes the mark correlation function between each pair of columns of marks. } \usage{ markcrosscorr(X, r = NULL, correction = c("isotropic", "Ripley", "translate"), method = "density", \dots, normalise = TRUE, Xname = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. 
} \item{Xname}{ Optional character string name for the dataset \code{X}. } } \details{ First, all columns of marks are converted to numerical values. A factor with \eqn{m} possible levels is converted to \eqn{m} columns of dummy (indicator) values. Next, each pair of columns is considered, and the mark cross-correlation is defined as \deqn{ k_{mm}(r) = \frac{E_{0u}[M_i(0) M_j(u)]}{E[M_i M_j]} }{ k[mm](r) = E[0u](M(i,0) * M(j,u))/E(Mi * Mj) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}. On the numerator, \eqn{M_i(0)}{M(i,0)} and \eqn{M_j(u)}{M(j,u)} are the marks attached to locations \eqn{0} and \eqn{u} respectively in the \eqn{i}th and \eqn{j}th columns of marks respectively. On the denominator, \eqn{M_i}{Mi} and \eqn{M_j}{Mj} are independent random values drawn from the \eqn{i}th and \eqn{j}th columns of marks, respectively, and \eqn{E} is the usual expectation. Note that \eqn{k_{mm}(r)}{k[mm](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_{mm}(r) \equiv 1}{k[mm](r) = 1}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern. The cross-correlations are estimated in the same manner as for \code{\link{markcorr}}. } \value{ A function array (object of class \code{"fasp"}) containing the mark cross-correlation functions for each possible pair of columns of marks.
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{markcorr}} } \examples{ # The dataset 'betacells' has two columns of marks: # 'type' (factor) # 'area' (numeric) if(interactive()) plot(betacells) plot(markcrosscorr(betacells)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/integral.im.Rd0000644000176200001440000000413313333543263015376 0ustar liggesusers\name{integral.im} \alias{integral} \alias{integral.im} \title{ Integral of a Pixel Image } \description{ Computes the integral of a pixel image. } \usage{ integral(f, domain=NULL, \dots) \method{integral}{im}(f, domain=NULL, \dots) } \arguments{ \item{f}{ A pixel image (object of class \code{"im"}) with pixel values that can be treated as numeric or complex values. } \item{domain}{ Optional. Window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. } } \details{ The function \code{integral} is generic, with methods for \code{"im"}, \code{"msr"}, \code{"linim"} and \code{"linfun"}. The method \code{integral.im} treats the pixel image \code{f} as a function of the spatial coordinates, and computes its integral. The integral is calculated by summing the pixel values and multiplying by the area of one pixel. The pixel values of \code{f} may be numeric, integer, logical or complex. They cannot be factor or character values. The logical values \code{TRUE} and \code{FALSE} are converted to \code{1} and \code{0} respectively, so that the integral of a logical image is the total area of the \code{TRUE} pixels, in the same units as \code{unitname(x)}. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. } \value{ A single numeric or complex value (or a vector of such values if \code{domain} is a tessellation). 
} \seealso{ \code{\link{eval.im}}, \code{\link{[.im}} } \examples{ # approximate integral of f(x,y) dx dy f <- function(x,y){3*x^2 + 2*y} Z <- as.im(f, square(1)) integral.im(Z) # correct answer is 2 D <- density(cells) integral.im(D) # should be approximately equal to number of points = 42 # integrate over the subset [0.1,0.9] x [0.2,0.8] W <- owin(c(0.1,0.9), c(0.2,0.8)) integral.im(D, W) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/dppapproxpcf.Rd0000644000176200001440000000156613333543263015702 0ustar liggesusers\name{dppapproxpcf} \alias{dppapproxpcf} \title{Approximate Pair Correlation Function of Determinantal Point Process Model} \description{ Returns an approximation to the theoretical pair correlation function of a determinantal point process model, as a function of one argument \eqn{x}. } \usage{dppapproxpcf(model, trunc = 0.99, W = NULL)} \arguments{ \item{model}{Object of class \code{"detpointprocfamily"}.} \item{trunc}{Numeric specifying how the model truncation is performed. See Details section of \code{\link{simulate.detpointprocfamily}}.} \item{W}{Optional window -- undocumented at the moment.} } \details{This function is usually NOT needed for anything. It only exists for investigative purposes.} \author{ \adrian \rolf and \ege } \examples{ f <- dppapproxpcf(dppMatern(lambda = 100, alpha=.028, nu=1, d=2)) plot(f, xlim = c(0,0.1)) } spatstat/man/plot.texturemap.Rd0000644000176200001440000000537313333543264016350 0ustar liggesusers\name{plot.texturemap} \alias{plot.texturemap} \title{ Plot a Texture Map } \description{ Plot a representation of a texture map, similar to a plot legend. } \usage{ \method{plot}{texturemap}(x, \dots, main, xlim = NULL, ylim = NULL, vertical = FALSE, axis = TRUE, labelmap = NULL, gap = 0.25, spacing = NULL, add = FALSE) } \arguments{ \item{x}{ Texture map object (class \code{"texturemap"}). 
} \item{\dots}{ Additional graphics arguments passed to \code{\link{add.texture}} or \code{\link{axis.default}}. } \item{main}{ Main title for plot. } \item{xlim,ylim}{ Optional vectors of length 2 giving the \eqn{x} and \eqn{y} limits of the plot. } \item{vertical}{ Logical value indicating whether to arrange the texture boxes in a vertical column (\code{vertical=TRUE}) or a horizontal row (\code{vertical=FALSE}, the default). } \item{axis}{ Logical value indicating whether to plot an axis line joining the texture boxes. } \item{labelmap}{ Optional. A \code{function} which will be applied to the data values (the inputs of the texture map) before they are displayed on the plot. } \item{gap}{ Separation between texture boxes, as a fraction of the width or height of a box. } \item{spacing}{ Argument passed to \code{\link{add.texture}} controlling the density of lines in a texture. Expressed in spatial coordinate units. } \item{add}{ Logical value indicating whether to add the graphics to an existing plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}, the default). } } \details{ A texture map is an association between data values and graphical textures. An object of class \code{"texturemap"} represents a texture map. Such objects are returned from the plotting function \code{\link{textureplot}}, and can be created directly by the function \code{\link{texturemap}}. This function \code{plot.texturemap} is a method for the generic \code{\link{plot}} for the class \code{"texturemap"}. It displays a sample of each of the textures in the texture map, in a separate box, annotated by the data value which is mapped to that texture. The arrangement and position of the boxes is controlled by the arguments \code{vertical}, \code{xlim}, \code{ylim} and \code{gap}. } \value{ Null. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{texturemap}}, \code{\link{textureplot}}, \code{\link{add.texture}}.
} \examples{ tm <- texturemap(c("First", "Second", "Third"), 2:4, col=2:4) plot(tm, vertical=FALSE) ## abbreviate the labels plot(tm, labelmap=function(x) substr(x, 1, 2)) } \keyword{spatial} \keyword{hplot} spatstat/man/fv.Rd0000644000176200001440000001627613333543263013613 0ustar liggesusers\name{fv} \alias{fv} \title{ Create a Function Value Table } \description{ Advanced Use Only. This low-level function creates an object of class \code{"fv"} from raw numerical data. } \usage{ fv(x, argu = "r", ylab = NULL, valu, fmla = NULL, alim = NULL, labl = names(x), desc = NULL, unitname = NULL, fname = NULL, yexp = ylab) } \arguments{ \item{x}{ A data frame with at least 2 columns containing the values of the function argument and the corresponding values of (one or more versions of) the function. } \item{argu}{ String. The name of the column of \code{x} that contains the values of the function argument. } \item{ylab}{ Either \code{NULL}, or an \R language expression representing the mathematical name of the function. See Details. } \item{valu}{ String. The name of the column of \code{x} that should be taken as containing the function values, in cases where a single column is required. } \item{fmla}{ Either \code{NULL}, or a \code{formula} specifying the default plotting behaviour. See Details. } \item{alim}{ Optional. The default range of values of the function argument for which the function will be plotted. Numeric vector of length 2. } \item{labl}{ Optional. Plot labels for the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{desc}{ Optional. Descriptions of the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{unitname}{ Optional. Name of the unit (usually a unit of length) in which the function argument is expressed. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{fname}{ Optional. 
The name of the function itself. A character string. } \item{yexp}{ Optional. Alternative form of \code{ylab} more suitable for annotating an axis of the plot. See Details. } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. Other users please see \code{\link{fv.object}}. The low-level function \code{fv} is used to create an object of class \code{"fv"} from raw numerical data. The data frame \code{x} contains the numerical data. It should have one column (typically but not necessarily named \code{"r"}) giving the values of the function argument for which the function has been evaluated; and at least one other column, containing the corresponding values of the function. Typically there is more than one column of function values. These columns typically give the values of different versions or estimates of the same function, for example, different estimates of the \eqn{K} function obtained using different edge corrections. However they may also contain the values of related functions such as the derivative or hazard rate. \code{argu} specifies the name of the column of \code{x} that contains the values of the function argument (typically \code{argu="r"} but this is not compulsory). \code{valu} specifies the name of another column that contains the \sQuote{recommended} estimate of the function. It will be used to provide function values in those situations where a single column of data is required. For example, \code{\link{envelope}} computes its simulation envelopes using the recommended value of the summary function. \code{fmla} specifies the default plotting behaviour. It should be a formula, or a string that can be converted to a formula. Variables in the formula are names of columns of \code{x}. See \code{\link{plot.fv}} for the interpretation of this formula. \code{alim} specifies the recommended range of the function argument. 
This is used in situations where statistical theory or statistical practice indicates that the computed estimates of the function are not trustworthy outside a certain range of values of the function argument. By default, \code{\link{plot.fv}} will restrict the plot to this range. \code{fname} is a string giving the name of the function itself. For example, the \eqn{K} function would have \code{fname="K"}. \code{ylab} is a mathematical expression for the function value, used when labelling an axis of the plot, or when printing a description of the function. It should be an \R language object. For example the \eqn{K} function's mathematical name \eqn{K(r)} is rendered by \code{ylab=quote(K(r))}. If \code{yexp} is present, then \code{ylab} will be used only for printing, and \code{yexp} will be used for annotating axes in a plot. (Otherwise \code{yexp} defaults to \code{ylab}). For example the cross-type \eqn{K} function \eqn{K_{1,2}(r)}{K[1,2](r)} is rendered by something like \code{ylab=quote(Kcross[1,2](r))} and \code{yexp=quote(Kcross[list(1,2)](r))} to get the most satisfactory behaviour. (A useful tip: use \code{\link{substitute}} instead of \code{\link{quote}} to insert values of variables into an expression, e.g. \code{substitute(Kcross[i,j](r), list(i=42,j=97))} yields the same as \code{quote(Kcross[42, 97](r))}.) \code{labl} is a character vector specifying plot labels for each column of \code{x}. These labels will appear on the plot axes (in non-default plots), legends and printed output. Entries in \code{labl} may contain the string \code{"\%s"} which will be replaced by \code{fname}. For example the border-corrected estimate of the \eqn{K} function has label \code{"\%s[bord](r)"} which becomes \code{"K[bord](r)"}. \code{desc} is a character vector containing intelligible explanations of each column of \code{x}. Entries in \code{desc} may contain the string \code{"\%s"} which will be replaced by \code{ylab}. 
For example the border correction estimate of the \eqn{K} function has description \code{"border correction estimate of \%s"}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}. } \seealso{ See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. Use \code{\link{range.fv}} to compute the range of \eqn{y} values for a function, and \code{\link{with.fv}} for more complicated calculations. The functions \code{fvnames}, \code{fvnames<-} allow the user to use standard abbreviations to refer to columns of an \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{tweak.fv.entry} and \code{rebadge.fv}. } \author{\adrian and \rolf } \examples{ df <- data.frame(r=seq(0,5,by=0.1)) df <- transform(df, a=pi*r^2, b=3*r^2) X <- fv(df, "r", quote(A(r)), "a", cbind(a, b) ~ r, alim=c(0,4), labl=c("r", "\%s[true](r)", "\%s[approx](r)"), desc=c("radius of circle", "true area \%s", "rough area \%s"), fname="A") X } \keyword{spatial} \keyword{classes} spatstat/man/fardist.Rd0000644000176200001440000000271713333543263014627 0ustar liggesusers\name{fardist} \alias{fardist} \alias{fardist.ppp} \alias{fardist.owin} \title{ Farthest Distance to Boundary of Window } \description{ Computes the farthest distance from each pixel, or each data point, to the boundary of the window. } \usage{ fardist(X, \dots) \method{fardist}{owin}(X, \dots, squared=FALSE) \method{fardist}{ppp}(X, \dots, squared=FALSE) } \arguments{ \item{X}{ A spatial object such as a window or point pattern. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, if required. } \item{squared}{ Logical. If \code{TRUE}, the squared distances will be returned. 
} } \details{ The function \code{fardist} is generic, with methods for the classes \code{owin} and \code{ppp}. For a window \code{W}, the command \code{fardist(W)} returns a pixel image in which the value at each pixel is the \emph{largest} distance from that pixel to the boundary of \code{W}. For a point pattern \code{X}, with window \code{W}, the command \code{fardist(X)} returns a numeric vector with one entry for each point of \code{X}, giving the largest distance from that data point to the boundary of \code{W}. } \value{ For \code{fardist.owin}, a pixel image (object of class \code{"im"}). For \code{fardist.ppp}, a numeric vector. } \examples{ fardist(cells) plot(FR <- fardist(letterR)) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/Extract.listof.Rd0000644000176200001440000000212613333543263016076 0ustar liggesusers\name{Extract.listof} \alias{[<-.listof} \title{Extract or Replace Subset of a List of Things} \description{ Replace a subset of a list of things. } \usage{ \method{[}{listof}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"listof"} representing a list of things which all belong to one class. } \item{i}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. } } \value{ Another object of class \code{"listof"}. } \details{ This is a subset replacement method for the class \code{"listof"}. The argument \code{x} should be an object of class \code{"listof"} representing a list of things that all belong to one class. The method replaces a designated subset of \code{x}, and returns an object of class \code{"listof"}. 
} \seealso{ \code{\link{plot.listof}}, \code{\link{summary.listof}} } \examples{ x <- list(A=runif(10), B=runif(10), C=runif(10)) class(x) <- c("listof", class(x)) x[1] <- list(A=rnorm(10)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/Gcross.Rd0000644000176200001440000002210313333543262014421 0ustar liggesusers\name{Gcross} \alias{Gcross} \title{ Multitype Nearest Neighbour Distance Function (i-to-j) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest point of type \eqn{j}. } \usage{ Gcross(X, i, j, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type distance distribution function \eqn{G_{ij}(r)}{Gij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. 
Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{ij}(r)}{Gij(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{ij}(r)}{Gij(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{ij}(r)}{Gij(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{ij}(r)}{Gij(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gcross} and its companions \code{\link{Gdot}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). 
The ``cross-type'' (type \eqn{i} to type \eqn{j}) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{ij}(r)}{Gij(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest point of type \eqn{j}. An estimate of \eqn{G_{ij}(r)}{Gij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{G_{ij}(r)}{Gij(r)} would equal \eqn{F_j(r)}{Fj(r)}, the empty space function of the type \eqn{j} points. For a multitype Poisson point process where the type \eqn{i} points have intensity \eqn{\lambda_i}{lambda[i]}, we have \deqn{G_{ij}(r) = 1 - e^{ - \lambda_j \pi r^2} }{% Gij(r) = 1 - exp( - lambda[j] * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{ij}}{Gij} curves may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{G_{ij}(r)}{Gij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}.
However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{ij}(r)}{Gij(r)}. This estimate should be used with caution as \eqn{G_{ij}(r)}{Gij(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{ij}}{Gij}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{ij}}{Gij} as if it were an unbiased estimator of \eqn{G_{ij}}{Gij}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. 
} \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{ij}}{Gij} does not necessarily have a density. The reduced sample estimator of \eqn{G_{ij}}{Gij} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{ij}}{Gij} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gdot}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G01 <- Gcross(amacrine) # equivalent to: \dontrun{ G01 <- Gcross(amacrine, "off", "on") } plot(G01) # empty space function of `on' points \dontrun{ F1 <- Fest(split(amacrine)$on, r = G01$r) lines(F1$r, F1$km, lty=3) } # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gcross(pp, "0", "1") # note: "0" not 0 } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rpoisppx.Rd0000644000176200001440000000307313333543264015054 0ustar liggesusers\name{rpoisppx} \alias{rpoisppx} \title{ Generate Poisson Point Pattern in Any Dimensions } \description{ Generate a random multi-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoisppx(lambda, domain, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern (an object of class \code{"ppx"}). If \code{nsim > 1} or \code{drop=FALSE}, a list of such point patterns. } \details{ This function generates a realisation of the homogeneous Poisson process in multi dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the multi-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"boxx"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. } \seealso{ \code{\link{runifpointx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- rpoisppx(10, w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/gridcentres.Rd0000644000176200001440000000355513333543263015505 0ustar liggesusers\name{gridcentres} \alias{gridcentres} \alias{gridcenters} \title{Rectangular grid of points} \description{ Generates a rectangular grid of points in a window } \usage{ gridcentres(window, nx, ny) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of points in each row of the rectangular grid. } \item{ny}{Number of points in each column of the rectangular grid. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors giving the coordinates of the points of the rectangular grid. } \details{ This function creates a rectangular grid of points in the window. The bounding rectangle of the \code{window} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. The function returns the \eqn{x,y} coordinates of the centres of these tiles. 
Note that some of these grid points may lie outside the window, if \code{window} is not of type \code{"rectangle"}. The function \code{\link{inside.owin}} can be used to select those grid points which do lie inside the window. See the examples. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) and for other miscellaneous purposes. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{stratrand}} } \examples{ w <- unit.square() xy <- gridcentres(w, 10,15) \dontrun{ plot(w) points(xy) } bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) w <- owin(c(0,1), c(0,1), poly=bdry) xy <- gridcentres(w, 30, 30) ok <- inside.owin(xy$x, xy$y, w) \dontrun{ plot(w) points(xy$x[ok], xy$y[ok]) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/hextess.Rd0000644000176200001440000000461213333543263014652 0ustar liggesusers\name{hextess} \alias{hexgrid} \alias{hextess} \title{ Hexagonal Grid or Tessellation } \description{ Construct a hexagonal grid of points, or a hexagonal tessellation. } \usage{ hexgrid(W, s, offset = c(0, 0), origin=NULL, trim = TRUE) hextess(W, s, offset = c(0, 0), origin=NULL, trim = TRUE) } \arguments{ \item{W}{ Window in which to construct the hexagonal grid or tessellation. An object of class \code{"owin"}. } \item{s}{ Side length of hexagons. A positive number. } \item{offset}{ Numeric vector of length 2 specifying a shift of the hexagonal grid. See Details. } \item{origin}{ Numeric vector of length 2 specifying the initial origin of the hexagonal grid, before the offset is applied. See Details. } \item{trim}{ Logical value indicating whether to restrict the result to the window \code{W}. See Details. } } \details{ \code{hexgrid} constructs a hexagonal grid of points on the window \code{W}. If \code{trim=TRUE} (the default), the grid is intersected with \code{W} so that all points lie inside \code{W}. 
If \code{trim=FALSE}, then we retain all grid points which are the centres of hexagons that intersect \code{W}. \code{hextess} constructs a tessellation of hexagons on the window \code{W}. If \code{trim=TRUE} (the default), the tessellation is restricted to the interior of \code{W}, so that there will be some fragmentary hexagons near the boundary of \code{W}. If \code{trim=FALSE}, the tessellation consists of all hexagons which intersect \code{W}. The points of \code{hexgrid(...)} are the centres of the tiles of \code{hextess(...)} in the same order. In the initial position of the grid or tessellation, one of the grid points (tile centres) is placed at the \code{origin}, which defaults to the midpoint of the bounding rectangle of \code{W}. The grid can be shifted relative to this origin by specifying the \code{offset}. } \value{ The value of \code{hexgrid} is a point pattern (object of class \code{"ppp"}). The value of \code{hextess} is a tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{tess}} \code{\link{hexagon}} } \examples{ if(interactive()) { W <- Window(chorley) s <- 0.7 } else { W <- letterR s <- 0.3 } plot(hextess(W, s)) plot(hexgrid(W, s), add=TRUE) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{datagen} spatstat/man/tiles.Rd0000644000176200001440000000160713333543264014311 0ustar liggesusers\name{tiles} \alias{tiles} \title{Extract List of Tiles in a Tessellation} \description{ Extracts a list of the tiles that make up a tessellation. } \usage{ tiles(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. The tiles that make up the tessellation \code{x} are returned in a list. } \value{ A list of windows (objects of class \code{"owin"}). 
} \seealso{ \code{\link{tess}}, \code{\link{tilenames}}, \code{\link{tile.areas}}, \code{\link{tiles.empty}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tiles(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tiles(E) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Math.linim.Rd0000644000176200001440000000572513333543262015174 0ustar liggesusers\name{Math.linim} \alias{Math.linim} \alias{Ops.linim} \alias{Summary.linim} \alias{Complex.linim} \title{S3 Group Generic Methods for Images on a Linear Network} \description{ These are group generic methods for images of class \code{"linim"}, which allows for usual mathematical functions and operators to be applied directly to pixel images on a linear network. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm = FALSE)} %NAMESPACE S3method("Math", "linim") %NAMESPACE S3method("Ops", "linim") %NAMESPACE S3method("Complex", "linim") %NAMESPACE S3method("Summary", "linim") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"linim"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{logical: should missing values be removed?} } \details{ An object of class \code{"linim"} represents a pixel image on a linear network. See \code{\link{linim}}. Below is a list of mathematical functions and operators which are defined for these images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.linim}}. 
\enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } } \seealso{ \code{\link{eval.linim}} for evaluating expressions involving images. } \examples{ fx <- function(x,y,seg,tp) { (x - y)^2 } fL <- linfun(fx, simplenet) Z <- as.linim(fL) A <- Z+2 A <- -Z A <- sqrt(Z) A <- !(Z > 0.1) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{methods} spatstat/man/rose.Rd0000644000176200001440000001261213333543264014137 0ustar liggesusers\name{rose} \alias{rose} \alias{rose.default} \alias{rose.histogram} \alias{rose.density} \alias{rose.fv} \title{Rose Diagram} \description{ Plots a rose diagram (rose of directions), the analogue of a histogram or density plot for angular data. 
} \usage{ rose(x, \dots) \method{rose}{default}(x, breaks = NULL, \dots, weights=NULL, nclass = NULL, unit = c("degree", "radian"), start=0, clockwise=FALSE, main) \method{rose}{histogram}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{density}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{fv}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) } \arguments{ \item{x}{ Data to be plotted. A numeric vector containing angles, or a \code{histogram} object containing a histogram of angular values, or a \code{density} object containing a smooth density estimate for angular data, or an \code{fv} object giving a function of an angular argument. } \item{breaks, nclass}{ Arguments passed to \code{\link[graphics]{hist}} to determine the histogram breakpoints. } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{polygon}} controlling the appearance of the plot (or passed from \code{rose.default} to \code{\link[graphics]{hist}} to control the calculation of the histogram). } \item{unit}{ The unit in which the angles are expressed. } \item{start}{ The starting direction for measurement of angles, that is, the spatial direction which corresponds to a measured angle of zero. Either a character string giving a compass direction (\code{"N"} for north, \code{"S"} for south, \code{"E"} for east, or \code{"W"} for west) or a number giving the angle from the horizontal (East) axis to the starting direction. For example, if \code{unit="degree"} and \code{clockwise=FALSE}, then \code{start=90} and \code{start="N"} are equivalent. The default is to measure angles anti-clockwise from the horizontal axis (East direction). 
} \item{clockwise}{ Logical value indicating whether angles increase in the clockwise direction (\code{clockwise=TRUE}) or anti-clockwise, counter-clockwise direction (\code{clockwise=FALSE}, the default). } \item{weights}{ Optional vector of numeric weights associated with \code{x}. } \item{main}{ Optional main title for the plot. } \item{labels}{ Either a logical value indicating whether to plot labels next to the tick marks, or a vector of labels for the tick marks. } \item{at}{ Optional vector of angles at which tick marks should be plotted. Set \code{at=numeric(0)} to suppress tick marks. } \item{do.plot}{ Logical value indicating whether to really perform the plot. } } \details{ A rose diagram or rose of directions is the analogue of a histogram or bar chart for data which represent angles in two dimensions. The bars of the bar chart are replaced by circular sectors in the rose diagram. The function \code{rose} is generic, with a default method for numeric data, and methods for histograms and function tables. If \code{x} is a numeric vector, it must contain angular values in the range 0 to 360 (if \code{unit="degree"}) or in the range 0 to \code{2 * pi} (if \code{unit="radian"}). A histogram of the data will first be computed using \code{\link[graphics]{hist}}. Then the rose diagram of this histogram will be plotted by \code{rose.histogram}. If \code{x} is an object of class \code{"histogram"} produced by the function \code{\link[graphics]{hist}}, representing the histogram of angular data, then the rose diagram of the densities (rather than the counts) in this histogram object will be plotted. If \code{x} is an object of class \code{"density"} produced by \code{\link{circdensity}} or \code{\link[stats]{density.default}}, representing a kernel smoothed density estimate of angular data, then the rose diagram of the density estimate will be plotted. 
If \code{x} is a function value table (object of class \code{"fv"}) then the argument of the function will be interpreted as an angle, and the value of the function will be interpreted as the radius. By default, angles are interpreted using the mathematical convention where the zero angle is the horizontal \eqn{x} axis, and angles increase anti-clockwise. Other conventions can be specified using the arguments \code{start} and \code{clockwise}. Standard compass directions are obtained by setting \code{unit="degree"}, \code{start="N"} and \code{clockwise=TRUE}. } \value{A window (class \code{"owin"}) containing the plotted region.} \author{\adrian \rolf and \ege } \seealso{ \code{\link{fv}}, \code{\link[graphics]{hist}}, \code{\link{circdensity}}, \code{\link[stats]{density.default}}. } \examples{ ang <- runif(1000, max=360) rose(ang, col="grey") rose(ang, col="grey", start="N", clockwise=TRUE) } \keyword{spatial} \keyword{hplot} spatstat/man/pairdist.lpp.Rd0000644000176200001440000000264013333543263015577 0ustar liggesusers\name{pairdist.lpp} \alias{pairdist.lpp} \title{ Pairwise shortest-path distances between points on a linear network } \description{ Given a pattern of points on a linear network, compute the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. } \usage{ \method{pairdist}{lpp}(X, ..., method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. 
The \R code is much slower, but is provided for checking purposes. If two points cannot be joined by a path, the distance between them is infinite (\code{Inf}). } \value{ A symmetric matrix, whose values are nonnegative numbers or infinity (\code{Inf}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(12, simplenet) pairdist(X) } \keyword{spatial} spatstat/man/ppm.object.Rd0000644000176200001440000001355613524437051015236 0ustar liggesusers\name{ppm.object} \alias{ppm.object} %DoNotExport \alias{methods.ppm} %DoNotExport \title{Class of Fitted Point Process Models} \description{ A class \code{ppm} to represent a fitted stochastic model for a point process. The output of \code{\link{ppm}}. } \details{ An object of class \code{ppm} represents a stochastic point process model that has been fitted to a point pattern dataset. Typically it is the output of the model fitter, \code{\link{ppm}}. The class \code{ppm} has methods for the following standard generic functions: \tabular{lll}{ generic \tab method \tab description \cr \code{print} \tab \code{\link{print.ppm}} \tab print details \cr \code{plot} \tab \code{\link{plot.ppm}} \tab plot fitted model \cr \code{predict} \tab \code{\link{predict.ppm}} \tab fitted intensity and conditional intensity \cr \code{fitted} \tab \code{\link{fitted.ppm}} \tab fitted intensity \cr \code{coef} \tab \code{\link{coef.ppm}} \tab fitted coefficients of model \cr \code{anova} \tab \code{\link{anova.ppm}} \tab Analysis of Deviance \cr \code{formula} \tab \code{\link{formula.ppm}} \tab Extract model formula \cr \code{terms} \tab \code{\link{terms.ppm}} \tab Terms in the model formula \cr \code{labels} \tab \code{labels.ppm} \tab Names of estimable terms in the model formula \cr \code{residuals} \tab \code{\link{residuals.ppm}} \tab Point process residuals \cr \code{simulate} \tab \code{\link{simulate.ppm}} \tab Simulate the fitted model \cr \code{update} \tab 
\code{\link{update.ppm}} \tab Change or refit the model \cr \code{vcov} \tab \code{\link{vcov.ppm}} \tab Variance/covariance matrix of parameter estimates \cr \code{model.frame} \tab \code{\link{model.frame.ppm}} \tab Model frame \cr \code{model.matrix} \tab \code{\link{model.matrix.ppm}} \tab Design matrix \cr \code{logLik} \tab \code{\link{logLik.ppm}} \tab log \emph{pseudo} likelihood \cr \code{extractAIC} \tab \code{\link{extractAIC.ppm}} \tab pseudolikelihood counterpart of AIC \cr \code{nobs} \tab \code{\link{nobs.ppm}} \tab number of observations } Objects of class \code{ppm} can also be handled by the following standard functions, without requiring a special method: \tabular{ll}{ name \tab description \cr \code{\link{confint}} \tab Confidence intervals for parameters \cr \code{\link{step}} \tab Stepwise model selection \cr \code{\link{drop1}} \tab One-step model improvement \cr \code{\link{add1}} \tab One-step model improvement } The class \code{ppm} also has methods for the following generic functions defined in the \pkg{spatstat} package: \tabular{lll}{ generic \tab method \tab description \cr \code{\link{as.interact}} \tab \code{\link{as.interact.ppm}} \tab Interpoint interaction structure \cr \code{\link{as.owin}} \tab \code{\link{as.owin.ppm}} \tab Observation window of data \cr \code{\link{berman.test}} \tab \code{\link{berman.test.ppm}} \tab Berman's test \cr \code{\link{envelope}} \tab \code{\link{envelope.ppm}} \tab Simulation envelopes \cr \code{\link{fitin}} \tab \code{\link{fitin.ppm}} \tab Fitted interaction \cr \code{\link{is.marked}} \tab \code{\link{is.marked.ppm}} \tab Determine whether the model is marked \cr \code{\link{is.multitype}} \tab \code{\link{is.multitype.ppm}} \tab Determine whether the model is multitype \cr \code{\link{is.poisson}} \tab \code{\link{is.poisson.ppm}} \tab Determine whether the model is Poisson \cr \code{\link{is.stationary}} \tab \code{\link{is.stationary.ppm}} \tab Determine whether the model is stationary \cr 
\code{\link{cdf.test}} \tab \code{\link{cdf.test.ppm}} \tab Spatial distribution test \cr \code{\link{quadrat.test}} \tab \code{\link{quadrat.test.ppm}} \tab Quadrat counting test \cr \code{\link{reach}} \tab \code{\link{reach.ppm}} \tab Interaction range of model \cr \code{\link{rmhmodel}} \tab \code{\link{rmhmodel.ppm}} \tab Model in a form that can be simulated \cr \code{\link{rmh}} \tab \code{\link{rmh.ppm}} \tab Perform simulation \cr \code{\link{unitname}} \tab \code{\link{unitname.ppm}} \tab Name of unit of length } Information about the data (to which the model was fitted) can be extracted using \code{\link{data.ppm}}, \code{\link{dummy.ppm}} and \code{\link{quad.ppm}}. } \section{Internal format}{ If you really need to get at the internals, a \code{ppm} object contains at least the following entries: \tabular{ll}{ \code{coef} \tab the fitted regular parameters (as returned by \code{glm}) \cr \code{trend} \tab the trend formula or \code{NULL} \cr \code{interaction} \tab the point process interaction family (an object of class \code{"interact"}) or \code{NULL} \cr \code{Q} \tab the quadrature scheme used \cr \code{maxlogpl} \tab the maximised value of log pseudolikelihood \cr \code{correction} \tab name of edge correction method used \cr } See \code{\link{ppm}} for explanation of these concepts. The irregular parameters (e.g. the interaction radius of the Strauss process) are encoded in the \code{interaction} entry. However see the Warnings. } \seealso{ \code{\link{ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{print.ppm}}, \code{\link{predict.ppm}}, \code{\link{plot.ppm}}. } \section{Warnings}{ The internal representation of \code{ppm} objects may change slightly between releases of the \pkg{spatstat} package. 
} \examples{ fit <- ppm(cells ~ x, Strauss(0.1), correction="periodic") fit coef(fit) \dontrun{ pred <- predict(fit) } pred <- predict(fit, ngrid=20, type="trend") \dontrun{ plot(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/quadscheme.Rd0000644000176200001440000001305513333543264015310 0ustar liggesusers\name{quadscheme} \alias{quadscheme} \title{Generate a Quadrature Scheme from a Point Pattern} \description{ Generates a quadrature scheme (an object of class \code{"quad"}) from point patterns of data and dummy points. } \usage{ quadscheme(data, dummy, method="grid", \dots) } \arguments{ \item{data}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{dummy}{ The pattern of dummy points for the quadrature. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} Defaults to \code{default.dummy(data, ...)} } \item{method}{ The name of the method for calculating quadrature weights: either \code{"grid"} or \code{"dirichlet"}. } \item{\dots}{ Parameters of the weighting method (see below) and parameters for constructing the dummy points if necessary. } } \value{ An object of class \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is the primary method for producing a quadrature schemes for use by \code{\link{ppm}}. The function \code{\link{ppm}} fits a point process model to an observed point pattern using the Berman-Turner quadrature approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the pseudolikelihood of the model. 
It requires a quadrature scheme consisting of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. Such quadrature schemes are represented by objects of class \code{"quad"}. See \code{\link{quad.object}} for a description of this class. Quadrature schemes are created by the function \code{quadscheme}. The arguments \code{data} and \code{dummy} specify the data and dummy points, respectively. There is a sensible default for the dummy points (provided by \code{\link{default.dummy}}). Alternatively the dummy points may be specified arbitrarily and given in any format recognised by \code{\link{as.ppp}}. There are also functions for creating dummy patterns including \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}} and \code{\link{spokes}}. The quadrature region is the region over which we are integrating, and approximating integrals by finite sums. If \code{dummy} is a point pattern object (class \code{"ppp"}) then the quadrature region is taken to be \code{Window(dummy)}. If \code{dummy} is just a list of \eqn{x, y} coordinates then the quadrature region defaults to the observation window of the data pattern, \code{Window(data)}. If \code{dummy} is missing, then a pattern of dummy points will be generated using \code{\link{default.dummy}}, taking account of the optional arguments \code{...}. By default, the dummy points are arranged in a rectangular grid; recognised arguments include \code{nd} (the number of grid points in the horizontal and vertical directions) and \code{eps} (the spacing between dummy points). If \code{random=TRUE}, a systematic random pattern of dummy points is generated instead. See \code{\link{default.dummy}} for details. If \code{method = "grid"} then the optional arguments (for \code{\dots}) are \code{(nd, ntile, eps)}. The quadrature region (defined above) is divided into an \code{ntile[1]} by \code{ntile[2]} grid of rectangular tiles. 
The weight for each quadrature point is the area of a tile divided by the number of quadrature points in that tile. If \code{method="dirichlet"} then the optional arguments are \code{(exact=TRUE, nd, eps)}. The quadrature points (both data and dummy) are used to construct the Dirichlet tessellation. The quadrature weight of each point is the area of its Dirichlet tile inside the quadrature region. If \code{exact == TRUE} then this area is computed exactly using the package \code{deldir}; otherwise it is computed approximately by discretisation. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{as.ppp}}, \code{\link{quad.object}}, \code{\link{gridweights}}, \code{\link{dirichletWeights}}, \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}}, \code{\link{spokes}} } \examples{ data(simdat) # grid weights Q <- quadscheme(simdat) Q <- quadscheme(simdat, method="grid") Q <- quadscheme(simdat, eps=0.5) # dummy point spacing 0.5 units Q <- quadscheme(simdat, nd=50) # 1 dummy point per tile Q <- quadscheme(simdat, ntile=25, nd=50) # 4 dummy points per tile # Dirichlet weights Q <- quadscheme(simdat, method="dirichlet", exact=FALSE) # random dummy pattern \dontrun{ D <- runifpoint(250, Window(simdat)) Q <- quadscheme(simdat, D, method="dirichlet", exact=FALSE) } # polygonal window data(demopat) X <- unmark(demopat) Q <- quadscheme(X) # mask window Window(X) <- as.mask(Window(X)) Q <- quadscheme(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/nsegments.Rd0000644000176200001440000000133113333543263015165 0ustar liggesusers\name{nsegments} \alias{nsegments} \alias{nsegments.psp} \title{ Number of Line Segments in a 
Line Segment Pattern } \description{ Returns the number of line segments in a line segment pattern. } \usage{ nsegments(x) \method{nsegments}{psp}(x) } \arguments{ \item{x}{ A line segment pattern, i.e. an object of class \code{psp}, or an object containing a linear network. } } \details{ This function is generic, with methods for classes \code{psp}, \code{linnet} and \code{lpp}. } \value{ Integer. } \author{ \spatstatAuthors } \seealso{ \code{\link{npoints}()}, \code{\link{psp.object}()} } \examples{ nsegments(copper$Lines) nsegments(copper$SouthLines) } \keyword{spatial} \keyword{manip} spatstat/man/roc.Rd0000644000176200001440000000564013333543264013755 0ustar liggesusers\name{roc} \alias{roc} \alias{roc.ppp} \alias{roc.lpp} \alias{roc.ppm} \alias{roc.kppm} \alias{roc.lppm} \title{ Receiver Operating Characteristic } \description{ Computes the Receiver Operating Characteristic curve for a point pattern or a fitted point process model. } \usage{ roc(X, \dots) \method{roc}{ppp}(X, covariate, \dots, high = TRUE) \method{roc}{ppm}(X, \dots) \method{roc}{kppm}(X, \dots) \method{roc}{lpp}(X, covariate, \dots, high = TRUE) \method{roc}{lppm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the Receiver Operating Characteristic curve. The area under the ROC is computed by \code{\link{auc}}. 
For a point pattern \code{X} and a covariate \code{Z}, the ROC is a plot showing the ability of the covariate to separate the spatial domain into areas of high and low density of points. For each possible threshold \eqn{z}, the algorithm calculates the fraction \eqn{a(z)} of area in the study region where the covariate takes a value greater than \eqn{z}, and the fraction \eqn{b(z)} of data points for which the covariate value is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against \eqn{a(z)} for all thresholds \eqn{z}. For a fitted point process model, the ROC shows the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model (Lobo et al, 2007). } \value{ Function value table (object of class \code{"fv"}) which can be plotted to show the ROC curve. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link{auc}} } \examples{ plot(roc(swedishpines, "x")) fit <- ppm(swedishpines ~ x+y) plot(roc(fit)) } \keyword{spatial} spatstat/man/rescale.psp.Rd0000644000176200001440000000376113333543264015413 0ustar liggesusers\name{rescale.psp} \alias{rescale.psp} \title{Convert Line Segment Pattern to Another Unit of Length} \description{ Converts a line segment pattern dataset to another unit of length. 
} \usage{ \method{rescale}{psp}(X, s, unitname) } \arguments{ \item{X}{Line segment pattern (object of class \code{"psp"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another line segment pattern (of class \code{"psp"}), representing the same data, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the line segment pattern \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a line segment pattern representing the \emph{same} data but re-expressed in a different unit. Mark values are unchanged. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original segment pattern. If you want to actually change the coordinates by a linear transformation, producing a segment pattern that is not equivalent to the original one, use \code{\link{affine}}. 
} \seealso{ \code{\link{units}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(copper) X <- copper$Lines X # data are in km # convert to metres rescale(X, 1/1000) # convert data and rename unit rescale(X, 1/1000, c("metre", "metres")) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/as.box3.Rd0000644000176200001440000000223413333543262014441 0ustar liggesusers\name{as.box3} \Rdversion{1.1} \alias{as.box3} \title{ Convert Data to Three-Dimensional Box } \description{ Interprets data as the dimensions of a three-dimensional box. } \usage{ as.box3(...) } \arguments{ \item{\dots}{ Data that can be interpreted as giving the dimensions of a three-dimensional box. See Details. } } \details{ This function converts data in various formats to an object of class \code{"box3"} representing a three-dimensional box (see \code{\link{box3}}). The arguments \code{\dots} may be \itemize{ \item an object of class \code{"box3"} \item arguments acceptable to \code{box3} \item a numeric vector of length 6, interpreted as \code{c(xrange[1],xrange[2],yrange[1],yrange[2],zrange[1],zrange[2])} \item an object of class \code{"pp3"} representing a three-dimensional point pattern contained in a box. } } \value{ Object of class \code{"box3"}. } \author{\adrian and \rolf } \seealso{ \code{\link{box3}}, \code{\link{pp3}} } \examples{ X <- c(0,10,0,10,0,5) as.box3(X) X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1))) as.box3(X) } \keyword{spatial} \keyword{manip} spatstat/man/is.empty.Rd0000644000176200001440000000224213333543263014734 0ustar liggesusers\name{is.empty} \alias{is.empty} \alias{is.empty.owin} \alias{is.empty.ppp} \alias{is.empty.psp} \alias{is.empty.default} \title{Test Whether An Object Is Empty} \description{ Checks whether the argument is an empty window, an empty point pattern, etc. 
} \usage{ is.empty(x) \method{is.empty}{owin}(x) \method{is.empty}{ppp}(x) \method{is.empty}{psp}(x) \method{is.empty}{default}(x) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), a point pattern (object of class \code{"ppp"}), or a line segment pattern (object of class \code{"psp"}). } } \details{ This function tests whether the object \code{x} represents an empty spatial object, such as an empty window, a point pattern with zero points, or a line segment pattern with zero line segments. An empty window can be obtained as the output of \code{\link{intersect.owin}}, \code{\link{erosion}}, \code{\link{opening}}, \code{\link{complement.owin}} and some other operations. An empty point pattern or line segment pattern can be obtained as the result of simulation. } \value{ Logical value. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/split.msr.Rd0000644000176200001440000000476513333543264015134 0ustar liggesusers\name{split.msr} \alias{split.msr} \title{ Divide a Measure into Parts } \description{ Decomposes a measure into components, each component being a measure. } \usage{ \method{split}{msr}(x, f, drop = FALSE, \dots) } \arguments{ \item{x}{ Measure (object of class \code{"msr"}) to be decomposed. } \item{f}{ Factor or tessellation determining the decomposition. Argument passed to \code{\link{split.ppp}}. See Details. } \item{drop}{ Logical value indicating whether empty components should be retained in the list (\code{drop=FALSE}, the default) or deleted (\code{drop=TRUE}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. See \code{\link{msr}} for explanation. This function is a method for the generic \code{\link[base]{split}}. It divides the measure \code{x} into components, each of which is a measure. 
A measure \code{x} is represented in \pkg{spatstat} by a finite set of sample points with values attached to them. The function \code{split.msr} divides this pattern of sample points into several sub-patterns of points using \code{\link{split.ppp}}. For each sub-pattern, the values attached to these points are extracted from \code{x}, and these values and sample points determine a measure, which is a component or piece of the original \code{x}. The argument \code{f} can be missing, if the sample points of \code{x} are multitype points. In this case, \code{x} represents a measure associated with marked spatial locations, and the command \code{split(x)} separates \code{x} into a list of component measures, one for each possible mark. Otherwise the argument \code{f} is passed to \code{\link{split.ppp}}. It should be either a factor (of length equal to the number of sample points of \code{x}) or a tessellation (object of class \code{"tess"} representing a division of space into tiles) as documented under \code{\link{split.ppp}}. } \value{ A list, each of whose entries is a measure (object of class \code{"msr"}). } \author{ \spatstatAuthors } \seealso{ \code{\link{msr}}, \code{\link{[.msr}}, \code{\link{with.msr}} } \examples{ ## split by tessellation a <- residuals(ppm(cells ~ x)) aa <- split(a, dirichlet(runifpoint(4))) aa sapply(aa, integral) ## split by type of point b <- residuals(ppm(amacrine ~ marks + x)) bb <- split(b) bb } \keyword{spatial} \keyword{manip} spatstat/man/union.quad.Rd0000644000176200001440000000200713333543264015245 0ustar liggesusers\name{union.quad} \alias{union.quad} \title{Union of Data and Dummy Points} \description{ Combines the data and dummy points of a quadrature scheme into a single point pattern. } \usage{ union.quad(Q) } \arguments{ \item{Q}{A quadrature scheme (an object of class \code{"quad"}).} } \value{ A point pattern (of class \code{"ppp"}). 
} \details{ The argument \code{Q} should be a quadrature scheme (an object of class \code{"quad"}, see \code{\link{quad.object}} for details). This function combines the data and dummy points of \code{Q} into a single point pattern. If either the data or the dummy points are marked, the result is a marked point pattern. The function \code{\link{as.ppp}} will perform the same task. } \seealso{ \code{\link{quad.object}}, \code{\link{as.ppp}} } \examples{ data(simdat) Q <- quadscheme(simdat, default.dummy(simdat)) U <- union.quad(Q) \dontrun{plot(U)} # equivalent: U <- as.ppp(Q) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/centroid.owin.Rd0000644000176200001440000000367413333543263015760 0ustar liggesusers\name{centroid.owin} \alias{centroid.owin} \title{Centroid of a window} \description{ Computes the centroid (centre of mass) of a window } \usage{ centroid.owin(w, as.ppp = FALSE) } \arguments{ \item{w}{A window} \item{as.ppp}{Logical flag indicating whether to return the centroid as a point pattern (\code{ppp} object)} } \value{ Either a list with components \code{x, y}, or a point pattern (of class \code{ppp}) consisting of a single point, giving the coordinates of the centroid of the window \code{w}. } \details{ The centroid of the window \code{w} is computed. The centroid (``centre of mass'') is the point whose \eqn{x} and \eqn{y} coordinates are the mean values of the \eqn{x} and \eqn{y} coordinates of all points in the window. The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The calculation uses an exact analytic formula for the case of polygonal windows. Note that the centroid of a window is not necessarily inside the window, unless the window is convex. 
If \code{as.ppp=TRUE} and the centroid of \code{w} lies outside \code{w}, then the window of the returned point pattern will be a rectangle containing the original window (using \code{\link{as.rectangle}}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) centroid.owin(w) # returns 0.5, 0.5 data(demopat) w <- Window(demopat) # an irregular window cent <- centroid.owin(w, as.ppp = TRUE) \dontrun{ plot(cent) # plot the window and its centroid } wapprox <- as.mask(w) # pixel approximation of window \dontrun{ points(centroid.owin(wapprox)) # should be indistinguishable } \testonly{ centroid.owin(w) centroid.owin(wapprox) } } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/clickpoly.Rd0000644000176200001440000000410513333543263015155 0ustar liggesusers\name{clickpoly} \alias{clickpoly} \title{Interactively Define a Polygon} \description{ Allows the user to create a polygon by point-and-click in the display. } \usage{ clickpoly(add=FALSE, nv=NULL, np=1, \dots) } \arguments{ \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{nv}{ Number of vertices of the polygon (if this is predetermined). } \item{np}{ Number of polygons to create. } \item{\dots}{ Arguments passed to \code{\link[graphics]{locator}} to control the interactive plot, and to \code{\link[graphics]{polygon}} to plot the polygons. } } \value{ A window (object of class \code{"owin"}) representing the polygon. } \details{ This function allows the user to create a polygonal window by interactively clicking on the screen display. The user is prompted to point the mouse at any desired locations for the polygon vertices, and click the left mouse button to add each point. Interactive input stops after \code{nv} clicks (if \code{nv} was given) or when the middle mouse button is pressed. 
The return value is a window (object of class \code{"owin"}) representing the polygon. This function uses the \R command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. Multiple polygons can also be drawn, by specifying \code{np > 1}. The polygons must be disjoint. The result is a single window object consisting of all the polygons. } \seealso{ \code{\link{identify.ppp}}, \code{\link{clickbox}}, \code{\link{clickppp}}, \code{\link{clickdist}}, \code{\link[graphics]{locator}} } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{iplot} spatstat/man/connected.tess.Rd0000644000176200001440000000264413333543263016111 0ustar liggesusers\name{connected.tess} \Rdversion{1.1} \alias{connected.tess} \title{ Connected Components of Tiles of a Tessellation } \description{ Given a tessellation, find the topologically-connected pieces of each tile, and make a new tessellation using these pieces. } \usage{ \method{connected}{tess}(X, \dots) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } } \details{ The function \code{connected} is generic. This function \code{connected.tess} is the method for tessellations. Given the tessellation \code{X}, the algorithm considers each tile of the tessellation, and identifies its connected components (topologically-connected pieces) using \code{\link{connected.owin}}. Each of these pieces is treated as a distinct tile and a new tessellation is made from these pieces. 
The result is another tessellation obtained by subdividing each tile of \code{X} into one or more new tiles. } \value{ Another tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{connected.owin}} } \examples{ BB <- grow.rectangle(Frame(letterR), 0.2) H <- tess(tiles=list(IN=letterR, OUT=complement.owin(letterR, BB))) opa <- par(mfrow=c(1,2)) plot(H, do.col=TRUE) plot(connected(H), do.col=TRUE, col=2:4) par(opa) } \author{ \adrian. } \keyword{spatial} \keyword{math} spatstat/man/fitted.slrm.Rd0000644000176200001440000000222713333543263015422 0ustar liggesusers\name{fitted.slrm} \Rdversion{1.1} \alias{fitted.slrm} \title{ Fitted Probabilities for Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel. } \usage{ \method{fitted}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[stats:fitted.values]{fitted}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The algorithm computes the fitted probabilities of the presence of a random point in each pixel. } \value{ A pixel image (object of class \code{"im"}) containing the fitted probability for each pixel. 
} \seealso{ \code{\link{slrm}}, \code{\link[stats:fitted.values]{fitted}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(fitted(fit)) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/predict.mppm.Rd0000644000176200001440000001140613333543265015572 0ustar liggesusers\name{predict.mppm} \alias{predict.mppm} \title{Prediction for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model obtained by \code{\link{mppm}}, evaluate the spatial trend and/or the conditional intensity of the model. By default, predictions are evaluated over a grid of locations, yielding pixel images of the trend and conditional intensity. Alternatively predictions may be evaluated at specified locations with specified values of the covariates. } \usage{ \method{predict}{mppm}(object, ..., newdata = NULL, type = c("trend", "cif"), ngrid = 40, locations=NULL, verbose=FALSE) } \arguments{ \item{object}{The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{newdata}{ New values of the covariates, for which the predictions should be computed. If \code{newdata=NULL}, predictions are computed for the original values of the covariates, to which the model was fitted. Otherwise \code{newdata} should be a hyperframe (see \code{\link{hyperframe}}) containing columns of covariates as required by the model. If \code{type} includes \code{"cif"}, then \code{newdata} must also include a column of spatial point pattern responses, in order to compute the conditional intensity. } \item{type}{ Type of predicted values required. A character string or vector of character strings. Options are \code{"trend"} for the spatial trend (first-order term) and \code{"cif"} or \code{"lambda"} for the conditional intensity. Alternatively \code{type="all"} selects all options. 
} \item{ngrid}{ Dimensions of the grid of spatial locations at which prediction will be performed (if \code{locations=NULL}). An integer or a pair of integers. } \item{locations}{ Optional. The locations at which predictions should be performed. A list of point patterns, with one entry for each row of \code{newdata}. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ This function computes the spatial trend and the conditional intensity of a fitted multiple spatial point process model. See Baddeley and Turner (2000) and Baddeley et al (2007) for explanation and examples. Note that by ``spatial trend'' we mean the (exponentiated) first order potential and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}.] The conditional intensity \eqn{\lambda(u,X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location u, with respect to the response point pattern X. If \code{locations=NULL}, then predictions are performed at an \code{ngrid} by \code{ngrid} grid of locations in the window for each response point pattern. The result will be a hyperframe containing a column of images of the trend (if selected) and a column of images of the conditional intensity (if selected). The result can be plotted. If \code{locations} is given, then it should be a list of point patterns (objects of class \code{"ppp"}). Predictions are performed at these points. The result is a hyperframe containing a column of marked point patterns where the locations each point. } \value{ A hyperframe with columns named \code{trend} and \code{cif}. If \code{locations=NULL}, the entries of the hyperframe are pixel images. If \code{locations} is not null, the entries are marked point patterns constructed by attaching the predicted values to the \code{locations} point patterns. 
} \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{mppm}}, \code{\link{fitted.mppm}}, \code{\link{hyperframe}} } \examples{ h <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ x, data=h, interaction=Strauss(7)) # prediction on a grid p <- predict(fit) plot(p$trend) # prediction at specified locations loc <- with(h, runifpoint(20, Window(Bugs))) p2 <- predict(fit, locations=loc) plot(p2$trend) } \keyword{spatial} \keyword{models} spatstat/man/rotate.infline.Rd0000644000176200001440000000361613333543264016114 0ustar liggesusers\name{rotate.infline} \alias{rotate.infline} \alias{shift.infline} \alias{reflect.infline} \alias{flipxy.infline} \title{ Rotate or Shift Infinite Lines } \description{ Given the coordinates of one or more infinite straight lines in the plane, apply a rotation or shift. } \usage{ \method{rotate}{infline}(X, angle = pi/2, \dots) \method{shift}{infline}(X, vec = c(0,0), \dots) \method{reflect}{infline}(X) \method{flipxy}{infline}(X) } \arguments{ \item{X}{ Object of class \code{"infline"} representing one or more infinite straight lines in the plane. } \item{angle}{ Angle of rotation, in radians. } \item{vec}{ Translation (shift) vector: a numeric vector of length 2, or a \code{list(x,y)}, or a point pattern containing one point. } \item{\dots}{ Ignored. 
} } \details{ These functions are methods for the generic \code{\link{shift}}, \code{\link{rotate}}, \code{\link{reflect}} and \code{\link{flipxy}} for the class \code{"infline"}. An object of class \code{"infline"} represents one or more infinite lines in the plane. } \value{ Another \code{"infline"} object representing the result of the transformation. } \author{ \adrian. } \seealso{ \code{\link{infline}} } \examples{ L <- infline(v=0.5) plot(square(c(-1,1)), main="rotate lines", type="n") points(0, 0, pch=3) plot(L, col="green") plot(rotate(L, pi/12), col="red") plot(rotate(L, pi/6), col="red") plot(rotate(L, pi/4), col="red") L <- infline(p=c(0.4, 0.9), theta=pi* c(0.2, 0.6)) plot(square(c(-1,1)), main="shift lines", type="n") L <- infline(p=c(0.7, 0.8), theta=pi* c(0.2, 0.6)) plot(L, col="green") plot(shift(L, c(-0.5, -0.4)), col="red") plot(square(c(-1,1)), main="reflect lines", type="n") points(0, 0, pch=3) L <- infline(p=c(0.7, 0.8), theta=pi* c(0.2, 0.6)) plot(L, col="green") plot(reflect(L), col="red") } \keyword{spatial} \keyword{manip} spatstat/man/methods.objsurf.Rd0000644000176200001440000000256013333543263016303 0ustar liggesusers\name{methods.objsurf} \Rdversion{1.1} \alias{methods.objsurf} %DoNotExport \alias{print.objsurf} \alias{plot.objsurf} \alias{persp.objsurf} \alias{image.objsurf} \alias{contour.objsurf} \title{ Methods for Objective Function Surfaces } \description{ Methods for printing and plotting an objective function surface. } \usage{ \method{print}{objsurf}(x, ...) \method{plot}{objsurf}(x, ...) \method{image}{objsurf}(x, ...) \method{contour}{objsurf}(x, ...) \method{persp}{objsurf}(x, ...) } \arguments{ \item{x}{ Object of class \code{"objsurf"} representing an objective function surface. } \item{\dots}{ Additional arguments passed to plot methods. 
} } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{image}}, \code{\link{contour}} and \code{\link{persp}} for the class \code{"objsurf"}. } \value{ For \code{print.objsurf}, \code{plot.objsurf} and \code{image.objsurf} the value is \code{NULL}. For \code{contour.objsurf} and \code{persp.objsurf} the value is described in the help for \code{\link{contour.default}} and \code{\link{persp.default}} respectively. } \author{\adrian and \ege. } \seealso{ \code{\link{objsurf}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) os plot(os) contour(os, add=TRUE) persp(os) } \keyword{spatial} \keyword{hplot} spatstat/man/rcellnumber.Rd0000644000176200001440000000312713333543264015502 0ustar liggesusers\name{rcellnumber} \alias{rcellnumber} \title{ Generate Random Numbers of Points for Cell Process } \description{ Generates random integers for the Baddeley-Silverman counterexample. } \usage{ rcellnumber(n, N = 10, mu=1) } \arguments{ \item{n}{ Number of random integers to be generated. } \item{N}{ Distributional parameter: the largest possible value (when \code{mu <= 1}). An integer greater than 1. } \item{mu}{ Mean of the distribution (equals the variance). Any positive real number. } } \details{ If \code{mu = 1} (the default), this function generates random integers which have mean and variance equal to 1, but which do not have a Poisson distribution. The random integers take the values \eqn{0}, \eqn{1} and \eqn{N} with probabilities \eqn{1/N}, \eqn{(N-2)/(N-1)} and \eqn{1/(N(N-1))} respectively. See Baddeley and Silverman (1984). If \code{mu} is another positive number, the random integers will have mean and variance equal to \code{mu}. They are obtained by generating the one-dimensional counterpart of the cell process and counting the number of points in the interval from \code{0} to \code{mu}. The maximum possible value of each random integer is \code{N * ceiling(mu)}. 
} \value{ An integer vector of length \code{n}. } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rcell}} } \examples{ rcellnumber(30, 3) } \keyword{datagen} spatstat/man/bw.CvL.Rd0000644000176200001440000000621313544333571014264 0ustar liggesusers\name{bw.CvL} \alias{bw.CvL} \title{ Cronie and van Lieshout's Criterion for Bandwidth Selection for Kernel Density } \description{ Uses Cronie and van Lieshout's criterion based on Cambell's formula to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.CvL(X, \dots, srange = NULL, ns = 16, sigma = NULL, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{warn}{ Logical. If \code{TRUE}, a warning is issued if the optimal value of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. 
The bandwidth \eqn{\sigma}{\sigma} is chosen to minimise the discrepancy between the area of the observation window and the sum of reciprocal estimated intensity values at the points of the point process \deqn{ \mbox{CvL}(\sigma) = (|W| - \sum_i 1/\hat\lambda(x_i))^2 }{ CvL(\sigma) = (|W| - sum[i] 1/\lambda(x[i]))^2 } where the sum is taken over all the data points \eqn{x_i}{x[i]}, and where \eqn{\hat\lambda(x_i)}{\lambda(x[i])} is the kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{\sigma}. The value of \eqn{\mbox{CvL}(\sigma)}{CvL(\sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{\sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.scott}}, \code{\link{bw.ppl}}, \code{\link{bw.frac}}. } \examples{ if(interactive()) { b <- bw.CvL(redwood) b plot(b, main="Cronie and van Lieshout bandwidth criterion for redwoods") plot(density(redwood, b)) plot(density(redwood, bw.CvL)) } \testonly{ b <- bw.CvL(redwood, srange=c(0.03, 0.07), ns=2) } } \references{ Cronie, O and Van Lieshout, M N M (2018) A non-model-based approach to bandwidth selection for kernel estimators of spatial intensity functions, \emph{Biometrika}, \bold{105}, 455-462. } \author{Ottmar Cronie \email{ottmar.cronie@umu.se} and Marie-Colette van Lieshout \email{Marie-Colette.van.Lieshout@cwi.nl} adapted for \pkg{spatstat} by \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rmpoint.Rd0000644000176200001440000002547113333543264014666 0ustar liggesusers\name{rmpoint} \alias{rmpoint} \title{Generate N Random Multitype Points} \description{ Generate a random multitype point pattern with a fixed number of points, or a fixed number of points of each type. } \usage{ rmpoint(n, f=1, fmax=NULL, win=unit.square(), types, ptypes, \dots, giveup=1000, verbose=FALSE, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of marked points to generate. Either a single number specifying the total number of points, or a vector specifying the number of points of each type. } \item{f}{ The probability density of the multitype points, usually un-normalised. Either a constant, a vector, a function \code{f(x,y,m, ...)}, a pixel image, a list of functions \code{f(x,y,...)} or a list of pixel images. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image or list of pixel images. } \item{types}{ All the possible types for the multitype pattern. } \item{ptypes}{ Optional vector of probabilities for each type. } \item{\dots}{ Arguments passed to \code{f} if it is a function. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates random multitype point patterns consisting of a fixed number of points. 
Three different models are available: \describe{ \item{I. Random location and type:}{ If \code{n} is a single number and the argument \code{ptypes} is missing, then \code{n} independent, identically distributed random multitype points are generated. Their locations \code{(x[i],y[i])} and types \code{m[i]} have joint probability density proportional to \eqn{f(x,y,m)}. } \item{II. Random type, and random location given type:}{ If \code{n} is a single number and \code{ptypes} is given, then \code{n} independent, identically distributed random multitype points are generated. Their types \code{m[i]} have probability distribution \code{ptypes}. Given the types, the locations \code{(x[i],y[i])} have conditional probability density proportional to \eqn{f(x,y,m)}. } \item{III. Fixed types, and random location given type:}{ If \code{n} is a vector, then we generate \code{n[i]} independent, identically distributed random points of type \code{types[i]}. For points of type \eqn{m} the conditional probability density of location \eqn{(x,y)} is proportional to \eqn{f(x,y,m)}. } } Note that the density \code{f} is normalised in different ways in Model I and Models II and III. In Model I the normalised joint density is \eqn{g(x,y,m)=f(x,y,m)/Z} where \deqn{ Z = \sum_m \int\int \lambda(x,y,m) {\rm d}x \, {\rm d}y }{ Z = sum_[m] integral lambda(x,y,m) dx dy } while in Models II and III the normalised conditional density is \eqn{g(x,y\mid m) = f(x,y,m)/Z_m}{g(x,y|m) = f(x,y,m)/Z[m]} where \deqn{ Z_m = \int\int \lambda(x,y,m) {\rm d}x \, {\rm d}y. }{ Z[m] = integral lambda(x,y,m) dx dy. } In Model I, the marginal distribution of types is \eqn{p_m = Z_m/Z}{p[m] = Z[m]/Z}. The unnormalised density \code{f} may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{f} is a single number, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. 
In Model I, the marginal distribution of types is also uniform (all possible types have equal probability). } \item{vector:}{ If \code{f} is a numeric vector, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is proportional to the vector \code{f}. In Model II, the marginal distribution of types is \code{ptypes}, that is, the values in \code{f} are ignored. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } \item{function:}{ If \code{f} is a function, it will be called in the form \code{f(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. In Model I, the joint probability density of location and type is proportional to \code{f(x,y,m,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f(x,y,m,\dots)}. The function \code{f} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels \code{types}.) The value \code{fmax} must be given and must be an upper bound on the values of \code{f(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{f} is a list of functions, then the functions will be called in the form \code{f[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. In Model I, the joint probability density of location and type is proportional to \code{f[[m]](x,y,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f[[m]](x,y,\dots)}. The function \code{f[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. 
The value \code{fmax} must be given and must be an upper bound on the values of \code{f[[i]](x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } \item{pixel image:}{ If \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the unnormalised density at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{f} for the pixel nearest to \code{(x,y)}. In Model I, the marginal distribution of types is uniform. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{f} is a list of pixel images, then the image \code{f[[i]]} determines the density values of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{names(f)}, or if that is null, \code{1:length(f)}. } } The implementation uses the rejection method. For Model I, \code{\link{rmpoispp}} is called repeatedly until \code{n} points have been generated. It gives up after \code{giveup} calls if there are still fewer than \code{n} points. For Model II, the types are first generated according to \code{ptypes}, then the locations of the points of each type are generated using \code{\link{rpoint}}. For Model III, the locations of the points of each type are generated using \code{\link{rpoint}}. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}} } \examples{ abc <- c("a","b","c") ##### Model I rmpoint(25, types=abc) rmpoint(25, 1, types=abc) # 25 points, equal probability for each type, uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc) # same as above rmpoint(25, function(x,y,m) { x }, types=abc) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc) rmpoint(25, list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 25 points, UNEQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. ##### Model II rmpoint(25, 1, types=abc, ptypes=rep(1,3)/3) rmpoint(25, 1, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, function(x,y,m) { x }, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc, ptypes=rep(1,3)) # 25 points, EQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. 
###### Model III rmpoint(c(12, 8, 4), 1, types=abc) # 12 points of type "a", # 8 points of type "b", # 4 points of type "c", # each uniformly distributed rmpoint(c(12, 8, 4), function(x,y,m) { ifelse(m=="a", 1, x)}, types=abc) rmpoint(c(12, 8, 4), list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 12 points of type "a", uniformly distributed # 8 points of type "b", nonuniform # 4 points of type "c", nonuniform ######### ## Randomising an existing point pattern: # same numbers of points of each type, uniform random locations (Model III) rmpoint(table(marks(demopat)), 1, win=Window(demopat)) # same total number of points, distribution of types estimated from demopat, # uniform random locations (Model II) rmpoint(npoints(demopat), 1, types=levels(marks(demopat)), win=Window(demopat), ptypes=table(marks(demopat))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/rescue.rectangle.Rd0000644000176200001440000000252613333543264016423 0ustar liggesusers\name{rescue.rectangle} \alias{rescue.rectangle} \title{Convert Window Back To Rectangle} \description{ Determines whether the given window is really a rectangle aligned with the coordinate axes, and if so, converts it to a rectangle object. } \usage{ rescue.rectangle(W) } \arguments{ \item{W}{A window (object of class \code{"owin"}).} } \value{ Another object of class \code{"owin"} representing the same window. } \details{ This function decides whether the window \code{W} is actually a rectangle aligned with the coordinate axes. This will be true if \code{W} is \itemize{ \item a rectangle (window object of type \code{"rectangle"}); \item a polygon (window object of type \code{"polygonal"} with a single polygonal boundary) that is a rectangle aligned with the coordinate axes; \item a binary mask (window object of type \code{"mask"}) in which all the pixel entries are \code{TRUE}. 
} If so, the function returns this rectangle, a window object of type \code{"rectangle"}. If not, the function returns \code{W}. } \seealso{ \code{\link{as.owin}}, \code{\link{owin.object}} } \examples{ w <- owin(poly=list(x=c(0,1,1,0),y=c(0,0,1,1))) rw <- rescue.rectangle(w) w <- as.mask(unit.square()) rw <- rescue.rectangle(w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/linearpcf.Rd0000644000176200001440000000625513333543263015137 0ustar liggesusers\name{linearpcf} \alias{linearpcf} \title{ Linear Pair Correlation Function } \description{ Computes an estimate of the linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcf(X, r=NULL, ..., correction="Ang", ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the linear pair correlation function from point pattern data on a linear network. The pair correlation function is estimated from the shortest-path distances between each pair of data points, using the fixed-bandwidth kernel smoother \code{\link{density.default}}, with a bias correction at each end of the interval of \eqn{r} values. To switch off the bias correction, set \code{endcorrect=FALSE}. The bandwidth for smoothing the pairwise distances is determined by arguments \code{\dots} passed to \code{\link{density.default}}, mainly the arguments \code{bw} and \code{adjust}. 
The default is to choose the bandwidth by Silverman's rule of thumb \code{bw="nrd0"} explained in \code{\link{density.default}}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is an estimate of the first derivative of the network \eqn{K} function defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). The result is an estimate of the pair correlation function in the linear network. } \value{ Function value table (object of class \code{"fv"}). If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{linearK}}, \code{\link{linearpcfinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearpcf(X) linearpcf(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat/man/runifpoint.Rd0000644000176200001440000000636213333543264015371 0ustar liggesusers\name{runifpoint} \alias{runifpoint} \title{Generate N Uniform Random Points} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points. 
} \usage{ runifpoint(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, \dots, nsim=1, drop=TRUE, ex=NULL) } \arguments{ \item{n}{ Number of points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{warn}{ Logical. Whether to issue a warning if \code{n} is very large. See Details. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{ex}{ Optional. A point pattern to use as the example. If \code{ex} is given and \code{n} and \code{win} are missing, then \code{n} and \code{win} will be calculated from the point pattern \code{ex}. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent random points, uniformly distributed in the window \code{win}. (For nonuniform distributions, see \code{\link{rpoint}}.) The algorithm depends on the type of window, as follows: \itemize{ \item If \code{win} is a rectangle then \eqn{n} independent random points, uniformly distributed in the rectangle, are generated by assigning uniform random values to their cartesian coordinates. \item If \code{win} is a binary image mask, then a random sequence of pixels is selected (using \code{\link{sample}}) with equal probabilities. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. \item If \code{win} is a polygonal window, the algorithm uses the rejection method. It finds a rectangle enclosing the window, generates points in this rectangle, and tests whether they fall in the desired window. 
It gives up when \code{giveup * n} tests have been performed without yielding \code{n} successes. } The algorithm for binary image masks is faster than the rejection method but involves discretisation. If \code{warn=TRUE}, then a warning will be issued if \code{n} is very large. The threshold is \code{\link{spatstat.options}("huge.npoints")}. This warning has no consequences, but it helps to trap a number of common errors. } \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{rpoispp}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit square pp <- runifpoint(100) # irregular window data(letterR) # polygonal pp <- runifpoint(100, letterR) # binary image mask pp <- runifpoint(100, as.mask(letterR)) ## # randomising an existing point pattern runifpoint(npoints(cells), win=Window(cells)) runifpoint(ex=cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/dirichletVertices.Rd0000644000176200001440000000326313333543263016644 0ustar liggesusers\name{dirichletVertices} \alias{dirichletVertices} \alias{dirichletEdges} \title{ Vertices and Edges of Dirichlet Tessellation } \description{ Computes the Dirichlet-Voronoi tessellation of a point pattern and extracts the vertices or edges of the tiles. } \usage{ dirichletVertices(X) dirichletEdges(X) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } } \details{ These functions compute the Dirichlet-Voronoi tessellation of \code{X} (see \code{\link{dirichlet}}) and extract the vertices or edges of the tiles of the tessellation. The Dirichlet vertices are the spatial locations which are locally farthest away from \code{X}, that is, where the distance function of \code{X} reaches a local maximum. The Dirichlet edges are the dividing lines equally distant between a pair of points of \code{X}. The Dirichlet tessellation of \code{X} is computed using \code{\link{dirichlet}}. The vertices or edges of all tiles of the tessellation are extracted. 
For \code{dirichletVertices}, any vertex which lies on the boundary of the window of \code{X} is deleted. The remaining vertices are returned, as a point pattern, without duplicated entries. } \value{ \code{dirichletVertices} returns a point pattern (object of class \code{"ppp"}) in the same window as \code{X}. \code{dirichletEdges} returns a line segment pattern (object of class \code{"psp"}). } \seealso{ \code{\link{dirichlet}}, \code{\link{dirichletAreas}} } \examples{ plot(dirichlet(cells)) plot(dirichletVertices(cells), add=TRUE) ed <- dirichletEdges(cells) } \author{\adrian, \rolf and \ege } \keyword{spatial} \keyword{math} \keyword{manip} spatstat/man/treebranchlabels.Rd0000644000176200001440000000442113333543264016466 0ustar liggesusers\name{treebranchlabels} \alias{treebranchlabels} \title{ Label Vertices of a Tree by Branch Membership } \description{ Given a linear network which is a tree (acyclic graph), this function assigns a label to each vertex, indicating its position in the tree. } \usage{ treebranchlabels(L, root = 1) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). The network must have no loops. } \item{root}{ Root of the tree. An integer index identifying which point in \code{vertices(L)} is the root of the tree. } } \details{ The network \code{L} should be a tree, that is, it must have no loops. This function computes a character string label for each vertex of the network \code{L}. The vertex identified by \code{root} (that is, \code{vertices(L)[root]}) is taken as the root of the tree and is given the empty label \code{""}. \itemize{ \item If there are several line segments which meet at the root vertex, each of these segments is the start of a new branch of the tree; the other endpoints of these segments are assigned the labels \code{"a"}, \code{"b"}, \code{"c"} and so on. \item If only one segment issues from the root vertex, the other endpoint of this segment is assigned the empty label \code{""}. 
} A similar rule is then applied to each of the newly-labelled vertices. If the vertex labelled \code{"a"} is joined to two other unlabelled vertices, these will be labelled \code{"aa"} and \code{"ab"}. The rule is applied recursively until all vertices have been labelled. If \code{L} is not a tree, the algorithm will terminate, but the results will be nonsense. } \value{ A vector of character strings, with one entry for each point in \code{vertices(L)}. } \author{ \spatstatAuthors } \seealso{ \code{\link{deletebranch}}, \code{\link{extractbranch}}, \code{\link{treeprune}} for manipulating a network using the branch labels. \code{\link{linnet}} for creating a network. } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) } \keyword{spatial} \keyword{math} spatstat/man/colourmap.Rd0000644000176200001440000001153313573615567015205 0ustar liggesusers\name{colourmap} \alias{colourmap} \title{Colour Lookup Tables} \description{ Create a colour map (colour lookup table). } \usage{ colourmap(col, \dots, range=NULL, breaks=NULL, inputs=NULL, gamma=1) } \arguments{ \item{col}{Vector of values specifying colours} \item{\dots}{Ignored.} \item{range}{ Interval to be mapped. A numeric vector of length 2, specifying the endpoints of the range of values to be mapped. Incompatible with \code{breaks} or \code{inputs}. } \item{inputs}{ Values to which the colours are associated. A factor or vector of the same length as \code{col}. Incompatible with \code{breaks} or \code{range}. } \item{breaks}{ Breakpoints for the colour map. A numeric vector of length equal to \code{length(col)+1}. Incompatible with \code{range} or \code{inputs}. } \item{gamma}{ Exponent for the gamma correction, when \code{range} is given. A single positive number. See Details. 
} } \details{ A colour map is a mechanism for associating colours with data. It can be regarded as a function, mapping data to colours. The command \code{colourmap} creates an object representing a colour map, which can then be used to control the plot commands in the \pkg{spatstat} package. It can also be used to compute the colour assigned to any data value. The argument \code{col} specifies the colours to which data values will be mapped. It should be a vector whose entries can be interpreted as colours by the standard \R graphics system. The entries can be string names of colours like \code{"red"}, or integers that refer to colours in the standard palette, or strings containing six-letter hexadecimal codes like \code{"#F0A0FF"}. Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks} must be specified by name. \itemize{ \item If \code{inputs} is given, then it should be a vector or factor, of the same length as \code{col}. The entries of \code{inputs} can be any atomic type (e.g. numeric, logical, character, complex) or factor values. The resulting colour map associates the value \code{inputs[i]} with the colour \code{col[i]}. The argument \code{col} should have the same length as \code{inputs}. \item If \code{range} is given, then it determines the interval of the real number line that will be mapped. It should be a numeric vector of length 2. The interval will be divided evenly into bands, each of which is assigned one of the colours in \code{col}. (If \code{gamma} is given, then the bands are equally spaced on a scale where the original values are raised to the power \code{gamma}.) \item If \code{breaks} is given, then it determines the precise intervals of the real number line which are mapped to each colour. It should be a numeric vector, of length at least 2, with entries that are in increasing order. Infinite values are allowed. 
Any number in the range between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the colour \code{col[i]}. The argument \code{col} should have length equal to \code{length(breaks) - 1}. } It is also permissible for \code{col} to be a single colour value, representing a trivial colour map in which all data values are mapped to the same colour. The result is an object of class \code{"colourmap"}. There are \code{print} and \code{plot} methods for this class. Some plot commands in the \pkg{spatstat} package accept an object of this class as a specification of the colour map. The result is also a function \code{f} which can be used to compute the colour assigned to any data value. That is, \code{f(x)} returns the character value of the colour assigned to \code{x}. This also works for vectors of data values. } \value{ A function, which is also an object of class \code{"colourmap"}. } \seealso{ The plot method \code{\link{plot.colourmap}}. See the \R help file on \code{\link[grDevices:colors]{colours}} for information about the colours that \R recognises, and how to manipulate them. To make a smooth transition between colours, see \code{\link{interp.colourmap}}. To alter individual colour values, see \code{\link{tweak.colourmap}}. To extract or replace all colour values, see \code{\link{colouroutputs}}. See \code{\link[spatstat:colourtools]{colourtools}} for more tools to manipulate colour values. See \code{\link{lut}} for lookup tables. } \examples{ # colour map for real numbers, using breakpoints cr <- colourmap(c("red", "blue", "green"), breaks=c(0,5,10,15)) cr cr(3.2) cr(c(3,5,7)) # a large colour map co <- colourmap(rainbow(100), range=c(-1,1)) co(0.2) # colour map for discrete set of values ct <- colourmap(c("red", "green"), inputs=c(FALSE, TRUE)) ct(TRUE) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{color} spatstat/man/blur.Rd0000644000176200001440000000730513616220210014121 0ustar liggesusers\name{blur} \alias{blur} \alias{Smooth.im} \title{Apply Gaussian Blur to a Pixel Image} \description{ Applies a Gaussian blur to a pixel image. } \usage{ blur(x, sigma = NULL, \dots, kernel="gaussian", normalise=FALSE, bleed = TRUE, varcov=NULL) \method{Smooth}{im}(X, sigma = NULL, \dots, kernel="gaussian", normalise=FALSE, bleed = TRUE, varcov=NULL) } \arguments{ \item{x,X}{The pixel image. An object of class \code{"im"}.} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Ignored. } \item{kernel}{ String (partially matched) specifying the smoothing kernel. Current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}. } \item{normalise}{ Logical flag indicating whether the output values should be divided by the corresponding blurred image of the window itself. See Details. } \item{bleed}{ Logical flag indicating whether to allow blur to extend outside the original domain of the image. See Details. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } } \details{ This command applies a Gaussian blur to the pixel image \code{x}. \code{Smooth.im} is a method for the generic \code{\link{Smooth}} for pixel images. It is currently identical to \code{blur}, apart from the name of the first argument. The blurring kernel is the isotropic Gaussian kernel with standard deviation \code{sigma}, or the anisotropic Gaussian kernel with variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. Also \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(sigma^2)}. 
If the pixel values of \code{x} include some \code{NA} values (meaning that the image domain does not completely fill the rectangular frame) then these \code{NA} values are first reset to zero. The algorithm then computes the convolution \eqn{x \ast G}{x * G} of the (zero-padded) pixel image \eqn{x} with the specified Gaussian kernel \eqn{G}. If \code{normalise=FALSE}, then this convolution \eqn{x\ast G}{x * G} is returned. If \code{normalise=TRUE}, then the convolution \eqn{x \ast G}{x * G} is normalised by dividing it by the convolution \eqn{w \ast G}{w * G} of the image domain \code{w} with the same Gaussian kernel. Normalisation ensures that the result can be interpreted as a weighted average of input pixel values, without edge effects due to the shape of the domain. If \code{bleed=FALSE}, then pixel values outside the original image domain are set to \code{NA}. Thus the output is a pixel image with the same domain as the input. If \code{bleed=TRUE}, then no such alteration is performed, and the result is a pixel image defined everywhere in the rectangular frame containing the input image. Computation is performed using the Fast Fourier Transform. } \value{ A pixel image with the same pixel array as the input image \code{x}. } \seealso{ \code{\link{interp.im}} for interpolating a pixel image to a finer resolution, \code{\link{density.ppp}} for blurring a point pattern, \code{\link{Smooth.ppp}} for interpolating marks attached to points. 
} \examples{ Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) par(mfrow=c(1,3)) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) par(mfrow=c(1,1)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} \keyword{manip} spatstat/man/envelope.lpp.Rd0000644000176200001440000002567213551001752015601 0ustar liggesusers\name{envelope.lpp} \alias{envelope.lpp} \alias{envelope.lppm} \title{ Envelope for Point Patterns on Linear Network } \description{ Enables envelopes to be computed for point patterns on a linear network. } \usage{ \method{envelope}{lpp}(Y, fun=linearK, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{lppm}(Y, fun=linearK, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A point pattern on a linear network (object of class \code{"lpp"}) or a fitted point process model on a linear network (object of class \code{"lppm"}). } \item{fun}{ Function that is to be computed for each simulated pattern. } \item{nsim}{ Number of simulations to perform. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. 
A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. 
A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. 
Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are rejected only when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \details{ This is a method for the generic function \code{\link{envelope}} applicable to point patterns on a linear network. The argument \code{Y} can be either a point pattern on a linear network, or a fitted point process model on a linear network. 
The function \code{fun} will be evaluated for the data and also for \code{nsim} simulated point patterns on the same linear network. The upper and lower envelopes of these evaluated functions will be computed as described in \code{\link{envelope}}. The type of simulation is determined as follows. \itemize{ \item if \code{Y} is a point pattern (object of class \code{"lpp"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated according to a Poisson point process on the linear network on which \code{Y} is defined, with intensity estimated from \code{Y}. \item if \code{Y} is a fitted point process model (object of class \code{"lppm"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated by simulating from the fitted model. \item If \code{simulate} is present, it specifies the type of simulation as explained below. \item If \code{simulate} is an expression (typically including a call to a random generator), then the expression will be repeatedly evaluated, and should yield random point patterns on the same linear network as \code{Y}. \item If \code{simulate} is a function (typically including a call to a random generator), then the function will be repeatedly applied to the original point pattern \code{Y}, and should yield random point patterns on the same linear network as \code{Y}. \item If \code{simulate} is a list of point patterns, then these will be taken as the simulated point patterns. They should be on the same linear network as \code{Y}. } The function \code{fun} should accept as its first argument a point pattern on a linear network (object of class \code{"lpp"}) and should have another argument called \code{r} or a \code{\dots} argument. } \value{ Function value table (object of class \code{"fv"}) with additional information, as described in \code{\link{envelope}}. 
} \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{envelope}}, \code{\link{linearK}} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \examples{ if(interactive()) { ns <- 39 np <- 40 } else { ns <- np <- 3 } X <- runiflpp(np, simplenet) # uniform Poisson: random numbers of points envelope(X, nsim=ns) # uniform Poisson: conditional on observed number of points envelope(X, fix.n=TRUE, nsim=ns) # nonuniform Poisson fit <- lppm(X ~x) envelope(fit, nsim=ns) #multitype marks(X) <- sample(letters[1:2], np, replace=TRUE) envelope(X, nsim=ns) } \keyword{spatial} spatstat/man/spatstat-deprecated.Rd0000644000176200001440000000160213606041401017112 0ustar liggesusers\name{spatstat-deprecated} \alias{as.psp.owin} \alias{which.max.im} \alias{circumradius} \alias{circumradius.owin} \alias{circumradius.ppp} \alias{circumradius.linnet} \title{Deprecated spatstat functions} \description{ Deprecated spatstat functions. } \usage{ \method{as.psp}{owin}(x, \dots, window=NULL, check=spatstat.options("checksegments"), fatal=TRUE) which.max.im(x) circumradius(x, \dots) \method{circumradius}{owin}(x, \dots) \method{circumradius}{ppp}(x, \dots) \method{circumradius}{linnet}(x, \dots) } \details{ These functions are deprecated, and will eventually be deleted from the \pkg{spatstat} package. \code{as.psp.owin} has been replaced by \code{\link{edges}}. \code{which.max.im(x)} is replaced by \code{\link{im.apply}(x, which.max)}. 
\code{circumradius} is replaced by the more appropriately named \code{boundingradius}. } \keyword{internal} spatstat/man/linearKdot.inhom.Rd0000644000176200001440000001035013623712063016366 0ustar liggesusers\name{linearKdot.inhom} \alias{linearKdot.inhom} \title{ Inhomogeneous multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. 
Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdadot} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdadot} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. 
} \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } K <- linearKdot.inhom(chicago, "assault", lamI, lam.) \dontrun{ fit <- lppm(chicago, ~marks + x) linearKdot.inhom(chicago, "assault", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/rMosaicField.Rd0000644000176200001440000000262013333543264015526 0ustar liggesusers\name{rMosaicField} \alias{rMosaicField} \title{Mosaic Random Field} \description{ Generate a realisation of a random field which is piecewise constant on the tiles of a given tessellation. } \usage{ rMosaicField(X, rgen = function(n) { sample(0:1, n, replace = TRUE)}, ..., rgenargs=NULL) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the pixel resolution. } \item{rgen}{ Function that generates random values for the tiles of the tessellation. } \item{rgenargs}{ List containing extra arguments that should be passed to \code{rgen} (typically specifying parameters of the distribution of the values). } } \details{ This function generates a realisation of a random field which is piecewise constant on the tiles of the given tessellation \code{X}. The values in each tile are independent and identically distributed. } \value{ A pixel image (object of class \code{"im"}). 
} \author{\adrian and \rolf } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicSet}} } \examples{ X <- rpoislinetess(3) plot(rMosaicField(X, runif)) plot(rMosaicField(X, runif, dimyx=256)) plot(rMosaicField(X, rnorm, rgenargs=list(mean=10, sd=2))) plot(rMosaicField(dirichlet(runifpoint(30)), rnorm)) } \keyword{spatial} \keyword{datagen} spatstat/man/summary.kppm.Rd0000644000176200001440000000532713333543264015637 0ustar liggesusers\name{summary.kppm} \alias{summary.kppm} \alias{print.summary.kppm} \title{Summarizing a Fitted Cox or Cluster Point Process Model} \description{ \code{summary} method for class \code{"kppm"}. } \usage{ \method{summary}{kppm}(object, \dots, quick=FALSE) \method{print}{summary.kppm}(x, \dots) } \arguments{ \item{object}{ A fitted Cox or cluster point process model (object of class \code{"kppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.kppm"} as returned by \code{summary.kppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"kppm"}. An object of class \code{"kppm"} describes a fitted Cox or cluster point process model. See \code{\link{kppm}}. \code{summary.kppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.kppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.kppm} is invoked implicitly when the user calls \code{summary.kppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. 
} \value{ \code{summary.kppm} returns an object of class \code{"summary.kppm"}, while \code{print.summary.kppm} returns \code{NULL}. The result of \code{summary.kppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{clusters}{the \code{clusters} argument to \code{\link{kppm}}} \item{modelname}{character string describing the model} \item{isPCP}{\code{TRUE} if the model is a Poisson cluster process, \code{FALSE} if it is a log-Gaussian Cox process} \item{lambda}{Estimated intensity: numeric value, or pixel image} \item{mu}{Mean cluster size: numeric value, pixel image, or \code{NULL}} \item{clustpar}{list of fitted parameters for the cluster model} \item{clustargs}{list of fixed parameters for the cluster model, if any} \item{callstring}{character string representing the original call to \code{\link{kppm}}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") summary(fit) coef(summary(fit)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/domain.Rd0000644000176200001440000000707013333543263014437 0ustar liggesusers\name{domain} \alias{domain} \alias{domain.ppp} \alias{domain.psp} \alias{domain.im} \alias{domain.ppx} \alias{domain.pp3} \alias{domain.lpp} \alias{domain.ppm} \alias{domain.kppm} \alias{domain.dppm} \alias{domain.lpp} \alias{domain.lppm} \alias{domain.msr} \alias{domain.quad} \alias{domain.quadratcount} \alias{domain.quadrattest} \alias{domain.tess} \alias{domain.linfun} \alias{domain.lintess} \alias{domain.im} \alias{domain.layered} \alias{domain.distfun} \alias{domain.nnfun} \alias{domain.funxy} \alias{domain.rmhmodel} \alias{domain.leverage.ppm} \alias{domain.influence.ppm} \title{ Extract the Domain of any Spatial Object } \description{ Given a spatial object such as a point pattern, in any number of dimensions, this function extracts the spatial domain in which 
the object is defined. } \usage{ domain(X, \dots) \method{domain}{ppp}(X, \dots) \method{domain}{psp}(X, \dots) \method{domain}{im}(X, \dots) \method{domain}{ppx}(X, \dots) \method{domain}{pp3}(X, \dots) \method{domain}{lpp}(X, \dots) \method{domain}{ppm}(X, \dots, from=c("points", "covariates")) \method{domain}{kppm}(X, \dots, from=c("points", "covariates")) \method{domain}{dppm}(X, \dots, from=c("points", "covariates")) \method{domain}{lpp}(X, \dots) \method{domain}{lppm}(X, \dots) \method{domain}{msr}(X, \dots) \method{domain}{quad}(X, \dots) \method{domain}{quadratcount}(X, \dots) \method{domain}{quadrattest}(X, \dots) \method{domain}{tess}(X, \dots) \method{domain}{linfun}(X, \dots) \method{domain}{lintess}(X, \dots) \method{domain}{im}(X, \dots) \method{domain}{layered}(X, \dots) \method{domain}{distfun}(X, \dots) \method{domain}{nnfun}(X, \dots) \method{domain}{funxy}(X, \dots) \method{domain}{rmhmodel}(X, \dots) \method{domain}{leverage.ppm}(X, \dots) \method{domain}{influence.ppm}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a point pattern (in any number of dimensions), line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } \item{from}{Character string. See Details.} } \details{ The function \code{domain} is generic. For a spatial object \code{X} in any number of dimensions, \code{domain(X)} extracts the spatial domain in which \code{X} is defined. For a two-dimensional object \code{X}, typically \code{domain(X)} is the same as \code{Window(X)}. The exception is that, if \code{X} is a point pattern on a linear network (class \code{"lpp"}) or a point process model on a linear network (class \code{"lppm"}), then \code{domain(X)} is the linear network on which the points lie, while \code{Window(X)} is the two-dimensional window containing the linear network. 
The argument \code{from} applies when \code{X} is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{from="points"} (the default), \code{domain} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{domain} returns the window in which the spatial covariates of the model were provided. } \value{ A spatial object representing the domain of \code{X}. Typically a window (object of class \code{"owin"}), a three-dimensional box (\code{"box3"}), a multidimensional box (\code{"boxx"}) or a linear network (\code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{Window}}, \code{\link{Frame}} } \examples{ domain(cells) domain(bei.extra$elev) domain(chicago) } \keyword{spatial} \keyword{manip} spatstat/man/kernel.squint.Rd0000644000176200001440000000274313333543263015774 0ustar liggesusers\name{kernel.squint} \alias{kernel.squint} \title{Integral of Squared Kernel} \description{ Computes the integral of the squared kernel, for the kernels used in density estimation for numerical data. } \usage{ kernel.squint(kernel = "gaussian", bw=1) } \arguments{ \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } \item{bw}{ Bandwidth (standard deviation) of the kernel. } } \details{ Kernel estimation of a probability density in one dimension is performed by \code{\link[stats]{density.default}} using a kernel function selected from the list above. This function computes the integral of the squared kernel, \deqn{ R = \int_{-\infty}^{\infty} k(x)^2 \, {\rm d}x }{ R = integral of k(x)^2 dx from x = -infinity to x = +infinity } where \eqn{k(x)} is the kernel with bandwidth \code{bw}. } \value{ A single number. 
} \seealso{ \code{\link[stats]{density.default}}, \code{\link{dkernel}}, \code{\link{kernel.moment}}, \code{\link{kernel.factor}} } \examples{ kernel.squint("gaussian", 3) # integral of squared Epanechnikov kernel with half-width h=1 h <- 1 bw <- h/kernel.factor("epa") kernel.squint("epa", bw) } \author{ \spatstatAuthors and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/extrapolate.psp.Rd0000644000176200001440000000244413474140237016321 0ustar liggesusers\name{extrapolate.psp} \alias{extrapolate.psp} \title{ Extrapolate Line Segments to Obtain Infinite Lines } \description{ Given a spatial pattern of line segments, extrapolate the segments to infinite lines. } \usage{ extrapolate.psp(x, \dots) } \arguments{ \item{x}{ Spatial pattern of line segments (object of class \code{"psp"}). } \item{\dots}{ Ignored. } } \details{ Each line segment in the pattern \code{x} is extrapolated to an infinite line, drawn through its two endpoints. The resulting pattern of infinite lines is returned as an object of class \code{"infline"}. If a segment's endpoints are identical (so that it has zero length) the resulting infinite line is vertical (i.e. parallel to the \eqn{y} coordinate axis). } \value{ An object of class \code{"infline"} representing the pattern of infinite lines. See \code{\link{infline}} for details of structure. } \author{ \spatstatAuthors. } \seealso{ \code{\link{psp}}, \code{\link{infline}} \code{\link{midpoints.psp}}, \code{\link{lengths.psp}} \code{\link{angles.psp}}, \code{\link{endpoints.psp}}. 
} \examples{ X <- psp(runif(4), runif(4), runif(4), runif(4), window=owin()) Y <- extrapolate.psp(X) plot(X, col=3, lwd=4) plot(Y, lty=3) Y } \keyword{spatial} \keyword{math} spatstat/man/is.connected.ppp.Rd0000644000176200001440000000255413333543263016344 0ustar liggesusers\name{is.connected.ppp} \Rdversion{1.1} \alias{is.connected.ppp} \title{ Determine Whether a Point Pattern is Connected } \description{ Determine whether a point pattern is topologically connected when all pairs of points closer than a threshold distance are joined. } \usage{ \method{is.connected}{ppp}(X, R, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{R}{ Threshold distance. Pairs of points closer than \code{R} units apart will be joined together. } \item{\dots}{ Ignored. } } \details{ The function \code{is.connected} is generic. This is the method for point patterns (objects of class \code{"ppp"}). The point pattern \code{X} is first converted into an abstract graph by joining every pair of points that lie closer than \code{R} units apart. Then the algorithm determines whether this graph is connected. That is, the result of \code{is.connected(X)} is \code{TRUE} if any point in \code{X} can be reached from any other point, by a series of steps between points of \code{X}, each step being shorter than \code{R} units in length. } \value{ A logical value. } \seealso{ \code{\link{is.connected}}, \code{\link{connected.ppp}}. } \examples{ is.connected(redwoodfull, 0.1) is.connected(redwoodfull, 0.2) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/npfun.Rd0000644000176200001440000000143213571674202014314 0ustar liggesusers\name{npfun} \alias{npfun} \title{ Dummy Function Returns Number of Points } \description{ Returns a summary function which is constant with value equal to the number of points in the point pattern. } \usage{ npfun(X, ..., r) } \arguments{ \item{X}{ Point pattern. } \item{\dots}{ Ignored. 
} \item{r}{ Vector of values of the distance argument \eqn{r}. } } \details{ This function is normally not called by the user. Instead it is passed as an argument to the function \code{\link{psst}}. } \value{ Object of class \code{"fv"} representing a constant function. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ \code{\link{psst}} } \examples{ fit0 <- ppm(cells, ~1, nd=10) v <- psst(fit0, npfun) } \keyword{spatial} \keyword{nonparametric} spatstat/man/Ord.Rd0000644000176200001440000000352613333543262013715 0ustar liggesusers\name{Ord} \alias{Ord} \title{Generic Ord Interaction model} \description{ Creates an instance of an Ord-type interaction point process model which can then be fitted to point pattern data. } \usage{ Ord(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. Ord (1977) proposed fitting this model to forestry data when \eqn{g(a)} has a simple ``threshold'' form. That model is implemented in our function \code{\link{OrdThresh}}. The present function \code{Ord} implements the case of a completely general Ord potential \eqn{g(a)} specified as an S language function \code{pot}. This is experimental. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? 
\emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{OrdThresh}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/ellipse.Rd0000644000176200001440000000411613333543263014623 0ustar liggesusers\name{ellipse} \alias{ellipse} \title{ Elliptical Window. } \description{ Create an elliptical window. } \usage{ ellipse(a, b, centre=c(0,0), phi=0, \dots, mask=FALSE, npoly = 128) } \arguments{ \item{a,b}{ The half-lengths of the axes of the ellipse. } \item{centre}{ The centre of the ellipse. } \item{phi}{ The (anti-clockwise) angle through which the ellipse should be rotated (about its centre) starting from an orientation in which the axis of half-length \code{a} is horizontal. } \item{mask}{ Logical value controlling the type of approximation to a perfect ellipse. See Details. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, if \code{mask} is \code{TRUE}. } \item{npoly}{ The number of edges in the polygonal approximation to the ellipse. } } \details{ This command creates a window object representing an ellipse with the given centre and axes. By default, the ellipse is approximated by a polygon with \code{npoly} edges. If \code{mask=TRUE}, then the ellipse is approximated by a binary pixel mask. The resolution of the mask is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. The arguments \code{a} and \code{b} must be single positive numbers. The argument \code{centre} specifies the ellipse centre: it can be either a numeric vector of length 2 giving the coordinates, or a \code{list(x,y)} giving the coordinates of exactly one point, or a point pattern (object of class \code{"ppp"}) containing exactly one point. 
} \value{ An object of class \code{owin} (either of type \dQuote{polygonal} or of type \dQuote{mask}) specifying an elliptical window. } \author{\adrian and \rolf } \seealso{ \code{\link{disc}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.mask}} } \examples{ W <- ellipse(a=5,b=2,centre=c(5,1),phi=pi/6) plot(W,lwd=2,border="red") WM <- ellipse(a=5,b=2,centre=c(5,1),phi=pi/6,mask=TRUE,dimyx=512) plot(WM,add=TRUE,box=FALSE) } \keyword{spatial} \keyword{datagen} spatstat/man/nnorient.Rd0000644000176200001440000000675513333543263015035 0ustar liggesusers\name{nnorient} \alias{nnorient} \title{ Nearest Neighbour Orientation Distribution } \description{ Computes the distribution of the orientation of the vectors from each point to its nearest neighbour. } \usage{ nnorient(X, \dots, cumulative = FALSE, correction, k = 1, unit = c("degree", "radian"), domain = NULL, ratio = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{k}{ Integer. The \eqn{k}th nearest neighbour will be used. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. 
} } \details{ This algorithm considers each point in the pattern \code{X} and finds its nearest neighbour (or \eqn{k}th nearest neighour). The \emph{direction} of the arrow joining the data point to its neighbour is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the angles is calculated using \code{\link{circdensity}}. This is the function \eqn{\vartheta(\phi)}{theta(phi)} defined in Illian et al (2008), equation (4.5.3), page 253. If \code{cumulative=TRUE}, then the cumulative distribution function of these angles is calculated. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. 
} \seealso{ \code{\link{pairorient}} } \examples{ rose(nnorient(redwood, adjust=0.6), col="grey") plot(CDF <- nnorient(redwood, cumulative=TRUE)) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat/man/Kcross.inhom.Rd0000644000176200001440000003120013571674202015537 0ustar liggesusers\name{Kcross.inhom} \alias{Kcross.inhom} \title{ Inhomogeneous Cross K Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross \eqn{K} function, which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kcross.inhom(X, i, j, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. 
Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the the estimated intensity of the sub-process of points of type \code{j}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the cross K function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for advanced use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"} ,\code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIJ}{ Optional. 
A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points of types \code{i} and \code{j} respectively. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. 
} \details{ This is a generalisation of the function \code{\link{Kcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. The inhomogeneous cross-type \eqn{K} function is described by \Moller and Waagepetersen (2003, pages 48-49 and 51-53). Briefly, given a multitype point process, suppose the sub-process of points of type \eqn{j} has intensity function \eqn{\lambda_j(u)}{lambda[j](u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda_j(\zeta)}{1/lambda[j](z)} at each point \eqn{\zeta}{z} of type \eqn{j}. Then the expected total mass per unit area is 1. The inhomogeneous ``cross-type'' \eqn{K} function \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. 
It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} should contain estimated values of the intensity of the sub-process of points of type \code{j}. It may be either a pixel image, a function, a numeric vector, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The optional argument \code{lambdaIJ} is for advanced use only. 
It is a matrix containing estimated values of the products of these two intensities for each pair of data points of types \code{i} and \code{j} respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kcross.inhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. \Moller, J. and Waagepetersen, R. (2003) \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC, Boca Raton. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Kinhom}}, \code{\link{Kdot.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): leave-one-out K <- Kcross.inhom(woods, "whiteoak", "maple", sigma=0.15) # method (3): fit parametric intensity model fit <- ppm(woods ~marks * polynom(x,y,2)) # alternative (a): use fitted model as 'lambda' argument K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaI=fit, lambdaJ=fit, update=FALSE) K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaX=fit, update=FALSE) # alternative (b): evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) K <- Kcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmhstart.Rd0000644000176200001440000000701113333543264015030 0ustar liggesusers\name{rmhstart} \alias{rmhstart} \alias{rmhstart.default} \title{Determine Initial State for Metropolis-Hastings Simulation.} \description{ Builds a description of the initial state for the Metropolis-Hastings algorithm. 
} \usage{ rmhstart(start, \dots) \method{rmhstart}{default}(start=NULL, \dots, n.start=NULL, x.start=NULL) } \arguments{ \item{start}{An existing description of the initial state in some format. Incompatible with the arguments listed below. } \item{\dots}{There should be no other arguments.} \item{n.start}{ Number of initial points (to be randomly generated). Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. } } \value{ An object of class \code{"rmhstart"}, which is essentially a list of parameters describing the initial point pattern and (optionally) the initial state of the random number generator. There is a \code{print} method for this class, which prints a sensible description of the initial state. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm implemented in \code{\link{rmh}}. This function \code{rmhstart} creates a full description of the initial state of the Metropolis-Hastings algorithm, \emph{including possibly the initial state of the random number generator}, for use in a subsequent call to \code{\link{rmh}}. It also checks that the initial state is valid. The initial state should be specified \bold{either} by the first argument \code{start} \bold{or} by the other arguments \code{n.start}, \code{x.start} etc. If \code{start} is a list, then it should have components named \code{n.start} or \code{x.start}, with the same interpretation as described below. The arguments are: \describe{ \item{n.start}{ The number of \dQuote{initial} points to be randomly (uniformly) generated in the simulation window \code{w}. Incompatible with \code{x.start}. For a multitype point process, \code{n.start} may be a vector (of length equal to the number of types) giving the number of points of each type to be generated. 
If expansion of the simulation window is selected (see the argument \code{expand} to \code{\link{rmhcontrol}}), then the actual number of starting points in the simulation will be \code{n.start} multiplied by the expansion factor (ratio of the areas of the expanded window and original window). For faster convergence of the Metropolis-Hastings algorithm, the value of \code{n.start} should be roughly equal to (an educated guess at) the expected number of points for the point process inside the window. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{ppp}), or an object which can be coerced to this class by \code{\link{as.ppp}}, or a dataset containing vectors \code{x} and \code{y}. If \code{x.start} is specified, then expansion of the simulation window (the argument \code{expand} of \code{\link{rmhcontrol}}) is not permitted. } } The parameters \code{n.start} and \code{x.start} are \emph{incompatible}. } \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhmodel}} } \examples{ # 30 random points a <- rmhstart(n.start=30) a # a particular point pattern b <- rmhstart(x.start=cells) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/npoints.Rd0000644000176200001440000000162313333543263014660 0ustar liggesusers\name{npoints} \alias{npoints} \alias{npoints.ppp} \alias{npoints.pp3} \alias{npoints.ppx} \title{Number of Points in a Point Pattern} \description{ Returns the number of points in a point pattern of any kind. } \usage{ npoints(x) \method{npoints}{ppp}(x) \method{npoints}{pp3}(x) \method{npoints}{ppx}(x) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}, \code{"pp3"}, \code{"ppx"} or some other suitable class). } } \value{ Integer. } \details{ This function returns the number of points in a point pattern. 
The function \code{npoints} is generic with methods for the classes \code{"ppp"}, \code{"pp3"}, \code{"ppx"} and possibly other classes. } \seealso{ \code{\link{ppp.object}}, \code{\link{print.pp3}}, \code{\link{print.ppx}}. } \examples{ data(cells) npoints(cells) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/sharpen.Rd0000644000176200001440000000644513333543264014636 0ustar liggesusers\name{sharpen} \alias{sharpen} \alias{sharpen.ppp} \title{Data Sharpening of Point Pattern} \description{ Performs Choi-Hall data sharpening of a spatial point pattern. } \usage{ sharpen(X, \dots) \method{sharpen}{ppp}(X, sigma=NULL, \dots, varcov=NULL, edgecorrect=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } \item{edgecorrect}{ Logical value indicating whether to apply edge effect bias correction. } \item{\dots}{Arguments passed to \code{\link{density.ppp}} to control the pixel resolution of the result.} } \details{ Choi and Hall (2001) proposed a procedure for \emph{data sharpening} of spatial point patterns. This procedure is appropriate for earthquake epicentres and other point patterns which are believed to exhibit strong concentrations of points along a curve. Data sharpening causes such points to concentrate more tightly along the curve. If the original data points are \eqn{X_1, \ldots, X_n}{X[1],..., X[n]} then the sharpened points are \deqn{ \hat X_i = \frac{\sum_j X_j k(X_j-X_i)}{\sum_j k(X_j - X_i)} }{ X^[i] = (sum[j] X[j] * k(X[j] - X[i]))/(sum[j] k(X[j] - X[i])) } where \eqn{k} is a smoothing kernel in two dimensions. Thus, the new point \eqn{\hat X_i}{X^[i]} is a vector average of the nearby points \eqn{X[j]}. The function \code{sharpen} is generic. 
It currently has only one method, for two-dimensional point patterns (objects of class \code{"ppp"}). If \code{sigma} is given, the smoothing kernel is the isotropic two-dimensional Gaussian density with standard deviation \code{sigma} in each axis. If \code{varcov} is given, the smoothing kernel is the Gaussian density with variance-covariance matrix \code{varcov}. The data sharpening procedure tends to cause the point pattern to contract away from the boundary of the window. That is, points \eqn{X_i}{X[i]} that lie quite close to the edge of the window of the point pattern tend to be displaced inward. If \code{edgecorrect=TRUE} then the algorithm is modified to correct this vector bias. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as the original pattern \code{X}, and with the same marks as \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ data(shapley) X <- unmark(shapley) \dontshow{ if(!(interactive())) X <- rthin(X, 0.05) } Y <- sharpen(X, sigma=0.5) Z <- sharpen(X, sigma=0.5, edgecorrect=TRUE) opa <- par(mar=rep(0.2, 4)) plot(solist(X, Y, Z), main= " ", main.panel=c("data", "sharpen", "sharpen, correct"), pch=".", equal.scales=TRUE, mar.panel=0.2) par(opa) } \references{ Choi, E. and Hall, P. (2001) Nonparametric analysis of earthquake point-process data. In M. de Gunst, C. Klaassen and A. van der Vaart (eds.) \emph{State of the art in probability and statistics: Festschrift for Willem R. van Zwet}, Institute of Mathematical Statistics, Beachwood, Ohio. Pages 324--344. 
} \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/psstG.Rd0000644000176200001440000001176513571674202014300 0ustar liggesusers\name{psstG} \Rdversion{1.1} \alias{psstG} \title{ Pseudoscore Diagnostic For Fitted Model against Saturation Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of saturation type. } \usage{ psstG(object, r = NULL, breaks = NULL, \dots, model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. 
If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the Geyer saturation process (see \code{\link{Geyer}}) with saturation parameter 1. The family of alternatives includes models that are more regular than the fitted model, and others that are more clustered than the fitted model. For any point pattern \eqn{x}, and any \eqn{r > 0}, let \eqn{S(x,r)} be the number of points in \eqn{x} whose nearest neighbour (the nearest other point in \eqn{x}) is closer than \eqn{r} units. Then the pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r ) - \int_W \Delta S(u,x,r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x,r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. 
The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Alternative functions: \code{\link{psstA}}, \code{\link{psst}}, \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ X <- rStrauss(200,0.1,0.05) plot(psstG(X)) plot(psstG(X, interaction=Strauss(0.05))) } \keyword{spatial} \keyword{models} spatstat/man/intensity.ppx.Rd0000644000176200001440000000175013333543263016023 0ustar liggesusers\name{intensity.ppx} \alias{intensity.ppx} \title{Intensity of a Multidimensional Space-Time Point Pattern} \description{ Calculates the intensity of points in a multi-dimensional point pattern of class \code{"ppx"} or \code{"pp3"}. } \usage{ \method{intensity}{ppx}(X, \dots) } \arguments{ \item{X}{Point pattern of class \code{"ppx"} or \code{"pp3"}.} \item{\dots}{Ignored.} } \value{ A single number or a numeric vector. } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a multi-dimensional point pattern (object of class \code{"ppx"} including \code{"pp3"}), i.e. the average density of points per unit volume. If the point pattern is multitype, the intensities of the different types are computed separately. 
} \author{ \adrian \rolf and \ege } \examples{ X <- osteo$pts[[1]] intensity(X) marks(X) <- factor(sample(letters[1:3], npoints(X), replace=TRUE)) intensity(X) } spatstat/man/dppBessel.Rd0000644000176200001440000000174713333543263015116 0ustar liggesusers\name{dppBessel} \alias{dppBessel} \title{Bessel Type Determinantal Point Process Model} \description{ Function generating an instance of the Bessel-type determinantal point process model. } \usage{dppBessel(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the model parameters. See Details. } } \details{ The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{sigma} as a non-negative numeric \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ Frederic Lavancier and Christophe Biscio. Modified by \ege , \adrian and \rolf } \examples{ m <- dppBessel(lambda=100, alpha=.05, sigma=0, d=2) } \seealso{ \code{\link{dppCauchy}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat/man/cdf.test.mppm.Rd0000644000176200001440000002014113571674202015646 0ustar liggesusers\name{cdf.test.mppm} \alias{cdf.test.mppm} \title{Spatial Distribution Test for Multiple Point Process Model} \description{ Performs a spatial distribution test of a point process model fitted to multiple spatial point patterns. The test compares the observed and predicted distributions of the values of a spatial covariate, using either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. 
} \usage{ \method{cdf.test}{mppm}(model, covariate, test=c("ks", "cvm", "ad"), ..., nsim=19, verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE) } \arguments{ \item{model}{ An object of class \code{"mppm"} representing a point process model fitted to multiple spatial point patterns. } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image, a list of functions, a list of pixel images, a hyperframe, a character string containing the name of one of the covariates in \code{model}, or one of the strings \code{"x"} or \code{"y"}. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link{cdf.test}} to control the test. } \item{nsim}{ Number of simulated realisations which should be generated, if a Monte Carlo test is required. } \item{verbose}{Logical flag indicating whether to print progress reports. } \item{interpolate}{ Logical flag indicating whether to interpolate between pixel values when \code{covariate} is a pixel image. See \emph{Details}. } \item{fast}{ Logical flag. If \code{TRUE}, values of the covariate are only sampled at the original quadrature points used to fit the model. If \code{FALSE}, values of the covariate are sampled at all pixels, which can be slower by three orders of magnitude. } \item{jitter}{ Logical flag. If \code{TRUE}, observed values of the covariate are perturbed by adding small random values, to avoid tied observations. } } \details{ This function is a method for the generic function \code{\link{cdf.test}} for the class \code{mppm}. This function performs a goodness-of-fit test of a point process model that has been fitted to multiple point patterns. 
The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test of goodness-of-fit. These are exact tests if the model is Poisson; otherwise, for a Gibbs model, a Monte Carlo p-value is computed by generating simulated realisations of the model and applying the selected goodness-of-fit test to each simulation. The argument \code{model} should be a fitted point process model fitted to multiple point patterns (object of class \code{"mppm"}). The argument \code{covariate} contains the values of a spatial function. It can be \itemize{ \item a \code{function(x,y)} \item a pixel image (object of class \code{"im"}) \item a list of \code{function(x,y)}, one for each point pattern \item a list of pixel images, one for each point pattern \item a hyperframe (see \code{\link{hyperframe}}) of which the first column will be taken as containing the covariate \item a character string giving the name of one of the covariates in \code{model} \item one of the character strings \code{"x"} or \code{"y"}, indicating the spatial coordinates. } If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. 
The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}}. The argument \code{interpolate} determines how pixel values will be handled when \code{covariate} is a pixel image. The value of the covariate at a data point is obtained by looking up the value of the nearest pixel if \code{interpolate=FALSE}, or by linearly interpolating between the values of the four nearest pixels if \code{interpolate=TRUE}. Linear interpolation is slower, but is sometimes necessary to avoid tied values of the covariate arising when the pixel grid is coarse. If \code{model} is a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are theoretically exact. This test was apparently first described (in the context of spatial data, and for Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson point process, then the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises and Anderson-Darling tests are biased. Instead they are used as the basis of a Monte Carlo test. First \code{nsim} simulated realisations of the model will be generated. Each simulated realisation consists of a list of simulated point patterns, one for each of the original data patterns. This can take a very long time. 
The model is then re-fitted to each simulation, and the refitted model is subjected to the goodness-of-fit test described above. A Monte Carlo p-value is then computed by comparing the p-value of the original test with the p-values obtained from the simulations. } \value{ An object of class \code{"cdftest"} and \code{"htest"} containing the results of the test. See \code{\link{cdf.test}} for details. } \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link{mppm}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \examples{ # three i.i.d. 
realisations of nonuniform Poisson process lambda <- as.im(function(x,y) { 300 * exp(x) }, square(1)) dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda), rpoispp(lambda))) # fit uniform Poisson process fit0 <- mppm(X~1, dat) # fit correct nonuniform Poisson process fit1 <- mppm(X~x, dat) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") # Gibbs model fitGibbs <- update(fit0, interaction=Strauss(0.07)) ns <- if(interactive()) 19 else 3 cdf.test(fitGibbs, "x", nsim=ns) } \keyword{htest} \keyword{spatial} spatstat/man/shift.im.Rd0000644000176200001440000000446413442350577014722 0ustar liggesusers\name{shift.im} \alias{shift.im} \title{Apply Vector Translation To Pixel Image} \description{ Applies a vector shift to a pixel image } \usage{ \method{shift}{im}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{ Location that will be shifted to the origin. Either a numeric vector of length 2 giving the location, or a point pattern containing only one point, or a list with two entries named \code{x} and \code{y}, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). } } \value{ Another pixel image (of class \code{"im"}) representing the result of applying the vector shift. } \details{ The spatial location of each pixel in the image is translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, the argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the coordinate origin \eqn{(0,0)}. 
The argument \code{origin} should be either a numeric vector of length 2 giving the spatial coordinates of a location, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin, and so on. } \seealso{ \code{\link{shift}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) Y <- shift(X, c(10,10)) plot(Y) # no discernible difference except coordinates are different shift(X, origin="c") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/lineartileindex.Rd0000644000176200001440000000421213426740666016354 0ustar liggesusers\name{lineartileindex} \alias{lineartileindex} \title{ Determine Which Tile Contains Each Given Point on a Linear Network } \description{ Given a tessellation on a linear network, and a list of points on the network, determine which tile of the tessellation contains each of the given points. } \usage{ lineartileindex(seg, tp, Z, method = c("encode", "C", "interpreted")) } \arguments{ \item{seg,tp}{ Vectors of local coordinates of the query points. See Details. } \item{Z}{ A tessellation on a linear network (object of class \code{"lintess"}). } \item{method}{ Internal use only. } } \details{ This low-level function is the analogue of \code{\link{tileindex}} for linear networks. For a tessellation \code{Z} on a linear network, and a list of query points on the same network, the function determines which tile of the tessellation contains each query point. 
Argument \code{Z} should be a tessellation on a linear network (object of class \code{"lintess"}). The vectors \code{seg} and \code{tp} specify the locations of the query points, on the same network, using local coordinates: \code{seg} contains integer values specifying which segment of the network contains each query point; \code{tp} contains numeric values between 0 and 1 specifying the fractional position along that segment. The result is a factor, of the same length as \code{seg} and \code{tp}, indicating which tile contains each point. The levels of the factor are the names of the tiles of \code{Z}. } \value{ A factor, of the same length as \code{seg} and \code{tp}, whose levels are the names of the tiles of \code{Z}. } \author{ \spatstatAuthors } \seealso{ \code{\link{lintess}}. \code{\link{as.linfun.lintess}} to create a function whose value is the tile index. \code{\link{cut.lpp}} for a neater way to classify the points of a point pattern on a linear network according to a tessellation on the network. } \examples{ Z <- lineardirichlet(runiflpp(15, simplenet)) X <- runiflpp(10, simplenet) coX <- coords(X) ii <- lineartileindex(coX$seg, coX$tp, Z) } \keyword{spatial} \keyword{manip} spatstat/man/summary.splitppp.Rd0000644000176200001440000000176513333543264016545 0ustar liggesusers\name{summary.splitppp} \alias{summary.splitppp} \title{Summary of a Split Point Pattern} \description{ Prints a useful summary of a split point pattern. } \usage{ \method{summary}{splitppp}(object, \dots) } \arguments{ \item{object}{ Split point pattern (object of class \code{"splitppp"}, effectively a list of point patterns, usually created by \code{\link{split.ppp}}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"splitppp"} is effectively a list of point patterns (objects of class \code{"ppp"}) representing different sub-patterns of an original point pattern. 
This function extracts a useful summary of each of the sub-patterns. } \seealso{ \code{\link{summary}}, \code{\link{split}}, \code{\link{split.ppp}} } \examples{ data(amacrine) # multitype point pattern summary(split(amacrine)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/as.solist.Rd0000644000176200001440000000176013333543262015106 0ustar liggesusers\name{as.solist} \alias{as.solist} \title{ Convert List of Two-Dimensional Spatial Objects } \description{ Given a list of two-dimensional spatial objects, convert it to the class \code{"solist"}. } \usage{ as.solist(x, \dots) } \arguments{ \item{x}{ A list of objects, each representing a two-dimensional spatial dataset. } \item{\dots}{ Additional arguments passed to \code{\link{solist}}. } } \details{ This command makes the list \code{x} into an object of class \code{"solist"} (spatial object list). See \code{\link{solist}} for details. The entries in the list \code{x} should be two-dimensional spatial datasets (not necessarily of the same class). } \value{ A list, usually of class \code{"solist"}. } \seealso{ \code{\link{solist}}, \code{\link{as.anylist}}, \code{\link{solapply}}. } \examples{ x <- list(cells, density(cells)) y <- as.solist(x) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/quad.object.Rd0000644000176200001440000000567713333543264015403 0ustar liggesusers\name{quad.object} \alias{quad.object} %DoNotExport \title{Class of Quadrature Schemes} \description{ A class \code{"quad"} to represent a quadrature scheme. 
} \details{ A (finite) quadrature scheme is a list of quadrature points \eqn{u_j}{u[j]} and associated weights \eqn{w_j}{w[j]} which is used to approximate an integral by a finite sum: \deqn{ \int f(x) dx \approx \sum_j f(u_j) w_j }{ integral(f(x) dx) ~= sum( f(u[j]) w[j] ) } Given a point pattern dataset, a \emph{Berman-Turner} quadrature scheme is one which includes all these data points, as well as a nonzero number of other (``dummy'') points. These quadrature schemes are used to approximate the pseudolikelihood of a point process, in the method of Baddeley and Turner (2000) (see Berman and Turner (1992)). Accuracy and computation time both increase with the number of points in the quadrature scheme. An object of class \code{"quad"} represents a Berman-Turner quadrature scheme. It can be passed as an argument to the model-fitting function \code{\link{ppm}}, which requires a quadrature scheme. An object of this class contains at least the following elements: \tabular{ll}{ \code{data}: \tab an object of class \code{"ppp"} \cr \tab giving the locations (and marks) of the data points.\cr \code{dummy}: \tab an object of class \code{"ppp"} \cr \tab giving the locations (and marks) of the dummy points.\cr \code{w}: \tab vector of nonnegative weights for the quadrature points\cr } Users are strongly advised not to manipulate these entries directly. The domain of quadrature is specified by \code{Window(dummy)} while the observation window (if this needs to be specified separately) is taken to be \code{Window(data)}. The weights vector \code{w} may also have an attribute \code{attr(w, "zeroes")} equivalent to the logical vector \code{(w == 0)}. If this is absent then all points are known to have positive weights. To create an object of class \code{"quad"}, users would typically call the high level function \code{\link{quadscheme}}. (They are actually created by the low level function \code{quad}.) 
Entries are extracted from a \code{"quad"} object by the functions \code{x.quad}, \code{y.quad}, \code{w.quad} and \code{marks.quad}, which extract the \eqn{x} coordinates, \eqn{y} coordinates, weights, and marks, respectively. The function \code{n.quad} returns the total number of quadrature points (dummy plus data). An object of class \code{"quad"} can be converted into an ordinary point pattern by the function \code{\link{union.quad}} which simply takes the union of the data and dummy points. Quadrature schemes can be plotted using \code{\link{plot.quad}} (a method for the generic \code{\link{plot}}). } \seealso{ \code{\link{quadscheme}}, \code{\link{ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/pcfinhom.Rd0000644000176200001440000001621013503620205014756 0ustar liggesusers\name{pcfinhom} \alias{pcfinhom} \title{ Inhomogeneous Pair Correlation Function } \description{ Estimates the inhomogeneous pair correlation function of a point pattern using kernel methods. } \usage{ pcfinhom(X, lambda = NULL, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), renormalise = TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, reciplambda = NULL, sigma = NULL, varcov = NULL, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. 
} \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see \code{\link{bw.stoyan}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see \code{\link{bw.stoyan}}. } \item{correction}{ Character string or character vector specifying the choice of edge correction. See \code{\link{Kest}} for explanation and options. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{\link{pcf.ppp}}. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. 
Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \details{ The inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} is a summary of the dependence between points in a spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda(x) * lambda(y) * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity function of the point process. For a Poisson point process with intensity function \eqn{\lambda}{lambda}, this probability is \eqn{p(r) = \lambda(x) \lambda(y)}{p(r) = lambda(x) * lambda(y)} so \eqn{g_{\rm inhom}(r) = 1}{ginhom(r) = 1}. The inhomogeneous pair correlation function is related to the inhomogeneous \eqn{K} function through \deqn{ g_{\rm inhom}(r) = \frac{K'_{\rm inhom}(r)}{2\pi r} }{ ginhom(r) = Kinhom'(r)/ ( 2 * pi * r) } where \eqn{K'_{\rm inhom}(r)}{Kinhom'(r)} is the derivative of \eqn{K_{\rm inhom}(r)}{Kinhom(r)}, the inhomogeneous \eqn{K} function. See \code{\link{Kinhom}} for information about \eqn{K_{\rm inhom}(r)}{Kinhom(r)}. The command \code{pcfinhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. 
If \code{renormalise=TRUE} (the default), then the estimates are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{\rm inhom}(r)}{ginhom(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by Ripley isotropic correction } as required. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. } \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{Kinhom}} } \examples{ data(residualspaper) X <- residualspaper$Fig4b plot(pcfinhom(X, stoyan=0.2, sigma=0.1)) fit <- ppm(X, ~polynom(x,y,2)) plot(pcfinhom(X, lambda=fit, normpower=2)) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/subspaceDistance.Rd0000644000176200001440000000263213333543264016450 0ustar liggesusers\name{subspaceDistance} \alias{subspaceDistance} \title{ Distance Between Linear Spaces } \description{ Evaluate the distance between two linear subspaces using the measure proposed by Li, Zha and Chiaromonte (2005). } \usage{ subspaceDistance(B0, B1) } \arguments{ \item{B0}{ Matrix whose columns are a basis for the first subspace. } \item{B1}{ Matrix whose columns are a basis for the second subspace. } } \details{ This algorithm calculates the maximum absolute value of the eigenvalues of \eqn{P1-P0} where \eqn{P0,P1} are the projection matrices onto the subspaces generated by \code{B0,B1}. This measure of distance was proposed by Li, Zha and Chiaromonte (2005). See also Xia (2007). } \value{ A single numeric value. } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. Li, B., Zha, H. and Chiaromonte, F. (2005) Contour regression: a general approach to dimension reduction. \emph{Annals of Statistics} \bold{33}, 1580--1616. Xia, Y. (2007) A constructive approach to the estimation of dimension reduction directions. \emph{Annals of Statistics} \bold{35}, 2654--2690. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{multivariate} \keyword{algebra} spatstat/man/Kmeasure.Rd0000644000176200001440000001570113333543262014743 0ustar liggesusers\name{Kmeasure} \alias{Kmeasure} \title{Reduced Second Moment Measure} \description{ Estimates the reduced second moment measure \eqn{\kappa}{Kappa} from a point pattern in a window of arbitrary shape. } \usage{ Kmeasure(X, sigma, edge=TRUE, \dots, varcov=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{\kappa}{Kappa} will be computed. 
An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ Standard deviation \eqn{\sigma}{sigma} of the Gaussian smoothing kernel. Incompatible with \code{varcov}. } \item{edge}{ Logical value indicating whether an edge correction should be applied. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution. } \item{varcov}{ Variance-covariance matrix of the Gaussian smoothing kernel. Incompatible with \code{sigma}. } } \value{ A real-valued pixel image (an object of class \code{"im"}, see \code{\link{im.object}}) whose pixel values are estimates of the density of the reduced second moment measure at each location. } \details{ Given a point pattern dataset, this command computes an estimate of the reduced second moment measure \eqn{\kappa}{Kappa} of the point process. The result is a pixel image whose pixel values are estimates of the density of the reduced second moment measure. The reduced second moment measure \eqn{\kappa}{Kappa} can be regarded as a generalisation of the more familiar \eqn{K}-function. An estimate of \eqn{\kappa}{Kappa} derived from a spatial point pattern dataset can be useful in exploratory data analysis. Its advantage over the \eqn{K}-function is that it is also sensitive to anisotropy and directional effects. In a nutshell, the command \code{Kmeasure} computes a smoothed version of the \emph{Fry plot}. As explained under \code{\link{fryplot}}, the Fry plot is a scatterplot of the vectors joining all pairs of points in the pattern. The reduced second moment measure is (essentially) defined as the average of the Fry plot over different realisations of the point process. The command \code{Kmeasure} effectively smooths the Fry plot of a dataset to obtain an estimate of the reduced second moment measure. 
In formal terms, the reduced second moment measure \eqn{\kappa}{Kappa} of a stationary point process \eqn{X} is a measure defined on the two-dimensional plane such that, for a `typical' point \eqn{x} of the process, the expected number of other points \eqn{y} of the process such that the vector \eqn{y - x} lies in a region \eqn{A}, equals \eqn{\lambda \kappa(A)}{lambda * Kappa(A)}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K}-function is a special case. The function value \eqn{K(t)} is the value of the reduced second moment measure for the disc of radius \eqn{t} centred at the origin; that is, \eqn{K(t) = \kappa(b(0,t))}{K(t) = Kappa(b(0,t))}. The command \code{Kmeasure} computes an estimate of \eqn{\kappa}{Kappa} from a point pattern dataset \code{X}, which is assumed to be a realisation of a stationary point process, observed inside a known, bounded window. Marks are ignored. The algorithm approximates the point pattern and its window by binary pixel images, introduces a Gaussian smoothing kernel and uses the Fast Fourier Transform \code{\link{fft}} to form a density estimate of \eqn{\kappa}{Kappa}. The calculation corresponds to the edge correction known as the ``translation correction''. The Gaussian smoothing kernel may be specified by either of the arguments \code{sigma} or \code{varcov}. If \code{sigma} is a single number, this specifies an isotropic Gaussian kernel with standard deviation \code{sigma} on each coordinate axis. If \code{sigma} is a vector of two numbers, this specifies a Gaussian kernel with standard deviation \code{sigma[1]} on the \eqn{x} axis, standard deviation \code{sigma[2]} on the \eqn{y} axis, and zero correlation between the \eqn{x} and \eqn{y} axes. If \code{varcov} is given, this specifies the variance-covariance matrix of the Gaussian kernel. There do not seem to be any well-established rules for selecting the smoothing kernel in this context. 
The density estimate of \eqn{\kappa}{Kappa} is returned in the form of a real-valued pixel image. Pixel values are estimates of the normalised second moment density at the centre of the pixel. (The uniform Poisson process would have values identically equal to \eqn{1}.) The image \code{x} and \code{y} coordinates are on the same scale as vector displacements in the original point pattern window. The point \code{x=0, y=0} corresponds to the `typical point'. A peak in the image near \code{(0,0)} suggests clustering; a dip in the image near \code{(0,0)} suggests inhibition; peaks or dips at other positions suggest possible periodicity. If desired, the value of \eqn{\kappa(A)}{Kappa(A)} for a region \eqn{A} can be estimated by computing the integral of the pixel image over the domain \eqn{A}, i.e.\ summing the pixel values and multiplying by pixel area, using \code{\link{integral.im}}. One possible application is to compute anisotropic counterparts of the \eqn{K}-function (in which the disc of radius \eqn{t} is replaced by another shape). See Examples. } \section{Warning}{ Some writers use the term \emph{reduced second moment measure} when they mean the \eqn{K}-function. This has caused confusion. As originally defined, the reduced second moment measure is a measure, obtained by modifying the second moment measure, while the \eqn{K}-function is a function obtained by evaluating this measure for discs of increasing radius. In \pkg{spatstat}, the \eqn{K}-function is computed by \code{\link{Kest}} and the reduced second moment measure is computed by \code{Kmeasure}. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{Kest}}, \code{\link{fryplot}}, \code{\link{spatstat.options}}, \code{\link{integral.im}}, \code{\link{im.object}} } \examples{ plot(Kmeasure(cells, 0.05)) # shows pronounced dip around origin consistent with strong inhibition plot(Kmeasure(redwood, 0.03), col=grey(seq(1,0,length=32))) # shows peaks at several places, reflecting clustering and ?periodicity M <- Kmeasure(cells, 0.05) # evaluate measure on a sector W <- Window(M) ang <- as.im(atan2, W) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W) sector <- solutionset(ang > 0 & ang < 1 & rad < 0.6) integral.im(M[sector, drop=FALSE]) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/HierHard.Rd0000644000176200001440000001054713333543262014660 0ustar liggesusers\name{HierHard} \alias{HierHard} \title{The Hierarchical Hard Core Point Process Model} \description{ Creates an instance of the hierarchical hard core point process model which can then be fitted to point pattern data. } \usage{ HierHard(hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical hard core process with hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. 
The hierarchical version of the (stationary) hard core process with \eqn{m} types, with hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]}, is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern. If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical hard core process pairwise interaction is yielded by the function \code{HierHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{hradii} must be square, with entries which are either positive numbers, or zero or \code{NA}. 
A value of zero or \code{NA} indicates that no hard core interaction term should be included for this combination of types. Note that only the hard core distances are specified in \code{HierHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierHard()}. } \seealso{ \code{\link{MultiHard}} for the corresponding symmetrical interaction. \code{\link{HierStrauss}}, \code{\link{HierStraussHard}}. } \examples{ h <- matrix(c(4, NA, 10, 15), 2, 2) HierHard(h) # prints a sensible description of itself ppm(ants ~1, HierHard(h)) # fit the stationary hierarchical hard core process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. } \keyword{spatial} \keyword{models} spatstat/man/rpoislpp.Rd0000644000176200001440000000345513333543264015044 0ustar liggesusers\name{rpoislpp} \alias{rpoislpp} \title{ Poisson Point Process on a Linear Network } \description{ Generates a realisation of the Poisson point process with specified intensity on the given linear network. } \usage{ rpoislpp(lambda, L, \dots, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). Can be omitted in some cases: see Details. } \item{\dots}{ Arguments passed to \code{\link{rpoisppOnLines}}. 
} \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ This function uses \code{\link{rpoisppOnLines}} to generate the random points. Argument \code{L} can be omitted, and defaults to \code{as.linnet(lambda)}, when \code{lambda} is a function on a linear network (class \code{"linfun"}) or a pixel image on a linear network (\code{"linim"}). } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{runiflpp}}, \code{\link{rlpp}}, \code{\link{lpp}}, \code{\link{linnet}} } \examples{ X <- rpoislpp(5, simplenet) plot(X) # multitype X <- rpoislpp(c(a=5, b=5), simplenet) } \keyword{spatial} \keyword{datagen} spatstat/man/quantile.im.Rd0000644000176200001440000000203313333543264015411 0ustar liggesusers\name{quantile.im} \alias{quantile.im} \title{Sample Quantiles of Pixel Image} \description{ Compute the sample quantiles of the pixel values of a given pixel image. } \usage{ \method{quantile}{im}(x, \dots) } \arguments{ \item{x}{ A pixel image. An object of class \code{"im"}. } \item{\dots}{ Optional arguments passed to \code{\link{quantile.default}}. They determine the probabilities for which quantiles should be computed. See \code{\link{quantile.default}}. } } \value{ A vector of quantiles. } \details{ This simple function applies the generic \code{\link{quantile}} operation to the pixel values of the image \code{x}. This function is a convenient way to inspect an image and to obtain summary statistics. See the examples. 
} \seealso{ \code{\link{quantile}}, \code{\link{cut.im}}, \code{\link{im.object}} } \examples{ # artificial image data Z <- setcov(square(1)) # find the quartiles quantile(Z) # find the deciles quantile(Z, probs=(0:10)/10) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/diagnose.ppm.Rd0000644000176200001440000004137713333543263015564 0ustar liggesusers\name{diagnose.ppm} \alias{diagnose.ppm} \alias{plot.diagppm} \title{ Diagnostic Plots for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce diagnostic plots based on residuals. } \usage{ diagnose.ppm(object, \dots, type="raw", which="all", sigma=NULL, rbord=reach(object), cumulative=TRUE, plot.it=TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) \method{plot}{diagppm}(x, \dots, which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{which}{ Character string or vector indicating the choice(s) of plots to be generated. Options are \code{"all"}, \code{"marks"}, \code{"smooth"}, \code{"x"}, \code{"y"} and \code{"sum"}. Multiple choices may be given but must be matched exactly. See Details. 
} \item{sigma}{ Bandwidth for kernel smoother in \code{"smooth"} option. } \item{rbord}{ Width of border to avoid edge effects. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. (An infinite value of \code{rbord} will be ignored.) } \item{cumulative}{ Logical flag indicating whether the lurking variable plots for the \eqn{x} and \eqn{y} coordinates will be the plots of cumulative sums of marks (\code{cumulative=TRUE}) or the plots of marginal integrals of the smoothed residual field (\code{cumulative=FALSE}). } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, the computed diagnostic quantities are returned without plotting them. } \item{plot.neg}{ String indicating how the density part of the residual measure should be plotted. } \item{plot.smooth}{ String indicating how the smoothed residual field should be plotted. } \item{compute.sd,plot.sd}{ Logical values indicating whether error bounds should be computed and added to the \code{"x"} and \code{"y"} plots. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{envelope,nsim,nrank}{ Arguments passed to \code{\link{lurking}} in order to plot simulation envelopes for the lurking variable plots. } \item{rv}{ Usually absent. Advanced use only. If this argument is present, the values of the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from \code{rv}. } \item{spacing}{ The spacing between plot panels (when a four-panel plot is generated) expressed as a fraction of the width of the window of the point pattern. } \item{outer}{ The distance from the outermost line of text to the nearest plot panel, expressed as a multiple of the spacing between plot panels. 
} \item{srange}{ Vector of length 2 that will be taken as giving the range of values of the smoothed residual field, when generating an image plot of this field. This is useful if you want to generate diagnostic plots for two different fitted models using the same colour map. } \item{monochrome}{ Flag indicating whether images should be displayed in greyscale (suitable for publication) or in colour (suitable for the screen). The default is to display in colour. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{splineargs}{ Argument passed to \code{\link{lurking}} to control the smoothing in the lurking variable plot. } \item{x}{The value returned from a previous call to \code{diagnose.ppm}. An object of class \code{"diagppm"}. } \item{typename}{String to be used as the name of the residuals.} \item{main}{Main title for the plot.} \item{\dots}{ Extra arguments, controlling either the resolution of the smoothed image (passed from \code{diagnose.ppm} to \code{\link{density.ppp}}) or the appearance of the plots (passed from \code{diagnose.ppm} to \code{plot.diagppm} and from \code{plot.diagppm} to \code{\link{plot.default}}). } \item{compute.cts}{Advanced use only.} } \value{ An object of class \code{"diagppm"} which contains the coordinates needed to reproduce the selected plots. This object can be plotted using \code{plot.diagppm} and printed using \code{print.diagppm}. 
} \details{ The function \code{diagnose.ppm} generates several diagnostic plots for a fitted point process model. The plots display the residuals from the fitted model (Baddeley et al, 2005) or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991). These plots can be used to assess goodness-of-fit, to identify outliers in the data, and to reveal departures from the fitted model. See also the companion function \code{\link{qqplot.ppm}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. The argument \code{type} selects the type of residual or weight that will be computed. Current options are: \describe{ \item{\code{"eem"}:}{ exponential energy marks (Stoyan and Grabarnik, 1991) computed by \code{\link{eem}}. These are positive weights attached to the data points (i.e. the points of the point pattern dataset to which the model was fitted). If the fitted model is correct, then the sum of these weights for all data points in a spatial region \eqn{B} has expected value equal to the area of \eqn{B}. See \code{\link{eem}} for further explanation. } \item{\code{"raw"}, \code{"inverse"} or \code{"pearson"}:}{ point process residuals (Baddeley et al, 2005) computed by the function \code{\link{residuals.ppm}}. These are residuals attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals in a spatial region \eqn{B} has mean zero. The options are \itemize{ \item \code{"raw"}: the raw residuals; \item \code{"inverse"}: the `inverse-lambda' residuals, a counterpart of the exponential energy weights; \item \code{"pearson"}: the Pearson residuals. } See \code{\link{residuals.ppm}} for further explanation. 
} } The argument \code{which} selects the type of plot that is produced. Options are: \describe{ \item{\code{"marks"}:}{ plot the residual measure. For the exponential energy weights (\code{type="eem"}) this displays circles centred at the points of the data pattern, with radii proportional to the exponential energy weights. For the residuals (\code{type="raw"}, \code{type="inverse"} or \code{type="pearson"}) this again displays circles centred at the points of the data pattern with radii proportional to the (positive) residuals, while the plotting of the negative residuals depends on the argument \code{plot.neg}. If \code{plot.neg="image"} then the negative part of the residual measure, which is a density, is plotted as a colour image. If \code{plot.neg="discrete"} then the discretised negative residuals (obtained by approximately integrating the negative density using the quadrature scheme of the fitted model) are plotted as squares centred at the dummy points with side lengths proportional to the (negative) residuals. [To control the size of the circles and squares, use the argument \code{maxsize}.] } \item{\code{"smooth"}:}{ plot a kernel-smoothed version of the residual measure. Each data or dummy point is taken to have a `mass' equal to its residual or exponential energy weight. (Note that residuals can be negative). This point mass is then replaced by a bivariate isotropic Gaussian density with standard deviation \code{sigma}. The value of the smoothed residual field at any point in the window is the sum of these weighted densities. If the fitted model is correct, this smoothed field should be flat, and its height should be close to 0 (for the residuals) or 1 (for the exponential energy weights). The field is plotted either as an image, contour plot or perspective view of a surface, according to the argument \code{plot.smooth}. The range of values of the smoothed field is printed if the option \code{which="sum"} is also selected. 
} \item{\code{"x"}:}{ produce a `lurking variable' plot for the \eqn{x} coordinate. This is a plot of \eqn{h(x)} against \eqn{x} (solid lines) and of \eqn{E(h(x))} against \eqn{x} (dashed lines), where \eqn{h(x)} is defined below, and \eqn{E(h(x))} denotes the expectation of \eqn{h(x)} assuming the fitted model is true. \itemize{ \item if \code{cumulative=TRUE} then \eqn{h(x)} is the cumulative sum of the weights or residuals for all points which have \eqn{X} coordinate less than or equal to \eqn{x}. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } area of the subset of the window to the left of the line \eqn{X=x}. \item if \code{cumulative=FALSE} then \eqn{h(x)} is the marginal integral of the smoothed residual field (see the case \code{which="smooth"} described above) on the \eqn{x} axis. This is approximately the derivative of the plot for \code{cumulative=TRUE}. The value of \eqn{h(x)} is computed by summing the values of the smoothed residual field over all pixels with the given \eqn{x} coordinate. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } length of the intersection between the observation window and the line \eqn{X=x}. } If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{h(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. } \item{\code{"y"}:}{ produce a similar lurking variable plot for the \eqn{y} coordinate. } \item{\code{"sum"}:}{ print the sum of the weights or residuals for all points in the window (clipped by a margin \code{rbord} if required) and the area of the same window. If the fitted model is correct the sum of the exponential energy weights should equal the area of the window, while the sum of the residuals should equal zero. 
Also print the range of values of the smoothed field displayed in the \code{"smooth"} case. } \item{\code{"all"}:}{ All four of the diagnostic plots listed above are plotted together in a two-by-two display. Top left panel is \code{"marks"} plot. Bottom right panel is \code{"smooth"} plot. Bottom left panel is \code{"x"} plot. Top right panel is \code{"y"} plot, rotated 90 degrees. } } The argument \code{rbord} ensures there are no edge effects in the computation of the residuals. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. The value of \code{rbord} should be greater than or equal to the range of interaction permitted in the model. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. (However, see the section about Replicated Data). The argument \code{rv} would normally be used only by experts. It enables the user to substitute arbitrary values for the residuals or marks, overriding the usual calculations. If \code{rv} is present, then instead of calculating the residuals from the fitted model, the algorithm takes the residuals from the object \code{rv}, and plots them in the manner appropriate to the type of residual or mark selected by \code{type}. If \code{type ="eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector of length equal to the number of points in the original data point pattern. 
Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, it should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. The return value of \code{diagnose.ppm} is an object of class \code{"diagppm"}. The \code{plot} method for this class is documented here. There is also a \code{print} method. See the Examples. In \code{plot.diagppm}, if a four-panel diagnostic plot is produced (the default), then the extra arguments \code{xlab}, \code{ylab}, \code{rlab} determine the text labels for the \eqn{x} and \eqn{y} coordinates and the residuals, respectively. The undocumented arguments \code{col.neg} and \code{col.smooth} control the colour maps used in the top left and bottom right panels respectively. See also the companion functions \code{\link{qqplot.ppm}}, which produces a Q-Q plot of the residuals, and \code{\link{lurking}}, which produces lurking variable plots for any spatial covariate. } \section{Replicated Data}{ Note that if \code{object} is a model that was obtained by first fitting a model to replicated point pattern data using \code{\link{mppm}} and then using \code{\link{subfits}} to extract a model for one of the individual point patterns, then the variance calculations are only implemented for the innovation variance (\code{oldstyle=TRUE}) and this is the default in such cases. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. 
} \seealso{ \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{qqplot.ppm}}, \code{\link{lurking}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells ~x, Strauss(r=0.15)) diagnose.ppm(fit) \dontrun{ diagnose.ppm(fit, type="pearson") } diagnose.ppm(fit, which="marks") diagnose.ppm(fit, type="raw", plot.neg="discrete") diagnose.ppm(fit, type="pearson", which="smooth") # save the diagnostics and plot them later u <- diagnose.ppm(fit, rbord=0.15, plot.it=FALSE) \dontrun{ plot(u) plot(u, which="marks") } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/plot.ppm.Rd0000644000176200001440000001532213333543264014741 0ustar liggesusers\name{plot.ppm} \alias{plot.ppm} \title{Plot a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, create spatial trend and conditional intensity surfaces of the model, in a form suitable for plotting, and (optionally) plot these surfaces. } \usage{ \method{plot}{ppm}(x, ngrid = c(40,40), superimpose = TRUE, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it = TRUE, locations = NULL, covariates=NULL, \dots) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{ngrid}{ The dimensions for a grid on which to evaluate, for plotting, the spatial trend and conditional intensity. A vector of 1 or 2 integers. If it is of length 1, \code{ngrid} is replaced by \code{c(ngrid,ngrid)}. } \item{superimpose}{ logical flag; if \code{TRUE} (and if \code{plot.it=TRUE}) the original data point pattern will be superimposed on the plots. } \item{trend}{ logical flag; if \code{TRUE}, the spatial trend surface will be produced. } \item{cif}{ logical flag; if \code{TRUE}, the conditional intensity surface will be produced. 
} \item{se}{ logical flag; if \code{TRUE}, the estimated standard error of the spatial trend surface will be produced. } \item{pause}{ logical flag indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. (This flag is ignored if \code{plot.it=FALSE}). } \item{how}{ character string or character vector indicating the style or styles of plots to be performed. Ignored if \code{plot.it=FALSE}. } \item{plot.it}{ logical scalar; should a plot be produced immediately? } \item{locations}{ If present, this determines the locations of the pixels at which predictions are computed. It must be a binary pixel image (an object of class \code{"owin"} with type \code{"mask"}). (Incompatible with \code{ngrid}). } \item{covariates}{ Values of external covariates required by the fitted model. Passed to \code{\link{predict.ppm}}. } \item{\dots}{ extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } } \value{ An object of class \code{plotppm}. Such objects may be plotted by \code{\link{plot.plotppm}()}. This is a list with components named \code{trend} and \code{cif}, either of which may be missing. They will be missing if the corresponding component does not make sense for the model, or if the corresponding argument was set equal to \code{FALSE}. Both \code{trend} and \code{cif} are lists of images. If the model is an unmarked point process, then they are lists of length 1, so that \code{trend[[1]]} is an image of the spatial trend and \code{cif[[1]]} is an image of the conditional intensity. If the model is a marked point process, then \code{trend[[i]]} is an image of the spatial trend for the mark \code{m[i]}, and \code{cif[[i]]} is an image of the conditional intensity for the mark \code{m[i]}, where \code{m} is the vector of levels of the marks. } \details{ This is the \code{plot} method for the class \code{"ppm"} (see \code{\link{ppm.object}} for details of this class). 
It invokes \code{\link{predict.ppm}} to compute the spatial trend and conditional intensity of the fitted point process model. See \code{\link{predict.ppm}} for more explanation about spatial trend and conditional intensity. The default action is to create a rectangular grid of points in (the bounding box of) the observation window of the data point pattern, and evaluate the spatial trend and conditional intensity of the fitted spatial point process model \code{x} at these locations. If the argument \code{locations=} is supplied, then the spatial trend and conditional intensity are calculated at the grid of points specified by this argument. The argument \code{locations}, if present, should be a binary image mask (an object of class \code{"owin"} and type \code{"mask"}). This determines a rectangular grid of locations, or a subset of such a grid, at which predictions will be computed. Binary image masks are conveniently created using \code{\link{as.mask}}. The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The argument \code{covariates} has the same format and interpretation as in \code{\link{predict.ppm}}. It may be either a data frame (the number of whose rows must match the number of pixels in \code{locations} multiplied by the number of possible marks in the point pattern), or a list of images. If argument \code{locations} is not supplied, and \code{covariates} \bold{is} supplied, then it \bold{must} be a list of images. If the fitted model was a marked (multitype) point process, then predictions are made for each possible mark value in turn. If the fitted model had no spatial trend, then the default is to omit calculating this (flat) surface, unless \code{trend=TRUE} is set explicitly. 
If the fitted model was Poisson, so that there were no spatial interactions, then the conditional intensity and spatial trend are identical, and the default is to omit the conditional intensity, unless \code{cif=TRUE} is set explicitly. If \code{plot.it=TRUE} then \code{\link{plot.plotppm}()} is called upon to plot the class \code{plotppm} object which is produced. (That object is also returned, silently.) Plots are produced successively using \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}} (or only a selection of these three, if \code{how} is given). Extra graphical parameters controlling the display may be passed directly via the arguments \code{...} or indirectly reset using \code{\link{spatstat.options}}. } \seealso{ \code{\link{plot.plotppm}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{predict.ppm}}, \code{\link{print.ppm}}, \code{\link{persp}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{plot}}, \code{\link{spatstat.options}} } \section{Warnings}{ See warnings in \code{\link{predict.ppm}}. } \examples{ m <- ppm(cells ~1, Strauss(0.05)) pm <- plot(m) # The object ``pm'' will be plotted as well as saved # for future plotting. pm } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/improve.kppm.Rd0000644000176200001440000001140713333543263015616 0ustar liggesusers\name{improve.kppm} \alias{improve.kppm} \title{Improve Intensity Estimate of Fitted Cluster Point Process Model} \description{ Update the fitted intensity of a fitted cluster point process model. } \usage{ improve.kppm(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) } \arguments{ \item{object}{ Fitted cluster point process model (object of class \code{"kppm"}). } \item{type}{ A character string indicating the method of estimation. 
Current options are \code{"clik1"}, \code{"wclik1"} and \code{"quasi"} for, respectively, first order composite (Poisson) likelihood, weighted first order composite likelihood and quasi-likelihood. } \item{rmax}{ Optional. The dependence range. Not usually specified by the user. } \item{eps.rmax}{ Numeric. A small positive number which is used to determine \code{rmax} from the tail behaviour of the pair correlation function. Namely \code{rmax} is the smallest value of \eqn{r} at which \eqn{(g(r)-1)/(g(0)-1)} falls below \code{eps.rmax}. Ignored if \code{rmax} is provided. } \item{dimyx}{ Pixel array dimensions. See Details. } \item{maxIter}{ Integer. Maximum number of iterations of iterative weighted least squares (Fisher scoring). } \item{tolerance}{ Numeric. Tolerance value specifying when to stop iterative weighted least squares (Fisher scoring). } \item{fast}{ Logical value indicating whether tapering should be used to make the computations faster (requires the package \pkg{Matrix}). } \item{vcov}{ Logical value indicating whether to calculate the asymptotic variance/covariance matrix. } \item{fast.vcov}{ Logical value indicating whether tapering should be used for the variance/covariance matrix to make the computations faster (requires the package \pkg{Matrix}). Caution: This is expected to underestimate the true asymptotic variances/covariances. } \item{verbose}{ A logical indicating whether the details of computations should be printed. } \item{save.internals}{ A logical indicating whether internal quantities should be saved in the returned object (mostly for development purposes). } } \value{ A fitted cluster point process model of class \code{"kppm"}. } \details{ This function reestimates the intensity parameters in a fitted \code{"kppm"} object. If \code{type="clik1"} estimates are based on the first order composite (Poisson) likelihood, which ignores dependence between the points. 
Note that \code{type="clik1"} is mainly included for testing purposes and is not recommended for the typical user; instead the more efficient \code{\link{kppm}} with \code{improve.type="none"} should be used. When \code{type="quasi"} or \code{type="wclik1"} the dependence structure between the points is incorporated in the estimation procedure by using the estimated pair correlation function in the estimating equation. In all cases the estimating equation is based on dividing the observation window into small subregions and counting the number of points in each subregion. To do this the observation window is first converted into a digital mask by \code{\link{as.mask}} where the resolution is controlled by the argument \code{dimyx}. The computational time grows with the cube of the number of subregions, so fine grids may take very long to compute (or even run out of memory). } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{improve.kppm}} } \references{ Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes, \emph{Biometrics}, \bold{63}, 252--258. Guan, Y. and Shen, Y. (2010) A weighted estimating equation approach to inference for inhomogeneous spatial point processes, \emph{Biometrika}, \bold{97}, 867--880. Guan, Y., Jalilian, A. and Waagepetersen, R. (2015) Quasi-likelihood for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 677--697. 
} \examples{ # fit a Thomas process using minimum contrast estimation method # to model interaction between points of the pattern fit0 <- kppm(bei ~ elev + grad, data = bei.extra) # fit the log-linear intensity model with quasi-likelihood method fit1 <- improve.kppm(fit0, type="quasi") # compare coef(fit0) coef(fit1) } \author{Abdollah Jalilian \email{jalilian@razi.ac.ir} % \url{http://www.razi.ac.ir/ajalilian/} and Rasmus Waagepetersen \email{rw@math.aau.dk} adapted for \pkg{spatstat} by \adrian and \ege } \keyword{spatial} \keyword{fit model} spatstat/man/plot.profilepl.Rd0000644000176200001440000000756313606253523016150 0ustar liggesusers\name{plot.profilepl} \alias{plot.profilepl} \title{ Plot Profile Likelihood } \description{ Plot the profile (pseudo) likelihood against the irregular parameters, for a model that was fitted by maximum profile (pseudo)likelihood. } \usage{ \method{plot}{profilepl}(x, \dots, add = FALSE, main = NULL, tag = TRUE, coeff = NULL, xvariable = NULL, col = 1, lty = 1, lwd = 1, col.opt = "green", lty.opt = 3, lwd.opt = 1) } \arguments{ \item{x}{ A point process model fitted by maximum profile (pseudo)likelihood. Object of class \code{"profilepl"}, obtained from \code{\link{profilepl}}. } \item{\dots}{ Additional plot arguments passed to \code{\link[graphics]{plot.default}} and \code{\link[graphics]{lines}}. } \item{add}{ Logical. If \code{TRUE}, the plot is drawn over the existing plot. } \item{main}{ Optional. Main title for the plot. A character string or character vector. } \item{tag}{ Logical value. If \code{TRUE} (the default), when the plot contains multiple curves corresponding to different values of a parameter, each curve will be labelled with the values of the irregular parameter. } \item{coeff}{ Optional. If this is given, it should be a character string matching the name of one of the fitted model coefficients. This coefficient will then be plotted on the vertical axis. } \item{xvariable}{ Optional. 
The name of the irregular parameter that should be plotted along the horizontal axis. The default is the first irregular parameter. } \item{col,lty,lwd}{ Graphical parameters (colour, line type, line width) for the curves on the plot. } \item{col.opt, lty.opt, lwd.opt}{ Graphical parameters for indicating the optimal parameter value. } } \details{ This is the \code{\link[graphics]{plot}} method for the class \code{"profilepl"} of fitted point process models obtained by maximising the profile likelihood or profile pseudolikelihood. The default behaviour is to plot the profile likelihood or profile pseudolikelihood on the vertical axis, against the value of the irregular parameter on the horizontal axis. If there are several irregular parameters, then one of them is plotted on the horizontal axis, and the plot consists of many different curves, corresponding to different values of the other parameters. The parameter to be plotted on the horizontal axis is specified by the argument \code{xvariable}; the default is to use the parameter that was listed first in the original call to \code{\link{profilepl}}. If \code{coeff} is given, it should be the name of one of the fitted model coefficients \code{names(coef(as.ppm(x)))}. The fitted value of that coefficient is plotted on the vertical axis. } \value{ Null. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{profilepl}} } \examples{ rstep <- if(interactive()) 0.005 else 0.02 # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, by=rstep)) ps <- profilepl(rr, Strauss, cells) plot(ps) # profile pseudolikelihood plot(ps, coeff="Interaction") # fitted interaction coefficient log(gamma) # two irregular parameters rs <- expand.grid(r=seq(0.05,0.15, by=rstep),sat=1:3) pg <- profilepl(rs, Geyer, cells) plot(pg) # profile pseudolikelihood against r for each value of 'sat' plot(pg, coeff="Interaction") plot(pg, xvariable="sat", col=ifelse(r < 0.1, "red", "green")) } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/HierStrauss.Rd0000644000176200001440000001071713333543262015445 0ustar liggesusers\name{HierStrauss} \alias{HierStrauss} \title{The Hierarchical Strauss Point Process Model} \description{ Creates an instance of the hierarchical Strauss point process model which can then be fitted to point pattern data. } \usage{ HierStrauss(radii, types=NULL, archy=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. 
The hierarchical version of the (stationary) Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. The nonstationary hierarchical Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss process pairwise interaction is yielded by the function \code{HierStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. 
A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{HierStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStrauss()}. } \seealso{ \code{\link{MultiStrauss}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStraussHard}}. } \examples{ r <- matrix(10 * c(3,4,4,3), nrow=2,ncol=2) HierStrauss(r) # prints a sensible description of itself ppm(ants ~1, HierStrauss(r, , c("Messor", "Cataglyphis"))) # fit the stationary hierarchical Strauss process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. 
} \keyword{spatial} \keyword{models} spatstat/man/macros/0000755000176200001440000000000013115225157014156 5ustar liggesusersspatstat/man/macros/defns.Rd0000644000176200001440000000432013252431517015544 0ustar liggesusers%% macro definitions for spatstat man pages \newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}} \newcommand{\rolf}{Rolf Turner \email{r.turner@auckland.ac.nz}} \newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}} \newcommand{\spatstatAuthors}{\adrian, \rolf and \ege} % Names with accents \newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}} \newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}} \newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}} \newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}} \newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}} \newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}} \newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}} \newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}} \newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}} %% List of all Gibbs interactions \newcommand{\GibbsInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{Concom}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{HierHard}}, \code{\link{HierStrauss}}, \code{\link{HierStraussHard}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Saturated}}, \code{\link{SatPiece}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} %% List of interactions recognised by RMH code 
\newcommand{\rmhInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} spatstat/man/is.connected.Rd0000644000176200001440000000304113333543263015536 0ustar liggesusers\name{is.connected} \Rdversion{1.1} \alias{is.connected} \alias{is.connected.default} \alias{is.connected.linnet} \title{ Determine Whether an Object is Connected } \description{ Determine whether an object is topologically connected. } \usage{ is.connected(X, \dots) \method{is.connected}{default}(X, \dots) \method{is.connected}{linnet}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}) or a linear network (object of class \code{"linnet"}). } \item{\dots}{ Arguments passed to \code{\link{connected}} to determine the connected components. } } \details{ The command \code{is.connected(X)} returns \code{TRUE} if the object \code{X} consists of a single, topologically-connected piece, and returns \code{FALSE} if \code{X} consists of several pieces which are not joined together. The function \code{is.connected} is generic. The default method \code{is.connected.default} works for many classes of objects, including windows (class \code{"owin"}) and images (class \code{"im"}). There is a method for linear networks, \code{is.connected.linnet}, described here, and a method for point patterns described in \code{\link{is.connected.ppp}}. } \value{ A logical value. } \seealso{ \code{\link{connected}}, \code{\link{is.connected.ppp}}. 
} \examples{ d <- distmap(cells, dimyx=256) X <- levelset(d, 0.07) plot(X) is.connected(X) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/methods.linim.Rd0000644000176200001440000000600613333543263015740 0ustar liggesusers\name{methods.linim} \Rdversion{1.1} \alias{methods.linim} %DoNotExport \alias{as.im.linim} \alias{as.data.frame.linim} \alias{print.linim} \alias{summary.linim} \alias{affine.linim} \alias{scalardilate.linim} \alias{shift.linim} \title{ Methods for Images on a Linear Network } \description{ Methods for the class \code{"linim"} of functions on a linear network. } \usage{ \method{print}{linim}(x, \dots) \method{summary}{linim}(object, \dots) \method{as.im}{linim}(X, \dots) \method{as.data.frame}{linim}(x, \dots) \method{shift}{linim}(X, \dots) \method{scalardilate}{linim}(X, f, \dots, origin=NULL) \method{affine}{linim}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X,x,object}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{\dots}{ Extra arguments passed to other methods. } \item{f}{Numeric. Scalar dilation factor.} \item{mat}{Numeric matrix representing the linear transformation.} \item{vec}{Numeric vector of length 2 specifying the shift vector.} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}} and \code{\link{as.data.frame}}, and the \pkg{spatstat} generic functions \code{\link{as.im}}, \code{\link{shift}}, \code{\link{scalardilate}} and \code{\link{affine}}. An object of class \code{"linim"} represents a pixel image defined on a linear network. The method \code{as.im.linim} extracts the pixel values and returns a pixel image of class \code{"im"}.
The method \code{as.data.frame.linim} returns a data frame giving spatial locations (in cartesian and network coordinates) and corresponding function values. The methods \code{shift.linim}, \code{scalardilate.linim} and \code{affine.linim} apply geometric transformations to the pixels and the underlying linear network, without changing the pixel values. } \value{ For \code{print.linim} the result is \code{NULL}. The function \code{summary.linim} returns an object of class \code{"summary.linim"}. In normal usage this summary is automatically printed by \code{\link{print.summary.linim}}. For \code{as.im.linim} the result is an object of class \code{"im"}. For the geometric transformations \code{shift.linim}, \code{scalardilate.linim} and \code{affine.linim}, the result is another object of class \code{"linim"}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) ## ............ print basic details ......................... X ## ............ print gory details ......................... summary(X) ## ........................................................... shift(X, c(1,1)) scalardilate(X, 2) head(as.data.frame(X)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/kppm.Rd0000644000176200001440000003720513571674202014144 0ustar liggesusers\name{kppm} \alias{kppm} \alias{kppm.formula} \alias{kppm.ppp} \alias{kppm.quad} \concept{point process model} \concept{Cox point process} \concept{cluster process} \concept{Neyman-Scott cluster process} \title{Fit Cluster or Cox Point Process Model} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern. 
} \usage{ kppm(X, \dots) \method{kppm}{formula}(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), \dots, data=NULL) \method{kppm}{ppp}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) \method{kppm}{quad}(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data = NULL, ..., covariates=data, subset, method = c("mincon", "clik2", "palm"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"} or \code{"quad"}) to which the model should be fitted, or a \code{formula} in the \R language defining the model. See Details. } \item{trend}{ An \R formula, with no left hand side, specifying the form of the log intensity. } \item{clusters}{ Character string determining the cluster model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{data,covariates}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{subset}{ Optional. A subset of the spatial domain, to which the model-fitting should be restricted. 
A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}), or an expression (possibly involving the names of entries in \code{data}) which can be evaluated to yield a window or pixel image. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{improve.type}{ Method for updating the initial estimate of the trend. Initially the trend is estimated as if the process is an inhomogeneous Poisson process. The default, \code{improve.type = "none"}, is to use this initial estimate. Otherwise, the trend estimate is updated by \code{\link{improve.kppm}}, using information about the pair correlation function. Options are \code{"clik1"} (first order composite likelihood, essentially equivalent to \code{"none"}), \code{"wclik1"} (weighted first order composite likelihood) and \code{"quasi"} (quasi likelihood). } \item{improve.args}{ Additional arguments passed to \code{\link{improve.kppm}} when \code{improve.type != "none"}. See Details. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihood or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. This argument is passed to \code{\link[stats]{optim}} as the argument \code{method}. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. 
} \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a clustered point process model to the point pattern dataset \code{X}. The model may be either a \emph{Neyman-Scott cluster process} or another \emph{Cox process}. The type of model is determined by the argument \code{clusters}. Currently the options are \code{clusters="Thomas"} for the Thomas process, \code{clusters="MatClust"} for the \Matern cluster process, \code{clusters="Cauchy"} for the Neyman-Scott cluster process with Cauchy kernel, \code{clusters="VarGamma"} for the Neyman-Scott cluster process with Variance Gamma kernel (requires an additional argument \code{nu} to be passed through the dots; see \code{\link{rVarGamma}} for details), and \code{clusters="LGCP"} for the log-Gaussian Cox process (may require additional arguments passed through \code{\dots}; see \code{\link{rLGCP}} for details on argument names). The first four models are Neyman-Scott cluster processes. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The intensity is specified by the \code{trend} argument. If the trend formula is \code{~1} (the default) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation). The argument \code{X} may also be a \code{formula} in the \R language. The right hand side of the formula gives the \code{trend} as described above. The left hand side of the formula gives the point pattern dataset to which the model should be fitted. 
If \code{improve.type="none"} this is the final estimate of the intensity. Otherwise, the intensity estimate is updated, as explained in \code{\link{improve.kppm}}. Additional arguments to \code{\link{improve.kppm}} are passed as a named list in \code{improve.args}. The clustering parameters of the model are then fitted either by minimum contrast estimation, or by maximising a composite likelihood. \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) clustering parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the parameters of the cluster model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the parameters of the cluster model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is due to Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to \code{\link{clusterfit}} to control the minimum contrast fitting algorithm. } \item{Composite likelihood:}{ If \code{method = "clik2"} the clustering parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). 
The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta)) - (sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{rho(d;theta)} is the pair correlation function of the model with cluster parameters \eqn{\theta}{theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d <= R)}, where \eqn{R} is \code{rmax/2}. } \item{Palm likelihood:}{ If \code{method = "palm"} the clustering parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ sum[i,j] w(x[i], x[j]) log(lambdaP(x[j] | x[i]; theta)) - integral[D] w(x[i], u) lambdaP(u | x[i]; theta) du } with the same notation as above. Here \eqn{\lambda_P(u|v;\theta)}{lambdaP(u|v;theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}. } } In all three methods, the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the arguments \code{control} and \code{algorithm}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}).
Fitting the LGCP model requires the \pkg{RandomFields} package, except in the default case where the exponential covariance is assumed. } \section{Log-Gaussian Cox Models}{ To fit a log-Gaussian Cox model with non-exponential covariance, specify \code{clusters="LGCP"} and use additional arguments to specify the covariance structure. These additional arguments can be given individually in the call to \code{kppm}, or they can be collected together in a list called \code{covmodel}. For example a \Matern model with parameter \eqn{\nu=0.5} could be specified either by \code{kppm(X, clusters="LGCP", model="matern", nu=0.5)} or by \code{kppm(X, clusters="LGCP", covmodel=list(model="matern", nu=0.5))}. The argument \code{model} specifies the type of covariance model: the default is \code{model="exp"} for an exponential covariance. Alternatives include \code{"matern"}, \code{"cauchy"} and \code{"spheric"}. Model names correspond to functions beginning with \code{RM} in the \pkg{RandomFields} package: for example \code{model="matern"} corresponds to the function \code{RMmatern} in the \pkg{RandomFields} package. Additional arguments are passed to the relevant function in the \pkg{RandomFields} package: for example if \code{model="matern"} then the additional argument \code{nu} is required, and is passed to the function \code{RMmatern} in the \pkg{RandomFields} package. Note that it is not possible to use \emph{anisotropic} covariance models because the \code{kppm} technique assumes the pair correlation function is isotropic. } \value{ An object of class \code{"kppm"} representing the fitted model. There are methods for printing, plotting, predicting, simulating and updating objects of this class. } \section{Error and warning messages}{ See \code{\link{ppm.ppp}} for a list of common error messages and warnings originating from the first stage of model-fitting. 
} \seealso{ Methods for \code{kppm} objects: \code{\link{plot.kppm}}, \code{\link{fitted.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link[spatstat:methods.kppm]{methods.kppm}}, \code{\link{as.ppm.kppm}}, \code{\link{as.fv.kppm}}, \code{\link{Kmodel.kppm}}, \code{\link{pcfmodel.kppm}}. Minimum contrast fitting algorithm: higher level interface \code{\link{clusterfit}}; low-level algorithm \code{\link{mincontrast}}. Alternative fitting algorithms: \code{\link{thomas.estK}}, \code{\link{matclust.estK}}, \code{\link{lgcp.estK}}, \code{\link{cauchy.estK}}, \code{\link{vargamma.estK}}, \code{\link{thomas.estpcf}}, \code{\link{matclust.estpcf}}, \code{\link{lgcp.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{vargamma.estpcf}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}}. } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Jalilian, A., Guan, Y. and Waagepetersen, R. (2012) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119--137. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258.
} \examples{ # method for point patterns kppm(redwood, ~1, "Thomas") # method for formulas kppm(redwood ~ 1, "Thomas") # different models for clustering kppm(redwood ~ x, "MatClust") kppm(redwood ~ x, "MatClust", statistic="pcf", statargs=list(stoyan=0.2)) kppm(redwood ~ x, cluster="Cauchy", statistic="K") kppm(redwood, cluster="VarGamma", nu = 0.5, statistic="pcf") # log-Gaussian Cox process (LGCP) models kppm(redwood ~ 1, "LGCP", statistic="pcf") if(require("RandomFields")) { # Random Fields package is needed for non-default choice of covariance model kppm(redwood ~ x, "LGCP", statistic="pcf", model="matern", nu=0.3, control=list(maxit=10)) } # Different fitting techniques kppm(redwood ~ 1, "Thomas", method="c") kppm(redwood ~ 1, "Thomas", method="p") # composite likelihood method kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8) # quasi-likelihood method kppm(redwood ~ x, "Thomas", improve.type = "quasi") } \author{ \spatstatAuthors, with contributions from Abdollah Jalilian and Rasmus Waagepetersen. } \keyword{spatial} \keyword{models} spatstat/man/rLGCP.Rd0000644000176200001440000001162213571674202014077 0ustar liggesusers\name{rLGCP} \alias{rLGCP} \title{Simulate Log-Gaussian Cox Process} \description{ Generate a random point pattern, a realisation of the log-Gaussian Cox process. } \usage{ rLGCP(model="exp", mu = 0, param = NULL, \dots, win=NULL, saveLambda=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{model}{ character string: the short name of a covariance model for the Gaussian random field. After adding the prefix \code{"RM"}, the code will search for a function of this name in the \pkg{RandomFields} package. } \item{mu}{ mean function of the Gaussian random field. Either a single number, a \code{function(x,y, ...)} or a pixel image (object of class \code{"im"}). } \item{param}{ List of parameters for the covariance. Standard arguments are \code{var} and \code{scale}. 
} \item{\dots}{ Additional parameters for the covariance, or arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"}. } \item{saveLambda}{ Logical. If \code{TRUE} (the default) then the simulated random intensity will also be saved, and returned as an attribute of the point pattern. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (object of class \code{"ppp"}) or a list of point patterns. Additionally, the simulated intensity function for each point pattern is returned as an attribute \code{"Lambda"} of the point pattern, if \code{saveLambda=TRUE}. } \details{ This function generates a realisation of a log-Gaussian Cox process (LGCP). This is a Cox point process in which the logarithm of the random intensity is a Gaussian random field with mean function \eqn{\mu} and covariance function \eqn{c(r)}. Conditional on the random intensity, the point process is a Poisson process with this intensity. The string \code{model} specifies the covariance function of the Gaussian random field, and the parameters of the covariance are determined by \code{param} and \code{\dots}. To determine the covariance model, the string \code{model} is prefixed by \code{"RM"}, and a function of this name is sought in the \pkg{RandomFields} package. For a list of available models see \code{\link[RandomFields]{RMmodel}} in the \pkg{RandomFields} package. For example the \Matern covariance is specified by \code{model="matern"}, corresponding to the function \code{RMmatern} in the \pkg{RandomFields} package.
Standard variance parameters (for all functions beginning with \code{"RM"} in the \pkg{RandomFields} package) are \code{var} for the variance at distance zero, and \code{scale} for the scale parameter. Other parameters are specified in the help files for the individual functions beginning with \code{"RM"}. For example the help file for \code{RMmatern} states that \code{nu} is a parameter for this model. This algorithm uses the function \code{\link[RandomFields]{RFsimulate}} in the \pkg{RandomFields} package to generate values of a Gaussian random field, with the specified mean function \code{mu} and the covariance specified by the arguments \code{model} and \code{param}, on the points of a regular grid. The exponential of this random field is taken as the intensity of a Poisson point process, and a realisation of the Poisson process is then generated by the function \code{\link{rpoispp}} in the \pkg{spatstat} package. If the simulation window \code{win} is missing or \code{NULL}, then it defaults to \code{Window(mu)} if \code{mu} is a pixel image, and it defaults to the unit square otherwise. The LGCP model can be fitted to data using \code{\link{kppm}}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rGaussPoisson}}, \code{\link{rNeymanScott}}, \code{\link{lgcp.estK}}, \code{\link{kppm}} } \references{ \Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. 
} \examples{ if(require(RandomFields)) { # homogeneous LGCP with exponential covariance function X <- rLGCP("exp", 3, var=0.2, scale=.1) # inhomogeneous LGCP with Gaussian covariance function m <- as.im(function(x, y){5 - 1.5 * (x - 0.5)^2 + 2 * (y - 0.5)^2}, W=owin()) X <- rLGCP("gauss", m, var=0.15, scale =0.5) plot(attr(X, "Lambda")) points(X) # inhomogeneous LGCP with Matern covariance function X <- rLGCP("matern", function(x, y){ 1 - 0.4 * x}, var=2, scale=0.7, nu=0.5, win = owin(c(0, 10), c(0, 10))) plot(X) } } \author{Abdollah Jalilian and Rasmus Waagepetersen. Modified by \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/allstats.Rd0000644000176200001440000000603213333543262015013 0ustar liggesusers\name{allstats} \alias{allstats} \title{Calculate four standard summary functions of a point pattern.} \description{ Calculates the \eqn{F}, \eqn{G}, \eqn{J}, and \eqn{K} summary functions for an unmarked point pattern. Returns them as a function array (of class \code{"fasp"}, see \code{\link{fasp.object}}). } \usage{ allstats(pp, \dots, dataname=NULL, verb=FALSE) } \arguments{ \item{pp}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"}. It must not be marked. } \item{\dots}{ Optional arguments passed to the summary functions \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}}. } \item{dataname}{A character string giving an optional (alternative) name for the point pattern. } \item{verb}{A logical value meaning ``verbose''. If \code{TRUE}, progress reports are printed during calculation. } } \details{ This computes four standard summary statistics for a point pattern: the empty space function \eqn{F(r)}, nearest neighbour distance distribution function \eqn{G(r)}, van Lieshout-Baddeley function \eqn{J(r)} and Ripley's function \eqn{K(r)}. 
The real work is done by \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}} respectively. Consult the help files for these functions for further information about the statistical interpretation of \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K}. If \code{verb} is \code{TRUE}, then ``progress reports'' (just indications of completion) are printed out when the calculations are finished for each of the four function types. The overall title of the array of four functions (for plotting by \code{\link{plot.fasp}}) will be formed from the argument \code{dataname}. If this is not given, it defaults to the expression for \code{pp} given in the call to \code{allstats}. } \value{ A list of length 4 containing the \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K} functions respectively. The list can be plotted directly using \code{plot} (which dispatches to \code{\link{plot.solist}}). Each list entry retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} or \code{\link{Kest}}. Thus each entry in the list is a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J, and \code{cbind(trans,theo) ~ r} for K. 
} \author{\adrian and \rolf } \seealso{ \code{\link{plot.solist}}, \code{\link{plot.fv}}, \code{\link{fv.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} } \examples{ data(swedishpines) a <- allstats(swedishpines,dataname="Swedish Pines") \dontrun{ plot(a) plot(a, subset=list("r<=15","r<=15","r<=15","r<=50")) } } \keyword{spatial} \keyword{nonparametric} spatstat/man/methods.rhohat.Rd0000644000176200001440000000651613333543263016123 0ustar liggesusers\name{methods.rhohat} \alias{methods.rhohat} %DoNotExport \alias{print.rhohat} \alias{plot.rhohat} \alias{predict.rhohat} \alias{simulate.rhohat} \title{ Methods for Intensity Functions of Spatial Covariate } \description{ These are methods for the class \code{"rhohat"}. } \usage{ \method{print}{rhohat}(x, ...) \method{plot}{rhohat}(x, ..., do.rug=TRUE) \method{predict}{rhohat}(object, ..., relative=FALSE, what=c("rho", "lo", "hi", "se")) \method{simulate}{rhohat}(object, nsim=1, ..., drop=TRUE) } \arguments{ \item{x,object}{ An object of class \code{"rhohat"} representing a smoothed estimate of the intensity function of a point process. } \item{\dots}{ Arguments passed to other methods. } \item{do.rug}{ Logical value indicating whether to plot the observed values of the covariate as a rug plot along the horizontal axis. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } \item{nsim}{ Number of simulations to be generated. } \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), a point pattern is returned. If \code{drop=FALSE}, a list of length 1 containing a point pattern is returned. 
} \item{what}{ Optional character string (partially matched) specifying which value should be calculated: either the function estimate (\code{what="rho"}, the default), the lower or upper end of the confidence interval (\code{what="lo"} or \code{what="hi"}) or the standard error (\code{what="se"}). } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link[graphics]{plot}}, \code{\link[stats]{predict}} and \code{\link[stats]{simulate}} for the class \code{"rhohat"}. An object of class \code{"rhohat"} is an estimate of the intensity of a point process, as a function of a given spatial covariate. See \code{\link{rhohat}}. The method \code{plot.rhohat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. The method \code{predict.rhohat} computes a pixel image of the intensity \eqn{\rho(Z(u))}{rho(Z(u))} at each spatial location \eqn{u}, where \eqn{Z} is the spatial covariate. The method \code{simulate.rhohat} invokes \code{predict.rhohat} to determine the predicted intensity, and then simulates a Poisson point process with this intensity. } \value{ For \code{predict.rhohat} the value is a pixel image (object of class \code{"im"} or \code{"linim"}). For \code{simulate.rhohat} the value is a point pattern (object of class \code{"ppp"} or \code{"lpp"}). For other functions, the value is \code{NULL}. 
} \author{ \adrian } \seealso{ \code{\link{rhohat}} } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, function(x,y){x}) rho plot(rho) Y <- predict(rho) plot(Y) plot(simulate(rho), add=TRUE) # fit <- ppm(X, ~x) rho <- rhohat(fit, "y") opa <- par(mfrow=c(1,2)) plot(predict(rho)) plot(predict(rho, relative=TRUE)) par(opa) plot(predict(rho, what="se")) } \keyword{spatial} \keyword{methods} spatstat/man/compatible.fv.Rd0000644000176200001440000000262613471743367015735 0ustar liggesusers\name{compatible.fv} \alias{compatible.fv} \title{Test Whether Function Objects Are Compatible} \description{ Tests whether two or more function objects (class \code{"fv"}) are compatible. } \usage{ \method{compatible}{fv}(A, B, \dots, samenames=TRUE) } \arguments{ \item{A,B,\dots}{Two or more function value objects (class \code{"fv"}).} \item{samenames}{ Logical value indicating whether to check for complete agreement between the column names of the objects (\code{samenames=TRUE}, the default) or just to check that the name of the function argument is the same (\code{samenames=FALSE}). } } \details{ An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link{Kest}} and its relatives. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. The functions are compatible if they have been evaluated at the same sequence of values of the argument \code{r}, and if the statistical estimates have the same names. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. 
} \seealso{ \code{\link{eval.fv}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/profilepl.Rd0000644000176200001440000002015413606002175015155 0ustar liggesusers\name{profilepl} \alias{profilepl} \title{Fit Models by Profile Maximum Pseudolikelihood or AIC} \description{ Fits point process models by maximising the profile likelihood, profile pseudolikelihood, profile composite likelihood or AIC. } \usage{ profilepl(s, f, \dots, aic=FALSE, rbord=NULL, verbose = TRUE, fast=TRUE) } \arguments{ \item{s}{ Data frame containing values of the irregular parameters over which the criterion will be computed. } \item{f}{ Function (such as \code{\link{Strauss}}) that generates an interpoint interaction object, given values of the irregular parameters. } \item{\dots}{ Data passed to \code{\link{ppm}} to fit the model. } \item{aic}{ Logical value indicating whether to find the parameter values which minimise the AIC (\code{aic=TRUE}) or maximise the profile likelihood (\code{aic=FALSE}, the default). } \item{rbord}{ Radius for border correction (same for all models). If omitted, this will be computed from the interactions. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical value indicating whether to use a faster, less accurate model-fitting technique when computing the profile pseudolikelihood. See Section on Speed and Accuracy. } } \details{ The model-fitting function \code{\link{ppm}} fits point process models to point pattern data. However, only the \sQuote{regular} parameters of the model can be fitted by \code{\link{ppm}}. The model may also depend on \sQuote{irregular} parameters that must be fixed in any call to \code{\link{ppm}}. This function \code{profilepl} is a wrapper which finds the values of the irregular parameters that give the best fit. 
If \code{aic=FALSE} (the default), the best fit is the model which maximises the likelihood (if the models are Poisson processes) or maximises the pseudolikelihood or logistic likelihood. If \code{aic=TRUE} then the best fit is the model which minimises the Akaike Information Criterion \code{\link{AIC.ppm}}. The argument \code{s} must be a data frame whose columns contain values of the irregular parameters over which the maximisation is to be performed. An irregular parameter may affect either the interpoint interaction or the spatial trend. \describe{ \item{interaction parameters:}{ in a call to \code{\link{ppm}}, the argument \code{interaction} determines the interaction between points. It is usually a call to a function such as \code{\link{Strauss}}. The arguments of this call are irregular parameters. For example, the interaction radius parameter \eqn{r} of the Strauss process, determined by the argument \code{r} to the function \code{\link{Strauss}}, is an irregular parameter. } \item{trend parameters:}{ in a call to \code{\link{ppm}}, the spatial trend may depend on covariates, which are supplied by the argument \code{covariates}. These covariates may be functions written by the user, of the form \code{function(x,y,...)}, and the extra arguments \code{\dots} are irregular parameters. } } The argument \code{f} determines the interaction for each model to be fitted. It would typically be one of the functions \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{OrdThresh}}, \code{\link{Softcore}}, \code{\link{Strauss}} or \code{\link{StraussHard}}. Alternatively it could be a function written by the user. Columns of \code{s} which match the names of arguments of \code{f} will be interpreted as interaction parameters. Other columns will be interpreted as trend parameters. 
The data frame \code{s} must provide values for each argument of \code{f}, except for the optional arguments, which are those arguments of \code{f} that have the default value \code{NA}. To find the best fit, each row of \code{s} will be taken in turn. Interaction parameters in this row will be passed to \code{f}, resulting in an interaction object. Then \code{\link{ppm}} will be applied to the data \code{...} using this interaction. Any trend parameters will be passed to \code{\link{ppm}} through the argument \code{covfunargs}. This results in a fitted point process model. The value of the log pseudolikelihood or AIC from this model is stored. After all rows of \code{s} have been processed in this way, the row giving the maximum value of log pseudolikelihood will be found. The object returned by \code{profilepl} contains the profile pseudolikelihood (or profile AIC) function, the best fitting model, and other data. It can be plotted (yielding a plot of the log pseudolikelihood or AIC values against the irregular parameters) or printed (yielding information about the best fitting values of the irregular parameters). In general, \code{f} may be any function that will return an interaction object (object of class \code{"interact"}) that can be used in a call to \code{\link{ppm}}. Each argument of \code{f} must be a single value. } \section{Speed and Accuracy}{ Computation of the profile pseudolikelihood can be time-consuming. We recommend starting with a small experiment in which \code{s} contains only a few rows of values. This will indicate roughly the optimal values of the parameters. Then a full calculation using more finely spaced values can identify the exact optimal values. It is normal that the procedure appears to slow down at the end. During the computation of the profile pseudolikelihood, the model-fitting procedure is accelerated by omitting some calculations that are not needed for computing the pseudolikelihood. 
When the optimal parameter values have been identified, they are used to fit the final model in its entirety. Fitting the final model can take longer than computing the profile pseudolikelihood. If \code{fast=TRUE} (the default), then additional shortcuts are taken in order to accelerate the computation of the profile log pseudolikelihood. These shortcuts mean that the values of the profile log pseudolikelihood in the result (\code{$prof}) may not be equal to the values that would be obtained if the model was fitted normally. Currently this happens only for the area interaction \code{\link{AreaInter}}. It may be wise to do a small experiment with \code{fast=TRUE} and then a definitive calculation with \code{fast=FALSE}. } \value{ An object of class \code{"profilepl"}. There are methods for \code{\link[graphics]{plot}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{simulate}}, \code{\link{as.ppm}}, \code{\link{fitin}} and \code{\link{parameters}} for objects of this class. The components of the object include \item{fit}{Best-fitting model} \item{param}{The data frame \code{s}} \item{iopt}{Row index of the best-fitting parameters in \code{s}} To extract the best fitting model you can also use \code{\link{as.ppm}}. } \seealso{ \code{\link{plot.profilepl}} } \examples{ # one irregular parameter rr <- data.frame(r=seq(0.05,0.15, by=0.01)) \testonly{ rr <- data.frame(r=c(0.05,0.1,0.15)) } ps <- profilepl(rr, Strauss, cells) ps plot(ps) # two irregular parameters rs <- expand.grid(r=seq(0.05,0.15, by=0.01),sat=1:3) \testonly{ rs <- expand.grid(r=c(0.07,0.12),sat=1:2) } pg <- profilepl(rs, Geyer, cells) pg as.ppm(pg) # multitype pattern with a common interaction radius \dontrun{ RR <- data.frame(R=seq(0.03,0.05,by=0.01)) MS <- function(R) { MultiStrauss(radii=diag(c(R,R))) } pm <- profilepl(RR, MS, amacrine ~marks) } ## more information summary(pg) } \references{ Baddeley, A. and Turner, R. 
(2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/is.subset.owin.Rd0000644000176200001440000000246213333543263016062 0ustar liggesusers\name{is.subset.owin} \alias{is.subset.owin} \title{Determine Whether One Window is Contained In Another} \description{ Tests whether window \code{A} is a subset of window \code{B}. } \usage{ is.subset.owin(A, B) } \arguments{ \item{A}{A window object (see Details).} \item{B}{A window object (see Details).} } \value{ Logical scalar; \code{TRUE} if \code{A} is a sub-window of \code{B}, otherwise \code{FALSE}. } \details{ This function tests whether the window \code{A} is a subset of the window \code{B}. The arguments \code{A} and \code{B} must be window objects (either objects of class \code{"owin"}, or data that can be coerced to this class by \code{\link{as.owin}}). Various algorithms are used, depending on the geometrical type of the two windows. Note that if \code{B} is not rectangular, the algorithm proceeds by discretising \code{A}, converting it to a pixel mask using \code{\link{as.mask}}. In this case the resulting answer is only ``approximately correct''. The accuracy of the approximation can be controlled: see \code{\link{as.mask}}. } \author{\adrian and \rolf } \examples{ w1 <- as.owin(c(0,1,0,1)) w2 <- as.owin(c(-1,2,-1,2)) is.subset.owin(w1,w2) # Returns TRUE. is.subset.owin(w2,w1) # Returns FALSE. } \keyword{spatial} \keyword{math} spatstat/man/runifpointOnLines.Rd0000644000176200001440000000376213333543264016662 0ustar liggesusers\name{runifpointOnLines} \alias{runifpointOnLines} \title{Generate N Uniform Random Points On Line Segments} \description{ Given a line segment pattern, generate a random point pattern consisting of \code{n} points uniformly distributed on the line segments. 
} \usage{ runifpointOnLines(n, L, nsim=1, drop=TRUE) } \arguments{ \item{n}{Number of points to generate.} \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should lie. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This command generates a point pattern consisting of \code{n} independent random points, each point uniformly distributed on the line segment pattern. This means that, for each random point, \itemize{ \item the probability of falling on a particular segment is proportional to the length of the segment; and \item given that the point falls on a particular segment, it has uniform probability density along that segment. } If \code{n} is a single integer, the result is an unmarked point pattern containing \code{n} points. If \code{n} is a vector of integers, the result is a marked point pattern, with \code{m} different types of points, where \code{m = length(n)}, in which there are \code{n[j]} points of type \code{j}. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}) with the same window as \code{L}. If \code{nsim > 1}, a list of point patterns. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{pointsOnLines}}, \code{\link{runifpoint}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) Y <- runifpointOnLines(20, X) plot(X, main="") plot(Y, add=TRUE) Z <- runifpointOnLines(c(5,5), X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/fixef.mppm.Rd0000644000176200001440000000305213333543265015237 0ustar liggesusers\name{fixef.mppm} \alias{fixef.mppm} \title{ Extract Fixed Effects from Point Process Model } \description{ Given a point process model fitted to a list of point patterns, extract the fixed effects of the model. 
produced by the fitting algorithm \code{\link{mppm}}.
Optional. Numeric vector giving the values of the argument \eqn{r} at which the dot K function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated.
} \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIdot}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdadot} for each pair of points, the first point of type \code{i} and the second of any type. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdadot}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdadot} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. 
} \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kdot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. Briefly, given a multitype point process, consider the points without their types, and suppose this unmarked point process has intensity function \eqn{\lambda(u)}{lambda(u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda(\zeta)}{1/lambda(z)} at each point \eqn{\zeta}{z} of the process. Then the expected total mass per unit area is 1. The inhomogeneous ``dot-type'' \eqn{K} function \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}, discounting this point itself. If the process of type \eqn{i} points were independent of the points of other types, then \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j} for \eqn{j\neq i}{j != i}. 
The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. 
The pair correlation function can also be applied to the result of \code{Kdot.inhom}; see \code{\link{pcf}}.
(the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}).
If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. } \details{ For each point in the given three-dimensional point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic. This is the method for the class \code{"pp3"}. If there are no points in the pattern, a numeric vector of length zero is returned. If there is only one point, then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. } \seealso{ \code{\link{nnwhich}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ X <- runifpoint3(30) m <- nnwhich(X) m2 <- nnwhich(X, k=2) } \author{ \adrian based on two-dimensional code by Pavel Grabarnik } \keyword{spatial} \keyword{math} spatstat/man/repairNetwork.Rd0000644000176200001440000000154313606323517016024 0ustar liggesusers\name{repairNetwork} \alias{repairNetwork} \title{ Repair Internal Data in a Linear Network } \description{ Detect and repair inconsistencies or duplication in the internal data of a network object. 
} \usage{ repairNetwork(X) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}) or a point pattern on a linear network (object of class \code{"lpp"}). } } \details{ This function detects and repairs inconsistencies in the internal data of \code{X}. Currently it does the following: \itemize{ \item removes any duplicated edges of the network \item ensures that each edge is recorded as a pair of vertex indices \code{(from, to)} with \code{from < to}. } } \value{ An object of the same kind as \code{X}. } \author{ \adrian. } \seealso{ \code{\link{thinNetwork}} } \keyword{spatial} \keyword{manip} spatstat/man/deletebranch.Rd0000644000176200001440000000463713333543263015616 0ustar liggesusers\name{deletebranch} \alias{deletebranch} \alias{deletebranch.linnet} \alias{deletebranch.lpp} \alias{extractbranch} \alias{extractbranch.linnet} \alias{extractbranch.lpp} \title{ Delete or Extract a Branch of a Tree } \description{ Deletes or extracts a given branch of a tree. } \usage{ deletebranch(X, \dots) \method{deletebranch}{linnet}(X, code, labels, \dots) \method{deletebranch}{lpp}(X, code, labels, \dots) extractbranch(X, \dots) \method{extractbranch}{linnet}(X, code, labels, \dots, which=NULL) \method{extractbranch}{lpp}(X, code, labels, \dots, which=NULL) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{code}{ Character string. Label of the branch to be deleted or extracted. } \item{labels}{ Vector of character strings. Branch labels for the vertices of the network, usually obtained from \code{\link{treebranchlabels}}. } \item{\dots}{Arguments passed to methods.} \item{which}{ Logical vector indicating which vertices of the network should be extracted. Overrides \code{code} and \code{labels}. } } \details{ The linear network \code{L <- X} or \code{L <- as.linnet(X)} must be a tree, that is, it has no loops. 
The argument \code{labels} should be a character vector giving tree branch labels for each vertex of the network. It is usually obtained by calling \code{\link{treebranchlabels}}. The branch designated by the string \code{code} will be deleted or extracted. The return value is the result of deleting or extracting this branch from \code{X} along with any data associated with this branch (such as points or marks). } \value{ Another object of the same type as \code{X} obtained by deleting or extracting the specified branch. } \author{ \spatstatAuthors } \seealso{ \code{\link{treebranchlabels}}, \code{\link{branchlabelfun}}, \code{\link{linnet}} } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) # delete branch B LminusB <- deletebranch(L, "b", tb) plot(LminusB, add=TRUE, col="green") # extract branch B LB <- extractbranch(L, "b", tb) plot(LB, add=TRUE, col="red") } \keyword{spatial} \keyword{manip} spatstat/man/progressreport.Rd0000644000176200001440000001000713333543264016263 0ustar liggesusers\name{progressreport} \alias{progressreport} \title{Print Progress Reports} \description{ Prints Progress Reports during a loop or iterative calculation. } \usage{ progressreport(i, n, every = min(100,max(1, ceiling(n/100))), tick = 1, nperline = NULL, charsperline = getOption("width"), style = spatstat.options("progress"), showtime = NULL, state=NULL) } \arguments{ \item{i}{ Integer. The current iteration number (from 1 to \code{n}). } \item{n}{ Integer. The (maximum) number of iterations to be computed. } \item{every}{ Optional integer. Iteration number will be printed when \code{i} is a multiple of \code{every}. } \item{tick}{ Optional integer. A tick mark or dot will be printed when \code{i} is a multiple of \code{tick}. } \item{nperline}{ Optional integer. 
Number of iterations per line of output. } \item{charsperline}{ Optional integer. The number of characters in a line of output. } \item{style}{ Character string determining the style of display. Options are \code{"tty"} (the default), \code{"tk"} and \code{"txtbar"}. See Details. } \item{showtime}{ Optional. Logical value indicating whether to print the estimated time remaining. Applies only when \code{style="tty"}. } \item{state}{ Optional. A list containing the internal data. } } \details{ This is a convenient function for reporting progress during an iterative sequence of calculations or a suite of simulations. \itemize{ \item If \code{style="tk"} then \code{tcltk::tkProgressBar} is used to pop-up a new graphics window showing a progress bar. This requires the package \pkg{tcltk}. As \code{i} increases from 1 to \code{n}, the bar will lengthen. The arguments \code{every, tick, nperline, showtime} are ignored. \item If \code{style="txtbar"} then \code{\link[utils]{txtProgressBar}} is used to represent progress as a bar made of text characters in the \R interpreter window. As \code{i} increases from 1 to \code{n}, the bar will lengthen. The arguments \code{every, tick, nperline, showtime} are ignored. \item If \code{style="tty"} (the default), then progress reports are printed to the console. This only seems to work well under Linux. As \code{i} increases from 1 to \code{n}, the output will be a sequence of dots (one dot for every \code{tick} iterations), iteration numbers (printed when iteration number is a multiple of \code{every} or is less than 4), and optionally the estimated time remaining. For example \code{[etd 1:20:05]} means an estimated time of 1 hour, 20 minutes and 5 seconds until finished. 
The estimated time remaining will be printed only if \code{style="tty"}, and the argument \code{state} is given, and either \code{showtime=TRUE}, or \code{showtime=NULL} and the iterations are slow (defined as: the estimated time remaining is longer than 3 minutes, or the average time per iteration is longer than 20 seconds). } It is optional, but strongly advisable, to use the argument \code{state} to store and update the internal data for the progress reports (such as the cumulative time taken for computation) as shown in the last example below. This avoids conflicts with other programs that might be calling \code{progressreport} at the same time. } \value{ If \code{state} was \code{NULL}, the result is \code{NULL}. Otherwise the result is the updated value of \code{state}. } \author{ \spatstatAuthors. } \examples{ for(i in 1:40) { # # code that does something... # progressreport(i, 40) } # saving internal state: *recommended* sta <- list() for(i in 1:20) { # some code ... sta <- progressreport(i, 20, state=sta) } #' use text progress bar sta <- list() for(i in 1:10) { # some code ... sta <- progressreport(i, 10, state=sta, style="txtbar") } } \keyword{print} spatstat/man/CDF.Rd0000644000176200001440000000245413333543262013564 0ustar liggesusers\name{CDF} \alias{CDF} \alias{CDF.density} \title{ Cumulative Distribution Function From Kernel Density Estimate } \description{ Given a kernel estimate of a probability density, compute the corresponding cumulative distribution function. } \usage{ CDF(f, \dots) \method{CDF}{density}(f, \dots, warn = TRUE) } \arguments{ \item{f}{ Density estimate (object of class \code{"density"}). } \item{\dots}{ Ignored. } \item{warn}{ Logical value indicating whether to issue a warning if the density estimate \code{f} had to be renormalised because it was computed in a restricted interval. } } \details{ \code{CDF} is generic, with a method for class \code{"density"}. 
This calculates the cumulative distribution function whose probability density has been estimated and stored in the object \code{f}. The object \code{f} must belong to the class \code{"density"}, and would typically have been obtained from a call to the function \code{\link[stats]{density}}. } \value{ A function, which can be applied to any numeric value or vector of values. } \author{ \spatstatAuthors } \seealso{ \code{\link[stats]{density}}, \code{\link{quantile.density}} } \examples{ b <- density(runif(10)) f <- CDF(b) f(0.5) plot(f) } \keyword{nonparametric} \keyword{univar} spatstat/man/plot.dppm.Rd0000644000176200001440000000351013333543264015101 0ustar liggesusers\name{plot.dppm} \alias{plot.dppm} \title{Plot a fitted determinantal point process} \description{ Plots a fitted determinantal point process model, displaying the fitted intensity and the fitted summary function. } \usage{ \method{plot}{dppm}(x, ..., what=c("intensity", "statistic")) } \arguments{ \item{x}{ Fitted determinantal point process model. An object of class \code{"dppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } } \details{ This is a method for the generic function \code{\link{plot}} for the class \code{"dppm"} of fitted determinantal point process models. The argument \code{x} should be a determinantal point process model (object of class \code{"dppm"}) obtained using the function \code{\link{dppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"} and \code{"statistic"}. This command is capable of producing two different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. 
By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} } } \value{ Null. } \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss()) plot(fit) } \seealso{ \code{\link{dppm}}, \code{\link{plot.ppm}}, } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/Extract.fasp.Rd0000644000176200001440000000343313333543263015531 0ustar liggesusers\name{Extract.fasp} \alias{[.fasp} \title{Extract Subset of Function Array} \description{ Extract a subset of a function array (an object of class \code{"fasp"}). } \usage{ \method{[}{fasp}(x, I, J, drop=TRUE,\dots) } \arguments{ \item{x}{ A function array. An object of class \code{"fasp"}. } \item{I}{ any valid expression for a subset of the row indices of the array. } \item{J}{ any valid expression for a subset of the column indices of the array. } \item{drop}{ Logical. When the selected subset consists of only one cell of the array, if \code{drop=FALSE} the result is still returned as a \eqn{1 \times 1}{1 * 1} array of functions (class \code{"fasp"}) while if \code{drop=TRUE} it is returned as a function (class \code{"fv"}). } \item{\dots}{Ignored.} } \value{ A function array (of class \code{"fasp"}). Exceptionally, if the array has only one cell, and if \code{drop=TRUE}, then the result is a function value table (class \code{"fv"}). } \details{ A function array can be regarded as a matrix whose entries are functions. See \code{\link{fasp.object}} for an explanation of function arrays. This routine extracts a sub-array according to the usual conventions for matrix indexing. 
} \seealso{ \code{\link{fasp.object}} } \examples{ # Lansing woods data - multitype points with 6 types woods <- lansing \testonly{ # smaller dataset woods <- woods[ seq(1,npoints(woods),by=45)] } # compute 6 x 6 array of all cross-type K functions a <- alltypes(woods, "K") # extract first three marks only b <- a[1:3,1:3] \dontrun{plot(b)} # subset of array pertaining to hickories h <- a[levels(marks(woods)) == "hickory", ] \dontrun{plot(h)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/pixelquad.Rd0000644000176200001440000000557513333543264015175 0ustar liggesusers\name{pixelquad} \alias{pixelquad} \title{Quadrature Scheme Based on Pixel Grid} \description{ Makes a quadrature scheme with a dummy point at every pixel of a pixel image. } \usage{ pixelquad(X, W = as.owin(X)) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}) containing the data points for the quadrature scheme. } \item{W}{ Specifies the pixel grid. A pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}), or anything that can be converted to a window by \code{\link{as.owin}}. } } \value{ An object of class \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is a method for producing a quadrature scheme for use by \code{\link{ppm}}. It is an alternative to \code{\link{quadscheme}}. The function \code{\link{ppm}} fits a point process model to an observed point pattern using the Berman-Turner quadrature approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the pseudolikelihood of the model. 
It requires a quadrature scheme consisting of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. Such quadrature schemes are represented by objects of class \code{"quad"}. See \code{\link{quad.object}} for a description of this class. Given a grid of pixels, this function creates a quadrature scheme in which there is one dummy point at the centre of each pixel. The counting weights are used (the weight attached to each quadrature point is 1 divided by the number of quadrature points falling in the same pixel). The argument \code{X} specifies the locations of the data points for the quadrature scheme. Typically this would be a point pattern dataset. The argument \code{W} specifies the grid of pixels for the dummy points of the quadrature scheme. It should be a pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}), or anything that can be converted to a window by \code{\link{as.owin}}. If \code{W} is a pixel image or a binary mask (a window of type \code{"mask"}) then the pixel grid of \code{W} will be used. If \code{W} is a rectangular or polygonal window, then it will first be converted to a binary mask using \code{\link{as.mask}} at the default pixel resolution. } \examples{ W <- owin(c(0,1),c(0,1)) X <- runifpoint(42, W) W <- as.mask(W,dimyx=128) pixelquad(X,W) } \seealso{ \code{\link{quadscheme}}, \code{\link{quad.object}}, \code{\link{ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/dg.test.Rd0000644000176200001440000001250413501357607014540 0ustar liggesusers\name{dg.test} \alias{dg.test} \title{ Dao-Genton Adjusted Goodness-Of-Fit Test } \description{ Performs the Dao and Genton (2014) adjusted goodness-of-fit test of spatial pattern. 
} \usage{ dg.test(X, \dots, exponent = 2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse = TRUE, leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{reuse}{ Logical value indicating whether to re-use the first stage simulations at the second stage, as described by Dao and Genton (2014). 
} \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test, in the equivalent form described by Baddeley et al (2014). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.test}} in this case. 
} \value{ A hypothesis test (object of class \code{"htest"} which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis}, in press. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{bits.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}} } \examples{ ns <- if(interactive()) 19 else 4 dg.test(cells, nsim=ns) dg.test(cells, alternative="less", nsim=ns) dg.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} spatstat/man/funxy.Rd0000644000176200001440000000401113417031500014316 0ustar liggesusers\name{funxy} \Rdversion{1.1} \alias{funxy} \title{ Spatial Function Class } \description{ A simple class of functions of spatial location } \usage{ funxy(f, W) } \arguments{ \item{f}{ A \code{function} in the \R language with arguments \code{x,y} (at least) } \item{W}{ Window (object of class \code{"owin"}) inside which the function is well-defined. } } \details{ This creates an object of class \code{"funxy"}. This is a simple mechanism for handling a function of spatial location \eqn{f(x,y)} to make it easier to display and manipulate. \code{f} should be a \code{function} in the \R language. The first two arguments of \code{f} must be named \code{x} and \code{y} respectively. 
\code{W} should be a window (object of class \code{"owin"}) inside which the function \code{f} is well-defined. The function \code{f} should be vectorised: that is, if \code{x} and \code{y} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y)} should be a vector of length \code{n}. The resulting function \code{g <- funxy(f, W)} has the same formal arguments as \code{f} and can be called in the same way, \code{v <- g(x,y)} where \code{x} and \code{y} are numeric vectors. However it can also be called as \code{v <- g(X)}, where \code{X} is a point pattern (object of class \code{"ppp"} or \code{"lpp"}) or a quadrature scheme (class \code{"quad"}); the function will be evaluated at the points of \code{X}. } \value{ A \code{function} with the same arguments as \code{f}, which also belongs to the class \code{"funxy"}. This class has methods for \code{print}, \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{plot.funxy}}, \code{\link{summary.funxy}} } \examples{ f <- function(x,y) { x^2 + y^2 - 1} g <- funxy(f, square(2)) g ## evaluate function at any x, y coordinates g(0.2, 0.3) ## evaluate function at the points of a point pattern g(cells[1:4]) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/nnclean.Rd0000644000176200001440000001036013451622213014574 0ustar liggesusers\name{nnclean} \alias{nnclean} \alias{nnclean.ppp} \alias{nnclean.pp3} \title{ Nearest Neighbour Clutter Removal } \description{ Detect features in a 2D or 3D spatial point pattern using nearest neighbour clutter removal. } \usage{ nnclean(X, k, ...) 
\method{nnclean}{ppp}(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) \method{nnclean}{pp3}(X, k, ..., convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}) or a three-dimensional point pattern (object of class \code{"pp3"}). } \item{k}{ Degree of neighbour: \code{k=1} means nearest neighbour, \code{k=2} means second nearest, etc. } \item{\dots}{ Arguments passed to \code{\link{hist.default}} to control the appearance of the histogram, if \code{plothist=TRUE}. } \item{edge.correct}{ Logical flag specifying whether periodic edge correction should be performed (only implemented in 2 dimensions). } \item{wrap}{ Numeric value specifying the relative size of the margin in which data will be replicated for the periodic edge correction (if \code{edge.correct=TRUE}). A fraction of window width and window height. } \item{convergence}{ Relative tolerance threshold for testing convergence of EM algorithm. } \item{maxit}{ Maximum number of iterations for EM algorithm. } \item{plothist}{ Logical flag specifying whether to plot a diagnostic histogram of the nearest neighbour distances and the fitted distribution. } \item{verbose}{ Logical flag specifying whether to print progress reports. } } \details{ Byers and Raftery (1998) developed a technique for recognising features in a spatial point pattern in the presence of random clutter. For each point in the pattern, the distance to the \eqn{k}th nearest neighbour is computed. Then the E-M algorithm is used to fit a mixture distribution to the \eqn{k}th nearest neighbour distances. The mixture components represent the feature and the clutter. The mixture model can be used to classify each point as belong to one or other component. 
The function \code{nnclean} is generic, with methods for two-dimensional point patterns (class \code{"ppp"}) and three-dimensional point patterns (class \code{"pp3"}) currently implemented. The result is a point pattern (2D or 3D) with two additional columns of marks: \describe{ \item{class}{ A factor, with levels \code{"noise"} and \code{"feature"}, indicating the maximum likelihood classification of each point. } \item{prob}{ Numeric vector giving the estimated probabilities that each point belongs to a feature. } } The object also has extra information stored in attributes: \code{"theta"} contains the fitted parameters of the mixture model, \code{"info"} contains information about the fitting procedure, and \code{"hist"} contains the histogram structure returned from \code{\link{hist.default}} if \code{plothist = TRUE}. } \value{ An object of the same kind as \code{X}, obtained by attaching marks to the points of \code{X}. The object also has attributes, as described under Details. } \references{ Byers, S. and Raftery, A.E. (1998) Nearest-neighbour clutter removal for estimating features in spatial point processes. \emph{Journal of the American Statistical Association} \bold{93}, 577--584. } \author{ Original by Simon Byers and Adrian Raftery. Adapted for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{nndist}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \examples{ # shapley galaxy cluster X <- nnclean(shapley, k=17, plothist=TRUE) plot(X, which.marks=1, chars=c(".", "+"), cols=1:2, main="Shapley data, cluster and noise") plot(X, which.marks=2, cols=function(x)hsv(0.2+0.8*(1-x),1,1), main="Shapley data, probability of cluster") Y <- split(X, un=TRUE) plot(Y, chars="+", cex=0.5) marks(X) <- marks(X)$prob plot(cut(X, breaks=3), chars=c(".", "+", "+"), cols=1:3) } \keyword{spatial} \keyword{classif} spatstat/man/nearestValue.Rd0000644000176200001440000000236613616220274015627 0ustar liggesusers\name{nearestValue} \alias{nearestValue} \title{ Image of Nearest Defined Pixel Value } \description{ Given a pixel image defined on a subset of a rectangle, this function assigns a value to every pixel in the rectangle, by looking up the value of the nearest pixel that has a value. } \usage{ nearestValue(X) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } } \details{ A pixel image in \pkg{spatstat} is always stored on a rectangular grid of pixels, but its value may be \code{NA} on some pixels, indicating that the image is not defined at those pixels. This function assigns a value to every pixel in the rectangular grid. For each pixel \code{a} in the grid, if the value of \code{X} is not defined at \code{a}, the function finds the nearest other pixel \code{b} at which the value of \code{X} is defined, and takes the pixel value at \code{b} as the new pixel value at \code{a}. } \value{ Another image of the same kind as \code{X}. } \author{ \adrian. 
} \seealso{ \code{\link{blur}}, \code{\link{Smooth.ppp}} } \examples{ X <- as.im(function(x,y) { x + y }, letterR) Y <- nearestValue(X) plot(solist("X"=X,"nearestValue(X)"=Y), main="", panel.end=letterR) } \keyword{spatial} \keyword{manip} spatstat/man/subfits.Rd0000644000176200001440000000517313333543265014653 0ustar liggesusers\name{subfits} \alias{subfits} \alias{subfits.new} \alias{subfits.old} \title{Extract List of Individual Point Process Models} \description{ Takes a Gibbs point process model that has been fitted to several point patterns simultaneously, and produces a list of fitted point process models for the individual point patterns. } \usage{ subfits(object, what="models", verbose=FALSE) subfits.old(object, what="models", verbose=FALSE) subfits.new(object, what="models", verbose=FALSE) } \arguments{ \item{object}{ An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{what}{ What should be returned. Either \code{"models"} to return the fitted models, or \code{"interactions"} to return the fitted interactions only. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ \code{object} is assumed to have been generated by \code{\link{mppm}}. It represents a point process model that has been fitted to a list of several point patterns, with covariate data. For each of the \emph{individual} point pattern datasets, this function derives the corresponding fitted model for that dataset only (i.e. a point process model for the \eqn{i}th point pattern, that is consistent with \code{object}). If \code{what="models"}, the result is a list of point process models (a list of objects of class \code{"ppm"}), one model for each point pattern dataset in the original fit. If \code{what="interactions"}, the result is a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). Two different algorithms are provided, as \code{subfits.old} and \code{subfits.new}. 
Currently \code{subfits} is the same as the old algorithm \code{subfits.old} because the newer algorithm is too memory-hungry. } \value{ A list of point process models (a list of objects of class \code{"ppm"}) or a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). } \examples{ H <- hyperframe(Wat=waterstriders) fit <- mppm(Wat~x, data=H) subfits(fit) H$Wat[[3]] <- rthin(H$Wat[[3]], 0.1) fit2 <- mppm(Wat~x, data=H, random=~1|id) subfits(fit2) \testonly{a <- subfits.new(fit)} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented in \pkg{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{mppm}}, \code{\link{ppm}} } \keyword{spatial} \keyword{models} spatstat/man/rMaternI.Rd0000644000176200001440000000455213571674202014715 0ustar liggesusers\name{rMaternI} \alias{rMaternI} \title{Simulate Matern Model I} \description{ Generate a random point pattern, a simulated realisation of the \Matern Model I inhibition process model. } \usage{ rMaternI(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE, \dots, nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Alternatively a higher-dimensional box of class \code{"box3"} or \code{"boxx"}. } \item{stationary}{ Logical. Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Each point pattern is normally an object of class \code{"ppp"}, but may be of class \code{"pp3"} or \code{"ppx"} depending on the window. } \details{ This algorithm generates one or more realisations of \Matern's Model I inhibition process inside the window \code{win}. The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. A proposal point is then deleted if it lies within \code{r} units' distance of another proposal point. Otherwise it is retained. The retained points constitute \Matern's Model I. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}} } \examples{ X <- rMaternI(20, 0.05) Y <- rMaternI(20, 0.05, stationary=FALSE) } \author{ \adrian , Ute Hahn, \rolf and \ege } \keyword{spatial} \keyword{datagen} spatstat/man/bits.test.Rd0000644000176200001440000001174613524437051015113 0ustar liggesusers\name{bits.test} \alias{bits.test} \title{ Balanced Independent Two-Stage Monte Carlo Test } \description{ Performs a Balanced Independent Two-Stage Monte Carlo test of goodness-of-fit for spatial pattern. 
} \usage{ bits.test(X, \dots, exponent = 2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of replicates in each stage of the test. A total of \code{nsim * (nsim + 1)} simulated point patterns will be generated, and the \eqn{p}-value will be a multiple of \code{1/(nsim+1)}. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). 
} \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Balanced Independent Two-Stage Monte Carlo test proposed by Baddeley et al (2017), an improvement of the Dao-Genton (2014) test. If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ A hypothesis test (object of class \code{"htest"} which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. 
and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis}, in press. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ Simulation envelopes: \code{\link{bits.envelope}}. Other tests: \code{\link{dg.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}}. } \examples{ ns <- if(interactive()) 19 else 4 bits.test(cells, nsim=ns) bits.test(cells, alternative="less", nsim=ns) bits.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} spatstat/man/residuals.kppm.Rd0000644000176200001440000000213113333543264016123 0ustar liggesusers\name{residuals.kppm} \alias{residuals.kppm} \title{ Residuals for Fitted Cox or Cluster Point Process Model } \description{ Given a Cox or cluster point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{kppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"kppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. 
} \seealso{ \code{\link{msr}}, \code{\link{kppm}} } \examples{ fit <- kppm(redwood ~ x, "Thomas") rr <- residuals(fit) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/lineardirichlet.Rd0000644000176200001440000000257613333543263016340 0ustar liggesusers\name{lineardirichlet} \alias{lineardirichlet} \title{ Dirichlet Tessellation on a Linear Network } \description{ Given a point pattern on a linear network, compute the Dirichlet (or Voronoi or Thiessen) tessellation induced by the points. } \usage{ lineardirichlet(X) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } } \details{ The Dirichlet tessellation induced by a point pattern \code{X} on a linear network \code{L} is a partition of \code{L} into subsets. The subset \code{L[i]} associated with the data point \code{X[i]} is the part of \code{L} lying closer to \code{X[i]} than to any other data point \code{X[j]}, where distance is measured by the shortest path. } \section{Missing tiles}{ If the linear network is not connected, and if one of the connected components contains no data points, then the Dirichlet tessellation is mathematically undefined inside this component. The resulting tessellation object includes a tile with label \code{NA}, which contains this component of the network. A plot of the tessellation will not show this tile. } \value{ A tessellation on a linear network (object of class \code{"lintess"}). } \author{ \adrian. 
} \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(5, simplenet) plot(lineardirichlet(X), lwd=3) points(X) } \keyword{spatial} \keyword{manip} spatstat/man/is.stationary.Rd0000644000176200001440000000631713333543263016002 0ustar liggesusers\name{is.stationary} \alias{is.stationary} \alias{is.stationary.ppm} \alias{is.stationary.kppm} \alias{is.stationary.lppm} \alias{is.stationary.slrm} \alias{is.stationary.rmhmodel} \alias{is.stationary.dppm} \alias{is.stationary.detpointprocfamily} \alias{is.poisson} \alias{is.poisson.ppm} \alias{is.poisson.kppm} \alias{is.poisson.lppm} \alias{is.poisson.slrm} \alias{is.poisson.rmhmodel} \alias{is.poisson.interact} \title{ Recognise Stationary and Poisson Point Process Models } \description{ Given a point process model that has been fitted to data, determine whether the model is a stationary point process, and whether it is a Poisson point process. } \usage{ is.stationary(x) \method{is.stationary}{ppm}(x) \method{is.stationary}{kppm}(x) \method{is.stationary}{lppm}(x) \method{is.stationary}{slrm}(x) \method{is.stationary}{rmhmodel}(x) \method{is.stationary}{dppm}(x) \method{is.stationary}{detpointprocfamily}(x) is.poisson(x) \method{is.poisson}{ppm}(x) \method{is.poisson}{kppm}(x) \method{is.poisson}{lppm}(x) \method{is.poisson}{slrm}(x) \method{is.poisson}{rmhmodel}(x) \method{is.poisson}{interact}(x) } \arguments{ \item{x}{ A fitted spatial point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"} or \code{"slrm"}) or similar object. } } \details{ The argument \code{x} represents a fitted spatial point process model or a similar object. \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents a stationary point process, and \code{FALSE} if not. \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents a Poisson point process, and \code{FALSE} if not. 
The functions \code{is.stationary} and \code{is.poisson} are generic, with methods for the classes \code{"ppm"} (Gibbs point process models), \code{"kppm"} (cluster or Cox point process models), \code{"slrm"} (spatial logistic regression models) and \code{"rmhmodel"} (model specifications for the Metropolis-Hastings algorithm). Additionally \code{is.stationary} has a method for classes \code{"detpointprocfamily"} and \code{"dppm"} (both determinantal point processes) and \code{is.poisson} has a method for class \code{"interact"} (interaction structures for Gibbs models). \code{is.poisson.kppm} will return \code{FALSE}, unless the model \code{x} is degenerate: either \code{x} has zero intensity so that its realisations are empty with probability 1, or it is a log-Gaussian Cox process where the log intensity has zero variance. \code{is.poisson.slrm} will always return \code{TRUE}, by convention. } \value{ A logical value. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{is.marked}} to determine whether a model is a marked point process. \code{\link{summary.ppm}} for detailed information. Model-fitting functions \code{\link{ppm}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{lppm}}, \code{\link{slrm}}. } \examples{ data(cells) data(redwood) fit <- ppm(cells ~ x) is.stationary(fit) is.poisson(fit) fut <- kppm(redwood ~ 1, "MatClust") is.stationary(fut) is.poisson(fut) fot <- slrm(cells ~ x) is.stationary(fot) is.poisson(fot) } \keyword{spatial} \keyword{models} spatstat/man/is.multitype.ppm.Rd0000644000176200001440000000455613333543263016437 0ustar liggesusers\name{is.multitype.ppm} \alias{is.multitype.ppm} \alias{is.multitype.lppm} \title{Test Whether A Point Process Model is Multitype} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points that classify the points into several types. 
} \usage{ \method{is.multitype}{ppm}(X, \dots) \method{is.multitype}{lppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. Alternatively a model of class \code{"lppm"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a multitype point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a multitype point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model uses the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. 
} \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.multitype(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.multitype(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.multitype(fit3) # FALSE } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/trim.rectangle.Rd0000644000176200001440000000252313333543264016105 0ustar liggesusers\name{trim.rectangle} \alias{trim.rectangle} \title{Cut margins from rectangle} \description{ Trims a margin from a rectangle. } \usage{ trim.rectangle(W, xmargin=0, ymargin=xmargin) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). Must be of type \code{"rectangle"}. } \item{xmargin}{Width of horizontal margin to be trimmed. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at left and right. } \item{ymargin}{Height of vertical margin to be trimmed. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at bottom and top. } } \value{ Another object of class \code{"owin"} representing the window after margins are trimmed. } \details{ This is a simple convenience function to trim off a margin of specified width and height from each side of a rectangular window. Unequal margins can also be trimmed. } \seealso{ \code{\link{grow.rectangle}}, \code{\link{erosion}}, \code{\link{owin.object}} } \examples{ w <- square(10) # trim a margin of width 1 from all four sides square9 <- trim.rectangle(w, 1) # trim margin of width 3 from the right side # and margin of height 4 from top edge. 
v <- trim.rectangle(w, c(0,3), c(0,4)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/pixellate.ppp.Rd0000644000176200001440000001161213357331230015745 0ustar liggesusers\name{pixellate.ppp} \alias{pixellate.ppp} \alias{as.im.ppp} \title{Convert Point Pattern to Pixel Image} \description{ Converts a point pattern to a pixel image. The value in each pixel is the number of points falling in that pixel, and is typically either 0 or 1. } \usage{ \method{pixellate}{ppp}(x, W=NULL, \dots, weights = NULL, padzero=FALSE, fractional=FALSE, preserve=FALSE, DivideByPixelArea=FALSE, savemap=FALSE) \method{as.im}{ppp}(X, \dots) } \arguments{ \item{x,X}{Point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine the pixel resolution} \item{W}{Optional window mask (object of class \code{"owin"}) determining the pixel raster. } \item{weights}{Optional vector of weights associated with the points.} \item{padzero}{ Logical value indicating whether to set pixel values to zero outside the window. } \item{fractional,preserve}{ Logical values determining the type of discretisation. See Details. } \item{DivideByPixelArea}{ Logical value, indicating whether the resulting pixel values should be divided by the pixel area. } \item{savemap}{ Logical value, indicating whether to save information about the discretised coordinates of the points of \code{x}. } } \details{ The functions \code{pixellate.ppp} and \code{as.im.ppp} convert a spatial point pattern \code{x} into a pixel image, by counting the number of points (or the total weight of points) falling in each pixel. Calling \code{as.im.ppp} is equivalent to calling \code{pixellate.ppp} with its default arguments. Note that \code{pixellate.ppp} is more general than \code{as.im.ppp} (it has additional arguments for greater flexibility). 
The functions \code{as.im.ppp} and \code{pixellate.ppp} are methods for the generic functions \code{\link{as.im}} and \code{\link{pixellate}} respectively, for the class of point patterns. The pixel raster (in which points are counted) is determined by the argument \code{W} if it is present (for \code{pixellate.ppp} only). In this case \code{W} should be a binary mask (a window object of class \code{"owin"} with type \code{"mask"}). Otherwise the pixel raster is determined by extracting the window containing \code{x} and converting it to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. If \code{weights} is \code{NULL}, then for each pixel in the mask, the algorithm counts how many points in \code{x} fall in the pixel. This count is usually either 0 (for a pixel with no data points in it) or 1 (for a pixel containing one data point) but may be greater than 1. The result is an image with these counts as its pixel values. If \code{weights} is given, it should be a numeric vector of the same length as the number of points in \code{x}. For each pixel, the algorithm finds the total weight associated with points in \code{x} that fall in the given pixel. The result is an image with these total weights as its pixel values. By default (if \code{padzero=FALSE}) the resulting pixel image has the same spatial domain as the window of the point pattern \code{x}. If \code{padzero=TRUE} then the resulting pixel image has a rectangular domain; pixels outside the original window are assigned the value zero. The discretisation procedure is controlled by the arguments \code{fractional} and \code{preserve}. \itemize{ \item The argument \code{fractional} specifies how data points are mapped to pixels. If \code{fractional=FALSE} (the default), each data point is allocated to the nearest pixel centre. 
If \code{fractional=TRUE}, each data point is allocated with fractional weight to four pixel centres (the corners of a rectangle containing the data point). \item The argument \code{preserve} specifies what to do with pixels lying near the boundary of the window, if the window is not a rectangle. If \code{preserve=FALSE} (the default), any contributions that are attributed to pixel centres lying outside the window are reset to zero. If \code{preserve=TRUE}, any such contributions are shifted to the nearest pixel lying inside the window, so that the total mass is preserved. } If \code{savemap=TRUE} then the result has an attribute \code{"map"} which is a 2-column matrix containing the row and column indices of the discretised positions of the points of \code{x} in the pixel grid. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate}}, \code{\link{im}}, \code{\link{as.im}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ plot(pixellate(humberside)) plot(pixellate(humberside, fractional=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/summary.quad.Rd0000644000176200001440000000312713333543264015616 0ustar liggesusers\name{summary.quad} \alias{summary.quad} \alias{print.summary.quad} \title{Summarizing a Quadrature Scheme} \description{ \code{summary} method for class \code{"quad"}. } \usage{ \method{summary}{quad}(object, \dots, checkdup=FALSE) \method{print}{summary.quad}(x, \dots, dp=3) } \arguments{ \item{object}{A quadrature scheme.} \item{\dots}{Ignored.} \item{checkdup}{ Logical value indicating whether to test for duplicated points. } \item{dp}{Number of significant digits to print.} \item{x}{Object of class \code{"summary.quad"} returned by \code{summary.quad}.} } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"quad"}. An object of class \code{"quad"} describes a quadrature scheme, used to fit a point process model. 
See \code{\link{quad.object}} for details of this class. \code{summary.quad} extracts information about the quadrature scheme, and \code{print.summary.quad} prints this information in a comprehensible format. In normal usage, \code{print.summary.quad} is invoked implicitly when the user calls \code{summary.quad} without assigning its value to anything. See the examples. } \value{ \code{summary.quad} returns an object of class \code{"summary.quad"}, while \code{print.summary.quad} returns \code{NULL}. } \examples{ # make a quadrature scheme Q <- quadscheme(rpoispp(42)) # summarize it summary(Q) # save the summary s <- summary(Q) # print it print(s) s # extract total quadrature weight s$w$all$sum } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/Kres.Rd0000644000176200001440000000561413571674202014100 0ustar liggesusers\name{Kres} \Rdversion{1.1} \alias{Kres} \title{ Residual K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{K} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Kres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Kcom}}. } \item{\dots}{ Arguments passed to \code{\link{Kcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{K} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Kres} first calls \code{\link{Kcom}} to compute both the nonparametric estimate of the \eqn{K} function and its model compensator. 
Then \code{Kres} computes the difference between them, which is the residual \eqn{K}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Kcom}}. Then \code{Kres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Kcom}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gres}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{ fit0 <- ppm(cells, ~1, nd=16)} K0 <- Kres(fit0) K0 plot(K0) # isotropic-correction estimate plot(K0, ires ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kres(fit1) if(interactive()) { plot(K1, ires ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Kres(cells, interaction=Strauss(0.12))) } # How to make envelopes \dontrun{ E <- envelope(fit1, Kres, model=fit1, nsim=19) plot(E) } # For computational efficiency Kc <- Kcom(fit1) K1 <- Kres(Kc) } \keyword{spatial} \keyword{models} spatstat/man/chop.tess.Rd0000644000176200001440000000304513333543263015074 0ustar liggesusers\name{chop.tess} \alias{chop.tess} \title{Subdivide a Window or Tessellation using a Set of Lines} \description{ Divide a given window into tiles delineated by a set of infinite straight lines, obtaining a tessellation of the window. 
Alternatively, given a tessellation, divide each tile of the tessellation into sub-tiles delineated by the lines. } \usage{ chop.tess(X, L) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or tessellation (object of class \code{"tess"}) to be subdivided by lines. } \item{L}{ A set of infinite straight lines (object of class \code{"infline"}) } } \details{ The argument \code{L} should be a set of infinite straight lines in the plane (stored in an object \code{L} of class \code{"infline"} created by the function \code{\link{infline}}). If \code{X} is a window, then it is divided into tiles delineated by the lines in \code{L}. If \code{X} is a tessellation, then each tile of \code{X} is subdivided into sub-tiles delineated by the lines in \code{L}. The result is a tessellation. } \section{Warning}{ If \code{X} is a non-convex window, or a tessellation containing non-convex tiles, then \code{chop.tess(X,L)} may contain a tile which consists of several unconnected pieces. } \value{ A tessellation (object of class \code{"tess"}). } \author{\adrian and \rolf } \seealso{ \code{\link{infline}}, \code{\link{clip.infline}} } \examples{ L <- infline(p=1:3, theta=pi/4) W <- square(4) chop.tess(W, L) } \keyword{spatial} \keyword{math} spatstat/man/dppspecden.Rd0000644000176200001440000000110413333543263015305 0ustar liggesusers\name{dppspecden} \alias{dppspecden} \title{Extract Spectral Density from Determinantal Point Process Model Object} \description{ Returns the spectral density of a determinantal point process model as a function of one argument \code{x}. 
} \usage{dppspecden(model)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} } \value{A function} \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda = 100, alpha=.01, nu=1, d=2) dppspecden(model) } \seealso{ \code{\link{dppspecdenrange}} } \keyword{spatial} \keyword{models} spatstat/man/nvertices.Rd0000644000176200001440000000160113333543263015164 0ustar liggesusers\name{nvertices} \alias{nvertices} \alias{nvertices.owin} \alias{nvertices.default} \title{ Count Number of Vertices } \description{ Count the number of vertices in an object for which vertices are well-defined. } \usage{ nvertices(x, \dots) \method{nvertices}{owin}(x, \dots) \method{nvertices}{default}(x, \dots) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), or some other object which has vertices. } \item{\dots}{ Currently ignored. } } \details{ This function counts the number of vertices of \code{x} as they would be returned by \code{\link{vertices}(x)}. It is more efficient than executing \code{npoints(vertices(x))}. } \value{ A single integer. } \author{ \spatstatAuthors and Suman Rakshit. } \seealso{ \code{\link{vertices}} } \examples{ nvertices(square(2)) nvertices(letterR) } \keyword{spatial} \keyword{manip} spatstat/man/by.ppp.Rd0000644000176200001440000000522713333543263014402 0ustar liggesusers\name{by.ppp} \alias{by.ppp} \title{Apply a Function to a Point Pattern Broken Down by Factor} \description{ Splits a point pattern into sub-patterns, and applies the function to each sub-pattern. } \usage{ \method{by}{ppp}(data, INDICES=marks(data), FUN, ...) } \arguments{ \item{data}{Point pattern (object of class \code{"ppp"}).} \item{INDICES}{Grouping variable. 
Either a factor, a pixel image with factor values, or a tessellation.} \item{FUN}{Function to be applied to subsets of \code{data}.} \item{\dots}{Additional arguments to \code{FUN}.} } \details{ This is a method for the generic function \code{\link{by}} for point patterns (class \code{"ppp"}). The point pattern \code{data} is first divided into subsets according to \code{INDICES}. Then the function \code{FUN} is applied to each subset. The results of each computation are returned in a list. The argument \code{INDICES} may be \itemize{ \item a factor, of length equal to the number of points in \code{data}. The levels of \code{INDICES} determine the destination of each point in \code{data}. The \code{i}th point of \code{data} will be placed in the sub-pattern \code{split.ppp(data)$l} where \code{l = f[i]}. \item a pixel image (object of class \code{"im"}) with factor values. The pixel value of \code{INDICES} at each point of \code{data} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}). Each point of \code{data} will be classified according to the tile of the tessellation into which it falls. } If \code{INDICES} is missing, then \code{data} must be a multitype point pattern (a marked point pattern whose marks vector is a factor). Then the effect is that the points of each type are separated into different point patterns. } \value{ A list (also of class \code{"anylist"} or \code{"solist"} as appropriate) containing the results returned from \code{FUN} for each of the subpatterns. } \seealso{ \code{\link{ppp}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}}, \code{\link{tess}}, \code{\link{im}}. 
} \examples{ # multitype point pattern, broken down by type data(amacrine) by(amacrine, FUN=density) by(amacrine, FUN=function(x) { min(nndist(x)) } ) # how to pass additional arguments to FUN by(amacrine, FUN=clarkevans, correction=c("Donnelly","cdf")) # point pattern broken down by tessellation data(swedishpines) tes <- quadrats(swedishpines, 5, 5) B <- by(swedishpines, tes, clarkevans, correction="Donnelly") unlist(lapply(B, as.numeric)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/reach.kppm.Rd0000644000176200001440000000220713552031337015212 0ustar liggesusers\name{reach.kppm} \alias{reach.kppm} \title{Range of Interaction for a Cox or Cluster Point Process Model} \description{ Returns the range of interaction for a Cox or cluster point process model. } \usage{ \method{reach}{kppm}(x, \dots, epsilon) } \arguments{ \item{x}{ Fitted point process model of class \code{"kppm"}. } \item{epsilon}{ Optional numerical value. Differences smaller than \code{epsilon} are treated as zero. } \item{\dots}{ Additional arguments passed to the range function of the given model. } } \details{ The range of interaction for a fitted point process model of class \code{"kppm"} may defined as the smallest number \eqn{R} such that \eqn{g(r)=1} for all \eqn{r\ge R}{r>=R}, where \eqn{g} is the pair correlation function. For many models the range is infinite, but one may instead use a value where the pair correlation function is sufficiently close to 1. The argument \code{epsilon} specifies the tolerance; there is a sensible default. } \value{Numeric} \author{ \spatstatAuthors. } \examples{ fit <- kppm(redwood ~ 1) reach(fit) } \keyword{spatial} \keyword{models} spatstat/man/eroded.areas.Rd0000644000176200001440000000314713333543263015525 0ustar liggesusers\name{eroded.areas} \alias{eroded.areas} \title{Areas of Morphological Erosions} \description{ Computes the areas of successive morphological erosions of a window. 
} \usage{ eroded.areas(w, r, subset=NULL) } \arguments{ \item{w}{A window.} \item{r}{Numeric vector of radii at which erosions will be performed.} \item{subset}{ Optional window inside which the areas should be computed. } } \value{ Numeric vector, of the same length as \code{r}, giving the areas of the successive erosions. } \details{ This function computes the areas of the erosions of the window \code{w} by each of the radii \code{r[i]}. The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x \in W}{x in W} such that the distance from \eqn{x} to the boundary of \eqn{W} is greater than or equal to \eqn{r}. In other words it is the result of trimming a margin of width \eqn{r} off the set \eqn{W}. The argument \code{r} should be a vector of positive numbers. The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. Unless \code{w} is a rectangle, the computation is performed using a pixel raster approximation. To compute the eroded window itself, use \code{\link{erosion}}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ w <- owin(c(0,1),c(0,1)) a <- eroded.areas(w, seq(0.01,0.49,by=0.01)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/pool.quadrattest.Rd0000644000176200001440000000530613532403247016477 0ustar liggesusers\name{pool.quadrattest} \alias{pool.quadrattest} \title{ Pool Several Quadrat Tests } \description{ Pool several quadrat tests into a single quadrat test. } \usage{ \method{pool}{quadrattest}(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) } \arguments{ \item{\dots}{ Any number of objects, each of which is a quadrat test (object of class \code{"quadrattest"}). } \item{df}{ Optional. Number of degrees of freedom of the test statistic. Relevant only for \eqn{\chi^2}{chi^2} tests. 
Incompatible with \code{df.est}. } \item{df.est}{ Optional. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df}. } \item{nsim}{ Number of simulations, for Monte Carlo test. } \item{Xname}{ Optional. Name of the original data. } \item{CR}{ Optional. Numeric value of the Cressie-Read exponent \code{CR} overriding the value used in the tests. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"quadrattest"}. An object of class \code{"quadrattest"} represents a \eqn{\chi^2}{chi^2} test or Monte Carlo test of goodness-of-fit for a point process model, based on quadrat counts. Such objects are created by the command \code{\link{quadrat.test}}. Each of the arguments \code{\dots} must be an object of class \code{"quadrattest"}. They must all be the same type of test (chi-squared test or Monte Carlo test, conditional or unconditional) and must all have the same type of alternative hypothesis. The test statistic of the pooled test is the Pearson \eqn{X^2} statistic taken over all cells (quadrats) of all tests. The \eqn{p} value of the pooled test is then computed using either a Monte Carlo test or a \eqn{\chi^2}{chi^2} test. For a pooled \eqn{\chi^2}{chi^2} test, the number of degrees of freedom of the combined test is computed by adding the degrees of freedom of all the tests (equivalent to assuming the tests are independent) unless it is determined by the arguments \code{df} or \code{df.est}. The resulting \eqn{p} value is computed to obtain the pooled test. For a pooled Monte Carlo test, new simulations are performed to determine the pooled Monte Carlo \eqn{p} value. } \value{ Another object of class \code{"quadrattest"}. 
} \seealso{ \code{\link{pool}}, \code{\link{quadrat.test}} } \examples{ Y <- split(humberside) test1 <- quadrat.test(Y[[1]]) test2 <- quadrat.test(Y[[2]]) pool(test1, test2, Xname="Humberside") } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat/man/marktable.Rd0000644000176200001440000000541713333543263015135 0ustar liggesusers\name{marktable} \alias{marktable} \title{Tabulate Marks in Neighbourhood of Every Point in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and compile a frequency table of the marks of these neighbour points. } \usage{ marktable(X, R, N, exclude=TRUE, collapse=FALSE) } \arguments{ \item{X}{ A marked point pattern. An object of class \code{"ppp"}. } \item{R}{ Neighbourhood radius. Incompatible with \code{N}. } \item{N}{ Number of neighbours of each point. Incompatible with \code{R}. } \item{exclude}{ Logical. If \code{exclude=TRUE}, the neighbours of a point do not include the point itself. If \code{exclude=FALSE}, a point belongs to its own neighbourhood. } \item{collapse}{ Logical. If \code{collapse=FALSE} (the default) the results for each point are returned as separate rows of a table. If \code{collapse=TRUE}, the results are aggregated according to the type of point. } } \value{ A contingency table (object of class \code{"table"}). If \code{collapse=FALSE}, the table has one row for each point in \code{X}, and one column for each possible mark value. If \code{collapse=TRUE}, the table has one row and one column for each possible mark value. } \details{ This algorithm visits each point in the point pattern \code{X}, inspects all the neighbouring points within a radius \code{R} of the current point (or the \code{N} nearest neighbours of the current point), and compiles a frequency table of the marks attached to the neighbours. The dataset \code{X} must be a multitype point pattern, that is, \code{marks(X)} must be a \code{factor}. 
If \code{collapse=FALSE} (the default), the result is a two-dimensional contingency table with one row for each point in the pattern, and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of point \code{i} that have mark \code{j}. If \code{collapse=TRUE}, this contingency table is aggregated according to the type of point, so that the result is a contingency table with one row and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of a point with mark \code{i} that have mark \code{j}. To perform more complicated calculations on the neighbours of every point, use \code{\link{markstat}} or \code{\link{applynbd}}. } \seealso{ \code{\link{markstat}}, \code{\link{applynbd}}, \code{\link{Kcross}}, \code{\link{ppp.object}}, \code{\link{table}} } \examples{ head(marktable(amacrine, 0.1)) head(marktable(amacrine, 0.1, exclude=FALSE)) marktable(amacrine, N=1, collapse=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} spatstat/man/bw.diggle.Rd0000644000176200001440000000733613544333571015042 0ustar liggesusers\name{bw.diggle} \alias{bw.diggle} \title{ Cross Validated Bandwidth Selection for Kernel Density } \description{ Uses cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.diggle(X, ..., correction="good", hmax=NULL, nr=512, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string passed to \code{\link{Kest}} determining the edge correction to be used to calculate the \eqn{K} function. } \item{hmax}{ Numeric. Maximum value of bandwidth that should be considered. } \item{nr}{ Integer. Number of steps in the distance value \eqn{r} to use in computing numerical integrals. } \item{warn}{ Logical. 
If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is chosen to minimise the mean-square error criterion defined by Diggle (1985). The algorithm uses the method of Berman and Diggle (1989) to compute the quantity \deqn{ M(\sigma) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0) }{ M(\sigma) = MSE(\sigma)/\lambda^2 - g(0) } as a function of bandwidth \eqn{\sigma}{\sigma}, where \eqn{\mbox{MSE}(\sigma)}{MSE(\sigma)} is the mean squared error at bandwidth \eqn{\sigma}{\sigma}, while \eqn{\lambda}{\lambda} is the mean intensity, and \eqn{g} is the pair correlation function. See Diggle (2003, pages 115-118) for a summary of this method. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. } \section{Definition of bandwidth}{ The smoothing parameter \code{sigma} returned by \code{bw.diggle} (and displayed on the horizontal axis of the plot) corresponds to \code{h/2}, where \code{h} is the smoothing parameter described in Diggle (2003, pages 116-118) and Berman and Diggle (1989). In those references, the smoothing kernel is the uniform density on the disc of radius \code{h}. In \code{\link{density.ppp}}, the smoothing kernel is the isotropic Gaussian density with standard deviation \code{sigma}. When replacing one kernel by another, the usual practice is to adjust the bandwidths so that the kernels have equal variance (cf. Diggle 2003, page 118). This implies that \code{sigma = h/2}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. 
Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34}, 138--147.
In the latter case, the points for which the corresponding entry of \code{i} is \code{TRUE} will be retained, and the others will be deleted.
Whether to generate the simulated realisations from a single long run of the Metropolis-Hastings algorithm (\code{singlerun=TRUE}) or from separate, independent runs of the algorithm (\code{singlerun=FALSE}, the default). } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. Defaults to \code{list(n.start=npoints(data.ppm(object)))} meaning that the initial state of the algorithm has the same number of points as the original dataset. } \item{control}{Data controlling the running of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{w}{ Optional. The window in which the model is defined. An object of class \code{"owin"}. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{verbose}{ Logical flag indicating whether to print progress reports from \code{\link{rmh.ppm}} during the simulation of each point pattern. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"ppm"} of fitted point process models. Simulations are performed by \code{\link{rmh.ppm}}. 
A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). It also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. However, if \code{nsim=1} and \code{drop=TRUE}, the result is a single point pattern (object of class \code{"ppp"}).
The expression may involve the names of spatial coordinates of the segment endpoints (\code{x0}, \code{y0}, \code{x1}, \code{y1}), the \code{marks}, and (if there is more than one column of marks) the names of individual columns of marks. Missing values are taken as false. See Details. } \item{select}{ Expression indicating which columns of marks should be kept. The \emph{names} of columns of marks can be used in this expression, and will be treated as if they were column indices. See Details. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{subset}}. It extracts the subset of \code{x} consisting of those segments that satisfy the logical expression \code{subset}, and retains only the columns of marks that are specified by the expression \code{select}. The result is always a line segment pattern, with the same window as \code{x}. The argument \code{subset} determines the subset that will be extracted. It should be a logical expression. It may involve the variable names \code{x0}, \code{y0}, \code{x1}, \code{y1} representing the Cartesian coordinates of the segment endpoints; the name \code{marks} representing the marks; and (if there is more than one column of marks) the names of individual columns of marks. The default is to keep all segments. The argument \code{select} determines which columns of marks will be retained (if there are several columns of marks). It should be an expression involving the names of columns of marks (which will be interpreted as integers representing the positions of these columns). For example if there are columns of marks named \code{A} to \code{Z}, then \code{select=D:F} is a valid expression and means that columns \code{D}, \code{E} and \code{F} will be retained. Similarly \code{select=-(A:C)} is valid and means that columns \code{A} to \code{C} will be deleted. 
Setting \code{subset=FALSE} will produce an empty line segment pattern (i.e. containing zero segments) in the same window as \code{x}. Setting \code{select=FALSE} or \code{select= -marks} will remove all the marks from \code{x}. The argument \code{drop} determines whether to remove unused levels of a factor, if the resulting line segment pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame in which some of the columns are factors.
} } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th line segment in \code{X} to the \code{j}-th line segment in \code{Y}. } \details{ This is a method for the generic function \code{\link{crossdist}}. Given two line segment patterns, this function computes the distance from each line segment in the first pattern to each line segment in the second pattern, and returns a matrix containing these distances. The distances between line segments are measured in one of two ways: \itemize{ \item if \code{type="Hausdorff"}, distances are computed in the Hausdorff metric. The Hausdorff distance between two line segments is the \emph{maximum} distance from any point on one of the segments to the nearest point on the other segment. \item if \code{type="separation"}, distances are computed as the \emph{minimum} distance from a point on one line segment to a point on the other line segment. For example, line segments which cross over each other have separation zero. } The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted \R code only. If \code{method="C"} (the default) then compiled \code{C} code is used. The \code{C} code is several times faster. } \seealso{ \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ L1 <- psp(runif(5), runif(5), runif(5), runif(5), owin()) L2 <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- crossdist(L1, L2) #result is a 5 x 10 matrix S <- crossdist(L1, L2, type="sep") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/dppGauss.Rd0000644000176200001440000000215113571674202014753 0ustar liggesusers\name{dppGauss} \alias{dppGauss} \title{Gaussian Determinantal Point Process Model} \description{ Function generating an instance of the Gaussian determinantal point process model. 
Lavancier, F., \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference. \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--877.
The function \code{pool.anylist} is the method for the class \code{"anylist"}.
} \examples{ Keach <- anylapply(waterstriders, Kest, ratio=TRUE, correction="iso") K <- pool(Keach) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat/man/clicklpp.Rd0000644000176200001440000000550713333543263014774 0ustar liggesusers\name{clicklpp} \alias{clicklpp} \title{Interactively Add Points on a Linear Network} \description{ Allows the user to create a point pattern on a linear network by point-and-click in the display. } \usage{ clicklpp(L, n=NULL, types=NULL, \dots, add=FALSE, main=NULL, hook=NULL) } \arguments{ \item{L}{ Linear network on which the points will be placed. An object of class \code{"linnet"}. } \item{n}{ Number of points to be added (if this is predetermined). } \item{types}{ Vector of types, when creating a multitype point pattern. } \item{\dots}{ Optional extra arguments to be passed to \code{\link[graphics]{locator}} to control the display. } \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{main}{ Main heading for plot. } \item{hook}{For internal use only. Do not use this argument.} } \value{ A point pattern (object of class \code{"lpp"}). } \details{ This function allows the user to create a point pattern on a linear network by interactively clicking on the screen display. First the linear network \code{L} is plotted on the current screen device. Then the user is prompted to point the mouse at any desired locations and click the left mouse button to add each point. Interactive input stops after \code{n} clicks (if \code{n} was given) or when the middle mouse button is pressed. The return value is a point pattern on the network \code{L}, containing the locations of all the clicked points, after they have been projected onto the network \code{L}. Any points that were clicked outside the bounding window of the network will be ignored. 
If the argument \code{types} is given, then a multitype point pattern will be created. The user is prompted to input the locations of points of type \code{type[i]}, for each successive index \code{i}. (If the argument \code{n} was given, there will be \code{n} points of \emph{each} type.) The return value is a multitype point pattern on a linear network. This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. } \seealso{ \code{\link{clickppp}}, \code{\link{identify.lpp}}, \code{\link[graphics]{locator}}, \code{\link{clickpoly}}, \code{\link{clickbox}}, \code{\link{clickdist}} } \author{ \spatstatAuthors, based on an idea by Dominic Schuhmacher. } \keyword{spatial} \keyword{iplot} spatstat/man/methods.ssf.Rd0000644000176200001440000000656613362773322015441 0ustar liggesusers\name{methods.ssf} \alias{methods.ssf} %DoNotExport \alias{marks.ssf} \alias{marks<-.ssf} \alias{unmark.ssf} \alias{as.im.ssf} \alias{as.function.ssf} \alias{as.ppp.ssf} \alias{print.ssf} \alias{summary.ssf} \alias{range.ssf} \alias{min.ssf} \alias{max.ssf} \alias{integral.ssf} \title{Methods for Spatially Sampled Functions} \description{ Methods for various generic commands, for the class \code{"ssf"} of spatially sampled functions. 
\code{as.im} returns a pixel image (object of class \code{"im"}).
\item{X,Y}{Two tessellations (objects of class \code{"tess"}), or windows (objects of class \code{"owin"}), or other data that can be converted to tessellations by \code{\link{as.tess}}. }
} \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. If \code{X} and \code{Y} are not tessellations, they are first converted into tessellations by \code{\link{as.tess}}. The function \code{intersect.tess} then computes the intersection between the two tessellations. This is another tessellation, each of whose tiles is the intersection of a tile from \code{X} and a tile from \code{Y}. One possible use of this function is to slice a window \code{W} into subwindows determined by a tessellation. See the Examples. } \author{\adrian and \rolf } \seealso{ \code{\link{tess}}, \code{\link{as.tess}}, \code{\link{intersect.owin}} } \examples{ opa <- par(mfrow=c(1,3)) # polygon data(letterR) plot(letterR) # tessellation of rectangles X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8)) plot(X) plot(intersect.tess(X, letterR)) A <- runifpoint(10) B <- runifpoint(10) plot(DA <- dirichlet(A)) plot(DB <- dirichlet(B)) plot(intersect.tess(DA, DB)) par(opa) marks(DA) <- 1:10 marks(DB) <- 1:10 plot(Z <- intersect.tess(DA,DB, keepmarks=TRUE)) mZ <- marks(Z) tZ <- tiles(Z) for(i in which(mZ[,1] == 3)) plot(tZ[[i]], add=TRUE, col="pink") } \keyword{spatial} \keyword{math} spatstat/man/im.object.Rd0000644000176200001440000000714613333543263015046 0ustar liggesusers\name{im.object} \alias{im.object} %DoNotExport \title{Class of Images} \description{ A class \code{"im"} to represent a two-dimensional pixel image. } \details{ An object of this class represents a two-dimensional pixel image. 
It specifies \itemize{ \item the dimensions of the rectangular array of pixels \item \eqn{x} and \eqn{y} coordinates for the pixels \item a numeric value (``grey value'') at each pixel } If \code{X} is an object of type \code{im}, it contains the following elements: \tabular{ll}{ \code{v} \tab matrix of values \cr \code{dim} \tab dimensions of matrix \code{v} \cr \code{xrange} \tab range of \eqn{x} coordinates of image window \cr \code{yrange} \tab range of \eqn{y} coordinates of image window \cr \code{xstep} \tab width of one pixel \cr \code{ystep} \tab height of one pixel \cr \code{xcol} \tab vector of \eqn{x} coordinates of centres of pixels \cr \code{yrow} \tab vector of \eqn{y} coordinates of centres of pixels } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"im"} may be created by the functions \code{\link{im}} and \code{\link{as.im}}. Image objects are also returned by various functions including \code{\link{distmap}}, \code{\link{Kmeasure}}, \code{\link{setcov}}, \code{\link{eval.im}} and \code{\link{cut.im}}. Image objects may be displayed using the methods \code{\link{plot.im}}, \code{image.im}, \code{\link{persp.im}} and \code{contour.im}. There are also methods \code{\link{print.im}} for printing information about an image, \code{\link{summary.im}} for summarising an image, \code{\link{mean.im}} for calculating the average pixel value, \code{\link{hist.im}} for plotting a histogram of pixel values, \code{\link{quantile.im}} for calculating quantiles of pixel values, and \code{\link{cut.im}} for dividing the range of pixel values into categories. Pixel values in an image may be extracted using the subset operator \code{\link{[.im}}. To extract all pixel values from an image object, use \code{\link{as.matrix.im}}. The levels of a factor-valued image can be extracted and changed with \code{levels} and \code{levels<-}. 
Note carefully that the entry \code{v[i,j]} gives the pixel value at the location \code{(xcol[j],yrow[i])}. That is, the \bold{row} index of the matrix \code{v} corresponds to increasing \bold{y} coordinate, while the column index of \code{v} corresponds to increasing \bold{x} coordinate.
The matrix \code{M} should be the kernel matrix of a central subspace, which can be obtained from \code{\link{sdr}}. It must be a symmetric, non-negative-definite, numeric matrix. The algorithm finds the eigenvalues \eqn{\lambda_1 \ge \ldots \ge \lambda_n}{lambda[1] \ge ...\ge lambda[n]} of \eqn{M}, and then determines the index \eqn{k} for which \eqn{\lambda_k/\lambda_{k-1}}{lambda[k]/lambda[k-1]} is greatest. } \value{ A single integer giving the estimated dimension. } \seealso{ \code{\link{sdr}}, \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{array} \keyword{algebra} \keyword{multivariate} spatstat/man/bw.abram.Rd0000644000176200001440000001527113460256013014656 0ustar liggesusers\name{bw.abram} \alias{bw.abram} \title{ Abramson's Adaptive Bandwidths } \description{ Computes adaptive smoothing bandwidths according to the inverse-square-root rule of Abramson (1982). } \usage{ bw.abram(X, h0, \dots, at=c("points", "pixels"), hp = h0, pilot = NULL, trim=5, smoother=density.ppp) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) for which the variable bandwidths should be computed. } \item{h0}{ A scalar value giving the global smoothing bandwidth in the same units as the coordinates of \code{X}. The default is \code{h0=\link{bw.ppl}(X)}. } \item{\dots}{ Additional arguments passed to \code{\link{as.im}} to control the pixel resolution, or passed to \code{\link[spatstat]{density.ppp}} or \code{smoother} to control the type of smoothing, when computing the pilot estimate. 
} \item{at}{ Character string (partially matched) specifying whether to compute bandwidth values at the points of \code{X} (\code{at="points"}, the default) or to compute bandwidths at every pixel in a fine pixel grid (\code{at="pixels"}). } \item{hp}{ Optional. A scalar pilot bandwidth, used for estimation of the pilot density if required. Ignored if \code{pilot} is a pixel image (object of class \code{"im"}); see below. } \item{pilot}{ Optional. Specification of a pilot density (possibly unnormalised). If \code{pilot=NULL} the pilot density is computed by applying fixed-bandwidth density estimation to \code{X} using bandwidth \code{hp}. If \code{pilot} is a point pattern, the pilot density is computed using a fixed-bandwidth estimate based on \code{pilot} and \code{hp}. If \code{pilot} is a pixel image (object of class \code{"im"}), this is taken to be the (possibly unnormalised) pilot density, and \code{hp} is ignored. } \item{trim}{ A trimming value required to curb excessively large bandwidths. See Details. The default is sensible in most cases. } \item{smoother}{ Smoother for the pilot. A function or character string, specifying the function to be used to compute the pilot estimate when \code{pilot} is \code{NULL} or is a point pattern. } } \details{ This function computes adaptive smoothing bandwidths using the methods of Abramson (1982) and Hall and Marron (1988). If \code{at="points"} (the default) a smoothing bandwidth is computed for each point in the pattern \code{X}. Alternatively if \code{at="pixels"} a smoothing bandwidth is computed for each spatial location in a pixel grid. Under the Abramson-Hall-Marron rule, the bandwidth at location \eqn{u} is \deqn{ h(u) = \mbox{\texttt{h0}} * \mbox{min}[ \frac{\tilde{f}(u)^{-1/2}}{\gamma}, \mbox{\texttt{trim}} ] }{ h(u) = h0 * min(\tilde{f}(u)^{-1/2}/\gamma, trim) } where \eqn{\tilde{f}(u)} is a pilot estimate of the spatially varying probability density. 
The variable bandwidths are rescaled by \eqn{\gamma}, the geometric mean of the \eqn{\tilde{f}(u)^{-1/2}} terms evaluated at the data; this allows the global bandwidth \code{h0} to be considered on the same scale as a corresponding fixed bandwidth. The trimming value \code{trim} has the same interpretation as the required `clipping' of the pilot density at some small nominal value (see Hall and Marron, 1988), to necessarily prevent extreme bandwidths (which can occur at very isolated observations). The pilot density or intensity is determined as follows: \itemize{ \item If \code{pilot} is a pixel image, this is taken as the pilot density or intensity. \item If \code{pilot} is \code{NULL}, then the pilot intensity is computed as a fixed-bandwidth kernel intensity estimate using \code{\link{density.ppp}} applied to the data pattern \code{X} using the pilot bandwidth \code{hp}. \item If \code{pilot} is a different point pattern on the same spatial domain as \code{X}, then the pilot intensity is computed as a fixed-bandwidth kernel intensity estimate using \code{\link{density.ppp}} applied to \code{pilot} using the pilot bandwidth \code{hp}. } In each case the pilot density or intensity is renormalised to become a probability density, and then the Abramson rule is applied. Instead of calculating the pilot as a fixed-bandwidth density estimate, the user can specify another density estimation procedure using the argument \code{smoother}. This should be either a function or the character string name of a function. It will replace \code{\link{density.ppp}} as the function used to calculate the pilot estimate. The pilot estimate will be computed as \code{smoother(X, sigma=hp, ...)} if \code{pilot} is \code{NULL}, or \code{smoother(pilot, sigma=hp, ...)} if \code{pilot} is a point pattern. If \code{smoother} does not recognise the argument name \code{sigma} for the smoothing bandwidth, then \code{hp} is effectively ignored, as shown in the Examples. 
} \value{ Either a numeric vector of length \code{npoints(X)} giving the Abramson bandwidth for each point (when \code{at = "points"}, the default), or the entire pixel \code{\link[spatstat]{im}}age of the Abramson bandwidths over the relevant spatial domain (when \code{at = "pixels"}). } \references{ Abramson, I. (1982) On bandwidth variation in kernel estimates --- a square root law. \emph{Annals of Statistics}, \bold{10}(4), 1217-1223.\cr Davies, T.M. and Baddeley, A. (2018) Fast computation of spatially adaptive kernel estimates. \emph{Statistics and Computing}, \bold{28}(4), 937-956.\cr Davies, T.M., Marshall, J.C., and Hazelton, M.L. (2018) Tutorial on kernel estimation of continuous spatial and spatiotemporal relative risk. \emph{Statistics in Medicine}, \bold{37}(7), 1191-1221.\cr Hall, P. and Marron, J.S. (1988) Variable window width kernel density estimates of probability densities. \emph{Probability Theory and Related Fields}, \bold{80}, 37-49.\cr Silverman, B.W. (1986) \emph{Density Estimation for Statistics and Data Analysis}. Chapman and Hall, New York. } \author{ Tilman M. Davies. Adapted by \adrian. 
} \examples{ # 'ch' just 58 laryngeal cancer cases ch <- split(chorley)[[1]] h <- bw.abram(ch,h0=1,hp=0.7) length(h) summary(h) if(interactive()) hist(h) # calculate pilot based on all 1036 observations h.pool <- bw.abram(ch,h0=1,hp=0.7,pilot=chorley) length(h.pool) summary(h.pool) if(interactive()) hist(h.pool) # get full image used for 'h' above him <- bw.abram(ch,h0=1,hp=0.7,at="pixels") plot(him);points(ch,col="grey") # use Voronoi-Dirichlet pilot ('hp' is ignored) hvo <- bw.abram(ch, h0=1, smoother=densityVoronoi) } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.linfun.Rd0000644000176200001440000000425413537141454015070 0ustar liggesusers\name{as.linfun} \alias{as.linfun} \alias{as.linfun.linim} \alias{as.linfun.lintess} \title{ Convert Data to a Function on a Linear Network } \description{ Convert some kind of data to an object of class \code{"linfun"} representing a function on a linear network. } \usage{ as.linfun(X, \dots) \method{as.linfun}{linim}(X, \dots) \method{as.linfun}{lintess}(X, \dots, values=marks(X), navalue=NA) } \arguments{ \item{X}{ Some kind of data to be converted. } \item{\dots}{ Other arguments passed to methods. } \item{values}{ Optional. Vector of function values, one entry associated with each tile of the tessellation. } \item{navalue}{ Optional. Function value associated with locations that do not belong to a tile of the tessellation. } } \details{ An object of class \code{"linfun"} represents a function defined on a linear network. The function \code{as.linfun} is generic. The method \code{as.linfun.linim} converts objects of class \code{"linim"} (pixel images on a linear network) to functions on the network. The method \code{as.linfun.lintess} converts a tessellation on a linear network into a function with a different value on each tile of the tessellation. The function values are specified by the argument \code{values}. 
It should be a vector with one entry for each tile of the tessellation; any point lying in tile number \code{i} will return the value \code{values[i]}. If \code{values} is missing, the marks of the tessellation are taken as the function values. If \code{values} is missing and the tessellation has no marks, or if \code{values} is given as \code{NULL}, then the function returns factor values identifying which tile contains each given point. } \value{ Object of class \code{"linfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{linfun}} } \examples{ X <- runiflpp(2, simplenet) Y <- runiflpp(5, simplenet) # image on network D <- density(Y, 0.1, verbose=FALSE) f <- as.linfun(D) f f(X) # tessellation on network Z <- lineardirichlet(Y) g <- as.linfun(Z) g(X) h <- as.linfun(Z, values = runif(5)) h(X) } \keyword{spatial} \keyword{manip} spatstat/man/commonGrid.Rd0000644000176200001440000000342513333543263015266 0ustar liggesusers\name{commonGrid} \alias{commonGrid} \title{Determine A Common Spatial Domain And Pixel Resolution} \description{ Determine a common spatial domain and pixel resolution for several spatial objects such as images, masks, windows and point patterns. } \usage{ commonGrid(\dots) } \arguments{ \item{\dots}{ Any number of pixel images (objects of class \code{"im"}), binary masks (objects of class \code{"owin"} of type \code{"mask"}) or data which can be converted to binary masks by \code{\link{as.mask}}. } } \details{ This function determines a common spatial resolution and spatial domain for several spatial objects. The arguments \code{\dots} may be pixel images, binary masks, or other spatial objects acceptable to \code{\link{as.mask}}. The common pixel grid is determined by inspecting all the pixel images and binary masks in the argument list, finding the pixel grid with the highest spatial resolution, and extending this pixel grid to cover the bounding box of all the spatial objects. 
The return value is a binary mask \code{M}, representing the bounding box at the chosen pixel resolution. Use \code{\link{as.im}(X, W=M)} to convert a pixel image \code{X} to this new pixel resolution. Use \code{\link{as.mask}(W, xy=M)} to convert a window \code{W} to a binary mask at this new pixel resolution. See the Examples. } \value{ A binary mask (object of class \code{"owin"} and type \code{"mask"}). } \author{\adrian and \rolf } \examples{ A <- setcov(square(1)) G <- density(runifpoint(42), dimyx=16) H <- commonGrid(A, letterR, G) newR <- as.mask(letterR, xy=H) newG <- as.im(G, W=H) } \seealso{ \code{\link{harmonise.im}}, \code{\link{compatible.im}}, \code{\link{as.im}} } \keyword{spatial} \keyword{manip} spatstat/man/envelope.Rd0000644000176200001440000010035513551001752014777 0ustar liggesusers\name{envelope} \alias{envelope} \alias{envelope.ppp} \alias{envelope.ppm} \alias{envelope.kppm} \title{Simulation Envelopes of Summary Function} \description{ Computes simulation envelopes of a summary function. 
} \usage{ envelope(Y, fun, \dots) \method{envelope}{ppp}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{ppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{kppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. 
A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. This option is currently not available for \code{envelope.kppm}. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. This option is currently not available for \code{envelope.kppm}. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{clipdata}{ Logical flag indicating whether the data point pattern should be clipped to the same window as the simulated patterns, before the summary function for the data is computed. 
This should usually be \code{TRUE} to ensure that the data and simulations are properly comparable. } \item{start,control}{ Optional. These specify the arguments \code{start} and \code{control} of \code{rmh}, giving complete control over the simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{nrep}{ Number of iterations in the Metropolis-Hastings simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. Default is \code{use.theory=TRUE} if \code{Y} is a point pattern, or a point process model equivalent to Complete Spatial Randomness, and \code{use.theory=FALSE} otherwise. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. 
Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. 
} \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ An object of class \code{"envelope"} and \code{"fv"}, see \code{\link{fv.object}}, which can be printed and plotted directly. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the summary function \code{fun} has been estimated } \item{obs}{ values of the summary function for the data point pattern } \item{lo}{ lower envelope of simulations } \item{hi}{ upper envelope of simulations } and \emph{either} \item{theo}{ theoretical value of the summary function under CSR (Complete Spatial Randomness, a uniform Poisson point process) if the simulations were generated according to CSR } \item{mmean}{ estimated theoretical value of the summary function, computed by averaging simulated values, if the simulations were not generated according to CSR. } Additionally, if \code{savepatterns=TRUE}, the return value has an attribute \code{"simpatterns"} which is a list containing the \code{nsim} simulated patterns. 
If \code{savefuns=TRUE}, the return value has an attribute \code{"simfuns"} which is an object of class \code{"fv"} containing the summary functions computed for each of the \code{nsim} simulated patterns. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. For the most basic use, if you have a point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, Kest,nsim=39))} to see the \eqn{K} function for \code{X} plotted together with the envelopes of the \eqn{K} function for 39 simulations of CSR. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described here. There are also methods for the classes \code{"pp3"}, \code{"lpp"} and \code{"lppm"} which are described separately under \code{\link{envelope.pp3}} and \code{\link{envelope.lpp}}. Envelopes can also be computed from other envelopes, using \code{\link{envelope.envelope}}. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{Y} is a point pattern (an object of class \code{"ppp"}) and \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. (If \code{Y} is a multitype point pattern, then the simulated patterns are also given independent random marks; the probability distribution of the random marks is determined by the relative frequencies of marks in \code{Y}.) 
\item If \code{Y} is a fitted point process model (an object of class \code{"ppm"} or \code{"kppm"}) and \code{simulate=NULL}, then this routine generates \code{nsim} simulated realisations of that model. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. It may be either \itemize{ \item an expression in the R language, typically containing a call to a random generator. This expression will be evaluated \code{nsim} times to yield \code{nsim} point patterns. For example if \code{simulate=expression(runifpoint(100))} then each simulated pattern consists of exactly 100 independent uniform random points. \item a function in the R language, typically containing a call to a random generator. This function will be applied repeatedly to the original data pattern \code{Y} to yield \code{nsim} point patterns. For example if \code{simulate=\link{rlabel}} then each simulated pattern was generated by evaluating \code{\link{rlabel}(Y)} and consists of a randomly-relabelled version of \code{Y}. \item a list of point patterns. The entries in this list will be taken as the simulated patterns. \item an object of class \code{"envelope"}. This should have been produced by calling \code{envelope} with the argument \code{savepatterns=TRUE}. The simulated point patterns that were saved in this object will be extracted and used as the simulated patterns for the new envelope computation. This makes it possible to plot envelopes for two different summary functions based on exactly the same set of simulated point patterns. } } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf}, \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot}, \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti}, \code{Jmulti} or \code{Kinhom}. It may also be a character string containing the name of one of these functions. 
The statistic \code{fun} can also be a user-supplied function; if so, then it must have arguments \code{X} and \code{r} like those in the functions listed above, and it must return an object of class \code{"fv"}. Upper and lower critical envelopes are computed in one of the following ways: \describe{ \item{pointwise:}{by default, envelopes are calculated pointwise (i.e. for each value of the distance argument \eqn{r}), by sorting the \code{nsim} simulated values, and taking the \code{m}-th lowest and \code{m}-th highest values, where \code{m = nrank}. For example if \code{nrank=1}, the upper and lower envelopes are the pointwise maximum and minimum of the simulated values. The pointwise envelopes are \bold{not} \dQuote{confidence bands} for the true value of the function! Rather, they specify the critical points for a Monte Carlo test (Ripley, 1981). The test is constructed by choosing a \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the observed function value lies outside the envelope \emph{at this value of} \eqn{r}. This test has exact significance level \code{alpha = 2 * nrank/(1 + nsim)}. } \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are determined as follows. First we calculate the theoretical mean value of the summary statistic (if we are testing CSR, the theoretical value is supplied by \code{fun}; otherwise we perform a separate set of \code{nsim2} simulations, compute the average of all these simulated values, and take this average as an estimate of the theoretical mean value). Then, for each simulation, we compare the simulated curve to the theoretical curve, and compute the maximum absolute difference between them (over the interval of \eqn{r} values specified by \code{ginterval}). This gives a deviation value \eqn{d_i}{d[i]} for each of the \code{nsim} simulations. Finally we take the \code{m}-th largest of the deviation values, where \code{m=nrank}, and call this \code{dcrit}. 
Then the simultaneous envelopes are of the form \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where \code{expected} is either the theoretical mean value \code{theo} (if we are testing CSR) or the estimated theoretical value \code{mmean} (if we are testing another model). The simultaneous critical envelopes have constant width \code{2 * dcrit}. The simultaneous critical envelopes allow us to perform a different Monte Carlo test (Ripley, 1981). The test rejects the null hypothesis if the graph of the observed function lies outside the envelope \bold{at any value of} \eqn{r}. This test has exact significance level \code{alpha = nrank/(1 + nsim)}. This test can also be performed using \code{\link{mad.test}}. } \item{based on sample moments:}{if \code{VARIANCE=TRUE}, the algorithm calculates the (pointwise) sample mean and sample variance of the simulated functions. Then the envelopes are computed as mean plus or minus \code{nSD} standard deviations. These envelopes do not have an exact significance interpretation. They are a naive approximation to the critical points of the Neyman-Pearson test assuming the summary statistic is approximately Normally distributed. } } The return value is an object of class \code{"fv"} containing the summary function for the data point pattern, the upper and lower simulation envelopes, and the theoretical expected value (exact or estimated) of the summary function for the model being tested. It can be plotted using \code{\link{plot.envelope}}. If \code{VARIANCE=TRUE} then the return value also includes the sample mean, sample variance and other quantities. Arguments can be passed to the function \code{fun} through \code{...}. This means that you simply specify these arguments in the call to \code{envelope}, and they will be passed to \code{fun}. In particular, the argument \code{correction} determines the edge correction to be used to calculate the summary statistic. See the section on Edge Corrections, and the Examples. 
Arguments can also be passed to the function \code{fun} through the list \code{funargs}. This mechanism is typically used if an argument of \code{fun} has the same name as an argument of \code{envelope}. The list \code{funargs} should contain entries of the form \code{name=value}, where each \code{name} is the name of an argument of \code{fun}. There is also an option, rarely used, in which different function arguments are used when computing the summary function for the data \code{Y} and for the simulated patterns. If \code{funYargs} is given, it will be used when the summary function for the data \code{Y} is computed, while \code{funargs} will be used when computing the summary function for the simulated patterns. This option is only needed in rare cases: usually the basic principle requires that the data and simulated patterns must be treated equally, so that \code{funargs} and \code{funYargs} should be identical. If \code{Y} is a fitted cluster point process model (object of class \code{"kppm"}), and \code{simulate=NULL}, then the model is simulated directly using \code{\link{simulate.kppm}}. If \code{Y} is a fitted Gibbs point process model (object of class \code{"ppm"}), and \code{simulate=NULL}, then the model is simulated by running the Metropolis-Hastings algorithm \code{\link{rmh}}. Complete control over this algorithm is provided by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. For simultaneous critical envelopes (\code{global=TRUE}) the following options are also useful: \describe{ \item{\code{ginterval}}{determines the interval of \eqn{r} values over which the deviation between curves is calculated. It should be a numeric vector of length 2. There is a sensible default (namely, the recommended plotting interval for \code{fun(X)}, or the range of \code{r} values if \code{r} is explicitly specified). 
} \item{\code{transform}}{specifies a transformation of the summary function \code{fun} that will be carried out before the deviations are computed. Such transforms are useful if \code{global=TRUE} or \code{VARIANCE=TRUE}. The \code{transform} must be an expression object using the symbol \code{.} to represent the function value (and possibly other symbols recognised by \code{\link{with.fv}}). For example, the conventional way to normalise the \eqn{K} function (Ripley, 1981) is to transform it to the \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/\pi)} and this is implemented by setting \code{transform=expression(sqrt(./pi))}. } } It is also possible to extract the summary functions for each of the individual simulated point patterns, by setting \code{savefuns=TRUE}. Then the return value also has an attribute \code{"simfuns"} containing all the summary functions for the individual simulated patterns. It is an \code{"fv"} object containing functions named \code{sim1, sim2, ...} representing the \code{nsim} summary functions. It is also possible to save the simulated point patterns themselves, by setting \code{savepatterns=TRUE}. Then the return value also has an attribute \code{"simpatterns"} which is a list of length \code{nsim} containing all the simulated point patterns. See \code{\link{plot.envelope}} and \code{\link{plot.fv}} for information about how to plot the envelopes. Different envelopes can be recomputed from the same data using \code{\link{envelope.envelope}}. Envelopes can be combined using \code{\link{pool.envelope}}. } \section{Errors and warnings}{ An error may be generated if one of the simulations produces a point pattern that is empty, or is otherwise unacceptable to the function \code{fun}. The upper envelope may be \code{NA} (plotted as plus or minus infinity) if some of the function values computed for the simulated point patterns are \code{NA}. 
Whether this occurs will depend on the function \code{fun}, but it usually happens when the simulated point pattern does not contain enough points to compute a meaningful value. } \section{Confidence intervals}{ Simulation envelopes do \bold{not} compute confidence intervals; they generate significance bands. If you really need a confidence interval for the true summary function of the point process, use \code{\link{lohboot}}. See also \code{\link{varblock}}. } \section{Edge corrections}{ It is common to apply a correction for edge effects when calculating a summary function such as the \eqn{K} function. Typically the user has a choice between several possible edge corrections. In a call to \code{envelope}, the user can specify the edge correction to be applied in \code{fun}, using the argument \code{correction}. See the Examples below. \describe{ \item{Summary functions in \pkg{spatstat}}{ Summary functions that are available in \pkg{spatstat}, such as \code{\link{Kest}}, \code{\link{Gest}} and \code{\link{pcf}}, have a standard argument called \code{correction} which specifies the name of one or more edge corrections. The list of available edge corrections is different for each summary function, and may also depend on the kind of window in which the point pattern is recorded. In the case of \code{Kest} (the default and most frequently used value of \code{fun}) the best edge correction is Ripley's isotropic correction if the window is rectangular or polygonal, and the translation correction if the window is a binary mask. See the help files for the individual functions for more information. All the summary functions in \pkg{spatstat} recognise the option \code{correction="best"} which gives the \dQuote{best} (most accurate) available edge correction for that function. In a call to \code{envelope}, if \code{fun} is one of the summary functions provided in \pkg{spatstat}, then the default is \code{correction="best"}. 
This means that \emph{by default, the envelope will be computed using the \dQuote{best} available edge correction}. The user can override this default by specifying the argument \code{correction}. For example the computation can be accelerated by choosing another edge correction which is less accurate than the \dQuote{best} one, but faster to compute. } \item{User-written summary functions}{ If \code{fun} is a function written by the user, then \code{envelope} has to guess what to do. If \code{fun} has an argument called \code{correction}, or has \code{\dots} arguments, then \code{envelope} assumes that the function can handle a correction argument. To compute the envelope, \code{fun} will be called with a \code{correction} argument. The default is \code{correction="best"}, unless overridden in the call to \code{envelope}. Otherwise, if \code{fun} does not have an argument called \code{correction} and does not have \code{\dots} arguments, then \code{envelope} assumes that the function \emph{cannot} handle a correction argument. To compute the envelope, \code{fun} is called without a correction argument. } } } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Arnold, 2003. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{dclf.test}}, \code{\link{mad.test}} for envelope-based tests. 
\code{\link{fv.object}}, \code{\link{plot.envelope}}, \code{\link{plot.fv}}, \code{\link{envelope.envelope}}, \code{\link{pool.envelope}} for handling envelopes. There are also methods for \code{print} and \code{summary}. \code{\link{Kest}}, \code{\link{Gest}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{pcf}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ X <- simdat # Envelope of K function under CSR \dontrun{ plot(envelope(X)) } \testonly{ plot(envelope(X, nsim=3)) } # Translation edge correction (this is also FASTER): \dontrun{ plot(envelope(X, correction="translate")) } \testonly{ E <- envelope(X, nsim=3, correction="translate") } # Global envelopes \dontrun{ plot(envelope(X, Lest, global=TRUE)) plot(envelope(X, Kest, global=TRUE, scale=function(r) { r })) } \testonly{ E <- envelope(X, Lest, nsim=3, global=TRUE) E <- envelope(X, Kest, nsim=3, global=TRUE, scale=function(r) { r }) E summary(E) } # Envelope of K function for simulations from Gibbs model \dontrun{ fit <- ppm(cells ~1, Strauss(0.05)) plot(envelope(fit)) plot(envelope(fit), global=TRUE) } \testonly{ fit <- ppm(cells ~1, Strauss(0.05), nd=20) E <- envelope(fit, nsim=3, correction="border", nrep=100) E <- envelope(fit, nsim=3, correction="border", global=TRUE, nrep=100) } # Envelope of K function for simulations from cluster model fit <- kppm(redwood ~1, "Thomas") \dontrun{ plot(envelope(fit, Gest)) plot(envelope(fit, Gest, global=TRUE)) } \testonly{ E <- envelope(fit, Gest, correction="rs", nsim=3, global=TRUE, nrep=100) } # Envelope of G function under CSR \dontrun{ plot(envelope(X, Gest)) } \testonly{ E <- envelope(X, Gest, correction="rs", nsim=3) } # Envelope of L function under CSR # L(r) = sqrt(K(r)/pi) \dontrun{ E <- envelope(X, Kest) plot(E, sqrt(./pi) ~ r) } \testonly{ E <- envelope(X, Kest, correction="border", nsim=3) plot(E, sqrt(./pi) ~ r) } # Simultaneous critical envelope for L function # (alternatively, use Lest) \dontrun{ plot(envelope(X, 
Kest, transform=expression(sqrt(./pi)), global=TRUE)) } \testonly{ E <- envelope(X, Kest, nsim=3, correction="border", transform=expression(sqrt(./pi)), global=TRUE) } ## One-sided envelope \dontrun{ plot(envelope(X, Lest, alternative="less")) } \testonly{ E <- envelope(X, Lest, nsim=3, alternative="less") } # How to pass arguments needed to compute the summary functions: # We want envelopes for Jcross(X, "A", "B") # where "A" and "B" are types of points in the dataset 'demopat' \dontrun{ plot(envelope(demopat, Jcross, i="A", j="B")) } \testonly{ plot(envelope(demopat, Jcross, correction="rs", i="A", j="B", nsim=3)) } # Use of `simulate' expression \dontrun{ plot(envelope(cells, Gest, simulate=expression(runifpoint(42)))) plot(envelope(cells, Gest, simulate=expression(rMaternI(100,0.02)))) } \testonly{ plot(envelope(cells, Gest, correction="rs", simulate=expression(runifpoint(42)), nsim=3)) plot(envelope(cells, Gest, correction="rs", simulate=expression(rMaternI(100, 0.02)), nsim=3, global=TRUE)) } # Use of `simulate' function \dontrun{ plot(envelope(amacrine, Kcross, simulate=rlabel)) } \testonly{ plot(envelope(amacrine, Kcross, simulate=rlabel, nsim=3)) } # Envelope under random toroidal shifts \dontrun{ plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.25)))) } # Envelope under random shifts with erosion \dontrun{ plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.1, edge="erode")))) } # Envelope of INHOMOGENEOUS K-function with fitted trend # The following is valid. # Setting lambda=fit means that the fitted model is re-fitted to # each simulated pattern to obtain the intensity estimates for Kinhom. 
# (lambda=NULL would also be valid) fit <- kppm(redwood ~1, clusters="MatClust") \dontrun{ plot(envelope(fit, Kinhom, lambda=fit, nsim=19)) } \testonly{ envelope(fit, Kinhom, lambda=fit, nsim=3) } # Note that the principle of symmetry, essential to the validity of # simulation envelopes, requires that both the observed and # simulated patterns be subjected to the same method of intensity # estimation. In the following example it would be incorrect to set the # argument 'lambda=red.dens' in the envelope command, because this # would mean that the inhomogeneous K functions of the simulated # patterns would be computed using the intensity function estimated # from the original redwood data, violating the symmetry. There is # still a concern about the fact that the simulations are generated # from a model that was fitted to the data; this is only a problem in # small datasets. \dontrun{ red.dens <- density(redwood, sigma=bw.diggle) plot(envelope(redwood, Kinhom, sigma=bw.diggle, simulate=expression(rpoispp(red.dens)))) } # Precomputed list of point patterns \dontrun{ nX <- npoints(X) PatList <- list() for(i in 1:19) PatList[[i]] <- runifpoint(nX) E <- envelope(X, Kest, nsim=19, simulate=PatList) } \testonly{ PatList <- list() for(i in 1:3) PatList[[i]] <- runifpoint(10) E <- envelope(X, Kest, nsim=3, simulate=PatList) } # re-using the same point patterns \dontrun{ EK <- envelope(X, Kest, savepatterns=TRUE) EG <- envelope(X, Gest, simulate=EK) } \testonly{ EK <- envelope(X, Kest, nsim=3, savepatterns=TRUE) EG <- envelope(X, Gest, nsim=3, simulate=EK) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/localpcf.Rd0000644000176200001440000001435513503620205014746 0ustar liggesusers\name{localpcf} \alias{localpcf} \alias{localpcfinhom} \title{Local pair correlation function} \description{ Computes individual contributions to the pair correlation function from each data point. 
} \usage{ localpcf(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) localpcfinhom(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{delta}{ Smoothing bandwidth for pair correlation. The halfwidth of the Epanechnikov kernel. } \item{rmax}{ Optional. Maximum value of distance \eqn{r} for which pair correlation values \eqn{g(r)} should be computed. } \item{nr}{ Optional. Number of values of distance \eqn{r} for which pair correlation \eqn{g(r)} should be computed. } \item{stoyan}{ Optional. The value of the constant \eqn{c} in Stoyan's rule of thumb for selecting the smoothing bandwidth \code{delta}. } \item{lambda}{ Optional. Values of the estimated intensity function, for the inhomogeneous pair correlation. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{sigma,varcov,\dots}{ These arguments are ignored by \code{localpcf} but are passed by \code{localpcfinhom} (when \code{lambda=NULL}) to the function \code{\link{density.ppp}} to control the kernel smoothing estimation of \code{lambda}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. 
If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } } \details{ \code{localpcf} computes the contribution, from each individual data point in a point pattern \code{X}, to the empirical pair correlation function of \code{X}. These contributions are sometimes known as LISA (local indicator of spatial association) functions based on pair correlation. \code{localpcfinhom} computes the corresponding contribution to the \emph{inhomogeneous} empirical pair correlation function of \code{X}. Given a spatial point pattern \code{X}, the local pcf \eqn{g_i(r)}{g[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ g_i(r) = \frac a {2 \pi n} \sum_j k(d_{i,j} - r) }{ g[i](r) = (a/(2 * pi * n)) * sum[j] k(d[i,j] - r) } where the sum is over all points \eqn{j \neq i}{j != i}, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{d_{ij}}{d[i,j]} is the distance between points \code{i} and \code{j}. Here \code{k} is the Epanechnikov kernel, \deqn{ k(t) = \frac 3 { 4\delta} \max(0, 1 - \frac{t^2}{\delta^2}). }{ k(t) = (3/(4*delta)) * max(0, 1 - t^2/delta^2). } Edge correction is performed using the border method (for the sake of computational efficiency): the estimate \eqn{g_i(r)}{g[i](r)} is set to \code{NA} if \eqn{r > b_i}{r > b[i]}, where \eqn{b_i}{b[i]} is the distance from point \eqn{i} to the boundary of the observation window. The smoothing bandwidth \eqn{\delta}{delta} may be specified. If not, it is chosen by Stoyan's rule of thumb \eqn{\delta = c/\hat\lambda}{delta = c/lambda} where \eqn{\hat\lambda = n/a}{lambda = n/a} is the estimated intensity and \eqn{c} is a constant, usually taken to be 0.15. The value of \eqn{c} is controlled by the argument \code{stoyan}. For \code{localpcfinhom}, the optional argument \code{lambda} specifies the values of the estimated intensity function. 
If \code{lambda} is given, it should be either a numeric vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If \code{lambda} is not given, then it will be estimated using a leave-one-out kernel density smoother as described in \code{\link{pcfinhom}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the local pair correlation functions have been estimated } \item{theo}{the theoretical value \eqn{g(r) = 1} for a stationary Poisson process } together with columns containing the values of the local pair correlation function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. 
} \seealso{ \code{\link{localK}}, \code{\link{localKinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}} } \examples{ data(ponderosa) X <- ponderosa g <- localpcf(X, stoyan=0.5) colo <- c(rep("grey", npoints(X)), "blue") a <- plot(g, main=c("local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) # plot only the local pair correlation function for point number 7 plot(g, est007 ~ r) gi <- localpcfinhom(X, stoyan=0.5) a <- plot(gi, main=c("inhomogeneous local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/levelset.Rd0000644000176200001440000000403213333543263015006 0ustar liggesusers\name{levelset} \alias{levelset} \title{Level Set of a Pixel Image} \description{ Given a pixel image, find all pixels which have values less than a specified threshold value (or greater than a threshold, etc), and assemble these pixels into a window. } \usage{ levelset(X, thresh, compare="<=") } \arguments{ \item{X}{A pixel image (object of class "im")}. \item{thresh}{Threshold value. A single number or value compatible with the pixel values in \code{X}}. \item{compare}{Character string specifying one of the comparison operators \code{"<", ">", "==", "<=", ">=", "!="}. } } \details{ If \code{X} is a pixel image with numeric values, then \code{levelset(X, thresh)} finds the region of space where the pixel values are less than or equal to the threshold value \code{thresh}. This region is returned as a spatial window. The argument \code{compare} specifies how the pixel values should be compared with the threshold value. Instead of requiring pixel values to be less than or equal to \code{thresh}, you can specify that they must be less than (\code{<}), greater than (\code{>}), equal to (\code{==}), greater than or equal to (\code{>=}), or not equal to (\code{!=}) the threshold value \code{thresh}. 
If \code{X} has non-numeric pixel values (for example, logical or factor values) it is advisable to use only the comparisons \code{==} and \code{!=}, unless you really know what you are doing. For more complicated logical comparisons, see \code{\link{solutionset}}. } \value{ A spatial window (object of class \code{"owin"}, see \code{\link{owin.object}}) containing the pixels satisfying the constraint. } \seealso{ \code{\link{im.object}}, \code{\link{as.owin}}, \code{\link{solutionset}}. } \examples{ # test image X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) W <- levelset(X, 0.2) W <- levelset(X, -0.3, ">") # compute area of level set area(levelset(X, 0.1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} \keyword{manip} spatstat/man/crossdist.lpp.Rd0000644000176200001440000000411413333543263015773 0ustar liggesusers\name{crossdist.lpp} \alias{crossdist.lpp} \title{Pairwise distances between two point patterns on a linear network} \description{ Computes the distances between pairs of points taken from two different point patterns on the same linear network. } \usage{ \method{crossdist}{lpp}(X, Y, \dots, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} network. } \item{\dots}{ Ignored. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. Matrix entries are nonnegative numbers or infinity (\code{Inf}). } \details{ Given two point patterns on a linear network, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, measuring distance by the shortest path in the network. 
This is a method for the generic function \code{\link{crossdist}} for point patterns on a linear network (objects of class \code{"lpp"}). This function expects two point pattern objects \code{X} and \code{Y} on the \emph{same} linear network, and returns the matrix whose \code{[i,j]} entry is the shortest-path distance from \code{X[i]} to \code{Y[j]}. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is much faster. If two points cannot be joined by a path, the distance between them is infinite (\code{Inf}). } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ v <- split(chicago) X <- v$cartheft Y <- v$burglary d <- crossdist(X, Y) } \author{ \adrian. } \keyword{spatial} \keyword{math} spatstat/man/treeprune.Rd0000644000176200001440000000306013333543264015175 0ustar liggesusers\name{treeprune} \alias{treeprune} \title{ Prune Tree to Given Level } \description{ Prune a tree by removing all the branches above a given level. } \usage{ treeprune(X, root = 1, level = 0) } \arguments{ \item{X}{ Object of class \code{"linnet"} or \code{"lpp"}. } \item{root}{ Index of the root vertex amongst the vertices of \code{as.linnet(X)}. } \item{level}{ Integer specifying the level above which the tree should be pruned. } } \details{ The object \code{X} must be either a linear network, or a derived object such as a point pattern on a linear network. The linear network must be an acyclic graph (i.e. must not contain any loops) so that it can be interpreted as a tree. This function removes all vertices for which \code{\link{treebranchlabels}} gives a string more than \code{level} characters long. } \value{ Object of the same kind as \code{X}. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{treebranchlabels}} for calculating the branch labels. \code{\link{deletebranch}} for removing entire branches. \code{\link{extractbranch}} for extracting entire branches. \code{\link{linnet}} for creating networks. } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) # prune tree tp <- treeprune(L, root=1, 1) plot(tp, add=TRUE, col="blue", lwd=3) } \keyword{spatial} \keyword{manip} spatstat/man/duplicated.ppp.Rd0000644000176200001440000000550313333543263016103 0ustar liggesusers\name{duplicated.ppp} \alias{duplicated.ppp} \alias{duplicated.ppx} \alias{anyDuplicated.ppp} \alias{anyDuplicated.ppx} \title{Determine Duplicated Points in a Spatial Point Pattern} \description{ Determines which points in a spatial point pattern are duplicates of previous points, and returns a logical vector. } \usage{ \method{duplicated}{ppp}(x, \dots, rule=c("spatstat", "deldir", "unmark")) \method{duplicated}{ppx}(x, \dots) \method{anyDuplicated}{ppp}(x, \dots) \method{anyDuplicated}{ppx}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Ignored. } \item{rule}{ Character string. The rule for determining duplicated points. } } \value{ \code{duplicated(x)} returns a logical vector of length equal to the number of points in \code{x}. \code{anyDuplicated(x)} is a number equal to 0 if there are no duplicated points, and otherwise is equal to the index of the first duplicated point. } \details{ These are methods for the generic functions \code{\link{duplicated}} and \code{\link{anyDuplicated}} for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}, or class \code{"ppx"}). \code{anyDuplicated(x)} is a faster version of \code{any(duplicated(x))}. 
Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. This function determines which points in \code{x} duplicate other points that appeared earlier in the sequence. It returns a logical vector with entries that are \code{TRUE} for duplicated points and \code{FALSE} for unique (non-duplicated) points. If \code{rule="spatstat"} (the default), two points are deemed identical if their coordinates are equal according to \code{==}, \emph{and} their marks are equal according to \code{==}. This is the most stringent possible test. If \code{rule="unmark"}, duplicated points are determined by testing equality of their coordinates only, using \code{==}. If \code{rule="deldir"}, duplicated points are determined by testing equality of their coordinates only, using the function \code{\link[deldir]{duplicatedxy}} in the package \pkg{deldir}, which currently uses \code{\link{duplicated.data.frame}}. Setting \code{rule="deldir"} will ensure consistency with functions in the \pkg{deldir} package. } \seealso{ \code{\link{ppp.object}}, \code{\link{unique.ppp}}, \code{\link{multiplicity.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) duplicated(X) duplicated(X, rule="deldir") } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/discpartarea.Rd0000644000176200001440000000373713333543263015640 0ustar liggesusers\name{discpartarea} \Rdversion{1.1} \alias{discpartarea} \title{ Area of Part of Disc } \description{ Compute area of intersection between a disc and a window } \usage{ discpartarea(X, r, W=as.owin(X)) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) specifying the centres of the discs. Alternatively, \code{X} may be in any format acceptable to \code{\link{as.ppp}}. 
} \item{r}{ Matrix, vector or numeric value specifying the radii of the discs. } \item{W}{ Window (object of class \code{"owin"}) with which the discs should be intersected. } } \details{ This algorithm computes the exact area of the intersection between a window \code{W} and a disc (or each of several discs). The centres of the discs are specified by the point pattern \code{X}, and their radii are specified by \code{r}. If \code{r} is a single numeric value, then the algorithm computes the area of intersection between \code{W} and the disc of radius \code{r} centred at each point of \code{X}, and returns a one-column matrix containing one entry for each point of \code{X}. If \code{r} is a vector of length \code{m}, then the algorithm returns an \code{n * m} matrix in which the entry on row \code{i}, column \code{j} is the area of the intersection between \code{W} and the disc centred at \code{X[i]} with radius \code{r[j]}. If \code{r} is a matrix, it should have one row for each point in \code{X}. The algorithm returns a matrix in which the entry on row \code{i}, column \code{j} is the area of the intersection between \code{W} and the disc centred at \code{X[i]} with radius \code{r[i,j]}. Areas are computed by analytic geometry. } \value{ Numeric matrix, with one row for each point of \code{X}. } \seealso{ \code{\link{owin}}, \code{\link{disc}} } \examples{ data(letterR) X <- runifpoint(3, letterR) discpartarea(X, 0.2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Kscaled.Rd0000644000176200001440000002165313333543262014540 0ustar liggesusers\name{Kscaled} \alias{Kscaled} \alias{Lscaled} \title{Locally Scaled K-function} \description{ Estimates the locally-rescaled \eqn{K}-function of a point process. 
} \usage{ Kscaled(X, lambda=NULL, \dots, r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) Lscaled(\dots) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the locally scaled \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a \code{function(x,y)} which can be evaluated to give the intensity value at any location, or a fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed from \code{Lscaled} to \code{Kscaled} and from \code{Kscaled} to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the locally scaled \eqn{K} function should be evaluated. (These are rescaled distances.) Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{rmax}{ maximum value of the argument \eqn{r} that should be used. (This is the rescaled distance). } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. 
} \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. } \details{ \code{Kscaled} computes an estimate of the \eqn{K} function for a locally scaled point process. \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. Locally scaled point processes are a class of models for inhomogeneous point patterns, introduced by Hahn et al (2003). They include inhomogeneous Poisson processes, and many other models. The template \eqn{K} function of a locally-scaled process is a counterpart of the ``ordinary'' Ripley \eqn{K} function, in which the distances between points of the process are measured on a spatially-varying scale (such that the locally rescaled process has unit intensity). The template \eqn{K} function is an indicator of interaction between the points. For an inhomogeneous Poisson process, the theoretical template \eqn{K} function is approximately equal to \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Values \eqn{K_{\rm scaled}(r) > \pi r^2}{Kscaled(r) > pi * r^2} are suggestive of clustering. 
\code{Kscaled} computes an estimate of the template \eqn{K} function and \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. The locally scaled interpoint distances are computed using an approximation proposed by Hahn (2007). The Euclidean distance between two points is multiplied by the average of the square roots of the intensity values at the two points. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. 
It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. If \code{renormalise=TRUE}, the estimated intensity \code{lambda} is multiplied by \eqn{c^(normpower/2)} before performing other calculations, where \eqn{c = area(W)/sum[i] (1/lambda(x[i]))}. This renormalisation has about the same effect as in \code{\link{Kinhom}}, reducing the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. Edge corrections are used to correct bias in the estimation of \eqn{K_{\rm scaled}}{Kscaled}. First the interpoint distances are rescaled, and then edge corrections are applied as in \code{\link{Kest}}. See \code{\link{Kest}} for details of the edge corrections and the options for the argument \code{correction}. The pair correlation function can also be applied to the result of \code{Kscaled}; see \code{\link{pcf}} and \code{\link{pcf.fv}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. Hahn, U. (2007) \emph{Global and Local Scaling in the Statistics of Spatial Point Processes}. Habilitationsschrift, Universitaet Augsburg. Hahn, U., Jensen, E.B.V., van Lieshout, M.N.M. and Nielsen, L.S. (2003) Inhomogeneous spatial point processes by location-dependent scaling. 
\emph{Advances in Applied Probability} \bold{35}, 319--336. \Prokesova, M., Hahn, U. and Vedel Jensen, E.B. (2006) Statistics for locally scaled point patterns. In A. Baddeley, P. Gregori, J. Mateu, R. Stoica and D. Stoyan (eds.) \emph{Case Studies in Spatial Point Pattern Modelling}. Lecture Notes in Statistics 185. New York: Springer Verlag. Pages 99--123. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(bronzefilter) X <- unmark(bronzefilter) K <- Kscaled(X) fit <- ppm(X, ~x) lam <- predict(fit) K <- Kscaled(X, lam) } \author{Ute Hahn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/relrisk.ppp.Rd0000644000176200001440000002260413355557761015455 0ustar liggesusers\name{relrisk.ppp} \alias{relrisk.ppp} \title{ Nonparametric Estimate of Spatially-Varying Relative Risk } \description{ Given a multitype point pattern, this function estimates the spatially-varying probability of each type of point, or the ratios of such probabilities, using kernel smoothing. The default smoothing bandwidth is selected by cross-validation. } \usage{ \method{relrisk}{ppp}(X, sigma = NULL, ..., varcov = NULL, at = c("pixels", "points"), relative=FALSE, adjust=1, edge=TRUE, diggle=FALSE, se=FALSE, casecontrol=TRUE, control=1, case) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{sigma}{ Optional. The numeric value of the smoothing bandwidth (the standard deviation of isotropic Gaussian smoothing kernel). Alternatively \code{sigma} may be a function which can be used to select a different bandwidth for each type of point. See Details. } \item{\dots}{ Arguments passed to \code{\link{bw.relrisk}} to select the bandwidth, or passed to \code{\link{density.ppp}} to control the pixel resolution. } \item{varcov}{ Optional. Variance-covariance matrix of anisotopic Gaussian smoothing kernel. Incompatible with \code{sigma}. 
} \item{at}{ Character string specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppp} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. 
If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. 
By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Estimation is performed by a simple Nadaraya-Watson type kernel smoother (Diggle, 2003). The smoothing bandwidth can be specified in any of the following ways: \itemize{ \item \code{sigma} is a single numeric value, giving the standard deviation of the isotropic Gaussian kernel. \item \code{sigma} is a numeric vector of length 2, giving the standard deviations in the \eqn{x} and \eqn{y} directions of a Gaussian kernel. \item \code{varcov} is a 2 by 2 matrix giving the variance-covariance matrix of the Gaussian kernel. \item \code{sigma} is a \code{function} which selects the bandwidth. Bandwidth selection will be applied \bold{separately to each type of point}. An example of such a function is \code{\link{bw.diggle}}. \item \code{sigma} and \code{varcov} are both missing or null. Then a \bold{common} smoothing bandwidth \code{sigma} will be selected by cross-validation using \code{\link{bw.relrisk}}. \item An infinite smoothing bandwidth, \code{sigma=Inf}, is permitted and yields a constant estimate of relative risk. } If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, \emph{assuming a Poisson process}. } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. 
If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \seealso{ There is another method \code{\link{relrisk.ppm}} for point process models which computes \emph{parametric} estimates of relative risk, using the fitted model. 
See also \code{\link{bw.relrisk}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}, \code{\link{eval.im}} } \examples{ p.oak <- relrisk(urkiola, 20) if(interactive()) { plot(p.oak, main="proportion of oak") plot(eval.im(p.oak > 0.3), main="More than 30 percent oak") plot(split(lansing), main="Lansing Woods") p.lan <- relrisk(lansing, 0.05, se=TRUE) plot(p.lan$estimate, main="Lansing Woods species probability") plot(p.lan$SE, main="Lansing Woods standard error") wh <- im.apply(p.lan$estimate, which.max) types <- levels(marks(lansing)) wh <- eval.im(types[wh]) plot(wh, main="Most common species") } } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/nndist.psp.Rd0000644000176200001440000000540313333543263015266 0ustar liggesusers\name{nndist.psp} \alias{nndist.psp} \title{Nearest neighbour distances between line segments} \description{ Computes the distance from each line segment to its nearest neighbour in a line segment pattern. Alternatively finds the distance to the second nearest, third nearest etc. } \usage{ \method{nndist}{psp}(X, \dots, k=1, method="C") } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each line segment. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th segment. 
If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th segment. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th segment. } \details{ This is a method for the generic function \code{\link{nndist}} for the class \code{"psp"}. If \code{k=1}, this function computes the distance from each line segment to the nearest other line segment in \code{X}. In general it computes the distance from each line segment to the \code{k}th nearest other line segment. The argument \code{k} can also be a vector, and this computation will be performed for each value of \code{k}. Distances are calculated using the Hausdorff metric. The Hausdorff distance between two line segments is the maximum distance from any point on one of the segments to the nearest point on the other segment. If there are fewer than \code{max(k)+1} line segments in the pattern, some of the nearest neighbour distances will be infinite (\code{Inf}). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted \R code only. If \code{method="C"} (the default) then compiled \code{C} code is used. The \code{C} code is somewhat faster. 
} \seealso{ \code{\link{nndist}}, \code{\link{nndist.ppp}} } \examples{ L <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- nndist(L) D <- nndist(L, k=1:3) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/unstack.ppp.Rd0000644000176200001440000000401513536625731015440 0ustar liggesusers\name{unstack.ppp} \alias{unstack.ppp} \alias{unstack.psp} \alias{unstack.lpp} \alias{unstack.tess} \alias{unstack.lintess} \title{ Separate Multiple Columns of Marks } \description{ Given a spatial pattern with several columns of marks, take one column at a time, and return a list of spatial patterns each having only one column of marks. } \usage{ \method{unstack}{ppp}(x, \dots) \method{unstack}{psp}(x, \dots) \method{unstack}{lpp}(x, \dots) \method{unstack}{tess}(x, \dots) \method{unstack}{lintess}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"lpp"}) or a spatial pattern of line segments (object of class \code{"psp"}) or a spatial tessellation (object of class \code{"tess"}) or a tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{ Ignored. } } \details{ The functions defined here are methods for the generic \code{\link[utils]{unstack}}. The functions expect a spatial object \code{x} which has several columns of marks; they separate the columns, and return a list of spatial objects, each having only one column of marks. If \code{x} has several columns of marks (i.e. \code{marks(x)} is a matrix, data frame or hyperframe with several columns), then \code{y <- unstack(x)} is a list of spatial objects, each of the same kind as \code{x}. The \code{j}th entry \code{y[[j]]} is equivalent to \code{x} except that it only includes the \code{j}th column of \code{marks(x)}. If \code{x} has no marks, or has only a single column of marks, the result is a list consisting of one entry, which is \code{x}. 
} \value{ A list, of class \code{"solist"}, whose entries are objects of the same type as \code{x}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.msr}} See also methods for the generic \code{\link[base]{split}} such as \code{\link{split.ppp}}. } \examples{ finpines unstack(finpines) } \keyword{spatial} \keyword{manip} spatstat/man/inforder.family.Rd0000644000176200001440000000225313333543263016256 0ustar liggesusers\name{inforder.family} \alias{inforder.family} \title{Infinite Order Interaction Family} \description{ An object describing the family of all Gibbs point processes with infinite interaction order. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have infinite order of interaction, such as the area-interaction process \cite{\link{AreaInter}}. Anyway, \code{inforder.family} is an object of class \code{"isf"} containing a function \code{inforder.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \seealso{ \code{\link{AreaInter}} to create the area interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/as.hyperframe.Rd0000644000176200001440000000503313333543262015730 0ustar liggesusers\name{as.hyperframe} \Rdversion{1.1} \alias{as.hyperframe} \alias{as.hyperframe.default} \alias{as.hyperframe.data.frame} \alias{as.hyperframe.hyperframe} \alias{as.hyperframe.listof} \alias{as.hyperframe.anylist} \title{ Convert Data to Hyperframe } \description{ Converts data from any suitable format into a hyperframe. } \usage{ as.hyperframe(x, \dots) \method{as.hyperframe}{default}(x, \dots) \method{as.hyperframe}{data.frame}(x, \dots, stringsAsFactors=FALSE) \method{as.hyperframe}{hyperframe}(x, \dots) \method{as.hyperframe}{listof}(x, \dots) \method{as.hyperframe}{anylist}(x, \dots) } \arguments{ \item{x}{ Data in some other format. } \item{\dots}{ Optional arguments passed to \code{\link{hyperframe}}. } \item{stringsAsFactors}{ Logical. If \code{TRUE}, any column of the data frame \code{x} that contains character strings will be converted to a \code{factor}. If \code{FALSE}, no such conversion will occur. } } \details{ A hyperframe is like a data frame, except that its entries can be objects of any kind. The generic function \code{as.hyperframe} converts any suitable kind of data into a hyperframe. There are methods for the classes \code{data.frame}, \code{listof}, \code{anylist} and a default method, all of which convert data that is like a hyperframe into a hyperframe object. (The method for the class \code{listof} and \code{anylist} converts a list of objects, of arbitrary type, into a hyperframe with one column.) These methods do not discard any information. There are also methods for other classes (see \code{\link{as.hyperframe.ppx}}) which extract the coordinates from a spatial dataset. These methods do discard some information. } \section{Conversion of Strings to Factors}{ Note that \code{as.hyperframe.default} will convert a character vector to a factor. 
It behaves like \code{\link{as.data.frame}}. However \code{as.hyperframe.data.frame} does not convert strings to factors; it respects the structure of the data frame \code{x}. The behaviour can be changed using the argument \code{stringsAsFactors}. } \value{ An object of class \code{"hyperframe"} created by \code{\link{hyperframe}}. } \author{\adrian and \rolf } \seealso{ \code{\link{hyperframe}}, \code{\link{as.hyperframe.ppx}} } \examples{ df <- data.frame(x=runif(4),y=letters[1:4]) as.hyperframe(df) sims <- list() for(i in 1:3) sims[[i]] <- rpoispp(42) as.hyperframe(as.listof(sims)) as.hyperframe(as.solist(sims)) } \keyword{spatial} \keyword{manip} spatstat/man/layerplotargs.Rd0000644000176200001440000000331613333543263016057 0ustar liggesusers\name{layerplotargs} \alias{layerplotargs} \alias{layerplotargs<-} \title{ Extract or Replace the Plot Arguments of a Layered Object } \description{ Extracts or replaces the plot arguments of a layered object. } \usage{ layerplotargs(L) layerplotargs(L) <- value } \arguments{ \item{L}{ An object of class \code{"layered"} created by the function \code{\link{layered}}. } \item{value}{ Replacement value. A list, with the same length as \code{L}, whose elements are lists of plot arguments. } } \details{ These commands extract or replace the \code{plotargs} in a layered object. See \code{\link{layered}}. The replacement \code{value} should normally have the same length as the current value. However, it can also be a list with \emph{one} element which is a list of parameters. This will be replicated to the required length. For the assignment function \code{layerplotargs<-}, the argument \code{L} can be any spatial object; it will be converted to a \code{layered} object with a single layer. } \value{ \code{layerplotargs} returns a list of lists of plot arguments. \code{"layerplotargs<-"} returns the updated object of class \code{"layered"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{layered}}, \code{\link{methods.layered}}, \code{\link{[.layered}}. } \examples{ W <- square(2) L <- layered(W=W, X=cells) ## The following are equivalent layerplotargs(L) <- list(list(), list(pch=16)) layerplotargs(L)[[2]] <- list(pch=16) layerplotargs(L)$X <- list(pch=16) ## The following are equivalent layerplotargs(L) <- list(list(cex=2), list(cex=2)) layerplotargs(L) <- list(list(cex=2)) } \keyword{spatial} \keyword{hplot} spatstat/man/where.max.Rd0000644000176200001440000000334013333543264015063 0ustar liggesusers\name{where.max} \alias{where.max} \alias{where.min} \title{ Find Location of Maximum in a Pixel Image } \description{ Finds the spatial location(s) where a given pixel image attains its maximum or minimum value. } \usage{ where.max(x, first = TRUE) where.min(x, first = TRUE) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}). } \item{first}{ Logical value. If \code{TRUE} (the default), then only one location will be returned. If \code{FALSE}, then all locations where the maximum is achieved will be returned. } } \details{ This function finds the spatial location or locations where the pixel image \code{x} attains its maximum or minimum value. The result is a point pattern giving the locations. If \code{first=TRUE} (the default), then only one location will be returned, namely the location with the smallest \eqn{y} coordinate value which attains the maximum or minimum. This behaviour is analogous to the functions \code{\link[base]{which.min}} and \code{\link[base:which.min]{which.max}}. If \code{first=FALSE}, then the function returns the locations of all pixels where the maximum (or minimum) value is attained. This could be a large number of points. } \value{ A point pattern (object of class \code{"ppp"}). 
} \author{ \adrian } \seealso{ \code{\link{Summary.im}} for computing the minimum and maximum of pixel values; \code{\link{eval.im}} and \code{\link{Math.im}} for mathematical expressions involving images; \code{\link{solutionset}} for finding the set of pixels where a statement is true. } \examples{ D <- distmap(letterR, invert=TRUE) plot(D) plot(where.max(D), add=TRUE, pch=16, cols="green") } \keyword{spatial} \keyword{math} spatstat/man/affine.im.Rd0000644000176200001440000000272713333543262015027 0ustar liggesusers\name{affine.im} \alias{affine.im} \title{Apply Affine Transformation To Pixel Image} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a pixel image. } \usage{ \method{affine}{im}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the pixel resolution of the transformed image. } } \value{ Another pixel image (of class \code{"im"}) representing the result of applying the affine transformation. } \details{ The image is subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and then the result is translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.owin}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ X <- setcov(owin()) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/Smooth.Rd0000644000176200001440000000147113333543264014441 0ustar liggesusers\name{Smooth} \alias{Smooth} \title{Spatial smoothing of data} \description{ Generic function to perform spatial smoothing of spatial data. } \usage{ Smooth(X, ...) } \arguments{ \item{X}{Some kind of spatial data} \item{\dots}{Arguments passed to methods.} } \details{ This generic function calls an appropriate method to perform spatial smoothing on the spatial dataset \code{X}. Methods for this function include \itemize{ \item \code{\link{Smooth.ppp}} for point patterns \item \code{\link{Smooth.msr}} for measures \item \code{\link{Smooth.fv}} for function value tables } } \seealso{ \code{\link{Smooth.ppp}}, \code{\link{Smooth.im}}, \code{\link{Smooth.msr}}, \code{\link{Smooth.fv}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/bits.envelope.Rd0000644000176200001440000001121413501357607015742 0ustar liggesusers\name{bits.envelope} \alias{bits.envelope} \title{ Global Envelopes for Balanced Independent Two-Stage Test } \description{ Computes the global envelopes corresponding to the balanced independent two-stage Monte Carlo test of goodness-of-fit. } \usage{ bits.envelope(X, \dots, nsim = 19, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). 
} \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. } \item{nsim}{ Number of simulated patterns to be generated in each stage. Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsim} simulated realisations, together with one independent set of \code{nsim} realisations, so there will be a total of \code{nsim * (nsim + 1)} simulations. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. 
} } \details{ Computes global simulation envelopes corresponding to the balanced independent two-stage Monte Carlo test of goodness-of-fit described by Baddeley et al (2017). The envelopes are described in Baddeley et al (2019). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. This command is similar to \code{\link{dg.envelope}} which corresponds to the Dao-Genton test of goodness-of-fit. It was shown in Baddeley et al (2017) that the Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.envelope}} in this case. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis} \bold{114}, {75--87}. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2019) Pushing the envelope: extensions of graphical Monte Carlo tests. In preparation. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian. 
} \seealso{ \code{\link{dg.envelope}}, \code{\link{bits.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- bits.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- bits.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- bits.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/plot.colourmap.Rd0000644000176200001440000000736713354377013016160 0ustar liggesusers\name{plot.colourmap} \alias{plot.colourmap} \title{Plot a Colour Map} \description{ Displays a colour map as a colour ribbon } \usage{ \method{plot}{colourmap}(x, ..., main, xlim = NULL, ylim = NULL, vertical = FALSE, axis = TRUE, labelmap=NULL, gap=0.25, add=FALSE, increasing=NULL) } \arguments{ \item{x}{Colour map to be plotted. An object of class \code{"colourmap"}.} \item{\dots}{ Graphical arguments passed to \code{\link{image.default}} or \code{\link{axis}}. } \item{main}{Main title for plot. A character string.} \item{xlim}{ Optional range of \code{x} values for the location of the colour ribbon. } \item{ylim}{ Optional range of \code{y} values for the location of the colour ribbon. } \item{vertical}{Logical flag determining whether the colour ribbon is plotted as a horizontal strip (\code{FALSE}) or a vertical strip (\code{TRUE}).} \item{axis}{Logical flag determining whether an axis should be plotted showing the numerical values that are mapped to the colours. } \item{labelmap}{ Function. If this is present, then the labels on the plot, which indicate the input values corresponding to particular colours, will be transformed by \code{labelmap} before being displayed on the plot. Typically used to simplify or shorten the labels on the plot. } \item{gap}{ Distance between separate blocks of colour, as a fraction of the width of one block, if the colourmap is discrete. 
} \item{add}{ Logical value indicating whether to add the colourmap to the existing plot (\code{add=TRUE}), or to start a new plot (\code{add=FALSE}, the default). } \item{increasing}{ Logical value indicating whether to display the colour map in increasing order. See Details. } } \details{ This is the plot method for the class \code{"colourmap"}. An object of this class (created by the function \code{\link{colourmap}}) represents a colour map or colour lookup table associating colours with each data value. The command \code{plot.colourmap} displays the colour map as a colour ribbon or as a colour legend (a sequence of blocks of colour). This plot can be useful on its own to inspect the colour map. If the domain of the colourmap is an interval of real numbers, the colourmap is displayed as a continuous ribbon of colour. If the domain of the colourmap is a finite set of inputs, the colours are displayed as separate blocks of colour. The separation between blocks is equal to \code{gap} times the width of one block. To annotate an existing plot with an explanatory colour ribbon or colour legend, specify \code{add=TRUE} and use the arguments \code{xlim} and/or \code{ylim} to control the physical position of the ribbon on the plot. Labels explaining the colour map are drawn by \code{\link[graphics]{axis}} and can be modified by specifying arguments that will be passed to this function. The argument \code{increasing} indicates whether the colourmap should be displayed so that the input values are increasing with the spatial coordinate: that is, increasing from left to right (if \code{vertical=FALSE}) or increasing from bottom to top (if \code{vertical=TRUE}). If \code{increasing=FALSE}, this ordering will be reversed. The default is \code{increasing=TRUE} in all cases except when \code{vertical=TRUE} and the domain of the colourmap is a finite set of discrete inputs. } \value{ None. 
} \seealso{\code{\link{colourmap}}} \examples{ co <- colourmap(rainbow(100), breaks=seq(-1,1,length=101)) plot(co) plot(co, col.ticks="pink") ca <- colourmap(rainbow(8), inputs=letters[1:8]) plot(ca, vertical=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{color} \keyword{hplot} spatstat/man/plot.plotppm.Rd0000644000176200001440000000634413333543264015644 0ustar liggesusers\name{plot.plotppm} \alias{plot.plotppm} \title{Plot a plotppm Object Created by plot.ppm} \description{ The function plot.ppm produces objects which specify plots of fitted point process models. The function plot.plotppm carries out the actual plotting of these objects. } \usage{ \method{plot}{plotppm}(x, data = NULL, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how = c("persp", "image", "contour"), \dots, pppargs) } \arguments{ \item{x}{ An object of class \code{plotppm} produced by \code{\link{plot.ppm}()} }. \item{data}{ The point pattern (an object of class \code{ppp}) to which the point process model was fitted (by \code{\link{ppm}}). } \item{trend}{ Logical scalar; should the trend component of the fitted model be plotted? } \item{cif}{ Logical scalar; should the complete conditional intensity of the fitted model be plotted? } \item{se}{ Logical scalar; should the estimated standard error of the fitted intensity be plotted? } \item{pause}{ Logical scalar indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. } \item{how}{ Character string or character vector indicating the style or styles of plots to be performed. } \item{\dots}{ Extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } \item{pppargs}{ List of extra arguments passed to \code{\link{plot.ppp}} when displaying the original point pattern data. } } \details{ If argument \code{data} is supplied then the point pattern will be superimposed on the image and contour plots. 
Sometimes a fitted model does not have a trend component, or the trend component may constitute all of the conditional intensity (if the model is Poisson). In such cases the object \code{x} will not contain a trend component, or will contain only a trend component. This will also be the case if one of the arguments \code{trend} and \code{cif} was set equal to \code{FALSE} in the call to \code{plot.ppm()} which produced \code{x}. If this is so then only the item which is present will be plotted. Explicitly setting \code{trend=TRUE}, or \code{cif=TRUE}, respectively, will then give an error. } \value{ None. } \section{Warning}{ Arguments which are passed to \code{persp}, \code{image}, and \code{contour} via the \dots argument get passed to any of the other functions listed in the \code{how} argument, and won't be recognized by them. This leads to a lot of annoying but harmless warning messages. Arguments to \code{persp} may be supplied via \code{\link{spatstat.options}()} which alleviates the warning messages in this instance. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.ppm}()} } \examples{ \dontrun{ m <- ppm(cells ~ 1, Strauss(0.05)) mpic <- plot(m) # Perspective plot only, with altered parameters: plot(mpic,how="persp", theta=-30,phi=40,d=4) # All plots, with altered parameters for perspective plot: op <- spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) plot(mpic) # Revert spatstat.options(op) } } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/reload.or.compute.Rd0000644000176200001440000000502013333543264016522 0ustar liggesusers\name{reload.or.compute} \alias{reload.or.compute} \title{ Compute Unless Previously Saved } \description{ If the designated file does not yet exist, evaluate the expression and save the results in the file. If the file already exists, re-load the results from the file. 
} \usage{ reload.or.compute(filename, expr, objects = NULL, destination = parent.frame(), force=FALSE) } \arguments{ \item{filename}{ Name of data file. A character string. } \item{expr}{ \R language expression to be evaluated. } \item{objects}{ Optional character vector of names of objects to be saved in \code{filename} after evaluating \code{expr}, or names of objects that should be present in \code{filename} when loaded. } \item{destination}{ Environment in which the resulting objects should be assigned. } \item{force}{ Logical value indicating whether to perform the computation in any case. } } \details{ This facility is useful for saving, and later re-loading, the results of time-consuming computations. It would typically be used in an \R script file or an \code{\link[utils]{Sweave}} document. If the file called \code{filename} does not yet exist, then \code{expr} will be evaluated and the results will be saved in \code{filename}. The optional argument \code{objects} specifies which results should be saved to the file: the default is to save all objects that were created by evaluating the expression. If the file called \code{filename} already exists, then it will be loaded. The optional argument \code{objects} specifies the names of objects that should be present in the file; a warning is issued if any of them are missing. The resulting objects can be assigned into any desired \code{destination}. The default behaviour is equivalent to evaluating \code{expr} in the current environment. If \code{force=TRUE} then \code{expr} will be evaluated (regardless of whether the file already exists or not) and the results will be saved in \code{filename}, overwriting any previously-existing file with that name. This is a convenient way to force the code to re-compute everything in an \R script file or \code{\link[utils]{Sweave}} document. } \value{ Character vector (invisible) giving the names of the objects computed or loaded. 
} \examples{ \dontrun{ if(FALSE) { reload.or.compute("mydata.rda", { x <- very.long.computation() y <- 42 }) } } } \author{\adrian and \rolf } \keyword{utilities} spatstat/man/nnwhich.ppx.Rd0000644000176200001440000000541713333543263015437 0ustar liggesusers\name{nnwhich.ppx} \alias{nnwhich.ppx} \title{Nearest Neighbours in Any Dimensions} \description{ Finds the nearest neighbour of each point in a multi-dimensional point pattern. } \usage{ \method{nnwhich}{ppx}(X, \dots, k=1) } \arguments{ \item{X}{ Multi-dimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix giving, for each point, the index of its nearest neighbour (or \code{k}th nearest neighbour). If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}). If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. } \details{ For each point in the given multi-dimensional point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic. This is the method for the class \code{"ppx"}. If there are no points in the pattern, a numeric vector of length zero is returned. 
If there is only one point, then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. } \seealso{ \code{\link{nnwhich}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) X <- ppx(data=df) m <- nnwhich(X) m2 <- nnwhich(X, k=2) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/auc.Rd0000644000176200001440000000712313333543262013736 0ustar liggesusers\name{auc} \alias{auc} \alias{auc.ppp} \alias{auc.lpp} \alias{auc.ppm} \alias{auc.kppm} \alias{auc.lppm} \title{ Area Under ROC Curve } \description{ Compute the AUC (area under the Receiver Operating Characteristic curve) for a fitted point process model. } \usage{ auc(X, \dots) \method{auc}{ppp}(X, covariate, \dots, high = TRUE) \method{auc}{ppm}(X, \dots) \method{auc}{kppm}(X, \dots) \method{auc}{lpp}(X, covariate, \dots, high = TRUE) \method{auc}{lppm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. 
} \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the AUC, the area under the Receiver Operating Characteristic curve. The ROC itself is computed by \code{\link{roc}}. For a point pattern \code{X} and a covariate \code{Z}, the AUC is a numerical index that measures the ability of the covariate to separate the spatial domain into areas of high and low density of points. Let \eqn{x_i}{x[i]} be a randomly-chosen data point from \code{X} and \eqn{U} a randomly-selected location in the study region. The AUC is the probability that \eqn{Z(x_i) > Z(U)}{Z(x[i]) > Z(U)} assuming \code{high=TRUE}. That is, AUC is the probability that a randomly-selected data point has a higher value of the covariate \code{Z} than does a randomly-selected spatial location. The AUC is a number between 0 and 1. A value of 0.5 indicates a complete lack of discriminatory power. For a fitted point process model \code{X}, the AUC measures the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. Suppose \eqn{\lambda(u)}{\lambda(u)} is the intensity function of the model. The AUC is the probability that \eqn{\lambda(x_i) > \lambda(U)}{\lambda(x[i]) > \lambda(U)}. That is, AUC is the probability that a randomly-selected data point has higher predicted intensity than does a randomly-selected spatial location. The AUC is \bold{not} a measure of the goodness-of-fit of the model (Lobo et al, 2007). } \value{ Numeric. For \code{auc.ppp} and \code{auc.lpp}, the result is a single number giving the AUC value. For \code{auc.ppm}, \code{auc.kppm} and \code{auc.lppm}, the result is a numeric vector of length 2 giving the AUC value and the theoretically expected AUC value for this model. 
} \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link{roc}} } \examples{ fit <- ppm(swedishpines ~ x+y) auc(fit) auc(swedishpines, "x") } \keyword{spatial} spatstat/man/whist.Rd0000644000176200001440000000431113333543265014323 0ustar liggesusers\name{whist} \alias{whist} \title{ Weighted Histogram } \description{ Computes the weighted histogram of a set of observations with a given set of weights. } \usage{ whist(x, breaks, weights = NULL) } \arguments{ \item{x}{ Numeric vector of observed values. } \item{breaks}{ Vector of breakpoints for the histogram. } \item{weights}{ Numeric vector of weights for the observed values. } } \details{ This low-level function computes (but does not plot) the weighted histogram of a vector of observations \code{x} using a given vector of \code{weights}. The arguments \code{x} and \code{weights} should be numeric vectors of equal length. They may include \code{NA} or infinite values. The argument \code{breaks} should be a numeric vector whose entries are strictly increasing. These values define the boundaries between the successive histogram cells. The breaks \emph{do not} have to span the range of the observations. There are \code{N-1} histogram cells, where \code{N = length(breaks)}. An observation \code{x[i]} falls in the \code{j}th cell if \code{breaks[j] <= x[i] < breaks[j+1]} (for \code{j < N-1}) or \code{breaks[j] <= x[i] <= breaks[j+1]} (for \code{j = N-1}). 
The weighted histogram value \code{h[j]} for the \code{j}th cell is the sum of \code{weights[i]} for all observations \code{x[i]} that fall in the cell. Note that, in contrast to the function \code{\link{hist}}, the function \code{whist} does not require the breakpoints to span the range of the observations \code{x}. Values of \code{x} that fall outside the range of \code{breaks} are handled separately; their total weight is returned as an attribute of the histogram. } \value{ A numeric vector of length \code{N-1} containing the histogram values, where \code{N = length(breaks)}. The return value also has attributes \code{"low"} and \code{"high"} giving the total weight of all observations that are less than the lowest breakpoint, or greater than the highest breakpoint, respectively. } \examples{ x <- rnorm(100) b <- seq(-1,1,length=21) w <- runif(100) whist(x,b,w) } \author{\adrian and \rolf with thanks to Peter Dalgaard. } \keyword{arith} spatstat/man/dppeigen.Rd0000644000176200001440000000124313333543263014757 0ustar liggesusers\name{dppeigen} \alias{dppeigen} \title{Internal function calculating eig and index} \description{This function is mainly for internal package use and is usually not called by the user.} \usage{dppeigen(model, trunc, Wscale, stationary = FALSE)} \arguments{ \item{model}{object of class \code{"detpointprocfamily"} } \item{trunc}{numeric giving the truncation} \item{Wscale}{numeric giving the scale of the window relative to a unit box} \item{stationary}{logical indicating whether the stationarity of the model should be used (only works in dimension 2).} } \value{A list} \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/linearpcfdot.inhom.Rd0000644000176200001440000001043113623712063016744 0ustar liggesusers\name{linearpcfdot.inhom} \alias{linearpcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear 
network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdadot} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross.inhom}}, \code{\link{linearpcfcross}}, \code{\link{pcfcross.inhom}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } g <- linearpcfdot.inhom(chicago, "assault", lamI, lam.) 
\dontrun{ fit <- lppm(chicago, ~marks + x) linearpcfdot.inhom(chicago, "assault", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/circdensity.Rd0000644000176200001440000000265113333543263015510 0ustar liggesusers\name{circdensity} \alias{circdensity} \title{ Density Estimation for Circular Data } \description{ Computes a kernel smoothed estimate of the probability density for angular data. } \usage{ circdensity(x, sigma = "nrd0", \dots, bw = NULL, weights=NULL, unit = c("degree", "radian")) } \arguments{ \item{x}{ Numeric vector, containing angular data. } \item{sigma}{ Smoothing bandwidth, or bandwidth selection rule, passed to \code{\link[stats]{density.default}}. } \item{bw}{Alternative to \code{sigma} for consistency with other functions.} \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}, such as \code{kernel} and \code{weights}. } \item{weights}{ Optional numeric vector of weights for the data in \code{x}. } \item{unit}{ The unit of angle in which \code{x} is expressed. } } \details{ The angular values \code{x} are smoothed using (by default) the wrapped Gaussian kernel with standard deviation \code{sigma}. } \value{ An object of class \code{"density"} (produced by \code{\link[stats]{density.default}}) which can be plotted by \code{plot} or by \code{\link{rose}}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link[stats]{density.default}}), \code{\link{rose}}. } \examples{ ang <- runif(1000, max=360) rose(circdensity(ang, 12)) } \keyword{nonparametric} \keyword{smooth} spatstat/man/localKcross.Rd0000644000176200001440000001241713503055420015440 0ustar liggesusers\name{localKcross} \alias{localKcross} \alias{localLcross} \title{Local Multitype K Function (Cross-Type)} \description{ for a multitype point pattern, computes the cross-type version of the local K function. 
} \usage{ localKcross(X, from, to, \dots, rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localLcross(X, from, to, \dots, rmax = NULL, correction = "Ripley") } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} with marks which are a factor). } \item{\dots}{ Further arguments passed from \code{localLcross} to \code{localKcross}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{to}{ Type of points to which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{ Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ Given a multitype spatial point pattern \code{X}, the local cross-type \eqn{K} function \code{localKcross} is the local version of the multitype \eqn{K} function \code{\link{Kcross}}. Recall that \code{Kcross(X, from, to)} is a sum of contributions from all pairs of points in \code{X} where the first point belongs to \code{from} and the second point belongs to type \code{to}. 
The \emph{local} cross-type \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and it consists of all the contributions to the cross-type \eqn{K} function that originate from point \code{X[i]}: \deqn{ K_{i,from,to}(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ K[i,from,to](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} belonging to type \code{to}, that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{K_{i,from,to}(r)}{K[i,from,to](r)} can also be interpreted as one of the summands that contributes to the global estimate of the \code{\link{Kcross}} function. By default, the function \eqn{K_{i,from,to}(r)}{K[i,from,to](r)} is computed for a range of \eqn{r} values for each point \eqn{i} belonging to type \code{from}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} belonging to type \code{from}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X} belonging to type \code{from}. The local cross-type \eqn{L} function \code{localLcross} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/(2\pi)}}{L(r) = sqrt(K(r)/(2*pi))}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern that belong to type \code{from}. 
If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{localK}}, \code{\link{localL}}. Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{\link{localKcross.inhom}} and \code{\link{localLinhom}}. } \examples{ X <- amacrine # compute all the local Lcross functions L <- localLcross(X) # plot all the local Lcross functions against r plot(L, main="local Lcross functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 0.1 metres L12 <- localLcross(X, rvalue=0.1) } \author{ \ege and \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat/man/pool.Rd0000644000176200001440000000171713333543264014144 0ustar liggesusers\name{pool} \alias{pool} \title{ Pool Data } \description{ Pool the data from several objects of the same class. } \usage{ pool(...) } \arguments{ \item{\dots}{ Objects of the same type. } } \details{ The function \code{pool} is generic. There are methods for several classes, listed below. \code{pool} is used to combine the data from several objects of the same type, and to compute statistics based on the combined dataset. It may be used to pool the estimates obtained from replicated datasets. 
It may also be used in high-performance computing applications, when the objects \code{\dots} have been computed on different processors or in different batch runs, and we wish to combine them. } \value{ An object of the same class as the arguments \code{\dots}. } \seealso{ \code{\link{pool.envelope}}, \code{\link{pool.fasp}}, \code{\link{pool.rat}}, \code{\link{pool.fv}} } \author{\adrian and \rolf } \keyword{spatial} spatstat/man/fitted.lppm.Rd0000644000176200001440000000527113333543263015417 0ustar liggesusers\name{fitted.lppm} \alias{fitted.lppm} \title{ Fitted Intensity for Point Process on Linear Network } \description{ Given a point process model fitted to a point pattern on a linear network, compute the fitted intensity of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. } \usage{ \method{fitted}{lppm}(object, \dots, dataonly = FALSE, new.coef = NULL, leaveoneout = FALSE) } \arguments{ \item{object}{ Fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ Ignored. } \item{dataonly}{ Logical value indicating whether to computed fitted intensities at the points of the original point pattern dataset (\code{dataonly=TRUE}) or at all the quadrature points of the quadrature scheme used to fit the model (\code{dataonly=FALSE}, the default). } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. } } \details{ This is a method for the generic function \code{\link[stats]{fitted}} for the class \code{"lppm"} of fitted point process models on a linear network. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. 
They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}. } \value{ A vector containing the values of the fitted spatial trend. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{lppm}}, \code{\link{predict.lppm}} } \examples{ fit <- lppm(spiders~x+y) a <- fitted(fit) b <- fitted(fit, dataonly=TRUE) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/summary.im.Rd0000644000176200001440000000360613333543264015273 0ustar liggesusers\name{summary.im} \alias{summary.im} \alias{print.summary.im} \title{Summarizing a Pixel Image} \description{ \code{summary} method for class \code{"im"}. } \usage{ \method{summary}{im}(object, \dots) \method{print}{summary.im}(x, \dots) } \arguments{ \item{object}{A pixel image.} \item{\dots}{Ignored.} \item{x}{Object of class \code{"summary.im"} as returned by \code{summary.im}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"im"}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. \code{summary.im} extracts information about the pixel image, and \code{print.summary.im} prints this information in a comprehensible format. 
In normal usage, \code{print.summary.im} is invoked implicitly when the user calls \code{summary.im} without assigning its value to anything. See the examples. The information extracted by \code{summary.im} includes \describe{ \item{range}{The range of the image values.} \item{mean}{The mean of the image values.} \item{integral}{The ``integral'' of the image values, calculated as the sum of the image values multiplied by the area of one pixel.} \item{dim}{The dimensions of the pixel array: \code{dim[1]} is the number of rows in the array, corresponding to the \bold{y} coordinate.} } } \value{ \code{summary.im} returns an object of class \code{"summary.im"}, while \code{print.summary.im} returns \code{NULL}. } \seealso{ \code{\link{mean.im}}, \code{\link{integral.im}}, \code{\link{anyNA.im}} } \examples{ # make an image X <- as.im(function(x,y) {x^2}, unit.square()) # summarize it summary(X) # save the summary s <- summary(X) # print it print(s) s # extract stuff s$dim s$range s$integral } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/inside.boxx.Rd0000644000176200001440000000353513333543263015424 0ustar liggesusers\name{inside.boxx} \alias{inside.boxx} \title{Test Whether Points Are Inside A Multidimensional Box} \description{ Test whether points lie inside or outside a given multidimensional box. } \usage{ inside.boxx(\dots, w) } \arguments{ \item{\dots}{ Coordinates of points to be tested. One vector for each dimension (all of same length). (Alternatively, a single point pattern object of class \code{"\link{ppx}"} or its coordinates as a \code{"\link{hyperframe}"}) } \item{w}{A window. This should be an object of class \code{\link{boxx}}, or can be given in any format acceptable to \code{\link{as.boxx}()}. } } \value{ Logical vector whose \code{i}th entry is \code{TRUE} if the corresponding point is inside \code{w}. 
} \details{ This function tests whether each of the points \code{(x[i],y[i])} lies inside or outside the window \code{w} and returns \code{TRUE} if it is inside. The boundary of the window is treated as being inside. Normally each argument provided (except \code{w}) must be numeric vectors of equal length (length zero is allowed) containing the coordinates of points. Alternatively a single point pattern (object of class \code{"ppx"}) can be given; then the coordinates of the point pattern are extracted. } \seealso{ \code{\link{boxx}}, \code{\link{as.boxx}} } \examples{ # Random points in box with side [0,2] w <- boxx(c(0,2), c(0,2), c(0,2)) # Random points in box with side [-1,3] x <- runif(30, min=-1, max=3) y <- runif(30, min=-1, max=3) z <- runif(30, min=-1, max=3) # Points falling in smaller box ok <- inside.boxx(x, y, z, w=w) # Same using a point pattern as argument: X <- ppx(data = cbind(x, y, z), domain = boxx(c(0,3), c(0,3), c(0,3))) ok2 <- inside.boxx(X, w=w) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/rGaussPoisson.Rd0000644000176200001440000000427613333543264016015 0ustar liggesusers\name{rGaussPoisson} \alias{rGaussPoisson} \title{Simulate Gauss-Poisson Process} \description{ Generate a random point pattern, a simulated realisation of the Gauss-Poisson Process. } \usage{ rGaussPoisson(kappa, r, p2, win = owin(c(0,1),c(0,1)), \dots, nsim=1, drop=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{r}{ Diameter of each cluster that consists of exactly 2 points. } \item{p2}{ Probability that a cluster contains exactly 2 points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{Ignored.} \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of the point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the Gauss-Poisson point process inside the window \code{win}. The process is constructed by first generating a Poisson point process of parent points with intensity \code{kappa}. Then each parent point is either retained (with probability \code{1 - p2}) or replaced by a pair of points at a fixed distance \code{r} apart (with probability \code{p2}). In the case of clusters of 2 points, the line joining the two points has uniform random orientation. In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rNeymanScott}} } \examples{ pp <- rGaussPoisson(30, 0.07, 0.5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/with.hyperframe.Rd0000644000176200001440000000504713333543265016310 0ustar liggesusers\name{with.hyperframe} \alias{with.hyperframe} \title{Evaluate an Expression in Each Row of a Hyperframe} \description{ An expression, involving the names of columns in a hyperframe, is evaluated separately for each row of the hyperframe. } \usage{ \method{with}{hyperframe}(data, expr, ..., simplify = TRUE, ee = NULL, enclos=NULL) } \arguments{ \item{data}{A hyperframe (object of class \code{"hyperframe"}) containing data. } \item{expr}{An \R language expression to be evaluated.} \item{\dots}{Ignored.} \item{simplify}{ Logical. 
If \code{TRUE}, the return value will be simplified to a vector whenever possible. } \item{ee}{ Alternative form of \code{expr}, as an object of class \code{"expression"}. } \item{enclos}{ An environment in which to search for objects that are not found in the hyperframe. Defaults to \code{\link{parent.frame}()}. } } \details{ This function evaluates the expression \code{expr} in each row of the hyperframe \code{data}. It is a method for the generic function \code{\link{with}}. The argument \code{expr} should be an \R language expression in which each variable name is either the name of a column in the hyperframe \code{data}, or the name of an object in the parent frame (the environment in which \code{with} was called.) The argument \code{ee} can be used as an alternative to \code{expr} and should be an expression object (of class \code{"expression"}). For each row of \code{data}, the expression will be evaluated so that variables which are column names of \code{data} are interpreted as the entries for those columns in the current row. For example, if a hyperframe \code{h} has columns called \code{A} and \code{B}, then \code{with(h, A != B)} inspects each row of \code{data} in turn, tests whether the entries in columns \code{A} and \code{B} are equal, and returns the \eqn{n} logical values. } \value{ Normally a list of length \eqn{n} (where \eqn{n} is the number of rows) containing the results of evaluating the expression for each row. If \code{simplify=TRUE} and each result is a single atomic value, then the result is a vector or factor containing the same values. 
} \author{\adrian and \rolf } \seealso{ \code{\link{hyperframe}}, \code{\link{plot.hyperframe}} } \examples{ # generate Poisson point patterns with intensities 10 to 100 H <- hyperframe(L=seq(10,100, by=10)) X <- with(H, rpoispp(L)) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/densityQuick.lpp.Rd0000644000176200001440000000734413515322436016441 0ustar liggesusers\name{densityQuick.lpp} \alias{densityQuick.lpp} \title{ Kernel Estimation of Intensity on a Network using a 2D Kernel } \description{ Estimates the intensity of a point process on a linear network using a two-dimensional smoothing kernel. } \usage{ densityQuick.lpp(X, sigma=NULL, \dots, kernel="gaussian", at = c("pixels", "points"), what = c("estimate", "se", "var"), leaveoneout = TRUE, diggle = FALSE, edge2D = FALSE, weights = NULL, positive = FALSE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{sigma}{ Smoothing bandwidth. A single numeric value, in the same units as the coordinates of \code{X}. Alternatively \code{sigma} may be a function which selects a bandwidth when applied to \code{X}, for example, \code{\link{bw.scott.iso}}. } \item{\dots}{ Additional arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{kernel}{ String (partially matched) specifying the smoothing kernel. Current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}. } \item{at}{ String (partially matched) specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{what}{ String (partially matched) specifying whether to calculate the intensity estimate, or its estimated standard error, or its estimated variance. } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. 
} \item{diggle}{ Logical value specifying whether to use the \sQuote{Diggle} correction. } \item{edge2D}{ Logical value specifying whether to apply the usual two-dimensional edge correction procedure to the numerator and denominator of the estimate. } \item{weights}{ Optional weights to be attached to the points. A numeric vector, an \code{expression}, or a pixel image. } \item{positive}{ Logical value indicating whether to force the resulting values to be positive. Default is \code{FALSE} for the sake of speed. } } \details{ Kernel smoothing is applied to the points of \code{x} using a two-dimensional Gaussian kernel, as described in Rakshit et al (2019). The result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. Other techniques for kernel smoothing on a network are implemented in \code{\link{density.lpp}}. The main advantages of using a two-dimensional kernel are very fast computation and insensitivity to changes in the network geometry. The main disadvantage is that it ignores the connectivity of the network. See Rakshit et al (2019) for further explanation. } \value{ Image on a linear network (object of class \code{"linim"}). } \references{ Rakshit, S., Davies, T., Moradi, M., McSwiggan, G., Nair, G., Mateu, J. and Baddeley, A. (2019) Fast kernel smoothing of point patterns on a large network using 2D convolution. \emph{International Statistical Review}. In press. Published online 06 June 2019. DOI: 10.1111/insr.12327. } \author{ Adrian Baddeley, Suman Rakshit and Tilman Davies } \seealso{ \code{\link{density.lpp}}, the main function for density estimation on a network. \code{\link{bw.scott}}, \code{\link{bw.scott.iso}} for bandwidth selection. 
} \examples{ X <- unmark(chicago) plot(densityQuick.lpp(X, 500)) plot(densityQuick.lpp(X, 500, diggle=TRUE)) plot(densityQuick.lpp(X, bw.scott.iso)) plot(densityQuick.lpp(X, 500, what="se")) } \keyword{spatial} \keyword{nonparametric} spatstat/man/nearestsegment.Rd0000644000176200001440000000305713333543263016215 0ustar liggesusers\name{nearestsegment} \alias{nearestsegment} \title{Find Line Segment Nearest to Each Point} \description{ Given a point pattern and a line segment pattern, this function finds the nearest line segment for each point. } \usage{ nearestsegment(X, Y) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{Y}{A line segment pattern (object of class \code{"psp"}).} } \details{ The distance between a point \code{x} and a straight line segment \code{y} is defined to be the shortest Euclidean distance between \code{x} and any location on \code{y}. This algorithm first calculates the distance from each point of \code{X} to each segment of \code{Y}. Then it determines, for each point \code{x} in \code{X}, which segment of \code{Y} is closest. The index of this segment is returned. } \value{ Integer vector \code{v} (of length equal to the number of points in \code{X}) identifying the nearest segment to each point. If \code{v[i] = j}, then \code{Y[j]} is the line segment lying closest to \code{X[i]}. } \author{ \adrian and \rolf } \seealso{ \code{\link{project2segment}} to project each point of \code{X} to a point lying on one of the line segments. Use \code{\link{distmap.psp}} to identify the nearest line segment for each pixel in a grid. 
} \examples{ X <- runifpoint(3) Y <- as.psp(matrix(runif(20), 5, 4), window=owin()) v <- nearestsegment(X,Y) plot(Y) plot(X, add=TRUE) plot(X[1], add=TRUE, col="red") plot(Y[v[1]], add=TRUE, lwd=2, col="red") } \keyword{spatial} \keyword{math} spatstat/man/psst.Rd0000644000176200001440000001213313571674202014157 0ustar liggesusers\name{psst} \alias{psst} \title{ Pseudoscore Diagnostic For Fitted Model against General Alternative } \description{ Given a point process model fitted to a point pattern dataset, and any choice of functional summary statistic, this function computes the pseudoscore test statistic of goodness-of-fit for the model. } \usage{ psst(object, fun, r = NULL, breaks = NULL, ..., model=NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef=NULL, hi.res=NULL, funargs = list(correction="best"), verbose=TRUE) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{fun}{ Summary function to be applied to each point pattern. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{S(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. 
If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{funargs}{ List of additional arguments to be passed to \code{fun}. } \item{verbose}{ Logical value determining whether to print progress reports during the computation. } } \details{ Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. Given a functional summary statistic \eqn{S}, consider a family of alternative models obtained by exponential tilting of the null model by \eqn{S}. The pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r) - \int_W \Delta S(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x, r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. 
This algorithm computes \eqn{V(r)} by direct evaluation of the sum and integral. It is computationally intensive, but it is available for any summary statistic \eqn{S(r)}. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Special cases: \code{\link{psstA}}, \code{\link{psstG}}. Alternative functions: \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=8)} G0 <- psst(fit0, Gest) G0 if(interactive()) plot(G0) } \keyword{spatial} \keyword{models} spatstat/man/rho2hat.Rd0000644000176200001440000000730513333543264014541 0ustar liggesusers\name{rho2hat} \alias{rho2hat} \title{ Smoothed Relative Density of Pairs of Covariate Values } \description{ Given a point pattern and two spatial covariates \eqn{Z_1}{Z1} and \eqn{Z_2}{Z2}, construct a smooth estimate of the relative risk of the pair \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } \usage{ rho2hat(object, cov1, cov2, ..., method=c("ratio", "reweight")) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}). } \item{cov1,cov2}{ The two covariates. 
Each argument is either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location, or one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} to smooth the scatterplots. } \item{method}{ Character string determining the smoothing method. See Details. } } \details{ This is a bivariate version of \code{\link{rhohat}}. If \code{object} is a point pattern, this command produces a smoothed version of the scatterplot of the values of the covariates \code{cov1} and \code{cov2} observed at the points of the point pattern. The covariates \code{cov1,cov2} must have continuous values. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z_1(u), Z_2(u)) \kappa(u) }{ lambda(u) = rho(Z1(u), Z2(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}, and \eqn{\rho(z_1,z_2)}{rho(z1, z2)} is a function to be estimated. The algorithm computes a smooth estimate of the function \eqn{\rho}{rho}. The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z_1, z_2)}{rho(z1, z2)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariate \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. 
\item If \code{method="reweight"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by applying density estimation to the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariate \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } } \value{ A pixel image (object of class \code{"im"}). Also belongs to the special class \code{"rho2hat"} which has a plot method. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. } \author{ \adrian } \seealso{ \code{\link{rhohat}}, \code{\link{methods.rho2hat}} } \examples{ data(bei) attach(bei.extra) plot(rho2hat(bei, elev, grad)) fit <- ppm(bei, ~elev, covariates=bei.extra) \dontrun{ plot(rho2hat(fit, elev, grad)) } plot(rho2hat(fit, elev, grad, method="reweight")) } \keyword{spatial} \keyword{models} spatstat/man/cut.ppp.Rd0000644000176200001440000001102513421006320014536 0ustar liggesusers\name{cut.ppp} \alias{cut.ppp} \title{Classify Points in a Point Pattern} \description{ Classifies the points in a point pattern into distinct types according to the numerical marks in the pattern, or according to another variable. } \usage{ \method{cut}{ppp}(x, z=marks(x), ...) } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{z}{ Data determining the classification. A numeric vector, a factor, a pixel image, a window, a tessellation, or a string giving the name of a column of marks or the name of a spatial coordinate. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values in \code{z} to factor values in the output. See \code{\link{cut.default}}. 
} } \value{ A multitype point pattern, that is, a point pattern object (of class \code{"ppp"}) with a \code{marks} vector that is a factor. } \details{ This function has the effect of classifying each point in the point pattern \code{x} into one of several possible types. The classification is based on the dataset \code{z}, which may be either \itemize{ \item a factor (of length equal to the number of points in \code{z}) determining the classification of each point in \code{x}. Levels of the factor determine the classification. \item a numeric vector (of length equal to the number of points in \code{z}). The range of values of \code{z} will be divided into bands (the number of bands is determined by \code{\dots}) and \code{z} will be converted to a factor using \code{\link{cut.default}}. \item a pixel image (object of class \code{"im"}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}, see \code{\link{tess}}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a window (object of class \code{"owin"}). Each point of \code{x} will be classified according to whether it falls inside or outside this window. \item a character string, giving the name of one of the columns of \code{marks(x)}, if this is a data frame. \item a character string \code{"x"} or \code{"y"} identifying one of the spatial coordinates. } The default is to take \code{z} to be the vector of marks in \code{x} (or the first column in the data frame of marks of \code{x}, if it is a data frame). If the marks are numeric, then the range of values of the numerical marks is divided into several intervals, and each interval is associated with a level of a factor. The result is a marked point pattern, with the same window and point locations as \code{x}, but with the numeric mark of each point discretised by replacing it by the factor level. 
This is a convenient way to transform a marked point pattern which has numeric marks into a multitype point pattern, for example to plot it or analyse it. See the examples. To select some points from a point pattern, use the subset operators \code{\link{[.ppp}} or \code{\link{subset.ppp}} instead. } \seealso{ \code{\link{cut}}, \code{\link{ppp.object}}, \code{\link{tess}} } \examples{ # (1) cutting based on numeric marks of point pattern trees <- longleaf # Longleaf Pines data # the marks are positive real numbers indicating tree diameters. \testonly{ # smaller dataset trees <- trees[seq(1, npoints(trees), by=80)] } \dontrun{ plot(trees) } # cut the range of tree diameters into three intervals long3 <- cut(trees, breaks=3) \dontrun{ plot(long3) } # adult trees defined to have diameter at least 30 cm long2 <- cut(trees, breaks=c(0,30,100), labels=c("Sapling", "Adult")) plot(long2) plot(long2, cols=c("green","blue")) # (2) cutting based on another numeric vector # Divide Swedish Pines data into 3 classes # according to nearest neighbour distance swedishpines plot(cut(swedishpines, nndist(swedishpines), breaks=3)) # (3) cutting based on tessellation # Divide Swedish Pines study region into a 4 x 4 grid of rectangles # and classify points accordingly tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5)) plot(cut(swedishpines, tes)) plot(tes, lty=2, add=TRUE) # (4) inside/outside a given region with(murchison, cut(gold, greenstone)) # (5) multivariate marks finpines cut(finpines, "height", breaks=4) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/nestsplit.Rd0000644000176200001440000000447513442413713015220 0ustar liggesusers\name{nestsplit} \alias{nestsplit} \title{ Nested Split } \description{ Applies two splitting operations to a point pattern, producing a list of lists of patterns. } \usage{ nestsplit(X, \dots) } \arguments{ \item{X}{ Point pattern to be split. Object of class \code{"ppp"}. 
} \item{\dots}{ Data determining the splitting factors or splitting regions. See Details. } } \details{ This function splits the point pattern \code{X} into several sub-patterns using \code{\link{split.ppp}}, then splits each of the sub-patterns into sub-sub-patterns using \code{\link{split.ppp}} again. The result is a hyperframe containing the sub-sub-patterns and two factors indicating the grouping. The arguments \code{\dots} determine the two splitting factors or splitting regions. Each argument may be: \itemize{ \item a factor (of length equal to the number of points in \code{X}) \item the name of a column of marks of \code{X} (provided this column contains factor values) \item a tessellation (class \code{"tess"}) \item a pixel image (class \code{"im"}) with factor values \item a window (class \code{"owin"}) \item identified by name (in the form \code{name=value}) as one of the formal arguments of \code{\link{quadrats}} or \code{\link{tess}} } The arguments will be processed to yield a list of two splitting factors/tessellations. The splits will be applied to \code{X} consecutively to produce the sub-sub-patterns. } \value{ A hyperframe with three columns. The first column contains the sub-sub-patterns. The second and third columns are factors which identify the grouping according to the two splitting factors. } \author{ Original idea by Ute Hahn. Code by \spatstatAuthors. 
} \seealso{ \code{\link{split.ppp}}, \code{\link{quantess}} } \examples{ # factor and tessellation Nft <- nestsplit(amacrine, marks(amacrine), quadrats(amacrine, 3, 1)) Ntf <- nestsplit(amacrine, quadrats(amacrine, 3, 1), marks(amacrine)) Ntf # two factors big <- with(marks(betacells), area > 300) Nff <- nestsplit(betacells, "type", factor(big)) # two tessellations Tx <- quantess(redwood, "x", 4) Td <- dirichlet(runifpoint(5, Window(redwood))) Ntt <- nestsplit(redwood, Td, Tx) Ntt2 <- nestsplit(redwood, Td, ny=3) } \keyword{spatial} \keyword{manip} spatstat/man/ord.family.Rd0000644000176200001440000000321613333543263015232 0ustar liggesusers\name{ord.family} \alias{ord.family} \title{Ord Interaction Process Family} \description{ An object describing the family of all Ord interaction point processes } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of point process models introduced by Ord (1977). If you need to create a specific Ord-type model for use in analysis, use the function \code{\link{OrdThresh}} or \code{\link{Ord}}. Anyway, \code{ord.family} is an object of class \code{"isf"} containing a function \code{ord.family$eval} for evaluating the sufficient statistics of any Ord type point process model taking an exponential family form. } \seealso{ \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? 
\emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/lurking.mppm.Rd0000644000176200001440000000671113333543263015614 0ustar liggesusers\name{lurking.mppm} \alias{lurking.mppm} \title{ Lurking Variable Plot for Multiple Point Patterns } \description{ Generate a lurking variable plot of spatial point process residuals against a covariate, for a model fitted to several point patterns. } \usage{ \method{lurking}{mppm}(object, covariate, type="eem", \dots, separate = FALSE, plot.it = TRUE, covname, oldstyle = FALSE, nx = 512, main="") } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{covariate}{ The covariate to be used on the horizontal axis. Either an \code{expression} which can be evaluated in the original data, or a list of pixel images, one image for each point pattern in the original data. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. See \code{\link{diagnose.ppm}} for all possible choices. } \item{\dots}{ Additional arguments passed to \code{\link{lurking.ppm}}, including arguments controlling the plot. } \item{separate}{ Logical value indicating whether to compute a separate lurking variable plot for each of the original point patterns. If \code{FALSE} (the default), a single lurking-variable plot is produced by combining residuals from all patterns. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. 
} \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{nx}{ Integer. Number of covariate values to be used in the plot. } \item{main}{ Character string giving a main title for the plot. } } \details{ This function generates a \sQuote{lurking variable} plot for a point process model fitted to several point patterns. Residuals from the model represented by \code{object} are plotted against the covariate specified by \code{covariate}. This plot can be used to reveal departures from the fitted model. The function \code{lurking} is generic. This is the method for the class \code{mppm}. The argument \code{object} must be a fitted point process model object of class \code{"mppm"}) produced by the model-fitting algorithm \code{\link{mppm}}. } \value{ If \code{separate=FALSE} (the default), the return value is an object belonging to the class \code{"lurk"}, for which there are methods for \code{plot} and \code{print}. See \code{\link{lurking}} for details of the format. If \code{separate=TRUE}, the result is a list of such objects, and also belongs to the class \code{anylist} so that it can be printed and plotted. } \author{ \adrian, with thanks to Nicholas Read. } \seealso{ \code{\link{lurking.ppm}} } \examples{ fit <- mppm(Points ~ Image + Group, demohyper) lurking(fit, expression(Image), type="P") lurking(fit, expression(Image), type="P", separate=TRUE) } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/rpoint.Rd0000644000176200001440000001003613333543264014500 0ustar liggesusers\name{rpoint} \alias{rpoint} \title{Generate N Random Points} \description{ Generate a random point pattern containing \eqn{n} independent, identically distributed random points with any specified distribution. 
} \usage{ rpoint(n, f, fmax=NULL, win=unit.square(), \dots, giveup=1000, verbose=FALSE, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points to generate. } \item{f}{ The probability density of the points, possibly un-normalised. Either a constant, a function \code{f(x,y,...)}, or a pixel image object. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image. } \item{\dots}{ Arguments passed to the function \code{f}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent, identically distributed random points with common probability density proportional to \code{f}. The argument \code{f} may be \describe{ \item{a numerical constant:}{ uniformly distributed random points will be generated. } \item{a function:}{random points will be generated in the window \code{win} with probability density proportional to \code{f(x,y,...)} where \code{x} and \code{y} are the cartesian coordinates. The function \code{f} must accept two \emph{vectors} of coordinates \code{x,y} and return the corresponding vector of function values. Additional arguments \code{...} of any kind may be passed to the function. 
} \item{a pixel image:}{if \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}) then random points will be generated in the window of this pixel image, with probability density proportional to the pixel values of \code{f}. } } The algorithm is as follows: \itemize{ \item If \code{f} is a constant, we invoke \code{\link{runifpoint}}. \item If \code{f} is a function, then we use the rejection method. Proposal points are generated from the uniform distribution. A proposal point \eqn{(x,y)} is accepted with probability \code{f(x,y,...)/fmax} and otherwise rejected. The algorithm continues until \code{n} points have been accepted. It gives up after \code{giveup * n} proposals if there are still fewer than \code{n} points. \item If \code{f} is a pixel image, then a random sequence of pixels is selected (using \code{\link{sample}}) with probabilities proportional to the pixel values of \code{f}. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. } The algorithm for pixel images is more efficient than that for functions. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{runifpoint}} } \examples{ # 100 uniform random points in the unit square X <- rpoint(100) # 100 random points with probability density proportional to x^2 + y^2 X <- rpoint(100, function(x,y) { x^2 + y^2}, 1) # `fmax' may be omitted X <- rpoint(100, function(x,y) { x^2 + y^2}) # irregular window data(letterR) X <- rpoint(100, function(x,y) { x^2 + y^2}, win=letterR) # make a pixel image Z <- setcov(letterR) # 100 points with density proportional to pixel values X <- rpoint(100, Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/split.ppp.Rd0000644000176200001440000001446413575344136015134 0ustar liggesusers\name{split.ppp} \alias{split.ppp} \alias{split<-.ppp} \title{Divide Point Pattern into Sub-patterns} \description{ Divides a point pattern into several sub-patterns, according to their marks, or according to any user-specified grouping. } \usage{ \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=NULL, reduce=FALSE, \dots) \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=NULL, \dots) <- value } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{f}{ Data determining the grouping. Either a factor, a logical vector, a pixel image with factor values, a tessellation, a window, or the name of one of the columns of marks. } \item{drop}{ Logical. Determines whether empty groups will be deleted. } \item{un}{ Logical. Determines whether the resulting subpatterns will be unmarked (i.e. whether marks will be removed from the points in each subpattern). } \item{reduce}{ Logical. Determines whether to delete the column of marks used to split the pattern, when the marks are a data frame. } \item{\dots}{ Other arguments are ignored. } \item{value}{ List of point patterns. } } \value{ The value of \code{split.ppp} is a list of point patterns. The components of the list are named by the levels of \code{f}. 
The list also has the class \code{"splitppp"}. The assignment form \code{split<-.ppp} returns the updated point pattern \code{x}. } \details{ The function \code{split.ppp} divides up the points of the point pattern \code{x} into several sub-patterns according to the values of \code{f}. The result is a list of point patterns. The argument \code{f} may be \itemize{ \item a factor, of length equal to the number of points in \code{x}. The levels of \code{f} determine the destination of each point in \code{x}. The \code{i}th point of \code{x} will be placed in the sub-pattern \code{split.ppp(x)$l} where \code{l = f[i]}. \item a pixel image (object of class \code{"im"}) with factor values. The pixel value of \code{f} at each point of \code{x} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a window (object of class \code{"owin"}). Each point of \code{x} will be classified according to whether it falls inside or outside this window. \item a character string, matching the name of one of the columns of marks, if \code{marks(x)} is a data frame. This column should be a factor. } If \code{f} is missing, then it will be determined by the marks of the point pattern. The pattern \code{x} can be either \itemize{ \item a multitype point pattern (a marked point pattern whose marks vector is a factor). Then \code{f} is taken to be the marks vector. The effect is that the points of each type are separated into different point patterns. \item a marked point pattern with a data frame of marks, containing at least one column that is a factor. The first such column will be used to determine the splitting factor \code{f}. } Some of the sub-patterns created by the split may be empty. If \code{drop=TRUE}, then empty sub-patterns will be deleted from the list. If \code{drop=FALSE} then they are retained. 
The argument \code{un} determines how to handle marks in the case where \code{x} is a marked point pattern. If \code{un=TRUE} then the marks of the points will be discarded when they are split into groups, while if \code{un=FALSE} then the marks will be retained. If \code{f} and \code{un} are both missing, then the default is \code{un=TRUE} for multitype point patterns and \code{un=FALSE} for marked point patterns with a data frame of marks. If the marks of \code{x} are a data frame, then \code{split(x, reduce=TRUE)} will discard only the column of marks that was used to split the pattern. This applies only when the argument \code{f} is missing. The result of \code{split.ppp} has class \code{"splitppp"} and can be plotted using \code{\link{plot.splitppp}}. The assignment function \code{split<-.ppp} updates the point pattern \code{x} so that it satisfies \code{split(x, f, drop, un) = value}. The argument \code{value} is expected to be a list of point patterns, one for each level of \code{f}. These point patterns are expected to be compatible with the type of data in the original pattern \code{x}. Splitting can also be undone by the function \code{\link{superimpose}}, but this typically changes the ordering of the data. 
} \seealso{ \code{\link{cut.ppp}}, \code{\link{plot.splitppp}}, \code{\link{superimpose}}, \code{\link{im}}, \code{\link{tess}}, \code{\link{ppp.object}} } \examples{ # (1) Splitting by marks # Multitype point pattern: separate into types u <- split(amacrine) # plot them plot(split(amacrine)) # the following are equivalent: amon <- split(amacrine)$on amon <- unmark(amacrine[amacrine$marks == "on"]) amon <- subset(amacrine, marks == "on", -marks) # the following are equivalent: amon <- split(amacrine, un=FALSE)$on amon <- amacrine[amacrine$marks == "on"] # Scramble the locations of the 'on' cells X <- amacrine u <- split(X) u$on <- runifpoint(ex=amon) split(X) <- u # Point pattern with continuous marks trees <- longleaf \testonly{ # smaller dataset trees <- trees[seq(1, npoints(trees), by=80)] } # cut the range of tree diameters into three intervals # using cut.ppp long3 <- cut(trees, breaks=3) # now split them long3split <- split(long3) # (2) Splitting by a factor # Unmarked point pattern swedishpines # cut & split according to nearest neighbour distance f <- cut(nndist(swedishpines), 3) u <- split(swedishpines, f) # (3) Splitting over a tessellation tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5)) v <- split(swedishpines, tes) # (4) how to apply an operation to selected points: # split into components, transform desired component, then un-split # e.g. apply random jitter to 'on' points only X <- amacrine Y <- split(X) Y$on <- rjitter(Y$on, 0.1) split(X) <- Y } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/pointsOnLines.Rd0000644000176200001440000000334513333543264015776 0ustar liggesusers\name{pointsOnLines} \alias{pointsOnLines} \title{Place Points Evenly Along Specified Lines} \description{ Given a line segment pattern, place a series of points at equal distances along each line segment. 
} \usage{ pointsOnLines(X, eps = NULL, np = 1000, shortok=TRUE) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}).} \item{eps}{Spacing between successive points.} \item{np}{Approximate total number of points (incompatible with \code{eps}).} \item{shortok}{ Logical. If \code{FALSE}, very short segments (of length shorter than \code{eps}) will not generate any points. If \code{TRUE}, a very short segment will be represented by its midpoint. } } \details{ For each line segment in the pattern \code{X}, a succession of points is placed along the line segment. These points are equally spaced at a distance \code{eps}, except for the first and last points in the sequence. The spacing \code{eps} is measured in coordinate units of \code{X}. If \code{eps} is not given, then it is determined by \code{eps = len/np} where \code{len} is the total length of the segments in \code{X}. The actual number of points will then be slightly larger than \code{np}. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as \code{X}. The result also has an attribute called \code{"map"} which maps the points to their parent line segments. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- pointsOnLines(X, eps=0.05) plot(X, main="") plot(Y, add=TRUE, pch="+") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/print.psp.Rd0000644000176200001440000000133613333543264015125 0ustar liggesusers\name{print.psp} \alias{print.psp} \title{Print Brief Details of a Line Segment Pattern Dataset} \description{ Prints a very brief description of a line segment pattern dataset. } \usage{ \method{print}{psp}(x, \dots) } \arguments{ \item{x}{Line segment pattern (object of class \code{"psp"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the line segment pattern \code{x} is printed. 
This is a method for the generic function \code{\link{print}}. } \seealso{ \code{\link{print}}, \code{\link{print.owin}}, \code{\link{summary.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) a } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/endpoints.psp.Rd0000644000176200001440000000503413474140142015765 0ustar liggesusers\name{endpoints.psp} \alias{endpoints.psp} \title{Endpoints of Line Segment Pattern} \description{ Extracts the endpoints of each line segment in a line segment pattern. } \usage{ endpoints.psp(x, which="both") } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{which}{ String specifying which endpoint or endpoints should be returned. See Details. } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function extracts one endpoint, or both endpoints, from each of the line segments in \code{x}, and returns these points as a point pattern object. The argument \code{which} determines which endpoint or endpoints of each line segment should be returned: \describe{ \item{\code{which="both"}}{ (the default): both endpoints of each line segment are returned. The result is a point pattern with twice as many points as there are line segments in \code{x}. } \item{\code{which="first"}}{ select the first endpoint of each line segment (returns the points with coordinates \code{x$ends$x0, x$ends$y0}). } \item{\code{which="second"}}{ select the second endpoint of each line segment (returns the points with coordinates \code{x$ends$x1, x$ends$y1}). } \item{\code{which="left"}}{ select the left-most endpoint (the endpoint with the smaller \eqn{x} coordinate) of each line segment. } \item{\code{which="right"}}{ select the right-most endpoint (the endpoint with the greater \eqn{x} coordinate) of each line segment. } \item{\code{which="lower"}}{ select the lower endpoint (the endpoint with the smaller \eqn{y} coordinate) of each line segment. 
} \item{\code{which="upper"}}{ select the upper endpoint (the endpoint with the greater \eqn{y} coordinate) of each line segment. } } The result is a point pattern. It also has an attribute \code{"id"} which is an integer vector identifying the segment which contributed each point. } \seealso{ \code{\link{psp.object}}, \code{\link{ppp.object}}, \code{\link{marks.psp}}, \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{lengths.psp}}, \code{\link{angles.psp}}, \code{\link{extrapolate.psp}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a) b <- endpoints.psp(a, "left") plot(b, add=TRUE) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/linearKcross.inhom.Rd0000644000176200001440000001101013623712063016723 0ustar liggesusers\name{linearKcross.inhom} \alias{linearKcross.inhom} \title{ Inhomogeneous multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. 
Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdaJ} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. 
If \code{lambdaI} or \code{lambdaJ} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } K <- linearKcross.inhom(chicago, "assault", "robbery", lamI, lamJ) \dontrun{ fit <- lppm(chicago, ~marks + x) linearKcross.inhom(chicago, "assault", "robbery", fit, fit) } } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/Math.imlist.Rd0000644000176200001440000000612513333543262015360 0ustar liggesusers\name{Math.imlist} \alias{Math.imlist} \alias{Ops.imlist} \alias{Complex.imlist} \alias{Summary.imlist} \title{S3 Group Generic methods for List of Images} \description{ These are group generic methods for the class \code{"imlist"} of lists of images. These methods allows the usual mathematical functions and operators to be applied directly to lists of images. See Details for a list of implemented functions. 
} \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm = TRUE)} %NAMESPACE S3method("Math", "imlist") %NAMESPACE S3method("Ops", "imlist") %NAMESPACE S3method("Complex", "imlist") %NAMESPACE S3method("Summary", "imlist") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"imlist"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{logical: should missing values be removed?} } \details{ Below is a list of mathematical functions and operators which are defined for lists of images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.im}}, which tries to harmonise images via \code{\link{harmonise.im}} if they aren't compatible to begin with. \enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, 
\code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } } \value{ The result of \code{"Math"}, \code{"Ops"} and \code{"Complex"} group operations is another list of images. The result of \code{"Summary"} group operations is a numeric vector of length 1 or 2. } \seealso{ \code{\link{Math.im}} or \code{\link{eval.im}} for evaluating expressions involving images. } \examples{ a <- Smooth(finpines, 2) log(a)/2 - sqrt(a) range(a) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/bw.smoothppp.Rd0000644000176200001440000000616413343614711015631 0ustar liggesusers\name{bw.smoothppp} \alias{bw.smoothppp} \title{ Cross Validated Bandwidth Selection for Spatial Smoothing } \description{ Uses least-squares cross-validation to select a smoothing bandwidth for spatial smoothing of marks. } \usage{ bw.smoothppp(X, nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE, kernel="gaussian") } \arguments{ \item{X}{ A marked point pattern with numeric marks. } \item{nh}{ Number of trial values of smoothing bandwith \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwith \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}). } } \details{ This function selects an appropriate bandwidth for the nonparametric smoothing of mark values using \code{\link{Smooth.ppp}}. The argument \code{X} must be a marked point pattern with a vector or data frame of marks. All mark values must be numeric. The bandwidth is selected by least-squares cross-validation. Let \eqn{y_i}{y[i]} be the mark value at the \eqn{i}th data point. 
For a particular choice of smoothing bandwidth, let \eqn{\hat y_i}{y*[i]} be the smoothed value at the \eqn{i}th data point. Then the bandwidth is chosen to minimise the squared error of the smoothed values \eqn{\sum_i (y_i - \hat y_i)^2}{sum (y[i] - y*[i])^2}. The result of \code{bw.smoothppp} is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on the nearest neighbour distances. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{Smooth.ppp}} } \examples{ data(longleaf) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.smoothppp(longleaf) b plot(b) \testonly{spatstat.options(op)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/nnfromvertex.Rd0000644000176200001440000000250313333543263015721 0ustar liggesusers\name{nnfromvertex} \alias{nnfromvertex} \title{ Nearest Data Point From Each Vertex in a Network } \description{ Given a point pattern on a linear network, for each vertex of the network find the nearest data point. 
} \usage{ nnfromvertex(X, what = c("dist", "which"), k = 1) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{what}{ Character string specifying whether to return the nearest-neighbour distances, nearest-neighbour identifiers, or both. } \item{k}{ Integer, or integer vector, specifying that the \code{k}th nearest neighbour should be returned. } } \details{ For each vertex (node) of the linear network, this algorithm finds the nearest data point to the vertex, and returns either the distance from the vertex to its nearest neighbour in \code{X}, or the serial number of the nearest neighbour in \code{X}, or both. If \code{k} is an integer, then the \code{k}-th nearest neighbour is found instead. If \code{k} is an integer vector, this is repeated for each integer in \code{k}. } \value{ A numeric vector, matrix, or data frame. } \author{ \adrian. } \seealso{ \code{\link{nndist.lpp}} } \examples{ X <- runiflpp(5, simplenet) nnfromvertex(X) nnfromvertex(X, k=1:3) } \keyword{spatial} \keyword{math} spatstat/man/data.ppm.Rd0000644000176200001440000000221213524136171014663 0ustar liggesusers\name{data.ppm} \alias{data.ppm} \title{Extract Original Data from a Fitted Point Process Model} \description{ Given a fitted point process model, this function extracts the original point pattern dataset to which the model was fitted. } \usage{ data.ppm(object) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. 
See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}} } \examples{ fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- data.ppm(fit) # 'X' is identical to 'cells' } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/simulate.mppm.Rd0000644000176200001440000000354113333543264015763 0ustar liggesusers\name{simulate.mppm} \alias{simulate.mppm} \title{Simulate a Point Process Model Fitted to Several Point Patterns} \description{ Generates simulated realisations from a point process model that was fitted to several point patterns. } \usage{ \method{simulate}{mppm}(object, nsim=1, \dots, verbose=TRUE) } \arguments{ \item{object}{ Point process model fitted to several point patterns. An object of class \code{"mppm"}. } \item{nsim}{ Number of simulated realisations (of each original pattern). } \item{\dots}{ Further arguments passed to \code{\link{simulate.ppm}} to control the simulation. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"mppm"} of fitted point process models for replicated point pattern data. The result is a hyperframe with \code{n} rows and \code{nsim} columns, where \code{n} is the number of original point pattern datasets to which the model was fitted. Each column of the hyperframe contains a simulated version of the original data. For each of the original point pattern datasets, the fitted model for this dataset is extracted using \code{\link{subfits}}, then \code{nsim} simulated realisations of this model are generated using \code{\link{simulate.ppm}}, and these are stored in the corresponding row of the output. } \value{ A hyperframe. 
} \examples{ H <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ id, H) y <- simulate(fit, nsim=2) y plot(y[1,,drop=TRUE], main="Simulations for Waterstriders pattern 1") plot(y[,1,drop=TRUE], main="Simulation 1 for each Waterstriders pattern") } \seealso{ \code{\link{mppm}}, \code{\link{simulate.ppm}}. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat/man/fourierbasis.Rd0000644000176200001440000000424713333543263015670 0ustar liggesusers\name{fourierbasis} \alias{fourierbasis} \alias{fourierbasisraw} \title{Fourier Basis Functions} \description{Evaluates the Fourier basis functions on a \eqn{d}-dimensional box with \eqn{d}-dimensional frequencies \eqn{k_i} at the \eqn{d}-dimensional coordinates \eqn{x_j}. } \usage{ fourierbasis(x, k, win = boxx(rep(list(0:1), ncol(k)))) fourierbasisraw(x, k, boxlengths) } \arguments{ \item{x}{ Coordinates. A \code{data.frame} or matrix with \eqn{n} rows and \eqn{d} columns giving the \eqn{d}-dimensional coordinates. } \item{k}{Frequencies. A \code{data.frame} or matrix with \eqn{m} rows and \eqn{d} columns giving the frequencies of the Fourier-functions. } \item{win}{ window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"}) giving the \eqn{d}-dimensional box domain of the Fourier functions. } \item{boxlengths}{ numeric giving the side lengths of the box domain of the Fourier functions. } } \details{ The result is an \eqn{m} by \eqn{n} matrix where the \eqn{(i,j)}'th entry is the \eqn{d}-dimensional Fourier basis function with frequency \eqn{k_i} evaluated at the point \eqn{x_j}, i.e., \deqn{ \frac{1}{\sqrt{|W|}} \exp(2\pi i \sum{l=1}^d k_{i,l} x_{j,l}/L_l) }{ 1/sqrt(|W|) * exp(2*pi*i*(k_{i,1}*x_{j,1}/L_1 + ... + k_{i,d}*x_{j,d}/L_d)) } where \eqn{L_l}, \eqn{l=1,...,d} are the box side lengths and \eqn{|W|} is the volume of the domain (window/box). Note that the algorithm does not check whether the coordinates given in \code{x} are contained in the given box. 
Actually the box is only used to determine the side lengths and volume of the domain for normalization. The stripped down faster version \code{fourierbasisraw} doesn't do checking or conversion of arguments and requires \code{x} and \code{k} to be matrices. } \value{An \code{m} by \code{n} matrix of complex values.} \author{ \adrian \rolf and \ege } \examples{ ## 27 rows of three dimensional Fourier frequencies: k <- expand.grid(-1:1,-1:1, -1:1) ## Two random points in the three dimensional unit box: x <- rbind(runif(3),runif(3)) ## 27 by 2 resulting matrix: v <- fourierbasis(x, k) head(v) } spatstat/man/pairwise.family.Rd0000644000176200001440000000273713333543264016301 0ustar liggesusers\name{pairwise.family} \alias{pairwise.family} \title{Pairwise Interaction Process Family} \description{ An object describing the family of all pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the pairwise interaction family of point process models. If you need to create a specific pairwise interaction model for use in modelling, use the function \code{\link{Pairwise}} or one of the existing functions listed below. Anyway, \code{pairwise.family} is an object of class \code{"isf"} containing a function \code{pairwise.family$eval} for evaluating the sufficient statistics of any pairwise interaction point process model taking an exponential family form. } \seealso{ Other families: \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Pairwise interactions: \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Fiksel}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}. 
Other interactions: \code{\link{AreaInter}}, \code{\link{Geyer}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/distfun.Rd0000644000176200001440000000633713333543263014651 0ustar liggesusers\name{distfun} \Rdversion{1.1} \alias{distfun} \alias{distfun.ppp} \alias{distfun.psp} \alias{distfun.owin} \title{ Distance Map as a Function } \description{ Compute the distance function of an object, and return it as a function. } \usage{ distfun(X, \dots) \method{distfun}{ppp}(X, \dots, k=1, undef=Inf) \method{distfun}{psp}(X, \dots) \method{distfun}{owin}(X, \dots, invert=FALSE) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Extra arguments are ignored. } \item{k}{ An integer. The distance to the \code{k}th nearest point will be computed. } \item{undef}{ The value that should be returned if the distance is undefined (that is, if \code{X} contains fewer than \code{k} points). } \item{invert}{ If \code{TRUE}, compute the distance transform of the complement of \code{X}. } } \details{ The \dQuote{distance function} of a set of points \eqn{A} is the mathematical function \eqn{f} such that, for any two-dimensional spatial location \eqn{(x,y)}, the function value \code{f(x,y)} is the shortest distance from \eqn{(x,y)} to \eqn{A}. The command \code{f <- distfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y}, that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. 
Alternatively \code{x} can be a point pattern (object of class \code{"ppp"} or \code{"lpp"}) of locations at which the distance function should be computed (and then \code{y} should be missing). This should be contrasted with the related command \code{\link{distmap}} which computes the distance function of \code{X} on a grid of locations, and returns the distance values in the form of a pixel image. The result of \code{f <- distfun(X)} also belongs to the class \code{"funxy"} and to the special class \code{"distfun"}. It can be printed and plotted immediately as shown in the Examples. A \code{distfun} object can be converted to a pixel image using \code{\link{as.im}}. } \value{ A \code{function} with arguments \code{x,y}. The function belongs to the class \code{"distfun"} which has methods for \code{print} and \code{summary}, and for geometric operations like \code{shift}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{distmap}}, \code{\link{summary.distfun}}, \code{\link{methods.distfun}}, \code{\link{methods.funxy}}, \code{\link{plot.funxy}} } \examples{ data(letterR) f <- distfun(letterR) f plot(f) f(0.2, 0.3) plot(distfun(letterR, invert=TRUE), eps=0.1) d <- distfun(cells) d2 <- distfun(cells, k=2) d(0.5, 0.5) d2(0.5, 0.5) domain(d) summary(d) z <- d(japanesepines) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/plot.quad.Rd0000644000176200001440000000427513333543264015104 0ustar liggesusers\name{plot.quad} \alias{plot.quad} \title{Plot a Spatial Quadrature Scheme} \description{ Plot a two-dimensional spatial quadrature scheme. } \usage{ \method{plot}{quad}(x, ..., main, add=FALSE, dum=list(), tiles=FALSE) } \arguments{ \item{x}{ The spatial quadrature scheme to be plotted. An object of class \code{"quad"}. } \item{\dots}{ extra arguments controlling the plotting of the data points of the quadrature scheme. 
} \item{main}{ text to be displayed as a title above the plot. } \item{add}{ Logical value indicating whether the graphics should be added to the current plot if there is one (\code{add=TRUE}) or whether a new plot should be initialised (\code{add=FALSE}, the default). } \item{dum}{ list of extra arguments controlling the plotting of the dummy points of the quadrature scheme. See below. } \item{tiles}{ Logical value indicating whether to display the tiles used to compute the quadrature weights. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for quadrature schemes (objects of class \code{"quad"}, see \code{\link{quad.object}}). First the data points of the quadrature scheme are plotted (in their observation window) using \code{\link{plot.ppp}} with any arguments specified in \code{...} Then the dummy points of the quadrature scheme are plotted using \code{\link{plot.ppp}} with any arguments specified in \code{dum}. By default the dummy points are superimposed onto the plot of data points. This can be overridden by including the argument \code{add=FALSE} in the list \code{dum} as shown in the examples. In this case the data and dummy point patterns are plotted separately. See \code{\link[graphics]{par}} and \code{\link{plot.ppp}} for other possible arguments controlling the plots. } \seealso{ \code{\link{quad.object}}, \code{\link{plot.ppp}}, \code{\link[graphics]{par}} } \examples{ data(nztrees) Q <- quadscheme(nztrees) plot(Q, main="NZ trees: quadrature scheme") oldpar <- par(mfrow=c(2,1)) plot(Q, main="NZ trees", dum=list(add=FALSE)) par(oldpar) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/rat.Rd0000644000176200001440000000335613333543264013762 0ustar liggesusers\name{rat} \alias{rat} \title{ Ratio object } \description{ Stores the numerator, denominator, and value of a ratio as a single object. 
} \usage{ rat(ratio, numerator, denominator, check = TRUE) } \arguments{ \item{ratio,numerator,denominator}{ Three objects belonging to the same class. } \item{check}{ Logical. Whether to check that the objects are \code{\link{compatible}}. } } \details{ The class \code{"rat"} is a simple mechanism for keeping track of the numerator and denominator when calculating a ratio. Its main purpose is simply to signal that the object is a ratio. The function \code{rat} creates an object of class \code{"rat"} given the numerator, the denominator and the ratio. No calculation is performed; the three objects are simply stored together. The arguments \code{ratio}, \code{numerator}, \code{denominator} can be objects of any kind. They should belong to the same class. It is assumed that the relationship \deqn{ \mbox{ratio} = \frac{\mbox{numerator}}{\mbox{denominator}} }{ ratio = numerator/denominator } holds in some version of arithmetic. However, no calculation is performed. By default the algorithm checks whether the three arguments \code{ratio}, \code{numerator}, \code{denominator} are compatible objects, according to \code{\link{compatible}}. The result is equivalent to \code{ratio} except for the addition of extra information. } \value{ An object equivalent to the object \code{ratio} except that it also belongs to the class \code{"rat"} and has additional attributes \code{numerator} and \code{denominator}. } \author{\adrian and \rolf. } \seealso{ \code{\link{compatible}}, \code{\link{pool}} } \keyword{spatial} \keyword{manip} spatstat/man/superimpose.lpp.Rd0000644000176200001440000000501613333543264016334 0ustar liggesusers\name{superimpose.lpp} \alias{superimpose.lpp} \title{Superimpose Several Point Patterns on Linear Network} \description{ Superimpose any number of point patterns on the same linear network. 
} \usage{ \method{superimpose}{lpp}(\dots, L=NULL) } \arguments{ \item{\dots}{ Any number of arguments, each of which represents a point pattern on the same linear network. Each argument can be either an object of class \code{"lpp"}, giving both the spatial coordinates of the points and the linear network, or a \code{list(x,y)} or \code{list(x,y,seg,tp)} giving just the spatial coordinates of the points. } \item{L}{ Optional. The linear network. An object of class \code{"linnet"}. This argument is required if none of the other arguments is of class \code{"lpp"}. } } \value{ An object of class \code{"lpp"} representing the combined point pattern on the linear network. } \details{ This function is used to superimpose several point patterns on the same linear network. It is a method for the generic function \code{\link{superimpose}}. Each of the arguments \code{\dots} can be either a point pattern on a linear network (object of class \code{"lpp"} giving both the spatial coordinates of the points and the linear network), or a \code{list(x,y)} or \code{list(x,y,seg,tp)} giving just the spatial coordinates of the points. These arguments must represent point patterns on the \emph{same} linear network. The argument \code{L} is an alternative way to specify the linear network, and is required if none of the arguments \code{\dots} is an object of class \code{"lpp"}. The arguments \code{\dots} may be \emph{marked} patterns. The marks of each component pattern must have the same format. Numeric and character marks may be ``mixed''. If there is such mixing then the numeric marks are coerced to character in the combining process. If the mark structures are all data frames, then these data frames must have the same number of columns and identical column names. If the arguments \code{\dots} are given in the form \code{name=value}, then the \code{name}s will be used as an extra column of marks attached to the elements of the corresponding patterns. 
} \seealso{ \code{\link{superimpose}} } \examples{ X <- rpoislpp(5, simplenet) Y <- rpoislpp(10, simplenet) superimpose(X,Y) # not marked superimpose(A=X, B=Y) # multitype with types A and B } \author{\adrian \rolf \ege and Greg McSwiggan. } \keyword{spatial} \keyword{manip} spatstat/man/points.lpp.Rd0000644000176200001440000000364713333543264015305 0ustar liggesusers\name{points.lpp} \alias{points.lpp} \title{ Draw Points on Existing Plot } \description{ For a point pattern on a linear network, this function draws the coordinates of the points only, on the existing plot display. } \usage{ \method{points}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{points.default}}. } } \details{ This is a method for the generic function \code{\link[graphics]{points}} for the class \code{"lpp"} of point patterns on a linear network. If \code{x} is a point pattern on a linear network, then \code{points(x)} plots the spatial coordinates of the points only, on the existing plot display, without plotting the underlying network. It is an error to call this function if a plot has not yet been initialised. The spatial coordinates are extracted and passed to \code{\link[graphics]{points.default}} along with any extra arguments. Arguments controlling the colours and the plot symbols are interpreted by \code{\link[graphics]{points.default}}. For example, if the argument \code{col} is a vector, then the \code{i}th point is drawn in the colour \code{col[i]}. } \section{Difference from plot method}{ The more usual way to plot the points is using \code{\link{plot.lpp}}. For example \code{plot(x)} would plot both the points and the underlying network, while \code{plot(x, add=TRUE)} would plot only the points. 
The interpretation of arguments controlling the colours and plot symbols is different here: they determine a symbol map, as explained in the help for \code{\link{plot.ppp}}. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.lpp}}, \code{\link[graphics]{points.default}} } \examples{ plot(Frame(spiders), main="Spiders on a Brick Wall") points(spiders) } \keyword{spatial} \keyword{hplot} spatstat/man/tile.lengths.Rd0000644000176200001440000000171413333543264015570 0ustar liggesusers\name{tile.lengths} \alias{tile.lengths} \title{Compute Lengths of Tiles in a Tessellation on a Network} \description{ Computes the length of each tile in a tessellation on a linear network. } \usage{ tile.lengths(x) } \arguments{ \item{x}{A tessellation on a linear network (object of class \code{"lintess"}).} } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. This command computes the length of each of the tiles that make up the tessellation \code{x}. The result is a numeric vector. } \value{ A numeric vector. } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(5, simplenet) A <- lineardirichlet(X) plot(A) tile.lengths(A) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/harmonic.Rd0000644000176200001440000000405613333543263014771 0ustar liggesusers\name{harmonic} \alias{harmonic} \title{Basis for Harmonic Functions} \description{ Evaluates a basis for the harmonic polynomials in \eqn{x} and \eqn{y} of degree less than or equal to \eqn{n}. 
} \usage{ harmonic(x, y, n) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates } \item{y}{ Vector of \eqn{y} coordinates } \item{n}{ Maximum degree of polynomial } } \value{ A data frame with \code{2 * n} columns giving the values of the basis functions at the coordinates. Each column is labelled by an algebraic expression for the corresponding basis function. } \details{ This function computes a basis for the harmonic polynomials in two variables \eqn{x} and \eqn{y} up to a given degree \eqn{n} and evaluates them at given \eqn{x,y} locations. It can be used in model formulas (for example in the model-fitting functions \code{\link{lm},\link{glm},\link{gam}} and \code{\link{ppm}}) to specify a linear predictor which is a harmonic function. A function \eqn{f(x,y)} is harmonic if \deqn{\frac{\partial^2}{\partial x^2} f + \frac{\partial^2}{\partial y^2}f = 0.}{ (d/dx)^2 f + (d/dy)^2 f = 0.} The harmonic polynomials of degree less than or equal to \eqn{n} have a basis consisting of \eqn{2 n} functions. This function was implemented on a suggestion of P. McCullagh for fitting nonstationary spatial trend to point process models. } \seealso{ \code{\link{ppm}}, \code{\link{polynom}} } \examples{ # inhomogeneous point pattern X <- unmark(longleaf) \testonly{ # smaller dataset X <- X[seq(1,npoints(X), by=50)] } # fit Poisson point process with log-cubic intensity fit.3 <- ppm(X ~ polynom(x,y,3), Poisson()) # fit Poisson process with log-cubic-harmonic intensity fit.h <- ppm(X ~ harmonic(x,y,3), Poisson()) # Likelihood ratio test lrts <- 2 * (logLik(fit.3) - logLik(fit.h)) df <- with(coords(X), ncol(polynom(x,y,3)) - ncol(harmonic(x,y,3))) pval <- 1 - pchisq(lrts, df=df) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{models} spatstat/man/effectfun.Rd0000644000176200001440000000766613442413652015147 0ustar liggesusers\name{effectfun} \alias{effectfun} \title{Compute Fitted Effect of a Spatial Covariate in a Point Process Model} \description{ Compute the trend or intensity of a fitted point process model as a function of one of its covariates. } \usage{ effectfun(model, covname, \dots, se.fit=FALSE, nvalues=256) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"}). } \item{covname}{ The name of the covariate. A character string. (Needed only if the model has more than one covariate.) } \item{\dots}{ The fixed values of other covariates (in the form \code{name=value}) if required. } \item{se.fit}{ Logical. If \code{TRUE}, asymptotic standard errors of the estimates will be computed, together with a 95\% confidence interval. } \item{nvalues}{ Integer. The number of values of the covariate (if it is numeric) for which the effect function should be evaluated. We recommend at least 256. } } \details{ The object \code{model} should be an object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"}, \code{"dppm"}, \code{"rppm"} or \code{"profilepl"} representing a point process model fitted to point pattern data. The model's trend formula should involve a spatial covariate named \code{covname}. This could be \code{"x"} or \code{"y"} representing one of the Cartesian coordinates. More commonly the covariate is another, external variable that was supplied when fitting the model. The command \code{effectfun} computes the fitted trend of the point process \code{model} as a function of the covariate named \code{covname}. The return value can be plotted immediately, giving a plot of the fitted trend against the value of the covariate. If the model also involves covariates other than \code{covname}, then these covariates will be held fixed. 
Values for these other covariates must be provided as arguments to \code{effectfun} in the form \code{name=value}. If \code{se.fit=TRUE}, the algorithm also calculates the asymptotic standard error of the fitted trend, and a (pointwise) asymptotic 95\% confidence interval for the true trend. This command is just a wrapper for the prediction method \code{\link{predict.ppm}}. For more complicated computations about the fitted intensity, use \code{\link{predict.ppm}}. } \section{Trend and intensity}{ For a Poisson point process model, the trend is the same as the intensity of the point process. For a more general Gibbs model, the trend is the first order potential in the model (the first order term in the Gibbs representation). In Poisson or Gibbs models fitted by \code{\link{ppm}}, the trend is the only part of the model that depends on the covariates. } \section{Determinantal point process models with fixed intensity}{ The function \code{\link{dppm}} which fits a determinantal point process model allows the user to specify the intensity \code{lambda}. In such cases the effect function is undefined, and \code{effectfun} stops with an error message. } \value{ A data frame containing a column of values of the covariate and a column of values of the fitted trend. If \code{se.fit=TRUE}, there are 3 additional columns containing the standard error and the upper and lower limits of a confidence interval. If the covariate named \code{covname} is numeric (rather than a factor or logical variable), the return value is also of class \code{"fv"} so that it can be plotted immediately. } \seealso{ \code{\link{ppm}}, \code{\link{predict.ppm}}, \code{\link{fv.object}} } \examples{ X <- copper$SouthPoints D <- distfun(copper$SouthLines) fit <- ppm(X ~ polynom(D, 5)) effectfun(fit) plot(effectfun(fit, se.fit=TRUE)) fitx <- ppm(X ~ x + polynom(D, 5)) plot(effectfun(fitx, "D", x=20)) } \author{ \adrian and \rolf. 
} \keyword{spatial} \keyword{models} spatstat/man/Replace.im.Rd0000644000176200001440000001145513513756353015157 0ustar liggesusers\name{Replace.im} \alias{[<-.im} \title{Reset Values in Subset of Image} \description{ Reset the values in a subset of a pixel image. } \usage{ \method{[}{im}(x, i, j, \dots, drop=TRUE) <- value } \arguments{ \item{x}{ A two-dimensional pixel image. An object of class \code{"im"}. } \item{i}{ Object defining the subregion or subset to be replaced. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is appropriate to some sort of replacement \emph{other than} matrix indexing. } \item{\dots}{Ignored.} \item{drop}{ Logical value specifying what happens when \code{i} and \code{j} are both missing. See Details. } \item{value}{ Vector, matrix, factor or pixel image containing the replacement values. Short vectors will be recycled. } } \value{ The image \code{x} with the values replaced. } \details{ This function changes some of the pixel values in a pixel image. The image \code{x} must be an object of class \code{"im"} representing a pixel image defined inside a rectangle in two-dimensional space (see \code{\link{im.object}}). The subset to be changed is determined by the arguments \code{i,j} according to the following rules (which are checked in this order): \enumerate{ \item \code{i} is a spatial object such as a window, a pixel image with logical values, or a point pattern; or \item \code{i,j} are indices for the matrix \code{as.matrix(x)}; or \item \code{i} can be converted to a point pattern by \code{\link{as.ppp}(i, W=Window(x))}, and \code{i} is not a matrix. 
} If \code{i} is a spatial window (an object of class \code{"owin"}), the values of the image inside this window are changed. If \code{i} is a point pattern (an object of class \code{"ppp"}), then the values of the pixel image at the points of this pattern are changed. If \code{i} does not satisfy any of the conditions above, then the algorithm tries to interpret \code{i,j} as indices for the matrix \code{as.matrix(x)}. Either \code{i} or \code{j} may be missing or blank. If none of the conditions above are met, and if \code{i} is not a matrix, then \code{i} is converted into a point pattern by \code{\link{as.ppp}(i, W=Window(x))}. Again the values of the pixel image at the points of this pattern are changed. If \code{i} and \code{j} are both missing, as in the call \code{x[] <- value}, then all pixel values in \code{x} are replaced by \code{value}: \itemize{ \item If \code{drop=TRUE} (the default), then this replacement applies only to pixels whose values are currently defined (i.e. where the current pixel value is not \code{NA}). If \code{value} is a vector, then its length must equal the number of pixels whose values are currently defined. \item If \code{drop=FALSE} then the replacement applies to all pixels inside the rectangle \code{Frame(x)}. If \code{value} is a vector, then its length must equal the number of pixels in the entire rectangle. } } \section{Warning}{ If you have a 2-column matrix containing the \eqn{x,y} coordinates of point locations, then to prevent this being interpreted as an array index, you should convert it to a \code{data.frame} or to a point pattern. 
} \seealso{ \code{\link{im.object}}, \code{\link{[.im}}, \code{\link{[}}, \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) # a rectangular subset W <- owin(c(0,0.5),c(0.2,0.8)) X[W] <- 2 plot(X) # a polygonal subset data(letterR) R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7)) X[R] <- 3 plot(X) # a point pattern P <- rpoispp(20) X[P] <- 10 plot(X) # change pixel value at a specific location X[list(x=0.1,y=0.2)] <- 7 # matrix indexing --- single vector index X[1:2570] <- 10 plot(X) # matrix indexing using double indices X[1:257,1:10] <- 5 plot(X) # matrix indexing using a matrix of indices X[cbind(1:257,1:257)] <- 10 X[cbind(257:1,1:257)] <- 10 plot(X) # Blank indices Y <- as.im(letterR) plot(Y) Y[] <- 42 # replace values only inside the window 'R' plot(Y) Y[drop=FALSE] <- 7 # replace all values in the rectangle plot(Y) Z <- as.im(letterR) Z[] <- raster.x(Z, drop=TRUE) # excludes NA plot(Z) Z[drop=FALSE] <- raster.y(Z, drop=FALSE) # includes NA plot(Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/nnfun.Rd0000644000176200001440000000556613537661344014333 0ustar liggesusers\name{nnfun} \Rdversion{1.1} \alias{nnfun} \alias{nnfun.ppp} \alias{nnfun.psp} \title{ Nearest Neighbour Index Map as a Function } \description{ Compute the nearest neighbour index map of an object, and return it as a function. } \usage{ nnfun(X, ...) \method{nnfun}{ppp}(X, ..., k=1, value=c("index", "mark")) \method{nnfun}{psp}(X, ..., value=c("index", "mark")) } \arguments{ \item{X}{ Any suitable dataset representing a two-dimensional collection of objects, such as a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}). } \item{k}{ A single integer. The \code{k}th nearest neighbour will be found. } \item{\dots}{ Extra arguments are ignored. 
} \item{value}{ String (partially matched) specifying whether to return the index of the neighbour (\code{value="index"}, the default) or the mark value of the neighbour (\code{value="mark"}). } } \details{ For a collection \eqn{X} of two dimensional objects (such as a point pattern or a line segment pattern), the \dQuote{nearest neighbour index function} of \eqn{X} is the mathematical function \eqn{f} such that, for any two-dimensional spatial location \eqn{(x,y)}, the function value \code{f(x,y)} is the index \eqn{i} identifying the closest member of \eqn{X}. That is, if \eqn{i = f(x,y)} then \eqn{X[i]} is the closest member of the collection \eqn{X} to the location \eqn{(x,y)}. The command \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y}, that represents the nearest neighbour index function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the indices of the nearest neighbours to these locations. If the argument \code{k} is specified then the \code{k}-th nearest neighbour will be found. The result of \code{f <- nnfun(X)} also belongs to the class \code{"funxy"} and to the special class \code{"nnfun"}. It can be printed and plotted immediately as shown in the Examples. A \code{nnfun} object can be converted to a pixel image using \code{\link{as.im}}. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"nnfun"} which has a method for \code{print}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. 
} \seealso{ \code{\link{distfun}}, \code{\link{plot.funxy}} } \examples{ f <- nnfun(cells) f plot(f) f(0.2, 0.3) g <- nnfun(cells, k=2) g(0.2, 0.3) plot(nnfun(amacrine, value="m")) L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) h <- nnfun(L) h(0.2, 0.3) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/quadratresample.Rd0000644000176200001440000000460513333543264016364 0ustar liggesusers\name{quadratresample} \alias{quadratresample} \title{Resample a Point Pattern by Resampling Quadrats} \description{ Given a point pattern dataset, create a resampled point pattern by dividing the window into rectangular quadrats and randomly resampling the list of quadrats. } \usage{ quadratresample(X, nx, ny=nx, ..., replace = FALSE, nsamples = 1, verbose = (nsamples > 1)) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"}). } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. } \item{\dots}{Ignored.} \item{replace}{ Logical value. Specifies whether quadrats should be sampled with or without replacement. } \item{nsamples}{Number of randomised point patterns to be generated.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This command implements a very simple bootstrap resampling procedure for spatial point patterns \code{X}. The dataset \code{X} must be a point pattern (object of class \code{"ppp"}) and its observation window must be a rectangle. The window is first divided into \code{N = nx * ny} rectangular tiles (quadrats) of equal size and shape. To generate one resampled point pattern, a random sample of \code{N} quadrats is selected from the list of \code{N} quadrats, with replacement (if \code{replace=TRUE}) or without replacement (if \code{replace=FALSE}). The \eqn{i}th quadrat in the original dataset is then replaced by the \eqn{i}th sampled quadrat, after the latter is shifted so that it occupies the correct spatial position. 
The quadrats are then reconstituted into a point pattern inside the same window as \code{X}. If \code{replace=FALSE}, this procedure effectively involves a random permutation of the quadrats. The resulting resampled point pattern has the same number of points as \code{X}. If \code{replace=TRUE}, the number of points in the resampled point pattern is random. } \value{ A point pattern (if \code{nsamples = 1}) or a list of point patterns (if \code{nsamples > 1}). } \author{\adrian and \rolf } \seealso{ \code{\link{quadrats}}, \code{\link{quadratcount}}. See \code{\link{varblock}} to estimate the variance of a summary statistic by block resampling. } \examples{ data(bei) quadratresample(bei, 6, 3) } \keyword{spatial} \keyword{datagen} spatstat/man/sidelengths.owin.Rd0000644000176200001440000000304313333543264016451 0ustar liggesusers\name{sidelengths.owin} \alias{sidelengths.owin} \alias{shortside.owin} \title{Side Lengths of Enclosing Rectangle of a Window} \description{ Computes the side lengths of the (enclosing rectangle of) a window. } \usage{ \method{sidelengths}{owin}(x) \method{shortside}{owin}(x) } \arguments{ \item{x}{ A window whose side lengths will be computed. Object of class \code{"owin"}. } } \value{ For \code{sidelengths.owin}, a numeric vector of length 2 giving the side-lengths (\eqn{x} then \eqn{y}) of the enclosing rectangle. For \code{shortside.owin}, a numeric value. } \details{ The functions \code{shortside} and \code{sidelengths} are generic. The functions documented here are the methods for the class \code{"owin"}. \code{sidelengths.owin} computes the side-lengths of the enclosing rectangle of the window \code{x}. For safety, both functions give a warning if the window is not a rectangle. To suppress the warning, first convert the window to a rectangle using \code{\link{as.rectangle}}. \code{shortside.owin} computes the minimum of the two side-lengths. } \seealso{ \code{\link{shortside}}, \code{\link{sidelengths}} for the generic functions. 
\code{\link{area.owin}}, \code{\link{diameter.owin}}, \code{\link{perimeter}} for other geometric calculations on \code{"owin"} objects. \code{\link{owin}}, \code{\link{as.owin}}. } \examples{ w <- owin(c(0,2),c(-1,3)) sidelengths(w) shortside(as.rectangle(letterR)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/shift.ppp.Rd0000644000176200001440000000472413442350577015113 0ustar liggesusers\name{shift.ppp} \alias{shift.ppp} \title{Apply Vector Translation To Point Pattern} \description{ Applies a vector shift to a point pattern. } \usage{ \method{shift}{ppp}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{ Location that will be shifted to the origin. Either a numeric vector of length 2 giving the location, or a point pattern containing only one point, or a list with two entries named \code{x} and \code{y}, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). } } \value{ Another point pattern (of class \code{"ppp"}) representing the result of applying the vector shift. } \details{ The point pattern, and its window, are translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, the argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the coordinate origin \eqn{(0,0)}. 
The argument \code{origin} should be either a numeric vector of length 2 giving the spatial coordinates of a location, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin, and so on. } \seealso{ \code{\link{shift}}, \code{\link{shift.owin}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}} } \examples{ data(cells) X <- shift(cells, c(2,3)) \dontrun{ plot(X) # no discernible difference except coordinates are different } plot(cells, pch=16) plot(shift(cells, c(0.03,0.03)), add=TRUE) shift(cells, origin="mid") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/identify.psp.Rd0000644000176200001440000000351413333543263015603 0ustar liggesusers\name{identify.psp} \alias{identify.psp} \title{Identify Segments in a Line Segment Pattern} \description{ If a line segment pattern is plotted in the graphics window, this function will find the segment which is nearest to the mouse position, and print its serial number. } \usage{ \method{identify}{psp}(x, \dots, labels=seq_len(nsegments(x)), n=nsegments(x), plot=TRUE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{labels}{ Labels associated with the segments, to be plotted when the segments are identified. A character vector or numeric vector of length equal to the number of segments in \code{x}. } \item{n}{ Maximum number of segments to be identified. } \item{plot}{ Logical. Whether to plot the labels when a segment is identified. 
} \item{\dots}{ Arguments passed to \code{\link[graphics]{text.default}} controlling the plotting of the labels. } } \value{ Vector containing the serial numbers of the segments in the pattern \code{x} that were identified. } \details{ This is a method for the generic function \code{\link[graphics]{identify}} for line segment pattern objects. The line segment pattern \code{x} should first be plotted using \code{\link{plot.psp}}. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the segment in the pattern \code{x} that is closest to the mouse position. This segment's index will be returned as part of the value of the call. Each time a segment is identified, text will be displayed next to the point, showing its serial number (or the relevant entry of \code{labels}). } \seealso{ \code{\link[graphics]{identify}}, \code{\link{identify.ppp}}. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{iplot} spatstat/man/ripras.Rd0000644000176200001440000000612413333543264014470 0ustar liggesusers\name{ripras} \alias{ripras} \title{Estimate window from points alone} \description{ Given an observed pattern of points, computes the Ripley-Rasson estimate of the spatial domain from which they came. } \usage{ ripras(x, y=NULL, shape="convex", f) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} \item{shape}{String indicating the type of window to be estimated: either \code{"convex"} or \code{"rectangle"}. } \item{f}{ (optional) scaling factor. See Details. } } \value{ A window (an object of class \code{"owin"}). 
} \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function computes an estimate due to Ripley and Rasson (1977) of the spatial domain from which the points came. The points are assumed to have been generated independently and uniformly distributed inside an unknown domain \eqn{D}. If \code{shape="convex"} (the default), the domain \eqn{D} is assumed to be a convex set. The maximum likelihood estimate of \eqn{D} is the convex hull of the points (computed by \code{\link{convexhull.xy}}). Analogously to the problems of estimating the endpoint of a uniform distribution, the MLE is not optimal. Ripley and Rasson's estimator is a rescaled copy of the convex hull, centred at the centroid of the convex hull. The scaling factor is \eqn{1/sqrt(1 - m/n)}{1/\sqrt{1 - \frac m n}} where \eqn{n} is the number of data points and \eqn{m} the number of vertices of the convex hull. The scaling factor may be overridden using the argument \code{f}. If \code{shape="rectangle"}, the domain \eqn{D} is assumed to be a rectangle with sides parallel to the coordinate axes. The maximum likelihood estimate of \eqn{D} is the bounding box of the points (computed by \code{\link{bounding.box.xy}}). The Ripley-Rasson estimator is a rescaled copy of the bounding box, with scaling factor \eqn{(n+1)/(n-1)} where \eqn{n} is the number of data points, centred at the centroid of the bounding box. The scaling factor may be overridden using the argument \code{f}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{bounding.box.xy}}, \code{\link{convexhull.xy}} } \examples{ x <- runif(30) y <- runif(30) w <- ripras(x,y) plot(owin(), main="ripras(x,y)") plot(w, add=TRUE) points(x,y) X <- rpoispp(15) plot(X, main="ripras(X)") plot(ripras(X), add=TRUE) # two points insufficient ripras(c(0,1),c(0,0)) # triangle ripras(c(0,1,0.5), c(0,0,1)) # three collinear points ripras(c(0,0,0), c(0,1,2)) } \references{ Ripley, B.D. and Rasson, J.-P. 
(1977) Finding the edge of a Poisson forest. \emph{Journal of Applied Probability}, \bold{14}, 483 -- 491. } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/Extract.leverage.ppm.Rd0000644000176200001440000000446213333543263017170 0ustar liggesusers\name{Extract.leverage.ppm} \alias{[.leverage.ppm} \title{Extract Subset of Leverage Object} \description{ Extract a subset of a leverage map, or extract the leverage values at specified locations. } \usage{ \method{[}{leverage.ppm}(x, i, \dots, update=TRUE) } \arguments{ \item{x}{ A leverage object (of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{i}{ Subset index (passed to \code{\link{[.im}}). Either a spatial window (object of class \code{"owin"}) or a spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{ Further arguments passed to \code{\link{[.im}}, especially the argument \code{drop}. } \item{update}{ Logical value indicating whether to update the internally-stored value of the mean leverage, by averaging over the specified subset. } } \value{ Another object of class \code{"leverage.ppm"}, or a vector of numeric values of leverage. } \details{ An object of class \code{"leverage.ppm"} contains the values of the leverage function for a point process model, computed by \code{\link{leverage.ppm}}. This function extracts a designated subset of the leverage values, either as another leverage object, or as a vector of numeric values. The function \code{[.leverage.ppm} is a method for \code{\link{[}} for the class \code{"leverage.ppm"}. The argument \code{i} should be either \itemize{ \item a spatial window (object of class \code{"owin"}) determining a region where the leverage map is required. The result will typically be another leverage map (object of class \code{leverage.ppm}). \item a spatial point pattern (object of class \code{"ppp"}) specifying locations at which the leverage values are required. The result will be a numeric vector. 
} The subset operator for images, \code{\link{[.im}}, is applied to the leverage map. If this yields a pixel image, then the result of \code{\link{[.leverage.ppm}} is another leverage object. Otherwise, a vector containing the numeric values of leverage is returned. } \seealso{ \code{\link{leverage.ppm}}. } \examples{ fit <- ppm(cells ~x) lev <- leverage(fit) b <- owin(c(0.1, 0.3), c(0.2, 0.4)) lev[b] lev[cells] } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/dppCauchy.Rd0000644000176200001440000000245113571674202015110 0ustar liggesusers\name{dppCauchy} \alias{dppCauchy} \title{Generalized Cauchy Determinantal Point Process Model} \description{ Function generating an instance of the (generalized) Cauchy determinantal point process model. } \usage{dppCauchy(\dots)} \arguments{ \item{\dots}{arguments of the form \code{tag=value} specifying the parameters. See Details.} } \details{ The (generalized) Cauchy DPP is defined in (Lavancier, \Moller and Rubak, 2015) The possible parameters are: \itemize{ \item the intensity \code{lambda} as a positive numeric \item the scale parameter \code{alpha} as a positive numeric \item the shape parameter \code{nu} as a positive numeric (artificially required to be less than 20 in the code for numerical stability) \item the dimension \code{d} as a positive integer } } \value{An object of class \code{"detpointprocfamily"}.} \author{ \adrian \rolf and \ege } \references{ Lavancier, F. \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. 
} \examples{ m <- dppCauchy(lambda=100, alpha=.05, nu=1, d=2) } \seealso{ \code{\link{dppBessel}}, \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppPowerExp}} } spatstat/man/linearpcfinhom.Rd0000644000176200001440000001213413333543263016163 0ustar liggesusers\name{linearpcfinhom} \alias{linearpcfinhom} \title{ Inhomogeneous Linear Pair Correlation Function } \description{ Computes an estimate of the inhomogeneous linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcfinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, ratio = FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points, raised to \code{normpower}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See explanation in \code{\link{linearKinhom}}. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"lppm"} or \code{"ppm"}). 
If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.lppm}} or \code{\link{update.ppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{fitted.lppm}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity, when \code{lambda} is a fitted model. Supported only when \code{update=TRUE}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the inhomogeneous version of the linear pair correlation function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous pair correlation function \code{\link{linearpcf}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{lambda} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). 
The bandwidth for smoothing the pairwise distances is determined by arguments \code{\dots} passed to \code{\link{density.default}}, mainly the arguments \code{bw} and \code{adjust}. The default is to choose the bandwidth by Silverman's rule of thumb \code{bw="nrd0"} explained in \code{\link{density.default}}. } \value{ Function value table (object of class \code{"fv"}). If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{linearpcf}}, \code{\link{linearKinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X ~x) K <- linearpcfinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat/man/diameter.box3.Rd0000644000176200001440000000363113333543263015633 0ustar liggesusers\name{diameter.box3} \Rdversion{1.1} \alias{diameter.box3} \alias{volume.box3} \alias{shortside.box3} \alias{sidelengths.box3} \alias{eroded.volumes.box3} \alias{shortside} \alias{sidelengths} \alias{eroded.volumes} \title{ Geometrical Calculations for Three-Dimensional Box } \description{ Calculates the volume, diameter, shortest side, side lengths, or eroded volume of a three-dimensional box. 
} \usage{ \method{diameter}{box3}(x) \method{volume}{box3}(x) shortside(x) sidelengths(x) eroded.volumes(x, r) \method{shortside}{box3}(x) \method{sidelengths}{box3}(x) \method{eroded.volumes}{box3}(x, r) } \arguments{ \item{x}{ Three-dimensional box (object of class \code{"box3"}). } \item{r}{ Numeric value or vector of numeric values for which eroded volumes should be calculated. } } \details{ \code{diameter.box3} computes the diameter of the box. \code{volume.box3} computes the volume of the box. \code{shortside.box3} finds the shortest of the three side lengths of the box. \code{sidelengths.box3} returns all three side lengths of the box. \code{eroded.volumes} computes, for each entry \code{r[i]}, the volume of the smaller box obtained by removing a slab of thickness \code{r[i]} from each face of the box. This smaller box is the subset consisting of points that lie at least \code{r[i]} units away from the boundary of the box. } \value{ For \code{diameter.box3}, \code{shortside.box3} and \code{volume.box3}, a single numeric value. For \code{sidelengths.box3}, a vector of three numbers. For \code{eroded.volumes}, a numeric vector of the same length as \code{r}. } \author{\adrian and \rolf } \seealso{ \code{\link{as.box3}} } \examples{ X <- box3(c(0,10),c(0,10),c(0,5)) diameter(X) volume(X) sidelengths(X) shortside(X) hd <- shortside(X)/2 eroded.volumes(X, seq(0,hd, length=10)) } \keyword{spatial} \keyword{math} spatstat/man/measureVariation.Rd0000644000176200001440000000423013333543263016501 0ustar liggesusers\name{measureVariation} \alias{measureVariation} \alias{measurePositive} \alias{measureNegative} \alias{totalVariation} \title{ Positive and Negative Parts, and Variation, of a Measure } \description{ Given a measure \code{A} (object of class \code{"msr"}) these functions find the positive part, negative part and variation of \code{A}. 
} \usage{ measurePositive(x) measureNegative(x) measureVariation(x) totalVariation(x) } \arguments{ \item{x}{ A measure (object of class \code{"msr"}). } } \details{ The functions \code{measurePositive} and \code{measureNegative} return the positive and negative parts of the measure, and \code{measureVariation} returns the variation (sum of positive and negative parts). The function \code{totalVariation} returns the total variation norm. If \eqn{\mu} is a signed measure, it can be represented as \deqn{\mu = \mu_{+} - \mu_{-}}{\mu = \mu[+] - \mu[-]} where \eqn{\mu_{+}}{\mu[+]} and \eqn{\mu_{-}}{\mu[-]} are \emph{nonnegative} measures called the positive and negative parts of \eqn{\mu}. In a nutshell, the positive part of \eqn{\mu} consists of all positive contributions or increments, and the negative part consists of all negative contributions multiplied by \code{-1}. The variation \eqn{|\mu|} is defined by \deqn{\mu = \mu_{+} + \mu_{-}}{\mu = \mu[+] + \mu[-]} and is also a nonnegative measure. The total variation norm is the integral of the variation. } \value{ The result of \code{measurePositive}, \code{measureNegative} and \code{measureVariation} is another measure (object of class \code{"msr"}) on the same spatial domain. The result of \code{totalVariation} is a non-negative number. } \references{ Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian. 
} \seealso{ \code{\link{msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{measureDiscrete}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") measurePositive(rp) measureNegative(rp) measureVariation(rp) # total variation norm totalVariation(rp) } \keyword{spatial} \keyword{math} spatstat/man/methods.boxx.Rd0000644000176200001440000000254713333543263015616 0ustar liggesusers\name{methods.boxx} \Rdversion{1.1} \alias{methods.boxx} %DoNotExport \alias{print.boxx} \alias{unitname.boxx} \alias{unitname<-.boxx} \title{ Methods for Multi-Dimensional Box } \description{ Methods for class \code{"boxx"}. } \usage{ \method{print}{boxx}(x, ...) \method{unitname}{boxx}(x) \method{unitname}{boxx}(x) <- value } \arguments{ \item{x}{ Object of class \code{"boxx"} representing a multi-dimensional box. } \item{\dots}{ Other arguments passed to \code{print.default}. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}} and \code{\link{unitname}} for the class \code{"boxx"} of multi-dimensional boxes. The \code{print} method prints a description of the box, while the \code{unitname} method extracts the name of the unit of length in which the box coordinates are expressed. } \value{ For \code{print.boxx} the value is \code{NULL}. For \code{unitname.boxx} an object of class \code{"units"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{boxx}}, \code{\link{print}}, \code{\link{unitname}} } \examples{ X <- boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre", "metres")) X unitname(X) # Northern European usage unitname(X) <- "meter" } \keyword{spatial} \keyword{methods} spatstat/man/default.rmhcontrol.Rd0000644000176200001440000000277513333543263017011 0ustar liggesusers\name{default.rmhcontrol} \alias{default.rmhcontrol} \title{Set Default Control Parameters for Metropolis-Hastings Algorithm.} \description{ Given a fitted point process model, this command sets appropriate default values of the parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ default.rmhcontrol(model, w=NULL) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}) } \item{w}{ Optional. Window for the resulting simulated patterns. } } \value{ An object of class \code{"rmhcontrol"}. See \code{\link{rmhcontrol}}. } \details{ This function sets the values of the parameters controlling the iterative behaviour of the Metropolis-Hastings simulation algorithm. It uses default values that would be appropriate for the fitted point process model \code{model}. The expansion parameter \code{expand} is set to \code{\link{default.expand}(model, w)}. All other parameters revert to their defaults given in \code{\link{rmhcontrol.default}}. See \code{\link{rmhcontrol}} for the full list of control parameters. To override default parameters, use \code{\link{update.rmhcontrol}}. 
} \seealso{ \code{\link{rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ fit <- ppm(cells, ~1, Strauss(0.1)) default.rmhcontrol(fit) default.rmhcontrol(fit, w=square(2)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/convolve.im.Rd0000644000176200001440000000446513333543263015434 0ustar liggesusers\name{convolve.im} \alias{convolve.im} \title{Convolution of Pixel Images} \description{ Computes the convolution of two pixel images. } \usage{ convolve.im(X, Y=X, \dots, reflectX=FALSE, reflectY=FALSE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}. } \item{Y}{ Optional. Another pixel image. } \item{\dots}{Ignored.} \item{reflectX,reflectY}{ Logical values specifying whether the images \code{X} and \code{Y} (respectively) should be reflected in the origin before computing the convolution. } } \value{ A pixel image (an object of class \code{"im"}) representing the convolution of \code{X} and \code{Y}. } \details{ The \emph{convolution} of two pixel images \eqn{X} and \eqn{Y} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as \deqn{ C(v) = \int X(u)Y(v-u)\, {\rm d}u }{ C(v) = integral of X(u) * Y(v-u) du } where the integral is over all spatial locations \eqn{u}, and where \eqn{X(u)} and \eqn{Y(u)} denote the pixel values of \eqn{X} and \eqn{Y} respectively at location \eqn{u}. This command computes a discretised approximation to the convolution, using the Fast Fourier Transform. The return value is another pixel image (object of class \code{"im"}) whose greyscale values are values of the convolution. If \code{reflectX = TRUE} then the pixel image \code{X} is reflected in the origin (see \code{\link{reflect}}) before the convolution is computed, so that \code{convolve.im(X,Y,reflectX=TRUE)} is mathematically equivalent to \code{convolve.im(reflect(X), Y)}. 
(These two commands are not exactly equivalent, because the reflection is performed in the Fourier domain in the first command, and reflection is performed in the spatial domain in the second command). Similarly if \code{reflectY = TRUE} then the pixel image \code{Y} is reflected in the origin before the convolution is computed, so that \code{convolve.im(X,Y,reflectY=TRUE)} is mathematically equivalent to \code{convolve.im(X, reflect(Y))}. } \seealso{ \code{\link{imcov}}, \code{\link{reflect}} } \examples{ X <- as.im(letterR) Y <- as.im(square(1)) plot(convolve.im(X, Y)) plot(convolve.im(X, Y, reflectX=TRUE)) plot(convolve.im(X)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/msr.Rd0000644000176200001440000001546413571674202014001 0ustar liggesusers\name{msr} \alias{msr} \title{ Signed or Vector-Valued Measure } \description{ Defines an object representing a signed measure or vector-valued measure on a spatial domain. } \usage{ msr(qscheme, discrete, density, check=TRUE) } \arguments{ \item{qscheme}{ A quadrature scheme (object of class \code{"quad"} usually extracted from a fitted point process model). } \item{discrete}{ Vector or matrix containing the values (masses) of the discrete component of the measure, for each of the data points in \code{qscheme}. } \item{density}{ Vector or matrix containing values of the density of the diffuse component of the measure, for each of the quadrature points in \code{qscheme}. } \item{check}{ Logical. Whether to check validity of the arguments. } } \details{ This function creates an object that represents a signed or vector valued \emph{measure} on the two-dimensional plane. It is not normally called directly by the user. A signed measure is a classical mathematical object (Diestel and Uhl, 1977) which can be visualised as a collection of electric charges, positive and/or negative, spread over the plane. 
Electric charges may be concentrated at specific points (atoms), or spread diffusely over a region. An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. Spatial residuals for point process models (Baddeley et al, 2005, 2008) take the form of a real-valued or vector-valued measure. The function \code{\link{residuals.ppm}} returns an object of class \code{"msr"} representing the residual measure. Various other diagnostic tools such as \code{\link{dfbetas.ppm}} and \code{\link{dffit.ppm}} also return an object of class \code{"msr"}. The function \code{msr} would not normally be called directly by the user. It is the low-level creator function that makes an object of class \code{"msr"} from raw data. The first argument \code{qscheme} is a quadrature scheme (object of class \code{"quad"}). It is typically created by \code{\link{quadscheme}} or extracted from a fitted point process model using \code{\link{quad.ppm}}. A quadrature scheme contains both data points and dummy points. The data points of \code{qscheme} are used as the locations of the atoms of the measure. All quadrature points (i.e. both data points and dummy points) of \code{qscheme} are used as sampling points for the density of the continuous component of the measure. The argument \code{discrete} gives the values of the atomic component of the measure for each \emph{data point} in \code{qscheme}. It should be either a numeric vector with one entry for each data point, or a numeric matrix with one row for each data point. The argument \code{density} gives the values of the \emph{density} of the diffuse component of the measure, at each \emph{quadrature point} in \code{qscheme}. It should be either a numeric vector with one entry for each quadrature point, or a numeric matrix with one row for each quadrature point. 
If both \code{discrete} and \code{density} are vectors (or one-column matrices) then the result is a signed (real-valued) measure. Otherwise, the result is a vector-valued measure, with the dimension of the vector space being determined by the number of columns in the matrices \code{discrete} and/or \code{density}. (If one of these is a \eqn{k}-column matrix and the other is a 1-column matrix, then the latter is replicated to \eqn{k} columns). The class \code{"msr"} has methods for \code{print}, \code{plot} and \code{[}. There is also a function \code{\link{Smooth.msr}} for smoothing a measure. } \value{ An object of class \code{"msr"}. } \section{Guide to using measures}{ Objects of class \code{"msr"}, representing measures, are returned by the functions \code{\link{residuals.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{dffit.ppm}} and possibly by other functions. There are methods for printing and plotting a measure, along with many other operations, which can be listed by typing \code{methods(class="msr")}. The \code{print} and \code{summary} methods report basic information about a measure, such as the total value of the measure, and the spatial domain on which it is defined. The \code{plot} method displays the measure. It is documented separately in \code{\link{plot.msr}}. A measure can be smoothed using \code{\link{Smooth.msr}}, yielding a pixel image which is sometimes easier to interpret than the plot of the measure itself. The subset operator \code{[} can be used to restrict the measure to a subregion of space, or to extract one of the scalar components of a vector-valued measure. It is documented separately in \code{\link{[.msr}}. The total value of a measure, or the value on a subregion, can be obtained using \code{\link{integral.msr}}. The value of a measure \code{m} on a subregion \code{B} can be obtained by \code{integral(m, domain=B)} or \code{integral(m[B])}. 
The values of a measure \code{m} on each tile of a tessellation \code{A} can be obtained by \code{integral(m, domain=A)}. Some mathematical operations on measures are supported, such as multiplying a measure by a single number, or adding two measures. Measures can be separated into components in different ways using \code{\link{as.layered.msr}}, \code{\link{unstack.msr}} and \code{\link{split.msr}}. Internal components of the data structure of an \code{"msr"} object can be extracted using \code{\link{with.msr}}. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Diestel, J. and Uhl, J.J. Jr (1977) \emph{Vector measures}. Providence, RI, USA: American Mathematical Society. Halmos, P.R. (1950) \emph{Measure Theory}. Van Nostrand. } \author{ \adrian } \seealso{ \code{\link{plot.msr}}, \code{\link{Smooth.msr}}, \code{\link{[.msr}}, \code{\link{with.msr}}, \code{\link{split.msr}}, \code{\link{Ops.msr}}, \code{\link{measureVariation}}, \code{\link{measureContinuous}}. } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp rs <- residuals(fit, type="score") rs colnames(rs) # An equivalent way to construct the Pearson residual measure by hand Q <- quad.ppm(fit) lambda <- fitted(fit) slam <- sqrt(lambda) Z <- is.data(Q) m <- msr(Q, discrete=1/slam[Z], density = -slam) m } \keyword{spatial} \keyword{models} spatstat/man/Extract.linnet.Rd0000644000176200001440000000351313333543263016070 0ustar liggesusers\name{Extract.linnet} \alias{[.linnet} \title{Extract Subset of Linear Network} \description{ Extract a subset of a linear network. 
} \usage{ \method{[}{linnet}(x, i, \dots, snip=TRUE) } \arguments{ \item{x}{ A linear network (object of class \code{"linnet"}). } \item{i}{ Spatial window defining the subregion. An object of class \code{"owin"}. } \item{snip}{ Logical. If \code{TRUE} (the default), segments of \code{x} which cross the boundary of \code{i} will be cut by the boundary. If \code{FALSE}, these segments will be deleted. } \item{\dots}{Ignored.} } \value{ Another linear network (object of class \code{"linnet"}). } \details{ This function computes the intersection between the linear network \code{x} and the domain specified by \code{i}. This function is a method for the subset operator \code{"["} for linear networks (objects of class \code{"linnet"}). It is provided mainly for completeness. The index \code{i} should be a window. The argument \code{snip} specifies what to do with segments of \code{x} which cross the boundary of \code{i}. If \code{snip=FALSE}, such segments are simply deleted. If \code{snip=TRUE} (the default), such segments are cut into pieces by the boundary of \code{i}, and those pieces which lie inside the window \code{i} are included in the resulting network. } \examples{ p <- par(mfrow=c(1,2), mar=0.2+c(0,0,1,0)) B <- owin(c(0.1,0.7),c(0.19,0.6)) plot(simplenet, main="x[w, snip=TRUE]") plot(simplenet[B], add=TRUE, col="green", lwd=3) plot(B, add=TRUE, border="red", lty=3) plot(simplenet, main="x[w, snip=FALSE]") plot(simplenet[B, snip=FALSE], add=TRUE, col="green", lwd=3) plot(B, add=TRUE, border="red", lty=3) par(p) } \author{ \adrian, \rolf, \ege and Suman Rakshit. } \keyword{spatial} \keyword{manip} spatstat/man/shift.Rd0000644000176200001440000000170413333543264014304 0ustar liggesusers\name{shift} \alias{shift} \title{Apply Vector Translation} \description{ Applies a vector shift of the plane to a geometrical object, such as a point pattern or a window. 
} \usage{ shift(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{\dots}{Arguments determining the shift vector.} } \value{ Another object of the same type, representing the result of applying the shift. } \details{ This is generic. Methods are provided for point patterns (\code{\link{shift.ppp}}) and windows (\code{\link{shift.owin}}). The object is translated by the vector \code{vec}. } \seealso{ \code{\link{shift.ppp}}, \code{\link{shift.owin}}, \code{\link{rotate}}, \code{\link{affine}}, \code{\link{periodify}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/multiplicity.ppp.Rd0000644000176200001440000000403413333543263016513 0ustar liggesusers\name{multiplicity.ppp} \alias{multiplicity} \alias{multiplicity.default} \alias{multiplicity.data.frame} \alias{multiplicity.ppp} \alias{multiplicity.ppx} \title{Count Multiplicity of Duplicate Points} \description{ Counts the number of duplicates for each point in a spatial point pattern. } \usage{ multiplicity(x) \method{multiplicity}{ppp}(x) \method{multiplicity}{ppx}(x) \method{multiplicity}{data.frame}(x) \method{multiplicity}{default}(x) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}) or a vector, matrix or data frame. } } \value{ A vector of integers (multiplicities) of length equal to the number of points in \code{x}. } \details{ Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. For each point in \code{x}, the function \code{multiplicity} counts how many points are identical to it, and returns the vector of counts. 
The argument \code{x} can also be a vector, a matrix or a data frame. When \code{x} is a vector, \code{m <- multiplicity(x)} is a vector of the same length as \code{x}, and \code{m[i]} is the number of elements of \code{x} that are identical to \code{x[i]}. When \code{x} is a matrix or data frame, \code{m <- multiplicity(x)} is a vector of length equal to the number of rows of \code{x}, and \code{m[i]} is the number of rows of \code{x} that are identical to the \code{i}th row. } \seealso{ \code{\link{ppp.object}}, \code{\link{duplicated.ppp}}, \code{\link{unique.ppp}} } \examples{ X <- ppp(c(1,1,0.5,1), c(2,2,1,2), window=square(3), check=FALSE) m <- multiplicity(X) # unique points in X, marked by their multiplicity first <- !duplicated(X) Y <- X[first] \%mark\% m[first] } \author{\adrian , \rolf and Sebastian Meyer. } \keyword{spatial} \keyword{utilities} spatstat/man/plot.bermantest.Rd0000644000176200001440000000551413333543264016313 0ustar liggesusers\name{plot.bermantest} \alias{plot.bermantest} \title{Plot Result of Berman Test} \description{ Plot the result of Berman's test of goodness-of-fit } \usage{ \method{plot}{bermantest}(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"bermantest"} produced by \code{\link{berman.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.ecdf}}. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical distribution curve. } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the predicted (null) distribution curve. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"bermantest"}. An object of this class represents the outcome of Berman's test of goodness-of-fit of a spatial Poisson point process model, computed by \code{\link{berman.test}}. For the \emph{Z1} test (i.e. 
if \code{x} was computed using \code{berman.test( ,which="Z1")}), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, \eqn{\hat F}{Fhat}, and the predicted cumulative distribution function of the covariate under the model, \eqn{F_0}{F0}, both plotted against the value of the covariate. Two vertical lines show the mean values of these two distributions. If the model is correct, the two curves should be close; the test is based on comparing the two vertical lines. For the \emph{Z2} test (i.e. if \code{x} was computed using \code{berman.test( ,which="Z2")}), the plot displays the empirical cumulative distribution function of the values \eqn{U_i = F_0(Y_i)}{U[i] = F0(Y[i])} where \eqn{Y_i}{Y[i]} is the value of the covariate at the \eqn{i}-th data point. The diagonal line with equation \eqn{y=x} is also shown. Two vertical lines show the mean of the values \eqn{U_i}{U[i]} and the value \eqn{1/2}. If the model is correct, the two curves should be close. The test is based on comparing the two vertical lines. 
} \seealso{ \code{\link{berman.test}} } \examples{ # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(-x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- berman.test(fit0, xcoord, "Z1") # plot result of test plot(k, col="red", col0="green") # Z2 test k2 <- berman.test(fit0, xcoord, "Z2") plot(k2, col="red", col0="green") } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/stratrand.Rd0000644000176200001440000000403113333543264015165 0ustar liggesusers\name{stratrand} \alias{stratrand} \title{Stratified random point pattern} \description{ Generates a \dQuote{stratified random} pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points in each tile. } \usage{ stratrand(window, nx, ny, k = 1) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each row. } \item{ny}{Number of tiles in each column. } \item{k}{Number of random points to generate in each tile. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors giving the coordinates of the random points. } \details{ The bounding rectangle of \code{window} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. Note that some of these grid points may lie outside the window, if \code{window} is not of type \code{"rectangle"}. The function \code{\link{inside.owin}} can be used to select those grid points which do lie inside the window. See the examples. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. 
} \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{gridcentres}} } \examples{ w <- unit.square() xy <- stratrand(w, 10, 10) \dontrun{ plot(w) points(xy) } # polygonal boundary bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) w <- owin(c(0,1), c(0,1), poly=bdry) xy <- stratrand(w, 10, 10, 3) \dontrun{ plot(w) points(xy) } # determine which grid points are inside polygon ok <- inside.owin(xy$x, xy$y, w) \dontrun{ plot(w) points(xy$x[ok], xy$y[ok]) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/thomas.estpcf.Rd0000644000176200001440000001472513333543264015754 0ustar liggesusers\name{thomas.estpcf} \alias{thomas.estpcf} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ thomas.estpcf(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function \code{\link{pcf}}. 
The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical pair correlation function of the Thomas process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of stating values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical pair correlation function of the Thomas process is \deqn{ g(r) = 1 + \frac 1 {4\pi \kappa \sigma^2} \exp(-\frac{r^2}{4\sigma^2})). 
}{ g(r) = 1 + exp(-r^2/(4 * sigma^2)))/(4 * pi * kappa * sigma^2). } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. 
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ \adrian } \seealso{ \code{\link{thomas.estK}} \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ data(redwood) u <- thomas.estpcf(redwood, c(kappa=10, scale=0.1)) u plot(u, legendpos="topright") u2 <- thomas.estpcf(redwood, c(kappa=10, scale=0.1), pcfargs=list(stoyan=0.12)) } \keyword{spatial} \keyword{models} spatstat/man/symbolmap.Rd0000644000176200001440000001213013333543264015165 0ustar liggesusers\name{symbolmap} \alias{symbolmap} \title{ Graphics Symbol Map } \description{ Create a graphics symbol map that associates data values with graphical symbols. } \usage{ symbolmap(\dots, range = NULL, inputs = NULL) } \arguments{ \item{\dots}{ Named arguments specifying the graphical parameters. See Details. } \item{range}{ Optional. Range of numbers that are mapped. A numeric vector of length 2 giving the minimum and maximum values that will be mapped. Incompatible with \code{inputs}. } \item{inputs}{ Optional. A vector containing all the data values that will be mapped to symbols. 
Incompatible with \code{range}. } } \details{ A graphical symbol map is an association between data values and graphical symbols. The command \code{symbolmap} creates an object of class \code{"symbolmap"} that represents a graphical symbol map. Once a symbol map has been created, it can be applied to any suitable data to generate a plot of those data. This makes it easy to ensure that the \emph{same} symbol map is used in two different plots. The symbol map can be plotted as a legend to the plots, and can also be plotted in its own right. The possible values of data that will be mapped are specified by \code{range} or \code{inputs}. \itemize{ \item if \code{range} is given, it should be a numeric vector of length 2 giving the minimum and maximum values of the range of numbers that will be mapped. These limits must be finite. \item if \code{inputs} is given, it should be a vector of any atomic type (e.g. numeric, character, logical, factor). This vector contains all the possible data values that will be mapped. \item If neither \code{range} nor \code{inputs} is given, it is assumed that the possible values are real numbers. } The association of data values with graphical symbols is specified by the other arguments \code{\dots} which are given in \code{name=value} form. These arguments specify the kinds of symbols that will be used, the sizes of the symbols, and graphics parameters for drawing the symbols. Each graphics parameter can be either a single value, for example \code{shape="circles"}, or a \code{function(x)} which determines the value of the graphics parameter as a function of the data \code{x}, for example \code{shape=function(x) ifelse(x > 0, "circles", "squares")}. Colourmaps (see \code{\link{colourmap}}) are also acceptable because they are functions. Currently recognised graphics parameters, and their allowed values, are: \describe{ \item{shape}{ The shape of the symbol: currently either \code{"circles"}, \code{"squares"}, \code{"arrows"} or \code{NA}. 
This parameter takes precedence over \code{pch}. } \item{size}{ The size of the symbol: a positive number or zero. } \item{pch}{ Graphics character code: a positive integer, or a single character. See \code{\link[graphics]{par}}. } \item{cex}{ Graphics character expansion factor. } \item{cols}{ Colour of plotting characters. } \item{fg,bg}{ Colour of foreground (or symbol border) and background (or symbol interior). } \item{col,lwd,lty}{ Colour, width and style of lines. } \item{etch}{ Logical. If \code{TRUE}, each symbol is surrounded by a border drawn in the opposite colour, which improves its visibility against the background. Default is \code{FALSE}. } \item{direction,headlength,headangle,arrowtype}{ Numeric parameters of arrow symbols, applicable when \code{shape="arrows"}. Here \code{direction} is the direction of the arrow in degrees anticlockwise from the \eqn{x} axis; \code{headlength} is the length of the head of the arrow in coordinate units; \code{headangle} is the angle subtended by the point of the arrow; and \code{arrowtype} is an integer code specifying which ends of the shaft have arrowheads attached (0 means no arrowheads, 1 is an arrowhead at the start of the shaft, 2 is an arrowhead at the end of the shaft, and 3 is arrowheads at both ends). } } A vector of colour values is also acceptable for the arguments \code{col,cols,fg,bg} if \code{range} is specified. } \value{ An object of class \code{"symbolmap"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.symbolmap}} to plot the symbol map itself. \code{\link{invoke.symbolmap}} to apply the symbol map to some data and plot the resulting symbols. \code{\link{update.symbolmap}} to change the symbol map. 
} \examples{ g <- symbolmap(inputs=letters[1:10], pch=11:20) g1 <- symbolmap(range=c(0,100), size=function(x) x/50) g2 <- symbolmap(shape=function(x) ifelse(x > 0, "circles", "squares"), size=function(x) sqrt(ifelse(x > 0, x/pi, -x)), bg = function(x) ifelse(abs(x) < 1, "red", "black")) colmap <- colourmap(topo.colors(20), range=c(0,10)) g3 <- symbolmap(pch=21, bg=colmap, range=c(0,10)) plot(g3) } \keyword{spatial} \keyword{hplot} spatstat/man/nndist.ppx.Rd0000644000176200001440000000611513333543263015274 0ustar liggesusers\name{nndist.ppx} \alias{nndist.ppx} \title{Nearest Neighbour Distances in Any Dimensions} \description{ Computes the distance from each point to its nearest neighbour in a multi-dimensional point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ \method{nndist}{ppx}(X, \dots, k=1) } \arguments{ \item{X}{ Multi-dimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. } \details{ This function computes the Euclidean distance from each point in a multi-dimensional point pattern to its nearest neighbour (the nearest other point of the pattern). 
If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic; this function \code{nndist.ppx} is the method for the class \code{"ppx"}. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). } \seealso{ \code{\link{nndist}}, \code{\link{pairdist}}, \code{\link{nnwhich}} } \examples{ df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) X <- ppx(data=df) # nearest neighbours d <- nndist(X) # second nearest neighbours d2 <- nndist(X, k=2) # first, second and third nearest d1to3 <- nndist(X, k=1:3) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/clusterradius.Rd0000644000176200001440000000671613571674202016071 0ustar liggesusers\name{clusterradius} \alias{clusterradius} \alias{clusterradius.character} \alias{clusterradius.kppm} \title{ Compute or Extract Effective Range of Cluster Kernel } \description{ Given a cluster point process model, this command returns a value beyond which the the probability density of the cluster offspring is neglible. 
} \usage{ clusterradius(model, \dots) \method{clusterradius}{kppm}(model, \dots, thresh = NULL, precision = FALSE) \method{clusterradius}{character}(model, \dots, thresh = NULL, precision = FALSE) } \arguments{ \item{model}{ Cluster model. Either a fitted cluster or Cox model (object of class \code{"kppm"}), or a character string specifying the type of cluster model. } \item{\dots}{ Parameter values for the model, when \code{model} is a character string. } \item{thresh}{ Numerical threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be considered neglible. A sensible default is provided. } \item{precision}{ Logical. If \code{precision=TRUE} the precision of the calculated range is returned as an attribute to the range. See details. } } \details{ Given a cluster model this function by default returns the effective range of the model with the given parameters as used in spatstat. For the \Matern cluster model (see e.g. \code{\link{rMatClust}}) this is simply the finite radius of the offsring density given by the paramter \code{scale} irrespective of other options given to this function. The remaining models in spatstat have infinite theoretical range, and an effective finite value is given as follows: For the Thomas model (see e.g. \code{\link{rThomas}} the default is \code{4*scale} where scale is the scale or standard deviation parameter of the model. If \code{thresh} is given the value is instead found as described for the other models below. For the Cauchy model (see e.g. \code{\link{rCauchy}}) and the Variance Gamma (Bessel) model (see e.g. \code{\link{rVarGamma}}) the value of \code{thresh} defaults to 0.001, and then this is used to compute the range numerically as follows. 
If \eqn{k(x,y)=k_0(r)}{k(x,y)=k0(r)} with \eqn{r=\sqrt(x^2+y^2)}{r=sqrt(x^2+y^2)} denotes the isotropic cluster kernel then \eqn{f(r) = 2 \pi r k_0(r)}{f(r) = 2 \pi r k0(r)} is the density function of the offspring distance from the parent. The range is determined as the value of \eqn{r} where \eqn{f(r)} falls below \code{thresh} times \eqn{k_0(r)}{k0(r)}. If \code{precision=TRUE} the precision related to the chosen range is returned as an attribute. Here the precision is defined as the polar integral of the kernel from distance 0 to the calculated range. Ideally this should be close to the value 1 which would be obtained for the true theretical infinite range. } \value{ A positive numeric. Additionally, the precision related to this range value is returned as an attribute \code{"prec"}, if \code{precision=TRUE}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{clusterkernel}}, \code{\link{kppm}}, \code{\link{rMatClust}}, \code{\link{rThomas}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}. } \examples{ fit <- kppm(redwood ~ x, "MatClust") clusterradius(fit) clusterradius("Thomas", scale = .1) clusterradius("Thomas", scale = .1, thresh = 0.001) clusterradius("VarGamma", scale = .1, nu = 2, precision = TRUE) } \keyword{spatial} spatstat/man/distcdf.Rd0000644000176200001440000000676213333543263014617 0ustar liggesusers\name{distcdf} \alias{distcdf} \title{Distribution Function of Interpoint Distance } \description{ Computes the cumulative distribution function of the distance between two independent random points in a given window or windows. } \usage{ distcdf(W, V=W, \dots, dW=1, dV=dW, nr=1024, regularise=TRUE) } \arguments{ \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution for the calculation. 
} \item{dV, dW}{ Optional. Probability densities (not necessarily normalised) for the first and second random points respectively. Data in any format acceptable to \code{\link{as.im}}, for example, a \code{function(x,y)} or a pixel image or a numeric value. The default corresponds to a uniform distribution over the window. } \item{nr}{ Integer. The number of values of interpoint distance \eqn{r} for which the CDF will be computed. Should be a large value! } \item{regularise}{ Logical value indicating whether to smooth the results for very small distances, to avoid discretisation artefacts. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \details{ This command computes the Cumulative Distribution Function \eqn{ CDF(r) = Prob(T \le r) }{ CDF(r) = Prob(T \le r) } of the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. In the simplest case, the command \code{distcdf(W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}. In the most general case the first point \eqn{X_1}{X1} is random in the window \code{W} with a probability density proportional to \code{dW}, and the second point \eqn{X_2}{X2} is random in a different window \code{V} with probability density proportional to \code{dV}. The values of \code{dW} and \code{dV} must be finite and nonnegative. The calculation is performed by numerical integration of the set covariance function \code{\link{setcov}} for uniformly distributed points, and by computing the covariance function \code{\link{imcov}} in the general case. The accuracy of the result depends on the pixel resolution used to represent the windows: this is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. 
For example use \code{eps=0.1} to specify pixels of size 0.1 units. The arguments \code{W} or \code{V} may also be point patterns (objects of class \code{"ppp"}). The result is the cumulative distribution function of the distance from a randomly selected point in the point pattern, to a randomly selected point in the other point pattern or window. If \code{regularise=TRUE} (the default), values of the cumulative distribution function for very short distances are smoothed to avoid discretisation artefacts. Smoothing is applied to all distances shorter than the width of 7 pixels. } \seealso{ \code{\link{setcov}}, \code{\link{as.mask}}. } \examples{ # The unit disc B <- disc() plot(distcdf(B)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/is.owin.Rd0000644000176200001440000000127113333543263014553 0ustar liggesusers\name{is.owin} \alias{is.owin} \title{Test Whether An Object Is A Window} \description{ Checks whether its argument is a window (object of class \code{"owin"}). } \usage{ is.owin(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a window object of class \code{"owin"}. See \code{\link{owin.object}} for details of this class. The result is determined to be \code{TRUE} if \code{x} inherits from \code{"owin"}, i.e. if \code{x} has \code{"owin"} amongst its classes. } \value{ \code{TRUE} if \code{x} is a point pattern, otherwise \code{FALSE}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/tess.Rd0000644000176200001440000001352313442413652014145 0ustar liggesusers\name{tess} \alias{tess} \title{Create a Tessellation} \description{ Creates an object of class \code{"tess"} representing a tessellation of a spatial region. 
} \usage{ tess(..., xgrid = NULL, ygrid = NULL, tiles = NULL, image = NULL, window=NULL, marks=NULL, keepempty=FALSE, unitname=NULL, check=TRUE) } \arguments{ \item{\dots}{Ignored.} \item{xgrid,ygrid}{Cartesian coordinates of vertical and horizontal lines determining a grid of rectangles. Incompatible with other arguments. } \item{tiles}{List of tiles in the tessellation. A list, each of whose elements is a window (object of class \code{"owin"}). Incompatible with other arguments. } \item{image}{ Pixel image which specifies the tessellation. Incompatible with other arguments. } \item{window}{ Optional. The spatial region which is tessellated (i.e. the union of all the tiles). An object of class \code{"owin"}. } \item{marks}{ Optional vector or data frame of marks associated with the tiles. } \item{keepempty}{ Logical flag indicating whether empty tiles should be retained or deleted. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. If this argument is missing or \code{NULL}, information about the unitname will be extracted from the other arguments. If this argument is given, it overrides any other information about the unitname. } \item{check}{ Logical value indicating whether to check the validity of the input data. It is strongly recommended to use the default value \code{check=TRUE}. } } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. Three types of tessellation are supported: \describe{ \item{rectangular:}{ tiles are rectangles, with sides parallel to the \code{x} and \code{y} axes. They may or may not have equal size and shape. The arguments \code{xgrid} and \code{ygrid} determine the positions of the vertical and horizontal grid lines, respectively. 
(See \code{\link{quadrats}} for another way to do this.) } \item{tile list:}{ tiles are arbitrary spatial regions. The argument \code{tiles} is a list of these tiles, which are objects of class \code{"owin"}. } \item{pixel image:}{ Tiles are subsets of a fine grid of pixels. The argument \code{image} is a pixel image (object of class \code{"im"}) with factor values. Each level of the factor represents a different tile of the tessellation. The pixels that have a particular value of the factor constitute a tile. } } The optional argument \code{window} specifies the spatial region formed by the union of all the tiles. In other words it specifies the spatial region that is divided into tiles by the tessellation. If this argument is missing or \code{NULL}, it will be determined by computing the set union of all the tiles. This is a time-consuming computation. For efficiency it is advisable to specify the window. Note that the validity of the window will not be checked. Empty tiles may occur, either because one of the entries in the list \code{tiles} is an empty window, or because one of the levels of the factor-valued pixel image \code{image} does not occur in the pixel data. When \code{keepempty=TRUE}, empty tiles are permitted. When \code{keepempty=FALSE} (the default), tiles are not allowed to be empty, and any empty tiles will be removed from the tessellation. There are methods for \code{print}, \code{plot}, \code{[} and \code{[<-} for tessellations. Use \code{\link{tiles}} to extract the list of tiles in a tessellation, \code{\link{tilenames}} to extract the names of the tiles, and \code{\link{tile.areas}} to compute their areas. The tiles may have marks, which can be extracted by \code{\link{marks.tess}} and changed by \code{\link{marks<-.tess}}. Tessellations can be used to classify the points of a point pattern, in \code{\link{split.ppp}}, \code{\link{cut.ppp}} and \code{\link{by.ppp}}. 
To construct particular tessellations, see \code{\link{quadrats}}, \code{\link{hextess}}, \code{\link{dirichlet}}, \code{\link{delaunay}}, \code{\link{venn.tess}}, \code{\link{polartess}}, \code{\link{quantess}} and \code{\link{rpoislinetess}}. } \value{ An object of class \code{"tess"} representing the tessellation. } \seealso{ \code{\link{marks.tess}}, \code{\link{plot.tess}}, \code{\link{[.tess}}, \code{\link{as.tess}}, \code{\link{tiles}}, \code{\link{intersect.tess}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}}, \code{\link{by.ppp}}, \code{\link{bdist.tiles}}, \code{\link{tile.areas}}, \code{\link{as.function.tess}}. To construct particular tessellations, see \code{\link{quadrats}}, \code{\link{hextess}}, \code{\link{venn.tess}}, \code{\link{polartess}}, \code{\link{dirichlet}}, \code{\link{delaunay}}, \code{\link{quantess}} and \code{\link{rpoislinetess}}. To divide space into pieces containing equal amounts of stuff, use \code{\link{quantess}}. To convert a tessellation to a function, for use as a spatial covariate (associating a numerical value with each tile of the tessellation) use \code{\link{as.function.tess}}. } \examples{ A <- tess(xgrid=0:4,ygrid=0:4) A plot(A) B <- A[c(1, 2, 5, 7, 9)] B v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] E <- tess(image=v) plot(E) G <- tess(image=v, marks=toupper(levels(v)), unitname="km") G } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/collapse.fv.Rd0000644000176200001440000000557213333543263015411 0ustar liggesusers\name{collapse.fv} \alias{collapse.fv} \alias{collapse.anylist} \title{ Collapse Several Function Tables into One } \description{ Combines several function tables (objects of class \code{"fv"}) into a single function table, merging columns that are identical and relabelling columns that are different. 
} \usage{ \method{collapse}{fv}(object, \dots, same = NULL, different = NULL) \method{collapse}{anylist}(object, \dots, same = NULL, different = NULL) } \arguments{ \item{object}{ An object of class \code{"fv"}, or a list of such objects. } \item{\dots}{ Additional objects of class \code{"fv"}. } \item{same}{ Character string or character vector specifying a column or columns, present in each \code{"fv"} object, that are identical in each object. This column or columns will be included only once. } \item{different}{ Character string or character vector specifying a column or columns, present in each \code{"fv"} object, that contain different values in each object. Each of these columns of data will be included, with labels that distinguish them from each other. } } \details{ This is a method for the generic function \code{\link[nlme]{collapse}}. It combines the data in several function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) to make a single function table. It is essentially a smart wrapper for \code{\link{cbind.fv}}. A typical application is to calculate the same summary statistic (such as the \eqn{K} function) for different point patterns, and then to use \code{collapse.fv} to combine the results into a single object that can easily be plotted. See the Examples. The arguments \code{object} and \code{\dots} should be function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) that are compatible in the sense that they have the same values of the function argument. The argument \code{same} identifies any columns that are present in each function table, and which are known to contain exactly the same values in each table. This column or columns will be included only once in the result. The argument \code{different} identifies any columns that are present in each function table, and which contain different numerical values in each table. Each of these columns will be included, with labels to distinguish them. 
Columns that are not named in \code{same} or \code{different} will not be included. } \value{ Object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}} } \examples{ # generate simulated data X <- replicate(3, rpoispp(100), simplify=FALSE) names(X) <- paste("Simulation", 1:3) # compute K function estimates Klist <- anylapply(X, Kest) # collapse K <- collapse(Klist, same="theo", different="iso") K } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/integral.msr.Rd0000644000176200001440000000376713431414266015605 0ustar liggesusers\name{integral.msr} \alias{integral.msr} \title{ Integral of a Measure } \description{ Computes the integral (total value) of a measure over its domain. } \usage{ \method{integral}{msr}(f, domain=NULL, \dots) } \arguments{ \item{f}{ A signed measure or vector-valued measure (object of class \code{"msr"}). } \item{domain}{ Optional window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. } } \details{ The integral (total value of the measure over its domain) is calculated. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. For a multitype measure \code{m}, use \code{\link{split.msr}} to separate the contributions for each type of point, as shown in the Examples. } \value{ A numeric value, vector, or matrix. \code{integral(f)} returns a numeric value (for a signed measure) or a vector of values (for a vector-valued measure). If \code{domain} is a tessellation then \code{integral(f, domain)} returns a numeric vector with one entry for each tile (if \code{f} is a signed measure) or a numeric matrix with one row for each tile (if \code{f} is a vector-valued measure). 
} \seealso{ \code{\link{msr}}, \code{\link{integral}} } \examples{ fit <- ppm(cells ~ x) rr <- residuals(fit) integral(rr) # vector-valued measure rs <- residuals(fit, type="score") integral(rs) # multitype fitA <- ppm(amacrine ~ x) rrA <- residuals(fitA) sapply(split(rrA), integral) # multitype and vector-valued rsA <- residuals(fitA, type="score") sapply(split(rsA), integral) ## integral over a subregion integral(rr, domain=square(0.5)) ## integrals over the tiles of a tessellation integral(rr, domain=quadrats(cells, 2)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/Triplets.Rd0000644000176200001440000000632413333543262014776 0ustar liggesusers\name{Triplets} \alias{Triplets} \title{The Triplet Point Process Model} \description{ Creates an instance of Geyer's triplet interaction point process model which can then be fitted to point pattern data. } \usage{ Triplets(r) } \arguments{ \item{r}{The interaction radius of the Triplets process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Triplets process with interaction radius \eqn{r}. } \details{ The (stationary) Geyer triplet process (Geyer, 1999) with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each triplet of close points contributes a factor \eqn{\gamma}{gamma} to the density. A triplet of close points is a group of 3 points, each pair of which is closer than \eqn{r} units apart. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . 
beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of unordered triples of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Triplets process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Triplets process pairwise interaction is yielded by the function \code{Triplets()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Triplets()}. } \seealso{ \code{\link{ppm}}, \code{\link{triplet.family}}, \code{\link{ppm.object}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. 
} \examples{ Triplets(r=0.1) # prints a sensible description of itself \dontrun{ ppm(cells, ~1, Triplets(r=0.2)) # fit the stationary Triplets process to `cells' } ppm(cells, ~polynom(x,y,3), Triplets(r=0.2)) # fit a nonstationary Triplets process with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/simulate.lppm.Rd0000644000176200001440000000403313333543264015757 0ustar liggesusers\name{simulate.lppm} \alias{simulate.lppm} \title{Simulate a Fitted Point Process Model on a Linear Network} \description{ Generates simulated realisations from a fitted Poisson point process model on a linear network. } \usage{ \method{simulate}{lppm}(object, nsim=1, ..., new.coef=NULL, progress=(nsim > 1), drop=FALSE) } \arguments{ \item{object}{ Fitted point process model on a linear network. An object of class \code{"lppm"}. } \item{nsim}{ Number of simulated realisations. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{\dots}{ Arguments passed to \code{\link{predict.lppm}} to determine the spatial resolution of the image of the fitted intensity used in the simulation. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"lppm"} of fitted point process models on a linear network. Only Poisson process models are supported so far. Simulations are performed by \code{\link{rpoislpp}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"lpp"}) on the same linear network as the original data used to fit the model. 
The result also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ fit <- lppm(unmark(chicago) ~ y) simulate(fit)[[1]] } \seealso{ \code{\link{lppm}}, \code{\link{rpoislpp}}, \code{\link[stats]{simulate}} } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/hybrid.family.Rd0000644000176200001440000000167113333543263015732 0ustar liggesusers\name{hybrid.family} \alias{hybrid.family} \title{ Hybrid Interaction Family } \description{ An object describing the family of all hybrid interactions. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of all hybrid point process models. If you need to create a specific hybrid interaction model for use in modelling, use the function \code{\link{Hybrid}}. Anyway, \code{hybrid.family} is an object of class \code{"isf"} containing a function \code{hybrid.family$eval} for evaluating the sufficient statistics of any hybrid interaction point process model. } \seealso{ Use \code{\link{Hybrid}} to make hybrid interactions. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/update.ppm.Rd0000644000176200001440000001431513333543264015246 0ustar liggesusers\name{update.ppm} \alias{update.ppm} \title{Update a Fitted Point Process Model} \description{ \code{update} method for class \code{"ppm"}. } \usage{ \method{update}{ppm}(object, \dots, fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) } \arguments{ \item{object}{ An existing fitted point process model, typically produced by \code{\link{ppm}}. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{ppm}}. 
} \item{fixdummy}{ Logical flag indicating whether the quadrature scheme for the call to \code{\link{ppm}} should use the same set of dummy points as that in the original call. } \item{use.internal}{ Optional. Logical flag indicating whether the model should be refitted using the internally saved data (\code{use.internal=TRUE}) or by re-evaluating these data in the current frame (\code{use.internal=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{ppm}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}} for details of this class. \code{update.ppm} will modify the point process model specified by \code{object} according to the new arguments given, then re-fit it. The actual re-fitting is performed by the model-fitting function \code{\link{ppm}}. If you are comparing several model fits to the same data, or fits of the same model to different data, it is strongly advisable to use \code{update.ppm} rather than trying to fit them by hand. This is because \code{update.ppm} re-fits the model in a way which is comparable to the original fit. The arguments \code{...} are matched to the formal arguments of \code{\link{ppm}} as follows. First, all the \emph{named} arguments in \code{...} are matched with the formal arguments of \code{\link{ppm}}. Use \code{name=NULL} to remove the argument \code{name} from the call. Second, any \emph{unnamed} arguments in \code{...} are matched with formal arguments of \code{\link{ppm}} if the matching is obvious from the class of the object. 
Thus \code{...} may contain \itemize{ \item exactly one argument of class \code{"ppp"} or \code{"quad"}, which will be interpreted as the named argument \code{Q}; \item exactly one argument of class \code{"formula"}, which will be interpreted as the named argument \code{trend} (or as specifying a change to the trend formula); \item exactly one argument of class \code{"interact"}, which will be interpreted as the named argument \code{interaction}; \item exactly one argument of class \code{"data.frame"}, which will be interpreted as the named argument \code{covariates}. } The \code{trend} argument can be a formula that specifies a \emph{change} to the current trend formula. For example, the formula \code{~ . + Z} specifies that the additional covariate \code{Z} will be added to the right hand side of the trend formula in the existing \code{object}. The argument \code{fixdummy=TRUE} ensures comparability of the objects before and after updating. When \code{fixdummy=FALSE}, calling \code{update.ppm} is exactly the same as calling \code{ppm} with the updated arguments. However, the original and updated models are not strictly comparable (for example, their pseudolikelihoods are not strictly comparable) unless they used the same set of dummy points for the quadrature scheme. Setting \code{fixdummy=TRUE} ensures that the re-fitting will be performed using the same set of dummy points. This is highly recommended. The value of \code{use.internal} determines where to find data to re-evaluate the model (data for the arguments mentioned in the original call to \code{ppm} that are not overwritten by arguments to \code{update.ppm}). If \code{use.internal=FALSE}, then arguments to \code{ppm} are \emph{re-evaluated} in the frame where you call \code{update.ppm}. This is like the behaviour of the other methods for \code{\link{update}}. This means that if you have changed any of the objects referred to in the call, these changes will be taken into account. 
Also if the original call to \code{ppm} included any calls to random number generators, these calls will be recomputed, so that you will get a different outcome of the random numbers. If \code{use.internal=TRUE}, then arguments to \code{ppm} are extracted from internal data stored inside the current fitted model \code{object}. This is useful if you don't want to re-evaluate anything. It is also necessary if \code{object} has been restored from a dump file using \code{\link{load}} or \code{\link{source}}. In such cases, we have lost the environment in which \code{object} was fitted, and data cannot be re-evaluated. By default, if \code{use.internal} is missing, \code{update.ppm} will re-evaluate the arguments if this is possible, and use internal data if not. } \value{ Another fitted point process model (object of class \code{"ppm"}). } \examples{ data(nztrees) data(cells) # fit the stationary Poisson process fit <- ppm(nztrees, ~ 1) # fit a nonstationary Poisson process fitP <- update(fit, trend=~x) fitP <- update(fit, ~x) # change the trend formula: add another term to the trend fitPxy <- update(fitP, ~ . + y) # change the trend formula: remove the x variable fitPy <- update(fitPxy, ~ . - x) # fit a stationary Strauss process fitS <- update(fit, interaction=Strauss(13)) fitS <- update(fit, Strauss(13)) # refit using a different edge correction fitS <- update(fitS, correction="isotropic") # re-fit the model to a subset # of the original point pattern nzw <- owin(c(0,148),c(0,95)) nzsub <- nztrees[,nzw] fut <- update(fitS, Q=nzsub) fut <- update(fitS, nzsub) # WARNING: the point pattern argument is called 'Q' ranfit <- ppm(rpoispp(42), ~1, Poisson()) ranfit # different random data! 
update(ranfit) # the original data update(ranfit, use.internal=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/relevel.im.Rd0000644000176200001440000000256013333543264015232 0ustar liggesusers\name{relevel.im} \alias{relevel.im} \alias{relevel.ppp} \alias{relevel.ppx} \title{ Reorder Levels of a Factor-Valued Image or Pattern } \description{ For a pixel image with factor values, or a point pattern with factor-valued marks, the levels of the factor are re-ordered so that the level \code{ref} is first and the others are moved down. } \usage{ \method{relevel}{im}(x, ref, \dots) \method{relevel}{ppp}(x, ref, \dots) \method{relevel}{ppx}(x, ref, \dots) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}) with factor values, or a point pattern (object of class \code{"ppp"}, \code{"ppx"}, \code{"lpp"} or \code{"pp3"}) with factor-valued marks. } \item{ref}{ The reference level. } \item{\dots}{ Ignored. } } \details{ These functions are methods for the generic \code{\link[stats]{relevel}}. If \code{x} is a pixel image (object of class \code{"im"}) with factor values, or a point pattern (object of class \code{"ppp"}, \code{"ppx"}, \code{"lpp"} or \code{"pp3"}) with factor-valued marks, the levels of the factor are changed so that the level specified by \code{ref} comes first. } \value{ Object of the same kind as \code{x}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{mergeLevels}} } \examples{ amacrine relevel(amacrine, "on") } \keyword{manip} \keyword{spatial} spatstat/man/crossdist.pp3.Rd0000644000176200001440000000406513333543263015707 0ustar liggesusers\name{crossdist.pp3} \alias{crossdist.pp3} \title{Pairwise distances between two different three-dimensional point patterns} \description{ Computes the distances between pairs of points taken from two different three-dimensional point patterns. 
} \usage{ \method{crossdist}{pp3}(X, Y, \dots, periodic=FALSE, squared=FALSE) } \arguments{ \item{X,Y}{ Point patterns in three dimensions (objects of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns in three-dimensional space, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for three-dimensional point patterns (objects of class \code{"pp3"}). This function expects two point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. Alternatively if \code{periodic=TRUE}, then provided the windows containing \code{X} and \code{Y} are identical and are rectangular, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. } \seealso{ \code{\link{crossdist}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{G3est}} } \examples{ X <- runifpoint3(20) Y <- runifpoint3(30) d <- crossdist(X, Y) d <- crossdist(X, Y, periodic=TRUE) } \author{ \adrian based on code for two dimensions by Pavel Grabarnik. 
} \keyword{spatial} \keyword{math} spatstat/man/scanLRTS.Rd0000644000176200001440000001167213333543264014625 0ustar liggesusers\name{scanLRTS} \alias{scanLRTS} \title{ Likelihood Ratio Test Statistic for Scan Test } \description{ Calculate the Likelihood Ratio Test Statistic for the Scan Test, at each spatial location. } \usage{ scanLRTS(X, r, \dots, method = c("poisson", "binomial"), baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), saveopt = FALSE, Xmask = NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{saveopt}{ Logical value indicating to save the optimal value of \code{r} at each location. } \item{Xmask}{ Internal use only. } } \details{ This command computes, for all spatial locations \code{u}, the Likelihood Ratio Test Statistic \eqn{\Lambda(u)}{Lambda(u)} for a test of homogeneity at the location \eqn{u}, as described below. The result is a pixel image giving the values of \eqn{\Lambda(u)}{Lambda(u)} at each pixel. The \bold{maximum} value of \eqn{\Lambda(u)}{Lambda(u)} over all locations \eqn{u} is the \emph{scan statistic}, which is the basis of the \emph{scan test} performed by \code{\link{scan.test}}. 
\itemize{ \item If \code{method="poisson"} then the test statistic is based on Poisson likelihood. The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). At the spatial location \eqn{u}, the alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside the circle of radius \code{r} centred at \eqn{u}, and another intensity \eqn{\beta_0}{beta0} outside the circle. If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside the circle, and \code{beta0 * baseline} outside the circle. \item If \code{method="binomial"} then the test statistic is based on binomial likelihood. The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that the circle of radius \code{r} centred at \eqn{u} has a higher proportion of points of the second type, than expected under the null hypothesis. } If \code{r} is a vector of more than one value for the radius, then the calculations described above are performed for every value of \code{r}. Then the maximum over \code{r} is taken for each spatial location \eqn{u}. The resulting pixel value of \code{scanLRTS} at a location \eqn{u} is the profile maximum of the Likelihood Ratio Test Statistic, that is, the maximum of the Likelihood Ratio Test Statistic for circles of all radii, centred at the same location \eqn{u}. 
If you have already performed a scan test using \code{\link{scan.test}}, the Likelihood Ratio Test Statistic can be extracted from the test result using the function \code{\link{as.im.scan.test}}. } \section{Warning: window size}{ Note that the result of \code{scanLRTS} is a pixel image on a larger window than the original window of \code{X}. The expanded window contains the centre of any circle of radius \code{r} that has nonempty intersection with the original window. } \value{ A pixel image (object of class \code{"im"}) whose pixel values are the values of the (profile) Likelihood Ratio Test Statistic at each spatial location. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. } \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{as.im.scan.test}} } \examples{ plot(scanLRTS(redwood, 0.1, method="poisson")) sc <- scanLRTS(chorley, 1, method="binomial", case="larynx") plot(sc) scanstatchorley <- max(sc) } \keyword{htest} \keyword{spatial} spatstat/man/edge.Trans.Rd0000644000176200001440000001124513333543263015161 0ustar liggesusers\name{edge.Trans} \alias{edge.Trans} \alias{rmax.Trans} \title{ Translation Edge Correction } \description{ Computes Ohser and Stoyan's translation edge correction weights for a point pattern. } \usage{ edge.Trans(X, Y = X, W = Window(X), exact = FALSE, paired = FALSE, ..., trim = spatstat.options("maxedgewt"), dx=NULL, dy=NULL, give.rmax=FALSE, gW=NULL) rmax.Trans(W, g=setcov(W)) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{exact}{ Logical. If \code{TRUE}, a slow algorithm will be used to compute the exact value. If \code{FALSE}, a fast algorithm will be used to compute the approximate value. } \item{paired}{ Logical value indicating whether \code{X} and \code{Y} are paired. 
If \code{TRUE}, compute the edge correction for corresponding points \code{X[i], Y[i]} for all \code{i}. If \code{FALSE}, compute the edge correction for each possible pair of points \code{X[i], Y[j]} for all \code{i} and \code{j}. } \item{\dots}{Ignored.} \item{trim}{ Maximum permitted value of the edge correction weight. } \item{dx,dy}{ Alternative data giving the \eqn{x} and \eqn{y} coordinates of the vector differences between the points. Incompatible with \code{X} and \code{Y}. See Details. } \item{give.rmax}{ Logical. If \code{TRUE}, also compute the value of \code{rmax.Trans(W)} and return it as an attribute of the result. } \item{g, gW}{ Optional. Set covariance of \code{W}, if it has already been computed. Not required if \code{W} is a rectangle. } } \details{ The function \code{edge.Trans} computes Ohser and Stoyan's translation edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Trans} computes the maximum value of distance \eqn{r} for which the translation edge correction estimate of \eqn{K(r)} is valid. For a pair of points \eqn{x} and \eqn{y} in a window \eqn{W}, the translation edge correction weight is \deqn{ e(u, r) = \frac{\mbox{area}(W)}{\mbox{area}(W \cap (W + y - x))} }{ e(u, r) = area(W) / area(intersect(W, W + y - x)) } where \eqn{W + y - x} is the result of shifting the window \eqn{W} by the vector \eqn{y - x}. The denominator is the area of the overlap between this shifted window and the original window. The function \code{edge.Trans} computes this edge correction weight. If \code{paired=TRUE}, then \code{X} and \code{Y} should contain the same number of points. The result is a vector containing the edge correction weights \code{e(X[i], Y[i])} for each \code{i}. If \code{paired=FALSE}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], Y[j])}. Computation is exact if the window is a rectangle. 
Otherwise, \itemize{ \item if \code{exact=TRUE}, the edge correction weights are computed exactly using \code{\link{overlap.owin}}, which can be quite slow. \item if \code{exact=FALSE} (the default), the weights are computed rapidly by evaluating the set covariance function \code{\link{setcov}} using the Fast Fourier Transform. } If any value of the edge correction weight exceeds \code{trim}, it is set to \code{trim}. The arguments \code{dx} and \code{dy} can be provided as an alternative to \code{X} and \code{Y}. If \code{paired=TRUE} then \code{dx,dy} should be vectors of equal length such that the vector difference of the \eqn{i}th pair is \code{c(dx[i], dy[i])}. If \code{paired=FALSE} then \code{dx,dy} should be matrices of the same dimensions, such that the vector difference between \code{X[i]} and \code{Y[j]} is \code{c(dx[i,j], dy[i,j])}. The argument \code{W} is needed. The value of \code{rmax.Trans} is the shortest distance from the origin \eqn{(0,0)} to the boundary of the support of the set covariance function of \code{W}. It is computed by pixel approximation using \code{\link{setcov}}, unless \code{W} is a rectangle, when \code{rmax.Trans(W)} is the length of the shortest side of the rectangle. } \value{ Numeric vector or matrix. } \references{ Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. } \seealso{ \code{\link{rmax.Trans}}, \code{\link{edge.Ripley}}, \code{\link{setcov}}, \code{\link{Kest}} } \examples{ v <- edge.Trans(cells) rmax.Trans(Window(cells)) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat/man/clarkevans.Rd0000644000176200001440000001061513333543263015320 0ustar liggesusers\name{clarkevans} \alias{clarkevans} \title{Clark and Evans Aggregation Index} \description{ Computes the Clark and Evans aggregation index \eqn{R} for a spatial point pattern. 
} \usage{ clarkevans(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{correction}{ Character vector. The type of edge correction(s) to be applied. } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See Details. } } \details{ The Clark and Evans (1954) aggregation index \eqn{R} is a crude measure of clustering or ordering of a point pattern. It is the ratio of the observed mean nearest neighbour distance in the pattern to that expected for a Poisson point process of the same intensity. A value \eqn{R>1} suggests ordering, while \eqn{R<1} suggests clustering. Without correction for edge effects, the value of \code{R} will be positively biased. Edge effects arise because, for a point of \code{X} close to the edge of the window, the true nearest neighbour may actually lie outside the window. Hence observed nearest neighbour distances tend to be larger than the true nearest neighbour distances. The argument \code{correction} specifies an edge correction or several edge corrections to be applied. It is a character vector containing one or more of the options \code{"none"}, \code{"Donnelly"}, \code{"guard"} and \code{"cdf"} (which are recognised by partial matching). These edge corrections are: \describe{ \item{"none":}{ No edge correction is applied. } \item{"Donnelly":}{ Edge correction of Donnelly (1978), available for rectangular windows only. The theoretical expected value of mean nearest neighbour distance under a Poisson process is adjusted for edge effects by the edge correction of Donnelly (1978). The value of \eqn{R} is the ratio of the observed mean nearest neighbour distance to this adjusted theoretical mean. } \item{"guard":}{ Guard region or buffer area method. 
The observed mean nearest neighbour distance for the point pattern \code{X} is re-defined by averaging only over those points of \code{X} that fall inside the sub-window \code{clipregion}. } \item{"cdf":}{ Cumulative Distribution Function method. The nearest neighbour distance distribution function \eqn{G(r)} of the stationary point process is estimated by \code{\link{Gest}} using the Kaplan-Meier type edge correction. Then the mean of the distribution is calculated from the cdf. } } Alternatively \code{correction="all"} selects all options. If the argument \code{clipregion} is given, then the selected edge corrections will be assumed to include \code{correction="guard"}. To perform a test based on the Clark-Evans index, see \code{\link{clarkevans.test}}. } \value{ A numeric value, or a numeric vector with named components \item{naive}{\eqn{R} without edge correction} \item{Donnelly}{\eqn{R} using Donnelly edge correction} \item{guard}{\eqn{R} using guard region} \item{cdf}{\eqn{R} using cdf method} (as selected by \code{correction}). The value of the \code{Donnelly} component will be \code{NA} if the window of \code{X} is not a rectangle. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In I. Hodder (ed.) \emph{Simulation studies in archaeology}, Cambridge/New York: Cambridge University Press, pp 91--95. 
} \author{ John Rudge \email{rudge@esc.cam.ac.uk} with modifications by \adrian } \seealso{ \code{\link{clarkevans.test}}, \code{\link{hopskel}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ # Example of a clustered pattern clarkevans(redwood) # Example of an ordered pattern clarkevans(cells) # Random pattern X <- rpoispp(100) clarkevans(X) # How to specify a clipping region clip1 <- owin(c(0.1,0.9),c(0.1,0.9)) clip2 <- erosion(Window(cells), 0.1) clarkevans(cells, clipregion=clip1) clarkevans(cells, clipregion=clip2) } \keyword{spatial} \keyword{nonparametric} spatstat/man/nnmap.Rd0000644000176200001440000001125613333543263014302 0ustar liggesusers\name{nnmap} \alias{nnmap} \title{ K-th Nearest Point Map } \description{ Given a point pattern, this function constructs pixel images giving the distance from each pixel to its \eqn{k}-th nearest neighbour in the point pattern, and the index of the \eqn{k}-th nearest neighbour. } \usage{ nnmap(X, k = 1, what = c("dist", "which"), \dots, W = as.owin(X), is.sorted.X = FALSE, sortby = c("range", "var", "x", "y")) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{k}{ Integer, or integer vector. The algorithm will find the \code{k}th nearest neighbour. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the index of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution of the result. } \item{W}{ Window (object of class \code{"owin"}) specifying the spatial domain in which the distances will be computed. Defaults to the window of \code{X}. } \item{is.sorted.X}{ Logical value attesting whether the point pattern \code{X} has been sorted. See Details. } \item{sortby}{ Determines which coordinate to use to sort the point pattern. See Details. 
} } \details{ Given a point pattern \code{X}, this function constructs two pixel images: \itemize{ \item a distance map giving, for each pixel, the distance to the nearest point of \code{X}; \item a nearest neighbour map giving, for each pixel, the identifier of the nearest point of \code{X}. } If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. If \code{what="dist"} then only the distance map is returned. If \code{what="which"} then only the nearest neighbour map is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts the point pattern \code{X} into increasing order of the \eqn{x} coordinate or increasing order of the the \eqn{y} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the larger range of values (according to the frame of the enclosing window of \code{X}). If \code{sortby = "var"}), sorting will occur on the coordinate that has the greater variance (in the pattern \code{X}). Setting \code{sortby="x"} or \code{sortby = "y"} will specify that sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively. If the point pattern \code{X} is already sorted, then the argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. 
} \section{Warning About Ties}{ Ties are possible: there may be two data points which lie exactly the same distance away from a particular pixel. This affects the results from \code{nnmap(what="which")}. The handling of ties is not well-defined: it is not consistent between different computers and different installations of \R. If there are ties, then different calls to \code{nnmap(what="which")} may give inconsistent results. For example, you may get a different answer from \code{nnmap(what="which",k=1)} and \code{nnmap(what="which", k=1:2)[[1]]}. } \value{ A pixel image, or a list of pixel images. By default (if \code{what=c("dist", "which")}), the result is a list with two components \code{dist} and \code{which} containing the distance map and the nearest neighbour map. If \code{what="dist"} then the result is a real-valued pixel image containing the distance map. If \code{what="which"} then the result is an integer-valued pixel image containing the nearest neighbour map. If \code{k} is a vector of several integers, then the result is similar except that each pixel image is replaced by a list of pixel images, one for each entry of \code{k}. } \seealso{ \code{\link{distmap}} } \examples{ plot(nnmap(cells, 2, what="which")) } \author{ \adrian , \rolf , and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/Extract.lpp.Rd0000644000176200001440000000655013333543263015376 0ustar liggesusers\name{Extract.lpp} \alias{[.lpp} \title{Extract Subset of Point Pattern on Linear Network} \description{ Extract a subset of a point pattern on a linear network. } \usage{ \method{[}{lpp}(x, i, j, drop=FALSE, \dots, snip=TRUE) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{i}{ Subset index. A valid subset index in the usual \R sense, indicating which points should be retained. } \item{j}{ Spatial window (object of class \code{"owin"}) delineating the region that should be retained. 
} \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{snip}{ Logical. If \code{TRUE} (the default), segments of the network which cross the boundary of the window \code{j} will be cut by the boundary. If \code{FALSE}, these segments will be deleted. } \item{\dots}{ Ignored. } } \value{ A point pattern on a linear network (of class \code{"lpp"}). } \details{ This function extracts a designated subset of a point pattern on a linear network. The function \code{[.lpp} is a method for \code{\link{[}} for the class \code{"lpp"}. It extracts a designated subset of a point pattern. The argument \code{i} should be a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points \code{(x$x[i], x$y[i])} for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The argument \code{j}, if present, should be a spatial window. The pattern inside the region will be retained. \emph{Line segments that cross the boundary of the window are deleted} in the current implementation. The argument \code{drop} determines whether to remove unused levels of a factor, if the point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame or hyperframe in which some of the columns are factors. The argument \code{snip} specifies what to do with segments of the network which cross the boundary of the window \code{j}. If \code{snip=FALSE}, such segments are simply deleted. If \code{snip=TRUE} (the default), such segments are cut into pieces by the boundary of \code{j}, and those pieces which lie inside the window \code{ji} are included in the resulting network. 
Use \code{\link{unmark}} to remove all the marks in a marked point pattern, and \code{\link{subset.lpp}} to remove only some columns of marks. } \seealso{ \code{\link{lpp}}, \code{\link{subset.lpp}} } \examples{ # Chicago crimes data - remove cases of assault chicago[marks(chicago) != "assault"] # equivalent to subset(chicago, select=-assault) # spatial window subset B <- owin(c(350, 700), c(600, 1000)) plot(chicago) plot(B, add=TRUE, lty=2, border="red", lwd=3) op <- par(mfrow=c(1,2), mar=0.6+c(0,0,1,0)) plot(B, main="chicago[B, snip=FALSE]", lty=3, border="red") plot(chicago[, B, snip=FALSE], add=TRUE) plot(B, main="chicago[B, snip=TRUE]", lty=3, border="red") plot(chicago[, B, snip=TRUE], add=TRUE) par(op) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/bdist.pixels.Rd0000644000176200001440000000553513333543262015603 0ustar liggesusers\name{bdist.pixels} \alias{bdist.pixels} \title{Distance to Boundary of Window} \description{ Computes the distances from each pixel in a window to the boundary of the window. } \usage{ bdist.pixels(w, \dots, style="image", method=c("C", "interpreted")) } \arguments{ \item{w}{A window (object of class \code{"owin"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine the pixel resolution.} \item{style}{Character string determining the format of the output: either \code{"matrix"}, \code{"coords"} or \code{"image"}. } \item{method}{Choice of algorithm to use when \code{w} is polygonal.} } \value{ If \code{style="image"}, a pixel image (object of class \code{"im"}) containing the distances from each pixel in the image raster to the boundary of the window. If \code{style="matrix"}, a matrix giving the distances from each pixel in the image raster to the boundary of the window. Rows of this matrix correspond to the \eqn{y} coordinate and columns to the \eqn{x} coordinate. 
If \code{style="coords"}, a list with three components \code{x,y,z}, where \code{x,y} are vectors of length \eqn{m,n} giving the \eqn{x} and \eqn{y} coordinates respectively, and \code{z} is an \eqn{m \times n}{m x n} matrix such that \code{z[i,j]} is the distance from \code{(x[i],y[j])} to the boundary of the window. Rows of this matrix correspond to the \eqn{x} coordinate and columns to the \eqn{y} coordinate. This result can be plotted with \code{persp}, \code{image} or \code{contour}. } \details{ This function computes, for each pixel \eqn{u} in the window \code{w}, the shortest distance \eqn{d(u, W^c)}{dist(u, W')} from \eqn{u} to the boundary of \eqn{W}. If the window is a binary mask then the distance from each pixel to the boundary is computed using the distance transform algorithm \code{\link{distmap.owin}}. The result is equivalent to \code{distmap(W, invert=TRUE)}. If the window is a rectangle or a polygonal region, the grid of pixels is determined by the arguments \code{"\dots"} passed to \code{\link{as.mask}}. The distance from each pixel to the boundary is calculated exactly, using analytic geometry. This is slower but more accurate than in the case of a binary mask. For software testing purposes, there are two implementations available when \code{w} is a polygon: the default is \code{method="C"} which is much faster than \code{method="interpreted"}. } \seealso{ \code{\link{owin.object}}, \code{\link{erosion}}, \code{\link{bdist.points}}, \code{\link{bdist.tiles}}, \code{\link{distmap.owin}}. 
} \examples{ u <- owin(c(0,1),c(0,1)) d <- bdist.pixels(u, eps=0.01) image(d) d <- bdist.pixels(u, eps=0.01, style="matrix") mean(d >= 0.1) # value is approx (1 - 2 * 0.1)^2 = 0.64 } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/localKcross.inhom.Rd0000644000176200001440000001374113506541762016566 0ustar liggesusers\name{localKcross.inhom} \alias{localKcross.inhom} \alias{localLcross.inhom} \title{Inhomogeneous Multitype K Function} \description{ Computes spatially-weighted versions of the the local multitype \eqn{K}-function or \eqn{L}-function. } \usage{ localKcross.inhom(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, \dots, rmax = NULL, correction = "Ripley", sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) localLcross.inhom(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, \dots, rmax = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{to}{ Type of points to which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{lambdaFrom,lambdaTo}{ Optional. Values of the estimated intensity function for the points of type \code{from} and \code{to}, respectively. Each argument should be either a vector giving the intensity values at the required points, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. 
} \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambdaFrom} and \code{lambdaTo}, if they are missing. } \item{lambdaX}{ Optional. Values of the estimated intensity function for all points of \code{X}. Either a vector giving the intensity values at each point of \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a list of pixel images giving the intensity values at all locations for each type of point, or a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} or \code{function(x,y,m)} which can be evaluated to give the intensity value at any location. } \item{update}{ Logical value indicating what to do when \code{lambdaFrom}, \code{lambdaTo} or \code{lambdaX} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \details{ The functions \code{localKcross.inhom} and \code{localLcross.inhom} are inhomogeneous or weighted versions of the local multitype \eqn{K} and \eqn{L} functions implemented in \code{\link{localKcross}} and \code{\link{localLcross}}. 
Given a multitype spatial point pattern \code{X}, and two designated types \code{from} and \code{to}, the local multitype \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and is computed by \deqn{ K_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ K[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} of type \code{to} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The function \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} of type \code{from}. The corresponding \eqn{L} function \eqn{L_i(r)}{L[i](r)} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/(2\pi)}}{L(r) = sqrt(K(r)/(2*pi))}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. 
} \examples{ X <- amacrine # compute all the local L functions L <- localLcross.inhom(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/textureplot.Rd0000644000176200001440000000666413333543264015600 0ustar liggesusers\name{textureplot} \alias{textureplot} \title{ Plot Image or Tessellation Using Texture Fill } \description{ For a factor-valued pixel image, this command plots each level of the factor using a different texture. For a tessellation, each tile is plotted using a different texture. } \usage{ textureplot(x, \dots, main, add=FALSE, clipwin=NULL, do.plot = TRUE, border=NULL, col = NULL, lwd = NULL, lty = NULL, spacing = NULL, textures=1:8, legend=TRUE, leg.side=c("right", "left", "bottom", "top"), legsep=0.1, legwid=0.2) } \arguments{ \item{x}{ A tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) with at most 8 tiles, or a pixel image (object of class \code{"im"} or something acceptable to \code{\link{as.im}}) whose pixel values are a \code{factor} with at most 8 levels. } \item{\dots}{ Other arguments passed to \code{\link{add.texture}}. } \item{main}{ Character string giving a main title for the plot. } \item{add}{ Logical value indicating whether to draw on the current plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}). } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the image will be displayed. } \item{do.plot}{ Logical. Whether to actually do the plot. } \item{border}{ Colour for drawing the boundaries between the different regions. The default (\code{border=NULL}) means to use \code{par("fg")}. Use \code{border=NA} to omit borders. 
} \item{col}{ Numeric value or vector giving the colour or colours in which the textures should be plotted. } \item{lwd}{ Numeric value or vector giving the line width or widths to be used. } \item{lty}{ Numeric value or vector giving the line type or types to be used. } \item{spacing}{ Numeric value or vector giving the \code{spacing} parameter for the textures. } \item{textures}{ Textures to be used for each level. Either a texture map (object of class \code{"texturemap"}) or a vector of integer codes (to be interpreted by \code{\link{add.texture}}). } \item{legend}{ Logical. Whether to display an explanatory legend. } \item{leg.side}{Position of legend relative to main plot.} \item{legsep}{ Separation between legend and main plot, as a fraction of the shortest side length of the main plot. } \item{legwid}{ Width (if vertical) or height (if horizontal) of the legend as a fraction of the shortest side length of the main plot. } } \details{ If \code{x} is a tessellation, then each tile of the tessellation is plotted and filled with a texture using \link{add.texture}. If \code{x} is a factor-valued pixel image, then for each level of the factor, the algorithm finds the region where the image takes this value, and fills the region with a texture using \code{\link{add.texture}}. } \value{ (Invisible) A texture map (object of class \code{"texturemap"}) associating a texture with each level of the factor. } \author{ \spatstatAuthors. } \seealso{ \code{\link{im}}, \code{\link{plot.im}}, \code{\link{add.texture}}. 
} \examples{ nd <- if(interactive()) 128 else 32 Z <- setcov(owin(), dimyx=nd) Zcut <- cut(Z, 3, labels=c("Lo", "Med", "Hi")) textureplot(Zcut) textureplot(dirichlet(runifpoint(6))) } \keyword{spatial} \keyword{hplot} spatstat/man/complement.owin.Rd0000644000176200001440000000400313333543263016277 0ustar liggesusers\name{complement.owin} \alias{complement.owin} \title{Take Complement of a Window} \description{ Take the set complement of a window, within its enclosing rectangle or in a larger rectangle. } \usage{ complement.owin(w, frame=as.rectangle(w)) } \arguments{ \item{w}{ an object of class \code{"owin"} describing a window of observation for a point pattern. } \item{frame}{ Optional. The enclosing rectangle, with respect to which the set complement is taken. } } \value{ Another object of class \code{"owin"} representing the complement of the window, i.e. the inside of the window becomes the outside. } \details{ This yields a window object (of class \code{"owin"}, see \code{\link{owin.object}}) representing the set complement of \code{w} with respect to the rectangle \code{frame}. By default, \code{frame} is the enclosing box of \code{w} (originally specified by the arguments \code{xrange} and \code{yrange} given to \code{\link{owin}} when \code{w} was created). If \code{frame} is specified, it must be a rectangle (an object of class \code{"owin"} whose type is \code{"rectangle"}) and it must be larger than the enclosing box of \code{w}. This rectangle becomes the enclosing box for the resulting window. If \code{w} is a rectangle, then \code{frame} must be specified. Otherwise an error will occur (since the complement of \code{w} in itself is empty). For rectangular and polygonal windows, the complement is computed by reversing the sign of each boundary polygon, while for binary masks it is computed by negating the pixel values. 
} \seealso{ \code{\link{owin}}, \code{\link{owin.object}} } \examples{ # rectangular a <- owin(c(0,1),c(0,1)) b <- owin(c(-1,2),c(-1,2)) bmina <- complement.owin(a, frame=b) # polygonal data(demopat) w <- Window(demopat) outside <- complement.owin(w) # mask w <- as.mask(Window(demopat)) outside <- complement.owin(w) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/quasirandom.Rd0000644000176200001440000000621013333543264015507 0ustar liggesusers\name{quasirandom} \alias{quasirandom} %DoNotExport \alias{vdCorput} \alias{Halton} \alias{Hammersley} \title{ Quasirandom Patterns } \description{ Generates quasirandom sequences of numbers and quasirandom spatial patterns of points in any dimension. } \usage{ vdCorput(n, base) Halton(n, bases = c(2, 3), raw = FALSE, simplify = TRUE) Hammersley(n, bases = 2, raw = FALSE, simplify = TRUE) } \arguments{ \item{n}{ Number of points to generate. } \item{base}{ A prime number giving the base of the sequence. } \item{bases}{ Vector of prime numbers giving the bases of the sequences for each coordinate axis. } \item{raw}{ Logical value indicating whether to return the coordinates as a matrix (\code{raw=TRUE}) or as a spatial point pattern (\code{raw=FALSE}, the default). } \item{simplify}{ Argument passed to \code{\link{ppx}} indicating whether point patterns of dimension 2 or 3 should be returned as objects of class \code{"ppp"} or \code{"pp3"} respectively (\code{simplify=TRUE}, the default) or as objects of class \code{"ppx"} (\code{simplify=FALSE}). } } \details{ The function \code{vdCorput} generates the quasirandom sequence of Van der Corput (1935) of length \code{n} with the given \code{base}. These are numbers between 0 and 1 which are in some sense uniformly distributed over the interval. The function \code{Halton} generates the Halton quasirandom sequence of points in \code{d}-dimensional space, where \code{d = length(bases)}. 
The values of the \eqn{i}-th coordinate of the points are generated using the van der Corput sequence with base equal to \code{bases[i]}. The function \code{Hammersley} generates the Hammersley set of points in \code{d+1}-dimensional space, where \code{d = length(bases)}. The first \code{d} coordinates of the points are generated using the van der Corput sequence with base equal to \code{bases[i]}. The \code{d+1}-th coordinate is the sequence \code{1/n, 2/n, ..., 1}. If \code{raw=FALSE} (the default) then the Halton and Hammersley sets are interpreted as spatial point patterns of the appropriate dimension. They are returned as objects of class \code{"ppx"} (multidimensional point patterns) unless \code{simplify=TRUE} and \code{d=2} or \code{d=3} when they are returned as objects of class \code{"ppp"} or \code{"pp3"}. If \code{raw=TRUE}, the coordinates are returned as a matrix with \code{n} rows and \code{D} columns where \code{D} is the spatial dimension. } \value{ For \code{vdCorput}, a numeric vector. For \code{Halton} and \code{Hammersley}, an object of class \code{"ppp"}, \code{"pp3"} or \code{"ppx"}; or if \code{raw=TRUE}, a numeric matrix. } \references{ Van der Corput, J. G. (1935) Verteilungsfunktionen. \emph{Proc. Ned. Akad. v. Wetensch.} \bold{38}: 813--821. Kuipers, L. and Niederreiter, H. (2005) \emph{Uniform distribution of sequences}, Dover Publications. } \seealso{ \code{\link{rQuasi}} } \examples{ vdCorput(10, 2) plot(Halton(256, c(2,3))) plot(Hammersley(256, 3)) } \author{\adrian , \rolf and \ege. } \keyword{spatial} \keyword{datagen} spatstat/man/plot.hyperframe.Rd0000644000176200001440000000667013333543264016315 0ustar liggesusers\name{plot.hyperframe} \alias{plot.hyperframe} \title{Plot Entries in a Hyperframe} \description{ Plots the entries in a hyperframe, in a series of panels, one panel for each row of the hyperframe. 
} \usage{ \method{plot}{hyperframe}(x, e, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, parargs=list(mar=mar * marsize), marsize=1, mar=c(1,1,3,1)) } \arguments{ \item{x}{ Data to be plotted. A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}). } \item{e}{ How to plot each row. Optional. An \R language call or expression (typically enclosed in \code{\link{quote}()} that will be evaluated in each row of the hyperframe to generate the plots. } \item{\dots}{ Extra arguments controlling the plot (when \code{e} is missing). } \item{main}{Overall title for the array of plots.} \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{parargs}{ Optional list of arguments passed to \code{\link{par}} before plotting each panel. Can be used to control margin sizes, etc. } \item{marsize}{ Optional scale parameter controlling the sizes of margins around the panels. Incompatible with \code{parargs}. } \item{mar}{ Optional numeric vector of length 1, 2 or 4 controlling the relative sizes of margins between the panels. Incompatible with \code{parargs}. } } \details{ This is the \code{plot} method for the class \code{"hyperframe"}. The argument \code{x} must be a hyperframe (like a data frame, except that the entries can be objects of any class; see \code{\link{hyperframe}}). This function generates a series of plots, one plot for each row of the hyperframe. If \code{arrange=TRUE} (the default), then these plots are arranged in a neat array of panels within a single plot frame. If \code{arrange=FALSE}, the plots are simply executed one after another. Exactly what is plotted, and how it is plotted, depends on the argument \code{e}. 
The default (if \code{e} is missing) is to plot only the first column of \code{x}. Each entry in the first column is plotted using the generic \code{\link{plot}} command, together with any extra arguments given in \code{\dots}. If \code{e} is present, it should be an \R language expression involving the column names of \code{x}. (It is typically created using \code{\link{quote}} or \code{\link{expression}}.) The expression will be evaluated once for each row of \code{x}. It will be evaluated in an environment where each column name of \code{x} is interpreted as meaning the object in that column in the current row. See the Examples. } \value{ \code{NULL}. } \seealso{ \code{\link{hyperframe}}, \code{\link{with.hyperframe}} } \examples{ H <- hyperframe(id=1:10) H$X <- with(H, rpoispp(100)) H$D <- with(H, distmap(X)) # points only plot(H[,"X"]) plot(H, quote(plot(X, main=id))) # points superimposed on images plot(H, quote({plot(D, main=id); plot(X, add=TRUE)})) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/plot.fasp.Rd0000644000176200001440000001251713333543264015101 0ustar liggesusers\name{plot.fasp} \alias{plot.fasp} \title{Plot a Function Array} \description{ Plots an array of summary functions, usually associated with a point pattern, stored in an object of class \code{"fasp"}. A method for \code{plot}. } \usage{ \method{plot}{fasp}(x,formule=NULL, \dots, subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) } \arguments{ \item{x}{An object of class \code{"fasp"} representing a function array. } \item{formule}{ A formula or list of formulae indicating what variables are to be plotted against what variable. Each formula is either an R language formula object, or a string that can be parsed as a formula. 
If \code{formule} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. If the formula is left as \code{NULL}, then \code{plot.fasp} attempts to use the component \code{default.formula} of \code{x}. If that component is NULL as well, it gives up. } \item{\dots}{ Arguments passed to \code{\link{plot.fv}} to control the individual plot panels. } \item{subset}{ A logical vector, or a vector of indices, or an expression or a character string, or a \bold{list} of such, indicating a subset of the data to be included in each plot. If \code{subset} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. } \item{title}{ Overall title for the plot. } \item{banner}{ Logical. If \code{TRUE}, the overall title is plotted. If \code{FALSE}, the overall title is not plotted and no space is allocated for it. } \item{transpose}{ Logical. If \code{TRUE}, rows and columns will be exchanged. } \item{samex,samey}{ Logical values indicating whether all individual plot panels should have the same x axis limits and the same y axis limits, respectively. This makes it easier to compare the plots. } \item{mar.panel}{ Vector of length 4 giving the value of the graphics parameter \code{mar} controlling the size of plot margins for each individual plot panel. See \code{\link{par}}. } \item{outerlabels}{Logical. If \code{TRUE}, the row and column names of the array of functions are plotted in the margins of the array of plot panels. If \code{FALSE}, each individual plot panel is labelled by its row and column name. } \item{cex.outerlabels}{ Character expansion factor for row and column labels of array. } \item{legend}{ Logical flag determining whether to plot a legend in each panel. } } \details{ An object of class \code{"fasp"} represents an array of summary functions, usually associated with a point pattern. 
See \code{\link{fasp.object}} for details. Such an object is created, for example, by \code{\link{alltypes}}. The function \code{plot.fasp} is a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the individual panels. For information about the interpretation of the arguments \code{formule} and \code{subset}, see \code{\link{plot.fv}}. Arguments that are often passed through \code{...} include \code{col} to control the colours of the different lines in a panel, and \code{lty} and \code{lwd} to control the line type and line width of the different lines in a panel. The argument \code{shade} can also be used to display confidence intervals or significance bands as filled grey shading. See \code{\link{plot.fv}}. The argument \code{title}, if present, will determine the overall title of the plot. If it is absent, it defaults to \code{x$title}. Titles for the individual plot panels will be taken from \code{x$titles}. } \value{None.} \section{Warnings}{ (Each component of) the \code{subset} argument may be a logical vector (of the same length as the vectors of data which are extracted from \code{x}), or a vector of indices, or an \bold{expression} such as \code{expression(r<=0.2)}, or a text string, such as \code{"r<=0.2"}. Attempting a syntax such as \code{subset = r<=0.2} (without wrapping \code{r<=0.2} either in quote marks or in \code{expression()}) will cause this function to fall over. Variables referred to in any formula must exist in the data frames stored in \code{x}. What the names of these variables are will of course depend upon the nature of \code{x}. } \seealso{ \code{\link{alltypes}}, \code{\link{plot.fv}}, \code{\link{fasp.object}} } \examples{ \dontrun{ # Bramble Canes data. data(bramblecanes) X.G <- alltypes(bramblecanes,"G",dataname="Bramblecanes",verb=TRUE) plot(X.G) plot(X.G,subset="r<=0.2") plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo))) plot(X.G,fo=cbind(km,theo) - theo~r,subset="r<=0.2") # Simulated data. 
pp <- runifpoint(350, owin(c(0,1),c(0,1))) pp <- pp \%mark\% factor(c(rep(1,50),rep(2,100),rep(3,200))) X.K <- alltypes(pp,"K",verb=TRUE,dataname="Fake Data") plot(X.K,fo=cbind(border,theo)~theo,subset="theo<=0.75") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/hist.funxy.Rd0000644000176200001440000000375113333543263015311 0ustar liggesusers\name{hist.funxy} \alias{hist.funxy} \title{Histogram of Values of a Spatial Function} \description{ Computes and displays a histogram of the values of a spatial function of class \code{"funxy"}. } \usage{ \method{hist}{funxy}(x, \dots, xname) } \arguments{ \item{x}{A pixel image (object of class \code{"funxy"}).} \item{\dots}{ Arguments passed to \code{\link{as.im}} or \code{\link{hist.im}}. } \item{xname}{ Optional. Character string to be used as the name of the dataset \code{x}. } } \details{ This function computes and (by default) displays a histogram of the values of the function \code{x}. An object of class \code{"funxy"} describes a function of spatial location. It is a \code{function(x,y,..)} in the \R language, with additional attributes. The function \code{hist.funxy} is a method for the generic function \code{\link{hist}} for the class \code{"funxy"}. The function is first converted to a pixel image using \code{\link{as.im}}, then \code{\link{hist.im}} is called to produce the histogram. Any arguments in \code{...} are passed to \code{\link{as.im}} to determine the pixel resolution, or to \code{\link{hist.im}} to determine the histogram breaks and to control or suppress plotting. Useful arguments include \code{W} for the spatial domain, \code{eps,dimyx} for pixel resolution, \code{main} for the main title. } \value{ An object of class \code{"histogram"} as returned by \code{\link[graphics:hist]{hist.default}}. This object can be plotted. } \seealso{ \code{\link{spatialcdf}} for the cumulative distribution function of an image or function. \code{\link{hist}}, \code{\link{hist.default}}. 
For other statistical graphics such as Q-Q plots, use \code{as.im(X)[]} to extract the pixel values of image \code{X}, and apply the usual statistical graphics commands. } \examples{ f <- funxy(function(x,y) {x^2}, unit.square()) hist(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/timed.Rd0000644000176200001440000000561713333543264014300 0ustar liggesusers\name{timed} \alias{timed} \title{ Record the Computation Time } \description{ Saves the result of a calculation as an object of class \code{"timed"} which includes information about the time taken to compute the result. The computation time is printed when the object is printed. } \usage{ timed(x, ..., starttime = NULL, timetaken = NULL) } \arguments{ \item{x}{ An expression to be evaluated, or an object that has already been evaluated. } \item{starttime}{ The time at which the computation is defined to have started. The default is the current time. Ignored if \code{timetaken} is given. } \item{timetaken}{ The length of time taken to perform the computation. The default is the time taken to evaluate \code{x}. } \item{\dots}{ Ignored. } } \details{ This is a simple mechanism for recording how long it takes to perform complicated calculations (usually for the purposes of reporting in a publication). If \code{x} is an expression to be evaluated, \code{timed(x)} evaluates the expression and measures the time taken to evaluate it. The result is saved as an object of the class \code{"timed"}. Printing this object displays the computation time. If \code{x} is an object which has already been computed, then the time taken to compute the object can be specified either directly by the argument \code{timetaken}, or indirectly by the argument \code{starttime}. \itemize{ \item \code{timetaken} is the duration of time taken to perform the computation. It should be the difference of two clock times returned by \code{\link{proc.time}}. 
Typically the user sets \code{begin <- proc.time()} before commencing the calculations, then \code{end <- proc.time()} after completing the calculations, and then sets \code{timetaken <- end - begin}. \item \code{starttime} is the clock time at which the computation started. It should be a value that was returned by \code{\link{proc.time}} at some earlier time when the calculations commenced. When \code{timed} is called, the computation time will be taken as the difference between the current clock time and \code{starttime}. Typically the user sets \code{begin <- proc.time()} before commencing the calculations, and when the calculations are completed, the user calls \code{result <- timed(result, starttime=begin)}. } If the result of evaluating \code{x} belongs to other S3 classes, then the result of \code{timed(x, \dots)} also inherits these classes, and printing the object will display the appropriate information for these classes as well. } \value{ An object inheriting the class \code{"timed"}. } \examples{ timed(clarkevans(cells)) timed(Kest(cells)) answer <- timed(42, timetaken=4.1e17) answer } \seealso{ \code{\link{timeTaken}} to extract the time taken. } \author{ \spatstatAuthors. } \keyword{utilities} spatstat/man/rescale.im.Rd0000644000176200001440000000345513333543264015216 0ustar liggesusers\name{rescale.im} \alias{rescale.im} \title{Convert Pixel Image to Another Unit of Length} \description{ Converts a pixel image to another unit of length. } \usage{ \method{rescale}{im}(X, s, unitname) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another pixel image (of class \code{"im"}), containing the same pixel values, but with pixel coordinates expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. 
The spatial coordinates of the pixels in \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. The result is a pixel image representing the \emph{same} data but re-expressed in a different unit. Pixel values are unchanged. This may not be what you intended! } \seealso{ \code{\link{im}}, \code{\link{rescale}}, \code{\link{unitname}}, \code{\link{eval.im}} } \examples{ # Bramble Canes data: 1 unit = 9 metres data(bramblecanes) # distance transform Z <- distmap(bramblecanes) # convert to metres # first alter the pixel values Zm <- eval.im(9 * Z) # now rescale the pixel coordinates Z <- rescale(Zm, 1/9) # or equivalently Z <- rescale(Zm) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/default.expand.Rd0000644000176200001440000000771513333543263016100 0ustar liggesusers\name{default.expand} \alias{default.expand} \title{Default Expansion Rule for Simulation of Model} \description{ Defines the default expansion window or expansion rule for simulation of a fitted point process model. } \usage{ default.expand(object, m=2, epsilon=1e-6, w=Window(object)) } \arguments{ \item{object}{ A point process model (object of class \code{"ppm"} or \code{"rmhmodel"}). } \item{m}{ A single numeric value. The window will be expanded by a distance \code{m * reach(object)} along each side. } \item{epsilon}{ Threshold argument passed to \code{\link{reach}} to determine \code{reach(object)}. } \item{w}{ Optional. The un-expanded window in which the model is defined. The resulting simulated point patterns will lie in this window. 
} } \value{ A window expansion rule (object of class \code{"rmhexpand"}). } \details{ This function computes a default value for the expansion rule (the argument \code{expand} in \code{\link{rmhcontrol}}) given a fitted point process model \code{object}. This default is used by \code{\link{envelope}}, \code{\link{qqplot.ppm}}, \code{\link{simulate.ppm}} and other functions. Suppose we wish to generate simulated realisations of a fitted point process model inside a window \code{w}. It is advisable to first simulate the pattern on a larger window, and then clip it to the original window \code{w}. This avoids edge effects in the simulation. It is called \emph{expansion} of the simulation window. Accordingly, for the Metropolis-Hastings simulation algorithm \code{\link{rmh}}, the algorithm control parameters specified by \code{\link{rmhcontrol}} include an argument \code{expand} that determines the expansion of the simulation window. The function \code{default.expand} determines the default expansion rule for a fitted point process model \code{object}. If the model is Poisson, then no expansion is necessary. No expansion is performed by default, and \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = Window(object)}. If the model depends on external covariates (i.e.\ covariates other than the Cartesian covariates \code{x} and \code{y} and the \code{marks}) then no expansion is feasible, in general, because the spatial domain of the covariates is not guaranteed to be large enough. \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = Window(object)}. If the model depends on the Cartesian covariates \code{x} and \code{y}, it would be feasible to expand the simulation window, and this was the default for \pkg{spatstat} version 1.24-1 and earlier. 
mod <- rmhmodel(cif="strauss", par=list(beta=100, gamma=0.5, r=0.07)) default.expand(mod)
\alias{as.character.unitname} \alias{as.data.frame.bw.optim} \alias{as.data.frame.fv} \alias{as.double.im} \alias{as.linfun.linfun} \alias{as.list.hyperframe} \alias{as.listof} \alias{as.owin.lintess} \alias{as.unitname} \alias{augment.msr} \alias{avenndist} \alias{bandwidth.is.infinite} \alias{BartCalc} \alias{bbEngine} \alias{bermantestCalc} \alias{bermantestEngine} \alias{bdry.mask} \alias{bind.ratfv} \alias{blankcoefnames} \alias{bounding.box3} \alias{break.holes} \alias{breakpts} \alias{breakpts.from.r} \alias{bt.frame} \alias{bw.optim} \alias{calc.DR} \alias{calc.NNIR} \alias{calc.SAVE} \alias{calc.SIR} \alias{calc.TSE} \alias{cannot.update} \alias{cartesian} \alias{cellmiddles} \alias{censtimeCDFest} \alias{change.default.expand} \alias{checkbigmatrix} \alias{checkfields} \alias{checksolve} \alias{check.arc} \alias{check.anySparseVector} \alias{check.finespacing} \alias{check.hist.lengths} \alias{check.mat.mul} \alias{check.separable} \alias{check.testfun} \alias{circticks} \alias{circunion} \alias{clarkevansCalc} \alias{clip.psp} \alias{cliprect.psp} \alias{clippoly.psp} \alias{closethresh} \alias{coef.summary.kppm} \alias{coef.summary.ppm} \alias{coef.vblogit} \alias{coerce.marks.numeric} \alias{compatible.rat} \alias{compileCDF} \alias{conform.ratfv} \alias{crosspairquad} \alias{cobble.xy} \alias{codetime} \alias{col.args.to.grey} \alias{commonPolyclipArgs} \alias{conform.imagelist} \alias{countingweights} \alias{CressieReadStatistic} \alias{CressieReadSymbol} \alias{CressieReadName} \alias{cutoff2Dkernel} \alias{CVforPCF} \alias{damaged.ppm} \alias{data.mppm} \alias{datagen.runifpointOnLines} \alias{datagen.runifpoisppOnLines} \alias{datagen.rpoisppOnLines} \alias{default.clipwindow} \alias{default.linnet.tolerance} \alias{default.n.tiling} \alias{default.ntile} \alias{deltasuffstat} \alias{dimnames.hyperframe} \alias{dimnames<-.hyperframe} \alias{DoCountCrossEnds} \alias{DoCountEnds} \alias{Deviation} \alias{dfbetas.ppmInfluence} \alias{dflt.redraw} 
\alias{densitycrossEngine} \alias{densitypointsEngine} \alias{diagnose.ppm.engine} \alias{diagramobj} \alias{digestCovariates} \alias{digital.volume} \alias{dim.fasp} \alias{dim.hyperframe} \alias{dim.im} \alias{dim.msr} \alias{dim.owin} \alias{dimnames.fasp} \alias{dimnames<-.fasp} \alias{dimnames.msr} \alias{distributecbind} \alias{dist2dpath} \alias{do.as.im} \alias{do.call.plotfun} \alias{do.istat} \alias{doMultiStraussHard} \alias{dppmFixAlgorithm} \alias{dppmFixIntensity} \alias{drawSignedPoly} \alias{emptywindow} \alias{envelopeEngine} \alias{envelopeProgressData} \alias{envelopeTest} \alias{envelope.hasenvelope} \alias{envelope.matrix} \alias{equalpairs} \alias{equalpairs.quad} \alias{equals.quad} \alias{equalsfun.quad} \alias{erodemask} \alias{evalCovar} \alias{evalCovar.ppm} \alias{evalCovar.lppm} \alias{evalCovariate} \alias{evalInteraction} \alias{evalInterEngine} \alias{evalPairPotential} \alias{evalSparse3Dentrywise} \alias{evaluate2Dkernel} \alias{even.breaks.owin} \alias{exactdt} \alias{exactPdt} \alias{existsSpatstatVariable} \alias{expandSpecialLists} \alias{expandwinPerfect} \alias{ExpSmoothLog} \alias{extractAIC.slrm} \alias{extractAtomicQtests} \alias{fakemaintitle} \alias{family.vblogit} \alias{f3engine} \alias{f3Cengine} \alias{fasp} \alias{FDMKERNEL} \alias{fft2D} \alias{fftwAvailable} \alias{fill.coefs} \alias{findbestlegendpos} \alias{findCovariate} \alias{findcbind} \alias{fii} \alias{fillNA} \alias{flatfname} \alias{flipxypolygon} \alias{flipxy.msr} \alias{forbid.logi} \alias{format.numberwithunit} \alias{FormatFaspFormulae} \alias{framebottomleft} \alias{fullIndexSequence} \alias{fvexprmap} \alias{fvlabels} \alias{fvlabels<-} \alias{fvlabelmap} \alias{fvlegend} \alias{gammabreaks} \alias{g3engine} \alias{g3Cengine} \alias{getdataname} \alias{getfields} \alias{getglmdata} \alias{getglmfit} \alias{getglmsubset} \alias{getlambda.lpp} \alias{getlastshift} \alias{getppmdatasubset} \alias{getppmOriginalCovariates} 
\alias{getRandomFieldsModelGen} \alias{getSpatstatVariable} \alias{getSumFun} \alias{geyercounts} \alias{geyerdelta2} \alias{GLMpredict} \alias{good.correction.K} %\alias{gridadjacencymatrix} %DoNotExport \alias{gridindex} \alias{grid1index} \alias{grokIndexVector} \alias{grow.mask} \alias{hackglmmPQL} \alias{hasenvelope} \alias{hasglmfit} \alias{HermiteCoefs} \alias{handle.r.b.args} \alias{handle.rshift.args} \alias{head.hyperframe} \alias{hierarchicalordering} \alias{hiermat} \alias{ho.engine} \alias{hsvNA} \alias{IdenticalRowPair} \alias{IdenticalRows} \alias{idorempty} \alias{illegal.iformula} \alias{implemented.for.K} \alias{impliedpresence} \alias{impliedcoefficients} \alias{influence.ppmInfluence} \alias{inpoint} \alias{inside.arc} \alias{instantiate.interact} \alias{integral.leverage.ppm} \alias{integral.influence.ppm} \alias{interactionfamilyname} \alias{intermaker} \alias{interpretAsOrigin} \alias{intX.owin} \alias{intX.xypolygon} \alias{intY.owin} \alias{intY.xypolygon} \alias{invokeColourmapRule} \alias{is.atomicQtest} \alias{is.cadlag} \alias{is.col.argname} \alias{is.data} \alias{is.expandable} \alias{is.expandable.ppm} \alias{is.expandable.rmhmodel} \alias{is.fv} \alias{is.hyperframe} \alias{is.imlist} \alias{is.infline} \alias{is.interact} \alias{is.marked.default} \alias{is.marked.msr} \alias{is.marked.psp} \alias{is.marked.quad} \alias{is.mppm} \alias{is.multitype.msr} \alias{is.multitype.quad} \alias{is.multitype.default} \alias{is.poisson.mppm} \alias{is.ppplist} \alias{is.pp3} \alias{is.ppx} \alias{is.psp} \alias{is.quad} \alias{is.scov} \alias{is.sob} \alias{is.solist} \alias{is.tess} \alias{is.vanilla} \alias{k3engine} \alias{Kborder.engine} \alias{Knone.engine} \alias{Krect.engine} \alias{Kount} \alias{Kwtsum} \alias{Kpcf.kppm} \alias{killinteraction} \alias{km.rs.opt} \alias{kppmComLik} \alias{kppmMinCon} \alias{kppmPalmLik} \alias{kraever} \alias{kraeverRandomFields} \alias{labels.ppm} \alias{ldtEngine} \alias{levels.im} 
\alias{levels<-.im} \alias{levelsAsFactor} \alias{leverage.ppmInfluence} \alias{linearKengine} \alias{linearKmulti} \alias{linearKmulti.inhom} \alias{linearKmultiEngine} \alias{linearpcfengine} \alias{linearpcfmulti} \alias{linearpcfmulti.inhom} \alias{linearPCFmultiEngine} \alias{listof} \alias{localKengine} \alias{localKmultiEngine} \alias{localpcfengine} \alias{localpcfmatrix} \alias{local2lpp} \alias{logicalIndex} \alias{logi.dummy} \alias{logi.engine} \alias{logLik.vblogit} \alias{looVoronoiLPP} \alias{makeLinnetTolerance} \alias{markvaluetype} \alias{maskLaslett} \alias{match2DkernelName} \alias{packupNNdata} \alias{parbreak} \alias{PairPotentialType} \alias{PDEdensityLPP} \alias{plan.legend.layout} \alias{plotWidthMap} \alias{pointweights} \alias{PoisSaddle} \alias{PoisSaddleArea} \alias{PoisSaddleGeyer} \alias{PoisSaddlePairwise} \alias{polyLaslett} \alias{polytileareaEngine} \alias{positiveIndex} \alias{PPMmodelmatrix} \alias{putSpatstatVariable} \alias{lookup.im} \alias{lookup2DkernelInfo} \alias{majorminorversion} \alias{make.even.breaks} \alias{makefvlabel} \alias{makeunitname} \alias{markappend} \alias{markcbind} \alias{markformat} \alias{markformat.ppp} \alias{markformat.ppx} \alias{markformat.psp} \alias{markformat.default} \alias{mark.scale.default} \alias{markspace.integral} \alias{marks.default} \alias{marks.quad} \alias{\%mapp\%} %DoNotExport %NAMESPACE export("%mapp%") \alias{markappendop} \alias{marksubset} \alias{markreplicateop} \alias{\%mrep\%} %DoNotExport %NAMESPACE export("%mrep%") \alias{marksubsetop} \alias{\%msub\%} %DoNotExport %NAMESPACE export("%msub%") \alias{mask2df} \alias{match.kernel} \alias{maxflow} \alias{mctestSigtraceEngine} \alias{mean.leverage.ppm} \alias{meanlistfv} \alias{meanX.owin} \alias{meanY.owin} \alias{model.se.image} \alias{modelFrameGam} \alias{mpl.engine} \alias{mpl.get.covariates} \alias{mpl.prepare} \alias{mpl.usable} \alias{MinimalTess} \alias{MultiPair.checkmatrix} \alias{multiply.only.finite.entries} 
\alias{multiplicityNumeric} \alias{na.handle.im} \alias{names.hyperframe} \alias{names<-.fv} \alias{names<-.hyperframe} \alias{nearest.pixel} \alias{nearest.valid.pixel} \alias{newformula} \alias{newstyle.coeff.handling} \alias{nncleanEngine} \alias{nndcumfun} \alias{no.trend.ppm} \alias{n.quad} \alias{numberwithunit} \alias{numeric.columns} \alias{objsurfEngine} \alias{onecolumn} \alias{optimStatus} \alias{outdated.interact} \alias{oversize.quad} \alias{owinpolycheck} \alias{owinpoly2mask} \alias{owin2polypath} \alias{pairs.listof} \alias{pairs.solist} \alias{param.quad} \alias{partialModelMatrix} \alias{pcf3engine} \alias{pcfmulti.inhom} \alias{pickoption} \alias{plotEachLayer} \alias{ploterodewin} \alias{ploterodeimage} \alias{plot3Dpoints} \alias{plotPolygonBdry} \alias{plot.addvar} \alias{plot.barplotdata} \alias{plot.bw.frac} \alias{plot.bw.optim} \alias{plot.indicfun} \alias{plot.localpcfmatrix} \alias{plot.lurk} \alias{plot.minconfit} \alias{plot.parres} \alias{plot.plotpairsim} \alias{plot.qqppm} \alias{plot.spatialcdf} \alias{ppllengine} \alias{ppm.default} \alias{ppmCovariates} \alias{ppmDerivatives} \alias{ppmInfluenceEngine} \alias{pppdist.mat} \alias{pppdist.prohorov} \alias{ppsubset} \alias{predict.vblogit} \alias{prefixfv} \alias{prepareTitle} \alias{printStatus} \alias{printStatusList} \alias{print.addvar} \alias{print.anylist} \alias{print.autoexec} \alias{print.bt.frame} \alias{print.bugtable} \alias{print.bw.frac} \alias{print.bw.optim} \alias{print.colourmap} \alias{print.diagppm} \alias{print.densityfun} \alias{print.distfun} \alias{print.detpointprocfamily} \alias{print.detpointprocfamilyfun} \alias{print.envelope} \alias{print.ewcdf} \alias{print.fasp} \alias{print.fv} \alias{print.fvfun} \alias{print.funxy} \alias{print.hasenvelope} \alias{print.hierarchicalordering} \alias{print.hyperframe} \alias{print.indicfun} \alias{print.influence.ppm} \alias{print.interact} \alias{print.intermaker} \alias{print.isf} \alias{print.laslett} 
\alias{print.layered} \alias{print.leverage.ppm} \alias{print.lintess} \alias{print.localpcfmatrix} \alias{print.lurk} \alias{print.lut} \alias{print.minconfit} \alias{print.mppm} \alias{print.msr} \alias{print.nnfun} \alias{print.numberwithunit} \alias{print.onearrow} \alias{print.parres} \alias{print.plotpairsim} \alias{print.plotppm} \alias{print.pppmatching} \alias{print.profilepl} \alias{print.quadrattest} \alias{print.qqppm} \alias{print.rat} \alias{print.rmhcontrol} \alias{print.rmhexpand} \alias{print.rmhmodel} \alias{print.rmhstart} \alias{print.rmhInfoList} \alias{print.rppm} \alias{print.splitppp} \alias{print.simplepanel} \alias{print.Smoothfun} \alias{print.solist} \alias{print.splitppx} \alias{print.summary.distfun} \alias{print.summary.funxy} \alias{print.summary.hyperframe} \alias{print.summary.listof} \alias{print.summary.linim} \alias{print.summary.linnet} \alias{print.summary.lintess} \alias{print.summary.logiquad} \alias{print.summary.lut} \alias{print.summary.mppm} \alias{print.summary.owin} \alias{print.summary.ppp} \alias{print.summary.psp} \alias{print.summary.rmhexpand} \alias{print.summary.ssf} \alias{print.summary.solist} \alias{print.summary.splitppp} \alias{print.summary.splitppx} \alias{print.summary.unitname} \alias{print.symbolmap} \alias{print.textstring} \alias{print.texturemap} \alias{print.tess} \alias{print.timed} \alias{print.vblogit} \alias{print.yardstick} \alias{project3Dhom} \alias{putlastshift} \alias{qkdeEngine} \alias{qtPrepareCoordinate} \alias{quad} \alias{quad.mppm} \alias{quadBlockSizes} \alias{RandomFieldsSafe} \alias{rasterfilter} \alias{ratfv} \alias{recognise.spatstat.type} \alias{rectquadrat.breaks} \alias{rectquadrat.countEngine} \alias{reduceformula} \alias{reheat} \alias{RelevantDeviation} \alias{repair.image.xycoords} \alias{replacementIndex} \alias{representativeRows} \alias{rescale.msr} \alias{resolveEinfo} \alias{resolve.foxall.window} \alias{resolve.vargamma.shape} \alias{resolve.lambda} 
\alias{resolve.lambda.cross} \alias{rgbNA} \alias{rhohatEngine} \alias{rhohatCalc} \alias{rMaternInhibition} \alias{RmhExpandRule} \alias{rmhsnoop} \alias{rocData} \alias{rocModel} \alias{roseContinuous} \alias{ruletextline} \alias{quadrat.testEngine} \alias{quadscheme.replicated} \alias{quadscheme.spatial} \alias{pointgrid} \alias{rastersample} \alias{rasterx.mask} \alias{rastery.mask} \alias{rasterxy.mask} \alias{rasterx.im} \alias{rastery.im} \alias{rasterxy.im} \alias{rebadge.fv} \alias{rebadge.as.crossfun} \alias{rebadge.as.dotfun} \alias{rebound} \alias{rebound.im} \alias{rebound.ppp} \alias{rebound.psp} \alias{rebound.owin} \alias{reconcile.fv} \alias{rename.fv} \alias{repair.old.factor.image} \alias{reincarnate.interact} \alias{resid4plot} \alias{resid1plot} \alias{resid1panel} \alias{resolve.2D.kernel} \alias{restrict.mask} \alias{reversePolyclipArgs} \alias{rmax.Rigid} \alias{rmax.rule} \alias{rotate.msr} \alias{rotxy} \alias{rotxypolygon} \alias{row.names.hyperframe} \alias{row.names<-.hyperframe} \alias{runifpoispp} \alias{runifpoisppOnLines} \alias{runifrect} \alias{rmhResolveControl} \alias{rmhResolveExpansion} \alias{rmhResolveTypes} \alias{rmhSnoopEnv} \alias{rmhcontrol.rmhcontrol} \alias{rmhcontrol.list} \alias{rmhEngine} \alias{rmhmodel.rmhmodel} \alias{rmhstart.rmhstart} \alias{rmhstart.list} \alias{rmpoint.I.allim} \alias{rpoint.multi} \alias{safedeldir} \alias{safelookup} \alias{scalardilate.breakpts} \alias{scalardilate.diagramobj} \alias{scalardilate.msr} \alias{scanmeasure} \alias{scanmeasure.ppp} \alias{scanmeasure.im} \alias{scanBinomLRTS} \alias{scanPoisLRTS} \alias{second.moment.calc} \alias{second.moment.engine} \alias{sewpcf} \alias{sewsmod} \alias{shift.diagramobj} \alias{shift.influence.ppm} \alias{shift.leverage.ppm} \alias{shift.msr} \alias{shift.quadratcount} \alias{shift.quadrattest} \alias{shiftxy} \alias{shiftxypolygon} \alias{signalStatus} \alias{simulate.profilepl} \alias{simulationresult} \alias{simulrecipe} 
\alias{slr.prepare} \alias{slrAssemblePixelData} \alias{Smooth.solist} \alias{Smooth.leverage.ppm} \alias{Smooth.influence.ppm} \alias{smoothcrossEngine} \alias{smoothpointsEngine} \alias{smudge} \alias{sort.im} \alias{sortalongsegment} \alias{spatstat.deldir.setopt} \alias{spatstat.xy.coords} \alias{spatstatClusterModelInfo} \alias{spatstatDPPModelInfo} \alias{spatstatRmhInfo} \alias{spatialCDFframe} \alias{spatialCDFtest} \alias{spatialCDFtestCalc} \alias{splitHybridInteraction} \alias{sp.foundclass} \alias{sp.foundclasses} \alias{sphere.volume} \alias{store.versionstring.spatstat} \alias{str.hyperframe} \alias{strictIndexSequence} \alias{strausscounts} \alias{suffloc} \alias{suffstat.generic} \alias{suffstat.poisson} \alias{summarise.trend} \alias{summary.envelope} \alias{summary.hyperframe} \alias{summary.lintess} \alias{summary.logiquad} \alias{summary.lut} \alias{summary.mppm} \alias{summary.msr} \alias{summary.profilepl} \alias{summary.pppmatching} \alias{summary.ppx} \alias{summary.splitppx} \alias{summary.rmhexpand} \alias{summary.vblogit} \alias{sumsymouter} \alias{superimposeMarks} \alias{symbolmapdomain} \alias{symbolmaptype} \alias{tail.hyperframe} \alias{tensor1x1} \alias{thinjump} \alias{tilecentroids} \alias{trianglediameters} \alias{trim.mask} \alias{tweak.closepairs} \alias{tweak.fv.entry} \alias{tweak.ratfv.entry} \alias{tweak.coefs} \alias{twostage.test} \alias{twostage.envelope} \alias{\%unit\%} %DoNotExport %NAMESPACE export("%unit%") \alias{unitname.default} \alias{unitname<-.default} \alias{unitname.msr} \alias{unitname<-.msr} \alias{unstackFilter} \alias{update.im} \alias{update.ippm} \alias{update.msr} \alias{update.rmhstart} \alias{validradius} \alias{validate2Dkernel} \alias{validate.angles} \alias{validate.lpp.coords} \alias{validate.mask} \alias{validate.quad} \alias{validate.weights} \alias{vanilla.fv} \alias{varcountEngine} %\alias{vblogit} %DoNotExport %\alias{vblogit.fmla} %DoNotExport \alias{versioncurrency.spatstat} 
\alias{versionstring.interact} \alias{versionstring.ppm} \alias{versionstring.spatstat} \alias{verifyclass} \alias{veryunique} \alias{vnnFind} \alias{Window.lintess} \alias{Window<-.linnet} \alias{Window<-.lpp} \alias{warn.once} \alias{waxlyrical} \alias{weightedclosepairs} \alias{windows.mppm} \alias{w.quad} \alias{x.quad} \alias{y.quad} \alias{xy.grid} \alias{X2testEngine} \alias{xtfrm.im} \alias{xypolygon2psp} \alias{xypolyselfint} \alias{ZeroValue} \alias{ZeroValue.im} %%%% sparse 3D arrays \alias{sparse3Darray} \alias{as.sparse3Darray} \alias{dim.sparse3Darray} \alias{dim<-.sparse3Darray} \alias{dimnames.sparse3Darray} \alias{dimnames<-.sparse3Darray} \alias{print.sparse3Darray} \alias{aperm.sparse3Darray} \alias{as.array.sparse3Darray} \alias{[.sparse3Darray} \alias{[<-.sparse3Darray} \alias{anyNA.sparse3Darray} \alias{RelevantZero} \alias{RelevantNA} \alias{RelevantEmpty} \alias{isRelevantZero} \alias{unionOfSparseIndices} \alias{Complex.sparse3Darray} \alias{Math.sparse3Darray} \alias{Ops.sparse3Darray} \alias{Summary.sparse3Darray} \alias{inside3Darray} \alias{SparseEntries} \alias{SparseIndices} \alias{EntriesToSparse} \alias{mapSparseEntries} \alias{applySparseEntries} \alias{sumsymouterSparse} \alias{tenseur} \alias{marginSums} \alias{rbindCompatibleDataFrames} \alias{bind.sparse3Darray} %%%% \alias{spatstatDiagnostic} %% \alias{as.ppplist} \alias{as.imlist} \alias{pointsAlongNetwork} \alias{expandSparse} \alias{allElementsIdentical} \alias{resampleNetworkDataFrame} \alias{sparseVectorCumul} %% \alias{as.ppm.lppm} \alias{as.ppm.rppm} \alias{predict.profilepl} %%%%%%% \description{ Internal spatstat functions. 
} \usage{ \method{[}{splitppx}(x, \dots) \method{[}{splitppx}(x, \dots) <- value \method{[}{diagramobj}(x, \dots) \method{[}{rat}(x, \dots) accumulateStatus(x, stats) acedist.show(X, Y, n, d, timelag) acedist.noshow(X, Y, n, d) active.interactions(object) adaptcoef(new.coef, fitcoef, drop) adjust.ratfv(f, columns, numfactor, denfactor) affinexy(X, mat, vec, invert) affinexypolygon(p, mat, vec, detmat) \method{affine}{msr}(X, mat, vec, \dots) ang2rad(ang, unit, start, clockwise) anycrossing.psp(A,B) ApplyConnected(X, Engine, r, \dots, rule, auxdata) applytolayers(L, FUN, \dots) applyPolyclipArgs(x, p) areadelta2(X, r, \dots, sparseOK) areaGain.diri(u, X, r, \dots, W, verbose) areaGain.grid(u, X, r, \dots, W, ngrid) areaLoss.diri(X, r, \dots, W, subset) areaLoss.grid(X, r, \dots, W, subset, method = c("count", "distmap"), ngrid = spatstat.options("ngrid.disc"), exact = FALSE) areaLoss.poly(X, r, \dots, W, subset, splitem) assemble.plot.objects(xlim, ylim, \dots, lines, polygon) AsymmDistance.psp(X, Y, metric, method) as.breakpts(\dots) \method{as.character}{unitname}(x, \dots) \method{as.data.frame}{bw.optim}(x, \dots) \method{as.data.frame}{fv}(x, \dots) \method{as.double}{im}(x, \dots) \method{as.linfun}{linfun}(X, \dots) \method{as.list}{hyperframe}(x, \dots) as.listof(x) \method{as.owin}{lintess}(W, \dots) as.unitname(s) augment.msr(x, \dots, sigma, recompute) avenndist(X) bandwidth.is.infinite(sigma) BartCalc(fY, fK) bbEngine(\dots) bermantestCalc(fram, which, alternative, \dots) bermantestEngine(model, covariate, which, alternative, \dots, modelname, covname, dataname) bdry.mask(W) bind.ratfv(x, numerator, denominator, labl, desc, preferred, ratio, quotient) blankcoefnames(x) bounding.box3(\dots) break.holes(x, splitby, depth, maxdepth) breakpts(val, maxi, even = FALSE, npos = NULL, step = NULL) breakpts.from.r(r) bt.frame(Q, trend, interaction, \dots, covariates, correction, rbord, use.gam, allcovar) bw.optim(cv, h, iopt, \dots, cvname, hname, criterion, 
warnextreme, hargnames, unitname) calc.DR(COV, z, Dim) calc.NNIR(COV, z, pos, Dim) calc.SAVE(COV, z, Dim) calc.SIR(COV, z) calc.TSE(COV, z, pos, Dim1, Dim2) cannot.update(\dots) cartesian(pp, markset, fac = TRUE) cellmiddles(W, nx, ny, npix, distances) censtimeCDFest(o, cc, d, breaks, \dots, KM, RS, HAN, RAW, han.denom, tt, pmax) change.default.expand(x, newdefault) checkbigmatrix(n, m, fatal, silent) checkfields(X,L) checksolve(M, action, descrip, target) check.arc(arc, fatal) check.anySparseVector(v, npoints, fatal, things, naok, warn, vname, oneok) check.finespacing(r, eps, win, rmaxdefault, context, action, rname) check.hist.lengths(hist,breaks) check.mat.mul(A, B, Acols, Brows, fatal) check.separable(dmat, covname, isconstant, fatal) check.testfun(f, f1, X) circticks(R, at, unit, start, clockwise, labels) circunion(arcs) clarkevansCalc(X, correction, clipregion, working) clip.psp(x, window, check, fragments) cliprect.psp(x, window, fragments) clippoly.psp(s, window, fragments) closethresh(X,R,S,twice,\dots) \method{coef}{summary.kppm}(object, \dots) \method{coef}{summary.ppm}(object, \dots) \method{coef}{vblogit}(object, \dots) coerce.marks.numeric(X, warn) \method{compatible}{rat}(A, B, \dots) compileCDF(D, B, r, \dots, han.denom, check) conform.ratfv(x) crosspairquad(Q,rmax,what) cobble.xy(x, y, f, fatal, \dots) codetime(x, hms, what) col.args.to.grey(x, \dots) commonPolyclipArgs(\dots, p) conform.imagelist(X, Zlist) countingweights(id, areas, check = TRUE) CressieReadStatistic(OBS,EXP,lambda,normalise,named) CressieReadSymbol(lambda) CressieReadName(lambda) cutoff2Dkernel(kernel, sigma, varcov, \dots, scalekernel, cutoff, fatal) CVforPCF(bw, stuff) damaged.ppm(object) data.mppm(x) datagen.runifpointOnLines(n, L) datagen.runifpoisppOnLines(lambda, L) datagen.rpoisppOnLines(lambda, L, lmax, \dots, check) default.clipwindow(object, epsilon) default.linnet.tolerance(L) default.n.tiling(X, nd, ntile, npix, eps, random, quasi, verbose) default.ntile(X) 
deltasuffstat(model, \dots, restrict, dataonly, sparseOK, quadsub, force, warn.forced, verbose, use.special) \method{dimnames}{hyperframe}(x) \method{dimnames}{hyperframe}(x) <- value DoCountEnds(X, D, toler) DoCountCrossEnds(X, I, J, DIJ, toler) Deviation(x, ref, leaveout, n, xi) \method{dfbetas}{ppmInfluence}(model, \dots) dflt.redraw(button, name, env) densitycrossEngine(Xdata, Xquery, sigma, \dots, kernel, scalekernel, weights, edge, varcov, diggle, sorted, cutoff) densitypointsEngine(x, sigma, \dots, kernel, scalekernel, weights, edge, varcov, leaveoneout, diggle, sorted, spill, cutoff) diagnose.ppm.engine(object, \dots, type, typename, opt, sigma, rbord, compute.sd, compute.cts, envelope, nsim, nrank, rv, oldstyle, splineargs, verbose) diagramobj(X, \dots) digestCovariates(\dots, W) digital.volume(range, nval, vside) \method{dim}{fasp}(x) \method{dim}{hyperframe}(x) \method{dim}{im}(x) \method{dim}{msr}(x) \method{dim}{owin}(x) \method{dimnames}{fasp}(x) \method{dimnames}{fasp}(x) <- value \method{dimnames}{msr}(x) distributecbind(x) dist2dpath(dist, method="C") do.as.im(x, action, \dots, W, eps, dimyx, xy, na.replace) do.call.plotfun(fun, arglist, \dots) do.istat(panel) doMultiStraussHard(iradii, hradii, types) dppmFixIntensity(DPP, lambda, po) dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) drawSignedPoly(x,y,pars,sgn) emptywindow(w) envelopeEngine(X, fun, simul, nsim, nrank, \dots, funargs, funYargs, verbose, clipdata, transform, global, ginterval, use.theory, alternative, scale, clamp, savefuns, savepatterns, saveresultof, weights, nsim2, VARIANCE, nSD, Yname, maxnerr, rejectNA, silent, maxerr.action, internal, cl, envir.user, expected.arg, do.pwrong, foreignclass, collectrubbish) envelopeProgressData(X, fun, \dots, exponent, alternative, leaveout, scale, clamp, normalize, deflate, rmin, save.envelope, savefuns, savepatterns) envelopeTest(X, \dots, exponent, alternative, rinterval, leaveout, scale, clamp, tie.rule, interpolate, 
save.interpolant, save.envelope, savefuns, savepatterns, Xname, badXfatal, verbose) \method{envelope}{hasenvelope}(Y, \dots, Yname) \method{envelope}{matrix}(Y, \dots, rvals, observed, theory, funX, nsim, nsim2, jsim, jsim.mean, type, alternative, scale, clamp, csr, use.theory, nrank, ginterval, nSD, savefuns, check, Yname, do.pwrong, weights, precomputed, gaveup) equalpairs(U, X, marked=FALSE) equalpairs.quad(Q) equals.quad(Q) equalsfun.quad(Q) erodemask(w,r,strict) evalCovar(model, covariate, \dots) \method{evalCovar}{ppm}(model, covariate, \dots, lambdatype, dimyx, eps, interpolate, jitter, modelname, covname, dataname, subset) \method{evalCovar}{lppm}(model, covariate, \dots, lambdatype, eps, nd, interpolate, jitter, modelname, covname, dataname, subset) evalCovariate(covariate, locations) evalInteraction(X,P,E,interaction,correction,splitInf,\dots, precomputed,savecomputed) evalInterEngine(X,P,E,interaction,correction,splitInf,\dots, Reach,precomputed,savecomputed) evalPairPotential(X,P,E,pairpot,potpars,R) evalSparse3Dentrywise(expr, envir) evaluate2Dkernel(kernel, x, y, sigma, varcov, \dots, scalekernel) even.breaks.owin(w) exactdt(X, \dots) exactPdt(w) existsSpatstatVariable(name) expandSpecialLists(x, special) expandwinPerfect(W, expand, amount) ExpSmoothLog(X, \dots, at, weights) \method{extractAIC}{slrm}(fit, scale = 0, k = 2, \dots) extractAtomicQtests(x) fakemaintitle(bb, main, \dots) \method{family}{vblogit}(object, \dots) f3engine(x, y, z, box, vside, range, nval, correction) f3Cengine(x, y, z, box, vside, rmax, nrval) fasp(fns, which, formulae, dataname, title, rowNames, colNames, checkfv) FDMKERNEL(lppobj, sigma, dtt, weights, iterMax, sparse, dtx, stepnames) fft2D(z, inverse, west) fftwAvailable() fill.coefs(coefs, required) findbestlegendpos(\dots) findCovariate(covname, scope, scopename=NULL) findcbind(root, depth, maxdepth) fii(interaction, coefs, Vnames, IsOffset) fillNA(x, value) flatfname(x) flipxypolygon(p) \method{flipxy}{msr}(X) 
forbid.logi(object) \method{format}{numberwithunit}(x, \dots, collapse, modifier) FormatFaspFormulae(f, argname) framebottomleft(w) fullIndexSequence(g) fvexprmap(x) fvlabels(x, expand=FALSE) fvlabels(x) <- value fvlabelmap(x, dot=TRUE) fvlegend(object, elang) gammabreaks(ra, n, gamma) g3engine(x, y, z, box, rmax, nrval, correction) g3Cengine(x, y, z, box, rmax, nrval) getdataname(defaultvalue, \dots, dataname) getfields(X, L, fatal = TRUE) getglmdata(object, drop=FALSE) getglmfit(object) getglmsubset(object) getlambda.lpp(lambda, X, subset, \dots, update, leaveoneout, loo.given, lambdaname) getlastshift(X) getppmdatasubset(object) getppmOriginalCovariates(object) getRandomFieldsModelGen(model) getSpatstatVariable(name) getSumFun(abbreviation, classname, ismarked, fatal) geyercounts(U,X,r,sat,Xcounts,EqualPairs) geyerdelta2(X,r,sat,\dots,sparseOK, correction) GLMpredict(fit, data, coefs, changecoef, type) good.correction.K(X) %gridadjacencymatrix(dims) gridindex(x, y, xrange, yrange, nx, ny) grid1index(x, xrange, nx) grokIndexVector(ind, len, nama) grow.mask(M, xmargin=0, ymargin=xmargin) hackglmmPQL(fixed, random, family, data, correlation, weights, control, niter, verbose, subset, \dots, reltol) hasenvelope(X, E) hasglmfit(object) HermiteCoefs(order) handle.r.b.args(r = NULL, breaks = NULL, window, pixeps = NULL, rmaxdefault) handle.rshift.args(W, \dots, radius, width, height, edge, clip, edgedefault) \method{head}{hyperframe}(x,n,\dots) hierarchicalordering(i, s) hiermat(x, h) ho.engine(model, \dots, nsim, nrmh, start, control, verb) hsvNA(h, s, v, alpha) IdenticalRowPair(i,j,a,b) IdenticalRows(i,j,a,b) idorempty(w, r, caller) illegal.iformula(ifmla, itags, dfvarnames) implemented.for.K(correction, windowtype, explicit) impliedpresence(tags, formula, df, extranames=character(0)) impliedcoefficients(object, tag) \method{influence}{ppmInfluence}(model, \dots) inpoint(W) inside.arc(theta, arc) instantiate.interact(x, par) \method{integral}{leverage.ppm}(f, domain, 
\dots) \method{integral}{influence.ppm}(f, domain, \dots) interactionfamilyname(x) intermaker(f, blank) interpretAsOrigin(x, W) intX.owin(w) intX.xypolygon(polly) intY.owin(w) intY.xypolygon(polly) invokeColourmapRule(colfun, x, \dots, zlim, colargs) is.atomicQtest(x) is.cadlag(s) is.col.argname(x) is.data(Q) is.expandable(x) \method{is.expandable}{ppm}(x) \method{is.expandable}{rmhmodel}(x) is.fv(x) is.hyperframe(x) is.imlist(x) is.infline(x) is.interact(x) \method{is.marked}{default}(\dots) \method{is.marked}{msr}(X, \dots) \method{is.marked}{psp}(X, \dots) \method{is.marked}{quad}(X, na.action="warn", \dots) is.mppm(x) \method{is.multitype}{default}(X, \dots) \method{is.multitype}{msr}(X, \dots) \method{is.multitype}{quad}(X, na.action="warn", \dots) \method{is.poisson}{mppm}(x) is.ppplist(x) is.pp3(x) is.ppx(x) is.psp(x) is.quad(x) is.scov(x) is.solist(x) is.sob(x) is.tess(x) is.vanilla(u) k3engine(x, y, z, box, rmax, nrval, correction) Kborder.engine(X, rmax, nr, correction, weights, ratio) Knone.engine(X, rmax, nr, weights, ratio) Krect.engine(X, rmax, nr, correction, weights, ratio, fname, use.integers) Kount(dIJ, bI, b, breaks) Kwtsum(dIJ, bI, wIJ, b, w, breaks, fatal) Kpcf.kppm(model, what) killinteraction(model) km.rs.opt(o, cc, d, breaks, KM, RS) kppmComLik(X, Xname, po, clusters, control, weightfun, rmax, algorithm, DPP, \dots) kppmMinCon(X, Xname, po, clusters, control, statistic, statargs, algorithm, DPP, \dots) kppmPalmLik(X, Xname, po, clusters, control, weightfun, rmax, algorithm, DPP, \dots) kraever(package, fatal) kraeverRandomFields() \method{labels}{ppm}(object, \dots) ldtEngine(nv, ns, from, to, seglen, huge, coUXord, vnndist, vnnwhich, vnnlab) \method{levels}{im}(x) \method{levels}{im}(x) <- value levelsAsFactor(x) \method{leverage}{ppmInfluence}(model, \dots) linearKengine(X, \dots, r, reweight, denom, correction, ratio, showworking) linearKmulti(X, I, J, r, \dots, correction) linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, 
correction, normalise) linearpcfengine(X, \dots, r, reweight, denom, correction, ratio) linearpcfmulti(X, I, J, r, \dots, correction) linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, normalise) linearKmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) linearPCFmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) listof(\dots) localKengine(X, \dots, wantL, lambda, rmax, correction, verbose, rvalue) localKmultiEngine(X, from, to, lambdaFrom, lambdaTo, \dots, rmax, wantL, correction, verbose, rvalue, sigma, varcov, lambdaX, update, leaveoneout, Iexplain, Jexplain, Ikey, Jkey, miss.update, miss.leave) localpcfengine(X, \dots, delta, rmax, nr, stoyan, lambda) localpcfmatrix(X, i, \dots, lambda, delta, rmax, nr, stoyan) local2lpp(L, seg, tp, X, df.only) logicalIndex(i, nama, len) logi.dummy(X, dummytype, nd, mark.repeat, \dots) logi.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, correction, rbord, covfunargs, allcovar, vnamebase, vnameprefix, justQ, savecomputed, precomputed, VB) \method{logLik}{vblogit}(object, \dots) looVoronoiLPP(X) makeLinnetTolerance maskLaslett(X, \dots, eps, dimyx, xy, oldX, verbose, plotit) markvaluetype(x) match2DkernelName(kernel) packupNNdata(NND, NNW, what, k) parbreak(terse) plan.legend.layout(B, \dots, side, sep, size, sep.frac, size.frac, started, map) PairPotentialType(pairpot) PDEdensityLPP(x, sigma, \dots, weights, dx, dt, iterMax, fun, finespacing, finedata) plotWidthMap(bb.leg, zlim, phys.scale, leg.scale, leg.side, leg.args, grafpar) pointweights(X, \dots, weights, parent) PoisSaddle(beta, fi) PoisSaddleArea(beta, fi) PoisSaddleGeyer(beta, fi) PoisSaddlePairwise(beta, fi) polyLaslett(X, \dots, oldX, verbose, plotit) polytileareaEngine(P, xrange, yrange, nx, ny, DivideByPixelArea) positiveIndex(i, nama, len) PPMmodelmatrix(object, data, \dots, subset, Q, keepNA, irregular, splitInf) \method{print}{localpcfmatrix}(x, \dots) 
\method{plot}{localpcfmatrix}(x, \dots) putSpatstatVariable(name, value) \method{[}{localpcfmatrix}(x, i, \dots) \method{[}{pp3}(x, i, drop, \dots) lookup.im(Z, x, y, naok, strict) lookup2DkernelInfo(kernel) majorminorversion(v) make.even.breaks(bmax, npos, bstep) makefvlabel(op, accent, fname, sub, argname) makeunitname(sing, plur, mul) markappend(\dots) markcbind(\dots) markformat(x) \method{markformat}{ppp}(x) \method{markformat}{ppx}(x) \method{markformat}{psp}(x) \method{markformat}{default}(x) mark.scale.default(marx, w, \dots, markscale, maxsize, meansize, characters) markspace.integral(X) \method{marks}{default}(x, \dots) \method{marks}{quad}(x, dfok=FALSE, \dots) markappendop(x, y) x \%mapp\% y marksubset(x, index, format) marksubsetop(x, i) x \%msub\% i markreplicateop(x, n) x \%mrep\% n mask2df(w) match.kernel(kernel) maxflow(costm) mctestSigtraceEngine(R, devdata, devsim, \dots, interpolate, confint, alpha, exponent, unitname) \method{mean}{leverage.ppm}(x, ...) meanlistfv(z, \dots) meanX.owin(w) meanY.owin(w) model.se.image(fit, W, \dots, what) modelFrameGam(formula, \dots) mpl.engine(Q, trend, interaction, \dots, covariates, subsetexpr, clipwin, covfunargs, correction, rbord, use.gam, gcontrol, GLM, GLMfamily, GLMcontrol, famille, forcefit, nd, eps, allcovar, callstring, precomputed, savecomputed, preponly, rename.intercept, justQ, weightfactor) mpl.get.covariates(covariates, locations, type, covfunargs, need.deriv) mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname, callstring, \dots, subsetexpr, covfunargs, allcovar, precomputed, savecomputed, vnamebase, vnameprefix, warn.illegal, warn.unidentifiable, weightfactor, skip.border, clip.interaction, splitInf) mpl.usable(x) MinimalTess(W, \dots) MultiPair.checkmatrix(mat, n, matname, naok, zerook, asymmok) multiplicityNumeric(x) multiply.only.finite.entries(x, a) na.handle.im(X, na.replace) \method{names}{fv}(x) <- value \method{names}{hyperframe}(x) 
\method{names}{hyperframe}(x) <- value nearest.pixel(x, y, Z) nearest.valid.pixel(x, y, Z, method, nsearch) newformula(old, change, eold, enew) newstyle.coeff.handling(object) nncleanEngine(kthNND, k, d, \dots, tol, maxit, plothist, lineargs, verbose, Xname) nndcumfun(X, \dots, r) no.trend.ppm(x) n.quad(Q) numberwithunit(x, u) numeric.columns(M, logical, others) objsurfEngine(objfun, optpar, objargs, \dots, dotargs, objname, ngrid, ratio, verbose) onecolumn(m) optimStatus(x, call) printStatus(x, errors.only) printStatusList(stats) signalStatus(x, errors.only) outdated.interact(object) oversize.quad(Q, \dots, nU, nX, p) owinpolycheck(W, verbose=TRUE) owinpoly2mask(w, rasta, check=TRUE) owin2polypath(w) \method{pairs}{listof}(\dots, plot=TRUE) \method{pairs}{solist}(\dots, plot=TRUE) param.quad(Q) partialModelMatrix(X,D,model,callstring,\dots) pcf3engine(x, y, z, box, rmax, nrval, correction, delta) pcfmulti.inhom(X, I, J, lambdaI = NULL, lambdaJ = NULL, \dots, r = NULL, breaks = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), sigma = NULL, varcov = NULL, Iname = "points satisfying condition I", Jname = "points satisfying condition J") pickoption(what="option", key, keymap, \dots, exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE, allow.all=TRUE) plotEachLayer(x, \dots, main, plotargs, add, show.all, do.plot) ploterodewin(W1, W2, col.edge, col.inside, do.plot, \dots) ploterodeimage(W, Z, \dots, Wcol, rangeZ, colsZ, do.plot) plot3Dpoints(xyz, eye, org, \dots, type, xlim, ylim, zlim, add, box, main, cex, box.back, box.front) plotPolygonBdry(x, \dots) \method{plot}{addvar}(x, \dots, do.points=FALSE) \method{plot}{barplotdata}(x, \dots) \method{plot}{bw.frac}(x, \dots) \method{plot}{bw.optim}(x, \dots, showopt, optargs) \method{plot}{indicfun}(x, W, \dots, main) \method{plot}{lurk}(x, \dots, shade) \method{plot}{minconfit}(x, \dots) \method{plot}{parres}(x, \dots) \method{plot}{plotpairsim}(x, \dots) 
\method{plot}{qqppm}(x, \dots, limits=TRUE, monochrome=spatstat.options('monochrome'), limcol=if(monochrome) "black" else "red") \method{plot}{spatialcdf}(x, \dots, xlab, ylab) ppllengine(X, Y, action="project", check=FALSE) \method{ppm}{default}(Q, trend, interaction, \dots, covariates, data, covfunargs, subset, clipwin, correction, rbord, use.gam, method, forcefit, emend, project, prior.mean, prior.var, nd, eps, gcontrol, nsim, nrmh, start, control, verb, callstring) ppmCovariates(model) ppmDerivatives(fit, what, Dcovfun, loc, covfunargs) ppmInfluenceEngine(fit, what, \dots, iScore, iHessian, iArgs, drop, method, fine, precomputed, sparseOK, fitname, multitypeOK, entrywise, matrix.action, dimyx, eps, geomsmooth) pppdist.mat(X, Y, cutoff = 1, q = 1, matching = TRUE, precision = 9, approximation = 10) pppdist.prohorov(X, Y, n, dfix, type, cutoff, matching, ccode, auction, precision, approximation) ppsubset(X, I, Iname, fatal) \method{predict}{vblogit}(object, newdata, type, se.fit, dispersion, terms, na.action, \dots) prefixfv(x, tagprefix, descprefix, lablprefix, whichtags) prepareTitle(main) \method{print}{addvar}(x, \dots) \method{print}{anylist}(x, \dots) \method{print}{autoexec}(x, \dots) \method{print}{bt.frame}(x, \dots) \method{print}{bugtable}(x, \dots) \method{print}{bw.frac}(x, \dots) \method{print}{bw.optim}(x, \dots) \method{print}{colourmap}(x, \dots) \method{print}{densityfun}(x, \dots) \method{print}{diagppm}(x, \dots) \method{print}{distfun}(x, \dots) \method{print}{detpointprocfamily}(x, \dots) \method{print}{detpointprocfamilyfun}(x, \dots) \method{print}{envelope}(x, \dots) \method{print}{ewcdf}(x, digits, \dots) \method{print}{fasp}(x, \dots) \method{print}{funxy}(x, \dots) \method{print}{fv}(x, \dots, tight) \method{print}{fvfun}(x, \dots) \method{print}{hasenvelope}(x, \dots) \method{print}{hierarchicalordering}(x, \dots) \method{print}{hyperframe}(x, \dots) \method{print}{indicfun}(x, \dots) \method{print}{influence.ppm}(x, \dots) 
\method{print}{interact}(x, \dots, family, brief, banner) \method{print}{intermaker}(x, \dots) \method{print}{isf}(x, \dots) \method{print}{laslett}(x, \dots) \method{print}{layered}(x, \dots) \method{print}{leverage.ppm}(x, \dots) \method{print}{lintess}(x, \dots) \method{print}{lurk}(x, \dots) \method{print}{lut}(x, \dots) \method{print}{minconfit}(x, \dots) \method{print}{mppm}(x, \dots) \method{print}{msr}(x, \dots) \method{print}{nnfun}(x, \dots) \method{print}{numberwithunit}(x, \dots) \method{print}{onearrow}(x, \dots) \method{print}{parres}(x, \dots) \method{print}{plotppm}(x, \dots) \method{print}{plotpairsim}(x, \dots) \method{print}{pppmatching}(x, \dots) \method{print}{profilepl}(x, \dots) \method{print}{quadrattest}(x, \dots) \method{print}{qqppm}(x, \dots) \method{print}{rat}(x, \dots) \method{print}{rmhcontrol}(x, \dots) \method{print}{rmhexpand}(x, \dots, prefix=TRUE) \method{print}{rmhmodel}(x, \dots) \method{print}{rmhstart}(x, \dots) \method{print}{rmhInfoList}(x, \dots) \method{print}{rppm}(x, \dots) \method{print}{simplepanel}(x, \dots) \method{print}{Smoothfun}(x, \dots) \method{print}{solist}(x, \dots) \method{print}{splitppp}(x, \dots) \method{print}{splitppx}(x, \dots) \method{print}{summary.distfun}(x, \dots) \method{print}{summary.funxy}(x, \dots) \method{print}{summary.hyperframe}(x, \dots) \method{print}{summary.linim}(x, \dots) \method{print}{summary.linnet}(x, \dots) \method{print}{summary.lintess}(x, \dots) \method{print}{summary.listof}(x, \dots) \method{print}{summary.logiquad}(x, \dots, dp=3) \method{print}{summary.lut}(x, \dots) \method{print}{summary.mppm}(x, \dots, brief) \method{print}{summary.owin}(x, \dots) \method{print}{summary.ppp}(x, \dots, dp) \method{print}{summary.psp}(x, \dots) \method{print}{summary.rmhexpand}(x, \dots) \method{print}{summary.ssf}(x, \dots) \method{print}{summary.splitppp}(x, \dots) \method{print}{summary.solist}(x, \dots) \method{print}{summary.splitppx}(x, \dots) 
\method{print}{summary.unitname}(x, \dots) \method{print}{symbolmap}(x, \dots) \method{print}{textstring}(x, \dots) \method{print}{texturemap}(x, \dots) \method{print}{tess}(x, \dots, brief=FALSE) \method{print}{timed}(x, \dots) \method{print}{vblogit}(x, \dots) \method{print}{yardstick}(x, \dots) project3Dhom(xyz, eye, org, vert) putlastshift(X, vec) qkdeEngine(X, sigma, \dots, at, what, leaveoneout, diggle, raw, edge2D, edge, weights, varcov, positive, shortcut, precomputed, savecomputed) qtPrepareCoordinate(covname, W, origin) quad(data, dummy, w, param) quad.mppm(x) quadBlockSizes(nX, nD, p, nMAX, announce) RandomFieldsSafe() rasterfilter(X, f) ratfv(df, numer, denom, \dots, ratio) recognise.spatstat.type(x) rectquadrat.breaks(xr, yr, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL) rectquadrat.countEngine(x, y, xbreaks, ybreaks, weights) reduceformula(fmla, deletevar, verbose) reheat(model, invtemp) RelevantDeviation(x, alternative, clamp, scaling) repair.image.xycoords(x) replacementIndex(ii, stuff) representativeRows(x) \method{rescale}{msr}(X, s, unitname) resolveEinfo(x, what, fallback, warn, atomic) resolve.foxall.window(X, Y, W, warn.trim) resolve.vargamma.shape(\dots, nu.ker, nu.pcf, default) resolve.lambda(X, lambda, \dots, sigma, varcov, leaveoneout, update) resolve.lambda.cross(X, I, J, lambdaI, lambdaJ, \dots, lambdaX, sigma, varcov, leaveoneout, update, lambdaIJ, Iexplain, Jexplain, miss.update, miss.leave, caller) rgbNA(red, green, blue, alpha, maxColorValue) rhohatEngine(model, covariate, reference, volume, \dots, subset, weights, method, horvitz, smoother, resolution, n, bw, adjust, from, to, bwref, covname, covunits, confidence, modelcall, callstring) rhohatCalc(ZX, Zvalues, lambda, denom, \dots, weights, lambdaX, method, horvitz, smoother, n, bw, adjust, from, to, bwref, covname, confidence, positiveCI, markovCI, covunits, modelcall, callstring, savestuff) rMaternInhibition(type, kappa, r, win, stationary, \dots, nsim, drop) 
RmhExpandRule(nama) rocData(covariate, nullmodel, \dots, high) rocModel(lambda, nullmodel, \dots, high) rmhsnoop(\dots, Wsim, Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator, panel.only) roseContinuous(ang, rad, unit, \dots, start, clockwise, main, labels, at, do.plot) ruletextline(ch, n, terse) quadrat.testEngine(X, nx, ny, alternative, method, conditional, CR, \dots, nsim, Xcount, xbreaks, ybreaks, tess, fit, df.est, Xname, fitname) quadscheme.replicated(data, dummy, method, \dots) quadscheme.spatial(data, dummy, method, \dots) pointgrid(W, ngrid) rastersample(X, Y) rasterx.mask(w, drop) rastery.mask(w, drop) rasterxy.mask(w, drop) rasterx.im(x) rastery.im(x) rasterxy.im(x, drop) rebadge.fv(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp, new.dotnames, new.preferred, new.formula, new.tags) rebadge.as.crossfun(x, main, sub, i, j) rebadge.as.dotfun(x, main, sub, i) rebound(x, rect) \method{rebound}{im}(x, rect) \method{rebound}{ppp}(x, rect) \method{rebound}{psp}(x, rect) \method{rebound}{owin}(x, rect) reconcile.fv(\dots) rename.fv(x, fname, ylab, yexp) repair.old.factor.image(x) reincarnate.interact(object) resid4plot(RES, plot.neg, plot.smooth, spacing, outer, srange, monochrome, main, xlab, ylab, rlab, col.neg, col.smooth, \dots) resid1plot(RES, opt, plot.neg, plot.smooth, srange, monochrome, main, add, show.all, do.plot, col.neg, col.smooth, \dots) resid1panel(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab,ylab, \dots, do.plot) resolve.2D.kernel(\dots, sigma, varcov, x, mindist, adjust, bwfun, allow.zero) restrict.mask(M, W) reversePolyclipArgs(x, p) rmax.Rigid(X, g) rmax.rule(fun, W, lambda) \method{rotate}{msr}(X, angle, \dots, centre) rotxy(X, angle = pi/2) rotxypolygon(p, angle = pi/2) rmhResolveControl(control, model) rmhResolveExpansion(win, control, imagelist, itype) rmhResolveTypes(model, start, control) rmhSnoopEnv(Xinit, Wclip, R) 
\method{rmhcontrol}{rmhcontrol}(\dots) \method{rmhcontrol}{list}(\dots) rmhEngine(InfoList, \dots, verbose, kitchensink, preponly, snoop, overrideXstart, overrideclip) \method{rmhmodel}{rmhmodel}(model, \dots) \method{rmhstart}{rmhstart}(start, \dots) \method{rmhstart}{list}(start, \dots) rmpoint.I.allim(n, f, types) \method{row.names}{hyperframe}(x) \method{row.names}{hyperframe}(x) <- value rpoint.multi(n, f, fmax, marks, win, giveup, verbose, warn, nsim, drop) runifpoispp(lambda, win, \dots, nsim, drop) runifpoisppOnLines(lambda, L, nsim, drop) runifrect(n, win, nsim, drop) safedeldir(X) safelookup(Z, x, factor, warn) \method{scalardilate}{breakpts}(X, f, \dots) \method{scalardilate}{diagramobj}(X, f, \dots) \method{scalardilate}{msr}(X, f, \dots) scanmeasure(X, \dots) \method{scanmeasure}{ppp}(X, r, \dots, method) \method{scanmeasure}{im}(X, r, \dots) scanPoisLRTS(nZ, nG, muZ, muG, alternative) scanBinomLRTS(nZ, nG, muZ, muG, alternative) second.moment.calc(x, sigma, edge, what, \dots, varcov, expand, obswin, npts, debug) second.moment.engine(x, sigma, edge, what, \dots, kernel, scalekernel, obswin, varcov, npts, debug) sewpcf(d, w, denargs, lambda2area, divisor) sewsmod(d, ff, wt, Ef, rvals, method="smrep", \dots, nwtsteps=500) \method{shift}{diagramobj}(X, \dots) \method{shift}{influence.ppm}(X, \dots) \method{shift}{leverage.ppm}(X, \dots) \method{shift}{msr}(X, \dots) \method{shift}{quadratcount}(X, \dots) \method{shift}{quadrattest}(X, \dots) shiftxy(X, vec = c(0, 0)) shiftxypolygon(p, vec = c(0, 0)) \method{simulate}{profilepl}(object, \dots) simulationresult(resultlist, nsim, drop, NameBase) simulrecipe(type, expr, envir, csr, pois, constraints) slr.prepare(CallInfo, envir, data, dataAtPoints, splitby, clip) slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, pixelarea) \method{Smooth}{solist}(X, \dots) \method{Smooth}{leverage.ppm}(X, \dots) \method{Smooth}{influence.ppm}(X, \dots) smoothcrossEngine(Xdata, Xquery, values, sigma, \dots, weights, 
varcov, kernel, scalekernel, sorted, cutoff) smoothpointsEngine(x, values, sigma, \dots, kernel, scalekernel, weights, varcov, leaveoneout, sorted, cutoff) smudge(X) \method{sort}{im}(x, \dots) sortalongsegment(df) spatstat.deldir.setopt(use.trigrafS, use.trigraf, debug.delaunay) spatstat.xy.coords(x, y) spatstatClusterModelInfo(name, onlyPCP) spatstatDPPModelInfo(model) spatstatRmhInfo(cifname) spatialCDFframe(model, covariate, \dots, jitter) spatialCDFtest(model, covariate, test, \dots, dimyx, eps, interpolate, jitter, nsim, verbose, modelname, covname, dataname) spatialCDFtestCalc(fra, test, \dots, details) sphere.volume(range, nval = 10) splitHybridInteraction(coeffs, inte) sp.foundclass(cname, inlist, formalname, argsgiven) sp.foundclasses(cnames, inlist, formalname, argsgiven) store.versionstring.spatstat() \method{str}{hyperframe}(object, \dots) strictIndexSequence(g) strausscounts(U,X,r,EqualPairs) suffloc(object) suffstat.generic(model, X, callstring) suffstat.poisson(model, X, callstring) summarise.trend(trend, w, a) \method{summary}{envelope}(object,\dots) \method{summary}{hyperframe}(object, \dots, brief=FALSE) \method{summary}{lintess}(object, \dots) \method{summary}{logiquad}(object, \dots, checkdup=FALSE) \method{summary}{lut}(object, \dots) \method{summary}{mppm}(object, \dots, brief=FALSE) \method{summary}{msr}(object, \dots) \method{summary}{profilepl}(object, \dots) \method{summary}{pppmatching}(object, \dots) \method{summary}{ppx}(object, \dots) \method{summary}{rmhexpand}(object, \dots) \method{summary}{splitppx}(object, \dots) \method{summary}{vblogit}(object, \dots) sumsymouter(x, w) superimposeMarks(arglist, nobj) symbolmapdomain(x) symbolmaptype(x) \method{tail}{hyperframe}(x,n,\dots) tensor1x1(A,B) thinjump(n, p) tilecentroids(W, nx, ny) trianglediameters(iedge, jedge, edgelength, \dots, nvert, dmax, check) trim.mask(M, R, tolerant) tweak.closepairs(cl, rmax, i, deltax, deltay, deltaz) tweak.fv.entry(x, current.tag, new.labl, new.desc, 
new.tag) tweak.ratfv.entry(x, \dots) tweak.coefs(model, new.coef) twostage.test(X, \dots, exponent, nsim, nsimsub, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, badXfatal, testblurb) twostage.envelope(X, \dots, nsim, nsimsub, nrank, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, badXfatal, testlabel) x \%unit\% u \method{unitname}{default}(x) \method{unitname}{default}(x) <- value \method{unitname}{msr}(x) \method{unitname}{msr}(x) <- value unstackFilter(x) \method{update}{im}(object, \dots) \method{update}{ippm}(object, \dots, envir) \method{update}{msr}(object, \dots) \method{update}{rmhstart}(object, \dots) validradius(r, caller) validate2Dkernel(kernel, fatal) validate.angles(angles, unit, guess) validate.lpp.coords(X, fatal, context) validate.mask(w, fatal=TRUE) validate.quad(Q, fatal, repair, announce) validate.weights(x, recip, how, allowzero, allowinf) vanilla.fv(x) varcountEngine(g, B, lambdaB, f) %vblogit(y, X, offset, eps, m0, S0, S0i, xi0, verb, maxiter, \dots) %vblogit.fmla(formula, offset, data, subset, weights, verbose, epsilon, \dots) versioncurrency.spatstat(today, checkR) versionstring.interact(object) versionstring.ppm(object) versionstring.spatstat() veryunique(z) verifyclass(X, C, N = deparse(substitute(X)), fatal = TRUE) vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax) \method{Window}{lintess}(X, \dots) \method{Window}{linnet}(X, \dots, check=TRUE) <- value \method{Window}{lpp}(X, \dots, check=TRUE) <- value warn.once(key, \dots) waxlyrical(type, terse) weightedclosepairs(X, r, correction, what) windows.mppm(x) w.quad(Q) x.quad(Q) y.quad(Q) xy.grid(xr, yr, nx, ny, dx, dy) X2testEngine(OBS, EXP, \dots, method, CR, df, nsim, conditional, alternative, testname, dataname) \method{xtfrm}{im}(x) xypolyselfint(p, eps, proper, yesorno, checkinternal) xypolygon2psp(p, w, check) ZeroValue(x) \method{ZeroValue}{im} %%% sparse 3D arrays 
sparse3Darray(i,j,k,x,dims,dimnames,strict,nonzero) as.sparse3Darray(x, \dots) \method{dim}{sparse3Darray}(x) \method{dim}{sparse3Darray}(x) <- value \method{dimnames}{sparse3Darray}(x) \method{dimnames}{sparse3Darray}(x) <- value \method{print}{sparse3Darray}(x, \dots) \method{aperm}{sparse3Darray}(a, perm, resize, \dots) \method{as.array}{sparse3Darray}(x, \dots) \method{[}{sparse3Darray}(x, i, j, k, drop, \dots) \method{[}{sparse3Darray}(x, i, j, k, \dots) <- value \method{anyNA}{sparse3Darray}(x, recursive) RelevantZero(x) RelevantNA(x) RelevantEmpty(x) isRelevantZero(x) unionOfSparseIndices(A,B) \special{Complex(z)} \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Summary(\dots, na.rm = FALSE)} %NAMESPACE S3method("Complex", "sparse3Darray") %NAMESPACE S3method("Math", "sparse3Darray") %NAMESPACE S3method("Ops", "sparse3Darray") %NAMESPACE S3method("Summary", "sparse3Darray") inside3Darray(d, i, j, k) SparseEntries(x) SparseIndices(x) EntriesToSparse(df, dims) mapSparseEntries(x, margin, values, conform, across) applySparseEntries(x, f, \dots) sumsymouterSparse(x, w, dbg) tenseur(A, B, alongA, alongB) marginSums(X, MARGIN) rbindCompatibleDataFrames(x) bind.sparse3Darray(A, B, along) %% spatstatDiagnostic(msg) %% as.ppplist(x, check) as.imlist(x, check) pointsAlongNetwork(L, delta) expandSparse(x, n, across) allElementsIdentical(x, entry) resampleNetworkDataFrame(df, template) sparseVectorCumul(x, i, length) %% \method{as.ppm}{lppm}(object) \method{as.ppm}{rppm}(object) \method{predict}{profilepl}(object, \dots) %%%%%%% } \details{ These internal \pkg{spatstat} functions are not usually called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat} to the next. 
} \keyword{internal} spatstat/man/rknn.Rd0000644000176200001440000000365113333543264014142 0ustar liggesusers\name{rknn} \alias{dknn} \alias{pknn} \alias{qknn} \alias{rknn} \title{ Theoretical Distribution of Nearest Neighbour Distance } \description{ Density, distribution function, quantile function and random generation for the random distance to the \eqn{k}th nearest neighbour in a Poisson point process in \eqn{d} dimensions. } \usage{ dknn(x, k = 1, d = 2, lambda = 1) pknn(q, k = 1, d = 2, lambda = 1) qknn(p, k = 1, d = 2, lambda = 1) rknn(n, k = 1, d = 2, lambda = 1) } \arguments{ \item{x,q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations to be generated.} \item{k}{order of neighbour.} \item{d}{dimension of space.} \item{lambda}{intensity of Poisson point process.} } \details{ In a Poisson point process in \eqn{d}-dimensional space, let the random variable \eqn{R} be the distance from a fixed point to the \eqn{k}-th nearest random point, or the distance from a random point to the \eqn{k}-th nearest other random point. Then \eqn{R^d} has a Gamma distribution with shape parameter \eqn{k} and rate \eqn{\lambda * \alpha}{lambda * alpha} where \eqn{\alpha}{alpha} is a constant (equal to the volume of the unit ball in \eqn{d}-dimensional space). See e.g. Cressie (1991, page 61). These functions support calculation and simulation for the distribution of \eqn{R}. } \value{ A numeric vector: \code{dknn} returns the probability density, \code{pknn} returns cumulative probabilities (distribution function), \code{qknn} returns quantiles, and \code{rknn} generates random deviates. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. 
} \author{\adrian and \rolf } \examples{ x <- seq(0, 5, length=20) densities <- dknn(x, k=3, d=2) cdfvalues <- pknn(x, k=3, d=2) randomvalues <- rknn(100, k=3, d=2) deciles <- qknn((1:9)/10, k=3, d=2) } \keyword{spatial} \keyword{distribution} spatstat/man/pppdist.Rd0000644000176200001440000002220713515521657014657 0ustar liggesusers\name{pppdist} \alias{pppdist} \title{Distance Between Two Point Patterns} \description{ Given two point patterns, find the distance between them based on optimal point matching. } \usage{ pppdist(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE, ccode = TRUE, auction = TRUE, precision = NULL, approximation = 10, show.rprimal = FALSE, timelag = 0) } \arguments{ \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).} \item{type}{ A character string giving the type of distance to be computed. One of \code{"spa"} (default), \code{"ace"} or \code{"mat"}, indicating whether the algorithm should find the optimal matching based on \dQuote{subpattern assignment}, \dQuote{assignment only if cardinalities are equal} or \dQuote{mass transfer}. See Details. } \item{cutoff}{ The value \eqn{> 0} at which interpoint distances are cut off. } \item{q}{ The order of the average that is applied to the interpoint distances. May be \code{Inf}, in which case the maximum of the interpoint distances is taken. } \item{matching}{ Logical. Whether to return the optimal matching or only the associated distance. } \item{ccode}{ Logical. If \code{FALSE}, \R code is used which allows for higher precision, but is much slower. } \item{auction}{ Logical. By default a version of Bertsekas' auction algorithm is used to compute an optimal point matching if \code{type} is either \code{"spa"} or \code{"ace"}. If \code{auction} is \code{FALSE} (or \code{type} is \code{"mat"}) a specialized primal-dual algorithm is used instead. This was the standard in earlier versions of \pkg{spatstat}, but is several orders of magnitude slower. 
} \item{precision}{ Index controlling accuracy of algorithm. The \code{q}-th powers of interpoint distances will be rounded to the nearest multiple of \code{10^(-precision)}. There is a sensible default which depends on \code{ccode}. } \item{approximation}{ If \code{q = Inf}, compute distance based on the optimal matching for the corresponding distance of order \code{approximation}. Can be \code{Inf}, but this makes computations extremely slow. } \item{show.rprimal}{ Logical. Whether to plot the progress of the primal-dual algorithm. If \code{TRUE}, slow primal-dual \R code is used, regardless of the arguments \code{ccode} and \code{auction}. } \item{timelag}{ Time lag, in seconds, between successive displays of the iterative solution of the restricted primal problem. } } \details{ Computes the distance between point patterns \code{X} and \code{Y} based on finding the matching between them which minimizes the average of the distances between matched points (if \code{q=1}), the maximum distance between matched points (if \code{q=Inf}), and in general the \code{q}-th order average (i.e. the \code{1/q}th power of the sum of the \code{q}th powers) of the distances between matched points. Distances between matched points are Euclidean distances cut off at the value of \code{cutoff}. The parameter \code{type} controls the behaviour of the algorithm if the cardinalities of the point patterns are different. For the type \code{"spa"} (subpattern assignment) the subpattern of the point pattern with the larger cardinality \eqn{n} that is closest to the point pattern with the smaller cardinality \eqn{m} is determined; then the \code{q}-th order average is taken over \eqn{n} values: the \eqn{m} distances of matched points and \eqn{n-m} "penalty distances" of value \code{cutoff} for the unmatched points. For the type \code{"ace"} (assignment only if cardinalities equal) the matching is empty and the distance returned is equal to \code{cutoff} if the cardinalities differ. 
For the type \code{"mat"} (mass transfer) each point pattern is assumed to have total mass \eqn{m} (= the smaller cardinality) distributed evenly among its points; the algorithm finds then the "mass transfer plan" that minimizes the \code{q}-th order weighted average of the distances, where the weights are given by the transferred mass divided by \eqn{m}. The result is a fractional matching (each match of two points has a weight in \eqn{(0,1]}) with the minimized quantity as the associated distance. The central problem to be solved is the assignment problem (for types \code{"spa"} and \code{"ace"}) or the more general transport problem (for type \code{"mat"}). Both are well-known problems in discrete optimization, see e.g. Luenberger (2003). For the assignment problem \code{pppdist} uses by default the forward/backward version of Bertsekas' auction algorithm with automated epsilon scaling; see Bertsekas (1992). The implemented version gives good overall performance and can handle point patterns with several thousand points. For the transport problem a specialized primal-dual algorithm is employed; see Luenberger (2003), Section 5.9. The C implementation used by default can handle patterns with a few hundreds of points, but should not be used with thousands of points. By setting \code{show.rprimal = TRUE}, some insight in the working of the algorithm can be gained. For a broader selection of optimal transport algorithms that are not restricted to spatial point patterns and allow for additional fine tuning, we recommend the \R package \pkg{transport}. For moderate and large values of \code{q} there can be numerical issues based on the fact that the \code{q}-th powers of distances are taken and some positive values enter the optimization algorithm as zeroes because they are too small in comparison with the larger values. 
In this case the number of zeroes introduced is given in a warning message, and it is possible then that the matching obtained is not optimal and the associated distance is only a strict upper bound of the true distance. As a general guideline (which can be very wrong in special situations) a small number of zeroes (up to about 50\% of the smaller point pattern cardinality \eqn{m}) usually still results in the right matching, and the number can even be quite a bit higher and usually still provides a highly accurate upper bound for the distance. These numerical problems can be reduced by enforcing (much slower) \R code via the argument \code{ccode = FALSE}. For \code{q = Inf} there is no fast algorithm available, which is why approximation is normally used: for finding the optimal matching, \code{q} is set to the value of \code{approximation}. The resulting distance is still given as the maximum rather than the \code{q}-th order average in the corresponding distance computation. If \code{approximation = Inf}, approximation is suppressed and a very inefficient exhaustive search for the best matching is performed. The value of \code{precision} should normally not be supplied by the user. If \code{ccode = TRUE}, this value is preset to the highest exponent of 10 that the C code still can handle (usually \eqn{9}). If \code{ccode = FALSE}, the value is preset according to \code{q} (usually \eqn{15} if \code{q} is small), which can sometimes be changed to obtain less severe warning messages. } \value{ Normally an object of class \code{pppmatching} that contains detailed information about the parameters used and the resulting distance. See \code{\link{pppmatching.object}} for details. If \code{matching = FALSE}, only the numerical value of the distance is returned. } \references{ Bertsekas, D.P. (1992). Auction algorithms for network flow problems: a tutorial introduction. Computational Optimization and Applications 1, 7-66. Luenberger, D.G. (2003). 
\emph{Linear and nonlinear programming.} Second edition. Kluwer. Schuhmacher, D. (2014). \emph{transport: optimal transport in various forms.} R package version 0.6-2 (or later) Schuhmacher, D. and Xia, A. (2008). A new metric between distributions of point processes. \emph{Advances in Applied Probability} \bold{40}, 651--672 Schuhmacher, D., Vo, B.-T. and Vo, B.-N. (2008). A consistent metric for performance evaluation of multi-object filters. \emph{IEEE Transactions on Signal Processing} \bold{56}, 3447--3457. } \author{ Dominic Schuhmacher \email{dominic.schuhmacher@mathematik.uni-goettingen.de} \cr \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppmatching.object}}, \code{\link{matchingdist}}, \code{\link{plot.pppmatching}} } \examples{ # equal cardinalities set.seed(140627) X <- runifpoint(500) Y <- runifpoint(500) m <- pppdist(X, Y) m \dontrun{ plot(m)} # differing cardinalities X <- runifpoint(14) Y <- runifpoint(10) m1 <- pppdist(X, Y, type="spa") m2 <- pppdist(X, Y, type="ace") m3 <- pppdist(X, Y, type="mat", auction=FALSE) summary(m1) summary(m2) summary(m3) \dontrun{ m1$matrix m2$matrix m3$matrix} # q = Inf X <- runifpoint(10) Y <- runifpoint(10) mx1 <- pppdist(X, Y, q=Inf, matching=FALSE) mx2 <- pppdist(X, Y, q=Inf, matching=FALSE, ccode=FALSE, approximation=50) mx3 <- pppdist(X, Y, q=Inf, matching=FALSE, approximation=Inf) all.equal(mx1,mx2,mx3) # sometimes TRUE all.equal(mx2,mx3) # very often TRUE } \keyword{spatial} \keyword{math} spatstat/man/Kinhom.Rd0000644000176200001440000003535013571674202014421 0ustar liggesusers\name{Kinhom} \alias{Kinhom} \title{Inhomogeneous K-function} \description{ Estimates the inhomogeneous \eqn{K} function of a non-stationary point pattern. 
} \usage{ Kinhom(X, lambda=NULL, \dots, r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). 
If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed, using a fast algorithm. } \item{lambda2}{ Advanced use only. Matrix containing estimates of the products \eqn{\lambda(x_i)\lambda(x_j)}{lambda(x[i]) * lambda(x[j])} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{reciplambda2}{ Advanced use only. Alternative to \code{lambda2}. A matrix giving values of the estimated \emph{reciprocal products} \eqn{1/\lambda(x_i)\lambda(x_j)}{1/(lambda(x[i]) * lambda(x[j]))} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{diagonal}{ Do not use this argument. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}. } \details{ This computes a generalisation of the \eqn{K} function for inhomogeneous point patterns, proposed by Baddeley, \Moller and Waagepetersen (2000). The ``ordinary'' \eqn{K} function (variously known as the reduced second order moment function and Ripley's \eqn{K} function), is described under \code{\link{Kest}}. It is defined only for stationary point processes. The inhomogeneous \eqn{K} function \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is a direct generalisation to nonstationary point processes. Suppose \eqn{x} is a point process with non-constant intensity \eqn{\lambda(u)}{lambda(u)} at each location \eqn{u}. 
Define \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} to be the expected value, given that \eqn{u} is a point of \eqn{x}, of the sum of all terms \eqn{1/\lambda(x_j)}{1/lambda(x[j])} over all points \eqn{x_j}{x[j]} in the process separated from \eqn{u} by a distance less than \eqn{r}. This reduces to the ordinary \eqn{K} function if \eqn{\lambda()}{lambda()} is constant. If \eqn{x} is an inhomogeneous Poisson process with intensity function \eqn{\lambda(u)}{lambda(u)}, then \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) = \pi r^2}{Kinhom(r) = pi * r^2}. Given a point pattern dataset, the inhomogeneous \eqn{K} function can be estimated essentially by summing the values \eqn{1/(\lambda(x_i)\lambda(x_j))}{1/(lambda(x[i]) * lambda(x[j]))} for all pairs of points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance less than \eqn{r}. This allows us to inspect a point pattern for evidence of interpoint interactions after allowing for spatial inhomogeneity of the pattern. Values \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) > \pi r^2}{Kinhom(r) > pi * r^2} are suggestive of clustering. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. 
} } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Edge corrections are used to correct bias in the estimation of \eqn{K_{\mbox{\scriptsize\rm inhom}}}{Kinhom}. 
Each edge-corrected estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is of the form \deqn{ \widehat K_{\mbox{\scriptsize\rm inhom}}(r) = (1/A) \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j,r)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/A) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j],r)/(lambda(x[i]) * lambda(x[j])) } where \code{A} is a constant denominator, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r)}{e(x[i],x[j],r)} is an edge correction factor. For the `border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\sum_j 1(b_j > r)/\lambda(x_j)} }{ 1(b[i] > r)/(sum[j] 1(b[j] > r)/lambda(x[j])) } where \eqn{b_i}{b[i]} is the distance from \eqn{x_i}{x[i]} to the boundary of the window. For the `modified border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\mbox{area}(W \ominus r)} }{ 1(b[i] > r)/area(W [-] r) } where \eqn{W \ominus r}{W [-] r} is the eroded window obtained by trimming a margin of width \eqn{r} from the border of the original window. For the `translation' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W \cap (W + (x_j - x_i)))} }{ 1/area(W intersect (W + x[j]-x[i])) } and for the `isotropic' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W) g(x_i,x_j)} }{ 1/(area(W) g(x[i],x[j])) } where \eqn{g(x_i,x_j)}{g(x[i],x[j])} is the fraction of the circumference of the circle with centre \eqn{x_i}{x[i]} and radius \eqn{||x_i - x_j||}{||x[i]-x[j]||} which lies inside the window. If \code{renormalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. 
The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } If the point pattern \code{X} contains more than about 1000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. The pair correlation function can also be applied to the result of \code{Kinhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # inhomogeneous pattern of maples X <- unmark(split(lansing)$maple) \testonly{ sub <- sample(c(TRUE,FALSE), npoints(X), replace=TRUE, prob=c(0.1,0.9)) X <- X[sub] } # (1) intensity function estimated by model-fitting # Fit spatial trend: polynomial in x and y coordinates fit <- ppm(X, ~ polynom(x,y,2), Poisson()) # (a) predict intensity values at points themselves, # obtaining a vector of lambda values lambda <- predict(fit, locations=X, type="trend") # inhomogeneous K function Ki <- Kinhom(X, lambda) plot(Ki) # (b) predict intensity at all locations, # obtaining a pixel image lambda <- predict(fit, type="trend") Ki <- Kinhom(X, lambda) plot(Ki) # (2) intensity function estimated by heavy smoothing Ki <- Kinhom(X, sigma=0.1) plot(Ki) # (3) simulated data: known intensity function lamfun <- function(x,y) { 50 + 100 * x } # inhomogeneous Poisson process Y <- rpoispp(lamfun, 150, owin()) # inhomogeneous K function Ki <- Kinhom(Y, lamfun) plot(Ki) # How to make simulation envelopes: # Example shows method (2) \dontrun{ smo <- density.ppp(X, sigma=0.1) Ken <- envelope(X, Kinhom, nsim=99, simulate=expression(rpoispp(smo)), sigma=0.1, correction="trans") plot(Ken) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/pcfcross.Rd0000644000176200001440000001455613333543264015022 0ustar liggesusers\name{pcfcross} \alias{pcfcross} \title{Multitype pair correlation function (cross-type)} \description{ Calculates an estimate of the cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross(X, i, j, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. 
It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \details{ The cross-type pair correlation function is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. For two locations \eqn{x} and \eqn{y} separated by a distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of type \eqn{j} at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda_j g_{i,j}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda[j] * g[i,j](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda_j}{p(r) = lambda[i] * lambda[j]} so \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. 
Indeed for any marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the cross-type pair correlation is \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. For a stationary multitype point process, the cross-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i,j}(r) = \frac{K_{i,j}^\prime(r)}{2\pi r} }{ g(r) = K[i,j]'(r)/ ( 2 * pi * r) } where \eqn{K_{i,j}^\prime}{K[i,j]'(r)} is the derivative of the cross-type \eqn{K} function \eqn{K_{i,j}(r)}{K[i,j](r)} of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. The command \code{pcfcross} computes a kernel estimate of the cross-type pair correlation function between marks \eqn{i} and \eqn{j}. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. 
For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfdot}} computes the corresponding analogue of \code{\link{Kdot}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i,j}}{g[i,j]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i,j}}{g[i,j]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfdot}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kcross}} } \examples{ data(amacrine) p <- pcfcross(amacrine, "off", "on") p <- pcfcross(amacrine, "off", "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/begins.Rd0000644000176200001440000000147413333543262014440 0ustar liggesusers\name{begins} \alias{begins} \title{ Check Start of Character String } \description{ Checks whether a character string begins with a particular prefix. 
} \usage{ begins(x, firstbit) } \arguments{ \item{x}{ Character string, or vector of character strings, to be tested. } \item{firstbit}{ A single character string. } } \details{ This simple wrapper function checks whether (each entry in) \code{x} begins with the string \code{firstbit}, and returns a logical value or logical vector with one entry for each entry of \code{x}. This function is useful mainly for reducing complexity in model formulae. } \value{ Logical vector of the same length as \code{x}. } \author{ \adrian \rolf and \ege } \examples{ begins(c("Hello", "Goodbye"), "Hell") begins("anything", "") } \keyword{character} spatstat/man/parameters.Rd0000644000176200001440000000271513362326066015336 0ustar liggesusers\name{parameters} \alias{parameters} \alias{parameters.dppm} \alias{parameters.kppm} \alias{parameters.ppm} \alias{parameters.profilepl} \alias{parameters.interact} \alias{parameters.fii} \title{ Extract Model Parameters in Understandable Form } \description{ Given a fitted model of some kind, this function extracts all the parameters needed to specify the model, and returns them as a list. } \usage{ parameters(model, \dots) \method{parameters}{dppm}(model, \dots) \method{parameters}{kppm}(model, \dots) \method{parameters}{ppm}(model, \dots) \method{parameters}{profilepl}(model, \dots) \method{parameters}{fii}(model, \dots) \method{parameters}{interact}(model, \dots) } \arguments{ \item{model}{ A fitted model of some kind. } \item{\dots}{ Arguments passed to methods. } } \details{ The argument \code{model} should be a fitted model of some kind. This function extracts all the parameters that would be needed to specify the model, and returns them as a list. The function \code{parameters} is generic, with methods for class \code{"ppm"}, \code{"kppm"}, \code{"dppm"} and \code{"profilepl"} and other classes. } \value{ A named list, whose format depends on the fitted model. 
} \author{ \spatstatAuthors } \seealso{ \code{\link{coef}} } \examples{ parameters(Strauss(0.1)) fit1 <- ppm(cells ~ x, Strauss(0.1)) parameters(fit1) fit2 <- kppm(redwood ~ x, "Thomas") parameters(fit2) } \keyword{spatial} \keyword{models} spatstat/man/methods.linnet.Rd0000644000176200001440000001026213333543263016120 0ustar liggesusers\name{methods.linnet} \alias{methods.linnet} %DoNotExport \Rdversion{1.1} \alias{as.linnet} \alias{as.linnet.linnet} \alias{as.owin.linnet} \alias{as.psp.linnet} \alias{nsegments.linnet} \alias{nvertices.linnet} \alias{pixellate.linnet} \alias{print.linnet} \alias{summary.linnet} \alias{unitname.linnet} \alias{unitname<-.linnet} \alias{vertexdegree} \alias{vertices.linnet} \alias{volume.linnet} \alias{Window.linnet} \title{ Methods for Linear Networks } \description{ These are methods for the class \code{"linnet"} of linear networks. } \usage{ as.linnet(X, \dots) \method{as.linnet}{linnet}(X, \dots, sparse) \method{as.owin}{linnet}(W, \dots) \method{as.psp}{linnet}(x, \dots, fatal=TRUE) \method{nsegments}{linnet}(x) \method{nvertices}{linnet}(x, \dots) \method{pixellate}{linnet}(x, \dots) \method{print}{linnet}(x, \dots) \method{summary}{linnet}(object, \dots) \method{unitname}{linnet}(x) \method{unitname}{linnet}(x) <- value vertexdegree(x) \method{vertices}{linnet}(w) \method{volume}{linnet}(x) \method{Window}{linnet}(X, \dots) } \arguments{ \item{x,X,object,w,W}{ An object of class \code{"linnet"} representing a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ A valid name for the unit of length for \code{x}. See \code{\link{unitname}}. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. Default is to keep the same representation as in \code{X}. 
} } \details{ The function \code{as.linnet} is generic. It converts data from some other format into an object of class \code{"linnet"}. The method \code{as.linnet.lpp} extracts the linear network information from an \code{lpp} object. The other functions are methods for the generic commands \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{nsegments}}, \code{\link{nvertices}}, \code{\link{pixellate}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}}, \code{\link{vertices}}, \code{\link{volume}} and \code{\link{Window}} for the class \code{"linnet"}. The methods \code{as.owin.linnet} and \code{Window.linnet} extract the window containing the linear network, and return it as an object of class \code{"owin"}. The method \code{as.psp.linnet} extracts the lines of the linear network as a line segment pattern (object of class \code{"psp"}) while \code{nsegments.linnet} simply counts the number of line segments. The method \code{vertices.linnet} extracts the vertices (nodes) of the linear network and \code{nvertices.linnet} simply counts the vertices. The function \code{vertexdegree} calculates the topological degree of each vertex (the number of lines emanating from that vertex) and returns these values as an integer vector. The method \code{pixellate.linnet} applies \code{\link{as.psp.linnet}} to convert the network to a collection of line segments, then invokes \code{\link{pixellate.psp}}. } \value{ For \code{as.linnet} the value is an object of class \code{"linnet"}. For other functions, see the help file for the corresponding generic function. } \author{ \adrian } \seealso{ \code{\link{linnet}}. Generic functions: \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{nsegments}}, \code{\link{nvertices}}, \code{\link{pixellate}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}}, \code{\link{vertices}}, \code{\link{volume}} and \code{\link{Window}}. 
Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}}, \code{\link{joinVertices}}, \code{\link{connected.linnet}}. \code{\link{lixellate}} for dividing segments into shorter segments. } \examples{ simplenet summary(simplenet) nsegments(simplenet) nvertices(simplenet) pixellate(simplenet) volume(simplenet) unitname(simplenet) <- c("cubit", "cubits") Window(simplenet) } \keyword{spatial} \keyword{methods} spatstat/man/psp.object.Rd0000644000176200001440000000616713333543264015246 0ustar liggesusers\name{psp.object} \alias{psp.object} %DoNotExport \title{Class of Line Segment Patterns} \description{ A class \code{"psp"} to represent a spatial pattern of line segments in the plane. Includes information about the window in which the pattern was observed. Optionally includes marks. } \details{ An object of this class represents a two-dimensional pattern of line segments. It specifies \itemize{ \item the locations of the line segments (both endpoints) \item the window in which the pattern was observed \item optionally, a ``mark'' attached to each line segment (extra information such as a type label). } If \code{X} is an object of type \code{psp}, it contains the following elements: \tabular{ll}{ \code{ends} \tab data frame with entries \code{x0, y0, x1, y1} \cr \tab giving coordinates of segment endpoints \cr \code{window} \tab window of observation \cr \tab (an object of class \code{\link{owin}}) \cr \code{n} \tab number of line segments \cr \code{marks} \tab optional vector or data frame of marks \cr \code{markformat} \tab character string specifying the format of the \cr \tab marks; \dQuote{none}, \dQuote{vector}, or \dQuote{dataframe} } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"psp"} may be created by the function \code{\link{psp}} and converted from other types of data by the function \code{\link{as.psp}}. 
Utilities for line segment patterns include \code{\link{midpoints.psp}} (to compute the midpoints of each segment), \code{\link{lengths.psp}} (to compute the length of each segment), \code{\link{angles.psp}} (to compute the angle of orientation of each segment), and \code{\link{distmap.psp}} (to compute the distance map of a line segment pattern).
} \usage{ nnmark(X, \dots, k = 1, at=c("pixels", "points")) } \arguments{ \item{X}{ A marked point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{k}{ Single integer. The \code{k}th nearest data point will be used. } \item{at}{ String specifying whether to compute the values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } } \details{ Given a marked point pattern dataset \code{X} this function computes, for each desired location \code{y}, the mark attached to the point of \code{X} that is nearest to \code{y}. The desired locations \code{y} can be either a pixel grid or the point pattern \code{X} itself. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The marks are allowed to be a vector or a data frame. \itemize{ \item If \code{at="points"}, then for each point in \code{X}, the algorithm finds the nearest \emph{other} point in \code{X}, and extracts the mark attached to it. The result is a vector or data frame containing the marks of the neighbours of each point. \item If \code{at="pixels"} (the default), then for each pixel in a rectangular grid, the algorithm finds the nearest point in \code{X}, and extracts the mark attached to it. The result is an image or a list of images containing the marks of the neighbours of each pixel. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link{as.mask}}. } If the argument \code{k} is given, then the \code{k}-th nearest neighbour will be used. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). The value at each pixel is the mark attached to the nearest point of \code{X}. 
\item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}. Entries are the mark values of the nearest neighbours of each point of \code{X}.
} \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, giving the headings for each of the objects. } \item{mar.panel}{ Size of the margins outside each plot panel. A numeric vector of length 4 giving the bottom, left, top, and right margins in that order. (Alternatively the vector may have length 1 or 2 and will be replicated to length 4). See the section on \emph{Spacing between plots}. } \item{hsep,vsep}{ Additional horizontal and vertical separation between plot panels, expressed in the same units as \code{mar.panel}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Optional. Function that determines different plot arguments for different panels. See Details. } \item{panel.begin.args}{ Optional. List of additional arguments for \code{panel.begin} when it is a function. } \item{panel.end.args}{ Optional. List of additional arguments for \code{panel.end} when it is a function. } \item{panel.vpad}{ Amount of extra vertical space that should be allowed for the title of each panel, if a title will be displayed. Expressed as a fraction of the height of the panel. Applies only when \code{equal.scales=FALSE} (the default) and requires that the height of each panel can be determined. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. 
This is the \code{plot} method for the class \code{"anylist"}. An object of class \code{"anylist"} represents a list of objects intended to be treated in the same way.
If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. If \code{equal.scales=FALSE} (the default), then the plot panels will have equal height on the plot device (unless there is only one column of panels, in which case they will have equal width on the plot device). This means that the objects are plotted at different physical scales, by default. If \code{equal.scales=TRUE}, then the dimensions of the plot panels on the plot device will be proportional to the spatial dimensions of the corresponding components of \code{x}. This means that the objects will be plotted at \emph{approximately} equal physical scales. If these objects have very different spatial sizes, the plot command could fail (when it tries to plot the smaller objects at a tiny scale), with an error message that the figure margins are too large. The objects will be plotted at \emph{exactly} equal physical scales, and \emph{exactly} aligned on the device, under the following conditions: \itemize{ \item every component of \code{x} is a spatial object whose position can be shifted by \code{\link{shift}}; \item \code{panel.begin} and \code{panel.end} are either \code{NULL} or they are spatial objects whose position can be shifted by \code{\link{shift}}; \item \code{adorn.left}, \code{adorn.right}, \code{adorn.top} and \code{adorn.bottom} are all \code{NULL}. } Another special case is when every component of \code{x} is an object of class \code{"fv"} representing a function. If \code{equal.scales=TRUE} then all these functions will be plotted with the same axis scales (i.e. with the same \code{xlim} and the same \code{ylim}). } \section{Spacing between plots}{ The spacing between individual plots is controlled by the parameters \code{mar.panel}, \code{hsep} and \code{vsep}. 
If \code{equal.scales=FALSE}, the plot panels are logically separate plots. The margins for each panel are determined by the argument \code{mar.panel} which becomes the graphics parameter \code{mar} described in the help file for \code{\link{par}}. One unit of \code{mar} corresponds to one line of text in the margin. If \code{hsep} or \code{vsep} are present, \code{mar.panel} is augmented by \code{c(vsep, hsep, vsep, hsep)/2}. If \code{equal.scales=TRUE}, all the plot panels are drawn in the same coordinate system which represents a physical scale. The unit of measurement for \code{mar.panel[1,3]} is one-sixth of the greatest height of any object plotted in the same row of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the greatest width of any object plotted in the same column of panels. If \code{hsep} or \code{vsep} are present, they are interpreted in the same units as \code{mar.panel[2]} and \code{mar.panel[1]} respectively. } \seealso{ \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, this generally means that one of the objects had a much smaller physical scale than the others. Ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. 
} \examples{ trichotomy <- list(regular=cells, random=japanesepines, clustered=redwood) K <- lapply(trichotomy, Kest) K <- as.anylist(K) plot(K, main="") # list of 3D point patterns ape1 <- osteo[osteo$shortid==4, "pts", drop=TRUE] class(ape1) plot(ape1, main.panel="", mar.panel=0.1, hsep=0.7, vsep=1, cex=1.5, pch=21, bg='white') } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/rNeymanScott.Rd0000644000176200001440000002245413333543264015622 0ustar liggesusers\name{rNeymanScott} \alias{rNeymanScott} \title{Simulate Neyman-Scott Process} \description{ Generate a random point pattern, a realisation of the Neyman-Scott cluster process. } \usage{ rNeymanScott(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL, nsim=1, drop=TRUE, nonempty=TRUE, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{expand}{ Size of the expansion of the simulation window for generating parent points. A single non-negative number. } \item{rcluster}{ A function which generates random clusters, or other data specifying the random cluster mechanism. See Details. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster}. } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{nonempty}{ Logical. If \code{TRUE} (the default), a more efficient algorithm is used, in which parents are generated conditionally on having at least one offspring point. If \code{FALSE}, parents are generated even if they have no offspring. 
Both choices are valid; the default is recommended unless you need to simulate all the parent points for some other purpose. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern: see Details. } \details{ This algorithm generates a realisation of the general Neyman-Scott process, with the cluster mechanism given by the function \code{rcluster}. First, the algorithm generates a Poisson point process of \dQuote{parent} points with intensity \code{kappa} in an expanded window as explained below. Here \code{kappa} may be a single positive number, a function \code{kappa(x,y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points. These clusters are combined together to yield a single point pattern, and the restriction of this pattern to the window \code{win} is then returned as the result of \code{rNeymanScott}. The expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{expand} in each direction. The size of the expansion is saved in the attribute \code{"expand"} and may be extracted by \code{attr(X, "expand")} where \code{X} is the generated point pattern. The argument \code{rcluster} specifies the cluster mechanism. It may be either: \itemize{ \item A \code{function} which will be called to generate each random cluster (the offspring points of each parent point). The function should expect to be called in the form \code{rcluster(x0,y0,\dots)} for a parent point at a location \code{(x0,y0)}. 
The return value of \code{rcluster} should specify the coordinates of the points in the cluster; it may be a list containing elements \code{x,y}, or a point pattern (object of class \code{"ppp"}). If it is a marked point pattern then the result of \code{rNeymanScott} will be a marked point pattern. \item A \code{list(mu, f)} where \code{mu} specifies the mean number of offspring points in each cluster, and \code{f} generates the random displacements (vectors pointing from the parent to the offspring). In this case, the number of offspring in a cluster is assumed to have a Poisson distribution, implying that the Neyman-Scott process is also a Cox process. The first element \code{mu} should be either a single nonnegative number (interpreted as the mean of the Poisson distribution of cluster size) or a pixel image or a \code{function(x,y)} giving a spatially varying mean cluster size (interpreted in the sense of Waagepetersen, 2007). The second element \code{f} should be a function that will be called once in the form \code{f(n)} to generate \code{n} independent and identically distributed displacement vectors (i.e. as if there were a cluster of size \code{n} with a parent at the origin \code{(0,0)}). The function should return a point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link[grDevices]{xy.coords}} that specifies the coordinates of \code{n} points. } If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rNeymanScott} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. Neyman-Scott models where \code{kappa} is a single number and \code{rcluster = list(mu,f)} can be fitted to data using the function \code{\link{kppm}}. 
} \section{Inhomogeneous Neyman-Scott Processes}{ There are several different ways of specifying a spatially inhomogeneous Neyman-Scott process: \itemize{ \item The point process of parent points can be inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process according to which the parent points are generated. \item The number of points in a typical cluster can be spatially varying. If the argument \code{rcluster} is a list of two elements \code{mu, f} and the first entry \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then \code{mu} is interpreted as the reference intensity for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu(x, y) * g(x-x0, y-y0)} where \code{g} is the probability density of the offspring displacements generated by the function \code{f}. Equivalently, clusters are first generated with a constant expected number of points per cluster: the constant is \code{mumax}, the maximum of \code{mu}. Then the offspring are randomly \emph{thinned} (see \code{\link{rthin}}) with spatially-varying retention probabilities given by \code{mu/mumax}. \item The entire mechanism for generating a cluster can be dependent on the location of the parent point. If the argument \code{rcluster} is a function, then the cluster associated with a parent point at location \code{(x0,y0)} will be generated by calling \code{rcluster(x0, y0, \dots)}. The behaviour of this function could depend on the location \code{(x0,y0)} in any fashion. } Note that if \code{kappa} is an image, the spatial domain covered by this image must be large enough to include the \emph{expanded} window in which the parent points are to be generated. 
# each cluster consists of 5 points in a disc of radius 0.2
A point pattern (object of class \code{"ppp"}). } \item{r}{ Disc radius, or vector of disc radii. } \item{\dots}{Ignored.} \item{W}{ Optional. Window (object of class \code{"owin"}) inside which the area should be calculated. } \item{subset}{ Optional. Index identifying a subset of the points of \code{X} for which the area difference should be computed. } \item{exact}{ Choice of algorithm. If \code{exact=TRUE}, areas are computed exactly using analytic geometry. If \code{exact=FALSE} then a faster algorithm is used to compute a discrete approximation to the areas. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the discrete approximation, when \code{exact=FALSE}. } } \value{ A matrix with one row for each point in \code{X} (or \code{X[subset]}) and one column for each value in \code{r}. } \details{ This function computes, for each point \code{X[i]} in \code{X} and for each radius \code{r}, the area of that part of the disc of radius \code{r} centred at the location \code{X[i]} that is \emph{not} covered by any of the other discs of radius \code{r} centred at the points \code{X[j]} for \code{j} not equal to \code{i}. This area is important in some calculations related to the area-interaction model \code{\link{AreaInter}}. The result is a matrix, with one row for each point in \code{X} and one column for each entry of \code{r}. } \seealso{ \code{\link{AreaInter}}, \code{\link{areaGain}}, \code{\link{dilated.areas}} } \examples{ data(cells) areaLoss(cells, 0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/is.im.Rd0000644000176200001440000000117313333543263014205 0ustar liggesusers\name{is.im} \alias{is.im} \title{Test Whether An Object Is A Pixel Image} \description{ Tests whether its argument is a pixel image (object of class \code{"im"}). } \usage{ is.im(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the argument \code{x} is a pixel image object of class \code{"im"}. 
\deqn{ A \ominus B = (A^c \oplus (-B))^c }{ (A^c \%+\% (-B))^c }
} \seealso{ \code{\link{erosion}}, \code{\link{MinkowskiSum}} } \examples{ B <- square(c(-0.1, 0.1)) RminusB <- letterR \%(-)\% B FR <- grow.rectangle(Frame(letterR), 0.3) plot(FR, main="", type="n") plot(letterR, add=TRUE, lwd=2, hatch=TRUE, box=FALSE) plot(RminusB, add=TRUE, col="blue", box=FALSE) plot(shift(B, vec=c(3.49, 2.98)), add=TRUE, border="red", lwd=2) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/regularpolygon.Rd0000644000176200001440000000350113333543264016235 0ustar liggesusers\name{regularpolygon} \alias{regularpolygon} \alias{hexagon} \title{ Create A Regular Polygon } \description{ Create a window object representing a regular (equal-sided) polygon. } \usage{ regularpolygon(n, edge = 1, centre = c(0, 0), \dots, align = c("bottom", "top", "left", "right", "no")) hexagon(edge = 1, centre = c(0,0), \dots, align = c("bottom", "top", "left", "right", "no")) } \arguments{ \item{n}{ Number of edges in the polygon. } \item{edge}{ Length of each edge in the polygon. A single positive number. } \item{centre}{ Coordinates of the centre of the polygon. A numeric vector of length 2, or a \code{list(x,y)} giving the coordinates of exactly one point, or a point pattern (object of class \code{"ppp"}) containing exactly one point. } \item{align}{ Character string specifying whether to align one of the edges with a vertical or horizontal boundary. } \item{\dots}{ Ignored. } } \details{ The function \code{regularpolygon} creates a regular (equal-sided) polygon with \code{n} sides, centred at \code{centre}, with sides of equal length \code{edge}. The function \code{hexagon} is the special case \code{n=6}. The orientation of the polygon is determined by the argument \code{align}. If \code{align="no"}, one vertex of the polygon is placed on the \eqn{x}-axis. Otherwise, an edge of the polygon is aligned with one side of the frame, specified by the value of \code{align}. } \value{ A window (object of class \code{"owin"}). 
} \author{ \spatstatAuthors. } \seealso{ \code{\link{disc}}, \code{\link{ellipse}}, \code{\link{owin}}. \code{\link{hextess}} for hexagonal tessellations. } \examples{ plot(hexagon()) plot(regularpolygon(7)) plot(regularpolygon(7, align="left")) } \keyword{spatial} \keyword{datagen} spatstat/man/rDGS.Rd0000644000176200001440000000733013602545270013765 0ustar liggesusers\name{rDGS} \alias{rDGS} \title{Perfect Simulation of the Diggle-Gates-Stibbard Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gates-Stibbard process, using a perfect simulation algorithm. } \usage{ rDGS(beta, rho, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{rho}{ interaction range (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Diggle-Gates-Stibbard point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. 
Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763--770. Geyer, C.J. and \Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
} \examples{ X <- rDGS(50, 0.05) Z <- rDGS(50, 0.03, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGatesStibbard}}. \code{\link{rStrauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rPenttinen}}. } \keyword{spatial} \keyword{datagen} spatstat/man/methods.ppx.Rd0000644000176200001440000000263513333543263015443 0ustar liggesusers\name{methods.ppx} \Rdversion{1.1} \alias{methods.ppx} %DoNotExport \alias{print.ppx} \alias{plot.ppx} \alias{unitname.ppx} \alias{unitname<-.ppx} \title{ Methods for Multidimensional Space-Time Point Patterns } \description{ Methods for printing and plotting a general multidimensional space-time point pattern. } \usage{ \method{print}{ppx}(x, ...) \method{plot}{ppx}(x, ...) \method{unitname}{ppx}(x) \method{unitname}{ppx}(x) <- value } \arguments{ \item{x}{ Multidimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Additional arguments passed to plot methods. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"ppx"} of multidimensional point patterns. The \code{print} method prints a description of the point pattern and its spatial domain. The \code{unitname} method extracts the name of the unit of length in which the point coordinates are expressed. The \code{unitname<-} method assigns the name of the unit of length. } \value{ For \code{print.ppx} the value is \code{NULL}. For \code{unitname.ppx} an object of class \code{"units"}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{ppx}}, \code{\link{unitname}} } \keyword{spatial} spatstat/man/ppp.Rd0000644000176200001440000002304013333543264013763 0ustar liggesusers\name{ppp} \alias{ppp} \title{Create a Point Pattern} \description{ Creates an object of class \code{"ppp"} representing a point pattern dataset in the two-dimensional plane. } \usage{ ppp(x,y, \dots, window, marks, check=TRUE, checkdup=check, drop=TRUE) } \arguments{ \item{x}{Vector of \eqn{x} coordinates of data points} \item{y}{Vector of \eqn{y} coordinates of data points} \item{window}{window of observation, an object of class \code{"owin"}} \item{\dots}{arguments passed to \code{\link{owin}} to create the window, if \code{window} is missing} \item{marks}{(optional) mark values for the points. A vector or data frame.} \item{check}{ Logical value indicating whether to check that all the \eqn{(x,y)} points lie inside the specified window. Do not set this to \code{FALSE} unless you are absolutely sure that this check is unnecessary. See Warnings below. } \item{checkdup}{ Logical value indicating whether to check for duplicated coordinates. See Warnings below. } \item{drop}{ Logical flag indicating whether to simplify data frames of marks. See Details. } } \value{ An object of class \code{"ppp"} describing a point pattern in the two-dimensional plane (see \code{\link{ppp.object}}). } \details{ In the \pkg{spatstat} library, a point pattern dataset is described by an object of class \code{"ppp"}. This function creates such objects. The vectors \code{x} and \code{y} must be numeric vectors of equal length. They are interpreted as the cartesian coordinates of the points in the pattern. Note that \code{x} and \code{y} are permitted to have length zero, corresponding to an empty point pattern; this is the default if these arguments are missing. A point pattern dataset is assumed to have been observed within a specific region of the plane called the observation window. 
An object of class \code{"ppp"} representing a point pattern contains information specifying the observation window. This window must always be specified when creating a point pattern dataset; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. You can specify the observation window in several (mutually exclusive) ways: \itemize{ \item \code{xrange, yrange} specify a rectangle with these dimensions; \item \code{poly} specifies a polygonal boundary. If the boundary is a single polygon then \code{poly} must be a list with components \code{x,y} giving the coordinates of the vertices. If the boundary consists of several disjoint polygons then \code{poly} must be a list of such lists so that \code{poly[[i]]$x} gives the \eqn{x} coordinates of the vertices of the \eqn{i}th boundary polygon. \item \code{mask} specifies a binary pixel image with entries that are \code{TRUE} if the corresponding pixel is inside the window. \item \code{window} is an object of class \code{"owin"} specifying the window. A window object can be created by \code{\link{owin}} from raw coordinate data. Special shapes of windows can be created by the functions \code{\link{square}}, \code{\link{hexagon}}, \code{\link{regularpolygon}}, \code{\link{disc}} and \code{\link{ellipse}}. See the Examples. } The arguments \code{xrange, yrange} or \code{poly} or \code{mask} are passed to the window creator function \code{\link{owin}} for interpretation. See \code{\link{owin}} for further details. The argument \code{window}, if given, must be an object of class \code{"owin"}. It is a full description of the window geometry, and could have been obtained from \code{\link{owin}} or \code{\link{as.owin}}, or by just extracting the observation window of another point pattern, or by manipulating such windows. See \code{\link{owin}} or the Examples below. 
The points with coordinates \code{x} and \code{y} \bold{must} lie inside the specified window, in order to define a valid object of this class. Any points which do not lie inside the window will be removed from the point pattern, and a warning will be issued. See the section on Rejected Points. The name of the unit of length for the \code{x} and \code{y} coordinates can be specified in the dataset, using the argument \code{unitname}, which is passed to \code{\link{owin}}. See the examples below, or the help file for \code{\link{owin}}. The optional argument \code{marks} is given if the point pattern is marked, i.e. if each data point carries additional information. For example, points which are classified into two or more different types, or colours, may be regarded as having a mark which identifies which colour they are. Data recording the locations and heights of trees in a forest can be regarded as a marked point pattern where the mark is the tree height. The argument \code{marks} can be either \itemize{ \item a vector, of the same length as \code{x} and \code{y}, which is interpreted so that \code{marks[i]} is the mark attached to the point \code{(x[i],y[i])}. If the mark is a real number then \code{marks} should be a numeric vector, while if the mark takes only a finite number of possible values (e.g. colours or types) then \code{marks} should be a \code{factor}. \item a data frame, with the number of rows equal to the number of points in the point pattern. The \code{i}th row of the data frame is interpreted as containing the mark values for the \code{i}th point in the point pattern. The columns of the data frame correspond to different mark variables (e.g. tree species and tree diameter). } If \code{drop=TRUE} (the default), then a data frame with only one column will be converted to a vector, and a data frame with no columns will be converted to \code{NULL}. See \code{\link{ppp.object}} for a description of the class \code{"ppp"}. 
Users would normally invoke \code{ppp} to create a point pattern, but the functions \code{\link{as.ppp}} and \code{scanpp} may sometimes be convenient. } \section{Invalid coordinate values}{ The coordinate vectors \code{x} and \code{y} must contain only finite numerical values. If the coordinates include any of the values \code{NA}, \code{NaN}, \code{Inf} or \code{-Inf}, these will be removed. } \section{Rejected points}{ The points with coordinates \code{x} and \code{y} \bold{must} lie inside the specified window, in order to define a valid object of class \code{"ppp"}. Any points which do not lie inside the window will be removed from the point pattern, and a warning will be issued. The rejected points are still accessible: they are stored as an attribute of the point pattern called \code{"rejects"} (which is an object of class \code{"ppp"} containing the rejected points in a large window). However, rejected points in a point pattern will be ignored by all other functions except \code{\link{plot.ppp}}. To remove the rejected points altogether, use \code{\link{as.ppp}}. To include the rejected points, you will need to find a larger window that contains them, and use this larger window in a call to \code{ppp}. } \section{Warnings}{ The code will check for problems with the data, and issue a warning if any problems are found. The checks and warnings can be switched off, for efficiency's sake, but this should only be done if you are confident that the data do not have these problems. Setting \code{check=FALSE} will disable all the checking procedures: the check for points outside the window, and the check for duplicated points. This is extremely dangerous, because points lying outside the window will break many of the procedures in \pkg{spatstat}, causing crashes and strange errors. Set \code{check=FALSE} only if you are absolutely sure that there are no points outside the window. If duplicated points are found, a warning is issued, but no action is taken. 
Duplicated points are not illegal, but may cause unexpected problems later. Setting \code{checkdup=FALSE} will disable the check for duplicated points. Do this only if you already know the answer. Methodology and software for spatial point patterns often assume that all points are distinct so that there are no duplicated points. If duplicated points are present, the consequence could be an incorrect result or a software crash. To the best of our knowledge, all \pkg{spatstat} code handles duplicated points correctly. However, if duplicated points are present, we advise using \code{\link{unique.ppp}} or \code{\link{multiplicity.ppp}} to eliminate duplicated points and re-analyse the data. } \seealso{ \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ # some arbitrary coordinates in [0,1] x <- runif(20) y <- runif(20) # the following are equivalent X <- ppp(x, y, c(0,1), c(0,1)) X <- ppp(x, y) X <- ppp(x, y, window=owin(c(0,1),c(0,1))) # specify that the coordinates are given in metres X <- ppp(x, y, c(0,1), c(0,1), unitname=c("metre","metres")) \dontrun{plot(X)} # marks m <- sample(1:2, 20, replace=TRUE) m <- factor(m, levels=1:2) X <- ppp(x, y, c(0,1), c(0,1), marks=m) \dontrun{plot(X)} # polygonal window X <- ppp(x, y, poly=list(x=c(0,10,0), y=c(0,0,10))) \dontrun{plot(X)} # circular window of radius 2 X <- ppp(x, y, window=disc(2)) # copy the window from another pattern data(cells) X <- ppp(x, y, window=Window(cells)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/summary.anylist.Rd0000644000176200001440000000156013333543264016346 0ustar liggesusers\name{summary.anylist} \alias{summary.anylist} \title{Summary of a List of Things} \description{ Prints a useful summary of each item in a list of things. } \usage{ \method{summary}{anylist}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"anylist"}. } \item{\dots}{ Ignored. 
} } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"anylist"} is effectively a list of things which are intended to be treated in a similar way. See \code{\link{anylist}}. This function extracts a useful summary of each of the items in the list. } \seealso{ \code{\link{anylist}}, \code{\link{summary}}, \code{\link{plot.anylist}} } \examples{ x <- anylist(A=runif(10), B=runif(10), C=runif(10)) summary(x) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/eval.im.Rd0000644000176200001440000000577213414272450014527 0ustar liggesusers\name{eval.im} \alias{eval.im} \title{Evaluate Expression Involving Pixel Images} \description{ Evaluates any expression involving one or more pixel images, and returns a pixel image. } \usage{ eval.im(expr, envir, harmonize=TRUE, warn=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{Optional. The environment in which to evaluate the expression, or a named list containing pixel images to be used in the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } \item{warn}{ Logical. Whether to issue a warning if the pixel grids were inconsistent. } } \details{ This function is a wrapper to make it easier to perform pixel-by-pixel calculations in an image. Pixel images in \pkg{spatstat} are represented by objects of class \code{"im"} (see \code{\link{im.object}}). These are essentially matrices of pixel values, with extra attributes recording the pixel dimensions, etc. Suppose \code{X} is a pixel image. Then \code{eval.im(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image. Suppose \code{X} and \code{Y} are two pixel images with compatible dimensions: they have the same number of pixels, the same physical size of pixels, and the same bounding box. 
Then \code{eval.im(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image. In general, \code{expr} can be any expression in the R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.im} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised. There must be at least one pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=FALSE}, images that are incompatible will cause an error. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible; if \code{warn=TRUE}, a warning will be issued. } \value{ An image object of class \code{"im"}. } \seealso{ \code{\link{im.apply}} for operations similar to \code{\link{apply}}, such as taking the sum of a list of images. \code{\link{as.im}}, \code{\link{compatible.im}}, \code{\link{harmonise.im}}, \code{\link{im.object}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { 3 * x + y }, unit.square()) eval.im(X + 3) eval.im(X - Y) eval.im(abs(X - Y)) Z <- eval.im(sin(X * pi) + Y) ## Use of 'envir' W <- eval.im(sin(U), list(U=density(cells))) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/Smooth.ssf.Rd0000644000176200001440000000155113333543264015232 0ustar liggesusers\name{Smooth.ssf} \alias{Smooth.ssf} \title{ Smooth a Spatially Sampled Function } \description{ Applies kernel smoothing to a spatially sampled function. } \usage{ \method{Smooth}{ssf}(X, \dots) } \arguments{ \item{X}{ Object of class \code{"ssf"}. 
} \item{\dots}{ Arguments passed to \code{\link[spatstat]{Smooth.ppp}} to control the smoothing. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. The function values will be smoothed using a Gaussian kernel. } \value{ A pixel image or a list of pixel images. } \author{ \adrian. } \seealso{ \code{\link{ssf}}, \code{\link{Smooth.ppp}} } \examples{ f <- ssf(redwood, nndist(redwood)) Smooth(f, sigma=0.1) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/plot.ssf.Rd0000644000176200001440000000551613377650213014745 0ustar liggesusers\name{plot.ssf} \alias{plot.ssf} \alias{image.ssf} \alias{contour.ssf} \title{ Plot a Spatially Sampled Function } \description{ Plot a spatially sampled function object. } \usage{ \method{plot}{ssf}(x, \dots, how = c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma = NULL, contourargs=list()) \method{image}{ssf}(x, \dots) \method{contour}{ssf}(x, ..., main, sigma = NULL) } \arguments{ \item{x}{ Spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{image.default}} or \code{\link[spatstat]{plot.ppp}} to control the plot. } \item{how}{ Character string determining whether to display the function values at the data points (\code{how="points"}), a smoothed interpolation of the function (\code{how="smoothed"}), or the function value at the nearest data point (\code{how="nearest"}). } \item{style}{ Character string indicating whether to plot the smoothed function as a colour image, a contour map, or both. } \item{contourargs}{ Arguments passed to \code{\link[graphics]{contour.default}} to control the contours, if \code{style="contour"} or \code{style="imagecontour"}. } \item{sigma}{ Smoothing bandwidth for smooth interpolation. } \item{main}{ Optional main title for the plot. 
} } \details{ These are methods for the generic \code{\link[graphics]{plot}}, \code{\link[graphics]{image}} and \code{\link[graphics]{contour}} for the class \code{"ssf"}. An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. For \code{plot.ssf} there are three types of display. If \code{how="points"} the exact function values will be displayed as circles centred at the locations where they were computed. If \code{how="smoothed"} (the default) these values will be kernel-smoothed using \code{\link{Smooth.ppp}} and displayed as a pixel image. If \code{how="nearest"} the values will be interpolated by nearest neighbour interpolation using \code{\link{nnmark}} and displayed as a pixel image. For \code{image.ssf} and \code{contour.ssf} the values are kernel-smoothed before being displayed. } \value{ \code{NULL}. } \references{ Baddeley, A. (2017) Local composite likelihood for spatial point processes. \emph{Spatial Statistics} \bold{22}, 261--295. Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, nndist(cells, k=1:3)) plot(a, how="points") plot(a, how="smoothed") plot(a, how="nearest") } \keyword{spatial} \keyword{hplot} spatstat/man/coef.slrm.Rd0000644000176200001440000000177613333543263015067 0ustar liggesusers\name{coef.slrm} \Rdversion{1.1} \alias{coef.slrm} \title{ Coefficients of Fitted Spatial Logistic Regression Model } \description{ Extracts the coefficients (parameters) from a fitted Spatial Logistic Regression model. } \usage{ \method{coef}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. 
} } \details{ This is a method for \code{\link{coef}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It extracts the fitted canonical parameters, i.e.\ the coefficients in the linear predictor of the spatial logistic regression. } \value{ Numeric vector of coefficients. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) coef(fit) } \author{\adrian \email{adrian@maths.uwa.edu.au} and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/diameter.linnet.Rd0000644000176200001440000000247513333543263016256 0ustar liggesusers\name{diameter.linnet} \alias{boundingradius.linnet} \alias{diameter.linnet} \title{ Diameter and Bounding Radius of a Linear Network } \description{ Compute the diameter or bounding radius of a linear network measured using the shortest path distance. } \usage{ \method{diameter}{linnet}(x) \method{boundingradius}{linnet}(x, \dots) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Ignored.} } \details{ The diameter of a linear network (in the shortest path distance) is the maximum value of the shortest-path distance between any two points \eqn{u} and \eqn{v} on the network. The bounding radius of a linear network (in the shortest path distance) is the minimum value, over all points \eqn{u} on the network, of the maximum shortest-path distance from \eqn{u} to another point \eqn{v} on the network. The functions \code{\link{boundingradius}} and \code{\link{diameter}} are generic; the functions \code{boundingradius.linnet} and \code{diameter.linnet} are the methods for objects of class \code{linnet}. } \value{ A single numeric value. 
} \author{ \adrian } \seealso{ \code{\link{boundingradius}}, \code{\link{diameter}}, \code{\link{linnet}} } \examples{ diameter(simplenet) boundingradius(simplenet) } \keyword{spatial} \keyword{math} spatstat/man/dg.sigtrace.Rd0000644000176200001440000001503313333543263015360 0ustar liggesusers\name{dg.sigtrace} \alias{dg.sigtrace} \title{ Significance Trace of Dao-Genton Test } \description{ Generates a Significance Trace of the Dao and Genton (2014) test for a spatial point pattern. } \usage{ dg.sigtrace(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, alternative = c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate = FALSE, confint = TRUE, alpha = 0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. } \item{exponent}{ Positive number. Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. 
If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. If \code{alternative="greater"} the alternative hypothesis is that the true value of the summary function is higher than the theoretical value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dg.sigtrace} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the significance trace. 
The plot shows the Dao-Genton adjusted \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion. If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R >= rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Bowman, A.W. and Azzalini, A. 
(1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Computational and Graphical Statistics} \bold{23}, 497--517. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dg.test}} for the Dao-Genton test, \code{\link{dclf.sigtrace}} for significance traces of other tests. } \examples{ ns <- if(interactive()) 19 else 5 plot(dg.sigtrace(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat/man/range.fv.Rd0000644000176200001440000000236013333543264014674 0ustar liggesusers\name{range.fv} \alias{range.fv} \alias{max.fv} \alias{min.fv} \title{ Range of Function Values } \description{ Compute the range, maximum, or minimum of the function values in a summary function. } \usage{ \method{range}{fv}(\dots, na.rm = TRUE, finite = na.rm) \method{max}{fv}(\dots, na.rm = TRUE, finite = na.rm) \method{min}{fv}(\dots, na.rm = TRUE, finite = na.rm) } \arguments{ \item{\dots}{ One or more function value tables (objects of class \code{"fv"} representing summary functions) or other data. } \item{na.rm}{ Logical. Whether to ignore \code{NA} values. } \item{finite}{ Logical. Whether to ignore values that are infinite, \code{NaN} or \code{NA}. } } \details{ These are methods for the generic \code{\link[base]{range}}, \code{\link[base]{max}} and \code{\link[base]{min}}. They compute the range, maximum, and minimum of the \emph{function} values that would be plotted on the \eqn{y} axis by default. For more complicated calculations, use \code{\link{with.fv}}. } \value{ Numeric vector of length 2. } \seealso{ \code{\link{with.fv}} } \examples{ G <- Gest(cells) range(G) max(G) min(G) } \author{ \adrian , \rolf and \ege. 
} \keyword{spatial} \keyword{math} spatstat/man/rThomas.Rd0000644000176200001440000001521013333543264014601 0ustar liggesusers\name{rThomas} \alias{rThomas} \title{Simulate Thomas Process} \description{ Generate a random point pattern, a realisation of the Thomas cluster process. } \usage{ rThomas(kappa, scale, mu, win = owin(c(0,1),c(0,1)), nsim=1, drop=TRUE, saveLambda=FALSE, expand = 4*scale, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{scale}{ Standard deviation of random displacement (along each coordinate axis) of a point from its cluster centre. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. Has a sensible default. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. 
} } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the (`modified') Thomas process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being isotropic Gaussian displacements from the cluster parent location. The resulting point pattern is a realisation of the classical \dQuote{stationary Thomas process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the Thomas process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). 
For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu * f}, where \code{f} is the Gaussian probability density centred at the parent point. Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) with independent Gaussian displacements from the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be spatially inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{expand} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the Thomas process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{f}. The Thomas process with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}}. Currently it is not possible to fit the Thomas model with inhomogeneous parents. If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. 
} \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, \code{\link{rNeymanScott}}, \code{\link{rGaussPoisson}}, \code{\link{kppm}}, \code{\link{clusterfit}}. } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ #homogeneous X <- rThomas(10, 0.2, 5) #inhomogeneous Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin()) Y <- rThomas(10, 0.2, Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/rpoislinetess.Rd0000644000176200001440000000273613333543264016100 0ustar liggesusers\name{rpoislinetess} \alias{rpoislinetess} \title{Poisson Line Tessellation} \description{ Generate a tessellation delineated by the lines of the Poisson line process } \usage{ rpoislinetess(lambda, win = owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Currently, the window must be a rectangle. } } \details{ This algorithm generates a realisation of the uniform Poisson line process, and divides the window \code{win} into tiles separated by these lines. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. 
} \value{ A tessellation (object of class \code{"tess"}). Also has an attribute \code{"lines"} containing the realisation of the Poisson line process, as an object of class \code{"infline"}. } \author{\adrian and \rolf } \seealso{ \code{\link{rpoisline}} to generate the lines only. } \examples{ X <- rpoislinetess(3) plot(as.im(X), main="rpoislinetess(3)") plot(X, add=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/vcov.mppm.Rd0000644000176200001440000000562413333543265015122 0ustar liggesusers\name{vcov.mppm} \alias{vcov.mppm} \title{Calculate Variance-Covariance Matrix for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model, calculate the variance-covariance matrix of the parameter estimates. } \usage{ \method{vcov}{mppm}(object, ..., what="vcov", err="fatal") } \arguments{ \item{object}{ A multiple point process model (object of class \code{"mppm"}). } \item{\dots}{ Arguments recognised by \code{\link{vcov.ppm}}. } \item{what}{ Character string indicating which quantity should be calculated. Options include \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } \item{err}{ Character string indicating what action to take if an error occurs. Either \code{"fatal"}, \code{"warn"} or \code{"null"}. } } \details{ This is a method for the generic function \code{\link{vcov}}. The argument \code{object} should be a fitted multiple point process model (object of class \code{"mppm"}) generated by \code{\link{mppm}}. The variance-covariance matrix of the parameter estimates is computed using asymptotic theory for maximum likelihood (for Poisson processes) or estimating equations (for other Gibbs models). If \code{what="vcov"} (the default), the variance-covariance matrix is returned. If \code{what="corr"}, the variance-covariance matrix is normalised to yield a correlation matrix, and this is returned. 
If \code{what="fisher"}, the Fisher information matrix is returned instead. In all three cases, the rows and columns of the matrix correspond to the parameters (coefficients) in the same order as in \code{coef(model)}. If errors or numerical problems occur, the argument \code{err} determines what will happen. If \code{err="fatal"} an error will occur. If \code{err="warn"} a warning will be issued and \code{NA} will be returned. If \code{err="null"}, no warning is issued, but \code{NULL} is returned. } \value{ A numeric matrix (or \code{NA} or \code{NULL}). } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix of one of the models was either too large or too small for reliable numerical calculation. See \code{\link{vcov.ppm}} for suggestions on how to handle this. } \seealso{ \code{\link{vcov}}, \code{\link{vcov.ppm}}, \code{\link{mppm}} } \examples{ fit <- mppm(Wat ~x, data=hyperframe(Wat=waterstriders)) vcov(fit) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/contour.im.Rd0000644000176200001440000000636213333543263015270 0ustar liggesusers\name{contour.im} \alias{contour.im} \title{Contour plot of pixel image} \description{ Generates a contour plot of a pixel image. } \usage{ \method{contour}{im}(x, \dots, main, axes=FALSE, add=FALSE, col=par("fg"), clipwin=NULL, show.all=!add, do.plot=TRUE) } \arguments{ \item{x}{ Pixel image to be plotted. An object of class \code{"im"}. } \item{main}{ Character string to be displayed as the main title. } \item{axes}{ Logical. 
If \code{TRUE}, coordinate axes are plotted (with tick marks) around a region slightly larger than the image window. If \code{FALSE} (the default), no axes are plotted, and a box is drawn tightly around the image window. Ignored if \code{add=TRUE}. } \item{add}{ Logical. If \code{FALSE}, a new plot is created. If \code{TRUE}, the contours are drawn over the existing plot. } \item{col}{ Colour in which to draw the contour lines. Either a single value that can be interpreted as a colour value, or a \code{colourmap} object. } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the data will be displayed. } \item{\dots}{ Other arguments passed to \code{\link{contour.default}} controlling the contour plot; see Details. } \item{show.all}{ Logical value indicating whether to display all plot elements including the main title, bounding box, and (if \code{axes=TRUE}) coordinate axis markings. Default is \code{TRUE} for new plots and \code{FALSE} for added plots. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \details{ This is a method for the generic \code{contour} function, for objects of the class \code{"im"}. An object of class \code{"im"} represents a pixel image; see \code{\link{im.object}}. This function displays the values of the pixel image \code{x} as a contour plot on the current plot device, using equal scales on the \eqn{x} and \eqn{y} axes. The appearance of the plot can be modified using any of the arguments listed in the help for \code{\link{contour.default}}. Useful ones include: \describe{ \item{nlevels}{ Number of contour levels to plot. } \item{drawlabels}{ Whether to label the contour lines with text. } \item{col,lty,lwd}{ Colour, type, and width of contour lines. } } See \code{\link{contour.default}} for a full list of these arguments. The defaults for any of the abovementioned arguments can be reset using \code{\link{spatstat.options}("par.contour")}. 
If \code{col} is a colour map (object of class \code{"colourmap"}, see \code{\link{colourmap}}) then the contours will be plotted in different colours as determined by the colour map. The contour at level \code{z} will be plotted in the colour \code{col(z)} associated with this level in the colour map. } \value{ none. } \examples{ # an image Z <- setcov(owin()) contour(Z, axes=TRUE) contour(Z) co <- colourmap(rainbow(100), range=c(0,1)) contour(Z, col=co, lwd=2) } \seealso{ \code{\link{im.object}}, \code{\link{plot.im}}, \code{\link{persp.im}} } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/marks.psp.Rd0000644000176200001440000000445113333543263015106 0ustar liggesusers\name{marks.psp} \alias{marks.psp} \alias{marks<-.psp} \title{Marks of a Line Segment Pattern} \description{ Extract or change the marks attached to a line segment pattern. } \usage{ \method{marks}{psp}(x, \dots, dfok=TRUE) \method{marks}{psp}(x, \dots) <- value } \arguments{ \item{x}{ Line segment pattern dataset (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{dfok}{ Logical. If \code{FALSE}, data frames of marks are not permitted and will generate an error. } \item{value}{ Vector or data frame of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor or data frame, containing the mark values attached to the line segments of \code{x}. If there are no marks, the result is \code{NULL}. For \code{marks(x) <- value}, the result is the updated line segment pattern \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). } \details{ These functions extract or change the marks attached to each of the line segments in the pattern \code{x}. They are methods for the generic functions \code{\link{marks}} and \code{\link{marks<-}} for the class \code{"psp"} of line segment patterns. The expression \code{marks(x)} extracts the marks of \code{x}. 
The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The marks can be a vector, a factor, or a data frame. For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of segments in \code{x}, or a data frame with as many rows as there are segments in \code{x}. If \code{value} is a single value, or a data frame with one row, then it will be replicated so that the same marks will be attached to each segment. To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}. } \seealso{ \code{\link{psp.object}}, \code{\link{marks}}, \code{\link{marks<-}} } \examples{ m <- data.frame(A=1:10, B=letters[1:10]) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) marks(X) marks(X)[,2] marks(X) <- 42 marks(X) <- NULL } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/rsyst.Rd0000644000176200001440000000500413333543264014350 0ustar liggesusers\name{rsyst} \alias{rsyst} \title{Simulate systematic random point pattern} \description{ Generates a \dQuote{systematic random} pattern of points in a window, consisting of a grid of equally-spaced points with a random common displacement. } \usage{ rsyst(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of columns of grid points in the window. Incompatible with \code{dx}. } \item{ny}{Number of rows of grid points in the window. Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{Spacing of grid points in \eqn{x} direction. Incompatible with \code{nx}. } \item{dy}{Spacing of grid points in \eqn{y} direction. Incompatible with \code{ny}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. 
If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a \dQuote{systematic random} pattern of points in the window \code{win}. The pattern consists of a rectangular grid of points with a random common displacement. The grid spacing in the \eqn{x} direction is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The grid spacing in the \eqn{y} direction is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. The grid is then given a random displacement (the common displacement of the grid points is a uniformly distributed random vector in the tile of dimensions \code{dx, dy}). Some of the resulting grid points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{rstrat}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rsyst(nx=10) plot(X) # polygonal boundary data(letterR) X <- rsyst(letterR, 5, 10) plot(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/vcov.ppm.Rd0000644000176200001440000002177313550025246014743 0ustar liggesusers\name{vcov.ppm} \alias{vcov.ppm} \title{Variance-Covariance Matrix for a Fitted Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted point process model. 
} \usage{ \method{vcov}{ppm}(object, \dots, what = "vcov", verbose = TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), hessian=FALSE) } \arguments{ \item{object}{A fitted point process model (an object of class \code{"ppm"}.)} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } \item{fine}{ Logical value indicating whether to use a quick estimate (\code{fine=FALSE}, the default) or a slower, more accurate estimate (\code{fine=TRUE}). } \item{verbose}{Logical. If \code{TRUE}, a message will be printed if various minor problems are encountered. } \item{gam.action}{String indicating what to do if \code{object} was fitted by \code{gam}. } \item{matrix.action}{String indicating what to do if the matrix is ill-conditioned (so that its inverse cannot be calculated). } \item{logi.action}{String indicating what to do if \code{object} was fitted via the logistic regression approximation using a non-standard dummy point process. } \item{nacoef.action}{ String indicating what to do if some of the fitted coefficients are \code{NA} (so that variance cannot be calculated). } \item{hessian}{ Logical. Use the negative Hessian matrix of the log pseudolikelihood instead of the Fisher information. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"ppm"}, typically produced by \code{\link{ppm}}. 
The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.ppm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.ppm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.ppm}(object)}. For models fitted by the Berman-Turner approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the maximum pseudolikelihood (using the default \code{method="mpl"} in the call to \code{\link{ppm}}), the implementation works as follows. \itemize{ \item If the fitted model \code{object} is a Poisson process, the calculations are based on standard asymptotic theory for the maximum likelihood estimator (Kutoyants, 1998). The observed Fisher information matrix of the fitted model \code{object} is first computed, by summing over the Berman-Turner quadrature points in the fitted model. The asymptotic variance-covariance matrix is calculated as the inverse of the observed Fisher information. The correlation matrix is then obtained by normalising. \item If the fitted model is not a Poisson process (i.e. it is some other Gibbs point process) then the calculations are based on Coeurjolly and Rubak (2013). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. 
} For models fitted by the Huang-Ogata method (\code{method="ho"} in the call to \code{\link{ppm}}), the implementation uses the Monte Carlo estimate of the Fisher information matrix that was computed when the original model was fitted. For models fitted by the logistic regression approximation to the maximum pseudolikelihood (\code{method="logi"} in the call to \code{\link{ppm}}), calculations are based on (Baddeley et al., 2013). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. In this case the calculations depend on the type of dummy pattern used, and currently only the types \code{"stratrand"}, \code{"binomial"} and \code{"poisson"} as generated by \code{\link{quadscheme.logi}} are implemented. For other types the behavior depends on the argument \code{logi.action}. If \code{logi.action="fatal"} an error is produced. Otherwise, for types \code{"grid"} and \code{"transgrid"} the formulas for \code{"stratrand"} are used which in many cases should be conservative. For an arbitrary user specified dummy pattern (type \code{"given"}) the formulas for \code{"poisson"} are used which in many cases should be conservative. If \code{logi.action="warn"} a warning is issued otherwise the calculation proceeds without a warning. The argument \code{verbose} makes it possible to suppress some diagnostic messages. The asymptotic theory is not correct if the model was fitted using \code{gam} (by calling \code{\link{ppm}} with \code{use.gam=TRUE}). The argument \code{gam.action} determines what to do in this case. If \code{gam.action="fatal"}, an error is generated. If \code{gam.action="warn"}, a warning is issued and the calculation proceeds using the incorrect theory for the parametric case, which is probably a reasonable approximation in many applications. 
If \code{gam.action="silent"}, the calculation proceeds without a warning. If \code{hessian=TRUE} then the negative Hessian (second derivative) matrix of the log pseudolikelihood, and its inverse, will be computed. For non-Poisson models, this is not a valid estimate of variance, but is useful for other calculations. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. If this message occurs, try repeating the calculation using \code{fine=TRUE}. Singularity can occur because of numerical overflow or collinearity in the covariates. To check this, rescale the coordinates of the data points and refit the model. See the Examples. In a Gibbs model, a singular matrix may also occur if the fitted model is a hard core process: this is a feature of the variance estimator. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- ppm(X, ~ x + y) vcov(fit) vcov(fit, what="Fish") # example of singular system m <- ppm(demopat ~polynom(x,y,2)) \dontrun{ try(v <- vcov(m)) } # rescale x, y coordinates to range [0,1] x [0,1] approximately demopatScale <- rescale(demopat, 10000) m <- ppm(demopatScale ~ polynom(x,y,2)) v <- vcov(m) # Gibbs example fitS <- ppm(swedishpines ~1, Strauss(9)) coef(fitS) sqrt(diag(vcov(fitS))) } \author{ Original code for Poisson point process was written by \adrian and \rolf . New code for stationary Gibbs point processes was generously contributed by \ege and Jean-Francois Coeurjolly. New code for generic Gibbs process written by \adrian. New code for logistic method contributed by \ege. 
} \seealso{ \code{\link{vcov}} for the generic, \code{\link{ppm}} for information about fitted models, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Coeurjolly, J.-F. and Rubak, E. (2013) Fast covariance estimation for innovations computed from a spatial Gibbs point process. Scandinavian Journal of Statistics \bold{40} 669--684. Kutoyants, Y.A. (1998) \bold{Statistical Inference for Spatial Poisson Processes}, Lecture Notes in Statistics 134. New York: Springer 1998. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/disc.Rd0000644000176200001440000000452013333543263014107 0ustar liggesusers\name{disc} \alias{disc} \title{Circular Window} \description{ Creates a circular window } \usage{ disc(radius=1, centre=c(0,0), \dots, mask=FALSE, npoly=128, delta=NULL) } \arguments{ \item{radius}{Radius of the circle.} \item{centre}{The centre of the circle.} \item{mask}{Logical flag controlling the type of approximation to a perfect circle. See Details. } \item{npoly}{Number of edges of the polygonal approximation, if \code{mask=FALSE}. Incompatible with \code{delta}. } \item{delta}{ Tolerance of polygonal approximation: the length of arc that will be replaced by one edge of the polygon. Incompatible with \code{npoly}. } \item{\dots}{Arguments passed to \code{as.mask} determining the pixel resolution, if \code{mask=TRUE}. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying a window. } \details{ This command creates a window object representing a disc, with the given radius and centre. By default, the circle is approximated by a polygon with \code{npoly} edges. If \code{mask=TRUE}, then the disc is approximated by a binary pixel mask. 
The resolution of the mask is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. The argument \code{radius} must be a single positive number. The argument \code{centre} specifies the disc centre: it can be either a numeric vector of length 2 giving the coordinates, or a \code{list(x,y)} giving the coordinates of exactly one point, or a point pattern (object of class \code{"ppp"}) containing exactly one point. } \seealso{ \code{\link{ellipse}}, \code{\link{discs}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.mask}} } \note{This function can also be used to generate regular polygons, by setting \code{npoly} to a small integer value. For example \code{npoly=5} generates a pentagon and \code{npoly=13} a triskaidecagon. } \examples{ # unit disc W <- disc() # disc of radius 3 centred at x=10, y=5 W <- disc(3, c(10,5)) # plot(disc()) plot(disc(mask=TRUE)) # nice smooth circle plot(disc(npoly=256)) # how to control the resolution of the mask plot(disc(mask=TRUE, dimyx=256)) # check accuracy of approximation area(disc())/pi area(disc(mask=TRUE))/pi } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/lengths.psp.Rd0000644000176200001440000000226513474140142015431 0ustar liggesusers\name{lengths.psp} \alias{lengths.psp} \title{Lengths of Line Segments} \description{ Computes the length of each line segment in a line segment pattern. } \usage{ lengths.psp(x, squared=FALSE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{squared}{ Logical value indicating whether to return the squared lengths (\code{squared=TRUE}) or the lengths themselves (\code{squared=FALSE}, the default). } } \value{ Numeric vector. } \details{ The length of each line segment is computed and the lengths are returned as a numeric vector. 
Using squared lengths may be more efficient for some purposes, for example, to find the length of the shortest segment, \code{sqrt(min(lengths.psp(x, squared=TRUE)))} is faster than \code{min(lengths.psp(x))}. } \seealso{ \code{\link{marks.psp}}, \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{angles.psp}}, \code{\link{endpoints.psp}}, \code{\link{extrapolate.psp}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- lengths.psp(a) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/anova.lppm.Rd0000644000176200001440000000733713333543262015250 0ustar liggesusers\name{anova.lppm} \alias{anova.lppm} \title{ANOVA for Fitted Point Process Models on Linear Network} \description{ Performs analysis of deviance for two or more fitted point process models on a linear network. } \usage{ \method{anova}{lppm}(object, \dots, test=NULL) } \arguments{ \item{object}{A fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ One or more fitted point process models on the same linear network. } \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for fitted point process models on a linear network (objects of class \code{"lppm"}, usually generated by the model-fitting function \code{\link{lppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. 
If some of the fitted models are \emph{not} Poisson point processes, then the deviance difference is replaced by the adjusted composite likelihood ratio (Pace et al, 2011; Baddeley et al, 2014). } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This generally occurs when the point process models are fitted on different linear networks. } } } \seealso{ \code{\link{lppm}} } \examples{ X <- runiflpp(10, simplenet) mod0 <- lppm(X ~1) modx <- lppm(X ~x) anova(mod0, modx, test="Chi") } \author{\adrian } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. McSwiggan, G., Nair, M.G. 
and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. Pace, L., Salvan, A. and Sartori, N. (2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/rotate.Rd0000644000176200001440000000147213333543264014467 0ustar liggesusers\name{rotate} \alias{rotate} \title{Rotate} \description{ Applies a rotation to any two-dimensional object, such as a point pattern or a window. } \usage{ rotate(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{\dots}{Data specifying the rotation.} } \value{ Another object of the same type, representing the result of rotating \code{X} through the specified angle. } \details{ This is generic. Methods are provided for point patterns (\code{\link{rotate.ppp}}) and windows (\code{\link{rotate.owin}}). } \seealso{ \code{\link{rotate.ppp}}, \code{\link{rotate.owin}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/rthin.Rd0000644000176200001440000000704213525477112014315 0ustar liggesusers\name{rthin} \alias{rthin} \title{Random Thinning} \description{ Applies independent random thinning to a point pattern or segment pattern. } \usage{ rthin(X, P, \dots, nsim=1, drop=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"} or \code{"pp3"} or \code{"ppx"}) or line segment pattern (object of class \code{"psp"}) that will be thinned. } \item{P}{ Data giving the retention probabilities, i.e. the probability that each point or line in \code{X} will be retained. 
Either a single number, or a vector of numbers, or a \code{function(x,y)} in the \R language, or a function object (class \code{"funxy"} or \code{"linfun"}), or a pixel image (object of class \code{"im"} or \code{"linim"}). } \item{\dots}{ Additional arguments passed to \code{P}, if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ An object of the same kind as \code{X} if \code{nsim=1}, or a list of such objects if \code{nsim > 1}. } \details{ In a random thinning operation, each point of the point pattern \code{X} is randomly either deleted or retained (i.e. not deleted). The result is a point pattern, consisting of those points of \code{X} that were retained. Independent random thinning means that the retention/deletion of each point is independent of other points. The argument \code{P} determines the probability of \bold{retaining} each point. It may be \describe{ \item{a single number,}{so that each point will be retained with the same probability \code{P}; } \item{a vector of numbers,}{so that the \code{i}th point of \code{X} will be retained with probability \code{P[i]}; } \item{a function \code{P(x,y)},}{so that a point at a location \code{(x,y)} will be retained with probability \code{P(x,y)}; } \item{an object of class \code{"funxy"} or \code{"linfun"},}{so that points in the pattern \code{X} will be retained with probabilities \code{P(X)}; } \item{a pixel image,}{containing values of the retention probability for all locations in a region encompassing the point pattern. } } If \code{P} is a function \code{P(x,y)}, it should be \sQuote{vectorised}, that is, it should accept vector arguments \code{x,y} and should yield a numeric vector of the same length. The function may have extra arguments which are passed through the \code{\dots} argument. 
} \section{Reproducibility}{ The algorithm for random thinning was changed in \pkg{spatstat} version \code{1.42-3}. Set \code{spatstat.options(fastthin=FALSE)} to use the previous, slower algorithm, if it is desired to reproduce results obtained with earlier versions. } \examples{ plot(redwood, main="thinning") # delete 20\% of points Y <- rthin(redwood, 0.8) points(Y, col="green", cex=1.4) # function f <- function(x,y) { ifelse(x < 0.4, 1, 0.5) } Y <- rthin(redwood, f) # pixel image Z <- as.im(f, Window(redwood)) Y <- rthin(redwood, Z) # pattern on a linear network A <- runiflpp(30, simplenet) B <- rthin(A, 0.2) g <- function(x,y,seg,tp) { ifelse(y < 0.4, 1, 0.5) } B <- rthin(A, linfun(g, simplenet)) # thin other kinds of patterns E <- rthin(osteo$pts[[1]], 0.6) L <- rthin(copper$Lines, 0.5) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} \keyword{manip}spatstat/man/areaGain.Rd0000644000176200001440000000460413333543262014676 0ustar liggesusers\name{areaGain} \alias{areaGain} \title{Difference of Disc Areas} \description{ Computes the area of that part of a disc that is not covered by other discs. } \usage{ areaGain(u, X, r, ..., W=as.owin(X), exact=FALSE, ngrid=spatstat.options("ngrid.disc")) } \arguments{ \item{u}{ Coordinates of the centre of the disc of interest. A vector of length 2. Alternatively, a point pattern (object of class \code{"ppp"}). } \item{X}{ Locations of the centres of other discs. A point pattern (object of class \code{"ppp"}). } \item{r}{ Disc radius, or vector of disc radii. } \item{\dots}{ Arguments passed to \code{\link{distmap}} to determine the pixel resolution, when \code{exact=FALSE}. } \item{W}{ Window (object of class \code{"owin"}) in which the area should be computed. } \item{exact}{ Choice of algorithm. If \code{exact=TRUE}, areas are computed exactly using analytic geometry. If \code{exact=FALSE} then a faster algorithm is used to compute a discrete approximation to the areas. } \item{ngrid}{ Integer. 
Number of points in the square grid used to compute the discrete approximation, when \code{exact=FALSE}. } } \value{ A matrix with one row for each point in \code{u} and one column for each value in \code{r}. } \details{ This function computes the area of that part of the disc of radius \code{r} centred at the location \code{u} that is \emph{not} covered by any of the discs of radius \code{r} centred at the points of the pattern \code{X}. This area is important in some calculations related to the area-interaction model \code{\link{AreaInter}}. If \code{u} is a point pattern and \code{r} is a vector, the result is a matrix, with one row for each point in \code{u} and one column for each entry of \code{r}. The \code{[i,j]} entry in the matrix is the area of that part of the disc of radius \code{r[j]} centred at the location \code{u[i]} that is \emph{not} covered by any of the discs of radius \code{r[j]} centred at the points of the pattern \code{X}. If \code{W} is not \code{NULL}, then the areas are computed only inside the window \code{W}. } \seealso{ \code{\link{AreaInter}}, \code{\link{areaLoss}} } \examples{ data(cells) u <- c(0.5,0.5) areaGain(u, cells, 0.1) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/linfun.Rd0000644000176200001440000000444513436126374014472 0ustar liggesusers\name{linfun} \Rdversion{1.1} \alias{linfun} \title{ Function on a Linear Network } \description{ Create a function on a linear network. } \usage{ linfun(f, L) } \arguments{ \item{f}{ A \code{function} in the \R language. } \item{L}{ A linear network (object of class \code{"linnet"}) on which \code{f} is defined. } } \details{ This creates an object of class \code{"linfun"}. This is a simple mechanism for handling a function defined on a linear network, to make it easier to display and manipulate. 
\code{f} should be a \code{function} in the \R language, with formal arguments \code{x,y,seg,tp} (and optional additional arguments) where \code{x,y} are Cartesian coordinates of locations on the linear network, \code{seg, tp} are the local coordinates. The function \code{f} should be vectorised: that is, if \code{x,y,seg,tp} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y,seg,tp)} should be a vector of length \code{n}. \code{L} should be a linear network (object of class \code{"linnet"}) on which the function \code{f} is well-defined. The result is a function \code{g} in the \R language which belongs to the special class \code{"linfun"}. There are several methods for this class including \code{print}, \code{plot} and \code{\link{as.linim}}. This function can be called as \code{g(X)} where \code{X} is an \code{"lpp"} object, or called as \code{g(x,y)} or \code{g(x,y,seg,tp)} where \code{x,y,seg,tp} are coordinates. If the original function \code{f} had additional arguments, then these may be included in the call to \code{g}, and will be passed to \code{f}. } \value{ A function in the \R\ language. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{methods.linfun}} for methods applicable to \code{"linfun"} objects. \code{\link{distfun.lpp}}, \code{\link{nnfun.lpp}}. 
} \examples{ f <- function(x,y,seg,tp) { x+y } g <- linfun(f, simplenet) plot(g) X <- runiflpp(3, simplenet) g(X) Z <- as.linim(g) f <- function(x,y,seg,tp, mul=1) { mul*(x+y) } g <- linfun(f, simplenet) plot(g) plot(g, mul=10) g(X, mul=10) Z <- as.linim(g, mul=10) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/dppparbounds.Rd0000644000176200001440000000145013333543263015665 0ustar liggesusers\name{dppparbounds} \alias{dppparbounds} \title{Parameter Bound for a Determinantal Point Process Model} \description{ Returns the lower and upper bound for a specific parameter of a determinantal point process model when all other parameters are fixed. } \usage{dppparbounds(model, name, \dots)} \arguments{ \item{model}{Model of class \code{"detpointprocfamily"}.} \item{name}{name of the parameter for which the bound should be computed.} \item{\dots}{ Additional arguments passed to the \code{parbounds} function of the given model } } \value{A \code{data.frame} containing lower and upper bounds.} \author{ \adrian \rolf and \ege } \examples{ model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2) dppparbounds(model, "lambda") } \keyword{spatial} \keyword{models} spatstat/man/LambertW.Rd0000644000176200001440000000252613333543262014705 0ustar liggesusers\name{LambertW} \alias{LambertW} \title{ Lambert's W Function } \description{ Computes Lambert's W-function. } \usage{ LambertW(x) } \arguments{ \item{x}{ Vector of nonnegative numbers. } } \details{ Lambert's W-function is the inverse function of \eqn{f(y) = y e^y}{f(y) = y * exp(y)}. That is, \eqn{W} is the function such that \deqn{ W(x) e^{W(x)} = x }{ W(x) * exp(W(x)) = x } This command \code{LambertW} computes \eqn{W(x)} for each entry in the argument \code{x}. If the library \pkg{gsl} has been installed, then the function \code{lambert_W0} in that library is invoked. Otherwise, values of the W-function are computed by root-finding, using the function \code{\link[stats]{uniroot}}. 
Computation using \pkg{gsl} is about 100 times faster. If any entries of \code{x} are infinite or \code{NA}, the corresponding results are \code{NA}. } \value{ Numeric vector. } \references{ Corless, R, Gonnet, G, Hare, D, Jeffrey, D and Knuth, D (1996), On the Lambert W function. \emph{Computational Mathematics}, \bold{5}, 325--359. Roy, R and Olver, F (2010), Lambert W function. In Olver, F, Lozier, D and Boisvert, R (eds.), \emph{{NIST} Handbook of Mathematical Functions}, Cambridge University Press. } \author{\adrian and \rolf } \examples{ LambertW(exp(1)) } \keyword{math} spatstat/man/ewcdf.Rd0000644000176200001440000000420613441712766014264 0ustar liggesusers\name{ewcdf} \alias{ewcdf} \title{Weighted Empirical Cumulative Distribution Function} \description{ Compute a weighted version of the empirical cumulative distribution function. } \usage{ ewcdf(x, weights = NULL, normalise=TRUE, adjust=1) } \arguments{ \item{x}{Numeric vector of observations.} \item{weights}{ Optional. Numeric vector of non-negative weights for \code{x}. Defaults to equal weight 1 for each entry of \code{x}. } \item{normalise}{ Logical value indicating whether the weights should be rescaled so that they sum to 1. } \item{adjust}{ Numeric value. Adjustment factor. The weights will be multiplied by \code{adjust}. } } \details{ This is a modification of the standard function \code{\link{ecdf}} allowing the observations \code{x} to have weights. The weighted e.c.d.f. (empirical cumulative distribution function) \code{Fn} is defined so that, for any real number \code{y}, the value of \code{Fn(y)} is equal to the total weight of all entries of \code{x} that are less than or equal to \code{y}. That is \code{Fn(y) = sum(weights[x <= y])}. Thus \code{Fn} is a step function which jumps at the values of \code{x}. The height of the jump at a point \code{y} is the total weight of all entries in \code{x} number of tied observations at that value. Missing values are ignored. 
If \code{weights} is omitted, the default is equivalent to \code{ecdf(x)} except for the class membership. The result of \code{ewcdf} is a function, of class \code{"ewcdf"}, inheriting from the classes \code{"ecdf"} (if \code{normalise=TRUE}) and \code{"stepfun"}. The class \code{ewcdf} has methods for \code{print} and \code{quantile}. The inherited classes \code{ecdf} and \code{stepfun} have methods for \code{plot} and \code{summary}. } \value{ A function, of class \code{"ewcdf"}, inheriting from \code{"ecdf"} (if \code{normalise=TRUE}) and \code{"stepfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{ecdf}}. \code{\link{quantile.ewcdf}} } \examples{ x <- rnorm(100) w <- runif(100) plot(e <- ewcdf(x,w)) e } \keyword{nonparametric} \keyword{univar} spatstat/man/plot.tess.Rd0000644000176200001440000001152213333543264015121 0ustar liggesusers\name{plot.tess} \alias{plot.tess} \title{Plot a Tessellation} \description{ Plots a tessellation, with optional labels for the tiles, and optional filled colour in each tile. } \usage{ \method{plot}{tess}(x, \dots, main, add=FALSE, show.all=!add, border=NULL, do.plot=TRUE, do.labels=FALSE, labels=tilenames(x), labelargs=list(), do.col=FALSE, values=marks(x), multiplot=TRUE, col=NULL, ribargs=list()) } \arguments{ \item{x}{Tessellation (object of class \code{"tess"}) to be plotted.} \item{\dots}{Arguments controlling the appearance of the plot.} \item{main}{Heading for the plot. A character string.} \item{add}{Logical. Determines whether the tessellation plot is added to the existing plot. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the observation window of \code{x}. } \item{border}{ Colour of the tile boundaries. A character string or other value specifying a single colour. Ignored for pixel tessellations. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. 
} \item{do.labels}{ Logical value indicating whether to show a text label for each tile of the tessellation. } \item{labels}{Character vector of labels for the tiles.} \item{labelargs}{ List of arguments passed to \code{\link{text.default}} to control display of the text labels. } \item{do.col}{ Logical value indicating whether tiles should be filled with colour. Always \code{TRUE} for pixel tessellations. } \item{values}{ A vector of numerical values (or a factor, or vector of character strings) that will be associated with each tile of the tessellation and which determine the colour of the tile. The default is the marks of \code{x}. If the tessellation is not marked, or if the argument \code{values=NULL} is given, the default is a factor giving the tile identifier. } \item{multiplot}{ Logical value giving permission to display multiple plot panels. This applies when \code{do.col=TRUE} and \code{ncol(values) > 1}. } \item{col}{ A vector of colours for each of the \code{values}, or a \code{\link{colourmap}} that maps these values to colours. } \item{ribargs}{ List of additional arguments to control the plot of the colour map, if \code{do.col=TRUE}. See explanation in \code{\link{plot.im}}. } } \details{ This is a method for the generic \code{\link{plot}} function for the class \code{"tess"} of tessellations (see \code{\link{tess}}). The window of the tessellation is plotted, and then the tiles of the tessellation are plotted in their correct positions in the window. Rectangular or polygonal tiles are plotted individually using \code{\link{plot.owin}}, while a tessellation represented by a pixel image is plotted using \code{\link{plot.im}}. The arguments \code{\dots} control the appearance of the plot, and are passed to \code{\link{segments}}, \code{\link{plot.owin}} or \code{\link{plot.im}} as appropriate. If \code{do.col=TRUE}, then the tiles of the tessellation are filled with colours determined by the argument \code{values}. 
By default, these values are the marks associated with each of the tiles. If there is more than one column of marks or values, then the default behaviour (if \code{multiplot=TRUE}) is to display several plot panels, one for each column of mark values. Then the arguments \code{\dots} are passed to \code{\link{plot.solist}} to determine the arrangement of the panels. } \value{ (Invisible) window of class \code{"owin"} specifying a bounding box for the plot, or an object of class \code{"colourmap"} specifying the colour map. (In the latter case, the bounding box information is available as an attribute, and can be extracted using \code{\link{as.owin}}.) } \seealso{ \code{\link{tess}} } \examples{ Rect <- tess(xgrid=0:4,ygrid=0:4) Diri <- dirichlet(runifpoint(7)) plot(Diri) plot(Rect, border="blue", lwd=2, lty=2) plot(Rect, do.col=TRUE, border="white") plot(Rect, do.col=TRUE, values=runif(16), border="white") B <- Rect[c(1, 2, 5, 7, 9)] plot(B, hatch=TRUE) plot(Diri, do.col=TRUE) plot(Diri, do.col=TRUE, do.labels=TRUE, labelargs=list(col="white"), ribbon=FALSE) v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] Img <- tess(image=v) plot(Img) plot(Img, col=rainbow(11), ribargs=list(las=1)) a <- tile.areas(Diri) marks(Diri) <- data.frame(area=a, random=runif(7, max=max(a))) plot(Diri, do.col=TRUE, equal.ribbon=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/as.data.frame.owin.Rd0000644000176200001440000000411313333543262016541 0ustar liggesusers\name{as.data.frame.owin} \alias{as.data.frame.owin} \title{Convert Window to Data Frame} \description{ Converts a window object to a data frame. } \usage{ \method{as.data.frame}{owin}(x, \dots, drop=TRUE) } \arguments{ \item{x}{ Window (object of class \code{"owin"}). } \item{\dots}{Further arguments passed to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features. 
} \item{drop}{ Logical value indicating whether to discard pixels that are outside the window, when \code{x} is a binary mask. } } \details{ This function returns a data frame specifying the coordinates of the window. If \code{x} is a binary mask window, the result is a data frame with columns \code{x} and \code{y} containing the spatial coordinates of each \emph{pixel}. If \code{drop=TRUE} (the default), only pixels inside the window are retained. If \code{drop=FALSE}, all pixels are retained, and the data frame has an extra column \code{inside} containing the logical value of each pixel (\code{TRUE} for pixels inside the window, \code{FALSE} for outside). If \code{x} is a rectangle or a polygonal window, the result is a data frame with columns \code{x} and \code{y} containing the spatial coordinates of the \emph{vertices} of the window. If the boundary consists of several polygons, the data frame has additional columns \code{id}, identifying which polygon is being traced, and \code{sign}, indicating whether the polygon is an outer or inner boundary (\code{sign=1} and \code{sign=-1} respectively). } \value{ A data frame with columns named \code{x} and \code{y}, and possibly other columns. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.data.frame.im}}, \code{\link{as.owin.data.frame}} } \examples{ as.data.frame(square(1)) holey <- owin(poly=list( list(x=c(0,10,0), y=c(0,0,10)), list(x=c(2,2,4,4), y=c(2,4,4,2)))) as.data.frame(holey) M <- as.mask(holey, eps=0.5) Mdf <- as.data.frame(M) } \keyword{spatial} \keyword{methods} spatstat/man/discretise.Rd0000644000176200001440000000544113333543263015326 0ustar liggesusers\name{discretise} \alias{discretise} \title{ Safely Convert Point Pattern Window to Binary Mask } \description{ Given a point pattern, discretise its window by converting it to a binary pixel mask, adjusting the mask so that it still contains all the points. 
} \usage{ discretise(X, eps = NULL, dimyx = NULL, xy = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}) to be converted.} \item{eps}{(optional) width and height of each pixel} \item{dimyx}{(optional) pixel array dimensions} \item{xy}{(optional) pixel coordinates} } \details{ This function modifies the point pattern \code{X} by converting its observation window \code{Window(X)} to a binary pixel image (a window of type \code{"mask"}). It ensures that no points of \code{X} are deleted by the discretisation. The window is first discretised using \code{\link{as.mask}}. It can happen that points of \code{X} that were inside the original window may fall outside the new mask. The \code{discretise} function corrects this by augmenting the mask (so that the mask includes any pixel that contains a point of the pattern). The arguments \code{eps}, \code{dimyx} and \code{xy} control the fineness of the pixel array. They are passed to \code{\link{as.mask}}. If \code{eps}, \code{dimyx} and \code{xy} are all absent or \code{NULL}, and if the window of \code{X} is of type \code{"mask"} to start with, then \code{discretise(X)} returns \code{X} unchanged. See \code{\link{as.mask}} for further details about the arguments \code{eps}, \code{dimyx}, and \code{xy}, and the process of converting a window to one of type \code{mask}. } \section{Error checking}{ Before doing anything, \code{discretise} checks that all the points of the pattern are actually inside the original window. This is guaranteed to be the case if the pattern was constructed using \code{\link{ppp}} or \code{\link{as.ppp}}. However anomalies are possible if the point pattern was created or manipulated inappropriately. These will cause an error. } \value{ A point pattern (object of class \code{"ppp"}), identical to \code{X}, except that its observation window has been converted to one of type \code{mask}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{as.mask}} } \examples{ data(demopat) X <- demopat plot(X, main="original pattern") Y <- discretise(X, dimyx=50) plot(Y, main="discretise(X)") stopifnot(npoints(X) == npoints(Y)) # what happens if we just convert the window to a mask? W <- Window(X) M <- as.mask(W, dimyx=50) plot(M, main="window of X converted to mask") plot(X, add=TRUE, pch=16) plot(X[M], add=TRUE, pch=1, cex=1.5) XM <- X[M] cat(paste(npoints(X) - npoints(XM), "points of X lie outside M\n")) } \keyword{spatial} \keyword{manip} spatstat/man/addvar.Rd0000644000176200001440000001507313333543262014432 0ustar liggesusers\name{addvar} \alias{addvar} \title{ Added Variable Plot for Point Process Model } \description{ Computes the coordinates for an Added Variable Plot for a fitted point process model. } \usage{ addvar(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate to be added to the model. Either a pixel image, a \code{function(x,y)}, or a character string giving the name of a covariate that was supplied when the model was fitted. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. 
} \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{crosscheck}{ For developers only. Logical value indicating whether to perform cross-checks on the validity of the calculation. } } \details{ This command generates the plot coordinates for an Added Variable Plot for a spatial point process model. Added Variable Plots (Cox, 1958, sec 4.5; Wang, 1985) are commonly used in linear models and generalized linear models, to decide whether a model with response \eqn{y} and predictors \eqn{x} would be improved by including another predictor \eqn{z}. In a (generalised) linear model with response \eqn{y} and predictors \eqn{x}, the Added Variable Plot for a new covariate \eqn{z} is a plot of the smoothed Pearson residuals from the original model against the scaled residuals from a weighted linear regression of \eqn{z} on \eqn{x}. If this plot has nonzero slope, then the new covariate \eqn{z} is needed. For general advice see Cook and Weisberg(1999); Harrell (2001). Essentially the same technique can be used for a spatial point process model (Baddeley et al, 2012). The argument \code{model} should be a fitted spatial point process model (object of class \code{"ppm"}). The argument \code{covariate} identifies the covariate that is to be considered for addition to the model. It should be either a pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving the values of the covariate at any spatial location. Alternatively \code{covariate} may be a character string, giving the name of a covariate that was supplied (in the \code{covariates} argument to \code{\link{ppm}}) when the model was fitted, but was not used in the model. 
The result of \code{addvar(model, covariate)} is an object belonging to the classes \code{"addvar"} and \code{"fv"}. Plot this object to generate the added variable plot. Note that the plot method shows the pointwise significance bands for a test of the \emph{null} model, i.e. the null hypothesis that the new covariate has no effect. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{addvar}. } \value{ An object of class \code{"addvar"} containing the coordinates for the added variable plot. There is a \code{plot} method. 
} \section{Internal data}{ The return value has an attribute \code{"spatial"} which contains the internal data: the computed values of the residuals, and of all relevant covariates, at each quadrature point of the model. It is an object of class \code{"ppp"} with a data frame of marks. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. Cook, R.D. and Weisberg, S. (1999) \emph{Applied regression, including computing and graphics}. New York: Wiley. Cox, D.R. (1958) \emph{Planning of Experiments}. New York: Wiley. Harrell, F. (2001) \emph{Regression Modeling Strategies}. New York: Springer. Wang, P. (1985) Adding a variable in generalized linear models. \emph{Technometrics} \bold{27}, 273--276. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{parres}}, \code{\link{rhohat}}, \code{\link{rho2hat}}. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X, ~y) adv <- addvar(model, "x") plot(adv) adv <- addvar(model, "x", subregion=square(0.5)) } \keyword{spatial} \keyword{models} spatstat/man/inside.owin.Rd0000644000176200001440000000446213333543263015420 0ustar liggesusers\name{inside.owin} \alias{inside.owin} \title{Test Whether Points Are Inside A Window} \description{ Test whether points lie inside or outside a given window. } \usage{ inside.owin(x, y, w) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates of points to be tested. (Alternatively, a point pattern object providing both \eqn{x} and \eqn{y} coordinates.) } \item{y}{ Vector of \eqn{y} coordinates of points to be tested. } \item{w}{A window. This should be an object of class \code{\link{owin}}, or can be given in any format acceptable to \code{\link{as.owin}()}. } } \value{ Logical vector whose \code{i}th entry is \code{TRUE} if the corresponding point \code{(x[i],y[i])} is inside \code{w}. 
} \details{ This function tests whether each of the points \code{(x[i],y[i])} lies inside or outside the window \code{w} and returns \code{TRUE} if it is inside. The boundary of the window is treated as being inside. If \code{w} is of type \code{"rectangle"} or \code{"polygonal"}, the algorithm uses analytic geometry (the discrete Stokes theorem). Computation time is linear in the number of points and (for polygonal windows) in the number of vertices of the boundary polygon. Boundary cases are correct to single precision accuracy. If \code{w} is of type \code{"mask"} then the pixel closest to \code{(x[i],y[i])} is tested. The results may be incorrect for points lying within one pixel diameter of the window boundary. Normally \code{x} and \code{y} must be numeric vectors of equal length (length zero is allowed) containing the coordinates of points. Alternatively \code{x} can be a point pattern (object of class \code{"ppp"}) while \code{y} is missing; then the coordinates of the point pattern are extracted. } \seealso{ \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ # hexagonal window k <- 6 theta <- 2 * pi * (0:(k-1))/k co <- cos(theta) si <- sin(theta) mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si)) \dontrun{ plot(mas) } # random points in rectangle x <- runif(30,min=-1, max=1) y <- runif(30,min=-1, max=1) ok <- inside.owin(x, y, mas) \dontrun{ points(x[ok], y[ok]) points(x[!ok], y[!ok], pch="x") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/boxx.Rd0000644000176200001440000000234713333543262014151 0ustar liggesusers\name{boxx} \Rdversion{1.1} \alias{boxx} \title{ Multi-Dimensional Box } \description{ Creates an object representing a multi-dimensional box. } \usage{ boxx(..., unitname = NULL) } \arguments{ \item{\dots}{ Dimensions of the box. Vectors of length 2. } \item{unitname}{ Optional. Name of the unit of length. See Details. 
} } \details{ This function creates an object representing a multi-dimensional rectangular parallelepiped (box) with sides parallel to the coordinate axes. The object can be used to specify the domain of a multi-dimensional point pattern (see \code{\link{ppx}}) and in various geometrical calculations (see \code{\link{volume.boxx}}, \code{\link{diameter.boxx}}, \code{\link{eroded.volumes}}). The optional argument \code{unitname} specifies the name of the unit of length. See \code{\link{unitname}} for valid formats. } \value{ An object of class \code{"boxx"}. There is a print method for this class. } \author{ \spatstatAuthors. } \seealso{ \code{\link{ppx}}, \code{\link{volume.boxx}}, \code{\link{diameter.boxx}}, \code{\link{eroded.volumes.boxx}}. } \examples{ boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre","metres")) } \keyword{spatial} \keyword{datagen} spatstat/man/Math.im.Rd0000644000176200001440000000673213333543262014470 0ustar liggesusers\name{Math.im} \alias{Math.im} \alias{Ops.im} \alias{Complex.im} \alias{Summary.im} \title{S3 Group Generic methods for images} \description{ These are group generic methods for images of class \code{"im"}, which allows for usual mathematical functions and operators to be applied directly to images. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm=FALSE, drop=TRUE)} %NAMESPACE S3method("Math", "im") %NAMESPACE S3method("Ops", "im") %NAMESPACE S3method("Complex", "im") %NAMESPACE S3method("Summary", "im") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"im"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm,drop}{ Logical values specifying whether missing values should be removed. This will happen if either \code{na.rm=TRUE} or \code{drop=TRUE}. See Details. 
} } \details{ Below is a list of mathematical functions and operators which are defined for images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.im}}, which tries to harmonise images via \code{\link{harmonise.im}} if they aren't compatible to begin with. \enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } For the \code{Summary} group, the generic has an argument \code{na.rm=FALSE}, but for pixel images it makes sense to set \code{na.rm=TRUE} so that pixels outside the domain of the image are ignored. To enable this, we added the argument \code{drop}. Pixel values that are \code{NA} are removed if \code{drop=TRUE} or if \code{na.rm=TRUE}. } \seealso{ \code{\link{eval.im}} for evaluating expressions involving images. 
} \examples{ ## Convert gradient values to angle of inclination: V <- atan(bei.extra$grad) * 180/pi ## Make logical image which is TRUE when heat equals 'Moderate': A <- (gorillas.extra$heat == "Moderate") ## Summary: any(A) ## Complex: Z <- exp(1 + V * 1i) Z Re(Z) } \author{ \spatstatAuthors and Kassel Hingee. } \keyword{spatial} \keyword{methods} spatstat/man/applynbd.Rd0000644000176200001440000002003513333543262014774 0ustar liggesusers\name{applynbd} \alias{applynbd} \title{Apply Function to Every Neighbourhood in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and apply a given function to them. } \usage{ applynbd(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, \dots) } \arguments{ \item{X}{ Point pattern. An object of class \code{"ppp"}, or data which can be converted into this format by \code{\link{as.ppp}}. } \item{FUN}{ Function to be applied to each neighbourhood. The arguments of \code{FUN} are described under \bold{Details}. } \item{N}{ Integer. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of the \code{N} points of \code{X} which are closest to it. } \item{R}{ Nonnegative numeric value. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of all points of \code{X} which lie within a distance \code{R} of it. } \item{criterion}{ Function. If this argument is present, the neighbourhood of a point of \code{X} is determined by evaluating this function. See under \bold{Details}. } \item{exclude}{ Logical. If \code{TRUE} then the point currently being visited is excluded from its own neighbourhood. } \item{\dots}{ extra arguments passed to the function \code{FUN}. They must be given in the form \code{name=value}. } } \value{ Similar to the result of \code{\link{apply}}. If each call to \code{FUN} returns a single numeric value, the result is a vector of dimension \code{npoints(X)}, the number of points in \code{X}. 
If each call to \code{FUN} returns a vector of the same length \code{m}, then the result is a matrix of dimensions \code{c(m,n)}; note the transposition of the indices, as usual for the family of \code{apply} functions. If the calls to \code{FUN} return vectors of different lengths, the result is a list of length \code{npoints(X)}. } \details{ This is an analogue of \code{\link{apply}} for point patterns. It visits each point in the point pattern \code{X}, determines which points of \code{X} are ``neighbours'' of the current point, applies the function \code{FUN} to this neighbourhood, and collects the values returned by \code{FUN}. The definition of ``neighbours'' depends on the arguments \code{N}, \code{R} and \code{criterion}. Also the argument \code{exclude} determines whether the current point is excluded from its own neighbourhood. \itemize{ \item If \code{N} is given, then the neighbours of the current point are the \code{N} points of \code{X} which are closest to the current point (including the current point itself unless \code{exclude=TRUE}). \item If \code{R} is given, then the neighbourhood of the current point consists of all points of \code{X} which lie closer than a distance \code{R} from the current point. \item If \code{criterion} is given, then it must be a function with two arguments \code{dist} and \code{drank} which will be vectors of equal length. The interpretation is that \code{dist[i]} will be the distance of a point from the current point, and \code{drank[i]} will be the rank of that distance (the three points closest to the current point will have rank 1, 2 and 3). This function must return a logical vector of the same length as \code{dist} and \code{drank} whose \code{i}-th entry is \code{TRUE} if the corresponding point should be included in the neighbourhood. See the examples below. 
\item If more than one of the arguments \code{N}, \code{R} and \code{criterion} is given, the neighbourhood is defined as the \emph{intersection} of the neighbourhoods specified by these arguments. For example if \code{N=3} and \code{R=5} then the neighbourhood is formed by finding the 3 nearest neighbours of current point, and retaining only those neighbours which lie closer than 5 units from the current point. } When \code{applynbd} is executed, each point of \code{X} is visited, and the following happens for each point: \itemize{ \item the neighbourhood of the current point is determined according to the chosen rule, and stored as a point pattern \code{Y}; \item the function \code{FUN} is called as: \code{FUN(Y=Y, current=current, dists=dists, dranks=dranks, \dots)} where \code{current} is the location of the current point (in a format explained below), \code{dists} is a vector of distances from the current point to each of the points in \code{Y}, \code{dranks} is a vector of the ranks of these distances with respect to the full point pattern \code{X}, and \code{\dots} are the arguments passed from the call to \code{applynbd}; \item The result of the call to \code{FUN} is stored. } The results of each call to \code{FUN} are collected and returned according to the usual rules for \code{\link{apply}} and its relatives. See the \bold{Value} section of this help file. The format of the argument \code{current} is as follows. If \code{X} is an unmarked point pattern, then \code{current} is a vector of length 2 containing the coordinates of the current point. If \code{X} is marked, then \code{current} is a point pattern containing exactly one point, so that \code{current$x} is its \eqn{x}-coordinate and \code{current$marks} is its mark value. In either case, the coordinates of the current point can be referred to as \code{current$x} and \code{current$y}. Note that \code{FUN} will be called exactly as described above, with each argument named explicitly. 
Care is required when writing the function \code{FUN} to ensure that the arguments will match up. See the Examples. See \code{\link{markstat}} for a common use of this function. To simply tabulate the marks in every \code{R}-neighbourhood, use \code{\link{marktable}}. } \seealso{ \code{\link{ppp.object}}, \code{\link{apply}}, \code{\link{markstat}}, \code{\link{marktable}} } \examples{ redwood # count the number of points within radius 0.2 of each point of X nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){npoints(Y)-1}) # equivalent to: nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){npoints(Y)}, exclude=TRUE) # compute the distance to the second nearest neighbour of each point secondnndist <- applynbd(redwood, N = 2, function(dists, ...){max(dists)}, exclude=TRUE) # marked point pattern trees <- longleaf \testonly{ # smaller dataset trees <- trees[seq(1, npoints(trees), by=80)] } # compute the median of the marks of all neighbours of a point # (see also 'markstat') dbh.med <- applynbd(trees, R=90, exclude=TRUE, function(Y, ...) 
{ median(marks(Y))}) # ANIMATION explaining the definition of the K function # (arguments `fullpicture' and 'rad' are passed to FUN) if(interactive()) { showoffK <- function(Y, current, dists, dranks, fullpicture,rad) { plot(fullpicture, main="") points(Y, cex=2) ux <- current[["x"]] uy <- current[["y"]] points(ux, uy, pch="+",cex=3) theta <- seq(0,2*pi,length=100) polygon(ux + rad * cos(theta), uy+rad*sin(theta)) text(ux + rad/3, uy + rad/2,npoints(Y),cex=3) if(interactive()) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3) return(npoints(Y)) } applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE) # animation explaining the definition of the G function showoffG <- function(Y, current, dists, dranks, fullpicture) { plot(fullpicture, main="") points(Y, cex=2) u <- current points(u[1],u[2],pch="+",cex=3) v <- c(Y$x[1],Y$y[1]) segments(u[1],u[2],v[1],v[2],lwd=2) w <- (u + v)/2 nnd <- dists[1] text(w[1],w[2],round(nnd,3),cex=2) if(interactive()) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3) return(nnd) } applynbd(cells, N=1, showoffG, exclude=TRUE, fullpicture=cells) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{programming} \keyword{iteration} spatstat/man/Hest.Rd0000644000176200001440000001336413524235613014075 0ustar liggesusers\name{Hest} \alias{Hest} \title{Spherical Contact Distribution Function} \description{ Estimates the spherical contact distribution function of a random set. } \usage{ Hest(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE) } \arguments{ \item{X}{The observed random set. An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}. Alternatively a pixel image (class \code{"im"}) with logical values. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{H(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. 
} \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the discretisation. } \item{W}{ Optional. A window (object of class \code{"owin"}) to be taken as the window of observation. The contact distribution function will be estimated from values of the contact distance inside \code{W}. The default is \code{W=Frame(X)} when \code{X} is a window, and \code{W=Window(X)} otherwise. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{H(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"han"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Logical value indicating whether to compute the conditional or unconditional distribution. See Details. } } \details{ The spherical contact distribution function of a stationary random set \eqn{X} is the cumulative distribution function \eqn{H} of the distance from a fixed point in space to the nearest point of \eqn{X}, given that the point lies outside \eqn{X}. That is, \eqn{H(r)} equals the probability that \code{X} lies closer than \eqn{r} units away from the fixed point \eqn{x}, given that \code{X} does not cover \eqn{x}. Let \eqn{D = d(x,X)} be the shortest distance from an arbitrary point \eqn{x} to the set \code{X}. Then the spherical contact distribution function is \deqn{H(r) = P(D \le r \mid D > 0)}{H(r) = P(D <= r | D > 0)} For a point process, the spherical contact distribution function is the same as the empty space function \eqn{F} discussed in \code{\link{Fest}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}) or a window (object of class \code{"owin"}). It is assumed to be a realisation of a stationary random set. 
The algorithm first calls \code{\link{distmap}} to compute the distance transform of \code{X}, then computes the Kaplan-Meier and reduced-sample estimates of the cumulative distribution following Hansen et al (1999). If \code{conditional=TRUE} (the default) the algorithm returns an estimate of the spherical contact function \eqn{H(r)} as defined above. If \code{conditional=FALSE}, it instead returns an estimate of the cumulative distribution function \eqn{H^\ast(r) = P(D \le r)}{H*(r) = P(D <= r)} which includes a jump at \eqn{r=0} if \code{X} has nonzero area. Accuracy depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing up to six columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{H(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{H(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{H(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{H(r)} by the spatial Kaplan-Meier method } \item{han}{the spatial Hanisch-Chiu-Stoyan estimator of \eqn{H(r)} } \item{raw}{the uncorrected estimate of \eqn{H(r)}, i.e. the empirical distribution of the distance from a fixed point in the window to the nearest point of \code{X} } } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. 
Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Hansen, M.B., Baddeley, A.J. and Gill, R.D. First contact distributions for spatial patterns: regularity and estimation. \emph{Advances in Applied Probability} \bold{31} (1999) 15-33. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \seealso{\code{\link{Fest}}} \examples{ X <- runifpoint(42) H <- Hest(X) Y <- rpoisline(10) H <- Hest(Y) H <- Hest(Y, dimyx=256) X <- heather$coarse plot(Hest(X)) H <- Hest(X, conditional=FALSE) P <- owin(poly=list(x=c(5.3, 8.5, 8.3, 3.7, 1.3, 3.7), y=c(9.7, 10.0, 13.6, 14.4, 10.7, 7.2))) plot(X) plot(P, add=TRUE, col="red") H <- Hest(X, W=P) Z <- as.im(FALSE, Frame(X)) Z[X] <- TRUE Z <- Z[P, drop=FALSE] plot(Z) H <- Hest(Z) } \author{ \spatstatAuthors with contributions from Kassel Hingee. } \keyword{spatial} \keyword{nonparametric} spatstat/man/FmultiInhom.Rd0000644000176200001440000000425313333543262015422 0ustar liggesusers\name{FmultiInhom} \alias{FmultiInhom} \title{ Inhomogeneous Marked F-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{F} function, effectively the cumulative distribution function of the distance from a fixed point to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. } \usage{ FmultiInhom(X, J, lambda = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}. } \item{J}{ A subset index specifying the subset of points to which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaJ}. 
} \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{F} function should be estimated. There is a sensible default. } } \details{ See Cronie and Van Lieshout (2015). } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{F} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{Finhom}} } \examples{ X <- amacrine J <- (marks(X) == "off") mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 plot(FmultiInhom(X, J, lambda=lam, lambdamin=lmin)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/zapsmall.im.Rd0000644000176200001440000000136213333543265015417 0ustar liggesusers\name{zapsmall.im} \alias{zapsmall.im} \title{Rounding of Pixel Values} \description{ Modifies a pixel image, identifying those pixels that have values very close to zero, and replacing the value by zero. } \usage{ zapsmall.im(x, digits) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{digits}{ Argument passed to \code{\link{zapsmall}} indicating the precision to be used. } } \details{ The function \code{\link{zapsmall}} is applied to each pixel value of the image \code{x}. } \value{ Another pixel image. 
} \seealso{ \code{\link{zapsmall}} } \examples{ data(cells) D <- density(cells) zapsmall.im(D) } \author{\ege and \adrian } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/convexhull.xy.Rd0000644000176200001440000000236313333543263016016 0ustar liggesusers\name{convexhull.xy} \alias{convexhull.xy} \title{Convex Hull of Points} \description{ Computes the convex hull of a set of points in two dimensions. } \usage{ convexhull.xy(x, y=NULL) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function computes the convex hull of the points, and returns it as a window. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{convexhull}}, \code{\link{bounding.box.xy}}, \code{\link{ripras}} } \examples{ x <- runif(30) y <- runif(30) w <- convexhull.xy(x,y) plot(owin(), main="convexhull.xy(x,y)", lty=2) plot(w, add=TRUE) points(x,y) X <- rpoispp(30) plot(X, main="convexhull.xy(X)") plot(convexhull.xy(X), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/vcov.slrm.Rd0000644000176200001440000000660713333543264015127 0ustar liggesusers\name{vcov.slrm} \alias{vcov.slrm} \title{Variance-Covariance Matrix for a Fitted Spatial Logistic Regression} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a point process model that was fitted by spatial logistic regression. 
} \usage{ \method{vcov}{slrm}(object, \dots, what=c("vcov", "corr", "fisher", "Fisher")) } \arguments{ \item{object}{A fitted point process model of class \code{"slrm"}.} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"slrm"}, typically produced by \code{\link{slrm}}. It represents a Poisson point process model fitted by spatial logistic regression. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.slrm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.slrm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.slrm}(object)}. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. Standard errors for the fitted intensity can be obtained using \code{\link{predict.slrm}}. 
} \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. This can occur because of numerical overflow or collinearity in the covariates. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x + y) vcov(fit) vcov(fit, what="corr") vcov(fit, what="f") } \author{ \adrian and \rolf . } \seealso{ \code{\link{vcov}} for the generic, \code{\link{slrm}} for information about fitted models, \code{\link{predict.slrm}} for other kinds of calculation about the model, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. {doi: 10.1214/10-EJS581} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/Gdot.Rd0000644000176200001440000002132313333543262014061 0ustar liggesusers\name{Gdot} \alias{Gdot} \title{ Multitype Nearest Neighbour Distance Function (i-to-any) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest other point of any type. } \usage{ Gdot(X, i, r=NULL, breaks=NULL, \dots, correction=c("km", "rs", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the distance distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{Optional. 
Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{i\bullet}(r)}{Gi.(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{i\bullet}(r)}{Gi.(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest other point of any type. } \item{theo}{the theoretical value of \eqn{G_{i\bullet}(r)}{Gi.(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gdot} and its companions \code{\link{Gcross}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. 
In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``dot-type'' (type \eqn{i} to any type) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest other point of the process, regardless of type. An estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the type \eqn{i} points were independent of all other points, then \eqn{G_{i\bullet}(r)}{Gi.(r)} would equal \eqn{G_{ii}(r)}{Gii(r)}, the nearest neighbour distance distribution function of the type \eqn{i} points alone. For a multitype Poisson point process with total intensity \eqn{\lambda}{lambda}, we have \deqn{G_{i\bullet}(r) = 1 - e^{ - \lambda \pi r^2} }{% Gi.(r) = 1 - exp( - lambda * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{i\bullet}}{Gi.} curves may suggest dependence of the type \eqn{i} points on the other points. This algorithm estimates the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. 
The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{i\bullet}(r)}{Gi.(r)}. This estimate should be used with caution as \eqn{G_{i\bullet}(r)}{Gi.(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{i\bullet}}{Gi.}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{i\bullet}}{Gi.} as if it were an unbiased estimator of \eqn{G_{i\bullet}}{Gi.}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. 
(1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{i\bullet}}{Gi.} does not necessarily have a density. The reduced sample estimator of \eqn{G_{i\bullet}}{Gi.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}}{Gi.} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G0. <- Gdot(amacrine, "off") plot(G0.) 
# synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gdot(pp, "0") G <- Gdot(pp, 0) # equivalent } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/istat.Rd0000644000176200001440000000304113333543263014306 0ustar liggesusers\name{istat} \alias{istat} \title{Point and Click Interface for Exploratory Analysis of Point Pattern} \description{ Compute various summary functions for a point pattern using a point-and-click interface. } \usage{ istat(x, xname) } \arguments{ \item{x}{ The spatial point pattern to be analysed. An object of class \code{"ppp"}. } \item{xname}{ Optional. Character string to use as the title of the dataset. } } \value{ \code{NULL}. } \details{ This command launches an interactive (point-and-click) interface which offers a choice of spatial summary functions that can be applied to the point pattern \code{x}. The selected summary function is computed for the point pattern \code{x} and plotted in a popup window. The selection of functions includes \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{pcf}}, \code{\link{Fest}} ,\code{\link{Gest}} and \code{\link{Jest}}. For the function \code{\link{pcf}} it is possible to control the bandwidth parameter \code{bw}. There is also an option to show simulation envelopes of the summary function. } \section{Note}{ Before adjusting the bandwidth parameter \code{bw}, it is advisable to select \emph{No simulation envelopes} to save a lot of computation time. } \section{Package Dependence}{ This function requires the package \pkg{rpanel} to be loaded. 
} \seealso{ \code{\link{iplot}} } \examples{ if(interactive() && require(rpanel)) { istat(swedishpines) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/subset.ppp.Rd0000644000176200001440000001147513333543264015300 0ustar liggesusers\name{subset.ppp} \alias{subset.ppp} \alias{subset.pp3} \alias{subset.lpp} \alias{subset.ppx} \title{ Subset of Point Pattern Satisfying A Condition } \description{ Given a point pattern, return the subset of points which satisfy a specified condition. } \usage{ \method{subset}{ppp}(x, subset, select, drop=FALSE, \dots) \method{subset}{pp3}(x, subset, select, drop=FALSE, \dots) \method{subset}{lpp}(x, subset, select, drop=FALSE, \dots) \method{subset}{ppx}(x, subset, select, drop=FALSE, \dots) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{subset}{ Logical expression indicating which points are to be kept. The expression may involve the names of spatial coordinates (\code{x}, \code{y}, etc), the \code{marks}, and (if there is more than one column of marks) the names of individual columns of marks. Missing values are taken as false. See Details. } \item{select}{ Expression indicating which columns of marks should be kept. The \emph{names} of columns of marks can be used in this expression, and will be treated as if they were column indices. See Details. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{subset}}. It extracts the subset of points of \code{x} that satisfy the logical expression \code{subset}, and retains only the columns of marks that are specified by the expression \code{select}. The result is always a point pattern, with the same window as \code{x}. The argument \code{subset} determines the subset of points that will be extracted. 
It should be a logical expression. It may involve the variable names \code{x} and \code{y} representing the Cartesian coordinates; the names of other spatial coordinates or local coordinates; the name \code{marks} representing the marks; and (if there is more than one column of marks) the names of individual columns of marks. The default is to keep all points. The argument \code{select} determines which columns of marks will be retained (if there are several columns of marks). It should be an expression involving the names of columns of marks (which will be interpreted as integers representing the positions of these columns). For example if there are columns of marks named \code{A} to \code{Z}, then \code{select=D:F} is a valid expression and means that columns \code{D}, \code{E} and \code{F} will be retained. Similarly \code{select=-(A:C)} is valid and means that columns \code{A} to \code{C} will be deleted. The default is to retain all columns. Setting \code{subset=FALSE} will produce an empty point pattern (i.e. containing zero points) in the same window as \code{x}. Setting \code{select=FALSE} or \code{select= -marks} will remove all the marks from \code{x}. The argument \code{drop} determines whether to remove unused levels of a factor, if the resulting point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame in which some of the columns are factors. The result is always a point pattern, of the same class as \code{x}. Spatial coordinates (and local coordinates) are always retained. To extract only some columns of marks or coordinates as a data frame, use \code{subset(as.data.frame(x), ...)} } \section{Other kinds of subset arguments}{ Alternatively the argument \code{subset} can be any kind of subset index acceptable to \code{\link{[.ppp}}, \code{\link{[.pp3}}, \code{\link{[.ppx}}. This argument selects which points of \code{x} will be retained. 
\bold{Warning:} if the argument \code{subset} is a window, this is interpreted as specifying the subset of points that fall inside that window, but the resulting point pattern has the same window as the original pattern \code{x}. } \value{ A point pattern of the same class as \code{x}, in the same spatial window as \code{x}. The result is a subset of \code{x}, possibly with some columns of marks removed. } \author{ \spatstatAuthors. } \seealso{ \code{\link[base]{subset}}, \code{\link{[.ppp}}, \code{\link{[.pp3}}, \code{\link{[.lpp}}, \code{\link{[.ppx}} } \examples{ plot(subset(cells, x > 0.5)) subset(amacrine, marks == "on") subset(amacrine, marks == "on", drop=TRUE) subset(redwood, nndist(redwood) > 0.04) subset(finpines, select=height) subset(finpines, diameter > 2, height) subset(nbfires, year==1999 & ign.src == "campfire", select=cause:fnl.size) v <- subset(chicago, x + y > 1100 & marks == "assault") vv <- subset(chicago, x + y > 1100 & marks == "assault", drop=TRUE) a <- subset(rpoispp3(40), z > 0.5) } \keyword{spatial} \keyword{manip} spatstat/man/box3.Rd0000644000176200001440000000301013333543262014030 0ustar liggesusers\name{box3} \Rdversion{1.1} \alias{box3} \title{ Three-Dimensional Box } \description{ Creates an object representing a three-dimensional box. } \usage{ box3(xrange = c(0, 1), yrange = xrange, zrange = yrange, unitname = NULL) } \arguments{ \item{xrange, yrange, zrange}{ Dimensions of the box in the \eqn{x,y,z} directions. Each of these arguments should be a numeric vector of length 2. } \item{unitname}{ Optional. Name of the unit of length. See Details. } } \details{ This function creates an object representing a three-dimensional rectangular parallelepiped (box) with sides parallel to the coordinate axes. The object can be used to specify the domain of a three-dimensional point pattern (see \code{\link{pp3}}) and in various geometrical calculations (see \code{\link{volume.box3}}, \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}). 
The optional argument \code{unitname} specifies the name of the unit of length. See \code{\link{unitname}} for valid formats. The function \code{\link{as.box3}} can be used to convert other kinds of data to this format. } \value{ An object of class \code{"box3"}. There is a print method for this class. } \author{\adrian and \rolf } \seealso{ \code{\link{as.box3}}, \code{\link{pp3}}, \code{\link{volume.box3}}, \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}. } \examples{ box3() box3(c(0,10),c(0,10),c(0,5), unitname=c("metre","metres")) box3(c(-1,1)) } \keyword{spatial} \keyword{datagen} spatstat/man/as.rectangle.Rd0000644000176200001440000000335413333543262015536 0ustar liggesusers\name{as.rectangle} \alias{as.rectangle} \title{Window Frame} \description{ Extract the window frame of a window or other spatial dataset } \usage{ as.rectangle(w, \dots) } \arguments{ \item{w}{A window, or a dataset that has a window. Either a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}) or other data determining such a window. } \item{\dots}{ Optional. Auxiliary data to help determine the window. If \code{w} does not belong to a recognised class, the arguments \code{w} and \code{\dots} are passed to \code{\link{as.owin}} to determine the window. } } \value{ A window (object of class \code{"owin"}) of type \code{"rectangle"} representing a rectangle. } \details{ This function is the quickest way to determine a bounding rectangle for a spatial dataset. If \code{w} is a window, the function just extracts the outer bounding rectangle of \code{w} as given by its elements \code{xrange,yrange}. The function can also be applied to any spatial dataset that has a window: for example, a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}). The bounding rectangle of the window of the dataset is extracted. 
Use the function \code{\link{boundingbox}} to compute the \emph{smallest} bounding rectangle of a dataset. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{boundingbox}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) r <- as.rectangle(w) # returns a 10 x 10 rectangle data(lansing) as.rectangle(lansing) data(copper) as.rectangle(copper$SouthLines) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/pool.fasp.Rd0000644000176200001440000000350713333543264015073 0ustar liggesusers\name{pool.fasp} \alias{pool.fasp} \title{ Pool Data from Several Function Arrays } \description{ Pool the simulation data from several function arrays (objects of class \code{"fasp"}) and compute a new function array. } \usage{ \method{pool}{fasp}(...) } \arguments{ \item{\dots}{ Objects of class \code{"fasp"}. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fasp"} of function arrays. It is used to combine the simulation data from several arrays of simulation envelopes and to compute a new array of envelopes based on the combined data. Each of the arguments \code{\dots} must be a function array (object of class \code{"fasp"}) containing simulation envelopes. This is typically created by running the command \code{\link{alltypes}} with the arguments \code{envelope=TRUE} and \code{savefuns=TRUE}. This ensures that each object is an array of simulation envelopes, and that each envelope contains the simulated data (summary function values) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new array of envelopes is computed from the combined set of simulations. Warnings or errors will be issued if the objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of class \code{"fasp"}. 
} \seealso{ \code{\link{fasp}}, \code{\link{alltypes}}, \code{\link{pool.envelope}}, \code{\link{pool}} } \examples{ data(amacrine) A1 <- alltypes(amacrine,"K",nsim=9,envelope=TRUE,savefuns=TRUE) A2 <- alltypes(amacrine,"K",nsim=10,envelope=TRUE,savefuns=TRUE) pool(A1, A2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/crossdist.ppx.Rd0000644000176200001440000000335413333543263016014 0ustar liggesusers\name{crossdist.ppx} \alias{crossdist.ppx} \title{Pairwise Distances Between Two Different Multi-Dimensional Point Patterns} \description{ Computes the distances between pairs of points taken from two different multi-dimensional point patterns. } \usage{ \method{crossdist}{ppx}(X, Y, \dots) } \arguments{ \item{X,Y}{ Multi-dimensional point patterns (objects of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns in multi-dimensional space, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for multi-dimensional point patterns (objects of class \code{"ppx"}). This function expects two multidimensional point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. 
} \seealso{ \code{\link{crossdist}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ df <- data.frame(x=runif(3),y=runif(3),z=runif(3),w=runif(3)) X <- ppx(data=df) df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) Y <- ppx(data=df) d <- crossdist(X, Y) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/invoke.symbolmap.Rd0000644000176200001440000000364313333543263016467 0ustar liggesusers\name{invoke.symbolmap} \alias{invoke.symbolmap} \title{ Plot Data Using Graphics Symbol Map } \description{ Apply a graphics symbol map to a vector of data values and plot the resulting symbols. } \usage{ invoke.symbolmap(map, values, x=NULL, y = NULL, \dots, add = FALSE, do.plot = TRUE, started = add && do.plot) } \arguments{ \item{map}{ Graphics symbol map (object of class \code{"symbolmap"}). } \item{values}{ Vector of data that can be mapped by the symbol map. } \item{x,y}{ Coordinate vectors for the spatial locations of the symbols to be plotted. } \item{\dots}{ Additional graphics parameters. } \item{add}{ Logical value indicating whether to add the symbols to an existing plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}, the default). } \item{do.plot}{ Logical value indicating whether to actually perform the plotting. } \item{started}{ Logical value indicating whether the plot has already been initialised. } } \details{ A symbol map is an association between data values and graphical symbols. This command applies the symbol map \code{map} to the data \code{values} and plots the resulting symbols at the locations given by \code{\link{xy.coords}(x,y)}. } \value{ (Invisibly) the maximum diameter of the symbols, in user coordinate units. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{plot.symbolmap}} to plot the graphics map itself. \code{\link{symbolmap}} to create a graphics map. 
} \examples{ g <- symbolmap(range=c(-1,1), shape=function(x) ifelse(x > 0, "circles", "squares"), size=function(x) sqrt(ifelse(x > 0, x/pi, -x))/15, bg=function(x) ifelse(x > 0, "green", "red")) plot(square(1), main="") a <- invoke.symbolmap(g, runif(10, -1, 1), runifpoint(10), add=TRUE) a } \keyword{spatial} \keyword{hplot} spatstat/man/update.rmhcontrol.Rd0000644000176200001440000000210413333543264016632 0ustar liggesusers\name{update.rmhcontrol} \alias{update.rmhcontrol} \title{Update Control Parameters of Metropolis-Hastings Algorithm} \description{ \code{update} method for class \code{"rmhcontrol"}. } \usage{ \method{update}{rmhcontrol}(object, \dots) } \arguments{ \item{object}{ Object of class \code{"rmhcontrol"} containing control parameters for a Metropolis-Hastings algorithm. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{rmhcontrol}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"rmhcontrol"}. An object of class \code{"rmhcontrol"} describes a set of control parameters for the Metropolis-Hastings simulation algorithm. See \code{\link{rmhcontrol}}. \code{update.rmhcontrol} will modify the parameters specified by \code{object} according to the new arguments given. } \value{ Another object of class \code{"rmhcontrol"}. } \examples{ a <- rmhcontrol(expand=1) update(a, expand=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/intersect.lintess.Rd0000644000176200001440000000203213557001576016645 0ustar liggesusers\name{intersect.lintess} \alias{intersect.lintess} \title{ Intersection of Tessellations on a Linear Network } \description{ Yields the intersection (common refinement) of two tessellations on a linear network. } \usage{ intersect.lintess(X, Y) } \arguments{ \item{X,Y}{ Tessellations (objects of class \code{"lintess"}) on the same linear network. 
} } \value{ Another tessellation (object of class \code{"lintess"}) on the same linear network as \code{X} and \code{Y}. } \details{ \code{X} and \code{Y} should be tessellations defined on the same linear network. Each tile in the resulting tessellation is the intersection of a tile of \code{X} with a tile of \code{Y}. } \author{ \adrian. } \seealso{ \code{\link{lintess}}, \code{\link{divide.linnet}}, \code{\link{chop.linnet}} } \examples{ X <- divide.linnet(runiflpp(4, simplenet)) Y <- divide.linnet(runiflpp(3, simplenet)) opa <- par(mfrow=c(1,3)) plot(X) plot(Y) plot(intersect.lintess(X,Y)) par(opa) } \keyword{spatial} \keyword{manip} spatstat/man/spatialcdf.Rd0000644000176200001440000000655213441712766015314 0ustar liggesusers\name{spatialcdf} \alias{spatialcdf} \title{ Spatial Cumulative Distribution Function } \description{ Compute the spatial cumulative distribution function of a spatial covariate, optionally using spatially-varying weights. } \usage{ spatialcdf(Z, weights = NULL, normalise = FALSE, ..., W = NULL, Zname = NULL) } \arguments{ \item{Z}{ Spatial covariate. A pixel image or a \code{function(x,y,...)} } \item{weights}{ Spatial weighting for different locations. A pixel image, a \code{function(x,y,...)}, a window, a constant value, or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{normalise}{ Logical. Whether the weights should be normalised so that they sum to 1. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution, or extra arguments passed to \code{Z} if it is a function. } \item{W}{ Optional window (object of class \code{"owin"}) defining the spatial domain. } \item{Zname}{ Optional character string for the name of the covariate \code{Z} used in plots. } } \details{ If \code{weights} is missing or \code{NULL}, it defaults to 1. The values of the covariate \code{Z} are computed on a grid of pixels. 
The weighted cumulative distribution function of \code{Z} values is computed, taking each value with weight equal to the pixel area. The resulting function \eqn{F} is such that \eqn{F(t)} is the area of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a pixel image or a function, then the values of \code{weights} and of the covariate \code{Z} are computed on a grid of pixels. The \code{weights} are multiplied by the pixel area. Then the weighted empirical cumulative distribution function of \code{Z} values is computed using \code{\link{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the total weight (or weighted area) of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a fitted point process model, then it should be a Poisson process. The fitted intensity of the model, and the value of the covariate \code{Z}, are evaluated at the quadrature points used to fit the model. The \code{weights} are multiplied by the weights of the quadrature points. Then the weighted empirical cumulative distribution of \code{Z} values is computed using \code{\link{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the expected number of points in the point process that will fall in the region of space where \eqn{Z \le t}{Z <= t}. If \code{normalise=TRUE}, the function is normalised so that its maximum value equals 1, so that it gives the cumulative \emph{fraction} of weight or cumulative fraction of points. The result can be printed, plotted, and used as a function. } \value{ A cumulative distribution function object belonging to the classes \code{"spatialcdf"}, \code{"ewcdf"}, \code{"ecdf"} (only if \code{normalise=TRUE}) and \code{"stepfun"}. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{ewcdf}}, \code{\link{cdf.test}} } \examples{ with(bei.extra, { plot(spatialcdf(grad)) fit <- ppm(bei ~ elev) plot(spatialcdf(grad, predict(fit))) plot(A <- spatialcdf(grad, fit)) A(0.1) }) plot(spatialcdf("x", W=letterR)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/Kmodel.Rd0000644000176200001440000000326413333543262014403 0ustar liggesusers\name{Kmodel} \alias{Kmodel} \alias{pcfmodel} \title{K Function or Pair Correlation Function of a Point Process Model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a point process model. } \usage{ Kmodel(model, \dots) pcfmodel(model, \dots) } \arguments{ \item{model}{ A fitted point process model of some kind. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. The functions \code{Kmodel} and \code{pcfmodel} give the theoretical \eqn{K}-function and the theoretical pair correlation function for a point process model that has been fitted to data. The functions \code{Kmodel} and \code{pcfmodel} are generic, with methods for the classes \code{"kppm"} (cluster processes and Cox processes) and \code{"ppm"} (Gibbs processes). The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. } \seealso{ \code{\link{Kest}} or \code{\link{pcf}} to estimate the \eqn{K} function or pair correlation function nonparametrically from data. \code{\link{Kmodel.kppm}} for the method for cluster processes and Cox processes. \code{\link{Kmodel.ppm}} for the method for Gibbs processes. 
} \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/MultiStrauss.Rd0000644000176200001440000000765313333543262015655 0ustar liggesusers\name{MultiStrauss} \alias{MultiStrauss} \title{The Multitype Strauss Point Process Model} \description{ Creates an instance of the multitype Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStrauss(radii, types=NULL) } \arguments{ \item{radii}{Matrix of interaction radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ The (stationary) multitype Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is the pairwise interaction point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density. The nonstationary multitype Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the multitype Strauss process pairwise interaction is yielded by the function \code{MultiStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. 
It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{MultiStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStrauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{MultiHard}} } \examples{ r <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStrauss(r) # prints a sensible description of itself r <- 0.03 * matrix(c(1,2,2,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[ owin(c(0, 0.8), c(0, 1)) ] } ppm(X ~1, MultiStrauss(r)) # fit the stationary multitype Strauss process to `amacrine' \dontrun{ ppm(X ~polynom(x,y,3), MultiStrauss(r, c("off","on"))) # fit a nonstationary multitype Strauss process with log-cubic trend } } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStrauss(types=NULL, radii)}. The new code attempts to handle the old syntax as well. 
} \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/plot.lintess.Rd0000644000176200001440000000765713536734342015634 0ustar liggesusers\name{plot.lintess} \alias{plot.lintess} \title{ Plot a Tessellation on a Linear Network } \description{ Plot a tessellation or division of a linear network into tiles. } \usage{ \method{plot}{lintess}(x, \dots, main, add = FALSE, style = c("colour", "width", "image"), col = NULL, values=marks(x), ribbon=TRUE, ribargs=list(), multiplot=TRUE, do.plot=TRUE) } \arguments{ \item{x}{ Tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{segments}} (if \code{style="colour"} or \code{style="width"}) or to \code{\link{plot.im}} (if \code{style="image"}) to control the plot. } \item{main}{ Optional main title for the plot. } \item{add}{ Logical value indicating whether the plot is to be added to an existing plot. } \item{style}{ Character string (partially matched) specifying the type of plot. If \code{style="colour"} (the default), tiles are plotted using \code{\link[graphics]{segments}} using colours to distinguish the different tiles or values. If \code{style="width"}, tiles are plotted using \code{\link[graphics]{segments}} using different segment widths to distinguish the different tiles or values. If \code{style="image"}, the tessellation is converted to a pixel image and plotted by \code{\link{plot.im}}. } \item{col}{ Vector of colours, or colour map, determining the colours used to plot the different tiles of the tessellation. } \item{values}{ Values associated with each tile of the tessellation, used to determine the colours or widths. A vector with one entry for each tile, or a data frame with one row for each tile. The default is \code{marks(x)}, or if that is null, then \code{tilenames(x)}. } \item{ribbon}{ Logical value specifying whether to print an explanatory legend for the colour map or width map. 
} \item{ribargs}{ Arguments passed to \code{\link{plot.colourmap}} controlling the display of the colour map legend. } \item{multiplot}{ Logical value determining what should happen if \code{marks(x)} has more than one column. If \code{multiplot=TRUE} (the default), several plot panels will be generated, one panel for each column of marks. If \code{multiplot=FALSE}, the first column of marks will be selected. } \item{do.plot}{ Logical value specifying whether to actually generate the plot (\code{do.plot=TRUE}, the default) or just to compute the colour map and return it (\code{do.plot=FALSE}). } } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. This function plots the tessellation on the current device. It is a method for the generic \code{plot}. If \code{style="colour"}, each tile is plotted using \code{\link[graphics]{segments}}, drawing segments of different colours. If \code{style="width"}, each tile is plotted using \code{\link[graphics]{segments}}, drawing segments of different widths. If \code{style="image"}, the tessellation is converted to a pixel image, and plotted as a colour image using \code{\link{plot.im}}. The colours or widths are determined by the \code{values} associated with each tile of the tessellation. If \code{values} is missing, the default is to use the marks of the tessellation, or if there are no marks, the names of the tiles. } \value{ (Invisible) colour map. 
} \author{ \adrian } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(7, simplenet) Z <- divide.linnet(X) plot(Z, main="tessellation on network") points(as.ppp(X)) plot(Z, main="tessellation on network", values=1:nobjects(Z), style="w") } \keyword{spatial} \keyword{hplot} spatstat/man/runifdisc.Rd0000644000176200001440000000346713333543264015165 0ustar liggesusers\name{runifdisc} \alias{runifdisc} \title{Generate N Uniform Random Points in a Disc} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points in a circular disc. } \usage{ runifdisc(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of points. } \item{radius}{Radius of the circle.} \item{centre}{Coordinates of the centre of the circle.} \item{\dots}{ Arguments passed to \code{\link{disc}} controlling the accuracy of approximation to the circle. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates \code{n} independent random points, uniformly distributed in a circular disc. It is faster (for a circular window) than the general code used in \code{\link{runifpoint}}. To generate random points in an ellipse, first generate points in a circle using \code{runifdisc}, then transform to an ellipse using \code{\link{affine}}, as shown in the examples. To generate random points in other windows, use \code{\link{runifpoint}}. To generate non-uniform random points, use \code{\link{rpoint}}. 
} \seealso{ \code{\link{disc}}, \code{\link{runifpoint}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit disc plot(runifdisc(100)) # 42 random points in the ellipse with major axis 3 and minor axis 1 X <- runifdisc(42) Y <- affine(X, mat=diag(c(3,1))) plot(Y) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Extract.hyperframe.Rd0000644000176200001440000000735513333543263016751 0ustar liggesusers\name{Extract.hyperframe} \alias{[.hyperframe} \alias{[<-.hyperframe} \alias{$.hyperframe} \alias{$<-.hyperframe} \title{Extract or Replace Subset of Hyperframe} \description{ Extract or replace a subset of a hyperframe. } \usage{ \method{[}{hyperframe}(x, i, j, drop, strip=drop, \dots) \method{[}{hyperframe}(x, i, j) <- value \method{$}{hyperframe}(x, name) \method{$}{hyperframe}(x, name) <- value } \arguments{ \item{x}{ A hyperframe (object of class \code{"hyperframe"}). } \item{i,j}{ Row and column indices. } \item{drop,strip}{ Logical values indicating what to do when the hyperframe has only one row or column. See Details. } \item{\dots}{ Ignored. } \item{name}{ Name of a column of the hyperframe. } \item{value}{ Replacement value for the subset. A hyperframe or (if the subset is a single column) a list or an atomic vector. } } \value{ A hyperframe (of class \code{"hyperframe"}). } \details{ These functions extract a designated subset of a hyperframe, or replace the designated subset with another hyperframe. The function \code{[.hyperframe} is a method for the subset operator \code{\link{[}} for the class \code{"hyperframe"}. It extracts the subset of \code{x} specified by the row index \code{i} and column index \code{j}. The argument \code{drop} determines whether the array structure will be discarded if possible. The argument \code{strip} determines whether the list structure in a row or column or cell will be discarded if possible. 
If \code{drop=FALSE} (the default), the return value is always a hyperframe or data frame. If \code{drop=TRUE}, and if the selected subset has only one row, or only one column, or both, then \itemize{ \item{ if \code{strip=FALSE}, the result is a list, with one entry for each array cell that was selected. } \item{ if \code{strip=TRUE}, \itemize{ \item if the subset has one row containing several columns, the result is a list or (if possible) an atomic vector; \item if the subset has one column containing several rows, the result is a list or (if possible) an atomic vector; \item if the subset has exactly one row and exactly one column, the result is the object (or atomic value) contained in this row and column. } } } The function \code{[<-.hyperframe} is a method for the subset replacement operator \code{\link{[<-}} for the class \code{"hyperframe"}. It replaces the designated subset with the hyperframe \code{value}. The subset of \code{x} to be replaced is designated by the arguments \code{i} and \code{j} as above. The replacement \code{value} should be a hyperframe with the appropriate dimensions, or (if the specified subset is a single column) a list of the appropriate length. The function \code{$.hyperframe} is a method for \code{\link{$}} for hyperframes. It extracts the relevant column of the hyperframe. The result is always a list (i.e. equivalent to using \code{[.hyperframe} with \code{strip=FALSE}). The function \code{$<-.hyperframe} is a method for \code{\link{$<-}} for hyperframes. It replaces the relevant column of the hyperframe. The replacement value should be a list of the appropriate length. 
} \seealso{ \code{\link{hyperframe}} } \examples{ h <- hyperframe(X=list(square(1), square(2)), Y=list(sin, cos)) h h[1, ] h[1, ,drop=TRUE] h[ , 1] h[ , 1, drop=TRUE] h[1,1] h[1,1,drop=TRUE] h[1,1,drop=TRUE,strip=FALSE] h[1,1] <- list(square(3)) # extract column h$X # replace existing column h$Y <- list(cells, cells) # add new column h$Z <- list(cells, cells) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/covering.Rd0000644000176200001440000000267713333543263015014 0ustar liggesusers\name{covering} \alias{covering} \title{Cover Region with Discs} \description{ Given a spatial region, this function finds an efficient covering of the region using discs of a chosen radius. } \usage{ covering(W, r, \dots, giveup=1000) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{r}{positive number: the radius of the covering discs.} \item{\dots}{ extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution for the calculations. } \item{giveup}{ Maximum number of attempts to place additional discs. } } \value{ A point pattern (object of class \code{"ppp"}) giving the centres of the discs. } \details{ This function finds an efficient covering of the window \code{W} using discs of the given radius \code{r}. The result is a point pattern giving the centres of the discs. The algorithm tries to use as few discs as possible, but is not guaranteed to find the minimal number of discs. It begins by placing a hexagonal grid of points inside \code{W}, then adds further points until every location inside \code{W} lies no more than \code{r} units away from one of the points. 
} \examples{ rr <- 0.5 X <- covering(letterR, rr) plot(grow.rectangle(Frame(X), rr), type="n", main="") plot(X, pch=16, add=TRUE, col="red") plot(letterR, add=TRUE, lwd=3) plot(X \%mark\% (2*rr), add=TRUE, markscale=1) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/rcelllpp.Rd0000644000176200001440000000556213557003310015001 0ustar liggesusers\name{rcelllpp} \alias{rcelllpp} \title{ Simulate Cell Process on Linear Network } \description{ Generate a realisation of the cell process on a linear network. } \usage{ rcelllpp(L, lambda, rnumgen = NULL, \dots, saveid=FALSE) } \arguments{ \item{L}{ Either a linear network (object of class \code{"linnet"}) or a tessellation on a linear network (object of class \code{"lintess"}). } \item{lambda}{ Intensity of the process (expected number of points per unit length), } \item{rnumgen}{ Optional. Random number generator for the number of points in each cell. } \item{\dots}{ Additional arguments to \code{rnumgen}. } \item{saveid}{ Logical value indicating whether to save information about cell membership. } } \details{ This function generates simulated realisations of a cell point process on a network, as described in Baddeley et al (2017). This is the analogue on a linear network of the two-dimensional cell point process of Baddeley and Silverman (1988). The argument \code{L} should be a tessellation on a linear network. Alternatively if \code{L} is a linear network, it is converted to a tessellation by treating each network segment as a tile in the tessellation. The cell process generates a point process by generating independent point processes inside each tile of the tessellation. Within each tile, given the number of random points in the tile, the points are independent and uniformly distributed within the tile. 
By default (when \code{rnumgen} is not given), the number of points in a tile of length \code{t} is a random variable with mean and variance equal to \code{lambda * t}, generated by calling \code{\link{rcellnumber}}. If \code{rnumgen} is given, it should be a function with arguments \code{rnumgen(n, mu, \dots)} where \code{n} is the number of random integers to be generated, \code{mu} is the mean value of the distribution, and \code{\dots} are additional arguments, if needed. It will be called in the form \code{rnumgen(1, lambda * t, \dots)} to determine the number of random points falling in each tile of length \code{t}. } \value{ Point pattern on a linear network (object of class \code{"lpp"}). If \code{saveid=TRUE}, the result has an attribute \code{"cellid"} which is a factor specifying the cell that contains each point. } \author{ \adrian. } \seealso{ \code{\link{rSwitzerlpp}} } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. Baddeley, A., Nair, G., Rakshit, S. and McSwiggan, G. (2017) \sQuote{Stationary} point processes are uncommon on linear networks. \emph{STAT} \bold{6}, {68--78}. } \examples{ X <- rcelllpp(domain(spiders), 0.01) plot(X) plot(linearK(X)) } \keyword{spatial} \keyword{datagen} spatstat/man/studpermu.test.Rd0000644000176200001440000001011313333543264016167 0ustar liggesusers\name{studpermu.test} \alias{studpermu.test} \title{ Studentised Permutation Test } \description{ Perform a studentised permutation test for a difference between groups of point patterns. } \usage{ studpermu.test(X, formula, summaryfunction = Kest, \dots, rinterval = NULL, nperm = 999, use.Tbar = FALSE, minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) } \arguments{ \item{X}{ Data. Either a \code{hyperframe} or a list of lists of point patterns. } \item{formula}{ Formula describing the grouping, when \code{X} is a hyperframe. 
The left side of the formula identifies which column of \code{X} contains the point patterns. The right side identifies the grouping factor. If the formula is missing, the grouping variable is taken to be the first column of \code{X} that contains a factor, and the point patterns are taken from the first column that contains point patterns. } \item{summaryfunction}{ Summary function applicable to point patterns. } \item{\dots}{ Additional arguments passed to \code{summaryfunction}. } \item{rinterval}{ Interval of distance values \eqn{r} over which the summary function should be evaluated and over which the test statistic will be integrated. If \code{NULL}, the default range of the summary statistic is used (taking the intersection of these ranges over all patterns). } \item{nperm}{ Number of random permutations for the test. } \item{use.Tbar}{ Logical value indicating choice of test statistic. If \code{TRUE}, use the alternative test statistic, which is appropriate for summary functions with roughly constant variance, such as \eqn{K(r)/r} or \eqn{L(r)}. } \item{minpoints}{ Minimum permissible number of points in a point pattern for inclusion in the test calculation. } \item{rsteps}{ Number of discretisation steps in the \code{rinterval}. } \item{r}{ Optional vector of distance values as the argument for \code{summaryfunction}. Should not usually be given. There is a sensible default. } \item{arguments.in.data}{ Logical. If \code{TRUE}, individual extra arguments to \code{summaryfunction} will be taken from \code{X} (which must be a hyperframe). This assumes that the first argument of \code{summaryfunction} is the point pattern dataset. } } \details{ This function performs the studentized permutation test of Hahn (2012) for a difference between groups of point patterns. 
The first argument \code{X} should be either \describe{ \item{a list of lists of point patterns.}{ Each element of \code{X} will be interpreted as a group of point patterns, assumed to be replicates of the same point process. } \item{a hyperframe:}{ One column of the hyperframe should contain point patterns, and another column should contain a factor indicating the grouping. The argument \code{formula} should be a formula in the \R language specifying the grouping: it should be of the form \code{P ~ G} where \code{P} is the name of the column of point patterns, and \code{G} is the name of the factor. } } A group needs to contain at least two point patterns with at least \code{minpoints} points in each pattern. The function returns an object of class \code{"htest"} and \code{"studpermutest"} that can be printed and plotted. The printout shows the test result and \eqn{p}-value. The plot shows the summary functions for the groups (and the group means if requested). } \value{ Object of class \code{"studpermutest"}. } \references{ Hahn, U. (2012) A studentized permutation test for the comparison of spatial point patterns. \emph{Journal of the American Statistical Association} \bold{107} (498), 754--764. } \seealso{ \code{\link{plot.studpermutest}} } \author{ Ute Hahn. Modified for \code{spatstat} by \spatstatAuthors. } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) testpyramidal } \keyword{spatial} \keyword{htest} spatstat/man/add.texture.Rd0000644000176200001440000000313713333543262015416 0ustar liggesusers\name{add.texture} \alias{add.texture} \title{ Fill Plot With Texture } \description{ Draws a simple texture inside a region on the plot. } \usage{ add.texture(W, texture = 4, spacing = NULL, ...) } \arguments{ \item{W}{ Window (object of class \code{"owin"}) inside which the texture should be drawn. } \item{texture}{ Integer from 1 to 8 identifying the type of texture. See Details. 
} \item{spacing}{ Spacing between elements of the texture, in units of the current plot. } \item{\dots}{ Further arguments controlling the plot colour, line width etc. } } \details{ The chosen texture, confined to the window \code{W}, will be added to the current plot. The available textures are: \describe{ \item{texture=1:}{ Small crosses arranged in a square grid. } \item{texture=2:}{ Parallel vertical lines. } \item{texture=3:}{ Parallel horizontal lines. } \item{texture=4:}{ Parallel diagonal lines at 45 degrees from the horizontal. } \item{texture=5:}{ Parallel diagonal lines at 135 degrees from the horizontal. } \item{texture=6:}{ Grid of horizontal and vertical lines. } \item{texture=7:}{ Grid of diagonal lines at 45 and 135 degrees from the horizontal. } \item{texture=8:}{ Grid of hexagons. } } } \author{ \adrian and \rolf } \seealso{ \code{\link{owin}}, \code{\link{plot.owin}}, \code{\link{textureplot}}, \code{\link{texturemap}}. } \examples{ W <- Window(chorley) plot(W, main="") add.texture(W, 7) } \keyword{spatial} \keyword{hplot} spatstat/man/hist.im.Rd0000644000176200001440000000462213333543263014543 0ustar liggesusers\name{hist.im} \alias{hist.im} \title{Histogram of Pixel Values in an Image} \description{ Computes and displays a histogram of the pixel values in a pixel image. The \code{hist} method for class \code{"im"}. } \usage{ \method{hist}{im}(x, \dots, probability=FALSE, xname) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Arguments passed to \code{\link{hist.default}} or \code{\link{barplot}}.} \item{probability}{Logical. If \code{TRUE}, the histogram will be normalised to give probabilities or probability densities. } \item{xname}{Optional. Character string to be used as the name of the dataset \code{x}. } } \details{ This function computes and (by default) displays a histogram of the pixel values in the image \code{x}. An object of class \code{"im"} describes a pixel image. 
See \code{\link{im.object}} for details of this class.
} \usage{ Window(X, \dots) Window(X, \dots) <- value \method{Window}{ppp}(X, \dots) \method{Window}{ppp}(X, \dots) <- value \method{Window}{psp}(X, \dots) \method{Window}{psp}(X, \dots) <- value \method{Window}{im}(X, \dots) \method{Window}{im}(X, \dots) <- value } \arguments{ \item{X}{ A spatial object such as a point pattern, line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } \item{value}{ Another window (object of class \code{"owin"}) to be used as the window for \code{X}. } } \details{ The functions \code{Window} and \code{Window<-} are generic. \code{Window(X)} extracts the spatial window in which \code{X} is defined. \code{Window(X) <- W} changes the window in which \code{X} is defined to the new window \code{W}, and \emph{discards any data outside} \code{W}. In particular: \itemize{ \item If \code{X} is a point pattern (object of class \code{"ppp"}) then \code{Window(X) <- W} discards any points of \code{X} which fall outside \code{W}. \item If \code{X} is a line segment pattern (object of class \code{"psp"}) then \code{Window(X) <- W} clips the segments of \code{X} to the boundaries of \code{W}. \item If \code{X} is a pixel image (object of class \code{"im"}) then \code{Window(X) <- W} has the effect that pixels lying outside \code{W} are retained but their pixel values are set to \code{NA}. } Many other classes of spatial object have a method for \code{Window}, but not \code{Window<-}. See \code{\link{Window.ppm}}. } \value{ The result of \code{Window} is a window (object of class \code{"owin"}). The result of \code{Window<-} is the updated object \code{X}, of the same class as \code{X}. 
\author{\adrian, \rolf and \ege }
If \code{drop=FALSE} (the default), the functions \code{raster.x} and \code{raster.y} return a matrix of the same dimensions as the pixel image or mask itself, with entries giving the \eqn{x} coordinate (for \code{raster.x}) or \eqn{y} coordinate (for \code{raster.y}) of each pixel in the pixel grid. If \code{drop=TRUE}, pixels that lie outside the window \code{w} (or outside the domain of the image \code{w}) are removed, and \code{raster.x} and \code{raster.y} return numeric vectors containing the coordinates of the pixels that are inside the window \code{w}. The function \code{raster.xy} returns a list with components \code{x} and \code{y} which are numeric vectors of equal length containing the pixel coordinates. } \seealso{ \code{\link{owin}}, \code{\link{as.mask}}, \code{\link{pixelcentres}} } \examples{ u <- owin(c(-1,1),c(-1,1)) # square of side 2 w <- as.mask(u, eps=0.01) # 200 x 200 grid X <- raster.x(w) Y <- raster.y(w) disc <- owin(c(-1,1), c(-1,1), mask=(X^2 + Y^2 <= 1)) \dontrun{plot(disc)} # approximation to the unit disc } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/rStrauss.Rd0000644000176200001440000001164313602545270015016 0ustar liggesusers\name{rStrauss} \alias{rStrauss} \title{Perfect Simulation of the Strauss Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss process, using a perfect simulation algorithm. } \usage{ rStrauss(beta, gamma = 1, R = 0, W = owin(), expand=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. } \item{expand}{ Logical. If \code{FALSE}, simulation is performed in the window \code{W}, which must be rectangular. 
If \code{TRUE} (the default), simulation is performed on a larger window, and the result is clipped to the original window \code{W}. Alternatively \code{expand} can be an object of class \code{"rmhexpand"} (see \code{\link{rmhexpand}}) determining the expansion method. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function generates a realisation of the Strauss point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss process (Strauss, 1975; Kelly and Ripley, 1976) is a model for spatial inhibition, ranging from a strong `hard core' inhibition to a completely random pattern according to the value of \code{gamma}. The Strauss process with interaction radius \eqn{R} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{R} units apart, and \eqn{\alpha}{alpha} is the normalising constant. Intuitively, each point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} in order that the process be well-defined (Kelly and Ripley, 1976). This model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma=1}{gamma=1} it reduces to a Poisson process (complete spatial randomness) with intensity \eqn{\beta}{beta}. 
If \eqn{\gamma=0}{gamma=0} it is called a ``hard core process'' with hard core radius \eqn{R/2}, since no pair of points is permitted to lie closer than \eqn{R} units apart. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and \Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}). If \code{nsim > 1}, a list of point patterns. } \references{ Berthelsen, K.K. and \Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and \Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. \Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \author{ Kasper Klitgaard Berthelsen, adapted for \pkg{spatstat} by \adrian } \examples{ X <- rStrauss(0.05,0.2,1.5,square(141.4)) Z <- rStrauss(100,0.7,0.05, nsim=2) } \seealso{ \code{\link{rmh}}, \code{\link{Strauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}, \code{\link{rPenttinen}}. 
} \keyword{spatial} \keyword{datagen} spatstat/man/Replace.linim.Rd0000644000176200001440000000345413333543264015655 0ustar liggesusers\name{Replace.linim} \alias{[<-.linim} \title{Reset Values in Subset of Image on Linear Network} \description{ Reset the values in a subset of a pixel image on a linear network. } \usage{ \method{[}{linim}(x, i, j) <- value } \arguments{ \item{x}{ A pixel image on a linear network. An object of class \code{"linim"}. } \item{i}{ Object defining the subregion or subset to be replaced. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is appropriate to some sort of replacement \emph{other than} matrix indexing. } \item{value}{ Vector, matrix, factor or pixel image containing the replacement values. Short vectors will be recycled. } } \value{ The image \code{x} with the values replaced. } \details{ This function changes some of the pixel values in a pixel image. The image \code{x} must be an object of class \code{"linim"} representing a pixel image on a linear network. The pixel values are replaced according to the rules described in the help for \code{\link{[<-.im}}. Then the auxiliary data are updated. } \seealso{ \code{\link{[<-.im}}. } \examples{ # make a function Y <- as.linim(distfun(runiflpp(5, simplenet))) # replace some values B <- square(c(0.25, 0.55)) Y[B] <- 2 plot(Y, main="") plot(B, add=TRUE, lty=3) X <- runiflpp(4, simplenet) Y[X] <- 5 } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{manip} spatstat/man/adaptive.density.Rd0000644000176200001440000000302713426455703016445 0ustar liggesusers\name{adaptive.density} \alias{adaptive.density} \title{Adaptive Estimate of Intensity of Point Pattern} \description{ Computes an adaptive estimate of the intensity function of a point pattern. } \usage{ adaptive.density(X, \dots, method=c("voronoi","kernel")) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"} or \code{"lpp"}).} \item{method}{Character string specifying the estimation method} \item{\dots}{ Additional arguments passed to \code{\link{densityVoronoi}} or \code{\link{densityAdaptiveKernel}}. } } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity. If \code{method="voronoi"} the data are passed to the function \code{\link{densityVoronoi}} which estimates the intensity using the Voronoi-Dirichlet tessellation. If \code{method="kernel"} the data are passed to the function \code{\link{densityAdaptiveKernel}} which estimates the intensity using a variable-bandwidth kernel estimator. } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{densityVoronoi}}, \code{\link{densityAdaptiveKernel}}, \code{\link{im.object}}. } \examples{ plot(adaptive.density(nztrees, 1), main="Voronoi estimate") } \author{ \spatstatAuthors and Mehdi Moradi. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/edges2triangles.Rd0000644000176200001440000000332113333543263016245 0ustar liggesusers\name{edges2triangles} \alias{edges2triangles} \title{ List Triangles in a Graph } \description{ Given a list of edges between vertices, compile a list of all triangles formed by these edges. 
} \usage{ edges2triangles(iedge, jedge, nvert=max(iedge, jedge), \dots, check=TRUE, friendly=rep(TRUE, nvert)) } \arguments{ \item{iedge,jedge}{ Integer vectors, of equal length, specifying the edges. } \item{nvert}{ Number of vertices in the network. } \item{\dots}{Ignored} \item{check}{Logical. Whether to check validity of input data.} \item{friendly}{ Optional. For advanced use. See Details. } } \details{ This low level function finds all the triangles (cliques of size 3) in a finite graph with \code{nvert} vertices and with edges specified by \code{iedge, jedge}. The interpretation of \code{iedge, jedge} is that each successive pair of entries specifies an edge in the graph. The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}. Entries of \code{iedge} and \code{jedge} must be integers from 1 to \code{nvert}. To improve efficiency in some applications, the optional argument \code{friendly} can be used. It should be a logical vector of length \code{nvert} specifying a labelling of the vertices, such that two vertices \code{j,k} which are \emph{not} friendly (\code{friendly[j] = friendly[k] = FALSE}) are \emph{never} connected by an edge. } \value{ A 3-column matrix of integers, in which each row represents a triangle. } \seealso{ \code{\link{edges2vees}} } \author{\adrian and \rolf } \examples{ i <- c(1, 2, 5, 5, 1, 4, 2) j <- c(2, 3, 3, 1, 3, 2, 5) edges2triangles(i, j) } \keyword{spatial} \keyword{manip} spatstat/man/edge.Ripley.Rd0000644000176200001440000000610013541331477015333 0ustar liggesusers\name{edge.Ripley} \alias{edge.Ripley} \alias{rmax.Ripley} \title{ Ripley's Isotropic Edge Correction } \description{ Computes Ripley's isotropic edge correction weights for a point pattern. } \usage{ edge.Ripley(X, r, W = Window(X), method = c("C", "interpreted"), maxweight = 100, internal=list()) rmax.Ripley(W) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. 
} \item{r}{ Vector or matrix of interpoint distances for which the edge correction should be computed. } \item{method}{ Choice of algorithm. Either \code{"interpreted"} or \code{"C"}. This is needed only for debugging purposes. } \item{maxweight}{ Maximum permitted value of the edge correction weight. } \item{internal}{For developer use only.} } \details{ The function \code{edge.Ripley} computes Ripley's (1977) isotropic edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Ripley} computes the maximum value of distance \eqn{r} for which the isotropic edge correction estimate of \eqn{K(r)} is valid. For a single point \eqn{x} in a window \eqn{W}, and a distance \eqn{r > 0}, the isotropic edge correction weight is \deqn{ e(u, r) = \frac{2\pi r}{\mbox{length}(c(u,r) \cap W)} }{ e(u, r) = 2 * \pi * r/length(intersection(c(u,r), W)) } where \eqn{c(u,r)} is the circle of radius \eqn{r} centred at the point \eqn{u}. The denominator is the length of the overlap between this circle and the window \eqn{W}. The function \code{edge.Ripley} computes this edge correction weight for each point in the point pattern \code{X} and for each corresponding distance value in the vector or matrix \code{r}. If \code{r} is a vector, with one entry for each point in \code{X}, then the result is a vector containing the edge correction weights \code{e(X[i], r[i])} for each \code{i}. If \code{r} is a matrix, with one row for each point in \code{X}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], r[i,j])}. For example \code{edge.Ripley(X, pairdist(X))} computes all the edge corrections required for the \eqn{K}-function. If any value of the edge correction weight exceeds \code{maxwt}, it is set to \code{maxwt}. 
The function \code{rmax.Ripley} computes the smallest distance \eqn{r} such that it is possible to draw a circle of radius \eqn{r}, centred at a point of \code{W}, such that the circle does not intersect the interior of \code{W}. } \value{ A numeric vector or matrix. } \references{ Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{edge.Trans}}, \code{\link{rmax.Trans}}, \code{\link{Kest}} } \examples{ v <- edge.Ripley(cells, pairdist(cells)) rmax.Ripley(Window(cells)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/envelopeArray.Rd0000644000176200001440000000517413333543263016007 0ustar liggesusers\name{envelopeArray} \alias{envelopeArray} \title{ Array of Simulation Envelopes of Summary Function } \description{ Compute an array of simulation envelopes using a summary function that returns an array of curves. } \usage{ envelopeArray(X, fun, \dots, dataname = NULL, verb = FALSE, reuse = TRUE) } \arguments{ \item{X}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"lppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. The result of \code{fun} should be a function array (object of class \code{"fasp"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} to control the simulations, or passed to \code{fun} when evaluating the function. } \item{dataname}{ Optional character string name for the data. } \item{verb}{ Logical value indicating whether to print progress reports. 
} \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}, the default) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This command is the counterpart of \code{\link{envelope}} when the function \code{fun} that is evaluated on each simulated point pattern will return an object of class \code{"fasp"} representing an array of summary functions. Simulated point patterns are generated according to the rules described for \code{\link{envelope}}. In brief, if \code{X} is a point pattern, the algorithm generates simulated point patterns of the same kind, according to complete spatial randomness. If \code{X} is a fitted model, the algorithm generates simulated point patterns according to this model. For each simulated point pattern \code{Y}, the function \code{fun} is invoked. The result \code{Z <- fun(Y, ...)} should be an object of class \code{"fasp"} representing an array of summary functions. The dimensions of the array \code{Z} should be the same for each simulated pattern \code{Y}. This algorithm finds the simulation envelope of the summary functions in each cell of the array. } \value{ An object of class \code{"fasp"} representing an array of envelopes. } \author{ \spatstatAuthors. } \seealso{ \code{\link{envelope}}, \code{\link{alltypes}}. } \examples{ A <- envelopeArray(finpines, markcrosscorr, nsim=9) plot(A) } \keyword{spatial} \keyword{nonparametric} \keyword{iteration} spatstat/man/fitted.ppm.Rd0000644000176200001440000001177113571674202015247 0ustar liggesusers\name{fitted.ppm} \alias{fitted.ppm} \title{ Fitted Conditional Intensity for Point Process Model } \description{ Given a point process model fitted to a point pattern, compute the fitted conditional intensity or fitted trend of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. 
} \usage{ \method{fitted}{ppm}(object, \dots, type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, ignore.hardcore=FALSE, dropcoef=FALSE) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } \item{type}{ String (partially matched) indicating whether the fitted value is the conditional intensity (\code{"lambda"} or \code{"cif"}) or the first order trend (\code{"trend"}) or the logarithm of conditional intensity (\code{"link"}). } \item{dataonly}{ Logical. If \code{TRUE}, then values will only be computed at the points of the data point pattern. If \code{FALSE}, then values will be computed at all the points of the quadrature scheme used to fit the model, including the points of the data point pattern. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{ignore.hardcore}{ Advanced use only. Logical value specifying whether to compute only the finite part of the interaction potential (effectively removing any hard core interaction terms). } \item{dropcoef}{ Internal use only. } } \value{ A vector containing the values of the fitted conditional intensity, fitted spatial trend, or logarithm of the fitted conditional intensity. 
Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \details{ The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the model-fitting algorithm \code{\link{ppm}}). This function evaluates the conditional intensity \eqn{\hat\lambda(u, x)}{lambdahat(u,x)} or spatial trend \eqn{\hat b(u)}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, where \code{x} is the original point pattern dataset to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}. The argument \code{drop} is explained in \code{\link{quad.ppm}}. Use \code{\link{predict.ppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005). Residual analysis for spatial point processes (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. 
} \seealso{ \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{predict.ppm}} } \examples{ str <- ppm(cells ~x, Strauss(r=0.1)) lambda <- fitted(str) # extract quadrature points in corresponding order quadpoints <- union.quad(quad.ppm(str)) # plot conditional intensity values # as circles centred on the quadrature points quadmarked <- setmarks(quadpoints, lambda) plot(quadmarked) if(!interactive()) str <- ppm(cells ~ x) lambdaX <- fitted(str, leaveoneout=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/linearK.Rd0000644000176200001440000000406413333543263014555 0ustar liggesusers\name{linearK} \alias{linearK} \title{ Linear K Function } \description{ Computes an estimate of the linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearK(X, r=NULL, ..., correction="Ang", ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the linear \eqn{K} function from point pattern data on a linear network. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is the network \eqn{K} function as defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010; Ang et al, 2012). } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. 
MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{compileK}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearK(X) linearK(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat/man/logLik.ppm.Rd0000644000176200001440000001243113556771775015223 0ustar liggesusers\name{logLik.ppm} \alias{logLik.ppm} \alias{deviance.ppm} \alias{AIC.ppm} \alias{extractAIC.ppm} \alias{nobs.ppm} \title{Log Likelihood and AIC for Point Process Model} \description{ Extracts the log likelihood, deviance, and AIC of a fitted Poisson point process model, or analogous quantities based on the pseudolikelihood or logistic likelihood for a fitted Gibbs point process model. } \usage{ \method{logLik}{ppm}(object, \dots, new.coef=NULL, warn=TRUE, absolute=FALSE) \method{deviance}{ppm}(object, \dots) \method{AIC}{ppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{ppm}(fit, scale=0, k=2, \dots, takeuchi=TRUE) \method{nobs}{ppm}(object, \dots) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"ppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood or logistic likelihood is returned instead of the likelihood. } \item{absolute}{ Logical value indicating whether to include constant terms in the loglikelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{new.coef}{ New values for the canonical parameters of the model. 
A numeric vector of the same length as \code{coef(object)}. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{extractAIC}} and \code{\link[stats]{nobs}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{logLik.ppm} computes the maximised value of the log likelihood for the fitted model \code{object} (as approximated by quadrature using the Berman-Turner approximation) is extracted. If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning (if \code{warn=TRUE}). The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. The method \code{extractAIC.ppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood or logistic likelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. 
These two choices are equivalent for a Poisson process. The method \code{nobs.ppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link[stats]{step}} uses these methods. } \section{Model comparison}{ The values of \code{logLik} and \code{AIC} returned by these functions are based on the \emph{pseudolikelihood} of the Gibbs point process model. If the model is a Poisson process, then the pseudolikelihood is the same as the likelihood, but for other Gibbs models, the pseudolikelihood is different from the likelihood (and the likelihood of a Gibbs model is hard to compute). For model comparison and model selection, it is valid to compare the \code{logLik} values, or to compare the \code{AIC} values, but only when all the models are of class \code{"ppm"}. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Varin, C. and Vidoni, P. (2005) A note on composite likelihood inference and model selection. \emph{Biometrika} \bold{92}, 519--528. } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{anova.ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{formula.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{terms.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \author{ \spatstatAuthors. 
} \examples{ data(cells) fit <- ppm(cells, ~x) nobs(fit) logLik(fit) deviance(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat/man/parres.Rd0000644000176200001440000001763013442413652014466 0ustar liggesusers\name{parres} \alias{parres} \title{ Partial Residuals for Point Process Model } \description{ Computes the smoothed partial residuals, a diagnostic for transformation of a covariate in a Poisson point process model. } \usage{ parres(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw = "nrd0", adjust=1, from = NULL, to = NULL, n = 512, bw.input = c("points", "quad"), bw.restrict=FALSE, covname) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate of interest. Either a character string matching the name of one of the canonical covariates in the model, or one of the names \code{"x"} or \code{"y"} referring to the Cartesian coordinates, or one of the names of the covariates given when \code{model} was fitted, or a pixel image (object of class \code{"im"}) or \code{function(x,y)} supplying the values of a covariate at any location. If the \code{model} depends on only one covariate, then this covariate is the default; otherwise a covariate must be specified. } \item{smooth.effect}{ Logical. Determines the choice of algorithm. See Details. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. 
} \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } } \details{ This command computes the smoothed partial residual diagnostic (Baddeley, Chang, Song and Turner, 2012) for the transformation of a covariate in a Poisson point process model. The argument \code{model} must be a fitted Poisson point process model. The diagnostic works in two different ways: \describe{ \item{Canonical covariate:}{ The argument \code{covariate} may be a character string which is the name of one of the \emph{canonical covariates} in the model. The canonical covariates are the functions \eqn{Z_j}{Z[j]} that appear in the expression for the Poisson point process intensity \deqn{ \lambda(u) = \exp(\beta_1 Z_1(u) + \ldots + \beta_p Z_p(u)) }{ lambda(u) = exp(beta[1] * Z[1](u) + \ldots + \beta[p] * Z[p](u)) } at spatial location \eqn{u}. Type \code{names(coef(model))} to see the names of the canonical covariates in \code{model}. If the selected covariate is \eqn{Z_j}{Z[j]}, then the diagnostic plot concerns the model term \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)}. The plot shows a smooth estimate of a function \eqn{h(z)} that should replace this linear term, that is, \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)} should be replaced by \eqn{h(Z_j(u))}{h(Z[j](u))}. The linear function is also plotted as a dotted line. } \item{New covariate:}{ If the argument \code{covariate} is a pixel image (object of class \code{"im"}) or a \code{function(x,y)}, it is assumed to provide the values of a covariate that is not present in the model. 
Alternatively \code{covariate} can be the name of a covariate that was supplied when the model was fitted (i.e. in the call to \code{\link{ppm}}) but which does not feature in the model formula. In either case we speak of a new covariate \eqn{Z(u)}. If the fitted model intensity is \eqn{\lambda(u)}{lambda(u)} then we consider modifying this to \eqn{\lambda(u) \exp(h(Z(u)))}{lambda(u) * exp(h(Z(u)))} where \eqn{h(z)} is some function. The diagnostic plot shows an estimate of \eqn{h(z)}. \bold{Warning: in this case the diagnostic is not theoretically justified. This option is provided for research purposes.} } } Alternatively \code{covariate} can be one of the character strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. The behaviour here depends on whether the coordinate was one of the canonical covariates in the model. If there is more than one canonical covariate in the model that depends on the specified \code{covariate}, then the covariate effect is computed using all these canonical covariates. For example in a log-quadratic model which includes the terms \code{x} and \code{I(x^2)}, the quadratic effect involving both these terms will be computed. There are two choices for the algorithm. If \code{smooth.effect=TRUE}, the fitted covariate effect (according to \code{model}) is added to the point process residuals, then smoothing is applied to these values. If \code{smooth.effect=FALSE}, the point process residuals are smoothed first, and then the fitted covariate effect is added to the result. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. 
The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \section{Slow computation}{ In a large dataset, computation can be very slow if the default settings are used, because the smoothing bandwidth is selected automatically. To avoid this, specify a numerical value for the bandwidth \code{bw}. One strategy is to use a coarser subset of the data to select \code{bw} automatically. The selected bandwidth can be read off the print output for \code{parres}. } \value{ A function value table (object of class \code{"fv"}) containing the values of the smoothed partial residual, the estimated variance, and the fitted effect of the covariate. Also belongs to the class \code{"parres"} which has methods for \code{print} and \code{plot}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2013) Residual diagnostics for covariate effects in spatial point process models. \emph{Journal of Computational and Graphical Statistics}, \bold{22}, 886--905. } \author{ \adrian, \rolf, Ya-Mei Chang and Yong Song. 
} \seealso{ \code{\link{addvar}}, \code{\link{rhohat}}, \code{\link{rho2hat}} } \examples{ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) tra <- parres(model, "x") plot(tra) tra plot(parres(model, "x", subregion=square(0.5))) model2 <- ppm(X ~x+I(x^2)+y) plot(parres(model2, "x")) Z <- setcov(owin()) plot(parres(model2, Z)) #' when the model involves only one covariate modelb <- ppm(bei ~ elev + I(elev^2), data=bei.extra) plot(parres(modelb)) } \keyword{spatial} \keyword{models} spatstat/man/edit.hyperframe.Rd0000644000176200001440000000223513333543263016254 0ustar liggesusers\name{edit.hyperframe} \alias{edit.hyperframe} \title{ Invoke Text Editor on Hyperframe } \description{ Invokes a text editor allowing the user to inspect and change entries in a hyperframe. } \usage{ \method{edit}{hyperframe}(name, \dots) } \arguments{ \item{name}{ A hyperframe (object of class \code{"hyperframe"}). } \item{\dots}{ Other arguments passed to \code{\link[utils]{edit.data.frame}}. } } \details{ The function \code{\link[utils]{edit}} is generic. This function is the methods for objects of class \code{"hyperframe"}. The hyperframe \code{name} is converted to a data frame or array, and the text editor is invoked. The user can change entries in the columns of data, and create new columns of data. Only the columns of atomic data (numbers, characters, factor values etc) can be edited. Note that the original object \code{name} is not changed; the function returns the edited dataset. } \value{ Another hyperframe. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link[utils]{edit.data.frame}}, \code{\link{edit.ppp}} } \examples{ if(interactive()) Z <- edit(flu) } \keyword{spatial} \keyword{manip} spatstat/man/is.multitype.Rd0000644000176200001440000000325713333543263015641 0ustar liggesusers\name{is.multitype} \alias{is.multitype} \title{Test whether Object is Multitype} \description{ Generic function to test whether a given object (usually a point pattern or something related to a point pattern) has ``marks'' attached to the points which classify the points into several types. } \usage{ is.multitype(X, \dots) } \arguments{ \item{X}{ Object to be inspected } \item{\dots}{ Other arguments. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is multitype. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link[spatstat.data]{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. Other objects related to point patterns, such as point process models, may involve marked points. This function tests whether the object \code{X} contains or involves marked points, \bold{and} that the marks are a factor. For example, the \code{\link[spatstat.data]{amacrine}} dataset is multitype (there are two types of cells, on and off), but the \code{\link[spatstat.data]{longleaf}} dataset is \emph{not} multitype (the marks are real numbers). This function is generic; methods are provided for point patterns (objects of class \code{"ppp"}) and point process models (objects of class \code{"ppm"}). 
} \seealso{ \code{\link{is.multitype.ppp}}, \code{\link{is.multitype.ppm}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/layered.Rd0000644000176200001440000000564313333543263014621 0ustar liggesusers\name{layered} \alias{layered} \title{ Create List of Plotting Layers } \description{ Given several objects which are capable of being plotted, create a list containing these objects as if they were successive layers of a plot. The list can then be plotted in different ways. } \usage{ layered(..., plotargs = NULL, LayerList=NULL) } \arguments{ \item{\dots}{ Objects which can be plotted by \code{plot}. } \item{plotargs}{ Default values of the plotting arguments for each of the objects. A list of lists of arguments of the form \code{name=value}. } \item{LayerList}{ A list of objects. Incompatible with \code{\dots}. } } \details{ Layering is a simple mechanism for controlling a high-level plot that is composed of several successive plots, for example, a background and a foreground plot. The layering mechanism makes it easier to issue the plot command, to switch on or off the plotting of each individual layer, to control the plotting arguments that are passed to each layer, and to zoom in. Each individual layer in the plot should be saved as an object that can be plotted using \code{plot}. It will typically belong to some class, which has a method for the generic function \code{plot}. The command \code{layered} simply saves the objects \code{\dots} as a list of class \code{"layered"}. This list can then be plotted by the method \code{\link{plot.layered}}. Thus, you only need to type a single \code{plot} command to produce the multi-layered plot. Individual layers of the plot can be switched on or off, or manipulated, using arguments to \code{\link{plot.layered}}. The argument \code{plotargs} contains default values of the plotting arguments for each layer. It should be a list, with one entry for each object in \code{\dots}. 
Each entry of \code{plotargs} should be a list of arguments in the form \code{name=value}, which are recognised by the \code{plot} method for the relevant layer. The \code{plotargs} can also include an argument named \code{.plot} specifying (the name of) a function to perform the plotting instead of the generic \code{plot}. The length of \code{plotargs} should either be equal to the number of layers, or equal to 1. In the latter case it will be replicated to the appropriate length. } \value{ A list, belonging to the class \code{"layered"}. There are methods for \code{plot}, \code{"["}, \code{"shift"}, \code{"affine"}, \code{"rotate"} and \code{"rescale"}. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.layered}}, \code{\link{methods.layered}}, \code{\link{as.layered}}, \code{\link{[.layered}}, \code{\link{layerplotargs}}. } \examples{ D <- distmap(cells) L <- layered(D, cells) L L <- layered(D, cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) plot(L) layerplotargs(L)[[1]] <- list(.plot="contour") plot(L) } \keyword{spatial} \keyword{hplot} spatstat/man/GmultiInhom.Rd0000644000176200001440000000575213551474410015430 0ustar liggesusers\name{GmultiInhom} \alias{GmultiInhom} \title{ Inhomogeneous Marked G-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{G} function, effectively the cumulative distribution function of the distance from a point in subset \eqn{I} to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. } \usage{ GmultiInhom(X, I, J, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}. } \item{I}{ A subset index specifying the subset of points \emph{from} which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. 
} \item{J}{ A subset index specifying the subset of points \emph{to} which distances are measured. Any kind of subset index acceptable to \code{\link{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaI,lambdaJ}. } \item{lambdaI}{ Intensity estimates for each point of \code{X[I]}. A numeric vector of length equal to \code{npoints(X[I])}. Incompatible with \code{lambda}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{G} function should be estimated. There is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ See Cronie and Van Lieshout (2015). } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{G} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{Ginhom}}, \code{\link{Gmulti}} } \examples{ X <- rescale(amacrine) I <- (marks(X) == "on") J <- (marks(X) == "off") mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 plot(GmultiInhom(X, I, J, lambda=lam, lambdamin=lmin)) # equivalent plot(GmultiInhom(X, I, J, lambdaI=lam[I], lambdaJ=lam[J], lambdamin=lmin), main="") } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmh.default.Rd0000644000176200001440000006612213615444537015413 0ustar liggesusers\name{rmh.default} \alias{rmh.default} \title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.} \description{ Generates a random point pattern, simulated from a chosen point process model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{default}(model, start=NULL, control=default.rmhcontrol(model), \dots, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, snoop=FALSE) } \arguments{ \item{model}{Data specifying the point process model that is to be simulated. } \item{start}{Data determining the initial state of the algorithm. } \item{control}{Data controlling the iterative behaviour and termination of the algorithm. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}} or to trend functions in \code{model}. } \item{nsim}{ Number of simulated point patterns that should be generated. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } \item{saveinfo}{ Logical value indicating whether to save auxiliary information. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{snoop}{ Logical. If \code{TRUE}, activate the visual debugger. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}) or a list of point patterns. 
The returned value has an attribute \code{info} containing modified versions of the arguments \code{model}, \code{start}, and \code{control} which together specify the exact simulation procedure. The \code{info} attribute can be printed (and is printed automatically by \code{\link{summary.ppp}}). For computational efficiency, the \code{info} attribute can be omitted by setting \code{saveinfo=FALSE}. The value of \code{\link[base:Random]{.Random.seed}} at the start of the simulations is also saved and returned as an attribute \code{seed}. If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}), the transition history of the algorithm is saved, and returned as an attribute \code{history}. The transition history is a data frame containing a factor \code{proposaltype} identifying the proposal type (Birth, Death or Shift) and a logical vector \code{accepted} indicating whether the proposal was accepted. The data frame also has columns \code{numerator}, \code{denominator} which give the numerator and denominator of the Hastings ratio for the proposal. If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}), the return value has an attribute \code{saved} which is a list of point patterns, containing the intermediate states of the algorithm. } \details{ This function generates simulated realisations from any of a range of spatial point processes, using the Metropolis-Hastings algorithm. It is the default method for the generic function \code{\link{rmh}}. This function executes a Metropolis-Hastings algorithm with birth, death and shift proposals as described in Geyer and \Moller (1994). The argument \code{model} specifies the point process model to be simulated. It is either a list, or an object of class \code{"rmhmodel"}, with the following components: \describe{ \item{cif}{A character string specifying the choice of interpoint interaction for the point process. } \item{par}{ Parameter values for the conditional intensity function. 
} \item{w}{ (Optional) window in which the pattern is to be generated. An object of class \code{"owin"}, or data acceptable to \code{\link{as.owin}}. } \item{trend}{ Data specifying the spatial trend in the model, if it has a trend. This may be a function, a pixel image (of class \code{"im"}), (or a list of functions or images if the model is multitype). If the trend is a function or functions, any auxiliary arguments \code{...} to \code{rmh.default} will be passed to these functions, which should be of the form \code{function(x, y, ...)}. } \item{types}{ List of possible types, for a multitype point process. } } For full details of these parameters, see \code{\link{rmhmodel.default}}. The argument \code{start} determines the initial state of the Metropolis-Hastings algorithm. It is either \code{NULL}, or an object of class \code{"rmhstart"}, or a list with the following components: \describe{ \item{n.start}{ Number of points in the initial point pattern. A single integer, or a vector of integers giving the numbers of points of each type in a multitype point pattern. Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{"ppp"}), or data which can be coerced to this class by \code{\link{as.ppp}}, or an object with components \code{x} and \code{y}, or a two-column matrix. In the last two cases, the window for the pattern is determined by \code{model$w}. In the first two cases, if \code{model$w} is also present, then the final simulated pattern will be clipped to the window \code{model$w}. } } For full details of these parameters, see \code{\link{rmhstart}}. The third argument \code{control} controls the simulation procedure (including \emph{conditional simulation}), iterative behaviour, and termination of the Metropolis-Hastings algorithm. 
It is either \code{NULL}, or a list, or an object of class \code{"rmhcontrol"}, with components: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that birth/death has been chosen over shift. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. } \item{expand}{ Either a numerical expansion factor, or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a larger domain than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. The default is to expand the simulation window if the model is stationary and non-Poisson (i.e. it has no trend and the interaction is not Poisson) and not to expand in all other cases. If the model has a trend, then in order for expansion to be feasible, the trend must be given either as a function, or an image whose bounding box is large enough to contain the expanded window. } \item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE} we simulate a process on the torus formed by identifying opposite edges of a rectangular window. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{x.cond}{If this argument is present, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the conditioning points and the type of conditioning. 
} \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } } For full details of these parameters, see \code{\link{rmhcontrol}}. The control parameters can also be given in the \code{\dots} arguments. } \section{Conditional Simulation}{ There are several kinds of conditional simulation. \itemize{ \item Simulation \emph{conditional upon the number of points}, that is, holding the number of points fixed. To do this, set \code{control$p} (the probability of a shift) equal to 1. The number of points is then determined by the starting state, which may be specified either by setting \code{start$n.start} to be a scalar, or by setting the initial pattern \code{start$x.start}. \item In the case of multitype processes, it is possible to simulate the model \emph{conditionally upon the number of points of each type}, i.e. holding the number of points of each type to be fixed. To do this, set \code{control$p} equal to 1 and \code{control$fixall} to be \code{TRUE}. The number of points is then determined by the starting state, which may be specified either by setting \code{start$n.start} to be an integer vector, or by setting the initial pattern \code{start$x.start}. \item Simulation \emph{conditional on the configuration observed in a sub-window}, that is, requiring that, inside a specified sub-window \eqn{V}, the simulated pattern should agree with a specified point pattern \eqn{y}.To do this, set \code{control$x.cond} to equal the specified point pattern \eqn{y}, making sure that it is an object of class \code{"ppp"} and that the window \code{Window(control$x.cond)} is the conditioning window \eqn{V}. 
\item Simulation \emph{conditional on the presence of specified points}, that is, requiring that the simulated pattern should include a specified set of points. This is simulation from the Palm distribution of the point process given a pattern \eqn{y}. To do this, set \code{control$x.cond} to be a \code{data.frame} containing the coordinates (and marks, if appropriate) of the specified points. } For further information, see \code{\link{rmhcontrol}}. Note that, when we simulate conditionally on the number of points, or conditionally on the number of points of each type, no expansion of the window is possible. } \section{Visual Debugger}{ If \code{snoop = TRUE}, an interactive debugger is activated. On the current plot device, the debugger displays the current state of the Metropolis-Hastings algorithm together with the proposed transition to the next state. Clicking on this graphical display (using the left mouse button) will re-centre the display at the clicked location. Surrounding this graphical display is an array of boxes representing different actions. Clicking on one of the action boxes (using the left mouse button) will cause the action to be performed. Debugger actions include: \itemize{ \item Zooming in or out \item Panning (shifting the field of view) left, right, up or down \item Jumping to the next iteration \item Skipping 10, 100, 1000, 10000 or 100000 iterations \item Jumping to the next Birth proposal (etc) \item Changing the fate of the proposal (i.e. changing whether the proposal is accepted or rejected) \item Dumping the current state and proposal to a file \item Printing detailed information at the terminal \item Exiting the debugger (so that the simulation algorithm continues without further interruption). } Right-clicking the mouse will also cause the debugger to exit. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283 -- 322. Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. Geyer, C.J. and \Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings}{ There is never a guarantee that the Metropolis-Hastings algorithm has converged to its limiting distribution. If \code{start$x.start} is specified then \code{expand} is set equal to 1 and simulation takes place in \code{Window(x.start)}. Any specified value for \code{expand} is simply ignored. The presence of both a component \code{w} of \code{model} and a non-null value for \code{Window(x.start)} makes sense ONLY if \code{w} is contained in \code{Window(x.start)}. For multitype processes make sure that, even if there is to be no trend corresponding to a particular type, there is still a component (a NULL component) for that type, in the list. 
} \seealso{ \code{\link{rmh}}, \code{\link{rmh.ppm}}, \code{\link{rStrauss}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Triplets}} } \section{Other models}{ In theory, any finite point process model can be simulated using the Metropolis-Hastings algorithm, provided the conditional intensity is uniformly bounded. In practice, the list of point process models that can be simulated using \code{rmh.default} is limited to those that have been implemented in the package's internal C code. More options will be added in the future. Note that the \code{lookup} conditional intensity function permits the simulation (in theory, to any desired degree of approximation) of any pairwise interaction process for which the interaction depends only on the distance between the pair of points. } \section{Reproducible simulations}{ If the user wants the simulation to be exactly reproducible (e.g. for a figure in a journal article, where it is useful to have the figure consistent from draft to draft) then the state of the random number generator should be set before calling \code{rmh.default}. This can be done either by calling \code{\link[base:Random]{set.seed}} or by assigning a value to \code{\link[base:Random]{.Random.seed}}. In the examples below, we use \code{\link[base:Random]{set.seed}}. 
If a simulation has been performed and the user now wants to repeat it exactly, the random seed should be extracted from the simulated point pattern \code{X} by \code{seed <- attr(x, "seed")}, then assigned to the system random nunber state by \code{.Random.seed <- seed} before calling \code{rmh.default}. } \examples{ if(interactive()) { nr <- 1e5 nv <- 5000 ns <- 200 } else { nr <- 20 nv <- 5 ns <- 20 oldopt <- spatstat.options() spatstat.options(expand=1.05) } set.seed(961018) # Strauss process. mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.strauss) # Strauss process, conditioning on n = 42: X2.strauss <- rmh(model=mod01,start=list(n.start=42), control=list(p=1,nrep=nr,nverb=nv)) # Tracking algorithm progress: # (a) saving intermediate states: X <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr, nsave=nr/5, nburn=nr/2)) Saved <- attr(X, "saved") plot(Saved) # (b) inspecting transition history: X <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr, track=TRUE)) History <- attr(X, "history") head(History) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X3.hardcore) # Strauss process equal to pure hardcore: mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02s,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Strauss process in a polygonal window. 
x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X4.strauss) # Strauss process in a polygonal window, conditioning on n = 80. X5.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(p=1,nrep=nr,nverb=nv)) # Strauss process, starting off from X4.strauss, but with the # polygonal window replace by a rectangular one. At the end, # the generated pattern is clipped to the original polygonal window. xxx <- X4.strauss Window(xxx) <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr,nverb=nv)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Pure hardcore (identical to X3.strauss). 
mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.sftcr) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) X.area <- rmh(model=mod42,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.area) # Triplets process modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X.triplets <- rmh(model=modtrip, start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.triplets) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) if(interactive()) plot(X1.straussm) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) # Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250)) X.straushm <- rmh(model=mod09,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y 
<- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) if(interactive()) plot(X1.straushm.trend) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.dgs) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.diggra) # Fiksel: modFik <- list(cif="fiksel", par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0), w=square(1)) X.fiksel <- rmh(model=modFik,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.fiksel) # Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.geyer) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod16 <- 
list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr,nverb=nv)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.lookup) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r), w=square(250), trend=tr) X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Baddeley-Geyer r <- seq(0,0.2,length=8)[-1] gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5) mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5), w=square(1)) X1.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod19 <- list(cif="badgey", par=list(beta=4000, gamma=gmma,r=r,sat=1e4), w=square(1)) set.seed(1329) X2.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Check: h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2 hs <- stepfun(r,c(h,1)) mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1)) set.seed(1329) X.check <- rmh(model=mod20,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # X2.badgey and X.check will be identical. 
mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1), r=c(0.035,0.07,0.14),sat=5), w=square(1)) X3.badgey <- rmh(model=mod21,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same result as Geyer model with beta=300, gamma=0.4, r=0.07, # sat = 5 (if seeds and control parameters are the same) # Or more simply: mod22 <- list(cif="badgey", par=list(beta=300,gamma=0.4,r=0.07, sat=5), w=square(1)) X4.badgey <- rmh(model=mod22,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same again --- i.e. the BadGey model includes the Geyer model. # Illustrating scalability. \dontrun{ M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin()) set.seed(496) X1 <- rmh(model=M1,start=list(n.start=300)) M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4), w=owin(c(0,10),c(0,10))) set.seed(496) X2 <- rmh(model=M2,start=list(n.start=300)) chk <- affine(X1,mat=diag(c(10,10))) all.equal(chk,X2,check.attributes=FALSE) # Under the default spatstat options the foregoing all.equal() # will yield TRUE. Setting spatstat.options(scalable=FALSE) and # re-running the code will reveal differences between X1 and X2. } if(!interactive()) spatstat.options(oldopt) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/rshift.Rd0000644000176200001440000000300713333543264014464 0ustar liggesusers\name{rshift} \alias{rshift} \title{Random Shift} \description{ Randomly shifts the points of a point pattern or line segment pattern. Generic. } \usage{ rshift(X, \dots) } \arguments{ \item{X}{Pattern to be subjected to a random shift. A point pattern (class \code{"ppp"}), a line segment pattern (class \code{"psp"}) or an object of class \code{"splitppp"}. } \item{\dots}{ Arguments controlling the generation of the random shift vector, or specifying which parts of the pattern will be shifted. } } \value{ An object of the same type as \code{X}. 
} \details{ This operation applies a random shift (vector displacement) to the points in a point pattern, or to the segments in a line segment pattern. The argument \code{X} may be \itemize{ \item a point pattern (an object of class \code{"ppp"}) \item a line segment pattern (an object of class \code{"psp"}) \item an object of class \code{"splitppp"} (basically a list of point patterns, obtained from \code{\link{split.ppp}}). } The function \code{rshift} is generic, with methods for the three classes \code{"ppp"}, \code{"psp"} and \code{"splitppp"}. See the help pages for these methods, \code{\link{rshift.ppp}}, \code{\link{rshift.psp}} and \code{\link{rshift.splitppp}}, for further information. } \seealso{ \code{\link{rshift.ppp}}, \code{\link{rshift.psp}}, \code{\link{rshift.splitppp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/scan.test.Rd0000644000176200001440000001260113333543264015067 0ustar liggesusers\name{scan.test} \alias{scan.test} \title{ Spatial Scan Test } \description{ Performs the Spatial Scan Test for clustering in a spatial point pattern, or for clustering of one type of point in a bivariate spatial point pattern. } \usage{ scan.test(X, r, ..., method = c("poisson", "binomial"), nsim = 19, baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), verbose = TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{nsim}{ Number of simulations for computing Monte Carlo p-value. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. 
} \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{verbose}{ Logical. Whether to print progress reports. } } \details{ The spatial scan test (Kulldorf, 1997) is applied to the point pattern \code{X}. In a nutshell, \itemize{ \item If \code{method="poisson"} then a significant result would mean that there is a circle of radius \code{r}, located somewhere in the spatial domain of the data, which contains a significantly higher than expected number of points of \code{X}. That is, the pattern \code{X} exhibits spatial clustering. \item If \code{method="binomial"} then \code{X} must be a bivariate (two-type) point pattern. By default, the first type of point is interpreted as a control (non-event) and the second type of point as a case (event). A significant result would mean that there is a circle of radius \code{r} which contains a significantly higher than expected number of cases. That is, the cases are clustered together, conditional on the locations of all points. } Following is a more detailed explanation. \itemize{ \item If \code{method="poisson"} then the scan test based on Poisson likelihood is performed (Kulldorf, 1997). The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). The alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside some circle of radius \code{r} and another intensity \eqn{\beta_0}{beta0} outside the circle. If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. 
The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside some circle of radius \code{r}, and \code{beta0 * baseline} outside the circle. \item If \code{method="binomial"} then the scan test based on binomial likelihood is performed (Kulldorf, 1997). The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that some circle of radius \code{r} has a higher proportion of points of the second type, than expected under the null hypothesis. } The result of \code{scan.test} is a hypothesis test (object of class \code{"htest"}) which can be plotted to report the results. The component \code{p.value} contains the \eqn{p}-value. The result of \code{scan.test} can also be plotted (using the plot method for the class \code{"scan.test"}). The plot is a pixel image of the Likelihood Ratio Test Statistic (2 times the log likelihood ratio) as a function of the location of the centre of the circle. This pixel image can be extracted from the object using \code{\link{as.im.scan.test}}. The Likelihood Ratio Test Statistic is computed by \code{\link{scanLRTS}}. } \value{ An object of class \code{"htest"} (hypothesis test) which also belongs to the class \code{"scan.test"}. Printing this object gives the result of the test. Plotting this object displays the Likelihood Ratio Test Statistic as a function of the location of the centre of the circle. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. 
} \author{\adrian and \rolf } \seealso{ \code{\link{plot.scan.test}}, \code{\link{as.im.scan.test}}, \code{\link{relrisk}}, \code{\link{scanLRTS}} } \examples{ nsim <- if(interactive()) 19 else 2 rr <- if(interactive()) seq(0.5, 1, by=0.1) else c(0.5, 1) scan.test(redwood, 0.1 * rr, method="poisson", nsim=nsim) scan.test(chorley, rr, method="binomial", case="larynx", nsim=nsim) } \keyword{htest} \keyword{spatial} spatstat/man/discs.Rd0000644000176200001440000000641313333543263014275 0ustar liggesusers\name{discs} \alias{discs} \title{ Union of Discs } \description{ Make a spatial region composed of discs with given centres and radii. } \usage{ discs(centres, radii = marks(centres)/2, \dots, separate = FALSE, mask = FALSE, trim = TRUE, delta = NULL, npoly=NULL) } \arguments{ \item{centres}{ Point pattern giving the locations of centres for the discs. } \item{radii}{ Vector of radii for each disc, or a single number giving a common radius. (Notice that the default assumes that the marks of \code{X} are \emph{diameters}.) } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution, if \code{mask=TRUE}. } \item{separate}{ Logical. If \code{TRUE}, the result is a list containing each disc as a separate entry. If \code{FALSE} (the default), the result is a window obtained by forming the union of the discs. } \item{mask}{ Logical. If \code{TRUE}, the result is a binary mask window. If \code{FALSE}, the result is a polygonal window. Applies only when \code{separate=FALSE}. } \item{trim}{ Logical value indicating whether to restrict the result to the original window of the \code{centres}. Applies only when \code{separate=FALSE}. } \item{delta}{ Argument passed to \code{\link{disc}} to determine the tolerance for the polygonal approximation of each disc. Applies only when \code{mask=FALSE}. Incompatible with \code{npoly}. 
} \item{npoly}{ Argument passed to \code{\link{disc}} to determine the number of edges in the polygonal approximation of each disc. Applies only when \code{mask=FALSE}. Incompatible with \code{delta}. } } \details{ This command is typically applied to a marked point pattern dataset \code{X} in which the marks represent the sizes of objects. The result is a spatial region representing the space occupied by the objects. If the marks of \code{X} represent the diameters of circular objects, then the result of \code{discs(X)} is a spatial region constructed by taking discs, of the specified diameters, centred at the points of \code{X}, and forming the union of these discs. If the marks of \code{X} represent the areas of objects, one could take \code{discs(X, sqrt(marks(X)/pi))} to produce discs of equivalent area. A fast algorithm is used to compute the result as a binary mask, when \code{mask=TRUE}. This option is recommended unless polygons are really necessary. If \code{mask=FALSE}, the discs will be constructed as polygons by the function \code{\link{disc}}. To avoid computational problems, by default, the discs will all be constructed using the same physical tolerance value \code{delta} passed to \code{\link{disc}}. The default is such that the smallest disc will be approximated by a 16-sided polygon. (The argument \code{npoly} should not normally be used, to avoid computational problems arising with small radii.) } \value{ If \code{separate=FALSE}, a window (object of class \code{"owin"}). If \code{separate=TRUE}, a list of windows. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{disc}}, \code{\link{union.owin}} } \examples{ plot(discs(anemones, mask=TRUE, eps=0.5)) } \keyword{spatial} \keyword{datagen} spatstat/man/opening.Rd0000644000176200001440000000467213333543263014634 0ustar liggesusers\name{opening} \alias{opening} \alias{opening.owin} \alias{opening.psp} \alias{opening.ppp} \title{Morphological Opening} \description{ Perform morphological opening of a window, a line segment pattern or a point pattern. } \usage{ opening(w, r, \dots) \method{opening}{owin}(w, r, \dots, polygonal=NULL) \method{opening}{ppp}(w, r, \dots) \method{opening}{psp}(w, r, \dots) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of the opening.} \item{\dots}{ extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if a pixel approximation is used } \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the opened region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological opening (Serra, 1982) of a set \eqn{W} by a distance \eqn{r > 0} is the subset of points in \eqn{W} that can be separated from the boundary of \eqn{W} by a circle of radius \eqn{r}. That is, a point \eqn{x} belongs to the opening if it is possible to draw a circle of radius \eqn{r} (not necessarily centred on \eqn{x}) that has \eqn{x} on the inside and the boundary of \eqn{W} on the outside. The opened set is a subset of \code{W}. For a small radius \eqn{r}, the opening operation has the effect of smoothing out irregularities in the boundary of \eqn{W}. For larger radii, the opening operation removes promontories in the boundary. 
For very large radii, the opened set is empty. The algorithm applies \code{\link{erosion}} followed by \code{\link{dilation}}. } \seealso{ \code{\link{closing}} for the opposite operation. \code{\link{dilation}}, \code{\link{erosion}} for the basic operations. \code{\link{owin}}, \code{\link{as.owin}} for information about windows. } \examples{ v <- opening(letterR, 0.3) plot(letterR, type="n", main="opening") plot(v, add=TRUE, col="grey") plot(letterR, add=TRUE) } \references{ Serra, J. (1982) Image analysis and mathematical morphology. Academic Press. } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/berman.test.Rd0000644000176200001440000001553013333543262015411 0ustar liggesusers\name{berman.test} \alias{berman.test} \alias{berman.test.ppm} \alias{berman.test.ppp} \alias{berman.test.lppm} \alias{berman.test.lpp} \title{Berman's Tests for Point Process Model} \description{ Tests the goodness-of-fit of a Poisson point process model using methods of Berman (1986). } \usage{ berman.test(...) \method{berman.test}{ppp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{ppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{lpp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{lppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ The spatial covariate on which the test will be based. An image (object of class \code{"im"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. 
} \item{\dots}{ Additional arguments controlling the pixel resolution (arguments \code{dimyx} and \code{eps} passed to \code{\link{as.mask}}) or other undocumented features. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The \eqn{Z_1}{Z[1]} test is also known as the Lawson-Waller test. The function \code{berman.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{berman.test(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{berman.test(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model. Thus, you must nominate a spatial covariate for this test. The argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) containing the values of a spatial function. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. 
If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. Next the values of the \code{covariate} at all locations in the observation window are evaluated. The point process intensity of the fitted model is also evaluated at all locations in the window. \itemize{ \item If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is computed as follows. The sum \eqn{S} of the covariate values at all data points is evaluated. The predicted mean \eqn{\mu}{\mu} and variance \eqn{\sigma^2}{\sigma^2} of \eqn{S} are computed from the values of the covariate at all locations in the window. Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1]=(S-\mu)/\sigma}. Closely-related tests were proposed independently by Waller et al (1992) and Lawson (1993) so this test is often termed the Lawson-Waller test in epidemiological literature. \item If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is computed as follows. The values of the \code{covariate} at all locations in the observation window, weighted by the point process intensity, are compiled into a cumulative distribution function \eqn{F}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The standardised sample mean of these numbers is the statistic \eqn{Z_2}{Z[2]}. } In both cases the null distribution of the test statistic is the standard normal distribution, approximately. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. 
The print method for this class gives an informative summary of the test outcome. } \value{ An object of class \code{"htest"} (hypothesis test) and also of class \code{"bermantest"}, containing the results of the test. The return value can be plotted (by \code{\link{plot.bermantest}}) or printed to give an informative summary of the test. } \section{Warning}{ The meaning of a one-sided test must be carefully scrutinised: see the printed output. } \author{\adrian , \rolf and \ege. } \seealso{ \code{\link{cdf.test}}, \code{\link{quadrat.test}}, \code{\link{ppm}} } \references{ Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. Lawson, A.B. (1993) On the analysis of mortality events around a prespecified fixed point. \emph{Journal of the Royal Statistical Society, Series A} \bold{156} (3) 363--377. Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992) Chronic Disease Surveillance and testing of clustering of disease and exposure: Application to leukaemia incidence and TCE-contaminated dumpsites in upstate New York. \emph{Environmetrics} \bold{3}, 281--300. } \examples{ # Berman's data data(copper) X <- copper$SouthPoints L <- copper$SouthLines D <- distmap(L, eps=1) # test of CSR berman.test(X, D) berman.test(X, D, "Z2") } \keyword{htest} \keyword{spatial} spatstat/man/rnoise.Rd0000644000176200001440000000342213333543264014465 0ustar liggesusers\name{rnoise} \alias{rnoise} \title{ Random Pixel Noise } \description{ Generate a pixel image whose pixel values are random numbers following a specified probability distribution. } \usage{ rnoise(rgen = runif, w = square(1), \dots) } \arguments{ \item{rgen}{ Random generator for the pixel values. A function in the \R language. } \item{w}{ Window (region or pixel raster) in which to generate the image. Any data acceptable to \code{\link{as.mask}}. 
} \item{\dots}{ Arguments, matched by name, to be passed to \code{rgen} to specify the parameters of the probability distribution, or passed to \code{\link{as.mask}} to control the pixel resolution. } } \details{ The argument \code{w} could be a window (class \code{"owin"}), a pixel image (class \code{"im"}) or other data. It is first converted to a binary mask by \code{\link{as.mask}} using any relevant arguments in \code{\dots}. Then each pixel inside the window (i.e. with logical value \code{TRUE} in the mask) is assigned a random numerical value by calling the function \code{rgen}. The function \code{rgen} would typically be one of the standard random variable generators like \code{\link{runif}} (uniformly distributed random values) or \code{\link{rnorm}} (Gaussian random values). Its first argument \code{n} is the number of values to be generated. Other arguments to \code{rgen} must be matched by name. } \value{ A pixel image (object of class \code{"im"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{as.mask}}, \code{\link{as.im}}, \code{\link[stats]{Distributions}}. } \examples{ plot(rnoise(), main="Uniform noise") plot(rnoise(rnorm, dimyx=32, mean=2, sd=1), main="White noise") } \keyword{spatial} \keyword{datagen} spatstat/man/localK.Rd0000644000176200001440000001122613503055420014363 0ustar liggesusers\name{localK} \alias{localK} \alias{localL} \title{Neighbourhood density function} \description{ Computes the neighbourhood density function, a local version of the \eqn{K}-function or \eqn{L}-function, defined by Getis and Franklin (1987). } \usage{ localK(X, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localL(X, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{String specifying the edge correction to be applied. 
Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ The command \code{localL} computes the \emph{neighbourhood density function}, a local version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) that was proposed by Getis and Franklin (1987). The command \code{localK} computes the corresponding local analogue of the K-function. Given a spatial point pattern \code{X}, the neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ L[i](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the L function. By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. 
Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{localKinhom} and \code{localLinhom}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \references{ Getis, A. and Franklin, J. (1987) Second-order neighbourhood analysis of mapped point patterns. \emph{Ecology} \bold{68}, 473--477. } \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{localKinhom}}, \code{\link{localLinhom}}. 
} \examples{ data(ponderosa) X <- ponderosa # compute all the local L functions L <- localL(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) # Spatially interpolate the values of L12 # Compare Figure 5(b) of Getis and Franklin (1987) X12 <- X \%mark\% L12 Z <- Smooth(X12, sigma=5, dimyx=128) plot(Z, col=topo.colors(128), main="smoothed neighbourhood density") contour(Z, add=TRUE) points(X, pch=16, cex=0.5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Finhom.Rd0000644000176200001440000001527513434220172014407 0ustar liggesusers\name{Finhom} \alias{Finhom} \title{ Inhomogeneous Empty Space Function } \description{ Estimates the inhomogeneous empty space function of a non-stationary point pattern. } \usage{ Finhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{F} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. 
} \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{F} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity. } \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result. } } \details{ This command computes estimates of the inhomogeneous \eqn{F}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the empty space function \eqn{F} for homogeneous point patterns computed by \code{\link{Fest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). 
The inhomogeneous \eqn{F} function is computed using the border correction, equation (6) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. 
It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Jinhom}}, \code{\link{Fest}} } \examples{ \dontrun{ plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Finhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat/man/volume.Rd0000644000176200001440000000176413333543264014504 0ustar liggesusers\name{volume} \alias{volume} \title{Volume of an Object} \description{ Computes the volume of a spatial object such as a three-dimensional box. } \usage{ volume(x) } \arguments{ \item{x}{ An object whose volume will be computed. } } \value{ The numerical value of the volume of the object. } \details{ This function computes the volume of an object such as a three-dimensional box. 
The function \code{volume} is generic, with methods for the classes \code{"box3"} (three-dimensional boxes) and \code{"boxx"} (multi-dimensional boxes). There is also a method for the class \code{"owin"} (two-dimensional windows), which is identical to \code{\link{area.owin}}, and a method for the class \code{"linnet"} of linear networks, which returns the length of the network. } \seealso{ \code{\link{area.owin}}, \code{\link{volume.box3}}, \code{\link{volume.boxx}}, \code{\link{volume.linnet}} } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{math} spatstat/man/methods.fii.Rd0000644000176200001440000000504413333543263015400 0ustar liggesusers\name{methods.fii} \alias{methods.fii} %DoNotExport \Rdversion{1.1} \alias{print.fii} \alias{plot.fii} \alias{coef.fii} \alias{summary.fii} \alias{print.summary.fii} \alias{coef.summary.fii} \title{ Methods for Fitted Interactions } \description{ These are methods specifically for the class \code{"fii"} of fitted interpoint interactions. } \usage{ \method{print}{fii}(x, \dots) \method{coef}{fii}(object, \dots) \method{plot}{fii}(x, \dots) \method{summary}{fii}(object,\dots) \method{print}{summary.fii}(x, ...) \method{coef}{summary.fii}(object, ...) } \arguments{ \item{x,object}{ An object of class \code{"fii"} representing a fitted interpoint interaction. } \item{\dots}{ Arguments passed to other methods. } } \details{ These are methods for the class \code{"fii"}. An object of class \code{"fii"} represents a fitted interpoint interaction. It is usually obtained by using the command \code{\link{fitin}} to extract the fitted interaction part of a fitted point process model. See \code{\link{fitin}} for further explanation of this class. The commands listed here are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{plot}} and \code{\link{coef}} for objects of the class \code{"fii"}. 
Following the usual convention, \code{summary.fii} returns an object of class \code{summary.fii}, for which there is a print method. The effect is that, when the user types \code{summary(x)}, the summary is printed, but when the user types \code{y <- summary(x)}, the summary information is saved. The method \code{coef.fii} extracts the canonical coefficients of the fitted interaction, and returns them as a numeric vector. The method \code{coef.summary.fii} transforms these values into quantities that are more easily interpretable, in a format that depends on the particular model. There are also methods for the generic commands \code{\link{reach}} and \code{\link{as.interact}}, described elsewhere. } \value{ The \code{print} and \code{plot} methods return \code{NULL}. The \code{summary} method returns an object of class \code{summary.fii}. \code{coef.fii} returns a numeric vector. \code{coef.summary.fii} returns data whose structure depends on the model. } \author{ \adrian } \seealso{ \code{\link{fitin}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}} } \examples{ mod <- ppm(cells, ~1, Strauss(0.1)) f <- fitin(mod) f summary(f) plot(f) coef(f) coef(summary(f)) } \keyword{spatial} \keyword{methods} spatstat/man/slrm.Rd0000644000176200001440000001536513606002175014146 0ustar liggesusers\name{slrm} \alias{slrm} \title{Spatial Logistic Regression} \description{ Fits a spatial logistic regression model to a spatial point pattern. } \usage{ slrm(formula, ..., data = NULL, offset = TRUE, link = "logit", dataAtPoints=NULL, splitby=NULL) } \arguments{ \item{formula}{The model formula. See Details.} \item{\dots}{ Optional arguments passed to \code{\link{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{data}{ Optional. A list containing data required in the formula. The names of entries in the list should correspond to variable names in the formula. The entries should be point patterns, pixel images or windows. 
} \item{offset}{ Logical flag indicating whether the model formula should be augmented by an offset equal to the logarithm of the pixel area. } \item{link}{The link function for the regression model. A character string, specifying a link function for binary regression. } \item{dataAtPoints}{Optional. Exact values of the covariates at the data points. A data frame, with column names corresponding to variables in the \code{formula}, with one row for each point in the point pattern dataset. } \item{splitby}{ Optional. Character string identifying a window. The window will be used to split pixels into sub-pixels. } } \details{ This function fits a Spatial Logistic Regression model (Tukey, 1972; Agterberg, 1974) to a spatial point pattern dataset. The logistic function may be replaced by another link function. The \code{formula} specifies the form of the model to be fitted, and the data to which it should be fitted. The \code{formula} must be an \R formula with a left and right hand side. The left hand side of the \code{formula} is the name of the point pattern dataset, an object of class \code{"ppp"}. The right hand side of the \code{formula} is an expression, in the usual \R formula syntax, representing the functional form of the linear predictor for the model. Each variable name that appears in the formula may be \itemize{ \item one of the reserved names \code{x} and \code{y}, referring to the Cartesian coordinates; \item the name of an entry in the list \code{data}, if this argument is given; \item the name of an object in the parent environment, that is, in the environment where the call to \code{slrm} was issued. 
} Each object appearing on the right hand side of the formula may be \itemize{ \item a pixel image (object of class \code{"im"}) containing the values of a covariate; \item a window (object of class \code{"owin"}), which will be interpreted as a logical covariate which is \code{TRUE} inside the window and \code{FALSE} outside it; \item a \code{function} in the \R language, with arguments \code{x,y}, which can be evaluated at any location to obtain the values of a covariate. } See the Examples below. The fitting algorithm discretises the point pattern onto a pixel grid. The value in each pixel is 1 if there are any points of the point pattern in the pixel, and 0 if there are no points in the pixel. The dimensions of the pixel grid will be determined as follows: \itemize{ \item The pixel grid will be determined by the extra arguments \code{\dots} if they are specified (for example the argument \code{dimyx} can be used to specify the number of pixels). \item Otherwise, if the right hand side of the \code{formula} includes the names of any pixel images containing covariate values, these images will determine the pixel grid for the discretisation. The covariate image with the finest grid (the smallest pixels) will be used. \item Otherwise, the default pixel grid size is given by \code{spatstat.options("npixel")}. } If \code{link="logit"} (the default), the algorithm fits a Spatial Logistic Regression model. This model states that the probability \eqn{p} that a given pixel contains a data point, is related to the covariates through \deqn{\log\frac{p}{1-p} = \eta}{log(p/(1-p)) = eta} where \eqn{\eta}{eta} is the linear predictor of the model (a linear combination of the covariates, whose form is specified by the \code{formula}). If \code{link="cloglog"} then the algorithm fits a model stating that \deqn{\log(-\log(1-p)) = \eta}{log(-log(1-p)) = eta}. 
If \code{offset=TRUE} (the default), the model formula will be augmented by adding an offset term equal to the logarithm of the pixel area. This ensures that the fitted parameters are approximately independent of pixel size. If \code{offset=FALSE}, the offset is not included, and the traditional form of Spatial Logistic Regression is fitted. } \value{ An object of class \code{"slrm"} representing the fitted model. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. Automated stepwise model selection is possible using \code{\link{step}}. Confidence intervals for the parameters can be computed using \code{\link[stats]{confint}}. } \seealso{ \code{\link{anova.slrm}}, \code{\link{coef.slrm}}, \code{\link{fitted.slrm}}, \code{\link{logLik.slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{vcov.slrm}} } \references{ Agterberg, F.P. (1974) Automatic contouring of geological maps to detect target areas for mineral exploration. \emph{Journal of the International Association for Mathematical Geology} \bold{6}, 373--395. Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. {doi: 10.1214/10-EJS581} Tukey, J.W. (1972) Discussion of paper by F.P. Agterberg and S.C. Robinson. \emph{Bulletin of the International Statistical Institute} \bold{44} (1) p. 596. Proceedings, 38th Congress, International Statistical Institute. 
} \examples{ X <- copper$SouthPoints slrm(X ~ 1) slrm(X ~ x+y) slrm(X ~ x+y, link="cloglog") # specify a grid of 2-km-square pixels slrm(X ~ 1, eps=2) Y <- copper$SouthLines Z <- distmap(Y) slrm(X ~ Z) slrm(X ~ Z, dataAtPoints=list(Z=nncross(X,Y,what="dist"))) mur <- murchison mur$dfault <- distfun(mur$faults) slrm(gold ~ dfault, data=mur) slrm(gold ~ dfault + greenstone, data=mur) slrm(gold ~ dfault, data=mur, splitby="greenstone") } \author{\adrian and \rolf. } \keyword{spatial} \keyword{models} spatstat/man/clickdist.Rd0000644000176200001440000000170113333543263015134 0ustar liggesusers\name{clickdist} \alias{clickdist} \title{Interactively Measure Distance} \description{ Measures the distance between two points which the user has clicked on. } \usage{ clickdist() } \value{ A single nonnegative number. } \details{ This function allows the user to measure the distance between two spatial locations, interactively, by clicking on the screen display. When \code{clickdist()} is called, the user is expected to click two points in the current graphics device. The distance between these points will be returned. This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. } \seealso{ \code{\link[graphics]{locator}}, \code{\link{clickppp}}, \code{\link{clicklpp}}, \code{\link{clickpoly}}, \code{\link{clickbox}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{iplot} spatstat/man/spatdim.Rd0000644000176200001440000000313313515012146014616 0ustar liggesusers\name{spatdim} \alias{spatdim} \title{Spatial Dimension of a Dataset} \description{ Extracts the spatial dimension of an object in the \pkg{spatstat} package. } \usage{spatdim(X, intrinsic=FALSE)} \arguments{ \item{X}{ Object belonging to any class defined in the \pkg{spatstat} package. } \item{intrinsic}{ Logical value indicating whether to return the number of intrinsic dimensions. 
See Details. } } \value{ An integer, or \code{NA}. } \details{ This function returns the number of spatial coordinate dimensions of the dataset \code{X}. The results for some of the more common types of objects are as follows: \tabular{ll}{ \bold{object class} \tab \bold{dimension} \cr \code{"ppp"} \tab 2 \cr \code{"lpp"} \tab 2 \cr \code{"pp3"} \tab 3 \cr \code{"ppx"} \tab number of \emph{spatial} dimensions \cr \code{"owin"} \tab 2 \cr \code{"psp"} \tab 2 \cr \code{"ppm"} \tab 2 } Note that time dimensions are not counted. Some spatial objects are lower-dimensional subsets of the space in which they live. This lower number of dimensions is returned if \code{intrinsic=TRUE}. For example, a dataset on a linear network (an object \code{X} of class \code{"linnet", "lpp", "linim", "linfun"} or \code{"lintess"}) returns \code{spatdim(X) = 2} but \code{spatdim(X, intrinsic=TRUE) = 1}. If \code{X} is not a recognised spatial object, the result is \code{NA}. } \author{ \spatstatAuthors. } \examples{ spatdim(lansing) A <- osteo$pts[[1]] spatdim(A) spatdim(domain(A)) spatdim(chicago) spatdim(chicago, intrinsic=TRUE) } spatstat/man/im.apply.Rd0000644000176200001440000000504213414274205014713 0ustar liggesusers\name{im.apply} \alias{im.apply} \title{ Apply Function Pixelwise to List of Images } \description{ Returns a pixel image obtained by applying a function to the values of corresponding pixels in several pixel images. } \usage{ im.apply(X, FUN, \dots, fun.handles.na=FALSE, check=TRUE) } \arguments{ \item{X}{ A list of pixel images (objects of class \code{"im"}). } \item{FUN}{ A function that can be applied to vectors, or a character string giving the name of such a function. } \item{\dots}{ Additional arguments to \code{FUN}. } \item{fun.handles.na}{ Logical value specifying what to do when the data include \code{NA} values. See Details. 
} \item{check}{ Logical value specifying whether to check that the images in \code{X} are compatible (for example that they have the same grid of pixel locations) and to convert them to compatible images if necessary. } } \details{ The argument \code{X} should be a list of pixel images (objects of class \code{"im"}). If the images do not have identical pixel grids, they will be converted to a common grid using \code{\link{harmonise.im}}. At each pixel location, the values of the images in \code{X} at that pixel will be extracted as a vector. The function \code{FUN} will be applied to this vector. The result (which should be a single value) becomes the pixel value of the resulting image. The argument \code{fun.handles.na} specifies what to do when some of the pixel values are \code{NA}. \itemize{ \item If \code{fun.handles.na=FALSE} (the default), the function \code{FUN} is never applied to data that include \code{NA} values; the result is defined to be \code{NA} whenever the data contain \code{NA}. \item If \code{fun.handles.na=TRUE}, the function \code{FUN} will be applied to all pixel data, including those which contain \code{NA} values. } } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{eval.im}} for algebraic operations with images. } \examples{ DA <- density(split(amacrine)) DA im.apply(DA, max) im.apply(DA, sum) ## Example with incompatible patterns of NA values Z <- density(split(ants)) B <- owin(c(438, 666), c(80, 310)) Z[[1]][B] <- NA opa <- par(mfrow=c(2,2)) plot(Z[[1]]) plot(Z[[2]]) #' Default action: NA -> NA plot(im.apply(Z, mean)) #' Use NA handling in mean.default plot(im.apply(Z, mean, na.rm=TRUE, fun.handles.na=TRUE)) par(opa) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/distmap.psp.Rd0000644000176200001440000000444213333543263015432 0ustar liggesusers\name{distmap.psp} \alias{distmap.psp} \title{ Distance Map of Line Segment Pattern } \description{ Computes the distance from each pixel to the nearest line segment in the given line segment pattern. } \usage{ \method{distmap}{psp}(X, \dots) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has attributes \code{"index"} and \code{"bdry"} which are also pixel images. } \details{ The ``distance map'' of a line segment pattern \eqn{X} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}. This function computes the distance map of the line segment pattern \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest line segment of the pattern \code{X}. Distances are computed using analytic geometry. Additionally, the return value has two attributes, \code{"index"} and \code{"bdry"}, which are also pixel images. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. The grey values in \code{"index"} are integers identifying which line segment of \code{X} is closest. This is a method for the generic function \code{\link{distmap}}. Note that this function gives the exact distance from the centre of each pixel to the nearest line segment. To compute the exact distance from the points in a point pattern to the nearest line segment, use \code{\link{distfun}} or one of the low-level functions \code{\link{nncross}} or \code{\link{project2segment}}. 
} \seealso{ \code{\link{distmap}}, \code{\link{distmap.owin}}, \code{\link{distmap.ppp}}, \code{\link{distfun}}, \code{\link{nncross}}, \code{\link{nearestsegment}}, \code{\link{project2segment}}. } \examples{ a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) Z <- distmap(a) plot(Z) plot(a, add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/angles.psp.Rd0000644000176200001440000000312713474140142015234 0ustar liggesusers\name{angles.psp} \alias{angles.psp} \title{Orientation Angles of Line Segments} \description{ Computes the orientation angle of each line segment in a line segment pattern. } \usage{ angles.psp(x, directed=FALSE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{directed}{ Logical flag. See details. } } \value{ Numeric vector. } \details{ For each line segment, the angle of inclination to the \eqn{x}-axis (in radians) is computed, and the angles are returned as a numeric vector. If \code{directed=TRUE}, the directed angle of orientation is computed. The angle respects the sense of direction from \code{(x0,y0)} to \code{(x1,y1)}. The values returned are angles in the full range from \eqn{-\pi}{-pi} to \eqn{\pi}{pi}. The angle is computed as \code{atan2(y1-y0,x1-x0)}. See \code{\link{atan2}}. If \code{directed=FALSE}, the undirected angle of orientation is computed. Angles differing by \eqn{\pi}{pi} are regarded as equivalent. The values returned are angles in the range from \eqn{0} to \eqn{\pi}{pi}. These angles are computed by first computing the directed angle, then adding \eqn{\pi}{pi} to any negative angles. } \seealso{ \code{\link{psp}}, \code{\link{marks.psp}}, \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{lengths.psp}}, \code{\link{endpoints.psp}}, \code{\link{extrapolate.psp}}.
} \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- angles.psp(a) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/harmonise.Rd0000644000176200001440000000261013333543263015150 0ustar liggesusers\name{harmonise} \alias{harmonise} \alias{harmonize} \title{Make Objects Compatible} \description{ Converts several objects of the same class to a common format so that they can be combined or compared. } \usage{ harmonise(\dots) harmonize(\dots) } \arguments{ \item{\dots}{ Any number of objects of the same class. } } \details{ This generic command takes any number of objects of the same class, and \emph{attempts} to make them compatible in the sense of \code{\link{compatible}} so that they can be combined or compared. There are methods for the classes \code{"fv"} (\code{\link{harmonise.fv}}) and \code{"im"} (\code{\link{harmonise.im}}). All arguments \code{\dots} must be objects of the same class. The result will be a list, of length equal to the number of arguments \code{\dots}, containing new versions of each of these objects, converted to a common format. If the arguments were named (\code{name=value}) then the return value also carries these names. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are objects of the same class. If the arguments were named (\code{name=value}) then the return value also carries these names. } \author{\adrian, \rolf and \ege.
} \seealso{ \code{\link{compatible}}, \code{\link{harmonise.fv}}, \code{\link{harmonise.im}} } \keyword{spatial} \keyword{manip} spatstat/man/PPversion.Rd0000644000176200001440000000555213333543262015117 0ustar liggesusers\name{PPversion} \alias{PPversion} \alias{QQversion} \title{ Transform a Function into its P-P or Q-Q Version } \description{ Given a function object \code{f} containing both the estimated and theoretical versions of a summary function, these operations combine the estimated and theoretical functions into a new function. When plotted, the new function gives either the P-P plot or Q-Q plot of the original \code{f}. } \usage{ PPversion(f, theo = "theo", columns = ".") QQversion(f, theo = "theo", columns = ".") } \arguments{ \item{f}{ The function to be transformed. An object of class \code{"fv"}. } \item{theo}{ The name of the column of \code{f} that should be treated as the theoretical value of the function. } \item{columns}{ Character vector, specifying the columns of \code{f} to which the transformation will be applied. Either a vector of names of columns of \code{f}, or one of the abbreviations recognised by \code{\link{fvnames}}. } } \details{ The argument \code{f} should be an object of class \code{"fv"}, containing both empirical estimates \eqn{\widehat f(r)}{fhat(r)} and a theoretical value \eqn{f_0(r)}{f0(r)} for a summary function. The \emph{P--P version} of \code{f} is the function \eqn{g(x) = \widehat f (f_0^{-1}(x))}{g(x) = fhat(f0^(-1)(x))} where \eqn{f_0^{-1}}{f0^(-1)} is the inverse function of \eqn{f_0}{f0}. A plot of \eqn{g(x)} against \eqn{x} is equivalent to a plot of \eqn{\widehat f(r)}{fhat(r)} against \eqn{f_0(r)}{f0(r)} for all \eqn{r}. If \code{f} is a cumulative distribution function (such as the result of \code{\link{Fest}} or \code{\link{Gest}}) then this is a P--P plot, a plot of the observed versus theoretical probabilities for the distribution. 
The diagonal line \eqn{y=x} corresponds to perfect agreement between observed and theoretical distribution. The \emph{Q--Q version} of \code{f} is the function \eqn{h(x) = f_0^{-1}(\widehat f(x))}{f0^(-1)(fhat(x))}. If \code{f} is a cumulative distribution function, a plot of \eqn{h(x)} against \eqn{x} is a Q--Q plot, a plot of the observed versus theoretical quantiles of the distribution. The diagonal line \eqn{y=x} corresponds to perfect agreement between observed and theoretical distribution. Another straight line corresponds to the situation where the observed variable is a linear transformation of the theoretical variable. For a point pattern \code{X}, the Q--Q version of \code{Kest(X)} is essentially equivalent to \code{Lest(X)}. } \value{ Another object of class \code{"fv"}. } \author{ Tom Lawrence and Adrian Baddeley. Implemented by \spatstatAuthors. } \seealso{ \code{\link{plot.fv}} } \examples{ opa <- par(mar=0.1+c(5,5,4,2)) G <- Gest(redwoodfull) plot(PPversion(G)) plot(QQversion(G)) par(opa) } \keyword{spatial} \keyword{nonparametric} \keyword{manip} spatstat/man/Smoothfun.ppp.Rd0000644000176200001440000000370613333543264015753 0ustar liggesusers\name{Smoothfun.ppp} \alias{Smoothfun} \alias{Smoothfun.ppp} \title{ Smooth Interpolation of Marks as a Spatial Function } \description{ Perform spatial smoothing of numeric values observed at a set of irregular locations, and return the result as a function of spatial location. } \usage{ Smoothfun(X, \dots) \method{Smoothfun}{ppp}(X, sigma = NULL, \dots, weights = NULL, edge = TRUE, diggle = FALSE) } \arguments{ \item{X}{ Marked point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth, or bandwidth selection function, passed to \code{\link{Smooth.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{Smooth.ppp}}. } \item{weights}{ Optional vector of weights associated with the points of \code{X}. } \item{edge,diggle}{ Logical arguments controlling the edge correction. 
Arguments passed to \code{\link{Smooth.ppp}}. } } \details{ The commands \code{Smoothfun} and \code{\link{Smooth}} both perform kernel-smoothed spatial interpolation of numeric values observed at irregular spatial locations. The difference is that \code{\link{Smooth}} returns a pixel image, containing the interpolated values at a grid of locations, while \code{Smoothfun} returns a \code{function(x,y)} which can be used to compute the interpolated value at \emph{any} spatial location. For purposes such as model-fitting it is more accurate to use \code{Smoothfun} to interpolate data. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"Smoothfun"} which has methods for \code{print} and \code{\link{as.im}}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{Smooth}} } \examples{ f <- Smoothfun(longleaf) f f(120, 80) plot(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/as.mask.psp.Rd0000644000176200001440000000301413333543262015317 0ustar liggesusers\name{as.mask.psp} \alias{as.mask.psp} \title{ Convert Line Segment Pattern to Binary Pixel Mask } \description{ Converts a line segment pattern to a binary pixel mask by determining which pixels intersect the lines. } \usage{ as.mask.psp(x, W=NULL, ...) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}). } \item{W}{ Optional window (object of class \code{"owin"}) determining the pixel raster. } \item{\dots}{ Optional extra arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } } \details{ This function converts a line segment pattern to a binary pixel mask by determining which pixels intersect the lines. The pixel raster is determined by \code{W} and the optional arguments \code{\dots}. If \code{W} is missing or \code{NULL}, it defaults to the window containing \code{x}. 
Then \code{W} is converted to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. } \value{ A window (object of class \code{"owin"}) which is a binary pixel mask (type \code{"mask"}). } \seealso{ \code{\link{pixellate.psp}}, \code{\link{as.mask}}. Use \code{\link{pixellate.psp}} if you want to measure the length of line in each pixel. } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(as.mask.psp(X)) plot(X, add=TRUE, col="red") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/Gfox.Rd0000644000176200001440000001020313624152173014062 0ustar liggesusers\name{Gfox} \alias{Gfox} \alias{Jfox} \title{ Foxall's Distance Functions } \description{ Given a point pattern \code{X} and a spatial object \code{Y}, compute estimates of Foxall's \eqn{G} and \eqn{J} functions. } \usage{ Gfox(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W, \dots) Jfox(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W, \dots, warn.trim=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) from which distances will be measured. } \item{Y}{ An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"} to which distances will be measured. Alternatively a pixel image (class \code{"im"}) with logical values. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{Gfox(r)} or \eqn{Jfox(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{Gfox(r)} or \eqn{Jfox(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. 
Alternatively \code{correction="all"} selects all options. } \item{W}{ Optional. A window (object of class \code{"owin"}) to be taken as the window of observation. The distribution function will be estimated from data inside \code{W}. The default is \code{W=Frame(Y)} when \code{Y} is a window, and \code{W=Window(Y)} otherwise. } \item{\dots}{ Extra arguments affecting the discretisation of distances. These arguments are ignored by \code{Gfox}, but \code{Jfox} passes them to \code{\link{Hest}} to determine the discretisation of the spatial domain. } \item{warn.trim}{ Logical value indicating whether a warning should be issued by \code{Jfox} when the window of \code{X} had to be trimmed in order to be a subset of the frame of \code{Y}. } } \details{ Given a point pattern \code{X} and another spatial object \code{Y}, these functions compute two nonparametric measures of association between \code{X} and \code{Y}, introduced by Foxall (Foxall and Baddeley, 2002). Let the random variable \eqn{R} be the distance from a typical point of \code{X} to the object \code{Y}. Foxall's \eqn{G}-function is the cumulative distribution function of \eqn{R}: \deqn{G(r) = P(R \le r)}{P(R <= r)} Let the random variable \eqn{S} be the distance from a \emph{fixed} point in space to the object \code{Y}. The cumulative distribution function of \eqn{S} is the (unconditional) spherical contact distribution function \deqn{H(r) = P(S \le r)}{H(r) = P(S <= r)} which is computed by \code{\link{Hest}}. Foxall's \eqn{J}-function is the ratio \deqn{ J(r) = \frac{1-G(r)}{1-H(r)} }{ J(r) = (1-G(r))/(1-H(r)) } For further interpretation, see Foxall and Baddeley (2002). Accuracy of \code{Jfox} depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. 
} \value{ A function value table (object of class \code{"fv"}) which can be printed, plotted, or converted to a data frame of values. } \references{ Foxall, R. and Baddeley, A. (2002) Nonparametric measures of association between a spatial point process and a random set, with geological applications. \emph{Applied Statistics} \bold{51}, 165--182. } \seealso{ \code{\link{Gest}}, \code{\link{Hest}}, \code{\link{Jest}}, \code{\link{Fest}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines G <- Gfox(X,Y) J <- Jfox(X,Y, correction="km") \testonly{ J <- Jfox(X,Y, correction="km", eps=1) } \dontrun{ J <- Jfox(X,Y, correction="km", eps=0.25) } } \author{Rob Foxall and \adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/pseudoR2.Rd0000644000176200001440000000400213333543264014664 0ustar liggesusers\name{pseudoR2} \alias{pseudoR2} \alias{pseudoR2.ppm} \alias{pseudoR2.lppm} \title{ Calculate Pseudo-R-Squared for Point Process Model } \description{ Given a fitted point process model, calculate the pseudo-R-squared value, which measures the fraction of variation in the data that is explained by the model. } \usage{ pseudoR2(object, \dots) \method{pseudoR2}{ppm}(object, \dots, keepoffset=TRUE) \method{pseudoR2}{lppm}(object, \dots, keepoffset=TRUE) } \arguments{ \item{object}{ Fitted point process model. An object of class \code{"ppm"} or \code{"lppm"}. } \item{keepoffset}{ Logical value indicating whether to retain offset terms in the model when computing the deviance difference. See Details. } \item{\dots}{ Additional arguments passed to \code{\link{deviance.ppm}} or \code{\link{deviance.lppm}}. } } \details{ The function \code{pseudoR2} is generic, with methods for fitted point process models of class \code{"ppm"} and \code{"lppm"}. 
This function computes McFadden's pseudo-Rsquared \deqn{ R^2 = 1 - \frac{D}{D_0} }{ R^2 = 1 - D/D0 } where \eqn{D} is the deviance of the fitted model \code{object}, and \eqn{D_0}{D0} is the deviance of the null model. Deviance is defined as twice the negative log-likelihood or log-pseudolikelihood. The null model is usually obtained by re-fitting the model using the trend formula \code{~1}. However if the original model formula included \code{offset} terms, and if \code{keepoffset=TRUE} (the default), then the null model formula consists of these offset terms. This ensures that the \code{pseudoR2} value is non-negative. } \value{ A single numeric value. } \author{ \spatstatAuthors. } \seealso{ \code{\link{deviance.ppm}}, \code{\link{deviance.lppm}}. } \examples{ fit <- ppm(swedishpines ~ x+y) pseudoR2(fit) xcoord <- as.im(function(x,y) x, Window(swedishpines)) fut <- ppm(swedishpines ~ offset(xcoord/200) + y) pseudoR2(fut) } \keyword{spatial} \keyword{models} spatstat/man/pcf3est.Rd0000644000176200001440000001021613333543264014534 0ustar liggesusers\name{pcf3est} \Rdversion{1.1} \alias{pcf3est} \title{ Pair Correlation Function of a Three-Dimensional Point Pattern } \description{ Estimates the pair correlation function from a three-dimensional point pattern. } \usage{ pcf3est(X, ..., rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{delta}{ Optional. Half-width of the Epanechnikov smoothing kernel. } \item{adjust}{ Optional. 
Adjustment factor for the default value of \code{delta}. } \item{biascorrect}{ Logical value. Whether to correct for underestimation due to truncation of the kernel near \eqn{r=0}. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the pair correlation function is \deqn{ g_3(r) = \frac{K_3'(r)}{4\pi r^2} }{ g3(r) = K3'(r)/(4 * pi * r^2) } where \eqn{K_3'}{K3'} is the derivative of the three-dimensional \eqn{K}-function (see \code{\link{K3est}}). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. Kernel smoothing is applied to these distance values (weighted by an edge correction factor) and the result is renormalised to give the estimate of \eqn{g_3(r)}{g3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Kernel smoothing is performed using the Epanechnikov kernel with half-width \code{delta}. If \code{delta} is missing, the default is to use the rule-of-thumb \eqn{\delta = 0.26/\lambda^{1/3}}{delta = 0.26/lambda^(1/3)} where \eqn{\lambda = n/v}{lambda = n/v} is the estimated intensity, computed from the number \eqn{n} of data points and the volume \eqn{v} of the enclosing box. This default value of \code{delta} is multiplied by the factor \code{adjust}. The smoothing estimate of the pair correlation \eqn{g_3(r)}{g3(r)} is typically an underestimate when \eqn{r} is small, due to truncation of the kernel at \eqn{r=0}. If \code{biascorrect=TRUE}, the smoothed estimate is approximately adjusted for this bias. This is advisable whenever the dataset contains a sufficiently large number of points. 
} \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. Additionally the value of \code{delta} is returned as an attribute of this object. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link{K3est}}, \code{\link{pcf}} } \examples{ X <- rpoispp3(250) Z <- pcf3est(X) Zbias <- pcf3est(X, biascorrect=FALSE) if(interactive()) { opa <- par(mfrow=c(1,2)) plot(Z, ylim.covers=c(0, 1.2)) plot(Zbias, ylim.covers=c(0, 1.2)) par(opa) } attr(Z, "delta") } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.psp.Rd0000644000176200001440000001467613333543262014405 0ustar liggesusers\name{as.psp} \alias{as.psp} \alias{as.psp.psp} \alias{as.psp.data.frame} \alias{as.psp.matrix} \alias{as.psp.default} \title{Convert Data To Class psp} \description{ Tries to coerce any reasonable kind of data object to a line segment pattern (an object of class \code{"psp"}) for use by the \pkg{spatstat} package. 
} \usage{ as.psp(x, \dots, from=NULL, to=NULL) \method{as.psp}{psp}(x, \dots, check=FALSE, fatal=TRUE) \method{as.psp}{data.frame}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{matrix}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{default}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) } \arguments{ \item{x}{Data which will be converted into a line segment pattern} \item{window}{Data which define a window for the pattern.} \item{\dots}{Ignored.} \item{marks}{(Optional) vector or data frame of marks for the pattern} \item{check}{ Logical value indicating whether to check the validity of the data, e.g. to check that the line segments lie inside the window. } \item{fatal}{Logical value. See Details.} \item{from,to}{Point patterns (object of class \code{"ppp"}) containing the first and second endpoints (respectively) of each segment. Incompatible with \code{x}. } } \value{ An object of class \code{"psp"} (see \code{\link{psp.object}}) describing the line segment pattern and its window of observation. The value \code{NULL} may also be returned; see Details. } \details{ Converts the dataset \code{x} to a line segment pattern (an object of class \code{"psp"}; see \code{\link{psp.object}} for an overview). This function is normally used to convert an existing line segment pattern dataset, stored in another format, to the \code{"psp"} format. To create a new point pattern from raw data such as \eqn{x,y} coordinates, it is normally easier to use the creator function \code{\link{psp}}. 
The dataset \code{x} may be: \itemize{ \item an object of class \code{"psp"} \item a data frame with at least 4 columns \item a structure (list) with elements named \code{x0, y0, x1, y1} or elements named \code{xmid, ymid, length, angle} and possibly a fifth element named \code{marks} } If \code{x} is a data frame the interpretation of its columns is as follows: \itemize{ \item If there are columns named \code{x0, y0, x1, y1} then these will be interpreted as the coordinates of the endpoints of the segments and used to form the \code{ends} component of the \code{psp} object to be returned. \item If there are columns named \code{xmid, ymid, length, angle} then these will be interpreted as the coordinates of the segment midpoints, the lengths of the segments, and the orientations of the segments in radians and used to form the \code{ends} component of the \code{psp} object to be returned. \item If there is a column named \code{marks} then this will be interpreted as the marks of the pattern provided that the argument \code{marks} of this function is \code{NULL}. If argument \code{marks} is not \code{NULL} then the value of this argument is taken to be the marks of the pattern and the column named \code{marks} is ignored (with a warning). In either case the column named marks is deleted and omitted from further consideration. \item If there is no column named \code{marks} and if the \code{marks} argument of this function is \code{NULL}, and if after interpreting 4 columns of \code{x} as determining the \code{ends} component of the \code{psp} object to be returned, there remain other columns of \code{x}, then these remaining columns will be taken to form a data frame of marks for the \code{psp} object to be returned. 
} If \code{x} is a structure (list) with elements named \code{x0, y0, x1, y1, marks} or \code{xmid, ymid, length, angle, marks}, then the element named \code{marks} will be interpreted as the marks of the pattern provided that the argument \code{marks} of this function is \code{NULL}. If this argument is non-\code{NULL} then it is interpreted as the marks of the pattern and the element \code{marks} of \code{x} is ignored --- with a warning. Alternatively, you may specify two point patterns \code{from} and \code{to} containing the first and second endpoints of the line segments. The argument \code{window} is converted to a window object by the function \code{\link{as.owin}}. The argument \code{fatal} indicates what to do when the data cannot be converted to a line segment pattern. If \code{fatal=TRUE}, a fatal error will be generated; if \code{fatal=FALSE}, the value \code{NULL} is returned. The function \code{as.psp} is generic, with methods for the classes \code{"psp"}, \code{"data.frame"}, \code{"matrix"} and a default method. Point pattern datasets can also be created by the function \code{\link{psp}}. } \section{Warnings}{ If only a proper subset of the names \code{x0,y0,x1,y1} or \code{xmid,ymid,length,angle} appear amongst the names of the columns of \code{x} where \code{x} is a data frame, then these special names are ignored. For example if the names of the columns were \code{xmid,ymid,length,degrees}, then these columns would be interpreted as if they represented \code{x0,y0,x1,y1} in that order. Whether it gets used or not, a column named \code{marks} is \emph{always} removed from \code{x} before any attempt to form the \code{ends} component of the \code{psp} object that is returned. } \seealso{ \code{\link{psp}}, \code{\link{psp.object}}, \code{\link{as.owin}}, \code{\link{owin.object}}. See \code{\link{edges}} for extracting the edges of a polygonal window as a \code{"psp"} object.
} \examples{ mat <- matrix(runif(40), ncol=4) mx <- data.frame(v1=sample(1:4,10,TRUE), v2=factor(sample(letters[1:4],10,TRUE),levels=letters[1:4])) a <- as.psp(mat, window=owin(),marks=mx) mat <- cbind(as.data.frame(mat),mx) b <- as.psp(mat, window=owin()) # a and b are identical. stuff <- list(xmid=runif(10), ymid=runif(10), length=rep(0.1, 10), angle=runif(10, 0, 2 * pi)) a <- as.psp(stuff, window=owin()) b <- as.psp(from=runifpoint(10), to=runifpoint(10)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/pairdist.ppx.Rd0000644000176200001440000000243013333543263015610 0ustar liggesusers\name{pairdist.ppx} \alias{pairdist.ppx} \title{Pairwise Distances in Any Dimensions} \description{ Computes the matrix of distances between all pairs of points in a multi-dimensional point pattern. } \usage{ \method{pairdist}{ppx}(X, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a multi-dimensional point pattern \code{X} (an object of class \code{"ppx"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. 
} \seealso{ \code{\link{pairdist}}, \code{\link{crossdist}}, \code{\link{nndist}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),z=runif(4),w=runif(4)) X <- ppx(data=df) pairdist(X) } \author{ \adrian } \keyword{spatial} \keyword{math} spatstat/man/edges2vees.Rd0000644000176200001440000000303713333543263015223 0ustar liggesusers\name{edges2vees} \alias{edges2vees} \title{ List Dihedral Triples in a Graph } \description{ Given a list of edges between vertices, compile a list of all \sQuote{vees} or dihedral triples formed by these edges. } \usage{ edges2vees(iedge, jedge, nvert=max(iedge, jedge), \dots, check=TRUE) } \arguments{ \item{iedge,jedge}{ Integer vectors, of equal length, specifying the edges. } \item{nvert}{ Number of vertices in the network. } \item{\dots}{Ignored} \item{check}{Logical. Whether to check validity of input data.} } \details{ Given a finite graph with \code{nvert} vertices and with edges specified by \code{iedge, jedge}, this low-level function finds all \sQuote{vees} or \sQuote{dihedral triples} in the graph, that is, all triples of vertices \code{(i,j,k)} where \code{i} and \code{j} are joined by an edge and \code{i} and \code{k} are joined by an edge. The interpretation of \code{iedge, jedge} is that each successive pair of entries specifies an edge in the graph. The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}. Entries of \code{iedge} and \code{jedge} must be integers from 1 to \code{nvert}. } \value{ A 3-column matrix of integers, in which each row represents a triple of vertices, with the first vertex joined to the other two vertices. 
} \seealso{ \code{\link{edges2triangles}} } \author{\adrian and \rolf } \examples{ i <- c(1, 2, 5, 5, 1, 4, 2) j <- c(2, 3, 3, 1, 3, 2, 5) edges2vees(i, j) } \keyword{spatial} \keyword{manip} spatstat/man/as.lpp.Rd0000644000176200001440000000534213333543262014364 0ustar liggesusers\name{as.lpp} \Rdversion{1.1} \alias{as.lpp} \title{ Convert Data to a Point Pattern on a Linear Network } \description{ Convert various kinds of data to a point pattern on a linear network. } \usage{ as.lpp(x=NULL, y=NULL, seg=NULL, tp=NULL, \dots, marks=NULL, L=NULL, check=FALSE, sparse) } \arguments{ \item{x,y}{ Vectors of cartesian coordinates, or any data acceptable to \code{\link[grDevices]{xy.coords}}. Alternatively \code{x} can be a point pattern on a linear network (object of class \code{"lpp"}) or a planar point pattern (object of class \code{"ppp"}). } \item{seg,tp}{ Optional local coordinates. Vectors of the same length as \code{x,y}. See Details. } \item{\dots}{Ignored.} \item{marks}{ Optional marks for the point pattern. A vector or factor with one entry for each point, or a data frame or hyperframe with one row for each point. } \item{L}{ Linear network (object of class \code{"linnet"}) on which the points lie. } \item{check}{ Logical. Whether to check the validity of the spatial coordinates. } \item{sparse}{ Optional logical value indicating whether to store the linear network data in a sparse matrix representation or not. See \code{\link{linnet}}. } } \details{ This function converts data in various formats into a point pattern on a linear network (object of class \code{"lpp"}). The possible formats are: \itemize{ \item \code{x} is already a point pattern on a linear network (object of class \code{"lpp"}). Then \code{x} is returned unchanged. \item \code{x} is a planar point pattern (object of class \code{"ppp"}). Then \code{x} is converted to a point pattern on the linear network \code{L} using \code{\link{lpp}}. \item \code{x,y,seg,tp} are vectors of equal length. 
These specify that the \code{i}th point has Cartesian coordinates \code{(x[i],y[i])}, and lies on segment number \code{seg[i]} of the network \code{L}, at a fractional position \code{tp[i]} along that segment (with \code{tp=0} representing one endpoint and \code{tp=1} the other endpoint of the segment). \item \code{x,y} are missing and \code{seg,tp} are vectors of equal length as described above. \item \code{seg,tp} are \code{NULL}, and \code{x,y} are data in a format acceptable to \code{\link[grDevices]{xy.coords}} specifying the Cartesian coordinates. } } \value{ A point pattern on a linear network (object of class \code{"lpp"}). } \seealso{ \code{\link{lpp}}. } \examples{ A <- as.psp(simplenet) X <- runifpointOnLines(10, A) is.ppp(X) Y <- as.lpp(X, L=simplenet) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/model.matrix.mppm.Rd0000644000176200001440000000401613333543263016540 0ustar liggesusers\name{model.matrix.mppm} \alias{model.matrix.mppm} \title{Extract Design Matrix of Point Process Model for Several Point Patterns} \description{ Given a point process model fitted to a list of point patterns, this function extracts the design matrix. } \usage{ \method{model.matrix}{mppm}(object, ..., keepNA=TRUE, separate=FALSE) } \arguments{ \item{object}{ A point process model fitted to several point patterns. An object of class \code{"mppm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{keepNA}{ Logical. Determines whether rows containing \code{NA} values will be deleted or retained. } \item{separate}{ Logical value indicating whether to split the model matrix into sub-matrices corresponding to each of the original point patterns. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a point process model fitted to several point patterns. 
The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}. This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. The result is a matrix with one column for every constructed covariate in the model, and one row for every quadrature point. If \code{separate=TRUE} this matrix will be split into sub-matrices corresponding to the original point patterns, and the result will be a list containing these matrices. } \value{ A matrix (or list of matrices). Columns of the matrix are canonical covariates in the model. } \author{ \spatstatAuthors. } \seealso{ \code{\link{model.matrix}}, \code{\link{mppm}}. } \examples{ fit <- mppm(Points ~ Image + x, demohyper) head(model.matrix(fit)) # matrix with three columns: '(Intercept)', 'x' and 'Image' } \keyword{spatial} \keyword{models} spatstat/man/emend.ppm.Rd0000644000176200001440000000776413333543263015055 0ustar liggesusers\name{emend.ppm} \alias{emend.ppm} \alias{project.ppm} \title{ Force Point Process Model to be Valid } \description{ Ensures that a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ project.ppm(object, \dots, fatal=FALSE, trace=FALSE) \method{emend}{ppm}(object, \dots, fatal=FALSE, trace=FALSE) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{Ignored.} \item{fatal}{ Logical value indicating whether to generate an error if the model cannot be projected to a valid model. } \item{trace}{ Logical value indicating whether to print a trace of the decision process. } } \details{ The functions \code{emend.ppm} and \code{project.ppm} are identical: \code{emend.ppm} is a method for the generic \code{\link{emend}}, while \code{project.ppm} is an older name for the same function. 
The purpose of the function is to ensure that a fitted model is valid. The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, the fitted model returned by \code{\link{ppm}} may not actually exist as a point process. First, some of the fitted coefficients of the model may be \code{NA} or infinite values. This usually occurs when the data are insufficient to estimate all the parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. Second, unlike a regression model, which is well-defined for any finite values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{emend.ppm} or \code{project.ppm} modifies the model \code{object} so that the model is valid. It identifies the terms in the model \code{object} that are associated with illegal parameter values (i.e. parameter values which are either \code{NA}, infinite, or outside their permitted range). It considers all possible sub-models of \code{object} obtained by deleting one or more of these terms. It identifies which of these submodels are valid, and chooses the valid submodel with the largest pseudolikelihood. 
The result of \code{emend.ppm} or \code{project.ppm} is the true maximum pseudolikelihood fit to the data. For large datasets or complex models, the algorithm used in \code{emend.ppm} or \code{project.ppm} may be time-consuming, because it takes time to compute all the sub-models. A faster, approximate algorithm can be applied by setting \code{spatstat.options(project.fast=TRUE)}. This produces a valid submodel, which may not be the maximum pseudolikelihood submodel. Use the function \code{\link{valid.ppm}} to check whether a fitted model object specifies a well-defined point process. Use the expression \code{all(is.finite(coef(object)))} to determine whether all parameters are identifiable. } \value{ Another point process model (object of class \code{"ppm"}). } \author{\adrian and \rolf } \seealso{ \code{\link{ppm}}, \code{\link{valid.ppm}}, \code{\link{emend}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(redwood ~1, Strauss(0.1)) coef(fit) fit2 <- emend(fit) coef(fit2) } \keyword{spatial} \keyword{models} spatstat/man/as.function.im.Rd0000644000176200001440000000156113333543262016021 0ustar liggesusers\name{as.function.im} \alias{as.function.im} \title{ Convert Pixel Image to Function of Coordinates } \description{ Converts a pixel image to a function of the \eqn{x} and \eqn{y} coordinates. } \usage{ \method{as.function}{im}(x, ...) } \arguments{ \item{x}{ Pixel image (object of class \code{"im"}). } \item{\dots}{ Ignored. } } \details{ This command converts a pixel image (object of class \code{"im"}) to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This function returns the pixel values at the specified locations. } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{[.im}} } \examples{ d <- density(cells) f <- as.function(d) f(0.1, 0.3) } \keyword{spatial} \keyword{manip} spatstat/man/is.ppm.Rd0000644000176200001440000000145513333543263014377 0ustar liggesusers\name{is.ppm} \alias{is.ppm} \alias{is.lppm} \alias{is.kppm} \alias{is.slrm} \title{Test Whether An Object Is A Fitted Point Process Model} \description{ Checks whether its argument is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \usage{ is.ppm(x) is.kppm(x) is.lppm(x) is.slrm(x) } \arguments{ \item{x}{Any object.} } \details{ These functions test whether the object \code{x} is a fitted point process model object of the specified class. The result of \code{is.ppm(x)} is \code{TRUE} if \code{x} has \code{"ppm"} amongst its classes, and otherwise \code{FALSE}. Similarly for the other functions. } \value{ A single logical value. } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/logLik.dppm.Rd0000644000176200001440000000554513333543263015355 0ustar liggesusers\name{logLik.dppm} \alias{logLik.dppm} \alias{AIC.dppm} \alias{extractAIC.dppm} \alias{nobs.dppm} \title{Log Likelihood and AIC for Fitted Determinantal Point Process Model} \description{ Extracts the log Palm likelihood, deviance, and AIC of a fitted determinantal point process model. } \usage{ \method{logLik}{dppm}(object, ...) \method{AIC}{dppm}(object, \dots, k=2) \method{extractAIC}{dppm}(fit, scale=0, k=2, \dots) \method{nobs}{dppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"dppm"}. } \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. 
} } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted determinantal point process model. It is obtained from the model-fitting function \code{\link{dppm}}. These methods apply only when the model was fitted by maximising the Palm likelihood (Tanaka et al, 2008) by calling \code{\link{dppm}} with the argument \code{method="palm"}. The method \code{logLik.dppm} computes the maximised value of the log Palm likelihood for the fitted model \code{object}. The methods \code{AIC.dppm} and \code{extractAIC.dppm} compute the Akaike Information Criterion AIC for the fitted model based on the Palm likelihood (Tanaka et al, 2008) \deqn{ AIC = -2 \log(PL) + k \times \mbox{edf} }{ AIC = -2 * log(PL) + k * edf } where \eqn{PL} is the maximised Palm likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.dppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods, but it does not work for determinantal models yet due to a missing implementation of \code{update.dppm}. } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. \code{nobs} returns an integer value. } \references{ Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. 
} \seealso{ \code{\link{dppm}}, \code{\link{logLik.ppm}} } \author{\adrian \rolf and \ege } \examples{ fit <- dppm(swedishpines ~ x, dppGauss(), method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) } \keyword{spatial} \keyword{models} spatstat/man/residuals.ppm.Rd0000644000176200001440000002007713571674202015762 0ustar liggesusers\name{residuals.ppm} \alias{residuals.ppm} \title{ Residuals for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{ppm}(object, type="raw", \dots, check=TRUE, drop=FALSE, fittedvalues=NULL, new.coef=NULL, dropcoef=FALSE, quad=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which residuals should be calculated. } \item{type}{ String indicating the type of residuals to be calculated. Current options are \code{"raw"}, \code{"inverse"}, \code{"pearson"} and \code{"score"}. A partial match is adequate. } \item{\dots}{ Ignored. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. See \code{\link{quad.ppm}} for explanation. } \item{fittedvalues}{ Vector of fitted values for the conditional intensity at the quadrature points, from which the residuals will be computed. For expert use only. } \item{new.coef}{ Optional. Numeric vector of coefficients for the model, replacing \code{coef(object)}. See the section on Modified Residuals below. } \item{dropcoef}{ Internal use only. } \item{quad}{ Optional. Data specifying how to re-fit the model. A list of arguments passed to \code{\link{quadscheme}}. See the section on Modified Residuals below. 
} } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function computes several kinds of residuals for the fit of a point process model to a spatial point pattern dataset (Baddeley et al, 2005). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. This fitted model object contains complete information about the original data pattern. Residuals are attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals over all (data and dummy) points in a spatial region \eqn{B} has mean zero. For further explanation, see Baddeley et al (2005). The type of residual is chosen by the argument \code{type}. Current options are \describe{ \item{\code{"raw"}:}{ the raw residuals \deqn{ r_j = z_j - w_j \lambda_j }{ r[j] = z[j] - w[j] lambda[j] } at the quadrature points \eqn{u_j}{u[j]}, where \eqn{z_j}{z[j]} is the indicator equal to 1 if \eqn{u_j}{u[j]} is a data point and 0 if \eqn{u_j}{u[j]} is a dummy point; \eqn{w_j}{w[j]} is the quadrature weight attached to \eqn{u_j}{u[j]}; and \deqn{\lambda_j = \hat\lambda(u_j,x)}{lambda[j] = lambda(u[j],x)} is the conditional intensity of the fitted model at \eqn{u_j}{u[j]}. These are the spatial analogue of the martingale residuals of a one-dimensional counting process. 
} \item{\code{"inverse"}:}{ the `inverse-lambda' residuals (Baddeley et al, 2005) \deqn{ r^{(I)}_j = \frac{r_j}{\lambda_j} = \frac{z_j}{\lambda_j} - w_j }{ rI[j] = r[j]/lambda[j] = z[j]/lambda[j] - w[j] } obtained by dividing the raw residuals by the fitted conditional intensity. These are a counterpart of the exponential energy marks (see \code{\link{eem}}). } \item{\code{"pearson"}:}{ the Pearson residuals (Baddeley et al, 2005) \deqn{ r^{(P)}_j = \frac{r_j}{\sqrt{\lambda_j}} = \frac{z_j}{\sqrt{\lambda_j}} - w_j \sqrt{\lambda_j} }{ rP[j] = r[j]/sqrt(lambda[j]) = z[j]/sqrt(lambda[j]) - w[j] sqrt(lambda[j]) } obtained by dividing the raw residuals by the square root of the fitted conditional intensity. The Pearson residuals are standardised, in the sense that if the model (true and fitted) is Poisson, then the sum of the Pearson residuals in a spatial region \eqn{B} has variance equal to the area of \eqn{B}. } \item{\code{"score"}:}{ the score residuals (Baddeley et al, 2005) \deqn{ r_j = (z_j - w_j \lambda_j) x_j }{ r[j] = (z[j] - w[j] lambda[j]) * x[j,] } obtained by multiplying the raw residuals \eqn{r_j}{r[j]} by the covariates \eqn{x_j}{x[j,]} for quadrature point \eqn{j}. The score residuals always sum to zero. } } The result of \code{residuals.ppm} is a measure (object of class \code{"msr"}). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. Use \code{\link{integral.msr}} to compute the total residual. By default, the window of the measure is the same as the original window of the data. If \code{drop=TRUE} then the window is the domain of integration of the pseudolikelihood or composite likelihood. This only matters when the model \code{object} was fitted using the border correction: in that case, if \code{drop=TRUE} the window of the residuals is the erosion of the original data window by the border correction distance \code{rbord}. 
} \section{Modified Residuals}{ Sometimes we want to modify the calculation of residuals by using different values for the model parameters. This capability is provided by the arguments \code{new.coef} and \code{quad}. If \code{new.coef} is given, then the residuals will be computed by taking the model parameters to be \code{new.coef}. This should be a numeric vector of the same length as the vector of fitted model parameters \code{coef(object)}. If \code{new.coef} is missing and \code{quad} is given, then the model parameters will be determined by re-fitting the model using a new quadrature scheme specified by \code{quad}. Residuals will be computed for the original model \code{object} using these new parameter values. The argument \code{quad} should normally be a list of arguments in \code{name=value} format that will be passed to \code{\link{quadscheme}} (together with the original data points) to determine the new quadrature scheme. It may also be a quadrature scheme (object of class \code{"quad"}) to which the model should be fitted, or a point pattern (object of class \code{"ppp"}) specifying the \emph{dummy points} in a new quadrature scheme. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. 
} \seealso{ \code{\link{msr}}, \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ fit <- ppm(cells, ~x, Strauss(r=0.15)) # Pearson residuals rp <- residuals(fit, type="pe") rp # simulated data X <- rStrauss(100,0.7,0.05) # fit Strauss model fit <- ppm(X, ~1, Strauss(0.05)) res.fit <- residuals(fit) # check that total residual is 0 integral.msr(residuals(fit, drop=TRUE)) # true model parameters truecoef <- c(log(100), log(0.7)) res.true <- residuals(fit, new.coef=truecoef) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/densityfun.Rd0000644000176200001440000000375713342711244015364 0ustar liggesusers\name{densityfun.ppp} \alias{densityfun} \alias{densityfun.ppp} \title{ Kernel Estimate of Intensity as a Spatial Function } \description{ Compute a kernel estimate of intensity for a point pattern, and return the result as a function of spatial location. } \usage{ densityfun(X, \dots) \method{densityfun}{ppp}(X, sigma = NULL, \dots, weights = NULL, edge = TRUE, diggle = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth, or bandwidth selection function, passed to \code{\link{density.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}}. } \item{weights}{ Optional vector of weights associated with the points of \code{X}. } \item{edge,diggle}{ Logical arguments controlling the edge correction. Arguments passed to \code{\link{density.ppp}}. } } \details{ The commands \code{densityfun} and \code{\link{density}} both perform kernel estimation of the intensity of a point pattern. The difference is that \code{\link{density}} returns a pixel image, containing the estimated intensity values at a grid of locations, while \code{densityfun} returns a \code{function(x,y)} which can be used to compute the intensity estimate at \emph{any} spatial location. 
For purposes such as model-fitting it is more accurate to use \code{densityfun}. } \value{ A \code{function} with arguments \code{x,y} returning values of the intensity. The function also belongs to the class \code{"densityfun"} which has methods for \code{print} and \code{\link{as.im}}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{density}}. To interpolate values observed at the points, use \code{\link{Smoothfun}}. } \examples{ f <- densityfun(swedishpines) f f(42, 60) plot(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/matclust.estK.Rd0000644000176200001440000001444213571674202015734 0ustar liggesusers\name{matclust.estK} \alias{matclust.estK} \title{Fit the Matern Cluster Point Process by Minimum Contrast} \description{ Fits the \Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast. } \usage{ matclust.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the \Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the \Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the \Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the \Matern Cluster point process to \code{X}, by finding the parameters of the \Matern Cluster model which give the closest match between the theoretical \eqn{K} function of the \Matern Cluster process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The \Matern Cluster point process is described in \Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R}{R} centred on the parent point, where \eqn{R}{R} is equal to the parameter \code{scale}. The named vector of starting values can use either \code{R} or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. 
The theoretical \eqn{K}-function of the \Matern Cluster process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa h(\frac{r}{2R}) }{ K(r) = pi r^2 + h(r/(2*R))/kappa } where the radius R is the parameter \code{scale} and \deqn{ h(z) = 2 + \frac 1 \pi [ ( 8 z^2 - 4 ) \mbox{arccos}(z) - 2 \mbox{arcsin}(z) + 4 z \sqrt{(1 - z^2)^3} - 6 z \sqrt{1 - z^2} ] }{ h(z) = 2 + (1/pi) * ((8 * z^2 - 4) * arccos(z) - 2 * arcsin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 1} for \eqn{z > 1}. The theoretical intensity of the \Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The \Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous \Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. 
For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/grow.boxx.Rd0000644000176200001440000000250213333543263015120 0ustar liggesusers\name{grow.boxx} \alias{grow.boxx} \alias{grow.box3} \title{Add margins to box in any dimension} \description{ Adds a margin to a box of class boxx. } \usage{ grow.boxx(W, left, right = left) grow.box3(W, left, right = left) } \arguments{ \item{W}{ A box (object of class \code{"boxx"} or \code{"box3"}). } \item{left}{Width of margin to be added to left endpoint of box side in every dimension. 
A single nonnegative number, or a vector of same length as the dimension of the box to add different left margin in each dimension. } \item{right}{Width of margin to be added to right endpoint of box side in every dimension. A single nonnegative number, or a vector of same length as the dimension of the box to add different right margin in each dimension. } } \value{ Another object of the same class \code{"boxx"} or \code{"box3"} representing the window after margins are added. } \seealso{ \code{\link{grow.rectangle}}, \code{\link{boxx}}, \code{\link{box3}} } \examples{ w <- boxx(c(0,10), c(0,10), c(0,10), c(0,10)) # add a margin of size 1 on both sides in all four dimensions b12 <- grow.boxx(w, 1) # add margin of size 2 at left, and margin of size 3 at right, # in each dimension. v <- grow.boxx(w, 2, 3) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/logLik.kppm.Rd0000644000176200001440000000725213606002175015354 0ustar liggesusers\name{logLik.kppm} \alias{logLik.kppm} \alias{AIC.kppm} \alias{extractAIC.kppm} \alias{nobs.kppm} \title{Log Likelihood and AIC for Fitted Cox or Cluster Point Process Model} \description{ Extracts the log composite likelihood, deviance, and AIC of a fitted Cox or cluster point process model. } \usage{ \method{logLik}{kppm}(object, ...) \method{AIC}{kppm}(object, \dots, k=2) \method{extractAIC}{kppm}(fit, scale=0, k=2, \dots) \method{nobs}{kppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"kppm"}. } \item{\dots}{Ignored.} \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted Cox or cluster point process model. 
It is obtained from the model-fitting function \code{\link{kppm}}. These methods apply only when the model was fitted by maximising a composite likelihood: either the Palm likelihood (Tanaka et al, 2008) or the second order composite likelihood (Guan, 2006), by calling \code{\link{kppm}} with the argument \code{method="palm"} or \code{method="clik2"} respectively. The method \code{logLik.kppm} computes the maximised value of the log composite likelihood for the fitted model \code{object}. The methods \code{AIC.kppm} and \code{extractAIC.kppm} compute the Akaike Information Criterion AIC for the fitted model based on the composite likelihood \deqn{ AIC = -2 \log(CL) + k \times \mbox{edf} }{ AIC = -2 * log(CL) + k * edf } where \eqn{CL} is the maximised composite likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{nobs.kppm} returns the number of points in the original data point pattern to which the model was fitted. The \R function \code{\link{step}} uses these methods. } \section{Model comparison}{ The values of log-likelihood and AIC returned by these functions are based on the \emph{composite likelihood} of the cluster process or Cox process model. They are available only when the model was fitted using \code{method="palm"} or \code{method="clik2"}. For model comparison and model selection, it is valid to compare the \code{logLik} values, or to compare the \code{AIC} values, but only when all the models are of class \code{"kppm"} and were fitted using the same \code{method}. For \code{method="palm"} some theoretical justification was provided by Tanaka et al (2008). } \value{ \code{logLik} returns a numerical value, belonging to the class \code{"logLik"}, with an attribute \code{"df"} giving the degrees of freedom. \code{AIC} returns a numerical value. \code{extractAIC} returns a numeric vector of length 2 containing the degrees of freedom and the AIC value. 
\code{nobs} returns an integer value. } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. } \seealso{ \code{\link{kppm}}, \code{\link{logLik.ppm}} } \author{ \spatstatAuthors. } \examples{ fit <- kppm(redwood ~ x, "Thomas", method="palm") nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat/man/envelope.pp3.Rd0000644000176200001440000002224713551001752015503 0ustar liggesusers\name{envelope.pp3} \alias{envelope.pp3} \title{Simulation Envelopes of Summary Function for 3D Point Pattern} \description{ Computes simulation envelopes of a summary function for a three-dimensional point pattern. } \usage{ \method{envelope}{pp3}(Y, fun=K3est, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A three-dimensional point pattern (object of class \code{"pp3"}). } \item{fun}{ Function that computes the desired summary statistic for a 3D point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. 
Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}).
A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. 
If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ A function value table (object of class \code{"fv"}) which can be plotted directly. See \code{\link{envelope}} for further details. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described in the help file for \code{\link{envelope}}. This function \code{envelope.pp3} is the method for three-dimensional point patterns (objects of class \code{"pp3"}). 
Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668.
} \examples{ X <- rpoispp3(20, box3()) \dontrun{ plot(envelope(X, nsim=39)) } \testonly{ plot(envelope(X, nsim=4)) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/bc.ppm.Rd0000644000176200001440000000363513333543262014351 0ustar liggesusers\name{bc.ppm} \alias{bc} \alias{bc.ppm} \title{ Bias Correction for Fitted Model } \description{ Applies a first-order bias correction to a fitted model. } \usage{ bc(fit, \dots) \method{bc}{ppm}(fit, \dots, nfine = 256) } \arguments{ \item{fit}{ A fitted point process model (object of class \code{"ppm"}) or a model of some other class. } \item{\dots}{ Additional arguments are currently ignored. } \item{nfine}{ Grid dimensions for fine grid of locations. An integer, or a pair of integers. See Details. } } \details{ This command applies the first order Newton-Raphson bias correction method of Baddeley and Turner (2014, sec 4.2) to a fitted model. The function \code{bc} is generic, with a method for fitted point process models of class \code{"ppm"}. A fine grid of locations, of dimensions \code{nfine * nfine} or \code{nfine[2] * nfine[1]}, is created over the original window of the data, and the intensity or conditional intensity of the fitted model is calculated on this grid. The result is used to update the fitted model parameters once by a Newton-Raphson update. This is only useful if the quadrature points used to fit the original model \code{fit} are coarser than the grid of points specified by \code{nfine}. } \value{ A numeric vector, of the same length as \code{coef(fit)}, giving updated values for the fitted model coefficients. } \references{ Baddeley, A. and Turner, R. (2014) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{84}, 1621--1643. DOI: 10.1080/00949655.2012.755976 } \author{ \adrian and \rolf. 
} \seealso{ \code{\link{rex}} } \examples{ fit <- ppm(cells ~ x, Strauss(0.07)) coef(fit) if(!interactive()) { bc(fit, nfine=64) } else { bc(fit) } } \keyword{spatial} \keyword{models} \keyword{math} \keyword{optimize} spatstat/man/as.data.frame.tess.Rd0000644000176200001440000000327613333543262016554 0ustar liggesusers\name{as.data.frame.tess} \alias{as.data.frame.tess} \title{Convert Tessellation to Data Frame} \description{ Converts a spatial tessellation object to a data frame. } \usage{ \method{as.data.frame}{tess}(x, \dots) } \arguments{ \item{x}{ Tessellation (object of class \code{"tess"}). } \item{\dots}{Further arguments passed to \code{\link{as.data.frame.owin}} or \code{\link{as.data.frame.im}} and ultimately to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features. } } \details{ This function converts the tessellation \code{x} to a data frame. If \code{x} is a pixel image tessellation (a pixel image with factor values specifying the tile membership of each pixel) then this pixel image is converted to a data frame by \code{\link{as.data.frame.im}}. The result is a data frame with columns \code{x} and \code{y} giving the pixel coordinates, and \code{Tile} identifying the tile containing the pixel. If \code{x} is a tessellation consisting of a rectangular grid of tiles or a list of polygonal tiles, then each tile is converted to a data frame by \code{\link{as.data.frame.owin}}, and these data frames are joined together, yielding a single large data frame containing columns \code{x}, \code{y} giving the coordinates of vertices of the polygons, and \code{Tile} identifying the tile. } \value{ A data frame with columns named \code{x}, \code{y}, \code{Tile}, and possibly other columns. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{as.data.frame.owin}}, \code{\link{as.data.frame.im}} } \examples{ Z <- as.data.frame(dirichlet(cells)) head(Z, 10) } \keyword{spatial} \keyword{methods} spatstat/man/latest.news.Rd0000644000176200001440000000360713422272142015433 0ustar liggesusers\name{latest.news} \alias{latest.news} \title{ Print News About Latest Version of Package } \description{ Prints the news documentation for the current version of \code{spatstat} or another specified package. } \usage{ latest.news(package = "spatstat", doBrowse=FALSE, major=TRUE) } \arguments{ \item{package}{ Name of package for which the latest news should be printed. } \item{doBrowse}{ Logical value indicating whether to display the results in a browser window instead of printing them. } \item{major}{ Logical value. If \code{TRUE} (the default), print all information for the current major version \code{"x.y"}. If \code{FALSE}, print only the information for the current minor version \code{"x.y-z"}. } } \details{ This function prints the news documentation about changes in the current installed version of the \pkg{spatstat} package. The function can be called simply by typing its name without parentheses (see the Examples). If \code{major=FALSE}, only information for the current minor version \code{"x.y-z"} will be printed. If \code{major=TRUE} (the default), all information for the current major version \code{"x.y"} will be printed, encompassing versions \code{"x.y-0"}, \code{"x.y-1"}, up to \code{"x.y-z"}. If \code{package} is given, then the function reads the news for the specified package from its \code{NEWS} file (if it has one) and prints only the entries that refer to the current version of the package. To see the news for all previous versions as well as the current version, use the \R utility \code{\link[utils]{news}}. See the Examples. } \value{ Null. 
They are numeric values between 0 and 1 inclusive, where the values 0 and 1 represent the network vertices that are joined by this network segment.
If marks are present, the resulting data frame includes columns containing, for each sub-segment, the mark value of the corresponding tile. } \value{ A data frame with columns named \code{seg}, \code{t0}, \code{t1}, \code{tile}, and possibly other columns. } \author{ \spatstatAuthors. } \seealso{ \code{\link{lintess}} } \examples{ X <- lineardirichlet(runiflpp(3, simplenet)) marks(X) <- letters[1:3] as.data.frame(X) } \keyword{spatial} \keyword{methods} spatstat/man/weighted.median.Rd0000644000176200001440000000260613333543264016225 0ustar liggesusers\name{weighted.median} \alias{weighted.median} \alias{weighted.quantile} \alias{weighted.var} \title{ Weighted Median, Quantiles or Variance } \description{ Compute the median, quantiles or variance of a set of numbers which have weights associated with them. } \usage{ weighted.median(x, w, na.rm = TRUE) weighted.quantile(x, w, probs=seq(0,1,0.25), na.rm = TRUE) weighted.var(x, w, na.rm = TRUE) } \arguments{ \item{x}{ Data values. A vector of numeric values, for which the median or quantiles are required. } \item{w}{ Weights. A vector of nonnegative numbers, of the same length as \code{x}. } \item{probs}{ Probabilities for which the quantiles should be computed. A numeric vector of values between 0 and 1. } \item{na.rm}{ Logical. Whether to ignore \code{NA} values. } } \details{ The \code{i}th observation \code{x[i]} is treated as having a weight proportional to \code{w[i]}. The weighted median is a value \code{m} such that the total weight of data to the left of \code{m} is equal to half the total weight. If there is no such value, linear interpolation is performed. } \value{ A numeric value or vector. } \author{ \adrian. } \seealso{ \code{\link[stats]{quantile}}, \code{\link[stats]{median}}. 
} \examples{ x <- 1:20 w <- runif(20) weighted.median(x, w) weighted.quantile(x, w) weighted.var(x, w) } \keyword{math} spatstat/man/as.data.frame.hyperframe.Rd0000644000176200001440000000275113333543262017735 0ustar liggesusers\name{as.data.frame.hyperframe} \alias{as.data.frame.hyperframe} \title{Coerce Hyperframe to Data Frame} \description{ Converts a hyperframe to a data frame. } \usage{ \method{as.data.frame}{hyperframe}(x, row.names = NULL, optional = FALSE, ..., discard=TRUE, warn=TRUE) } \arguments{ \item{x}{Hyperframe (object of class \code{"hyperframe"}).} \item{row.names}{Optional character vector of row names.} \item{optional}{Argument passed to \code{\link{as.data.frame}} controlling what happens to row names.} \item{\dots}{Ignored.} \item{discard}{Logical. Whether to discard columns of the hyperframe that do not contain atomic data. See Details. } \item{warn}{Logical. Whether to issue a warning when columns are discarded.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class of hyperframes (see \code{\link{hyperframe}}. If \code{discard=TRUE}, any columns of the hyperframe that do not contain atomic data will be removed (and a warning will be issued if \code{warn=TRUE}). If \code{discard=FALSE}, then such columns are converted to strings indicating what class of data they originally contained. } \value{ A data frame. } \examples{ h <- hyperframe(X=1:3, Y=letters[1:3], f=list(sin, cos, tan)) as.data.frame(h, discard=TRUE, warn=FALSE) as.data.frame(h, discard=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/dirichlet.Rd0000644000176200001440000000313013333543263015130 0ustar liggesusers\name{dirichlet} \alias{dirichlet} \title{Dirichlet Tessellation of Point Pattern} \description{ Computes the Dirichlet tessellation of a spatial point pattern. Also known as the Voronoi or Thiessen tessellation. 
} \usage{ dirichlet(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ In a spatial point pattern \code{X}, the Dirichlet tile associated with a particular point \code{X[i]} is the region of space that is closer to \code{X[i]} than to any other point in \code{X}. The Dirichlet tiles divide the two-dimensional plane into disjoint regions, forming a tessellation. The Dirichlet tessellation is also known as the Voronoi or Thiessen tessellation. This function computes the Dirichlet tessellation (within the original window of \code{X}) using the function \code{\link[deldir]{deldir}} in the package \pkg{deldir}. To ensure that there is a one-to-one correspondence between the points of \code{X} and the tiles of \code{dirichlet(X)}, duplicated points in \code{X} should first be removed by \code{X <- unique(X, rule="deldir")}. The tiles of the tessellation will be computed as polygons if the original window is a rectangle or a polygon. Otherwise the tiles will be computed as binary masks. } \value{ A tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{tess}}, \code{\link{delaunay}}, \code{\link{ppp}}, \code{\link{dirichletVertices}} } \examples{ X <- runifpoint(42) plot(dirichlet(X)) plot(X, add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/midpoints.psp.Rd0000644000176200001440000000142313474140142015766 0ustar liggesusers\name{midpoints.psp} \alias{midpoints.psp} \title{Midpoints of Line Segment Pattern} \description{ Computes the midpoints of each line segment in a line segment pattern. } \usage{ midpoints.psp(x) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ The midpoint of each line segment is computed. 
\code{\link{marks.psp}}, \code{\link{summary.psp}}, \code{\link{lengths.psp}}, \code{\link{angles.psp}}, \code{\link{endpoints.psp}}, \code{\link{extrapolate.psp}}.
} \seealso{ \code{\link{affine}}, \code{\link{affine.owin}}, \code{\link{affine.ppp}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ oldpar <- par(mfrow=c(2,1)) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, main="original") # shear transformation Y <- affine(X, matrix(c(1,0,0.6,1),ncol=2)) plot(Y, main="transformed") par(oldpar) # # rescale y coordinates by factor 0.2 affine(X, diag(c(1,0.2))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/connected.linnet.Rd0000644000176200001440000000323713333543263016423 0ustar liggesusers\name{connected.linnet} \alias{connected.linnet} \title{ Connected Components of a Linear Network } \description{ Find the topologically-connected components of a linear network. } \usage{ \method{connected}{linnet}(X, \dots, what = c("labels", "components")) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}). } \item{\dots}{ Ignored. } \item{what}{ Character string specifying the kind of result. } } \details{ The function \code{connected} is generic. This is the method for linear networks (objects of class \code{"linnet"}). Two vertices of the network are connected if they are joined by a path in the network. This function divides the network into subsets, such that all points in a subset are connected to each other. If \code{what="labels"} the return value is a factor with one entry for each vertex of \code{X}, identifying which connected component the vertex belongs to. If \code{what="components"} the return value is a list of linear networks, which are the connected components of \code{X}. } \value{ If \code{what="labels"}, a factor. If \code{what="components"}, a list of linear networks. } \author{ \adrian and Suman Rakshit. 
} \seealso{ \code{\link{thinNetwork}} } \examples{ # remove some edges from a network to make it disconnected plot(simplenet, col="grey", main="", lty=2) A <- thinNetwork(simplenet, retainedges=-c(3,5)) plot(A, add=TRUE, lwd=2) # find the connected components connected(A) cA <- connected(A, what="components") plot(cA[[1]], add=TRUE, col="green", lwd=2) plot(cA[[2]], add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat/man/rmh.ppm.Rd0000644000176200001440000002116113333543264014547 0ustar liggesusers\name{rmh.ppm} \alias{rmh.ppm} \title{Simulate from a Fitted Point Process Model} \description{ Given a point process model fitted to data, generate a random simulation of the model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{ppm}(model, start=NULL, control=default.rmhcontrol(model, w=w), \dots, w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, new.coef=NULL) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}) which it is desired to simulate. This fitted model is usually the result of a call to \code{\link{ppm}}. See \bold{Details} below. } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. Defaults to \code{list(x.start=data.ppm(model))} } \item{control}{Data controlling the iterative behaviour of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{w}{ Optional. Window in which the simulations should be generated. Default is the window of the original data. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). 
If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{nsim}{ Number of simulated point patterns that should be generated. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a single point pattern. } \item{saveinfo}{ Logical value indicating whether to save auxiliary information. } \item{verbose}{ Logical flag indicating whether to print progress reports. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ A point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}}) or a list of point patterns. } \details{ This function generates simulated realisations from a point process model that has been fitted to point pattern data. It is a method for the generic function \code{\link{rmh}} for the class \code{"ppm"} of fitted point process models. To simulate other kinds of point process models, see \code{\link{rmh}} or \code{\link{rmh.default}}. The argument \code{model} describes the fitted model. It must be an object of class \code{"ppm"} (see \code{\link{ppm.object}}), and will typically be the result of a call to the point process model fitting function \code{\link{ppm}}. The current implementation enables simulation from any fitted model involving the interactions \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Softcore}}, including nonstationary models. See the examples. It is also possible to simulate \emph{hybrids} of several such models. See \code{\link{Hybrid}} and the examples. 
It is possible that the fitted coefficients of a point process model may be ``illegal'', i.e. that there may not exist a mathematically well-defined point process with the given parameter values. For example, a Strauss process with interaction parameter \eqn{\gamma > 1}{gamma > 1} does not exist, but the model-fitting procedure used in \code{\link{ppm}} will sometimes produce values of \eqn{\gamma}{gamma} greater than 1. In such cases, if \code{project=FALSE} then an error will occur, while if \code{project=TRUE} then \code{rmh.ppm} will find the nearest legal model and simulate this model instead. (The nearest legal model is obtained by projecting the vector of coefficients onto the set of valid coefficient vectors. The result is usually the Poisson process with the same fitted intensity.) The arguments \code{start} and \code{control} are lists of parameters determining the initial state and the iterative behaviour, respectively, of the Metropolis-Hastings algorithm. The argument \code{start} is passed directly to \code{\link{rmhstart}}. See \code{\link{rmhstart}} for details of the parameters of the initial state, and their default values. The argument \code{control} is first passed to \code{\link{rmhcontrol}}. Then if any additional arguments \code{\dots} are given, \code{\link{update.rmhcontrol}} is called to update the parameter values. See \code{\link{rmhcontrol}} for details of the iterative behaviour parameters, and \code{\link{default.rmhcontrol}} for their default values. Note that if you specify expansion of the simulation window using the parameter \code{expand} (so that the model will be simulated on a window larger than the original data window) then the model must be capable of extrapolation to this larger window. This is usually not possible for models which depend on external covariates, because the domain of a covariate image is usually the same as the domain of the fitted model. 
After extracting the relevant information from the fitted model object \code{model}, \code{rmh.ppm} invokes the default \code{rmh} algorithm \code{\link{rmh.default}}, unless the model is Poisson. If the model is Poisson then the Metropolis-Hastings algorithm is not needed, and the model is simulated directly, using one of \code{\link{rpoispp}}, \code{\link{rmpoispp}}, \code{\link{rpoint}} or \code{\link{rmpoint}}. See \code{\link{rmh.default}} for further information about the implementation, or about the Metropolis-Hastings algorithm. } \section{Warnings}{ See Warnings in \code{\link{rmh.default}}. } \seealso{ \code{\link{simulate.ppm}}, \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhcontrol}}, \code{\link{default.rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{rmh.default}}, \code{\link{ppp.object}}, \code{\link{ppm}}, Interactions: \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}} } \examples{ live <- interactive() op <- spatstat.options() spatstat.options(rmh.nrep=1e5) Nrep <- 1e5 X <- swedishpines if(live) plot(X, main="Swedish Pines data") # Poisson process fit <- ppm(X, ~1, Poisson()) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Poisson model") # Strauss process fit <- ppm(X, ~1, Strauss(r=7)) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Strauss model") \dontrun{ # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) Xsim <- rmh(fit, nrep=Nrep, expand=2, periodic=TRUE) } \dontrun{ X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, 
by=0.01))) Xsim <- rmh(fit) } # marked point pattern Y <- amacrine \dontrun{ # marked Poisson models fit <- ppm(Y) fit <- ppm(Y,~marks) fit <- ppm(Y,~polynom(x,2)) fit <- ppm(Y,~marks+polynom(x,2)) fit <- ppm(Y,~marks*polynom(x,y,2)) Ysim <- rmh(fit) } # multitype Strauss models MS <- MultiStrauss(radii=matrix(0.07, ncol=2, nrow=2), types = levels(Y$marks)) \dontrun{ fit <- ppm(Y ~marks, MS) Ysim <- rmh(fit) } fit <- ppm(Y ~ marks*polynom(x,y,2), MS) Ysim <- rmh(fit) if(live) plot(Ysim, main="simulation from fitted inhomogeneous Multitype Strauss") spatstat.options(op) \dontrun{ # Hybrid model fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) Y <- rmh(fit) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} \keyword{datagen} spatstat/man/rounding.Rd0000644000176200001440000000417413333543264015020 0ustar liggesusers\name{rounding} \alias{rounding} \alias{rounding.default} \alias{rounding.ppp} \alias{rounding.pp3} \alias{rounding.ppx} \title{ Detect Numerical Rounding } \description{ Given a numeric vector, or an object containing numeric spatial coordinates, determine whether the values have been rounded to a certain number of decimal places. } \usage{ rounding(x) \method{rounding}{default}(x) \method{rounding}{ppp}(x) \method{rounding}{pp3}(x) \method{rounding}{ppx}(x) } \arguments{ \item{x}{ A numeric vector, or an object containing numeric spatial coordinates. } } \details{ For a numeric vector \code{x}, this function determines whether the values have been rounded to a certain number of decimal places. \itemize{ \item If the entries of \code{x} are not all integers, then \code{rounding(x)} returns the smallest number of digits \code{d} after the decimal point such that \code{\link[base]{round}(x, digits=d)} is identical to \code{x}. For example if \code{rounding(x) = 2} then the entries of \code{x} are rounded to 2 decimal places, and are multiples of 0.01. 
\item If all the entries of \code{x} are integers, then \code{rounding(x)} returns \code{-d}, where \code{d} is the smallest number of digits \emph{before} the decimal point such that \code{\link[base]{round}(x, digits=-d)} is identical to \code{x}. For example if \code{rounding(x) = -3} then the entries of \code{x} are multiples of 1000. If \code{rounding(x) = 0} then the entries of \code{x} are integers but not multiples of 10. \item If all entries of \code{x} are equal to 0, the rounding is not determined, and a value of \code{NULL} is returned. } For a point pattern (object of class \code{"ppp"}) or similar object \code{x} containing numeric spatial coordinates, this procedure is applied to the spatial coordinates. } \value{ An integer. } \author{ \adrian and \rolf } \seealso{ \code{\link{round.ppp}} } \examples{ rounding(c(0.1, 0.3, 1.2)) rounding(c(1940, 1880, 2010)) rounding(0) rounding(cells) } \keyword{spatial} \keyword{math} spatstat/man/nearest.raster.point.Rd0000644000176200001440000000412613333543263017257 0ustar liggesusers\name{nearest.raster.point} \alias{nearest.raster.point} \title{Find Pixel Nearest to a Given Point} \description{ Given cartesian coordinates, find the nearest pixel. } \usage{ nearest.raster.point(x,y,w, indices=TRUE) } \arguments{ \item{x}{Numeric vector of \eqn{x} coordinates of any points} \item{y}{Numeric vector of \eqn{y} coordinates of any points} \item{w}{An image (object of class \code{"im"}) or a binary mask window (an object of class \code{"owin"} of type \code{"mask"}). } \item{indices}{Logical flag indicating whether to return the row and column indices, or the actual \eqn{x,y} coordinates. } } \value{ If \code{indices=TRUE}, a list containing two vectors \code{rr} and \code{cc} giving row and column positions (in the image matrix). If \code{indices=FALSE}, a list containing vectors \code{x} and \code{y} giving actual coordinates of the pixels. 
} \details{ The argument \code{w} should be either a pixel image (object of class \code{"im"}) or a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) of type \code{"mask"}. The arguments \code{x} and \code{y} should be numeric vectors of equal length. They are interpreted as the coordinates of points in space. For each point \code{(x[i], y[i])}, the function finds the nearest pixel in the grid of pixels for \code{w}. If \code{indices=TRUE}, this function returns a list containing two vectors \code{rr} and \code{cc} giving row and column positions (in the image matrix). For the location \code{(x[i],y[i])} the nearest pixel is at row \code{rr[i]} and column \code{cc[i]} of the image. If \code{indices=FALSE}, the function returns a list containing two vectors \code{x} and \code{y} giving the actual coordinates of the pixels. } \seealso{ \code{\link{owin.object}}, \code{\link{as.mask}} } \examples{ w <- owin(c(0,1), c(0,1), mask=matrix(TRUE, 100,100)) # 100 x 100 grid nearest.raster.point(0.5, 0.3, w) nearest.raster.point(0.5, 0.3, w, indices=FALSE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/rotate.im.Rd0000644000176200001440000000211113333543264015062 0ustar liggesusers\name{rotate.im} \alias{rotate.im} \title{Rotate a Pixel Image} \description{ Rotates a pixel image } \usage{ \method{rotate}{im}(X, angle=pi/2, \dots, centre=NULL) } \arguments{ \item{X}{A pixel image (object of class \code{"im"}).} \item{angle}{Angle of rotation, in radians.} \item{\dots}{Ignored.} \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"im"} representing the rotated pixel image. } \details{ The image is rotated by the angle specified. Angles are measured in radians, anticlockwise. 
The default is to rotate the image 90 degrees anticlockwise. } \seealso{ \code{\link{affine.im}}, \code{\link{shift.im}}, \code{\link{rotate}} } \examples{ Z <- distmap(letterR) X <- rotate(Z) \dontrun{ plot(X) } Y <- rotate(X, centre="midpoint") } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/predict.lppm.Rd0000644000176200001440000000651413333543264015574 0ustar liggesusers\name{predict.lppm} \alias{predict.lppm} \title{ Predict Point Process Model on Linear Network } \description{ Given a fitted point process model on a linear network, compute the fitted intensity or conditional intensity of the model. } \usage{ \method{predict}{lppm}(object, ..., type = "trend", locations = NULL, new.coef=NULL) } \arguments{ \item{object}{ The fitted model. An object of class \code{"lppm"}, see \code{\link{lppm}}. } \item{type}{ Type of values to be computed. Either \code{"trend"}, \code{"cif"} or \code{"se"}. } \item{locations}{ Optional. Locations at which predictions should be computed. Either a data frame with two columns of coordinates, or a binary image mask. } \item{new.coef}{ Optional. Numeric vector of model coefficients, to be used instead of the fitted coefficients \code{coef(object)} when calculating the prediction. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution (if \code{locations} is missing). } } \details{ This function computes the fitted poin process intensity, fitted conditional intensity, or standard error of the fitted intensity, for a point process model on a linear network. It is a method for the generic \code{\link[stats]{predict}} for the class \code{"lppm"}. The argument \code{object} should be an object of class \code{"lppm"} (produced by \code{\link{lppm}}) representing a point process model on a linear network. Predicted values are computed at the locations given by the argument \code{locations}. 
If this argument is missing, then predicted values are computed at a fine grid of points on the linear network. \itemize{ \item If \code{locations} is missing or \code{NULL} (the default), the return value is a pixel image (object of class \code{"linim"} which inherits class \code{"im"}) corresponding to a discretisation of the linear network, with numeric pixel values giving the predicted values at each location on the linear network. \item If \code{locations} is a data frame, the result is a numeric vector of predicted values at the locations specified by the data frame. \item If \code{locations} is a binary mask, the result is a pixel image with predicted values computed at the pixels of the mask. } } \value{ A pixel image (object of class \code{"linim"} which inherits class \code{"im"}) or a numeric vector, depending on the argument \code{locations}. See Details. } \author{ \adrian } \seealso{ \code{\link{lpp}}, \code{\link{linim}} } \examples{ X <- runiflpp(12, simplenet) fit <- lppm(X ~ x) v <- predict(fit, type="trend") plot(v) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. 
} \keyword{spatial} \keyword{models} spatstat/man/timeTaken.Rd0000644000176200001440000000213413333543264015106 0ustar liggesusers\name{timeTaken} \alias{timeTaken} \title{ Extract the Total Computation Time } \description{ Given an object or objects that contain timing information (reporting the amount of computer time taken to compute each object), this function extracts the timing data and evaluates the total time taken. } \usage{ timeTaken(..., warn=TRUE) } \arguments{ \item{\dots}{ One or more objects of class \code{"timed"} containing timing data. } \item{warn}{ Logical value indicating whether a warning should be issued if some of the arguments do not contain timing information. } } \details{ An object of class \code{"timed"} contains information on the amount of computer time that was taken to compute the object. See \code{\link{timed}}. This function extracts the timing information from one or more such objects, and calculates the total time. } \value{ An object inheriting the class \code{"timed"}. } \examples{ A <- timed(Kest(cells)) B <- timed(Gest(cells)) A B timeTaken(A,B) } \seealso{ \code{\link{timed}} } \author{ \spatstatAuthors. } \keyword{utilities} spatstat/man/rSSI.Rd0000644000176200001440000001111313333543264014002 0ustar liggesusers\name{rSSI} \alias{rSSI} \title{Simulate Simple Sequential Inhibition} \description{ Generate a random point pattern, a realisation of the Simple Sequential Inhibition (SSI) process. } \usage{ rSSI(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ..., f=NULL, fmax=NULL, nsim=1, drop=TRUE) } \arguments{ \item{r}{ Inhibition distance. } \item{n}{ Maximum number of points allowed. If \code{n} is finite, stop when the \emph{total} number of points in the point pattern reaches \code{n}. If \code{n} is infinite (the default), stop only when it is apparently impossible to add any more points. See \bold{Details}. } \item{win}{ Window in which to simulate the pattern. 
An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. The default window is the unit square, unless \code{x.init} is specified, when the default window is the window of \code{x.init}. } \item{giveup}{ Number of rejected proposals after which the algorithm should terminate. } \item{x.init}{ Optional. Initial configuration of points. A point pattern (object of class \code{"ppp"}). The pattern returned by \code{rSSI} consists of this pattern together with the points added via simple sequential inhibition. See \bold{Details}. } \item{\dots}{Ignored.} \item{f,fmax}{ Optional arguments passed to \code{\link{rpoint}} to specify a non-uniform probability density for the random points. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This algorithm generates one or more realisations of the Simple Sequential Inhibition point process inside the window \code{win}. Starting with an empty window (or with the point pattern \code{x.init} if specified), the algorithm adds points one-by-one. Each new point is generated uniformly in the window and independently of preceding points. If the new point lies closer than \code{r} units from an existing point, then it is rejected and another random point is generated. The algorithm terminates when either \describe{ \item{(a)}{ the desired number \code{n} of points is reached, or } \item{(b)}{ the current point configuration has not changed for \code{giveup} iterations, suggesting that it is no longer possible to add new points. } } If \code{n} is infinite (the default) then the algorithm terminates only when (b) occurs. The result is sometimes called a \emph{Random Sequential Packing}. 
Note that argument \code{n} specifies the maximum permitted \bold{total} number of points in the pattern returned by \code{rSSI()}. If \code{x.init} is not \code{NULL} then the number of points that are \emph{added} is at most \code{n - npoints(x.init)} if \code{n} is finite. Thus if \code{x.init} is not \code{NULL} then argument \code{n} must be at least as large as \code{npoints(x.init)}, otherwise an error is given. If \code{n==npoints(x.init)} then a warning is given and the call to \code{rSSI()} has no real effect; \code{x.init} is returned. There is no requirement that the points of \code{x.init} be at a distance at least \code{r} from each other. All of the \emph{added} points will be at a distance at least \code{r} from each other and from any point of \code{x.init}. The points will be generated inside the window \code{win} and the result will be a point pattern in the same window. The default window is the unit square, \code{win = square(1)}, unless \code{x.init} is specified, when the default is \code{win=Window(x.init)}, the window of \code{x.init}. If both \code{win} and \code{x.init} are specified, and if the two windows are different, then a warning will be issued. Any points of \code{x.init} lying outside \code{win} will be removed, with a warning. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}. } \examples{ Vinf <- rSSI(0.07) V100 <- rSSI(0.07, 100) X <- runifpoint(100) Y <- rSSI(0.03,142,x.init=X) # Y consists of X together with # 42 added points. plot(Y, main="rSSI") plot(X,add=TRUE,chars=20,cols="red") ## inhomogeneous Z <- rSSI(0.07, 50, f=function(x,y){x}) plot(Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/summary.ppm.Rd0000644000176200001440000000510413333543264015455 0ustar liggesusers\name{summary.ppm} \alias{summary.ppm} \alias{print.summary.ppm} \title{Summarizing a Fitted Point Process Model} \description{ \code{summary} method for class \code{"ppm"}. 
} \usage{ \method{summary}{ppm}(object, \dots, quick=FALSE, fine=FALSE) \method{print}{summary.ppm}(x, \dots) } \arguments{ \item{object}{A fitted point process model.} \item{\dots}{Ignored.} \item{quick}{Logical flag controlling the scope of the summary.} \item{fine}{ Logical value passed to \code{\link{vcov.ppm}} determining whether to compute the quick, coarse estimate of variance (\code{fine=FALSE}, the default) or the slower, finer estimate (\code{fine=TRUE}). } \item{x}{Object of class \code{"summary.ppm"} as returned by \code{summary.ppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}}) for details of this class. \code{summary.ppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. (If \code{quick=TRUE} then only the information about the type of model is extracted.) \code{print.summary.ppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.ppm} is invoked implicitly when the user calls \code{summary.ppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.ppm} returns an object of class \code{"summary.ppm"}, while \code{print.summary.ppm} returns \code{NULL}. 
} \examples{ # invent some data X <- rpoispp(42) # fit a model to it fit <- ppm(X ~ x, Strauss(r=0.1)) # summarize the fitted model summary(fit) # `quick' option summary(fit, quick=TRUE) # coefficients with standard errors and CI coef(summary(fit)) coef(summary(fit, fine=TRUE)) # save the full summary s <- summary(fit) # print it print(s) s # extract stuff names(s) coef(s) s$args$correction s$name s$trend$value \dontrun{ # multitype pattern data(demopat) fit <- ppm(demopat, ~marks, Poisson()) summary(fit) } # model with external covariates fitX <- ppm(X, ~Z, covariates=list(Z=function(x,y){x+y})) summary(fitX) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/chop.linnet.Rd0000644000176200001440000000203713557001576015413 0ustar liggesusers\name{chop.linnet} \alias{chop.linnet} \title{ Divide a Linear Network into Tiles Using Infinite Lines } \description{ Given a linear network and a set of infinite lines, divide the network into tiles demarcated by the lines. The result is a tessellation of the network. } \usage{ chop.linnet(X, L) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}) or data acceptable to \code{\link{as.linnet}}. } \item{L}{ Infinite line or lines (object of class \code{"infline"}). } } \details{ The first line of \code{L} divides \code{X} into two tiles. Subsequent lines divide each of these tiles. The result is a tessellation of \code{X}. Tiles are not necessarily connected sets. } \value{ Tessellation on a linear network (object of class \code{"lintess"}). } \author{ \adrian. 
} \seealso{ \code{\link{crossing.linnet}} } \examples{ L <- infline(p=runif(3), theta=runif(3, max=pi/2)) Y <- chop.linnet(simplenet, L) plot(Y, main="") plot(L, col="red") } \keyword{spatial} \keyword{manip} spatstat/man/plot.ppp.Rd0000644000176200001440000003374513333543264014755 0ustar liggesusers\name{plot.ppp} \alias{plot.ppp} \title{plot a Spatial Point Pattern} \description{ Plot a two-dimensional spatial point pattern } \usage{ \method{plot}{ppp}(x, main, \dots, clipwin=NULL, chars=NULL, cols=NULL, use.marks=TRUE, which.marks=NULL, add=FALSE, type=c("p","n"), legend=TRUE, leg.side=c("left", "bottom", "top", "right"), leg.args=list(), symap=NULL, maxsize=NULL, meansize=NULL, markscale=NULL, zap=0.01, show.window=show.all, show.all=!add, do.plot=TRUE, multiplot=TRUE) } \arguments{ \item{x}{ The spatial point pattern to be plotted. An object of class \code{"ppp"}, or data which can be converted into this format by \code{\link{as.ppp}()}. } \item{main}{ text to be displayed as a title above the plot. } \item{\dots}{ extra arguments that will be passed to the plotting functions \code{\link{plot.default}}, \code{\link{points}} and/or \code{\link{symbols}}. } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the image will be displayed. } \item{chars}{ plotting character(s) used to plot points. } \item{cols}{ the colour(s) used to plot points. } \item{use.marks}{ logical flag; if \code{TRUE}, plot points using a different plotting symbol for each mark; if \code{FALSE}, only the locations of the points will be plotted, using \code{\link{points}()}. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character or integer vector identifying one or more columns of marks. If \code{add=FALSE} then the default is to plot all columns of marks, in a series of separate plots. 
If \code{add=TRUE} then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } \item{add}{ logical flag; if \code{TRUE}, just the points are plotted, over the existing plot. A new plot is not created, and the window is not plotted. } \item{type}{ Type of plot: either \code{"p"} or \code{"n"}. If \code{type="p"} (the default), both the points and the observation window are plotted. If \code{type="n"}, only the window is plotted. } \item{legend}{ Logical value indicating whether to add a legend showing the mapping between mark values and graphical symbols (for a marked point pattern). } \item{leg.side}{ Position of legend relative to main plot. } \item{leg.args}{ List of additional arguments passed to \code{\link{plot.symbolmap}} or \code{\link{symbolmap}} to control the legend. In addition to arguments documented under \code{\link{plot.symbolmap}}, and graphical arguments recognised by \code{\link{symbolmap}}, the list may also include the argument \code{sep} giving the separation between the main plot and the legend, or \code{sep.frac} giving the separation as a fraction of the relevant dimension (width or height) of the main plot. } \item{symap}{ Optional. The graphical symbol map to be applied to the marks. An object of class \code{"symbolmap"}; see \code{\link{symbolmap}}. } \item{maxsize}{ \emph{Maximum} physical size of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. Incompatible with \code{meansize} and \code{markscale}. Ignored if \code{symap} is given. } \item{meansize}{ \emph{Average} physical size of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. Incompatible with \code{maxsize} and \code{markscale}. Ignored if \code{symap} is given. } \item{markscale}{ physical scale factor determining the sizes of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. 
Mark value will be multiplied by \code{markscale} to determine physical size. Incompatible with \code{maxsize} and \code{meansize}. Ignored if \code{symap} is given. } \item{zap}{ Fraction between 0 and 1. When \code{x} is a marked point pattern with numerical marks, \code{zap} is the smallest mark value (expressed as a fraction of the maximum possible mark) that will be plotted. Any points which have marks smaller in absolute value than \code{zap * max(abs(marks(x)))} will not be plotted. } \item{show.window}{ Logical value indicating whether to plot the observation window of \code{x}. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the observation window of \code{x}. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value giving permission to display multiple plots. } } \value{ (Invisible) object of class \code{"symbolmap"} giving the correspondence between mark values and plotting characters. } \details{ This is the \code{plot} method for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}). First the observation window \code{Window(x)} is plotted (if \code{show.window=TRUE}). Then the points themselves are plotted, in a fashion that depends on their marks, as follows. \describe{ \item{unmarked point pattern:}{ If the point pattern does not have marks, or if \code{use.marks = FALSE}, then the locations of all points will be plotted using a single plot character } \item{multitype point pattern:}{ If \code{x$marks} is a factor, then each level of the factor is represented by a different plot character. 
} \item{continuous marks:}{ If \code{x$marks} is a numeric vector, the marks are rescaled to the unit interval and each point is represented by a circle with \emph{diameter} proportional to the rescaled mark (if the value is positive) or a square with \emph{side length} proportional to the absolute value of the rescaled mark (if the value is negative). } \item{other kinds of marks:}{ If \code{x$marks} is neither numeric nor a factor, then each possible mark will be represented by a different plotting character. The default is to represent the \eqn{i}th smallest mark value by \code{points(..., pch=i)}. } } If there are several columns of marks, and if \code{which.marks} is missing or \code{NULL}, then \itemize{ \item if \code{add=FALSE} and \code{multiplot=TRUE} the default is to plot all columns of marks, in a series of separate plots, placed side-by-side. The plotting is coordinated by \code{\link{plot.listof}}, which calls \code{plot.ppp} to make each of the individual plots. \item Otherwise, only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } Plotting of the window \code{Window(x)} is performed by \code{\link{plot.owin}}. This plot may be modified through the \code{...} arguments. In particular the extra argument \code{border} determines the colour of the window, if the window is not a binary mask. Plotting of the points themselves is performed by the function \code{\link{points}}, except for the case of continuous marks, where it is performed by \code{\link{symbols}}. Their plotting behaviour may be modified through the \code{...} arguments. The argument \code{chars} determines the plotting character or characters used to display the points (in all cases except for the case of continuous marks). For an unmarked point pattern, this should be a single integer or character determining a plotting character (see \code{par("pch")}). 
For a multitype point pattern, \code{chars} should be a vector of integers or characters, of the same length as \code{levels(x$marks)}, and then the \eqn{i}th level or type will be plotted using character \code{chars[i]}. If \code{chars} is absent, but there is an extra argument \code{pch}, then this will determine the plotting character for all points. The argument \code{cols} determines the colour or colours used to display the points. For an unmarked point pattern, \code{cols} should be a character string determining a colour. For a multitype point pattern, \code{cols} should be a character vector, of the same length as \code{levels(marks(x))}: that is, there is one colour for each possible mark value. The \eqn{i}th level or type will be plotted using colour \code{cols[i]}. For a point pattern with continuous marks, \code{cols} can be either a character string or a character vector specifying colour values: the range of mark values will be mapped to the specified colours. If \code{cols} is absent, the colours used to plot the points may be determined by the extra argument \code{fg} (for multitype point patterns) or the extra argument \code{col} (for all other cases). Note that specifying \code{col} will also apply this colour to the window itself. The default colour for the points is a semi-transparent grey, if this is supported by the plot device. This behaviour can be suppressed (so that the default colour is non-transparent) by setting \code{spatstat.options(transparent=FALSE)}. The arguments \code{maxsize}, \code{meansize} and \code{markscale} incompatible. They control the physical size of the circles and squares which represent the marks in a point pattern with continuous marks. The size of a circle is defined as its \emph{diameter}; the size of a square is its side length. 
If \code{markscale} is given, then a mark value of \code{m} is plotted as a circle of diameter \code{m * markscale} (if \code{m} is positive) or a square of side \code{abs(m) * markscale} (if \code{m} is negative). If \code{maxsize} is given, then the largest mark in absolute value, \code{mmax=max(abs(marks(x)))}, will be scaled to have physical size \code{maxsize}. If \code{meansize} is given, then the average absolute mark value, \code{mmean=mean(abs(marks(x)))}, will be scaled to have physical size \code{meansize}. The user can set the default values of these plotting parameters using \code{\link{spatstat.options}("par.points")}. To zoom in (to view only a subset of the point pattern at higher magnification), use the graphical arguments \code{xlim} and \code{ylim} to specify the rectangular field of view. The value returned by this plot function is an object of class \code{"symbolmap"} representing the mapping from mark values to graphical symbols. See \code{\link{symbolmap}}. It can be used to make a suitable legend, or to ensure that two plots use the same graphics map. } \section{Removing White Space Around The Plot}{ A frequently-asked question is: How do I remove the white space around the plot? Currently \code{plot.ppp} uses the base graphics system of \R, so the space around the plot is controlled by parameters to \code{\link{par}}. To reduce the white space, change the parameter \code{mar}. Typically, \code{par(mar=rep(0.5, 4))} is adequate, if there are no annotations or titles outside the window. 
} \seealso{ \code{\link{iplot}}, \code{\link{ppp.object}}, \code{\link{plot}}, \code{\link{par}}, \code{\link{points}}, \code{\link{text.ppp}}, \code{\link{plot.owin}}, \code{\link{symbols}} } \examples{ plot(cells) plot(cells, pch=16) # make the plotting symbols larger (for publication at reduced scale) plot(cells, cex=2) # set it in spatstat.options oldopt <- spatstat.options(par.points=list(cex=2)) plot(cells) spatstat.options(oldopt) # multitype plot(lansing) # marked by a real number plot(longleaf) # just plot the points plot(longleaf, use.marks=FALSE) plot(unmark(longleaf)) # equivalent # point pattern with multiple marks plot(finpines) plot(finpines, which.marks="height") # controlling COLOURS of points plot(cells, cols="blue") plot(lansing, cols=c("black", "yellow", "green", "blue","red","pink")) plot(longleaf, fg="blue") # make window purple plot(lansing, border="purple") # make everything purple plot(lansing, border="purple", cols="purple", col.main="purple", leg.args=list(col.axis="purple")) # controlling PLOT CHARACTERS for multitype pattern plot(lansing, chars = 11:16) plot(lansing, chars = c("o","h","m",".","o","o")) ## multitype pattern mapped to symbols plot(amacrine, shape=c("circles", "squares"), size=0.04) plot(amacrine, shape="arrows", direction=c(0,90), size=0.07) ## plot trees as trees! 
plot(lansing, shape="arrows", direction=90, cols=1:6) # controlling MARK SCALE for pattern with numeric marks plot(longleaf, markscale=0.1) plot(longleaf, maxsize=5) plot(longleaf, meansize=2) # draw circles of diameter equal to nearest neighbour distance plot(cells \%mark\% nndist(cells), markscale=1, legend=FALSE) # inspecting the symbol map v <- plot(amacrine) v ## variable colours ('cols' not 'col') plot(longleaf, cols=function(x) ifelse(x < 30, "red", "black")) ## re-using the same mark scale a <- plot(longleaf) juveniles <- longleaf[marks(longleaf) < 30] plot(juveniles, symap=a) ## numerical marks mapped to symbols of fixed size with variable colour ra <- range(marks(longleaf)) colmap <- colourmap(terrain.colors(20), range=ra) ## filled plot characters are the codes 21-25 ## fill colour is indicated by 'bg' sy <- symbolmap(pch=21, bg=colmap, range=ra) plot(longleaf, symap=sy) ## or more compactly.. plot(longleaf, bg=terrain.colors(20), pch=21, cex=1) ## clipping plot(humberside) B <- owin(c(4810, 5190), c(4180, 4430)) plot(B, add=TRUE, border="red") plot(humberside, clipwin=B, main="Humberside (clipped)") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/ippm.Rd0000644000176200001440000001340113425751505014132 0ustar liggesusers\name{ippm} \alias{ippm} \title{ Fit Point Process Model Involving Irregular Trend Parameters } \description{ Experimental extension to \code{ppm} which finds optimal values of the irregular trend parameters in a point process model. } \usage{ ippm(Q, \dots, iScore=NULL, start=list(), covfunargs=start, nlm.args=list(stepmax=1/2), silent=FALSE, warn.unused=TRUE) } \arguments{ \item{Q,\dots}{ Arguments passed to \code{\link{ppm}} to fit the point process model. } \item{iScore}{ Optional. A named list of \R functions that compute the partial derivatives of the logarithm of the trend, with respect to each irregular parameter. See Details. 
} \item{start}{ Named list containing initial values of the irregular parameters over which to optimise. } \item{covfunargs}{ Argument passed to \code{\link{ppm}}. A named list containing values for \emph{all} irregular parameters required by the covariates in the model. Must include all the parameters named in \code{start}. } \item{nlm.args}{ Optional list of arguments passed to \code{\link[stats]{nlm}} to control the optimization algorithm. } \item{silent}{ Logical. Whether to print warnings if the optimization algorithm fails to converge. } \item{warn.unused}{ Logical. Whether to print a warning if some of the parameters in \code{start} are not used in the model. } } \details{ This function is an experimental extension to the point process model fitting command \code{\link{ppm}}. The extension allows the trend of the model to include irregular parameters, which will be maximised by a Newton-type iterative method, using \code{\link[stats]{nlm}}. For the sake of explanation, consider a Poisson point process with intensity function \eqn{\lambda(u)}{\lambda(u)} at location \eqn{u}. Assume that \deqn{ \lambda(u) = \exp(\alpha + \beta Z(u)) \, f(u, \gamma) }{ \lambda(u) = exp(\alpha + \beta * Z(u)) * f(u, \gamma) } where \eqn{\alpha,\beta,\gamma} are parameters to be estimated, \eqn{Z(u)} is a spatial covariate function, and \eqn{f} is some known function. Then the parameters \eqn{\alpha,\beta} are called \emph{regular} because they appear in a loglinear form; the parameter \eqn{\gamma} is called \emph{irregular}. To fit this model using \code{ippm}, we specify the intensity using the \code{trend} formula in the same way as usual for \code{\link{ppm}}. The trend formula is a representation of the log intensity. In the above example the log intensity is \deqn{ \log\lambda(u) = \alpha + \beta Z(u) + \log f(u, \gamma) }{ log(\lambda(u)) = \alpha + \beta * Z(u) + log(f(u, \gamma)) } So the model above would be encoded with the trend formula \code{~Z + offset(log(f))}. 
Note that the irregular part of the model is an \emph{offset} term, which means that it is included in the log trend as it is, without being multiplied by another regular parameter. The optimisation runs faster if we specify the derivative of \eqn{\log f(u,\gamma)}{log(f(u,\gamma))} with respect to \eqn{\gamma}. We call this the \emph{irregular score}. To specify this, the user must write an \R function that computes the irregular score for any value of \eqn{\gamma} at any location \code{(x,y)}. Thus, to code such a problem, \enumerate{ \item The argument \code{trend} should define the log intensity, with the irregular part as an offset; \item The argument \code{start} should be a list containing initial values of each of the irregular parameters; \item The argument \code{iScore}, if provided, must be a list (with one entry for each entry of \code{start}) of functions with arguments \code{x,y,\dots}, that evaluate the partial derivatives of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to each irregular parameter. } The coded example below illustrates the model with two irregular parameters \eqn{\gamma,\delta}{gamma,delta} and irregular term \deqn{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta x^3) }{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta * x^3) } Arguments \code{\dots} passed to \code{\link{ppm}} may also include \code{interaction}. In this case the model is not a Poisson point process but a more general Gibbs point process; the trend formula \code{trend} determines the first-order trend of the model (the first order component of the conditional intensity), not the intensity. } \value{ A fitted point process model (object of class \code{"ppm"}) which also belongs to the special class \code{"ippm"}. 
} \author{\spatstatAuthors.} \seealso{ \code{\link{ppm}}, \code{\link{profilepl}} } \examples{ nd <- 32 \testonly{nd <- 10} gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nlm.args=list(stepmax=1), nd=nd) } \keyword{spatial} \keyword{models} spatstat/man/dilated.areas.Rd0000644000176200001440000000441413333543263015667 0ustar liggesusers\name{dilated.areas} \Rdversion{1.1} \alias{dilated.areas} \title{ Areas of Morphological Dilations } \description{ Computes the areas of successive morphological dilations. } \usage{ dilated.areas(X, r, W=as.owin(X), ..., constrained=TRUE, exact = FALSE) } \arguments{ \item{X}{ Object to be dilated. A point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), or a window (object of class \code{"owin"}). } \item{r}{ Numeric vector of radii for the dilations. } \item{W}{ Window (object of class \code{"owin"}) inside which the areas will be computed, if \code{constrained=TRUE}. } \item{\dots}{ Arguments passed to \code{\link{distmap}} to control the pixel resolution, if \code{exact=FALSE}. } \item{constrained}{ Logical flag indicating whether areas should be restricted to the window \code{W}. 
} \item{exact}{ Logical flag indicating whether areas should be computed using analytic geometry (which is slower but more accurate). Currently available only when \code{X} is a point pattern. } } \details{ This function computes the areas of the dilations of \code{X} by each of the radii \code{r[i]}. Areas may also be computed inside a specified window \code{W}. The morphological dilation of a set \eqn{X} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x}{x} such that the distance from \eqn{x} to \eqn{X} is less than or equal to \eqn{r}. When \code{X} is a point pattern, the dilation by a distance \eqn{r} is the union of discs of radius \eqn{r} centred at the points of \code{X}. The argument \code{r} should be a vector of nonnegative numbers. If \code{exact=TRUE} and if \code{X} is a point pattern, then the areas are computed using analytic geometry, which is slower but much more accurate. Otherwise the computation is performed using \code{\link{distmap}}. To compute the dilated object itself, use \code{\link{dilation}}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{dilation}}, \code{\link{eroded.areas}} } \examples{ X <- runifpoint(10) a <- dilated.areas(X, c(0.1,0.2), W=square(1), exact=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/linearKinhom.Rd0000644000176200001440000001326213333543263015610 0ustar liggesusers\name{linearKinhom} \alias{linearKinhom} \title{ Inhomogeneous Linear K Function } \description{ Computes an estimate of the inhomogeneous linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearKinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. 
Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points, raised to \code{normpower}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"lppm"} or \code{"ppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.lppm}} or \code{\link{update.ppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{fitted.lppm}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity, when \code{lambda} is a fitted model. Supported only when \code{update=TRUE}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the inhomogeneous version of the linear \eqn{K} function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous \eqn{K} function \code{\link{linearK}}. 
If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{lambda} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). Each estimate is initially computed as \deqn{ \widehat K_{\rm inhom}(r) = \frac{1}{\mbox{length}(L)} \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/length(L)) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j])/(lambda(x[i]) * lambda(x[j])) } where \code{L} is the linear network, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j)}{e(x[i],x[j])} is a weight. If \code{correction="none"} then this weight is equal to 1, while if \code{correction="Ang"} the weight is \eqn{e(x_i,x_j,r) = 1/m(x_i, d_{ij})}{e(x[i],x[j],r) = 1/m(x[i],d[i,j])} where \eqn{m(u,t)} is the number of locations on the network that lie exactly \eqn{t} units distant from location \eqn{u} by the shortest path. If \code{normalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{length}(L)/\sum (1/\lambda(x_i)). }{ c = length(L)/sum[i] (1/lambda(x[i])). 
\item{add}{
    Logical. If \code{TRUE}, superimpose the graphics
    over the current plot.
    If \code{FALSE}, generate a new plot.
  }
} } \details{ This is the plot method for class \code{"linnet"}. } \value{ An (invisible) object of class \code{"owin"} giving the bounding box of the network. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{linnet}} } \examples{ plot(simplenet) } \keyword{spatial} spatstat/man/ssf.Rd0000644000176200001440000000307013333543264013760 0ustar liggesusers\name{ssf} \alias{ssf} \title{ Spatially Sampled Function } \description{ Create an object that represents a spatial function which has been evaluated or sampled at an irregular set of points. } \usage{ ssf(loc, val) } \arguments{ \item{loc}{ The spatial locations at which the function has been evaluated. A point pattern (object of class \code{"ppp"}). } \item{val}{ The function values at these locations. A numeric vector with one entry for each point of \code{loc}, or a data frame with one row for each point of \code{loc}. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. An example would be a spatial covariate that has only been measured at certain locations. An object of this class also inherits the class \code{"ppp"}, and is essentially the same as a marked point pattern, except for the class membership which enables it to be handled in a different way. There are methods for \code{plot}, \code{print} etc; see \code{\link{plot.ssf}} and \code{\link{methods.ssf}}. Use \code{\link[spatstat]{unmark}} to extract only the point locations, and \code{\link{marks.ssf}} to extract only the function values. } \value{ Object of class \code{"ssf"}. } \author{ \adrian } \seealso{ \code{\link{plot.ssf}}, \code{\link{methods.ssf}}, \code{\link{Smooth.ssf}}, \code{\link{with.ssf}}, \code{\link{[.ssf}}. 
} \examples{ ssf(cells, nndist(cells, k=1:3)) } \keyword{spatial} \keyword{datagen} spatstat/man/Ops.msr.Rd0000644000176200001440000000300513333543262014522 0ustar liggesusers\name{Ops.msr} \alias{Ops.msr} \title{Arithmetic Operations on Measures} \description{ These group generic methods for the class \code{"msr"} allow the arithmetic operators \code{+}, \code{-}, \code{*} and \code{/} to be applied directly to measures. } \usage{ ## S3 methods for group generics have prototypes: \special{Ops(e1, e2)} %NAMESPACE S3method("Ops", "msr") } \arguments{ \item{e1, e2}{objects of class \code{"msr"}.} } \details{ Arithmetic operators on a measure \code{A} are only defined in some cases. The arithmetic operator is effectively applied to the value of \code{A(W)} for every spatial domain \code{W}. If the result is a measure, then this operation is valid. If \code{A} is a measure (object of class \code{"msr"}) then the operations \code{-A} and \code{+A} are defined. If \code{A} and \code{B} are measures with the same dimension (i.e. both are scalar-valued, or both are \code{k}-dimensional vector-valued) then \code{A + B} and \code{A - B} are defined. If \code{A} is a measure and \code{z} is a numeric value, then \code{A * z} and \code{A / z} are defined, and \code{z * A} is defined. } \value{ Another measure (object of class \code{"msr"}). } \seealso{ \code{\link{with.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp -rp 2 * rp rp /2 rp - rp rr <- residuals(fit, type="raw") rp - rr } \author{ \spatstatAuthors. 
If \code{X} is a split point pattern, then for each of the
component point patterns (taken separately) we test
the null hypothesis of Complete Spatial Randomness,
then combine the result into a single test.
} \examples{ data(humberside) qH <- quadrat.test(split(humberside), 2, 3) plot(qH) qH } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat/man/by.im.Rd0000644000176200001440000000325713333543262014210 0ustar liggesusers\name{by.im} \alias{by.im} \title{Apply Function to Image Broken Down by Factor} \description{ Splits a pixel image into sub-images and applies a function to each sub-image. } \usage{ \method{by}{im}(data, INDICES, FUN, ...) } \arguments{ \item{data}{A pixel image (object of class \code{"im"}).} \item{INDICES}{Grouping variable. Either a tessellation (object of class \code{"tess"}) or a factor-valued pixel image. } \item{FUN}{Function to be applied to each sub-image of \code{data}.} \item{\dots}{Extra arguments passed to \code{FUN}.} } \details{ This is a method for the generic function \code{\link{by}} for pixel images (class \code{"im"}). The pixel image \code{data} is first divided into sub-images according to \code{INDICES}. Then the function \code{FUN} is applied to each subset. The results of each computation are returned in a list. The grouping variable \code{INDICES} may be either \itemize{ \item a tessellation (object of class \code{"tess"}). Each tile of the tessellation delineates a subset of the spatial domain. \item a pixel image (object of class \code{"im"}) with factor values. The levels of the factor determine subsets of the spatial domain. } } \value{ A list containing the results of each evaluation of \code{FUN}. 
} \seealso{ \code{\link{split.im}}, \code{\link{tess}}, \code{\link{im}} } \examples{ W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) # mean pixel value in each subset unlist(by(X, Y, mean)) # trimmed mean unlist(by(X, Y, mean, trim=0.05)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/closepairs.Rd0000644000176200001440000001342413376755257015353 0ustar liggesusers\name{closepairs} \alias{closepairs} \alias{crosspairs} \alias{closepairs.ppp} \alias{crosspairs.ppp} \alias{closepaircounts} \alias{crosspaircounts} \title{ Close Pairs of Points } \description{ Low-level functions to find all close pairs of points. } \usage{ closepaircounts(X, r) crosspaircounts(X, Y, r) closepairs(X, rmax, \dots) \method{closepairs}{ppp}(X, rmax, twice=TRUE, what=c("all","indices","ijd"), distinct=TRUE, neat=TRUE, periodic=FALSE, \dots) crosspairs(X, Y, rmax, \dots) \method{crosspairs}{ppp}(X, Y, rmax, what=c("all", "indices", "ijd"), \dots) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{r,rmax}{ Maximum distance between pairs of points to be counted as close pairs. } \item{twice}{ Logical value indicating whether all ordered pairs of close points should be returned. If \code{twice=TRUE} (the default), each pair will appear twice in the output, as \code{(i,j)} and again as \code{(j,i)}. If \code{twice=FALSE}, then each pair will appear only once, as the pair \code{(i,j)} with \code{i < j}. } \item{what}{ String specifying the data to be returned for each close pair of points. If \code{what="all"} (the default) then the returned information includes the indices \code{i,j} of each pair, their \code{x,y} coordinates, and the distance between them. If \code{what="indices"} then only the indices \code{i,j} are returned. If \code{what="ijd"} then the indices \code{i,j} and the distance \code{d} are returned. 
That is, for each point
\code{X[i]}, it counts the number of
points \code{Y[j]} such that \code{d(X[i],Y[j]) <= r}.
\code{crosspairs(X,Y,rmax)} identifies all pairs of neighbours
\code{(X[i], Y[j])} between the patterns \code{X} and \code{Y},
and returns them.
\code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{nndist}}, \code{\link{nncross}}, \code{\link{applynbd}}, \code{\link{markstat}} for functions which use these capabilities. } \examples{ a <- closepaircounts(cells, 0.1) sum(a) Y <- split(amacrine) b <- crosspaircounts(Y$on, Y$off, 0.1) d <- closepairs(cells, 0.1) e <- crosspairs(Y$on, Y$off, 0.1) } \keyword{spatial} \keyword{math} spatstat/man/polartess.Rd0000644000176200001440000000712113443161042015172 0ustar liggesusers\name{polartess} \alias{polartess} \title{ Tessellation Using Polar Coordinates } \description{ Create a tessellation with tiles defined by polar coordinates (radius and angle). } \usage{ polartess(W, \dots, nradial = NULL, nangular = NULL, radii = NULL, angles = NULL, origin = NULL, sep = "x") } \arguments{ \item{W}{ A window (object of class \code{"owin"}) or anything that can be coerced to a window using \code{\link{as.owin}}, such as a point pattern. } \item{\dots}{ Ignored. } \item{nradial}{ Number of \emph{tiles} in the radial direction. A single integer. Ignored if \code{radii} is given. } \item{nangular}{ Number of \emph{tiles} in the angular coordinate. A single integer. Ignored if \code{angles} is given. } \item{radii}{ The numeric values of the radii, defining the tiles in the radial direction. A numeric vector, of length at least 2, containing nonnegative numbers in increasing order. The value \code{Inf} is permitted. } \item{angles}{ The numeric values of the angles defining the tiles in the angular coordinate. A numeric vector, of length at least 2, in increasing order, containing angles in radians. } \item{origin}{ Location to be used as the origin of the polar coordinates. 
Either a numeric vector of length 2 giving the spatial location of the origin, or one of the strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} indicating the location in the window. } \item{sep}{ Argument passed to \code{\link{intersect.tess}} specifying the character string to be used as a separator when forming the names of the tiles. } } \details{ A tessellation will be formed from tiles defined by intervals in the polar coordinates \eqn{r} (radial distance from the origin) or \eqn{\theta} (angle from the horizontal axis) or both. These tiles look like the cells on a dartboard. If the argument \code{radii} is given, tiles will be demarcated by circles centred at the origin, with the specified radii. If \code{radii} is absent but \code{nradial} is given, then \code{radii} will default to a sequence of \code{nradial+1} radii equally spaced from zero to the maximum possible radius. If neither \code{radii} nor \code{nradial} are given, the tessellation will not include circular arc boundaries. If the argument \code{angles} is given, tiles will be demarcated by lines emanating from the origin at the specified angles. The angular values can be any real numbers; they will be interpreted as angles in radians modulo \code{2*pi}, but they must be an increasing sequence of numbers. If \code{angles} is absent but \code{nangular} is given, then \code{angles} will default to a sequence of \code{nangular+1} angles equally spaced from 0 to \code{2*pi}. If neither \code{angles} nor \code{nangular} are given, the tessellation will not include linear boundaries. } \value{ A tessellation (object of class \code{"tess"}). } \author{ \adrian. 
} \seealso{ \code{\link{intersect.tess}} To construct other kinds of tessellations, see \code{\link{tess}}, \code{\link{quadrats}}, \code{\link{hextess}}, \code{\link{venn.tess}}, \code{\link{dirichlet}}, \code{\link{delaunay}}, \code{\link{quantess}} and \code{\link{rpoislinetess}}. } \examples{ Y <- c(2.8, 1.5) plot(polartess(letterR, nangular=6, radii=(0:4)/2, origin=Y), do.col=TRUE) } \keyword{spatial} \keyword{manip} spatstat/man/markconnect.Rd0000644000176200001440000001460413617232327015476 0ustar liggesusers\name{markconnect} \alias{markconnect} \title{ Mark Connection Function } \description{ Estimate the marked connection function of a multitype point pattern. } \usage{ markconnect(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to \code{\link{markcorr}}, or passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. 
\item{normalise}{
    If \code{TRUE}, normalise the mark connection function by dividing
    it by \eqn{p_i p_j}{p[i]*p[j]}, the estimated probability
    that randomly-selected points will have marks
    \eqn{i} and \eqn{j}.
  }
It must be a multitype point pattern (a marked point pattern with factor-valued marks). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[i,j](r)} is estimated. There is a sensible default. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks) and is slow for complicated polygons. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } \item{none}{No edge correction.} } The option \code{correction="none"} should only be used if the number of data points is extremely large (otherwise an edge correction is needed to correct bias). Note that the estimator assumes the process is stationary (spatially homogeneous). The mark connection function is estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Stoyan, D. and Stoyan, H. 
(1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ Multitype pair correlation \code{\link{pcfcross}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}. Use \code{\link{alltypes}} to compute the mark connection functions between all pairs of types. Mark correlation \code{\link{markcorr}} and mark variogram \code{\link{markvario}} for numeric-valued marks. } \examples{ # Hughes' amacrine data # Cells marked as 'on'/'off' data(amacrine) M <- markconnect(amacrine, "on", "off") plot(M) # Compute for all pairs of types at once plot(alltypes(amacrine, markconnect)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/Emark.Rd0000644000176200001440000001426013521235726014227 0ustar liggesusers\name{Emark} \alias{Emark} \alias{Vmark} \title{ Diagnostics for random marking } \description{ Estimate the summary functions \eqn{E(r)} and \eqn{V(r)} for a marked point pattern, proposed by Schlather et al (2004) as diagnostics for dependence between the points and the marks. } \usage{ Emark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) Vmark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. The pattern should have numeric marks. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. 
Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{TRUE}, normalise the estimate of \eqn{E(r)} or \eqn{V(r)} so that it would have value equal to 1 if the marks are independent of the points. } } \value{ If \code{marks(X)} is a numeric vector, the result is an object of class \code{"fv"} (see \code{\link{fv.object}}). If \code{marks(X)} is a data frame, the result is a list of objects of class \code{"fv"}, one for each column of marks. An object of class \code{"fv"} is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} has been estimated } \item{theo}{the theoretical, constant value of \eqn{E(r)} or \eqn{V(r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{E(r)} or \eqn{V(r)} obtained by the edge corrections named. } \details{ For a marked point process, Schlather et al (2004) defined the functions \eqn{E(r)} and \eqn{V(r)} to be the conditional mean and conditional variance of the mark attached to a typical random point, given that there exists another random point at a distance \eqn{r} away from it. More formally, \deqn{ E(r) = E_{0u}[M(0)] }{ E(r) = E[0u] M(0) } and \deqn{ V(r) = E_{0u}[(M(0)-E(u))^2] }{ V(r) = E[0u]((M(0)-E(u))^2) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0)} denotes the mark attached to the point \eqn{0}. These functions may serve as diagnostics for dependence between the points and the marks. 
If the points and marks are independent, then \eqn{E(r)} and \eqn{V(r)} should be constant (not depending on \eqn{r}). See Schlather et al (2004). The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern with numeric marks. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. 
} } } \references{ Schlather, M. and Ribeiro, P. and Diggle, P. (2004) Detecting dependence between marks and locations of marked point processes. \emph{Journal of the Royal Statistical Society, series B} \bold{66} (2004) 79-83. } \seealso{ Mark correlation \code{\link{markcorr}}, mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ plot(Emark(spruces)) E <- Emark(spruces, method="density", kernel="epanechnikov") plot(Vmark(spruces)) plot(Emark(finpines)) V <- Vmark(finpines) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/quadrats.Rd0000644000176200001440000000612413433744646015024 0ustar liggesusers\name{quadrats} \alias{quadrats} \title{Divide Region into Quadrats} \description{ Divides window into rectangular quadrats and returns the quadrats as a tessellation. } \usage{ quadrats(X, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL, keepempty=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or anything that can be coerced to a window using \code{\link{as.owin}}, such as a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{xbreaks}{ Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{keepempty}{ Logical value indicating whether to delete or retain empty quadrats. See Details. } } \details{ If the window \code{X} is a rectangle, it is divided into an \code{nx * ny} grid of rectangular tiles or `quadrats'. 
If \code{X} is not a rectangle, then the bounding rectangle of \code{X} is first divided into an \code{nx * ny} grid of rectangular tiles, and these tiles are then intersected with the window \code{X}. The resulting tiles are returned as a tessellation (object of class \code{"tess"}) which can be plotted and used in other analyses. If \code{xbreaks} is given, it should be a numeric vector giving the \eqn{x} coordinates of the quadrat boundaries. If it is not given, it defaults to a sequence of \code{nx+1} values equally spaced over the range of \eqn{x} coordinates in the window \code{Window(X)}. Similarly if \code{ybreaks} is given, it should be a numeric vector giving the \eqn{y} coordinates of the quadrat boundaries. It defaults to a vector of \code{ny+1} values equally spaced over the range of \eqn{y} coordinates in the window. The lengths of \code{xbreaks} and \code{ybreaks} may be different. By default (if \code{keepempty=FALSE}), any rectangular tile which does not intersect the window \code{X} is ignored, and only the non-empty intersections are treated as quadrats, so the tessellation may consist of fewer than \code{nx * ny} tiles. If \code{keepempty=TRUE}, empty intersections are retained, and the tessellation always contains exactly \code{nx * ny} tiles, some of which may be empty. } \value{ A tessellation (object of class \code{"tess"}) as described under \code{\link{tess}}. } \examples{ W <- square(10) Z <- quadrats(W, 4, 5) plot(Z) data(letterR) plot(quadrats(letterR, 5, 7)) } \seealso{ For calculations using quadrats, see \code{\link{quadratcount}}, \code{\link{quadrat.test}}, \code{\link{quadratresample}} For other kinds of tessellations, see \code{\link{tess}}, \code{\link{hextess}}, \code{\link{venn.tess}}, \code{\link{polartess}}, \code{\link{dirichlet}}, \code{\link{delaunay}}, \code{\link{rpoislinetess}} and \code{\link{quantess}}. 
} \author{\adrian and \rolf } \keyword{utilities} \keyword{datagen} spatstat/man/quadratcount.Rd0000644000176200001440000001357413333543264015711 0ustar liggesusers\name{quadratcount} \alias{quadratcount} \alias{quadratcount.ppp} \alias{quadratcount.splitppp} \title{Quadrat counting for a point pattern} \description{ Divides window into quadrats and counts the numbers of points in each quadrat. } \usage{ quadratcount(X, \dots) \method{quadratcount}{ppp}(X, nx=5, ny=nx, \dots, xbreaks=NULL, ybreaks=NULL, tess=NULL) \method{quadratcount}{splitppp}(X, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or a split point pattern (object of class \code{"splitppp"}). } \item{nx,ny}{ Numbers of rectangular quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{\dots}{Additional arguments passed to \code{quadratcount.ppp}.} \item{xbreaks}{ Numeric vector giving the \eqn{x} coordinates of the boundaries of the rectangular quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Numeric vector giving the \eqn{y} coordinates of the boundaries of the rectangular quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) determining the quadrats. Incompatible with \code{nx,ny,xbreaks,ybreaks}. } } \value{ The value of \code{quadratcount.ppp} is a contingency table containing the number of points in each quadrat. The table is also an object of the special class \code{"quadratcount"} and there is a plot method for this class. The value of \code{quadratcount.splitppp} is a list of such contingency tables, each containing the quadrat counts for one of the component point patterns in \code{X}. This list also has the class \code{"solist"} which has print and plot methods. } \details{ Quadrat counting is an elementary technique for analysing spatial point patterns. See Diggle (2003). 
\bold{If \code{X} is a point pattern}, then by default, the window containing the point pattern \code{X} is divided into an \code{nx * ny} grid of rectangular tiles or `quadrats'. (If the window is not a rectangle, then these tiles are intersected with the window.) The number of points of \code{X} falling in each quadrat is counted. These numbers are returned as a contingency table. If \code{xbreaks} is given, it should be a numeric vector giving the \eqn{x} coordinates of the quadrat boundaries. If it is not given, it defaults to a sequence of \code{nx+1} values equally spaced over the range of \eqn{x} coordinates in the window \code{Window(X)}. Similarly if \code{ybreaks} is given, it should be a numeric vector giving the \eqn{y} coordinates of the quadrat boundaries. It defaults to a vector of \code{ny+1} values equally spaced over the range of \eqn{y} coordinates in the window. The lengths of \code{xbreaks} and \code{ybreaks} may be different. Alternatively, quadrats of any shape may be used. The argument \code{tess} can be a tessellation (object of class \code{"tess"}) whose tiles will serve as the quadrats. The algorithm counts the number of points of \code{X} falling in each quadrat, and returns these counts as a contingency table. The return value is a \code{table} which can be printed neatly. The return value is also a member of the special class \code{"quadratcount"}. Plotting the object will display the quadrats, annotated by their counts. See the examples. To perform a chi-squared test based on the quadrat counts, use \code{\link{quadrat.test}}. To calculate an estimate of intensity based on the quadrat counts, use \code{\link{intensity.quadratcount}}. To extract the quadrats used in a \code{quadratcount} object, use \code{\link{as.tess}}. 
\bold{If \code{X} is a split point pattern} (object of class \code{"splitppp"}) then quadrat counting will be performed on each of the component point patterns, and the resulting contingency tables will be returned in a list. This list can be printed or plotted. Marks attached to the points are ignored by \code{quadratcount.ppp}. To obtain a separate contingency table for each type of point in a multitype point pattern, first separate the different points using \code{\link{split.ppp}}, then apply \code{quadratcount.splitppp}. See the Examples. } \note{ To perform a chi-squared test based on the quadrat counts, use \code{\link{quadrat.test}}. } \section{Warning}{ If \code{Q} is the result of \code{quadratcount} using rectangular tiles, then \code{as.numeric(Q)} extracts the counts \bold{in the wrong order}. To obtain the quadrat counts in the same order as the tiles of the corresponding tessellation would be listed, use \code{as.vector(t(Q))}, which works in all cases. } \seealso{ \code{\link{plot.quadratcount}}, \code{\link{intensity.quadratcount}}, \code{\link{quadrats}}, \code{\link{quadrat.test}}, \code{\link{tess}}, \code{\link{hextess}}, \code{\link{quadratresample}}, \code{\link{miplot}} } \references{ Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 2003. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. 
} \examples{ X <- runifpoint(50) quadratcount(X) quadratcount(X, 4, 5) quadratcount(X, xbreaks=c(0, 0.3, 1), ybreaks=c(0, 0.4, 0.8, 1)) qX <- quadratcount(X, 4, 5) # plotting: plot(X, pch="+") plot(qX, add=TRUE, col="red", cex=1.5, lty=2) # irregular window data(humberside) plot(humberside) qH <- quadratcount(humberside, 2, 3) plot(qH, add=TRUE, col="blue", cex=1.5, lwd=2) # multitype - split plot(quadratcount(split(humberside), 2, 3)) # quadrats determined by tessellation: B <- dirichlet(runifpoint(6)) qX <- quadratcount(X, tess=B) plot(X, pch="+") plot(qX, add=TRUE, col="red", cex=1.5, lty=2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/clusterfit.Rd0000644000176200001440000001306013571674202015352 0ustar liggesusers\name{clusterfit} \alias{clusterfit} \title{Fit Cluster or Cox Point Process Model via Minimum Contrast} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern by the Method of Minimum Contrast. } \usage{ clusterfit(X, clusters, lambda = NULL, startpar = NULL, \dots, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), statistic = NULL, statargs = NULL, algorithm="Nelder-Mead", verbose=FALSE) } \arguments{ \item{X}{ Data to which the cluster or Cox model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{clusters}{ Character string determining the cluster or Cox model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. Either a single numeric specifying a constant intensity, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. 
} \item{startpar}{ Vector of initial values of the parameters of the point process model. If \code{X} is a point pattern, sensible defaults are used. Otherwise rather arbitrary values are used. } \item{q,p}{ Optional. Exponents for the contrast criterion. See \code{\link{mincontrast}}. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. See \code{\link{mincontrast}}. } \item{ctrl}{ Optional. Named list containing values of the parameters \code{q,p,rmin,rmax}. } \item{\dots}{ Additional arguments passed to \code{\link{mincontrast}}. } \item{statistic}{ Optional. Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. See the argument \code{method} of \code{\link[stats]{optim}}. } \item{verbose}{ Logical value indicating whether to print detailed progress reports for debugging purposes. } } \details{ This function fits the clustering parameters of a cluster or Cox point process model by the Method of Minimum Contrast, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. If \code{statistic="pcf"} (or \code{X} appears to be an estimated pair correlation function) then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function. If \code{X} is a point pattern of class \code{"ppp"} an estimate of the summary statistic specified by \code{statistic} (defaults to \code{"K"}) is first computed before minimum contrast estimation is carried out as described above. In this case the argument \code{statargs} can be used for controlling the summary statistic estimation. 
The precise algorithm for computing the summary statistic depends on whether the intensity specification (\code{lambda}) is: \describe{ \item{homogeneous:}{ If \code{lambda} is \code{NULL} or a single numeric the pattern is considered homogeneous and either \code{\link{Kest}} or \code{\link{pcf}} is invoked. In this case \code{lambda} is \bold{not} used for anything when estimating the summary statistic. } \item{inhomogeneous:}{ If \code{lambda} is a pixel image (object of class \code{"im"}), a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} the pattern is considered inhomogeneous. In this case either \code{\link{Kinhom}} or \code{\link{pcfinhom}} is invoked with \code{lambda} as an argument. } } After the clustering parameters of the model have been estimated by minimum contrast \code{lambda} (if non-null) is used to compute the additional model parameter \eqn{\mu}{mu}. The algorithm parameters \code{q,p,rmax,rmin} are described in the help for \code{\link{mincontrast}}. They may be provided either as individually-named arguments, or as entries in the list \code{ctrl}. The individually-named arguments \code{q,p,rmax,rmin} override the entries in the list \code{ctrl}. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. See \code{\link{mincontrast}}. } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007). An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63} (2007) 252--258. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{kppm}} } \examples{ fit <- clusterfit(redwood, "Thomas") fit if(interactive()){ plot(fit) } K <- Kest(redwood) fit2 <- clusterfit(K, "MatClust") } \keyword{spatial} \keyword{models} spatstat/man/localKinhom.Rd0000644000176200001440000001327513503620205015423 0ustar liggesusers\name{localKinhom} \alias{localKinhom} \alias{localLinhom} \title{Inhomogeneous Neighbourhood Density Function} \description{ Computes spatially-weighted versions of the local \eqn{K}-function or \eqn{L}-function. } \usage{ localKinhom(X, lambda, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL, update=TRUE, leaveoneout=TRUE) localLinhom(X, lambda, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. 
A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambda}, if \code{lambda} is missing. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } } \details{ The functions \code{localKinhom} and \code{localLinhom} are inhomogeneous or weighted versions of the neighbourhood density function implemented in \code{\link{localK}} and \code{\link{localL}}. Given a spatial point pattern \code{X}, the inhomogeneous neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ L[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the inhomogeneous L function (see \code{\link{Linhom}}). 
By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. 
} \examples{ data(ponderosa) X <- ponderosa # compute all the local L functions L <- localLinhom(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) } \author{ Mike Kuhn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmhmodel.ppm.Rd0000644000176200001440000001102013333543264015561 0ustar liggesusers\name{rmhmodel.ppm} \alias{rmhmodel.ppm} \title{Interpret Fitted Model for Metropolis-Hastings Simulation.} \description{ Converts a fitted point process model into a format that can be used to simulate the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{ppm}(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{w}{ Optional. Window in which the simulations should be generated. } \item{\dots}{Ignored.} \item{verbose}{ Logical flag indicating whether to print progress reports while the model is being converted. } \item{project}{Logical flag indicating what to do if the fitted model does not correspond to a valid point process. See Details.} \item{control}{ Parameters determining the iterative behaviour of the simulation algorithm. Passed to \code{\link{rmhcontrol}}. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(model)}. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. 
} \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.ppm} is the method for the class \code{"ppm"} of fitted point process models. The argument \code{model} should be a fitted point process model (object of class \code{"ppm"}) typically obtained from the model-fitting function \code{\link{ppm}}. This will be converted into an object of class \code{"rmhmodel"}. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. Not all fitted point process models obtained from \code{\link{ppm}} can be simulated. We have not yet implemented simulation code for the \code{\link{LennardJones}} and \code{\link{OrdThresh}} models. It is also possible that a fitted point process model obtained from \code{\link{ppm}} may not correspond to a valid point process. For example a fitted model with the \code{\link{Strauss}} interpoint interaction may have any value of the interaction parameter \eqn{\gamma}{gamma}; however the Strauss process is not well-defined for \eqn{\gamma > 1}{gamma > 1} (Kelly and Ripley, 1976). The argument \code{project} determines what to do in such cases. If \code{project=FALSE}, a fatal error will occur. If \code{project=TRUE}, the fitted model parameters will be adjusted to the nearest values which do correspond to a valid point process. For example a Strauss process with \eqn{\gamma > 1}{gamma > 1} will be projected to a Strauss process with \eqn{\gamma = 1}{gamma = 1}, equivalent to a Poisson process. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. 
(1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. } \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.list}}, \code{\link{rmhmodel.default}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \rmhInteractionsList. } \examples{ fit1 <- ppm(cells ~1, Strauss(0.07)) mod1 <- rmhmodel(fit1) fit2 <- ppm(cells ~x, Geyer(0.07, 2)) mod2 <- rmhmodel(fit2) fit3 <- ppm(cells ~x, Hardcore(0.07)) mod3 <- rmhmodel(fit3) # Then rmh(mod1), etc } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/as.hyperframe.ppx.Rd0000644000176200001440000000452513333543262016543 0ustar liggesusers\name{as.hyperframe.ppx} \Rdversion{1.1} \alias{as.hyperframe.ppx} \alias{as.data.frame.ppx} \alias{as.matrix.ppx} \title{ Extract coordinates and marks of multidimensional point pattern } \description{ Given any kind of spatial or space-time point pattern, extract the coordinates and marks of the points. } \usage{ \method{as.hyperframe}{ppx}(x, ...) \method{as.data.frame}{ppx}(x, ...) \method{as.matrix}{ppx}(x, ...) } \arguments{ \item{x}{ A general multidimensional space-time point pattern (object of class \code{"ppx"}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"ppx"} (see \code{\link{ppx}}) represents a marked point pattern in multidimensional space and/or time. There may be any number of spatial coordinates, any number of temporal coordinates, and any number of mark variables. 
The individual marks may be atomic (numeric values, factor values, etc) or objects of any kind. The function \code{as.hyperframe.ppx} extracts the coordinates and the marks as a \code{"hyperframe"} (see \code{\link{hyperframe}}) with one row of data for each point in the pattern. This is a method for the generic function \code{\link{as.hyperframe}}. The function \code{as.data.frame.ppx} discards those mark variables which are not atomic values, and extracts the coordinates and the remaining marks as a \code{data.frame} with one row of data for each point in the pattern. This is a method for the generic function \code{\link{as.data.frame}}. Finally \code{as.matrix(x)} is equivalent to \code{as.matrix(as.data.frame(x))} for an object of class \code{"ppx"}. Be warned that, if there are any columns of non-numeric data (i.e. if there are mark variables that are factors), the result will be a matrix of character values. } \value{ A \code{hyperframe}, \code{data.frame} or \code{matrix} as appropriate. } \author{\adrian and \rolf } \seealso{ \code{\link{ppx}}, \code{\link{hyperframe}}, \code{\link{as.hyperframe}}. } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) as.data.frame(X) val <- runif(4) E <- lapply(val, function(s) { rpoispp(s) }) hf <- hyperframe(t=val, e=as.listof(E)) Z <- ppx(data=hf, domain=c(0,1)) as.hyperframe(Z) as.data.frame(Z) } \keyword{spatial} \keyword{manip} spatstat/man/lineardisc.Rd0000644000176200001440000000764113577325565015326 0ustar liggesusers\name{lineardisc} \alias{lineardisc} \alias{countends} \title{ Compute Disc of Given Radius in Linear Network } \description{ Computes the \sQuote{disc} of given radius and centre in a linear network. } \usage{ lineardisc(L, x = locator(1), r, plotit = TRUE, cols=c("blue", "red","green"), add=TRUE) countends(L, x = locator(1), r, toler=NULL, internal=list()) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). 
} \item{x}{ Location of centre of disc. Either a point pattern (object of class \code{"ppp"}) containing exactly 1 point, or a numeric vector of length 2. } \item{r}{ Radius of disc. } \item{plotit}{ Logical. Whether to plot the disc. } \item{add}{ Logical. If \code{add=TRUE} (the default), the disc will be plotted on the current plot frame. If \code{add=FALSE}, a new plot frame will be started, the entire network will be displayed, and then the disc will be plotted over this. } \item{cols}{ Colours for plotting the disc. A numeric or character vector of length 3 specifying the colours of the disc centre, disc lines and disc endpoints respectively. } \item{toler}{ Optional. Distance threshold for \code{countends}. See Details. There is a sensible default. } \item{internal}{Argument for internal use by the package.} } \details{ The \sQuote{disc} \eqn{B(u,r)} of centre \eqn{x} and radius \eqn{r} in a linear network \eqn{L} is the set of all points \eqn{u} in \eqn{L} such that the shortest path distance from \eqn{x} to \eqn{u} is less than or equal to \eqn{r}. This is a union of line segments contained in \eqn{L}. The \emph{relative boundary} of the disc \eqn{B(u,r)} is the set of points \eqn{v} such that the shortest path distance from \eqn{x} to \eqn{u} is \emph{equal} to \eqn{r}. The function \code{lineardisc} computes the disc of radius \eqn{r} and its relative boundary, optionally plots them, and returns them. The faster function \code{countends} simply counts the number of points in the relative boundary. Note that \code{countends} requires the linear network \code{L} to be given in the non-sparse matrix format (see the argument \code{sparse} in \code{\link{linnet}} or \code{\link{as.linnet}}) while \code{lineardisc} accepts both sparse and non-sparse formats. The optional threshold \code{toler} is used to suppress numerical errors in \code{countends}. 
If the distance from \eqn{u} to a network vertex \eqn{v} is between \code{r-toler} and \code{r+toler}, the vertex will be treated as lying on the relative boundary. } \value{ The value of \code{lineardisc} is a list with two entries: \item{lines }{Line segment pattern (object of class \code{"psp"}) representing the interior disc} \item{endpoints}{Point pattern (object of class \code{"ppp"}) representing the relative boundary of the disc. } The value of \code{countends} is an integer giving the number of points in the relative boundary. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{linnet}} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. } \examples{ # letter 'A' v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) plot(letterA) lineardisc(letterA, c(0,3), 1.6) # count the endpoints countends(letterA, c(0,3), 1.6) # cross-check (slower) en <- lineardisc(letterA, c(0,3), 1.6, plotit=FALSE)$endpoints npoints(en) } \keyword{spatial} spatstat/man/dclf.progress.Rd0000644000176200001440000001340613333543263015743 0ustar liggesusers\name{dclf.progress} \alias{dclf.progress} \alias{mad.progress} \alias{mctest.progress} \title{ Progress Plot of Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Diggle-Cressie-Loosmore-Ford test or the Maximum Absolute Deviation test for a spatial point pattern. 
} \usage{ dclf.progress(X, \dots) mad.progress(X, \dots) mctest.progress(X, fun = Lest, \dots, exponent = 1, nrank = 1, interpolate = FALSE, alpha, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{mctest.progress} or to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify one-sided or two-sided envelopes, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{alpha}{ Optional. The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. 
} } \details{ The Diggle-Cressie-Loosmore-Ford test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dclf.progress} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. Similarly \code{mad.progress} performs \code{\link{mad.test}} using all possible intervals and returns the test statistic and critical value. More generally, \code{mctest.progress} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the Monte Carlo acceptance region (grey shading). The significance level for the Monte Carlo test is \code{nrank/(nsim+1)}. 
Note that \code{nsim} defaults to 99, so if the values of \code{nrank} and \code{nsim} are not given, the default is a test with significance level 0.01. If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. } \author{ \adrian , Andrew Hardegen, Tom Lawrence, Gopal Nair and Robin Milne. } \seealso{ \code{\link{dclf.test}} and \code{\link{mad.test}} for the tests. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. } \examples{ plot(dclf.progress(cells, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat/man/Poisson.Rd0000644000176200001440000000345313333543262014622 0ustar liggesusers\name{Poisson} \alias{Poisson} \title{Poisson Point Process Model} \description{ Creates an instance of the Poisson point process model which can then be fitted to point pattern data. } \usage{ Poisson() } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Poisson point process (namely, there are no interactions). 
} \details{ The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument \code{interaction} of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Poisson process is provided by the value of the function \code{Poisson}. This works for all types of Poisson processes including multitype and nonstationary Poisson processes. } \seealso{ \code{\link{ppm}}, \code{\link{Strauss}} } \examples{ ppm(nztrees ~1, Poisson()) # fit the stationary Poisson process to 'nztrees' # no edge correction needed lon <- longleaf \testonly{ lon <- lon[seq(1, npoints(lon), by=50)] } longadult <- unmark(subset(lon, marks >= 30)) ppm(longadult ~ x, Poisson()) # fit the nonstationary Poisson process # with intensity lambda(x,y) = exp( a + bx) # trees marked by species lans <- lansing \testonly{ lans <- lans[seq(1, npoints(lans), by=30)] } ppm(lans ~ marks, Poisson()) # fit stationary marked Poisson process # with different intensity for each species \dontrun{ ppm(lansing ~ marks * polynom(x,y,3), Poisson()) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \testonly{ # equivalent functionality - smaller dataset ppm(amacrine ~ marks * polynom(x,y,2), Poisson()) } } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/fasp.object.Rd0000644000176200001440000000621413333543263015365 0ustar liggesusers\name{fasp.object} \alias{fasp.object} %DoNotExport \title{Function Arrays for Spatial Patterns} \description{ A class \code{"fasp"} to represent a \dQuote{matrix} of functions, amenable to plotting as a matrix of plot panels. } \details{ An object of this class is a convenient way of storing (and later plotting, editing, etc) a set of functions \eqn{f_{i,j}(r)}{f[i,j](r)} of a real argument \eqn{r}, defined for each possible pair \eqn{(i,j)} of indices \eqn{1 \le i,j \le n}{1 <= i,j <= n}. 
We may think of this as a matrix or array of functions \eqn{f_{i,j}}{f[i,j]}. Function arrays are particularly useful in the analysis of a multitype point pattern (a point pattern in which the points are identified as belonging to separate types). We may want to compute a summary function for the points of type \eqn{i} only, for each of the possible types \eqn{i}. This produces a \eqn{1 \times m}{1 * m} array of functions. Alternatively we may compute a summary function for each possible pair of types \eqn{(i,j)}. This produces an \eqn{m \times m}{m * m} array of functions. For multitype point patterns the command \code{\link{alltypes}} will compute arrays of summary functions for each possible type or for each possible pair of types. The function \code{\link{alltypes}} returns an object of class \code{"fasp"}. An object of class \code{"fasp"} is a list containing at least the following components: \describe{ \item{fns}{ A list of data frames, each representing one of the functions. } \item{which}{ A matrix representing the spatial arrangement of the functions. If \code{which[i,j] = k} then the function represented by \code{fns[[k]]} should be plotted in the panel at position \eqn{(i,j)}. If \code{which[i,j] = NA} then nothing is plotted in that position. } \item{titles}{ A list of character strings, providing suitable plotting titles for the functions. } \item{default.formulae}{ A list of default formulae for plotting each of the functions. } \item{title}{ A character string, giving a default title for the array when it is plotted. } } } \section{Functions available}{ There are methods for \code{plot}, \code{print} and \code{"["} for this class. The plot method displays the entire array of functions. The method \code{\link{[.fasp}} selects a sub-array using the natural indices \code{i,j}. The command \code{\link{eval.fasp}} can be used to apply a transformation to each function in the array, and to combine two arrays. 
} \seealso{ \code{\link{alltypes}}, \code{\link{plot.fasp}}, \code{\link{[.fasp}}, \code{\link{eval.fasp}} } \examples{ # multitype point pattern data(amacrine) GG <- alltypes(amacrine, "G") plot(GG) # select the row corresponding to cells of type "on" Gon <- GG["on", ] plot(Gon) # extract the G function for i = "on", j = "off" Gonoff <- GG["on", "off", drop=TRUE] # Fisher variance stabilising transformation GGfish <- eval.fasp(asin(sqrt(GG))) plot(GGfish) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/venn.tess.Rd0000644000176200001440000000331213433744646015117 0ustar liggesusers\name{venn.tess} \alias{venn.tess} \title{ Tessellation Delimited by Several Sets } \description{ Given a list of windows, construct the tessellation formed by all combinations of inclusion/exclusion of these windows. } \usage{ venn.tess(\dots, window = NULL) } \arguments{ \item{\dots}{ Sets which delimit the tessellation. Any number of windows (objects of class \code{"owin"}) or tessellations (objects of class \code{"tess"}). } \item{window}{ Optional. The bounding window of the resulting tessellation. If not specified, the default is the union of all the arguments \code{\dots}. } } \details{ The arguments \code{\dots} may be any number of windows. This function constructs a tessellation, like a Venn diagram, whose boundaries are made up of the boundaries of these sets. Each tile of the tessellation is defined by one of the possible combinations in which each set is either included or excluded. If the arguments \code{\dots} are named, then the resulting tiles will also have names, which identify the inclusion/exclusion combinations defining each tile. See the Examples. } \value{ A tessellation (object of class \code{"tess"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{intersect.tess}}. 
To construct other kinds of tessellations, see \code{\link{tess}}, \code{\link{quadrats}}, \code{\link{hextess}}, \code{\link{polartess}}, \code{\link{dirichlet}}, \code{\link{delaunay}}, \code{\link{quantess}} and \code{\link{rpoislinetess}}. } \examples{ V <- venn.tess(A=square(1), B=square(c(-0.5, 0.5)), window=square(c(-1,1.5))) V plot(V, do.labels=TRUE) } \keyword{spatial} \keyword{math} spatstat/man/predict.kppm.Rd0000644000176200001440000000277113333543264015574 0ustar liggesusers\name{predict.kppm} \alias{predict.kppm} \alias{fitted.kppm} \title{Prediction from a Fitted Cluster Point Process Model} \description{ Given a fitted cluster point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{kppm}(object, ...) \method{predict}{kppm}(object, ...) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link[stats]{fitted}} and \code{\link[stats]{predict}}. The argument \code{object} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.kppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.kppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. 
} \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{vcov.kppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ data(redwood) fit <- kppm(redwood ~ x, "Thomas") predict(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/rpoisline.Rd0000644000176200001440000000276013333543264015176 0ustar liggesusers\name{rpoisline} \alias{rpoisline} \title{Generate Poisson Random Line Process} \description{ Generate a random pattern of line segments obtained from the Poisson line process. } \usage{ rpoisline(lambda, win=owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \value{ A line segment pattern (an object of class \code{"psp"}). The result also has an attribute called \code{"lines"} (an object of class \code{"infline"} specifying the original infinite random lines) and an attribute \code{"linemap"} (an integer vector mapping the line segments to their parent lines). } \details{ This algorithm generates a realisation of the uniform Poisson line process, and clips it to the window \code{win}. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. 
} \seealso{ \code{\link{psp}} } \examples{ # uniform Poisson line process with intensity 10, # clipped to the unit square rpoisline(10) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/cdf.test.Rd0000644000176200001440000002620613571674202014706 0ustar liggesusers\name{cdf.test} \alias{cdf.test} \alias{cdf.test.ppm} \alias{cdf.test.lppm} \alias{cdf.test.lpp} \alias{cdf.test.ppp} \alias{cdf.test.slrm} \title{Spatial Distribution Test for Point Pattern or Point Process Model} \description{ Performs a test of goodness-of-fit of a point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov test, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or Anderson-Darling test. For non-Poisson models, a Monte Carlo test is used. } \usage{ cdf.test(...) \method{cdf.test}{ppp}(X, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE) \method{cdf.test}{ppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) \method{cdf.test}{lpp}(X, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE) \method{cdf.test}{lppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) \method{cdf.test}{slrm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, modelname=NULL, covname=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) or fitted spatial logistic regression (object of class \code{"slrm"}). } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. 
} \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link[stats]{ks.test}} (from the \pkg{stats} package) or \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package) to control the test. } \item{interpolate}{ Logical flag indicating whether to interpolate pixel images. If \code{interpolate=TRUE}, the value of the covariate at each point of \code{X} will be approximated by interpolating the nearby pixel values. If \code{interpolate=FALSE}, the nearest pixel value will be used. } \item{jitter}{ Logical flag. If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } \item{modelname,covname}{ Character strings giving alternative names for \code{model} and \code{covariate} to be used in labelling plot axes. } \item{nsim}{ Number of simulated realisations from the \code{model} to be used for the Monte Carlo test, when \code{model} is not a Poisson process. } \item{verbose}{ Logical value indicating whether to print progress reports when performing a Monte Carlo test. } } \details{ These functions perform a goodness-of-fit test of a Poisson or Gibbs point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test, the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is performed using these test statistics. 
The function \code{cdf.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"}), then \code{cdf.test(X, \dots)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. \item If \code{model} is a fitted spatial logistic regression (object of class \code{"slrm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using a classical goodness-of-fit test. Thus, you must nominate a spatial covariate for this test. If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. 
If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{stats::\link[stats]{ks.test}}, \code{goftest::\link[goftest]{cvm.test}} or \code{goftest::\link[goftest]{ad.test}}. This test was apparently first described (in the context of spatial data, and using Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson process, then a Monte Carlo test is performed, by generating \code{nsim} point patterns which are simulated realisations of the \code{model}, re-fitting the model to each simulated point pattern, and calculating the test statistic for each fitted model. The Monte Carlo \eqn{p} value is determined by comparing the simulated values of the test statistic with the value for the original data. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. 
The print method for this class gives an informative summary of the test outcome. The return value also belongs to the class \code{"cdftest"} for which there is a plot method \code{\link{plot.cdftest}}. The plot method displays the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, plotted against the value of the covariate. The argument \code{jitter} controls whether covariate values are randomly perturbed, in order to avoid ties. If the original data contains any ties in the covariate (i.e. points with equal values of the covariate), and if \code{jitter=FALSE}, then the Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}} will issue a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if \code{jitter=TRUE} each value of the covariate will be perturbed by adding a small random value. The perturbations are normally distributed with standard deviation equal to one hundredth of the range of values of the covariate. This prevents ties, and the \eqn{p}-value is still correct. There is a very slight loss of power. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link[stats]{ks.test}} for details. The return value can be printed to give an informative summary of the test. The value also belongs to the class \code{"cdftest"} for which there is a plot method. } \section{Warning}{ The outcome of the test involves a small amount of random variability, because (by default) the coordinates are randomly perturbed to avoid tied values. Hence, if \code{cdf.test} is executed twice, the \eqn{p}-values will not be exactly the same. To avoid this behaviour, set \code{jitter=FALSE}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{plot.cdftest}}, \code{\link{quadrat.test}}, \code{\link{berman.test}}, \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}}, \code{\link[goftest]{ad.test}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ op <- options(useFancyQuotes=FALSE) # test of CSR using x coordinate cdf.test(nztrees, "x") cdf.test(nztrees, "x", "cvm") cdf.test(nztrees, "x", "ad") # test of CSR using a function of x and y fun <- function(x,y){2* x + y} cdf.test(nztrees, fun) # test of CSR using an image covariate funimage <- as.im(fun, W=Window(nztrees)) cdf.test(nztrees, funimage) # fit inhomogeneous Poisson model and test model <- ppm(nztrees ~x) cdf.test(model, "x") if(interactive()) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X ~1) # fit correct nonuniform Poisson process fit1 <- ppm(X ~x) # test wrong model cdf.test(fit0, "x") # test right model cdf.test(fit1, "x") } # multitype point pattern cdf.test(amacrine, "x") yimage <- as.im(function(x,y){y}, W=Window(amacrine)) cdf.test(ppm(amacrine ~marks+y), yimage) options(op) } \keyword{htest} \keyword{spatial} spatstat/man/harmonise.im.Rd0000644000176200001440000000354113333543263015560 0ustar liggesusers\name{harmonise.im} \alias{harmonise.im} \alias{harmonize.im} \title{Make Pixel Images Compatible} \description{ Convert several pixel images to a common pixel raster. 
} \usage{ \method{harmonise}{im}(\dots) \method{harmonize}{im}(\dots) } \arguments{ \item{\dots}{ Any number of pixel images (objects of class \code{"im"}) or data which can be converted to pixel images by \code{\link{as.im}}. } } \details{ This function makes any number of pixel images compatible, by converting them all to a common pixel grid. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"im"}. At least one of the arguments \code{\dots} must be a pixel image. Some arguments may be windows (objects of class \code{"owin"}), functions (\code{function(x,y)}) or numerical constants. These will be converted to images using \code{\link{as.im}}. The common pixel grid is determined by inspecting all the pixel images in the argument list, computing the bounding box of all the images, then finding the image with the highest spatial resolution, and extending its pixel grid to cover the bounding box. The return value is a list with entries corresponding to the input arguments. If the arguments were named (\code{name=value}) then the return value also carries these names. If you just want to determine the appropriate pixel resolution, without converting the images, use \code{\link{commonGrid}}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are pixel images. } \author{\adrian and \rolf } \examples{ A <- setcov(square(1)) B <- function(x,y) { x } G <- density(runifpoint(42)) harmonise(X=A, Y=B, Z=G) } \seealso{ \code{\link{commonGrid}}, \code{\link{compatible.im}}, \code{\link{as.im}} } \keyword{spatial} \keyword{manip} spatstat/man/plot.im.Rd0000644000176200001440000004454613622670141014560 0ustar liggesusers\name{plot.im} \alias{plot.im} \alias{image.im} \title{Plot a Pixel Image} \description{ Plot a pixel image. 
} \usage{ \method{plot}{im}(x, \dots, main, add=FALSE, clipwin=NULL, col=NULL, valuesAreColours=NULL, log=FALSE, ncolours=256, gamma=1, ribbon=show.all, show.all=!add, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), riblab=NULL, colargs=list(), useRaster=NULL, workaround=FALSE, zap=1, do.plot=TRUE) \method{image}{im}(x, \dots, main, add=FALSE, clipwin=NULL, col=NULL, valuesAreColours=NULL, log=FALSE, ncolours=256, gamma=1, ribbon=show.all, show.all=!add, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), riblab=NULL, colargs=list(), useRaster=NULL, workaround=FALSE, zap=1, do.plot=TRUE) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"im"} (see \code{\link{im.object}}). } \item{\dots}{ Extra arguments passed to \code{\link[graphics]{image.default}} to control the plot. See Details. } \item{main}{Main title for the plot.} \item{add}{ Logical value indicating whether to superimpose the image on the existing plot (\code{add=TRUE}) or to initialise a new plot (\code{add=FALSE}, the default). } \item{clipwin}{ Optional. A window (object of class \code{"owin"}). Only this subset of the image will be displayed. } \item{col}{ Colours for displaying the pixel values. Either a character vector of colour values, an object of class \code{\link{colourmap}}, or a \code{function} as described under Details. } \item{valuesAreColours}{ Logical value. If \code{TRUE}, the pixel values of \code{x} are to be interpreted as colour values. } \item{log}{ Logical value. If \code{TRUE}, the colour map will be evenly-spaced on a logarithmic scale. } \item{ncolours}{ Integer. The default number of colours in the colour map for a real-valued image. } \item{gamma}{ Exponent for the gamma correction of the colours. A single positive number. } \item{ribbon}{ Logical flag indicating whether to display a ribbon showing the colour map. 
Default is \code{TRUE} for new plots and \code{FALSE} for added plots. } \item{show.all}{ Logical value indicating whether to display all plot elements including the main title and colour ribbon. Default is \code{TRUE} for new plots and \code{FALSE} for added plots. } \item{ribside}{ Character string indicating where to display the ribbon relative to the main image. } \item{ribsep}{ Factor controlling the space between the ribbon and the image. } \item{ribwid}{ Factor controlling the width of the ribbon. } \item{ribn}{ Number of different values to display in the ribbon. } \item{ribscale}{ Rescaling factor for tick marks. The values on the numerical scale printed beside the ribbon will be multiplied by this rescaling factor. } \item{ribargs}{ List of additional arguments passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} and \code{\link[grDevices]{axisTicks}} to control the display of the ribbon and its scale axis. These may override the \code{\dots} arguments. } \item{riblab}{ Text to be plotted in the margin near the ribbon. A character string or expression to be interpreted as text, or a list of arguments to be passed to \code{\link[graphics]{mtext}}. } \item{colargs}{ List of additional arguments passed to \code{col} if it is a function. } \item{useRaster}{ Logical value, passed to \code{\link[graphics]{image.default}}. Images are plotted using a bitmap raster if \code{useRaster=TRUE} or by drawing polygons if \code{useRaster=FALSE}. Bitmap raster display tends to produce better results, but is not supported on all graphics devices. The default is to use bitmap raster display if it is supported. } \item{workaround}{ Logical value, specifying whether to use a workaround to avoid a bug which occurs with some device drivers in \R, in which the image has the wrong spatial orientation. See the section on \bold{Image is Displayed in Wrong Spatial Orientation} below. } \item{zap}{ Noise threshold factor. 
A numerical value greater than or equal to 1. If the range of pixel values is less than \code{zap * .Machine$double.eps}, the image will be treated as constant. This avoids displaying images which should be constant but contain small numerical errors. } \item{do.plot}{ Logical value indicating whether to actually plot the image and colour ribbon. Setting \code{do.plot=FALSE} will simply return the colour map and the bounding box that were chosen for the plot. } } \value{ The colour map used. An object of class \code{"colourmap"}. Also has an attribute \code{"bbox"} giving a bounding box for the plot (containing the main colour image and the colour ribbon if plotted). If a ribbon was plotted, there is also an attribute \code{"bbox.legend"} giving a bounding box for the ribbon image. Text annotation occurs outside these bounding boxes. } \details{ This is the \code{plot} method for the class \code{"im"}. [It is also the \code{image} method for \code{"im"}.] The pixel image \code{x} is displayed on the current plot device, using equal scales on the \code{x} and \code{y} axes. If \code{ribbon=TRUE}, a legend will be plotted. The legend consists of a colour ribbon and an axis with tick-marks, showing the correspondence between the pixel values and the colour map. Arguments \code{ribside, ribsep, ribwid} control the placement of the colour ribbon. By default, the ribbon is placed at the right of the main image. This can be changed using the argument \code{ribside}. The width of the ribbon is \code{ribwid} times the size of the pixel image, where `size' means the larger of the width and the height. The distance separating the ribbon and the image is \code{ribsep} times the size of the pixel image. The ribbon contains the colours representing \code{ribn} different numerical values, evenly spaced between the minimum and maximum pixel values in the image \code{x}, rendered according to the chosen colour map. 
The argument \code{ribargs} controls the annotation of the colour ribbon. It is a list of arguments to be passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} and \code{\link[grDevices]{axisTicks}}. To plot the colour ribbon without the axis and tick-marks, use \code{ribargs=list(axes=FALSE)}. To ensure that the numerals or symbols printed next to the colour map are oriented horizontally, use \code{ribargs=list(las=1)}. To double the size of the numerals or symbols, use \code{ribargs=list(cex.axis=2)}. To control the number of tick-marks, use \code{ribargs=list(nint=N)} where \code{N} is the desired number of intervals (so there will be \code{N+1} tickmarks, subject to the vagaries of \R internal code). The argument \code{riblab} contains text that will be displayed in the margin next to the ribbon. The argument \code{ribscale} is used to rescale the numerical values printed next to the colour map, for convenience. For example if the pixel values in \code{x} range between 1000 and 4000, it would be sensible to use \code{ribscale=1/1000} so that the colour map tickmarks would be labelled 1 to 4. Normally the pixel values are displayed using the colours given in the argument \code{col}. This may be either \itemize{ \item an explicit colour map (an object of class \code{"colourmap"}, created by the command \code{\link{colourmap}}). This is the best way to ensure that when we plot different images, the colour maps are consistent. \item a character vector or integer vector that specifies a set of colours. The colour mapping will be stretched to match the range of pixel values in the image \code{x}. The mapping of pixel values to colours is determined as follows. \describe{ \item{logical-valued images:}{the values \code{FALSE} and \code{TRUE} are mapped to the colours \code{col[1]} and \code{col[2]} respectively. The vector \code{col} should have length 2. 
} \item{factor-valued images:}{the factor levels \code{levels(x)} are mapped to the entries of \code{col} in order. The vector \code{col} should have the same length as \code{levels(x)}. } \item{numeric-valued images:}{ By default, the range of pixel values in \code{x} is divided into \code{n = length(col)} equal subintervals, which are mapped to the colours in \code{col}. (If \code{col} was not specified, it defaults to a vector of 255 colours.) Alternatively if the argument \code{zlim} is given, it should be a vector of length 2 specifying an interval of real numbers. This interval will be used instead of the range of pixel values. The interval from \code{zlim[1]} to \code{zlim[2]} will be mapped to the colours in \code{col}. This facility enables the user to plot several images using a consistent colour map. Alternatively if the argument \code{breaks} is given, then this specifies the endpoints of the subintervals that are mapped to each colour. This is incompatible with \code{zlim}. The arguments \code{col} and \code{zlim} or \code{breaks} are then passed to the function \code{\link{image.default}}. For examples of the use of these arguments, see \code{\link{image.default}}. } } \item { a \code{function} in the \R language with an argument named \code{range} or \code{inputs}. If \code{col} is a function with an argument named \code{range}, and if the pixel values of \code{x} are numeric values, then the colour values will be determined by evaluating \code{col(range=range(x))}. The result of this evaluation should be a character vector containing colour values, or a \code{"colourmap"} object. Examples of such functions are \code{\link{beachcolours}} and \code{\link{beachcolourmap}}. 
If \code{col} is a function with an argument named \code{inputs}, and if the pixel values of \code{x} are discrete values (integer, logical, factor or character), then the colour values will be determined by evaluating \code{col(inputs=p)} where \code{p} is the set of possible pixel values. The result should be a character vector containing colour values, or a \code{"colourmap"} object. } \item{ a \code{function} in the \R language with first argument named \code{n}. The colour values will be determined by evaluating \code{col(n)} where \code{n} is the number of distinct pixel values, up to a maximum of 128. The result of this evaluation should be a character vector containing colour values. Examples of such functions are \code{\link[grDevices]{heat.colors}}, \code{\link[grDevices]{terrain.colors}}, \code{\link[grDevices]{topo.colors}} and \code{\link[grDevices]{cm.colors}}. } } If \code{spatstat.options("monochrome")} has been set to \code{TRUE} then \bold{all colours will be converted to grey scale values}. Other graphical parameters controlling the display of both the pixel image and the ribbon can be passed through the \code{...} arguments to the function \code{\link[graphics]{image.default}}. A parameter is handled only if it is one of the following: \itemize{ \item a formal argument of \code{\link[graphics]{image.default}} that is operative when \code{add=TRUE}. \item one of the parameters \code{"main", "asp", "sub", "axes", "xlab", "ylab"} described in \code{\link[graphics]{plot.default}}. \item one of the parameters \code{"ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub"} described in \code{\link[graphics]{par}}. \item the argument \code{box}, a logical value specifying whether a box should be drawn. } Images are plotted using a bitmap raster if \code{useRaster=TRUE} or by drawing polygons if \code{useRaster=FALSE}. 
Bitmap raster display (performed by \code{\link[graphics]{rasterImage}}) tends to produce better results, but is not supported on all graphics devices. The default is to use bitmap raster display if it is supported according to \code{\link[grDevices]{dev.capabilities}}. Alternatively, the pixel values could be directly interpretable as colour values in \R. That is, the pixel values could be character strings that represent colours, or values of a factor whose levels are character strings representing colours. \itemize{ \item If \code{valuesAreColours=TRUE}, then the pixel values will be interpreted as colour values and displayed using these colours. \item If \code{valuesAreColours=FALSE}, then the pixel values will \emph{not} be interpreted as colour values, even if they could be. \item If \code{valuesAreColours=NULL}, the algorithm will guess what it should do. If the argument \code{col} is given, the pixel values will \emph{not} be interpreted as colour values. Otherwise, if all the pixel values are strings that represent colours, then they will be interpreted and displayed as colours. } If pixel values are interpreted as colours, the arguments \code{col} and \code{ribbon} will be ignored, and a ribbon will not be plotted. } \section{Complex-valued images}{ If the pixel values in \code{x} are complex numbers, they will be converted into four images containing the real and imaginary parts and the modulus and argument, and plotted side-by-side using \code{\link{plot.imlist}}. } \section{Monochrome colours}{ If \code{spatstat.options("monochrome")} has been set to \code{TRUE}, then \bold{the image will be plotted in greyscale}. The colours are converted to grey scale values using \code{\link{to.grey}}. The choice of colour map still has an effect, since it determines the final grey scale values. 
Monochrome display can also be achieved by setting the graphics device parameter \code{colormodel="grey"} when starting a new graphics device, or in a call to \code{\link{ps.options}} or \code{\link{pdf.options}}. } \section{Image Looks Like Noise}{ An image plot which looks like digital noise can be produced when the pixel values are almost exactly equal but include a tiny amount of numerical error. To check this, look at the numerals plotted next to the colour ribbon, or compute \code{diff(range(x))}, to determine whether the range of pixel values is almost zero. The behaviour can be suppressed by picking a larger value of the argument \code{zap}. } \section{Image Rendering Errors and Problems}{ The help for \code{\link[graphics]{image.default}} and \code{\link[graphics]{rasterImage}} explains that errors may occur, or images may be rendered incorrectly, on some devices, depending on the availability of colours and other device-specific constraints. If the image is not displayed at all, try setting \code{useRaster=FALSE} in the call to \code{plot.im}. If the ribbon colours are not displayed, set \code{ribargs=list(useRaster=FALSE)}. Errors may occur on some graphics devices if the image is very large. If this happens, try setting \code{useRaster=FALSE} in the call to \code{plot.im}. The error message \code{useRaster=TRUE can only be used with a regular grid} means that the \eqn{x} and \eqn{y} coordinates of the pixels in the image are not perfectly equally spaced, due to numerical rounding. This occurs with some images created by earlier versions of \pkg{spatstat}. To repair the coordinates in an image \code{X}, type \code{X <- as.im(X)}. } \section{Image is Displayed in Wrong Spatial Orientation}{ If the image is displayed in the wrong spatial orientation, and you created the image data directly, please check that you understand the \pkg{spatstat} convention for the spatial orientation of pixel images. 
The row index of the matrix of pixel values corresponds to the increasing \eqn{y} coordinate; the column index of the matrix corresponds to the increasing \eqn{x} coordinate (Baddeley, Rubak and Turner, 2015, section 3.6.3, pages 66--67). Images can be displayed in the wrong spatial orientation on some devices, due to a bug in the device driver. This occurs only when the plot coordinates are \emph{reversed}, that is, when the plot was initialised with coordinate limits \code{xlim, ylim} such that \code{xlim[1] > xlim[2]} or \code{ylim[1] > ylim[2]} or both. This bug is reported to occur only when \code{useRaster=TRUE}. To fix this, try setting \code{workaround=TRUE}, or if that is unsuccessful, \code{useRaster=FALSE}. } \seealso{ \code{\link{im.object}}, \code{\link{colourmap}}, \code{\link{contour.im}}, \code{\link{persp.im}}, \code{\link{hist.im}}, \code{\link[graphics]{image.default}}, \code{\link{spatstat.options}} } \examples{ # an image Z <- setcov(owin()) plot(Z) plot(Z, ribside="bottom") # stretchable colour map plot(Z, col=rainbow) plot(Z, col=terrain.colors(128), axes=FALSE) # fixed colour map tc <- colourmap(rainbow(128), breaks=seq(-1,2,length=129)) plot(Z, col=tc) # colour map function, with argument 'range' plot(Z, col=beachcolours, colargs=list(sealevel=0.5)) # tweaking the plot plot(Z, main="La vie en bleu", col.main="blue", cex.main=1.5, box=FALSE, ribargs=list(col.axis="blue", col.ticks="blue", cex.axis=0.75)) # add axes and axis labels plot(Z, axes=TRUE, ann=TRUE, xlab="Easting", ylab="Northing") # log scale V <- eval.im(exp(exp(Z+2))/1e4) plot(V, log=TRUE, main="Log scale") # it's complex Y <- exp(Z + V * 1i) plot(Y) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{hplot} spatstat/man/incircle.Rd0000644000176200001440000000240013333543263014750 0ustar liggesusers\name{incircle} \alias{incircle} \alias{inradius} \title{Find Largest Circle Inside Window} \description{ Find the largest circle contained in a given window. } \usage{ incircle(W) inradius(W) } \arguments{ \item{W}{A window (object of class \code{"owin"}).} } \details{ Given a window \code{W} of any type and shape, the function \code{incircle} determines the largest circle that is contained inside \code{W}, while \code{inradius} computes its radius only. For non-rectangular windows, the incircle is computed approximately by finding the maximum of the distance map (see \code{\link{distmap}}) of the complement of the window. } \value{ The result of \code{incircle} is a list with entries \code{x,y,r} giving the location \code{(x,y)} and radius \code{r} of the incircle. The result of \code{inradius} is the numerical value of radius. } \seealso{ \code{\link{centroid.owin}} } \examples{ W <- square(1) Wc <- incircle(W) plot(W) plot(disc(Wc$r, c(Wc$x, Wc$y)), add=TRUE) plot(letterR) Rc <- incircle(letterR) plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE) W <- as.mask(letterR) plot(W) Rc <- incircle(W) plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.solist.Rd0000644000176200001440000002074213333543264015464 0ustar liggesusers\name{plot.solist} \alias{plot.solist} \title{Plot a List of Spatial Objects} \description{ Plots a list of two-dimensional spatial objects. 
} \usage{ \method{plot}{solist}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep=0, vsep=0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, panel.vpad = 0.2, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE) } \arguments{ \item{x}{ An object of the class \code{"solist"}, essentially a list of two-dimensional spatial datasets. } \item{\dots}{ Arguments passed to \code{\link{plot}} when generating each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, or a vector of expressions, giving the headings for each plot panel. } \item{mar.panel}{ Size of the margins outside each plot panel. A numeric vector of length 4 giving the bottom, left, top, and right margins in that order. (Alternatively the vector may have length 1 or 2 and will be replicated to length 4). See the section on \emph{Spacing between plots}. } \item{hsep,vsep}{ Additional horizontal and vertical separation between plot panels, expressed in the same units as \code{mar.panel}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Optional. Function that determines different plot arguments for different panels. See Details. } \item{panel.begin.args}{ Optional. List of additional arguments for \code{panel.begin} when it is a function. } \item{panel.end.args}{ Optional. 
List of additional arguments for \code{panel.end} when it is a function. } \item{panel.vpad}{ Amount of extra vertical space that should be allowed for the title of each panel, if a title will be displayed. Expressed as a fraction of the height of the panel. Applies only when \code{equal.scales=FALSE} (the default). } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } \item{equal.scales}{ Logical value indicating whether the components should be plotted at (approximately) the same physical scale. } \item{halign,valign}{ Logical values indicating whether panels in a column should be aligned to the same \eqn{x} coordinate system (\code{halign=TRUE}) and whether panels in a row should be aligned to the same \eqn{y} coordinate system (\code{valign=TRUE}). These are applicable only if \code{equal.scales=TRUE}. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"solist"}. An object of class \code{"solist"} represents a list of two-dimensional spatial datasets. This is the \code{plot} method for such objects. In the \pkg{spatstat} package, various functions produce an object of class \code{"solist"}. These objects can be plotted in a nice arrangement using \code{plot.solist}. See the Examples. The argument \code{panel.args} determines extra graphics parameters for each panel. It should be a function that will be called as \code{panel.args(i)} where \code{i} is the panel number. Its return value should be a list of graphics parameters that can be passed to the relevant \code{plot} method. 
These parameters override any parameters specified in the \code{\dots} arguments. The arguments \code{panel.begin} and \code{panel.end} determine graphics that will be plotted before and after each panel is plotted. They may be objects of some class that can be plotted with the generic \code{plot} command. Alternatively they may be functions that will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)} where \code{i} is the panel number and \code{y = x[[i]]}. If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. If \code{equal.scales=FALSE} (the default), then the plot panels will have equal height on the plot device (unless there is only one column of panels, in which case they will have equal width on the plot device). This means that the objects are plotted at different physical scales, by default. If \code{equal.scales=TRUE}, then the dimensions of the plot panels on the plot device will be proportional to the spatial dimensions of the corresponding components of \code{x}. This means that the objects will be plotted at \emph{approximately} equal physical scales. If these objects have very different spatial sizes, the plot command could fail (when it tries to plot the smaller objects at a tiny scale), with an error message that the figure margins are too large. 
The objects will be plotted at \emph{exactly} equal physical scales, and \emph{exactly} aligned on the device, under the following conditions: \itemize{ \item every component of \code{x} is a spatial object whose position can be shifted by \code{\link{shift}}; \item \code{panel.begin} and \code{panel.end} are either \code{NULL} or they are spatial objects whose position can be shifted by \code{\link{shift}}; \item \code{adorn.left}, \code{adorn.right}, \code{adorn.top} and \code{adorn.bottom} are all \code{NULL}. } Another special case is when every component of \code{x} is an object of class \code{"fv"} representing a function. If \code{equal.scales=TRUE} then all these functions will be plotted with the same axis scales (i.e. with the same \code{xlim} and the same \code{ylim}). } \section{Spacing between plots}{ The spacing between individual plots is controlled by the parameters \code{mar.panel}, \code{hsep} and \code{vsep}. If \code{equal.scales=FALSE}, the plot panels are logically separate plots. The margins for each panel are determined by the argument \code{mar.panel} which becomes the graphics parameter \code{mar} described in the help file for \code{\link{par}}. One unit of \code{mar} corresponds to one line of text in the margin. If \code{hsep} or \code{vsep} are present, \code{mar.panel} is augmented by \code{c(vsep, hsep, vsep, hsep)/2}. If \code{equal.scales=TRUE}, all the plot panels are drawn in the same coordinate system which represents a physical scale. The unit of measurement for \code{mar.panel[1,3]} is one-sixth of the greatest height of any object plotted in the same row of panels, and the unit for \code{mar.panel[2,4]} is one-sixth of the greatest width of any object plotted in the same column of panels. If \code{hsep} or \code{vsep} are present, they are interpreted in the same units as \code{mar.panel[2]} and \code{mar.panel[1]} respectively. 
} \seealso{ \code{\link{plot.anylist}}, \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, this generally means that one of the objects had a much smaller physical scale than the others. Ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ # Intensity estimate of multitype point pattern plot(D <- density(split(amacrine))) plot(D, main="", equal.ribbon=TRUE, panel.end=function(i,y,...){contour(y, ...)}) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/thomas.estK.Rd0000644000176200001440000001426113333543264015371 0ustar liggesusers\name{thomas.estK} \alias{thomas.estK} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the K function. } \usage{ thomas.estK(X, startpar=c(kappa=1,scale=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical \eqn{K} function of the Thomas process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in \Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma} which is equal to the parameter \code{scale}. The named vector of starting values can use either \code{sigma2} (\eqn{\sigma^2}{sigma^2}) or \code{scale} as the name of the second component, but the latter is recommended for consistency with other cluster models. The theoretical \eqn{K}-function of the Thomas process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa (1 - \exp(-\frac{r^2}{4\sigma^2})). }{ K(r) = pi r^2 + (1 - exp(-r^2/(4 sigma^2)))/kappa. } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. 
In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. 
\emph{Biometrics} \bold{32} 659--667. \Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by \adrian } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ data(redwood) u <- thomas.estK(redwood, c(kappa=10, scale=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/formula.fv.Rd0000644000176200001440000000360713333543263015251 0ustar liggesusers\name{formula.fv} \alias{formula.fv} \alias{formula<-} \alias{formula<-.fv} \title{ Extract or Change the Plot Formula for a Function Value Table } \description{ Extract or change the default plotting formula for an object of class \code{"fv"} (function value table). } \usage{ \method{formula}{fv}(x, \dots) formula(x, \dots) <- value \method{formula}{fv}(x, \dots) <- value } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the values of several estimates of a function. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ New value of the formula. Either a \code{formula} or a character string. } } \details{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) is a convenient way of storing and plotting several different estimates of the same function. The default behaviour of \code{plot(x)} for a function value table \code{x} is determined by a formula associated with \code{x} called its \emph{plot formula}. See \code{\link{plot.fv}} for explanation about these formulae. 
The function \code{formula.fv} is a method for the generic command \code{\link{formula}}. It extracts the plot formula associated with the object. The function \code{formula<-} is generic. It changes the formula associated with an object. The function \code{formula<-.fv} is the method for \code{formula<-} for the class \code{"fv"}. It changes the plot formula associated with the object. } \value{ The result of \code{formula.fv} is a character string containing the plot formula. The result of \code{formula<-.fv} is a new object of class \code{"fv"}. } \author{ \adrian and \rolf } \seealso{ \code{\link{fv}}, \code{\link{plot.fv}}, \code{\link[stats]{formula}}. } \examples{ K <- Kest(cells) formula(K) formula(K) <- (iso ~ r) } \keyword{spatial} \keyword{methods} spatstat/man/Kdot.Rd0000644000176200001440000001675213500643101014064 0ustar liggesusers\name{Kdot} \alias{Kdot} \title{ Multitype K Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of other points of the process within a given distance of a point of type \eqn{i}. } \usage{ Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE, from) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. 
} \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from}{An alternative way to specify \code{i}.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kdot} and its companions \code{\link{Kcross}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. 
In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The ``type \eqn{i} to any type'' multitype \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The function \eqn{K_{i\bullet}}{Ki.} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points were independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between types. This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. 
The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the chosen edge correction(s). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kdot}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{woods <- woods[seq(1, npoints(woods), by=80)]} Kh. 
<- Kdot(woods, "hickory") # diagnostic plot for independence between hickories and other trees plot(Kh.) \dontrun{ # synthetic example with two marks "a" and "b" pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE)) K <- Kdot(pp, "a") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/im.Rd0000644000176200001440000001165713333543263013603 0ustar liggesusers\name{im} \alias{im} \title{Create a Pixel Image Object} \description{ Creates an object of class \code{"im"} representing a two-dimensional pixel image. } \usage{ im(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), xrange=NULL, yrange=NULL, unitname=NULL) } \arguments{ \item{mat}{ matrix or vector containing the pixel values of the image. } \item{xcol}{ vector of \eqn{x} coordinates for the pixel grid } \item{yrow}{ vector of \eqn{y} coordinates for the pixel grid } \item{xrange,yrange}{ Optional. Vectors of length 2 giving the \eqn{x} and \eqn{y} limits of the enclosing rectangle. (Ignored if \code{xcol}, \code{yrow} are present.) } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } } \details{ This function creates an object of class \code{"im"} representing a \sQuote{pixel image} or two-dimensional array of values. The pixel grid is rectangular and occupies a rectangular window in the spatial coordinate system. The pixel values are \emph{scalars}: they can be real numbers, integers, complex numbers, single characters or strings, logical values, or categorical values. A pixel's value can also be \code{NA}, meaning that no value is defined at that location, and effectively that pixel is \sQuote{outside} the window. 
Although the pixel values must be scalar, photographic colour images (i.e., with red, green, and blue brightness channels) can be represented as character-valued images in \pkg{spatstat}, using \R's standard encoding of colours as character strings. The matrix \code{mat} contains the \sQuote{greyscale} values for a rectangular grid of pixels. Note carefully that the entry \code{mat[i,j]} gives the pixel value at the location \code{(xcol[j],yrow[i])}. That is, the \bold{row} index of the matrix \code{mat} corresponds to increasing \bold{y} coordinate, while the column index of \code{mat} corresponds to increasing \bold{x} coordinate. Thus \code{yrow} has one entry for each row of \code{mat} and \code{xcol} has one entry for each column of \code{mat}. Under the usual convention in \R, a correct display of the image would be obtained by transposing the matrix, e.g. \code{image.default(xcol, yrow, t(mat))}, if you wanted to do it by hand. The entries of \code{mat} may be numeric (real or integer), complex, logical, character, or factor values. If \code{mat} is not a matrix, it will be converted into a matrix with \code{nrow(mat) = length(yrow)} and \code{ncol(mat) = length(xcol)}. To make a factor-valued image, note that \R has a quirky way of handling matrices with factor-valued entries. The command \code{\link{matrix}} cannot be used directly, because it destroys factor information. To make a factor-valued image, do one of the following: \itemize{ \item Create a \code{factor} containing the pixel values, say \code{mat <- factor(.....)}, and then assign matrix dimensions to it by \code{dim(mat) <- c(nr, nc)} where \code{nr, nc} are the numbers of rows and columns. The resulting object \code{mat} is both a factor and a vector. \item Supply \code{mat} as a one-dimensional factor and specify the arguments \code{xcol} and \code{yrow} to determine the dimensions of the image. 
\item Use the functions \code{\link{cut.im}} or \code{\link{eval.im}} to make factor-valued images from other images. } For a description of the methods available for pixel image objects, see \code{\link{im.object}}. To convert other kinds of data to a pixel image (for example, functions or windows), use \code{\link{as.im}}. } \seealso{ \code{\link{im.object}} for details of the class. \code{\link{as.im}} for converting other kinds of data to an image. \code{\link{as.matrix.im}}, \code{\link{[.im}}, \code{\link{eval.im}} for manipulating images. } \section{Warnings}{ The internal representation of images is likely to change in future releases of \pkg{spatstat}. The safe way to extract pixel values from an image object is to use \code{\link{as.matrix.im}} or \code{\link{[.im}}. } \examples{ vec <- rnorm(1200) mat <- matrix(vec, nrow=30, ncol=40) whitenoise <- im(mat) whitenoise <- im(mat, xrange=c(0,1), yrange=c(0,1)) whitenoise <- im(mat, xcol=seq(0,1,length=40), yrow=seq(0,1,length=30)) whitenoise <- im(vec, xcol=seq(0,1,length=40), yrow=seq(0,1,length=30)) plot(whitenoise) # Factor-valued images: f <- factor(letters[1:12]) dim(f) <- c(3,4) Z <- im(f) # Factor image from other image: cutwhite <- cut(whitenoise, 3) plot(cutwhite) # Factor image from raw data cutmat <- cut(mat, 3) dim(cutmat) <- c(30,40) cutwhite <- im(cutmat) plot(cutwhite) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{datagen} spatstat/man/deltametric.Rd0000644000176200001440000000565113333543263015470 0ustar liggesusers\name{deltametric} \Rdversion{1.1} \alias{deltametric} \title{ Delta Metric } \description{ Computes the discrepancy between two sets \eqn{A} and \eqn{B} according to Baddeley's delta-metric. } \usage{ deltametric(A, B, p = 2, c = Inf, ...) } \arguments{ \item{A,B}{ The two sets which will be compared. Windows (objects of class \code{"owin"}), point patterns (objects of class \code{"ppp"}) or line segment patterns (objects of class \code{"psp"}). 
} \item{p}{ Index of the \eqn{L^p} metric. Either a positive numeric value, or \code{Inf}. } \item{c}{ Distance threshold. Either a positive numeric value, or \code{Inf}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution of the distance maps computed by \code{\link{distmap}}. } } \details{ Baddeley (1992a, 1992b) defined a distance between two sets \eqn{A} and \eqn{B} contained in a space \eqn{W} by \deqn{ \Delta(A,B) = \left[ \frac 1 {|W|} \int_W \left| \min(c, d(x,A)) - \min(c, d(x,B)) \right|^p \, {\rm d}x \right]^{1/p} }{ \Delta(A,B) = [ (1/|W|) * integral of |min(c, d(x,A))-min(c, d(x,B))|^p dx ]^(1/p) } where \eqn{c \ge 0}{c >= 0} is a distance threshold parameter, \eqn{0 < p \le \infty}{0 < p <= Inf} is the exponent parameter, and \eqn{d(x,A)} denotes the shortest distance from a point \eqn{x} to the set \eqn{A}. Also \code{|W|} denotes the area or volume of the containing space \eqn{W}. This is defined so that it is a \emph{metric}, i.e. \itemize{ \item \eqn{\Delta(A,B)=0}{Delta(A,B)=0} if and only if \eqn{A=B} \item \eqn{\Delta(A,B)=\Delta(B,A)}{Delta(A,B)=Delta(B,A)} \item \eqn{\Delta(A,C) \le \Delta(A,B) + \Delta(B,C)}{Delta(A,C) <= Delta(A,B) + Delta(B,C)} } It is topologically equivalent to the Hausdorff metric (Baddeley, 1992a) but has better stability properties in practical applications (Baddeley, 1992b). If \eqn{p=\infty}{p=Inf} and \eqn{c=\infty}{c=Inf} the Delta metric is equal to the Hausdorff metric. The algorithm uses \code{\link{distmap}} to compute the distance maps \eqn{d(x,A)} and \eqn{d(x,B)}, then approximates the integral numerically. The accuracy of the computation depends on the pixel resolution which is controlled through the extra arguments \code{\dots} passed to \code{\link{as.mask}}. } \value{ A numeric value. } \references{ Baddeley, A.J. (1992a) Errors in binary images and an \eqn{L^p} version of the Hausdorff metric. \emph{Nieuw Archief voor Wiskunde} \bold{10}, 157--183. 
Baddeley, A.J. (1992b) An error metric for binary images. In W. Foerstner and S. Ruwiedel (eds) \emph{Robust Computer Vision}. Karlsruhe: Wichmann. Pages 59--78. } \author{ \adrian and \rolf } \seealso{ \code{\link{distmap}} } \examples{ X <- runifpoint(20) Y <- runifpoint(10) deltametric(X, Y, p=1,c=0.1) } \keyword{spatial} \keyword{math} spatstat/man/Penttinen.Rd0000644000176200001440000000520013333543262015124 0ustar liggesusers\name{Penttinen} \alias{Penttinen} \title{Penttinen Interaction} \description{ Creates an instance of the Penttinen pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ Penttinen(r) } \arguments{ \item{r}{circle radius} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Penttinen (1984, Example 2.1, page 18), citing Cormack (1979), described the pairwise interaction point process with interaction factor \deqn{ h(d) = e^{\theta A(d)} = \gamma^{A(d)} }{ h(d) = exp(theta * A(d)) = gamma^(A(d)) } between each pair of points separated by a distance $d$. Here \eqn{A(d)} is the area of intersection between two discs of radius \eqn{r} separated by a distance \eqn{d}, normalised so that \eqn{A(0) = 1}. The scale of interaction is controlled by the disc radius \eqn{r}: two points interact if they are closer than \eqn{2 r}{2 * r} apart. The strength of interaction is controlled by the canonical parameter \eqn{\theta}{theta}, which must be less than or equal to zero, or equivalently by the parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)}, which must lie between 0 and 1. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. For \eqn{\gamma=0}{gamma=0} the model is a hard core process with hard core diameter \eqn{2 r}{2 * r}. For \eqn{\gamma=1}{gamma=1} the model is a Poisson process. 
The irregular parameter \eqn{r} must be given in the call to \code{Penttinen}, while the regular parameter \eqn{\theta}{theta} will be estimated. This model can be considered as a pairwise approximation to the area-interaction model \code{\link{AreaInter}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}}, \code{\link{AreaInter}}. } \examples{ fit <- ppm(cells ~ 1, Penttinen(0.07)) fit reach(fit) # interaction range is circle DIAMETER } \references{ Cormack, R.M. (1979) Spatial aspects of competition between individuals. Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology}, eds. R.M. Cormack and J.K. Ord, International Co-operative Publishing House, Fairland, MD, USA. Penttinen, A. (1984) \emph{Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method.} \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla} Studies in Computer Science, Economics and Statistics \bold{7}, University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/summary.psp.Rd0000644000176200001440000000136613333543264015471 0ustar liggesusers\name{summary.psp} \alias{summary.psp} \title{Summary of a Line Segment Pattern Dataset} \description{ Prints a useful summary of a line segment pattern dataset. } \usage{ \method{summary}{psp}(object, \dots) } \arguments{ \item{object}{Line segment pattern (object of class \code{"psp"}).} \item{\dots}{Ignored.} } \details{ A useful summary of the line segment pattern \code{object} is printed. This is a method for the generic function \code{\link{summary}}. 
} \seealso{ \code{\link{summary}}, \code{\link{summary.owin}}, \code{\link{print.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) summary(a) # describes it } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/pairs.im.Rd0000644000176200001440000000562413333543263014715 0ustar liggesusers\name{pairs.im} \alias{pairs.im} \title{ Scatterplot Matrix for Pixel Images } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images. } \usage{ \method{pairs}{im}(..., plot=TRUE) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image (object of class \code{"im"}) or a named argument to be passed to \code{\link{pairs.default}}. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. First the pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. } \section{Image or Contour Plots}{ Since the scatterplots may show very dense concentrations of points, it may be useful to set \code{panel=panel.image} or \code{panel=panel.contour} to draw a colour image or contour plot of the kernel-smoothed density of the scatterplot in each panel. The argument \code{panel} is passed to \code{\link{pairs.default}}. 
See the help for \code{\link{panel.image}} and \code{\link{panel.contour}}. } \section{Low Level Control of Graphics}{ To control the appearance of the individual scatterplot panels, see \code{\link{pairs.default}}, \code{\link{points}} or \code{\link{par}}. To control the plotting symbol for the points in the scatterplot, use the arguments \code{pch}, \code{col}, \code{bg} as described under \code{\link{points}} (because the default panel plotter is the function \code{\link{points}}). To suppress the tick marks on the plot axes, type \code{par(xaxt="n", yaxt="n")} before calling \code{pairs}. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. } \seealso{ \code{\link{pairs}}, \code{\link{pairs.default}}, \code{\link{panel.contour}}, \code{\link{panel.image}}, \code{\link{plot.im}}, \code{\link{im}}, \code{\link{par}} } \examples{ X <- density(rpoispp(30)) Y <- density(rpoispp(40)) Z <- density(rpoispp(30)) p <- pairs(X,Y,Z) p plot(p) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/Linhom.Rd0000644000176200001440000000574713571674202014431 0ustar liggesusers\name{Linhom} \alias{Linhom} \title{Inhomogeneous L-function} \description{ Calculates an estimate of the inhomogeneous version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Linhom(X, ..., correction) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kinhom}} to control the estimation procedure. } } \details{ This command computes an estimate of the inhomogeneous version of the \eqn{L}-function for a spatial point pattern. 
The original \eqn{L}-function is a transformation (proposed by Besag) of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the Ripley \eqn{K}-function of a spatially homogeneous point pattern, estimated by \code{\link{Kest}}. The inhomogeneous \eqn{L}-function is the corresponding transformation of the inhomogeneous \eqn{K}-function, estimated by \code{\link{Kinhom}}. It is appropriate when the point pattern clearly does not have a homogeneous intensity of points. It was proposed by Baddeley, \Moller and Waagepetersen (2000). The command \code{Linhom} first calls \code{\link{Kinhom}} to compute the estimate of the inhomogeneous K-function, and then applies the square root transformation. For a Poisson point pattern (homogeneous or inhomogeneous), the theoretical value of the inhomogeneous \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Kinhom}}, \code{\link{pcf}} } \examples{ data(japanesepines) X <- japanesepines L <- Linhom(X, sigma=0.1) plot(L, main="Inhomogeneous L function for Japanese Pines") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.im.Rd0000644000176200001440000003154513522237767014214 0ustar liggesusers\name{as.im} \alias{as.im} \alias{as.im.im} \alias{as.im.leverage.ppm} \alias{as.im.owin} \alias{as.im.matrix} \alias{as.im.tess} \alias{as.im.function} \alias{as.im.funxy} \alias{as.im.expression} \alias{as.im.distfun} \alias{as.im.nnfun} \alias{as.im.densityfun} \alias{as.im.Smoothfun} \alias{as.im.data.frame} \alias{as.im.default} \title{Convert to Pixel Image} \description{ Converts various kinds of data to a pixel image } \usage{ as.im(X, \dots) \method{as.im}{im}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{owin}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, value=1) \method{as.im}{matrix}(X, W=NULL, \dots) \method{as.im}{tess}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{function}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, stringsAsFactors=default.stringsAsFactors(), strict=FALSE, drop=TRUE) \method{as.im}{funxy}(X, W=Window(X), \dots) \method{as.im}{expression}(X, W=NULL, \dots) \method{as.im}{distfun}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, approx=TRUE) \method{as.im}{nnfun}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, approx=TRUE) \method{as.im}{densityfun}(X, W=Window(X), \dots, approx=TRUE) \method{as.im}{Smoothfun}(X, W=Window(X), \dots, approx=TRUE) \method{as.im}{leverage.ppm}(X, \dots, what=c("smooth", "nearest")) \method{as.im}{data.frame}(X, \dots, step, fatal=TRUE, drop=TRUE) \method{as.im}{default}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) } \arguments{ \item{X}{Data to be 
converted to a pixel image.} \item{W}{Window object which determines the spatial domain and pixel array geometry. } \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function.} \item{eps,dimyx,xy}{ Optional parameters passed to \code{\link{as.mask}} which determine the pixel array geometry. See \code{\link{as.mask}}. } \item{na.replace}{Optional value to replace \code{NA} entries in the output image. } \item{value}{Optional. The value to be assigned to pixels inside the window, if \code{X} is a window. } \item{strict}{ Logical value indicating whether to match formal arguments of \code{X} when \code{X} is a function. If \code{strict=FALSE} (the default), all the \code{\dots} arguments are passed to \code{X}. If \code{strict=TRUE}, only named arguments are passed, and only if they match the names of formal arguments of \code{X}. } \item{step}{ Optional. A single number, or numeric vector of length 2, giving the grid step lengths in the \eqn{x} and \eqn{y} directions. } \item{fatal}{ Logical value indicating what to do if the resulting image would be too large for available memory. If \code{fatal=TRUE} (the default), an error occurs. If \code{fatal=FALSE}, a warning is issued and \code{NULL} is returned. } \item{drop}{ Logical value indicating what to do if the result would normally be a list of pixel images but the list contains only one image. If \code{drop=TRUE} (the default), the pixel image is extracted and the result is a pixel image. If \code{drop=FALSE}, this list is returned as the result. } \item{stringsAsFactors}{ Logical value (passed to \code{\link[base]{data.frame}}) specifying how to handle pixel values which are character strings. If \code{TRUE}, character values are interpreted as factor levels. If \code{FALSE}, they remain as character strings. The factory-fresh default is \code{TRUE}, but that can be changed by setting \code{options(stringsAsFactors=FALSE)}. 
} \item{approx}{ Logical value indicating whether to compute an approximate result at faster speed. } \item{what}{ Character string (partially matched) specifying which image data should be extracted. See \code{\link{plot.leverage.ppm}} for explanation. } } \details{ This function converts the data \code{X} into a pixel image object of class \code{"im"} (see \code{\link{im.object}}). The function \code{as.im} is generic, with methods for the classes listed above. Currently \code{X} may be any of the following: \itemize{ \item a pixel image object, of class \code{"im"}. \item a window object, of class \code{"owin"} (see \code{\link{owin.object}}). The result is an image with all pixel entries equal to \code{value} inside the window \code{X}, and \code{NA} outside. \item a matrix. \item a tessellation (object of class \code{"tess"}). The result is a factor-valued image, with one factor level corresponding to each tile of the tessellation. Pixels are classified according to the tile of the tessellation into which they fall. \item a single number (or a single logical, complex, factor or character value). The result is an image with all pixel entries equal to this constant value inside the window \code{W} (and \code{NA} outside, unless the argument \code{na.replace} is given). Argument \code{W} is required. \item a function of the form \code{function(x, y, ...)} which is to be evaluated to yield the image pixel values. In this case, the additional argument \code{W} must be present. This window will be converted to a binary image mask. Then the function \code{X} will be evaluated in the form \code{X(x, y, ...)} where \code{x} and \code{y} are \bold{vectors} containing the \eqn{x} and \eqn{y} coordinates of all the pixels in the image mask, and \code{...} are any extra arguments given. This function must return a vector or factor of the same length as the input vectors, giving the pixel values. 
\item an object of class \code{"funxy"} representing a \code{function(x,y,...)} defined in a spatial region. The function will be evaluated as described above. The window \code{W} defaults to the domain of definition of the function. \item an object of class \code{"funxy"} which also belongs to one of the following special classes. If \code{approx=TRUE} (the default), the function will be evaluated approximately using a very fast algorithm. If \code{approx=FALSE}, the function will be evaluated exactly at each grid location as described above. \itemize{ \item an object of class \code{"distfun"} representing a distance function (created by the command \code{\link{distfun}}). The fast approximation is the distance transform \code{\link{distmap}}. \item an object of class \code{"nnfun"} representing a nearest neighbour function (created by the command \code{\link{nnfun}}). The fast approximation is \code{\link{nnmap}}. \item an object of class \code{"densityfun"} representing a kernel estimate of intensity (created by the command \code{\link{densityfun}}). The fast approximation is the Fast Fourier Transform algorithm in \code{\link{density.ppp}}. \item an object of class \code{"Smoothfun"} representing kernel-smoothed values (created by the command \code{\link{Smoothfun}}). The fast approximation is the Fast Fourier Transform algorithm in \code{\link{Smooth.ppp}}. } \item An \code{expression} involving the variables \code{x} and \code{y} representing the spatial coordinates, and possibly also involving other variables. The additional argument \code{W} must be present; it will be converted to a binary image mask. The expression \code{X} will be evaluated in an environment where \code{x} and \code{y} are \bold{vectors} containing the spatial coordinates of all the pixels in the image mask. Evaluation of the expression \code{X} must yield a vector or factor, of the same length as \code{x} and \code{y}, giving the pixel values. 
\item a list with entries \code{x, y, z} in the format expected by the standard \code{R} functions \code{\link{image.default}} and \code{\link{contour.default}}. That is, \code{z} is a matrix of pixel values, \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y} coordinates respectively, and \code{z[i,j]} is the pixel value for the location \code{(x[i],y[j])}. \item a point pattern (object of class \code{"ppp"}). See the separate documentation for \code{\link{as.im.ppp}}. \item A data frame with at least three columns. Columns named \code{x}, \code{y} and \code{z}, if present, will be assumed to contain the spatial coordinates and the pixel values, respectively. Otherwise the \code{x} and \code{y} coordinates will be taken from the first two columns of the data frame, and any remaining columns will be interpreted as pixel values. } The spatial domain (enclosing rectangle) of the pixel image is determined by the argument \code{W}. If \code{W} is absent, the spatial domain is determined by \code{X}. When \code{X} is a function, a matrix, or a single numerical value, \code{W} is required. The pixel array dimensions of the final resulting image are determined by (in priority order) \itemize{ \item the argument \code{eps}, \code{dimyx} or \code{xy} if present; \item the pixel dimensions of the window \code{W}, if it is present and if it is a binary mask; \item the pixel dimensions of \code{X} if it is an image, a binary mask, or a \code{list(x,y,z)}; \item the default pixel dimensions, controlled by \code{\link{spatstat.options}}. } Note that if \code{eps}, \code{dimyx} or \code{xy} is given, this will override the pixel dimensions of \code{X} if it has them. Thus, \code{as.im} can be used to change an image's pixel dimensions. If the argument \code{na.replace} is given, then all \code{NA} entries in the image will be replaced by this value. The resulting image is then defined everwhere on the full rectangular domain, instead of a smaller window. 
Here \code{na.replace} should be a single value, of the same type as the other entries in the image. If \code{X} is a pixel image that was created by an older version of \pkg{spatstat}, the command \code{X <- as.im(X)} will repair the internal format of \code{X} so that it conforms to the current version of \pkg{spatstat}. If \code{X} is a data frame with \code{m} columns, then \code{m-2} columns of data are interpreted as pixel values, yielding \code{m-2} pixel images. The result of \code{as.im.data.frame} is a list of pixel images, belonging to the class \code{"imlist"}. If \code{m = 3} and \code{drop=TRUE} (the default), then the result is a pixel image rather than a list containing this image. If \code{X} is a \code{function(x,y)} which returns a matrix of values, then \code{as.im(X, W)} will be a list of pixel images. } \section{Character-valued images}{ By default, if the pixel value data are character strings, they will be treated as levels of a factor, and the resulting image will be factor-valued. To prevent the conversion of character strings to factors, use the argument \code{stringsAsFactors=FALSE}, which is recognised by most of the methods for \code{as.im}, or alternatively set \code{options(stringsAsFactors=FALSE)}. } \value{ A pixel image (object of class \code{"im"}), or a list of pixel images, or \code{NULL} if the conversion failed. 
} \seealso{ Separate documentation for \code{\link{as.im.ppp}} } \examples{ data(demopat) # window object W <- Window(demopat) plot(W) Z <- as.im(W) image(Z) # function Z <- as.im(function(x,y) {x^2 + y^2}, unit.square()) image(Z) # or as an expression Z <- as.im(expression(x^2+y^2), square(1)) # function with extra arguments f <- function(x, y, x0, y0) { sqrt((x - x0)^2 + (y-y0)^2) } Z <- as.im(f, unit.square(), x0=0.5, y0=0.5) image(Z) # Revisit the Sixties Z <- as.im(f, letterR, x0=2.5, y0=2) image(Z) # usual convention in R stuff <- list(x=1:10, y=1:10, z=matrix(1:100, nrow=10)) Z <- as.im(stuff) # convert to finer grid Z <- as.im(Z, dimyx=256) #' distance functions d <- distfun(redwood) Zapprox <- as.im(d) Zexact <- as.im(d, approx=FALSE) plot(solist(approx=Zapprox, exact=Zexact), main="") # pixellate the Dirichlet tessellation Di <- dirichlet(runifpoint(10)) plot(as.im(Di)) plot(Di, add=TRUE) # as.im.data.frame is the reverse of as.data.frame.im grad <- bei.extra$grad slopedata <- as.data.frame(grad) slope <- as.im(slopedata) unitname(grad) <- unitname(slope) <- unitname(grad) # for compatibility all.equal(slope, grad) # TRUE ## handling of character values as.im("a", W=letterR, na.replace="b") as.im("a", W=letterR, na.replace="b", stringsAsFactors=FALSE) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/joinVertices.Rd0000644000176200001440000000357413333543263015641 0ustar liggesusers\name{joinVertices} \alias{joinVertices} \title{ Join Vertices in a Network } \description{ Join the specified vertices in a linear network, creating a new network. } \usage{ joinVertices(L, from, to) } \arguments{ \item{L}{ A linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{from,to}{ Integers, or integer vectors of equal length, specifying the vertices which should be joined. 
Alternatively \code{from} can be a 2-column matrix of integers and \code{to} is missing or \code{NULL}. } } \details{ Vertices of the network are numbered by their order of appearance in the point pattern \code{vertices(L)}. If \code{from} and \code{to} are single integers, then the pair of vertices numbered \code{from} and \code{to} will be joined to make a new segment of the network. If \code{from} and \code{to} are vectors of integers, then vertex \code{from[i]} will be joined to vertex \code{to[i]} for each \code{i = 1,2,..}. If \code{L} is a network (class \code{"linnet"}), the result is another network, created by adding new segments. If \code{L} is a point pattern on a network (class \code{"lpp"}), the result is another point pattern object, created by adding new segments to the underlying network, and retaining the points. In the resulting object, the new line segments are appended to the existing list of line segments. } \value{ A linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{methods.linnet}}, \code{\link{thinNetwork}} } \examples{ snet <- joinVertices(simplenet, 4, 8) plot(solist(simplenet, snet), main="") X <- runiflpp(3, simplenet) Y <- joinVertices(X, 4, 8) } \keyword{spatial} \keyword{manip} spatstat/man/whichhalfplane.Rd0000644000176200001440000000202113333543265016136 0ustar liggesusers\name{whichhalfplane} \alias{whichhalfplane} \title{ Test Which Side of Infinite Line a Point Falls On } \description{ Given an infinite line and a spatial point location, determine which side of the line the point falls on. } \usage{ whichhalfplane(L, x, y = NULL) } \arguments{ \item{L}{ Object of class \code{"infline"} specifying one or more infinite straight lines in two dimensions. } \item{x,y}{ Arguments acceptable to \code{\link[grDevices]{xy.coords}} specifying the locations of the points. 
} } \details{ An infinite line \eqn{L} divides the two-dimensional plane into two half-planes. This function returns a matrix \code{M} of logical values in which \code{M[i,j] = TRUE} if the \code{j}th spatial point lies below or to the left of the \code{i}th line. } \value{ A logical matrix. } \author{ \adrian. } \seealso{ \code{\link{infline}} } \examples{ L <- infline(p=runif(3), theta=runif(3, max=2*pi)) X <- runifpoint(4) whichhalfplane(L, X) } \keyword{spatial} \keyword{manip} spatstat/man/methods.unitname.Rd0000644000176200001440000000610613333543263016451 0ustar liggesusers\name{methods.unitname} \Rdversion{1.1} \alias{methods.unitname} %DoNotExport \alias{print.unitname} \alias{summary.unitname} \alias{rescale.unitname} \alias{compatible.unitname} \alias{harmonise.unitname} \alias{harmonize.unitname} \title{ Methods for Units } \description{ Methods for class \code{"unitname"}. } \usage{ \method{print}{unitname}(x, ...) \method{summary}{unitname}(object, ...) \method{rescale}{unitname}(X, s, unitname) \method{compatible}{unitname}(A,B, ..., coerce=TRUE) \method{harmonise}{unitname}(..., coerce=TRUE, single=FALSE) \method{harmonize}{unitname}(..., coerce=TRUE, single=FALSE) } \arguments{ \item{x,X,A,B,object}{ Objects of class \code{"unitname"} representing units of length. } \item{\dots}{ Other arguments. For \code{print.unitname} these arguments are passed to \code{\link[base]{print.default}}. For \code{summary.unitname} they are ignored. For \code{compatible.unitname} and \code{harmonise.unitname} these arguments are other objects of class \code{"unitname"}. } \item{s}{ Conversion factor: the new units are \code{s} times the old units. } \item{unitname}{ Optional new name for the unit. If present, this overrides the rescaling operation and simply substitutes the new name for the old one. } \item{coerce}{ Logical. If \code{TRUE}, a null unit of length is compatible with any non-null unit. 
} \item{single}{ Logical value indicating whether to return a single unitname, or a list of unitnames. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{rescale}} and \code{\link{compatible}} for the class \code{"unitname"}. An object of class \code{"unitname"} represents a unit of length. The \code{print} method prints a description of the unit of length, and the \code{summary} method gives a more detailed description. The \code{rescale} method changes the unit of length by rescaling it. The \code{compatible} method tests whether two or more units of length are compatible. The \code{harmonise} method returns the common unit of length if there is one. For consistency with other methods for \code{\link{harmonise}}, the result is a list of unitname objects, with one entry for each argument in \code{\dots}. All of these entries are identical. This can be overridden by setting \code{single=TRUE} when the result will be a single unitname object. } \value{ For \code{print.unitname} the value is \code{NULL}. For \code{summary.unitname} the value is an object of class \code{summary.unitname} (with its own print method). For \code{rescale.unitname} the value is another object of class \code{"unitname"}. For \code{compatible.unitname} the result is logical. For \code{harmonise.unitname} the result is a list of identical unitnames if \code{single=FALSE} (the default), or a single unitname if \code{single=TRUE}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{box3}}, \code{\link{print}}, \code{\link{unitname}} } \keyword{spatial} \keyword{methods} spatstat/man/dfbetas.ppm.Rd0000644000176200001440000000735213512337523015375 0ustar liggesusers\name{dfbetas.ppm} \alias{dfbetas.ppm} \title{ Parameter Influence Measure } \description{ Computes the deletion influence measure for each parameter in a fitted point process model. 
} \usage{ \method{dfbetas}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored, except for the arguments \code{dimyx} and \code{eps} which are passed to \code{\link{as.mask}} to control the spatial resolution of the image of the density component. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process \code{model}, this function computes the influence measure for each parameter, as described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). This is a method for the generic function \code{\link[stats]{dfbetas}}. The influence measure for each parameter \eqn{\theta}{\theta} is a signed measure in two-dimensional space. It consists of a discrete mass on each data point (i.e. each point in the point pattern to which the \code{model} was originally fitted) and a continuous density at all locations. The mass at a data point represents the change in the fitted value of the parameter \eqn{\theta}{\theta} that would occur if this data point were to be deleted. The density at other non-data locations represents the effect (on the fitted value of \eqn{\theta}{\theta}) of deleting these locations (and their associated covariate values) from the input to the fitting procedure. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. 
The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ An object of class \code{"msr"} representing a signed or vector-valued measure. This object can be printed and plotted. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}}, \code{\link{ppmInfluence}}. See \code{\link{msr}} for information on how to use a measure. } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) \testonly{fit <- ppm(X ~x+y, nd=16)} plot(dfbetas(fit)) plot(Smooth(dfbetas(fit))) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat/man/methods.pp3.Rd0000644000176200001440000000311313333543263015326 0ustar liggesusers\name{methods.pp3} \Rdversion{1.1} \alias{methods.pp3} %DoNotExport \alias{print.pp3} \alias{summary.pp3} \alias{print.summary.pp3} \alias{unitname.pp3} \alias{unitname<-.pp3} \title{ Methods for three-dimensional point patterns } \description{ Methods for class \code{"pp3"}. } \usage{ \method{print}{pp3}(x, ...) \method{print}{summary.pp3}(x, ...) \method{summary}{pp3}(object, ...) 
\method{unitname}{pp3}(x) \method{unitname}{pp3}(x) <- value } \arguments{ \item{x,object}{ Object of class \code{"pp3"}. } \item{\dots}{ Ignored. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"pp3"} of three-dimensional point patterns. The \code{print} and \code{summary} methods print a description of the point pattern. The \code{unitname} method extracts the name of the unit of length in which the point coordinates are expressed. The \code{unitname<-} method assigns the name of the unit of length. } \value{ For \code{print.pp3} the value is \code{NULL}. For \code{unitname.pp3} an object of class \code{"units"}. } \author{\adrian and \rolf } \seealso{ \code{\link{pp3}}, \code{\link{print}}, \code{\link{unitname}} \code{\link{unitname<-}} } \examples{ X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1), unitname="mm")) X unitname(X) unitname(X) <- c("foot", "feet") summary(X) } \keyword{spatial} \keyword{methods} spatstat/man/print.im.Rd0000644000176200001440000000120513333543264014723 0ustar liggesusers\name{print.im} \alias{print.im} \title{Print Brief Details of an Image} \description{ Prints a very brief description of a pixel image object. } \usage{ \method{print}{im}(x, \dots) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the pixel image \code{x} is printed. This is a method for the generic function \code{\link{print}}. 
} \seealso{ \code{\link{print}}, \code{\link{im.object}}, \code{\link{summary.im}} } \examples{ data(letterR) U <- as.im(letterR) U } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/repul.Rd0000644000176200001440000000340013571674202014312 0ustar liggesusers\name{repul.dppm} \alias{repul} \alias{repul.dppm} \title{ Repulsiveness Index of a Determinantal Point Process Model } \description{ Computes a measure of the degree of repulsion between points in a determinantal point process model. } \usage{ repul(model, \dots) \method{repul}{dppm}(model, \dots) } \arguments{ \item{model}{ A fitted point process model of determinantal type (object of class \code{"dppm"}). } \item{\dots}{ Ignored. } } \details{ The repulsiveness index \eqn{\mu}{mu} of a determinantal point process model was defined by Lavancier, \Moller and Rubak (2015) as \deqn{ \mu = \lambda \int (1- g(x)) \, dx }{ mu = lambda * integral[(1-g(x))] } where \eqn{\lambda}{\lambda} is the intensity of the model and \eqn{g(x)} is the pair correlation function, and the integral is taken over all two-dimensional vectors \eqn{x}. Values of \eqn{\mu} are dimensionless. Larger values of \eqn{\mu}{mu} indicate stronger repulsion between points. If the model is stationary, the result is a single number. If the model is not stationary, the result is a pixel image (obtained by multiplying the spatially-varying intensity by the integral defined above). } \value{ A numeric value or a pixel image. } \references{ Lavancier, F., \Moller, J. and Rubak, E. (2015), Determinantal point process models and statistical inference. \emph{Journal of Royal Statistical Society: Series B (Statistical Methodology)}, \bold{77}, 853--877. } \author{ \adrian. 
} \seealso{ \code{\link{dppm}} } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } fit <- dppm(jpines ~ 1, dppGauss) repul(fit) } \keyword{spatial} \keyword{models} spatstat/man/plot.cdftest.Rd0000644000176200001440000000642613333543264015606 0ustar liggesusers\name{plot.cdftest} \alias{plot.cdftest} \title{Plot a Spatial Distribution Test} \description{ Plot the result of a spatial distribution test computed by \code{cdf.test}. } \usage{ \method{plot}{cdftest}(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"cdftest"} produced by a method for \code{\link{cdf.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.default}}. } \item{style}{ Style of plot. See Details. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical curve (the empirical distribution, or PP plot or QQ plot). } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the reference curve (the predicted distribution, or the diagonal). } \item{do.legend}{ Logical value indicating whether to add an explanatory legend. Applies only when \code{style="cdf"}. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"cdftest"}. An object of this class represents the outcome of a spatial distribution test, computed by \code{\link{cdf.test}}, and based on either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test. 
If \code{style="cdf"} (the default), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, both plotted against the value of the covariate. The Kolmogorov-Smirnov test statistic (for example) is the maximum vertical separation between the two curves. If \code{style="PP"} then the P-P plot is drawn. The \eqn{x} coordinates of the plot are cumulative probabilities for the covariate under the model. The \eqn{y} coordinates are cumulative probabilities for the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic is the maximum vertical separation between the P-P plot and the diagonal reference line. If \code{style="QQ"} then the Q-Q plot is drawn. The \eqn{x} coordinates of the plot are quantiles of the covariate under the model. The \eqn{y} coordinates are quantiles of the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic cannot be read off the Q-Q plot. 
} \seealso{ \code{\link{cdf.test}} } \examples{ op <- options(useFancyQuotes=FALSE) # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- cdf.test(fit0, xcoord) # plot result of test plot(k, lwd0=3) plot(k, style="PP") plot(k, style="QQ") options(op) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/nncross.lpp.Rd0000644000176200001440000001042413333543263015444 0ustar liggesusers\name{nncross.lpp} \alias{nncross.lpp} \title{Nearest Neighbours on a Linear Network} \description{ Given two point patterns \code{X} and \code{Y} on a linear network, finds the nearest neighbour in \code{Y} of each point of \code{X} using the shortest path in the network. } \usage{ \method{nncross}{lpp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} linear network. } \item{iX, iY}{ Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{Ignored.} \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour, for each value of \code{k}. } \item{method}{ Internal use only. } } \details{ Given two point patterns \code{X} and \code{Y} on the same linear network, this function finds, for each point of \code{X}, the nearest point of \code{Y}, measuring distance by the shortest path in the network. The distance between these points is also computed. 
The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. The \code{k}th nearest neighbour may be undefined, for example if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. In this case, the \code{k}th nearest neighbour distance is infinite. } \value{ By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"}, a vector of nearest neighbour distances. If \code{what="which"}, a vector of nearest neighbour indices. 
If \code{k} is a vector of integers, the result is a matrix with one row for each point in \code{X}, giving the distances and/or indices of the \code{k}th nearest neighbours in \code{Y}. } \seealso{ \code{\link{nndist.lpp}} for nearest neighbour distances in a single point pattern. \code{\link{nnwhich.lpp}} to identify which points are nearest neighbours in a single point pattern. } \examples{ # two different point patterns X <- runiflpp(3, simplenet) Y <- runiflpp(5, simplenet) nn <- nncross(X,Y) nn plot(simplenet, main="nncross") plot(X, add=TRUE, cols="red") plot(Y, add=TRUE, cols="blue", pch=16) XX <- as.ppp(X) YY <- as.ppp(Y) i <- nn$which arrows(XX$x, XX$y, YY[i]$x, YY[i]$y, length=0.15) # nearest and second-nearest neighbours nncross(X, Y, k=1:2) # two patterns with some points in common X <- Y[1:2] iX <- 1:2 iY <- 1:5 nncross(X,Y, iX, iY) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/dclf.test.Rd0000644000176200001440000002576013333543263015064 0ustar liggesusers\name{dclf.test} \alias{dclf.test} \alias{mad.test} \title{ Diggle-Cressie-Loosmore-Ford and Maximum Absolute Deviation Tests } \description{ Perform the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) mad.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) } \arguments{ \item{X}{ Data for the test. Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class), a simulation envelope (object of class \code{"envelope"}) or a previous result of \code{dclf.test} or \code{mad.test}. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. 
Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{verbose=FALSE} to turn off the messages, \code{savefuns} or \code{savepatterns} to save the simulation results, and \code{use.theory} described under Details. } \item{alternative}{ The alternative hypothesis. A character string. The default is a two-sided alternative. See Details. } \item{rinterval}{ Interval of values of the summary function argument \code{r} over which the maximum absolute deviation, or the integral, will be computed for the test. A numeric vector of length 2. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{scale}{ Optional. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the test statistic is computed. } \item{clamp}{ Logical value indicating how to compute deviations in a one-sided test. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. 
If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } } \details{ These functions perform hypothesis tests for goodness-of-fit of a point pattern dataset to a point process model, based on Monte Carlo simulation from the model. \code{dclf.test} performs the test advocated by Loosmore and Ford (2006) which is also described in Diggle (1986), Cressie (1991, page 667, equation (8.5.42)) and Diggle (2003, page 14). See Baddeley et al (2014) for detailed discussion. \code{mad.test} performs the \sQuote{global} or \sQuote{Maximum Absolute Deviation} test described by Ripley (1977, 1981). See Baddeley et al (2014). The type of test depends on the type of argument \code{X}. \itemize{ \item If \code{X} is some kind of point pattern, then a test of Complete Spatial Randomness (CSR) will be performed. That is, the null hypothesis is that the point pattern is completely random. \item If \code{X} is a fitted point process model, then a test of goodness-of-fit for the fitted model will be performed. The model object contains the data point pattern to which it was originally fitted. The null hypothesis is that the data point pattern is a realisation of the model. \item If \code{X} is an envelope object generated by \code{\link{envelope}}, then it should have been generated with \code{savefuns=TRUE} or \code{savepatterns=TRUE} so that it contains simulation results. These simulations will be treated as realisations from the null hypothesis. \item Alternatively \code{X} could be a previously-performed test of the same kind (i.e. the result of calling \code{dclf.test} or \code{mad.test}). The simulations used to perform the original test will be re-used to perform the new test (provided these simulations were saved in the original test, by setting \code{savefuns=TRUE} or \code{savepatterns=TRUE}). 
} The argument \code{alternative} specifies the alternative hypothesis, that is, the direction of deviation that will be considered statistically significant. If \code{alternative="two.sided"} (the default), both positive and negative deviations (between the observed summary function and the theoretical function) are significant. If \code{alternative="less"}, then only negative deviations (where the observed summary function is lower than the theoretical function) are considered. If \code{alternative="greater"}, then only positive deviations (where the observed summary function is higher than the theoretical function) are considered. In all cases, the algorithm will first call \code{\link{envelope}} to generate or extract the simulated summary functions. The number of simulations that will be generated or extracted, is determined by the argument \code{nsim}, and defaults to 99. The summary function that will be computed is determined by the argument \code{fun} (or the first unnamed argument in the list \code{\dots}) and defaults to \code{\link{Kest}} (except when \code{X} is an envelope object generated with \code{savefuns=TRUE}, when these functions will be taken). The choice of summary function \code{fun} affects the power of the test. It is normally recommended to apply a variance-stabilising transformation (Ripley, 1981). If you are using the \eqn{K} function, the normal practice is to replace this by the \eqn{L} function (Besag, 1977) computed by \code{\link{Lest}}. If you are using the \eqn{F} or \eqn{G} functions, the recommended practice is to apply Fisher's variance-stabilising transformation \eqn{\sin^{-1}\sqrt x}{asin(sqrt(x))} using the argument \code{transform}. See the Examples. The argument \code{rinterval} specifies the interval of distance values \eqn{r} which will contribute to the test statistic (either maximising over this range of values for \code{mad.test}, or integrating over this range of values for \code{dclf.test}). 
This affects the power of the test. General advice and experiments in Baddeley et al (2014) suggest that the maximum \eqn{r} value should be slightly larger than the maximum possible range of interaction between points. The \code{dclf.test} is quite sensitive to this choice, while the \code{mad.test} is relatively insensitive. It is also possible to specify a pointwise test (i.e. taking a single, fixed value of distance \eqn{r}) by specifing \code{rinterval = c(r,r)}. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \section{Handling Ties}{ If the observed value of the test statistic is equal to one or more of the simulated values (called a \emph{tied value}), then the tied values will be assigned a random ordering, and a message will be printed. } \value{ An object of class \code{"htest"}. Printing this object gives a report on the result of the test. The \eqn{p}-value is contained in the component \code{p.value}. } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2015) \emph{Pushing the envelope}. 
In preparation. Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neuroscience Methods} \bold{18}, 115--125. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Loosmore, N.B. and Ford, E.D. (2006) Statistical inference using the \emph{G} or \emph{K} point pattern spatial statistics. \emph{Ecology} \bold{87}, 1925--1931. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \author{ \adrian, Andrew Hardegen and Suman Rakshit. 
} \seealso{ \code{\link{envelope}}, \code{\link{dclf.progress}} } \examples{ dclf.test(cells, Lest, nsim=39) m <- mad.test(cells, Lest, verbose=FALSE, rinterval=c(0, 0.1), nsim=19) m # extract the p-value m$p.value # variance stabilised G function dclf.test(cells, Gest, transform=expression(asin(sqrt(.))), verbose=FALSE, nsim=19) ## one-sided test ml <- mad.test(cells, Lest, verbose=FALSE, nsim=19, alternative="less") ## scaled mad.test(cells, Kest, verbose=FALSE, nsim=19, rinterval=c(0.05, 0.2), scale=function(r) { r }) } \keyword{spatial} \keyword{htest} spatstat/man/bw.pcf.Rd0000644000176200001440000001132713544333571014352 0ustar liggesusers\name{bw.pcf} \alias{bw.pcf} \title{ Cross Validated Bandwidth Selection for Pair Correlation Function } \description{ Uses composite likelihood or generalized least squares cross-validation to select a smoothing bandwidth for the kernel estimation of pair correlation function. } \usage{ bw.pcf(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, \dots, verbose=FALSE, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{rmax}{ Numeric. Maximum value of the spatial lag distance \eqn{r} for which \eqn{g(r)} should be evaluated. } \item{lambda}{ Optional. Values of the estimated intensity function. A vector giving the intensity values at the points of the pattern \code{X}. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{pcf.ppp}. } \item{kernel}{ Choice of smoothing kernel, passed to \code{density}; see \code{\link{pcf}} and \code{\link{pcfinhom}}. } \item{nr}{ Integer. Number of subintervals for discretization of [0, rmax] to use in computing numerical integrals. } \item{bias.correct}{ Logical. Whether to use bias corrected version of the kernel estimate. See Details. 
} \item{cv.method}{ Choice of cross validation method: either \code{"compLik"} or \code{"leastSQ"} (partially matched). } \item{simple}{ Logical. Whether to use simple removal of spatial lag distances. See Details. } \item{srange}{ Optional. Numeric vector of length 2 giving the range of bandwidth values that should be searched to find the optimum bandwidth. } \item{\dots}{ Other arguments, passed to \code{\link{pcf}} or \code{\link{pcfinhom}}. } \item{verbose}{ Logical value indicating whether to print progress reports during the optimization procedure. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the optimum value of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{bw} for the kernel estimator of the pair correlation function of a point process intensity computed by \code{\link{pcf.ppp}} (homogeneous case) or \code{\link{pcfinhom}} (inhomogeneous case). With \code{cv.method="leastSQ"}, the bandwidth \eqn{h} is chosen to minimise an unbiased estimate of the integrated mean-square error criterion \eqn{M(h)} defined in equation (4) in Guan (2007a). The code implements the fast algorithm of Jalilian and Waagepetersen (2018). With \code{cv.method="compLik"}, the bandwidth \eqn{h} is chosen to maximise a likelihood cross-validation criterion \eqn{CV(h)} defined in equation (6) of Guan (2007b). \deqn{ M(b) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0) }{ M(b) = \int_{0}^{rmax} \hat{g}^2(r;b) r dr - \sum_{u,v} } The result is a numerical value giving the selected bandwidth. } \section{Definition of bandwidth}{ The bandwidth \code{bw} returned by \code{bw.pcf} corresponds to the standard deviation of the smoothoing kernel. 
As mentioned in the documentation of \code{\link{density.default}} and \code{\link{pcf.ppp}}, this differs from the scale parameter \code{h} of the smoothing kernel which is often considered in the literature as the bandwidth of the kernel function. For example for the Epanechnikov kernel, \code{bw=h/sqrt(h)}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}} } \examples{ b <- bw.pcf(redwood) plot(pcf(redwood, bw=b)) } \references{ Guan, Y. (2007a). A composite likelihood cross-validation approach in selecting bandwidth for the estimation of the pair correlation function. \emph{Scandinavian Journal of Statistics}, \bold{34}(2), 336--346. Guan, Y. (2007b). A least-squares cross-validation bandwidth selection approach in pair correlation function estimations. \emph{Statistics & Probability Letters}, \bold{77}(18), 1722--1729. Jalilian, A. and Waagepetersen, R. (2018) Fast bandwidth selection for estimation of the pair correlation function. \emph{Journal of Statistical Computation and Simulation}, \bold{88}(10), 2001--2011. \url{https://www.tandfonline.com/doi/full/10.1080/00949655.2018.1428606} } \author{ Rasmus Waagepetersen and Abdollah Jalilian. Adapted for \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/harmonise.msr.Rd0000644000176200001440000000173313333543263015755 0ustar liggesusers\name{harmonise.msr} \alias{harmonise.msr} \title{Make Measures Compatible} \description{ Convert several measures to a common quadrature scheme } \usage{ \method{harmonise}{msr}(\dots) } \arguments{ \item{\dots}{ Any number of measures (objects of class \code{"msr"}). } } \details{ This function makes any number of measures compatible, by converting them all to a common quadrature scheme. The command \code{\link{harmonise}} is generic. 
This is the method for objects of class \code{"msr"}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are measures. } \author{ \spatstatAuthors. } \examples{ fit1 <- ppm(cells ~ x) fit2 <- ppm(rpoispp(ex=cells) ~ x) m1 <- residuals(fit1) m2 <- residuals(fit2) harmonise(m1, m2) s1 <- residuals(fit1, type="score") s2 <- residuals(fit2, type="score") harmonise(s1, s2) } \seealso{ \code{\link{harmonise}}, \code{\link{msr}} } \keyword{spatial} \keyword{manip} spatstat/man/transmat.Rd0000644000176200001440000000571313333543264015024 0ustar liggesusers\name{transmat} \alias{transmat} \title{ Convert Pixel Array Between Different Conventions } \description{ This function provides a simple way to convert arrays of pixel data between different display conventions. } \usage{ transmat(m, from, to) } \arguments{ \item{m}{ A matrix. } \item{from,to}{ Specifications of the spatial arrangement of the pixels. See Details. } } \details{ Pixel images are handled by many different software packages. In virtually all of these, the pixel values are stored in a matrix, and are accessed using the row and column indices of the matrix. However, different pieces of software use different conventions for mapping the matrix indices \eqn{[i,j]} to the spatial coordinates \eqn{(x,y)}. \itemize{ \item In the \emph{Cartesian} convention, the first matrix index \eqn{i} is associated with the first Cartesian coordinate \eqn{x}, and \eqn{j} is associated with \eqn{y}. This convention is used in \code{\link[graphics]{image.default}}. \item In the \emph{European reading order} convention, a matrix is displayed in the spatial coordinate system as it would be printed in a page of text: \eqn{i} is effectively associated with the negative \eqn{y} coordinate, and \eqn{j} is associated with \eqn{x}. This convention is used in some image file formats. 
\item In the \code{spatstat} convention, \eqn{i} is associated with the increasing \eqn{y} coordinate, and \eqn{j} is associated with \eqn{x}. This is also used in some image file formats. } To convert between these conventions, use the function \code{transmat}. If a matrix \code{m} contains pixel image data that is correctly displayed by software that uses the Cartesian convention, and we wish to convert it to the European reading convention, we can type \code{mm <- transmat(m, from="Cartesian", to="European")}. The transformed matrix \code{mm} will then be correctly displayed by software that uses the European convention. Each of the arguments \code{from} and \code{to} can be one of the names \code{"Cartesian"}, \code{"European"} or \code{"spatstat"} (partially matched) or it can be a list specifying another convention. For example \code{to=list(x="-i", y="-j")!} specifies that rows of the output matrix are expected to be displayed as vertical columns in the plot, starting at the right side of the plot, as in the traditional Chinese, Japanese and Korean writing order. } \value{ Another matrix obtained by rearranging the entries of \code{m}. } \author{ \adrian \rolf and \ege } \examples{ opa <- par(mfrow=c(1,2)) # image in spatstat format Z <- bei.extra$elev plot(Z, main="plot.im", ribbon=FALSE) m <- as.matrix(Z) # convert matrix to format suitable for display by image.default Y <- transmat(m, from="spatstat", to="Cartesian") image(Y, asp=0.5, main="image.default", axes=FALSE) par(opa) } \keyword{spatial} \keyword{hplot} \keyword{manip} spatstat/man/dirichletAreas.Rd0000644000176200001440000000173513333543263016115 0ustar liggesusers\name{dirichletAreas} \alias{dirichletAreas} \title{ Compute Areas of Tiles in Dirichlet Tessellation } \description{ Calculates the area of each tile in the Dirichlet-Voronoi tessellation of a point pattern. } \usage{ dirichletAreas(X) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). 
} } \details{ This is an efficient algorithm to calculate the areas of the tiles in the Dirichlet-Voronoi tessellation. If the window of \code{X} is a binary pixel mask, the tile areas are computed by counting pixels. Otherwise the areas are computed exactly using analytic geometry. If any points of \code{X} are duplicated, the duplicates will have tile area zero. } \value{ Numeric vector with one entry for each point of \code{X}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{dirichlet}}, \code{\link{dirichletVertices}} } \examples{ aa <- dirichletAreas(cells) } \keyword{spatial} \keyword{math} \keyword{manip} spatstat/man/Gres.Rd0000644000176200001440000000534213571674202014072 0ustar liggesusers\name{Gres} \Rdversion{1.1} \alias{Gres} \title{ Residual G Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{G} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Gres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Gcom}}. } \item{\dots}{ Arguments passed to \code{\link{Gcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{G} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Gres} first calls \code{\link{Gcom}} to compute both the nonparametric estimate of the \eqn{G} function and its model compensator. Then \code{Gres} computes the difference between them, which is the residual \eqn{G}-function. 
Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Gcom}}. Then \code{Gres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Gcom}}, \code{\link{Gest}}. Alternative functions: \code{\link{Kres}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model-fitting: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gres(fit0) plot(G0) # Hanisch correction estimate plot(G0, hres ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gres(fit1), hres ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Gres(cells, interaction=Strauss(0.12))) # How to make envelopes \dontrun{ E <- envelope(fit1, Gres, model=fit1, nsim=39) plot(E) } # For computational efficiency Gc <- Gcom(fit1) G1 <- Gres(Gc) } \keyword{spatial} \keyword{models} spatstat/man/density.splitppp.Rd0000644000176200001440000000454513333543263016525 0ustar liggesusers\name{density.splitppp} \alias{density.splitppp} \alias{density.ppplist} \title{Kernel Smoothed Intensity of Split Point Pattern} \description{ Compute a kernel smoothed intensity function for each of the components of a split point pattern, or each of the point patterns in a list. } \usage{ \method{density}{splitppp}(x, \dots, se=FALSE) \method{density}{ppplist}(x, \dots, se=FALSE) } \arguments{ \item{x}{ Split point pattern (object of class \code{"splitppp"} created by \code{\link{split.ppp}}) to be smoothed. 
Alternatively a list of point patterns, of class \code{"ppplist"}. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} to control the smoothing, pixel resolution, edge correction etc. } \item{se}{ Logical value indicating whether to compute standard errors as well. } } \value{ A list of pixel images (objects of class \code{"im"}) which can be plotted or printed; or a list of numeric vectors giving the values at specified points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. The argument \code{x} should be a list of point patterns, and should belong to one of the classes \code{"ppplist"} or \code{"splitppp"}. Typically \code{x} is obtained by applying the function \code{\link{split.ppp}} to a point pattern \code{y} by calling \code{split(y)}. This splits the points of \code{y} into several sub-patterns. A kernel estimate of the intensity function of each of the point patterns is computed using \code{\link{density.ppp}}. The return value is usually a list, each of whose entries is a pixel image (object of class \code{"im"}). The return value also belongs to the class \code{"solist"} and can be plotted or printed. If the argument \code{at="points"} is given, the result is a list of numeric vectors giving the intensity values at the data points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{im.object}} } \examples{ Z <- density(split(amacrine), 0.05) plot(Z) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/summary.dppm.Rd0000644000176200001440000000414213552233663015624 0ustar liggesusers\name{summary.dppm} \alias{summary.dppm} \alias{print.summary.dppm} \title{Summarizing a Fitted Determinantal Point Process Model} \description{ \code{summary} method for class \code{"dppm"}. } \usage{ \method{summary}{dppm}(object, \dots, quick=FALSE) \method{print}{summary.dppm}(x, \dots) } \arguments{ \item{object}{ A fitted determinantal point process model (object of class \code{"dppm"}). } \item{quick}{Logical value controlling the scope of the summary.} \item{\dots}{Arguments passed to \code{\link{summary.ppm}} or \code{\link{print.summary.ppm}} controlling the treatment of the trend component of the model.} \item{x}{Object of class \code{"summary.dppm"} as returned by \code{summary.dppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"dppm"}. An object of class \code{"dppm"} describes a fitted determinantal point process model. See \code{\link{dppm}}. \code{summary.dppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. \code{print.summary.dppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.dppm} is invoked implicitly when the user calls \code{summary.dppm} without assigning its value to anything. See the examples. } \value{ \code{summary.dppm} returns an object of class \code{"summary.dppm"}, while \code{print.summary.dppm} returns \code{NULL}. 
The result of \code{summary.dppm} includes at least the following components: \item{Xname}{character string name of the original point pattern data} \item{stationary}{logical value indicating whether the model is stationary} \item{trend}{Object of class \code{summary.ppm} summarising the trend} \item{repul}{Repulsiveness index} } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } fit <- dppm(jpines ~ 1, dppGauss) summary(fit) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rotate.owin.Rd0000644000176200001440000000273313333543264015443 0ustar liggesusers\name{rotate.owin} \alias{rotate.owin} \title{Rotate a Window} \description{ Rotates a window } \usage{ \method{rotate}{owin}(X, angle=pi/2, \dots, rescue=TRUE, centre=NULL) } \arguments{ \item{X}{A window (object of class \code{"owin"}).} \item{angle}{Angle of rotation.} \item{rescue}{ Logical. If \code{TRUE}, the rotated window will be processed by \code{\link{rescue.rectangle}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the resolution of the rotated window, if \code{X} is a binary pixel mask. Ignored if \code{X} is not a binary mask. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"owin"} representing the rotated window. } \details{ Rotates the window by the specified angle. Angles are measured in radians, anticlockwise. The default is to rotate the window 90 degrees anticlockwise. The centre of rotation is the origin, by default, unless \code{centre} is specified. 
} \seealso{ \code{\link{owin.object}} } \examples{ w <- owin(c(0,1),c(0,1)) v <- rotate(w, pi/3) e <- rotate(w, pi/2, centre="midpoint") \dontrun{ plot(v) } w <- as.mask(letterR) v <- rotate(w, pi/5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.imlist.Rd0000644000176200001440000000524413333543264015450 0ustar liggesusers\name{plot.imlist} \alias{plot.imlist} \alias{image.imlist} \alias{image.listof} \title{Plot a List of Images} \description{ Plots an array of pixel images. } \usage{ \method{plot}{imlist}(x, \dots, plotcommand="image", equal.ribbon=FALSE, ribmar=NULL) \method{image}{imlist}(x, \dots, equal.ribbon=FALSE, ribmar=NULL) \method{image}{listof}(x, \dots, equal.ribbon=FALSE, ribmar=NULL) } \arguments{ \item{x}{ An object of the class \code{"imlist"} representing a list of pixel images. Alternatively \code{x} may belong to the outdated class \code{"listof"}. } \item{\dots}{ Arguments passed to \code{\link{plot.solist}} to control the spatial arrangement of panels, and arguments passed to \code{\link{plot.im}} to control the display of each panel. } \item{equal.ribbon}{ Logical. If \code{TRUE}, the colour maps of all the images will be the same. If \code{FALSE}, the colour map of each image is adjusted to the range of values of that image. } \item{ribmar}{ Numeric vector of length 4 specifying the margins around the colour ribbon, if \code{equal.ribbon=TRUE}. Entries in the vector give the margin at the bottom, left, top, and right respectively, as a multiple of the height of a line of text. } \item{plotcommand}{ Character string giving the name of a function to be used to display each image. Recognised by \code{plot.imlist} only. } } \value{ Null. } \details{ These are methods for the generic plot commands \code{plot} and \code{image} for the class \code{"imlist"}. They are currently identical. An object of class \code{"imlist"} represents a list of pixel images. (The outdated class \code{"listof"} is also handled.) 
Each entry in the list \code{x} will be displayed as a pixel image, in an array of panels laid out on the same graphics display, using \code{\link{plot.solist}}. Individual panels are plotted by \code{\link{plot.im}}. If \code{equal.ribbon=FALSE} (the default), the images are rendered using different colour maps, which are displayed as colour ribbons beside each image. If \code{equal.ribbon=TRUE}, the images are rendered using the same colour map, and a single colour ribbon will be displayed at the right side of the array. The colour maps and the placement of the colour ribbons are controlled by arguments \code{\dots} passed to \code{\link{plot.im}}. } \seealso{ \code{\link{plot.solist}}, \code{\link{plot.im}} } \examples{ D <- density(split(amacrine)) image(D, equal.ribbon=TRUE, main="", col.ticks="red", col.axis="red") } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/delaunayDistance.Rd0000644000176200001440000000254213333543263016444 0ustar liggesusers\name{delaunayDistance} \alias{delaunayDistance} \title{Distance on Delaunay Triangulation} \description{ Computes the graph distance in the Delaunay triangulation of a point pattern. } \usage{ delaunayDistance(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ The Delaunay triangulation of a spatial point pattern \code{X} is defined as follows. First the Dirichlet/Voronoi tessellation of \code{X} computed; see \code{\link{dirichlet}}. Then two points of \code{X} are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles share a common boundary. Every pair of Delaunay neighbours is joined by a straight line. The \emph{graph distance} in the Delaunay triangulation between two points \code{X[i]} and \code{X[j]} is the minimum number of edges of the Delaunay triangulation that must be traversed to go from \code{X[i]} to \code{X[j]}. 
This command returns a matrix \code{D} such that \code{D[i,j]} is the graph distance between \code{X[i]} and \code{X[j]}. } \value{ A symmetric square matrix with integer entries. } \seealso{ \code{\link{delaunay}}, \code{\link{delaunayNetwork}} } \examples{ X <- runifpoint(20) M <- delaunayDistance(X) plot(delaunay(X), lty=3) text(X, labels=M[1, ], cex=2) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/layout.boxes.Rd0000644000176200001440000000255513333543263015627 0ustar liggesusers\name{layout.boxes} \alias{layout.boxes} \title{ Generate a Row or Column Arrangement of Rectangles. } \description{ A simple utility to generate a row or column of boxes (rectangles) for use in point-and-click panels. } \usage{ layout.boxes(B, n, horizontal = FALSE, aspect = 0.5, usefrac = 0.9) } \arguments{ \item{B}{ Bounding rectangle for the boxes. An object of class \code{"owin"}. } \item{n}{ Integer. The number of boxes. } \item{horizontal}{ Logical. If \code{TRUE}, arrange the boxes in a horizontal row. If \code{FALSE} (the default), arrange them in a vertical column. } \item{aspect}{ Aspect ratio (height/width) of each box. } \item{usefrac}{ Number between 0 and 1. The fraction of height or width of \code{B} that should be occupied by boxes. } } \details{ This simple utility generates a list of boxes (rectangles) inside the bounding box \code{B} arranged in a regular row or column. It is useful for generating the positions of the panel buttons in the function \code{\link{simplepanel}}. } \value{ A list of rectangles. 
} \examples{ B <- owin(c(0,10),c(0,1)) boxes <- layout.boxes(B, 5, horizontal=TRUE) plot(B, main="", col="blue") niets <- lapply(boxes, plot, add=TRUE, col="grey") } \author{\adrian and \rolf } \seealso{ \code{\link{simplepanel}} } \keyword{utilities} spatstat/man/vertices.Rd0000644000176200001440000000257713333543264015024 0ustar liggesusers\name{vertices} \alias{vertices} \alias{vertices.owin} \title{Vertices of a Window} \description{ Finds the vertices of a window, or similar object. } \usage{ vertices(w) \method{vertices}{owin}(w) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or similar object.} } \value{ A list with components \code{x} and \code{y} giving the coordinates of the vertices. } \details{ This function computes the vertices (`corners') of a spatial window or other object. For \code{vertices.owin}, the argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details). If \code{w} is a rectangle, the coordinates of the four corner points are returned. If \code{w} is a polygonal window (consisting of one or more polygons), the coordinates of the vertices of all polygons are returned. If \code{w} is a binary mask, then a `boundary pixel' is defined to be a pixel inside the window which has at least one neighbour outside the window. The coordinates of the centres of all boundary pixels are returned. } \seealso{ \code{\link{owin.object}}. 
} \examples{ data(letterR) vert <- vertices(letterR) plot(letterR, main="Polygonal vertices") points(vert) plot(letterR, main="Boundary pixels") points(vertices(as.mask(letterR))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/headtail.Rd0000644000176200001440000000354213333543263014743 0ustar liggesusers\name{headtail} \alias{head.ppp} \alias{head.ppx} \alias{head.psp} \alias{head.tess} \alias{tail.ppp} \alias{tail.ppx} \alias{tail.psp} \alias{tail.tess} \title{ First or Last Part of a Spatial Pattern } \description{ Returns the first few elements (\code{head}) or the last few elements (\code{tail}) of a spatial pattern. } \usage{ \method{head}{ppp}(x, n = 6L, \dots) \method{head}{ppx}(x, n = 6L, \dots) \method{head}{psp}(x, n = 6L, \dots) \method{head}{tess}(x, n = 6L, \dots) \method{tail}{ppp}(x, n = 6L, \dots) \method{tail}{ppx}(x, n = 6L, \dots) \method{tail}{psp}(x, n = 6L, \dots) \method{tail}{tess}(x, n = 6L, \dots) } \arguments{ \item{x}{ A spatial pattern of geometrical figures, such as a spatial pattern of points (an object of class \code{"ppp"}, \code{"pp3"}, \code{"ppx"} or \code{"lpp"}) or a spatial pattern of line segments (an object of class \code{"psp"}) or a tessellation (object of class \code{"tess"}). } \item{n}{ Integer. The number of elements of the pattern that should be extracted. } \item{\dots}{ Ignored. } } \details{ These are methods for the generic functions \code{\link[utils]{head}} and \code{\link[utils]{tail}}. They extract the first or last \code{n} elements from \code{x} and return them as an object of the same kind as \code{x}. To inspect the spatial coordinates themselves, use \code{\link[utils]{View}(x)} or \code{head(as.data.frame(x))}. } \value{ An object of the same class as \code{x}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{View}}, \code{\link[utils]{edit}}. 
Conversion to data frame: \code{\link{as.data.frame.ppp}}, \code{\link{as.data.frame.ppx}}, \code{\link{as.data.frame.psp}} } \examples{ head(cells) tail(as.psp(spiders), 10) head(dirichlet(cells), 4) } \keyword{spatial} \keyword{manip} spatstat/man/shift.owin.Rd0000644000176200001440000000457413442350577015273 0ustar liggesusers\name{shift.owin} \alias{shift.owin} \title{Apply Vector Translation To Window} \description{ Applies a vector shift to a window } \usage{ \method{shift}{owin}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{ Location that will be shifted to the origin. Either a numeric vector of length 2 giving the location, or a point pattern containing only one point, or a list with two entries named \code{x} and \code{y}, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). } } \value{ Another window (of class \code{"owin"}) representing the result of applying the vector shift. } \details{ The window is translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, the argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the coordinate origin \eqn{(0,0)}. The argument \code{origin} should be either a numeric vector of length 2 giving the spatial coordinates of a location, or one of the character strings \code{"centroid"}, \code{"midpoint"}, \code{"left"}, \code{"right"}, \code{"top"}, \code{"bottom"}, \code{"topleft"}, \code{"bottomleft"}, \code{"topright"} or \code{"bottomright"} (partially matched). If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. 
If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin, and so on. } \seealso{ \code{\link{shift}}, \code{\link{shift.ppp}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}}, \code{\link{centroid.owin}} } \examples{ W <- owin(c(0,1),c(0,1)) X <- shift(W, c(2,3)) \dontrun{ plot(W) # no discernible difference except coordinates are different } shift(W, origin="top") } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/split.ppx.Rd0000644000176200001440000000736413344410707015135 0ustar liggesusers\name{split.ppx} \alias{split.ppx} \title{Divide Multidimensional Point Pattern into Sub-patterns} \description{ Divides a multidimensional point pattern into several sub-patterns, according to their marks, or according to any user-specified grouping. } \usage{ \method{split}{ppx}(x, f = marks(x), drop=FALSE, un=NULL, \dots) } \arguments{ \item{x}{ A multi-dimensional point pattern. An object of class \code{"ppx"}. } \item{f}{ Data determining the grouping. Either a factor, a logical vector, or the name of one of the columns of marks. } \item{drop}{ Logical. Determines whether empty groups will be deleted. } \item{un}{ Logical. Determines whether the resulting subpatterns will be unmarked (i.e. whether marks will be removed from the points in each subpattern). } \item{\dots}{ Other arguments are ignored. } } \value{ A list of point patterns. The components of the list are named by the levels of \code{f}. The list also has the class \code{"splitppx"} and \code{"anylist"}. } \details{ The generic command \code{\link[base]{split}} allows a dataset to be separated into subsets according to the value of a grouping variable. 
The function \code{split.ppx} is a method for the generic \code{\link[base]{split}} for the class \code{"ppx"} of multidimensional point patterns. It divides up the points of the point pattern \code{x} into several sub-patterns according to the values of \code{f}. The result is a list of point patterns. The argument \code{f} may be \itemize{ \item a factor, of length equal to the number of points in \code{x}. The levels of \code{f} determine the destination of each point in \code{x}. The \code{i}th point of \code{x} will be placed in the sub-pattern \code{split.ppx(x)$l} where \code{l = f[i]}. \item a character string, matching the name of one of the columns of marks, if \code{marks(x)} is a data frame. This column should be a factor. } If \code{f} is missing, then it will be determined by the marks of the point pattern. The pattern \code{x} can be either \itemize{ \item a multitype point pattern (a marked point pattern whose marks vector is a factor). Then \code{f} is taken to be the marks vector. The effect is that the points of each type are separated into different point patterns. \item a marked point pattern with a data frame or hyperframe of marks, containing at least one column that is a factor. The first such column will be used to determine the splitting factor \code{f}. } Some of the sub-patterns created by the split may be empty. If \code{drop=TRUE}, then empty sub-patterns will be deleted from the list. If \code{drop=FALSE} then they are retained. The argument \code{un} determines how to handle marks in the case where \code{x} is a marked point pattern. If \code{un=TRUE} then the marks of the points will be discarded when they are split into groups, while if \code{un=FALSE} then the marks will be retained. If \code{f} and \code{un} are both missing, then the default is \code{un=TRUE} for multitype point patterns and \code{un=FALSE} for marked point patterns with a data frame of marks. 
The result of \code{split.ppx} has class \code{"splitppx"} and \code{"anylist"}. There are methods for \code{print}, \code{summary} and \code{plot}. } \seealso{ \code{\link{ppx}}, \code{\link{plot.anylist}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m","m")) X split(X) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/as.ppp.Rd0000644000176200001440000001142413612467346014376 0ustar liggesusers\name{as.ppp} \alias{as.ppp} \alias{as.ppp.ppp} \alias{as.ppp.psp} \alias{as.ppp.quad} \alias{as.ppp.matrix} \alias{as.ppp.data.frame} \alias{as.ppp.influence.ppm} \alias{as.ppp.default} \title{Convert Data To Class ppp} \description{ Tries to coerce any reasonable kind of data to a spatial point pattern (an object of class \code{"ppp"}) for use by the \pkg{spatstat} package). } \usage{ as.ppp(X, \dots, fatal=TRUE) \method{as.ppp}{ppp}(X, \dots, fatal=TRUE) \method{as.ppp}{psp}(X, \dots, fatal=TRUE) \method{as.ppp}{quad}(X, \dots, fatal=TRUE) \method{as.ppp}{matrix}(X, W=NULL, \dots, fatal=TRUE) \method{as.ppp}{data.frame}(X, W=NULL, \dots, fatal=TRUE) \method{as.ppp}{influence.ppm}(X, \dots) \method{as.ppp}{default}(X, W=NULL, \dots, fatal=TRUE) } \arguments{ \item{X}{Data which will be converted into a point pattern} \item{W}{ Data which define a window for the pattern, when \code{X} does not contain a window. (Ignored if \code{X} contains window information.) } \item{\dots}{Ignored.} \item{fatal}{ Logical value specifying what to do if the data cannot be converted. See Details. } } \value{ An object of class \code{"ppp"} (see \code{\link{ppp.object}}) describing the point pattern and its window of observation. The value \code{NULL} may also be returned; see Details. } \details{ Converts the dataset \code{X} to a point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}} for an overview). 
This function is normally used to convert an existing point pattern dataset, stored in another format, to the \code{"ppp"} format. To create a new point pattern from raw data such as \eqn{x,y} coordinates, it is normally easier to use the creator function \code{\link{ppp}}. The function \code{as.ppp} is generic, with methods for the classes \code{"ppp"}, \code{"psp"}, \code{"quad"}, \code{"matrix"}, \code{"data.frame"} and a default method. The dataset \code{X} may be: \itemize{ \item an object of class \code{"ppp"} \item an object of class \code{"psp"} \item a point pattern object created by the \pkg{spatial} library \item an object of class \code{"quad"} representing a quadrature scheme (see \code{\link{quad.object}}) \item a matrix or data frame with at least two columns \item a structure with entries \code{x}, \code{y} which are numeric vectors of equal length \item a numeric vector of length 2, interpreted as the coordinates of a single point. } In the last three cases, we need the second argument \code{W} which is converted to a window object by the function \code{\link{as.owin}}. In the first four cases, \code{W} will be ignored. If \code{X} is a line segment pattern (an object of class \code{psp}) the point pattern returned consists of the endpoints of the segments. If \code{X} is marked then the point pattern returned will also be marked, the mark associated with a point being the mark of the segment of which that point was an endpoint. If \code{X} is a matrix or data frame, the first and second columns will be interpreted as the \eqn{x} and \eqn{y} coordinates respectively. Any additional columns will be interpreted as marks. The argument \code{fatal} indicates what to do when \code{W} is missing and \code{X} contains no information about the window. If \code{fatal=TRUE}, a fatal error will be generated; if \code{fatal=FALSE}, the value \code{NULL} is returned. 
In the \pkg{spatial} library, a point pattern is represented in either of the following formats: \itemize{ \item (in \pkg{spatial} versions 1 to 6) a structure with entries \code{x}, \code{y} \code{xl}, \code{xu}, \code{yl}, \code{yu} \item (in \pkg{spatial} version 7) a structure with entries \code{x}, \code{y} and \code{area}, where \code{area} is a structure with entries \code{xl}, \code{xu}, \code{yl}, \code{yu} } where \code{x} and \code{y} are vectors of equal length giving the point coordinates, and \code{xl}, \code{xu}, \code{yl}, \code{yu} are numbers giving the dimensions of a rectangular window. Point pattern datasets can also be created by the function \code{\link{ppp}}. } \seealso{ \code{\link{ppp}}, \code{\link{ppp.object}}, \code{\link{as.owin}}, \code{\link{owin.object}} } \examples{ xy <- matrix(runif(40), ncol=2) pp <- as.ppp(xy, c(0,1,0,1)) # Venables-Ripley format # check for 'spatial' package spatialpath <- system.file(package="spatial") if(nchar(spatialpath) > 0) { require(spatial) towns <- ppinit("towns.dat") pp <- as.ppp(towns) # converted to our format detach(package:spatial) } xyzt <- matrix(runif(40), ncol=4) Z <- as.ppp(xyzt, square(1)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/Lcross.inhom.Rd0000644000176200001440000000775413571674202015561 0ustar liggesusers\name{Lcross.inhom} \alias{Lcross.inhom} \title{ Inhomogeneous Cross Type L Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross-type \eqn{L} function. } \usage{ Lcross.inhom(X, i, j, \dots, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. 
A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kcross.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{ij}(r)}{Lij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{ij}(r)}{Lij(r)} for a marked Poisson process, identically equal to \code{r} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}(r)}{Lij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Lcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kcross.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{ij}(r)}{Kij(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Lcross}}, \code{\link{Linhom}}, \code{\link{Kcross.inhom}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") L <- Lcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): fit parametric intensity model fit <- ppm(woods ~marks * polynom(x,y,2)) # evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) L <- Lcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Lcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/identify.ppp.Rd0000644000176200001440000000351213333543263015576 0ustar liggesusers\name{identify.ppp} \alias{identify.ppp} \alias{identify.lpp} \title{Identify Points in a Point Pattern} \description{ If a point pattern is plotted in the graphics window, this function will find the point of the pattern which is nearest to the mouse position, and print its mark value (or its serial number if there is no mark). } \usage{ \method{identify}{ppp}(x, \dots) \method{identify}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{identify.default}}. 
} } \value{ If \code{x} is unmarked, the result is a vector containing the serial numbers of the points in the pattern \code{x} that were identified. If \code{x} is marked, the result is a 2-column matrix, the first column containing the serial numbers and the second containing the marks for these points. } \details{ This is a method for the generic function \code{\link[graphics]{identify}} for point pattern objects. The point pattern \code{x} should first be plotted using \code{\link{plot.ppp}} or \code{\link{plot.lpp}} as appropriate. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the point of the pattern \code{x} closest to the mouse position. If this closest point is sufficiently close to the mouse pointer, its index (and its mark if any) will be returned as part of the value of the call. Each time a point of the pattern is identified, text will be displayed next to the point, showing its serial number (if \code{x} is unmarked) or its mark value (if \code{x} is marked). } \seealso{ \code{\link[graphics]{identify}}, \code{\link{clickppp}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{iplot} spatstat/man/clickbox.Rd0000644000176200001440000000246113333543263014765 0ustar liggesusers\name{clickbox} \alias{clickbox} \title{Interactively Define a Rectangle} \description{ Allows the user to specify a rectangle by point-and-click in the display. } \usage{ clickbox(add=TRUE, \dots) } \arguments{ \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{\dots}{ Graphics arguments passed to \code{\link[graphics]{polygon}} to plot the box. } } \value{ A window (object of class \code{"owin"}) representing the selected rectangle. } \details{ This function allows the user to create a rectangular window by interactively clicking on the screen display. 
The user is prompted to point the mouse at any desired locations for two corners of the rectangle, and click the left mouse button to add each point. The return value is a window (object of class \code{"owin"}) representing the rectangle. This function uses the \R command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. } \seealso{ \code{\link{clickpoly}}, \code{\link{clickppp}}, \code{\link{clickdist}}, \code{\link[graphics]{locator}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{iplot} spatstat/man/unmark.Rd0000644000176200001440000000240013333543264014456 0ustar liggesusers\name{unmark} \alias{unmark} \alias{unmark.ppp} \alias{unmark.splitppp} \alias{unmark.psp} \alias{unmark.ppx} \title{Remove Marks} \description{ Remove the mark information from a spatial dataset. } \usage{ unmark(X) \method{unmark}{ppp}(X) \method{unmark}{splitppp}(X) \method{unmark}{psp}(X) \method{unmark}{ppx}(X) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}), a split point pattern (object of class \code{"splitppp"}), a line segment pattern (object of class \code{"psp"}) or a multidimensional space-time point pattern (object of class \code{"ppx"}). } } \value{ An object of the same class as \code{X} with any mark information deleted. } \details{ A `mark' is a value attached to each point in a spatial point pattern, or attached to each line segment in a line segment pattern, etc. The function \code{unmark} is a simple way to remove the marks from such a dataset. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{psp.object}} } \examples{ data(lansing) hicks <- lansing[lansing$marks == "hickory", ] \dontrun{ plot(hicks) # still a marked point pattern, but only 1 value of marks plot(unmark(hicks)) # unmarked } } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/plot.symbolmap.Rd0000644000176200001440000000456513333543264016157 0ustar liggesusers\name{plot.symbolmap} \alias{plot.symbolmap} \title{ Plot a Graphics Symbol Map } \description{ Plot a representation of a graphics symbol map, similar to a plot legend. } \usage{ \method{plot}{symbolmap}(x, \dots, main, xlim = NULL, ylim = NULL, vertical = FALSE, side = c("bottom", "left", "top", "right"), annotate = TRUE, labelmap = NULL, add = FALSE, nsymbols = NULL) } \arguments{ \item{x}{ Graphics symbol map (object of class \code{"symbolmap"}). } \item{\dots}{ Additional graphics arguments passed to \code{\link{points}}, \code{\link{symbols}} or \code{\link{axis}}. } \item{main}{ Main title for the plot. A character string. } \item{xlim,ylim}{ Coordinate limits for the plot. Numeric vectors of length 2. } \item{vertical}{ Logical. Whether to plot the symbol map in a vertical orientation. } \item{side}{ Character string specifying the position of the text that annotates the symbols. } \item{annotate}{ Logical. Whether to annotate the symbols with labels. } \item{labelmap}{ Transformation of the labels. A function or a scale factor which will be applied to the data values corresponding to the plotted symbols. } \item{add}{ Logical value indicating whether to add the plot to the current plot (\code{add=TRUE}) or to initialise a new plot. } \item{nsymbols}{ Optional. The number of symbols that should be displayed. (This may not be exactly obeyed.) } } \details{ A graphics symbol map is an association between data values and graphical symbols. This command plots the graphics symbol map itself, in the style of a plot legend. } \value{ None. 
} \author{ \spatstatAuthors. } \seealso{ \code{\link{symbolmap}} to create a symbol map. \code{\link{invoke.symbolmap}} to apply the symbol map to some data and plot the resulting symbols. } \examples{ g <- symbolmap(inputs=letters[1:10], pch=11:20) plot(g) g2 <- symbolmap(range=c(-1,1), shape=function(x) ifelse(x > 0, "circles", "squares"), size=function(x) sqrt(ifelse(x > 0, x/pi, -x)), bg = function(x) ifelse(abs(x) < 1, "red", "black")) plot(g2, vertical=TRUE, side="left", col.axis="blue", cex.axis=2) } \keyword{spatial} \keyword{hplot} spatstat/man/scanpp.Rd0000644000176200001440000000716513333543264014462 0ustar liggesusers\name{scanpp} \alias{scanpp} \title{Read Point Pattern From Data File} \description{ Reads a point pattern dataset from a text file. } \usage{ scanpp(filename, window, header=TRUE, dir="", factor.marks=NULL, ...) } \arguments{ \item{filename}{ String name of the file containing the coordinates of the points in the point pattern, and their marks if any. } \item{window}{ Window for the point pattern. An object of class \code{"owin"}. } \item{header}{ Logical flag indicating whether the first line of the file contains headings for the columns. Passed to \code{\link[utils]{read.table}}. } \item{dir}{ String containing the path name of the directory in which \code{filename} is to be found. Default is the current directory. } \item{factor.marks}{ Logical vector (or NULL) indicating whether marks are to be interpreted as factors. Defaults to \code{NULL} which means that strings will be interpreted as factors while numeric variables will not. See details. } \item{\dots}{ Ignored. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}). } \details{ This simple function reads a point pattern dataset from a file containing the cartesian coordinates of its points, and optionally the mark values for these points. 
The file identified by \code{filename} in directory \code{dir} should be a text file that can be read using \code{\link[utils]{read.table}}. Thus, each line of the file (except possibly the first line) contains data for one point in the point pattern. Data are arranged in columns. There should be either two columns (for an unmarked point pattern) or more columns (for a marked point pattern). If \code{header=FALSE} then the first two columns of data will be interpreted as the \eqn{x} and \eqn{y} coordinates of points. Remaining columns, if present, will be interpreted as containing the marks for these points. If \code{header=TRUE} then the first line of the file should contain string names for each of the columns of data. If there are columns named \code{x} and \code{y} then these will be taken as the cartesian coordinates, and any remaining columns will be taken as the marks. If there are no columns named \code{x} and \code{y} then the first and second columns will be taken as the cartesian coordinates. If a logical vector is provided for \code{factor.marks} the length should equal the number of mark columns (a shorter \code{factor.marks} is recycled to this length). This vector is then used to determine which mark columns should be interpreted as factors. Note: Strings will not be interpreted as factors if the corresponding entry in \code{factor.marks} is \code{FALSE}. Note that there is intentionally no default for \code{window}. The window of observation should be specified. If you really need to estimate the window, use the Ripley-Rasson estimator \code{\link{ripras}}. } \seealso{ \code{\link{ppp.object}}, \code{\link{ppp}}, \code{\link{as.ppp}}, \code{\link{ripras}} } \author{ \adrian and \rolf. 
} \examples{ ## files installed with spatstat, for demonstration d <- system.file("rawdata", "finpines", package="spatstat.data") if(nzchar(d)) { W <- owin(c(-5,5), c(-8,2)) X <- scanpp("finpines.txt", dir=d, window=W) print(X) } d <- system.file("rawdata", "amacrine", package="spatstat.data") if(nzchar(d)) { W <- owin(c(0, 1060/662), c(0, 1)) Y <- scanpp("amacrine.txt", dir=d, window=W, factor.marks=TRUE) print(Y) } } \keyword{spatial} \keyword{IO} spatstat/man/rescale.owin.Rd0000644000176200001440000000364713333543264015570 0ustar liggesusers\name{rescale.owin} \alias{rescale.owin} \title{Convert Window to Another Unit of Length} \description{ Converts a window to another unit of length. } \usage{ \method{rescale}{owin}(X, s, unitname) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another window object (of class \code{"owin"}) representing the same window, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates of the window \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a window representing the \emph{same} region of space, but re-expressed in a different unit. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original window. 
If you want to actually change the coordinates by a linear transformation, producing a window that is larger or smaller than the original one, use \code{\link{affine}}. } \seealso{ \code{\link{unitname}}, \code{\link{rescale}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(swedishpines) W <- Window(swedishpines) W # coordinates are in decimetres (0.1 metre) # convert to metres: rescale(W, 10) # or equivalently rescale(W) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/influence.ppm.Rd0000644000176200001440000000706113512337523015732 0ustar liggesusers\name{influence.ppm} \alias{influence.ppm} \title{ Influence Measure for Spatial Point Process Model } \description{ Computes the influence measure for a fitted spatial point process model. } \usage{ \method{influence}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process model \code{model}, this function computes the influence measure described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). The function \code{\link[stats]{influence}} is generic, and \code{influence.ppm} is the method for objects of class \code{"ppm"} representing point process models. The influence of a point process model is a value attached to each data point (i.e. each point of the point pattern to which the \code{model} was fitted). 
The influence value \eqn{s(x_i)}{s(x[i])} at a data point \eqn{x_i}{x[i]} represents the change in the maximised log (pseudo)likelihood that occurs when the point \eqn{x_i}{x[i]} is deleted. A relatively large value of \eqn{s(x_i)}{s(x[i])} indicates a data point with a large influence on the fitted model. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{influence.ppm} is an object of class \code{"influence.ppm"}. It can be printed and plotted. It can be converted to a marked point pattern by \code{as.ppp} (see \code{\link{as.ppp.influence.ppm}}). There are also methods for \code{[}, \code{\link{as.owin}}, \code{\link{domain}}, \code{\link{shift}}, \code{\link{integral}} and \code{\link{Smooth}}. } \value{ An object of class \code{"influence.ppm"}. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, {15--48}. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{leverage.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat/man/cut.im.Rd0000644000176200001440000000311413333543263014362 0ustar liggesusers\name{cut.im} \alias{cut.im} \title{Convert Pixel Image from Numeric to Factor} \description{ Transform the values of a pixel image from numeric values into a factor. } \usage{ \method{cut}{im}(x, \dots) } \arguments{ \item{x}{ A pixel image. An object of class \code{"im"}. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values to factor values. See \code{\link{cut.default}}. } } \value{ A pixel image (object of class \code{"im"}) with pixel values that are a factor. See \code{\link{im.object}}. } \details{ This simple function applies the generic \code{\link{cut}} operation to the pixel values of the image \code{x}. The range of pixel values is divided into several intervals, and each interval is associated with a level of a factor. The result is another pixel image, with the same window and pixel grid as \code{x}, but with the numeric value of each pixel discretised by replacing it by the factor level. This function is a convenient way to inspect an image and to obtain summary statistics. See the examples. To select a subset of an image, use the subset operator \code{\link{[.im}} instead. 
} \seealso{ \code{\link{cut}}, \code{\link{im.object}} } \examples{ # artificial image data Z <- setcov(square(1)) Y <- cut(Z, 3) Y <- cut(Z, breaks=seq(0,1,length=5)) # cut at the quartiles # (divides the image into 4 equal areas) Y <- cut(Z, quantile(Z)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/print.quad.Rd0000644000176200001440000000150013333543264015246 0ustar liggesusers\name{print.quad} \alias{print.quad} \title{Print a Quadrature Scheme} \description{ \code{print} method for a quadrature scheme. } \usage{ \method{print}{quad}(x,\dots) } \arguments{ \item{x}{ A quadrature scheme object, typically obtained from \code{\link{quadscheme}}. An object of class \code{"quad"}. } \item{\dots}{Ignored.} } \value{ none. } \details{ This is the \code{print} method for the class \code{"quad"}. It prints simple information about the quadrature scheme. See \code{\link{quad.object}} for details of the class \code{"quad"}. } \seealso{ \code{\link{quadscheme}}, \code{\link{quad.object}}, \code{\link{plot.quad}}, \code{\link{summary.quad}} } \examples{ data(cells) Q <- quadscheme(cells) Q } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/fv.object.Rd0000644000176200001440000000301713333543263015045 0ustar liggesusers\name{fv.object} \alias{fv.object} %DoNotExport \title{Function Value Table} \description{ A class \code{"fv"} to support the convenient plotting of several estimates of the same function. } \details{ An object of this class is a convenient way of storing and plotting several different estimates of the same function. It is a data frame with extra attributes indicating the recommended way of plotting the function, and other information. There are methods for \code{print} and \code{plot} for this class. Objects of class \code{"fv"} are returned by \code{\link{Fest}}, \code{\link{Gest}},\code{\link{Jest}}, and \code{\link{Kest}} along with many other functions. 
} \seealso{ Objects of class \code{"fv"} are returned by \code{\link{Fest}}, \code{\link{Gest}},\code{\link{Jest}}, and \code{\link{Kest}} along with many other functions. See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \examples{ data(cells) K <- Kest(cells) class(K) K # prints a sensible summary plot(K) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat/man/leverage.ppm.Rd0000644000176200001440000000773013512337523015557 0ustar liggesusers\name{leverage.ppm} \alias{leverage} \alias{leverage.ppm} \title{ Leverage Measure for Spatial Point Process Model } \description{ Computes the leverage measure for a fitted spatial point process model. } \usage{ leverage(model, \dots) \method{leverage}{ppm}(model, \dots, drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored, except for the arguments \code{dimyx} and \code{eps} which are passed to \code{\link{as.mask}} to control the spatial resolution of the result. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ The function \code{leverage} is generic, and \code{leverage.ppm} is the method for objects of class \code{"ppm"}. 
Given a fitted spatial point process model \code{model}, the function \code{leverage.ppm} computes the leverage of the model, described in Baddeley, Chang and Song (2013) and Baddeley, Rubak and Turner (2019). The leverage of a spatial point process model is a function of spatial location, and is typically displayed as a colour pixel image. The leverage value \eqn{h(u)} at a spatial location \eqn{u} represents the change in the fitted trend of the fitted point process model that would have occurred if a data point were to have occurred at the location \eqn{u}. A relatively large value of \eqn{h()} indicates a part of the space where the data have a \emph{potentially} strong effect on the fitted model (specifically, a strong effect on the intensity or conditional intensity of the fitted model) due to the values of the covariates. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the leverage calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{leverage.ppm} is an object of class \code{"leverage.ppm"}. It can be printed or plotted. It can be converted to a pixel image by \code{as.im} (see \code{\link{as.im.leverage.ppm}}). There are also methods for \code{contour}, \code{persp}, \code{[}, \code{as.function}, \code{\link{as.owin}}, \code{\link{domain}}, \code{\link{Smooth}}, \code{\link{integral}}, and \code{mean}. 
} \value{ An object of class \code{"leverage.ppm"}. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. Baddeley, A., Rubak, E. and Turner, R. (2019) Leverage and influence diagnostics for Gibbs spatial point processes. \emph{Spatial Statistics} \bold{29}, 15--48. } \author{ \spatstatAuthors. } \seealso{ \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{ppmInfluence}}, \code{\link{plot.leverage.ppm}}, \code{\link{as.function.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) plot(le <- leverage(fit)) mean(le) } \keyword{spatial} \keyword{models} spatstat/man/residuals.dppm.Rd0000644000176200001440000000215013333543264016115 0ustar liggesusers\name{residuals.dppm} \alias{residuals.dppm} \title{ Residuals for Fitted Determinantal Point Process Model } \description{ Given a determinantal point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{dppm}(object, \dots) } \arguments{ \item{object}{ The fitted determinantal point process model (an object of class \code{"dppm"}) for which residuals should be calculated. } \item{\dots}{ Arguments passed to \code{\link{residuals.ppm}}. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function extracts the intensity component of the model using \code{\link{as.ppm}} and then applies \code{\link{residuals.ppm}} to compute the residuals. Use \code{\link{plot.msr}} to plot the residuals directly. 
} \seealso{ \code{\link{msr}}, \code{\link{dppm}} } \examples{ fit <- dppm(swedishpines ~ x, dppGauss()) rr <- residuals(fit) } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/panel.contour.Rd0000644000176200001440000000442213333543264015756 0ustar liggesusers\name{panel.contour} \alias{panel.contour} \alias{panel.image} \alias{panel.histogram} \title{ Panel Plots using Colour Image or Contour Lines } \description{ These functions can be passed to \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}} to determine what kind of plotting is done in each panel of a multi-panel graphical display. } \usage{ panel.contour(x, y, ..., sigma = NULL) panel.image(x, y, ..., sigma = NULL) panel.histogram(x, ...) } \arguments{ \item{x,y}{ Coordinates of points in a scatterplot. } \item{\dots}{ Extra graphics arguments, passed to \code{\link{contour.im}}, \code{\link{plot.im}} or \code{\link[graphics]{rect}}, respectively, to control the appearance of the panel. } \item{sigma}{ Bandwidth of kernel smoother, on a scale where \eqn{x} and \eqn{y} range between 0 and 1. } } \details{ These functions can serve as one of the arguments \code{panel}, \code{lower.panel}, \code{upper.panel}, \code{diag.panel} passed to graphics commands like \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}}, to determine what kind of plotting is done in each panel of a multi-panel graphical display. In particular they work with \code{\link{pairs.im}}. The functions \code{panel.contour} and \code{panel.image} are suitable for the off-diagonal plots which involve two datasets \code{x} and \code{y}. They first rescale \code{x} and \code{y} to the unit square, then apply kernel smoothing with bandwidth \code{sigma} using \code{\link{density.ppp}}. Then \code{panel.contour} draws a contour plot while \code{panel.image} draws a colour image. 
The function \code{panel.histogram} is suitable for the diagonal plots which involve a single dataset \code{x}. It displays a histogram of the data. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link{pairs.im}}, \code{\link{pairs.default}}, \code{\link{panel.smooth}} } \examples{ pairs(bei.extra, panel = panel.contour, diag.panel = panel.histogram) with(bei.extra, pairs(grad, elev, panel = panel.image, diag.panel = panel.histogram)) pairs(marks(finpines), panel=panel.contour, diag.panel=panel.histogram) } \keyword{spatial} \keyword{hplot} spatstat/man/methods.dppm.Rd0000644000176200001440000000302313333543263015564 0ustar liggesusers\name{methods.dppm} \alias{methods.dppm} %DoNotExport \alias{coef.dppm} \alias{formula.dppm} \alias{print.dppm} \alias{terms.dppm} \alias{labels.dppm} \title{ Methods for Determinantal Point Process Models } \description{ These are methods for the class \code{"dppm"}. } \usage{ \method{coef}{dppm}(object, \dots) \method{formula}{dppm}(x, \dots) \method{print}{dppm}(x, ...) \method{terms}{dppm}(x, \dots) \method{labels}{dppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"dppm"}, representing a fitted determinantal point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"dppm"}. An object of class \code{"dppm"} represents a fitted determinantal point process model. It is obtained from \code{\link{dppm}}. The method \code{coef.dppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the interaction parameters. } \value{ See the help files for the corresponding generic functions. } \author{ \adrian } \seealso{ \code{\link{dppm}}, \code{\link{plot.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{as.ppm.dppm}}. 
} \examples{ fit <- dppm(swedishpines ~ x + y, dppGauss()) coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/plot.owin.Rd0000644000176200001440000002125313622670141015115 0ustar liggesusers\name{plot.owin} \alias{plot.owin} \title{Plot a Spatial Window} \description{ Plot a two-dimensional window of observation for a spatial point pattern } \usage{ \method{plot}{owin}(x, main, add=FALSE, \dots, box, edge=0.04, type=c("w","n"), show.all=!add, hatch=FALSE, hatchargs=list(), invert=FALSE, do.plot=TRUE, claim.title.space=FALSE, use.polypath=TRUE) } \arguments{ \item{x}{ The window to be plotted. An object of class \code{\link{owin}}, or data which can be converted into this format by \code{\link{as.owin}()}. } \item{main}{ text to be displayed as a title above the plot. } \item{add}{ logical flag: if \code{TRUE}, draw the window in the current plot; if \code{FALSE}, generate a new plot. } \item{\dots}{ extra arguments controlling the appearance of the plot. These arguments are passed to \code{\link[graphics]{polygon}} if \code{x} is a polygonal or rectangular window, or passed to \code{\link[graphics]{image.default}} if \code{x} is a binary mask. Some arguments are passed to \code{\link[graphics]{plot.default}}. See Details. } \item{box}{ logical flag; if \code{TRUE}, plot the enclosing rectangular box } \item{edge}{ nonnegative number; the plotting region will have coordinate limits that are \code{1 + edge} times as large as the limits of the rectangular box that encloses the pattern. } \item{type}{ Type of plot: either \code{"w"} or \code{"n"}. If \code{type="w"} (the default), the window is plotted. If \code{type="n"} and \code{add=FALSE}, a new plot is initialised and the coordinate system is established, but nothing is drawn. } \item{show.all}{ Logical value indicating whether to plot everything including the main title. 
} \item{hatch}{ logical flag; if \code{TRUE}, the interior of the window will be shaded by texture, such as a grid of parallel lines. } \item{hatchargs}{ List of arguments passed to \code{\link{add.texture}} to control the texture shading when \code{hatch=TRUE}. } \item{invert}{ logical flag; when the window is a binary pixel mask, the mask colours will be inverted if \code{invert=TRUE}. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } \item{claim.title.space}{ Logical value indicating whether extra space for the main title should be allocated when declaring the plot dimensions. Should be set to \code{FALSE} under normal conditions. } \item{use.polypath}{ Logical value indicating what graphics capabilities should be used to draw a polygon filled with colour when the polygon has holes. If \code{TRUE} (the default), then the polygon will be filled using \code{\link[graphics]{polypath}}, provided the graphics device supports this function. If \code{FALSE}, the polygon will be decomposed into simple closed polygons, which will be colour filled using \code{\link[graphics]{polygon}}. } } \value{ none. } \details{ This is the \code{plot} method for the class \code{\link{owin}}. The action is to plot the boundary of the window on the current plot device, using equal scales on the \code{x} and \code{y} axes. If the window \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, the boundary of the window is plotted as a polygon or series of polygons. If \code{x} is of type \code{"mask"} the discrete raster approximation of the window is displayed as a binary image (white inside the window, black outside). Graphical parameters controlling the display (e.g. setting the colours) may be passed directly via the \code{...} arguments, or indirectly reset using \code{\link{spatstat.options}}. 
If \code{add=FALSE} (the default), the plot is initialised by calling the base graphics function \code{\link[graphics]{plot.default}} to create the plot area. By default, coordinate axes and axis labels are not plotted. To plot coordinate axes, use the argument \code{axes=TRUE}; to plot axis labels, use the argument \code{ann=TRUE} and then specify the labels with \code{xlab} and \code{ylab}; see the help file for \code{\link[graphics]{plot.default}} for information on these arguments, and for additional arguments controlling the appearance of the axes. See the Examples also. When \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, it is plotted by the \R function \code{\link[graphics]{polygon}}. To control the appearance (colour, fill density, line density etc) of the polygon plot, determine the required argument of \code{\link[graphics]{polygon}} and pass it through \code{...} For example, to paint the interior of the polygon in red, use the argument \code{col="red"}. To draw the polygon edges in green, use \code{border="green"}. To suppress the drawing of polygon edges, use \code{border=NA}. When \code{x} is of type \code{"mask"}, it is plotted by \code{\link[graphics]{image.default}}. The appearance of the image plot can be controlled by passing arguments to \code{\link[graphics]{image.default}} through \code{...}. The default appearance can also be changed by setting the parameter \code{par.binary} of \code{\link{spatstat.options}}. To zoom in (to view only a subset of the window at higher magnification), use the graphical arguments \code{xlim} and \code{ylim} to specify the desired rectangular field of view. (The actual field of view may be larger, depending on the graphics device). } \section{Notes on Filled Polygons with Holes}{ The function \code{\link[graphics]{polygon}} can only handle polygons without holes. To plot polygons with holes in a solid colour, we have implemented two workarounds. 
\describe{ \item{polypath function:}{ The first workaround uses the relatively new function \code{\link[graphics]{polypath}} which \emph{does} have the capability to handle polygons with holes. However, not all graphics devices support \code{\link[graphics]{polypath}}. The older devices \code{\link{xfig}} and \code{\link{pictex}} do not support \code{\link[graphics]{polypath}}. On a Windows system, the default graphics device #ifdef windows \code{\link{windows}} #endif #ifndef windows \code{windows} #endif supports \code{\link[graphics]{polypath}}. #ifdef unix On a Linux system, the default graphics device \code{X11(type="Xlib")} does \emph{not} support \code{\link[graphics]{polypath}} but \code{X11(type="cairo")} does support it. See \code{\link{X11}} and the section on Cairo below. #endif } \item{polygon decomposition:}{ The other workaround involves decomposing the polygonal window into pieces which do not have holes. This code is experimental but works in all our test cases. If this code fails, a warning will be issued, and the filled colours will not be plotted. } } } #ifdef unix \section{Cairo graphics on a Linux system}{ Linux systems support the graphics device \code{X11(type="cairo")} (see \code{\link{X11}}) provided the external library \pkg{cairo} is installed on the computer. See \code{www.cairographics.org} for instructions on obtaining and installing \pkg{cairo}. After having installed \pkg{cairo} one needs to re-install \R from source so that it has \pkg{cairo} capabilites. To check whether your current installation of R has \pkg{cairo} capabilities, type (in \R) \code{capabilities()["cairo"]}. The default type for \code{\link{X11}} is controlled by \code{\link[grDevices]{X11.options}}. You may find it convenient to make \pkg{cairo} the default, e.g. via your \code{.Rprofile}. The magic incantation to put into \code{.Rprofile} is \preformatted{ setHook(packageEvent("graphics", "onLoad"), function(...) 
grDevices::X11.options(type="cairo")) } } #endif \seealso{ \code{\link{owin.object}}, \code{\link{plot.ppp}}, \code{\link[graphics]{polygon}}, \code{\link[graphics]{image.default}}, \code{\link{spatstat.options}} } \examples{ # rectangular window plot(Window(nztrees)) abline(v=148, lty=2) # polygonal window w <- Window(demopat) plot(w) plot(w, col="red", border="green", lwd=2) plot(w, hatch=TRUE, lwd=2) # binary mask we <- as.mask(w) plot(we) op <- spatstat.options(par.binary=list(col=grey(c(0.5,1)))) plot(we) spatstat.options(op) ## axis annotation plot(letterR, axes=TRUE, ann=TRUE, xlab="Easting", ylab="Northing") plot(letterR, ann=TRUE, xlab="Declination", ylab="Right Ascension") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/markcorr.Rd0000644000176200001440000002655213613547037015022 0ustar liggesusers\name{markcorr} \alias{markcorr} \title{ Mark Correlation Function } \description{ Estimate the marked correlation function of a marked point pattern. } \usage{ markcorr(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL, internal=NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. 
} \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{weights}{ Optional. Numeric weights for each data point in \code{X}. A numeric vector, a pixel image, or a \code{function(x,y)}. Alternatively, an \code{expression} to be evaluated to yield the weights; the expression may involve the variables \code{x,y,marks} representing the coordinates and marks of\code{X}. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } \item{internal}{Do not use this argument.} } \value{ A function value table (object of class \code{"fv"}) or a list of function value tables, one for each column of marks. An object of class \code{"fv"} (see \code{\link{fv.object}}) is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{k_f(r)}{k[f](r)} when the marks attached to different points are independent, namely 1 } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark correlation function \eqn{k_f(r)}{k[f](r)} obtained by the edge corrections named. } \details{ By default, this command calculates an estimate of Stoyan's mark correlation \eqn{k_{mm}(r)}{k[mm](r)} for the point pattern. 
Alternatively if the argument \code{f} or \code{f1} is given, then it calculates Stoyan's generalised mark correlation \eqn{k_f(r)}{k[f](r)} with test function \eqn{f}. Theoretical definitions are as follows (see Stoyan and Stoyan (1994, p. 262)): \itemize{ \item For a point process \eqn{X} with numeric marks, Stoyan's mark correlation function \eqn{k_{mm}(r)}{k[mm](r)}, is \deqn{ k_{mm}(r) = \frac{E_{0u}[M(0) M(u)]}{E[M M']} }{ k[mm](r) = E[0u](M(0) * M(u))/E(M * M') } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0),M(u)} denote the marks attached to these two points. On the denominator, \eqn{M,M'} are random marks drawn independently from the marginal distribution of marks, and \eqn{E} is the usual expectation. \item For a multitype point process \eqn{X}, the mark correlation is \deqn{ k_{mm}(r) = \frac{P_{0u}[M(0) = M(u)]}{P[M = M']} }{ k[mm](r) = P[0u](M(0) = M(u))/P(M = M') } where \eqn{P} and \eqn{P_{0u}}{P[0u]} denote the probability and conditional probability. \item The \emph{generalised} mark correlation function \eqn{k_f(r)}{k[f](r)} of a marked point process \eqn{X}, with test function \eqn{f}, is \deqn{ k_f(r) = \frac{E_{0u}[f(M(0),M(u))]}{E[f(M,M')]} }{ k[f](r) = E[0u](f(M(0),M(u)))/E(f(M,M')) } } The test function \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous nonnegative real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2)}. Note that \eqn{k_f(r)}{k[f](r)} is not a ``correlation'' in the usual statistical sense. 
It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_f(r) \equiv 1}{k[f](r) = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern. The argument \code{f} determines the function to be applied to pairs of marks. It has a sensible default, which depends on the kind of marks in \code{X}. If the marks are numeric values, then \code{f <- function(m1, m2) { m1 * m2}} computes the product of two marks. If the marks are a factor (i.e. if \code{X} is a multitype point pattern) then \code{f <- function(m1, m2) { m1 == m2}} yields the value 1 when the two marks are equal, and 0 when they are unequal. These are the conventional definitions for numerical marks and multitype points respectively. The argument \code{f} may be specified by the user. It must be an \R function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). (It may also take additional arguments, passed through \code{fargs}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative, and \code{NA} values are not permitted. Alternatively the user may specify the argument \code{f1} instead of \code{f}. This indicates that the test function \eqn{f} should take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)} where \eqn{f_1(u)}{f1(u)} is given by the argument \code{f1}. The argument \code{f1} should be an \R function with at least one argument. (It may also take additional arguments, passed through \code{fargs}). 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } If \code{normalise=FALSE} then the algorithm will compute only the numerator \deqn{ c_f(r) = E_{0u} f(M(0),M(u)) }{ c[f](r) = E[0u] f(M(0),M(u)) } of the expression for the mark correlation function. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ Mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. Mark cross-correlation function \code{\link{markcrosscorr}} for point patterns with several columns of marks. \code{\link{Kmark}} to estimate a cumulative function related to the mark correlation function. } \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- markcorr(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) \dontrun{ Xc <- markcorr(X) plot(Xc) } # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' # (3) Kernel density estimate with Epanecnikov kernel # (as proposed by Stoyan & Stoyan) M <- markcorr(amacrine, function(m1,m2) {m1==m2}, correction="translate", method="density", kernel="epanechnikov") plot(M) # Note: kernel="epanechnikov" comes from help(density) # (4) Same again with explicit control over bandwidth \dontrun{ M <- markcorr(amacrine, correction="translate", method="density", kernel="epanechnikov", bw=0.02) # see help(density) for correct interpretation of 'bw' } \testonly{ niets <- markcorr(amacrine, function(m1,m2){m1 == m2}, method="loess") if(require(sm)) niets <- markcorr(X, correction="isotropic", method="smrep", hmult=2) } # weighted mark correlation Y <- subset(betacells, select=type) a <- marks(betacells)$area v <- markcorr(Y, weights=a) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat/man/ragsAreaInter.Rd0000644000176200001440000000565213333543264015724 0ustar liggesusers\name{ragsAreaInter} \alias{ragsAreaInter} \title{ Alternating Gibbs Sampler for Area-Interaction Process } \description{ Generate a realisation of the area-interaction process using the alternating Gibbs sampler. 
Applies only when the interaction parameter \eqn{\eta}{eta} is greater than 1. } \usage{ ragsAreaInter(beta, eta, r, \dots, win = NULL, bmax = NULL, periodic = FALSE, ncycles = 100) } \arguments{ \item{beta}{ First order trend. A number, a pixel image (object of class \code{"im"}), or a \code{function(x,y)}. } \item{eta}{ Interaction parameter (canonical form) as described in the help for \code{\link{AreaInter}}. A number greater than 1. } \item{r}{ Disc radius in the model. A positive number. } \item{\dots}{ Additional arguments for \code{beta} if it is a function. } \item{win}{ Simulation window. An object of class \code{"owin"}. (Ignored if \code{beta} is a pixel image.) } \item{bmax}{ Optional. The maximum possible value of \code{beta}, or a number larger than this. } \item{periodic}{ Logical value indicating whether to treat opposite sides of the simulation window as being the same, so that points close to one side may interact with points close to the opposite side. Feasible only when the window is a rectangle. } \item{ncycles}{ Number of cycles of the alternating Gibbs sampler to be performed. } } \details{ This function generates a simulated realisation of the area-interaction process (see \code{\link{AreaInter}}) using the alternating Gibbs sampler (see \code{\link{rags}}). It exploits a mathematical relationship between the (unmarked) area-interaction process and the two-type hard core process (Baddeley and Van Lieshout, 1995; Widom and Rowlinson, 1970). This relationship only holds when the interaction parameter \code{eta} is greater than 1 so that the area-interaction process is clustered. The parameters \code{beta,eta} are the canonical parameters described in the help for \code{\link{AreaInter}}. The first order trend \code{beta} may be a constant, a function, or a pixel image. The simulation window is determined by \code{beta} if it is a pixel image, and otherwise by the argument \code{win} (the default is the unit square). 
} \value{ A point pattern (object of class \code{"ppp"}). } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684. } \author{ \adrian. } \seealso{ \code{\link{rags}}, \code{\link{ragsMultiHard}} \code{\link{AreaInter}} } \examples{ plot(ragsAreaInter(100, 2, 0.07, ncycles=15)) } \keyword{spatial} \keyword{datagen} spatstat/man/clarkevans.test.Rd0000644000176200001440000000675113333543263016304 0ustar liggesusers\name{clarkevans.test} \alias{clarkevans.test} \title{Clark and Evans Test} \description{ Performs the Clark-Evans test of aggregation for a spatial point pattern. } \usage{ clarkevans.test(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), nsim=999) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string. The type of edge correction to be applied. See \code{\link{clarkevans}} } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See \code{\link{clarkevans}} } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } } \details{ This command uses the Clark and Evans (1954) aggregation index \eqn{R} as the basis for a crude test of clustering or ordering of a point pattern. The Clark-Evans index is computed by the function \code{\link{clarkevans}}. See the help for \code{\link{clarkevans}} for information about the Clark-Evans index \eqn{R} and about the arguments \code{correction} and \code{clipregion}. 
This command performs a hypothesis test of clustering or ordering of the point pattern \code{X}. The null hypothesis is Complete Spatial Randomness, i.e.\ a uniform Poisson process. The alternative hypothesis is specified by the argument \code{alternative}: \itemize{ \item \code{alternative="less"} or \code{alternative="clustered"}: the alternative hypothesis is that \eqn{R < 1} corresponding to a clustered point pattern; \item \code{alternative="greater"} or \code{alternative="regular"}: the alternative hypothesis is that \eqn{R > 1} corresponding to a regular or ordered point pattern; \item \code{alternative="two.sided"}: the alternative hypothesis is that \eqn{R \neq 1}{R != 1} corresponding to a clustered or regular pattern. } The Clark-Evans index \eqn{R} is computed for the data as described in \code{\link{clarkevans}}. If \code{correction="none"} and \code{nsim} is missing, the \eqn{p}-value for the test is computed by standardising \eqn{R} as proposed by Clark and Evans (1954) and referring the statistic to the standard Normal distribution. Otherwise, the \eqn{p}-value for the test is computed by Monte Carlo simulation of \code{nsim} realisations of Complete Spatial Randomness conditional on the observed number of points. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations. \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In \emph{Simulation methods in archaeology}, Cambridge University Press, pp 91--95. 
} \author{ \adrian } \seealso{ \code{\link{clarkevans}}, \code{\link{hopskel.test}} } \examples{ # Redwood data - clustered clarkevans.test(redwood) clarkevans.test(redwood, alternative="clustered") clarkevans.test(redwood, correction="cdf", nsim=39) } \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat/man/linearKdot.Rd0000644000176200001440000000526113623712063015262 0ustar liggesusers\name{linearKdot} \alias{linearKdot} \title{ Multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. 
If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{Kdot}}, \code{\link{linearKcross}}, \code{\link{linearK}}. } \examples{ data(chicago) K <- linearKdot(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/pp3.Rd0000644000176200001440000000236613421754011013666 0ustar liggesusers\name{pp3} \Rdversion{1.1} \alias{pp3} \title{ Three Dimensional Point Pattern } \description{ Create a three-dimensional point pattern } \usage{ pp3(x, y, z, ..., marks=NULL) } \arguments{ \item{x,y,z}{ Numeric vectors of equal length, containing Cartesian coordinates of points in three-dimensional space. } \item{\dots}{ Arguments passed to \code{\link{as.box3}} to determine the three-dimensional box in which the points have been observed. } \item{marks}{ Optional. Vector, data frame, or hyperframe of mark values associated with the points. } } \details{ An object of class \code{"pp3"} represents a pattern of points in three-dimensional space. The points are assumed to have been observed by exhaustively inspecting a three-dimensional rectangular box. The boundaries of the box are included as part of the dataset. 
} \value{ Object of class \code{"pp3"} representing a three dimensional point pattern. Also belongs to class \code{"ppx"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{box3}}, \code{\link{print.pp3}}, \code{\link{ppx}} } \examples{ X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1)), marks=rnorm(10)) X } \keyword{spatial} \keyword{datagen} spatstat/man/as.interact.Rd0000644000176200001440000000364113333543262015402 0ustar liggesusers\name{as.interact} \alias{as.interact} \alias{as.interact.fii} \alias{as.interact.interact} \alias{as.interact.ppm} \title{Extract Interaction Structure} \description{ Extracts the interpoint interaction structure from a point pattern model. } \usage{ as.interact(object) \method{as.interact}{fii}(object) \method{as.interact}{interact}(object) \method{as.interact}{ppm}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}) or an interpoint interaction structure (object of class \code{"interact"}). } } \details{ The function \code{as.interact} extracts the interpoint interaction structure from a suitable object. An object of class \code{"interact"} describes an interpoint interaction structure, before it has been fitted to point pattern data. The irregular parameters of the interaction (such as the interaction range) are fixed, but the regular parameters (such as interaction strength) are undetermined. Objects of this class are created by the functions \code{\link{Poisson}}, \code{\link{Strauss}} and so on. The main use of such objects is in a call to \code{\link{ppm}}. The function \code{as.interact} is generic, with methods for the classes \code{"ppm"}, \code{"fii"} and \code{"interact"}. The result is an object of class \code{"interact"} which can be printed. } \section{Note on parameters}{ This function does \bold{not} extract the fitted coefficients of the interaction. To extract the fitted interaction including the fitted coefficients, use \code{\link{fitin}}. 
} \value{ An object of class \code{"interact"} representing the interpoint interaction. This object can be printed and plotted. } \author{\adrian and \rolf } \seealso{ \code{\link{fitin}}, \code{\link{ppm}}. } \examples{ data(cells) model <- ppm(cells, ~1, Strauss(0.07)) f <- as.interact(model) f } \keyword{spatial} \keyword{models} spatstat/man/plot.splitppp.Rd0000644000176200001440000000306613333543264016022 0ustar liggesusers\name{plot.splitppp} \alias{plot.splitppp} \title{Plot a List of Point Patterns} \description{ Plots a list of point patterns. } \usage{ \method{plot}{splitppp}(x, \dots, main) } \arguments{ \item{x}{ A named list of point patterns, typically obtained from \code{\link{split.ppp}}. } \item{\dots}{ Arguments passed to \code{\link{plot.listof}} which control the layout of the plot panels, their appearance, and the plot behaviour in individual plot panels. } \item{main}{ Optional main title for the plot. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"splitppp"}. It is typically used to plot the result of the function \code{\link{split.ppp}}. The argument \code{x} should be a named list of point patterns (objects of class \code{"ppp"}, see \code{\link{ppp.object}}). Each of these point patterns will be plotted in turn using \code{\link{plot.ppp}}. Plotting is performed by \code{\link{plot.listof}}. } \seealso{ \code{\link{plot.listof}} for arguments controlling the plot. \code{\link{split.ppp}}, \code{\link{plot.ppp}}, \code{\link{ppp.object}}. } \section{Error messages}{ If the error message \sQuote{Figure margins too large} occurs, ensure that \code{equal.scales=FALSE} and increase the values of \code{mar.panel}. } \examples{ # Multitype point pattern plot(split(amacrine)) plot(split(amacrine), main="", panel.begin=function(i, y, ...) { plot(density(y), ribbon=FALSE, ...) 
}) } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat/man/deriv.fv.Rd0000644000176200001440000000730613333543263014715 0ustar liggesusers\name{deriv.fv} \alias{deriv.fv} \title{ Calculate Derivative of Function Values } \description{ Applies numerical differentiation to the values in selected columns of a function value table. } \usage{ \method{deriv}{fv}(expr, which = "*", ..., method=c("spline", "numeric"), kinks=NULL, periodic=FALSE, Dperiodic=periodic) } \arguments{ \item{expr}{ Function values to be differentiated. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be differentiated. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} to control the differentiation algorithm, if \code{method="spline"}. } \item{method}{ Differentiation method. A character string, partially matched to either \code{"spline"} or \code{"numeric"}. } \item{kinks}{ Optional vector of \eqn{x} values where the derivative is allowed to be discontinuous. } \item{periodic}{ Logical value indicating whether the function \code{expr} is periodic. } \item{Dperiodic}{ Logical value indicating whether the resulting derivative should be a periodic function. } } \details{ This command performs numerical differentiation on the function values in a function value table (object of class \code{"fv"}). The differentiation is performed either by \code{\link[stats]{smooth.spline}} or by a naive numerical difference algorithm. The command \code{\link{deriv}} is generic. This is the method for objects of class \code{"fv"}. Differentiation is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. 
The original function values are then replaced by the corresponding derivatives. The optional argument \code{which} specifies which of the columns of function values in \code{expr} will be differentiated. The default (indicated by the wildcard \code{which="*"}) is to differentiate all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{expr}. If the argument \code{kinks} is given, it should be a numeric vector giving the discontinuity points of the function: the value or values of the function argument at which the function is not differentiable. Differentiation will be performed separately on intervals between the discontinuity points. If \code{periodic=TRUE} then the function \code{expr} is taken to be periodic, with period equal to the range of the function argument in \code{expr}. The resulting derivative is periodic. If \code{periodic=FALSE} but \code{Dperiodic=TRUE}, then the \emph{derivative} is assumed to be periodic. This would be appropriate if \code{expr} is the cumulative distribution function of an angular variable, for example. } \value{ Another function value table (object of class \code{"fv"}) of the same format. } \author{\adrian and \rolf } \seealso{ \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}} } \examples{ G <- Gest(cells) plot(deriv(G, which=".", spar=0.5)) A <- pairorient(redwood, 0.05, 0.15) DA <- deriv(A, spar=0.6, Dperiodic=TRUE) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat/man/dppm.Rd0000644000176200001440000002661113571674202014134 0ustar liggesusers\name{dppm} \alias{dppm} \concept{point process model} \concept{determinantal point process} \title{Fit Determinantal Point Process Model} \description{ Fit a determinantal point process model to a point pattern. 
} \usage{ dppm(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm"), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) } \arguments{ \item{formula}{ A \code{formula} in the \R language specifying the data (on the left side) and the form of the model to be fitted (on the right side). For a stationary model it suffices to provide a point pattern without a formula. See Details. } \item{family}{ Information specifying the family of point processes to be used in the model. Typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively a character string giving the name of a family function, or the result of calling one of the family functions. See Details. } \item{data}{ The values of spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows, tessellations or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{startpar}{ Named vector of starting parameter values for the optimization. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, \code{"clik2"} for second order composite likelihood, or \code{"palm"} for Palm likelihood. Partially matched. } \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihood or Palm likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization function \code{\link[stats]{optim}}. } \item{algorithm}{ Character string determining the mathematical optimisation algorithm to be used by \code{\link[stats]{optim}}. See the argument \code{method} of \code{\link[stats]{optim}}. 
} \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a determinantal point process model to a point pattern dataset as described in Lavancier et al. (2015). The model to be fitted is specified by the arguments \code{formula} and \code{family}. The argument \code{formula} should normally be a \code{formula} in the \R language. The left hand side of the formula specifies the point pattern dataset to which the model should be fitted. This should be a single argument which may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The right hand side of the formula is called the \code{trend} and specifies the form of the \emph{logarithm of the intensity} of the process. Alternatively the argument \code{formula} may be a point pattern or quadrature scheme, and the trend formula is taken to be \code{~1}. The argument \code{family} specifies the family of point processes to be used in the model. It is typically one of the family functions \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}} or \code{\link{dppPowerExp}}. Alternatively it may be a character string giving the name of a family function, or the result of calling one of the family functions. A family function belongs to class \code{"detpointprocfamilyfun"}. The result of calling a family function is a point process family, which belongs to class \code{"detpointprocfamily"}. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. 
If the trend formula is \code{~1} (the default if a point pattern or quadrature scheme is given rather than a \code{"formula"}) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation). The interaction parameters of the model are then fitted either by minimum contrast estimation, or by maximum composite likelihood. \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) interaction parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the interaction parameters of the model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the interaction parameters of the model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is heavily inspired by Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. 
Additional arguments \code{\dots} will be passed to \code{\link{clusterfit}} to control the minimum contrast fitting algorithm. } \item{Composite likelihood:}{ If \code{method = "clik2"} the interaction parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ \sum[i,j] w(d[i,j]) log(\rho(d[i,j]; \theta)) - (\sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) \rho(||u-v||; \theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{\rho(d;\theta)} is the pair correlation function of the model with cluster parameters \eqn{\theta}{\theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then the default is a threshold weight function, \eqn{w(d) = 1(d \le R)}{w(d) = 1(d \le R)}, where \eqn{R} is \code{rmax/2}. } \item{Palm likelihood:}{ If \code{method = "palm"} the interaction parameters of the model will be fitted by maximising the Palm loglikelihood (Tanaka et al, 2008) \deqn{ \sum_{i,j} w(x_i, x_j) \log \lambda_P(x_j \mid x_i; \theta) - \int_D w(x_i, u) \lambda_P(u \mid x_i; \theta) {\rm d} u }{ \sum[i,j] w(x[i], x[j]) log(\lambda[P](x[j] | x[i]; \theta) - integral[D] w(x[i], u) \lambda[P](u | x[i]; \theta) du } with the same notation as above. Here \eqn{\lambda_P(u|v;\theta}{\lambda[P](u|v;\theta)} is the Palm intensity of the model at location \eqn{u} given there is a point at \eqn{v}. 
} } In all three methods, the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the argument \code{control}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}). Finally, it is also possible to fix any parameters desired before the optimisation by specifying them as \code{name=value} in the call to the family function. See Examples. } \value{ An object of class \code{"dppm"} representing the fitted model. There are methods for printing, plotting, predicting and simulating objects of this class. } \seealso{ methods for \code{dppm} objects: \code{\link{plot.dppm}}, \code{\link{fitted.dppm}}, \code{\link{predict.dppm}}, \code{\link{simulate.dppm}}, \code{\link{methods.dppm}}, \code{\link{as.ppm.dppm}}, \code{\link{Kmodel.dppm}}, \code{\link{pcfmodel.dppm}}. Minimum contrast fitting algorithm: higher level interface \code{\link{clusterfit}}; low-level algorithm \code{\link{mincontrast}}. Deterimantal point process models: \code{\link{dppGauss}}, \code{\link{dppMatern}}, \code{\link{dppCauchy}}, \code{\link{dppBessel}}, \code{\link{dppPowerExp}}, Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}} } \references{ Lavancier, F. \Moller, J. and Rubak, E. (2015) Determinantal point process models and statistical inference \emph{Journal of the Royal Statistical Society, Series B} \bold{77}, 853--977. Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Tanaka, U. and Ogata, Y. and Stoyan, D. (2008) Parameter estimation and model selection for Neyman-Scott point processes. \emph{Biometrical Journal} \bold{50}, 43--57. Waagepetersen, R. 
(2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ jpines <- residualspaper$Fig1 \testonly{ # smaller dataset for testing jpines <- jpines[c(TRUE,FALSE)] } dppm(jpines ~ 1, dppGauss) dppm(jpines ~ 1, dppGauss, method="c") dppm(jpines ~ 1, dppGauss, method="p") # Fixing the intensity to lambda=2 rather than the Poisson MLE 2.04: dppm(jpines ~ 1, dppGauss(lambda=2)) if(interactive()) { # The following is quite slow (using K-function) dppm(jpines ~ x, dppMatern) } # much faster using pair correlation function dppm(jpines ~ x, dppMatern, statistic="pcf", statargs=list(stoyan=0.2)) # Fixing the Matern shape parameter to nu=2 rather than estimating it: dppm(jpines ~ x, dppMatern(nu=2)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat/man/increment.fv.Rd0000644000176200001440000000166513333543263015572 0ustar liggesusers\name{increment.fv} \alias{increment.fv} \title{ Increments of a Function } \description{ Compute the change in the value of a function \code{f} when the function argument increases by \code{delta}. } \usage{ increment.fv(f, delta) } \arguments{ \item{f}{ Object of class \code{"fv"} representing a function. } \item{delta}{ Numeric. The increase in the value of the function argument. } } \details{ This command computes the new function \deqn{g(x) = f(x+h) - f(x-h)} where \code{h = delta/2}. The value of \eqn{g(x)} is the change in the value of \eqn{f} over an interval of length \code{delta} centred at \eqn{x}. } \value{ Another object of class \code{"fv"} compatible with \code{X}. 
\author{\adrian, \rolf and \ege}
The transformations are performed by the corresponding method for windows (class \code{"owin"}) or images (class \code{"im"}) depending on the type of tessellation. If the argument \code{origin} is used in \code{shift.tess} it is interpreted as applying to the window containing the tessellation. Then all tiles are shifted by the same vector. } \seealso{ Generic functions \code{\link{reflect}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{affine}}. Methods for windows: \code{\link{reflect.default}}, \code{\link{shift.owin}}, \code{\link{rotate.owin}}, \code{\link{scalardilate.owin}}, \code{\link{affine.owin}}. Methods for images: \code{\link{reflect.im}}, \code{\link{shift.im}}, \code{\link{rotate.im}}, \code{\link{scalardilate.im}}, \code{\link{affine.im}}. } \examples{ live <- interactive() if(live) { H <- hextess(letterR, 0.2) plot(H) plot(reflect(H)) plot(rotate(H, pi/3)) } else H <- hextess(letterR, 0.6) # shear transformation shear <- matrix(c(1,0,0.6,1),2,2) sH <- affine(H, shear) if(live) plot(sH) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/densityVoronoi.Rd0000644000176200001440000001421613427256630016226 0ustar liggesusers\name{densityVoronoi} \alias{densityVoronoi} \alias{densityVoronoi.ppp} \title{Intensity Estimate of Point Pattern Using Voronoi-Dirichlet Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern using the Dirichlet-Voronoi tessellation. } \usage{ densityVoronoi(X, \dots) \method{densityVoronoi}{ppp}(X, f = 1, \dots, counting=FALSE, fixed=FALSE, nrep = 1, verbose=TRUE) } \arguments{ \item{X}{Point pattern dataset (object of class \code{"ppp"}).} \item{f}{ Fraction (between 0 and 1 inclusive) of the data points that will be used to build a tessellation for the intensity estimate. } \item{\dots}{Arguments passed to \code{\link{as.im}} determining the pixel resolution of the result. 
} \item{counting}{ Logical value specifying the choice of estimation method. See Details. } \item{fixed}{ Logical. If \code{FALSE} (the default), the data points are independently randomly thinned, so the number of data points that are retained is random. If \code{TRUE}, the number of data points retained is fixed. See Details. } \item{nrep}{Number of independent repetitions of the randomised procedure.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity. If \code{f=1} (the default), the Voronoi estimate (Barr and Schoenberg, 2010) is computed: the point pattern \code{X} is used to construct a Voronoi/Dirichlet tessellation (see \code{\link{dirichlet}}); the areas of the Dirichlet tiles are computed; the estimated intensity in each tile is the reciprocal of the tile area. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{f=0}, the intensity estimate at every location is equal to the average intensity (number of points divided by window area). The result is a pixel image of intensity estimates which are constant. If \code{f} is strictly between 0 and 1, the estimation method is applied to a random subset of \code{X}. This randomised procedure is repeated \code{nrep} times, and the results are averaged. The subset is selected as follows: \itemize{ \item if \code{fixed=FALSE}, the dataset \code{X} is randomly thinned by deleting or retaining each point independently, with probability \code{f} of retaining a point. \item if \code{fixed=TRUE}, a random sample of fixed size \code{m} is taken from the dataset \code{X}, where \code{m} is the largest integer less than or equal to \code{f*n} and \code{n} is the number of points in \code{X}. 
} Then the intensity estimate is calculated as follows: \itemize{ \item if \code{counting = FALSE} (the default), the thinned pattern is used to construct a Dirichlet tessellation and form the Voronoi estimate (Barr and Schoenberg, 2010) which is then adjusted by a factor \code{1/f} or \code{n/m} as appropriate. to obtain an estimate of the intensity of \code{X} in the tile. \item if \code{counting = TRUE}, the randomly selected subset \code{A} is used to construct a Dirichlet tessellation, while the complementary subset \code{B} (consisting of points that were not selected in the sample) is used for counting to calculate a quadrat count estimate of intensity. For each tile of the Dirichlet tessellation formed by \code{A}, we count the number of points of \code{B} falling in the tile, and divide by the area of the same tile, to obtain an estimate of the intensity of the pattern \code{B} in the tile. This estimate is adjusted by \code{1/(1-f)} or \code{n/(n-m)} as appropriate to obtain an estimate of the intensity of \code{X} in the tile. } Ogata et al. (2003) and Ogata (2004) estimated intensity using the Dirichlet-Voronoi tessellation in a modelling context. Baddeley (2007) proposed intensity estimation by subsampling with \code{0 < f < 1}, and used the technique described above with \code{fixed=TRUE} and \code{counting=TRUE}. Barr and Schoenberg (2010) described and analysed the Voronoi estimator (corresponding to \code{f=1}). Moradi et al (2019) developed the subsampling technique with \code{fixed=FALSE} and \code{counting=FALSE} and called it the \emph{smoothed Voronoi estimator}. } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{densityVoronoi.lpp}}, \code{\link{adaptive.density}}, \code{\link{density.ppp}}, \code{\link{dirichlet}}, \code{\link{im.object}}. } \references{ Baddeley, A. (2007) Validation of statistical models for spatial point patterns. In J.G. Babu and E.D. 
Feigelson (eds.) \emph{SCMA IV: Statistical Challenges in Modern Astronomy IV}, volume 317 of Astronomical Society of the Pacific Conference Series, San Francisco, California USA, 2007. Pages 22--38. Barr, C., and Schoenberg, F.P. (2010). On the Voronoi estimator for the intensity of an inhomogeneous planar Poisson process. \emph{Biometrika} \bold{97} (4), 977--984. Moradi, M., Cronie, 0., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators. \emph{Statistics and Computing}, in press. Ogata, Y. (2004) Space-time model for regional seismicity and detection of crustal stress changes. \emph{Journal of Geophysical Research}, \bold{109}, 2004. Ogata, Y., Katsura, K. and Tanemura, M. (2003). Modelling heterogeneous space-time occurrences of earthquakes and its residual analysis. \emph{Applied Statistics} \bold{52} 499--509. } \examples{ plot(densityVoronoi(nztrees, 1, f=1), main="Voronoi estimate") nr <- if(interactive()) 100 else 5 plot(densityVoronoi(nztrees, f=0.5, nrep=nr), main="smoothed Voronoi estimate") } \author{ \spatstatAuthors and Mehdi Moradi. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/intensity.ppm.Rd0000644000176200001440000000567413333543263016021 0ustar liggesusers\name{intensity.ppm} \alias{intensity.ppm} \title{ Intensity of Fitted Point Process Model } \description{ Computes the intensity of a fitted point process model. } \usage{ \method{intensity}{ppm}(X, \dots) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed to \code{\link{predict.ppm}} in some cases. See Details. } } \details{ This is a method for the generic function \code{\link{intensity}} for fitted point process models (class \code{"ppm"}). The intensity of a point process model is the expected number of random points per unit area. If \code{X} is a Poisson point process model, the intensity of the process is computed exactly. 
The result is a numerical value if \code{X} is a stationary Poisson point process, and a pixel image if \code{X} is non-stationary. (In the latter case, the resolution of the pixel image is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}.) If \code{X} is another Gibbs point process model, the intensity is computed approximately using the Poisson-saddlepoint approximation (Baddeley and Nair, 2012a, 2012b, 2016; Anderssen et al, 2014). The approximation is currently available for pairwise-interaction models (Baddeley and Nair, 2012a, 2012b) and for the area-interaction model and Geyer saturation model (Baddeley and Nair, 2016). For a non-stationary Gibbs model, the pseudostationary solution (Baddeley and Nair, 2012b; Anderssen et al, 2014) is used. The result is a pixel image, whose resolution is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}. } \value{ A numeric value (if the model is stationary) or a pixel image. } \references{ Anderssen, R.S., Baddeley, A., DeHoog, F.R. and Nair, G.M. (2014) Solution of an integral equation arising in spatial point process theory. \emph{Journal of Integral Equations and Applications} \bold{26} (4) 437--453. Baddeley, A. and Nair, G. (2012a) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. Baddeley, A. and Nair, G. (2012b) Approximating the moments of a spatial point process. \emph{Stat} \bold{1}, 1, 18--30. doi: 10.1002/sta4.5 Baddeley, A. and Nair, G. (2016) Poisson-saddlepoint approximation for spatial point processes with infinite order interaction. Submitted for publication. 
} \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ fitP <- ppm(swedishpines ~ 1) intensity(fitP) fitS <- ppm(swedishpines ~ 1, Strauss(9)) intensity(fitS) fitSx <- ppm(swedishpines ~ x, Strauss(9)) lamSx <- intensity(fitSx) fitG <- ppm(swedishpines ~ 1, Geyer(9, 1)) lamG <- intensity(fitG) fitA <- ppm(swedishpines ~ 1, AreaInter(7)) lamA <- intensity(fitA) } \author{ \adrian and Gopalan Nair. } \keyword{spatial} \keyword{models} spatstat/man/pairdist.psp.Rd0000644000176200001440000000436313333543263015612 0ustar liggesusers\name{pairdist.psp} \alias{pairdist.psp} \title{Pairwise distances between line segments} \description{ Computes the matrix of distances between all pairs of line segments in a line segment pattern. } \usage{ \method{pairdist}{psp}(X, \dots, method="C", type="Hausdorff") } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{type}{ Type of distance to be computed. Options are \code{"Hausdorff"} and \code{"separation"}. Partial matching is used. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the line segments numbered \code{i} and \code{j}. } \details{ This function computes the distance between each pair of line segments in \code{X}, and returns the matrix of distances. This is a method for the generic function \code{\link{pairdist}} for the class \code{"psp"}. The distances between line segments are measured in one of two ways: \itemize{ \item if \code{type="Hausdorff"}, distances are computed in the Hausdorff metric. The Hausdorff distance between two line segments is the \emph{maximum} distance from any point on one of the segments to the nearest point on the other segment. 
\item if \code{type="separation"}, distances are computed as the \emph{minimum} distance from a point on one line segment to a point on the other line segment. For example, line segments which cross over each other have separation zero. } The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted \R code only. If \code{method="C"} (the default) then compiled \code{C} code is used, which is somewhat faster. } \seealso{ \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{pairdist.ppp}} } \examples{ L <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- pairdist(L) S <- pairdist(L, type="sep") } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/matchingdist.Rd0000644000176200001440000000755413333543263015655 0ustar liggesusers\name{matchingdist} \alias{matchingdist} \title{Distance for a Point Pattern Matching} \description{ Computes the distance associated with a matching between two point patterns. } \usage{ matchingdist(matching, type = NULL, cutoff = NULL, q = NULL) } \arguments{ \item{matching}{A point pattern matching (an object of class \code{"pppmatching"}).} \item{type}{ A character string giving the type of distance to be computed. One of \code{"spa"}, \code{"ace"} or \code{"mat"}. See details below. } \item{cutoff}{ The value \eqn{> 0} at which interpoint distances are cut off. } \item{q}{ The order of the average that is applied to the interpoint distances. May be \code{Inf}, in which case the maximum of the interpoint distances is taken. } } \details{ Computes the distance specified by \code{type}, \code{cutoff}, and \code{order} for a point matching. If any of these arguments are not provided, the function uses the corresponding elements of \code{matching} (if available). 
For the type \code{"spa"} (subpattern assignment) it is assumed that the points of the point pattern with the smaller cardinality \eqn{m} are matched to a \eqn{m}-point subpattern of the point pattern with the larger cardinality \eqn{n} in a 1-1 way. The distance is then given as the \code{q}-th order average of the \eqn{m} distances between matched points (minimum of Euclidean distance and \code{cutoff}) and \eqn{n-m} "penalty distances" of value \code{cutoff}. For the type \code{"ace"} (assignment only if cardinalities equal) the matching is assumed to be 1-1 if the cardinalities of the point patterns are the same, in which case the \code{q}-th order average of the matching distances (minimum of Euclidean distance and \code{cutoff}) is taken. If the cardinalities are different, the matching may be arbitrary and the distance returned is always equal to \code{cutoff}. For the type \code{mat} (mass transfer) it is assumed that each point of the point pattern with the smaller cardinality \eqn{m} has mass \eqn{1}, each point of the point pattern with the larger cardinality \eqn{n} has mass \eqn{m/n}, and fractions of these masses are matched in such a way that each point contributes exactly its mass. The distance is then given as the \code{q}-th order weighted average of all distances (minimum of Euclidean distance and \code{cutoff}) of (partially) matched points with weights equal to the fractional masses divided by \eqn{m}. If the cardinalities of the two point patterns are equal, \code{matchingdist(m, type, cutoff, q)} yields the same result no matter if \code{type} is \code{"spa"}, \code{"ace"} or \code{"mat"}. } \value{ Numeric value of the distance associated with the matching. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppdist}} \code{\link{pppmatching.object}} } \examples{ # an optimal matching X <- runifpoint(20) Y <- runifpoint(20) m.opt <- pppdist(X, Y) summary(m.opt) matchingdist(m.opt) # is the same as the distance given by summary(m.opt) # sequential nearest neighbour matching # (go through all points of point pattern X in sequence # and match each point with the closest point of Y that is # still unmatched) am <- matrix(0, 20, 20) h <- matrix(c(1:20, rep(0,20)), 20, 2) h[1,2] = nncross(X[1],Y)[1,2] for (i in 2:20) { nn <- nncross(X[i],Y[-h[1:(i-1),2]])[1,2] h[i,2] <- ((1:20)[-h[1:(i-1),2]])[nn] } am[h] <- 1 m.nn <- pppmatching(X, Y, am) matchingdist(m.nn, type="spa", cutoff=1, q=1) # is >= the distance obtained for m.opt # in most cases strictly > opa <- par(mfrow=c(1,2)) plot(m.opt, main="optimal") plot(m.nn, main="nearest neighbour") text(X, 1:20, pos=1, offset=0.3, cex=0.8) par(opa) } \keyword{spatial} \keyword{math} spatstat/man/plot.kppm.Rd0000644000176200001440000000563613333543264015123 0ustar liggesusers\name{plot.kppm} \alias{plot.kppm} \title{Plot a fitted cluster point process} \description{ Plots a fitted cluster point process model, displaying the fitted intensity and the fitted \eqn{K}-function. } \usage{ \method{plot}{kppm}(x, \dots, what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) } \arguments{ \item{x}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } \item{pause}{ Logical value specifying whether to pause between plots. } \item{xname}{ Optional. Character string. The name of the object \code{x} for use in the title of the plot. 
} } \details{ This is a method for the generic function \code{\link[graphics]{plot}} for the class \code{"kppm"} of fitted cluster point process models. The argument \code{x} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"}, \code{"statistic"} and \code{"cluster"}. This command is capable of producing three different plots: \describe{ \item{what="intensity"}{specifies the fitted intensity of the model, which is plotted using \code{\link{plot.ppm}}. By default this plot is not produced for stationary models.} \item{what="statistic"}{specifies the empirical and fitted summary statistics, which are plotted using \code{\link{plot.fv}}. This is only meaningful if the model has been fitted using the Method of Minimum Contrast, and it is turned off otherwise.} \item{what="cluster"}{specifies a fitted cluster, which is computed by \code{\link{clusterfield}} and plotted by \code{\link{plot.im}}. It is only meaningful for Poisson cluster (incl. Neyman-Scott) processes, and it is turned off for log-Gaussian Cox processes (LGCP). If the model is stationary (and non-LGCP) this option is turned on by default and shows a fitted cluster positioned at the centroid of the observation window. For non-stationary (and non-LGCP) models this option is only invoked if explicitly told so, and in that case an additional argument \code{locations} (see \code{\link{clusterfield}}) must be given to specify where to position the parent point(s) .} } Alternatively \code{what="all"} selects all available options. } \value{ Null. 
} \examples{ data(redwood) fit <- kppm(redwood~1, "Thomas") plot(fit) } \seealso{ \code{\link{kppm}}, \code{\link{plot.ppm}}, } \author{ \adrian \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/alltypes.Rd0000644000176200001440000002342613443662274015036 0ustar liggesusers\name{alltypes} \alias{alltypes} \title{Calculate Summary Statistic for All Types in a Multitype Point Pattern} \description{ Given a marked point pattern, this computes the estimates of a selected summary function (\eqn{F},\eqn{G}, \eqn{J}, \eqn{K} etc) of the pattern, for all possible combinations of marks, and returns these functions in an array. } \usage{ alltypes(X, fun="K", \dots, dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) } \arguments{ \item{X}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"} or \code{"lpp"}. } \item{fun}{The summary function. Either an \R function, or a character string indicating the summary function required. Options for strings are \code{"F"}, \code{"G"}, \code{"J"}, \code{"K"}, \code{"L"}, \code{"pcf"}, \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}, \code{"Ldot"}. } \item{\dots}{ Arguments passed to the summary function (and to the function \code{\link{envelope}} if appropriate) } \item{dataname}{Character string giving an optional (alternative) name to the point pattern, different from what is given in the call. This name, if supplied, may be used by \code{\link{plot.fasp}()} in forming the title of the plot. If not supplied it defaults to the parsing of the argument supplied as \code{X} in the call. } \item{verb}{ Logical value. If \code{verb} is true then terse ``progress reports'' (just the values of the mark indices) are printed out when the calculations for that combination of marks are completed. } \item{envelope}{ Logical value. 
If \code{envelope} is true, then simulation envelopes of the summary function will also be computed. See Details. } \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This routine is a convenient way to analyse the dependence between types in a multitype point pattern. It computes the estimates of a selected summary function of the pattern, for all possible combinations of marks. It returns these functions in an array (an object of class \code{"fasp"}) amenable to plotting by \code{\link{plot.fasp}()}. The argument \code{fun} specifies the summary function that will be evaluated for each type of point, or for each pair of types. It may be either an \R function or a character string. Suppose that the points have possible types \eqn{1,2,\ldots,m} and let \eqn{X_i}{X[i]} denote the pattern of points of type \eqn{i} only. If \code{fun="F"} then this routine calculates, for each possible type \eqn{i}, an estimate of the Empty Space Function \eqn{F_i(r)}{F[i](r)} of \eqn{X_i}{X[i]}. See \code{\link{Fest}} for explanation of the empty space function. The estimate is computed by applying \code{\link{Fest}} to \eqn{X_i}{X[i]} with the optional arguments \code{\dots}. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"}, the routine calculates, for each pair of types \eqn{(i,j)}, an estimate of the ``\code{i}-to\code{j}'' cross-type function \eqn{G_{ij}(r)}{G[i,j](r)}, \eqn{J_{ij}(r)}{J[i,j](r)}, \eqn{K_{ij}(r)}{K[i,j](r)} or \eqn{L_{ij}(r)}{L[i,j](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X_j}{X[j]}. See \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} respectively for explanation of these functions. 
The estimate is computed by applying the relevant function (\code{\link{Gcross}} etc) to \code{X} using each possible value of the arguments \code{i,j}, together with the optional arguments \code{\dots}. If \code{fun} is \code{"pcf"} the routine calculates the cross-type pair correlation function \code{\link{pcfcross}} between each pair of types. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the routine calculates, for each type \eqn{i}, an estimate of the ``\code{i}-to-any'' dot-type function \eqn{G_{i\bullet}(r)}{G[i.](r)}, \eqn{J_{i\bullet}(r)}{J[i.](r)} or \eqn{K_{i\bullet}(r)}{K[i.](r)} or \eqn{L_{i\bullet}(r)}{L[i.](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X}{X}. See \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gdot}} etc) to \code{X} using each possible value of the argument \code{i}, together with the optional arguments \code{\dots}. The letters \code{"G"}, \code{"J"}, \code{"K"} and \code{"L"} are interpreted as abbreviations for \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} and \code{\link{Lcross}} respectively, assuming the point pattern is marked. If the point pattern is unmarked, the appropriate function \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} is invoked instead. If \code{envelope=TRUE}, then as well as computing the value of the summary function for each combination of types, the algorithm also computes simulation envelopes of the summary function for each combination of types. The arguments \code{\dots} are passed to the function \code{\link{envelope}} to control the number of simulations, the random process generating the simulations, the construction of envelopes, and so on. 
When \code{envelope=TRUE} it is possible that errors could occur because the simulated point patterns do not satisfy the requirements of the summary function (for example, because the simulated pattern is empty and \code{fun} requires at least one point). If the number of such errors exceeds the maximum permitted number \code{maxnerr}, then the envelope algorithm will give up, and will return the empirical summary function for the data point pattern, \code{fun(X)}, in place of the envelope. } \value{ A function array (an object of class \code{"fasp"}, see \code{\link{fasp.object}}). This can be plotted using \code{\link{plot.fasp}}. If the pattern is not marked, the resulting ``array'' has dimensions \eqn{1 \times 1}{1 x 1}. Otherwise the following is true: If \code{fun="F"}, the function array has dimensions \eqn{m \times 1}{m * 1} where \eqn{m} is the number of different marks in the point pattern. The entry at position \code{[i,1]} in this array is the result of applying \code{\link{Fest}} to the points of type \code{i} only. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the function array again has dimensions \eqn{m \times 1}{m * 1}. The entry at position \code{[i,1]} in this array is the result of \code{Gdot(X, i)}, \code{Jdot(X, i)} \code{Kdot(X, i)} or \code{Ldot(X, i)} respectively. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"} (or their abbreviations \code{"G"}, \code{"J"}, \code{"K"} or \code{"L"}), the function array has dimensions \eqn{m \times m}{m * m}. The \code{[i,j]} entry of the function array (for \eqn{i \neq j}{i != j}) is the result of applying the function \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or\code{\link{Lcross}} to the pair of types \code{(i,j)}. 
The diagonal \code{[i,i]} entry of the function array is the result of applying the univariate function \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} to the points of type \code{i} only. If \code{envelope=FALSE}, then each function entry \code{fns[[i]]} retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}} ,\code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J functions, and \code{cbind(trans,theo) ~ r} for K and L functions. If \code{envelope=TRUE}, then each function entry \code{fns[[i]]} has the same format as the output of the \code{\link{envelope}} command. } \note{ Sizeable amounts of memory may be needed during the calculation. } \seealso{ \code{\link{plot.fasp}}, \code{\link{fasp.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}}, \code{\link{envelope}}. } \examples{ # bramblecanes (3 marks). bram <- bramblecanes \testonly{ bram <- bram[c(seq(1, 744, by=20), seq(745, 823, by=4))] } bF <- alltypes(bram,"F",verb=TRUE) plot(bF) if(interactive()) { plot(alltypes(bram,"G")) plot(alltypes(bram,"Gdot")) } # Swedishpines (unmarked). 
swed <- swedishpines \testonly{ swed <- swed[1:25] } plot(alltypes(swed,"K")) plot(alltypes(amacrine, "pcf"), ylim=c(0,1.3)) # A setting where you might REALLY want to use dataname: \dontrun{ xxx <- alltypes(ppp(Melvin$x,Melvin$y, window=as.owin(c(5,20,15,50)),marks=clyde), fun="F",verb=TRUE,dataname="Melvin") } # envelopes bKE <- alltypes(bram,"K",envelope=TRUE,nsim=19) \dontrun{ bFE <- alltypes(bram,"F",envelope=TRUE,nsim=19,global=TRUE) } # extract one entry as.fv(bKE[1,1]) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat/man/pcf.fasp.Rd0000644000176200001440000001110013333543264014656 0ustar liggesusers\name{pcf.fasp} \alias{pcf.fasp} \title{Pair Correlation Function obtained from array of K functions} \description{ Estimates the (bivariate) pair correlation functions of a point pattern, given an array of (bivariate) K functions. } \usage{ \method{pcf}{fasp}(X, \dots, method="c") } \arguments{ \item{X}{ An array of multitype \eqn{K} functions (object of class \code{"fasp"}). } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. This can be thought of as a matrix \code{Y} each of whose entries \code{Y[i,j]} is a function value table (class \code{"fv"}) representing the pair correlation function between points of type \code{i} and points of type \code{j}. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. 
See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an array of estimates of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivatives. It is a method for the generic function \code{\link{pcf}}. The argument \code{X} should be a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) containing several estimates of \eqn{K} functions. This should have been obtained from \code{\link{alltypes}} with the argument \code{fun="K"}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Y(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. 
If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # multitype point pattern KK <- alltypes(amacrine, "K") p <- pcf.fasp(KK, spar=0.5, method="b") plot(p) # strong inhibition between points of the same type } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/data.lppm.Rd0000644000176200001440000000217313524136171015045 0ustar liggesusers\name{data.lppm} \alias{data.lppm} \title{Extract Original Data from a Fitted Point Process Model on a Network} \description{ Given a fitted point process model on a linear network, this function extracts the original point pattern dataset to which the model was fitted. } \usage{ data.lppm(object) } \arguments{ \item{object}{ fitted point process model on a linear network (an object of class \code{"lppm"}). } } \value{ A point pattern on a linear network (object of class \code{"lpp"}). } \details{ An object of class \code{"lppm"} represents a point process model that has been fitted to a point pattern dataset on a linear network. It is typically produced by the model-fitting algorithm \code{\link{lppm}}. 
The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. } \seealso{ \code{\link{lppm}}, \code{\link{data.ppm}} } \examples{ fit <- lppm(spiders ~ x) X <- data.lppm(fit) # 'X' is identical to 'spiders' } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/Geyer.Rd0000644000176200001440000001101213547301023014223 0ustar liggesusers\name{Geyer} \alias{Geyer} \title{Geyer's Saturation Point Process Model} \description{ Creates an instance of Geyer's saturation point process model which can then be fitted to point pattern data. } \usage{ Geyer(r,sat) } \arguments{ \item{r}{Interaction radius. A positive real number.} \item{sat}{Saturation threshold. A non-negative real number.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of Geyer's saturation point process with interaction radius \eqn{r} and saturation threshold \code{sat}. } \details{ Geyer (1999) introduced the \dQuote{saturation process}, a modification of the Strauss process (see \code{\link{Strauss}}) in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{s}. The interaction structure of this model is implemented in the function \code{\link{Geyer}()}. The saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma^{\min(s, t(x_i, X))} }{ beta gamma^min(s, t(x[i],X)) } to the probability density of the point pattern, where \eqn{t(x_i, X)}{t(x[i],X)} denotes the number of \sQuote{close neighbours} of \eqn{x_i}{x[i]} in the pattern \eqn{X}. 
A close neighbour of \eqn{x_i}{x[i]} is a point \eqn{x_j}{x[j]} with \eqn{j \neq i}{j != i} such that the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]} is less than or equal to \eqn{r}. If the saturation threshold \eqn{s} is set to infinity, this model reduces to the Strauss process (see \code{\link{Strauss}}) with interaction parameter \eqn{\gamma^2}{gamma^2}. If \eqn{s = 0}, the model reduces to the Poisson point process. If \eqn{s} is a finite positive number, then the interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case of the Strauss process), with values \eqn{\gamma < 1}{gamma < 1} describing an \sQuote{ordered} or \sQuote{inhibitive} pattern, and values \eqn{\gamma > 1}{gamma > 1} describing a \sQuote{clustered} or \sQuote{attractive} pattern. The nonstationary saturation process is similar except that the value \eqn{\beta}{beta} is replaced by a function \eqn{\beta(x_i)}{beta(x[i])} of location. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the saturation process interaction is yielded by \code{Geyer(r, sat)} where the arguments \code{r} and \code{sat} specify the Strauss interaction radius \eqn{r} and the saturation threshold \eqn{s}, respectively. See the examples below. Note the only arguments are the interaction radius \code{r} and the saturation threshold \code{sat}. When \code{r} and \code{sat} are fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Geyer()}. } \section{Zero saturation}{ The value \code{sat=0} is permitted by \code{Geyer}, but this is not very useful. 
For technical reasons, when \code{\link{ppm}} fits a Geyer model with \code{sat=0}, the default behaviour is to return an \dQuote{invalid} fitted model in which the estimate of \eqn{\gamma}{gamma} is \code{NA}. In order to get a Poisson process model returned when \code{sat=0}, you would need to set \code{emend=TRUE} in the call to \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}. To make an interaction object like \code{\link{Geyer}} but having multiple interaction radii, see \code{\link{BadGey}} or \code{\link{Hybrid}}. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \examples{ ppm(cells, ~1, Geyer(r=0.07, sat=2)) # fit the stationary saturation process to `cells' } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/marks.tess.Rd0000644000176200001440000000473713536641574015302 0ustar liggesusers\name{marks.tess} \alias{marks.tess} \alias{marks.lintess} \alias{marks<-.tess} \alias{marks<-.lintess} \alias{unmark.tess} \alias{unmark.lintess} \title{Marks of a Tessellation} \description{ Extract or change the marks attached to the tiles of a tessellation. } \usage{ \method{marks}{tess}(x, \dots) \method{marks}{tess}(x, \dots) <- value \method{unmark}{tess}(X) \method{marks}{lintess}(x, \dots) \method{marks}{lintess}(x, \dots) <- value \method{unmark}{lintess}(X) } \arguments{ \item{x,X}{ Tessellation (object of class \code{"tess"}) or tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{ Ignored. } \item{value}{ Vector or data frame of mark values, or \code{NULL}. 
} } \value{ For \code{marks(x)}, the result is a vector, factor or data frame, containing the mark values attached to the tiles of \code{x}. If there are no marks, the result is \code{NULL}. For \code{unmark(x)}, the result is the tessellation without marks. For \code{marks(x) <- value}, the result is the updated tessellation \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). } \details{ These functions extract or change the marks attached to each of the tiles in the tessellation \code{x}. They are methods for the generic functions \code{\link{marks}} and \code{\link{marks<-}} for the class \code{"tess"} of tessellations and the class \code{"lintess"} of tessellations on a network. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The marks can be a vector, a factor, or a data frame. For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of tiles in \code{x}, or a data frame with as many rows as there are tiles in \code{x}. If \code{value} is a single value, or a data frame with one row, then it will be replicated so that the same marks will be attached to each tile. To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}. } \seealso{ \code{\link{marks}}, \code{\link{marks<-}} } \examples{ D <- dirichlet(cells) marks(D) <- tile.areas(D) B <- lineardirichlet(runiflpp(5, simplenet)) marks(B) <- letters[1:5] } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/bw.frac.Rd0000644000176200001440000000435013355027163014510 0ustar liggesusers\name{bw.frac} \alias{bw.frac} \title{ Bandwidth Selection Based on Window Geometry } \description{ Select a smoothing bandwidth for smoothing a point pattern, based only on the geometry of the spatial window. 
The bandwidth is a specified quantile of the distance between two independent random points in the window. } \usage{ bw.frac(X, \dots, f=1/4) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or point pattern (object of class \code{"ppp"}) or other data which can be converted to a window using \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{distcdf}}. } \item{f}{ Probability value (between 0 and 1) determining the quantile of the distribution. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{sigma} is computed as a quantile of the distance between two independent random points in the window. The default is the lower quartile of this distribution. If \eqn{F(r)} is the cumulative distribution function of the distance between two independent random points uniformly distributed in the window, then the value returned is the quantile with probability \eqn{f}. That is, the bandwidth is the value \eqn{r} such that \eqn{F(r) = f}. The cumulative distribution function \eqn{F(r)} is computed using \code{\link{distcdf}}. We then compute the smallest number \eqn{r} such that \eqn{F(r) \ge f}{F(r) >= f}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.frac"} which can be plotted to show the cumulative distribution function and the selected quantile. } \seealso{ For estimating point process intensity, see \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}. For other smoothing purposes, see \code{\link{bw.stoyan}}, \code{\link{bw.smoothppp}}, \code{\link{bw.relrisk}}. } \examples{ h <- bw.frac(letterR) h plot(h, main="bw.frac(letterR)") } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/contour.imlist.Rd0000644000176200001440000000251013333543263016153 0ustar liggesusers\name{contour.imlist} \alias{contour.imlist} \alias{contour.listof} \title{Array of Contour Plots} \description{ Generates an array of contour plots. } \usage{ \method{contour}{imlist}(x, \dots) \method{contour}{listof}(x, \dots) } \arguments{ \item{x}{ An object of the class \code{"imlist"} representing a list of pixel images. Alternatively \code{x} may belong to the outdated class \code{"listof"}. } \item{\dots}{ Arguments passed to \code{\link{plot.solist}} to control the spatial arrangement of panels, and arguments passed to \code{\link{contour.im}} to control the display of each panel. } } \value{ Null. } \details{ This is a method for the generic command \code{contour} for the class \code{"imlist"}. An object of class \code{"imlist"} represents a list of pixel images. (The outdated class \code{"listof"} is also handled.) Each entry in the list \code{x} will be displayed as a contour plot, in an array of panels laid out on the same graphics display, using \code{\link{plot.solist}}. Individual panels are plotted by \code{\link{contour.im}}. } \seealso{ \code{\link{plot.solist}}, \code{\link{contour.im}} } \examples{ # Multitype point pattern contour(D <- density(split(amacrine))) } \author{\adrian, \rolf and \ege } \keyword{spatial} \keyword{hplot} spatstat/man/Extract.layered.Rd0000644000176200001440000000472113333543263016226 0ustar liggesusers\name{Extract.layered} \alias{[.layered} \alias{[<-.layered} \alias{[[<-.layered} \title{Extract or Replace Subset of a Layered Object} \description{ Extract or replace some or all of the layers of a layered object, or extract a spatial subset of each layer. } \usage{ \method{[}{layered}(x, i, j, drop=FALSE, ...) \method{[}{layered}(x, i) <- value \method{[[}{layered}(x, i) <- value } \arguments{ \item{x}{ A layered object (class \code{"layered"}). 
} \item{i}{ Subset index for the list of layers. A logical vector, integer vector or character vector specifying which layers are to be extracted or replaced. } \item{j}{ Subset index to be applied to the data in each layer. Typically a spatial window (class \code{"owin"}). } \item{drop}{ Logical. If \code{i} specifies only a single layer and \code{drop=TRUE}, then the contents of this layer will be returned. } \item{\dots}{ Additional arguments, passed to other subset methods if the subset index is a window. } \item{value}{List of objects which shall replace the designated subset, or an object which shall replace the designated element. } } \value{ Usually an object of class \code{"layered"}. } \details{ A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. See \code{\link{layered}}. The function \code{[.layered} extracts a designated subset of a layered object. It is a method for \code{\link{[}} for the class \code{"layered"}. The functions \code{[<-.layered} and \code{[[<-.layered} replace a designated subset or designated entry of the object by new values. They are methods for \code{\link{[<-}} and \code{\link{[[<-}} for the \code{"layered"} class. The index \code{i} specifies which layers will be retained. It should be a valid subset index for the list of layers. The index \code{j} will be applied to each layer. It is typically a spatial window (class \code{"owin"}) so that each of the layers will be restricted to the same spatial region. Alternatively \code{j} may be any subset index which is permissible for the \code{"["} method for each of the layers. 
} \seealso{ \code{\link{layered}} } \examples{ D <- distmap(cells) L <- layered(D, cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) L[-2] L[, square(0.5)] L[[3]] <- japanesepines L } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/closing.Rd0000644000176200001440000000467713333543263014634 0ustar liggesusers\name{closing} \alias{closing} \alias{closing.owin} \alias{closing.ppp} \alias{closing.psp} \title{Morphological Closing} \description{ Perform morphological closing of a window, a line segment pattern or a point pattern. } \usage{ closing(w, r, \dots) \method{closing}{owin}(w, r, \dots, polygonal=NULL) \method{closing}{ppp}(w, r, \dots, polygonal=TRUE) \method{closing}{psp}(w, r, \dots, polygonal=TRUE) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of the closing.} \item{\dots}{extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if a pixel approximation is used} \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the closing (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the closed region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological closing (Serra, 1982) of a set \eqn{W} by a distance \eqn{r > 0} is the set of all points that cannot be separated from \eqn{W} by any circle of radius \eqn{r}. That is, a point \eqn{x} belongs to the closing \eqn{W*} if it is impossible to draw any circle of radius \eqn{r} that has \eqn{x} on the inside and \eqn{W} on the outside. The closing \eqn{W*} contains the original set \eqn{W}. For a small radius \eqn{r}, the closing operation has the effect of smoothing out irregularities in the boundary of \eqn{W}. 
For larger radii, the closing operation smooths out concave features in the boundary. For very large radii, the closed set \eqn{W*} becomes more and more convex. The algorithm applies \code{\link{dilation}} followed by \code{\link{erosion}}. } \seealso{ \code{\link{opening}} for the opposite operation. \code{\link{dilation}}, \code{\link{erosion}} for the basic operations. \code{\link{owin}}, \code{\link{as.owin}} for information about windows. } \examples{ v <- closing(letterR, 0.25) plot(v, main="closing") plot(letterR, add=TRUE) plot(closing(cells, 0.1)) points(cells) } \references{ Serra, J. (1982) Image analysis and mathematical morphology. Academic Press. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/rVarGamma.Rd0000644000176200001440000001473513333543264015054 0ustar liggesusers\name{rVarGamma} \alias{rVarGamma} \title{Simulate Neyman-Scott Point Process with Variance Gamma cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel. } \usage{ rVarGamma(kappa, nu, scale, mu, win = owin(), thresh = 0.001, nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL, ..., poisthresh=1e-6, saveparents=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{nu}{ Shape parameter for the cluster kernel. A number greater than -1. } \item{scale}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. 
} \item{thresh}{ Threshold relative to the cluster kernel value at the origin (parent location) determining when the cluster kernel will be treated as zero for simulation purposes. Will be overridden by argument \code{expand} if that is given. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } \item{saveLambda}{ Logical. If \code{TRUE} then the random intensity corresponding to the simulated parent points will also be calculated and saved, and returned as an attribute of the point pattern. } \item{expand}{ Numeric. Size of window expansion for generation of parent points. By default determined by calling \code{\link{clusterradius}} with the numeric threshold value given in \code{thresh}. } \item{\dots}{ Passed to \code{\link{clusterfield}} to control the image resolution when \code{saveLambda=TRUE} and to \code{\link{clusterradius}} when \code{expand} is missing or \code{NULL}. } \item{poisthresh}{ Numerical threshold below which the model will be treated as a Poisson process. See Details. } \item{saveparents}{ Logical value indicating whether to save the locations of the parent points as an attribute. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. Additionally, some intermediate results of the simulation are returned as attributes of this point pattern (see \code{\link{rNeymanScott}}). Furthermore, the simulated intensity function is returned as an attribute \code{"Lambda"}, if \code{saveLambda=TRUE}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. 
Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently and uniformly according to a Variance Gamma kernel. The shape of the kernel is determined by the dimensionless index \code{nu}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). The scale of the kernel is determined by the argument \code{scale}, which is the parameter \eqn{\eta}{eta} appearing in equations (12) and (13) of Jalilian et al (2013). It is expressed in units of length (the same as the unit of length for the window \code{win}). In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, maximum composite likelihood or Palm likelihood using \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2006). } When the parents are homogeneous (\code{kappa} is a single number) and the offspring are inhomogeneous (\code{mu} is a function or pixel image), the model can be fitted to data using \code{\link{kppm}}, or using \code{\link{vargamma.estK}} or \code{\link{vargamma.estpcf}} applied to the inhomogeneous \eqn{K} function. 
If the pair correlation function of the model is very close to that of a Poisson process, deviating by less than \code{poisthresh}, then the model is approximately a Poisson process, and will be simulated as a Poisson process with intensity \code{kappa * mu}, using \code{\link{rpoispp}}. This avoids computations that would otherwise require huge amounts of memory. } \seealso{ \code{\link{rpoispp}}, \code{\link{rNeymanScott}}, \code{\link{kppm}}. \code{\link{vargamma.estK}}, \code{\link{vargamma.estpcf}}. } \examples{ # homogeneous X <- rVarGamma(30, 2, 0.02, 5) # inhomogeneous ff <- function(x,y){ exp(2 - 3 * abs(x)) } Z <- as.im(ff, W= owin()) Y <- rVarGamma(30, 2, 0.02, Z) YY <- rVarGamma(ff, 2, 0.02, 3) } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by \adrian } \keyword{spatial} \keyword{datagen} spatstat/man/bw.stoyan.Rd0000644000176200001440000000355113425767112015117 0ustar liggesusers\name{bw.stoyan} \alias{bw.stoyan} \title{ Stoyan's Rule of Thumb for Bandwidth Selection } \description{ Computes a rough estimate of the appropriate bandwidth for kernel smoothing estimators of the pair correlation function and other quantities. } \usage{ bw.stoyan(X, co=0.15) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{co}{ Coefficient appearing in the rule of thumb. See Details. } } \details{ Estimation of the pair correlation function and other quantities by smoothing methods requires a choice of the smoothing bandwidth. Stoyan and Stoyan (1995, equation (15.16), page 285) proposed a rule of thumb for choosing the smoothing bandwidth. 
For the Epanechnikov kernel, the rule of thumb is to set the kernel's half-width \eqn{h} to \eqn{0.15/\sqrt{\lambda}}{0.15/sqrt(lambda)} where \eqn{\lambda}{lambda} is the estimated intensity of the point pattern, typically computed as the number of points of \code{X} divided by the area of the window containing \code{X}. For a general kernel, the corresponding rule is to set the standard deviation of the kernel to \eqn{\sigma = 0.15/\sqrt{5\lambda}}{sigma = 0.15/sqrt(5 * lambda)}. The coefficient \eqn{0.15} can be tweaked using the argument \code{co}. To ensure the bandwidth is finite, an empty point pattern is treated as if it contained 1 point. } \value{ A finite positive numerical value giving the selected bandwidth (the standard deviation of the smoothing kernel). } \seealso{ \code{\link{pcf}}, \code{\link{bw.relrisk}} } \examples{ data(shapley) bw.stoyan(shapley) } \references{ Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/crossdist.default.Rd0000644000176200001440000000525013333543263016626 0ustar liggesusers\name{crossdist.default} \alias{crossdist.default} \title{Pairwise distances between two different sets of points} \description{ Computes the distances between each pair of points taken from two different sets of points. } \usage{ \method{crossdist}{default}(X, Y, x2, y2, \dots, period=NULL, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Numeric vectors of equal length specifying the coordinates of the first set of points. } \item{x2,y2}{ Numeric vectors of equal length specifying the coordinates of the second set of points. } \item{\dots}{ Ignored. } \item{period}{ Optional. Dimensions for periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } \item{squared}{ Logical. 
If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in the first set of points to the \code{j}-th point in the second set of points. } \details{ Given two sets of points, this function computes the Euclidean distance from each point in the first set to each point in the second set, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}}. This function expects \code{X} and \code{Y} to be numeric vectors of equal length specifying the coordinates of the first set of points. The arguments \code{x2},\code{y2} specify the coordinates of the second set of points. Alternatively if \code{period} is given, then the distances will be computed in the `periodic' sense (also known as `torus' distance). The points will be treated as if they are in a rectangle of width \code{period[1]} and height \code{period[2]}. Opposite edges of the rectangle are regarded as equivalent. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by a factor of 4. 
} \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ d <- crossdist(runif(7), runif(7), runif(12), runif(12)) d <- crossdist(runif(7), runif(7), runif(12), runif(12), period=c(1,1)) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and \adrian } \keyword{spatial} \keyword{math} spatstat/man/rtemper.Rd0000644000176200001440000000557713362051152014651 0ustar liggesusers\name{rtemper} \alias{rtemper} \title{ Simulated Annealing or Simulated Tempering for Gibbs Point Processes } \description{ Performs simulated annealing or simulated tempering for a Gibbs point process model using a specified annealing schedule. } \usage{ rtemper(model, invtemp, nrep, \dots, track=FALSE, start = NULL, verbose = FALSE) } \arguments{ \item{model}{ A Gibbs point process model: a fitted Gibbs point process model (object of class \code{"ppm"}), or any data acceptable to \code{\link{rmhmodel}}. } \item{invtemp}{ A numeric vector of positive numbers. The sequence of values of inverse temperature that will be used. } \item{nrep}{ An integer vector of the same length as \code{invtemp}. The value \code{nrep[i]} specifies the number of steps of the Metropolis-Hastings algorithm that will be performed at inverse temperature \code{invtemp[i]}. } \item{start}{ Initial starting state for the simulation. Any data acceptable to \code{\link{rmhstart}}. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } \item{\dots}{ Additional arguments passed to \code{\link{rmh.default}}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Metropolis-Hastings simulation algorithm \code{\link{rmh}} is run for \code{nrep[1]} steps at inverse temperature \code{invtemp[1]}, then for \code{nrep[2]} steps at inverse temperature \code{invtemp[2]}, and so on. 
Setting the inverse temperature to a value \eqn{\alpha}{alpha} means that the probability density of the Gibbs model, \eqn{f(x)}, is replaced by \eqn{g(x) = C\, f(x)^\alpha}{g(x) = C f(x)^alpha} where \eqn{C} is a normalising constant depending on \eqn{\alpha}{alpha}. Larger values of \eqn{\alpha}{alpha} exaggerate the high and low values of probability density, while smaller values of \eqn{\alpha}{alpha} flatten out the probability density. For example if the original \code{model} is a Strauss process, the modified model is close to a hard core process for large values of inverse temperature, and close to a Poisson process for small values of inverse temperature. } \value{ A point pattern (object of class \code{"ppp"}). If \code{track=TRUE}, the result also has an attribute \code{"history"} which is a data frame with columns \code{proposaltype}, \code{accepted}, \code{numerator} and \code{denominator}, as described in \code{\link{rmh.default}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rmh.default}}, \code{\link{rmh}}. } \examples{ stra <- rmhmodel(cif="strauss", par=list(beta=2,gamma=0.2,r=0.7), w=square(10)) nr <- if(interactive()) 1e5 else 1e4 Y <- rtemper(stra, c(1, 2, 4, 8), nr * (1:4), verbose=TRUE, track=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/rex.Rd0000644000176200001440000000523313333543264013766 0ustar liggesusers\name{rex} \alias{rex} \title{ Richardson Extrapolation } \description{ Performs Richardson Extrapolation on a sequence of approximate values. } \usage{ rex(x, r = 2, k = 1, recursive = FALSE) } \arguments{ \item{x}{ A numeric vector or matrix, whose columns are successive estimates or approximations to a vector of parameters. } \item{r}{ A number greater than 1. The ratio of successive step sizes. See Details. } \item{k}{ Integer. The order of convergence assumed. See Details. 
} \item{recursive}{ Logical value indicating whether to perform one step of Richardson extrapolation (\code{recursive=FALSE}, the default) or repeat the extrapolation procedure until a best estimate is obtained (\code{recursive=TRUE}). } } \details{ Richardson extrapolation is a general technique for improving numerical approximations, often used in numerical integration (Brezinski and Zaglia, 1991). It can also be used to improve parameter estimates in statistical models (Baddeley and Turner, 2014). The successive columns of \code{x} are assumed to have been obtained using approximations with step sizes \eqn{a, a/r, a/r^2, \ldots}{a, a/r, a/r^2, ...} where \eqn{a} is the initial step size (which does not need to be specified). Estimates based on a step size \eqn{s} are assumed to have an error of order \eqn{s^k}. Thus, the default values \code{r=2} and \code{k=1} imply that the errors in the second column of \code{x} should be roughly \eqn{(1/r)^k = 1/2} as large as the errors in the first column, and so on. } \value{ A matrix whose columns contain a sequence of improved estimates. } \references{ Baddeley, A. and Turner, R. (2014) Bias correction for parameter estimates of spatial point process models. \emph{Journal of Statistical Computation and Simulation} \bold{84}, 1621--1643. DOI: 10.1080/00949655.2012.755976 Brezinski, C. and Zaglia, M.R. (1991) \emph{Extrapolation Methods. Theory and Practice}. North-Holland. } \author{ \adrian and \rolf. 
} \seealso{ \code{\link{bc}} } \examples{ # integrals of sin(x) and cos(x) from 0 to pi # correct answers: 2, 0 est <- function(nsteps) { xx <- seq(0, pi, length=nsteps) ans <- pi * c(mean(sin(xx)), mean(cos(xx))) names(ans) <- c("sin", "cos") ans } X <- cbind(est(10), est(20), est(40)) X rex(X) rex(X, recursive=TRUE) # fitted Gibbs point process model fit0 <- ppm(cells ~ 1, Strauss(0.07), nd=16) fit1 <- update(fit0, nd=32) fit2 <- update(fit0, nd=64) co <- cbind(coef(fit0), coef(fit1), coef(fit2)) co rex(co, k=2, recursive=TRUE) } \keyword{math} \keyword{optimize} spatstat/man/Smooth.msr.Rd0000644000176200001440000000401213571674202015234 0ustar liggesusers\name{Smooth.msr} \alias{Smooth.msr} \title{ Smooth a Signed or Vector-Valued Measure } \description{ Apply kernel smoothing to a signed measure or vector-valued measure. } \usage{ \method{Smooth}{msr}(X, ..., drop=TRUE) } \arguments{ \item{X}{ Object of class \code{"msr"} representing a signed measure or vector-valued measure. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} controlling the smoothing bandwidth and the pixel resolution. } \item{drop}{ Logical. If \code{TRUE} (the default), the result of smoothing a scalar-valued measure is a pixel image. If \code{FALSE}, the result of smoothing a scalar-valued measure is a list containing one pixel image. } } \details{ This function applies kernel smoothing to a signed measure or vector-valued measure \code{X}. The Gaussian kernel is used. The object \code{X} would typically have been created by \code{\link{residuals.ppm}} or \code{\link{msr}}. } \value{ A pixel image or a list of pixel images. For scalar-valued measures, a pixel image (object of class \code{"im"}) provided \code{drop=TRUE}. For vector-valued measures (or if \code{drop=FALSE}), a list of pixel images; the list also belongs to the class \code{"solist"} so that it can be printed and plotted. } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. 
(2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., \Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \author{ \adrian } \seealso{ \code{\link{Smooth}}, \code{\link{msr}}, \code{\link{plot.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(Smooth(rp)) plot(Smooth(rs)) } \keyword{spatial} \keyword{models} spatstat/man/divide.linnet.Rd0000644000176200001440000000214513426703276015727 0ustar liggesusers\name{divide.linnet} \alias{divide.linnet} \title{ Divide Linear Network at Cut Points } \description{ Make a tessellation of a linear network by dividing it into pieces demarcated by the points of a point pattern. } \usage{ divide.linnet(X) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } } \details{ The points \code{X} are interpreted as dividing the linear network \code{L=as.linnet(X)} into separate pieces. Two locations on \code{L} belong to the same piece if and only if they can be joined by a path in \code{L} that does not cross any of the points of \code{X}. The result is a tessellation of the network (object of class \code{"lintess"}) representing the division of \code{L} into pieces. } \value{ A tessellation on a linear network (object of class \code{"lintess"}). } \author{ \spatstatAuthors and Greg McSwiggan. } \seealso{ \code{\link{linnet}}, \code{\link{lintess}}. 
} \examples{ X <- runiflpp(5, simplenet) plot(divide.linnet(X)) plot(X, add=TRUE, pch=16, show.network=FALSE) } \keyword{spatial} \keyword{manip} spatstat/man/as.data.frame.envelope.Rd0000644000176200001440000000260713333543262017410 0ustar liggesusers\name{as.data.frame.envelope} \alias{as.data.frame.envelope} \title{Coerce Envelope to Data Frame} \description{ Converts an envelope object to a data frame. } \usage{ \method{as.data.frame}{envelope}(x, \dots, simfuns=FALSE) } \arguments{ \item{x}{Envelope object (class \code{"envelope"}).} \item{\dots}{Ignored.} \item{simfuns}{Logical value indicating whether the result should include the values of the simulated functions that were used to build the envelope. } } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class of envelopes (see \code{\link{envelope}}. The result is a data frame with columns containing the values of the function argument (usually named \code{r}), the function estimate for the original point pattern data (\code{obs}), the upper and lower envelope limits (\code{hi} and \code{lo}), and possibly additional columns. If \code{simfuns=TRUE}, the result also includes columns of values of the simulated functions that were used to compute the envelope. This is possible only when the envelope was computed with the argument \code{savefuns=TRUE} in the call to \code{\link{envelope}}. } \value{ A data frame. } \examples{ E <- envelope(cells, nsim=5, savefuns=TRUE) tail(as.data.frame(E)) tail(as.data.frame(E, simfuns=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/density.ppp.Rd0000644000176200001440000003742013371515460015447 0ustar liggesusers\name{density.ppp} \alias{density.ppp} \title{Kernel Smoothed Intensity of Point Pattern} \description{ Compute a kernel smoothed intensity function from a point pattern. 
} \usage{ \method{density}{ppp}(x, sigma=NULL, \dots, weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Standard deviation of isotropic smoothing kernel. Either a numerical value, or a function that computes an appropriate value of \code{sigma}. } \item{weights}{ Optional weights to be attached to the points. A numeric vector, numeric matrix, an \code{expression}, or a pixel image. } \item{\dots}{ Additional arguments passed to \code{\link{pixellate.ppp}} and \code{\link{as.mask}} to determine the pixel resolution, or passed to \code{sigma} if it is a function. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{varcov}{ Variance-covariance matrix of anisotropic smoothing kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{adjust}{ Optional. Adjustment factor for the smoothing parameter. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. 
If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{positive}{ Logical value indicating whether to force all density values to be positive numbers. Default is \code{FALSE}. } \item{verbose}{ Logical value indicating whether to issue warnings about numerical problems and conditions. } } \value{ By default, the result is a pixel image (object of class \code{"im"}). Pixel values are estimated intensity values, expressed in \dQuote{points per unit area}. If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{x}. Values are estimated intensity values at the points of \code{x}. In either case, the return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. If \code{weights} is a matrix with more than one column, then the result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. It computes a fixed-bandwidth kernel estimate (Diggle, 1985) of the intensity function of the point process that generated the point pattern \code{x}. By default it computes the convolution of the isotropic Gaussian kernel of standard deviation \code{sigma} with point masses at each of the data points in \code{x}. Anisotropic Gaussian kernels, and non-Gaussian kernels, are also supported. Each point has unit weight, unless the argument \code{weights} is given. 
If \code{edge=TRUE}, the intensity estimate is corrected for edge effect bias in one of two ways: \itemize{ \item If \code{diggle=FALSE} (the default) the intensity estimate is corrected by dividing it by the convolution of the Gaussian kernel with the window of observation. This is the approach originally described in Diggle (1985). Thus the intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = e(u) \sum_i k(x_i - u) w_i }{ \lambda(u) = e(u) \sum[i] k(x[i] - u) w[i] } where \eqn{k} is the Gaussian smoothing kernel, \eqn{e(u)} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. \item If \code{diggle=TRUE} then the code uses the improved edge correction described by Jones (1993) and Diggle (2010, equation 18.9). This has been shown to have better performance (Jones, 1993) but is slightly slower to compute. The intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = \sum_i k(x_i - u) w_i e(x_i) }{ \lambda(u) = \sum[i] k(x[i] - u) w[i] e(x[i]) } where again \eqn{k} is the Gaussian smoothing kernel, \eqn{e(x_i)}{e(x[i])} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. } In both cases, the edge correction term \eqn{e(u)} is the reciprocal of the kernel mass inside the window: \deqn{ \frac{1}{e(u)} = \int_W k(v-u) \, {\rm d}v }{ 1/e(u) = integral[v in W] k(v-u) dv } where \eqn{W} is the observation window. By default, smoothing is performed using a Gaussian kernel, with smoothing bandwidth determined by the arguments \code{sigma}, \code{varcov} and \code{adjust}. \itemize{ \item if \code{sigma} is a single numerical value, this is taken as the standard deviation of the isotropic Gaussian kernel. \item alternatively \code{sigma} may be a function that computes an appropriate bandwidth from the data point pattern by calling \code{sigma(x)}. 
To perform automatic bandwidth selection using cross-validation, it is recommended to use the functions \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} or \code{\link{bw.ppl}}. \item The smoothing kernel may be made anisotropic by giving the variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. \item Alternatively \code{sigma} may be a vector of length 2 giving the standard deviations of the \eqn{x} and \eqn{y} coordinates, thus equivalent to \code{varcov = diag(rep(sigma^2, 2))}. \item if neither \code{sigma} nor \code{varcov} is specified, an isotropic Gaussian kernel will be used, with a default value of \code{sigma} calculated by a simple rule of thumb that depends only on the size of the window. \item The argument \code{adjust} makes it easy for the user to change the bandwidth specified by any of the rules above. The value of \code{sigma} will be multiplied by the factor \code{adjust}. The matrix \code{varcov} will be multiplied by \code{adjust^2}. To double the smoothing bandwidth, set \code{adjust=2}. \item An infinite bandwidth, \code{sigma=Inf} or \code{adjust=Inf}, is permitted, and yields an intensity estimate which is constant over the spatial domain. } The choice of smoothing kernel is determined by the argument \code{kernel}. This should be a character string giving the name of a recognised two-dimensional kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. The default is a Gaussian kernel. If \code{scalekernel=TRUE} then the kernel values will be rescaled according to the arguments \code{sigma}, \code{varcov} and \code{adjust} as explained above, effectively treating \code{kernel} as the template kernel with standard deviation equal to 1. 
This is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, the kernel values will not be altered, and the arguments \code{sigma}, \code{varcov} and \code{adjust} are ignored. This is the default behaviour when \code{kernel} is a pixel image or a function. If \code{at="pixels"} (the default), intensity values are computed at every location \eqn{u} in a fine grid, and are returned as a pixel image. The point pattern is first discretised using \code{\link{pixellate.ppp}}, then the intensity is computed using the Fast Fourier Transform. Accuracy depends on the pixel resolution and the discretisation rule. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link{as.mask}} (specify the number of pixels by \code{dimyx} or the pixel size by \code{eps}). The discretisation rule is controlled by the arguments \code{\dots} passed to \code{\link{pixellate.ppp}} (the default rule is that each point is allocated to the nearest pixel centre; this can be modified using the arguments \code{fractional} and \code{preserve}). If \code{at="points"}, the intensity values are computed to high accuracy at the points of \code{x} only. Computation is performed by directly evaluating and summing the kernel contributions without discretising the data. The result is a numeric vector giving the density values. The intensity value at a point \eqn{x_i}{x[i]} is (if \code{diggle=FALSE}) \deqn{ \hat\lambda(x_i) = e(x_i) \sum_j k(x_j - x_i) w_j }{ \lambda(x[i]) = e(x[i]) \sum[j] k(x[j] - x[i]) w[j] } or (if \code{diggle=TRUE}) \deqn{ \hat\lambda(x_i) = \sum_j k(x_j - x_i) w_j e(x_j) }{ \lambda(x[i]) = \sum[j] k(x[j] - x[i]) w[j] e(x[j]) } If \code{leaveoneout=TRUE} (the default), then the sum in the equation is taken over all \eqn{j} not equal to \eqn{i}, so that the intensity value at a data point is the sum of kernel contributions from all \emph{other} data points. 
If \code{leaveoneout=FALSE} then the sum is taken over all \eqn{j}, so that the intensity value at a data point includes a contribution from the same point. If \code{weights} is a matrix with more than one column, then the calculation is effectively repeated for each column of weights. The result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). The argument \code{weights} can also be an \code{expression}. It will be evaluated in the data frame \code{as.data.frame(x)} to obtain a vector or matrix of weights. The expression may involve the symbols \code{x} and \code{y} representing the Cartesian coordinates, the symbol \code{marks} representing the mark values if there is only one column of marks, and the names of the columns of marks if there are several columns. The argument \code{weights} can also be a pixel image (object of class \code{"im"}). Numerical weights for the data points will be extracted from this image (by looking up the pixel values at the locations of the data points in \code{x}). To select the bandwidth \code{sigma} automatically by cross-validation, use \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} or \code{\link{bw.ppl}}. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. For adaptive nonparametric estimation, see \code{\link{adaptive.density}}. For data sharpening, see \code{\link{sharpen.ppp}}. To compute a relative risk surface or probability map for two (or more) types of points, use \code{\link{relrisk}}. } \seealso{ \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} and \code{\link{bw.ppl}} for bandwidth selection. \code{\link{Smooth.ppp}}, \code{\link{sharpen.ppp}}, \code{\link{adaptive.density}}, \code{\link{relrisk}}, \code{\link{ppp.object}}, \code{\link{im.object}}. } \note{ This function is often misunderstood. 
The result of \code{density.ppp} is not a spatial smoothing of the marks or weights attached to the point pattern. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. The result of \code{density.ppp} is not a probability density. It is an estimate of the \emph{intensity function} of the point process that generated the point pattern data. Intensity is the expected number of random points per unit area. The units of intensity are \dQuote{points per unit area}. Intensity is usually a function of spatial location, and it is this function which is estimated by \code{density.ppp}. The integral of the intensity function over a spatial region gives the expected number of points falling in this region. Inspecting an estimate of the intensity function is usually the first step in exploring a spatial point pattern dataset. For more explanation, see Baddeley, Rubak and Turner (2015) or Diggle (2003, 2010). If you have two (or more) types of points, and you want a probability map or relative risk surface (the spatially-varying probability of a given type), use \code{\link{relrisk}}. } \section{Negative Values}{ Negative and zero values of the density estimate are possible when \code{at="pixels"} because of numerical errors in finite-precision arithmetic. By default, \code{density.ppp} does not try to repair such errors. This would take more computation time and is not always needed. (Also it would not be appropriate if \code{weights} include negative values.) To ensure that the resulting density values are always positive, set \code{positive=TRUE}. 
} \examples{ if(interactive()) { opa <- par(mfrow=c(1,2)) plot(density(cells, 0.05)) plot(density(cells, 0.05, diggle=TRUE)) par(opa) v <- diag(c(0.05, 0.07)^2) plot(density(cells, varcov=v)) } \donttest{ Z <- density(cells, 0.05) Z <- density(cells, 0.05, diggle=TRUE) Z <- density(cells, 0.05, se=TRUE) Z <- density(cells, varcov=diag(c(0.05^2, 0.07^2))) Z <- density(cells, 0.05, weights=data.frame(a=1:42,b=42:1)) Z <- density(cells, 0.05, weights=expression(x)) } # automatic bandwidth selection plot(density(cells, sigma=bw.diggle(cells))) # equivalent: plot(density(cells, bw.diggle)) # evaluate intensity at points density(cells, 0.05, at="points") plot(density(cells, sigma=0.4, kernel="epanechnikov")) # relative risk calculation by hand (see relrisk.ppp) lung <- split(chorley)$lung larynx <- split(chorley)$larynx D <- density(lung, sigma=2) plot(density(larynx, sigma=2, weights=1/D)) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J. (2010) Nonparametric methods. Chapter 18, pp. 299--316 in A.E. Gelfand, P.J. Diggle, M. Fuentes and P. Guttorp (eds.) \emph{Handbook of Spatial Statistics}, CRC Press, Boca Raton, FL. Jones, M.C. (1993) Simple boundary corrections for kernel density estimation. \emph{Statistics and Computing} \bold{3}, 135--146. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/summary.listof.Rd0000644000176200001440000000150313333543264016160 0ustar liggesusers\name{summary.listof} \alias{summary.listof} \title{Summary of a List of Things} \description{ Prints a useful summary of each item in a list of things. 
} \usage{ \method{summary}{listof}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"listof"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"listof"} is effectively a list of things which are all of the same class. This function extracts a useful summary of each of the items in the list. } \seealso{ \code{\link{summary}}, \code{\link{plot.listof}} } \examples{ x <- list(A=runif(10), B=runif(10), C=runif(10)) class(x) <- c("listof", class(x)) summary(x) } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/spatstat.options.Rd0000644000176200001440000003740413333543264016532 0ustar liggesusers\name{spatstat.options} \alias{spatstat.options} \alias{reset.spatstat.options} \title{Internal Options in Spatstat Package} \description{ Allows the user to examine and reset the values of global parameters which control actions in the \pkg{spatstat} package. } \usage{ spatstat.options(...) reset.spatstat.options() } \arguments{ \item{\dots}{ Either empty, or a succession of parameter names in quotes, or a succession of \code{name=value} pairs. See below for the parameter names. } } \value{ Either a list of parameters and their values, or a single value. See Details. } \details{ The function \code{spatstat.options} allows the user to examine and reset the values of global parameters which control actions in the \pkg{spatstat} package. It is analogous to the system function \code{\link[base]{options}}. The function \code{reset.spatstat.options} resets all the global parameters in \pkg{spatstat} to their original, default values. The global parameters of interest to the user are: \describe{ \item{checkpolygons}{ Logical flag indicating whether the functions \code{\link[spatstat]{owin}} and \code{\link[spatstat]{as.owin}} should apply very strict checks on the validity of polygon data. 
These strict checks are no longer necessary, and the default is \code{checkpolygons=FALSE}. See also \code{fixpolygons} below. } \item{checksegments}{ Logical flag indicating whether the functions \code{\link[spatstat]{psp}} and \code{\link[spatstat]{as.psp}} should check the validity of line segment data (in particular, checking that the endpoints of the line segments are inside the specified window). It is advisable to leave this flag set to \code{TRUE}. } \item{eroded.intensity}{ Logical flag affecting the behaviour of the score and pseudo-score residual functions \code{\link[spatstat]{Gcom}}, \code{\link[spatstat]{Gres}} \code{\link[spatstat]{Kcom}}, \code{\link[spatstat]{Kres}}, \code{\link[spatstat]{psstA}}, \code{\link[spatstat]{psstG}}, \code{\link[spatstat]{psst}}. The flag indicates whether to compute intensity estimates on an eroded window (\code{eroded.intensity=TRUE}) or on the original data window (\code{eroded.intensity=FALSE}, the default). } \item{expand}{ The default expansion factor (area inflation factor) for expansion of the simulation window in \code{\link[spatstat]{rmh}} (see \code{\link[spatstat]{rmhcontrol}}). Initialised to \code{2}. } \item{expand.polynom}{ Logical. Whether expressions involving \code{\link[spatstat]{polynom}} in a model formula should be expanded, so that \code{polynom(x,2)} is replaced by \code{x + I(x^2)} and so on. Initialised to \code{TRUE}. } \item{fastpois}{ Logical. Whether to use a fast algorithm (introduced in \pkg{spatstat 1.42-3}) for simulating the Poisson point process in \code{\link[spatstat]{rpoispp}} when the argument \code{lambda} is a pixel image. Initialised to \code{TRUE}. Should be set to \code{FALSE} if needed to guarantee repeatability of results computed using earlier versions of \pkg{spatstat}. } \item{fastthin}{ Logical. 
Whether to use a fast C language algorithm (introduced in \pkg{spatstat 1.42-3}) for random thinning in \code{\link[spatstat]{rthin}} when the argument \code{P} is a single number. Initialised to \code{TRUE}. Should be set to \code{FALSE} if needed to guarantee repeatability of results computed using earlier versions of \pkg{spatstat}. } \item{fastK.lgcp}{ Logical. Whether to use fast or slow algorithm to compute the (theoretical) \eqn{K}-function of a log-Gaussian Cox process for use in \code{\link[spatstat]{lgcp.estK}} or \code{\link[spatstat]{Kmodel}}. The slow algorithm uses accurate numerical integration; the fast algorithm uses Simpson's Rule for numerical integration, and is about two orders of magnitude faster. Initialised to \code{FALSE}. } \item{fixpolygons}{ Logical flag indicating whether the functions \code{\link[spatstat]{owin}} and \code{\link[spatstat]{as.owin}} should repair errors in polygon data. For example, self-intersecting polygons and overlapping polygons will be repaired. The default is \code{fixpolygons=TRUE}. } \item{fftw}{ Logical value indicating whether the two-dimensional Fast Fourier Transform should be computed using the package \pkg{fftwtools}, instead of the \code{fft} function in the \pkg{stats} package. This affects the speed of \code{\link[spatstat]{density.ppp}}, \code{\link[spatstat]{density.psp}}, \code{\link[spatstat]{blur}} \code{\link[spatstat]{setcov}} and \code{\link[spatstat]{Smooth.ppp}}. } \item{gpclib}{ Defunct. This parameter was used to permit or forbid the use of the package \pkg{gpclib}, because of its restricted software licence. This package is no longer needed. } \item{huge.npoints}{ The maximum value of \code{n} for which \code{runif(n)} will not generate an error (possible errors include failure to allocate sufficient memory, and integer overflow of \code{n}). An attempt to generate more than this number of random points triggers a warning from \code{\link[spatstat]{runifpoint}} and other functions. 
Defaults to \code{1e6}. } \item{image.colfun}{ Function determining the default colour map for \code{\link[spatstat]{plot.im}}. When called with one integer argument \code{n}, this function should return a character vector of length \code{n} specifying \code{n} different colours. } \item{Kcom.remove.zeroes}{ Logical value, determining whether the algorithm in \code{\link[spatstat]{Kcom}} and \code{\link[spatstat]{Kres}} removes or retains the contributions to the function from pairs of points that are identical. If these are retained then the function has a jump at \eqn{r=0}. Initialised to \code{TRUE}. } \item{maxedgewt}{ Edge correction weights will be trimmed so as not to exceed this value. This applies to the weights computed by \code{\link[spatstat]{edge.Trans}} or \code{\link[spatstat]{edge.Ripley}} and used in \code{\link[spatstat]{Kest}} and its relatives. } \item{maxmatrix}{ The maximum permitted size (rows times columns) of matrices generated by \pkg{spatstat}'s internal code. Used by \code{\link[spatstat]{ppm}} and \code{\link[spatstat]{predict.ppm}} (for example) to decide when to split a large calculation into blocks. Defaults to \code{2^24=16777216}. } \item{monochrome}{ Logical flag indicating whether graphics should be plotted in grey scale (\code{monochrome=TRUE}) or in colour (\code{monochrome=FALSE}, the default). } \item{n.bandwidth}{ Integer. Number of trial values of smoothing bandwidth to use for cross-validation in \code{\link[spatstat]{bw.relrisk}} and similar functions. } \item{ndummy.min}{ The minimum number of dummy points in a quadrature scheme created by \code{\link[spatstat]{default.dummy}}. Either an integer or a pair of integers giving the minimum number of dummy points in the \code{x} and \code{y} directions respectively. 
} \item{ngrid.disc}{ Number of points in the square grid used to compute a discrete approximation to the areas of discs in \code{\link[spatstat]{areaLoss}} and \code{\link[spatstat]{areaGain}} when exact calculation is not available. A single integer. } \item{npixel}{ Default number of pixels in a binary mask or pixel image. Either an integer, or a pair of integers, giving the number of pixels in the \code{x} and \code{y} directions respectively. } \item{nvoxel}{ Default number of voxels in a 3D image, typically for calculating the distance transform in \code{\link[spatstat]{F3est}}. Initialised to 4 megavoxels: \code{nvoxel = 2^22 = 4194304}. } \item{par.binary}{ List of arguments to be passed to the function \code{\link[graphics]{image}} when displaying a binary image mask (in \code{\link[spatstat]{plot.owin}} or \code{\link[spatstat]{plot.ppp}}). Typically used to reset the colours of foreground and background. } \item{par.contour}{ List of arguments controlling contour plots of pixel images by \code{\link[spatstat]{contour.im}}. } \item{par.fv}{ List of arguments controlling the plotting of functions by \code{\link[spatstat]{plot.fv}} and its relatives. } \item{par.persp}{ List of arguments to be passed to the function \code{\link[graphics]{persp}} when displaying a real-valued image, such as the fitted surfaces in \code{\link[spatstat]{plot.ppm}}. } \item{par.points}{ List of arguments controlling the plotting of point patterns by \code{\link[spatstat]{plot.ppp}}. } \item{par.pp3}{ List of arguments controlling the plotting of three-dimensional point patterns by \code{\link[spatstat]{plot.pp3}}. } \item{print.ppm.SE}{ Default rule used by \code{\link[spatstat]{print.ppm}} to decide whether to calculate and print standard errors of the estimated coefficients of the model. One of the strings \code{"always"}, \code{"never"} or \code{"poisson"} (the latter indicating that standard errors will be calculated only for Poisson models). 
The default is \code{"poisson"} because the calculation for non-Poisson models can take a long time. } \item{progress}{ Character string determining the style of progress reports printed by \code{\link[spatstat]{progressreport}}. Either \code{"tty"}, \code{"tk"} or \code{"txtbar"}. For explanation of these options, see \code{\link[spatstat]{progressreport}}. } \item{project.fast}{ Logical. If \code{TRUE}, the algorithm of \code{\link[spatstat]{project.ppm}} will be accelerated using a shortcut. Initialised to \code{FALSE}. } \item{psstA.ngrid}{ Single integer, controlling the accuracy of the discrete approximation of areas computed in the function \code{\link[spatstat]{psstA}}. The area of a disc is approximated by counting points on an \eqn{n \times n}{n * n} grid. Initialised to 32. } \item{psstA.nr}{ Single integer, determining the number of distances \eqn{r} at which the function \code{\link[spatstat]{psstA}} will be evaluated (in the default case where argument \code{r} is absent). Initialised to 30. } \item{psstG.remove.zeroes}{ Logical value, determining whether the algorithm in \code{\link[spatstat]{psstG}} removes or retains the contributions to the function from pairs of points that are identical. If these are retained then the function has a jump at \eqn{r=0}. Initialised to \code{TRUE}. } \item{rmh.p, rmh.q, rmh.nrep}{ New default values for the parameters \code{p}, \code{q} and \code{nrep} in the Metropolis-Hastings simulation algorithm. These override the defaults in \code{\link[spatstat]{rmhcontrol.default}}. } \item{scalable}{ Logical flag indicating whether the new code in \code{rmh.default} which makes the results scalable (invariant to change of units) should be used. In order to recover former behaviour (so that previous results can be reproduced) set this option equal to \code{FALSE}. See the \dQuote{Warning} section in the help for \code{\link[spatstat]{rmh}()} for more detail. } \item{terse}{ Integer between 0 and 4. 
The level of terseness (brevity) in printed output from many functions in \pkg{spatstat}. Higher values mean shorter output. A rough guide is the following: \tabular{ll}{ 0 \tab Full output\cr 1 \tab Avoid wasteful output \cr 2 \tab Remove space between paragraphs\cr 3 \tab Suppress extras such as standard errors \cr 4 \tab Compress text, suppress internal warnings } The value of \code{terse} is initialised to 0. } \item{transparent}{ Logical value indicating whether default colour maps are allowed to include semi-transparent colours, where possible. Default is \code{TRUE}. Currently this only affects \code{\link[spatstat]{plot.ppp}}. } \item{units.paren}{ The kind of parenthesis which encloses the text that explains a \code{unitname}. This text is seen in the text output of functions like \code{\link[spatstat]{print.ppp}} and in the graphics generated by \code{\link[spatstat]{plot.fv}}. The value should be one of the character strings \code{'('}, \code{'['}, \code{'{'} or \code{''}. The default is \code{'('}. } } If no arguments are given, the current values of all parameters are returned, in a list. If one parameter name is given, the current value of this parameter is returned (\bold{not} in a list, just the value). If several parameter names are given, the current values of these parameters are returned, in a list. If \code{name=value} pairs are given, the named parameters are reset to the given values, and the \bold{previous} values of these parameters are returned, in a list. } \section{Internal parameters}{ The following parameters may also be specified to \code{spatstat.options} but are intended for software development or testing purposes. \describe{ \item{closepairs.newcode}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{closepairs}}. Initialised to \code{TRUE}. } \item{crossing.psp.useCall}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{crossing.psp}}. Initialised to \code{TRUE}. 
} \item{crosspairs.newcode}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{crosspairs}}. Initialised to \code{TRUE}. } \item{densityC}{ Logical. Indicates whether to use accelerated C code (\code{densityC=TRUE}) or interpreted R code (\code{densityC=FALSE}) to evaluate \code{density.ppp(X, at="points")}. Initialised to \code{TRUE}. } \item{exactdt.checks.data}{ Logical. Do not change this value, unless you are \adrian. } \item{fasteval}{ One of the strings \code{'off'}, \code{'on'} or \code{'test'} determining whether to use accelerated C code to evaluate the conditional intensity of a Gibbs model. Initialised to \code{'on'}. } \item{old.morpho.psp}{ Logical. Whether to use old R code for morphological operations. Initialised to \code{FALSE}. } \item{selfcrossing.psp.useCall}{ Logical. Whether to use new version of the code for \code{\link[spatstat]{selfcrossing.psp}}. Initialised to \code{TRUE}. } \item{use.Krect}{ Logical. Whether to use new code for the K-function in a rectangular window. Initialised to \code{TRUE}. 
} } } \seealso{ \code{\link[base]{options}} } \examples{ # save current values oldopt <- spatstat.options() spatstat.options("npixel") spatstat.options(npixel=150) spatstat.options(npixel=c(100,200)) spatstat.options(par.binary=list(col=grey(c(0.5,1)))) spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) # see help(persp.default) for other options # revert spatstat.options(oldopt) } \author{\adrian and \rolf } \keyword{spatial} spatstat/man/plot.onearrow.Rd0000644000176200001440000000474313333543264016006 0ustar liggesusers\name{plot.onearrow} \alias{plot.onearrow} \title{Plot an Arrow} \description{Plots an object of class \code{"onearrow"}.} \usage{ \method{plot}{onearrow}(x, \dots, add = FALSE, main = "", retract = 0.05, headfraction = 0.25, headangle = 12, headnick = 0.1, col.head = NA, lwd.head = lwd, lwd = 1, col = 1, zap = FALSE, zapfraction = 0.07, pch = 1, cex = 1, do.plot = TRUE, do.points = FALSE, show.all = !add) } \arguments{ \item{x}{ Object of class \code{"onearrow"} to be plotted. This object is created by the command \code{\link{onearrow}}. } \item{\dots}{ Additional graphics arguments passed to \code{\link[graphics]{segments}} to control the appearance of the line. } \item{add}{Logical value indicating whether to add graphics to the existing plot (\code{add=TRUE}) or to start a new plot (\code{add=FALSE}). } \item{main}{Main title for the plot.} \item{retract}{ Fraction of length of arrow to remove at each end. } \item{headfraction}{ Length of arrow head as a fraction of overall length of arrow. } \item{headangle}{ Angle (in degrees) between the outer edge of the arrow head and the shaft of the arrow. } \item{headnick}{ Size of the nick in the trailing edge of the arrow head as a fraction of length of arrow head. } \item{col.head,lwd.head}{ Colour and line style of the filled arrow head. } \item{col,lwd}{ Colour and line style of the arrow shaft. 
} \item{zap}{ Logical value indicating whether the arrow should include a Z-shaped (lightning-bolt) feature in the middle of the shaft. } \item{zapfraction}{ Size of Z-shaped deviation as a fraction of total arrow length. } \item{pch,cex}{ Plot character and character size for the two end points of the arrow, if \code{do.points=TRUE}. } \item{do.plot}{ Logical. Whether to actually perform the plot. } \item{do.points}{ Logical. Whether to display the two end points of the arrow as well. } \item{show.all}{ Internal use only. } } \details{ The argument \code{x} should be an object of class \code{"onearrow"} created by the command \code{\link{onearrow}}. } \value{ A window (class \code{"owin"}) enclosing the plotted graphics. } \examples{ oa <- onearrow(cells[c(1, 42)]) oa plot(oa) plot(oa, zap=TRUE, do.points=TRUE, col.head="pink", col="red") } \author{ \spatstatAuthors. } \seealso{ \code{\link{onearrow}}, \code{\link{yardstick}} } \keyword{spatial} \keyword{hplot} spatstat/man/beginner.Rd0000644000176200001440000000142413333543262014755 0ustar liggesusers\name{beginner} \alias{beginner} \title{ Print Introduction For Beginners } \description{ Prints an introduction for beginners to the \code{spatstat} package, or another specified package. } \usage{ beginner(package = "spatstat") } \arguments{ \item{package}{ Name of package. } } \details{ This function prints an introduction for beginners to the \pkg{spatstat} package. The function can be executed simply by typing \code{beginner} without parentheses. If the argument \code{package} is given, then the function prints the beginner's help file \code{BEGINNER.txt} from the specified package (if it has one). } \value{ Null. 
} \author{\adrian and \rolf } \seealso{ \code{\link{latest.news}} } \examples{ beginner } \keyword{documentation} spatstat/man/print.ppp.Rd0000644000176200001440000000154713333543264015126 0ustar liggesusers\name{print.ppp} \alias{print.ppp} \title{Print Brief Details of a Point Pattern Dataset} \description{ Prints a very brief description of a point pattern dataset. } \usage{ \method{print}{ppp}(x, \dots) } \arguments{ \item{x}{Point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the point pattern \code{x} is printed. This is a method for the generic function \code{\link{print}}. } \seealso{ \code{\link{print}}, \code{\link{print.owin}}, \code{\link{summary.ppp}} } \examples{ data(cells) # plain vanilla point pattern cells data(lansing) # multitype point pattern lansing data(longleaf) # numeric marks longleaf data(demopat) # weird polygonal window demopat } \author{\adrian and \rolf } \keyword{spatial} \keyword{print} spatstat/man/linearpcfdot.Rd0000644000176200001440000000540713623712063015642 0ustar liggesusers\name{linearpcfdot} \alias{linearpcfdot} \title{ Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. 
The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}. \code{\link{pcfcross}}. 
} \examples{ data(chicago) g <- linearpcfdot(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/StraussHard.Rd0000644000176200001440000001000613333543262015423 0ustar liggesusers\name{StraussHard} \alias{StraussHard} \title{The Strauss / Hard Core Point Process Model} \description{ Creates an instance of the ``Strauss/ hard core'' point process model which can then be fitted to point pattern data. } \usage{ StraussHard(r, hc=NA) } \arguments{ \item{r}{The interaction radius of the Strauss interaction} \item{hc}{The hard core distance. Optional.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the ``Strauss/hard core'' process with Strauss interaction radius \eqn{r} and hard core distance \code{hc}. } \details{ A Strauss/hard core process with interaction radius \eqn{r}, hard core distance \eqn{h < r}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is a pairwise interaction point process in which \itemize{ \item distinct points are not allowed to come closer than a distance \eqn{h} apart \item each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the probability density. } This is a hybrid of the Strauss process and the hard core process. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case for the Strauss process). 
If \eqn{\gamma < 1}{gamma < 1}, the model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma > 1}{gamma > 1}, the model is ``ordered'' or ``inhibitive'' up to the distance \eqn{h}, but has an ``attraction'' between points lying at distances in the range between \eqn{h} and \eqn{r}. If \eqn{\gamma = 1}{gamma = 1}, the process reduces to a classical hard core process with hard core distance \eqn{h}. If \eqn{\gamma = 0}{gamma = 0}, the process reduces to a classical hard core process with hard core distance \eqn{r}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss/hard core process pairwise interaction is yielded by the function \code{StraussHard()}. See the examples below. The canonical parameter \eqn{\log(\gamma)}{log(gamma)} is estimated by \code{\link{ppm}()}, not fixed in \code{StraussHard()}. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. 
} \examples{ StraussHard(r=1,hc=0.02) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, StraussHard(r=0.1, hc=0.05)) # fit the stationary Strauss/hard core process to `cells' } ppm(cells, ~ polynom(x,y,3), StraussHard(r=0.1, hc=0.05)) # fit a nonstationary Strauss/hard core process # with log-cubic polynomial trend } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/hyperframe.Rd0000644000176200001440000000663013333543263015333 0ustar liggesusers\name{hyperframe} \alias{hyperframe} \title{Hyper Data Frame} \description{ Create a hyperframe: a two-dimensional array in which each column consists of values of the same atomic type (like the columns of a data frame) or objects of the same class. } \usage{ hyperframe(..., row.names=NULL, check.rows=FALSE, check.names=TRUE, stringsAsFactors=default.stringsAsFactors()) } \arguments{ \item{\dots}{ Arguments of the form \code{value} or \code{tag=value}. Each \code{value} is either an atomic vector, or a list of objects of the same class, or a single atomic value, or a single object. Each \code{value} will become a column of the array. The \code{tag} determines the name of the column. See Details. } \item{row.names,check.rows,check.names,stringsAsFactors}{ Arguments passed to \code{\link{data.frame}} controlling the names of the rows, whether to check that rows are consistent, whether to check validity of the column names, and whether to convert character columns to factors. } } \details{ A hyperframe is like a data frame, except that its entries can be objects of any kind. A hyperframe is a two-dimensional array in which each column consists of values of one atomic type (as in a data frame) or consists of objects of one class. The arguments \code{\dots} are any number of arguments of the form \code{value} or \code{tag=value}. Each \code{value} will become a column of the array. The \code{tag} determines the name of the column. 
Each \code{value} can be either \itemize{ \item an atomic vector or factor (i.e. numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of objects. } All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. } \section{Methods for Hyperframes}{ There are methods for \code{print}, \code{plot}, \code{summary}, \code{with}, \code{split}, \code{[}, \code{[<-}, \code{$}, \code{$<-}, \code{names}, \code{as.data.frame}, \code{as.list}, \code{cbind} and \code{rbind} for the class of hyperframes. There is also \code{is.hyperframe} and \code{\link{as.hyperframe}}. } \value{ An object of class \code{"hyperframe"}. } \author{\adrian and \rolf } \seealso{ \code{\link{as.hyperframe}}, \code{\link{as.hyperframe.ppx}}, \code{\link{plot.hyperframe}}, \code{\link{[.hyperframe}}, \code{\link{with.hyperframe}}, \code{\link{split.hyperframe}}, \code{\link{as.data.frame.hyperframe}}, \code{\link{cbind.hyperframe}}, \code{\link{rbind.hyperframe}} } \examples{ # equivalent to a data frame hyperframe(X=1:10, Y=3) # list of functions hyperframe(f=list(sin, cos, tan)) # table of functions and matching expressions hyperframe(f=list(sin, cos, tan), e=list(expression(sin(x)), expression(cos(x)), expression(tan(x)))) hyperframe(X=1:10, Y=letters[1:10], Z=factor(letters[1:10]), stringsAsFactors=FALSE) lambda <- runif(4, min=50, max=100) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) h h$lambda2 <- lambda^2 h[, "lambda3"] <- lambda^3 h[, "Y"] <- X } \keyword{spatial} \keyword{manip} spatstat/man/eval.fv.Rd0000644000176200001440000001202313333543263014523 0ustar liggesusers\name{eval.fv} \alias{eval.fv} \title{Evaluate Expression Involving Functions} \description{ Evaluates any
expression involving one or more function value (fv) objects, and returns another object of the same kind. } \usage{ eval.fv(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fv"} objects to be used in the expression. } \item{dotonly}{Logical. See Details.} \item{equiv}{Mapping between column names of different objects that are deemed to be equivalent. See Details.} \item{relabel}{ Logical value indicating whether to compute appropriate labels for the resulting function. This should normally be \code{TRUE} (the default). See Details. } } \details{ This is a wrapper to make it easier to perform pointwise calculations with the summary functions used in spatial statistics. An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link{Kest}} and its relatives. For example, suppose \code{X} is an object of class \code{"fv"} containing several different estimates of the Ripley's K function \eqn{K(r)}, evaluated at a sequence of values of \eqn{r}. Then \code{eval.fv(X+3)} effectively adds 3 to each function estimate in \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fv"} which are compatible (in particular they have the same vector of \eqn{r} values). Then \code{eval.fv(X + Y)} will add the corresponding function values in \code{X} and \code{Y}, and return the resulting function. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fv} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fv"}.
Each such name is replaced by a vector containing the function values. The expression is then evaluated. The result should be a vector; it is taken as the new vector of function values. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fv"} in the expression. If the objects are not compatible, they will be made compatible by \code{\link{harmonise.fv}}. If \code{dotonly=TRUE} (the default), the expression will be evaluated only for those columns of an \code{"fv"} object that contain values of the function itself (rather than values of the derivative of the function, the hazard rate, etc). If \code{dotonly=FALSE}, the expression will be evaluated for all columns. For example the result of \code{\link{Fest}} includes several columns containing estimates of the empty space function \eqn{F(r)}, but also includes an estimate of the \emph{hazard} \eqn{h(r)} of \eqn{F(r)}. Transformations that are valid for \eqn{F} may not be valid for \eqn{h}. Accordingly, \eqn{h} would normally be omitted from the calculation. The columns of an object \code{x} that represent the function itself are identified by its \dQuote{dot} names, \code{fvnames(x, ".")}. They are the columns normally plotted by \code{\link{plot.fv}} and identified by the symbol \code{"."} in plot formulas in \code{\link{plot.fv}}. The argument \code{equiv} can be used to specify that two different column names in different function objects are mathematically equivalent or cognate. It should be a list of \code{name=value} pairs, or a named vector of character strings, indicating the pairing of equivalent names. (Without this argument, these columns would be discarded.) See the Examples. The argument \code{relabel} should normally be \code{TRUE} (the default). It determines whether to compute appropriate mathematical labels and descriptions for the resulting function object (used when the object is printed or plotted). 
If \code{relabel=FALSE} then this does not occur, and the mathematical labels and descriptions in the result are taken from the function object that appears first in the expression. This reduces computation time slightly (for advanced use only). } \value{ Another object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{Kest}} } \examples{ # manipulating the K function X <- rpoispp(42) Ks <- Kest(X) eval.fv(Ks + 3) Ls <- eval.fv(sqrt(Ks/pi)) # manipulating two K functions Y <- rpoispp(20) Kr <- Kest(Y) Kdif <- eval.fv(Ks - Kr) Z <- eval.fv(sqrt(Ks/pi) - sqrt(Kr/pi)) ## Use of 'envir' U <- eval.fv(sqrt(K), list(K=Kest(cells))) ## Use of 'equiv' Fc <- Fest(cells) Gc <- Gest(cells) # Hanisch and Chiu-Stoyan estimators are cognate Dc <- eval.fv(Fc - Gc, equiv=list(cs="han")) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/update.interact.Rd0000644000176200001440000000227713333543264016267 0ustar liggesusers\name{update.interact} \alias{update.interact} \title{ Update an Interpoint Interaction } \description{ This command updates the \code{object} using the arguments given. } \usage{ \method{update}{interact}(object, \dots) } \arguments{ \item{object}{ Interpoint interaction (object of class \code{"interact"}). } \item{\dots}{ Additional or replacement values of parameters of \code{object}. } } \details{ This is a method for the generic function \code{\link[stats]{update}} for the class \code{"interact"} of interpoint interactions. It updates the \code{object} using the parameters given in the extra arguments \code{\dots}. The extra arguments must be given in the form \code{name=value} and must be recognisable to the interaction object. They override any parameters of the same name in \code{object}. } \value{ Another object of class \code{"interact"}, equivalent to \code{object} except for changes in parameter values. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{update.ppm}} } \examples{ Str <- Strauss(r=1) Str update(Str, r=2) M <- MultiStrauss(radii=matrix(1,2,2)) update(M, types=c("on", "off")) } \keyword{spatial} \keyword{models} spatstat/man/triplet.family.Rd0000644000176200001440000000230713333543264016132 0ustar liggesusers\name{triplet.family} \alias{triplet.family} \title{Triplet Interaction Family} \description{ An object describing the family of all Gibbs point processes with interaction order equal to 3. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have interaction order equal to 3, such as the triplet interaction process \cite{\link{Triplets}}. Anyway, \code{triplet.family} is an object of class \code{"isf"} containing a function \code{triplet.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \seealso{ \code{\link{Triplets}} to create the triplet interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{inforder.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/is.linim.Rd0000644000176200001440000000124113613547037014710 0ustar liggesusers\name{is.linim} \alias{is.linim} \title{Test Whether an Object is a Pixel Image on a Linear Network} \description{ Tests whether its argument is a pixel image on a linear network (object of class \code{"linim"}). } \usage{ is.linim(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the argument \code{x} is a pixel image on a linear network (object of class \code{"linim"}).
The object is determined to be an image if it inherits from class \code{"linim"}. } \value{ \code{TRUE} if \code{x} is a pixel image on a linear network, otherwise \code{FALSE}. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/run.simplepanel.Rd0000644000176200001440000001173613333543264016311 0ustar liggesusers\name{run.simplepanel} \alias{clear.simplepanel} \alias{redraw.simplepanel} \alias{run.simplepanel} \title{ Run Point-and-Click Interface } \description{ Execute various operations in a simple point-and-click user interface. } \usage{ run.simplepanel(P, popup=TRUE, verbose = FALSE) clear.simplepanel(P) redraw.simplepanel(P, verbose = FALSE) } \arguments{ \item{P}{ An interaction panel (object of class \code{"simplepanel"}, created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}). } \item{popup}{ Logical. If \code{popup=TRUE} (the default), the panel will be displayed in a new popup window. If \code{popup=FALSE}, the panel will be displayed on the current graphics window if it already exists, and on a new window otherwise. } \item{verbose}{ Logical. If \code{TRUE}, debugging information will be printed. } } \details{ These commands enable the user to run a simple, robust, point-and-click interface to any \R code. The interface is implemented using only the basic graphics package in \R. The argument \code{P} is an object of class \code{"simplepanel"}, created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}, which specifies the graphics to be displayed and the actions to be performed when the user interacts with the panel. The command \code{run.simplepanel(P)} activates the panel: the display is initialised and the graphics system waits for the user to click the panel. While the panel is active, the user can only interact with the panel; the \R command line interface and the \R GUI cannot be used. 
When the panel terminates (typically because the user clicked a button labelled Exit), control returns to the \R command line interface and the \R GUI. The command \code{clear.simplepanel(P)} clears all the display elements in the panel, resulting in a blank display except for the title of the panel. The command \code{redraw.simplepanel(P)} redraws all the buttons of the panel, according to the \code{redraw} functions contained in the panel. If \code{popup=TRUE} (the default), \code{run.simplepanel} begins by calling \code{\link[grDevices]{dev.new}} so that a new popup window is created; this window is closed using \code{\link[grDevices]{dev.off}} when \code{run.simplepanel} terminates. If \code{popup=FALSE}, the panel will be displayed on the current graphics window if it already exists, and on a new window otherwise; this window is not closed when \code{run.simplepanel} terminates. For more sophisticated control of the graphics focus (for example, to use the panel to control the display on another window), initialise the graphics devices yourself using \code{\link[grDevices]{dev.new}} or similar commands; save these devices in the shared environment \code{env} of the panel \code{P}; and write the click/redraw functions of \code{P} in such a way that they access these devices using \code{\link[grDevices]{dev.set}}. Then use \code{run.simplepanel} with \code{popup=FALSE}. } \value{ The return value of \code{run.simplepanel(P)} is the value returned by the \code{exit} function of \code{P}. See \code{\link{simplepanel}}. The functions \code{clear.simplepanel} and \code{redraw.simplepanel} return \code{NULL}. 
} \author{\adrian and \rolf } \seealso{ \code{\link{simplepanel}} } \examples{ if(interactive()) { # make boxes (alternatively use layout.boxes()) Bminus <- square(1) Bvalue <- shift(Bminus, c(1.2, 0)) Bplus <- shift(Bvalue, c(1.2, 0)) Bdone <- shift(Bplus, c(1.2, 0)) myboxes <- list(Bminus, Bvalue, Bplus, Bdone) myB <- do.call(boundingbox,myboxes) # make environment containing an integer count myenv <- new.env() assign("answer", 0, envir=myenv) # what to do when finished: return the count. myexit <- function(e) { return(get("answer", envir=e)) } # button clicks # decrement the count Cminus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans - 1, envir=e) return(TRUE) } # display the count (clicking does nothing) Cvalue <- function(...) { TRUE } # increment the count Cplus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans + 1, envir=e) return(TRUE) } # quit button Cdone <- function(e, xy) { return(FALSE) } myclicks <- list("-"=Cminus, value=Cvalue, "+"=Cplus, done=Cdone) # redraw the button that displays the current value of the count Rvalue <- function(button, nam, e) { plot(button, add=TRUE) ans <- get("answer", envir=e) text(centroid.owin(button), labels=ans) return(TRUE) } # make the panel P <- simplepanel("Counter", B=myB, boxes=myboxes, clicks=myclicks, redraws = list(NULL, Rvalue, NULL, NULL), exit=myexit, env=myenv) P run.simplepanel(P) } } \keyword{iplot} \keyword{utilities} spatstat/man/simplify.owin.Rd0000644000176200001440000000256613333543264016005 0ustar liggesusers\name{simplify.owin} \Rdversion{1.1} \alias{simplify.owin} \title{ Approximate a Polygon by a Simpler Polygon } \description{ Given a polygonal window, this function finds a simpler polygon that approximates it. } \usage{ simplify.owin(W, dmin) } \arguments{ \item{W}{ The polygon which is to be simplified. An object of class \code{"owin"}. } \item{dmin}{ Numeric value. The smallest permissible length of an edge.
} } \details{ This function simplifies a polygon \code{W} by recursively deleting the shortest edge of \code{W} until all remaining edges are longer than the specified minimum length \code{dmin}, or until there are only three edges left. The argument \code{W} must be a window (object of class \code{"owin"}). It should be of type \code{"polygonal"}. If \code{W} is a rectangle, it is returned without alteration. The simplification algorithm is not yet implemented for binary masks. If \code{W} is a mask, an error is generated. } \value{ Another window (object of class \code{"owin"}) of type \code{"polygonal"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{owin}} } \examples{ plot(letterR, col="red") plot(simplify.owin(letterR, 0.3), col="blue", add=TRUE) W <- Window(chorley) plot(W) WS <- simplify.owin(W, 2) plot(WS, add=TRUE, border="green") points(vertices(WS)) } \keyword{spatial} \keyword{math} spatstat/man/WindowOnly.Rd0000644000176200001440000000455213333543262015302 0ustar liggesusers\name{WindowOnly} \alias{Window.ppm} \alias{Window.kppm} \alias{Window.dppm} \alias{Window.lpp} \alias{Window.lppm} \alias{Window.msr} \alias{Window.quad} \alias{Window.quadratcount} \alias{Window.quadrattest} \alias{Window.tess} \alias{Window.layered} \alias{Window.distfun} \alias{Window.nnfun} \alias{Window.funxy} \alias{Window.rmhmodel} \alias{Window.leverage.ppm} \alias{Window.influence.ppm} \title{Extract Window of Spatial Object} \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract the window in which the object is defined. 
} \usage{ \method{Window}{ppm}(X, \dots, from=c("points", "covariates")) \method{Window}{kppm}(X, \dots, from=c("points", "covariates")) \method{Window}{dppm}(X, \dots, from=c("points", "covariates")) \method{Window}{lpp}(X, \dots) \method{Window}{lppm}(X, \dots) \method{Window}{msr}(X, \dots) \method{Window}{quad}(X, \dots) \method{Window}{quadratcount}(X, \dots) \method{Window}{quadrattest}(X, \dots) \method{Window}{tess}(X, \dots) \method{Window}{layered}(X, \dots) \method{Window}{distfun}(X, \dots) \method{Window}{nnfun}(X, \dots) \method{Window}{funxy}(X, \dots) \method{Window}{rmhmodel}(X, \dots) \method{Window}{leverage.ppm}(X, \dots) \method{Window}{influence.ppm}(X, \dots) } \arguments{ \item{X}{A spatial object.} \item{\dots}{Ignored.} \item{from}{Character string. See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ These are methods for the generic function \code{\link{Window}} which extract the spatial window in which the object \code{X} is defined. The argument \code{from} applies when \code{X} is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{from="points"} (the default), \code{Window} extracts the window of the original point pattern data to which the model was fitted. If \code{from="covariates"} then \code{Window} returns the window in which the spatial covariates of the model were provided. } \seealso{ \code{\link{Window}}, \code{\link{Window.ppp}}, \code{\link{Window.psp}}.
\code{\link{owin.object}} } \examples{ X <- quadratcount(cells, 4) Window(X) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/is.hybrid.Rd0000644000176200001440000000350313333543263015060 0ustar liggesusers\name{is.hybrid} \alias{is.hybrid} \alias{is.hybrid.ppm} \alias{is.hybrid.interact} \title{ Test Whether Object is a Hybrid } \description{ Tests whether a point process model or point process interaction is a hybrid of several interactions. } \usage{ is.hybrid(x) \method{is.hybrid}{ppm}(x) \method{is.hybrid}{interact}(x) } \arguments{ \item{x}{ A point process model (object of class \code{"ppm"}) or a point process interaction structure (object of class \code{"interact"}). } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The function \code{is.hybrid} is generic, with methods for point process models (objects of class \code{"ppm"}) and point process interactions (objects of class \code{"interact"}). These functions return \code{TRUE} if the object \code{x} is a hybrid, and \code{FALSE} if it is not a hybrid. Hybrids of two or more interpoint interactions are created by the function \code{\link{Hybrid}}. Such a hybrid interaction can then be fitted to point pattern data using \code{\link{ppm}}. } \value{ \code{TRUE} if the object is a hybrid, and \code{FALSE} otherwise. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43.
\url{http://www.jstatsoft.org/v55/i11/} } \seealso{ \code{\link{Hybrid}} } \examples{ S <- Strauss(0.1) is.hybrid(S) H <- Hybrid(Strauss(0.1), Geyer(0.2, 3)) is.hybrid(H) data(redwood) fit <- ppm(redwood, ~1, H) is.hybrid(fit) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/model.images.Rd0000644000176200001440000001130713333543263015532 0ustar liggesusers\name{model.images} \alias{model.images} \alias{model.images.ppm} \alias{model.images.dppm} \alias{model.images.kppm} \alias{model.images.lppm} \alias{model.images.slrm} \title{Compute Images of Constructed Covariates} \description{ For a point process model fitted to spatial point pattern data, this function computes pixel images of the covariates in the design matrix. } \usage{ model.images(object, ...) \method{model.images}{ppm}(object, W = as.owin(object), ...) \method{model.images}{kppm}(object, W = as.owin(object), ...) \method{model.images}{dppm}(object, W = as.owin(object), ...) \method{model.images}{lppm}(object, L = as.linnet(object), ...) \method{model.images}{slrm}(object, ...) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"} or \code{"slrm"} or \code{"dppm"}. } \item{W}{ A window (object of class \code{"owin"}) in which the images should be computed. Defaults to the window in which the model was fitted. } \item{L}{ A linear network (object of class \code{"linnet"}) in which the images should be computed. Defaults to the network in which the model was fitted. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } } \details{ This command is similar to \code{\link{model.matrix.ppm}} except that it computes pixel images of the covariates, instead of computing the covariate values at certain points only. 
The \code{object} must be a fitted spatial point process model object of class \code{"ppm"} (produced by the model-fitting function \code{\link{ppm}}) or class \code{"kppm"} (produced by the fitting function \code{\link{kppm}}) or class \code{"dppm"} (produced by the fitting function \code{\link{dppm}}) or class \code{"lppm"} (produced by \code{\link{lppm}}) or class \code{"slrm"} (produced by \code{\link{slrm}}). The spatial covariates required by the model-fitting procedure are computed at every pixel location in the window \code{W}. For \code{lppm} objects, the covariates are computed at every location on the network \code{L}. For \code{slrm} objects, the covariates are computed on the pixels that were used to fit the model. Note that the spatial covariates computed here are not the original covariates that were supplied when fitting the model. Rather, they are the covariates that actually appear in the loglinear representation of the (conditional) intensity and in the columns of the design matrix. For example, they might include dummy or indicator variables for different levels of a factor, depending on the contrasts that are in force. The pixel resolution is determined by \code{W} if \code{W} is a mask (that is \code{W$type = "mask"}). Otherwise, the pixel resolution is determined by \code{\link{spatstat.options}}. The format of the result depends on whether the original point pattern data were marked or unmarked. \itemize{ \item If the original dataset was unmarked, the result is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The names of the list elements are the names of the covariates determined by \code{\link{model.matrix.lm}}. The result is also of class \code{"solist"} so that it can be plotted immediately. \item If the original dataset was a multitype point pattern, the result is a \code{\link{hyperframe}} with one column for each possible type of points. 
Each column is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The row names of the hyperframe are the names of the covariates determined by \code{\link{model.matrix.lm}}. } } \value{ A list (of class \code{"solist"}) or array (of class \code{"hyperframe"}) containing pixel images (objects of class \code{"im"}). For \code{model.images.lppm}, the images are also of class \code{"linim"}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{model.matrix.ppm}}, \code{\link[stats]{model.matrix}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{lppm}}, \code{\link{dppm}}, \code{\link{kppm}}, \code{\link{slrm}}, \code{\link{im}}, \code{\link{im.object}}, \code{\link{plot.solist}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(cells ~ x) model.images(fit) B <- owin(c(0.2, 0.4), c(0.3, 0.8)) model.images(fit, B) fit2 <- ppm(cells ~ cut(x,3)) model.images(fit2) fit3 <- slrm(japanesepines ~ x) model.images(fit3) fit4 <- ppm(amacrine ~ marks + x) model.images(fit4) } \keyword{spatial} \keyword{models} spatstat/man/dclf.sigtrace.Rd0000644000176200001440000001364313333543263015703 0ustar liggesusers\name{dclf.sigtrace} \alias{dclf.sigtrace} \alias{mad.sigtrace} \alias{mctest.sigtrace} \title{ Significance Trace of Cressie-Loosmore-Ford or Maximum Absolute Deviation Test } \description{ Generates a Significance Trace of the Diggle(1986)/ Cressie (1991)/ Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.sigtrace(X, \dots) mad.sigtrace(X, \dots) mctest.sigtrace(X, fun=Lest, \dots, exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). 
} \item{\dots}{ Arguments passed to \code{\link{envelope}} or \code{\link{mctest.progress}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify a one-sided test, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle (1986)/ Cressie (1991)/Loosmore and Ford (2006) test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. 
The command \code{dclf.sigtrace} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. Similarly \code{mad.sigtrace} performs \code{\link{mad.test}} using all possible intervals and returns the \eqn{p}-values. More generally, \code{mctest.sigtrace} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Monte Carlo \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion (when \code{interpolate=FALSE}) or the delta method and normal approximation (when \code{interpolate=TRUE}). If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. 
Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Submitted for publication. Bowman, A.W. and Azzalini, A. (1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. } \author{ \adrian, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian \rolf and \ege } \seealso{ \code{\link{dclf.test}} for the tests; \code{\link{dclf.progress}} for progress plots. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. See also \code{\link{dg.sigtrace}}. } \examples{ plot(dclf.sigtrace(cells, Lest, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat/man/sumouter.Rd0000644000176200001440000000464713333543264015063 0ustar liggesusers\name{sumouter} \alias{sumouter} \alias{quadform} \alias{bilinearform} \title{Compute Quadratic Forms} \description{ Calculates certain quadratic forms of matrices. } \usage{ sumouter(x, w=NULL, y=x) quadform(x, v) bilinearform(x, v, y) } \arguments{ \item{x,y}{A matrix, whose rows are the vectors in the quadratic form.} \item{w}{Optional vector of weights} \item{v}{Matrix determining the quadratic form} } \value{ A vector or matrix. } \details{ The matrices \code{x} and \code{y} will be interpreted as collections of row vectors. They must have the same number of rows. The command \code{sumouter} computes the sum of the outer products of corresponding row vectors, weighted by the entries of \code{w}: \deqn{ M = \sum_i w_i x_i y_i^\top }{ M = sum[i] (w[i] * outer(x[i,], y[i,])) } where the sum is over all rows of \code{x} (after removing any rows containing \code{NA} or other non-finite values). 
If \code{w} is missing, the weights will be taken as 1. The result is a \eqn{p \times q}{p * q} matrix where \code{p = ncol(x)} and \code{q = ncol(y)}. The command \code{quadform} evaluates the quadratic form, defined by the matrix \code{v}, for each of the row vectors of \code{x}: \deqn{ y_i = x_i V x_i^\top }{ y[i] = x[i,] \%*\% v \%*\% t(x[i,]) } The result \code{y} is a numeric vector of length \code{n} where \code{n = nrow(x)}. If \code{x[i,]} contains \code{NA} or other non-finite values, then \code{y[i] = NA}. The command \code{bilinearform} evaluates the more general bilinear form defined by the matrix \code{v}. Here \code{x} and \code{y} must be matrices of the same dimensions. For each row vector of \code{x} and corresponding row vector of \code{y}, the bilinear form is \deqn{ z_i = x_i V y_i^\top }{ z[i] = x[i,] \%*\% v \%*\% t(y[i,]) } The result \code{z} is a numeric vector of length \code{n} where \code{n = nrow(x)}. If \code{x[i,]} or \code{y[i,]} contains \code{NA} or other non-finite values, then \code{z[i] = NA}. } \examples{ x <- matrix(1:12, 4, 3) dimnames(x) <- list(c("Wilma", "Fred", "Barney", "Betty"), letters[1:3]) x sumouter(x) w <- 4:1 sumouter(x, w) v <- matrix(1, 3, 3) quadform(x, v) # should be the same as quadform(x, v) bilinearform(x, v, x) # See what happens with NA's x[3,2] <- NA sumouter(x, w) quadform(x, v) } \author{\adrian and \rolf } \keyword{array} spatstat/man/texturemap.Rd0000644000176200001440000000323613333543264015367 0ustar liggesusers\name{texturemap} \alias{texturemap} \title{ Texture Map } \description{ Create a map that associates data values with graphical textures. } \usage{ texturemap(inputs, textures, ...) } \arguments{ \item{inputs}{ A vector containing all the data values that will be mapped to textures. } \item{textures}{ Optional. A vector of integer codes specifying the textures to which the \code{inputs} will be mapped. } \item{\dots}{ Other graphics parameters such as \code{col}, \code{lwd}, \code{lty}. 
} } \details{ A texture map is an association between data values and graphical textures. The command \code{texturemap} creates an object of class \code{"texturemap"} that represents a texture map. Once a texture map has been created, it can be applied to any suitable data to generate a texture plot of those data using \code{\link{textureplot}}. This makes it easy to ensure that the \emph{same} texture map is used in two different plots. The texture map can also be plotted in its own right. The argument \code{inputs} should be a vector containing all the possible data values (such as the levels of a factor) that are to be mapped. The \code{textures} should be integer values between 1 and 8, representing the eight possible textures described in the help for \code{\link{add.texture}}. The default is \code{textures = 1:n} where \code{n} is the length of \code{inputs}. } \value{ An object of class \code{"texturemap"} representing the texture map. } \author{ \spatstatAuthors. } \seealso{ \code{\link{textureplot}} } \examples{ texturemap(letters[1:4], 2:5, col=1:4, lwd=2) } \keyword{spatial} \keyword{hplot} spatstat/man/objsurf.Rd0000644000176200001440000000533113333543263014640 0ustar liggesusers\name{objsurf} \alias{objsurf} \alias{objsurf.dppm} \alias{objsurf.kppm} \alias{objsurf.minconfit} \title{ Objective Function Surface } \description{ For a model that was fitted by optimisation, compute the values of the objective function in a neighbourhood of the optimal value. } \usage{ objsurf(x, \dots) \method{objsurf}{dppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) \method{objsurf}{kppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) \method{objsurf}{minconfit}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) } \arguments{ \item{x}{ Some kind of model that was fitted by finding the optimal value of an objective function. An object of class \code{"dppm"}, \code{"kppm"} or \code{"minconfit"}. } \item{\dots}{ Extra arguments are usually ignored. 
} \item{ngrid}{ Number of grid points to evaluate along each axis. Either a single integer, or a pair of integers. For example \code{ngrid=32} would mean a \code{32 * 32} grid. } \item{ratio}{ Number greater than 1 determining the range of parameter values to be considered. If the optimal parameter value is \code{opt} then the objective function will be evaluated for values between \code{opt/ratio} and \code{opt * ratio}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The object \code{x} should be some kind of model that was fitted by maximising or minimising the value of an objective function. The objective function will be evaluated on a grid of values of the model parameters. Currently the following types of objects are accepted: \itemize{ \item an object of class \code{"dppm"} representing a determinantal point process. See \code{\link{dppm}}. \item an object of class \code{"kppm"} representing a cluster point process or Cox point process. See \code{\link{kppm}}. \item an object of class \code{"minconfit"} representing a minimum-contrast fit between a summary function and its theoretical counterpart. See \code{\link{mincontrast}}. } The result is an object of class \code{"objsurf"} which can be printed and plotted: see \code{\link{methods.objsurf}}. } \value{ An object of class \code{"objsurf"} which can be printed and plotted. Essentially a list containing entries \code{x}, \code{y}, \code{z} giving the parameter values and objective function values. } \author{ \adrian and \ege. 
} \seealso{ \code{\link{methods.objsurf}}, \code{\link{kppm}}, \code{\link{mincontrast}} } \examples{ fit <- kppm(redwood ~ 1, "Thomas") os <- objsurf(fit) if(interactive()) { plot(os) contour(os, add=TRUE) persp(os) } } \keyword{spatial} \keyword{models} spatstat/man/as.function.leverage.ppm.Rd0000644000176200001440000000243213333543262017777 0ustar liggesusers\name{as.function.leverage.ppm} \alias{as.function.leverage.ppm} \title{ Convert Leverage Object to Function of Coordinates } \description{ Converts an object of class \code{"leverage.ppm"} to a function of the \eqn{x} and \eqn{y} coordinates. } \usage{ \method{as.function}{leverage.ppm}(x, ...) } \arguments{ \item{x}{ Object of class \code{"leverage.ppm"} produced by \code{\link{leverage.ppm}}. } \item{\dots}{ Ignored. } } \details{ An object of class \code{"leverage.ppm"} represents the leverage function of a fitted point process model. This command converts the object to a \code{function(x,y)} where the arguments \code{x} and \code{y} are (vectors of) spatial coordinates. This function returns the leverage values at the specified locations (calculated by referring to the nearest location where the leverage has been computed). } \value{ A function in the \R language, also belonging to the class \code{"funxy"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{as.im.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X ~x+y) lev <- leverage(fit) f <- as.function(lev) f(0.2, 0.3) # evaluate at (x,y) coordinates y <- f(X) # evaluate at a point pattern } \keyword{spatial} \keyword{manip} spatstat/man/nnfun.lpp.Rd0000644000176200001440000000570413537662317015120 0ustar liggesusers\name{nnfun.lpp} \Rdversion{1.1} \alias{nnfun.lpp} \title{ Nearest Neighbour Map on Linear Network } \description{ Compute the nearest neighbour function of a point pattern on a linear network. 
} \usage{ \method{nnfun}{lpp}(X, ..., k=1, value=c("index", "mark")) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{k}{ Integer. The algorithm finds the \code{k}th nearest neighbour in \code{X} from any spatial location. } \item{value}{ String (partially matched) specifying whether to return the index of the neighbour (\code{value="index"}, the default) or the mark value of the neighbour (\code{value="mark"}). } \item{\dots}{ Other arguments are ignored. } } \details{ The (geodesic) \emph{nearest neighbour function} of a point pattern \code{X} on a linear network \code{L} tells us which point of \code{X} is closest to any given location. If \code{X} is a point pattern on a linear network \code{L}, the \emph{nearest neighbour function} of \code{X} is the mathematical function \eqn{f} defined for any location \eqn{s} on the network by \code{f(s) = i}, where \code{X[i]} is the closest point of \code{X} to the location \code{s} measured by the shortest path. In other words the value of \code{f(s)} is the identifier or serial number of the closest point of \code{X}. The command \code{nnfun.lpp} is a method for the generic command \code{\link{nnfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y, \dots}, that represents the nearest neighbour function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields a vector of identifiers or serial numbers of the data points closest to these spatial locations. More efficiently \code{f} can take the arguments \code{x, y, seg, tp} where \code{seg} and \code{tp} are the local coordinates on the network. The result of \code{f <- nnfun(X)} also belongs to the class \code{"linfun"}. 
It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} in the \R language, with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To compute the \emph{distance} to the nearest neighbour, see \code{\link{distfun.lpp}}. } \examples{ X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) plot(nnfun(chicago, value="m")) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat/man/plot.quadrattest.Rd0000644000176200001440000000311413333543264016502 0ustar liggesusers\name{plot.quadrattest} \alias{plot.quadrattest} \title{ Display the result of a quadrat counting test. } \description{ Given the result of a quadrat counting test, graphically display the quadrats that were used, the observed and expected counts, and the residual in each quadrat. } \usage{ \method{plot}{quadrattest}(x, ..., textargs=list()) } \arguments{ \item{x}{ Object of class \code{"quadrattest"} containing the result of \code{\link{quadrat.test}}. } \item{\dots}{ Additional arguments passed to \code{\link{plot.tess}} to control the display of the quadrats. } \item{textargs}{ List of additional arguments passed to \code{\link[graphics]{text.default}} to control the appearance of the text. } } \details{ This is the plot method for objects of class \code{"quadrattest"}. Such an object is produced by \code{\link{quadrat.test}} and represents the result of a \eqn{\chi^2}{chi^2} test for a spatial point pattern. The quadrats are first plotted using \code{\link{plot.tess}}. Then in each quadrat, the observed and expected counts and the Pearson residual are displayed as text using \code{\link[graphics]{text.default}}. Observed count is displayed at top left; expected count at top right; and Pearson residual at bottom. 
} \value{ Null. } \examples{ plot(quadrat.test(swedishpines, 3)) } \seealso{ \code{\link{quadrat.test}}, \code{\link{plot.tess}}, \code{\link[graphics]{text.default}}, \code{\link{plot.quadratcount}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} spatstat/man/relrisk.Rd0000644000176200001440000000253313333543264014643 0ustar liggesusers\name{relrisk} \alias{relrisk} \title{ Estimate of Spatially-Varying Relative Risk } \description{ Generic command to estimate the spatially-varying probability of each type of point, or the ratios of such probabilities. } \usage{ relrisk(X, \dots) } \arguments{ \item{X}{ Either a point pattern (class \code{"ppp"}) or a fitted point process model (class \code{"ppm"}) from which the probabilities will be estimated. } \item{\dots}{ Additional arguments appropriate to the method. } } \details{ In a point pattern containing several different types of points, we may be interested in the spatially-varying probability of each possible type, or the relative risks which are the ratios of such probabilities. The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. The function \code{\link{relrisk.ppp}} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. The function \code{\link{relrisk.ppm}} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. } \seealso{ \code{\link{relrisk.ppp}}, \code{\link{relrisk.ppm}}. } \author{\adrian \rolf and \ege } \keyword{spatial} spatstat/man/lppm.Rd0000644000176200001440000000723113333543263014137 0ustar liggesusers\name{lppm} \alias{lppm} \alias{lppm.formula} \alias{lppm.lpp} \title{ Fit Point Process Model to Point Pattern on Linear Network } \description{ Fit a point process model to a point pattern dataset on a linear network } \usage{ lppm(X, ...) 
\method{lppm}{formula}(X, interaction=NULL, ..., data=NULL) \method{lppm}{lpp}(X, ..., eps=NULL, nd=1000, random=FALSE) } \arguments{ \item{X}{ Either an object of class \code{"lpp"} specifying a point pattern on a linear network, or a \code{formula} specifying the point process model. } \item{\dots}{ Arguments passed to \code{\link{ppm}}. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{data}{ Optional. The values of spatial covariates (other than the Cartesian coordinates) required by the model. A list whose entries are images, functions, windows, tessellations or single numbers. } \item{eps}{ Optional. Spacing between dummy points along each segment of the network. } \item{nd}{ Optional. Total number of dummy points placed on the network. Ignored if \code{eps} is given. } \item{random}{ Logical value indicating whether the grid of dummy points should be placed at a randomised starting position. } } \details{ This function fits a point process model to data that specify a point pattern on a linear network. It is a counterpart of the model-fitting function \code{\link{ppm}} designed to work with objects of class \code{"lpp"} instead of \code{"ppp"}. The function \code{lppm} is generic, with methods for the classes \code{formula} and \code{lpp}. In \code{lppm.lpp} the first argument \code{X} should be an object of class \code{"lpp"} (created by the command \code{\link{lpp}}) specifying a point pattern on a linear network. In \code{lppm.formula}, the first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted.
It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a point pattern on a linear network (object of class \code{"lpp"}) to which the model should be fitted, or an expression which evaluates to such a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. Other arguments \code{...} are passed from \code{lppm.formula} to \code{lppm.lpp} and from \code{lppm.lpp} to \code{\link{ppm}}. } \value{ An object of class \code{"lppm"} representing the fitted model. There are methods for \code{print}, \code{predict}, \code{coef} and similar functions. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{methods.lppm}}, \code{\link{predict.lppm}}, \code{\link{ppm}}, \code{\link{lpp}}. } \examples{ X <- runiflpp(15, simplenet) lppm(X ~1) lppm(X ~x) marks(X) <- factor(rep(letters[1:3], 5)) lppm(X ~ marks) lppm(X ~ marks * x) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} \keyword{models} spatstat/man/rpoisppOnLines.Rd0000644000176200001440000000742613333543264016162 0ustar liggesusers\name{rpoisppOnLines} \alias{rpoisppOnLines} \title{Generate Poisson Point Pattern on Line Segments} \description{ Given a line segment pattern, generate a Poisson random point pattern on the line segments. } \usage{ rpoisppOnLines(lambda, L, lmax = NULL, \dots, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{Intensity of the Poisson process. 
A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should be generated. } \item{lmax}{ Optional upper bound (for increased computational efficiency). A known upper bound for the values of \code{lambda}, if \code{lambda} is a function or a pixel image. That is, \code{lmax} should be a number which is known to be greater than or equal to all values of \code{lambda}. } \item{\dots}{Additional arguments passed to \code{lambda} if it is a function. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This command generates a Poisson point process on the one-dimensional system of line segments in \code{L}. The result is a point pattern consisting of points lying on the line segments in \code{L}. The number of random points falling on any given line segment follows a Poisson distribution. The patterns of points on different segments are independent. The intensity \code{lambda} is the expected number of points per unit \bold{length} of line segment. It may be constant, or it may depend on spatial location. In order to generate an unmarked Poisson process, the argument \code{lambda} may be a single number, or a \code{function(x,y)}, or a pixel image (object of class \code{"im"}). In order to generate a \emph{marked} Poisson process, \code{lambda} may be a numeric vector, a list of functions, or a list of images, each entry giving the intensity for a different mark value. If \code{lambda} is not numeric, then the (Lewis-Shedler) rejection method is used. The rejection method requires knowledge of \code{lmax}, the maximum possible value of \code{lambda}. 
This should be either a single number, or a numeric vector of the same length as \code{lambda}. If \code{lmax} is not given, it will be computed approximately, by sampling many values of \code{lambda}. If \code{lmax} is given, then it \bold{must} be larger than any possible value of \code{lambda}, otherwise the results of the algorithm will be incorrect. } \value{ If \code{nsim = 1}, a point pattern (object of class \code{"ppp"}) in the same window as \code{L}. If \code{nsim > 1}, a list of such point patterns. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}}, \code{\link{rpoispp}} } \examples{ live <- interactive() L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) if(live) plot(L, main="") # uniform intensity Y <- rpoisppOnLines(4, L) if(live) plot(Y, add=TRUE, pch="+") # uniform MARKED process with types 'a' and 'b' Y <- rpoisppOnLines(c(a=4, b=5), L) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is a function Y <- rpoisppOnLines(function(x,y){ 10 * x^2}, L, 10) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is an image Z <- as.im(function(x,y){10 * sqrt(x+y)}, unit.square()) Y <- rpoisppOnLines(Z, L, 15) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Ldot.inhom.Rd0000644000176200001440000000655313571674202015212 0ustar liggesusers\name{Ldot.inhom} \alias{Ldot.inhom} \title{ Inhomogeneous Multitype L Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{L} function. } \usage{ Ldot.inhom(X, i, \dots, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. 
} \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kdot.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}(r)}{Li.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{i\bullet}(r)}{Li.(r)} for a marked Poisson process, identical to \eqn{r}. } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}(r)}{Li.(r)} obtained by the edge corrections named. } \details{ This a generalisation of the function \code{\link{Ldot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kdot.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{i\bullet}(r)}{Ki.(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Ldot}}, \code{\link{Linhom}}, \code{\link{Kdot.inhom}}, \code{\link{Lcross.inhom}}. 
} \examples{ # Lansing Woods data lan <- lansing lan <- lan[seq(1,npoints(lan), by=10)] ma <- split(lan)$maple lg <- unmark(lan) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") L <- Ldot.inhom(lan, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Ldot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat/man/triangulate.owin.Rd0000644000176200001440000000236713333543264016467 0ustar liggesusers\name{triangulate.owin} \alias{triangulate.owin} \title{ Decompose Window into Triangles } \description{ Given a spatial window, this function decomposes the window into disjoint triangles. The result is a tessellation of the window in which each tile is a triangle. } \usage{ triangulate.owin(W) } \arguments{ \item{W}{Window (object of class \code{"owin"}).} } \details{ The window \code{W} will be decomposed into disjoint triangles. The result is a tessellation of \code{W} in which each tile is a triangle. All triangle vertices lie on the boundary of the original polygon. The window is first converted to a polygonal window using \code{\link{as.polygonal}}. The vertices of the polygonal window are extracted, and the Delaunay triangulation of these vertices is computed using \code{\link{delaunay}}. Each Delaunay triangle is intersected with the window: if the result is not a triangle, the triangulation procedure is applied recursively to this smaller polygon. } \value{ Tessellation (object of class \code{"tess"}). 
} \author{ \spatstatAuthors } \seealso{ \code{\link{tess}}, \code{\link{delaunay}}, \code{\link{as.polygonal}} } \examples{ plot(triangulate.owin(letterR)) } \keyword{spatial} \keyword{manip} spatstat/man/rcell.Rd0000644000176200001440000000645013557001576014276 0ustar liggesusers\name{rcell} \alias{rcell} \title{Simulate Baddeley-Silverman Cell Process} \description{ Generates a random point pattern, a simulated realisation of the Baddeley-Silverman cell process model. } \usage{ rcell(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx, N=10, nsim=1, drop=TRUE) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{ Number of columns of cells in the window. Incompatible with \code{dx}. } \item{ny}{ Number of rows of cells in the window. Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{ Width of the cells. Incompatible with \code{nx}. } \item{dy}{ Height of the cells. Incompatible with \code{ny}. } \item{N}{ Integer. Distributional parameter: the maximum number of random points in each cell. Passed to \code{\link{rcellnumber}}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ A point pattern (an object of class \code{"ppp"}) if \code{nsim=1}, or a list of point patterns if \code{nsim > 1}. } \details{ This function generates a simulated realisation of the \dQuote{cell process} (Baddeley and Silverman, 1984), a random point process with the same second-order properties as the uniform Poisson process. In particular, the \eqn{K} function of this process is identical to the \eqn{K} function of the uniform Poisson process (aka Complete Spatial Randomness). The same holds for the pair correlation function and all other second-order properties. 
The cell process is a counterexample to the claim that the \eqn{K} function completely characterises a point pattern. A cell process is generated by dividing space into equal rectangular tiles. In each tile, a random number of random points is placed. By default, there are either \eqn{0}, \eqn{1} or \eqn{10} points, with probabilities \eqn{1/10}, \eqn{8/9} and \eqn{1/90} respectively. The points within a tile are independent and uniformly distributed in that tile, and the numbers of points in different tiles are independent random integers. The tile width is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The tile height is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. The cell process is then generated in these tiles. The random numbers of points are generated by \code{\link{rcellnumber}}. Some of the resulting random points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. } \seealso{ \code{\link{rcellnumber}}, \code{\link{rstrat}}, \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{Kest}} } \examples{ X <- rcell(nx=15) plot(X) plot(Kest(X)) } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Kmulti.inhom.Rd0000644000176200001440000002615713571674202015557 0ustar liggesusers\name{Kmulti.inhom} \alias{Kmulti.inhom} \title{ Inhomogeneous Marked K-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}, adjusted for spatially varying intensity. 
} \usage{ Kmulti.inhom(X, I, J, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process \code{X[I]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[I]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process \code{X[J]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[J]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{Ignored.} \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. 
} \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points, the first point belonging to subset \code{I} and the second point to subset \code{J}. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by kernel smoothing. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.ppm}} or \code{\link{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. } \details{ The function \code{Kmulti.inhom} is the counterpart, for spatially-inhomogeneous marked point patterns, of the multitype \eqn{K} function \code{\link{Kmulti}}. Suppose \eqn{X} is a marked point process, with marks of any kind. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are two sub-processes, possibly overlapping. Typically \eqn{X_I}{X[I]} would consist of those points of \eqn{X} whose marks lie in a specified range of mark values, and similarly for \eqn{X_J}{X[J]}. Suppose that \eqn{\lambda_I(u)}{lambdaI(u)}, \eqn{\lambda_J(u)}{lambdaJ(u)} are the spatially-varying intensity functions of \eqn{X_I}{X[I]} and \eqn{X_J}{X[J]} respectively. Consider all the pairs of points \eqn{(u,v)} in the point process \eqn{X} such that the first point \eqn{u} belongs to \eqn{X_I}{X[I]}, the second point \eqn{v} belongs to \eqn{X_J}{X[J]}, and the distance between \eqn{u} and \eqn{v} is less than a specified distance \eqn{r}. Give this pair \eqn{(u,v)} the numerical weight \eqn{1/(\lambda_I(u)\lambda_J(v))}{1/(lambdaI(u) lambdaJ(v))}. Calculate the sum of these weights over all pairs of points as described. This sum (after appropriate edge-correction and normalisation) is the estimated inhomogeneous multitype \eqn{K} function. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. 
The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process identified by index \code{I}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the intensity of \code{X[I]} at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the intensity of \code{X[I]} evaluated only at the data points of \code{X[I]}. The length of this vector must equal the number of points in \code{X[I]}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). 
The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} supplies the values of the intensity of the sub-process identified by index \code{J}. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Biases due to edge effects are treated in the same manner as in \code{\link{Kinhom}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti.inhom}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. 
(2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kmulti}}, \code{\link{Kdot.inhom}}, \code{\link{Kcross.inhom}}, \code{\link{pcf}} } \examples{ # Finnish Pines data: marked by diameter and height plot(finpines, which.marks="height") II <- (marks(finpines)$height <= 2) JJ <- (marks(finpines)$height > 3) K <- Kmulti.inhom(finpines, II, JJ) plot(K) # functions determining subsets f1 <- function(X) { marks(X)$height <= 2 } f2 <- function(X) { marks(X)$height > 3 } K <- Kmulti.inhom(finpines, f1, f2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/unstack.solist.Rd0000644000176200001440000000343613333543264016157 0ustar liggesusers\name{unstack.solist} \alias{unstack.solist} \alias{unstack.layered} \title{ Unstack Each Spatial Object in a List of Objects } \description{ Given a list of two-dimensional spatial objects, apply \code{unstack} to each object, and collect the results. } \usage{ \method{unstack}{solist}(x, \dots) \method{unstack}{layered}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"solist"} or \code{"layered"} representing a list of two-dimensional spatial objects. } \item{\dots}{ Ignored. } } \details{ The functions defined here are methods for the generic \code{\link[utils]{unstack}}. They expect the argument \code{x} to be a list of spatial objects, of class \code{"solist"} or \code{"layered"}. Each spatial object in the list \code{x} will be unstacked by applying the relevant method for \code{\link[utils]{unstack}}. This means that \itemize{ \item a marked point pattern with several columns of marks will be separated into several point patterns, each having a single column of marks \item a measure with \eqn{k}-dimensional vector values will be separated into \eqn{k} measures with scalar values } The resulting unstacked objects will be collected into a list of the same kind as \code{x}. 
Typically the length of \code{unstack(x)} is greater than the length of \code{x}. } \value{ A list belonging to the same class as \code{x}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link{unstack.msr}}, \code{\link{unstack.ppp}}, \code{\link{unstack.lpp}}, \code{\link{unstack.psp}} } \examples{ A <- solist(finpines=finpines, cells=cells) A unstack(A) B <- layered(fin=finpines, loc=unmark(finpines), plotargs=list(list(), list(pch=16))) B plot(B) unstack(B) plot(unstack(B)) } \keyword{spatial} \keyword{manip} spatstat/man/spatstat-package.Rd0000644000176200001440000024242613571674202016434 0ustar liggesusers\name{spatstat-package} \alias{spatstat-package} \alias{spatstat} \docType{package} \title{The Spatstat Package} \description{ This is a summary of the features of \pkg{spatstat}, a package in \R for the statistical analysis of spatial point patterns. } \details{ \pkg{spatstat} is a package for the statistical analysis of spatial data. Its main focus is the analysis of spatial patterns of points in two-dimensional space. The points may carry auxiliary data (`marks'), and the spatial region in which the points were recorded may have arbitrary shape. The package is designed to support a complete statistical analysis of spatial data. It supports \itemize{ \item creation, manipulation and plotting of point patterns; \item exploratory data analysis; \item spatial random sampling; \item simulation of point process models; \item parametric model-fitting; \item non-parametric smoothing and regression; \item formal inference (hypothesis tests, confidence intervals); \item model diagnostics. } Apart from two-dimensional point patterns and point processes, \pkg{spatstat} also supports point patterns in three dimensions, point patterns in multidimensional space-time, point patterns on a linear network, patterns of line segments in two dimensions, and spatial tessellations and random sets in two dimensions. 
The package can fit several types of point process models to a point pattern dataset: \itemize{ \item Poisson point process models (by Berman-Turner approximate maximum likelihood or by spatial logistic regression) \item Gibbs/Markov point process models (by Baddeley-Turner approximate maximum pseudolikelihood, Coeurjolly-Rubak logistic likelihood, or Huang-Ogata approximate maximum likelihood) \item Cox/cluster point process models (by Waagepetersen's two-step fitting procedure and minimum contrast, composite likelihood, or Palm likelihood) \item determinantal point process models (by Waagepetersen's two-step fitting procedure and minimum contrast, composite likelihood, or Palm likelihood) } The models may include spatial trend, dependence on covariates, and complicated interpoint interactions. Models are specified by a \code{formula} in the \R language, and are fitted using a function analogous to \code{\link{lm}} and \code{\link{glm}}. Fitted models can be printed, plotted, predicted, simulated and so on. } \section{Getting Started}{ For a quick introduction to \pkg{spatstat}, read the package vignette \emph{Getting started with spatstat} installed with \pkg{spatstat}. To read that document, you can either \itemize{ \item visit \url{https://cran.r-project.org/package=spatstat} and click on \code{Getting Started with Spatstat} \item start \R, type \code{library(spatstat)} and \code{vignette('getstart')} \item start \R, type \code{help.start()} to open the help browser, and navigate to \code{Packages > spatstat > Vignettes}. } Once you have installed \pkg{spatstat}, start \R and type \code{library(spatstat)}. Then type \code{beginner} for a beginner's introduction, or \code{demo(spatstat)} for a demonstration of the package's capabilities. For a complete course on \pkg{spatstat}, and on statistical analysis of spatial point patterns, read the book by Baddeley, Rubak and Turner (2015). 
Other recommended books on spatial point process methods are Diggle (2014), Gelfand et al (2010) and Illian et al (2008). The \pkg{spatstat} package includes over 50 datasets, which can be useful when learning the package. Type \code{demo(data)} to see plots of all datasets available in the package. Type \code{vignette('datasets')} for detailed background information on these datasets, and plots of each dataset. For information on converting your data into \pkg{spatstat} format, read Chapter 3 of Baddeley, Rubak and Turner (2015). This chapter is available free online, as one of the sample chapters at the book companion website, \url{https://spatstat.github.io/book}. For information about handling data in \bold{shapefiles}, see Chapter 3, or the Vignette \emph{Handling shapefiles in the spatstat package}, installed with \pkg{spatstat}, accessible as \code{vignette('shapefiles')}. } \section{Updates}{ New versions of \pkg{spatstat} are released every 8 weeks. Users are advised to update their installation of \pkg{spatstat} regularly. Type \code{latest.news} to read the news documentation about changes to the current installed version of \pkg{spatstat}. See the Vignette \emph{Summary of recent updates}, installed with \pkg{spatstat}, which describes the main changes to \pkg{spatstat} since the book (Baddeley, Rubak and Turner, 2015) was published. It is accessible as \code{vignette('updates')}. Type \code{news(package="spatstat")} to read news documentation about all previous versions of the package. } \section{FUNCTIONS AND DATASETS}{ Following is a summary of the main functions and datasets in the \pkg{spatstat} package. Alternatively an alphabetical list of all functions and datasets is available by typing \code{library(help=spatstat)}. For further information on any of these, type \code{help(name)} or \code{?name} where \code{name} is the name of the function or dataset. } \section{CONTENTS:}{ \tabular{ll}{ I. \tab Creating and manipulating data \cr II. 
\tab Exploratory Data Analysis \cr III. \tab Model fitting (Cox and cluster models) \cr IV. \tab Model fitting (Poisson and Gibbs models) \cr V. \tab Model fitting (determinantal point processes)\cr VI. \tab Model fitting (spatial logistic regression)\cr VII. \tab Simulation \cr VIII. \tab Tests and diagnostics\cr IX. \tab Documentation } } \section{I. CREATING AND MANIPULATING DATA}{ \bold{Types of spatial data:} The main types of spatial data supported by \pkg{spatstat} are: \tabular{ll}{ \code{\link{ppp}} \tab point pattern \cr \code{\link{owin}} \tab window (spatial region) \cr \code{\link{im}} \tab pixel image \cr \code{\link{psp}} \tab line segment pattern \cr \code{\link{tess}} \tab tessellation \cr \code{\link{pp3}} \tab three-dimensional point pattern \cr \code{\link{ppx}} \tab point pattern in any number of dimensions \cr \code{\link{lpp}} \tab point pattern on a linear network } \bold{To create a point pattern:} \tabular{ll}{ \code{\link{ppp}} \tab create a point pattern from \eqn{(x,y)} and window information \cr \tab \code{ppp(x, y, xlim, ylim)} for rectangular window\cr \tab \code{ppp(x, y, poly)} for polygonal window \cr \tab \code{ppp(x, y, mask)} for binary image window \cr \code{\link{as.ppp}} \tab convert other types of data to a \code{ppp} object \cr \code{\link{clickppp}} \tab interactively add points to a plot \cr \code{\link{marks<-}}, \code{\%mark\%} \tab attach/reassign marks to a point pattern } \bold{To simulate a random point pattern:} \tabular{ll}{ \code{\link{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link{runifdisc}} \tab generate \eqn{n} independent uniform random points 
in disc\cr \code{\link{rstrat}} \tab stratified random sample of points \cr \code{\link{rsyst}} \tab systematic random sample of points \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rMaternI}} \tab simulate the \Matern Model I inhibition process\cr \code{\link{rMaternII}} \tab simulate the \Matern Model II inhibition process\cr \code{\link{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link{rHardcore}} \tab simulate Hard Core process (perfect simulation)\cr \code{\link{rStraussHard}} \tab simulate Strauss-hard core process (perfect simulation)\cr \code{\link{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link{rPenttinen}} \tab simulate Penttinen process (perfect simulation)\cr \code{\link{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link{rPoissonCluster}} \tab simulate a general Poisson cluster process\cr \code{\link{rMatClust}} \tab simulate the \Matern Cluster process\cr \code{\link{rThomas}} \tab simulate the Thomas process \cr \code{\link{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link{rCauchy}} \tab simulate Neyman-Scott Cauchy cluster process \cr \code{\link{rVarGamma}} \tab simulate Neyman-Scott Variance Gamma cluster process \cr \code{\link{rthin}} \tab random thinning \cr \code{\link{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link{rmh}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link{simulate.ppm}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{To randomly change 
an existing point pattern:} \tabular{ll}{ \code{\link{rshift}} \tab random shifting of points \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rthin}} \tab random thinning \cr \code{\link{rlabel}} \tab random (re)labelling of a multitype point pattern \cr \code{\link{quadratresample}} \tab block resampling } \bold{Standard point pattern datasets:} Datasets in \pkg{spatstat} are lazy-loaded, so you can simply type the name of the dataset to use it; there is no need to type \code{\link{data}(amacrine)} etc. Type \code{demo(data)} to see a display of all the datasets installed with the package. Type \code{vignette('datasets')} for a document giving an overview of all datasets, including background information, and plots. \tabular{ll}{ \code{\link[spatstat.data]{amacrine}} \tab Austin Hughes' rabbit amacrine cells \cr \code{\link[spatstat.data]{anemones}} \tab Upton-Fingleton sea anemones data\cr \code{\link[spatstat.data]{ants}} \tab Harkness-Isham ant nests data\cr \code{\link[spatstat.data]{bdspots}} \tab Breakdown spots in microelectrodes \cr \code{\link[spatstat.data]{bei}} \tab Tropical rainforest trees \cr \code{\link[spatstat.data]{betacells}} \tab Waessle et al. 
cat retinal ganglia data \cr \code{\link[spatstat.data]{bramblecanes}} \tab Bramble Canes data \cr \code{\link[spatstat.data]{bronzefilter}} \tab Bronze Filter Section data \cr \code{\link[spatstat.data]{cells}} \tab Crick-Ripley biological cells data \cr \code{\link[spatstat.data]{chicago}} \tab Chicago crimes \cr \code{\link[spatstat.data]{chorley}} \tab Chorley-Ribble cancer data \cr \code{\link[spatstat.data]{clmfires}} \tab Castilla-La Mancha forest fires \cr \code{\link[spatstat.data]{copper}} \tab Berman-Huntington copper deposits data \cr \code{\link[spatstat.data]{dendrite}} \tab Dendritic spines \cr \code{\link[spatstat.data]{demohyper}} \tab Synthetic point patterns\cr \code{\link[spatstat.data]{demopat}} \tab Synthetic point pattern \cr \code{\link[spatstat.data]{finpines}} \tab Finnish Pines data \cr \code{\link[spatstat.data]{flu}} \tab Influenza virus proteins \cr \code{\link[spatstat.data]{gordon}} \tab People in Gordon Square, London \cr \code{\link[spatstat.data]{gorillas}} \tab Gorilla nest sites \cr \code{\link[spatstat.data]{hamster}} \tab Aherne's hamster tumour data \cr \code{\link[spatstat.data]{humberside}} \tab North Humberside childhood leukaemia data \cr \code{\link[spatstat.data]{hyytiala}} \tab {Mixed forest in \ifelse{latex}{\out{Hyyti{\"a}l{\"a}}}{Hyytiala}, Finland}\cr \code{\link[spatstat.data]{japanesepines}} \tab Japanese Pines data \cr \code{\link[spatstat.data]{lansing}} \tab Lansing Woods data \cr \code{\link[spatstat.data]{longleaf}} \tab Longleaf Pines data \cr \code{\link[spatstat.data]{mucosa}} \tab Cells in gastric mucosa \cr \code{\link[spatstat.data]{murchison}} \tab Murchison gold deposits \cr \code{\link[spatstat.data]{nbfires}} \tab New Brunswick fires data \cr \code{\link[spatstat.data]{nztrees}} \tab Mark-Esler-Ripley trees data \cr \code{\link[spatstat.data]{osteo}} \tab Osteocyte lacunae (3D, replicated) \cr \code{\link[spatstat.data]{paracou}} \tab Kimboto trees in Paracou, French Guiana \cr 
\code{\link[spatstat.data]{ponderosa}} \tab Getis-Franklin ponderosa pine trees data \cr \code{\link[spatstat.data]{pyramidal}} \tab Pyramidal neurons from 31 brains \cr \code{\link[spatstat.data]{redwood}} \tab Strauss-Ripley redwood saplings data \cr \code{\link[spatstat.data]{redwoodfull}} \tab Strauss redwood saplings data (full set) \cr \code{\link[spatstat.data]{residualspaper}} \tab Data from Baddeley et al (2005) \cr \code{\link[spatstat.data]{shapley}} \tab Galaxies in an astronomical survey \cr \code{\link[spatstat.data]{simdat}} \tab Simulated point pattern (inhomogeneous, with interaction) \cr \code{\link[spatstat.data]{spiders}} \tab Spider webs on mortar lines of brick wall \cr \code{\link[spatstat.data]{sporophores}} \tab Mycorrhizal fungi around a tree \cr \code{\link[spatstat.data]{spruces}} \tab Spruce trees in Saxonia \cr \code{\link[spatstat.data]{swedishpines}} \tab Strand-Ripley Swedish pines data \cr \code{\link[spatstat.data]{urkiola}} \tab Urkiola Woods data \cr \code{\link[spatstat.data]{waka}} \tab Trees in Waka national park \cr \code{\link[spatstat.data]{waterstriders}} \tab Insects on water surface } \bold{To manipulate a point pattern:} \tabular{ll}{ \code{\link{plot.ppp}} \tab plot a point pattern (e.g. 
\code{plot(X)}) \cr \code{\link{iplot}} \tab plot a point pattern interactively \cr \code{\link{edit.ppp}} \tab interactive text editor \cr \code{\link{[.ppp}} \tab extract or replace a subset of a point pattern \cr \tab \code{pp[subset]} or \code{pp[subwindow]} \cr \code{\link{subset.ppp}} \tab extract subset of point pattern satisfying a condition \cr \code{\link{superimpose}} \tab combine several point patterns \cr \code{\link{by.ppp}} \tab apply a function to sub-patterns of a point pattern \cr \code{\link{cut.ppp}} \tab classify the points in a point pattern \cr \code{\link{split.ppp}} \tab divide pattern into sub-patterns \cr \code{\link{unmark}} \tab remove marks \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{coords}} \tab extract coordinates, change coordinates \cr \code{\link{marks}} \tab extract marks, change marks or attach marks \cr \code{\link{rotate}} \tab rotate pattern \cr \code{\link{shift} } \tab translate pattern \cr \code{\link{flipxy} } \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{reflect} } \tab reflect in the origin \cr \code{\link{periodify} } \tab make several translated copies \cr \code{\link{affine}} \tab apply affine transformation\cr \code{\link{scalardilate}} \tab apply scalar dilation\cr \code{\link{density.ppp}} \tab kernel estimation of point pattern intensity\cr \code{\link{Smooth.ppp}} \tab kernel smoothing of marks of point pattern\cr \code{\link{nnmark}} \tab mark value of nearest data point\cr \code{\link{sharpen.ppp}} \tab data sharpening\cr \code{\link{identify.ppp}} \tab interactively identify points \cr \code{\link{unique.ppp}} \tab remove duplicate points \cr \code{\link{duplicated.ppp}} \tab determine which points are duplicates \cr \code{\link{uniquemap.ppp}} \tab map duplicated points to unique points \cr \code{\link{connected.ppp}} \tab find clumps of points \cr \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi tessellation \cr \code{\link{delaunay}} \tab compute Delaunay 
triangulation \cr \code{\link{delaunayDistance}} \tab graph distance in Delaunay triangulation \cr \code{\link{convexhull}} \tab compute convex hull \cr \code{\link{discretise}} \tab discretise coordinates \cr \code{\link{pixellate.ppp}} \tab approximate point pattern by pixel image \cr \code{\link{as.im.ppp}} \tab approximate point pattern by pixel image } See \code{\link{spatstat.options}} to control plotting behaviour. \bold{To create a window:} An object of class \code{"owin"} describes a spatial region (a window of observation). \tabular{ll}{ \code{\link{owin}} \tab Create a window object \cr \tab \code{owin(xlim, ylim)} for rectangular window \cr \tab \code{owin(poly)} for polygonal window \cr \tab \code{owin(mask)} for binary image window \cr \code{\link{Window}} \tab Extract window of another object \cr \code{\link{Frame}} \tab Extract the containing rectangle ('frame') of another object \cr \code{\link{as.owin}} \tab Convert other data to a window object \cr \code{\link{square}} \tab make a square window \cr \code{\link{disc}} \tab make a circular window \cr \code{\link{ellipse}} \tab make an elliptical window \cr \code{\link{ripras}} \tab Ripley-Rasson estimator of window, given only the points \cr \code{\link{convexhull}} \tab compute convex hull of something \cr \code{\link[spatstat.data]{letterR}} \tab polygonal window in the shape of the \R logo \cr \code{\link{clickpoly}} \tab interactively draw a polygonal window \cr \code{\link{clickbox}} \tab interactively draw a rectangle } \bold{To manipulate a window:} \tabular{ll}{ \code{\link{plot.owin}} \tab plot a window. 
\cr \tab \code{plot(W)}\cr \code{\link{boundingbox}} \tab Find a tight bounding box for the window \cr \code{\link{erosion}} \tab erode window by a distance r\cr \code{\link{dilation}} \tab dilate window by a distance r\cr \code{\link{closing}} \tab close window by a distance r\cr \code{\link{opening}} \tab open window by a distance r\cr \code{\link{border}} \tab difference between window and its erosion/dilation \cr \code{\link{complement.owin}} \tab invert (swap inside and outside)\cr \code{\link{simplify.owin}} \tab approximate a window by a simple polygon \cr \code{\link{rotate}} \tab rotate window \cr \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{shift} } \tab translate window \cr \code{\link{periodify} } \tab make several translated copies \cr \code{\link{affine}} \tab apply affine transformation \cr \code{\link{as.data.frame.owin}} \tab convert window to data frame } \bold{Digital approximations:} \tabular{ll}{ \code{\link{as.mask}} \tab Make a discrete pixel approximation of a given window \cr \code{\link{as.im.owin}} \tab convert window to pixel image \cr \code{\link{pixellate.owin}} \tab convert window to pixel image \cr \code{\link{commonGrid}} \tab find common pixel grid for windows \cr \code{\link{nearest.raster.point}} \tab map continuous coordinates to raster locations\cr \code{\link{raster.x}} \tab raster x coordinates \cr \code{\link{raster.y}} \tab raster y coordinates \cr \code{\link{raster.xy}} \tab raster x and y coordinates \cr \code{\link{as.polygonal}} \tab convert pixel mask to polygonal window } See \code{\link{spatstat.options}} to control the approximation \bold{Geometrical computations with windows:} \tabular{ll}{ \code{\link{edges}} \tab extract boundary edges \cr \code{\link{intersect.owin}} \tab intersection of two windows\cr \code{\link{union.owin}} \tab union of two windows\cr \code{\link{setminus.owin}} \tab set subtraction of two windows\cr \code{\link{inside.owin}} \tab determine whether a point 
is inside a window\cr \code{\link{area.owin}} \tab compute area \cr \code{\link{perimeter}} \tab compute perimeter length \cr \code{\link{diameter.owin}} \tab compute diameter\cr \code{\link{incircle}} \tab find largest circle inside a window \cr \code{\link{inradius}} \tab radius of incircle \cr \code{\link{connected.owin}} \tab find connected components of window \cr \code{\link{eroded.areas}} \tab compute areas of eroded windows\cr \code{\link{dilated.areas}} \tab compute areas of dilated windows\cr \code{\link{bdist.points}} \tab compute distances from data points to window boundary \cr \code{\link{bdist.pixels}} \tab compute distances from all pixels to window boundary \cr \code{\link{bdist.tiles}} \tab boundary distance for each tile in tessellation \cr \code{\link{distmap.owin}} \tab distance transform image \cr \code{\link{distfun.owin}} \tab distance transform \cr \code{\link{centroid.owin}} \tab compute centroid (centre of mass) of window\cr \code{\link{is.subset.owin}} \tab determine whether one window contains another \cr \code{\link{is.convex}} \tab determine whether a window is convex \cr \code{\link{convexhull}} \tab compute convex hull \cr \code{\link{triangulate.owin}} \tab decompose into triangles \cr \code{\link{as.mask}} \tab pixel approximation of window \cr \code{\link{as.polygonal}} \tab polygonal approximation of window \cr \code{\link{is.rectangle}} \tab test whether window is a rectangle \cr \code{\link{is.polygonal}} \tab test whether window is polygonal \cr \code{\link{is.mask}} \tab test whether window is a mask \cr \code{\link{setcov}} \tab spatial covariance function of window \cr \code{\link{pixelcentres}} \tab extract centres of pixels in mask \cr \code{\link{clickdist}} \tab measure distance between two points clicked by user } \bold{Pixel images:} An object of class \code{"im"} represents a pixel image. 
Such objects are returned by some of the functions in \pkg{spatstat} including \code{\link{Kmeasure}}, \code{\link{setcov}} and \code{\link{density.ppp}}. \tabular{ll}{ \code{\link{im}} \tab create a pixel image\cr \code{\link{as.im}} \tab convert other data to a pixel image\cr \code{\link{pixellate}} \tab convert other data to a pixel image\cr \code{\link{as.matrix.im}} \tab convert pixel image to matrix\cr \code{\link{as.data.frame.im}} \tab convert pixel image to data frame\cr \code{\link{as.function.im}} \tab convert pixel image to function\cr \code{\link{plot.im}} \tab plot a pixel image on screen as a digital image\cr \code{\link{contour.im}} \tab draw contours of a pixel image \cr \code{\link{persp.im}} \tab draw perspective plot of a pixel image \cr \code{\link{rgbim}} \tab create colour-valued pixel image \cr \code{\link{hsvim}} \tab create colour-valued pixel image \cr \code{\link{[.im}} \tab extract a subset of a pixel image\cr \code{\link{[<-.im}} \tab replace a subset of a pixel image\cr \code{\link{rotate.im}} \tab rotate pixel image \cr \code{\link{shift.im}} \tab apply vector shift to pixel image \cr \code{\link{affine.im}} \tab apply affine transformation to image \cr \code{X} \tab print very basic information about image \code{X}\cr \code{\link{summary}(X)} \tab summary of image \code{X} \cr \code{\link{hist.im}} \tab histogram of image \cr \code{\link{mean.im}} \tab mean pixel value of image \cr \code{\link{integral.im}} \tab integral of pixel values \cr \code{\link{quantile.im}} \tab quantiles of image \cr \code{\link{cut.im}} \tab convert numeric image to factor image \cr \code{\link{is.im}} \tab test whether an object is a pixel image\cr \code{\link{interp.im}} \tab interpolate a pixel image\cr \code{\link{blur}} \tab apply Gaussian blur to image\cr \code{\link{Smooth.im}} \tab apply Gaussian blur to image\cr \code{\link{connected.im}} \tab find connected components \cr \code{\link{compatible.im}} \tab test whether two images have compatible 
dimensions \cr \code{\link{harmonise.im}} \tab make images compatible \cr \code{\link{commonGrid}} \tab find a common pixel grid for images \cr \code{\link{eval.im}} \tab evaluate any expression involving images\cr \code{\link{im.apply}} \tab evaluate a function of several images \cr \code{\link{scaletointerval}} \tab rescale pixel values \cr \code{\link{zapsmall.im}} \tab set very small pixel values to zero \cr \code{\link{levelset}} \tab level set of an image\cr \code{\link{solutionset}} \tab region where an expression is true \cr \code{\link{imcov}} \tab spatial covariance function of image \cr \code{\link{convolve.im}} \tab spatial convolution of images \cr \code{\link{transect.im}} \tab line transect of image \cr \code{\link{pixelcentres}} \tab extract centres of pixels \cr \code{\link{transmat}} \tab convert matrix of pixel values \cr \tab to a different indexing convention \cr \code{\link{rnoise}} \tab random pixel noise } \bold{Line segment patterns} An object of class \code{"psp"} represents a pattern of straight line segments. 
\tabular{ll}{ \code{\link{psp}} \tab create a line segment pattern \cr \code{\link{as.psp}} \tab convert other data into a line segment pattern \cr \code{\link{edges}} \tab extract edges of a window \cr \code{\link{is.psp}} \tab determine whether a dataset has class \code{"psp"} \cr \code{\link{plot.psp}} \tab plot a line segment pattern \cr \code{\link{print.psp}} \tab print basic information \cr \code{\link{summary.psp}} \tab print summary information \cr \code{\link{[.psp}} \tab extract a subset of a line segment pattern \cr \code{\link{subset.psp}} \tab extract subset of line segment pattern \cr \code{\link{as.data.frame.psp}} \tab convert line segment pattern to data frame \cr \code{\link{marks.psp}} \tab extract marks of line segments \cr \code{\link{marks<-.psp}} \tab assign new marks to line segments \cr \code{\link{unmark.psp}} \tab delete marks from line segments \cr \code{\link{midpoints.psp}} \tab compute the midpoints of line segments \cr \code{\link{endpoints.psp}} \tab extract the endpoints of line segments \cr \code{\link{lengths.psp}} \tab compute the lengths of line segments \cr \code{\link{angles.psp}} \tab compute the orientation angles of line segments \cr \code{\link{superimpose}} \tab combine several line segment patterns \cr \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{rotate.psp}} \tab rotate a line segment pattern \cr \code{\link{shift.psp}} \tab shift a line segment pattern \cr \code{\link{periodify}} \tab make several shifted copies \cr \code{\link{affine.psp}} \tab apply an affine transformation \cr \code{\link{pixellate.psp}} \tab approximate line segment pattern by pixel image \cr \code{\link{as.mask.psp}} \tab approximate line segment pattern by binary mask \cr \code{\link{distmap.psp}} \tab compute the distance map of a line segment pattern \cr \code{\link{distfun.psp}} \tab compute the distance map of a line segment pattern \cr \code{\link{density.psp}} \tab kernel smoothing of line segments\cr 
\code{\link{selfcrossing.psp}} \tab find crossing points between line segments \cr \code{\link{selfcut.psp}} \tab cut segments where they cross \cr \code{\link{crossing.psp}} \tab find crossing points between two line segment patterns \cr \code{\link{extrapolate.psp}} \tab extrapolate line segments to infinite lines \cr \code{\link{nncross}} \tab find distance to nearest line segment from a given point\cr \code{\link{nearestsegment}} \tab find line segment closest to a given point \cr \code{\link{project2segment}} \tab find location along a line segment closest to a given point \cr \code{\link{pointsOnLines}} \tab generate points evenly spaced along line segment \cr \code{\link{rpoisline}} \tab generate a realisation of the Poisson line process inside a window\cr \code{\link{rlinegrid}} \tab generate a random array of parallel lines through a window } \bold{Tessellations} An object of class \code{"tess"} represents a tessellation. \tabular{ll}{ \code{\link{tess}} \tab create a tessellation \cr \code{\link{quadrats}} \tab create a tessellation of rectangles\cr \code{\link{hextess}} \tab create a tessellation of hexagons \cr \code{\link{polartess}} \tab tessellation using polar coordinates \cr \code{\link{quantess}} \tab quantile tessellation \cr \code{\link{venn.tess}} \tab Venn diagram tessellation \cr \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi tessellation of points\cr \code{\link{delaunay}} \tab compute Delaunay triangulation of points\cr \code{\link{as.tess}} \tab convert other data to a tessellation \cr \code{\link{plot.tess}} \tab plot a tessellation \cr \code{\link{tiles}} \tab extract all the tiles of a tessellation \cr \code{\link{[.tess}} \tab extract some tiles of a tessellation \cr \code{\link{[<-.tess}} \tab change some tiles of a tessellation \cr \code{\link{intersect.tess}} \tab intersect two tessellations \cr \tab or restrict a tessellation to a window \cr \code{\link{chop.tess}} \tab subdivide a tessellation by a line \cr 
\code{\link{rpoislinetess}} \tab generate tessellation using Poisson line process \cr \code{\link{tile.areas}} \tab area of each tile in tessellation \cr \code{\link{bdist.tiles}} \tab boundary distance for each tile in tessellation \cr \code{\link{connected.tess}} \tab find connected components of tiles \cr \code{\link{shift.tess}} \tab shift a tessellation \cr \code{\link{rotate.tess}} \tab rotate a tessellation \cr \code{\link{reflect.tess}} \tab reflect about the origin \cr \code{\link{flipxy.tess}} \tab reflect about the diagonal \cr \code{\link{affine.tess}} \tab apply affine transformation } \bold{Three-dimensional point patterns} An object of class \code{"pp3"} represents a three-dimensional point pattern in a rectangular box. The box is represented by an object of class \code{"box3"}. \tabular{ll}{ \code{\link{pp3}} \tab create a 3-D point pattern \cr \code{\link{plot.pp3}} \tab plot a 3-D point pattern \cr \code{\link{coords}} \tab extract coordinates \cr \code{\link{as.hyperframe}} \tab extract coordinates \cr \code{\link{subset.pp3}} \tab extract subset of 3-D point pattern \cr \code{\link{unitname.pp3}} \tab name of unit of length \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{runifpoint3}} \tab generate uniform random points in 3-D \cr \code{\link{rpoispp3}} \tab generate Poisson random points in 3-D \cr \code{\link{envelope.pp3}} \tab generate simulation envelopes for 3-D pattern \cr \code{\link{box3}} \tab create a 3-D rectangular box \cr \code{\link{as.box3}} \tab convert data to 3-D rectangular box \cr \code{\link{unitname.box3}} \tab name of unit of length \cr \code{\link{diameter.box3}} \tab diameter of box \cr \code{\link{volume.box3}} \tab volume of box \cr \code{\link{shortside.box3}} \tab shortest side of box \cr \code{\link{eroded.volumes}} \tab volumes of erosions of box } \bold{Multi-dimensional space-time point patterns} An object of class \code{"ppx"} represents a point pattern in multi-dimensional space 
and/or time. \tabular{ll}{ \code{\link{ppx}} \tab create a multidimensional space-time point pattern \cr \code{\link{coords}} \tab extract coordinates \cr \code{\link{as.hyperframe}} \tab extract coordinates \cr \code{\link{subset.ppx}} \tab extract subset \cr \code{\link{unitname.ppx}} \tab name of unit of length \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{runifpointx}} \tab generate uniform random points \cr \code{\link{rpoisppx}} \tab generate Poisson random points \cr \code{\link{boxx}} \tab define multidimensional box \cr \code{\link{diameter.boxx}} \tab diameter of box \cr \code{\link{volume.boxx}} \tab volume of box \cr \code{\link{shortside.boxx}} \tab shortest side of box \cr \code{\link{eroded.volumes.boxx}} \tab volumes of erosions of box } \bold{Point patterns on a linear network} An object of class \code{"linnet"} represents a linear network (for example, a road network). \tabular{ll}{ \code{\link{linnet}} \tab create a linear network \cr \code{\link{clickjoin}} \tab interactively join vertices in network \cr \code{\link{iplot.linnet}} \tab interactively plot network \cr \code{\link[spatstat.data]{simplenet}} \tab simple example of network \cr \code{\link{lineardisc}} \tab disc in a linear network \cr \code{\link{delaunayNetwork}} \tab network of Delaunay triangulation \cr \code{\link{dirichletNetwork}} \tab network of Dirichlet edges \cr \code{\link{methods.linnet}} \tab methods for \code{linnet} objects\cr \code{\link{vertices.linnet}} \tab nodes of network \cr \code{\link{pixellate.linnet}} \tab approximate by pixel image } An object of class \code{"lpp"} represents a point pattern on a linear network (for example, road accidents on a road network). 
\tabular{ll}{ \code{\link{lpp}} \tab create a point pattern on a linear network \cr \code{\link{methods.lpp}} \tab methods for \code{lpp} objects \cr \code{\link{subset.lpp}} \tab method for \code{subset} \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network \cr \code{\link[spatstat.data]{chicago}} \tab Chicago crime data \cr \code{\link[spatstat.data]{dendrite}} \tab Dendritic spines data \cr \code{\link[spatstat.data]{spiders}} \tab Spider webs on mortar lines of brick wall } \bold{Hyperframes} A hyperframe is like a data frame, except that the entries may be objects of any kind. \tabular{ll}{ \code{\link{hyperframe}} \tab create a hyperframe \cr \code{\link{as.hyperframe}} \tab convert data to hyperframe \cr \code{\link{plot.hyperframe}} \tab plot hyperframe \cr \code{\link{with.hyperframe}} \tab evaluate expression using each row of hyperframe \cr \code{\link{cbind.hyperframe}} \tab combine hyperframes by columns\cr \code{\link{rbind.hyperframe}} \tab combine hyperframes by rows\cr \code{\link{as.data.frame.hyperframe}} \tab convert hyperframe to data frame \cr \code{\link{subset.hyperframe}} \tab method for \code{subset} \cr \code{\link{head.hyperframe}} \tab first few rows of hyperframe \cr \code{\link{tail.hyperframe}} \tab last few rows of hyperframe } \bold{Layered objects} A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. \tabular{ll}{ \code{\link{layered}} \tab create layered object \cr \code{\link{plot.layered}} \tab plot layered object\cr \code{\link{[.layered}} \tab extract subset of layered object } \bold{Colour maps} A colour map is a mechanism for associating colours with data. It can be regarded as a function, mapping data to colours. Using a \code{colourmap} object in a plot command ensures that the mapping from numbers to colours is the same in different plots. 
\tabular{ll}{ \code{\link{colourmap}} \tab create a colour map \cr \code{\link{plot.colourmap}} \tab plot the colour map only\cr \code{\link{tweak.colourmap}} \tab alter individual colour values \cr \code{\link{interp.colourmap}} \tab make a smooth transition between colours \cr \code{\link{beachcolourmap}} \tab one special colour map } } \section{II. EXPLORATORY DATA ANALYSIS}{ \bold{Inspection of data:} \tabular{ll}{ \code{\link{summary}(X)} \tab print useful summary of point pattern \code{X}\cr \code{X} \tab print basic description of point pattern \code{X} \cr \code{any(duplicated(X))} \tab check for duplicated points in pattern \code{X} \cr \code{\link{istat}(X)} \tab Interactive exploratory analysis \cr \code{\link{View}(X)} \tab spreadsheet-style viewer } \bold{Classical exploratory tools:} \tabular{ll}{ \code{\link{clarkevans}} \tab Clark and Evans aggregation index \cr \code{\link{fryplot}} \tab Fry plot \cr \code{\link{miplot}} \tab Morisita Index plot } \bold{Smoothing:} \tabular{ll}{ \code{\link{density.ppp}} \tab kernel smoothed density/intensity\cr \code{\link{relrisk}} \tab kernel estimate of relative risk\cr \code{\link{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link{bw.diggle}} \tab cross-validated bandwidth selection for \code{\link{density.ppp}}\cr \code{\link{bw.ppl}} \tab likelihood cross-validated bandwidth selection for \code{\link{density.ppp}}\cr \code{\link{bw.CvL}} \tab Cronie-Van Lieshout bandwidth selection for density estimation\cr \code{\link{bw.scott}} \tab Scott's rule of thumb for density estimation\cr \code{\link{bw.abram}} \tab Abramson's rule for adaptive bandwidths\cr \code{\link{bw.relrisk}} \tab cross-validated bandwidth selection for \code{\link{relrisk}} \cr \code{\link{bw.smoothppp}} \tab cross-validated bandwidth selection for \code{\link{Smooth.ppp}} \cr \code{\link{bw.frac}} \tab bandwidth selection using window geometry\cr \code{\link{bw.stoyan}} \tab Stoyan's rule of thumb for bandwidth for 
\code{\link{pcf}} } \bold{Modern exploratory tools:} \tabular{ll}{ \code{\link{clusterset}} \tab Allard-Fraley feature detection \cr \code{\link{nnclean}} \tab Byers-Raftery feature detection \cr \code{\link{sharpen.ppp}} \tab Choi-Hall data sharpening \cr \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link{rho2hat}} \tab Kernel estimate of effect of two covariates\cr \code{\link{spatialcdf}} \tab Spatial cumulative distribution function\cr \code{\link{roc}} \tab Receiver operating characteristic curve } \bold{Summary statistics for a point pattern:} Type \code{demo(sumfun)} for a demonstration of many of the summary statistics. \tabular{ll}{ \code{\link{intensity}} \tab Mean intensity \cr \code{\link{quadratcount}} \tab Quadrat counts \cr \code{\link{intensity.quadratcount}} \tab Mean intensity in quadrats \cr \code{\link{Fest}} \tab empty space function \eqn{F} \cr \code{\link{Gest}} \tab nearest neighbour distribution function \eqn{G} \cr \code{\link{Jest}} \tab \eqn{J}-function \eqn{J = (1-G)/(1-F)} \cr \code{\link{Kest}} \tab Ripley's \eqn{K}-function\cr \code{\link{Lest}} \tab Besag \eqn{L}-function\cr \code{\link{Tstat}} \tab Third order \eqn{T}-function \cr \code{\link{allstats}} \tab all four functions \eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} \cr \code{\link{pcf}} \tab pair correlation function \cr \code{\link{Kinhom}} \tab \eqn{K} for inhomogeneous point patterns \cr \code{\link{Linhom}} \tab \eqn{L} for inhomogeneous point patterns \cr \code{\link{pcfinhom}} \tab pair correlation for inhomogeneous patterns\cr \code{\link{Finhom}} \tab \eqn{F} for inhomogeneous point patterns \cr \code{\link{Ginhom}} \tab \eqn{G} for inhomogeneous point patterns \cr \code{\link{Jinhom}} \tab \eqn{J} for inhomogeneous point patterns \cr \code{\link{localL}} \tab Getis-Franklin neighbourhood density function\cr \code{\link{localK}} \tab neighbourhood K-function\cr \code{\link{localpcf}} \tab local pair correlation function\cr \code{\link{localKinhom}} 
\tab local \eqn{K} for inhomogeneous point patterns \cr \code{\link{localLinhom}} \tab local \eqn{L} for inhomogeneous point patterns \cr \code{\link{localpcfinhom}} \tab local pair correlation for inhomogeneous patterns\cr \code{\link{Ksector}} \tab Directional \eqn{K}-function\cr \code{\link{Kscaled}} \tab locally scaled \eqn{K}-function \cr \code{\link{Kest.fft}} \tab fast \eqn{K}-function using FFT for large datasets \cr \code{\link{Kmeasure}} \tab reduced second moment measure \cr \code{\link{envelope}} \tab simulation envelopes for a summary function \cr \code{\link{varblock}} \tab variances and confidence intervals\cr \tab for a summary function \cr \code{\link{lohboot}} \tab bootstrap for a summary function } Related facilities: \tabular{ll}{ \code{\link{plot.fv}} \tab plot a summary function\cr \code{\link{eval.fv}} \tab evaluate any expression involving summary functions\cr \code{\link{harmonise.fv}} \tab make functions compatible \cr \code{\link{eval.fasp}} \tab evaluate any expression involving an array of functions\cr \code{\link{with.fv}} \tab evaluate an expression for a summary function\cr \code{\link{Smooth.fv}} \tab apply smoothing to a summary function\cr \code{\link{deriv.fv}} \tab calculate derivative of a summary function\cr \code{\link{pool.fv}} \tab pool several estimates of a summary function\cr \code{\link{nndist}} \tab nearest neighbour distances \cr \code{\link{nnwhich}} \tab find nearest neighbours \cr \code{\link{pairdist}} \tab distances between all pairs of points\cr \code{\link{crossdist}} \tab distances between points in two patterns\cr \code{\link{nncross}} \tab nearest neighbours between two point patterns \cr \code{\link{exactdt}} \tab distance from any location to nearest data point\cr \code{\link{distmap}} \tab distance map image\cr \code{\link{distfun}} \tab distance map function\cr \code{\link{nnmap}} \tab nearest point image \cr \code{\link{nnfun}} \tab nearest point function \cr \code{\link{density.ppp}} \tab kernel 
smoothed density\cr \code{\link{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link{relrisk}} \tab kernel estimate of relative risk\cr \code{\link{sharpen.ppp}} \tab data sharpening \cr \code{\link{rknn}} \tab theoretical distribution of nearest neighbour distance } \bold{Summary statistics for a multitype point pattern:} A multitype point pattern is represented by an object \code{X} of class \code{"ppp"} such that \code{marks(X)} is a factor. \tabular{ll}{ \code{\link{relrisk}} \tab kernel estimation of relative risk \cr \code{\link{scan.test}} \tab spatial scan test of elevated risk \cr \code{\link{Gcross},\link{Gdot},\link{Gmulti}} \tab multitype nearest neighbour distributions \eqn{G_{ij}, G_{i\bullet}}{G[i,j], G[i.]} \cr \code{\link{Kcross},\link{Kdot}, \link{Kmulti}} \tab multitype \eqn{K}-functions \eqn{K_{ij}, K_{i\bullet}}{K[i,j], K[i.]} \cr \code{\link{Lcross},\link{Ldot}} \tab multitype \eqn{L}-functions \eqn{L_{ij}, L_{i\bullet}}{L[i,j], L[i.]} \cr \code{\link{Jcross},\link{Jdot},\link{Jmulti}} \tab multitype \eqn{J}-functions \eqn{J_{ij}, J_{i\bullet}}{J[i,j],J[i.]} \cr \code{\link{pcfcross}} \tab multitype pair correlation function \eqn{g_{ij}}{g[i,j]} \cr \code{\link{pcfdot}} \tab multitype pair correlation function \eqn{g_{i\bullet}}{g[i.]} \cr \code{\link{pcfmulti}} \tab general pair correlation function \cr \code{\link{markconnect}} \tab marked connection function \eqn{p_{ij}}{p[i,j]} \cr \code{\link{alltypes}} \tab estimates of the above for all \eqn{i,j} pairs \cr \code{\link{Iest}} \tab multitype \eqn{I}-function\cr \code{\link{Kcross.inhom},\link{Kdot.inhom}} \tab inhomogeneous counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link{Lcross.inhom},\link{Ldot.inhom}} \tab inhomogeneous counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link{pcfcross.inhom},\link{pcfdot.inhom}} \tab inhomogeneous counterparts of \code{pcfcross}, \code{pcfdot} \cr \code{\link{localKcross},\link{localKdot}} \tab local counterparts of 
\code{Kcross}, \code{Kdot} \cr \code{\link{localLcross},\link{localLdot}} \tab local counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link{localKcross.inhom},\link{localLcross.inhom}} \tab local counterparts of \code{Kcross.inhom}, \code{Lcross.inhom} } \bold{Summary statistics for a marked point pattern:} A marked point pattern is represented by an object \code{X} of class \code{"ppp"} with a component \code{X$marks}. The entries in the vector \code{X$marks} may be numeric, complex, string or any other atomic type. For numeric marks, there are the following functions: \tabular{ll}{ \code{\link{markmean}} \tab smoothed local average of marks \cr \code{\link{markvar}} \tab smoothed local variance of marks \cr \code{\link{markcorr}} \tab mark correlation function \cr \code{\link{markcrosscorr}} \tab mark cross-correlation function \cr \code{\link{markvario}} \tab mark variogram \cr \code{\link{markmarkscatter}} \tab mark-mark scatterplot \cr \code{\link{Kmark}} \tab mark-weighted \eqn{K} function \cr \code{\link{Emark}} \tab mark independence diagnostic \eqn{E(r)} \cr \code{\link{Vmark}} \tab mark independence diagnostic \eqn{V(r)} \cr \code{\link{nnmean}} \tab nearest neighbour mean index \cr \code{\link{nnvario}} \tab nearest neighbour mark variance index } For marks of any type, there are the following: \tabular{ll}{ \code{\link{Gmulti}} \tab multitype nearest neighbour distribution \cr \code{\link{Kmulti}} \tab multitype \eqn{K}-function \cr \code{\link{Jmulti}} \tab multitype \eqn{J}-function } Alternatively use \code{\link{cut.ppp}} to convert a marked point pattern to a multitype point pattern. 
\bold{Programming tools:} \tabular{ll}{ \code{\link{applynbd}} \tab apply function to every neighbourhood in a point pattern \cr \code{\link{markstat}} \tab apply function to the marks of neighbours in a point pattern \cr \code{\link{marktable}} \tab tabulate the marks of neighbours in a point pattern \cr \code{\link{pppdist}} \tab find the optimal match between two point patterns } \bold{Summary statistics for a point pattern on a linear network:} These are for point patterns on a linear network (class \code{lpp}). For unmarked patterns: \tabular{ll}{ \code{\link{linearK}} \tab \eqn{K} function on linear network \cr \code{\link{linearKinhom}} \tab inhomogeneous \eqn{K} function on linear network \cr \code{\link{linearpcf}} \tab pair correlation function on linear network \cr \code{\link{linearpcfinhom}} \tab inhomogeneous pair correlation on linear network } For multitype patterns: \tabular{ll}{ \code{\link{linearKcross}} \tab \eqn{K} function between two types of points \cr \code{\link{linearKdot}} \tab \eqn{K} function from one type to any type \cr \code{\link{linearKcross.inhom}} \tab Inhomogeneous version of \code{\link{linearKcross}} \cr \code{\link{linearKdot.inhom}} \tab Inhomogeneous version of \code{\link{linearKdot}} \cr \code{\link{linearmarkconnect}} \tab Mark connection function on linear network \cr \code{\link{linearmarkequal}} \tab Mark equality function on linear network \cr \code{\link{linearpcfcross}} \tab Pair correlation between two types of points \cr \code{\link{linearpcfdot}} \tab Pair correlation from one type to any type \cr \code{\link{linearpcfcross.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfcross}} \cr \code{\link{linearpcfdot.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfdot}} } Related facilities: \tabular{ll}{ \code{\link{pairdist.lpp}} \tab distances between pairs \cr \code{\link{crossdist.lpp}} \tab distances between pairs \cr \code{\link{nndist.lpp}} \tab nearest neighbour distances \cr 
\code{\link{nncross.lpp}} \tab nearest neighbour distances \cr \code{\link{nnwhich.lpp}} \tab find nearest neighbours \cr \code{\link{nnfun.lpp}} \tab find nearest data point \cr \code{\link{density.lpp}} \tab kernel smoothing estimator of intensity \cr \code{\link{distfun.lpp}} \tab distance transform \cr \code{\link{envelope.lpp}} \tab simulation envelopes \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network } It is also possible to fit point process models to \code{lpp} objects. See Section IV. \bold{Summary statistics for a three-dimensional point pattern:} These are for 3-dimensional point pattern objects (class \code{pp3}). \tabular{ll}{ \code{\link{F3est}} \tab empty space function \eqn{F} \cr \code{\link{G3est}} \tab nearest neighbour function \eqn{G} \cr \code{\link{K3est}} \tab \eqn{K}-function \cr \code{\link{pcf3est}} \tab pair correlation function } Related facilities: \tabular{ll}{ \code{\link{envelope.pp3}} \tab simulation envelopes \cr \code{\link{pairdist.pp3}} \tab distances between all pairs of points \cr \code{\link{crossdist.pp3}} \tab distances between points in two patterns \cr \code{\link{nndist.pp3}} \tab nearest neighbour distances \cr \code{\link{nnwhich.pp3}} \tab find nearest neighbours \cr \code{\link{nncross.pp3}} \tab find nearest neighbours in another pattern } \bold{Computations for multi-dimensional point pattern:} These are for multi-dimensional space-time point pattern objects (class \code{ppx}). 
\tabular{ll}{ \code{\link{pairdist.ppx}} \tab distances between all pairs of points \cr \code{\link{crossdist.ppx}} \tab distances between points in two patterns \cr \code{\link{nndist.ppx}} \tab nearest neighbour distances \cr \code{\link{nnwhich.ppx}} \tab find nearest neighbours } \bold{Summary statistics for random sets:} These work for point patterns (class \code{ppp}), line segment patterns (class \code{psp}) or windows (class \code{owin}). \tabular{ll}{ \code{\link{Hest}} \tab spherical contact distribution \eqn{H} \cr \code{\link{Gfox}} \tab Foxall \eqn{G}-function \cr \code{\link{Jfox}} \tab Foxall \eqn{J}-function } } \section{III. MODEL FITTING (COX AND CLUSTER MODELS)}{ Cluster process models (with homogeneous or inhomogeneous intensity) and Cox processes can be fitted by the function \code{\link{kppm}}. Its result is an object of class \code{"kppm"}. The fitted model can be printed, plotted, predicted, simulated and updated. \tabular{ll}{ \code{\link{kppm}} \tab Fit model\cr \code{\link{plot.kppm}} \tab Plot the fitted model\cr \code{\link{summary.kppm}} \tab Summarise the fitted model\cr \code{\link{fitted.kppm}} \tab Compute fitted intensity \cr \code{\link{predict.kppm}} \tab Compute fitted intensity \cr \code{\link{update.kppm}} \tab Update the model \cr \code{\link{improve.kppm}} \tab Refine the estimate of trend \cr \code{\link{simulate.kppm}} \tab Generate simulated realisations \cr \code{\link{vcov.kppm}} \tab Variance-covariance matrix of coefficients \cr \code{\link[spatstat:methods.kppm]{coef.kppm}} \tab Extract trend coefficients \cr \code{\link[spatstat:methods.kppm]{formula.kppm}} \tab Extract trend formula \cr \code{\link{parameters}} \tab Extract all model parameters \cr \code{\link{clusterfield}} \tab Compute offspring density \cr \code{\link{clusterradius}} \tab Radius of support of offspring density \cr \code{\link{Kmodel.kppm}} \tab \eqn{K} function of fitted model \cr \code{\link{pcfmodel.kppm}} \tab Pair correlation of fitted 
model } For model selection, you can also use the generic functions \code{\link{step}}, \code{\link{drop1}} and \code{\link{AIC}} on fitted point process models. For variable selection, see \code{\link{sdr}}. The theoretical models can also be simulated, for any choice of parameter values, using \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, and \code{\link{rLGCP}}. Lower-level fitting functions include: \tabular{ll}{ \code{\link{lgcp.estK}} \tab fit a log-Gaussian Cox process model\cr \code{\link{lgcp.estpcf}} \tab fit a log-Gaussian Cox process model\cr \code{\link{thomas.estK}} \tab fit the Thomas process model \cr \code{\link{thomas.estpcf}} \tab fit the Thomas process model \cr \code{\link{matclust.estK}} \tab fit the \Matern Cluster process model \cr \code{\link{matclust.estpcf}} \tab fit the \Matern Cluster process model \cr \code{\link{cauchy.estK}} \tab fit a Neyman-Scott Cauchy cluster process \cr \code{\link{cauchy.estpcf}} \tab fit a Neyman-Scott Cauchy cluster process\cr \code{\link{vargamma.estK}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link{vargamma.estpcf}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link{mincontrast}} \tab low-level algorithm for fitting models \cr \tab by the method of minimum contrast } } \section{IV. MODEL FITTING (POISSON AND GIBBS MODELS)}{ \bold{Types of models} Poisson point processes are the simplest models for point patterns. A Poisson model assumes that the points are stochastically independent. It may allow the points to have a non-uniform spatial density. The special case of a Poisson process with a uniform spatial density is often called Complete Spatial Randomness. Poisson point processes are included in the more general class of Gibbs point process models. In a Gibbs model, there is \emph{interaction} or dependence between points. Many different types of interaction can be specified. 
For a detailed explanation of how to fit Poisson or Gibbs point process models to point pattern data using \pkg{spatstat}, see Baddeley and Turner (2005b) or Baddeley (2008). \bold{To fit a Poisson or Gibbs point process model:} Model fitting in \pkg{spatstat} is performed mainly by the function \code{\link{ppm}}. Its result is an object of class \code{"ppm"}. Here are some examples, where \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{ppm(X)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{ppm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{ppm(X ~ 1, Strauss(0.1))} \tab Stationary Strauss process \cr \code{ppm(X ~ x, Strauss(0.1))} \tab Strauss process with \cr \tab conditional intensity loglinear in \eqn{x} } It is also possible to fit models that depend on other covariates. \bold{Manipulating the fitted model:} \tabular{ll}{ \code{\link{plot.ppm}} \tab Plot the fitted model\cr \code{\link{predict.ppm}} \tab Compute the spatial trend and conditional intensity\cr \tab of the fitted point process model \cr \code{\link{coef.ppm}} \tab Extract the fitted model coefficients\cr \code{\link{parameters}} \tab Extract all model parameters\cr \code{\link{formula.ppm}} \tab Extract the trend formula\cr \code{\link{intensity.ppm}} \tab Compute fitted intensity \cr \code{\link{Kmodel.ppm}} \tab \eqn{K} function of fitted model \cr \code{\link{pcfmodel.ppm}} \tab pair correlation of fitted model \cr \code{\link{fitted.ppm}} \tab Compute fitted conditional intensity at quadrature points \cr \code{\link{residuals.ppm}} \tab Compute point process residuals at quadrature points \cr \code{\link{update.ppm}} \tab Update the fit \cr \code{\link{vcov.ppm}} \tab Variance-covariance matrix of estimates\cr \code{\link{rmh.ppm}} \tab Simulate from fitted model \cr \code{\link{simulate.ppm}} \tab Simulate from fitted model \cr 
\code{\link{print.ppm}} \tab Print basic information about a fitted model\cr \code{\link{summary.ppm}} \tab Summarise a fitted model\cr \code{\link{effectfun}} \tab Compute the fitted effect of one covariate\cr \code{\link{logLik.ppm}} \tab log-likelihood or log-pseudolikelihood\cr \code{\link{anova.ppm}} \tab Analysis of deviance \cr \code{\link{model.frame.ppm}} \tab Extract data frame used to fit model \cr \code{\link{model.images}} \tab Extract spatial data used to fit model \cr \code{\link{model.depends}} \tab Identify variables in the model \cr \code{\link{as.interact}} \tab Interpoint interaction component of model \cr \code{\link{fitin}} \tab Extract fitted interpoint interaction \cr \code{\link{is.hybrid}} \tab Determine whether the model is a hybrid \cr \code{\link{valid.ppm}} \tab Check the model is a valid point process \cr \code{\link{project.ppm}} \tab Ensure the model is a valid point process } For model selection, you can also use the generic functions \code{\link{step}}, \code{\link{drop1}} and \code{\link{AIC}} on fitted point process models. For variable selection, see \code{\link{sdr}}. See \code{\link{spatstat.options}} to control plotting of fitted model. \bold{To specify a point process model:} The first order ``trend'' of the model is determined by an \R language formula. The formula specifies the form of the \emph{logarithm} of the trend. 
\tabular{ll}{ \code{X ~ 1} \tab No trend (stationary) \cr \code{X ~ x} \tab Loglinear trend \eqn{\lambda(x,y) = \exp(\alpha + \beta x)}{lambda(x,y) = exp(alpha + beta * x)} \cr \tab where \eqn{x,y} are Cartesian coordinates \cr \code{X ~ polynom(x,y,3)} \tab Log-cubic polynomial trend \cr \code{X ~ harmonic(x,y,2)} \tab Log-harmonic polynomial trend \cr \code{X ~ Z} \tab Loglinear function of covariate \code{Z} \cr \tab \eqn{\lambda(x,y) = \exp(\alpha + \beta Z(x,y))}{lambda(x,y) = exp(alpha + beta * Z(x,y))} } The higher order (``interaction'') components are described by an object of class \code{"interact"}. Such objects are created by: \tabular{ll}{ \code{\link{Poisson}()} \tab the Poisson point process\cr \code{\link{AreaInter}()} \tab Area-interaction process\cr \code{\link{BadGey}()} \tab multiscale Geyer process\cr \code{\link{Concom}()} \tab connected component interaction\cr \code{\link{DiggleGratton}() } \tab Diggle-Gratton potential \cr \code{\link{DiggleGatesStibbard}() } \tab Diggle-Gates-Stibbard potential \cr \code{\link{Fiksel}()} \tab Fiksel pairwise interaction process\cr \code{\link{Geyer}()} \tab Geyer's saturation process\cr \code{\link{Hardcore}()} \tab Hard core process\cr \code{\link{HierHard}()} \tab Hierarchical multitype hard core process\cr \code{\link{HierStrauss}()} \tab Hierarchical multitype Strauss process\cr \code{\link{HierStraussHard}()} \tab Hierarchical multitype Strauss-hard core process\cr \code{\link{Hybrid}()} \tab Hybrid of several interactions\cr \code{\link{LennardJones}() } \tab Lennard-Jones potential \cr \code{\link{MultiHard}()} \tab multitype hard core process \cr \code{\link{MultiStrauss}()} \tab multitype Strauss process \cr \code{\link{MultiStraussHard}()} \tab multitype Strauss/hard core process \cr \code{\link{OrdThresh}()} \tab Ord process, threshold potential\cr \code{\link{Ord}()} \tab Ord model, user-supplied potential \cr \code{\link{PairPiece}()} \tab pairwise interaction, piecewise constant \cr 
\code{\link{Pairwise}()} \tab pairwise interaction, user-supplied potential\cr \code{\link{Penttinen}()} \tab Penttinen pairwise interaction\cr \code{\link{SatPiece}()} \tab Saturated pair model, piecewise constant potential\cr \code{\link{Saturated}()} \tab Saturated pair model, user-supplied potential\cr \code{\link{Softcore}()} \tab pairwise interaction, soft core potential\cr \code{\link{Strauss}()} \tab Strauss process \cr \code{\link{StraussHard}()} \tab Strauss/hard core point process \cr \code{\link{Triplets}()} \tab Geyer triplets process } Note that it is also possible to combine several such interactions using \code{\link{Hybrid}}. \bold{Finer control over model fitting:} A quadrature scheme is represented by an object of class \code{"quad"}. To create a quadrature scheme, typically use \code{\link{quadscheme}}. \tabular{ll}{ \code{\link{quadscheme}} \tab default quadrature scheme \cr \tab using rectangular cells or Dirichlet cells\cr \code{\link{pixelquad}} \tab quadrature scheme based on image pixels \cr \code{\link{quad}} \tab create an object of class \code{"quad"} } To inspect a quadrature scheme: \tabular{ll}{ \code{plot(Q)} \tab plot quadrature scheme \code{Q}\cr \code{print(Q)} \tab print basic information about quadrature scheme \code{Q}\cr \code{\link{summary}(Q)} \tab summary of quadrature scheme \code{Q} } A quadrature scheme consists of data points, dummy points, and weights. 
To generate dummy points: \tabular{ll}{ \code{\link{default.dummy}} \tab default pattern of dummy points \cr \code{\link{gridcentres}} \tab dummy points in a rectangular grid \cr \code{\link{rstrat}} \tab stratified random dummy pattern \cr \code{\link{spokes}} \tab radial pattern of dummy points \cr \code{\link{corners}} \tab dummy points at corners of the window } To compute weights: \tabular{ll}{ \code{\link{gridweights}} \tab quadrature weights by the grid-counting rule \cr \code{\link{dirichletWeights}} \tab quadrature weights are Dirichlet tile areas } \bold{Simulation and goodness-of-fit for fitted models:} \tabular{ll}{ \code{\link{rmh.ppm}} \tab simulate realisations of a fitted model \cr \code{\link{simulate.ppm}} \tab simulate realisations of a fitted model \cr \code{\link{envelope}} \tab compute simulation envelopes for a fitted model } \bold{Point process models on a linear network:} An object of class \code{"lpp"} represents a pattern of points on a linear network. Point process models can also be fitted to these objects. Currently only Poisson models can be fitted. \tabular{ll}{ \code{\link{lppm}} \tab point process model on linear network \cr \code{\link{anova.lppm}} \tab analysis of deviance for \cr \tab point process model on linear network \cr \code{\link{envelope.lppm}} \tab simulation envelopes for \cr \tab point process model on linear network \cr \code{\link{fitted.lppm}} \tab fitted intensity values \cr \code{\link{predict.lppm}} \tab model prediction on linear network \cr \code{\link{linim}} \tab pixel image on linear network \cr \code{\link{plot.linim}} \tab plot a pixel image on linear network \cr \code{\link{eval.linim}} \tab evaluate expression involving images \cr \code{\link{linfun}} \tab function defined on linear network \cr \code{\link{methods.linfun}} \tab conversion facilities } } \section{V. 
MODEL FITTING (DETERMINANTAL POINT PROCESS MODELS)}{ Code for fitting \emph{determinantal point process models} has recently been added to \pkg{spatstat}. For information, see the help file for \code{\link{dppm}}. } \section{VI. MODEL FITTING (SPATIAL LOGISTIC REGRESSION)}{ \bold{Logistic regression} Pixel-based spatial logistic regression is an alternative technique for analysing spatial point patterns that is widely used in Geographical Information Systems. It is approximately equivalent to fitting a Poisson point process model. In pixel-based logistic regression, the spatial domain is divided into small pixels, the presence or absence of a data point in each pixel is recorded, and logistic regression is used to model the presence/absence indicators as a function of any covariates. Facilities for performing spatial logistic regression are provided in \pkg{spatstat} for comparison purposes. \bold{Fitting a spatial logistic regression} Spatial logistic regression is performed by the function \code{\link{slrm}}. Its result is an object of class \code{"slrm"}. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{simulate}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. 
For example, if \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{slrm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{slrm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{slrm(X ~ Z)} \tab Poisson process with \cr \tab intensity loglinear in covariate \code{Z} } \bold{Manipulating a fitted spatial logistic regression} \tabular{ll}{ \code{\link{anova.slrm}} \tab Analysis of deviance \cr \code{\link{coef.slrm}} \tab Extract fitted coefficients \cr \code{\link{vcov.slrm}} \tab Variance-covariance matrix of fitted coefficients \cr \code{\link{fitted.slrm}} \tab Compute fitted probabilities or intensity \cr \code{\link{logLik.slrm}} \tab Evaluate loglikelihood of fitted model \cr \code{\link{plot.slrm}} \tab Plot fitted probabilities or intensity \cr \code{\link{predict.slrm}} \tab Compute predicted probabilities or intensity with new data \cr \code{\link{simulate.slrm}} \tab Simulate model } There are many other undocumented methods for this class, including methods for \code{print}, \code{update}, \code{formula} and \code{terms}. Stepwise model selection is possible using \code{step} or \code{stepAIC}. For variable selection, see \code{\link{sdr}}. } \section{VII. SIMULATION}{ There are many ways to generate a random point pattern, line segment pattern, pixel image or tessellation in \pkg{spatstat}. 
\bold{Random point patterns:} \tabular{ll}{ \code{\link{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link{runifdisc}} \tab generate \eqn{n} independent uniform random points in disc\cr \code{\link{rstrat}} \tab stratified random sample of points \cr \code{\link{rsyst}} \tab systematic random sample (grid) of points \cr \code{\link{rMaternI}} \tab simulate the \Matern Model I inhibition process\cr \code{\link{rMaternII}} \tab simulate the \Matern Model II inhibition process\cr \code{\link{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link{rHardcore}} \tab simulate hard core process (perfect simulation)\cr \code{\link{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link{rStraussHard}} \tab simulate Strauss-hard core process (perfect simulation)\cr \code{\link{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link{rPenttinen}} \tab simulate Penttinen process (perfect simulation)\cr \code{\link{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link{rMatClust}} \tab simulate the \Matern Cluster process\cr \code{\link{rThomas}} \tab simulate the Thomas process \cr \code{\link{rLGCP}} \tab simulate the log-Gaussian Cox process \cr \code{\link{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link{rCauchy}} \tab simulate Neyman-Scott process with Cauchy clusters \cr \code{\link{rVarGamma}} \tab simulate Neyman-Scott process with Variance Gamma clusters \cr \code{\link{rcell}} \tab simulate the 
Baddeley-Silverman cell process \cr \code{\link{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{Resampling a point pattern:} \tabular{ll}{ \code{\link{quadratresample}} \tab block resampling \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rshift}} \tab random shifting of (subsets of) points\cr \code{\link{rthin}} \tab random thinning } See also \code{\link{varblock}} for estimating the variance of a summary statistic by block resampling, and \code{\link{lohboot}} for another bootstrap technique. \bold{Fitted point process models:} If you have fitted a point process model to a point pattern dataset, the fitted model can be simulated. Cluster process models are fitted by the function \code{\link{kppm}} yielding an object of class \code{"kppm"}. To generate one or more simulated realisations of this fitted model, use \code{\link{simulate.kppm}}. Gibbs point process models are fitted by the function \code{\link{ppm}} yielding an object of class \code{"ppm"}. To generate a simulated realisation of this fitted model, use \code{\link{rmh}}. To generate one or more simulated realisations of the fitted model, use \code{\link{simulate.ppm}}. 
\bold{Other random patterns:} \tabular{ll}{ \code{\link{rlinegrid}} \tab generate a random array of parallel lines through a window \cr \code{\link{rpoisline}} \tab simulate the Poisson line process within a window \cr \code{\link{rpoislinetess}} \tab generate random tessellation using Poisson line process \cr \code{\link{rMosaicSet}} \tab generate random set by selecting some tiles of a tessellation \cr \code{\link{rMosaicField}} \tab generate random pixel image by assigning random values in each tile of a tessellation } \bold{Simulation-based inference} \tabular{ll}{ \code{\link{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link{bits.envelope}} \tab critical envelope for balanced two-stage Monte Carlo test \cr \code{\link{qqplot.ppm}} \tab diagnostic plot for interpoint interaction \cr \code{\link{scan.test}} \tab spatial scan statistic/test \cr \code{\link{studpermu.test}} \tab studentised permutation test\cr \code{\link{segregation.test}} \tab test of segregation of types } } \section{VIII. 
TESTS AND DIAGNOSTICS}{ \bold{Hypothesis tests:} \tabular{ll}{ \code{\link{quadrat.test}} \tab \eqn{\chi^2}{chi^2} goodness-of-fit test on quadrat counts \cr \code{\link{clarkevans.test}} \tab Clark and Evans test \cr \code{\link{cdf.test}} \tab Spatial distribution goodness-of-fit test\cr \code{\link{berman.test}} \tab Berman's goodness-of-fit tests\cr \code{\link{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link{scan.test}} \tab spatial scan statistic/test \cr \code{\link{dclf.test}} \tab Diggle-Cressie-Loosmore-Ford test \cr \code{\link{mad.test}} \tab Mean Absolute Deviation test \cr \code{\link{anova.ppm}} \tab Analysis of Deviance for point process models } More recently-developed tests: \tabular{ll}{ \code{\link{dg.test}} \tab Dao-Genton test \cr \code{\link{bits.test}} \tab Balanced independent two-stage test \cr \code{\link{dclf.progress}} \tab Progress plot for DCLF test \cr \code{\link{mad.progress}} \tab Progress plot for MAD test \cr } \bold{Sensitivity diagnostics:} Classical measures of model sensitivity such as leverage and influence have been adapted to point process models. \tabular{ll}{ \code{\link{leverage.ppm}} \tab Leverage for point process model\cr \code{\link{influence.ppm}} \tab Influence for point process model\cr \code{\link{dfbetas.ppm}} \tab Parameter influence\cr \code{\link{dffit.ppm}} \tab Effect change diagnostic } \bold{Diagnostics for covariate effect:} Classical diagnostics for covariate effects have been adapted to point process models. \tabular{ll}{ \code{\link{parres}} \tab Partial residual plot\cr \code{\link{addvar}} \tab Added variable plot \cr \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link{rho2hat}} \tab Kernel estimate of covariate effect (bivariate) } \bold{Residual diagnostics:} Residuals for a fitted point process model, and diagnostic plots based on the residuals, were introduced in Baddeley et al (2005) and Baddeley, Rubak and \Moller (2011). 
Type \code{demo(diagnose)} for a demonstration of the diagnostics features. \tabular{ll}{ \code{\link{diagnose.ppm}} \tab diagnostic plots for spatial trend\cr \code{\link{qqplot.ppm}} \tab diagnostic Q-Q plot for interpoint interaction\cr \code{\link[spatstat.data]{residualspaper}} \tab examples from Baddeley et al (2005) \cr \code{\link{Kcom}} \tab model compensator of \eqn{K} function \cr \code{\link{Gcom}} \tab model compensator of \eqn{G} function \cr \code{\link{Kres}} \tab score residual of \eqn{K} function \cr \code{\link{Gres}} \tab score residual of \eqn{G} function \cr \code{\link{psst}} \tab pseudoscore residual of summary function \cr \code{\link{psstA}} \tab pseudoscore residual of empty space function \cr \code{\link{psstG}} \tab pseudoscore residual of \eqn{G} function \cr \code{\link{compareFit}} \tab compare compensators of several fitted models } \bold{Resampling and randomisation procedures} You can build your own tests based on randomisation and resampling using the following capabilities: \tabular{ll}{ \code{\link{quadratresample}} \tab block resampling \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rshift}} \tab random shifting of (subsets of) points\cr \code{\link{rthin}} \tab random thinning } } \section{IX. DOCUMENTATION}{ The online manual entries are quite detailed and should be consulted first for information about a particular function. The book Baddeley, Rubak and Turner (2015) is a complete course on analysing spatial point patterns, with full details about \pkg{spatstat}. Older material (which is now out-of-date but is freely available) includes Baddeley and Turner (2005a), a brief overview of the package in its early development; Baddeley and Turner (2005b), a more detailed explanation of how to fit point process models to data; and Baddeley (2010), a complete set of notes from a 2-day workshop on the use of \pkg{spatstat}. 
Type \code{citation("spatstat")} to get a list of these references. } \references{ Baddeley, A. (2010) \emph{Analysing spatial point patterns in R}. Workshop notes, Version 4.1. Online technical publication, CSIRO. \url{https://research.csiro.au/software/wp-content/uploads/sites/6/2015/02/Rspatialcourse_CMIS_PDF-Standard.pdf} Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. Baddeley, A. and Turner, R. (2005a) Spatstat: an R package for analyzing spatial point patterns. \emph{Journal of Statistical Software} \bold{12}:6, 1--42. URL: \code{www.jstatsoft.org}, ISSN: 1548-7660. Baddeley, A. and Turner, R. (2005b) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J. (2014) \emph{Statistical Analysis of Spatial and Spatio-Temporal Point Patterns}, Third edition. {Chapman and Hall/CRC}. Gelfand, A.E., Diggle, P.J., Fuentes, M. and Guttorp, P., editors (2010) \emph{Handbook of Spatial Statistics}. CRC Press. Huang, F. and Ogata, Y. 
(1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. Waagepetersen, R. An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63} (2007) 252--258. } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. } \author{ \spatstatAuthors. } \section{Acknowledgements}{ Kasper Klitgaard Berthelsen, Ottmar Cronie, Tilman Davies, Yongtao Guan, Ute Hahn, Abdollah Jalilian, Marie-Colette van Lieshout, Greg McSwiggan, Tuomas Rajala, Suman Rakshit, Dominic Schuhmacher, Rasmus Waagepetersen and Hangsheng Wang made substantial contributions of code. Additional contributions and suggestions from Monsuru Adepeju, Corey Anderson, Ang Qi Wei, Ryan Arellano, Jens \ifelse{latex}{\out{{\AA}str{\" o}m}}{Astrom}, Marcel Austenfeld, Sandro Azaele, Malissa Baddeley, Guy Bayegnak, Colin Beale, Melanie Bell, Thomas Bendtsen, Ricardo Bernhardt, Andrew Bevan, Brad Biggerstaff, Anders Bilgrau, Leanne Bischof, Christophe Biscio, Roger Bivand, Jose M. Blanco Moreno, Florent Bonneu, Jordan Brown, Julian Burgos, Simon Byers, Ya-Mei Chang, Jianbao Chen, Igor Chernayavsky, Y.C. 
Chin, Bjarke Christensen, \ifelse{latex}{\out{Luc\'{\i{}}a}}{Lucia} Cobo Sanchez, Jean-Francois Coeurjolly, Kim Colyvas, Hadrien Commenges, Rochelle Constantine, Robin Corria Ainslie, Richard Cotton, Marcelino de la Cruz, Peter Dalgaard, Mario D'Antuono, Sourav Das, Peter Diggle, Patrick Donnelly, Ian Dryden, Stephen Eglen, Ahmed El-Gabbas, Belarmain Fandohan, Olivier Flores, David Ford, Peter Forbes, Shane Frank, Janet Franklin, Funwi-Gabga Neba, Oscar Garcia, Agnes Gault, Jonas Geldmann, Marc Genton, Shaaban Ghalandarayeshi, Julian Gilbey, Jason Goldstick, Pavel Grabarnik, C. Graf, Ute Hahn, Andrew Hardegen, Martin \ifelse{latex}{\out{B{\o}gsted}}{Bogsted} Hansen, Martin Hazelton, Juha Heikkinen, Mandy Hering, Markus Herrmann, Maximilian Hesselbarth, Paul Hewson, Hamidreza Heydarian, Kassel Hingee, Kurt Hornik, Philipp Hunziker, Jack Hywood, Ross Ihaka, \ifelse{latex}{\out{\u{C}enk I\c{c}\"{o}s}}{Cenk Icos}, Aruna Jammalamadaka, Robert John-Chandran, Devin Johnson, Mahdieh Khanmohammadi, Bob Klaver, Lily Kozmian-Ledward, Peter Kovesi, Mike Kuhn, Jeff Laake, Robert Lamb, \ifelse{latex}{\out{Fr\'{e}d\'{e}ric}}{Frederic} Lavancier, Tom Lawrence, Tomas Lazauskas, Jonathan Lee, George Leser, Angela Li, Li Haitao, George Limitsios, Andrew Lister, Nestor Luambua, Ben Madin, Martin Maechler, Kiran Marchikanti, Jeff Marcus, Robert Mark, Peter McCullagh, Monia Mahling, Jorge Mateu Mahiques, Ulf Mehlig, Frederico Mestre, Sebastian Wastl Meyer, Mi Xiangcheng, Lore De Middeleer, Robin Milne, Enrique Miranda, Jesper \Moller, Annie \ifelse{latex}{\out{Molli{\'e}}}{Mollie}, Ines Moncada, Mehdi Moradi, Virginia Morera Pujol, Erika Mudrak, Gopalan Nair, Nader Najari, Nicoletta Nava, Linda Stougaard Nielsen, Felipe Nunes, Jens Randel Nyengaard, Jens \ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}, Thierry Onkelinx, Sean O'Riordan, Evgeni Parilov, Jeff Picka, Nicolas Picard, Tim Pollington, Mike Porter, Sergiy Protsiv, Adrian Raftery, Suman Rakshit, Ben Ramage, Pablo Ramon, 
Xavier Raynaud, Nicholas Read, Matt Reiter, Ian Renner, Tom Richardson, Brian Ripley, Ted Rosenbaum, Barry Rowlingson, Jason Rudokas, John Rudge, Christopher Ryan, Farzaneh Safavimanesh, Aila \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, Cody Schank, Katja Schladitz, Sebastian Schutte, Bryan Scott, Olivia Semboli, \ifelse{latex}{\out{Fran\c{c}ois S\'{e}m\'{e}curbe}}{Francois Semecurbe}, Vadim Shcherbakov, Shen Guochun, Shi Peijian, Harold-Jeffrey Ship, Tammy L Silva, Ida-Maria Sintorn, Yong Song, Malte Spiess, Mark Stevenson, Kaspar Stucki, Jan Sulavik, Michael Sumner, P. Surovy, Ben Taylor, Thordis Linda Thorarinsdottir, Leigh Torres, Berwin Turlach, Torben Tvedebrink, Kevin Ummer, Medha Uppala, Andrew van Burgel, Tobias Verbeke, Mikko Vihtakari, Alexendre Villers, Fabrice Vinatier, Sasha Voss, Sven Wagner, Hao Wang, H. Wendrock, Jan Wild, Carl G. Witthoft, Selene Wong, Maxime Woringer, Luke Yates, Mike Zamboni and Achim Zeileis. } \keyword{spatial} \keyword{package} spatstat/man/localKdot.Rd0000644000176200001440000001144213503055420015072 0ustar liggesusers\name{localKdot} \alias{localKdot} \alias{localLdot} \title{Local Multitype K Function (Dot-Type)} \description{ for a multitype point pattern, computes the dot-type version of the local K function. } \usage{ localKdot(X, from, \dots, rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localLdot(X, from, \dots, rmax = NULL, correction = "Ripley") } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} with marks which are a factor). } \item{\dots}{ Further arguments passed from \code{localLdot} to \code{localKdot}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{correction}{ String specifying the edge correction to be applied. 
Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{ Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ Given a multitype spatial point pattern \code{X}, the local dot-type \eqn{K} function \code{localKdot} is the local version of the multitype \eqn{K} function \code{\link{Kdot}}. Recall that \code{Kdot(X, from)} is a sum of contributions from all pairs of points in \code{X} where the first point belongs to \code{from}. The \emph{local} dot-type \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and it consists of all the contributions to the dot-type \eqn{K} function that originate from point \code{X[i]}: \deqn{ K_{i,from,to}(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ K[i,from,to](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{K_{i,from}(r)}{K[i,from](r)} can also be interpreted as one of the summands that contributes to the global estimate of the \code{\link{Kdot}} function. By default, the function \eqn{K_{i,from}(r)}{K[i,from](r)} is computed for a range of \eqn{r} values for each point \eqn{i} belonging to type \code{from}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} belonging to type \code{from}. 
Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X} belonging to type \code{from}. The local dot-type \eqn{L} function \code{localLdot} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/(2\pi)}}{L(r) = sqrt(K(r)/(2*pi))}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern that belong to type \code{from}. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kdot}}, \code{\link{Ldot}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{ X <- amacrine # compute all the local Ldot functions L <- localLdot(X) # plot all the local Ldot functions against r plot(L, main="local Ldot functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 0.1 metres L12 <- localLdot(X, rvalue=0.1) } \author{ \ege and \adrian. 
} \keyword{spatial} \keyword{nonparametric} spatstat/man/nndist.lpp.Rd0000644000176200001440000000343613333543263015263 0ustar liggesusers\name{nndist.lpp} \alias{nndist.lpp} \title{ Nearest neighbour distances on a linear network } \description{ Given a pattern of points on a linear network, compute the nearest-neighbour distances, measured by the shortest path in the network. } \usage{ \method{nndist}{lpp}(X, ..., k=1, method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the nearest neighbour distance for each point (i.e. the distance from each point to the nearest other point), measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. The \code{k}th nearest neighbour distance is infinite if the \code{k}th nearest neighbour does not exist. This can occur if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. } \value{ A numeric vector, of length equal to the number of points in \code{X}, or a matrix, with one row for each point in \code{X} and one column for each entry of \code{k}. Entries are nonnegative numbers or infinity (\code{Inf}). 
} \author{ \adrian } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(12, simplenet) nndist(X) nndist(X, k=2) } \keyword{spatial} spatstat/man/rhohat.Rd0000644000176200001440000004226413535577366014500 0ustar liggesusers\name{rhohat} \alias{rhohat} \alias{rhohat.ppp} \alias{rhohat.quad} \alias{rhohat.ppm} \alias{rhohat.lpp} \alias{rhohat.lppm} \concept{Resource Selection Function} \concept{Prospectivity} \title{ Nonparametric Estimate of Intensity as Function of a Covariate } \description{ Computes a nonparametric estimate of the intensity of a point process, as a function of a (continuous) spatial covariate. } \usage{ rhohat(object, covariate, ...) \method{rhohat}{ppp}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI) \method{rhohat}{quad}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI) \method{rhohat}{ppm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI) \method{rhohat}{lpp}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI) \method{rhohat}{lppm}(object, 
covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{weights}{ Optional weights attached to the data points. Either a numeric vector of weights for each data point, or a pixel image (object of class \code{"im"}) or a \code{function(x,y)} providing the weights. } \item{baseline}{ Optional baseline for intensity function. A \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the baseline at any location. } \item{method}{ Character string determining the smoothing method. See Details. } \item{horvitz}{ Logical value indicating whether to use Horvitz-Thompson weights. See Details. } \item{smoother}{ Character string determining the smoothing algorithm. See Details. } \item{subset}{ Optional. A spatial window (object of class \code{"owin"}) specifying a subset of the data, from which the estimate should be calculated. } \item{dimyx,eps,nd,random}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). 
} \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } \item{positiveCI}{ Logical value. If \code{TRUE}, confidence limits are always positive numbers; if \code{FALSE}, the lower limit of the confidence interval may sometimes be negative. Default is \code{FALSE} if \code{smoother="kernel"} and \code{TRUE} if \code{smoother="local"}. See Details. } } \details{ This command estimates the relationship between point process intensity and a given spatial covariate. Such a relationship is sometimes called a \emph{resource selection function} (if the points are organisms and the covariate is a descriptor of habitat) or a \emph{prospectivity index} (if the points are mineral deposits and the covariate is a geological variable). This command uses nonparametric methods which do not assume a particular form for the relationship. If \code{object} is a point pattern, and \code{baseline} is missing or null, this command assumes that \code{object} is a realisation of a point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is the resource selection function or prospectivity index. A nonparametric estimator of the function \eqn{\rho(z)}{rho(z)} is computed. 
If \code{object} is a point pattern, and \code{baseline} is given, then the intensity function is assumed to be \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)} where \eqn{B(u)} is the baseline intensity at location \eqn{u}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. The nonparametric estimation procedure is controlled by the arguments \code{smoother}, \code{method} and \code{horvitz}. The argument \code{smoother} selects the type of estimation technique. \itemize{ \item If \code{smoother="kernel"} (the default) or \code{smoother="local"}, the nonparametric estimator is a \emph{smoothing estimator} of \eqn{\rho(z)}{rho(z)}, effectively a kind of density estimator (Baddeley et al, 2012). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. Confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="increasing"} or \code{smoother="decreasing"}, we use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). This assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. See the section on \emph{Monotone estimates}. } See Baddeley (2018) for a comparison of these estimation techniques. 
If the argument \code{weights} is present, then the contribution from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is multiplied by \code{weights[i]}. If the argument \code{subset} is present, then the calculations are performed using only the data inside this spatial region. This technique assumes that \code{covariate} has continuous values. It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. For a categorical covariate, use \code{\link{intensity.quadratcount}} applied to the result of \code{\link{quadratcount}(X, tess=covariate)}. The argument \code{covariate} should be a pixel image, or a function, or one of the strings \code{"x"} or \code{"y"} signifying the cartesian coordinates. It will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{dimyx,eps,nd,random}. In two dimensions (i.e. if \code{object} is of class \code{"ppp"}, \code{"ppm"} or \code{"quad"}) the arguments \code{dimyx, eps} are passed to \code{\link{as.mask}} to control the pixel resolution. On a linear network (i.e. if \code{object} is of class \code{"lpp"}) the argument \code{nd} specifies the total number of test locations on the linear network, \code{eps} specifies the linear separation between test locations, and \code{random} specifies whether the test locations have a randomised starting position. } \section{Smooth estimates}{ Smooth estimators of \eqn{\rho(z)}{rho(z)} were proposed by Baddeley and Turner (2005) and Baddeley et al (2012). Similar estimators were proposed by Guan (2008) and in the literature on relative distributions (Handcock and Morris, 1999). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. The smooth estimation procedure involves computing several density estimates and combining them. 
The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{\link[locfit]{locfit}}. } The argument \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. See Baddeley et al (2012), equation (8). This is similar but not identical to an estimator proposed by Guan (2008). \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. See Baddeley et al (2012), equation (9). \item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. See Baddeley et al (2012), equation (10). } If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. 
The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Pointwise confidence intervals for the true value of \eqn{\rho(z)} are also calculated for each \eqn{z}, and will be plotted as grey shading. The confidence intervals are derived using the central limit theorem, based on variance calculations which assume a Poisson point process. If \code{positiveCI=FALSE}, the lower limit of the confidence interval may sometimes be negative, because the confidence intervals are based on a normal approximation to the estimate of \eqn{\rho(z)}. If \code{positiveCI=TRUE}, the confidence limits are always positive, because the confidence interval is based on a normal approximation to the estimate of \eqn{\log(\rho(z))}{log(rho(z))}. For consistency with earlier versions, the default is \code{positiveCI=FALSE} for \code{smoother="kernel"} and \code{positiveCI=TRUE} for \code{smoother="local"}. } \section{Monotone estimates}{ The nonparametric maximum likelihood estimator of a monotone function \eqn{\rho(z)}{rho(z)} was described by Sager (1982). This method assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. This estimator is chosen by specifying \code{smoother="increasing"} or \code{smoother="decreasing"}. The argument \code{method} is ignored in this case. To compute the estimate of \eqn{\rho(z)}{rho(z)}, the algorithm first computes several primitive step-function estimates, and then takes the maximum of these primitive functions. 
If \code{smoother="decreasing"}, each primitive step function takes the form \eqn{\rho(z) = \lambda}{rho(z) = lambda} when \eqn{z \le t}, and \eqn{\rho(z) = 0}{rho(z) = 0} when \eqn{z > t}, where \eqn{\lambda}{lambda} is a primitive estimate of intensity based on the data for \eqn{Z \le t}{Z <= t}. The jump location \eqn{t} will be the value of the covariate \eqn{Z} at one of the data points. The primitive estimate \eqn{\lambda}{lambda} is the average intensity (number of points divided by area) for the region of space where the covariate value is less than or equal to \eqn{t}. If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Confidence intervals are not available for the monotone estimators. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} (and confidence limits) for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A. (2018) A statistical commentary on mineral prospectivity analysis. Chapter 2, pages 25--65 in \emph{Handbook of Mathematical Geosciences: Fifty Years of IAMG}, edited by B.S. 
Daya Sagar, Q. Cheng and F.P. Agterberg. Springer, Berlin. Guan, Y. (2008) On consistent nonparametric intensity estimation for inhomogeneous spatial point processes. \emph{Journal of the American Statistical Association} \bold{103}, 1238--1247. Handcock, M.S. and Morris, M. (1999) \emph{Relative Distribution Methods in the Social Sciences}. Springer, New York. Sager, T.W. (1982) Nonparametric maximum likelihood estimation of spatial patterns. \emph{Annals of Statistics} \bold{10}, 1125--1136. } \author{ Smoothing algorithm by \adrian, Ya-Mei Chang, Yong Song, and \rolf. Nonparametric maximum likelihood algorithm by \adrian. } \seealso{ \code{\link{rho2hat}}, \code{\link{methods.rhohat}}, \code{\link{parres}}. See \code{\link{ppm}} for a parametric method for the same problem. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, "x") rho <- rhohat(X, function(x,y){x}) plot(rho) curve(exp(3+3*x), lty=3, col=2, add=TRUE) rhoB <- rhohat(X, "x", method="reweight") rhoC <- rhohat(X, "x", method="transform") rhoM <- rhohat(X, "x", smoother="increasing") plot(rhoM, add=TRUE, col=5) \testonly{rh <- rhohat(X, "x", dimyx=32)} fit <- ppm(X, ~x) rr <- rhohat(fit, "y") # linear network Y <- runiflpp(30, simplenet) rhoY <- rhohat(Y, "y") } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat/man/emend.Rd0000644000176200001440000000165713333543263014265 0ustar liggesusers\name{emend} \alias{emend} \title{ Force Model to be Valid } \description{ Check whether a model is valid, and if not, find the nearest model which is valid. } \usage{ emend(object, \dots) } \arguments{ \item{object}{ A statistical model, belonging to some class. } \item{\dots}{Arguments passed to methods.} } \details{ The function \code{emend} is generic, and has methods for several classes of statistical models in the \pkg{spatstat} package (mostly point process models). 
Its purpose is to check whether a given model is valid (for example, that none of the model parameters are \code{NA}) and, if not, to find the nearest model which is valid. See the methods for more information. } \value{ Another model of the same kind. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{emend.ppm}}, \code{\link{emend.lppm}}, \code{\link{valid}}. } \keyword{spatial} \keyword{models} spatstat/man/solapply.Rd0000644000176200001440000000325213333543264015032 0ustar liggesusers\name{solapply} \alias{solapply} \alias{anylapply} \title{ Apply a Function Over a List and Obtain a List of Objects } \description{ Applies the function \code{FUN} to each element of the list \code{X}, and returns the result as a list of class \code{"solist"} or \code{"anylist"} as appropriate. } \usage{ anylapply(X, FUN, \dots) solapply(X, FUN, \dots, check = TRUE, promote = TRUE, demote = FALSE) } \arguments{ \item{X}{A list.} \item{FUN}{ Function to be applied to each element of \code{X}. } \item{\dots}{ Additional arguments to \code{FUN}. } \item{check,promote,demote}{ Arguments passed to \code{\link{solist}} which determine how to handle different classes of objects. } } \details{ These convenience functions are similar to \code{\link[base]{lapply}} except that they return a list of class \code{"solist"} or \code{"anylist"}. In both functions, the result is computed by \code{lapply(X, FUN, \dots)}. In \code{anylapply} the result is converted to a list of class \code{"anylist"} and returned. In \code{solapply} the result is converted to a list of class \code{"solist"} \bold{if possible}, using \code{\link{as.solist}}. If this is not possible, then the behaviour depends on the argument \code{demote}. If \code{demote=TRUE} the result will be returned as a list of class \code{"anylist"}. If \code{demote=FALSE} (the default), an error occurs. } \value{ A list, usually of class \code{"solist"}. 
} \author{\adrian \rolf and \ege } \seealso{ \code{\link{solist}}, \code{\link{anylist}}. } \examples{ solapply(waterstriders, density) } \keyword{spatial} \keyword{list} \keyword{manip} spatstat/man/simulate.slrm.Rd0000644000176200001440000000503713333543264015771 0ustar liggesusers\name{simulate.slrm} \alias{simulate.slrm} \title{Simulate a Fitted Spatial Logistic Regression Model} \description{ Generates simulated realisations from a fitted spatial logistic regression model } \usage{ \method{simulate}{slrm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, drop=FALSE) } \arguments{ \item{object}{ Fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"slrm"} of fitted spatial logistic regression models. Simulations are performed by \code{\link{rpoispp}} after the intensity has been computed by \code{\link{predict.slrm}}. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). 
It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \examples{ X <- copper$SouthPoints fit <- slrm(X ~ 1) simulate(fit, 2) fitxy <- slrm(X ~ x+y) simulate(fitxy, 2, window=square(2)) } \seealso{ \code{\link{slrm}}, \code{\link{rpoispp}}, \code{\link{simulate.ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/quadrat.test.mppm.Rd0000644000176200001440000000752413333543265016565 0ustar liggesusers\name{quadrat.test.mppm} \alias{quadrat.test.mppm} \title{Chi-Squared Test for Multiple Point Process Model Based on Quadrat Counts} \description{ Performs a chi-squared goodness-of-fit test of a Poisson point process model fitted to multiple point patterns. } \usage{ \method{quadrat.test}{mppm}(X, ...) } \arguments{ \item{X}{ An object of class \code{"mppm"} representing a point process model fitted to multiple point patterns. It should be a Poisson model. } \item{\dots}{ Arguments passed to \code{\link{quadrat.test.ppm}} which determine the size of the quadrats. } } \details{ This function performs a \eqn{\chi^2}{chi^2} test of goodness-of-fit for a Poisson point process model, based on quadrat counts. It can also be used to perform a test of Complete Spatial Randomness for a list of point patterns. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), point process models (class \code{"ppm"}) and multiple point process models (class \code{"mppm"}). 
For this function, the argument \code{X} should be a multiple point process model (object of class \code{"mppm"}) obtained by fitting a point process model to a list of point patterns using the function \code{\link{mppm}}. To perform the test, the data point patterns are extracted from \code{X}. For each point pattern \itemize{ \item the window of observation is divided into rectangular tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. \item The expected number of points in each quadrat is calculated, as determined by the fitted model. } Then we perform a single \eqn{\chi^2}{chi^2} test of goodness-of-fit based on these observed and expected counts. } \section{Testing Complete Spatial Randomness}{ If the intention is to test Complete Spatial Randomness (CSR) there are two options: \itemize{ \item CSR with the same intensity of points in each point pattern; \item CSR with a different, unrelated intensity of points in each point pattern. } In the first case, suppose \code{P} is a list of point patterns we want to test. Then fit the multiple model \code{fit1 <- mppm(P, ~1)} which signifies a Poisson point process model with a constant intensity. Then apply \code{quadrat.test(fit1)}. In the second case, fit the model \code{fit2 <- mppm(P, ~id)} which signifies a Poisson point process with a different constant intensity for each point pattern. Then apply \code{quadrat.test(fit2)}. } \value{ An object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The \eqn{p}-value of the test is stored in the component \code{p.value}. The return value also belongs to the special class \code{"quadrat.test"}. Plotting the object will display, for each window, the position of the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. 
The return value also has an attribute \code{"components"} which is a list containing the results of \eqn{\chi^2}{chi^2} tests of goodness-of-fit for each individual point pattern. } \seealso{ \code{\link{mppm}}, \code{\link{quadrat.test}} } \examples{ H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ \adrian, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \adrian \rolf and \ege } \keyword{spatial} \keyword{htest} spatstat/man/psstA.Rd0000644000176200001440000001522513571674202014265 0ustar liggesusers\name{psstA} \Rdversion{1.1} \alias{psstA} \title{ Pseudoscore Diagnostic For Fitted Model against Area-Interaction Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of area-interaction type. } \usage{ psstA(object, r = NULL, breaks = NULL, \dots, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection = "border", correction = "all", truecoef = NULL, hi.res = NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. 
} \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Extra arguments passed to \code{\link{quadscheme}} to determine the quadrature scheme, if \code{object} is a point pattern. } \item{model}{ Optional. A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord,ppmcorrection}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{ppmcorrection}{ Optional. Character string specifying the edge correction for the pseudolikelihood to be used in fitting the point process model. Passed to \code{\link{ppm}}. } \item{correction}{ Optional. Character string specifying which diagnostic quantities will be computed. Options are \code{"all"} and \code{"best"}. The default is to compute all diagnostic quantities. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{nr}{ Optional. Number of \code{r} values to be used if \code{r} is not specified. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the approximate area. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. 
Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the area-interaction process (see \code{\link{AreaInter}}). The family of alternatives includes models that are slightly more regular than the fitted model, and others that are slightly more clustered than the fitted model. The pseudoscore, evaluated at the null model, is \deqn{ V(r) = \sum_i A(x_i, x, r) - \int_W A(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( A(x[i], x, r)) - integral( A(u,x,r) lambda(u,x) du) } where \deqn{ A(u,x,r) = B(x\cup\{u\},r) - B(x\setminus u, r) }{ A(u,x,r) = B(x union u, r) - B(x setminus u, r) } where \eqn{B(x,r)} is the area of the union of the discs of radius \eqn{r} centred at the points of \eqn{x} (i.e. \eqn{B(x,r)} is the area of the dilation of \eqn{x} by a distance \eqn{r}). Thus \eqn{A(u,x,r)} is the \emph{unclaimed area} associated with \eqn{u}, that is, the area of that part of the disc of radius \eqn{r} centred at the point \eqn{u} that is not covered by any of the discs of radius \eqn{r} centred at points of \eqn{x}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. 
} \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \section{Warning}{ This computation can take a \bold{very long time}. To shorten the computation time, choose smaller values of the arguments \code{nr} and \code{ngrid}, or reduce the values of their defaults \code{spatstat.options("psstA.nr")} and \code{spatstat.options("psstA.ngrid")}. Computation time is roughly proportional to \code{nr * npoints * ngrid^2} where \code{npoints} is the number of points in the point pattern. } \references{ Baddeley, A., Rubak, E. and \Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Alternative functions: \code{\link{psstG}}, \code{\link{psst}}, \code{\link{Gres}}, \code{\link{Kres}}. Point process models: \code{\link{ppm}}. Options: \code{\link{spatstat.options}} } \examples{ pso <- spatstat.options(psstA.ngrid=16,psstA.nr=10) X <- rStrauss(200,0.1,0.05) plot(psstA(X)) plot(psstA(X, interaction=Strauss(0.05))) spatstat.options(pso) } \keyword{spatial} \keyword{models} spatstat/man/density.lpp.Rd0000644000176200001440000001232213513111501015417 0ustar liggesusers\name{density.lpp} \alias{density.lpp} \alias{density.splitppx} \title{ Kernel Estimate of Intensity on a Linear Network } \description{ Estimates the intensity of a point process on a linear network by applying kernel smoothing to the point pattern data. 
} \usage{ \method{density}{lpp}(x, sigma=NULL, \dots, weights=NULL, distance=c("path", "euclidean"), kernel="gaussian", continuous=TRUE, epsilon = 1e-06, verbose = TRUE, debug = FALSE, savehistory = TRUE, old=FALSE) \method{density}{splitppx}(x, sigma=NULL, \dots) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}) to be smoothed. } \item{sigma}{ Smoothing bandwidth (standard deviation of the kernel) in the same units as the spatial coordinates of \code{x}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the resolution of the result. } \item{weights}{ Optional. Numeric vector of weights associated with the points of \code{x}. Weights may be positive, negative or zero. } \item{distance}{ Character string (partially matched) specifying whether to use a kernel based on paths in the network (\code{distance="path"}, the default) or a two-dimensional kernel (\code{distance="euclidean"}). } \item{kernel}{ Character string specifying the smoothing kernel. See \code{\link{dkernel}} for possible options. } \item{continuous}{ Logical value indicating whether to compute the \dQuote{equal-split continuous} smoother (\code{continuous=TRUE}, the default) or the \dQuote{equal-split discontinuous} smoother (\code{continuous=FALSE}). Applies only when \code{distance="path"}. } \item{epsilon}{ Tolerance value. A tail of the kernel with total mass less than \code{epsilon} may be deleted. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{debug}{ Logical value indicating whether to print debugging information. } \item{savehistory}{ Logical value indicating whether to save the entire history of the algorithm, for the purposes of evaluating performance. } \item{old}{ Logical value indicating whether to use the old, very slow algorithm for the equal-split continuous estimator. 
} } \details{ Kernel smoothing is applied to the points of \code{x} using either a kernel based on path distances in the network, or a two-dimensional kernel. The result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. \itemize{ \item If \code{distance="path"} (the default) then the smoothing is performed using a kernel based on path distances in the network, as described in described in Okabe and Sugihara (2012) and McSwiggan et al (2016). \itemize{ \item If \code{continuous=TRUE} (the default), smoothing is performed using the \dQuote{equal-split continuous} rule described in Section 9.2.3 of Okabe and Sugihara (2012). The resulting function is continuous on the linear network. \item If \code{continuous=FALSE}, smoothing is performed using the \dQuote{equal-split discontinuous} rule described in Section 9.2.2 of Okabe and Sugihara (2012). The resulting function is not continuous. \item In the default case (where \code{distance="path"} and \code{continuous=TRUE} and \code{kernel="gaussian"} and \code{old=FALSE}), computation is performed rapidly by solving the classical heat equation on the network, as described in McSwiggan et al (2016). Computational time is short, but increases quadratically with \code{sigma}. The arguments \code{epsilon,debug,verbose,savehistory} are ignored. \item In all other cases, computation is performed by path-tracing as described in Okabe and Sugihara (2012); computation can be extremely slow, and time increases exponentially with \code{sigma}. } \item If \code{distance="euclidean"}, the smoothing is performed using a two-dimensional kernel. The arguments are passed to \code{\link{densityQuick.lpp}} to perform the computation. See the help for \code{\link{densityQuick.lpp}} for further details. } There is also a method for split point patterns on a linear network (class \code{"splitppx"}) which will return a list of pixel images. 
} \value{ A pixel image on the linear network (object of class \code{"linim"}). } \references{ McSwiggan, G., Baddeley, A. and Nair, G. (2016) Kernel density estimation on a linear network. \emph{Scandinavian Journal of Statistics} \bold{44}, 324--345. Okabe, A. and Sugihara, K. (2012) \emph{Spatial analysis along networks}. Wiley. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{lpp}}, \code{\link{linim}}, \code{\link{densityQuick.lpp}} } \examples{ X <- runiflpp(3, simplenet) D <- density(X, 0.2, verbose=FALSE) plot(D, style="w", main="", adjust=2) Dw <- density(X, 0.2, weights=c(1,2,-1), verbose=FALSE) De <- density(X, 0.2, kernel="epanechnikov", verbose=FALSE) Ded <- density(X, 0.2, kernel="epanechnikov", continuous=FALSE, verbose=FALSE) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/delaunayNetwork.Rd0000644000176200001440000000250413333543263016341 0ustar liggesusers\name{delaunayNetwork} \alias{delaunayNetwork} \alias{dirichletNetwork} \title{ Linear Network of Delaunay Triangulation or Dirichlet Tessellation } \description{ Computes the edges of the Delaunay triangulation or Dirichlet tessellation of a point pattern, and returns the result as a linear network object. } \usage{ delaunayNetwork(X) dirichletNetwork(X, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{as.linnet.psp}}} } \details{ For \code{delaunayNetwork}, points of \code{X} which are neighbours in the Delaunay triangulation (see \code{\link{delaunay}}) will be joined by a straight line. The result will be returned as a linear network (object of class \code{"linnet"}). For \code{dirichletNetwork}, the Dirichlet tessellation is computed (see \code{\link{dirichlet}}) and the edges of the tiles of the tessellation are extracted. This is converted to a linear network using \code{\link{as.linnet.psp}}. } \value{ Linear network (object of class \code{"linnet"}) or \code{NULL}. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{delaunay}}, \code{\link{dirichlet}}, \code{\link{delaunayDistance}} } \examples{ LE <- delaunayNetwork(cells) LI <- dirichletNetwork(cells) } \keyword{spatial} \keyword{manip} spatstat/man/Smooth.fv.Rd0000644000176200001440000000550013333543264015050 0ustar liggesusers\name{Smooth.fv} \alias{Smooth.fv} \title{ Apply Smoothing to Function Values } \description{ Applies smoothing to the values in selected columns of a function value table. } \usage{ \method{Smooth}{fv}(X, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) } \arguments{ \item{X}{ Values to be smoothed. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be smoothed. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} or \code{\link[stats]{loess}} to control the smoothing. } \item{method}{ Smoothing algorithm. A character string, partially matched to either \code{"smooth.spline"} or \code{"loess"}. } \item{xinterval}{ Optional. Numeric vector of length 2 specifying a range of \eqn{x} values. Smoothing will be performed only on the part of the function corresponding to this range. } } \details{ The command \code{Smooth.fv} applies smoothing to the function values in a function value table (object of class \code{"fv"}). \code{Smooth.fv} is a method for the generic function \code{\link{Smooth}}. The smoothing is performed either by \code{\link[stats]{smooth.spline}} or by \code{\link[stats]{loess}}. Smoothing is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. 
The original function values are then replaced by the corresponding smooth interpolated function values. The optional argument \code{which} specifies which of the columns of function values in \code{x} will be smoothed. The default (indicated by the wildcard \code{which="*"}) is to smooth all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{x}. If the argument \code{xinterval} is given, then smoothing will be performed only in the specified range of \eqn{x} values. } \value{ Another function value table (object of class \code{"fv"}) of the same format. } \author{\adrian and \rolf } \seealso{ \code{\link{Smooth}}, \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}}, \code{\link[stats]{smooth.spline}} } \examples{ data(cells) G <- Gest(cells) plot(G) plot(Smooth(G, df=9), add=TRUE) } \keyword{spatial} \keyword{nonparametric} spatstat/man/rpoispp3.Rd0000644000176200001440000000300613333543264014743 0ustar liggesusers\name{rpoispp3} \alias{rpoispp3} \title{ Generate Poisson Point Pattern in Three Dimensions } \description{ Generate a random three-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoispp3(lambda, domain = box3(), nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern in three dimensions (an object of class \code{"pp3"}). 
If \code{nsim > 1}, a list of such point patterns. } \details{ This function generates a realisation of the homogeneous Poisson process in three dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the three-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"box3"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. } \seealso{ \code{\link{runifpoint3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- rpoispp3(50) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/valid.Rd0000644000176200001440000000211513333543264014263 0ustar liggesusers\name{valid} \alias{valid} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a point process model object corresponds to a valid point process. } \usage{ valid(object, \dots) } \arguments{ \item{object}{ Object of some class, describing a point process model. } \item{\dots}{ Additional arguments passed to methods. } } \details{ The function \code{valid} is generic, with methods for the classes \code{"ppm"} and \code{"dppmodel"}. An object representing a point process is called valid if all its parameter values are known (for example, no parameter takes the value \code{NA} or \code{NaN}) and the parameter values correspond to a well-defined point process (for example, the parameter values satisfy all the constraints that are imposed by mathematical theory.) See the methods for further details. } \value{ A logical value, or \code{NA}. 
} \author{ \adrian \rolf and \ege } \seealso{ \code{\link{valid.ppm}}, \code{\link{valid.detpointprocfamily}} } \keyword{spatial} \keyword{models} spatstat/man/default.dummy.Rd0000644000176200001440000000644613333543263015754 0ustar liggesusers\name{default.dummy} \alias{default.dummy} \title{Generate a Default Pattern of Dummy Points} \description{ Generates a default pattern of dummy points for use in a quadrature scheme. } \usage{ default.dummy(X, nd, random=FALSE, ntile=NULL, npix=NULL, quasi=FALSE, \dots, eps=NULL, verbose=FALSE) } \arguments{ \item{X}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{nd}{ Optional. Integer, or integer vector of length 2, specifying an \code{nd * nd} or \code{nd[1] * nd[2]} rectangular array of dummy points. } \item{random}{ Logical value. If \code{TRUE}, the dummy points are generated randomly. } \item{quasi}{ Logical value. If \code{TRUE}, the dummy points are generated by a quasirandom sequence. } \item{ntile}{ Optional. Integer or pair of integers specifying the number of rows and columns of tiles used in the counting rule. } \item{npix}{ Optional. Integer or pair of integers specifying the number of rows and columns of pixels used in computing approximate areas. } \item{\dots}{ Ignored. } \item{eps}{ Optional. Grid spacing. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{verbose}{ If \code{TRUE}, information about the construction of the quadrature scheme is printed. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}) containing the dummy points. } \details{ This function provides a sensible default for the dummy points in a quadrature scheme. 
A quadrature scheme consists of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. See \code{\link{quad.object}} for further information about quadrature schemes. If \code{random} and \code{quasi} are both false (the default), then the function creates dummy points in a regular \code{nd[1]} by \code{nd[1]} rectangular grid. If \code{random} is true and \code{quasi} is false, then the frame of the window is divided into an \code{nd[1]} by \code{nd[1]} array of tiles, and one dummy point is generated at random inside each tile. If \code{quasi} is true, a quasirandom pattern of \code{nd[1] * nd[2]} points is generated. In all cases, the four corner points of the frame of the window are added. Then if the window is not rectangular, any dummy points lying outside it are deleted. If \code{nd} is missing, a default value (depending on the data pattern \code{X}) is computed by \code{default.ngrid}. Alternative functions for creating dummy patterns include \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}} and \code{\link{spokes}}. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}}, \code{\link{spokes}} } \examples{ data(simdat) P <- simdat D <- default.dummy(P, 100) \dontrun{plot(D)} Q <- quadscheme(P, D, "grid") \dontrun{plot(union.quad(Q))} } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/cut.lpp.Rd0000644000176200001440000001001513333543263014546 0ustar liggesusers\name{cut.lpp} \alias{cut.lpp} \title{Classify Points in a Point Pattern on a Network} \description{ For a point pattern on a linear network, classify the points into distinct types according to the numerical marks in the pattern, or according to another variable. } \usage{ \method{cut}{lpp}(x, z=marks(x), ...) 
} \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{z}{ Data determining the classification. A numeric vector, a factor, a pixel image on a linear network (class \code{"linim"}), a function on a linear network (class \code{"linfun"}), a tessellation on a linear network (class \code{"lintess"}), a string giving the name of a column of marks, or one of the coordinate names \code{"x"}, \code{"y"}, \code{"seg"} or \code{"tp"}. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values in \code{z} to factor values in the output. See \code{\link{cut.default}}. } } \value{ A multitype point pattern on the same linear network, that is, a point pattern object (of class \code{"lpp"}) with a \code{marks} vector that is a factor. } \details{ This function has the effect of classifying each point in the point pattern \code{x} into one of several possible types. The classification is based on the dataset \code{z}, which may be either \itemize{ \item a factor (of length equal to the number of points in \code{z}) determining the classification of each point in \code{x}. Levels of the factor determine the classification. \item a numeric vector (of length equal to the number of points in \code{z}). The range of values of \code{z} will be divided into bands (the number of bands is determined by \code{\dots}) and \code{z} will be converted to a factor using \code{\link{cut.default}}. \item a pixel image on a network (object of class \code{"linim"}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a function on a network (object of class \code{"linfun"}, see \code{\link{linfun}}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a tessellation on a network (object of class \code{"lintess"}, see \code{\link{lintess}}). 
Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a character string, giving the name of one of the columns of \code{marks(x)}, if this is a data frame. \item a character string identifying one of the coordinates: the spatial coordinates \code{"x"}, \code{"y"} or the segment identifier \code{"seg"} or the fractional coordinate along the segment, \code{"tp"}. } The default is to take \code{z} to be the vector of marks in \code{x} (or the first column in the data frame of marks of \code{x}, if it is a data frame). If the marks are numeric, then the range of values of the numerical marks is divided into several intervals, and each interval is associated with a level of a factor. The result is a marked point pattern, on the same linear network, with the same point locations as \code{x}, but with the numeric mark of each point discretised by replacing it by the factor level. This is a convenient way to transform a marked point pattern which has numeric marks into a multitype point pattern, for example to plot it or analyse it. See the examples. To select some points from \code{x}, use the subset operators \code{\link{[.lpp}} or \code{\link{subset.lpp}} instead. } \seealso{ \code{\link{cut}}, \code{\link{lpp}}, \code{\link{lintess}}, \code{\link{linfun}}, \code{\link{linim}} } \examples{ X <- runiflpp(20, simplenet) f <- linfun(function(x,y,seg,tp) { x }, simplenet) plot(cut(X, f, breaks=4)) plot(cut(X, "x", breaks=4)) plot(cut(X, "seg")) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/anylist.Rd0000644000176200001440000000206313506343371014650 0ustar liggesusers\name{anylist} \alias{anylist} \alias{as.anylist} \title{ List of Objects } \description{ Make a list of objects of any type. } \usage{ anylist(\dots) as.anylist(x) } \arguments{ \item{\dots}{ Any number of arguments of any type. } \item{x}{ A list. 
} } \details{ An object of class \code{"anylist"} is a list of objects that the user intends to treat in a similar fashion. For example it may be desired to plot each of the objects side-by-side: this can be done using the function \code{\link{plot.anylist}}. The objects can belong to any class; they may or may not all belong to the same class. In the \pkg{spatstat} package, various functions produce an object of class \code{"anylist"}. } \value{ A list, belonging to the class \code{"anylist"}, containing the original objects. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{solist}}, \code{\link{as.solist}}, \code{\link{anylapply}}. } \examples{ anylist(cells, intensity(cells), Kest(cells)) anylist() } \keyword{list} \keyword{manip} spatstat/man/model.depends.Rd0000644000176200001440000000672013333543263015712 0ustar liggesusers\name{model.depends} \alias{model.depends} \alias{model.is.additive} \alias{model.covariates} \alias{has.offset.term} \alias{has.offset} \title{ Identify Covariates Involved in each Model Term } \description{ Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. } \usage{ model.depends(object) model.is.additive(object) model.covariates(object, fitted=TRUE, offset=TRUE) has.offset.term(object) has.offset(object) } \arguments{ \item{object}{ A fitted model of any kind. } \item{fitted,offset}{ Logical values determining which type of covariates to include. } } \details{ The \code{object} can be a fitted model of any kind, including models of the classes \code{\link{lm}}, \code{\link{glm}} and \code{\link{ppm}}. To be precise, \code{object} must belong to a class for which there are methods for \code{\link{formula}}, \code{\link{terms}} and \code{\link{model.matrix}}. The command \code{model.depends} determines the relationship between the original covariates (the data supplied when \code{object} was fitted) and the canonical covariates (the columns of the design matrix). 
It returns a logical matrix, with one row for each canonical covariate, and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th canonical covariate depends on the \code{j}th original covariate. If the model formula of \code{object} includes offset terms (see \code{\link{offset}}), then the return value of \code{model.depends} also has an attribute \code{"offset"}. This is a logical value or matrix with one row for each offset term and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th offset term depends on the \code{j}th original covariate. The command \code{model.covariates} returns a character vector containing the names of all (original) covariates that were actually used to fit the model. By default, this includes all covariates that appear in the model formula, including offset terms as well as canonical covariate terms. To omit the offset terms, set \code{offset=FALSE}. To omit the canonical covariate terms, set \code{fitted=FALSE}. The command \code{model.is.additive} determines whether the model is additive, in the sense that there is no canonical covariate that depends on two or more original covariates. It returns a logical value. The command \code{has.offset.term} is a faster way to determine whether the model \emph{formula} includes an \code{offset} term. The functions \code{model.depends} and \code{has.offset.term} only detect \code{offset} terms which are present in the model formula. They do not detect numerical offsets in the model object, that were inserted using the \code{offset} argument in \code{lm}, \code{glm} etc. To detect the presence of offsets of both kinds, use \code{has.offset}. } \value{ A logical value or matrix. 
} \seealso{ \code{\link{ppm}}, \code{\link{model.matrix}} } \examples{ x <- 1:10 y <- 3*x + 2 z <- rep(c(-1,1), 5) fit <- lm(y ~ poly(x,2) + sin(z)) model.depends(fit) model.covariates(fit) model.is.additive(fit) fitoff1 <- lm(y ~ x + offset(z)) fitoff2 <- lm(y ~ x, offset=z) has.offset.term(fitoff1) has.offset(fitoff1) has.offset.term(fitoff2) has.offset(fitoff2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{models} spatstat/man/markstat.Rd0000644000176200001440000000672613333543263015025 0ustar liggesusers\name{markstat} \alias{markstat} \title{Summarise Marks in Every Neighbourhood in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and summarise their marks } \usage{ markstat(X, fun, N=NULL, R=NULL, \dots) } \arguments{ \item{X}{ A marked point pattern. An object of class \code{"ppp"}. } \item{fun}{ Function to be applied to the vector of marks. } \item{N}{ Integer. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of the \code{N} points of \code{X} which are closest to it. } \item{R}{ Nonnegative numeric value. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of all points of \code{X} which lie within a distance \code{R} of it. } \item{\dots}{ extra arguments passed to the function \code{fun}. They must be given in the form \code{name=value}. } } \value{ Similar to the result of \code{\link{apply}}. if each call to \code{fun} returns a single numeric value, the result is a vector of dimension \code{npoints(X)}, the number of points in \code{X}. If each call to \code{fun} returns a vector of the same length \code{m}, then the result is a matrix of dimensions \code{c(m,n)}; note the transposition of the indices, as usual for the family of \code{apply} functions. If the calls to \code{fun} return vectors of different lengths, the result is a list of length \code{npoints(X)}. 
} \details{ This algorithm visits each point in the point pattern \code{X}, determines which points of \code{X} are ``neighbours'' of the current point, extracts the marks of these neighbouring points, applies the function \code{fun} to the marks, and collects the value or values returned by \code{fun}. The definition of ``neighbours'' depends on the arguments \code{N} and \code{R}, exactly one of which must be given. If \code{N} is given, then the neighbours of the current point are the \code{N} points of \code{X} which are closest to the current point (including the current point itself). If \code{R} is given, then the neighbourhood of the current point consists of all points of \code{X} which lie closer than a distance \code{R} from the current point. Each point of \code{X} is visited; the neighbourhood of the current point is determined; the marks of these points are extracted as a vector \code{v}; then the function \code{fun} is called as: \code{fun(v, \dots)} where \code{\dots} are the arguments passed from the call to \code{markstat}. The results of each call to \code{fun} are collected and returned according to the usual rules for \code{\link{apply}} and its relatives. See the section on \bold{Value}. This function is just a convenient wrapper for a common use of the function \code{\link{applynbd}}. For more complex tasks, use \code{\link{applynbd}}. To simply tabulate the marks in every \code{R}-neighbourhood, use \code{\link{marktable}}. } \seealso{ \code{\link{applynbd}}, \code{\link{marktable}}, \code{\link{ppp.object}}, \code{\link{apply}} } \examples{ trees <- longleaf \testonly{ trees <- trees[seq(1, npoints(trees), by=6)] } # average diameter of 5 closest neighbours of each tree md <- markstat(trees, mean, N=5) # range of diameters of trees within 10 metre radius rd <- markstat(trees, range, R=10) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{programming} spatstat/man/Smooth.ppp.Rd0000644000176200001440000002053413613547037015243 0ustar liggesusers\name{Smooth.ppp} \alias{Smooth.ppp} \alias{markmean} \alias{markvar} \title{Spatial smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations. Uses kernel smoothing and least-squares cross-validated bandwidth selection. } \usage{ \method{Smooth}{ppp}(X, sigma=NULL, ..., weights = rep(1, npoints(X)), at="pixels", adjust=1, varcov=NULL, edge=TRUE, diggle=FALSE, kernel="gaussian", scalekernel=is.character(kernel), geometric=FALSE) markmean(X, ...) markvar(X, sigma=NULL, ..., weights=NULL, varcov=NULL) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Smoothing bandwidth. A single positive number, a numeric vector of length 2, or a function that selects the bandwidth automatically. See \code{\link{density.ppp}}. } \item{\dots}{ Further arguments passed to \code{\link{bw.smoothppp}} and \code{\link{density.ppp}} to control the kernel smoothing and the pixel resolution of the result. } \item{weights}{ Optional weights attached to the observations. A numeric vector, a \code{function(x,y)}, a pixel image, or an \code{expression}. See \code{\link{density.ppp}}. } \item{at}{ String specifying whether to compute the smoothed values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{edge,diggle}{ Arguments passed to \code{\link{density.ppp}} to determine the edge correction. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{varcov}{ Variance-covariance matrix. An alternative to \code{sigma}. See \code{\link{density.ppp}}. } \item{kernel}{ The smoothing kernel. 
A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{geometric}{ Logical value indicating whether to perform geometric mean smoothing instead of arithmetic mean smoothing. See Details. } } \details{ The function \code{Smooth.ppp} performs spatial smoothing of numeric values observed at a set of irregular locations. The functions \code{markmean} and \code{markvar} are wrappers for \code{Smooth.ppp} which compute the spatially-varying mean and variance of the marks of a point pattern. \code{Smooth.ppp} is a method for the generic function \code{\link{Smooth}} for the class \code{"ppp"} of point patterns. Thus you can type simply \code{Smooth(X)}. Smoothing is performed by kernel weighting, using the Gaussian kernel by default. If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i}{\sum_i k(u-x_i)} }{ g(u) = (sum of k(u-x[i]) v[i])/(sum of k(u-x[i])) } where \eqn{k} is the kernel (a Gaussian kernel by default). This is known as the Nadaraya-Watson smoother (Nadaraya, 1964, 1989; Watson, 1964). By default, the smoothing kernel bandwidth is chosen by least squares cross-validation (see below). 
The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame (in \code{Smooth.ppp} and \code{markmean}). Then the smoothing procedure is applied to each column of marks. The numerator and denominator are computed by \code{\link{density.ppp}}. The arguments \code{...} control the smoothing kernel parameters and determine whether edge correction is applied. The smoothing kernel bandwidth can be specified by either of the arguments \code{sigma} or \code{varcov} which are passed to \code{\link{density.ppp}}. If neither of these arguments is present, then by default the bandwidth is selected by least squares cross-validation, using \code{\link{bw.smoothppp}}. The optional argument \code{weights} allows numerical weights to be applied to the data. If a weight \eqn{w_i}{w[i]} is associated with location \eqn{x_i}{x[i]}, then the smoothed function is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i w_i}{\sum_i k(u-x_i) w_i} }{ g(u) = (sum of k(u-x[i]) v[i] w[i])/(sum of k(u-x[i]) w[i]) } If \code{geometric=TRUE} then geometric mean smoothing is performed instead of arithmetic mean smoothing. The mark values must be non-negative numbers. The logarithm of the mark values is computed; these logarithmic values are kernel-smoothed as described above; then the exponential function is applied to the smoothed values. An alternative to kernel smoothing is inverse-distance weighting, which is performed by \code{\link{idw}}. } \section{Very small bandwidth}{ If the chosen bandwidth \code{sigma} is very small, kernel smoothing is mathematically equivalent to nearest-neighbour interpolation; the result will be computed by \code{\link{nnmark}}. 
This is unless \code{at="points"} and \code{leaveoneout=FALSE}, when the original mark values are returned. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. } \seealso{ \code{\link{Smooth}}, \code{\link{density.ppp}}, \code{\link{bw.smoothppp}}, \code{\link{nnmark}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{idw}} for inverse-distance weighted smoothing. To perform interpolation, see also the \code{akima} package. } \examples{ # Longleaf data - tree locations, marked by tree diameter # Local smoothing of tree diameter (automatic bandwidth selection) Z <- Smooth(longleaf) # Kernel bandwidth sigma=5 plot(Smooth(longleaf, 5)) # mark variance plot(markvar(longleaf, sigma=5)) # data frame of marks: trees marked by diameter and height plot(Smooth(finpines, sigma=2)) head(Smooth(finpines, sigma=2, at="points")) } \author{ \spatstatAuthors. } \references{ Nadaraya, E.A. (1964) On estimating regression. 
\emph{Theory of Probability and its Applications} \bold{9}, 141--142. Nadaraya, E.A. (1989) \emph{Nonparametric estimation of probability densities and regression curves}. Kluwer, Dordrecht. Watson, G.S. (1964) Smooth regression analysis. \emph{Sankhya A} \bold{26}, 359--372. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/bdist.tiles.Rd0000644000176200001440000000206313333543262015410 0ustar liggesusers\name{bdist.tiles} \alias{bdist.tiles} \title{Distance to Boundary of Window} \description{ Computes the shortest distances from each tile in a tessellation to the boundary of the window. } \usage{ bdist.tiles(X) } \arguments{ \item{X}{A tessellation (object of class \code{"tess"}).} } \value{ A numeric vector, giving the shortest distance from each tile in the tessellation to the boundary of the window. Entries of the vector correspond to the entries of \code{tiles(X)}. } \details{ This function computes, for each tile \eqn{s_i}{s[[i]]} in the tessellation \code{X}, the shortest distance from \eqn{s_i}{s[[i]]} to the boundary of the window \eqn{W} containing the tessellation. } \seealso{ \code{\link{tess}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}} } \examples{ P <- runifpoint(15) X <- dirichlet(P) plot(X, col="red") B <- bdist.tiles(X) # identify tiles that do not touch the boundary plot(X[B > 0], add=TRUE, col="green", lwd=3) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/compatible.Rd0000644000176200001440000000163413333543263015307 0ustar liggesusers\name{compatible} \alias{compatible} \title{Test Whether Objects Are Compatible} \description{ Tests whether two or more objects of the same class are compatible. } \usage{ compatible(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more objects of the same class} } \details{ This generic function is used to check whether the objects \code{A} and \code{B} (and any additional objects \code{\dots}) are compatible. 
What is meant by \sQuote{compatible} depends on the class of object. There are methods for the classes \code{"fv"}, \code{"fasp"}, \code{"im"} and \code{"unitname"}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{compatible.fv}}, \code{\link{compatible.fasp}}, \code{\link{compatible.im}}, \code{\link{compatible.unitname}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/maxnndist.Rd0000644000176200001440000000341413604234565015176 0ustar liggesusers\name{maxnndist} \alias{maxnndist} \alias{minnndist} \title{ Compute Minimum or Maximum Nearest-Neighbour Distance } \description{ A faster way to compute the minimum or maximum nearest-neighbour distance in a point pattern. } \usage{ minnndist(X, positive=FALSE, by=NULL) maxnndist(X, positive=FALSE, by=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{positive}{ Logical. If \code{FALSE} (the default), compute the usual nearest-neighbour distance. If \code{TRUE}, ignore coincident points, so that the nearest neighbour distance for each point is greater than zero. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will compute the distance to the nearest point in each group. } } \details{ These functions find the minimum and maximum values of nearest-neighbour distances in the point pattern \code{X}. \code{minnndist(X)} and \code{maxnndist(X)} are equivalent to, but faster than, \code{min(nndist(X))} and \code{max(nndist(X))} respectively. The value is \code{NA} if \code{npoints(X) < 2}. } \value{ A single numeric value (possibly \code{NA}). If \code{by} is given, the result is a numeric matrix giving the minimum or maximum nearest neighbour distance between each subset of \code{X}. 
} \seealso{ \code{\link{nndist}} } \examples{ min(nndist(swedishpines)) minnndist(swedishpines) max(nndist(swedishpines)) maxnndist(swedishpines) minnndist(lansing, positive=TRUE) if(interactive()) { X <- rpoispp(1e6) system.time(min(nndist(X))) system.time(minnndist(X)) } minnndist(amacrine, by=marks(amacrine)) maxnndist(amacrine, by=marks(amacrine)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat/man/HierStraussHard.Rd0000644000176200001440000001172513333543262016244 0ustar liggesusers\name{HierStraussHard} \alias{HierStraussHard} \title{The Hierarchical Strauss Hard Core Point Process Model} \description{ Creates an instance of the hierarchical Strauss-hard core point process model which can then be fitted to point pattern data. } \usage{ HierStraussHard(iradii, hradii=NULL, types=NULL, archy=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Optional matrix of hard core distances} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{archy}{Optional: the hierarchical order. See Details.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hierarchical Strauss-hard core process with interaction radii \eqn{iradii[i,j]} and hard core distances \eqn{hradii[i,j]}. } \details{ This is a hierarchical point process model for a multitype point pattern (\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander} and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, 1999; Grabarnik and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, 2009). It is appropriate for analysing multitype point pattern data in which the types are ordered so that the points of type \eqn{j} depend on the points of type \eqn{1,2,\ldots,j-1}{1,2,...,j-1}. 
The hierarchical version of the (stationary) Strauss hard core process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]}, hard core distances \eqn{h_{ij}}{h[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is a point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density \bold{provided} \eqn{i \le j}{i <= j}. If any pair of points of types \eqn{i} and \eqn{j} lies closer than \eqn{h_{ij}}{h[i,j]} units apart, the configuration of points is impossible (probability density zero). The nonstationary hierarchical Strauss hard core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hierarchical Strauss hard core process pairwise interaction is yielded by the function \code{HierStraussHard()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the HierStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The argument \code{archy} can be used to specify a hierarchical ordering of the types. It can be either a vector of integers or a character vector matching the possible types. 
The default is the sequence \eqn{1,2, \ldots, m}{1,2, ..., m} meaning that type \eqn{j} depends on types \eqn{1,2, \ldots, j-1}{1,2, ..., j-1}. The matrices \code{iradii} and \code{hradii} must be square, with entries which are either positive numbers or zero or \code{NA}. A value of zero or \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hard core distances are specified in \code{HierStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{HierStraussHard()}. } \seealso{ \code{\link{MultiStraussHard}} for the corresponding symmetrical interaction. \code{\link{HierHard}}, \code{\link{HierStrauss}}. } \examples{ r <- matrix(c(30, NA, 40, 30), nrow=2,ncol=2) h <- matrix(c(4, NA, 10, 15), 2, 2) HierStraussHard(r, h) # prints a sensible description of itself ppm(ants ~1, HierStraussHard(r, h)) # fit the stationary hierarchical Strauss-hard core process to ants data } \author{\adrian , \rolf and \ege. } \references{ Grabarnik, P. and \ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}, A. (2009) Modelling the spatial structure of forest stands by multivariate point processes with hierarchical interactions. \emph{Ecological Modelling} \bold{220}, 1232--1240. \ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}, H. and \ifelse{latex}{\out{S{\"a}rkk{\"a}}}{Sarkka}, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. 
} \keyword{spatial} \keyword{models} spatstat/man/Kcom.Rd0000644000176200001440000002207113571674202014061 0ustar liggesusers\name{Kcom} \Rdversion{1.1} \alias{Kcom} \title{ Model Compensator of K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the \eqn{K} function based on the fitted model (as well as the usual nonparametric estimates of \eqn{K} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{K} functions serves as a diagnostic for the model. } \usage{ Kcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "isotropic", "translate"), conditional = !is.poisson(object), restrict = FALSE, model = NULL, trend = ~1, interaction = Poisson(), rbord = reach(interaction), compute.var = TRUE, truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{K(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ This argument is for advanced use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional vector of character strings specifying the edge correction(s) to be used. See \code{\link{Kest}} for options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{model}{ Optional. 
A fitted point process model (object of class \code{"ppm"}) to be re-fitted to the data using \code{\link{update.ppm}}, if \code{object} is a point pattern. Overrides the arguments \code{trend,interaction,rbord}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{compute.var}{ Logical value indicating whether to compute the Poincare variance bound for the residual \eqn{K} function (calculation is only implemented for the isotropic correction). } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes an estimate of the \eqn{K} function of the dataset, together with a \emph{model compensator} of the \eqn{K} function, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). 
In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{K} function. It then also computes the \emph{model compensator} of the \eqn{K} function. The different function estimates are returned as columns in a data frame (of class \code{"fv"}). The argument \code{correction} determines the edge correction(s) to be applied. See \code{\link{Kest}} for explanation of the principle of edge corrections. The following table gives the options for the \code{correction} argument, and the corresponding column names in the result: \tabular{llll}{ \code{correction} \tab \bold{description of correction} \tab \bold{nonparametric} \tab \bold{compensator} \cr \code{"isotropic"} \tab Ripley isotropic correction \tab \code{iso} \tab \code{icom} \cr \code{"translate"} \tab Ohser-Stoyan translation correction \tab \code{trans} \tab \code{tcom} \cr \code{"border"} \tab border correction \tab \code{border} \tab \code{bcom} } The nonparametric estimates can all be expressed in the form \deqn{ \hat K(r) = \sum_i \sum_{j < i} e(x_i,x_j,r,x) I\{ d(x_i,x_j) \le r \} }{ K(r) = sum[i] sum[j < i] e(x[i], x[j], r, x) I( d(x[i],x[j]) <= r ) } where \eqn{x_i}{x[i]} is the \eqn{i}-th data point, \eqn{d(x_i,x_j)}{d(x[i],x[j])} is the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r,x)}{e(x[i],x[j],r,x)} is a term that serves to correct edge effects and to re-normalise the sum. 
The corresponding model compensator is \deqn{ {\bf C} \, \tilde K(r) = \int_W \lambda(u,x) \sum_j e(u,x_j,r,x \cup u) I\{ d(u,x_j) \le r\} }{ C K(r) = integral[u] lambda(u,x) sum[j] e(u, x[j], r, x+u) I( d(u,x[j]) <= r ) } where the integral is over all locations \eqn{u} in the observation window, \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{x \cup u}{x+u} denotes the data point pattern \eqn{x} augmented by adding the extra point \eqn{u}. If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix D of Baddeley, Rubak and \Moller (2011). Thus, by default, the reweighting estimator is computed for non-Poisson models. The nonparametric estimates of \eqn{K(r)} are approximately unbiased estimates of the \eqn{K}-function, assuming the point process is stationary. The model compensators are unbiased estimates \emph{of the mean values of the corresponding nonparametric estimates}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric estimates and model compensators is approximately zero. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and \Moller, J. 
(2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ \adrian, \ege and Jesper \Moller. } \seealso{ Related functions: \code{\link{Kres}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gcom}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=16)} if(interactive()) { plot(Kcom(fit0)) # compare the isotropic-correction estimates plot(Kcom(fit0), cbind(iso, icom) ~ r) # uniform Poisson is clearly not correct } fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kcom(fit1) K1 if(interactive()) { plot(K1) plot(K1, cbind(iso, icom) ~ r) plot(K1, cbind(trans, tcom) ~ r) # how to plot the difference between nonparametric estimates and compensators plot(K1, iso - icom ~ r) # fit looks approximately OK; try adjusting interaction distance } fit2 <- ppm(cells, ~1, Strauss(0.12)) \testonly{fit2 <- ppm(cells, ~1, Strauss(0.12), nd=16)} K2 <- Kcom(fit2) if(interactive()) { plot(K2) plot(K2, cbind(iso, icom) ~ r) plot(K2, iso - icom ~ r) } } \keyword{spatial} \keyword{models} spatstat/man/Frame.Rd0000644000176200001440000000312713333543262014220 0ustar liggesusers\name{Frame} \alias{Frame} \alias{Frame<-} \alias{Frame.default} \alias{Frame<-.default} \alias{Frame<-.owin} \alias{Frame<-.ppp} \alias{Frame<-.im} \title{ Extract or Change the Containing Rectangle of a Spatial Object } \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract or change the containing rectangle inside which the object is defined. 
} \usage{ Frame(X) \method{Frame}{default}(X) Frame(X) <- value \method{Frame}{owin}(X) <- value \method{Frame}{ppp}(X) <- value \method{Frame}{im}(X) <- value \method{Frame}{default}(X) <- value } \arguments{ \item{X}{ A spatial object such as a point pattern, line segment pattern or pixel image. } \item{value}{ A rectangular window (object of class \code{"owin"} of type \code{"rectangle"}) to be used as the new containing rectangle for \code{X}. } } \details{ The functions \code{Frame} and \code{Frame<-} are generic. \code{Frame(X)} extracts the rectangle inside which \code{X} is defined. \code{Frame(X) <- R} changes the rectangle inside which \code{X} is defined to the new rectangle \code{R}. } \value{ The result of \code{Frame} is a rectangular window (object of class \code{"owin"} of type \code{"rectangle"}). The result of \code{Frame<-} is the updated object \code{X}, of the same class as \code{X}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{Window}} } \examples{ Frame(cells) X <- demopat Frame(X) Frame(X) <- owin(c(0, 11000), c(400, 8000)) } \keyword{spatial} \keyword{manip} spatstat/man/Extract.tess.Rd0000644000176200001440000000444213333543263015557 0ustar liggesusers\name{Extract.tess} \alias{[.tess} \alias{[<-.tess} \title{Extract or Replace Subset of Tessellation} \description{ Extract, change or delete a subset of the tiles of a tessellation, to make a new tessellation. } \usage{ \method{[}{tess}(x, i, \dots) \method{[}{tess}(x, i, \dots) <- value } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} \item{i}{ Subset index for the tiles of the tessellation. Alternatively a window (object of class \code{"owin"}). } \item{\dots}{ One argument that specifies the subset to be extracted or changed. Any valid format for the subset index in a list. } \item{value}{ Replacement value for the selected tiles of the tessellation. A list of windows (objects of class \code{"owin"}) or \code{NULL}. 
} } \details{ A tessellation (object of class \code{"tess"}, see \code{\link{tess}}) is effectively a list of tiles (spatial regions) that cover a spatial region. The subset operator \code{[.tess} extracts some of these tiles and forms a new tessellation, which of course covers a smaller region than the original. For \code{[.tess} only, the subset index can also be a window (object of class \code{"owin"}). The tessellation \code{x} is then intersected with the window. The replacement operator changes the selected tiles. The replacement \code{value} may be either \code{NULL} (which causes the selected tiles to be removed from \code{x}) or a list of the same length as the selected subset. The entries of \code{value} may be windows (objects of class \code{"owin"}) or \code{NULL} to indicate that the corresponding tile should be deleted. Generally it does not make sense to replace a tile in a tessellation with a completely different tile, because the tiles are expected to fit together. However this facility is sometimes useful for making small adjustments to polygonal tiles. } \value{ A tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{tess}}, \code{\link{tiles}}, \code{\link{intersect.tess}}. } \examples{ \testonly{op <- spatstat.options(npixel=10)} A <- tess(xgrid=0:4, ygrid=0:3) B <- A[c(1, 3, 7)] E <- A[-1] A[c(2, 5, 11)] <- NULL \testonly{spatstat.options(op)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/quadrat.test.Rd0000644000176200001440000002544513532404174015613 0ustar liggesusers\name{quadrat.test} \alias{quadrat.test} \alias{quadrat.test.ppp} \alias{quadrat.test.ppm} \alias{quadrat.test.quadratcount} \title{Dispersion Test for Spatial Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for a given point pattern, based on quadrat counts. Alternatively performs a goodness-of-fit test of a fitted inhomogeneous Poisson model. 
By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ quadrat.test(X, ...) \method{quadrat.test}{ppp}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{ppm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{quadratcount}(X, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., nsim=1999) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) to be subjected to the goodness-of-fit test. Alternatively a fitted point process model (object of class \code{"ppm"}) to be tested. Alternatively \code{X} can be the result of applying \code{\link{quadratcount}} to a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{alternative}{ Character string (partially matched) specifying the alternative hypothesis. } \item{method}{ Character string (partially matched) specifying the test to use: either \code{method="Chisq"} for the chi-squared test (the default), or \code{method="MonteCarlo"} for a Monte Carlo test. } \item{conditional}{ Logical. Should the Monte Carlo test be conducted conditionally upon the observed number of points of the pattern? Ignored if \code{method="Chisq"}. } \item{CR}{ Optional. Numerical value. The exponent for the Cressie-Read test statistic. See Details. } \item{lambda}{ Optional. Pixel image (object of class \code{"im"}) or function (class \code{"funxy"}) giving the predicted intensity of the point process. } \item{df.est}{ Optional. Advanced use only. 
The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. } \item{\dots}{Ignored.} \item{xbreaks}{ Optional. Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Optional. Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link{as.tess}}) determining the quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}. } \item{nsim}{ The number of simulated samples to generate when \code{method="MonteCarlo"}. } } \details{ These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests of goodness-of-fit for a point process model, based on quadrat counts. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}), point process models (class \code{"ppm"}) and quadrat count tables (class \code{"quadratcount"}). \itemize{ \item if \code{X} is a point pattern, we test the null hypothesis that the data pattern is a realisation of Complete Spatial Randomness (the uniform Poisson point process). Marks in the point pattern are ignored. (If \code{lambda} is given then the null hypothesis is the Poisson process with intensity \code{lambda}.) \item if \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness. See \code{\link{quadrat.test.splitppp}} for documentation. \item If \code{X} is a fitted point process model, then it should be a Poisson point process model. The data to which this model was fitted are extracted from the model object, and are treated as the data point pattern for the test. 
We test the null hypothesis that the data pattern is a realisation of the (inhomogeneous) Poisson point process specified by \code{X}. } In all cases, the window of observation is divided into tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. The quadrats are rectangular by default, or may be regions of arbitrary shape specified by the argument \code{tess}. The expected number of points in each quadrat is also calculated, as determined by CSR (in the first case) or by the fitted model (in the second case). Then the Pearson \eqn{X^2} statistic \deqn{ X^2 = sum((observed - expected)^2/expected) } is computed. If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of goodness-of-fit is performed by comparing the test statistic to the \eqn{\chi^2}{chi^2} distribution with \eqn{m-k} degrees of freedom, where \code{m} is the number of quadrats and \eqn{k} is the number of fitted parameters (equal to 1 for \code{quadrat.test.ppp}). The default is to compute the \emph{two-sided} \eqn{p}-value, so that the test will be declared significant if \eqn{X^2} is either very large or very small. One-sided \eqn{p}-values can be obtained by specifying the \code{alternative}. An important requirement of the \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat be greater than 5. If \code{method="MonteCarlo"} then a Monte Carlo test is performed, obviating the need for all expected counts to be at least 5. In the Monte Carlo test, \code{nsim} random point patterns are generated from the null hypothesis (either CSR or the fitted point process model). The Pearson \eqn{X^2} statistic is computed as above. The \eqn{p}-value is determined by comparing the \eqn{X^2} statistic for the observed point pattern, with the values obtained from the simulations. Again the default is to compute the \emph{two-sided} \eqn{p}-value. 
If \code{conditional} is \code{TRUE} then the simulated samples are generated from the multinomial distribution with the number of \dQuote{trials} equal to the number of observed points and the vector of probabilities equal to the expected counts divided by the sum of the expected counts. Otherwise the simulated samples are independent Poisson counts, with means equal to the expected counts. If the argument \code{CR} is given, then instead of the Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence test statistic \deqn{ 2nI = \frac{2}{CR(CR+1)} \sum_i \left[ \left( \frac{X_i}{E_i} \right)^CR - 1 \right] }{ 2nI = (2/(CR * (CR+1))) * sum((X[i]/E[i])^CR - 1) } is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count and \eqn{E_i}{E[i]} is the corresponding expected count. The value \code{CR=1} gives the Pearson \eqn{X^2} statistic; \code{CR=0} gives the likelihood ratio test statistic \eqn{G^2}; \code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2}; \code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2}; and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}. In all cases the asymptotic distribution of this test statistic is the same \eqn{\chi^2}{chi^2} distribution as above. The return value is an object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The return value also belongs to the special class \code{"quadrat.test"}. Plotting the object will display the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. } \seealso{ \code{\link{quadrat.test.splitppp}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"htest"}. See \code{\link{chisq.test}} for explanation. 
The return value is also an object of the special class \code{"quadrattest"}, and there is a plot method for this class. See the examples. } \references{ Cressie, N. and Read, T.R.C. (1984) Multinomial goodness-of-fit tests. \emph{Journal of the Royal Statistical Society, Series B} \bold{46}, 440--464. } \examples{ quadrat.test(simdat) quadrat.test(simdat, 4, 3) quadrat.test(simdat, alternative="regular") quadrat.test(simdat, alternative="clustered") ## Likelihood ratio test quadrat.test(simdat, CR=0) ## Power divergence tests quadrat.test(simdat, CR=-1)$p.value quadrat.test(simdat, CR=-2)$p.value # Using Monte Carlo p-values quadrat.test(swedishpines) # Get warning, small expected values. \dontrun{ quadrat.test(swedishpines, method="M", nsim=4999) quadrat.test(swedishpines, method="M", nsim=4999, conditional=FALSE) } \testonly{ quadrat.test(swedishpines, method="M", nsim=19) quadrat.test(swedishpines, method="M", nsim=19, conditional=FALSE) } # quadrat counts qS <- quadratcount(simdat, 4, 3) quadrat.test(qS) # fitted model: inhomogeneous Poisson fitx <- ppm(simdat ~ x) quadrat.test(fitx) # an equivalent test (results differ due to discretisation effects): quadrat.test(simdat, lambda=predict(fitx), df.est=length(coef(fitx))) te <- quadrat.test(simdat, 4) residuals(te) # Pearson residuals plot(te) plot(simdat, pch="+", cols="green", lwd=2) plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3) sublab <- eval(substitute(expression(p[chi^2]==z), list(z=signif(te$p.value,3)))) title(sub=sublab, cex.sub=3) # quadrats of irregular shape B <- dirichlet(runifpoint(6, Window(simdat))) qB <- quadrat.test(simdat, tess=B) plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+") plot(qB, add=TRUE, col="red", lwd=2, cex=1.2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat/man/linearpcfcross.Rd0000644000176200001440000000570613623712063016207 0ustar liggesusers\name{linearpcfcross} \alias{linearpcfcross} \title{ Multitype Pair Correlation Function 
(Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearpcfcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. 
The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link{linearpcf}}, \code{\link{pcfcross}}. } \examples{ data(chicago) g <- linearpcfcross(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat/man/rMosaicSet.Rd0000644000176200001440000000261313333543264015240 0ustar liggesusers\name{rMosaicSet} \alias{rMosaicSet} \title{Mosaic Random Set} \description{ Generate a random set by taking a random selection of tiles of a given tessellation. } \usage{ rMosaicSet(X, p=0.5) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{p}{ Probability of including a given tile. A number strictly between 0 and 1. } } \details{ Given a tessellation \code{X}, this function randomly selects some of the tiles of \code{X}, including each tile with probability \eqn{p} independently of the other tiles. The selected tiles are then combined to form a set in the plane. One application of this is Switzer's (1965) example of a random set which has a Markov property. It is constructed by generating \code{X} according to a Poisson line tessellation (see \code{\link{rpoislinetess}}). } \value{ A window (object of class \code{"owin"}). } \references{ Switzer, P. A random set process in the plane with a Markovian property. \emph{Annals of Mathematical Statistics} \bold{36} (1965) 1859--1863. 
} \author{\adrian and \rolf } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicField}} } \examples{ # Switzer's random set X <- rpoislinetess(3) plot(rMosaicSet(X, 0.5), col="green", border=NA) # another example plot(rMosaicSet(dirichlet(runifpoint(30)), 0.4)) } \keyword{spatial} \keyword{datagen} spatstat/man/plot.textstring.Rd0000644000176200001440000000214113333543264016353 0ustar liggesusers\name{plot.textstring} \alias{plot.textstring} \title{Plot a Text String} \description{Plots an object of class \code{"textstring"}.} \usage{ \method{plot}{textstring}(x, \dots, do.plot = TRUE) } \arguments{ \item{x}{ Object of class \code{"textstring"} to be plotted. This object is created by the command \code{\link{textstring}}. } \item{\dots}{ Additional graphics arguments passed to \code{\link[graphics]{text}} to control the plotting of text. } \item{do.plot}{ Logical value indicating whether to actually plot the text. } } \details{ The argument \code{x} should be an object of class \code{"textstring"} created by the command \code{\link{textstring}}. This function displays the text using \code{\link[graphics]{text}}. } \value{ A window (class \code{"owin"}) enclosing the plotted graphics. } \examples{ W <- Window(humberside) te <- textstring(centroid.owin(W), txt="Humberside", cex=2.5) te plot(layered(W, te), main="") } \author{ \spatstatAuthors. } \seealso{ \code{\link{onearrow}}, \code{\link{yardstick}} } \keyword{spatial} \keyword{hplot} spatstat/man/rmhmodel.default.Rd0000644000176200001440000005142213430245337016421 0ustar liggesusers\name{rmhmodel.default} \alias{rmhmodel.default} \title{Build Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. 
} \usage{ \method{rmhmodel}{default}(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) } \arguments{ \item{\dots}{Ignored.} \item{cif}{Character string specifying the choice of model} \item{par}{Parameters of the model} \item{w}{Spatial window in which to simulate} \item{trend}{Specification of the trend in the model} \item{types}{A vector of factor levels defining the possible marks, for a multitype process. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.default} is the default method. It builds a description of the point process model from the simple arguments listed. The argument \code{cif} is a character string specifying the choice of interpoint interaction for the point process. 
The current options are \describe{ \item{\code{'areaint'}}{Area-interaction process.} \item{\code{'badgey'}}{Baddeley-Geyer (hybrid Geyer) process.} \item{\code{'dgs'}}{Diggle, Gates and Stibbard (1987) process} \item{\code{'diggra'}}{Diggle and Gratton (1984) process} \item{\code{'fiksel'}}{Fiksel double exponential process (Fiksel, 1984).} \item{\code{'geyer'}}{Saturation process (Geyer, 1999).} \item{\code{'hardcore'}}{Hard core process} \item{\code{'lennard'}}{Lennard-Jones process} \item{\code{'lookup'}}{General isotropic pairwise interaction process, with the interaction function specified via a ``lookup table''.} \item{\code{'multihard'}}{Multitype hardcore process} \item{\code{'penttinen'}}{The Penttinen process} \item{\code{'strauss'}}{The Strauss process} \item{\code{'straush'}}{The Strauss process with hard core} \item{\code{'sftcr'}}{The Softcore process} \item{\code{'straussm'}}{ The multitype Strauss process} \item{\code{'straushm'}}{Multitype Strauss process with hard core} \item{\code{'triplets'}}{Triplets process (Geyer, 1999).} } It is also possible to specify a \emph{hybrid} of these interactions in the sense of Baddeley et al (2013). In this case, \code{cif} is a character vector containing names from the list above. For example, \code{cif=c('strauss', 'geyer')} would specify a hybrid of the Strauss and Geyer models. The argument \code{par} supplies parameter values appropriate to the conditional intensity function being invoked. For the interactions listed above, these parameters are: \describe{ \item{areaint:}{ (Area-interaction process.) A \bold{named} list with components \code{beta,eta,r} which are respectively the ``base'' intensity, the scaled interaction parameter and the interaction radius. } \item{badgey:}{ (Baddeley-Geyer process.) 
A \bold{named} list with components \code{beta} (the ``base'' intensity), \code{gamma} (a vector of non-negative interaction parameters), \code{r} (a vector of interaction radii, of the same length as \code{gamma}, in \emph{increasing} order), and \code{sat} (the saturation parameter(s); this may be a scalar, or a vector of the same length as \code{gamma} and \code{r}; all values should be at least 1). Note that because of the presence of ``saturation'' the \code{gamma} values are permitted to be larger than 1. } \item{dgs:}{ (Diggle, Gates, and Stibbard process. See Diggle, Gates, and Stibbard (1987)) A \bold{named} list with components \code{beta} and \code{rho}. This process has pairwise interaction function equal to \deqn{ e(t) = \sin^2\left(\frac{\pi t}{2\rho}\right) }{ e(t) = sin^2((pi * t)/(2 * rho)) } for \eqn{t < \rho}{t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. } \item{diggra:}{ (Diggle-Gratton process. See Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987).) A \bold{named} list with components \code{beta}, \code{kappa}, \code{delta} and \code{rho}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < \delta}{t < delta}, equal to \deqn{ \left(\frac{t-\delta}{\rho-\delta}\right)^\kappa }{ ((t-delta)/(rho-delta))^kappa } for \eqn{\delta \le t < \rho}{delta <= t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. Note that here we use the symbol \eqn{\kappa}{kappa} where Diggle, Gates, and Stibbard use \eqn{\beta}{beta} since we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. } \item{fiksel:}{ (Fiksel double exponential process, see Fiksel (1984)) A \bold{named} list with components \code{beta}, \code{r}, \code{hc}, \code{kappa} and \code{a}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < hc}, equal to \deqn{ \exp(a \exp(- \kappa t)) }{ exp(a * exp( - kappa * t)) } for \eqn{hc \le t < r}{hc <= t < r}, and equal to 1 for \eqn{t \ge r}{t >= r}. 
} \item{geyer:}{ (Geyer's saturation process. See Geyer (1999).) A \bold{named} list with components \code{beta}, \code{gamma}, \code{r}, and \code{sat}. The components \code{beta}, \code{gamma}, \code{r} are as for the Strauss model, and \code{sat} is the ``saturation'' parameter. The model is Geyer's ``saturation'' point process model, a modification of the Strauss process in which we effectively impose an upper limit (\code{sat}) on the number of neighbours which will be counted as close to a given point. Explicitly, a saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{\beta \gamma^{\min(s, t(x_i,X))}}{beta gamma^min(s,t(x[i],X))} to the probability density of the point pattern, where \eqn{t(x_i,X)}{t(x[i],X)} denotes the number of ``\eqn{r}-close neighbours'' of \eqn{x_i}{x[i]} in the pattern \eqn{X}. If the saturation threshold \eqn{s} is infinite, the Geyer process reduces to a Strauss process with interaction parameter \eqn{\gamma^2}{gamma^2} rather than \eqn{\gamma}{gamma}. } \item{hardcore:}{ (Hard core process.) A \bold{named} list with components \code{beta} and \code{hc} where \code{beta} is the base intensity and \code{hc} is the hard core distance. This process has pairwise interaction function \eqn{e(t)} equal to 1 if \eqn{t > hc} and 0 if \eqn{t <= hc}. } \item{lennard:}{ (Lennard-Jones process.) A \bold{named} list with components \code{sigma} and \code{epsilon}, where \code{sigma} is the characteristic diameter and \code{epsilon} is the well depth. See \code{\link{LennardJones}} for explanation. } \item{multihard:}{ (Multitype hard core process.) 
A \bold{named} list with components \code{beta} and \code{hradii}, where \code{beta} is a vector of base intensities for each type of point, and \code{hradii} is a matrix of hard core radii between each pair of types. } \item{penttinen:}{ (Penttinen process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter, and the disc radius. Note that \code{gamma} must be less than or equal to 1. See \code{\link{Penttinen}} for explanation. (Note that there is also an algorithm for perfect simulation of the Penttinen process, \code{\link{rPenttinen}}) } \item{strauss:}{ (Strauss process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter and the interaction radius. Note that \code{gamma} must be less than or equal to 1. (Note that there is also an algorithm for perfect simulation of the Strauss process, \code{\link{rStrauss}}) } \item{straush:}{ (Strauss process with hardcore.) A \bold{named} list with entries \code{beta,gamma,r,hc} where \code{beta}, \code{gamma}, and \code{r} are as for the Strauss process, and \code{hc} is the hardcore radius. Of course \code{hc} must be less than \code{r}. } \item{sftcr:}{ (Softcore process.) A \bold{named} list with components \code{beta,sigma,kappa}. Again \code{beta} is a ``base'' intensity. The pairwise interaction between two points \eqn{u \neq v}{u != v} is \deqn{ \exp \left \{ - \left ( \frac{\sigma}{||u-v||} \right )^{2/\kappa} \right \} }{-(sigma/||u-v||)^(2/kappa)} Note that it is necessary that \eqn{0 < \kappa < 1}{0 < kappa <1}. } \item{straussm:}{ (Multitype Strauss process.) A \bold{named} list with components \itemize{ \item \code{beta}: A vector of ``base'' intensities, one for each possible type. 
\item \code{gamma}: A \bold{symmetric} matrix of interaction parameters, with \eqn{\gamma_{ij}}{gamma_ij} pertaining to the interaction between type \eqn{i} and type \eqn{j}. \item \code{radii}: A \bold{symmetric} matrix of interaction radii, with entries \eqn{r_{ij}}{r_ij} pertaining to the interaction between type \eqn{i} and type \eqn{j}. } } \item{straushm:}{ (Multitype Strauss process with hardcore.) A \bold{named} list with components \code{beta} and \code{gamma} as for \code{straussm} and \bold{two} ``radii'' components: \itemize{ \item \code{iradii}: the interaction radii \item \code{hradii}: the hardcore radii } which are both symmetric matrices of nonnegative numbers. The entries of \code{hradii} must be less than the corresponding entries of \code{iradii}. } \item{triplets:}{ (Triplets process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the triplet interaction parameter and the interaction radius. Note that \code{gamma} must be less than or equal to 1. } \item{lookup:}{ (Arbitrary pairwise interaction process with isotropic interaction.) A \bold{named} list with components \code{beta}, \code{r}, and \code{h}, or just with components \code{beta} and \code{h}. This model is the pairwise interaction process with an isotropic interaction given by any chosen function \eqn{H}. Each pair of points \eqn{x_i, x_j}{x[i], x[j]} in the point pattern contributes a factor \eqn{H(d(x_i, x_j))}{H(d(x[i],x[j]))} to the probability density, where \eqn{d} denotes distance and \eqn{H} is the pair interaction function. The component \code{beta} is a (positive) scalar which determines the ``base'' intensity of the process. In this implementation, \eqn{H} must be a step function. It is specified by the user in one of two ways. 
\itemize{ \item \bold{as a vector of values:} If \code{r} is present, then \code{r} is assumed to give the locations of jumps in the function \eqn{H}, while the vector \code{h} gives the corresponding values of the function. Specifically, the interaction function \eqn{H(t)} takes the value \code{h[1]} for distances \eqn{t} in the interval \code{[0, r[1])}; takes the value \code{h[i]} for distances \eqn{t} in the interval \code{[r[i-1], r[i])} where \eqn{i = 2,\ldots, n}{i = 2, ..., n}; and takes the value 1 for \eqn{t \ge r[n]}{t >= r[n]}. Here \eqn{n} denotes the length of \code{r}. The components \code{r} and \code{h} must be numeric vectors of equal length. The \code{r} values must be strictly positive, and sorted in increasing order. The entries of \code{h} must be non-negative. If any entry of \code{h} is greater than 1, then the entry \code{h[1]} must be 0 (otherwise the specified process is non-existent). Greatest efficiency is achieved if the values of \code{r} are equally spaced. [\bold{Note:} The usage of \code{r} and \code{h} has \emph{changed} from the previous usage in \pkg{spatstat} versions 1.4-7 to 1.5-1, in which ascending order was not required, and in which the first entry of \code{r} had to be 0.] \item \bold{as a stepfun object:} If \code{r} is absent, then \code{h} must be an object of class \code{"stepfun"} specifying a step function. Such objects are created by \code{\link{stepfun}}. The stepfun object \code{h} must be right-continuous (which is the default using \code{\link{stepfun}}.) The values of the step function must all be nonnegative. The values must all be less than 1 unless the function is identically zero on some initial interval \eqn{[0,r)}. The rightmost value (the value of \code{h(t)} for large \code{t}) must be equal to 1. Greatest efficiency is achieved if the jumps (the ``knots'' of the step function) are equally spaced. 
} } } For a hybrid model, the argument \code{par} should be a list, of the same length as \code{cif}, such that \code{par[[i]]} is a list of the parameters required for the interaction \code{cif[i]}. See the Examples. The optional argument \code{trend} determines the spatial trend in the model, if it has one. It should be a function or image (or a list of such, if the model is multitype) to provide the value of the trend at an arbitrary point. \describe{ \item{trend given as a function:}{A trend function may be a function of any number of arguments, but the first two must be the \eqn{x,y} coordinates of a point. Auxiliary arguments may be passed to the \code{trend} function at the time of simulation, via the \code{\dots} argument to \code{\link{rmh}}. The function \bold{must} be \bold{vectorized}. That is, it must be capable of accepting vector valued \code{x} and \code{y} arguments. Put another way, it must be capable of calculating the trend value at a number of points, simultaneously, and should return the \bold{vector} of corresponding trend values. } \item{trend given as an image:}{ An image (see \code{\link{im.object}}) provides the trend values at a grid of points in the observation window and determines the trend value at other points as the value at the nearest grid point. } } Note that the trend or trends must be \bold{non-negative}; no checking is done for this. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. The optional argument \code{types} specifies the possible types in a multitype point process. If the model being simulated is multitype, and \code{types} is not specified, then this vector defaults to \code{1:ntypes} where \code{ntypes} is the number of types. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. 
(2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Electronische Informationsverabeitung und Kybernetika} \bold{20}, 270--278. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings in Respect of ``lookup''}{ For the \code{lookup} cif, the entries of the \code{r} component of \code{par} must be \emph{strictly positive} and sorted into ascending order. Note that if you specify the \code{lookup} pairwise interaction function via \code{\link{stepfun}()} the arguments \code{x} and \code{y} which are passed to \code{stepfun()} are slightly different from \code{r} and \code{h}: \code{length(y)} is equal to \code{1+length(x)}; the final entry of \code{y} must be equal to 1 --- i.e. this value is explicitly supplied by the user rather than getting tacked on internally. The step function returned by \code{stepfun()} must be right continuous (this is the default behaviour of \code{stepfun()}) otherwise an error is given. 
} \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \rmhInteractionsList. } \examples{ # Strauss process: mod01 <- rmhmodel(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) mod01 # The above could also be simulated using 'rStrauss' # Strauss with hardcore: mod04 <- rmhmodel(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) # Hard core: mod05 <- rmhmodel(cif="hardcore",par=list(beta=2,hc=0.3), w=square(5)) # Soft core: w <- square(10) mod07 <- rmhmodel(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) # Penttinen process: modpen <- rmhmodel(cif="penttinen",par=list(beta=2,gamma=0.6,r=1), w=c(0,10,0,10)) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) # Baddeley-Geyer process: mod99 <- rmhmodel(cif="badgey",par=list(beta=0.3, gamma=c(0.2,1.8,2.4),r=c(0.035,0.07,0.14),sat=5), w=unit.square()) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) # specify types mod09 <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B")) # Multitype Hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod08hard <- rmhmodel(cif="multihard", par=list(beta=beta,hradii=rhc), w=square(250), types=c("A", "B")) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- rmhmodel(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) # Triplets process: mod11 <- 
rmhmodel(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- rmhmodel(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) # hybrid model modhy <- rmhmodel(cif=c('strauss', 'geyer'), par=list(list(beta=100,gamma=0.5,r=0.05), list(beta=1, gamma=0.7,r=0.1, sat=2)), w=square(1)) modhy } \author{ \adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/Fest.Rd0000644000176200001440000002703213556444621014076 0ustar liggesusers\name{Fest} \alias{Fest} \alias{Fhazard} \title{Estimate the Empty Space Function or its Hazard Rate} \description{ Estimates the empty space function \eqn{F(r)} or its hazard rate \eqn{h(r)} from a point pattern in a window of arbitrary shape. } \usage{ Fest(X, \dots, eps, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) Fhazard(X, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{F(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{ Extra arguments, passed from \code{Fhazard} to \code{Fest}. Extra arguments to \code{Fest} are ignored. } \item{eps}{Optional. A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{F(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{F(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. 
Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. The result of \code{Fest} is essentially a data frame containing up to seven columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{F(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{F(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{F(r)} by the spatial Kaplan-Meier method } \item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)} } \item{raw}{the uncorrected estimate of \eqn{F(r)}, i.e. the empirical distribution of the distance from a random point in the window to the nearest point of the data pattern \code{X} } \item{theo}{the theoretical value of \eqn{F(r)} for a stationary Poisson process of the same estimated intensity. } The result of \code{Fhazard} contains only three columns \item{r}{the values of the argument \eqn{r} at which the hazard rate \eqn{h(r)} has been estimated } \item{hazard}{the spatial Kaplan-Meier estimate of the hazard rate \eqn{h(r)}} \item{theo}{ the theoretical value of \eqn{h(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ \code{Fest} computes an estimate of the empty space function \eqn{F(r)}, and \code{Fhazard} computes an estimate of its hazard rate \eqn{h(r)}. The empty space function (also called the ``\emph{spherical contact distribution}'' or the ``\emph{point-to-nearest-event}'' distribution) of a stationary point process \eqn{X} is the cumulative distribution function \eqn{F} of the distance from a fixed point in space to the nearest point of \eqn{X}. 
An estimate of \eqn{F} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{F} is a useful statistic summarising the sizes of gaps in the pattern. For inferential purposes, the estimate of \eqn{F} is usually compared to the true value of \eqn{F} for a completely random (Poisson) point process, which is \deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{% F(r) = 1 - exp( - \lambda * \pi * r^2) % } where \eqn{\lambda}{\lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{F} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the empty space function \eqn{F} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}}. The algorithm uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. (See below for details.) First-time users are strongly advised not to specify these arguments. The estimation of \eqn{F} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or "\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and Stoyan, 1998). 
Our implementation makes essential use of the distance transform algorithm of image processing (Borgefors, 1986). A fine grid of pixels is created in the observation window. The Euclidean distance between two pixels is approximated by the length of the shortest path joining them in the grid, where a path is a sequence of steps between adjacent pixels, and horizontal, vertical and diagonal steps have length \eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)} respectively in pixel units. If the pixel grid is sufficiently fine then this is an accurate approximation. The parameter \code{eps} is the pixel width of the rectangular raster used to compute the distance transform (see below). It must not be too large: the absolute error in distance values due to discretisation is bounded by \code{eps}. If \code{eps} is not specified, the function checks whether the window \code{Window(X)} contains pixel raster information. If so, then \code{eps} is set equal to the pixel width of the raster; otherwise, \code{eps} defaults to 1/100 of the width of the observation window. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{F(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the spacing of successive \code{r} values must be very fine (ideally not greater than \code{eps/4}). The algorithm also returns an estimate of the hazard rate function, \eqn{h(r)} of \eqn{F(r)}. 
The hazard rate is defined by \deqn{h(r) = - \frac{d}{dr} \log(1 - F(r))}{% h(r) = - (d/dr) log(1 - F(r)) % } The hazard rate of \eqn{F} has been proposed as a useful exploratory statistic (Baddeley and Gill, 1994). The estimate of \eqn{h(r)} given here is a discrete approximation to the hazard rate of the Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is absolutely continuous (for any stationary point process \eqn{X}), so the hazard function always exists (Baddeley and Gill, 1997). If the argument \code{domain} is given, the estimate of \eqn{F(r)} will be based only on the empty space distances measured from locations inside \code{domain} (although their nearest data points may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each location in the window to the nearest point of the data pattern, is a biased estimate of \eqn{F}. However this is also returned by the algorithm (if \code{correction="none"}), as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{F} as if it were an unbiased estimator of \eqn{F}. } \note{ Sizeable amounts of memory may be needed during the calculation. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. 
Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344-371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The reduced sample (border method) estimator of \eqn{F} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing but its maximum value may be less than \eqn{1}. The estimate of hazard rate \eqn{h(r)} returned by the algorithm is an approximately unbiased estimate for the integral of \eqn{h()} over the corresponding histogram cell. It may exhibit oscillations due to discretisation effects. We recommend modest smoothing, such as kernel smoothing with kernel width equal to the width of a histogram cell, using \code{\link{Smooth.fv}}. } \seealso{ \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ Fc <- Fest(cells, 0.01) # Tip: don't use F for the left hand side! # That's an abbreviation for FALSE plot(Fc) # P-P style plot plot(Fc, cbind(km, theo) ~ theo) # The empirical F is above the Poisson F # indicating an inhibited pattern \dontrun{ plot(Fc, . 
~ theo) plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo))) } \testonly{ Fh <- Fhazard(cells) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/summary.ppp.Rd0000644000176200001440000000306713333543264015466 0ustar liggesusers\name{summary.ppp} \alias{summary.ppp} \title{Summary of a Point Pattern Dataset} \description{ Prints a useful summary of a point pattern dataset. } \usage{ \method{summary}{ppp}(object, \dots, checkdup=TRUE) } \arguments{ \item{object}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{checkdup}{ Logical value indicating whether to check for the presence of duplicate points. } } \details{ A useful summary of the point pattern \code{object} is printed. This is a method for the generic function \code{\link{summary}}. If \code{checkdup=TRUE}, the pattern will be checked for the presence of duplicate points, using \code{\link{duplicated.ppp}}. This can be time-consuming if the pattern contains many points, so the checking can be disabled by setting \code{checkdup=FALSE}. If the point pattern was generated by simulation using \code{\link{rmh}}, the parameters of the algorithm are printed. 
} \seealso{ \code{\link{summary}}, \code{\link{summary.owin}}, \code{\link{print.ppp}} } \examples{ summary(cells) # plain vanilla point pattern # multitype point pattern woods <- lansing \testonly{woods <- woods[seq(1, npoints(woods), length=40)]} summary(woods) # tabulates frequencies of each mark # numeric marks trees <- longleaf \testonly{trees <- trees[seq(1, npoints(trees), length=40)]} summary(trees) # prints summary.default(marks(trees)) # weird polygonal window summary(demopat) # describes it } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} spatstat/man/rlabel.Rd0000644000176200001440000000515113460231506014422 0ustar liggesusers\name{rlabel} \alias{rlabel} \title{Random Re-Labelling of Point Pattern} \description{ Randomly allocates marks to a point pattern, or permutes the existing marks, or resamples from the existing marks. } \usage{ rlabel(X, labels=marks(X), permute=TRUE, nsim=1, drop=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}) or line segment pattern (object of class \code{"psp"}). } \item{labels}{ Vector of values from which the new marks will be drawn at random. Defaults to the vector of existing marks. } \item{permute}{ Logical value indicating whether to generate new marks by randomly permuting \code{labels} or by drawing a random sample with replacement. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a marked point pattern (of the same class as \code{X}). If \code{nsim > 1}, a list of point patterns. } \details{ This very simple function allocates random marks to an existing point pattern \code{X}. It is useful for hypothesis testing purposes. (The function can also be applied to line segment patterns.) 
In the simplest case, the command \code{rlabel(X)} yields a point pattern obtained from \code{X} by randomly permuting the marks of the points. If \code{permute=TRUE}, then \code{labels} should be a vector of length equal to the number of points in \code{X}. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random permutation of \code{labels} (i.e. a random sample without replacement). If \code{permute=FALSE}, then \code{labels} may be a vector of any length. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random sample from \code{labels} (with replacement). } \seealso{ \code{\link{marks<-}} to assign arbitrary marks. } \examples{ amacrine # Randomly permute the marks "on" and "off" # Result always has 142 "off" and 152 "on" Y <- rlabel(amacrine) # randomly allocate marks "on" and "off" # with probabilities p(off) = 0.48, p(on) = 0.52 Y <- rlabel(amacrine, permute=FALSE) # randomly allocate marks "A" and "B" with equal probability data(cells) Y <- rlabel(cells, labels=factor(c("A", "B")), permute=FALSE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{datagen} spatstat/man/lut.Rd0000644000176200001440000000767313573615567014022 0ustar liggesusers\name{lut} \alias{lut} \title{Lookup Tables} \description{ Create a lookup table. } \usage{ lut(outputs, ..., range=NULL, breaks=NULL, inputs=NULL, gamma=1) } \arguments{ \item{outputs}{Vector of output values} \item{\dots}{Ignored.} \item{range}{ Interval of numbers to be mapped. A numeric vector of length 2, specifying the ends of the range of values to be mapped. Incompatible with \code{breaks} or \code{inputs}. } \item{inputs}{ Input values to which the output values are associated. A factor or vector of the same length as \code{outputs}. Incompatible with \code{breaks} or \code{range}. } \item{breaks}{ Breakpoints for the lookup table. A numeric vector of length equal to \code{length(outputs)+1}. 
Incompatible with \code{range} or \code{inputs}. } \item{gamma}{ Exponent for gamma correction, when \code{range} is given. A single positive number. See Details. } } \details{ A lookup table is a function, mapping input values to output values. The command \code{lut} creates an object representing a lookup table, which can then be used to control various behaviour in the \pkg{spatstat} package. It can also be used to compute the output value assigned to any input value. The argument \code{outputs} specifies the output values to which input data values will be mapped. It should be a vector of any atomic type (e.g. numeric, logical, character, complex) or factor values. Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks} must be specified by name. \itemize{ \item If \code{inputs} is given, then it should be a vector or factor, of the same length as \code{outputs}. The entries of \code{inputs} can be any atomic type (e.g. numeric, logical, character, complex) or factor values. The resulting lookup table associates the value \code{inputs[i]} with the value \code{outputs[i]}. The argument \code{outputs} should have the same length as \code{inputs}. \item If \code{range} is given, then it determines the interval of the real number line that will be mapped. It should be a numeric vector of length 2. The interval will be divided evenly into bands, each of which is mapped to an entry of \code{outputs}. (If \code{gamma} is given, then the bands are equally spaced on a scale where the original values are raised to the power \code{gamma}.) \item If \code{breaks} is given, then it determines intervals of the real number line which are mapped to each output value. It should be a numeric vector, of length at least 2, with entries that are in increasing order. Infinite values are allowed. Any number in the range between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the value \code{outputs[i]}. 
The argument \code{outputs} should have length equal to \code{length(breaks) - 1}. } It is also permissible for \code{outputs} to be a single value, representing a trivial lookup table in which all data values are mapped to the same output value. The result is an object of class \code{"lut"}. There is a \code{print} method for this class. Some plot commands in the \pkg{spatstat} package accept an object of this class as a specification of a lookup table. The result is also a function \code{f} which can be used to compute the output value assigned to any input data value. That is, \code{f(x)} returns the output value assigned to \code{x}. This also works for vectors of input data values. } \value{ A function, which is also an object of class \code{"lut"}. } \seealso{ \code{\link{colourmap}}. } \examples{ # lookup table for real numbers, using breakpoints cr <- lut(factor(c("low", "medium", "high")), breaks=c(0,5,10,15)) cr cr(3.2) cr(c(3,5,7)) # lookup table for discrete set of values ct <- lut(c(0,1), inputs=c(FALSE, TRUE)) ct(TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/dummy.ppm.Rd0000644000176200001440000000431313333543263015113 0ustar liggesusers\name{dummy.ppm} \alias{dummy.ppm} \title{Extract Dummy Points Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the `dummy points' of the quadrature scheme used to fit the model. } \usage{ dummy.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } \item{drop}{ Logical value determining whether to delete dummy points that were not used to fit the model. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. 
The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{dummy.ppm} extracts the dummy points of the quadrature scheme. A typical use of this function would be to count the number of dummy points, to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some dummy points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused dummy points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. 
} \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- dummy.ppm(fit) npoints(X) # this is the number of dummy points in the quadrature scheme } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} \keyword{models} spatstat/man/as.mask.Rd0000644000176200001440000000662713333543262014533 0ustar liggesusers\name{as.mask} \alias{as.mask} \title{Pixel Image Approximation of a Window} \description{ Obtain a discrete (pixel image) approximation of a given window } \usage{ as.mask(w, eps=NULL, dimyx=NULL, xy=NULL) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or data acceptable to \code{\link{as.owin}}.} \item{eps}{(optional) width and height of pixels.} \item{dimyx}{(optional) pixel array dimensions} \item{xy}{(optional) data containing pixel coordinates} } \value{ A window (object of class \code{"owin"}) of type \code{"mask"} representing a binary pixel image. } \details{ This function generates a rectangular grid of locations in the plane, tests whether each of these locations lies inside the window \code{w}, and stores the results as a binary pixel image or `mask' (an object of class \code{"owin"}, see \code{\link{owin.object}}). The most common use of this function is to approximate the shape of another window \code{w} by a binary pixel image. In this case, we will usually want to have a very fine grid of pixels. This function can also be used to generate a coarsely-spaced grid of locations inside a window, for purposes such as subsampling and prediction. The grid spacing and location are controlled by the arguments \code{eps}, \code{dimyx} and \code{xy}, which are mutually incompatible. If \code{eps} is given, then it determines the grid spacing. If \code{eps} is a single number, then the grid spacing will be approximately \code{eps} in both the \eqn{x} and \eqn{y} directions. 
some kind of data specifying the coordinates of a pixel grid. It may be \itemize{ \item a list or structure containing elements \code{x} and \code{y} which are numeric vectors of equal length. These will be taken as \eqn{x} and \eqn{y} coordinates of the margins of the grid.
} \seealso{ \code{\link{owin.object}}, \code{\link{as.rectangle}}, \code{\link{as.polygonal}}, \code{\link{spatstat.options}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) \dontrun{plot(w)} m <- as.mask(w) \dontrun{plot(m)} x <- 1:9 y <- seq(0.25, 9.75, by=0.5) m <- as.mask(w, xy=list(x=x, y=y)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat/man/padimage.Rd0000644000176200001440000000320513333543263014733 0ustar liggesusers\name{padimage} \alias{padimage} \title{ Pad the Border of a Pixel Image } \description{ Fills the border of a pixel image with a given value or values, or extends a pixel image to fill a larger window. } \usage{ padimage(X, value=NA, n=1, W=NULL) } \arguments{ \item{X}{ Pixel image (object of class \code{"im"}). } \item{value}{ Single value to be placed around the border of \code{X}. } \item{n}{ Width of border, in pixels. See Details. } \item{W}{ Window for the resulting image. Incompatible with \code{n}. } } \details{ The image \code{X} will be expanded by a margin of \code{n} pixels, or extended to fill the window \code{W}, with new pixel values set to \code{value}. The argument \code{value} should be a single value (a vector of length 1), normally a value of the same type as the pixel values of \code{X}. It may be \code{NA}. Alternatively if \code{X} is a factor-valued image, \code{value} can be one of the levels of \code{X}. If \code{n} is given, it may be a single number, specifying the width of the border in pixels. Alternatively it may be a vector of length 2 or 4. It will be replicated to length 4, and these numbers will be interpreted as the border widths for the (left, right, top, bottom) margins respectively. Alternatively if \code{W} is given, the image will be extended to the window \code{W}. } \value{ Another object of class \code{"im"}, of the same type as \code{X}. 
Moradi, M., Cronie, O., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators.
\emph{Statistics and Computing}, in press. } \author{ \spatstatAuthors and Mehdi Moradi. } \seealso{ \code{\link{densityVoronoi.lpp}} } \examples{ np <- if(interactive()) 10 else 3 nr <- if(interactive()) 100 else 2 b <- bw.voronoi(spiders, nprob=np, nrep=nr) b plot(b) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/pixelcentres.Rd0000644000176200001440000000237013333543264015674 0ustar liggesusers\name{pixelcentres} \alias{pixelcentres} \title{ Extract Pixel Centres as Point Pattern } \description{ Given a pixel image or binary mask window, extract the centres of all pixels and return them as a point pattern. } \usage{ pixelcentres(X, W = NULL, ...) } \arguments{ \item{X}{ Pixel image (object of class \code{"im"}) or window (object of class \code{"owin"}). } \item{W}{ Optional window to contain the resulting point pattern. } \item{\dots}{ Optional arguments defining the pixel resolution. } } \details{ If the argument \code{X} is a pixel image, the result is a point pattern, consisting of the centre of every pixel whose pixel value is not \code{NA}. If \code{X} is a window which is a binary mask, the result is a point pattern consisting of the centre of every pixel inside the window (i.e. every pixel for which the mask value is \code{TRUE}). Otherwise, \code{X} is first converted to a window, then converted to a mask using \code{\link{as.mask}}, then handled as above. } \value{ A point pattern (object of class \code{"ppp"}). } \seealso{ \code{\link{raster.xy}} } \examples{ pixelcentres(letterR, dimyx=5) } \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{manip} spatstat/man/summary.owin.Rd0000644000176200001440000000146313333543264015641 0ustar liggesusers\name{summary.owin} \alias{summary.owin} \title{Summary of a Spatial Window} \description{ Prints a useful description of a window object. 
} \usage{ \method{summary}{owin}(object, \dots) } \arguments{ \item{object}{Window (object of class \code{"owin"}).} \item{\dots}{Ignored.} } \details{ A useful description of the window \code{object} is printed. This is a method for the generic function \code{\link{summary}}. } \seealso{ \code{\link{summary}}, \code{\link{summary.ppp}}, \code{\link{print.owin}} } \examples{ summary(owin()) # the unit square data(demopat) W <- Window(demopat) # weird polygonal window summary(W) # describes it summary(as.mask(W)) # demonstrates current pixel resolution } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} spatstat/man/plot.yardstick.Rd0000644000176200001440000000531613333543264016144 0ustar liggesusers\name{plot.yardstick} \alias{plot.yardstick} \title{ Plot a Yardstick or Scale Bar } \description{ Plots an object of class \code{"yardstick"}. } \usage{ \method{plot}{yardstick}(x, \dots, angle = 20, frac = 1/8, split = FALSE, shrink = 1/4, pos = NULL, txt.args=list(), txt.shift=c(0,0), do.plot = TRUE) } \arguments{ \item{x}{ Object of class \code{"yardstick"} to be plotted. This object is created by the command \code{\link{yardstick}}. } \item{\dots}{ Additional graphics arguments passed to \code{\link[graphics]{segments}} to control the appearance of the line. } \item{angle}{ Angle between the arrows and the line segment, in degrees. } \item{frac}{ Length of arrow as a fraction of total length of the line segment. } \item{split}{ Logical. If \code{TRUE}, then the line will be broken in the middle, and the text will be placed in this gap. If \code{FALSE}, the line will be unbroken, and the text will be placed beside the line. } \item{shrink}{ Fraction of total length to be removed from the middle of the line segment, if \code{split=TRUE}. } \item{pos}{ Integer (passed to \code{\link[graphics]{text}}) determining the position of the annotation text relative to the line segment, if \code{split=FALSE}. 
Values of 1, 2, 3 and 4 indicate positions below, to the left of, above and to the right of the line, respectively. } \item{txt.args}{ Optional list of additional arguments passed to \code{\link[graphics]{text}} controlling the appearance of the text. Examples include \code{adj}, \code{srt}, \code{col}, \code{cex}, \code{font}. } \item{txt.shift}{ Optional numeric vector of length 2 specifying displacement of the text position relative to the centre of the yardstick. } \item{do.plot}{ Logical. Whether to actually perform the plot (\code{do.plot=TRUE}). } } \details{ A yardstick or scale bar is a line segment, drawn on any spatial graphics display, indicating the scale of the plot. The argument \code{x} should be an object of class \code{"yardstick"} created by the command \code{\link{yardstick}}. } \value{ A window (class \code{"owin"}) enclosing the plotted graphics. } \examples{ plot(owin(), main="Yardsticks") ys <- yardstick(as.psp(list(xmid=0.5, ymid=0.1, length=0.4, angle=0), window=owin(c(0.2, 0.8), c(0, 0.2))), txt="1 km") plot(ys) ys <- shift(ys, c(0, 0.3)) plot(ys, angle=90, frac=0.08) ys <- shift(ys, c(0, 0.3)) plot(ys, split=TRUE) } \author{\adrian \rolf and \ege } \seealso{ \code{\link{yardstick}} } \keyword{spatial} \keyword{hplot} spatstat/man/infline.Rd0000644000176200001440000000603113565656166014626 0ustar liggesusers\name{infline} \alias{infline} \alias{plot.infline} \alias{print.infline} \title{Infinite Straight Lines} \description{ Define the coordinates of one or more straight lines in the plane } \usage{ infline(a = NULL, b = NULL, h = NULL, v = NULL, p = NULL, theta = NULL) \method{print}{infline}(x, \dots) \method{plot}{infline}(x, \dots) } \arguments{ \item{a,b}{Numeric vectors of equal length giving the intercepts \eqn{a} and slopes \eqn{b} of the lines. Incompatible with \code{h,v,p,theta} } \item{h}{Numeric vector giving the positions of horizontal lines when they cross the \eqn{y} axis. 
\item for vertical lines we can use the position \eqn{v} where the line crosses the \eqn{x} axis \item for horizontal lines we can use the position \eqn{h} where the line crosses the \eqn{y} axis
} \seealso{ \code{\link{rotate.infline}}, \code{\link{clip.infline}}, \code{\link{chop.tess}}, \code{\link{whichhalfplane}} } \examples{ infline(a=10:13,b=1) infline(p=1:3, theta=pi/4) plot(c(-1,1),c(-1,1),type="n",xlab="",ylab="", asp=1) plot(infline(p=0.4, theta=seq(0,pi,length=20))) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{datagen} spatstat/man/periodify.Rd0000644000176200001440000000732213333543264015163 0ustar liggesusers\name{periodify} \alias{periodify} \alias{periodify.owin} \alias{periodify.ppp} \alias{periodify.psp} \title{ Make Periodic Copies of a Spatial Pattern } \description{ Given a spatial pattern (point pattern, line segment pattern, window, etc) make shifted copies of the pattern and optionally combine them to make a periodic pattern. } \usage{ periodify(X, ...) \method{periodify}{ppp}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) \method{periodify}{psp}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) \method{periodify}{owin}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) } \arguments{ \item{X}{ An object representing a spatial pattern (point pattern, line segment pattern or window). } \item{nx,ny}{ Integers. Numbers of additional copies of \code{X} in each direction. The result will be a grid of \code{2 * nx + 1} by \code{2 * ny + 1} copies of the original object. (Overruled by \code{ix, iy, ixy}). } \item{\dots}{ Ignored. } \item{combine}{ Logical flag determining whether the copies should be superimposed to make an object like \code{X} (if \code{combine=TRUE}) or simply returned as a list of objects (\code{combine=FALSE}). } \item{warn}{ Logical flag determining whether to issue warnings. } \item{check}{ Logical flag determining whether to check the validity of the combined pattern. 
} \item{ix, iy}{ Integer vectors determining the grid positions of the copies of \code{X}. (Overruled by \code{ixy}). } \item{ixy}{ Matrix or data frame with two columns, giving the grid positions of the copies of \code{X}. } } \details{ Given a spatial pattern (point pattern, line segment pattern, etc) this function makes a number of shifted copies of the pattern and optionally combines them. The function \code{periodify} is generic, with methods for various kinds of spatial objects. The default is to make a 3 by 3 array of copies of \code{X} and combine them into a single pattern of the same kind as \code{X}. This can be used (for example) to compute toroidal or periodic edge corrections for various operations on \code{X}. If the arguments \code{nx}, \code{ny} are given and other arguments are missing, the original object will be copied \code{nx} times to the right and \code{nx} times to the left, then \code{ny} times upward and \code{ny} times downward, making \code{(2 * nx + 1) * (2 * ny + 1)} copies altogether, arranged in a grid, centred on the original object. If the arguments \code{ix}, \code{iy} or \code{ixy} are specified, then these determine the grid positions of the copies of \code{X} that will be made. For example \code{(ix,iy) = (1, 2)} means a copy of \code{X} shifted by the vector \code{(ix * w, iy * h)} where \code{w,h} are the width and height of the bounding rectangle of \code{X}. If \code{combine=TRUE} (the default) the copies of \code{X} are superimposed to create an object of the same kind as \code{X}. If \code{combine=FALSE} the copies of \code{X} are returned as a list. } \value{ If \code{combine=TRUE}, an object of the same class as \code{X}. If \code{combine=FALSE}, a list of objects of the same class as \code{X}. 
\item{X}{The observed point pattern, from which an estimate of the dot-type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed.
} \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}}{Li.} has been estimated } \item{theo}{the theoretical value \eqn{L_{i\bullet}(r) = r}{Li.(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}}{Li.} obtained by the edge corrections named. } \seealso{ \code{\link{Kdot}}, \code{\link{Lcross}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Ldot(amacrine, "off") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat/man/mean.linim.Rd0000644000176200001440000000306413333543263015216 0ustar liggesusers\name{mean.linim} \alias{mean.linim} \alias{median.linim} \alias{quantile.linim} \title{Mean, Median, Quantiles of Pixel Values on a Linear Network} \description{ Calculates the mean, median, or quantiles of the pixel values in a pixel image on a linear network. } \usage{ \method{mean}{linim}(x, \dots) \method{median}{linim}(x, \dots) \method{quantile}{linim}(x, probs=seq(0,1,0.25), \dots) } \arguments{ \item{x}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{probs}{ Vector of probabilities for which quantiles should be calculated. } \item{\dots}{Arguments passed to other methods.} } \details{ These functions calculate the mean, median and quantiles of the pixel values in the image \code{x} on a linear network. An object of class \code{"linim"} describes a pixel image on a linear network. See \code{\link{linim}}. The functions described here are methods for the generic \code{\link{mean}}, \code{\link[stats]{median}} and \code{\link[stats]{quantile}} for the class \code{"linim"}. 
} \value{ For \code{mean} and \code{median}, a single number. For \code{quantile}, a numeric vector of the same length as \code{probs}. } \seealso{ \code{\link{mean}}, \code{\link[stats]{median}}, \code{\link[stats]{quantile}}, \code{\link{mean.im}}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X mean(X) median(X) quantile(X) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/plot.scan.test.Rd0000644000176200001440000000471013333543264016046 0ustar liggesusers\name{plot.scan.test} \alias{plot.scan.test} \alias{as.im.scan.test} \title{ Plot Result of Scan Test } \description{ Computes or plots an image showing the likelihood ratio test statistic for the scan test, or the optimal circle radius. } \usage{ \method{plot}{scan.test}(x, \dots, what=c("statistic", "radius"), do.window = TRUE) \method{as.im}{scan.test}(X, \dots, what=c("statistic", "radius")) } \arguments{ \item{x,X}{ Result of a scan test. An object of class \code{"scan.test"} produced by \code{\link{scan.test}}. } \item{\dots}{ Arguments passed to \code{\link{plot.im}} to control the appearance of the plot. } \item{what}{ Character string indicating whether to produce an image of the (profile) likelihood ratio test statistic (\code{what="statistic"}, the default) or an image of the optimal value of circle radius (\code{what="radius"}). } \item{do.window}{ Logical value indicating whether to plot the original window of the data as well. } } \details{ These functions extract, and plot, the spatially-varying value of the likelihood ratio test statistic which forms the basis of the scan test. If the test result \code{X} was based on circles of the same radius \code{r}, then \code{as.im(X)} is a pixel image of the likelihood ratio test statistic as a function of the position of the centre of the circle. 
If the test result \code{X} was based on circles of several different radii \code{r}, then \code{as.im(X)} is a pixel image of the profile (maximum value over all radii \code{r}) likelihood ratio test statistic as a function of the position of the centre of the circle, and \code{as.im(X, what="radius")} is a pixel image giving for each location \eqn{u} the value of \code{r} which maximised the likelihood ratio test statistic at that location. The \code{plot} method plots the corresponding image. } \value{ The value of \code{as.im.scan.test} is a pixel image (object of class \code{"im"}). The value of \code{plot.scan.test} is \code{NULL}. } \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{scanLRTS}} } \examples{ if(interactive()) { a <- scan.test(redwood, seq(0.04, 0.1, by=0.01), method="poisson", nsim=19) } else { a <- scan.test(redwood, c(0.05, 0.1), method="poisson", nsim=2) } plot(a) as.im(a) plot(a, what="radius") } \keyword{htest} \keyword{spatial} spatstat/man/ppm.Rd0000644000176200001440000003727313350410544013766 0ustar liggesusers\name{ppm} \alias{ppm} \alias{ppm.formula} \concept{point process model} \concept{Poisson point process} \concept{Gibbs point process} \title{ Fit Point Process Model to Data } \description{ Fits a point process model to an observed point pattern. } \usage{ ppm(Q, \dots) \method{ppm}{formula}(Q, interaction=NULL, \dots, data=NULL, subset) } \arguments{ \item{Q}{ A \code{formula} in the \R language describing the model to be fitted. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or a function that makes such an object, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{ Arguments passed to \code{\link{ppm.ppp}} or \code{\link{ppm.quad}} to control the model-fitting process. } \item{data}{ Optional. 
The values of spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows, tessellations or single numbers. See Details. } \item{subset}{ Optional. An expression (which may involve the names of the Cartesian coordinates \code{x} and \code{y} and the names of entries in \code{data}) defining a subset of the spatial domain, to which the model-fitting should be restricted. The result of evaluating the expression should be either a logical vector, or a window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. The model fitted by \code{ppm} is either a Poisson point process (in which different points do not interact with each other) or a Gibbs point process (in which different points typically inhibit each other). For clustered point process models, use \code{\link{kppm}}. The function \code{ppm} is generic, with methods for the classes \code{formula}, \code{ppp} and \code{quad}. This page describes the method for a \code{formula}. The first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted. It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a spatial point pattern (object of class \code{"ppp"}) to which the model should be fitted, or an expression which evaluates to a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. 
Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the \code{trend} expression on the right hand side of the formula. The trend may involve the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, the names of entries in the argument \code{data} (if supplied), or the names of objects that exist in the \R session. The trend formula specifies the \bold{logarithm} of the intensity of a Poisson process, or in general, the logarithm of the first order potential of the Gibbs process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If the formula is \code{pattern~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). The symbol \code{.} in the trend expression stands for all the covariates supplied in the argument \code{data}. For example the formula \code{pattern ~ .} indicates an additive model with a main effect for each covariate in \code{data}. Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The interaction models currently available are: \GibbsInteractionsList. See the examples below. Note that it is possible to combine several interactions using \code{\link{Hybrid}}. If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. 
If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should either be supplied in the argument \code{data}, or should be stored in objects that exist in the \R session. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. If it is given, the argument \code{data} is typically a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either \describe{ \item{a pixel image,}{ giving the values of a spatial covariate at a fine grid of locations. It should be an object of class \code{"im"}, see \code{\link{im.object}}. } \item{a function,}{ which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate. It should be a \code{function(x, y)} or \code{function(x, y, ...)} in the \R language. The first two arguments of the function should be the Cartesian coordinates \eqn{x} and \eqn{y}. The function may have additional arguments; if the function does not have default values for these additional arguments, then the user must supply values for them, in \code{covfunargs}. See the Examples. } \item{a window,}{ interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it. This should be an object of class \code{"owin"}. } \item{a tessellation,}{ interpreted as a factor covariate. For each spatial location, the factor value indicates which tile of the tessellation it belongs to. This should be an object of class \code{"tess"}. (To make a covariate in which each tile of the tessellation has a numerical value, convert the tessellation to a \code{function(x,y)} using \code{\link{as.function.tess}}.) 
} \item{a single number,}{indicating a covariate that is constant in this dataset. } } The software will look up the values of each covariate at the required locations (quadrature points). Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{data} is a list, the list entries should have names corresponding to (some of) the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{data}. Alternatively, \code{data} may be a data frame giving the values of the covariates at specified locations. Then \code{pattern} should be a quadrature scheme (object of class \code{"quad"}) giving the corresponding locations. See \code{\link{ppm.quad}} for details. } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\psi)}{theta = theta(psi)} is some transformation of \eqn{\psi}{psi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters}{ Other parameters are called \emph{irregular}. } } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. 
For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells ~ 1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. Useful techniques include maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}, and Newton-Raphson maximisation, implemented in the experimental command \code{\link{ippm}}. Some irregular parameters can be estimated directly from data: the hard-core radius in the model \code{\link{Hardcore}} and the matrix of hard-core radii in \code{\link{MultiHard}} can be estimated easily from data. In these cases, \code{ppm} allows the user to specify the interaction without giving the value of the irregular parameter. The user can give the hard core interaction as \code{interaction=Hardcore()} or even \code{interaction=Hardcore}, and the hard core radius will then be estimated from the data. } \section{Technical Warnings and Error Messages}{ See \code{\link{ppm.ppp}} for some technical warnings about the weaknesses of the algorithm, and explanation of some common error messages. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2014) Logistic regression for spatial Gibbs point processes. \emph{Biometrika} \bold{101} (2) 377--392. Baddeley, A. and Turner, R. 
(2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} 283--322. Berman, M. and Turner, T.R. (1992) Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41}, 31--38. Besag, J. (1975) Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24}, 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. (1994) On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62}, 99-117. Huang, F. and Ogata, Y. (1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Jensen, J.L. and Moeller, M. (1991) Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1}, 445--461. Jensen, J.L. and Kuensch, H.R. (1994) On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46}, 475--486. } \seealso{ \code{\link{ppm.ppp}} and \code{\link{ppm.quad}} for more details on the fitting technique and edge correction. \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. Interactions: \GibbsInteractionsList. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{project.ppm}} for ensuring the fitted model is a valid point process. See \code{\link{kppm}} for fitting Cox point process models and cluster point process models, and \code{\link{dppm}} for fitting determinantal point process models. 
} \examples{ # fit the stationary Poisson process # to point pattern 'nztrees' ppm(nztrees ~ 1) \dontrun{ Q <- quadscheme(nztrees) ppm(Q ~ 1) # equivalent. } fit1 <- ppm(nztrees ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) \dontrun{ ppm(nztrees ~ polynom(x,2)) } \testonly{ ppm(nztrees ~ polynom(x,2), nd=16) } # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) \dontrun{ library(splines) ppm(nztrees ~ bs(x,df=3)) } # WARNING: do not use predict.ppm() on this result # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 \dontrun{ ppm(nztrees ~ 1, Strauss(r=10), rbord=10) } \testonly{ ppm(nztrees ~ 1, Strauss(r=10), rbord=10, nd=16) } # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 \dontrun{ ppm(nztrees ~ x, Strauss(13), correction="periodic") } \testonly{ ppm(nztrees ~ x, Strauss(13), correction="periodic", nd=16) } # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. 
# Compare Maximum Pseudolikelihood, Huang-Ogata and Variational Bayes fits: \dontrun{ppm(swedishpines ~ 1, Strauss(9))} \dontrun{ppm(swedishpines ~ 1, Strauss(9), method="ho")} \testonly{ppm(swedishpines ~ 1, Strauss(9), method="ho", nd=16, nsim=8)} ppm(swedishpines ~ 1, Strauss(9), method="VBlogi") # COVARIATES # X <- rpoispp(42) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X ~ y + weirdfunction) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X ~ x + f, covfunargs=list(a=1/2)) # COVARIATE: inside/outside window b <- owin(c(0.1, 0.6), c(0.1, 0.9)) ppm(X ~ b) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species \dontrun{ppm(lansing ~ marks, Poisson())} \testonly{ ama <- amacrine[square(0.7)] a <- ppm(ama ~ marks, Poisson(), nd=16) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \dontrun{ppm(lansing ~ marks * polynom(x,y,3), Poisson())} \testonly{b <- ppm(ama ~ marks * polynom(x,y,2), Poisson(), nd=16)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{models} spatstat/man/as.data.frame.im.Rd0000644000176200001440000000164413333543262016200 0ustar liggesusers\name{as.data.frame.im} \alias{as.data.frame.im} \title{Convert Pixel Image to Data Frame} \description{ Convert a pixel image to a data frame } \usage{ \method{as.data.frame}{im}(x, ...) 
} \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Further arguments passed to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features.} } \details{ This function takes the pixel image \code{x} and returns a data frame with three columns containing the pixel coordinates and the pixel values. The data frame entries are automatically sorted in increasing order of the \code{x} coordinate (and in increasing order of \code{y} within \code{x}). } \value{ A data frame. } \examples{ # artificial image Z <- setcov(square(1)) Y <- as.data.frame(Z) head(Y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat/man/yardstick.Rd0000644000176200001440000000516213333543265015167 0ustar liggesusers\name{yardstick} \alias{textstring} \alias{onearrow} \alias{yardstick} \title{ Text, Arrow or Scale Bar in a Diagram } \description{ Create spatial objects that represent a text string, an arrow, or a yardstick (scale bar). } \usage{ textstring(x, y, txt = NULL, \dots) onearrow(x0, y0, x1, y1, txt = NULL, \dots) yardstick(x0, y0, x1, y1, txt = NULL, \dots) } \arguments{ \item{x,y}{ Coordinates where the text should be placed. } \item{x0,y0,x1,y1}{ Spatial coordinates of both ends of the arrow or yardstick. Alternatively \code{x0} can be a point pattern (class \code{"ppp"}) containing exactly two points, or a line segment pattern (class \code{"psp"}) consisting of exactly one line segment. } \item{txt}{ The text to be displayed beside the line segment. Either a character string or an expression. } \item{\dots}{ Additional named arguments for plotting the object. } } \details{ These commands create objects that represent components of a diagram: \itemize{ \item \code{textstring} creates an object that represents a string of text at a particular spatial location. \item \code{onearrow} creates an object that represents an arrow between two locations. 
\item \code{yardstick} creates an object that represents a scale bar: a line segment indicating the scale of the plot. } To display the relevant object, it should be plotted, using \code{plot}. See the help files for the plot methods \code{\link{plot.textstring}}, \code{\link{plot.onearrow}} and \code{\link{plot.yardstick}}. These objects are designed to be included as components in a \code{\link{layered}} object or a \code{\link{solist}}. This makes it possible to build up a diagram consisting of many spatial objects, and to annotate the diagram with arrows, text and so on, so that ultimately the entire diagram is plotted using \code{plot}. } \value{ An object of class \code{"diagramobj"} which also belongs to one of the special classes \code{"textstring"}, \code{"onearrow"} or \code{"yardstick"}. There are methods for \code{plot}, \code{print}, \code{"["} and \code{\link{shift}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.textstring}}, \code{\link{plot.onearrow}}, \code{\link{plot.yardstick}}. } \examples{ X <- rescale(swedishpines) plot(X, pch=16, main="") yd <- yardstick(0,0,1,1, "diagonal") yy <- yardstick(X[1:2]) ys <- yardstick(as.psp(list(xmid=4, ymid=0.5, length=1, angle=0), window=Window(X)), txt="1 m") ys plot(ys, angle=90) scalardilate(ys, 2) } \keyword{spatial} \keyword{hplot} spatstat/man/tileindex.Rd0000644000176200001440000000275713532403247015162 0ustar liggesusers\name{tileindex} \alias{tileindex} \title{ Determine Which Tile Contains Each Given Point } \description{ Given a tessellation and a list of spatial points, determine which tile of the tessellation contains each of the given points. } \usage{ tileindex(x, y, Z) } \arguments{ \item{x,y}{ Spatial coordinates. Numeric vectors of equal length. (Alternatively \code{y} may be missing and \code{x} may be an object containing spatial coordinates). } \item{Z}{ A tessellation (object of class \code{"tess"}). 
} } \details{ This function determines which tile of the tessellation \code{Z} contains each of the spatial points with coordinates \code{(x[i],y[i])}. The result is a factor, of the same length as \code{x} and \code{y}, indicating which tile contains each point. The levels of the factor are the names of the tiles of \code{Z}. Values are \code{NA} if the corresponding point lies outside the tessellation. } \value{ A factor, of the same length as \code{x} and \code{y}, whose levels are the names of the tiles of \code{Z}. } \author{ \spatstatAuthors } \seealso{ \code{\link{cut.ppp}} and \code{\link{split.ppp}} to divide up the points of a point pattern according to a tessellation. \code{\link{as.function.tess}} to create a function whose value is the tile index. } \examples{ X <- runifpoint(7) V <- dirichlet(X) tileindex(0.1, 0.4, V) tileindex(list(x=0.1, y=0.4), Z=V) tileindex(X, Z=V) } \keyword{spatial} \keyword{manip} spatstat/man/insertVertices.Rd0000644000176200001440000000453513333543263016204 0ustar liggesusers\name{insertVertices} \alias{insertVertices} \title{ Insert New Vertices in a Linear Network } \description{ Adds new vertices to a linear network at specified locations along the network. } \usage{ insertVertices(L, \dots) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link{as.lpp}} specifying the positions of the new vertices along the network. } } \details{ This function adds new vertices at locations along an existing linear network. The argument \code{L} can be either a linear network (class \code{"linnet"}) or some other object that includes a linear network. The new vertex locations can be specified either as a point pattern (class \code{"lpp"} or \code{"ppp"}) or using coordinate vectors \code{x,y} or \code{seg,tp} or \code{x,y,seg,tp} as explained in the help for \code{\link{as.lpp}}. 
This function breaks the existing line segments of \code{L} into pieces at the locations specified by the coordinates \code{seg,tp} and creates new vertices at these locations. The result is the modified object, with an attribute \code{"id"} such that the \code{i}th added vertex has become the \code{id[i]}th vertex of the new network. } \value{ An object of the same class as \code{L} representing the result of adding the new vertices. The result also has an attribute \code{"id"} as described in Details. } \author{ Adrian Baddeley } \seealso{ \code{\link{as.lpp}}, \code{\link{linnet}}, \code{\link{methods.linnet}}, \code{\link{joinVertices}}, \code{\link{thinNetwork}}. } \examples{ opa <- par(mfrow=c(1,3), mar=rep(0,4)) simplenet plot(simplenet, main="") plot(vertices(simplenet), add=TRUE) # add two new vertices at specified local coordinates L <- insertVertices(simplenet, seg=c(3,7), tp=c(0.2, 0.5)) L plot(L, main="") plot(vertices(L), add=TRUE) id <- attr(L, "id") id plot(vertices(L)[id], add=TRUE, pch=16) # add new vertices at three randomly-generated points X <- runiflpp(3, simplenet) LL <- insertVertices(simplenet, X) plot(LL, main="") plot(vertices(LL), add=TRUE) ii <- attr(LL, "id") plot(vertices(LL)[ii], add=TRUE, pch=16) par(opa) } \keyword{spatial} \keyword{manip} spatstat/man/rdpp.Rd0000644000176200001440000000267113333543264014140 0ustar liggesusers\name{rdpp} \alias{rdpp} \title{Simulation of a Determinantal Point Process} \description{ Generates simulated realisations from a determinantal point process. } \usage{ rdpp(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 10000, progress = 0, debug = FALSE, \dots) } \arguments{ \item{eig}{ vector of values between 0 and 1 specifying the non-zero eigenvalues for the process. } \item{index}{ \code{data.frame} or \code{matrix} (or something acceptable to \code{\link{as.matrix}}) specifying indices of the basis functions. 
} \item{basis}{character string giving the name of the basis.} \item{window}{ window (of class \code{"owin"}, \code{"box3"} or \code{"boxx"}) giving the domain of the point process. } \item{reject_max}{ integer giving the maximal number of trials for rejection sampling. } \item{progress}{ integer giving the interval for making a progress report. The value zero turns reporting off. } \item{debug}{ logical value indicating whether debug information should be output. } \item{\dots}{Ignored.} } \author{ \adrian, \rolf and \ege } \examples{ index <- expand.grid(-2:2,-2:2) eig <- exp(-rowSums(index^2)) X <- rdpp(eig, index) X ## To simulate a determinantal projection point process with the given indices, set eig=1: XX <- rdpp(1, index) XX } \keyword{datagen} \keyword{spatial} \keyword{models} spatstat/man/border.Rd0000644000176200001440000000342513333543262014444 0ustar liggesusers\name{border} \alias{border} \title{Border Region of a Window} \description{ Computes the border region of a window, that is, the region lying within a specified distance of the boundary of a window. } \usage{ border(w, r, outside=FALSE, ...) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. } \item{r}{Numerical value.} \item{outside}{Logical value determining whether to compute the border outside or inside \code{w}.} \item{\dots}{ Optional arguments passed to \code{\link{erosion}} (if \code{outside=FALSE}) or to \code{\link{dilation}} (if \code{outside=TRUE}). } } \value{ A window (object of class \code{"owin"}). } \details{ By default (if \code{outside=FALSE}), the border region is the subset of \code{w} lying within a distance \code{r} of the boundary of \code{w}. It is computed by eroding \code{w} by the distance \code{r} (using \code{\link{erosion}}) and subtracting this eroded window from the original window \code{w}. 
If \code{outside=TRUE}, the border region is the set of locations outside \code{w} lying within a distance \code{r} of \code{w}. It is computed by dilating \code{w} by the distance \code{r} (using \code{\link{dilation}}) and subtracting the original window \code{w} from the dilated window. } \author{\adrian and \rolf } \seealso{ \code{\link{erosion}}, \code{\link{dilation}} } \examples{ # rectangle u <- unit.square() border(u, 0.1) border(u, 0.1, outside=TRUE) # polygon \testonly{opa <- spatstat.options(npixel=32)} data(letterR) plot(letterR) plot(border(letterR, 0.1), add=TRUE) plot(border(letterR, 0.1, outside=TRUE), add=TRUE) \testonly{spatstat.options(opa)} } \keyword{spatial} \keyword{math} spatstat/man/has.close.Rd0000644000176200001440000000406113333543263015044 0ustar liggesusers\name{has.close} \alias{has.close} \alias{has.close.default} \alias{has.close.ppp} \alias{has.close.pp3} \title{ Check Whether Points Have Close Neighbours } \description{ For each point in a point pattern, determine whether the point has a close neighbour in the same pattern. } \usage{ has.close(X, r, Y=NULL, \dots) \method{has.close}{default}(X,r, Y=NULL, \dots, periodic=FALSE) \method{has.close}{ppp}(X,r, Y=NULL, \dots, periodic=FALSE, sorted=FALSE) \method{has.close}{pp3}(X,r, Y=NULL, \dots, periodic=FALSE, sorted=FALSE) } \arguments{ \item{X,Y}{ Point patterns of class \code{"ppp"} or \code{"pp3"} or \code{"lpp"}. } \item{r}{ Threshold distance: a number greater than zero. } \item{periodic}{ Logical value indicating whether to measure distances in the periodic sense, so that opposite sides of the (rectangular) window are treated as identical. } \item{sorted}{ Logical value, indicating whether the points of \code{X} (and \code{Y}, if given) are already sorted into increasing order of the \eqn{x} coordinates. } \item{\dots}{Other arguments are ignored.} } \details{ This is simply a faster version of \code{(nndist(X) <= r)} or \code{(nncross(X,Y,what="dist") <= r)}. 
\code{has.close(X,r)} determines, for each point in the pattern \code{X}, whether or not this point has a neighbour in the same pattern \code{X} which lies at a distance less than or equal to \code{r}. \code{has.close(X,r,Y)} determines, for each point in the pattern \code{X}, whether or not this point has a neighbour in the \emph{other} pattern \code{Y} which lies at a distance less than or equal to \code{r}. The function \code{has.close} is generic, with methods for \code{"ppp"} and \code{"pp3"} and a default method. } \value{ A logical vector, with one entry for each point of \code{X}. } \author{ \adrian. } \seealso{ \code{\link{nndist}} } \examples{ has.close(redwood, 0.05) with(split(amacrine), has.close(on, 0.05, off)) with(osteo, sum(has.close(pts, 20))) } \keyword{spatial} \keyword{math} spatstat/man/rotate.psp.Rd0000644000176200001440000000272013333543264015265 0ustar liggesusers\name{rotate.psp} \alias{rotate.psp} \title{Rotate a Line Segment Pattern} \description{ Rotates a line segment pattern } \usage{ \method{rotate}{psp}(X, angle=pi/2, \dots, centre=NULL) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}).} \item{angle}{Angle of rotation.} \item{\dots}{ Arguments passed to \code{\link{rotate.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } } \value{ Another object of class \code{"psp"} representing the rotated line segment pattern. } \details{ The line segments of the pattern, and the window of observation, are rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the pattern 90 degrees anticlockwise. If the line segments carry marks, these are preserved. 
} \seealso{ \code{\link{psp.object}}, \code{\link{rotate.owin}}, \code{\link{rotate.ppp}} } \examples{ oldpar <- par(mfrow=c(2,1)) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, main="original") Y <- rotate(X, pi/4) plot(Y, main="rotated") par(oldpar) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/plot.pp3.Rd0000644000176200001440000000517613551474410014653 0ustar liggesusers\name{plot.pp3} \Rdversion{1.1} \alias{plot.pp3} \title{ Plot a Three-Dimensional Point Pattern } \description{ Plots a three-dimensional point pattern. } \usage{ \method{plot}{pp3}(x, ..., eye=NULL, org=NULL, theta=25, phi=15, type=c("p", "n", "h"), box.back=list(col="pink"), box.front=list(col="blue", lwd=2)) } \arguments{ \item{x}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{points}} controlling the appearance of the points. } \item{eye}{ Optional. Eye position. A numeric vector of length 3 giving the location from which the scene is viewed. } \item{org}{ Optional. Origin (centre) of the view. A numeric vector of length 3 which will be at the centre of the view. } \item{theta,phi}{ Optional angular coordinates (in degrees) specifying the direction from which the scene is viewed: \code{theta} is the azimuth and \code{phi} is the colatitude. Ignored if \code{eye} is given. } \item{type}{ Type of plot: \code{type="p"} for points, \code{type="h"} for points on vertical lines, \code{type="n"} for box only. } \item{box.front,box.back}{ How to plot the three-dimensional box that contains the points. A list of graphical arguments passed to \code{\link[graphics]{segments}}, or a logical value indicating whether or not to plot the relevant part of the box. See Details. } } \details{ This is the plot method for objects of class \code{"pp3"}. 
It generates a two-dimensional plot of the point pattern \code{x} and its containing box as if they had been viewed from the location specified by \code{eye} (or from the direction specified by \code{theta} and \code{phi}). The edges of the box at the \sQuote{back} of the scene (as viewed from the eye position) are plotted first. Then the points are added. Finally the remaining \sQuote{front} edges are plotted. The arguments \code{box.back} and \code{box.front} specify graphical parameters for drawing the back and front edges, respectively. Alternatively \code{box.back=FALSE} specifies that the back edges shall not be drawn. Note that default values of arguments to \code{plot.pp3} can be set by \code{\link{spatstat.options}("par.pp3")}. } \value{Null.} \author{ \spatstatAuthors. } \seealso{ \code{\link{pp3}}, \code{\link{spatstat.options}}. } \examples{ X <- osteo$pts[[1]] plot(X, main="Osteocyte lacunae, animal 1, brick 1", cex=1.5, pch=16) plot(X, type="h", main="", box.back=list(lty=3)) } \keyword{spatial} \keyword{hplot} spatstat/man/rmhmodel.list.Rd0000644000176200001440000001127613333543264015755 0ustar liggesusers\name{rmhmodel.list} \alias{rmhmodel.list} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Given a list of parameters, builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{list}(model, ...) } \arguments{ \item{model}{A list of parameters. See Details.} \item{\dots}{ Optional list of additional named parameters. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a validated list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. 
} \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.list} is the method for lists. The argument \code{model} should be a named list of parameters of the form \code{list(cif, par, w, trend, types)} where \code{cif} and \code{par} are required and the others are optional. For details about these components, see \code{\link{rmhmodel.default}}. The subsequent arguments \code{\dots} (if any) may also have these names, and they will take precedence over elements of the list \code{model}. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. Geyer, C.J. and \Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. 
} \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} } \examples{ # Strauss process: mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) mod01 <- rmhmodel(mod01) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) mod04 <- rmhmodel(mod04) # Soft core: w <- square(10) mod07 <- list(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) mod07 <- rmhmodel(mod07) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) mod08 <- rmhmodel(mod08) # specify types mod09 <- rmhmodel(list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B"))) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) mod10 <- rmhmodel(mod10) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. 
h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) mod17 <- rmhmodel(mod17) } \author{\adrian and \rolf } \keyword{spatial} \keyword{datagen} spatstat/man/bounding.box.xy.Rd0000644000176200001440000000243213333543262016217 0ustar liggesusers\name{bounding.box.xy} \alias{bounding.box.xy} \title{Convex Hull of Points} \description{ Computes the smallest rectangle containing a set of points. } \usage{ bounding.box.xy(x, y=NULL) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function finds the smallest rectangle, with sides parallel to the coordinate axes, that contains all the points, and returns it as a window. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{convexhull.xy}}, \code{\link{ripras}} } \examples{ x <- runif(30) y <- runif(30) w <- bounding.box.xy(x,y) plot(owin(), main="bounding.box.xy(x,y)") plot(w, add=TRUE) points(x,y) X <- rpoispp(30) plot(X, main="bounding.box.xy(X)") plot(bounding.box.xy(X), add=TRUE) } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} spatstat/man/dkernel.Rd0000644000176200001440000000611713333543263014615 0ustar liggesusers\name{dkernel} \alias{dkernel} \alias{pkernel} \alias{qkernel} \alias{rkernel} \title{Kernel distributions and random generation} \description{Density, distribution function, quantile function and random generation for several distributions used in kernel estimation for numerical data. 
} \usage{ dkernel(x, kernel = "gaussian", mean = 0, sd = 1) pkernel(q, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE) qkernel(p, kernel = "gaussian", mean = 0, sd = 1, lower.tail = TRUE) rkernel(n, kernel = "gaussian", mean = 0, sd = 1) } \arguments{ \item{x, q}{Vector of quantiles.} \item{p}{Vector of probabilities.} \item{kernel}{ String name of the kernel. Options are \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. (Partial matching is used). } \item{n}{Number of observations.} \item{mean}{Mean of distribution.} \item{sd}{Standard deviation of distribution.} \item{lower.tail}{logical; if \code{TRUE} (the default), then probabilities are \eqn{P(X \le x)}{P[X \le x]}, otherwise, \eqn{P(X > x)}. } } \details{ These functions give the probability density, cumulative distribution function, quantile function and random generation for several distributions used in kernel estimation for one-dimensional (numerical) data. The available kernels are those used in \code{\link[stats]{density.default}}, namely \code{"gaussian"}, \code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"}, \code{"biweight"}, \code{"cosine"} and \code{"optcosine"}. For more information about these kernels, see \code{\link[stats]{density.default}}. \code{dkernel} gives the probability density, \code{pkernel} gives the cumulative distribution function, \code{qkernel} gives the quantile function, and \code{rkernel} generates random deviates. } \value{ A numeric vector. For \code{dkernel}, a vector of the same length as \code{x} containing the corresponding values of the probability density. For \code{pkernel}, a vector of the same length as \code{q} containing the corresponding values of the cumulative distribution function. For \code{qkernel}, a vector of the same length as \code{p} containing the corresponding quantiles. 
For \code{rkernel}, a vector of length \code{n} containing randomly generated values. } \examples{ x <- seq(-3,3,length=100) plot(x, dkernel(x, "epa"), type="l", main=c("Epanechnikov kernel", "probability density")) plot(x, pkernel(x, "opt"), type="l", main=c("OptCosine kernel", "cumulative distribution function")) p <- seq(0,1, length=256) plot(p, qkernel(p, "biw"), type="l", main=c("Biweight kernel", "quantile function")) y <- rkernel(100, "tri") hist(y, main="Random variates from triangular density") rug(y) } \seealso{ \code{\link[stats]{density.default}}, \code{\link{kernel.factor}} } \author{\adrian \email{adrian@maths.uwa.edu.au} and Martin Hazelton } \keyword{methods} \keyword{nonparametric} \keyword{smooth} spatstat/man/overlap.owin.Rd0000644000176200001440000000160013333543263015604 0ustar liggesusers\name{overlap.owin} \alias{overlap.owin} \title{ Compute Area of Overlap } \description{ Computes the area of the overlap (intersection) of two windows. } \usage{ overlap.owin(A, B) } \arguments{ \item{A,B}{ Windows (objects of class \code{"owin"}). } } \details{ This function computes the area of the overlap between the two windows \code{A} and \code{B}. If one of the windows is a binary mask, then both windows are converted to masks on the same grid, and the area is computed by counting pixels. Otherwise, the area is computed analytically (using the discrete Stokes theorem). } \value{ A single numeric value. } \seealso{ \code{\link{intersect.owin}}, \code{\link{area.owin}}, \code{\link{setcov}}. 
} \examples{ A <- square(1) B <- shift(A, c(0.3, 0.2)) overlap.owin(A, B) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math}spatstat/man/as.fv.Rd0000644000176200001440000000613313333543262014203 0ustar liggesusers\name{as.fv} \alias{as.fv} \alias{as.fv.fv} \alias{as.fv.fasp} \alias{as.fv.data.frame} \alias{as.fv.matrix} \alias{as.fv.minconfit} \alias{as.fv.dppm} \alias{as.fv.kppm} \alias{as.fv.bw.optim} \title{Convert Data To Class fv} \description{ Converts data into a function table (an object of class \code{"fv"}). } \usage{ as.fv(x) \method{as.fv}{fv}(x) \method{as.fv}{data.frame}(x) \method{as.fv}{matrix}(x) \method{as.fv}{fasp}(x) \method{as.fv}{minconfit}(x) \method{as.fv}{dppm}(x) \method{as.fv}{kppm}(x) \method{as.fv}{bw.optim}(x) } \arguments{ \item{x}{Data which will be converted into a function table} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This command converts data \code{x}, that could be interpreted as the values of a function, into a function value table (object of the class \code{"fv"} as described in \code{\link{fv.object}}). This object can then be plotted easily using \code{\link{plot.fv}}. The dataset \code{x} may be any of the following: \itemize{ \item an object of class \code{"fv"}; \item a matrix or data frame with at least two columns; \item an object of class \code{"fasp"}, representing an array of \code{"fv"} objects. \item an object of class \code{"minconfit"}, giving the results of a minimum contrast fit by the command \code{\link{mincontrast}}. 
\item an object of class \code{"kppm"}, representing a fitted Cox or cluster point process model, obtained from the model-fitting command \code{\link{kppm}}; \item an object of class \code{"dppm"}, representing a fitted determinantal point process model, obtained from the model-fitting command \code{\link{dppm}}; \item an object of class \code{"bw.optim"}, representing an optimal choice of smoothing bandwidth by a cross-validation method, obtained from commands like \code{\link{bw.diggle}}. } The function \code{as.fv} is generic, with methods for each of the classes listed above. The behaviour is as follows: \itemize{ \item If \code{x} is an object of class \code{"fv"}, it is returned unchanged. \item If \code{x} is a matrix or data frame, the first column is interpreted as the function argument, and subsequent columns are interpreted as values of the function computed by different methods. \item If \code{x} is an object of class \code{"fasp"} representing an array of \code{"fv"} objects, these are combined into a single \code{"fv"} object. \item If \code{x} is an object of class \code{"minconfit"}, or an object of class \code{"kppm"} or \code{"dppm"}, the result is a function table containing the observed summary function and the best fit summary function. \item If \code{x} is an object of class \code{"bw.optim"}, the result is a function table of the optimisation criterion as a function of the smoothing bandwidth. } } \examples{ r <- seq(0, 1, length=101) x <- data.frame(r=r, y=r^2) as.fv(x) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat/man/harmonise.owin.Rd0000644000176200001440000000367713333543263016131 0ustar liggesusers\name{harmonise.owin} \alias{harmonise.owin} \alias{harmonize.owin} \title{Make Windows Compatible} \description{ Convert several windows to a common pixel raster. 
} \usage{ \method{harmonise}{owin}(\dots) \method{harmonize}{owin}(\dots) } \arguments{ \item{\dots}{ Any number of windows (objects of class \code{"owin"}) or data which can be converted to windows by \code{\link{as.owin}}. } } \details{ This function makes any number of windows compatible, by converting them all to a common pixel grid. This only has an effect if one of the windows is a binary mask. If all the windows are rectangular or polygonal, they are returned unchanged. The command \code{\link{harmonise}} is generic. This is the method for objects of class \code{"owin"}. Each argument must be a window (object of class \code{"owin"}), or data that can be converted to a window by \code{\link{as.owin}}. The common pixel grid is determined by inspecting all the windows in the argument list, computing the bounding box of all the windows, then finding the binary mask with the finest spatial resolution, and extending its pixel grid to cover the bounding box. The return value is a list with entries corresponding to the input arguments. If the arguments were named (\code{name=value}) then the return value also carries these names. If you just want to determine the appropriate pixel resolution, without converting the windows, use \code{\link{commonGrid}}. } \value{ A list of windows, of length equal to the number of arguments \code{\dots}. The list belongs to the class \code{"solist"}. 
} \author{\adrian and \rolf } \examples{ harmonise(X=letterR, Y=grow.rectangle(Frame(letterR), 0.2), Z=as.mask(letterR, eps=0.1), V=as.mask(letterR, eps=0.07)) } \seealso{ \code{\link{commonGrid}}, \code{\link{harmonise.im}}, \code{\link{as.owin}} } \keyword{spatial} \keyword{manip} spatstat/man/MultiStraussHard.Rd0000644000176200001440000000673613333543262016455 0ustar liggesusers\name{MultiStraussHard} \alias{MultiStraussHard} \title{The Multitype/Hard Core Strauss Point Process Model} \description{ Creates an instance of the multitype/hard core Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStraussHard(iradii, hradii, types=NULL) } \arguments{ \item{iradii}{Matrix of interaction radii} \item{hradii}{Matrix of hard core radii} \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype/hard core Strauss process with interaction radii \eqn{iradii[i,j]} and hard core radii \eqn{hradii[i,j]}. } \details{ This is a hybrid of the multitype Strauss process (see \code{\link{MultiStrauss}}) and the hard core process (case \eqn{\gamma=0}{gamma = 0} of the Strauss process). A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart; if the pair lies more than \eqn{h_{ij}}{h[i,j]} and less than \eqn{r_{ij}}{r[i,j]} units apart, it contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the probability density. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStraussHard interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrices \code{iradii} and \code{hradii}. 
The matrices \code{iradii} and \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hardcore radii are specified in \code{MultiStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStraussHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiHard}}, \code{\link{Strauss}} } \examples{ r <- matrix(3, nrow=2,ncol=2) h <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStraussHard(r,h) # prints a sensible description of itself r <- 0.04 * matrix(c(1,2,2,1), nrow=2,ncol=2) h <- 0.02 * matrix(c(1,NA,NA,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[owin(c(0,0.8), c(0,1))] } fit <- ppm(X ~1, MultiStraussHard(r,h)) # fit stationary multitype hardcore Strauss process to `amacrine' } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype/hard core Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \section{Changed Syntax}{ Before \pkg{spatstat} version \code{1.37-0}, the syntax of this function was different: \code{MultiStraussHard(types=NULL, iradii, hradii)}. The new code attempts to handle the old syntax as well. 
} \author{\adrian , \rolf and \ege } \keyword{spatial} \keyword{models} spatstat/man/as.owin.Rd0000644000176200001440000002147113612467346014556 0ustar liggesusers\name{as.owin} \alias{as.owin} \alias{as.owin.owin} \alias{as.owin.ppp} \alias{as.owin.ppm} \alias{as.owin.kppm} \alias{as.owin.dppm} \alias{as.owin.lpp} \alias{as.owin.lppm} \alias{as.owin.msr} \alias{as.owin.psp} \alias{as.owin.quad} \alias{as.owin.quadratcount} \alias{as.owin.quadrattest} \alias{as.owin.tess} \alias{as.owin.im} \alias{as.owin.layered} \alias{as.owin.data.frame} \alias{as.owin.distfun} \alias{as.owin.nnfun} \alias{as.owin.funxy} \alias{as.owin.boxx} \alias{as.owin.rmhmodel} \alias{as.owin.leverage.ppm} \alias{as.owin.influence.ppm} \alias{as.owin.default} \title{Convert Data To Class owin} \description{ Converts data specifying an observation window in any of several formats, into an object of class \code{"owin"}. } \usage{ as.owin(W, \dots, fatal=TRUE) \method{as.owin}{owin}(W, \dots, fatal=TRUE) \method{as.owin}{ppp}(W, \dots, fatal=TRUE) \method{as.owin}{ppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{kppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{dppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{lpp}(W, \dots, fatal=TRUE) \method{as.owin}{lppm}(W, \dots, fatal=TRUE) \method{as.owin}{msr}(W, \dots, fatal=TRUE) \method{as.owin}{psp}(W, \dots, fatal=TRUE) \method{as.owin}{quad}(W, \dots, fatal=TRUE) \method{as.owin}{quadratcount}(W, \dots, fatal=TRUE) \method{as.owin}{quadrattest}(W, \dots, fatal=TRUE) \method{as.owin}{tess}(W, \dots, fatal=TRUE) \method{as.owin}{im}(W, \dots, fatal=TRUE) \method{as.owin}{layered}(W, \dots, fatal=TRUE) \method{as.owin}{data.frame}(W, \dots, step, fatal=TRUE) \method{as.owin}{distfun}(W, \dots, fatal=TRUE) \method{as.owin}{nnfun}(W, \dots, fatal=TRUE) \method{as.owin}{funxy}(W, \dots, fatal=TRUE) \method{as.owin}{boxx}(W, \dots, fatal=TRUE) \method{as.owin}{rmhmodel}(W, 
\dots, fatal=FALSE) \method{as.owin}{leverage.ppm}(W, \dots, fatal=TRUE) \method{as.owin}{influence.ppm}(W, \dots, fatal=TRUE) \method{as.owin}{default}(W, \dots, fatal=TRUE) } \arguments{ \item{W}{ Data specifying an observation window, in any of several formats described under \emph{Details} below. } \item{fatal}{ Logical value determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} \item{from}{Character string. See Details.} \item{step}{ Optional. A single number, or numeric vector of length 2, giving the grid step lengths in the \eqn{x} and \eqn{y} directions. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link{owin.object}} for an overview. The function \code{as.owin} converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{as.owin} is generic, with methods for different classes of objects, and a default method. The argument \code{W} may be \itemize{ \item an object of class \code{"owin"} \item a structure with entries \code{xrange}, \code{yrange} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xmin}, \code{xmax}, \code{ymin}, \code{ymax} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle. This will accept objects of class \code{bbox} in the \code{sf} package. \item a numeric vector of length 4 (interpreted as \code{(xmin, xmax, ymin, ymax)} in that order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xl}, \code{xu}, \code{yl}, \code{yu} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle as \code{(xmin, xmax) = (xl, xu)} and \code{(ymin, ymax) = (yl, yu)}. 
This will accept objects of class \code{spp} used in the Venables and Ripley \pkg{spatial} package. \item an object of class \code{"ppp"} representing a point pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"psp"} representing a line segment pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"tess"} representing a tessellation. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"quad"} representing a quadrature scheme. In this case, the window of the \code{data} component will be extracted. \item an object of class \code{"im"} representing a pixel image. In this case, a window of type \code{"mask"} will be returned, with the same pixel raster coordinates as the image. An image pixel value of \code{NA}, signifying that the pixel lies outside the window, is transformed into the logical value \code{FALSE}, which is the corresponding convention for window masks. \item an object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"} representing a fitted point process model. In this case, if \code{from="points"} (the default), \code{as.owin} extracts the original point pattern data to which the model was fitted, and returns the observation window of this point pattern. If \code{from="covariates"} then \code{as.owin} extracts the covariate images to which the model was fitted, and returns a binary mask window that specifies the pixel locations. \item an object of class \code{"lpp"} representing a point pattern on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item an object of class \code{"lppm"} representing a fitted point process model on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item A \code{data.frame} with exactly three columns. 
Each row of the data frame corresponds to one pixel. Each row contains the \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value indicating whether the pixel lies inside the window. \item A \code{data.frame} with exactly two columns. Each row of the data frame contains the \eqn{x} and \eqn{y} coordinates of a pixel that lies inside the window. \item an object of class \code{"distfun"}, \code{"nnfun"} or \code{"funxy"} representing a function of spatial location, defined on a spatial domain. The spatial domain of the function will be extracted. \item an object of class \code{"rmhmodel"} representing a point process model that can be simulated using \code{\link{rmh}}. The window (spatial domain) of the model will be extracted. The window may be \code{NULL} in some circumstances (indicating that the simulation window has not yet been determined). This is not treated as an error, because the argument \code{fatal} defaults to \code{FALSE} for this method. \item an object of class \code{"layered"} representing a list of spatial objects. See \code{\link{layered}}. In this case, \code{as.owin} will be applied to each of the objects in the list, and the union of these windows will be returned. \item an object of class \code{"SpatialPolygon"}, \code{"SpatialPolygons"} or \code{"SpatialPolygonsDataFrame"}. To handle these data types, \bold{the package} \pkg{maptools} \bold{must be loaded}, because it provides the methods for \code{as.owin} for these classes. For full details, see \code{vignette('shapefiles')}. } If the argument \code{W} is not in one of these formats and cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). When \code{W} is a data frame, the argument \code{step} can be used to specify the pixel grid spacing; otherwise, the spacing will be guessed from the data. } \seealso{ \code{\link{owin.object}}, \code{\link{owin}}. 
Additional methods for \code{as.owin} are provided in the \pkg{maptools} package: \code{as.owin.SpatialPolygon}, \code{as.owin.SpatialPolygons}, \code{as.owin.SpatialPolygonsDataFrame}. } \examples{ w <- as.owin(c(0,1,0,1)) w <- as.owin(list(xrange=c(0,5),yrange=c(0,10))) # point pattern data(demopat) w <- as.owin(demopat) # image Z <- as.im(function(x,y) { x + 3}, unit.square()) w <- as.owin(Z) # Venables & Ripley 'spatial' package spatialpath <- system.file(package="spatial") if(nchar(spatialpath) > 0) { require(spatial) towns <- ppinit("towns.dat") w <- as.owin(towns) detach(package:spatial) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat/man/plot.msr.Rd0000644000176200001440000000702713333543264014751 0ustar liggesusers\name{plot.msr} \alias{plot.msr} \title{Plot a Signed or Vector-Valued Measure} \description{ Plot a signed measure or vector-valued measure. } \usage{ \method{plot}{msr}(x, \dots, add = FALSE, how = c("image", "contour", "imagecontour"), main = NULL, do.plot = TRUE, multiplot = TRUE, massthresh = 0, equal.markscale = FALSE, equal.ribbon = FALSE) } \arguments{ \item{x}{ The signed or vector measure to be plotted. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{\dots}{ Extra arguments passed to \code{\link{Smooth.ppp}} to control the interpolation of the continuous density component of \code{x}, or passed to \code{\link{plot.im}} or \code{\link{plot.ppp}} to control the appearance of the plot. } \item{add}{ Logical flag; if \code{TRUE}, the graphics are added to the existing plot. If \code{FALSE} (the default) a new plot is initialised. } \item{how}{ String indicating how to display the continuous density component. } \item{main}{ String. Main title for the plot. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. 
} \item{multiplot}{ Logical value indicating whether it is permissible to display a plot with multiple panels (representing different components of a vector-valued measure, or different types of points in a multitype measure.) } \item{massthresh}{ Threshold for plotting atoms. A single numeric value or \code{NULL}. If \code{massthresh=0} (the default) then only atoms with nonzero mass will be plotted. If \code{massthresh > 0} then only atoms whose absolute mass exceeds \code{massthresh} will be plotted. If \code{massthresh=NULL}, then all atoms of the measure will be plotted. } \item{equal.markscale}{ Logical value indicating whether different panels should use the same symbol map (to represent the masses of atoms of the measure). } \item{equal.ribbon}{ Logical value indicating whether different panels should use the same colour map (to represent the density values in the diffuse component of the measure). } } \value{ (Invisible) colour map (object of class \code{"colourmap"}) for the colour image. } \details{ This is the \code{plot} method for the class \code{"msr"}. The continuous density component of \code{x} is interpolated from the existing data by \code{\link{Smooth.ppp}}, and then displayed as a colour image by \code{\link{plot.im}}. The discrete atomic component of \code{x} is then superimposed on this image by plotting the atoms as circles (for positive mass) or squares (for negative mass) by \code{\link{plot.ppp}}. By default, atoms with zero mass are not plotted at all. To smooth both the discrete and continuous components, use \code{\link{Smooth.msr}}. Use the argument \code{clipwin} to restrict the plot to a subset of the full data. To remove atoms with tiny masses, use the argument \code{massthresh}. 
} \seealso{ \code{\link{msr}}, \code{\link{Smooth.ppp}}, \code{\link{Smooth.msr}}, \code{\link{plot.im}}, \code{\link{plot.ppp}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rs <- residuals(fit, type="score") plot(rp) plot(rs) plot(rs, how="contour") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat/man/rescale.ppp.Rd0000644000176200001440000000375313333543264015411 0ustar liggesusers\name{rescale.ppp} \alias{rescale.ppp} \title{Convert Point Pattern to Another Unit of Length} \description{ Converts a point pattern dataset to another unit of length. } \usage{ \method{rescale}{ppp}(X, s, unitname) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{unitname}{ Optional. New name for the unit of length. See \code{\link{unitname}}. } } \value{ Another point pattern (of class \code{"ppp"}), representing the same data, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the point pattern \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a point pattern representing the \emph{same} data but re-expressed in a different unit. Mark values are unchanged. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original point pattern. 
If you want to actually change the coordinates by a linear transformation, producing a point pattern that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ \code{\link{unitname}}, \code{\link{rescale}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ # Bramble Canes data: 1 unit = 9 metres data(bramblecanes) # convert to metres bram <- rescale(bramblecanes, 1/9) # or equivalently bram <- rescale(bramblecanes) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat/man/solutionset.Rd0000644000176200001440000000523313333543264015560 0ustar liggesusers\name{solutionset} \alias{solutionset} \title{Evaluate Logical Expression Involving Pixel Images and Return Region Where Expression is True} \description{ Given a logical expression involving one or more pixel images, find all pixels where the expression is true, and assemble these pixels into a window. } \usage{ solutionset(\dots, envir) } \arguments{ \item{\dots}{An expression in the \R language, involving one or more pixel images.} \item{envir}{Optional. The environment in which to evaluate the expression.} } \details{ Given a logical expression involving one or more pixel images, this function will find all pixels where the expression is true, and assemble these pixels into a spatial window. Pixel images in \code{spatstat} are represented by objects of class \code{"im"} (see \code{\link{im.object}}). These are essentially matrices of pixel values, with extra attributes recording the pixel dimensions, etc. Suppose \code{X} is a pixel image. Then \code{solutionset(abs(X) > 3)} will find all the pixels in \code{X} for which the pixel value is greater than 3 in absolute value, and return a window containing all these pixels. 
If \code{X} and \code{Y} are two pixel images, \code{solutionset(X > Y)} will find all pixels for which the pixel value of \code{X} is greater than the corresponding pixel value of \code{Y}, and return a window containing these pixels. In general, \code{\dots} can be any logical expression involving pixel images. The code first tries to evaluate the expression using \code{\link{eval.im}}. This is successful if the expression involves only (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. There must be at least one pixel image in the expression. The expression \code{expr} must be vectorised. See the Examples. If this is unsuccessful, the code then tries to evaluate the expression using pixel arithmetic. This is successful if all the arithmetic operations in the expression are listed in \code{\link{Math.im}}. } \value{ A spatial window (object of class \code{"owin"}, see \code{\link{owin.object}}). } \seealso{ \code{\link{im.object}}, \code{\link{owin.object}}, \code{\link{eval.im}}, \code{\link{levelset}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { 3 * x + y - 1}, unit.square()) W <- solutionset(abs(X) > 0.1) W <- solutionset(X > Y) W <- solutionset(X + Y >= 1) area(solutionset(X < Y)) solutionset(density(cells) > 20) } \author{\adrian and \rolf } \keyword{spatial} \keyword{programming} \keyword{manip} spatstat/man/logLik.mppm.Rd0000644000176200001440000001023513333543265015360 0ustar liggesusers\name{logLik.mppm} \alias{logLik.mppm} \alias{AIC.mppm} \alias{extractAIC.mppm} \alias{nobs.mppm} \alias{getCall.mppm} \alias{terms.mppm} \title{Log Likelihood and AIC for Multiple Point Process Model} \description{ For a point process model that has been fitted to multiple point patterns, these functions extract the log likelihood and AIC, or analogous quantities based on the pseudolikelihood. 
} \usage{ \method{logLik}{mppm}(object, \dots, warn=TRUE) \method{AIC}{mppm}(object, \dots, k=2, takeuchi=TRUE) \method{extractAIC}{mppm}(fit, scale = 0, k = 2, \dots, takeuchi = TRUE) \method{nobs}{mppm}(object, \dots) \method{getCall}{mppm}(x, \dots) \method{terms}{mppm}(x, \dots) } \arguments{ \item{object,fit,x}{ Fitted point process model (fitted to multiple point patterns). An object of class \code{"mppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood is returned instead of the likelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details. } \item{takeuchi}{ Logical value specifying whether to use the Takeuchi penalty (\code{takeuchi=TRUE}) or the number of fitted parameters (\code{takeuchi=FALSE}) in calculating AIC. } } \details{ These functions are methods for the generic commands \code{\link[stats]{logLik}}, \code{\link[stats]{AIC}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{terms}} and \code{\link[stats:update]{getCall}} for the class \code{"mppm"}. An object of class \code{"mppm"} represents a fitted Poisson or Gibbs point process model fitted to several point patterns. It is obtained from the model-fitting function \code{\link{mppm}}. The method \code{logLik.mppm} extracts the maximised value of the log likelihood for the fitted model (as approximated by quadrature using the Berman-Turner approximation). If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning. The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{penalty} }{ AIC = -2 * log(L) + k * penalty } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{penalty}}{penalty} is a penalty for model complexity, usually equal to the effective degrees of freedom of the model. 
The method \code{extractAIC.mppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood (if \code{fit} is a Gibbs model). The \eqn{\mbox{penalty}}{penalty} term is calculated as follows. If \code{takeuchi=FALSE} then \eqn{\mbox{penalty}}{penalty} is the number of fitted parameters. If \code{takeuchi=TRUE} then \eqn{\mbox{penalty} = \mbox{trace}(J H^{-1})}{penalty = trace(J H^(-1))} where \eqn{J} and \eqn{H} are the estimated variance and hessian, respectively, of the composite score. These two choices are equivalent for a Poisson process. The method \code{nobs.mppm} returns the total number of points in the original data point patterns to which the model was fitted. The method \code{getCall.mppm} extracts the original call to \code{\link{mppm}} which caused the model to be fitted. The method \code{terms.mppm} extracts the covariate terms in the model formula as a \code{terms} object. Note that these terms do not include the interaction component of the model. The \R function \code{\link[stats]{step}} uses these methods. } \value{ See the help files for the corresponding generic functions. } \seealso{ \code{\link{mppm}} } \references{ Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. London: Chapman and Hall/CRC Press. } \author{ Adrian Baddeley, Ida-Maria Sintorn and Leanne Bischoff. Implemented by \spatstatAuthors. 
} \examples{ fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) logLik(fit) AIC(fit) nobs(fit) getCall(fit) } \keyword{spatial} \keyword{models} spatstat/DESCRIPTION0000644000176200001440000001474513624534362013646 0ustar liggesusersPackage: spatstat Version: 1.63-2 Date: 2020-02-22 Title: Spatial Point Pattern Analysis, Model-Fitting, Simulation, Tests Author: Adrian Baddeley , Rolf Turner and Ege Rubak , with substantial contributions of code by Kasper Klitgaard Berthelsen; Ottmar Cronie; Tilman Davies; Yongtao Guan; Ute Hahn; Abdollah Jalilian; Marie-Colette van Lieshout; Greg McSwiggan; Tuomas Rajala; Suman Rakshit; Dominic Schuhmacher; Rasmus Waagepetersen; and Hangsheng Wang. Additional contributions by M. Adepeju; C. Anderson; Q.W. Ang; R. Arellano; J. Astrom; M. Austenfeld; S. Azaele; M. Baddeley; C. Beale; M. Bell; R. Bernhardt; T. Bendtsen; A. Bevan; B. Biggerstaff; A. Bilgrau; L. Bischof; C. Biscio; R. Bivand; J.M. Blanco Moreno; F. Bonneu; J. Brown; J. Burgos; S. Byers; Y.M. Chang; J.B. Chen; I. Chernayavsky; Y.C. Chin; B. Christensen; L. Cobo Sanchez; J.-F. Coeurjolly; K. Colyvas; H. Commenges; R. Constantine; R. Corria Ainslie; R. Cotton; M. de la Cruz; P. Dalgaard; M. D'Antuono; S. Das; P.J. Diggle; P. Donnelly; I. Dryden; S. Eglen; A. El-Gabbas; B. Fandohan; O. Flores; E.D. Ford; P. Forbes; S. Frank; J. Franklin; N. Funwi-Gabga; O. Garcia; A. Gault; J. Geldmann; M. Genton; S. Ghalandarayeshi; J. Gilbey; J. Goldstick; P. Grabarnik; C. Graf; U. Hahn; A. Hardegen; M.B. Hansen; M. Hazelton; J. Heikkinen; M. Hering; M. Herrmann; M. Hesselbarth; P. Hewson; H. Heydarian; K. Hingee; K. Hornik; P. Hunziker; J. Hywood; R. Ihaka; C. Icos; A. Jammalamadaka; R. John-Chandran; D. Johnson; M. Khanmohammadi; R. Klaver; P. Kovesi; L. Kozmian-Ledward; M. Kuhn; J. Laake; R.A. Lamb; F. Lavancier; T. Lawrence; T. Lazauskas; J. Lee; G.P. Leser; A. Li; H.T. Li; G. Limitsios; A. Lister; N. Luambua; B. Madin; M. Maechler; J. Marcus; K. Marchikanti; R. Mark; J. Mateu; P. 
McCullagh; U. Mehlig; F. Mestre; S. Meyer; X.C. Mi; L. De Middeleer; R.K. Milne; E. Miranda; J. Moller; A. Mollie; I. Moncada; M. Moradi; V. Morera Pujol; E. Mudrak; G.M. Nair; N. Najari; N. Nava; L.S. Nielsen; F. Nunes; J.R. Nyengaard; J. Oehlschlaegel; T. Onkelinx; S. O'Riordan; E. Parilov; J. Picka; N. Picard; T. Pollington; M. Porter; S. Protsiv; A. Raftery; S. Rakshit; B. Ramage; P. Ramon; X. Raynaud; N. Read; M. Reiter; I. Renner; T.O. Richardson; B.D. Ripley; E. Rosenbaum; B. Rowlingson; J. Rudokas; J. Rudge; C. Ryan; F. Safavimanesh; A. Sarkka; C. Schank; K. Schladitz; S. Schutte; B.T. Scott; O. Semboli; F. Semecurbe; V. Shcherbakov; G.C. Shen; P. Shi; H.-J. Ship; T.L. Silva; I.-M. Sintorn; Y. Song; M. Spiess; M. Stevenson; K. Stucki; J. Sulavik; M. Sumner; P. Surovy; B. Taylor; T. Thorarinsdottir; L. Torres; B. Turlach; T. Tvedebrink; K. Ummer; M. Uppala; A. van Burgel; T. Verbeke; M. Vihtakari; A. Villers; F. Vinatier; S. Voss; S. Wagner; H. Wang; H. Wendrock; J. Wild; C. Witthoft; S. Wong; M. Woringer; L. Yates; M.E. Zamboni and A. Zeileis. Maintainer: Adrian Baddeley Depends: R (>= 3.3.0), spatstat.data (>= 1.4-2), stats, graphics, grDevices, utils, methods, nlme, rpart Imports: spatstat.utils (>= 1.17-0), mgcv, Matrix, deldir (>= 0.0-21), abind, tensor, polyclip (>= 1.10-0), goftest (>= 1.2-2) Suggests: sm, maptools, gsl, locfit, spatial, rpanel, tkrplot, RandomFields (>= 3.1.24.1), RandomFieldsUtils(>= 0.3.3.1), fftwtools (>= 0.9-8) Description: Comprehensive open-source toolbox for analysing Spatial Point Patterns. Focused mainly on two-dimensional point patterns, including multitype/marked points, in any spatial region. Also supports three-dimensional point patterns, space-time point patterns in any number of dimensions, point patterns on a linear network, and patterns of other geometrical objects. Supports spatial covariate data such as pixel images. 
Contains over 2000 functions for plotting spatial data, exploratory data analysis, model-fitting, simulation, spatial sampling, model diagnostics, and formal inference. Data types include point patterns, line segment patterns, spatial windows, pixel images, tessellations, and linear networks. Exploratory methods include quadrat counts, K-functions and their simulation envelopes, nearest neighbour distance and empty space statistics, Fry plots, pair correlation function, kernel smoothed intensity, relative risk estimation with cross-validated bandwidth selection, mark correlation functions, segregation indices, mark dependence diagnostics, and kernel estimates of covariate effects. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Monte Carlo, Diggle-Cressie-Loosmore-Ford, Dao-Genton, two-stage Monte Carlo) and tests for covariate effects (Cox-Berman-Waller-Lawson, Kolmogorov-Smirnov, ANOVA) are also supported. Parametric models can be fitted to point pattern data using the functions ppm(), kppm(), slrm(), dppm() similar to glm(). Types of models include Poisson, Gibbs and Cox point processes, Neyman-Scott cluster processes, and determinantal point processes. Models may involve dependence on covariates, inter-point interaction, cluster formation and dependence on marks. Models are fitted by maximum likelihood, logistic regression, minimum contrast, and composite likelihood methods. A model can be fitted to a list of point patterns (replicated point pattern data) using the function mppm(). The model can include random effects and fixed effects depending on the experimental design, in addition to all the features listed above. Fitted point process models can be simulated, automatically. Formal hypothesis tests of a fitted model are supported (likelihood ratio test, analysis of deviance, Monte Carlo tests) along with basic tools for model selection (stepwise(), AIC()) and variable selection (sdr). 
Tools for validating the fitted model include simulation envelopes, residuals, residual plots and Q-Q plots, leverage and influence diagnostics, partial residuals, and added variable plots. License: GPL (>= 2) URL: http://www.spatstat.org LazyData: true NeedsCompilation: yes ByteCompile: true BugReports: https://github.com/spatstat/spatstat/issues Packaged: 2020-02-22 08:14:02 UTC; adrian Repository: CRAN Date/Publication: 2020-02-23 17:40:02 UTC spatstat/build/0000755000176200001440000000000013624161300013210 5ustar liggesusersspatstat/build/vignette.rds0000644000176200001440000000060713624161300015552 0ustar liggesusersRN0uRhy H@UAbZXM@;v 0q\0BE(D(6uyν: ShF&v YUhFCT-H2^trdPU+<s1}=X283k=qB'3qrʹ"si/.)Vʸ.pRraiͤP?9!&wi T:z$r J ֥X*}KcX8 v,Rye8!^P ذ0#zȖ K6zOs5d2מke{*&-[5[h(fÊ^a[o҂(Ǩ8*spatstat/tests/0000755000176200001440000000000013603007371013257 5ustar liggesusersspatstat/tests/testsK.R0000644000176200001440000004316013606020450014657 0ustar liggesusers#' #' tests/Kfuns.R #' #' Various K and L functions and pcf #' #' $Revision: 1.37 $ $Date: 2020/01/05 03:11:59 $ #' require(spatstat) myfun <- function(x,y){(x+1) * y } # must be outside local({ #' supporting code rmax.rule("Kscaled", owin(), 42) implemented.for.K(c("border", "bord.modif", "translate", "good", "best"), "polygonal", TRUE) implemented.for.K(c("border", "bord.modif", "translate", "good", "best"), "mask", TRUE) implemented.for.K(c("border", "isotropic"), "mask", TRUE) implemented.for.K(c("border", "isotropic"), "mask", FALSE) #' shortcuts D <- density(cells) K <- Kborder.engine(cells, rmax=0.4, weights=D, ratio=TRUE) K <- Knone.engine(cells, rmax=0.4, weights=D, ratio=TRUE) allcor <- c("none", "border", "bord.modif","isotropic", "translate") K <- Krect.engine(cells, rmax=0.4, ratio=TRUE, correction=allcor) K <- Krect.engine(cells, rmax=0.4, ratio=TRUE, correction=allcor, weights=D) K <- Krect.engine(cells, rmax=0.4, ratio=TRUE, correction=allcor, use.integers=FALSE) #' Kest special code blocks K <- 
Kest(cells, var.approx=TRUE, ratio=FALSE) Z <- distmap(cells) + 1 Kb <- Kest(cells, correction=c("border","bord.modif"), weights=Z, ratio=TRUE) Kn <- Kest(cells, correction="none", weights=Z, ratio=TRUE) Knb <- Kest(cells, correction=c("border","bord.modif","none"), weights=Z, ratio=TRUE) bigint <- 50000 # This is only "big" on a 32-bit system where # sqrt(.Machine$integer.max) = 46340.9 X <- runifpoint(bigint) Z <- as.im(1/bigint, owin()) Kb <- Kest(X, correction=c("border","bord.modif"), rmax=0.02, weights=Z, ratio=TRUE) Kn <- Kest(X, correction="none", rmax=0.02, weights=Z, ratio=TRUE) Knb <- Kest(X, correction=c("border","bord.modif","none"), rmax=0.02, weights=Z, ratio=TRUE) #' pcf.ppp special code blocks pr <- pcf(cells, ratio=TRUE, var.approx=TRUE) pc <- pcf(cells, domain=square(0.5)) pcr <- pcf(cells, domain=square(0.5), ratio=TRUE) pw <- pcf(redwood, correction="none") pwr <- pcf(redwood, correction="none", ratio=TRUE) pv <- pcf(redwood, kernel="rectangular") p1 <- pcf(redwood[1]) #' pcf.fv K <- Kest(redwood) g <- pcf(K, method="a") g <- pcf(K, method="c") g <- pcf(K, method="d") #' Kinhom code blocks X <- rpoispp(function(x,y) { 100 * x }, 100, square(1)) lambda <- 100 * X$x Kin <- Kinhom(X, lambda, correction=c("none", "border")) lambda2 <- outer(lambda, lambda, "*") Ki2 <- Kinhom(X, lambda2=lambda2, diagonal=FALSE, correction=c("translate", "isotropic")) fut <- ppm(X ~ x) Kio <- Kinhom(X, fut, update=FALSE) Kiu <- Kinhom(X, fut, update=TRUE, diagonal=FALSE) #' edge corrections rr <- rep(0.1, npoints(cells)) eC <- edge.Ripley(cells, rr) eI <- edge.Ripley(cells, rr, method="interpreted") if(max(abs(eC-eI)) > 0.1) stop("Ripley edge correction results do not match") a <- rmax.Ripley(square(1)) a <- rmax.Rigid(square(1)) a <- rmax.Ripley(as.polygonal(square(1))) a <- rmax.Rigid(as.polygonal(square(1))) a <- rmax.Ripley(letterR) a <- rmax.Rigid(letterR) #' run slow code for edge correction and compare results X <- redwood[c(TRUE, FALSE, FALSE)] Window(X) <- 
as.polygonal(Window(X)) Eapprox <- edge.Trans(X) Eexact <- edge.Trans(X, exact=TRUE) maxrelerr <- max(abs(1 - range(Eapprox/Eexact))) if(maxrelerr > 0.1) stop(paste("Exact and approximate algorithms for edge.Trans disagree by", paste0(round(100*maxrelerr), "%")), call.=FALSE) }) local({ #' ---- multitype ------ K <- Kcross(amacrine, correction=c("none", "bord.modif")) #' inhomogeneous multitype fit <- ppm(amacrine ~ marks) K1 <- Kcross.inhom(amacrine, lambdaX=fit) K2 <- Kcross.inhom(amacrine, lambdaX=densityfun(amacrine)) K3 <- Kcross.inhom(amacrine, lambdaX=density(amacrine, at="points")) On <- split(amacrine)$on Off <- split(amacrine)$off K4 <- Kcross.inhom(amacrine, lambdaI=ppm(On), lambdaJ=ppm(Off)) K5 <- Kcross.inhom(amacrine, correction="bord.modif") #' markconnect, markcorr M <- markconnect(amacrine, "on", "off", normalise=TRUE) M <- markcorr(longleaf, normalise=TRUE, correction=c("isotropic", "translate", "border", "none")) M <- markcorr(longleaf, normalise=TRUE, fargs=list()) #' Kmark (=markcorrint) X <- runifpoint(100) %mark% runif(100) km <- Kmark(X, f=atan2) km <- Kmark(X, f1=sin) km <- Kmark(X, f="myfun") aa <- Kmark(X, normalise=FALSE, returnL=FALSE) aa <- Kmark(X, normalise=FALSE, returnL=TRUE) aa <- Kmark(X, normalise=TRUE, returnL=FALSE) aa <- Kmark(X, normalise=TRUE, returnL=TRUE) }) local({ #' various modified K functions #' #' directional K functions #' a <- Ksector(swedishpines, -pi/2, pi/2, units="radians", correction=c("none", "border", "bord.modif", "Ripley", "translate"), ratio=TRUE) plot(a) #' #' local K functions #' fut <- ppm(swedishpines ~ polynom(x,y,2)) Z <- predict(fut) Lam <- fitted(fut, dataonly=TRUE) a <- localLinhom(swedishpines, lambda=fut) a <- localLinhom(swedishpines, lambda=Z) a <- localLinhom(swedishpines, lambda=Lam) a <- localLinhom(swedishpines, lambda=Z, correction="none") a <- localLinhom(swedishpines, lambda=Z, correction="translate") a <- localLcross(amacrine) a <- localLcross(amacrine, from="off", to="off") a <- 
localKdot(amacrine) a <- localLdot(amacrine) a <- localKcross.inhom(amacrine) a <- localLcross.inhom(amacrine) fat <- ppm(amacrine ~ x * marks) Zed <- predict(fat) Lum <- fitted(fat, dataonly=TRUE) moff <- (marks(amacrine) == "off") a <- localLcross.inhom(amacrine, from="off", to="on", lambdaX=Zed) a <- localLcross.inhom(amacrine, from="off", to="on", lambdaX=Lum) a <- localLcross.inhom(amacrine, from="off", to="on", lambdaX=fat) a <- localLcross.inhom(amacrine, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) a <- localLcross.inhom(amacrine, from="off", to="on", lambdaX=Zed, correction="none") a <- localLcross.inhom(amacrine, from="off", to="on", lambdaX=Zed, correction="translate") #' #' cases of resolve.lambda.cross #' h <- resolve.lambda.cross(amacrine, moff, !moff) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaX=Zed) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaX=Lum) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaX=fat) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaX=fat, update=FALSE) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaI=Zed[["off"]], lambdaJ=Zed[["on"]]) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaI=Lum[moff], lambdaJ=Lum[!moff]) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaI=fat, lambdaJ=fat) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaI=fat, lambdaJ=fat, update=FALSE) d <- densityfun(unmark(amacrine), sigma=0.1) dm <- lapply(split(amacrine), densityfun, sigma=0.1) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaX=d) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaI=dm[["off"]], lambdaJ=dm[["on"]]) h <- resolve.lambda.cross(amacrine, moff, !moff, lambdaX=function(x,y,m){ d(x,y) }) #' multitype inhomogeneous pcf g <- pcfcross.inhom(amacrine, lambdaI=dm[["off"]], lambdaJ=dm[["on"]]) #' #' lohboot code blocks #' Ared <- lohboot(redwood, fun="Kest", block=TRUE, Vcorrection=TRUE, global=FALSE, correction="none") Bred <- lohboot(redwood, 
block=TRUE, basicboot=TRUE, global=FALSE) Cred <- lohboot(redwood, fun=Kest, block=TRUE, global=TRUE, correction="translate") Dred <- lohboot(redwood, Lest) Kred <- lohboot(redwood, Kinhom) Lred <- lohboot(redwood, Linhom) gred <- lohboot(redwood, pcfinhom, sigma=0.1) Zred <- predict(ppm(redwood ~ x+y)) Lred <- lohboot(redwood, Linhom, lambda=Zred) #' X <- runifpoint(100, letterR) AX <- lohboot(X, block=TRUE, nx=7, ny=10) #' multitype b <- lohboot(amacrine, Kcross) b <- lohboot(amacrine, Lcross) b <- lohboot(amacrine, Kdot) b <- lohboot(amacrine, Ldot) b <- lohboot(amacrine, Kcross.inhom) b <- lohboot(amacrine, Lcross.inhom) b <- lohboot(amacrine, Lcross.inhom, from="off", to="on", lambdaX=Zed) b <- lohboot(amacrine, Lcross.inhom, from="off", to="on", lambdaX=Lum) b <- lohboot(amacrine, Lcross.inhom, from="off", to="on", lambdaX=fat) b <- lohboot(amacrine, Lcross.inhom, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) #' #' residual K functions etc #' rco <- compareFit(cells, Kcom, interaction=anylist(P=Poisson(), S=Strauss(0.08)), same="trans", different="tcom") fit <- ppm(cells ~ x, Strauss(0.07)) K <- Kcom(cells, model=fit, restrict=TRUE) ## Kscaled A <- Lscaled(japanesepines, renormalise=TRUE, correction="all") }) local({ #' From Ege, in response to a stackoverflow question. #' The following example has two points separated by r = 1 with 1/4 of the #' circumference outside the 10x10 window (i.e. area 100). #' Thus the value of K^(r) should jump from 0 to #' 100/(2\cdot 1)\cdot ((3/4)^{-1} + (3/4)^{-1}) = 100 \cdot 4/3 = 133.333. 
x <- c(4.5,5.5) y <- c(10,10)-sqrt(2)/2 W <- square(10) X <- ppp(x, y, W) compere <- function(a, b, where, tol=1e-6) { descrip <- paste("discrepancy in isotropic edge correction", where) err <- as.numeric(a) - as.numeric(b) maxerr <- max(abs(err)) blurb <- paste(descrip, "is", paste0(signif(maxerr, 4), ","), if(maxerr > tol) "exceeding" else "within", "tolerance of", tol) message(blurb) if(maxerr > tol) { message(paste("Discrepancies:", paste(err, collapse=", "))) stop(paste("excessive", descrip), call.=FALSE) } invisible(TRUE) } ## Testing: eX <- edge.Ripley(X, c(1,1)) compere(eX, c(4/3,4/3), "at interior point of rectangle") ## Corner case: Y <- X Y$x <- X$x-4.5+sqrt(2)/2 eY <- edge.Ripley(Y, c(1,1)) compere(eY, c(2,4/3), "near corner of rectangle") ## Invoke polygonal code Z <- rotate(Y, pi/4) eZdebug <- edge.Ripley(Z, c(1,1), internal=list(debug=TRUE)) compere(eZdebug, c(2,4/3), "at interior point of polygon (debug on)") ## test validity without debugger, in case of quirks of compiler optimisation eZ <- edge.Ripley(Z, c(1,1)) compere(eZ, c(2,4/3), "at interior point of polygon (debug off)") }) # # tests/kppm.R # # $Revision: 1.32 $ $Date: 2020/01/08 01:28:46 $ # # Test functionality of kppm that depends on RandomFields # Test update.kppm for old style kppm objects require(spatstat) local({ fit <- kppm(redwood ~1, "Thomas") # sic fitx <- kppm(redwood ~x, "Thomas", verbose=TRUE) fitx <- update(fit, ~ . 
+ x) fitM <- update(fit, clusters="MatClust") fitC <- update(fit, cells) fitCx <- update(fit, cells ~ x) #' Wsub <- owin(c(0, 0.5), c(-0.5, 0)) Zsub <- (bdist.pixels(Window(redwood)) > 0.1) fitWsub <- kppm(redwood ~1, "Thomas", subset=Wsub) fitZsub <- kppm(redwood ~1, "Thomas", subset=Zsub) fitWsub #' various methods ff <- as.fv(fitx) Y <- simulate(fitx, seed=42, saveLambda=TRUE)[[1]] uu <- unitname(fitx) unitname(fitCx) <- "furlong" mo <- model.images(fitCx) p <- psib(fit) px <- psib(fitx) # vcov.kppm different algorithms vc <- vcov(fitx) vc2 <- vcov(fitx, fast=TRUE) vc3 <- vcov(fitx, fast=TRUE, splitup=TRUE) vc4 <- vcov(fitx, splitup=TRUE) ## other code blocks a <- varcount(fitx, function(x,y){x+1}) # always positive a <- varcount(fitx, function(x,y){y-1}) # always negative a <- varcount(fitx, function(x,y){x+y}) # positive or negative # improve.kppm fitI <- update(fit, improve.type="quasi") fitxI <- update(fitx, improve.type="quasi") fitxIs <- update(fitx, improve.type="quasi", fast=FALSE) # vcov.kppm vcI <- vcov(fitxI) # plot.kppm including predict.kppm fitMC <- kppm(redwood ~ x, "Thomas") fitCL <- kppm(redwood ~ x, "Thomas", method="c") fitPA <- kppm(redwood ~ x, "Thomas", method="p") plot(fitMC) plot(fitCL) plot(fitPA) # fit with composite likelihood method [thanks to Abdollah Jalilian] fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8) kfut <- as.fv(fut) if(require(RandomFields)) { fit0 <- kppm(redwood ~1, "LGCP") is.poisson(fit0) Y0 <- simulate(fit0, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y0)) p0 <- psib(fit0) # issues a warning ## fit LGCP using K function: slow fit1 <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), control=list(maxit=3)) Y1 <- simulate(fit1, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1)) ## fit LGCP using pcf fit1p <- kppm(redwood ~x, "LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1p <- simulate(fit1p, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1p)) ## .. 
and using different fitting methods fit1pClik <- update(fit1p, method="clik") fit1pPalm <- update(fit1p, method="palm") ## image covariate (a different code block) xx <- as.im(function(x,y) x, Window(redwood)) fit1xx <- update(fit1p, . ~ xx, data=solist(xx=xx)) Y1xx <- simulate(fit1xx, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1xx)) fit1xxVG <- update(fit1xx, clusters="VarGamma", nu=-1/4) Y1xxVG <- simulate(fit1xxVG, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y1xxVG)) fit1xxLG <- update(fit1xx, clusters="LGCP", covmodel=list(model="matern", nu=0.3), statistic="pcf") Y1xxLG <- simulate(fit1xxLG, saveLambda=TRUE, drop=TRUE) stopifnot(is.ppp(Y1xxLG)) # ... and Abdollah's code fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K") Y2 <- simulate(fit2, saveLambda=TRUE)[[1]] stopifnot(is.ppp(Y2)) # check package mechanism kraever("RandomFields") } }) local({ #' various code blocks fut <- kppm(redwood, ~x) fet <- update(fut, redwood3) fot <- update(fut, trend=~y) fit <- kppm(redwoodfull ~ x) Y <- simulate(fit, window=redwoodfull.extra$regionII, saveLambda=TRUE) gut <- improve.kppm(fit, type="wclik1") gut <- improve.kppm(fit, vcov=TRUE, fast.vcov=TRUE, save.internals=TRUE) hut <- kppm(redwood ~ x, method="clik", weightfun=NULL) hut <- kppm(redwood ~ x, method="palm", weightfun=NULL) mut <- kppm(redwood) nut <- update(mut, Y) }) local({ #' minimum contrast code K <- Kest(redwood) a <- matclust.estK(K) a <- thomas.estK(K) a <- cauchy.estK(K) a <- vargamma.estK(K) a <- lgcp.estK(K) print(a) u <- unitname(a) g <- pcf(redwood) a <- matclust.estpcf(g) a <- thomas.estpcf(g) a <- cauchy.estpcf(g) a <- vargamma.estpcf(g) a <- lgcp.estpcf(g) #' auxiliary functions b <- resolve.vargamma.shape(nu.pcf=1.5) Z <- clusterfield("Thomas", kappa=1, scale=0.2) aa <- NULL aa <- accumulateStatus(simpleMessage("Woof"), aa) aa <- accumulateStatus(simpleMessage("Sit"), aa) aa <- accumulateStatus(simpleMessage("Woof"), aa) printStatusList(aa) RMIN <- 0.01 fit <- kppm(redwood ~ 1, 
ctrl=list(rmin=RMIN,q=1/2)) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("kppm did not handle parameter 'rmin' in argument 'ctrl' ") fit <- kppm(redwood ~ 1, ctrl=list(rmin=0,q=1/2), rmin=RMIN) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("kppm did not handle parameter 'rmin' in argument 'ctrl'") RMIN <- 2 fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=RMIN,q=1)) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("dppm did not handle parameter 'rmin' in argument 'ctrl'") fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=0,q=1), rmin=RMIN) if(fit$Fit$mcfit$ctrl$rmin != RMIN) stop("dppm did not handle argument 'rmin'") }) local({ #' experimental spatstat.options(kppm.canonical=TRUE, kppm.adjusted=TRUE) futTT1 <- kppm(redwood) futTT2 <- kppm(redwood, method="palm") futTT3 <- kppm(redwood, method="clik2") spatstat.options(kppm.canonical=TRUE, kppm.adjusted=FALSE) futTF1 <- kppm(redwood) futTF2 <- kppm(redwood, method="palm") futTF3 <- kppm(redwood, method="clik2") spatstat.options(kppm.canonical=FALSE, kppm.adjusted=TRUE) futFT1 <- kppm(redwood) futFT2 <- kppm(redwood, method="palm") futFT3 <- kppm(redwood, method="clik2") spatstat.options(kppm.canonical=FALSE, kppm.adjusted=FALSE) futFF1 <- kppm(redwood) futFF2 <- kppm(redwood, method="palm") futFF3 <- kppm(redwood, method="clik2") ## unsupported options that give a warning spatstat.options(kppm.canonical=TRUE, kppm.adjusted=TRUE) futXX1 <- kppm(redwood, clusters="MatClust") futXX2 <- kppm(redwood, clusters="MatClust", method="palm") futXX3 <- kppm(redwood, clusters="MatClust", method="clik2") jpines <- residualspaper$Fig1 fut <- dppm(jpines ~ 1, dppGauss) print(fut) spatstat.options(kppm.canonical=FALSE, kppm.adjusted=FALSE) }) local({ #' cover a few code blocks fut <- kppm(redwood ~ x, method="clik") print(summary(fut)) a <- residuals(fut) fut2 <- kppm(redwood ~ x, "LGCP", method="palm") print(summary(fut2)) b <- residuals(fut2) #' po <- ppm(redwood ~ 1) A <- kppmComLik(redwood, Xname="redwood", po=po, clusters="Thomas", 
statistic="pcf", statargs=list(), control=list(), weightfun=NULL, rmax=0.1) A <- kppmPalmLik(redwood, Xname="redwood", po=po, clusters="Thomas", statistic="pcf", statargs=list(), control=list(), weightfun=NULL, rmax=0.1) }) reset.spatstat.options() spatstat/tests/testsGtoJ.R0000644000176200001440000003004613616220211015325 0ustar liggesusers## ## tests/gcc323.R ## ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ # critical R values that provoke GCC bug #323 a <- marktable(lansing, R=0.25) a <- marktable(lansing, R=0.21) a <- marktable(lansing, R=0.20) a <- marktable(lansing, R=0.10) }) # # tests/hobjects.R # # Validity of methods for ppm(... method="ho") # require(spatstat) local({ set.seed(42) fit <- ppm(cells ~1, Strauss(0.1), method="ho", nsim=10) fitx <- ppm(cells ~offset(x), Strauss(0.1), method="ho", nsim=10) a <- AIC(fit) ax <- AIC(fitx) f <- fitted(fit) fx <- fitted(fitx) p <- predict(fit) px <- predict(fitx) }) # # tests/hyperframe.R # # test "[.hyperframe" etc # # $Revision: 1.7 $ $Date: 2020/01/11 04:55:17 $ # require(spatstat) local({ lambda <- runif(4, min=50, max=100) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) h$lambda2 <- lambda^2 h[, "lambda3"] <- lambda^3 h[, "Y"] <- X h[, "X"] <- lapply(X, flipxy) h[, c("X", "Y")] <- hyperframe(X=X, Y=X) names(h) <- LETTERS[1:5] print(h) summary(h) str(h) head(h) tail(h) rn <- rownames(h) r.n <- row.names(h) if(!identical(rn, r.n)) stop("rownames and row.names conflict for hyperframes") dn <- dimnames(h) dimnames(h) <- dn dimnames(h)[[2]][2] <- "copacetic" dimnames(h)[[1]][2] <- "second" #' hyperframe with a hyperatom H <- hyperframe(A=runif(3), B=1:3, D=runifpoint(10)) H[,3] H[,3,drop=TRUE] #' special cases of [<- H$B <- H[,1] H[2:3,1] <- H[2:3,2] H[2:3,1] <- H[2,2] H[2,1:2] <- H[3,1:2] #' split f <- factor(c("a", "a", "b")) G <- split(H, f) G[["a"]]$B <- 42 split(H, f) <- G }) #' tests/hypotests.R #' Hypothesis tests #' #' $Revision: 
1.5 $ $Date: 2019/12/14 03:11:05 $ require(spatstat) local({ hopskel.test(redwood, method="MonteCarlo", nsim=5) berman.test(spiders, "x") berman.test(lppm(spiders ~ x), "y") #' quadrat test - spatial methods a <- quadrat.test(redwood, 3) domain(a) shift(a, c(1,1)) #' cases of studpermu.test #' X is a hyperframe b <- studpermu.test(pyramidal, nperm=9) b <- studpermu.test(pyramidal, nperm=9, use.Tbar=TRUE) #' X is a list of lists of ppp ZZ <- split(pyramidal$Neurons, pyramidal$group) bb <- studpermu.test(ZZ, nperm=9) #' Issue #115 X <- runifpoint(50, nsim = 3) Y <- runifpoint(3000, nsim = 3) h <- hyperframe(ppp = c(X, Y), group = rep(1:2, 3)) studpermu.test(h, ppp ~ group) #' scan test Z <- scanmeasure(cells, 0.1, method="fft") rr <- c(0.05, 1) scan.test(amacrine, rr, nsim=5, method="binomial", alternative="less") fit <- ppm(cells ~ x) lam <- predict(fit) scan.test(cells, rr, nsim=5, method="poisson", baseline=fit, alternative="less") scan.test(cells, rr, nsim=5, method="poisson", baseline=lam, alternative="less") }) # # tests/imageops.R # # $Revision: 1.27 $ $Date: 2020/02/04 06:07:48 $ # require(spatstat) local({ #' cases of 'im' data tab <- table(sample(factor(letters[1:10]), 30, replace=TRUE)) b <- im(tab, xrange=c(0,1), yrange=c(0,10)) b <- update(b) mat <- matrix(sample(0:4, 12, replace=TRUE), 3, 4) levels(mat) <- 0:4 b <- im(mat) b <- update(b) D <- as.im(mat, letterR) df <- as.data.frame(D) DD <- as.im(df, step=c(D$xstep, D$ystep)) #' various manipulations AA <- A <- as.im(owin()) BB <- B <- as.im(owin(c(1.1, 1.9), c(0,1))) Z <- imcov(A, B) stopifnot(abs(max(Z) - 0.8) < 0.1) Frame(AA) <- Frame(B) Frame(BB) <- Frame(A) ## handling images with 1 row or column ycov <- function(x, y) y E <- as.im(ycov, owin(), dimyx = c(2,1)) G <- cut(E, 2) H <- as.tess(G) E12 <- as.im(ycov, owin(), dimyx = c(1,2)) G12 <- cut(E12, 2) H12 <- as.tess(G12) AAA <- as.array(AA) EEE <- as.array(E) AAD <- as.double(AA) EED <- as.double(E) aaa <- xtfrm(AAA) eee <- xtfrm(E) ## d <- 
distmap(cells, dimyx=32) Z <- connected(d <= 0.06, method="interpreted") a <- where.max(d, first=FALSE) a <- where.min(d, first=FALSE) dx <- raster.x(d) dy <- raster.y(d) dxy <- raster.xy(d) xyZ <- raster.xy(Z, drop=TRUE) horosho <- conform.imagelist(cells, list(d, Z)) #' split.im W <- square(1) X <- as.im(function(x,y){x}, W) Y <- dirichlet(runifpoint(7, W)) Z <- split(X, as.im(Y)) ## cases of "[.im" ee <- d[simplenet, drop=FALSE] eev <- d[simplenet] Empty <- cells[FALSE] EmptyFun <- ssf(Empty, numeric(0)) ff <- d[Empty] ff <- d[EmptyFun] gg <- d[2,] gg <- d[,2] gg <- d[j=2] gg <- d[2:4, 3:5] hh <- d[2:4, 3:5, rescue=TRUE] if(!is.im(hh)) stop("rectangle was not rescued in [.im") ## cases of "[<-.im" d[,] <- d[] + 1 d[Empty] <- 42 d[EmptyFun] <- 42 ## smudge() and rasterfilter() dd <- smudge(d) ## rgb/hsv options X <- setcov(owin()) M <- Window(X) Y <- as.im(function(x,y) x, W=M) Z <- as.im(function(x,y) y, W=M) # convert after rescaling RGBscal <- rgbim(X, Y, Z, autoscale=TRUE, maxColorValue=1) HSVscal <- hsvim(X, Y, Z, autoscale=TRUE) #' cases of [.im Ma <- as.mask(M, dimyx=37) ZM <- Z[raster=Ma, drop=FALSE] ZM[solutionset(Y+Z > 0.4)] <- NA ZF <- cut(ZM, breaks=5) ZL <- (ZM > 0) P <- list(x=c(0.511, 0.774, 0.633, 0.248, 0.798), y=c(0.791, 0.608, 0.337, 0.613, 0.819)) zmp <- ZM[P, drop=TRUE] zfp <- ZF[P, drop=TRUE] zlp <- ZL[P, drop=TRUE] P <- as.ppp(P, owin()) zmp <- ZM[P, drop=TRUE] zfp <- ZF[P, drop=TRUE] zlp <- ZL[P, drop=TRUE] #' miscellaneous ZZ <- zapsmall.im(Z, digits=6) ZZ <- zapsmall.im(Z) ZS <- shift(Z, origin="centroid") ZS <- shift(Z, origin="bottomleft") ZA <- affine(Z, mat=diag(c(-1,-2))) U <- scaletointerval(Z) C <- as.im(1, W=U) U <- scaletointerval(C) #' hist.im h <- hist(Z) h <- hist(Z, probability=TRUE) h <- hist(Z, plot=FALSE) Zcut <- cut(Z, breaks=5) h <- hist(Zcut) # barplot plot(h) # plot.barplotdata #' plot.im code blocks plot(Z, ribside="left") plot(Z, ribside="top") plot(Z, riblab="value") plot(Z, clipwin=square(0.5)) plot(Z - mean(Z), 
log=TRUE) plot(Z, valuesAreColours=TRUE) # rejected with a warning IX <- as.im(function(x,y) { as.integer(round(3*x)) }, square(1)) co <- colourmap(rainbow(4), inputs=0:3) plot(IX, col=co) CX <- eval.im(col2hex(IX+1L)) plot(CX, valuesAreColours=TRUE) plot(CX, valuesAreColours=FALSE) #' pairs.im pairs(solist(Z)) pairs(solist(A=Z)) #' handling and plotting of character and factor images Afactor <- as.im(col2hex("green"), letterR, na.replace=col2hex("blue")) Acharacter <- as.im(col2hex("green"), letterR, na.replace=col2hex("blue"), stringsAsFactors=FALSE) plot(Afactor) plot(Acharacter, valuesAreColours=TRUE) print(summary(Afactor)) print(summary(Acharacter)) #' safelookup (including extrapolation case) Z <- as.im(function(x,y) { x - y }, letterR) B <- grow.rectangle(Frame(letterR), 1) X <- superimpose(runifpoint(10,letterR), runifpoint(20, setminus.owin(B, letterR)), vertices(Frame(B)), W=B) a <- safelookup(Z, X) b <- safelookup(cut(Z, breaks=4), X) #' Smooth.im -> blur.im with sigma=NULL ZS <- Smooth(Z) #' check nearest.valid.pixel W <- Window(demopat) set.seed(911911) X <- runifpoint(1000, W) Z <- quantess(W, function(x,y) { x }, 9)$image x <- X$x y <- X$y a <- nearest.valid.pixel(x, y, Z, method="interpreted") b <- nearest.valid.pixel(x, y, Z, method="C") if(!isTRUE(all.equal(a,b))) stop("Unequal results in nearest.valid.pixel") if(!identical(a,b)) stop("Equal, but not identical, results in nearest.valid.pixel") #' cases of distcdf distcdf(cells[1:5]) distcdf(W=cells[1:5], dW=1:5) distcdf(W=Window(cells), V=cells[1:5]) distcdf(W=Window(cells), V=cells[1:5], dV=1:5) #' im.apply DA <- density(split(amacrine)) Z <- im.apply(DA, sd) Z <- which.max.im(DA) # deprecated -> im.apply(DA, which.max) #' Math.imlist, Ops.imlist, Complex.imlist U <- Z+2i B <- U * (2+1i) print(summary(B)) V <- solist(A=U, B=B) negV <- -V E <- Re(V) negE <- -E #' rotmean U <- rotmean(Z, origin="midpoint", result="im", padzero=FALSE) }) #' indices.R #' Tests of code for understanding index vectors 
etc #' $Revision: 1.1 $ $Date: 2018/03/01 03:38:07 $ require(spatstat) local({ a <- grokIndexVector(c(FALSE,TRUE), 10) b <- grokIndexVector(rep(c(FALSE,TRUE), 7), 10) d <- grokIndexVector(c(2,12), 10) e <- grokIndexVector(letters[4:2], nama=letters) f <- grokIndexVector(letters[10:1], nama=letters[1:5]) g <- grokIndexVector(-c(2, 5), 10) h <- grokIndexVector(-c(2, 5, 15), 10) Nam <- letters[1:10] j <- positiveIndex(-c(2,5), nama=Nam) jj <- logicalIndex(-c(2,5), nama=Nam) k <- positiveIndex(-c(2,5), nama=Nam) kk <- logicalIndex(-c(2,5), nama=Nam) mm <- positiveIndex(c(FALSE,TRUE), nama=Nam) nn <- positiveIndex(FALSE, nama=Nam) aa <- ppsubset(cells, square(0.1)) }) #' #' tests/interact.R #' #' Support for interaction objects #' #' $Revision: 1.1 $ $Date: 2019/12/10 01:57:18 $ require(spatstat) local({ #' print.intermaker Strauss Geyer Ord #' intermaker BS <- get("BlankStrauss", envir=environment(Strauss)) BD <- function(r) { instantiate.interact(BS, list(r=r)) } BlueDanube <- intermaker(BD, BS) }) #' tests/ippm.R #' Tests of 'ippm' class #' $Revision: 1.4 $ $Date: 2020/01/07 09:36:42 $ require(spatstat) local({ # .......... set up example from help file ................. 
nd <- 10 gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model fit <- ippm(X ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) # fit model with logistic likelihood but without iScore fitlo <- ippm(X ~Z + offset(log(f)), method="logi", covariates=list(Z=Z, f=f), start=list(gamma=1, delta=1), nd=nd) ## ............. test ippm class support ...................... Ar <- model.matrix(fit) Ai <- model.matrix(fit, irregular=TRUE) Zr <- model.images(fit) Zi <- model.images(fit, irregular=TRUE) ## update.ippm fit2 <- update(fit, . ~ . + I(Z^2)) fit0 <- update(fit, . ~ . - Z, start=list(gamma=2, delta=4)) oldfit <- ippm(X, ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), nd=nd) oldfit2 <- update(oldfit, . ~ . + I(Z^2)) oldfit0 <- update(oldfit, . ~ . - Z, start=list(gamma=2, delta=4)) ## again with logistic fitlo2 <- update(fitlo, . ~ . + I(Z^2)) fitlo0 <- update(fitlo, . ~ . - Z, start=list(gamma=2, delta=4)) oldfitlo <- ippm(X, ~Z + offset(log(f)), method="logi", covariates=list(Z=Z, f=f), start=list(gamma=1, delta=1), nd=nd) oldfitlo2 <- update(oldfitlo, . ~ . + I(Z^2)) oldfitlo0 <- update(oldfitlo, . ~ . - Z, start=list(gamma=2, delta=4)) ## anova.ppm including ippm objects fit0 <- update(fit, . 
~ Z) fit0lo <- update(fitlo, . ~ Z) A <- anova(fit0, fit) Alo <- anova(fit0lo, fitlo) }) spatstat/tests/testsR2.R0000644000176200001440000010136613603007371014757 0ustar liggesusers# # tests/rmhAux.R # # $Revision: 1.1 $ $Date: 2013/02/18 10:41:27 $ # # For interactions which maintain 'auxiliary data', # verify that the auxiliary data are correctly updated. # # To do this we run rmh with nsave=1 so that the point pattern state # is saved after every iteration, then the algorithm is restarted, # and the auxiliary data are re-initialised. The final state must agree with # the result of simulation without saving. # ---------------------------------------------------- require(spatstat) local({ # Geyer: mod <- list(cif="geyer", par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=square(10)) set.seed(42) X.nosave <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1)) set.seed(42) X.save <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1, nburn=0, nsave=1, pstage="start")) #' Need to set pstage='start' so that proposals are generated #' at the start of the procedure in both cases. stopifnot(npoints(X.save) == npoints(X.nosave)) stopifnot(max(nncross(X.save, X.nosave)$dist) == 0) stopifnot(max(nncross(X.nosave, X.save)$dist) == 0) }) ## ## tests/rmhBasic.R ## ## $Revision: 1.20 $ $Date: 2019/12/31 05:01:21 $ # # Test examples for rmh.default # run to reasonable length # and with tests for validity added # ---------------------------------------------------- require(spatstat) local({ if(!exists("nr")) nr <- 2e3 spatstat.options(expand=1.1) # Strauss process. 
mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr)) X1.strauss2 <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr, periodic=FALSE)) # Strauss process, conditioning on n = 80: X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr)) stopifnot(npoints(X2.strauss) == 80) # test tracking mechanism X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr), track=TRUE) X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr), track=TRUE) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) X3.hardcore2 <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr, periodic=FALSE)) # Strauss process equal to pure hardcore: mod02 <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) # Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=90), control=list(nrep=nr)) # Strauss process in a polygonal window, conditioning on n = 42. X5.strauss <- rmh(model=mod03,start=list(n.start=42), control=list(p=1,nrep=nr)) stopifnot(npoints(X5.strauss) == 42) # Strauss process, starting off from X4.strauss, but with the # polygonal window replace by a rectangular one. At the end, # the generated pattern is clipped to the original polygonal window. 
xxx <- X4.strauss xxx$window <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=70), control=list(nrep=nr)) X1.straush2 <- rmh(model=mod04,start=list(n.start=70), control=list(nrep=nr, periodic=FALSE)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=250), control=list(nrep=nr)) # Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=60), control=list(nrep=nr)) # Fiksel modFik <- list(cif="fiksel", par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0), w=square(1)) X.fiksel <- rmh(model=modFik,start=list(n.start=10), control=list(nrep=nr)) X.fiksel2 <- rmh(model=modFik,start=list(n.start=10), control=list(nrep=nr,periodic=FALSE)) # Penttinen process: modpen <- rmhmodel(cif="penttinen",par=list(beta=2,gamma=0.6,r=1), w=c(0,10,0,10)) X.pen <- rmh(model=modpen,start=list(n.start=10), control=list(nrep=nr)) X.pen2 <- rmh(model=modpen,start=list(n.start=10), control=list(nrep=nr, periodic=FALSE)) # Area-interaction, inhibitory mod.area <- list(cif="areaint",par=list(beta=2,eta=0.5,r=0.5), w=square(10)) X.area <- rmh(model=mod.area,start=list(n.start=60), control=list(nrep=nr)) X.areaE <- rmh(model=mod.area,start=list(n.start=60), control=list(nrep=nr, periodic=FALSE)) # Area-interaction, clustered mod.area2 <- list(cif="areaint",par=list(beta=2,eta=1.5,r=0.5), w=square(10)) X.area2 <- rmh(model=mod.area2,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction close to hard core set.seed(42) mod.area0 <- list(cif="areaint",par=list(beta=2,eta=1e-300,r=0.35), w=square(10)) X.area0 <- 
rmh(model=mod.area0,start=list(x.start=X3.hardcore), control=list(nrep=nr)) stopifnot(nndist(X.area0) > 0.6) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=70), control=list(nrep=nr)) X.sftcr2 <- rmh(model=mod07,start=list(n.start=70), control=list(nrep=nr, periodic=FALSE)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=300), control=list(nrep=nr)) X.dgs2 <- rmh(model=mod12,start=list(n.start=300), control=list(nrep=nr, periodic=FALSE)) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=300), control=list(nrep=nr)) X.diggra2 <- rmh(model=mod13,start=list(n.start=300), control=list(nrep=nr, periodic=FALSE)) # Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=200), control=list(nrep=nr)) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=200), control=list(nrep=nr)) X2.geyer2 <- rmh(model=mod15,start=list(n.start=200), control=list(nrep=nr, periodic=FALSE)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. 
h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=100), control=list(nrep=nr, periodic=TRUE)) X.lookup2 <- rmh(model=mod17,start=list(n.start=100), control=list(nrep=nr, expand=1, periodic=FALSE)) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend mod17 <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),w=c(0,250,0,250), trend=tr3) X1.strauss.trend <- rmh(model=mod17,start=list(n.start=90), control=list(nrep=nr)) #' trend is an image mod18 <- mod17 mod18$trend <- as.im(mod18$trend, square(10)) X1.strauss.trendim <- rmh(model=mod18,start=list(n.start=90), control=list(nrep=nr)) #'..... Test other code blocks ................. #' argument passing to rmhcontrol X1S <- rmh(model=mod01, control=NULL, nrep=nr) X1f <- rmh(model=mod01, fixall=TRUE, nrep=nr) # issues a warning #' nsim > 1 Xlist <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr), nsim=2) #' Condition on contents of window XX <- Xlist[[1]] YY <- XX[square(2)] XXwindow <- rmh(model=mod01, start=list(n.start=80), control=list(nrep=nr, x.cond=YY)) XXwindowTrend <- rmh(model=mod17, start=list(n.start=80), control=list(nrep=nr, x.cond=YY)) #' Palm conditioning XXpalm <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr, x.cond=coords(YY))) XXpalmTrend <- rmh(model=mod17,start=list(n.start=80), control=list(nrep=nr, x.cond=coords(YY))) #' nsave, nburn chq <- function(X) { Xname <- deparse(substitute(X)) A <- attr(X, "saved") if(length(A) == 0) stop(paste(Xname, "did not include a saved list of patterns")) return("ok") } XXburn <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=500, nburn=100)) chq(XXburn) XXburnTrend <- 
rmh(model=mod17,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=500, nburn=100)) chq(XXburnTrend) XXburn0 <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=500, nburn=0)) chq(XXburn0) XXsaves <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=c(500, 200))) chq(XXsaves) XXsaves0 <- rmh(model=mod01,start=list(n.start=80), verbose=FALSE, control=list(nrep=nr, nsave=c(500, 200), nburn=0)) chq(XXsaves0) #' code blocks for various interactions, not otherwise tested rr <- seq(0,0.2,length=8)[-1] gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5) mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=rr,sat=5), w=square(1)) Xbg <- rmh(model=mod18,start=list(n.start=20), control=list(nrep=1e4, periodic=TRUE)) Xbg2 <- rmh(model=mod18,start=list(n.start=20), control=list(nrep=1e4, periodic=FALSE)) }) local({ #' supporting classes rs <- rmhstart() print(rs) rs <- rmhstart(x.start=cells) print(rs) rc <- rmhcontrol(x.cond=as.list(as.data.frame(cells))) print(rc) rc <- rmhcontrol(x.cond=as.data.frame(cells)[FALSE, , drop=FALSE]) print(rc) rc <- rmhcontrol(nsave=100, ptypes=c(0.7, 0.3), x.cond=amacrine) print(rc) rc <- rmhcontrol(ptypes=c(0.7, 0.3), x.cond=as.data.frame(amacrine)) print(rc) }) reset.spatstat.options() ## ## tests/rmhErrors.R ## ## $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ ## # Things which should cause an error require(spatstat) local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 1e3 # Strauss with zero intensity and p = 1 mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) out <- try(X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(p=1,nrep=nr,nverb=nv),verbose=FALSE)) if(!inherits(out, "try-error")) stop("Error not trapped (Strauss with zero intensity and p = 1) in tests/rmhErrors.R") }) # # tests/rmhExpand.R # # test decisions about expansion of simulation window # # $Revision: 1.4 $ $Date: 2018/07/21 00:46:16 $ # require(spatstat) 
local({ fit <- ppm(cells ~x) # check rmhmodel.ppm mod <- rmhmodel(fit) is.expandable(mod) wsim <- as.rectangle(mod$trend) # work around changes in 'unitname' wcel <- as.owin(cells) unitname(wcel) <- unitname(cells) # test if(!identical(wsim, wcel)) stop("Expansion occurred improperly in rmhmodel.ppm") }) # # tests/rmhMulti.R # # tests of rmh, running multitype point processes # # $Revision: 1.12 $ $Date: 2019/12/13 00:57:28 $ require(spatstat) local({ if(!exists("nr")) nr <- 5e3 if(!exists("nv")) nv <- 0 spatstat.options(expand=1.1) # Multitype Poisson modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1)) # Multinomial Xp2fix <- rmh(modp2, start=list(n.start=c(10,20,30)), control=list(fixall=TRUE, p=1)) Xp2fixr <- rmh(modp2, start=list(x.start=Xp2fix), control=list(fixall=TRUE, p=1)) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) stopifnot(X2.straussm$n == 80) # Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) stopifnot(all(table(X3.straussm$marks) == c(60,20))) # Multitype hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod085 <- list(cif="multihard",par=list(beta=beta,hradii=rhc), w=c(0,250,0,250)) X.multihard <- rmh(model=mod085,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) X.multihardP <- 
rmh(model=mod085,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv, periodic=TRUE)) #' check handling of initial state which violates hard core set.seed(19171025) mod087 <- list(cif="multihard",par=list(beta=5*beta,hradii=rhc), w=square(12)) #' (cannot use 'x.start' here because it disables thinning) X.multihard.close <- rmh(model=mod087,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) X.multihard.closeP <- rmh(model=mod087,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv, periodic=TRUE)) # Multitype Strauss hardcore: mod09 <- list(cif="straushm",par=list(beta=5*beta,gamma=gmma, iradii=r,hradii=rhc),w=square(12)) X.straushm <- rmh(model=mod09,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) X.straushmP <- rmh(model=mod09,start=list(n.start=100), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv, periodic=TRUE)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=350), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=350), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) #' nsave, nburn chq <- function(X) { Xname <- deparse(substitute(X)) A <- attr(X, "saved") if(length(A) == 0) stop(paste(Xname, "did not include a saved list of patterns")) 
return("ok") } XburnMS <- rmh(model=mod08,start=list(n.start=80), verbose=FALSE, control=list(ptypes=c(0.75,0.25), nrep=nr,nsave=500, nburn=100)) chq(XburnMS) XburnMStrend <- rmh(model=mod10,start=list(n.start=350), verbose=FALSE, control=list(ptypes=c(0.75,0.25), nrep=nr,nsave=500, nburn=100)) chq(XburnMStrend) ####################################################################### ############ checks on distribution of output ####################### ####################################################################### checkp <- function(p, context, testname, failmessage, pcrit=0.01) { if(missing(failmessage)) failmessage <- paste("output failed", testname) if(p < pcrit) warning(paste(context, ",", failmessage), call.=FALSE) cat(paste("\n", context, ",", testname, "has p-value", signif(p,4), "\n")) } # Multitype Strauss code; output is multitype Poisson beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ rep(1, length(x)) } tr2 <- function(x,y){ rep(2, length(x)) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=0), control=list(nrep=1e6)) # The model is Poisson with intensity 100 for type 1 and 200 for type 2. # Total number of points is Poisson (300) # Marks are i.i.d. with P(type 1) = 1/3, P(type 2) = 2/3. 
# Test whether the total intensity looks right # p <- ppois(X$n, 300) p.val <- 2 * min(p, 1-p) checkp(p.val, "In multitype Poisson simulation", "test whether total number of points has required mean value") # Test whether the mark distribution looks right ta <- table(X$marks) cat("Frequencies of marks:") print(ta) checkp(chisq.test(ta, p = c(1,2)/3)$p.value, "In multitype Poisson simulation", "chi-squared goodness-of-fit test for mark distribution (1/3, 2/3)") ##### #### multitype Strauss code; fixall=TRUE; #### output is multinomial process with nonuniform locations #### the.context <- "In nonuniform multinomial simulation" beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ ifelse(x < 0.5, 0, 2) } tr2 <- function(x,y){ ifelse(y < 0.5, 1, 3) } # cdf of these distributions Fx1 <- function(x) { ifelse(x < 0.5, 0, ifelse(x < 1, 2 * x - 1, 1)) } Fy2 <- function(y) { ifelse(y < 0, 0, ifelse(y < 0.5, y/2, ifelse(y < 1, (1/2 + 3 * (y-1/2))/2, 1))) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=c(50,50)), control=list(nrep=1e6, expand=1, p=1, fixall=TRUE)) # The model is Poisson # Mean number of type 1 points = 100 # Mean number of type 2 points = 200 # Total intensity = 300 # Marks are i.i.d. 
with P(type 1) = 1/3, P(type 2) = 2/3 # Test whether the coordinates look OK Y <- split(X) X1 <- Y[[names(Y)[1]]] X2 <- Y[[names(Y)[2]]] checkp(ks.test(X1$y, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of y coordinates of type 1 points") if(any(X1$x < 0.5)) { stop(paste(the.context, ",", "x-coordinates of type 1 points are IMPOSSIBLE"), call.=FALSE) } else { checkp(ks.test(Fx1(X1$x), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed x coordinates of type 1 points") } checkp(ks.test(X2$x, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of x coordinates of type 2 points") checkp(ks.test(Fy2(X2$y), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed y coordinates of type 2 points") }) reset.spatstat.options() # # tests/rmhTrend.R # # Problems with trend images (rmhmodel.ppm or rmhEngine) # require(spatstat) local({ set.seed(42) # Bug folder 37 of 8 feb 2011 # rmhmodel.ppm -> predict.ppm # + rmhResolveTypes -> is.subset.owin data(demopat) Z <- rescale(demopat, 7000) X <- unmark(Z) X1 <- split(Z)[[1]] Int <- density(X,dimyx=200) Lint <- eval.im(log(npoints(X1)*Int/npoints(X))) M <- as.owin(Int) MR <- intersect.owin(M,scalardilate(M,0.5,origin="midpoint")) X1 <- X1[MR] Fut <- ppm(X1~offset(Lint),covariates=list(Lint=Lint), inter=BadGey(r=c(0.03,0.05),sat=3)) Y <- rmh(Fut,control=list(expand=M,nrep=1e3), verbose=FALSE) }) # # tests/rmhWeird.R # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # # strange boundary cases require(spatstat) local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 5e3 # Poisson process cat("Poisson\n") modP <- list(cif="poisson",par=list(beta=10), w = square(3)) XP <- rmh(model = modP, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) # Poisson process case of Strauss cat("\nPoisson case of Strauss\n") modPS <- list(cif="strauss",par=list(beta=10,gamma=1,r=0.7), w = square(3)) XPS <- rmh(model=modPS, 
start=list(n.start=25), control=list(nrep=nr,nverb=nv)) # Strauss with zero intensity cat("\nStrauss with zero intensity\n") mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(nrep=nr,nverb=nv)) stopifnot(X0S$n == 0) # Poisson with zero intensity cat("\nPoisson with zero intensity\n") mod0P <- list(cif="poisson",par=list(beta=0), w = square(3)) X0P <- rmh(model = mod0P, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) # Poisson conditioned on zero points cat("\nPoisson conditioned on zero points\n") modp <- list(cif="poisson", par=list(beta=2), w = square(10)) Xp <- rmh(modp, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(Xp$n == 0) # Multitype Poisson conditioned on zero points cat("\nMultitype Poisson conditioned on zero points\n") modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(is.marked(Xp2)) stopifnot(Xp2$n == 0) # Multitype Poisson conditioned on zero points of each type cat("\nMultitype Poisson conditioned on zero points of each type\n") Xp2fix <- rmh(modp2, start=list(n.start=c(0,0,0)), control=list(p=1, fixall=TRUE, nrep=nr)) stopifnot(is.marked(Xp2fix)) stopifnot(Xp2fix$n == 0) }) # # tests/rmhmodel.ppm.R # # $Revision: 1.8 $ $Date: 2015/12/29 08:54:49 $ # # Case-by-case tests of rmhmodel.ppm # require(spatstat) local({ f <- ppm(cells) m <- rmhmodel(f) f <- ppm(cells ~x) m <- rmhmodel(f) f <- ppm(cells ~1, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells ~1, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells ~1, DiggleGratton(0.05,0.1)) m <- rmhmodel(f) f <- ppm(cells ~1, Softcore(0.5), correction="isotropic") m <- rmhmodel(f) f <- ppm(cells ~1, Geyer(0.07,2)) m <- rmhmodel(f) f <- ppm(cells ~1, BadGey(c(0.07,0.1,0.13),2)) m <- rmhmodel(f) f <- ppm(cells ~1, PairPiece(r = c(0.05, 
0.1, 0.2))) m <- rmhmodel(f) f <- ppm(cells ~1, AreaInter(r=0.06)) m <- rmhmodel(f) # multitype r <- matrix(0.07, 2, 2) f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) h <- matrix(min(nndist(amacrine))/2, 2, 2) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) diag(r) <- NA diag(h) <- NA f <- ppm(amacrine ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) f <- ppm(amacrine ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) # multitype data, interaction not dependent on type f <- ppm(amacrine ~marks, Strauss(0.05)) m <- rmhmodel(f) # trends f <- ppm(cells ~x, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells ~y, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells ~x+y, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells ~polynom(x,y,2), Softcore(0.5), correction="isotropic") m <- rmhmodel(f) # covariates Z <- as.im(function(x,y){ x^2+y^2 }, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) Zim <- as.im(Z, as.owin(cells)) f <- ppm(cells ~z, covariates=list(z=Zim)) m <- rmhmodel(f) Z <- as.im(function(x,y){ x^2+y }, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) m <- rmhmodel(f, control=list(p=1,fixall=TRUE)) Zim <- as.im(Z, as.owin(amacrine)) f <- ppm(amacrine ~z + marks, covariates=list(z=Zim)) m <- rmhmodel(f) }) # # tests/rmhmodelHybrids.R # # Test that rmhmodel.ppm and rmhmodel.default # work on Hybrid interaction models # # $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ # ......... rmhmodel.ppm ....................... 
fit1 <- ppm(redwood ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2), C=Geyer(0.15, 1))) m1 <- rmhmodel(fit1) m1 reach(m1) ## Test of handling 'IsOffset' fit2 <- ppm(cells ~1, Hybrid(H=Hardcore(0.05), G=Geyer(0.15, 2))) m2 <- rmhmodel(fit2) ## also test C code for hybrid interaction with hard core fakecells <- rmh(fit2, nrep=1e4) # Test of handling Poisson components fit3 <- ppm(cells ~1, Hybrid(P=Poisson(), S=Strauss(0.05))) X3 <- rmh(fit3, control=list(nrep=1e3,expand=1), verbose=FALSE) # ............ rmhmodel.default ............................ modH <- list(cif=c("strauss","geyer"), par=list(list(beta=50,gamma=0.5, r=0.1), list(beta=1, gamma=0.7, r=0.2, sat=2)), w = square(1)) rmodH <- rmhmodel(modH) rmodH reach(rmodH) # test handling of Poisson components modHP <- list(cif=c("poisson","strauss"), par=list(list(beta=5), list(beta=10,gamma=0.5, r=0.1)), w = square(1)) rmodHP <- rmhmodel(modHP) rmodHP reach(rmodHP) modPP <- list(cif=c("poisson","poisson"), par=list(list(beta=5), list(beta=10)), w = square(1)) rmodPP <- rmhmodel(modPP) rmodPP reach(rmodPP) }) # # tests/rmh.ppm.R # # $Revision: 1.4 $ $Date: 2019/02/21 01:59:48 $ # # Examples removed from rmh.ppm.Rd # stripped down to minimal tests of validity # require(spatstat) local({ op <- spatstat.options() spatstat.options(rmh.nrep=10, npixel=10, ndummy.min=10) spatstat.options(project.fast=TRUE) Nrep <- 10 X <- swedishpines # Poisson process fit <- ppm(X ~1, Poisson()) Xsim <- rmh(fit) # Strauss process fit <- ppm(X ~1, Strauss(r=7)) Xsim <- rmh(fit) # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) # Extension of model to another window (thanks to Tuomas Rajala) Xsim <- rmh(fit, w=square(2)) Xsim <- simulate(fit, w=square(2)) # Strauss - hard core process # fit <- ppm(X ~1, StraussHard(r=7,hc=2)) # Xsim <- rmh(fit, start=list(n.start=X$n)) # Geyer saturation process # fit <- ppm(X ~1, Geyer(r=7,sat=2)) # Xsim <- 
rmh(fit, start=list(n.start=X$n)) # Area-interaction process fit <- ppm(X ~1, AreaInter(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) # Penttinen process fit <- ppm(X ~1, Penttinen(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) # soft core interaction process # X <- quadscheme(X, nd=50) # fit <- ppm(X ~1, Softcore(kappa=0.1), correction="isotropic") # Xsim <- rmh(fit, start=list(n.start=X$n)) # Diggle-Gratton pairwise interaction model # fit <- ppm(cells ~1, DiggleGratton(0.05, 0.1)) # Xsim <- rmh(fit, start=list(n.start=cells$n)) # plot(Xsim, main="simulation from fitted Diggle-Gratton model") X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) # marked point pattern Y <- amacrine #' marked Poisson models fit <- ppm(Y) Ysim <- rmh(fit) fit <- ppm(Y~marks) Ysim <- rmh(fit) fit <- ppm(Y~x) Ysim <- rmh(fit) fit <- ppm(Y~marks+x) Ysim <- rmh(fit) #' multitype Strauss typ <- levels(Y$marks) MS <- MultiStrauss(types = typ, radii=matrix(0.07, ncol=2, nrow=2)) fit <- ppm(Y~marks*x, MS) Ysim <- rmh(fit) #' multitype Hardcore h0 <- minnndist(unmark(Y)) * 0.95 MH <- MultiHard(types = typ, hradii=matrix(h0, ncol=2, nrow=2)) fit <- ppm(Y ~ marks+x, MH) Ysim <- rmh(fit) #' other code blocks Ysim <- rmh(fit, control=list(periodic=TRUE, expand=1)) Ysim <- rmh(fit, control=list(periodic=FALSE, expand=1)) #' multihard core with invalid initial state Ydouble <- superimpose(Y, rjitter(Y, h0/10)) Ysim <- rmh(fit, start=list(x.start=Ydouble)) #' Lennard-Jones fut <- ppm(unmark(longleaf) ~ 1, LennardJones(), rbord=1) Ysim <- rmh(fut) Ysim <- rmh(fut, control=list(periodic=TRUE, expand=1)) spatstat.options(op) }) reset.spatstat.options() #' #' tests/rmhsnoopy.R #' #' Test the rmh interactive debugger #' #' $Revision: 1.8 $ $Date: 2018/10/17 08:57:32 $ require(spatstat) local({ ## fit a model and prepare to simulate R <- 0.1 fit <- ppm(cells ~ 1, Strauss(R)) siminfo <- rmh(fit, preponly=TRUE) Wsim 
<- siminfo$control$internal$w.sim Wclip <- siminfo$control$internal$w.clip if(is.null(Wclip)) Wclip <- Window(cells) ## determine debugger interface panel geometry P <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R, xcoords=runif(40), ycoords=runif(40), mlevels=NULL, mcodes=NULL, irep=3, itype=1, proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, numerator=42, denominator=24, panel.only=TRUE) boxes <- P$boxes clicknames <- names(P$clicks) boxcentres <- do.call(concatxy, lapply(boxes, centroid.owin)) ## design a sequence of clicks actionsequence <- c("Up", "Down", "Left", "Right", "At Proposal", "Zoom Out", "Zoom In", "Reset", "Accept", "Reject", "Print Info", "Next Iteration", "Next Shift", "Next Death", "Skip 10", "Skip 100", "Skip 1000", "Skip 10,000", "Skip 100,000", "Exit Debugger") actionsequence <- match(actionsequence, clicknames) actionsequence <- actionsequence[!is.na(actionsequence)] xy <- lapply(boxcentres, "[", actionsequence) ## queue the click sequence spatstat.utils::queueSpatstatLocator(xy$x,xy$y) ## go rmh(fit, snoop=TRUE) }) spatstat/tests/testsR1.R0000644000176200001440000001376213603007370014757 0ustar liggesusers#' tests/randoms.R #' Further tests of random generation code #' $Revision: 1.11 $ $Date: 2019/12/11 00:30:12 $ require(spatstat) local({ A <- runifrect(6, nsim=2) A <- runifdisc(6, nsim=2) A <- runifpoispp(5, nsim=2) A <- runifpoispp(0, nsim=2) A <- rSSI(0.05, 6, nsim=2) A <- rSSI(0.05, 10, win=square(c(-0.5, 1.5)), x.init=A[[1]], nsim=2) A <- rstrat(nx=4, nsim=2) A <- rsyst(nx=4, nsim=2) A <- rthin(cells, P=0.5, nsim=2) A <- rthin(cells, runif(42)) A <- rthin(cells[FALSE], P=0.5, nsim=2) A <- rjitter(cells, nsim=2, retry=FALSE) A <- rjitter(cells[FALSE]) A <- rcell(square(1), nx=5, nsim=2) f <- function(x,y) { 10*x } Z <- as.im(f, square(1)) A <- rpoint(n=6, f=f, fmax=10, nsim=2) A <- rpoint(n=6, f=Z, fmax=10, nsim=2) A <- rpoint(n=0, f=f, fmax=10, nsim=2) A <- rpoint(n=0, f=Z, fmax=10, nsim=2) op <- spatstat.options(fastpois=FALSE) A 
<- runifpoispp(5, nsim=2) A <- rpoispp(Z) spatstat.options(op) b3 <- box3(c(0,1)) b4 <- boxx(c(0,1), c(0,1), c(0,1), c(0,1)) b5 <- c(0, 2, 0, 2) X <- rMaternInhibition(2, kappa=20, r=0.1, win=b3) Y <- rMaternInhibition(2, kappa=20, r=0.1, win=b4) Y <- rMaternInhibition(2, kappa=20, r=0.1, win=b5, nsim=2) X <- rSSI(0.05, 6) Y <- rSSI(0.05, 6, x.init=X) # no extra points Z <- rlabel(finpines) }) local({ f1 <- function(x,y){(x^2 + y^3)/10} f2 <- function(x,y){(x^3 + y^2)/10} ZZ <- solist(A=as.im(f1, letterR), B=as.im(f2, letterR)) XX <- rmpoispp(ZZ, nsim=3) YY <- rmpoint(10, f=ZZ, nsim=3) g <- function(x,y,m){(10+as.integer(m)) * (x^2 + y^3)} VV <- rpoint.multi(10, f=g, marks=factor(sample(letters[1:3], 10, replace=TRUE)), nsim=3) L <- edges(letterR) E <- runifpoisppOnLines(5, L) G <- rpoisppOnLines(ZZ, L) G2 <- rpoisppOnLines(list(A=f1, B=f2), L, lmax=max(sapply(ZZ, max))) }) local({ #' cluster models + bells + whistles X <- rThomas(10, 0.2, 5, saveLambda=TRUE) if(is.null(attr(X, "Lambda"))) stop("rThomas did not save Lambda image") Y <- rThomas(0, 0.2, 5, saveLambda=TRUE) if(is.null(attr(Y, "Lambda"))) stop("rThomas did not save Lambda image when kappa=0") X <- rMatClust(10, 0.05, 4, saveLambda=TRUE) X <- rCauchy(30, 0.01, 5, saveLambda=TRUE) X <- rVarGamma(30, 2, 0.02, 5, saveLambda=TRUE) Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin()) Y <- rThomas(10, 0.2, Z, saveLambda=TRUE) Y <- rMatClust(10, 0.05, Z, saveLambda=TRUE) Y <- rCauchy(30, 0.01, Z, saveLambda=TRUE) Y <- rVarGamma(30, 2, 0.02, Z, saveLambda=TRUE) }) #' perfect simulation code expandwinPerfect(letterR, 2, 3) reset.spatstat.options() #' tests/resid.R #' #' Stuff related to residuals and residual diagnostics #' #' $Revision: 1.5 $ $Date: 2019/12/21 04:59:50 $ #' require(spatstat) local({ fit <- ppm(cells ~x, Strauss(r=0.15)) diagnose.ppm(fit, cumulative=FALSE) diagnose.ppm(fit, cumulative=FALSE, type="pearson") rr <- residuals(fit, quad=quadscheme(cells, nd=128)) fitoff <- ppm(cells ~ sin(x) + 
offset(y)) plot(a <- parres(fitoff, "x")) plot(b <- parres(fitoff, "y")) print(a) print(b) d <- diagnose.ppm(fit, which="marks") plot(d, plot.neg="discrete") plot(d, plot.neg="imagecontour") d <- diagnose.ppm(fit, type="pearson", which="smooth") plot(d, plot.smooth="image") plot(d, plot.smooth="contour") plot(d, plot.smooth="imagecontour") d <- diagnose.ppm(fit, type="pearson", which="x") plot(d) d <- diagnose.ppm(fit, type="pearson", which="y") plot(d) diagnose.ppm(fit, type="pearson", which="x", cumulative=FALSE) diagnose.ppm(fit, type="pearson", which="x", cumulative=FALSE) diagnose.ppm(fit, type="raw", plot.neg="discrete", plot.smooth="image") diagnose.ppm(fit, type="pearson", plot.neg="contour", plot.smooth="contour") diagnose.ppm(fitoff, type="raw", which="smooth", plot.smooth="persp") diagnose.ppm(fitoff, type="pearson", plot.neg="imagecontour") plot(Frame(letterR), main="") ploterodewin(letterR, erosion(letterR, 0.05), main="jeans") W <- as.mask(letterR) plot(Frame(W), main="") ploterodewin(W, erosion(W, 0.05), main="JeAnS") #' entangled terms in model U <- as.im(1, owin()) Z <- as.im(function(x,y) x, owin()) X <- runifpoint(40) fut <- ppm(X ~ Z:U) a <- parres(fut, "Z") futoff <- ppm(X ~ offset(Z*U)) a <- parres(futoff, "Z") }) ## ## tests/rhohat.R ## ## Test all combinations of options for rhohatCalc ## ## $Revision: 1.3 $ $Date: 2018/05/13 04:42:21 $ local({ require(spatstat) X <- rpoispp(function(x,y){exp(3+3*x)}) ## rhohat.ppp ## done in example(rhohat): ## rhoA <- rhohat(X, "x") ## rhoB <- rhohat(X, "x", method="reweight") ## rhoC <- rhohat(X, "x", method="transform") ## alternative smoother (if package locfit available) rhoA <- rhohat(X, "x", smoother="local") rhoB <- rhohat(X, "x", smoother="local", method="reweight") rhoC <- rhohat(X, "x", smoother="local", method="transform") ## rhohat.ppm fit <- ppm(X, ~x) rhofitA <- rhohat(fit, "x") rhofitB <- rhohat(fit, "x", method="reweight") rhofitC <- rhohat(fit, "x", method="transform") ## Baseline lam <- 
predict(fit) rhoAb <- rhohat(X, "x", baseline=lam) rhoBb <- rhohat(X, "x", method="reweight", baseline=lam) rhoCb <- rhohat(X, "x", method="transform", baseline=lam) ## Horvitz-Thompson rhoAH <- rhohat(X, "x", horvitz=TRUE) rhoBH <- rhohat(X, "x", method="reweight", horvitz=TRUE) rhoCH <- rhohat(X, "x", method="transform", horvitz=TRUE) rhofitAH <- rhohat(fit, "x", horvitz=TRUE) rhofitBH <- rhohat(fit, "x", method="reweight", horvitz=TRUE) rhofitCH <- rhohat(fit, "x", method="transform", horvitz=TRUE) ## class support plot(rhoA) plot(rhoA, rho ~ x, shade=NULL) plot(rhoA, log(rho) ~ x, shade=NULL) plot(rhoA, log(.) ~ x) ## rho2hat r2xy <- rho2hat(X, "x", "y") r2xyw <- rho2hat(X, "x", "y", method="reweight") plot(r2xy, do.points=TRUE) xcoord <- function(x,y) x ycoord <- function(x,y) y xim <- as.im(xcoord, W=Window(X)) r2fi <- rho2hat(X, ycoord, xim) r2if <- rho2hat(X, xim, ycoord) }) spatstat/tests/testsP2.R0000644000176200001440000005107313616730337014765 0ustar liggesusers# # tests/ppmBadData.R # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # Testing robustness of ppm and support functions # when data are rubbish require(spatstat) local({ # --------------------------------------------------- # from Rolf: very large proportion of data is NA SEED <- 42 K <- 101 A <- 500 X <- seq(0, A, length=K) G <- expand.grid(x=X, y=X) FOO <- function(x,y) { sin(x)^2 + cos(y)^2 } M1 <- im(matrix(FOO(G$x, G$y), K, K), xcol=X, yrow=X) M <- im(matrix(FOO(G$x, G$y), K, K)) BAR <- function(x) { exp(-6.618913 + 5.855337 * x - 8.432483 * x^2) } V <- im(BAR(M$v), xcol=X, yrow=X) # V <- eval.im(exp(-6.618913 + 5.855337 * M - 8.432483 * M^2)) set.seed(SEED) Y <- rpoispp(V) fY <- ppm(Y ~cv + I(cv^2), data=list(cv=M), correction="translate") diagnose.ppm(fY) lurking(fY, covariate=as.im(function(x,y){x}, square(A)), type="raw") }) # -------------------------------------------------------- # from Andrew Bevan: numerical overflow, ill-conditioned Fisher information local({ SEED <- 42 
nongranite<- owin(poly = list(x = c(0, 8500, 7000, 6400, 6400, 6700, 7000, 7200, 7300, 8000, 8100, 8800, 9500, 10000, 10000, 0), y = c(0, 0, 2000, 3800, 4000, 5000, 6500, 7400, 7500, 8000, 8100, 9000, 9500, 9600, 10000, 10000))) #Trend on raster grid rain <- as.im(X=function(x,y) { x^2 + y^2 }, W=nongranite, dimyx=100) #Generate a point pattern via a Lennard-Jones process set.seed(SEED) mod4<- rmhmodel(cif="lennard", par=list(beta=1, sigma=250, epsilon=2.2), trend=rain, w=nongranite) ljtr<- rmh(mod4, start=list(n.start=80), control=list(p=1, nrep=1e5)) #Fit a point process model to the pattern with rain as a covariate # NOTE INCORRECT TREND FORMULA ljtrmod <- ppm(ljtr, trend= ~ Z, interaction=NULL, data=list(Z=rain)) ss <- summary(ljtrmod) }) local({ # From Ege # Degenerate but non-null argument 'covariates' xx <- list() names(xx) <- character(0) fit <- ppm(cells ~x, covariates = xx) st <- summary(fit) }) #' tests/ppmclass.R #' #' Class support for ppm #' #' $Revision: 1.5 $ $Date: 2020/01/18 01:57:17 $ require(spatstat) local({ #' (1) print.ppm, summary.ppm, print.summary.ppm Z <- as.im(function(x,y){x}, Window(cells)) fitZ <- ppm(cells ~ Z) print(fitZ) print(summary(fitZ)) #' logistic fitl <- ppm(swedishpines ~ x+y, method="logi") print(fitl) print(summary(fitl)) #' Model with covariate arguments f <- function(x,y,b) { x+b } fitf <- ppm(cells ~ f, covfunargs=list(b=1)) print(fitf) print(summary(fitf)) #' Invalid model fitN <- ppm(redwood ~ 1, Strauss(0.1)) print(fitN) print(summary(fitN)) #' standard errors in output fat <- ppm(cells ~ x, Strauss(0.12)) op <- spatstat.options(print.ppm.SE='always') print(fat) spatstat.options(print.ppm.SE='never') print(fat) print(fitZ) spatstat.options(op) ## (2) plot.ppm plot(fitZ) plot(fat, trend=FALSE, cif=FALSE, se=FALSE) ## (3) emend.ppm fitZe <- emend(fitZ, trace=TRUE) ZZ <- Z fitZZ <- ppm(cells ~ Z + ZZ) fitZZe <- emend(fitZZ, trace=TRUE) fitOK <- ppm(redwood ~1, Strauss(0.1), emend=TRUE) print(fitOK) fitNot <- 
ppm(redwood ~1, Strauss(0.1)) fitSlow <- emend(fitNot, trace=TRUE) print(fitSlow) op <- spatstat.options(project.fast=TRUE) fitFast <- emend(fitNot, trace=TRUE) print(fitFast) fitZZe <- emend(fitZZ, trace=TRUE) spatstat.options(op) #' (4) methods for other generics logLik(fitZ, absolute=TRUE) unitname(fitZ) unitname(fat) <- c("metre", "metres") is.expandable(fitf) fit0 <- update(fitZ, . ~ 1) anova(fit0, fitZ, override=TRUE) }) reset.spatstat.options() # # tests/ppmgam.R # # Test ppm with use.gam=TRUE # # $Revision: 1.3 $ $Date: 2015/09/01 02:01:33 $ # require(spatstat) local({ fit <- ppm(nztrees ~s(x,y), use.gam=TRUE) mm <- model.matrix(fit) mf <- model.frame(fit) v <- vcov(fit) prd <- predict(fit) }) #' #' tests/ppmlogi.R #' #' Tests of ppm(method='logi') #' and related code (predict, leverage etc) #' #' $Revision: 1.14 $ $Date: 2020/01/11 09:55:34 $ #' require(spatstat) local({ fit <- ppm(cells ~x, method="logi") f <- fitted(fit) p <- predict(fit) u <- summary(fit) fitS <- ppm(cells ~x, Strauss(0.12), method="logi") fS <- fitted(fitS) pS <- predict(fitS) uS <- summary(fitS) print(uS) plot(leverage(fit)) plot(influence(fit)) plot(dfbetas(fit)) plot(leverage(fitS)) plot(influence(fitS)) plot(dfbetas(fitS)) }) local({ #' same with hard core - A1 is singular fitH <- ppm(cells ~x, Strauss(0.08), method="logi") print(fitH) fH <- fitted(fitH) pH <- predict(fitH) uH <- summary(fitH) print(uH) plot(leverage(fitH)) plot(influence(fitH)) plot(dfbetas(fitH)) }) local({ #' logistic fit to data frame of covariates z <- c(rep(TRUE, 5), rep(FALSE, 5)) df <- data.frame(A=z + 2* runif(10), B=runif(10)) Y <- quadscheme.logi(runifpoint(5), runifpoint(5)) fut <- ppm(Y ~ A+B, data=df, method="logi") sf <- summary(fut) print(sf) }) local({ #' vblogit code, just to check that it runs. 
fee <- ppm(cells ~ x, method="VBlogi", nd=21) print(fee) summary(fee) logLik(fee) AIC(fee) extractAIC(fee) Z <- predict(fee) summary(Z) print(fee$internal$glmfit) # print.vblogit }) # # tests/ppmmarkorder.R # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # # Test that predict.ppm, plot.ppm and plot.fitin # tolerate marks with levels that are not in alpha order # require(spatstat) local({ X <- amacrine levels(marks(X)) <- c("ZZZ", "AAA") fit <- ppm(X ~marks, MultiStrauss(c("ZZZ","AAA"), matrix(0.06, 2, 2))) aa <- predict(fit, type="trend") bb <- predict(fit, type="cif") plot(fit) plot(fitin(fit)) }) # # tests/ppmscope.R # # Test things that might corrupt the internal format of ppm objects # # $Revision: 1.5 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ ## (1) Scoping problem that can arise when ppm splits the data fit <- ppm(bei ~elev, data=bei.extra) mm <- model.matrix(fit) ## (2) Fast update mechanism fit1 <- ppm(cells ~x+y, Strauss(0.07)) fit2 <- update(fit1, ~y) fit3 <- update(fit2, ~x) ## (3) New formula-based syntax attach(bei.extra) slfit <- ppm(bei ~ grad) sl2fit <- update(slfit, ~grad + I(grad^2)) slfitup <- update(slfit, use.internal=TRUE) sl2fitup <- update(sl2fit, use.internal=TRUE) ## (4) anova.ppm fut1 <- ppm(cells ~ 1, Strauss(0.1)) futx <- ppm(cells ~ x, Strauss(0.1)) anova(fut1, test="Chi") anova(futx, test="Chi") fut1a <- ppm(cells ~ 1, Strauss(0.1), rbord=0) anova(fut1a, futx, test="Chi") fut1d <- ppm(cells ~ 1, Strauss(0.1), nd=23) anova(fut1d, futx, test="Chi") ## The following doesn't work yet ## futxyg <- ppm(cells ~ x + s(y), Strauss(0.1), use.gam=TRUE) ## anova(futx, futxyg) fatP <- ppm(amacrine ~ marks) fatM <- ppm(amacrine ~ marks, MultiStrauss(matrix(0.07, 2, 2))) anova(fatP, fatM, test="Chi") }) grep# # tests/ppmtricks.R # # Test backdoor exits, hidden options, internals and tricks in ppm # # $Revision: 1.18 $ $Date: 2020/01/10 04:10:34 $ # require(spatstat) local({ ## (1) skip.border fit <- ppm(cells, ~1, Strauss(0.1), 
skip.border=TRUE) ## (2) subset arguments of different kinds fut <- ppm(cells ~ x, subset=(x > 0.5)) fot <- ppm(cells ~ x, subset=(x > 0.5), method="logi") W <- owin(c(0.4, 0.8), c(0.2, 0.7)) fut <- ppm(cells ~ x, subset=W) fot <- ppm(cells ~ x, subset=W, method="logi") V <- as.im(inside.owin, Window(cells), w=W) fet <- ppm(cells ~ x, subset=V) fet <- ppm(cells ~ x, subset=V, method="logi") ## (3) profilepl -> ppm ## uses 'skip.border' and 'precomputed' ## also tests scoping for covariates splants <- split(ants) mess <- splants[["Messor"]] cats <- splants[["Cataglyphis"]] ss <- data.frame(r=seq(60,120,by=20),hc=29/6) dM <- distmap(mess,dimyx=256) mungf <- profilepl(ss, StraussHard, cats ~ dM) mungp <- profilepl(ss, StraussHard, trend=~dM, Q=cats) ## (4) splitting large quadschemes into blocks mop <- spatstat.options(maxmatrix=5000) qr <- quadBlockSizes(quadscheme(cells)) pr <- predict(ppm(cells ~ x, AreaInter(0.05))) spatstat.options(mop) qr <- quadBlockSizes(quadscheme(cells)) ## (5) shortcuts in summary.ppm ## and corresponding behaviour of print.summary.ppm print(summary(fit, quick=TRUE)) print(summary(fit, quick="entries")) print(summary(fit, quick="no prediction")) print(summary(fit, quick="no variances")) ## (6) suffstat.R fitP <- update(fit, Poisson()) suffstat.poisson(fitP, cells) fit0 <- killinteraction(fit) suffstat.poisson(fit0, cells) ## (7) various support for class ppm fut <- kppm(redwood ~ x) A <- quad.ppm(fut) Z <- as.im(function(x,y){x}, Window(cells)) fitZ <- ppm(cells ~ Z) U <- getppmOriginalCovariates(fitZ) ## (8) support for class profilepl rr <- data.frame(r=seq(0.05, 0.15, by=0.02)) ps <- profilepl(rr, Strauss, cells) ## plot(ps) ## covered in plot.profilepl.Rd simulate(ps, nrep=1e4) parameters(ps) fitin(ps) predict(ps, type="cif") ## (9) class 'plotppm' fut <- ppm(amacrine ~ marks + polynom(x,y,2), Strauss(0.07)) p <- plot(fut, plot.it=FALSE) print(p) plot(p, how="contour") plot(p, how="persp") ## (10) ppm -> mpl.engine -> mpl.prepare fit <- 
ppm(cells, NULL) fit <- ppm(cells ~ x, clipwin=square(0.7)) fit <- ppm(cells ~ x, subset=square(0.7)) DG <- as.im(function(x,y){x+y < 1}, square(1)) fit <- ppm(cells ~ x, subset=DG) fit <- ppm(cells ~ x, GLM=glm) fit <- ppm(cells ~ x, famille=quasi(link='log', variance='mu')) fit <- ppm(cells ~ x, Hardcore(0.07), skip.border=TRUE, splitInf=TRUE) ## (11) unidentifiable model (triggers an error in ppm) Q <- quadscheme(cells) M <- mpl.prepare(Q, cells, as.ppp(Q), trend=~1, covariates=NULL, interaction=Hardcore(0.3), correction="none") }) reset.spatstat.options() #' #' tests/ppp.R #' #' $Revision: 1.9 $ $Date: 2019/12/03 03:05:34 $ #' #' Untested cases in ppp() or associated code require(spatstat) local({ X <- runifpoint(10, letterR) Y <- runifpoint(10, complement.owin(letterR)) #' test handling of points out-of-bounds df <- rbind(as.data.frame(X), as.data.frame(Y)) A <- ppp(df$x, df$y, window=letterR, marks=1:20) #' test handling of points with bad coordinates df$x[1:3] <- c(Inf, NA, NaN) df$y[18:20] <- c(Inf, NA, NaN) B <- ppp(df$x, df$y, window=letterR, marks=1:20) D <- ppp(df$x, df$y, window=letterR, marks=data.frame(id=1:20, u=runif(20))) #' test print/summary/plot methods on these bad objects print(A) print(B) print(D) print(summary(A)) print(summary(B)) print(summary(D)) plot(A) plot(B) plot(D) plot(attr(A, "rejects")) plot(attr(B, "rejects")) plot(attr(D, "rejects")) #' subset operator --- cases not covered elsewhere #' subset index is a logical image Z <- distmap(letterR, invert=TRUE) V <- (Z > 0.2) XV <- X[V] #' multiple columns of marks fun3 <- finpines[1:3] #' multiple columns of marks, one of which is a factor U <- finpines marks(U)[,2] <- factor(c(rep("A", 60), rep("B", npoints(U)-60))) UU <- U[1:3, drop=TRUE] #' cut.ppp CU <- cut(U, "height") CU <- cut(U, breaks=3) #' cases of [<-.ppp set.seed(999) X <- cells B <- square(0.2) X[B] <- runifpoint(3, B) #' checking 'value' Y <- flipxy(X) X[B] <- Y[square(0.3)] # deprecated use of second argument X[,1:4] <- 
runifpoint(3) # deprecated X[,B] <- runifpoint(3, B) # deprecated X[1:3, B] <- runifpoint(3, B) # deprecated but does not crash #' test as.ppp for spatial package if it is not installed FR <- Frame(letterR) as.ppp(list(x=X$x, y=X$y, xl=FR$xrange[1], xu=FR$xrange[2], yl=FR$yrange[1], yu=FR$yrange[2])) #' various utilities periodify(cells, 2) periodify(demopat, 2) #' a <- multiplicity(finpines) a <- multiplicity(longleaf) ## superimpose.ppp, extra cases X <- runifpoint(20) A <- superimpose(cells, X, W="convex") A <- superimpose(cells, X, W=ripras) B <- superimpose(concatxy(cells), concatxy(X), W=NULL) ## superimpose.splitppp Y <- superimpose(split(amacrine)) ## catch outdated usage of scanpp d <- system.file("rawdata", "amacrine", package="spatstat.data") if(nzchar(d)) { W <- owin(c(0, 1060/662), c(0, 1)) Y <- scanpp("amacrine.txt", dir=d, window=W, multitype=TRUE) print(Y) } ## (bad) usage of cobble.xy xx <- runif(10) yy <- runif(10) W1 <- cobble.xy(xx, yy) W2 <- cobble.xy(xx, yy, boundingbox) Wnope <- cobble.xy(xx, yy, function(x,y) {cbind(x,y)}, fatal=FALSE) }) # # tests/ppx.R # # Test operations for ppx objects # # $Revision: 1.5 $ $Date: 2019/01/02 07:58:20 $ # require(spatstat) local({ #' general tests df <- data.frame(x=c(1,2,2,1)/4, y=c(1,2,3,1)/4, z=c(2,3,4,3)/5) X <- ppx(data=df, coord.type=rep("s", 3), domain=box3()) unique(X) duplicated(X) anyDuplicated(X) multiplicity(X) uniquemap(X) print(X) summary(X) plot(X) domain(X) unitname(X) <- c("metre", "metres") unitname(X) #' subset operator X[integer(0)] Y <- X %mark% data.frame(a=df$x, b=1:4) Y[1:2] Y[FALSE] marks(Y) <- as.data.frame(marks(Y)) Y[integer(0)] Y[1:2] Y[FALSE] #' two dimensional A <- ppx(data=df[,1:2], coord.type=rep("s", 2), domain=square(1)) plot(A) B <- ppx(data=df[,1:2], coord.type=rep("s", 2), domain=NULL) plot(B) #' one dimensional E <- ppx(data=data.frame(x=runif(10))) plot(E) #' bug stopifnot(identical(unmark(chicago[1]), unmark(chicago)[1])) #' ppx with zero points U <- 
chicago[integer(0)] V <- U %mark% 1 V <- U %mark% factor("a") #' simplify lower-dimensional patterns X3 <- ppx(data=df, coord.type=rep("s", 3), domain=box3(), simplify=TRUE) stopifnot(is.pp3(X3)) X2 <- ppx(data=df[,1:2], coord.type=rep("s", 2), domain=square(1), simplify=TRUE) stopifnot(is.ppp(X2)) #' marks<-.ppx M <- as.matrix(X) marks(X) <- df[,1] marks(X) <- df[,integer(0)] #' trivial cases of random generators B4 <- boxx(0:1, 0:1, 0:1, 0:1) Z0 <- runifpointx(0, domain=B4, nsim=2) Z1 <- runifpointx(1, domain=B4, nsim=2) }) # # tests/prediction.R # # Things that might go wrong with predict() # # $Revision: 1.19 $ $Date: 2020/02/06 05:38:29 $ # require(spatstat) local({ # test of 'covfunargs' f <- function(x,y,a){ y - a } fit <- ppm(cells ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) p <- predict(fit) # prediction involving 0 * NA qc <- quadscheme(cells, nd=10) r <- minnndist(as.ppp(qc))/10 fit <- ppm(qc ~ 1, Strauss(r)) # model has NA for interaction coefficient p1 <- predict(fit) p2 <- predict(fit, type="cif", ngrid=10) stopifnot(all(is.finite(as.matrix(p1)))) stopifnot(all(is.finite(as.matrix(p2)))) # test of 'new.coef' mechanism fut <- ppm(cells ~ x, Strauss(0.15), rbord=0) p0 <- predict(fut, type="cif") pe <- predict(fut, type="cif", new.coef=coef(fut)) pn <- predict(fut, type="cif", new.coef=unname(coef(fut))) if(max(abs(pe-p0)) > 0.01) stop("new.coef mechanism is broken!") if(max(abs(pn-p0)) > 0.01) stop("new.coef mechanism gives wrong answer, for unnamed vectors") #' adaptcoef a <- c(A=1,B=2,Z=42) b <- c(B=41,A=0) ab <- adaptcoef(a, b, drop=TRUE) # tests of relrisk.ppm fut <- ppm(amacrine ~ x * marks) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) fut <- ppm(sporophores ~ marks + x) a <- relrisk(fut, control=2, relative=TRUE) a <- relrisk(fut, se=TRUE) a <- relrisk(fut, relative=TRUE, se=TRUE) ## untested cases of predict.ppm fit0 <- ppm(cells) a <- predict(fit0, 
interval="confidence") a <- predict(fit0, interval="confidence", type="count") fit <- ppm(cells ~ x) b <- predict(fit, se=TRUE, locations=cells) b <- predict(fit, se=TRUE, interval="confidence") b <- predict(fit, type="count", se=TRUE) b <- predict(fit, type="count", window=square(0.5), se=TRUE) b <- predict(fit, type="count", window=quadrats(cells, 3), se=TRUE) d <- predict(fit, type="count", interval="prediction", se=TRUE) d <- predict(fit, type="count", interval="confidence", se=TRUE) d <- predict(fit, interval="confidence", se=TRUE) foot <- ppm(cells ~ x, StraussHard(0.12)) d <- predict(foot, ignore.hardcore=TRUE) dX <- predict(foot, ignore.hardcore=TRUE, locations=cells) ## superseded usages b <- predict(fit, type="se", getoutofjail=TRUE) b <- predict(fit, type="se", locations=cells) # warning b <- predict(fit, total=TRUE) b <- predict(fit, total=square(0.5)) b <- predict(fit, total=quadrats(cells, 3)) ## supporting code u <- model.se.image(fit, square(0.5)) u <- model.se.image(fit, square(0.5), what="cv") u <- model.se.image(fit, square(0.5), what="ce") co <- c(Intercept=5, slope=3, kink=2) re <- c("Intercept", "slope") a <- fill.coefs(co, re) # warning b <- fill.coefs(co, rev(names(co))) d <- fill.coefs(co, letters[1:3]) ## model matrix etc v <- model.frame(ppm(cells)) fut <- ppm(cells ~ x, Strauss(0.1)) v <- model.matrix(fut, subset=(x<0.5), keepNA=FALSE) df <- data.frame(x=runif(10), y=runif(10), Interaction=sample(0:1, 10, TRUE)) m10 <- PPMmodelmatrix(fut, data=df) mmm <- PPMmodelmatrix(fut, Q=quad.ppm(fut)) #' effectfun for Gibbs effectfun(fut, "x") effectfun(fut, "x", se.fit=TRUE) #' implicit covariate when there is only one effectfun(fut) effectfun(fut, se.fit=TRUE) #' given covariate dlin <- distfun(copper$SouthLines) copfit <- ppm(copper$SouthPoints ~ dlin, Geyer(1,1)) effectfun(copfit, "dlin") effectfun(copfit) #' covariate that is not used in model effectfun(fut, "y", x=0) futS <- ppm(cells ~ 1, Strauss(0.1)) effectfun(futS, "x") effectfun(futS, 
"y") #' factor covariate fot <- ppm(amacrine~x+marks) effectfun(fot, "marks", x=0.5, se.fit=TRUE) #' covariate retained but not used W <- Window(swedishpines) a <- solist(A=funxy(function(x,y){x < 20}, W), B=funxy(function(x,y){factor(x < 20)}, W)) fvt <- ppm(swedishpines ~ A, data=a, allcovar=TRUE) effectfun(fvt, "A", se.fit=TRUE) effectfun(fvt, "B", A=TRUE, se.fit=TRUE) ## ppm with covariate values in data frame X <- rpoispp(42) Q <- quadscheme(X) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } Zvalues <- weirdfunction(x.quad(Q), y.quad(Q)) fot <- ppm(Q ~ y + Z, data=data.frame(Z=Zvalues)) effectfun(fot, "y", Z=0) effectfun(fot, "Z", y=0) #' multitype modX <- ppm(amacrine ~ polynom(x,2)) effectfun(modX) effectfun(modX, "x") modXM <- ppm(amacrine ~ marks*polynom(x,2)) effectfun(modXM, "x", marks="on") modXYM <- ppm(amacrine ~ marks*polynom(x,y,2)) effectfun(modXYM, "x", y=0, marks="on") df <- as.data.frame(simulate(modXM, drop=TRUE)) df$marks <- as.character(df$marks) dfpr <- predict(modXM, locations=df) }) # # tests/project.ppm.R # # $Revision: 1.6 $ $Date: 2015/08/27 08:19:03 $ # # Tests of projection mechanism # require(spatstat) local({ chk <- function(m) { if(!valid.ppm(m)) stop("Projected model was still not valid") return(invisible(NULL)) } # a very unidentifiable model fit <- ppm(cells ~Z, Strauss(1e-06), covariates=list(Z=0)) chk(emend(fit)) # multitype r <- matrix(1e-06, 2, 2) fit2 <- ppm(amacrine ~1, MultiStrauss(types=c("off", "on"), radii=r)) chk(emend(fit2)) # complicated multitype fit3 <- ppm(amacrine ~1, MultiStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) chk(emend(fit3)) #' code coverage op <- spatstat.options(project.fast=TRUE) fut <- emend(fit, trace=TRUE) chk(fut) spatstat.options(op) # hierarchical ra <- r r[2,1] <- NA fit4 <- ppm(amacrine ~1, HierStrauss(types=c("off", "on"), radii=r)) chk(emend(fit4)) # complicated hierarchical fit5 <- ppm(amacrine ~1, HierStraussHard(types=c("off", "on"), iradii=r, hradii=r/5)) 
chk(emend(fit5)) # hybrids r0 <- min(nndist(redwood)) ra <- 1.25 * r0 rb <- 0.8 * r0 f1 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Geyer(0.1, 2)), project=TRUE) chk(f1) f2 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f2) f3 <- ppm(redwood ~1, Hybrid(A=Strauss(ra), B=Strauss(0.1)), project=TRUE) chk(f3) f4 <- ppm(redwood ~1, Hybrid(A=Strauss(rb), B=Strauss(0.1)), project=TRUE) chk(f4) f5 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Strauss(0.1)), project=TRUE) chk(f5) f6 <- ppm(redwood ~1, Hybrid(A=Hardcore(rb), B=Geyer(0.1, 2)), project=TRUE) chk(f6) f7 <- ppm(redwood ~1, Hybrid(A=Geyer(rb, 1), B=Strauss(0.1)), project=TRUE) chk(f7) }) reset.spatstat.options() spatstat/tests/testsL.R0000644000176200001440000007014513616744211014674 0ustar liggesusers#' #' tests/layered.R #' #' Tests of 'layered' class #' #' $Revision: 1.1 $ $Date: 2018/07/14 06:23:45 $ #' require(spatstat) local({ D <- distmap(cells) L <- layered(D, cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) #' plot(L, which=2, plotargs=list(list(pch=3))) plot(L, plotargs=list(list(pch=3))) #' W <- as.owin(L) V <- domain(L) #' methods L2 <- L[square(0.5)] Lr <- reflect(L) Lf <- flipxy(L) Ls <- scalardilate(L, 2) La <- shift(L, origin="midpoint") Lo <- rotate(L, pi/3, origin="bottomleft") Lu <- rescale(L, 0.1, "parsec") #' as.layered M <- as.layered(finpines) M2 <- as.layered(split(amacrine)) }) ## ## tests/legacy.R ## ## Test that current version of spatstat is compatible with outmoded usage ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ local({ require(spatstat) ## (1) Old syntax of ppm ppm(cells, ~x) ## (2) Old syntax of MultiStrauss etc. 
r <- matrix(3, 2, 2) a <- MultiStrauss( , r) a <- MultiStrauss(NULL, r) a <- MultiHard(, r) h <- r/2 a <- MultiStraussHard( , r, h) NULL }) #' #' tests/leverinf.R #' #' leverage and influence for Gibbs models #' #' $Revision: 1.28 $ $Date: 2020/02/06 08:03:59 $ #' require(spatstat) local({ cat("Running non-sparse algorithm...", fill=TRUE) # original non-sparse algorithm Leverage <- function(...) leverage(..., sparseOK=FALSE) Influence <- function(...) influence(..., sparseOK=FALSE) Dfbetas <- function(...) dfbetas(..., sparseOK=FALSE) # Strauss()$delta2 fitS <- ppm(cells ~ x, Strauss(0.12), rbord=0) levS <- Leverage(fitS) infS <- Influence(fitS) dfbS <- Dfbetas(fitS) # Geyer()$delta2 fitG <- ppm(redwood ~ 1, Geyer(0.1, 2), rbord=0) levG <- Leverage(fitG) infG <- Influence(fitG) # AreaInter()$delta2 fitA <- ppm(cells ~ 1, AreaInter(0.07), rbord=0, nd=11) levA <- Leverage(fitA) infA <- Influence(fitA) # pairwise.family$delta2 fitD <- ppm(cells ~ 1, DiggleGatesStibbard(0.12), rbord=0) levD <- Leverage(fitD) infD <- Influence(fitD) # DiggleGratton() special code fitDG <- ppm(cells ~ 1, DiggleGratton(0.05, 0.12), rbord=0) levDG <- Leverage(fitDG) infDG <- Influence(fitDG) # ppmInfluence; offset is present; coefficient vector has length 0 fitH <- ppm(cells ~ 1, Hardcore(0.07)) levH <- Leverage(fitH) infH <- Influence(fitH) # ppmInfluence; hard core fitSH <- ppm(cells ~ 1, StraussHard(0.07, 0.01)) levSH <- Leverage(fitSH) infSH <- Influence(fitSH) # ppmInfluence; offset is present; coefficient vector has length 1 fitHx <- ppm(cells ~ x, Hardcore(0.07), rbord=0) levHx <- Leverage(fitHx) infHx <- Influence(fitHx) ## multitype futAm <- ppm(amacrine ~ x + marks, Strauss(0.07)) levAm <- leverage(futAm) ## ......... class support ............................. 
## other methods for classes leverage.ppm and influence.ppm ## not elsewhere tested cat("Testing class support...", fill=TRUE) w <- domain(levS) w <- Window(infS) vv <- shift(levS, c(1.2, 1.3)) vv <- shift(infS, c(1.2, 1.3)) A <- quadrats(Window(cells), 2) a <- integral(levS,domain=A) b <- integral(infS,domain=A) u <- Smooth(levS, sigma=0.07) v <- Smooth(infS, sigma=0.1) ## plot options plot(levS, what="exact") plot(levS, what="nearest") contour(levS, what="nearest") persp(levS, what="nearest") ## plotting for multitype models plot(levAm) contour(levAm) persp(levAm) plot(levAm, multiplot=FALSE) contour(levAm, multiplot=FALSE) ## .......... compare algorithms ......................... ## divide and recombine algorithm cat("Reduce maximum block side to 50,000 ...", fill=TRUE) op <- spatstat.options(maxmatrix=50000) ## non-sparse levSB <- Leverage(fitS) infSB <- Influence(fitS) dfbSB <- Dfbetas(fitS) chk <- function(x, y, what, from="single-block and multi-block", thresh=1e-12) { if(max(abs(x-y)) > thresh) stop(paste("Different results for", what, "obtained from", from, "algorithms"), call.=FALSE) invisible(NULL) } cat("Compare single-block to multi-block...", fill=TRUE) chk(marks(as.ppp(infS)), marks(as.ppp(infSB)), "influence") chk(as.im(levS), as.im(levSB), "leverage") chk(dfbS$val, dfbSB$val, "dfbetas$value") chk(dfbS$density, dfbSB$density, "dfbetas$density") # also check case of zero cif cat("Check zero cif cases...", fill=TRUE) levHB <- Leverage(fitH) infHB <- Influence(fitH) dfbHB <- Dfbetas(fitH) levHxB <- Leverage(fitHx) infHxB <- Influence(fitHx) dfbHxB <- Dfbetas(fitHx) ## run all code segments Everything <- function(model, ...) 
{ ppmInfluence(model, ..., what="all") } cat("Run full code on AreaInteraction model...", fill=TRUE) pmiA <- Everything(fitA) ## sparse algorithm, with blocks cat("Run sparse algorithm with blocks...", fill=TRUE) pmiSSB <- Everything(fitS, sparseOK=TRUE) # also check case of zero cif pmiHSB <- Everything(fitH, sparseOK=TRUE) pmiSHSB <- Everything(fitSH, sparseOK=TRUE) pmiHxSB <- Everything(fitHx, sparseOK=TRUE) cat("Reinstate maxmatrix...", fill=TRUE) spatstat.options(op) ## sparse algorithm, no blocks cat("Compare sparse and non-sparse results...", fill=TRUE) pmi <- Everything(fitS, sparseOK=TRUE) levSp <- pmi$leverage infSp <- pmi$influence dfbSp <- pmi$dfbetas chks <- function(...) chk(..., from="sparse and non-sparse") chks(marks(as.ppp(infS)), marks(as.ppp(infSp)), "influence") chks(as.im(levS), as.im(levSp), "leverage") chks(dfbS$val, dfbSp$val, "dfbetas$value") chks(dfbS$density, dfbSp$density, "dfbetas$density") #' case of zero cif cat("zero cif...", fill=TRUE) pmiH <- Everything(fitH, sparseOK=TRUE) pmiSH <- Everything(fitSH, sparseOK=TRUE) pmiHx <- Everything(fitHx, sparseOK=TRUE) #' other code blocks - check execution only cat("other code blocks...", fill=TRUE) a <- Everything(fitS) a <- Everything(fitS, method="interpreted") a <- Everything(fitS, method="interpreted", entrywise=FALSE) a <- Everything(fitS, entrywise=FALSE) #' zero cif b <- Everything(fitSH) b <- Everything(fitSH, method="interpreted") b <- Everything(fitSH, method="interpreted", entrywise=FALSE) b <- Everything(fitSH, entrywise=FALSE) #' NOTE: code for irregular parameters is tested below, and in 'make bookcheck' ## ........... logistic fits ....................... 
cat("Logistic fits...", fill=TRUE) #' special algorithm for delta2 fitSlogi <- ppm(cells ~ x, Strauss(0.12), rbord=0, method="logi") pmiSlogi <- Everything(fitSlogi) #' special algorithm for delta2 fitGlogi <- ppm(redwood ~ 1, Geyer(0.1, 2), rbord=0, method="logi") pmiGlogi <- Everything(fitGlogi) #' generic algorithm for delta2 fitDlogi <- ppm(cells ~ 1, DiggleGatesStibbard(0.12), rbord=0, method="logi") pmiDlogi <- Everything(fitDlogi) #' generic algorithm for delta2 : offset; zero-dimensional fitHlogi <- ppm(cells ~ 1, Hardcore(0.07), method="logi") pmiHlogi <- Everything(fitHlogi) #' generic algorithm for delta2 : offset; 1-dimensional fitHxlogi <- ppm(cells ~ x, Hardcore(0.07), rbord=0, method="logi") pmiHxlogi <- Everything(fitHxlogi) #' plotting plot(leverage(fitSlogi)) plot(influence(fitSlogi)) plot(dfbetas(fitSlogi)) #' other code blocks - check execution only cat("Other code blocks...", fill=TRUE) b <- Everything(fitSlogi) # i.e. full set of results b <- Everything(fitSlogi, method="interpreted") b <- Everything(fitSlogi, method="interpreted", entrywise=FALSE) b <- Everything(fitSlogi, entrywise=FALSE) #' irregular parameters cat("Irregular parameters...", fill=TRUE) ytoa <- function(x,y, alpha=1) { y^alpha } lam <- function(x,y,alpha=1) { exp(4 + y^alpha) } set.seed(90210) X <- rpoispp(lam, alpha=2) iScor <- list(alpha=function(x,y,alpha) { alpha * y^(alpha-1) } ) iHess <- list(alpha=function(x,y,alpha) { alpha * (alpha-1) * y^(alpha-2) } ) gogo <- function(tag, ..., iS=iScor, iH=iHess) { cat(tag, fill=TRUE) #' compute all leverage+influence terms ppmInfluence(..., what="all", iScore=iS, iHessian=iH) } gogogo <- function(hdr, fit) { cat(hdr, fill=TRUE) force(fit) #' try all code options d <- gogo("a", fit) d <- gogo("b", fit, method="interpreted") d <- gogo("c", fit, method="interpreted", entrywise=FALSE) d <- gogo("d", fit, entrywise=FALSE) invisible(NULL) } gogogo("Offset model...", ippm(X ~ offset(ytoa), start=list(alpha=1), iterlim=40)) 
gogogo("Offset model (logistic) ...", ippm(X ~ offset(ytoa), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset+x model...", ippm(X ~ x + offset(ytoa), start=list(alpha=1), iterlim=40)) gogogo("Offset+x model (logistic) ...", ippm(X ~ x + offset(ytoa), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset model Strauss ...", ippm(X ~ offset(ytoa), Strauss(0.07), start=list(alpha=1), iterlim=40)) gogogo("Offset model Strauss (logistic) ...", ippm(X ~ offset(ytoa), Strauss(0.07), start=list(alpha=1), method="logi", iterlim=40)) gogogo("Offset+x model Strauss ...", ippm(X ~ x + offset(ytoa), Strauss(0.07), start=list(alpha=1), iterlim=40)) gogogo("Offset+x model Strauss (logistic)...", ippm(X ~ x + offset(ytoa), Strauss(0.07), start=list(alpha=1), method="logi", iterlim=40)) #' set.seed(452) foo <- ppm(cells ~ 1, Strauss(0.15), method="ho", nsim=5) aa <- Everything(foo) #' Gradient and Hessian obtained by symbolic differentiation f <- deriv(expression((1+x)^a), "a", function.arg=c("x", "y", "a"), hessian=TRUE) #' check they can be extracted fit <- ippm(cells ~offset(f), start=list(a=0.7)) Everything(fit) }) reset.spatstat.options() ## ## tests/linalgeb.R ## ## checks validity of linear algebra code ## ## $Revision: 1.5 $ $Date: 2020/01/05 02:34:17 $ ## require(spatstat) local({ p <- 3 n <- 4 k <- 2 x <- matrix(1:(n*p), n, p) w <- runif(n) y <- matrix(1:(2*n), n, k) zUS <- zWS <- matrix(0, p, p) zUA <- zWA <- matrix(0, p, k) for(i in 1:n) { zUS <- zUS + outer(x[i,],x[i,]) zWS <- zWS + w[i] * outer(x[i,],x[i,]) zUA <- zUA + outer(x[i,],y[i,]) zWA <- zWA + w[i] * outer(x[i,],y[i,]) } if(!identical(zUS, sumouter(x))) stop("sumouter gives incorrect result in Unweighted Symmetric case") if(!identical(zWS, sumouter(x,w))) stop("sumouter gives incorrect result in Weighted Symmetric case") if(!identical(zUA, sumouter(x, y=y))) stop("sumouter gives incorrect result in Unweighted Asymmetric case") if(!identical(zWA, sumouter(x, w, y))) stop("sumouter gives 
incorrect result in Weighted Asymmetric case") x <- array(as.numeric(1:(p * n * n)), dim=c(p, n, n)) w <- matrix(1:(n*n), n, n) y <- matrix(numeric(p * p), p, p) for(i in 1:n) for(j in (1:n)[-i]) y <- y + w[i,j] * outer(x[,i,j], x[,j,i]) z <- sumsymouter(x, w) if(!identical(y,z)) stop("sumsymouter gives incorrect result") #' power of complex matrix M <- diag(c(1,-1)) V <- matrixsqrt(M) V <- matrixinvsqrt(M) V <- matrixpower(M, 1/2) #' infrastructure A <- matrix(1:12, 3, 4) B <- matrix(1:8, 4, 2) check.mat.mul(A, B) check.mat.mul(A, B[,1]) check.mat.mul(A, A, fatal=FALSE) }) ## ## tests/localpcf.R ## ## temporary test file for localpcfmatrix ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ a <- localpcfmatrix(redwood) a plot(a) a[, 3:5] }) # # tests/lppstuff.R # # Tests for lpp code # # $Revision: 1.50 $ $Date: 2020/02/02 03:25:57 $ require(spatstat) local({ #' lpp class support X <- runiflpp(5, simplenet) Xone <- X %mark% runif(5) Xtwo <- X %mark% data.frame(a=1:5, b=runif(5)) print(summary(Xone)) print(summary(Xtwo)) plot(X, show.window=TRUE) plot(Xone) plot(Xtwo, do.several=FALSE) #' geometry etc rotate(X, pi/3, centre=c(0.2,0.3)) superimpose.lpp(L=simplenet) W <- Window(X) #' cut.lpp tes <- lineardirichlet(runiflpp(4, simplenet)) f <- as.linfun(tes) Z <- as.linim(f) cut(X, tes) cut(X, f) cut(X, Z) #' check 'normalise' option in linearKinhom fit <- lppm(X ~x) K <- linearKinhom(X, lambda=fit, normalise=FALSE) plot(K) g <- linearpcfinhom(X, lambda=fit, normalise=FALSE) plot(g) K <- linearKinhom(X, lambda=fit, normalise=TRUE) plot(K) g <- linearpcfinhom(X, lambda=fit, normalise=TRUE) plot(g) ## other code blocks K <- linearKinhom(X, lambda=fit, correction="none", ratio=TRUE) g <- linearpcf(X, correction="none", ratio=TRUE) g1 <- linearpcf(X[1], ratio=TRUE) K1 <- linearKcross(dendrite[1], "thin", "thin", ratio=TRUE) # check empty patterns OK X0 <- runiflpp(0, simplenet) print(X0) g <- linearpcf(X0, ratio=TRUE) ## nearest neighbour distances 
eps <- sqrt(.Machine$double.eps) f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n] }, n=k+1) } g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) } XX <- spiders nn <- nndist(XX) nnP <- f(pairdist(XX), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.lpp does not agree with pairdist.lpp") nw <- nnwhich(XX) nwP <- g(pairdist(XX), 1) if(any(nw != nwP)) stop("nnwhich.lpp does not agree with pairdist") #' code blocks in nndist.lpp/nnwhich.lpp #' non-sparse network, interpreted code Ad <- nndist(spiders, method="interpreted") Aw <- nnwhich(spiders, method="interpreted") #' sparse network, older C code opa <- spatstat.options(Cnndistlpp=FALSE) Bd <- nndist(dendrite) Bw <- nnwhich(dendrite) spatstat.options(opa) #' undefined nearest neighbours Ed <- nndist(spiders[1:3], k=1:3) Ew <- nnwhich(spiders[1:3], k=1:3) #' trivial cases in nncross.lpp a <- nncross(runiflpp(0, simplenet), runiflpp(1, simplenet), what="which", format="list")$which a <- nncross(runiflpp(0, simplenet), runiflpp(1, simplenet), what="dist", format="list")$dist #' compare algorithms ZZ <- split(chicago) XX <- ZZ$damage YY <- ZZ$assault op <- spatstat.options(Cnncrosslpp=FALSE) a <- nncross(XX, YY) spatstat.options(Cnncrosslpp=TRUE) b <- nncross(XX, YY) if(any(a$which != b$which)) stop("Inconsistent values of nncross.lpp()$which from different C code") if(max(abs(a$dist - b$dist)) > eps) stop("Inconsistent values of nncross.lpp()$dist from different C code") spatstat.options(Cnncrosslpp=TRUE) b2 <- nncross(XX, YY, k=1:2, what="which") if(any(b2$which.1 != b$which)) stop("inconsistent values of nncross.lpp()$which from k=1:2 and k=1") a2 <- nncross(XX, YY, k=1:2, what="dist") if(max(abs(a2$dist.1 - a$dist)) > eps) stop("Inconsistent values of nncross.lpp()$dist from k=1:2 and k=1") spatstat.options(Cnncrosslpp=TRUE) ii <- seq_len(npoints(XX)) w1 <- nnwhich(XX) w2 <- nncross(XX, XX, iX=ii, iY=ii, what="which") w3 <- nncross(XX, XX, iX=ii, iY=ii, what="which", 
method="interpreted") if(any(w1 != w2)) stop("nnwhich.lpp disagrees with nncross.lpp(iX, iY)") if(any(w2 != w3)) stop("Different results for nncross.lpp(iX, iY, 'which') using R and C") d1 <- nndist(XX) d2 <- nncross(XX, XX, iX=ii, iY=ii, what="dist") d3 <- nncross(XX, XX, iX=ii, iY=ii, what="dist", method="interpreted") if(max(abs(d1-d2)) > eps) stop("nndist.lpp disagrees with nncross.lpp(iX, iY)") if(max(abs(d2-d3)) > eps) stop("Different results for nncross.lpp(iX, iY, 'dist') using R and C") spatstat.options(Cnncrosslpp=FALSE) w4 <- nncross(XX, XX, iX=ii, iY=ii, what="which") d4 <- nncross(XX, XX, iX=ii, iY=ii, what="dist") if(any(w2 != w4)) stop("Different results for nncross.lpp(iX, iY, 'which') fast and slow C") if(max(abs(d2-d4)) > eps) stop("Different results for nncross.lpp(iX, iY, 'dist') fast and slow C") spatstat.options(Cnncrosslpp=TRUE) spatstat.options(op) reset.spatstat.options() # test handling marginal cases xyd <- nncross(XX, YY[1]) A <- runiflpp(5, simplenet) B <- runiflpp(2, simplenet) aaa <- nncross(A,B,k=3:5) #' all undefined aaa <- nncross(A,B,k=1:4) #' some undefined spatstat.options(Cnncrosslpp=FALSE) aaa <- nncross(A,B,k=3:5) aaa <- nncross(A,B,k=1:4) bbb <- nncross(B,A, iX=1:2, iY=1:5) # another code block spatstat.options(Cnncrosslpp=TRUE) reset.spatstat.options() ## as.linnet.psp (Suman's example) Lines <- as.data.frame(as.psp(simplenet)) newseg <- c(Lines[1,1:2], Lines[10,3:4]) Lines <- rbind(Lines, newseg) Y <- as.psp(Lines, window=Window(simplenet)) marks(Y) <- c(3, 4, 5, 5, 3, 4, 5, 5,5, 5,1) Z <- as.linnet(Y) # can crash if marks don't match segments ## Test linnet surgery code SL <- joinVertices(simplenet, matrix(c(2,3), ncol=2)) set.seed(42) X <- runiflpp(30, simplenet) V <- runiflpp(30, simplenet) XV <- insertVertices(X, V) validate.lpp.coords(XV, context="calculated by insertVertices") X0 <- insertVertices(X, x=numeric(0), y=numeric(0)) ## Test [.lpp internal data B <- owin(c(0.1,0.7),c(0.19,0.6)) XB <- X[B] 
validate.lpp.coords(XB, context="returned by [.lpp") ## Tests related to linearK, etc testcountends <- function(X, r=100, s=1) { if(s != 1) { X <- rescale(X, s) r <- r/s } L <- as.linnet(X) n1 <- countends(L, X[1], r) n2 <- npoints(lineardisc(L, X[1], r, plotit=FALSE)$endpoints) if(n1 != n2) stop(paste("Incorrect result from countends:", n1, "!=", n2, paren(paste("scale=", 1/s))), call.=FALSE) } # original scale X <- unmark(chicago) testcountends(X) # finer scale testcountends(X, s=1000) #' disconnected L <- thinNetwork(simplenet, retainedges = -c(3,8)) S <- as.psp(L) x <- midpoints.psp(S)[1] len <- lengths.psp(S)[1] A <- lineardisc(L, x, len, plotit=FALSE) # involves many segments of network B <- lineardisc(L, x, len/5, plotit=FALSE) # involves one segment of network op <- spatstat.options(Ccountends=FALSE) A <- lineardisc(L, x, len, plotit=FALSE) B <- lineardisc(L, x, len/5, plotit=FALSE) spatstat.options(op) reset.spatstat.options() ## Test algorithms for boundingradius.linnet L <- as.linnet(chicago, sparse=TRUE) L$boundingradius <- NULL # artificially remove opa <- spatstat.options(Clinearradius=FALSE) bR <- as.linnet(L, sparse=FALSE)$boundingradius spatstat.options(Clinearradius=TRUE) bC <- as.linnet(L, sparse=FALSE)$boundingradius spatstat.options(opa) if(abs(bR-bC) > 0.001 * (bR+bC)/2) stop("Disagreement between R and C algorithms for boundingradius.linnet", call.=FALSE) ## linnet things is.connected(as.linnet(dendrite)) zik <- rescale(chicago, 39.37/12, "m") Simon <- simplenet unitname(Simon) <- list("metre", "metres", 0.5) b <- rescale(Simon) ds <- density(simplenet, 0.05) ## invoke dist2dpath LS <- as.linnet(simplenet, sparse=TRUE) LF <- as.linnet(LS, sparse=FALSE) ## direct call dist2dpath d <- simplenet$dpath d[!simplenet$m] <- Inf diag(d) <- 0 dd <- dist2dpath(d, method="interpreted") ra <- range(dd - simplenet$dpath) if(max(abs(ra)) > sqrt(.Machine$double.eps)) stop("dist2dpath gives different answers in C and R code") ## integral.linim with missing 
entries xcoord <- linfun(function(x,y,seg,tp) { x }, domain(chicago)) xcoord <- as.linim(xcoord, dimyx=32) integral(xcoord) ## options to plot.linim plot(xcoord, legend=FALSE) plot(xcoord, leg.side="top") plot(xcoord, style="width", leg.side="bottom") ## as.linim.linim xxcc <- as.linim(xcoord) xxcceps <- as.linim(xcoord, eps=15) xxccdel <- as.linim(xcoord, delta=30) df1 <- attr(xxcc, "df") df2 <- attr(xxccdel, "df") df3 <- resampleNetworkDataFrame(df1, df2) ## linim with complex values Zc <- as.im(function(x,y){(x-y) + x * 1i}, Frame(simplenet)) Fc <- linim(simplenet, Zc) print(Fc) print(summary(Fc)) ## linim with df provided Z <- as.im(function(x,y) {x-y}, Frame(simplenet)) X <- linim(simplenet, Z) df <- attr(X, "df") XX <- linim(simplenet, Z, df=df) dfwithout <- df[, colnames(df) != "values"] XXX <- linim(simplenet, Z, df=dfwithout) plot(XXX, zlim=c(-1,1)) plot(XXX, legend=FALSE) plot(XXX, leg.side="bottom") ## lpp with multiple columns of marks M <- chicago marks(M) <- cbind(type=marks(M), data.frame(distnearest=nndist(M))) plot(M, main="") summary(M) MM <- cut(M) #' other cases CX <- cut(chicago) nd <- nndist(spiders) SX <- cut(spiders %mark% nd, breaks=3) SX <- cut(spiders, nd, breaks=c(0,100,200,Inf), include.lowest=TRUE) ## linequad X <- runiflpp(6, simplenet) Y <- X %mark% factor(rep(c("A", "B"), 3)) aX <- linequad(X) aY <- linequad(Y) aXR <- linequad(X, random=TRUE) aYR <- linequad(Y, random=TRUE) P <- as.ppp(X) S <- as.psp(domain(X)) d <- linequad(P, S) oop <- spatstat.options(Clinequad=FALSE) bX <- linequad(X) spatstat.options(oop) ## other internal utilities df <- pointsAlongNetwork(simplenet, 0.05) X <- as.ppp(df[,c("x", "y")], W=Frame(simplenet)) A <- local2lpp(simplenet, seg=df$seg, tp=df$tp, X=X, df.only=FALSE) ## mark-mark scatterplot uses pairdist X <- runiflpp(20, simplenet) %mark% runif(20) markmarkscatter(X, 0.2) markmarkscatter(X[FALSE], 0.1) ## tree branches # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- 
linnet(vertices(simplenet), m) tb <- treebranchlabels(L, 1) X <- runiflpp(50, L) # delete branch B XminusB <- deletebranch(X, "b", tb) # extract branch B XB <- extractbranch(X, "b", tb) ## cases of lintess() A <- lintess(simplenet) # argument df missing S <- as.psp(simplenet) ns <- nsegments(S) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) M <- data.frame(len=lengths.psp(S), ang=angles.psp(S)) V <- lintess(simplenet, df, marks=M) ## methods for class lintess U <- unmark(V) U <- unstack(V) print(summary(V)) W <- Window(V) plot(V, style="image") plot(V, style="width") ## linear tessellations infrastructure nX <- 100 nY <- 20 X <- runiflpp(nX, simplenet) Y <- runiflpp(nY, simplenet) tes <- divide.linnet(Y) cX <- coords(X) iI <- lineartileindex(cX$seg, cX$tp, tes, method="interpreted") iC <- lineartileindex(cX$seg, cX$tp, tes, method="C") iE <- lineartileindex(cX$seg, cX$tp, tes, method="encode") if(!identical(iI,iC)) stop("conflicting results from lineartileindex (interpreted vs C)") if(!identical(iI,iE)) stop("conflicting results from lineartileindex (interpreted vs encoded)") iA <- as.linfun(tes)(X) if(!identical(iI, iA)) stop("disagreement between as.linfun.lintess and lineartileindex") ## intersection of lintess X <- divide.linnet(runiflpp(4, simplenet)) Y <- divide.linnet(runiflpp(3, simplenet)) marks(X) <- factor(letters[c(1,2,1,2)]) marks(Y) <- runif(3) Zmm <- intersect.lintess(X,Y) Zum <- intersect.lintess(unmark(X),Y) Zmu <- intersect.lintess(X,unmark(Y)) }) reset.spatstat.options() local({ #' handling by 'solist', 'unstack', 'plot.solist' etc L <- simplenet X <- runiflpp(5, L) %mark% cbind(a=1:5, b=letters[1:5]) ns <- nsegments(L) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) S <- lintess(L, df) f <- as.linfun(S) g <- as.linfun(S, values=seq_len(nsegments(L))) V <- as.linim(f) Z <- as.linim(g) shebang <- solist(L=L, X=X, S=S, f=f, g=g, V=V, Z=Z) plot(shebang) plot(shebang, valuesAreColours=FALSE) kapow <- unstack(shebang) 
plot(kapow) }) reset.spatstat.options() local({ #' density.lpp X <- runiflpp(5, simplenet) D <- density(X, 0.05, old=TRUE, weights=runif(npoints(X))) # interpreted code D <- density(X, 0.05, finespacing=TRUE) # } D <- density(X, 0.05, eps=0.008) # } code blocks in PDEdensityLPP D <- density(X, 0.05, dimyx=256) # } D <- density(X[FALSE], 0.05) # } #' density.splitppx Y <- split(chicago)[1:3] D <- density(Y, 7) #' densityVoronoi.lpp and related code densityVoronoi(X, f=0) densityVoronoi(X, f=1e-8) densityVoronoi(X, f=1) densityVoronoi(X[FALSE], f=0.5) XX <- X[rep(1:npoints(X), 4)] densityVoronoi(XX, f=0.99999, nrep=5) #' bandwidth selection bw.voronoi(X, nrep=4, prob=c(0.2, 0.4, 0.6)) #' inhomogeneous K and g SD <- split(dendrite) DT <- density(SD[["thin"]], 100, distance="euclidean") DS <- density(SD[["stubby"]], 100, distance="euclidean") Kii <- linearKcross.inhom(dendrite, "thin", "thin", DT, DT) Kij <- linearKcross.inhom(dendrite, "thin", "stubby", DT, DS, correction="none", ratio=TRUE) Kx <- linearKcross.inhom(dendrite[1], "thin", "stubby", DT, DS) gii <- linearpcfcross.inhom(dendrite, "thin", "thin", DT, DT) gij <- linearpcfcross.inhom(dendrite, "thin", "stubby", DT, DS, correction="none", ratio=TRUE) gx <- linearpcfcross.inhom(dendrite[1], "thin", "stubby", DT, DS) }) local({ #' pairs.linim X <- runiflpp(6, simplenet) Z <- density(X, 0.5, distance="euclidean") pairs(solist(Z)) pairs(solist(A=Z)) U <- density(as.ppp(X), 0.5) pairs(solist(U, Z)) pairs(solist(Z, U)) }) local({ #' complex-valued functions and images f <- function(x,y,seg,tp) { x + y * 1i } g <- linfun(f, simplenet) h <- as.linim(g) plot(Re(h)) plot(h) plot(g) integral(h) integral(g) }) local({ ## 'lixellate' ## Cases where no subdivision occurs P <- runiflpp(4, simplenet) A <- lixellate(P, nsplit=1) B <- lixellate(P, eps=2) ## bug in 'lixellate' (Jakob Gulddahl Rasmussen) X <- ppp(c(0,1), c(0,0), owin()) L <- linnet(X, edges = matrix(1:2, ncol=2)) Y <- lpp(X, L) ## The left end point is OK 
lixellate(Y[1], nsplit=30) d <- density(Y[1], .1) ## The right end point gave an error lixellate(Y[2], nsplit=30) d <- density(Y[2], .1) }) local({ ## make some bad data and repair it L <- simplenet ## reverse edges a <- L$from[c(FALSE,TRUE)] L$from[c(FALSE,TRUE)] <- L$to[c(FALSE,TRUE)] L$to[c(FALSE,TRUE)] <- a ## duplicate edges ns <- nsegments(L) ii <- c(seq_len(ns), 2) L$from <- L$from[ii] L$to <- L$to[ii] L$lines <- L$lines[ii] ## Y <- repairNetwork(L) ## add points X <- runiflpp(4, L) Z <- repairNetwork(X) }) local({ #' random generation bugs and code blocks A <- runiflpp(5, simplenet, nsim=2) D <- density(A[[1]], 0.3) B <- rpoislpp(D, nsim=2) stopifnot(is.multitype(rlpp(c(10,5), list(a=D,b=D)))) stopifnot(is.multitype(rlpp(5, list(a=D,b=D)))) stopifnot(is.multitype(rlpp(c(10,5), D))) }) local({ ## rhohat.lppm fut <- lppm(spiders ~ 1) rx <- rhohat(fut, "x") Z <- linfun(function(x,y,seg,tp) { x }, domain(spiders)) rZ <- rhohat(fut, Z) U <- predict(rx) U <- predict(rZ) Y <- simulate(rx) Y <- simulate(rZ) futm <- lppm(chicago ~ x + marks) ry <- rhohat(futm, "y") U <- predict(ry) Y <- simulate(ry) }) #' #' lppmodels.R #' #' Tests of lppm and class support #' #' $Revision: 1.1 $ $Date: 2018/05/13 04:14:28 $ #' require(spatstat) local({ fit0 <- lppm(spiders) fit1 <- lppm(spiders ~ x) fit2 <- lppm(chicago ~ x+y) X <- runiflpp(10, simplenet) Z <- distfun(runiflpp(10, simplenet)) fit3 <- lppm(X ~ Z) summary(fit0) summary(fit1) summary(fit2) summary(fit3) pseudoR2(fit0) pseudoR2(fit1) pseudoR2(fit2) pseudoR2(fit3) Window(fit1) a <- model.images(fit0) a <- model.images(fit1) a <- model.images(fit2) a <- model.images(fit3) b <- model.matrix(fit0) b <- model.matrix(fit1) b <- model.matrix(fit2) b <- model.matrix(fit3) is.multitype(fit0) is.multitype(fit1) is.multitype(fit2) is.multitype(fit3) fit0e <- emend(fit0) fit1e <- emend(fit1) fit2e <- emend(fit2) fit3e <- emend(fit3) #' fundamental utilities: #' evalCovar ycoord <- function(x,y) { y } YS <- as.linim(ycoord, 
L=domain(spiders)) YC <- as.linim(ycoord, L=domain(chicago)) aT <- evalCovar(fit1, YS, interpolate=TRUE) aF <- evalCovar(fit1, YS, interpolate=FALSE) dT <- evalCovar(fit1, ycoord, interpolate=TRUE) dF <- evalCovar(fit1, ycoord, interpolate=FALSE) bT <- evalCovar(fit2, YC, interpolate=TRUE) bF <- evalCovar(fit2, YC, interpolate=FALSE) cT <- evalCovar(fit2, ycoord, interpolate=TRUE) cF <- evalCovar(fit2, ycoord, interpolate=FALSE) }) spatstat/tests/testsUtoZ.R0000644000176200001440000003410613616732353015403 0ustar liggesusers# # tests/undoc.R # # $Revision: 1.12 $ $Date: 2020/01/26 04:38:19 $ # # Test undocumented hacks, experimental code, etc require(spatstat) local({ ## cases of 'pickoption' aliases <- c(Lenin="Ulyanov", Stalin="Djugashvili", Trotsky="Bronstein") surname <- "Trot" pickoption("leader", surname, aliases) pickoption("leader", surname, aliases, exact=TRUE, die=FALSE) ## pixellate.ppp accepts a data frame of weights pixellate(cells, weights=data.frame(a=1:42, b=42:1)) ## test parts of 'rmhsnoop' that don't require interaction with user rmhSnoopEnv(cells, Window(cells), 0.1) ## linim helper functions df <- pointsAlongNetwork(simplenet, 0.2) ## Berman-Turner frame A <- bt.frame(quadscheme(cells), ~x, Strauss(0.07), rbord=0.07) print(A) ## digestCovariates D <- distfun(cells) Z <- distmap(cells) U <- dirichlet(cells) stopifnot(is.scov(D)) stopifnot(is.scov(Z)) stopifnot(is.scov(U)) stopifnot(is.scov("x")) dg <- digestCovariates(D=D,Z=Z,U=U,"x",list(A="x", B=D)) ## a <- getfields(dg, c("A", "D", "niets"), fatal=FALSE) ## util.R gg <- pointgrid(owin(), 7) checkbigmatrix(1000000L, 1000000L, FALSE, TRUE) spatstatDiagnostic("whatever") M <- list(list(a=2, b=FALSE), list(a=2, b=TRUE)) stopifnot(!allElementsIdentical(M)) stopifnot(allElementsIdentical(M, "a")) ## A <- Strauss(0.1) A <- reincarnate.interact(A) ## ## special lists B <- solist(a=cells, b=redwood, c=japanesepines) BB <- as.ppplist(B) BL <- as.layered(B) DB <- as.imlist(lapply(B, density)) 
is.solist(B) is.ppplist(B) is.imlist(DB) ## case of density.ppplist DEB <- density(BB, se=TRUE) ## fft z <- matrix(1:16, 4, 4) a <- fft2D(z, west=FALSE) if(fftwAvailable()) b <- fft2D(z, west=TRUE) ## experimental interactions pot <- function(d, par) { d <= 0.1 } A <- Saturated(pot) print(A) A <- update(A, name="something") ppm(amacrine ~ x, A, rbord=0.1) ## infrastructure of iplot, istat fakepanel <- list(x=redwood, xname="redwood", envel="none", stat="data", sigma=0.08, pcfbw=0.01, simx=rpoispp(ex=redwood, nsim=39)) a <- do.istat(fakepanel) for(envel in c("none", "pointwise", "simultaneous")) { fakepanel$envel <- envel for(stat in c("density", "Kest", "Lest", "pcf", "Kinhom", "Linhom", "Fest", "Gest", "Jest")) { fakepanel$stat <- stat a <- do.istat(fakepanel) } } #' version-checking now <- Sys.Date() versioncurrency.spatstat(now + 80, FALSE) versioncurrency.spatstat(now + 140, FALSE) versioncurrency.spatstat(now + 400, FALSE) versioncurrency.spatstat(now + 1000) #' general Ord interaction gradual <- function(d, pars) { y <- pmax(0, 0.005 - d)/0.005 if(is.matrix(d)) y <- matrix(y, nrow(d), ncol(d)) return(y) } B <- Ord(gradual, "gradual Ord process") }) ## ## tests/updateppm.R ## ## Check validity of update.ppm ## ## $Revision: 1.4 $ $Date: 2016/03/08 06:30:46 $ local({ require(spatstat) h <- function(m1, m2) { mc <- deparse(sys.call()) cat(paste(mc, "\t... 
")) m1name <- deparse(substitute(m1)) m2name <- deparse(substitute(m2)) if(!identical(names(coef(m1)), names(coef(m2)))) stop(paste("Differing results for", m1name, "and", m2name, "in updateppm.R"), call.=FALSE) cat("OK\n") } X <- redwood[c(TRUE,FALSE)] Y <- redwood[c(FALSE,TRUE)] fit0f <- ppm(X ~ 1, nd=8) fit0p <- ppm(X, ~1, nd=8) fitxf <- ppm(X ~ x, nd=8) fitxp <- ppm(X, ~x, nd=8) cat("Basic consistency ...\n") h(fit0f, fit0p) h(fitxf, fitxp) cat("\nTest correct handling of model formulas ...\n") h(update(fitxf, Y), fitxf) h(update(fitxf, Q=Y), fitxf) h(update(fitxf, Y~x), fitxf) h(update(fitxf, Q=Y~x), fitxf) h(update(fitxf, ~x), fitxf) h(update(fitxf, Y~1), fit0f) h(update(fitxf, ~1), fit0f) h(update(fit0f, Y~x), fitxf) h(update(fit0f, ~x), fitxf) h(update(fitxp, Y), fitxp) h(update(fitxp, Q=Y), fitxp) h(update(fitxp, Y~x), fitxp) h(update(fitxp, Q=Y~x), fitxp) h(update(fitxp, ~x), fitxp) h(update(fitxp, Y~1), fit0p) h(update(fitxp, ~1), fit0p) h(update(fit0p, Y~x), fitxp) h(update(fit0p, ~x), fitxp) cat("\nTest scope handling for left hand side ...\n") X <- Y h(update(fitxf), fitxf) cat("\nTest scope handling for right hand side ...\n") Z <- distmap(X) fitZf <- ppm(X ~ Z) fitZp <- ppm(X, ~ Z) h(update(fitxf, X ~ Z), fitZf) h(update(fitxp, X ~ Z), fitZp) h(update(fitxf, . ~ Z), fitZf) h(update(fitZf, . ~ x), fitxf) h(update(fitZf, . ~ . - Z), fit0f) h(update(fitxp, . ~ Z), fitZp) h(update(fitZp, . ~ . - Z), fit0p) h(update(fit0p, . ~ . + Z), fitZp) h(update(fitZf, . ~ . ), fitZf) h(update(fitZp, . ~ . ), fitZp) cat("\nTest use of internal data ...\n") h(update(fitZf, ~ x, use.internal=TRUE), fitxf) fitsin <- update(fitZf, X~sin(Z)) h(update(fitZf, ~ sin(Z), use.internal=TRUE), fitsin) cat("\nTest step() ... 
") fut <- ppm(X ~ Z + x + y, nd=8) fut0 <- step(fut, trace=0) cat("OK\n") }) # test update.lppm local({ X <- runiflpp(20, simplenet) fit0 <- lppm(X ~ 1) fit1 <- update(fit0, ~ x) anova(fit0, fit1, test="LR") cat("update.lppm(fit, ~trend) is OK\n") fit2 <- update(fit0, . ~ x) anova(fit0, fit2, test="LR") cat("update.lppm(fit, . ~ trend) is OK\n") }) # # tests/vcovppm.R # # Check validity of vcov.ppm algorithms # # Thanks to Ege Rubak # # $Revision: 1.11 $ $Date: 2020/01/11 09:38:40 $ # require(spatstat) local({ set.seed(42) X <- rStrauss(200, .5, .05) model <- ppm(X, inter = Strauss(.05)) b <- vcov(model, generic = TRUE, algorithm = "basic") v <- vcov(model, generic = TRUE, algorithm = "vector") vc <- vcov(model, generic = TRUE, algorithm = "vectorclip") vn <- vcov(model, generic = FALSE) disagree <- function(x, y, tol=1e-7) { max(abs(x-y)) > tol } asymmetric <- function(x) { disagree(x, t(x)) } if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm") if(asymmetric(vn)) stop("Non-symmetric matrix produced by vcov.ppm Strauss algorithm") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' ") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' ") if(disagree(vn, vc)) stop("Disagreement between vcov.ppm generic and Strauss algorithms") # Geyer code xx <- c(0.7375956, 0.6851697, 0.6399788, 0.6188382) yy <- c(0.5816040, 0.6456319, 0.5150633, 0.6191592) Y <- ppp(xx, yy, window=square(1)) modelY <- ppm(Y ~1, Geyer(0.1, 1)) b <- vcov(modelY, generic = TRUE, algorithm = "basic") v <- vcov(modelY, generic = TRUE, algorithm = "vector") vc <- vcov(modelY, generic = TRUE, algorithm = "vectorclip") if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm for Geyer model") 
if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm for Geyer model") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm for Geyer model") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' for Geyer model") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' for Geyer model") ## tests of 'deltasuffstat' code ## Handling of offset terms modelH <- ppm(cells ~x, Hardcore(0.05)) a <- vcov(modelH, generic=TRUE) ## may fall over b <- vcov(modelH, generic=FALSE) if(disagree(a, b)) stop("Disagreement between vcov.ppm algorithms for Hardcore model") ## Correctness of pairwise.family$delta2 modelZ <- ppm(amacrine ~1, MultiStrauss(radii=matrix(0.1, 2, 2))) b <- vcov(modelZ, generic=FALSE) g <- vcov(modelZ, generic=TRUE) if(disagree(b, g)) stop("Disagreement between vcov.ppm algorithms for MultiStrauss model") ## Test that 'deltasuffstat' works for Hybrids modelHyb <- ppm(japanesepines ~ 1, Hybrid(Strauss(0.05), Strauss(0.1))) vHyb <- vcov(modelHyb) ## Code blocks for other choices of 'what' model <- ppm(X ~ 1, Strauss(.05)) cG <- vcov(model, what="corr") cP <- vcov(update(model, Poisson()), what="corr") ## outdated usage cX <- vcov(model, A1dummy=TRUE) ## Model with zero-length coefficient vector lam <- intensity(X) f <- function(x,y) { rep(lam, length(x)) } model0 <- ppm(X ~ offset(log(f)) - 1) dd <- vcov(model0) cc <- vcov(model0, what="corr") ## Model with NA coefficients fit <- ppm(X ~ log(f)) vcov(fit) fitE <- emend(fit, trace=TRUE) ## Other weird stuff su <- suffloc(ppm(X ~ x)) }) # # tests/windows.R # # Tests of owin geometry code # # $Revision: 1.15 $ $Date: 2020/02/06 05:48:33 $ require(spatstat) local({ # Ege Rubak spotted this problem in 1.28-1 A <- as.owin(ants) B <- dilation(A, 140) if(!is.subset.owin(A, B)) stop("is.subset.owin fails in polygonal case") # thanks to Tom Rosenbaum A <- shift(square(3), 
origin="midpoint") B <- shift(square(1), origin="midpoint") AB <- setminus.owin(A, B) D <- shift(square(2), origin="midpoint") if(is.subset.owin(D,AB)) stop("is.subset.owin fails for polygons with holes") ## thanks to Brian Ripley / SpatialVx M <- as.mask(letterR) stopifnot(area(bdry.mask(M)) > 0) stopifnot(area(convexhull(M)) > 0) R <- as.mask(square(1)) stopifnot(area(bdry.mask(R)) > 0) stopifnot(area(convexhull(R)) > 0) ## RR <- convexify(as.mask(letterR)) CC <- covering(letterR, 0.05, eps=0.1) #' as.owin.data.frame V <- as.mask(letterR, eps=0.2) Vdf <- as.data.frame(V) Vnew <- as.owin(Vdf) zz <- mask2df(V) RM <- owinpoly2mask(letterR, as.mask(Frame(letterR)), check=TRUE) #' as.owin U <- as.owin(quadscheme(cells)) U2 <- as.owin(list(xmin=0, xmax=1, ymin=0, ymax=1)) #' intersections involving masks B1 <- square(1) B2 <- as.mask(shift(B1, c(0.2, 0.3))) o12 <- overlap.owin(B1, B2) o21 <- overlap.owin(B2, B1) i12 <- intersect.owin(B1, B2, eps=0.01) i21 <- intersect.owin(B2, B1, eps=0.01) E2 <- emptywindow(square(2)) e12 <- intersect.owin(B1, E2) e21 <- intersect.owin(E2, B1) #' geometry inradius(B1) inradius(B2) inradius(letterR) inpoint(B1) inpoint(B2) inpoint(letterR) is.convex(B1) is.convex(B2) is.convex(letterR) volume(letterR) perimeter(as.mask(letterR)) boundingradius(cells) boundingbox(letterR) boundingbox(letterR, NULL) boundingbox(cells, ppm(cells ~ 1)) boundingbox(solist(letterR)) spatstat.options(Cbdrymask=FALSE) bb <- bdry.mask(letterR) spatstat.options(Cbdrymask=TRUE) X <- longleaf[square(50)] marks(X) <- marks(X)/8 D <- discs(X) D <- discs(X, delta=5, separate=TRUE) AD <- dilated.areas(cells, r=0.01 * matrix(1:10, 10,1), constrained=FALSE, exact=FALSE) periodify(B1, 2) periodify(union.owin(B1, B2), 2) periodify(letterR, 2) #' Ancient bug in inside.owin W5 <- owin(poly=1e5*cbind(c(-1,1,1,-1),c(-1,-1,1,1))) W6 <- owin(poly=1e6*cbind(c(-1,1,1,-1),c(-1,-1,1,1))) i5 <- inside.owin(0,0,W5) i6 <- inside.owin(0,0,W6) if(!i5) stop("Wrong answer from 
inside.owin") if(i5 != i6) stop("Results from inside.owin are scale-dependent") #' miscellaneous utilities thrash <- function(f) { f(letterR) f(Frame(letterR)) f(as.mask(letterR)) } thrash(meanX.owin) thrash(meanY.owin) thrash(intX.owin) thrash(intY.owin) interpretAsOrigin("right", letterR) interpretAsOrigin("bottom", letterR) interpretAsOrigin("bottomright", letterR) interpretAsOrigin("topleft", letterR) interpretAsOrigin("topright", letterR) A <- break.holes(letterR) B <- break.holes(letterR, splitby="y") plot(letterR, col="blue", use.polypath=FALSE) }) local({ #' mask conversion M <- as.mask(letterR) D2 <- as.data.frame(M) # two-column D3 <- as.data.frame(M, drop=FALSE) # three-column M2 <- as.owin(D2) M3 <- as.owin(D3) W2 <- owin(mask=D2) W3 <- owin(mask=D3) #' void/empty cases nix <- nearest.raster.point(numeric(0), numeric(0), M) E <- emptywindow(Frame(letterR)) print(E) #' cases of summary.owin print(summary(E)) # empty print(summary(Window(humberside))) # single polygon #' additional cases of owin() B <- owin(mask=M$m) # no pixel size or coordinate info xy <- as.data.frame(letterR) xxyy <- split(xy[,1:2], xy$id) spatstat.options(checkpolygons=TRUE) H <- owin(poly=xxyy, check=TRUE) }) local({ #' Code for/using intersection and union of windows Empty <- emptywindow(Frame(letterR)) a <- intersect.owin() a <- intersect.owin(Empty) a <- intersect.owin(Empty, letterR) a <- intersect.owin(letterR, Empty) b <- intersect.owin() b <- intersect.owin(Empty) b <- intersect.owin(Empty, letterR) b <- intersect.owin(letterR, Empty) d <- union.owin(as.mask(square(1)), as.mask(square(2))) #' [.owin A <- erosion(letterR, 0.2) Alogi <- as.im(TRUE, W=A) B <- letterR[A] B <- letterR[Alogi] #' miscellaneous D <- convexhull(Alogi) }) reset.spatstat.options() ## ## tests/xysegment.R ## [SEE ALSO tests/segments.R] ## ## Test weird problems and boundary cases for line segment code ## ## $Version$ $Date: 2018/05/13 04:22:28 $ ## require(spatstat) local({ # segment of length zero B <- 
psp(1/2, 1/2, 1/2, 1/2, window=square(1)) BB <- angles.psp(B) A <- runifpoint(3) AB <- project2segment(A,B) # mark inheritance X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) marks(X) <- 1:10 Y <- selfcut.psp(X) marks(X) <- data.frame(A=1:10, B=factor(letters[1:10])) Z <- selfcut.psp(X) #' psp class support S <- as.psp(simplenet) marks(S) <- sample(factor(c("A","B")), nobjects(S), replace=TRUE) intensity(S) intensity(S, weights=runif(nsegments(S))) }) spatstat/tests/testsQ.R0000644000176200001440000000335013603007370014665 0ustar liggesusers#' #' tests/quadschemes.R #' #' Quadrature schemes, dummy points etc #' #' $Revision: 1.6 $ $Date: 2019/01/20 05:49:40 $ #' require(spatstat) local({ ## class 'quad' qu <- quadscheme(cells) qm <- quadscheme(amacrine) plot(qu) plot(qm) is.multitype(qu) is.multitype(qm) a <- param.quad(qu) a <- param.quad(qm) a <- equals.quad(qu) a <- equals.quad(qm) a <- domain(qu) unitname(qu) <- c("Furlong", "Furlongs") ## utilities b <- cellmiddles(square(1), 3, 4) b <- cellmiddles(letterR, 3, 4, distances=FALSE) b <- cellmiddles(letterR, 3, 4, distances=TRUE) v <- tilecentroids(square(1), 3, 4) v <- tilecentroids(letterR, 3, 4) n <- default.n.tiling(cells) n <- default.n.tiling(cells, nd=4) n <- default.n.tiling(cells, ntile=4) n <- default.n.tiling(cells, ntile=4, quasi=TRUE) ## quadrature weights - special cases X <- runifpoint(10, as.mask(letterR)) gr <- gridweights(X, ntile=12, npix=7) # causes warnings about zero digital area ## plot.quad plot(quadscheme(cells, method="dirichlet", nd=7), tiles=TRUE) plot(quadscheme(cells, method="dirichlet", nd=7, exact=FALSE), tiles=TRUE) ## logistic d <- quadscheme.logi(cells, logi.dummy(cells, "binomial")) print(summary(d)) d <- quadscheme.logi(cells, logi.dummy(cells, "poisson")) print(summary(d)) d <- quadscheme.logi(cells, logi.dummy(cells, "grid")) print(summary(d)) d <- quadscheme.logi(cells, logi.dummy(cells, "transgrid")) print(summary(d)) d <- quadscheme.logi(amacrine, 
logi.dummy(amacrine, "binomial", mark.repeat=TRUE)) print(summary(d)) d <- quadscheme.logi(amacrine, logi.dummy(amacrine, "poisson", mark.repeat=FALSE)) print(summary(d)) }) spatstat/tests/testsEtoF.R0000644000176200001440000005012313606020450015317 0ustar liggesusers# tests/emptymarks.R # # test cases where there are no (rows or columns of) marks # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ n <- npoints(cells) df <- data.frame(x=1:n, y=factor(sample(letters, n, replace=TRUE))) nocolumns <- c(FALSE, FALSE) norows <- rep(FALSE, n) X <- cells marks(X) <- df marks(X) <- df[,1] marks(X) <- df[,nocolumns] Z <- Y <- X[integer(0)] marks(Y) <- df[norows,] stopifnot(is.marked(Y)) marks(Z) <- df[norows,nocolumns] stopifnot(!is.marked(Z)) }) # # tests/envelopes.R # # Test validity of envelope data # # $Revision: 1.21 $ $Date: 2019/12/14 02:15:38 $ # require(spatstat) local({ checktheo <- function(fit) { fitname <- deparse(substitute(fit)) en <- envelope(fit, nsim=4, verbose=FALSE, nrep=1e3) nama <- names(en) expecttheo <- is.poisson(fit) && is.stationary(fit) context <- paste("Envelope of", fitname) if(expecttheo) { if(!("theo" %in% nama)) stop(paste(context, "did not contain", sQuote("theo"))) if("mmean" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("mmean"))) } else { if("theo" %in% nama) stop(paste(context, "unexpectedly contained", sQuote("theo"))) if(!("mmean" %in% nama)) stop(paste(context, "did not contain", sQuote("mmean"))) } cat(paste(context, "has correct format\n")) } checktheo(ppm(cells)) checktheo(ppm(cells ~x)) checktheo(ppm(cells ~1, Strauss(0.1))) # check envelope calls from 'alltypes' a <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE) b <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE, global=TRUE) # check 'transform' idioms A <- envelope(cells, Kest, nsim=4, transform=expression(. 
- .x)) B <- envelope(cells, Kest, nsim=4, transform=expression(sqrt(./pi) - .x)) #' check savefuns/savepatterns with global fit <- ppm(cells~x) Ef <- envelope(fit, Kest, nsim=4, savefuns=TRUE, global=TRUE) Ep <- envelope(fit, Kest, nsim=4, savepatterns=TRUE, global=TRUE) #' check handling of 'dangerous' cases fut <- ppm(redwood ~ x) Ek <- envelope(fut, Kinhom, update=FALSE, nsim=4) kfut <- kppm(redwood3 ~ x) Ekk <- envelope(kfut, Kinhom, lambda=density(redwood3), nsim=7) # check conditional simulation e1 <- envelope(cells, Kest, nsim=4, fix.n=TRUE) e2 <- envelope(amacrine, Kest, nsim=4, fix.n=TRUE) e3 <- envelope(amacrine, Kcross, nsim=4, fix.marks=TRUE) e4 <- envelope(finpines, Kest, nsim=4, fix.n=TRUE) # multiple columns of marks e5 <- envelope(finpines, Kest, nsim=4, fix.marks=TRUE) fit <- ppm(japanesepines ~ 1, Strauss(0.04)) e6 <- envelope(fit, Kest, nsim=4, fix.n=TRUE) fit2 <- ppm(amacrine ~ 1, Strauss(0.03)) e7 <- envelope(fit2, Gcross, nsim=4, fix.marks=TRUE) # check pooling of envelopes in global case E1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE) E2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE) p12 <- pool(E1, E2) p12 <- pool(E1, E2, savefuns=TRUE) F1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, savepatterns=TRUE, global=TRUE) F2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, savepatterns=TRUE, global=TRUE) p12 <- pool(F1, F2) p12 <- pool(F1, F2, savefuns=TRUE, savepatterns=TRUE) E1r <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) E2r <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) p12r <- pool(E1r, E2r) }) local({ #' as.data.frame.envelope Nsim <- 5 E <- envelope(cells, nsim=Nsim, savefuns=TRUE) A <- as.data.frame(E) B <- as.data.frame(E, simfuns=TRUE) stopifnot(ncol(B) - ncol(A) == Nsim) }) local({ #' cases not covered elsewhere A <- envelope(cells, nsim=5, alternative="less", do.pwrong=TRUE, use.theory=FALSE, savepatterns=TRUE, 
savefuns=TRUE) print(A) B <- envelope(A, nsim=5, savefuns=TRUE) D <- envelope(cells, "Lest", nsim=5) UU <- envelope(cells, nsim=5, foreignclass="ppp", clipdata=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", global=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="less", global=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", VARIANCE=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", VARIANCE=TRUE) fit <- ppm(cells ~ 1, Strauss(0.07)) U <- envelope(fit, nsim=3, simulate=expression(runifpoint(20))) kfit <- kppm(redwood3 ~ x) UU <- envelope(kfit, nsim=7, simulate=expression(simulate(kfit, drop=TRUE))) VV <- envelope(kfit, nsim=7, weights=1:7) MM <- envelope(kfit, nsim=7, Kinhom, lambda=density(redwood3)) #' envelopes based on sample variance E <- envelope(cells, nsim=8, VARIANCE=TRUE) G <- envelope(cells, nsim=8, VARIANCE=TRUE, use.theory=FALSE, do.pwrong=TRUE) print(G) #' summary method summary(E) summary(envelope(cells, nsim=5, simulate=expression(runifpoint(42)))) #' weights argument H1 <- envelope(cells, nsim=4, weights=npoints, savefuns=TRUE) H2 <- envelope(cells, nsim=4, weights=npoints, savefuns=TRUE) J1 <- envelope(cells, nsim=4, weights=npoints, VARIANCE=TRUE) J2 <- envelope(cells, nsim=4, weights=npoints, VARIANCE=TRUE) #' pooling with weights H <- pool(H1, H2) J <- pool(J1, J2) #' pooling envelopes with non-identical attributes H0 <- envelope(cells, nsim=4, savefuns=TRUE) HH <- pool(H0, H1) #' undocumented/secret K <- envelope(cells, nsim=4, saveresultof=npoints, collectrubbish=TRUE) #' so secret I've even forgotten how to do it M <- envelope(cells, nsim=4, internal=list(eject="patterns")) }) local({ #' envelope computations in other functions P <- lurking(cells, expression(x), envelope=TRUE, nsim=9) print(P) #' re-using envelope objects in other functions A <- envelope(cells, nsim=9, savepatterns=TRUE, savefuns=TRUE) S <- lurking(cells, expression(x), envelope=A, nsim=9) #' envelope.envelope B 
<- envelope(cells, nsim=5, savepatterns=TRUE, savefuns=FALSE) envelope(B) }) local({ X <- runiflpp(10, simplenet) Xr <- X %mark% runif(10) Xc <- X %mark% factor(letters[c(1:4,3,2,4:1)]) X2 <- X %mark% data.frame(height=runif(10), width=runif(10)) E <- envelope(X, linearK, nsim=9) Er <- envelope(Xr, linearK, nsim=9) Ec <- envelope(Xc, linearK, nsim=9) E2 <- envelope(X2, linearK, nsim=9) Erf <- envelope(Xr, linearK, nsim=9, fix.n=TRUE) E2f <- envelope(X2, linearK, nsim=9, fix.n=TRUE) Ecf <- envelope(Xc, linearK, nsim=9, fix.n=TRUE) Ecm <- envelope(Xc, linearKcross, nsim=9, fix.n=TRUE, fix.marks=TRUE) fut <- lppm(Xc ~ marks) EEf <- envelope(fut, linearK, fix.n=TRUE) EEm <- envelope(fut, linearKcross, fix.n=TRUE, fix.marks=TRUE) }) local({ #' Test robustness of envelope() sorting procedure when NA's are present #' Fails with spatstat.utils 1.12-0 set.seed(42) EP <- envelope(longleaf, pcf, nsim=10, nrank=2) #' Test case when the maximum permitted number of failures is exceeded X <- amacrine[1:153] # contains exactly one point with mark='off' #' High probability of generating a pattern with no marks = 'off' E <- envelope(X, Kcross, nsim=39, maxnerr=2, maxerr.action="warn") A <- alltypes(X, Kcross, envelope=TRUE, nsim=39, maxnerr=2) }) local({ #' Internals: envelope.matrix Y <- matrix(rnorm(200), 10, 20) rr <- 1:10 oo <- rnorm(10) zz <- numeric(10) E <- envelope(Y, rvals=rr, observed=oo, nsim=10) E <- envelope(Y, rvals=rr, observed=oo, jsim=1:10) E <- envelope(Y, rvals=rr, observed=oo, theory=zz, type="global", use.theory=TRUE) E <- envelope(Y, rvals=rr, observed=oo, theory=zz, type="global", use.theory=TRUE, nsim=10) E <- envelope(Y, rvals=rr, observed=oo, theory=zz, type="global", use.theory=FALSE, nsim=10) E <- envelope(Y, rvals=rr, observed=oo, type="global", nsim=10, nsim2=10) E <- envelope(Y, rvals=rr, observed=oo, type="global", jsim=1:10, jsim.mean=11:20) print(E) E <- envelope(Y, rvals=rr, observed=oo, type="global", nsim=10, jsim.mean=11:20) E <- envelope(Y, 
rvals=rr, observed=oo, type="global", jsim=1:10, nsim2=10) }) local({ #' quirk with handmade summary functions ('conserve' attribute) Kdif <- function(X, r=NULL) { # note no ellipsis Y <- split(X) K1 <- Kest(Y[[1]], r=r) K2 <- Kest(Y[[2]], r=r) D <- eval.fv(K1-K2) return(D) } envelope(amacrine, Kdif, nsim=3) }) #' tests/enveltest.R #' Envelope tests (dclf.test, mad.test) #' and two-stage tests (bits.test, dg.test, bits.envelope, dg.envelope) #' #' $Revision: 1.2 $ $Date: 2020/01/10 06:41:04 $ #' require(spatstat) local({ #' handling of NA function values (due to empty point patterns) set.seed(1234) X <- rThomas(5, 0.05, 10) fit <- kppm(X ~ 1, "Thomas") set.seed(100000) dclf.test(fit) set.seed(909) dg.test(fit, nsim=9) #' other code blocks dclf.test(fit, rinterval=c(0, 3), nsim=9) envelopeTest(X, exponent=3, clamp=TRUE, nsim=9) }) # # tests/factorbugs.R # # check for various bugs related to factor conversions # # $Revision: 1.4 $ $Date: 2019/02/10 07:21:02 $ # require(spatstat) local({ ## make a factor image m <- factor(rep(letters[1:4], 4)) Z <- im(m, xcol=1:4, yrow=1:4) ## make a point pattern set.seed(42) X <- runifpoint(20, win=as.owin(Z)) ## look up the image at the points of X ## (a) internal ans1 <- lookup.im(Z, X$x, X$y) stopifnot(is.factor(ans1)) ## (b) user level ans2 <- Z[X] stopifnot(is.factor(ans2)) ## (c) turn the image into a tessellation ## and apply quadratcount V <- tess(image = Z) quadratcount(X, tess=V) ## (d) pad image Y <- padimage(Z, factor("b", levels=levels(Z))) stopifnot(Y$type == "factor") U <- padimage(Z, "b") stopifnot(U$type == "factor") ## (e) manipulate levels Zb <- relevel(Z, "b") Zv <- mergeLevels(Z, vowel="a", consonant=c("b","c","d")) P <- X %mark% Z[X] Pv <- mergeLevels(P, vowel="a", consonant=c("b","c","d")) }) # # tests/fastgeyer.R # # checks validity of fast C implementation of Geyer interaction # # $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ # require(spatstat) local({ X <- redwood Q <- quadscheme(X) U <- union.quad(Q) EP 
<- equalpairs.quad(Q) G <- Geyer(0.11, 2) # The value r=0.11 is chosen to avoid hardware numerical effects (gcc bug 323). # It avoids being close any value of pairdist(redwood). # The nearest such values are 0.1077.. and 0.1131.. # By contrast if r = 0.1 there are values differing from 0.1 by 3e-17 a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match") # ... # and again for a non-integer value of 'sat' # (spotted by Thordis Linda Thorarinsdottir) G <- Geyer(0.11, 2.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat is not an integer") # and again for sat < 1 # (spotted by Rolf) G <- Geyer(0.11, 0.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat < 1") }) # # tests/fastK.R # # check fast and slow code for Kest # and options not tested elsewhere # # $Revision: 1.4 $ $Date: 2018/04/13 04:01:25 $ # require(spatstat) local({ ## fast code Kb <- Kest(cells, nlarge=0) Ku <- Kest(cells, correction="none") Kbu <- Kest(cells, correction=c("none", "border")) ## slow code, full set of corrections, sqrt transformation, ratios Ldd <- Lest(unmark(demopat), correction="all", var.approx=TRUE, ratio=TRUE) ## Lotwick-Silverman var approx (rectangular window) Loo <- Lest(cells, correction="all", var.approx=TRUE, ratio=TRUE) ## Code for large dataset nbig <- .Machine$integer.max if(!is.null(nbig)) { nn <- ceiling(sqrt(nbig)) if(nn < 1e6) Kbig <- Kest(runifpoint(nn), correction=c("border", "bord.modif", "none"), ratio=TRUE) } ## Kinhom lam <- density(cells, at="points", leaveoneout=TRUE) ## fast code Kib <- Kinhom(cells, lam, nlarge=0) Kiu <- Kest(cells, lam, 
correction="none") Kibu <- Kest(cells, lam, correction=c("none", "border")) ## slow code Lidd <- Linhom(unmark(demopat), sigma=bw.scott) }) #' tests/formuli.R #' #' Test machinery for manipulating formulae #' #' $Revision: 1.6 $ $Date: 2020/01/08 01:38:24 $ require(spatstat) local({ ff <- function(A, deletevar, B) { D <- reduceformula(A, deletevar) if(!spatstat.utils::identical.formulae(D, B)) { AD <- as.expression(substitute(reduceformula(A,d), list(A=A, d=deletevar))) stop(paste(AD, "\n\tyields ", spatstat.utils::pasteFormula(D), " instead of ", spatstat.utils::pasteFormula(B)), call.=FALSE) } invisible(NULL) } ff(~ x + z, "x", ~z) ff(y ~ x + z, "x", y~z) ff(~ I(x^2) + z, "x", ~z) ff(y ~ poly(x,2) + poly(z,3), "x", y ~poly(z,3)) ff(y ~ x + z, "g", y ~ x + z) reduceformula(y ~ x+z, "g", verbose=TRUE) reduceformula(y ~ sin(x-z), "z", verbose=TRUE) illegal.iformula(~str*g, itags="str", dfvarnames=c("marks", "g", "x", "y")) }) # # tests/func.R # # $Revision: 1.4 $ $Date: 2019/01/14 07:05:27 $ # # Tests of 'funxy' infrastructure etc require(spatstat) local({ ## Check the peculiar function-building code in funxy W <- square(1) f1a <- function(x, y) sqrt(x^2 + y^2) f1b <- function(x, y) { sqrt(x^2 + y^2) } f2a <- function(x, y) sin(x) f2b <- function(x, y) { sin(x) } f3a <- function(x, y) sin(x) + cos(x) f3b <- function(x, y) { sin(x) + cos(x) } f4a <- function(x, y) { z <- x + y ; z } f4b <- function(x, y) { x + y } F1a <- funxy(f1a, W) F1b <- funxy(f1b, W) F2a <- funxy(f2a, W) F2b <- funxy(f2b, W) F3a <- funxy(f3a, W) F3b <- funxy(f3b, W) F4a <- funxy(f4a, W) F4b <- funxy(f4b, W) stopifnot(identical(F1a(cells), F1b(cells))) stopifnot(identical(F2a(cells), F2b(cells))) stopifnot(identical(F3a(cells), F3b(cells))) stopifnot(identical(F4a(cells), F4b(cells))) ## check coordinate extraction from objects X <- runifpoint(9) Y <- runiflpp(5, simplenet) Q <- quadscheme(X) a <- F1a(X) b <- F1a(Y) d <- F1a(Q) }) ## ## tests/funnymarks.R ## ## tests involving strange mark values 
## $Revision: 1.6 $ $Date: 2020/01/05 02:32:34 $ require(spatstat) local({ ## ppm() where mark levels contain illegal characters hyphenated <- c("a", "not-a") spaced <- c("U", "non U") suffixed <- c("a+", "a*") charred <- c("+", "*") irad <- matrix(0.1, 2,2) hrad <- matrix(0.005, 2, 2) tryit <- function(types, X, irad, hrad) { levels(marks(X)) <- types fit <- ppm(X ~marks + polynom(x,y,2), MultiStraussHard(types=types,iradii=irad,hradii=hrad)) print(fit) print(coef(fit)) val <- fitted(fit) pred <- predict(fit) return(invisible(NULL)) } tryit(hyphenated, amacrine, irad, hrad) tryit(spaced, amacrine, irad, hrad) tryit(suffixed, amacrine, irad, hrad) tryit(charred, amacrine, irad, hrad) ## marks which are dates X <- cells n <- npoints(X) endoftime <- rep(ISOdate(2001,1,1), n) eotDate <- rep(as.Date("2001-01-01"), n) markformat(endoftime) markformat(eotDate) marks(X) <- endoftime print(X) Y <- X %mark% data.frame(id=1:42, date=endoftime, dd=eotDate) print(Y) md <- markformat(endoftime) ## mark formats Z <- Y marks(Z) <- marks(Z)[1,,drop=FALSE] ms <- markformat(solist(cells, redwood)) marks(Z) <- factor(1:npoints(Z)) marks(Z)[12] <- NA mz <- is.multitype(Z) cZ <- coerce.marks.numeric(Z) marks(Z) <- data.frame(n=1:npoints(Z), a=factor(sample(letters, npoints(Z), replace=TRUE))) cZ <- coerce.marks.numeric(Z) stopifnot(is.multitype(cells %mark% data.frame(a=factor(1:npoints(cells))))) a <- numeric.columns(finpines) b1 <- numeric.columns(amacrine) b2 <- coerce.marks.numeric(amacrine) d <- numeric.columns(cells) f <- numeric.columns(longleaf) ff <- data.frame(a=factor(letters[1:10]), y=factor(sample(letters, 10))) numeric.columns(ff) ## mark operations df <- data.frame(x=1:2, y=sample(letters, 2)) h <- hyperframe(z=1:2, p=solist(cells, cells)) a <- NULL %mrep% 3 a <- 1:4 %mrep% 3 a <- df %mrep% 3 a <- h %mrep% 3 b <- markcbind(df, h) b <- markcbind(h, df) }) ## ## tests/fvproblems.R ## ## problems with fv, ratfv and fasp code ## ## $Revision: 1.14 $ $Date: 2020/01/10 
03:11:49 $ require(spatstat) #' This appears in the workshop notes #' Problem detected by Martin Bratschi local({ Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } Z <- Jdif(amacrine, i="on") }) #' #' Test mathlegend code #' local({ K <- Kest(cells) plot(K) plot(K, . ~ r) plot(K, . - theo ~ r) plot(K, sqrt(./pi) ~ r) plot(K, cbind(iso, theo) ~ r) plot(K, cbind(iso, theo) - theo ~ r) plot(K, sqrt(cbind(iso, theo)/pi) ~ r) plot(K, cbind(iso/2, -theo) ~ r) plot(K, cbind(iso/2, trans/2) - theo ~ r) # test expansion of .x and .y plot(K, . ~ .x) plot(K, . - theo ~ .x) plot(K, .y - theo ~ .x) plot(K, sqrt(.y) - sqrt(theo) ~ .x) # problems with parsing weird strings in levels(marks(X)) # noted by Ulf Mehlig levels(marks(amacrine)) <- c("Nasticreechia krorluppia", "Homo habilis") plot(Kcross(amacrine)) plot(alltypes(amacrine, "K")) plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, pcfcross)) }) #' #' Test quirks related to 'alim' attribute local({ K <- Kest(cells) attr(K, "alim") <- NULL plot(K) attr(K, "alim") <- c(0, 0.1) plot(tail(K)) }) #' #' Check that default 'r' vector passes the test for fine spacing local({ a <- Fest(cells) A <- Fest(cells, r=a$r) b <- Hest(heather$coarse) B <- Hest(heather$coarse, r=b$r) # from Cenk Icos X <- runifpoint(100, owin(c(0,3), c(0,10))) FX <- Fest(X) FXr <- Fest(X, r=FX$r) JX <- Jest(X) }) ##' various functionality in fv.R local({ M <- cbind(1:20, matrix(runif(100), 20, 5)) A <- as.fv(M) fvlabels(A) <- c("r","%s(r)", "%s[A](r)", "%s[B](r)", "%s[C](r)", "%s[D](r)") A <- rename.fv(A, "M", quote(M(r))) A <- tweak.fv.entry(A, "V1", new.tag="r") A[,3] <- NULL A$hogwash <- runif(nrow(A)) fvnames(A, ".") <- NULL #' bind.fv with qualitatively different functions GK <- harmonise(G=Gest(cells), K=Kest(cells)) G <- GK$G K <- GK$K ss <- c(rep(TRUE, nrow(K)-10), rep(FALSE, 10)) U <- bind.fv(G, K[ss, ], clip=TRUE) #' H <- rebadge.as.crossfun(K, "H", "inhom", 1, 2) H <- 
rebadge.as.dotfun(K, "H", "inhom", 3) #' text layout op <- options(width=27) print(K) options(width=18) print(K) options(op) #' collapse.fv Kb <- Kest(cells, correction="border") Ki <- Kest(cells, correction="isotropic") collapse.fv(Kb, Ki, same="theo") collapse.fv(anylist(B=Kb, I=Ki), same="theo") collapse.fv(anylist(B=Kb), I=Ki, same="theo") Xlist <- replicate(3, runifpoint(30), simplify=FALSE) Klist <- anylapply(Xlist, Kest) collapse.fv(Klist, same="theo", different=c("iso", "border")) names(Klist) <- LETTERS[24:26] collapse.fv(Klist, same="theo", different=c("iso", "border")) }) local({ ## rat K <- Kest(cells, ratio=TRUE) G <- Gest(cells, ratio=TRUE) print(K) compatible(K, K) compatible(K, G) H <- rat(K, attr(K, "numerator"), attr(K, "denominator"), check=TRUE) }) local({ ## bug in Jmulti.R colliding with breakpts.R B <- owin(c(0,3), c(0,10)) Y <- superimpose(A=runifpoint(1212, B), B=runifpoint(496, B)) JDX <- Jdot(Y) JCX <- Jcross(Y) Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) 
dif <- eval.fv(Jidot - J) return(dif) } E <- envelope(Y, Jdif, nsim=19, i="A", simulate=expression(rlabel(Y))) }) local({ #' fasp axes, title, dimnames a <- alltypes(amacrine) a$title <- NULL plot(a, samex=TRUE, samey=TRUE) dimnames(a) <- lapply(dimnames(a), toupper) b <- as.fv(a) }) local({ ## plot.anylist (fv) b <- anylist(A=Kcross(amacrine), B=Kest(amacrine)) plot(b, equal.scales=TRUE, main=expression(sqrt(pi))) plot(b, arrange=FALSE) }) spatstat/tests/testsS.R0000644000176200001440000005777513616730337014725 0ustar liggesusers#' tests/sdr.R #' #' $Revision: 1.1 $ $Date: 2018/05/13 03:14:49 $ require(spatstat) local({ AN <- sdr(bei, bei.extra, method="NNIR") AV <- sdr(bei, bei.extra, method="SAVE") AI <- sdr(bei, bei.extra, method="SIR") AT <- sdr(bei, bei.extra, method="TSE") subspaceDistance(AN$B, AV$B) dimhat(AN$M) }) ## ## tests/segments.R ## Tests of psp class and related code ## [SEE ALSO: tests/xysegment.R] ## ## $Revision: 1.24 $ $Date: 2020/01/27 09:29:59 $ require(spatstat) local({ # pointed out by Jeff Laake W <- owin() X <- psp(x0=.25,x1=.25,y0=0,y1=1,window=W) X[W] # migrated from 'lpp' X <- psp(runif(10),runif(10),runif(10),runif(10), window=owin()) Z <- as.mask.psp(X) Z <- pixellate(X) # add short segment Shorty <- psp(0.5, 0.6, 0.5001, 0.6001, window=Window(X)) XX <- superimpose(X[1:5], Shorty, X[6:10]) ZZ <- as.mask.psp(XX) ZZ <- pixellate(XX) #' misc PX <- periodify(X, 2) # more tests of lppm code fit <- lppm(unmark(chicago) ~ polynom(x,y,2)) Z <- predict(fit) # tests of pixellate.psp -> seg2pixL ns <- 50 out <- numeric(ns) for(i in 1:ns) { X <- psp(runif(1), runif(1), runif(1), runif(1), window=owin()) len <- lengths.psp(X) dlen <- sum(pixellate(X)$v) out[i] <- if(len > 1e-7) dlen/len else 1 } if(diff(range(out)) > 0.01) stop(paste( "pixellate.psp test 1: relative error [", paste(diff(range(out)), collapse=", "), "]")) # Michael Sumner's test examples set.seed(33) n <- 2001 co <- cbind(runif(n), runif(n)) ow <- owin() X <- psp(co[-n,1], 
co[-n,2], co[-1,1], co[-1,2], window=ow) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 2:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } wts <- 1/(lengths.psp(X) * X$n) s1 <- sum(pixellate(X, weights=wts)) if(abs(s1-1) > 0.01) { stop(paste("pixellate.psp test 3:", "sum(pixellate(X, weights))=", s1, " (should be 1)")) } X <- psp(0, 0, 0.01, 0.001, window=owin()) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 4:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } X <- psp(0, 0, 0.001, 0.001, window=owin()) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 5:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } #' cases of superimpose.psp A <- as.psp(matrix(runif(40), 10, 4), window=owin()) B <- as.psp(matrix(runif(40), 10, 4), window=owin()) superimpose(A, B, W=ripras) superimpose(A, B, W="convex") #' tests of density.psp Y <- as.psp(simplenet) YC <- density(Y, 0.2, method="C", edge=FALSE, dimyx=64) YI <- density(Y, 0.2, method="interpreted", edge=FALSE, dimyx=64) YF <- density(Y, 0.2, method="FFT", edge=FALSE, dimyx=64) xCI <- max(abs(YC/YI - 1)) xFI <- max(abs(YF/YI - 1)) if(xCI > 0.01) stop(paste("density.psp C algorithm relative error =", xCI)) if(xFI > 0.01) stop(paste("density.psp FFT algorithm relative error =", xFI)) B <- square(0.3) density(Y, 0.2, at=B) density(Y, 0.2, at=B, edge=TRUE, method="C") Z <- runifpoint(3, B) density(Y, 0.2, at=Z) density(Y, 0.2, at=Z, edge=TRUE, method="C") #' as.psp.data.frame df <- as.data.frame(matrix(runif(40), ncol=4)) A <- as.psp(df, window=square(1)) colnames(df) <- c("x0","y0","x1","y1") df <- cbind(df, data.frame(marks=1:nrow(df))) B <- as.psp(df, window=square(1)) colnames(df) <- c("xmid", "ymid", "length", "angle", "marks") E <- as.psp(df, window=square(c(-1,2))) G <- E %mark% 
factor(sample(letters[1:3], nsegments(E), replace=TRUE)) H <- E %mark% runif(nsegments(E)) #' print and summary methods A B E G H summary(B) summary(G) summary(H) M <- B marks(M) <- data.frame(id=marks(B), len=lengths.psp(B)) M summary(M) subset(M, select=len) #' plot method cases spatstat.options(monochrome=TRUE) plot(B) plot(G) plot(M) spatstat.options(monochrome=FALSE) plot(B) plot(G) plot(M) #' misuse of 'col' argument - several cases plot(G, col="grey") # discrete plot(B, col="grey") plot(unmark(B), col="grey") plot(M, col="grey") #' miscellaneous class support cases marks(M) <- marks(M)[1,,drop=FALSE] #' undocumented as.ppp(B) #' segment crossing code X <- psp(runif(30),runif(30),runif(30),runif(30), window=owin()) A <- selfcut.psp(X, eps=1e-11) B <- selfcut.psp(X[1]) #' Y <- psp(runif(30),runif(30),runif(30),runif(30), window=owin()) Z <- edges(letterR)[c(FALSE,TRUE)] spatstat.options(selfcrossing.psp.useCall=FALSE, crossing.psp.useCall=FALSE) A <- selfcrossing.psp(X) B <- selfcrossing.psp(Z) D <- crossing.psp(X,Y,details=TRUE) spatstat.options(selfcrossing.psp.useCall=TRUE, crossing.psp.useCall=TRUE) A <- selfcrossing.psp(X) B <- selfcrossing.psp(Z) D <- crossing.psp(X,Y,details=TRUE) }) local({ #' test rshift.psp and append.psp with marks (Ute Hahn) m <- data.frame(A=1:10, B=letters[1:10]) g <- gl(3, 3, length=10) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) Y <- rshift(X, radius = 0.1) Y <- rshift(X, radius = 0.1, group=g) #' mark management b <- data.frame(A=1:10) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=b) stopifnot(is.data.frame(marks(X))) Y <- rshift(X, radius = 0.1) Y <- rshift(X, radius = 0.1, group=g) }) local({ #' geometry m <- data.frame(A=1:10, B=letters[1:10]) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) Z <- rotate(X, angle=pi/3, centre=c(0.5, 0.5)) Y <- endpoints.psp(X, which="lower") Y <- endpoints.psp(X, which="upper") Y <- endpoints.psp(X, 
which="right") U <- flipxy(X) }) local({ ## nnfun.psp P <- psp(runif(10), runif(10), runif(10), runif(10), window=square(1), marks=runif(10)) f <- nnfun(P) f <- nnfun(P, value="mark") d <- domain(f) Z <- as.im(f) }) reset.spatstat.options() # ## tests/sigtraceprogress.R # ## Tests of *.sigtrace and *.progress # ## $Revision: 1.4 $ $Date: 2018/11/02 00:53:45 $ require(spatstat) local({ plot(dclf.sigtrace(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dclf.progress(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.sigtrace(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.progress(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) ## test 'leave-two-out' algorithm a <- dclf.sigtrace(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) aa <- dclf.progress(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) b <- dg.sigtrace(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2) bb <- dg.progress(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2, verbose=FALSE) ## other code blocks e <- mad.progress(redwood, nsim=5) e <- mad.progress(redwood, nsim=19, alpha=0.05) f <- dclf.progress(redwood, nsim=5, scale=function(x) x^2) f <- dclf.progress(redwood, nsim=5, normalize=TRUE, deflate=TRUE) g <- dg.progress(redwood, nsim=5, scale=function(x) x^2) g <- dg.progress(redwood, nsim=5, normalize=TRUE, deflate=TRUE) }) #' #' tests/simplepan.R #' #' Tests of user interaction in simplepanel #' Handled by spatstatLocator() #' #' $Revision: 1.2 $ $Date: 2018/10/16 00:46:41 $ #' require(spatstat) local({ ## Adapted from example(simplepanel) ## make boxes outerbox <- owin(c(0,4), c(0,1)) buttonboxes <- layout.boxes(outerbox, 4, horizontal=TRUE, aspect=1) ## make environment containing an integer count myenv <- new.env() assign("answer", 0, envir=myenv) ## what to do when finished: return the count. 
myexit <- function(e) { return(get("answer", envir=e)) } ## button clicks ## decrement the count Cminus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans - 1, envir=e) return(TRUE) } ## display the count (clicking does nothing) Cvalue <- function(...) { TRUE } ## increment the count Cplus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans + 1, envir=e) return(TRUE) } ## 'Clear' button Cclear <- function(e, xy) { assign("answer", 0, envir=e) return(TRUE) } ## quit button Cdone <- function(e, xy) { return(FALSE) } myclicks <- list("-"=Cminus, value=Cvalue, "+"=Cplus, done=Cdone) ## redraw the button that displays the current value of the count Rvalue <- function(button, nam, e) { plot(button, add=TRUE) ans <- get("answer", envir=e) text(centroid.owin(button), labels=ans) return(TRUE) } ## make the panel P <- simplepanel("Counter", B=outerbox, boxes=buttonboxes, clicks=myclicks, redraws = list(NULL, Rvalue, NULL, NULL), exit=myexit, env=myenv) ## queue up a sequence of inputs boxcentres <- do.call(concatxy, unname(lapply(buttonboxes[c(3,3,1,3,2,4)], centroid.owin))) spatstat.utils::queueSpatstatLocator(boxcentres$x, boxcentres$y) ## go run.simplepanel(P) }) # # tests/slrm.R # # $Revision: 1.2 $ $Date: 2020/01/10 04:54:49 $ # # Test slrm fitting and prediction when there are NA's # require(spatstat) local({ X <- copper$SouthPoints W <- owin(poly=list(x=c(0,35,35,1),y=c(1,1,150,150))) Y <- X[W] fit <- slrm(Y ~ x+y) pred <- predict(fit) extractAIC(fit) fitx <- update(fit, . 
~ x) simulate(fitx, seed=42) unitname(fitx) unitname(fitx) <- "km" mur <- solapply(murchison,rescale, 1000, "km") mur$dfault <- distfun(mur$faults) fut <- slrm(gold ~ dfault, data=mur, splitby="greenstone") A <- model.images(fut) }) #' tests/sparse3Darrays.R #' Basic tests of code in sparse3Darray.R and sparsecommon.R #' $Revision: 1.21 $ $Date: 2019/12/31 02:38:48 $ require(spatstat) local({ #' forming arrays #' creation by specifying nonzero elements M <- sparse3Darray(i=1:3, j=c(3,1,2), k=4:2, x=runif(3), dims=rep(4, 3)) #' duplicate entries Mn <- sparse3Darray(i=c(1,1,2), j=c(2,2,1), k=c(3,3,2), x=runif(3), dims=rep(3, 3)) #' cumulate entries in duplicate positions Ms <- sparse3Darray(i=c(1,1,2), j=c(2,2,1), k=c(3,3,2), x=runif(3), dims=rep(3, 3), strict=TRUE) #' print method print(M) #' conversion of other data A <- array(c(1,3,0,0,0,0,0,4,0,2,0,5, 0,0,1,0,0,0,1,0,0,0,1,0), dim=c(3,4,2)) A1 <- A[,,1] A2 <- A[,,2] Z <- A[integer(0), , ] #' array to sparse array AA <- as.sparse3Darray(A) # positive extent ZZ <- as.sparse3Darray(Z) # zero extent #' list of matrices to sparse array AA <- as.sparse3Darray(list(A1, A2)) #' matrix to sparse array AA1 <- as.sparse3Darray(A1) #' vector to sparse array A11 <- A[,1,1] AA11 <- as.sparse3Darray(A11) #' NULL with warning as.sparse3Darray(list()) #' dim(AA) <- dim(AA) + 1 I1 <- SparseIndices(A1) I11 <- SparseIndices(A11) if(require(Matrix)) { #' sparse matrices from Matrix package A1 <- as(A1, "sparseMatrix") A2 <- as(A2, "sparseMatrix") A11 <- as(A11, "sparseVector") #' convert a list of sparse matrices to sparse array AA <- as.sparse3Darray(list(A1, A2)) #' sparse matrix to sparse array AA1 <- as.sparse3Darray(A1) #' sparse vector to sparse array AA11 <- as.sparse3Darray(A11) #' internals E1 <- SparseEntries(A1) I1 <- SparseIndices(A1) I11 <- SparseIndices(A11) df <- data.frame(i=c(1,3,5), j=3:1, k=rep(2, 3), x=runif(3)) aa <- EntriesToSparse(df, NULL) bb <- EntriesToSparse(df, 7) cc <- EntriesToSparse(df, c(7, 4)) dd <- 
EntriesToSparse(df, c(7, 4, 3)) #' duplicated entries dfdup <- df[c(1:3, 2), ] aa <- EntriesToSparse(dfdup, NULL) bb <- EntriesToSparse(dfdup, 7) cc <- EntriesToSparse(dfdup, c(7, 4)) dd <- EntriesToSparse(dfdup, c(7, 4, 3)) } BB <- evalSparse3Dentrywise(AA + AA/2) MM <- bind.sparse3Darray(M, M, along=1) MM <- bind.sparse3Darray(M, M, along=2) RelevantEmpty(42) }) local({ if(require(Matrix)) { M <- sparse3Darray(i=1:4, j=sample(1:4, replace=TRUE), k=c(1,2,1,2), x=1:4, dims=c(5,5,2)) M dimnames(M) <- list(letters[1:5], LETTERS[1:5], c("yes", "no")) M U <- aperm(M, c(1,3,2)) U #' tests of [.sparse3Darray M[ 3:4, , ] M[ 3:4, 2:4, ] M[ 4:3, 4:2, 1:2] M[, 3, ] M[, 3, , drop=FALSE] M[c(FALSE,TRUE,FALSE,FALSE,TRUE), , ] M[, , c(FALSE,FALSE), drop=FALSE] M[1:2, 1, 2:3] # exceeds array bounds # matrix index M[cbind(3:5, 3:5, c(1,2,1))] M[cbind(3:5, 3:5, 2)] M[cbind(3:5, 2, 2)] M[cbind(c(2,2,4), c(3,3,2), 1)] # repeated indices M[cbind(1:4, 1, 2:3)] # exceeds array bounds MA <- as.array(M) UA <- as.array(U) Mfix <- sparse3Darray(i=1:3, j=c(3,1,2), k=4:2, x=runif(3), dims=rep(4, 3)) Mfix[cbind(1,3,4)] # single entry - occupied Mfix[cbind(1,2,4)] # single entry - unoccupied Mfix[cbind(1,c(2,3,2,3),4)] # sparse vector with repeated entries ## tests of "[<-.sparse3Darray" Mflip <- Mzero <- MandM <- Mnew <- Mext <- M Mflip[ , , 2:1] <- M stopifnot(Mflip[3,1,1] == M[3,1,2]) Mzero[1:3,1:3,] <- 0 stopifnot(all(Mzero[1,1,] == 0)) M2a <- M[,,2,drop=FALSE] M2d <- M[,,2,drop=TRUE] MandM[,,1] <- M2a MandM[,,1] <- M2d ## slices of different dimensions M[ , 3, 1] <- 1:5 M[2, , 2] <- 1:5 M[ 1, 3:5, 2] <- 4:6 M[ 2, 5:3, 2] <- 4:6 V3 <- sparseVector(x=1, i=2, length=3) M[ 1, 3:5, 2] <- V3 M[ 2, 5:3, 2] <- V3 M[,,2] <- M2a M[,,2] <- (M2a + 1) V5 <- sparseVector(x=1:2, i=2:3, length=5) M[,2,2] <- V5 M[,,2] <- V5 Mext[1,2,3] <- 4 # exceeds array bounds ## integer matrix index Mnew[cbind(3:5, 3:5, c(1,2,1))] <- 1:3 Mnew[cbind(3:5, 3:5, 2)] <- 1:3 Mnew[cbind(3:5, 2, 2)] <- 1:3 Mnew[cbind(3:5, 3:5, 
c(1,2,1))] <- V3 Mnew[cbind(3:5, 3:5, 2)] <- V3 Mnew[cbind(3:5, 2, 2)] <- V3 ## tests of arithmetic (Math, Ops, Summary) negM <- -M oneM <- 1 * M oneM <- M * 1 twoM <- M + M range(M) cosM <- cos(M) # non-sparse sinM <- sin(M) # sparse Mpos <- (M > 0) # sparse Mzero <- !Mpos # non-sparse stopifnot(all((M+M) == 2*M)) # non-sparse stopifnot(!any((M+M) != 2*M)) # sparse ztimesM <- (1:5) * M # sparse zplusM <- (1:5) + M # non-sparse ## reconcile dimensions Msub <- M[,,1,drop=FALSE] Mdif <- M - Msub Mduf <- Msub - M ## tensor operator tenseur(c(1,-1), M, 1, 3) tenseur(M, M, 1:2, 1:2) tenseur(M, M, 1:2, 2:1) V <- sparseVector(i=c(1,3,6),x=1:3, length=7) tenseur(V,V) tenseur(V,V,1,1) A <- sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(7, 15)) A[1:4, 2:5] <- 3 tenseur(A, A, 1, 1) tenseur(t(A), A, 2, 1) tenseur(V, A, 1, 1) tenseur(t(A), V, 2, 1) tenseur(as.vector(V), A, 1, 1) tenseur(t(A), as.vector(V), 2, 1) v <- 0:3 tensor1x1(v, Mfix) tensor1x1(as(v, "sparseVector"), Mfix) ## test of anyNA method anyNA(M) ## a possible application in spatstat cl10 <- as.data.frame(closepairs(cells, 0.1)) cl12 <- as.data.frame(closepairs(cells, 0.12)) cl10$k <- 1 cl12$k <- 2 cl <- rbind(cl10, cl12) n <- npoints(cells) Z <- with(cl, sparse3Darray(i=i, j=j, k=k, x=1, dims=c(n,n,2))) dimnames(Z) <- list(NULL, NULL, c("r=0.1", "r=0.12")) Z <- aperm(Z, c(3,1,2)) stopifnot(all(sumsymouterSparse(Z) == sumsymouter(as.array(Z)))) # no entries indexed Z[integer(0), integer(0), integer(0)] <- 42 Z[matrix(, 0, 3)] <- 42 ## complex valued arrays Mcplx <- sparse3Darray(i=1:3, j=c(3,1,2), k=4:2, x=runif(3)+runif(3)*1i, dims=rep(4, 3)) print(Mcplx) #' ----------- sparsecommon.R ----------------------- B <- sparseMatrix(i=1:3, j=3:1, x= 10 * (1:3), dims=c(4,4)) #' (and using sparse 3D array M and sparse vector V from above) V2 <- sparseVector(i=c(2,3,6),x=4:6, length=7) # different pattern check.anySparseVector(V2, 10, fatal=FALSE) Bmap <- mapSparseEntries(B, 1, 4:1) Mmap1 <- 
mapSparseEntries(M, 1, 5:1, across=3) Mmap2 <- mapSparseEntries(M, 3, 2:1, conform=FALSE) Mmap3 <- mapSparseEntries(M, 1, matrix(1:10, 5, 2), across=3) Vmap <- mapSparseEntries(V, 1, V2) Vmap <- mapSparseEntries(V, 1, 8) Vthrice <- expandSparse(V, 3) VthriceT <- expandSparse(V, 3, 1) VF <- as.vector(V) # non-sparse VFmap <- mapSparseEntries(VF, 1, V2) VFmap <- mapSparseEntries(VF, 1, 8) VFthrice <- expandSparse(VF, 3) VFthriceT <- expandSparse(VF, 3, 1) VFthriceX <- expandSparse(VF, 3, 2) VV <- sparseVectorCumul(rep(1:3,2), rep(c(3,1,2), 2), 5) Vsum <- applySparseEntries(V, sum) Bdouble <- applySparseEntries(B, function(x) { 2 * x }) Mminus <- applySparseEntries(M, function(x) -x) # empty sparse matrices/arrays Bempty <- B Bempty[] <- 0 mapSparseEntries(Bempty, 1, 42) Mempty <- M Mempty[] <- 0 Mmap1 <- mapSparseEntries(Mempty, 1, 5:1, across=3) Mmap2 <- mapSparseEntries(Mempty, 3, 2:1, conform=FALSE) Mmap3 <- mapSparseEntries(Mempty, 1, matrix(1:10, 5, 2), across=3) #' -------------- sparselinalg.R ------------------------- U <- aperm(M,c(3,1,2)) # 2 x 5 x 5 w <- matrix(0, 5, 5) w[cbind(1:3,2:4)] <- 0.5 w <- as(w, "sparseMatrix") UU <- sumsymouterSparse(U, w, dbg=TRUE) Uempty <- sparse3Darray(dims=c(2,5,5)) UU <- sumsymouterSparse(Uempty, w, dbg=TRUE) } }) local({ # 1 x 1 x 1 arrays M1 <- sparse3Darray(i=1, j=1, k=1, x=42, dims=rep(1,3)) M0 <- sparse3Darray( dims=rep(1,3)) i1 <- matrix(1, 1, 3) a1 <- M1[i1] a0 <- M0[i1] A <- array(runif(75) * (runif(75) < 0.7), dim=c(3,5,5)) M <- as.sparse3Darray(A) M[rep(1,3), c(1,1,2), rep(2, 3)] }) # # tests/splitpea.R # # Check behaviour of split.ppp etc # # Thanks to Marcelino de la Cruz # # $Revision: 1.13 $ $Date: 2019/12/15 04:46:57 $ # require(spatstat) local({ W <- square(8) X <- ppp(c(2.98, 4.58, 7.27, 1.61, 7.19), c(7.56, 5.29, 5.03, 0.49, 1.65), window=W) Z <- quadrats(W, 4, 4) Yall <- split(X, Z, drop=FALSE) Ydrop <- split(X, Z, drop=TRUE) P <- Yall[[1]] if(!all(inside.owin(P$x, P$y, P$window))) stop("Black hole 
detected when drop=FALSE") P <- Ydrop[[1]] if(!all(inside.owin(P$x, P$y, P$window))) stop("Black hole detected when drop=TRUE") Ydrop[[1]] <- P[1] split(X, Z, drop=TRUE) <- Ydrop # test NA handling Zbad <- quadrats(square(4), 2, 2) Ybdrop <- split(X, Zbad, drop=TRUE) Yball <- split(X, Zbad, drop=FALSE) # other bugs/ code blocks in split.ppp, split<-.ppp, [<-.splitppp flog <- rep(c(TRUE,FALSE), 21) fimg <- as.im(dirichlet(runifpoint(5, Window(cells)))) A <- split(cells, flog) B <- split(cells, square(0.5)) D <- split(cells, fimg) E <- split(cells, logical(42), drop=TRUE) Cellules <- cells split(Cellules, flog) <- solapply(A, rjitter) split(Cellules, fimg) <- solapply(D, rjitter) D[[2]] <- rjitter(D[[2]]) Funpines <- finpines marks(Funpines)[,"diameter"] <- factor(marks(Funpines)[,"diameter"]) G <- split(Funpines) H <- split(Funpines, "diameter") split(Funpines) <- solapply(G, rjitter) split(Funpines, "diameter") <- solapply(H, rjitter) # From Marcelino set.seed(1) W<- square(10) # the big window puntos<- rpoispp(0.5, win=W) data(letterR) r00 <- letterR r05 <- shift(letterR,c(0,5)) r50 <- shift(letterR,c(5,0)) r55 <- shift(letterR,c(5,5)) tessr4 <- tess(tiles=list(r00, r05,r50,r55)) puntosr4 <- split(puntos, tessr4, drop=TRUE) split(puntos, tessr4, drop=TRUE) <- puntosr4 ## More headaches with mark format A <- runifpoint(10) B <- runifpoint(10) AB <- split(superimpose(A=A, B=B)) #' check that split<- respects ordering where possible X <- amacrine Y <- split(X) split(X) <- Y stopifnot(identical(X, amacrine)) #' split.ppx df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), mineral=factor(rep(c("Au","Cu"), each=2), levels=c("Au", "Cu", "Pb")), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m", "m","m")) Y <- split(X, "age") Y <- split(X, "mineral", drop=TRUE) Y <- split(X, "mineral") print(Y) print(summary(Y)) Y[c(TRUE,FALSE,TRUE)] Y[1:2] Y[3] <- Y[1] }) #' #' tests/ssf.R #' #' Tests of 'ssf' class #' #' $Revision: 1.2 $ $Date: 
2018/10/21 04:05:33 $ #' require(spatstat) local({ Y <- cells[1:5] X <- rsyst(Window(Y), 5) Z <- runifpoint(3, Window(Y)) f1 <- ssf(X, nncross(X,Y,what="dist")) f2 <- ssf(X, nncross(X,Y,what="dist", k=1:2)) image(f1) g1 <- as.function(f1) g1(Z) g2 <- as.function(f2) g2(Z) plot(f1, style="contour") plot(f1, style="imagecontour") contour(f1) apply.ssf(f2, 1, sum) range(f1) min(f1) max(f1) integral(f1, weights=tile.areas(dirichlet(X))) }) # # tests/step.R # # $Revision: 1.4 $ $Date: 2015/12/29 08:54:49 $ # # test for step() operation # require(spatstat) local({ Z <- as.im(function(x,y){ x^3 - y^2 }, nztrees$window) fitP <- ppm(nztrees ~x+y+Z, covariates=list(Z=Z)) step(fitP) fitS <- update(fitP, Strauss(7)) step(fitS) fitM <- ppm(amacrine ~ marks*(x+y), MultiStrauss(types=levels(marks(amacrine)), radii=matrix(0.04, 2, 2))) step(fitM) }) #' #' tests/sumfun.R #' #' Tests of code for summary functions #' including score residual functions etc #' #' $Revision: 1.5 $ $Date: 2020/02/06 05:38:15 $ require(spatstat) local({ W <- owin(c(0,1), c(-1/2, 0)) Gr <- Gest(redwood, correction="all",domain=W) Fr <- Fest(redwood, correction="all",domain=W) Jr <- Jest(redwood, correction="all",domain=W) F0 <- Fest(redwood[FALSE], correction="all") Fh <- Fest(humberside, domain=erosion(Window(humberside), 100)) FIr <- Finhom(redwood, savelambda=TRUE) JIr <- Jinhom(redwood, savelambda=TRUE) Ga <- Gcross(amacrine, correction="all") Ia <- Iest(amacrine, correction="all") lam <- intensity(amacrine) lmin <- 0.9 * min(lam) nJ <- sum(marks(amacrine) == "off") FM <- FmultiInhom(amacrine, marks(amacrine) == "off", lambdaJ=rep(lam["off"], nJ), lambdamin = lmin) GM <- GmultiInhom(amacrine, marks(amacrine) == "on", marks(amacrine) == "off", lambda=lam[marks(amacrine)], lambdamin=lmin, ReferenceMeasureMarkSetI=42) pt <- psst(cells, interaction=Strauss(0.1), fun=nndcumfun) a <- compileCDF(D=nndist(redwood), B=bdist.points(redwood), r=seq(0, 1, length=256)) ## distance argument spacing and breakpoints e 
<- check.finespacing(c(0,1,2), eps=0.1, action="silent") b <- as.breakpts(pi, 20) b <- as.breakpts(42, max=pi, npos=20) b <- even.breaks.owin(letterR) }) ## ## tests/symbolmaps.R ## ## Quirks associated with symbolmaps, etc. ## ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ local({ require(spatstat) set.seed(100) X <- runifpoint(8) ## symbolmap g1 <- symbolmap(range=c(0,100), size=function(x) x/50) invoke.symbolmap(g1, 50, x=numeric(0), y=numeric(0), add=TRUE) plot(g1, labelmap=100) ## constant/trivial a <- symbolmap(pch=16) print(a) plot(a) symbolmapdomain(a) b <- symbolmap() print(b) ## textureplot V <- as.im(dirichlet(X)) tmap <- textureplot(V) textureplot(V, textures=tmap, legend=TRUE, leg.side="left") textureplot(V, leg.side="bottom") textureplot(V, leg.side="top") ## spacing too large for tiles - upsets various pieces of code textureplot(V, spacing=2) ## plot.texturemap plot(tmap, vertical=TRUE) plot(tmap, vertical=TRUE, xlim=c(0,1)) plot(tmap, vertical=TRUE, ylim=c(0,1)) plot(tmap, vertical=FALSE, xlim=c(0,1)) plot(tmap, vertical=FALSE, ylim=c(0,1)) ## infrastructure plan.legend.layout(owin(), side="top", started=TRUE) }) spatstat/tests/testsP1.R0000644000176200001440000000450213604250402014742 0ustar liggesusers#' spatstat/tests/package.R #' Package information #' $Revision$ $Date$ require(spatstat) local({ a <- bugfixes("book", show=FALSE) bugfixes(package="deldir") }) ## ## tests/percy.R ## ## Tests of Percus-Yevick approximations ## ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ fit <- ppm(swedishpines ~1, DiggleGatesStibbard(6)) K <- Kmodel(fit) }) #' tests/perspim.R #' #' Check persp.im handling of NA, etc #' #' $Revision: 1.1 $ $Date: 2016/08/27 02:53:35 $ require(spatstat) local({ set.seed(42) Z <- distmap(letterR, invert=TRUE)[letterR, drop=FALSE] X <- runifpoint(100, Frame(Z)) M <- persp(Z, colin=Z, visible=TRUE, phi=50) perspPoints(X, Z=Z, M=M) P <- psp(c(2.360, 3.079, 2.211), c(0.934, 1.881, 2.184), c(2.337, 3.654, 
3.274), c(1.829, 0.883, 2.093), window=letterR) perspSegments(P, Z=Z, M=M) persp(Z, colmap=rainbow) persp(Z, colmap=beachcolours, sealevel=mean(Z)) persp(Z, colin=as.im(Z, dimyx=dim(Z)/4)) }) ## ## tests/pixelgripes.R ## Problems related to pixellation of windows ## ## $Revision: 1.4 $ $Date: 2018/10/10 08:04:10 $ require(spatstat) local({ ## From Philipp Hunziker: bug in rNeymanScott (etc) ## Create an irregular window PM <- matrix(c(1,0,0.5,1,0,0), 3, 2, byrow=TRUE) P <- owin(poly=PM) ## Generate Matern points X <- rMatClust(50, 0.05, 5, win=P) ## Some distance function as a covariate distorigin <- function(x, y) { sqrt(x^2 + y^2) } ## No covariates: works fine fit0 <- kppm(X ~ 1, clusters="MatClust") Y0 <- simulate(fit0, retry=0) ## Covariates: Simulation fails fit1 <- kppm(X ~ distorigin, clusters="MatClust") Y1 <- simulate(fit1, retry=0) }) local({ ## pixellate.ppp includes mapping from (x,y) to (row, col) Z <- pixellate(cells, savemap=TRUE) ind <- attr(Z, "map") m <- (as.matrix(Z))[ind] if(!all(m == 1)) stop("Coordinate mismatch in pixellate.ppp") }) ## ## tests/polygons.R ## ## $Revision: 1.4 $ $Date: 2019/11/03 03:18:05 $ ## require(spatstat) local({ co <- as.ppp(corners(letterR), letterR, check=FALSE) co[letterR] b <- letterR$bdry a <- sapply(b, xypolyselfint, yesorno=TRUE) a <- lapply(b, xypolyselfint, proper=TRUE) ## Simple example of self-crossing polygon x <- read.table("selfcross.txt", header=TRUE) y <- xypolyselfint(x) }) spatstat/tests/badwindow.txt0000644000176200001440000005451113115225157016006 0ustar liggesusers x y i 486959 6497047 1 487223 6497012 1 487293 6497170 1 487434 6497187 1 487504 6497047 1 487539 6496959 1 487557 6496889 1 488875 6496924 1 488945 6496643 1 490808 6496643 1 490737 6496854 1 490298 6497644 1 490140 6498541 1 490298 6498857 1 490491 6497855 1 490948 6496854 1 491036 6496555 1 491950 6496537 1 491282 6500298 1 491282 6501546 1 491124 6501792 1 491124 6501985 1 491563 6502319 1 491493 6502740 1 491475 6503355 1 491686 
6504375 1 491616 6505324 1 490772 6505675 1 490526 6506237 1 489683 6506237 1 489490 6505605 1 489578 6505359 1 489191 6505078 1 488892 6504023 1 488910 6503795 1 488716 6503812 1 488611 6504568 1 488031 6505201 1 487522 6505042 1 487522 6504919 1 487486 6504849 1 487416 6504884 1 487399 6504814 1 487346 6504832 1 487240 6504638 1 487117 6504515 1 487117 6503935 1 487276 6504006 1 487346 6503971 1 487399 6503865 1 487486 6503812 1 487574 6503777 1 487557 6503689 1 487082 6503303 1 486994 6502266 1 487205 6501159 1 487117 6500526 1 487188 6499437 1 487012 6498259 1 486924 6497029 1 487186 6499396 2 487182 6499396 2 487186 6499426 2 487186 6499396 2 487126 6500589 2 487126 6500476 2 487156 6500476 2 487156 6500176 2 487186 6500176 2 487186 6499462 2 487117 6500526 2 487126 6500589 2 487156 6500686 2 487140 6500686 2 487156 6500805 2 487156 6500686 2 487186 6500986 2 487181 6500986 2 487186 6501021 2 487186 6500986 2 487216 6501076 2 487194 6501076 2 487205 6501159 2 487187 6501256 2 487216 6501256 2 487216 6501076 2 487186 6501406 2 487186 6501260 2 487158 6501406 2 487186 6501406 2 487156 6501466 2 487156 6501417 2 487147 6501466 2 487156 6501466 2 487096 6501766 2 487096 6501732 2 487090 6501766 2 487096 6501766 2 487066 6502936 2 487066 6502636 2 487096 6502636 2 487096 6502606 2 487156 6502606 2 487156 6502486 2 487066 6502486 2 487066 6502456 2 487036 6502456 2 487036 6502156 2 487015 6502156 2 486994 6502266 2 487064 6503086 2 487066 6503086 2 487066 6503112 2 487066 6503116 2 487156 6503116 2 487156 6503026 2 487126 6503026 2 487126 6502996 2 487096 6502996 2 487096 6502936 2 487066 6502936 2 488956 6501496 3 488956 6501256 3 488926 6501256 3 488926 6501046 3 488896 6501046 3 488896 6500806 3 488866 6500806 3 488866 6500506 3 488836 6500506 3 488836 6500236 3 488806 6500236 3 488806 6499996 3 488776 6499996 3 488776 6499486 3 488686 6499486 3 488686 6499126 3 488716 6499126 3 488716 6499006 3 488626 6499006 3 488626 6499036 3 488596 6499036 3 488596 6499066 3 
488566 6499066 3 488566 6499126 3 488536 6499126 3 488536 6499216 3 488416 6499216 3 488416 6499456 3 488446 6499456 3 488446 6499696 3 488416 6499696 3 488416 6499936 3 488446 6499936 3 488446 6500056 3 488476 6500056 3 488476 6500146 3 488506 6500146 3 488506 6500266 3 488536 6500266 3 488536 6500386 3 488566 6500386 3 488566 6500656 3 488536 6500656 3 488536 6500986 3 488566 6500986 3 488566 6501136 3 488536 6501136 3 488536 6501376 3 488566 6501376 3 488566 6501406 3 488596 6501406 3 488596 6501496 3 488566 6501496 3 488566 6501616 3 488596 6501616 3 488596 6501796 3 488626 6501796 3 488626 6502036 3 488656 6502036 3 488656 6502096 3 488686 6502096 3 488686 6502246 3 488716 6502246 3 488716 6502276 3 488776 6502276 3 488776 6502336 3 488806 6502336 3 488806 6502426 3 488836 6502426 3 488836 6502636 3 488866 6502636 3 488866 6502666 3 488926 6502666 3 488926 6502696 3 488986 6502696 3 488986 6502726 3 489046 6502726 3 489046 6502756 3 489136 6502756 3 489136 6502096 3 489106 6502096 3 489106 6501976 3 489076 6501976 3 489076 6501826 3 489046 6501826 3 489046 6501736 3 489016 6501736 3 489016 6501586 3 488986 6501586 3 488986 6501496 3 490216 6502426 4 490216 6502246 4 490246 6502246 4 490246 6502186 4 490306 6502186 4 490306 6501946 4 490216 6501946 4 490216 6502096 4 490186 6502096 4 490186 6502156 4 490036 6502156 4 490036 6502126 4 489946 6502126 4 489946 6502096 4 489916 6502096 4 489916 6502066 4 489826 6502066 4 489826 6502036 4 489796 6502036 4 489796 6501946 4 489706 6501946 4 489706 6502036 4 489736 6502036 4 489736 6502186 4 489766 6502186 4 489766 6502216 4 489796 6502216 4 489796 6502276 4 489946 6502276 4 489946 6502306 4 489976 6502306 4 489976 6502336 4 490006 6502336 4 490006 6502366 4 490036 6502366 4 490036 6502426 4 488642 6504346 5 488716 6503812 5 488910 6503795 5 488892 6504023 5 488926 6504143 5 488926 6503806 5 488956 6503806 5 488956 6503686 5 488986 6503686 5 488986 6503566 5 489016 6503566 5 489016 6503476 5 489046 6503476 5 489046 
6503386 5 489076 6503386 5 489076 6503296 5 489106 6503296 5 489106 6503206 5 489136 6503206 5 489136 6503086 5 489166 6503086 5 489166 6502846 5 489046 6502846 5 489046 6503086 5 488926 6503086 5 488926 6503236 5 488746 6503236 5 488746 6503266 5 488536 6503266 5 488536 6503296 5 488506 6503296 5 488506 6503326 5 488416 6503326 5 488416 6503386 5 488326 6503386 5 488326 6503506 5 488356 6503506 5 488356 6503536 5 488416 6503536 5 488416 6503566 5 488446 6503566 5 488446 6503656 5 488626 6503656 5 488626 6503746 5 488656 6503746 5 488656 6503776 5 488686 6503776 5 488686 6503956 5 488656 6503956 5 488656 6503986 5 488626 6503986 5 488626 6504046 5 488596 6504046 5 488596 6504076 5 488566 6504076 5 488566 6504106 5 488536 6504106 5 488536 6504166 5 488506 6504166 5 488506 6504226 5 488476 6504226 5 488476 6504346 5 489886 6503386 6 489886 6503146 6 489916 6503146 6 489916 6503056 6 489736 6503056 6 489736 6503206 6 489706 6503206 6 489706 6503266 6 489676 6503266 6 489676 6503356 6 489796 6503356 6 489796 6503596 6 489916 6503596 6 489916 6503386 6 490006 6505666 7 489916 6505666 7 489916 6505756 7 490006 6505756 7 487426 6504856 8 487396 6504796 8 487276 6504676 8 490786 6505366 9 490786 6505336 9 491176 6505336 9 491176 6505276 9 491236 6505276 9 491236 6505126 9 491266 6505126 9 491266 6504976 9 491236 6504976 9 491236 6504916 9 491206 6504916 9 491206 6504886 9 491176 6504886 9 491176 6504856 9 491086 6504856 9 491086 6504886 9 490996 6504886 9 490996 6504916 9 490966 6504916 9 490966 6504946 9 490936 6504946 9 490936 6505006 9 490876 6505006 9 490876 6505186 9 490846 6505186 9 490846 6505246 9 490726 6505246 9 490726 6505276 9 490696 6505276 9 490696 6505366 9 487906 6505066 10 487906 6505036 10 487936 6505036 10 487936 6505006 10 487966 6505006 10 487966 6504616 10 487906 6504616 10 487906 6504586 10 487846 6504586 10 487846 6504556 10 487756 6504556 10 487756 6504526 10 487606 6504526 10 487606 6504646 10 487636 6504646 10 487636 6504766 10 487666 6504766 10 
487666 6504886 10 487726 6504886 10 487726 6504976 10 487756 6504976 10 487756 6505006 10 487786 6505006 10 487786 6505036 10 487816 6505036 10 487816 6505066 10 491416 6504856 11 491326 6504856 11 491326 6505006 11 491416 6505006 11 491386 6504736 12 491266 6504736 12 491266 6504826 12 491386 6504826 12 487456 6504586 13 487456 6504436 13 487366 6504436 13 487366 6504466 13 487306 6504466 13 487306 6504556 13 487336 6504556 13 487336 6504586 13 487396 6504586 13 487396 6504676 13 487486 6504676 13 487486 6504586 13 489226 6504646 14 489226 6504616 14 489256 6504616 14 489256 6504556 14 489286 6504556 14 489286 6504466 14 489106 6504466 14 489106 6504586 14 489136 6504586 14 489136 6504646 14 488296 6504406 15 488296 6504316 15 488206 6504316 15 488206 6504376 15 488176 6504376 15 488176 6504466 15 488206 6504466 15 488206 6504496 15 488236 6504496 15 488236 6504526 15 488326 6504526 15 488326 6504406 15 490666 6504466 16 490666 6504376 16 490696 6504376 16 490696 6504316 16 490756 6504316 16 490756 6504256 16 490786 6504256 16 490786 6504166 16 490696 6504166 16 490696 6504226 16 490576 6504226 16 490576 6504286 16 490546 6504286 16 490546 6504466 16 489346 6503986 17 489346 6504076 17 489406 6504076 17 489406 6504166 17 489526 6504166 17 489526 6504256 17 489496 6504256 17 489496 6504346 17 489586 6504346 17 489586 6504256 17 489646 6504256 17 489646 6504196 17 489706 6504196 17 489706 6504016 17 489676 6504016 17 489676 6503896 17 489586 6503896 17 489586 6503956 17 489496 6503956 17 489496 6503986 17 489346 6503986 17 489346 6503986 17 489346 6503836 17 489376 6503836 17 489376 6503746 17 489346 6503746 17 489346 6503566 17 489256 6503566 17 489256 6503506 17 489226 6503506 17 489226 6503416 17 489196 6503416 17 489196 6503386 17 489076 6503386 17 489076 6503446 17 489046 6503446 17 489046 6503566 17 489016 6503566 17 489016 6503626 17 488986 6503626 17 488986 6503836 17 489106 6503836 17 489106 6504106 17 489076 6504106 17 489076 6504226 17 489196 6504226 17 
489196 6504196 17 489226 6504196 17 489226 6504076 17 489256 6504076 17 489256 6503986 17 487936 6504166 18 487936 6504136 18 487966 6504136 18 487966 6504016 18 487846 6504016 18 487846 6504046 18 487816 6504046 18 487816 6504136 18 487846 6504136 18 487846 6504166 18 488596 6504046 19 488596 6503986 19 488626 6503986 19 488626 6503896 19 488596 6503896 19 488596 6503716 19 488506 6503716 19 488506 6503806 19 488476 6503806 19 488476 6503986 19 488506 6503986 19 488506 6504046 19 487396 6503896 20 487486 6503836 20 487516 6503806 20 487126 6503956 20 487216 6503986 20 488296 6503806 21 488296 6503776 21 488326 6503776 21 488326 6503746 21 488356 6503746 21 488356 6503626 21 488236 6503626 21 488236 6503656 21 488206 6503656 21 488206 6503686 21 488146 6503686 21 488146 6503776 21 488176 6503776 21 488176 6503806 21 491146 6503686 22 491146 6503626 22 491176 6503626 22 491176 6503536 22 491146 6503536 22 491146 6503476 22 491026 6503476 22 491026 6503656 22 491056 6503656 22 491056 6503686 22 487816 6503506 23 487816 6503476 23 487846 6503476 23 487846 6503386 23 487936 6503386 23 487936 6503356 23 487966 6503356 23 487966 6503296 23 488026 6503296 23 488026 6503236 23 488086 6503236 23 488086 6503116 23 487936 6503116 23 487936 6503146 23 487846 6503146 23 487846 6503176 23 487816 6503176 23 487816 6503206 23 487786 6503206 23 487786 6503386 23 487696 6503386 23 487696 6503356 23 487606 6503356 23 487606 6503506 23 490036 6503506 24 490036 6503386 24 490096 6503386 24 490096 6503266 24 490066 6503266 24 490066 6503176 24 490096 6503176 24 490096 6503026 24 489976 6503026 24 489976 6503086 24 489946 6503086 24 489946 6503146 24 489916 6503146 24 489916 6503386 24 489946 6503386 24 489946 6503506 24 489496 6503356 25 489406 6503356 25 489406 6503446 25 489496 6503446 25 488386 6503356 26 488386 6503326 26 488416 6503326 26 488416 6503236 26 488326 6503236 26 488326 6503266 26 488296 6503266 26 488296 6503356 26 490726 6503206 27 490636 6503206 27 490636 6503326 27 
490726 6503326 27 489496 6503056 28 489406 6503056 28 489406 6503176 28 489526 6503176 28 489526 6503086 28 489496 6503086 28 490726 6503086 29 490726 6502996 29 490756 6502996 29 490756 6502876 29 490666 6502876 29 490666 6502936 29 490636 6502936 29 490636 6503086 29 491176 6502996 30 491086 6502996 30 491086 6503086 30 491176 6503086 30 487786 6503056 31 487786 6503026 31 488116 6503026 31 488116 6502996 31 488266 6502996 31 488266 6502936 31 488626 6502936 31 488626 6502906 31 488806 6502906 31 488806 6502876 31 488836 6502876 31 488836 6502786 31 488806 6502786 31 488806 6502636 31 488776 6502636 31 488776 6502606 31 488686 6502606 31 488686 6502576 31 488656 6502576 31 488656 6502546 31 488506 6502546 31 488506 6502516 31 488476 6502516 31 488476 6502486 31 488416 6502486 31 488416 6502456 31 488356 6502456 31 488356 6502396 31 488296 6502396 31 488296 6502306 31 488326 6502306 31 488326 6502216 31 488416 6502216 31 488416 6502246 31 488446 6502246 31 488446 6502276 31 488476 6502276 31 488476 6502306 31 488506 6502306 31 488506 6502336 31 488536 6502336 31 488536 6502366 31 488566 6502366 31 488566 6502426 31 488596 6502426 31 488596 6502456 31 488656 6502456 31 488656 6502486 31 488806 6502486 31 488806 6502396 31 488776 6502396 31 488776 6502366 31 488746 6502366 31 488746 6502306 31 488686 6502306 31 488686 6502246 31 488626 6502246 31 488626 6502186 31 488536 6502186 31 488536 6502156 31 488506 6502156 31 488506 6502126 31 488476 6502126 31 488476 6502006 31 488416 6502006 31 488416 6501976 31 488386 6501976 31 488386 6501946 31 488326 6501946 31 488326 6501886 31 488296 6501886 31 488296 6501856 31 488266 6501856 31 488266 6501706 31 488206 6501706 31 488206 6501676 31 488176 6501676 31 488176 6501646 31 488086 6501646 31 488086 6501616 31 487996 6501616 31 487996 6501586 31 487876 6501586 31 487876 6501556 31 487786 6501556 31 487786 6501646 31 487756 6501646 31 487756 6501766 31 487726 6501766 31 487726 6501856 31 487756 6501856 31 487756 6501946 31 
487816 6501946 31 487816 6502066 31 487786 6502066 31 487786 6502096 31 487666 6502096 31 487666 6502186 31 487606 6502186 31 487606 6502246 31 487576 6502246 31 487576 6502276 31 487546 6502276 31 487546 6502306 31 487516 6502306 31 487516 6502426 31 487456 6502426 31 487456 6502636 31 487486 6502636 31 487486 6502696 31 487546 6502696 31 487546 6502786 31 487516 6502786 31 487516 6502906 31 487546 6502906 31 487546 6502966 31 487606 6502966 31 487606 6502996 31 487636 6502996 31 487636 6503026 31 487666 6503026 31 487666 6503056 31 489466 6502816 32 489466 6502786 32 489496 6502786 32 489496 6502756 32 489526 6502756 32 489526 6502726 32 489586 6502726 32 489586 6502696 32 489616 6502696 32 489616 6502486 32 489586 6502486 32 489586 6502366 32 489616 6502366 32 489616 6502156 32 489586 6502156 32 489586 6502096 32 489556 6502096 32 489556 6501976 32 489586 6501976 32 489586 6501796 32 489556 6501796 32 489556 6501766 32 489436 6501766 32 489436 6501646 32 489406 6501646 32 489406 6501616 32 489316 6501616 32 489316 6501526 32 489196 6501526 32 489196 6501586 32 489106 6501586 32 489106 6501856 32 489166 6501856 32 489166 6502096 32 489226 6502096 32 489226 6502246 32 489166 6502246 32 489166 6502426 32 489196 6502426 32 489196 6502486 32 489226 6502486 32 489226 6502576 32 489256 6502576 32 489256 6502606 32 489286 6502606 32 489286 6502726 32 489316 6502726 32 489316 6502786 32 489376 6502786 32 489376 6502816 32 487276 6502336 33 487276 6502306 33 487306 6502306 33 487306 6502216 33 487216 6502216 33 487216 6502096 33 487126 6502096 33 487126 6502246 33 487156 6502246 33 487156 6502306 33 487186 6502306 33 487186 6502336 33 490126 6501856 34 490036 6501856 34 490036 6501976 34 490186 6501976 34 490186 6501886 34 490126 6501886 34 490756 6501406 35 490666 6501406 35 490666 6501496 35 490756 6501496 35 488116 6501346 36 488116 6501316 36 488146 6501316 36 488146 6501076 36 488116 6501076 36 488116 6501016 36 488056 6501016 36 488056 6500866 36 488086 6500866 36 
488086 6500836 36 488116 6500836 36 488116 6500746 36 488146 6500746 36 488146 6500716 36 488236 6500716 36 488236 6500776 36 488296 6500776 36 488296 6500926 36 488386 6500926 36 488386 6500776 36 488356 6500776 36 488356 6500656 36 488326 6500656 36 488326 6500566 36 488356 6500566 36 488356 6500476 36 488236 6500476 36 488236 6500506 36 488146 6500506 36 488146 6500416 36 488206 6500416 36 488206 6500326 36 488116 6500326 36 488116 6500296 36 488086 6500296 36 488086 6500206 36 487996 6500206 36 487996 6500116 36 488026 6500116 36 488026 6500026 36 488056 6500026 36 488056 6499846 36 488116 6499846 36 488116 6499786 36 488146 6499786 36 488146 6499696 36 488176 6499696 36 488176 6499606 36 488056 6499606 36 488056 6499636 36 487966 6499636 36 487966 6499606 36 487876 6499606 36 487876 6499636 36 487846 6499636 36 487846 6499726 36 487816 6499726 36 487816 6499786 36 487786 6499786 36 487786 6499936 36 487846 6499936 36 487846 6500026 36 487726 6500026 36 487726 6499996 36 487636 6499996 36 487636 6500086 36 487666 6500086 36 487666 6500356 36 487636 6500356 36 487636 6500446 36 487756 6500446 36 487756 6500566 36 487786 6500566 36 487786 6500656 36 487816 6500656 36 487816 6500746 36 487846 6500746 36 487846 6500896 36 487816 6500896 36 487816 6501076 36 487846 6501076 36 487846 6501166 36 487906 6501166 36 487906 6501286 36 487996 6501286 36 487996 6501316 36 488026 6501316 36 488026 6501346 36 489226 6501046 37 489136 6501046 37 489136 6501196 37 489226 6501196 37 490666 6500896 38 490576 6500896 38 490576 6501106 38 490636 6501106 38 490636 6501196 38 490726 6501196 38 490726 6501046 38 490696 6501046 38 490696 6501016 38 490666 6501016 38 489646 6500926 39 489646 6500836 39 489676 6500836 39 489676 6500716 39 489556 6500716 39 489556 6500926 39 488986 6500836 40 488986 6500776 40 489046 6500776 40 489046 6500626 40 489106 6500626 40 489106 6500446 40 489016 6500446 40 489016 6500416 40 488986 6500416 40 488986 6500356 40 488896 6500356 40 488896 6500836 40 
488356 6500296 41 488356 6500176 41 488386 6500176 41 488386 6500026 41 488266 6500026 41 488266 6500056 41 488206 6500056 41 488206 6500116 41 488176 6500116 41 488176 6500236 41 488206 6500236 41 488206 6500296 41 489226 6500146 42 489136 6500146 42 489136 6500236 42 489226 6500236 42 489226 6499756 43 489046 6499756 43 489046 6499846 43 489106 6499846 43 489106 6499876 43 489136 6499876 43 489136 6499936 43 489226 6499936 43 487486 6499666 44 487396 6499666 44 487396 6499756 44 487486 6499756 44 488386 6499666 45 488386 6499636 45 488416 6499636 45 488416 6499546 45 488386 6499546 45 488386 6499486 45 488296 6499486 45 488296 6499576 45 488266 6499576 45 488266 6499666 45 487936 6499546 46 487936 6499186 46 487906 6499186 46 487906 6499156 46 487876 6499156 46 487876 6499126 46 487816 6499126 46 487816 6499066 46 487786 6499066 46 487786 6498886 46 487636 6498886 46 487636 6499066 46 487606 6499066 46 487606 6499186 46 487576 6499186 46 487576 6499306 46 487696 6499306 46 487696 6499396 46 487606 6499396 46 487606 6499486 46 487786 6499486 46 487786 6499516 46 487846 6499516 46 487846 6499546 46 489286 6499396 47 489166 6499396 47 489166 6499486 47 489286 6499486 47 488296 6499036 48 488296 6498886 48 488446 6498886 48 488446 6498796 48 488506 6498796 48 488506 6498706 48 488446 6498706 48 488446 6498676 48 488386 6498676 48 488386 6498646 48 488356 6498646 48 488356 6498616 48 488116 6498616 48 488116 6498586 48 488056 6498586 48 488056 6498556 48 488026 6498556 48 488026 6498526 48 487876 6498526 48 487876 6498646 48 487996 6498646 48 487996 6498676 48 488026 6498676 48 488026 6498706 48 488116 6498706 48 488116 6498976 48 488146 6498976 48 488146 6499006 48 488176 6499006 48 488176 6499096 48 488236 6499096 48 488236 6499306 48 488266 6499306 48 488266 6499396 48 488386 6499396 48 488386 6499306 48 488356 6499306 48 488356 6499096 48 488326 6499096 48 488326 6499036 48 489886 6499276 49 489766 6499276 49 489766 6499396 49 489886 6499396 49 490156 6499066 50 
490156 6499006 50 490186 6499006 50 490186 6498766 50 490156 6498766 50 490096 6498556 50 490096 6498526 50 489976 6498526 50 489976 6498706 50 490066 6498706 50 490066 6498826 50 489766 6498826 50 489766 6498916 50 489736 6498916 50 489736 6499006 50 489766 6499006 50 489766 6499066 50 489976 6499066 50 489976 6499036 50 490066 6499036 50 490066 6499066 50 487756 6498466 51 487756 6498256 51 487666 6498256 51 487666 6498226 51 487636 6498226 51 487636 6498196 51 487516 6498196 51 487516 6498226 51 487486 6498226 51 487486 6498376 51 487396 6498376 51 487396 6498406 51 487336 6498406 51 487336 6498526 51 487576 6498526 51 487576 6498556 51 487816 6498556 51 487816 6498466 51 489316 6498106 52 489226 6498106 52 489226 6498226 52 489316 6498226 52 490066 6497836 53 489976 6497836 53 489976 6497956 53 490066 6497956 53 489436 6497536 54 489346 6497536 54 489346 6497926 54 489466 6497926 54 489466 6497596 54 489436 6497596 54 490726 6497926 55 490726 6497656 55 490756 6497656 55 490756 6497596 55 490816 6497596 55 490816 6497506 55 490786 6497506 55 490786 6497476 55 490696 6497476 55 490696 6497536 55 490666 6497536 55 490666 6497656 55 490636 6497656 55 490636 6497746 55 490606 6497746 55 490606 6497776 55 490576 6497776 55 490576 6497926 55 490156 6497746 56 490156 6497716 56 490186 6497716 56 490186 6497656 56 490216 6497656 56 490216 6497566 56 490336 6497566 56 490336 6497476 56 490306 6497476 56 490306 6497326 56 490246 6497326 56 490246 6497296 56 490096 6497296 56 490096 6497356 56 490066 6497356 56 490066 6497596 56 490036 6497596 56 490036 6497716 56 490066 6497716 56 490066 6497746 56 488026 6497536 57 487936 6497536 57 487936 6497626 57 488026 6497626 57 489466 6497206 58 489346 6497206 58 489346 6497446 58 489376 6497446 58 489376 6497506 58 489526 6497506 58 489526 6497296 58 489466 6497296 58 490876 6497266 59 490786 6497266 59 490786 6497356 59 490876 6497356 59 490936 6497236 60 490936 6497206 60 490996 6497206 60 490996 6497176 60 491026 6497176 60 
491026 6496996 60 491086 6496996 60 491086 6496936 60 491206 6496936 60 491206 6496696 60 491116 6496696 60 491116 6496726 60 491086 6496726 60 491086 6496846 60 491056 6496846 60 491056 6496906 60 490996 6496906 60 490996 6496966 60 490936 6496966 60 490936 6497026 60 490906 6497026 60 490906 6497116 60 490876 6497116 60 490876 6497146 60 490846 6497146 60 490846 6497236 60 490366 6496906 61 490276 6496906 61 490276 6497026 61 490306 6497026 61 490306 6497176 61 490396 6497176 61 490396 6497206 61 490516 6497206 61 490516 6497116 61 490456 6497116 61 490456 6497056 61 490396 6497056 61 490396 6497026 61 490366 6497026 61 487456 6497146 62 487486 6497116 62 487486 6497086 62 487546 6497086 62 487546 6496936 62 487216 6497026 62 487216 6497086 62 487126 6497086 62 487126 6497176 62 489586 6496936 63 489376 6496936 63 489376 6497026 63 489586 6497026 63 spatstat/tests/testsAtoC.R0000644000176200001440000003761213616730337015335 0ustar liggesusers#' tests/aucroc.R #' #' AUC and ROC code #' #' $Revision: 1.3 $ $Date: 2019/12/06 06:32:06 $ require(spatstat) local({ A <- roc(spiders, "x") B <- auc(spiders, "y") fit <- kppm(redwood ~ I(y-x)) a <- roc(fit) b <- auc(fit) fet <- ppm(amacrine~x+y+marks) d <- roc(fet) e <- auc(fet) fut <- lppm(spiders ~ I(y-x)) f <- roc(fut) g <- auc(fut) }) ## badwindowcheck.R ## $Revision: 1.2 $ $Date: 2014/01/27 07:18:41 $ ## require(spatstat) local({ ## Simple example of self-crossing polygon x <- read.table("selfcross.txt", header=TRUE) ## Auto-repair w <- owin(poly=x) ## Real data involving various quirks b <- read.table("badwindow.txt", header=TRUE) b <- split(b, factor(b$i)) b <- lapply(b, function(z) { as.list(z[,-3]) }) ## make owin without checking W <- owin(poly=b, check=FALSE, fix=FALSE) ## Apply stringent checks owinpolycheck(W,verbose=FALSE) ## Auto-repair W2 <- owin(poly=b) }) ## tests/cdf.test.R require(spatstat) local({ ## (1) check cdf.test with strange data ## Marked point patterns with some marks not represented AC <- 
split(ants, un=FALSE)$Cataglyphis AM <- split(ants, un=FALSE)$Messor DM <- distmap(AM) ## should produce a warning, rather than a crash: cdf.test(AC, DM) ## should be OK: cdf.test(unmark(AC), DM) cdf.test(unmark(AC), DM, "cvm") cdf.test(unmark(AC), DM, "ad") ## other code blocks cdf.test(finpines, "x") ## (2) linear networks set.seed(42) X <- runiflpp(20, simplenet) cdf.test(X, "x") cdf.test(X, "x", "cvm") cdf.test(X %mark% runif(20), "x") fit <- lppm(X ~1) cdf.test(fit, "y") cdf.test(fit, "y", "cvm") cdf.test(fit, "y", "ad") ## marked cdf.test(chicago, "y") cdf.test(subset(chicago, marks != "assault"), "y") ## (3) Monte Carlo test for Gibbs model fit <- ppm(cells ~ 1, Strauss(0.07)) cdf.test(fit, "x", nsim=9) ## cdf.test.slrm fut <- slrm(japanesepines ~ x + y) Z <- distmap(japanesepines) cdf.test(fut, Z) }) #' tests/circular.R #' #' Circular data and periodic distributions #' #' $Revision: 1.3 $ $Date: 2019/12/06 06:15:22 $ require(spatstat) local({ a <- pairorient(redwood, 0.05, 0.15, correction="none") b <- pairorient(redwood, 0.05, 0.15, correction="best") rose(a) rose(b, start="N", clockwise=TRUE) #' arcs on the circle set.seed(19171025) aa <- replicate(7, runif(1, 0, 2*pi) + c(0, runif(1, 0, pi)), simplify=FALSE) bb <- circunion(aa) assertsingle <- function(x, a, id) { y <- circunion(x) if(length(y) != 1 || max(abs(y[[1]] - a)) > .Machine$double.eps) stop(paste("Incorrect result from circunion in case", id), call.=FALSE) invisible(NULL) } assertsingle(list(c(pi/3, pi), c(pi/2, 3*pi/2)), c(pi/3, 3*pi/2), 1) assertsingle(list(c(0, pi/2), c(pi/4, pi)), c(0,pi), 2) assertsingle(list(c(-pi/4, pi/2), c(pi/4, pi)), c((2-1/4)*pi, pi), 3) }) ## tests/closeshave.R ## check 'closepairs/crosspairs' code ## validity and memory allocation ## $Revision: 1.22 $ $Date: 2020/02/06 05:53:13 $ local({ r <- 0.12 close.all <- closepairs(redwood, r) close.ij <- closepairs(redwood, r, what="indices") close.ijd <- closepairs(redwood, r, what="ijd") close.every <- closepairs(redwood, 
r, what="all", distinct=FALSE) stopifnot(identical(close.ij, close.all[c("i","j")])) stopifnot(identical(close.ijd, close.all[c("i","j","d")])) #' test memory overflow code close.cigar <- closepairs(redwood, r, what="ijd", nsize=2) close.cigar <- closepairs(redwood, r, what="ijd", nsize=2, periodic=TRUE) Y <- split(amacrine) on <- Y$on off <- Y$off cross.all <- crosspairs(on, off, r) cross.ij <- crosspairs(on, off, r, what="indices") cross.ijd <- crosspairs(on, off, r, what="ijd") cross.every <- crosspairs(on, off, r, what="all", distinct=FALSE) stopifnot(identical(cross.ij, cross.all[c("i","j")])) stopifnot(identical(cross.ijd, cross.all[c("i","j","d")])) # closethresh vs closepairs: EXACT agreement thresh <- 0.08 clt <- closethresh(redwood, r, thresh) cl <- with(closepairs(redwood, r), list(i=i, j=j, th = (d <= thresh))) if(!identical(cl, clt)) stop("closepairs and closethresh disagree") reordered <- function(a) { o <- with(a, order(i,j)) as.list(as.data.frame(a)[o,,drop=FALSE]) } samesame <- function(a, b) { identical(reordered(a), reordered(b)) } ## ............................................... #' compare with older, slower code op <- spatstat.options(closepairs.newcode=FALSE, closepairs.altcode=FALSE, crosspairs.newcode=FALSE) ## ............................................... old.close.ij <- closepairs(redwood, r, what="indices") old.cross.ij <- crosspairs(on, off, r, what="indices") stopifnot(samesame(close.ij, old.close.ij)) stopifnot(samesame(cross.ij, old.cross.ij)) # execute only: old.close.every <- closepairs(redwood, r, what="all", distinct=FALSE) old.close.once <- closepairs(redwood, r, what="all", twice=FALSE) #' test memory overflow code old.close.cigar <- closepairs(redwood, r, what="ijd", nsize=2) old.close.cigar <- closepairs(redwood, r, what="ijd", nsize=2, periodic=TRUE) ## ............................................... spatstat.options(op) ## ............................................... ## ............................................... 
#' alternative code - execution only op <- spatstat.options(closepairs.newcode=FALSE, closepairs.altcode=TRUE) alt.close.ij <- closepairs(redwood, r, what="indices") alt.close.ijd <- closepairs(redwood, r, what="ijd") alt.close.all <- closepairs(redwood, r, what="all") #' test memory overflow code alt.close.cigar <- closepairs(redwood, r, what="ijd", nsize=2) alt.close.cigar <- closepairs(redwood, r, what="ijd", nsize=2, periodic=TRUE) spatstat.options(op) ## ............................................... # Rasmus' example R <- 0.04 U <- as.ppp(gridcenters(owin(), 50, 50), W=owin()) cp <- crosspairs(U, U, R) G <- matrix(0, npoints(U), npoints(U)) G[cbind(cp$i, cp$j)] <- 1 if(!isSymmetric(G)) stop("crosspairs is not symmetric in Rasmus example") #' periodic distance pclose <- function(X, R, method=c("raw", "C")) { method <- match.arg(method) switch(method, raw = { D <- pairdist(X, periodic=TRUE) diag(D) <- Inf result <- which(D <= R, arr.ind=TRUE) }, C = { result <- closepairs(X, R, periodic=TRUE, what="indices") }) result <- as.data.frame(result) colnames(result) <- c("i","j") return(result) } #' pick a threshold value which avoids GCC bug 323 RR <- 0.193 A <- pclose(redwood, RR, "raw") B <- pclose(redwood, RR, "C") if(!samesame(A,B)) stop("closepairs.ppp(periodic=TRUE) gives wrong answer") #' other functions that don't have a help file niets <- crosspairquad(quadscheme(cells), 0.1) #' other code blocks u <- closepairs(cells, 0.09, periodic=TRUE, what="all") v <- closepairs(cells, 0.07, twice=FALSE, neat=TRUE) #' tight cluster - guess count does not work Xc <- runifpoint(100, square(0.01)) Window(Xc) <- square(1) z <- closepairs(Xc, 0.02, what="indices", distinct=FALSE) z <- closepairs(Xc, 0.02, what="ijd", distinct=FALSE) z <- closepairs(Xc, 0.02, what="all", distinct=FALSE) #' same task, older code aop <- spatstat.options(closepairs.newcode=FALSE) z <- closepairs(Xc, 0.02, what="indices", distinct=FALSE) z <- closepairs(Xc, 0.02, what="ijd", distinct=FALSE) z <- 
closepairs(Xc, 0.02, what="all", distinct=FALSE) spatstat.options(aop) }) local({ #' Three-dimensional X <- runifpoint3(100) cl <- closepairs(X, 0.2, what="indices") cl <- closepairs(X, 0.2, what="ijd") cl <- closepairs(X, 0.2, distinct=FALSE) cl <- closepairs(X, 0.2, distinct=FALSE, what="indices") cl <- closepairs(X, 0.2, distinct=FALSE, what="ijd") cl <- closepairs(X, 0.2, twice=FALSE, neat=TRUE) #' Test memory overflow code cl <- closepairs(X, 0.2, what="ijd", nsize=2) #' trap obsolete usage cl <- closepairs(X, 0.2, ordered=FALSE) #' crosspairs Y <- runifpoint3(100) cr <- crosspairs(X, Y, 0.2, what="indices") cr <- crosspairs(X, Y, 0.2, what="ijd") #' Test memory overflow code cr <- crosspairs(X, Y, 0.2, what="ijd", nsize=2) #' markmarkscatter uses closepairs.pp3 marks(X) <- runif(npoints(X)) markmarkscatter(X, 0.2) markmarkscatter(X[FALSE], 0.2) }) local({ #' weightedclosepairs is currently in strauss.R wi <- weightedclosepairs(redwood, 0.05, "isotropic") wt <- weightedclosepairs(redwood, 0.05, "translate") wp <- weightedclosepairs(redwood, 0.05, "periodic") }) local({ #' experimental r <- 0.08 a <- closepairs(redwood, r) b <- tweak.closepairs(a, r, 26, 0.1, 0.1) X <- runifpoint3(30) rr <- 0.2 cl <- closepairs(X, rr) ii <- cl$i[[1]] xl <- tweak.closepairs(cl, rr, ii, 0.05, -0.05, 0.05) }) reset.spatstat.options() #' #' tests/cluck.R #' #' Tests of "click*" functions #' using queueing feature of spatstatLocator #' #' $Revision: 1.3 $ $Date: 2019/12/21 04:43:36 $ require(spatstat) local({ #' clickppp spatstat.utils::queueSpatstatLocator(runif(5), runif(5)) XA <- clickppp(hook=square(0.5)) spatstat.utils::queueSpatstatLocator(runif(6), runif(6)) XB <- clickppp(n=3, types=c("a", "b")) #' clickbox spatstat.utils::queueSpatstatLocator(runif(2), runif(2)) BB <- clickbox() #' clickdist spatstat.utils::queueSpatstatLocator(runif(2), runif(2)) dd <- clickdist() #' clickpoly hex <- vertices(disc(radius=0.4, centre=c(0.5, 0.5), npoly=6)) 
spatstat.utils::queueSpatstatLocator(hex) PA <- clickpoly() holy <- vertices(disc(radius=0.2, centre=c(0.5, 0.5), npoly=6)) holy <- lapply(holy, rev) spatstat.utils::queueSpatstatLocator(concatxy(hex, holy)) PB <- clickpoly(np=2, nv=6) #' clicklpp Y <- coords(runiflpp(6, simplenet)) spatstat.utils::queueSpatstatLocator(Y) XL <- clicklpp(simplenet) spatstat.utils::queueSpatstatLocator(Y) XM <- clicklpp(simplenet, n=3, types=c("a", "b")) #' identify.psp E <- edges(letterR)[c(FALSE, TRUE)] Z <- ppp(c(2.86, 3.65, 3.15), c(1.69, 1.98, 2.56), window=Frame(letterR)) spatstat.utils::queueSpatstatLocator(Z) identify(E) #' lineardisc plot(simplenet) spatstat.utils::queueSpatstatLocator(as.ppp(runiflpp(1, simplenet))) V <- lineardisc(simplenet, r=0.3) #' transect.im Z <- density(cells) spatstat.utils::queueSpatstatLocator(runifpoint(2, Window(cells))) TZ <- transect.im(Z, click=TRUE) }) ## tests/colour.R ## ## Colour value manipulation and colour maps ## ## $Revision: 1.7 $ $Date: 2020/01/11 05:01:45 $ ## require(spatstat) local({ f <- function(n) grey(seq(0,1,length=n)) z <- to.grey(f) h <- colourmap(rainbow(9), range=c(0.01, 0.1)) plot(h, labelmap=100) a <- colourmap(rainbow(12), range=as.Date(c("2018-01-01", "2018-12-31"))) print(a) print(summary(a)) a(as.Date("2018-06-15")) g <- colourmap(rainbow(4), breaks=as.Date(c("2018-01-01", "2018-04-01", "2018-07-01", "2018-10-01", "2018-12-31"))) print(g) print(summary(g)) g(as.Date("2018-06-15")) b <- colourmap(rainbow(12), inputs=month.name) print(b) print(summary(b)) to.grey(b) to.grey(b, transparent=TRUE) plot(b, vertical=FALSE) plot(b, vertical=TRUE) plot(b, vertical=FALSE, gap=0) plot(b, vertical=TRUE, gap=0) plot(b, vertical=FALSE, xlim=c(0, 2)) plot(b, vertical=TRUE, xlim=c(0,2)) plot(b, vertical=FALSE, ylim=c(0, 2)) plot(b, vertical=TRUE, ylim=c(0,2)) argh <- list(a="iets", e="niets", col=b, f=42) arr <- col.args.to.grey(argh) rrgh <- col.args.to.grey(argh, transparent=TRUE) #' constant colour map colourmap("grey", 
range=c(0.01, 0.1)) colourmap("grey", range=as.Date(c("2018-01-01", "2018-12-31"))) colourmap("grey", breaks=as.Date(c("2018-01-01", "2018-04-01", "2018-07-01", "2018-10-01", "2018-12-31"))) colourmap("grey", inputs=month.name) #' empty colour map niets <- lut() print(niets) summary(niets) niets <- colourmap() print(niets) summary(niets) plot(niets) #' interpolation - of transparent colours co <- colourmap(inputs=c(0, 0.5, 1), rgb(red=c(1,0,0), green=c(0,1,0), blue=c(0,0,1), alpha=c(0.3, 0.6, 0.9))) plot(interp.colourmap(co)) }) #' #' contact.R #' #' Check machinery for first contact distributions #' #' $Revision: 1.5 $ $Date: 2019/10/14 08:34:27 $ require(spatstat) local({ #' reduce complexity Y <- as.mask(heather$coarse, dimyx=c(100, 50)) X <- runifpoint(100, win = complement.owin(Y)) G <- Gfox(X, Y) J <- Jfox(X, Y) Y <- as.polygonal(Y) X <- runifpoint(100, win = complement.owin(Y)) G <- Gfox(X, Y) J <- Jfox(X, Y) op <- spatstat.options(exactdt.checks.data=TRUE) U <- exactdt(X) spatstat.options(op) }) reset.spatstat.options() #' #' tests/contrib.R #' #' Tests for user-contributed code in spatstat #' #' $Revision: 1.1 $ $Date: 2018/07/01 04:48:25 $ require(spatstat) local({ #' Jinhom #' Marie-Colette van Lieshout and Ottmar Cronie X <- redwood3 fit <- ppm(X ~ polynom(x,y,2)) lam <- predict(fit) lamX <- fitted(fit, dataonly=TRUE) lmin <- 0.9 * min(lam) g1 <- Ginhom(X, lambda=fit, update=TRUE) g2 <- Ginhom(X, lambda=fit, update=FALSE, lmin = lmin) g3 <- Ginhom(X, lambda=lam, lmin=lmin) g4 <- Ginhom(X, lambda=lamX, lmin=lmin) f1 <- Finhom(X, lambda=fit, update=TRUE) f2 <- Finhom(X, lambda=fit, update=FALSE) f3 <- Finhom(X, lambda=lam, lmin=lmin) }) # tests/correctC.R # check for agreement between C and interpreted code # for interpoint distances etc. 
# $Revision: 1.6 $ $Date: 2018/10/07 09:58:42 $ require(spatstat) local({ eps <- .Machine$double.eps * 4 checkagree <- function(A, B, blurb) { maxerr <- max(abs(A-B)) cat("Discrepancy", maxerr, "for", blurb, fill=TRUE) if(maxerr > eps) stop(paste("Algorithms for", blurb, "disagree")) return(TRUE) } ## pairdist.ppp set.seed(190901) X <- rpoispp(42) dC <- pairdist(X, method="C") dR <- pairdist(X, method="interpreted") checkagree(dC, dR, "pairdist()") dCp <- pairdist(X, periodic=TRUE, method="C") dRp <- pairdist(X, periodic=TRUE, method="interpreted") checkagree(dCp, dRp, "pairdist(periodic=TRUE)") dCp2 <- pairdist(X, periodic=TRUE, squared=TRUE, method="C") dRp2 <- pairdist(X, periodic=TRUE, squared=TRUE, method="interpreted") checkagree(dCp2, dRp2, "pairdist(periodic=TRUE, squared=TRUE)") ## crossdist.ppp Y <- rpoispp(42) dC <- crossdist(X, Y, method="C") dR <- crossdist(X, Y, method="interpreted") checkagree(dC, dR, "crossdist()") dC <- crossdist(X, Y, periodic=TRUE, method="C") dR <- crossdist(X, Y, periodic=TRUE, method="interpreted") checkagree(dC, dR, "crossdist(periodic=TRUE)") dC2 <- crossdist(X, Y, periodic=TRUE, squared=TRUE, method="C") dR2 <- crossdist(X, Y, periodic=TRUE, squared=TRUE, method="interpreted") checkagree(dC2, dR2, "crossdist(periodic=TRUE, squared=TRUE)") # nndist.ppp nnC <- nndist(X, method="C") nnI <- nndist(X, method="interpreted") checkagree(nnC, nnI, "nndist()") nn3C <- nndist(X, k=3, method="C") nn3I <- nndist(X, k=3, method="interpreted") checkagree(nn3C, nn3I, "nndist(k=3)") # nnwhich.ppp nwC <- nnwhich(X, method="C") nwI <- nnwhich(X, method="interpreted") checkagree(nwC, nwI, "nnwhich()") nw3C <- nnwhich(X, k=3, method="C") nw3I <- nnwhich(X, k=3, method="interpreted") checkagree(nw3C, nw3I, "nnwhich(k=3)") # whist set.seed(98123) x <- runif(1000) w <- sample(1:5, 1000, replace=TRUE) b <- seq(0,1,length=101) op <- spatstat.options(Cwhist=TRUE) aT <- whist(x,b,w) spatstat.options(Cwhist=FALSE) aF <- whist(x,b,w) if(!all(aT == aF)) 
stop("Algorithms for whist disagree") spatstat.options(op) }) reset.spatstat.options() spatstat/tests/testsT.R0000644000176200001440000002036113616162335014700 0ustar liggesusers#' tests/tessera.R #' Tessellation code, not elsewhere tested #' $Revision: 1.7 $ $Date: 2019/10/17 01:45:56 $ #' require(spatstat) local({ W <- owin() Wsub <- square(0.5) X <- runifpoint(7, W) A <- dirichlet(X) marks(A) <- 1:nobjects(A) Z <- distmap(letterR, invert=TRUE)[letterR, drop=FALSE] H <- tess(xgrid=0:2, ygrid=0:3) #' discretisation of tiles V <- as.im(A) B <- tess(window=as.mask(W), tiles=tiles(A)) #' logical images D <- tess(image=(Z > 0.2)) U <- (Z > -0.2) # TRUE or NA E <- tess(image=U, keepempty=TRUE) G <- tess(image=U, keepempty=FALSE) #' methods flay <- function(op, ..., Rect=H, Poly=A, Img=E) { a <- do.call(op, list(Rect, ...)) b <- do.call(op, list(Poly, ...)) e <- do.call(op, list(Img, ...)) } flay(reflect) flay(flipxy) flay(shift, vec=c(1,2)) flay(scalardilate, f=2) flay(rotate, angle=pi/3, centre=c(0, 0)) flay(rotate, angle=pi/2) flay(affine, mat=matrix(c(1,2,0,1), 2, 2), vec=c(1,2)) flay(affine, mat=diag(c(1,2))) flay(as.data.frame) ## unitname(A) <- "km" unitname(B) <- c("metre", "metres") unitname(B) print(B) Bsub <- B[c(3,5,7)] print(Bsub) tilenames(H) <- letters[seq_along(tilenames(H))] G <- tess(xgrid=(0:3)/3, ygrid=(0:3)/3) tilenames(G) <- letters[1:9] h <- tilenames(G) GG <- as.tess(tiles(G)) #' Pe <- intersect.tess(A, Wsub, keepmarks=TRUE) Pm <- intersect.tess(A, as.mask(Wsub), keepmarks=TRUE) H <- dirichlet(runifpoint(4, W)) AxH <- intersect.tess(A, H, keepmarks=TRUE) # A is marked, H is not HxA <- intersect.tess(H, A, keepmarks=TRUE) # A is marked, H is not b <- bdist.tiles(D) b <- bdist.tiles(A[c(3,5,7)]) #' Eim <- as.im(E, W=letterR) #' #' chop.tess #' horiz/vert lines W <- square(1) H <- infline(h=(2:4)/5) V <- infline(v=(3:4)/5) WH <- chop.tess(W, H) WV <- chop.tess(W, V) #' polygonal tessellation D <- dirichlet(runifpoint(4)) DH <- chop.tess(D, H) DV <- 
chop.tess(D, V) #' image-based tessellation f <- function(x,y){factor(round(4* (x^2 + y^2)))} A <- tess(image=as.im(f, W=W)) L <- infline(p=(1:3)/3, theta=pi/4) AL <- chop.tess(A, L) AH <- chop.tess(A, H) AV <- chop.tess(A, V) #' #' quantess #' quantess.owin a <- quantess(square(1), "x", 3) a <- quantess(square(1), "y", 3) a <- quantess(square(1), "rad", 5, origin=c(1/2, 1/3)) a <- quantess(square(1), "ang", 7, origin=c(1/2, 1/3)) ZFUN <- function(x,y){y-x} a <- quantess(square(1), ZFUN, 3) b <- quantess(letterR, "y", 3) #' quantess.ppp d <- quantess(cells, "y", 4) g <- quantess(demopat, "x", 5) g <- quantess(demopat, "y", 5) g <- quantess(demopat, "rad", 5, origin=c(4442, 4214)) g <- quantess(demopat, "ang", 5, origin=c(4442, 4214)) g <- quantess(demopat, ZFUN, 7) #' quantess.im D <- distmap(demopat) h <- quantess(D, "y", 4) h <- quantess(D, ZFUN, 5) g <- quantess(D, "rad", 5, origin=c(4442, 4214)) g <- quantess(D, "ang", 5, origin=c(4442, 4214)) #' X <- shift(chorley, vec = c(1e6, 0)) tes <- quantess(X, "x", 4) if(anyDuplicated(tilenames(tes))) stop("quantess produced non-unique tilenames") ## ## da <- dirichletAreas(discretise(runifpoint(15, letterR))) }) # # tests/testaddvar.R # # test addvar options # # $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ require(spatstat) local({ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X ~y) addvar(model, "x", crosscheck=TRUE) addvar(model, "x", bw.input="quad") w <- square(0.5) addvar(model, "x", subregion=w) addvar(model, "x", subregion=w, bw.input="points") Z <- as.im(function(x,y) { x }, Window(X)) addvar(model, Z) }) # # tests/testparres.R # # additional test of parres # # $Revision: 1.6 $ $Date: 2020/02/04 03:23:38 $ # require(spatstat) local({ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X ~x+y) # options in parres (and code blocks in print.parres) parres(model, "x") parres(model, "x", smooth.effect=TRUE) parres(model, "x", bw.input="quad") w <- square(0.5) parres(model, "x", subregion=w) 
parres(model, "x", subregion=w, bw.input="quad") f <- function(x,y) { x + y } parres(model, f) # check whether 'update.ppm' has messed up internals mod2 <- update(model, ~x) parres(mod2, "x") #' other kinds of covariates mod3 <- ppm(X ~ x + offset(y)) parres(mod3, "offset(y)") Z <- distmap(runifpoint(3)) parres(mod3, Z) mod4 <- ppm(X ~ sin(x), data=solist(B=Z)) parres(mod4, "sin(x)") parres(mod4, "B") #' models with interaction mod5 <- ppm(cells ~ x, AreaInter(0.06)) parres(mod5, "x") dlin <- distfun(copper$SouthLines) copfit <- ppm(copper$SouthPoints ~ dlin, Geyer(1,1)) parres(copfit, "dlin") #' covariate need not be specified if there is only one. parres(mod5) parres(copfit) #' infrastructure ltuae <- evalCovariate(42, cells) LTUAE <- evalCovariate(ltuae, cells) fit <- ppm(amacrine ~ x * marks, nd=16) dmat <- model.depends(fit) check.separable(dmat, "x", c(x=FALSE, marks=FALSE), FALSE) check.separable(dmat, "x", c(FALSE, FALSE), FALSE) check.separable(dmat, "x", c(x=FALSE, marks=TRUE), FALSE) }) #' #' tests/threedee.R #' #' Tests of 3D code #' #' $Revision: 1.7 $ $Date: 2020/01/21 04:16:32 $ #' require(spatstat) local({ A <- runifpoint3(10, nsim=2) ## X <- runifpoint3(30) Y <- runifpoint3(20) d <- pairdist(X, periodic=TRUE, squared=TRUE) d <- crossdist(X, Y, squared=TRUE) d <- crossdist(X, Y, squared=TRUE, periodic=TRUE) Z <- ppsubset(X, 2:4) #' h <- has.close(X, 0.2) h <- has.close(X, 0.2, periodic=TRUE) h <- has.close(X, 0.2, Y=Y) h <- has.close(X, 0.2, Y=Y, periodic=TRUE) #' code blocks not otherwise reached rmax <- 0.6 * max(nndist(X)) g <- G3est(X, rmax=rmax, correction="rs") g <- G3est(X, rmax=rmax, correction="km") g <- G3est(X, rmax=rmax, correction="Hanisch") g <- G3est(X, rmax=rmax, sphere="ideal") g <- G3est(X, rmax=rmax, sphere="digital") v <- sphere.volume() v <- digital.volume() #' older code co <- coords(X) xx <- co$x yy <- co$y zz <- co$z gg1 <- g3engine(xx, yy, zz, correction="Hanisch G3") gg2 <- g3engine(xx, yy, zz, correction="minus sampling") 
ff1 <- f3engine(xx, yy, zz, correction="no") ff2 <- f3engine(xx, yy, zz, correction="minus sampling") ## X <- runifpoint3(10) print(X) print(X %mark% runif(10)) print(X %mark% factor(letters[c(1:5,5:1)])) print(X %mark% data.frame(a=1:10, b=runif(10))) da <- as.Date(paste0("2020-01-0", c(1:5,5:1))) print(X %mark% da) print(X %mark% data.frame(a=1:10, b=da)) }) #' tests/trigraph.R #' #' Tests for C code in trigraf.c #' #' $Revision: 1.3 $ $Date: 2018/09/23 09:37:29 $ #' require(spatstat) local({ #' called from deldir.R spatstat.deldir.setopt(FALSE, TRUE) A <- delaunay(redwood) spatstat.deldir.setopt(FALSE, FALSE) B <- delaunay(redwood) spatstat.deldir.setopt(TRUE, TRUE) #' called from edges2triangles.R tryangles <- function(iedge, jedge, nt=0) { spatstat.options(fast.trigraph=FALSE) A <- edges2triangles(iedge, jedge) spatstat.options(fast.trigraph=TRUE) B <- edges2triangles(iedge, jedge) if(!all(dim(A) == dim(B)) || !all(A == B)) stop(paste("Discrepancy in edges2triangles (with", nt, "triangles)")) } ii <- simplenet$from jj <- simplenet$to tryangles(ii, jj, 0) tryangles(c(ii, 1), c(jj, 5), 1) tryangles(c(ii, 1, 8), c(jj, 5, 9), 2) }) reset.spatstat.options() # # tests/triplets.R # # test code for triplet interaction and associated summary function Tstat # # $Revision: 1.7 $ $Date: 2020/01/26 04:49:09 $ # require(spatstat) local({ #' valid model fit <- ppm(cells ~1, Triplets(0.1)) fit suffstat(fit) #' invalid model fitR <- ppm(redwood ~1, Triplets(0.1)) fitR suffstat(fitR) #' hard core (zero triangles, coefficient is NA) fit0 <- ppm(cells ~1, Triplets(0.05)) fit0 suffstat(fit0) #' bug case (1 triangle in data) fit1 <- ppm(cells ~1, Triplets(0.15)) fit1 suffstat(fit1) #' Tstat function, all code blocks a <- Tstat(redwood, ratio=TRUE, correction=c("none", "border", "bord.modif", "translate")) #' simulation X <- simulate(fit) mod <- list(cif="triplets",par=list(beta=50,gamma=0.2,r=0.07), w=square(1)) Xm <- rmh(model=mod,start=list(n.start=5), control=list(nrep=1e5)) #' 
hard core mod$par$gamma <- 0 XmHard <- rmh(model=mod,start=list(n.start=5), control=list(nrep=1e5)) }) spatstat/tests/selfcross.txt0000644000176200001440000000071513115225157016030 0ustar liggesusers x y 0.3057897 0.1518920 0.6038506 0.3132859 0.6343093 0.2740279 0.5364061 0.2936569 0.8170620 0.4681368 0.8083595 0.6535217 0.6125531 0.6796937 0.6103774 0.6360737 0.4363273 0.6338927 0.4689617 0.6927797 0.6538900 0.7560286 0.6169043 0.7756576 0.5994993 0.7276756 0.3514779 0.7363996 0.3123166 0.6622457 0.1447933 0.4877658 0.2274671 0.4332408 0.1578471 0.3721728 0.2753309 0.4027068 0.1817790 0.4136118 0.2100621 0.3067429 spatstat/tests/testsD.R0000644000176200001440000006070113616150152014654 0ustar liggesusers#' #' tests/deltasuffstat.R #' #' Explicit tests of 'deltasuffstat' #' #' $Revision: 1.2 $ $Date: 2018/03/28 09:04:06 $ require(spatstat) local({ disagree <- function(x, y, tol=1e-7) { !is.null(x) && !is.null(y) && max(abs(x-y)) > tol } flydelta <- function(model, modelname="") { ## Check execution of different algorithms for 'deltasuffstat' dSS <- deltasuffstat(model, sparseOK=TRUE) dBS <- deltasuffstat(model, sparseOK=TRUE, use.special=FALSE, force=TRUE) dBF <- deltasuffstat(model, sparseOK=FALSE, use.special=FALSE, force=TRUE) ## Compare results if(disagree(dBS, dSS)) stop(paste(modelname, "model: Brute force algorithm disagrees with special algorithm")) if(disagree(dBF, dBS)) stop(paste(modelname, "model: Sparse and full versions of brute force algorithm disagree")) return(invisible(NULL)) } modelS <- ppm(cells ~ x, Strauss(0.13), nd=10) flydelta(modelS, "Strauss") antsub <- ants[c(FALSE,TRUE,FALSE)] rmat <- matrix(c(130, 90, 90, 60), 2, 2) modelM <- ppm(antsub ~ 1, MultiStrauss(rmat), nd=16) flydelta(modelM, "MultiStrauss") modelA <- ppm(antsub ~ 1, HierStrauss(rmat, archy=c(2,1)), nd=16) flydelta(modelA, "HierStrauss") }) #' #' tests/density.R #' #' Test behaviour of density() methods, #' relrisk(), Smooth() #' and inhomogeneous summary functions #' and idw, 
adaptive.density, intensity #' #' $Revision: 1.52 $ $Date: 2020/02/04 01:54:56 $ #' require(spatstat) local({ # test all cases of density.ppp and densityfun.ppp tryit <- function(..., do.fun=TRUE) { Z <- density(cells, ..., at="pixels") Z <- density(cells, ..., at="points") if(do.fun) { f <- densityfun(cells, ...) U <- f(0.1, 0.3) } return(invisible(NULL)) } tryit(0.05) tryit(0.05, diggle=TRUE) tryit(0.05, se=TRUE) tryit(0.05, weights=expression(x)) tryit(0.07, kernel="epa") tryit(0.07, kernel="quartic") tryit(0.07, kernel="disc") tryit(0.07, kernel="epa", weights=expression(x)) tryit(sigma=Inf) tryit(sigma=Inf, weights=expression(x)) V <- diag(c(0.05^2, 0.07^2)) tryit(varcov=V) tryit(varcov=V, diggle=TRUE) tryit(varcov=V, weights=expression(x)) tryit(varcov=V, weights=expression(x), diggle=TRUE) Z <- distmap(runifpoint(5, Window(cells))) tryit(0.05, weights=Z) tryit(0.05, weights=Z, diggle=TRUE) trymost <- function(...) tryit(..., do.fun=FALSE) wdf <- data.frame(a=1:42,b=42:1) trymost(0.05, weights=wdf) trymost(0.05, weights=wdf, diggle=TRUE) trymost(sigma=Inf, weights=wdf) trymost(varcov=V, weights=wdf) trymost(varcov=V, weights=expression(cbind(x,y))) ## run C algorithm 'denspt' opa <- spatstat.options(densityC=TRUE, densityTransform=FALSE) tryit(varcov=V) tryit(varcov=V, weights=expression(x)) trymost(varcov=V, weights=wdf) spatstat.options(opa) crossit <- function(..., sigma=NULL) { U <- runifpoint(20, Window(cells)) a <- densitycrossEngine(cells, U, ..., sigma=sigma) a <- densitycrossEngine(cells, U, ..., sigma=sigma, diggle=TRUE) invisible(NULL) } crossit(varcov=V, weights=cells$x) crossit(varcov=V, weights=wdf) crossit(sigma=0.1, weights=wdf) crossit(sigma=0.1, kernel="epa", weights=wdf) crossit(sigma=Inf) # apply different discretisation rules Z <- density(cells, 0.05, fractional=TRUE) Z <- density(cells, 0.05, preserve=TRUE) Z <- density(cells, 0.05, fractional=TRUE, preserve=TRUE) ## compare results with different algorithms crosscheque <- function(expr) 
{ e <- as.expression(substitute(expr)) ename <- sQuote(deparse(substitute(expr))) ## interpreted R opa <- spatstat.options(densityC=FALSE, densityTransform=FALSE) val.interpreted <- eval(e) ## established C algorithm 'denspt' spatstat.options(densityC=TRUE, densityTransform=FALSE) val.C <- eval(e) ## new C algorithm 'Gdenspt' using transformed coordinates spatstat.options(densityC=TRUE, densityTransform=TRUE) val.Transform <- eval(e) spatstat.options(opa) if(max(abs(val.interpreted - val.C)) > 0.001) stop(paste("Numerical discrepancy between R and C algorithms in", ename)) if(max(abs(val.C - val.Transform)) > 0.001) stop(paste("Numerical discrepancy between C algorithms", "using transformed and untransformed coordinates in", ename)) invisible(NULL) } ## execute & compare results of density(at="points") with different algorithms wdfr <- cbind(1:npoints(redwood), 2) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE)) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE, weights=wdfr[,1])) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE, weights=wdfr)) ## correctness of non-Gaussian kernel calculation leavein <- function(ker, maxd=0.025) { ZI <- density(redwood, 0.12, kernel=ker, edge=FALSE, dimyx=256)[redwood] ZP <- density(redwood, 0.12, kernel=ker, edge=FALSE, at="points", leaveoneout=FALSE) discrep <- max(abs(ZP - ZI))/npoints(redwood) if(discrep > maxd) stop(paste("Discrepancy", signif(discrep, 3), "in calculation for", ker, "kernel")) return(invisible(NULL)) } leavein("epanechnikov", 0.015) leavein("quartic", 0.010) leavein("disc", 0.100) ## bandwidth selection code blocks sigvec <- 0.01 * 2:15 sigran <- range(sigvec) bw.ppl(redwood, sigma=sigvec) bw.ppl(redwood, srange=sigran, ns=5) bw.CvL(redwood, sigma=sigvec) bw.CvL(redwood, srange=sigran, ns=5) ## adaptive bandwidth a <- bw.abram(redwood) a <- bw.abram(redwood, pilot=density(redwood, 0.2)) a <- bw.abram(redwood, smoother="densityVoronoi", at="pixels") ## Kinhom 
lam <- density(redwood) K <- Kinhom(redwood, lam) lamX <- density(redwood, at="points") KX <- Kinhom(redwood, lamX) ## test all code cases of new 'relrisk.ppp' algorithm pants <- function(..., X=ants, sigma=100, se=TRUE) { a <- relrisk(X, sigma=sigma, se=se, ...) return(TRUE) } pants() pants(diggle=TRUE) pants(edge=FALSE) pants(diggle=TRUE, at="points") pants(edge=FALSE, at="points") pants(casecontrol=FALSE) pants(relative=TRUE) pants(casecontrol=FALSE, relative=TRUE) pants(at="points") pants(casecontrol=FALSE,at="points") pants(relative=TRUE,at="points") pants(casecontrol=FALSE, relative=TRUE,at="points") pants(relative=TRUE, control="Cataglyphis", case="Messor") pants(relative=TRUE, control="Cataglyphis", case="Messor", at="points") pants(casecontrol=FALSE, case="Messor", se=FALSE) pants(case=2, at="pixels", relative=TRUE) pants(case=2, at="points", relative=TRUE) pants(case=2, at="pixels", relative=FALSE) pants(case=2, at="points", relative=FALSE) pants(sigma=Inf) pants(sigma=NULL, varcov=diag(c(100,100)^2)) ## more than 2 types pants(X=sporophores) pants(X=sporophores, sigma=20, at="points") pants(X=sporophores, sigma=20, relative=TRUE, at="points") pants(X=sporophores, sigma=20, at="pixels", se=FALSE) pants(X=sporophores, sigma=20, relative=TRUE, at="pixels", se=FALSE) bw.relrisk(sporophores, method="leastsquares") bw.relrisk(sporophores, method="weightedleastsquares") ## likewise 'relrisk.ppm' fit <- ppm(ants ~ x) rants <- function(..., model=fit) { a <- relrisk(model, sigma=100, se=TRUE, ...) 
return(TRUE) } rants() rants(diggle=TRUE) rants(edge=FALSE) rants(diggle=TRUE, at="points") rants(edge=FALSE, at="points") rants(casecontrol=FALSE) rants(relative=TRUE) rants(casecontrol=FALSE, relative=TRUE) rants(at="points") rants(casecontrol=FALSE,at="points") rants(relative=TRUE,at="points") rants(casecontrol=FALSE, relative=TRUE,at="points") rants(relative=TRUE, control="Cataglyphis", case="Messor") rants(relative=TRUE, control="Cataglyphis", case="Messor", at="points") ## more than 2 types fut <- ppm(sporophores ~ x) rants(model=fut) rants(model=fut, at="points") rants(model=fut, relative=TRUE, at="points") ## execute Smooth.ppp and Smoothfun.ppp in all cases stroke <- function(..., Y = longleaf) { Z <- Smooth(Y, ..., at="pixels") Z <- Smooth(Y, ..., at="points", leaveoneout=TRUE) Z <- Smooth(Y, ..., at="points", leaveoneout=FALSE) f <- Smoothfun(Y, ...) f(120, 80) f(Y[1:2]) f(Y[FALSE]) U <- as.im(f) return(invisible(NULL)) } stroke() stroke(5, diggle=TRUE) stroke(5, geometric=TRUE) stroke(1e-6) # generates warning about small bandwidth stroke(5, weights=runif(npoints(longleaf))) stroke(5, weights=expression(x)) stroke(5, kernel="epa") stroke(varcov=diag(c(25, 36))) stroke(varcov=diag(c(25, 36)), weights=runif(npoints(longleaf))) stroke(5, Y=longleaf %mark% 1) stroke(5, Y=cut(longleaf,breaks=3)) Z <- as.im(function(x,y){abs(x)+1}, Window(longleaf)) stroke(5, weights=Z) stroke(5, weights=Z, geometric=TRUE) stroke(sigma=Inf) markmean(longleaf, 9) strike <- function(..., Y=finpines) { Z <- Smooth(Y, ..., at="pixels") Z <- Smooth(Y, ..., at="points", leaveoneout=TRUE) Z <- Smooth(Y, ..., at="points", leaveoneout=FALSE) f <- Smoothfun(Y, ...) 
f(4, 1) f(Y[1:2]) f(Y[FALSE]) U <- as.im(f) return(invisible(NULL)) } strike() strike(sigma=1.5, kernel="epa") strike(varcov=diag(c(1.2, 2.1))) strike(sigma=1e-6) strike(sigma=1e-6, kernel="epa") strike(sigma=Inf) strike(1.5, weights=runif(npoints(finpines))) strike(1.5, weights=expression(y)) strike(1.5, geometric=TRUE) strike(1.5, Y=finpines[FALSE]) flatfin <- finpines %mark% data.frame(a=rep(1, npoints(finpines)), b=2) strike(1.5, Y=flatfin) strike(1.5, Y=flatfin, geometric=TRUE) opx <- spatstat.options(densityTransform=FALSE) stroke(5, Y=longleaf[order(longleaf$x)], sorted=TRUE) strike(1.5, Y=finpines[order(finpines$x)], sorted=TRUE) spatstat.options(opx) ## detect special cases Smooth(longleaf[FALSE]) Smooth(longleaf, minnndist(longleaf)) Xconst <- cells %mark% 1 Smooth(Xconst, 0.1) Smooth(Xconst, 0.1, at="points") Smooth(cells %mark% runif(42), sigma=Inf) Smooth(cells %mark% runif(42), sigma=Inf, at="points") Smooth(cells %mark% runif(42), sigma=Inf, at="points", leaveoneout=FALSE) Smooth(cut(longleaf, breaks=4)) ## code not otherwise reached smoothpointsEngine(cells, values=rep(1, npoints(cells)), sigma=0.2) smoothpointsEngine(cells, values=runif(npoints(cells)), sigma=Inf) smoothpointsEngine(cells, values=runif(npoints(cells)), sigma=1e-16) ## validity of Smooth.ppp(at='points') Y <- longleaf %mark% runif(npoints(longleaf), min=41, max=43) Z <- Smooth(Y, 5, at="points", leaveoneout=TRUE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=TRUE)") Z <- Smooth(Y, 5, at="points", leaveoneout=FALSE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=FALSE)") ## compare Smooth.ppp results with different algorithms crosscheque(Smooth(longleaf, at="points", sigma=6)) wt <- runif(npoints(longleaf)) vc <- diag(c(25,36)) crosscheque(Smooth(longleaf, at="points", sigma=6, weights=wt)) crosscheque(Smooth(longleaf, at="points", varcov=vc)) 
crosscheque(Smooth(longleaf, at="points", varcov=vc, weights=wt)) ## drop-dimension coding errors X <- longleaf marks(X) <- cbind(marks(X), 1) Z <- Smooth(X, 5) ZZ <- bw.smoothppp(finpines, hmin=0.01, hmax=0.012, nh=2) # reshaping problem ## geometric-mean smoothing U <- Smooth(longleaf, 5, geometric=TRUE) UU <- Smooth(X, 5, geometric=TRUE) V <- Smooth(longleaf, 5, geometric=TRUE, at="points") VV <- Smooth(X, 5, geometric=TRUE, at="points") }) reset.spatstat.options() local({ #' Kmeasure, second.moment.engine #' Expansion of window Zno <- Kmeasure(redwood, sigma=0.2, expand=FALSE) Zyes <- Kmeasure(redwood, sigma=0.2, expand=TRUE) #' All code blocks sigmadouble <- rep(0.1, 2) diagmat <- diag(sigmadouble^2) generalmat <- matrix(c(1, 0.5, 0.5, 1)/100, 2, 2) Z <- Kmeasure(redwood, sigma=sigmadouble) Z <- Kmeasure(redwood, varcov=diagmat) Z <- Kmeasure(redwood, varcov=generalmat) A <- second.moment.calc(redwood, 0.1, what="all", debug=TRUE) B <- second.moment.calc(redwood, varcov=diagmat, what="all") B <- second.moment.calc(redwood, varcov=diagmat, what="all") D <- second.moment.calc(redwood, varcov=generalmat, what="all") PR <- pixellate(redwood) DRno <- second.moment.calc(PR, 0.2, debug=TRUE, expand=FALSE, npts=npoints(redwood), obswin=Window(redwood)) DRyes <- second.moment.calc(PR, 0.2, debug=TRUE, expand=TRUE, npts=npoints(redwood), obswin=Window(redwood)) DR2 <- second.moment.calc(solist(PR, PR), 0.2, debug=TRUE, expand=TRUE, npts=npoints(redwood), obswin=Window(redwood)) Gmat <- generalmat * 100 isoGauss <- function(x,y) {dnorm(x) * dnorm(y)} ee <- evaluate2Dkernel(isoGauss, runif(10), runif(10), varcov=Gmat, scalekernel=TRUE) isoGaussIm <- as.im(isoGauss, square(c(-3,3))) gg <- evaluate2Dkernel(isoGaussIm, runif(10), runif(10), varcov=Gmat, scalekernel=TRUE) ## experimental code op <- spatstat.options(developer=TRUE) DR <- density(redwood, 0.1) spatstat.options(op) }) local({ #' bandwidth selection op <- spatstat.options(n.bandwidth=8) bw.diggle(cells) 
bw.diggle(cells, method="interpreted") # undocumented test # bw.relrisk(urkiola, hmax=20) is tested in man/bw.relrisk.Rd bw.relrisk(urkiola, hmax=20, method="leastsquares") bw.relrisk(urkiola, hmax=20, method="weightedleastsquares") ZX <- density(swedishpines, at="points") bw.pcf(swedishpines, lambda=ZX) bw.pcf(swedishpines, lambda=ZX, bias.correct=FALSE, simple=FALSE, cv.method="leastSQ") spatstat.options(op) }) local({ #' code in kernels.R kernames <- c("gaussian", "rectangular", "triangular", "epanechnikov", "biweight", "cosine", "optcosine") X <- rnorm(20) U <- runif(20) for(ker in kernames) { dX <- dkernel(X, ker) fX <- pkernel(X, ker) qU <- qkernel(U, ker) m0 <- kernel.moment(0, 0, ker) m1 <- kernel.moment(1, 0, ker) m2 <- kernel.moment(2, 0, ker) m3 <- kernel.moment(3, 0, ker) } }) local({ ## idw Z <- idw(longleaf, power=4) Z <- idw(longleaf, power=4, se=TRUE) ZX <- idw(longleaf, power=4, at="points") ZX <- idw(longleaf, power=4, at="points", se=TRUE) ## dodgy code blocks in densityVoronoi.R A <- adaptive.density(nztrees, nrep=2, f=0.5, counting=TRUE) B <- adaptive.density(nztrees, nrep=2, f=0.5, counting=TRUE, fixed=TRUE) D <- adaptive.density(nztrees, nrep=2, f=0.5, counting=FALSE) E <- adaptive.density(nztrees, nrep=2, f=0.5, counting=FALSE, fixed=TRUE) #' adaptive kernel estimation d10 <- nndist(nztrees, k=10) d10fun <- distfun(nztrees, k=10) d10im <- as.im(d10fun) uN <- 2 * runif(npoints(nztrees)) AA <- densityAdaptiveKernel(nztrees, bw=d10) BB <- densityAdaptiveKernel(nztrees, bw=d10, weights=uN) DD <- densityAdaptiveKernel(nztrees, bw=d10fun, weights=uN) EE <- densityAdaptiveKernel(nztrees, bw=d10im, weights=uN) }) local({ ## unnormdensity x <- rnorm(20) d0 <- unnormdensity(x, weights=rep(0, 20)) dneg <- unnormdensity(x, weights=c(-runif(19), 0)) }) local({ ## cases of 'intensity' etc a <- intensity(amacrine, weights=expression(x)) a <- intensity(split(amacrine), weights=expression(x)) a <- intensity(split(amacrine), weights=amacrine$x) a <- 
intensity(ppm(amacrine ~ 1)) }) reset.spatstat.options() #' #' tests/diagnostique.R #' #' Diagnostic tools such as diagnose.ppm, qqplot.ppm #' #' $Revision: 1.5 $ $Date: 2019/12/31 03:32:54 $ #' require(spatstat) local({ fit <- ppm(cells ~ x) diagE <- diagnose.ppm(fit, type="eem") diagI <- diagnose.ppm(fit, type="inverse") diagP <- diagnose.ppm(fit, type="Pearson") plot(diagE, which="all") plot(diagI, which="smooth") plot(diagP, which="x") plot(diagP, which="marks", plot.neg="discrete") plot(diagP, which="marks", plot.neg="contour") plot(diagP, which="smooth", srange=c(-5,5)) plot(diagP, which="smooth", plot.smooth="contour") plot(diagP, which="smooth", plot.smooth="image") fitS <- ppm(cells ~ x, Strauss(0.08)) diagES <- diagnose.ppm(fitS, type="eem", clip=FALSE) diagIS <- diagnose.ppm(fitS, type="inverse", clip=FALSE) diagPS <- diagnose.ppm(fitS, type="Pearson", clip=FALSE) plot(diagES, which="marks", plot.neg="imagecontour") plot(diagPS, which="marks", plot.neg="discrete") plot(diagPS, which="marks", plot.neg="contour") plot(diagPS, which="smooth", plot.smooth="image") plot(diagPS, which="smooth", plot.smooth="contour") plot(diagPS, which="smooth", plot.smooth="persp") #' infinite reach, not border-corrected fut <- ppm(cells ~ x, Softcore(0.5), correction="isotropic") diagnose.ppm(fut) #' diagPX <- diagnose.ppm(fit, type="Pearson", cumulative=FALSE) plot(diagPX, which="y") #' simulation based e <- envelope(cells, nsim=4, savepatterns=TRUE, savefuns=TRUE) Plist <- rpoispp(40, nsim=5) qf <- qqplot.ppm(fit, nsim=4, expr=e, plot.it=FALSE) print(qf) qp <- qqplot.ppm(fit, nsim=5, expr=Plist, fast=FALSE) print(qp) qp <- qqplot.ppm(fit, nsim=5, expr=expression(rpoispp(40)), plot.it=FALSE) print(qp) qg <- qqplot.ppm(fit, nsim=5, style="classical", plot.it=FALSE) print(qg) #' lurking.ppm #' covariate is numeric vector fitx <- ppm(cells ~ x) yvals <- coords(as.ppp(quad.ppm(fitx)))[,"y"] lurking(fitx, yvals) #' covariate is stored but is not used in model Z <- 
as.im(function(x,y){ x+y }, Window(cells)) fitxx <- ppm(cells ~ x, data=solist(Zed=Z), allcovar=TRUE) lurking(fitxx, expression(Zed)) #' envelope is a ppplist; length < nsim; glmdata=NULL fit <- ppm(cells ~ 1) stuff <- lurking(fit, expression(x), envelope=Plist, plot.sd=FALSE) #' plot.lurk plot(stuff, shade=NULL) }) #' #' tests/discarea.R #' #' $Revision: 1.2 $ $Date: 2019/01/20 08:44:50 $ #' require(spatstat) local({ u <- c(0.5,0.5) B <- owin(poly=list(x=c(0.3, 0.5, 0.7, 0.4), y=c(0.3, 0.3, 0.6, 0.8))) areaGain(u, cells, 0.1, exact=TRUE) areaGain(u, cells, 0.1, W=NULL) areaGain(u, cells, 0.1, W=B) X <- cells[square(0.4)] areaLoss(X, 0.1, exact=TRUE) # -> areaLoss.diri areaLoss(X, 0.1, exact=FALSE) # -> areaLoss.grid areaLoss.poly(X, 0.1) areaLoss(X, 0.1, exact=FALSE, method="distmap") # -> areaLoss.grid areaLoss(X, c(0.1, 0.15), exact=FALSE, method="distmap") # -> areaLoss.grid }) #' #' tests/disconnected.R #' #' disconnected linear networks #' #' $Revision: 1.3 $ $Date: 2018/07/21 03:00:09 $ require(spatstat) local({ #' disconnected network m <- simplenet$m m[4,5] <- m[5,4] <- m[6,10] <- m[10,6] <- m[4,6] <- m[6,4] <- FALSE L <- linnet(vertices(simplenet), m) L summary(L) is.connected(L) Z <- connected(L, what="components") #' point pattern with no points in one connected component set.seed(42) X <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L) B <- lineardirichlet(X) plot(B) summary(B) D <- pairdist(X) A <- nndist(X) H <- nnwhich(X) Y <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L) G <- nncross(X, Y) J <- crossdist(X, Y) plot(distfun(X)) # includes evaluation of nncross(what="dist") #' K functions in disconnected network K <- linearK(X) lamX <- intensity(X) nX <- npoints(X) KI <- linearKinhom(X, lambda=rep(lamX, nX)) P <- linearpcf(X) PJ <- linearpcfinhom(X, lambda=rep(lamX, nX)) Y <- X %mark% factor(rep(1:2, nX)[1:nX]) Y1 <- split(Y)[[1]] Y2 <- split(Y)[[2]] KY <- linearKcross(Y) PY <- linearpcfcross(Y) KYI <- linearKcross.inhom(Y, 
lambdaI=rep(intensity(Y1), npoints(Y1)), lambdaJ=rep(intensity(Y2), npoints(Y2))) PYI <- linearpcfcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)), lambdaJ=rep(intensity(Y2), npoints(Y2))) #' internal utilities K <- ApplyConnected(X, linearK, rule=function(...) list()) }) #' #' tests/dominic.R #' #' Additional tests for Dominic Schuhmacher's code #' #' $Revision: 1.3 $ $Date: 2019/10/11 04:33:29 $ require(spatstat) local({ X <- runifpoint(10) Y <- runifpoint(10) d <- pppdist(X, Y, type="ace", show.rprimal=TRUE) a <- matchingdist(d, type="ace") b <- matchingdist(d, type="mat") d2 <- pppdist(X, Y, type="spa", ccode=FALSE) d2 <- pppdist(X, Y, type="spa", ccode=TRUE, auction=FALSE) d3 <- pppdist(X, Y, type="mat", ccode=TRUE, auction=FALSE) d4 <- pppdist(X[FALSE], Y[FALSE], matching=TRUE, type="spa") d4 <- pppdist(X[FALSE], Y[FALSE], matching=FALSE, type="spa") d4 <- pppdist(X[FALSE], Y[FALSE], matching=TRUE, type="ace") d4 <- pppdist(X[FALSE], Y[FALSE], matching=FALSE, type="ace") m <- pppdist.mat(X, Y, q=Inf, cutoff=0.001) m2 <- pppdist.mat(X[FALSE], Y[FALSE], q=Inf, cutoff=0.001) m3 <- pppdist.mat(X[FALSE], Y[FALSE], q=2, cutoff=0.001) }) #' #' tests/deepeepee.R #' #' Tests for determinantal point process models #' #' $Revision: 1.6 $ $Date: 2020/01/10 03:10:21 $ require(spatstat) local({ #' simulate.dppm jpines <- residualspaper$Fig1 fit <- dppm(jpines ~ 1, dppGauss) set.seed(10981) simulate(fit, W=square(5)) #' simulate.detpointprocfamily - code blocks model <- dppGauss(lambda=100, alpha=.05, d=2) simulate(model, seed=1999, correction="border") u <- is.stationary(model) #' other methods for dppm kay <- Kmodel(fit) gee <- pcfmodel(fit) lam <- intensity(fit) arr <- reach(fit) pah <- parameters(fit) #' dppeigen code blocks mod <- dppMatern(lambda=2, alpha=0.01, nu=1, d=2) uT <- dppeigen(mod, trunc=1.1, Wscale=c(1,1), stationary=TRUE) uF <- dppeigen(mod, trunc=1.1, Wscale=c(1,1), stationary=FALSE) vT <- dppeigen(mod, trunc=0.98, Wscale=c(1,1), stationary=TRUE) vF 
<- dppeigen(mod, trunc=0.98, Wscale=c(1,1), stationary=FALSE) }) #' #' tests/duplicity.R #' #' Tests of duplicated/multiplicity code #' #' $Revision: 1.7 $ $Date: 2019/12/06 02:41:32 $ require(spatstat) local({ X <- ppp(c(1,1,0.5,1), c(2,2,1,2), window=square(3), check=FALSE) Y <- X %mark% factor(letters[c(3,2,4,3)]) ZC <- X %mark% letters[c(3,2,4,3)] ZM <- Y %mark% matrix(c(3,2,4,3), 4, 2) ZD <- Y %mark% as.data.frame(marks(ZM)) #' multiplicity m <- multiplicity(X) mf <- multiplicity(Y) mm <- multiplicity(ZM) mz <- multiplicity(ZD) mc <- multiplicity(ZC) ## default method kk <- c(1,2,3,1,1,2) mk <- multiplicity(kk) ml <- multiplicity(list(sin, cos, tan)[kk]) mc <- multiplicity(c("sin", "cos", "tan")[kk]) if(!identical(ml, mk)) stop("multiplicity.default() disagrees with multiplicityNumeric") if(!identical(mc, mk)) stop("multiplicity() disagrees with multiplicity()") ## data frame method df <- data.frame(x=c(1:4, 1,3,2,4, 0,0, 3,4), y=factor(rep(letters[1:4], 3))) md <- multiplicity(df) ## uniquemap.ppp checkum <- function(X, blurb) { a <- uniquemap(X) if(any(a > seq_along(a))) stop(paste("uniquemap", blurb, "does not respect sequential ordering")) return(invisible(NULL)) } checkum(X, "") checkum(Y, "") checkum(ZC, "") checkum(ZM, "") checkum(ZD, "") ## uniquemap.data.frame dfbase <- as.data.frame(replicate(3, sample(1:20, 10), simplify=FALSE)) df <- dfbase[sample(1:10, 30, replace=TRUE), , drop=FALSE] #' faster algorithm for numeric values checkum(df, "") a <- uniquemap(df) #' general algorithm using 'duplicated' and 'match' dfletters <- as.data.frame(matrix(letters[as.matrix(df)], nrow=nrow(df))) checkum(dfletters, "") b <- uniquemap(dfletters) if(!isTRUE(all.equal(a,b))) stop("inconsistency between algorithms in uniquemap.data.frame") ## uniquemap.matrix M0 <- matrix(1:12, 3, 4) ii <- sample(1:3, 5, replace=TRUE) M4 <- M0[ii, , drop=FALSE] checkum(M4, "") u4 <- uniquemap(M4) C4 <- matrix(letters[M4], 5, 4) uc4 <- uniquemap(C4) checkum(C4, "") 
if(!isTRUE(all.equal(u4, uc4))) stop("Inconsistency between algorithms in uniquemap.matrix") ## uniquemap.default a <- letters[c(1, 1:4, 3:2)] checkum(a, "") checkum(as.list(a), "") u1 <- uniquemap(a) u2 <- uniquemap(as.list(a)) if(!isTRUE(all.equal(u1, u2))) stop("Inconsistency between algorithms in uniquemap.default") }) spatstat/tests/testsM.R0000644000176200001440000002457513611545450014702 0ustar liggesusers## ## tests/marcelino.R ## ## $Revision: 1.3 $ $Date: 2015/12/29 08:54:49 $ ## require(spatstat) local({ Y <- split(urkiola) B <- Y$birch O <- Y$oak B.lam <- predict (ppm(B ~polynom(x,y,2)), type="trend") O.lam <- predict (ppm(O ~polynom(x,y,2)), type="trend") Kinhom(B, lambda=B.lam, correction="iso") Kinhom(B, lambda=B.lam, correction="border") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam) Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "iso") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "border") }) ## ## tests/markcor.R ## ## Tests of mark correlation code (etc) ## ## $Revision: 1.5 $ $Date: 2019/08/01 06:32:20 $ require(spatstat) local({ ## check.testfun checks equality of functions ## and is liable to break if the behaviour of all.equal is changed fe <- function(m1, m2) {m1 == m2} fm <- function(m1, m2) {m1 * m2} fs <- function(m1, m2) {sqrt(m1)} if(check.testfun(fe, X=amacrine)$ftype != "equ") warning("check.testfun fails to recognise mark equality function") if(check.testfun(fm, X=longleaf)$ftype != "mul") warning("check.testfun fails to recognise mark product function") check.testfun(fs, X=longleaf) check.testfun("mul") check.testfun("equ") ## test all is well in Kmark -> Kinhom MA <- Kmark(amacrine,function(m1,m2){m1==m2}) set.seed(42) AR <- rlabel(amacrine) MR <- Kmark(AR,function(m1,m2){m1==m2}) if(isTRUE(all.equal(MA,MR))) stop("Kmark unexpectedly ignores marks") ## cover code blocks in markcorr() X <- runifpoint(100) %mark% runif(100) Y <- X %mark% data.frame(u=runif(100), v=runif(100)) ww 
<- runif(100) fone <- function(x) { x/2 } ffff <- function(x,y) { fone(x) * fone(y) } aa <- markcorr(Y) bb <- markcorr(Y, ffff, weights=ww, normalise=TRUE) bb <- markcorr(Y, ffff, weights=ww, normalise=FALSE) bb <- markcorr(Y, f1=fone, weights=ww, normalise=TRUE) bb <- markcorr(Y, f1=fone, weights=ww, normalise=FALSE) ## markcrosscorr a <- markcrosscorr(betacells, normalise=FALSE) if(require(sm)) { b <- markcrosscorr(betacells, method="sm") } }) #' tests/mctests.R #' Monte Carlo tests #' (mad.test, dclf.test, envelopeTest, hasenvelope) #' $Revision: 1.2 $ $Date: 2019/06/03 10:39:31 $ require(spatstat) local({ envelopeTest(cells, Lest, exponent=1, nsim=9, savepatterns=TRUE) (a3 <- envelopeTest(cells, Lest, exponent=3, nsim=9, savepatterns=TRUE)) envelopeTest(a3, Lest, exponent=3, nsim=9, alternative="less") fitx <- ppm(redwood~x) ax <- envelopeTest(fitx, exponent=2, nsim=9, savefuns=TRUE) print(ax) envelopeTest(redwood, Lest, exponent=1, nsim=19, rinterval=c(0, 0.1), alternative="greater", clamp=TRUE) envelopeTest(redwood, pcf, exponent=Inf, nsim=19, rinterval=c(0, 0.1), alternative="greater", clamp=TRUE) }) #' tests/morpho.R #' #' morphology code blocks #' #' $Revision: 1.2 $ $Date: 2018/05/13 04:02:40 $ require(spatstat) local({ #' owin a <- erosion(letterR, 0.1, polygonal=FALSE) b <- dilation(letterR, 0.1, polygonal=FALSE) at <- erosion(letterR, 0.1, polygonal=FALSE, strict=TRUE) bt <- dilation(letterR, 0.1, polygonal=FALSE, tight=FALSE) #' psp S <- edges(letterR) dm <- dilation(S, 0.1, polygonal=FALSE) dt <- dilation(S, 0.1, polygonal=FALSE, tight=FALSE) op <- spatstat.options(old.morpho.psp=TRUE) dn <- dilation(S, 0.1, polygonal=TRUE) spatstat.options(op) cS <- closing(S, 0.1, polygonal=FALSE) eS <- erosion(S, 0) oS <- opening(S, 0) #' ppp dc <- dilation(cells, 0.06, polygonal=FALSE) ec <- erosion(cells, 0) oc <- opening(cells, 0) }) reset.spatstat.options() # # tests/mppm.R # # Basic tests of mppm # # $Revision: 1.14 $ $Date: 2020/01/01 05:45:26 $ # 
require(spatstat) local({ ## test interaction formulae and subfits fit1 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~ifelse(group=="control", po, str)) fit2 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id) fit2w <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~str/id, weights=runif(nrow(simba))) # currently invalid # fit3 <- mppm(Points ~ group, simba, # hyperframe(po=Poisson(), pie=PairPiece(c(0.05,0.1))), # iformula=~I((group=="control") * po) + I((group=="treatment") * pie)) fit1 fit2 fit2w # fit3 ## run summary.mppm which currently sits in spatstat-internal.Rd summary(fit1) summary(fit2) summary(fit2w) # summary(fit3) ## test vcov algorithm vcov(fit1) vcov(fit2) # vcov(fit3) ## test subfits algorithm s1 <- subfits(fit1) s2 <- subfits(fit2) # s3 <- subfits(fit3) ## validity of results of subfits() p1 <- solapply(s1, predict) p2 <- solapply(s2, predict) # p3 <- solapply(s3, predict) }) local({ ## cases of predict.mppm W <- solapply(waterstriders, Window) Fakes <- solapply(W, runifpoint, n=30) FakeDist <- solapply(Fakes, distfun) H <- hyperframe(Bugs=waterstriders, D=FakeDist) fit <- mppm(Bugs ~ D, data=H) p1 <- predict(fit) p2 <- predict(fit, locations=Fakes) p3 <- predict(fit, locations=solapply(W, erosion, r=4)) locn <- as.data.frame(do.call(cbind, lapply(Fakes, coords))) df <- data.frame(id=sample(1:3, nrow(locn), replace=TRUE), D=runif(nrow(locn))) p4 <- predict(fit, locations=locn, newdata=df) fitG <- mppm(Bugs ~ D, data=H, use.gam=TRUE) p1G <- predict(fitG) p2G <- predict(fitG, locations=Fakes) p3G <- predict(fitG, locations=solapply(W, erosion, r=4)) p4G <- predict(fitG, locations=locn, newdata=df) }) local({ ## [thanks to Sven Wagner] ## factor covariate, with some levels unused in some rows set.seed(14921788) H <- hyperframe(X=replicate(3, runifpoint(20), simplify=FALSE), Z=solist(as.im(function(x,y){x}, owin()), as.im(function(x,y){y}, 
owin()), as.im(function(x,y){x+y}, owin()))) H$Z <- solapply(H$Z, cut, breaks=(0:4)/2) fit6 <- mppm(X ~ Z, H) v6 <- vcov(fit6) s6 <- subfits(fit6) p6 <- solapply(s6, predict) # random effects fit7 <- mppm(X ~ Z, H, random=~1|id) v7 <- vcov(fit7) s7 <- subfits(fit7) p7 <- solapply(s7, predict) fit7a <- mppm(X ~ Z, H, random=~x|id) v7a <- vcov(fit7a) s7a <- subfits(fit7a) p7a <- solapply(s7a, predict) # multitype: collisions in vcov.ppm, predict.ppm H$X <- lapply(H$X, rlabel, labels=factor(c("a","b")), permute=FALSE) M <- MultiStrauss(matrix(0.1, 2, 2), c("a","b")) fit8 <- mppm(X ~ Z, H, M) v8 <- vcov(fit8, fine=TRUE) s8 <- subfits(fit8) p8 <- lapply(s8, predict) c8 <- lapply(s8, predict, type="cif") fit9 <- mppm(X ~ Z, H, M, iformula=~Interaction * id) v9 <- vcov(fit9, fine=TRUE) s9 <- subfits(fit9) p9 <- lapply(s9, predict) c9 <- lapply(s9, predict, type="cif") # and a simple error in recognising 'marks' fit10 <- mppm(X ~ marks, H) }) local({ ## test handling of offsets and zero cif values in mppm H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H, Hardcore(1.5)) mppm(Y ~ 1, data=H, StraussHard(7, 1.5)) ## prediction, in training/testing context ## (example from Markus Herrmann and Ege Rubak) X <- waterstriders dist <- solapply(waterstriders, function(z) distfun(runifpoint(1, Window(z)))) i <- 3 train <- hyperframe(pattern = X[-i], dist = dist[-i]) test <- hyperframe(pattern = X[i], dist = dist[i]) fit <- mppm(pattern ~ dist, data = train) pred <- predict(fit, type="cif", newdata=test, verbose=TRUE) }) local({ ## test handling of interaction coefficients in multitype case set.seed(42) XX <- as.solist(replicate(3, rthin(amacrine, 0.8), simplify=FALSE)) H <- hyperframe(X=XX) M <- MultiStrauss(matrix(0.1, 2, 2), levels(marks(amacrine))) fit <- mppm(X ~ 1, H, M) co <- coef(fit) subco <- sapply(subfits(fit), coef) if(max(abs(subco - co)) > 0.001) stop("Wrong coefficient values in subfits, for multitype interaction") }) local({ ## test lurking.mppm # example from 
'mppm' n <- 7 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1)) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)))) fit <- mppm(Y ~ Z + U + V, data=H) lurking(fit, expression(Z), type="P") lurking(fit, expression(V), type="raw") # design covariate lurking(fit, expression(U), type="raw") # image, constant in each row lurking(fit, H$Z, type="P") # list of images }) local({ ## test anova.mppm code blocks and scoping problem H <- hyperframe(X=waterstriders) mod0 <- mppm(X~1, data=H, Poisson()) modxy <- mppm(X~x+y, data=H, Poisson()) mod0S <- mppm(X~1, data=H, Strauss(2)) modxyS <- mppm(X~x+y, data=H, Strauss(2)) anova(mod0, modxy, test="Chi") anova(mod0S, modxyS, test="Chi") anova(modxy, test="Chi") anova(modxyS, test="Chi") }) local({ ## test multitype stuff foo <- flu[1:3,] msh <- MultiStraussHard(iradii=matrix(100, 2, 2), hradii=matrix(10,2,2), types=levels(marks(foo$pattern[[1]]))) msh0 <- MultiStraussHard(iradii=matrix(100, 2, 2), hradii=matrix(10,2,2)) fit <- mppm(pattern ~ 1, data=foo, interaction=msh0) print(fit) print(summary(fit)) v <- vcov(fit) }) #' #' tests/msr.R #' #' $Revision: 1.2 $ $Date: 2020/01/11 10:42:58 $ #' #' Tests of code for measures #' require(spatstat) local({ ## cases of 'msr' Q <- quadscheme(cells) nQ <- n.quad(Q) nX <- npoints(cells) A <- matrix(nX * 3, nX, 3) B <- matrix(nQ * 3, nQ, 3) M <- msr(Q, A, B) M <- msr(Q, A, 1) M <- msr(Q, 1, B) M <- msr(Q, A, B[,1]) M <- msr(Q, A[,1], B) M <- msr(Q, A, B[,1,drop=FALSE]) M <- msr(Q, A[,1,drop=FALSE], B) ## methods rr <- residuals(ppm(cells ~ x)) a <- summary(rr) b <- is.marked(rr) w <- as.owin(rr) z <- domain(rr) ss <- scalardilate(rr, 2) tt <- rescale(rr, 2) ee <- rotate(rr, pi/4) aa <- affine(rr, mat=diag(c(1,2)), vec=c(0,1)) ff <- flipxy(rr) rrr <- augment.msr(rr, sigma=0.08) uuu <- update(rrr) mm <- residuals(ppm(amacrine ~ x)) ss <- residuals(ppm(amacrine ~ x), type="score") gg <- rescale(ss, 1/662, c("micron", "microns")) 
plot(mm) plot(mm, multiplot=FALSE) plot(mm, equal.markscale=TRUE, equal.ribbon=TRUE) plot(ss) plot(ss, multiplot=FALSE) }) spatstat/tests/testsNtoO.R0000644000176200001440000002557113613216551015361 0ustar liggesusers# # tests/NAinCov.R # # Testing the response to the presence of NA's in covariates # # $Revision: 1.6 $ $Date: 2019/10/14 08:39:58 $ require(spatstat) local({ X <- runifpoint(42) Y <- as.im(function(x,y) { x+y }, owin()) Y[owin(c(0.2,0.4),c(0.2,0.4))] <- NA # fit model: should produce a warning but no failure misfit <- ppm(X ~Y, covariates=list(Y=Y)) # prediction Z <- predict(misfit, type="trend", se=TRUE) # covariance matrix: all should be silent v <- vcov(misfit) ss <- vcov(misfit, what="internals") NULL #' quantile.ewcdf f <- ewcdf(runif(100), runif(100)) qf <- quantile(f, probs=c(0.1, NA, 0.8)) #' quantile.density f <- density(runif(100)) qf <- quantile(f, probs=c(0.1, NA, 0.8)) }) # # tests/nndist.R # # Check that nndist and nnwhich give # results consistent with direct calculation from pairdist # # Similarly for nncross and distfun # # Also test whether minnndist(X) == min(nndist(X)) # # Also test nnorient() # # $Revision: 1.32 $ $Date: 2020/01/23 03:50:14 $ # require(spatstat) local({ eps <- sqrt(.Machine$double.eps) f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n] }, n=k+1) } g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) } ## ....... Two dimensions ................ 
X <- runifpoint(42) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.ppp does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.ppp(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.ppp does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.ppp(k=5) does not agree with pairdist") a <- nndist(X, method="test") b <- nnwhich(X, method="test") a <- nndist(X, method="test", k=1:2) b <- nnwhich(X, method="test", k=1:2) a2 <- nndist(cells[1:3], k=1:3) b2 <- nnwhich(cells[1:3], k=1:3) a3 <- nndist(cells[1]) b3 <- nnwhich(cells[1]) m <- factor((1:npoints(X)) %% 2 == 0) a4 <- nndist.default(X, by=m, k=2) b4 <- nnwhich.default(X, by=m, k=2) ## nncross.ppp without options Y <- runifpoint(30) Y <- Y[nndist(Y) > 0.02] nc <- nncross(X,Y) ncd <- nc$dist ncw <- nc$which cd <- crossdist(X,Y) cdd <- apply(cd, 1, min) cdw <- apply(cd, 1, which.min) if(any(abs(ncd - cdd) > eps)) stop("nncross()$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("nncross()$which does not agree with apply(crossdist(), 1, which.min)") ## nncross with sort on x nc <- nncross(X,Y, sortby="x") ncd <- nc$dist ncw <- nc$which if(any(abs(ncd - cdd) > eps)) stop("nncross(sortby=x)$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("nncross(sortby=x)$which does not agree with apply(crossdist(), 1, which.min)") ## nncross with data pre-sorted on x Y <- Y[order(Y$x)] nc <- nncross(X,Y, is.sorted.Y=TRUE, sortby="x") ncd <- nc$dist ncw <- nc$which cd <- crossdist(X,Y) cdd <- apply(cd, 1, min) cdw <- apply(cd, 1, which.min) if(any(abs(ncd - cdd) > eps)) stop("For sorted data, nncross()$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("For sorted data, nncross()$which does not agree with apply(crossdist(), 1, 
which.min)") ## sanity check for nncross with k > 1 ndw <- nncross(X, Y, k=1:4, what="which") if(any(is.na(ndw))) stop("NA's returned by nncross.ppp(k > 1, what='which')") nnc4 <- nncross(X, Y, k=1:4) iswhich <- (substr(colnames(nnc4), 1, nchar("which")) == "which") ndw <- nnc4[,iswhich] if(any(is.na(ndw))) stop("NA's returned by nncross.ppp(k > 1)$which") ## test of correctness for nncross with k > 1 flipcells <- flipxy(cells) calcwhich <- nncross(cells, flipcells, k=1:4, what="which") truewhich <- t(apply(crossdist(cells,flipcells), 1, order))[,1:4] if(any(calcwhich != truewhich)) stop("nncross(k > 1) gives wrong answer") #' cover some C code blocks Z <- runifpoint(50) X <- Z[1:30] Y <- Z[20:50] iX <- 1:30 iY <- 20:50 Ndw <- nncross(X,Y, iX, iY, k=3) Nw <- nncross(X,Y, iX, iY, k=3, what="which") Nd <- nncross(X,Y, iX, iY, k=3, what="dist") ## special cases nndist(X[FALSE]) nndist(X[1]) nndist(X[1:3], k=4) nndist(X[1:3], k=1:4) nnwhich(X[FALSE]) nnwhich(X[1]) nnwhich(X[1:3], k=4) nnwhich(X[1:3], k=1:4) nncross(X[1:3], Y[FALSE]) nncross(X[1:3], Y[1]) nncross(X[1:3], Y[1:3], k=4) nncross(X[1:3], Y[1:3], k=1:4) ## ....... Three dimensions ................ 
X <- runifpoint3(42) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.pp3 does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.pp3(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.pp3 does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.pp3(k=5) does not agree with pairdist") ff <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n] }, n=k) } gg <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k) } Y <- runifpoint3(20) Y <- Y[nndist(Y) > 0.02] DXY <- crossdist(X,Y) a <- nncross(X,Y) a <- nncross(X,Y, what="dist") a <- nncross(X,Y, what="which") if(any(a != gg(DXY, 1))) stop("incorrect result from nncross.pp3(what='which')") a2 <- nncross(X,Y, k=2) a2 <- nncross(X,Y, what="dist", k=2) a2 <- nncross(X,Y, what="which", k=2) if(any(a2 != gg(DXY, 2))) stop("incorrect result from nncross.pp3(k=2, what='which')") iX <- 1:42 iZ <- 30:42 Z <- X[iZ] b <- nncross(X, Z, iX=iX, iY=iZ) b <- nncross(X, Z, iX=iX, iY=iZ, what="which") b <- nncross(X, Z, iX=iX, iY=iZ, what="dist") b2 <- nncross(X, Z, iX=iX, iY=iZ, k=2) b2 <- nncross(X, Z, iX=iX, iY=iZ, what="which", k=2) b2 <- nncross(X, Z, iX=iX, iY=iZ, what="dist", k=2) e1 <- nncross(X, Y[1:3], k=2:4) c1 <- nncross(X, Y, sortby="var") c2 <- nncross(X, Y, sortby="x") c3 <- nncross(X, Y, sortby="y") c4 <- nncross(X, Y, sortby="z") Xsort <- X[order(coords(X)$x)] c5 <- nncross(Xsort, Y, is.sorted.X=TRUE, sortby="x") Ysort <- Y[order(coords(Y)$x)] c6 <- nncross(Xsort, Ysort, is.sorted.X=TRUE, is.sorted.Y=TRUE, sortby="x") ## special cases nndist(X[FALSE]) nndist(X[1]) nndist(X[1:3], k=4) nndist(X[1:3], k=1:4) nnwhich(X[FALSE]) nnwhich(X[1]) nnwhich(X[1:3], k=4) nnwhich(X[1:3], k=1:4) nncross(X[1:3], Y[FALSE]) nncross(X[1:3], Y[1]) nncross(X[1:3], Y[1:3], k=4) nncross(X[1:3], Y[1:3], 
k=1:4) ## ....... m dimensions ................ B <- boxx(c(0,1),c(0,1),c(0,1),c(0,1)) X <- runifpointx(42, B) Y <- runifpointx(50, B) Y <- Y[nndist(Y) > 0.02] DXY <- crossdist(X,Y) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.ppx does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.ppx(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.ppx does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.ppx(k=5) does not agree with pairdist") a <- nncross(X,Y) ncd <- nncross(X,Y, what="dist") ncw <- nncross(X,Y, what="which") if(any(ncw != gg(DXY, 1))) stop("incorrect result from nncross.ppx(what='which')") a2 <- nncross(X,Y, k=2) ncd <- nncross(X,Y, what="dist", k=2) ncw <- nncross(X,Y, what="which", k=2) if(any(ncw != gg(DXY, 2))) stop("incorrect result from nncross.ppx(k=2, what='which')") ## special cases nndist(X[FALSE]) nndist(X[1]) nndist(X[1:3], k=4) nndist(X[1:3], k=1:4) nnwhich(X[FALSE]) nnwhich(X[1]) nnwhich(X[1:3], k=4) nnwhich(X[1:3], k=1:4) nncross(X[1:3], Y[FALSE]) nncross(X[1:3], Y[1]) nncross(X[1:3], Y[1:3], k=4) nncross(X[1:3], Y[1:3], k=1:4) }) local({ ## test of agreement between nngrid.h and knngrid.h ## dimyx=23 (found by trial-and-error) ensures that there are no ties a <- as.matrix(nnmap(cells, what="which", dimyx=23)) b <- as.matrix(nnmap(cells, what="which", dimyx=23, k=1:2)[[1]]) if(any(a != b)) stop("algorithms in nngrid.h and knngrid.h disagree") ## minnndist correctness X <- redwood3 eps <- sqrt(.Machine$double.eps) mfast <- minnndist(X) mslow <- min(nndist(X)) if(abs(mfast-mslow) > eps) stop("minnndist(X) disagrees with min(nndist(X))") ## maxnndist correctness mfast <- maxnndist(X) mslow <- max(nndist(X)) if(abs(mfast-mslow) > eps) stop("maxnndist(X) disagrees with max(nndist(X))") ## minnndist, maxnndist code blocks Y 
<- superimpose(amacrine, amacrine[10:20]) a <- maxnndist(Y, positive=TRUE) u <- maxnndist(Y, positive=TRUE, by=marks(Y)) b <- minnndist(Y, positive=TRUE) v <- minnndist(Y, positive=TRUE, by=marks(Y)) ## nnmap code blocks A <- nnmap(cells[FALSE]) A <- nnmap(cells, sortby="var") A <- nnmap(cells, sortby="x") A <- nnmap(cells, sortby="y") B <- nnmap(cells[1:3], k=4) B <- nnmap(cells[1:3], k=2:4) D <- nnmap(cells, outputarray=TRUE) }) local({ # tests for has.close() # (the default method uses nndist or pairdist, and can be trusted!) a <- has.close(redwood, 0.05) b <- has.close.default(redwood, 0.05) if(any(a != b)) stop("Incorrect result for has.close(X, r)") a <- has.close(redwood, 0.05, periodic=TRUE) a <- has.close.default(redwood, 0.05, periodic=TRUE) if(any(a != b)) stop("Incorrect result for has.close(X, r, periodic=TRUE)") Y <- split(amacrine) a <- with(Y, has.close(on, 0.05, off)) b <- with(Y, has.close.default(on, 0.05, off)) if(any(a != b)) stop("Incorrect result for has.close(X, r, Y)") a <- with(Y, has.close(on, 0.05, off, periodic=TRUE)) b <- with(Y, has.close.default(on, 0.05, off, periodic=TRUE)) if(any(a != b)) stop("Incorrect result for has.close(X, r, Y, periodic=TRUE)") }) local({ b <- bdist.pixels(letterR, style="coords") d <- bdist.pixels(letterR, dimyx=64, method="interpreted") }) local({ #' test nnorient nnorient(cells, domain=erosion(Window(cells), 0.1)) #' degenerate case X <- cells[nndist(cells) > bdist.points(cells)] f <- nnorient(X) #' nnclean A <- nnclean(shapley, k=17, edge.correct=TRUE) B <- nnclean(runifpoint3(300), 3) #' stienen set #' bug when disc radius is zero Y <- unmark(humberside)[40:100] # contains duplicated points stienen(Y) Z <- stienenSet(Y) #' other cases U <- stienen(cells[1]) V <- stienenSet(cells, edge=FALSE) }) local({ ## nnfun.ppp f <- nnfun(cells) Z <- as.im(f) d <- domain(f) f <- nnfun(amacrine, value="mark") d <- domain(f) Z <- as.im(f) f <- nnfun(longleaf, value="mark") d <- domain(f) Z <- as.im(f) }) 
spatstat/src/0000755000176200001440000000000013624161312012703 5ustar liggesusersspatstat/src/idw.c0000755000176200001440000002175413602774005013653 0ustar liggesusers/* idw.c Inverse-distance weighted smoothing $Revision: 1.12 $ $Date: 2020/01/01 01:27:35 $ Cidw inverse distance smoothing from data points onto pixel grid idwloo leave-one-out estimate at data points Cidw2 Cidw with variance estimate idwloo2 idwloo with variance estimate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "chunkloop.h" #define MAT(X,I,J,NROW) (X)[(J) + (NROW) * (I)] /* inverse-distance smoothing from data points onto pixel grid */ void Cidw(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *xstart, *xstep, *ystart, *ystep; /* pixel grid */ int *nx, *ny; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output arrays - assumed initialised 0 */ { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w, sumw, sumwv; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; sumwv += w * v[i]; sumw += w; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { 
d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); sumwv += w * v[i]; sumw += w; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; } } } } /* Leave-one-out IDW at data points only */ void idwloo(x, y, v, n, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output vectors - assumed initialised 0 */ { int N, i, j, maxchunk; double xi, yi, d2, w, pon2, sumw, sumwv; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = 0.0; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * v[j]; sumw += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * v[j]; sumw += w; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = 0.0; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * v[j]; sumw += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * v[j]; sumw += w; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; } } } } /* ---------------------------------------------------- VERSIONS WITH VARIANCE CALCULATION --------------------------------------------------- */ /* inverse-distance smoothing from data points onto pixel grid */ void Cidw2(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, 
power, num, den, rat, mtwo, wtwo) double *x, *y, *v; /* data points and values */ int *n; double *xstart, *xstep, *ystart, *ystep; /* pixel grid */ int *nx, *ny; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output arrays - assumed initialised 0 */ double *mtwo, *wtwo; /* output arrays - assumed initialised 0 */ { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w, vi, sumw, sumwv, sumw2, runmean, m2, delta, epsilon; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = sumw2 = m2 = runmean = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { vi = v[i]; d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; sumw += w; sumw2 += w * w; sumwv += w * vi; delta = vi - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; MAT(mtwo, ix, iy, Ny) = m2; MAT(wtwo, ix, iy, Ny) = sumw2; } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = sumw2 = m2 = runmean = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { vi = v[i]; d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); sumw += w; sumw2 += w * w; sumwv += w * vi; delta = vi - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; MAT(mtwo, ix, iy, Ny) = m2; MAT(wtwo, ix, iy, 
Ny) = sumw2; } } } } /* Leave-one-out IDW at data points only */ void idwloo2(x, y, v, n, power, num, den, rat, mtwo, wtwo) double *x, *y, *v; /* data points and values */ int *n; double *power; /* exponent for IDW */ double *num, *den, *rat, *mtwo, *wtwo; /* output vectors - initialised 0 */ { int N, i, j, maxchunk; double xi, yi, d2, w, pon2, vj, sumw, sumwv, sumw2, runmean, m2, delta, epsilon; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = sumw2 = m2 = runmean = 0.0; if(i > 0) { for(j = 0; j < i; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } if(i < N-1) { for(j = i+1; j < N; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; mtwo[i] = m2; wtwo[i] = sumw2; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = sumw2 = m2 = runmean = 0.0; if(i > 0) { for(j = 0; j < i; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } if(i < N-1) { for(j = i+1; j < N; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; 
epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; mtwo[i] = m2; wtwo[i] = sumw2; } } } } spatstat/src/linvknndist.c0000644000176200001440000001373513406057617015435 0ustar liggesusers#include #include "yesno.h" /* linvknndist.c k-th nearest neighbour function at vertices (distance from each vertex to the nearest, second nearest, ... k-th nearest target data point) Needs only the sparse representation of the network $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 ! Data points must be ordered by segment index ! */ #undef HUH #define DIST(VERTEX, ORDER) dist[(ORDER) + (VERTEX) * Kmax] #define WHICH(VERTEX, ORDER) which[(ORDER) + (VERTEX) * Kmax] #define UPDATE(VERTEX, D, J, EPS) \ UpdateKnnList(D, J, \ dist + (VERTEX) * Kmax, \ which + (VERTEX) * Kmax, \ Kmax, \ EPS) void linvknndist(kmax, /* number of neighbours required */ nq, sq, tq, /* target data points (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments (pairs of vertices) */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ dist, /* distance from each vertex to the nearest, ..., kth nearest data points */ which /* identifies which data points */ ) int *kmax; int *nq, *nv, *ns; /* number of points, vertices, segments */ int *sq, *from, *to; /* integer vectors (mappings) */ double *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; int *which; { int Nq, Nv, Ns, Kmax, Nout, i, j, k, m; int segQj, ivleft, ivright, changed; double hugevalue, eps, slen, d, tqj; char converged; int UpdateKnnList(); Kmax = *kmax; Nq = *nq; Nv = *nv; Ns = *ns; hugevalue = *huge; eps = *tol; /* number of values in 'dist' and in 'which' */ Nout = Nv * Kmax; #ifdef HUH Rprintf("Initialise 
dist\n"); #endif /* initialise to huge value */ for(i = 0; i < Nout; i++) { dist[i] = hugevalue; which[i] = -1; } #ifdef HUH Rprintf("Run through target points\n"); #endif /* assign value to endpoints of segments containing target points */ for(j = 0; j < Nq; j++) { segQj = sq[j]; tqj = tq[j]; slen = seglen[segQj]; ivleft = from[segQj]; d = slen * tqj; UPDATE(ivleft, d, j, (double) 0.0); ivright = to[segQj]; d = slen * (1.0 - tqj); UPDATE(ivright, d, j, (double) 0.0); } #ifdef HUH Rprintf("Initialised values at vertices:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif /* recursively update */ #ifdef HUH Rprintf("Recursive update\n"); #endif converged = NO; while(!converged) { converged = YES; #ifdef HUH Rprintf("........... starting new pass ...................... \n"); Rprintf("Current state:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif for(m = 0; m < Ns; m++) { ivleft = from[m]; ivright = to[m]; slen = seglen[m]; #ifdef HUH Rprintf("updating right=%d from left=%d\n", ivright, ivleft); #endif for(k = 0; k < Kmax; k++) { changed = UPDATE(ivright, DIST(ivleft, k)+slen, WHICH(ivleft, k), eps); converged = converged && !changed; } #ifdef HUH Rprintf("updating left=%d from right=%d\n", ivleft, ivright); #endif for(k = 0; k < Kmax; k++) { changed = UPDATE(ivleft, DIST(ivright, k)+slen, WHICH(ivright, k), eps); converged = converged && !changed; } } } #ifdef HUH Rprintf("Done\nVertex values:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif } /* update a list of 
nearest, second nearest, ..., k-th nearest neighbours */ int UpdateKnnList(d, j, dist, which, Kmax, eps) double d; /* candidate distance */ int j; /* corresponding candidate target point */ int Kmax; double *dist; /* pointer to start of vector of length Kmax */ int *which; /* pointer to start of vector of length Kmax */ double eps; /* numerical tolerance, to prevent infinite loops */ { char matched, unsorted, changed; int k, Klast, itmp; double dtmp, dPlusEps; Klast = Kmax - 1; dPlusEps = d + eps; if(dPlusEps > dist[Klast]) return(NO); changed = NO; /* Check whether this data point is already listed as a neighbour */ matched = NO; for(k = 0; k < Kmax; k++) { if(which[k] == j) { matched = YES; #ifdef HUH Rprintf("\tMatch: which[%d] = %d\n", k, j); #endif if(dPlusEps <= dist[k]) { changed = YES; #ifdef HUH Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[k], d); #endif dist[k] = d; } break; } } if(!matched) { #ifdef HUH Rprintf("\tNo match with current list\n"); Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[Klast], d); #endif /* replace furthest point */ changed = YES; dist[Klast] = d; which[Klast] = j; } /* Bubble sort entries */ if(changed) { #ifdef HUH Rprintf("Bubble sort.\nCurrent state:\n\tk\twhich\tdist\n"); for(k = 0; k <= Klast; k++) Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]); #endif do { unsorted = NO; for(k = 0; k < Klast; k++) { if(dist[k] > dist[k+1]) { unsorted = YES; dtmp = dist[k]; dist[k] = dist[k+1]; dist[k+1] = dtmp; itmp = which[k]; which[k] = which[k+1]; which[k+1] = itmp; } } } while(unsorted); } #ifdef HUH Rprintf("Return state:\n\tk\twhich\tdist\n"); for(k = 0; k <= Klast; k++) Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]); #endif return( (int) changed); } spatstat/src/periodic.c0000644000176200001440000001103213406057617014654 0ustar liggesusers/* periodic.c Routines for periodic edge correction Naive algorithms O(n^2) in time (but memory-efficient) which can easily be adapted to more general metrics. 
Coordinates are NOT assumed to be sorted $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #define OK 0 #define ERR_OVERFLOW 1 #define ERR_ALLOC 2 #define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int)) #define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double)) double sqrt(); /* counterpart of 'closepairs' */ SEXP closePpair(SEXP xx, /* spatial coordinates */ SEXP yy, SEXP pp, /* period */ SEXP rr, /* max distance */ SEXP nguess) { double *x, *y; double xi, yi, rmax, r2max, dx, dy, d2, dxp, dyp; int n, k, kmax, kmaxold, maxchunk, i, j, m; double *period; double xperiod, yperiod; /* local storage */ int *iout, *jout; double *dout; /* R objects in return value */ SEXP Out, iOut, jOut, dOut; /* external storage pointers */ int *iOutP, *jOutP; double *dOutP; /* protect R objects from garbage collector */ PROTECT(xx = AS_NUMERIC(xx)); PROTECT(yy = AS_NUMERIC(yy)); PROTECT(pp = AS_NUMERIC(pp)); PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); /* that's 5 protected arguments */ #define NINPUTS 5 /* Translate arguments from R to C */ x = NUMERIC_POINTER(xx); y = NUMERIC_POINTER(yy); n = LENGTH(xx); period = NUMERIC_POINTER(pp); xperiod = period[0]; yperiod = period[1]; rmax = *(NUMERIC_POINTER(rr)); r2max = rmax * rmax; kmax = *(INTEGER_POINTER(nguess)); k = 0; /* k is the next available storage location and also the current length of the list */ if(n > 0 && kmax > 0) { /* allocate space */ iout = (int *) R_alloc(kmax, sizeof(int)); jout = (int *) R_alloc(kmax, sizeof(int)); dout = (double *) R_alloc(kmax, sizeof(double)); /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i 
> 0) { /* scan backward */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; if(dx < 0.0) dx = -dx; dxp = xperiod - dx; if(dxp < dx) dx = dxp; if(dx < rmax) { dy = y[j] - yi; if(dy < 0.0) dy = -dy; dyp = yperiod - dy; if(dyp < dy) dy = dyp; d2 = dx * dx + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { /* overflow; allocate more space */ kmaxold = kmax; kmax = 2 * kmax; iout = intRealloc(iout, kmaxold, kmax); jout = intRealloc(jout, kmaxold, kmax); dout = dblRealloc(dout, kmaxold, kmax); } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; dout[k] = sqrt(d2); ++k; } } } } if(i + 1 < n) { /* scan forward */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; if(dx < 0.0) dx = -dx; dxp = xperiod - dx; if(dxp < dx) dx = dxp; if(dx < rmax) { dy = y[j] - yi; if(dy < 0.0) dy = -dy; dyp = yperiod - dy; if(dyp < dy) dy = dyp; d2 = dx * dx + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { /* overflow; allocate more space */ kmaxold = kmax; kmax = 2 * kmax; iout = intRealloc(iout, kmaxold, kmax); jout = intRealloc(jout, kmaxold, kmax); dout = dblRealloc(dout, kmaxold, kmax); } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; dout[k] = sqrt(d2); ++k; } } } } /* end of i loop */ } } } /* return a list of vectors */ PROTECT(Out = NEW_LIST(3)); PROTECT(iOut = NEW_INTEGER(k)); PROTECT(jOut = NEW_INTEGER(k)); PROTECT(dOut = NEW_NUMERIC(k)); #define NALLOCATED 4 /* copy results into return object */ if(k > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); dOutP = NUMERIC_POINTER(dOut); for(m = 0; m < k; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; dOutP[m] = dout[m]; } } SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); SET_VECTOR_ELT(Out, 2, dOut); /* relinquish and return */ UNPROTECT(NINPUTS+NALLOCATED); return(Out); } spatstat/src/nndist.h0000644000176200001440000000405113406057617014365 0ustar liggesusers/* nndist.h Code template for C functions supporting nndist and nnwhich (k=1) THE FOLLOWING 
CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER This code is #included multiple times in nndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.2 $ $Date: 2012/03/14 02:37:27 $ */ void FNAME(n, x, y, #ifdef DIST nnd, #endif #ifdef WHICH nnwhich, #endif huge) /* inputs */ int *n; double *x, *y, *huge; /* outputs */ #ifdef DIST double *nnd; #endif #ifdef WHICH int *nnwhich; #endif { int npoints, i, maxchunk, left, right; double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2; #ifdef WHICH int which; #endif hu = *huge; hu2 = hu * hu; npoints = *n; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { d2min = hu2; #ifdef WHICH which = -1; #endif xi = x[i]; yi = y[i]; if(i < npoints - 1) { /* search forward */ for(right = i + 1; right < npoints; ++right) { dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2min) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = right; #endif } } } if(i > 0){ /* search backward */ for(left = i - 1; left >= 0; --left) { dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2min) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = left; #endif } } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH nnwhich[i] = which + 1; /* R indexing */ #endif } } } spatstat/src/ripleypoly.h0000644000176200001440000002375513542614444015310 0ustar liggesusers/* ripleypoly.h Ripley's edge correction for polygonal windows This file is #included multiple times in corrections.c Macros used: RIPLEYFUN Name of C function DEBUGPOLY #defined if debugging information should be printed. 
TESTINSIDE defined in corrections.c *CHUNKLOOP defined in chunkloop.h TWOPI defined in Rmath.h $Revision: 1.20 $ $Date: 2019/09/25 05:58:56 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ #undef DEBUGLEVEL #ifndef DEBUGPOLY #define DEBUGLEVEL 0 #else #define DEBUGLEVEL 3 #endif /* SPLITPOINT is used only when DEBUGLEVEL = 2 */ #undef SPLITPOINT #define SPLITPOINT 0 #undef ROUNDED #ifdef _WIN32 /* Avoid quirks of Windows i386 */ #define ROUNDED(X) ((float)(X)) #else #define ROUNDED(X) ((float)(X)) /* WAS: define ROUNDED(X) ((double)(X)) */ #endif void RIPLEYFUN(nc, xc, yc, bd, nr, rmat, nseg, x0, y0, x1, y1, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *bd, *rmat; double *x0, *y0, *x1, *y1; /* output */ double *out; { int n, m, i, j, k, l, nradperpt, ncut, nchanges, maxchunk; double xcentre, ycentre, xx0, yy0, xx1, yy1, xx01, yy01; double bdisti; double x, y, radius, radius2, dx0, dx1, dy0; double a, b, c, t, det, sqrtdet, tmp; double theta[6], delta[7], tmid[7]; double xtest, ytest, contrib, total; n = *nc; nradperpt = *nr; m = *nseg; #if (DEBUGLEVEL == 2) Rprintf("/// Debug level 2, split point %d ///\n", (int) SPLITPOINT); #elif (DEBUGLEVEL > 0) Rprintf("/// Debug level %d ///\n", (int) DEBUGLEVEL); #endif OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xcentre = xc[i]; ycentre = yc[i]; bdisti = bd[i]; #if (DEBUGLEVEL >= 3) Rprintf("------- centre[%d] = (%lf, %lf) ------\n", i, xcentre, ycentre); Rprintf(" boundary distance %lf \n", bdisti); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 1)) Rprintf("------- centre[%d] ------\n", i); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = (double) (radius * radius); #if (DEBUGLEVEL >= 3) Rprintf("radius[%d] = %lf\n", j, radius); #elif (DEBUGLEVEL >= 2) Rprintf("radius[%d]\n", j); #endif if(bdisti > radius) { /* no crossings */ total = TWOPI; #if (DEBUGLEVEL >= 2) Rprintf("no 
crossings; total = 2*pi\n"); #endif } else { /* run through all boundary segments */ total = 0.0; for(k=0; k < m; k++) { ncut = 0; xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #if (DEBUGLEVEL >= 3) Rprintf("... Edge[%d] = (%lf,%lf) to (%lf,%lf)\n", k, xx0, yy0, xx1, yy1); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 2)) Rprintf("... Edge[%d]\n", k); #endif /* intersection with left edge */ dx0 = xx0 - xcentre; det = (double) (radius2 - dx0 * dx0); #if (DEBUGLEVEL >= 3) Rprintf("Left: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 3)) Rprintf("Left:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 4))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); y = (double) (ycentre + sqrtdet); if(ROUNDED(y) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(y - ycentre, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\tcut left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 5)) Rprintf("\tcut left (+)\n"); #endif ncut++; } y = (double) (ycentre - sqrtdet); if(ROUNDED(y) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(y-ycentre, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\tcut left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 6)) Rprintf("\tcut left (-)\n"); #endif ncut++; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 7))) Rprintf("\tdet = 0\n"); #endif if(ROUNDED(ycentre) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(0.0, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 8)) Rprintf("\ttangent left\n"); #endif ncut++; } } /* intersection with right edge */ dx1 = xx1 - xcentre; det = (double) (radius2 - dx1 * dx1); #if (DEBUGLEVEL >= 3) Rprintf("Right: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 9)) Rprintf("Right:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || 
((DEBUGLEVEL == 2) && (SPLITPOINT >= 10))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); y = (double) (ycentre + sqrtdet); if(ROUNDED(y) < ROUNDED(yy1)) { theta[ncut] = (double) atan2(y - ycentre, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\tcut right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 11)) Rprintf("\tcut right (+)\n"); #endif ncut++; } y = (double) (ycentre - sqrtdet); if(ROUNDED(y) < ROUNDED(yy1)) { theta[ncut] = (double) atan2(y - ycentre, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\tcut right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 12)) Rprintf("\tcut right (-)\n"); #endif ncut++; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 13))) Rprintf("\tdet = 0\n"); #endif if(ycentre < yy1) { theta[ncut] = (double) atan2(0.0, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 14)) Rprintf("\ttangent right\n"); #endif ncut++; } } /* intersection with top segment */ xx01 = xx1 - xx0; yy01 = yy1 - yy0; dy0 = yy0 - ycentre; a = xx01 * xx01 + yy01 * yy01; b = 2 * (xx01 * dx0 + yy01 * dy0); c = dx0 * dx0 + dy0 * dy0 - radius2; det = (double) (b * b - 4 * a * c); #if (DEBUGLEVEL >= 3) Rprintf("Top: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 15)) Rprintf("Top:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 16))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); t = (double) ((sqrtdet - b)/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\thits + segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 17)) Rprintf("\thits + segment\n"); #endif ++ncut; } t = (double) ((-sqrtdet - b)/(2 * a)); 
if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\thits - segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 18)) Rprintf("\thits - segment\n"); #endif ++ncut; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 19))) Rprintf("\tdet = 0\n"); #endif t = (double) (- b/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent to segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 20)) Rprintf("\ttangent to segment\n"); #endif ++ncut; } } #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 21))) Rprintf("Finished cutting; ncut = %d\n", ncut); #endif /* for safety, force all angles to be in range [0, 2 * pi] */ if(ncut > 0) for(l = 0; l < ncut; l++) if(theta[l] < 0) theta[l] += TWOPI; /* sort angles */ if(ncut > 1) { do { nchanges = 0; for(l = 0; l < ncut - 1; l++) { if(theta[l] > theta[l+1]) { /* swap */ ++nchanges; tmp = theta[l]; theta[l] = theta[l+1]; theta[l+1] = tmp; } } } while(nchanges > 0); } #if (DEBUGLEVEL >= 3) if(ncut > 0) { for(l = 0; l < ncut; l++) Rprintf("theta[%d] = %lf\n", l, theta[l]); } #endif /* compute length of circumference inside polygon */ if(ncut == 0) { /* entire circle is either in or out */ xtest = xcentre + radius; ytest = ycentre; if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) contrib = TWOPI; else contrib = 0.0; } else { /* find midpoints and lengths of pieces (adding theta = ) */ delta[0] = theta[0]; tmid[0] = theta[0]/2; if(ncut > 1) { for(l = 1; l < ncut; l++) { delta[l] = theta[l] - theta[l-1]; tmid[l] = (theta[l] + theta[l-1])/2; } } delta[ncut] = TWOPI - theta[ncut - 1]; tmid[ncut] = (TWOPI + 
theta[ncut-1])/2; contrib = 0.0; for(l = 0; l <= ncut; l++) { #if (DEBUGLEVEL >= 3) Rprintf("Interval %d, width %lf:", l, delta[l]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 22)) Rprintf("Interval %d:", l); #endif xtest = (double) (xcentre + radius * cos(tmid[l])); ytest = (double) (ycentre + radius * sin(tmid[l])); if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) { contrib += delta[l]; #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 23))) Rprintf("inside\n"); } else { Rprintf("outside\n"); #endif } } } /* multiply by sign of trapezium */ if(xx0 < xx1) contrib = -contrib; #if (DEBUGLEVEL >= 3) Rprintf("contrib = %lf\n", contrib); #endif total += contrib; } } out[ j * n + i] = total; #if (DEBUGLEVEL >= 1) Rprintf("\nTotal = %lf = %lf * (2 * pi)\n", total, total/TWOPI); #endif } } } } spatstat/src/Ediggatsti.c0000755000176200001440000000354313406057617015155 0ustar liggesusers#include #include #include #include "chunkloop.h" #include "looptest.h" #include "constants.h" /* Ediggatsti.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ C implementation of 'eval' for DiggleGatesStibbard interaction Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Ediggatsti(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, rrho, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rho, rho2, rho2pluseps, coef, product; nsource = *nnsource; ntarget = *nntarget; rho = *rrho; if(nsource == 0 || ntarget == 0) return; rho2 = rho * rho; coef = M_PI_2/rho; rho2pluseps = rho2 + EPSILON(rho2); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 
65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting position */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rho */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2pluseps) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) product *= sin(sqrt(d2) * coef); } } values[j] = log(product * product); } } } spatstat/src/constants.h0000644000176200001440000000074713406057617015112 0ustar liggesusers/* constants.h Ensure that required constants are defined (Insurance against flaky installations) $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef M_PI #define M_PI 3.141592653589793 #endif #ifndef M_PI_2 #define M_PI_2 1.570796326794897 #endif #ifndef M_2_PI #define M_2_PI (2.0/M_PI) #endif #ifndef M_2PI #define M_2PI 6.283185307179586 #endif spatstat/src/discs.c0000644000176200001440000000367113406057617014175 0ustar liggesusers#include #include /* discs.c Fill binary mask with discs with given centres and radii $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void discs2grid(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ nd, xd, yd, rd, /* disc parameters */ out) /* inputs */ int *nx, *ny, *nd; double *x0, *xstep, *y0, *ystep; double *xd, *yd, *rd; /* output */ int *out; { int Nxcol, Nyrow, Ndiscs; double X0, Y0, Xstep, Ystep; int i, j, k; double xk, yk, rk, rk2, dx, dymax; int imin, imax, jmin, jmax, iminj, imaxj, Nxcol1, Nyrow1; Nxcol = *nx; Nyrow = *ny; Ndiscs = *nd; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; if(Ndiscs == 0) return; Nxcol1 = Nxcol - 1; Nyrow1 = Nyrow - 1; /* loop over discs */ for(k = 0; k < Ndiscs; k++) { 
R_CheckUserInterrupt(); xk = xd[k]; yk = yd[k]; rk = rd[k]; /* find valid range of i and j */ imax = floor( (yk + rk - Y0)/Ystep); imin = ceil((yk - rk - Y0)/Ystep); jmax = floor( (xk + rk - X0)/Xstep); jmin = ceil((xk - rk - X0)/Xstep); if(imax >= 0 && imin < Nyrow && jmax >= 0 && jmin < Nxcol && imax >= imin && jmax >= jmin) { if(imin < 0) imin = 0; if(imax > Nyrow1) imax = Nyrow1; if(jmin < 0) jmin = 0; if(jmax > Nxcol1) jmax = Nxcol1; rk2 = rk * rk; /* loop over relevant pixels */ for(j = jmin, dx=X0 + jmin * Xstep - xk; j <= jmax; j++, dx += Xstep) { dymax = sqrt(rk2 - dx * dx); imaxj = floor( (yk + dymax - Y0)/Ystep); iminj = ceil((yk - dymax - Y0)/Ystep); if(imaxj >= 0 && iminj < Nyrow) { if(iminj < 0) iminj = 0; if(imaxj > Nyrow1) imaxj = Nyrow1; for(i = iminj; i <= imaxj; i++) out[i + j * Nyrow] = 1; } } } } } spatstat/src/uniquemap.c0000644000176200001440000000120713470731536015065 0ustar liggesusers/* uniquemap.c !! Assumes points are ordered by increasing x value !! $Revision: 1.2 $ $Date: 2019/05/21 07:36:34 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ #include #undef ZCOORD #undef MARKED #undef QUITANY #define FUNNAME uniqmapxy #include "uniquemap.h" #undef FUNNAME #define QUITANY #define FUNNAME anydupxy #include "uniquemap.h" #undef FUNNAME #undef QUITANY #define MARKED #undef QUITANY #define FUNNAME uniqmap2M #include "uniquemap.h" #undef FUNNAME #define QUITANY #define FUNNAME anydup2M #include "uniquemap.h" #undef FUNNAME #undef QUITANY spatstat/src/maxnnd.h0000644000176200001440000000373113406057617014357 0ustar liggesusers/* maxnnd.h Code template for maxnnd to be #included in minnnd.c Macros: FNAME Function name IGNOREZERO #defined if zero distances should be ignored Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ */ /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ 
void FNAME(n, x, y, huge, result) /* inputs */ int *n; double *x, *y, *huge; /* outputs */ double *result; { int npoints, i, maxchunk, left, right; double d2, d2mini, d2max, xi, yi, dx, dy, dy2, hu, hu2; hu = *huge; hu2 = hu * hu; npoints = *n; /* maximum (over all i) nearest-neighbour distance, squared */ d2max = 0.0; if(npoints == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; /* nearest-neighbour distance for point i, squared */ d2mini = hu2; if(i < npoints - 1) { /* search forward */ for(right = i + 1; right < npoints; ++right) { dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2mini) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2mini) { #ifdef IGNOREZERO if(d2 > 0) { #endif d2mini = d2; if(d2mini <= d2max) break; #ifdef IGNOREZERO } #endif } } } if(i > 0 && d2mini > d2max){ /* search backward */ for(left = i - 1; left >= 0; --left) { dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2mini) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2mini) { #ifdef IGNOREZERO if(d2 > 0) { #endif d2mini = d2; if(d2mini <= d2max) break; #ifdef IGNOREZERO } #endif } } } if(d2mini > d2max) d2max = d2mini; } } *result = d2max; } spatstat/src/rthin.c0000644000176200001440000000362513406057617014213 0ustar liggesusers#include #include #include /* rthin.c Select from the integers 1:n with probability p by simulating geometric(p) jumps between selected integers $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ SEXP thinjumpequal(SEXP n, SEXP p, SEXP guess) { int N; double P; int *w; /* temporary storage for selected integers */ int nw, nwmax; int i, j, k; double log1u, log1p; /* R object return value */ SEXP Out; /* external storage pointer */ int *OutP; /* protect R objects from garbage collector */ 
PROTECT(p = AS_NUMERIC(p)); PROTECT(n = AS_INTEGER(n)); PROTECT(guess = AS_INTEGER(guess)); /* Translate arguments from R to C */ N = *(INTEGER_POINTER(n)); P = *(NUMERIC_POINTER(p)); nwmax = *(INTEGER_POINTER(guess)); /* Allocate space for result */ w = (int *) R_alloc(nwmax, sizeof(int)); /* set up */ GetRNGstate(); log1p = -log(1.0 - P); /* main loop */ i = 0; /* last selected element of 1...N */ nw = 0; /* number of selected elements */ while(i <= N) { log1u = exp_rand(); /* an exponential rv is equivalent to -log(1-U) */ j = (int) ceil(log1u/log1p); /* j is geometric(p) */ i += j; if(nw >= nwmax) { /* overflow; allocate more space */ w = (int *) S_realloc((char *) w, 2 * nwmax, nwmax, sizeof(int)); nwmax = 2 * nwmax; } /* add 'i' to output vector */ w[nw] = i; ++nw; } /* The last saved 'i' could have exceeded 'N' */ /* For efficiency we don't check this in the loop */ if(nw > 0 && w[nw-1] > N) --nw; PutRNGstate(); /* create result vector */ PROTECT(Out = NEW_INTEGER(nw)); /* copy results into output */ OutP = INTEGER_POINTER(Out); for(k = 0; k < nw; k++) OutP[k] = w[k]; UNPROTECT(4); return(Out); } spatstat/src/linknnd.h0000644000176200001440000001030713406057617014524 0ustar liggesusers/* linknnd.h k-th nearest neighbours in a linear network Using sparse representation of network ! Data points must be ordered by segment index ! 
This code is #included several times in linknnd.c Macros required: FNAME Function name CROSS #defined for X-to-Y, undefined for X-to-X HUH debugging flag $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define MAT(MATRIXNAME, INDEX, ORDER) MATRIXNAME[(ORDER) + (INDEX) * Kmax] #define NNDIST(INDEX, ORDER) MAT(nndist, (INDEX), (ORDER)) #define NNWHICH(INDEX, ORDER) MAT(nnwhich, (INDEX), (ORDER)) #define VDIST(INDEX, ORDER) MAT(dminvert, (INDEX), (ORDER)) #define VWHICH(INDEX, ORDER) MAT(whichvert, (INDEX), (ORDER)) #define UPDATENN(INDEX, D, J) \ UpdateKnnList(D, J, \ nndist + (INDEX) * Kmax, \ nnwhich + (INDEX) * Kmax, \ Kmax, \ (double) 0.0) /* ................. */ void FNAME(kmax, /* number of neighbours required */ np, sp, tp, /* source data points (ordered by sp) */ #ifdef CROSS nq, sq, tq, /* target data points (ordered by sq) */ #endif nv, /* number of network vertices */ ns, from, to, /* segments (pairs of vertices) */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ nndist, /* distance from each source point to the nearest, ..., kth nearest target points */ nnwhich /* identifies which target points */ ) int *kmax; int *np, *nv, *ns; /* number of points, vertices, segments */ int *sp, *from, *to; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ #ifdef CROSS int *nq, *sq; double *tq; #endif double *huge, *tol; double *seglen; double *nndist; int *nnwhich; { int Np, Nv, Kmax, Nout, i, j, ivleft, ivright, jfirst, jlast, k, m; double d, hugevalue, slen, tpi, deltad; double *dminvert; /* min dist from each vertex */ int *whichvert; /* which min from each vertex */ int linvknndist(), UpdateKnnList(); #ifdef CROSS int Nq; #else #define Nq Np #define nq np #define sq sp #define tq tp #endif Kmax = *kmax; Np = *np; Nv = *nv; hugevalue = *huge; #ifdef CROSS Nq = 
*nq; #endif /* First compute min distances to target set from each vertex */ #ifdef HUH Rprintf("Computing distances from each vertex\n"); #endif dminvert = (double *) R_alloc(Nv * Kmax, sizeof(double)); whichvert = (int *) R_alloc(Nv * Kmax, sizeof(int)); linvknndist(kmax, nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert, whichvert); #ifdef HUH Rprintf("Initialise answer\n"); #endif /* initialise nn distances from source points */ Nout = Np * Kmax; for(i = 0; i < Nout; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* run through all source points */ #ifdef HUH Rprintf("Run through source points\n"); #endif jfirst = 0; for(i = 0; i < Np; i++) { tpi = tp[i]; m = sp[i]; /* segment containing this point */ slen = seglen[m]; ivleft = from[m]; ivright = to[m]; #ifdef HUH Rprintf("Source point %d lies on segment %d = [%d,%d]\n", i, m, ivleft, ivright); #endif deltad = slen * tpi; #ifdef HUH Rprintf("\tComparing to left endpoint %d, distance %lf\n", ivleft, deltad); #endif for(k = 0; k < Kmax; k++) UPDATENN(i, deltad + VDIST(ivleft, k), VWHICH(ivleft, k)); deltad = slen * (1.0 - tpi); #ifdef HUH Rprintf("\tComparing to right endpoint %d, distance %lf\n", ivright, deltad); #endif for(k = 0; k < Kmax; k++) UPDATENN(i, deltad + VDIST(ivright, k), VWHICH(ivright, k)); /* find any target points in this segment */ while(jfirst < Nq && sq[jfirst] < m) jfirst++; jlast = jfirst; while(jlast < Nq && sq[jlast] == m) jlast++; --jlast; /* if there are no such points, then jlast < jfirst */ if(jfirst <= jlast) { for(j = jfirst; j <= jlast; j++) { d = slen * fabs(tq[j] - tpi); UPDATENN(i, d, j); } } } } #undef MAT #undef NNDIST #undef NNWHICH #undef VDIST #undef VWHICH #undef UPDATENN #ifndef CROSS #undef nq #undef Nq #undef sq #undef tq #endif spatstat/src/dist2dpath.c0000755000176200001440000000070113406057617015130 0ustar liggesusers#include #include /* given matrix of edge lengths compute matrix of shortest-path distances Uses dist2dpath.h Copyright (C) Adrian Baddeley, 
Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define FNAME Ddist2dpath #define DTYPE double #define FLOATY #include "dist2dpath.h" #undef FNAME #undef DTYPE #undef FLOATY #define FNAME Idist2dpath #define DTYPE int #include "dist2dpath.h" spatstat/src/fardist.c0000644000176200001440000000070413406057617014516 0ustar liggesusers/* fardist.c Furthest data point from each grid point Uses code template 'fardist.h' Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014 Licence: GPL >= 2 $Revision: 1.2 $ $Date: 2014/08/31 06:43:42 $ */ #include #include #include double sqrt(); #define FNAME fardistgrid #undef SQUARED #include "fardist.h" #undef FNAME #define FNAME fardist2grid #define SQUARED #include "fardist.h" spatstat/src/knngrid.c0000644000176200001440000000400613406057617014515 0ustar liggesusers/* knngrid.c K-th Nearest Neighbour Distances from a pixel grid to a point pattern Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.6 $ $Date: 2013/11/03 05:06:28 $ Function body definition is #included from knngrid.h THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */ /* general interface */ void knnGinterface(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ kmax, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; int *kmax; /* options */ int *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnGdw(), knnGd(), knnGw(); int di, wh; di = (*wantdist != 0); wh = (*wantwhich != 0); if(di && wh) { knnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } else if(di) { knnGd(nx, x0, xstep, ny, 
y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } else if(wh) { knnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } } #undef FNAME #undef DIST #undef WHICH /* knnGdw nearest neighbours 1:kmax returns distances and indices */ #define FNAME knnGdw #define DIST #define WHICH #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH /* knnGd nearest neighbours 1:kmax returns distances only */ #define FNAME knnGd #define DIST #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH /* knnGw nearest neighbours 1:kmax returns indices only */ #define FNAME knnGw #define WHICH #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH spatstat/src/geom3.h0000755000176200001440000000041013406057617014076 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions for 3D geometrical structures */ typedef struct Point { double x; double y; double z; } Point; typedef struct Box { double x0; double x1; double y0; double y1; double z0; double z1; } Box; spatstat/src/dist2dpath.h0000644000176200001440000001002613406057617015133 0ustar liggesusers/* Function body for dist2dpath.c Macros used: FNAME function name DTYPE declaration for distance values ('double' or 'int') FLOATY (DTYPE == 'double') $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef DEBUG #define MATRIX(X,I,J) (X)[(J) + n * (I)] #define D(I,J) MATRIX(d, I, J) #define DPATH(I,J) MATRIX(dpath, I, J) #define ADJ(I,J) (MATRIX(adj, I, J) != 0) #define INFIN -1 #define FINITE(X) ((X) >= 0) void FNAME(nv, d, adj, dpath, tol, niter, status) int *nv; /* number of vertices */ DTYPE *d; /* matrix of edge lengths */ int *adj; /* 0/1 edge matrix of graph */ DTYPE *tol; /* tolerance threshold (ignored in integer case) */ DTYPE *dpath; /* output - shortest path distance matrix */ int *niter, *status; /* status = 0 for convergence */ { int i, j, k, n, iter, maxiter, changed; DTYPE 
dij, dik, dkj, dikj; #ifdef FLOATY DTYPE eps, diff, maxdiff; #endif int totaledges, starti, nneighi, increm, pos; int *start, *nneigh, *indx; n = *nv; #ifdef FLOATY eps = *tol; #endif /* initialise and count edges */ *status = -1; totaledges = 0; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { DPATH(i, j) = (i == j) ? 0 : ((ADJ(i,j)) ? D(i, j) : INFIN); if((i != j) && ADJ(i,j)) ++totaledges; } } maxiter = 2 + ((totaledges > n) ? totaledges : n); /* store indices j for each edge (i,j) */ indx = (int *) R_alloc(totaledges, sizeof(int)); nneigh = (int *) R_alloc(n, sizeof(int)); start = (int *) R_alloc(n, sizeof(int)); pos = 0; for(i = 0; i < n; i++) { nneigh[i] = 0; start[i] = pos; #ifdef DEBUG Rprintf("Neighbours of %d:\n", i); #endif for(j = 0; j < n; j++) { if((i != j) && ADJ(i,j) && FINITE(D(i,j))) { #ifdef DEBUG Rprintf("\t%d\n", j); #endif ++(nneigh[i]); if(pos > totaledges) error("internal error: pos exceeded storage"); indx[pos] = j; ++pos; } } } /* run */ for(iter = 0; iter < maxiter; iter++) { changed = 0; #ifdef FLOATY maxdiff = 0; #endif #ifdef DEBUG Rprintf("--------- iteration %d ---------------\n", iter); #endif for(i = 0; i < n; i++) { R_CheckUserInterrupt(); nneighi = nneigh[i]; if(nneighi > 0) { /* run through neighbours k of i */ starti = start[i]; for(increm = 0, pos=starti; increm < nneighi; ++increm, ++pos) { k = indx[pos]; dik = DPATH(i,k); #ifdef DEBUG #ifdef FLOATY Rprintf("i=%d k=%d dik=%lf\n", i, k, dik); #else Rprintf("i=%d k=%d dik=%d\n", i, k, dik); #endif #endif /* now run through all other vertices j */ for(j = 0; j < n; j++) { if(j != i && j != k) { dij = DPATH(i,j); dkj = DPATH(k,j); if(FINITE(dkj)) { dikj = dik + dkj; #ifdef DEBUG #ifdef FLOATY Rprintf("considering %d -> (%d) -> %d,\t dij=%lf, dikj=%lf\n", i, k, j, dij, dikj); #else Rprintf("considering %d -> (%d) -> %d,\t dij=%d, dikj=%d\n", i, k, j, dij, dikj); #endif #endif if(!FINITE(dij) || dikj < dij) { #ifdef DEBUG #ifdef FLOATY Rprintf("updating i=%d j=%d via k=%d from %lf 
to %lf\n", i, j, k, dij, dikj); #else Rprintf("updating i=%d j=%d via k=%d from %d to %d\n", i, j, k, dij, dikj); #endif #endif DPATH(i,j) = DPATH(j,i) = dikj; changed = 1; #ifdef FLOATY diff = (FINITE(dij)) ? dij - dikj : dikj; if(diff > maxdiff) maxdiff = diff; #endif } } } } } } } if(changed == 0) { /* algorithm converged */ #ifdef DEBUG Rprintf("Algorithm converged\n"); #endif *status = 0; break; #ifdef FLOATY } else if(FINITE(maxdiff) && maxdiff < eps) { /* tolerance reached */ #ifdef DEBUG Rprintf("Algorithm terminated with maxdiff=%lf\n", maxdiff); #endif *status = 1; break; #endif } } #ifdef DEBUG Rprintf("Returning after %d iterations on %d vertices\n", iter, n); #endif *niter = iter; } #undef DEBUG #undef MATRIX #undef D #undef DPATH #undef ADJ #undef INFIN #undef FINITE spatstat/src/sphevol.c0000755000176200001440000000746413406057617014557 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ Routine for calculating ABSOLUTE volume of intersection between sphere and box Arbitrary positions: point is allowed to be inside or outside box. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif #include "yesno.h" #define ABS(X) ((X >= 0.0) ? 
(X) : -(X)) static double rcubed, spherevol; double sphevol(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double v1(), v2(), v3(); int i, j; rcubed = r * r * r; spherevol = (4.0/3.0) * PI * rcubed; p[1] = box->x0 - point->x; p[2] = box->y0 - point->y; p[3] = box->z0 - point->z; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += v1(p[i], -1, r) + v1(q[i], 1, r); #ifdef DEBUG Rprintf("i = %d, v1 = %f, v1 = %f\n", i, v1(p[i], -1, r), v1(q[i], 1, r)); #endif } DBG("Past v1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= v2(p[i], -1, p[j], -1, r) + v2(p[i], -1, q[j], 1, r) + v2(q[i], 1, p[j], -1, r) + v2(q[i], 1, q[j], 1, r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past v2", sum) sum += v3(p[1], -1, p[2], -1, p[3], -1, r) + v3(p[1], -1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(p[1], -1, q[2], 1, p[3], -1, r) + v3(p[1], -1, q[2], 1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, p[2], -1, p[3], -1, r) + v3(q[1], 1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, q[2], 1, p[3], -1, r) + v3(q[1], 1, q[2], 1, q[3], 1, r); DBG("Past v3", sum) DBG("sphere volume", spherevol) return(spherevol - sum); } double v1(a,s,r) double a, r; int s; { double value; double u(); short sign; value = 4.0 * rcubed * u(ABS(a)/r, 0.0, 0.0); sign = (a >= 0.0) ? 1 : -1; if(sign == s) return(value); else return(spherevol - value); } double v2(a, sa, b, sb, r) double a, b, r; int sa, sb; { short sign; double u(); sign = (b >= 0.0) ? 1 : -1; if(sign != sb ) return(v1(a, sa, r) - v2(a, sa, ABS(b), 1, r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v1(b, sb, r) - v2(ABS(a), 1, b, sb, r)); a = ABS(a); return(2.0 * rcubed * u(a/r, b/r, 0.0)); } double v3(a, sa, b, sb, c, sc, r) double a, b, c, r; int sa, sb, sc; { short sign; double u(); sign = (c >= 0.0) ? 
1 : -1; if(sign != sc) return(v2(a,sa,b,sb,r) - v3(a,sa,b,sb, ABS(c), 1, r)); c = ABS(c); sc = 1; sign = (b >= 0.0) ? 1 : -1; if(sign != sb) return(v2(a,sa,c,sc,r) - v3(a,sa,ABS(b),1,c,sc,r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v2(b,sb, c, sc, r) - v3(ABS(a),1, b, sb, c, sc, r)); a = ABS(a); return(rcubed * u(a/r, b/r, c/r)); } double u(a, b, c) double a, b, c; { double w(); if(a * a + b * b + c * c >= 1.0) return(0.0); return( (PI/12.0) * (2.0 - 3.0 * (a + b + c) + (a * a * a + b * b * b + c * c * c)) + w(a,b) + w(b,c) + w(a,c) - a * b * c ); } double w(x,y) double x,y; /* Arguments assumed >= 0 */ { double z; z = sqrt(1 - x * x - y * y); return( (x / 2.0 - x * x * x / 6.0) * atan2(y, z) + (y / 2.0 - y * y * y / 6.0) * atan2(x, z) - ( atan2(x * y , z) - x * y * z )/3.0 ); } spatstat/src/xyseg.c0000755000176200001440000005300713406057617014230 0ustar liggesusers/* xyseg.c Computation with line segments xysegint compute intersections between line segments $Revision: 1.20 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #include #include #include "chunkloop.h" #define NIETS -1.0 #undef DEBUG #define INSIDE01(X,E) (X * (1.0 - X) >= -E) /* --------------- PAIRS OF PSP OBJECTS ---------------------- */ /* xysegint Determines intersections between each pair of line segments drawn from two lists of line segments. Line segments are given as x0, y0, dx, dy where (x0,y0) is the first endpoint and (dx, dy) is the vector from the first to the second endpoint. Points along a line segment are represented in parametric coordinates, (x,y) = (x0, y0) + t * (dx, dy). Output from xysegint() consists of five matrices xx, yy, ta, tb, ok. The (i,j)-th entries in these matrices give information about the intersection between the i-th segment in list 'a' and the j-th segment in list 'b'. 
The information is ok[i,j] = 1 if there is an intersection = 0 if not xx[i,j] = x coordinate of intersection yy[i,j] = y coordinate of intersection ta[i,j] = parameter of intersection point relative to i-th segment in list 'a' tb[i,j] = parameter of intersection point relative to j-th segment in list 'b' */ void xysegint(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, xx, yy, ta, tb, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ta, *tb; int *ok; { int i, j, ma, mb, ijpos, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; xx[ijpos] = yy[ijpos] = ta[ijpos] = tb[ijpos] = NIETS; determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? 
determinant : -determinant; #ifdef DEBUG Rprintf("i = %d, j = %d\n", i, j); Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n", x0a[i], y0a[i], x0a[i] + dxa[i], y0a[i] + dya[i]); Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n", x0b[j], y0b[j], x0b[j] + dxb[j], y0b[j] + dyb[j]); Rprintf("determinant=%lf\n", determinant); #endif if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; ta[ijpos] = tta = - dyb[j] * diffx + dxb[j] * diffy; tb[ijpos] = ttb = - dya[i] * diffx + dxa[i] * diffy; #ifdef DEBUG Rprintf("ta = %lf, tb = %lf\n", tta, ttb); #endif if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ ok[ijpos] = 1; xx[ijpos] = x0a[i] + tta * dxa[i]; yy[ijpos] = y0a[i] + tta * dya[i]; #ifdef DEBUG Rprintf("segments intersect at (%lf, %lf)\n", xx[ijpos], yy[ijpos]); #endif } } } } } } /* Stripped-down version of xysegint that just returns logical matrix */ void xysi(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ int *ok; { int i, j, ma, mb, ijpos, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; tta = - dyb[j] * diffx + dxb[j] * diffy; ttb = - dya[i] * diffx + dxa[i] * diffy; if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ ok[ijpos] = 1; } } } } } } /* Test whether there is at least one intersection */ void xysiANY(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* output (single logical value) */ int *ok; { int i, j, ma, mb, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; *ok = 0; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? determinant : -determinant; if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; tta = - dyb[j] * diffx + dxb[j] * diffy; ttb = - dya[i] * diffx + dxa[i] * diffy; if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ *ok = 1; return; } } } } } } /* Analogue of xysegint when segments in list 'a' are infinite vertical lines */ void xysegVslice(na, xa, nb, x0b, y0b, dxb, dyb, eps, yy, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *xa, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *yy; int *ok; { int i, j, ma, mb, ijpos, maxchunk; double diffx0, diffx1, width, abswidth, epsilon; int notvertical; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { /* determine whether segment j is nearly vertical */ width = dxb[j]; abswidth = (width > 0) ? 
width : -width; notvertical = (abswidth <= epsilon); for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; yy[ijpos] = NIETS; /* test whether vertical line i separates endpoints of segment j */ diffx0 = xa[i] - x0b[j]; diffx1 = diffx0 - width; if(diffx0 * diffx1 <= 0) { /* intersection */ ok[ijpos] = 1; /* compute y-coordinate of intersection point */ if(notvertical) { yy[ijpos] = y0b[j] + diffx0 * dyb[j]/width; } else { /* vertical or nearly-vertical segment: pick midpoint */ yy[ijpos] = y0b[j] + dyb[j]/2.0; } } } } } } /* -------------- ONE PSP OBJECT ---------------------------- */ /* Similar to xysegint, but computes intersections between all pairs of segments in a single list, excluding the diagonal comparisons of course */ void xysegXint(n, x0, y0, dx, dy, eps, xx, yy, ti, tj, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ti, *tj; int *ok; { int i, j, m, mm1, ijpos, jipos, iipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; mm1 = m - 1; OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm1, maxchunk, 8196) { for(i = j+1; i < m; i++) { ijpos = j * m + i; jipos = i * m + j; ok[ijpos] = ok[jipos] = 0; xx[ijpos] = yy[ijpos] = ti[ijpos] = ti[jipos] = NIETS; xx[jipos] = yy[jipos] = tj[ijpos] = tj[jipos] = NIETS; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy; tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy; tj[jipos] = ti[ijpos]; ti[jipos] = tj[ijpos]; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i]; yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i]; } } } } } /* assign diagonal */ for(i = 0; i < m; i++) { iipos = i * m + i; ok[iipos] = 0; xx[iipos] = yy[iipos] = ti[iipos] = tj[iipos] = NIETS; } } /* Reduced version of xysegXint that returns logical matrix 'ok' only */ void xysxi(n, x0, y0, dx, dy, eps, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ int *ok; { int i, j, m, mm1, ijpos, jipos, iipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; mm1 = m - 1; OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm1, maxchunk, 8196) { for(i = j+1; i < m; i++) { ijpos = j * m + i; jipos = i * m + j; ok[ijpos] = ok[jipos] = 0; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; tti = - dy[j] * diffx + dx[j] * diffy; ttj = - dy[i] * diffx + dx[i] * diffy; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; } } } } } /* assign diagonal */ for(i = 0; i < m; i++) { iipos = i * m + i; ok[iipos] = 0; } } /* ---------------------- ONE CLOSED POLYGON ------------------------ */ /* Identify self-intersections in a closed polygon (Similar to xysegXint, but does not compare segments which are cyclically adjacent in the list) */ void Cxypolyselfint(n, x0, y0, dx, dy, eps, xx, yy, ti, tj, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ti, *tj; int *ok; { int i, j, k, m, m2, mm1, mm2, mstop, ijpos, jipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; m2 = m * m; /* initialise matrices */ for(k = 0; k < m2; k++) { ok[k] = 0; xx[k] = yy[k] = ti[k] = tj[k] = NIETS; } if(m <= 2) return; /* Compare j with j+2, j+3, ...., m-1 Don't compare 0 with m-1 */ mm1 = m - 1; mm2 = m - 2; OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm2, maxchunk, 8196) { mstop = (j > 0) ? m : mm1; for(i = j+2; i < mstop; i++) { ijpos = j * m + i; jipos = i * m + j; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy; tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy; tj[jipos] = ti[ijpos]; ti[jipos] = tj[ijpos]; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i]; yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i]; } } } } } } /* Just determines whether there is self-intersection (exits quicker & uses less space) */ void xypsi(n, x0, y0, dx, dy, xsep, ysep, eps, proper, answer) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* inputs (distances beyond which intersection is impossible) */ double *xsep, *ysep; /* input (tolerance for determinant) */ double *eps; /* input (flag) */ int *proper; /* output */ int *answer; { int i, j, m, mm1, mm2, mstop, prop, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; double Xsep, Ysep; m = *n; prop = *proper; Xsep = *xsep; Ysep = *ysep; epsilon = *eps; *answer = 0; if(m <= 2) return; /* Compare j with j+2, j+3, ...., m-1 Don't compare 0 with m-1 */ mm1 = m - 1; mm2 = m - 2; OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm2, maxchunk, 8196) { mstop = (j > 0) ? m : mm1; for(i = j+2; i < mstop; i++) { diffx = x0[j] - x0[i]; diffy = y0[j] - y0[i]; if(diffx < Xsep && diffx > -Xsep && diffy < Ysep && diffy > -Ysep) { determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = diffx/determinant; diffy = diffy/determinant; tti = - dy[j] * diffx + dx[j] * diffy; ttj = - dy[i] * diffx + dx[i] * diffy; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { /* intersection occurs */ if(prop == 0 || (tti != 0.0 && tti != 1.0) || (ttj != 0.0 && ttj != 1.0)) { /* proper intersection */ *answer = 1; return; } } } } } } } } /* ---------------- .Call INTERFACE --------------------------- Analogues of functions above, but using the .Call interface and dynamic storage allocation, to save space. */ SEXP Cxysegint(SEXP x0a, SEXP y0a, SEXP dxa, SEXP dya, SEXP x0b, SEXP y0b, SEXP dxb, SEXP dyb, SEXP eps) { int i, j, k, na, nb; double determinant, absdet, diffx, diffy, tta, ttb; int nout, noutmax, newmax, maxchunk; double epsilon; double *x0A, *y0A, *dxA, *dyA, *x0B, *y0B, *dxB, *dyB; double *ta, *tb, *x, *y; int *ia, *jb; SEXP out, iAout, jBout, tAout, tBout, xout, yout; double *tAoutP, *tBoutP, *xoutP, *youtP; int *iAoutP, *jBoutP; PROTECT(x0a = AS_NUMERIC(x0a)); PROTECT(y0a = AS_NUMERIC(y0a)); PROTECT(dxa = AS_NUMERIC(dxa)); PROTECT(dya = AS_NUMERIC(dya)); PROTECT(x0b = AS_NUMERIC(x0b)); PROTECT(y0b = AS_NUMERIC(y0b)); PROTECT(dxb = AS_NUMERIC(dxb)); PROTECT(dyb = AS_NUMERIC(dyb)); PROTECT(eps = AS_NUMERIC(eps)); /* that's 9 protected */ /* get pointers */ x0A = NUMERIC_POINTER(x0a); y0A = NUMERIC_POINTER(y0a); dxA = NUMERIC_POINTER(dxa); dyA = NUMERIC_POINTER(dya); x0B = NUMERIC_POINTER(x0b); y0B = NUMERIC_POINTER(y0b); dxB = NUMERIC_POINTER(dxb); dyB = NUMERIC_POINTER(dyb); /* determine length of vectors */ na = LENGTH(x0a); nb = LENGTH(x0b); epsilon = *(NUMERIC_POINTER(eps)); /* guess amount of storage required for output */ noutmax = (na > nb) ? 
na : nb; nout = 0; ia = (int *) R_alloc(noutmax, sizeof(int)); jb = (int *) R_alloc(noutmax, sizeof(int)); ta = (double *) R_alloc(noutmax, sizeof(double)); tb = (double *) R_alloc(noutmax, sizeof(double)); x = (double *) R_alloc(noutmax, sizeof(double)); y = (double *) R_alloc(noutmax, sizeof(double)); /* scan data and collect intersections */ OUTERCHUNKLOOP(j, nb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nb, maxchunk, 8196) { for(i = 0; i < na; i++) { determinant = dxB[j] * dyA[i] - dyB[j] * dxA[i]; absdet = (determinant > 0) ? determinant : -determinant; #ifdef DEBUG Rprintf("i = %d, j = %d\n", i, j); Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n", x0A[i], y0A[i], x0A[i] + dxA[i], y0A[i] + dyA[i]); Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n", x0B[j], y0B[j], x0B[j] + dxB[j], y0B[j] + dyB[j]); Rprintf("determinant=%lf\n", determinant); #endif if(absdet > epsilon) { diffx = (x0B[j] - x0A[i])/determinant; diffy = (y0B[j] - y0A[i])/determinant; tta = - dyB[j] * diffx + dxB[j] * diffy; ttb = - dyA[i] * diffx + dxA[i] * diffy; #ifdef DEBUG Rprintf("ta = %lf, tb = %lf\n", tta, ttb); #endif if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ if(nout >= noutmax) { /* storage overflow - increase space */ newmax = 4 * noutmax; ia = (int *) S_realloc((char *) ia, newmax, noutmax, sizeof(int)); jb = (int *) S_realloc((char *) jb, newmax, noutmax, sizeof(int)); ta = (double *) S_realloc((char *) ta, newmax, noutmax, sizeof(double)); tb = (double *) S_realloc((char *) tb, newmax, noutmax, sizeof(double)); x = (double *) S_realloc((char *) x, newmax, noutmax, sizeof(double)); y = (double *) S_realloc((char *) y, newmax, noutmax, sizeof(double)); noutmax = newmax; } ta[nout] = tta; tb[nout] = ttb; ia[nout] = i; jb[nout] = j; x[nout] = x0A[i] + tta * dxA[i]; y[nout] = y0A[i] + tta * dyA[i]; #ifdef DEBUG Rprintf("segments intersect at (%lf, %lf)\n", x[nout], y[nout]); #endif ++nout; } } } } } /* pack up */ PROTECT(iAout = 
NEW_INTEGER(nout)); PROTECT(jBout = NEW_INTEGER(nout)); PROTECT(tAout = NEW_NUMERIC(nout)); PROTECT(tBout = NEW_NUMERIC(nout)); PROTECT(xout = NEW_NUMERIC(nout)); PROTECT(yout = NEW_NUMERIC(nout)); /* 9 + 6 = 15 protected */ iAoutP = INTEGER_POINTER(iAout); jBoutP = INTEGER_POINTER(jBout); tAoutP = NUMERIC_POINTER(tAout); tBoutP = NUMERIC_POINTER(tBout); xoutP = NUMERIC_POINTER(xout); youtP = NUMERIC_POINTER(yout); for(k = 0; k < nout; k++) { iAoutP[k] = ia[k]; jBoutP[k] = jb[k]; tAoutP[k] = ta[k]; tBoutP[k] = tb[k]; xoutP[k] = x[k]; youtP[k] = y[k]; } PROTECT(out = NEW_LIST(6)); /* 15 + 1 = 16 protected */ SET_VECTOR_ELT(out, 0, iAout); SET_VECTOR_ELT(out, 1, jBout); SET_VECTOR_ELT(out, 2, tAout); SET_VECTOR_ELT(out, 3, tBout); SET_VECTOR_ELT(out, 4, xout); SET_VECTOR_ELT(out, 5, yout); UNPROTECT(16); return(out); } /* Similar to Cxysegint, but computes intersections between all pairs of segments in a single list, excluding the diagonal comparisons of course */ SEXP CxysegXint(SEXP x0, SEXP y0, SEXP dx, SEXP dy, SEXP eps) { int i, j, k, n, n1; double determinant, absdet, diffx, diffy, tti, ttj; int nout, noutmax, newmax, maxchunk; double epsilon; double *X0, *Y0, *Dx, *Dy; double *ti, *tj, *x, *y; int *ii, *jj; SEXP out, iout, jout, tiout, tjout, xout, yout; double *tioutP, *tjoutP, *xoutP, *youtP; int *ioutP, *joutP; PROTECT(x0 = AS_NUMERIC(x0)); PROTECT(y0 = AS_NUMERIC(y0)); PROTECT(dx = AS_NUMERIC(dx)); PROTECT(dy = AS_NUMERIC(dy)); PROTECT(eps = AS_NUMERIC(eps)); /* that's 5 protected */ /* get pointers */ X0 = NUMERIC_POINTER(x0); Y0 = NUMERIC_POINTER(y0); Dx = NUMERIC_POINTER(dx); Dy = NUMERIC_POINTER(dy); /* determine length of vectors */ n = LENGTH(x0); epsilon = *(NUMERIC_POINTER(eps)); /* guess amount of storage required for output */ noutmax = n; nout = 0; ii = (int *) R_alloc(noutmax, sizeof(int)); jj = (int *) R_alloc(noutmax, sizeof(int)); ti = (double *) R_alloc(noutmax, sizeof(double)); tj = (double *) R_alloc(noutmax, sizeof(double)); x = (double 
*) R_alloc(noutmax, sizeof(double)); y = (double *) R_alloc(noutmax, sizeof(double)); /* scan data */ n1 = n - 1; OUTERCHUNKLOOP(j, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, n1, maxchunk, 8196) { for(i = j+1; i < n; i++) { determinant = Dx[j] * Dy[i] - Dy[j] * Dx[i]; absdet = (determinant > 0) ? determinant : -determinant; if(absdet > epsilon) { diffx = (X0[j] - X0[i])/determinant; diffy = (Y0[j] - Y0[i])/determinant; tti = - Dy[j] * diffx + Dx[j] * diffy; ttj = - Dy[i] * diffx + Dx[i] * diffy; if(INSIDE01(tti,epsilon) && INSIDE01(ttj,epsilon)) { /* intersection */ if(nout >= noutmax) { /* storage overflow - increase space */ newmax = 4 * noutmax; ii = (int *) S_realloc((char *) ii, newmax, noutmax, sizeof(int)); jj = (int *) S_realloc((char *) jj, newmax, noutmax, sizeof(int)); ti = (double *) S_realloc((char *) ti, newmax, noutmax, sizeof(double)); tj = (double *) S_realloc((char *) tj, newmax, noutmax, sizeof(double)); x = (double *) S_realloc((char *) x, newmax, noutmax, sizeof(double)); y = (double *) S_realloc((char *) y, newmax, noutmax, sizeof(double)); noutmax = newmax; } ti[nout] = tti; tj[nout] = ttj; ii[nout] = i; jj[nout] = j; x[nout] = X0[i] + tti * Dx[i]; y[nout] = Y0[i] + tti * Dy[i]; ++nout; } } } } } /* pack up */ PROTECT(iout = NEW_INTEGER(nout)); PROTECT(jout = NEW_INTEGER(nout)); PROTECT(tiout = NEW_NUMERIC(nout)); PROTECT(tjout = NEW_NUMERIC(nout)); PROTECT(xout = NEW_NUMERIC(nout)); PROTECT(yout = NEW_NUMERIC(nout)); /* 5 + 6 = 11 protected */ ioutP = INTEGER_POINTER(iout); joutP = INTEGER_POINTER(jout); tioutP = NUMERIC_POINTER(tiout); tjoutP = NUMERIC_POINTER(tjout); xoutP = NUMERIC_POINTER(xout); youtP = NUMERIC_POINTER(yout); for(k = 0; k < nout; k++) { ioutP[k] = ii[k]; joutP[k] = jj[k]; tioutP[k] = ti[k]; tjoutP[k] = tj[k]; xoutP[k] = x[k]; youtP[k] = y[k]; } PROTECT(out = NEW_LIST(6)); /* 11 + 1 = 12 protected */ SET_VECTOR_ELT(out, 0, iout); SET_VECTOR_ELT(out, 1, jout); SET_VECTOR_ELT(out, 2, tiout); 
SET_VECTOR_ELT(out, 3, tjout); SET_VECTOR_ELT(out, 4, xout); SET_VECTOR_ELT(out, 5, yout); UNPROTECT(12); return(out); } spatstat/src/lookup.c0000755000176200001440000001234613615441326014377 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for a general pairwise interaction process with the pairwise interaction function given by a ``lookup table'', passed through the par argument. */ /* For debugging code, insert the line: #define DEBUG 1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lookup { int nlook; int equisp; double delta; double rmax; double r2max; double *h; /* values of pair interaction */ double *r; /* r values if not equally spaced */ double *r2; /* r^2 values if not equally spaced */ double *period; int per; } Lookup; /* initialiser function */ Cdata *lookupinit(state, model, algo) State state; Model model; Algor algo; { int i, nlook; double ri; Lookup *lookup; lookup = (Lookup *) R_alloc(1, sizeof(Lookup)); /* Interpret model parameters*/ lookup->nlook = nlook = model.ipar[0]; lookup->equisp = (model.ipar[1] > 0); lookup->delta = model.ipar[2]; lookup->rmax = model.ipar[3]; lookup->r2max = pow(lookup->rmax, 2); /* periodic boundary conditions? */ lookup->period = model.period; lookup->per = (model.period[0] > 0.0); /* If the r-values are equispaced only the h vector is included in ``par'' after ``rmax''; the entries of h then consist of h[0] = par[5], h[1] = par[6], ..., h[k-1] = par[4+k], ..., h[nlook-1] = par[4+nlook]. If the r-values are NOT equispaced then the individual r values are needed and these are included as r[0] = par[5+nlook], r[1] = par[6+nlook], ..., r[k-1] = par[4+nlook+k], ..., r[nlook-1] = par[4+2*nlook]. 
*/ lookup->h = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) lookup->h[i] = model.ipar[4+i]; if(!(lookup->equisp)) { lookup->r = (double *) R_alloc((size_t) nlook, sizeof(double)); lookup->r2 = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) { ri = lookup->r[i] = model.ipar[4+nlook+i]; lookup->r2[i] = ri * ri; } } #ifdef DEBUG Rprintf("Exiting lookupinit: nlook=%d, equisp=%d\n", nlook, lookup->equisp); #endif return((Cdata *) lookup); } /* conditional intensity evaluator */ double lookupcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, nlook, k, kk, ix, ixp1, j; double *x, *y; double u, v; double r2max, d2, d, delta, cifval, ux, vy; Lookup *lookup; lookup = (Lookup *) cdata; r2max = lookup->r2max; delta = lookup->delta; nlook = lookup->nlook; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lookup->equisp) { /* equispaced r values */ if(lookup->per) { /* periodic distance */ /* Rprintf("Lookup equispaced table, periodic distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { d = sqrt(dist2(u,v,x[j],y[j],lookup->period)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jperiod)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ /* Rprintf("Lookup equispaced table, Euclidean distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { d = hypot(u - x[j], v-y[j]); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jh[k]; } } } } } else { /* non-equispaced r values */ if(lookup->per) { /* periodic distance */ /* Rprintf("Lookup non-equispaced table, periodic distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],lookup->period); if(d2 
< r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jperiod); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ /* Rprintf("Lookup non-equispaced table, Euclidean distance\n"); */ if(ix > 0) { for(j=0; j < ix; j++) { ux = u - x[j]; vy = v - y[j]; d2 = ux * ux + vy * vy; if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jr2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } } return cifval; } Cifns LookupCifns = { &lookupinit, &lookupcif, (updafunptr) NULL, NO}; spatstat/src/Kborder.h0000755000176200001440000001103413406057617014460 0ustar liggesusers/* Kborder.h Code template for K function estimators in Kborder.c Variables: FNAME function name OUTTYPE storage type of the output vectors ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.12 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME( nxy, x, y, #ifdef WEIGHTED w, #endif b, nr, rmax, numer, denom) /* inputs */ int *nxy, *nr; double *x, *y, *b, *rmax; #ifdef WEIGHTED double *w; #endif /* outputs */ OUTTYPE *numer, *denom; { int i, j, l, n, nt, n1, nt1, lmin, lmax, maxchunk; double dt, tmax, xi, yi, bi, maxsearch, max2search; double bratio, dratio, dij, dij2, dx, dy, dx2; OUTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; OUTTYPE naccum, daccum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = 
n - 1; nt1 = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; /* initialise */ numerLowAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); numerHighAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); denomAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); for(l = 0; l < nt; l++) numer[l] = denom[l] = numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { /* -------- DENOMINATOR -------------*/ bi = b[i]; #ifdef WEIGHTED wi = w[i]; #endif /* increment denominator for all r < b[i] */ bratio = bi/dt; /* lmax is the largest integer STRICTLY less than bratio */ lmax = (int) ceil(bratio) - 1; lmax = (lmax <= nt1) ? lmax : nt1; /* effectively increment entries 0 to lmax */ if(lmax >= 0) denomAccum[lmax] += WI; /* ---------- NUMERATOR -----------*/ /* scan through points (x[j],y[j]) */ xi = x[i]; yi = y[i]; maxsearch = (bi < tmax) ? 
bi : tmax; max2search = maxsearch * maxsearch; /* scan backward from i-1 until |x[j]-x[i]| > maxsearch or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } /* scan forward from i+1 until x[j]-x[i] > maxsearch or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } } } /* Now use the accumulated values to compute the numerator and denominator. The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. 
*/ for(l=nt1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; denom[l] = daccum; naccum += numerHighAccum[l]; numer[l] = naccum; naccum -= numerLowAccum[l]; } } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat/src/KrectFunDec.h0000644000176200001440000000563013406057617015227 0ustar liggesusers/* KrectFunDec.h $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Function declarations for Krect Macros: FNAME function name WEIGHTED #defined for weighted version (Kinhom etc) +++ Copyright (C) Adrian Baddeley 2014 ++++ */ void FNAME(width, height, nxy, x, y, #ifdef WEIGHTED w, #endif nr, rmax, trimedge, doIso, doTrans, doBord, doUnco, iso, trans, bnumer, bdenom, unco) /* input data */ double *width, *height; /* window is (0, width) x (0, height) */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ #ifdef WEIGHTED double *w; /* weights (e.g. reciprocal intensities) */ #endif /* algorithm parameters */ int *nr; /* number of r values */ double *rmax; /* maximum r value */ double *trimedge; /* maximum edge correction weight */ int *doIso; /* logical: whether to do isotropic correction */ int *doTrans; /* logical: whether to do translation correction */ int *doBord; /* logical: whether to do border correction */ int *doUnco; /* logical: whether to do uncorrected estimator */ /* outputs */ /* These are vectors of length nr if required, otherwise ignored */ double *iso; /* isotropic-corrected estimator */ double *trans; /* translation-corrected estimator */ COUNTTYPE *bnumer; /* numerator of border-corrected estimator */ COUNTTYPE *bdenom; /* denominator of border-corrected estimator */ COUNTTYPE *unco; /* uncorrected estimator */ { int i, j, l, ldist, lbord, M, maxchunk, N, Nr, N1, Nr1; double rstep, Rmax, R2max, wide, high, trim; double xi, yi, bdisti, bx, by, bratio; double dx, dy, dx2, dij, dij2, dratio, edgetrans, edgeiso; double dL, dR, dD, dU, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double aL, aR, aD, aU, cL, cR, cU, cD, extang; int 
ncor, corner; COUNTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; COUNTTYPE naccum, daccum; double accum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WIJ wij #else #define ZERO 0 #define WIJ 1 #endif N = *nxy; if(N == 0) return; Nr = *nr; Rmax = *rmax; trim = *trimedge; N1 = N - 1; Nr1 = Nr - 1; R2max = Rmax * Rmax; rstep = Rmax/Nr1; wide = *width; high = *height; /* Allocate and initialise scratch space - for border correction, but do it in all cases to keep the compiler happy */ M = (*doBord == 1) ? Nr : 1; numerLowAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); numerHighAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); denomAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); for(l = 0; l < M; l++) numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; #include "KrectV1.h" } #undef ZERO #undef WIJ spatstat/src/metricPdist.h0000644000176200001440000001075013406057617015360 0ustar liggesusers/* metricPdist.h Distance transform of a discrete binary image using a general metric Code template which is #included several times in metricPdist.c $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Uses the following definitions FNAME Function name (called from R) MARGLIST List of function arguments specifying the metric MARGDECLARE Declarations of function arguments specifying the metric MTEMPDECLARE Declaration and initialisation of variables for metric METRIC Expression for calculating the metric (x1,y1,x2,y2) Also uses definitions from raster.h and metricPdist.c Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(xmin, ymin, xmax, ymax, nr, nc, mr, mc, inp, MARGLIST, npasses, distances, rows, cols ) double *xmin, *ymin, *xmax, *ymax; /* x, y dimensions */ int *nr, *nc; /* raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ int *inp; /* input: binary image */ MARGDECLARE; int *npasses; /* number of passes over raster */ double *distances; /* 
output: distance to nearest point */ int *rows; /* output: row of nearest point (start= 0) */ int *cols; /* output: column of nearest point (start = 0) */ /* all images must have identical dimensions including a margin of 1 on each side */ { Raster data, dist, row, col; int mrow, mcol, nrow, ncol; int j,k; double d, x, y; int r, c; int Npass, ipass; double dnew, diam, dd, huge; double Xmin, Ymin, Xmax, Ymax; /* declare any variables used for the metric */ MTEMPDECLARE; Xmin = *xmin; Xmax = *xmax; Ymin = *ymin; Ymax = *ymax; mrow = *mr; mcol = *mc; Npass = *npasses; /* Determine diameter of window. (must be achieved as distance between two of the vertices) */ /* diagonals */ diam = METRIC(Xmin,Ymin,Xmax,Ymax); dd = METRIC(Xmin,Ymax,Xmax,Ymin); if(dd > diam) diam = dd; dd = METRIC(Xmin,Ymin,Xmin,Ymax); if(dd > diam) diam = dd; /* horizontals */ dd = METRIC(Xmin,Ymin,Xmax,Ymin); if(dd > diam) diam = dd; dd = METRIC(Xmin,Ymax,Xmax,Ymax); if(dd > diam) diam = dd; /* verticals */ dd = METRIC(Xmin,Ymin,Xmin,Ymax); if(dd > diam) diam = dd; dd = METRIC(Xmax,Ymin,Xmax,Ymax); if(dd > diam) diam = dd; /* create raster structures */ /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &data, (void *) inp, Xmin, Ymin, Xmax, Ymax, nrow, ncol, mrow, mcol); shape_raster( &dist, (void *) distances, Xmin, Ymin, Xmax, Ymax, nrow, ncol, mrow, mcol); shape_raster( &row, (void *) rows, Xmin, Ymin, Xmax, Ymax, nrow, ncol, mrow, mcol); shape_raster( &col, (void *) cols, Xmin, Ymin, Xmax, Ymax, nrow, ncol, mrow, mcol); /* initialise arrays */ Clear(row,int,UNDEFINED) Clear(col,int,UNDEFINED) huge = 2.0 * diam; Clear(dist,double,huge) /* if input pixel is TRUE, set distance to 0 and make pixel point to itself */ for(j = data.rmin; j <= data.rmax; j++) for(k = data.cmin; k <= data.cmax; k++) if(Entry(data, j, k, int) != 0) { Entry(dist, j, k, double) = 0.0; Entry(row, j, k, int) = j; Entry(col, j, k, int) = k; } /* how to update the distance values */ #undef 
GETVALUES #define GETVALUES(ROW,COL) \ x = Xpos(data, COL); \ y = Ypos(data, ROW); \ d = Entry(dist,ROW,COL,double); #undef COMPARE #define COMPARE(ROW,COL,RR,CC) \ r = Entry(row,RR,CC,int); \ c = Entry(col,RR,CC,int); \ if(Is_Defined(r) && Is_Defined(c) \ && Entry(dist,RR,CC,double) < d) { \ dnew = METRIC(x, y, Xpos(data,c), Ypos(data,r)); \ if(dnew < d) { \ Entry(row,ROW,COL,int) = r; \ Entry(col,ROW,COL,int) = c; \ Entry(dist,ROW,COL,double) = dnew; \ d = dnew; \ } \ } for(ipass = 0; ipass < Npass; ipass++) { /* forward pass */ for(j = data.rmin; j <= data.rmax; j++) for(k = data.cmin; k <= data.cmax; k++) { GETVALUES(j, k) COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = data.rmax; j >= data.rmin; j--) for(k = data.cmax; k >= data.cmin; k--) { GETVALUES(j, k) COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, k+1) } } } spatstat/src/KrectIncrem.h0000644000176200001440000000462613406057617015304 0ustar liggesusers/* KrectIncrem.h Code to increment numerators of K-function $Revision: 1.5 $ $Date: 2014/02/09 03:00:51 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ */ #ifdef WEIGHTED wj = w[j]; wij = wi * wj; #endif /* determine index of entry to be incremented */ dij = (double) sqrt(dij2); dratio = dij/rstep; /* smallest integer greater than or equal to dratio */ ldist = (int) ceil(dratio); #ifdef UNCORRECTED /* ............ uncorrected estimate ................. */ #ifdef WEIGHTED unco[ldist] += wij; #else (unco[ldist])++; #endif #endif #ifdef BORDER /* ............ border correction ................. */ /* increment numerator for all r such that dij <= r < bi */ /* increment entries ldist to lbord inclusive */ #ifdef WEIGHTED if(lbord >= ldist) { numerLowAccum[ldist] += wij; numerHighAccum[lbord] += wij; } #else if(lbord >= ldist) { (numerLowAccum[ldist])++; (numerHighAccum[lbord])++; } #endif #endif #ifdef TRANSLATION /* ............ 
translation correction ................. */ edgetrans = 1.0/((1.0 - ABS(dx)/wide) * (1.0 - ABS(dy)/high)); edgetrans = MIN(edgetrans, trim); #ifdef WEIGHTED trans[ldist] += wij * edgetrans; #else trans[ldist] += edgetrans; #endif #endif #ifdef ISOTROPIC /* ............ isotropic correction ................. */ /* half the angle subtended by the intersection between the circle of radius d[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < dij) ? acos(dL/dij) : 0.0; aR = (dR < dij) ? acos(dR/dij) : 0.0; aD = (dD < dij) ? acos(dD/dij) : 0.0; aU = (dU < dij) ? acos(dU/dij) : 0.0; /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners */ if(corner) extang += 1/4; /* edge correction factor */ edgeiso = 1 / (1 - extang); edgeiso = MIN(edgeiso, trim); #ifdef WEIGHTED iso[ldist] += wij * edgeiso; #else iso[ldist] += edgeiso; #endif #endif spatstat/src/linknnd.c0000644000176200001440000000100613406057617014513 0ustar liggesusers#include #include "yesno.h" /* linknnd.c k-th nearest neighbours in a linear network Sparse representation of network ! Data points must be ordered by segment index ! 
$Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ /* Instantiate the network k-nearest-neighbour template twice: once for neighbours within a single pattern (linknnd), once for neighbours between two patterns (linknncross, via the CROSS flag). */ #undef HUH #undef CROSS #define FNAME linknnd #include "linknnd.h" #undef FNAME #define CROSS #define FNAME linknncross #include "linknnd.h" #undef CROSS #undef FNAME spatstat/src/getcif.c0000755000176200001440000000331413115271120014310 0ustar liggesusers#include #include "methas.h" void fexitc(const char *msg); /* Function tables (init / conditional-intensity / update triples) for each point-process model, each defined in its own source file. */ extern Cifns AreaIntCifns, BadGeyCifns, DgsCifns, DiggraCifns, FikselCifns, GeyerCifns, HardcoreCifns, LennardCifns, LookupCifns, SoftcoreCifns, StraussCifns, StraussHardCifns, MultiStraussCifns, MultiStraussHardCifns, MultiHardCifns, TripletsCifns, PenttinenCifns; Cifns NullCifns = NULL_CIFNS; /* (model name, function table) pair for the registry below. */ typedef struct CifPair { char *name; Cifns *p; } CifPair; /* Registry mapping model-name strings to their Cifns tables; terminated by a NULL name sentinel, which is what the lookup loops test for. */ /* NOTE(review): LennardCifns is declared extern above but has no "lennard" entry here -- confirm against upstream whether the entry was lost in extraction. */ CifPair CifTable[] = { {"areaint", &AreaIntCifns}, {"badgey", &BadGeyCifns}, {"dgs", &DgsCifns}, {"diggra", &DiggraCifns}, {"geyer", &GeyerCifns}, {"fiksel", &FikselCifns}, {"hardcore", &HardcoreCifns}, {"lookup", &LookupCifns}, {"multihard", &MultiHardCifns}, {"penttinen", &PenttinenCifns}, {"sftcr", &SoftcoreCifns}, {"strauss", &StraussCifns}, {"straush", &StraussHardCifns}, {"straussm", &MultiStraussCifns}, {"straushm", &MultiStraussHardCifns}, {"triplets", &TripletsCifns}, {(char *) NULL, (Cifns *) NULL} }; /* getcif: linear search of CifTable for the named model; returns a copy of its Cifns. On an unrecognised name, fexitc() aborts back to R and does not return. */ Cifns getcif(cifname) char *cifname; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(cifname, cp.name) == 0) return(*(cp.p)); } fexitc("Unrecognised cif name; bailing out.\n"); /* control never passes to here, but compilers don't know that */ return(NullCifns); } /* R interface function, to check directly whether cif is recognised */ /* knownCif: sets *answer to 1 if *cifname is present in CifTable, else 0. (.C calling convention: the name arrives as char**.) */ void knownCif(cifname, answer) char** cifname; int* answer; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(*cifname, cp.name) == 0) { *answer = 1; return; } } *answer = 0; return; } 
spatstat/src/knndistance.c0000644000176200001440000001043313406057617015363 0ustar liggesusers/* knndistance.c K-th Nearest Neighbour Distances between points Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.8 $ $Date: 2013/12/10 03:29:45 $ Function definitions are #included from knndist.h and knnXdist.h THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER SINGLE LIST: knndsort k-th nearest neighbour distances knnwhich k-th nearest neighbours knnsort k-th nearest neighbours and their distances ONE LIST TO ANOTHER LIST: knnXdist Nearest neighbour distance from one list to another knnXwhich Nearest neighbour ID from one list to another knnX Nearest neighbour ID & distance from one list to another ONE LIST TO ANOTHER OVERLAPPING LIST: knnXEdist Nearest neighbour distance from one list to another, overlapping knnXEwhich Nearest neighbour ID from one list to another, overlapping knnXE Nearest neighbour ID & distance */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ /* ------------------- one point pattern X --------------------- */ /* knndsort nearest neighbours 1:kmax returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knndsort #define DIST #include "knndist.h" /* knnwhich nearest neighbours 1:kmax returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnwhich #define WHICH #include "knndist.h" /* knnsort nearest neighbours 1:kmax returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnsort #define DIST #define WHICH #include "knndist.h" /* --------------- two distinct point patterns X and Y --------------- */ /* general interface */ void knnXinterface(n1, x1, y1, id1, n2, x2, y2, id2, kmax, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ 
int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; int *kmax; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnX(), knnXdist(), knnXwhich(); void knnXE(), knnXEdist(), knnXEwhich(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { knnX(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(di) { knnXdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(wh) { knnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } } else { if(di && wh) { knnXE(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(di) { knnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } else if(wh) { knnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge); } } } /* Turn off the debugging tracer in knnXdist.h */ #undef TRACER /* knnXdist returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXdist #define DIST #include "knnXdist.h" /* knnXwhich returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXwhich #define WHICH #include "knnXdist.h" /* knnX returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnX #define DIST #define WHICH #include "knnXdist.h" /* --------------- overlapping point patterns X and Y --------------- */ /* knnXEdist returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXEdist #define DIST #define EXCLUDE #include "knnXdist.h" /* knnXEwhich returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXEwhich #define WHICH #define EXCLUDE #include "knnXdist.h" /* knnXE returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXE #define DIST 
#define WHICH #define EXCLUDE #include "knnXdist.h" spatstat/src/discarea.c0000755000176200001440000001517513406057617014650 0ustar liggesusers/* disc.c Area of intersection between disc and polygonal window $Revision: 1.7 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef DEBUG #include #include #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define MAX(A,B) (((A) > (B)) ? (A) : (B)) #ifndef PI #define PI 3.1415926535898 #endif void discareapoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, eps, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *rmat; double *x0, *y0, *x1, *y1; double *eps; /* output */ double *out; { int n, m, i, j, k, nradperpt; double radius, radius2, total, contrib; double xx0, xx1, yy0, yy1, xleft, xright, yleft, yright, xcentre, ycentre; double epsilon; double DiscContrib(); n = *nc; nradperpt = *nr; m = *nseg; epsilon = *eps; for(i = 0; i < n; i++) { xcentre = xc[i]; ycentre = yc[i]; #ifdef DEBUG Rprintf("\ni = %d:\n centre = (%lf, %lf)\n", i, xcentre, ycentre); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = radius * radius; #ifdef DEBUG Rprintf("radius = %lf\n", radius); #endif total = 0.0; for(k=0; k < m; k++) { #ifdef DEBUG Rprintf("k = %d\n", k); #endif xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #ifdef DEBUG Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1); #endif /* refer to unit disc at origin */ /* arrange so that xleft < xright */ if(radius <= epsilon) contrib = 0.0; else if(xx0 < xx1) { xleft = (xx0 - xcentre)/radius; xright = (xx1 - xcentre)/radius; yleft = (yy0 - ycentre)/radius; yright = (yy1 - ycentre)/radius; contrib = - radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon); } else { xleft = (xx1 - xcentre)/radius; xright = (xx0 - xcentre)/radius; yleft = (yy1 - ycentre)/radius; yright = (yy0 - ycentre)/radius; contrib = radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon); } #ifdef 
DEBUG Rprintf("contrib = %lf\n contrib/(pi * r^2)=%lf\n", contrib, contrib/(PI * radius2)); #endif total += contrib; } out[ j * n + i] = total; #ifdef DEBUG Rprintf("total = %lf\ntotal/(pi * r^2) = %lf\n", total, total/(PI * radius2)); #endif } } } /* area of intersection of unit disc with halfplane x <= v */ #ifdef DEBUG #define TRIGBIT(V) trigbit(V) double trigbit(v) double v; { double zero, result; zero = 0.0; if(v < -1.0) return(zero); if(v > 1.0) return(PI); result = PI/2 + asin(v) + v * sqrt(1 - v * v); Rprintf("trigbit: v = %lf, asin(v)=%lf, result=%lf\n", v, asin(v), result); return(result); } #else #define TRIGBIT(V) (((V) <= -1.0) ? 0.0 : (((V) >= 1.0) ? PI : \ (PI/2 + asin(V) + (V) * sqrt(1 - (V) * (V))))) #endif /* Find the area of intersection between a disc centre = (0,0), radius = 1 and the trapezium with upper segment (xleft, yleft) to (xright, yright) ASSUMES xleft < xright */ double DiscContrib(xleft, yleft, xright, yright, eps) double xleft, yleft, xright, yright, eps; /* NOTE: unit disc centred at origin */ { double xlo, xhi, zero, slope, intercept, A, B, C, det; double xcut1, xcut2, ycut1, ycut2, xunder1, xunder2, dx, dx2, result; #ifdef DEBUG double increm; Rprintf( "DiscContrib: xleft=%lf, yleft=%lf, xright=%lf, yright=%lf\n", xleft, yleft, xright, yright); #endif zero = 0.0; /* determine relevant range of x coordinates */ xlo = MAX(xleft, (-1.0)); xhi = MIN(xright, 1.0); if(xlo >= xhi - eps) { /* intersection is empty or negligible */ #ifdef DEBUG Rprintf("intersection is empty or negligible\n"); #endif return(zero); } /* find intersection points between the circle and the line containing upper segment */ slope = (yright - yleft)/(xright - xleft); intercept = yleft - slope * xleft; A = 1 + slope * slope; B = 2 * slope * intercept; C = intercept * intercept - 1.0; det = B * B - 4 * A * C; #ifdef DEBUG Rprintf("slope=%lf, intercept=%lf\nA = %lf, B=%lf, C=%lf, det=%lf\n", slope, intercept, A, B, C, det); #endif if(det <= 0.0) { /* no crossing 
between disc and infinite line */ if(intercept < 0.0) /* segment is below disc; intersection is empty */ return(zero); /* segment is above disc */ result = TRIGBIT(xhi) - TRIGBIT(xlo); return(result); } xcut1 = (- B - sqrt(det))/(2 * A); xcut2 = (- B + sqrt(det))/(2 * A); /* partition [xlo, xhi] into pieces delimited by {xcut1, xcut2} */ if(xcut1 >= xhi || xcut2 <= xlo) { /* segment is outside disc */ if(yleft < 0.0) { #ifdef DEBUG Rprintf("segment is beneath disc\n"); #endif result = zero; } else { #ifdef DEBUG Rprintf("segment is above disc\n"); #endif result = TRIGBIT(xhi) - TRIGBIT(xlo); } return(result); } /* possibly three parts */ #ifdef DEBUG Rprintf("up to three pieces\n"); #endif result = zero; ycut1 = intercept + slope * xcut1; ycut2 = intercept + slope * xcut2; if(xcut1 > xlo) { /* part to left of cut */ #ifdef DEBUG Rprintf("left of cut: [%lf, %lf]\n", xlo, xcut1); if(ycut1 < 0.0) Rprintf("below disc - no intersection\n"); else { increm = TRIGBIT(xcut1) - TRIGBIT(xlo); Rprintf("increment = %lf\n", increm); result += increm; } #else if(ycut1 >= 0.0) result += TRIGBIT(xcut1) - TRIGBIT(xlo); #endif } if(xcut2 < xhi) { /* part to right of cut */ #ifdef DEBUG Rprintf("right of cut: [%lf, %lf]\n", xcut2, xhi); if(ycut2 < 0.0) Rprintf("below disc - no intersection\n"); else { increm = TRIGBIT(xhi) - TRIGBIT(xcut2); Rprintf("increment = %lf\n", increm); result += increm; } #else if(ycut2 >= 0.0) result += TRIGBIT(xhi) - TRIGBIT(xcut2); #endif } /* part underneath cut */ xunder1 = MAX(xlo, xcut1); xunder2 = MIN(xhi, xcut2); dx = xunder2 - xunder1; dx2 = xunder2 * xunder2 - xunder1 * xunder1; #ifdef DEBUG Rprintf("underneath cut: [%lf, %lf]\n", xunder1, xunder2); increm = intercept * dx + slope * dx2/2 + (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2; Rprintf("increment = %lf\n", increm); result += increm; #else result += intercept * dx + slope * dx2/2 + (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2; #endif return(result); } #ifdef DEBUG /* interface to low level function, 
for debugging only */ void RDCtest(xleft, yleft, xright, yright, eps, value) double *xleft, *yleft, *xright, *yright, *eps, *value; { double DiscContrib(); *value = DiscContrib(*xleft, *yleft, *xright, *yright, *eps); } #endif spatstat/src/loccumx.h0000644000176200001440000000410413406057617014537 0ustar liggesusers/* loccumx.h C template for loccum.c grid-to-data or data-cross-data functions $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(ntest, xtest, ytest, ndata, xdata, ydata, vdata, nr, rmax, ans) /* inputs */ int *ntest, *ndata, *nr; double *xtest, *ytest, *xdata, *ydata, *vdata; double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point of first pattern */ { int Ntest, Ndata, Nr, Nans; double Rmax; int i, j, k, jleft, kmin, maxchunk, columnstart; double Rmax2, rstep, xtesti, ytesti, xleft; double dx, dy, dx2, d2, d, contrib; Ntest = *ntest; Ndata = *ndata; Nr = *nr; Rmax = *rmax; if(Ntest == 0) return; Nans = Nr * Ntest; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } if(Ndata == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; jleft = 0; OUTERCHUNKLOOP(i, Ntest, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ntest, maxchunk, 8196) { xtesti = xtest[i]; ytesti = ytest[i]; columnstart = Nr * i; /* start position for f_i(.) 
in 'ans' */ /* adjust starting point */ xleft = xtesti - Rmax; while((xdata[jleft] < xleft) && (jleft+1 < Ndata)) ++jleft; /* process from jleft until |dx| > Rmax */ for(j=jleft; j < Ndata; j++) { dx = xdata[j] - xtesti; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = ydata[j] - ytesti; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = vdata[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } spatstat/src/Efiksel.c0000755000176200001440000000334613406057617014454 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Efiksel.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ C implementation of 'eval' for Fiksel interaction (non-hardcore part) Assumes point patterns are sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double sqrt(), exp(); void Efiksel(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, kkappa, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax, *kkappa; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rmax, r2max, r2maxpluseps, kappa, total; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; kappa = *kkappa; if(nsource == 0 || ntarget == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 16384) { total = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rmax */ for(i=ileft; i < ntarget; i++) { /* squared interpoint distance */ dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; dy = ytarget[i] - 
ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) total += exp(- kappa * sqrt(d2)); } values[j] = total; } } } spatstat/src/exactPdist.c0000755000176200001440000001024713406057617015200 0ustar liggesusers/* exactPdist.c `Pseudoexact' distance transform of a discrete binary image (the closest counterpart to `exactdist.c') $Revision: 1.13 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include "raster.h" void dist_to_bdry(); void shape_raster(); void ps_exact_dt(in, dist, row, col) Raster *in; /* input: binary image */ Raster *dist; /* output: exact distance to nearest point */ Raster *row; /* output: row index of closest point */ Raster *col; /* output: column index of closest point */ /* rasters must have been dimensioned by shape_raster() and must all have identical dimensions and margins */ { int j,k; double d, x, y; int r, c; double dnew; double huge; /* double bdiag; */ /* initialise */ #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) Clear(*row,int,UNDEFINED) Clear(*col,int,UNDEFINED) huge = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); Clear(*dist,double,huge) /* if input pixel is TRUE, set distance to 0 and make pixel point to itself */ for(j = in->rmin; j <= in->rmax; j++) for(k = in->cmin; k <= in->cmax; k++) if(Entry(*in, j, k, int) != 0) { Entry(*dist, j, k, double) = 0.0; Entry(*row, j, k, int) = j; Entry(*col, j, k, int) = k; } /* how to update the distance values */ #define GETVALUES(ROW,COL) \ x = Xpos(*in, COL); \ y = Ypos(*in, ROW); \ d = Entry(*dist,ROW,COL,double); #define COMPARE(ROW,COL,RR,CC) \ r = Entry(*row,RR,CC,int); \ c = Entry(*col,RR,CC,int); \ if(Is_Defined(r) && Is_Defined(c) \ && Entry(*dist,RR,CC,double) < d) { \ dnew = DistanceSquared(x, y, Xpos(*in,c), Ypos(*in,r)); \ if(dnew < d) { \ Entry(*row,ROW,COL,int) = r; \ Entry(*col,ROW,COL,int) = c; \ Entry(*dist,ROW,COL,double) = dnew; \ d 
= dnew; \ } \ } /* bound on diagonal step distance squared */ /* bdiag = (in->xstep * in->xstep + in->ystep * in->ystep); */ /* forward pass */ for(j = in->rmin; j <= in->rmax; j++) for(k = in->cmin; k <= in->cmax; k++) { GETVALUES(j, k) COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = in->rmax; j >= in->rmin; j--) for(k = in->cmax; k >= in->cmin; k--) { GETVALUES(j, k) COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, k+1) } /* take square roots of distances^2 */ for(j = in->rmax; j >= in->rmin; j--) for(k = in->cmax; k >= in->cmin; k--) Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double)); } /* R interface */ void ps_exact_dt_R(xmin, ymin, xmax, ymax, nr, nc, mr, mc, inp, distances, rows, cols, boundary) double *xmin, *ymin, *xmax, *ymax; /* x, y dimensions */ int *nr, *nc; /* raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ int *inp; /* input: binary image */ double *distances; /* output: distance to nearest point */ int *rows; /* output: row of nearest point (start= 0) */ int *cols; /* output: column of nearest point (start = 0) */ double *boundary; /* output: distance to boundary of rectangle */ /* all images must have identical dimensions including a margin of 1 on each side */ { Raster data, dist, row, col, bdist; int mrow, mcol, nrow, ncol; mrow = *mr; mcol = *mc; /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &dist, (void *) distances, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &row, (void *) rows, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &col, (void *) cols, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); ps_exact_dt(&data, &dist, &row, &col); dist_to_bdry(&bdist); } 
spatstat/src/mhv1.h0000644000176200001440000000055213406057617013743 0ustar liggesusers/* mhv1.h marked or unmarked simulation Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_MARKED if(marked) { /* marked process */ #define MH_MARKED YES #include "mhv2.h" #undef MH_MARKED } else { /* unmarked process */ #define MH_MARKED NO #include "mhv2.h" #undef MH_MARKED } spatstat/src/closepair.c0000755000176200001440000002403313406057617015047 0ustar liggesusers/* closepair.c $Revision: 1.34 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Assumes point pattern is sorted in increasing order of x coordinate paircount() count the total number of pairs (i, j) with distance < rmax Cclosepaircounts count for each i the number of j with distance < rmax crosscount() count number of close pairs in two patterns (note: Ccrosspaircounts is defined in Estrauss.c) duplicatedxy() find duplicated (x,y) pairs Fclosepairs() extract close pairs of coordinates .C interface - output vectors have Fixed length Fcrosspairs() extract close pairs in two patterns .C interface - output vectors have Fixed length Vclosepairs() extract close pairs of coordinates .Call interface - output vectors have Variable length Vcrosspairs() extract close pairs in two patterns .Call interface - output vectors have Variable length */ #include #include #include #define OK 0 #define ERR_OVERFLOW 1 #define ERR_ALLOC 2 #define FAILED(X) ((void *)(X) == (void *)NULL) #define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int)) #define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double)) double sqrt(); /* count TOTAL number of close pairs */ void paircount(nxy, x, y, rmaxi, count) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates 
*/ double *rmaxi; /* maximum distance */ /* output */ int *count; { int n, maxchunk, i, j, counted; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; *count = counted = 0; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) ++counted; } } if(i + 1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) ++counted; } } /* end loop over i */ } } *count = counted; } /* count for each i the number of j closer than distance r */ void Cclosepaircounts(nxy, x, y, rmaxi, counts) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ /* output VECTOR, assumed initialised to 0 */ int *counts; { int n, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } if(i + 1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } /* end loop over i */ } } } /* analogue for two different point patterns */ void crosscount(nn1, x1, y1, nn2, x2, y2, rmaxi, count) /* inputs */ int *nn1, *nn2; double *x1, *y1, *x2, *y2, 
*rmaxi; /* output */ int *count; { int n1, n2, maxchunk, i, j, jleft, counted; double x1i, y1i, rmax, r2max, xleft, dx, dy, a; n1 = *nn1; n2 = *nn2; rmax = *rmaxi; r2max = rmax * rmax; *count = counted = 0; if(n1 == 0 || n2 == 0) return; jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for(; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting index */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from j=jleft until dx > rmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; a = r2max - dx * dx; if(a < 0) break; dy = y2[j] - y1i; a -= dy * dy; if(a > 0) ++counted; } } } *count = counted; } /* Find duplicated locations xx, yy are not sorted */ void duplicatedxy(n, x, y, out) /* inputs */ int *n; double *x, *y; /* output */ int *out; /* logical vector */ { int m, i, j; double xi, yi; m = *n; for(i = 1; i < m; i++) { R_CheckUserInterrupt(); xi = x[i]; yi = y[i]; for(j = 0; j < i; j++) if((x[j] == xi) && (y[j] == yi)) break; if(j == i) out[i] = 0; else out[i] = 1; } } /* ............... fixed output length .............. 
*/ void Fclosepairs(nxy, x, y, r, noutmax, nout, iout, jout, xiout, yiout, xjout, yjout, dxout, dyout, dout, status) /* inputs */ int *nxy, *noutmax; double *x, *y, *r; /* outputs */ int *nout, *iout, *jout; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; int *status; { int n, k, kmax, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, dx2, d2; n = *nxy; rmax = *r; r2max = rmax * rmax; *status = OK; *nout = 0; k = 0; /* k is the next available storage location and also the current length of the list */ kmax = *noutmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > r2max) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } if(i + 1 < n) { /* scan forwards */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > r2max) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } } } *nout = k; } void Fcrosspairs(nn1, x1, y1, nn2, x2, y2, rmaxi, noutmax, nout, iout, jout, xiout, yiout, xjout, yjout, dxout, dyout, dout, status) /* inputs */ int *nn1, *nn2, *noutmax; double *x1, *y1, *x2, *y2, *rmaxi; /* outputs */ int *nout, *iout, *jout; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; int *status; { int n1, n2, 
maxchunk, k, kmax, i, j, jleft; double x1i, y1i, rmax, r2max, xleft, dx, dy, dx2, d2; n1 = *nn1; n2 = *nn2; rmax = *rmaxi; r2max = rmax * rmax; *status = OK; *nout = 0; k = 0; /* k is the next available storage location and also the current length of the list */ kmax = *noutmax; if(n1 == 0 || n2 == 0) return; jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for(; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting position jleft */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from j=jleft until dx > rmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > r2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = x1i; yiout[k] = y1i; xjout[k] = x2[j]; yjout[k] = y2[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } } *nout = k; } /* ........ versions that return variable-length vectors ......... 
*/ #define SINGLE /* return i, j only */ #define CLOSEFUN VcloseIJpairs #define CROSSFUN VcrossIJpairs #undef THRESH #undef COORDS #undef DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, d */ #define CLOSEFUN VcloseIJDpairs #define CROSSFUN VcrossIJDpairs #undef THRESH #undef COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, xi, yi, xj, yj, dx, dy, d */ #define CLOSEFUN Vclosepairs #define CROSSFUN Vcrosspairs #undef THRESH #define COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, t where t = 1{d < s} */ #define CLOSEFUN Vclosethresh #define CROSSFUN Vcrossthresh #define THRESH #undef COORDS #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS spatstat/src/linvdist.c0000644000176200001440000000113413406057617014714 0ustar liggesusers#include #include "yesno.h" /* linvdist.c Distance function at vertices (shortest distance from each vertex to a data point) Sparse representation of network $Revision: 1.1 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 ! Data points must be ordered by segment index ! 
*/ #undef HUH /* definition of Clinvdist */ #define FNAME Clinvdist #undef WHICH #include "linvdist.h" /* definition of Clinvwhichdist */ #undef FNAME #define FNAME Clinvwhichdist #define WHICH #include "linvdist.h" spatstat/src/sumsymouter.h0000644000176200001440000000360513406057617015506 0ustar liggesusers/* sumsymouter.h Code template for some functions in linalg.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros used: FNAME = function name, WEIGHTED = #defined for weighted version */ void FNAME( x, #ifdef WEIGHTED w, #endif p, n, y ) double *x; /* p by n by n array */ #ifdef WEIGHTED double *w; /* n by n matrix (symmetric) */ #endif int *p, *n; double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, m, ijpos, jipos, maxchunk; register double *xij, *xji; #ifdef WEIGHTED register double wij; #endif N = *n; P = *p; OUTERCHUNKLOOP(i, N, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 256) { /* loop over j != i */ if(i > 0) { for(j = 0; j < i; j++) { /* pointers to [i,j] and [j,i] in N*N matrices */ ijpos = i + N * j; jipos = j + N * i; /* pointers to x[, i, j] and x[ , j, i] */ xij = x + ijpos * P; xji = x + jipos * P; /* outer product */ #ifdef WEIGHTED wij = w[ijpos]; #endif for(k = 0; k < P; k++) { for(m = 0; m < P; m++) { #ifdef WEIGHTED y[m + k * P] += wij * xij[m] * xji[k]; #else y[m + k * P] += xij[m] * xji[k]; #endif } } } } if(i + 1 < N) { for(j = i+1; j < N; j++) { /* pointers to [i,j] and [j,i] in N*N matrices */ ijpos = i + N * j; jipos = j + N * i; /* pointers to x[, i, j] and x[ , j, i] */ xij = x + ijpos * P; xji = x + jipos * P; /* outer product */ #ifdef WEIGHTED wij = w[ijpos]; #endif for(k = 0; k < P; k++) { for(m = 0; m < P; m++) { #ifdef WEIGHTED y[m + k * P] += wij * xij[m] * xji[k]; #else y[m + k * P] += xij[m] * xji[k]; #endif } } } } /* end of loop over j */ } } } 
spatstat/src/whist.c0000644000176200001440000000207713406057617014225 0ustar liggesusers/* whist.c Weighted histogram Designed for very fine bins Cwhist(indices, weights, nbins) indices point to bins (range: 0 to nbins-1) $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include SEXP Cwhist(SEXP indices, SEXP weights, SEXP nbins) { int i, j, N, M; int *x; double *w, *y; SEXP result; /* =================== Protect R objects from garbage collector ======= */ PROTECT(indices = AS_INTEGER(indices)); PROTECT(weights = AS_NUMERIC(weights)); PROTECT(nbins = AS_INTEGER(nbins)); N = LENGTH(indices); M = *(INTEGER_POINTER(nbins)); x = INTEGER_POINTER(indices); w = NUMERIC_POINTER(weights); PROTECT(result = NEW_NUMERIC(M)); y = NUMERIC_POINTER(result); for(j = 0; j < M; j++) y[j] = 0.0; for(i = 0; i < N; i++) { j = x[i]; if(j != NA_INTEGER && R_FINITE(w[i]) && j >= 0 && j < M) y[j] += w[i]; } UNPROTECT(4); return(result); } spatstat/src/nngrid.h0000644000176200001440000000572613406057617014361 0ustar liggesusers #if (1 == 0) /* nngrid.h Code template for C functions nearest neighbour of each grid point THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED IN ASCENDING ORDER OF x COORDINATE This code is #included multiple times in nngrid.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.4 $ $Date: 2014/02/18 08:43:29 $ */ #endif void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int Nxcol, Nyrow, Npoints; int i, j, ijpos; int mleft, mright, mwhich, lastmwhich; double X0, Y0, Xstep, Ystep; double d2, d2min, xj, yi, dx, dy, dx2, hu, hu2; Nxcol = *nx; Nyrow = *ny; Npoints = *np; hu = *huge; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; hu2 = hu * hu; if(Npoints == 0) return; lastmwhich = 0; /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { /* reset nn distance and index */ d2min = hu2; mwhich = -1; if(lastmwhich < Npoints) { /* search forward from previous nearest neighbour */ for(mright = lastmwhich; mright < Npoints; ++mright) { dx = xp[mright] - xj; dx2 = dx * dx; if(dx2 > d2min) /* note that dx2 >= d2min could break too early */ break; dy = yp[mright] - yi; d2 = dy * dy + dx2; if (d2 < d2min) { /* save as nearest neighbour */ d2min = d2; mwhich = mright; } } /* end forward search */ } if(lastmwhich > 0) { /* search backward from previous nearest neighbour */ for(mleft = lastmwhich - 1; mleft >= 0; --mleft) { dx = xj - xp[mleft]; dx2 = dx * dx; if(dx2 > d2min) /* note that dx2 >= d2min could break too early */ break; dy = yp[mleft] - yi; d2 = dy * dy + dx2; if (d2 < d2min) { /* save as nearest neighbour */ d2min = d2; mwhich = mleft; } } /* end backward search */ } /* remember index of most recently-encountered neighbour */ lastmwhich = mwhich; /* copy nn distance for grid point (i, j) to output array nnd[i, j] */ ijpos = i + j * Nyrow; #ifdef DIST nnd[ijpos] = sqrt(d2min); #endif #ifdef 
WHICH nnwhich[ijpos] = mwhich + 1; /* R indexing */ #endif /* end of loop over grid points (i, j) */ } } } spatstat/src/mhsnoopdef.h0000644000176200001440000000121213406057617015224 0ustar liggesusers/* mhsnoopdef.h Define structure 'Snoop' containing visual debugger parameters and state $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef R_INTERNALS_H_ #include #endif typedef struct Snoop { int active; /* true or false */ int nextstop; /* jump to iteration number 'nextstop' */ int nexttype; /* jump to the next proposal of type 'nexttype' */ SEXP env; /* environment for exchanging data with R */ SEXP expr; /* callback expression for visual debugger */ } Snoop; #define NO_TYPE -1 spatstat/src/crossloop.h0000644000176200001440000000356213406057617015117 0ustar liggesusers/* crossloop.h Generic code template for loop for cross-close-pairs operations collecting contributions to point x_i from all points y_j such that ||x_i - y_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n1, n2, maxchunk, jleft; double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; double *x1, *y1, *x2, *y2; $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define CROSSLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n1, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n1, maxchunk, 65536) { \ \ x1i = x1[i]; \ y1i = y1[i]; \ \ INITIAL_I; \ \ jleft = 0; \ \ /* \ adjust starting point jleft \ */ \ xleft = x1i - rmax; \ while((x2[jleft] < xleft) && (jleft+1 < n2)) \ ++jleft; \ \ /* \ process from j = jleft until dx > rmax \ */ \ for(j=jleft; j < n2; j++) { \ 
dx = x2[j] - x1i; \ if(dx > rmax) \ break; \ dy = y2[j] - y1i; \ d2 = dx * dx + dy * dy; \ if(d2 <= r2max) { \ /* add this (i, j) pair to output */ \ CONTRIBUTE_IJ; \ } \ } \ COMMIT_I; \ } \ } spatstat/src/call3d.c0000755000176200001440000002474713406057617014244 0ustar liggesusers/* $Revision: 1.5 $ $Date: 2010/10/24 10:57:02 $ R interface Pass data between R and internally-defined data structures # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #include #include "geom3.h" #include "functable.h" #undef DEBUG #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif void g3one(Point *p, int n, Box *b, Ftable *g); void g3three(Point *p, int n, Box *b, Ftable *g); void g3cen(Point *p, int n, Box *b, H4table *count); void k3trans(Point *p, int n, Box *b, Ftable *k); void k3isot(Point *p, int n, Box *b, Ftable *k); void pcf3trans(Point *p, int n, Box *b, Ftable *pcf, double delta); void pcf3isot(Point *p, int n, Box *b, Ftable *pcf, double delta); void phatminus(Point *p, int n, Box *b, double vside, Itable *count); void phatnaive(Point *p, int n, Box *b, double vside, Itable *count); void p3hat4(Point *p, int n, Box *b, double vside, H4table *count); /* ALLOCATION OF SPACE FOR STRUCTURES/ARRAYS We have defined an alloc() and free() function for each type. However, the free() functions currently do nothing, because we use R_alloc to allocate transient space, which is freed automatically by R. 
*/ Ftable * allocFtable(n) /* allocate function table of size n */ int n; { Ftable *x; x = (Ftable *) R_alloc(1, sizeof(Ftable)); x->n = n; x->f = (double *) R_alloc(n, sizeof(double)); x->num = (double *) R_alloc(n, sizeof(double)); x->denom = (double *) R_alloc(n, sizeof(double)); return(x); } void freeFtable(x) Ftable *x; { } Itable * allocItable(n) int n; { Itable *x; x = (Itable *) R_alloc(1, sizeof(Itable)); x->n = n; x->num = (int *) R_alloc(n, sizeof(int)); x->denom = (int *) R_alloc(n, sizeof(int)); return(x); } void freeItable(x) Itable *x; { } H4table * allocH4table(n) int n; { H4table *x; x = (H4table *) R_alloc(1, sizeof(H4table)); x->n = n; x->obs = (int *) R_alloc(n, sizeof(int)); x->nco = (int *) R_alloc(n, sizeof(int)); x->cen = (int *) R_alloc(n, sizeof(int)); x->ncc = (int *) R_alloc(n, sizeof(int)); return(x); } void freeH4table(x) H4table *x; { } Box * allocBox() /* I know this is ridiculous but it's consistent. */ { Box *b; b = (Box *) R_alloc(1, sizeof(Box)); return(b); } void freeBox(x) Box *x; { } Point * allocParray(n) /* allocate array of n Points */ int n; { Point *p; p = (Point *) R_alloc(n, sizeof(Point)); return(p); } void freeParray(x) Point *x; { } /* CREATE AND INITIALISE DATA STORAGE */ Ftable * MakeFtable(t0, t1, n) double *t0, *t1; int *n; { Ftable *tab; int i, nn; nn = *n; tab = allocFtable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->f[i] = 0.0; tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } Itable * MakeItable(t0, t1, n) double *t0, *t1; int *n; { Itable *tab; int i, nn; nn = *n; tab = allocItable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } H4table * MakeH4table(t0, t1, n) double *t0, *t1; int *n; { H4table *tab; int i, nn; nn = *n; tab = allocH4table(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->obs[i] = 0; tab->nco[i] = 0; tab->cen[i] = 0; tab->ncc[i] = 0; } tab->upperobs = 0; tab->uppercen = 0; return(tab); 
} /* CONVERSION OF DATA TYPES R -> internal including allocation of internal data types as needed */ Point * RtoPointarray(x,y,z,n) double *x, *y, *z; int *n; { int i, nn; Point *p; nn = *n; p = allocParray(nn); for(i = 0; i < nn; i++) { p[i].x = x[i]; p[i].y = y[i]; p[i].z = z[i]; } return(p); } Box * RtoBox(x0, x1, y0, y1, z0, z1) double *x0, *x1, *y0, *y1, *z0, *z1; { Box *b; b = allocBox(); b->x0 = *x0; b->x1 = *x1; b->y0 = *y0; b->y1 = *y1; b->z0 = *z0; b->z1 = *z1; return(b); } /* CONVERSION OF DATA TYPES internal -> R Note: it can generally be assumed that the R arguments are already allocated vectors of correct length, so we do not allocate them. */ void FtabletoR(tab, t0, t1, n, f, num, denom) /* internal */ Ftable *tab; /* R representation */ double *t0, *t1; int *n; double *f, *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *n = tab->n; for(i = 0; i < tab->n; i++) { f[i] = tab->f[i]; num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeFtable(tab); } void ItabletoR(tab, t0, t1, m, num, denom) /* internal */ Itable *tab; /* R representation */ double *t0, *t1; int *m; int *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; for(i = 0; i < tab->n; i++) { num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeItable(tab); } void H4tabletoR(tab, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) /* internal */ H4table *tab; /* R representation */ double *t0, *t1; int *m; int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; *upperobs = tab->upperobs; *uppercen = tab->uppercen; for(i = 0; i < tab->n; i++) { obs[i] = tab->obs[i]; nco[i] = tab->nco[i]; cen[i] = tab->cen[i]; ncc[i] = tab->ncc[i]; } freeH4table(tab); } /* R CALLING INTERFACE These routines are called from R by > .C("routine-name", ....) 
*/ void RcallK3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: k3trans(p, (int) *n, b, tab); break; case 1: k3isot(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); k3trans(p, (int) *n, b, tab); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch(*method) { case 1: g3one(p, (int) *n, b, tab); break; case 3: g3three(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 3\n", *method); g3three(p, (int) *n, b, tab); } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside RcallG3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); g3cen(p, (int) *n, b, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving RcallG3cen\n") } void RcallF3(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, num, denom, method) double *x, *y, *z; /* points */ int *n; double 
*x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* Itable */ int *num, *denom; int *method; { Point *p; Box *b; Itable *count; DEBUGMESSAGE("Inside Rcall_f3\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeItable(t0, t1, m); switch((int) *method) { case 0: phatnaive(p, (int) *n, b, *vside, count); break; case 1: phatminus(p, (int) *n, b, *vside, count); break; default: Rprintf("Method %d not recognised: defaults to 1\n", *method); phatminus(p, (int) *n, b, *vside, count); } ItabletoR(count, t0, t1, m, num, denom); DEBUGMESSAGE("Leaving Rcall_f3\n") } void RcallF3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside Rcallf3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); p3hat4(p, (int) *n, b, *vside, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving Rcallf3cen\n") } void Rcallpcf3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method, delta) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; double *delta; /* Epanechnikov kernel halfwidth */ { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: pcf3trans(p, (int) *n, b, tab, (double) *delta); break; case 1: pcf3isot(p, (int) *n, b, tab, (double) *delta); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); pcf3trans(p, (int) *n, b, tab, (double) *delta); break; } FtabletoR(tab, t0, t1, m, f, num, 
denom); } spatstat/src/knnXdist.h0000644000176200001440000001501313406057617014670 0ustar liggesusers #if (1 == 0) /* knnXdist.h Code template for C functions supporting nncross for k-nearest neighbours (k > 1) THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF y COORDINATE This code is #included multiple times in knndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.10 $ $Date: 2013/12/10 03:29:55 $ */ #endif void FNAME(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; int *kmax; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, nk, nk1; int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1; double d2, d2minK, x1i, y1i, dx, dy, dy2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif #ifdef EXCLUDE int id1i; #endif #ifdef TRACER int kk; #endif npoints1 = *n1; npoints2 = *n2; nk = *kmax; nk1 = nk - 1; hu = *huge; hu2 = hu * hu; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; 
i < maxchunk; i++) { /* initialise nn distances and indices */ d2minK = hu2; jwhich = -1; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } x1i = x1[i]; y1i = y1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif #ifdef TRACER Rprintf("i=%d : (%lf, %lf) ..................... \n", i, x1i, y1i); #endif if(lastjwhich < npoints2) { #ifdef TRACER Rprintf("\tForward search from lastjwhich=%d:\n", lastjwhich); #endif /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { #ifdef TRACER Rprintf("\tjright=%d \t (%lf, %lf)\n", jright, x2[jright], y2[jright]); #endif dy = y2[jright] - y1i; dy2 = dy * dy; #ifdef TRACER Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK); #endif if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #ifdef TRACER Rprintf("\t\t %d and %d are not identical\n", i, jright); #endif #endif dx = x2[jright] - x1i; d2 = dx * dx + dy2; #ifdef TRACER Rprintf("\t\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ #ifdef TRACER Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", d2min[nk1], d2); #endif d2min[nk1] = d2; jwhich = jright; #ifdef WHICH which[nk1] = jright; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } #ifdef TRACER Rprintf("\t\t sorted nn distances:\n"); for(kk = 0; kk < nk; kk++) Rprintf("\t\t d2min[%d] = %lf\n", kk, d2min[kk]); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; #ifdef TRACER Rprintf("\t\t d2minK=%lf\n", d2minK); #endif } #ifdef EXCLUDE } #endif } /* end forward search */ #ifdef TRACER Rprintf("\tEnd forward search\n"); #endif } if(lastjwhich > 0) { #ifdef 
TRACER Rprintf("\tBackward search from lastjwhich=%d:\n", lastjwhich); #endif /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { #ifdef TRACER Rprintf("\tjleft=%d \t (%lf, %lf)\n", jleft, x2[jleft], y2[jleft]); #endif dy = y1i - y2[jleft]; dy2 = dy * dy; #ifdef TRACER Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK); #endif if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #ifdef TRACER Rprintf("\t\t %d and %d are not identical\n", i, jleft); #endif #endif dx = x2[jleft] - x1i; d2 = dx * dx + dy2; #ifdef TRACER Rprintf("\t\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ #ifdef TRACER Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", d2min[nk1], d2); #endif d2min[nk1] = d2; jwhich = jleft; #ifdef WHICH which[nk1] = jleft; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } #ifdef TRACER Rprintf("\t\t sorted nn distances:\n"); for(kk = 0; kk < nk; kk++) Rprintf("\t\t d2min[%d] = %lf\n", kk, d2min[kk]); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; #ifdef TRACER Rprintf("\t\t d2minK=%lf\n", d2minK); #endif } #ifdef EXCLUDE } #endif } /* end backward search */ #ifdef TRACER Rprintf("\tEnd backward search\n"); #endif } /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* save index of last neighbour encountered */ lastjwhich = jwhich; /* end of loop over points i */ } } } 
spatstat/src/sparselinalg.c0000644000176200001440000000071313406057617015546 0ustar liggesusers#include 
#include 

/* 
  sparselinalg.c

  Counterpart of 'linalg.c' for sparse matrices/arrays

  $Revision: 1.7 $ $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

*/

#undef DBG

/* Instantiate the code template 'spasumsymout.h' twice:
   once unweighted (CspaSumSymOut), once weighted (CspaWtSumSymOut). */

#define FNAME CspaSumSymOut
#undef WEIGHTS
#include "spasumsymout.h"
#undef FNAME

#define FNAME CspaWtSumSymOut
#define WEIGHTS
#include "spasumsymout.h"
#undef FNAME
spatstat/src/KrectV3.h0000644000176200001440000000025313115225157014340 0ustar liggesusers/* KrectV3.h: dispatch to template KrectV4.h with or without border correction */
/* (original header comment misnamed this file as "KrectV4.h") */
if((*doBord) == 1) {
#define BORDER
#include "KrectV4.h"
} else {
#undef BORDER
#include "KrectV4.h"
}
spatstat/src/linnncross.c0000644000176200001440000000136313406057617015254 0ustar liggesusers#include 

/* 
   linnncross.c

   Shortest-path distances between nearest neighbours in linear network
   One pattern to another pattern

   $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $

   linndcross
   linndxcross

   Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
   Licence: GNU Public Licence >= 2

*/

/* dpath is the Nv x Nv matrix of shortest-path distances between network vertices */
#define DPATH(I,J) dpath[(I) + Nv * (J)]
/* answer is indexed by the Np source points */
#define ANSWER(I,J) answer[(I) + Np * (J)]
#define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2))

/* definition of linndcross: no exclusion of identical points */
#define FNAME linndcross
#undef EXCLU
#define WHICH
#include "linnncross.h"
#undef FNAME
#undef EXCLU
#undef WHICH

/* definition of linndxcross: excludes pairs with matching id codes */
#define FNAME linndxcross
#define EXCLU
#define WHICH
#include "linnncross.h"
spatstat/src/Knone.h0000644000176200001440000000567213406057617014142 0ustar liggesusers/*
  Knone.h

  Code template for K function estimators in Knone.c

  Variables:

     FNAME        function name
     OUTTYPE      storage type of the output 'numer' ('int' or 'double')
     WEIGHTED     #defined for weighted (inhom) K function

  Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013
  Licence: GPL >= 2

  $Revision: 1.6 $  $Date: 2013/09/18 04:08:26 $

*/

void FNAME(
  nxy, x, y,
#ifdef WEIGHTED w, #endif nr, rmax, numer) /* inputs */ int *nxy, *nr; double *x, *y, *rmax; #ifdef WEIGHTED double *w; #endif /* output */ OUTTYPE *numer; { int i, j, l, n, nt, n1, lmin, lmax, maxchunk; double dt, tmax, tmax2, xi, yi; double dratio, dij, dij2, dx, dy, dx2; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; lmax = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; tmax2 = tmax * tmax; /* initialise */ for(l = 0; l < nt; l++) numer[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { #ifdef WEIGHTED wi = w[i]; #endif xi = x[i]; yi = y[i]; /* scan backward from i-1 until x[j] < x[i] -tmax or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* effectively increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } /* scan forward from i+1 until x[j] > x[i] + tmax or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif 
numer[lmin] += WIJ; } } } } } } /* Now accumulate the numerator. */ if(nt > 1) for(l=1; l < nt; l++) numer[l] += numer[l-1]; } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat/src/auctionbf.c0000644000176200001440000001572413406057617015044 0ustar liggesusers/* auctionbf.c $Revision: 1.1 $ $Date: 2014/06/28 02:14:04 $ Code by Dominic Schuhmacher up to local adaptations for spatstat this code is identical to Revision 0.4 for the R package transport */ /* n >= 2 is assumed throughout !!!!!!!!! */ #include #include #include typedef struct State { int n; double epsbid; /* the current eps */ int backwards; /* 0 if we should do forward auction, 1 if we should do backward auction */ int nofassigned; /* number of assigned persons */ int *pers_to_obj; /* -1 means unassigned */ int *obj_to_pers; /* -1 means unassigned */ double *price; double *profit; int *desiremat; /* matrix of desires */ double *persvalue; /* desire minus price of current person in forward phase */ double *objvalue; /* desire minus profit of current object in reverse phase */ /* last three only used in bid, but maybe better to reserve memory once and for all */ } State; #define DESIRE(I,J,STATE,NVALUE) ((STATE)->desiremat)[(NVALUE) * (J) + (I)] #define DESIREMAIN(I,J,STATE,NVALUE) ((STATE).desiremat)[(NVALUE) * (J) + (I)] #define MIN(A,B) ((A)<(B) ? 
(A) : (B)) void bidbf(State *state, int person); void lurebf(State *state, int obj); int arrayargmax(double *a, int n); double arraysec(double *a, int n, int arg); /* void printit(State *state); */ /* ------------ The main function ----------------------------- */ void auctionbf(int *desirem, int *nn, int *pers_to_obj, double *price, double *profit, int *kk, double *eps) { int i,j,r; /* indices */ int k,n; State state; /* inputs */ state.n = n = *nn; k = *kk; /* length of eps, only needed in outside loop */ state.pers_to_obj = pers_to_obj; /* n vector: person i gets which object */ state.price = price; /* n vector: price of object j */ state.profit = profit; /* n vector: profit of person i */ state.desiremat = desirem; /* n x n vector: desire of person i for object j */ /* scratch space */ state.obj_to_pers = (int *) R_alloc((long) n, sizeof(int)); state.persvalue = (double *) R_alloc((long) n, sizeof(double)); state.objvalue = (double *) R_alloc((long) n, sizeof(double)); /* Prices start at what the R-function supplied (usually 0) */ /* Profits are set to the rowwise max that satisfies eps-CS */ for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { state.persvalue[j] = DESIREMAIN(i,j,state,n); } state.profit[i] = arrayargmax(state.persvalue, n); } for (r = 0; r < k; r++) { state.backwards = 0; state.epsbid = eps[r]; /* At start everything is unassigned */ state.nofassigned = 0; for (j = 0; j < n; j++) { state.pers_to_obj[j] = -1; state.obj_to_pers[j] = -1; } while (state.nofassigned < n) { /* printit(&state); */ R_CheckUserInterrupt(); if (state.backwards == 0) { /* printit(&state); */ for (i = 0; i < n; i++) { if (state.pers_to_obj[i] == -1) { /* Rprintf("Bid \n"); */ bidbf(&state, i); /* bid does assigning and unassigning and changes nofassigned */ } } } else { /* printit(&state); */ for (j = 0; j < n; j++) { if (state.obj_to_pers[j] == -1) { /* Rprintf("Lure \n"); */ lurebf(&state, j); /* lure does assigning and unassigning and changes nofassigned */ } } } } /* 
eof while */ } /* eof eps-scaling for-loop */ } /* ------------ Functions called by auction ------------------------- */ void bidbf(State *state, int person) { int j; int n; int bidfor, oldpers; double bidamount; n = state->n; for (j = 0; j < n; j++) { state->persvalue[j] = DESIRE(person,j,state,n) - state->price[j]; } bidfor = arrayargmax(state->persvalue, n); bidamount = state->persvalue[bidfor] - arraysec(state->persvalue,n,bidfor) + state->epsbid; /* here we get a float result, the rest are int results */ oldpers = state->obj_to_pers[bidfor]; if (oldpers == -1) { state->nofassigned++; state->backwards = 1; } else { state->pers_to_obj[oldpers] = -1; } state->pers_to_obj[person] = bidfor; state->obj_to_pers[bidfor] = person; state->price[bidfor] = state->price[bidfor] + bidamount; /* new forward/reverse auction algo */ state->profit[person] = DESIRE(person,bidfor,state,n) - state->price[bidfor]; } /* like bidbf, but for reverse auction */ void lurebf(State *state, int obj) { int i; int n; int lurepno, oldobj; double lureamount; n = state->n; for (i = 0; i < n; i++) { state->objvalue[i] = DESIRE(i,obj,state,n) - state->profit[i]; } lurepno = arrayargmax(state->objvalue, n); lureamount = state->objvalue[lurepno] - arraysec(state->objvalue,n,lurepno) + state->epsbid; /* here we get a float result, the rest are int results */ oldobj = state->pers_to_obj[lurepno]; if (oldobj == -1) { state->nofassigned++; state->backwards = 0; } else { state->obj_to_pers[oldobj] = -1; } state->obj_to_pers[obj] = lurepno; state->pers_to_obj[lurepno] = obj; state->profit[lurepno] = state->profit[lurepno] + lureamount; /* new forward/reverse auction algo */ state->price[obj] = DESIRE(lurepno,obj,state,n) - state->profit[lurepno]; } /* ------------ Little helpers ------------------------- */ /* Gives first index that maximizes array */ int arrayargmax(double *a, int n) { int i, arg; double amax; arg = 0; amax = a[0]; for (i = 1; i < n; i++) if (a[i] > amax) { arg = i; amax = a[i]; } 
return(arg); } /* Second largest element of a non-negative integer array knowing the largest is at index arg */ double arraysec(double *a, int n, int arg) { int i; double amax; if (arg > 0) amax = a[0]; else amax = a[1]; for (i = 0; i < arg; i++) if (a[i] > amax) amax = a[i]; for (i = arg+1; i < n; i++) if (a[i] > amax) amax = a[i]; return(amax); } /* void printit(State *state) { int i=0,n=0; n = state->n; Rprintf("Current state: \n"); Rprintf("backwards: %d \n", state->backwards); Rprintf("nofassigned: %d \n", state->nofassigned); Rprintf("pers_to_obj: "); for (i = 0; i < n; i++) { Rprintf("%d ", state->pers_to_obj[i]); } Rprintf("\n"); Rprintf("obj_to_pers: "); for (i = 0; i < n; i++) { Rprintf("%d ", state->obj_to_pers[i]); } Rprintf("\n"); Rprintf("price: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->price[i]); } Rprintf("\n"); Rprintf("profit: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->profit[i]); } Rprintf("\n"); Rprintf("persvalue: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->persvalue[i]); } Rprintf("\n"); Rprintf("objvalue: "); for (i = 0; i < n; i++) { Rprintf("%2.9lf ", state->objvalue[i]); } Rprintf("\n"); Rprintf("\n\n\n"); } */ spatstat/src/corrections.c0000755000176200001440000000762713447541751015434 0ustar liggesusers/* corrections.c Edge corrections $Revision: 1.15 $ $Date: 2019/03/30 01:15:48 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #include #include "chunkloop.h" #include "yesno.h" #include "constants.h" #undef DEBUG /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define BETWEEN(X,X0,X1) ((double) ( ( (X) - (X0) ) * ( (X) - (X1) ) ) <= 0.0) #define UNDER(X,Y,X0,Y0,X1,Y1) \ ((double) ( ( (Y1) - (Y0) ) * ( (X) - (X0) ) ) >= (double) ( ( (Y) - (Y0) ) * ( (X1) - (X0) ) ) ) #define UNDERNEATH(X,Y,X0,Y0,X1,Y1) \ ((((double) (X0)) < ((double) (X1))) ? 
UNDER(X,Y,X0,Y0,X1,Y1) : UNDER(X,Y,X1,Y1,X0,Y0)) #define TESTINSIDE(X,Y,X0,Y0,X1,Y1) \ (BETWEEN(X,X0,X1) && UNDERNEATH(X, Y, X0, Y0, X1, Y1)) void ripleybox(nx, x, y, rmat, nr, xmin, ymin, xmax, ymax, epsilon, out) /* inputs */ int *nx, *nr; /* dimensions */ double *x, *y; /* coordinate vectors of length nx */ double *rmat; /* matrix nx by nr */ double *xmin, *ymin, *xmax, *ymax; /* box dimensions */ double *epsilon; /* threshold for proximity to corner */ /* output */ double *out; /* output matrix nx by nr */ { int i, j, n, m, ijpos, ncor, maxchunk; double xx, yy, x0, y0, x1, y1, dL, dR, dU, dD, aL, aU, aD, aR, rij; double cL, cU, cD, cR, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double corner, extang; double eps; n = *nx; m = *nr; x0 = *xmin; y0 = *ymin; x1 = *xmax; y1 = *ymax; eps = *epsilon; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xx = x[i]; yy = y[i]; /* perpendicular distance from point to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xx - x0; dR = x1 - xx; dD = yy - y0; dU = y1 - yy; /* test for corner of the rectangle */ #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < eps) ? 1 : 0) ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2) ? YES : NO; /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); for(j = 0; j < m; j++) { ijpos = j * n + i; rij = rmat[ijpos]; #ifdef DEBUG Rprintf("rij = %lf\n", rij); #endif /* half the angle subtended by the intersection between the circle of radius r[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < rij) ? acos(dL/rij) : 0.0; aR = (dR < rij) ? acos(dR/rij) : 0.0; aD = (dD < rij) ? acos(dD/rij) : 0.0; aU = (dU < rij) ? 
acos(dU/rij) : 0.0; #ifdef DEBUG Rprintf("aL = %lf\n", aL); Rprintf("aR = %lf\n", aR); Rprintf("aD = %lf\n", aD); Rprintf("aU = %lf\n", aU); #endif /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); #ifdef DEBUG Rprintf("cL = %lf\n", cL); Rprintf("cR = %lf\n", cR); Rprintf("cD = %lf\n", cD); Rprintf("cU = %lf\n", cU); #endif /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners */ if(corner) extang += 1/4; #ifdef DEBUG Rprintf("extang = %lf\n", extang); #endif /* OK, now compute weight */ out[ijpos] = 1 / (1 - extang); } } } } /* C function ripleypoly */ #undef DEBUGPOLY #define RIPLEYFUN ripleypoly #include "ripleypoly.h" #undef RIPLEYFUN /* C function rippolDebug */ #define RIPLEYFUN rippolDebug #define DEBUGPOLY #include "ripleypoly.h" #undef RIPLEYFUN #undef DEBUGPOLY spatstat/src/nnMDdist.c0000755000176200001440000007257413553316305014616 0ustar liggesusers/* nnMDdist.c Nearest Neighbour Distances in m dimensions $Revision: 1.18 $ $Date: 2019/10/21 11:12:32 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Argument x is an m * n matrix with columns corresponding to points and rows corresponding to coordinates. 
Spatial dimension m must be > 1 THE FOLLOWING FUNCTIONS ASSUME THAT THE ROWS OF x ARE SORTED IN ASCENDING ORDER OF THE FIRST COLUMN nndMD Nearest neighbour distances nnwMD Nearest neighbours and their distances nnXwMD Nearest neighbour from one list to another nnXxMD Nearest neighbour from one list to another, with overlaps knndMD k-th nearest neighbour distances knnwMD k-th nearest neighbours and their distances knnXwMD k-th nearest neighbours from one list to another knnXxMD k-th nearest neighbours from one list to another, with overlaps */ #undef SPATSTAT_DEBUG #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(); void nndMD(n, m, x, nnd, huge) /* inputs */ int *n, *m; double *x, *huge; /* output */ double *nnd; { int npoints, mdimen, i, j, left, right, leftpos, rightpos, maxchunk; double d2, d2min, hu, hu2, xi0, dx0, dxj; double *xi; npoints = *n; mdimen = *m; xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ hu = *huge; hu2 = hu * hu; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif d2min = hu2; for(j = 0; j < mdimen; j++) xi[j] = x[i * mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", x[i * mdimen + j]); Rprintf(")\n"); #endif /* search backward */ if(i > 0) { for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L=%d, d2min=%lf\n", left, d2min); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; leftpos = left * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[leftpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; #ifdef SPATSTAT_DEBUG Rprintf("\tupdating d2min=%lf\n", d2min); #endif } } } /* search forward */ if(i < npoints - 1) { for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d, 
d2min=%lf\n", right, d2min); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2min) break; rightpos = right * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[rightpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; #ifdef SPATSTAT_DEBUG Rprintf("\tupdating d2min=%lf\n", d2min); #endif } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif nnd[i] = sqrt(d2min); } } } /* nnwMD: same as nndMD, but also returns id of nearest neighbour */ void nnwMD(n, m, x, nnd, nnwhich, huge) /* inputs */ int *n, *m; double *x, *huge; /* output */ double *nnd; int *nnwhich; { int npoints, mdimen, i, j, left, right, leftpos, rightpos, which, maxchunk; double d2, d2min, hu, hu2, xi0, dx0, dxj; double *xi; npoints = *n; mdimen = *m; xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ hu = *huge; hu2 = hu * hu; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif d2min = hu2; which = -1; for(j = 0; j < mdimen; j++) xi[j] = x[i * mdimen + j]; xi0 = xi[0]; /* search backward */ if(i > 0) { for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; leftpos = left * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[leftpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; which = left; } } } /* search forward */ if(i < npoints - 1) { for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2min) break; rightpos = right * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[rightpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; which = right; } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif nnd[i] = sqrt(d2min); /* convert index to R convention */ 
nnwhich[i] = which + 1; } } } /* nnXwMD: for TWO point patterns X and Y, find the nearest neighbour (from each point of X to the nearest point of Y) returning both the distance and the identifier Requires both patterns to be sorted in order of increasing first coord */ void nnXwMD(m, n1, x1, n2, x2, nnd, nnwhich, huge) /* inputs */ int *m, *n1, *n2; double *x1, *x2, *huge; /* outputs */ double *nnd; int *nnwhich; { int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich; double d2, d2min, x1i0, dx0, dxell, hu, hu2; double *x1i; int maxchunk; hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; mdimen = *m; if(npoints1 == 0 || npoints2 == 0) return; x1i = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ lastjwhich = 0; OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) { d2min = hu2; jwhich = -1; for(ell = 0; ell < mdimen; ell++) x1i[ell] = x1[i * mdimen + ell]; x1i0 = x1i[0]; /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dx0 = x1i0 - x2[jleft * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jleft * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jleft; } } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(jright = lastjwhich; jright < npoints2; ++jright) { dx0 = x2[jright * mdimen] - x1i0; d2 = dx0 * dx0; if(d2 > d2min) break; for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jright * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jright; } } } nnd[i] = sqrt(d2min); nnwhich[i] = jwhich + 1; /* R convention */ lastjwhich = jwhich; } } } /* nnXxMD: similar to nnXwMD but allows X and Y to include common points (which are not to be counted as 
   neighbours)

   Code numbers id1, id2 are attached to the patterns X and Y respectively,
   such that x1[i], y1[i] and x2[j], y2[j] are the same point
   iff id1[i] = id2[j].

   Requires both patterns to be sorted in order of increasing first coord
*/

/*
  nnXxMD

  For each point of pattern X, nearest point of pattern Y that is not the
  identical point (identity decided by the id codes, see above).
  Outputs: nnd[i] = distance, nnwhich[i] = 1-based index into Y.
  Uses the previous query's answer (lastjwhich) as a warm start, valid
  because x1 is sorted on the first coordinate.
*/

void nnXxMD(m, n1, x1, id1, n2, x2, id2, nnd, nnwhich, huge)
     /* inputs */
     int *m, *n1, *n2;
     double *x1, *x2, *huge;
     int *id1, *id2;
     /* outputs */
     double *nnd;
     int *nnwhich;
{
  int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich, id1i;
  double d2, d2min, x1i0, dx0, dxell, hu, hu2;
  double *x1i;
  int maxchunk;

  hu = *huge;
  hu2 = hu * hu;

  npoints1 = *n1;
  npoints2 = *n2;
  mdimen = *m;

  if(npoints1 == 0 || npoints2 == 0)
    return;

  /* scratch copy of coordinates of current 'from' point */
  x1i = (double *) R_alloc((size_t) mdimen, sizeof(double));
  /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */

  lastjwhich = 0;

  OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
      d2min = hu2;     /* current best squared distance */
      jwhich = -1;     /* -1 = no neighbour found yet */
      id1i = id1[i];
      for(ell = 0; ell < mdimen; ell++)
        x1i[ell] = x1[i * mdimen + ell];
      x1i0 = x1i[0];
      /* search backward from previous nearest neighbour */
      if(lastjwhich > 0) {
        for(jleft = lastjwhich - 1; jleft >= 0; --jleft) {
          dx0 = x1i0 - x2[jleft * mdimen];
          d2 = dx0 * dx0;
          /* sorted first coordinate: nothing closer further back */
          if(d2 > d2min)
            break;
          /* do not compare identical points */
          if(id2[jleft] != id1i) {
            for(ell = 1; ell < mdimen && d2 < d2min; ell++) {
              dxell = x1i[ell] - x2[jleft * mdimen + ell];
              d2 += dxell * dxell;
            }
            if (d2 < d2min) {
              d2min = d2;
              jwhich = jleft;
            }
          }
        }
      }
      /* search forward from previous nearest neighbour */
      if(lastjwhich < npoints2) {
        for(jright = lastjwhich; jright < npoints2; ++jright) {
          dx0 = x2[jright * mdimen] - x1i0;
          d2 = dx0 * dx0;
          if(d2 > d2min)
            break;
          /* do not compare identical points */
          if(id2[jright] != id1i) {
            for(ell = 1; ell < mdimen && d2 < d2min; ell++) {
              dxell = x1i[ell] - x2[jright * mdimen + ell];
              d2 += dxell * dxell;
            }
            if (d2 < d2min) {
              d2min = d2;
              jwhich = jright;
            }
          }
        }
      }
      nnd[i] = sqrt(d2min);
      nnwhich[i] = jwhich + 1L; /* R convention */
      lastjwhich = jwhich;
    }
  }
}

/* knndMD

   nearest neighbours 1:kmax

   The kmax current best squared distances for point i are kept in d2min[]
   in increasing order; d2minK (= d2min[nk1]) is the k-th best, used as the
   pruning radius.  A better candidate overwrites the last entry and is
   bubbled into place (a single upward pass of bubble sort suffices since
   the rest of the array is already sorted).
*/

void knndMD(n, m, kmax, x, nnd, huge)
     /* inputs */
     int *n, *m, *kmax;
     double *x, *huge;
     /* output matrix (kmax * npoints) */
     double *nnd;
{
  int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, maxchunk;
  double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp;
  double *d2min, *xi;

  hu = *huge;
  hu2 = hu * hu;

  npoints = *n;
  mdimen = *m;
  nk = *kmax;
  nk1 = nk - 1;

  /* create space to store the squared k-th nearest neighbour distances
     for the current point */
  d2min = (double *) R_alloc((size_t) nk, sizeof(double));

  /* scratch space */
  xi = (double *) R_alloc((size_t) mdimen, sizeof(double));

  /* loop over points */
  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
#ifdef SPATSTAT_DEBUG
      Rprintf("\ni=%d\n", i);
#endif
      /* initialise nn distances */
      d2minK = hu2;
      for(k = 0; k < nk; k++)
        d2min[k] = hu2;
      for(j = 0; j < mdimen; j++)
        xi[j] = x[i* mdimen + j];
      xi0 = xi[0];
#ifdef SPATSTAT_DEBUG
      Rprintf("\n (");
      for(j = 0; j < mdimen; j++) Rprintf("%lf, ", xi[j]);
      Rprintf(")\n");
#endif
      /* search backward */
      for(left = i - 1; left >= 0; --left) {
        dx0 = xi0 - x[left * mdimen];
        d2 = dx0 * dx0;
        if(d2 > d2minK)
          break;
#ifdef SPATSTAT_DEBUG
        Rprintf("L=%d\n", left);
        Rprintf("\t 0 ");
#endif
        for(j = 1; j < mdimen && d2 < d2minK; j++) {
#ifdef SPATSTAT_DEBUG
          Rprintf("%d ", j);
#endif
          dxj = xi[j] - x[left * mdimen + j];
          d2 += dxj * dxj;
        }
#ifdef SPATSTAT_DEBUG
        Rprintf("\n\t d2=%lf\n", d2);
#endif
        if (d2 < d2minK) {
          /* overwrite last entry */
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
          d2min[nk1] = d2;
          /* bubble sort */
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] before bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
#endif
          unsorted = YES;
          for(k = nk1; unsorted && k > 0; k--) {
            k1 = k - 1;
            if(d2min[k] < d2min[k1]) {
              /* swap entries */
              tmp = d2min[k1];
              d2min[k1] = d2min[k];
              d2min[k] = tmp;
            } else {
              unsorted = NO;
            }
          }
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] after bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
#endif
          /* adjust maximum distance */
          d2minK = d2min[nk1];
        }
      }
      /* search forward */
      for(right = i + 1; right < npoints; ++right) {
#ifdef SPATSTAT_DEBUG
        Rprintf("R=%d\n", right);
        Rprintf("\t 0 ");
#endif
        dx0 = x[right * mdimen] - xi0;
        d2 = dx0 * dx0;
        if(d2 > d2minK)
          break;
        for(j = 1; j < mdimen && d2 < d2minK; j++) {
#ifdef SPATSTAT_DEBUG
          Rprintf("%d ", j);
#endif
          dxj = xi[j] - x[right * mdimen + j];
          d2 += dxj * dxj;
        }
#ifdef SPATSTAT_DEBUG
        Rprintf("\n\t d2=%lf\n", d2);
#endif
        if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
          /* overwrite last entry */
          d2min[nk1] = d2;
          /* bubble sort */
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] before bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
#endif
          unsorted = YES;
          for(k = nk1; unsorted && k > 0; k--) {
            k1 = k - 1;
            if(d2min[k] < d2min[k1]) {
              /* swap entries */
              tmp = d2min[k1];
              d2min[k1] = d2min[k];
              d2min[k] = tmp;
            } else {
              unsorted = NO;
            }
          }
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] after bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
#endif
          /* adjust maximum distance */
          d2minK = d2min[nk1];
        }
      }
#ifdef SPATSTAT_DEBUG
      Rprintf("\n");
#endif
      /* copy nn distances for point i
         to output matrix in ROW MAJOR order */
      for(k = 0; k < nk; k++) {
        nnd[nk * i + k] = sqrt(d2min[k]);
      }
    }
  }
}

/* knnwMD

   nearest neighbours 1:kmax

   returns distances and indices

   Same algorithm as knndMD; the which[] array is kept in lock-step with
   d2min[] through the bubble-sort swaps so which[k] always identifies the
   point at distance sqrt(d2min[k]).
*/

void knnwMD(n, m, kmax, x, nnd, nnwhich, huge)
     /* inputs */
     int *n, *m, *kmax;
     double *x, *huge;
     /* output matrix (kmax * npoints) */
     double *nnd;
     int *nnwhich;
{
  int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, itmp;
  double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp;
  double *d2min, *xi;
  int *which;
  int maxchunk;

  hu = *huge;
  hu2 = hu * hu;

  npoints = *n;
  mdimen = *m;
  nk = *kmax;
  nk1 = nk - 1;

  /* create space to store the nearest neighbour distances and indices
     for the current point */
  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
  which = (int *) R_alloc((size_t) nk, sizeof(int));

  /* scratch space */
  xi = (double *) R_alloc((size_t) mdimen, sizeof(double));

  /* loop over points */
  OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, npoints, maxchunk, 16384) {
#ifdef SPATSTAT_DEBUG
      Rprintf("\ni=%d\n", i);
#endif
      /* initialise nn distances */
      d2minK = hu2;
      for(k = 0; k < nk; k++) {
        d2min[k] = hu2;
        which[k] = -1;
      }
      for(j = 0; j < mdimen; j++)
        xi[j] = x[i* mdimen + j];
      xi0 = xi[0];
#ifdef SPATSTAT_DEBUG
      Rprintf("\n (");
      for(j = 0; j < mdimen; j++) Rprintf("%lf, ", x[i * mdimen + j]);
      Rprintf(")\n");
#endif
      /* search backward */
      for(left = i - 1; left >= 0; --left) {
#ifdef SPATSTAT_DEBUG
        Rprintf("L=%d, d2minK=%lf\n", left, d2minK);
        Rprintf("\t 0 ");
#endif
        dx0 = xi0 - x[left * mdimen];
        d2 = dx0 * dx0;
        if(d2 > d2minK)
          break;
        for(j = 1; j < mdimen && d2 < d2minK; j++) {
#ifdef SPATSTAT_DEBUG
          Rprintf("%d ", j);
#endif
          dxj = xi[j] - x[left * mdimen + j];
          d2 += dxj * dxj;
        }
        if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
          /* overwrite last entry */
          d2min[nk1] = d2;
          which[nk1] = left;
          /* bubble sort */
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] before bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
          Rprintf("\twhich[] before bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
          Rprintf("\n");
#endif
          unsorted = YES;
          for(k = nk1; unsorted && k > 0; k--) {
            k1 = k - 1;
            if(d2min[k] < d2min[k1]) {
              /* swap entries */
              tmp = d2min[k1];
              d2min[k1] = d2min[k];
              d2min[k] = tmp;
              itmp = which[k1];
              which[k1] = which[k];
              which[k] = itmp;
            } else {
              unsorted = NO;
            }
          }
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] after bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
          Rprintf("\twhich[] after bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
          Rprintf("\n");
#endif
          /* adjust maximum distance */
          d2minK = d2min[nk1];
        }
      }
      /* search forward */
      for(right = i + 1; right < npoints; ++right) {
#ifdef SPATSTAT_DEBUG
        Rprintf("R=%d, d2minK=%lf\n", right, d2minK);
        Rprintf("\t 0 ");
#endif
        dx0 = x[right * mdimen] - xi0;
        d2 = dx0 * dx0;
        if(d2 > d2minK)
          break;
        for(j = 1; j < mdimen && d2 < d2minK; j++) {
#ifdef SPATSTAT_DEBUG
          Rprintf("%d ", j);
#endif
          dxj = xi[j] - x[right * mdimen + j];
          d2 += dxj * dxj;
        }
        if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
          /* overwrite last entry */
          d2min[nk1] = d2;
          which[nk1] = right;
          /* bubble sort */
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] before bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
          Rprintf("\twhich[] before bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
          Rprintf("\n");
#endif
          unsorted = YES;
          for(k = nk1; unsorted && k > 0; k--) {
            k1 = k - 1;
            if(d2min[k] < d2min[k1]) {
              /* swap entries */
              tmp = d2min[k1];
              d2min[k1] = d2min[k];
              d2min[k] = tmp;
              itmp = which[k1];
              which[k1] = which[k];
              which[k] = itmp;
            } else {
              unsorted = NO;
            }
          }
#ifdef SPATSTAT_DEBUG
          Rprintf("\td2min[] after bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
          Rprintf("\n");
          Rprintf("\twhich[] after bubble sort:");
          for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
          Rprintf("\n");
#endif
          /* adjust maximum distance */
          d2minK = d2min[nk1];
        }
      }
#ifdef SPATSTAT_DEBUG
      Rprintf("\n");
#endif
      /* copy nn distances for point i
         to output matrix in ROW MAJOR order */
      for(k = 0; k < nk; k++) {
        nnd[nk * i + k] = sqrt(d2min[k]);
        /* convert index back to R convention */
        nnwhich[nk * i + k] = which[k] + 1;
      }
    }
  }
}

/* -------------- TWO POINT PATTERNS, k-nearest ------------- */

/* knnXwMD

   nearest neighbours 1:kmax

   returns distances and indices
*/

void knnXwMD(m, n1, x1, n2, x2, kmax,
             nnd, nnwhich, huge)
     /* inputs */
     int *m, *n1, *n2, *kmax;
     double *x1, *x2, *huge;
     /* output matrix (kmax * n1) */
     double *nnd;
     int *nnwhich;
{
  int mdimen, npoints1, npoints2, nk, nk1;
  int i, ell, jleft, jright, jwhich, lastjwhich;
  int k, k1, unsorted, itmp;
  double d2, d2minK, x1i0, dx0, dxell, hu, hu2, tmp;
  double *d2min, *x1i;
  int *which;
  int maxchunk;

  hu = *huge;
  hu2 = hu * hu;

  npoints1 = *n1;
  npoints2 = *n2;
  mdimen = *m;
  nk = *kmax;
  nk1 = nk - 1;

  /* create space to store the nearest neighbour distances and indices
     for the current point */
  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
  which = (int *) R_alloc((size_t) nk, sizeof(int));

  /* scratch space for current 'from' point coordinates */
  x1i = (double *) R_alloc((size_t) mdimen, sizeof(double));

  /* warm start: last neighbour found for previous (nearby) query point */
  lastjwhich = 0;

  /* loop over 'from' points */
  OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
#ifdef SPATSTAT_DEBUG
      Rprintf("\ni=%d\n", i);
#endif
      /* initialise nn distances */
      d2minK = hu2;
      jwhich = -1;
      for(k = 0; k < nk; k++) {
        d2min[k] = hu2;
        which[k] = -1;
      }
      /* copy coordinates of current 'from' point */
      for(ell = 0; ell < mdimen; ell++)
        x1i[ell] = x1[i* mdimen + ell];
      x1i0 = x1i[0];
#ifdef SPATSTAT_DEBUG
      Rprintf("\n From (");
      for(ell = 0; ell < mdimen; ell++) Rprintf("%lf, ", x1[i * mdimen + ell]);
      Rprintf(")\n");
#endif
      if(lastjwhich > 0) {
        /* search backward from previous nearest neighbour */
        for(jleft = lastjwhich - 1; jleft >= 0; --jleft) {
#ifdef SPATSTAT_DEBUG
          Rprintf("L=%d, d2minK=%lf\n", jleft, d2minK);
          Rprintf("\t 0 ");
#endif
          dx0 = x1i0 - x2[jleft * mdimen];
          d2 = dx0 * dx0;
          /* sorted first coordinate: nothing closer further back */
          if(d2 > d2minK)
            break;
          for(ell = 1; ell < mdimen && d2 < d2minK; ell++) {
#ifdef SPATSTAT_DEBUG
            Rprintf("%d ", ell);
#endif
            dxell = x1i[ell] - x2[jleft * mdimen + ell];
            d2 += dxell * dxell;
          }
          if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
            Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
            /* overwrite last entry */
            d2min[nk1] = d2;
            which[nk1] = jleft;
            jwhich = jleft;
            /* bubble sort: restore increasing order of d2min[],
               keeping which[] in lock-step */
#ifdef SPATSTAT_DEBUG
            Rprintf("\td2min[] before bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
            Rprintf("\n");
            Rprintf("\twhich[] before bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
            Rprintf("\n");
#endif
            unsorted = YES;
            for(k = nk1; unsorted && k > 0; k--) {
              k1 = k - 1;
              if(d2min[k] < d2min[k1]) {
                /* swap entries */
                tmp = d2min[k1];
                d2min[k1] = d2min[k];
                d2min[k] = tmp;
                itmp = which[k1];
                which[k1] = which[k];
                which[k] = itmp;
              } else {
                unsorted = NO;
              }
            }
#ifdef SPATSTAT_DEBUG
            Rprintf("\td2min[] after bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
            Rprintf("\n");
            Rprintf("\twhich[] after bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
            Rprintf("\n");
#endif
            /* adjust maximum distance */
            d2minK = d2min[nk1];
          }
        }
      }
      /* search forward */
      if(lastjwhich < npoints2) {
        for(jright = lastjwhich; jright < npoints2; ++jright) {
#ifdef SPATSTAT_DEBUG
          Rprintf("R=%d, d2minK=%lf\n", jright, d2minK);
          Rprintf("\t 0 ");
#endif
          dx0 = x2[jright * mdimen] - x1i0;
          d2 = dx0 * dx0;
          if(d2 > d2minK)
            break;
          for(ell = 1; ell < mdimen && d2 < d2minK; ell++) {
#ifdef SPATSTAT_DEBUG
            Rprintf("%d ", ell);
#endif
            dxell = x1i[ell] - x2[jright * mdimen + ell];
            d2 += dxell * dxell;
          }
          if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
            Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
            /* overwrite last entry */
            d2min[nk1] = d2;
            which[nk1] = jright;
            jwhich = jright;
            /* bubble sort */
#ifdef SPATSTAT_DEBUG
            Rprintf("\td2min[] before bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
            Rprintf("\n");
            Rprintf("\twhich[] before bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
            Rprintf("\n");
#endif
            unsorted = YES;
            for(k = nk1; unsorted && k > 0; k--) {
              k1 = k - 1;
              if(d2min[k] < d2min[k1]) {
                /* swap entries */
                tmp = d2min[k1];
                d2min[k1] = d2min[k];
                d2min[k] = tmp;
                itmp = which[k1];
                which[k1] = which[k];
                which[k] = itmp;
              } else {
                unsorted = NO;
              }
            }
#ifdef SPATSTAT_DEBUG
            Rprintf("\td2min[] after bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
            Rprintf("\n");
            Rprintf("\twhich[] after bubble sort:");
            for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
            Rprintf("\n");
#endif
            /* adjust maximum distance */
            d2minK = d2min[nk1];
          }
        }
      }
#ifdef SPATSTAT_DEBUG
      Rprintf("\n");
#endif
      /* copy nn distances for point i
         to output matrix in ROW MAJOR order */
      for(k = 0; k < nk; k++) {
        nnd[nk * i + k] = sqrt(d2min[k]);
        /* convert index back to R convention */
        nnwhich[nk * i + k] = which[k] + 1;
      }
      /* save index of last neighbour encountered */
      lastjwhich = jwhich;
    }
  }
}

/* knnXxMD

   nearest neighbours 1:kmax

   with exclusions

   returns distances and indices

   Same as knnXwMD except that points of X and Y carrying the same id code
   are treated as the same point and never counted as neighbours.
*/

void knnXxMD(m, n1, x1, id1, n2, x2, id2, kmax, nnd, nnwhich, huge)
     /* inputs */
     int *m, *n1, *n2, *kmax;
     double *x1, *x2, *huge;
     int *id1, *id2;
     /* output matrix (kmax * n1) */
     double *nnd;
     int *nnwhich;
{
  int mdimen, npoints1, npoints2, nk, nk1;
  int i, ell, jleft, jright, jwhich, lastjwhich;
  int k, k1, unsorted, itmp, id1i;
  double d2, d2minK, x1i0, dx0, dxell, hu, hu2, tmp;
  double *d2min, *x1i;
  int *which;
  int maxchunk;

  hu = *huge;
  hu2 = hu * hu;

  npoints1 = *n1;
  npoints2 = *n2;
  mdimen = *m;
  nk = *kmax;
  nk1 = nk - 1;

  /* create space to store the nearest neighbour distances and indices
     for the current point */
  d2min = (double *) R_alloc((size_t) nk, sizeof(double));
  which = (int *) R_alloc((size_t) nk, sizeof(int));

  /* scratch space for current 'from' point coordinates */
  x1i = (double *) R_alloc((size_t) mdimen, sizeof(double));

  lastjwhich = 0;

  /* loop over 'from' points */
  OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) {
#ifdef SPATSTAT_DEBUG
      Rprintf("\ni=%d\n", i);
#endif
      /* initialise nn distances */
      d2minK = hu2;
      jwhich = -1;
      for(k = 0; k < nk; k++) {
        d2min[k] = hu2;
        which[k] = -1;
      }
      /* copy coordinates of current 'from' point */
      for(ell = 0; ell < mdimen; ell++)
        x1i[ell] = x1[i* mdimen + ell];
      x1i0 = x1i[0];
      id1i = id1[i];
#ifdef SPATSTAT_DEBUG
      Rprintf("\n From (");
      for(ell = 0; ell < mdimen; ell++) Rprintf("%lf, ", x1[i * mdimen + ell]);
      Rprintf(")\n");
#endif
      if(lastjwhich > 0) {
        /* search backward from previous nearest neighbour */
        for(jleft = lastjwhich - 1; jleft >= 0; --jleft) {
#ifdef SPATSTAT_DEBUG
          Rprintf("L=%d, d2minK=%lf\n", jleft, d2minK);
          Rprintf("\t 0 ");
#endif
          dx0 = x1i0 - x2[jleft * mdimen];
          d2 = dx0 * dx0;
          if(d2 > d2minK)
            break;
          /* don't compare identical points */
          if(id2[jleft] != id1i) {
            for(ell = 1; ell < mdimen && d2 < d2minK; ell++) {
#ifdef SPATSTAT_DEBUG
              Rprintf("%d ", ell);
#endif
              dxell = x1i[ell] - x2[jleft * mdimen + ell];
              d2 += dxell * dxell;
            }
            if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
              Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
              /* overwrite last entry */
              d2min[nk1] = d2;
              which[nk1] = jleft;
              jwhich = jleft;
              /* bubble sort */
#ifdef SPATSTAT_DEBUG
              Rprintf("\td2min[] before bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
              Rprintf("\n");
              Rprintf("\twhich[] before bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
              Rprintf("\n");
#endif
              unsorted = YES;
              for(k = nk1; unsorted && k > 0; k--) {
                k1 = k - 1;
                if(d2min[k] < d2min[k1]) {
                  /* swap entries */
                  tmp = d2min[k1];
                  d2min[k1] = d2min[k];
                  d2min[k] = tmp;
                  itmp = which[k1];
                  which[k1] = which[k];
                  which[k] = itmp;
                } else {
                  unsorted = NO;
                }
              }
#ifdef SPATSTAT_DEBUG
              Rprintf("\td2min[] after bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
              Rprintf("\n");
              Rprintf("\twhich[] after bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
              Rprintf("\n");
#endif
              /* adjust maximum distance */
              d2minK = d2min[nk1];
            }
          }
        }
      }
      /* search forward */
      if(lastjwhich < npoints2) {
        for(jright = lastjwhich; jright < npoints2; ++jright) {
#ifdef SPATSTAT_DEBUG
          Rprintf("R=%d, d2minK=%lf\n", jright, d2minK);
          Rprintf("\t 0 ");
#endif
          dx0 = x2[jright * mdimen] - x1i0;
          d2 = dx0 * dx0;
          if(d2 > d2minK)
            break;
          /* don't compare identical points */
          if(id2[jright] != id1i) {
            for(ell = 1; ell < mdimen && d2 < d2minK; ell++) {
#ifdef SPATSTAT_DEBUG
              Rprintf("%d ", ell);
#endif
              dxell = x1i[ell] - x2[jright * mdimen + ell];
              d2 += dxell * dxell;
            }
            if (d2 < d2minK) {
#ifdef SPATSTAT_DEBUG
              Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]);
#endif
              /* overwrite last entry */
              d2min[nk1] = d2;
              which[nk1] = jright;
              jwhich = jright;
              /* bubble sort */
#ifdef SPATSTAT_DEBUG
              Rprintf("\td2min[] before bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
              Rprintf("\n");
              Rprintf("\twhich[] before bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
              Rprintf("\n");
#endif
              unsorted = YES;
              for(k = nk1; unsorted && k > 0; k--) {
                k1 = k - 1;
                if(d2min[k] < d2min[k1]) {
                  /* swap entries */
                  tmp = d2min[k1];
                  d2min[k1] = d2min[k];
                  d2min[k] = tmp;
                  itmp = which[k1];
                  which[k1] = which[k];
                  which[k] = itmp;
                } else {
                  unsorted = NO;
                }
              }
#ifdef SPATSTAT_DEBUG
              Rprintf("\td2min[] after bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]);
              Rprintf("\n");
              Rprintf("\twhich[] after bubble sort:");
              for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]);
              Rprintf("\n");
#endif
              /* adjust maximum distance */
              d2minK = d2min[nk1];
            }
          }
        }
      }
#ifdef SPATSTAT_DEBUG
      Rprintf("\n");
#endif
      /* copy nn distances for point i
         to output matrix in ROW MAJOR order */
      for(k = 0; k < nk; k++) {
        nnd[nk * i + k] = sqrt(d2min[k]);
        /* convert index back to R convention */
        nnwhich[nk * i + k] = which[k] + 1;
      }
      /* save index of last neighbour encountered */
      lastjwhich = jwhich;
    }
  }
}
spatstat/src/PerfectStraussHard.h0000644000176200001440000001272613406057617016652 0ustar liggesusers
// ..................... Strauss-Hardcore process ..........................
// $Revision: 1.3 $ $Date: 2014/02/18 10:42:53 $

// Strauss process with a hard core: pairwise-interaction point process
// with density proportional to  beta^n(x) * gamma^s(x)  where s(x) is the
// number of pairs closer than R, and pairs closer than H are forbidden.
// (Base class PointProcess and the sampler are declared elsewhere in the
// package -- see PerfectStrauss.h / the perfect-simulation headers.)
class StraussHardProcess : public PointProcess {
 public:
  // model parameters: intensity beta, interaction gamma,
  // hard core distance H, interaction range R, and their squares
  double beta, gamma, H, R, Hsquared, Rsquared;
  StraussHardProcess(double xmin, double xmax, double ymin, double ymax,
                     double b, double g, double Ri, double Hc);
  ~StraussHardProcess(){}
  void NewEvent(double *x, double *y, char *InWindow);
  void GeneratePoisson(Point *headPoint,
                       long int *GeneratedPoints,
                       long int *LivingPoints,
                       long int *NoP);
  double Interaction(double dsquared);
  //  void CalcBeta(long int xsidepomm, long int ysidepomm,
  //		 double *betapomm);
  //  void CheckBeta(long int xsidepomm, long int ysidepomm,
  //		 double *betapomm);
  //  double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p);
  //  void Beta(struct Point2 *TempCell);
  //  void CalcBeta(Point2Pattern *p2p);
};

// Constructor: stores parameters, precomputes squared distances,
// and sets the quantities the sampler reads (InteractionRange,
// TotalBirthRate = beta * window area).
StraussHardProcess::StraussHardProcess(double xmin, double xmax,
				       double ymin, double ymax,
				       double b, double g,
				       double Ri, double Hc) :
  PointProcess(xmin, xmax, ymin, ymax){
  beta = b;
  gamma = g;
  R = Ri;
  H = Hc;
  Rsquared = R * R;
  Hsquared = H * H;
  InteractionRange = R;
  TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin);
}

// Pair interaction as a function of SQUARED distance:
// 1 beyond range R, gamma inside [H, R), 0 (forbidden) inside hard core H.
double StraussHardProcess::Interaction(double dsquared)
{
  if(dsquared >= Rsquared) return(1.0);
  if(dsquared >= Hsquared) return(gamma);
  return(0.0);
}

// Propose a uniformly-distributed new point in the window.
// slumptal() is the package's U(0,1) generator -- declared elsewhere.
void StraussHardProcess::NewEvent(double *x, double *y, char *InWindow)
{
  double Xdim, Ydim;
  Xdim = Xmax-Xmin;
  Ydim = Ymax-Ymin;
  *x = slumptal()*Xdim+Xmin;
  *y = slumptal()*Ydim+Ymin;
  *InWindow = 1;
}

// Generate a Poisson(beta * area) number of uniform points and push them
// onto the linked list headed by headPoint; updates the point counters.
void StraussHardProcess::GeneratePoisson(Point *headPoint,
					 long int *GeneratedPoints,
					 long int *LivingPoints,
					 long int *NoP)
{
  int i;
  double xtemp, ytemp, L, Xdim, Ydim;
  struct Point *TempPoint;
  Xdim = Xmax-Xmin;
  Ydim = Ymax-Ymin;
  L = beta*Xdim*Ydim;
  *GeneratedPoints = poisson(L);
  *LivingPoints = *GeneratedPoints;
  for (i=1; i<=*GeneratedPoints ; i++){
    //Rprintf("Generating StraussHardProcess Poisson 3\n");
    //scanf("%f",&f1);
    xtemp = slumptal()*Xdim+Xmin;
    ytemp = slumptal()*Ydim+Ymin;
    //
    //Rprintf("Generating StraussHardProcess Poisson 3.2\n");
    TempPoint = ALLOCATE(struct Point);
    //
    TempPoint->X = xtemp;
    TempPoint->Y = ytemp;
    TempPoint->No = i;
    TempPoint->R = slumptal();
    //Rprintf("Generating StraussHardProcess Poisson 3.6\n");
    TempPoint->next = headPoint->next;
    headPoint->next = TempPoint;
    *NoP = *NoP + 1;
  }
}

// ........................... Interface to R ..........................
// Perfect (exact) simulation of the Strauss-Hardcore process by
// dominated coupling from the past.  Returns list(x, y, n).

extern "C" {
  SEXP PerfectStraussHard(SEXP beta,
			  SEXP gamma,
			  SEXP r,
			  SEXP hc,
			  SEXP xrange,
			  SEXP yrange) {
    // input parameters
    double Beta, Gamma, R, H, Xmin, Xmax, Ymin, Ymax;
    double *Xrange, *Yrange;
    // internal
    int xcells, ycells;
    long int StartTime, EndTime;
    // output
    int noutmax;
    SEXP xout, yout, nout, out;
    double *xx, *yy;
    int *nn;

    // protect arguments from garbage collector
    PROTECT(beta   = AS_NUMERIC(beta));
    PROTECT(gamma  = AS_NUMERIC(gamma));
    PROTECT(r      = AS_NUMERIC(r));
    PROTECT(hc     = AS_NUMERIC(hc));
    PROTECT(xrange = AS_NUMERIC(xrange));
    PROTECT(yrange = AS_NUMERIC(yrange));
    // that's 6 protected objects

    // extract arguments
    Beta   = *(NUMERIC_POINTER(beta));
    Gamma  = *(NUMERIC_POINTER(gamma));
    R      = *(NUMERIC_POINTER(r));
    H      = *(NUMERIC_POINTER(hc));
    Xrange = NUMERIC_POINTER(xrange);
    Xmin   = Xrange[0];
    Xmax   = Xrange[1];
    Yrange = NUMERIC_POINTER(yrange);
    Ymin   = Yrange[0];
    Ymax   = Yrange[1];

    // compute cell array size (grid used to speed up neighbour counting;
    // clamped to 1..9 cells in each direction)
    xcells = (int) floor((Xmax-Xmin)/ R);
    if(xcells > 9) xcells = 9;
    if(xcells < 1) xcells = 1;
    ycells = (int) floor((Ymax-Ymin)/ R);
    if(ycells > 9) ycells = 9;
    if(ycells < 1) ycells = 1;
#ifdef DBGS
    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
    Rprintf("Initialising\n");
#endif

    // Initialise StraussHard point process
    StraussHardProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,
				      Beta, Gamma, R, H);
    // Initialise point pattern
    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
    // parameters: min x, max x, min y, max y, "cells" in x and y direction
    // used for speeding up neighbour counting, 9 is max here

#ifdef DBGS
    Rprintf("Initialisation complete\n");
#endif

    // Synchronise random number generator
    GetRNGstate();

    // Initialise perfect sampler
    Sampler PerfectSampler(&ExampleProcess);

    // Perform perfect sampling
    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);

    // Synchronise random number generator
    PutRNGstate();

    // Get upper estimate of number of points
    noutmax = ExamplePattern.UpperCount() + 1;

    // Allocate space for output
    PROTECT(xout = NEW_NUMERIC(noutmax));
    PROTECT(yout = NEW_NUMERIC(noutmax));
    PROTECT(nout = NEW_INTEGER(1));
    xx = NUMERIC_POINTER(xout);
    yy = NUMERIC_POINTER(yout);
    nn = INTEGER_POINTER(nout);

    // copy data into output storage
    ExamplePattern.Return(xx, yy, nn, noutmax);

    // pack up into output list
    PROTECT(out  = NEW_LIST(3));
    SET_VECTOR_ELT(out, 0, xout);
    SET_VECTOR_ELT(out, 1, yout);
    SET_VECTOR_ELT(out, 2, nout);

    // return
    UNPROTECT(10);  // 6 arguments plus xout, yout, nout, out
    return(out);
  }
}
spatstat/src/loccums.h0000644000176200001440000000415213406057617014535 0ustar liggesusers
/*

  loccums.h

  C template for loccum.c

  data-to-data functions

  $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

  macros:

  FNAME     function name
  NULVAL    initial value (empty sum = 0, empty product = 1)
  INC(A,B)  increment operation A += B or A *= B

  For each data point i, accumulates a cumulative function of r:
  every other point j within distance d <= r contributes v[j]
  (via INC) to all entries of column i from index ceil(d/rstep) up.
  Assumes x is sorted in ascending order, enabling the early 'break'
  on the first-coordinate gap.
*/

void FNAME(n, x, y, v, nr, rmax, ans)
     /* inputs */
     int *n, *nr;
     double *x, *y, *v;
     double *rmax;
     /* output */
     double *ans;  /* matrix of column vectors of functions for each point */
{
  int N, Nr, Nans;
  double Rmax;
  int i, j, k, kmin, maxchunk, columnstart;
  double Rmax2, rstep, xi, yi;
  double dx, dy, dx2, d2, d, contrib;

  N = *n;
  Nr = *nr;
  Rmax = *rmax;

  if(N == 0)
    return;

  rstep = Rmax/(Nr-1);
  Rmax2 = Rmax * Rmax;
  Nans  = Nr * N;

  /* initialise products to 1 (i.e. fill with NULVAL) */
  OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(k, Nans, maxchunk, 8196) {
      ans[k] = NULVAL;
    }
  }

  OUTERCHUNKLOOP(i, N, maxchunk, 8196) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(i, N, maxchunk, 8196) {
      xi = x[i];
      yi = y[i];
      columnstart = Nr * i; /* start position for f_i(.) in 'ans' */
      /* process backward until |dx| > Rmax */
      if(i > 0) {
	for(j=i-1; j >= 0; j--) {
	  dx = x[j] - xi;
	  dx2 = dx * dx;
	  if(dx2 > Rmax2)
	    break;
	  dy = y[j] - yi;
	  d2 = dx2 + dy * dy;
	  if(d2 <= Rmax2) {
	    d = sqrt(d2);
	    kmin = (int) ceil(d/rstep);
	    if(kmin < Nr) {
	      contrib = v[j];
	      for(k = kmin; k < Nr; k++)
		INC(ans[columnstart + k] , contrib);
	    }
	  }
	}
      }
      /* process forward until |dx| > Rmax */
      if(i < N - 1) {
	for(j=i+1; j < N; j++) {
	  dx = x[j] - xi;
	  dx2 = dx * dx;
	  if(dx2 > Rmax2)
	    break;
	  dy = y[j] - yi;
	  d2 = dx2 + dy * dy;
	  if(d2 <= Rmax2) {
	    d = sqrt(d2);
	    kmin = (int) ceil(d/rstep);
	    if(kmin < Nr) {
	      contrib = v[j];
	      for(k = kmin; k < Nr; k++)
		INC(ans[columnstart + k] , contrib);
	    }
	  }
	}
      }
    }
  }
}
spatstat/src/init.c0000644000176200001440000003207013623714547014031 0ustar liggesusers
/*
  Native symbol registration table for spatstat package

  Automatically generated - do not edit this file!

  NOTE(review): the angle-bracket include targets below were stripped by
  text extraction and cannot be restored here without editing generated
  code; regenerate init.c to recover them.
*/

#include "proto.h"
#include
#include
#include // for NULL
#include

/* See proto.h for declarations for the native routines registered below.
*/ static const R_CMethodDef CEntries[] = { {"acrdenspt", (DL_FUNC) &acrdenspt, 10}, {"acrsmoopt", (DL_FUNC) &acrsmoopt, 10}, {"adenspt", (DL_FUNC) &adenspt, 7}, {"anydupxy", (DL_FUNC) &anydupxy, 4}, {"areaBdif", (DL_FUNC) &areaBdif, 11}, {"areadifs", (DL_FUNC) &areadifs, 7}, {"asmoopt", (DL_FUNC) &asmoopt, 8}, {"auctionbf", (DL_FUNC) &auctionbf, 7}, {"awtcrdenspt", (DL_FUNC) &awtcrdenspt, 11}, {"awtcrsmoopt", (DL_FUNC) &awtcrsmoopt, 11}, {"awtdenspt", (DL_FUNC) &awtdenspt, 8}, {"awtsmoopt", (DL_FUNC) &awtsmoopt, 9}, {"bdrymask", (DL_FUNC) &bdrymask, 4}, {"Cbiform", (DL_FUNC) &Cbiform, 6}, {"Cclosepaircounts", (DL_FUNC) &Cclosepaircounts, 5}, {"Ccountends", (DL_FUNC) &Ccountends, 14}, {"Ccrossdist", (DL_FUNC) &Ccrossdist, 8}, {"Ccrosspaircounts", (DL_FUNC) &Ccrosspaircounts, 8}, {"CcrossPdist", (DL_FUNC) &CcrossPdist, 10}, {"Cidw", (DL_FUNC) &Cidw, 14}, {"Cidw2", (DL_FUNC) &Cidw2, 16}, {"ClineMquad", (DL_FUNC) &ClineMquad, 23}, {"Clinequad", (DL_FUNC) &Clinequad, 18}, {"ClineRMquad", (DL_FUNC) &ClineRMquad, 23}, {"ClineRquad", (DL_FUNC) &ClineRquad, 18}, {"Clinvwhichdist", (DL_FUNC) &Clinvwhichdist, 12}, {"Clixellate", (DL_FUNC) &Clixellate, 16}, {"cocoGraph", (DL_FUNC) &cocoGraph, 6}, {"cocoImage", (DL_FUNC) &cocoImage, 3}, {"Corput", (DL_FUNC) &Corput, 3}, {"Cpairdist", (DL_FUNC) &Cpairdist, 5}, {"CpairPdist", (DL_FUNC) &CpairPdist, 7}, {"Cquadform", (DL_FUNC) &Cquadform, 5}, {"crdenspt", (DL_FUNC) &crdenspt, 9}, {"crosscount", (DL_FUNC) &crosscount, 8}, {"crsmoopt", (DL_FUNC) &crsmoopt, 10}, {"CspaSumSymOut", (DL_FUNC) &CspaSumSymOut, 9}, {"CspaWtSumSymOut", (DL_FUNC) &CspaWtSumSymOut, 13}, {"Csum2outer", (DL_FUNC) &Csum2outer, 6}, {"Csumouter", (DL_FUNC) &Csumouter, 4}, {"Csumsymouter", (DL_FUNC) &Csumsymouter, 4}, {"Cwsum2outer", (DL_FUNC) &Cwsum2outer, 7}, {"Cwsumouter", (DL_FUNC) &Cwsumouter, 5}, {"Cwsumsymouter", (DL_FUNC) &Cwsumsymouter, 5}, {"Cxypolyselfint", (DL_FUNC) &Cxypolyselfint, 11}, {"D3crossdist", (DL_FUNC) &D3crossdist, 10}, {"D3crossPdist", 
(DL_FUNC) &D3crossPdist, 13}, {"D3pairdist", (DL_FUNC) &D3pairdist, 6}, {"D3pairPdist", (DL_FUNC) &D3pairPdist, 9}, {"Ddist2dpath", (DL_FUNC) &Ddist2dpath, 7}, {"delta2area", (DL_FUNC) &delta2area, 10}, {"denspt", (DL_FUNC) &denspt, 6}, {"digberJ", (DL_FUNC) &digberJ, 6}, {"dinfty_R", (DL_FUNC) &dinfty_R, 3}, {"discareapoly", (DL_FUNC) &discareapoly, 12}, {"discs2grid", (DL_FUNC) &discs2grid, 11}, {"distmapbin", (DL_FUNC) &distmapbin, 9}, {"dwpure", (DL_FUNC) &dwpure, 6}, {"Ediggatsti", (DL_FUNC) &Ediggatsti, 10}, {"Ediggra", (DL_FUNC) &Ediggra, 11}, {"Efiksel", (DL_FUNC) &Efiksel, 9}, {"Egeyer", (DL_FUNC) &Egeyer, 11}, {"ESdiggra", (DL_FUNC) &ESdiggra, 12}, {"exact_dt_R", (DL_FUNC) &exact_dt_R, 14}, {"fardist2grid", (DL_FUNC) &fardist2grid, 10}, {"fardistgrid", (DL_FUNC) &fardistgrid, 10}, {"Fclosepairs", (DL_FUNC) &Fclosepairs, 16}, {"Fcrosspairs", (DL_FUNC) &Fcrosspairs, 19}, {"Gdenspt", (DL_FUNC) &Gdenspt, 5}, {"Gsmoopt", (DL_FUNC) &Gsmoopt, 7}, {"Gwtdenspt", (DL_FUNC) &Gwtdenspt, 6}, {"Gwtsmoopt", (DL_FUNC) &Gwtsmoopt, 8}, {"hasX3close", (DL_FUNC) &hasX3close, 6}, {"hasX3pclose", (DL_FUNC) &hasX3pclose, 7}, {"hasXclose", (DL_FUNC) &hasXclose, 5}, {"hasXpclose", (DL_FUNC) &hasXpclose, 6}, {"hasXY3close", (DL_FUNC) &hasXY3close, 10}, {"hasXY3pclose", (DL_FUNC) &hasXY3pclose, 11}, {"hasXYclose", (DL_FUNC) &hasXYclose, 8}, {"hasXYpclose", (DL_FUNC) &hasXYpclose, 9}, {"Idist2dpath", (DL_FUNC) &Idist2dpath, 7}, {"idwloo", (DL_FUNC) &idwloo, 8}, {"idwloo2", (DL_FUNC) &idwloo2, 10}, {"KborderD", (DL_FUNC) &KborderD, 8}, {"KborderI", (DL_FUNC) &KborderI, 8}, {"knnd3D", (DL_FUNC) &knnd3D, 8}, {"knndMD", (DL_FUNC) &knndMD, 6}, {"knndsort", (DL_FUNC) &knndsort, 6}, {"knnGinterface", (DL_FUNC) &knnGinterface, 15}, {"knnw3D", (DL_FUNC) &knnw3D, 8}, {"knnwhich", (DL_FUNC) &knnwhich, 6}, {"knnwMD", (DL_FUNC) &knnwMD, 7}, {"knnX3Dinterface", (DL_FUNC) &knnX3Dinterface, 17}, {"knnXinterface", (DL_FUNC) &knnXinterface, 15}, {"knnXwMD", (DL_FUNC) &knnXwMD, 9}, {"knnXxMD", 
(DL_FUNC) &knnXxMD, 11}, {"KnoneD", (DL_FUNC) &KnoneD, 6}, {"KnoneI", (DL_FUNC) &KnoneI, 6}, {"knownCif", (DL_FUNC) &knownCif, 2}, {"KrectDbl", (DL_FUNC) &KrectDbl, 17}, {"KrectInt", (DL_FUNC) &KrectInt, 17}, {"KrectWtd", (DL_FUNC) &KrectWtd, 18}, {"Kwborder", (DL_FUNC) &Kwborder, 9}, {"Kwnone", (DL_FUNC) &Kwnone, 7}, {"lincrossdist", (DL_FUNC) &lincrossdist, 16}, {"linearradius", (DL_FUNC) &linearradius, 8}, {"linknncross", (DL_FUNC) &linknncross, 16}, {"linknnd", (DL_FUNC) &linknnd, 13}, {"linndcross", (DL_FUNC) &linndcross, 18}, {"linndxcross", (DL_FUNC) &linndxcross, 20}, {"linnndist", (DL_FUNC) &linnndist, 13}, {"linnnwhich", (DL_FUNC) &linnnwhich, 14}, {"linpairdist", (DL_FUNC) &linpairdist, 12}, {"linSnndwhich", (DL_FUNC) &linSnndwhich, 15}, {"lintileindex", (DL_FUNC) &lintileindex, 9}, {"linvknndist", (DL_FUNC) &linvknndist, 13}, {"locpcfx", (DL_FUNC) &locpcfx, 12}, {"locprod", (DL_FUNC) &locprod, 7}, {"locWpcfx", (DL_FUNC) &locWpcfx, 13}, {"locxprod", (DL_FUNC) &locxprod, 10}, {"maxnnd2", (DL_FUNC) &maxnnd2, 5}, {"maxPnnd2", (DL_FUNC) &maxPnnd2, 5}, {"mdtPOrect", (DL_FUNC) &mdtPOrect, 14}, {"minnnd2", (DL_FUNC) &minnnd2, 5}, {"minPnnd2", (DL_FUNC) &minPnnd2, 5}, {"nearestvalidpixel", (DL_FUNC) &nearestvalidpixel, 10}, {"nnd3D", (DL_FUNC) &nnd3D, 7}, {"nndistsort", (DL_FUNC) &nndistsort, 5}, {"nndMD", (DL_FUNC) &nndMD, 5}, {"nnGinterface", (DL_FUNC) &nnGinterface, 14}, {"nnw3D", (DL_FUNC) &nnw3D, 7}, {"nnwhichsort", (DL_FUNC) &nnwhichsort, 5}, {"nnwMD", (DL_FUNC) &nnwMD, 6}, {"nnX3Dinterface", (DL_FUNC) &nnX3Dinterface, 16}, {"nnXinterface", (DL_FUNC) &nnXinterface, 14}, {"nnXwMD", (DL_FUNC) &nnXwMD, 8}, {"nnXxMD", (DL_FUNC) &nnXxMD, 10}, {"paircount", (DL_FUNC) &paircount, 5}, {"poly2imA", (DL_FUNC) &poly2imA, 7}, {"poly2imI", (DL_FUNC) &poly2imI, 6}, {"ps_exact_dt_R", (DL_FUNC) &ps_exact_dt_R, 13}, {"raster3filter", (DL_FUNC) &raster3filter, 5}, {"RcallF3", (DL_FUNC) &RcallF3, 17}, {"RcallF3cen", (DL_FUNC) &RcallF3cen, 20}, {"RcallG3", (DL_FUNC) &RcallG3, 
17}, {"RcallG3cen", (DL_FUNC) &RcallG3cen, 19}, {"RcallK3", (DL_FUNC) &RcallK3, 17}, {"Rcallpcf3", (DL_FUNC) &Rcallpcf3, 18}, {"ripleybox", (DL_FUNC) &ripleybox, 11}, {"ripleypoly", (DL_FUNC) &ripleypoly, 12}, {"rippolDebug", (DL_FUNC) &rippolDebug, 12}, {"scantrans", (DL_FUNC) &scantrans, 11}, {"seg2pixI", (DL_FUNC) &seg2pixI, 8}, {"seg2pixL", (DL_FUNC) &seg2pixL, 11}, {"seg2pixN", (DL_FUNC) &seg2pixN, 9}, {"segdens", (DL_FUNC) &segdens, 10}, {"smoopt", (DL_FUNC) &smoopt, 8}, {"tabsumweight", (DL_FUNC) &tabsumweight, 6}, {"trigraf", (DL_FUNC) &trigraf, 10}, {"trigrafS", (DL_FUNC) &trigrafS, 10}, {"uniqmap2M", (DL_FUNC) &uniqmap2M, 5}, {"uniqmapxy", (DL_FUNC) &uniqmapxy, 4}, {"wtcrdenspt", (DL_FUNC) &wtcrdenspt, 10}, {"wtcrsmoopt", (DL_FUNC) &wtcrsmoopt, 11}, {"wtdenspt", (DL_FUNC) &wtdenspt, 7}, {"wtsmoopt", (DL_FUNC) &wtsmoopt, 9}, {"xypsi", (DL_FUNC) &xypsi, 10}, {"xysegint", (DL_FUNC) &xysegint, 16}, {"xysegXint", (DL_FUNC) &xysegXint, 11}, {"xysi", (DL_FUNC) &xysi, 12}, {"xysiANY", (DL_FUNC) &xysiANY, 12}, {"xysxi", (DL_FUNC) &xysxi, 7}, {NULL, NULL, 0} }; static const R_CallMethodDef CallEntries[] = { {"altVcloseIJDpairs", (DL_FUNC) &altVcloseIJDpairs, 4}, {"altVcloseIJpairs", (DL_FUNC) &altVcloseIJpairs, 4}, {"altVclosepairs", (DL_FUNC) &altVclosepairs, 4}, {"close3IJDpairs", (DL_FUNC) &close3IJDpairs, 5}, {"close3IJpairs", (DL_FUNC) &close3IJpairs, 5}, {"close3pairs", (DL_FUNC) &close3pairs, 5}, {"closePpair", (DL_FUNC) &closePpair, 5}, {"cross3IJDpairs", (DL_FUNC) &cross3IJDpairs, 8}, {"cross3IJpairs", (DL_FUNC) &cross3IJpairs, 8}, {"cross3pairs", (DL_FUNC) &cross3pairs, 8}, {"Cwhist", (DL_FUNC) &Cwhist, 3}, {"Cxysegint", (DL_FUNC) &Cxysegint, 9}, {"CxysegXint", (DL_FUNC) &CxysegXint, 5}, {"graphVees", (DL_FUNC) &graphVees, 3}, {"PerfectDGS", (DL_FUNC) &PerfectDGS, 4}, {"PerfectDiggleGratton", (DL_FUNC) &PerfectDiggleGratton, 6}, {"PerfectHardcore", (DL_FUNC) &PerfectHardcore, 4}, {"PerfectPenttinen", (DL_FUNC) &PerfectPenttinen, 5}, {"PerfectStrauss", 
(DL_FUNC) &PerfectStrauss, 5}, {"PerfectStraussHard", (DL_FUNC) &PerfectStraussHard, 6}, {"thinjumpequal", (DL_FUNC) &thinjumpequal, 3}, {"triDgraph", (DL_FUNC) &triDgraph, 4}, {"triDRgraph", (DL_FUNC) &triDRgraph, 5}, {"trigraph", (DL_FUNC) &trigraph, 3}, {"triograph", (DL_FUNC) &triograph, 3}, {"trioxgraph", (DL_FUNC) &trioxgraph, 4}, {"VcloseIJDpairs", (DL_FUNC) &VcloseIJDpairs, 4}, {"VcloseIJpairs", (DL_FUNC) &VcloseIJpairs, 4}, {"Vclosepairs", (DL_FUNC) &Vclosepairs, 4}, {"Vclosethresh", (DL_FUNC) &Vclosethresh, 5}, {"VcrossIJDpairs", (DL_FUNC) &VcrossIJDpairs, 6}, {"VcrossIJpairs", (DL_FUNC) &VcrossIJpairs, 6}, {"Vcrosspairs", (DL_FUNC) &Vcrosspairs, 6}, {"xmethas", (DL_FUNC) &xmethas, 25}, {NULL, NULL, 0} }; void R_init_spatstat(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } spatstat/src/KrectV4.h0000644000176200001440000000027513115225157014345 0ustar liggesusers/* KrectV5.h with or without uncorrected estimator */ if((*doUnco) == 1) { #define UNCORRECTED #include "KrectBody.h" } else { #undef UNCORRECTED #include "KrectBody.h" } spatstat/src/lintileindex.c0000644000176200001440000000406213426517203015545 0ustar liggesusers#include #include #include "chunkloop.h" /* lintileindex.c Given a tessellation on a linear network, compute tile index for each query point NOTE: data are assumed to be sorted by segment $Revision: 1.2 $ $Date: 2019/02/06 08:36:02 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void lintileindex(n, seg, tp, /* query points, sorted by segment */ dfn, dfseg, dft0, dft1, dftile, /* tessellation data, sorted */ answer) /* query points */ int *n; /* number of query points */ int *seg; /* which segment contains this query point*/ double *tp; /* position along segment */ /* tessellation pieces */ int *dfn; /* number of pieces */ int *dfseg; /* which segment contains this piece */ double *dft0, *dft1; /* positions of endpoints 
of this piece */ int *dftile; /* which tile the piece belongs to */ /* output */ int *answer; /* which tile the query point belongs to */ { int N, M, i, start, finish, j, segi, currentseg, maxchunk; double tpi; N = *n; M = *dfn; currentseg = -1; start = finish = 0; /* answer[] is implicitly initialised to zero, which will serve as NA */ OUTERCHUNKLOOP(i, N, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 1024) { segi = seg[i]; tpi = tp[i]; if(segi > currentseg) { /* advance the bookmark until reaching data for this segment */ while(start < M && dfseg[start] < segi) ++start; if(start == M) { /* Reached end of data for tessellation */ /* All remaining results are NA */ return; } currentseg = dfseg[start]; for(finish = start; finish < M && dfseg[finish] == currentseg; ++finish) ; if(finish == M || dfseg[finish] > currentseg) --finish; } if(currentseg == segi) { for(j = start; j <= finish; ++j) { if(dft0[j] <= tpi && tpi <= dft1[j]) { answer[i] = dftile[j]; break; } } } } } } spatstat/src/sphefrac.c0000755000176200001440000000622613406057617014665 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Routine for calculating surface area of sphere intersected with box # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2013 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif static double pi = 3.141592653589793; /* Factor of 4 * pi * r * r IS ALREADY TAKEN OUT */ double sphesfrac(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double a1(), a2(), a3(); int i, j; p[1] = point->x - box->x0; p[2] = point->y - box->y0; p[3] = point->z - box->z0; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += a1(p[i],r) + a1(q[i],r); #ifdef DEBUG Rprintf("i = %d, a1 = %f, a1 = %f\n", i, a1(p[i],r), a1(q[i],r)); #endif } DBG("Past a1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= a2(p[i], p[j], r) + a2(p[i], q[j], r) + a2(q[i], p[j], r) + a2(q[i], q[j], r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past a2", sum) sum += a3(p[1], p[2], p[3], r) + a3(p[1], p[2], q[3], r); DBG("sum", sum) sum += a3(p[1], q[2], p[3], r) + a3(p[1], q[2], q[3], r); DBG("sum", sum) sum += a3(q[1], p[2], p[3], r) + a3(q[1], p[2], q[3], r); DBG("sum", sum) sum += a3(q[1], q[2], p[3], r) + a3(q[1], q[2], q[3], r); DBG("Past a3", sum) return(1 - sum); } double a1(t, r) double t, r; { /* This is the function A1 divided by 4 pi r^2 */ if(t >= r) return(0.0); return((1 - t/r) * 0.5); } double a2(t1, t2, r) double t1, t2, r; { double c2(); /* This is A2 divided by 4 pi r^2 because c2 is C divided by pi */ return(c2( t1 / r, t2 / r) / 2.0); } double a3(t1, t2, t3, r) double t1, t2, t3, r; { double c3(); /* This is A3 divided by 4 pi r^2 because c3 is C divided by pi */ return(c3(t1 / r, t2 / r, t3 / r) / 4.0); } double c2(a, b) double a, b; { double z, z2; double c2(); /* This is the function C(a, b, 0) divided by pi - assumes a, b > 0 */ if( ( z2 = 1.0 - a * a - b * b) < 0.0 ) return(0.0); z = sqrt(z2); return((atan2(z, a * b) - a * atan2(z, b) - b * atan2(z, a)) / pi); } double c3(a, b, c) double 
a, b, c; { double za, zb, zc, sum; /* This is C(a,b,c) divided by pi. Arguments assumed > 0 */ if(a * a + b * b + c * c >= 1.0) return(0.0); za = sqrt(1 - b * b - c * c); zb = sqrt(1 - a * a - c * c); zc = sqrt(1 - a * a - b * b); sum = atan2(zb, a * c) + atan2(za, b * c) + atan2(zc, a * b) - a * atan2(zb, c) + a * atan2(b, zc) - b * atan2(za, c) + b * atan2(a, zc) - c * atan2(zb, a) + c * atan2(b, za); return(sum / pi - 1); } spatstat/src/lennard.c0000755000176200001440000000712113115271120014470 0ustar liggesusers#include #include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Lennard-Jones process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lennard { double sigma; double epsilon; double sigma2; /* sigma^2 */ double foureps; /* 4 * epsilon */ double d2min; /* minimum value of d^2 which yields nonzero intensity */ double d2max; /* maximum value of d^2 which has nontrivial contribution */ double *period; int per; } Lennard; /* MAXEXP is intended to be the largest x such that exp(-x) != 0 although the exact value is not needed */ #define MAXEXP (-log(DOUBLE_XMIN)) #define MINEXP (log(1.001)) /* initialiser function */ Cdata *lennardinit(state, model, algo) State state; Model model; Algor algo; { Lennard *lennard; double sigma2, foureps, minfrac, maxfrac; lennard = (Lennard *) R_alloc(1, sizeof(Lennard)); /* Interpret model parameters*/ lennard->sigma = model.ipar[0]; lennard->epsilon = model.ipar[1]; lennard->period = model.period; /* constants */ lennard->sigma2 = sigma2 = pow(lennard->sigma, 2); lennard->foureps = foureps = 4 * lennard->epsilon; /* thresholds where the interaction becomes trivial */ minfrac = pow(foureps/MAXEXP, (double) 1.0/6.0); if(minfrac > 0.5) minfrac = 0.5; maxfrac = pow(foureps/MINEXP, (double) 1.0/3.0); if(maxfrac < 2.0) maxfrac = 2.0; lennard->d2min = sigma2 * minfrac; lennard->d2max = sigma2 * maxfrac; /* periodic boundary conditions? 
*/ lennard->per = (model.period[0] > 0.0); return((Cdata *) lennard); } /* conditional intensity evaluator */ double lennardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, ratio6, pairsum, cifval; double sigma2, d2max, d2min; double *period; Lennard *lennard; DECLARE_CLOSE_D2_VARS; lennard = (Lennard *) cdata; sigma2 = lennard->sigma2; d2max = lennard->d2max; d2min = lennard->d2min; period = lennard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lennard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) { if(d2 < d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) { if(d2 < lennard->d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; jd2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } } cifval *= exp(lennard->foureps * pairsum); return cifval; } Cifns LennardCifns = { &lennardinit, &lennardcif, (updafunptr) NULL, NO}; spatstat/src/linnncross.h0000644000176200001440000000642713406057617015267 0ustar liggesusers/* linnncross.h Function body definitions with macros $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Macros used: FNAME name of function EXCLU whether serial numbers are provided WHICH whether 'nnwhich' is required Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(np, xp, yp, /* data points 'from' */ nq, xq, yq, /* data points 'to' */ nv, xv, yv, /* network vertices */ ns, from, to, 
/* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ #ifdef EXCLU idP, idQ, /* serial numbers for patterns p and q */ #endif huge, /* value taken as infinity */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ #ifdef EXCLU int *idP, *idQ; #endif double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j; int segPi, segQj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xqj, yqj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; #ifdef EXCLU int idPi; #endif #ifdef WHICH int whichmin; #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* main loop */ for(i = 0; i < Np; i++) { xpi = xp[i]; ypi = yp[i]; #ifdef EXCLU idPi = idP[i]; #endif segPi = psegmap[i]; nbi1 = from[segPi]; nbi2 = to[segPi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; #ifdef WHICH whichmin = nnwhich[i]; #endif for(j = 0; j < Nq; j++) { #ifdef EXCLU if(idQ[j] != idPi) { #endif xqj = xq[j]; yqj = yq[j]; segQj = qsegmap[j]; /* compute path distance between i and j */ if(segPi == segQj) { /* points i and j lie on the same segment; use Euclidean distance */ d = EUCLID(xpi, ypi, xqj, yqj); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segQj]; nbj2 = to[segQj]; d1Xj = 
EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; #ifdef WHICH whichmin = j; #endif } #ifdef EXCLU } #endif } /* commit nn distance for point i */ nndist[i] = dmin; #ifdef WHICH nnwhich[i] = whichmin; #endif } } spatstat/src/fardist.h0000644000176200001440000000254113406057617014524 0ustar liggesusers/* fardist.h Code template for fardist.c Macros used: FNAME function name SQUARED #defined if squared distances should be returned. Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2014/08/31 06:42:50 $ */ void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ dfar) /* output grid */ /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep; double *xp, *yp; /* outputs */ double *dfar; { int Nxcol, Nyrow, Npoints; int i, j, k, ijpos; double X0, Y0, Xstep, Ystep, yi, xj; double d2, d2max, dx, dy; Nxcol = *nx; Nyrow = *ny; Npoints = *np; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; if(Npoints == 0) return; /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { d2max = 0.0; for(k = 0; k < Npoints; k++) { dx = xj - xp[k]; dy = yi - yp[k]; d2 = dx * dx + dy * dy; if(d2 > d2max) d2max = d2; } ijpos = i + j * Nyrow; #ifdef SQUARED dfar[ijpos] = d2max; #else dfar[ijpos] = sqrt(d2max); #endif /* end of loop over grid points (i, j) */ } } } spatstat/src/dist2.h0000755000176200001440000000451313406057617014121 0ustar liggesusers/* dist2.h External declarations for the functions defined in dist2.c and In-line cpp macros for similar 
purposes $Revision: 1.20 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double dist2(double u, double v, double x, double y, double *period); double dist2either(double u, double v, double x, double y, double *period); int dist2thresh(double u, double v, double x, double y, double *period, double r2); int dist2Mthresh(double u, double v, double x, double y, double *period, double r2); /* Efficient macros to test closeness of points */ /* These variables must be declared (note: some files e.g. straush.c use 'RESIDUE' explicitly) */ #define DECLARE_CLOSE_VARS \ register double DX, DY, DXP, DYP, RESIDUE #define DECLARE_CLOSE_D2_VARS \ register double DX, DY, DXP, DYP, DX2 #define CLOSE(U,V,X,Y,R2) \ ((DX = X - U), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && \ ((DY = Y - V), \ (RESIDUE = RESIDUE - DY * DY), \ (RESIDUE > 0.0)))) #define CLOSE_D2(U,V,X,Y,R2,D2) \ ((DX = X - U), \ (DX2 = DX * DX), \ (DX2 < R2) && (((DY = Y - V), \ (D2 = DX2 + DY * DY), \ (D2 < R2)))) /* The following calculates X mod P, but it works only if X \in [-P, P] so that X is the difference between two values that lie in an interval of length P */ #define CLOSE_PERIODIC(U,V,X,Y,PERIOD,R2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? DY : DYP), \ (RESIDUE = RESIDUE - DY * DY), \ (RESIDUE > 0.0) ))) #define CLOSE_PERIODIC_D2(U,V,X,Y,PERIOD,R2,D2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (D2 = DX * DX), \ ((D2 < R2) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? 
DY : DYP), \ (D2 += DY * DY), \ (D2 < R2) ))) spatstat/src/multihard.c0000755000176200001440000000735713604115255015062 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiHard { int ntypes; double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc2; /* squared radii */ double range2; /* square of interaction range */ double *period; int per; } MultiHard; /* initialiser function */ Cdata *multihardinit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2; double h, h2, range2; MultiHard *multihard; multihard = (MultiHard *) R_alloc(1, sizeof(MultiHard)); multihard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multihard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multihard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); /* Copy and process model parameters*/ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { h = model.ipar[i + j*ntypes]; h2 = h * h; MAT(multihard->hc, i, j, ntypes) = h; MAT(multihard->hc2, i, j, ntypes) = h2; if(range2 < h2) range2 = h2; } } multihard->range2 = range2; /* periodic boundary conditions? 
*/ multihard->period = model.period; multihard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multihard); } /* conditional intensity evaluator */ double multihardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, ix, ixp1, j, mrk, mrkj; int *marks; double *x, *y; double u, v; double d2, range2, cifval; double *period; MultiHard *multihard; DECLARE_CLOSE_D2_VARS; multihard = (MultiHard *) cdata; range2 = multihard->range2; period = multihard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multihard->ntypes; #ifdef DEBUG Rprintf("scanning data\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multihard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; jhc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; jhc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiHardCifns = { &multihardinit, &multihardcif, (updafunptr) NULL, YES}; spatstat/src/areadiff.c0000755000176200001440000001550413406057617014632 0ustar liggesusers/* areadiff.c Area difference function Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 $Revision: 1.15 $ $Date: 
2018/12/18 02:43:11 $ A(x,r) = area of disc b(0,r) not covered by discs b(x_i,r) for x_i in x Area estimated by point-counting on a fine grid For use in area-interaction model and related calculations */ #undef DEBUG #include #include #include #include "chunkloop.h" #include "constants.h" /* Original version areadiff() 1 point u No trimming of discs */ void areadiff(rad,x,y,nn,ngrid,answer) /* inputs */ double *rad; /* radius */ double *x, *y; /* coordinate vectors for point pattern */ int *nn; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ /* output */ double *answer; /* computed area */ { double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif; int i, j, k, m, n, count, covered; r = *rad; r2 = r * r; n = *nn; m = *ngrid; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points */ for(i = 0, xg = -r; i < m; i++, xg += dx) { a2 = r2 - xg *xg; for(j = 0, yg = -r; j < m; j++, yg += dy) { /* test for inside disc */ if(yg * yg < a2) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; if(n > 0) { for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } } if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } } #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area */ *answer = ((double) count) * dx * dy; } /* similar function, handles multiple values of 'r' */ void areadifs(rad,nrads,x,y,nxy,ngrid,answer) /* inputs */ double *rad; /* vector of radii */ int *nrads; /* length of 'rads' */ double *x, *y; /* coordinate vectors for point pattern */ int *nxy; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid 
*/ /* output */ double *answer; /* computed areas (vector of length 'nrads') */ { double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif; int i, j, k, l, m, n, nr, m0, count, covered, maxchunk; n = *nxy; nr = *nrads; m = *ngrid; /* run through radii in chunks of 2^14 */ OUTERCHUNKLOOP(l, nr, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(l, nr, maxchunk, 16384) { r = rad[l]; if(r == 0.0) { answer[l] = 0.0; } else if(n == 0) { answer[l] = M_PI * r * r; } else { r2 = r * r; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points in disc of radius r */ for(i = 0, xg = -r; i < m; i++, xg += dx) { a2 = r2 - xg * xg; m0 = (a2 > 0.0) ? floor(sqrt(a2)/dy) : 0; for(j = -m0, yg = -m0 * dy; j <= m0; j++, yg += dy) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } /* end of loop through data points */ if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } /* end of loop over grid points */ #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area for this value of r*/ answer[l] = ((double) count) * dx * dy; } /* end of if(r==0).. 
else {...} */ } } } /* Modified version multiple test points u discs constrained inside a rectangle */ void areaBdif(rad,nrads,x,y,nxy,ngrid,x0,y0,x1,y1,answer) /* inputs */ double *rad; /* vector of radii */ int *nrads; /* length of 'rads' */ double *x, *y; /* coordinate vectors for point pattern */ int *nxy; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ double *x0,*y0,*x1,*y1; /* constraint rectangle */ /* output */ double *answer; /* computed areas (vector of length 'nrads') */ { double dx, dy, xg, yg, r, r2, a, a2, b2, xdif, ydif; double xleft, xright, ylow, yhigh; double xmin, ymin, xmax, ymax; int i, j, k, l, m, n, nr, ileft, iright, mlow, mhigh, count, covered; n = *nxy; nr = *nrads; m = *ngrid; xmin = *x0; ymin = *y0; xmax = *x1; ymax = *y1; /* run through radii */ for(l = 0; l < nr; l++) { r = rad[l]; if(r == 0.0) { answer[l] = 0.0; } else if (n == 0) { answer[l]= M_PI * r * r; } else { r2 = r * r; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points in disc intersected with box */ xleft = (xmin > -r) ? xmin : -r; xright = (xmax < r) ? xmax : r; ileft = ceil(xleft/dx); iright = floor(xright/dx); if(ileft <= iright) { for(i = ileft, xg = ileft * dx; i <= iright; i++, xg += dx) { a2 = r2 - xg * xg; a = (a2 > 0) ? sqrt(a2): 0.0; yhigh = (ymax < a) ? ymax: a; ylow = (ymin > -a) ? 
ymin: -a; mhigh = floor(yhigh/dy); mlow = ceil(ylow/dy); if(mlow <= mhigh) { for(j = mlow, yg = mlow * dy; j <= mhigh; j++, yg += dy) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } /* end of loop over data points */ if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } } } /* end of loop over grid points */ #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area for this value of r*/ answer[l] = ((double) count) * dx * dy; } /* end of if(r==0).. else {...} */ } /* end of loop over r values */ } spatstat/src/metricPdist.c0000644000176200001440000000320213406057617015345 0ustar liggesusers/* metricPdist.c Distance transform of binary pixel image using arbitrary metric This code #includes metricPdist.h multiple times. 
$Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ /* Once-only declarations: */ #include #include "raster.h" void dist_to_bdry(); void shape_raster(); #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) /* Definitions for each metric For each definition we need the following macros: FNAME Name of the function (that will be called from R) MARGLIST List of arguments to FNAME specifying the metric MARGDECLARE Declarations of these function arguments MTEMPDECLARE Declaration and initialisation of variables for use by metric METRIC Expression for calculating the metric (x1,y1,x2,y2) */ /* (1) Rectangular metric Unit ball is a rectangle with width 1 unit, height 'aspect' units. mdt = metric distance transform P = pixel image input O = orthogonally oriented to axis rect = rectangular */ #define FNAME mdtPOrect #define MARGLIST aspect #define MARGDECLARE double *aspect #define MTEMPDECLARE double asp; asp=*aspect #define METRIC(X,Y,XX,YY) rectdist(X,Y,XX,YY,asp) double rectdist(x, y, xx, yy, asp) double x, y, xx, yy, asp; { double dx, dy, d; dx = x-xx; dy = (y-yy)/asp; if(dx < 0) dx = -dx; if(dy < 0) dy = -dy; d = (dx > dy)? 
dx : dy; return d; } #include "metricPdist.h" #undef FNAME #undef MARGLIST #undef MARGDECLARE #undef MTEMPDECLARE #undef METRIC spatstat/src/quasirandom.c0000644000176200001440000000105613406057617015406 0ustar liggesusers/* quasirandom.c Quasi-random sequence generators Copyright (C) Adrian Baddeley 2014 GNU Public Licence version 2 | 3 $Revision: 1.1 $ $Date: 2014/03/17 03:31:59 $ */ #include void Corput(base, n, result) int *base, *n; double *result; { int b, N, i, j; register double f, f0, z; N = *n; b = *base; f0 = 1.0/((double) b); for(i = 0; i < N; i++) { j = i+1; z = 0; f = f0; while(j > 0) { z = z + f * (j % b); j = j/b; f = f / ((double) b); } result[i] = z; } } spatstat/src/closefuns.h0000644000176200001440000006314013406057617015073 0ustar liggesusers/* closefuns.h Function definitions to be #included in closepair.c several times with different values of macros. Macros used: CLOSEFUN name of function for 'closepairs' CROSSFUN name of function for 'crosspairs' DIST if defined, also return d COORDS if defined, also return xi, yi, xj, yj, dx, dy THRESH if defined, also return 1(d < s) ZCOORD if defined, coordinates are 3-dimensional SINGLE if defined, capture only i < j $Revision: 1.11 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifdef ZCOORD #define SPACEDIM 3 #else #define SPACEDIM 2 #endif SEXP CLOSEFUN(SEXP xx, SEXP yy, #ifdef ZCOORD SEXP zz, #endif SEXP rr, #ifdef THRESH SEXP ss, #endif SEXP nguess) { double *x, *y; double xi, yi, rmax, r2max, rmaxplus, dx, dy, d2; #ifdef ZCOORD double *z; double zi, dz; #endif int n, k, kmax, kmaxold, maxchunk, i, j, m; /* local storage */ int *iout, *jout; /* R objects in return value */ SEXP Out, iOut, jOut; /* external storage pointers */ int *iOutP, *jOutP; #ifdef COORDS double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout; SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut; double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, 
*dyOutP; #ifdef ZCOORD double *ziout, *zjout, *dzout; SEXP ziOut, zjOut, dzOut; double *ziOutP, *zjOutP, *dzOutP; #endif #endif #ifdef DIST double *dout; SEXP dOut; double *dOutP; #endif #ifdef THRESH double s, s2; int *tout; SEXP tOut; int *tOutP; #endif /* protect R objects from garbage collector */ PROTECT(xx = AS_NUMERIC(xx)); PROTECT(yy = AS_NUMERIC(yy)); #ifdef ZCOORD PROTECT(zz = AS_NUMERIC(zz)); #endif PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); #ifdef THRESH PROTECT(ss = AS_NUMERIC(ss)); #define NINPUTS (3+SPACEDIM) #else #define NINPUTS (2+SPACEDIM) #endif /* Translate arguments from R to C */ x = NUMERIC_POINTER(xx); y = NUMERIC_POINTER(yy); #ifdef ZCOORD z = NUMERIC_POINTER(zz); #endif n = LENGTH(xx); rmax = *(NUMERIC_POINTER(rr)); kmax = *(INTEGER_POINTER(nguess)); r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef THRESH s = *(NUMERIC_POINTER(ss)); s2 = s * s; #endif k = 0; /* k is the next available storage location and also the current length of the list */ if(n > 0 && kmax > 0) { /* allocate space */ iout = (int *) R_alloc(kmax, sizeof(int)); jout = (int *) R_alloc(kmax, sizeof(int)); #ifdef COORDS xiout = (double *) R_alloc(kmax, sizeof(double)); yiout = (double *) R_alloc(kmax, sizeof(double)); xjout = (double *) R_alloc(kmax, sizeof(double)); yjout = (double *) R_alloc(kmax, sizeof(double)); dxout = (double *) R_alloc(kmax, sizeof(double)); dyout = (double *) R_alloc(kmax, sizeof(double)); #ifdef ZCOORD ziout = (double *) R_alloc(kmax, sizeof(double)); zjout = (double *) R_alloc(kmax, sizeof(double)); dzout = (double *) R_alloc(kmax, sizeof(double)); #endif #endif #ifdef DIST dout = (double *) R_alloc(kmax, sizeof(double)); #endif #ifdef THRESH tout = (int *) R_alloc(kmax, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef ZCOORD zi = z[i]; #endif 
#ifndef SINGLE if(i > 0) { /* scan backward */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; if(dx < -rmaxplus) break; dy = y[j] - yi; d2 = dx * dx + dy * dy; #ifdef ZCOORD if(d2 <= r2max) { dz = z[j] - zi; d2 = d2 + dz * dz; #endif if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { /* overflow; allocate more space */ kmaxold = kmax; kmax = 2 * kmax; iout = intRealloc(iout, kmaxold, kmax); jout = intRealloc(jout, kmaxold, kmax); #ifdef COORDS xiout = dblRealloc(xiout, kmaxold, kmax); yiout = dblRealloc(yiout, kmaxold, kmax); xjout = dblRealloc(xjout, kmaxold, kmax); yjout = dblRealloc(yjout, kmaxold, kmax); dxout = dblRealloc(dxout, kmaxold, kmax); dyout = dblRealloc(dyout, kmaxold, kmax); #ifdef ZCOORD ziout = dblRealloc(ziout, kmaxold, kmax); zjout = dblRealloc(zjout, kmaxold, kmax); dzout = dblRealloc(dzout, kmaxold, kmax); #endif #endif #ifdef DIST dout = dblRealloc(dout, kmaxold, kmax); #endif #ifdef THRESH tout = intRealloc(tout, kmaxold, kmax); #endif } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; #ifdef COORDS xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; #ifdef ZCOORD ziout[k] = zi; zjout[k] = z[j]; dzout[k] = dz; #endif #endif #ifdef DIST dout[k] = sqrt(d2); #endif #ifdef THRESH tout[k] = (d2 <= s2) ? 
1 : 0; #endif ++k; } #ifdef ZCOORD } #endif } } #endif if(i + 1 < n) { /* scan forward */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; if(dx > rmaxplus) break; dy = y[j] - yi; d2 = dx * dx + dy * dy; #ifdef ZCOORD if(d2 <= r2max) { dz = z[j] - zi; d2 = d2 + dz * dz; #endif if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { /* overflow; allocate more space */ kmaxold = kmax; kmax = 2 * kmax; iout = intRealloc(iout, kmaxold, kmax); jout = intRealloc(jout, kmaxold, kmax); #ifdef COORDS xiout = dblRealloc(xiout, kmaxold, kmax); yiout = dblRealloc(yiout, kmaxold, kmax); xjout = dblRealloc(xjout, kmaxold, kmax); yjout = dblRealloc(yjout, kmaxold, kmax); dxout = dblRealloc(dxout, kmaxold, kmax); dyout = dblRealloc(dyout, kmaxold, kmax); #ifdef ZCOORD ziout = dblRealloc(ziout, kmaxold, kmax); zjout = dblRealloc(zjout, kmaxold, kmax); dzout = dblRealloc(dzout, kmaxold, kmax); #endif #endif #ifdef DIST dout = dblRealloc(dout, kmaxold, kmax); #endif #ifdef THRESH tout = intRealloc(tout, kmaxold, kmax); #endif } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; #ifdef COORDS xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; #ifdef ZCOORD ziout[k] = zi; zjout[k] = z[j]; dzout[k] = dz; #endif #endif #ifdef DIST dout[k] = sqrt(d2); #endif #ifdef THRESH tout[k] = (d2 <= s2) ? 
1 : 0; #endif ++k; } #ifdef ZCOORD } #endif } } /* end of i loop */ } } } /* return a list of vectors */ PROTECT(iOut = NEW_INTEGER(k)); PROTECT(jOut = NEW_INTEGER(k)); #ifdef COORDS PROTECT(xiOut = NEW_NUMERIC(k)); PROTECT(yiOut = NEW_NUMERIC(k)); PROTECT(xjOut = NEW_NUMERIC(k)); PROTECT(yjOut = NEW_NUMERIC(k)); PROTECT(dxOut = NEW_NUMERIC(k)); PROTECT(dyOut = NEW_NUMERIC(k)); #ifdef ZCOORD PROTECT(ziOut = NEW_NUMERIC(k)); PROTECT(zjOut = NEW_NUMERIC(k)); PROTECT(dzOut = NEW_NUMERIC(k)); #endif #endif #ifdef DIST PROTECT(dOut = NEW_NUMERIC(k)); #endif #ifdef THRESH PROTECT(tOut = NEW_INTEGER(k)); #endif if(k > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); #ifdef COORDS xiOutP = NUMERIC_POINTER(xiOut); yiOutP = NUMERIC_POINTER(yiOut); xjOutP = NUMERIC_POINTER(xjOut); yjOutP = NUMERIC_POINTER(yjOut); dxOutP = NUMERIC_POINTER(dxOut); dyOutP = NUMERIC_POINTER(dyOut); #ifdef ZCOORD ziOutP = NUMERIC_POINTER(ziOut); zjOutP = NUMERIC_POINTER(zjOut); dzOutP = NUMERIC_POINTER(dzOut); #endif #endif #ifdef DIST dOutP = NUMERIC_POINTER(dOut); #endif #ifdef THRESH tOutP = INTEGER_POINTER(tOut); #endif for(m = 0; m < k; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; #ifdef COORDS xiOutP[m] = xiout[m]; yiOutP[m] = yiout[m]; xjOutP[m] = xjout[m]; yjOutP[m] = yjout[m]; dxOutP[m] = dxout[m]; dyOutP[m] = dyout[m]; #ifdef ZCOORD ziOutP[m] = ziout[m]; zjOutP[m] = zjout[m]; dzOutP[m] = dzout[m]; #endif #endif #ifdef DIST dOutP[m] = dout[m]; #endif #ifdef THRESH tOutP[m] = tout[m]; #endif } } #define HEAD 2 #ifdef THRESH #define NECK 1 #else #define NECK 0 #endif #ifdef COORDS #define MIDDLE (3*SPACEDIM) #else #define MIDDLE 0 #endif #ifdef DIST #define TAIL 1 #else #define TAIL 0 #endif PROTECT(Out = NEW_LIST(HEAD+NECK+MIDDLE+TAIL)); SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); #ifdef THRESH SET_VECTOR_ELT(Out, HEAD, tOut); #endif #ifdef COORDS #ifdef ZCOORD SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut); 
SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut); SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut); SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut); #else SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut); #endif #endif #ifdef DIST SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut); #endif UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL); /* 1 is for 'Out' itself */ return(Out); } #undef NINPUTS #undef HEAD #undef NECK #undef MIDDLE #undef TAIL /* ........................................................ */ SEXP CROSSFUN(SEXP xx1, SEXP yy1, #ifdef ZCOORD SEXP zz1, #endif SEXP xx2, SEXP yy2, #ifdef ZCOORD SEXP zz2, #endif SEXP rr, #ifdef THRESH SEXP ss, #endif SEXP nguess) { /* input vectors */ double *x1, *y1, *x2, *y2; #ifdef ZCOORD double *z1, *z2; #endif /* lengths */ int n1, n2, nout, noutmax, noutmaxold, maxchunk; /* distance parameter */ double rmax, r2max, rmaxplus; /* indices */ int i, j, jleft, m; /* temporary values */ double x1i, y1i, xleft, dx, dy, dx2, d2; #ifdef ZCOORD double z1i, dz; #endif /* local storage */ int *iout, *jout; /* R objects in return value */ SEXP Out, iOut, jOut; /* external storage pointers */ int *iOutP, *jOutP; #ifdef COORDS SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut; double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout; #ifdef ZCOORD SEXP ziOut, zjOut, dzOut; double *ziOutP, *zjOutP, *dzOutP; double *ziout, *zjout, *dzout; #endif #endif #ifdef DIST SEXP dOut; double *dOutP; double *dout; #endif #ifdef THRESH double s, s2; int *tout; SEXP tOut; int *tOutP; #endif /* protect R objects from garbage collector */ PROTECT(xx1 = AS_NUMERIC(xx1)); PROTECT(yy1 = 
AS_NUMERIC(yy1)); PROTECT(xx2 = AS_NUMERIC(xx2)); PROTECT(yy2 = AS_NUMERIC(yy2)); #ifdef ZCOORD PROTECT(zz1 = AS_NUMERIC(zz1)); PROTECT(zz2 = AS_NUMERIC(zz2)); #endif PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); #ifdef THRESH PROTECT(ss = AS_NUMERIC(ss)); #define NINPUTS (2*SPACEDIM + 3) #else #define NINPUTS (2*SPACEDIM + 2) #endif /* Translate arguments from R to C */ x1 = NUMERIC_POINTER(xx1); y1 = NUMERIC_POINTER(yy1); x2 = NUMERIC_POINTER(xx2); y2 = NUMERIC_POINTER(yy2); #ifdef ZCOORD z1 = NUMERIC_POINTER(zz1); z2 = NUMERIC_POINTER(zz2); #endif n1 = LENGTH(xx1); n2 = LENGTH(xx2); rmax = *(NUMERIC_POINTER(rr)); noutmax = *(INTEGER_POINTER(nguess)); r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef THRESH s = *(NUMERIC_POINTER(ss)); s2 = s * s; #endif nout = 0; /* nout is the next available storage location and also the current length of the list */ if(n1 > 0 && n2 > 0 && noutmax > 0) { /* allocate space */ iout = (int *) R_alloc(noutmax, sizeof(int)); jout = (int *) R_alloc(noutmax, sizeof(int)); #ifdef COORDS xiout = (double *) R_alloc(noutmax, sizeof(double)); yiout = (double *) R_alloc(noutmax, sizeof(double)); xjout = (double *) R_alloc(noutmax, sizeof(double)); yjout = (double *) R_alloc(noutmax, sizeof(double)); dxout = (double *) R_alloc(noutmax, sizeof(double)); dyout = (double *) R_alloc(noutmax, sizeof(double)); #ifdef ZCOORD ziout = (double *) R_alloc(noutmax, sizeof(double)); zjout = (double *) R_alloc(noutmax, sizeof(double)); dzout = (double *) R_alloc(noutmax, sizeof(double)); #endif #endif #ifdef DIST dout = (double *) R_alloc(noutmax, sizeof(double)); #endif #ifdef THRESH tout = (int *) R_alloc(noutmax, sizeof(int)); #endif jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for( ; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; #ifdef ZCOORD z1i = z1[i]; #endif /* adjust starting point jleft */ xleft = x1i - rmaxplus; while((x2[jleft] < xleft) 
&& (jleft+1 < n2)) ++jleft; /* process from j = jleft until dx > rmax + epsilon */ for(j=jleft; j < n2; j++) { /* squared interpoint distance */ dx = x2[j] - x1i; if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; d2 = dx2 + dy * dy; #ifdef ZCOORD if(d2 <= r2max) { dz = z2[j] - z1i; d2 = d2 + dz * dz; #endif if(d2 <= r2max) { /* add this (i, j) pair to output */ if(nout >= noutmax) { /* overflow; allocate more space */ noutmaxold = noutmax; noutmax = 2 * noutmax; iout = intRealloc(iout, noutmaxold, noutmax); jout = intRealloc(jout, noutmaxold, noutmax); #ifdef COORDS xiout = dblRealloc(xiout, noutmaxold, noutmax); yiout = dblRealloc(yiout, noutmaxold, noutmax); xjout = dblRealloc(xjout, noutmaxold, noutmax); yjout = dblRealloc(yjout, noutmaxold, noutmax); dxout = dblRealloc(dxout, noutmaxold, noutmax); dyout = dblRealloc(dyout, noutmaxold, noutmax); #ifdef ZCOORD ziout = dblRealloc(ziout, noutmaxold, noutmax); zjout = dblRealloc(zjout, noutmaxold, noutmax); dzout = dblRealloc(dzout, noutmaxold, noutmax); #endif #endif #ifdef DIST dout = dblRealloc(dout, noutmaxold, noutmax); #endif #ifdef THRESH tout = intRealloc(tout, noutmaxold, noutmax); #endif } iout[nout] = i + 1; /* R indexing */ jout[nout] = j + 1; #ifdef COORDS xiout[nout] = x1i; yiout[nout] = y1i; xjout[nout] = x2[j]; yjout[nout] = y2[j]; dxout[nout] = dx; dyout[nout] = dy; #ifdef ZCOORD ziout[nout] = z1i; zjout[nout] = z2[j]; dzout[nout] = dz; #endif #endif #ifdef DIST dout[nout] = sqrt(d2); #endif #ifdef THRESH tout[nout] = (d2 <= s2) ? 
1 : 0; #endif ++nout; } #ifdef ZCOORD } #endif } } } } /* return a list of vectors */ PROTECT(iOut = NEW_INTEGER(nout)); PROTECT(jOut = NEW_INTEGER(nout)); #ifdef COORDS PROTECT(xiOut = NEW_NUMERIC(nout)); PROTECT(yiOut = NEW_NUMERIC(nout)); PROTECT(xjOut = NEW_NUMERIC(nout)); PROTECT(yjOut = NEW_NUMERIC(nout)); PROTECT(dxOut = NEW_NUMERIC(nout)); PROTECT(dyOut = NEW_NUMERIC(nout)); #ifdef ZCOORD PROTECT(ziOut = NEW_NUMERIC(nout)); PROTECT(zjOut = NEW_NUMERIC(nout)); PROTECT(dzOut = NEW_NUMERIC(nout)); #endif #endif #ifdef DIST PROTECT(dOut = NEW_NUMERIC(nout)); #endif #ifdef THRESH PROTECT(tOut = NEW_INTEGER(nout)); #endif if(nout > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); #ifdef COORDS xiOutP = NUMERIC_POINTER(xiOut); yiOutP = NUMERIC_POINTER(yiOut); xjOutP = NUMERIC_POINTER(xjOut); yjOutP = NUMERIC_POINTER(yjOut); dxOutP = NUMERIC_POINTER(dxOut); dyOutP = NUMERIC_POINTER(dyOut); #ifdef ZCOORD ziOutP = NUMERIC_POINTER(ziOut); zjOutP = NUMERIC_POINTER(zjOut); dzOutP = NUMERIC_POINTER(dzOut); #endif #endif #ifdef DIST dOutP = NUMERIC_POINTER(dOut); #endif #ifdef THRESH tOutP = INTEGER_POINTER(tOut); #endif for(m = 0; m < nout; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; #ifdef COORDS xiOutP[m] = xiout[m]; yiOutP[m] = yiout[m]; xjOutP[m] = xjout[m]; yjOutP[m] = yjout[m]; dxOutP[m] = dxout[m]; dyOutP[m] = dyout[m]; #ifdef ZCOORD ziOutP[m] = ziout[m]; zjOutP[m] = zjout[m]; dzOutP[m] = dzout[m]; #endif #endif #ifdef DIST dOutP[m] = dout[m]; #endif #ifdef THRESH tOutP[m] = tout[m]; #endif } } #define HEAD 2 #ifdef THRESH #define NECK 1 #else #define NECK 0 #endif #ifdef COORDS #define MIDDLE (3*SPACEDIM) #else #define MIDDLE 0 #endif #ifdef DIST #define TAIL 1 #else #define TAIL 0 #endif PROTECT(Out = NEW_LIST(HEAD+NECK+MIDDLE+TAIL)); SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); #ifdef THRESH SET_VECTOR_ELT(Out, HEAD, tOut); #endif #ifdef COORDS #ifdef ZCOORD SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, 
HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut); SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut); SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut); #else SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut); #endif #endif #ifdef DIST SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut); #endif UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL); /* 1 is for 'Out' itself */ return(Out); } #undef NINPUTS #undef HEAD #undef NECK #undef MIDDLE #undef TAIL /* ........................................................ */ /* Alternative code for CLOSEFUN, based on algorithm in CROSSFUN */ #define ALT_ALGO(NAME) ALT_PREFIX(NAME) #define ALT_PREFIX(NAME) alt ## NAME SEXP ALT_ALGO(CLOSEFUN)(SEXP xx, SEXP yy, #ifdef ZCOORD SEXP zz, #endif SEXP rr, #ifdef THRESH SEXP ss, #endif SEXP nguess) { /* input vectors */ double *x, *y; #ifdef ZCOORD double *z; #endif /* lengths */ int n, nout, noutmax, noutmaxold, maxchunk; /* distance parameter */ double rmax, r2max, rmaxplus; /* indices */ int i, j, jleft, m; /* temporary values */ double xi, yi, xleft, dx, dy, dx2, d2; #ifdef ZCOORD double zi, dz; #endif /* local storage */ int *iout, *jout; /* R objects in return value */ SEXP Out, iOut, jOut; /* external storage pointers */ int *iOutP, *jOutP; #ifdef COORDS SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut; double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout; #ifdef ZCOORD SEXP ziOut, zjOut, dzOut; double *ziOutP, *zjOutP, *dzOutP; double *ziout, *zjout, *dzout; #endif #endif #ifdef DIST SEXP dOut; double *dOutP; double *dout; #endif #ifdef THRESH double s, s2; int *tout; SEXP tOut; int *tOutP; 
#endif /* protect R objects from garbage collector */ PROTECT(xx = AS_NUMERIC(xx)); PROTECT(yy = AS_NUMERIC(yy)); #ifdef ZCOORD PROTECT(zz = AS_NUMERIC(zz)); #endif PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); #ifdef THRESH PROTECT(ss = AS_NUMERIC(ss)); #define NINPUTS (SPACEDIM + 3) #else #define NINPUTS (SPACEDIM + 2) #endif /* Translate arguments from R to C */ x = NUMERIC_POINTER(xx); y = NUMERIC_POINTER(yy); #ifdef ZCOORD z = NUMERIC_POINTER(zz); #endif n = LENGTH(xx); rmax = *(NUMERIC_POINTER(rr)); noutmax = *(INTEGER_POINTER(nguess)); r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef THRESH s = *(NUMERIC_POINTER(ss)); s2 = s * s; #endif nout = 0; /* nout is the next available storage location and also the current length of the list */ if(n > 0 && noutmax > 0) { /* allocate space */ iout = (int *) R_alloc(noutmax, sizeof(int)); jout = (int *) R_alloc(noutmax, sizeof(int)); #ifdef COORDS xiout = (double *) R_alloc(noutmax, sizeof(double)); yiout = (double *) R_alloc(noutmax, sizeof(double)); xjout = (double *) R_alloc(noutmax, sizeof(double)); yjout = (double *) R_alloc(noutmax, sizeof(double)); dxout = (double *) R_alloc(noutmax, sizeof(double)); dyout = (double *) R_alloc(noutmax, sizeof(double)); #ifdef ZCOORD ziout = (double *) R_alloc(noutmax, sizeof(double)); zjout = (double *) R_alloc(noutmax, sizeof(double)); dzout = (double *) R_alloc(noutmax, sizeof(double)); #endif #endif #ifdef DIST dout = (double *) R_alloc(noutmax, sizeof(double)); #endif #ifdef THRESH tout = (int *) R_alloc(noutmax, sizeof(int)); #endif jleft = 0; i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for( ; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef ZCOORD zi = z[i]; #endif /* adjust starting point jleft */ xleft = xi - rmaxplus; while((x[jleft] < xleft) && (jleft+1 < n)) ++jleft; /* process from j = jleft until dx > rmax + epsilon */ for(j=jleft; j < n; j++) { /* squared interpoint 
distance */ dx = x[j] - xi; if(dx > rmaxplus) break; dx2 = dx * dx; dy = y[j] - yi; d2 = dx2 + dy * dy; #ifdef ZCOORD if(d2 <= r2max) { dz = z[j] - zi; d2 = d2 + dz * dz; #endif if(d2 <= r2max) { /* add this (i, j) pair to output */ if(nout >= noutmax) { /* overflow; allocate more space */ noutmaxold = noutmax; noutmax = 2 * noutmax; iout = intRealloc(iout, noutmaxold, noutmax); jout = intRealloc(jout, noutmaxold, noutmax); #ifdef COORDS xiout = dblRealloc(xiout, noutmaxold, noutmax); yiout = dblRealloc(yiout, noutmaxold, noutmax); xjout = dblRealloc(xjout, noutmaxold, noutmax); yjout = dblRealloc(yjout, noutmaxold, noutmax); dxout = dblRealloc(dxout, noutmaxold, noutmax); dyout = dblRealloc(dyout, noutmaxold, noutmax); #ifdef ZCOORD ziout = dblRealloc(ziout, noutmaxold, noutmax); zjout = dblRealloc(zjout, noutmaxold, noutmax); dzout = dblRealloc(dzout, noutmaxold, noutmax); #endif #endif #ifdef DIST dout = dblRealloc(dout, noutmaxold, noutmax); #endif #ifdef THRESH tout = intRealloc(tout, noutmaxold, noutmax); #endif } iout[nout] = i + 1; /* R indexing */ jout[nout] = j + 1; #ifdef COORDS xiout[nout] = xi; yiout[nout] = yi; xjout[nout] = x[j]; yjout[nout] = y[j]; dxout[nout] = dx; dyout[nout] = dy; #ifdef ZCOORD ziout[nout] = zi; zjout[nout] = z[j]; dzout[nout] = dz; #endif #endif #ifdef DIST dout[nout] = sqrt(d2); #endif #ifdef THRESH tout[nout] = (d2 <= s2) ? 
1 : 0; #endif ++nout; } #ifdef ZCOORD } #endif } } } } /* return a list of vectors */ PROTECT(iOut = NEW_INTEGER(nout)); PROTECT(jOut = NEW_INTEGER(nout)); #ifdef COORDS PROTECT(xiOut = NEW_NUMERIC(nout)); PROTECT(yiOut = NEW_NUMERIC(nout)); PROTECT(xjOut = NEW_NUMERIC(nout)); PROTECT(yjOut = NEW_NUMERIC(nout)); PROTECT(dxOut = NEW_NUMERIC(nout)); PROTECT(dyOut = NEW_NUMERIC(nout)); #ifdef ZCOORD PROTECT(ziOut = NEW_NUMERIC(nout)); PROTECT(zjOut = NEW_NUMERIC(nout)); PROTECT(dzOut = NEW_NUMERIC(nout)); #endif #endif #ifdef DIST PROTECT(dOut = NEW_NUMERIC(nout)); #endif #ifdef THRESH PROTECT(tOut = NEW_INTEGER(nout)); #endif if(nout > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); #ifdef COORDS xiOutP = NUMERIC_POINTER(xiOut); yiOutP = NUMERIC_POINTER(yiOut); xjOutP = NUMERIC_POINTER(xjOut); yjOutP = NUMERIC_POINTER(yjOut); dxOutP = NUMERIC_POINTER(dxOut); dyOutP = NUMERIC_POINTER(dyOut); #ifdef ZCOORD ziOutP = NUMERIC_POINTER(ziOut); zjOutP = NUMERIC_POINTER(zjOut); dzOutP = NUMERIC_POINTER(dzOut); #endif #endif #ifdef DIST dOutP = NUMERIC_POINTER(dOut); #endif #ifdef THRESH tOutP = INTEGER_POINTER(tOut); #endif for(m = 0; m < nout; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; #ifdef COORDS xiOutP[m] = xiout[m]; yiOutP[m] = yiout[m]; xjOutP[m] = xjout[m]; yjOutP[m] = yjout[m]; dxOutP[m] = dxout[m]; dyOutP[m] = dyout[m]; #ifdef ZCOORD ziOutP[m] = ziout[m]; zjOutP[m] = zjout[m]; dzOutP[m] = dzout[m]; #endif #endif #ifdef DIST dOutP[m] = dout[m]; #endif #ifdef THRESH tOutP[m] = tout[m]; #endif } } #define HEAD 2 #ifdef THRESH #define NECK 1 #else #define NECK 0 #endif #ifdef COORDS #define MIDDLE (3*SPACEDIM) #else #define MIDDLE 0 #endif #ifdef DIST #define TAIL 1 #else #define TAIL 0 #endif PROTECT(Out = NEW_LIST(HEAD+NECK+MIDDLE+TAIL)); SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); #ifdef THRESH SET_VECTOR_ELT(Out, HEAD, tOut); #endif #ifdef COORDS #ifdef ZCOORD SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, 
HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, ziOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, zjOut); SET_VECTOR_ELT(Out, HEAD+NECK+6, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+7, dyOut); SET_VECTOR_ELT(Out, HEAD+NECK+8, dzOut); #else SET_VECTOR_ELT(Out, HEAD+NECK, xiOut); SET_VECTOR_ELT(Out, HEAD+NECK+1, yiOut); SET_VECTOR_ELT(Out, HEAD+NECK+2, xjOut); SET_VECTOR_ELT(Out, HEAD+NECK+3, yjOut); SET_VECTOR_ELT(Out, HEAD+NECK+4, dxOut); SET_VECTOR_ELT(Out, HEAD+NECK+5, dyOut); #endif #endif #ifdef DIST SET_VECTOR_ELT(Out, HEAD+NECK+MIDDLE, dOut); #endif UNPROTECT(NINPUTS+1+HEAD+NECK+MIDDLE+TAIL); /* 1 is for 'Out' itself */ return(Out); } #undef NINPUTS #undef HEAD #undef NECK #undef MIDDLE #undef TAIL #undef ALT_ALGO #undef ALT_PREFIX spatstat/src/nearestpix.c0000644000176200001440000000506513432703010015231 0ustar liggesusers/* nearestpix.c Find the nearest TRUE pixel to a given (x,y) location Query locations (x,y) are transformed to coordinates in which the pixels are unit squares and the pixel centres start at (0,0) $Revision: 1.2 $ $Date: 2019/02/19 04:28:47 $ */ #include #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(), fround(); void nearestvalidpixel(n, x, y, nr, nc, aspect, z, nsearch, rr, cc ) int *n; /* number of query points */ double *x, *y; /* coordinates of query points (transformed) */ int *nr, *nc; /* matrix dimensions */ double *aspect; /* aspect ratio (y/x) of original pixels */ int *z; /* entries of logical matrix */ int *nsearch; /* maximum permitted number of pixel steps on each axis */ int *rr, *cc; /* row and column indices (-1) of nearest pixel centre */ { int maxchunk, N, Nrow, Ncol, maxrow, maxcol, maxsearch; double asp, xi, yi, ddd, ddi, huge, deltax, deltay; int i, row, col, zvalue; int rrr, ccc, rri, cci, startrow, endrow, startcol, endcol; N = *n; Nrow = *nr; Ncol = *nc; maxsearch = *nsearch; asp = *aspect; maxrow = Nrow - 1; 
maxcol = Ncol - 1; huge = sqrt(((double) Ncol) * ((double) Ncol) + asp * asp * ((double) Nrow) * ((double) Nrow)); OUTERCHUNKLOOP(i, N, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 8196) { xi = x[i]; yi = y[i]; row = (int) fround(yi, (double) 0); col = (int) fround(xi, (double) 0); if(row < 0) row = 0; else if(row > maxrow) row = maxrow; if(col < 0) col = 0; else if(col > maxcol) col = maxcol; zvalue = z[row + Nrow * col]; if(zvalue != 0) { /* pixel is TRUE */ rr[i] = row; cc[i] = col; } else { /* initialise result to NA */ rri = cci = -1; ddi = huge; /* search neighbouring pixels */ startrow = imax2(row - maxsearch, 0); endrow = imin2(row + maxsearch, maxrow); startcol = imax2(col - maxsearch, 0); endcol = imin2(col + maxsearch, maxcol); if(startrow <= endrow && startcol <= endcol) { for(rrr = startrow; rrr <= endrow; rrr++) { for(ccc = startcol; ccc <= endcol; ccc++) { zvalue = z[rrr + Nrow * ccc]; if(zvalue != 0) { /* pixel is TRUE */ deltax = xi - (double) ccc; deltay = asp * (yi - (double) rrr); ddd = sqrt(deltax * deltax + deltay * deltay); if(ddd < ddi) { /* pixel is closer */ rri = rrr; cci = ccc; ddi = ddd; } } } } } /* save result */ rr[i] = rri; cc[i] = cci; } } } } spatstat/src/localpcf.c0000755000176200001440000000064313406057617014652 0ustar liggesusers#include #include #include #include "chunkloop.h" /* localpcf.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Assumes point patterns are sorted in increasing order of x coordinate */ #undef WEIGHTED #include "localpcf.h" #define WEIGHTED 1 #include "localpcf.h" spatstat/src/linearradius.c0000644000176200001440000000367013406057617015551 0ustar liggesusers#include #include #include "chunkloop.h" /* linearradius.c Bounding radius in linear network $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public 
Licence >= 2 */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #include "yesno.h" #undef DEBUG void linearradius(ns, from, to, /* network segments */ lengths, /* segment lengths */ nv, dpath, /* shortest path distances between vertices */ huge, result) int *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ double *huge; /* very large value */ double *result; { int Nv, Ns; int i, j, A, B, C, D; double AB, AC, AD, BC, BD, CD; double sAij, sBij, sAiMax, sBiMax, smin; int maxchunk; Nv = *nv; Ns = *ns; smin = *huge; OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 16384) { /* indices of endpoints of segment i */ A = from[i]; B = to[i]; AB = lengths[i]; sAiMax = sBiMax = AB/2.0; for(j = 0; j < Ns; j++) { if(j != i) { /* indices of endpoints of segment i */ C = from[j]; D = to[j]; CD = lengths[j]; AC = DPATH(A,C); AD = DPATH(A,D); BC = DPATH(B,C); BD = DPATH(B,D); /* max dist from A to any point in segment j */ sAij = (AD > AC + CD) ? AC + CD : (AC > AD + CD) ? AD + CD : (AC + AD + CD)/2.0; /* max dist from B to any point in segment j */ sBij = (BD > BC + CD) ? BC + CD : (BC > BD + CD) ? 
BD + CD : (BC + BD + CD)/2.0; /* row-wise maximum */ if(sAij > sAiMax) sAiMax = sAij; if(sBij > sBiMax) sBiMax = sBij; } } if(sAiMax < smin) smin = sAiMax; if(sBiMax < smin) smin = sBiMax; } } *result = smin; } spatstat/src/areapair.c0000644000176200001440000000375413406057617014656 0ustar liggesusers/* areapair.c $Revision: 1.7 $ $Date: 2018/12/18 02:43:11 $ Specialised code for the second order conditional intensity of the area-interaction process Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "yesno.h" /* computes area of b(A, r) \int b(B, r) \setminus \bigcup_i b(X[i], r) */ void delta2area(xa, ya, xb, yb, nother, xother, yother, radius, epsilon, pixcount) double *xa, *ya, *xb, *yb; int *nother; double *xother, *yother; double *radius, *epsilon; int *pixcount; { int Ni, Nj, Nk, i, j, k, count, covered; double xA, yA, xB, yB, r, eps, r2; double xmin, xmax, ymin, ymax, xi, yj; double dxA, dyA; double dxB, dyB; double dx, dy; Nk = *nother; xA = *xa; yA = *ya; xB = *xb; yB = *yb; r = *radius; eps = *epsilon; r2 = r * r; /* find intersection of squares centred on A and B */ if(xA < xB) { xmin = xB - r; xmax = xA + r; } else { xmin = xA - r; xmax = xB + r; } if(xmin > xmax) return; if(yA < yB) { ymin = yB - r; ymax = yA + r; } else { ymin = yA - r; ymax = yB + r; } if(ymin > ymax) return; /* set up grid */ Ni = (int) ceil((xmax - xmin)/eps) + 1; Nj = (int) ceil((ymax - ymin)/eps) + 1; count = 0; for(i = 0, xi = xmin; i < Ni; i++, xi += eps) { dxA = xi - xA; for(j = 0, yj = ymin; j < Nj; j++, yj += eps) { dyA = yj - yA; if(dxA * dxA + dyA * dyA <= r2) { /* grid point belongs to b(A, r) */ dxB = xi - xB; dyB = yj - yB; if(dxB * dxB + dyB * dyB <= r2) { /* grid point belongs to b(A,r) \cap b(B,r) */ covered = NO; /* test whether it is covered by another b(X[k], r) */ for(k = 0; k < Nk; k++) { dx = xi - xother[k]; dy = yj - yother[k]; if(dx * dx + dy * dy <= r2) { covered = YES; break; } } 
if(!covered) { ++count; } } } } } *pixcount = count; } spatstat/src/g3.c0000755000176200001440000001266513433406761013405 0ustar liggesusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.3 $ $Date: 2012/05/22 07:17:31 $ G function (nearest neighbour distribution) of 3D point pattern Let b = distance from point p[i] to boundary of box d = distance from p[i] to nearest p[j] method = 1 naive ratio estimator (Ripley 1981) numerator(r) = count(i: b >= r, d <= r) denominator(r) = count(i: b >= r) method = 2 minus sampling estimator numerator(r) = count(i: b >= r, d <= r) denominator(r) = lambda * volume(x: b >= r) where lambda = (no of points)/volume(box) method = 3 Hanisch's G3 numerator(r) = count(i: b >= d, d <= r) denominator(r) = count(i: b >= d) method = 4 Hanisch's G4 numerator(r) = count(i: b >= d, d <= r) denominator(r) = fudge * volume(x: b >= r) fudge = numerator(R)/denominator(R) R = sup{r : denominator(r) > 0 } # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2012. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #define MIN(X,Y) (((X) > (Y)) ? 
(Y) : (X)) double * nndist3(p, n, b) /* compute nearest neighbour distance for each p[i] */ Point *p; int n; Box *b; { register int i, j; register double dx, dy, dz, dist2, nearest2, huge2; Point *ip, *jp; double *nnd; nnd = (double *) R_alloc(n, sizeof(double)); dx = b->x1 - b->x0; dy = b->y1 - b->y0; dz = b->z1 - b->z0; huge2 = 2.0 * (dx * dx + dy * dy + dz * dz); /* scan each point and find closest */ for( i = 0; i < n; i++) { ip = p + i; nearest2 = huge2; for(j = 0; j < n; j++) if(j != i) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist2 = dx * dx + dy * dy + dz * dz; if(dist2 < nearest2) nearest2 = dist2; } nnd[i] = sqrt(nearest2); } return(nnd); } double * border3(p, n, b) /* compute distances to border */ Point *p; int n; Box *b; { register int i; register double bord; register Point *ip; double *bored; bored = (double *) R_alloc(n, sizeof(double)); for( i = 0; i < n; i++) { ip = p + i; bord = MIN(ip->x - b->x0, b->x1 - ip->x); bord = MIN(bord, ip->y - b->y0); bord = MIN(bord, b->y1 - ip->y); bord = MIN(bord, ip->z - b->z0); bord = MIN(bord, b->z1 - ip->z); bored[i] = bord; } return(bored); } void g3one(p, n, b, g) Point *p; int n; Box *b; Ftable *g; { register int i, l, lbord, lnnd; double dt; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ for(l = 0; l < g->n; l++) (g->num)[l] = (g->denom)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { lbord = floor( (bord[i] - g->t0) / dt ); if(lbord >= g->n) lbord = g->n - 1; for(l = 0; l <= lbord; l++) (g->denom)[l] += 1.0; lnnd = ceil( (nnd[i] - g->t0) / dt ); if(lnnd < 0) lnnd = 0; for(l = lnnd; l <= lbord; l++) (g->num)[l] += 1.0; } /* compute ratio */ for(l = 0; l < g->n; l++) (g->f)[l] = ((g->denom)[l] > 0)? 
(g->num)[l] / (g->denom)[l] : 1.0; } void g3three(p, n, b, g) Point *p; int n; Box *b; Ftable *g; { register int i, l, lmin; double dt; int denom; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ denom = 0; for(l = 0; l < g->n; l++) (g->num)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { if(nnd[i] <= bord[i]) { ++denom; lmin = ceil( (nnd[i] - g->t0) / dt ); if(lmin < 0) lmin = 0; for(l = lmin; l < g->n; l++) (g->num)[l] += 1.0; } } /* compute ratio */ for(l = 0; l < g->n; l++) { (g->denom)[l] = denom; (g->f)[l] = (denom > 0)? (g->num)[l] / (double) denom : 1.0; } } void g3cen(p, n, b, count) Point *p; int n; Box *b; H4table *count; { register int i, lcen, lobs; register double dt, cens, obsv; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* spacing of histogram cells */ dt = (count->t1 - count->t0)/(count->n - 1); /* 'count' is assumed to have been initialised */ for(i = 0; i < n; i++) { obsv = nnd[i]; cens = bord[i]; lobs = ceil( (obsv - count->t0) / dt ); lcen = floor( (cens - count->t0) / dt ); if(obsv <= cens) { /* observation is uncensored; increment all four histograms */ if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) { (count->obs)[lobs]++; (count->nco)[lobs]++; } if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) { (count->cen)[lcen]++; (count->ncc)[lcen]++; } } else { /* observation is censored; increment only two histograms */ lobs = MIN(lobs, lcen); if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) (count->obs)[lobs]++; if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) (count->cen)[lcen]++; } } } spatstat/src/sftcr.c0000755000176200001440000000436613115271120014176 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Soft Core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct 
Softcore { double sigma; double kappa; double nook; /* -1/kappa */ double stok; /* sigma^(2/kappa) */ double *period; int per; } Softcore; /* initialiser function */ Cdata *sftcrinit(state, model, algo) State state; Model model; Algor algo; { Softcore *softcore; softcore = (Softcore *) R_alloc(1, sizeof(Softcore)); /* Interpret model parameters*/ softcore->sigma = model.ipar[0]; softcore->kappa = model.ipar[1]; softcore->period = model.period; /* constants */ softcore->nook = -1/softcore->kappa; softcore->stok = pow(softcore->sigma, 2/softcore->kappa); /* periodic boundary conditions? */ softcore->per = (model.period[0] > 0.0); return((Cdata *) softcore); } /* conditional intensity evaluator */ double sftcrcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairsum, cifval, nook, stok; Softcore *softcore; softcore = (Softcore *) cdata; nook = softcore->nook; stok = softcore->stok; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(softcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],softcore->period); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; jperiod); pairsum += pow(d2, nook); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = pow(u - x[j],2) + pow(v-y[j],2); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; j= 2 Given the edges of a graph, determine all "Vees" i.e. triples (i, j, k) where i ~ j and i ~ k. */ #include #include #include #include "chunkloop.h" #undef DEBUGVEE SEXP graphVees(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ /* Edges should NOT be repeated symmetrically. Indices need not be sorted. 
*/ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of triples */ int Nt, Ntmax; /* number of triples */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGVEE Rprintf("i=%d ---------- \n", i); #endif /* Find Vee triples with apex 'i' */ /* First, find all vertices j connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { jj[Nj] = je[m]; Nj++; } else if(je[m] == i) { jj[Nj] = ie[m]; Nj++; } } /* save triples (i,j,k) */ #ifdef DEBUGVEE Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGVEE Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGVEE Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = 
Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triplet indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); return(out); } spatstat/src/mhsnoop.c0000644000176200001440000001102013406057617014536 0ustar liggesusers#include #include #include #include "methas.h" #include "mhsnoopdef.h" /* mhsnoop.c $Revision: 1.9 $ $Date: 2018/12/18 02:43:11 $ support for visual debugger in RMH Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ /* To switch on debugging code, insert the line: #define MH_DEBUG YES */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif void initmhsnoop(Snoop *s, SEXP env) { s->active = isEnvironment(env); s->nextstop = 0; /* stop at iteration 0 */ s->nexttype = NO_TYPE; /* deactivated */ if(s->active) { s->env = env; s->expr = findVar(install("callbackexpr"), env); } else { s->env = s->expr = R_NilValue; } } void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype) { SEXP e; int npts, j; /* passed from C to R before debugger */ SEXP Sirep, Sx, Sy, Sm, Sproptype, Sproplocn, Spropmark, Spropindx; SEXP Snumer, Sdenom, Sitype; double *Px, *Py, *Pproplocn; int *Pm; /* passed from R to C after debugger */ SEXP Sinxt, Stnxt, SitypeUser; #if MH_DEBUG Rprintf("mhsnoop called at iteration %d\n", irep); #endif if(!(s->active)) return; #if MH_DEBUG Rprintf("mhsnoop is active\n"); #endif /* execute when the simulation reaches the next stopping time: a specified 
iteration number 'nextstop' or a specified proposal type 'nexttype' */ if(irep != s->nextstop && prop->itype != s->nexttype) return; #if MH_DEBUG Rprintf("debug triggered\n"); #endif /* environment for communication with R */ e = s->env; /* copy data to R */ /* copy iteration number */ PROTECT(Sirep = NEW_INTEGER(1)); *(INTEGER_POINTER(Sirep)) = irep; setVar(install("irep"), Sirep, e); UNPROTECT(1); /* copy (x,y) coordinates */ npts = state->npts; PROTECT(Sx = NEW_NUMERIC(npts)); PROTECT(Sy = NEW_NUMERIC(npts)); Px = NUMERIC_POINTER(Sx); Py = NUMERIC_POINTER(Sy); for(j = 0; j < npts; j++) { Px[j] = state->x[j]; Py[j] = state->y[j]; } setVar(install("xcoords"), Sx, e); setVar(install("ycoords"), Sy, e); UNPROTECT(2); /* copy marks */ if(state->ismarked) { PROTECT(Sm = NEW_INTEGER(npts)); Pm = INTEGER_POINTER(Sm); for(j = 0; j < npts; j++) { Pm[j] = state->marks[j]; } setVar(install("mcodes"), Sm, e); UNPROTECT(1); } /* proposal type */ PROTECT(Sproptype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sproptype)) = prop->itype; setVar(install("proptype"), Sproptype, e); UNPROTECT(1); /* proposal coordinates */ PROTECT(Sproplocn = NEW_NUMERIC(2)); Pproplocn = NUMERIC_POINTER(Sproplocn); Pproplocn[0] = prop->u; Pproplocn[1] = prop->v; setVar(install("proplocn"), Sproplocn, e); UNPROTECT(1); /* proposal mark value */ if(state->ismarked) { PROTECT(Spropmark = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropmark)) = prop->mrk; setVar(install("propmark"), Spropmark, e); UNPROTECT(1); } /* proposal point index */ PROTECT(Spropindx = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropindx)) = prop->ix; setVar(install("propindx"), Spropindx, e); UNPROTECT(1); /* Metropolis-Hastings numerator and denominator */ PROTECT(Snumer = NEW_NUMERIC(1)); PROTECT(Sdenom = NEW_NUMERIC(1)); *(NUMERIC_POINTER(Snumer)) = numer; *(NUMERIC_POINTER(Sdenom)) = denom; setVar(install("numerator"), Snumer, e); setVar(install("denominator"), Sdenom, e); UNPROTECT(2); /* tentative outcome of proposal */ PROTECT(Sitype = 
NEW_INTEGER(1)); *(INTEGER_POINTER(Sitype)) = *itype; setVar(install("itype"), Sitype, e); UNPROTECT(1); /* ..... call visual debugger */ #if MH_DEBUG Rprintf("executing callback\n"); #endif eval(s->expr, s->env); /* update outcome of proposal */ SitypeUser = findVar(install("itype"), e); *itype = *(INTEGER_POINTER(SitypeUser)); #if MH_DEBUG Rprintf("Assigning itype = %d\n", *itype); #endif /* update stopping time */ Sinxt = findVar(install("inxt"), e); s->nextstop = *(INTEGER_POINTER(Sinxt)); Stnxt = findVar(install("tnxt"), e); s->nexttype = *(INTEGER_POINTER(Stnxt)); #if MH_DEBUG if(s->nextstop >= 0) Rprintf("Next stop: iteration %d\n", s->nextstop); if(s->nexttype >= 0) { if(s->nexttype == BIRTH) Rprintf("Next stop: first birth proposal\n"); if(s->nexttype == DEATH) Rprintf("Next stop: first death proposal\n"); if(s->nexttype == SHIFT) Rprintf("Next stop: first shift proposal\n"); } #endif return; } spatstat/src/PerfectHardcore.h0000644000176200001440000001137713406057617016137 0ustar liggesusers // ........................... Hardcore process .......................... 
// $Revision: 1.4 $ $Date: 2012/03/10 11:23:09 $ class HardcoreProcess : public PointProcess { public: double beta, R, Rsquared; HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri); ~HardcoreProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; HardcoreProcess::HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double HardcoreProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = 0; return(rtn); } void HardcoreProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void HardcoreProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating HardcoreProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating HardcoreProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating HardcoreProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectHardcore(SEXP beta, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Hardcore point process HardcoreProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // 
copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat/src/Knone.c0000644000176200001440000000172013406057617014133 0ustar liggesusers#include #include #include /* Knone.c Efficient computation of uncorrected estimates of K for large datasets KnoneI() Estimates K function, returns integer numerator KnoneD() Estimates K function, returns double precision numerator Kwnone() Estimates Kinhom, returns double precision numerator Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef WEIGHTED #define FNAME KnoneI #define OUTTYPE int #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME KnoneD #define OUTTYPE double #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME Kwnone #define WEIGHTED #define OUTTYPE double #include "Knone.h" spatstat/src/segdens.c0000644000176200001440000000234713115271120014477 0ustar liggesusers#include #include #include #include /* segdens.c Convolution of segments with Gaussian kernel Adrian Baddeley, 02 dec 2016 Licence: GPL >= 2.0 */ #define DNORM(X, SIG) dnorm((X), (double) 0.0, (SIG), FALSE) #define PNORM(X, SIG) pnorm((X), (double) 0.0, (SIG), TRUE, FALSE) void segdens(sigma, ns, xs, ys, alps, lens, np, xp, yp, z) double *sigma; /* bandwidth */ int *ns; /* number of line segments */ double *xs, *ys, *alps, *lens; /* first endpoint, angle, length */ int *np; /* number of pixels or test locations */ double *xp, *yp; /* pixel coordinates */ double *z; /* result, assumed initially 0 */ { int i, j, Ns, Np; double Sigma; double xsi, ysi, angi, leni, 
cosi, sini; double dx, dy, u1, u2; Ns = *ns; Np = *np; Sigma = *sigma; for(i = 0; i < Ns; i++) { R_CheckUserInterrupt(); xsi = xs[i]; ysi = ys[i]; angi = alps[i]; leni = lens[i]; cosi = cos(angi); sini = sin(angi); for(j = 0; j < Np; j++) { dx = xp[j] - xsi; dy = yp[j] - ysi; u1 = dx * cosi + dy * sini; u2 = -dx * sini + dy * cosi; z[j] += DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma)); } } } spatstat/src/f3.c0000755000176200001440000002477313115271120013371 0ustar liggesusers#include #include #include #include "geom3.h" #include "functable.h" #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif /* $Revision: 1.4 $ $Date: 2016/10/23 04:24:03 $ 3D distance transform # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ /* step lengths in distance transform */ #define STEP1 41 #define STEP2 58 #define STEP3 71 /* (41,58,71)/41 is a good rational approximation to (1, sqrt(2), sqrt(3)) */ #define MIN(X,Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X,Y) (((X) > (Y)) ? 
(X) : (Y)) typedef struct IntImage { int *data; int Mx, My, Mz; /* dimensions */ int length; } IntImage; typedef struct BinaryImage { unsigned char *data; int Mx, My, Mz; /* dimensions */ int length; } BinaryImage; #define VALUE(I,X,Y,Z) \ ((I).data)[ (Z) * ((I).Mx) * ((I).My) + (Y) * ((I).Mx) + (X) ] void allocBinImage(b, ok) BinaryImage *b; int *ok; { b->length = b->Mx * b->My * b->Mz; b->data = (unsigned char *) R_alloc(b->length, sizeof(unsigned char)); if(b->data == 0) { Rprintf("Can't allocate memory for %d binary voxels\n", b->length); *ok = 0; } *ok = 1; } void allocIntImage(v, ok) IntImage *v; int *ok; { v->length = v->Mx * v->My * v->Mz; v->data = (int *) R_alloc(v->length, sizeof(int)); if(v->data == 0) { Rprintf("Can't allocate memory for %d integer voxels\n", v->length); *ok = 0; } *ok = 1; } void freeBinImage(b) BinaryImage *b; { } void freeIntImage(v) IntImage *v; { } void cts2bin(p, n, box, vside, b, ok) /* convert a list of points inside a box into a 3D binary image */ Point *p; int n; Box *box; double vside; /* side of a (cubic) voxel */ BinaryImage *b; int *ok; { int i, lx, ly, lz; unsigned char *cp; b->Mx = (int) ceil((box->x1 - box->x0)/vside) + 1; b->My = (int) ceil((box->y1 - box->y0)/vside) + 1; b->Mz = (int) ceil((box->z1 - box->z0)/vside) + 1; allocBinImage(b, ok); if(! (*ok)) return; for(i = b->length, cp = b->data; i ; i--, cp++) *cp = 1; for(i=0;ix0)/vside)-1; ly = (int) ceil((p[i].y - box->y0)/vside)-1; lz = (int) ceil((p[i].z - box->z0)/vside)-1; if( lx >= 0 && lx < b->Mx && ly >= 0 && ly < b->My && lz >= 0 && lz < b->Mz ) VALUE((*b),lx,ly,lz) = 0; } } void distrans3(b, v, ok) /* Distance transform in 3D */ BinaryImage *b; /* input */ IntImage *v; /* output */ int *ok; { register int x, y, z; int infinity, q; /* allocate v same size as b */ v->Mx = b->Mx; v->My = b->My; v->Mz = b->Mz; allocIntImage(v, ok); if(! 
(*ok)) return; /* compute largest possible distance */ infinity = (int) ceil( ((double) STEP3) * sqrt( ((double) b->Mx) * b->Mx + ((double) b->My) * b->My + ((double) b->Mz) * b->Mz)); /* Forward pass: Top to Bottom; Back to Front; Left to Right. */ for(z=0;zMz;z++) { R_CheckUserInterrupt(); for(y=0;yMy;y++) { for(x=0;xMx;x++) { if(VALUE((*b),x,y,z) == 0) VALUE((*v),x,y,z) = 0; else { q = infinity; #define INTERVAL(W, DW, MW) \ ((DW == 0) || (DW == -1 && W > 0) || (DW == 1 && W < MW - 1)) #define BOX(X,Y,Z,DX,DY,DZ) \ (INTERVAL(X,DX,v->Mx) && INTERVAL(Y,DY,v->My) && INTERVAL(Z,DZ,v->Mz)) #define TEST(DX,DY,DZ,DV) \ if(BOX(x,y,z,DX,DY,DZ) && q > VALUE((*v),x+DX,y+DY,z+DZ) + DV) \ q = VALUE((*v),x+DX,y+DY,z+DZ) + DV /* same row */ TEST(-1, 0, 0, STEP1); /* same plane */ TEST(-1,-1, 0, STEP2); TEST( 0,-1, 0, STEP1); TEST( 1,-1, 0, STEP2); /* previous plane */ TEST( 1, 1,-1, STEP3); TEST( 0, 1,-1, STEP2); TEST(-1, 1,-1, STEP3); TEST( 1, 0,-1, STEP2); TEST( 0, 0,-1, STEP1); TEST(-1, 0,-1, STEP2); TEST( 1,-1,-1, STEP3); TEST( 0,-1,-1, STEP2); TEST(-1,-1,-1, STEP3); VALUE((*v),x,y,z) = q; } } } } /* Backward pass: Bottom to Top; Front to Back; Right to Left. 
*/ for(z = b->Mz - 1; z >= 0; z--) { R_CheckUserInterrupt(); for(y = b->My - 1; y >= 0; y--) { for(x = b->Mx - 1; x >= 0; x--) { if((q = VALUE((*v),x,y,z)) != 0) { /* same row */ TEST(1, 0, 0, STEP1); /* same plane */ TEST(-1, 1, 0, STEP2); TEST( 0, 1, 0, STEP1); TEST( 1, 1, 0, STEP2); /* plane below */ TEST( 1, 1, 1, STEP3); TEST( 0, 1, 1, STEP2); TEST(-1, 1, 1, STEP3); TEST( 1, 0, 1, STEP2); TEST( 0, 0, 1, STEP1); TEST(-1, 0, 1, STEP2); TEST( 1,-1, 1, STEP3); TEST( 0,-1, 1, STEP2); TEST(-1,-1, 1, STEP3); VALUE((*v),x,y,z) = q; } } } } } void hist3d(v, vside, count) /* compute histogram of all values in *v using count->n histogram cells ranging from count->t0 to count->t1 and put results in count->num */ IntImage *v; double vside; Itable *count; { register int i, j, k; register int *ip; register double scale, width; /* relationship between distance transform units and physical units */ scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); for(i = 0; i < count->n ; i++) { (count->num)[i] = 0; (count->denom)[i] = v->length; } for(i = v->length, ip = v->data; i; i--, ip++) { k = (int) ceil((*ip * scale - count->t0)/width); k = MAX(k, 0); for(j = k; j < count->n; j++) (count->num)[j]++; } } void hist3dminus(v, vside, count) /* minus sampling */ IntImage *v; double vside; Itable *count; { register int x, y, z, val, border, bx, by, bz, byz, j, kbord, kval; register double scale, width; DEBUGMESSAGE("inside hist3dminus\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeItable */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); kbord = (int) floor((vside * border - count->t0)/width); kbord = MIN(kbord, count->n - 1); /* denominator counts all voxels with distance to boundary >= r */ if(kbord >= 0) for(j = 0; j <= kbord; 
j++) (count->denom)[j]++; val = VALUE((*v), x, y, z); kval = (int) ceil((val * scale - count->t0)/width); kval = MAX(kval, 0); #ifdef DEBUG /* Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", vside * border, kbord, scale * val, kval); */ #endif /* numerator counts all voxels with distance to boundary >= r and distance to nearest point <= r */ if(kval <= kbord) for(j = kval; j <= kbord; j++) (count->num)[j]++; } } } DEBUGMESSAGE("leaving hist3dminus\n") } void hist3dCen(v, vside, count) /* four censoring-related histograms */ IntImage *v; double vside; H4table *count; { register int x, y, z, val, border, bx, by, bz, byz, kbord, kval; register double scale, width, realborder, realval; DEBUGMESSAGE("inside hist3dCen\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeH4table */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); realborder = vside * border; kbord = (int) floor((realborder - count->t0)/width); val = VALUE((*v), x, y, z); realval = scale * val; kval = (int) ceil((realval - count->t0)/width); /* this could exceed array limits; that will be detected below */ #ifdef DEBUG Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", realborder, kbord, realval, kval); #endif if(realval <= realborder) { /* observation is uncensored; increment all four histograms */ if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) { (count->obs)[kval]++; (count->nco)[kval]++; } if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) { (count->cen)[kbord]++; (count->ncc)[kbord]++; } } else { /* observation is censored; increment only two histograms */ kval = MIN(kval, kbord); if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) (count->obs)[kval]++; if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) 
(count->cen)[kbord]++; } } } } DEBUGMESSAGE("leaving hist3dCen\n") } /* CALLING ROUTINES */ void phatminus(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dminus(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } void phatnaive(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatnaive\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\n into distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3d..."); hist3d(&v, vside, count); DEBUGMESSAGE("out of hist3d\n") freeIntImage(&v); } } void p3hat4(p, n, box, vside, count) Point *p; int n; Box *box; double vside; H4table *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dCen(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } spatstat/src/diggra.c0000755000176200001440000000637013115271120014307 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Diggle-Gratton process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = 0 for t < delta = (t-delta)/(rho-delta)^kappa for delta <= t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 
74, 1987, pages 763 -- 770.) */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Diggra { double kappa; double delta; double rho; double delta2; /* delta^2 */ double rho2; /* rho^2 */ double fac; /* 1/(rho-delta) */ double *period; int per; } Diggra; /* initialiser function */ Cdata *diggrainit(state, model, algo) State state; Model model; Algor algo; { Diggra *diggra; diggra = (Diggra *) R_alloc(1, sizeof(Diggra)); /* Interpret model parameters*/ diggra->kappa = model.ipar[0]; diggra->delta = model.ipar[1]; diggra->rho = model.ipar[2]; diggra->period = model.period; /* constants */ diggra->delta2 = pow(diggra->delta, 2); diggra->rho2 = pow(diggra->rho, 2); diggra->fac = 1/(diggra->rho - diggra->delta); /* periodic boundary conditions? */ diggra->per = (model.period[0] > 0.0); return((Cdata *) diggra); } /* conditional intensity evaluator */ double diggracif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairprod, cifval; double rho2, delta, delta2, fac; double *period; DECLARE_CLOSE_D2_VARS; Diggra *diggra; diggra = (Diggra *) cdata; period = diggra->period; rho2 = diggra->rho2; delta = diggra->delta; delta2 = diggra->delta2; fac = diggra->fac; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(diggra->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) { if(d2 < delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) { if(d2 <= delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; jkappa); return cifval; } Cifns DiggraCifns = { &diggrainit, 
&diggracif, (updafunptr) NULL, NO}; spatstat/src/uniquemap.h0000644000176200001440000000403113470731536015070 0ustar liggesusers/* uniquemap.h Function definitions to be #included in uniquemap.c several times with different values of macros. !! Assumes points are ordered by increasing x value !! Assumes is included Macros used: FUNNAME name of function QUITANY return TRUE immediately if any duplicates are found ZCOORD if defined, coordinates are 3-dimensional MARKED if defined, points have INTEGER marks (tested for equality) $Revision: 1.6 $ $Date: 2019/05/21 07:30:51 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ #ifdef ZCOORD #define SPACEDIM 3 #else #define SPACEDIM 2 #endif void FUNNAME(int *n, double *x, double *y, #ifdef ZCOORD double *z, #endif #ifdef MARKED int *marks, #endif #ifdef QUITANY int *anydup #else int *uniqmap #endif ) { double xi, yi, dx, dy, d2; #ifdef ZCOORD double zi, dz; #endif #ifdef MARKED int mi; #endif int N, maxchunk, i, j; /* loop in chunks of 2^16 */ N = *n; i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef ZCOORD zi = z[i]; #endif #ifdef MARKED mi = marks[i]; #endif if(i + 1 < N) { #ifndef QUITANY if(uniqmap[i] == 0) { /* i.e. don't seek duplicates of a duplicate */ #endif /* scan forward */ for(j = i + 1; j < N; j++) { dx = x[j] - xi; if(dx > DOUBLE_EPS) break; dy = y[j] - yi; d2 = dx * dx + dy * dy; #ifdef ZCOORD if(d2 <= 0.0) { dz = z[j] - zi; d2 = d2 + dz * dz; #endif if(d2 <= 0.0) { #ifdef MARKED if(marks[j] == mi) { #endif /* j is a duplicate of i */ #ifdef QUITANY *anydup = 1; /* i.e. 
TRUE */ return; #else uniqmap[j] = i + 1; /* R indexing */ #endif #ifdef MARKED } #endif } #ifdef ZCOORD } #endif } #ifndef QUITANY } #endif } } } } spatstat/src/loccum.c0000644000176200001440000000304213406057617014342 0ustar liggesusers#include #include #include #include "chunkloop.h" /* loccum.c $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Compute local cumulative sums or products of weights locsum: f_i(t) = \sum_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxsum: f_u(t) = \sum_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) locprod: f_i(t) = \prod_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxprod: f_u(t) = \prod_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) Assumes point patterns are sorted in increasing order of x coordinate Uses C code template files : loccums.h, loccumx.h */ /* data-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccums.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccums.h" /* test-grid-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locxsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccumx.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locxprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccumx.h" spatstat/src/nn3Ddist.c0000755000176200001440000001750413406057617014561 0ustar liggesusers/* nn3Ddist.c Nearest Neighbour Distances in 3D $Revision: 1.12 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 THE FOLLOWING FUNCTIONS ASSUME THAT z IS SORTED IN 
ASCENDING ORDER nnd3D Nearest neighbour distances nnw3D Nearest neighbours (id) nndw3D Nearest neighbours (id) and distances nnXdw3D Nearest neighbour from one list to another nnXEdw3D Nearest neighbour from one list to another, with overlaps knnd3D k-th nearest neighbour distances knnw3D k-th nearest neighbours (id) knndw3D k-th nearest neighbours (id) and distances */ #undef SPATSTAT_DEBUG #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(); /* .......... Single point pattern ...............................*/ #undef FNAME #undef DIST #undef WHICH /* nnd3D: returns nn distance */ #define FNAME nnd3D #define DIST #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* nnw3D: returns id of nearest neighbour */ #define FNAME nnw3D #define WHICH #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* nndw3D: returns nn distance .and. id of nearest neighbour */ #define FNAME nndw3D #define DIST #define WHICH #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* .......... 
Two point patterns ...............................*/ /* common interface */ void nnX3Dinterface(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2, *id1, *id2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; { void nnXdw3D(), nnXd3D(), nnXw3D(); void nnXEdw3D(), nnXEd3D(), nnXEw3D(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { nnXdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(di) { nnXd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(wh) { nnXw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } } else { if(di && wh) { nnXEdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(di) { nnXEd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(wh) { nnXEw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } } } /* nnXdw3D: for TWO point patterns X and Y, find the nearest neighbour (from each point of X to the nearest point of Y) returning both the distance and the identifier Requires both patterns to be sorted in order of increasing z coord */ #define FNAME nnXdw3D #define DIST #define WHICH #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXd3D: returns distance only */ #define FNAME nnXd3D #define DIST #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXw3D: returns identifier only */ #define FNAME nnXw3D #define WHICH #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... 
Two point patterns with exclusion ........................*/ /* nnXEdw3D: similar to nnXdw3D but allows X and Y to include common points (which are not to be counted as neighbours) Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. Requires both patterns to be sorted in order of increasing z coord */ #define FNAME nnXEdw3D #define DIST #define WHICH #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXEd3D: returns distances only */ #define FNAME nnXEd3D #define DIST #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXEw3D: returns identifiers only */ #define FNAME nnXEw3D #define WHICH #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... Single point pattern ...............................*/ /* .......... k-th nearest neighbours ...............................*/ /* knnd3D nearest neighbour distances 1:kmax */ #define FNAME knnd3D #define DIST #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* knnw3D nearest neighbour indices 1:kmax */ #define FNAME knnw3D #define WHICH #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* knndw3D nearest neighbours 1:kmax returns distances and indices */ #define FNAME knndw3D #define DIST #define WHICH #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* .......... Two point patterns ...............................*/ /* .......... 
k-th nearest neighbours ...............................*/ /* general interface */ void knnX3Dinterface(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, kmax, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; int *id1, *id2; int *kmax; /* options */ int *exclude, *wantdist, *wantwhich; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnXdw3D(), knnXd3D(), knnXw3D(); void knnXEdw3D(), knnXEd3D(), knnXEw3D(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { knnXdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(di) { knnXd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(wh) { knnXw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } } else { if(di && wh) { knnXEdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(di) { knnXEd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(wh) { knnXEw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } } } #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXdw3D nearest neighbours 1:kmax between two point patterns returns distances and indices */ #define FNAME knnXdw3D #define DIST #define WHICH #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXd3D nearest neighbours 1:kmax between two point patterns returns distances */ #define FNAME knnXd3D #define DIST #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXw3D nearest neighbours 1:kmax between two point patterns returns indices */ #define FNAME knnXw3D #define WHICH #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... Two point patterns with exclusion ..........................*/ /* .......... 
k-th nearest neighbours ...............................*/ /* knnXEdw3D nearest neighbours 1:kmax between two point patterns with exclusion returns distances and indices */ #define FNAME knnXEdw3D #define DIST #define WHICH #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXEd3D nearest neighbours 1:kmax between two point patterns with exclusion returns distances */ #define FNAME knnXEd3D #define DIST #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXEw3D nearest neighbours 1:kmax between two point patterns with exclusion returns indices */ #define FNAME knnXEw3D #define WHICH #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE spatstat/src/geyer.c0000755000176200001440000002363213115271120014165 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" void fexitc(const char *msg); #undef MH_DEBUG /* Conditional intensity function for a Geyer saturation process. */ typedef struct Geyer { /* model parameters */ double gamma; double r; double s; /* transformations of the parameters */ double r2; double loggamma; int hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; #ifdef MH_DEBUG int *freshaux; int prevtype; #endif } Geyer; Cdata *geyerinit(state, model, algo) State state; Model model; Algor algo; { int i, j, n1; Geyer *geyer; double r2; double *period; DECLARE_CLOSE_VARS; geyer = (Geyer *) R_alloc(1, sizeof(Geyer)); /* Interpret model parameters*/ geyer->gamma = model.ipar[0]; geyer->r = model.ipar[1]; /* not squared any more */ geyer->s = model.ipar[2]; geyer->r2 = geyer->r * geyer->r; #ifdef MHDEBUG Rprintf("Initialising Geyer gamma=%lf, r=%lf, sat=%lf\n", geyer->gamma, geyer->r, geyer->s); #endif /* is the model numerically equivalent to hard core ? */ geyer->hard = (geyer->gamma < DOUBLE_EPS); geyer->loggamma = (geyer->hard) ? 
0 : log(geyer->gamma); /* periodic boundary conditions? */ geyer->period = model.period; geyer->per = (model.period[0] > 0.0); /* allocate storage for auxiliary counts */ geyer->aux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); #ifdef MH_DEBUG geyer->freshaux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); geyer->prevtype = -42; #endif r2 = geyer->r2; /* Initialise auxiliary counts */ for(i = 0; i < state.npmax; i++) geyer->aux[i] = 0; if(geyer->per) { /* periodic */ period = geyer->period; if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } else { /* Euclidean distance */ if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } return((Cdata *) geyer); } double geyercif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, npts, tee; double u, v, r2, s; double w, a, b, f, cifval; double *x, *y; int *aux; double *period; Geyer *geyer; DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; npts = state.npts; if(npts==0) return ((double) 1.0); x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; r2 = geyer->r2; s = geyer->s; period = geyer->period; aux = geyer->aux; /* tee = neighbour count at the point in question; w = sum of changes in (saturated) neighbour counts at other points */ tee = w = 0.0; if(prop.itype == BIRTH) { if(geyer->per) { /* periodic distance */ for(j=0; j 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } else { /* Euclidean distance */ for(j=0; j 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases 
count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } } else if(prop.itype == DEATH) { tee = aux[ix]; if(geyer->per) { /* Periodic distance */ for(j=0; j 0) /* j is not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } else { /* Euclidean distance */ for(j=0; j 0) /* j was not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } } else if(prop.itype == SHIFT) { /* Compute the cif at the new point, not the ratio of new/old */ if(geyer->per) { /* Periodic distance */ for(j=0; j= b) w = w + 1; } } } else { /* Euclidean distance */ for(j=0; j= b) w = w + 1; } } } } w = w + ((tee < s) ? tee : s); if(geyer->hard) { if(tee > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp(geyer->loggamma*w); return cifval; } void geyerupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, j; int oldclose, newclose; double u, v, xix, yix, r2; double *x, *y; int *aux; double *period; Geyer *geyer; #ifdef MH_DEBUG int *freshaux; int i; int oc, nc; #endif DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; period = geyer->period; aux = geyer->aux; r2 = geyer->r2; x = state.x; y = state.y; npts = state.npts; #ifdef MH_DEBUG /* ........................ debugging cross-check ................ 
*/ /* recompute 'aux' values afresh */ freshaux = geyer->freshaux; for(i = 0; i < state.npts; i++) freshaux[i] = 0; if(geyer->per) { /* periodic */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) freshaux[i] += 1; } } } else { /* Euclidean distance */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) freshaux[i] += 1; } } } /* Check agreement with 'aux' */ for(j = 0; j < state.npts; j++) { if(aux[j] != freshaux[j]) { Rprintf("\n\taux[%d] = %d, freshaux[%d] = %d\n", j, aux[j], j, freshaux[j]); Rprintf("\tnpts = %d\n", state.npts); Rprintf("\tperiod = (%lf, %lf)\n", period[0], period[1]); if(geyer->prevtype == BIRTH) error("updaux failed after BIRTH"); if(geyer->prevtype == DEATH) error("updaux failed after DEATH"); if(geyer->prevtype == SHIFT) error("updaux failed after SHIFT"); error("updaux failed at start"); } } /* OK. Record type of this transition */ geyer->prevtype = prop.itype; /* ................ end debug cross-check ................ 
*/ #endif if(prop.itype == BIRTH) { /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counter for new point */ aux[npts] = 0; /* update all auxiliary counters */ if(geyer->per) { /* periodic distance */ for(j=0; j < npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { aux[j] += 1; aux[npts] += 1; } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { aux[j] += 1; aux[npts] += 1; } } } } else if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; /* decrement auxiliary counter for each point */ if(geyer->per) { /* periodic distance */ for(j=0; j= ix) aux[j-1] = aux[j]; } } else { /* Euclidean distance */ for(j=0; j= ix) aux[j-1] = aux[j]; } } } else if(prop.itype == SHIFT) { /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute auxiliary counter for point 'ix' */ aux[ix] = 0; /* update auxiliary counters for other points */ if(geyer->per) { for(j=0; j #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiStraussHard { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... 
ntypes-1 */ double *rad2; /* squared radii */ double *hc2; /* squared radii */ double *rad2hc2; /* r^2 - h^2 */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStraussHard; /* initialiser function */ Cdata *straushminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, h, r2, h2, logg, range2; MultiStraussHard *multistrausshard; multistrausshard = (MultiStraussHard *) R_alloc(1, sizeof(MultiStraussHard)); multistrausshard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrausshard->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrausshard->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad2hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrausshard->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 values of gamma, then n^2 values of r, then n^2 values of h */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[ i + j*ntypes]; r = model.ipar[ n2 + i + j*ntypes]; h = model.ipar[2*n2 + i + j*ntypes]; r2 = r * r; h2 = h * h; hard = (g < DOUBLE_EPS); logg = (hard) ? 
0 : log(g); MAT(multistrausshard->gamma, i, j, ntypes) = g; MAT(multistrausshard->rad, i, j, ntypes) = r; MAT(multistrausshard->hc, i, j, ntypes) = h; MAT(multistrausshard->rad2, i, j, ntypes) = r2; MAT(multistrausshard->hc2, i, j, ntypes) = h2; MAT(multistrausshard->rad2hc2, i, j, ntypes) = r2-h2; MAT(multistrausshard->hard, i, j, ntypes) = hard; MAT(multistrausshard->loggamma, i, j, ntypes) = logg; if(r2 > range2) range2 = r2; } } multistrausshard->range2 = range2; /* periodic boundary conditions? */ multistrausshard->period = model.period; multistrausshard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrausshard); } /* conditional intensity evaluator */ double straushmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStraussHard *multistrausshard; DECLARE_CLOSE_D2_VARS; multistrausshard = (MultiStraussHard *) cdata; range2 = multistrausshard->range2; period = multistrausshard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrausshard->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrausshard->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < 
MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrausshard->kount, m1, m2, ntypes); if(MAT(multistrausshard->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrausshard->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussHardCifns = { &straushminit, &straushmcif, (updafunptr) NULL, YES}; spatstat/src/pairloop.h0000644000176200001440000000344713406057617014723 0ustar liggesusers/* pairloop.h Generic code template for loop collecting contributions to point x_i from all points x_j such that ||x_i - x_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n, maxchunk; double xi, yi, dx, dy, dx2, d2, r2max; double *x, *y; $Revision: 1.5 $ $Date: 
2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n, maxchunk, 65536) { \ \ xi = x[i]; \ yi = y[i]; \ \ INITIAL_I; \ \ if(i > 0) { \ for(j=i-1; j >= 0; j--) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ \ if(i+1 < n) { \ for(j=i+1; j < n; j++) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ COMMIT_I; \ } \ } spatstat/src/proto.h0000644000176200001440000005265613623714551014245 0ustar liggesusers#include #include /* Prototype declarations for all native routines in spatstat package Automatically generated - do not edit! 
*/ /* Functions invoked by .C */ void areadifs(double *, int *, double *, double *, int *, int *, double *); void areaBdif(double *, int *, double *, double *, int *, int *, double *, double *, double *, double *, double *); void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *); void delta2area(double *, double *, double *, double *, int *, double *, double *, double *, double *, int *); void digberJ(double *, double *, int *, int *, int *, double *); void xysegint(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *); void Fclosepairs(int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *); void paircount(int *, double *, double *, double *, int *); void Fclosepairs(int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *); void crosscount(int *, double *, double *, int *, double *, double *, double *, int *); void Fcrosspairs(int *, double *, double *, int *, double *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int *); void cocoImage(int *, int *, int *); void cocoGraph(int *, int *, int *, int *, int *, int *); void lincrossdist(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, int *, double *); void trigrafS(int *, int *, int *, int *, int *, int *, int *, int *, int *, int *); void trigraf(int *, int *, int *, int *, int *, int *, int *, int *, int *, int *); void Idist2dpath(int *, int *, int *, int *, int *, int *, int *); void Gdenspt(int *, double *, double *, double *, double *); void Gwtdenspt(int *, double *, double *, double *, double *, double *); void Gwtdenspt(int *, double *, double *, 
double *, double *, double *); void denspt(int *, double *, double *, double *, double *, double *); void wtdenspt(int *, double *, double *, double *, double *, double *, double *); void wtdenspt(int *, double *, double *, double *, double *, double *, double *); void adenspt(int *, double *, double *, double *, double *, double *, double *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, double *); void crdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void acrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void segdens(double *, int *, double *, double *, double *, double *, int *, double *, double *, double *); void Ediggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *); void ESdiggra(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *, int *); void Ediggatsti(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *); void discareapoly(int *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *, double *); void Ddist2dpath(int *, double *, int *, double *, double *, int *, int *); void D3pairdist(int *, double *, double *, double *, int *, double *); void D3pairPdist(int *, double *, double *, double *, double *, double 
*, double *, int *, double *); void nnd3D(int *, double *, double *, double *, double *, int *, double *); void knnd3D(int *, int *, double *, double *, double *, double *, int *, double *); void nnw3D(int *, double *, double *, double *, double *, int *, double *); void knnw3D(int *, int *, double *, double *, double *, double *, int *, double *); void D3crossdist(int *, double *, double *, double *, int *, double *, double *, double *, int *, double *); void D3crossPdist(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, int *, double *); void Cpairdist(int *, double *, double *, int *, double *); void CpairPdist(int *, double *, double *, double *, double *, int *, double *); void Ccrossdist(int *, double *, double *, int *, double *, double *, int *, double *); void CcrossPdist(int *, double *, double *, int *, double *, double *, double *, double *, int *, double *); void nndMD(int *, int *, double *, double *, double *); void knndMD(int *, int *, int *, double *, double *, double *); void nnwMD(int *, int *, double *, double *, int *, double *); void knnwMD(int *, int *, int *, double *, double *, int *, double *); void nnXwMD(int *, int *, double *, int *, double *, double *, int *, double *); void nnXxMD(int *, int *, double *, int *, int *, double *, int *, double *, int *, double *); void knnXwMD(int *, int *, double *, int *, double *, int *, double *, int *, double *); void knnXxMD(int *, int *, double *, int *, int *, double *, int *, int *, double *, int *, double *); void distmapbin(double *, double *, double *, double *, int *, int *, int *, double *, double *); void ripleybox(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void ripleypoly(int *, double *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *); void rippolDebug(int *, double *, double *, double *, int *, double *, int *, double *, 
double *, double *, double *, double *); void tabsumweight(int *, double *, double *, int *, double *, double *); void exact_dt_R(double *, double *, int *, double *, double *, double *, double *, int *, int *, int *, int *, double *, int *, double *); void ps_exact_dt_R(double *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, int *, double *); void fardist2grid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *); void fardistgrid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *); void RcallK3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *); void RcallG3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *); void RcallF3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *); void RcallG3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *); void Rcallpcf3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, double *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *); void Efiksel(int *, double *, double *, 
int *, double *, double *, double *, double *, double *); void Egeyer(int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, double *); void hasXclose(int *, double *, double *, double *, int *); void hasXpclose(int *, double *, double *, double *, double *, int *); void hasXYclose(int *, double *, double *, int *, double *, double *, double *, int *); void hasXYpclose(int *, double *, double *, int *, double *, double *, double *, double *, int *); void hasX3close(int *, double *, double *, double *, double *, int *); void hasX3pclose(int *, double *, double *, double *, double *, double *, int *); void hasXY3close(int *, double *, double *, double *, int *, double *, double *, double *, double *, int *); void hasXY3pclose(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *); void Cidw(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *); void Cidw2(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void idwloo(double *, double *, double *, int *, double *, double *, double *, double *); void idwloo2(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void nearestvalidpixel(int *, double *, double *, int *, int *, double *, int *, int *, int *, int *); void locprod(int *, double *, double *, double *, int *, double *, double *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *); void KborderI(int *, double *, double *, double *, int *, double *, int *, int *); void KborderD(int *, double *, double *, double *, int *, double *, double *, double *); void Kwborder(int *, double *, double *, double *, double *, int *, double *, double *, double *); void KnoneI(int *, double *, double *, int *, double *, int *); 
void KnoneD(int *, double *, double *, int *, double *, double *); void Kwnone(int *, double *, double *, double *, int *, double *, double *); void KrectWtd(double *, double *, int *, double *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *); void KrectInt(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, int *, int *, int *); void KrectDbl(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *); void Csumouter(double *, int *, int *, double *); void Cwsumouter(double *, int *, int *, double *, double *); void Csum2outer(double *, double *, int *, int *, int *, double *); void Cwsum2outer(double *, double *, int *, int *, int *, double *, double *); void Cquadform(double *, int *, int *, double *, double *); void Cbiform(double *, double *, int *, int *, double *, double *); void Csumsymouter(double *, int *, int *, double *); void Cwsumsymouter(double *, double *, int *, int *, double *); void Ccountends(int *, double *, int *, double *, int *, double *, double *, int *, int *, int *, double *, double *, double *, int *); void Clinequad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *); void ClineRquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *); void ClineMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *); void ClineRMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, 
double *, int *, int *, double *, double *, int *); void linearradius(int *, int *, int *, double *, int *, double *, double *, double *); void cocoGraph(int *, int *, int *, int *, int *, int *); void cocoGraph(int *, int *, int *, int *, int *, int *); void lintileindex(int *, int *, double *, int *, int *, double *, double *, int *, int *); void Clixellate(int *, int *, int *, int *, int *, int *, double *, double *, int *, double *, int *, int *, int *, double *, int *, double *); void locpcfx(int *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, double *); void locWpcfx(int *, double *, double *, int *, int *, double *, double *, int *, double *, int *, double *, double *, double *); void cocoGraph(int *, int *, int *, int *, int *, int *); void mdtPOrect(double *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *, int *, int *); void mdtPOrect(double *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *, int *, int *); void minPnnd2(int *, double *, double *, double *, double *); void minnnd2(int *, double *, double *, double *, double *); void maxPnnd2(int *, double *, double *, double *, double *); void maxnnd2(int *, double *, double *, double *, double *); void nnX3Dinterface(int *, double *, double *, double *, int *, int *, double *, double *, double *, int *, int *, int *, int *, double *, int *, double *); void knnX3Dinterface(int *, double *, double *, double *, int *, int *, double *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *); void nnXinterface(int *, double *, double *, int *, int *, double *, double *, int *, int *, int *, int *, double *, int *, double *); void knnXinterface(int *, double *, double *, int *, int *, double *, double *, int *, int *, int *, int *, int *, double *, int *, double *); void linnndist(int *, double *, double *, int *, double *, double *, int *, int *, int *, 
double *, int *, double *, double *); void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linnnwhich(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *, int *); void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linknncross(int *, int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linSnndwhich(int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linndcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, double *, double *, int *); void linndxcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, int *); void nndistsort(int *, double *, double *, double *, double *); void knndsort(int *, int *, double *, double *, double *, double *); void nnwhichsort(int *, double *, double *, int *, double *); void knnwhich(int *, int *, double *, double *, int *, double *); void Clinvwhichdist(int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linvknndist(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void nnGinterface(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, double *, int *, double *); void knnGinterface(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *); void linpairdist(int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, double *); void poly2imA(int *, 
int *, double *, double *, int *, double *, int *); void xypsi(int *, double *, double *, double *, double *, double *, double *, double *, int *, int *); void Cxypolyselfint(int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *); void auctionbf(int *, int *, int *, double *, double *, int *, double *); void dwpure(int *, int *, int *, int *, int *, int *); void auctionbf(int *, int *, int *, double *, double *, int *, double *); void dwpure(int *, int *, int *, int *, int *, int *); void dinfty_R(int *, int *, int *); void dwpure(int *, int *, int *, int *, int *, int *); void dwpure(int *, int *, int *, int *, int *, int *); void seg2pixI(int *, double *, double *, double *, double *, int *, int *, int *); void seg2pixL(int *, double *, double *, double *, double *, double *, double *, double *, int *, int *, double *); void seg2pixN(int *, double *, double *, double *, double *, double *, int *, int *, double *); void xysegint(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *); void xysi(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *); void xysiANY(int *, double *, double *, double *, double *, int *, double *, double *, double *, double *, double *, int *); void xysegXint(int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *); void xysxi(int *, double *, double *, double *, double *, double *, int *); void Corput(int *, int *, double *); void raster3filter(int *, int *, double *, double *, double *); void knownCif(char *, int *); void scantrans(double *, double *, int *, double *, double *, double *, double *, int *, int *, double *, int *); void Gsmoopt(int *, double *, double *, double *, int *, double *, double *); void Gwtsmoopt(int *, double *, double *, double *, int *, double *, double *, double 
*); void smoopt(int *, double *, double *, double *, int *, double *, double *, double *); void wtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *); void asmoopt(int *, double *, double *, double *, int *, double *, double *, double *); void awtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *); void crsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void wtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void acrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void awtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void CspaSumSymOut(int *, int *, int *, int *, int *, int *, double *, int *, double *); void CspaWtSumSymOut(int *, int *, int *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *); void Ccrosspaircounts(int *, double *, double *, int *, double *, double *, double *, int *); void Cclosepaircounts(int *, double *, double *, double *, int *); void uniqmapxy(int *, double *, double *, int *); void uniqmap2M(int *, double *, double *, int *, int *); void anydupxy(int *, double *, double *, int *); void poly2imI(double *, double *, int *, int *, int *, int *); void bdrymask(int *, int *, int *, int *); void discs2grid(int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, int *); /* Functions invoked by .Call */ SEXP close3pairs(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP close3IJpairs(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP close3IJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP cross3pairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP cross3IJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP cross3IJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP closePpair(SEXP, 
SEXP, SEXP, SEXP, SEXP); SEXP Vclosepairs(SEXP, SEXP, SEXP, SEXP); SEXP VcloseIJpairs(SEXP, SEXP, SEXP, SEXP); SEXP VcloseIJDpairs(SEXP, SEXP, SEXP, SEXP); SEXP altVclosepairs(SEXP, SEXP, SEXP, SEXP); SEXP altVcloseIJpairs(SEXP, SEXP, SEXP, SEXP); SEXP altVcloseIJDpairs(SEXP, SEXP, SEXP, SEXP); SEXP Vcrosspairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP VcrossIJpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP VcrossIJDpairs(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP Vclosethresh(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP trioxgraph(SEXP, SEXP, SEXP, SEXP); SEXP triograph(SEXP, SEXP, SEXP); SEXP trigraph(SEXP, SEXP, SEXP); SEXP triDgraph(SEXP, SEXP, SEXP, SEXP); SEXP triDRgraph(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP graphVees(SEXP, SEXP, SEXP); SEXP Cxysegint(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP CxysegXint(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP CxysegXint(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP thinjumpequal(SEXP, SEXP, SEXP); SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP xmethas(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectStrauss(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectHardcore(SEXP, SEXP, SEXP, SEXP); SEXP PerfectStraussHard(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectDiggleGratton(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); SEXP PerfectDGS(SEXP, SEXP, SEXP, SEXP); SEXP PerfectPenttinen(SEXP, SEXP, SEXP, SEXP, SEXP); SEXP Cwhist(SEXP, SEXP, SEXP); spatstat/src/distmapbin.c0000755000176200001440000000661613406057617015227 0ustar liggesusers/* distmapbin.c Distance transform of a discrete binary image (8-connected path metric) $Revision: 1.7 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include "raster.h" #include void 
dist_to_bdry();   /* distance-to-boundary raster; defined elsewhere (return type 'void' precedes) */
void shape_raster();   /* raster geometry initialiser; defined in raster.c */

/*
  distmap_bin:
  Distance transform of a binary raster image, computed by the classical
  two-pass chamfer algorithm over the 8-connected neighbourhood:
  a forward (top-left to bottom-right) sweep propagating from the four
  already-visited neighbours, then a backward sweep from the other four.
  Step costs are the physical pixel spacings (xstep, ystep) and the
  diagonal spacing, so the result approximates Euclidean distance to the
  nearest TRUE pixel of 'in', written into 'dist'.
*/
void
dist_to_bdry();
void shape_raster();

void
distmap_bin(in, dist)
     Raster *in;            /* input: binary image */
     Raster *dist;          /* output: distance to nearest point */
     /* rasters must have been dimensioned by shape_raster()
        and must all have identical dimensions and margins */
{
  int j,k;
  double d, dnew;
  double xstep, ystep, diagstep, huge;
  int rmin, rmax, cmin, cmax;

  /* distances between neighbouring pixels */
  xstep = in->xstep;
  ystep = in->ystep;
  diagstep = sqrt(xstep * xstep + ystep * ystep);
  /* steps may be negative if the raster axes are flipped; use magnitudes */
  if(xstep < 0) xstep = -xstep;
  if(ystep < 0) ystep = -ystep;

  /* effectively infinite distance (twice the frame diagonal) */
  huge = 2.0 * Distance(dist->xmin,dist->ymin,dist->xmax,dist->ymax);

  /* image boundaries */
  rmin = in->rmin;
  rmax = in->rmax;
  cmin = in->cmin;
  cmax = in->cmax;

  /* convenience accessors into the rasters */
#define DISTANCE(ROW, COL) Entry(*dist, ROW, COL, double)
#define MASKTRUE(ROW, COL) (Entry(*in, ROW, COL, int) != 0)
#define MASKFALSE(ROW, COL) (Entry(*in, ROW, COL, int) == 0)
  /* relax D using neighbour (ROW,COL) at step cost STEP */
#define UPDATE(D, ROW, COL, STEP) \
  dnew = STEP + DISTANCE(ROW, COL); \
  if(D > dnew) D = dnew

  /* initialise edges to boundary condition
     (the margin of 1 pixel around the image proper) */
  for(j = rmin-1; j <= rmax+1; j++) {
    DISTANCE(j, cmin-1) = (MASKTRUE(j, cmin-1)) ? 0.0 : huge;
    DISTANCE(j, cmax+1) = (MASKTRUE(j, cmax+1)) ? 0.0 : huge;
  }
  for(k = cmin-1; k <= cmax+1; k++) {
    DISTANCE(rmin-1, k) = (MASKTRUE(rmin-1, k)) ? 0.0 : huge;
    DISTANCE(rmax+1, k) = (MASKTRUE(rmax+1, k)) ? 0.0 : huge;
  }

  /* forward pass: propagate from N, NW, NE and W neighbours */
  for(j = rmin; j <= rmax; j++) {
    R_CheckUserInterrupt();
    for(k = cmin; k <= cmax; k++) {
      if(MASKTRUE(j, k))
	d = DISTANCE(j, k) = 0.0;
      else {
	d = huge;
	UPDATE(d, j-1, k-1, diagstep);
	UPDATE(d, j-1, k,   ystep);
	UPDATE(d, j-1, k+1, diagstep);
	UPDATE(d, j,   k-1, xstep);
	DISTANCE(j,k) = d;
      }
    }
  }

  /* backward pass: propagate from S, SE, SW and E neighbours.
     TRUE pixels (distance 0) need no further relaxation, hence MASKFALSE */
  for(j = rmax; j >= rmin; j--) {
    R_CheckUserInterrupt();
    for(k = cmax; k >= cmin; k--) {
      if(MASKFALSE(j,k)) {
	d = DISTANCE(j,k);
	UPDATE(d, j+1, k+1, diagstep);
	UPDATE(d, j+1, k,   ystep);
	UPDATE(d, j+1, k-1, diagstep);
	UPDATE(d, j,   k+1, xstep);
	DISTANCE(j,k) = d;
      }
    }
  }
}

/* R interface: entry point for .C().
   Wraps the flat arrays from R in Raster structures (with a 1-pixel
   margin added on every side), runs the distance transform, and also
   fills 'boundary' with the distance to the enclosing rectangle. */
void distmapbin(xmin, ymin, xmax, ymax, nr, nc,
		inp, distances, boundary)
     double *xmin, *ymin, *xmax, *ymax;  /* x, y dimensions */
     int *nr, *nc;	         /* raster dimensions
				    EXCLUDING margin of 1 on each side */
     int *inp;              /* input:  binary image */
     double *distances;	    /* output: distance to nearest point */
     double *boundary;	    /* output: distance to boundary of rectangle */
     /* all images must have identical dimensions
	including a margin of 1 on each side */
{
  Raster data, dist, bdist;

  shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax,
		*nr+2, *nc+2, 1, 1);
  shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax,
		*nr+2,*nc+2,1,1);
  shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax,
		*nr+2,*nc+2,1,1);

  distmap_bin(&data, &dist);
  dist_to_bdry(&bdist);
}
spatstat/src/yesno.h0000644000176200001440000000011613115225157014212 0ustar liggesusers/* yesno.h
   Boolean constants for C code that predates <stdbool.h>. */
#ifndef YES
#define YES (0 == 0)
#define NO  (!YES)
#endif
spatstat/src/KrectV2.h0000644000176200001440000000027313115225157014341 0ustar liggesusers/*
  KrectV3.h
  with or without translation correction
  (include-template dispatch: compiles KrectV3.h twice,
  with and without the TRANSLATION flag)
*/
     if((*doTrans) == 1) {
       /* with translation correction */
#define TRANSLATION
#include "KrectV3.h"
     } else {
       /* without translation correction */
#undef TRANSLATION
#include "KrectV3.h"
     }
spatstat/src/Perfect.cc0000755000176200001440000005764113553250551014625 0ustar liggesusers// Debug switch
// #define DBGS

/* NOTE(review): the header names of the following #include directives were
   lost in extraction (text between '<' and '>' stripped) — recover from the
   original Perfect.cc before compiling. */
#include
#include
#include
#include
#include
#include #include #include #include // #include // FILE *out; // File i/o is deprecated in R implementation #ifdef DBGS #define CHECK(PTR,MESSAGE) if(((void *) PTR) == ((void *) NULL)) error(MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) { \ Rprintf("Value of %s exceeds upper limit %d\n", XNAME, HIGH); \ X = HIGH; \ } else if((X) < (LOW)) { \ Rprintf("Value of %s is below %d\n", XNAME, LOW); \ X = LOW; \ } #else #define CHECK(PTR,MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) X = HIGH; else if((X) < (LOW)) X = LOW; #endif // ......................................... // memory allocation // using R_alloc #define ALLOCATE(TYPE) (TYPE *) R_alloc(1, sizeof(TYPE)) #define FREE(PTR) // Alternative using Calloc and Free // #define ALLOCATE(TYPE) (TYPE *) Calloc(1, sizeof(TYPE)) // #define FREE(PTR) Free(PTR) void R_CheckUserInterrupt(void); struct Point{ long int No; float X; float Y; float R; struct Point *next; }; struct Point2{ long int No; float X; float Y; char InLower[2]; double Beta; double TempBeta; struct Point2 *next; }; struct Point3{ char Case; char XCell; char YCell; struct Point3 *next; }; // const float Pi=3.141593; double slumptal(void){ return(runif((double) 0.0, (double) 1.0)); } long int poisson(double lambda){ return((long int)rpois(lambda)); } // ........................... Point patterns .......................... 
/* Point2Pattern: a planar point pattern stored in a (≤10 × ≤10) grid of
   circular singly-linked lists.  Each cell list is terminated by the shared
   sentinel 'dummyCell' (which points to itself, so "node->next == node"
   marks the end).  Used by the perfect samplers to hold the upper/lower
   bounding processes. */
class Point2Pattern {
 public:
  long int UpperLiving[2];            // point counts of the two sandwich processes
  long int MaxXCell, MaxYCell, NoP;   // largest cell indices (≤9) and point count
  double XCellDim, YCellDim, Xmin, Xmax, Ymin, Ymax;  // cell size and window
  struct Point2 *headCell[10][10],*dummyCell;         // per-cell heads + sentinel
  char DirX[10], DirY[10];            // offsets of the 8 neighbouring cells (indices 1..8)
  Point2Pattern(double xmin, double xmax,
		double ymin, double ymax,
		long int mxc, long int myc){
    long int i,j;
    UpperLiving[0] = 0;
    UpperLiving[1] = 0;
    Xmin = xmin; Xmax = xmax;
    Ymin = ymin; Ymax = ymax;
    /* neighbour directions, clockwise from East; slot 0 unused */
    DirX[1] = 1; DirY[1] = 0;
    DirX[2] = 1; DirY[2] = -1;
    DirX[3] = 0; DirY[3] = -1;
    DirX[4] = -1; DirY[4] = -1;
    DirX[5] = -1; DirY[5] = 0;
    DirX[6] = -1; DirY[6] = 1;
    DirX[7] = 0; DirY[7] = 1;
    DirX[8] = 1; DirY[8] = 1;
    NoP = 0;
    //
    dummyCell = ALLOCATE(struct Point2);
    //
    dummyCell->next = dummyCell;   /* self-referencing sentinel */
    dummyCell->No = 0;
    MaxXCell = mxc; MaxYCell = myc;
    /* headCell is statically 10x10, so clamp the grid */
    if(MaxXCell>9) MaxXCell = 9;
    if(MaxYCell>9) MaxYCell = 9;
    for(i=0;i<=MaxXCell;i++){
      for(j=0;j<=MaxYCell;j++){
	//
	headCell[i][j] = ALLOCATE(struct Point2);
	//
	headCell[i][j]->next=dummyCell;   /* all cells start empty */
      }
    }
    XCellDim = (Xmax-Xmin)/((double)(MaxXCell+1));
    YCellDim = (Ymax-Ymin)/((double)(MaxYCell+1));
  };
  ~Point2Pattern(){}
  // void Print();
  void Return(double *X, double *Y, int *num, int maxnum);
  long int Count();
  long int UpperCount();
  void Empty();
  void Clean();
  // void DumpToFile(char FileName[100]);
  // void ReadFromFile(char FileName[100]);
};

// void Point2Pattern::Print(){
//   long int i,j,k;
//   k = 0;
//   struct Point2 *TempCell;
//   for(i=0;i<=MaxXCell;i++){
//     for(j=0;j<=MaxYCell;j++){
//       //Rprintf("%d %d:\n",i,j);
//       TempCell = headCell[i][j]->next;
//       CHECK(TempCell, "internal error: TempCell is null in Print()");
//       while(TempCell->next != TempCell){
//	k++;
//	Rprintf("%f %f %ld %ld %ld=%d %ld=%d UL0 %d UL1 %d %f\n",
//	       TempCell->X,TempCell->Y,k,
//	       TempCell->No,
//	       i,int(TempCell->X/XCellDim),
//	       j,int(TempCell->Y/YCellDim),
//	       TempCell->InLower[0],TempCell->InLower[1],
//	       TempCell->Beta);
//	TempCell = TempCell->next;
//      CHECK(TempCell, "internal error: TempCell is null in Print() loop");
//       }
//     }
//   }
//   Rprintf("Printed %ld points.\n",k);
// }

/* Copy the coordinates of all points into the arrays X, Y (of length
   maxnum).  On success *num is the number of points; if the pattern may
   exceed maxnum (judged by UpperLiving[0]), nothing is copied and
   *num is set to -1 so the caller can enlarge its buffers. */
void Point2Pattern::Return(double *X, double *Y, int *num, int maxnum){
  long int i,j,k;
  k =0;
  *num = 0;

#ifdef DBGS
  Rprintf("executing Return()\n");
#endif

  if(UpperLiving[0]<=maxnum){
    struct Point2 *TempCell;
    for(i=0;i<=MaxXCell;i++){
      for(j=0;j<=MaxYCell;j++){
#ifdef DBGS
	//	Rprintf("%d %d:\n",i,j);
#endif
	TempCell = headCell[i][j]->next;
	CHECK(TempCell, "internal error: TempCell is null in Return()");
	while(TempCell->next != TempCell){
	  X[k] = TempCell->X;
	  Y[k] = TempCell->Y;
	  k++;
	  TempCell = TempCell->next;
	  CHECK(TempCell, "internal error: TempCell is null in Return() loop");
	}
      }
    }
    *num = k;
  } else {
    *num = -1;
  }
}

/* Exact number of points, by walking every cell list. */
long int Point2Pattern::Count(){
  long int i,j,k;
  k = 0;
  struct Point2 *TempCell;
  for(i=0;i<=MaxXCell;i++){
    for(j=0;j<=MaxYCell;j++){
      //      Rprintf("%d %d:\n",i,j);
      TempCell = headCell[i][j]->next;
      CHECK(TempCell, "internal error: TempCell is null in Count()");
      while(TempCell->next != TempCell){
	k++;
	TempCell = TempCell->next;
	CHECK(TempCell, "internal error: TempCell is null in Count() loop");
      }
    }
  }
  //Rprintf("Printed %d points.\n",k);
  return(k);
}

// a quick (over)estimate of the number of points in the pattern,
// for storage allocation
long int Point2Pattern::UpperCount(){
  return(UpperLiving[0]);
}

/* Discard every point, resetting each cell list to the sentinel.
   (FREE is a no-op under R_alloc; memory is reclaimed by R.) */
void Point2Pattern::Empty(){
  struct Point2 *TempCell, *TempCell2;
  long int i,j;
#ifdef DBGS
  long int k;
  k=0;
  Rprintf("executing Empty()\n");
#endif
  for(i=0; i<=this->MaxXCell; i++){
    for(j=0; j<=this->MaxYCell; j++){
      TempCell = headCell[i][j]->next;
      CHECK(TempCell, "internal error: TempCell is null in Empty()");
      while(TempCell!=TempCell->next){
#ifdef DBGS
	//	k++; Rprintf("%d %d %d\n",i,j,k);
#endif
	TempCell2 = TempCell->next;
	FREE(TempCell);
	TempCell = TempCell2;
	CHECK(TempCell, "internal error: TempCell is null in Empty() loop");
      }
      headCell[i][j]->next = dummyCell;
    }
  }
}

/* Remove points not in the lower process (InLower[0]==0) and zero the
   serial numbers of the survivors — called once upper and lower
   processes have coalesced. */
void Point2Pattern::Clean(){
  struct Point2 *TempCell, *TempCell2;
  long int i,j;
#ifdef DBGS
  Rprintf("executing Clean()\n");
#endif
  for(i=0; i<=MaxXCell; i++){
    for(j=0; j<=MaxYCell; j++){
      TempCell = headCell[i][j];
      CHECK(TempCell, "internal error: TempCell is null in Clean()");
      TempCell2 = headCell[i][j]->next;
      CHECK(TempCell2, "internal error: TempCell2 is null in Clean()");
      while(TempCell2!=TempCell2->next){
	TempCell2->No = 0;
	if(TempCell2->InLower[0]==0){
	  /* unlink and drop this node */
	  TempCell->next = TempCell2->next;
	  FREE(TempCell2);
	  TempCell2 = TempCell->next;
	  CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop A");
	}
	else{
	  TempCell2 = TempCell2->next;
	  TempCell = TempCell->next;
	  CHECK(TempCell, "internal error: TempCell is null in Clean() loop B");
	  CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop B");
	}
      }
    }
  }
}

//void Point2Pattern::DumpToFile(char FileName[100]){
//  FILE *out;
//  long int i,j;
//  out = fopen(FileName,"w");
//  struct Point2 *TempCell;
//  for(i=0;i<=MaxXCell;i++){
//    for(j=0;j<=MaxYCell;j++){
//      //Rprintf("%d %d:\n",i,j);
//      TempCell = headCell[i][j]->next;
//      while(TempCell->next != TempCell){
//	fprintf(out,"%f\t%f\t%ld\n",
//	       TempCell->X,TempCell->Y,TempCell->No);
//	TempCell = TempCell->next;
//      }
//    }
//  }
//  fclose(out);
//}

//void Point2Pattern::ReadFromFile(char FileName[100]){
//  FILE *out;
//  long int k,XCell,YCell;
//  float f1,xs,ys;
//  out = fopen(FileName,"r");
//  struct Point2 *TempCell;
//  k=0;
//  while(feof(out)==0){
//    k++;
//    fscanf(out,"%f%f\n",&xs,&ys);
//    //Rprintf("%f %f\n",xs,ys);
//    //
//    TempCell = ALLOCATE(struct Point2);
//    //
//    TempCell->No = k;
//    TempCell->X = xs;
//    TempCell->Y = ys;
//    TempCell->InLower[0] = 1;
//    TempCell->InLower[1] = 1;
//
//    f1 = (xs-Xmin)/XCellDim; XCell = int(f1);
//    if(XCell>MaxXCell) XCell = MaxXCell;
//    f1 = (ys-Ymin)/YCellDim; YCell = int(f1);
//    if(YCell>MaxYCell) YCell = MaxYCell;
//
//    TempCell->next = headCell[XCell][YCell]->next;
//    headCell[XCell][YCell]->next = TempCell;
//  }
//  fclose(out);
//  Rprintf("%ld points loaded.\n",k);
//}

// ........................... Point processes ..........................
// ......................
// (stationary, pairwise interaction) ............

/* PointProcess: abstract base class for a stationary pairwise-interaction
   point process on the rectangle [Xmin,Xmax] x [Ymin,Ymax].  Concrete
   models (Strauss, Hardcore, DGS, ...) implement event generation and the
   pairwise interaction function; see the PerfectXXX.h headers included at
   the end of this file. */
class PointProcess {
 public:
  double Xmin, Xmax, Ymin, Ymax, TotalBirthRate, InteractionRange;
  PointProcess(double xmin, double xmax, double ymin, double ymax){
    Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax;
  }
  ~PointProcess(){}
  /* propose a new point; *InWindow flags whether it fell inside the window */
  virtual void NewEvent(double *x, double *y, char *InWindow)=0;
  /* generate the initial dominating Poisson pattern */
  virtual void GeneratePoisson(Point *headPoint,
			       long int *GeneratedPoints,
			       long int *LivingPoints,
			       long int *NoP)=0;
  /* pairwise interaction as a function of SQUARED distance */
  virtual double Interaction(double dsquared)=0;
  //  virtual void CalcBeta(long int xsidepomm, long int ysidepomm,
  //		   double *betapomm){
  //    Rprintf("Define CalcBeta...\n");
  //  }
  //  virtual void CheckBeta(long int xsidepomm, long int ysidepomm,
  //			 double *betapomm){
  //    Rprintf("Define CheckBeta...\n");
  //  }
  //  virtual double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p)
  //  { return(0.0);};
  //  virtual double lnDens(Point2Pattern *p2p);
  //  virtual void Beta(struct Point2 *TempCell){
  //    TempCell->Beta = 0;
  //    Rprintf("Define Beta...\n");};
};

//double PointProcess::lnDens(Point2Pattern *p2p){
//  //  double f1;
//  long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx;
//  double dy,dx, lnDens,dst2;
//  struct Point2 *TempCell, *TempCell2;
//
//  dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1));
//  dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1));
//  rx = int(InteractionRange/dx+1.0);
//  ry = int(InteractionRange/dy+1.0);
//
//  //  Rprintf("1:%f 2:%f 3:%d 4:%d 5:%f 6:%f\n",dx,dy,rx,ry,
//  //	  this->InteractionRange,InteractionRange);
//  //  Rprintf("mx:%d my:%d\n",p2p->MaxXCell,p2p->MaxYCell);
//
//  lnDens = 0;
//
//  //  Rprintf("lnDens: %f (0)\n",lnDens);
//
//  for(xc = 0; xc <= p2p->MaxXCell; xc++){
//    for(yc = 0; yc <= p2p->MaxYCell; yc++){
//      //if(xc==1) Rprintf("%d %d\n",xc,yc);
//      CHECK(p2p->headCell[xc][yc],
//	    "internal error: p2p->headCell[xc][yc] is null in lnDens()");
//      TempCell = p2p->headCell[xc][yc]->next;
//      CHECK(TempCell, "internal error: TempCell is null in lnDens()");
//      while(TempCell != TempCell->next){
//	lnDens += log(TempCell->Beta);
//	//Rprintf("lnDens: %f (1) %d %d %d %d Beta %f\n",lnDens,xc,yc,
//	//	p2p->MaxXCell,p2p->MaxYCell,TempCell->Beta);
//	//if(lnDens<(-100000)){Rprintf("%f",lnDens); scanf("%f",&f1);}
//	if(InteractionRange>0){
//	  if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell;
//	  if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell;
//	  if((xc-rx)>=0) fx=xc-rx; else fx = 0;
//	  if((yc-ry)>=0) fy=yc-ry; else fy = 0;
//	  for(xco = fx; xco <= tx; xco++){
//	    for(yco = fy; yco <= ty; yco++){
//	      //if(xc==1) Rprintf("%d %d %d %d %d %d\n",xco,yco,fx,tx,fy,ty);
//	      CHECK(p2p->headCell[xco][yco],
//	        "internal error: p2p->headCell[xco][yco] is null in lnDens() loop");
//	      TempCell2 = p2p->headCell[xco][yco]->next;
//	      CHECK(TempCell2,
//		    "internal error: TempCell2 is null in lnDens() loop A");
//	      while(TempCell2!=TempCell2->next){
//		if(TempCell2 != TempCell){
//		  dst2 = pow(TempCell->X-TempCell2->X,2)+
//		    pow(TempCell->Y-TempCell2->Y,2);
//		  lnDens += log(Interaction(dst2));
//		}
//		TempCell2 = TempCell2->next;
//		CHECK(TempCell2,
//		      "internal error: TempCell2 is null in lnDens() loop B");
//	      }
//	    }
//	  }
//	  //Rprintf("lnDens: %f\n",lnDens);
//	}
//	TempCell = TempCell->next;
//	CHECK(TempCell,
//	      "internal error: TempCell is null in lnDens() at end");
//      }
//    }
//  }
//  return(lnDens);
//}

// ........................... Sampler ..........................
/* Sampler: perfect (exact) simulation of a pairwise-interaction point
   process by dominated coupling-from-the-past.  BirthDeath() runs the
   dominating spatial birth-death chain backwards in time; Forward()
   replays its transitions forwards, maintaining upper/lower bounding
   ("sandwich") processes in a Point2Pattern; Sim() doubles the time
   horizon until the two bounds coalesce. */
class Sampler{
 public:
  PointProcess *PP;      // target process (supplies birth rate, interaction)
  Point2Pattern *P2P;    // sandwich pattern being evolved
  long int GeneratedPoints, LivingPoints, NoP;
  //long int UpperLiving[2];
  Sampler(PointProcess *p){ PP = p;}
  ~Sampler(){}
  void Sim(Point2Pattern *p2p, long int *ST, long int *ET);
  long int BirthDeath(long int TimeStep,
		      struct Point *headLiving,
		      struct Point *headDeleted,
		      struct Point3 *headTransition);
  // WAS: Sampler::Forward
  void Forward(long int TS, long int TT, char TX, char TY,
	       struct Point *Proposal, long int *DDD);
};

/* Replay one recorded transition forwards in time.
   TT==1: birth of *Proposal — insert it into its grid cell, compute the
   product of interactions with neighbours in the same and the 8 adjacent
   cells separately under the lower (InLower[0]) and upper (InLower[1])
   processes, and accept into each bound by comparing the stored uniform
   mark tmpR with the OPPOSITE bound's gamma (the sandwich inequality).
   TT==0: death — locate point number *DDD in cell (TX,TY), update the
   bound counts and unlink it, then decrement *DDD. */
void Sampler::Forward(long int TS, long int TT, char TX, char TY,
		      struct Point *Proposal, long int *DDD){
  long int XCell, YCell, DirectionN;
  double dtmp2,dtmpx,dtmpy, tmpR, TempGamma[2], TempI;
  struct Point2 *TempCell, *TempCell2;
  float f1;

  /* Birth */
  if(TT==1){
    /* locate the grid cell of the proposed point */
    f1 = (Proposal->X-P2P->Xmin)/P2P->XCellDim; XCell = int(f1);
    CLAMP(XCell, 0, P2P->MaxXCell, "XCell");
    f1 = (Proposal->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(f1);
    CLAMP(YCell, 0, P2P->MaxYCell, "YCell");
    //
    TempCell = ALLOCATE(struct Point2);
    //
    TempCell->No = Proposal->No;
    TempCell->X = Proposal->X;
    TempCell->Y = Proposal->Y;
    tmpR = Proposal->R;
    TempCell->next = P2P->headCell[XCell][YCell]->next;
    P2P->headCell[XCell][YCell]->next = TempCell;
    TempCell->InLower[0]=0;
    TempCell->InLower[1]=0;
    TempGamma[0] = 1.0;
    TempGamma[1] = 1.0;
    /*same cell*/
    TempCell2 = TempCell->next;
    CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case");
    while(TempCell2 != TempCell2->next){
      dtmpx = TempCell->X - TempCell2->X;
      dtmpy = TempCell->Y - TempCell2->Y;
      dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy;
      TempI = PP->Interaction(dtmp2);
      if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI;
      if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI;
      TempCell2=TempCell2->next;
      CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop");
    }
    /*eight other cells*/
    for(DirectionN=1;DirectionN<=8;DirectionN++){
      if(((XCell+P2P->DirX[DirectionN])>=0) &&
	 ((XCell+P2P->DirX[DirectionN])<=P2P->MaxXCell) &&
	 ((YCell+P2P->DirY[DirectionN])>=0) &&
	 ((YCell+P2P->DirY[DirectionN])<=P2P->MaxYCell)){
	CHECK(P2P->headCell[XCell+P2P->DirX[DirectionN]][YCell+P2P->DirY[DirectionN]],
	      "internal error: HUGE P2P EXPRESSION is null in Forward() birth case loop A");
	TempCell2 =
	  P2P->headCell[XCell+P2P->DirX[DirectionN]]
	               [YCell+P2P->DirY[DirectionN]]->next;
	CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop B");
	while(TempCell2!=TempCell2->next){
	  dtmpx = TempCell->X - TempCell2->X;
	  dtmpy = TempCell->Y - TempCell2->Y;
	  dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy;
	  TempI = PP->Interaction(dtmp2);
	  if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI;
	  if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI;
	  TempCell2=TempCell2->next;
	  CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop C");
	}
      }
    }
    /* cross-comparison implements the sandwich coupling:
       lower bound accepts against the upper gamma and vice versa */
    if(tmpR <= TempGamma[1] ){
      TempCell->InLower[0]=1;
      P2P->UpperLiving[0] = P2P->UpperLiving[0] +1;
    }
    if(tmpR <= TempGamma[0] ){
      TempCell->InLower[1]=1;
      P2P->UpperLiving[1] = P2P->UpperLiving[1] +1;
    }
  }
  /* Death */
  if(TT==0){
    TempCell=P2P->headCell[(int)TX][(int)TY];
    CHECK(TempCell, "internal error: TempCell is null in Forward() death case");
    /* walk the cell list to the node BEFORE point number *DDD */
    while(TempCell->next->No != *DDD){
      TempCell = TempCell->next;
      CHECK(TempCell, "internal error: TempCell is null in Forward() death case loop");
      if(TempCell->next == TempCell) {
	// Rprintf("internal error: unexpected self-reference. Dumping...\n");
	// P2P->Print();
	error("internal error: unexpected self-reference");
	break;
      }
    };
    CHECK(TempCell->next, "internal error: TempCell->next is null in Forward() death case");
    if(*DDD!=TempCell->next->No)
      Rprintf("diagnostic message: multi cell: !!DDD:%ld TempUpper->No:%ld ",
	      *DDD,TempCell->No);
    if(TempCell->next->InLower[0]==1) P2P->UpperLiving[0] = P2P->UpperLiving[0] -1;
    if(TempCell->next->InLower[1]==1) P2P->UpperLiving[1] = P2P->UpperLiving[1] -1;
    TempCell2 = TempCell->next;
    CHECK(TempCell2, "internal error: TempCell2 is null in Forward() death case B");
    TempCell->next = TempCell2->next;
    FREE(TempCell2);
    /* Common stuff */
    //KillCounter ++;
    *DDD = *DDD - 1;
  }
}

/* One step of the dominating birth-death chain, run backwards in time.
   With probability TotalBirthRate/(n + TotalBirthRate) attempt a birth
   (recorded as transition Case 0), otherwise delete a uniformly chosen
   living point (Case 1).  Returns the index of the deleted point, or 0
   for a birth. */
long int Sampler::BirthDeath(long int TimeStep,
			     struct Point *headLiving,
			     struct Point *headDeleted,
			     struct Point3 *headTransition){
  long int i,n;
  float f1,f2,f3,f4;
  double xtemp,ytemp;
  char InWindow, Success;
  struct Point *TempPoint, *TempPoint2;
  struct Point3 *TempTransition;

  R_CheckUserInterrupt();

  f1 = LivingPoints;
  f2 = PP->TotalBirthRate;
  f3 = f2/(f1+f2);       /* probability of a birth */
  f4 = slumptal();
  n = 0;
  Success = 0;
  //Rprintf("LivingPoints: %d TotalBirthRate %f GeneratedPoints %d\n",
  //	 LivingPoints,PP->TotalBirthRate,GeneratedPoints);
  /* Birth */
  while(Success==0){
    /* NOTE(review): the following line was garbled in extraction — text
       between '<' and '>' was stripped.  The original was evidently
       "if(f4<f3){ ... PP->NewEvent(&xtemp, &ytemp, &InWindow);".
       Recover from the original Perfect.cc before compiling. */
    if(f4NewEvent(&xtemp, &ytemp, &InWindow);
      //Rprintf("Ping 2 (BD)\n");
      if(InWindow==1){
	Success = 1;
	//
	TempTransition = ALLOCATE(struct Point3);
	//
	//Rprintf("Ping 3 (BD)\n");
	TempTransition->Case = 0;
	LivingPoints ++;
	GeneratedPoints ++;
	//
	TempPoint = ALLOCATE(struct Point);
	//
	TempPoint->X = xtemp;
	TempPoint->Y = ytemp;
	TempPoint->No = GeneratedPoints;
	TempPoint->R = slumptal();   /* uniform mark used later by Forward() */
	TempPoint->next = headLiving->next;
	headLiving->next = TempPoint;
	NoP ++;
	f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim;
	TempTransition->XCell = int(f1);
	f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim;
	TempTransition->YCell = int(f1);
	//Rprintf("X %f XCell %d\n",TempPoint->X,TempTransition->XCell);
	//
	CLAMP(TempTransition->XCell, 0, P2P->MaxXCell, "TempTransition->XCell");
	CLAMP(TempTransition->YCell, 0, P2P->MaxYCell, "TempTransition->YCell");
	TempTransition->next = headTransition->next;
	headTransition->next = TempTransition;
      }
    }
    /* Death */
    else{
      Success = 1;
      //
      TempTransition = ALLOCATE(struct Point3);
      //
      TempTransition->Case = 1;
      /* choose a living point uniformly at random */
      f1 = LivingPoints;
      f2 = f1*slumptal()+1.0;
      n = int(f2);
      if(n < 1) n = 1;
      if(n>LivingPoints){
	// Rprintf("diagnostic message: random integer n=%ld > %ld = number of living points\n", n,LivingPoints);
	n=LivingPoints;
      }
      /* unlink the n-th living point and move it onto the deleted list */
      TempPoint2 = TempPoint = headLiving;
      for(i=1; i<=n; i++){
	TempPoint2 = TempPoint;
	TempPoint = TempPoint->next;
      }
      TempPoint2->next = TempPoint->next;
      TempPoint->next = headDeleted->next;
      headDeleted->next = TempPoint;
      LivingPoints --;
      NoP --;
      TempTransition->next = headTransition->next;
      headTransition->next = TempTransition;
    }
  }
  return(n);
}

/* Run dominated CFTP: extend the backward chain until the initial points
   have all died (fixing the first horizon), then repeatedly (a) replay
   the transitions forwards through Forward(), and (b) double the time
   horizon, until the upper and lower processes coalesce
   (UpperLiving[0]==UpperLiving[1]).  The coalesced pattern is left in
   *p2p; *ST and *ET report the final start/end times (diagnostics). */
void Sampler::Sim(Point2Pattern *p2p, long int *ST, long int *ET) {
  P2P = p2p;

  long int StartTime, EndTime, TimeStep, D0Time, D0Living;
  long int XCell, YCell, DDD, i;
  float f1;

  /* Initialising linked listed for backward simulation */
  struct Point *headDeleted, *headLiving, *dummyDeleted, *dummyLiving;
  struct Point *TempPoint;
  //
  headLiving = ALLOCATE(struct Point);
  dummyLiving = ALLOCATE(struct Point);
  //
  headLiving->next = dummyLiving;
  dummyLiving->next = dummyLiving;   /* self-referencing sentinel */
  //
  headDeleted = ALLOCATE(struct Point);
  dummyDeleted = ALLOCATE(struct Point);
  //
  headDeleted->next = dummyDeleted;
  dummyDeleted->next = dummyDeleted;

  struct Point2 *TempCell2;

  struct Point3 *headTransition, *dummyTransition;
  //
  headTransition = ALLOCATE(struct Point3);
  dummyTransition = ALLOCATE(struct Point3);
  //
  headTransition->next = dummyTransition;
  dummyTransition->next = dummyTransition;

  /* initial state: the dominating Poisson process */
  PP->GeneratePoisson(headLiving,
		      &GeneratedPoints, &LivingPoints, &NoP);

  StartTime=1;
  EndTime=1;
  TimeStep = 0;

  /* run backwards until every initial point has died */
  D0Time = 0;
  D0Living = GeneratedPoints;
  long int tmp, D0;
  do{
    tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition);
    if(tmp>0){
      if(tmp>(LivingPoints+1-D0Living)){
	D0Living --;
      }
    }
    D0Time++;
  }while(D0Living>0);
  tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition);
  StartTime=1;
  EndTime=D0Time+1;
  D0 = 0;
  do{
    /* on every pass after the first, extend the chain to the new horizon */
    if(D0==1){
      for(TimeStep=StartTime;TimeStep<=EndTime;TimeStep ++){
	tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition);
      }
    }
    D0 = 1;
    P2P->Empty();
    /*
    headUpper->next = dummyUpper;
    dummyUpper->next = dummyUpper;
    for(XCell=0;XCell<=P2P->MaxXCell;XCell++){
      for(YCell=0;YCell<=P2P->MaxYCell;YCell++){
	headUpperCell[XCell][YCell]->next=dummyUpper;
      }
    }
    */
    /* upper process starts as all living points, lower as empty */
    P2P->UpperLiving[0] = LivingPoints;
    P2P->UpperLiving[1] = 0;
    P2P->NoP = 0;
    i=0;
    TempPoint = headLiving->next;
    CHECK(TempPoint, "internal error: TempPoint is null in Sim()");
    while(TempPoint!=TempPoint->next){
      i++;
      //
      TempCell2 = ALLOCATE(struct Point2);
      //
      TempCell2->No = TempPoint->No;
      TempCell2->X = TempPoint->X;
      TempCell2->Y = TempPoint->Y;
      TempCell2->InLower[0] = 1;
      TempCell2->InLower[1] = 0;
      f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim;
      XCell = int(floor(f1));
      CLAMP(XCell, 0, P2P->MaxXCell, "XCell");
      f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim;
      YCell = int(floor(f1));
      CLAMP(YCell, 0, P2P->MaxYCell, "YCell");
      TempCell2->next = P2P->headCell[XCell][YCell]->next;
      P2P->headCell[XCell][YCell]->next = TempCell2;
      TempPoint = TempPoint->next;
      CHECK(TempPoint, "internal error: TempPoint is null in Sim() loop");
    }
    //P2P->DumpToFile("temp0.dat");
    /* replay recorded transitions forwards in time */
    struct Point3 *TempTransition;
    struct Point *Proposal;
    TempTransition = headTransition->next;
    CHECK(TempTransition, "internal error: TempTransition is null in Sim()");
    Proposal = headDeleted->next;
    DDD = GeneratedPoints;
    for(TimeStep=EndTime;TimeStep>=1;TimeStep--){
      R_CheckUserInterrupt();
      Forward(TimeStep,TempTransition->Case,
	      TempTransition->XCell,TempTransition->YCell,
	      Proposal,&DDD);
      if(TempTransition->Case == 1) Proposal = Proposal ->next;
      TempTransition = TempTransition->next;
      CHECK(TempTransition, "internal error: TempTransition is null in Sim() loop");
    }
    /* Doubling strategy used!*/
    StartTime = EndTime+1;
    EndTime=EndTime*2;
    //P2P->DumpToFile("temp.dat");
  }while(P2P->UpperLiving[0]!=P2P->UpperLiving[1]);   /* until coalescence */
  P2P->Clean();

  /* tear down the temporary lists (FREE is a no-op under R_alloc) */
  i=0;
  struct Point *TempPoint2;
  TempPoint = headLiving;
  TempPoint2 = headLiving->next;
  CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position B");
  while(TempPoint!=TempPoint->next){
    i++;
    FREE(TempPoint);
    TempPoint = TempPoint2;
    TempPoint2 = TempPoint2->next;
    CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop C");
  }
  FREE(TempPoint);

  i = 0;
  TempPoint = headDeleted;
  TempPoint2 = headDeleted->next;
  CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position D");
  while(TempPoint!=TempPoint->next){
    i++;
    FREE(TempPoint);
    TempPoint = TempPoint2;
    TempPoint2 = TempPoint2->next;
    CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop D");
  }
  FREE(TempPoint);
  //Rprintf("%d ",i);

  struct Point3 *TempTransition,*TempTransition2;
  i = 0;
  TempTransition = headTransition;
  TempTransition2 = headTransition->next;
  CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() position E");
  while(TempTransition!=TempTransition->next){
    i++;
    FREE(TempTransition);
    TempTransition = TempTransition2;
    TempTransition2 = TempTransition2->next;
    CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() loop F");
  }
  FREE(TempTransition);
  //Rprintf("%d ST: %d ET: %d\n",i,StartTime,EndTime);
  //scanf("%f",&f1);
  *ST = StartTime;
  *ET = EndTime;
}

/* concrete models built on the machinery above */
#include "PerfectStrauss.h"
#include "PerfectStraussHard.h"
#include "PerfectHardcore.h"
#include "PerfectDiggleGratton.h"
#include "PerfectDGS.h"
#include "PerfectPenttinen.h"
spatstat/src/KrectV1.h0000644000176200001440000000026313115225157014337 0ustar liggesusers/*
  KrectV2.h
  with or without isotropic correction
  (include-template dispatch: compiles KrectV2.h twice,
  with and without the ISOTROPIC flag)
*/
     if((*doIso) == 1) {
       /* with isotropic correction */
#define ISOTROPIC
#include "KrectV2.h"
     } else {
       /* without isotropic correction */
#undef ISOTROPIC
#include "KrectV2.h"
     }
spatstat/src/nn3Ddist.h0000644000176200001440000000401513406057617014554 0ustar liggesusers/*
  nn3Ddist.h

  Code template for nearest-neighbour algorithms for 3D point
patterns Input is a single point pattern - supports 'nndist' and 'nnwhich' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED IN ASCENDING ORDER OF THE z COORDINATE $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(n, x, y, z, nnd, nnwhich, huge) /* inputs */ int *n; double *x, *y, *z, *huge; /* outputs */ double *nnd; int *nnwhich; { int npoints, i, j, maxchunk; double d2, d2min, xi, yi, zi, dx, dy, dz, dz2, hu, hu2; #ifdef WHICH int which; #endif hu = *huge; hu2 = hu * hu; npoints = *n; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { d2min = hu2; #ifdef WHICH which = -1; #endif xi = x[i]; yi = y[i]; zi = z[i]; /* search backward */ if(i > 0){ for(j = i - 1; j >= 0; --j) { dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2min) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = j; #endif } } } /* search forward */ if(i < npoints - 1) { for(j = i + 1; j < npoints; ++j) { dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2min) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = j; #endif } } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH /* convert to R indexing */ nnwhich[i] = which + 1; #endif } } } spatstat/src/PerfectDGS.h0000644000176200001440000001231513115225157015007 0ustar liggesusers // ........................... Diggle-Gates-Stibbard process ................ 
// $Revision: 1.3 $ $Date: 2012/03/10 11:22:50 $ #ifndef PI #define PI 3.14159265358979 #endif class DgsProcess : public PointProcess { public: double beta, rho, rhosquared; DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r); ~DgsProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DgsProcess::DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; rho = r; rhosquared = rho * rho; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DgsProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { dist = sqrt(dsquared); t = sin((PI/2) * dist/rho); rtn = t * t; } return(rtn); } void DgsProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DgsProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DgsProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DgsProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DgsProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectDGS(SEXP beta, SEXP rho, SEXP xrange, SEXP yrange) { // input parameters double Beta, Rho, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Rho = *(NUMERIC_POINTER(rho)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Diggle-Gates-Stibbard point process DgsProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Rho); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator 
PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat/src/badgey.c0000755000176200001440000003136513115271120014307 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* To get debug output, insert the line: #define DEBUG 1 */ void fexitc(const char *msg); /* Conditional intensity function for a multiscale saturation process. parameter vector: ipar[0] = ndisc ipar[1] = gamma[0] ipar[2] = r[0] ipar[3] = s[0] ... 
*/ typedef struct BadGey { /* model parameters */ int ndisc; double *gamma; double *r; double *s; /* transformations of the parameters */ double *r2; double *loggamma; int *hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; /* matrix[ndisc, npmax]: neighbour counts in current state */ int *tee; /* vector[ndisc] : neighbour count at point in question */ double *w; /* vector[ndisc] : sum of changes in counts at other points */ } BadGey; Cdata *badgeyinit(state, model, algo) State state; Model model; Algor algo; { int i, j, k, i0, ndisc, nmatrix; double r, g, d2; BadGey *badgey; /* create storage */ badgey = (BadGey *) R_alloc(1, sizeof(BadGey)); badgey->ndisc = ndisc = model.ipar[0]; /* Allocate space for parameter vectors */ badgey->gamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->r = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->s = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Derived values */ badgey->r2 = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->loggamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->hard = (int *) R_alloc((size_t) ndisc, sizeof(int)); /* copy and transform parameters */ for(i=0; i < ndisc; i++) { i0 = 3*i + 1; g = badgey->gamma[i] = model.ipar[i0]; r = badgey->r[i] = model.ipar[i0 + 1]; badgey->s[i] = model.ipar[i0 + 2]; badgey->r2[i] = r * r; badgey->hard[i] = (g < DOUBLE_EPS); badgey->loggamma[i] = (g < DOUBLE_EPS) ? 0 : log(g); } /* periodic boundary conditions? 
*/ badgey->period = model.period; badgey->per = (model.period[0] > 0.0); /* Allocate scratch space */ badgey->tee = (int *) R_alloc((size_t) ndisc, sizeof(int)); badgey->w = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Allocate space for auxiliary counts */ nmatrix = ndisc * state.npmax; badgey->aux = (int *) R_alloc((size_t) nmatrix, sizeof(int)); /* Initialise auxiliary counts */ for(i = 0; i < nmatrix; i++) badgey->aux[i] = 0; for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(j == i) continue; d2 = dist2either(state.x[i], state.y[i], state.x[j], state.y[j], badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) MAT(badgey->aux, k, i, ndisc) += 1; } } } #ifdef DEBUG Rprintf("Finished initialiser; ndisc=%d\n", ndisc); #endif return((Cdata *) badgey); } #define AUX(I,J) MAT(aux, I, J, ndisc) double badgeycif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, k, npts, ndisc, tk; double u, v, d2; double a, dd2, b, f, r2, s, cifval; double *x, *y; int *tee, *aux; double *w; BadGey *badgey; badgey = (BadGey *) cdata; #ifdef DEBUG Rprintf("Entering badgeycif\n"); #endif npts = state.npts; cifval = 1.0; if(npts==0) return cifval; x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; ndisc = badgey->ndisc; tee = badgey->tee; aux = badgey->aux; w = badgey->w; /* For disc k, tee[k] = neighbour count at the point in question; w[k] = sum of changes in (saturated) neighbour counts at other points */ if(prop.itype == BIRTH) { /* compute tee[k] and w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } else { /* Euclidean distance */ 
for(j=0; jr2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } } else if(prop.itype == DEATH) { /* extract current auxiliary counts for point ix */ /* compute w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = AUX(k,ix); w[k] = 0.0; } /* compute change in counts for other points */ if(badgey->per) { /* Periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } } else if(prop.itype == SHIFT) { /* compute auxiliary counts from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } /* Compute the cif at the new point, not the ratio of new/old */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = dist2(x[ix],y[ix], x[j],y[j],badgey->period); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = pow(x[ix] - x[j], 2) 
+ pow(y[ix] - y[j], 2); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } } #ifdef DEBUG Rprintf("ndisc=%d\n", ndisc); #endif /* compute total change in saturated count */ for(k = 0; k < ndisc; k++) { s = badgey->s[k]; tk = tee[k]; w[k] += ((tk < s) ? tk : s); #ifdef DEBUG Rprintf("s[%d]=%lf, t[%d]=%d, w[%d]=%lf\n", k, s, k, tk, k, w[k]); #endif } /* evaluate cif */ for(k = 0; k < ndisc; k++) { if(badgey->hard[k]) { if(tee[k] > 0) return(0.0); /* else cifval multiplied by 0^0 = 1 */ } else cifval *= exp(badgey->loggamma[k] * w[k]); } return cifval; } void badgeyupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, ndisc, j, k; double u, v, xix, yix, r2, d2, d2old, d2new; double *x, *y; int *aux; BadGey *badgey; badgey = (BadGey *) cdata; aux = badgey->aux; /* 'state' is current state before transition */ x = state.x; y = state.y; npts = state.npts; ndisc = badgey->ndisc; #ifdef DEBUG Rprintf("start update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif if(prop.itype == BIRTH) { #ifdef DEBUG Rprintf("Update for birth ---- \n"); #endif /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counters for new point x[npts], y[npts] */ for(k = 0; k < ndisc; k++) AUX(k, npts) = 0; /* update all auxiliary counters */ if(badgey->per) { /* periodic distance */ for(j=0; j < npts; j++) { d2 = dist2(u,v,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX(k, j) += 1; AUX(k, npts) += 1; } } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { d2 = pow(u - x[j], 2) + pow(v - y[j], 2); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX( k, j) += 1; AUX( k, npts) += 1; } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j <= npts; 
j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; #ifdef DEBUG Rprintf("--- Update for death of point %d = (%lf,%lf) ---- \n", ix, u, v); #endif /* Decrement auxiliary counter for each neighbour of deleted point, and remove entry corresponding to deleted point */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { #ifdef DEBUG Rprintf("hit for point %d with radius r[%d]\n", j, k); #endif if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts-1; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == SHIFT) { #ifdef DEBUG Rprintf("Update for shift ---- \n"); #endif /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute all auxiliary counters for point ix */ for(k = 0; k < ndisc; k++) AUX(k,ix) = 0; if(badgey->per) { for(j=0; jperiod); d2old = dist2(xix,yix,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) AUX(k,j) += 1; /* point j gains a new neighbour */ } else if(d2old < r2) AUX(k,j) -= 1; /* point j loses a neighbour */ } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { #ifdef DEBUG Rprintf("shifted point is close to j=%d\n", j); #endif /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) { #ifdef DEBUG Rprintf("\t(previous position was not)\n"); #endif 
AUX(k,j) += 1; /* point j gains a new neighbour */ } } else if(d2old < r2) { #ifdef DEBUG Rprintf("previous position was close to j=%d, shifted point is not\n", j); #endif AUX(k,j) -= 1; /* point j loses a neighbour */ } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } fexitc("Unrecognised transition type; bailing out.\n"); } Cifns BadGeyCifns = { &badgeyinit, &badgeycif, &badgeyupd, NO}; spatstat/src/hasclose.c0000644000176200001440000000135613406057617014667 0ustar liggesusers/* hasclose.c $Revision: 1.4 $ $Date: 2016/11/29 05:09:25 $ Determine whether a point has a neighbour closer than 'r' Data must be ordered by increasing x coordinate */ #include #undef BUG #undef TORUS #undef ZCOORD #define CLOSEFUN hasXclose #define CROSSFUN hasXYclose #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN #define ZCOORD #define CLOSEFUN hasX3close #define CROSSFUN hasXY3close #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN #define TORUS #undef ZCOORD #define CLOSEFUN hasXpclose #define CROSSFUN hasXYpclose #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN #define ZCOORD #define CLOSEFUN hasX3pclose #define CROSSFUN hasXY3pclose #include "hasclose.h" #undef CLOSEFUN #undef CROSSFUN spatstat/src/dwpure.c0000755000176200001440000002320213406057617014371 0ustar liggesusers/* dwpure.c $Revision: 1.5 $ $Date: 2011/09/20 07:54:53 $ Code by Dominic Schuhmacher */ #include #include #include typedef struct State { int n1, n2; /* vectors of length n1 (rows) and n2 (cols) */ int *rowmass, *colmass; /* mass to be moved from row / to col */ int *rowlab, *collab; /* row and col labels (specify previous node (row for collab, col for rowlab)) */ int *rowflow, *colflow; /* second component of labels (specify flow through current node) */ int *rowsurplus, *colsurplus; /* the surplus in each row/col under the current flow */ int *dualu, *dualv; /* vectors 
of dual variables (u for rows, v for cols) */ int *rowhelper, *colhelper; /* helping vector to store intermediate results */ /* could be local in initcost at the moment */ /* n by n matrices */ int *d; /* matrix of costs */ int *flowmatrix; /* matrix of flows */ int *arcmatrix; /* matrix of arcs for restriced primal problem (1 if arc, 0 if no arc) should be unsigned char to save memory however need to workout problem with R_alloc first (see below) */ /* n*n vector */ int *collectvals; } State; #define COST(I,J,STATE,NVALUE) ((STATE)->d)[(NVALUE) * (J) + (I)] #define FLOW(I,J,STATE,NVALUE) ((STATE)->flowmatrix)[(NVALUE) * (J) + (I)] #define ARC(I,J,STATE,NVALUE) ((STATE)->arcmatrix)[(NVALUE) * (J) + (I)] #define MIN(A,B) ((A)<(B) ? (A) : (B)) int arraysum(int *a, int n); int arraymin(int *a, int n); void initvalues(State *state); void maxflow(State *state); void updateduals(State *state); void augmentflow(int startcol, State *state); /* ------------ The main function ----------------------------- */ void dwpure(int *d, int *rmass, int *cmass, int *numr, int *numc, int *flowmatrix) { int i,j; /* indices */ int n1,n2; unsigned char feasible = 0; /* boolean for main loop */ State state; /* inputs */ state.n1 = n1 = *numr; state.n2 = n2 = *numc; state.d = d; state.rowmass = rmass; state.colmass = cmass; /* scratch space */ state.rowlab = (int *) R_alloc((long) n1, sizeof(int)); state.collab = (int *) R_alloc((long) n2, sizeof(int)); state.rowflow = (int *) R_alloc((long) n1, sizeof(int)); state.colflow = (int *) R_alloc((long) n2, sizeof(int)); state.rowsurplus = (int *) R_alloc((long) n1, sizeof(int)); state.colsurplus = (int *) R_alloc((long) n2, sizeof(int)); state.dualu = (int *) R_alloc((long) n1, sizeof(int)); state.dualv = (int *) R_alloc((long) n2, sizeof(int)); state.rowhelper = (int *) R_alloc((long) n1, sizeof(int)); state.colhelper = (int *) R_alloc((long) n2, sizeof(int)); state.flowmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int)); state.arcmatrix = 
(int *) R_alloc((long) (n1 * n2), sizeof(int)); state.collectvals = (int *) R_alloc((long) (n1 * n2), sizeof(int)); for (i = 0; i < n1; ++i) { for (j = 0; j < n2; ++j) { state.flowmatrix[(n1)*(j) + i] = 0; state.arcmatrix[(n1)*(j) + i] = 0; state.collectvals[(n1)*(j) + i] = 0; } } for (i = 0; i < n1; ++i) { state.rowlab[i] = 0; state.rowflow[i] = 0; state.rowsurplus[i] = 0; state.dualu[i] = 0; state.rowhelper[i] = 0; } for (j = 0; j < n2; ++j) { state.collab[j] = 0; state.colflow[j] = 0; state.colsurplus[j] = 0; state.dualv[j] = 0; state.colhelper[j] = 0; } /* Initialize dual variables, arcmatrix, and surpluses */ initvalues(&state); /* For testing: print out cost matrix for (i = 0; i < n1; ++i) { for (j = 0; j < n2; ++j) { Rprintf("%d ", COST(i, j, &state, n1)); } Rprintf("\n"); } */ /* The main loop */ while(feasible == 0) { maxflow(&state); if (arraysum(state.rowsurplus, n1) > 0) { updateduals(&state); /* also updates arcmatrix */ } else { feasible = 1; } } /* "Return" the final flowmatrix */ for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { flowmatrix[n1*j+i] = state.flowmatrix[n1*j+i]; } } } /* ------------ Functions called by dwpure_R ------------------------- */ /* Sum of integer array */ int arraysum(int *a, int n) { int i; int asum = 0; for (i = 0; i < n; i++) asum += a[i]; return(asum); } /* Minimal element of an integer array */ int arraymin(int *a, int n) { int i, amin; if (n < 1) return(-1); amin = a[0]; if (n > 1) for (i = 0; i < n; i++) if (a[i] < amin) amin = a[i]; return(amin); } /* Initialize cost matrix: subtract in each row its minimal entry (from all the entries in the row), then subtract in each column its minimal entry (from all the entries in the column) */ void initvalues(State *state) { int i,j,n1,n2; n1 = state->n1; n2 = state->n2; /* Initial surpluses; can I do this shorter? 
later on surpluses are updated in flow augmentation step */ for (i = 0; i < n1; i++) state->rowsurplus[i] = state->rowmass[i]; for (j = 0; j < n2; j++) state->colsurplus[j] = state->colmass[j]; for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) state->colhelper[j] = COST(i, j, state, n1); state->dualu[i] = arraymin(state->colhelper, n2); } for (j = 0; j < n2; j++) { for (i = 0; i < n1; i++) state->rowhelper[i] = COST(i, j, state, n1) - state->dualu[i]; state->dualv[j] = arraymin(state->rowhelper, n1); } for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j]) ARC(i, j, state, n1) = 1; else ARC(i, j, state, n1) = 0; } } } /* Maximize the flow on the (zeros of the) current cost matrix */ void maxflow(State *state) { int breakthrough; /* col. no. in which breakthrough occurs */ unsigned char labelfound = 1; /* 0 if no more labels can be found */ int i,j,n1,n2; n1 = state->n1; n2 = state->n2; while (labelfound == 1) { breakthrough = -1; /* initialize labels */ for (i = 0; i < n1; i++) { if (state->rowsurplus[i] > 0) { state->rowlab[i] = -5; state->rowflow[i] = state->rowsurplus[i]; } else { state->rowlab[i] = -1; /* setting rowflow to zero isn't necessary! */ } } for (j = 0; j < n2; j++) state->collab[j] = -1; /* setting colflow to zero isn't necessary! */ /* -1 means "no index", -5 means "source label" (rows only) */ while (labelfound == 1 && breakthrough == -1) { labelfound = 0; /* label unlabeled column j that permits flow from some labeled row i */ /* ("permits flow" means arcmatrix[i][j] = 1). 
Do so for every j */ for (i = 0; i < n1; i++) { if (state->rowlab[i] != -1) { for (j = 0; j < n2; j++) { if (ARC(i, j, state, n1) == 1 && state->collab[j] == -1) { state->collab[j] = i; state->colflow[j] = state->rowflow[i]; labelfound = 1; if (state->colsurplus[j] > 0 && breakthrough == -1) breakthrough = j; } } } } /* label unlabeled row i that already sends flow to some labeled col j */ /* ("already sends" means flowmatrix[i][j] > 0). Do so for every i */ for (j = 0; j < n2; j++) { if (state->collab[j] != -1) { for (i = 0; i < n1; i++) { if (FLOW(i, j, state, n1) > 0 && state->rowlab[i] == -1) { state->rowlab[i] = j; state->rowflow[i] = MIN(state->colflow[j],FLOW(i, j, state, n1)); labelfound = 1; } } } } } if (breakthrough != -1) augmentflow(breakthrough, state); } } /* Update the dual variables (called if solution of restricted primal is not feasible for the original problem): determine the minimum over the submatrix given by all labeled rows and unlabeled columns, and subtract it from all labeled rows and add it to all labeled columns. 
*/ void updateduals(State *state) { int i,j,n1,n2,mini; int count = 0; n1 = state->n1; n2 = state->n2; for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (state->rowlab[i] != -1 && state->collab[j] == -1) { state->collectvals[count] = COST(i, j, state, n1) - state->dualu[i] - state->dualv[j]; count++; } } } mini = arraymin(state->collectvals, count); for (i = 0; i < n1; i++) { if (state->rowlab[i] != -1) state->dualu[i] += mini; } for (j = 0; j < n2; j++){ if (state->collab[j] != -1) state->dualv[j] -= mini; } for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j]) ARC(i, j, state, n1) = 1; else ARC(i, j, state, n1) = 0; } } } /* Augment the flow on the graph given by arcmatrix (by aug) according to the row and column labels starting in column startcol */ /* Adjust the surpluses while we're at it (first row and last col have -aug) */ void augmentflow(int startcol, State *state) { int k,l,aug,n1; /* int i,j,k,l,aug,n1,n2; */ n1 = state->n1; l = startcol; aug = MIN(state->colflow[l], state->colsurplus[l]); state->colsurplus[l] -= aug; k = state->collab[l]; FLOW(k, l, state, n1) += aug; l = state->rowlab[k]; while (l != -5) { FLOW(k, l, state, n1) -= aug; k = state->collab[l]; FLOW(k, l, state, n1) += aug; l = state->rowlab[k]; } state->rowsurplus[k] -= aug; } spatstat/src/PerfectPenttinen.h0000644000176200001440000001301013406057617016336 0ustar liggesusers // ........................... Penttinen process ................ 
// $Revision: 1.2 $ $Date: 2016/02/02 01:30:01 $ class PenttProcess : public PointProcess { public: double beta, gamma, radius, reachsquared, loggamma2pi; int ishard; PenttProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double r); ~PenttProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; PenttProcess::PenttProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; radius = r; ishard = (gamma <= DOUBLE_EPS); loggamma2pi = M_2PI * (ishard? 0.0 : log(gamma)); reachsquared = 4.0 * radius * radius; InteractionRange = 2.0 * radius; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double PenttProcess::Interaction(double dsquared) { double rtn, z, z2; rtn = 1.0; if(dsquared < reachsquared) { if(ishard) return(0.0); z2 = dsquared/reachsquared; z = sqrt(z2); if(z < 1.0) { rtn = exp(loggamma2pi * (acos(z) - z * sqrt(1.0 - z2))); } } return(rtn); } void PenttProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void PenttProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating PenttProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating PenttProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); 
//Rprintf("Generating PenttProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectPenttinen(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Penttinen point process PenttProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Gamma,R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS 
Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(9); // 5 arguments plus xout, yout, nout, out return(out); } } spatstat/src/pcf3.c0000755000176200001440000001216213406057617013721 0ustar liggesusers#include #include #include #include #include "geom3.h" #include "functable.h" #include "chunkloop.h" #include "constants.h" /* $Revision: 1.8 $ $Date: 2018/12/18 02:43:11 $ pair correlation function of 3D point pattern (Epanechnikov kernel) pcf3trans translation correction pcf3isot isotropic correction Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define FOURPI (2.0 * M_2PI) void pcf3trans(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; register double vx, vy, vz, tval; Point *ip, *jp; double dt, vol, lambda, denom; double coef, twocoef, frac, invweight, kernel; double sphesfrac(), sphevol(); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; 
(pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { /* compute pairwise distance */ jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute (inverse) edge correction weight */ vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); invweight = vx * vy * vz * FOURPI * dist * dist; if(invweight > 0.0) { for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel / invweight; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* multiplied by 2 because we only visited i < j pairs */ twocoef = 2.0 * coef; /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= twocoef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0) ? 
(pcf->num)[l] / (pcf->denom)[l] : 0.0; } } void pcf3isot(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, mass, tval; double coef, frac, kernel; double sphesfrac(), sphevol(); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute edge correction weight */ mass = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); mass *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; if(mass > 0.0) { mass /= FOURPI * dist * dist; for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel * mass; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* normalise kernel and compute 
     ratio estimate */
  for(l = 0; l < pcf->n; l++) {
    /* apply the Epanechnikov normalising constant, then form the ratio */
    (pcf->num)[l] *= coef;
    (pcf->f)[l] = ((pcf->denom)[l] > 0.0)?
      (pcf->num)[l] / (pcf->denom)[l] : 0.0;
  }
}

spatstat/src/minnnd.h0000644000176200001440000000323013406057617014347 0ustar liggesusers/*

  minnnd.h

  Code template for minnnd, to be #included in minnnd.c

  Macros:
     FNAME        Function name
     IGNOREZERO   #defined if zero distances should be ignored

  $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

*/

/* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */

/*
  Compute the minimum SQUARED nearest-neighbour distance over all n
  points (x[i], y[i]), returning it in *result.  *huge is an upper
  bound used to initialise the minimum.  Because y is sorted, each
  scan away from point i can stop as soon as the y-gap alone exceeds
  the current minimum.
*/
void FNAME(n, x, y, huge, result)
     /* inputs */
     int *n;
     double *x, *y, *huge;
     /* outputs */
     double *result;
{
  int npoints, i, maxchunk, left, right;
  double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2;

  hu = *huge;
  hu2 = hu * hu;

  npoints = *n;

  d2min = hu2;   /* current global minimum squared distance */

  if(npoints == 0) return;

  /* loop in chunks of 2^16, checking for user interrupt between chunks */

  i = 0; maxchunk = 0;
  while(i < npoints) {

    R_CheckUserInterrupt();

    maxchunk += 65536;
    if(maxchunk > npoints) maxchunk = npoints;

    for(; i < maxchunk; i++) {

      xi = x[i];
      yi = y[i];

      if(i < npoints - 1) {
	/* search forward */
	for(right = i + 1; right < npoints; ++right)
	  {
	    dy = y[right] - yi;
	    dy2 = dy * dy;
	    /* y sorted ascending: no later point can beat the minimum */
	    if(dy2 > d2min)
	      break;
	    dx = x[right] - xi;
	    d2 = dx * dx + dy2;
	    if (d2 < d2min) {
#ifdef IGNOREZERO
	      if(d2 > 0) d2min = d2;
#else
	      d2min = d2;
#endif
	    }
	  }
      }
      if(i > 0){
	/* search backward */
	for(left = i - 1; left >= 0; --left)
	  {
	    dy = yi - y[left];
	    dy2 = dy * dy;
	    if(dy2 > d2min)
	      break;
	    dx = x[left] - xi;
	    d2 = dx * dx + dy2;
	    if (d2 < d2min) {
#ifdef IGNOREZERO
	      if(d2 > 0) d2min = d2;
#else
	      d2min = d2;
#endif
	    }
	  }
      }
    }
  }

  *result = d2min;
}

spatstat/src/looptest.h0000644000176200001440000000030213406057617014732 0ustar liggesusers/*

  looptest.h

  Utilities for looping

  $Revision: 1.1 $ $Date: 2014/09/19 00:47:34 $

*/

/* a small value relative to threshold X, for loop exit test */

#define EPSILON(X) ((X)/64)

spatstat/src/poly2im.c0000755000176200001440000002034213406057617014460 0ustar 
liggesusers/*

  poly2im.c

  Conversion from (x,y) polygon to pixel image

  poly2imI     pixel value = 1{pixel centre is inside polygon}

  poly2imA     pixel value = area of intersection between pixel and polygon

  $Revision: 1.10 $ $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

*/

#undef DEBUG

#include #include #include #include "chunkloop.h"

/*
  For each polygon edge, add +1 or -1 (depending on the edge's
  left/right orientation) to every raster entry whose centre lies
  below the edge in the relevant columns.  After all edges of the
  CLOSED polygon are processed, out[i + j*Ny] holds a winding count:
  nonzero iff the raster point (j, i) is inside the polygon.
*/
void
poly2imI(xp, yp, np, nx, ny, out)
     double *xp, *yp; /* polygon vertices, anticlockwise, CLOSED */
     int *np;
     int *nx, *ny; /* INTEGER raster points from (0,0) to (nx-1, ny-1) */
     int *out;  /* output matrix [ny, nx], byrow=FALSE, initialised to 0 */
{
  int Np, Nx, Ny, Np1, maxchunk, mstart, mend;
  int j, k, m;
  double x0, y0, x1, y1, xleft, xright, yleft, yright;
  double dx, dy, y, slope, intercept;
  int jleft, jright, imax;
  int sign;

  Np = *np;
  Nx = *nx;
  Ny = *ny;
  /* Nxy = Nx * Ny; */
  Np1 = Np - 1;

  /* run through polygon edges */
  OUTERCHUNKLOOP(k, Np1, maxchunk, 8196) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(k, Np1, maxchunk, 8196) {
      x0 = xp[k];
      y0 = yp[k];
      x1 = xp[k+1];
      y1 = yp[k+1];
      /* orient the edge left-to-right; 'sign' records original direction */
      if(x0 < x1) {
	xleft = x0;
	xright = x1;
	yleft = y0;
	yright = y1;
	sign = -1;
      } else {
	xleft = x1;
	xright = x0;
	yleft = y1;
	yright = y0;
	sign = +1;
      }
      /* determine relevant columns of pixels */
      jleft = (int) ceil(xleft);
      jright = (int) floor(xright);
      if(jleft < Nx && jright >= 0 && jleft <= jright) {
	if(jleft < 0) { jleft = 0; }
	if(jright >= Nx) {jright = Nx - 1; }
	/* equation of edge */
	dx = xright - xleft;
	dy = yright - yleft;
	slope = dy/dx;
	intercept = yleft - slope * xleft;
	/* visit relevant columns */
	for(j = jleft; j <= jright; j++) {
	  /* height of the edge above this column of raster points */
	  y = slope * ((double) j) + intercept;
	  imax = (int) floor(y);
	  if(imax >= Ny) imax = Ny-1;
	  if(imax >= 0) {
	    /* increment entries below edge in this column:
	       out[i + j * Ny] += sign for 0 <= i <= imax
	    */
	    mstart = j * Ny;
	    mend = mstart + imax;
	    for(m = mstart; m <= mend; m++) {
	      out[m] += sign;
	    }
	  }
	}
      }
    }
  }
}

/* classification of a y-value relative to a pixel row [i, i+1] */
#define BELOW -1
#define INSIDE 0
#define ABOVE 1

void
poly2imA(ncol, nrow, xpoly, ypoly, npoly, out, status) int *ncol, *nrow; /* pixels are unit squares from (0,0) to (ncol,nrow) */ double *xpoly, *ypoly; /* vectors of coordinates of polygon vertices */ int *npoly; double *out; /* double array [nrow, ncol] of pixel areas, byrow=TRUE, initialised to 0 */ int *status; { double *xp, *yp; int nx, ny, nxy, np, np1, maxchunk; int i, j, k; double xcur, ycur, xnext, ynext, xleft, yleft, xright, yright; int sgn, jmin, jmax, imin, imax; double x0, y0, x1, y1, slope, yhi, ylo, area, xcut, xcutA, xcutB; int klo, khi; nx = *ncol; ny = *nrow; xp = xpoly; yp = ypoly; np = *npoly; *status = 0; /* initialise output array */ nxy = nx * ny; for(k = 0; k < nxy; k++) out[k] = 0; /* ............ loop over polygon edges ...................*/ np1 = np - 1; OUTERCHUNKLOOP(k, np1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, np1, maxchunk, 8196) { xcur = xp[k]; ycur = yp[k]; xnext = xp[k+1]; ynext = yp[k+1]; #ifdef DEBUG Rprintf("\nEdge %d from (%lf, %lf) to (%lf, %lf) .........\n", k, xcur, ycur, xnext, ynext); #endif if(xcur != xnext) { /* vertical edges are ignored */ if(xcur < xnext) { #ifdef DEBUG Rprintf("negative sign\n"); #endif sgn = -1; xleft = xcur; yleft = ycur; xright = xnext; yright = ynext; } else { #ifdef DEBUG Rprintf("positive sign\n"); #endif sgn = 1; xleft = xnext; yleft = ynext; xright = xcur; yright = ycur; } /* we have now ensured xleft < xright */ slope = (yright - yleft)/(xright - xleft); /* Find relevant columns of pixels */ jmin = floor(xleft); jmin = (jmin < 0) ? 0 : jmin; jmax = ceil(xright); jmax = (jmax > nx - 1) ? nx - 1 : jmax; /* Find relevant rows of pixels */ imin = floor((yleft < yright) ? yleft : yright); imin = (imin < 0) ? 0 : imin; imax = ceil((yleft < yright) ? yright : yleft); imax = (imax > ny - 1) ? ny - 1 : imax; #ifdef DEBUG Rprintf( "imin=%d, imax=%d, jmin=%d, jmax=%d\n", imin, imax, jmin, jmax); #endif /* ........... 
loop over columns of pixels ..............*/ for(j = jmin; j <= jmax; j++) { #ifdef DEBUG Rprintf( "\t j=%d:\n", j); #endif /* Intersect trapezium with column of pixels */ if(xleft <= j+1 && xright >= j) { if(xleft >= j) { /* retain left corner */ #ifdef DEBUG Rprintf( "\tretain left corner\n"); #endif x0 = xleft; y0 = yleft; } else { /* trim left corner */ #ifdef DEBUG Rprintf( "\ttrim left corner\n"); #endif x0 = (double) j; y0 = yleft + slope * (x0 - xleft); } if(xright <= j+1) { /* retain right corner */ #ifdef DEBUG Rprintf( "\tretain right corner\n"); #endif x1 = xright; y1 = yright; } else { /* trim right corner */ #ifdef DEBUG Rprintf( "\ttrim right corner\n"); #endif x1 = (double) (j+1); y1 = yright + slope * (x1 - xright); } /* save min and max y */ if(y0 < y1) { #ifdef DEBUG Rprintf( "slope %lf > 0\n", slope); #endif ylo = y0; yhi = y1; } else { #ifdef DEBUG Rprintf( "slope %lf <= 0\n", slope); #endif ylo = y1; yhi = y0; } /* ............ loop over pixels within column ......... */ /* first part */ if(imin > 0) { for(i = 0; i < imin; i++) { #ifdef DEBUG Rprintf( "\ti=%d:\n", i); #endif /* The trimmed pixel [x0, x1] * [i, i+1] lies below the polygon edge. */ area = (x1 - x0); #ifdef DEBUG Rprintf( "\tIncrementing area by %lf\n", sgn * area); #endif out[i + ny * j] += sgn * area; } } /* second part */ for(i = imin; i <= imax; i++) { #ifdef DEBUG Rprintf( "\ti=%d:\n", i); #endif /* Compute area of intersection between trapezium and trimmed pixel [x0, x1] x [i, i+1] */ klo = (ylo <= i) ? BELOW : (ylo >= (i+1))? ABOVE: INSIDE; khi = (yhi <= i) ? BELOW : (yhi >= (i+1))? 
ABOVE: INSIDE; if(klo == ABOVE) { /* trapezium covers pixel */ #ifdef DEBUG Rprintf( "\t\ttrapezium covers pixel\n"); #endif area = (x1-x0); } else if(khi == BELOW) { #ifdef DEBUG Rprintf( "\t\tpixel avoids trapezium\n"); #endif /* pixel avoids trapezium */ area = 0.0; } else if(klo == INSIDE && khi == INSIDE) { /* polygon edge is inside pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge is inside pixel\n"); #endif area = (x1-x0) * ((ylo + yhi)/2.0 - i); } else if(klo == INSIDE && khi == ABOVE) { /* polygon edge crosses upper edge of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses upper edge of pixel\n"); #endif xcut = x0 + ((i+1) - y0)/slope; if(slope > 0) area = (xcut - x0) * ((y0 + (i+1))/2 - i) + (x1 - xcut); else area = (x1 - xcut) * ((y1 + (i+1))/2 - i) + (xcut - x0); } else if(klo == BELOW && khi == INSIDE) { /* polygon edge crosses lower edge of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses lower edge of pixel\n"); #endif xcut = x0 + (i - y0)/slope; if(slope > 0) area = (x1 - xcut) * ((y1 + i)/2 - i); else area = (xcut - x0) * ((y0 + i)/2 - i); } else if(klo == BELOW && khi == ABOVE) { /* polygon edge crosses upper and lower edges of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses upper and lower edges of pixel\n"); #endif xcutA = x0 + (i - y0)/slope; xcutB = x0 + ((i+1) - y0)/slope; if(slope > 0) area = (xcutB - xcutA)/2 + (x1 - xcutB); else area = (xcutB - x0) + (xcutA - xcutB)/2; } else { /* control should not pass to here */ *status = 1; return; } /* add contribution to area of pixel */ #ifdef DEBUG Rprintf( "\tIncrementing area by %lf\n", sgn * area); #endif out[i + ny * j] += sgn * area; } /* ............ end of loop over pixels within column ......... */ } } /* ........ end of loop over columns of pixels ...............*/ } } } /* ......... 
end of loop over polygon edges ...................*/ } spatstat/src/lineardisc.c0000755000176200001440000002067613570037757015221 0ustar liggesusers#include #include #include "chunkloop.h" /* lineardisc.c Disc of radius r in linear network Clineardisc determine the linear disc (NOT USED) Ccountends count the number of endpoints $Revision: 1.14 $ $Date: 2019/11/28 20:53:23 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #include "yesno.h" #undef DEBUG #ifdef DEBUG void Clineardisc(f, seg, /* centre of disc (local coords, f = tp) */ r, /* radius of disc */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ allinside, boundary, dxv, nendpoints) int *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, *r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ /* OUTPUTS */ int *allinside, *boundary; /* vectors of status for each segment */ double *dxv; /* vector of distances for each vertex */ int *nendpoints; { int Nv, Ns; double f0, rad; int seg0; int i, A, B, fromi, toi, allin, bdry, reachable, nends, maxchunk; double length0, dxA, dxB, dxAvi, dxBvi, residue; double *resid; int *covered; Nv = *nv; Ns = *ns; f0 = *f; seg0 = *seg; rad = *r; /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from x to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; /* visit vertices */ covered = (int *) R_alloc((size_t) Nv, sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); OUTERCHUNKLOOP(i, Nv, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 16384) { /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going 
through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxv[i] = (dxAvi < dxBvi) ? dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxv[i]; resid[i] = (residue > 0)? residue : 0; /* determine whether vertex i is inside the disc of radius r */ covered[i] = (residue >= 0); } } /* Now visit line segments. */ nends = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 16384) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from centre (x, y) */ allin = covered[A] && covered[B]; bdry = !allin; if(bdry) { if(!covered[A]) nends++; if(!covered[B]) nends++; } } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; reachable = (covered[fromi] || covered[toi]); if(reachable) { allin = covered[fromi] && covered[toi] && (resid[fromi] + resid[toi] >= lengths[i]); bdry = !allin; } else allin = bdry = NO; if(bdry) { if(covered[fromi]) nends++; if(covered[toi]) nends++; } } allinside[i] = allin; boundary[i] = bdry; } } *nendpoints = nends; } #endif /* ------------------------------------------------- */ /* count endpoints of several discs in a network */ /* ------------------------------------------------- */ void Ccountends(np, f, seg, /* centres of discs (local coords) */ r, /* radii of discs */ nv, xv, yv, /* network vertices */ ns, from, to, /* network segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ toler, /* tolerance */ nendpoints /* output counts of endpoints */ ) int *np, *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, *r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ double *toler; /* tolerance for merging endpoints and 
vertices */ /* OUTPUT */ int *nendpoints; { int Np, Nv, Ns; double f0, rad; int seg0; int i, m, A, B, fromi, toi, reachable, nends, maxchunk, covfrom, covto, allin; double length0, dxA, dxB, dxAvi, dxBvi, dxvi, residue, resfrom, resto, tol; double *resid; int *covered, *terminal; Np = *np; Nv = *nv; Ns = *ns; tol = *toler; #ifdef DEBUG Rprintf("\nTolerance = %lf\n", tol); #endif covered = (int *) R_alloc((size_t) Nv, sizeof(int)); terminal = (int *) R_alloc((size_t) Nv, sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); /* loop over centre points */ OUTERCHUNKLOOP(m, Np, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(m, Np, maxchunk, 256) { f0 = f[m]; seg0 = seg[m]; rad = r[m]; #ifdef DEBUG Rprintf("\nCentre point %d lies in segment %d\n", m, seg0); #endif /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from centre to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; #ifdef DEBUG Rprintf("Distances to endpoints: dxA=%lf, dxB=%lf\n", dxA, dxB); #endif nends = 0; /* visit vertices */ for(i = 0; i < Nv; i++) { #ifdef DEBUG Rprintf("\nConsidering vertex %d\n", i); #endif /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxvi = (dxAvi < dxBvi) ? 
dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxvi; #ifdef DEBUG Rprintf("dxAvi = %lf; dxBvi = %lf; residue = %lf\n", dxAvi, dxBvi, residue); #endif if(residue > tol) { resid[i] = residue; covered[i] = YES; terminal[i] = NO; #ifdef DEBUG Rprintf("Vertex is covered\n"); #endif } else if(residue < -tol) { resid[i] = 0; covered[i] = terminal[i] = NO; #ifdef DEBUG Rprintf("Vertex is not covered\n"); #endif } else { /* vertex is within 'tol' of an endpoint - deem it to be one */ resid[i] = 0; covered[i] = terminal[i] = YES; /* vertex is an endpoint of disc */ ++nends; #ifdef DEBUG Rprintf("Vertex is a terminal endpoint\n"); #endif } } #ifdef DEBUG Rprintf("%d terminal endpoints\n", nends); #endif /* Now visit line segments to count any endpoints that are interior to the segments. */ for(i = 0; i < Ns; i++) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from (x0, y0) */ if(!covered[A]) nends++; if(!covered[B]) nends++; #ifdef DEBUG if(!covered[A]) Rprintf("A not covered\n"); if(!covered[B]) Rprintf("B not covered\n"); #endif } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; covfrom = covered[fromi]; covto = covered[toi]; resfrom = resid[fromi]; resto = resid[toi]; reachable = covfrom || covto; #ifdef DEBUG residue = resfrom + resto - lengths[i]; Rprintf("%d: %s %s: %lf + %lf - %lf = %lf sign %s\n", i, (terminal[fromi]) ? "T" : ((covfrom) ? "Y" : "N"), (terminal[toi]) ? "T" : ((covto) ? "Y" : "N"), resfrom, resto, lengths[i], residue, (residue < 0) ? "-" : ((residue > 0) ? 
"+" : "0")); #endif if(reachable) { residue = resfrom + resto - lengths[i]; allin = covfrom && covto && (residue >= 0); #ifdef DEBUG if(allin) { Rprintf("Covered\n"); } else if((terminal[fromi] || terminal[toi]) && (residue >= - tol * lengths[i])) { Rprintf("Deemed to be covered\n"); } else Rprintf("Reachable\n"); #endif allin = allin || ((terminal[fromi] || terminal[toi]) && (residue >= - tol)); if(!allin) { /* segment is not entirely covered by disc - infer endpoint(s) in interior of segment */ if(covfrom && !terminal[fromi]) nends++; if(covto && !terminal[toi]) nends++; #ifdef DEBUG if(covfrom && !terminal[fromi]) Rprintf("fromi => end\n"); if(covto && !terminal[toi]) Rprintf("toi => end\n"); #endif } } } } nendpoints[m] = nends; } } } spatstat/src/straussm.c0000755000176200001440000001300313115271120014722 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiStrauss { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... 
ntypes-1 */ double *rad2; /* squared radii */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStrauss; /* initialiser function */ Cdata *straussminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, r2, logg, range2; MultiStrauss *multistrauss; multistrauss = (MultiStrauss *) R_alloc(1, sizeof(MultiStrauss)); multistrauss->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrauss->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->rad = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrauss->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrauss->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 gamma values followed by n^2 values of r */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[i + j*ntypes]; r = model.ipar[n2 + i + j*ntypes]; r2 = r * r; hard = (g < DOUBLE_EPS); logg = (hard) ? 0 : log(g); MAT(multistrauss->gamma, i, j, ntypes) = g; MAT(multistrauss->rad, i, j, ntypes) = r; MAT(multistrauss->hard, i, j, ntypes) = hard; MAT(multistrauss->loggamma, i, j, ntypes) = logg; MAT(multistrauss->rad2, i, j, ntypes) = r2; if(r2 > range2) range2 = r2; } } multistrauss->range2 = range2; /* periodic boundary conditions? 
*/ multistrauss->period = model.period; multistrauss->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrauss); } /* conditional intensity evaluator */ double straussmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStrauss *multistrauss; DECLARE_CLOSE_D2_VARS; multistrauss = (MultiStrauss *) cdata; range2 = multistrauss->range2; period = multistrauss->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrauss->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrauss->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply 
cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrauss->kount, m1, m2, ntypes); if(MAT(multistrauss->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrauss->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussCifns = { &straussminit, &straussmcif, (updafunptr) NULL, YES}; spatstat/src/linequad.c0000644000176200001440000000122513406057617014663 0ustar liggesusers#include #include #include "yesno.h" /* linequad.c make a quadrature scheme on a linear network Clinequad unmarked pattern ClineMquad multitype pattern $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define SWAP(X,Y,TMP) TMP = Y; Y = X; X = TMP #undef HUH #define FUNNAME Clinequad #define FMKNAME ClineMquad #undef ALEA #include "linequad.h" #undef FUNNAME #undef FMKNAME #define FUNNAME ClineRquad #define FMKNAME ClineRMquad #define ALEA #include "linequad.h" #undef FUNNAME #undef FMKNAME #undef ALEA spatstat/src/knn3Ddist.h0000644000176200001440000000745213406057617014737 0ustar liggesusers/* knn3Ddist.h Code template for k-nearest-neighbour algorithms for 3D point patterns Input is a single point pattern - supports 'nndist' and 'nnwhich' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED IN ASCENDING ORDER OF THE z COORDINATE $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(n, kmax, x, y, z, nnd, nnwhich, huge) /* inputs */ int *n, *kmax; double *x, *y, *z, *huge; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; { int npoints, nk, nk1, i, j, k, k1, unsorted, maxchunk; double d2, d2minK, xi, yi, zi, dx, dy, dz, dz2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif hu = *huge; hu2 = hu * hu; npoints = *n; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } xi = x[i]; yi = y[i]; zi = z[i]; /* search backward */ if(i > 0) { for(j = i - 1; j >= 0; --j) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2minK) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = j; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } } /* search forward */ if(i + 1 < npoints) { for(j = i + 1; j < 
npoints; ++j) { #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2minK) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = j; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* calculate nn distances for point i and copy to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH /* convert from C to R indexing */ nnwhich[nk * i + k] = which[k] + 1; #endif } } } } spatstat/src/fiksel.c0000755000176200001440000000574413115271120014333 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Fiksel process */ /* Conditional intensity function for a pairwise interaction point process with interaction function e(t) = 0 for t < h = exp(a * exp(- kappa * t)) for h <= t < r = 1 for t >= r */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Fiksel { double r; double h; double kappa; double a; double h2; /* h^2 */ double r2; /* r^2 */ double *period; int per; } Fiksel; /* initialiser function */ Cdata *fikselinit(state, model, algo) State state; Model model; Algor algo; { Fiksel *fiksel; fiksel = (Fiksel *) R_alloc(1, sizeof(Fiksel)); /* Interpret model parameters*/ fiksel->r = model.ipar[0]; fiksel->h = model.ipar[1]; fiksel->kappa = model.ipar[2]; fiksel->a = model.ipar[3]; fiksel->period = model.period; /* constants */ fiksel->h2 = pow(fiksel->h, 2); fiksel->r2 = pow(fiksel->r, 2); /* periodic boundary conditions? 
*/ fiksel->per = (model.period[0] > 0.0); return((Cdata *) fiksel); } /* conditional intensity evaluator */ double fikselcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairpotsum, cifval; double kappa, r2, h2; double *period; Fiksel *fiksel; DECLARE_CLOSE_D2_VARS; fiksel = (Fiksel *) cdata; period = fiksel->period; kappa = fiksel->kappa; r2 = fiksel->r2; h2 = fiksel->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairpotsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(fiksel->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u,v,x[j],y[j],r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; ja * pairpotsum); return cifval; } Cifns FikselCifns = { &fikselinit, &fikselcif, (updafunptr) NULL, NO}; spatstat/src/mhv5.h0000644000176200001440000000054113406057617013745 0ustar liggesusers/* mhv5.h tempered or not Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_TEMPER if(tempered) { /* tempering */ #define MH_TEMPER YES #include "mhloop.h" #undef MH_TEMPER } else { /* usual, no tempering */ #define MH_TEMPER NO #include "mhloop.h" #undef MH_TEMPER } spatstat/src/methas.c0000755000176200001440000002750513115271120014336 0ustar liggesusers#include #include #include #include "methas.h" #include "chunkloop.h" #include "mhsnoop.h" void fexitc(const char *msg); /* To switch on debugging code, insert the line: #define MH_DEBUG YES */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif /* This is the value of 'ix' 
when we are proposing a birth. It must be equal to -1 so that NONE+1 = 0. */ #define NONE -1 extern Cifns getcif(char *); SEXP xmethas( SEXP ncif, SEXP cifname, SEXP beta, SEXP ipar, SEXP iparlen, SEXP period, SEXP xprop, SEXP yprop, SEXP mprop, SEXP ntypes, SEXP nrep, SEXP p, SEXP q, SEXP nverb, SEXP nrep0, SEXP x, SEXP y, SEXP marks, SEXP ncond, SEXP fixall, SEXP track, SEXP thin, SEXP snoopenv, SEXP temper, SEXP invertemp) { char *cifstring; double cvd, cvn, qnodds, anumer, adenom, betavalue; double *iparvector; int verb, marked, tempered, mustupdate, itype; int nfree, nsuspect; int irep, ix, j, maxchunk, iverb; int Ncif; int *plength; long Nmore; int permitted; double invtemp; double *xx, *yy, *xpropose, *ypropose; int *mm, *mpropose, *pp, *aa; SEXP out, xout, yout, mout, pout, aout; int tracking, thinstart; #ifdef HISTORY_INCLUDES_RATIO SEXP numout, denout; double *nn, *dd; #endif State state; Model model; Algor algo; Propo birthprop, deathprop, shiftprop; History history; Snoop snooper; /* The following variables are used only for a non-hybrid interaction */ Cifns thecif; /* cif structure */ Cdata *thecdata; /* pointer to initialised cif data block */ /* The following variables are used only for a hybrid interaction */ Cifns *cif; /* vector of cif structures */ Cdata **cdata; /* vector of pointers to initialised cif data blocks */ int *needupd; /* vector of logical values */ int k; /* loop index for cif's */ /* =================== Protect R objects from garbage collector ======= */ PROTECT(ncif = AS_INTEGER(ncif)); PROTECT(cifname = AS_CHARACTER(cifname)); PROTECT(beta = AS_NUMERIC(beta)); PROTECT(ipar = AS_NUMERIC(ipar)); PROTECT(iparlen = AS_INTEGER(iparlen)); PROTECT(period = AS_NUMERIC(period)); PROTECT(xprop = AS_NUMERIC(xprop)); PROTECT(yprop = AS_NUMERIC(yprop)); PROTECT(mprop = AS_INTEGER(mprop)); PROTECT(ntypes = AS_INTEGER(ntypes)); PROTECT(nrep = AS_INTEGER(nrep)); PROTECT( p = AS_NUMERIC(p)); PROTECT( q = AS_NUMERIC(q)); PROTECT(nverb = 
AS_INTEGER(nverb)); PROTECT(nrep0 = AS_INTEGER(nrep0)); PROTECT( x = AS_NUMERIC(x)); PROTECT( y = AS_NUMERIC(y)); PROTECT( marks = AS_INTEGER(marks)); PROTECT(fixall = AS_INTEGER(fixall)); PROTECT(ncond = AS_INTEGER(ncond)); PROTECT(track = AS_INTEGER(track)); PROTECT(thin = AS_INTEGER(thin)); PROTECT(temper = AS_INTEGER(temper)); PROTECT(invertemp = AS_NUMERIC(invertemp)); /* that's 24 protected objects */ /* =================== Translate arguments from R to C ================ */ /* Ncif is the number of cif's plength[i] is the number of interaction parameters in the i-th cif */ Ncif = *(INTEGER_POINTER(ncif)); plength = INTEGER_POINTER(iparlen); /* copy RMH algorithm parameters */ algo.nrep = *(INTEGER_POINTER(nrep)); algo.nverb = *(INTEGER_POINTER(nverb)); algo.nrep0 = *(INTEGER_POINTER(nrep0)); algo.p = *(NUMERIC_POINTER(p)); algo.q = *(NUMERIC_POINTER(q)); algo.fixall = ((*(INTEGER_POINTER(fixall))) == 1); algo.ncond = *(INTEGER_POINTER(ncond)); algo.tempered = tempered = (*(INTEGER_POINTER(temper)) != 0); algo.invtemp = invtemp = *(NUMERIC_POINTER(invertemp)); /* copy model parameters without interpreting them */ model.beta = NUMERIC_POINTER(beta); model.ipar = iparvector = NUMERIC_POINTER(ipar); model.period = NUMERIC_POINTER(period); model.ntypes = *(INTEGER_POINTER(ntypes)); state.ismarked = marked = (model.ntypes > 1); /* copy initial state */ state.npts = LENGTH(x); state.npmax = 4 * ((state.npts > 256) ? 
state.npts : 256); state.x = (double *) R_alloc(state.npmax, sizeof(double)); state.y = (double *) R_alloc(state.npmax, sizeof(double)); xx = NUMERIC_POINTER(x); yy = NUMERIC_POINTER(y); if(marked) { state.marks =(int *) R_alloc(state.npmax, sizeof(int)); mm = INTEGER_POINTER(marks); } if(!marked) { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; } } else { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; state.marks[j] = mm[j]; } } #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif /* access proposal data */ xpropose = NUMERIC_POINTER(xprop); ypropose = NUMERIC_POINTER(yprop); mpropose = INTEGER_POINTER(mprop); /* we need to initialise 'mpropose' to keep compilers happy. mpropose is only used for marked patterns. Note 'mprop' is always a valid pointer */ /* ================= Allocate space for cifs etc ========== */ if(Ncif > 1) { cif = (Cifns *) R_alloc(Ncif, sizeof(Cifns)); cdata = (Cdata **) R_alloc(Ncif, sizeof(Cdata *)); needupd = (int *) R_alloc(Ncif, sizeof(int)); } else { /* Keep the compiler happy */ cif = (Cifns *) R_alloc(1, sizeof(Cifns)); cdata = (Cdata **) R_alloc(1, sizeof(Cdata *)); needupd = (int *) R_alloc(1, sizeof(int)); } /* ================= Determine process to be simulated ========== */ /* Get the cif's */ if(Ncif == 1) { cifstring = (char *) STRING_VALUE(cifname); thecif = getcif(cifstring); mustupdate = NEED_UPDATE(thecif); if(thecif.marked && !marked) fexitc("cif is for a marked point process, but proposal data are not marked points; bailing out."); /* Keep compiler happy*/ cif[0] = thecif; needupd[0] = mustupdate; } else { mustupdate = NO; for(k = 0; k < Ncif; k++) { cifstring = (char *) CHAR(STRING_ELT(cifname, k)); cif[k] = getcif(cifstring); needupd[k] = NEED_UPDATE(cif[k]); if(needupd[k]) mustupdate = YES; if(cif[k].marked && !marked) fexitc("component cif is for a marked point process, but proposal data are not marked points; bailing out."); } } /* ============= Initialise 
transition history ========== */ tracking = (*(INTEGER_POINTER(track)) != 0); /* Initialise even if not needed, to placate the compiler */ if(tracking) { history.nmax = algo.nrep; } else { history.nmax = 1; } history.n = 0; history.proptype = (int *) R_alloc(history.nmax, sizeof(int)); history.accepted = (int *) R_alloc(history.nmax, sizeof(int)); #ifdef HISTORY_INCLUDES_RATIO history.numerator = (double *) R_alloc(history.nmax, sizeof(double)); history.denominator = (double *) R_alloc(history.nmax, sizeof(double)); #endif /* ============= Visual debugging ========== */ /* Active if 'snoopenv' is an environment */ #if MH_DEBUG Rprintf("Initialising mhsnoop\n"); #endif initmhsnoop(&snooper, snoopenv); #if MH_DEBUG Rprintf("Initialised\n"); if(snooper.active) Rprintf("Debugger is active.\n"); #endif /* ================= Thinning of initial state ==================== */ thinstart = (*(INTEGER_POINTER(thin)) != 0); /* ================= Initialise algorithm ==================== */ /* Interpret the model parameters and initialise auxiliary data */ if(Ncif == 1) { thecdata = (*(thecif.init))(state, model, algo); /* keep compiler happy */ cdata[0] = thecdata; } else { for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } /* keep compiler happy */ thecdata = cdata[0]; } /* Set the fixed elements of the proposal objects */ birthprop.itype = BIRTH; deathprop.itype = DEATH; shiftprop.itype = SHIFT; birthprop.ix = NONE; if(!marked) birthprop.mrk = deathprop.mrk = shiftprop.mrk = NONE; /* Set up some constants */ verb = (algo.nverb !=0); qnodds = (1.0 - algo.q)/algo.q; /* Set value of beta for unmarked process */ /* (Overwritten for marked process, but keeps compiler happy) */ betavalue = model.beta[0]; /* ============= Run Metropolis-Hastings ================== */ /* Initialise random number generator */ GetRNGstate(); /* Here comes the code for the M-H loop. 
The basic code (in mhloop.h) is #included many times using different options The C preprocessor descends through a chain of files mhv1.h, mhv2.h, ... to enumerate all possible combinations of flags. */ #include "mhv1.h" /* relinquish random number generator */ PutRNGstate(); /* ============= Done ================== */ /* Create space for output, and copy final state */ /* Point coordinates */ PROTECT(xout = NEW_NUMERIC(state.npts)); PROTECT(yout = NEW_NUMERIC(state.npts)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); for(j = 0; j < state.npts; j++) { xx[j] = state.x[j]; yy[j] = state.y[j]; } /* Marks */ if(marked) { PROTECT(mout = NEW_INTEGER(state.npts)); mm = INTEGER_POINTER(mout); for(j = 0; j < state.npts; j++) mm[j] = state.marks[j]; } else { /* Keep the compiler happy */ PROTECT(mout = NEW_INTEGER(1)); mm = INTEGER_POINTER(mout); mm[0] = 0; } /* Transition history */ if(tracking) { PROTECT(pout = NEW_INTEGER(algo.nrep)); PROTECT(aout = NEW_INTEGER(algo.nrep)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); for(j = 0; j < algo.nrep; j++) { pp[j] = history.proptype[j]; aa[j] = history.accepted[j]; } #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(algo.nrep)); PROTECT(denout = NEW_NUMERIC(algo.nrep)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); for(j = 0; j < algo.nrep; j++) { nn[j] = history.numerator[j]; dd[j] = history.denominator[j]; } #endif } else { /* Keep the compiler happy */ PROTECT(pout = NEW_INTEGER(1)); PROTECT(aout = NEW_INTEGER(1)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); pp[0] = aa[0] = 0; #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(1)); PROTECT(denout = NEW_NUMERIC(1)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); nn[0] = dd[0] = 0; #endif } /* Pack up into list object for return */ if(!tracking) { /* no transition history */ if(!marked) { PROTECT(out = NEW_LIST(2)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); } else { PROTECT(out = 
NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); } } else { /* transition history */ if(!marked) { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(6)); #else PROTECT(out = NEW_LIST(4)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, pout); SET_VECTOR_ELT(out, 3, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 4, numout); SET_VECTOR_ELT(out, 5, denout); #endif } else { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(7)); #else PROTECT(out = NEW_LIST(5)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); SET_VECTOR_ELT(out, 3, pout); SET_VECTOR_ELT(out, 4, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 5, numout); SET_VECTOR_ELT(out, 6, denout); #endif } } #ifdef HISTORY_INCLUDES_RATIO UNPROTECT(32); /* 24 arguments plus xout, yout, mout, pout, aout, out, numout, denout */ #else UNPROTECT(30); /* 24 arguments plus xout, yout, mout, pout, aout, out */ #endif return(out); } spatstat/src/exactdist.c0000755000176200001440000001475613406057617015071 0ustar liggesusers/* exactdist.c Exact distance transform of a point pattern (used to estimate the empty space function F) $Revision: 1.13 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Author: Adrian Baddeley Sketch of functionality: the 'data' are a finite list of points in R^2 (x,y coordinates) and the 'output' is a real valued image whose entries are distances, with the value for each pixel equalling the distance from that pixel to the nearest point of the data pattern. 
Routines: exact_dt_R() interface to R exact_dt() implementation of distance transform dist_to_bdry() compute distance to edge of image frame shape_raster() initialise a Raster structure The appropriate calling sequence for exact_dt_R() is exemplified in 'exactdt.R' */ #undef DEBUG #include #include "raster.h" #ifdef DEBUG #include #endif void shape_raster(ras,data,xmin,ymin,xmax,ymax,nrow,ncol,mrow,mcol) Raster *ras; /* the raster structure to be initialised */ void *data; int nrow, ncol; /* absolute dimensions of storage array */ int mrow, mcol; /* margins clipped off */ /* e.g. valid width is ncol - 2*mcol columns */ double xmin, ymin, /* image dimensions in R^2 after clipping */ xmax, ymax; { ras->data = data; ras->nrow = nrow; ras->ncol = ncol; ras->length = nrow * ncol; ras->rmin = mrow; ras->rmax = nrow - mrow - 1; ras->cmin = mcol; ras->cmax = ncol - mcol - 1; ras->x0 = ras->xmin = xmin; ras->x1 = ras->xmax = xmax; ras->y0 = ras->ymin = ymin; ras->y1 = ras->ymax = ymax; ras->xstep = (xmax-xmin)/(ncol - 2 * mcol - 1); ras->ystep = (ymax-ymin)/(nrow - 2 * mrow - 1); /* Rprintf("xstep,ystep = %lf,%lf\n", ras->xstep,ras->ystep); */ } void exact_dt(x, y, npt, dist, index) double *x, *y; /* data points */ int npt; Raster *dist; /* exact distance to nearest point */ Raster *index; /* which point x[i],y[i] is closest */ { int i,j,k,l,m; double d; int ii; double dd; /* double bdiag; */ /* initialise rasters */ #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) Clear(*index,int,UNDEFINED) d = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); Clear(*dist,double,d) /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; for(i = 0; i < npt; i++) { /* Rprintf("%ld -> (%lf,%lf)\n", i, x[i], y[i]); */ j = RowIndex(*dist,y[i]); k = ColIndex(*dist,x[i]); /* if(!Inside(*dist,j,k)) Rprintf("(%ld,%ld) out of bounds\n",j,k); else if (!Inside(*dist,j+1,k+1)) Rprintf("(%ld+1,%ld+1) out of bounds\n",j,k); */ for(l = j; l <= j+1; l++) for(m = k; m <= k+1; m++) { d = DistanceToSquared(x[i],y[i],*index,l,m); if( Is_Undefined(Entry(*index,l,m,int)) || Entry(*dist,l,m,double) > d) { /* Rprintf("writing (%ld,%ld) -> %ld\t%lf\n", l,m,i,d); */ Entry(*index,l,m,int) = i; Entry(*dist,l,m,double) = d; /* Rprintf("checking: %ld, %lf\n", Entry(*index,l,m,int), Entry(*dist,l,m,double)); */ } } } /* for(j = 0; j <= index->nrow; j++) for(k = 0; k <= index->ncol; k++) Rprintf("[%ld,%ld] %ld\t%lf\n", j,k,Entry(*index,j,k,int),Entry(*dist,j,k,double)); */ /* how to update the distance values */ #define COMPARE(ROW,COL,RR,CC) \ d = Entry(*dist,ROW,COL,double); \ ii = Entry(*index,RR,CC,int); \ /* Rprintf(" %lf\t (%ld,%ld) |-> %ld\n", d, RR, CC, ii); */ \ if(Is_Defined(ii) /* && ii < npt */ \ && Entry(*dist,RR,CC,double) < d) { \ dd = DistanceSquared(x[ii],y[ii],Xpos(*index,COL),Ypos(*index,ROW)); \ if(dd < d) { \ /* Rprintf("(%ld,%ld) <- %ld\n", ROW, COL, ii); */ \ Entry(*index,ROW,COL,int) = ii; \ Entry(*dist,ROW,COL,double) = dd; \ /* Rprintf("checking: %ld, %lf\n", Entry(*index,ROW,COL,int), Entry(*dist,ROW,COL,double)); */\ } \ } /* bound on diagonal step distance */ /* bdiag = sqrt(index->xstep * index->xstep + index->ystep * index->ystep); */ /* forward pass */ for(j = index->rmin; j <= index->rmax; j++) for(k = index->cmin; k <= index->cmax; k++) { /* Rprintf("Neighbourhood of (%ld,%ld):\n", j,k); */ COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = index->rmax; j >= index->rmin; j--) for(k = index->cmax; k >= index->cmin; k--) { COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, 
k+1) } /* take square roots of the distances^2 */ for(j = index->rmin; j <= index->rmax; j++) for(k = index->cmin; k <= index->cmax; k++) Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double)); } #define MIN(A,B) (((A) < (B)) ? (A) : (B)) void dist_to_bdry(d) /* compute distance to boundary from each raster point */ Raster *d; /* of course this is easy for a rectangular grid but we implement it in C for ease of future modification */ { int j, k; double x, y, xd, yd; for(j = d->rmin; j <= d->rmax;j++) { y = Ypos(*d,j); yd = MIN(y - d->ymin, d->ymax - y); for(k = d->cmin; k <= d->cmax;k++) { x = Xpos(*d,k); xd = MIN(x - d->xmin, d->xmax - x); Entry(*d,j,k,double) = MIN(xd,yd); } } } /* R interface */ void exact_dt_R(x, y, npt, xmin, ymin, xmax, ymax, nr, nc, mr, mc, distances, indices, boundary) double *x, *y; /* input data points */ int *npt; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ /* output arrays */ double *distances; /* distance to nearest point */ int *indices; /* index to nearest point */ double *boundary; /* distance to boundary */ { Raster dist, index, bdist; int mrow, mcol, nrow, ncol; mrow = *mr; mcol = *mc; /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &index, (void *) indices, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); exact_dt(x, y, (int) *npt, &dist, &index); dist_to_bdry(&bdist); } spatstat/src/localpcf.h0000755000176200001440000000470613406057617014663 0ustar liggesusers/* localpcf.h Source template for versions of local pair correlation Requires variable: WEIGHTED Assumes point patterns are sorted in increasing order of x coordinate $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege 
Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifdef WEIGHTED #define FNAME locWpcfx #else #define FNAME locpcfx #endif void FNAME(nn1, x1, y1, id1, nn2, x2, y2, id2, #ifdef WEIGHTED w2, #endif nnr, rmaxi, del, pcf) /* inputs */ int *nn1, *nn2, *nnr; double *x1, *y1, *x2, *y2; int *id1, *id2; double *rmaxi, *del; #ifdef WEIGHTED double *w2; #endif /* output */ double *pcf; /* matrix of column vectors of pcf's for each point of first pattern */ { int n1, n2, nr, i, j, k, jleft, kmin, kmax, id1i, maxchunk; double x1i, y1i, rmax, delta, xleft, dx, dy, dx2; double d2, d2max, dmax, d; double rstep, rvalue, frac, contrib, weight, coef; n1 = *nn1; n2 = *nn2; nr = *nnr; rmax = *rmaxi; delta = *del; dmax = rmax + delta; /* maximum relevant value of interpoint distance */ d2max = dmax * dmax; rstep = rmax/(nr-1); coef = 3.0 /(4.0 * delta); if(n1 == 0 || n2 == 0) return; jleft = 0; OUTERCHUNKLOOP(i, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n1, maxchunk, 8196) { x1i = x1[i]; y1i = y1[i]; id1i = id1[i]; /* adjust starting point */ xleft = x1i - dmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from jleft until |dx| > dmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > d2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= d2max && id2[j] != id1i) { d = sqrt(d2); kmin = (int) floor((d-delta)/rstep); kmax = (int) ceil((d+delta)/rstep); if(kmin <= nr-1 && kmax >= 0) { /* nonempty intersection with range of r values */ /* compute intersection */ if(kmin < 0) kmin = 0; if(kmax >= nr) kmax = nr-1; /* */ weight = coef/d; #ifdef WEIGHTED weight = weight * w2[j]; #endif for(k = kmin; k <= kmax; k++) { rvalue = k * rstep; frac = (d - rvalue)/delta; /* Epanechnikov kernel with halfwidth delta */ contrib = (1 - frac * frac); if(contrib > 0) pcf[k + nr * i] += contrib * weight; } } } } } } } #undef FNAME spatstat/src/KrectBody.h0000644000176200001440000001042513406057617014756 0ustar 
liggesusers /* KrectBody.h +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Main function body for 'Krect' Included multiple times with different values of the macros: (#define or #undef) WEIGHTED ISOTROPIC TRANSLATION BORDER UNCORRECTED **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Variables are declared in 'KrectFunDec.c' This algorithm is optimal (amongst the choices in spatstat) when the window is a rectangle *and* at least one of the ISOTROPIC, TRANSLATION corrections is needed. There are faster algorithms for the border correction on its own. $Revision: 1.3 $ $Date: 2014/02/09 03:01:27 $ */ /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; /* ............. LOOP OVER i ................. */ for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef WEIGHTED wi = w[i]; #endif #ifdef BORDER /* For border correction */ /* compute distance to border */ bx = MIN(xi, (wide - xi)); by = MIN(yi, (high - yi)); bdisti = MIN(bx, by); /* denominator will ultimately be incremented for all r < b[i] */ bratio = bdisti/rstep; /* lbord is the largest integer STRICTLY less than bratio */ lbord = (int) ceil(bratio) - 1; lbord = (lbord <= Nr1) ? 
lbord : Nr1; /* increment entry corresponding to r = b[i] */ #ifdef WEIGHTED if(lbord >= 0) denomAccum[lbord] += wi; #else if(lbord >= 0) (denomAccum[lbord])++; #endif #endif #ifdef ISOTROPIC /* For isotropic correction */ /* perpendicular distance from point i to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xi; dR = wide - xi; dD = yi; dU = high - yi; /* test for corner of the rectangle */ ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2); /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); #endif /* ............. LOOP OVER j ................. */ /* scan through points (x[j],y[j]) */ /* scan backward from i-1 until |x[j]-x[i]| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = xi - x[j]; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) { #include "KrectIncrem.h" } } } /* scan forward from i+1 until x[j]-x[i] > Rmax */ if(i < N1) { for(j=i+1; j < N; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) { #include "KrectIncrem.h" } } } } } /* .................. END OF LOOPS ................................ */ /* ............. compute cumulative functions ..................... */ #ifdef UNCORRECTED naccum = ZERO; for(l = 0; l < Nr; l++) { unco[l] += naccum; naccum = unco[l]; } #endif #ifdef ISOTROPIC accum = 0.0; for(l = 0; l < Nr; l++) { iso[l] += accum; accum = iso[l]; } #endif #ifdef TRANSLATION accum = 0.0; for(l = 0; l < Nr; l++) { trans[l] += accum; accum = trans[l]; } #endif #ifdef BORDER /* Now use the accumulated values to compute the numerator and denominator. 
The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. */ for(l=Nr1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; bdenom[l] = daccum; naccum += numerHighAccum[l]; bnumer[l] = naccum; naccum -= numerLowAccum[l]; } #endif spatstat/src/scan.c0000644000176200001440000000402513406057617014006 0ustar liggesusers/* scan.c Scan transform $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "raster.h" void shape_raster(); void Cscantrans(x, y, npt, R, out) double *x, *y; /* data points */ int npt; double R; /* radius */ Raster *out; /* scan image */ { int i,j,k,l,m; double d2, R2; int rmin, rmax, cmin, cmax, Rrow, Rcol, lmin, lmax, mmin, mmax; /* initialise raster */ Clear(*out,int,0); /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; R2 = R * R; cmin = out->cmin; cmax = out->cmax; rmin = out->rmin; rmax = out->rmax; /* disc size in rows/columns */ Rrow = (int) ceil(R/(out->ystep)); Rcol = (int) ceil(R/(out->xstep)); if(Rrow < 1) Rrow = 1; if(Rcol < 1) Rcol = 1; /* run through points */ for(i = 0; i < npt; i++) { j = RowIndex(*out,y[i]); k = ColIndex(*out,x[i]); lmin = j - Rrow; if(lmin < rmin) lmin = rmin; lmax = j + Rrow; if(lmax > rmax) lmax = rmax; mmin = k - Rcol; if(mmin < cmin) mmin = cmin; mmax = k + Rcol; if(mmax > cmax) mmax = cmax; for(l = lmin; l <= lmax; l++) { for(m = mmin; m <= mmax; m++) { d2 = DistanceToSquared(x[i],y[i],*out,l,m); if(d2 <= R2) Entry(*out,l,m,int) += 1; } } } } /* R interface */ void scantrans(x, y, n, xmin, ymin, xmax, ymax, nr, nc, R, counts) double *x, *y; /* input data points */ int *n; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions */ double *R; /* radius */ /* output array */ int *counts; /* number of R-close points */ { Raster out; int nrow, ncol, npoints; double r; nrow = *nr; ncol = *nc; npoints = *n; r = *R; shape_raster( &out, (void *) counts, *xmin,*ymin,*xmax,*ymax, nrow, ncol, 0, 0); Cscantrans(x, y, npoints, r, &out); } spatstat/src/straush.c0000755000176200001440000000602513115271120014540 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Hard core Strauss process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct StraussHard { double gamma; double r; /* interaction distance */ double h; /* hard core distance */ double loggamma; double r2; double h2; double r2h2; /* r^2 - h^2 */ double *period; int hard; int per; } StraussHard; /* initialiser function */ Cdata *straushinit(state, model, algo) State state; Model model; Algor algo; { StraussHard *strausshard; strausshard = (StraussHard *) R_alloc(1, sizeof(StraussHard)); /* Interpret model parameters*/ 
strausshard->gamma = model.ipar[0]; strausshard->r = model.ipar[1]; /* No longer passed as r^2 */ strausshard->h = model.ipar[2]; /* No longer passed as h^2 */ strausshard->r2 = pow(strausshard->r, 2); strausshard->h2 = pow(strausshard->h, 2); strausshard->r2h2 = strausshard->r2 - strausshard->h2; strausshard->period = model.period; /* is the interaction numerically equivalent to hard core ? */ strausshard->hard = (strausshard->gamma < DOUBLE_EPS); strausshard->loggamma = (strausshard->hard) ? 0.0 : log(strausshard->gamma); /* periodic boundary conditions? */ strausshard->per = (model.period[0] > 0.0); return((Cdata *) strausshard); } /* conditional intensity evaluator */ double straushcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, r2h2, cifval; StraussHard *strausshard; double *period; DECLARE_CLOSE_VARS; strausshard = (StraussHard *) cdata; r2 = strausshard->r2; r2h2 = strausshard->r2h2; period = strausshard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { /* RESIDUE = r2 - distance^2 */ if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } } if(ixp1 < npts) { for(j=ixp1; j r2h2) return((double) 0.0); ++kount; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { if(RESIDUE > r2h2) return((double) 0.0); ++kount; } } } if(ixp1 < npts) { for(j=ixp1; j r2h2) return((double) 0.0); ++kount; } } } } if(strausshard->hard) { if(kount > 0) cifval = (double) 0.0; else cifval = (double) 1.0; } else cifval = exp(strausshard->loggamma*kount); return cifval; } Cifns StraussHardCifns = { &straushinit, &straushcif, (updafunptr) NULL, NO}; 
spatstat/src/chunkloop.h0000644000176200001440000000161513406057617015073 0ustar liggesusers/* chunkloop.h

   Divide a loop into chunks

   Convenient for divide-and-recombine,
   and reducing calls to R_CheckUserInterrupt, etc.

   $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $

   Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
   Licence: GNU Public Licence >= 2

*/

/* Typical usage (see e.g. localpcf.h and KrectBody.h in this package):

       OUTERCHUNKLOOP(i, n, maxchunk, 16384) {
         R_CheckUserInterrupt();
         INNERCHUNKLOOP(i, n, maxchunk, 16384) {
           ... loop body using index i ...
         }
       }

   The outer macro initialises the index and the chunk counter; the inner
   macro advances the chunk limit and runs at most CHUNKSIZE iterations,
   so R_CheckUserInterrupt() executes once per chunk instead of once per
   iteration. */

#define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
  IVAR = 0; \
  ICHUNK = 0; \
  while(IVAR < LOOPLENGTH)

#define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \
  ICHUNK += CHUNKSIZE; \
  if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \
  for(; IVAR < ICHUNK; IVAR++)

/* variants for an explicit index range ISTART..IEND (inclusive) */
#define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
  IVAR = ISTART; \
  ICHUNK = 0; \
  while(IVAR <= IEND)

#define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \
  ICHUNK += CHUNKSIZE; \
  if(ICHUNK > IEND) ICHUNK = IEND; \
  for(; IVAR <= IEND; IVAR++)

#define CHUNKLOOP_H

spatstat/src/methas.h0000755000176200001440000000712313115271120014335 0ustar liggesusers/* 
   Definitions of types and data structures for Metropolis-Hastings

   State      Current state of point pattern
   Model      Model parameters passed from R
   Cdata      (pointer to) model parameters and precomputed data in C
   Algor      Algorithm parameters (p, q, nrep etc)
   Propo      Proposal in Metropolis-Hastings algorithm
   History    Transition history of MH algorithm

   Cifns      Set of functions for computing the conditional intensity
              for a point process model.
              This consists of three functions
                 init(State, Model, Algor) ....  initialises auxiliary data
                 eval(State, Propo) ...........  evaluates cif
                 update(State,Propo) ..........
updates auxiliary data */ /* Current state of point pattern */ typedef struct State { double *x; /* vectors of Cartesian coordinates */ double *y; int *marks; /* vector of mark values */ int npts; /* current number of points */ int npmax; /* storage limit */ int ismarked; /* whether the pattern is marked */ } State; /* Parameters of model passed from R */ typedef struct Model { double *beta; /* vector of activity parameters */ double *ipar; /* vector of interaction parameters */ double *period; /* width & height of rectangle, if torus */ int ntypes; /* number of possible marks */ } Model; /* A pointer to Cdata is a pointer to C storage for parameters of model */ typedef void Cdata; /* RMH Algorithm parameters */ typedef struct Algor { double p; /* probability of proposing shift */ double q; /* conditional probability of proposing death */ int fixall; /* if TRUE, only shifts of location are feasible */ int ncond; /* For conditional simulation, the first 'ncond' points are fixed */ int nrep; /* number of iterations */ int nverb; /* print report every 'nverb' iterations */ int nrep0; /* number of iterations already performed in previous blocks - for reporting purposes */ int tempered; /* TRUE if tempering is applied */ double invtemp; /* inverse temperature if tempering is applied */ } Algor; /* Metropolis-Hastings proposal */ typedef struct Propo { double u; /* location of point of interest */ double v; int mrk; /* mark of point of interest */ int ix; /* index of point of interest, if already in pattern */ int itype; /* transition type */ } Propo; /* transition codes 'itype' */ #define REJECT 0 #define BIRTH 1 #define DEATH 2 #define SHIFT 3 #define HISTORY_INCLUDES_RATIO /* Record of transition history */ typedef struct History { int nmax; /* length of vectors */ int n; /* number of events recorded */ int *proptype; /* vector: proposal type */ int *accepted; /* vector: 0 for reject, 1 for accept */ #ifdef HISTORY_INCLUDES_RATIO double *numerator; /* vectors: 
Hastings ratio numerator & denominator */ double *denominator; #endif } History; /* conditional intensity functions */ typedef Cdata * (*initfunptr)(State state, Model model, Algor algo); typedef double (*evalfunptr)(Propo prop, State state, Cdata *cdata); typedef void (*updafunptr)(State state, Propo prop, Cdata *cdata); typedef struct Cifns { initfunptr init; evalfunptr eval; updafunptr update; int marked; } Cifns; #define NEED_UPDATE(X) ((X).update != (updafunptr) NULL) #define NULL_CIFNS { (initfunptr) NULL, (evalfunptr) NULL, (updafunptr) NULL, NO} /* miscellaneous macros */ #include "yesno.h" # define MAT(X,I,J,M) (X[(I)+(J)*(M)]) spatstat/src/knn3DdistX.h0000644000176200001440000001243513406057617015064 0ustar liggesusers #if (1 == 0) /* knn3DdistX.h Code template for C functions supporting nncross for k-nearest neighbours (k > 1) for 3D point patterns THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF z COORDINATE This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that X[i] and Y[j] are the same point iff id1[i] = id2[j]. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #endif void FNAME(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, kmax, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; int *id1, *id2; int *kmax; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, nk, nk1; int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1; double d2, d2minK, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif #ifdef EXCLUDE int id1i; #endif npoints1 = *n1; npoints2 = *n2; nk = *kmax; nk1 = nk - 1; hu = *huge; hu2 = hu * hu; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { /* initialise nn distances and indices */ d2minK = hu2; jwhich = -1; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } x1i = x1[i]; y1i = y1[i]; z1i = z1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif if(lastjwhich < npoints2) { /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { dz = z2[jright] - z1i; dz2 = dz * dz; if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #endif dy = y2[jright] - y1i; d2 = dy * dy + dz2; if(d2 < d2minK) 
{ dx = x2[jright] - x1i; d2 = dx * dx + d2; if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ d2min[nk1] = d2; jwhich = jright; #ifdef WHICH which[nk1] = jright; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef EXCLUDE } #endif } /* end forward search */ } if(lastjwhich > 0) { /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dz = z1i - z2[jleft]; dz2 = dz * dz; if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #endif dy = y2[jleft] - y1i; d2 = dy * dy + dz2; if(d2 < d2minK) { dx = x2[jleft] - x1i; d2 = dx * dx + d2; if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ d2min[nk1] = d2; jwhich = jleft; #ifdef WHICH which[nk1] = jleft; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef EXCLUDE } #endif } /* end backward search */ } /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* save index of last neighbour encountered */ lastjwhich = jwhich; /* end of loop over points i */ } } } spatstat/src/triplets.c0000644000176200001440000000615513115225157014727 0ustar liggesusers#include #include 
#include "methas.h" #include "dist2.h" /* Conditional intensity computation for Triplets process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Triplets { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; int *neighbour; /* scratch list of neighbours of current point */ int Nmax; /* length of scratch space allocated */ } Triplets; /* initialiser function */ Cdata *tripletsinit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Triplets *triplets; triplets = (Triplets *) R_alloc(1, sizeof(Triplets)); /* create scratch space */ triplets->Nmax = 1024; triplets->neighbour = (int *) R_alloc(1024, sizeof(int)); /* Interpret model parameters*/ triplets->gamma = model.ipar[0]; triplets->r = model.ipar[1]; /* No longer passed as r^2 */ triplets->r2 = triplets->r * triplets->r; triplets->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Triplets gamma=%lf, r=%lf\n", triplets->gamma, triplets->r); #endif /* is the model numerically equivalent to hard core ? */ triplets->hard = (triplets->gamma < DOUBLE_EPS); triplets->loggamma = (triplets->hard) ? 0 : log(triplets->gamma); /* periodic boundary conditions? 
*/ triplets->per = (model.period[0] > 0.0); return((Cdata *) triplets); } /* conditional intensity evaluator */ double tripletscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, j, k, nj, nk, N, Nmax, Nmore, N1; int *neighbour; double *x, *y; double u, v; double r2, d2, cifval; Triplets *triplets; triplets = (Triplets *) cdata; r2 = triplets->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); neighbour = triplets->neighbour; Nmax = triplets->Nmax; N = 0; /* compile list of neighbours */ for(j=0; j < npts; j++) { if(j != ix) { d2 = dist2either(u,v,x[j],y[j],triplets->period); if(d2 < r2) { /* add j to list of neighbours of current point */ if(N >= Nmax) { /* storage space overflow: reallocate */ Nmore = 2 * Nmax; triplets->neighbour = neighbour = (int *) S_realloc((char *) triplets->neighbour, Nmore, Nmax, sizeof(int)); triplets->Nmax = Nmax = Nmore; } neighbour[N] = j; N++; } } } /* count r-close (ordered) pairs of neighbours */ kount = 0; if(N > 1) { N1 = N - 1; for(j = 0; j < N1; j++) { nj = neighbour[j]; for(k = j+1; k < N; k++) { nk = neighbour[k]; if(nj != nk) { d2 = dist2either(x[nj],y[nj],x[nk],y[nk],triplets->period); if(d2 < r2) kount++; } } } } if(triplets->hard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((triplets->loggamma) * kount); #ifdef MHDEBUG Rprintf("triplet count=%d cif=%lf\n", kount, cifval); #endif return cifval; } Cifns TripletsCifns = { &tripletsinit, &tripletscif, (updafunptr) NULL, NO}; spatstat/src/distan3.c0000755000176200001440000002453413406057617014441 0ustar liggesusers/* distan3.c Distances between pairs of 3D points $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ D3pairdist Pairwise distances D3pair2dist Pairwise distances squared D3pairPdist Pairwise distances with periodic correction D3pairP2dist Pairwise distances squared, with periodic correction D3crossdist Pairwise distances for two sets of points 
D3cross2dist Pairwise distances squared, for two sets of points D3crossPdist Pairwise distances for two sets of points, periodic correction matchxyz Find matches between two sets of points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include /* #include */ double sqrt(); void D3pairdist(n, x, y, z, squared, d) /* inputs */ int *n; double *x, *y, *z; int *squared; /* output */ double *d; { void D3pair1dist(), D3pair2dist(); if(*squared == 0) { D3pair1dist(n, x, y, z, d); } else { D3pair2dist(n, x, y, z, d); } } void D3pair1dist(n, x, y, z, d) /* inputs */ int *n; double *x, *y, *z; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dist = sqrt( dx * dx + dy * dy + dz * dz ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } /* squared distances */ void D3pair2dist(n, x, y, z, d) /* inputs */ int *n; double *x, *y, *z; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dist = dx * dx + dy * dy + dz * dz; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } void D3crossdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; int *squared; /* output */ double *d; { void D3cross1dist(), D3cross2dist(); 
if(*squared == 0) { D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d); } else { D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d); } } void D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz; nf = *nfrom; nt = *nto; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; *dptr = sqrt( dx * dx + dy * dy + dz * dz ); } } } /* squared distances */ void D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz; nf = *nfrom; nt = *nto; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; *dptr = dx * dx + dy * dy + dz * dz; } } } /* distances with periodic correction */ void D3pairPdist(n, x, y, z, xwidth, yheight, zdepth, squared, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; int *squared; /* output */ double *d; { void D3pairP1dist(), D3pairP2dist(); if(*squared == 0) { D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d); } else { D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d); } } void D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep; npoints = *n; wide = *xwidth; high = *yheight; deep = *zdepth; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ 
dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dz + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dist = sqrt( dx2p + dy2p + dz2p ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } /* same function without the sqrt */ void D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep; npoints = *n; wide = *xwidth; high = *yheight; deep = *zdepth; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dz + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dist = dx2p + dy2p + dz2p; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } void D3crossPdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, 
*zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; int *squared; /* output */ double *d; { void D3crossP1dist(), D3crossP2dist(); if(*squared == 0) { D3crossP1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d); } else { D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d); } } void D3crossP1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; deep = *zdepth; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dy + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; *dptr = sqrt( dx2p + dy2p + dz2p ); } } } void D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; deep = *zdepth; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; dx2p = dx * dx; dy2p = dy * dy; 
dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dy + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; *dptr = dx2p + dy2p + dz2p; } } } /* matchxyz Find matches between two lists of points */ void matchxyz(na, xa, ya, za, nb, xb, yb, zb, match) /* inputs */ int *na, *nb; double *xa, *ya, *za, *xb, *yb, *zb; /* output */ int *match; { int i, j, Na, Nb; double xai, yai, zai; Na = *na; Nb = *nb; for (i=1; i < Na; i++) { xai = xa[i]; yai = ya[i]; zai = za[i]; match[i] = 0; for (j=0; j < Nb; j++) if(xai == xb[j] && yai == yb[j] && zai == zb[i]) { match[i] = j; break; } } } spatstat/src/dgs.c0000755000176200001440000000505613115271120013627 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" #include "constants.h" /* Conditional intensity computation for Diggle-Gates-Stibbard process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = sin^2(pi*t/2*rho) for t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) */ #define PION2 M_PI_2 /* pi/2 defined in Rmath.h */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Dgs { double rho; double rho2; double pion2rho; double *period; int per; } Dgs; /* initialiser function */ Cdata *dgsinit(state, model, algo) State state; Model model; Algor algo; { Dgs *dgs; /* allocate storage */ dgs = (Dgs *) R_alloc(1, sizeof(Dgs)); /* Interpret model parameters*/ dgs->rho = model.ipar[0]; dgs->period = model.period; /* constants */ dgs->rho2 = pow(dgs->rho, 2); dgs->pion2rho = PION2/dgs->rho; /* periodic boundary conditions? 
*/ dgs->per = (model.period[0] > 0.0); return((Cdata *) dgs); } /* conditional intensity evaluator */ double dgscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, r2, pairprod, cifval; Dgs *dgs; DECLARE_CLOSE_D2_VARS; dgs = (Dgs *) cdata; r2 = dgs->rho2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(dgs->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } if(ixp1 < npts) { for(j=ixp1; jperiod,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], r2, d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } if(ixp1 < npts) { for(j=ixp1; jpion2rho * sqrt(d2)); } } } /* sin to sin^2 */ cifval = pairprod * pairprod; return cifval; } Cifns DgsCifns = { &dgsinit, &dgscif, (updafunptr) NULL, NO}; spatstat/src/mhloop.h0000755000176200001440000003036613406057617014377 0ustar liggesusers /* mhloop.h This file contains the iteration loop for the Metropolis-Hastings algorithm methas.c It is #included several times in methas.c with different #defines for the following variables MH_MARKED whether the simulation is marked (= the variable 'marked' is TRUE) MH_SINGLE whether there is a single interaction (as opposed to a hybrid of several interactions) MH_TEMPER whether tempering is applied MH_TRACKING whether to save transition history MH_DEBUG whether to print debug information MH_SNOOP whether to run visual debugger $Revision: 1.23 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif /* ..... 
Pre-processing: recursively delete illegal/improbable points ..... */ nfree = state.npts - algo.ncond; /* number of 'free' points */ if(thinstart && nfree > 0) { nsuspect = nfree; while(nsuspect > 0) { /* scan for illegal points */ ix = state.npts - nsuspect; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("check legality of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("check legality of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity without trend terms */ #if MH_SINGLE adenom = (*(thecif.eval))(deathprop, state, thecdata); #else adenom = 1.0; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif #if MH_TEMPER adenom = pow(adenom, invtemp); #endif #if MH_DEBUG Rprintf("cif = %lf\n", adenom); #endif /* accept/reject */ if(unif_rand() >= adenom) { #if MH_DEBUG Rprintf("deleting illegal/improbable point\n"); #endif /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } state.npts--; nfree--; #if MH_DEBUG Rprintf("deleting point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } nsuspect--; } } /* ............... MAIN ITERATION LOOP ............................. 
*/ OUTERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { #if MH_DEBUG Rprintf("iteration %d\n", irep); #endif if(verb) { /* print progress message every nverb iterations */ iverb = irep + 1 + algo.nrep0; if((iverb % algo.nverb) == 0) Rprintf("iteration %d\n", iverb); } itype = REJECT; nfree = state.npts - algo.ncond; /* number of 'free' points */ /* ................ generate proposal ..................... */ /* Shift or birth/death: */ if(unif_rand() > algo.p) { #if MH_DEBUG Rprintf("propose birth or death\n"); #endif /* Birth/death: */ if(unif_rand() > algo.q) { /* Propose birth: */ birthprop.u = xpropose[irep]; birthprop.v = ypropose[irep]; #if MH_MARKED birthprop.mrk = mpropose[irep]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("propose birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[birthprop.mrk]; #endif #if MH_SINGLE anumer = betavalue * (*(thecif.eval))(birthprop, state, thecdata); #else anumer = betavalue; for(k = 0; k < Ncif; k++) anumer *= (*(cif[k].eval))(birthprop, state, cdata[k]); #endif #if MH_TEMPER anumer = pow(anumer, invtemp); #endif adenom = qnodds*(nfree+1); #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", anumer, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted birth\n"); #endif itype = BIRTH; /* Birth proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &birthprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = BIRTH; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } else if(nfree > 0) { /* Propose death: */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose death of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("propose death of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[deathprop.mrk]; #endif #if MH_SINGLE adenom = betavalue * (*(thecif.eval))(deathprop, state, thecdata); #else adenom = betavalue; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif #if MH_TEMPER adenom = pow(adenom, invtemp); #endif anumer = qnodds * nfree; #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", adenom, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted death\n"); #endif itype = DEATH; /* Death proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &deathprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = DEATH; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } } else if(nfree > 0) { /* Propose shift: */ /* point to be shifted */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif /* where to shift */ permitted = YES; shiftprop.ix = ix; shiftprop.u = xpropose[irep]; shiftprop.v = ypropose[irep]; #if MH_MARKED shiftprop.mrk = mpropose[irep]; if(algo.fixall) permitted = (shiftprop.mrk == deathprop.mrk); #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose shift of point %d = (%lf, %lf)[mark %d] to (%lf, %lf)[mark %d]\n", ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("propose shift of point %d = (%lf, %lf) to (%lf, %lf)\n", ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); #endif #endif /* evaluate cif in two stages */ cvn = cvd = 1.0; if(permitted) { #if MH_SINGLE cvn = (*(thecif.eval))(shiftprop, state, thecdata); if(cvn > 0.0) { cvd = (*(thecif.eval))(deathprop, state, thecdata); } else { permitted = NO; } #else for(k = 0; k < Ncif; k++) { cvn *= (*(cif[k].eval))(shiftprop, state, cdata[k]); if(cvn > 0.0) { cvd *= (*(cif[k].eval))(deathprop, state, cdata[k]); } else { permitted = NO; break; } } #endif } if(permitted) { #if MH_MARKED cvn *= model.beta[shiftprop.mrk]; cvd *= model.beta[deathprop.mrk]; #endif #if MH_TEMPER cvn = pow(cvn, invtemp); cvd = pow(cvd, invtemp); #endif #if MH_DEBUG Rprintf("cif[old] = %lf, cif[new] = %lf, Hastings ratio = %lf\n", cvd, cvn, cvn/cvd); #endif /* accept/reject */ if(unif_rand() * cvd < cvn) { #if MH_DEBUG Rprintf("accepted shift\n"); #endif itype = SHIFT; /* Shift proposal accepted . 
*/ } } else { cvn = 0.0; cvd = 1.0; #if MH_DEBUG Rprintf("Forbidden shift"); #endif } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &shiftprop, cvn, cvd, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = SHIFT; history.accepted[irep] = (itype == REJECT) ? 0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = cvn; history.denominator[irep] = cvd; #endif } #endif } if(itype != REJECT) { /* ....... implement the transition ............ */ if(itype == BIRTH) { /* Birth transition */ /* add point at (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("implementing birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif if(state.npts + 1 > state.npmax) { #if MH_DEBUG Rprintf("!!!!!!!!!!! storage overflow !!!!!!!!!!!!!!!!!\n"); #endif /* storage overflow; allocate more storage */ Nmore = 2 * state.npmax; state.x = (double *) S_realloc((char *) state.x, Nmore, state.npmax, sizeof(double)); state.y = (double *) S_realloc((char *) state.y, Nmore, state.npmax, sizeof(double)); #if MH_MARKED state.marks = (int *) S_realloc((char *) state.marks, Nmore, state.npmax, sizeof(int)); #endif state.npmax = Nmore; /* call the initialiser again, to allocate additional space */ #if MH_SINGLE thecdata = (*(thecif.init))(state, model, algo); #else model.ipar = iparvector; for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } #endif #if MH_DEBUG Rprintf("........... 
storage extended .................\n"); #endif } if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, birthprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, birthprop, cdata[k]); } #endif } /* Now add point */ state.x[state.npts] = birthprop.u; state.y[state.npts] = birthprop.v; #if MH_MARKED state.marks[state.npts] = birthprop.mrk; #endif state.npts = state.npts + 1; #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif } else if(itype==DEATH) { /* Death transition */ /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } ix = deathprop.ix; state.npts = state.npts - 1; #if MH_DEBUG Rprintf("implementing death of point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } else { /* Shift transition */ /* Shift (x[ix], y[ix]) to (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing shift from %d = (%lf, %lf)[%d] to (%lf, %lf)[%d]\n", deathprop.ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("implementing shift from %d = (%lf, %lf) to (%lf, %lf)\n", deathprop.ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); Rprintf("\tnpts=%d\n", state.npts); #endif #endif if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, shiftprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, shiftprop, cdata[k]); } #endif } ix = shiftprop.ix; state.x[ix] = shiftprop.u; state.y[ix] = shiftprop.v; #if MH_MARKED state.marks[ix] = shiftprop.mrk; #endif } #if MH_DEBUG } else { Rprintf("rejected\n"); 
#endif } } } spatstat/src/knngrid.h0000644000176200001440000001273513406057617014532 0ustar liggesusers #if (1 == 0) /* knngrid.h Code template for C functions k-nearest neighbours (k > 1) of each grid point THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED IN ASCENDING ORDER OF x COORDINATE This code is #included multiple times in knngrid.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.6 $ $Date: 2016/02/02 01:31:50 $ */ #endif #undef PRINTALOT void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ kmax, nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; int *kmax; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int Nxcol, Nyrow; int i, j, ijpos; int Npoints, Nk, Nk1; int mleft, mright, mwhich, lastmwhich, unsorted, k, k1; double X0, Y0, Xstep, Ystep; double d2, d2minK, xj, yi, dx, dy, dx2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif Nxcol = *nx; Nyrow = *ny; Npoints = *np; Nk = *kmax; hu = *huge; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; Nk1 = Nk - 1; hu2 = hu * hu; if(Npoints == 0) return; lastmwhich = mwhich = 0; /* create space to store the nearest neighbour distances and indices for the current grid point */ d2min = (double *) R_alloc((size_t) Nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) Nk, sizeof(int)); #endif /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); #ifdef PRINTALOT Rprintf("j=%d, xj=%lf\n", j, xj); #endif for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { #ifdef PRINTALOT Rprintf("\ti=%d, yi = %lf\n", i, yi); 
#endif
      /* initialise nn distances and indices */
      /* d2minK is the current k-th smallest squared distance: any candidate
	 with dx2 beyond it can be rejected (and, since xp is sorted by x,
	 allows the search to break out of the scan entirely) */
      d2minK = hu2;
      for(k = 0; k < Nk; k++) {
	d2min[k] = hu2;
#ifdef WHICH
	which[k] = -1;
#endif
      }
      if(lastmwhich < Npoints) {
	/* search forward from previous nearest neighbour */
	/* valid because the data are sorted in x and successive grid points
	   move monotonically along the row */
	for(mright = lastmwhich; mright < Npoints; ++mright) {
	  dx = xp[mright] - xj;
	  dx2 = dx * dx;
#ifdef PRINTALOT
	  Rprintf("\t\t%d\n", mright);
#endif
	  if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */
	    break;
	  dy = yp[mright] - yi;
	  d2 = dy * dy + dx2;
	  if (d2 < d2minK) {
#ifdef PRINTALOT
	    Rprintf("\t\t\tNeighbour: d2=%lf\n", d2);
#endif
	    /* overwrite last entry in list of neighbours */
	    d2min[Nk1] = d2;
	    mwhich = mright;
#ifdef WHICH
	    which[Nk1] = mright;
#endif
	    /* bubble sort */
	    /* d2min[0..Nk-2] is already sorted, so one upward pass that
	       stops at the first non-swap restores full sortedness */
	    unsorted = YES;
	    for(k = Nk1; unsorted && k > 0; k--) {
	      k1 = k - 1;
	      if(d2min[k] < d2min[k1]) {
		/* swap entries */
		tmp = d2min[k1];
		d2min[k1] = d2min[k];
		d2min[k] = tmp;
#ifdef WHICH
		itmp = which[k1];
		which[k1] = which[k];
		which[k] = itmp;
#endif
	      } else {
		unsorted = NO;
	      }
	    }
	    /* adjust maximum distance */
	    d2minK = d2min[Nk1];
#ifdef PRINTALOT
	    Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK);
	    for(k = 0; k < Nk; k++)
	      Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]);
#ifdef WHICH
	    for(k = 0; k < Nk; k++)
	      Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]);
#endif
#endif
	  }
	} /* end forward search */
      }
      if(lastmwhich > 0) {
	/* search backward from previous nearest neighbour */
	for(mleft = lastmwhich - 1; mleft >= 0; --mleft) {
	  dx = xj - xp[mleft];
	  dx2 = dx * dx;
#ifdef PRINTALOT
	  Rprintf("\t\t%d\n", mleft);
#endif
	  if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */
	    break;
	  dy = yp[mleft] - yi;
	  d2 = dy * dy + dx2;
	  if (d2 < d2minK) {
#ifdef PRINTALOT
	    Rprintf("\t\t\tNeighbour: d2=%lf\n", d2);
#endif
	    /* overwrite last entry in list of neighbours */
	    mwhich = mleft;
	    d2min[Nk1] = d2;
#ifdef WHICH
	    which[Nk1] = mleft;
#endif
	    /* bubble sort */
	    unsorted = YES;
	    for(k = Nk1; unsorted && k > 0; k--) {
	      k1 = k - 1;
	      if(d2min[k] < d2min[k1]) {
		/* swap entries */
		tmp = d2min[k1];
		d2min[k1]
= d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[Nk1]; #ifdef PRINTALOT Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK); for(k = 0; k < Nk; k++) Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]); #ifdef WHICH for(k = 0; k < Nk; k++) Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]); #endif #endif } } /* end backward search */ } /* remember index of most recently-encountered neighbour */ lastmwhich = mwhich; #ifdef PRINTALOT Rprintf("\t\tlastmwhich=%d\n", lastmwhich); #endif /* copy nn distances for grid point (i, j) to output array nnd[ , i, j] */ ijpos = Nk * (i + j * Nyrow); for(k = 0; k < Nk; k++) { #ifdef DIST nnd[ijpos + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[ijpos + k] = which[k] + 1; /* R indexing */ #endif } /* end of loop over points i */ } } } spatstat/src/hardcore.c0000755000176200001440000000410713115271120014635 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Hard core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Hardcore { double h; /* hard core distance */ double h2; double *period; int per; } Hardcore; /* initialiser function */ Cdata *hardcoreinit(state, model, algo) State state; Model model; Algor algo; { Hardcore *hardcore; double h; hardcore = (Hardcore *) R_alloc(1, sizeof(Hardcore)); /* Interpret model parameters*/ hardcore->h = h = model.ipar[0]; hardcore->h2 = h * h; hardcore->period = model.period; /* periodic boundary conditions? 
*/ hardcore->per = (model.period[0] > 0.0); return((Cdata *) hardcore); } /* conditional intensity evaluator */ double hardcorecif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double h2, a; Hardcore *hardcore; hardcore = (Hardcore *) cdata; h2 = hardcore->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(hardcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2)) return((double) 0.0); } } if(ixp1 < npts) { for(j=ixp1; jperiod, h2)) return((double) 0.0); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { a = h2 - pow(u - x[j], 2); if(a > 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } } if(ixp1 < npts) { for(j=ixp1; j 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } } } return ((double) 1.0); } Cifns HardcoreCifns = { &hardcoreinit, &hardcorecif, (updafunptr) NULL, NO}; spatstat/src/nngrid.c0000644000176200001440000000356213406057617014350 0ustar liggesusers/* nngrid.c Nearest Neighbour Distances from a pixel grid to a point pattern Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.4 $ $Date: 2013/11/03 03:41:23 $ Function body definition is #included from nngrid.h THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */ /* general interface */ void nnGinterface(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ wantdist, wantwhich, /* options */ nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; /* options */ int 
*wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void nnGdw(), nnGd(), nnGw(); int di, wh; di = (*wantdist != 0); wh = (*wantwhich != 0); if(di && wh) { nnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } else if(di) { nnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } else if(wh) { nnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } } #undef FNAME #undef DIST #undef WHICH /* nnGdw returns distances and indices */ #define FNAME nnGdw #define DIST #define WHICH #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH /* nnGd returns distances only */ #define FNAME nnGd #define DIST #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH /* nnGw returns indices only */ #define FNAME nnGw #define WHICH #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH spatstat/src/PerfectStrauss.h0000644000176200001440000002146013406057617016046 0ustar liggesusers // ........................... Strauss process .......................... 
// $Revision: 1.4 $ $Date: 2014/02/18 10:43:00 $ class StraussProcess : public PointProcess { public: double beta, gamma, R, Rsquared; StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri); ~StraussProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); // void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p); // void Beta(struct Point2 *TempCell); // void CalcBeta(Point2Pattern *p2p); }; StraussProcess::StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double StraussProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = gamma; return(rtn); } void StraussProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void StraussProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating StraussProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating StraussProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; 
TempPoint->R = slumptal(); //Rprintf("Generating StraussProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } //void StraussProcess::CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // long int i,j,k; // k=0; // // Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm); // for(i=0; ibeta; // k++; // } // } //} //void StraussProcess::CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // long int i,j,k; // // double d1; // k=0; // // Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm); // for(i=0; i0.001) && (k==0)){ // Rprintf("%f %f %f %ld %ld\n",fabs(*(betapomm + i*ysidepomm + j)- beta), // *(betapomm + i*ysidepomm + j),beta,i,j); // k++; // // scanf("%lf",&d1); // } // } // } //} //double StraussProcess::lnCondInt(struct Point2 *TempCell, // Point2Pattern *p2p){ // double f1; // long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx,k; // double dy,dx, lnCI,dst2; // struct Point2 *TempCell2; // // f1 = (TempCell->X-p2p->Xmin)/p2p->XCellDim; xc = int(f1); // CLAMP(xc, 0, p2p->MaxXCell, "xc"); // f1 = (TempCell->Y-p2p->Ymin)/p2p->YCellDim; yc = int(f1); // CLAMP(yc, 0, p2p->MaxYCell, "yc"); // // dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); // dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); // rx = int(this->InteractionRange/dx+1.0); // ry = int(this->InteractionRange/dy+1.0); // // lnCI = log(TempCell->Beta); // // k = 0; // // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // // //Rprintf("MCI! 
%d %d %d %d\n",fx,tx,fy,ty); // // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnCondInt()"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, "internal error: TempCell2 is null in lnCondInt()"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // k++; // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnCI += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnCondInt() loop"); // } // } // } // return(lnCI); //} //void StraussProcess::Beta(struct Point2 *TempCell){ // TempCell->Beta = beta; //} //void StraussProcess::CalcBeta(Point2Pattern *p2p){ // long int xco,yco; // // double dy,dx; // struct Point2 *TempMother; // // for(xco = 0; xco <= p2p->MaxXCell; xco++){ // for(yco = 0; yco <= p2p->MaxYCell; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in CalcBeta()"); // TempMother = p2p->headCell[xco][yco]->next; // CHECK(TempMother, "internal error: TempMother is null in CalcBeta()"); // while(TempMother!=TempMother->next){ // TempMother->Beta = this->beta; // TempMother = TempMother->next; // CHECK(TempMother, // "internal error: TempMother is null in CalcBeta() loop"); // } // } // } //} // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectStrauss(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int EndTime, StartTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; SEXP stout, etout; int *ss, *ee; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Strauss point process StraussProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = 
NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); PROTECT(stout = NEW_INTEGER(1)); PROTECT(etout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); ss = INTEGER_POINTER(stout); ee = INTEGER_POINTER(etout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); *ss = StartTime; *ee = EndTime; // pack up into output list PROTECT(out = NEW_LIST(5)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); SET_VECTOR_ELT(out, 3, stout); SET_VECTOR_ELT(out, 4, etout); // return UNPROTECT(11); // 5 arguments plus xout, yout, nout, stout, etout, out return(out); } } spatstat/src/distances.c0000755000176200001440000002203713406057617015045 0ustar liggesusers/* distances.c Distances between pairs of points $Revision: 1.32 $ $Date: 2018/12/18 02:43:11 $ Cpairdist Pairwise distances Cpair2dist Pairwise distances squared CpairPdist Pairwise distances with periodic correction CpairP2dist Pairwise distances squared, with periodic correction Ccrossdist Pairwise distances for two sets of points Ccross2dist Pairwise distances squared, for two sets of points CcrossPdist Pairwise distances for two sets of points, periodic correction Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "chunkloop.h" double sqrt(); void Cpairdist(n, x, y, squared, d) /* inputs */ int *n; double *x, *y; int *squared; /* output */ double *d; { void Cpair1dist(), Cpair2dist(); if(*squared == 0) { Cpair1dist(n, x, y, d); } else { Cpair2dist(n, x, y, d); } } void Cpair1dist(n, x, y, d) /* inputs */ int *n; double *x, *y; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of 
column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dist = sqrt( dx * dx + dy * dy ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } /* squared distances */ void Cpair2dist(n, x, y, d) /* inputs */ int *n; double *x, *y; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dist = dx * dx + dy * dy; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } void Ccrossdist(nfrom, xfrom, yfrom, nto, xto, yto, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; int *squared; /* output */ double *d; { void Ccross1dist(), Ccross2dist(); if(*squared == 0) { Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d); } else { Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d); } } void Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy; nf = *nfrom; nt = *nto; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; *dptr = sqrt( dx * dx + dy * dy ); } } } } /* squared distances */ void Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy; nf = *nfrom; nt = 
*nto; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; *dptr = dx * dx + dy * dy; } } } } /* distances with periodic correction */ void CpairPdist(n, x, y, xwidth, yheight, squared, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; int *squared; /* output */ double *d; { void CpairP1dist(), CpairP2dist(); if(*squared == 0) { CpairP1dist(n, x, y, xwidth, yheight, d); } else { CpairP2dist(n, x, y, xwidth, yheight, d); } } void CpairP1dist(n, x, y, xwidth, yheight, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high; npoints = *n; wide = *xwidth; high = *yheight; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dist = sqrt( dx2p + dy2p ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } /* same function without the sqrt */ void CpairP2dist(n, x, y, xwidth, yheight, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high; npoints = *n; wide = *xwidth; high = *yheight; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { 
R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dist = dx2p + dy2p; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } void CcrossPdist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; int *squared; /* output */ double *d; { void CcrossP1dist(), CcrossP2dist(); if(*squared == 0) { CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); } else { CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); } } void CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; *dptr = sqrt( dx2p + dy2p ); } } } } void CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d) /* 
inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; *dptr = dx2p + dy2p; } } } } spatstat/src/linnndist.c0000755000176200001440000001204513406057617015070 0ustar liggesusers#include /* linnndist.c Shortest-path distances between nearest neighbours in linear network $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ linnndist linnnwhich Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #define ANSWER(I,J) answer[(J) + Np * (I)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linnndist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ huge, /* value taken as infinity */ /* OUTPUT */ answer /* nearest neighbour distance for each point */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *answer; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; Np = *np; Nv = *nv; 
  Np1 = Np - 1;
  hugevalue = *huge;

  /* initialise nn distances */
  for(i = 0; i < Np; i++)
    answer[i] = hugevalue;

  /* main loop */
  /* scans each unordered pair (i,j) once, updating the running
     nearest-neighbour distance of BOTH endpoints */
  for(i = 0; i < Np1; i++) {
    xpi = xp[i];
    ypi = yp[i];
    segi = segmap[i];
    /* endpoints of the segment carrying point i, and the Euclidean
       distances from point i to each endpoint */
    nbi1 = from[segi];
    nbi2 = to[segi];
    dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]);
    dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]);
    dmin = answer[i];
    for(j = i+1; j < Np; j++) {
      xpj = xp[j];
      ypj = yp[j];
      segj = segmap[j];
      /* compute path distance between i and j */
      if(segi == segj) {
	/* points i and j lie on the same segment; use Euclidean distance */
	/* NOTE(review): assumes the segment is a straight line so the
	   within-segment path equals the Euclidean distance — consistent
	   with the EUCLID endpoint distances used below */
	d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2));
      } else {
	/* Shortest path from i to j passes through ends of segments;
	   Calculate shortest of 4 possible paths from i to j */
	nbj1 = from[segj];
	nbj2 = to[segj];
	d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj);
	d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj);
	/* segment-end to segment-end legs use the precomputed
	   vertex-to-vertex shortest path matrix dpath */
	d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj;
	d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj;
	d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj;
	d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj;
	d = d11;
	if(d12 < d) d = d12;
	if(d21 < d) d = d21;
	if(d22 < d) d = d22;
      }
      /* OK, distance between i and j is d */
      /* update nn distance for point i */
      if(d < dmin) dmin = d;
      /* update nn distance for point j */
      if(d < answer[j]) answer[j] = d;
    }
    /* commit nn distance for point i */
    answer[i] = dmin;
  }
}

/* Same computation as linnndist, but additionally records, for each
   point, the index of its nearest neighbour. */
void linnnwhich(np, xp, yp,   /* data points */
		nv, xv, yv,   /* network vertices */
		ns, from, to,  /* segments */
		dpath,  /* shortest path distances between vertices */
		segmap, /* map from data points to segments */
		huge, /* value taken as infinity */
		/* OUTPUT */
		nndist, /* nearest neighbour distance for each point */
		nnwhich /* identifies nearest neighbour */
		)
     int *np, *nv, *ns;
     int *from, *to, *segmap; /* integer vectors (mappings) */
     double *xp, *yp, *xv, *yv; /* vectors of coordinates */
     double *huge;
     double *dpath; /* matrix */
     double *nndist; /* vector of output values */
     int *nnwhich; /* vector of output values */
{
  int Np, Nv, i, j, Np1;
  int segi, segj, nbi1, nbi2, nbj1, nbj2;
  double
d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; int whichmin; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances and identifiers */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; whichmin = nnwhich[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; whichmin = j; } /* update nn for point j */ if(d < nndist[j]) { nndist[j] = d; nnwhich[j] = i; } } /* commit nn for point i */ nndist[i] = dmin; nnwhich[i] = whichmin; } } spatstat/src/seg2pix.h0000644000176200001440000001022413406057617014446 0ustar liggesusers/* seg2pix.h Code template for seg2pix.c $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros: FNAME name of function SUMUP #defined if crossings should be counted (weights summed) V matrix index macro (in seg2pix.c) DEBUG debug if #defined */ #undef INCREMENT #undef ZERO #ifdef SUMUP #define ZERO (double) 0.0 #define 
INCREMENT(I,J) V(I,J) += wi #else #define ZERO 0 #define INCREMENT(I,J) V(I,J) = 1 #endif void FNAME(ns,x0,y0,x1,y1, #ifdef SUMUP w, #endif nx,ny,out) int *ns; /* number of segments */ double *x0,*y0,*x1,*y1; /* coordinates of segment endpoints */ int *nx, *ny; /* dimensions of pixel array (columns, rows) */ #ifdef SUMUP double *w; /* weights attached to segments */ double *out; /* output totals */ #else int *out; /* output indicators */ #endif { int Ns, Nx, Ny, i, j, k, m, m0, m1, mmin, mmax, maxchunk; double x0i, x1i, y0i, y1i, dx, dy; double leni; double xleft, yleft, xright, yright, slope; double xstart, ystart, xfinish, yfinish; int mleft, mright, kstart, kfinish, kmin, kmax; #ifdef SUMUP double wi; #endif Ns = *ns; Nx = *nx; Ny = *ny; for(k = 0; k < Ny - 1; k++) for(j = 0; j < Nx - 1; j++) V(k, j) = ZERO; OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 8196) { x0i = x0[i]; y0i = y0[i]; x1i = x1[i]; y1i = y1[i]; #ifdef SUMUP wi = w[i]; #endif dx = x1i - x0i; dy = y1i - y0i; leni = hypot(dx, dy); #ifdef DEBUG Rprintf("(%lf, %lf) to (%lf, %lf)\n", x0i, y0i, x1i, y1i); #endif if(leni < 0.001) { /* tiny segment */ #ifdef DEBUG Rprintf("tiny\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); INCREMENT(j, k); } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { /* contained in one cell */ #ifdef DEBUG Rprintf("contained in one cell\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); INCREMENT(j, k); } else if(floor(y1i) == floor(y0i)) { /* horizontal */ #ifdef DEBUG Rprintf("horizontal\n"); #endif j = clamp((int) floor(y1i), 0, Ny-1); m0 = clamp((int) floor(x0i), 0, Nx-1); m1 = clamp((int) floor(x1i), 0, Nx-1); mmin = (m0 < m1) ? m0: m1; mmax = (m0 < m1) ? 
m1: m0; #ifdef DEBUG Rprintf("row %d: columns [%d, %d]\n", j, mmin, mmax); #endif for(k = mmin; k <= mmax; k++) INCREMENT(j,k); } else if(floor(x1i) == floor(x0i)) { /* vertical */ #ifdef DEBUG Rprintf("vertical\n"); #endif k = clamp((int) floor(x1i), 0, Nx-1); m0 = clamp((int) floor(y0i), 0, Ny-1); m1 = clamp((int) floor(y1i), 0, Ny-1); mmin = (m0 < m1) ? m0: m1; mmax = (m0 < m1) ? m1: m0; #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", k, mmin, mmax); #endif for(j = mmin; j <= mmax; j++) INCREMENT(j,k); } else { /* general case */ #ifdef DEBUG Rprintf("general\n"); #endif if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } slope = (yright - yleft)/(xright - xleft); mleft = clamp((int) floor(xleft), 0, Nx-1); mright = clamp((int) floor(xright), 0, Nx-1); #ifdef DEBUG Rprintf("column range [%d, %d]\n", mleft, mright); #endif /* treat each vertical slice */ for(m = mleft; m <= mright; m++) { if(m == mleft) { xstart = xleft; ystart = yleft; } else { xstart = m; ystart = yleft + slope * (xstart - xleft); } if(m == mright) { xfinish = xright; yfinish = yright; } else { xfinish = m+1; yfinish = yleft + slope * (xfinish - xleft); } kstart = clamp((int) floor(ystart), 0, Ny-1); kfinish = clamp((int) floor(yfinish), 0, Ny-1); kmin = (kstart < kfinish) ? kstart : kfinish; kmax = (kstart < kfinish) ? 
kfinish : kstart; #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax); #endif for(k = kmin; k <= kmax; k++) INCREMENT(k, m); } } /* end of if-else */ } } #ifdef DEBUG Rprintf("done\n"); #endif } spatstat/src/densptcross.c0000644000176200001440000002243413406057617015435 0ustar liggesusers#include #include #include "chunkloop.h" #include "crossloop.h" #include "constants.h" /* densptcross.c $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Assumes point patterns are sorted in increasing order of x coordinate *crdenspt Density estimate at points *crsmoopt Smoothed mark values at points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int i, j, n1, n2, maxchunk, jleft; \ double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; \ double *x1, *y1, *x2, *y2; #define STD_INITIALISE \ n1 = *nquery; \ x1 = xq; y1 = yq; \ n2 = *ndata; \ x2 = xd; y2 = yd; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void crdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) coordinates to be interrogated */ int *ndata; /* number of data points */ double *xd, *yd; /* (x,y) coordinates of data */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n1 == 0 || n2 == 0) return; CROSSLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, sig, result) /* inputs */ int *nquery; /* number of locations to be interrogated */ double *xq, *yq; /* (x,y) 
coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *wd;          /* weights of data points */
     double *rmaxi;       /* maximum distance at which points contribute */
     double *sig;         /* Gaussian sd */
     /* output */
     double *result;      /* vector of computed density values */
{
  STD_DECLARATIONS;
  double resulti, coef;
  double sigma, twosig2;
  STD_INITIALISE;

  sigma = *sig;
  twosig2 = 2.0 * sigma * sigma;
  /* normalising constant of the bivariate Gaussian kernel */
  coef = 1.0/(TWOPI * sigma * sigma);

  if(n1 == 0 || n2 == 0)
    return;

  /* weighted kernel sum over data points within rmax of each query point */
  CROSSLOOP( { resulti = 0.0; },
	     { resulti += wd[j] * exp(-d2/twosig2); },
	     { result[i] = coef * resulti; } )

}

/* ------------- anisotropic versions -------------------- */

/* Anisotropic Gaussian kernel density estimate at query points. */
void acrdenspt(nquery, xq, yq, ndata, xd, yd, rmaxi, detsigma, sinv, result)
     /* inputs */
     int *nquery;         /* number of locations to be interrogated */
     double *xq, *yq;     /* (x,y) coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *rmaxi;       /* maximum distance at which points contribute */
     double *detsigma;    /* determinant of variance matrix */
     double *sinv;        /* inverse variance matrix (2x2, flattened) */
     /* output */
     double *result;      /* vector of computed density values */
{
  STD_DECLARATIONS;
  double resulti, coef;
  double detsig, s11, s12, s21, s22;
  STD_INITIALISE;

  detsig = *detsigma;
  /* normalising constant 1/(2 pi sqrt(det Sigma)) */
  coef = 1.0/(TWOPI * sqrt(detsig));

  /* entries of the flattened 2x2 inverse variance matrix */
  s11 = sinv[0];
  s12 = sinv[1];
  s21 = sinv[2];
  s22 = sinv[3];

  if(n1 == 0 || n2 == 0)
    return;

  /* exponent is the quadratic form -(d' Sinv d)/2 in the
     displacement d = (dx, dy); both off-diagonal entries are used,
     so a non-symmetric sinv would also be honoured */
  CROSSLOOP( { resulti = 0.0; },
	     { resulti += exp(-(dx * (dx * s11 + dy * s12) \
			       + dy * (dx * s21 + dy * s22))/2.0); },
	     { result[i] = coef * resulti; })
}

/* Weighted anisotropic density estimate: data point j contributes wd[j]. */
void awtcrdenspt(nquery, xq, yq, ndata, xd, yd, wd, rmaxi, detsigma, sinv, result)
     /* inputs */
     int *nquery;         /* number of locations to be interrogated */
     double *xq, *yq;     /* (x,y) coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *wd;          /* weights of data points */
     double *rmaxi;       /*
maximum distance at which points contribute */
     double *detsigma;    /* determinant of variance matrix */
     double *sinv;        /* inverse variance matrix (2x2, flattened) */
     /* output */
     double *result;      /* vector of weighted density values */
{
  STD_DECLARATIONS;
  double resulti, coef;
  double detsig, s11, s12, s21, s22;
  STD_INITIALISE;

  detsig = *detsigma;
  coef = 1.0/(TWOPI * sqrt(detsig));

  s11 = sinv[0];
  s12 = sinv[1];
  s21 = sinv[2];
  s22 = sinv[3];

  if(n1 == 0 || n2 == 0)
    return;

  CROSSLOOP( { resulti = 0.0; },
	     { resulti += wd[j] * \
		 exp(-(dx * (dx * s11 + dy * s12) \
		       + dy * (dx * s21 + dy * s22))/2.0); },
	     { result[i] = coef * resulti; })
}

/* --------------- smoothing --------------------------- */

/* Kernel-weighted average of mark values vd[] at each query point.
   NOTE(review): if no data point lies within rmax of query point i,
   denom stays 0 and result[i] becomes NaN/Inf -- presumably screened
   by the R caller; confirm. */
void crsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sig, result)
     /* inputs */
     int *nquery;         /* number of locations to be interrogated */
     double *xq, *yq;     /* (x,y) coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *vd;          /* mark values at data points */
     double *rmaxi;       /* maximum distance at which points contribute */
     double *sig;         /* Gaussian sd */
     /* output */
     double *result;      /* vector of computed smoothed values */
{
  STD_DECLARATIONS;
  double sigma, twosig2;
  double numer, denom, wij;
  STD_INITIALISE;

  sigma = *sig;
  twosig2 = 2.0 * sigma * sigma;

  if(n1 == 0 || n2 == 0)
    return;

  CROSSLOOP({ numer = denom = 0.0; },
	    { \
	      wij = exp(-d2/twosig2);  \
	      denom += wij;	       \
	      numer += wij * vd[j];    \
	    },
	    { \
	      result[i] = numer/denom; \
	    })
}

/* Weighted smoothing: kernel weight multiplied by data weight wd[j]. */
void wtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sig, result)
     /* inputs */
     int *nquery;         /* number of locations to be interrogated */
     double *xq, *yq;     /* (x,y) coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *vd;          /* mark values at data points */
     double *wd;          /* weights of data points */
     double *rmaxi;       /* maximum distance */
     double *sig;         /* Gaussian sd */
     /* output */
     double *result;      /* vector of
computed smoothed values */
{
  STD_DECLARATIONS;
  double sigma, twosig2;
  double numer, denom, wij;
  STD_INITIALISE;

  sigma = *sig;
  twosig2 = 2.0 * sigma * sigma;

  if(n1 == 0 || n2 == 0)
    return;

  /* NOTE(review): denom can be 0 when no data lie within rmax, giving
     NaN -- same caveat as crsmoopt above */
  CROSSLOOP({ numer = denom = 0.0; },
	    { \
	      wij = wd[j] * exp(-d2/twosig2); \
	      denom += wij;		      \
	      numer += wij * vd[j];	      \
	    },
	    { \
	      result[i] = numer/denom;	      \
	    })
}

/* ------------- anisotropic versions -------------------- */

/* Anisotropic kernel smoothing of mark values vd[]. */
void acrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, rmaxi, sinv, result)
     /* inputs */
     int *nquery;         /* number of locations to be interrogated */
     double *xq, *yq;     /* (x,y) coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *vd;          /* mark values at data points */
     double *rmaxi;       /* maximum distance at which points contribute */
     double *sinv;        /* inverse variance matrix (2x2, flattened) */
     /* output */
     double *result;      /* vector of smoothed values */
{
  STD_DECLARATIONS;
  double s11, s12, s21, s22;
  double numer, denom, wij;
  STD_INITIALISE;

  s11 = sinv[0];
  s12 = sinv[1];
  s21 = sinv[2];
  s22 = sinv[3];

  if(n1 == 0 || n2 == 0)
    return;

  CROSSLOOP({ numer = denom = 0.0; },
	    { \
	      wij = exp(-(dx * (dx * s11 + dy * s12) \
			  + dy * (dx * s21 + dy * s22))/2.0); \
	      denom += wij;	    \
	      numer += wij * vd[j]; \
	    },
	    { \
	      result[i] = numer/denom; \
	    })
}

/* Weighted anisotropic smoothing. */
void awtcrsmoopt(nquery, xq, yq, ndata, xd, yd, vd, wd, rmaxi, sinv, result)
     /* inputs */
     int *nquery;         /* number of locations to be interrogated */
     double *xq, *yq;     /* (x,y) coordinates to be interrogated */
     int *ndata;          /* number of data points */
     double *xd, *yd;     /* (x,y) coordinates of data */
     double *vd;          /* mark values at data points */
     double *wd;          /* weights of data points */
     double *rmaxi;       /* maximum distance at which points contribute */
     double *sinv;        /* inverse variance matrix (2x2, flattened) */
     /* output */
     double *result;      /* vector of smoothed values */
{
  STD_DECLARATIONS;
  double s11, s12, s21, s22;
  double numer, denom, wij;
  STD_INITIALISE;

  s11 = sinv[0];
  s12 = sinv[1];
  s21 =
sinv[2];
  s22 = sinv[3];

  if(n1 == 0 || n2 == 0)
    return;

  /* weighted anisotropic kernel smoothing.
     NOTE(review): denom can be 0 when no data lie within rmax of
     query point i, producing NaN -- confirm the caller screens this. */
  CROSSLOOP({ numer = denom = 0.0; },
	    { \
	      wij = wd[j] * exp(-(dx * (dx * s11 + dy * s12) \
				  + dy * (dx * s21 + dy * s22))/2.0); \
	      denom += wij;	    \
	      numer += wij * vd[j]; \
	    },
	    { \
	      result[i] = numer/denom; \
	    })
}
spatstat/src/k3.c0000755000176200001440000000716313406057617013410 0ustar liggesusers#include #include #include "geom3.h"
#include "functable.h"

/*
	$Revision: 1.1 $	$Date: 2009/11/04 23:54:15 $

	K function of 3D point pattern

	k3trans	translation correction

	k3isot	isotropic correction

# /////////////////////////////////////////////
# AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991.
#
# MODIFIED BY: Adrian Baddeley, Perth 2009.
#
# This software is distributed free
# under the conditions that
# 	(1) it shall not be incorporated
# 	in software that is subsequently sold
# 	(2) the authorship of the software shall
# 	be acknowledged in any publication that
# 	uses results generated by the software
# 	(3) this notice shall remain in place
# 	in each file.
# //////////////////////////////////////////////

*/

/* K function of a 3D point pattern with translation edge correction.
   Results are written into the function table k (argument grid
   k->t0 .. k->t1 with k->n entries). */
void k3trans(p, n, b, k)
     Point *p;    /* array of n points in 3D */
     int n;
     Box *b;      /* enclosing box */
     Ftable *k;   /* output function table */
{
  register int i, j, l, lmin;
  register double dx, dy, dz, dist;
  register double vx, vy, vz;
  Point *ip, *jp;
  double dt, vol, lambda, denom, term;
  double sphesfrac(), sphevol();

  /* compute denominator & initialise numerator*/
  vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
  lambda = ((double) n )/ vol;
  denom = lambda * lambda;   /* squared intensity */
  for(l = 0; l < k->n; l++) {
    (k->denom)[l] = denom;
    (k->num)[l] = 0.0;
  }

  /* spacing of argument in result vector k */
  dt = (k->t1 - k->t0)/(k->n - 1);

  /* compute numerator: loop over unordered pairs i < j */
  for( i = 0; i < n; i++) {
    ip = p + i;
    for(j = i + 1; j < n; j++) {
      jp = p + j;
      dx = jp->x - ip->x;
      dy = jp->y - ip->y;
      dz = jp->z - ip->z;
      dist = sqrt(dx * dx + dy * dy + dz * dz);
      /* first grid index at which this pair is counted */
      lmin = ceil( (dist - k->t0) / dt );
      if(lmin < 0)
	lmin = 0;
      /* side lengths of the box shrunk by the displacement
	 (translation-correction set) */
      vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx);
      vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy);
      vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz);
      if(vx >= 0.0 && vy >= 0.0 && vz >= 0.0) {
	term = 2.0 /(vx * vy * vz);  /* 2 because they're ordered pairs */
	for(l = lmin; l < k->n; l++)
	  (k->num)[l] += term;
      }
    }
  }

  /* compute ratio */
  for(l = 0; l < k->n; l++)
    (k->f)[l] = ((k->denom)[l] > 0.0)?
      (k->num)[l] / (k->denom)[l] : 0.0;
}

/* K function with isotropic edge correction; uses the external
   helpers sphesfrac() and sphevol() (sphere/box intersection). */
void k3isot(p, n, b, k)
     Point *p;
     int n;
     Box *b;
     Ftable *k;
{
  register int i, j, l, lmin;
  register double dx, dy, dz, dist;
  Point *ip, *jp;
  double dt, vol, denom, term;
  double sphesfrac(), sphevol();
  Point vertex;
  Box half;

  /* compute denominator & initialise numerator*/
  vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0);
  denom = ((double) (n * n))/vol;
  for(l = 0; l < k->n; l++) {
    (k->denom)[l] = denom;
    (k->num)[l] = 0.0;
  }

  /* spacing of argument in result vector k */
  dt = (k->t1 - k->t0)/(k->n - 1);

  /* set up for volume correction: box corner and half-box */
  vertex.x = b->x0;
  vertex.y = b->y0;
  vertex.z = b->z0;
  half.x1 = b->x1;
  half.y1 = b->y1;
  half.z1 = b->z1;
  half.x0 = (b->x0 + b->x1)/2.0;
  half.y0 = (b->y0 + b->y1)/2.0;
  half.z0 = (b->z0 + b->z1)/2.0;

  /* compute numerator */
  for( i = 0; i < n; i++) {
    ip = p + i;
    for(j = i + 1; j < n; j++) {
      jp = p + j;
      dx = jp->x - ip->x;
      dy = jp->y - ip->y;
      dz = jp->z - ip->z;
      dist = sqrt(dx * dx + dy * dy + dz * dz);
      lmin = ceil( (dist - k->t0) / dt );
      if(lmin < 0)
	lmin = 0;
      /* edge-correction weight for the pair (both directions) */
      term = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist));
      term *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol;
      for(l = lmin; l < k->n; l++)
	(k->num)[l] += term;
    }
  }

  /* compute ratio */
  for(l = 0; l < k->n; l++)
    (k->f)[l] = ((k->denom)[l] > 0.0)?
(k->num)[l] / (k->denom)[l] : 0.0; } spatstat/src/trigraf.c0000755000176200001440000007014113406057617014525 0ustar liggesusers/* trigraf.c Form list of all triangles in a planar graph, given list of edges $Revision: 1.15 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Form list of all triangles in a planar graph, given list of edges Note: vertex indices ie, je are indices in R. They are handled without converting to C convention, because we only need to test equality and ordering. (*except in 'trioxgraph'*) Called by .C: ------------- trigraf() Generic C implementation with fixed storage limit usable with Delaunay triangulation trigrafS() Faster version when input data are sorted (again with predetermined storage limit) suited for handling Delaunay triangulation Called by .Call: --------------- trigraph() Version with dynamic storage allocation triograph() Faster version assuming 'iedge' is sorted in increasing order trioxgraph() Even faster version for use with quadrature schemes Diameters: ----------- triDgraph() Also computes diameters of triangles */ #include #include #include #include "chunkloop.h" #undef DEBUGTRI void trigraf(nv, ne, ie, je, ntmax, nt, it, jt, kt, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ int *ntmax; /* length of storage space for triangles */ /* output */ int *nt; /* number of triangles (<= *ntmax) */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int *status; /* 0 if OK, 1 if overflow */ { int Nv, Ne, Ntmax; int Nt, Nj, m, i, j, k, mj, mk, maxchunk; int *jj; Nv = *nv; Ne = *ne; Ntmax = *ntmax; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); Nt = 0; /* vertex index i ranges from 1 to Nv */ XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { /* 
Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ if(Nj > 1) { /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - exit */ *status = 1; return; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } *nt = Nt; *status = 0; } /* faster version of trigraf() assuming that ie[m] < je[m] ie[] is in ascending order je[] is in ascending order within ie[], that is, je[ie[]=i] is in ascending order for each fixed i */ void trigrafS(nv, ne, ie, je, ntmax, nt, it, jt, kt, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ int *ntmax; /* length of storage space for triangles */ /* output */ int *nt; /* number of triangles */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int *status; /* 0 if OK, 1 if overflow */ { int Ne, Nt, Ntmax; int m, i, j, k, mj, mk; int firstedge, lastedge; Ne = *ne; Ntmax = *ntmax; /* nv is not used, but retained for harmony with trigraf */ /* Avoid compiler warnings */ Nt = *nv; /* initialise output */ Nt = 0; lastedge = -1; while(lastedge + 1 < Ne) { if(lastedge % 256 == 0) R_CheckUserInterrupt(); /* Consider next vertex i. 
The edges (i,j) with i < j appear contiguously in the edge list. */ firstedge = lastedge + 1; i = ie[firstedge]; for(m= firstedge+1; m < Ne && ie[m] == i; m++) ; lastedge = m-1; /* Consider each pair j, k of neighbours of i, where i < j < k. Scan entire edge list to determine whether j, k are joined by an edge. If so, save triangle (i,j,k) */ if(lastedge > firstedge) { for(mj = firstedge; mj < lastedge; mj++) { j = je[mj]; for(mk = firstedge+1; mk <= lastedge; mk++) { k = je[mk]; /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne && ie[m] < j; m++) ; while(m < Ne && ie[m] == j) { if(je[m] == k) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - exit */ *status = 1; return; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } m++; } } } } } *nt = Nt; *status = 0; } /* ------------------- callable by .Call ------------------------- */ SEXP trigraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); 
XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } /* output indices in R convention */ it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = 
INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); return(out); } /* faster version assuming iedge is in increasing order */ SEXP triograph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* 
Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ maxjk = (j > k) ? j : k; for(m = 0; m < Ne; m++) { if(ie[m] > maxjk) break; /* since iedge is in increasing order, the test below will always be FALSE when ie[m] > max(j,k) */ if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); 
return(out); } /* Even faster version using information about dummy vertices. Dummy-to-dummy edges are forbidden. For generic purposes use 'friendly' for 'isdata' Edge between j and k is possible iff friendly[j] || friendly[k]. Edges with friendly = FALSE cannot be connected to one another. */ SEXP trioxgraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP friendly) /* indicator vector, length nv */ { /* input */ int Nv, Ne; int *ie, *je; /* edges */ int *friend; /* indicator */ /* output */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk; /* output to R */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(friendly = AS_INTEGER(friendly)); /* That's 4 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); friend = INTEGER_POINTER(friendly); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); /* convert to C indexing convention */ for(m = 0; m < Ne; m++) { ie[m] -= 1; je[m] -= 1; } OUTERCHUNKLOOP(i, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; 
if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k && (friend[j] || friend[k])) { /* Run through edges to determine whether j, k are neighbours */ maxjk = (j > k) ? j : k; for(m = 0; m < Ne; m++) { if(ie[m] > maxjk) break; /* since iedge is in increasing order, the test below will always be FALSE when ie[m] > max(j,k) */ if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } /* convert back to R indexing */ it[Nt] = i + 1; jt[Nt] = j + 1; kt[Nt] = k + 1; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 4+4=8 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = 
jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(8); return(out); } /* also calculates diameter (max edge length) of triangle */ SEXP triDgraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP edgelength) /* edge lengths */ { int Nv, Ne; int *ie, *je; /* edges */ double *edgelen; int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ double *dt; /* diameters (max edge lengths) of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; double *dd; int i, j, k, m, mj, mk, Nmore, maxchunk; double dij, dik, djk, diam; /* output */ SEXP iTout, jTout, kTout, dTout, out; int *ito, *jto, *kto; double *dto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(edgelength = AS_NUMERIC(edgelength)); /* That's 4 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); edgelen = NUMERIC_POINTER(edgelength); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); dt = (double *) R_alloc(Ntmax, sizeof(double)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); dd = (double *) R_alloc(Ne, sizeof(double)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { 
if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; dik = dd[mj]; dd[mj] = dd[mk]; dd[mk] = dik; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; dij = dd[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; dik = dd[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* triangle (i, j, k) */ /* determine triangle diameter */ diam = (dij > dik) ? 
dij : dik; djk = edgelen[m]; if(djk > diam) diam = djk; /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); dt = (double *) S_realloc((char *) dt, Nmore, Ntmax, sizeof(double)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; dt[Nt] = diam; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(dTout = NEW_NUMERIC(Nt)); PROTECT(out = NEW_LIST(4)); /* that's 4+5=9 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); dto = NUMERIC_POINTER(dTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; dto[m] = dt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); SET_VECTOR_ELT(out, 3, dTout); UNPROTECT(9); return(out); } /* same as triDgraph but returns only triangles with diameter <= dmax */ SEXP triDRgraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP edgelength, /* edge lengths */ SEXP dmax) { int Nv, Ne; int *ie, *je; /* edges */ double *edgelen; int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ double *dt; /* diameters (max edge lengths) of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; double *dd; int i, j, k, m, mj, mk, Nmore, maxchunk; double dij, dik, djk, diam, Dmax; /* output */ SEXP iTout, jTout, kTout, dTout, out; int *ito, *jto, *kto; double *dto; /* =================== Protect 
R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(edgelength = AS_NUMERIC(edgelength)); PROTECT(dmax = AS_NUMERIC(dmax)); /* That's 5 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); edgelen = NUMERIC_POINTER(edgelength); /* maximum diameter */ Dmax = *(NUMERIC_POINTER(dmax)); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); dt = (double *) R_alloc(Ntmax, sizeof(double)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); dd = (double *) R_alloc(Ne, sizeof(double)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; dik = dd[mj]; dd[mj] = dd[mk]; dd[mk] = dik; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); 
#endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; dij = dd[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; dik = dd[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* triangle (i, j, k) */ /* determine triangle diameter */ diam = (dij > dik) ? dij : dik; djk = edgelen[m]; if(djk > diam) diam = djk; if(diam <= Dmax) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); dt = (double *) S_realloc((char *) dt, Nmore, Ntmax, sizeof(double)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; dt[Nt] = diam; Nt++; } } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(dTout = NEW_NUMERIC(Nt)); PROTECT(out = NEW_LIST(4)); /* that's 5+5=10 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); dto = NUMERIC_POINTER(dTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; dto[m] = dt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); SET_VECTOR_ELT(out, 3, dTout); UNPROTECT(10); return(out); } spatstat/src/minnnd.c0000644000176200001440000000122413406057617014343 0ustar liggesusers/* minnnd.c Minimum/Maximum Nearest Neighbour Distance Uses code templates in minnnd.h, maxnnd.h $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU 
Public Licence >= 2 */ #include #include #include #undef IGNOREZERO #define FNAME minnnd2 #include "minnnd.h" #undef FNAME #define FNAME maxnnd2 #include "maxnnd.h" #undef FNAME /* min/max nearest neighbour distance ignoring zero distances */ #define IGNOREZERO #define FNAME minPnnd2 #include "minnnd.h" #undef FNAME #define FNAME maxPnnd2 #include "maxnnd.h" #undef FNAME spatstat/src/Krect.c0000644000176200001440000000373713406057617014143 0ustar liggesusers/* Krect.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Fast code for K function in rectangular case. **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Krect.c defines three interface functions, for weighted, unweighted double, and unweighted integer cases KrectFunDec.h (#included thrice) Function declaration, arguments, storage allocation KrectV1.h split according to whether Isotropic Correction is wanted Macro ISOTROPIC is #defined KrectV2.h split according to whether Translation Correction is wanted Macro TRANSLATION is #defined KrectV3.h split according to whether Border Correction is wanted Macro BORDER is #defined KrectV4.h split according to whether Uncorrected estimate is wanted Macro UNCORRECTED is #defined KrectBody.h Function body, including loops over i and j KrectIncrem.h (#included twice) Code performed when a close pair of points has been found: calculate edge corrections, increment results. */ #include #include #include /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < 1.0e-12) ? 1 : 0) #define MIN(X,Y) (((X) < (Y)) ? 
(X) : (Y)) #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectInt #define COUNTTYPE int #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectDbl #define COUNTTYPE double #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectWtd #define COUNTTYPE double #define WEIGHTED #include "KrectFunDec.h" spatstat/src/linpairdist.c0000755000176200001440000000445213406057617015413 0ustar liggesusers#include #include #include "chunkloop.h" /* linpairdist.c Shortest-path distances between each pair of points in linear network $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ linpairdist Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linpairdist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nv, i, j, Np1, maxchunk; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; Np = *np; Nv = *nv; Np1 = Np - 1; OUTERCHUNKLOOP(i, Np1, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np1, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - 
xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = ANSWER(j,i) = d; } ANSWER(i,i) = 0; } } } spatstat/src/linSnncross.c0000644000176200001440000000144413406057617015377 0ustar liggesusers#include #include "yesno.h" /* linSnncross.c Shortest-path distances between nearest neighbours in linear network One pattern to another pattern $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ 'Sparse version' Works with sparse representation Does not allow 'exclusion' Requires point data to be ordered by segment index. linSnndcross linSnndwhich Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Clinvdist(), Clinvwhichdist(); /* functions from linvdist.c */ #undef HUH /* definition of linSnndcross */ #define FNAME linSnndcross #undef WHICH #include "linSnncross.h" /* definition of linSnndwhich */ #undef FNAME #define FNAME linSnndwhich #define WHICH #include "linSnncross.h" spatstat/src/connect.c0000755000176200001440000000644213406057617014523 0ustar liggesusers/* connect.c Connected component transforms cocoImage: connected component transform of a discrete binary image (8-connected topology) cocoGraph: connected component labels for a discrete graph specified by a list of edges $Revision: 1.9 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #include #include "raster.h" void shape_raster(); #include "yesno.h" /* workhorse function for 
cocoImage */
/* Iteratively propagate labels until every pixel in a connected
   component carries the minimum label occurring in that component. */
void comcommer(im)
     Raster *im;
     /* raster must have been dimensioned by shape_raster() */
     /* Pixel values assumed to be 0 in background, and
	distinct nonzero integers in foreground */
{
  int j,k;
  int rmin, rmax, cmin, cmax;
  int label, curlabel, minlabel;
  int nchanged;

  /* image boundaries */
  rmin = im->rmin;
  rmax = im->rmax;
  cmin = im->cmin;
  cmax = im->cmax;

  /* pixel value at (ROW, COL), usable as an lvalue */
#define ENTRY(ROW, COL) Entry(*im, ROW, COL, int)

  /* if neighbouring pixel (ROW, COL) has a nonzero label smaller
     than BEST, shrink BEST to that label (NEW is scratch) */
#define UPDATE(ROW,COL,BEST,NEW) \
     NEW = ENTRY(ROW, COL); \
     if(NEW != 0 && NEW < BEST) \
       BEST = NEW

  /* sweep repeatedly: each foreground pixel adopts the minimum
     nonzero label in its 3x3 neighbourhood (8-connected topology,
     per the file header); stop when a full sweep changes nothing */
  nchanged = 1;
  while(nchanged >0) {
    nchanged = 0;
    R_CheckUserInterrupt();
    for(j = rmin; j <= rmax; j++) {
      for(k = cmin; k <= cmax; k++) {
	curlabel = ENTRY(j, k);
	if(curlabel != 0) {
	  minlabel = curlabel;
	  UPDATE(j-1, k-1, minlabel, label);
	  UPDATE(j-1, k, minlabel, label);
	  UPDATE(j-1, k+1, minlabel, label);
	  UPDATE(j, k-1, minlabel, label);
	  UPDATE(j, k, minlabel, label);
	  UPDATE(j, k+1, minlabel, label);
	  UPDATE(j+1, k-1, minlabel, label);
	  UPDATE(j+1, k, minlabel, label);
	  UPDATE(j+1, k+1, minlabel, label);
	  if(minlabel < curlabel) {
	    ENTRY(j, k) = minlabel;
	    nchanged++;
	  }
	}
      }
    }
  }
}

/* R entry point: wrap the integer matrix 'mat' in a Raster structure
   and run the workhorse comcommer() on it. */
void cocoImage(mat, nr, nc)
     int *mat;      /* input: binary image */
     int *nr, *nc;  /* raster dimensions EXCLUDING margin of 1 on each side */
{
  Raster im;
  /* the margin of 1 pixel on each side (last four arguments) means
     the 3x3 neighbourhood access in comcommer never runs off the array */
  shape_raster( &im, (void *) mat,
		(double) 1, (double) 1,
		(double) *nc, (double) *nr,
		*nr+2, *nc+2, 1, 1);
  comcommer(&im);
}

/* Connected components of an abstract graph given by an edge list. */
void cocoGraph(nv, ne, ie, je, label, status)
     /* inputs */
     int *nv;       /* number of graph vertices */
     int *ne;       /* number of edges */
     int *ie, *je;  /* vectors of indices of ends of each edge */
     /* output */
     int *label;    /* vector of component labels for each vertex */
     /* Component label is lowest serial number of any vertex
	in the connected component */
     int *status;   /* 0 if OK, 1 if overflow */
{
  int Nv, Ne, i, j, k, niter, labi, labj, changed;

  Nv = *nv;
  Ne = *ne;
  /* initialise labels: every vertex starts in its own component */
  for(k = 0; k < Nv; k++)
    label[k] = k;
  /* at most Nv sweeps are required for the labels to converge */
  for(niter = 0; niter < Nv; niter++) {
    R_CheckUserInterrupt();
    changed = NO;
    for(k = 0; k < Ne; k++)
{ i = ie[k]; j = je[k]; labi = label[i]; labj = label[j]; if(labi < labj) { label[j] = labi; changed = YES; } else if(labj < labi) { label[i] = labj; changed = YES; } } if(!changed) { /* algorithm has converged */ *status = 0; return; } } /* error exit */ *status = 1; return; } spatstat/src/mhsnoop.h0000644000176200001440000000065113406057617014553 0ustar liggesusers/* Function declarations from mhsnoop.c $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include "mhsnoopdef.h" void initmhsnoop(Snoop *s, SEXP env); void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype); spatstat/src/mhv4.h0000644000176200001440000000055213406057617013746 0ustar liggesusers/* mhv4.h visual debugger or not Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_SNOOP if(snooper.active) { /* visual debugger */ #define MH_SNOOP YES #include "mhv5.h" #undef MH_SNOOP } else { /* no visual debugger */ #define MH_SNOOP NO #include "mhv5.h" #undef MH_SNOOP } spatstat/src/dinfty.c0000755000176200001440000000677613406057617014401 0ustar liggesusers/* dinfty.c $Revision: 1.6 $ $Date: 2011/09/20 07:42:18 $ Code by Dominic Schuhmacher Modified by Adrian Baddeley */ #include #include #define COST(I,J) (d)[n * (J) + (I)] int arraymax(int *a, int n); void swap(int i, int j, int *a); int largestmobpos(int *mobile, int *current, int *collectvals, int n); /* ------------ The main function ----------------------------- */ void dinfty_R(int *d, int *num, int *assignment) { int i,j; /* indices */ int lmp, lmq; /* largest mobile position and its neighbor */ int newmax; int n, currmin; int *current, *travel, *mobile, *assig, *distrelev, *collectvals; n = *num; /* scratch space */ assig = (int *) R_alloc((long) n, sizeof(int)); travel = (int *) R_alloc((long) n, sizeof(int)); mobile = (int *) 
R_alloc((long) n, sizeof(int));
  current = (int *) R_alloc((long) n, sizeof(int));
  distrelev = (int *) R_alloc((long) n, sizeof(int));
  collectvals = (int *) R_alloc((long) (n * n), sizeof(int));

  /* */
  /* We use the Johnson-Trotter Algorithm for listing permutations */
  /* */

  /* Initialize the algorithm */
  for (i = 0; i < n; i++) {
    travel[i] = -1;     /* all numbers traveling to the left */
    mobile[i] = 1;      /* all numbers mobile */
    current[i] = i;     /* current permutation is the identity */
    assig[i] = i;       /* best permutation up to now is the identity */
    distrelev[i] = COST(i, i);  /* pick relevant entries in the cost matrix */
  }
  /* cost of the identity assignment */
  currmin = arraymax(distrelev, n);  /* minimal max up to now */

  /* The main loop: enumerate all permutations, keeping the one whose
     largest selected cost entry is smallest (minimax assignment) */
  while(arraymax(mobile, n) == 1) {
    lmp = largestmobpos(mobile, current, collectvals, n);
    lmq = lmp + travel[lmp];
    swap(lmp, lmq, current);
    swap(lmp, lmq, travel);
    /* update directions, mobility and relevant cost entries */
    for (i = 0; i < n; i++) {
      if (current[i] > current[lmq])
	travel[i] = -travel[i];
      j = i + travel[i];
      if (j < 0 || j > n-1 || current[i] < current[j])
	mobile[i] = 0;
      else
	mobile[i] = 1;
      distrelev[i] = COST(i, current[i]);
    }
    /* Calculation of new maximal value */
    newmax = arraymax(distrelev, n);
    if (newmax < currmin) {
      currmin = newmax;
      for (i = 0; i < n; i++) {
	assig[i] = current[i];
      }
    }
  }

  /* For testing: print distance from within C program
  Rprintf("Prohorov distance is %d\n", currmin); */

  /* "Return" the final assignment, converted to 1-based indices for R */
  for (i = 0; i < n; i++) {
    assignment[i] = assig[i] + 1;
  }
}

/* ------------------------------------------------------------*/

/* Maximal element of an integer array */
/* Returns -1 as a sentinel when n < 1. */
/* NOTE(review): the 'n > 1' guard is redundant — the loop starts at
   i = 0 and re-examining a[0] is harmless; behaviour is unchanged. */
int arraymax(int *a, int n) {
  int i, amax;
  if(n < 1)
    return(-1);
  amax = a[0];
  if(n > 1)
    for(i = 0; i < n; i++)
      if(a[i] > amax)
	amax = a[i];
  return(amax);
}

/* Swap elements i and j in array a */
void swap(int i, int j, int *a) {
  int v;
  v = a[i];
  a[i] = a[j];
  a[j] = v;
}

/* Return index of largest mobile number in current */
int largestmobpos(int *mobile, int *current, int *collectvals, int n) {
  int i,j, maxval;
  /* collect the 'current' values of all mobile entries */
  j = 0;
  for (i = 0;
i < n; i++) { if (mobile[i] == 1) { collectvals[j] = current[i]; j++; } } maxval = arraymax(collectvals, j); for (i = 0; i < n; i++) { if (current[i] == maxval) { return(i); } } error("Internal error: largestmobpos failed"); return(0); } spatstat/src/denspt.c0000755000176200001440000003142113406057617014362 0ustar liggesusers#include #include #include "chunkloop.h" #include "pairloop.h" #include "constants.h" /* denspt.c Calculation of density estimate at data points $Revision: 1.19 $ $Date: 2018/12/18 02:43:11 $ Assumes point pattern is sorted in increasing order of x coordinate *denspt* Density estimate at points *smoopt* Smoothed mark values at points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int n, i, j, maxchunk; \ double xi, yi, rmax, r2max, dx, dy, dx2, d2 #define STD_INITIALISE \ n = *nxy; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void denspt(nxy, x, y, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtdenspt(nxy, x, y, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, 
twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2/twosig2); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void adenspt(nxy, x, y, rmaxi, detsigma, sinv, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } void awtdenspt(nxy, x, y, rmaxi, detsigma, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void smoopt(nxy, x, y, v, self, rmaxi, sig, result) /* 
inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void wtsmoopt(nxy, x, y, v, self, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ------------- anisotropic versions -------------------- */ void asmoopt(nxy, x, y, v, self, rmaxi, sinv, result) /* inputs */ 
int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void awtsmoopt(nxy, x, y, v, self, rmaxi, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 
0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ----------------- transformed coordinates -------------------- */ /* The following functions assume that x, y have been transformed by the inverse of the variance matrix, and subsequently scaled by 1/sqrt(2) so that the Gaussian density is proportional to exp(-(x^2+y^2)). Constant factor in density is omitted. */ void Gdenspt(nxy, x, y, rmaxi, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2); } , { result[i] = resulti; }) } void Gwtdenspt(nxy, x, y, rmaxi, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2); }, { result[i] = resulti; } ) } void Gsmoopt(nxy, x, y, v, self, rmaxi, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer 
+= v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void Gwtsmoopt(nxy, x, y, v, self, rmaxi, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } spatstat/src/penttinen.c0000644000176200001440000000602613115225157015062 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" #include "constants.h" /* Conditional intensity computation for Penttinen process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Penttinen { double gamma; double r; double loggamma; double reach2; double *period; int hard; int per; } Penttinen; /* initialiser function */ Cdata *penttineninit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Penttinen *penttinen; penttinen = (Penttinen *) R_alloc(1, sizeof(Penttinen)); /* Interpret model parameters*/ penttinen->gamma = model.ipar[0]; penttinen->r = model.ipar[1]; penttinen->reach2 = 4.0 * penttinen->r * penttinen->r; penttinen->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Penttinen gamma=%lf, r=%lf\n", 
penttinen->gamma, penttinen->r); #endif /* is the model numerically equivalent to hard core ? */ penttinen->hard = (penttinen->gamma < DOUBLE_EPS); penttinen->loggamma = (penttinen->hard) ? 0 : log(penttinen->gamma); /* periodic boundary conditions? */ penttinen->per = (model.period[0] > 0.0); return((Cdata *) penttinen); } /* conditional intensity evaluator */ double penttinencif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, reach2, z, z2, logpot, cifval; Penttinen *penttinen; DECLARE_CLOSE_D2_VARS; penttinen = (Penttinen *) cdata; reach2 = penttinen->reach2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); logpot = 0.0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(penttinen->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],penttinen->period,reach2,d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } if(ixp1 < npts) { for(j=ixp1; jperiod,reach2,d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], reach2, d2)) { z2 = d2/reach2; z = sqrt(z2); if(z < 1.0) { logpot += acos(z) - z * sqrt(1 - z2); } } } } if(ixp1 < npts) { for(j=ixp1; jhard) { if(logpot > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((penttinen->loggamma) * M_2_PI * logpot); return cifval; } Cifns PenttinenCifns = { &penttineninit, &penttinencif, (updafunptr) NULL, NO}; spatstat/src/Estrauss.c0000755000176200001440000000336013406057617014677 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h" /* Estrauss.c $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 C 
implementation of 'eval' for Strauss interaction

  Calculates number of data points within distance r of each quadrature point
  (when 'source' = quadrature points, 'target' = data points)

  Assumes point patterns are sorted in increasing order of x coordinate

*/

double sqrt();

/* For each source point j, count the target points lying within
   distance rmax; counts[j] receives the count. */
void Ccrosspaircounts(nnsource, xsource, ysource,
		      nntarget, xtarget, ytarget,
		      rrmax, counts)
     /* inputs */
     int *nnsource, *nntarget;
     double *xsource, *ysource, *xtarget, *ytarget, *rrmax;
     /* output */
     int *counts;  /* counts[j] = number of targets within rmax of source j */
{
  int nsource, ntarget, maxchunk, j, i, ileft, counted;
  double xsourcej, ysourcej, rmax, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2;

  nsource = *nnsource;
  ntarget = *nntarget;
  rmax = *rrmax;
  r2max = rmax * rmax;
  /* slack added to r2max for the dx^2 early-exit test only;
     EPSILON comes from looptest.h — presumably a small tolerance
     guarding against rounding error; confirm against that header */
  r2maxpluseps = r2max + EPSILON(r2max);

  /* empty input: nothing to count (output vector left untouched) */
  if(nsource == 0 || ntarget == 0)
    return;

  ileft = 0;
  /* chunked loop over source points (macros from chunkloop.h)
     so that R_CheckUserInterrupt is called periodically */
  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
      counted = 0;
      xsourcej = xsource[j];
      ysourcej = ysource[j];
      /* adjust starting point */
      /* 'ileft' only ever advances, exploiting that both patterns
	 are sorted by x coordinate */
      xleft = xsourcej - rmax;
      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
	++ileft;
      /* process from ileft to iright */
      for(i=ileft; i < ntarget; i++) {
	dx = xtarget[i] - xsourcej;
	dx2 = dx * dx;
	if(dx2 > r2maxpluseps)
	  break;    /* all later targets are even further away in x */
	dy = ytarget[i] - ysourcej;
	d2 = dx2 + dy * dy;
	if(d2 <= r2max)
	  ++counted;
      }
      counts[j] = counted;
    }
  }
}
spatstat/src/Egeyer.c0000755000176200001440000000464213406057617014302 0ustar liggesusers#include #include #include "chunkloop.h" #include "looptest.h"
/* NOTE(review): the #include header names above appear to have been
   stripped during text extraction (angle-bracketed names lost);
   presumably <R.h>, <Rmath.h>, <math.h> or similar — confirm against
   the upstream spatstat sources. */
/*

  Egeyer.c

  $Revision: 1.7 $     $Date: 2018/12/18 02:43:11 $

  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018
  Licence: GNU Public Licence >= 2

  Part of C implementation of 'eval' for Geyer interaction

  Calculates change in saturated count

  (xquad, yquad): quadscheme

  (xdata, ydata): data

  tdata: unsaturated pair counts for data pattern

  quadtodata[j] = i   if quad[j] == data[i]      (indices start from ZERO)

  Assumes point patterns are sorted in increasing order of x coordinate

*/

double sqrt();

void Egeyer(nnquad,
xquad, yquad, quadtodata, nndata, xdata, ydata, tdata, rrmax, ssat, result) /* inputs */ int *nnquad, *nndata, *quadtodata, *tdata; double *xquad, *yquad, *xdata, *ydata, *rrmax, *ssat; /* output */ double *result; { int nquad, ndata, maxchunk, j, i, ileft, dataindex, isdata; double xquadj, yquadj, rmax, sat, r2max, r2maxpluseps, xleft, dx, dy, dx2, d2; double tbefore, tafter, satbefore, satafter, delta, totalchange; nquad = *nnquad; ndata = *nndata; rmax = *rrmax; sat = *ssat; if(nquad == 0 || ndata == 0) return; r2max = rmax * rmax; r2maxpluseps = r2max + EPSILON(r2max); ileft = 0; OUTERCHUNKLOOP(j, nquad, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nquad, maxchunk, 65536) { totalchange = 0.0; xquadj = xquad[j]; yquadj = yquad[j]; dataindex = quadtodata[j]; isdata = (dataindex >= 0); /* adjust starting point */ xleft = xquadj - rmax; while((xdata[ileft] < xleft) && (ileft+1 < ndata)) ++ileft; /* process until dx > rmax */ for(i=ileft; i < ndata; i++) { dx = xdata[i] - xquadj; dx2 = dx * dx; if(dx2 > r2maxpluseps) break; if(i != dataindex) { dy = ydata[i] - yquadj; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* effect of adding dummy point j or negative effect of removing data point */ tbefore = tdata[i]; tafter = tbefore + ((isdata) ? -1 : 1); /* effect on saturated values */ satbefore = (double) ((tbefore < sat)? tbefore : sat); satafter = (double) ((tafter < sat)? tafter : sat); /* sum changes over all i */ delta = satafter - satbefore; totalchange += ((isdata) ? 
-delta : delta); } } } result[j] = totalchange; } } } spatstat/src/linvdist.h0000644000176200001440000000672113406057617014730 0ustar liggesusers/* linvdist.h Distance function at vertices (shortest distance from each vertex to a data point) Function body definitions with macros Sparse representation of network $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros used: FNAME name of function WHICH whether 'nnwhich' is required HUH debugging flag ! Data points must be ordered by segment index ! */ void FNAME(np, sp, tp, /* target data points (ordered by sp) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ #ifdef WHICH dist, /* distance from each vertex to nearest data point */ which /* identifies nearest data point */ #else dist /* distance from each vertex to nearest data point */ #endif ) int *np, *nv, *ns; /* number of points, vertices, segments */ int *sp, *from, *to; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; #ifdef WHICH int *which; #endif { int Np, Nv, Ns, i, j, k, segPj, ivleft, ivright; double hugevalue, eps, dleft, dright, slen, d, tpj; char converged; Np = *np; Nv = *nv; Ns = *ns; hugevalue = *huge; eps = *tol; #ifdef HUH Rprintf("Initialise dist\n"); #endif /* initialise to huge value */ for(i = 0; i < Nv; i++) { dist[i] = hugevalue; #ifdef WHICH which[i] = -1; #endif } #ifdef HUH Rprintf("Run through target points\n"); #endif /* assign correct value to endpoints of segments containing target points */ for(j = 0; j < Np; j++) { segPj = sp[j]; tpj = tp[j]; slen = seglen[segPj]; ivleft = from[segPj]; d = slen * tpj; if(d < dist[ivleft]) { dist[ivleft] = d; #ifdef WHICH which[ivleft] = j; #endif } ivright = to[segPj]; d = slen * 
(1.0 - tpj); if(d < dist[ivright]) { dist[ivright] = d; #ifdef WHICH which[ivright] = j; #endif } } /* recursively update */ #ifdef HUH Rprintf("Recursive update\n"); #endif converged = NO; while(!converged) { converged = YES; #ifdef HUH Rprintf("........... starting new pass ...................... \n"); #endif for(k = 0; k < Ns; k++) { ivleft = from[k]; ivright = to[k]; slen = seglen[k]; dleft = (double) dist[ivleft]; dright = (double) dist[ivright]; d = (double) (dleft + slen); if(d < dright - eps) { #ifdef HUH Rprintf("Updating ivright=%d using ivleft=%d, from %lf to %lf+%lf=%lf\n", ivright, ivleft, dright, dleft, slen, d); #endif converged = NO; dist[ivright] = d; #ifdef WHICH which[ivright] = which[ivleft]; #endif } else { d = (double) (dright + slen); if(d < dleft - eps) { #ifdef HUH Rprintf("Updating ivleft=%d using ivright=%d, from %lf to %lf+%lf=%lf\n", ivleft, ivright, dleft, dright, slen, d); #endif converged = NO; dist[ivleft] = d; #ifdef WHICH which[ivleft] = which[ivright]; #endif } } } } #ifdef HUH Rprintf("Done\nVertex values:\n"); #ifdef WHICH Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) Rprintf("\t%d\t%d\t%lf\n", i, which[i], dist[i]); #else Rprintf("\ti\tdist\n"); for(i = 0; i < Nv; i++) Rprintf("\t%d\t%lf\n", i, dist[i]); #endif #endif } spatstat/src/fexitc.c0000755000176200001440000000045513115271120014332 0ustar liggesusers# include # include # include void fexitc(const char *msg) { size_t nc = strlen(msg); char buf[256]; if(nc > 255) { warning("invalid character length in fexitc"); nc = 255; } strncpy(buf, msg, nc); buf[nc] = '\0'; error(buf); } spatstat/src/rasterfilter.c0000644000176200001440000000243313406057617015571 0ustar liggesusers/* rasterfilter.c Apply linear filter to a raster image Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2017 Licence: GPL >= 2 $Revision: 1.6 $ $Date: 2017/11/18 05:14:53 $ */ #include #include #include void raster3filter(nx, ny, a, w, b) int *nx, *ny; /* raster dimensions */ double *a; /* 
input image */ double *w; /* 3x3 filter coefficients */ double *b; /* output image */ { int Nxcol, Nyrow, Nx1, Ny1; int i, j; double value; Nxcol = *nx; Nyrow = *ny; Nx1 = Nxcol - 1; Ny1 = Nyrow - 1; #define A(I,J) a[(I) + (J) * Nyrow] #define B(I,J) b[(I) + (J) * Nyrow] #define WEIGHT(DI,DJ) w[((DI)+1) + ((DJ)+1)*3] #define FILTER(DI,DJ) WEIGHT(DI,DJ) * A(i+(DI), j+(DJ)) /* loop over pixels */ for(j = 0; j < Nxcol; j++) { R_CheckUserInterrupt(); for(i = 0; i < Nyrow; i++) { value = FILTER(0,0); if(j > 0) value += FILTER(0,-1); if(j < Nx1) value += FILTER(0, 1); if(i > 0) { if(j > 0) value += FILTER(-1,-1); value += FILTER(-1, 0); if(j < Nx1) value += FILTER(-1, 1); } if(i < Ny1) { if(j > 0) value += FILTER(1, -1); value += FILTER(1, 0); if(j < Nx1) value += FILTER(1, 1); } B(i,j) = value; } } } spatstat/src/linequad.h0000644000176200001440000003450113406057617014673 0ustar liggesusers/* linequad.h Template code, #included several times in linequad.c Macros used: FUNNAME function name (unmarked version) FMKNAME function name (marked version) ALEA #defined if grid location should be randomised HUH #defined if debugging is on SWAP swap macro $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FUNNAME(ns, from, to, nv, xv, yv, eps, ndat, sdat, tdat, wdat, ndum, xdum, ydum, sdum, tdum, wdum, maxscratch) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. Data points on the network are specified by *ndat, sdat, tdat. *** Assumed to be sorted in order of 'sdat' ** Dummy points will be placed every 'eps' units along each segment. Output vectors: wdat quadrature weights for the data points wdum quadrature weights for the dummy points xdum, | ydum, | coordinates of dummy points sdum, | tdum | Space must be allocated for sum(ceiling(lengths/eps)) dummy points. 
*/ int *ns; /* number of segments */ int *from, *to; /* endpoints of each segment */ int *nv; /* number of vertices */ double *xv, *yv; /* cartesian coords of vertices */ double *eps; /* desired spacing of dummy points */ int *ndat, *ndum; /* number of data & dummy points */ int *sdat, *sdum; /* segment id (local coordinate) */ double *tdat, *tdum; /* location (local coordinate) */ double *wdat, *wdum; /* quadrature weights */ double *xdum, *ydum; /* spatial coordinates of dummy points */ int *maxscratch; { int Nseg, Ndat, Ndum, Lmax, i, j, k, ll, m, fromi, toi; #ifdef HUH int Nvert; #endif int SegmentForData, nwhole, nentries, npieces, npieces1; double x0, y0, x1, y1, dx, dy; double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart; double tfirst, tlast, tcurrent, plen, w; int *serial, *count, *pieceid; char *isdata; double *tvalue, *pieceweight; Nseg = *ns; Ndat = *ndat; Ndum = 0; Lmax = *maxscratch; epsilon = *eps; #ifdef HUH Nvert = *nv; Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax); #endif /* allocate scratch space, one for each data/dummy point in current segment */ serial = (int *) R_alloc(Lmax, sizeof(int)); isdata = (char *) R_alloc(Lmax, sizeof(char)); tvalue = (double *) R_alloc(Lmax, sizeof(double)); pieceid = (int *) R_alloc(Lmax, sizeof(int)); /* allocate scratch space, one for each piece of current segment */ count = (int *) R_alloc(Lmax, sizeof(int)); pieceweight = (double *) R_alloc(Lmax, sizeof(double)); /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Ndat > 0) ? 
sdat[0] : -1; #ifdef ALEA GetRNGstate(); #endif /* loop over line segments */ for(i = 0; i < Nseg; i++) { #ifdef HUH Rprintf("Segment %d\n", i); #endif /* endpoints of segment */ fromi = from[i]; toi = to[i]; x0 = xv[fromi]; y0 = yv[fromi]; x1 = xv[toi]; y1 = yv[toi]; dx = x1 - x0; dy = y1 - y0; seglength = sqrt(dx * dx + dy * dy); /* divide segment into pieces of length eps with shorter bits at each end */ ratio = seglength/epsilon; nwhole = (int) floor(ratio); if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole; rump = (seglength - nwhole * epsilon)/2.0; epsfrac = epsilon/seglength; rumpfrac = rump/seglength; /* There are nwhole+2 pieces, with endpoints 0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1 */ /* Now place dummy points in these pieces */ #ifdef ALEA tfirst = rumpfrac * unif_rand(); #else tfirst = rumpfrac/2.0; #endif #ifdef HUH Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", nwhole, epsfrac, rumpfrac, tfirst); Rprintf("\tsegment length %lf divided into %d pieces\n", seglength, nwhole+2); #endif /* create a new dummy point in each piece */ #ifdef HUH Rprintf("\tMaking left dummy point %d\n", Ndum); #endif tvalue[0] = tfirst; serial[0] = Ndum; isdata[0] = NO; count[0] = 1; pieceid[0] = 0; xdum[Ndum] = x0 + dx * tfirst; ydum[Ndum] = y0 + dy * tfirst; sdum[Ndum] = i; tdum[Ndum] = tfirst; ++Ndum; if(nwhole > 0) { #ifdef HUH Rprintf("\tMaking %d middle dummy points\n", nwhole); #endif #ifdef ALEA gridstart = rumpfrac - unif_rand() * epsfrac; #else gridstart = rumpfrac - epsfrac/2.0; #endif for(j = 1; j <= nwhole; j++) { serial[j] = Ndum; tvalue[j] = tcurrent = gridstart + ((double) j) * epsfrac; isdata[j] = NO; count[j] = 1; pieceid[j] = j; xdum[Ndum] = x0 + dx * tcurrent; ydum[Ndum] = y0 + dy * tcurrent; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } } j = nwhole + 1; #ifdef HUH Rprintf("\tMaking right dummy point %d\n", Ndum); #endif serial[j] = Ndum; isdata[j] = NO; tvalue[j] = tlast = 1.0 - tfirst; count[j] = 1; 
pieceid[j] = j; xdum[Ndum] = x0 + dx * tlast; ydum[Ndum] = y0 + dy * tlast; sdum[Ndum] = i; tdum[Ndum] = tlast; ++Ndum; nentries = npieces = nwhole + 2; npieces1 = npieces-1; /* add any data points lying on current segment i */ while(SegmentForData == i) { #ifdef HUH Rprintf("\tData point %d lies on segment %d\n", k, i); #endif serial[nentries] = k; tvalue[nentries] = tcurrent = tdat[k]; isdata[nentries] = YES; /* determine which piece contains the data point */ ll = (int) ceil((tcurrent - rumpfrac)/epsfrac); if(ll < 0) ll = 0; else if(ll >= npieces) ll = npieces1; #ifdef HUH Rprintf("\tData point %d mapped to piece %d\n", k, ll); #endif count[ll]++; pieceid[nentries] = ll; ++nentries; ++k; SegmentForData = (k < Ndat) ? sdat[k] : -1; } /* compute counting weights for each piece of segment */ #ifdef HUH Rprintf("\tcounting weights..\n"); #endif for(ll = 0; ll < npieces; ll++) { plen = (ll == 0 || ll == npieces1)? rump : epsilon; pieceweight[ll] = plen/count[ll]; } /* apply weights to data/dummy points */ #ifdef HUH Rprintf("\tdistributing weights..\n"); #endif for(j = 0; j < nentries; j++) { m = serial[j]; ll = pieceid[j]; if(ll >= 0 && ll < npieces) { w = pieceweight[ll]; if(isdata[j]) { #ifdef HUH Rprintf("\t\tEntry %d: data point %d, piece %d\n", j, m, ll); #endif wdat[m] = w; } else { #ifdef HUH Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", j, m, ll); #endif wdum[m] = w; } } } } *ndum = Ndum; #ifdef ALEA PutRNGstate(); #endif } void FMKNAME(ns, from, to, nv, xv, yv, eps, ntypes, ndat, xdat, ydat, mdat, sdat, tdat, wdat, ndum, xdum, ydum, mdum, sdum, tdum, wdum, maxscratch) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. Data points on the network are specified by *ndat, xdat, ydat, mdat, sdat, tdat. *** Assumed to be sorted in order of 'sdat' ** Dummy points will be placed every 'eps' units along each segment and replicated for each possible mark. 
Each data point location is also replicated by dummy points with each possible mark except the mark of the data point. Output vectors: wdat quadrature weights for the data points wdum quadrature weights for the dummy points xdum, | ydum, | coordinates of dummy points sdum, | tdum | mdum marks for dummy points Space must be allocated for ntypes * sum(ceiling(lengths/eps)) dummy points. */ int *ns; /* number of segments */ int *from, *to; /* endpoints of each segment */ int *nv; /* number of vertices */ double *xv, *yv; /* cartesian coords of vertices */ double *eps; /* desired spacing of dummy points */ int *ndat, *ndum; /* number of data & dummy points */ int *ntypes; /* number of types */ double *xdat, *ydat; /* spatial coordinates of data points */ double *xdum, *ydum; /* spatial coordinates of dummy points */ int *mdat, *mdum; /* mark values */ int *sdat, *sdum; /* segment id (local coordinate) */ double *tdat, *tdum; /* location (local coordinate) */ double *wdat, *wdum; /* quadrature weights */ int *maxscratch; { int Nseg, Ndat, Ndum, Ntypes, Lmax, i, k, ll, m, fromi, toi; #ifdef HUH int Nvert; #endif int SegmentForData, nwhole, nentries, npieces, npieces1, nMpieces; int jpiece, jentry, jpdata, type, mcurrent; double x0, y0, x1, y1, dx, dy, xcurrent, ycurrent; double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart; double tfirst, tlast, tcurrent, plen, w; int *serial, *count, *mkpieceid; char *isdata; double *tvalue, *countingweight; Nseg = *ns; Ndat = *ndat; Ntypes = *ntypes; Ndum = 0; Lmax = *maxscratch; epsilon = *eps; #ifdef HUH Nvert = *nv; Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax); #endif /* allocate scratch space, one for each data/dummy point in current segment */ serial = (int *) R_alloc(Lmax, sizeof(int)); isdata = (char *) R_alloc(Lmax, sizeof(char)); tvalue = (double *) R_alloc(Lmax, sizeof(double)); mkpieceid = (int *) R_alloc(Lmax, sizeof(int)); /* allocate scratch space, one for each piece of 
current segment */ count = (int *) R_alloc(Lmax, sizeof(int)); countingweight = (double *) R_alloc(Lmax, sizeof(double)); /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Ndat > 0) ? sdat[0] : -1; #ifdef ALEA GetRNGstate(); #endif /* loop over line segments */ for(i = 0; i < Nseg; i++) { #ifdef HUH Rprintf("Segment %d\n", i); #endif /* endpoints of segment */ fromi = from[i]; toi = to[i]; x0 = xv[fromi]; y0 = yv[fromi]; x1 = xv[toi]; y1 = yv[toi]; dx = x1 - x0; dy = y1 - y0; seglength = sqrt(dx * dx + dy * dy); /* divide segment into pieces of length eps with shorter bits at each end */ ratio = seglength/epsilon; nwhole = (int) floor(ratio); if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole; npieces = nwhole + 2; rump = (seglength - nwhole * epsilon)/2.0; epsfrac = epsilon/seglength; rumpfrac = rump/seglength; /* There are nwhole+2 pieces, with endpoints 0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1 */ /* Now place dummy points in these pieces */ #ifdef ALEA tfirst = rumpfrac * unif_rand(); gridstart = rumpfrac - epsfrac * unif_rand(); #else tfirst = rumpfrac/2.0; gridstart = rumpfrac - epsfrac/2.0; #endif tlast = 1.0 - tfirst; #ifdef HUH Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", nwhole, epsfrac, rumpfrac, tfirst); Rprintf("\tsegment length %lf divided into %d pieces\n", seglength, npieces); #endif /* 'Marked pieces' of segment are numbered in order (piece 0, mark 0), (piece 0, mark 1), ..., (piece 0, mark Ntypes-1), (piece 1, mark 0), ..... mpieceid = type + pieceid * Ntypes */ #ifdef HUH Rprintf("\tMaking %d x %d = %d dummy points\n", npieces, Ntypes, npieces * Ntypes); #endif /* create a new dummy point in each piece */ npieces1 = npieces-1; for(jpiece = 0; jpiece < npieces; jpiece++) { tcurrent = (jpiece == 0) ? tfirst : (jpiece == npieces1) ? 
tlast : (gridstart + ((double) jpiece) * epsfrac); xcurrent = x0 + dx * tcurrent; ycurrent = y0 + dy * tcurrent; for(type = 0; type < Ntypes; type++) { /* position in list of relevant data/dummy points */ jentry = type + jpiece * Ntypes; /* serial number of marked piece */ ll = jentry; tvalue[jentry] = tcurrent; serial[jentry] = Ndum; isdata[jentry] = NO; mkpieceid[jentry] = ll; count[ll] = 1; xdum[Ndum] = xcurrent; ydum[Ndum] = ycurrent; mdum[Ndum] = type; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } } nentries = npieces * Ntypes; /* handle any data points lying on current segment i */ while(SegmentForData == i) { #ifdef HUH Rprintf("\tData point %d lies on segment %d\n", k, i); #endif xcurrent = xdat[k]; ycurrent = ydat[k]; tcurrent = tdat[k]; mcurrent = mdat[k]; /* determine which piece contains the data point */ jpdata = (int) ceil((tcurrent - rumpfrac)/epsfrac); if(jpdata < 0) jpdata = 0; else if(jpdata >= npieces) jpdata = npieces1; #ifdef HUH Rprintf("\tData point %d falls in piece %d\n", k, jpdata); #endif /* copy data point, and create dummy points at same location with different marks */ for(type = 0; type < Ntypes; type++) { tvalue[nentries] = tcurrent; ll = type + jpdata * Ntypes; mkpieceid[nentries] = ll; count[ll]++; if(type == mcurrent) { /* data point */ isdata[nentries] = YES; serial[nentries] = k; } else { /* create dummy point */ isdata[nentries] = NO; serial[nentries] = Ndum; xdum[Ndum] = xcurrent; ydum[Ndum] = ycurrent; mdum[Ndum] = type; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } ++nentries; } ++k; SegmentForData = (k < Ndat) ? sdat[k] : -1; } /* compute counting weights for each piece of segment */ #ifdef HUH Rprintf("\tcounting weights..\n"); #endif for(jpiece = 0; jpiece < npieces; jpiece++) { plen = (jpiece == 0 || jpiece == npieces1)? 
rump : epsilon; for(type = 0; type < Ntypes; type++) { ll = type + jpiece * Ntypes; countingweight[ll] = plen/count[ll]; } } /* apply weights to data/dummy points */ #ifdef HUH Rprintf("\tdistributing weights..\n"); #endif nMpieces = npieces * Ntypes; for(jentry = 0; jentry < nentries; jentry++) { m = serial[jentry]; ll = mkpieceid[jentry]; if(ll >= 0 && ll < nMpieces) { w = countingweight[ll]; if(isdata[jentry]) { #ifdef HUH Rprintf("\t\tEntry %d: data point %d, piece %d\n", jentry, m, ll); #endif wdat[m] = w; } else { #ifdef HUH Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", jentry, m, ll); #endif wdum[m] = w; } } } } *ndum = Ndum; #ifdef ALEA PutRNGstate(); #endif } spatstat/src/dist2.c0000755000176200001440000000415113406057617014112 0ustar liggesusers# include #include #include "yesno.h" /* dist2: squared distance in torus dist2thresh: faster code for testing whether dist2 < r2 dist2Mthresh: same as dist2thresh, but does not assume the points are within one period of each other. Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ double dist2(u,v,x,y,period) double u, v, x, y; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, d2; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp)? dx : dxp; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp)? dy : dyp; d2 = a * a + b * b; return d2; } double dist2either(u,v,x,y,period) double u, v, x, y; double *period; { if(period[0] < 0.0) return pow(u-x,2) + pow(v-y,2); return(dist2(u,v,x,y,period)); } int dist2thresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp) ? 
dx : dxp; residue = r2 - a * a; if(residue <= 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue > b * b) return YES; return NO; } int dist2Mthresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are NOT assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; while(dx > wide) dx -= wide; dxp = wide - dx; a = (dx < dxp) ? dx : dxp; residue = r2 - a * a; if(residue < 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; while(dy > high) dy -= high; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue >= b * b) return YES; return NO; } spatstat/src/raster.h0000755000176200001440000000512013406057617014367 0ustar liggesusers/* raster.h Definition of raster structures & operations requires (for floor()) $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ typedef struct Raster{ /* array of data */ char *data; /* coerced to appropriate type */ int nrow; /* dimensions of entire array */ int ncol; int length; int rmin; /* position of valid subrectangle */ int rmax; int cmin; int cmax; /* definition of mapping into continuous space */ double x0; /* position of entry (rmin,cmin) */ double y0; double x1; /* position of entry (rmax,cmax) */ double y1; double xstep; /* x increment for each column step */ double ystep; /* y increment for each row step */ /* xstep = (x1 - x0)/(cmax - cmin) = (x1 - x0)/(number of valid columns - 1) CAN BE POSITIVE OR NEGATIVE */ /* image of valid subrectangle */ double xmin; /* = min{x0,x1} */ double xmax; double ymin; double ymax; } Raster; /* how to clear the data */ #define Clear(ARRAY,TYPE,VALUE) \ { unsigned int i; TYPE *p; \ for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \ *p = VALUE; } /* how to index a rectangular array stored 
sequentially in row-major order */ #define Entry(ARRAY,ROW,COL,TYPE) \ ((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)] /* test for indices inside subrectangle */ #define Inside(ARRAY,ROW,COL) \ ( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \ (COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax)) /* how to compute the position in R^2 corresponding to a raster entry */ #define Xpos(ARRAY,COL) \ ((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin)) #define Ypos(ARRAY,ROW) \ ((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin)) #define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceTo(X,Y,ARRAY,ROW,COL)\ Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) #define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceToSquared(X,Y,ARRAY,ROW,COL)\ DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) /* how to map a point (x,y) in R^2 to a raster entry */ /* (x,y) is guaranteed to lie in the rectangle bounded by the images of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1) where r = RowIndex(..) and c = ColIndex(..). */ #define RowIndex(ARRAY,Y) \ ((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep)) #define ColIndex(ARRAY,X) \ ((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep)) spatstat/src/nn3DdistX.h0000644000176200001440000000535513406057617014714 0ustar liggesusers/* nn3DdistX.h Code template for nearest-neighbour algorithms for 3D point patterns Input is two point patterns - supports 'nncross' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if the two patterns may include common points (which are not to be counted as neighbours) Either or both DIST and WHICH may be defined. 
THE FOLLOWING CODE ASSUMES THAT BOTH POINT PATTERNS ARE SORTED IN ASCENDING ORDER OF THE z COORDINATE If EXCLUDE is #defined, Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge) /* inputs */ int *n1, *n2, *id1, *id2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; /* outputs */ double *nnd; int *nnwhich; { int npoints1, npoints2, i, j, jwhich, lastjwhich; double d2, d2min, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2; #ifdef EXCLUDE int id1i; #endif hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; for(i = 0; i < npoints1; i++) { R_CheckUserInterrupt(); d2min = hu2; jwhich = -1; x1i = x1[i]; y1i = y1[i]; z1i = z1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(j = lastjwhich - 1; j >= 0; --j) { dz = z2[j] - z1i; dz2 = dz * dz; if(dz2 > d2min) break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[j] != id1i) { #endif dx = x2[j] - x1i; dy = y2[j] - y1i; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; jwhich = j; } #ifdef EXCLUDE } #endif } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(j = lastjwhich; j < npoints2; ++j) { dz = z2[j] - z1i; dz2 = dz * dz; if(dz2 > d2min) break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[j] != id1i) { #endif dx = x2[j] - x1i; dy = y2[j] - y1i; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; jwhich = j; } #ifdef EXCLUDE } #endif } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH /* convert to R indexing */ nnwhich[i] = jwhich + 1; #endif lastjwhich = jwhich; } } 
spatstat/src/lincrossdist.c0000644000176200001440000000473713406057617015614 0ustar liggesusers#include #include #include "chunkloop.h" /* lincrossdist.c Shortest-path distances between pairs of points in linear network $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ lincrossdist Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void lincrossdist(np, xp, yp, /* data points from which distances are measured */ nq, xq, yq, /* data points to which distances are measured */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nq, Nv, i, j, maxchunk; int Psegi, Qsegj, nbi1, nbi2, nbj1, nbj2; double xpi, ypi, xqj, yqj; double d, dPiV1, dPiV2, dV1Qj, dV2Qj, d11, d12, d21, d22; Np = *np; Nq = *nq; Nv = *nv; OUTERCHUNKLOOP(i, Np, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; Psegi = psegmap[i]; nbi1 = from[Psegi]; nbi2 = to[Psegi]; dPiV1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dPiV2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = 0; j < Nq; j++) { xqj = xq[j]; yqj = yq[j]; Qsegj = qsegmap[j]; if(Psegi == Qsegj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xqj, 2) + pow(ypi - yqj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[Qsegj]; nbj2 = to[Qsegj]; dV1Qj 
= EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); dV2Qj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dPiV1 + DPATH(nbi1,nbj1) + dV1Qj; d12 = dPiV1 + DPATH(nbi1,nbj2) + dV2Qj; d21 = dPiV2 + DPATH(nbi2,nbj1) + dV1Qj; d22 = dPiV2 + DPATH(nbi2,nbj2) + dV2Qj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = d; } } } } spatstat/src/digber.c0000644000176200001440000000237413406057617014323 0ustar liggesusers/* digber.c Diggle-Berman function J used in bandwidth selection J(r) = \int_0^(2r) phi(t, r) dK(t) where K is the K-function and phi(t, r) = 2 r^2 * (acos(y) - y sqrt(1 - y^2)) where y = t/(2r). $Revision: 1.8 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include double sqrt(), acos(); /* r is the vector of distance values, starting from 0, with length nr, equally spaced. dK = diff(K) is the vector of increments of the K-function, with length ndK = nr-1. values of J are computed only up to max(r)/2 nrmax = floor(nr/2). 
*/ void digberJ(r, dK, nr, nrmax, ndK, J) /* inputs */ int *nr, *nrmax, *ndK; double *r, *dK; /* output */ double *J; { int i, j, Ni, NdK; double ri, twori, tj, y, phiy, integral; Ni = *nrmax; NdK = *ndK; J[0] = 0.0; for(i = 1; i < Ni; i++) { ri = r[i]; twori = 2 * ri; integral = 0.0; for(j = 0; j < NdK; j++) { tj = r[j]; y = tj/twori; if(y >= 1.0) break; phiy = acos(y) - y * sqrt(1 - y * y); integral += phiy * dK[j]; } J[i] = 2 * ri * ri * integral; } } spatstat/src/lixel.c0000644000176200001440000000767713554500715014213 0ustar liggesusers#include #include /* lixel.c divide a linear network into shorter segments Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Clixellate(ns, fromcoarse, tocoarse, fromfine, tofine, nv, xv, yv, svcoarse, tvcoarse, nsplit, np, spcoarse, tpcoarse, spfine, tpfine) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. The i-th segment will be subdivided into nsplit[i] subsegments. New data will be added at the end of the vectors 'xv' and 'yv' representing additional vertices in the new network. The point pattern data (*np points with local coordinates sp, tp in the coarse network) will be mapped to the new 'fine' network. Points are sorted by 'spcoarse' value. 'xv', 'yv', 'svcoarse', 'tvcoarse' must each have space for (nv + sum(nsplit-1)) entries. 'fromfine', 'tofine' must have length = sum(nsplit). 
*/ int *ns; /* number of segments (input & output) */ int *fromcoarse, *tocoarse; /* endpoints of each segment (input) */ int *fromfine, *tofine; /* endpoints of each segment (output) */ int *nv; /* number of vertices (input & output) */ double *xv, *yv; /* cartesian coords of vertices (input & output) */ int *svcoarse; /* segment id of new vertex in COARSE network */ double *tvcoarse; /* location coordinate of new vertex on COARSE network */ int *nsplit; /* number of pieces into which each segment should be split */ int *np; /* number of data points */ double *tpcoarse, *tpfine; /* location coordinate */ int *spcoarse, *spfine; /* segment id coordinate */ { int Np, oldNs, oldNv, i, j, k, ll; int oldfromi, oldtoi, newlines, newNv, newNs, SegmentForData; double xstart, xend, ystart, yend, xincr, yincr, tn; Np = *np; newNv = oldNv = *nv; oldNs = *ns; newNs = 0; /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Np > 0) ? 
spcoarse[0] : -1; /* loop over line segments in original network */ for(i = 0; i < oldNs; i++) { newlines = nsplit[i]; oldfromi = fromcoarse[i]; oldtoi = tocoarse[i]; /* local coordinates of endpoints of segment, in ***coarse*** network */ svcoarse[oldfromi] = svcoarse[oldtoi] = i; tvcoarse[oldfromi] = 0.0; tvcoarse[oldtoi] = 1.0; if(newlines == 1) { /* copy existing segment to new segment list */ fromfine[newNs] = oldfromi; tofine[newNs] = oldtoi; /* advance pointer */ ++newNs; } else if(newlines > 1) { /* split segment into 'newlines' pieces */ xstart = xv[oldfromi]; ystart = yv[oldfromi]; xend = xv[oldtoi]; yend = yv[oldtoi]; xincr = (xend-xstart)/newlines; yincr = (yend-ystart)/newlines; for(j = 1; j < newlines; j++) { /* create new vertex, number 'newNv' */ xv[newNv] = xstart + j * xincr; yv[newNv] = ystart + j * yincr; /* local coordinates of new vertex relative to ***coarse*** network */ svcoarse[newNv] = i; tvcoarse[newNv] = ((double) j)/((double) newlines); /* create new segment, number 'newNs', ending at new vertex */ fromfine[newNs] = (j == 1) ? oldfromi : (newNv-1); tofine[newNs] = newNv; /* advance */ ++newNv; ++newNs; } /* create segment from last added vertex to end of old segment */ fromfine[newNs] = newNv-1; tofine[newNs] = oldtoi; ++newNs; } /* handle data points lying on current segment i */ while(SegmentForData == i) { if(newlines == 1) { spfine[k] = spcoarse[k]; tpfine[k] = tpcoarse[k]; } else { tn = tpcoarse[k] * newlines; ll = (int) floor(tn); ll = (ll < 0) ? 0 : (ll >= newlines) ? (newlines - 1): ll; tpfine[k] = tn - ll; spfine[k] = newNs - newlines + ll; } ++k; SegmentForData = (k < Np) ? 
spcoarse[k] : -1; } } *nv = newNv; *ns = newNs; } spatstat/src/linalg.c0000755000176200001440000001274213433406761014336 0ustar liggesusers/* linalg.c Home made linear algebra Yes, really $Revision: 1.13 $ $Date: 2019/02/21 02:21:17 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Csumouter Cwsumouter Csum2outer Cwsum2outer Cquadform Csumsymouter Cwsumsymouter */ #include #include #include "chunkloop.h" /* ............... matrices ..............................*/ /* ........................sums of outer products ........*/ /* Csumouter computes the sum of outer products of columns of x y = sum[j] (x[,j] %o% x[,j]) */ void Csumouter(x, n, p, y) double *x; /* p by n matrix */ int *n, *p; double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, maxchunk; register double xij, xkj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { xkj = xcolj[k]; y[k * P + i] += xij * xkj; } } } } } /* Cwsumouter computes the weighted sum of outer products of columns of x y = sum[j] (w[j] * x[,j] %o% x[,j]) */ void Cwsumouter(x, n, p, w, y) double *x; /* p by n matrix */ int *n, *p; double *w; /* weight vector, length n */ double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, maxchunk; register double wj, xij, wjxij, xkj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { wj = w[j]; xcolj = x + j * P; for(i = 0; i < P; i++) { xij = xcolj[i]; wjxij = wj * xij; for(k = 0; k < P; k++) { xkj = xcolj[k]; y[k * P + i] += wjxij * xkj; } } } } } /* Csum2outer computes the sum of outer products of columns of x and y z = sum[j] (x[,j] %o% y[,j]) */ void Csum2outer(x, y, n, px, py, z) double *x, *y; /* 
/*
  Cquadform
  Quadratic form of each column of x with matrix v:
      y[j] = t(x[,j]) %*% v %*% x[,j]
  x is p-by-n (column-major), v is p-by-p, y has length n.
*/
void Cquadform(x, n, p, v, y)
     double *x;               /* p by n matrix */
     int *n, *p;
     double *v;               /* p by p matrix */
     double *y;               /* output vector, length n */
{
  int ncol, dim;
  register int a, b, j, maxchunk;
  register double xa, accum;
  register double *col;

  ncol = *n;
  dim = *p;
  OUTERCHUNKLOOP(j, ncol, maxchunk, 2048) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, ncol, maxchunk, 2048) {
      col = x + j * dim;
      accum = 0;
      for(a = 0; a < dim; a++) {
        xa = col[a];
        /* v[a,b] in column-major storage is v[b * dim + a] */
        for(b = 0; b < dim; b++)
          accum += xa * v[b * dim + a] * col[b];
      }
      y[j] = accum;
    }
  }
}
z[j] = x[,j] %*% v %*% t(y[,j]) */ void Cbiform(x, y, n, p, v, z) double *x, *y; /* p by n matrices */ int *n, *p; double *v; /* p by p matrix */ double *z; /* output vector, length n */ { int N, P; register int i, j, k, maxchunk; register double xij, vik, ykj, zj; register double *xcolj, *ycolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; ycolj = y + j * P; zj = 0; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { ykj = ycolj[k]; vik = v[k * P + i]; zj += xij * vik * ykj; } } z[j] = zj; } } } /* ............... 3D arrays ...................... */ #undef FNAME #undef WEIGHTED /* sumsymouter computes the sum of outer products x[,i,j] %o% x[,j,i] over all pairs i, j */ #define FNAME Csumsymouter #include "sumsymouter.h" #undef FNAME /* wsumsymouter computes the weighted sum of outer products w[i,j] * (x[,i,j] %o% x[,j,i]) over all pairs i, j */ #define FNAME Cwsumsymouter #define WEIGHTED #include "sumsymouter.h" #undef FNAME #undef WEIGHTED spatstat/src/Kborder.c0000755000176200001440000000172213406057617014456 0ustar liggesusers#include #include #include /* Kborder.c Efficient computation of border-corrected estimates of K for large datasets KborderI() Estimates K function, returns integer numerator & denominator KborderD() Estimates K function, returns double precision numerator & denominator Kwborder() Estimates Kinhom. 
/* NOTE(review): K&R-style declaration of the math-library sqrt;
   prefer relying on #include <math.h> — confirm this does not
   conflict with the system prototype before removing it. */
double sqrt();

/*
  Ediggra
  Evaluate the (exponentiated) Diggle-Gratton pairwise interaction
  for each source point against all target points:
      values[j] = prod over targets i (with different id) of
                    0                                if d(i,j) <= delta
                    ((d(i,j) - delta)/(rho - delta)) if delta < d(i,j) <= rho
                    1                                otherwise
  Both patterns must be sorted in increasing order of x coordinate
  (stated in the file header); a sliding window 'ileft' exploits this.
  The id vectors let the same point appear in both patterns without
  interacting with itself.
*/
void Ediggra(nnsource, xsource, ysource, idsource,
             nntarget, xtarget, ytarget, idtarget,
             ddelta, rrho, values)
     /* inputs */
     int *nnsource, *nntarget;      /* numbers of source/target points */
     double *xsource, *ysource, *xtarget, *ytarget;  /* coordinates */
     int *idsource, *idtarget;      /* point identifiers (matching ids = same point) */
     double *ddelta, *rrho;         /* hard core distance delta, interaction range rho */
     /* output */
     double *values;                /* interaction product for each source point */
{
  int nsource, ntarget, maxchunk, j, i, ileft, idsourcej;
  double xsourcej, ysourcej, xleft, dx, dy, dx2, d2;
  double delta, rho, delta2, rho2, rho2pluseps, rhominusdelta;
  double product;
  nsource = *nnsource;
  ntarget = *nntarget;
  delta = *ddelta;
  rho = *rrho;
  if(nsource == 0 || ntarget == 0)
    return;
  rho2 = rho * rho;
  delta2 = delta * delta;
  rhominusdelta = rho - delta;
  /* tolerance added so boundary cases dx ~ rho are not cut off early */
  rho2pluseps = rho2 + EPSILON(rho2);
  ileft = 0;
  OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) {
    R_CheckUserInterrupt();
    INNERCHUNKLOOP(j, nsource, maxchunk, 65536) {
      product = 1;
      xsourcej = xsource[j];
      ysourcej = ysource[j];
      idsourcej = idsource[j];
      /* adjust starting point: first target with x >= xsourcej - rho.
         'ileft' never moves backwards because sources are x-sorted. */
      xleft = xsourcej - rho;
      while((xtarget[ileft] < xleft) && (ileft+1 < ntarget))
        ++ileft;
      /* process until dx > rho (or until product is zero) */
      for(i=ileft; i < ntarget; i++) {
        dx = xtarget[i] - xsourcej;
        dx2 = dx * dx;
        if(dx2 > rho2pluseps)
          break;             /* targets are x-sorted: no later target can be close */
        if(idtarget[i] != idsourcej) {
          dy = ytarget[i] - ysourcej;
          d2 = dx2 + dy * dy;
          if(d2 <= rho2) {
            if(d2 <= delta2) {
              /* inside hard core: whole product is zero */
              product = 0;
              break;
            } else product *= (sqrt(d2) - delta)/rhominusdelta;
          }
        }
      }
      values[j] = product;
    }
  }
}
spasumsymout.h Function definitions for 'sumsymouter' for sparse matrices/arrays This file is #included in sparselinalg.c several times. Macros used FNAME function name DBG (#ifdef) debug WEIGHTS (#ifdef) use weights $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(m, n, lenx, ix, jx, kx, x, flip, #ifdef WEIGHTS lenw, jw, kw, w, #endif y) int *m, *n; /* dimensions of array m * n * n */ int *lenx; /* number of nonzero entries in sparse array x */ int *ix, *jx, *kx; /* indices of entries in sparse array x */ double *x; /* values in sparse array x */ /* NB: ix, jx, kx are assumed to be sorted by order(j,k,i) i.e. in increasing order of j, then k within j, then i within (j,k) */ int *flip; /* reordering of ix, jx, kx, x that would achieve increasing order(k,j,i) */ #ifdef WEIGHTS int *lenw; /* length of jw, kw */ int *jw, *kw; /* indices of entries in sparse matrix w of weights */ /* Assumed sorted by order (j,k) */ double *w; /* values of weights w */ #endif double *y; /* output: full m * m matrix */ { /* Compute the sum of outer(x[,j,k], x[,k,j]) for all j != k */ int M,N,L, i,j,k,ii, l, ll, lstart, lend, t, tstart, tend, r; double xijk, xx; int *it, *jt, *kt; double *xt; #ifdef WEIGHTS int R; double wjk; #endif M = *m; N = *n; L = *lenx; #ifdef WEIGHTS R = *lenw; #endif if(L <= 1 || N <= 1 || M <= 0) return; /* Create space to store array in k-major order*/ it = (int *) R_alloc(L, sizeof(int)); jt = (int *) R_alloc(L, sizeof(int)); kt = (int *) R_alloc(L, sizeof(int)); xt = (double *) R_alloc(L, sizeof(double)); /* copy reordered array */ #ifdef DBG Rprintf("---------- Reordered: -------------------\n"); #endif for(l = 0; l < L; l++) { ll = flip[l]; it[l] = ix[ll]; jt[l] = jx[ll]; kt[l] = kx[ll]; xt[l] = x[ll]; #ifdef DBG Rprintf("%d \t [%d, %d, %d] = %lf\n", l, it[l], jt[l], kt[l], xt[l]); #endif } /* Now process array */ lstart = tstart = r = 0; lend = 
tend = -1; /* to keep compiler happy */ while(lstart < L && tstart < L) { /* Consider a new entry x[,j,k] */ j = jx[lstart]; k = kx[lstart]; #ifdef DBG Rprintf("Entry %d: [, %d, %d]\n", lstart, j, k); #endif #ifdef WEIGHTS /* Find weight w[j,k] */ while(r < R && ((jw[r] < j) || ((jw[r] == j) && (kw[r] < k)))) ++r; if(r < R && jw[r] == j && kw[r] == k) { /* weight w[j,k] is present */ wjk = w[r]; #endif /* Find all entries in x with the same j,k */ for(lend = lstart+1; lend < L && jx[lend] == j && kx[lend] == k; ++lend) ; --lend; #ifdef DBG Rprintf("\t lstart=%d, lend=%d\n", lstart, lend); #endif /* Find corresponding entries in transpose (k'=j, j'=k) */ /* search forward to find start of run */ while(tstart < L && ((kt[tstart] < j) || (kt[tstart] == j && jt[tstart] < k))) ++tstart; #ifdef DBG Rprintf("\t tstart=%d\n", tstart); Rprintf("\t kt[tstart]=%d, jt[tstart]=%d\n", kt[tstart], jt[tstart]); #endif if(tstart < L && kt[tstart] == j && jt[tstart] == k) { /* Both x[,j,k] and x[,k,j] are present so a contribution will occur */ /* seek end of run */ for(tend = tstart+1; tend < L && kt[tend] == j && jt[tend] == k; ++tend) ; --tend; #ifdef DBG Rprintf("\t tend=%d\n", tend); #endif /* Form products */ for(l = lstart; l <= lend; l++) { i = ix[l]; xijk = x[l]; #ifdef DBG Rprintf("Entry %d: [%d, %d, %d] = %lf\n", l, i, j, k, xijk); #endif for(t = tstart; t <= tend; t++) { ii = it[t]; xx = xijk * xt[t]; #ifdef WEIGHTS xx *= wjk; #endif /* increment result at [i, ii] and [ii, i]*/ y[i + M * ii] += xx; /* y[ii + M * i] += xx; */ #ifdef DBG Rprintf("-- matches entry %d: [%d, %d, %d] = %lf\n", t, ii, k, j, xt[t]); Rprintf("++ %lf\n", xx); #endif } } } #ifdef WEIGHTS } #endif lstart = ((lend > lstart) ? lend : lstart) + 1; tstart = ((tend > tstart) ? 
tend : tstart) + 1; } } spatstat/src/areaint.c0000755000176200001440000001641513406057617014516 0ustar liggesusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for an area-interaction process: cif = eta^(1-B) where B = (uncovered area)/(pi r^2) Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define NGRID 16 /* To explore serious bug, #define BADBUG */ #undef BADBUG /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct AreaInt { /* model parameters */ double eta; double r; /* transformations of the parameters */ double r2; double range2; double logeta; int hard; /* periodic distance */ double *period; int per; /* grid counting */ double dx; double xgrid0; int *my; int kdisc; /* scratch space for saving list of neighbours */ int *neighbour; } AreaInt; /* initialiser function */ Cdata *areaintInit(state, model, algo) State state; Model model; Algor algo; { double r, dx, dy, x0; int i, my, kdisc; AreaInt *areaint; /* create storage */ areaint = (AreaInt *) R_alloc(1, sizeof(AreaInt)); /* Interpret model parameters*/ areaint->eta = model.ipar[0]; areaint->r = r = model.ipar[1]; #ifdef BADBUG Rprintf("r = %lf\n", r); #endif areaint->r2 = r * r; areaint->range2 = 4 * r * r; /* square of interaction distance */ /* is the model numerically equivalent to hard core ? */ areaint->hard = (areaint->eta == 0.0); areaint->logeta = (areaint->hard) ? log(DOUBLE_XMIN) : log(areaint->eta); #ifdef BADBUG if(areaint->hard) Rprintf("Hard core recognised\n"); #endif /* periodic boundary conditions? 
*/ areaint->period = model.period; areaint->per = (model.period[0] > 0.0); #ifdef BADBUG if(areaint->per) { Rprintf("*** periodic boundary conditions ***\n"); Rprintf("period = %lf, %lf\n", model.period[0], model.period[1]); } #endif /* grid counting */ dx = dy = areaint->dx = (2 * r)/NGRID; #ifdef BADBUG Rprintf("areaint->dx = %lf\n", areaint->dx); #endif areaint->xgrid0 = -r + dx/2; areaint->my = (int *) R_alloc((long) NGRID, sizeof(int)); kdisc = 0; for(i = 0; i < NGRID; i++) { x0 = areaint->xgrid0 + i * dx; my = floor(sqrt(r * r - x0 * x0)/dy); my = (my < 0) ? 0 : my; areaint->my[i] = my; #ifdef BADBUG Rprintf("\tmy[%ld] = %ld\n", i, my); #endif kdisc += 2 * my + 1; } areaint->kdisc = kdisc; #ifdef BADBUG Rprintf("areaint->kdisc = %ld\n", areaint->kdisc); #endif /* allocate space for neighbour indices */ areaint->neighbour = (int *) R_alloc((long) state.npmax, sizeof(int)); return((Cdata *) areaint); } #ifdef BADBUG void fexitc(); #endif /* conditional intensity evaluator */ double areaintCif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *period, *x, *y; double u, v; double r2, dx, dy, a, range2; double xgrid, ygrid, xgrid0, covfrac, cifval; int kount, kdisc, kx, my, ky; int *neighbour; int nn, k; AreaInt *areaint; areaint = (AreaInt *) cdata; r2 = areaint->r2; range2 = areaint->range2; /* square of interaction distance */ dy = dx = areaint->dx; kdisc = areaint->kdisc; /* pointers */ period = areaint->period; neighbour = areaint->neighbour; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return ((double) 1.0); if(!areaint->per) { /* .......... Euclidean distance .................... First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(ix > 0) { for(j=0; j < ix; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) 
{ /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(nn == 0) { /* no neighbours; no interaction */ cifval = 1.0; return cifval; } else if(areaint->hard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kxmy[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ if(nn > 0) { for(k=0; k < nn; k++) { j = neighbour[k]; a = r2 - pow(xgrid - x[j], 2); if(a > 0) { a -= pow(ygrid - y[j], 2); if(a > 0) { /* point j covers grid point */ ++kount; break; } } } } /* finished consideration of grid point (xgrid, ygrid) */ } } } } else { /* ............. periodic distance ...................... 
First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],period,range2)) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } if(ixp1 < npts) { for(j=ixp1; jhard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kxmy[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ for(k=0; k < nn; k++) { j = neighbour[k]; if(dist2Mthresh(xgrid,ygrid,x[j],y[j],period,r2)) { /* point j covers grid point */ ++kount; break; } } /* finished considering grid point (xgrid,ygrid) */ } } } } /* `kdisc' is the number of grid points in the disc `kount' is the number of COVERED grid points in the disc */ /* Hard core case has been handled. */ /* Usual calculation: covered area fraction */ covfrac = ((double) kount)/((double) kdisc); cifval = exp(areaint->logeta * covfrac); #ifdef BADBUG if(!R_FINITE(cifval)) { Rprintf("Non-finite CIF value\n"); Rprintf("kount=%ld, kdisc=%ld, covfrac=%lf, areaint->logeta=%lf\n", kount, kdisc, covfrac, areaint->logeta); Rprintf("u=%lf, v=%lf\n", u, v); fexitc("Non-finite CIF"); } #endif return cifval; } Cifns AreaIntCifns = { &areaintInit, &areaintCif, (updafunptr) NULL, NO}; spatstat/src/functable.h0000755000176200001440000000310213406057617015030 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions of C structures for spatial statistics function estimates. Usually the estimates are of the form f^(x) = a^(x)/b^(x); we store f^ and also a^ and b^ to cater for applications with replicated data. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. 
typedef struct Ftable {
  /* double precision function table:
     estimate of a function of one argument on an equally spaced grid,
     stored as a ratio so replicated datasets can be pooled */
  double t0;          /* left endpoint of argument range */
  double t1;          /* right endpoint of argument range */
  int n;              /* number of entries */
  double *f;          /* function estimate; f[i] = num[i]/denom[i] */
  double *num;        /* numerator of estimate */
  double *denom;      /* denominator of estimate */
} Ftable;

typedef struct Itable {
  /* integer count table e.g for histograms */
  double t0;          /* left endpoint of argument range */
  double t1;          /* right endpoint of argument range */
  int n;              /* number of entries */
  int *num;           /* counts; usually p[i] = num[i]/denom[i] */
  int *denom;         /* denominators for the ratio estimate */
} Itable;

typedef struct H4table {
  /* Four histograms, for censored data */
  double t0;          /* left endpoint of histogram range */
  double t1;          /* right endpoint of histogram range */
  int n;              /* number of histogram cells */
  int *obs;           /* observed lifetimes: o_i = min(t_i, c_i) */
  int *nco;           /* uncensored lifetimes: o_i for which t_i <= c_i */
  int *cen;           /* censoring times: c_i */
  int *ncc;           /* censor times of uncensored data: c_i for which t_i <= c_i */
  int upperobs;       /* number of o_i that exceed t1 */
  int uppercen;       /* number of c_i that exceed t1 */
} H4table;
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.5 $ $Date: 2013/09/18 04:49:18 $ */ #endif void FNAME(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, maxchunk, i, jleft, jright, jwhich, lastjwhich; double d2, d2min, x1i, y1i, dx, dy, dy2, hu, hu2; #ifdef EXCLUDE int id1i; #endif hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { d2min = hu2; jwhich = -1; x1i = x1[i]; y1i = y1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif if(lastjwhich < npoints2) { /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { dy = y2[jright] - y1i; dy2 = dy * dy; if(dy2 > d2min) /* note that dy2 >= d2min could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #endif dx = x2[jright] - x1i; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; jwhich = jright; } #ifdef EXCLUDE } #endif } /* end forward search */ } if(lastjwhich > 0) { /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dy = y1i - y2[jleft]; dy2 = dy * dy; if(dy2 > d2min) /* note that dy2 >= d2min could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #endif dx = x2[jleft] - x1i; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; jwhich = jleft; } #ifdef EXCLUDE } #endif } /* end backward search */ } /* commit values */ #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH nnwhich[i] = jwhich + 1; /* R indexing 
*/ #endif lastjwhich = jwhich; } } } spatstat/src/tabnum.c0000644000176200001440000000256513441676776014373 0ustar liggesusers/* tabnum.c table(x) or tapply(x, w, sum) where x is numeric and we are given the sorted unique values $Revision: 1.4 $ $Date: 2019/03/12 09:34:33 $ */ #include #include #include "chunkloop.h" void tabnum(nx, x, nv, v, z) int *nx; double *x; /* values (sorted) */ int *nv; double *v; /* unique values (sorted) */ double *z; /* output */ { int i, j, Nx, Nv, maxchunk; double xi; Nx = *nx; Nv = *nv; j = 0; OUTERCHUNKLOOP(i, Nx, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nx, maxchunk, 16384) { xi = x[i]; /* Find the smallest v[j] greater than or equal to x[i] */ for( ; j < Nv && xi > v[j]; j++) ; /* increment */ if(j < Nv) z[j] += 1.0; } } } void tabsumweight(nx, x, w, nv, v, z) int *nx; double *x; /* values */ double *w; /* weights */ int *nv; double *v; /* unique values (sorted) */ double *z; /* output */ { int i, j, Nx, Nv, maxchunk; double xi; Nx = *nx; Nv = *nv; j = 0; OUTERCHUNKLOOP(i, Nx, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nx, maxchunk, 16384) { xi = x[i]; /* Find the smallest v[j] greater than or equal to x[i] */ for(; j < Nv && xi > v[j]; j++) ; /* add weight */ if(j < Nv) z[j] += w[i]; } } } spatstat/src/mhv3.h0000644000176200001440000000060013406057617013737 0ustar liggesusers/* mhv3.h tracking or not Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_TRACKING if(tracking) { /* saving transition history */ #define MH_TRACKING YES #include "mhv4.h" #undef MH_TRACKING } else { /* not saving transition history */ #define MH_TRACKING NO #include "mhv4.h" #undef MH_TRACKING } spatstat/src/seg2pix.c0000755000176200001440000001452513406057617014454 0ustar liggesusers#include #include #include #include #include "chunkloop.h" #undef DEBUG /* seg2pix.c Discretise line segment on pixel grid seg2pixI pixel value is indicator = 1 if any line 
crosses pixel seg2pixN pixel value is (weighted) number of lines crossing pixel seg2pixL pixel value is total (weighted) length of lines inside pixel (rescale R data so that pixels are integer) pixels numbered 0, ..., nx-1 and 0, ..., ny-1 with boundaries at x=0, x=nx, y=0, y=ny. Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define V(I,J) out[(I) + (J) * (Ny)] int clamp(k, n0, n1) int k, n0, n1; { int m; m = k; if(m < n0) m = n0; if(m > n1) m = n1; return(m); } /* function 'seg2pixI' returns indicator = 1 if pixel is hit by any segment */ #define FNAME seg2pixI #undef SUMUP #include "seg2pix.h" #undef FNAME /* function 'seg2pixN' returns (weighted) number of segments hitting pixel */ #define FNAME seg2pixN #define SUMUP #include "seg2pix.h" #undef FNAME #undef SUMUP /* the other one is anomalous... */ void seg2pixL(ns,x0,y0,x1,y1,weights,pixwidth,pixheight,nx,ny,out) int *ns; double *x0,*y0,*x1,*y1,*weights; /* segment coordinates and weights */ double *pixwidth, *pixheight; /* original pixel dimensions */ int *nx, *ny; double *out; /* output matrix */ { int Ns, Nx, Ny, i, j, k, m, mmin, mmax, maxchunk; double x0i, x1i, y0i, y1i; double leni; double xleft, yleft, xright, yright, slope, scalesecant; double xlow, xhigh, ylow, yhigh, invslope, scalecosecant; double xstart, ystart, xfinish, yfinish; double xxx0, xxx1, yyy0, yyy1; int mleft, mright, kstart, kfinish, kmin, kmax; double pwidth, pheight, pwidth2, pheight2; double wti; Ns = *ns; Nx = *nx; Ny = *ny; /* one scaled x unit = 'pwidth' original x units one scaled y unit = 'pheight' original y units */ pwidth = *pixwidth; pheight = *pixheight; pwidth2 = pwidth * pwidth; pheight2 = pheight * pheight; /* zero the matrix */ for(k = 0; k < Ny - 1; k++) for(j = 0; j < Nx - 1; j++) V(k, j) = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 8196) { x0i = x0[i]; y0i = y0[i]; x1i = x1[i]; y1i = y1[i]; wti = 
weights[i]; leni = sqrt(pwidth2 * pow(x1i - x0i, 2) + pheight2 * pow(y1i-y0i, 2)); #ifdef DEBUG Rprintf("(%lf, %lf) to (%lf, %lf), length %lf\n", x0i, y0i, x1i, y1i, leni); #endif if(leni < 0.001) { /* tiny segment */ #ifdef DEBUG Rprintf("tiny\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) += wti * leni; } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { /* contained in one cell */ #ifdef DEBUG Rprintf("contained in one cell\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) += wti * leni; } else if(floor(y1i) == floor(y0i)) { /* horizontal */ #ifdef DEBUG Rprintf("horizontal\n"); #endif j = clamp((int) floor(y1i), 0, Ny-1); if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } mmin = clamp((int) floor(xleft), 0, Nx-1); mmax = clamp((int) floor(xright), 0, Nx-1); slope = (yright - yleft)/(xright - xleft); scalesecant = wti * sqrt(pwidth2 + slope * slope * pheight2); /* For this slope, one scaled x unit means 'pwidth' original x units and slope * pheight original y units i.e. line length sqrt(pwidth^2 + slope^2 * pheight^2) */ for(k = mmin; k <= mmax; k++) { xstart = (k == mmin) ? xleft : k; xfinish = (k == mmax) ? 
xright : (k+1); V(j,k) += (xfinish - xstart) * scalesecant; } } else if(floor(x1i) == floor(x0i)) { /* vertical */ #ifdef DEBUG Rprintf("vertical\n"); #endif k = clamp((int) floor(x1i), 0, Nx-1); if(y1i > y0i) { xlow = x0i; ylow = y0i; xhigh = x1i; yhigh = y1i; } else { xlow = x1i; ylow = y1i; xhigh = x0i; yhigh = y0i; } mmin = clamp((int) floor(ylow), 0, Ny-1); mmax = clamp((int) floor(yhigh), 0, Ny-1); invslope = (xhigh - xlow)/(yhigh - ylow); scalecosecant = wti * sqrt(pheight2 + invslope * invslope * pwidth2); #ifdef DEBUG Rprintf("i = %d\n", i); Rprintf("inverse slope = %lf\n", invslope); Rprintf("scaled cosecant = %lf\n", scalecosecant); #endif /* For this slope, one scaled y unit means 'pheight' original y units and invslope * pwidth original x units i.e. line length sqrt(pheight^2 + invslope^2 * pwidth^2) */ for(j = mmin; j <= mmax; j++) { ystart = (j == mmin)? ylow : j; yfinish = (j == mmax)? yhigh : (j+1); V(j,k) += (yfinish - ystart) * scalecosecant; } } else { /* general case */ #ifdef DEBUG Rprintf("general\n"); #endif if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } slope = (yright - yleft)/(xright - xleft); mleft = clamp((int) floor(xleft), 0, Nx-1); mright = clamp((int) floor(xright), 0, Nx-1); #ifdef DEBUG Rprintf("column range [%d, %d]\n", mleft, mright); #endif /* treat each vertical slice */ for(m = mleft; m <= mright; m++) { if(m == mleft) { xstart = xleft; ystart = yleft; } else { xstart = m; ystart = yleft + slope * (xstart - xleft); } if(m == mright) { xfinish = xright; yfinish = yright; } else { xfinish = m+1; yfinish = yleft + slope * (xfinish - xleft); } kstart = clamp((int) floor(ystart), 0, Ny-1); kfinish = clamp((int) floor(yfinish), 0, Ny-1); if(ystart < yfinish) { kmin = kstart; kmax = kfinish; ylow = ystart; yhigh = yfinish; } else { kmin = kfinish; kmax = kstart; ylow = yfinish; yhigh = ystart; } #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", 
m, kmin, kmax); #endif for(k = kmin; k <= kmax; k++) { yyy0 = (k == kmin) ? ylow : k; yyy1 = (k == kmax) ? yhigh : (k+1); xxx0 = xstart + (yyy0 - ystart)/slope; xxx1 = xstart + (yyy1 - ystart)/slope; V(k, m) += wti * sqrt(pow(yyy1 - yyy0, 2) * pheight2 + pow(xxx1 - xxx0, 2) * pwidth2); } } } } } #ifdef DEBUG Rprintf("done.\n"); #endif } spatstat/src/mhv2.h0000644000176200001440000000056313406057617013746 0ustar liggesusers/* mhv2.h single interaction or hybrid Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef MH_SINGLE if(Ncif == 1) { /* single interaction */ #define MH_SINGLE YES #include "mhv3.h" #undef MH_SINGLE } else { /* hybrid interaction */ #define MH_SINGLE NO #include "mhv3.h" #undef MH_SINGLE } spatstat/src/strauss.c0000755000176200001440000000474313115271120014560 0ustar liggesusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Strauss process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Strauss { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; } Strauss; /* initialiser function */ Cdata *straussinit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Strauss *strauss; strauss = (Strauss *) R_alloc(1, sizeof(Strauss)); /* Interpret model parameters*/ strauss->gamma = model.ipar[0]; strauss->r = model.ipar[1]; /* No longer passed as r^2 */ strauss->r2 = strauss->r * strauss->r; strauss->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Strauss gamma=%lf, r=%lf\n", strauss->gamma, strauss->r); #endif /* is the model numerically equivalent to hard core ? */ strauss->hard = (strauss->gamma < DOUBLE_EPS); strauss->loggamma = (strauss->hard) ? 0 : log(strauss->gamma); /* periodic boundary conditions? 
*/ strauss->per = (model.period[0] > 0.0); return((Cdata *) strauss); } /* conditional intensity evaluator */ double strausscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, cifval; Strauss *strauss; DECLARE_CLOSE_VARS; strauss = (Strauss *) cdata; r2 = strauss->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; jperiod, r2)) ++kount; } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j], r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; jhard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((strauss->loggamma) * kount); return cifval; } Cifns StraussCifns = { &straussinit, &strausscif, (updafunptr) NULL, NO}; spatstat/src/PerfectDiggleGratton.h0000644000176200001440000001342113406057617017132 0ustar liggesusers // ........................... Diggle-Gratton process .......................... 
// $Revision: 1.5 $ $Date: 2012/03/10 11:22:56 $ class DiggleGrattonProcess : public PointProcess { public: double beta, delta, rho, kappa, rhominusdelta, deltasquared, rhosquared; DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k); ~DiggleGrattonProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DiggleGrattonProcess::DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; delta = d; rho = r; kappa = k; deltasquared = delta * delta; rhosquared = rho * rho; rhominusdelta = rho - delta; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DiggleGrattonProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { if(dsquared < deltasquared) { rtn = 0; } else { dist = sqrt(dsquared); t = (dist - delta)/rhominusdelta; rtn = pow(t, kappa); } } return(rtn); } void DiggleGrattonProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DiggleGrattonProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DiggleGrattonProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DiggleGrattonProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; 
TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DiggleGrattonProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectDiggleGratton(SEXP beta, SEXP delta, SEXP rho, SEXP kappa, SEXP xrange, SEXP yrange) { // input parameters double Beta, Delta, Rho, Kappa, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(delta = AS_NUMERIC(delta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(kappa = AS_NUMERIC(kappa)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Delta = *(NUMERIC_POINTER(delta)); Rho = *(NUMERIC_POINTER(rho)); Kappa = *(NUMERIC_POINTER(kappa)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise DiggleGratton point process DiggleGrattonProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Delta,Rho,Kappa); // Initialise point pattern Point2Pattern 
ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat/src/linSnncross.h0000644000176200001440000000661313406057617015407 0ustar liggesusers/* linSnncross.h Function body definitions with macros Sparse representation of network $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros used: FNAME name of function WHICH whether 'nnwhich' is required HUH debugging ! Data points must be ordered by segment index ! 
*/ void FNAME(np, sp, tp, /* data points 'from' (ordered by sp) */ nq, sq, tq, /* data points 'to' (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *sp, *sq; /* integer vectors (mappings) */ double *tp, *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j, ivleft, ivright, jfirst, jlast, k; double d, hugevalue, slen, tpi; double *dminvert; /* min dist from each vertex */ #ifdef WHICH int *whichvert; /* which min from each vertex */ #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* First compute min distance to target set from each vertex */ dminvert = (double *) R_alloc(Nv, sizeof(double)); #ifdef WHICH whichvert = (int *) R_alloc(Nv, sizeof(int)); Clinvwhichdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert, whichvert); #else Clinvdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert); #endif #ifdef HUH Rprintf("Initialise answer\n"); #endif /* initialise nn distances from source points */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* run through all source points */ #ifdef HUH Rprintf("Run through source points\n"); #endif jfirst = 0; for(i = 0; i < Np; i++) { tpi = tp[i]; k = sp[i]; /* segment containing this point */ slen = seglen[k]; ivleft = from[k]; ivright = to[k]; #ifdef HUH Rprintf("Source point %d lies on segment %d = [%d,%d]\n", i, k, ivleft, ivright); #endif d = slen * tpi + dminvert[ivleft]; if(nndist[i] > d) { #ifdef HUH 
Rprintf("\tMapping to left endpoint %d, distance %lf\n", ivleft, d); #endif nndist[i] = d; #ifdef WHICH nnwhich[i] = whichvert[ivleft]; #endif } d = slen * (1.0 - tpi) + dminvert[ivright]; if(nndist[i] > d) { #ifdef HUH Rprintf("\tMapping to right endpoint %d, distance %lf\n", ivright, d); #endif nndist[i] = d; #ifdef WHICH nnwhich[i] = whichvert[ivright]; #endif } /* find any target points in this segment */ while(jfirst < Nq && sq[jfirst] < k) jfirst++; jlast = jfirst; while(jlast < Nq && sq[jlast] == k) jlast++; --jlast; /* if there are no such points, then jlast < jfirst */ if(jfirst <= jlast) { for(j = jfirst; j <= jlast; j++) { d = slen * fabs(tq[j] - tpi); if(nndist[i] > d) { nndist[i] = d; #ifdef WHICH nnwhich[i] = j; #endif } } } } } spatstat/src/knndist.h0000644000176200001440000000744313406057617014550 0ustar liggesusers/* knndist.h Code template for C functions supporting knndist and knnwhich THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER This code is #included multiple times in knndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ */ void FNAME(n, kmax, x, y, #ifdef DIST nnd, #endif #ifdef WHICH nnwhich, #endif huge) /* inputs */ int *n, *kmax; double *x, *y, *huge; /* output matrices (npoints * kmax) in ROW MAJOR order */ #ifdef DIST double *nnd; #endif #ifdef WHICH int *nnwhich; #endif { int npoints, maxchunk, nk, nk1, i, k, k1, left, right, unsorted; double d2, d2minK, xi, yi, dx, dy, dy2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif hu = *huge; hu2 = hu * hu; npoints = *n; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } xi = x[i]; yi = y[i]; /* search backward */ for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2minK) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = left; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search forward */ for(right = i + 1; right < npoints; ++right) { #ifdef 
SPATSTAT_DEBUG Rprintf("R"); #endif dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2minK) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = right; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search finished for point i */ #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* end of i loop */ } } } spatstat/src/nndistance.c0000755000176200001440000001071613406057617015217 0ustar liggesusers/* nndistance.c Nearest Neighbour Distances between points Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012 Licence: GNU Public Licence >= 2 $Revision: 1.22 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER SINGLE LIST: nndistsort Nearest neighbour distances nnwhichsort Nearest neighbours nnsort Nearest neighbours & distances ONE LIST TO ANOTHER LIST: nnXdist Nearest neighbour distance from one list to another nnXwhich Nearest neighbour ID from one list to another nnX Nearest neighbour ID & distance from one list to another ONE LIST TO ANOTHER OVERLAPPING LIST: nnXEdist Nearest neighbour distance from one list to another, overlapping nnXEwhich Nearest neighbour ID from one list to another, overlapping nnXE Nearest neighbour ID & distance */ #undef SPATSTAT_DEBUG #include #include #include #include 
"yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ /* ------------------- one point pattern X --------------------- */ /* nndistsort: nearest neighbour distances */ #undef FNAME #undef DIST #undef WHICH #define FNAME nndistsort #define DIST #include "nndist.h" /* nnwhichsort: id of nearest neighbour */ #undef FNAME #undef DIST #undef WHICH #define FNAME nnwhichsort #define WHICH #include "nndist.h" /* nnsort: distance & id of nearest neighbour */ #undef FNAME #undef DIST #undef WHICH #define FNAME nnsort #define DIST #define WHICH #include "nndist.h" /* --------------- two distinct point patterns X and Y ----------------- */ /* general interface */ void nnXinterface(n1, x1, y1, id1, n2, x2, y2, id2, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; { void nnX(), nnXdist(), nnXwhich(); void nnXE(), nnXEdist(), nnXEwhich(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { nnX(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(di) { nnXdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(wh) { nnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } } else { if(di && wh) { nnXE(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(di) { nnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(wh) { nnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } } } /* nnXdist: nearest neighbour distance (from each point of X to the nearest point of Y) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXdist #define DIST #include "nndistX.h" /* nnXwhich: nearest neighbour id */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXwhich #define WHICH #include "nndistX.h" /* nnX: nearest 
neighbour distance and id */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnX #define DIST #define WHICH #include "nndistX.h" /* --------------- two point patterns X and Y with common points --------- */ /* Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. */ /* nnXEdist: similar to nnXdist but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXEdist #define DIST #define EXCLUDE #include "nndistX.h" /* nnXEwhich: similar to nnXwhich but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXEwhich #define WHICH #define EXCLUDE #include "nndistX.h" /* nnXE: similar to nnX but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXE #define DIST #define WHICH #define EXCLUDE #include "nndistX.h" spatstat/src/bdrymask.c0000644000176200001440000000163213406057617014677 0ustar liggesusers/* bdrymask.c Boundary pixels of binary mask Copyright (C) Adrian Baddeley, Rolf Turner and Ege Rubak 2014 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2016/02/02 01:29:50 $ */ #include #include #include void bdrymask(nx, ny, m, b) /* inputs */ int *nx, *ny, *m; /* outputs */ int *b; { int Nxcol, Nyrow, Nx1, Ny1; int i, j, mij; Nxcol = *nx; Nyrow = *ny; Nx1 = Nxcol - 1; Ny1 = Nyrow - 1; #define MAT(A,I,J) A[(I) + (J) * Nyrow] /* loop over pixels */ for(j = 0; j < Nxcol; j++) { R_CheckUserInterrupt(); for(i = 0; i < Nyrow; i++) { mij = MAT(m, i, j); if(i == 0 || i == Ny1 || j == 0 || j == Nx1) { MAT(b, i, j) = mij; } else if((mij != MAT(m, (i-1), j)) || (mij != MAT(m, (i+1), j)) || (mij != MAT(m, i, (j-1))) || (mij != MAT(m, i, (j+1)))) { MAT(b, i, j) = 1; } } } } 
spatstat/src/hasclose.h0000644000176200001440000001621713406057617014676 0ustar liggesusers/* hasclose.h Function definitions to be #included in hasclose.c several times with different values of macros. Macros used: CLOSEFUN name of function for pairs in a single pattern CROSSFUN name of function for pairs between two patterns ZCOORD if defined, coordinates are 3-dimensional TORUS if defined, distances are periodic BUG debugger flag $Revision: 1.11 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void CLOSEFUN(n, x, y, #ifdef ZCOORD z, #endif r, /* distance deemed 'close' */ #ifdef TORUS b, /* box dimensions */ #endif t) /* result: true/false */ int *n, *t; double *x, *y, *r; #ifdef ZCOORD double *z; #endif #ifdef TORUS double *b; #endif { double xi, yi, rmax, r2max, rmaxplus, dx, dy, d2minr2; #ifdef ZCOORD double zi, dz; #endif int N, maxchunk, i, j; #ifdef TORUS double Bx, By, Hy; #ifdef ZCOORD double Bz, Hz; #endif #endif N = *n; rmax = *r; r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef TORUS Bx = b[0]; By = b[1]; Hy = By/2.0; #ifdef ZCOORD Bz = b[2]; Hz = Bz/2.0; #endif #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef ZCOORD zi = z[i]; #endif if(i > 0) { /* scan backward from i */ for(j = i - 1; j >= 0; j--) { dx = xi - x[j]; if(dx > rmaxplus) break; dy = y[j] - yi; #ifdef TORUS if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #endif d2minr2 = dx * dx + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z[j] - zi; #ifdef TORUS if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; #endif d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { /* pair (i, j) is close */ t[i] = t[j] = 1; } #ifdef ZCOORD } #endif } #ifdef TORUS /* wrap-around */ /* scan forward from 0 */ for(j = 0; j < i; j++) { dx = Bx + x[j] - xi; 
if(dx > rmaxplus) break; dy = y[j] - yi; #ifdef TORUS if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #endif d2minr2 = dx * dx + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z[j] - zi; #ifdef TORUS if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; #endif d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { /* pair (i, j) is close */ t[i] = t[j] = 1; } #ifdef ZCOORD } #endif } #endif } } } } /* ........................................................ */ void CROSSFUN(n1, x1, y1, #ifdef ZCOORD z1, #endif n2, x2, y2, #ifdef ZCOORD z2, #endif r, #ifdef TORUS b, /* box dimensions (same for both patterns!!) */ #endif t) int *n1, *n2, *t; double *x1, *y1, *x2, *y2, *r; #ifdef ZCOORD double *z1, *z2; #endif #ifdef TORUS double *b; #endif { /* lengths */ int N1, N2, maxchunk; /* distance parameter */ double rmax, r2max, rmaxplus; /* indices */ int i, j, jleft; /* temporary values */ double x1i, y1i, xleft, dx, dy, dx2, d2minr2; #ifdef ZCOORD double z1i, dz; #endif #ifdef TORUS double Bx, By, Hx, Hy; int jright; #ifdef ZCOORD double Bz, Hz; #endif #endif N1 = *n1; N2 = *n2; rmax = *r; r2max = rmax * rmax; rmaxplus = rmax + rmax/16.0; #ifdef TORUS Bx = b[0]; By = b[1]; Hx = Bx/2.0; Hy = By/2.0; #ifdef BUG Rprintf("=> PERIODIC: Bx = %lf, By = %lf <= \n", Bx, By); #endif #ifdef ZCOORD Bz = b[2]; Hz = Bz/2.0; #endif #endif if(N1 > 0 && N2 > 0) { i = 0; maxchunk = 0; jleft = 0; while(i < N1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N1) maxchunk = N1; for( ; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; #ifdef ZCOORD z1i = z1[i]; #endif #ifdef BUG Rprintf("------ i = %d --------\n", i); Rprintf(" [%d] = (%lf, %lf)\n", i, x1i, y1i); #endif /* adjust starting point jleft */ xleft = x1i - rmaxplus; while((x2[jleft] < xleft) && (jleft+1 < N2)) ++jleft; #ifdef BUG Rprintf("\t jleft = %d\n", jleft); #endif /* process from j = jleft until dx > rmax + epsilon */ for(j=jleft; j < N2; j++) { dx = x2[j] - x1i; #ifdef BUG Rprintf("\t Central loop, j = %d, dx 
= %lf\n", j, dx); #endif if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; #ifdef BUG Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy); #endif #ifdef TORUS if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #ifdef BUG Rprintf("\t\t periodic dy = %lf\n", dy); #endif #endif d2minr2 = dx2 + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z2[j] - z1i; #ifdef TORUS if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; #endif d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { #ifdef BUG Rprintf("\t\t Point %d has close neighbour\n", i); #endif /* point i has a close neighbour */ t[i] = 1; break; } #ifdef ZCOORD } #endif } #ifdef TORUS jright = j; /* wrap-around at start */ #ifdef BUG Rprintf("\t Wrap around at start for j = 0 to %d\n", jleft); #endif for(j=0; j < jleft; j++) { dx = x1i - x2[j]; #ifdef BUG Rprintf("\t\t j = %d, dx = %lf\n", j, dx); #endif if(dx < 0.0) dx = -dx; if(dx > Hx) dx = Bx - dx; #ifdef BUG Rprintf("\t\t periodic dx = %lf\n", dx); #endif if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; #ifdef BUG Rprintf("\t\t Did not break\n\t\t dy = %lf\n", dy); #endif if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #ifdef BUG Rprintf("\t\t periodic dy = %lf\n", dy); #endif d2minr2 = dx2 + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z2[j] - z1i; if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { /* point i has a close neighbour */ #ifdef BUG Rprintf("\t\t Point %d has close neighbour\n", i); #endif t[i] = 1; break; } #ifdef ZCOORD } #endif } /* wrap around at end */ #ifdef BUG Rprintf("\t Wrap around at end for j = %d to %d\n", N2-1, jright); #endif for(j=N2-1; j >= jright; j--) { dx = x1i - x2[j]; #ifdef BUG Rprintf("\t\t j = %d, dx = %lf\n", j, dx); #endif if(dx < 0.0) dx = -dx; if(dx > Hx) dx = Bx - dx; #ifdef BUG Rprintf("\t\t periodic dx = %lf\n", dx); #endif if(dx > rmaxplus) break; dx2 = dx * dx; dy = y2[j] - y1i; #ifdef BUG Rprintf("\t\t Did not break\n\t\t dy 
= %lf\n", dy); #endif if(dy < 0.0) dy = -dy; if(dy > Hy) dy = By - dy; #ifdef BUG Rprintf("\t\t periodic dy = %lf\n", dy); #endif d2minr2 = dx2 + dy * dy - r2max; #ifdef ZCOORD if(d2minr2 <= 0.0) { dz = z2[j] - z1i; if(dz < 0.0) dz = -dz; if(dz > Hz) dz = Bz - dz; d2minr2 = d2minr2 + dz * dz; #endif if(d2minr2 <= 0.0) { #ifdef BUG Rprintf("\t\t Point %d has close neighbour\n", i); #endif /* point i has a close neighbour */ t[i] = 1; break; } #ifdef ZCOORD } #endif } #endif } } } } spatstat/src/close3pair.c0000644000176200001440000000334613406057617015133 0ustar liggesusers/* close3pair.c $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ closepairs and crosspairs for 3D Assumes point pattern is sorted in increasing order of x coordinate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #define OK 0 #define ERR_OVERFLOW 1 #define ERR_ALLOC 2 #define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int)) #define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double)) double sqrt(); /* ....... 
define functions, using closefuns.h ........*/ /* return only one of the pairs (i,j) and (j,i) */ #define SINGLE /* enable 3D code */ #define ZCOORD /* return i, j only */ #define CLOSEFUN close3IJpairs #define CROSSFUN cross3IJpairs #undef THRESH #undef COORDS #undef DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, d */ #define CLOSEFUN close3IJDpairs #define CROSSFUN cross3IJDpairs #undef THRESH #undef COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, xi, yi, zi, xj, yj, zj, dx, dy, dz, d */ #define CLOSEFUN close3pairs #define CROSSFUN cross3pairs #undef THRESH #define COORDS #define DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST /* return i, j, t where t = 1{d < s} */ #define CLOSEFUN close3thresh #define CROSSFUN cross3thresh #define THRESH #undef COORDS #undef DIST #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS #undef DIST spatstat/vignettes/0000755000176200001440000000000013624161300014121 5ustar liggesusersspatstat/vignettes/updates.Rnw0000644000176200001440000030022113623712063016263 0ustar liggesusers\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} % \VignetteIndexEntry{Summary of Recent Updates to Spatstat} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} <>= library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Summary of recent updates to \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle 
\thispagestyle{empty} This is a summary of changes that have been made to the \spst\ package since the publication of the accompanying book \cite{baddrubaturn15}. <>= readSizeTable <- function(fname) { if(is.null(fname) || !file.exists(fname)) return(NULL) a <- read.table(fname, header=TRUE) a$date <- as.Date(a$date) return(a) } getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") { fname <- system.file("doc", tablename, package=packagename) readSizeTable(fname) } counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines") mergeSizeTables <- function(a, b) { if(is.null(b)) return(a) for(i in seq_len(nrow(a))) { j <- which(b$date <= a$date[i]) if(length(j) > 0) a[i,counts] <- a[i,counts] + b[max(j), counts] } return(a) } z <- getSizeTable() zutils <- getSizeTable("spatstat.utils") zdata <- getSizeTable("spatstat.data") zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt") z <- mergeSizeTables(z, zutils) z <- mergeSizeTables(z, zdata) z <- mergeSizeTables(z, zlocal) # currentcount <- z[nrow(z), counts] bookcount <- z[z$version == "1.42-0", counts] changes <- currentcount - bookcount newobj <- changes[["nobjects"]] newdat <- changes[["ndatasets"]] + 1 # counting rule doesn't detect redwood3 newcode <- changes[["Rlines"]] + changes[["srclines"]] bookcode <- bookcount[["Rlines"]] + bookcount[["srclines"]] growth <- signif((100 * newcode)/bookcode, digits=2) @ %$ The book \cite{baddrubaturn15}, published in December 2015, covers everything in \spst\ up to version \texttt{1.42-0}, released in May 2015. The \spst\ package has grown by \Sexpr{growth}\% since the book was published. This document summarises the most important changes. The current version of \spst\ is \texttt{\Sexpr{sversion}}. It contains \Sexpr{newobj} new functions and \Sexpr{newdat} new datasets introduced after May 2015. 
<>= options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0)))) Plot <- function(fmla, ..., dat=z) { yvals <- eval(as.expression(fmla[[2]]), envir=dat) plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals))) } @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.4\textwidth} \centerline{ <>= Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", main="Spatstat growth") lines(srclines/1000 ~ date, data=z) text(as.Date("2015-01-01"), 9.5, "C code") text(as.Date("2015-01-01"), 60, "R code") @ } \tableofcontents \newpage \section{\pkg{spatstat} is splitting into parts} \pkg{spatstat} is being split into sub-packages, to satisfy the requirements of CRAN. This should not affect the user: existing code will continue to work in the same way. Typing \code{library(spatstat)} will load the familiar \pkg{spatstat} package which can be used as before. \subsection{The parts of \pkg{spatstat}} Currently there are three sub-packages, called \pkg{spatstat.utils}, \pkg{spatstat.data} and \pkg{spatstat}. \begin{itemize} \item The \code{spatstat} package contains the main code. \item The \pkg{spatstat.data} package now contains all the datasets for \pkg{spatstat}. \item The \pkg{spatstat.utils} package contains utility functions for \pkg{spatstat}. \end{itemize} Typing \code{library(spatstat)} will automatically load \pkg{spatstat.data} and silently ``import'' \pkg{spatstat.utils}. To access the functions in \pkg{spatstat.utils} directly, you would need to type \code{library(spatstat.utils)}. \subsection{Extension packages} There are also extension packages which provide additional capabilities and must be loaded explicitly when you need them. Currently there are three extension packages: \begin{itemize} \item \pkg{spatstat.local} for local model-fitting, \item \pkg{spatstat.sphere} for analysing point patterns on a sphere, \item \pkg{spatstat.Knet} for analysing point patterns on a network. 
\end{itemize} \pagebreak \section{Precis of all changes} Here is the text from the `overview' sections of the News and Release Notes for each update. \begin{itemize} \item \spst\ now Imports the package \pkg{spatstat.utils}. \item \spst\ now requires the package \pkg{spatstat.data} which contains the datasets. \item \spst\ now suggests the package \pkg{fftwtools}. \item Tessellations on a linear network can now have marks. \item More functions for manipulating tessellations on a linear network. \item New functions for simulating point processes on a linear network. \item Nearest Neighbour Index function can now return mark values. \item Index of repulsion strength for determinantal point process models. \item Nearest neighbours between two point patterns in any number of dimensions. \item More options for handling bad simulation outcomes in \texttt{envelope}. \item \texttt{mppm} accepts case weights. \item Bandwidth selectors warn about extreme values of bandwidth. \item Fast kernel estimation on a linear network using 2D kernels. \item Extension of Scott's rule for bandwidth selection. \item Cross-validated bandwidth selection on a linear network. \item Random thinning and random labelling of spatial patterns extended to different types of pattern. \item Confidence intervals for multitype $K$ function. \item Envelopes for balanced two-stage test \item Extensions to adaptive intensity estimators \item `Dartboard' tessellation using polar coordinates. \item Standard error calculation for inverse-distance weighting. \item Kernel estimate of intensity as a \texttt{function(x,y)}. \item Extract discrete and continuous components of a measure. \item Improvements and extensions to leverage and influence code. \item Plot a line segment pattern using line widths. \item Find connected components of each tile in a tessellation. \item Geometrical operations on \texttt{distfun} objects. \item Join vertices in a linear network. 
\item Distance map and contact distribution for rectangular structuring element. \item Lurking variable plot for models fitted to several point patterns. \item New dataset \code{cetaceans}. \item Gamma correction for colour maps and image plots. \item Class \code{units} has been renamed \code{unitname} to avoid package collision. \item More support for tessellations. \item Fixed longstanding bug in leverage and influence diagnostics. \item Improvements and bug fixes for leverage and influence diagnostics. \item Tighter bounding box for \code{psp}, \code{lpp}, \code{linnet} objects. \item Improved layout in \code{plot.solist} \item Tools to increase colour saturation. \item Connected components of a 3D point pattern. \item Accelerated computations on linear networks. \item Accelerated simulation of determinantal point processes. \item Improved printing of 3D point patterns. \item Minor corrections to handling of unitnames. \item Improvements to \texttt{ppm} and \texttt{update.ppm}. \item Correction to \texttt{lohboot} \item Numerous bug fixes for linear networks code. \item Now handles disconnected linear networks. \item Effect function is now available for all types of fitted model. \item Geometric-mean smoothing. \item A model can be fitted or re-fitted to a sub-region of data. \item New fast algorithm for kernel smoothing on a linear network. \item Leverage and influence diagnostics extended to Poisson/Gibbs models fitted by logistic composite likelihood. \item Two-stage Monte Carlo test. \item Dirichlet/Voronoi tessellation on a linear network. \item Thinning of point patterns on a linear network. \item More support for functions and tessellations on a linear network. \item Bandwidth selection for pair correlation function. \item Pooling operations improved. \item Operations on signed measures. \item Operations on lists of pixel images. \item Improved pixellation of point patterns. \item Stieltjes integral extended. \item Subset operators extended. 
\item Greatly accelerated \texttt{rmh} when using \texttt{nsave} \item Sufficient Dimension Reduction for point processes. \item Alternating Gibbs Sampler for point process simulation. \item New class of spatially sampled functions. \item ROC and AUC extended to other types of point patterns and models. \item More support for linear networks. \item More support for infinite straight lines. \item \spst\ now depends on the packages \pkg{nlme} and \pkg{rpart}. \item Important bug fix in \code{linearK}, \code{linearpcf} \item Changed internal format of \code{linnet} and \code{lpp} objects. \item Faster computation in linear networks. \item Bias correction techniques. \item Bounding circle of a spatial object. \item Option to plot marked points as arrows. \item Kernel smoothing accelerated. \item Workaround for bug in some graphics drivers affecting image orientation. \item Non-Gaussian smoothing kernels. \item Improvements to inhomogeneous multitype $K$ and $L$ functions. \item Variance approximation for pair correlation function. \item Leverage and influence for multitype point process models. \item Functions for extracting components of vector-valued objects. \item Recursive-partition point process models. \item Minkowski sum, morphological dilation and erosion with any shape. \item Minkowski sum also applicable to point patterns and line segment patterns. \item Important bug fix in Smooth.ppp \item Important bug fix in spatial CDF tests. \item More bug fixes for replicated patterns. \item Simulate a model fitted to replicated point patterns. \item Inhomogeneous multitype $F$ and $G$ functions. \item Summary functions recognise \texttt{correction="all"} \item Leverage and influence code handles bigger datasets. \item More support for pixel images. \item Improved progress reports. \item New dataset \texttt{redwood3} \item Fixed namespace problems arising when spatstat is not loaded. \item Important bug fix in leverage/influence diagnostics for Gibbs models. 
\item Surgery with linear networks. \item Tessellations on a linear network. \item Laslett's Transform. \item Colour maps for point patterns with continuous marks are easier to define. \item Pair correlation function estimates can be pooled. \item Stipulate a particular version of a package. \item More support for replicated point patterns. \item More support for tessellations. \item More support for multidimensional point patterns and point processes. \item More options for one-sided envelopes. \item More support for model comparison. \item Convexifying operation. \item Subdivide a linear network. \item Penttinen process can be simulated (by Metropolis-Hastings or CFTP). \item Calculate the predicted variance of number of points. \item Accelerated algorithms for linear networks. \item Quadrat counting accelerated, in some cases. \item Simulation algorithms have been accelerated; simulation outcomes are \emph{not} identical to those obtained from previous versions of \spst. \item Determinantal point process models. \item Random-effects and mixed-effects models for replicated patterns. \item Dao-Genton test, and corresponding simulation envelopes. \item Simulated annealing and simulated tempering. \item spatstat colour tools now handle transparent colours. \item Improvements to \verb![! and \texttt{subset} methods \item Extensions to kernel smoothing on a linear network. \item Support for one-dimensional smoothing kernels. \item Mark correlation function may include weights. \item Cross-correlation version of the mark correlation function. \item Penttinen pairwise interaction model. \item Improvements to simulation of Neyman-Scott processes. \item Improvements to fitting of Neyman-Scott models. \item Extended functionality for pixel images. \item Fitted intensity on linear network \item Triangulation of windows. \item Corrected an edge correction. \end{itemize} \section{New datasets} The following datasets have been added to the package. 
\begin{itemize} \item \texttt{austates}: The states and large mainland territories of Australia represented as polygonal regions forming a tessellation. \item \texttt{redwood3}: a more accurate version of the \texttt{redwood} data. \item \texttt{cetaceans}: point patterns of whale and dolphin sightings. \end{itemize} \section{New classes} \begin{itemize} \item \texttt{ssf}: Class of spatially sampled functions. \end{itemize} \section{New Functions} Following is a list of all the functions that have been added. \begin{itemize} \item \texttt{is.linim}: test whether an object is a pixel image on a linear network (class \verb!"linim"!). \item \texttt{rcelllpp}: Simulate the cell point process on a linear network. \item \texttt{rSwitzerlpp}: Simulate the Switzer-type point process on a linear network. \item \texttt{intersect.lintess}: Form the intersection of two tessellations on a linear network. \item \texttt{chop.linnet}: Divide a linear network into tiles using infinite lines. \item \texttt{repairNetwork}: Detect and repair inconsistencies in internal data in a \texttt{linnet} or \texttt{lpp} object. \item \verb!marks<-.lintess!, \texttt{unmark.lintess}: Assign marks to the tiles of a tessellation on a linear network. \item \texttt{marks.lintess}: Extract the marks of the tiles of a tessellation on a linear network. \item \texttt{tilenames.lintess}: Extract the names of the tiles in a tessellation on a linear network \item \verb!tilenames<-.lintess!: Change the names of the tiles in a tessellation on a linear network \item \texttt{nobjects.lintess}: Count the number of tiles in a tessellation on a linear network \item \texttt{as.data.frame.lintess}: Convert a tessellation on a linear network into a data frame. \item \texttt{repul}: Repulsiveness index for a determinantal point process model. \item \texttt{reach.kppm}: Reach (interaction distance) for a Cox or cluster point process model. 
\item \texttt{summary.dppm}, \texttt{print.summary.dppm}: Summary method for determinantal point process models. \item \texttt{nncross.ppx}: Nearest neighbours between two point patterns in any number of dimensions. \item \texttt{rthinclumps}: Divide a spatial region into clumps and randomly delete some of them. \item \texttt{densityQuick.lpp}: Fast kernel estimator of point process intensity on a network using 2D smoothing kernel. \item \texttt{data.lppm}: Extract the original point pattern dataset (on a linear network) to which the model was fitted. \item \texttt{bw.scott.iso}: Isotropic version of Scott's rule (for point patterns in any dimension). \item \texttt{bits.envelope}: Global simulation envelope corresponding to \texttt{bits.test}, the balanced independent two-stage Monte Carlo test. \item \texttt{extrapolate.psp}: Extrapolate line segments to obtain infinite lines. \item \texttt{uniquemap}: Map duplicate points to unique representatives. Generic with methods for \texttt{ppp}, \texttt{lpp}, \texttt{ppx} \item \texttt{uniquemap.data.frame}, \texttt{uniquemap.matrix}: Map duplicate rows to unique representatives \item \texttt{localKcross}, \texttt{localLcross}, \texttt{localKdot}, \texttt{localLdot}, \texttt{localKcross.inhom}, \texttt{localLcross.inhom}: Multitype local $K$ functions. \item \texttt{polartess}: tessellation using polar coordinates. \item \texttt{densityVoronoi}: adaptive estimate of point process intensity using tessellation methods. \item \texttt{densityAdaptiveKernel}: adaptive estimate of point process intensity using variable kernel methods. \item \texttt{bw.abram}: compute adaptive smoothing bandwidths using Abramson's rule. \item \texttt{coords.quad}: method for \texttt{coords}, to extract the coordinates of the points in a quadrature scheme. \item \texttt{lineartileindex}: low-level function to classify points on a linear network according to which tile of a tessellation they fall inside. 
\item \texttt{markmarkscatter}: Mark--mark scatterplot. \item \texttt{bw.CvL}: Cronie-van Lieshout bandwidth selection for density estimation. \item \texttt{subset.psp}: subset method for line segment patterns. \item \texttt{densityfun}, \texttt{densityfun.ppp}: Compute a kernel estimate of intensity of a point pattern and return it as a function of spatial location. \item \texttt{as.im.densityfun}: Convert \texttt{function(x,y)} to a pixel image. \item \texttt{measureDiscrete}, \texttt{measureContinuous}: Extract the discrete and continuous components of a measure. \item \texttt{connected.tess}: Find connected components of each tile in a tessellation and make a new tessellation composed of these pieces. \item \texttt{dffit.ppm}: Effect change diagnostic \texttt{DFFIT} for spatial point process models. \item \texttt{shift.distfun}, \texttt{rotate.distfun}, \texttt{reflect.distfun}, \texttt{flipxy.distfun}, \texttt{affine.distfun}, \texttt{scalardilate.distfun}: Methods for geometrical operations on \texttt{distfun} objects. \item \texttt{rescale.distfun}: Change the unit of length in a \texttt{distfun} object. \item \texttt{plot.indicfun}: Plot method for indicator functions created by \texttt{as.function.owin}. \item \texttt{Smooth.leverage.ppm}, \texttt{Smooth.influence.ppm}: Smooth a leverage function or an influence measure. \item \texttt{integral.leverage.ppm}, \texttt{integral.influence.ppm}: Compute the integral of a leverage function or an influence measure. \item \texttt{mean.leverage.ppm}: Compute the mean value of a leverage function. \item \texttt{rectdistmap}: Distance map using rectangular metric. \item \texttt{rectcontact}: Contact distribution function using rectangular structuring element. \item \texttt{joinVertices}: Join specified vertices in a linear network. \item \code{summary.ssf}: Summary method for a spatially sampled function (class \code{ssf}). 
\item \code{unstack.tess}: Given a tessellation with multiple columns of marks, take the columns one at a time, and return a list of tessellations, each carrying only one of the original columns of marks. \item \code{contour.leverage.ppm}: Method for \code{contour} for leverage functions of class \code{leverage.ppm} \item \code{lurking}: New generic function for lurking variable plots. \item \code{lurking.ppp}, \code{lurking.ppm}: These are equivalent to the original function \code{lurking}. They are now methods for the new generic \code{lurking}. \item \code{lurking.mppm}: New method for class \code{mppm}. Lurking variable plot for models fitted to several point patterns. \item \code{print.lurk}: Prints information about the object returned by the function \code{lurking} representing a lurking variable plot. \item \code{model.matrix.mppm}: Method for \code{model.matrix} for models of class \code{mppm}. \item \code{test.crossing.psp}, \code{test.selfcrossing.psp}: Previously undocumented functions for testing whether segments cross. \item \code{to.saturated}: Convert a colour value to the corresponding fully-saturated colour. \item \code{intensity.psp}: Compute the average total length of segments per unit area. \item \code{boundingbox.psp}: Bounding box for line segment patterns. This produces a tighter bounding box than the previous default behaviour. \item \code{boundingbox.lpp}: Bounding box for point patterns on a linear network. This produces a tighter bounding box than the previous default behaviour. \item \code{boundingbox.linnet}: Bounding box for a linear network. This produces a tighter bounding box than the previous default behaviour. \item \verb!"Frame<-.default"!: New default method for assigning bounding frame to a spatial object. \item \code{connected.pp3}: Connected components of a 3D point pattern. \item \code{colouroutputs}, \verb!"colouroutputs<-"!: Extract or assign colour values in a colour map. 
(Documented a previously-existing function) \item \texttt{fitin.profilepl}: Extract the fitted interaction from a model fitted by profile likelihood. \item \verb![<-.linim!: Subset assignment method for pixel images on a linear network. \item \texttt{nnfromvertex}: Given a point pattern on a linear network, find the nearest data point from each vertex of the network. \item \texttt{tile.lengths}: Calculate the length of each tile in a tessellation on a network. \item \texttt{text.ppp}, \texttt{text.lpp}, \texttt{text.psp}: Methods for \texttt{text} for spatial patterns. \item \texttt{as.data.frame.envelope}: Extract function data from an envelope object, including the functions for the simulated data ('simfuns') if they were saved. \item \texttt{is.connected}, \texttt{is.connected.default}, \texttt{is.connected.linnet}: Determines whether a spatial object consists of one topologically connected piece, or several pieces. \item \texttt{is.connected.ppp}: Determines whether a point pattern is connected after all pairs of points closer than distance R are joined. \item \texttt{hist.funxy}: Histogram of values of a spatial function. \item \texttt{model.matrix.ippm}: Method for \texttt{model.matrix} which allows computation of regular and irregular score components. \item \texttt{harmonise.msr}: Convert several measures (objects of class \texttt{msr}) to a common quadrature scheme. \item \texttt{bits.test}: Balanced Independent Two-Stage Monte Carlo test, an improvement on the Dao-Genton test. \item \texttt{lineardirichlet}: Computes the Dirichlet-Voronoi tessellation associated with a point pattern on a linear network. \item \texttt{domain.lintess}, \texttt{domain.linfun}: Extract the linear network from a \texttt{lintess} or \texttt{linfun} object. \item \texttt{summary.lintess}: Summary of a tessellation on a linear network. \item \texttt{clicklpp}: Interactively add points on a linear network. 
\item \texttt{envelopeArray}: Generate an array of envelopes using a function that returns \texttt{fasp} objects. \item \texttt{bw.pcf}: Bandwidth selection for pair correlation function. \item \texttt{grow.box3}: Expand a three-dimensional box. \item \texttt{hexagon}, \texttt{regularpolygon}: Create regular polygons. \item \texttt{Ops.msr}: Arithmetic operations for measures. \item \texttt{Math.imlist}, \texttt{Ops.imlist}, \texttt{Summary.imlist}, \texttt{Complex.imlist}: Arithmetic operations for lists of pixel images. \item \texttt{measurePositive}, \texttt{measureNegative}, \texttt{measureVariation}, \texttt{totalVariation}: Positive and negative parts of a measure, and variation of a measure. \item \texttt{as.function.owin}: Convert a spatial window to a \texttt{function(x,y)}, the indicator function. \item \texttt{as.function.ssf}: Convert an object of class \texttt{ssf} to a \texttt{function(x,y)} \item \texttt{as.function.leverage.ppm} Convert an object of class \texttt{leverage.ppm} to a \texttt{function(x,y)} \item \texttt{sdr}, \texttt{dimhat}: Sufficient Dimension Reduction for point processes. \item \texttt{simulate.rhohat}: Simulate a Poisson point process with the intensity estimated by \texttt{rhohat}. \item \texttt{rlpp}: Random points on a linear network with a specified probability density. \item \texttt{cut.lpp}: Method for \texttt{cut} for point patterns on a linear network. \item \texttt{has.close}: Faster way to check whether a point has a close neighbour. \item \texttt{psib}: Sibling probability (index of clustering strength in a cluster process). \item \texttt{rags}, \texttt{ragsAreaInter}, \texttt{ragsMultiHard}: Alternating Gibbs Sampler for point processes. \item \texttt{bugfixes}: List all bug fixes in recent versions of a package. 
\item \texttt{ssf}: Create a spatially sampled function \item \texttt{print.ssf}, \texttt{plot.ssf}, \texttt{contour.ssf}, \texttt{image.ssf}: Display a spatially sampled function \item \texttt{as.im.ssf}, \texttt{as.ppp.ssf}, \texttt{marks.ssf}, \verb!marks<-.ssf!, \texttt{unmark.ssf}, \verb![.ssf!, \texttt{with.ssf}: Manipulate data in a spatially sampled function \item \texttt{Smooth.ssf}: Smooth a spatially sampled function \item \texttt{integral.ssf}: Approximate integral of spatially sampled function \item \texttt{roc.kppm}, \texttt{roc.lppm}, \texttt{roc.lpp}: Methods for \texttt{roc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{auc.kppm}, \texttt{auc.lppm}, \texttt{auc.lpp}: Methods for \texttt{auc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{timeTaken}: Extract the timing data from a \texttt{"timed"} object or objects. \item \texttt{rotate.infline}, \texttt{shift.infline}, \texttt{reflect.infline}, \texttt{flipxy.infline}: Geometrical transformations for infinite straight lines. \item \texttt{whichhalfplane}: Determine which side of an infinite line a point lies on. \item \texttt{matrixpower}, \texttt{matrixsqrt}, \texttt{matrixinvsqrt}: Raise a matrix to any power. \item \texttt{points.lpp}: Method for \texttt{points} for point patterns on a linear network. \item \texttt{pairs.linim}: Pairs plot for images on a linear network. \item \texttt{closetriples}: Find close triples of points. \item \texttt{anyNA.im}: Method for \texttt{anyNA} for pixel images. \item \texttt{bc}: Bias correction (Newton-Raphson) for fitted model parameters. \item \texttt{rex}: Richardson extrapolation for numerical integrals and statistical model parameter estimates. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Find the smallest circle enclosing a window or point pattern. \item \verb![.linim! 
: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{weighted.median}, \texttt{weighted.quantile}: Median or quantile of numerical data with associated weights. \item \verb!"[.linim"!: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Smallest circle enclosing a spatial object. \item \texttt{split.msr}: Decompose a measure into parts. \item \texttt{unstack.msr}: Decompose a vector-valued measure into its component measures. \item \texttt{unstack.ppp}, \texttt{unstack.psp}, \texttt{unstack.lpp}: Given a spatial pattern with several columns of marks, separate the columns and return a list of spatial patterns, each having only one column of marks. \item \texttt{kernel.squint}: Integral of squared kernel, for the kernels used in density estimation. \item \texttt{as.im.data.frame}: Build a pixel image from a data frame of coordinates and pixel values. \item \texttt{covering}: Cover a window using discs of a given radius. \item \texttt{dilationAny}, \texttt{erosionAny}, \verb!%(-)%! : Morphological dilation and erosion by any shape. \item \texttt{FmultiInhom}, \texttt{GmultiInhom} Inhomogeneous multitype/marked versions of the summary functions \texttt{Fest}, \texttt{Gest}. \item \texttt{kernel.moment} Moment or incomplete moment of smoothing kernel. \item \texttt{MinkowskiSum}, \verb!%(+)%!: Minkowski sum of two windows: \verb!A %(+)% B!, or \texttt{MinkowskiSum(A,B)} \item \texttt{nobjects}: New generic function for counting the number of 'things' in a dataset. There are methods for \texttt{ppp}, \texttt{ppx}, \texttt{psp}, \texttt{tess}. 
\item \texttt{parameters.interact}, \texttt{parameters.fii}: Extract parameters from interpoint interactions. (These existing functions are now documented.) \item \texttt{ppmInfluence}: Calculate \texttt{leverage.ppm}, \texttt{influence.ppm} and \texttt{dfbetas.ppm} efficiently. \item \texttt{rppm}, \texttt{plot.rppm}, \texttt{predict.rppm}, \texttt{prune.rppm}: Recursive-partition point process models. \item \texttt{simulate.mppm} Simulate a point process model fitted to replicated point patterns. \item \texttt{update.interact}: Update the parameters of an interpoint interaction. [This existing function is now documented.] \item \texttt{where.max}, \texttt{where.min} Find the spatial location(s) where a pixel image achieves its maximum or minimum value. \item \texttt{compileK}, \texttt{compilepcf}: make a $K$ function or pair correlation function given the pairwise distances and their weights. [These existing internal functions are now documented.] \item \texttt{laslett}: Laslett's Transform. \item \texttt{lintess}: Tessellation on a linear network. \item \texttt{divide.linnet}: Divide a linear network into pieces demarcated by a point pattern. \item \texttt{insertVertices}: Insert new vertices in a linear network. \item \texttt{thinNetwork}: Remove vertices and/or segments from a linear network etc. \item \texttt{connected.linnet}: Find connected components of a linear network. \item \texttt{nvertices}, \texttt{nvertices.linnet}, \texttt{nvertices.owin}: Count the number of vertices in a linear network or vertices of the boundary of a window. \item \texttt{as.data.frame.linim}, \texttt{as.data.frame.linfun}: Extract a data frame of spatial locations and function values from an object of class \texttt{linim} or \texttt{linfun}. \item \texttt{as.linfun}, \texttt{as.linfun.linim}, \texttt{as.linfun.lintess}: Convert other kinds of data to a \texttt{linfun} object. 
\item \texttt{requireversion}: Require a particular version of a package (for use in stand-alone R scripts). \item \texttt{as.function.tess}: Convert a tessellation to a \texttt{function(x,y)}. The function value indicates which tile of the tessellation contains the point $(x,y)$. \item \texttt{tileindex}: Determine which tile of a tessellation contains a given point $(x,y)$. \item \texttt{persp.leverage.ppm}: Method for persp plots for objects of class \texttt{leverage.ppm} \item \texttt{AIC.mppm}, \texttt{extractAIC.mppm}: AIC for point process models fitted to replicated point patterns. \item \texttt{nobs.mppm}, \texttt{terms.mppm}, \texttt{getCall.mppm}: Methods for point process models fitted to replicated point patterns. \item \texttt{rPenttinen}: Simulate the Penttinen process using perfect simulation. \item \texttt{varcount}: Given a point process model, compute the predicted variance of the number of points falling in a window. \item \texttt{inside.boxx}: Test whether multidimensional points lie inside a specified multidimensional box. \item \texttt{lixellate}: Divide each segment of a linear network into smaller segments. \item \texttt{nsegments.linnet}, \texttt{nsegments.lpp}: Count the number of line segments in a linear network. \item \texttt{grow.boxx}: Expand a multidimensional box. \item \texttt{deviance.ppm}, \texttt{deviance.lppm}: Deviance for a fitted point process model. \item \texttt{pseudoR2}: Pseudo-R-squared for a fitted point process model. \item \texttt{tiles.empty} Checks whether each tile of a tessellation is empty or nonempty. \item \texttt{summary.linim}: Summary for a pixel image on a linear network. \item Determinantal Point Process models: \begin{itemize} \item \texttt{dppm}: Fit a determinantal point process model. \item \texttt{fitted.dppm}, \texttt{predict.dppm}, \texttt{intensity.dppm}: prediction for a fitted determinantal point process model. 
\item \texttt{Kmodel.dppm}, \texttt{pcfmodel.dppm}: Second moments of a determinantal point process model. \item \texttt{rdpp}, \texttt{simulate.dppm}: Simulation of a determinantal point process model. \item \texttt{logLik.dppm}, \texttt{AIC.dppm}, \texttt{extractAIC.dppm}, \texttt{nobs.dppm}: Likelihood and AIC for a fitted determinantal point process model. \item \texttt{print.dppm}, \texttt{reach.dppm}, \texttt{valid.dppm}: Basic information about a \texttt{dpp} model. \item \texttt{coef.dppm}, \texttt{formula.dppm}, \texttt{print.dppm}, \texttt{terms.dppm}, \texttt{labels.dppm}, \texttt{model.frame.dppm}, \texttt{model.matrix.dppm}, \texttt{model.images.dppm}, \texttt{is.stationary.dppm}, \texttt{reach.dppm}, \texttt{unitname.dppm}, \verb!unitname<-.dppm!, \texttt{Window.dppm}: Various methods for \texttt{dppm} objects. \item \texttt{parameters.dppm}: Extract meaningful list of model parameters. \item \texttt{objsurf.dppm}: Objective function surface of a \texttt{dppm} object. \item \texttt{residuals.dppm}: Residual measure for a \texttt{dppm} object. \end{itemize} \item Determinantal Point Process model families: \begin{itemize} \item \texttt{dppBessel}, \texttt{dppCauchy}, \texttt{dppGauss}, \texttt{dppMatern}, \texttt{dppPowerExp}: Determinantal Point Process family functions. \item \texttt{detpointprocfamilyfun}: Create a family function. \item \texttt{update.detpointprocfamily}: Set parameter values in a determinantal point process model family. \item \texttt{simulate.dppm}: Simulation. \item \texttt{is.stationary.detpointprocfamily}, \texttt{intensity.detpointprocfamily}, \texttt{Kmodel.detpointprocfamily}, \texttt{pcfmodel.detpointprocfamily}: Moments. \item \texttt{dim.detpointprocfamily}, \texttt{dppapproxkernel}, \texttt{dppapproxpcf}, \texttt{dppeigen}, \texttt{dppkernel}, \texttt{dppparbounds}, \texttt{dppspecdenrange}, \texttt{dppspecden}: Helper functions. 
\end{itemize} \item \texttt{dg.envelope}: Simulation envelopes corresponding to Dao-Genton test. \item \texttt{dg.progress}: Progress plot (envelope representation) for the Dao-Genton test. \item \texttt{dg.sigtrace}: significance trace for the Dao-Genton test. \item \texttt{markcrosscorr}: Mark cross-correlation function for point patterns with several columns of marks. \item \texttt{rtemper}: Simulated annealing or simulated tempering. \item \texttt{rgb2hsva}: Convert RGB to HSV data, like \texttt{rgb2hsv}, but preserving transparency. \item \texttt{superimpose.ppplist}, \texttt{superimpose.splitppp}: New methods for 'superimpose' for lists of point patterns. \item \texttt{dkernel}, \texttt{pkernel}, \texttt{qkernel}, \texttt{rkernel}: Probability density, cumulative probability, quantiles and random generation from distributions used in basic one-dimensional kernel smoothing. \item \texttt{kernel.factor}: Auxiliary calculations for one-dimensional kernel smoothing. \item \texttt{spatdim}: Spatial dimension of any object in the \spst\ package. \item \texttt{as.boxx}: Convert data to a multi-dimensional box. \item \texttt{intensity.ppx}: Method for \texttt{intensity} for multi-dimensional space-time point patterns. \item \texttt{fourierbasis}: Evaluate Fourier basis functions in any number of dimensions. \item \texttt{valid}: New generic function, with methods \texttt{valid.ppm}, \texttt{valid.lppm}, \texttt{valid.dppm}. \item \texttt{emend}, \texttt{emend.ppm}, \texttt{emend.lppm}: New generic function with methods for \texttt{ppm} and \texttt{lppm}. \texttt{emend.ppm} is equivalent to \texttt{project.ppm}. \item \texttt{Penttinen}: New pairwise interaction model. \item \texttt{quantile.density}: Calculates quantiles from kernel density estimates. \item \texttt{CDF.density}: Calculates cumulative distribution function from kernel density estimates. \item \texttt{triangulate.owin}: decompose a spatial window into triangles. 
\item \texttt{fitted.lppm}: fitted intensity values for a point process on a linear network. \item \texttt{parameters}: Extract all parameters from a fitted model. \end{itemize} \section{Alphabetical list of changes} Here is a list of all changes made to existing functions, listed alphabetically. \begin{itemize} %%A \item \texttt{adaptive.density}: This function can now perform adaptive estimation by two methods: either tessellation-based methods or variable-bandwidth kernel estimation. The calculations are performed by either \texttt{densityVoronoi} or \texttt{densityAdaptiveKernel}. \item \texttt{affine.owin}: Allows transformation matrix to be singular, if the window is polygonal. \item \texttt{alltypes}: If \texttt{envelope=TRUE} and the envelope computation reaches the maximum permitted number of errors (\texttt{maxnerr}) in evaluating the summary function for the simulated patterns, then instead of triggering a fatal error, the envelope limits will be set to \texttt{NA}. \item \texttt{anova.mppm}: Now handles Gibbs models, and performs the adjusted composite likelihood ratio test. New argument \texttt{fine}. \item \texttt{anyDuplicated.ppp}: Accelerated. \item \texttt{append.psp}: arguments may be \texttt{NULL}. \item \texttt{as.function.tess}: New argument \texttt{values} specifies the function values. \item \texttt{as.im.distfun}: New argument \texttt{approx} specifies the choice of algorithm. \item \texttt{as.im.function}: \begin{itemize} \item New argument \texttt{strict}. \item New argument \texttt{stringsAsFactors}. \end{itemize} \item \texttt{as.im.leverage.ppm}: New argument \texttt{what}. \item \texttt{as.im.nnfun}: New argument \texttt{approx} chooses between a fast, approximate algorithm and a slow, exact algorithm. \item \texttt{as.im.smoothfun}: New argument \texttt{approx} chooses between a fast, approximate algorithm and a slow, exact algorithm. \item \texttt{as.layered}: Default method now handles a (vanilla) list of spatial objects. 
\item \texttt{as.linfun.lintess}: \begin{itemize} \item New argument \texttt{values} specifies the function value for each tile. \item The default \texttt{values} are the marks, if present. \item New argument \texttt{navalue}. \item Computation accelerated. \end{itemize} \item \texttt{as.linim.default}: New argument \texttt{delta} controls spacing of sample points in internal data. \item \texttt{as.linnet.psp}: \begin{itemize} \item If the line segment pattern has marks, then the resulting linear network also carries these marks in the \verb!$lines! component. \item Computation accelerated. \end{itemize} \item \texttt{as.owin.default}: \begin{itemize} \item Now refuses to convert a \code{box3} to a two-dimensional window. \item Now accepts a structure with entries named \code{xmin},\code{xmax}, \code{ymin}, \code{ymax} in any order. This handles objects of class \code{bbox} in the \pkg{sf} package. \item Now detects objects of class \code{SpatialPolygons} and issues a more helpful error message. \end{itemize} \item \texttt{as.owin.data.frame}: New argument \texttt{step} \item \texttt{as.polygonal}: \begin{itemize} \item Can now repair errors in polygon data, if \texttt{repair=TRUE}. \item Accelerated when \texttt{w} is a pixel mask. \end{itemize} \item \texttt{as.psp}: now permits a data frame of marks to have only one column, instead of coercing it to a vector. \item \texttt{as.solist}: The argument \texttt{x} can now be a spatial object; \texttt{as.solist(cells)} is the same as \texttt{solist(cells)}. %%B \item \texttt{bdist.pixels}: Accelerated for polygonal windows. New argument \texttt{method}. \item \texttt{bdist.points}: Accelerated for polygonal windows. \item \texttt{beachcolours}: \begin{itemize} \item Improved positioning of the yellow colour band. \item If \texttt{sealevel} lies outside \texttt{srange}, then \texttt{srange} will be extended to include it (without a warning). 
\end{itemize} \item \texttt{beachcolourmap}: Improved positioning of the yellow colour band. \item \texttt{bind.fv}: New argument \texttt{clip}. \item \texttt{blur}: New argument \texttt{kernel}. \item \texttt{bw.abram}: \begin{itemize} \item New argument \texttt{smoother} determines how the pilot estimate is computed. \item Formal arguments rearranged. \end{itemize} \item \texttt{bw.diggle}, \texttt{bw.ppl}, \texttt{bw.relrisk}, \texttt{bw.smoothppp}: \begin{itemize} \item These functions now extract and store the name of the unit of length from the point pattern dataset. When the bandwidth selection criterion is plotted, the name of the unit of length is shown on the x-axis. \item A warning is issued if the optimal value of the cross-validation criterion occurs at an endpoint of the search interval. New argument \texttt{warn}. \end{itemize} \item \texttt{bw.ppl}: \begin{itemize} \item New arguments \texttt{weights} and \texttt{sigma}. \item New argument \texttt{shortcut} allows faster computation. \item Additional arguments \verb!...! are now passed to \texttt{density.ppp}. \end{itemize} \item \texttt{bw.scott}: \begin{itemize} \item the two bandwidth values in the result now have names \texttt{sigma.x} and \texttt{sigma.y}. \item Now handles point patterns of any dimension. \item New arguments \texttt{isotropic} and \texttt{d}. \end{itemize} \item \texttt{bw.stoyan}: The rule has been modified so that, if the pattern is empty, it is now treated as if it contained 1 point, so that a finite bandwidth value is returned. %%C \item \texttt{cbind.hyperframe}: The result now retains the \texttt{row.names} of the original arguments. \item \texttt{cdf.test}: \begin{itemize} \item Calculations are more robust against numerical rounding effects. \item The methods for classes \texttt{ppp}, \texttt{ppm}, \texttt{lpp}, \texttt{lppm}, \texttt{slrm} have a new argument \texttt{interpolate}. \item Monte Carlo test runs much faster. 
\item More jittering is applied when \texttt{jitter=TRUE}. Warnings about tied values should not occur any more. \end{itemize} \item \texttt{cdf.test.mppm}: \begin{itemize} \item Now handles Gibbs models. \item Now recognises \texttt{covariate="x"} or \texttt{"y"}. \end{itemize} \item \texttt{clarkevans}: The argument \texttt{correction="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{clickpoly}: The polygon is now drawn progressively as the user clicks new vertices. \item \texttt{closepairs.ppp}: New argument \code{periodic}. \item \texttt{closepairs.ppp}, \texttt{closepairs.pp3}: \begin{itemize} \item New arguments \texttt{distinct} and \texttt{neat} allow more options. \item Argument \texttt{ordered} has been replaced by \texttt{twice} (but \texttt{ordered} is still accepted, with a warning). \item Performance improved (computation time and memory requirements reduced.) This should improve the performance of many functions in \texttt{spatstat}. \end{itemize} \item \texttt{closepairs.pp3}: Argument \texttt{what} can take the value \texttt{"ijd"} \item \texttt{clusterset}: Improved behaviour. \item \texttt{clusterfit}: \begin{itemize} \item New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. Individually-named arguments \texttt{q,p,rmax,rmin} now take precedence over entries with the same names in the list \texttt{ctrl}. \item New argument \texttt{verbose}. \end{itemize} \item \texttt{colourmap}: argument \texttt{col} have have length 1, representing a trivial colour map in which all data values are mapped to the same colour. \item \texttt{collapse.fv}: This is now treated as a method for the \texttt{nlme} generic \texttt{collapse}. Its syntax has been adjusted slightly. \item \texttt{connected.im}: Now handles a logical-valued image properly. 
Arguments \texttt{...} now determine pixel resolution. \item \texttt{connected.owin}: Arguments \texttt{...} now determine pixel resolution. \item \texttt{contour.im}: New argument \texttt{col} specifies the colour of the contour lines. If \texttt{col} is a colour map, then the contours are drawn in different colours. \item \texttt{convolve.im}: the name of the unit of length is preserved. \item \texttt{crossing.psp}: New argument \texttt{details} gives more information about the intersections between the segments. \item \texttt{crosspairs.pp3}: Argument \texttt{what} can take the value \texttt{"ijd"} \item \texttt{cut.ppp}: Argument \texttt{z} can be \texttt{"x"} or \texttt{"y"} indicating one of the spatial coordinates. %%D \item \texttt{dclf.test, mad.test, dclf.progress, mad.progress,} \texttt{dclf.sigtrace, mad.sigtrace}, \texttt{dg.progress, dg.sigtrace}: \begin{itemize} \item New argument \texttt{clamp} determines the test statistic for one-sided tests. \item New argument \texttt{rmin} determines the left endpoint of the test interval. \item New argument \texttt{leaveout} specifies how to calculate discrepancy between observed and simulated function values. \item New argument \texttt{scale} allows summary function values to be rescaled before the comparison is performed. \item New argument \texttt{interpolate} supports interpolation of $p$-value. \item New argument \texttt{interpolate} supports interpolation of critical value of test. \item Function values which are infinite, \texttt{NaN} or \texttt{NA} are now ignored in the calculation (with a warning) instead of causing an error. Warning messages are more detailed. \end{itemize} \item \texttt{default.rmhcontrol, default.rmhexpand}: New argument \texttt{w}. \item \texttt{density.lpp}: \begin{itemize} \item New fast algorithm (up to 1000 times faster) for the default case where \texttt{kernel="gaussian"} and \texttt{continuous=TRUE}. Generously contributed by Greg McSwiggan. 
\item Fast algorithm has been further accelerated. \item New argument \texttt{kernel} specifies the smoothing kernel. Any of the standard one-dimensional smoothing kernels can be used. \item Now supports both the `equal-split continuous' and `equal-split discontinuous' smoothers. New argument \texttt{continuous} determines the choice of smoother. \item New arguments \texttt{weights} and \texttt{old}. \item New argument \texttt{distance} offers a choice of different kernel methods. \end{itemize} \item \texttt{density.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image. \item Infinite bandwidth \texttt{sigma=Inf} is supported. \item Accelerated by about 30\% when \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Accelerated in the cases where weights are given or \texttt{diggle=TRUE}. \item New argument \texttt{verbose}. \end{itemize} \item \texttt{density.psp}: \begin{itemize} \item New argument \texttt{method}. \item Accelerated by 1 to 2 orders of magnitude. \end{itemize} \item \texttt{dfbetas.ppm}: \begin{itemize} \item For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item Increased the default resolution of the pixel images. Spatial resolution can now be controlled by the arguments \code{dimyx}, \code{eps}. \end{itemize} \item \texttt{diagnose.ppm}: \begin{itemize} \item Infinite values of \texttt{rbord} are now ignored and treated as zero. This ensures that \texttt{diagnose.ppm} has a sensible default when the fitted model has infinite reach. \item Accelerated, when \texttt{type="inverse"}, for models without a hard core. \end{itemize} \item \texttt{diagnose.ppm, plot.diagppm}: \begin{itemize} \item New arguments \texttt{col.neg, col.smooth} control the colour maps. 
\item Accelerated, when \texttt{type="inverse"}, for models without a hard core. \end{itemize} \item \texttt{dilation.ppp}: Improved geometrical accuracy. Now accepts arguments to control resolution of polygonal approximation. \item \texttt{discs}: \begin{itemize} \item Now accepts a single numeric value for \texttt{radii}. \item New argument \texttt{npoly}. \item Accelerated in some cases. \end{itemize} \item \texttt{distfun}: When the user calls a distance function that was created by \texttt{distfun}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. \item \texttt{dppm}: Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. Individually-named arguments \texttt{q,p,rmax,rmin} now take precedence over entries with the same names in the list \texttt{ctrl}. \item \texttt{duplicated.ppp}: accelerated. %%E \item \texttt{edge.Trans}: New argument \texttt{gW} for efficiency. \item \texttt{effectfun}: \begin{itemize} \item Now works for \texttt{ppm}, \texttt{kppm}, \texttt{lppm}, \texttt{dppm}, \texttt{rppm} and \texttt{profilepl} objects. \item New argument \texttt{nvalues}. \end{itemize} \item \texttt{envelope}: \begin{itemize} \item New argument \texttt{clamp} gives greater control over one-sided envelopes. \item New argument \texttt{funargs} \item New argument \texttt{scale} allows global envelopes to have width proportional to a specified function of $r$, rather than constant width. \item New argument \texttt{funYargs} contains arguments to the summary function when applied to the data pattern only. \item The argument \texttt{simulate} can now be a function (such as \texttt{rlabel}). The function will be applied repeatedly to the original data pattern. \item \texttt{rejectNA} and \texttt{silent}. 
\end{itemize} \item \texttt{envelope.lpp}, \texttt{envelope.lppm}: \begin{itemize} \item New arguments \texttt{fix.n} and \texttt{fix.marks} allow envelopes to be computed using simulations conditional on the observed number of points. \item New arguments \texttt{maxnerr}, \texttt{rejectNA} and \texttt{silent}. \end{itemize} \item \texttt{eval.im}: New argument \texttt{warn}. \item \texttt{eval.linim}: New argument \texttt{warn}. \item \texttt{ewcdf}: \begin{itemize} \item Argument \texttt{weights} can now be \texttt{NULL}. \item New arguments \texttt{normalise} and \texttt{adjust}. \item Computation accelerated. \item The result does not inherit class \texttt{"ecdf"} if \texttt{normalise=FALSE}. \end{itemize} %%F \item \texttt{Fest}: Additional checks for errors in input data. \item \texttt{Finhom}: \begin{itemize} \item A warning is issued if bias is likely to occur because of undersmoothing. \item New arguments \texttt{warn.bias} and \texttt{savelambda}. \end{itemize} \item \texttt{fitted.lppm}: New argument \texttt{leaveoneout} allows leave-one-out computation of fitted value. \item \texttt{fitted.ppm}: \begin{itemize} \item New option, \texttt{type="link"}. \item New argument \code{ignore.hardcore}. \end{itemize} \item \texttt{funxy}: \begin{itemize} \item When the user calls a function that was created by \texttt{funxy}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. \item Functions of class \texttt{"funxy"} can now be applied to quadrature schemes. \end{itemize} %%G \item \texttt{Geyer}: The saturation parameter \texttt{sat} can now be less than 1. \item \texttt{Ginhom}: \begin{itemize} \item A warning is issued if bias is likely to occur because of undersmoothing. \item New arguments \texttt{warn.bias} and \texttt{savelambda}. \end{itemize} \item \texttt{grow.rectangle}: New argument \texttt{fraction}. 
%%H \item \texttt{Hest}: \begin{itemize} \item Argument \texttt{X} can now be a pixel image with logical values. \item New argument \texttt{W}. [Based on code by Kassel Hingee.] \item Additional checks for errors in input data. \end{itemize} \item \texttt{hist.im}: New argument \texttt{xname}. %%I \item \texttt{identify.psp}: Improved placement of labels. Arguments can be passed to \texttt{text.default} to control the plotting of labels. \item \texttt{idw}: Standard errors can now be calculated by setting \texttt{se=TRUE}. \item \texttt{imcov}: the name of the unit of length is preserved. \item \texttt{im.apply}: \begin{itemize} \item Computation accelerated \item New argument \texttt{fun.handles.na} \item New argument \texttt{check} \end{itemize} \item \texttt{influence.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item \texttt{integral.linfun}: \begin{itemize} \item New argument \texttt{delta} controls step length of approximation to integral. \item Argument \code{domain} can be a tessellation. \end{itemize} \item \texttt{integral.linim}: Argument \code{domain} can be a tessellation. \item \texttt{integral.ssf}: Argument \code{domain} can be a tessellation. \item \texttt{intensity.ppm}: Intensity approximation is now implemented for area-interaction model, and Geyer saturation model. \item \texttt{interp.im}: New argument \texttt{bilinear}. \item \texttt{ippm}: \begin{itemize} \item Accelerated. \item The internal format of the result has been extended slightly. \item Improved defaults for numerical algorithm parameters. \end{itemize} %%J \item \texttt{Jfox}: new argument \texttt{warn.trim}. \item \texttt{Jinhom}: \begin{itemize} \item A warning is issued if bias is likely to occur because of undersmoothing. \item New arguments \texttt{warn.bias} and \texttt{savelambda}. 
\end{itemize} %%K \item \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kmulti.inhom}: \begin{itemize} \item These functions now allow intensity values to be given by a fitted point process model. \item New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. \item Leave-one-out calculation is now implemented when \texttt{lambbdaX} is a fitted model of class \texttt{"dppm"}. \end{itemize} \item \texttt{Kest} \begin{itemize} \item Accelerated computation (for translation and rigid corrections) when window is an irregular shape. \item Calculation of isotropic edge correction for polygonal windows has changed slightly. Results are believed to be more accurate. Computation has been accelerated by about 20 percent in typical cases. \end{itemize} \item \texttt{Kest.fft}: Now has \verb!...! arguments allowing control of spatial resolution. \item \texttt{Kinhom}: \begin{itemize} \item New argument \texttt{ratio}. \item Stops gracefully if \texttt{lambda} contains any zero values. \item Leave-one-out calculation is implemented when \texttt{lambda} is a fitted model of class \texttt{"dppm"}. \end{itemize} \item \texttt{kppm}: \begin{itemize} \item Fitting a model with \texttt{clusters="LGCP"} no longer requires the package \pkg{RandomFields} to be loaded explicitly. \item New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item Left hand side of formula can now involve entries in the list \texttt{data}. \item refuses to fit a log-Gaussian Cox model with anisotropic covariance. \item A warning about infinite values of the summary function no longer occurs when the default settings are used. Also affects \texttt{mincontrast}, \texttt{cauchy.estpcf}, \texttt{lgcp.estpcf}, \texttt{matclust.estpcf}, \texttt{thomas.estpcf}, \texttt{vargamma.estpcf}. \item Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. 
Individually-named arguments \texttt{q,p,rmax,rmin} now take precedence over entries with the same names in the list \texttt{ctrl}. \item Improved printed output. \end{itemize} %%L \item \texttt{latest.news}: Now prints news documentation for the current major version, by default. New argument \texttt{major}. \item \texttt{Lcross.inhom}, \texttt{Ldot.inhom}: These functions now allow intensity values to be given by a fitted point process model. New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. \item \texttt{lengths.psp}: New argument \texttt{squared}. \item \texttt{Lest}, \texttt{Linhom}, \texttt{Ldot}, \texttt{Lcross}, \texttt{Ldot.inhom}, \texttt{Lcross.inhom}: These summary functions now have explicit argument \texttt{"correction"}. \item \texttt{leverage.ppm}: \begin{itemize} \item For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item Increased the default resolution of the pixel images. Spatial resolution can now be controlled by the arguments \code{dimyx}, \code{eps}. \end{itemize} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: \begin{itemize} \item These methods now work for models that were fitted by logistic composite likelihood (\texttt{method='logi'}). \item Computation has been vastly accelerated for models with Geyer interaction fitted using isotropic or translation edge corrections. \item Faster computation in many cases. \item Virtually all models and edge corrections are now supported, using a ``brute force'' algorithm. This can be slow in some cases. \end{itemize} \item \texttt{lineardisc}: \begin{itemize} \item New argument \texttt{add}. \item Default plotting behaviour has changed. \end{itemize} \item \texttt{linearK}, \texttt{linearpcf} and relatives: \\ \begin{itemize} \item substantially accelerated. \item ratio calculations are now supported. \item new argument \texttt{ratio}. 
\end{itemize} \item \texttt{linearKinhom}: new argument \texttt{normpower}. \item \texttt{linearKinhom}, \texttt{linearpcfinhom}: \begin{itemize} \item Changed behaviour when \texttt{lambda} is a fitted model. \item New arguments \texttt{update} and \texttt{leaveoneout}. \end{itemize} \item \texttt{linearpcf}: new argument \texttt{normpower}. \item \texttt{linim}: \begin{itemize} \item The image \texttt{Z} is now automatically restricted to the network. \item New argument \texttt{restrict}. \end{itemize} \item \texttt{linnet}: \begin{itemize} \item The internal format of a \texttt{linnet} (linear network) object has been changed. Existing datasets of class \texttt{linnet} are still supported. However, computation will be faster if they are converted to the new format. To convert a linnet object \texttt{L} to the new format, use \verb!L <- as.linnet(L)!. \item If the argument \texttt{edges} is given, then this argument now determines the ordering of the sequence of line segments. For example, the \texttt{i}-th row of \texttt{edges} specifies the \texttt{i}-th line segment in \texttt{as.psp(L)}. \item New argument \texttt{warn}. \item When argument \texttt{edges} is specified, the code now checks whether any edges are duplicated. \end{itemize} \item \texttt{lintess}: \begin{itemize} \item Argument \texttt{df} can be missing or \texttt{NULL}, resulting in a tesellation with only one tile. \item Tessellations can now have marks. New argument \texttt{marks}. \end{itemize} \item \texttt{localpcfinhom}: \begin{itemize} \item New arguments \texttt{update} and \texttt{leaveoneout}. \end{itemize} \item \texttt{logLik.ppm}: \begin{itemize} \item New argument \texttt{absolute}. \item The warning about pseudolikelihood (`log likelihood not available') is given only once, and is not repeated in subsequent calls, within a spatstat session. \end{itemize} \item \texttt{logLik.mppm}: new argument \texttt{warn}. 
\item \texttt{lohboot}: \begin{itemize} \item Algorithm has been corrected and extended thanks to Christophe Biscio and Rasmus Waagepetersen. \item New arguments \texttt{block}, \texttt{basicboot}, \texttt{Vcorrection}. \item Accelerated when the window is a rectangle. \item Now works for multitype $K$ functions \texttt{Kcross}, \texttt{Kdot}, \texttt{Lcross}, \texttt{Ldot}, \texttt{Kcross.inhom}, \texttt{Lcross.inhom} \item Confidence bands for \texttt{Lest}, \texttt{Linhom}, \texttt{Lcross}, \texttt{Ldot}, \texttt{Lcross.inhom} are now computed differently. First a confidence band is computed for the corresponding $K$ function \texttt{Kest}, \texttt{Kinhom}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcross.inhom} respectively. Then this is transformed to a confidence band for the $L$ function by applying the square root transformation. \end{itemize} \item \texttt{lpp}: \begin{itemize} \item The internal format of an \texttt{lpp} object has been changed. Existing datasets of class \texttt{lpp} are still supported. However, computation will be faster if they are converted to the new format. To convert an \texttt{lpp} object \texttt{X} to the new format, use \verb!X <- as.lpp(X)!. \item \texttt{X} can be missing or \texttt{NULL}, resulting in an empty point pattern. \end{itemize} \item \texttt{lpp}, \texttt{as.lpp}: These functions now handle the case where coordinates \texttt{seg} and \texttt{tp} are given but \texttt{x} and \texttt{y} are missing. \item \texttt{lppm}: \begin{itemize} \item New argument \texttt{random} controls placement of dummy points. \item Computation accelerated. \end{itemize} \item \texttt{lurking.ppm}: accelerated. \item \texttt{lut}: argument \texttt{outputs} may have length 1, representing a lookup table in which all data values are mapped to the same output value. %%M \item \texttt{markconnect}: Accepts the argument \texttt{weights} which is passed to \texttt{markcorr}. 
\item \texttt{markcorr}: New argument \texttt{weights} allows computation of the weighted version of the mark correlation function. Weights can be an expression to be evaluated, or a function, or a pixel image, or a numeric vector. \item \texttt{markvario}: Accepts the argument \texttt{weights} which is passed to \texttt{markcorr}. \item \texttt{minnndist}, \texttt{maxnndist}: New argument \texttt{by} makes it possible to find the minimum or maximum nearest neighbour distance between each pair of possible types in a multitype pattern. \item \texttt{mppm}: \begin{itemize} \item Now handles models with a random effect component. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item New argument \texttt{random} is a formula specifying the random effect. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item Performs more checks for consistency of the input data. \item New arguments \texttt{gcontrol} and \texttt{reltol.pql} control the fitting algorithm. \item New argument \texttt{weights} specifies case weights for each row of data. \end{itemize} \item \texttt{msr}: Infinite and \texttt{NA} values are now detected (if \texttt{check=TRUE}) and are reset to zero, with a warning. %%N \item \texttt{nbfires}: \begin{itemize} \item the unit of length for the coordinates is now specified in this dataset. \item This dataset now includes information about the different land and sea borders of New Brunswick. \end{itemize} \item \texttt{nncorr,nnmean,nnvario}: New argument \texttt{na.action}. \item \texttt{nndist.lpp, nnwhich.lpp, nncross.lpp, distfun.lpp}: New argument \texttt{k} allows computation of $k$-th nearest point. Computation accelerated. \texttt{nnfun.lpp}: \begin{itemize} \item New argument \texttt{k}. \item New argument \texttt{value} specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. 
\end{itemize} \texttt{nnfun.ppp}: \begin{itemize} \item New argument \texttt{value} specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. \end{itemize} \texttt{nnfun.psp}: \begin{itemize} \item New argument \texttt{value} specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. \end{itemize} %%O %%P \item \texttt{padimage}: New argument \texttt{W} allows an image to be padded out to fill any window. \item \texttt{pairorient}: Default edge corrections now include \texttt{"bord.modif"}. \item \texttt{parres}: the argument \texttt{covariate} is allowed to be missing if the model only depends on one covariate. \item \texttt{pcf.ppp}: \begin{itemize} \item New argument \code{close} for advanced use. \item New argument \texttt{ratio} allows several estimates of pcf to be pooled. \item Now calculates an analytic approximation to the variance of the estimate of the pair correlation function (when \texttt{var.approx=TRUE}). \item Now returns the smoothing bandwidth used, as an attribute of the result. \item New argument \texttt{close} for advanced use. \item Now accepts \texttt{correction="none"}. \end{itemize} \item \texttt{pcfinhom}: \begin{itemize} \item New argument \code{close} for advanced use. \item Default behaviour is changed when \texttt{lambda} is a fitted model. The default is now to re-fit the model to the data before computing pcf. New arguments \texttt{update} and \texttt{leaveoneout} control this. \item New argument \texttt{close} for advanced use. \item Now handles \texttt{correction="good"} \item Leave-one-out calculation is implemented when \texttt{lambda} is a fitted model of class \texttt{"dppm"}. \end{itemize} \item \code{persp.funxy}: Improved $z$-axis label. \item \texttt{pixellate.ppp}: \begin{itemize} \item If the pattern is empty, the result is an integer-valued image (by default) for consistency with the results for non-empty patterns. 
\item Accelerated in the case where weights are given. \item New arguments \texttt{fractional} and \texttt{preserve} for more accurate discretisation. \item New argument \texttt{savemap}. \end{itemize} \item \texttt{plot.anylist}: \begin{itemize} \item If a list entry \verb!x[[i]]! belongs to class \texttt{"anylist"}, it will be expanded so that each entry \verb!x[[i]][[j]]! will be plotted as a separate panel. \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.colourmap}: \begin{itemize} \item Now handles a colour map for a zero-length interval [a,a] \item New argument \texttt{increasing} specifies whether the colours are displayed in order left-to-right/bottom-to-top. \item Changed default behaviour for discrete colour maps when \texttt{vertical=FALSE}. \end{itemize} \item \texttt{plot.im}: \begin{itemize} \item Now handles complex-valued images. \item New argument \texttt{workaround} to avoid a bug in some MacOS device drivers that causes the image to be displayed in the wrong spatial orientation. \item The number of tick marks in the colour ribbon can now be controlled using the argument \texttt{nint} in \texttt{ribargs}. \item Improved behaviour when all pixel values are \texttt{NA}. \item Improved handling of tickmarks on colour ribbon. \item Improved behaviour when the image values are almost constant. \item New argument \texttt{riblab}. \item Axes are prevented from extending outside the image rectangle. \item New argument \texttt{zap}. \item Some warnings are suppressed when \texttt{do.plot=FALSE}. \end{itemize} \item \texttt{plot.imlist}: Result is now an (invisible) list containing the result from executing the plot of each panel. \item \texttt{plot.influence.ppm}: New argument \texttt{multiplot}. \item \texttt{plot.kppm}: \begin{itemize} \item New arguments \texttt{pause} and \texttt{xname}. 
\item The argument \texttt{what="all"} is now recognised: it selects all the available options. [This is also the default.] \end{itemize} \item \texttt{plot.leverage.ppm}: \begin{itemize} \item New arguments \texttt{multiplot} and \code{what}. \item A contour line showing the average value of leverage is now drawn on the colour ribbon, as well as on the main image. New argument \texttt{args.contour}. \end{itemize} \item \texttt{plot.linfun}: \begin{itemize} \item Now passes arguments to the function being plotted. \item A scale bar is now plotted when \texttt{style="width"}. \item New argument \texttt{legend}. \item The return value has a different format. \end{itemize} \item \texttt{plot.linim}: \begin{itemize} \item The return value has a different format. \item New argument \texttt{fatten} improves visual appearance when \texttt{style="colour"}. \item A scale bar is now plotted when \texttt{style="width"}. \item When \texttt{style="width"}, negative values are plotted in red (by default). New argument \texttt{negative.args} controls this. \item New argument \texttt{zlim} specifies the range of values to be mapped. \item New explicit argument \texttt{box} determines whether to plot a bounding box; default is \texttt{FALSE} in all cases. \end{itemize} \item \texttt{plot.lintess}: \begin{itemize} \item Improved plot method, with more options. \item Modified to display the marks attached to the tiles. \item Options: \verb!style=c("colour", "width", "image")!. \end{itemize} \item \texttt{plot.lpp}: \begin{itemize} \item New argument \texttt{show.network}. \item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \end{itemize} \item \texttt{plot.mppm}: New argument \texttt{se}. \item \texttt{plot.msr}: \begin{itemize} \item Now handles multitype measures. \item New argument \texttt{multiplot}. 
\item New argument \texttt{massthresh}. \item New arguments \texttt{equal.markscale} and \texttt{equal.ribbon}. \end{itemize} \item \texttt{plot.onearrow:} Graphical parameters, specified when the object was created, are now taken as the defaults for graphical parameters to the plot. \item \texttt{plot.owin:} New argument \texttt{use.polypath} controls how to plot a filled polygon when it has holes. \item \texttt{plot.profilepl}: This function has now been documented, and the graphics improved. \item \texttt{plot.psp}: \begin{itemize} \item Segments can be plotted with widths proportional to their mark values. \item New argument \texttt{style}. \item New argument \texttt{col} gives control over the colour map representing the values of marks attached to the segments. \end{itemize} \item \texttt{plot.pp3}: New arguments \texttt{box.front}, \texttt{box.back} control plotting of the box. \item \texttt{plot.ppp}: \begin{itemize} \item The default colour for the points is now a transparent grey, if this is supported by the plot device. \item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \item Now recognises graphics parameters for text, such as \texttt{family} and \texttt{srt} \item When \texttt{clipwin} is given, any parts of the boundary of the window of \texttt{x} that lie inside \texttt{clipwin} will also be plotted. \item Improved placement of symbol map legend when argument \texttt{symap} is given. \end{itemize} \item \code{plot.tess}: \begin{itemize} \item This plot method can now fill each tile with a different colour. \item New arguments \code{do.col}, \code{values}, \code{col} and \code{ribargs}. Old argument \code{col} has been renamed \code{border} for consistency. \item Now generates a separate plot panel for each column of marks, if \texttt{do.col=TRUE}. 
\item New argument \texttt{multiplot}. \end{itemize} \item \texttt{plot.profilepl} ,\texttt{plot.quadratcount}, \texttt{plot.quadrattest}, \texttt{plot.tess}: Now recognise graphics parameters for text, such as \texttt{family} and \texttt{srt} \item \texttt{plot.solist}: \begin{itemize} \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.studpermutest}: This existing function now has a help file. \item \texttt{plot.symbolmap}: New argument \texttt{nsymbols} controls the number of symbols plotted. \item \code{ponderosa}: In this installed dataset, the function \code{ponderosa.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). \item \texttt{polynom}: This function now has a help file. \item \texttt{pool.fv}: \begin{itemize} \item The default plot of the pooled function no longer includes the variance curves. \item New arguments \texttt{relabel} and \texttt{variance}. \end{itemize} \item \texttt{pool.rat}: New arguments \texttt{weights}, \texttt{relabel} and \texttt{variance}. \item \texttt{ppm}: \begin{itemize} \item Argument \code{interaction} can now be a function that makes an interaction, such as \code{Poisson}, \code{Hardcore}, \code{MultiHard}. \item Argument \texttt{subset} can now be a window (class \texttt{"owin"}) specifying the sub-region of data to which the model should be fitted. \end{itemize} \item \texttt{ppm.ppp, ppm.quad}: \begin{itemize} \item New argument \texttt{emend}, equivalent to \texttt{project}. \item New arguments \texttt{subset} and \texttt{clipwin}. \end{itemize} \item \code{ppmInfluence}: The result now belongs to class \code{ppmInfluence}, for which there are methods for \code{leverage}, \code{influence}, \code{dfbetas} which extract the desired component. \item \texttt{ppp}: \begin{itemize} \item New argument \texttt{checkdup}. 
\item If the coordinate vectors \code{x} and \code{y} contain \code{NA}, \code{NaN} or infinite values, these points are deleted with a warning, instead of causing a fatal error. \end{itemize} \item \texttt{pp3}: New argument \texttt{marks}. \item \texttt{predict.kppm, residuals.kppm} Now issues a warning when the calculation ignores the cluster/Cox component and treats the model as if it were Poisson. (This currently happens in predict.kppm when se=TRUE or interval != "none", and in residuals.kppm when type != "raw"). \item \texttt{predict.lppm}: Argument \texttt{locations} can now be an \texttt{lpp} object. \item \texttt{predict.mppm}: The argument \texttt{type="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{predict.ppm}: \begin{itemize} \item Now recognises the arguments \code{dimyx} and \code{eps} for specifying the resolution of the grid of prediction points. \item New argument \code{ignore.hardcore}. \item Accelerated for models fitted with \texttt{method="VBlogi"} \end{itemize} \item \texttt{predict.rhohat}: New argument \texttt{what} determines which value should be calculated: the function estimate, the upper/lower confidence limits, or the standard error. \item \texttt{print.linim}: More information is printed. \item \texttt{print.lintess}: Output includes information about marks. \item \texttt{print.quad}: More information is printed. \item \texttt{print.rmhmodel}: More information is printed. \item \texttt{progressreport} \begin{itemize} \item Behaviour improved. \item New arguments \texttt{state}, \texttt{tick}, \texttt{showtime}. \item New option: \verb!style="tk"! \end{itemize} \item \code{pseudoR2.ppm}, \code{pseudoR2.lppm}: \begin{itemize} \item The null model now includes any offset terms, by default. \item New argument \code{keepoffset}. \end{itemize} %%Q \item \texttt{quadratcount.ppp}: Computation accelerated in some cases. 
\item \texttt{quadrat.test.ppm}: Computation accelerated in some cases. \item \texttt{quantess}: \begin{itemize} \item The covariate \texttt{Z} can now be \texttt{"rad"} or \texttt{"ang"} representing polar coordinates. \item New argument \texttt{origin} specifies the origin of polar coordinates. \item New argument \texttt{eps} controls the accuracy of the calculation. \end{itemize} \item \texttt{quantile.ewcdf}: The function is now normalised to the range \verb![0,1]! before the quantiles are computed. This can be suppressed by setting \texttt{normalise=FALSE}. \item \texttt{qqplot.ppm} Argument \texttt{expr} can now be a list of point patterns, or an envelope object containing a list of point patterns. %%R \item \texttt{rbind.hyperframe}: The result now retains the \texttt{row.names} of the original arguments. \item \texttt{rcellnumber}: New argument \texttt{mu}. \item \texttt{rebound.owin}: Now preserves unitnames of the objects. \item \texttt{rescale.owin}, \texttt{rescale.ppp}, \texttt{rescale.psp}: The geometrical type of the window is now preserved in all cases. (Previously if the window was polygonal but was equivalent to a rectangle, the rescaled window was a rectangle.) \item \texttt{rgbim, hsvim}: New argument \texttt{A} controls the alpha (transparency) channel. \item \texttt{rgb2hex, col2hex, paletteindex, is.colour, samecolour,} \texttt{complementarycolour, is.grey, to.grey} These colour tools now handle transparent colours. \item \texttt{rgb2hex}: New argument \texttt{maxColorValue} \item \texttt{relrisk.ppp}: \begin{itemize} \item If \texttt{se=TRUE} and \texttt{at="pixels"}, the result belongs to class \texttt{solist}. \item The arguments \texttt{adjust}, \texttt{edge}, \texttt{diggle} are now explicit formal arguments. \end{itemize} \item \texttt{rhohat}: \begin{itemize} \item Nonparametric maximum likelihood estimation is now supported, assuming the intensity is a monotone function of the covariate. 
\item New options \texttt{smoother="increasing"} and \texttt{smoother="decreasing"}. \item New argument \texttt{subset} allows computation for a subset of the data. \item New argument \texttt{positiveCI} specifies whether confidence limits should always be positive. \end{itemize} \item \texttt{rhohat.lpp}: New argument \texttt{random} controls placement of dummy points. \item \texttt{rlabel}: \begin{itemize} \item New arguments \texttt{nsim} and \texttt{drop}. \item \texttt{X} can now be a point pattern of any type (\texttt{ppp}, \texttt{lpp}, \texttt{pp3}, \texttt{ppx}) or a line segment pattern (\texttt{psp}). \end{itemize} \item \texttt{rLGCP}: \begin{itemize} \item Accelerated. \item This function no longer requires the package \pkg{RandomFields} to be loaded explicitly. \end{itemize} \item \texttt{rMaternI, rMaternII}: These functions can now generate random patterns in three dimensions and higher dimensions, when the argument \texttt{win} is of class \texttt{box3} or \texttt{boxx}. \item \texttt{rmh}: \begin{itemize} \item Accelerated, in the case where multiple patterns are saved using \texttt{nsave}. \item The printed output of the debugger (invoked by \texttt{snoop=TRUE}) has been improved. \end{itemize} \item \texttt{rmh.ppm, rmhmodel.ppm, simulate.ppm}: A model fitted using the \texttt{Penttinen} interaction can now be simulated. \item \texttt{rmh.default, rmhmodel.default}: \begin{itemize} \item These functions now recognise \verb!cif='penttinen'! for the Penttinen interaction. \item New arguments \texttt{nsim}, \texttt{saveinfo}. \item The printed output of the debugger (invoked by \texttt{snoop=TRUE}) has been improved. \end{itemize} \item \texttt{rmhcontrol}: \begin{itemize} \item New parameter \texttt{pstage} determines when to generate random proposal points. \item The parameter \texttt{nsave} can now be a vector of integers. \end{itemize} \item \texttt{rose.default} New argument \texttt{weights}. 
\item \texttt{rose} New arguments \texttt{start} and \texttt{clockwise} specify the convention for measuring and plotting angles. \item \texttt{rotmean}: \begin{itemize} \item New argument \texttt{padzero}. \item Default behaviour has changed. \item Improved algorithm stability. \end{itemize} \item \texttt{rpoispp}: Accelerated, when \texttt{lambda} is a pixel image. \item \texttt{rpoisppx}: New argument \code{drop}. \item \texttt{rpoisline}: Also returns information about the original infinite random lines. \item \texttt{rpoislpp}: If \texttt{lambda} is a list of \texttt{"linim"} or \texttt{"linfun"} objects, then the argument \texttt{L} can be omitted. \item \texttt{rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen:} New argument \texttt{drop}. \item \texttt{rtemper:} new argument \texttt{track}. \item \texttt{rthin} \begin{itemize} \item Accelerated, when \texttt{P} is a single number. \item \texttt{X} can now be a point pattern of any type (\texttt{ppp}, \texttt{lpp}, \texttt{pp3}, \texttt{ppx}) or a line segment pattern (\texttt{psp}). \end{itemize} \item \texttt{rThomas, rMatClust, rCauchy, rVarGamma}: \begin{itemize} \item When the model is approximately Poisson, it is simulated using rpoispp. This avoids computations which would require huge amounts of memory. New argument \texttt{poisthresh} controls this behaviour. \item New argument \texttt{saveparents}. \end{itemize} \item \texttt{runifpointOnLines}, \texttt{rpoisppOnLines}: New argument \code{drop}. \item \texttt{runifpointx}: New argument \code{drop}. %%S \item \texttt{selfcut.psp}: \begin{itemize} \item Computation accelerated. \item The result now has an attribute \texttt{"camefrom"} indicating the provenance of each segment in the result. \end{itemize} \item \texttt{setcov}: the name of the unit of length is preserved. 
\item \code{shapley}: In this installed dataset, the function \code{shapley.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). \item \texttt{shift.im}, \texttt{shift.owin}, \texttt{shift.ppp}, \texttt{shift.psp}: More options for the argument \texttt{origin}. \item Simulation: Several basic simulation algorithms have been accelerated. Consequently, simulation outcomes are not identical to those obtained with previous versions of \spst, even when the same random seed is used. To ensure compatibility with previous versions of spatstat, revert to the slower code by setting \texttt{spatstat.options(fastthin=FALSE, fastpois=FALSE)}. \item \texttt{simulate.kppm}: \begin{itemize} \item Accelerated for LGCP models. \item Additional arguments \verb!...! are now passed to the function that performs the simulation. \end{itemize} \item \texttt{simulate.ppm}: New argument \texttt{w} controls the window of the simulated patterns. New argument \texttt{verbose}. \item \texttt{Smooth.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image, a function, a numeric vector or an expression to be evaluated. \item Infinite bandwidth \texttt{sigma=Inf} is supported. \item Accelerated by about 30\% in the case where \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Now exits gracefully if any mark values are \texttt{NA}, \texttt{NaN} or \texttt{Inf}. \item New argument \texttt{geometric} supports geometric-mean smoothing. \item The arguments \texttt{adjust}, \texttt{edge}, \texttt{diggle} and \texttt{kernel} are now explicit formal arguments. \end{itemize} \item \texttt{solist}: New argument \verb!.NameBase! \item \texttt{spatialcdf}: \begin{itemize} \item Computation accelerated. 
\item The result does not inherit class \texttt{"ecdf"} if \texttt{normalise=FALSE}. \end{itemize} \item \texttt{spatstat.options} New options \texttt{fastthin} and \texttt{fastpois} enable fast simulation algorithms. Set these options to \texttt{FALSE} to reproduce results obtained with previous versions of \spst. \item \texttt{split.ppp}, \texttt{split.ppx}: The splitting variable \texttt{f} can now be a logical vector. \item \verb!split<-.ppp!: The default for argument \texttt{un} in \verb!split<-.ppp! now agrees with the default for the same argument in \texttt{split.ppp}. \item \texttt{square}: Handles a common error in the format of the arguments. \item \texttt{step}: now works for models of class \texttt{"mppm"}. \item \texttt{stieltjes}: Argument \texttt{M} can be a stepfun object (such as an empirical CDF). \item \texttt{subset.ppp}, \texttt{subset.lpp}, \texttt{subset.pp3}, \texttt{subset.ppx}: The argument \texttt{subset} can now be any argument acceptable to the \verb!"["! method. \item summary functions The argument \texttt{correction="all"} is now recognised: it selects all the available options. \begin{quote} This applies to \texttt{Fest}, \texttt{F3est}, \texttt{Gest}, \texttt{Gcross}, \texttt{Gdot}, \texttt{Gmulti}, \texttt{G3est}, \texttt{Gfox}, \texttt{Gcom}, \texttt{Gres}, \texttt{Hest}, \texttt{Jest}, \texttt{Jmulti}, \texttt{Jcross}, \texttt{Jdot}, \texttt{Jfox}, \texttt{Kest}, \texttt{Kinhom}, \texttt{Kmulti}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcom}, \texttt{Kres}, \texttt{Kmulti.inhom}, \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kscaled}, \texttt{Ksector}, \texttt{Kmark}, \texttt{K3est}, \texttt{Lscaled}, \texttt{markcorr}, \texttt{markcrosscorr}, \texttt{nnorient}, \texttt{pairorient}, \texttt{pcfinhom}, \texttt{pcfcross.inhom}, \texttt{pcfcross}, \texttt{pcf}, \texttt{Tstat}. 
\end{quote} \item \texttt{Summary.linim} family supporting \texttt{range}, \texttt{max}, \texttt{min} etc: Recognises the argument \texttt{finite} so that \texttt{range(x, finite=TRUE)} works for a linim object \texttt{x}. \item \texttt{summary.distfun}, \texttt{summary.funxy}: \begin{itemize} \item More information is printed. \item Pixel resolution can now be controlled. \end{itemize} \item \texttt{summary.kppm}: prints more information about algorithm convergence. \item \texttt{summary.lintess}: prints information about marks. \item \texttt{summary.ppm}: New argument \texttt{fine} selects the algorithm for variance estimation. \item \texttt{summary.owin}, \texttt{summary.im}: The fraction of frame area that is occupied by the window/image is now reported. \item \texttt{sumouter}: New argument \texttt{y} allows computation of asymmetric outer products. \item \texttt{symbolmap}: \begin{itemize} \item Now accepts a vector of colour values for the arguments \texttt{col}, \texttt{cols}, \texttt{fg}, \texttt{bg} if the argument \texttt{range} is given. \item New option: \texttt{shape="arrows"}. \end{itemize} %%T \item \texttt{tess}: Argument \texttt{window} is ignored when xgrid, ygrid are given. \item \texttt{texturemap}: Argument \texttt{textures} can be missing or NULL. \item \texttt{textureplot}: Argument \texttt{x} can now be something acceptable to \texttt{as.im}. \item \texttt{tilenames}, \verb!tilenames<-!: These functions are now generic, with methods for \texttt{tess} and \texttt{lintess}. \item \texttt{to.grey} New argument \texttt{transparent}. %%U \item \texttt{union.owin}: Improved behaviour when there are more than 2 windows. \item \texttt{unstack.lintess}: now handles marks. \item \texttt{update}: now works for models of class \texttt{"mppm"}. \item \texttt{update.kppm}: \begin{itemize} \item New argument \texttt{evaluate}. \item Now handles additional arguments in any order, with or without names. \item Changed arguments. \item Improved behaviour. 
\end{itemize} \item \texttt{update.ppm}: For the case \texttt{update(model, X)} where \texttt{X} is a point pattern, if the window of \texttt{X} is different from the original window, then the model is re-fitted from scratch (i.e. \texttt{use.internal=FALSE}). %%V \item \texttt{valid.ppm} This is now a method for the generic function \texttt{valid}. \item \texttt{vcov.mppm}: \begin{itemize} \item Now handles models with Gibbs interactions. \item New argument \texttt{nacoef.action} specifies what to do if some of the fitted coefficients are \texttt{NA}, \texttt{NaN} or \texttt{Inf}. \end{itemize} \item \texttt{vcov.ppm}: \begin{itemize} \item Performance slightly improved, for Gibbs models. \item New argument \texttt{nacoef.action} specifies what to do if some of the fitted model coefficients are \texttt{NA}, \texttt{NaN} or infinite. \end{itemize} %%W %%X %%Y %%Z \item \verb![<-.im! \begin{itemize} \item Accepts an array for \texttt{value}. \item The subset index \texttt{i} can now be a linear network. Then the result of \verb!x[i, drop=FALSE]! is a pixel image of class \texttt{linim}. \item New argument \texttt{drop} controls behaviour when indices are missing as in \verb!x[] <- value! \end{itemize} \item \verb![.layered!: \begin{itemize} \item Subset index \texttt{i} can now be an \texttt{owin} object. \item Additional arguments \verb!...! are now passed to other methods. \end{itemize} \item \verb![.leverage.ppm!: New argument \texttt{update}. \item \verb![.linnet!: \begin{itemize} \item New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. \item More robust against artefacts when the subset index is a pixel mask. \end{itemize} \item \verb![.linim!: More robust against artefacts. \item \verb![.lpp!: New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. 
\item \verb![.ppx!: The subset index \texttt{i} may now be a spatial domain of class \texttt{boxx} or \texttt{box3}. \item \verb![.ppp! New argument \texttt{clip} determines whether the window is clipped. \item \verb![.ppp! The previously-unused argument \texttt{drop} now determines whether to remove unused levels of a factor. \item \verb![.pp3!, \verb![.lpp!, \verb![.ppx!, \texttt{subset.ppp, subset.pp3, subset.lpp, subset.ppx}: These methods now have an argument \texttt{drop} which determines whether to remove unused levels of a factor. \item \verb![.psp!: \begin{itemize} \item accelerated. \item New argument \texttt{fragments} specifies whether to keep fragments of line segments that are cut by the new window, or only to retain segments that lie entirely inside the window. \end{itemize} \item \verb![.solist!: Subset index \texttt{i} can now be an \texttt{owin} object. \end{itemize} \begin{thebibliography}{1} \bibitem{badd10wshop} A.~Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. 
\end{thebibliography} \end{document} spatstat/vignettes/bugfixes.Rnw0000644000176200001440000007354213623712063016447 0ustar liggesusers\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} % \VignetteIndexEntry{Bugs Fixed in Spatstat} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} <>= library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Bugs fixed in \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \thispagestyle{empty} This vignette lists all \emph{important} bugs detected and fixed in the \spst\ package since 2010. It also explains how to search the list of all recorded bugs in \spst. <>= nbugs <- nrow(news(grepl("^BUG", Category), package="spatstat")) nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), package="spatstat")) @ \tableofcontents \pagebreak \section{Bug history} Thousands of bugs have been detected and fixed in \spst\ during its 25-year history. We started recording the bug history in 2010. Bugs that may have affected the user are listed in the package \texttt{NEWS} file, and can be searched using the \R\ command \texttt{news} or the \spst\ command \texttt{bugfixes}. 
To see the bugs which have just been fixed in the latest version of \spst, type <>= bugfixes @ To see all bugs which were fixed after a particular version of \spst, for example, bugs that were fixed in version \texttt{1.50-0} or later, type <>= bugfixes(sinceversion="1.50-0") @ To see all bugs in \spst\ that were fixed after a particular date, for example 30 June 2017, type <>= bugfixes(sincedate="2017-06-30") @ To see all bugs fixed after the book \cite{baddrubaturn15} was written, type <>= bugfixes("book") @ To see all bugs in the entire recorded history of \spst, type <>= bugfixes("all") @ which currently produces a list of \Sexpr{nbugs} bugs, of which \Sexpr{nbugssince} were detected after publication of the book \cite{baddrubaturn15}. \pagebreak \section{Serious bugs} Following is a list of the {\bf most serious bugs}, in decreasing order of potential impact. \newcommand\bugger[4]{% \\ {} % {\small (Bug introduced in \texttt{spatstat {#1}}, {#2}; % fixed in \texttt{spatstat {#3}}, {#4})}% } %%% LEVEL 1 \subsection{Serious Bugs, Always Wrong, Broad Impact} \begin{itemize} \item \texttt{nncross.ppp}: Results were completely incorrect if $k > 1$. \bugger{1.31-2}{april 2013}{1.35-0}{december 2013} \item \texttt{nncross.pp3}: Results were completely incorrect in some cases. \bugger{1.32-0}{august 2013}{1.34-0}{october 2013} \item \texttt{cdf.test.ppm}: Calculation of $p$-values was incorrect for Gibbs models: $1-p$ was computed instead of $p$. \bugger{1.40-0}{december 2014}{1.45-2}{may 2016} \item \texttt{Smooth.ppp}: Results of \verb!Smooth(X, at="points", leaveoneout=FALSE)! were completely incorrect. \bugger{1.20-5}{august 2010}{1.46-0}{july 2016} \item \texttt{rmh}: \begin{itemize} \item Simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as \verb!ppm(betacells, ~marks, Strauss(60))! due to a coding error in the \texttt{C} interface. 
\bugger{1.22-3}{march 2010}{1.22-3}{june 2011} \item Simulation of the Area-Interaction model was completely incorrect. \bugger{1.23-6}{october 2011}{1.31-0}{january 2013} \item Simulation of the Geyer saturation process was completely incorrect. \bugger{1.31-0}{january 2013}{1.31-1}{march 2013} \item Simulation of the Strauss-Hard Core process was partially incorrect, giving point patterns with a slightly lower intensity. \bugger{1.31-0}{january 2013}{1.37-0}{may 2014} \item Simulation of the \emph{multitype} hard core model was completely incorrect (the interaction was effectively removed, changing the model into a Poisson process). \bugger{1.31-0}{january 2013}{1.63-0}{january 2020} \item The result of simulating a model with a hard core did not necessarily respect the hard core constraint, and simulation of a model with strong inhibition did not necessarily converge. This only happened if the first order trend was large, the starting state (\texttt{n.start} or \texttt{x.start}) was not given, and the number of iterations \texttt{nrep} was not very large. It occurred because of a poor choice for the default starting state. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.40-0}, december 2014)} \item Simulation was incorrect in the case of an inhomogeneous multitype model with \texttt{fixall=TRUE} (i.e.\ with a fixed number of points of each type) if the model was segregated (i.e.\ if different types of points had different first order trend). The effect of the error was that all types of points had the same first order trend. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.43-0}, september 2015)} \item Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of \texttt{nrep}) were incorrect, while long runs were correct. 
\bugger{1.17-0}{october 2009}{1.31-1}{march 2013} \end{itemize} \item \texttt{nnmark, as.im.ssf}: If \code{marks(X)} was a matrix rather than a data frame, the results were completely incorrect. \bugger{1.32-0}{august 2013}{1.55-1}{april 2018} \item \texttt{rVarGamma}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{nu.ker}. \bugger{1.25-0}{december 2011}{1.35-0}{december 2013} \item \texttt{rCauchy}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{omega}. \bugger{1.25-0}{december 2011}{1.25-2}{january 2012} \item \texttt{lppm}: For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. \bugger{1.23-0}{july 2011}{1.30-0}{december 2012} \item \verb![.lpp!: The local coordinate \texttt{seg} was completely incorrect, when \texttt{i} was a window. \bugger{1.31-2}{april 2013}{1.45-0}{march 2016} \item \texttt{lohboot}: Implementation was completely incorrect. \bugger{1.26-1}{april 2012}{1.53-2}{october 2017} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were incorrect for non-Poisson processes due to a mathematical error. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} \end{itemize} %%% LEVEL 2 \subsection{Serious Bugs, Often Completely Wrong, Moderate Impact} \begin{itemize} \item \texttt{bw.pcf}: Results were totally incorrect due to a typo. \bugger{1.51-0}{may 2017}{1.52-0}{august 2017} \item \texttt{density.ppp}: The standard error (calculated when \texttt{se=TRUE}) was incorrect when \texttt{sigma} was a single numeric value. The output was equal to \texttt{sqrt(sigma)} times the correct answer. \bugger{1.41-1}{february 2015}{1.57-0}{october 2018} \item \texttt{rthin}: If \texttt{P} was close to 1, the result was sometimes an empty point pattern when it should have been identical to \texttt{X}. 
\bugger{1.43-0}{october 2015}{1.57-0}{october 2018} \item \texttt{predict.mppm}: If the model included random effects, and if the library \pkg{MASS} was not loaded, the predictions were on the log scale (i.e.\ they were logarithms of the correct values). \bugger{1.43-0}{october 2015}{1.55-1}{april 2018} \item \texttt{nnmap}, \texttt{nnmark}: Values were incorrect if the resulting pixel image had unequal numbers of rows and columns. \bugger{1.35-0}{december 2013}{1.55-0}{january 2018} \item \texttt{vcov.mppm}: Format was incorrect (rows/columns were omitted) in some cases. \bugger{1.45-1}{may 2016}{1.55-0}{january 2018} \item \texttt{model.matrix.ppm}, \texttt{model.frame.ppm}: Values were sometimes incorrect when applied to the result of \texttt{subfits}. To be precise, if \texttt{fit} was an \texttt{mppm} object fitted to a hyperframe that included ``design covariates'' (covariates that take a constant value in each row of the hyperframe), and if \verb!futs <- subfits(fit)!, then \verb!model.matrix(futs[[i]])! gave incorrect values in the columns corresponding to the design covariates. \bugger{1.45-1}{may 2016}{1.55-0}{january 2018} \item \texttt{predict.rhohat}, \texttt{simulate.rhohat}: Results were incorrect for a \texttt{rhohat} object computed from linear network data (class \texttt{"lpp"} or \texttt{"lppm"}). \bugger{1.31-0}{march 2013}{1.63-1}{february 2020} \item \texttt{predict.rho2hat}: Results were incorrect for a \texttt{rho2hat} object computed from a point pattern. \bugger{1.42-0}{may 2015}{1.52-0}{august 2017} \item \texttt{density.ppp}: Result was incorrect for non-Gaussian kernels when \texttt{at="points"} and \texttt{leaveoneout=FALSE}. \bugger{1.47-0}{october 2016}{1.57-0}{october 2018} \item \texttt{envelope.ppm}: If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). 
\bugger{1.23-5}{september 2011}{1.23-6}{october 2011} \item \texttt{linearK}, \texttt{linearpcf}, \texttt{linearKinhom}, \texttt{linearpcfinhom} and multitype versions: These functions were sometimes greatly underestimated when the network had segments shorter than 10 coordinate units. \bugger{1.44-0}{december 2015}{1.46-2}{july 2016} \item \texttt{nncross}, \texttt{distfun}, \texttt{AreaInter}: Results of \texttt{nncross} were possibly incorrect when \code{X} and \code{Y} did not have the same window. This bug affected values of \texttt{distfun} and may also have affected ppm objects with interaction \texttt{AreaInter}. \bugger{1.9-4}{june 2006}{1.25-2}{january 2012} \item \texttt{update.kppm}: \begin{itemize} \item Did not function correctly when several additional arguments were given. \bugger{1.42-2}{june 2015}{1.54-0}{november 2017} \item If the call to \texttt{update} did not include a formula argument or a point pattern argument, then all arguments were ignored. Example: \texttt{update(fit, improve.type="quasi")} was identical to \texttt{fit}. \bugger{1.42-2}{june 2015}{1.45-0}{march 2016} \end{itemize} \item \texttt{markcorrint}: Results were completely incorrect. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were slightly incorrect for models with a hard core, due to a mathematical error. \bugger{1.51-0}{may 2017}{1.55-1}{april 2018} \item \texttt{Ops.msr}: If the input data contained a pixel image of the smoothed density, this image was not updated; it was copied to the output unchanged. Plots of the resulting measure were incorrect. \bugger{1.52-0}{august 2017}{1.55-1}{april 2018} \item \verb![.linnet!: in calculating \verb!L[W]! where \texttt{W} is a window, the code ignored segments of \code{L} that crossed \code{W} without having a vertex in \code{W}. 
\bugger{1.53-0}{september 2017}{1.55-1}{april 2018} \item \verb!as.im.function!: if the function domain was not a rectangle and the function values were categorical (factor) values, the result was an empty image. \bugger{1.42-0}{may 2015}{1.57-0}{october 2018} \end{itemize} %%% LEVEL 3 \subsection{Bugs, Substantially Incorrect, Moderate Impact} \begin{itemize} \item \texttt{as.linnet.psp}: Sometimes produced a network with duplicated segments. [Such objects can be repaired using \texttt{repairNetwork}.] \bugger{1.41-1}{february 2015}{1.62-0}{december 2019} \item \texttt{rlpp}: The resulting pattern was unmarked even when it should have been multitype. \bugger{1.48-0}{december 2016}{1.63-0}{january 2020} \item \texttt{spatialcdf}: Argument \texttt{weights} was ignored, unless it was a fitted model. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{ppp}: Points inside the window were erroneously rejected as lying outside the window, if the window was a polygon equivalent to a rectangle with sides longer than $10^6$ units. {\small (Bug was present since the beginning. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{inside.owin}: All results were \texttt{FALSE} if the window was a polygon equivalent to a rectangle with sides longer than $10^6$ units. {\small (Bug was present since the beginning. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{sumouter}: result was incorrect (all entries were zero) if \texttt{w} was missing and \texttt{y} was given. \bugger{1.47-0}{october 2016}{1.59-0}{march 2019} \item \texttt{simulate.dppm}, \texttt{simulate.detpointprocfamily}: In dimensions higher than 2, the result was shifted so that it was centred at the origin. \bugger{1.54-0}{november 2017}{1.55-0}{january 2018} \item \texttt{integral.msr}: If the result was a matrix, it was the transpose of the correct answer. 
\bugger{1.35-0}{december 2012}{1.55-1}{april 2018} \item \texttt{density.ppp}: Values of \verb!density(X, at="points")! and \verb!Smooth(X, at="points")! were sometimes incorrect, due to omission of the contribution from the data point with the smallest $x$ coordinate. \bugger{1.26-0}{april 2012}{1.46-1}{july 2016} \item \texttt{multiplicity.default}: The first occurrence of any value in the input was incorrectly assigned a multiplicity of 1. \bugger{1.32-0}{december 2013}{1.57-1}{november 2018} \item \texttt{update.ppm}: If the argument \texttt{Q} was given, the results were usually incorrect, or an error was generated. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{subfits}: The interaction coefficients of the submodels were incorrect for Gibbs models with a multitype interaction (\texttt{MultiStrauss}, etc). \bugger{1.35-0}{december 2013}{1.45-2}{may 2016} \item \texttt{F3est}: Estimates of $F(r)$ for the largest value of $r$ were wildly incorrect. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.48-0}, december 2016)} \item \texttt{kppm}, \texttt{matclust.estpcf}, \texttt{pcfmodel}: The pair correlation function of the M\'atern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in \texttt{matclust.estpcf()} or \texttt{kppm(clusters="MatClust")}. \bugger{1.20-2}{august 2010}{1.33-0}{september 2013} \item \texttt{ppm}: Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter \texttt{sat}. \bugger{1.20-0}{july 2010}{1.31-2}{april 2013} \item \texttt{clip.infline}: Results were incorrect unless the midpoint of the window was the coordinate origin. \bugger{1.15-1}{april 2009}{1.48-0}{december 2016} \item \texttt{intensity.ppm}: Result was incorrect for Gibbs models if the model was exactly equivalent to a Poisson process (i.e. if all interaction coefficients were exactly zero). 
\bugger{1.28-1}{june 2012}{1.47-0}{october 2016} \item \texttt{idw}: Results were incorrect if \texttt{se=TRUE} and \verb!at="pixels"! and \texttt{power} was not equal to 2. The pixel values of \verb!$estimate! were all equal to zero. \bugger{1.58-0}{january 2019}{1.63-0}{january 2020} \item \texttt{funxy}: Did not correctly handle one-line functions. The resulting objects evaluated the wrong function in some cases. \bugger{1.45-0}{march 2016}{1.46-0}{july 2016} \item \texttt{kernel.moment}: Result was incorrect for \texttt{kernel="cosine"} and \texttt{kernel="optcosine"}. \bugger{1.45-2}{may 2016}{1.56-0}{june 2018} \item \verb![.msr!: Format was mangled if the subset contained exactly one quadrature point. \bugger{1.21-3}{january 2011}{1.56-0}{june 2018} \item \texttt{hyperframe}: Did not correctly handle date-time values (columns of class \texttt{"Date"}, etc). \bugger{1.19-1}{may 2010}{1.63-0}{january 2020} \item \texttt{tess}: If a list of tiles was given, and the tiles were pixel images or masks, their pixel resolutions were ignored, and reset to the default $128 \times 128$. {\small (Bug fixed in \texttt{spatstat 1.56-0}, june 2018)} \item \texttt{nnorient}: crashed if the point pattern was empty. \bugger{1.40-0}{december 2015}{1.57-0}{october 2018} \item \verb!as.im.data.frame!: Results were incorrect for factor-valued data. \bugger{1.45-2}{may 2016}{1.63-0}{january 2020} \end{itemize} %% LEVEL 4: \subsection{Partially Incorrect} \begin{itemize} \item \texttt{kppm}, \texttt{AIC}: For kppm models fitted with \verb!method='clik2'!, the resulting value of \texttt{logLik()} was equal to $1/2$ of the correct value. This would have affected model comparison using AIC, and model selection using \texttt{step}. \bugger{1.42-0}{may 2015}{1.63-0}{january 2020}. 
\item \texttt{edge.Ripley}, \texttt{Kest}, \texttt{Kinhom}: Isotropic correction weights for polygonal windows were sometimes incorrect for small radius \texttt{r} if the polygon contained many small segments or if the polygon was very long and thin. \bugger{1.60-0}{june 2019}{1.62-0}{december 2019}. \item \texttt{beachcolours}, \texttt{beachcolourmap}: The number of colours was not always equal to \texttt{ncolours}. \bugger{1.32-0}{august 2013}{1.59-0}{march 2019} \item \texttt{extractbranch.lpp}: Point pattern coordinates were sometimes erroneously set to \texttt{NA}. \bugger{1.42-0}{may 2015}{1.59-0}{march 2019} \item \texttt{rotmean}: When \texttt{result="im"} the resulting image did not have the same dimensions as the input. \bugger{1.42-2}{june 2015}{1.58-0}{january 2019} \item \texttt{quadratcount.ppp}: Sometimes issued an incorrect warning that data points were outside the tessellation, when \texttt{tess} was a tessellation represented by a pixel image. {\small (Bug fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{quadrat.test}: the $p$-value was \texttt{NA} if one of the observed counts was zero, for the Cressie-Read tests with \texttt{CR} not equal to $1$ or $-1$. \bugger{1.38-0}{august 2014}{1.59-0}{march 2019} \item \texttt{quadrat.test}: argument \texttt{CR} was ignored if \texttt{method="MonteCarlo"}. \bugger{1.38-0}{august 2014}{1.61-0}{september 2019} \item \texttt{rotmean}: If argument \texttt{origin} was given, and if \texttt{result="im"} was specified, the resulting image was wrongly displaced. \bugger{1.42-2}{june 2015}{1.58-0}{january 2019} \item \texttt{runifpointx}: Result was mangled when \texttt{n=0} or \texttt{n=1}. \bugger{1.50-0}{march 2017}{1.58-0}{january 2019} \item \texttt{model.matrix.ppm}: The attribute \texttt{assign} was omitted in some cases. \bugger{1.45-1}{may 2016}{1.55-0}{january 2018} \item \texttt{model.matrix.mppm}: Sometimes returned a matrix with the wrong number of rows. 
\bugger{1.55-0}{january 2018}{1.63-0}{january 2020} \item \texttt{density.ppp}: If the smoothing bandwidth \texttt{sigma} was very small (e.g.\ less than the width of a pixel), results were inaccurate if the default resolution was used, and completely incorrect if a user-specified resolution was given. \bugger{1.26-0}{april 2012}{1.52-0}{august 2017} \item \texttt{selfcrossing.psp}: $y$ coordinate values were incorrect. \bugger{1.23-2}{august 2011}{1.25-3}{february 2012} \item \texttt{Geyer}: For point process models with the \texttt{Geyer} interaction, \texttt{vcov.ppm} and \texttt{suffstat} sometimes gave incorrect answers. \bugger{1.27-0}{may 2012}{1.30-0}{december 2012} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Calculations were incorrect for a Geyer model fitted using an edge correction other than \texttt{"border"} or \texttt{"none"}. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were slightly incorrect for models fitted using the border correction. \bugger{1.25-0}{december 2011}{1.54-0}{november 2017} \item \texttt{leverage.ppm}: The mean leverage value (shown as a contour level in \texttt{plot.leverage.ppm}) was slightly incorrect for Gibbs models. \bugger{1.25-0}{december 2011}{1.54-0}{november 2017} \item \texttt{vcov.ppm}, \texttt{suffstat}: These functions sometimes gave incorrect values for marked point process models. \bugger{1.27-0}{may 2012}{1.29-0}{october 2012} \item \texttt{diagnose.ppm}: When applied to a model obtained from \texttt{subfits()}, in the default case (\texttt{oldstyle=FALSE}) the variance calculations were incorrect. Consequently the dotted lines representing significance bands were incorrect. An error or warning about negative variances occurred sometimes. However, calculations with \texttt{oldstyle=TRUE} were correct. The default has now been changed to \texttt{oldstyle=TRUE} for such models. 
\bugger{1.35-0}{december 2013}{1.45-0}{march 2016} \item \texttt{Smooth.ppp}: Results for \verb!at="points"! were garbled, for some values of \texttt{sigma}, if \texttt{X} had more than one column of marks. \bugger{1.38-0}{october 2014}{1.46-0}{july 2016} \item \texttt{linearK}, \texttt{linearKinhom}: If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear $K$ function. \bugger{1.23-0}{july 2011}{1.27-0}{may 2012} \item \texttt{Kinhom}, \texttt{Linhom}: the results were not renormalised (even if \texttt{renormalise=TRUE}) in some cases. \bugger{1.21-0}{december 2010}{1.37-0}{may 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Ignored argument \texttt{reciplambda2} in some cases. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Calculations were incorrect if \texttt{lambda} was a fitted point process model. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{integral.linim}, \texttt{integral.linfun}: \begin{itemize} \item results were inaccurate because of a bias in the distribution of sample points. \bugger{1.41-0}{february 2015}{1.47-0}{october 2016} \item results were inaccurate if many of the segment lengths were shorter than the width of a pixel. \bugger{1.41-0}{february 2015}{1.48-0}{december 2016} \item results were wildly inaccurate in some extreme cases where many segments were very short. \bugger{1.41-0}{february 2015}{1.54-0}{november 2017} \end{itemize} \item \texttt{predict.ppm}: Calculation of the conditional intensity omitted the edge correction if \texttt{correction='translate'} or \texttt{correction='periodic'}. \bugger{1.17-0}{october 2009}{1.31-3}{may 2013} \item \texttt{varblock}: Calculations were incorrect if more than one column of edge corrections was computed. 
\bugger{1.21-1}{november 2010}{1.39-0}{october 2014} \item \texttt{scan.test} Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). \bugger{1.24-1}{october 2011}{1.26-1}{april 2012} \item \texttt{relrisk}: When \verb!at="pixels"!, a small fraction of pixel values were sometimes wildly inaccurate, due to numerical errors. This affected the range of values in the result, and therefore the appearance of plots. {\small (Bug fixed in \texttt{spatstat 1.40-0}, december 2014)} \item \texttt{predict.slrm}: Results of \texttt{predict(object, newdata)} were incorrect if the spatial domain of \texttt{newdata} was larger than the original domain. \bugger{1.21-0}{november 2010}{1.25-3}{february 2012} \item \texttt{Lest}: The variance approximations (Lotwick-Silverman and Ripley) obtained with \texttt{var.approx=TRUE} were incorrect for \texttt{Lest} (although they were correct for \texttt{Kest}) due to a coding error. \bugger{1.24-1}{october 2011}{1.24-2}{november 2011} \item \texttt{bw.diggle}: Bandwidth was too large by a factor of 2. \bugger{1.23-4}{september 2011}{1.23-5}{september 2011} \item pair correlation functions (\texttt{pcf.ppp}, \texttt{pcfdot}, \texttt{pcfcross} etc:) The result had a negative bias at the maximum $r$ value, because contributions to the pcf estimate from interpoint distances greater than \texttt{max(r)} were mistakenly omitted. {\small (Bugs fixed in \texttt{spatstat 1.35-0}, december 2013)} \item \texttt{Kest}, \texttt{Lest}: Gave incorrect values in very large datasets, due to numerical overflow. `Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] \item \texttt{bw.relrisk}: Implementation of \texttt{method="weightedleastsquares"} was incorrect and was equivalent to \texttt{method="leastsquares"}. 
\bugger{1.21-0}{november 2010}{1.23-4}{september 2011} \item \texttt{triangulate.owin}: Results were incorrect in some special cases. \bugger{1.42-2}{june 2015}{1.44-0}{december 2015} \item \texttt{crosspairs}: If \texttt{X} and \texttt{Y} were identical point patterns, the result was not necessarily symmetric (on some machines) due to numerical artifacts. \bugger{1.35-0}{december 2013}{1.44-0}{december 2015} \item \texttt{bdist.tiles}: Values were incorrect in some cases due to numerical error. {\small (Bug fixed in \texttt{spatstat 1.29-0}, october 2012)} \item \texttt{Kest.fft}: Result was incorrectly normalised. \bugger{1.21-2}{january 2011}{1.44-0}{december 2015} \item \texttt{crossdist.ppp}: Ignored argument \texttt{squared} if \texttt{periodic=FALSE}. {\small (Bug fixed in \texttt{spatstat 1.38-0}, july 2014)} \item polygon geometry: The point-in-polygon test gave the wrong answer in some boundary cases. {\small (Bug fixed in \texttt{spatstat 1.23-2}, august 2011)} \item \texttt{MultiStraussHard}: If a fitted model with \texttt{MultiStraussHard} interaction was invalid, \texttt{project.ppm} sometimes yielded a model that was still invalid. {\small (Bug fixed in \texttt{spatstat 1.42-0}, may 2015)} \item \texttt{pool.envelope}: Did not always respect the value of \texttt{use.theory}. \bugger{1.23-5}{september 2011}{1.43-0}{september 2015} \item \texttt{nncross.lpp}, \texttt{nnwhich.lpp}, \texttt{distfun.lpp}: Sometimes caused a segmentation fault. \bugger{1.44-0}{december 2015}{1.44-1}{december 2015} \item \texttt{anova.ppm}: If a single \texttt{object} was given, and it was a Gibbs model, then \texttt{adjust} was effectively set to \texttt{FALSE}. \bugger{1.39-0}{october 2014}{1.44-1}{december 2015} \item \verb![.linim!: the result sometimes had the wrong class. \bugger{1.53-0}{september 2017}{1.55-1}{april 2015} \item \verb![.linim!: factor values were erroneously converted to integers, in some cases. 
\bugger{1.53-0}{september 2017}{1.61-0}{september 2019} \item \verb!is.subset.owin!: sometimes gave the wrong result for polygonal windows due to numerical rounding error. {\small (Bug was always present. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{plot.tess}: the legend showed the tile names in lexicographical order, rather than their original order. \bugger{1.55-1}{april 2018}{1.59-0}{march 2019} \item \texttt{rThomas}, \texttt{rMatClust}, \texttt{rCauchy}, \texttt{rVarGamma}: If the simulation window was not a rectangle, the attribute \texttt{Lambda} was a numeric vector, rather than a pixel image as intended. \bugger{1.43-0}{october 2015}{1.59-0}{march 2019} \item \texttt{effectfun}: In a multitype point process model, \texttt{effectfun} ignored any user-specified value of \texttt{marks}. \bugger{1.52-0}{august 2017}{1.61-0}{september 2019} \item \verb!"[<-.hyperframe"!: Some classes of objects were not handled correctly. \bugger{1.37-0}{may 2014}{1.61-0}{september 2019} \item \texttt{relrisk.ppp}: Crashed if there were more than 2 types of points and \texttt{method = "leastsquares"} or \texttt{method = "weightedleastsquares"}. \bugger{1.23-4}{september 2011}{1.63-0}{january 2020} \item \texttt{nncross.ppp}: Format of output was incorrect if \texttt{X} was an empty pattern. \bugger{1.56-0}{june 2018}{1.63-0}{january 2020} \item \texttt{rmh}, \texttt{rmh.default}: For a marked point process, the debugger did not display the marks. (The \texttt{rmh} debugger is invoked by calling \texttt{rmh} with \texttt{snoop=TRUE}). \bugger{1.31-1}{march 2013}{1.63-0}{january 2020} \end{itemize} \begin{thebibliography}{1} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. 
\end{thebibliography} \end{document} spatstat/vignettes/hexagon.pdf0000644000176200001440000000473213115225157016261 0ustar liggesusers%PDF-1.4 %쏢 5 0 obj <> stream x-1As^1/@`X^` 2E0T YA=qNOc?4M 9,T;SvzI!ŕehV)OkbJ#S|8xh+] bendstream endobj 6 0 obj 118 endobj 4 0 obj <> /Contents 5 0 R >> endobj 3 0 obj << /Type /Pages /Kids [ 4 0 R ] /Count 1 >> endobj 1 0 obj <> endobj 7 0 obj <>endobj 8 0 obj <> endobj 9 0 obj <>stream 2013-12-23T19:49:36+08:00 2013-12-23T19:49:36+08:00 fig2dev Version 3.2 Patchlevel 5d hexagon.fig endstream endobj 2 0 obj <>endobj xref 0 10 0000000000 65535 f 0000000410 00000 n 0000001982 00000 n 0000000351 00000 n 0000000222 00000 n 0000000015 00000 n 0000000203 00000 n 0000000474 00000 n 0000000515 00000 n 0000000544 00000 n trailer << /Size 10 /Root 1 0 R /Info 2 0 R /ID [<7169BA68125AE1AEC0984268ECC4E10A><7169BA68125AE1AEC0984268ECC4E10A>] >> startxref 2169 %%EOF spatstat/vignettes/datasets.Rnw0000644000176200001440000006437413417031501016436 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Datasets Provided for the Spatstat Package} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\sdat}{\pkg{spatstat.data}} \newcommand{\Sdat}{\pkg{Spatstat.data}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") spatstat.options(transparent=FALSE) options(useFancyQuotes=FALSE) @ \title{Datasets provided for \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version 
\texttt{\Sexpr{sversion}}} \maketitle This document is an overview of the spatial datasets that are provided for the \spst\ package. To flick through a nice display of all the data sets that come with \spst\ type \texttt{demo(data)}. To see information about a given data set, type \texttt{help({\em name})} where \emph{name} is the name of the data set. To plot a given data set, type \texttt{plot({\em name})}. Datasets in \spst\ are ``lazy-loaded'', which means that they can be accessed simply by typing their name. Not all packages do this; in some packages you have to type \texttt{data({\em name})} in order to access a data set. To list all the datasets in \spst, you need to type \texttt{data(package="spatstat.data")}. This is because, for efficiency, the datasets are actually installed in a sub-package \sdat. This is the only time you should ever need to mention \sdat\ explicitly. When the \spst\ package is loaded by the command \texttt{library(spatstat)}, the sub-package \sdat\ is automatically loaded. 
\section{List of datasets} \subsection{Point patterns in 2D} Here is a list of the standard point pattern data sets that are supplied with the current installation of \sdat: \newcommand{\recto}{\framebox{\hphantom{re}\vphantom{re}}} \newcommand{\irregpoly}{\includegraphics*[width=6mm]{irregpoly}} \newcommand{\convpoly}{\includegraphics*[width=4mm]{hexagon}} \newcommand{\disc}{$\bigcirc$} \newcommand{\nomarks}{$\cdot$} \newcommand{\nocov}{$\cdot$} \begin{tabular}{l|l|ccc} {\sf name} & {\sf description} & {\sf marks} & {\sf covariates} & {\sf window} \\ \hline {\tt amacrine} & rabbit amacrine cells & cell type & \nocov & \recto \\ {\tt anemones} & sea anemones & diameter & \nocov & \recto \\ {\tt ants} & ant nests& species & zones & \convpoly \\ {\tt bdspots} & breakdown spots & \nomarks & \nocov & \disc \\ {\tt bei} & rainforest trees & \nomarks & topography & \recto \\ {\tt betacells} & cat retinal ganglia & cell type, area & \nocov & \recto \\ {\tt bramblecanes} & bramble canes & age & \nocov & \recto \\ {\tt bronzefilter} & bronze particles & diameter & \nocov & \recto \\ {\tt cells} & biological cells & \nomarks &\nocov & \recto \\ {\tt chorley} & cancers & case/control &\nocov & \irregpoly \\ {\tt clmfires} & forest fires & cause, size, date & \shortstack[c]{elevation, orientation,\\ slope, land use} & \irregpoly \\ {\tt copper} & copper deposits & \nomarks & fault lines & \recto \\ {\tt demopat} & artificial data & type & \nocov & \irregpoly \\ {\tt finpines} & trees & diam, height & \nocov & \recto \\ {\tt gordon} & people in a park & \nomarks & \nocov & \irregpoly \\ {\tt gorillas} & gorilla nest sites & group, season & \shortstack[c]{terrain, vegetation,\\ heat, water} & \irregpoly \\ {\tt hamster} & hamster tumour cells & cell type &\nocov & \recto \\ {\tt humberside} & child leukaemia & case/control & \nocov & \irregpoly\\ {\tt hyytiala} & mixed forest & species &\nocov & \recto \\ {\tt japanesepines} & Japanese pines & \nomarks &\nocov & \recto \\ {\tt 
lansing} & mixed forest & species & \nocov & \recto \\ {\tt longleaf} & trees & diameter & \nocov & \recto \\ {\tt mucosa} & gastric mucosa cells & cell type & \nocov & \recto \\ {\tt murchison} & gold deposits & \nomarks & faults, rock type & \irregpoly \\ {\tt nbfires} & wildfires & several & \nocov & \irregpoly \\ {\tt nztrees} & trees & \nomarks & \nocov & \recto \\ {\tt paracou} & trees & adult/juvenile & \nocov & \recto \\ {\tt ponderosa} & trees & \nomarks & \nocov & \recto \\ {\tt redwood} & saplings & \nomarks & \nocov & \recto \\ {\tt redwood3} & saplings & \nomarks & \nocov & \recto \\ {\tt redwoodfull} & saplings & \nomarks & zones & \recto \\ {\tt shapley} & galaxies & magnitude, recession, SE & \nocov & \convpoly \\ {\tt simdat} & simulated pattern & \nomarks & \nocov & \recto \\ {\tt sporophores} & fungi & species & \nocov & \disc \\ {\tt spruces} & trees & diameter & \nocov & \recto \\ {\tt swedishpines} & trees & \nomarks & \nocov & \recto \\ {\tt urkiola} & mixed forest & species & \nocov & \irregpoly \\ {\tt vesicles} & synaptic vesicles & \nomarks & zones & \irregpoly \\ {\tt waka} & trees & diameter & \nocov & \recto \\ \hline \end{tabular} \bigskip \noindent The shape of the window containing the point pattern is indicated by the symbols \recto\ (rectangle), \disc\ (disc), \convpoly\ (convex polygon) and \irregpoly\ (irregular polygon). Additional information about the data set \texttt{\em name} may be stored in a separate list \texttt{{\em name}.extra}. 
Currently these are the available options: \begin{tabular}[!h]{ll} {\sc Name} & {\sc Contents} \\ \hline {\tt ants.extra} & field and scrub subregions; \\ & additional map elements; plotting function \\ {\tt bei.extra} & covariate images \\ {\tt chorley.extra} & incinerator location; plotting function \\ {\tt gorillas.extra} & covariate images\\ {\tt nbfires.extra} & inscribed rectangle; border type labels \\ {\tt ponderosa.extra} & data points of interest; plotting function\\ {\tt redwoodfull.extra} & subregions; plotting function \\ {\tt shapley.extra} & individual survey fields; plotting function \\ {\tt vesicles.extra} & anatomical regions \\ \hline \end{tabular} For demonstration and instruction purposes, raw data files are available for the datasets \texttt{vesicles}, \texttt{gorillas} and \texttt{osteo}. \subsection{Other Data Types} There are also the following spatial data sets which are not 2D point patterns: \begin{tabular}[c]{l|l|l} {\sf name} & {\sf description} & {\sf format} \\ \hline {\tt austates} & Australian states & tessellation \\ {\tt cetaceans} & marine survey & replicated 2D point patterns \\ {\tt chicago} & crimes & point pattern on linear network \\ {\tt demohyper} & simulated data & replicated 2D point patterns with covariates\\ {\tt dendrite} & dendritic spines & point pattern on linear network \\ {\tt flu} & virus proteins & replicated 2D point patterns \\ {\tt heather} & heather mosaic & binary image (three versions) \\ {\tt osteo} & osteocyte lacunae & replicated 3D point patterns with covariates\\ {\tt pyramidal} & pyramidal neurons & replicated 2D point patterns in 3 groups\\ {\tt residualspaper} & data \& code from Baddeley et al (2005) & 2D point patterns, \R\ function \\ {\tt simba} & simulated data & replicated 2D point patterns in 2 groups\\ {\tt spiders} & spider webs & point pattern on linear network \\ {\tt waterstriders} & insects on water & replicated 2D point patterns\\ \hline \end{tabular} Additionally there is a dataset 
\texttt{Kovesi} containing several colour maps with perceptually uniform contrast. \section{Information on each dataset} Here we give basic information about each dataset. For further information, consult the help file for the particular dataset. <>= opa <- par() ## How to set all margins to zero and eliminate all outer spaces zeromargins <- function() { par( mar=rep(0,4), omd=c(0,1,0,1), xaxs="i", yaxs="i" ) invisible(NULL) } ## Set 'mar' setmargins <- function(...) { x <- c(...) x <- rep(x, 4)[1:4] par(mar=x) invisible(NULL) } @ \subsubsection*{\texttt{amacrine}: Amacrine cells} Locations of displaced amacrine cells in the retina of a rabbit. There are two types of points, ``on'' and ``off''. \SweaveOpts{width=5.5,height=3}\setkeys{Gin}{width=0.8\textwidth} <>= plot(amacrine) @ <>= setmargins(0,1,2,0) plot(amacrine) @ \subsubsection*{\texttt{anemones}: Sea Anemones} These data give the spatial locations and diameters of sea anemones on a boulder near sea level. \SweaveOpts{width=7,height=4.5}\setkeys{Gin}{width=0.8\textwidth} <>= plot(anemones, markscale=1) @ <>= setmargins(0,0,2,0) plot(anemones, markscale=1) @ \subsubsection*{\texttt{ants}: Ants' nests} Spatial locations of nests of two species of ants at a site in Greece. The full dataset (supplied here) has an irregular polygonal boundary, while most analyses have been confined to two rectangular subsets of the pattern (also supplied here). % Parameters for Ants data with key at right \SweaveOpts{width=6.3,height=4}\setkeys{Gin}{width=0.7\textwidth} <>= ants.extra$plotit() @ %$ <>= setmargins(0,0,1,0) ants.extra$plotit() @ %$ \subsubsection*{\texttt{austates}: Australian states} The states and large mainland territories of Australia are represented as polygonal regions forming a tessellation. <>= plot(austates) @ \subsubsection*{\texttt{bdspots}: Breakdown spots} A list of three point patterns, each giving the locations of electrical breakdown spots on a circular electrode in a microelectronic capacitor. 
\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=\textwidth} <>= plot(bdspots, equal.scales=TRUE, pch="+", panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ <>= zeromargins() plot(bdspots, equal.scales=TRUE, pch="+", main="", mar.panel=0, hsep=1, panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ \subsubsection*{\texttt{bei}: Beilschmiedia data} Locations of 3605 trees in a tropical rain forest. Accompanied by covariate data giving the elevation (altitude) and slope of elevation in the study region. \SweaveOpts{width=12,height=6}\setkeys{Gin}{width=0.8\textwidth} <>= plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= setmargins(0,0,2,0) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= M <- persp(bei.extra$elev, theta=-45, phi=18, expand=7, border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE, main="") perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3) @ \subsubsection*{\texttt{betacells}: Beta ganglion cells} Locations of beta ganglion cells in cat retina, each cell classified as `on' or `off' and also labelled with the cell profile area. <>= plot(betacells) @ \subsubsection*{\texttt{bramblecanes}: Bramble canes} <>= plot(bramblecanes, cols=1:3) @ <>= plot(split(bramblecanes)) @ \subsubsection*{\texttt{bronzefilter}: Bronze filter section profiles} Spatially inhomogeneous pattern of circular section profiles of particles, observed in a longitudinal plane section through a gradient sinter filter made from bronze powder. <>= plot(bronzefilter,markscale=2) @ \subsubsection*{\texttt{cells}: Biological cells} Locations of the centres of 42 biological cells observed under optical microscopy in a histological section. Often used as a demonstration example. <>= plot(cells) @ \subsubsection*{\texttt{cetaceans}: Survey of marine species} Recorded sightings of whales, dolphins and other marine species in a series of surveys. Replicated 2D marked point patterns. 
<>= plot(cetaceans.extra$patterns, main="Cetaceans data", cols=1:5, hsep=1) @ \subsubsection*{\texttt{chicago}: Chicago crimes} Locations (street addresses) of crimes reported in a two-week period in an area close to the University of Chicago. A multitype point pattern on a linear network. <>= plot(chicago, main="Chicago Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) @ \subsubsection*{\texttt{chorley}: Chorley-Ribble cancer data} Spatial locations of cases of cancer of the larynx and cancer of the lung, and the location of a disused industrial incinerator. A marked point pattern, with an irregular window and a simple covariate. <>= chorley.extra$plotit() @ %$ \subsubsection*{\texttt{clmfires}: Castilla-La Mancha Fires} Forest fires in the Castilla-La Mancha region of Spain between 1998 and 2007. A point pattern with 4 columns of marks: \begin{tabular}{ll} \texttt{cause} & cause of fire\\ \texttt{burnt.area} & total area burned, in hectares \\ \texttt{date} & date of fire \\ \texttt{julian.date} & date of fire in days since 1.1.1998 \end{tabular} <>= plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") @ The accompanying dataset \texttt{clmfires.extra} is a list of two items \texttt{clmcov100} and \texttt{clmcov200} containing covariate information for the entire Castilla-La Mancha region. Each of these two elements is a list of four pixel images named \texttt{elevation}, \texttt{orientation}, \texttt{slope} and \texttt{landuse}. <>= plot(clmfires.extra$clmcov200, main="Covariates for forest fires") @ %$ \subsubsection*{\texttt{copper}: Queensland copper data} These data come from an intensive geological survey in central Queensland, Australia. They consist of 67 points representing copper ore deposits, and 146 line segments representing geological `lineaments', mostly faults. 
<>= plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) @ \subsubsection*{\texttt{demohyper}} A synthetic example of a \texttt{hyperframe} for demonstration purposes. <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) @ \subsubsection*{\texttt{demopat}} A synthetic example of a point pattern for demonstration purposes. <>= plot(demopat) @ \subsubsection*{\texttt{dendrite}} Dendrites are branching filaments which extend from the main body of a neuron (nerve cell) to propagate electrochemical signals. Spines are small protrusions on the dendrites. This dataset gives the locations of 566 spines observed on one branch of the dendritic tree of a rat neuron. The spines are classified according to their shape into three types: mushroom, stubby or thin. <>= plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) @ \subsubsection*{\texttt{finpines}: Finnish pine saplings} Locations of 126 pine saplings in a Finnish forest, their heights and their diameters. <>= plot(finpines, main="Finnish pines") @ \subsubsection*{\texttt{flu}: Influenza virus proteins} The \texttt{flu} dataset contains replicated spatial point patterns giving the locations of two different virus proteins on the membranes of cells infected with influenza virus. It is a \texttt{hyperframe} containing point patterns and explanatory variables. <>= wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) @ \subsubsection*{\texttt{gordon}: People in Gordon Square} Locations of people sitting on a grass patch on a sunny afternoon. <>= plot(gordon, main="People in Gordon Square", pch=16) @ \subsubsection*{\texttt{gorillas}: Gorilla nesting sites} Locations of nesting sites of gorillas, and associated covariates, in a National Park in Cameroon. 
\texttt{gorillas} is a marked point pattern (object of class \texttt{"ppp"}) representing nest site locations. \texttt{gorillas.extra} is a named list of 7 pixel images (objects of class \texttt{"im"}) containing spatial covariates. It also belongs to the class \texttt{"listof"}. <>= plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") @ The \texttt{vegetation} covariate is also available as a raw ASCII format file, <>= system.file("rawdata/gorillas/vegetation.asc", package="spatstat") @ \subsubsection*{\texttt{hamster}: Hamster kidney cells} Cell nuclei in hamster kidney, each nucleus classified as either `dividing' or `pyknotic'. A multitype point pattern. <>= plot(hamster, cols=c(2,4)) @ \subsubsection*{\texttt{heather}: Heather mosaic} The spatial mosaic of vegetation of the heather plant, recorded in a 10 by 20 metre sampling plot in Sweden. A list with three entries, representing the same data at different spatial resolutions. <>= plot(heather) @ \subsubsection*{\texttt{humberside}: Childhood Leukemia and Lymphoma} Spatial locations of cases of childhood leukaemia and lymphoma, and randomly-selected controls, in North Humberside. A marked point pattern. <>= plot(humberside) @ The dataset \texttt{humberside.convex} is an object of the same format, representing the same point pattern data, but contained in a larger, 5-sided convex polygon. \subsubsection*{\texttt{hyytiala}: Mixed forest} Spatial locations and species classification for trees in a Finnish forest. <>= plot(hyytiala, cols=2:5) @ \subsubsection*{\texttt{japanesepines}: Japanese black pine saplings} Locations of Japanese black pine saplings in a square sampling region in a natural forest. Often used as a standard example. <>= plot(japanesepines) @ \subsubsection*{\texttt{lansing}: Lansing Woods} Locations and botanical classification of trees in a forest. A multitype point pattern with 6 different types of points. Includes duplicated points. 
<>= plot(lansing) @ <>= plot(split(lansing)) @ \subsubsection*{\texttt{longleaf}: Longleaf Pines} Locations and diameters of Longleaf pine trees. <>= plot(longleaf) @ \subsubsection*{\texttt{mucosa}: Gastric Mucosa Cells} A bivariate inhomogeneous point pattern, giving the locations of the centres of two types of cells in a cross-section of the gastric mucosa of a rat. <>= plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) @ \subsubsection*{\texttt{murchison}: Murchison Gold Deposits} Spatial locations of gold deposits and associated geological features in the Murchison area of Western Australia. A list of three elements: \begin{itemize} \item \texttt{gold}, the point pattern of gold deposits; \item \texttt{faults}, the line segment pattern of geological faults; \item \texttt{greenstone}, the subregion of greenstone outcrop. \end{itemize} <>= plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") @ \subsubsection*{\texttt{nbfires}: New Brunswick Fires} Fires in New Brunswick (Canada) with marks giving information about each fire. <>= plot(nbfires, use.marks=FALSE, pch=".") @ <>= plot(split(nbfires), use.marks=FALSE, chars=".") @ <>= par(mar=c(0,0,2,0)) plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") @ \subsubsection*{\texttt{nztrees}: New Zealand Trees} Locations of trees in a forest plot in New Zealand. Often used as a demonstration example. <>= plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) @ \subsubsection*{\texttt{osteo}: Osteocyte Lacunae} Replicated three-dimensional point patterns: the three-dimensional locations of osteocyte lacunae observed in rectangular volumes of solid bone using a confocal microscope. 
A \texttt{hyperframe} containing 3D point patterns and explanatory variables. <>= plot(osteo[1:10,], main.panel="", pch=21, bg='white') @ For demonstration and instruction purposes, the raw data from the 36th point pattern are available in a plain ascii file in the \texttt{spatstat} installation, <>= system.file("rawdata/osteo/osteo36.txt", package="spatstat") @ \subsubsection*{\texttt{paracou}: Kimboto trees} Point pattern of adult and juvenile Kimboto trees recorded at Paracou in French Guiana. A bivariate point pattern. <>= plot(paracou, cols=2:3, chars=c(16,3)) @ \subsubsection*{\texttt{ponderosa}: Ponderosa Pines} Locations of Ponderosa Pine trees in a forest. Several special points are identified. <>= ponderosa.extra$plotit() @ %$ \subsubsection*{\texttt{pyramidal}: Pyramidal Neurons in Brain} Locations of pyramidal neurons in sections of human brain. There is one point pattern from each of 31 human subjects. The subjects are divided into three groups: controls (12 subjects), schizoaffective (9 subjects) and schizophrenic (10 subjects). <>= pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") @ \subsubsection*{\texttt{redwood}, \texttt{redwood3}, \texttt{redwoodfull}: Redwood seedlings and saplings} California Redwood seedlings and saplings in a forest. There are two versions of this dataset: \texttt{redwood} and \texttt{redwoodfull}. The \texttt{redwoodfull} dataset is the full data. It is spatially inhomogeneous in density and spacing of points. The \texttt{redwood} dataset is a subset of the full data, selected because it is apparently homogeneous, and has often been used as a demonstration example. This comes in two versions commonly used in the literature: \texttt{redwood} (coordinates given to 2 decimal places) and \texttt{redwood3} (coordinates given to 3 decimal places). 
<>= plot(redwood) plot(redwood3, add=TRUE, pch=20) @ <>= redwoodfull.extra$plotit() @ %$ \subsubsection*{\texttt{residualspaper}: Data from residuals paper} Contains the point patterns used as examples in \begin{quote} A. Baddeley, R. Turner, J. M{\o}ller and M. Hazelton (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \textbf{67}, 617--666 \end{quote} along with {\sf R} code. <>= plot(as.solist(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), main="") @ \subsubsection*{\texttt{shapley}: Shapley Galaxy Concentration} Sky positions of 4215 galaxies in the Shapley Supercluster (mapped by radioastronomy). <>= shapley.extra$plotit(main="Shapley") @ %$ \subsubsection*{\texttt{simdat}: Simulated data} Another simulated dataset used for demonstration purposes. <>= plot(simdat) @ \subsubsection*{\texttt{spiders}: Spider webs} Spider webs across the mortar lines of a brick wall. A point pattern on a linear network. <>= plot(spiders, pch=16, show.window=FALSE) @ \subsubsection*{\texttt{sporophores}: Sporophores} Sporophores of three species of fungi around a tree. <>= plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) @ \subsubsection*{\texttt{spruces}: Spruces in Saxony} Locations of Norwegian spruce trees in a natural forest stand in Saxonia, Germany. Each tree is marked with its diameter at breast height. <>= plot(spruces, maxsize=min(nndist(spruces))) @ \subsubsection*{\texttt{swedishpines}: Swedish Pines} Locations of pine saplings in a Swedish forest. Often used as a demonstration example. <>= plot(swedishpines) @ \subsubsection*{\texttt{urkiola}: trees in a wood} Locations of birch and oak trees in a secondary wood in Urkiola Natural Park (Basque country, northern Spain). Irregular window, bivariate point pattern. 
<>= plot(urkiola, cex=0.5, cols=2:3) @ \subsubsection*{\texttt{waka}: trees in Waka National Park} Spatial coordinates of each tree, marked by the tree diameter at breast height. <>= par(mar=c(0,0,2,0)) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) @ \subsubsection*{\texttt{vesicles}: synaptic vesicles} Point pattern of synaptic vesicles observed in rat brain tissue. <>= v <- rotate(vesicles, pi/2) ve <- lapply(vesicles.extra, rotate, pi/2) plot(v, main="Vesicles") plot(ve$activezone, add=TRUE, lwd=3) @ The auxiliary dataset \texttt{vesicles.extra} is a list with entries\\ \begin{tabular}{ll} \texttt{presynapse} & outer polygonal boundary of presynapse \\ \texttt{mitochondria} & polygonal boundary of mitochondria \\ \texttt{mask} & binary mask representation of vesicles window \\ \texttt{activezone} & line segment pattern representing the active zone. \end{tabular} For demonstration and training purposes, the raw data files for this dataset are also provided in the \pkg{spatstat} package installation:\\ \begin{tabular}{ll} \texttt{vesicles.txt} & spatial locations of vesicles \\ \texttt{presynapse.txt} & vertices of \texttt{presynapse} \\ \texttt{mitochondria.txt} & vertices of \texttt{mitochondria} \\ \texttt{vesiclesimage.tif} & greyscale microscope image \\ \texttt{vesiclesmask.tif} & binary image of \texttt{mask} \\ \texttt{activezone.txt} & coordinates of \texttt{activezone} \end{tabular} The files are in the folder \texttt{rawdata/vesicles} in the \texttt{spatstat} installation directory. The precise location of the files can be obtained using \texttt{system.file}, for example <>= system.file("rawdata/vesicles/mitochondria.txt", package="spatstat") @ \subsubsection*{\texttt{waterstriders}: Insects on a pond} Three independent replications of a point pattern formed by insects on the surface of a pond. 
<>= plot(waterstriders) @ \end{document} spatstat/vignettes/irregpoly.pdf0000644000176200001440000000524113115225157016640 0ustar liggesusers%PDF-1.4 %쏢 5 0 obj <> stream xm=n1 =O0?}n #EJH6@ob%^EsMo? 'P*fvjqp}]",D <qe]X/pRiuS\ *qX2B*XkӜ\lDU1J卼PX]rrZ0CP$!7D,(ƃ[Q5C2)RHQ5A70=x"EuZ1m=@4)YJ9VL,6}9f,q^ѕ:'1V8'3&R[UXߑX |endstream endobj 6 0 obj 311 endobj 4 0 obj <> /Contents 5 0 R >> endobj 3 0 obj << /Type /Pages /Kids [ 4 0 R ] /Count 1 >> endobj 1 0 obj <> endobj 7 0 obj <>endobj 8 0 obj <> endobj 9 0 obj <>stream 2013-12-23T19:50:47+08:00 2013-12-23T19:50:47+08:00 fig2dev Version 3.2 Patchlevel 5d irregpoly.fig endstream endobj 2 0 obj <>endobj xref 0 10 0000000000 65535 f 0000000605 00000 n 0000002179 00000 n 0000000546 00000 n 0000000415 00000 n 0000000015 00000 n 0000000396 00000 n 0000000669 00000 n 0000000710 00000 n 0000000739 00000 n trailer << /Size 10 /Root 1 0 R /Info 2 0 R /ID [] >> startxref 2368 %%EOF spatstat/vignettes/getstart.Rnw0000644000176200001440000003126413265066746016477 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Getting Started with Spatstat} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Getting started with \texttt{spatstat}} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version 
\texttt{\Sexpr{sversion}}} \maketitle Welcome to \spst, a package in the \R\ language for analysing spatial point patterns. This document will help you to get started with \spst. It gives you a quick overview of \spst, and some cookbook recipes for doing basic calculations. \section*{What kind of data does \spst\ handle?} \Spst\ is mainly designed for analysing \emph{spatial point patterns}. For example, suppose you are an ecologist studying plant seedlings. You have pegged out a $10 \times 10$ metre rectangle for your survey. Inside the rectangle you identify all the seedlings of the species you want, and record their $(x,y)$ locations. You can plot the $(x,y)$ locations: <>= data(redwood) plot(redwood, pch=16, main="") @ This is a \emph{spatial point pattern} dataset. Methods for analysing this kind of data are summarised in the highly recommended book by Diggle \cite{digg03}, or our own book \cite{baddrubaturn15}, or other references in the bibliography below. \nocite{handbook10,bivapebegome08} Alternatively the points could be locations in one dimension (such as road accidents recorded on a road network) or in three dimensions (such as cells observed in 3D microscopy). You might also have recorded additional information about each seedling, such as its height, or the number of fronds. Such information, attached to each point in the point pattern, is called a \emph{mark} variable. For example, here is a stand of pine trees, with each tree marked by its diameter at breast height (dbh). The circle radii represent the dbh values (not to scale). <>= data(longleaf) plot(longleaf, main="") @ You might also have recorded supplementary data, such as the terrain elevation, which might serve as explanatory variables. These data can be in any format. \Spst\ does not usually provide capabilities for analysing such data in their own right, but \spst\ does allow such explanatory data to be taken into account in the analysis of a spatial point pattern. 
\Spst\ is \underline{\bf not} designed to handle point data where the $(x,y)$ locations are fixed (e.g.\ temperature records from the state capital cities in Australia) or where the different $(x,y)$ points represent the same object at different times (e.g.\ hourly locations of a tiger shark with a GPS tag). These are different statistical problems, for which you need different methodology. \section*{What can \spst\ do?} \Spst\ supports a very wide range of popular techniques for statistical analysis for spatial point patterns, for example \begin{itemize} \item kernel estimation of density/intensity \item quadrat counting and clustering indices \item detection of clustering using Ripley's $K$-function \item spatial logistic regression \item model-fitting \item Monte Carlo tests \end{itemize} as well as some advanced statistical techniques. \Spst\ is one of the largest packages available for \R, containing over 1000 commands. It is the product of 25 years of software development by leading researchers in spatial statistics. \section*{How do I start using \spst?} \begin{enumerate} \item Install \R\ on your computer \begin{quote} Go to \texttt{r-project.org} and follow the installation instructions. \end{quote} \item Install the \spst\ package in your \R\ system \begin{quote} Start \R\ and type \verb!install.packages("spatstat")!. If that doesn't work, go to \texttt{r-project.org} to learn how to install Contributed Packages. \end{quote} \item Start \R\ \item Type \texttt{library(spatstat)} to load the package. \item Type \texttt{help(spatstat)} for information. \end{enumerate} \section*{How do I get my data into \spst?} <>= data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) @ Here is a cookbook example. Suppose you've recorded the $(x,y)$ locations of seedlings, in an Excel spreadsheet. You should also have recorded the dimensions of the survey area in which the seedlings were mapped. 
\begin{enumerate} \item In Excel, save the spreadsheet into a comma-separated values (CSV) file. \item Start \R\ \item Read your data into \R\ using \texttt{read.csv}. \begin{quote} If your CSV file is called \texttt{myfile.csv} then you could type something like <>= mydata <- read.csv("myfile.csv") @ to read the data from the file and save them in an object called \texttt{mydata} (or whatever you want to call it). You may need to set various options inside the \texttt{read.csv()} command to get this to work for your file format: type \texttt{help(read.csv)} for information. \end{quote} \item Check that \texttt{mydata} contains the data you expect. \begin{quote} For example, to see the first few rows of data from the spreadsheet, type <<>>= head(mydata) @ To select a particular column of data, you can type \texttt{mydata[,3]} to extract the third column, or \verb!mydata$x! to extract the column labelled \texttt{x}. \end{quote} \item Type \texttt{library(spatstat)} to load the \spst\ package \item Now convert the data to a point pattern object using the \spst\ command \texttt{ppp}. \begin{quote} Suppose that the \texttt{x} and \texttt{y} coordinates were stored in columns 3 and 7 of the spreadsheet. Suppose that the sampling plot was a rectangle, with the $x$ coordinates ranging from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then you would type <>= mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) @ The general form is <>= ppp(x.coordinates, y.coordinates, x.range, y.range) @ Note that this only stores the seedling locations. If you have additional columns of data (such as seedling height, seedling sex, etc) these can be added as \emph{marks}, later. \end{quote} \item Check that the point pattern looks right by plotting it: <>= plot(mypattern) @ \item Now you are ready to do some statistical analysis. 
mypattern <- finpines
To get help about a particular command \texttt{blah}, type \texttt{help(blah)}. There is a graphical help interface, which you can start by typing \texttt{help.start()}. Alternatively you can download a pdf of the entire manual (1000 pages!) from the website \texttt{www.spatstat.org}. \end{quote} \item vignettes: \begin{quote} \Spst\ comes installed with several `vignettes' (introductory documents with examples) which can be accessed using the graphical help interface. They include a document about \texttt{Handling shapefiles}. \end{quote} \item book: \begin{quote} Our book \cite{baddrubaturn15} contains a complete course on \texttt{spatstat}. \end{quote} \item website: \begin{quote} Visit the \spst\ package website \texttt{www.spatstat.org} \end{quote} \item forums: \begin{quote} Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}. Then email your questions to the forum. Alternatively you can ask the authors of the \spst\ package (their email addresses are given in the package documentation). \end{quote} \end{itemize} \begin{thebibliography}{10} % \bibitem{badd10wshop} % A. Baddeley. % \newblock Analysing spatial point patterns in {{R}}. % \newblock Technical report, CSIRO, 2010. % \newblock Version 4. % \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} % \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{bivapebegome08} R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio. \newblock {\em Applied spatial data analysis with {R}}. \newblock Springer, 2008. \bibitem{cres93} N.A.C. Cressie. \newblock {\em Statistics for Spatial Data}. \newblock {John Wiley and Sons}, {New York}, second edition, 1993. \bibitem{digg03} P.J. Diggle. \newblock {\em Statistical Analysis of Spatial Point Patterns}. \newblock Hodder Arnold, London, second edition, 2003. \bibitem{fortdale05} M.J. 
A.S. Fotheringham and P.A. Rogerson, editors. \newblock {\em The {SAGE} {H}andbook of {S}patial {A}nalysis}.
\end{thebibliography} \end{document} spatstat/vignettes/replicated.Rnw0000644000176200001440000014174113265066746016760 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}} \newcommand{\dee}[1]{\, {\rm d}{#1}} \newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}} \newcommand{\xx}{\boldmaths{x}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Analysing replicated point patterns in \texttt{spatstat}} \author{Adrian Baddeley} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \begin{abstract} This document describes \spst's capabilities for fitting models to replicated point patterns. More generally it applies to data from a designed experiment in which the response from each unit is a spatial point pattern. \end{abstract} \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} `Replicated point patterns' are datasets consisting of several point patterns which can be regarded as independent repetitions of the same experiment. 
For example, three point patterns taken from micrographs of three pipette samples of the same jug of milk could be assumed to be replicated observations.
The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a pixel image. Three important cases are: \begin{description} \item[independent replicates:] We observe $n$ different point patterns that can be regarded as independent replicates, i.e.\ independent realisations of the same point process. The `responses' are the point patterns; there are no covariates. \item[replication in groups:] there are $K$ different experimental groups (e.g. control, aspirin, nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$ point patterns which can be regarded as independent replicates within this group. We regard this as an experiment with $n = \sum_k n_k$ units. The responses are the point patterns; there is one covariate which is a factor (categorical variable) identifying which group each point pattern belongs to. \item[general case:] there are covariates other than factors that influence the response. The point patterns are assumed to be independent, but no two patterns have the same distribution. \end{description} Examples of these three cases are given in the datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper} respectively, which are installed in \spst. \section{Installed datasets} The following datasets are currently installed in \spst. \begin{itemize} \item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data recording the locations of insect larvae on a pond in 3 independent experiments. \item \texttt{pyramidal}: data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain, 31 human subjects grouped into 3 groups (controls, schizoaffective and schizophrenic). \item \texttt{flu}: data from Chen et al \cite{chenetal08} giving the locations of two different virus proteins on the membranes of cells infected with influenza virus; 41 multitype point patterns divided into two virus types (wild and mutant) and two stain types. 
The \texttt{waterstriders} dataset is a list of point patterns. Each entry in the list is a point pattern (object of class \verb!"ppp"!).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Hyperframes} A \emph{hyperframe} is like a data frame, except that its entries can be objects of any kind. A hyperframe is effectively a two-dimensional array in which each column consists of values of one type (as in a data frame) or consists of objects of one class. The entries in a hyperframe can be point patterns, pixel images, windows, or any other objects. To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). \subsection{Creating hyperframes} The function \texttt{hyperframe} will create a hyperframe. <>= hyperframe(...) @ The arguments \verb!...! are any number of arguments of the form \texttt{tag=value}. Each \texttt{value} will become a column of the array. The \texttt{tag} determines the name of the column. Each \texttt{value} can be either \begin{itemize} \item an atomic vector or factor (i.e. numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of identical objects. \end{itemize} All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. For example, here is a hyperframe containing a column of numbers and a column of \emph{functions}: <<>>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H @ Note that a column of character strings will be converted to a factor, unless you set \texttt{stringsAsFactors=FALSE} in the call to \code{hyperframe}. This is the same behaviour as for the function \code{data.frame}. 
<<>>= G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G @ This hyperframe has 3 rows and 6 columns. The columns named \texttt{U} and \texttt{V} are constant (all entries in a column are the same). The column named \texttt{Y} is a character vector while \texttt{Z} is a factor. \subsection{Hyperframes of data} To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). Several examples of hyperframes are provided with the package, including \texttt{demohyper}, \texttt{flu}, \texttt{simba} and \texttt{pyramidal}, described above. The \texttt{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the control group are independent Poisson point patterns with intensity 80. The responses in the treatment group are independent realisations of a Strauss process (see \texttt{help(simba)} for details). The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns: \texttt{Points} (the point patterns) and \texttt{group} (a factor with levels \texttt{control} and \texttt{treatment}). <<>>= simba @ The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain. One point pattern was observed in each of 31 human subjects. The subjects were classified into 3 groups (controls, schizoaffective and schizophrenic). The \texttt{pyramidal} dataset is a hyperframe with 31 rows and 2 columns: \code{Neurons} (the point patterns) and \code{group} (a factor with levels \texttt{control}, \texttt{schizoaffective} and \texttt{schizophrenic}). 
<<>>= pyramidal @ The \texttt{waterstriders} dataset is not a hyperframe; it's just a list of point patterns. It can easily be converted into a hyperframe: <<>>= ws <- hyperframe(Striders=waterstriders) @ \subsection{Columns of a hyperframe} Individual columns of a hyperframe can be extracted using \verb!$!: <<>>= H$X H$Y @ The result of \verb!$! is a vector or factor if the column contains atomic values; otherwise it is a list of objects (with class \texttt{"listof"} to make it easier to print and plot). Individual columns can also be assigned (overwritten or created) using \verb!$<-!: <<>>= H$U <- letters[1:3] H @ This can be used to build up a hyperframe column-by-column: <<>>= G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G @ \subsection{Subsets of a hyperframe} Other subsets of a hyperframe can be extracted with \verb![!: <<>>= H[,1] H[2,] H[2:3, ] H[1,1] @ The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE! and the subset consists of only one element or one column: <<>>= H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] @ There is also a method for \verb![<-! that allows you to assign values to a subset of a hyperframe. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Plotting} \subsection{Plotting a \code{listof} object} The plot method for \code{listof} objects has formal arguments <>= plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) @ where \code{main} is a title for the entire page. If \code{arrange=TRUE} then the entries of the list are displayed in separate panels on the same page (with \code{nrows} rows and \code{ncols} columns of panels), while if \code{arrange=FALSE} then the entries are just plotted as a series of plot frames. The extra arguments \verb!...! control the individual plot panels. These arguments will be passed to the plot method that displays each entry of the list. Suitable arguments depend on the type of entries. 
<>= plot(waterstriders, pch=16, nrows=1) @ \subsection{Plotting a hyperframe} \subsubsection{Plotting one column} If \code{h} is a hyperframe, then the default action of \code{plot(h)} is to extract the first column of \code{h} and plot each of the entries in a separate panel on one page (actually using the plot method for class \verb!"listof"!). \SweaveOpts{width=7,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(simba) @ This only works if the entries in the first column are objects for which a plot method is defined (for example, point patterns, images, windows). To select a different column, use \verb!$! or \verb![!: \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) @ The plot can be controlled using the arguments for \code{plot.listof} (and, in this case, \code{plot.function}, since \verb!H$Y! consists of functions). \subsubsection{Complex plots} More generally, we can display any kind of higher-order plot involving one or more columns of a hyperframe: <>= plot(h, e) @ where \code{h} is a hyperframe and \code{e} is an \R\ language call or expression that must be evaluated in each row to generate each plot panel. \SweaveOpts{width=9,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) @ Note the use of \code{quote}, which prevents the code inside the braces from being evaluated immediately. To plot the $K$-functions of each of the patterns in the \code{waterstriders} dataset, \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Data analysis} \subsection{Computing with hyperframes} Often we want to perform some computation on each row of a hyperframe. 
In a data frame, this can be done using the command \code{with}: <<>>= df <- data.frame(A=1:10, B=10:1) with(df, A-B) @ In this example, the expression \code{A-B} is evaluated in each row of the data frame, and the result is a vector containing the computed values for each row. The function \code{with} is generic, and has a method for data frames, \code{with.data.frame}. The computation above was executed by \code{with.data.frame}. The same syntax is available for hyperframes using the method \code{with.hyperframe}: <>= with(h,e) @ Here \code{h} is a hyperframe, and \code{e} is an {\sf R} language construct involving the names of columns in \code{h}. For each row of \code{h}, the expression \code{e} will be evaluated in such a way that each entry in the row is identified by its column name. <<>>= H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) @ The result of \code{with.hyperframe} is a list of objects (of class \verb!"listof"!), or a vector or factor if appropriate. Notice that (unlike the situation for data frames) the operations in the expression \code{e} do not have to be vectorised. For example, \code{distmap} expects a single point pattern, and is not vectorised to deal with a list of point patterns. Instead, the expression \code{distmap(Bugs)} is evaluated separately in each row of the hyperframe. \subsection{Summary statistics} One application of \code{with.hyperframe} is to calculate summary statistics for each row of a hyperframe. For example, the number of points in a point pattern \code{X} is returned by \code{npoints(X)}. To calculate this for each of the responses in the \code{simba} dataset, <<>>= with(simba, npoints(Points)) @ The summary statistic can be any kind of object. 
For example, to compute the empirical $K$-functions for each of the patterns in the \code{waterstriders} dataset, <<>>= H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) @ To plot these $K$-functions you can then just type \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(K) @ The summary statistic for each row could be a numeric vector: <<>>= H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) @ The result is a list, each entry being a vector of nearest neighbour distances. To find the minimum interpoint distance in each pattern: <<>>= with(H, min(nndist(Bugs))) @ \subsection{Generating new columns} New columns of a hyperframe can be created by computation from the existing columns. For example, I can add a new column to the \code{simba} dataset that contains pixel images of the distance maps for each of the point pattern responses. <>= simba$Dist <- with(simba, distmap(Points)) @ \subsection{Simulation} This can be useful for simulation. For example, to generate Poisson point patterns with different intensities, where the intensities are given by a numeric vector \code{lambda}: \SweaveOpts{width=6,height=6} \setkeys{Gin}{width=0.7\textwidth} <>= lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) @ It's even simpler to generate 10 independent Poisson point patterns with the \emph{same} intensity 50, say: <>= H$X <- with(H, rpoispp(50)) @ \noindent The expression \code{rpoispp(50)} is evaluated once in each row, yielding a different point pattern in each row because of the randomness. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Exploratory data analysis} Before fitting models to the data, it is prudent to explore the data to detect unusual features and to suggest appropriate models. 
\subsection{Exploring spatial trend and covariate effects} Points may be distributed non-uniformly either because they are intrinsically non-uniform (``spatial trend'') or because their abundance depends on a spatial covariate (``covariate effects''). Non-uniformity of a point pattern can be investigated using the kernel smoothed intensity. This is the convolution of the point pattern with a smooth density called the kernel. Effectively each point in the pattern is replaced by a copy of the kernel, and the sum of all copies of the kernel is the kernel-smoothed intensity function. It is computed by \texttt{density.ppp} separately for each point pattern. <>= plot(simba, quote(plot(density(Points), main="")), nrows=2) @ Covariate effects due to a real-valued spatial covariate (a real-valued pixel image) can be investigated using the command \code{rhohat}. This uses a kernel smoothing technique to fit a model of the form \[ \lambda(u) = \rho(Z(u)) \] where $\lambda(u)$ is the point process intensity at a location $u$, and $Z(u)$ is the value of the spatial covariate at that location. Here $\rho$ is an unknown, smooth function which is to be estimated. The function $\rho$ expresses the effect of the spatial covariate on the point process intensity. If $\rho$ turns out to be constant, then the covariate has no effect on point process intensity (and the constant value of $\rho$ is the constant intensity of the point process). <>= rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) @ \SweaveOpts{width=6,height=4} \setkeys{Gin}{width=0.9\textwidth} \subsection{Exploring interpoint interaction} Still to be written. See Chapter 16 of \cite{baddrubaturn15}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fitting models of spatial trend} The command \code{mppm} fits models to multiple point patterns. Its syntax is very similar to that of \code{lm} and \code{glm}: <>= mppm(formula, data, interaction, ...) 
@ where \code{formula} is a formula describing the systematic trend part of the model, \code{data} is a hyperframe containing all the data (responses and covariates), and \code{interaction} determines the stochastic interpoint interaction part of the model. For example: <>= mppm(Points ~ group, simba, Poisson()) @ Note that the formula has a left hand side, which identifies the response. This should be the name of a column of \code{data}. \subsection{Trend formula} The right side of \code{formula} is an expression for the linear predictor (effectively the {\bf logarithm} of the spatial trend). The variables appearing in the right hand side of \code{formula} should be either \begin{itemize} \item names of columns in \code{data} \item objects in the {\sf R} global environment (such as \code{pi} and \code{log}) \item the reserved names \code{x}, \code{y} (representing Cartesian coordinates), \code{marks} (representing mark values attached to points) or \code{id} (a factor representing the row number in the hyperframe). \end{itemize} \subsubsection{Design covariates} The variables in the trend could be `design covariates'. For example, to fit a model to the \code{simba} dataset in which all patterns are independent replicates of the same uniform Poisson process, with the same constant intensity: <<>>= mppm(Points ~ 1, simba) @ To fit a model in which the two groups of patterns (control and treatment groups) each consist of independent replicates of a uniform Poisson process, but with possibly different intensity in each group: <<>>= mppm(Points ~ group, simba) @ To fit a uniform Poisson process to each pattern, with different intensity for each pattern: <<>>= mppm(Points ~ id, simba) @ \subsubsection{Spatial covariates} The variables in the trend could be `spatial covariates'. For example, the \code{demohyper} dataset has a column \code{Image} containing pixel images. 
<<>>= mppm(Points ~ Image, data=demohyper) @ This model postulates that each pattern is a Poisson process with intensity of the form \[ \lambda(u) = \exp(\beta_0 + \beta_1 Z(u)) \] at location $u$, where $\beta_0, \beta_1$ are coefficients to be estimated, and $Z(u)$ is the value of the pixel image \code{Image} at location $u$. It may or may not be appropriate to assume that the intensity of the points is an exponential function of the image pixel value $Z$. If instead we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$, the appropriate model is <>= mppm(Points ~ offset(log(Image)), data=demohyper) @ which corresponds to an intensity proportional to \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u). \] The \code{offset} indicates that there is no coefficient in front of $\log Z(u)$. Alternatively we could allow a coefficient: <>= mppm(Points ~ log(Image), data=demohyper) @ which corresponds to a gamma transformation of \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) = e^{\beta_0} \; Z(u)^{\beta_1}. \] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Interpoint interaction} The stochastic interpoint interaction in a point process model is specified by the arguments \code{interaction} and (optionally) \code{iformula} in <>= mppm(formula, data, interaction, ..., iformula=NULL) @ \subsection{Same interaction for all patterns} In the simplest case, the argument \texttt{interaction} is one of the familiar objects that describe the point process interaction structure. 
It is an object of class \texttt{"interact"} created by calling one of the functions \begin{center} \begin{tabular}{rl} \texttt{Poisson()} & the Poisson point process\\ \texttt{Hardcore()} & the hard core process \\ \texttt{Strauss()} & the Strauss process \\ \texttt{StraussHard()} & the Strauss/hard core point process\\ \texttt{Softcore()} & pairwise interaction, soft core potential\\ \texttt{PairPiece()} & pairwise interaction, piecewise constant \\ \texttt{DiggleGatesStibbard() } & Diggle-Gates-Stibbard pair potential \\ \texttt{DiggleGratton() } & Diggle-Gratton pair potential \\ \texttt{Fiksel() } & Fiksel pair potential \\ \texttt{LennardJones() } & Lennard-Jones pair potential \\ \texttt{Pairwise()} & pairwise interaction, user-supplied potential\\ \texttt{AreaInter()} & area-interaction potential\\ \texttt{Geyer()} & Geyer's saturation process\\ \texttt{BadGey()} & multiscale Geyer saturation process\\ \texttt{Saturated()} & Saturated pair model, user-supplied potential\\ \texttt{OrdThresh()} & Ord process, threshold potential\\ \texttt{Ord()} & Ord model, user-supplied potential \\ \texttt{MultiStrauss()} & multitype Strauss process \\ \texttt{MultiStraussHard()} & multitype Strauss/hard core process \\ \texttt{Concom()} & connected component interaction \\ \texttt{Hybrid()} & hybrid of several interactions \\ \end{tabular} \end{center} In this `simple' usage of \texttt{mppm}, the point process model assumes that all point patterns have exactly the same interpoint interaction, (with the same interaction parameters), and only differ in their spatial trend. \subsection{Hyperframe of interactions} More generally the argument \code{interaction} can be a hyperframe containing objects of class \texttt{"interact"}. For example, we might want to fit a Strauss process to each point pattern, but with a different Strauss interaction radius for each pattern. 
<>= radii <- with(simba, mean(nndist(Points))) @ Then \code{radii} is a vector of numbers which we could use as the values of the interaction radius for each case. First we need to make the interaction objects: <<>>= Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) @ Then we put them into a hyperframe and fit the model: <<>>= Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) @ An important constraint is that all of the interaction objects in one column must be \emph{instances of the same process} (e.g. Strauss) albeit possibly having different parameter values. For example, you cannot put Poisson and Strauss processes in the same column. \subsection{Interaction formula} If \code{interaction} is a hyperframe, then the additional argument \code{iformula} may be used to fully specify the interaction. (An \code{iformula} is also required if \code{interaction} has more than one column.) The \code{iformula} should be a formula without a left hand side. Variables on the right hand side are typically the names of columns in \code{interaction}. \subsubsection{Selecting one column} If the right hand side of \code{iformula} is a single name, then this identifies the column in \code{interaction} to be used as the interpoint interaction structure. <<>>= h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) @ \subsubsection{Interaction depending on design} The \code{iformula} can also involve columns of \code{data}, but only those columns that are vectors or factors. This allows us to specify an interaction that depends on the experimental design. [This feature is {\bf experimental}.] For example <<>>= fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) @ Since \code{Strauss(0.07)} is not a hyperframe, it is first converted to a hyperframe with a single column named \code{Interaction}. 
The \code{iformula = ~Interaction*group} specifies (since \code{group} is a factor) that the interpoint interaction shall have a different coefficient in each experimental group. That is, we fit a model which has two different values for the Strauss interaction parameter $\gamma$, one for the control group and one for the treatment group. When you print the result of such a fit, the package tries to do `automatic interpretation' of the fitted model (translating the fitted interaction coefficients into meaningful numbers like $\gamma$). This will be successful in \emph{most} cases: <<>>= fit @ <>= co <- coef(fit) si <- function(x) { signif(x, 4) } @ Thus we see that the estimate of the Strauss parameter $\gamma$ for the control group is \Sexpr{si(exp(co[2]))}, and for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))} (the correct values in this simulated dataset were $1$ and $0.5$). The fitted model can also be interpreted directly from the fitted canonical coefficients: <<>>= coef(fit) @ The last output shows all the coefficients $\beta_j$ in the linear predictor for the (log) conditional intensity. The interpretation of the model coefficients, for any fitted model in \R, depends on the \emph{contrasts} which were applicable when the model was fitted. This is part of the core {\sf R} system: see \code{help(contrasts)} or \code{options(contrasts)}. If you did not specify otherwise, the default is to use \emph{treatment contrasts}. This means that, for an explanatory variable which is a \texttt{factor} with $N$ levels, the first level of the factor is used as a baseline, and the fitted model coefficients represent the factor levels $2, 3, \ldots, N$ relative to this baseline. In the output above, there is a coefficient for \code{(Intercept)} and one for \code{grouptreatment}. These are coefficients related to the \code{group} factor. 
According to the ``treatment contrasts'' rule, the \code{(Intercept)} coefficient is the estimated effect for the control group, and the \code{grouptreatment} coefficient is the estimated difference between the treatment and control groups. Thus the fitted first order trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ for the control group and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group. The correct values in this simulated dataset were $80$ and $100$. The remaining coefficients in the output are \code{Interaction} and \code{Interaction:grouptreatment}. Recall that the Strauss process interaction term is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$ at a spatial location $u$, for a point pattern $\xx$. Since we're using treatment contrasts, the coefficient \code{Interaction} is the estimate of $\log\gamma$ for the control group. The coefficient \code{Interaction:grouptreatment} is the estimate of the difference in $\log\gamma$ between the treatment and control groups. Thus the estimated Strauss interaction parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ for the control group and $\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ for the treatment group. The correct values were $1$ and $0.5$. \subsubsection{Completely different interactions for different cases} In the previous example, when we fitted a Strauss model to all point patterns in the \code{simba} dataset, the fitted model for the patterns in the control group was close to Poisson ($\gamma \approx 1$). Suppose we now want to fit a model which {\it is} Poisson in the control group, and Strauss in the treatment group. The Poisson and Strauss interactions must be given as separate columns in a hyperframe of interactions: <>= interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) @ What do we write for the \code{iformula}? 
The following \emph{will not} work: <>= iformula=~ifelse(group=="control", po, str) @ This does not work because the Poisson and Strauss models are `incompatible' inside such expressions. The canonical sufficient statistics for the Poisson and Strauss processes do not have the same dimension. Internally in \code{mppm} we translate the symbols \code{po} and \code{str} into matrices; the dimensions of these matrices are different, so the \code{ifelse} expression cannot be evaluated. Instead we need something like the following: <>= iformula=~I((group=="control")*po) + I((group=="treatment") * str) @ The letter \code{I} here is a standard R function that prevents its argument from being interpreted as a formula (thus the \code{*} is interpreted as multiplication instead of a model interaction). The expression \code{(group=="control")} is logical, and when multiplied by the matrix \code{po}, yields a matrix. So the following does work: <<>>= g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!ifdef RANDOMEFFECTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Random effects} \subsection{Mixed effects models} It is also possible to fit models that include `random effects'. Effectively, some of the coefficients in the model are assumed to be Normally-distributed random variables instead of constants. \subsubsection{Mixed Poisson model} Consider the simplest model of a uniform Poisson process which we fitted to the 3 point patterns of waterstriders. It might be sensible to assume that each pattern is a realisation of a Poisson process, but with {\em random intensity\/}. In each realisation the intensity $\lambda$ is constant across different locations, but it is a different, random value in different realisations. 
This example is called a `mixed Poisson process' and belongs to the class of `Cox processes' (Poisson processes with random intensity functions). Let's assume further that the log-intensity is a Normal random variable. Then the model is a (very degenerate) special case of a `log-Gaussian Cox process'. To fit such a model we use the standard techniques of mixed effects models \cite{lairware82,davigilt95,pinhbate00}. The mixed Poisson process which we discussed above would be written in standard form \begin{equation} \label{mixPois} \lambda_i(u) = \exp(\mu + Z_i) \end{equation} for the $i$th point pattern, where $\mu$ is a parameter to be estimated (the `fixed effect') and $Z_i \sim N(0, \sigma^2)$ is a zero-mean Normal random variable (the `random effect' for point pattern $i$). In the simplest case we would assume that $Z_1, \ldots, Z_n$ are independent. The variance $\sigma^2$ of the random effects would be estimated. One can also estimate the individual realised values $z_i$ of the random effects for each point pattern, although these are usually not of such great interest. Since the model includes both fixed and random effects, it is called a ``mixed-effects'' model. \subsubsection{Dependence structure} When we formulate a random-effects or mixed-effects model, we must specify the dependence structure of the random effects. In the model above we assumed that the $Z_i$ are independent for all point patterns $i$. If the experiment consists of two groups, we could alternatively assume that $Z_i = Z_j$ whenever $i$ and $j$ belong to the same group. In other words all the patterns in one group have the same value of the random effect. So the random effect is associated with the group rather than with individual patterns. This could be appropriate if, for example, the groups represent different batches of a chemical. 
Each batch is prepared under slightly different conditions so we believe that there are random variations between batches, but within a batch we believe that the chemical is well-mixed. \subsubsection{Random effects are coefficients} In the mixed Poisson model (\ref{mixPois}), the random effect is an additive constant (with a random value) in the log-intensity. In general, a random effect is a \emph{coefficient} of one of the covariates. For example if $v$ is a real-valued design covariate (e.g. `temperature'), with value $v_i$ for the $i$th point pattern, then we could assume \begin{equation} \label{ranef2} \lambda_i(u) = \exp(\mu + Z_i v_i) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This model has a random effect in the dependence on $v$. We could also have a random effect for a spatial covariate $V$. Suppose $V_i$ is a real-valued image for the $i$th pattern (so that $V_i(u)$ is the value of some covariate at the location $u$ for the $i$th case). Then we could assume \begin{equation} \label{ranef3} \lambda_i(u) = \exp(\mu + Z_i V_i(u)) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This kind of random effect would be appropriate if, for example, the images $V_i$ are not `normalised' or `standardised' relative to each other (e.g.\ they are images taken under different illumination). Then the coefficients $Z_i$ effectively include the rescaling necessary to standardise the images. \subsection{Fitting a mixed-effects model} The call to \texttt{mppm} can also include the argument \texttt{random}. This should be a formula (with no left-hand side) describing the structure of random effects. The formula for random effects must be recognisable to \texttt{lme}. It is typically of the form \begin{verbatim} ~x1 + ... + xn | g \end{verbatim} or \begin{verbatim} ~x1 + ... + xn | g1/.../gm \end{verbatim} where \verb!x1 + ... + xn! 
specifies the covariates for the random effects and \texttt{g} or \verb!g1/.../gm! determines the grouping (dependence) structure. Here \code{g} or \code{g1, \ldots, gm} should be factors. To fit the mixed Poisson model (\ref{mixPois}) to the waterstriders, we want to have a random intercept coefficient (so \texttt{x} is \texttt{1}) that varies for different point patterns (so \texttt{g} is \texttt{id}). The reserved name \code{id} is a factor referring to the individual point pattern. Thus <<>>= H <- hyperframe(P=waterstriders) mppm(P ~ 1, H, random=~1|id) @ To fit the mixed effects model (\ref{ranef2}) to the coculture data with the \code{AstroIm} covariate, with a random effect associated with each well, <>= mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!endif %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Studying the fitted model} Fitted models produced by \code{mppm} can be examined and validated in many ways. \subsection{Fits for each pattern} \subsubsection{Subfits} The command \code{subfits} takes an \code{mppm} object and extracts, for each individual point pattern, the fitted point process model for that pattern \emph{that is implied by the overall fit}. It returns a list of objects of class \code{ppm}. <<>>= H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) @ In this example the result is a list of three \code{ppm} objects representing the implied fits for each of the three point patterns in the \code{waterstriders} dataset. Notice that {\bf the fitted coefficients are the same} in all three models. Note that there are some unresolved difficulties with the implementation of \code{subfits}. Two completely different implementations are supplied in the package; they are called \code{subfits.old} %(used in versions 0.1--1 and earlier) and \code{subfits.new}.% (introduced in 0.1--2). The old version would occasionally crash. 
Unfortunately the newer version \code{subfits.new} is quite memory-hungry and sometimes causes R to hang. We're still working on this problem. So for the time being, \code{subfits} is the same as \code{subfits.old}. You can change this simply by reassigning, e.g. <>= subfits <- subfits.new @ \subsubsection{Fitting separately to each pattern} For comparison, we could fit a point process model separately to each point pattern dataset using \code{ppm}. The easy way to do this is with \code{with.hyperframe}. To fit a \emph{separate} uniform Poisson point process to each of the three waterstriders patterns, <<>>= H <- hyperframe(W=waterstriders) with(H, ppm(W)) @ The result is again a list of three fitted point process models (objects of class \code{ppm}), but now the fitted coefficients are different. \subsection{Residuals} One standard way to check a fitted model is to examine the residuals. \subsubsection{Point process residuals} Some recent papers \cite{baddetal05,baddmollpake08} have defined residuals for a fitted point process model (fitted to a \emph{single} point pattern). These residuals are implemented in \code{spatstat} as \code{residuals.ppm} and apply to an object of class \code{ppm}, that is, a model fitted to a \emph{single} point pattern. The command \code{residuals.mppm} computes the point process residuals for an \code{mppm} object. <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) @ The result is a list, with one entry for each of the point pattern datasets. Each list entry contains the point process residuals for the corresponding point pattern dataset. Each entry in the list is a signed measure (object of class \code{"msr"}), as explained in the help for \code{residuals.ppm}. 
It can be plotted: <>= plot(res) @ You probably want the smoothed residual field: <>= smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) @ \subsubsection{Sums of residuals} It would be useful to have a residual that is a single value for each point pattern (representing how much that point pattern departs from the model fitted to all the point patterns). That can be computed by \emph{integrating} the residual measures using the function \code{integral.msr}: <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) @ In designed experiments we can plot these total residuals against the design covariates: <>= fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) @ \subsubsection{Four-panel diagnostic plots} Sometimes a more useful tool is the function \code{diagnose.ppm} which produces a four-panel diagnostic plot based on the point process residuals. However, it is only available for \code{ppm} objects. To obtain a four-panel diagnostic plot for each of the point patterns, do the following: \begin{enumerate} \item fit a model to multiple point patterns using \code{mppm}. \item extract the individual fits using \code{subfits}. \item plot the residuals of the individual fits. \end{enumerate} For example: <>= fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) @ (One could also do this for models fitted separately to the individual point patterns.) \subsubsection{Residuals of the parameter estimates} We can also compare the parameter estimates obtained by fitting the model simultaneously to all patterns (using \code{mppm}) with those obtained by fitting the model separately to each pattern (using \code{ppm}). 
<<>>= H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) @ One could also try deletion residuals, etc. \subsection{Goodness-of-fit tests} \subsubsection{Quadrat count test} The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented for objects of class \code{ppm} (in \code{quadrat.test.ppm}) and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}). This is a goodness-of-fit test for a fitted {\bf Poisson} point process model only. The model could be uniform or non-uniform and the intensity might depend on covariates. <<>>= H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) @ See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm} for further details. \subsubsection{Kolmogorov-Smirnov test} The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model compares the observed and predicted distributions of the values of a spatial covariate. We want to test the null hypothesis $H_0$ that the observed point pattern ${\mathbf x}$ is a realisation from the Poisson process with intensity function $\lambda(u)$ (for locations $u$ in the window $W$). Let $Z(u)$ be a given, real-valued covariate defined at each spatial location $u$. Under $H_0$, the \emph{observed} values of $Z$ at the data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent random variables with common probability distribution function \[ F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u} {\int_W \lambda(u) \dee u}. \] We can therefore apply the Kolmogorov-Smirnov test of goodness-of-fit. 
This compares the empirical cumulative distribution of the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$. The test is implemented as \code{kstest.ppm}. The syntax is <>= kstest.mppm(model, covariate) @ where \code{model} is a fitted model (of class \texttt{"mppm"}) and \code{covariate} is either \begin{itemize} \item a \code{function(x,y)} making it possible to compute the value of the covariate at any location \code{(x,y)} \item a pixel image containing the covariate values \item a list of functions, one for each row of the hyperframe of original data \item a list of pixel images, one for each row of the hyperframe of original data \item a hyperframe with one column containing either functions or pixel images. \end{itemize} See Chapter 16 of \cite{baddrubaturn15} for further information. \newpage \addcontentsline{toc}{section}{Bibliography} %\bibliography{% %extra,% %extra2,% %biblio/badd,% %biblio/bioscience,% %biblio/censoring,% %biblio/mcmc,% %biblio/spatstat,% %biblio/stat,% %biblio/stochgeom% %} \begin{thebibliography}{1} \bibitem{baddmollpake08} A. Baddeley, J. M{\o}ller, and A.G. Pakes. \newblock Properties of residuals for spatial point processes. \newblock {\em Annals of the Institute of Statistical Mathematics}, 60:627--649, 2008. \bibitem{TheBook} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with R}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{statpaper} A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty. \newblock Analysing designed experiments where the response is a spatial point pattern. \newblock In preparation. \bibitem{baddetal05} A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton. \newblock Residual analysis for spatial point processes (with discussion). \newblock {\em Journal of the Royal Statistical Society, series B}, 67(5):617--666, 2005. \bibitem{chenetal08} B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb. 
\newblock The influenza virus {M2} protein cytoplasmic tail interacts with the {M1} protein and influences virus assembly at the site of virus budding. \newblock {\em Journal of Virology}, 82:10059--10070, 2008. %#%^!ifdef RANDOMEFFECTS \bibitem{davigilt95} M. Davidian and D.M. Giltinan. \newblock {\em Nonlinear Mixed Effects Models for Repeated Measurement Data}. \newblock Chapman and Hall, 1995. %#%^!endif \bibitem{digglangbene91} P.J. Diggle, N. Lange, and F. M. Benes. \newblock Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation}, 86:618--625, 1991. %#%^!ifdef RANDOMEFFECTS \bibitem{lairware82} N.M. Laird and J.H. Ware. \newblock Random-effects models for longitudinal data. \newblock {\em Biometrics}, 38:963--974, 1982. %#%^!endif \bibitem{pent84} A. Penttinen. \newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method}. \newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics and Statistics. University of {Jyv\"askyl\"a}, 1984. %#%^!ifdef RANDOMEFFECTS \bibitem{pinhbate00} J.C. Pinheiro and D.M. Bates. \newblock {\em Mixed-Effects Models in {S} and {S-PLUS}}. \newblock Springer, 2000. 
%#%^!endif \end{thebibliography} %\addcontentsline{toc}{section}{Index} %\printindex \end{document} spatstat/vignettes/hexagon.eps0000755000176200001440000000577013115273007016302 0ustar liggesusers%!PS-Adobe-2.0 EPSF-2.0 %%Title: hexagon.fig %%Creator: fig2dev Version 3.2 Patchlevel 5a %%CreationDate: Tue Nov 23 11:04:35 2010 %%BoundingBox: 0 0 98 98 %Magnification: 1.0000 %%EndComments %%BeginProlog /$F2psDict 200 dict def $F2psDict begin $F2psDict /mtrx matrix put /col-1 {0 setgray} bind def /col0 {0.000 0.000 0.000 srgb} bind def /col1 {0.000 0.000 1.000 srgb} bind def /col2 {0.000 1.000 0.000 srgb} bind def /col3 {0.000 1.000 1.000 srgb} bind def /col4 {1.000 0.000 0.000 srgb} bind def /col5 {1.000 0.000 1.000 srgb} bind def /col6 {1.000 1.000 0.000 srgb} bind def /col7 {1.000 1.000 1.000 srgb} bind def /col8 {0.000 0.000 0.560 srgb} bind def /col9 {0.000 0.000 0.690 srgb} bind def /col10 {0.000 0.000 0.820 srgb} bind def /col11 {0.530 0.810 1.000 srgb} bind def /col12 {0.000 0.560 0.000 srgb} bind def /col13 {0.000 0.690 0.000 srgb} bind def /col14 {0.000 0.820 0.000 srgb} bind def /col15 {0.000 0.560 0.560 srgb} bind def /col16 {0.000 0.690 0.690 srgb} bind def /col17 {0.000 0.820 0.820 srgb} bind def /col18 {0.560 0.000 0.000 srgb} bind def /col19 {0.690 0.000 0.000 srgb} bind def /col20 {0.820 0.000 0.000 srgb} bind def /col21 {0.560 0.000 0.560 srgb} bind def /col22 {0.690 0.000 0.690 srgb} bind def /col23 {0.820 0.000 0.820 srgb} bind def /col24 {0.500 0.190 0.000 srgb} bind def /col25 {0.630 0.250 0.000 srgb} bind def /col26 {0.750 0.380 0.000 srgb} bind def /col27 {1.000 0.500 0.500 srgb} bind def /col28 {1.000 0.630 0.630 srgb} bind def /col29 {1.000 0.750 0.750 srgb} bind def /col30 {1.000 0.880 0.880 srgb} bind def /col31 {1.000 0.840 0.000 srgb} bind def end /cp {closepath} bind def /ef {eofill} bind def /gr {grestore} bind def /gs {gsave} bind def /sa {save} bind def /rs {restore} bind def /l {lineto} bind def /m {moveto} bind def /rm {rmoveto} 
bind def /n {newpath} bind def /s {stroke} bind def /sh {show} bind def /slc {setlinecap} bind def /slj {setlinejoin} bind def /slw {setlinewidth} bind def /srgb {setrgbcolor} bind def /rot {rotate} bind def /sc {scale} bind def /sd {setdash} bind def /ff {findfont} bind def /sf {setfont} bind def /scf {scalefont} bind def /sw {stringwidth} bind def /tr {translate} bind def /tnt {dup dup currentrgbcolor 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb} bind def /shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul 4 -2 roll mul srgb} bind def /$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def /$F2psEnd {$F2psEnteredState restore end} def /pageheader { save newpath 0 98 moveto 0 0 lineto 98 0 lineto 98 98 lineto closepath clip newpath -11.0 102.4 translate 1 -1 scale $F2psBegin 10 setmiterlimit 0 slj 0 slc 0.06299 0.06299 sc } bind def /pagefooter { $F2psEnd restore } bind def %%EndProlog pageheader % % Fig objects follow % % % here starts figure with depth 50 % Polyline 0 slj 0 slc 30.000 slw n 1485 1395 m 1683 657 l 1143 117 l 405 315 l 207 1053 l 747 1593 l cp gs col0 s gr % here ends figure; pagefooter showpage %%Trailer %EOF spatstat/vignettes/irregpoly.eps0000755000176200001440000000646113115273007016663 0ustar liggesusers%!PS-Adobe-2.0 EPSF-2.0 %%Title: irregpoly.fig %%Creator: fig2dev Version 3.2 Patchlevel 5a %%CreationDate: Tue Nov 23 11:04:01 2010 %%BoundingBox: 0 0 226 144 %Magnification: 1.0000 %%EndComments %%BeginProlog /$F2psDict 200 dict def $F2psDict begin $F2psDict /mtrx matrix put /col-1 {0 setgray} bind def /col0 {0.000 0.000 0.000 srgb} bind def /col1 {0.000 0.000 1.000 srgb} bind def /col2 {0.000 1.000 0.000 srgb} bind def /col3 {0.000 1.000 1.000 srgb} bind def /col4 {1.000 0.000 0.000 srgb} bind def /col5 {1.000 0.000 1.000 srgb} bind def /col6 {1.000 1.000 0.000 srgb} bind def /col7 {1.000 1.000 1.000 srgb} bind def /col8 {0.000 0.000 0.560 
srgb} bind def /col9 {0.000 0.000 0.690 srgb} bind def /col10 {0.000 0.000 0.820 srgb} bind def /col11 {0.530 0.810 1.000 srgb} bind def /col12 {0.000 0.560 0.000 srgb} bind def /col13 {0.000 0.690 0.000 srgb} bind def /col14 {0.000 0.820 0.000 srgb} bind def /col15 {0.000 0.560 0.560 srgb} bind def /col16 {0.000 0.690 0.690 srgb} bind def /col17 {0.000 0.820 0.820 srgb} bind def /col18 {0.560 0.000 0.000 srgb} bind def /col19 {0.690 0.000 0.000 srgb} bind def /col20 {0.820 0.000 0.000 srgb} bind def /col21 {0.560 0.000 0.560 srgb} bind def /col22 {0.690 0.000 0.690 srgb} bind def /col23 {0.820 0.000 0.820 srgb} bind def /col24 {0.500 0.190 0.000 srgb} bind def /col25 {0.630 0.250 0.000 srgb} bind def /col26 {0.750 0.380 0.000 srgb} bind def /col27 {1.000 0.500 0.500 srgb} bind def /col28 {1.000 0.630 0.630 srgb} bind def /col29 {1.000 0.750 0.750 srgb} bind def /col30 {1.000 0.880 0.880 srgb} bind def /col31 {1.000 0.840 0.000 srgb} bind def end /cp {closepath} bind def /ef {eofill} bind def /gr {grestore} bind def /gs {gsave} bind def /sa {save} bind def /rs {restore} bind def /l {lineto} bind def /m {moveto} bind def /rm {rmoveto} bind def /n {newpath} bind def /s {stroke} bind def /sh {show} bind def /slc {setlinecap} bind def /slj {setlinejoin} bind def /slw {setlinewidth} bind def /srgb {setrgbcolor} bind def /rot {rotate} bind def /sc {scale} bind def /sd {setdash} bind def /ff {findfont} bind def /sf {setfont} bind def /scf {scalefont} bind def /sw {stringwidth} bind def /tr {translate} bind def /tnt {dup dup currentrgbcolor 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add 4 -2 roll dup 1 exch sub 3 -1 roll mul add srgb} bind def /shd {dup dup currentrgbcolor 4 -2 roll mul 4 -2 roll mul 4 -2 roll mul srgb} bind def /$F2psBegin {$F2psDict begin /$F2psEnteredState save def} def /$F2psEnd {$F2psEnteredState restore end} def /pageheader { save newpath 0 144 moveto 0 0 lineto 226 0 lineto 226 144 lineto closepath clip newpath 
-3.6 146.6 translate 1 -1 scale $F2psBegin 10 setmiterlimit 0 slj 0 slc 0.06299 0.06299 sc } bind def /pagefooter { $F2psEnd restore } bind def %%EndProlog pageheader % % Fig objects follow % % % here starts figure with depth 50 % Polyline 0 slj 0 slc 30.000 slw n 945 180 m 1170 1035 l 225 315 l 135 405 l 90 1215 l 675 1350 l 675 1665 l 135 1755 l 180 2205 l 990 2295 l 1260 1350 l 1530 1440 l 1440 2205 l 2250 2115 l 1890 1350 l 2520 1305 l 2250 1530 l 2475 2250 l 3330 2250 l 3330 1575 l 2790 1530 l 3600 1260 l 3465 720 l 2790 810 l 2475 765 l 3465 585 l 3510 360 l 2430 90 l 2115 225 l 2070 630 l 1800 945 l 1935 135 l 990 225 l gs col0 s gr % here ends figure; pagefooter showpage %%Trailer %EOF spatstat/vignettes/shapefiles.Rnw0000755000176200001440000005064713512337523016763 0ustar liggesusers\documentclass[twoside,11pt]{article} % \VignetteIndexEntry{Handling shapefiles in the spatstat package} \SweaveOpts{eps=TRUE} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{color} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \begin{document} %\bibliographystyle{plain} \thispagestyle{empty} <>= library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") @ \title{Handling shapefiles in the \texttt{spatstat} package} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{ \Sexpr{sdate} \\ \pkg{spatstat} version \texttt{\Sexpr{sversion}} } \maketitle This vignette explains how to read data into the \pkg{spatstat} package from files in the popular `shapefile' format. This vignette is part of the documentation included in \pkg{spatstat} version \texttt{\Sexpr{sversion}}. 
The information applies to \pkg{spatstat} versions \texttt{1.36-0} and above. \section{Shapefiles} A shapefile represents a list of spatial objects --- a list of points, a list of lines, or a list of polygonal regions --- and each object in the list may have additional variables attached to it. A dataset stored in shapefile format is actually stored in a collection of text files, for example \begin{verbatim} mydata.shp mydata.prj mydata.sbn mydata.dbf \end{verbatim} which all have the same base name \texttt{mydata} but different file extensions. To refer to this collection you will always use the filename with the extension \texttt{shp}, for example \texttt{mydata.shp}. \section{Helper packages} \label{S:helpers} We'll use two other packages to handle shapefile data. The \pkg{maptools} package is designed specifically for handling file formats for spatial data. It contains facilities for reading and writing files in shapefile format.% \footnote{ Some code in \pkg{maptools} is no longer maintained, and may give you a message recommending that you use the packages \pkg{rgdal} and \pkg{sf}. However these packages are more difficult to install than \pkg{maptools} because of their software requirements. So we recommend that you try \pkg{maptools} first. } The \pkg{sp} package supports a standard set of spatial data types in \R. These standard data types can be handled by many other packages, so it is useful to convert your spatial data into one of the data types supported by \pkg{sp}. \section{Caveat about longitude-latitude coordinates} The shapefile format supports geographical coordinates, usually longitude-latitude coordinates, which specify locations on the curved surface of the Earth. However, \texttt{spatstat} deals only with spatial data on a flat two-dimensional plane. 
When shapefile data are converted into \texttt{spatstat} objects, longitude and latitude coordinates are (currently) treated as $x$ and $y$ coordinates, so that the Earth's surface is effectively mapped to a rectangle. This mapping distorts distances and areas. If your study region is a \emph{small} region of the Earth's surface (about 3 degrees, 180 nautical miles, 200 statute miles, 320 km across) then a reasonable approach is to use the latitude and longitude as $x$ and $y$ coordinates, after multiplying the longitude coordinates by the cosine of the latitude of the centre of the region. This will approximately preserve areas and distances. This calculation is a simple example of a \emph{geographical projection} and there are some much better projections available. It may be wise to use another package to perform the appropriate projection for you, and then to convert the projected data into \texttt{spatstat} objects. If your study region is a large part of the sphere, then your data may not be amenable to the techniques provided by \texttt{spatstat} because the geometry is fundamentally different. Please consider the extension package \texttt{spatstat.sphere}. \section{How to read shapefiles into \pkg{spatstat}} To read shapefile data into \pkg{spatstat}, you follow two steps: \begin{enumerate} \item using the facilities of \pkg{maptools}, read the shapefiles and store the data in one of the standard formats supported by \pkg{sp}. \item convert the \pkg{sp} data type into one of the data types supported by \pkg{spatstat}. \end{enumerate} \subsection{Read shapefiles using \pkg{maptools}} Here's how to read shapefile data. \begin{enumerate} \item ensure that the package \pkg{maptools} is installed. You will need version \texttt{0.7-16} or later. 
\item start R and load the package: <>= library(maptools) @ \item read the shapefile into an object in the \pkg{sp} package using \texttt{readShapeSpatial}, for example <>= x <- readShapeSpatial("mydata.shp") @ In recent versions of \pkg{maptools} you may get a warning, saying that this code is no longer supported, and recommending the packages \pkg{rgdal} and \pkg{sf}. As far as we know, this warning is premature, as the code still works fine! \item To find out what kind of spatial objects are represented by the dataset, inspect its class: <>= class(x) @ The class may be either \texttt{SpatialPoints} indicating a point pattern, \texttt{SpatialLines} indicating a list of polygonal lines, or \texttt{SpatialPolygons} indicating a list of polygons. It may also be \texttt{SpatialPointsDataFrame}, \texttt{SpatialLinesDataFrame} or \texttt{SpatialPolygonsDataFrame} indicating that, in addition to the spatial objects, there is a data frame of additional variables. The classes \texttt{SpatialPixelsDataFrame} and \texttt{SpatialGridDataFrame} represent pixel image data. \end{enumerate} Here are some examples, using the example shapefiles supplied in the \pkg{maptools} package itself. % fake data because we don't want spatstat to depend on maptools <>= baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" @ <>= setwd(system.file("shapes", package="maptools")) baltim <- readShapeSpatial("baltim.shp") columbus <- readShapeSpatial("columbus.shp") fylk <- readShapeSpatial("fylk-val.shp") @ <<>>= class(baltim) class(columbus) class(fylk) @ \subsection{Convert data to \pkg{spatstat} format} To convert the dataset to an object in the \pkg{spatstat} package, the procedure depends on the type of data, as explained below. Both packages \pkg{maptools} and \pkg{spatstat} must be \textbf{loaded} in order to convert the data. 
\subsubsection{Objects of class \texttt{SpatialPoints}} An object \texttt{x} of class \texttt{SpatialPoints} represents a spatial point pattern. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a spatial point pattern in \pkg{spatstat}. (The conversion is performed by \texttt{as.ppp.SpatialPoints}, a function in \pkg{maptools}.) The window for the point pattern will be taken from the bounding box of the points. You will probably wish to change this window, usually by taking another dataset to provide the window information. Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern object of class \verb!"ppp"! and \texttt{W} is a window object of class \verb!"owin"!, type <>= X <- X[W] @ \subsubsection{Objects of class \texttt{SpatialPointsDataFrame }} An object \texttt{x} of class \texttt{SpatialPointsDataFrame} represents a pattern of points with additional variables (`marks') attached to each point. It includes an object of class \texttt{SpatialPoints} giving the point locations, and a data frame containing the additional variables attached to the points. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert an object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a spatial point pattern in \pkg{spatstat}. In this conversion, the data frame of additional variables in \texttt{x} will become the \texttt{marks} of the point pattern \texttt{z}. <>= y <- as(x, "ppp") @ (The conversion is performed by \texttt{as.ppp.SpatialPointsDataFrame}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) Before the conversion you can extract the data frame of auxiliary data by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. After the conversion you can extract these data by \verb!df <- marks(y)!. 
For example: <>= balt <- as(baltim, "ppp") bdata <- slot(baltim, "data") @ \subsubsection{Objects of class \texttt{SpatialLines}} \label{spatiallines.2.psp} A ``line segment'' is the straight line between two points in the plane. In the \pkg{spatstat} package, an object of class \texttt{psp} (``planar segment pattern'') represents a pattern of line segments, which may or may not be connected to each other (like matches which have fallen at random on the ground). In the \pkg{sp} package, an object of class \texttt{SpatialLines} represents a \textbf{list of lists} of \textbf{connected curves}, each curve consisting of a sequence of straight line segments that are joined together (like several pieces of a broken bicycle chain.) So these two data types do not correspond exactly. The list-of-lists hierarchy in a \texttt{SpatialLines} object is useful when representing internal divisions in a country. For example, if \texttt{USA} is an object of class \texttt{SpatialLines} representing the borders of the United States of America, then \verb!USA@lines! might be a list of length 52, with \verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. The borders of each State consist of several different curved lines. Thus \verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}th piece of the boundary of the \texttt{i}-th State. If \texttt{x} is an object of class \texttt{SpatialLines}, there are several things that you might want to do: \begin{enumerate} \item collect together all the line segments (all the segments that make up all the connected curves) and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a spatial line segment pattern. \end{quote} \item convert each connected curve to an object of class \texttt{psp}, keeping different connected curves separate. 
To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) @ (The conversion is performed by \texttt{as.psp.SpatialLines}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) The result will be a \textbf{list of lists} of objects of class \texttt{psp}. Each one of these objects represents a connected curve, although the \pkg{spatstat} package does not know that. The list structure will reflect the list structure of the original \texttt{SpatialLines} object \texttt{x}. If that's not what you want, then use \verb!curvelist <- do.call("c", out)! or <>= curvegroup <- lapply(out, function(z) { do.call("superimpose", z)}) @ to collapse the list-of-lists-of-\texttt{psp}'s into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]} is a \texttt{psp} object representing the \texttt{i}-th connected curve. In the second case, \texttt{curvegroup[[i]]} is a \texttt{psp} object containing all the line segments in the \texttt{i}-th group of connected curves (for example the \texttt{i}-th State in the \texttt{USA} example). \end{enumerate} The window for the spatial line segment pattern can be specified as an argument \texttt{window} to the function \texttt{as.psp}. (The conversion is performed by \texttt{as.psp.SpatialLines} or \texttt{as.psp.Lines}, which are functions in \pkg{maptools}.) \subsubsection{Objects of class \texttt{SpatialLinesDataFrame}} An object \texttt{x} of class \texttt{SpatialLinesDataFrame} is a \texttt{SpatialLines} object with additional data. The additional data is stored as a data frame \verb!x@data! with one row for each entry in \verb!x@lines!, that is, one row for each group of connected curves. In the \pkg{spatstat} package, an object of class \texttt{psp} (representing a collection of line segments) may have a data frame of marks. 
Note that each \emph{line segment} in a \texttt{psp} object may have different mark values. If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame}, there are two things that you might want to do: \begin{enumerate} \item collect together all the line segments that make up all the connected lines, and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a marked spatial line segment pattern. \end{quote} \item keep each connected curve separate, and convert each connected curve to an object of class \texttt{psp}. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) dat <- x@data for(i in seq(nrow(dat))) out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) @ The result is a list-of-lists-of-\texttt{psp}'s. See the previous subsection for explanation on how to change this using \texttt{c()} or \texttt{superimposePSP}. \end{enumerate} (The conversion is performed by \texttt{as.psp.SpatialLines}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) In either case, the mark variables attached to a particular \emph{group of connected lines} in the \texttt{SpatialLinesDataFrame} object, will be duplicated and attached to each \emph{line segment} in the resulting \texttt{psp} object. \subsubsection{Objects of class \texttt{SpatialPolygons}} First, so that we don't go completely crazy, let's introduce some terminology. A \emph{polygon} is a closed curve that is composed of straight line segments. You can draw a polygon without lifting your pen from the paper. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") @ \end{center} A \emph{polygonal region} is a region in space whose boundary is composed of straight line segments. 
A polygonal region may consist of several unconnected pieces, and each piece may have holes. The boundary of a polygonal region consists of one or more polygons. To draw the boundary of a polygonal region, you may need to lift and drop the pen several times. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") @ \end{center} An object of class \texttt{owin} in \pkg{spatstat} represents a polygonal region. It is a region of space that is delimited by boundaries made of lines. An object \texttt{x} of class \texttt{SpatialPolygons} represents a \textbf{list of polygonal regions}. For example, a single object of class \texttt{SpatialPolygons} could store information about every State in the United States of America (or the United States of Malaysia). Each State would be a separate polygonal region (and it might contain holes such as lakes). There are two things that you might want to do with an object of class \texttt{SpatialPolygons}: \begin{enumerate} \item combine all the polygonal regions together into a single polygonal region, and convert this to a single object of class \texttt{owin}. \begin{quote} For example, you could combine all the States of the USA together and obtain a single object that represents the territory of the USA. To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}. The result is a single window (object of class \texttt{"owin"}) in the \pkg{spatstat} package. \end{quote} \item keep the different polygonal regions separate; convert each one of the polygonal regions to an object of class \texttt{owin}. \begin{quote} For example, you could keep the States of the USA separate, and convert each State to an object of class \texttt{owin}. 
\end{quote} To do this, type the following: <>= regions <- slot(x, "polygons") regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) windows <- lapply(regions, as.owin) @ The result is a list of objects of class \texttt{owin}. Often it would make sense to convert this to a tessellation object, by typing <>= te <- tess(tiles=windows) @ \end{enumerate} (The conversion is performed by \texttt{as.owin.SpatialPolygons}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) {\bf The following is different from what happened in previous versions of \pkg{spatstat}} (prior to version \texttt{1.36-0}.) During the conversion process, the geometry of the polygons will be automatically ``repaired'' if needed. Polygon data from shapefiles often contain geometrical inconsistencies such as self-intersecting boundaries and overlapping pieces. For example, these can arise from small errors in curve-tracing. Geometrical inconsistencies are tolerated in an object of class \texttt{SpatialPolygons} which is a list of lists of polygonal curves. However, they are not tolerated in an object of class \texttt{owin}, because an \texttt{owin} must specify a well-defined region of space. These data inconsistencies must be repaired to prevent technical problems. \pkg{Spatstat} uses polygon-clipping code to automatically convert polygonal lines into valid polygon boundaries. The repair process changes the number of vertices in each polygon, and the number of polygons (if you chose option 1). To disable the repair process, set \texttt{spatstat.options(fixpolygons=FALSE)}. \subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}} What a mouthful! An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame} represents a list of polygonal regions, with additional variables attached to each region. 
It includes an object of class \texttt{SpatialPolygons} giving the spatial regions, and a data frame containing the additional variables attached to the regions. The regions are extracted by <>= y <- as(x, "SpatialPolygons") @ and you then proceed as above to convert the curves to \pkg{spatstat} format. The data frame of auxiliary data is extracted by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. For example: <>= cp <- as(columbus, "SpatialPolygons") cregions <- slot(cp, "polygons") cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) cwindows <- lapply(cregions, as.owin) @ There is currently no facility in \pkg{spatstat} for attaching marks to an \texttt{owin} object directly. However, \pkg{spatstat} supports objects called \textbf{hyperframes}, which are like data frames except that the entries can be any type of object. Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as follows: <>= ch <- hyperframe(window=cwindows) ch <- cbind.hyperframe(ch, columbus@data) @ Then \texttt{ch} is a hyperframe containing a column of \texttt{owin} objects followed by the columns of auxiliary data. \subsubsection{Objects of class \texttt{SpatialGridDataFrame} and \texttt{SpatialPixelsDataFrame}} An object \texttt{x} of class \texttt{SpatialGridDataFrame} represents a pixel image on a rectangular grid. It includes a \texttt{SpatialGrid} object \texttt{slot(x, "grid")} defining the full rectangular grid of pixels, and a data frame \texttt{slot(x, "data")} containing the pixel values (which may include \texttt{NA} values). The command \texttt{as(x, "im")} converts \texttt{x} to a pixel image of class \texttt{"im"}, taking the pixel values from the \emph{first column} of the data frame. If the data frame has multiple columns, these have to be converted to separate pixel images in \pkg{spatstat}. 
For example <>= y <- as(x, "im") ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y) @ An object \texttt{x} of class \texttt{SpatialPixelsDataFrame} represents a \emph{subset} of a pixel image. To convert this to a \pkg{spatstat} object, it should first be converted to a \texttt{SpatialGridDataFrame} by \texttt{as(x, "SpatialGridDataFrame")}, then handled as described above. \end{document} spatstat/NEWS0000644000176200001440000131766513624152254012643 0ustar liggesusers CHANGES IN spatstat VERSION 1.63-2 OVERVIEW o Minor changes to satisfy the package checker. CHANGES IN spatstat VERSION 1.63-1 OVERVIEW o Enhancement to plot.linim o Fill missing pixel values by taking the nearest defined pixel value. o Minor improvements and bug fixes. o spatstat now requires spatstat.utils version 1.17-0. o Version nickname: 'Made from recycled electrons' NEW FUNCTIONS o is.linim Tests whether an object belongs to class 'linim'. o nearestValue Given a pixel image on a subset of a rectangle, extend the image to the entire rectangle, using the nearest well-defined pixel value. SIGNIFICANT USER-VISIBLE CHANGES o markconnect Runs faster and handles much larger datasets. o markvario Runs faster and handles much larger datasets. o markcorr, markconnect, markvario The 'weights' can now be an expression to be evaluated, or a function, or a pixel image, as well as a numeric vector. o Smooth.ppp The 'weights' can now be an expression to be evaluated, or a function, or a pixel image, as well as a numeric vector. o rpoislpp If 'lambda' is a list of 'linim' or 'linfun' objects, then the argument L can be omitted. o plot.linim New argument 'fatten' improves visual appearance when style="colour". o plot.im, plot.owin The coordinate axes will be plotted if axes=TRUE. Axis labels xlab, ylab will be plotted if ann=TRUE. o bugfixes If sincedate="all" or sinceversion="all", then all bugs will be listed. 
o Jfox New argument 'warn.trim' makes it possible to suppress repeated warnings. o requireversion New argument 'fatal' BUG FIXES o predict.rhohat, simulate.rhohat When applied to a 'rhohat' object computed from linear network data (lpp or lppm), there was a warning about the lengths of vectors, and the results were incorrect. Fixed. o predict.rhohat, simulate.rhohat Crashed when applied to a 'rhohat' object computed from *multitype* linear network data (multitype lpp or lppm). Fixed. o Jfox envelope() commands using the summary function 'Jfox' crashed sometimes with a message about illegal spacing of 'r' values. Fixed. o leverage, influence, dfbetas Crashed when applied to an 'ippm' object in which the irregular components of the score, but not the Hessian, were provided by symbolic differentiation. Fixed. o parres Crashed in rare circumstances, when the data did not contain enough useable values to perform smoothing. Fixed. CHANGES IN spatstat VERSION 1.63-0 OVERVIEW o Compute minimum or maximum nearest-neighbour distance between each pair of types in a multitype point pattern. o Important bug fix in simulations of the multitype hard core process. o Numerous improvements and bug fixes. o Deprecated functions have been removed. o Version nickname: "Trees in space" SIGNIFICANT USER-VISIBLE CHANGES o minnndist, maxnndist New argument 'by' makes it possible to find the minimum or maximum nearest neighbour distance between each pair of possible types in a multitype pattern. o beachcolours If 'sealevel' lies outside 'srange', then 'srange' will be extended to include it (without a warning). o split<-.ppp The default for argument 'un' in 'split<-.ppp' now agrees with the default for the same argument in 'split.ppp'. o lineardisc New argument 'add'. Default plotting behaviour has changed. o rmh, rmh.default The printed output of the debugger (invoked by snoop=TRUE) has been improved. 
o plot.owin New argument 'use.polypath' controls how to plot a filled polygon when it has holes. o plot.profilepl This function has now been documented, and the graphics improved. o erode.owin, dilate.owin These deprecated functions have now been deleted (replaced by erosion.owin and dilation.owin) o delaunay.distance, delaunay.network These deprecated functions have now been deleted (replaced by delaunayDistance, delaunayNetwork) o dirichlet.edges, dirichlet.network, dirichlet.vertices, dirichlet.weights These deprecated functions have now been deleted (replaced by dirichletEdges, dirichletNetwork, dirichletVertices, dirichletWeights) BUG FIXES o rmh, simulate.ppm, MultiHard Simulated realisations of the multitype hard core model were completely incorrect (the interaction was effectively removed, changing the model into a Poisson process). Fixed. o kppm, AIC For kppm models fitted with method='clik2', the resulting value of logLik() was equal to 1/2 of the correct value. This would have affected model comparison using AIC, and model selection using step(). Fixed. o hyperframe Did not correctly handle date-time values (columns of class 'Date', etc). Fixed o rlpp The resulting pattern was unmarked even when it should have been multitype. Fixed. o idw Estimates were zero if 'se=TRUE' and 'power != 2' and 'at="pixels"'. Fixed. o model.matrix.mppm Sometimes returned a matrix with the wrong number of rows. Fixed. o nncross.ppp Format of output was incorrect if X was an empty pattern. Fixed. o rmh, rmh.default For a marked point process, the debugger did not display the marks. (The rmh debugger is invoked by calling rmh with snoop=TRUE). Fixed. o pairs.im, pairs.linim The argument 'labels' was sometimes ignored. Fixed. o as.im.data.frame Results were incorrect for factor-valued data. Fixed. o relrisk.ppp Crashed if there were more than 2 types of points and method = "leastsquares" or "weightedleastsquares". Fixed. 
o as.im.nnfun Crashed when applied to a function generated by nnfun.psp. Fixed. o diagnose.ppm Crashed for some models with an error message from 'beachcolours'. Fixed. o predict.rho2hat Crashed if one of the original covariates was a function rather than an image. Fixed. o lineardisc Crashed in some graphics environments. Fixed. o lineardisc Crashed if the network segments had marks. Fixed. o rmh Crashed for multitype models if 'nsave' was specified. Fixed. o vcov.mppm, simulate.mppm Crashed for multitype models with a hardcore interaction component. Fixed. o effectfun Crashed if 'covname' was not the name of a covariate appearing in the model and was not one of the reserved names 'x', 'y', 'marks'. Fixed. CHANGES IN spatstat VERSION 1.62-2 OVERVIEW o Urgent bug fixes and workarounds. o Version nickname: "Shape-shifting lizard" SIGNIFICANT USER-VISIBLE CHANGES o colourmap Argument 'col' is now permitted to have length 1, representing a colour map in which all values are mapped to the same colour. o lut Argument 'outputs' is now permitted to have length 1, representing a lookup table in which all inputs are mapped to the same output value. BUG FIXES o envelope Crashed sometimes, with a message about unrecognised arguments, when applied to a summary function created by the user. Fixed. CHANGES IN spatstat VERSION 1.62-1 OVERVIEW o We thank Mohammad Ghorbani, Ute Hahn, Abdollah Jalilian, Nestor Luambua, Greg McSwiggan, Annie Mollie and Jakob Gulddahl Rasmussen for contributions. o spatstat now requires spatstat.utils version 1.15-0 and goftest version 1.2-2. o Nearest Neighbour Index function can now return mark values. o Important fix in Ripley isotropic correction. o Index of repulsion strength for determinantal point process models. o Nearest neighbours between two point patterns in any number of dimensions. o More options for handling bad simulation outcomes in envelope(). o Bandwidth selectors warn about extreme values of bandwidth. 
o Tessellations on a linear network can now have marks. o New functions for simulating point processes on a linear network. o More functions for manipulating tessellations on a linear network. o mppm accepts case weights. o Bug fixes and minor improvements. o Nickname: 'An update which will live in infamy' NEW FUNCTIONS o repul Repulsiveness index for a determinantal point process model. o reach.kppm Reach (interaction distance) for a Cox or cluster point process model. o summary.dppm, print.summary.dppm Summary method for determinantal point process models. o nncross.ppx Nearest neighbours between two point patterns in any number of dimensions. o uniquemap.matrix Method for uniquemap for matrices. o repairNetwork Detect and repair inconsistencies in internal data in a linnet or lpp object. o marks<-.lintess, unmark.lintess Assign marks to the tiles of a tessellation on a linear network. o marks.lintess Extract the marks of the tiles of a tessellation on a linear network. o tilenames.lintess Extract the names of the tiles in a tessellation on a linear network o tilenames<-.lintess Change the names of the tiles in a tessellation on a linear network o nobjects.lintess Count the number of tiles in a tessellation on a linear network o as.data.frame.lintess Convert a tessellation on a linear network into a data frame. o rcelllpp Simulate the cell point process on a linear network o rSwitzerlpp Simulate the Switzer-type point process on a linear network o intersect.lintess Form the intersection of two tessellations on a linear network o chop.linnet Divide a linear network into tiles using infinite lines SIGNIFICANT USER-VISIBLE CHANGES o lintess New argument 'marks' Tessellations can now have marks. o tilenames, tilenames<- These functions are now generic, with methods for 'tess' and 'lintess' o mppm New argument 'weights' specifies case weights for each row of data. o unstack.lintess Now handles marks. o plot.lintess Modified to display the marks attached to the tiles. 
Changed options: style=c("colour", "width", "image"). o as.linfun.lintess The default function values are the marks. o print.lintess, summary.lintess, print.summary.lintess Output now includes information about marks. o nnfun.ppp, nnfun.psp, nnfun.lpp New argument 'value' specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. o envelope.ppp, envelope.ppm, envelope.kppm, envelope.pp3 New arguments 'rejectNA' and 'silent'. o envelope.lpp, envelope.lppm New arguments 'maxnerr', 'rejectNA' and 'silent'. o plot.psp New argument 'col' gives control over the colour map representing the values of marks attached to the segments. o plot.im Some warnings are suppressed when do.plot=FALSE. o plot.linim New explicit argument 'box' determines whether to plot a bounding box. Default is now FALSE in all cases. o Kest, Kinhom, pcf, pcfinhom, edge.Ripley Calculation of isotropic edge correction for polygonal windows has changed slightly. Results are believed to be more accurate. Computation has been accelerated by about 20 percent in typical cases. o bw.diggle, bw.ppl, bw.lppl, bw.pcf, bw.CvL, bw.voronoi A warning is issued if the optimal value of the cross-validation criterion occurs at an endpoint of the search interval. New argument 'warn'. o mad.test, dclf.test, dg.test, bits.test Function values which are infinite, NA or NaN are now ignored in the calculation (with a warning) instead of causing an error. Warning messages are more detailed. o rmhcontrol, rmh The parameter 'nsave' can now be a vector of integers. o diagnose.ppm Accelerated, when type="inverse", for models without a hard core. o uniquemap.data.frame Accelerated for some cases. o vcov.ppm, vcov.mppm New argument 'nacoef.action' specifies what to do if some of the fitted coefficients are NA, NaN or Inf. o Lest, Linhom, Ldot, Lcross, Ldot.inhom, Lcross.inhom These summary functions now have an explicit argument 'correction'. Behaviour is unchanged. 
o bugfixes Arguments sinceversion="book" or sincedate="book" are interpreted to give all bugs reported after publication of the spatstat book. o cbind.hyperframe, rbind.hyperframe The result now retains the row.names of the original arguments. o print.summary.owin More information is printed. o append.psp Arguments may be NULL. o as.psp Now permits a data frame of marks to have only one column, instead of coercing it to a vector. BUG FIXES o as.linnet.psp Sometimes produced a network with duplicated segments. [Such objects can be repaired using 'repairNetwork'.] Fixed. o edge.Ripley, Kest, Kinhom Isotropic correction weights for polygonal windows were sometimes incorrect for small radius 'r' if the polygon contained many small segments [spotted by Annie Mollie] or if the polygon was very long and thin [spotted by Nestor Luambua]. Problem arose in spatstat 1.60-0. Fixed. o lppm Did not correctly handle the case where the left-hand side of the formula is the name of an entry in the 'data' argument. Fixed. o plot.lpp Did not correctly handle the argument 'which.marks'. Fixed. o plot.im Did not correctly handle the argument 'ribargs$at'. Fixed. o density.lpp Sometimes requested a larger value of 'iterMax' but ignored it. Fixed. o [.linnet, [.lpp Crashed if x contained inconsistent internal data (when index 'j' was a window, and snip=TRUE). Fixed. o plot.linim Crashed if the pixel values were complex numbers. Fixed. o plot.linfun Crashed if the function values were complex numbers. Fixed. o integral.linim, mean.linim Crashed if the image had logical values. Fixed. o Re, Im, Arg, Mod, Conj For pixel images on a linear network (class 'linim') these operations crashed if the pixel values were complex numbers. Fixed. o studpermu.test Crashed if the hyperframe was very large. Fixed. o studpermu.test Crashed in some cases when the simulated functions were incompatible with each other, due to the use of different edge corrections. Fixed. 
o vcov.ppm, print.ppm, summary.ppm Crashed in some cases if the fitted coefficients were NA. Fixed. o quantess.owin, quantess.ppp, quantess.im Crashed in some cases because the tile labels were not unique. Fixed. o plot.pp3 Did not correctly handle graphical arguments ('col', 'pch', 'cex') if they were vectors [Spotted by Abdollah Jalilian]. Fixed. o shift.linnet Generated a spurious warning. Fixed. o density.lpp, lixellate Crashed in some cases when a data point was exactly at a vertex. [Spotted by Jakob Gulddahl Rasmussen.] Fixed. o plot.linim Crashed (when style="width") if any pixel values were NaN. Fixed. o Fest, Jest, Jdot, Jcross, Hest, Iest, rectcontact envelope() commands using one of these summary functions crashed sometimes with a message about illegal spacing of 'r' values. Fixed. o plot.linnet, plot.psp Ignored argument 'col' when style="width". Fixed. o rshift.psp Crashed if X had a data frame of marks. [Spotted by Ute Hahn.] Fixed. o Kscaled Crashed if Ripley's isotropic edge correction was selected but the translation edge correction was not. Fixed. CHANGES IN spatstat VERSION 1.61-0 OVERVIEW o We thank Jordan Brown, Tilman Davies and Greg McSwiggan for contributions. o Fast kernel estimation on a linear network using 2D kernels. o Nonparametric maximum likelihood estimation of 'rho'. o Extension of Scott's rule for bandwidth selection. o Cross-validated bandwidth selection on a linear network. o More support for character-valued images. o Random thinning of clumps. o Bug fixes and minor improvements. o Nickname: 'Puppy zoomies' NEW FUNCTIONS o densityQuick.lpp Fast kernel estimator of point process intensity on a network using 2D smoothing kernel. o bw.scott.iso Isotropic version of Scott's rule (for point patterns in any dimension). o data.lppm Extract the original data point pattern from a fitted model of class 'lppm'. o rthinclumps Divide a spatial region into clumps and randomly delete some of them. 
o dimnames.hyperframe, dimnames<-.hyperframe Methods for extracting and changing the 'dimnames' of a hyperframe. SIGNIFICANT USER-VISIBLE CHANGES o rhohat Estimation by nonparametric maximum likelihood is now supported, assuming the intensity is a monotone function of the covariate. New options: smoother="increasing" and smoother="decreasing". o density.lpp New argument 'distance' offers a choice of different kernel methods. o bw.scott Now handles point patterns of any dimension. New arguments 'isotropic' and 'd'. o bw.ppl New argument 'shortcut' allows faster computation. Additional arguments '...' are now passed to density.ppp. o [<-.im New argument 'drop' controls behaviour when indices are missing as in 'x[] <- value' o mppm Now supports 'self-starting' interactions. o as.im New argument 'stringsAsFactors' is recognised by many methods. It enables the creation of character-string-valued images. o plot.im Axes are now prevented from extending outside the image rectangle. o plot.im New argument 'zap'. o blur New argument 'kernel'. o Smooth.im New argument 'kernel'. o quadrat.test New argument 'df.est'. o edge.Ripley Numerical stability has been improved on some platforms. Results may have changed in marginal cases (e.g. where the circle of radius r centred at X is tangent to an edge of the polygonal window). o rownames, rownames<-, colnames, colnames<- These operations now work for hyperframes. o quadrat.test Improved labelling of test when argument 'CR' is given. o plot.pppmatching This existing function now has a help file. New argument 'adjust'. o solist, is.sob, lintess Objects of class 'lintess' are now recognised as 2D spatial objects for the purposes of solist() and is.sob(). o as.linfun.lintess Functions created by as.linfun.lintess() now generate better output when the function is printed. BUG FIXES o densityVoronoi.lpp Did not correctly handle patterns containing duplicated points. Fixed. 
o quadrat.test Argument 'CR' was ignored when method="MonteCarlo". Fixed. o localKcross.inhom, localLcross.inhom Argument lambdaX was ignored. Fixed. o "[.linim" Factor values were erroneously converted to integers, in some cases. Fixed. o "[<-.hyperframe" Did not handle some classes of objects correctly. Fixed. o effectfun In a multitype point process model, effectfun() ignored any user-specified value of 'marks'. Fixed. o as.linim.linfun Additional arguments (other than spatial coordinates) were ignored. Fixed. o plot.solist Display output was mangled if one entry in the list was an object that would normally generate multiple panels of graphics, such as an 'lpp' or 'tess' object with multiple columns of marks, or a marked 'msr', 'leverage.ppm' or 'influence.ppm' object. Fixed. o plot.lpp Return value was mangled when x had multiple columns of marks. Fixed. o colourtable Crashed in some cases when 'breaks' was given. Fixed. o rLGCP Crashed if 'win' was not a rectangle and 'mu' was not a constant. Fixed. o intersect.tess Crashed if 'Y' was a window object and 'keepmarks=TRUE'. Fixed. o envelope.lppm Crashed if argument 'simulate' was given. Fixed. o unstack.solist Did not correctly handle objects of class 'lintess'. Fixed. o unstack.solist Did not correctly handle objects of class 'tess' if they had multiple columns of marks. Fixed. o plot.pppmatching Issued spurious warnings about unrecognised plot arguments. Fixed. o plot.lintess Issued spurious warnings about unrecognised plot arguments. Fixed. o shift.lpp, rotate.lpp Issued spurious warnings if argument 'origin' was given. Fixed. CHANGES IN spatstat VERSION 1.60-1 OVERVIEW o Version number incremented for administrative reasons. CHANGES IN spatstat VERSION 1.60-0 OVERVIEW o We thank Ottmar Cronie, Tilman Davies, Andrew Hardegen, Tom Lawrence, Robin Milne, Mehdi Moradi, Gopalan Nair, Tim Pollington and Suman Rakshit for contributions. 
o Random thinning and random labelling of spatial patterns extended to different types of pattern. o Confidence intervals for multitype K functions. o Envelopes for balanced two-stage test o Accelerated some code. o Minor bug fixes and improvements. o Package built under R 3.6.0. o Version nickname: 'Swinging Sixties' NEW FUNCTIONS o bits.envelope Global simulation envelope corresponding to bits.test, the balanced independent two-stage Monte Carlo test. o extrapolate.psp Extrapolate line segments to obtain infinite lines. o uniquemap Map duplicate points to unique representatives. Generic with methods for ppp, lpp, ppx o uniquemap.data.frame Map duplicate rows to unique representatives o localKcross, localLcross, localKdot, localLdot, localKcross.inhom, localLcross.inhom Multitype local K functions. SIGNIFICANT USER-VISIBLE CHANGES o lohboot Now works for multitype K functions Kcross, Kdot, Lcross, Ldot, Kcross.inhom, Lcross.inhom. o Kinhom, pcfinhom Leave-one-out calculation is implemented when 'lambda' is a fitted model of class 'dppm'. o Kcross.inhom, Kdot.inhom, Lcross.inhom, Ldot.inhom Leave-one-out calculation is implemented when 'lambdaX' is a fitted model of class 'dppm'. o rthin, rlabel These functions now work on point patterns of all classes (ppp, lpp, pp3, ppx) and line segment patterns (psp). o bw.abram New argument 'smoother' determines how the pilot estimate is computed. Formal arguments rearranged. o plot.im New argument 'riblab'. o rlabel New arguments 'nsim' and 'drop'. o localK, localKinhom New argument 'rmax'. o rLGCP Accelerated. o anyDuplicated.ppp Accelerated. o duplicated.ppp Accelerated, in most cases. o simulate.kppm Accelerated, for LGCP models. o predict.ppm Accelerated, for models fitted with method="VBlogi" o print.rmhmodel Output improved. BUG FIXES o plot.linim, plot.linfun Not all the entries of 'leg.args' were passed to text.default. Fixed. o densityVoronoi.ppp Did not correctly handle patterns containing duplicated points. Fixed. 
o markcorr The argument 'correction="none"' did not generate any results. [Spotted by Tim Pollington.] Fixed. o names<-.fv Did not adjust the plotting formula, so that a subsequent call to plot.fv would complain about missing variables. Fixed. o im.apply Crashed if 'FUN' returned factor values. Fixed. o stienenSet Crashed if the data contained duplicated points. Fixed. o predict.ppm, effectfun Crashed in some cases, with the message 'ncol(x)=nrow(v) is not TRUE'. Fixed. o parres Crashed in some cases, with the message 'logical subscript too long'. Fixed. o dclf.test, mad.test, dg.test, bits.test Crashed in some cases if the summary function values were infinite or NA. Fixed. CHANGES IN spatstat VERSION 1.59-0 OVERVIEW o We thank Lucia Cobo Sanchez, Tilman Davies, Maximilian Hesselbarth, Kassel Hingee, Mehdi Moradi, Suman Rakshit, Jan Sulavik and Luke Yates for contributions. o Extensions to adaptive intensity estimators. o 'Dartboard' tessellation using polar coordinates. o Performance improvements. o Minor improvements and bug fixes. o Version nickname: "J'ai omis les oeufs de caille" NEW FUNCTIONS o polartess Tessellation using polar coordinates. o bw.abram Variable bandwidths for adaptive smoothing, using Abramson's Rule. o densityAdaptiveKernel Calculates adaptive estimate of point process intensity using variable-bandwidth kernel estimation. o densityVoronoi, densityVoronoi.ppp Calculates adaptive estimate of point process intensity using tessellation methods. This is an extension of the old function adaptive.density. o densityVoronoi.lpp Voronoi or smoothed Voronoi estimator of intensity for point pattern on a linear network. o coords.quad New method for 'coords', to extract the coordinates of the points in a quadrature scheme. o lineartileindex Low-level function to classify points on a linear network according to which tile of a tessellation they fall inside. 
SIGNIFICANT USER-VISIBLE CHANGES o latest.news Now prints news documentation for the current major version, by default. New argument 'major'. o quantess The covariate Z can now be "rad" or "ang" representing polar coordinates. New argument 'origin' specifies the origin of polar coordinates. New argument 'eps' controls accuracy of calculation. o envelope The argument 'simulate' can now be a function (such as 'rlabel'). The function will be applied repeatedly to the original data Y, and should yield point patterns of the same kind. o adaptive.density This function can now perform adaptive estimation by two methods: either tessellation-based methods or variable-bandwidth kernel estimation. The calculations are performed by either 'densityVoronoi' or 'densityAdaptiveKernel'. o densityVoronoi This performs the calculations of the old function 'adaptive.density'. New argument 'fixed' specifies the subsampling. New argument 'method' allows a choice between the original algorithm and the recently described 'smoothed Voronoi' estimator. Default value of 'f' changed to 'f=1'. o pcf.ppp Now accepts correction="none". o pairorient Default edge corrections now include 'bord.modif'. o funxy Functions of class 'funxy' can now be applied to quadrature schemes. o lohboot Computation accelerated when the window is a rectangle. o nncorr, nnmean, nnvario New argument 'na.action'. o pp3 New argument 'marks'. o clusterfit New argument 'verbose'. o beachcolours, beachcolourmap Improved positioning of the yellow colour band. o linearK, linearpcf, linearKdot, linearKcross, linearpcfcross, linearpcfdot Computation accelerated for networks which are not connected. o as.linnet.psp Computation accelerated. o as.linfun.lintess Computation accelerated. o selfcut.psp Computation accelerated. The result now has an attribute "camefrom" indicating the provenance of each segment in the result. 
o bw.stoyan The rule has been modified so that, if the pattern is empty, it is now treated as if it contained 1 point, so that a finite bandwidth value is returned. o rebound.owin Now preserves unitnames of the objects. o rescale.owin, rescale.ppp, rescale.psp The geometrical type of the window is now preserved in all cases (previously if the window was polygonal but was equivalent to a rectangle, the rescaled window was a rectangle). o shift.im, shift.owin, shift.ppp, shift.psp More options for the argument 'origin'. o nnwhich.ppp, nnwhich.default Accelerated, in the case k > 1. o is.subset.owin Improved robustness against numerical error. o plot.im Improved behaviour when the pixel values are almost constant. o Finhom, Ginhom, Jinhom A warning is issued if bias is likely to occur because of undersmoothing. New arguments 'warn.bias' and 'savelambda'. o plot.colourmap Now handles a colourmap defined on an interval of length zero. o ewcdf Computation accelerated. New arguments 'normalise' and 'adjust'. The result does not inherit class 'ecdf' if normalise=FALSE. o spatialcdf Computation accelerated. The result does not inherit class 'ecdf' if normalise=FALSE. o effectfun New argument 'nvalues'. o parres The argument 'covariate' may be omitted if the model involves only one covariate. o alltypes If 'envelope=TRUE' and the envelope computation reaches the maximum permitted number of errors (maxnerr) in evaluating the summary function for the simulated patterns, then instead of triggering a fatal error, the envelope limits will be set to NA. o simulate.kppm Additional arguments '...' are now passed to the function that performs the simulation. BUG FIXES o spatialcdf The argument 'weights' was ignored in some cases. Fixed. o ppp Points inside the window were erroneously rejected as lying outside the window, if the window was a polygon equivalent to a rectangle with sides longer than 10^6 units. Fixed. 
o inside.owin All results were FALSE if the window was a polygon equivalent to a rectangle with sides longer than 10^6 units. Fixed. o sumouter Result was incorrect (all entries were zero) if 'w' was missing and 'y' was given. Fixed. o extractbranch.lpp Point pattern coordinates were sometimes erroneously set to NA. Fixed. o beachcolours, beachcolourmap The number of colours was not always equal to 'ncolours'. [Spotted by Tilman Davies.] Fixed. o is.subset.owin Sometimes gave the wrong answer for polygonal windows due to numerical rounding error. Fixed. o update.kppm Crashed if the updating information was a point pattern and the original model call did not include a formula. Spotted by Luke Yates. Fixed. o incircle, inpoint Crashed if the window was extremely thin. Fixed. o effectfun Crashed in some obscure cases. Fixed. o closepairs.pp3 Crashed if distinct=FALSE and what="all". Fixed. o update.ippm Crashed if the model was fitted using method="logi". Fixed. o plot.msr Crashed sometimes if x was multitype and multiplot=FALSE. Fixed. o anova.mppm Crashed if applied to a single model, unless the current environment was the global environment. Fixed. o lurking.mppm If 'covariate' was a list of images, the code crashed sometimes with message 'Fisher information is singular'. Fixed. o im Crashed if 'mat' was a 1-dimensional table (class 'table'). Fixed. o dirichlet Crashed if the pattern was empty or contained only 1 point. Fixed. o rjitter Crashed if the pattern contained 0 or 1 points and the argument 'radius' was not specified. Fixed. o quantess.owin Crashed if Z was a function(x,y). Fixed. o quadrat.test The p-value was NA if one of the observed counts was zero, for the Cressie-Read tests with CR not equal to 1 or -1. Fixed. o quadratcount.ppp Sometimes issued an incorrect warning that data points were outside the tessellation, when 'tess' was a tessellation represented by a pixel image. Fixed. 
o as.linim.linfun Factor-valued functions were converted to integer-valued images. Spotted by Suman Rakshit. Fixed. o plot.linfun Did not display factor-valued functions correctly. Spotted by Suman Rakshit. Fixed. o dclf.test, mad.test Crashed, in rare cases, when applied to an 'envelope' object. Spotted by Jan Sulavik. Fixed. o plot.spatialcdf, plot.ewcdf A horizontal line was plotted at height 1, even if the CDF was unnormalised. Fixed. o plot.tess The names of the tiles were sometimes re-ordered in the legend. Fixed. o rThomas, rMatClust, rCauchy, rVarGamma If the simulation window was not a rectangle, the attribute 'Lambda' was a numeric vector, rather than a pixel image. Fixed. CHANGES IN spatstat VERSION 1.58-2 OVERVIEW o Venn diagram tessellation o Internal bug fixes. NEW FUNCTIONS o venn.tess Venn diagram tessellation. CHANGES IN spatstat VERSION 1.58-1 OVERVIEW o Bug fixes. o Version nickname: "Compliment Sandwich" BUG FIXES o lpp Did not correctly detect some situations where the input data are invalid. Fixed. o lurking.ppp, lurking.ppm Did not correctly detect some situations where the input data are invalid. Fixed. CHANGES IN spatstat VERSION 1.58-0 OVERVIEW o We thank Andrew Bevan, Hamidreza Heydarian and Andrew P Webster for contributions. o Mark-mark scatter plot. o Standard error calculation for inverse-distance weighting o Minor improvements and extensions. o Version nickname: 'Drop Bear' NEW FUNCTIONS o markmarkscatter Mark-mark scatterplot SIGNIFICANT USER-VISIBLE CHANGES o idw Standard errors can now be calculated by setting 'se=TRUE'. o im.apply Computation accelerated, especially when NA's are absent. New arguments 'fun.handles.na' and 'check'. o kppm, dppm, clusterfit Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. Individually-named arguments 'q,p,rmax,rmin' now take precedence over entries with the same names in the list 'ctrl'. 
o rotmean Improved algorithm stability o summary.kppm Prints more information about algorithm convergence. o closepairs.pp3, crosspairs.pp3 Argument 'what' can take the value "ijd" o plot.onearrow Graphical parameters, specified when the object was created, are now taken as the defaults for graphical parameters to the plot. BUG FIXES o rotmean When result="im" the resulting image did not have the same dimensions as the input. [Spotted by Hamidreza Heydarian.] Fixed. o rotmean If argument 'origin' was given, and if result="im" was specified, the resulting image was wrongly displaced. Fixed. o runifpointx Result was mangled when n=0 or n=1. Fixed. CHANGES IN spatstat VERSION 1.57-1 OVERVIEW o Bug fixes. o Version nickname: 'Cartoon Physics' BUG FIXES o multiplicity.default Some entries of the result were incorrectly set to 1. Fixed. o anova.ppm Crashed if adjust=TRUE and the models were fitted with use.gam=TRUE. Fixed. CHANGES IN spatstat VERSION 1.57-0 OVERVIEW o We thank Corey Anderson, Ryan Arellano, Hadrien Commenges, Ottmar Cronie, Tilman Davies, Maximilian Hesselbarth, Kassel Hingee, Tomas Lazauskas, Marie-Colette van Lieshout, Rasmus Waagepetersen and 'daitakahashi' for contributions. o Cronie-van Lieshout bandwidth selection. o Smoothing functions handle non-Gaussian kernels. o Infinite smoothing bandwidths permitted. o Positive confidence limits for rhohat. o Improved bivariate interpolation. o subset() method for line segment patterns. o Important bug fixes in rthin and density.ppp o Minor bug fixes and improvements. o Version nickname: 'Zombie apocalypse' NEW FUNCTIONS o bw.CvL Cronie-Van Lieshout bandwidth selection for density estimation. [Contributed by Ottmar Cronie and Marie-Colette van Lieshout.] o subset.psp Method for 'subset' for line segment patterns. SIGNIFICANT USER-VISIBLE CHANGES o densityfun.ppp, Smoothfun.ppp, Smooth.ppp These commands now handle non-Gaussian kernels. 
o density.ppp, relrisk.ppp, Smooth.ppp, densityfun.ppp, Smoothfun.ppp Argument 'sigma' can be infinite. o interp.im New argument 'bilinear' specifies the choice of interpolation rule. o rhohat Methods for rhohat have a new argument 'positiveCI' specifying whether confidence limits should be positive. o plot.colourmap New argument 'increasing' specifies whether the colours are displayed in order left-to-right/bottom-to-top. Changed default behaviour for discrete colour maps when vertical=FALSE. o split.ppx Argument 'f' can be a logical vector. o relrisk.ppp If se=TRUE and at="pixels", the result belongs to class 'solist'. o imcov, setcov, convolve.im The name of the unit of length is preserved. o density.ppp Slightly accelerated for non-Gaussian kernels. o bw.scott The two bandwidth values in the result now have names ('sigma.x' and 'sigma.y'). o pairdist.default Now checks whether the data are valid 2-dimensional coordinates. o pixellate.ppp New argument 'savemap' o rtemper New argument 'track'. Code runs slightly faster. o eval.im, eval.linim New argument 'warn'. o Kres, Kcom, Gcom, psstG If any of the calculated weights for the summary function are infinite or NA, they are reset to zero, with a warning, instead of a fatal error. BUG FIXES o rthin If P was close to 1, the result was sometimes an empty point pattern when it should have been identical to X. [Spotted by Maximilian Hesselbarth.] Fixed. o density.ppp Standard error calculations were incorrect when sigma was a single numeric value. The output was equal to 'sqrt(sigma)' times the correct answer. Fixed. o density.ppp Result was incorrect for non-Gaussian kernels when at="points" and leaveoneout=FALSE. Fixed. o density.ppp Did not pass additional arguments "..." to a user-supplied kernel function, in some cases. Fixed. o as.im.function, as.im.funxy If the function values were factor values and the window was not a rectangle, the result was an image with all pixel values NA. [Spotted by Corey Anderson.] 
Fixed. o plot.funxy If the function values were factor values and the window was not a rectangle, the plot was empty. [Spotted by Corey Anderson.] Fixed. o nnorient Crashed if the border correction did not retain any data points. [Spotted by Tomas Lazauskas.] Fixed. o linim Crashed in some cases with a message about unitnames. Fixed. o density.lpp Default value of 'dx' was sometimes incorrect. Fixed. o rMatClust, rThomas, rCauchy, rVarGamma Issued a spurious warning about bandwidth selection when saveLambda=TRUE. Fixed. o density.ppp Issued a spurious warning about bandwidth selection when 'kernel' was a user-supplied function. Fixed. o clusterfield.function Issued a spurious warning about bandwidth selection. Fixed. o relrisk.ppp Issued a spurious warning if the argument 'case' or 'control' was given, for a bivariate point pattern. Fixed. o superimpose.ppp If 'W' was a character string or function, the behaviour was not exactly as described in the help file. Fixed. o plot.psp If the marks were factor values, the colour map was displayed upside down. Fixed. o eval.fv If one of the 'fv' objects included ratio information (class 'rat') then this was erroneously retained, in some cases. Fixed. o linearKcross Crashed (with a message about a missing value of 'correction') if there were no pairs of points to count. Fixed. o envelope.lpp Crashed (randomly) when fix.n=TRUE and fix.marks=TRUE. Fixed. CHANGES IN spatstat VERSION 1.56-1 OVERVIEW o We thank Agustin Lobo for contributions. o Improvements to infrastructure. o Bug fixes. o Version nickname: "Invisible Friend" NEW FUNCTIONS o as.im.expression New method for 'as.im' for expressions. o flipxy.tess Method for 'flipxy' for tessellations. SIGNIFICANT USER-VISIBLE CHANGES o sdr This is now a generic function, with a method for class 'ppp'. o pointsOnLines The result now has an attribute named "map" which maps each point to its parent line segment. o summary.lpp Improved output. 
o intersect.owin Argument 'fatal' now defaults to FALSE. o quadrature schemes (class "quad" and "logiquad") Improved print and summary methods. BUG FIXES o cut.lpp Crashed if the marks were a data frame or hyperframe. Fixed. o summary.lpp, print.summary.lpp Output was garbled if the marks were a data frame or hyperframe. Fixed. o integral.linim Crashed if the function had NA values. Fixed. o Tstat Crashed if ratio=TRUE. Fixed. o intersect.owin Ignored argument 'fatal' in some cases. [Spotted by Agustin Lobo.] Fixed. o plot.tess Crashed if do.col=TRUE and 'values' was a factor. Fixed. o pcf.ppp Crashed if 'domain' was given and ratio=TRUE. Fixed. o "[<-.sparse3Darray" Crashed if 'value' was one-dimensional and the indices i, j, k specified a two-dimensional subset of x. Fixed. o plot.quad Crashed if tiles=TRUE for a quadrature scheme created by quadscheme(method="dirichlet", exact=TRUE). Fixed. o bugtable Crashed if there were no bugs! Fixed. o sparse array code An array bounds violation (segmentation fault) could occur. Fixed. o internal code Numerous internal bugs have been fixed. CHANGES IN spatstat VERSION 1.56-0 OVERVIEW o We thank Sebastian Meyer and Suman Rakshit for contributions. o Kernel estimate of intensity as a function(x,y) o Extract discrete and continuous components of a measure. o Improvements and extensions to leverage and influence code. o Plot a line segment pattern using line widths. o Find connected components of each tile in a tessellation. o Geometrical operations on 'distfun' objects. o Join vertices in a linear network. o Distance map and contact distribution for rectangular structuring element. o An infinite number of infinitesimal bugs has been detected and fixed. o Version nickname: "Bondi Tram" NEW FUNCTIONS o densityfun, densityfun.ppp Compute a kernel estimate of intensity of a point pattern and return it as a function of spatial location. o as.im.densityfun Convert function(x,y) to a pixel image. 
o measureDiscrete, measureContinuous Extract the discrete and continuous components of a measure. o connected.tess Find connected components of each tile in a tessellation and make a new tessellation composed of these pieces. o dffit.ppm Effect change diagnostic DFFIT for spatial point process models. o shift.distfun, rotate.distfun, reflect.distfun, flipxy.distfun, affine.distfun, scalardilate.distfun Methods for geometrical operations on 'distfun' objects. o rescale.distfun Change the unit of length in a 'distfun' object. o plot.indicfun Plot method for indicator functions created by as.function.owin. o Smooth.leverage.ppm, Smooth.influence.ppm Smooth a leverage function or an influence measure. o integral.leverage.ppm, integral.influence.ppm Compute the integral of a leverage function or an influence measure. o mean.leverage.ppm Compute the mean value of a leverage function. o rectdistmap Distance map using rectangular metric. o rectcontact Contact distribution function using rectangular structuring element. o joinVertices Join specified vertices in a linear network. SIGNIFICANT USER-VISIBLE CHANGES o plot.psp Segments can be plotted with widths proportional to their mark values. New argument 'style'. o msr Infinite and NA values are now detected (if check=TRUE) and are reset to zero, with a warning. o leverage.ppm, influence.ppm, dfbetas.ppm Faster computation in some cases. o as.im.nnfun, as.im.smoothfun New argument 'approx' chooses between a fast, approximate algorithm and a slow, exact algorithm. o cdf.test More jittering is applied when jitter=TRUE. Warnings about tied values should not occur any more. o plot.im Improved behaviour when all pixel values are NA. o plot.tess Now generates a separate plot panel for each column of marks, if do.col=TRUE. New argument 'multiplot'. 
o pcfinhom Now handles correction='good' o solist New argument '.NameBase' o runifpointOnLines, rpoisppOnLines New argument 'drop' o plot.studpermutest This existing function now has a help file. o bdist.points Accelerated, for polygonal windows (thanks to Sebastian Meyer). o linnet When argument 'edges' is specified, the code now checks whether any edges are duplicated. BUG FIXES o kernel.moment Result was incorrect for kernel="cosine" and kernel="optcosine". Fixed. o "[.msr" Format was mangled if the subset contained exactly one quadrature point. Fixed. o tess If a list of tiles was given, and the tiles were pixel images or masks, their pixel resolutions were ignored, and reset to the default 128x128. Fixed. o plot.linim Ignored argument 'legend' when style="colour". [Spotted by Suman Rakshit.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Crashed sometimes if the model was fitted by method="logi". Fixed. o Smooth.ppp Crashed with some combinations of the arguments varcov and weights when X had several columns of marks. Fixed. o plot.tess Crashed sometimes when do.col=TRUE and values=NULL. Fixed. o dilation.ppp Crashed if polygonal=FALSE. Fixed. o plot.tess Ignored the marks of a tessellation defined by a pixel image. Fixed. o predict.ppm Crashed when computing a confidence interval or prediction interval if 'window' was a tessellation. Fixed. o tiles names(tiles(x)) was not always identical to tilenames(x). Fixed. o model.images.lppm Crashed in many cases. Fixed. o model.images.lppm Names or dimnames were missing in some cases. Fixed. o nncross.ppp Result had the wrong format if npoints(X) = 0 and length(what) = 1. (Spotted by Sebastian Meyer). Fixed. o plot.colourmap Crashed if the colourmap was defined on an interval of dates or times. Fixed. o StraussHard, leverage.ppm, influence.ppm, dfbetas.ppm Leverage and influence calculations generated an error when applied to models fitted with the StraussHard interaction. Fixed. 
o "[.ppp" Crashed if there were several columns of marks, some of which were factors, and drop=TRUE. Fixed. CHANGES IN spatstat VERSION 1.55-1 OVERVIEW o We thank Jens Astrom, Ines Moncada, Mehdi Moradi and Nicholas Read for contributions. o More support for tessellations. o Improved support for linear networks. o Fixed longstanding bug in leverage and influence diagnostics. o Minor improvements and bug fixes. o Version nickname: "Gamble Responsibly" NEW FUNCTIONS o summary.ssf Summary method for a spatially sampled function (class 'ssf'). o unstack.tess Given a tessellation with multiple columns of marks, take the columns one at a time, and return a list of tessellations, each carrying only one of the original columns of marks. SIGNIFICANT USER-VISIBLE CHANGES o plot.tess This plot method can now fill each tile with a different colour. New arguments 'do.col', 'values', 'col' and 'ribargs'. Old argument 'col' has been renamed 'border' for consistency. o integral.linim, integral.linfun Argument 'domain' can now be a tessellation. o integral.ssf Argument 'domain' can now be a tessellation. o as.owin.default Now accepts a structure with entries named 'xmin,ymin,xmax,ymax' in any order. This handles objects of class 'bbox' in the 'sf' package. o as.owin.default Now detects objects of class "SpatialPolygons" and issues a more helpful error message. o pseudoR2.ppm, pseudoR2.lppm The null model now includes any offset terms, by default. [Suggested by Jens Astrom.] New argument 'keepoffset'. o closepairs.ppp New argument 'periodic' o fitted.ppm New argument 'ignore.hardcore'. o predict.ppm New argument 'ignore.hardcore'. o leverage.ppm, influence.ppm, dfbetas.ppm Computation has been vastly accelerated for models with Geyer interaction fitted using isotropic or translation edge corrections. o leverage.ppm, influence.ppm, dfbetas.ppm Virtually all models and edge corrections are now supported, using a "brute force" algorithm. This can be slow in some cases. 
o cdf.test Monte Carlo test runs faster. o summary.distfun, summary.funxy Pixel resolution can now be controlled. o persp.funxy Improved z-axis label. o plot.ppp Improved placement of symbol legend when argument 'symap' is given. o plot.msr Changed the default rule for bandwidth for smoothing the density. BUG FIXES o nnmark, as.im.ssf if marks(X) was a matrix rather than a data frame, the results were completely incorrect (and had completely wrong format). Fixed. o predict.mppm If the model included random effects, and if the library 'MASS' was not loaded, the predictions were on the log scale (i.e. they were logarithms of the correct values). [Spotted by Nicholas Read.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Calculations were slightly incorrect for models with a hard core. Fixed. o leverage.ppm The mean leverage value (shown as a contour line in plot.leverage.ppm) was slightly incorrect for Gibbs models. Fixed. o Ops.msr If the input data contained an auxiliary pixel image of the density component of the measure (attribute "smoothdensity") this image was not updated; it was copied to the output unchanged. Plots of the resulting measure were incorrect, but calculations with the measure were correct. Fixed. o integral.msr If the result was a matrix, it was the transpose of the correct answer. Fixed. o "[.linim" The result sometimes had the wrong class. Fixed. o "[.linnet" In calculating L[W] where W is a window, the code ignored segments of L that crossed W without having a vertex in W. Fixed. o nnmap Crashed if W = NULL. Fixed. o density.lpp, nncross.lpp Crashed sometimes with an obscure message about "z$which". [Spotted by Ines Moncada.] Fixed. o as.im.distfun Crashed, for the distfun of a point pattern, if approx=FALSE. Fixed. o as.solist Crashed when x was a 'layered' object. Fixed. o linnet Crashed in some trivial cases where there were no points or lines. Fixed. 
CHANGES IN spatstat VERSION 1.55-0 OVERVIEW o We thank 'AdriMaz' and Nicholas Read for contributions. o Lurking variable plot for models fitted to several point patterns. o Improvements to code for class 'mppm'. o Improvements to leverage and influence diagnostics. o Improved summary information for funxy and distfun objects. o Bug fixes and improvements. o Removed old warnings and deprecated functions. o Nickname: "Stunned Mullet" NEW FUNCTIONS o contour.leverage.ppm Method for 'contour' for leverage functions of class 'leverage.ppm' o lurking New generic function for lurking variable plots. o lurking.ppp, lurking.ppm These are equivalent to the original function 'lurking'. They are now methods for the new generic 'lurking'. o lurking.mppm New method for class 'mppm' Lurking variable plot for models fitted to several point patterns. o print.lurk Prints information about the object returned by the function 'lurking' representing a lurking variable plot. o model.matrix.mppm Method for 'model.matrix' for models of class 'mppm'. o test.crossing.psp, test.selfcrossing.psp Previously undocumented functions for testing whether segments cross. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm Now recognises the arguments 'dimyx' and 'eps' for specifying the resolution of the grid of prediction points. o leverage.ppm, dfbetas.ppm Increased the default resolution of the pixel images. Spatial resolution can now be controlled by the arguments 'dimyx', 'eps'. o ppmInfluence The result now belongs to class 'ppmInfluence', for which there are methods for 'leverage', 'influence', 'dfbetas' which extract the desired component. o plot.leverage.ppm New argument 'what'. o persp.leverage.ppm New arguments 'zlab' and 'what'. o as.im.leverage.ppm New argument 'what'. o summary.funxy, summary.distfun Printed information now includes a summary of the function values. o lurking.ppm Accelerated. o "[.psp" Accelerated. 
o clf.test, conspire, bounding.box, ksmooth.ppp, mpl, superimposePSP, eval.hyper, smooth.fv, smooth.ppp, smooth.msr, rtoro, plot.kstest These deprecated functions have now been removed. o bermantest This deprecated function has now been removed. Use berman.test instead. o kstest This deprecated function has now been removed. Use cdf.test instead. o plot.ppp A very old warning, about the interpretation of the mark scale as the circle diameter, is no longer printed. BUG FIXES o nnmap, nnmark Values were incorrect if the resulting pixel image had unequal numbers of rows and columns. Fixed. o vcov.mppm Format was incorrect (rows/columns were omitted) in some cases. Fixed. o model.matrix.ppm, model.frame.ppm Values were sometimes incorrect when applied to the result of subfits(). To be precise, if 'fit' was an mppm object fitted to a hyperframe that included 'design covariates' (covariates that take a constant value in each row of the hyperframe), and if 'futs <- subfits(fit)', then model.matrix(futs[[i]]) gave incorrect values in the columns corresponding to the design covariates. Fixed. o model.matrix.ppm The attribute 'assign' was omitted, in some cases. Fixed. o simulate.dppm, simulate.detpointprocfamily In dimensions higher than 2, the result was shifted so that it was centred at the origin. Fixed. o Smooth.ppp Crashed if geometric=TRUE and there were several columns of marks. Fixed. o simulate.dppm, simulate.detpointprocfamily Crashed if nsim > 1 and the spatial dimension was not equal to 2. Fixed. o plot.leverage.ppm Contour line was annotated, which was not intended. Fixed. o leverage.ppm The leverage function was oversmoothed, when the model was fitted with method="logi". Fixed. CHANGES IN spatstat VERSION 1.54-0 OVERVIEW o We thank Rochelle Constantine, Lily Kozmian-Ledward, Ian Renner and Leigh Torres for contributions. o New dataset 'cetaceans'. o Gamma correction for colour maps and image plots. 
o Class 'units' has been renamed 'unitname' to avoid package collision. o Bug fix in leverage code o Tighter bounding box for psp, lpp, linnet objects. o Improved layout in plot.solist o Tools to increase colour saturation. o Connected components of a 3D point pattern. o Accelerated computations on linear networks. o Accelerated simulation of determinantal point processes. o Improved printing of 3D point patterns. o Minor corrections to handling of unitnames. o Nickname: 'Vacuous Mission Statement' NEW DATASETS o cetaceans Nine replicates of a marine survey in New Zealand, consisting of recorded sightings of dolphins, whales and other species. Generously contributed by Lily Kozmian-Ledward, Rochelle Constantine and Leigh Torres. NEW FUNCTIONS o to.saturated Convert a colour value to the corresponding fully-saturated colour. o intensity.psp Compute the average total length of segments per unit area. o boundingbox.psp Bounding box for line segment patterns. This produces a tighter bounding box than the previous default behaviour. o boundingbox.lpp Bounding box for point patterns on a linear network. This produces a tighter bounding box than the previous default behaviour. o boundingbox.linnet Bounding box for a linear network. This produces a tighter bounding box than the previous default behaviour. o "Frame<-.default" New default method for assigning bounding frame to a spatial object. o connected.pp3 Connected components of a 3D point pattern. o colouroutputs, "colouroutputs<-" Extract or assign colour values in a colour map. (Documented a previously-existing function) SIGNIFICANT USER-VISIBLE CHANGES o plot.im New argument 'gamma' supports gamma correction of colour maps. New argument 'ncolours' specifies the default number of colours. o colourmap, lut New argument 'gamma' supports gamma correction of colour maps. o plot.solist, plot.anylist New argument 'panel.vpad' controls vertical space for panel title when equal.scales=FALSE. 
o class 'units' The class 'units' has been renamed 'unitname' to avoid a clash with other packages. o unitname The generic function 'unitname' now returns an object of class 'unitname'. o print.units, summary.units, print.summary.units, as.character.units, compatible.units These methods are now called print.unitname, summary.unitname, print.summary.unitname, as.character.unitname and compatible.unitname. o as.units This function has been renamed 'as.unitname' and now returns an object of class 'unitname'. o rescale.units This method has been renamed 'rescale.unitname' and now returns an object of class 'unitname'. o profilepl New argument 'fast' controls the use of shortcuts. o reload.or.compute New argument 'force'. o pixellate.ppp, pixellate.owin, pixellate.psp New argument 'DivideByPixelArea'. o density.psp New argument 'at' determines locations where the density is evaluated. o as.solist as.solist(x) always returns an object of class 'solist', removing any additional classes. o lineardirichlet Accelerated. o integral.linim Accelerated. o "[.ppp", "[.lpp", "[.psp" In the expression X[W] where W is a window, if X has a unitname but W does not, the result now inherits the unitname of X. o distfun.ppp New argument 'undef'. o print.pp3 More informative output when x is marked. BUG FIXES o leverage.ppm, influence.ppm, dfbetas.ppm Calculations were slightly incorrect for models fitted using the border correction. Fixed. o integral.linim Gave incorrect value in some extreme cases (where many network segments were shorter than one pixel width). Fixed. o update.kppm Did not function correctly when several additional arguments were given. Fixed. o plot.solist Panel titles were cut off, when equal.scales=FALSE (the default). Fixed. o intersection.owin, union.owin, setminus.owin The result sometimes did not inherit the correct 'unitname'. Fixed. CHANGES IN spatstat VERSION 1.53-2 OVERVIEW o We thank Christophe Biscio and Rasmus Waagepetersen for contributions. 
o Correction to 'lohboot' o Improvements to ppm and update.ppm o Bug fixes and minor improvements. o Nickname: "Quantum Entanglement" NEW FUNCTIONS o fitin.profilepl Extract the fitted interaction from a model fitted by profile likelihood. SIGNIFICANT USER-VISIBLE CHANGES o lohboot Algorithm has been corrected and extended thanks to Christophe Biscio and Rasmus Waagepetersen. New arguments 'block', 'basicboot', 'Vcorrection'. o ppm.ppp, ppm.quad New argument 'clipwin' o update.ppm For the case 'update(model, X)' where X is a point pattern, if the window of X is different from the original window, then the model is re-fitted from scratch (i.e. use.internal=FALSE). o plot.leverage.ppm A contour line showing the average value of leverage is now drawn on the colour ribbon, as well as on the main image. New argument 'args.contour'. BUG FIXES o lohboot Implementation was completely incorrect. [Spotted and fixed by Christophe Biscio and Rasmus Waagepetersen.] Fixed. o update.ppm Did not always work correctly with formulae that included 'polynom()' terms. Fixed. CHANGES IN spatstat VERSION 1.53-1 OVERVIEW o We thank Suman Rakshit for contributions. o Bug fix in plot.linim o Nickname: "Drongo" BUG FIXES o plot.linim Colour map was mangled if log=TRUE. Fixed. CHANGES IN spatstat VERSION 1.53-0 OVERVIEW o We thank Tilman Davies and Mehdi Moradi for contributions. o Numerous bug fixes for linear networks code. o spatstat now requires the sub-package 'spatstat.data' which contains the datasets. o Minor enhancements and bug fixes. o Nickname: "Tinfoil Hat" NEW FUNCTIONS o "[<-.linim" Subset assignment method for pixel images on a linear network. o nnfromvertex Given a point pattern on a linear network, find the nearest data point from each vertex of the network. o tile.lengths Calculate the length of each tile in a tessellation on a network. o text.ppp, text.lpp, text.psp Methods for 'text' for spatial patterns. 
SIGNIFICANT USER-VISIBLE CHANGES o datasets All datasets installed in 'spatstat' have now been moved into the sub-package 'spatstat.data'. This should not have any effect on normal use. The 'spatstat.data' package is automatically loaded when spatstat is loaded, and the datasets are lazy-loaded so that they are available in the usual way. To list all datasets you now need to type 'data(package="spatstat.data")' o nbfires This dataset now includes information about the different land and sea borders of New Brunswick. o rhohat New argument 'subset' allows computation for a subset of the data. o predict.lppm Argument 'locations' can now be an 'lpp' object. o ewcdf Argument 'weights' can now be NULL. o plot.msr New arguments 'equal.markscale' and 'equal.ribbon'. o plot.im The number of tick marks in the colour ribbon can now be controlled using the argument 'nint' in 'ribargs'. o plot.symbolmap New argument 'nsymbols' controls the number of symbols plotted. o square Handles a common error in the format of the arguments. o [.linim More robust against artefacts. o [.linnet More robust against artefacts when the subset index is a pixel mask. o linim The image Z is now automatically restricted to the network. New argument 'restrict'. o plot.linim When style="width", negative values are plotted in red (by default). New argument 'negative.args' controls this. o plot.linim New argument 'zlim' specifies the range of values to be mapped. o Summary.linim Recognises the argument 'finite' so that range(x, finite=TRUE) works for a linim object x. o identify.psp Improved placement of labels. Arguments can be passed to text.default to control the plotting of labels. o as.polygonal Accelerated when w is a pixel mask. o density.lpp Accelerated in the default case. o Kinhom Stops gracefully if 'lambda' contains any zero values. o print.linim Prints more information. BUG FIXES o with.msr The value of 'atommass' was incorrect, due to a coding error. Fixed. 
o [.linim Internal data was sometimes corrupted. Fixed. o as.linim The result had incorrect internal format when Window(X) was a mask and one of the arguments 'eps', 'dimyx', 'xy' was present. Fixed. o as.im.im If W was a rectangle or polygonal window, the pixel resolution of the result was determined by the spatstat defaults, rather than being determined by the image argument X. This was contrary to the rule advertised in help(as.im). Fixed. o density.lpp In the 'slow' case (kernel not Gaussian, or continuous=FALSE), occasionally a pixel could incorrectly be assigned the value 1. [Spotted by Mehdi Moradi.] Fixed. o "[.solist" Ignored the "..." arguments in some cases. Fixed. o density.lpp Ignored the resolution arguments 'eps', 'dimyx' in the default case. Fixed. o plot.msr Plotted the panel titles on top of each other, if how="contour". Fixed. o contour.im Plotted the title text at the wrong place when add=TRUE and show.all=TRUE. Fixed. o predict.lppm Crashed if 'locations' was an 'lpp' object. Fixed. o plot.ppp Crashed if the window had height 0 and width 0 and the pattern had several columns of marks. Fixed. o plot.solist Crashed if all panels had windows of height 0 and width 0. Fixed. o linearK, linearKinhom, linearpcf, linearpcfinhom Crashed if the linear network was disconnected and one component of the network contained fewer than 2 points. Fixed. o integral.linim Crashed in some cases. Fixed. o "[.linim" Crashed in some cases. Fixed. CHANGES IN spatstat VERSION 1.52-1 OVERVIEW o Bug fix to satisfy the development version of R. o Nickname: "Apophenia" SIGNIFICANT USER-VISIBLE CHANGES o Ops.imlist Improved the 'names' of the result. BUG FIXES o bw.smoothppp Crashes in R-devel. Fixed. CHANGES IN spatstat VERSION 1.52-0 OVERVIEW o We thank Nicholas Read, Abdollah Jalilian, Suman Rakshit, Dominic Schuhmacher and Rasmus Waagepetersen for contributions. o Important bug fixes. o Now handles disconnected linear networks. 
o Effect function is now available for all types of fitted model. o A model can be fitted or re-fitted to a sub-region of data. o More support for measures. o 'Pool' operations improved. o Geometric-mean smoothing. o Changed algorithm defaults in ippm. o Version nickname: "Rudimentary Lathe" NEW FUNCTIONS o as.data.frame.envelope Extract function data from an envelope object, including the functions for the simulated data ('simfuns') if they were saved. o is.connected, is.connected.default, is.connected.linnet Determines whether a spatial object consists of one topologically connected piece, or several pieces. o is.connected.ppp Determines whether a point pattern is connected after all pairs of points closer than distance R are joined. o hist.funxy Histogram of values of a spatial function. o model.matrix.ippm Method for 'model.matrix' which allows computation of regular and irregular score components. o harmonise.msr Convert several measures (objects of class 'msr') to a common quadrature scheme. SIGNIFICANT USER-VISIBLE CHANGES o Smooth.ppp New argument 'geometric' supports geometric-mean smoothing. o Kinhom New argument 'ratio'. o linearKinhom, linearpcfinhom Changed default behaviour when 'lambda' is a fitted model. New arguments 'update' and 'leaveoneout'. o linearK, linearKinhom, linearpcf, linearpcfinhom, compilepcf Ratio calculations are now supported. New argument 'ratio'. o effectfun Now works for 'ppm', 'kppm', 'lppm', 'dppm', 'rppm' and 'profilepl' objects. o ppm, kppm The argument 'subset' can now be a window (class 'owin') specifying the subset of data to which the model should be fitted. o fitted.lppm New argument 'leaveoneout' allows leave-one-out computation of fitted value. o pool.rat New arguments 'relabel' and 'variance'. o density.lpp The return value is a pixel image of class 'linim' in all cases. o plot.linim, plot.linfun A scale bar is now plotted when style="width". New argument 'legend'. 
o ippm Default values for the parameters of the optimisation algorithm (nlm.args) have changed. o ippm The internal format of the result has been extended slightly. o bind.fv New argument 'clip'. o as.im.distfun New argument 'approx' specifies the choice of algorithm. o "[.psp" New argument 'fragments' specifies whether to keep fragments of line segments that are cut by the new window, or only to retain segments that lie entirely inside the window. o predict.rhohat New argument 'what' determines which value should be calculated: the function estimate, the upper/lower confidence limits, or the standard error. o pool.fv New arguments 'relabel' and 'variance' o pool.rat New argument 'weights'. o plot.msr New argument 'massthresh'. o Ops.msr Calculations like A+B can now be performed even when the measures A and B are not defined on the same quadrature scheme. o density.ppp New argument 'verbose'. o bw.pcf New argument 'verbose'. o hist.im New argument 'xname'. o [.leverage.ppm New argument 'update'. o [.layered Additional arguments '...' are now passed to other methods. o logLik.ppm The warning about pseudolikelihood ('log likelihood not available') is given only once, and is not repeated in subsequent calls, within a spatstat session. o kppm Refuses to fit a log-Gaussian Cox model with anisotropic covariance. o plot.linim, plot.linfun The return value has a different format. Arguments have been renamed and reorganised. o density.lpp New argument 'old'. o ippm Accelerated. o Smooth.ppp Now exits gracefully if any mark values are NA, NaN or Inf. o timeTaken Now exits gracefully if there is no timing information. o nbfires The unit of length for the coordinates is now specified in this dataset. BUG FIXES o bw.pcf Results were totally incorrect due to a typo. [Spotted by Abdollah Jalilian and Rasmus Waagepetersen.] Fixed. o predict.rho2hat Results were incorrect for a rho2hat object computed from a point pattern. Fixed. 
o density.ppp If the smoothing bandwidth was very small (e.g.\ smaller than pixel width), results were inaccurate if the default resolution was used, and completely wrong if another resolution was specified. [Spotted by Dominic Schuhmacher.] Fixed. o linearK, linearKinhom, linearpcf, linearpcfinhom, linearKcross, linearKdot, linearpcfcross, linearpcfdot, linearKcross.inhom, linearKdot.inhom, linearpcfcross.inhom, linearpcfdot.inhom Crashed if the network was disconnected. Fixed. o crossdist.lpp Crashed if the network was disconnected. Fixed. o countends Crashed if the network was disconnected. Fixed. o model.images.ppm Crashed for models fitted using 'covfunargs'. Fixed. o model.matrix.ppm Crashed for models fitted using 'covfunargs', if argument 'Q' was given. Fixed. o polynom Expansion of some polynomials caused an error message about 'invalid model formula'. Fixed. o plot.ppp The argument 'type="n"' did not suppress plotting of the legend, for marked point patterns. Fixed. o plot.psp Ignored 'show.all' when 'add=TRUE'. Fixed. o intensity.ppm Result had incorrect 'names' attribute in some cases. Fixed. o marks<-.ppx The assignment marks(X) <- a, where 'a' is a single atomic value, caused an error if 'X' contained zero points. Fixed o model.depends Crashed when applied to regression models fitted by 'gam', or point process models fitted by 'ppm' with 'use.gam=TRUE'. Fixed. o pool.fv Crashed sometimes, if the arguments did not have the same set of column names. Fixed. o pool.rat Crashed with an error message from 'fmt' if there were more than 20 objects to be pooled. Fixed. o linearK The 'theo' column was missing if npoints(X) < 2 and correction="Ang". Fixed. o model.matrix.ppm Result was malformed if the model was fitted with 'use.gam=TRUE'. Fixed. o effectfun Crashed if 'covname' was omitted, if the model was fitted with 'use.gam=TRUE'. Fixed. o nncross.lpp Result had incorrect format if Y was empty, in some cases. Fixed. 
o linearKinhom Plot label for y axis was incorrect. [Spotted by Suman Rakshit.] Fixed. o plot.solist If the entries were 'linim' objects, they were plotted using image() so arguments like 'style="w"' were ignored. Fixed. o as.ppp.data.frame Crashed if X was an object of class 'tbl_df' from the dplyr package. Fixed. o plot.lpp Crashed if there were multiple columns of marks. Fixed. CHANGES IN spatstat VERSION 1.51-0 OVERVIEW o We thank Greg McSwiggan, Mehdi Moradi and Tammy L Silva for contributions. o New fast algorithm for kernel smoothing on a linear network. o Leverage and influence diagnostics extended to Poisson/Gibbs models fitted by logistic composite likelihood. o Two-stage Monte Carlo test. o Dirichlet/Voronoi tessellation on a linear network. o Thinning of point patterns on a linear network. o More support for functions and tessellations on a linear network. o Improvements and bug fixes. o Version nickname: 'Poetic Licence' NEW FUNCTIONS o bits.test: Balanced Independent Two-Stage Monte Carlo test, an improvement on the Dao-Genton test. o lineardirichlet Computes the Dirichlet-Voronoi tessellation associated with a point pattern on a linear network. o domain.lintess, domain.linfun Extract the linear network from a 'lintess' or 'linfun' object. o summary.lintess Summary of a tessellation on a linear network. o clicklpp Interactively add points on a linear network. o envelopeArray Generate an array of envelopes using a function that returns 'fasp' objects. SIGNIFICANT USER-VISIBLE CHANGES o density.lpp New fast algorithm (up to 1000 times faster) for the default case where kernel="gaussian" and continuous=TRUE. Generously contributed by Greg McSwiggan. o leverage.ppm, influence.ppm, dfbetas.ppm These methods now work for models that were fitted by logistic composite likelihood (method='logi'). o rthin Argument X can now be a point pattern on a linear network (class 'lpp'). o fitted.ppm New option: type = "link" o update.kppm New argument 'evaluate'. 
o integral.linfun New argument 'delta' controls step length of approximation to integral. o as.linim.default New argument 'delta' controls spacing of sample points in internal data. o as.linfun.lintess New argument 'values' specifies the function value for each tile. New argument 'navalue'. BUG FIXES o leverage.ppm, influence.ppm, dfbetas.ppm Results for Gibbs models were incorrect due to a mathematical error. (Results for Poisson models were correct). Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm, ppmInfluence Calculations were incorrect for a Geyer model fitted using an edge correction other than "border" or "none". Fixed. o step, kppm, update.kppm 'step' did not work for kppm objects in some cases due to a scoping problem in update.kppm. Fixed. o improve.kppm Crashed if the window was not a rectangle. Fixed. o pcf.ppp, pcfinhom Crashed if kernel="epa" rather than "epanechnikov". Fixed. o alltypes Crashed if envelope=TRUE and reuse=FALSE. Fixed. o pairdist.lpp, nndist.lpp, nnwhich.lpp, nncross.lpp Crashed if the network was disconnected. Fixed. o as.im.linim, as.linim.linim Additional arguments such as 'eps' and 'dimyx' were ignored. Fixed. o as.im.default Arguments 'eps' and 'xy' were ignored if X was a single numeric value. Fixed. o 'timed' class Printing of these objects did not work in some locales. Fixed. o runifpoint Ignored 'drop' argument if the window was a rectangle. Fixed. CHANGES IN spatstat VERSION 1.50-0 OVERVIEW o We thank Richard Cotton, Adrian Heyner, Abdollah Jalilian, Dominic Schuhmacher and Rasmus Waagepetersen for contributions. o spatstat now 'Imports' the package 'spatstat.utils'. o Bandwidth selection for pair correlation function. o Improvements and bug fixes. o Version nickname: 'Bunyip Aristocracy' NEW PACKAGE STRUCTURE o spatstat is being split into several sub-packages, to satisfy the requirements of CRAN. This should not affect the user: existing code will continue to work in the same way. 
Currently there are two sub-packages, called 'spatstat.utils' and 'spatstat'. Typing 'library(spatstat)' will load the familiar 'spatstat' package which can be used as before, and will silently import the 'spatstat.utils' package. The 'spatstat.utils' package contains utility functions that were originally written for 'spatstat': they were undocumented internal functions in 'spatstat', but are now documented and accessible in a separate package because they may be useful for other purposes. To access these functions, you need to type 'library(spatstat.utils)'. NEW FUNCTIONS o bw.pcf Bandwidth selection for pair correlation function. Original code contributed by Abdollah Jalilian and Rasmus Waagepetersen. o grow.box3 Expand a three-dimensional box. SIGNIFICANT USER-VISIBLE CHANGES o as.owin Now refuses to convert a 'box3' to a two-dimensional window. o pixellate.ppp If the pattern is empty, the result is an integer-valued image (by default) for consistency with the results for non-empty patterns. o ppp If the coordinate vectors x and y contain NA, NaN or infinite values, these points are deleted with a warning, instead of causing a fatal error. o ppm Argument 'interaction' can now be a function that makes an interaction, such as Poisson, Hardcore, MultiHard. o pcf, pcfinhom New argument 'close' for advanced use. o runifpointx, rpoisppx New argument 'drop'. o shapley, ponderosa In these installed datasets, the functions shapley.extra$plotit and ponderosa.extra$plotit have changed slightly (to accommodate the dependence on the package spatstat.utils). o kppm Improved printed output. BUG FIXES o rMaternI, rMaternII If 'win' was a three-dimensional box of class 'box3', the result was a two-dimensional point pattern. [Spotted by Adrian Heyner.] Fixed. o rmhmodel.ppm, simulate.ppm Crashed when applied to a fitted Lennard-Jones model. [Spotted by Dominic Schuhmacher.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Crashed when applied to some hard-core models. Fixed. 
o "[.ppx" The format of the result was slightly malformed if exactly one point was selected. Fixed. o unmark.lpp, marks<-.lpp The result had class c("lpp", "lpp", "ppx") instead of c("lpp", "ppx"). Fixed. CHANGES IN spatstat VERSION 1.49-0 OVERVIEW o We thank Tilman Davies, Kassel Hingee, Abdollah Jalilian, Brian Ripley and Dominic Schuhmacher for contributions. o spatstat now 'Suggests' the package 'fftwtools'. o Operations on signed measures. o Operations on lists of pixel images. o Improved pixellation of point patterns. o Stieltjes integral extended. o Subset operators extended. o Greatly accelerated 'rmh' when using 'nsave' o Some computations accelerated. o Size of namespace reduced, for efficiency. o Bug fixes. o Version nickname: 'So-Called Software' NEW DEPENDENCIES o fftwtools spatstat now 'Suggests' the package 'fftwtools'. This package provides a very fast implementation of the Fast Fourier Transform, leading to much faster computation in the spatstat functions 'density.ppp', 'relrisk.ppp', 'convolve.im', 'blur', 'scan.test' and many other functions. The 'fftwtools' package requires the external software library 'fftw'. We strongly recommend installing this library if possible. NEW FUNCTIONS o hexagon, regularpolygon Create regular polygons. o Ops.msr Arithmetic operations for measures. o Math.imlist, Ops.imlist, Summary.imlist, Complex.imlist Arithmetic operations for lists of pixel images. o measurePositive, measureNegative, measureVariation, totalVariation Positive and negative parts of a measure, and variation of a measure. o as.function.owin Convert a spatial window to a function (x,y), the indicator function. o as.function.ssf Convert an object of class 'ssf' to a function(x,y) o as.function.leverage.ppm Convert an object of class 'leverage.ppm' to a function(x,y) SIGNIFICANT USER-VISIBLE CHANGES o stieltjes Argument 'M' can be a stepfun object (such as an empirical CDF). 
o quantile.ewcdf The function is now normalised to the range [0,1] before the quantiles are computed. This can be suppressed by setting normalise=FALSE. o pixellate.ppp New arguments 'fractional' and 'preserve' for more accurate discretisation. o "[.layered" Subset index i can now be an 'owin' object. o "[.solist" Subset index i can now be an 'owin' object. o plot.solist, plot.imlist, plot.anylist Result is now an (invisible) list containing the result from executing the plot of each panel. o ppp New argument 'checkdup'. o Summary.im Argument 'na.rm' is no longer ignored. o cdf.test The methods for classes ppp, ppm, lpp, lppm, slrm have a new argument 'interpolate'. o as.solist The argument x can now be a spatial object; as.solist(cells) is the same as solist(cells). o bw.diggle, bw.ppl, bw.relrisk, bw.smoothppp These functions now extract and store the name of the unit of length from the point pattern dataset. When the bandwidth selection criterion is plotted, the name of the unit of length is shown on the x-axis. o polynom This function now has a help file. o rmhcontrol New parameter 'pstage' determines when to generate random proposal points. o rmh Accelerated, in the case where multiple patterns are saved using 'nsave'. o bdist.pixels Accelerated for polygonal windows. New argument 'method'. o spatstat namespace The namespace of the spatstat package has been shortened (by internally registering the native routines) which should make the package run faster. o sum.im, range.im, max.im, min.im These functions have been removed, as they are now subsumed in Summary.im. BUG FIXES o plot.msr If one of 'nrows' or 'ncols' was specified, but not both, an obscure error occurred. Fixed. o plot.solist, plot.imlist, plot.anylist Crashed if 'nrows' and 'ncols' were given values implying that some rows or columns would not contain any plots. Fixed. o as.ppp.lpp Crashed if there was more than one column of marks. Fixed. 
o has.close.pp3 Results were incorrect, or a crash occurred, when argument 'Y' was given. Fixed. o rmpoispp If 'lambda' was a list of images, 'names(lambda)' was ignored, rather than serving as the default value of 'types'. Fixed. o bugfixes Output was garbled, in rare cases. Fixed. o kppm Result was malformed when clusters="VarGamma" and method="clik2". Spotted by Abdollah Jalilian. Fixed. o QQversion Plotting labels were malformed. Fixed. CHANGES IN spatstat VERSION 1.48-0 OVERVIEW o We thank Kim Colyvas, Yongtao Guan, Gopalan Nair, Nader Najari, Suman Rakshit, Ian Renner and Hangsheng Wang for contributions. o Sufficient Dimension Reduction for point processes. o Alternating Gibbs Sampler for point process simulation. o Intensity approximation for area-interaction and Geyer models. o New class of spatially sampled functions. o ROC and AUC extended to other types of point patterns and models. o More support for linear networks. o More support for infinite straight lines. o Simulation of 'rhohat' objects. o Kernel smoothing accelerated. o Methods for 'head' and 'tail' for spatial patterns. o More low-level functionality. o Improvements and bug fixes. o spatstat now has more than 1000 help files. o Nickname: 'Model Prisoner' NEW CLASSES o ssf Class of spatially sampled functions. NEW FUNCTIONS o sdr, dimhat Sufficient Dimension Reduction for point processes. Matlab code contributed by Yongtao Guan, translated by Suman Rakshit. o rags, ragsAreaInter, ragsMultiHard Alternating Gibbs Sampler for point processes. o psib Sibling probability (index of clustering strength in a cluster process). o bugfixes List all bug fixes in recent versions of a package. 
o roc.kppm, roc.lppm, roc.lpp Methods for 'roc' (receiver operating characteristic curve) for fitted models of class 'kppm' and 'lppm' and point patterns of class 'lpp' o auc.kppm, auc.lppm, auc.lpp Methods for 'auc' (area under the ROC curve) for fitted models of class 'kppm' and 'lppm' and point patterns of class 'lpp' o rlpp Random points on a linear network with a specified probability density. o cut.lpp Method for 'cut' for point patterns on a linear network. o crossing.linnet Find crossing points between a linear network and another set of lines. o ssf Create a spatially sampled function o print.ssf, plot.ssf, contour.ssf, image.ssf Display a spatially sampled function o as.im.ssf, as.ppp.ssf, marks.ssf, marks<-.ssf, unmark.ssf, [.ssf, with.ssf Manipulate data in a spatially sampled function o Smooth.ssf Smooth a spatially sampled function o integral.ssf Approximate integral of spatially sampled function o simulate.rhohat Generate a Poisson random point pattern with intensity that is a function of a covariate, given by a 'rhohat' object. o head.ppp, head.ppx, head.psp, head.tess, tail.ppp, tail.ppx, tail.psp, tail.tess Methods for 'head' and 'tail' for spatial patterns. o as.data.frame.tess Convert a tessellation to a data frame. o timeTaken Extract the timing data from a 'timed' object or objects. o rotate.infline, shift.infline, reflect.infline, flipxy.infline Geometrical transformations for infinite straight lines. o whichhalfplane Determine which side of an infinite line a point lies on. o points.lpp Method for 'points' for point patterns on a linear network. o pairs.linim Pairs plot for images on a linear network. o has.close Faster way to check whether a point has a close neighbour. o closetriples Low-level function to find all close triples of points. o matrixpower, matrixsqrt, matrixinvsqrt Raise a matrix to any power. 
SIGNIFICANT USER-VISIBLE CHANGES o intensity.ppm Intensity approximation is now available for the Geyer saturation process and the area-interaction process (results of research with Gopalan Nair). o envelope.lpp, envelope.lppm New arguments 'fix.n' and 'fix.marks' allow envelopes to be computed using simulations conditional on the observed number of points. o "[.im" The subset index "i" can now be a linear network (object of class 'linnet'). The result of "x[i, drop=FALSE]" is then a pixel image of class 'linim'. o cut.ppp Argument z can be "x" or "y" indicating one of the spatial coordinates. o rThomas, rMatClust, rCauchy, rVarGamma, rPoissonCluster, rNeymanScott New argument 'saveparents'. o lintess Argument 'df' can be missing or NULL, resulting in a tessellation with only one tile. o lpp X can be missing or NULL, resulting in an empty point pattern. o plot.lintess Improved plot method, with more options. o rpoisline Also returns information about the original infinite random lines. o density.ppp, Smooth.ppp Accelerated. o density.psp New argument 'method' controls the method of computation. New faster option 'method="FFT"' o nndist.lpp Accelerated. BUG FIXES o F3est Estimates of F(r) for the largest value of r were wildly incorrect. Fixed. o clip.infline Results were incorrect unless the midpoint of the window was the coordinate origin. Fixed. o integral.linim Results were inaccurate if many of the segment lengths were shorter than the width of a pixel. Fixed. o predict.lppm Bizarre error messages about 'class too long' or 'names too long' occurred if the model was multitype. Fixed. o superimpose Point patterns containing 0 points were ignored when determining the list of possible marks. Fixed. o chop.tess Vertical lines were not handled correctly with pixellated tessellations. Fixed. o timed Argument 'timetaken' was ignored. Fixed. o ppm Crashed if method="logi" and the 'covariates' were a data frame. [Spotted by Kim Colyvas and Ian Renner.] Fixed. 
o rpoislpp, runiflpp Crashed if nsim > 1. Fixed. o rpoisline Crashed if zero lines were generated. Fixed. o model.frame.ppm Crashed if the original model was fitted to a data frame of covariates and there were NA's amongst the covariate values. [Spotted by Kim Colyvas.] Fixed. o any, all When applied to pixel images (objects of class 'im') the result was sometimes NA when a finite value should have been returned. Fixed. o predict.rhohat When the original data were on a linear network, the result of predict.rhohat did not belong to the correct class 'linim'. Fixed. CHANGES IN spatstat VERSION 1.47-0 OVERVIEW o We thank Marcel Austenfeld, Guy Bayegnak, Tilman Davies, Cenk Icos, Jorge Mateu, Frederico Mestre, Mehdi Moradi, Virginia Morera Pujol, Suman Rakshit and Sven Wagner for contributions. o Non-Gaussian smoothing kernels. o Important bug fix in linearK, linearpcf o Changed internal format of linnet and lpp objects. o Faster computation in linear networks. o Bias correction techniques. o Bounding circle of a spatial object. o Minkowski sum also applicable to point patterns and line segment patterns. o Option to plot marked points as arrows. o Kernel smoothing accelerated. o Workaround for bug in some graphics drivers affecting image orientation. o Bug fixes and improvements. o Version nickname: 'Responsible Gambler' NEW FUNCTIONS o anyNA.im Method for 'anyNA' for pixel images. o bc Bias correction (Newton-Raphson) for fitted model parameters. See also 'rex'. o boundingcircle, boundingcentre Find the smallest circle enclosing a window or point pattern. o "[.linim" Subset operator for pixel images on a linear network. o mean.linim, median.linim, quantile.linim The mean, median, or quantiles of pixel values in a pixel image on a linear network. o rex Richardson extrapolation for numerical integrals and statistical model parameter estimates. o weighted.median, weighted.quantile Median or quantile of numerical data with associated weights. 
SIGNIFICANT USER-VISIBLE CHANGES o linear networks The internal format of a 'linnet' (linear network) object has been changed. Existing datasets of class 'linnet' and 'lpp' are still supported. However, computation will be faster if they are converted to the new format. To convert a linnet object L to the new format, use L <- as.linnet(L). To convert an lpp object X to the new format, use X <- as.lpp(X). o density.ppp, Smooth.ppp New argument 'kernel' allows the user to specify the smoothing kernel. o density.ppp, Smooth.ppp Argument 'weights' can now be a pixel image. o MinkowskiSum, %(+)% Now accepts arguments which are point patterns or line segment patterns as well as windows. o plot.im New argument 'workaround' to avoid a bug in some device drivers that causes the image to be displayed in the wrong spatial orientation. [Thanks to Marcel Austenfeld for drawing attention to this.] o sumouter New argument 'y' allows computation of asymmetric outer products. o linearKinhom, linearpcfinhom New argument 'normpower'. o rmh.default, rmh.ppm New arguments 'nsim', 'saveinfo'. o symbolmap, plot.ppp, plot.lpp New option: shape="arrows" o rcellnumber New argument 'mu'. o lengths.psp New argument 'squared'. o plot.linfun Now passes arguments to the function being plotted. o as.linnet.psp If the line segment pattern has marks, then the resulting linear network also carries these marks in the $lines component. o summary.owin, summary.im The fraction of frame area that is occupied by the window/image is now reported. o density.ppp, Smooth.ppp Computation accelerated by about 15% in the case where at='points' and kernel='gaussian'. o linearK, linearpcf Accelerated by about 40%. o pixellate.ppp Accelerated in the case where weights are given o density.ppp Accelerated in the cases where weights are given or 'diggle=TRUE' o dilation.ppp Improved geometrical accuracy. Now accepts arguments to control resolution of polygonal approximation. o discs New argument 'npoly'. 
Accelerated in some cases. o plot.pp3 New arguments 'box.front', 'box.back' control plotting of the box. o grow.rectangle New argument 'fraction'. o nnfun.lpp New argument 'k'. o bw.ppl New argument 'sigma'. o lppm New argument 'random' controls placement of dummy points. o rhohat.lpp New argument 'random' controls placement of dummy points. o quadrat.test.ppm Accelerated in the case where the original window is a rectangle. o kppm, mincontrast, cauchy.estpcf, lgcp.estpcf, matclust.estpcf, thomas.estpcf, vargamma.estpcf A warning about infinite values of the summary function no longer occurs when the default settings are used. o circumradius This function is now deprecated, in favour of 'boundingradius' o print.quad More information is printed. BUG FIXES o linearK, linearpcf, and relatives: The results of these functions were sometimes greatly underestimated when the network had segments shorter than 10 coordinate units. [Bug introduced in spatstat 1.44-0, December 2015.] Fixed. o integral.linim, integral.linfun Results were slightly inaccurate because of a bias in the distribution of sample points. [Bug introduced in spatstat 1.41-0, February 2015.] Fixed. o intensity.ppm Result was incorrect for Gibbs models if the model was *exactly* equivalent to a Poisson process (i.e. if all interaction coefficients were exactly zero). [Bug introduced in spatstat 1.28-1, June 2012.] Fixed. o rSSI Sometimes terminated prematurely. [Spotted by Frederico Mestre.] Fixed. o perspPoints Crashed if the image Z contained NA (i.e. if Z was only defined on a subset of the bounding frame). Spotted by Guy Bayegnak. Fixed. o plot.ppp, plot.lpp Crashed if the argument 'shape' was given. Fixed. o plot.kppm Crashed if the model was not fitted by minimum contrast. Fixed. o superimpose Crashed if the argument was a 'solist' containing line segment patterns. Fixed. o Jest Crashed sometimes, depending on the shape of the observation window. [Spotted by Cenk Icos.] Fixed. 
o plot.studpermutest Crashed when the summary statistic was a multitype pair correlation function or multitype K function. [Spotted by Sven Wagner.] Fixed. o pool.anylist Crashed with a message about buffer size, if the list was longer than about 100 items. Fixed. o diagnose.ppm, plot.diagppm Crashed in some cases when cumulative=FALSE. Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Crashed sometimes with a message about wrong replacement length. [Spotted by Virginia Morera Pujol.] Fixed. o as.linnet.psp Crashed with marked segment patterns, if any segments were very short. [Spotted by Suman Rakshit.] Fixed. o stieltjes Returned NA if some values of f were not finite. Fixed. o plot.symbolmap If a new plot window was initialised, it was sometimes too small to contain the geometric figures (circles, squares etc) in the symbol map. Fixed. o plot.ppp, plot.im Ignored xlim, ylim. Fixed. o rhohat.lpp Ignored nd, eps. Fixed. o nnfun.lpp Print method gave incorrect information about the point pattern. Fixed. o "[.fv" The default plot formula was not updated. Fixed. o fitted.ppm The result was sometimes a 1-dimensional array rather than a numeric vector. Fixed. CHANGES IN spatstat VERSION 1.46-1 OVERVIEW o Important bug fix. o Version nickname: 'Spoiler Alert' BUG FIXES o density.ppp, Smooth.ppp The results of density(X, at="points") and Smooth(X, at="points") were incorrect in some cases. The contribution from the left-most data point (the point with the smallest x coordinate) was omitted. [Bug introduced in spatstat 1.26-0, April 2012.] Fixed. CHANGES IN spatstat VERSION 1.46-0 OVERVIEW o We thank Corey Anderson and Sebastian Meyer for contributions. o spatstat now depends on R 3.3.0 or later. o Improvements to inhomogeneous multitype K and L functions. o Variance approximation for pair correlation function. o Leverage and influence for multitype point process models. o Functions for extracting components of vector-valued objects. 
o Important bug fix in Smooth.ppp o Minor improvements and bug fixes. o Version nickname: 'Multidimensional Toothbrush' NEW FUNCTIONS o split.msr Decompose a measure into parts. o unstack.msr Decompose a vector-valued measure into its component measures. o unstack.ppp, unstack.psp, unstack.lpp Given a spatial pattern with several columns of marks, separate the columns and return a list of spatial patterns, each having only one column of marks. o kernel.squint Integral of squared kernel, for the kernels used in density estimation. SIGNIFICANT USER-VISIBLE CHANGES o Kcross.inhom, Kdot.inhom, Kmulti.inhom, Ldot.inhom, Lcross.inhom These functions now allow intensity values to be given by a fitted point process model. New arguments 'update', 'leaveoneout', 'lambdaX'. o diagnose.ppm Infinite values of 'rbord' are now ignored and treated as zero. This ensures that diagnose.ppm has a sensible default when the fitted model has infinite reach. o pcf.ppp Now calculates an analytic approximation to the variance of the estimate of the pair correlation function (when var.approx=TRUE). Now returns the smoothing bandwidth used, as an attribute of the result. o plot.ppp When 'clipwin' is given, any parts of the boundary of the window of x that lie inside 'clipwin' will also be plotted. o plot.msr Now handles multitype measures. New argument 'multiplot'. o plot.anylist If a list entry x[[i]] belongs to class 'anylist', it will be expanded so that each entry x[[i]][[j]] will be plotted as a separate panel. o influence.ppm, leverage.ppm These can now be applied to multitype point process models and the results can be plotted. o plot.influence.ppm, plot.leverage.ppm New argument 'multiplot'. o plot.anylist, plot.solist, plot.listof New arguments panel.begin.args, panel.end.args o influence.ppm, leverage.ppm, dfbetas.ppm For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. 
BUG FIXES o Smooth.ppp Results were incorrect when at='points' and leaveoneout=FALSE. [Bug introduced in spatstat 1.20-5, October 2010.] Fixed. o funxy Did not correctly handle one-line functions: the resulting objects evaluated the wrong function in some cases. [Spotted by Sebastian Meyer. Bug introduced in spatstat 1.45-0] Fixed. o mppm Did not recognise the variable 'marks' in a formula. Fixed. o Smooth.ppp, bw.smoothppp Crashed if X had two columns of marks and one column was constant. [Bug introduced in spatstat 1.38-0, October 2014] Fixed. o Smooth.ppp Results for 'at="points"' were garbled, for some values of 'sigma', if X had more than one column of marks. [Bug introduced in spatstat 1.38-0, October 2014] Fixed. o plot.layered Crashed if one layer was a point pattern with several columns of marks. Fixed. o plot.ppm Sometimes gave a spurious warning about a singular matrix. Fixed. o setminus.owin Gave wrong or strange answer if the correct answer was empty. Fixed. o parameters.dppm Crashed, due to a typo. Fixed. o progressreport Crashed if n = 1. Fixed. CHANGES IN spatstat VERSION 1.45-2 OVERVIEW o We thank Ottmar Cronie, Virginia Morera Pujol, Sven Wagner and Marie-Colette van Lieshout for contributions. o Recursive-partition point process models. o Minkowski sum, morphological dilation and erosion with any shape. o Important bug fix in spatial CDF tests. o More bug fixes for replicated patterns. o Simulate a model fitted to replicated point patterns. o Inhomogeneous multitype F and G functions. o Summary functions recognise correction="all" o Leverage and influence code handles bigger datasets. o More support for pixel images. o Improved progress reports. o New dataset 'redwood3' o spatstat now Depends on the package 'rpart' o Version nickname: 'Caretaker Mode' NEW DATASETS o redwood3 A more accurate version of the 'redwood' data. NEW FUNCTIONS o as.im.data.frame Build a pixel image from a data frame of coordinates and pixel values. 
o covering Cover a window using discs of a given radius. o dilationAny, erosionAny, %(-)% Morphological dilation and erosion by any shape. o FmultiInhom, GmultiInhom Inhomogeneous multitype/marked versions of the summary functions Fest, Gest. o kernel.moment Moment or incomplete moment of smoothing kernel. o MinkowskiSum, %(+)% Minkowski sum of two windows: A %(+)% B, or MinkowskiSum(A,B) o nobjects New generic function for counting the number of 'things' in a dataset. There are methods for ppp, ppx, psp, tess. o parameters.interact, parameters.fii Extract parameters from interpoint interactions. [These existing functions are now documented.] o ppmInfluence Calculate leverage.ppm, influence.ppm and dfbetas.ppm efficiently. o rppm, plot.rppm, predict.rppm, prune.rppm Recursive-partition point process models o simulate.mppm Simulate a point process model fitted to replicated point patterns. o update.interact Update the parameters of an interpoint interaction. [This existing function is now documented.] o where.max, where.min Find the spatial location(s) where a pixel image achieves its maximum or minimum value. SIGNIFICANT USER-VISIBLE CHANGES o cdf.test.mppm Now handles Gibbs models. Now recognises covariate="x" or "y". o leverage.ppm, influence.ppm, dfbetas.ppm For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. o plot.im Now handles complex-valued images. o connected.im Now handles a logical-valued image properly. o qqplot.ppm Argument 'expr' can now be a list of point patterns, or an envelope object containing a list of point patterns. o as.layered Default method now handles a (vanilla) list of spatial objects. o summary functions The argument 'correction="all"' is now recognised: it selects all the available options. 
This applies to Fest, F3est, Gest, Gcross, Gdot, Gmulti, G3est, Gfox, Gcom, Gres, Hest, Jest, Jmulti, Jcross, Jdot, Jfox, Kest, Kinhom, Kmulti, Kcross, Kdot, Kcom, Kres, Kmulti.inhom, Kcross.inhom, Kdot.inhom, Kscaled, Ksector, Kmark, K3est, Lscaled, markcorr, markcrosscorr, nnorient, pairorient, pcfinhom, pcfcross.inhom, pcfcross, pcf, Tstat. o clarkevans The argument 'correction="all"' is now recognised: it selects all the available options. [This is also the default.] o predict.mppm The argument 'type="all"' is now recognised: it selects all the available options. [This is also the default.] o plot.kppm The argument 'what="all"' is now recognised: it selects all the available options. [This is also the default.] o connected.im, connected.owin Arguments '...' now determine pixel resolution. o anova.mppm New argument 'fine' o as.owin.data.frame New argument 'step' o discs Now accepts a single numeric value for 'radii'. o plot.ppp, plot.profilepl, plot.quadratcount, plot.quadrattest, plot.tess Now recognise graphics parameters for text, such as 'family' and 'srt' o as.function.tess New argument 'values' specifies the function values. o cdf.test Calculations are more robust against numerical rounding effects. o progressreport Behaviour improved. New arguments 'tick', 'showtime'. o simulate.ppm New argument 'verbose' o compileK, compilepcf These internal functions are now documented. BUG FIXES o cdf.test.ppm Calculation of p-values was incorrect for Gibbs models: 1-p was computed instead of p. [Spotted by Sven Wagner.] Fixed. o subfits The interaction coefficients of the submodels were incorrect for Gibbs models with a multitype interaction (MultiStrauss, etc). [Spotted by Sven Wagner.] Fixed. o subfits Crashed when a Gibbs model included factor-valued spatial covariates and not all levels of the factor were present in each row of the data. [Spotted by Sven Wagner.] Fixed. 
o subfits For Gibbs models with a multitype interaction (MultiStrauss, etc), computation of the conditional intensity caused an error. [Spotted by Sven Wagner.] Fixed. o diagnose.ppm Crashed if what="smooth", when the original window was a rectangle. [Spotted by Virginia Morera Pujol.] Fixed. o mppm The x and y coordinates were not permitted in the random-effects formula 'random'. [Spotted by Sven Wagner.] Fixed. o vcov.ppm The result had no 'dimnames', if the model was fitted using method="ho". Fixed. CHANGES IN spatstat VERSION 1.45-1 OVERVIEW o This version was never released. CHANGES IN spatstat VERSION 1.45-0 OVERVIEW o We thank Monsuru Adepeju, Mario D'Antuono, Markus Herrmann, Paul Hewson, Kassel Hingee, Greg McSwiggan, Suman Rakshit and Sven Wagner for contributions. o Important bug fix in leverage/influence diagnostics for Gibbs models. o Numerous bug fixes in code for replicated point patterns. o Surgery on linear networks. o Tessellations on a linear network. o Laslett's Transform. o Colour maps for point patterns with continuous marks are easier to define. o Pair correlation function estimates can be pooled. o Stipulate a particular version of a package. o Fixed namespace problems arising when spatstat is not loaded. o Bug fixes and performance improvements. o spatstat now contains 100,000 lines of R code. o Version nickname: 'One Lakh' NEW FUNCTIONS o laslett Laslett's Transform. [Thanks to Kassel Hingee] o lintess Tessellation on a linear network. o divide.linnet Divide a linear network into pieces demarcated by a point pattern. o insertVertices Insert new vertices in a linear network. o thinNetwork Remove vertices and/or segments from a linear network etc o connected.linnet Find connected components of a linear network. o nvertices, nvertices.linnet, nvertices.owin Count the number of vertices in a linear network or vertices of the boundary of a window. 
o as.data.frame.linim, as.data.frame.linfun Extract a data frame of spatial locations and function values from an object of class 'linim' or 'linfun'. o as.linfun, as.linfun.linim, as.linfun.lintess Convert other kinds of data to a 'linfun' object. o requireversion Require a particular version of a package (for use in stand-alone R scripts). SIGNIFICANT USER-VISIBLE CHANGES o [.linnet, [.lpp New argument 'snip' determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. o pcfinhom Default behaviour is changed when 'lambda' is a fitted model. The default is now to re-fit the model to the data before computing pcf. New arguments 'update' and 'leaveoneout' control this. o envelope methods New argument 'funYargs' contains arguments to the summary function when applied to the data pattern only. o plot.ppp, plot.lpp For a point pattern with continuous marks ('real numbers') the colour arguments 'cols', 'fg', 'bg' can now be vectors of colour values, and will be used to determine the default colour map for the marks. o symbolmap Now accepts a vector of colour values for the arguments 'col', 'cols', 'fg', 'bg' if argument 'range' is given. o closepairs.ppp, closepairs.pp3 New arguments 'distinct' and 'neat' allow more options. o closepairs.ppp, closepairs.pp3 Argument 'ordered' has been replaced by 'twice' (but 'ordered' is still accepted, with a warning). o closepairs.ppp, closepairs.pp3 Performance improved (computation time and memory requirements reduced.) This should improve the performance of many functions in spatstat. o Geyer The saturation parameter 'sat' can now be less than 1. o lpp, as.lpp These functions now handle the case where 'seg' and 'tp' are given but 'x' and 'y' are missing. o linnet If the argument 'edges' is given, then this argument now determines the ordering of the sequence of line segments. For example, the i-th row of 'edges' specifies the i-th line segment in as.psp(L). 
o funxy, distfun The functions created by funxy and distfun have arguments (x,y). The user may now give a ppp or lpp object for the argument 'x', instead of giving two coordinate vectors 'x' and 'y'. o crossing.psp New argument 'details' gives more information about the intersections between the segments. o subset.ppp, subset.lpp, subset.pp3, subset.ppx The argument 'subset' can now be any argument acceptable to the "[" method. o density.lpp New argument 'weights'. o pcf.ppp New argument 'ratio' allows several estimates of pcf to be pooled. o summary.ppm New argument 'fine' selects the algorithm for variance estimation. o texturemap Argument 'textures' can be missing or NULL. o plot.lpp New argument 'show.network' o linnet New argument 'warn' o mppm Performs more checks for consistency of the input data. o mppm New arguments 'gcontrol' and 'reltol.pql' control the fitting algorithm. o edge.Trans New argument 'gW' for efficiency. o pool.fv The default plot of the pooled function no longer includes the variance curves. o clickpoly The polygon is now drawn progressively as the user clicks new vertices. o Kest Accelerated computation (for translation and rigid corrections) when window is an irregular shape. o vcov.ppm, leverage.ppm, influence.ppm, dfbetas.ppm Performance slightly improved, for Gibbs models. o Internal code Performance slightly improved. o Fest, Hest Additional checks for errors in input data. BUGS o leverage.ppm, influence.ppm, parres.ppm, addvar.ppm Calculations were completely incorrect for Gibbs models, due to a coding error. Fixed. o update.kppm If the call to 'update' did not include a formula argument or a point pattern argument, then all arguments were ignored. Example: update(fit, improve.type="quasi") was identical to 'fit'. Fixed. o diagnose.ppm When applied to a model obtained from subfits(), in the default case (oldstyle=FALSE) the variance calculations were incorrect. 
Consequently the dotted lines representing significance bands were incorrect. An error or warning about negative variances occurred sometimes. However, calculations with oldstyle=TRUE were correct. The default has now been changed to oldstyle=TRUE for such models. o [.lpp The local coordinate 'seg' was completely incorrect, when 'i' was a window. Fixed. o leverage.ppm, influence.ppm, parres.ppm, addvar.ppm Crashed for Gibbs models in which the coefficient vector had length 1, such as the stationary Hardcore model. Fixed. o subfits Crashed if the model included factor-valued spatial covariates. [Spotted by Sven Wagner] Fixed. o subfits If the model included factor-valued spatial covariates, and if not all levels of the factor were present in each row of the data, the resulting objects were malformed and caused errors in other code. [Spotted by Sven Wagner] Fixed. o subfits Crashed with some random-effects formulas. [Spotted by Sven Wagner] Fixed. o improve.kppm An error message about a missing object 'gminus1' occurred when vcov=TRUE, fast.vcov=FALSE and type="clik1" or "wclik1". Fixed. o plot.profilepl Failed with a message about a missing object 'finite'. Fixed. o selfcut.psp Gave an error if marks(A) was a vector rather than a data frame. [Spotted by Paul Hewson.] Fixed. o suffstat Gave an error for point process models with Geyer interaction. Fixed. o nncross.lpp, distfun.lpp Crashed with obscure errors if Y consisted of a single point. Fixed. o scan.test, scanmeasure Crashed sometimes with an error message from 'grow.mask'. Fixed. o dppm Crashed sometimes with a message that the point pattern could not be found. [Scoping bug.] Fixed. o mppm, profilepl Crashed, with a message about 'SpatstatVersion', if the 'spatstat' package was neither loaded nor attached. [Spotted by Markus Herrmann.] Fixed. o qqplot.ppm Crashed sometimes when applied to a model obtained from subfits(). Fixed. o anova.mppm Crashed sometimes with a message about mismatched coefficients. 
[Spotted by Sven Wagner.] Fixed. o anova.mppm Crashed sometimes with a message about unrecognised option 'type="score"'. [Spotted by Sven Wagner.] Fixed. o split.ppx Crashed if 'f' was not a factor. Fixed. o idw The result was a pixel image defined in the rectangle Frame(X) instead of Window(X). Fixed. o ppm Parameter estimates were slightly inaccurate when the model included the Geyer() interaction and the "isotropic" edge correction was used. Fixed. o [.ppx Crashed if the number of points selected was less than 2. Fixed. o linnet Crashed if there were no line segments at all. Fixed. o kppm, improve.kppm Crashed if the model was stationary and improve.type != "none". Fixed. o as.linim.default Did not correctly handle factor-valued data. Fixed. o texturemap Crashed if no graphical arguments were specified. Fixed. o vcov.mppm Ignored "..." arguments. Fixed. o Kest If ratio=TRUE and correction=c('border', 'none') the result did not contain ratio information. Fixed. o plot.ppp, plot.lpp Arguments 'chars' and 'cols' were ignored in some cases. Fixed. o ppm Ignored argument 'emend'. Fixed. o plot.dppm Gave warnings about unrecognised argument 'objectname'. Fixed. o overlap.owin Sometimes returned a very small negative value, when the correct answer was 0. Fixed. CHANGES IN spatstat VERSION 1.44-1 OVERVIEW o We thank Brian Ripley for contributions. o Urgent bug fix. o More support for replicated point patterns. o More support for tessellations. o Version nickname: 'Gift Horse' NEW FUNCTIONS o as.function.tess Convert a tessellation to a function(x,y). The function value indicates which tile of the tessellation contains the point (x,y). o tileindex Determine which tile of a tessellation contains a given point (x,y). o persp.leverage.ppm Method for persp plots for objects of class leverage.ppm o AIC.mppm, extractAIC.mppm AIC for point process models fitted to replicated point patterns. 
o nobs.mppm, terms.mppm, getCall.mppm Methods for point process models fitted to replicated point patterns. SIGNIFICANT USER-VISIBLE CHANGES o anova.mppm Now handles Gibbs models, and performs the adjusted composite likelihood ratio test. o update, step These functions now work for models of class 'mppm'. o textureplot Argument x can now be something acceptable to as.im o logLik.mppm New argument 'warn'. BUGS o nncross.lpp, nnwhich.lpp, distfun.lpp Caused a segmentation fault. [Spotted by Brian Ripley.] Fixed. o anova.ppm If a single 'object' was given, and the object was a Gibbs model, then 'adjust' was effectively set to FALSE. Fixed. CHANGES IN spatstat VERSION 1.44-0 OVERVIEW o We thank Jonas Geldmann, Andrew Hardegen, Kassel Hingee, Tom Lawrence, Robin Milne, Gopalan Nair, Suman Rakshit, Peijian Shi and Rasmus Waagepetersen for contributions. o More support for multidimensional point patterns and point processes. o More options for envelopes and related Monte Carlo tests. o More support for model comparison. o k-th nearest neighbours on a linear network. o Penttinen process can be simulated (by Metropolis-Hastings or CFTP). o Calculate the predicted variance of number of points. o Convexifying operation for sets. o Subdivide a linear network. o Accelerated algorithms for linear networks. o Quadrat counting accelerated, in some cases. o Version nickname: 'The Sound of One Hand Typing' NEW FUNCTIONS o rPenttinen Simulate the Penttinen process using perfect simulation. o varcount Given a point process model, compute the predicted variance of the number of points falling in a window. o inside.boxx Test whether multidimensional points lie inside a specified multidimensional box. o lixellate Divide each segment of a linear network into smaller segments. o nsegments.linnet, nsegments.lpp Count the number of line segments in a linear network. o grow.boxx Expand a multidimensional box. o deviance.ppm, deviance.lppm Deviance for a fitted point process model. 
o pseudoR2 Pseudo-R-squared for a fitted point process model. o tiles.empty Checks whether each tile of a tessellation is empty or nonempty. o summary.linim Summary for a pixel image on a linear network. SIGNIFICANT USER-VISIBLE CHANGES o rMaternI, rMaternII These functions can now generate random patterns in three dimensions and higher dimensions, when the argument 'win' is of class 'box3' or 'boxx'. o "[.ppx" The subset index 'i' may now be a spatial domain of class 'boxx' or 'box3'. o rmh.ppm, rmhmodel.ppm, simulate.ppm A model fitted using the 'Penttinen' interaction can now be simulated. o rmh.default, rmhmodel.default These functions now recognise cif='penttinen' for the Penttinen interaction. o envelope New argument 'clamp' gives greater control over one-sided envelopes. o dclf.test, mad.test, dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace New argument 'clamp' determines the test statistic for one-sided tests. o dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace, mctest.progress, mctest.sigtrace, dg.progress, dg.sigtrace New argument 'rmin' determines the left endpoint of the test interval. o dclf.test, mad.test, dg.test, dg.progress, dg.sigtrace, dg.envelope (also accepted by dclf.progress, mad.progress, dclf.sigtrace, mad.sigtrace) New argument 'leaveout' specifies how to calculate the deviation between the observed summary function and nominal reference value. o envelope New argument 'funargs' o Hest Argument X can now be a pixel image with logical values. New argument 'W'. [Based on code by Kassel Hingee.] o nncross.lpp, distfun.lpp New argument 'k' allows calculation of k-th nearest neighbour. Computation accelerated. o logLik.ppm New argument 'absolute'. o plot.kppm New arguments 'pause' and 'xname'. o tess Argument 'window' is ignored when xgrid, ygrid are given. o as.polygonal Can now repair errors in polygon data, if repair=TRUE. o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen New argument 'drop'. 
o point-in-polygon test The function inside.owin could take a very long time to check whether points are inside a polygonal window, if the coordinates were very large numbers. This was due to numerical overflow. Fixed.
o spatstat now depends on R 3.2.2 or later. o Simulation algorithms have been accelerated; simulation outcomes are *not* identical to those obtained from previous versions of spatstat. o Determinantal point process models. o Random-effects and mixed-effects models for replicated patterns. o Dao-Genton test, and corresponding simulation envelopes. o Simulated annealing and simulated tempering. o spatstat colour tools now handle transparent colours. o Improvements to "[" and subset() methods o Extensions to kernel smoothing on a linear network. o Support for one-dimensional smoothing kernels. o Bug fix in Metropolis-Hastings simulation. o Mark correlation function may include weights. o Cross-correlation version of the mark correlation function. o Variance calculations for replicated patterns. o Penttinen pairwise interaction model. o Contour plots with colours determined by a colour map. o New dataset: Australian states and territories. o More support for multi-dimensional point patterns. o Minor improvements and bug fixes. o Version nickname: "Mixed Effects" NEW DATASET o austates The states and large mainland territories of Australia represented as polygonal regions forming a tessellation. NEW FUNCTIONS o dppm Fit a determinantal point process model to point pattern data. o fitted.dppm, predict.dppm, intensity.dppm Predict a fitted dppm object. o logLik.dppm, AIC.dppm, extractAIC.dppm, nobs.dppm Likelihood and AIC for determinantal point process models (enabling the use of 'step') o coef.dppm, formula.dppm, print.dppm, terms.dppm, labels.dppm, model.frame.dppm, model.matrix.dppm, model.images.dppm, is.stationary.dppm, reach.dppm, unitname.dppm, unitname<-.dppm, Window.dppm Various methods for dppm objects. o parameters.dppm Extract meaningful list of model parameters o objsurf.dppm Objective function surface of a dppm object o residuals.dppm Residual measure for a dppm object. 
o dppBessel, dppCauchy, dppGauss, dppMatern, dppPowerExp Determinantal Point Process models. o update.dppmodel Set parameter values in a dpp model. o is.stationary.dppmodel, print.dppmodel, reach.dppmodel, valid.dppmodel Basic information about a dpp model o rdpp, simulate.dppmodel Simulation of a dpp model. o intensity.dppmodel, Kmodel.dppmodel, pcfmodel.dppmodel Moments of a dpp model o dim.dppmodel, dppapproxkernel, dppapproxpcf, dppeigen, dppfamily, dppkernel, dppparbounds, dppspecdenrange, dppspecden Helper functions for dpp models. o dclf.sigtrace, mad.sigtrace, mctest.sigtrace Significance trace of Monte Carlo test o dg.test Dao-Genton adjusted Monte Carlo goodness-of-fit test. o dg.envelope Simulation envelopes corresponding to Dao-Genton test. o dg.sigtrace Significance trace for Dao-Genton test o dg.progress Progress plot for Dao-Genton test o markcrosscorr Mark cross-correlation function for point patterns with several columns of marks o fixef.mppm, ranef.mppm Extract fixed effects and random effects from a point process model fitted to replicated point patterns. o rtemper Simulated annealing or simulated tempering. o to.opaque, to.transparent Change transparency value in colours o rgb2hsva Convert RGB to HSV data, like rgb2hsv, but preserving transparency. o superimpose.ppplist, superimpose.splitppp New methods for 'superimpose' for lists of point patterns. o dkernel, pkernel, qkernel, rkernel Probability density, cumulative probability, quantiles and random generation from distributions used in basic one-dimensional kernel smoothing. o kernel.factor Auxiliary calculations for one-dimensional kernel smoothing. o PPversion, QQversion Transformation of a summary function to its P-P or Q-Q counterpart. o spatdim Spatial dimension of any object in the spatstat package. o as.boxx Convert data to a multi-dimensional box. o intensity.ppx Method for 'intensity' for multi-dimensional space-time point patterns. 
o fourierbasis Evaluate Fourier basis functions in any number of dimensions. o valid New generic function, with methods valid.ppm, valid.lppm, valid.dppmodel o emend, emend.ppm, emend.lppm New generic function with methods for ppm and lppm. emend.ppm is equivalent to project.ppm o Penttinen New pairwise interaction model. o quantile.density Calculates quantiles from kernel density estimates. o CDF.density Calculates cumulative distribution function from kernel density estimates. SIGNIFICANT USER-VISIBLE CHANGES o simulation Several basic simulation algorithms have been accelerated. Consequently, simulation outcomes are not identical to those obtained with previous versions of spatstat, even when the same random seed is used. To ensure compatibility with previous versions of spatstat, revert to the slower code by setting spatstat.options(fastthin=FALSE, fastpois=FALSE). o mppm Now handles models with a random effect component. New argument 'random' is a formula specifying the random effect. o vcov.mppm Now handles models with Gibbs interactions. o [.ppp New argument 'clip' determines whether the window is clipped. o [.ppp The previously-unused argument 'drop' now determines whether to remove unused levels of a factor. o [.pp3, [.lpp, [.ppx, subset.ppp, subset.pp3, subset.lpp, subset.ppx These methods now have an argument 'drop' which determines whether to remove unused levels of a factor. o density.lpp Now supports both the 'equal-split continuous' and 'equal-split discontinuous' smoothers. New argument 'continuous' determines the choice of smoother. o envelope New argument 'scale' allows global envelopes to have width proportional to a specified function of r, rather than constant width. o dclf.test, mad.test, dclf.progress, mad.progress, mctest.progress New argument 'scale' allows summary function values to be rescaled before the comparison is performed. o dclf.test, mad.test New argument 'interpolate' supports interpolation of p-value. 
o dclf.progress, mad.progress, mctest.progress New argument 'interpolate' supports interpolation of critical value of test. o simulate.ppm New argument 'w' controls the window of the simulated patterns. o default.rmhcontrol, default.rmhexpand New argument 'w'. o markcorr New argument 'weights' allows computation of the weighted version of the mark correlation function. o density.lpp New argument 'kernel' specifies the smoothing kernel. Any of the standard one-dimensional smoothing kernels can be used. o contour.im New argument 'col' specifies the colour of the contour lines. If 'col' is a colour map, then the contours are drawn in different colours. o plot.ppp The default colour for the points is now a transparent grey, if this is supported by the plot device. o rgbim, hsvim New argument 'A' controls the alpha (transparency) channel. o rgb2hex, col2hex, paletteindex, is.colour, samecolour, complementarycolour, is.grey, to.grey These colour tools now handle transparent colours. o rgb2hex New argument 'maxColorValue' o to.grey New argument 'transparent'. o progressreport New argument 'state' New option: style="tk" o rLGCP This function no longer requires the package 'RandomFields' to be loaded explicitly. o kppm Fitting a model with clusters="LGCP" no longer requires the package 'RandomFields' to be loaded explicitly. o rpoispp Accelerated, when 'lambda' is a pixel image. o rthin Accelerated, when 'P' is a single number. o spatstat.options New options 'fastthin' and 'fastpois' enable fast simulation algorithms. Set these options to FALSE to reproduce results obtained with previous versions of spatstat. o split.ppp The splitting variable 'f' can now be a logical vector. o collapse.fv This is now treated as a method for the 'nlme' generic 'collapse'. Its syntax has been adjusted slightly. o diagnose.ppm, plot.diagppm New arguments col.neg, col.smooth control the colour maps. o valid.ppm This is now a method for the generic function 'valid'. 
o predict.ppm Crashed sometimes if the model was fitted with use.gam=TRUE. Fixed.
o Hest There was an artefact at r=0 when conditional=TRUE. Fixed. o [.msr The result of M[W] where W is a window was a measure with window W, instead of intersect.owin(W, Window(M)). Fixed. o pool.envelope Did not always respect the value of 'use.theory'. Fixed. o envelope, pool.envelope If 'ginterval' was given, the results were in a slightly incorrect format. Fixed. o pool.envelope Did not check for compatible values of 'ginterval'. Fixed. CHANGES IN spatstat VERSION 1.42-2 OVERVIEW o We thank Bob Klaver and Harold-Jeffrey Ship for contributions. o Improvements to simulation of Neyman-Scott processes. o Improvements to fitting of Neyman-Scott models. o Extended functionality for pixel images. o Fitted intensity on linear network o Triangulation of windows. o Corrected an edge correction. o Bug fixes and performance improvements. o Nickname: 'Barking at Balloons' NEW FUNCTIONS o triangulate.owin Decompose a spatial window into triangles. o fitted.lppm Fitted intensity values for a point process on a linear network. SIGNIFICANT USER-VISIBLE CHANGES o rThomas, rMatClust, rCauchy, rVarGamma When the model is approximately Poisson, it is simulated using rpoispp. This avoids computations which would require huge amounts of memory. New argument 'poisthresh' controls this behaviour. o update.kppm Now handles additional arguments in any order, with or without names. Changed arguments. Improved behaviour. o kppm, clusterfit New argument 'algorithm' specifies the choice of optimisation algorithm. o kppm Left hand side of formula can now involve entries in the list 'data'. o rotmean New argument 'padzero'. Default behaviour has changed. o rose.default New argument 'weights'. o rose New arguments 'start' and 'clockwise' specify the convention for measuring and plotting angles. o padimage New argument 'W' allows an image to be padded out to fill any window. o union.owin Improved behaviour when there are more than 2 windows. o clusterset Improved behaviour. 
o affine.owin Allows transformation matrix to be singular, if the window is polygonal. BUG FIXES o spatstat spatstat could not be installed on some 64-bit VM systems because of an apparent bug in R. Fixed. o rThomas, rMatClust, rCauchy, rVarGamma Large values of the scale parameter could cause the algorithm to freeze or require huge amounts of memory. Fixed. o pcf, pcfinhom Crashed if the point pattern was empty. Fixed. o plot.fv Gave an error message if all 'y' values were equal, when legend=TRUE. Fixed. o rose.default Display was incorrect when unit="radian". Fixed. o Kest Ohser-Stoyan rigid motion correction (correction='rigid') was calculated incorrectly at large distances. Fixed. o summary.im Issued a warning about numerical overflow in some cases. [Spotted by Bob Klaver.] Fixed. o plot.im Sometimes warned that 'box' is not a graphical parameter. Fixed. CHANGES IN spatstat VERSION 1.42-1 OVERVIEW o We thank Andrew Hardegen, Tom Lawrence, Robin Milne, Suman Rakshit, and Brian Ripley for contributions. o Urgent bug fix. o More robust simulation of cluster processes. o Slightly accelerated. o Version nickname: 'Vogon Poetry' NEW FUNCTIONS o boundingbox.solist Method for boundingbox for lists of spatial objects. SIGNIFICANT USER-VISIBLE CHANGES o rThomas, rMatClust, rCauchy, rVarGamma, rNeymanScott New faster algorithm which is more robust against extreme values of the parameters. o rNeymanScott New argument 'nonempty' controls choice of algorithm. o solist, as.solist Accelerated. o as.list.hyperframe Accelerated. BUG FIXES o residuals.mppm Brought some computers to a grinding halt, due to the bug in solist(). Fixed. o solist, as.solist In rare cases, the format was corrupted, or the algorithm never terminated. Fixed. CHANGES IN spatstat VERSION 1.42-0 OVERVIEW o We thank Anders Bilgrau, Ute Hahn, Jack Hywood, Tuomas Rajala, Cody Schank, Olivia Semboli and Ben Taylor for contributions. 
o Version nickname: 'Life, The Universe and Everything' o Permutation test for difference between groups of point patterns. o Variational Bayes estimation for point process models. o Score test in anova.ppm o ROC curve, and discrimination index AUC, for fitted models. o Interactive text editor for spatial datasets. o Tools for analysing data on a tree. o Kernel density/intensity estimation on a linear network. o Random pixel noise. o Improved behaviour of polygon geometry operations. o Improved support for cluster and Cox models. o Improved basic support for owin objects. o Improved support for tessellations. o More hierarchical Gibbs interactions. o Modifications to Kest. o summary method for Cox and cluster models. o class 'listof' is almost completely replaced by 'anylist' and 'solist'. o Improvements and bug fixes. o spatstat now depends on R version 3.2.0 or later. NEW FUNCTIONS o studpermu.test Studentised permutation test for difference between groups of point patterns. Generously contributed by Ute Hahn. o AIC.kppm, extractAIC.kppm, logLik.kppm, nobs.kppm Methods for computing AIC for fitted Cox and cluster models. o transmat Convert pixel arrays between different display conventions. o roc Receiver Operating Characteristic curve. o auc Discrimination index AUC (area under the ROC curve) o edit.ppp, edit.psp, edit.im Interactive text editor works for spatial datasets. o edit.hyperframe Interactive text editor works for hyperframes. o parameters Extract all parameters from a fitted model. o density.lpp Kernel estimation of point process intensity on a linear network. o extractbranch, deletebranch, treeprune, treebranchlabels, begins Tools for analysing data on a tree. o rnoise Random pixel noise. o as.data.frame.owin Convert a window to a data frame. o harmonise.owin Convert several binary mask windows to a common pixel grid. o copyExampleFiles Copy the raw data files from an installed dataset to a chosen folder, for use in a practice exercise. 
o density.ppplist Method for 'density' for lists of point patterns. o inradius Radius of largest circle inside a window. o mergeLevels Merge different levels of a factor. o relevel.im, relevel.ppp, relevel.ppx Change the reference level of a factor. o simulate.profilepl simulation method for models fitted by profile maximum pseudolikelihood. o predict.rho2hat Prediction method for class rho2hat o with.msr Evaluate (an expression involving) components of a measure. o summary.kppm, print.summary.kppm, coef.summary.kppm Methods for 'summary' and 'coef(summary(..))' for Cox and cluster models. o as.im.funxy Method for as.im for class funxy. o shift.linim, scalardilate.linim, affine.linim Geometrical transformations for 'linim' objects. o Smooth.solist Smooth method for a list of spatial objects. o unitname.tess, unitname<-.tess Tessellations now keep track of the name of the unit of length. o dirichletAreas Faster algorithm for tile.areas(dirichlet(X)). o identify.lpp Method for 'identify' for point patterns on a linear network. o HierStraussHard, HierHard Hierarchical interactions for Gibbs models. o delaunayDistance, delaunayNetwork, dirichletEdges, dirichletNetwork, dirichletVertices, dirichletWeights These functions will replace delaunay.distance, delaunay.network, dirichlet.edges, dirichlet.network, dirichlet.vertices and dirichlet.weights respectively. The latter are now 'deprecated'. SIGNIFICANT USER-VISIBLE CHANGES o ppm Now supports Variational Bayes fitting method. o kppm 'AIC' and 'step' now work for kppm objects fitted using maximum Palm likelihood. o kppm The default for the weight function 'weightfun' has been changed, for better performance. o envelope envelope methods now have argument 'use.theory' specifying whether to use the 'theoretical' value of the summary function when constructing simultaneous envelopes. o anova.ppm Now performs the Score Test, for Poisson models only, if argument test="Rao" or test="score". 
o Kest New argument 'rmax' controls maximum value of argument 'r' o diagnose.ppm Now computes and prints the null standard deviation of the smoothed Pearson residual field, when appropriate. o nncorr, nnmean, nnvario New argument 'k' specifies k-th nearest neighbour. o quadrat.test.ppp, quadrat.test.quadratcount New argument 'lambda' supports a test of the Poisson process with given intensity 'lambda'. o clickpoly, clickbox These functions now handle graphical arguments to polygon() when drawing the resulting polygon or rectangle. o owin, as.owin, as.mask owin(mask=D) or as.owin(D) or as.mask(D) will produce a binary mask window if D is a data frame with two columns of (x,y) coordinates or a data frame with three columns containing (x,y,logical). o as.owin.data.frame W can now be a data frame with only two columns, giving the spatial coordinates of the pixels that are inside the window. o rose Tick marks now have labels showing the angle (in degrees or radians). o distcdf New argument 'regularise' determines whether values at short distances will be smoothed to avoid discretisation artefacts. o rpoislinetess Return value now has an attribute 'lines' giving the realisation of the Poisson line process. o intersect.owin, union.owin, setminus.owin New argument 'p' controls resolution of polygon clipping algorithm. o intersect.owin, union.owin Arguments may be lists of windows, of class 'solist'. Formal arguments A and B have been removed. o superimpose Now handles lists of point patterns (objects of class 'ppplist' or 'splitppp') o density.ppp New argument 'positive' allows the user to stipulate that density values must be positive (avoiding numerical errors which occasionally produce small negative values). o adaptive.density Now accepts f = 0 (uniform intensity estimate) and f = 1 (Voronoi intensity estimate) as well as 0 < f < 1. Algorithm accelerated. o rSSI Can now generate inhomogeneous patterns. o effectfun Now works for 'kppm' and 'lppm' objects as well. 
o integral.im, integral.msr Argument 'domain' can now be a tessellation; the integral over each tile of the tessellation is returned. o allstats, compareFit, markcorr, split.ppx, by.ppp Result is now of class 'anylist'. o by.im, density.splitppp, idw, model.images, nnmark, pixellate.ppp, predict.lppm, predict.ppm, quadratcount.splitppp, quadratresample, relrisk, Smooth.msr, split.im, tiles Result is now of class 'solist'. o split.ppp New argument 'reduce'. Result now inherits class 'ppplist' and 'solist', as well as 'splitppp' o rLGCP New argument 'nsim' allows multiple patterns to be generated. o alltypes New argument 'reuse' determines whether all simulation envelopes are based on the same set of simulated patterns, or on independent sets. o rpoispp, runifpoint New argument 'ex' makes it possible to generate a random pattern similar to an example point pattern. o effectfun Argument 'covname' is not needed if the model has only one covariate. o quadratcount Argument 'tess' can now be anything acceptable to as.tess. o tess New argument 'unitname' specifies the name of the unit of length. If it is missing, unitname information will be extracted from the other data. o intersect.tess, chop.tess, quadrats Results of these functions now have the same 'unitname' as their input. o persp.im, nnclean, plot.qqppm, plot.bw.optim These plotting functions now obey spatstat.options('monochrome') o lurking Now returns an object of class 'lurk' which has a plot method. Two-standard-deviation limits are now plotted using grey shading. o marktable New argument 'N' for studying the N nearest neighbours. New argument 'collapse' for manipulating the contingency table. o harmonise.fv Now discards columns with names which do not match. o eval.fv New argument 'equiv' can be used to declare that two columns with different names in different objects are equivalent. o quantile.ewcdf New argument 'type' controls the type of quantile. 
o plot.imlist New argument 'plotcommand' specifies how to plot each image. o persp.im The lower extent of the apron can now be controlled by 'zlim'. o quadscheme Argument 'method' is partially matched. o Kdot, Ldot New argument 'from' is an alternative to 'i'. o Kcross, Lcross New arguments 'from' and 'to' are alternatives to 'i' and 'j' respectively. o varblock Changed the ordering (and therefore default colours/styles) of curves in the plot, to match other functions like lohboot. o bw.diggle New argument 'nr' controls accuracy. o textureplot Now accepts a pixel image, a tessellation, or anything acceptable to as.tess. o textureplot Line spacing in legend now matches line spacing in main display. o [.tess Subset index can now be a window. o plot.tess Can now plot a text label in each tile. o plot.tess New argument 'do.plot'. o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss Printed output of fitted model now respects spatstat.options('terse'). o print.ppm Reduced redundancy in output in some cases. o print.msr Responds better to spatstat.options('terse'). o print.ppm, print.fii, print.interact Irregular parameters are now printed to the number of significant figures specified by options("digits"). o square New argument 'unitname'. o plot.fv Return value is now invisible. o delaunay.distance, delaunay.network, dirichlet.edges, dirichlet.network, dirichlet.vertices These functions are now 'deprecated', and will be replaced by delaunayDistance, delaunayNetwork, dirichletEdges, dirichletNetwork and dirichletVertices respectively. o data(residualspaper) In the real datasets (Fig1 and Fig11), the name of the unit of length has now been recorded. o rLGCP This function now requires the package 'RandomFields' to be loaded explicitly by library(RandomFields) or require(RandomFields), unless model="exp". o iplot, istat These functions now require the package 'rpanel' to be loaded explicitly by library(rpanel) or require(rpanel). 
o ppm, quadscheme Improved calculation of Dirichlet weights. o countends New argument 'toler' controls numerical errors o diagnose.ppm Improved handling of additional graphics arguments. o pcf3est Mathematical labels changed. o plot.hyperframe Default margin spacing has been increased. BUG FIXES o Kinhom, Linhom The value of 'theo' was erroneously rescaled by a small amount, when renormalise=TRUE (the default). Fixed. o Kmark Values were erroneously rescaled. Fixed. o union.owin Strange results were sometimes obtained when taking the union of more than two windows. Fixed. o rpoispp3 Implementation was incorrect for nsim > 1. (Results may have been incorrect.) Spotted by Jack Hywood. Fixed. o as.owin.data.frame Crashed if the window was not connected. Fixed. o Frame<- Crashed when applied to a binary mask. Fixed. o rho2hat Crashed if cov1="x" and cov2="y". Fixed. o as.mask Crashed sometimes when only the argument 'xy' was given. Fixed. o ppm Crashed (rarely) when method='ho' if the simulated pattern was empty. Fixed. o istat, iplot Crashed in recent versions of rpanel. Fixed. o convexhull Crashed if applied to a 'psp' object. Fixed. o plot.ppm Crashed with message about 'variable lengths differ'. Fixed. o plot.solist Crashed when applied to a list of point patterns if some patterns had more than one column of marks. Fixed. o Smooth.ppp Crashed if applied to a point pattern with several columns of marks if some of the columns were factors. Fixed. o runifpoint3, rpoispp3 Crashed if nsim > 1. Spotted by Jack Hywood. Fixed. o hist.im Crashed if argument 'freq' was given. Fixed. o MultiStraussHard Generated misleading error messages (e.g. 'model is invalid') when arguments 'iradii' and 'hradii' did not have the same pattern of NA's. Fixed. o plot.solist Figures were sometimes aligned incorrectly when the argument 'panel.args' was given. Fixed. o scaletointerval Results sometimes fell slightly outside the desired interval due to numerical error. Fixed. 
o plot.solist Behaved incorrectly when plotcommand='persp'. Fixed. o "[.hyperframe" Sometimes returned an 'anylist' when it should have returned a 'solist'. Fixed. o plot.im Did not plot surrounding frame box when ribbon=FALSE. Fixed. o envelope The functions stored when savefuns=TRUE did not inherit the correct name for the unit of length. Fixed. o print.ppm, print.fii, print.interact Layout was misaligned. Fixed. o plot.plotppm Paused for input when it was not appropriate. Fixed. o plot.fv On png devices, the legend box was drawn with a white background, obscuring the main plot. Fixed. o plot.owin, plot.ppp, plot.im There was unnecessary extra space above the main title. Fixed. o plot.rho2hat Colour map ribbon was drawn but not annotated. Fixed. o density.splitppp, density.ppplist Format was out of order if se=TRUE. Fixed. o MultiStraussHard project.ppm sometimes yielded a model that was still invalid. Fixed. CHANGES IN spatstat VERSION 1.41-1 OVERVIEW o This is identical to the major release 1.41-0 except for minor bug fixes. The change log for 1.41-0 is repeated here with minor modifications. o Version nickname: 'Ides of March' o We thank Ahmed El-Gabbas, Ute Hahn, Aruna Jammalamadaka, Ian Renner, Brian Ripley, Torben Tvedebrink and Sasha Voss for contributions. o Fixed a bug causing a segmentation fault. o Standard errors for kernel estimates of intensity. o Test for segregation. o Tessellations may now have marks. o Nested splitting. o More support for cluster models. Reorganised parametrisation. o Sparse data representation of linear networks. o More support for data on a linear network. o New datasets: 'spiders' and 'dendrite'. o Improvements and bug fixes. o spatstat no longer uses Fortran. o spatstat no longer depends on the package 'scatterplot3d'. o spatstat now imports (rather than 'suggests') the Matrix package. NEW DATASETS o dendrite Dendritic spines on the dendrite network of a neuron. A point pattern on a linear network. 
Generously contributed by Aruna Jammalamadaka. o spiders Spider webs on the mortar lines of a brick wall. A point pattern on a linear network. Generously contributed by Sasha Voss. NEW FUNCTIONS o segregation.test Test of spatial segregation of types in a multitype point pattern. o clusterfield, clusterkernel Compute the cluster kernel (offspring density) of a cluster process model, or compute the cluster field generated by superimposing copies of the cluster kernel at specified locations. o clusterradius Compute the radius of the support of the offspring density of a cluster process model. o as.linnet.psp Convert a line segment pattern to a linear network by guessing the connectivity using a distance threshold. o iplot.linnet, iplot.lpp Methods for interactive plotting 'iplot' for objects of class lpp and linnet. o Mathematical operations are now supported for pixel images on a linear network. See help(Math.linim) o dirichlet.network, delaunay.network The linear networks formed by the Dirichlet tessellation and Delaunay triangulation. o dirichlet.edges The edges of the Dirichlet tessellation. o selfcut.psp Cut line segments where they cross each other. o vertices.linnet Extract the vertices (nodes) of the linear network. o vertexdegree Compute the degree of each vertex in a linear network. o pixellate.linnet Pixellate a linear network. o subset.hyperframe 'subset' method for class 'hyperframe'. o head.hyperframe, tail.hyperframe 'head' and 'tail' methods for hyperframes. o clickdist Measures the distance between two spatial locations clicked by the user. o solapply, anylapply wrappers for 'lapply' which return a list of class 'solist' or 'anylist'. o Kmark Weighted K-function. Identical to 'markcorrint' and will eventually replace it. o marks.tess, marks<-.tess, unmark.tess: Extract or change the marks associated with the tiles of a tessellation. o quantess Quantile tessellation: divide space into pieces which contain equal amounts of 'stuff'. 
o nestsplit Nested split o integral New generic function for integrating functions, with methods for 'im', 'msr', 'linim' and 'linfun'. o selfcut.psp Cut line segments where they cross each other o as.function.im Convert a pixel image to a function(x,y). o as.linnet.linim Extract the linear network from a 'linim' object. o pool.fv, pool.anylist New methods for 'pool' o Window.linnet Extract the two-dimensional window containing a linear network. SIGNIFICANT USER-VISIBLE CHANGES o linnet, lpp A linear network can now be built in 'sparse matrix' form which requires much less memory. o chicago The Chicago street crimes data are now stored in 'sparse matrix' form. To convert them to non-sparse form, use as.lpp(chicago, sparse=FALSE) o kppm The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. Results should be backward-compatible. o cauchy.estK, cauchy.estpcf, matclust.estK, matclust.estpcf, thomas.estK, thomas.estpcf, vargamma.estK, vargamma.estpcf The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. o plot.kppm Also plots the cluster kernel. o density.ppp New argument 'se' allows calculation of standard errors as well. o plot.pp3 Now produces a genuine perspective view. New arguments control the eye position for the perspective view. o Emark, Vmark These functions can now work with multiple columns of marks. o pixellate.psp Can now count the number of segments that intersect each pixel, instead of the total length of intersection. o linfun If g = linfun(f, L), the function f will always be called as f(x,y,seg,tp, ...) It is no longer expected to handle the case where 'seg' and 'tp' are absent. The resulting function g can now be called as g(X) where X is an lpp object, or as g(x,y) or g(x,y,seg,tp) where x,y,seg,tp are coordinates. o tess New argument 'marks' allows marks to be associated with tiles. 
o anova.lppm Outdated argument 'override' has been removed. o split<-.ppp Preserves the original ordering of the data, if possible. o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss Zero values in the interaction radii are now treated as NA. Improved handling of missing arguments. Printed output now respects options('width') o linearKinhom, linearKcross.inhom, linearKdot.inhom, linearpcfinhom, linearpcfcross.inhom, linearpcfdot.inhom If the intensity argument lambda, lambdaI, lambdaJ, lambdadot is a fitted point process model, the model is first updated by re-fitting it to the data, before computing the fitted intensity. o solutionset The expression will be evaluated using pixel arithmetic (Math.im) if it cannot be evaluated using eval.im. o to.grey Now uses better weights for the R, G, B channels. o rVarGamma Accelerated. o summary.mppm, print.mppm These functions now respect options('width') and spatstat.options('terse'). o print.quadrattest Now respects options('width') and spatstat.options('terse'). o print.pp3 Now respects options('width') o print.lpp Now respects options('width') and options('digits'). o print.owin, print.im, print.summary.owin, print.summary.im Now respect options('width'). o nnmean Now yields a vector, instead of a 1-column matrix, when there is only a single column of marks. o pairdist.psp, crossdist.psp, nndist.psp The option 'method="Fortran"' is no longer supported. The default is 'method="C"'. o [.hyperframe: When a row of data is extracted with drop=TRUE, the result belongs to class 'anylist'. o installation of spatstat A Fortran compiler is no longer needed to compile spatstat from source. o hyperframe class The internal structure of hyperframes has changed slightly: columns of objects are now stored and returned as lists of class 'anylist' or 'solist'. There should be no change in behaviour. 
o datasets Internal format of the datasets bdspots, bei, clmfires, demohyper, flu, gorillas, heather, Kovesi, murchison, osteo, pyramidal, waterstriders has changed slightly to use the classes 'anylist' and 'solist'. There should be no change in behaviour. o K3est New argument 'ratio'. o spatstat.options New option 'par.points3d' sets default arguments for plot.pp3. o diagnose.ppm New arguments 'xlab', 'ylab', 'rlab' determine the labels in the 4-panel plot, and new argument 'outer' controls their position. The confusing default value for 'compute.sd' has been changed. o iplot.layered New argument 'visible' controls which layers are initially visible. o plot.lpp New argument 'show.window' controls whether to plot the containing window. o textstring Any number of spatial locations (x,y) can be specified, with a corresponding vector of text strings. o plot.hyperframe New argument 'mar' o plot.linnet New argument 'do.plot' o summary.hyperframe Improved output. o eval.linim Improved scoping rules. o pixellate.owin Accelerated. o summary.linnet Now prints more information, and respects options('digits'). o rmpoispp, rmpoint The vector of possible types of points will default to the 'names' vector of the argument 'lambda', 'n', or 'f' where appropriate. o rpoislpp Argument 'L' can be omitted when lambda is a 'linim' or 'linfun' o simulate.ppm, simulate.kppm, simulate.lppm, simulate.slrm New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern rather than a list containing one point pattern. o runifdisc, runifpoint, rpoint, rpoispp, rmpoint, rmpoispp, rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma, rpoispp3, runifpoint3 New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern rather than a list containing one point pattern. 
o spatstat.options New option 'units.paren' controls the type of parenthesis enclosing the explanatory text about the unit of length, in print.ppm, plot.fv, etc. o closepairs, crosspairs New option: what="ijd" returns only the indices i, j and the distance d o rCauchy, rMatClust, rNeymanScott, rPoissonCluster, rThomas, rVarGamma Argument names have changed. BUG FIXES o sumouter A segmentation fault could occur if any data were NA. Fixed. o simulate.kppm Simulation failed for log-Gaussian Cox processes (in simulate.kppm only) with an error message from the RandomFields package. Fixed. o ppm, predict.ppm, profilepl Crashed sometimes with message "interaction evaluator did not return a matrix". Fixed. o lppm step() did not work correctly on 'lppm' objects. Fixed. o quadscheme If quadscheme() was called explicitly, with the stipulated number of tiles exceeding the number of dummy points given, then the quadrature weights were sometimes vastly inflated - total quadrature weight was much larger than window area. Spotted by Ian Renner. Fixed. o predict.rhohat Result was incorrect for data on a non-rectangular window (and a warning was issued about incorrect vector length). Fixed. o Math.im Unary operators did not work (e.g. "-x"). Fixed. o density.ppp Crashed when at="points" if the dataset had exactly 1 point. Fixed. o rSSI Crashed if nsim > 1. Fixed. o influence.ppm, leverage.ppm, dfbetas.ppm Crashed or issued a warning if any quadrature points had conditional intensity zero under the model (negative infinite values of the sufficient statistic). Fixed. o clickppp, clickpoly Did not work correctly in the RStudio display device. Fixed. o Iest Ignored the arguments 'r' and 'eps'. Fixed. o markvario Result was garbled, when X had more than one column of marks. Fixed. o rMatClust, rVarGamma, rCauchy, rNeymanScott Result was a list, but not a 'solist', when nsim > 1. Fixed. 
o print.mppm, summary.mppm, subfits Crashed if a Poisson interaction was implied but not given explicitly. Fixed. o Kest Crashed if ratio=TRUE and the window was a rectangle. Fixed. o anova.ppm Crashed sometimes with message 'models were not all fitted to the same size of dataset'. (This occurred if there were quadrature points with conditional intensity equal to zero in some models but not in all models.) Fixed. o vcov.kppm Occasionally ran out of memory. Fixed. o as.linim.linfun Erroneously converted the pixel values to numeric values. Fixed. o as.owin.layered Ignored layers with zero area. Fixed. o plot.ppm Paused the plot between frames even when there was only one frame. Fixed. o plot.layered Did not allocate space for legends of 'lpp' objects. Fixed. o plot.lpp Ignored symbolmap arguments like 'cex' and confused the arguments 'col' and 'cols'. Fixed. o plot.diagppm Ignored add=TRUE in some cases. Fixed. o iplot.layered Did not handle 'diagramobj' objects correctly. Fixed. o plot.yardstick Changed arguments. CHANGES IN spatstat VERSION 1.41-0 OVERVIEW o We thank Ahmed El-Gabbas, Ute Hahn, Aruna Jammalamadaka, Ian Renner, Brian Ripley, Torben Tvedebrink and Sasha Voss for contributions. o Fixed a bug causing a segmentation fault. o Standard errors for kernel estimates of intensity. o Test for segregation. o Tessellations may now have marks. o Nested splitting. o More support for cluster models. Reorganised parametrisation. o Sparse data representation of linear networks. o More support for data on a linear network. o New datasets: 'spiders' and 'dendrite'. o Improvements and bug fixes. o spatstat no longer uses Fortran. o spatstat no longer depends on the package 'scatterplot3d'. o spatstat now imports (rather than 'suggests') the Matrix package. o Nickname: 'Team Australia' NEW DATASETS o dendrite Dendritic spines on the dendrite network of a neuron. A point pattern on a linear network. Generously contributed by Aruna Jammalamadaka. 
o spiders Spider webs on the mortar lines of a brick wall. A point pattern on a linear network. Generously contributed by Sasha Voss. NEW FUNCTIONS o segregation.test Test of spatial segregation of types in a multitype point pattern. o clusterfield, clusterkernel Compute the cluster kernel (offspring density) of a cluster process model, or compute the cluster field generated by superimposing copies of the cluster kernel at specified locations. o clusterradius Compute the radius of the support of the offspring density of a cluster process model. o as.linnet.psp Convert a line segment pattern to a linear network by guessing the connectivity using a distance threshold. o iplot.linnet, iplot.lpp Methods for interactive plotting 'iplot' for objects of class lpp and linnet. o Mathematical operations are now supported for pixel images on a linear network. See help(Math.linim) o dirichlet.network, delaunay.network The linear networks formed by the Dirichlet tessellation and Delaunay triangulation. o dirichlet.edges The edges of the Dirichlet tessellation. o selfcut.psp Cut line segments where they cross each other. o vertices.linnet Extract the vertices (nodes) of the linear network. o vertexdegree Compute the degree of each vertex in a linear network. o pixellate.linnet Pixellate a linear network. o subset.hyperframe 'subset' method for class 'hyperframe'. o head.hyperframe, tail.hyperframe 'head' and 'tail' methods for hyperframes. o clickdist Measures the distance between two spatial locations clicked by the user. o solapply, anylapply wrappers for 'lapply' which return a list of class 'solist' or 'anylist'. o Kmark Weighted K-function. Identical to 'markcorrint' and will eventually replace it. o marks.tess, marks<-.tess, unmark.tess: Extract or change the marks associated with the tiles of a tessellation. o quantess Quantile tessellation: divide space into pieces which contain equal amounts of 'stuff'. 
o nestsplit Nested split o integral New generic function for integrating functions, with methods for 'im', 'msr', 'linim' and 'linfun'. o selfcut.psp Cut line segments where they cross each other o as.function.im Convert a pixel image to a function(x,y). o as.linnet.linim Extract the linear network from a 'linim' object. o pool.fv, pool.anylist New methods for 'pool' o Window.linnet Extract the two-dimensional window containing a linear network. SIGNIFICANT USER-VISIBLE CHANGES o linnet, lpp A linear network can now be built in 'sparse matrix' form which requires much less memory. o chicago The Chicago street crimes data are now stored in 'sparse matrix' form. To convert them to non-sparse form, use as.lpp(chicago, sparse=FALSE) o kppm The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. Results should be backward-compatible. o cauchy.estK, cauchy.estpcf, matclust.estK, matclust.estpcf, thomas.estK, thomas.estpcf, vargamma.estK, vargamma.estpcf The parametrisation of cluster models has been reorganised. The scale parameter is now always called 'scale'. o plot.kppm Also plots the cluster kernel. o density.ppp New argument 'se' allows calculation of standard errors as well. o plot.pp3 Now produces a genuine perspective view. New arguments control the eye position for the perspective view. o Emark, Vmark These functions can now work with multiple columns of marks. o pixellate.psp Can now count the number of segments that intersect each pixel, instead of the total length of intersection. o linfun If g = linfun(f, L), the function f will always be called as f(x,y,seg,tp, ...) It is no longer expected to handle the case where 'seg' and 'tp' are absent. The resulting function g can now be called as g(X) where X is an lpp object, or as g(x,y) or g(x,y,seg,tp) where x,y,seg,tp are coordinates. o tess New argument 'marks' allows marks to be associated with tiles. 
o anova.lppm Outdated argument 'override' has been removed. o split<-.ppp Preserves the original ordering of the data, if possible. o MultiHard, MultiStrauss, MultiStraussHard, HierStrauss Zero values in the interaction radii are now treated as NA. Improved handling of missing arguments. Printed output now respects options('width') o linearKinhom, linearKcross.inhom, linearKdot.inhom, linearpcfinhom, linearpcfcross.inhom, linearpcfdot.inhom If the intensity argument lambda, lambdaI, lambdaJ, lambdadot is a fitted point process model, the model is first updated by re-fitting it to the data, before computing the fitted intensity. o solutionset The expression will be evaluated using pixel arithmetic (Math.im) if it cannot be evaluated using eval.im. o to.grey Now uses better weights for the R, G, B channels. o rVarGamma Accelerated. o summary.mppm, print.mppm These functions now respect options('width') and spatstat.options('terse'). o print.quadrattest Now respects options('width') and spatstat.options('terse'). o print.pp3 Now respects options('width') o print.lpp Now respects options('width') and options('digits'). o print.owin, print.im, print.summary.owin, print.summary.im Now respect options('width'). o nnmean Now yields a vector, instead of a 1-column matrix, when there is only a single column of marks. o pairdist.psp, crossdist.psp, nndist.psp The option 'method="Fortran"' is no longer supported. The default is 'method="C"'. o [.hyperframe: When a row of data is extracted with drop=TRUE, the result belongs to class 'anylist'. o installation of spatstat A Fortran compiler is no longer needed to compile spatstat from source. o hyperframe class The internal structure of hyperframes has changed slightly: columns of objects are now stored and returned as lists of class 'anylist' or 'solist'. There should be no change in behaviour. 
o datasets Internal format of the datasets bdspots, bei, clmfires, demohyper, flu, gorillas, heather, Kovesi, murchison, osteo, pyramidal, waterstriders has changed slightly to use the classes 'anylist' and 'solist'. There should be no change in behaviour. o K3est New argument 'ratio'. o spatstat.options New option 'par.points3d' sets default arguments for plot.pp3. o diagnose.ppm New arguments 'xlab', 'ylab', 'rlab' determine the labels in the 4-panel plot, and new argument 'outer' controls their position. The confusing default value for 'compute.sd' has been changed. o iplot.layered New argument 'visible' controls which layers are initially visible. o plot.lpp New argument 'show.window' controls whether to plot the containing window. o textstring Any number of spatial locations (x,y) can be specified, with a corresponding vector of text strings. o plot.hyperframe New argument 'mar' o plot.linnet New argument 'do.plot' o summary.hyperframe Improved output. o eval.linim Improved scoping rules. o pixellate.owin Accelerated. o summary.linnet Now prints more information, and respects options('digits'). o rmpoispp, rmpoint The vector of possible types of points will default to the 'names' vector of the argument 'lambda', 'n', or 'f' where appropriate. o rpoislpp Argument 'L' can be omitted when lambda is a 'linim' or 'linfun' o simulate.ppm, simulate.kppm, simulate.lppm, simulate.slrm New argument 'drop': if nsim = 1 and drop=TRUE, the result is a point pattern rather than a list containing one point pattern. o runifdisc, runifpoint, rpoint, rpoispp, rmpoint, rmpoispp, rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma New argument 'drop' o spatstat.options New option 'units.paren' controls the type of parenthesis enclosing the explanatory text about the unit of length, in print.ppm, plot.fv, etc. 
o closepairs, crosspairs New option: what="ijd" returns only the indices i, j and the distance d o rCauchy, rMatClust, rNeymanScott, rPoissonCluster, rThomas, rVarGamma Argument names have changed. BUG FIXES o sumouter A segmentation fault could occur if any data were NA. Fixed. o simulate.kppm Simulation failed for log-Gaussian Cox processes (in simulate.kppm only) with an error message from the RandomFields package. Fixed. o ppm, predict.ppm, profilepl Crashed sometimes with message "interaction evaluator did not return a matrix". Fixed. o lppm step() did not work correctly on 'lppm' objects. Fixed. o quadscheme If quadscheme() was called explicitly, with the stipulated number of tiles exceeding the number of dummy points given, then the quadrature weights were sometimes vastly inflated - total quadrature weight was much larger than window area. Spotted by Ian Renner. Fixed. o predict.rhohat Result was incorrect for data on a non-rectangular window (and a warning was issued about incorrect vector length). Fixed. o Math.im Unary operators did not work (e.g. "-x"). Fixed. o density.ppp Crashed when at="points" if the dataset had exactly 1 point. Fixed. o rSSI Crashed if nsim > 1. Fixed. o influence.ppm, leverage.ppm, dfbetas.ppm Crashed or issued a warning if any quadrature points had conditional intensity zero under the model (negative infinite values of the sufficient statistic). Fixed. o clickppp, clickpoly Did not work correctly in the RStudio display device. Fixed. o Iest Ignored the arguments 'r' and 'eps'. Fixed. o markvario Result was garbled, when X had more than one column of marks. Fixed. o rMatClust, rVarGamma, rCauchy, rNeymanScott Result was a list, but not a 'solist', when nsim > 1. Fixed. o print.mppm, summary.mppm, subfits Crashed if a Poisson interaction was implied but not given explicitly. Fixed. o Kest Crashed if ratio=TRUE and the window was a rectangle. Fixed. 
o anova.ppm Crashed sometimes with message 'models were not all fitted to the same size of dataset'. (This occurred if there were quadrature points with conditional intensity equal to zero in some models but not in all models.) Fixed. o vcov.kppm Occasionally ran out of memory. Fixed. o as.linim.linfun Erroneously converted the pixel values to numeric values. Fixed. o as.owin.layered Ignored layers with zero area. Fixed. o plot.ppm Paused the plot between frames even when there was only one frame. Fixed. o plot.layered Did not allocate space for legends of 'lpp' objects. Fixed. o plot.lpp Ignored symbolmap arguments like 'cex' and confused the arguments 'col' and 'cols'. Fixed. o plot.diagppm Ignored add=TRUE in some cases. Fixed. o iplot.layered Did not handle 'diagramobj' objects correctly. Fixed. o plot.yardstick Changed arguments. CHANGES IN spatstat VERSION 1.40-0 OVERVIEW o We thank Markus Herrmann, Peter Kovesi, Andrew Lister, Enrique Miranda, Tuomas Rajala, Brian Ripley, Dominic Schuhmacher and Maxime Woringer for contributions. o Important bug fixes. o Mathematical operators now apply to images. o Parametric estimates of relative risk from fitted point process models. o Standard errors for relative risk (parametric and non-parametric). o Kernel smoothing and rose diagrams for angular data. o Perceptually uniform colour maps. o Hierarchical interactions for multitype patterns. o Hard core parameters in all interactions no longer need to be specified and will be estimated from data. o Improvements to analysis of deviance and model selection. o New datasets. o New vignette, summarising all datasets installed with spatstat. o Tests and diagnostics now include a Monte Carlo option. o Faster checking of large datasets. o Faster simulations. o Code for drawing diagrams (arrows, scale bars). o Version nickname: 'Do The Maths' NEW DATASETS o bdspots Breakdown spots on microelectronic capacitor electrodes. Generously contributed by Prof Enrique Miranda. 
o Kovesi Colour maps with perceptually uniform contrast. Generously contributed by Peter Kovesi. NEW FUNCTIONS o Mathematical operations are now supported for images. For example: alpha <- atan(bei.extra$grad) * 180/pi See help(Math.im) o relrisk.ppm Spatially-varying probabilities of different types of points predicted by a fitted point process model. o circdensity Kernel density estimate for angular data o rose Rose diagram (rose of directions) for angular data o nnorient Nearest neighbour orientation distribution. o AIC.ppm Calculate AIC of a Gibbs model using Takeuchi's rule. o interp.colours Interpolate a sequence of colour values. o anyDuplicated.ppp, anyDuplicated.ppx Fast replacements for any(duplicated(x)) for point patterns. o textstring, onearrow, yardstick Objects representing a text string, an arrow, or a scale bar, for use in drawing spatial diagrams. o plot.imlist, image.imlist, contour.imlist Methods for the new class 'imlist' o [<-.layered, [[<-.layered More support for class 'layered' SIGNIFICANT USER-VISIBLE CHANGES o (vignettes) New vignette 'datasets' summarises all the datasets installed with the spatstat package. o relrisk The function relrisk is now generic, with methods for ppp and ppm. New argument 'relative' specifies whether to calculate the relative risk or the absolute probability of each type of point. New argument 'se' specifies whether to calculate standard errors. o plot.im The default colour map for plotting images, specified by spatstat.options('image.colfun'), has been changed to a perceptually uniform map. o DiggleGratton, Fiksel, MultiHard, MultiStraussHard The hard core distance parameters in these models can now be omitted by the user, and will be estimated automatically from data (by the 'self-starting' feature of interactions). This was already true of Hardcore and StraussHard. o Hybrid Hybrid models now apply the 'self-starting' feature to each component model. 
o anova.ppm Can now reconcile models fitted using different dummy points, different values of 'rbord', different values of 'use.gam', etc. o profilepl New argument 'aic' makes it possible to optimise the parameters by minimising AIC. o profilepl No longer requires values for parameters which are 'optional' (such as the hard core distance). o rmh, simulate.ppm, rmh.ppm, rmh.default The Metropolis-Hastings algorithm now starts by deleting any points in the initial state that are 'illegal' (i.e. whose conditional intensity is equal to zero). This ensures that the result of rmh never contains illegal points. o runifpoint, rpoispp, rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, runifdisc, rpoint, rMaternI, rMaternII, rSSI, rPoissonCluster, rGaussPoisson, rstrat, rsyst, rcell, rthin, rjitter, rNeymanScott, rMatClust, rThomas, rCauchy, rVarGamma, rmpoint, rmpoispp, runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp, runifpointx, rpoisppx, runifpoint3, rpoispp3 These random point pattern generators now have an argument 'nsim' specifying the number of simulated realisations to be generated. o pairorient New argument 'cumulative'. New algorithm to compute kernel estimate of probability density. Default behaviour changed. Argument 'units' has been renamed 'unit' for consistency. Labels and descriptions of columns have been corrected. o predict.ppm New syntax (backward-compatible). New argument 'se' replaces option 'type="se"'. Old argument 'total' is deprecated: use 'window' and set 'type="count"'. o cdf.test The methods for class 'ppm' and 'lppm' now handle Gibbs models and perform a Monte Carlo test in this case. o lurking, diagnose.ppm Lurking variable plot can now include simulation envelopes. o rmh.ppm New argument 'w' determines the window in which the simulated pattern is generated. o ppp Accelerated. 
o Gcom, Gres When conditional=TRUE and restrict=TRUE, the Hanisch estimate was not calculated exactly as described in Appendix E.1 of Baddeley, Rubak and Moller (2011). The intensity was estimated on the full window rather than the eroded window. Fixed. o step, drop1, add1, extractAIC The AIC of a Gibbs model is now calculated using Takeuchi's rule for the degrees of freedom. o model.matrix.ppm, model.matrix.kppm New argument 'Q' allows prediction at any desired locations. o vcov.ppm New argument 'fine' gives more control over computation. o predict.ppm For multitype models, when the result is a list of images, the names of list entries are now identical to the mark levels (e.g. "hickory" instead of "markhickory") o print.slrm Output now respects options('width') o image.listof New argument 'ribmar' controls margin space around the ribbon when equal.ribbon=TRUE. o integral.im New argument 'domain' specifies the domain of integration. o plot.fasp New argument 'transpose' allows rows and columns to be exchanged. o plot.im The list 'ribargs' can now include the parameter 'labels'. o rmh, rpoint, rpoispp, rmpoint, rmpoispp Accelerated, for inhomogeneous processes. o stienen Now recognises the parameter 'lwd'. o suffstat Accelerated (also affects ppm with method='ho'). o Poisson, AreaInter, BadGey, Concom, DiggleGatesStibbard, DiggleGratton, Fiksel, Geyer, Hardcore, Hybrid, LennardJones, MultiHard, MultiStrauss, MultiStraussHard, OrdThresh, Ord, PairPiece, Pairwise, SatPiece, Saturated, Softcore, Strauss, StraussHard, Triplets These functions can now be printed (by typing the function name) to give a sensible description of the required syntax. o fitin A plot of the fitted interpoint interaction of a point process model e.g. plot(fitin(ppm(swedishpines ~ 1, Strauss(9)))) now shows the unit of length on the x-axis. o fitin Plots of the fitted interpoint interaction are now possible for some higher-order interactions such as Geyer and AreaInter. 
o anova.ppm New argument 'warn' to suppress warnings. o rmhmodel.ppm Argument 'win' renamed 'w' for consistency with other functions. o print.ppm Printed output for the fitted regular parameters now respects options('digits'). o print.ppm, print.summary.ppm Output now respects options('width') and spatstat.options('terse') o print.ppm By default, standard errors are not printed for a model fitted with method="logi" (due to computational load) o plot.profilepl Now recognises 'lty', 'lwd', 'col' etc o vesicles, gorillas Some of the raw data files for these datasets are also installed in spatstat for demonstration and training purposes. BUG FIXES o rmh, rmh.ppm, rmh.default, simulate.ppm The result of simulating a model with a hard core did not necessarily respect the hard core constraint, and simulation of a model with strong inhibition did not necessarily converge. This only happened if the first order trend was large, the starting state (n.start or x.start) was not given, and the number of iterations (nrep) was not very large. It occurred because of a poor choice for the default starting state. Bug was present since about 2010. Fixed. o markcorrint Results were completely incorrect. Bug introduced in spatstat 1.39-0, october 2014. Fixed. o Kinhom Ignored argument 'reciplambda2' in some cases. Bug introduced in spatstat 1.39-0, october 2014. Fixed. o relrisk When at="pixels", a small fraction of pixel values were sometimes wildly inaccurate, due to numerical errors. This affected the range of values in the result, and therefore the appearance of plots. Fixed. o model.images Crashed if the model was multitype. Fixed. o profilepl Crashed in some cases when the interaction was multitype. [Spotted by Andrew Lister.] Fixed. o profilepl Crashed if the model involved covariates that were not given in a 'data' argument. Fixed. o envelope.ppm Crashed if global=TRUE and savefuns=TRUE. Fixed. o setminus.owin Crashed if the result was empty and the input was polygonal. Fixed. 
o predict.ppm Crashed sometimes when type="cif" and ngrid was very large. Fixed. o pixelquad If X was a multitype point pattern, the result was mangled. Fixed. o relrisk Did not accept a character string value for the argument 'case'. Fixed. o intensity.ppm Format of result was incorrect for ppm(Y ~ 1) where Y is multitype. Fixed. o $<-.hyperframe Columns containing character values were converted to factors. Fixed. o clickppp Sometimes filled the window in solid black colour. Fixed. o plot.psp Ignored 'show.all' in some cases. Fixed. o plot.ppp Warned about NA values amongst the marks, even if there were no NA's in the column(s) of marks selected by the argument 'which.marks'. Fixed. o stienen Did not suppress the border circles when border=FALSE. Fixed. CHANGES IN spatstat VERSION 1.39-1 OVERVIEW o Urgent bug fix. o We thank Suman Rakshit and Brian Ripley for contributions. BUG FIXES o bdry.mask, convexhull In R-devel only, these functions could return an empty window, causing errors in other packages. [Spotted by Brian Ripley.] Fixed. o project2segment An error occurred if any line segments had length zero. [Spotted by Suman Rakshit.] Fixed. CHANGES IN spatstat VERSION 1.39-0 OVERVIEW o We thank Shane Frank, Shaaban Ghalandarayeshi, Ute Hahn, Mahdieh Khanmohammadi, Nicoletta Nava, Jens Randel Nyengaard, Sebastian Schutte, Rasmus Waagepetersen and Carl G. Witthoft for contributions. o ANOVA extended to Gibbs models. o Improved algorithm for locally-scaled K-function. o Leave-one-out calculation of fitted values in ppm objects. o New dataset: presynaptic vesicles. o Geometrical operations with windows and images. o More edge corrections for K-function. o Improved handling and plotting of 'fv' objects. o Utilities for perspective views of surfaces. o New classes 'anylist', 'solist' will ultimately replace 'listof'. o Bug fixes. 
o Version nickname: 'Smoke and Mirrors' NEW DATASETS o vesicles Synaptic vesicles (includes raw data files for training purposes) NEW CLASSES o anylist List of objects. (A replacement for 'listof') o solist List of two-dimensional spatial objects. (A replacement for some uses of 'listof') NEW FUNCTIONS o perspPoints, perspLines, perspSegments, perspContour Draw points and lines on a surface, as seen in perspective view. o hopskel.test Hopkins-Skellam test of CSR o project2set For each data point in a point pattern X, find the nearest spatial location in a given window W. o stienen, stienenset Stienen diagrams o dirichlet.vertices Vertices of the Dirichlet tessellation o discs Union of discs. Given a point pattern dataset recording the locations and diameters of objects, find the region covered by the objects. o increment.fv Increments of a summary function: g(x) = f(x+h)-f(x-h). o rotmean Rotational average of pixel values in an image o fardist Distance from each pixel/data point to farthest part of window boundary o circumradius.owin Circumradius of a window o rmax.Trans, rmax.Ripley Compute the maximum distance 'r' for which the translation edge correction and isotropic edge correction are valid. o is.grey Determines whether a colour value is a shade of grey. o harmonise Convert several objects of the same class to a common format. (New generic function with existing methods harmonise.im and harmonise.fv) o area New generic function, with methods for 'owin' and 'default'. 
o Fhazard Hazard rate of empty space function o anylist, as.anylist, [.anylist, [<-.anylist, print.anylist, summary.anylist Support for new class 'anylist' o solist, as.solist, [.solist, [<-.solist, print.solist, summary.solist Support for new class 'solist' o plot.anylist, plot.solist Plot methods for the classes 'anylist' and 'solist' (Currently identical to 'plot.listof') SIGNIFICANT USER-VISIBLE CHANGES o anova.ppm Now applies to Gibbs models as well as Poisson models, using adjusted composite likelihood ratio test statistic. o persp.im If visible=TRUE, the algorithm will also calculate which pixels of x are visible in the perspective view. This is useful for drawing points or lines on a perspective surface. o Kscaled Improved algorithm [thanks to Ute Hahn.] New arguments 'renormalise' and 'normpower' allow renormalisation of intensity, similar to Kinhom. o Kest New option: correction="rigid" computes the rigid motion correction. o pairwise interactions Fitted parameters and other calculations for pairwise interaction models DiggleGatesStibbard, DiggleGratton, Fiksel, Geyer, Strauss may change slightly due to a change in handling numerical rounding effects. o eval.fv Functions no longer need to have exactly the same sequence of 'r' values. They will now be made compatible using 'harmonise.fv'. o fitted.ppm New argument 'leaveoneout' allows leave-one-out calculation of fitted intensity at original data points. o Kinhom, Linhom New argument 'leaveoneout' specifies whether the leave-one-out rule should be applied when calculating the fitted intensities. o crosspaircounts Results may change slightly due to a change in handling numerical rounding effects. o Fest, Gest New argument 'domain' supports bootstrap methods. o plot.fv New argument 'mathfont' determines the font (e.g. plain, italic, bold) for mathematical expressions on the axes and in the legend. Defaults to italic. o scanpp Upgraded to handle multiple columns of mark data. 
o circumradius The function 'circumradius' is now generic, with methods for the classes 'owin' and 'linnet'. o edge.Trans New argument 'give.rmax' o fvnames, plot.fv The symbol '.a' is now recognised. It stands for 'all function values'. o as.function.fv Argument 'extrapolate' can have length 1 or 2. o varblock New argument 'confidence' determines the confidence level. o $<-.fv This can now be used to add an extra column to an 'fv' object (previously it refused). o minnndist, maxnndist New argument 'positive'. If TRUE, coincident points are ignored: the nearest-neighbour distance of a point is the distance to the nearest point that does not coincide with the current point. o plot.fv Improved handling of 'shade' argument. o Kmeasure Now passes '...' arguments to as.mask() o Ksector Now allows start < 0. New arguments 'units' and 'domain'. o pairorient New arguments 'units' and 'domain'. o eroded.areas New argument 'subset' o disc New argument 'delta' o plot.plotppm New argument 'pppargs' o harmonise.fv, harmonise.im These are now methods for the new generic 'harmonise' o Fest, Gest These functions now also compute the theoretical value of hazard for a Poisson process, if correction = "km". o with.fv Improved mathematical labels. o Gfox, Jfox Improved mathematical labels. o area.owin This function is now a method for the new generic 'area' o edges Default for argument 'check' changed to FALSE. BUG FIXES o varblock Calculations were incorrect if more than one column of edge corrections was computed. [Bug introduced in spatstat 1.21-1, November 2010.] Fixed. o varblock Crashed if one of the quadrats contained no data points. Fixed. o lohboot Interval was calculated wrongly when global=TRUE and fun="Lest" or "Linhom". Fixed. o nnmark Crashed when at="points" if there was only a single column of marks. [Spotted by Shane Frank.] Fixed. o plot.msr Some elements of the plot were omitted or cut off. Fixed. o plot.msr Did not work with 'equal.scales=TRUE'. Fixed. 
o plot.msr, augment.msr Crashed if every data point was duplicated. Fixed. o as.im.owin Crashed if X was a 1 x 1 pixel array. Fixed. o owin Coordinates of polygon data were altered slightly when fix=TRUE. [Spotted by Carl Witthoft.] Fixed. o objects of class 'fv' Assigning a new value to names(x) or colnames(x) or dimnames(x) would cause the internal data format to become corrupted. Fixed. o to.grey, complementarycolour Did not work properly on 'colourmap' objects. Fixed. o Kest Ignored argument 'var.approx' if the window was a rectangle. Fixed. o rmh.ppm, rmhmodel.ppm Ignored the argument 'new.coef'. [Spotted by Sebastian Schutte.] Fixed. o as.function.fv The meanings of 'extrapolate=TRUE' and 'extrapolate=FALSE' were swapped. Fixed. o varblock Handled the case 'fun=Lest' incorrectly. Fixed. o [.fv Sometimes garbled the internal data format, causing plot.fv to crash. Fixed. o range.fv Sometimes returned NA even when na.rm=TRUE. Fixed. o Fest Argument 'eps' was not interpreted correctly. Fixed. o plot.fv Argument 'lwd' was not passed to legend(). Fixed. o flipxy.owin Sometimes deleted the name of the unit of length. Fixed. CHANGES IN spatstat VERSION 1.38-1 OVERVIEW o We thank Ute Hahn and Xavier Raynaud for contributions. o Urgent Bug Fixes. o Nickname: 'Le Hardi' NEW FUNCTIONS o "[<-.fv", "$<-.fv" Subset replacement methods for 'fv' objects. SIGNIFICANT USER-VISIBLE CHANGES o clarkevans.test Simulations are now performed with a fixed number of points. o plot.owin, plot.ppp, plot.psp, plot.im The default size of the outer margin of white space has been reduced. o dclf.test Improved information in printed output. BUG FIXES o update.ppm Results were incorrect in several cases. [Spotted by Xavier Raynaud.] Bug introduced in spatstat 1.38-0. Fixed. o Kinhom, Linhom Calculations were incorrect if 'lambda' was a fitted point process model. [Spotted by Xavier Raynaud.] Bug introduced in spatstat 1.38-0. Fixed. o envelope.envelope Ignored the arguments 'global' and 'VARIANCE'. 
Fixed. o fv objects If 'f' was an object of class 'fv', then an assignment like f$name <- NULL mangled the internal format of the object 'f', leading to errors in print.fv and plot.fv. [Spotted by Ute Hahn.] Fixed. o split.ppp split(X, A) where A is a rectangular tessellation, produced errors if the window of 'A' did not include the window of 'X'. [Spotted by Ute Hahn.] Fixed. o names<-.hyperframe Mangled the internal format. [Spotted by Ute Hahn.] Fixed. o plot.fv y axis label was incorrect in some cases when the 'fv' object had only a single column of function values. [Spotted by Ute Hahn.] Fixed. CHANGES IN spatstat VERSION 1.38-0 OVERVIEW o We thank Malissa Baddeley, Colin Beale, Oscar Garcia, Daniel Esser, David Ford, Eric Gilleland, Andrew Hardegen, Philipp Hunziker, Abdollah Jalilian, Tom Lawrence, Lore De Middeleer, Robin Milne, Mike Porter, Suman Rakshit, Pablo Ramon, Jason Rudokas, Christopher Ryan, Dominic Schuhmacher, Medha Uppala and Rasmus Waagepetersen for contributions. o spatstat now Requires the package 'goftest' and Suggests the package 'Matrix'. o New dataset: 'sporophores' o Palm likelihood method for fitting cluster processes and Cox processes. o Quasi-likelihood and weighted composite likelihood methods for estimating trend in cluster processes and Cox processes. o Further extensions to model formulas in ppm and kppm. o Faster variance calculations for ppm objects. o One-sided tests and one-sided envelopes of summary functions. o Cramer-Von Mises and Anderson-Darling tests of spatial distribution. o Cressie-Read test statistic in quadrat counting tests. o Spatial cumulative distribution functions. o Faster algorithm for point pattern matching. o Improvements to plots. o Increased support for envelopes. o New generic functions 'Window', 'Frame' and 'domain'. o Directional K-function and directional distribution. o Raster calculations accelerated. o Summary functions accelerated. o Many improvements and bug fixes. 
o Version nickname: 'Wicked Plot' NEW DATASETS o sporophores Spatial pattern of three species of mycorrhizal fungi around a tree. [Contributed by E. David Ford.] NEW FUNCTIONS o improve.kppm Re-estimate the trend in a kppm (cluster or Cox) model using quasi-likelihood or weighted first-order composite likelihood. [Contributed by Abdollah Jalilian and Rasmus Waagepetersen.] o Window, Window<- Generic functions to extract and change the window of a spatial object in two dimensions. Methods for ppp, psp, im, and many others. o Frame, Frame<- Generic functions to extract and change the containing rectangle ('frame') of a spatial object in two dimensions. o domain Generic function to extract the spatial domain of a spatial object in any number of dimensions. o Ksector Directional version of the K-function. o pairorient Point pair orientation distribution. o spatialcdf Compute the spatial cumulative distribution of a spatial covariate, optionally using spatially-varying weights. o cdf.test [Supersedes 'kstest'.] Test of goodness-of-fit of a Poisson point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov, Cramer-Von Mises or Anderson-Darling test. o berman.test Replaces 'bermantest'. o harmonise.fv Make several functions compatible. o simulate.lppm Simulate a fitted point process model on a linear network. o subset.ppp, subset.lpp, subset.pp3, subset.ppx Methods for 'subset', for point patterns. o closepairs.pp3, crosspairs.pp3 Low-level functions to find all close pairs of points in three dimensions o volume.linnet Method for the generic 'volume'. Returns the length of the linear network. o padimage Pad the border of a pixel image. o as.layered Convert spatial data to a layered object. o panel.contour, panel.image, panel.histogram Panel functions for 'pairs' plots. o range.fv, min.fv, max.fv Range, minimum and maximum of function values. 
SIGNIFICANT USER-VISIBLE CHANGES o ppm.formula The left hand side of the formula can now be the name of an object in the list 'data', or an expression involving such objects. o ppm The right hand side of the formula can now include the symbol '.' representing all covariates in the list 'data'. o ppm New argument 'subset' makes it possible to fit the model in a subset of the spatial domain defined by an expression. o kppm New option: method="palm", will fit the model by maximising Palm likelihood. o pppdist Substantially accelerated. New argument 'auction' controls choice of algorithm. o rhohat New arguments 'weights' and 'horvitz' for weighted calculations. o persp.im Surface heights and colours can now be controlled by different images. Option to draw a grey apron around the sides of the perspective plot. Return value has a new attribute 'expand'. o plot.listof New arguments 'halign' and 'valign' give improved control over the alignment of panels. o plot.listof If all components of the list are objects of class 'fv' representing functions, then if equal.scales=TRUE, these functions will all be plotted with the same axes (i.e. the same xlim and the same ylim). o envelope The argument 'transform' is now processed by 'with.fv' giving more options, such as 'transform=expression(. - r)' o envelope, dclf.test, mad.test One-sided tests and one-sided envelopes can now be produced, by specifying the argument 'alternative'. o dclf.test, mad.test A pointwise test at fixed distance 'r' can now be performed by setting rinterval = c(r,r). o envelope New arguments 'fix.n' and 'fix.marks' for envelope.ppp and envelope.ppm make it easy to generate simulated patterns conditional on the total number of points, or on the number of points of each type. o quadrat.test Can now calculate the Cressie-Read test statistic instead of the Pearson X2 statistic. 
o Kres, Gres, Kcom, Gcom, psst, psstA, psstG New argument 'model' makes it easier to generate simulation envelopes of the residual summary functions. o layered, plot.layered The layer plot arguments can include the argument '.plot' specifying a function to perform the plotting instead of the generic 'plot'. o deriv.fv New arguments make it possible to differentiate a periodic function. o ppm Argument 'data' or 'covariates' can now include entries which are not spatial covariates, provided they do not appear in the model formula. o closepairs, crosspairs These functions are now generic, with methods for 'ppp' and 'pp3' o rLGCP Updated to conform to new usage of RandomFields package. Argument syntax has changed. Now allows control over pixel resolution. o bw.diggle New arguments 'correction' and 'hmax' for controlling the calculation. o predict.lppm New argument 'new.coef' for computing predictions with a different vector of model coefficients. o predict.ppm If 'locations' is a pixel image, its pixels determine the spatial locations for prediction. o cut.ppp Argument 'z' can now be a window. o split.ppp Argument 'f' can now be a window. o print.ppm, summary.ppm, coef.summary.ppm The table of parameter estimates, standard errors and confidence intervals now also includes the value of the (signed square root) Wald test statistic. o plot.im Now automatically detects problems in some Windows graphics displays and tries to avoid them. o plot.im The position of axis tick marks alongside the colour ribbon can now be controlled using the parameter 'at' in the argument 'ribargs'. o plot.ppp Can now plot numeric marks using characters chosen by 'pch' or 'chars' with size determined by mark value. o plot.ppp New argument 'meansize' for controlling mark scale. o hsvim, rgbim New argument 'autoscale' causes automatic scaling of colour channel values. o plot.ppp If type='n', a legend is now displayed when x is a marked point pattern. o whist Accelerated by a factor of 5. 
o Fest, Jest Accelerated by a factor of 2 to 3. o fryplot Accelerated. Now displays a legend if the point pattern is marked. Now handles numerical marks nicely. New argument 'axes'. o frypoints Accelerated. New arguments 'to', 'from' and 'dmax'. o duplicated.ppp New option: rule = 'unmark' o rjitter Argument 'radius' now has a default. o Smooth.msr New argument 'drop' o LambertW Now handles NA and infinite values. o update.ppm Now handles formulae with a left-hand side. o raster.x, raster.y, raster.xy These functions can now handle images, as well as masks. o Smooth.ppp If the mark values are exactly constant, the resulting smoothed values are now exactly constant. o eval.im, eval.fv, eval.fasp Argument 'envir' can now be a list, instead of an environment. o plot.ppp The printout (of the resulting symbol map object) now displays the numerical value of the mark scale. o with.fv Improved mathematical labels. o plot.fv Improved mathematical labels on x axis. o ppm Improved error messages. o vcov.ppm Computations greatly accelerated for Hybrid interactions and for Area-interaction models. o vcov.kppm Computations greatly accelerated (when fast=TRUE) o interp.im Argument 'x' can now be a point pattern. o pool.envelope Improved handling of text information. o miplot Improved layout. o print.summary.ppp Improved layout. Now respects spatstat.options('terse') o print.profilepl Improved layout. Now respects spatstat.options('terse') o anova.ppm Now respects spatstat.options('terse') o print.fv, print.envelope Now respect spatstat.options('terse') and options('width') o summary.envelope Now respects options('width') o kstest, bermantest These functions will soon be Deprecated. They are retained only for backward compatibility. BUG FIXES o vcov.ppm Sometimes gave wrong answers for Poisson models fitted by method='logi'. Fixed. o unnormdensity If weights were missing, the density was normalised, contrary to the documentation. Fixed. 
o logLik.ppm, anova.ppm, AIC For models fitted by 'ippm', the number of degrees of freedom was incorrect. Fixed. o im.apply Pixels outside the window were not assigned the value NA as they should. Fixed. o pixellate.owin Crashed, unpredictably, if the pixel raster had unequal numbers of rows and columns. [Spotted by Rasmus Waagepetersen.] Fixed. o vcov.ppm Crashed for pairwise interaction models fitted by method="logi". Fixed. o predict.ppm Crashed for models fitted by method="logi" if the model included external covariates. Fixed. o predict.ppm Crashed if the argument 'covariates' or 'data' in the original call to 'ppm' included entries that were not spatial covariates. [These entries were ignored by ppm but caused predict.ppm to crash.] Fixed. o simulate.kppm, rNeymanScott, rThomas, rMatClust Crashed randomly when simulating an inhomogeneous model. [Spotted by Philipp Hunziker.] Fixed. o bw.diggle In some extreme cases, generated an error message about `NaN values in Foreign function call.' [Spotted by Colin Beale.] Fixed. o textureplot Crashed if 'spacing' was too large. Fixed. o superimpose.psp Crashed if the result was empty. Fixed. o istat Crashed with an error message about 'vars'. Fixed. o dirichlet, delaunay, delaunay.distance Crashed in rare cases due to a problem in package 'deldir'. [Spotted by Pierre Legendre.] Fixed. o rgbim, hsvim Crashed if any argument was constant. Fixed. o scaletointerval Crashed if x was constant. Fixed. o linnet, [.linnet Crashed if the result contained only a single vertex. [Spotted by Daniel Esser.] Fixed. o plot.fv If some of the function values were NA, they were replaced by fictitious values (by linearly interpolating). Fixed. o crossdist.ppp Ignored argument 'squared' if periodic=FALSE. [Spotted by Mike Porter.] Fixed. o marks<-.ppp Ignored argument 'drop'. [Spotted by Oscar Garcia.] Fixed. o update.ppm Sometimes did not respect the argument 'use.internal'. Fixed. o plot.rhohat Did not respect the argument 'limitsonly'. 
Fixed. o contour.im Argument 'axes' defaulted to TRUE, but FALSE was intended. Fixed. o print.hyperframe, as.data.frame.hyperframe Column names were mangled if the hyperframe had a single row. Fixed. o as.psp.data.frame Generated a warning about partially-matched names in a data frame. [Spotted by Eric Gilleland.] Fixed. o plot.leverage.ppm Generated a warning from 'contour.default' if the leverage function was constant. Fixed. o plot.diagppm Issued warnings about unrecognised graphics parameters. Fixed. o update.symbolmap Discarded information about the range of input values. Fixed. o plot.fv Label for y axis was garbled, if argument 'shade' was given. Fixed. o plot.ppp The legend was sometimes plotted when it should not have been (e.g. when add=TRUE). Fixed. o plot.listof, plot.im In an array of plots, containing both images and other spatial objects, the titles of the panels were not correctly aligned. Fixed. o plot.tess, plot.quadratcount Ignored arguments like 'cex.main'. Fixed. o iplot Navigation buttons (Left, Right, Up, Down, Zoom In, Zoom Out) did not immediately refresh the plot. Fixed. o iplot.layered Reported an error 'invalid argument type' if all layers were deselected. Fixed. CHANGES IN spatstat VERSION 1.37-0 OVERVIEW o Ege Rubak is now a joint author of spatstat. o We thank Peter Forbes, Tom Lawrence and Mikko Vihtakari for contributions. o Spatstat now exceeds 100,000 lines of code. o New syntax for point process models (ppm, kppm, lppm) equivalent to syntax of lm, glm, ... o Covariates in ppm and kppm can now be tessellations. o Confidence intervals and prediction intervals for fitted models. o Quasirandom point patterns and sequences. o Plots using texture fill. o Support for mappings from data to graphical symbols and textures. o Automatic re-fitting of model in Ginhom, Kinhom, Finhom, Jinhom. o Support for Mixed Poisson distribution. 
o Interpretation of mark scale parameters has changed in plot.ppp o Syntax of multitype interactions (eg MultiStrauss) has changed. o Bug fix in Metropolis-Hastings simulation of 'StraussHard' models o Changed default behaviour of perfect simulation algorithms. o Improvements to layout of text output. o Version nickname: 'Model Prisoner' NEW CLASSES o symbolmap An object of class 'symbolmap' represents a mapping from data to graphical symbols o texturemap An object of class 'texturemap' represents a mapping from data to graphical textures. NEW FUNCTIONS o split.hyperframe, split<-.hyperframe methods for split and split<- for hyperframes. o dmixpois, pmixpois, qmixpois, rmixpois (log-)normal mixture of Poisson distributions. o vdCorput, Halton, Hammersley, rQuasi quasirandom sequences and quasirandom point patterns. o Smoothfun create a function(x,y) equivalent to the result of Smooth.ppp o minnndist, maxnndist Faster ways to compute min(nndist(X)), max(nndist(X)) o add.texture Draw a simple texture inside a specified region. o textureplot Display a factor-valued pixel image using texture fill. o texturemap Create a texture map o plot.texturemap Plot a texture map in the style of a legend o symbolmap Create a symbol map o update.symbolmap Modify a symbol map o invoke.symbolmap Apply symbol map to data values, and plot them o plot.symbolmap Plot the symbol map in the style of a legend o as.owin.boxx Converts a 'boxx' to an 'owin' if possible. o ellipse Create an elliptical window. o clickbox Interactively specify a rectangle, by point-and-click on a graphics device. o complementarycolour Compute the complementary colour value of a given colour value, or the complementary colour map of a given colour map. o gauss.hermite Gauss-Hermite quadrature approximation to the expectation of any function of a normally-distributed random variable. o boundingbox Generic function, replaces bounding.box o edges Extract boundary edges of a window. 
Replaces and extends 'as.psp.owin' o pixelcentres Extract centres of pixels as a point pattern. SIGNIFICANT USER-VISIBLE CHANGES o ppm, kppm, lppm NEW SYNTAX FOR POINT PROCESS MODELS The model-fitting functions 'ppm', 'kppm' and 'lppm' now accept a syntax similar to 'lm' or 'glm', for example ppm(X ~ Z), but still accept the older syntax ppm(X, ~Z). To support both kinds of syntax, the functions 'ppm' and 'kppm' are now generic, with methods for the classes 'formula', 'ppp' and 'quad'. The 'formula' method handles a syntax like ppm(X ~ Z) while the 'ppp' method handles the old syntax ppm(X, ~Z). Similarly 'lppm' is generic with methods for 'formula' and 'lpp'. o ppm, kppm, lppm Covariates appearing in the model formula can be objects which exist in the R session, instead of always having to be elements of the list `covariates'. o ppm.formula, kppm.formula, lppm.formula Formulae involving polynom() are now expanded, symbolically, so that polynom(x, 3) becomes x + I(x^2) + I(x^3) and polynom(x,y,2) becomes x + y + I(x^2) + I(x*y) + I(y^2). This neatens the model output, and also makes it possible for anova() and step() to add or delete single terms in the polynomial. o predict.ppm New argument 'interval' allows confidence intervals or prediction intervals to be calculated. o predict.ppm New argument 'total' allows for prediction of the total number of points in a specified region. o plot.ppp, plot.lpp For marked point patterns, a legend is automatically added to the plot, by default. Arguments have changed: new arguments include parameters of the legend, and an optional symbol map. Result has changed: it is now an object of class 'symbolmap'. o plot.ppp, plot.lpp Interpretation of the parameters 'markscale' and 'maxsize' has changed. The size of a circle in the plot is now defined as the circle's diameter instead of its radius. (Size of a square is measured, as before, by its side length). 
o parres Now handles the case where the fitted model is not separable but its restriction to the given 'subregion' is separable. o envelope Now issues a warning if the usage of envelope() appears to be `invalid' in the sense that the simulated patterns and the data pattern have not been treated equally. o Kinhom, Finhom, Ginhom, Jinhom New argument 'update'. If 'lambda' is a fitted model (class ppm or kppm) and update=TRUE, the model is re-fitted to the data pattern, before the intensities are computed. o rDiggleGratton, rDGS, rHardcore, rStrauss, rStraussHard By default the point pattern is now generated on a larger window, and trimmed to the original window. New argument expand=TRUE. o MultiStrauss, MultiHard, MultiStraussHard The syntax of these functions has changed. The new code should still accept the old syntax. o rhohat rhohat.ppp and rhohat.quad have new argument 'baseline' o ippm Algorithm improved. Argument syntax changed. o default.dummy, quadscheme Dummy points can now be generated by a quasirandom sequence. o plot.owin The window can now be filled with one of 8 different textures. Arguments changed. o ppm, kppm Covariates in the model can now be tessellations. o [.im New argument 'tight' allows the resulting image to be trimmed to the smallest possible rectangle. o [.psp, rlinegrid, rpoisline These functions now handle binary mask windows. o rotate The user can specify the centre of rotation. o rescale rescale() and all its methods now have argument 'unitname' which can be used to change the name of the unit of length. o anova.ppm Output format has been improved. Number of columns of result has changed. o print.ppp, print.summary.ppp, print.owin, print.summary.owin, print.im, print.summary.im, print.fv, print.msr, print.profilepl These functions now avoid over-running the text margin (i.e. they respect options('width') where possible). o layerplotargs<- Now handles any spatial object, converting it to a 'layered' object. 
o effectfun Improved display in case se.fit=TRUE. o scaletointerval New argument 'xrange' o contour.im New argument 'show.all'. Default value of 'axes' changed to FALSE. o identify.ppp Now handles multivariate marks. o plot.listof Improved layout. New arguments 'hsep', 'vsep'. Argument 'mar.panel' may have length 1, 2 or 4. o plot.splitppp This function is no longer identical to plot.listof. Instead it is a much simpler function which just calls plot.listof with equal.scales=TRUE. o anova.ppm Output is neater. o plot.layered New argument 'do.plot' o plot.psp New argument 'do.plot' o as.psp.owin New argument 'window' o plot.im, contour.im, textureplot New argument 'clipwin' o plot.ppp New argument 'clipwin' o plot.msr New argument 'how' allows density to be plotted as image and/or contour o diagnose.ppm, plot.diagppm More options for 'plot.neg' o plot.leverage.ppm, plot.influence.ppm, plot.msr Argument 'clipwin' can now be used to restrict the display to a subset of the full data. o [.hyperframe, [<-.hyperframe, $.hyperframe, $<-.hyperframe These functions are now documented. o leverage.ppm, influence.ppm, dfbetas.ppm Resulting objects are now smaller (in memory size). o print.ppm Now indicates whether the irregular parameters 'covfunargs' were optimised (by profilepl or ippm) or whether they were simply provided by the user. o plot.ppp A point pattern with numerical marks can now be plotted as filled dots with colours determined by the marks, by setting pch=21 and bg= o colourmap Now handles dates and date-time values (of class 'Date' or 'POSIXt'). o plot.ppp, print.ppp, summary.ppp Improved handling of dates and date-time values (of class 'Date' or 'POSIXt') in the marks of a point pattern. o cut.im Now refuses to handle images whose pixel values are factor, logical or character. o centroid.owin New argument 'as.ppp' o superimpose Improved default names for columns of marks. o Softcore() Improved printout. 
o kppm, lgcp.estpcf, lgcp.estK Adjusted to new structure of RandomFields package. No change in syntax. o data(murchison) This dataset now belongs to class 'listof' so that it can be plotted directly. o data(clmfires) The format of the covariate data has changed. The objects 'clmcov100' and 'clmcov200' are now elements of a list 'clmfires.extra'. o bounding.box This function is now Deprecated; it has been replaced by the generic boundingbox(). o as.psp.owin This function is now Deprecated; it has been replaced and extended by the function edges(). o plot.kstest Changed defaults so that the two curves are distinguishable. o with.fv Improved mathematical labels. BUG FIXES o intensity.quadratcount Values were incorrect for a rectangular tessellation (the matrix of intensities was transposed). Fixed. o rmh, simulate.ppm Simulation of the Strauss-hard core model (StraussHard) was incorrect (intensity of the simulated process was about 15% too low). Bug introduced in spatstat 1.31-0 (January 2013). o intensity.quadratcount Crashed for a rectangular tessellation with only a single row or column. Fixed. o model.images.ppm Crashed sometimes if the argument W was given. Fixed. o eval.im Crashed when applied to images with only a single row or column. Fixed. o ppp, marks<-.ppp If the marks were a vector of dates, they were erroneously converted to numbers. Fixed. o ippm Crashed if the model formula included an offset term that was not a function. Fixed. o leverage.ppm Crashed sometimes when the model had irregular parameters ('covfunargs'). Fixed. o residuals.ppm Crashed sometimes when type='score'. Fixed. o scaletointerval Did not handle dates and date-time values correctly. Fixed. o rbind.hyperframe, as.list.hyperframe Gave incorrect results for hyperframes with 1 row. Fixed. o Kinhom Did not renormalise the result (even when renormalise=TRUE), in some cases. Spotted by Peter Forbes. Fixed. o disc If mask=TRUE the disc was erroneously clipped to the square [-1,1] x [-1,1]. 
Fixed. o plot.fv Sometimes shaded the wrong half of the graph when the 'shade' coordinates were infinite. Fixed. o print.ppm Gave an error message if the coefficient vector had length zero. Fixed. o vcov.ppm Gave an error message if the coefficient vector had length zero. Fixed. o plot.distfun, as.im.distfun These functions effectively ignored the argument 'invert' in the original call to distfun. Fixed. o plot.msr Ignored certain additional arguments such as 'pch'. Fixed. o cut.im Crashed if the image had 1 row or 1 column of pixels. Fixed. o iplot.ppp Crashed with message about missing object 'vals'. Fixed. o effectfun Demanded a value for every covariate supplied in the original call to ppm, even for covariates which were not used in the model. Fixed. o plot.listof, plot.hyperframe When plotting 3D point patterns (class pp3), these functions issued warnings about 'add' being an unrecognised graphics argument. Fixed. CHANGES IN spatstat VERSION 1.36-0 OVERVIEW o We thank Sebastian Meyer, Kevin Ummer, Jean-Francois Coeurjolly, Ege Rubak, Rasmus Waagepetersen, Oscar Garcia and Sourav Das for contributions. o Important change to package dependencies. o Geometrical inconsistencies in polygons are now repaired automatically. o Improved quadrature schemes and reduced bias in ppm. o New vignette 'Summary of Recent Changes to Spatstat'. o Approximation to K function and pcf for Gibbs models. o Counterpart of 'apply' for lists of images. o Hexagonal grids and tessellations. o Extensions to scan test and Allard-Fraley cluster set estimator. o Change the parameters of a fitted model before simulating it. o Accelerated Kest, Kinhom for rectangular windows. o Extensions and improvements to plotting functions. o Improvements to labelling of 'fv' objects. o New demo of summary functions. o More methods for 'intensity'. 
o Version nickname: 'Intense Scrutiny' NEW FUNCTIONS o Kmodel.ppm, pcfmodel.ppm Compute approximation to K-function or pair correlation function of a Gibbs point process model. o im.apply Apply a function to corresponding pixel values in several images. o hexgrid, hextess Create a hexagonal grid of points, or a tessellation of hexagonal tiles o shift.tess, rotate.tess, reflect.tess, scalardilate.tess, affine.tess Apply a geometrical transformation to a tessellation. o quantile.ewcdf Extract quantiles from a weighted cumulative distribution function. o scanLRTS Evaluate the spatially-varying test statistic for the scan test. o pcfmulti General multitype pair correlation function o intensity.splitppp Estimate intensity in each component of a split point pattern. o intensity.quadratcount Use quadrat counts to estimate intensity in each quadrat. o as.owin.quadratcount, as.owin.quadrattest Extract the spatial window in which quadrat counts were performed. o reload.or.compute Utility function for R scripts: either reload results from file, or compute them. o to.grey Convert colour to greyscale. o Smooth.im Method for Smooth() for pixel images. Currently identical to blur(). o demo(sumfun) Demonstration of nonparametric summary functions in spatstat. SIGNIFICANT USER-VISIBLE CHANGES o Package Dependencies spatstat now "Imports" (rather than "Depends" on) the libraries mgcv, deldir, abind, tensor, polyclip. This means that these libraries are not accessible to the user unless the user explicitly loads them by typing 'library(mgcv)' and so on. o owin, as.owin Polygon data are no longer subjected to strict checks on geometrical validity (self-crossing points, overlaps etc.) Instead, polygon geometry is automatically repaired. o ppm The default quadrature scheme for a point pattern has been improved (in the case of a non-rectangular window) to remove a possible source of bias. o Performance various parts of spatstat now run slightly faster. 
o scan.test Now handles multiple values of circle radius 'r'. o plot.scan.test, as.im.scan.test These functions can now give the optimal value of circle radius 'r'. o pcfcross, pcfdot Algorithms have been reimplemented using a single-pass kernel smoother and now run much faster. Bandwidth selection rule improved. o plot.listof, plot.splitppp Default behaviour has changed: panels are now plotted on different scales. o plot.listof, plot.splitppp When 'equal.scales=TRUE' the panels are plotted on exactly equal scales and are exactly aligned (under certain conditions). o ppp, marks.ppp, marks<-.ppp New argument 'drop' determines whether a data frame with a single column will be converted to a vector. o simulate.ppm, rmh.ppm, rmhmodel.ppm New argument 'new.coef' allows the user to change the parameters of a fitted model, before it is simulated. o logLik.ppm New argument 'new.coef' allows the user to evaluate the loglikelihood for a different value of the parameter. o clusterset The argument 'result' has been renamed 'what'. It is now possible to give multiple values to 'what' so that both types of result can be computed together. o residuals.ppm Argument 'coefs' has been renamed 'new.coef' for consistency with fitted.ppm etc. o residuals.ppm If drop=TRUE the window associated with the residuals is now taken to be the domain of integration of the composite likelihood. o intensity.ppp Now has argument 'weights' o density.ppp, Smooth.ppp, markmean, markvar, intensity.ppp Argument 'weights' can now be an 'expression'. o pcf New argument 'domain' causes the computation to be restricted to a subset of the window. o nnclean The result now has attributes which give the fitted parameter values, information about the fitting procedure, and the histogram bar heights. o nnclean Extra arguments are now passed to hist.default. o plot.tess For a tessellation represented by a pixel image, plot.tess no longer treats the pixel labels as palette colours. 
o relrisk New argument 'case' allows the user to specify which mark value corresponds to the cases in a case-control dataset. o Kinhom Now accepts correction="good" o spatstat.options New option ('monochrome') controls whether plots generated by spatstat will be displayed in colour or in greyscale. This will eventually be applied to all plot commands in spatstat. o plot.im, persp.im, contour.im, plot.owin, plot.psp, plot.fv, plot.fasp These functions now obey spatstat.options('monochrome') o plot.ppp, plot.owin, plot.im, plot.psp, plot.tess, plot.layered New universal argument 'show.all' determines what happens when a plot is added to an existing plot. If show.all = TRUE then everything is plotted, including the main title and colour ribbon. o plot.ppp New argument 'show.window' o plot.im New arguments 'add' and 'do.plot'. More arguments recognised by 'ribargs' o plot.layered New arguments 'add', 'main' Better argument handling. o plot.fv Improved handling of argument 'shade' o layered, layerplotargs, plot.layered The plotting argument can now be a list of length 1, which will be replicated to the correct length. o varblock Ugly legends have been repaired. o quad.ppm New argument 'clip' o edge.Trans New arguments 'dx', 'dy' o disc Argument 'centre' can be in various formats. o affine, shift Argument 'vec' can be in various formats. o Geyer, BadGey A warning is no longer issued when the parameter 'sat' is fractional. o adaptive.density Now has argument 'verbose' o Smooth.ppp 'sigma' is now a formal argument of Smooth.ppp o plot.quadratcount, plot.quadrattest These functions have now been documented. o Summary functions and envelopes Improved mathematical labels in plots. o Kest Accelerated, in the case of a rectangular window. o Kscaled Argument 'lambda' can now be a fitted model (class ppm) o print.fv Improved layout. o plot.bermantest Improved graphics. o which.max.im This function is now deprecated. 
which.max.im(x) is superseded by im.apply(x, which.max) o smooth.ppp, smooth.fv, smooth.msr These functions are now deprecated, in favour of 'Smooth' with a capital 'S' BUG FIXES o bw.ppl Crashed if the point pattern had multiple points at the same location. Fixed. o quantile Crashed when applied to the result of 'ewcdf'. Fixed. o marks<-.ppp Crashed with a message about 'unrecognised format' if the current or replacement values of marks were date/time values (belonging to class 'Date' or 'POSIXt'). Fixed. o plot.im Crashed in case log=TRUE if the window was not a rectangle. Fixed. o vcov.ppm Crashed sometimes for models with a hard core term (Hardcore, StraussHard, MultiHard or MultiStrauss interactions). Spotted by Rasmus Waagepetersen. Fixed. o multiplicity.data.frame Results were incorrect and included NA's (spotted by Sebastian Meyer). Fixed. o markvar Values were incorrect. Fixed. o Smooth.ppp Ignored argument 'diggle'. Fixed. o rotate.im, affine.im Factor-valued images were not handled correctly. Fixed. o shift.layered If argument 'origin' was used, different layers were shifted by different amounts. Fixed. o tile.areas Sometimes returned a list instead of a numeric vector. Fixed. o print.ppp If the marks were date/time values (belonging to class 'Date' or 'POSIXt'), print.ppp reported that they were double precision numbers. Fixed. o plot.layered Graphics were mangled if the argument 'add=FALSE' was given explicitly. Fixed. o Smooth.ppp The argument 'sigma' was only recognised if it was explicitly named. For example in 'Smooth(X, 5)' the '5' was ignored. Fixed. o clusterset The bounding frame of the result was smaller than the original bounding frame of the point pattern dataset, when result="domain" and exact=TRUE. Fixed. o plot.im Ignored argument 'col' if it was a 'function(n)'. Fixed. o Kinhom Ignored argument 'correction' if there were more than 1000 points. Fixed. o [.fv Mangled the plot label for the y axis. Fixed. 
o cbind.fv Mangled the plot label for the y axis. Fixed. o plot.envelope Main title was always 'x'. Fixed. o print.ppp Ran over the right margin. Fixed. o union.owin, intersect.owin, setminus.owin Sometimes deleted the name of the unit of length. Fixed. CHANGES IN spatstat VERSION 1.35-0 OVERVIEW o We thank Melanie Bell, Leanne Bischof, Ida-Maria Sintorn, Ege Rubak, Martin Hazelton, Oscar Garcia, Rasmus Waagepetersen, Abdollah Jalilian and Jens Oehlschlaegel for contributions. o Support for analysing replicated spatial point patterns. o New vignette on analysing replicated spatial point patterns. o Objective function surface plots. o Estimator of point process intensity using nearest neighbour distances. o Improved estimator of pair correlation function. o Four new datasets. o Simple point-and-click interface functions for general use. o More support for fv objects. o More support for ppx objects. o Extensions to nearest neighbour functions. o Morphological operations accelerated. o Bug fix to pair correlation functions. o Bug fix to k-th nearest neighbour distances o Version nickname: 'Multiple Personality' NEW CLASSES o mppm An object of class 'mppm' represents a Gibbs point process model fitted to several point pattern datasets. The point patterns may be treated as independent replicates of the same point process, or as the responses in an experimental design, so that the model may depend on covariates associated with the design. Methods for this class include print, plot, predict, anova and so on. o objsurf An object of class 'objsurf' contains values of the likelihood or objective function in a neighbourhood of the maximum. o simplepanel An object of class 'simplepanel' represents a spatial arrangement of buttons that respond to mouse clicks, supporting a simple, robust graphical interface. NEW FUNCTIONS o mppm Fit a Gibbs model to several point patterns. 
The point pattern data may be organised as a designed experiment and the model may depend on covariates associated with the design. o anova.mppm Analysis of Deviance for models of class mppm o coef.mppm Extract fitted coefficients from a model of class mppm o fitted.mppm Fitted intensity or conditional intensity for a model of class mppm o kstest.mppm Kolmogorov-Smirnov test of goodness-of-fit for a model of class mppm o logLik.mppm log likelihood or log pseudolikelihood for a model of class mppm o plot.mppm Plot the fitted intensity or conditional intensity of a model of class mppm o predict.mppm Compute the fitted intensity or conditional intensity of a model of class mppm o quadrat.test Quadrat counting test of goodness-of-fit for a model of class mppm o residuals.mppm Point process residuals for a model of class mppm o subfits Extract point process models for each individual point pattern dataset, from a model of class mppm o vcov.mppm Variance-covariance matrix for a model of class mppm o integral.msr Integral of a measure. o objsurf For a model fitted by optimising an objective function, this command computes the objective function in a neighbourhood of the optimal value. o contour.objsurf, image.objsurf, persp.objsurf, plot.objsurf Plot an 'objsurf' object. o fvnames Define groups of columns in a function value table, for use in plot.fv, etc o multiplicity New generic function for which multiplicity.ppp is a method. o unique.ppx, duplicated.ppx, multiplicity.ppx Methods for unique(), duplicated() and multiplicity() for 'ppx' objects. These also work for 'pp3' and 'lpp' objects. o closepairs, crosspairs, closepaircounts, crosspaircounts Low-level functions for finding all close pairs of points o nndensity Estimate point process intensity using k-th nearest neighbour distances o simplepanel, run.simplepanel Support for a simple point-and-click interface for general use. NEW DATASETS o pyramidal Diggle-Lange-Benes data on pyramidal neurons in cingulate cortex. 
31 point patterns divided into 3 groups. o waterstriders Nummelin-Penttinen waterstriders data. Three independent replicates of a point pattern formed by insects. o simba Simulated data example for mppm. Two groups of point patterns with different interpoint interactions. o demohyper Simulated data example for mppm. Point patterns and pixel image covariates, in two groups with different regression coefficients. SIGNIFICANT USER-VISIBLE CHANGES o plot.hyperframe The argument 'e' now has a different format. Instead of plot(h, plot(XYZ)) one must now type plot(h, quote(plot(XYZ))) This is necessary in order to avoid problems with 'S4 method dispatch'. o pcf.ppp, pcfinhom New argument 'divisor' enables better performance of the estimator of pair correlation function for distances close to zero. o applynbd The arguments N, R and criterion may now be specified together. o markstat The arguments N and R may now be specified together. o ppx New argument 'simplify' allows the result to be converted to an object of class 'ppp' or 'pp3' if appropriate. o as.function.fv Now allows multiple columns to be interpolated o multiplicity.ppp This function is now a method for the generic 'multiplicity'. It has also been accelerated. o nnfun.ppp, distfun.ppp New argument 'k' allows these functions to compute k-th nearest neighbours. o rVarGamma, kppm, vargamma.estK, vargamma.estpcf New argument 'nu.pcf' provides an alternative way to specify the kernel shape in the VarGamma model, instead of the existing argument 'nu.ker'. Function calls that use the ambiguous argument name 'nu' will no longer be accepted. o nnmap Image is now clipped to the original window. o dilation, erosion, opening, closing Polygonal computations greatly accelerated. o plot.colourmap Improved appearance and increased options, for discrete colourmaps. 
o plot.msr Improved appearance o plot.ppp, plot.owin An `empty' plot can now be generated by setting type="n" o nndist.ppp, nnwhich.ppp, nncross.ppp Column names of the result are now more informative. BUG FIXES o nncross.ppp Results were completely incorrect when k > 1. Spotted by Jens Oehschlaegel. Bug was introduced in spatstat 1.34-1. Fixed. o rVarGamma Simulations were incorrect; they were generated using the wrong value of the parameter 'nu.ker'. Spotted by Rasmus Waagepetersen and Abdollah Jalilian. Bug was always present. Fixed. o pair correlation functions (pcf.ppp, pcfdot, pcfcross, pcfinhom, ...) The result had a negative bias at the maximum 'r' value, because contributions to the pcf estimate from interpoint distances greater than max(r) were mistakenly omitted. Spotted by Rasmus Waagepetersen and Abdollah Jalilian. Bug was always present. Fixed. o demo(spatstat) This demonstration script had some unwanted side-effects, such as rescaling the coordinates of standard datasets 'bramblecanes', 'amacrine' and 'demopat', which caused the demonstration to crash when it was repeated several times, and caused errors in demo(data). Fixed. o rmh Visual debugger crashed sometimes with message 'XI not found'. Fixed. o predict.ppm Crashed if the model was fitted using 'covfunargs'. Fixed. o bounding.box Crashed if one of the arguments was NULL. Fixed. o multiplicity.ppp Did not handle data frames of marks. Fixed. CHANGES IN spatstat VERSION 1.34-1 OVERVIEW o We thank Kurt Hornik, Ted Rosenbaum, Ege Rubak and Achim Zeileis for contributions. o Important bug fix. SIGNIFICANT USER-VISIBLE CHANGES o as.box3 Now accepts objects of class 'ppx' or 'boxx'. o crossdist.ppp, crossdist.pp3, crossdist.default New argument 'squared' allows the squared distances to be computed (saving computation time in some applications) BUG FIXES o union.owin, is.subset.owin, dilation.owin Results were sometimes completely wrong for polygons with holes. Spotted by Ted Rosenbaum. Fixed. 
o psstA, areaLoss Crashed in some cases, with error message 'Number of items to replace is not a multiple of replacement length'. Spotted by Achim Zeileis. Fixed. CHANGES IN spatstat VERSION 1.34-0 OVERVIEW o We thank Andrew Bevan, Ege Rubak, Aruna Jammalamadaka, Greg McSwiggan, Jeff Marcus, Jose M Blanco Moreno, and Brian Ripley for contributions. o spatstat and all its dependencies are now Free Open Source. o spatstat does not require the package 'gpclib' any more. o spatstat now depends on the packages 'tensor', 'abind' and 'polyclip' o polygon clipping is now enabled always. o Substantially more support for point patterns on linear networks. o Faster computations for pairwise interaction models. o Bug fixes in nearest neighbour calculations. o Bug fix in leverage and influence diagnostics. o Version nickname: "Window Cleaner" o spatstat now requires R version 3.0.2 or later NEW FUNCTIONS o as.lpp Convert data to a point pattern on a linear network. o distfun.lpp Distance function for point pattern on a linear network. o eval.linim Evaluate expression involving pixel images on a linear network. o linearKcross, linearKdot, linearKcross.inhom, linearKdot.inhom Multitype K functions for point patterns on a linear network o linearmarkconnect, linearmarkequal Mark connection function and mark equality function for multitype point patterns on a linear network o linearpcfcross, linearpcfdot, linearpcfcross.inhom, linearpcfdot.inhom Multitype pair correlation functions for point patterns on a linear network o linfun New class of functions defined on a linear network o nndist.lpp, nnwhich.lpp, nncross.lpp Methods for nndist, nnwhich, nncross for point patterns on a linear network o nnfun.lpp Method for nnfun for point patterns on a linear network o vcov.lppm Variance-covariance matrix for parameter estimates of a fitted point process model on a linear network. 
o bilinearform Computes a bilinear form o tilenames, tilenames<- Extract or change the names of tiles in a tessellation. SIGNIFICANT USER-VISIBLE CHANGES o package dependencies Previous versions of spatstat used the package 'gpclib' to perform geometrical calculations on polygons. Spatstat now uses the package 'polyclip' for polygon calculations instead. o free open-source licence The restrictive licence conditions of 'gpclib' no longer apply to users of spatstat. Spatstat and all its dependencies are now covered by a free open-source licence. o polygon clipping In previous versions of spatstat, geometrical calculations on polygons could be performed 'exactly' using gpclib or 'approximately' using pixel discretisation. Polygon calculations are now always performed 'exactly'. o intersect.owin, union.owin, setminus.owin If A and B are polygons, the result is a polygon. o erosion, dilation, opening, closing If the original set is a polygon, the result is a polygon. o intersect.tess, dirichlet The tiles of the resulting tessellation are polygons if the input was polygonal. o plot.owin Polygons with holes can now be plotted with filled colours on any device. o lppm New arguments 'eps' and 'nd' control the quadrature scheme. o pairwise interaction Gibbs models Many calculations for these models have been accelerated. BUG FIXES o nncross.pp3 Values were completely incorrect in some cases. Usually accompanied by a warning about NA values. (Spotted by Andrew Bevan.) Fixed. o nnmap, nnmark A small proportion of pixels had incorrect values. [These were the pixels lying on the boundary of a Dirichlet cell.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Results were incorrect for non-Poisson processes. Fixed. o distcdf Results were incorrect in some cases when W was a window and V was a point pattern. Fixed. o Kcross, Kdot, pcfcross, pcfdot Results were incorrect in some rare cases. Fixed. o as.fv.kppm Erroneously returned a NULL value. Fixed. 
o vcov.ppm For point process models fitted with method = 'logi', sometimes crashed with error "object 'fit' not found". (Spotted by Ege Rubak). Fixed. o vcov.ppm For multitype point process models, sometimes crashed with error "argument 'par' is missing". Fixed. o plot.im Crashed if some of the pixel values were infinite. Fixed. o owin owin(poly=..) crashed if there were NA's in the polygon coordinates. Spotted by Jeff Marcus. Fixed. o plot.fv Crashed, giving an incomprehensible error, if the plot formula contained a number with a decimal point. Fixed. o alltypes Crashed if envelopes=TRUE and global=TRUE, with error message 'csr.theo not found'. Spotted by Jose M Blanco Moreno. Fixed. o chop.tess, rMosaicField Format of result was garbled in some cases. Fixed. o vcov.ppm Sometimes gave an irrelevant warning "parallel option not available". Fixed. CHANGES IN spatstat VERSION 1.33-0 OVERVIEW o We thank Kurt Hornik and Brian Ripley for advice. o The package namespace has been modified. o Numerous internal changes. o Likelihood cross-validation for smoothing bandwidth. o More flexible models of intensity in cluster/Cox processes. o New generic function for smoothing. o Version nickname: 'Titanic Deckchair' NEW FUNCTIONS o bw.ppl Likelihood cross-validation technique for bandwidth selection in kernel smoothing. o is.lppm, is.kppm, is.slrm Tests whether an object is of class 'lppm', 'kppm' or 'slrm' o Smooth New generic function for spatial smoothing. o Smooth.ppp, Smooth.fv, Smooth.msr Methods for Smooth (identical to smooth.ppp, smooth.fv, smooth.msr respectively) o fitted.kppm Method for 'fitted' for cluster/Cox models SIGNIFICANT USER-VISIBLE CHANGES o namespace The namespace of the spatstat package has been changed. o internal functions Some undocumented internal functions are no longer visible, as they are no longer exported in the namespace. These functions can still be accessed using the form spatstat:::functionname. 
Functions that are not visible are not guaranteed to exist or to remain the same in future. o methods For some generic functions defined in the spatstat package, it is possible that R may fail to find one of the methods for the generic. This is a temporary problem due to a restriction on the size of the namespace in R 3.0.1. It will be fixed in future versions of R and spatstat. It only applies to methods for a generic which is a spatstat function (such as nndist) and does not apply to methods for generics defined elsewhere (such as density). In the meantime, if this problem should occur, it can be avoided by calling the method explicitly, in the form spatstat:::genericname.classname. o speed The package should run slightly faster overall, due to the improvement of the namespace, and changes to internal code. o envelope New argument 'envir.simul' determines the environment in which to evaluate the expression 'simulate'. o kppm More flexible models of the intensity, and greater control over the intensity fitting procedure, are now possible using the arguments 'covfunargs', 'use.gam', 'nd', 'eps' passed to ppm. Also the argument 'X' may now be a quadrature scheme. o distcdf Arguments W and V can now be point patterns. o Kest New option: correction = "good" selects the best edge correction that can be computed in reasonable time. o bw.diggle Accelerated. o predict.ppm Calculation of standard error has been accelerated. o smooth.ppp, smooth.fv, smooth.msr These functions will soon be 'Deprecated' in favour of the methods Smooth.ppp, Smooth.fv, Smooth.msr respectively. o stratrand, overlap.owin, update.slrm, edge.Trans, edge.Ripley These already-existing functions are now documented. BUG FIXES o kppm, matclust.estpcf, pcfmodel The pair correlation function of the Matern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in matclust.estpcf() or kppm(clusters="MatClust"). Fixed. 
o anova.ppm Would cause an error in future versions of R when 'anova.glm' is removed from the namespace. Fixed. CHANGES IN spatstat VERSION 1.32-0 OVERVIEW o We thank Ege Rubak for major contributions. o Thanks also to Patrick Donnelly, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Sean O'Riordan. o New 'logistic likelihood' method for fitting Gibbs models. o Substantial acceleration of several functions including profile maximum pseudolikelihood and variance calculations for Gibbs models. o Nearest neighbours for point patterns in 3D o Nearest-neighbour interpolation in 2D o New 'progress plots' o Hard core thresholds can be estimated automatically. o More support for colour maps o More support for 'fv' objects o Spatstat now has version nicknames. The current version is "Logistical Nightmare". o Minor improvements and bug fixes. NEW FUNCTIONS o nncross.pp3 Method for 'nncross' for point patterns in 3D o nnmark Mark of nearest neighbour - can be used for interpolation o dclf.progress, mad.progress Progress plots (envelope representations) for the DCLF and MAD tests. o deriv.fv Numerical differentiation for 'fv' objects. o interp.colourmap Smooth interpolation of colour map objects - makes it easy to build colour maps with gradual changes in colour o tweak.colourmap Change individual colour values in a colour map object o beachcolourmap Colour scheme appropriate for `altitudes' (signed numerical values) o as.fv Convert various kinds of data to an 'fv' object o quadscheme.logi Generates quadrature schemes for the logistic method of ppm. o beginner Introduction for beginners. SIGNIFICANT USER-VISIBLE CHANGES o ppm New option: method = "logi" Fits a Gibbs model by the newly developed 'logistic likelihood' method which is often faster and more accurate than maximum pseudolikelihood. Code contributed by Ege Rubak. o profilepl Greatly accelerated, especially for area-interaction models. o vcov.ppm Greatly accelerated for higher-order interaction models. 
o smooth.ppp Now handles bandwidths equal to zero (by invoking 'nnmark') o Hardcore, StraussHard The hard core distance 'hc' can now be omitted; it will be estimated from data. o plot.ppp Now behaves differently if there are multiple columns of marks. Each column of marks is plotted, in a series of separate plots arranged side-by-side. o plot.im Argument 'col' can now be a function o lohboot Now computes confidence intervals for L-functions as well (fun="Lest" or fun="Linhom") o dclf.test, mad.test The argument X can now be an object produced by a previous call to dclf.test or mad. o plot.fv Labelling of plots has been improved in some cases. o smooth.fv Further options added. o density.ppp The argument 'weights' can now be a matrix. o smooth.ppp Accelerated, when there are several columns of marks. o density.ppp Accelerated slightly. o simulate.ppm, simulate.kppm The total computation time is also returned. o simulate.kppm Now catches errors (such as 'insufficient memory'). o latest.news, licence.polygons Can now be executed by typing the name of the function without parentheses. o latest.news The text is now displayed one page at a time. BUG FIXES o Hest, Gfox, Jfox The 'raw' estimate was not computed correctly (or at least it was not the raw estimate described in the help files). Spotted by Tom Lawrence. Fixed. o edges2vees Format of result was incorrect if there were fewer than 3 edges. Fixed. o Jfox The theoretical value (corresponding to independence between X and Y) was erroneously given as 0 instead of 1. Spotted by Patrick Donnelly. Fixed. o ppm, quadscheme, default.dummy If the grid spacing parameter 'eps' was specified, the quadrature scheme was sometimes slightly incorrect (missing a few dummy points near the window boundary). Fixed. o print.timed Matrices were printed incorrectly. Fixed. CHANGES IN spatstat VERSION 1.31-3 OVERVIEW o spatstat now 'Suggests' the package 'tensor' o Code slightly accelerated. o More support for pooling of envelopes. 
o Bug fixes. NEW FUNCTIONS o nnmap Given a point pattern, finds the k-th nearest point in the pattern from each pixel in a raster. o coef.fii, coef.summary.fii Extract the interaction coefficients of a fitted interpoint interaction o edges2vees Low-level function for finding triples in a graph. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm New argument 'correction' allows choice of edge correction when calculating the conditional intensity. o pool.envelope New arguments 'savefuns' and 'savepatterns'. o pool.envelope Envelopes generated with VARIANCE=TRUE can now be pooled. o pool.envelope The plot settings of the input data are now respected. o Numerous functions have been slightly accelerated. BUG FIXES o predict.ppm Calculation of the conditional intensity omitted the edge correction if correction='translate' or correction='periodic'. Fixed. o shift.lpp, rotate.lpp, scalardilate.lpp, affine.lpp, shift.linnet, rotate.linnet, scalardilate.linnet, affine.linnet The enclosing window was not correctly transformed. Fixed. o rHardcore, rStraussHard, rDiggleGratton, rDGS The return value was invisible. Fixed. o ppm In rare cases the results obtained with forcefit=FALSE and forcefit=TRUE were different, due to numerical rounding effects. Fixed. CHANGES IN spatstat VERSION 1.31-2 OVERVIEW o We thank Robin Corria Anslie, Julian Gilbey, Kiran Marchikanti, Ege Rubak and Thordis Linda Thorarinsdottir for contributions. 
o spatstat now depends on R 3.0.0 o More support for linear networks o More functionality for nearest neighbours o Bug fix in fitting Geyer model o Performance improvements and bug fixes NEW FUNCTIONS o affine.lpp, shift.lpp, rotate.lpp, rescale.lpp, scalardilate.lpp Geometrical transformations for point patterns on a linear network o affine.linnet, shift.linnet, rotate.linnet, rescale.linnet, scalardilate.linnet Geometrical transformations for linear networks o [.linnet Subset operator for linear networks o timed Records the computation time taken SIGNIFICANT USER-VISIBLE CHANGES o nncross nncross.ppp can now find the k-th nearest neighbours, for any k. o nndist, nnwhich New argument 'by' makes it possible to find nearest neighbours belonging to specified subsets in a point pattern, for example, the nearest neighbour of each type in a multitype point pattern. o [.fv Now handles the argument 'drop'. o with.fv Argument 'drop' replaced by new argument 'fun' (with different interpretation). o [.lpp Subset index may now be a window (class 'owin') o Kest Options correction='border' and correction='none' now run about 4 times faster, thanks to Julian Gilbey. o density.ppp Numerical underflow no longer occurs when sigma is very small and 'at="points"'. A warning is no longer issued. Thanks to Robin Corria Anslie. o crossing.psp New argument 'fatal' allows the user to handle empty intersections o union.owin It is now guaranteed that if A is a subset of B, then union.owin(A,B)=B. o plot.colourmap Now passes arguments to axis() to control the plot. Appearance of plot improved. o image.listof Now passes arguments to plot.colourmap() if equal.ribbon=TRUE. o kppm Accelerated (especially for large datasets). o plot.envelope plot.envelope is now equivalent to plot.fv and is essentially redundant. o rThomas, rMatClust, rNeymanScott Improved explanations in help files. o All functions Many functions have been slightly accelerated. 
BUG FIXES o ppm Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter 'sat'. Spotted by Thordis Linda Thorarinsdottir. Bug introduced in spatstat 1.20-0, July 2010. Fixed. o ppm Fitting a stationary Poisson process using a nonzero value of 'rbord', as in "ppm(X, rbord=R)" with R > 0, gave incorrect results. Fixed. o predict.slrm Crashed with message 'longer object length is not a multiple of shorter object length' if the original data window was not a rectangle. Fixed. o iplot Main title was sometimes incorrect. Fixed. o plot.layered Ignored argument 'main' in some cases. Fixed. o plot.listof, image.listof Crashed sometimes with a message 'figure margins too large' when equal.ribbon=TRUE. Fixed. o print.ppx Crashed if the object contained local coordinates. Fixed. o transect.im Crashed if the transect lay partially outside the image domain. Fixed. o rthin Crashed if X was empty. Fixed. o max.im, min.im, range.im Ignored additional arguments after the first argument. Fixed. o update.lppm Updated object did not remember the name of the original dataset. Fixed. o envelope Grey shading disappeared from plots of envelope objects when the envelopes were transformed using eval.fv or eval.fasp. Fixed. CHANGES IN spatstat VERSION 1.31-1 OVERVIEW o We thank Marcelino de la Cruz, Daniel Esser, Jason Goldstick, Abdollah Jalilian, Ege Rubak and Fabrice Vinatier for contributions. o Nonparametric estimation and tests for point patterns in a linear network. o More support for 'layered' objects. o Find clumps in a point pattern. o Connected component interaction model. o Improvements to interactive plots. o Visual debugger for Metropolis-Hastings algorithm. o Bug fix in Metropolis-Hastings simulation of Geyer process. o Faster Metropolis-Hastings simulation. o Faster computation of 'envelope', 'fv' and 'fasp' objects. o Improvements and bug fixes. NEW FUNCTIONS o connected.ppp Find clumps in a point pattern. 
o kstest.lpp, kstest.lppm The spatial Kolmogorov-Smirnov test can now be applied to point patterns on a linear network (class 'lpp') and point processes on a linear network (class 'lppm'). o bermantest.lpp, bermantest.lppm Berman's Z1 and Z2 tests can now be applied to point patterns on a linear network (class 'lpp') and point processes on a linear network (class 'lppm'). o rhohat.lpp, rhohat.lppm Nonparametric estimation of the dependence of a point pattern on a spatial covariate: 'rhohat' now applies to objects of class 'lpp' and 'lppm'. o intensity.lpp Empirical intensity of a point pattern on a linear network. o as.function.rhohat Converts a 'rhohat' object to a function, with extrapolation beyond the endpoints. o [.layered Subset operator for layered objects. o shift, rotate, affine, rescale, reflect, flipxy, scalardilate These geometrical transformations now work for 'layered' objects. o iplot.layered Interactive plotting for 'layered' objects. o as.owin.layered Method for as.owin for layered objects. o [.owin Subset operator for windows, equivalent to intersect.owin. o rcellnumber Generates random integers for the Baddeley-Silverman counterexample. o is.lpp Tests whether an object is a point pattern on a linear network. o is.stationary.lppm, is.poisson.lppm New methods for is.stationary and is.poisson for class 'lppm' o sessionLibs Print library names and version numbers (for use in Sweave scripts) SIGNIFICANT USER-VISIBLE CHANGES o iplot iplot is now generic, with methods for 'ppp', 'layered' and 'default'. iplot methods now support zoom and pan navigation. o rmh.default New argument 'snoop' allows the user to activate a visual debugger for the Metropolis-Hastings algorithm. o connected connected() is now generic, with methods for 'im', 'owin' and 'ppp'. o alltypes Now works for lpp objects o rlabel Now works for lpp, pp3, ppx objects o plot.kstest Can now perform P-P and Q-Q plots as well. 
o plot.fasp New argument 'samey' controls whether all panels have the same y limits. o plot.fasp Changed default value of 'samex'. o Objects of class 'envelope', 'fv' and 'fasp' Reduced computation time and storage required for these objects. o pcfmodel.kppm Improved calculation. o plot.fv Improved collision-avoidance algorithm (for avoiding overlaps between curves and legend) o ppm Improved error handling o envelope All methods for 'envelope' now handle fun=NULL o setminus.owin Better handling of the case where both arguments are rectangles. o rmh Simulation has been further accelerated. o lppm Accelerated. o vcov.ppm Accelerated. o marktable Accelerated. o Triplets() interaction Accelerated. o alltypes Accelerated when envelope=TRUE. BUG FIXES o rmh Simulation of the Geyer saturation process was incorrect. [Bug introduced in previous version, spatstat 1.31-0.] Fixed. o rmh Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of 'nrep') were incorrect, while long runs were correct. [Bug introduced in spatstat 1.17-0, october 2009.] Fixed. o ppm Objects fitted with use.gam=TRUE caused fatal errors in various functions including print, summary, vcov and model.frame. Spotted by Jason Goldstick. Fixed. o lpp, runiflpp, rpoislpp Empty point patterns caused an error. Fixed. o rmh.default Crashed for hybrid models, with message 'Attempt to apply non-function'. Spotted by Ege Rubak. Fixed. o relrisk Crashed when 'at="points"' for a multitype pattern with more than 2 types. Spotted by Marcelino de la Cruz. Fixed. o erosion.owin, dilation.psp, border Ignored the arguments "..." in some cases (namely when the window was polygonal and 'gpclib' was disabled). Fixed. o rsyst, rcell Did not correctly handle the argument 'dx'. Spotted by Fabrice Vinatier. Fixed. o correction="trans" Various functions such as Kest no longer recognised 'correction = "trans"'. Fixed. 
o istat Crashed with an error message about envelopes. Fixed. o summary.ppm, print.ppm p-values which were exactly equal to zero were reported as NA. Fixed. o [.im Crashed if the intersection consisted of a single row or column of pixels. Fixed. o plot.im Sometimes incorrectly displayed an image consisting of a single row or column of pixels. Fixed. o plot.layered The plot region was determined by the first layer, so that objects in subsequent layers could sometimes fall outside the plot region. Fixed. o transect.im If the arguments 'from' and 'to' were numeric vectors of length 2, the result was garbled. Fixed. o Inhomogeneous K functions and pair correlation functions [Kinhom, pcfinhom, Kcross.inhom, Kdot.inhom, pcfcross.inhom, etc.] These functions reported an error 'lambda is not a vector' if the intensity argument lambda was computed using density(..., at="points"). Fixed. o rlabel Did not accept a point pattern with a hyperframe of marks. Fixed. o alltypes Crashed when envelope=TRUE if the summary function 'fun' did not have default values for the marks i and j. Fixed. o Kres, Gres, psst, psstA Ignored the unit of length. Fixed. CHANGES IN spatstat VERSION 1.31-0 OVERVIEW o We thank Frederic Lavancier and Ege Rubak for contributions. o Major bug fix in simulation of area-interaction process. o Metropolis-Hastings simulations accelerated. o Rounding of spatial coordinates o clmfires dataset corrected. o Bug fixes and minor improvements. NEW FUNCTIONS o round.ppp Round the spatial coordinates of a point pattern to a specified number of decimal places. o rounding Determine whether a dataset has been rounded. SIGNIFICANT USER-VISIBLE CHANGES o rmh Simulation of the following models has been accelerated: areaint, dgs, diggra, fiksel, geyer, hardcore, lennard, multihard, strauss, straush, straussm, strausshm. o rmh The transition history of the simulation (which is saved if 'track=TRUE') now also contains the value of the Hastings ratio for each proposal. 
o clmfires The clmfires dataset has been modified to remove errors and inconsistencies. o plot.linim Appearance of the plot has been improved, when style='width'. o summary.ppm Now reports whether the spatial coordinates have been rounded. o dclf.test, mad.test The range of distance values ('rinterval') used in the test is now printed in the test output, and is saved as an attribute. BUG FIXES o rmh Simulation of the Area-Interaction model was completely incorrect. Spotted by Frederic Lavancier. The bug was introduced in spatstat version 1.23-6 or later. Fixed. o dclf.test The test statistic was incorrectly scaled (by a few percent). This did not affect the p-value of the test. Fixed. o ppx If argument 'coord.type' was missing, various errors occurred: a crash may have occurred, or the results may have depended on the storage type of the data. Spotted by Ege Rubak. Fixed. o plot.ppx Crashed for 1-dimensional point patterns. Spotted by Ege Rubak. Fixed. CHANGES IN spatstat VERSION 1.30-0 OVERVIEW o We thank Jorge Mateu, Andrew Bevan, Olivier Flores, Marie-Colette van Lieshout, Nicolas Picard and Ege Rubak for contributions. o The spatstat manual now exceeds 1000 pages. o Hybrids of point process models. o Five new datasets o Second order composite likelihood method for kppm. o Inhomogeneous F, G and J functions. o Delaunay graph distance o Fixed serious bug in 'lppm' for marked patterns. o bug fix in some calculations for Geyer model o Improvements to linear networks code o Pixel images can now be displayed with a logarithmic colour map. 
o spatstat now formally 'Depends' on the R core package 'grDevices' o miscellaneous improvements and bug fixes NEW DATASETS o clmfires Forest fires in Castilla-La Mancha o gordon People sitting on the grass in Gordon Square, London o hyytiala Mixed forest in Hyytiala, Finland (marked by species) o paracou Kimboto trees in Paracou, French Guiana (marked as adult/juvenile) o waka Trees in Waka national park (marked with diameters) NEW FUNCTIONS o Hybrid The hybrid of several point process interactions [Joint research with Jorge Mateu and Andrew Bevan] o is.hybrid Recognise a hybrid interaction or hybrid point process model. o Finhom, Ginhom, Jinhom Inhomogeneous versions of the F, G and J functions [Thanks to Marie-Colette van Lieshout] o delaunay.distance Graph distance in the Delaunay triangulation. o distcdf Cumulative distribution function of the distance between two independent random points in a given window. o bw.frac Bandwidth selection based on window geometry o shortside.owin, sidelengths.owin Side lengths of (enclosing rectangle of) a window SIGNIFICANT USER-VISIBLE CHANGES o ppm Can now fit models with 'hybrid' interactions [Joint research with Jorge Mateu and Andrew Bevan] o kppm Now has the option of fitting models using Guan's (2006) second order composite likelihood. o envelope.lpp Now handles multitype point patterns. o envelope.envelope New argument 'transform' allows the user to apply a transformation to previously-computed summary functions. o runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp Can now generate multitype point patterns. o rmhmodel, rmh, simulate.ppm Now handle point process models with 'hybrid' interactions. o kppm Accelerated, and more reliable, due to better choice of starting values in the optimisation procedure. o kppm The internal format of kppm objects has changed. o minimum contrast estimation Error messages from the optimising function 'optim' are now trapped and handled. 
o rhohat This command is now generic, with methods for ppp, quad, and ppm. o raster.x, raster.y, raster.xy These functions have a new argument 'drop' o summary.ppm Improved behaviour when the model covariates are a data frame. o progressreport Output improved. o second order summary functions (Kest, Lest, Kinhom, pcf.ppp, Kdot, Kcross, Ldot etc etc) These functions now accept correction="translation" as an alternative to correction = "translate", for consistency. o plot.im New argument 'log' allows colour map to be equally spaced on a log scale. o as.owin.ppm, as.owin.kppm New argument 'from' allows the user to extract the spatial window of the point data (from="points") or the covariate images (from="covariates") o dclf.test, mad.test The rule for handling tied values of the test statistic has been changed. The tied values are now randomly ordered to obtain a randomised integer rank. o with.fv New argument 'enclos' allows evaluation in other environments BUG FIXES o lppm For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. Fixed. o Geyer For point process models with the 'Geyer' interaction, vcov.ppm() and suffstat() sometimes gave incorrect answers. [Spotted by Ege Rubak.] Fixed. o as.im.im Did not correctly handle factor-valued images if one of the arguments 'dimyx', 'eps', 'xy' was given. Fixed. o envelope.lppm Crashed if the model was multitype. Fixed. o lpp Did not handle empty patterns. Fixed. o density.ppp If 'sigma' was a bandwidth selection function such as bw.scott() which returned a numeric vector of length 2, a warning message was issued, and the smoothing bandwidth was erroneously taken to be the first element of the vector. Fixed. o Fest, Jcross, Jdot, Jmulti If these functions were computed using correction = 'rs', plotting them would sometimes give an error, with the message "no finite x/y limits". Fixed. 
o pcfmodel.kppm For models with clusters="VarGamma" the value of the pcf at distance r=0 was given as NaN. Fixed. o vcov.ppm Result was incorrect in rare cases, due to numerical rounding effects. Fixed. o rLGCP, simulate.kppm For models fitted to point patterns in an irregular window, simulation sometimes failed, with a message that the image 'mu' did not cover the simulation window. (Spotted by George Limitsios.) Fixed. o rLGCP, simulate.kppm Crashed sometimes with an error about unequal x and y steps (from 'GaussRF'). Fixed. CHANGES IN spatstat VERSION 1.29-0 OVERVIEW o We thank Colin Beale, Li Haitao, Frederic Lavancier, Erika Mudrak and Ege Rubak for contributions. o random sequential packing o Allard-Fraley estimator o method for pooling several quadrat tests o better control over dummy points in ppm o more support for data on a linear network o nearest neighbour map o changes to subsetting of images o improvements and bug fixes NEW FUNCTIONS o clusterset Allard-Fraley estimator of high-density features in a point pattern o pool.quadrattest Pool several quadrat tests o nnfun Nearest-neighbour map of a point pattern or a line segment pattern o as.ppm Converts various kinds of objects to ppm o crossdist.lpp Shortest-path distances between pairs of points in a linear network o nobs.lppm Method for 'nobs' for lppm objects. o as.linim Converts various kinds of objects to 'linim' o model.images.slrm Method for model.images for slrm objects o rotate.im Rotate a pixel image SIGNIFICANT USER-VISIBLE CHANGES o "[.im" and "[<-.im" New argument 'j' allows any type of matrix indexing to be used. o "[.im" Default behaviour changed in the case of a rectangular subset. New argument 'rescue' can be set to TRUE to reinstate previous behaviour. o rSSI Performs 'Random Sequential Packing' if n=Inf. o ppm New argument 'eps' determines the spacing between dummy points. (also works for related functions quadscheme, default.dummy, ...) 
o fitted.ppm, predict.ppm Argument 'new.coef' specifies a vector of parameter values to replace the fitted coefficients of the model. o lppm Stepwise model selection using step() now works for lppm objects. o vcov.slrm Can now calculate correlation matrix or Fisher information matrix as well as variance-covariance matrix. o eval.fv Improved behaviour when plotted. o "[.fv" Improved behaviour when plotted. o lohboot When the result is plotted, the confidence limits are now shaded. o lohboot New argument 'global' allows global (simultaneous) confidence bands instead of pointwise confidence intervals. o vcov.ppm Accelerated by 30% in some cases. o quadrat.test.splitppp The result is now a single object of class 'quadrattest' o progressreport Improved output (also affects many functions which print progress reports) o Full redwood data (redwoodfull) Plot function redwoodfull.extra$plotit has been slightly improved. o nncross This function is now generic, with methods for 'ppp' and 'default'. o distfun The internal format of objects of class 'distfun' has been changed. o duplicated.ppp, unique.ppp New argument 'rule' allows behaviour to be consistent with package 'deldir' BUG FIXES o bdist.tiles Values were incorrect in some cases due to numerical error. (Spotted by Erika Mudrak.) Fixed. o vcov.ppm, suffstat These functions sometimes gave incorrect values for marked point process models. Fixed. o simulate.ppm, predict.ppm Did not correctly handle the 'window' argument. (Spotted by Li Haitao). Fixed. o smooth.ppp, markmean If sigma was very small, strange values were produced, due to numerical underflow. (Spotted by Colin Beale). Fixed. o MultiHard, MultiStrauss, MultiStraussHard Crashed if the data point pattern was empty. (Spotted by Ege Rubak). Fixed. o vcov.ppm Crashed sporadically, with multitype interactions. (Spotted by Ege Rubak). Fixed. 
o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS If the simulated pattern was empty, these functions would either crash, or return a pattern containing 1 point. (Spotted by Frederic Lavancier). Fixed. o model.matrix.slrm Crashed if the model was fitted using split pixels. Fixed. o residuals.ppm, diagnose.ppm Did not always correctly handle models that included offset terms. Fixed. o project.ppm When a model was projected by project.ppm or by ppm(project=TRUE), the edge corrections used in the projected model were sometimes different from the edge corrections in the original model, so that the projected and unprojected models were not comparable. Fixed. o plot.listof, plot.splitppp Crashed sometimes due to a scoping problem. Fixed. o dclf.test, mad.test Crashed if any of the function values were infinite or NaN. Fixed. o psstA Default plot did not show the horizontal line at y=0 corresponding to a perfect fit. Fixed. o vcov.ppm names attribute was spelt incorrectly in some cases. Fixed. CHANGES IN spatstat VERSION 1.28-2 OVERVIEW o We thank Thomas Bendtsen, Ya-Mei Chang, Daniel Esser, Robert John-Chandran, Ege Rubak and Yong Song for contributions. o New code for Partial Residual Plots and Added Variable Plots. o maximum profile pseudolikelihood computations vastly accelerated. o New dataset: cells in gastric mucosa o now possible to capture every k-th state of Metropolis-Hastings algorithm. o size of 'ppm' objects reduced. o scope of 'intensity.ppm' extended. o quadrat.test can now perform Monte Carlo tests and one/two-sided tests o improvements to 'plot.fv' o improvement to 'rescale' o some datasets reorganised. o numerous bug fixes NEW DATASET o mucosa Cells in gastric mucosa Kindly contributed by Dr Thomas Bendtsen NEW FUNCTIONS o parres Partial residual plots for spatial point process models. A diagnostic for the form of a covariate effect. o addvar Added variable plots for spatial point process models. 
A diagnostic for the existence of a covariate effect. SIGNIFICANT USER-VISIBLE CHANGES o profilepl Accelerated (typically by a factor of 5). o rmh, rmhcontrol It is now possible to save every k-th iteration of the Metropolis-Hastings algorithm. The arguments 'nsave' and 'nburn' may be given to rmh or to rmhcontrol. They specify that the point pattern will be saved every 'nsave' iterations, after an initial burn-in of 'nburn' iterations. o simulate.ppm New argument 'singlerun' determines whether the simulated patterns are generated using independent runs of the Metropolis-Hastings algorithm or are obtained by performing one long run of the algorithm and saving every k-th iteration. o exactMPLEstrauss New argument 'project' determines whether the parameter gamma is constrained to lie in [0,1]. o intensity.ppm Now works for stationary point process models with the interactions DiggleGratton, DiggleGatesStibbard, Fiksel, PairPiece and Softcore. o plot.fv Improved algorithm for avoiding collisions between graphics and legend. o plot.fv New argument 'log' allows plotting on logarithmic axes. o envelope Can now calculate an estimate of the true significance level of the "wrong" test (which declares the observed summary function to be significant if it lies outside the pointwise critical boundary anywhere). Controlled by new argument 'do.pwrong'. o quadrat.test New argument 'alternative' allows choice of alternative hypothesis and returns one-sided or two-sided p-values as appropriate. o quadrat.test Can now perform Monte Carlo test as well (for use in small samples where the chi^2 approximation is inaccurate) o Softcore Improved numerical stability. New argument 'sigma0' for manual control over rescaling. o rescale If scale argument 's' is missing, then the data are rescaled to native units. For example if the current unit is 0.1 metres, coordinates will be re-expressed in metres. 
o psst Extra argument 'verbose=TRUE' o is.subset.owin Accelerated for polygonal windows o rmh.default 'track' is no longer a formal argument of rmh.default; it is now a parameter of rmhcontrol. However there is no change in usage: the argument 'track' can still be given to rmh.default. o clf.test Has been renamed 'dclf.test' to give proper attribution to Peter Diggle. o betacells This dataset has been restructured. The vector of cell profile areas, formerly given by betacells.extra$area, has now been included as a column of marks in the point pattern 'betacells'. o ants The function ants.extra$plot() has been renamed plotit() for conformity with other datasets. o redwoodfull The function redwoodfull.extra$plot() has been renamed plotit() for conformity with other datasets. o nbfires For conformity with other datasets, there is now an object nbfires.extra BUG FIXES o ripras Expansion factor was incorrect in the rectangular case. Fixed. o Triplets Crashed sometimes with error "dim(X) must have positive length". Fixed. o affine.im Crashed in the case of a diagonal transformation matrix! Spotted by Ege Rubak. Fixed. o envelope.envelope Ignored the argument 'global'. Fixed. o MultiStraussHard The printed output showed the hardcore radii as NULL. Spotted by Ege Rubak. Fixed. o "[.psp" Crashed if the data were generated by rpoisline(). Spotted by Marcelino de la Cruz. Fixed. o plot.linim If style="colour", the main title was always "x". Fixed. o plot.ppx Setting add=TRUE did not prevent the domain being plotted. Fixed. o rmh Crashed if x.start was an empty point pattern. Spotted by Ege Rubak. Fixed. o as.ppp.data.frame Crashed if any points lay outside the window. Spotted by Ege Rubak. Fixed. o Ripley isotropic edge correction Divide-by-zero error in rare cases. Spotted by Daniel Esser. Fixed. o summary functions For many of the summary functions (e.g. Kest, pcf), the result of saving the object to disc was an enormous file. Spotted by Robert John-Chandran. Fixed. 
o pcf.fv Default plot was wrongly coloured. Fixed. CHANGES IN spatstat VERSION 1.28-1 OVERVIEW o We thank Ege Rubak, Gopal Nair, Jens Oehlschlaegel and Mike Zamboni for contributions. o New approximation to the intensity of a fitted Gibbs model. o Minor improvements and bug fixes o spatstat now 'Suggests' the package 'gsl' NEW FUNCTIONS o intensity, intensity.ppp, intensity.ppm Calculate the intensity of a dataset or fitted model. Includes new approximation to the intensity of a fitted Gibbs model o LambertW Lambert's W-function SIGNIFICANT USER-VISIBLE CHANGES o envelope Improved plot labels for envelopes that were generated using the 'transform' argument. o plot.fv Improved algorithm for collision detection. o plot.im Now returns the colour map used. o plot.listof, plot.splitppp Slight change to handling of plot.begin and plot.end o square Now accepts vectors of length 2 o plot.fii Increased resolution of the plot obtained from plot(fitin(ppm(...))) o image.listof If equal.ribbon=TRUE, the colour ribbon will no longer be displayed repeatedly for each panel, but will now be plotted only once, at the right hand side of the plot array. BUG FIXES o vcov.ppm Results were sometimes incorrect for a Gibbs model with non-trivial trend. Spotted by Ege Rubak. Fixed. o nncross In rare cases the results could be slightly incorrect. Spotted by Jens Oehlschlaegel. Fixed. o plot.fv When add=TRUE, the x limits were sometimes truncated. Spotted by Mike Zamboni. Fixed. o plot.im Labels for the tick marks on the colour ribbon were sometimes ridiculous, e.g. "2.00000001". Fixed. CHANGES IN spatstat VERSION 1.28-0 OVERVIEW o We thank Farzaneh Safavimanesh, Andrew Hardegen and Tom Lawrence for contributions. o Improvements to 3D summary functions. o A multidimensional point pattern (ppx) can now have 'local' coordinates as well as spatial and temporal coordinates and marks. o Changed format for point patterns on a linear network (lpp). Changes are backward compatible. 
Many computations run faster. o More support for fitted cluster models (kppm). o split method for multidimensional point patterns (ppx) and point patterns on a linear network (lpp). o Fixed bug causing errors in plot.im o Miscellaneous improvements and bug fixes NEW FUNCTIONS o exactMPLEstrauss Fits the stationary Strauss point process model using an exact maximum pseudolikelihood technique. This is mainly intended for technical investigation of algorithms. o split.ppx Method for 'split' for multidimensional point patterns (class 'ppx'). This also works for point patterns on a linear network (class 'lpp'). o model.images This function is now generic, with methods for classes ppm, kppm, lppm o model.frame, model.matrix These generic functions now have methods for classes kppm, lppm o as.owin.kppm, as.owin.lppm New methods for 'as.owin' for objects of class kppm, lppm o as.linnet.lppm Extracts the linear network in which a point process model was fitted. SIGNIFICANT USER-VISIBLE CHANGES o class 'ppx' An object of class 'ppx' may now include 'local' coordinates as well as 'spatial' and 'temporal' coordinates, and marks. o ppx Arguments have changed. o class 'lpp' The internal format of lpp objects has been extended (but is backward-compatible). Many computations run faster. To convert an object to the new format: X <- lpp(as.ppp(X), as.linnet(X)). o F3est Calculation of theoretical Poisson curve ('theo') has changed, and is now controlled by the argument 'sphere'. o rmh, rmhstart The initial state ('start') can now be missing or null. o im, as.im The pixel coordinates in an image object are now generated more accurately. This avoids a numerical error in plot.im. o eval.fv, eval.fasp Evaluation is now applied only to columns that contain values of the function itself (rather than values of the derivative, hazard rate, etc). This is controlled by the new argument 'dotonly'. o spatstat.options New option 'nvoxel' o quad.ppm Now accepts kppm objects. 
o str This generic function (for inspecting the internal structure of an object) now produces sensible output for objects of class 'hyperframe', 'ppx', 'lpp' o ppx, coords.ppx, coords<-.ppx The arguments to these functions have changed. o lgcp.estK, Kmodel Computation can be greatly accelerated by setting spatstat.options(fastK.lgcp=TRUE). o G3est Computation accelerated. o envelope Computation slightly accelerated. o spatstat.options New option 'fastK.lgcp' BUG FIXES o nndist.psp Caused an error if length(k) > 1. Fixed. o plot.im Sometimes reported an error "useRaster=TRUE can only be used with a regular grid." This was due to numerical rounding effects on the coordinates of a pixel image. Fixed. o plot.fv If a formula was used to specify the plot, the names of variables in the formula were sometimes incorrectly matched to *functions*. Spotted by Farzaneh Safavimanesh. Fixed. o F3est Took a very long time if the containing box was very flat, due to the default value of 'vside'. Fixed. o rmh, rmhmodel An erroneous warning about 'outdated format of rmhmodel object' sometimes occurred. Fixed. o marks<-.ppx Names of result were incorrect. Fixed. o hyperframe class Various minor bug fixes. CHANGES IN spatstat VERSION 1.27-0 OVERVIEW o Variance estimates are now available for all Gibbs point process models. o Cressie-Loosmore-Ford test implemented o plot.fv now avoids collisions between the legend and the graphics. o Extension to predict.ppm o Improvements to envelopes and multitype summary functions. o Line transects of a pixel image. o Changes to defaults in Metropolis-Hastings simulations. o More geometrical operations o Bug fixes. o We thank Aruna Jammalamadaka for contributions. NEW FUNCTIONS o clf.test Perform the Cressie (1991)/ Loosmore and Ford (2006) test of CSR (or another model) o mad.test Perform the Maximum Absolute Deviation test of CSR (or another model). o convolve.im Compute convolution of pixel images. 
o Kmulti.inhom Counterpart of 'Kmulti' for spatially-varying intensity. o rmhexpand Specify a simulation window, or a rule for expanding the simulation window, in Metropolis-Hastings simulation (rmh) o transect.im Extract pixel values along a line transect. o affine.im Apply an affine transformation to a pixel image. o scalardilate Perform scalar dilation of a geometrical object relative to a specified origin. o reflect Reflect a geometrical object through the origin. o "[.lpp", "[.ppx" Subset operators for the classes "lpp" (point pattern on linear network) and "ppx" (multidimensional space-time point pattern). o is.rectangle, is.polygonal, is.mask Determine whether a window w is a rectangle, a domain with polygonal boundaries, or a binary pixel mask. o has.offset Determines whether a fitted model object (of any kind) has an offset. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm This function can now calculate the conditional intensity of a model relative to any point pattern X (not just the original data pattern). o vcov.ppm This function now handles all Gibbs point process models. o plot.fv Collisions between the legend box and the graphics are now detected and avoided. o rmh.ppm, rmh.default, simulate.ppm, qqplot.ppm, envelope.ppm These functions now have slightly different default behaviour because of changes to the handling of arguments to 'rmhcontrol'. o rmhcontrol The default value of the parameters 'periodic' and 'expand' has changed. o rmhcontrol The parameter 'expand' can now be in any format acceptable to rmhexpand(). o rmh.ppm, rmh.default, simulate.ppm Any 'rmhcontrol' parameter can now be given directly as an argument to rmh.ppm, rmh.default or simulate.ppm. o Kmulti, Gmulti, Jmulti The arguments I, J can now be any kind of subset index or can be functions that yield a subset index. 
o envelope.envelope In envelope(E, fun=NULL) if E does not contain simulated summary functions, but does contain simulated point patterns, then 'fun' now defaults to Kest, instead of flagging an error. o print.ppp, summary.ppp If the point pattern x was generated by Metropolis-Hastings simulation using 'rmh', then print(x) and summary(x) show information about the simulation parameters. o print.ppm Standard errors for the parameter estimates, and confidence intervals for the parameters, can now be printed for all Gibbs models (but are printed only for Poisson models by default). o eval.im Images with incompatible dimensions are now resampled to make them compatible (if harmonize=TRUE). o spatstat.options New option 'print.ppm.SE' controls whether standard errors and confidence intervals are printed for all Gibbs models, for Poisson models only, or are never printed. o inside.owin Now accepts the form list(x,y) for the first argument. o image.listof New argument 'equal.ribbon' allows several images to be plotted with the same colour map. o is.subset.owin Improved accuracy in marginal cases. o expand.owin Functionality extended to handle all types of expansion rule. o default.rmhcontrol, default.expand These functions now work with models of class 'rmhmodel' as well as 'ppm' o print.rmhcontrol Output improved. BUG FIXES o linearK, linearKinhom If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear K function. Fixed. o predict.ppm In some cases, predict.ppm(type="cif") generated a spurious warning that "number of rows of result is not a multiple of vector length." Fixed. o crossing.psp Results were sometimes incorrect due to numerical rounding error associated with GCC bug #323. Fixed. 
o MultiHard, MultiStrauss, MultiStraussHard If the mark values contained non-alphanumeric characters, the names of the interaction coefficients in coef(ppm(...)) were sometimes garbled. Fixed. o profilepl For edge corrections other than the border correction, an error message about 'rbord' would sometimes occur. Fixed. o is.marked, is.multitype These functions gave the wrong answer for 'lpp' objects. Fixed. o marks<-.lpp, marks<-.ppx Format of result was garbled if new columns of marks were added. Fixed. o reach.rmhmodel Gave the wrong answer for Geyer and BadGey models. Fixed. o envelope.envelope Ignored the argument 'savefuns'. Fixed. o BadGey Sometimes wrongly asserted that the parameter 'sat' was invalid. Occurred only in ppm(project=TRUE). Fixed. CHANGES IN spatstat VERSION 1.26-1 OVERVIEW o Variance-covariance matrix for Gibbs point process models. o Bootstrap confidence bands for pair correlation function and K function. o Bug fix in scan test. o Area-interaction model accelerated. o We thank Jean-Francois Coeurjolly and Ege Rubak for contributions. NEW FUNCTIONS o lohboot Computes bootstrap confidence bands for pair correlation function and K function using Loh's (2008) mark bootstrap. SIGNIFICANT USER-VISIBLE CHANGES o vcov.ppm Now works for all Gibbs point process models, thanks to new code (and theory) from Jean-Francois Coeurjolly and Ege Rubak o AreaInter Computations related to the area-interaction point process (ppm, predict.ppm, residuals.ppm, diagnose.ppm, qqplot.ppm) have been accelerated. BUG FIXES o scan.test Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). Fixed. CHANGES IN spatstat VERSION 1.26-0 OVERVIEW o We thank Jens Oehlschlaegel for contributions. o Further substantial acceleration of spatstat functions. o Workaround for bug in RandomFields package. o Numerous modifications to internal code. 
NEW FUNCTIONS o RandomFieldsSafe There is a bug in the package 'RandomFields' (version <= 2.0.54) which causes a crash to occur, in the development version of R but not in R 2.15.0. To avoid crashing spatstat, we have written the temporary, undocumented function RandomFieldsSafe() which returns TRUE if it is safe to use the RandomFields package. Examples in the spatstat help files for kppm, lgcp.estK, lgcp.estpcf and rLGCP are only executed if RandomFieldsSafe() returns TRUE. SIGNIFICANT USER-VISIBLE CHANGES o Many functions Many spatstat functions now run faster, and will handle larger datasets, thanks to improvements in the internal code, following suggestions from Jens Oehlschlaegel. o Many functions The response to an 'Interrupt' signal is slightly slower. CHANGES IN spatstat VERSION 1.25-5 OVERVIEW o We thank Ya-Mei Chang, Jens Oehlschlaegel and Yong Song for contributions. o Extended functionality of 'rhohat' to local likelihood smoothing and bivariate smoothing. o Nearest neighbour distance computations accelerated. o spatstat now 'Suggests:' the package 'locfit' NEW FUNCTIONS o rho2hat Bivariate extension of 'rhohat' for estimating spatial residual risk, or intensity as a function of two covariates. SIGNIFICANT USER-VISIBLE CHANGES o rhohat Estimation can now be performed using local likelihood fitting with the 'locfit' package, or using kernel smoothing. o nncross Substantially accelerated. New arguments added to control the return value and the sorting of data. BUG FIXES o plot.msr Crashed if the argument 'box' was given. Fixed. CHANGES IN spatstat VERSION 1.25-4 OVERVIEW o We thank Jonathan Lee and Sergiy Protsiv for contributions. 
o Improvements and bug fixes to K function for very large datasets NEW FUNCTIONS o rStraussHard Perfect simulation for Strauss-hardcore process (with gamma <= 1) SIGNIFICANT USER-VISIBLE CHANGES o plot.im The colour ribbon can now be placed left, right, top or bottom using new argument 'ribside' o profilepl Does not generate warnings when some of the candidate models have zero likelihood - for example when fitting model with a hard core. o Kest Now includes fast algorithm for 'correction="none"' which will handle patterns containing millions of points. BUG FIXES o Kest, Lest Gave incorrect values in very large datasets, due to numerical overflow. `Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] [Spotted by Sergiy Protsiv.] Fixed. o Kest, Lest Ignored 'ratio=TRUE' if the argument 'domain' was given. [Spotted by Jonathan Lee.] Fixed. o rjitter Output was sometimes incorrect. [Spotted by Sergiy Protsiv.] Fixed. CHANGES IN spatstat VERSION 1.25-3 OVERVIEW o We thank Daniel Esser for contributions. o Improved support for fitted point process models. o Bug fixes. NEW FUNCTIONS o simulate.slrm Method for 'simulate' for spatial logistic regression models. o labels.ppm, labels.kppm, labels.slrm Methods for 'labels' for fitted point process models. o commonGrid Determine a common spatial domain and pixel resolution for several pixel images and/or binary masks SIGNIFICANT USER-VISIBLE CHANGES o effectfun Now has argument 'se.fit' allowing calculation of standard errors and confidence intervals. o [.msr Now handles character-valued indices. o print.summary.ppm Output gives a more precise description of the fitting method. o ppm, kppm, slrm Confidence intervals for the fitted trend parameters can now be obtained using 'confint' o predict.slrm New argument 'window' o union.owin Now handles a single argument: union.owin(A) returns A. 
BUG FIXES o selfcrossing.psp y coordinate values were incorrect. [Spotted by Daniel Esser.] Fixed. o as.im.owin Did not handle a binary mask with a 1 x 1 pixel array. Fixed. o predict.slrm Results of predict(object, newdata) were incorrect if the spatial domain of 'newdata' was larger than the original domain. Fixed. o ppm If the model was the uniform Poisson process, the argument 'rbord' was ignored. Fixed. o image subset assignment "[<-.im" Generated an error if the indexing argument 'i' was a point pattern containing zero points. Fixed. o hyperframe subset assignment "[<-.hyperframe" Did not correctly handle the case where a single column of the hyperframe was to be changed. Fixed. o help(bw.relrisk), help(rmh.ppm), help(plot.plotppm) These help files had the side-effect of changing some options in spatstat.options. Fixed. CHANGES IN spatstat VERSION 1.25-2 OVERVIEW o We thank Abdollah Jalilian and Thierry Onkelinx for contributions. o Very Important Bug fixes. o Improved mechanism for handling 'invalid' point processes NEW FUNCTIONS o as.matrix.owin Converts a window to a logical matrix. SIGNIFICANT USER-VISIBLE CHANGES o project.ppm Improved algorithm. Now handles terms in the trend formula as well as the interaction. The projected point process is now obtained by re-fitting the model, and is guaranteed to be the maximum pseudolikelihood fit. o plot.im Now handles many arguments recognised by plot.default such as 'cex.main'. Also handles argument 'box'. New argument 'ribargs' contains parameters controlling the ribbon plot only. o spatstat.options New option 'project.fast' allows a faster shortcut for project.ppm o spatstat.options New options 'rmh.p', 'rmh.q', 'rmh.nrep' determine the default values of the parameters p, q and nrep of the Metropolis-Hastings algorithm. See rmhcontrol o ppm Slightly accelerated. BUG FIXES o nncross, distfun, AreaInter Results of nncross were possibly incorrect when X and Y did not have the same window. 
This bug affected values of 'distfun' and may also have affected ppm objects with interaction 'AreaInter'. [Spotted by Thierry Onkelinx] Bug introduced in spatstat 1.9-4 (June 2006). Fixed. o rCauchy Simulations were incorrect in the sense that the value of 'omega' was inadvertently doubled (i.e. omega was incorrectly replaced by 2 * omega). Bug introduced in spatstat 1.25-0. Fixed. o plot.im White lines were present in the image display, on some graphics devices, due to changes in R 2.14. Fixed. o update.ppm The result of 'update(object, formula)' sometimes contained errors in the internal format. Bug introduced in spatstat 1.25-0. Fixed. o example(AreaInter), example(bw.smoothppp), example(Kest.fft), example(plot.owin), example(predict.ppm), example(simulate.ppm) Executing these examples had the side-effect of changing some of the parameters in spatstat.options. Fixed. CHANGES IN spatstat VERSION 1.25-1 OVERVIEW o We thank Neba Funwi-Gabga and Jorge Mateu for contributions. o New dataset of gorilla nest sites o New functions for perfect simulation o Bug fix for rare crashes in rStrauss o Code for ensuring a fitted point process model is a valid point process NEW DATASET o gorillas Gorilla nest sites in a National Park in Cameroon. Generously contributed by Neba Funwi-Gabga NEW FUNCTIONS o rDiggleGratton, rDGS, rHardcore Perfect simulation for the Diggle-Gratton process, Diggle-Gates-Stibbard process, and Hardcore process. o bw.scott Scott's rule of thumb for bandwidth selection in multidimensional smoothing o valid.ppm Checks whether a fitted point process model is a valid point process o project.ppm Forces a fitted point process model to be a valid point process SIGNIFICANT USER-VISIBLE CHANGES o ppm New argument 'project' determines whether the fitted model is forced to be a valid point process o linnet Substantially accelerated. o rStrauss Slightly accelerated. o summary.lpp Now prints the units of length. 
BUG FIXES o rStrauss Crashed rarely (once every 10 000 realisations) with a memory segmentation fault. Fixed. CHANGES IN spatstat VERSION 1.25-0 OVERVIEW o Leverage and influence for point process models o New cluster models (support for model-fitting and simulation). o Fit irregular parameters in trend of point process model o Third order summary statistic. o Improvements to speed and robustness of code. o spatstat now depends on R 2.14 o We thank Abdollah Jalilian and Rasmus Waagepetersen for contributions. NEW FUNCTIONS o leverage.ppm, influence.ppm, dfbetas.ppm Leverage and influence for point process models o ippm Experimental extension to 'ppm' which fits irregular parameters in trend by Fisher scoring algorithm. o Tstat Third order summary statistic for point patterns based on counting triangles. o rCauchy, rVarGamma simulation of a Neyman-Scott process with Cauchy clusters or Variance Gamma (Bessel) clusters. Contributed by Abdollah Jalilian. o rPoissonCluster simulation of a general Poisson cluster process o model.covariates Identify the covariates involved in a model (lm, glm, ppm etc) o as.im.distfun Converts a 'distfun' to a pixel image. o cauchy.estK, cauchy.estpcf, vargamma.estK, vargamma.estpcf Low-level model-fitting functions for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel. Contributed by Abdollah Jalilian. SIGNIFICANT USER-VISIBLE CHANGES o kppm Now accepts clusters="Cauchy" or clusters="VarGamma" for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel. Code contributed by Abdollah Jalilian. o rNeymanScott Argument 'rcluster' may now take a different format. o psst Argument 'funcorrection' changed to 'funargs' allowing greater flexibility. o plot.fv, plot.envelope New argument 'limitsonly' allows calculation of a common x,y scale for several plots. o overall speed spatstat is now byte-compiled and runs slightly faster. 
o user interrupt Long calculations in spatstat now respond to the Interrupt/Stop signal. o update.ppm Now runs faster and uses much less memory, when the update only affects the model formula (trend formula). o rNeymanScott, rThomas, rMatClust Accelerated thanks to Rasmus Waagepetersen. o multitype data and models Second order multitype statistics (such as Kcross, pcfcross) and multitype interaction models (such as MultiStrauss) now run faster, by a further 5%. BUG FIXES o distfun Some manipulations involving 'distfun' objects failed if the original data X in distfun(X) did not have a rectangular window. Fixed. CHANGES IN spatstat VERSION 1.24-2 OVERVIEW o Geyer's triplet interaction o more functionality for replicated point patterns o changed default for simulation window in point process simulation o changed default for edge correction in Kcom, Gcom o data in spatstat is now lazy-loaded o bug fixes NEW FUNCTIONS o Triplets Geyer's triplet interaction, for point process models o coef.summary.ppm New method coef.summary.ppm You can now type 'coef(summary(fit))' to extract a table of the fitted coefficients of the point process model 'fit' SIGNIFICANT USER-VISIBLE CHANGES o data in spatstat are now lazy-loaded so you don't have to type data(amacrine), etc. o rmh.default, rmh.ppm, simulate.ppm These now handle the 'triplets' interaction o fryplot Now has arguments 'to' and 'from', allowing selection of a subset of points. o fryplot, frypoints These functions now handle marked point patterns properly. o Kcross, Kdot, Kmulti New argument 'ratio' determines whether the numerator and denominator of the estimate of the multitype K-function will be stored. This enables analysis of replicated point patterns, using 'pool.rat()' to pool the K function estimates. o rmh.ppm, simulate.ppm, default.expand For point process models which have a trend depending only on x and y, the simulation window is now taken to be the same as the original window containing the data (by default). 
That is, `expansion' does not take place, by default. (In previous versions of spatstat the simulation window was larger than the original data window.) o rmh.ppm, simulate.ppm The argument sequence for these functions has changed. New argument 'expand' allows more explicit control over simulation domain. o Kcom, Gcom New argument 'conditional' gives more explicit control over choice of edge correction in compensator. Simplified defaults for edge correction. o Kinhom Improved plot labels. o profilepl Printed output improved. BUG FIXES o Lest The variance approximations (Lotwick-Silverman and Ripley) obtained with var.approx=TRUE, were incorrect for Lest (although they were correct for Kest) due to a coding error. Fixed. o simulate.ppm Ignored the argument 'control' in some cases. Fixed. o pcf and its relatives (pcfinhom, pcfcross.inhom, pcfdot.inhom) Sometimes gave a warning about 'extra arguments ignored'. Fixed. CHANGES IN spatstat VERSION 1.24-1 OVERVIEW o Spatial Scan Test o Functionality for replicated point patterns o Bug fixes NEW FUNCTIONS o scan.test Spatial scan test of clustering o rat New class of 'ratio objects' o pool.rat New method for 'pool'. Combines K function estimates for replicated point patterns (etc) by computing ratio-of-sums o unnormdensity Weighted kernel density with weights that do not sum to 1 and may be negative. o compatible New generic function with methods for 'fv', 'im', 'fasp' and 'units' SIGNIFICANT USER-VISIBLE CHANGES o Kest New argument 'ratio' determines whether the numerator and denominator of the estimate of the K-function will be stored. This enables analysis of replicated point patterns, using 'pool.rat()' to pool the K function estimates. o Lest Now handles theoretical variance estimates (using delta method) if var.approx=TRUE o as.mask Argument 'eps' can now be a 2-vector, specifying x and y resolutions. o default.expand Behaviour changed slightly. 
o plot.listof, plot.splitppp, contour.listof, image.listof The arguments 'panel.begin' and 'panel.end' can now be objects such as windows. BUG FIXES o rgbim, hsvim Did not work on images with non-rectangular domains. Fixed. o scaletointerval Did not handle NA's. Fixed. CHANGES IN spatstat VERSION 1.24-0 OVERVIEW o This version was not released publicly. CHANGES IN spatstat VERSION 1.23-6 OVERVIEW o Spatial covariance functions of windows and pixel images. o Area-interaction models can now be fitted in non-rectangular windows o Bug fix for envelope of inhomogeneous Poisson process o Bug fix for raster conversion o New vignette on 'Getting Started with Spatstat' o Code accelerated. NEW FUNCTIONS o imcov Spatial covariance function of pixel image or spatial cross-covariance function of two pixel images o harmonise.im Make several pixel images compatible by converting them to the same pixel grid o contour.listof, image.listof Methods for contour() and image() for lists of objects o dummify Convert data to numeric values by constructing dummy variables. SIGNIFICANT USER-VISIBLE CHANGES o setcov Can now compute the `cross-covariance' between two regions o AreaInter Point process models with the AreaInter() interaction can now be fitted to point pattern data X in any window. o areaGain, areaLoss These now handle arbitrary windows W. They are now more accurate when r is very small. o Kcom Computation vastly accelerated, for non-rectangular windows. o vignettes New vignette 'Getting Started with the Spatstat Package' o nncorr, nnmean, nnvario These functions now handle data frames of marks. BUG FIXES o envelope.ppm If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). Bug was introduced in spatstat 1.23-5. Fixed. 
o envelope.ppm If the model was an inhomogeneous Poisson process with intensity a function of x and y only, overflow errors sometimes occurred ('insufficient storage' or 'attempting to generate a large number of random points'). Fixed. o as.im.im The result of as.im(X, W) was incorrect if 'W' did not cover 'X'. Fixed. o as.mask The result of as.mask(w, xy) was incorrect if 'xy' did not cover 'w'. Fixed. o plot.fv Legend was incorrectly labelled if 'shade' variables were not included in the plot formula. Fixed. o areaGain, areaLoss Crashed if the radius r was close to zero. Fixed. CHANGES IN spatstat VERSION 1.23-5 OVERVIEW o Bug fix to bandwidth selection. o Functions to pool data from several objects of the same class. o Improvements and bug fixes. o We thank Michael Sumner for contributions. NEW FUNCTIONS o pool Pool data from several objects of the same class o pool.envelope Pool simulated data from several envelope objects and create a new envelope o pool.fasp Pool simulated data from several function arrays and create a new array o envelope.envelope Recalculate an envelope from simulated data using different parameters SIGNIFICANT USER-VISIBLE CHANGES o bw.diggle, bw.relrisk, bw.smoothppp, bw.optim Plot method modified. o model.depends Now also recognises 'offset' terms. BUG FIXES o bw.diggle Bandwidth was too large by a factor of 2. Fixed. o plot.psp Crashed if any marks were NA. Fixed. o pointsOnLines Crashed if any segments had zero length. Ignored argument 'np' in some cases. Fixed. o stieltjes Crashed if M had only a single column of function values. Fixed. CHANGES IN spatstat VERSION 1.23-4 OVERVIEW o Bandwidth selection for density.ppp and smooth.ppp o Layered plots. o Model-handling facilities. o Improvements and bug fixes. NEW FUNCTIONS o bw.diggle Bandwidth selection for density.ppp by mean square error cross-validation. o bw.smoothppp Bandwidth selection for smooth.ppp by least-squares cross-validation. 
o layered, plot.layered A simple mechanism for controlling plots that consist of several successive layers of data. o model.depends Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. o model.is.additive Determine whether a fitted model (of any kind) is additive, in the sense that each term in the model involves at most one covariate. SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp Bandwidth 'sigma' is now selected by least-squares cross-validation o bw.relrisk Computation in large datasets accelerated. New arguments 'hmin', 'hmax' control the range of trial values of bandwidth. o Hest, Gfox, Jfox Improved algebraic labels for plot o spatstat.options New parameter 'n.bandwidth' o density.ppp, smooth.ppp Slightly accelerated. o point-in-polygon test Accelerated. BUG FIXES o with.fv Mathematical labels were incorrect in some cases. Fixed. o bw.relrisk Implementation of method="weightedleastsquares" was incorrect and was equivalent to method="leastsquares". Fixed. o smooth.ppp NaN values occurred if the bandwidth was very small. Fixed. CHANGES IN spatstat VERSION 1.23-3 OVERVIEW o Urgent bug fix. BUG FIXES o crossing.psp Crashed occasionally with a message about NA or NaN values. Fixed. o affine.ppp Crashed if the point pattern was empty. Fixed. CHANGES IN spatstat VERSION 1.23-2 OVERVIEW o Bug fixes. o Several functions have been accelerated. o We thank Marcelino de la Cruz and Ben Madin for contributions. NEW FUNCTIONS o sumouter, quadform Evaluate certain quadratic forms. o flipxy Exchange x and y coordinates. SIGNIFICANT USER-VISIBLE CHANGES o vcov.ppm Accelerated. o owin, as.owin Checking the validity of polygons has been accelerated. o crossing.psp, selfcrossing.psp Accelerated. BUG FIXES o split.ppp If drop=TRUE then some of the point patterns had the wrong windows. Spotted by Marcelino de la Cruz. Fixed. o split.ppp Crashed if the tessellation did not cover the point pattern. Fixed. 
o predict.ppm Crashed when type="se" if NA's were present. Spotted by Ben Madin. Fixed. o plot.ppp Incorrectly handled the case where both 'col' and 'cols' were present. Fixed. o polygon geometry The point-in-polygon test gave the wrong answer in some boundary cases. Fixed. CHANGES IN spatstat VERSION 1.23-1 OVERVIEW o Important bug fix to 'localpcf'. o Inverse-distance weighted smoothing. o Inhomogeneous versions of neighbourhood density functions. o Internal repairs and bug fixes. o We thank Mike Kuhn and Ben Madin for contributions. NEW FUNCTIONS o idw Inverse-distance weighted smoothing. o localKinhom, localLinhom, localpcfinhom Inhomogeneous versions of localK, localL, localpcf BUG FIXES o localpcf The columns of the result were in the wrong order. [i.e. pair correlation functions were associated with the wrong points.] Fixed. o delaunay If the union of several Delaunay triangles formed a triangle, this was erroneously included in the result of delaunay(). Fixed. o predict.ppm, plot.ppm Sometimes crashed with a warning about 'subscript out of bounds'. Fixed. o point-in-polygon test Vertices of a polygon were sometimes incorrectly classified as lying outside the polygon. Fixed. o Internal code Numerous tweaks and repairs to satisfy the package checker for the future R version 2.14. CHANGES IN spatstat VERSION 1.23-0 OVERVIEW o point patterns on a linear network: new tools including geometrically-corrected linear K function, pair correlation function, point process models, envelopes o changes to renormalisation of estimates in Kinhom and pcfinhom o new dataset: Chicago street crime o spatstat now 'Suggests:' the package RandomFields o spatstat now has a Namespace o We thank Mike Kuhn, Monia Mahling and Brian Ripley for contributions. NEW DATASET o chicago Street crimes in the University district of Chicago. A point pattern on a linear network. 
NEW FUNCTIONS o envelope.lpp Simulation envelopes for point patterns on a linear network o lineardisc Compute the 'disc' of radius r in a linear network o linearpcf Pair correlation for point pattern on a linear network o linearKinhom, linearpcfinhom Inhomogeneous versions of the K function and pair correlation function for point patterns on a linear network o lppm Fit point process models on a linear network. o anova.lppm Analysis of deviance for point process models on a linear network. o predict.lppm Prediction for point process models on a linear network. o envelope.lppm Simulation envelopes for point process models on a linear network. o linim Pixel image on a linear network o plot.linim Plot a pixel image on a linear network SIGNIFICANT USER-VISIBLE CHANGES o linearK New argument 'correction'. Geometrically-corrected estimation is performed by default (based on forthcoming paper by Ang, Baddeley and Nair) o Kinhom New argument 'normpower' allows different types of renormalisation. o pcfinhom Now performs renormalisation of estimate. Default behaviour changed - estimates are now renormalised by default. BUG FIXES o density.ppp Crashed if argument 'varcov' was given. Fixed. CHANGES IN spatstat VERSION 1.22-4 OVERVIEW o new diagnostics based on score residuals o new dataset o improvements to plotting summary functions o We thank Ege Rubak, Jesper Moller, George Leser, Robert Lamb and Ulf Mehlig for contributions. NEW FUNCTIONS o Gcom, Gres, Kcom, Kres New diagnostics for fitted Gibbs or Poisson point process models based on score residuals. Gcom is the compensator of the G function Gres is the residual of the G function Kcom is the compensator of the K function Kres is the residual of the K function o psst, psstA, psstG New diagnostics for fitted Gibbs or Poisson point process models based on pseudoscore residuals. 
psst is the pseudoscore diagnostic for a general alternative psstA is the pseudoscore diagnostic for an Area-interaction alternative psstG is the pseudoscore diagnostic for a Geyer saturation alternative o compareFit Computes and compares several point process models fitted to the same dataset, using a chosen diagnostic. o as.interact Extracts the interpoint interaction structure (without parameters) from a fitted point process model or similar object. NEW DATASET o flu Spatial point patterns giving the locations of influenza virus proteins on cell membranes. Kindly released by Dr George Leser and Dr Robert Lamb. SIGNIFICANT USER-VISIBLE CHANGES o pixel images and grids The default size of a pixel grid, given by spatstat.options("npixel"), has been changed from 100 to 128. A power of 2 gives faster and more accurate results in many cases. o residuals.ppm New arguments 'coefs' and 'quad' for advanced use (make it possible to compute residuals from a modified version of the fitted model.) o relrisk New argument 'casecontrol' determines whether a bivariate point pattern should be treated as case-control data. o plot.fv Further improvements in mathematical labels. o plot.fv The formula can now include the symbols .x and .y as abbreviation for the function argument and the recommended function value, respectively. o plot.fv New argument 'add' BUG FIXES o multitype summary functions (Kcross, Kdot, Gcross, Gdot, .....) Plotting these functions generated an error if the name of one of the types of points contained spaces, e.g. "Escherichia coli". Fixed. CHANGES IN spatstat VERSION 1.22-3 OVERVIEW o Important bug fix to simulation code o Miscellaneous improvements o spatstat now depends on R 2.13.0 or later o We thank Ege Rubak, Kaspar Stucki, Vadim Shcherbakov, Jesper Moller and Ben Taylor for contributions. NEW FUNCTIONS o is.stationary, is.poisson New generic functions for testing whether a point process model is stationary and/or Poisson. 
Methods for ppm, kppm, slrm etc o raster.xy raster coordinates of a pixel mask o zapsmall.im 'zapsmall' for pixel images SIGNIFICANT USER-VISIBLE CHANGES o density.ppp New argument 'diggle' allows choice of edge correction o rotate.owin, affine.owin These functions now handle binary pixel masks. New argument 'rescue' determines whether rectangles will be preserved BUG FIXES o rmh, simulate.ppm Serious bug - simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as ppm(betacells, ~marks, Strauss(60)) The calling parameters were garbled. Fixed. o effectfun Crashed if the covariate was a function(x,y). Fixed. o lurking Gave erroneous error messages about 'damaged' models. Fixed. o envelope.ppm Did not recognise when the fitted model was equivalent to CSR. Fixed. o plot.ppx Crashed in some cases. Fixed. CHANGES IN spatstat VERSION 1.22-2 OVERVIEW o Fitting and simulation of log-Gaussian Cox processes with any covariance function o More support for 'kppm' and 'rhohat' objects o K-function for point patterns on a linear network o Metropolis-Hastings algorithm now saves its transition history o Easier control of dummy points in ppm o Convert an 'fv' object to an R function o spatstat now depends on the package 'RandomFields' o We thank Abdollah Jalilian, Shen Guochun, Rasmus Waagepetersen, Ege Rubak and Ang Qi Wei for contributions. NEW FUNCTIONS o linearK Computes the Okabe-Yamada network K-function for a point pattern on a linear network. o pairdist.lpp Shortest-path distances between each pair of points on a linear network. o vcov.kppm Asymptotic variance-covariance matrix for regression parameters in kppm object. 
[Contributed by Abdollah Jalilian and Rasmus Waagepetersen] o rLGCP Simulation of log-Gaussian Cox processes [Contributed by Abdollah Jalilian and Rasmus Waagepetersen] o predict.rhohat Method for 'predict' for objects of class 'rhohat' Computes a pixel image of the predicted intensity. o Kmodel, pcfmodel Generic functions that compute the K-function or pair correlation function of a point process *model*. So far the only methods are for the class 'kppm'. o as.function.fv Converts a function value table (class 'fv') to a function in R o coef.kppm Method for 'coef' for objects of class 'kppm' o unitname, unitname<- These generic functions now have methods for fitted model objects (classes ppm, slrm, kppm, minconfit) and quadrature schemes (quad). o nobs.ppm Method for 'nobs' for class 'ppm'. Returns the number of points in the original data. SIGNIFICANT USER-VISIBLE CHANGES o kppm Can now fit a log-Gaussian Cox process o simulate.kppm Can now simulate a fitted log-Gaussian Cox process o lgcp.estK, lgcp.estpcf These functions previously fitted a log-Gaussian Cox process with exponential covariance. They can now fit a log-Gaussian Cox process with any covariance function implemented by the RandomFields package. o rmh If track=TRUE, the history of transitions of the Metropolis-Hastings algorithm is saved and returned. o ppm New argument 'nd' controls the number of dummy points. o as.fv Now handles objects of class kppm or minconfit. o rhohat If covariate = "x" or "y", the resulting object has the same 'unitname' as the original point pattern data. o rhohat Now has arguments 'eps', 'dimyx' to control pixel resolution. o MultiStrauss, MultiHard, MultiStraussHard Default value of 'types' has been changed to NULL. o data(ants) The auxiliary data 'ants.extra' now includes a function called 'side' determining whether a given location is in the scrub or field region. Can be used as a covariate in ppm, kppm, slrm. 
o print.ppm Now has argument 'what' to allow only selected information to be printed. BUG FIXES o profilepl Crashed in some cases involving multitype interactions. Fixed. o plot.splitppp Behaved incorrectly if 'main' was an expression. Fixed. o effectfun Crashed in trivial cases. Fixed. o kppm, thomas.estpcf, matclust.estpcf, lgcp.estpcf Gave a spurious warning message. Fixed. o step When applied to ppm objects this gave a spurious warning. Fixed. CHANGES IN spatstat VERSION 1.22-1 OVERVIEW o marked line segment patterns can now be plotted o multitype point process models are now 'self-starting' o new functions to manipulate colour images NEW FUNCTIONS o rgbim, hsvim Specify three colour channels. These functions convert three pixel images with numeric values into a single image whose pixel values are strings representing colours. o scaletointerval Generic utility function to rescale data (including spatial data) to a specified interval SIGNIFICANT USER-VISIBLE CHANGES o plot.im Can now plot images whose pixel values are strings representing colours. New argument 'valuesAreColours' o plot.psp Now handles marked line segment patterns and plots the marks as colours. o MultiHard, MultiStrauss, MultiStraussHard The argument 'types' can now be omitted; it will be inferred from the point pattern data. o rhohat Improved mathematical labels (when the result of rhohat is plotted) o plot.fv Minor improvements in graphics BUG FIXES o several minor bug fixes and improvements to satisfy R-devel CHANGES IN spatstat VERSION 1.22-0 OVERVIEW o support for point patterns on a linear network o 'superimpose' is now generic o improved mathematical labels when plotting functions NEW CLASSES o linnet An object of class 'linnet' represents a linear network, i.e. a connected network of line segments, such as a road network. Methods for this class include plot, print, summary etc. 
o lpp An object of class 'lpp' represents a point pattern on a linear network, such as a record of the locations of road accidents on a road network. Methods for this class include plot, print, summary etc. NEW FUNCTIONS o runiflpp Uniformly distributed random points on a linear network o rpoislpp Poisson point process on a linear network o clickjoin Interactive graphics to create a linear network o superimpose The function 'superimpose' is now generic, with methods for ppp, psp and a default method. o as.ppp.psp New method for as.ppp extracts the endpoints and marks from a line segment pattern NEW DATASETS o simplenet Simple example of a linear network SIGNIFICANT USER-VISIBLE CHANGES o superimposePSP This function is now deprecated in favour of 'superimpose' o superimpose Now handles data frames of marks. o plot.fv Argument 'legendmath' now defaults to TRUE. New argument 'legendargs' gives more control over appearance of legend. Increased default spacing between lines in legend. o eval.fv, with.fv Functions computed using eval.fv or with.fv now have better labels when plotted. o summary functions (Kest, Kest.fft, Kcross, Kdot, Kmulti, Kinhom, Kcross.inhom, Kdot.inhom, Kmulti.inhom, Lest, Lcross, Ldot, pcf, pcfcross, pcfdot, pcfinhom, pcfcross.inhom, pcfdot.inhom, Fest, Gest, Gcross, Gdot, Gmulti, Jest, Jcross, Jdot, Jmulti, Iest, localL, localK, markcorr, markvario, markconnect, Emark, Vmark, allstats, alltypes) Improved plot labels. BUG FIXES o superimpose If the marks components of patterns consisted of character vectors (rather than factors or non-factor numeric vectors) an error was triggered. Fixed. o plot.fv The y axis limits did not always cover the range of values if the argument 'shade' was used. Fixed. o plot.rhohat The y axis label was sometimes incorrect. Fixed. o plot.rhohat If argument 'xlim' was used, a warning was generated from 'rug'. Fixed. CHANGES IN spatstat VERSION 1.21-6 OVERVIEW o A line segment pattern can now have a data frame of marks. 
o Various minor extensions and alterations in behaviour NEW FUNCTIONS o nsegments Number of segments in a line segment pattern SIGNIFICANT USER-VISIBLE CHANGES o psp class A line segment pattern (object of class 'psp') can now have a data frame of marks. o density.ppp New argument 'adjust' makes it easy to adjust the smoothing bandwidth o plot.envelope If the upper envelope is NA but the lower envelope is finite, the upper limit is now treated as +Infinity o msr Argument 'continuous' renamed 'density' BUG FIXES o [.psp In X[W] if X is a line segment pattern and W is a polygonal window, marks were sometimes discarded, leading to an error. Fixed. o [.psp In X[W] if X is a line segment pattern and W is a rectangular window, if the marks of X were factor values, they were converted to integers. Fixed. o superimposePSP If the marks were a factor, they were mistakenly converted to integers. Fixed. o is.marked.ppp Did not generate a fatal error when na.action="fatal" as described in the help file. Fixed. CHANGES IN spatstat VERSION 1.21-5 OVERVIEW o Increased numerical stability. o New 'self-starting' feature of interpoint interactions. SIGNIFICANT USER-VISIBLE CHANGES o ppm Interaction objects may now be 'self-starting' i.e. initial parameter estimates can be computed from the point pattern dataset. So far, only the LennardJones() interaction has a self-starting feature. o LennardJones Increased numerical stability. New (optional) scaling argument 'sigma0'. Interpoint distances are automatically rescaled using 'self-starting' feature. o vcov.ppm New argument 'matrix.action' controls what happens when the matrix is ill-conditioned. Changed name of argument 'gamaction' to 'gam.action' o rmhmodel.ppm Default resolution of trend image has been increased. o is.poisson.ppm Accelerated. 
o ppm, kppm, qqplot.ppm Improved robustness to numerical error CHANGES IN spatstat VERSION 1.21-4 OVERVIEW o Urgent bug fix BUG FIXES o print.summary.ppm exited with an error message, if the model had external covariates. Fixed. CHANGES IN spatstat VERSION 1.21-3 OVERVIEW o Point process model covariates may now depend on additional parameters. o New class of signed measures, for residual analysis. o Miscellaneous improvements and bug fixes. NEW FUNCTIONS o clarkevans.test Classical Clark-Evans test of randomness o msr New class 'msr' of signed measures and vector-valued measures supporting residual analysis. o quadrat.test.quadratcount Method for 'quadrat.test' for objects of class 'quadratcount' (allows a chi-squared test to be performed on quadrat counts rather than recomputing from the original data) o tile.areas Computes areas of tiles in a tessellation (efficiently) SIGNIFICANT USER-VISIBLE CHANGES o ppm The spatial trend can now depend on additional parameters. This is done by allowing spatial covariate functions to have additional parameters: function(x, y, ...) where ... is controlled by the new argument 'covfunargs' to ppm o profilepl Can now maximise over trend parameters as well as interaction parameters o residuals.ppm The value returned by residuals.ppm is now an object of class 'msr'. It can be plotted directly. o eval.im When the argument 'envir' is used, eval.im() now recognises functions as well as variables in 'envir' o colourmap The argument 'col' can now be any kind of colour data o persp.im The 'colmap' argument can now be a 'colourmap' object o ppm The print and summary methods for 'ppm' objects now show standard errors for parameter estimates if the model is Poisson. o quadrat.test The print method for 'quadrattest' objects now displays information about the quadrats o lurking Improved format of x axis label o distmap.ppp Internal code is more robust. BUG FIXES o im Did not correctly handle 1 x 1 arrays. Fixed. 
o as.mask, pixellate.ppp Weird things happened if the argument 'eps' was set to a value greater than the size of the window. Fixed. CHANGES IN spatstat VERSION 1.21-2 OVERVIEW o New multitype hardcore interaction. o Nonparametric estimation of covariate effects on point patterns. o Output of 'Kmeasure' has been rescaled. o Numerous improvements and bug fixes. NEW FUNCTIONS o MultiHard multitype hard core interaction for use in ppm() o coords<- Assign new coordinates to the points in a point pattern o rhohat Kernel estimate for the effect of a spatial covariate on point process intensity SIGNIFICANT USER-VISIBLE CHANGES o as.ppp.matrix, as.ppp.data.frame These methods for 'as.ppp' now accept a matrix or data frame with any number of columns (>= 2) and interpret the additional columns as marks. o Kmeasure The interpretation of the output has changed: the pixel values are now density estimates. o rmh.ppm, rmhmodel.ppm These functions now accept a point process model fitted with the 'MultiHard' interaction o rmh.default, rmhmodel.default These functions now accept the option: cif='multihard' defining a multitype hard core interaction. o markcorr Now handles a data frame of marks o varblock Improved estimate in the case of the K function o colourmap, lut New argument 'range' makes it easier to specify a colour map or lookup table o [<-.hyperframe Now handles multiple columns o plot.fv Improved y axis labels o spatstat.options New option 'par.fv' controls default parameters for line plotting o rmhmodel More safety checks on parameter values. o quadratresample New argument 'verbose' o smooth.fv Default value of 'which' has been changed. BUG FIXES o Kest If the argument 'domain' was used, the resulting estimate was not correctly normalised. Fixed. o Kest The Lotwick-Silverman variance approximation was incorrectly calculated. (Spotted by Ian Dryden and Igor Chernayavsky). Fixed. 
o plot.owin, plot.ppp Display of binary masks was garbled if the window was empty or if it was equivalent to a rectangle. Fixed. o plot.bermantest One of the vertical lines for the Z1 test was in the wrong place. Fixed. o marks<-.ppx Crashed in some cases. Fixed. o is.convex An irrelevant warning was issued (for non-convex polygons). Fixed. CHANGES IN spatstat VERSION 1.21-1 OVERVIEW o Confidence intervals for K-function and other statistics o Bug fixes for smoothing and relative risk estimation NEW FUNCTIONS o varblock Variance estimation (and confidence intervals) for summary statistics such as Kest, using subdivision technique o bw.stoyan Bandwidth selection by Stoyan's rule of thumb. o which.max.im Applied to a list of images, this determines which image has the largest value at each pixel. o as.array.im Convert image to array SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp, markmean, sharpen.ppp, relrisk, bw.relrisk Further acceleration achieved. o Kest Argument 'correction' now explicitly overrides automatic defaults o plot.fv More robust handling of 'shade' BUG FIXES o relrisk Format of relrisk(at="points") was incorrect. Fixed. o bw.relrisk Result was incorrect in the default case method="likelihood" because of previous bug. Fixed. o Jdot, Jcross, Jmulti Return value did not include the hazard function, when correction="km" Fixed. o Jdot, Jcross, Jmulti Format of output was incompatible with format of Jest. Fixed. CHANGES IN spatstat VERSION 1.21-0 OVERVIEW o Implemented Spatial Logistic Regression o Implemented nonparametric estimation of relative risk with bandwidth selection by cross-validation. o Smoothing functions can handle a data frame of marks. o New options in Kinhom; default behaviour has changed. 
NEW FUNCTIONS o slrm Fit a spatial logistic regression model o anova.slrm, coef.slrm, fitted.slrm, logLik.slrm, plot.slrm, predict.slrm Methods for spatial logistic regression models o relrisk Nonparametric estimation of relative risk o bw.relrisk Automatic bandwidth selection by cross-validation o default.rmhcontrol Sets default values of Metropolis-Hastings parameters SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp, markmean These functions now accept a data frame of marks. o Kinhom Default behaviour has changed. New argument 'renormalise=TRUE' determines scaling of estimator and affects bias and variance in small samples. o residuals.ppm Now also computes the score residuals. o plot.im New argument 'ribscale' o plot.listof, plot.splitppp New arguments panel.begin, panel.end and panel.args o ppp Now checks for NA/NaN/Inf values in the coordinates o envelope.ppm Changed default value of 'control' New argument 'nrep' o qqplot.ppm Changed default value of 'control' BUG FIXES o marks<-.ppp, setmarks, %mark% A matrix of marks was accepted by ppp() but not by these assignment functions. Fixed. o density.ppp, smooth.ppp, sharpen.ppp, markmean Crashed if the bandwidth was extremely small. Fixed. CHANGES IN spatstat VERSION 1.20-5 OVERVIEW o Accelerated computations of kernel smoothing. o Implemented Choi-Hall data sharpening. NEW FUNCTIONS o sharpen.ppp Performs Choi-Hall data sharpening of a point pattern SIGNIFICANT USER-VISIBLE CHANGES o density.ppp, smooth.ppp Computation has been vastly accelerated for density(X, at="points") and smooth.ppp(X, at="points") o Kinhom Accelerated in case where lambda=NULL o Vignette 'shapefiles' updated CHANGES IN spatstat VERSION 1.20-4 OVERVIEW o New functions for inhomogeneous point patterns and local analysis. o Pair correlation function for 3D point patterns o Minor improvements and bug fixes to simulation code and image functions NEW FUNCTIONS o pcf3est Pair correlation function for 3D point patterns. 
o Kscaled, Lscaled Estimator of the template K function (and L-function) for a locally-scaled point process. o localpcf Local version of pair correlation function o identify.psp Method for 'identify' for line segment patterns. o as.im.matrix Converts a matrix to a pixel image SIGNIFICANT USER-VISIBLE CHANGES o rMaternI, rMaternII New argument 'stationary=TRUE' controls whether the simulated process is stationary (inside the simulation window). Default simulation behaviour has changed. o im New arguments 'xrange', 'yrange' o envelope Improvements to robustness of code. BUG FIXES o quadratcount If V was a tessellation created using a factor-valued image, quadratcount(X, tess=V) crashed with the error "Tessellation does not contain all the points of X". Fixed. o [.im If Z was a factor valued image and X was a point pattern then Z[X] was not a factor. Fixed. CHANGES IN spatstat VERSION 1.20-3 OVERVIEW o minor improvements (mostly internal). NEW FUNCTIONS o unmark.ppx Method for 'unmark' for general space-time point patterns SIGNIFICANT USER-VISIBLE CHANGES o plot.ppx Now handles marked patterns, in two-dimensional case o as.psp.psp Default value of argument 'check' set to FALSE CHANGES IN spatstat VERSION 1.20-2 OVERVIEW o Extensions to minimum contrast estimation. o Bug fix in simulation of Lennard-Jones model. o More support for distance functions. o Changes to point process simulations. NEW FUNCTIONS o thomas.estpcf Fit Thomas process model by minimum contrast using the pair correlation function (instead of the K-function). o matclust.estpcf Fit Matern Cluster model by minimum contrast using the pair correlation function (instead of the K-function). o lgcp.estpcf Fit log-Gaussian Cox process model by minimum contrast using the pair correlation function (instead of the K-function). o contour.distfun, persp.distfun Methods for 'contour' and 'persp' for distance functions o default.expand Computes default window for simulation of a fitted point process model. 
SIGNIFICANT USER-VISIBLE CHANGES o kppm Models can now be fitted using either the K-function or the pair correlation function. o ppm The list of covariates can now include windows (objects of class 'owin'). A window will be treated as a logical covariate that equals TRUE inside the window and FALSE outside it. o plot.distfun Pixel resolution can now be controlled. o envelope.ppm, qqplot.ppm The default value of 'control' has changed; simulation results may be slightly different. o rmh Slightly accelerated. BUG FIXES o rmh Simulation of the Lennard-Jones model (cif = 'lennard') was incorrect due to an obscure bug, introduced in spatstat 1.20-1. Fixed. o thomas.estK, matclust.estK, lgcp.estK The value of 'lambda' (if given) was ignored if X was a point pattern. Fixed. CHANGES IN spatstat VERSION 1.20-1 OVERVIEW o Further increases in speed and efficiency of ppm and rmh o New pairwise interaction model NEW FUNCTIONS o DiggleGatesStibbard Diggle-Gates-Stibbard pairwise interaction for use in ppm() SIGNIFICANT USER-VISIBLE CHANGES o ppm has been accelerated by a factor of 10 for the BadGey interaction. o rmh simulation of the Lennard-Jones model (cif='lennard') has been greatly accelerated. o rmh, rmhmodel.ppm Point process models fitted by ppm() using the DiggleGatesStibbard interaction can be simulated automatically using rmh. BUG FIXES o fitin The plot of a fitted Hardcore interaction was incorrect. Fixed. CHANGES IN spatstat VERSION 1.20-0 OVERVIEW o spatstat now contains over 1000 functions. o Substantial increase in speed and efficiency of model-fitting code. o Changes to factor-valued images. 
SIGNIFICANT USER-VISIBLE CHANGES o ppm has been accelerated by a factor of 10, and can handle datasets with 20,000 points, for the following interactions: DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard o predict.ppm accelerated by a factor of 3 (when type = "cif") with vastly reduced memory requirements for the following interactions: DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard o pixel images (class "im") The internal representation of factor-valued images has changed. Existing objects in the old format should still work. o im The syntax for creating a factor-valued image has changed. Argument 'lev' has been deleted. o ppm Some warnings have been reworded for greater clarity. BUG FIXES o [.im Mishandled some factor-valued images. Fixed. o hist.im Produced slightly erroneous output for some factor-valued images. Fixed. o plot.owin Filled polygons appeared to contain criss-cross lines on some graphics drivers. Fixed. o deltametric Did not handle windows with different enclosing frames (error message: 'dA and dB are incompatible') Fixed. o quadratcount Crashed if the pattern was empty and the window was a rectangle. (Noticed by Sandro Azaele) Fixed. o rNeymanScott Crashed if the parent process realisation was empty. (Noticed by Sandro Azaele) Fixed. CHANGES IN spatstat VERSION 1.19-3 ACKNOWLEDGEMENTS o We thank David Dereudre for contributions. OVERVIEW o Urgent bug fix to Metropolis-Hastings for Lennard-Jones model. o Miscellaneous additions to plotting and colour management. NEW FUNCTIONS o col2hex, rgb2hex, paletteindex, samecolour Functions for converting and comparing colours. o plot.envelope New method for plotting envelopes. By default the area between the upper and lower envelopes is shaded in grey. SIGNIFICANT USER-VISIBLE CHANGES o plot.fasp If the entries in the array are envelopes, they are plotted using plot.envelope (hence the envelope region is shaded grey). 
o plot.fv Now displays mathematical notation for each curve, if legendmath=TRUE. o print.fv Now prints the available range of 'r' values as well as the recommended range of 'r' values. BUG FIXES o rmh Simulation of Lennard-Jones model was incorrect; the simulations were effectively Poisson patterns. (Spotted by David Dereudre.) Fixed. o plot.fv Did not correctly handle formulas that included I( ) Fixed. CHANGES IN spatstat VERSION 1.19-2 ACKNOWLEDGEMENTS o We thank Jorge Mateu, Michael Sumner and Sebastian Luque for contributions. OVERVIEW o More support for fitted point process models and pixel images. o Improved plotting of pixel images and envelopes. o Simulation algorithm for Lennard-Jones process. o Improvements and bug fixes to envelopes. o Bug fixes to Metropolis-Hastings simulation. NEW FUNCTIONS o pairs.im Creates a scatterplot matrix for several pixel images. o model.frame.ppm Method for 'model.frame' for point process models. o sort.im Method for 'sort' for pixel images. SIGNIFICANT USER-VISIBLE CHANGES o plot.fv, plot.fasp New argument 'shade' enables confidence intervals or significance bands to be displayed as filled grey shading. o LennardJones The parametrisation of this interaction function has been changed. o rmh, rmhmodel These functions will now simulate a point process model that was fitted using the LennardJones() interaction. o rmh.default, rmhmodel.default These functions will now simulate a point process model with the Lennard-Jones interaction (cif='lennard'). o ecdf This function now works for pixel images. o dim, row, col These functions now work for pixel images. o order This function now works for pixel images. o [.im and [<-.im The subset index can now be any valid subset index for a matrix. o density.ppp, smooth.ppp The return value now has attributes 'sigma' and 'varcov' reporting the smoothing bandwidth. o plot.im The argument 'col' can now be a 'colourmap' object. 
This makes it possible to specify a fixed mapping between numbers and colours (e.g. so that it is consistent between plots of several different images). o rmh, spatstat.options spatstat.options now recognises the parameter 'expand' which determines the default window expansion factor in rmh. o rmh Improved handling of ppm objects with covariates. o kstest The 'covariate' can now be one of the characters "x" or "y" indicating the Cartesian coordinates. BUG FIXES o model.matrix.ppm For a fitted model that used a large number of quadrature points, model.matrix.ppm sometimes reported an internal error about mismatch between the model matrix and the quadrature scheme. Fixed. o plot.ppx Minor bugs fixed. o rmh In rare cases, the simulated point pattern included multiple points at the origin (0,0). (Bug introduced in spatstat 1.17-0.) Fixed. o rmh, rmhmodel.ppm Crashed when applied to a fitted multitype point process model if the model involved more than one covariate image. (Spotted by Jorge Mateu) Fixed. o density.psp If any segment had zero length, the result contained NaN values. (Spotted by Michael Sumner and Sebastian Luque.) Fixed. o envelope Crashed with fun=Lest or fun=Linhom if the number of points in a simulated pattern exceeded 3000. Fixed. o plot.kstest Main title was corrupted if the covariate was a function. Fixed. CHANGES IN spatstat VERSION 1.19-1 OVERVIEW o New dataset: replicated 3D point patterns. o Improvements to Metropolis-Hastings simulation code. o More support for hyperframes. o Bug fixes. NEW DATASETS o osteo: Osteocyte Lacunae data: replicated 3D point patterns NEW FUNCTIONS o rbind.hyperframe: Method for rbind for hyperframes. o as.data.frame.hyperframe: Converts a hyperframe to a data frame. SIGNIFICANT USER-VISIBLE CHANGES o Fiksel: Fitted point process models (class ppm) with the Fiksel() double exponential interaction can now be simulated by rmh. 
o rmh.default: Point processes with the Fiksel interaction can now be simulated by specifying parameters in rmh.default. o logLik.ppm: New argument 'warn' controls warnings. o profilepl: No longer issues spurious warnings. BUG FIXES o Hardcore, rmh: Simulation of the 'Hardcore' process was incorrect. The hard core radius was erroneously set to zero so that the simulated patterns were Poisson. Fixed. o fitin: A plot of the pairwise interaction function of a fitted model, generated by plot(fitin(model)) where model <- ppm(...), was sometimes incorrect when the model included a hard core. Fixed. CHANGES IN spatstat VERSION 1.19-0 OVERVIEW o Numerous bugs fixed in the implementation of the Huang-Ogata approximate maximum likelihood method. o New interpoint interaction model. NEW FUNCTIONS o Fiksel: new interpoint interaction: Fiksel's double exponential model. SIGNIFICANT USER-VISIBLE CHANGES o runifpoint, rpoispp, envelope These functions now issue a warning if the number of random points to be generated is very large. This traps a common error in simulation experiments. BUG FIXES o predict.ppm, fitted.ppm: Predictions and fitted values were incorrect for objects fitted using ppm(..., method="ho"). Fixed. o logLik, AIC: Values of logLik() and AIC() were incorrect for objects fitted using ppm(..., method="ho"). Fixed. o profilepl: Results were incorrect if the argument 'method="ho"' was used. Fixed. o fitin The result of fitin() was incorrect for objects fitted using ppm(..., method="ho"). Fixed. o rmhcontrol: rmhcontrol(NULL) generated an error. Fixed. CHANGES IN spatstat VERSION 1.18-4 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. BUG FIXES o pixellate.psp: segments shorter than one pixel width were measured incorrectly if the 'weights' argument was present. Fixed. 
NEW FUNCTIONS o pairdist.ppx, crossdist.ppx, nndist.ppx, nnwhich.ppx: Methods for pairdist, crossdist, nndist, nnwhich for multidimensional point patterns (class 'ppx') o runifpointx, rpoisppx: Random point patterns in any number of dimensions o boxx: Multidimensional box in any number of dimensions o diameter.boxx, volume.boxx, shortside.boxx, eroded.volumes.boxx: Geometrical computations for multidimensional boxes o sum.im, max.im, min.im: Methods for sum(), min(), max() for pixel images. o as.matrix.ppx: Convert a multidimensional point pattern to a matrix SIGNIFICANT USER-VISIBLE CHANGES o plot.ppp: New argument 'zap' o diameter: This function is now generic, with methods for "owin", "box3" and "boxx" o eroded.volumes: This function is now generic, with methods for "box3" and "boxx" CHANGES IN spatstat VERSION 1.18-3 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. BUG FIXES o pixellate.psp: segments shorter than one pixel width were measured incorrectly. Fixed. o fv: 'alim' not handled correctly. Fixed. NEW FUNCTIONS o smooth.fv: Applies spline smoothing to the columns of an fv object. CHANGES IN spatstat VERSION 1.18-2 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. NEW FUNCTIONS o Gfox, Jfox: Foxall's G and J functions o as.owin.distfun: New method for as.owin extracts the domain of a distfun object. SIGNIFICANT USER-VISIBLE CHANGES o distfun: objects of class 'distfun', when called as functions, will now accept either two vectors (x,y) or a point pattern x. o Hest: this function can now compute the Hanisch estimator. It now has arguments 'r', 'breaks' and 'correction', like other summary functions. o Hest: new argument 'conditional'. BUG FIXES o pixellate.psp: Values were sometimes incorrect due to coding error. (Spotted by Michael Sumner) Fixed. o kstest: Crashed if the covariate contained NA's. Fixed. o kstest: Crashed if X was a multitype point pattern in which some mark values were unrepresented. Fixed. 
o lurking: Minor bug in handling of NA values. Fixed. o Hest: labels of columns were incorrect. Fixed. CHANGES IN spatstat VERSION 1.18-1 ACKNOWLEDGEMENTS o we thank Andrew Bevan and Ege Rubak for suggestions. NEW FUNCTIONS o Hardcore: Hard core interaction (for use in ppm) o envelope.pp3: simulation envelopes for 3D point patterns o npoints: number of points in a point pattern of any kind SIGNIFICANT USER-VISIBLE CHANGES o rmh.ppm, rmhmodel.ppm: It is now possible to simulate Gibbs point process models that are fitted to multitype point patterns using a non-multitype interaction, e.g. data(amacrine) fit <- ppm(amacrine, ~marks, Strauss(0.1)) rmh(fit, ...) o rmh.ppm, rmhmodel.ppm, rmh.default, rmhmodel.default: Hard core models can be simulated. o rmh.default, rmhmodel.default: The argument 'par' is now required to be a list, in all cases (previously it was sometimes a list and sometimes a vector). o Fest: Calculation has been accelerated in some cases. o summary.pp3 now returns an object of class 'summary.pp3' containing useful summary information. It is plotted by 'plot.summary.pp3'. o F3est, G3est, K3est: these functions now accept 'correction="best"' o union.owin, intersect.owin: these functions now handle any number of windows. o envelope.ppp, envelope.ppm, envelope.kppm: argument lists have changed slightly BUG FIXES o Fest: The result of Fest(X, correction="rs") had a slightly corrupted format, so that envelope(X, Fest, correction="rs") in fact computed the envelopes based on the "km" correction. (Spotted by Ege Rubak). Fixed. o rmh (rmh.ppm, rmhmodel.ppm): rmh sometimes failed for non-stationary point process models, with a message about "missing value where TRUE/FALSE needed". (Spotted by Andrew Bevan). Fixed. o diagnose.ppm, lurking: Calculations were not always correct if the model had conditional intensity equal to zero at some locations. Fixed. o ppm, profilepl: If data points are illegal under the model (i.e. 
if any data points have conditional intensity equal to zero) the log pseudolikelihood should be -Inf but was sometimes returned as a finite value. Thus profilepl did not always work correctly for models with a hard core. Fixed. o F3est, G3est: Debug messages were printed unnecessarily. Fixed. CHANGES IN spatstat VERSION 1.18-0 ACKNOWLEDGEMENTS o we thank Ege Rubak and Tyler Dean Rudolph for suggestions. HEADLINES o A point pattern is now allowed to have a data frame of marks (previously the marks had to be a vector). o Extended capabilities for 'envelope' and 'kstest'. NEW FUNCTIONS o pixellate.psp, as.mask.psp Convert a line segment pattern to a pixel image or binary mask o as.data.frame.im Convert a pixel image to a data frame SIGNIFICANT USER-VISIBLE CHANGES o A point pattern is now allowed to have a data frame of marks (previously the marks had to be a vector). o Many functions in spatstat now handle point patterns with a data frame of marks. These include print.ppp, summary.ppp, plot.ppp, split.ppp. o finpines, nbfires, shapley: The format of these datasets has changed. They are now point patterns with a data frame of marks. o envelope() is now generic, with methods for "ppp", "ppm" and "kppm". o kstest() now handles multitype point patterns and multitype point process models. o nnclean() now returns a point pattern with a data frame of marks. o plot.ppp() has new argument 'which.marks' to select one column from a data frame of marks to be plotted. o plot.ppp() now handles marks that are POSIX times. o complement.owin now handles any object acceptable to as.owin. BUG FIXES o erosion(w) and opening(w) crashed if w was not a window. Fixed. o diameter() and eroded.areas() refused to work if w was not a window. Fixed. CHANGES IN spatstat VERSION 1.17-6 ACKNOWLEDGEMENTS o We thank Simon Byers and Adrian Raftery for generous contributions. OVERVIEW o Nearest neighbour clutter removal algorithm o New documentation for the 'fv' class. 
o Minor improvements and bug fixes. NEW FUNCTIONS o nnclean: Nearest neighbour clutter removal for recognising features in spatial point patterns. Technique of Byers and Raftery (1998) [From original code by Simon Byers and Adrian Raftery, adapted for spatstat.] o marks.ppx, marks<-.ppx: Methods for extracting and changing marks in a multidimensional point pattern o latest.news: print news about the current version of the package SIGNIFICANT USER-VISIBLE CHANGES o news: spatstat now has a NEWS file which can be printed by typing news(package="spatstat"). o areaGain, areaLoss: New algorithms in case exact=TRUE. Syntax slightly modified. o with.hyperframe: - The result now inherits 'names' from the row names of the hyperframe. - New argument 'enclos' controls the environment in which the expression is evaluated. - The algorithm is now smarter at simplifying the result when simplify=TRUE. o update.ppm: Tweaked to improve the ability of ppm objects to be re-fitted in different contexts. ADVANCED USERS ONLY o Documentation for the class 'fv' of function value tables - fv: Creates an object of class 'fv' - cbind.fv, collapse.fv: Combine objects of class 'fv' - bind.fv: Add additional columns of data to an 'fv' object BUG FIXES o "$<-.hyperframe" destroyed the row names of the hyperframe. Fixed. o model.matrix.ppm had minor inconsistencies. Fixed. o ppm: The fitted coefficient vector had incorrect format in the default case of a uniform Poisson process. Fixed. o plot.ppx: Crashed if the argument 'main' was given. Fixed. o envelope.ppp: Crashed if the object returned by 'fun' did not include a column called "theo". Fixed. 
spatstat/R/0000755000176200001440000000000013613547031012321 5ustar liggesusersspatstat/R/triangulate.R0000644000176200001440000000156613333543255014776 0ustar liggesusers#' #' triangulate.R #' #' Decompose a polygon into triangles #' #' $Revision: 1.4 $ $Date: 2015/11/21 11:13:00 $ #' triangulate.owin <- local({ is.triangle <- function(p) { return((length(p$bdry) == 1) && (length(p$bdry[[1]]$x) == 3)) } triangulate.owin <- function(W) { stopifnot(is.owin(W)) W <- as.polygonal(W, repair=TRUE) P <- as.ppp(vertices(W), W=Frame(W), check=FALSE) D <- delaunay(P) V <- intersect.tess(W, D) Candidates <- tiles(V) istri <- sapply(Candidates, is.triangle) Accepted <- Candidates[istri] if(any(!istri)) { # recurse Worries <- unname(Candidates[!istri]) Fixed <- lapply(Worries, triangulate.owin) Fixed <- do.call(c, lapply(Fixed, tiles)) Accepted <- append(Accepted, Fixed) } result <- tess(tiles=Accepted, window=W) return(result) } triangulate.owin }) spatstat/R/classes.R0000644000176200001440000000232313333543254014103 0ustar liggesusers# # # classes.S # # $Revision: 1.7 $ $Date: 2006/10/09 03:38:14 $ # # Generic utilities for classes # # #-------------------------------------------------------------------------- verifyclass <- function(X, C, N=deparse(substitute(X)), fatal=TRUE) { if(!inherits(X, C)) { if(fatal) { gripe <- paste("argument", sQuote(N), "is not of class", sQuote(C)) stop(gripe) } else return(FALSE) } return(TRUE) } #-------------------------------------------------------------------------- checkfields <- function(X, L) { # X is a list, L is a vector of strings # Checks for presence of field named L[i] for all i return(all(!is.na(match(L,names(X))))) } getfields <- function(X, L, fatal=TRUE) { # X is a list, L is a vector of strings # Extracts all fields with names L[i] from list X # Checks for presence of all desired fields # Returns the sublist of X with fields named L[i] absent <- is.na(match(L, names(X))) if(any(absent)) { gripe <- paste("Needed the following 
components:", paste(L, collapse=", "), "\nThese ones were missing: ", paste(L[absent], collapse=", ")) if(fatal) stop(gripe) else warning(gripe) } return(X[L[!absent]]) } spatstat/R/lixellate.R0000644000176200001440000000606013615441326014433 0ustar liggesusers#' #' lixellate.R #' #' Divide each segment of a linear network into several pieces #' #' $Revision: 1.7 $ $Date: 2020/02/02 03:18:11 $ #' lixellate <- function(X, ..., nsplit, eps, sparse=TRUE) { missn <- missing(nsplit) || (length(nsplit) == 0) misse <- missing(eps) || (length(eps) == 0) if(missn && misse) stop("One of the arguments 'nsplit' or 'eps' must be given") if(!missn && !misse) stop("The arguments 'nsplit' or 'eps' are incompatible") if(!missn) { stopifnot(is.numeric(nsplit)) stopifnot(all(is.finite(nsplit))) stopifnot(all(nsplit >= 0)) if(!all(nsplit == as.integer(nsplit))) stop("nsplit should be an integer or vector of integers", call.=FALSE) } else { check.1.real(eps) stopifnot(eps > 0) } if(is.lpp(X)) { rtype <- "lpp" np <- npoints(X) L <- as.linnet(X) } else if(inherits(X, "linnet")) { rtype <- "linnet" L <- X X <- runiflpp(1, L) np <- 0 } else stop("X should be a linnet or lpp object") if(is.null(sparse)) sparse <- identical(L$sparse, TRUE) from <- L$from to <- L$to ns <- length(from) if(missn) { lenfs <- lengths.psp(as.psp(L)) nsplit <- ceiling(lenfs/eps) } else { if(length(nsplit) == 1) { nsplit <- rep(nsplit, ns) } else if(length(nsplit) != ns) { stop(paste("nsplit should be a single number,", "or a vector of length equal to the number of segments")) } } sumN <- sum(nsplit) sumN1 <- sum(nsplit-1) V <- vertices(L) nv <- npoints(V) xv <- V$x yv <- V$y coordsX <- coords(X) sp <- coordsX$seg tp <- coordsX$tp ## sort data in increasing order of 'sp' oo <- order(sp) z <- .C("Clixellate", ns=as.integer(ns), fromcoarse=as.integer(from-1L), tocoarse = as.integer(to-1L), fromfine=as.integer(integer(sumN)), tofine = as.integer(integer(sumN)), nv = as.integer(nv), xv = as.double(c(xv, 
numeric(sumN1))), yv = as.double(c(yv, numeric(sumN1))), svcoarse = as.integer(integer(nv + sumN1)), tvcoarse = as.double(numeric(nv + sumN1)), nsplit = as.integer(nsplit), np = as.integer(np), spcoarse = as.integer(sp[oo]-1L), tpcoarse = as.double(tp[oo]), spfine = as.integer(integer(np)), tpfine = as.double(numeric(np)), PACKAGE = "spatstat") Lfine <- with(z, { ii <- seq_len(nv) Vnew <- ppp(xv[ii], yv[ii], window=Frame(L), check=FALSE) Lfine <- linnet(Vnew, edges=cbind(fromfine,tofine)+1, sparse=sparse) marks(Lfine$vertices) <- markcbind(marks(Lfine$vertices), data.frame(segcoarse=svcoarse+1, tpcoarse=tvcoarse)) Lfine }) if(rtype == "linnet") return(Lfine) ## put coordinates back in original order sp[oo] <- as.integer(z$spfine + 1L) tp[oo] <- z$tpfine coordsX$seg <- sp coordsX$tp <- tp ## make lpp Xfine <- lpp(coordsX, Lfine) marks(Xfine) <- marks(X) return(Xfine) } spatstat/R/Kinhom.R0000644000176200001440000004627713551507016013711 0ustar liggesusers# # Kinhom.S Estimation of K function for inhomogeneous patterns # # $Revision: 1.99 $ $Date: 2019/10/16 03:26:26 $ # # Kinhom() compute estimate of K_inhom # # # Reference: # Non- and semiparametric estimation of interaction # in inhomogeneous point patterns # A.Baddeley, J.Moller, R.Waagepetersen # Statistica Neerlandica 54 (2000) 329--350. 
#
# -------- functions ----------------------------------------
#	Kinhom()	compute estimate of K
#                       using various edge corrections
#
#       Kwtsum()        internal routine for border correction
#
# -------- standard arguments ------------------------------
#	X		point pattern (of class 'ppp')
#
#	r		distance values at which to compute K
#
#       lambda          vector of intensity values for points of X
#
# -------- standard output ------------------------------
#      A data frame (class "fv") with columns named
#
#	r:		same as input
#
#	trans:		K function estimated by translation correction
#
#	iso:		K function estimated by Ripley isotropic correction
#
#       theo:           K function for Poisson ( = pi * r ^2 )
#
#	border:		K function estimated by border method
#			(denominator = sum of weights of points)
#
#       bord.modif:	K function estimated by border method
#			(denominator = area of eroded window)
#
# ------------------------------------------------------------------------

# Inhomogeneous L-function: L_inhom(r) = sqrt(K_inhom(r)/pi).
# Thin wrapper: delegates all estimation to Kinhom(), then transforms
# and relabels the resulting 'fv' object.
"Linhom" <- function(X, ..., correction) {
  if(missing(correction)) correction <- NULL
  K <- Kinhom(X, ..., correction=correction)
  # transform: clamp at 0 before sqrt, so tiny negative estimates don't yield NaN
  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
  # relabel the fv object
  L <- rebadge.fv(L, quote(L[inhom](r)), c("L", "inhom"),
                  names(K), new.labl=attr(K, "labl"))
  attr(L, "labl") <- attr(K, "labl")
  # propagate the list of arguments 'dangerous' for envelope()
  attr(L, "dangerous") <- attr(K, "dangerous")
  #
  return(L)
}

# Inhomogeneous K-function of Baddeley, Moller & Waagepetersen.
#
# Intensity information can be supplied as:
#   lambda       - vector / image / function / fitted model (ppm, kppm, dppm)
#   reciplambda  - 1/lambda in vector / image / function form
#   lambda2, reciplambda2 - npts x npts matrix of second-order products
# If none is given, lambda is estimated by leave-one-out kernel smoothing.
#
# renormalise/normpower rescale the estimate by a data-dependent factor;
# ratio=TRUE returns numerator/denominator information for pooling.
"Kinhom"<-
  function (X, lambda=NULL, ..., r = NULL, breaks = NULL,
            correction=c("border", "bord.modif", "isotropic", "translate"),
            renormalise=TRUE,
            normpower=1,
            update = TRUE,
            leaveoneout = TRUE,
            nlarge = 1000,
            lambda2=NULL,
            reciplambda=NULL, reciplambda2=NULL,
            diagonal=TRUE,
            sigma=NULL, varcov=NULL,
            ratio=FALSE)
{
  verifyclass(X, "ppp")
  # remember which arguments were explicitly supplied by the caller;
  # this changes later dispatch and warning behaviour
  nlarge.given <- !missing(nlarge)
  rfixed <- !missing(r) || !missing(breaks)
  miss.update <- missing(update)

  # determine basic parameters
  W <- X$window
  npts <- npoints(X)
  areaW <- area(W)
  diamW <- diameter(W)
  # rmax.rule supplies the conventional default maximum r for K -
  # TODO confirm exact rule in spatstat.utils
  rmaxdefault <- rmax.rule("K", W, npts/areaW)
  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max

  # match corrections
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("border", "bord.modif", "isotropic", "translate")

  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             good="good",
                             best="best"),
                           multi=TRUE)
#  best.wanted <- ("best" %in% correction)
  ## replace 'good' by the optimal choice for this size of dataset
  if("good" %in% correction)
    correction[correction == "good"] <- good.correction.K(X)
  ## retain only corrections that are implemented for the window
  correction <- implemented.for.K(correction, W$type, correction.given)

  ###########################################################
  # DETERMINE WEIGHTS AND VALIDATE
  #
  # The matrix 'lambda2' or 'reciplambda2' is sufficient information
  # unless we want the border correction.
  lambda2.given <- !is.null(lambda2) || !is.null(reciplambda2)
  # NOTE(review): "bord" here never matches the canonical correction name
  # "border" produced by pickoption() above - possibly intended to be
  # "border"; confirm against upstream spatstat.
  lambda2.suffices <- !any(correction %in% c("bord", "bord.modif"))

  ## Arguments that are 'dangerous' for envelope, if fixed
  dangerous <- c("lambda", "reciplambda", "lambda2", "reciplambda2")
  danger <- TRUE

  # Use matrix of weights if it was provided and if it is sufficient
  if(lambda2.suffices && lambda2.given) {
    if(!is.null(reciplambda2)) {
      check.nmatrix(reciplambda2, npts)
      validate.weights(reciplambda2, recip=TRUE)
    } else {
      check.nmatrix(lambda2, npts)
      validate.weights(lambda2)
      reciplambda2 <- 1/lambda2
    }
    # renormalise
    if(renormalise) {
      check.1.real(normpower)
      stopifnot(normpower %in% 1:2)
      rlam2 <- reciplambda2
      # optionally exclude diagonal entries from the normalisation sum
      if(!diagonal) diag(rlam2) <- 0
      renorm.factor <- (areaW^2/sum(rlam2))^(normpower/2)
    }
  } else {
    # Vector lambda or reciplambda is required
    if(missing(lambda) && is.null(reciplambda)) {
      # No intensity data provided
      danger <- FALSE
      # Estimate density by leave-one-out kernel smoothing
      lambda <- density(X, ..., sigma=sigma, varcov=varcov,
                        at="points", leaveoneout=leaveoneout)
      lambda <- as.numeric(lambda)
      validate.weights(lambda, how="density estimation")
      reciplambda <- 1/lambda
    } else if(!is.null(reciplambda)) {
      # 1/lambda values provided
      if(is.im(reciplambda))
        reciplambda <- safelookup(reciplambda, X)
      else if(is.function(reciplambda))
        reciplambda <- reciplambda(X$x, X$y)
      else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda)))
        check.nvector(reciplambda, npts)
      else stop(paste(sQuote("reciplambda"),
                      "should be a vector, a pixel image, or a function"))
      validate.weights(reciplambda, recip=TRUE)
    } else {
      # lambda values provided
      if(is.im(lambda))
        lambda <- safelookup(lambda, X)
      else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
        # lambda is a fitted point process model
        model <- lambda
        if(!update) {
          ## just use intensity of fitted model
          lambda <- predict(model, locations=X, type="trend")
        } else {
          ## re-fit model to data X
          if(is.ppm(model)) {
            model <- update(model, Q=X)
            lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
          } else {
            model <- update(model, X=X)
            lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
          }
          danger <- FALSE
          # warn once per session about the changed default behaviour
          if(miss.update)
            warn.once(key="Kinhom.update",
                      "The behaviour of Kinhom when lambda is a ppm object",
                      "has changed (in spatstat 1.37-0 and later).",
                      "See help(Kinhom)")
        }
      } else if(is.function(lambda))
        lambda <- lambda(X$x, X$y)
      else if(is.numeric(lambda) && is.vector(as.numeric(lambda)))
        check.nvector(lambda, npts)
      else stop(paste(sQuote("lambda"),
                      "should be a vector, a pixel image, or a function"))
      validate.weights(lambda)
      # evaluate reciprocal
      reciplambda <- 1/lambda
    }
    # renormalise
    if(renormalise) {
      check.1.real(normpower)
      stopifnot(normpower %in% 1:2)
      if(!diagonal && normpower == 2) {
        # exclude the i=j terms from the square of the sum
        renorm.factor <- (areaW^2)/(sum(reciplambda)^2 - sum(reciplambda^2))
      } else {
        renorm.factor <- (areaW/sum(reciplambda))^normpower
      }
    }
  }

  # recommended range of r values
  alim <- c(0, min(rmax, rmaxdefault))

  ###########################################
  # Efficient code for border correction and no correction
  # Usable only if r values are evenly spaced from 0 to rmax
  # Invoked automatically if number of points is large

  can.do.fast <- breaks$even && !lambda2.given
  large.n <- (npts >= nlarge)
#  demand.best <- correction.given && best.wanted
  large.n.trigger <- large.n && !correction.given
  fastcorrections <- c("border", "bord.modif", "none")
  fastdefault <- "border"
  correction.fast <- all(correction %in% fastcorrections)
  will.do.fast <- can.do.fast && (correction.fast || large.n.trigger)
  asked.fast <- (correction.given && correction.fast) ||
    (nlarge.given && large.n.trigger)
  if(!can.do.fast && asked.fast) {
    # NOTE(review): the second test uses !missing(lambda), but the
    # intended reason appears to be that lambda2/reciplambda2 was
    # supplied (cf. can.do.fast) - confirm; as written the reason may
    # be dropped from the warning message.
    whynot <-
      if(!(breaks$even)) "r values not evenly spaced" else
      if(!missing(lambda)) "matrix lambda2 was given" else NULL
    warning(paste("cannot use efficient code", whynot, sep="; "))
  }

  if(will.do.fast) {
    ## Compute Kinhom using fast algorithm(s)
    ## determine correction(s)
    ok <- correction %in% fastcorrections
    correction <- if(any(ok)) correction[ok] else fastdefault
    bord <- any(correction %in% c("border", "bord.modif"))
    none <- any(correction =="none")
    if(!all(ok)) {
      ## some corrections were overridden; notify user
      corx <- c(if(bord) "border correction estimate" else NULL,
                if(none) "uncorrected estimate" else NULL)
      corx <- paste(corx, collapse=" and ")
      message(paste("number of data points exceeds",
                    nlarge, "- computing", corx , "only"))
    }
    ## restrict r values to recommended range, unless specifically requested
    if(!rfixed)
      r <- seq(from=0, to=alim[2], length.out=length(r))
    ## border method
    if(bord) {
      Kb <- Kborder.engine(X, max(r), length(r), correction,
                           weights=reciplambda, ratio=ratio)
      if(renormalise) {
        ynames <- setdiff(fvnames(Kb, "*"), "theo")
        Kb <- adjust.ratfv(Kb, ynames, denfactor=1/renorm.factor)
      }
      Kb <- tweak.ratfv.entry(Kb, "border",
                              new.labl="{hat(%s)[%s]^{bord}} (r)")
      Kb <- tweak.ratfv.entry(Kb, "bord.modif",
                              new.labl="{hat(%s)[%s]^{bordm}} (r)")
    }
    ## uncorrected
    if(none) {
      Kn <- Knone.engine(X, max(r), length(r),
                         weights=reciplambda, ratio=ratio)
      if(renormalise)
        Kn <- adjust.ratfv(Kn, "un", denfactor=1/renorm.factor)
      Kn <- tweak.ratfv.entry(Kn, "un",
                              new.labl="{hat(%s)[%s]^{un}} (r)")
    }
    # assemble the result from whichever estimates were computed
    K <-
      if(bord && !none) Kb else
      if(!bord && none) Kn else
      if(!ratio) cbind.fv(Kb, Kn[, c("r", "un")]) else
      bind.ratfv(Kb, Kn[, c("r", "un")], ratio=TRUE)
    ## tweak labels
    K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom"))
    if(danger)
      attr(K, "dangerous") <- dangerous
    return(K)
  }

  ###########################################
  # Fast code for rectangular window
  ###########################################

  if(can.do.fast && is.rectangle(W) && spatstat.options("use.Krect")) {
    K <- Krect.engine(X, rmax, length(r), correction,
                      weights=reciplambda,
                      ratio=ratio, fname=c("K", "inhom"))
    if(renormalise) {
      allfun <- setdiff(fvnames(K, "*"), "theo")
      K <- adjust.ratfv(K, allfun, denfactor=1/renorm.factor)
    }
    K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom"))
    attr(K, "alim") <- alim
    if(danger)
      attr(K, "dangerous") <- dangerous
    return(K)
  }

  ###########################################
  # Slower code
  ###########################################

  # this will be the output data frame
  K <- data.frame(r=r, theo= pi * r^2)
  desc <- c("distance argument r", "theoretical Poisson %s")
  # fold the renormalisation factor into the common denominator so that
  # ratio bookkeeping stays consistent
  denom <- if(renormalise) (areaW / renorm.factor) else areaW
  K <- ratfv(K, NULL, denom,
             argu="r",
             ylab=quote(K[inhom](r)),
             valu="theo",
             fmla=NULL,
             alim=alim,
             labl=c("r","{%s[%s]^{pois}}(r)"),
             desc=desc,
             fname=c("K", "inhom"),
             ratio=ratio)

  # identify all close pairs
  rmax <- max(r)
  # dx, dy are needed only by the translation correction
  what <- if(any(correction == "translate")) "all" else "ijd"
  close <- closepairs(X, rmax, what=what)
  dIJ <- close$d
  # compute weights for these pairs
  I <- close$i
  J <- close$j
#  wI <- reciplambda[I]
  wIJ <- if(!lambda2.given) reciplambda[I] * reciplambda[J] else
         reciplambda2[cbind(I,J)]
  #
  # compute edge corrected estimates
  if(any(correction == "border" | correction == "bord.modif")) {
    # border method
    # Compute distances to boundary
    b <- bdist.points(X)
    bI <- b[I]
    # apply reduced sample algorithm
    RS <- Kwtsum(dIJ, bI, wIJ, b, w=reciplambda, breaks)
    if(any(correction == "border")) {
      Kb <- RS$ratio
      if(renormalise)
        Kb <- Kb * renorm.factor
      K <- bind.ratfv(K,
                      quotient = data.frame(border=Kb),
                      denominator = denom,
                      labl = "{hat(%s)[%s]^{bord}}(r)",
                      desc = "border-corrected estimate of %s",
                      preferred = "border",
                      ratio=ratio)
    }
    if(any(correction == "bord.modif")) {
      # modified border method: denominator is the eroded window area
      Kbm <- RS$numerator/eroded.areas(W, r)
      if(renormalise)
        Kbm <- Kbm * renorm.factor
      K <- bind.ratfv(K,
                      quotient = data.frame(bord.modif=Kbm),
                      denominator = denom,
                      labl = "{hat(%s)[%s]^{bordm}}(r)",
                      desc = "modified border-corrected estimate of %s",
                      preferred = "bord.modif",
                      ratio=ratio)
    }
  }
  if(any(correction == "translate")) {
    # translation correction
    edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE)
    allweight <- edgewt * wIJ
    wh <- whist(dIJ, breaks$val, allweight)
    Ktrans <- cumsum(wh)/areaW
    if(renormalise)
      Ktrans <- Ktrans * renorm.factor
    # estimate is unreliable beyond half the window diameter
    rmax <- diamW/2
    Ktrans[r >= rmax] <- NA
    K <- bind.ratfv(K,
                    quotient = data.frame(trans=Ktrans),
                    denominator = denom,
                    labl ="{hat(%s)[%s]^{trans}}(r)",
                    desc = "translation-correction estimate of %s",
                    preferred = "trans",
                    ratio=ratio)
  }
  if(any(correction == "isotropic" | correction == "Ripley")) {
    # Ripley isotropic correction
    edgewt <- edge.Ripley(X[I], matrix(dIJ, ncol=1))
    allweight <- edgewt * wIJ
    wh <- whist(dIJ, breaks$val, allweight)
    Kiso <- cumsum(wh)/areaW
    if(renormalise)
      Kiso <- Kiso * renorm.factor
    # estimate is unreliable beyond half the window diameter
    rmax <- diamW/2
    Kiso[r >= rmax] <- NA
    K <- bind.ratfv(K,
                    quotient = data.frame(iso=Kiso),
                    denominator = denom,
                    labl = "{hat(%s)[%s]^{iso}}(r)",
                    desc = "Ripley isotropic correction estimate of %s",
                    preferred = "iso",
                    ratio=ratio)
  }
  # default is to display them all
  formula(K) <- . ~ r
  unitname(K) <- unitname(X)
  if(danger)
    attr(K, "dangerous") <- dangerous
  return(K)
}

Kwtsum <- function(dIJ, bI, wIJ, b, w, breaks, fatal=TRUE) {
  #
  # "internal" routine to compute border-correction estimates of Kinhom
  #
  # dIJ:  vector containing pairwise distances for selected I,J pairs
  # bI:   corresponding vector of boundary distances for I
  # wIJ:  product weight for selected I, J pairs
  #
  # b:    vector of ALL distances to window boundary
  # w:    weights for ALL points
  #
  # breaks : breakpts object
  #

  stopifnot(length(dIJ) == length(bI))
  stopifnot(length(bI) == length(wIJ))
  stopifnot(length(w) == length(b))

  # sanitise the weights: either abort, or (if fatal=FALSE)
  # zero out the non-finite entries with a warning
  if(!is.finite(sum(w, wIJ))) {
    if(fatal)
      stop("Weights in K-function were infinite or NA", call.=FALSE)
    #' set non-finite weights to zero
    if(any(bad <- !is.finite(w))) {
      warning(paste(sum(bad), "out of", length(bad),
                    paren(percentage(bad)),
                    "of the boundary weights",
                    "in the K-function were NA or NaN or Inf",
                    "and were reset to zero"),
              call.=FALSE)
      w[bad] <- 0
    }
    if(any(bad <- !is.finite(wIJ))) {
      warning(paste(sum(bad), "out of", length(bad),
                    paren(percentage(bad)),
                    "of the weights for pairwise distances",
                    "in the K-function were NA or NaN or Inf",
                    "and were reset to zero"),
              call.=FALSE)
      wIJ[bad] <- 0
    }
  }

  bkval <- breaks$val
  # determine which distances d_{ij} were observed without censoring
  uncen <- (dIJ <= bI)
  #
  # histogram of noncensored distances
  nco <- whist(dIJ[uncen], bkval, wIJ[uncen])
  # histogram of censoring times for noncensored distances
  ncc <- whist(bI[uncen], bkval, wIJ[uncen])
  # histogram of censoring times (yes, this is a different total size)
  cen <- whist(b, bkval, w)
  # total weight of censoring times beyond rightmost breakpoint
  uppercen <- sum(w[b > breaks$max])
  # go
  RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen)
  # extract results
  numerator   <- RS$numerator
  denominator <- RS$denominator
  ratio        <- RS$numerator/RS$denominator
  # check
  if(length(numerator) != breaks$ncells)
    stop("internal error: length(numerator) != breaks$ncells")
if(length(denominator) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denominator=denominator, ratio=ratio)) } validate.weights <- function(x, recip=FALSE, how = NULL, allowzero = recip, allowinf = !recip) { xname <- deparse(substitute(x)) ra <- range(x) offence <- if(!allowinf && !all(is.finite(ra))) "infinite" else if(ra[1] < 0) "negative" else if(!allowzero && ra[1] == 0) "zero" else NULL if(!is.null(offence)) { offenders <- paste(offence, "values of", sQuote(xname)) if(is.null(how)) stop(paste(offenders, "are not allowed"), call.=FALSE) stop(paste(how, "yielded", offenders), call.=FALSE) } return(TRUE) } resolve.lambda <- function(X, lambda=NULL, ..., sigma=NULL, varcov=varcov, leaveoneout=TRUE, update=TRUE) { dangerous <- "lambda" danger <- TRUE if(is.null(lambda)) { ## No intensity data provided ## Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) lambda <- as.numeric(lambda) danger <- FALSE } else if(is.im(lambda)) { lambda <- safelookup(lambda, X) } else if(is.function(lambda)) { lambda <- lambda(X$x, X$y) } else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { check.nvector(lambda, npoints(X)) } else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## use intensity of model lambda <- predict(model, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) danger <- FALSE } } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image,", "a fitted model, or a function")) return(list(lambda=lambda, danger=danger, dangerous=if(danger) dangerous else NULL)) } spatstat/R/polynom.R0000644000176200001440000000450513333543255014150 0ustar liggesusers#' #' polynom.R #' #' $Revision: 1.1 $ $Date: 2017/01/02 09:48:36 $ 
#'

#' Compute powers of one variable, or monomials of two variables,
#' up to a given degree, with informative column names.
#'
#' Usage:  polynom(x, degree)       -> columns x, x^2, ..., x^degree
#'         polynom(x, y, degree)    -> columns x, y, x^2, x.y, y^2, ...
#'
#' Returns a matrix with one row per entry of x and one column per term,
#' column names built from the deparsed argument expressions.
polynom <- function(x, ...) {
  rest <- list(...)
  # degree not given
  if(length(rest) == 0)
    stop("degree of polynomial must be given")

  #call with single variable and degree
  if(length(rest) == 1) {
    degree <- ..1
    # BUG FIX: test length(degree) BEFORE doing arithmetic on it,
    # so that a vector-valued 'degree' yields this clean error message
    # instead of crashing the if() condition; also reject non-numeric input.
    if(length(degree) != 1 || !is.numeric(degree) ||
       (degree %% 1) != 0 || degree < 1)
      stop("degree of polynomial should be a positive integer")

    # compute values
    result <- outer(x, 1:degree, "^")

    # compute column names - the hard part !
    namex <- deparse(substitute(x))
    # check whether it needs to be parenthesised
    if(!is.name(substitute(x)))
      namex <- paste("(", namex, ")", sep="")
    # column names
    namepowers <- if(degree == 1) namex else
                  c(namex, paste(namex, "^", 2:degree, sep=""))
    namepowers <- paste("[", namepowers, "]", sep="")
    # stick them on
    dimnames(result) <- list(NULL, namepowers)
    return(result)
  }

  # call with two variables and degree
  if(length(rest) == 2) {
    y <- ..1
    degree <- ..2
    # same validation as the one-variable case (previously unchecked here)
    if(length(degree) != 1 || !is.numeric(degree) ||
       (degree %% 1) != 0 || degree < 1)
      stop("degree of polynomial should be a positive integer")

    # list of exponents of x and y, in nice order
    xexp <- yexp <- numeric()
    for(i in 1:degree) {
      xexp <- c(xexp, i:0)
      yexp <- c(yexp, 0:i)
    }
    nterms <- length(xexp)
    # compute
    result <- matrix(, nrow=length(x), ncol=nterms)
    for(i in 1:nterms)
      result[, i] <- x^xexp[i] * y^yexp[i]

    # names of these terms
    namex <- deparse(substitute(x))
    # namey <- deparse(substitute(..1)) ### seems not to work in R
    zzz <- as.list(match.call())
    namey <- deparse(zzz[[3]])
    # check whether they need to be parenthesised
    # if so, add parentheses
    if(!is.name(substitute(x)))
      namex <- paste("(", namex, ")", sep="")
    if(!is.name(zzz[[3]]))
      namey <- paste("(", namey, ")", sep="")
    # BUG FIX: guard the higher powers - '2:degree' counts downwards
    # (c(2,1)) when degree == 1, creating spurious name entries
    higher <- if(degree >= 2) 2:degree else integer(0)
    nameXexp <- c("", namex, paste(namex, "^", higher, sep=""))
    nameYexp <- c("", namey, paste(namey, "^", higher, sep=""))
    # make the term names
    termnames <- paste(nameXexp[xexp + 1],
                       ifelse(xexp > 0 & yexp > 0, ".", ""),
                       nameYexp[yexp + 1],
                       sep="")
    termnames <- paste("[", termnames, "]", sep="")
    dimnames(result) <- list(NULL, termnames)
    #
    return(result)
  }
  stop("Can't deal with more than 2 variables yet")
}
spatstat/R/factors.R0000644000176200001440000000310313333543255014105 0ustar liggesusers#'
#'     factors.R
#'
#'    Tools for manipulating factors and factor-valued things
#'
#'    $Revision: 1.4 $ $Date: 2016/04/25 02:34:40 $

## Reorder the levels of a factor-valued pixel image,
## analogous to relevel() for plain factors.
relevel.im <- function(x, ref, ...) {
  if(x$type != "factor")
    stop("Only valid for factor-valued images")
  pixvals <- relevel(x[], ref, ...)
  x[] <- pixvals
  return(x)
}

## Reorder the type levels of a multitype point pattern (ppp or ppx).
relevel.ppx <- function(x, ref, ...) {
  stopifnot(is.multitype(x))
  marks(x) <- relevel(marks(x), ref, ...)
  return(x)
}
relevel.ppp <- relevel.ppx

## Merge groups of factor levels into single levels.
## Each argument is of the form newname=c("old1","old2",...);
## one argument may be NULL/character(0), meaning "all remaining levels".
## Dispatches recursively on factor-valued images and multitype patterns.
mergeLevels <- function(.f, ...) {
  if(is.im(.f)) {
    merged <- mergeLevels(.f[], ...)
    .f[] <- merged
    return(.f)
  }
  if(is.multitype(.f)) {
    marks(.f) <- mergeLevels(marks(.f), ...)
    return(.f)
  }
  stopifnot(is.factor(.f))
  spec <- list(...)
  nspec <- length(spec)
  if(nspec == 0) return(.f)
  ## expand the (at most one) empty entry to "all levels not mentioned"
  isempty <- (lengths(spec) == 0)
  if(any(isempty)) {
    if(sum(isempty) > 1)
      stop("At most one argument should be NULL or character(0)")
    spec[[which(isempty)]] <- setdiff(levels(.f), unlist(spec))
  }
  targetnames <- names(spec)
  current <- levels(.f)
  absorbed <- unlist(spec)
  if(sum(nzchar(targetnames)) != nspec)
    stop("Arguments must be in the form name=value")
  if(!all(absorbed %in% current))
    stop("Argument values must be levels of .f")
  ## build the old-level -> new-level lookup table
  lookup <- current
  for(k in seq_len(nspec)) {
    hit <- current %in% spec[[k]]
    lookup[hit] <- targetnames[k]
  }
  ## apply the mapping (factor indexing uses the integer codes)
  merged <- factor(lookup[.f], levels=unique(lookup))
  return(merged)
}

## Return the levels of x as a factor with those same levels,
## or NULL if x has no levels.
levelsAsFactor <- function(x) {
  values <- levels(x)
  if(is.null(values)) return(NULL)
  return(factor(values, levels=values))
}
spatstat/R/rose.R0000644000176200001440000002424713333543255013430 0ustar liggesusers#'
#'  rose.R
#'
#'  Rose diagrams
#'
#'  $Revision: 1.9 $ $Date: 2015/08/25 08:19:19 $
#'

rose <- function(x, ...)
UseMethod("rose") rose.default <- local({ rose.default <- function(x, breaks = NULL, ..., weights=NULL, nclass=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, main) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) stopifnot(is.numeric(x)) if(!is.null(weights)) check.nvector(weights, length(x), things="observations") #' determine units missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' reduce to [0, 2pi] x <- x %% FullCircle #' determine breakpoints strictly inside full circle breaks <- makebreaks(x, c(0, FullCircle), breaks, nclass) #' histogram without weights h <- do.call.matched(hist.default, list(x=x, breaks=breaks, ..., plot=FALSE), skipargs=graphicsAargh, sieve=TRUE) result <- h$result otherargs <- h$otherargs #' redo weights, if given if(!is.null(weights)) { wh <- whist(x=x, breaks=breaks, weights=weights) result$count <- wh result$density <- wh/diff(breaks) } # do.call(rose.histogram, c(list(x=result, main=main, unit=unit, start=start, clockwise=clockwise), otherargs)) } graphicsAargh <- c("density", "angle", "col", "border", "xlim", "ylim", "xlab", "ylab", "axes") makebreaks <- function(x, r, breaks=NULL, nclass=NULL) { use.br <- !is.null(breaks) if (use.br) { if (!is.null(nclass)) warning("'nclass' not used when 'breaks' is specified") } else if (!is.null(nclass) && length(nclass) == 1L) { breaks <- nclass } else breaks <- "Sturges" use.br <- use.br && (nB <- length(breaks)) > 1L if (use.br) breaks <- sort(breaks) else { if (is.character(breaks)) { breaks <- match.arg(tolower(breaks), c("sturges", "fd", "freedman-diaconis", "scott")) breaks <- switch(breaks, sturges = nclass.Sturges(x), `freedman-diaconis` = , fd = nclass.FD(x), scott = nclass.scott(x), stop("unknown 'breaks' algorithm")) } else if (is.function(breaks)) { breaks <- breaks(x) } if (length(breaks) == 1) { if (!is.numeric(breaks) || !is.finite(breaks) || breaks 
< 1L) stop("invalid number of 'breaks'") breaks <- seq(r[1], r[2], length.out=breaks) } else { if (!is.numeric(breaks) || length(breaks) <= 1) stop(gettextf("Invalid breakpoints produced by 'breaks(x)': %s", format(breaks)), domain = NA) breaks <- sort(breaks) } } return(breaks) } rose.default }) rose.histogram <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) #' determine units missu <- missing(unit) unit <- match.arg(unit) #' validate bks <- x$breaks unit <- validate.angles(bks, unit, missu) # FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' get sector sizes y <- x$density ymax <- max(y) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * ymax DD <- disc(R) Rout <- (1 + outsidespace) * R result <- do.call.matched(plot.owin, resolve.defaults(list(x=disc(Rout), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=DD, hatch=FALSE, add=TRUE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") if(do.plot) { #' draw sectors ang <- ang2rad(bks, unit=unit, start=start, clockwise=clockwise) eps <- min(diff(ang), pi/128)/2 for(i in seq_along(y)) { aa <- seq(ang[i], ang[i+1], by=eps) aa[length(aa)] <- ang[i+1] yi <- y[i] xx <- c(0, yi * cos(aa), 0) yy <- c(0, yi * sin(aa), 0) do.call.matched(polygon, list(x=xx, y=yy, ...)) } #' add tick marks circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } #' return(invisible(result)) } rose.density <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- x$x rad <- x$y missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- 
roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } rose.fv <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- with(x, .x) rad <- with(x, .y) missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } roseContinuous <- function(ang, rad, unit, ..., start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { rmax <- max(rad) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * rmax DD <- disc(R) Rout <- (1 + outsidespace) * R result <- do.call.matched(plot.owin, resolve.defaults(list(x=disc(Rout), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=DD, add=TRUE, hatch=FALSE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") #' draw plot if(do.plot) { ang <- ang2rad(ang, unit=unit, start=start, clockwise=clockwise) xx <- rad * cos(ang) yy <- rad * sin(ang) do.call.matched(polygon, list(x=xx, y=yy, ...), extrargs="lwd") circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } return(result) } ang2rad <- local({ compasspoints <- c(E=0,N=90,W=180,S=270) ang2rad <- function(ang, unit=c("degree", "radian"), start=0, clockwise=FALSE) { unit <- match.arg(unit) clocksign <- if(clockwise) -1 else 1 stopifnot(length(start) == 1) if(is.character(start)) { if(is.na(match(toupper(start), names(compasspoints)))) stop(paste("Unrecognised compass point", sQuote(start)), call.=FALSE) startdegrees <- compasspoints[[start]] start <- switch(unit, degree = startdegrees, radian 
= pi * (startdegrees/180)) # start is measured anticlockwise ang <- start + clocksign * ang } else { stopifnot(is.numeric(start)) # start is measured according to value of 'clockwise' ang <- clocksign * (start + ang) } rad <- switch(unit, degree = pi * (ang/180), radian = ang) return(rad) } ang2rad }) circticks <- function(R, at=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, labels=TRUE) { unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.null(at)) { at <- FullCircle * (0:23)/24 major <- ((0:23) %% 6 == 0) } else { if(length(at) == 0) return(invisible(NULL)) nat <- (at/FullCircle) * 4 major <- abs(nat - round(nat)) < 0.01 } atradians <- ang2rad(ang=at, unit=unit, start=start, clockwise=clockwise) tx <- R * cos(atradians) ty <- R * sin(atradians) expan <- ifelse(major, 1.1, 1.05) segments(tx, ty, expan * tx, expan * ty, lwd=major+1) if(!identical(labels, FALSE)) { if(identical(labels, TRUE)) { labels <- switch(unit, degree=paste(round(at)), radian=parse(text= simplenumber(at/pi, "pi", "*", 1e-3))) } else stopifnot(is.vector(labels) && length(labels) == length(at)) big <- expan + 0.1 text(big * tx, big * ty, labels=labels) } invisible(NULL) } validate.angles <- function(angles, unit=c("degree", "radian"), guess=TRUE) { #' validate width <- diff(range(angles)) if(missing(unit) && guess && width <= 6.2832) { warning("Very small range of angles: treating them as radian") unit <- "radian" } else unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(width > 1.002 * FullCircle) stop("Range of angles exceeds a full circle") return(unit) } spatstat/R/convexify.R0000644000176200001440000000076013333543254014463 0ustar liggesusers## ## convexify.R ## ## $Revision: 1.1 $ $Date: 2015/10/23 12:34:17 $ convexify <- function(W, eps) { if(!is.polygonal(W)) { if(missing(eps)) eps <- diameter(Frame(W))/20 W <- simplify.owin(W, eps) } e <- edges(W) len <- lengths.psp(e) ang <- angles.psp(e, directed=TRUE) df 
<- data.frame(ang=ang, len=len) df <- df[order(df$ang), ] df <- within(df, { dx <- len * cos(ang); dy <- len * sin(ang)}) owin(poly=with(df, list(x=cumsum(c(0,dx)), y=cumsum(c(0,dy))))) } spatstat/R/centroid.R0000644000176200001440000001016213333543254014255 0ustar liggesusers# # centroid.S Centroid of a window # and related operations # # $Revision: 1.6 $ $Date: 2014/11/10 08:20:59 $ # # Function names (followed by "xypolygon" or "owin") # # intX integral of x dx dy # intY integral of y dx dy # meanX mean of x dx dy # meanY mean of y dx dy # centroid (meanX, meanY) # #------------------------------------- intX.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single polygon (n joins to 1) # verify.xypolygon(polly) x <- polly$x y <- polly$y # nedges <- length(x) # sic # place x axis below polygon y <- y - min(y) # join vertex n to vertex 1 xr <- c(x, x[1L]) yr <- c(y, y[1L]) # slope dx <- diff(xr) dy <- diff(yr) slope <- ifelseAX(dx == 0, 0, dy/dx) # integrate integrals <- x * y * dx + (y + slope * x) * (dx^2)/2 + slope * (dx^3)/3 -sum(integrals) } intX.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) answer <- width * height * mean(w$xrange) }, polygonal = { answer <- sum(unlist(lapply(w$bdry, intX.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) x <- rasterx.mask(w, drop=TRUE) answer <- (pixelarea * length(x)) * mean(x) }, stop("Unrecognised window type") ) return(answer) } meanX.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { answer <- mean(w$xrange) }, polygonal = { area <- sum(unlist(lapply(w$bdry, Area.xypolygon))) integrated <- sum(unlist(lapply(w$bdry, intX.xypolygon))) answer <- integrated/area }, mask = { x <- rasterx.mask(w, drop=TRUE) answer <- mean(x) }, stop("Unrecognised window type") ) return(answer) } intY.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single polygon (n joins to 1) # 
verify.xypolygon(polly) x <- polly$x y <- polly$y # nedges <- length(x) # sic # place x axis below polygon yadjust <- min(y) y <- y - yadjust # join vertex n to vertex 1 xr <- c(x, x[1L]) yr <- c(y, y[1L]) # slope dx <- diff(xr) dy <- diff(yr) slope <- ifelseAX(dx == 0, 0, dy/dx) # integrate integrals <- (1/2) * (dx * y^2 + slope * y * dx^2 + slope^2 * dx^3/3) total <- sum(integrals) - yadjust * Area.xypolygon(polly) # change sign to adhere to anticlockwise convention -total } intY.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) answer <- width * height * mean(w$yrange) }, polygonal = { answer <- sum(unlist(lapply(w$bdry, intY.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) y <- rastery.mask(w, drop=TRUE) answer <- (pixelarea * length(y)) * mean(y) }, stop("Unrecognised window type") ) return(answer) } meanY.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { answer <- mean(w$yrange) }, polygonal = { area <- sum(unlist(lapply(w$bdry, Area.xypolygon))) integrated <- sum(unlist(lapply(w$bdry, intY.xypolygon))) answer <- integrated/area }, mask = { y <- rastery.mask(w, drop=TRUE) answer <- mean(y) }, stop("Unrecognised window type") ) return(answer) } centroid.owin <- function(w, as.ppp = FALSE) { w <- as.owin(w) out <- list(x=meanX.owin(w), y=meanY.owin(w)) if(as.ppp){ if(!inside.owin(out$x, out$y, w)) w <- as.rectangle(w) out <- as.ppp(out, W=w) } return(out) } spatstat/R/pairorient.R0000644000176200001440000001624713416566717014647 0ustar liggesusers## ## pairorient.R ## ## point pair orientation distribution ## ## Function O_{r1,r2}(phi) defined in ## Stoyan & Stoyan (1994) equ (14.53) page 271 ## ## and its derivative estimated by kernel smoothing ## ## $Revision: 1.10 $ $Date: 2019/01/13 07:33:20 $ pairorient <- function(X, r1, r2, ..., cumulative=FALSE, correction, ratio=FALSE, unit=c("degree", "radian"), domain=NULL) { stopifnot(is.ppp(X)) 
check.1.real(r1) check.1.real(r2) stopifnot(r1 < r2) W <- Window(X) if(!is.null(domain)) stopifnot(is.subset.owin(domain, W)) unit <- match.arg(unit) switch(unit, degree = { FullCircle <- 360 Convert <- 180/pi }, radian = { FullCircle <- 2 * pi Convert <- 1 }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate", "isotropic") correction <- pickoption("correction", correction, c(none="none", border="border", bord.modif="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## Find close pairs in range [r1, r2] close <- as.data.frame(closepairs(X, r2)) ok <- with(close, r1 <= d & d <= r2) if(!is.null(domain)) ok <- ok & with(close, inside.owin(xi, yi, domain)) if(!any(ok)) { warning(paste("There are no pairs of points in the distance range", prange(c(r1,r2)))) return(NULL) } close <- close[ok, , drop=FALSE] ANGLE <- with(close, atan2(dy, dx) * Convert) %% FullCircle ## initialise output object Nphi <- 512 breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1) phi <- breaks$r Odf <- data.frame(phi = phi, theo = (if(cumulative) phi else 1)/FullCircle) desc <- c("angle argument phi", "theoretical isotropic %s") Oletter <- if(cumulative) "O" else "o" Osymbol <- as.name(Oletter) OO <- ratfv(Odf, NULL, denom=nrow(close), argu="phi", ylab=substitute(fn[R1,R2](phi), list(R1=r1, R2=r2, fn=Osymbol)), valu="theo", fmla = . 
~ phi, alim = c(0, FullCircle), c("phi", "{%s[%s]^{pois}}(phi)"), desc, fname=c(Oletter, paste0("list(", r1, ",", r2, ")")), yexp=substitute(fn[list(R1,R2)](phi), list(R1=r1,R2=r2,fn=Osymbol))) ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^ nangles <- length(ANGLE) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate OO <- bind.ratfv(OO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(c("border", "bord.modif") %in% correction)) { ## border type corrections bX <- bdist.points(X) bI <- bX[close$i] if("border" %in% correction) { bok <- (bI > r2) ANGLEok <- ANGLE[bok] nok <- length(ANGLEok) if(cumulative) { wh <- whist(ANGLEok, breaks$val) num.bord <- cumsum(wh) } else { kd <- circdensity(ANGLEok, ..., n=Nphi, unit=unit) num.bord <- kd$y * nok } den.bord <- nok OO <- bind.ratfv(OO, data.frame(border=num.bord), den.bord, "{hat(%s)[%s]^{bord}}(phi)", "border-corrected estimate of %s", "border", ratio=ratio) } if("bord.modif" %in% correction) { ok <- (close$d < bI) nok <- sum(ok) inradius <- max(distmap(W, invert=TRUE)) rrr <- range(r2, inradius) rr <- seq(rrr[1], rrr[2], length=256) Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- (Arf(bX))[close$i] edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } den.bm <- nok OO <- bind.ratfv(OO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation 
correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.trans <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.trans <- kd$y * nangles } den.trans <- nangles OO <- bind.ratfv(OO, data.frame(trans=num.trans), den.trans, "{hat(%s)[%s]^{trans}}(phi)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) DIJ <- close$d edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.iso <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.iso <- kd$y * nangles } den.iso <- nangles OO <- bind.ratfv(OO, data.frame(iso=num.iso), den.iso, "{hat(%s)[%s]^{iso}}(phi)", "Ripley isotropic-corrected estimate of %s", "iso", ratio=ratio) } unitname(OO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(OO) } spatstat/R/periodify.R0000644000176200001440000000751113333543255014445 0ustar liggesusers# # periodify.R # # replicate a pattern periodically # # $Revision: 1.3 $ $Date: 2011/04/17 05:52:50 $ # periodify <- function(X, ...) 
{ UseMethod("periodify") } periodify.ppp <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) W <- X$window isrect <- (W$type == "rectangle") if(warn && combine && !isrect) warning("X has a non-rectangular window") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(W$xrange) height <- diff(W$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) Xshift <- list() for(i in 1:nrow(shifts)) Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) if(!combine) return(Xshift) Wnew <- if(isrect) { owin(range(range(W$xrange) + range(shifts[,1])), range(range(W$yrange) + range(shifts[,2]))) } else NULL Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check))) return(Z) } periodify.psp <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) W <- X$window isrect <- (W$type == "rectangle") if(warn && combine && !isrect) warning("X has a non-rectangular window") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(W$xrange) height <- diff(W$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) Xshift <- list() for(i in 1:nrow(shifts)) Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) if(!combine) return(Xshift) Wnew <- if(isrect) { owin(range(range(W$xrange) + range(shifts[,1])), range(range(W$yrange) + range(shifts[,2]))) } else 
NULL Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check))) return(Z) } periodify.owin <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) isrect <- (X$type == "rectangle") if(warn && combine && !isrect) warning("X is not rectangular") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(X$xrange) height <- diff(X$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) if(combine) { if(isrect) { # result is a rectangle Y <- owin(range(range(X$xrange) + range(shifts[,1])), range(range(X$yrange) + range(shifts[,2]))) } else { # result is another type of window for(i in 1:nrow(shifts)) { Xi <- shift(X, vec=as.numeric(shifts[i, ])) Y <- if(i == 1) Xi else union.owin(Y, Xi) } } } else { # result is a list Y <- list() for(i in 1:nrow(shifts)) Y[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) } return(Y) } spatstat/R/wingeom.R0000644000176200001440000007441013616732345014126 0ustar liggesusers# # wingeom.R Various geometrical computations in windows # # $Revision: 4.135 $ $Date: 2020/02/06 06:39:51 $ # volume.owin <- function(x) { area.owin(x) } area <- function(w) UseMethod("area") area.default <- function(w) area.owin(as.owin(w)) area.owin <- function(w) { stopifnot(is.owin(w)) switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) area <- width * height }, polygonal = { area <- sum(unlist(lapply(w$bdry, Area.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) npixels <- sum(w$m) area <- pixelarea * npixels }, stop("Unrecognised window type") ) return(area) } perimeter <- function(w) { w <- as.owin(w) switch(w$type, rectangle = { 
return(2*(diff(w$xrange)+diff(w$yrange))) }, polygonal={ return(sum(lengths.psp(edges(w)))) }, mask={ p <- as.polygonal(w) if(is.null(p)) return(NA) delta <- sqrt(w$xstep^2 + w$ystep^2) p <- simplify.owin(p, delta * 1.15) return(sum(lengths.psp(edges(p)))) }) return(NA) } framebottomleft <- function(w) { f <- Frame(w) c(f$xrange[1L], f$yrange[1L]) } sidelengths.owin <- function(x) { if(x$type != "rectangle") warning("Computing the side lengths of a non-rectangular window") with(x, c(diff(xrange), diff(yrange))) } shortside.owin <- function(x) { min(sidelengths(x)) } eroded.areas <- function(w, r, subset=NULL) { w <- as.owin(w) if(!is.null(subset) && !is.mask(w)) w <- as.mask(w) switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) areas <- pmax(width - 2 * r, 0) * pmax(height - 2 * r, 0) }, polygonal = { ## warning("Approximating polygonal window by digital image") w <- as.mask(w) areas <- eroded.areas(w, r) }, mask = { ## distances from each pixel to window boundary b <- if(is.null(subset)) bdist.pixels(w, style="matrix") else bdist.pixels(w)[subset, drop=TRUE, rescue=FALSE] ## histogram breaks to satisfy hist() Bmax <- max(b, r) breaks <- c(-1,r,Bmax+1) ## histogram of boundary distances h <- hist(b, breaks=breaks, plot=FALSE)$counts ## reverse cumulative histogram H <- revcumsum(h) ## drop first entry corresponding to r=-1 H <- H[-1] ## convert count to area pixarea <- w$xstep * w$ystep areas <- pixarea * H }, stop("unrecognised window type") ) areas } even.breaks.owin <- function(w) { verifyclass(w, "owin") Rmax <- diameter(w) make.even.breaks(bmax=Rmax, npos=128) } unit.square <- function() { owin(c(0,1),c(0,1)) } square <- function(r=1, unitname=NULL) { stopifnot(is.numeric(r)) if(is.numeric(unitname) && length(unitname) == 1 && length(r) == 1) { #' common error warning("Interpreting square(a, b) as square(c(a,b))", call.=FALSE) r <- c(r, unitname) unitname <- NULL } if(!all(is.finite(r))) stop("argument r is NA or infinite") 
if(length(r) == 1) { stopifnot(r > 0) r <- c(0,r) } else if(length(r) == 2) { stopifnot(r[1L] < r[2L]) } else stop("argument r must be a single number, or a vector of length 2") owin(r,r, unitname=unitname) } # convert polygonal window to mask window owinpoly2mask <- function(w, rasta, check=TRUE) { if(check) { verifyclass(w, "owin") stopifnot(w$type == "polygonal") verifyclass(rasta, "owin") stopifnot(rasta$type == "mask") } bdry <- w$bdry x0 <- rasta$xcol[1L] y0 <- rasta$yrow[1L] xstep <- rasta$xstep ystep <- rasta$ystep dimyx <- rasta$dim nx <- dimyx[2L] ny <- dimyx[1L] epsilon <- with(.Machine, double.base^floor(double.ulp.digits/2)) score <- numeric(nx*ny) for(i in seq_along(bdry)) { p <- bdry[[i]] xp <- p$x yp <- p$y np <- length(p$x) # repeat last vertex xp <- c(xp, xp[1L]) yp <- c(yp, yp[1L]) np <- np + 1 # rescale coordinates so that pixels are at integer locations xp <- (xp - x0)/xstep yp <- (yp - y0)/ystep # avoid exact integer locations for vertices whole <- (ceiling(xp) == floor(xp)) xp[whole] <- xp[whole] + epsilon whole <- (ceiling(yp) == floor(yp)) yp[whole] <- yp[whole] + epsilon # call C z <- .C("poly2imI", xp=as.double(xp), yp=as.double(yp), np=as.integer(np), nx=as.integer(nx), ny=as.integer(ny), out=as.integer(integer(nx * ny)), PACKAGE="spatstat") if(i == 1) score <- z$out else score <- score + z$out } status <- (score != 0) out <- owin(rasta$xrange, rasta$yrange, mask=matrix(status, ny, nx)) return(out) } overlap.owin <- function(A, B) { # compute the area of overlap between two windows # check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") At <- A$type Bt <- B$type if(At=="rectangle" && Bt=="rectangle") { xmin <- max(A$xrange[1L],B$xrange[1L]) xmax <- min(A$xrange[2L],B$xrange[2L]) if(xmax <= xmin) return(0) ymin <- max(A$yrange[1L],B$yrange[1L]) ymax <- min(A$yrange[2L],B$yrange[2L]) if(ymax <= ymin) return(0) return((xmax-xmin) * (ymax-ymin)) } if((At=="rectangle" && 
Bt=="polygonal") || (At=="polygonal" && Bt=="rectangle") || (At=="polygonal" && Bt=="polygonal")) { AA <- as.polygonal(A)$bdry BB <- as.polygonal(B)$bdry area <- 0 for(i in seq_along(AA)) for(j in seq_along(BB)) area <- area + overlap.xypolygon(AA[[i]], BB[[j]]) # small negative numbers can occur due to numerical error return(max(0, area)) } if(At=="mask") { # count pixels in A that belong to B pixelarea <- abs(A$xstep * A$ystep) rxy <- rasterxy.mask(A, drop=TRUE) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, B) return(pixelarea * sum(ok)) } if(Bt== "mask") { # count pixels in B that belong to A pixelarea <- abs(B$xstep * B$ystep) rxy <- rasterxy.mask(B, drop=TRUE) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, A) return(pixelarea * sum(ok)) } stop("Internal error") } # # subset operator for window # "[.owin" <- function(x, i, ...) { if(!missing(i) && !is.null(i)) { if(is.im(i) && i$type == "logical") { # convert to window i <- as.owin(eval.im(ifelse1NA(i))) } else stopifnot(is.owin(i)) x <- intersect.owin(x, i, fatal=FALSE) } return(x) } # # # Intersection and union of windows # # intersect.owin <- function(..., fatal=FALSE, p) { argh <- list(...) 
## p is a list of arguments to polyclip::polyclip if(missing(p) || is.null(p)) p <- list() ## handle 'solist' objects argh <- expandSpecialLists(argh, "solist") rasterinfo <- list() if(length(argh) > 0) { # explicit arguments controlling raster info israster <- names(argh) %in% names(formals(as.mask)) if(any(israster)) { rasterinfo <- argh[israster] ## remaining arguments argh <- argh[!israster] } } ## look for window arguments isowin <- as.logical(sapply(argh, is.owin)) if(any(!isowin)) warning("Some arguments were not windows") argh <- argh[isowin] nwin <- length(argh) if(nwin == 0) { warning("No windows were given") return(NULL) } ## at least one window A <- argh[[1L]] if(is.empty(A)) { if(fatal) stop("Intersection is empty", call.=FALSE) return(A) } if(nwin == 1) return(A) ## at least two windows B <- argh[[2L]] if(is.empty(B)) { if(fatal) stop("Intersection is empty", call.=FALSE) return(B) } if(nwin > 2) { ## handle union of more than two windows windows <- argh[-c(1,2)] ## determine a common set of parameters for polyclip p <- commonPolyclipArgs(A, B, do.call(boundingbox, windows), p=p) ## absorb all windows into B for(i in seq_along(windows)) { B <- do.call(intersect.owin, append(list(B, windows[[i]], p=p, fatal=fatal), rasterinfo)) if(is.empty(B)) return(B) } } ## There are now only two windows, which are not empty. 
if(identical(A, B)) return(A) # check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") uname <- harmonise(unitname(A), unitname(B), single=TRUE) # determine intersection of x and y ranges xr <- intersect.ranges(A$xrange, B$xrange, fatal=fatal) yr <- intersect.ranges(A$yrange, B$yrange, fatal=fatal) if(!fatal && (is.null(xr) || is.null(yr))) return(emptywindow(A)) #' non-empty intersection of Frames C <- owin(xr, yr, unitname=uname) # Determine type of intersection Arect <- is.rectangle(A) Brect <- is.rectangle(B) # Apoly <- is.polygonal(A) # Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) # Rectangular case if(Arect && Brect) return(C) if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- do.call(polyclip::polyclip, append(list(a, b, "intersection", fillA="nonzero", fillB="nonzero"), p)) if(length(ab)==0) { if(fatal) stop("Intersection is empty", call.=FALSE) return(emptywindow(C)) } # ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE, unitname=uname) AB <- rescue.rectangle(AB) return(AB) } ######### Result is a mask ############## # Restrict domain where possible if(Arect) A <- C if(Brect) B <- C if(Amask) A <- trim.mask(A, C) if(Bmask) B <- trim.mask(B, C) #' trap empty windows if(is.empty(A)) { if(fatal) stop("Intersection is empty", call.=FALSE) return(A) } if(is.empty(B)) { if(fatal) stop("Intersection is empty", call.=FALSE) return(B) } # Did the user specify the pixel raster? 
if(length(rasterinfo) > 0) { # convert to masks with specified parameters, and intersect if(Amask) { A <- do.call(as.mask, append(list(A), rasterinfo)) AB <- restrict.mask(A, B) if(fatal && is.empty(AB)) stop("Intersection is empty", call.=FALSE) return(AB) } else { B <- do.call(as.mask, append(list(B), rasterinfo)) BA <- restrict.mask(B,A) if(fatal && is.empty(BA)) stop("Intersection is empty", call.=FALSE) return(BA) } } # One mask and one rectangle? if(Arect && Bmask) return(B) if(Amask && Brect) return(A) # One mask and one polygon? if(Amask && !Bmask) { AB <- restrict.mask(A, B) if(fatal && is.empty(AB)) stop("Intersection is empty", call.=FALSE) return(AB) } if(!Amask && Bmask) { BA <- restrict.mask(B, A) if(fatal && is.empty(BA)) stop("Intersection is empty", call.=FALSE) return(BA) } # Two existing masks? if(Amask && Bmask) { # choose the finer one AB <- if(A$xstep <= B$xstep) restrict.mask(A, B) else restrict.mask(B, A) if(fatal && is.empty(AB)) stop("Intersection is empty", call.=FALSE) return(AB) } stop("Internal error: never reached") # # No existing masks. No clipping applied so far. # # Convert one window to a mask with default pixel raster, and intersect. # if(Arect) { # A <- as.mask(A) # AB <- restrict.mask(A, B) # if(fatal && is.empty(AB)) stop("Intersection is empty", call.=FALSE) # return(AB) # } else { # B <- as.mask(B) # BA <- restrict.mask(B, A) # if(fatal && is.empty(BA)) stop("Intersection is empty", call.=FALSE) # return(BA) # } } union.owin <- function(..., p) { argh <- list(...) 
## weed out NULL arguments argh <- argh[!sapply(argh, is.null)] ## p is a list of arguments to polyclip::polyclip if(missing(p) || is.null(p)) p <- list() ## handle 'solist' objects argh <- expandSpecialLists(argh, "solist") rasterinfo <- list() if(length(argh) > 0) { ## arguments controlling raster info israster <- names(argh) %in% names(formals(as.mask)) if(any(israster)) { rasterinfo <- argh[israster] ## remaining arguments argh <- argh[!israster] } } ## look for window arguments isowin <- as.logical(sapply(argh, is.owin)) if(any(!isowin)) warning("Some arguments were not windows") argh <- argh[isowin] ## nwin <- length(argh) if(nwin == 0) { warning("No windows were given") return(NULL) } ## find non-empty ones if(any(isemp <- sapply(argh, is.empty))) argh <- argh[!isemp] nwin <- length(argh) if(nwin == 0) { warning("All windows were empty") return(NULL) } ## at least one window A <- argh[[1L]] if(nwin == 1) return(A) ## more than two windows if(nwin > 2) { ## check if we need polyclip somepoly <- !all(sapply(argh, is.mask)) if(somepoly) { ## determine a common set of parameters for polyclip p <- commonPolyclipArgs(do.call(boundingbox, argh), p=p) ## apply these parameters now to avoid numerical errors argh <- applyPolyclipArgs(argh, p=p) A <- argh[[1L]] } ## absorb all windows into A without rescaling nullp <- list(eps=1, x0=0, y0=0) for(i in 2:nwin) A <- do.call(union.owin, append(list(A, argh[[i]], p=nullp), rasterinfo)) if(somepoly) { ## undo rescaling A <- reversePolyclipArgs(A, p=p) } return(A) } ## Exactly two windows B <- argh[[2L]] if(identical(A, B)) return(A) ## check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") uname <- harmonise(unitname(A), unitname(B), single=TRUE) ## Determine type of intersection ## Arect <- is.rectangle(A) ## Brect <- is.rectangle(B) ## Apoly <- is.polygonal(A) ## Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) ## Create a rectangle to contain the 
result C <- owin(range(A$xrange, B$xrange), range(A$yrange, B$yrange), unitname=uname) if(!Amask && !Bmask) { ####### Result is polygonal (or rectangular) ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- do.call(polyclip::polyclip, append(list(a, b, "union", fillA="nonzero", fillB="nonzero"), p)) if(length(ab) == 0) return(emptywindow(C)) ## ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE, unitname=uname) AB <- rescue.rectangle(AB) return(AB) } ####### Result is a mask ############ ## Determine pixel raster parameters if(length(rasterinfo) == 0) { rasterinfo <- if(Amask) list(xy=list(x=as.numeric(prolongseq(A$xcol, C$xrange)), y=as.numeric(prolongseq(A$yrow, C$yrange)))) else if(Bmask) list(xy=list(x=as.numeric(prolongseq(B$xcol, C$xrange)), y=as.numeric(prolongseq(B$yrow, C$yrange)))) else list() } ## Convert C to mask C <- do.call(as.mask, append(list(w=C), rasterinfo)) rxy <- rasterxy.mask(C) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, A) | inside.owin(x, y, B) if(all(ok)) { ## result is a rectangle C <- as.rectangle(C) } else { ## result is a mask C$m[] <- ok } return(C) } setminus.owin <- function(A, B, ..., p) { if(is.null(B)) return(A) verifyclass(B, "owin") if(is.null(A)) return(emptywindow(Frame(B))) verifyclass(A, "owin") if(is.empty(A) || is.empty(B)) return(A) if(identical(A, B)) return(emptywindow(Frame(A))) ## p is a list of arguments to polyclip::polyclip if(missing(p) || is.null(p)) p <- list() ## check units if(!compatible(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") uname <- harmonise(unitname(A), unitname(B), single=TRUE) ## Determine type of arguments Arect <- is.rectangle(A) Brect <- is.rectangle(B) ## Apoly <- is.polygonal(A) ## Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) ## Case where A and B 
are both rectangular if(Arect && Brect) { if(is.subset.owin(A, B)) return(emptywindow(B)) C <- intersect.owin(A, B, fatal=FALSE) if(is.null(C) || is.empty(C)) return(A) return(complement.owin(C, A)) } ## Polygonal case if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- do.call(polyclip::polyclip, append(list(a, b, "minus", fillA="nonzero", fillB="nonzero"), p)) if(length(ab) == 0) return(emptywindow(B)) ## ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE, unitname=uname) AB <- rescue.rectangle(AB) return(AB) } ####### Result is a mask ############ ## Determine pixel raster parameters rasterinfo <- if((length(list(...)) > 0)) list(...) else if(Amask) list(xy=list(x=A$xcol, y=A$yrow)) else if(Bmask) list(xy=list(x=B$xcol, y=B$yrow)) else list() ## Convert A to mask AB <- do.call(as.mask, append(list(w=A), rasterinfo)) rxy <- rasterxy.mask(AB) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, A) & !inside.owin(x, y, B) if(!all(ok)) AB$m[] <- ok else AB <- rescue.rectangle(AB) return(AB) } ## auxiliary functions commonPolyclipArgs <- function(..., p=NULL) { # compute a common resolution for polyclip operations # on several windows if(!is.null(p) && !is.null(p$eps) && !is.null(p$x0) && !is.null(p$y0)) return(p) bb <- boundingbox(...) 
xr <- bb$xrange yr <- bb$yrange eps <- p$eps %orifnull% max(diff(xr), diff(yr))/(2^31) x0 <- p$x0 %orifnull% mean(xr) y0 <- p$y0 %orifnull% mean(yr) return(list(eps=eps, x0=x0, y0=y0)) } applyPolyclipArgs <- function(x, p=NULL) { if(is.null(p)) return(x) y <- lapply(x, shift, vec=-c(p$x0, p$y0)) z <- lapply(y, scalardilate, f=1/p$eps) return(z) } reversePolyclipArgs <- function(x, p=NULL) { if(is.null(p)) return(x) y <- scalardilate(x, f=p$eps) z <- shift(y, vec=c(p$x0, p$y0)) return(z) } trim.mask <- function(M, R, tolerant=TRUE) { ## M is a mask, ## R is a rectangle ## Ensure R is a subset of bounding rectangle of M R <- owin(intersect.ranges(M$xrange, R$xrange), intersect.ranges(M$yrange, R$yrange)) ## Deal with very thin rectangles if(tolerant) { R$xrange <- adjustthinrange(R$xrange, M$xstep, M$xrange) R$yrange <- adjustthinrange(R$yrange, M$ystep, M$yrange) } ## Extract subset of image grid yrowok <- inside.range(M$yrow, R$yrange) xcolok <- inside.range(M$xcol, R$xrange) if((ny <- sum(yrowok)) == 0 || (nx <- sum(xcolok)) == 0) return(emptywindow(R)) Z <- M Z$xrange <- R$xrange Z$yrange <- R$yrange Z$yrow <- M$yrow[yrowok] Z$xcol <- M$xcol[xcolok] Z$m <- M$m[yrowok, xcolok] if(ny < 2 || nx < 2) Z$m <- matrix(Z$m, nrow=ny, ncol=nx) Z$dim <- dim(Z$m) return(Z) } restrict.mask <- function(M, W) { ## M is a mask, W is any window stopifnot(is.mask(M)) stopifnot(inherits(W, "owin")) if(is.rectangle(W)) return(trim.mask(M, W)) M <- trim.mask(M, as.rectangle(W)) ## Determine which pixels of M are inside W rxy <- rasterxy.mask(M, drop=TRUE) x <- rxy$x y <- rxy$y ok <- inside.owin(x, y, W) Mm <- M$m Mm[Mm] <- ok M$m <- Mm return(M) } # SUBSUMED IN rmhexpand.R # expand.owin <- function(W, f=1) { # # # expand bounding box of 'win' # # by factor 'f' in **area** # if(f <= 0) # stop("f must be > 0") # if(f == 1) # return(W) # bb <- boundingbox(W) # xr <- bb$xrange # yr <- bb$yrange # fff <- (sqrt(f) - 1)/2 # Wexp <- owin(xr + fff * c(-1,1) * diff(xr), # yr + fff * c(-1,1) * 
diff(yr), # unitname=unitname(W)) # return(Wexp) #} trim.rectangle <- function(W, xmargin=0, ymargin=xmargin) { if(!is.rectangle(W)) stop("Internal error: tried to trim margin off non-rectangular window") xmargin <- ensure2vector(xmargin) ymargin <- ensure2vector(ymargin) if(any(xmargin < 0) || any(ymargin < 0)) stop("values of xmargin, ymargin must be nonnegative") if(sum(xmargin) > diff(W$xrange)) stop("window is too small to cut off margins of the width specified") if(sum(ymargin) > diff(W$yrange)) stop("window is too small to cut off margins of the height specified") owin(W$xrange + c(1,-1) * xmargin, W$yrange + c(1,-1) * ymargin, unitname=unitname(W)) } grow.rectangle <- function(W, xmargin=0, ymargin=xmargin, fraction=NULL) { if(!is.null(fraction)) { fraction <- ensure2vector(fraction) if(any(fraction < 0)) stop("fraction must be non-negative") if(missing(xmargin)) xmargin <- fraction[1L] * diff(W$xrange) if(missing(ymargin)) ymargin <- fraction[2L] * diff(W$yrange) } xmargin <- ensure2vector(xmargin) ymargin <- ensure2vector(ymargin) if(any(xmargin < 0) || any(ymargin < 0)) stop("values of xmargin, ymargin must be nonnegative") owin(W$xrange + c(-1,1) * xmargin, W$yrange + c(-1,1) * ymargin, unitname=unitname(W)) } grow.mask <- function(M, xmargin=0, ymargin=xmargin) { stopifnot(is.mask(M)) m <- as.matrix(M) Rplus <- grow.rectangle(as.rectangle(M), xmargin, ymargin) ## extend the raster xcolplus <- prolongseq(M$xcol, Rplus$xrange) yrowplus <- prolongseq(M$yrow, Rplus$yrange) mplus <- matrix(FALSE, length(yrowplus), length(xcolplus)) ## pad out the mask entries nleft <- attr(xcolplus, "nleft") nright <- attr(xcolplus, "nright") nbot <- attr(yrowplus, "nleft") ntop <- attr(yrowplus, "nright") mplus[ (nbot+1):(length(yrowplus)-ntop), (nleft+1):(length(xcolplus)-nright) ] <- m ## pack up result <- owin(xrange=Rplus$xrange, yrange=Rplus$yrange, xcol=as.numeric(xcolplus), yrow=as.numeric(yrowplus), mask=mplus, unitname=unitname(M)) return(result) } bdry.mask <- 
function(W) { verifyclass(W, "owin") W <- as.mask(W) m <- W$m nr <- nrow(m) nc <- ncol(m) if(!spatstat.options('Cbdrymask')) { ## old interpreted code b <- (m != rbind(FALSE, m[-nr, ])) b <- b | (m != rbind(m[-1, ], FALSE)) b <- b | (m != cbind(FALSE, m[, -nc])) b <- b | (m != cbind(m[, -1], FALSE)) } else { b <- integer(nr * nc) z <- .C("bdrymask", nx = as.integer(nc), ny = as.integer(nr), m = as.integer(m), b = as.integer(b), PACKAGE = "spatstat") b <- matrix(as.logical(z$b), nr, nc) } W$m <- b return(W) } nvertices <- function(x, ...) { UseMethod("nvertices") } nvertices.default <- function(x, ...) { v <- vertices(x) vx <- v$x n <- if(is.null(vx)) NA else length(vx) return(n) } nvertices.owin <- function(x, ...) { if(is.empty(x)) return(0) n <- switch(x$type, rectangle=4, polygonal=sum(lengths(lapply(x$bdry, getElement, name="x"))), mask=sum(bdry.mask(x)$m)) return(n) } vertices <- function(w) { UseMethod("vertices") } vertices.owin <- function(w) { verifyclass(w, "owin") if(is.empty(w)) return(NULL) switch(w$type, rectangle={ xr <- w$xrange yr <- w$yrange vert <- list(x=xr[c(1,2,2,1)], y=yr[c(1,1,2,2)]) }, polygonal={ vert <- do.call(concatxy,w$bdry) }, mask={ bm <- bdry.mask(w)$m rxy <- rasterxy.mask(w) xx <- rxy$x yy <- rxy$y vert <- list(x=as.vector(xx[bm]), y=as.vector(yy[bm])) }) return(vert) } diameter <- function(x) { UseMethod("diameter") } diameter.owin <- function(x) { w <- as.owin(x) if(is.empty(w)) return(NULL) vert <- vertices(w) if(length(vert$x) > 3) { # extract convex hull h <- with(vert, chull(x, y)) vert <- with(vert, list(x=x[h], y=y[h])) } d <- pairdist(vert, squared=TRUE) return(sqrt(max(d))) } ## radius of inscribed circle inradius <- function(W) { stopifnot(is.owin(W)) if(W$type == "rectangle") diameter(W)/2 else max(distmap(W, invert=TRUE)) } incircle <- function(W) { # computes the largest circle contained in W verifyclass(W, "owin") if(is.empty(W)) return(NULL) if(is.rectangle(W)) { xr <- W$xrange yr <- W$yrange x0 <- mean(xr) y0 <- 
mean(yr) radius <- min(diff(xr), diff(yr))/2 return(list(x=x0, y=y0, r=radius)) } # compute distance to boundary D <- distmap(W, invert=TRUE) D <- D[W, drop=FALSE] # find maximum distance v <- D$v ok <- !is.na(v) Dvalues <- as.vector(v[ok]) if(length(Dvalues) == 0) return(NULL) Dmax <- max(Dvalues) # find location of maximum locn <- which.max(Dvalues) locrow <- as.vector(row(v)[ok])[locn] loccol <- as.vector(col(v)[ok])[locn] x0 <- D$xcol[loccol] y0 <- D$yrow[locrow] if(is.mask(W)) { # radius could be one pixel diameter shorter than Dmax Dpixel <- sqrt(D$xstep^2 + D$ystep^2) radius <- max(0, Dmax - Dpixel) } else radius <- Dmax return(list(x=x0, y=y0, r=radius)) } inpoint <- function(W) { # selects a point that is always inside the window. verifyclass(W, "owin") if(is.empty(W)) return(NULL) if(is.rectangle(W)) return(c(mean(W$xrange), mean(W$yrange))) if(is.polygonal(W)) { xy <- centroid.owin(W) if(inside.owin(xy$x, xy$y, W)) return(xy) } W <- as.mask(W) Mm <- W$m if(!any(Mm)) return(NULL) Mrow <- as.vector(row(Mm)[Mm]) Mcol <- as.vector(col(Mm)[Mm]) selectmiddle <- function(x) { x[ceiling(length(x)/2)] } midcol <- selectmiddle(Mcol) midrow <- selectmiddle(Mrow[Mcol==midcol]) x <- W$xcol[midcol] y <- W$yrow[midrow] return(c(x,y)) } simplify.owin <- function(W, dmin) { verifyclass(W, "owin") if(is.empty(W)) return(W) W <- as.polygonal(W) W$bdry <- lapply(W$bdry, simplify.xypolygon, dmin=dmin) return(W) } is.convex <- function(x) { verifyclass(x, "owin") if(is.empty(x)) return(TRUE) switch(x$type, rectangle={return(TRUE)}, polygonal={ b <- x$bdry if(length(b) > 1) return(FALSE) b <- b[[1L]] xx <- b$x yy <- b$y ch <- chull(xx,yy) return(length(ch) == length(xx)) }, mask={ v <- vertices(x) v <- as.ppp(v, W=as.rectangle(x)) ch <- convexhull.xy(v) edg <- edges(ch) edgedist <- nncross(v, edg, what="dist") pixdiam <- sqrt(x$xstep^2 + x$ystep^2) return(all(edgedist <= pixdiam)) }) return(as.logical(NA)) } convexhull <- function(x) { if(inherits(x, "owin")) v <- vertices(x) 
else if(inherits(x, "psp")) v <- endpoints.psp(x) else if(inherits(x, "ppp")) v <- x else { x <- as.owin(x) v <- vertices(x) } b <- as.rectangle(x) if(is.empty(x)) return(emptywindow(b)) ch <- convexhull.xy(v) out <- rebound.owin(ch, b) return(out) } is.empty <- function(x) { UseMethod("is.empty") } is.empty.default <- function(x) { length(x) == 0 } is.empty.owin <- function(x) { switch(x$type, rectangle=return(FALSE), polygonal=return(length(x$bdry) == 0), mask=return(!any(x$m))) return(NA) } emptywindow <- function(w) { w <- as.owin(w) out <- owin(w$xrange, w$yrange, poly=list(), unitname=unitname(w)) return(out) } discs <- function(centres, radii=marks(centres)/2, ..., separate=FALSE, mask=FALSE, trim=TRUE, delta=NULL, npoly=NULL) { stopifnot(is.ppp(centres)) n <- npoints(centres) if(n == 0) return(emptywindow(Frame(centres))) check.nvector(radii, npoints(centres), oneok=TRUE) stopifnot(all(radii > 0)) if(sameradius <- (length(radii) == 1)) radii <- rep(radii, npoints(centres)) if(!separate && mask) { #' compute pixel approximation M <- as.mask(Window(centres), ...) 
z <- .C("discs2grid", nx = as.integer(M$dim[2L]), x0 = as.double(M$xcol[1L]), xstep = as.double(M$xstep), ny = as.integer(M$dim[1L]), y0 = as.double(M$yrow[1L]), ystep = as.double(M$ystep), nd = as.integer(n), xd = as.double(centres$x), yd = as.double(centres$y), rd = as.double(radii), out = as.integer(integer(prod(M$dim))), PACKAGE = "spatstat") M$m[] <- as.logical(z$out) return(M) } #' construct a list of discs D <- list() if(!sameradius && length(unique(radii)) > 1) { if(is.null(delta) && is.null(npoly)) { ra <- range(radii) rr <- ra[2L]/ra[1L] mm <- ceiling(128/rr) mm <- max(16, mm) ## equals 16 unless ra[2]/ra[1] < 8 delta <- 2 * pi * ra[1L]/mm } for(i in 1:n) D[[i]] <- disc(centre=centres[i], radius=radii[i], delta=delta, npoly=npoly) } else { #' congruent discs -- use 'shift' W0 <- disc(centre=c(0,0), radius=radii[1L], delta=delta, npoly=npoly) for(i in 1:n) D[[i]] <- shift(W0, vec=centres[i]) } D <- as.solist(D) #' return list of discs? if(separate) return(D) #' return union of discs W <- union.owin(D) if(trim) W <- intersect.owin(W, Window(centres)) return(W) } harmonise.owin <- harmonize.owin <- function(...) { argz <- list(...) 
wins <- solapply(argz, as.owin) if(length(wins) < 2L) return(wins) ismask <- sapply(wins, is.mask) if(!any(ismask)) return(wins) comgrid <- do.call(commonGrid, lapply(argz, as.owin)) result <- solapply(argz, "[", i=comgrid, drop=FALSE) return(result) } spatstat/R/bw.scott.R0000644000176200001440000000132613515321571014211 0ustar liggesusers#' #' bw.scott.R #' #' Bandwidth selection rule bw.scott for point patterns in any dimension #' #' $Revision: 1.1 $ $Date: 2019/07/22 11:41:41 $ bw.scott <- function(X, isotropic=FALSE, d=NULL) { stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X)) if(is.null(d)) { d <- spatdim(X, intrinsic=FALSE) } else check.1.integer(d) nX <- npoints(X) cX <- coords(X, spatial=TRUE, temporal=FALSE, local=FALSE) sdX <- apply(cX, 2, sd) if(isotropic) { #' geometric mean sdX <- exp(mean(log(pmax(sdX, .Machine$double.eps)))) } b <- sdX * nX^(-1/(d+4)) names(b) <- if(isotropic) "sigma" else paste0("sigma.", colnames(cX)) return(b) } bw.scott.iso <- function(X) { bw.scott(X, isotropic=TRUE) } spatstat/R/rmh.default.R0000644000176200001440000010205513605016330014652 0ustar liggesusers# # $Id: rmh.default.R,v 1.115 2020/01/07 05:53:17 adrian Exp adrian $ # rmh.default <- function(model,start=NULL, control=default.rmhcontrol(model), ..., nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, snoop=FALSE) { # # Function rmh. To simulate realizations of 2-dimensional point # patterns, given the conditional intensity function of the # underlying process, via the Metropolis-Hastings algorithm. 
# #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # V A L I D A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(verbose) cat("Checking arguments..") # validate arguments and fill in the defaults model <- rmhmodel(model) start <- rmhstart(start) if(is.null(control)) { control <- default.rmhcontrol(model) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) control <- rmhResolveControl(control, model) saveinfo <- as.logical(saveinfo) # retain "..." arguments unrecognised by rmhcontrol # These are assumed to be arguments of functions defining the trend argh <- list(...) known <- names(argh) %in% names(formals(rmhcontrol.default)) f.args <- argh[!known] #### Multitype models # Decide whether the model is multitype; if so, find the types. types <- rmhResolveTypes(model, start, control) ntypes <- length(types) mtype <- (ntypes > 1) # If the model is multitype, check that the model parameters agree with types # and digest them if(mtype && !is.null(model$check)) { model <- rmhmodel(model, types=types) } else { model$types <- types } ######## Check for illegal combinations of model, start and control ######## # No expansion can be done if we are using x.start if(start$given == "x") { if(control$expand$force.exp) stop("Cannot expand window when using x.start.\n", call.=FALSE) control$expand <- .no.expansion } # Warn about a silly value of fixall: if(control$fixall & ntypes==1) { warning("control$fixall applies only to multitype processes. 
Ignored.", call.=FALSE) control$fixall <- FALSE if(control$fixing == "n.each.type") control$fixing <- "n.total" } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # M O D E L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ####### Determine windows ################################ if(verbose) cat("determining simulation windows...") # these may be NULL w.model <- model$w x.start <- start$x.start trend <- model$trend trendy <- !is.null(trend) singletrend <- trendy && (is.im(trend) || is.function(trend) || (is.numeric(trend) && length(trend) == 1)) trendlist <- if(singletrend) list(trend) else trend # window implied by trend image, if any w.trend <- if(is.im(trend)) as.owin(trend) else if(is.list(trend) && any(ok <- unlist(lapply(trend, is.im)))) as.owin((trend[ok])[[1L]]) else NULL ## Clipping window (for final result) w.clip <- if(!is.null(w.model)) w.model else if(!will.expand(control$expand)) { if(start$given == "x" && is.ppp(x.start)) x.start$window else if(is.owin(w.trend)) w.trend } else NULL if(!is.owin(w.clip)) stop("Unable to determine window for pattern", call.=FALSE) ## Simulation window xpn <- rmhResolveExpansion(w.clip, control, trendlist, "trend") w.sim <- xpn$wsim expanded <- xpn$expanded ## Check the fine print if(expanded) { if(control$fixing != "none") stop(paste("If we're conditioning on the number of points,", "we cannot clip the result to another window."), call.=FALSE) if(!is.subset.owin(w.clip, w.sim)) stop("Expanded simulation window does not contain model window", call.=FALSE) } ####### Trend ################################ # Check that the expanded window fits inside the window # upon which the trend(s) live if there are trends and # if any trend is given by an image. 
if(expanded && !is.null(trend)) { trends <- if(is.im(trend)) list(trend) else trend images <- unlist(lapply(trends, is.im)) if(any(images)) { iwindows <- lapply(trends[images], as.owin) nimages <- length(iwindows) misfit <- !sapply(iwindows, is.subset.owin, A=w.sim) nmisfit <- sum(misfit) if(nmisfit > 1) stop(paste("Expanded simulation window is not contained in", "several of the trend windows.\n", "Bailing out."), call.=FALSE) else if(nmisfit == 1) { warning(paste("Expanded simulation window is not contained in", if(nimages == 1) "the trend window.\n" else "one of the trend windows.\n", "Expanding to this trend window (only)."), call.=FALSE) w.sim <- iwindows[[which(misfit)]] } } } # Extract the 'beta' parameters if(length(model$cif) == 1) { # single interaction beta <- model$C.beta betalist <- list(beta) } else { # hybrid betalist <- model$C.betalist # multiply beta vectors for each component beta <- Reduce("*", betalist) } ##### .................. CONDITIONAL SIMULATION ................... 
##### #|| Determine windows for conditional simulation #|| #|| w.state = window for the full configuration #|| #|| w.sim = window for the 'free' (random) points #|| w.state <- w.sim condtype <- control$condtype x.cond <- control$x.cond # n.cond <- control$n.cond switch(condtype, none={ w.cond <- NULL }, window={ # conditioning on the realisation inside a subwindow w.cond <- as.owin(x.cond) # subtract from w.sim w.sim <- setminus.owin(w.state, w.cond) if(is.empty(w.sim)) stop(paste("Conditional simulation is undefined;", "the conditioning window", sQuote("as.owin(control$x.cond)"), "covers the entire simulation window"), call.=FALSE) }, Palm={ # Palm conditioning w.cond <- NULL }) ##### #|| Convert conditioning points to appropriate format x.condpp <- switch(condtype, none=NULL, window=x.cond, Palm=as.ppp(x.cond, w.state)) # validate if(!is.null(x.condpp)) { if(mtype) { if(!is.marked(x.condpp)) stop("Model is multitype, but x.cond is unmarked", call.=FALSE) if(!isTRUE(all.equal(types, levels(marks(x.condpp))))) stop("Types of points in x.cond do not match types in model", call.=FALSE) } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S T A R T I N G S T A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ###################### Starting state data ############################ # whether the initial state should be thinned thin <- (start$given != "x") && (control$fixing == "none") # There must now be a starting state. if(start$given == "none") { # For conditional simulation, the starting state must be given if(condtype != "none") stop("No starting state given", call.=FALSE) # Determine integral of beta * trend over data window. # This is the expected number of points in the reference Poisson process. 
area.w.clip <- area(w.clip) if(trendy) { tsummaries <- summarise.trend(trend, w=w.clip, a=area.w.clip) En <- beta * sapply(tsummaries, getElement, name="integral") } else { En <- beta * area.w.clip } # Fix n.start equal to this integral n.start <- if(spatstat.options("scalable")) round(En) else ceiling(En) start <- rmhstart(n.start=n.start) } # In the case of conditional simulation, the start data determine # the 'free' points (i.e. excluding x.cond) in the initial state. switch(start$given, none={ stop("No starting state given", call.=FALSE) }, x = { # x.start was given # coerce it to a ppp object if(!is.ppp(x.start)) x.start <- as.ppp(x.start, w.state) if(condtype == "window") { # clip to simulation window xs <- x.start[w.sim] nlost <- x.start$n - xs$n if(nlost > 0) warning(paste(nlost, ngettext(nlost, "point","points"), "of x.start", ngettext(nlost, "was", "were"), "removed because", ngettext(nlost, "it", "they"), "fell in the window of x.cond"), call.=FALSE) x.start <- xs } npts.free <- x.start$n }, n = { # n.start was given n.start <- start$n.start # Adjust the number of points in the starting state in accordance # with the expansion that has occurred. if(expanded) { holnum <- if(spatstat.options("scalable")) round else ceiling n.start <- holnum(n.start * area(w.sim)/area(w.clip)) } # npts.free <- sum(n.start) # The ``sum()'' is redundant if n.start # is scalar; no harm, but. }, stop("Internal error: start$given unrecognized"), call.=FALSE) #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # C O N T R O L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ################### Periodic boundary conditions ######################### periodic <- control$periodic if(is.null(periodic)) { # undecided. 
Use default rule control$periodic <- periodic <- expanded && is.rectangle(w.state) } else if(periodic && !is.rectangle(w.state)) { # if periodic is TRUE we have to be simulating in a rectangular window. stop("Need rectangular window for periodic simulation.", call.=FALSE) } # parameter passed to C: period <- if(periodic) c(diff(w.state$xrange), diff(w.state$yrange)) else c(-1,-1) #### vector of proposal probabilities if(!mtype) ptypes <- 1 else { ptypes <- control$ptypes if(is.null(ptypes)) { # default proposal probabilities ptypes <- if(start$given == "x" && (nx <- npoints(x.start)) > 0) { table(marks(x.start, dfok=FALSE))/nx } else rep.int(1/ntypes, ntypes) } else { # Validate ptypes if(length(ptypes) != ntypes | sum(ptypes) != 1) stop("Argument ptypes is mis-specified.", call.=FALSE) } } ######################################################################## # Normalising constant for proposal density # # Integral of trend over the expanded window (or area of window): # Iota == Integral Of Trend (or) Area. area.w.sim <- area(w.sim) if(trendy) { if(verbose) cat("Evaluating trend integral...") tsummaries <- summarise.trend(trend, w=w.sim, a=area.w.sim) mins <- sapply(tsummaries, getElement, name="min") if(any(mins < 0)) stop("Trend has negative values", call.=FALSE) iota <- sapply(tsummaries, getElement, name="integral") tmax <- sapply(tsummaries, getElement, name="max") } else { iota <- area.w.sim tmax <- NULL } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # A.S. 
EMPTY PROCESS # # for conditional simulation, 'empty' means there are no 'free' points # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== a.s.empty <- FALSE # # Empty pattern, simulated conditional on n # if(npts.free == 0 && control$fixing != "none") { a.s.empty <- TRUE if(verbose) { mess <- paste("Initial pattern has 0 random points,", "and simulation is conditional on the number of points -") if(condtype == "none") warning(paste(mess, "returning an empty pattern"), call.=FALSE) else warning(paste(mess, "returning a pattern with no random points"), call.=FALSE) } } # # If beta = 0, the process is almost surely empty # if(all(beta < .Machine$double.eps)) { if(control$fixing == "none" && condtype == "none") { # return empty pattern if(verbose) warning("beta = 0 implies an empty pattern", call.=FALSE) a.s.empty <- TRUE } else stop("beta = 0 implies an empty pattern, but we are simulating conditional on a nonzero number of points", call.=FALSE) } # # If we're conditioning on the contents of a subwindow, # and the subwindow covers the clipping region, # the result is deterministic. 
if(condtype == "window" && is.subset.owin(w.clip, w.cond)) { a.s.empty <- TRUE warning(paste("Model window is a subset of conditioning window:", "result is deterministic"), call.=FALSE) } # # if(a.s.empty) { # create empty pattern, to be returned if(!is.null(x.condpp)) empty <- x.condpp[w.clip] else { empty <- ppp(numeric(0), numeric(0), window=w.clip) if(mtype) { vide <- factor(types[integer(0)], levels=types) empty <- empty %mark% vide } } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # PACK UP # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ######### Store decisions Model <- model Start <- start Control <- control Model$w <- w.clip Model$types <- types Control$expand <- if(expanded) rmhexpand(w.state) else .no.expansion Control$internal <- list(w.sim=w.sim, w.state=w.state, x.condpp=x.condpp, ptypes=ptypes, period=period, thin=thin) Model$internal <- list(a.s.empty=a.s.empty, empty=if(a.s.empty) empty else NULL, mtype=mtype, trendy=trendy, betalist=betalist, beta=beta, iota=iota, tmax=tmax) Start$internal <- list(npts.free=npts.free) InfoList <- list(model=Model, start=Start, control=Control) class(InfoList) <- c("rmhInfoList", class(InfoList)) # go if(nsim == 1 && drop) { result <- do.call(rmhEngine, append(list(InfoList, verbose=verbose, snoop=snoop, kitchensink=saveinfo), f.args)) } else { result <- vector(mode="list", length=nsim) if(verbose) { splat("Generating", nsim, "point patterns...") pstate <- list() } subverb <- verbose && (nsim == 1) for(isim in 1:nsim) { if(verbose) pstate <- progressreport(isim, nsim, state=pstate) result[[isim]] <- do.call(rmhEngine, append(list(InfoList, verbose=subverb, snoop=snoop, kitchensink=saveinfo), f.args)) } if(verbose) splat("Done.\n") result <- simulationresult(result, nsim, drop) } return(result) } print.rmhInfoList <- function(x, ...) 
{ cat("\nPre-digested Metropolis-Hastings algorithm parameters (rmhInfoList)\n") print(as.anylist(x)) } #--------------- rmhEngine ------------------------------------------- # # This is the interface to the C code. # # InfoList is a list of pre-digested, validated arguments # obtained from rmh.default. # # This function is called by rmh.default to generate one simulated # realisation of the model. # It's called repeatedly by ho.engine and qqplot.ppm to generate multiple # realisations (saving time by not repeating the argument checking # in rmh.default). # arguments: # kitchensink: whether to tack InfoList on to the return value as an attribute # preponly: whether to just return InfoList without simulating # # rmh.default digests arguments and calls rmhEngine with kitchensink=T # # qqplot.ppm first gets InfoList by calling rmh.default with preponly=T # (which digests the model arguments and calls rmhEngine # with preponly=T, returning InfoList), # then repeatedly calls rmhEngine(InfoList) to simulate. # # ------------------------------------------------------- rmhEngine <- function(InfoList, ..., verbose=FALSE, kitchensink=FALSE, preponly=FALSE, snoop=FALSE, overrideXstart=NULL, overrideclip=FALSE) { # Internal Use Only! # This is the interface to the C code. 
if(!inherits(InfoList, "rmhInfoList")) stop("data not in correct format for internal function rmhEngine", call.=FALSE) if(preponly) return(InfoList) model <- InfoList$model start <- InfoList$start control <- InfoList$control w.sim <- control$internal$w.sim w.state <- control$internal$w.state w.clip <- model$w condtype <- control$condtype x.condpp <- control$internal$x.condpp types <- model$types ntypes <- length(types) ptypes <- control$internal$ptypes period <- control$internal$period mtype <- model$internal$mtype trend <- model$trend trendy <- model$internal$trendy # betalist <- model$internal$betalist beta <- model$internal$beta iota <- model$internal$iota tmax <- model$internal$tmax npts.free <- start$internal$npts.free n.start <- start$n.start x.start <- start$x.start #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # E M P T Y P A T T E R N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(model$internal$a.s.empty) { if(verbose) cat("\n") empty <- model$internal$empty attr(empty, "info") <- InfoList return(empty) } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S I M U L A T I O N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ############################################# #### #### Random number seed: initialisation & capture #### ############################################# if(!exists(".Random.seed")) runif(1L) saved.seed <- .Random.seed ############################################# #### #### Poisson case #### ############################################# if(is.poisson.rmhmodel(model)) { if(verbose) cat("\n") intensity <- if(!trendy) beta else model$trend Xsim <- switch(control$fixing, none= { # Poisson process if(!mtype) rpoispp(intensity, win=w.sim, ..., warnwin=FALSE) else rmpoispp(intensity, win=w.sim, types=types, warnwin=FALSE) }, n.total = { # Binomial/multinomial process with fixed total number of points if(!mtype) 
rpoint(npts.free, intensity, win=w.sim, verbose=verbose) else rmpoint(npts.free, intensity, win=w.sim, types=types, verbose=verbose) }, n.each.type = { # Multinomial process with fixed number of points of each type npts.each <- switch(start$given, n = n.start, x = as.integer(table(marks(x.start, dfok=FALSE))), stop("No starting state given; can't condition on fixed number of points", call.=FALSE)) rmpoint(npts.each, intensity, win=w.sim, types=types, verbose=verbose) }, stop("Internal error: control$fixing unrecognised", call.=FALSE) ) # if conditioning, add fixed points if(condtype != "none") Xsim <- superimpose(Xsim, x.condpp, W=w.state) # clip result to output window Xclip <- if(!overrideclip) Xsim[w.clip] else Xsim attr(Xclip, "info") <- InfoList return(Xclip) } ######################################################################## # M e t r o p o l i s H a s t i n g s s i m u l a t i o n ######################################################################## if(verbose) cat("Starting simulation.\nInitial state...") #### Build starting state npts.cond <- if(condtype != "none") x.condpp$n else 0 # npts.total <- npts.free + npts.cond #### FIRST generate the 'free' points #### First the marks, if any. #### The marks must be integers 0 to (ntypes-1) for passing to C Ctypes <- if(mtype) 0:(ntypes-1) else 0 Cmarks <- if(!mtype) 0 else switch(start$given, n = { # n.start given if(control$fixing=="n.each.type") rep.int(Ctypes,n.start) else sample(Ctypes,npts.free,TRUE,ptypes) }, x = { # x.start given as.integer(marks(x.start, dfok=FALSE))-1L }, stop("internal error: start$given unrecognised", call.=FALSE) ) # # Then the x, y coordinates # switch(start$given, x = { x <- x.start$x y <- x.start$y }, n = { xy <- if(!trendy) runifpoint(npts.free, w.sim, ...) else rpoint.multi(npts.free, trend, tmax, factor(Cmarks,levels=Ctypes), w.sim, ...) 
x <- xy$x y <- xy$y }) ## APPEND the free points AFTER the conditioning points if(condtype != "none") { x <- c(x.condpp$x, x) y <- c(x.condpp$y, y) if(mtype) Cmarks <- c(as.integer(marks(x.condpp))-1L, Cmarks) } if(!is.null(overrideXstart)) { #' override the previous data x <- overrideXstart$x y <- overrideXstart$y if(mtype) Cmarks <- as.integer(marks(overrideXstart))-1L } # decide whether to activate visual debugger if(snoop) { Xinit <- ppp(x, y, window=w.sim) if(mtype) marks(Xinit) <- factor(Cmarks, levels=Ctypes, labels=types) if(verbose) cat("\nCreating debugger environment..") snoopenv <- rmhSnoopEnv(Xinit=Xinit, Wclip=w.clip, R=reach(model)) if(verbose) cat("Done.\n") } else snoopenv <- "none" ####################################################################### # Set up C call ###################################################################### # Determine the name of the cif used in the C code C.id <- model$C.id ncif <- length(C.id) # Get the parameters in C-ese ipar <- model$C.ipar iparlist <- if(ncif == 1) list(ipar) else model$C.iparlist iparlen <- lengths(iparlist) beta <- model$internal$beta # Absorb the constants or vectors `iota' and 'ptypes' into the beta parameters beta <- (iota/ptypes) * beta # Algorithm control parameters p <- control$p q <- control$q nrep <- control$nrep # fixcode <- control$fixcode # fixing <- control$fixing fixall <- control$fixall nverb <- control$nverb saving <- control$saving nsave <- control$nsave nburn <- control$nburn track <- control$track thin <- control$internal$thin pstage <- control$pstage %orifnull% "start" if(pstage == "block" && !saving) pstage <- "start" temper <- FALSE invertemp <- 1.0 if(verbose) cat("Ready to simulate. 
") storage.mode(ncif) <- "integer" storage.mode(C.id) <- "character" storage.mode(beta) <- "double" storage.mode(ipar) <- "double" storage.mode(iparlen) <- "integer" storage.mode(period) <- "double" storage.mode(ntypes) <- "integer" storage.mode(nrep) <- "integer" storage.mode(p) <- storage.mode(q) <- "double" storage.mode(nverb) <- "integer" storage.mode(x) <- storage.mode(y) <- "double" storage.mode(Cmarks) <- "integer" storage.mode(fixall) <- "integer" storage.mode(npts.cond) <- "integer" storage.mode(track) <- "integer" storage.mode(thin) <- "integer" storage.mode(temper) <- "integer" storage.mode(invertemp) <- "double" if(pstage == "start" || !saving) { #' generate all proposal points now. if(verbose) cat("Generating proposal points...") #' If the pattern is multitype, generate the mark proposals (0 to ntypes-1) Cmprop <- if(mtype) sample(Ctypes,nrep,TRUE,prob=ptypes) else 0 storage.mode(Cmprop) <- "integer" #' Generate the ``proposal points'' in the expanded window. xy <- if(trendy) { rpoint.multi(nrep,trend,tmax, factor(Cmprop, levels=Ctypes), w.sim, ..., warn=FALSE) } else runifpoint(nrep, w.sim, warn=FALSE) xprop <- xy$x yprop <- xy$y storage.mode(xprop) <- storage.mode(yprop) <- "double" } if(!saving) { # ////////// Single block ///////////////////////////////// nrep0 <- 0 storage.mode(nrep0) <- "integer" # Call the Metropolis-Hastings C code: if(verbose) cat("Running Metropolis-Hastings.\n") out <- .Call("xmethas", ncif, C.id, beta, ipar, iparlen, period, xprop, yprop, Cmprop, ntypes, nrep, p, q, nverb, nrep0, x, y, Cmarks, npts.cond, fixall, track, thin, snoopenv, temper, invertemp, PACKAGE="spatstat") # Extract the point pattern returned from C X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE) if(mtype) { #' convert integer marks from C to R #' then restore original type levels marks(X) <- factor(out[[3L]], levels=Ctypes, labels=types) } # Now clip the pattern to the ``clipping'' window: if(!overrideclip && !control$expand$force.noexp) X 
<- X[w.clip] # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) History <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) History <- cbind(History, data.frame(numerator=numerator, denominator=denominator)) } } } else { # ////////// Multiple blocks ///////////////////////////////// ## determine length of each block of simulations nsuperblocks <- as.integer(1L + ceiling((nrep - nburn)/sum(nsave))) block <- c(nburn, rep.int(nsave, nsuperblocks-1L)) block <- block[cumsum(block) <= nrep] if((tot <- sum(block)) < nrep) block <- c(block, nrep-tot) block <- block[block >= 1L] nblocks <- length(block) blockend <- cumsum(block) ## set up list to contain the saved point patterns Xlist <- vector(mode="list", length=nblocks+1L) ## save initial state Xinit <- ppp(x=x, y=y, window=w.state, check=FALSE) if(mtype) { ## convert integer marks from C to R ## then restore original type levels marks(Xinit) <- factor(Cmarks, levels=Ctypes, labels=types) } Xlist[[1L]] <- Xinit # Call the Metropolis-Hastings C code repeatedly: xprev <- x yprev <- y Cmarksprev <- Cmarks # thinFALSE <- as.integer(FALSE) storage.mode(thinFALSE) <- "integer" # ................ loop ......................... 
for(I in 1:nblocks) { # number of iterations for this block nrepI <- block[I] storage.mode(nrepI) <- "integer" # number of previous iterations nrep0 <- if(I == 1) 0 else blockend[I-1] storage.mode(nrep0) <- "integer" # Generate or extract proposals switch(pstage, start = { #' extract proposals from previously-generated vectors if(verbose) cat("Extracting proposal points...") seqI <- 1:nrepI xpropI <- xprop[seqI] ypropI <- yprop[seqI] CmpropI <- Cmprop[seqI] storage.mode(xpropI) <- storage.mode(ypropI) <- "double" storage.mode(CmpropI) <- "integer" }, block = { # generate 'nrepI' random proposals if(verbose) cat("Generating proposal points...") #' If the pattern is multitype, generate the mark proposals CmpropI <- if(mtype) sample(Ctypes,nrepI,TRUE,prob=ptypes) else 0 storage.mode(CmpropI) <- "integer" #' Generate the ``proposal points'' in the expanded window. xy <- if(trendy) { rpoint.multi(nrepI,trend,tmax, factor(CmpropI, levels=Ctypes), w.sim, ..., warn=FALSE) } else runifpoint(nrepI, w.sim, warn=FALSE) xpropI <- xy$x ypropI <- xy$y storage.mode(xpropI) <- storage.mode(ypropI) <- "double" }) # no thinning in subsequent blocks if(I > 1) thin <- thinFALSE #' call if(verbose) cat("Running Metropolis-Hastings.\n") out <- .Call("xmethas", ncif, C.id, beta, ipar, iparlen, period, xpropI, ypropI, CmpropI, ntypes, nrepI, p, q, nverb, nrep0, xprev, yprev, Cmarksprev, npts.cond, fixall, track, thin, snoopenv, temper, invertemp, PACKAGE = "spatstat") # Extract the point pattern returned from C X <- ppp(x=out[[1L]], y=out[[2L]], window=w.state, check=FALSE) if(mtype) { # convert integer marks from C to R # then restore original type levels marks(X) <- factor(out[[3L]], levels=Ctypes, labels=types) } # Now clip the pattern to the ``clipping'' window: if(!overrideclip && !control$expand$force.noexp) X <- X[w.clip] # commit to list Xlist[[I+1L]] <- X # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, 
labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) HistoryI <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) HistoryI <- cbind(HistoryI, data.frame(numerator=numerator, denominator=denominator)) } # concatenate with histories of previous blocks History <- if(I == 1) HistoryI else rbind(History, HistoryI) } # update 'previous state' xprev <- out[[1L]] yprev <- out[[2L]] Cmarksprev <- if(!mtype) 0 else out[[3]] storage.mode(xprev) <- storage.mode(yprev) <- "double" storage.mode(Cmarksprev) <- "integer" if(pstage == "start") { #' discard used proposals xprop <- xprop[-seqI] yprop <- yprop[-seqI] Cmprop <- Cmprop[-seqI] } } # .............. end loop ............................... # Result of simulation is final state 'X' # Tack on the list of intermediate states names(Xlist) <- paste("Iteration", c(0,as.integer(blockend)), sep="_") attr(X, "saved") <- as.solist(Xlist) } # Append to the result information about how it was generated. 
if(kitchensink) { attr(X, "info") <- InfoList attr(X, "seed") <- saved.seed } if(track) attr(X, "history") <- History return(X) } # helper function summarise.trend <- local({ # main function summarise.trend <- function(trend, w, a=area(w)) { tlist <- if(is.function(trend) || is.im(trend)) list(trend) else trend return(lapply(tlist, summarise1, w=w, a=a)) } # summarise1 <- function(x, w, a) { if(is.numeric(x)) { mini <- maxi <- x integ <- a*x } else { Z <- as.im(x, w)[w, drop=FALSE] ran <- range(Z) mini <- ran[1L] maxi <- ran[2L] integ <- integral.im(Z) } return(list(min=mini, max=maxi, integral=integ)) } summarise.trend }) spatstat/R/kppm.R0000644000176200001440000016703213606005001013410 0ustar liggesusers# # kppm.R # # kluster/kox point process models # # $Revision: 1.145 $ $Date: 2020/01/10 05:22:06 $ # kppm <- function(X, ...) { UseMethod("kppm") } kppm.formula <- function(X, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), ..., data=NULL) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ########### INTERPRET FORMULA ############################## if(!inherits(X, "formula")) stop(paste("Argument 'X' should be a formula")) formula <- X if(spatstat.options("expand.polynom")) formula <- expand.polynom(formula) ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2L]] trend <- formula[c(1L,3L)] ## FIT ####################################### thecall <- call("kppm", X=Yexpr, trend=trend, data=data, clusters=clusters) ncall <- length(thecall) argh <- list(...) 
nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } ## result <- eval(thecall, ## envir=if(!is.null(data)) data else parent.frame(), ## enclos=if(!is.null(data)) parent.frame() else baseenv()) callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv, enclos=baseenv()) result$call <- cl result$callframe <- parent.frame() if(!("callstring" %in% names(list(...)))) result$callstring <- callstring return(result) } kppm.ppp <- kppm.quad <- function(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), data=NULL, ..., covariates = data, subset, method = c("mincon", "clik2", "palm"), improve.type = c("none", "clik1", "wclik1", "quasi"), improve.args = list(), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") Xname <- short.deparse(substitute(X)) clusters <- match.arg(clusters) improve.type <- match.arg(improve.type) method <- match.arg(method) if(method == "mincon") statistic <- pickoption("summary statistic", statistic, c(K="K", g="pcf", pcf="pcf")) ClusterArgs <- list(method = method, improve.type = improve.type, improve.args = improve.args, weightfun=weightfun, control=control, algorithm=algorithm, statistic=statistic, statargs=statargs, rmax = rmax) Xenv <- list2env(as.list(covariates), parent=parent.frame()) X <- eval(substitute(X), envir=Xenv, enclos=baseenv()) isquad <- is.quad(X) if(!is.ppp(X) && !isquad) stop("X should be a point pattern (ppp) or quadrature scheme (quad)") if(is.marked(X)) stop("Sorry, cannot handle marked point patterns") if(!missing(subset)) { W <- eval(subset, covariates, parent.frame()) if(!is.null(W)) { if(is.im(W)) { W <- solutionset(W) } else if(!is.owin(W)) { stop("Argument 'subset' should yield a window or logical image", 
call.=FALSE) } X <- X[W] } } po <- ppm(Q=X, trend=trend, covariates=covariates, forcefit=TRUE, rename.intercept=FALSE, covfunargs=covfunargs, use.gam=use.gam, nd=nd, eps=eps) XX <- if(isquad) X$data else X # set default weight function if(is.null(weightfun) && method != "mincon") { RmaxW <- (rmax %orifnull% rmax.rule("K", Window(XX), intensity(XX))) / 2 weightfun <- function(d, rr=RmaxW) { as.integer(d <= rr) } formals(weightfun)[[2]] <- RmaxW attr(weightfun, "selfprint") <- paste0("Indicator(distance <= ", RmaxW, ")") } # fit out <- switch(method, mincon = kppmMinCon(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, statistic=statistic, statargs=statargs, rmax=rmax, algorithm=algorithm, ...), clik2 = kppmComLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...), palm = kppmPalmLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, weightfun=weightfun, rmax=rmax, algorithm=algorithm, ...)) # out <- append(out, list(ClusterArgs=ClusterArgs, call=cl, callframe=parent.frame(), callstring=callstring)) # Detect DPPs DPP <- list(...)$DPP class(out) <- c(ifelse(is.null(DPP), "kppm", "dppm"), class(out)) # Update intensity estimate with improve.kppm if necessary: if(improve.type != "none") out <- do.call(improve.kppm, append(list(object = out, type = improve.type), improve.args)) return(out) } kppmMinCon <- function(X, Xname, po, clusters, control, statistic, statargs, algorithm="Nelder-Mead", DPP=NULL, ...) 
{ # Minimum contrast fit stationary <- is.stationary(po) # compute intensity if(stationary) { lambda <- summary(po)$trend$value } else { # compute intensity at high resolution if available w <- as.owin(po, from="covariates") if(!is.mask(w)) w <- NULL lambda <- predict(po, locations=w) } # Detect DPP model and change clusters and intensity correspondingly if(!is.null(DPP)){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } mcfit <- clusterfit(X, clusters, lambda = lambda, dataname = Xname, control = control, statistic = statistic, statargs = statargs, algorithm=algorithm, ...) fitinfo <- attr(mcfit, "info") attr(mcfit, "info") <- NULL # all info that depends on the fitting method: Fit <- list(method = "mincon", statistic = statistic, Stat = fitinfo$Stat, StatFun = fitinfo$StatFun, StatName = fitinfo$StatName, FitFun = fitinfo$FitFun, statargs = statargs, mcfit = mcfit, maxlogcl = NULL) # results if(!is.null(DPP)){ clusters <- update(clusters, as.list(mcfit$par)) out <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, po = po, Fit = Fit) } else{ out <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = fitinfo$modelname, isPCP = fitinfo$isPCP, po = po, lambda = lambda, mu = mcfit$mu, par = mcfit$par, par.canon = mcfit$par.canon, clustpar = mcfit$clustpar, clustargs = mcfit$clustargs, modelpar = mcfit$modelpar, covmodel = mcfit$covmodel, Fit = Fit) } return(out) } clusterfit <- function(X, clusters, lambda = NULL, startpar = NULL, ..., q=1/4, p=2, rmin=NULL, rmax=NULL, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), statistic = NULL, statargs = NULL, algorithm="Nelder-Mead", verbose=FALSE){ if(verbose) splat("Fitting cluster model") ## If possible get dataname from dots dataname <- list(...)$dataname ## Cluster info: info <- spatstatClusterModelInfo(clusters) if(verbose) splat("Retrieved cluster model information") ## Determine model type isPCP <- info$isPCP isDPP <- 
inherits(clusters, "detpointprocfamily") ## resolve algorithm parameters default.ctrl <- list(q=if(isDPP) 1/2 else 1/4, p=2, rmin=NULL, rmax=NULL) given.ctrl <- if(missing(ctrl)) list() else ctrl[names(default.ctrl)] given.args <- c(if(missing(q)) NULL else list(q=q), if(missing(p)) NULL else list(p=p), if(missing(rmin)) NULL else list(rmin=rmin), if(missing(rmax)) NULL else list(rmax=rmax)) ctrl <- resolve.defaults(given.args, given.ctrl, default.ctrl) if(verbose) { splat("Algorithm parameters:") print(ctrl) } ## if(inherits(X, "ppp")){ if(verbose) splat("Using point pattern data") if(is.null(dataname)) dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(is.null(statistic)) statistic <- "K" # Startpar: if(is.null(startpar)) startpar <- info$selfstart(X) stationary <- is.null(lambda) || (is.numeric(lambda) && length(lambda)==1) if(verbose) { splat("Starting parameters:") print(startpar) cat("Calculating summary function...") } # compute summary function if(stationary) { if(is.null(lambda)) lambda <- intensity(X) StatFun <- if(statistic == "K") "Kest" else "pcf" StatName <- if(statistic == "K") "K-function" else "pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=X), statargs, list(correction="best"))) } else { StatFun <- if(statistic == "K") "Kinhom" else "pcfinhom" StatName <- if(statistic == "K") "inhomogeneous K-function" else "inhomogeneous pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=X, lambda=lambda), statargs, list(correction="best"))) } if(verbose) splat("Done.") } else if(inherits(X, "fv")){ if(verbose) splat("Using the given summary function") Stat <- X ## Get statistic type stattype <- attr(Stat, "fname") StatFun <- paste0(stattype) StatName <- NULL if(is.null(statistic)){ if(is.null(stattype) || !is.element(stattype[1L], c("K", "pcf"))) stop("Cannot infer the type of summary statistic from argument ", sQuote("X"), " please specify this via argument ", sQuote("statistic")) statistic <- 
stattype[1L] } if(stattype[1L]!=statistic) stop("Statistic inferred from ", sQuote("X"), " not equal to supplied argument ", sQuote("statistic")) # Startpar: if(is.null(startpar)){ if(isDPP) stop("No rule for starting parameters in this case. Please set ", sQuote("startpar"), " explicitly.") startpar <- info$checkpar(startpar, old=FALSE) startpar[["scale"]] <- mean(range(Stat[[fvnames(Stat, ".x")]])) } } else{ stop("Unrecognised format for argument X") } ## avoid using g(0) as it may be infinite if(statistic=="pcf"){ if(verbose) splat("Checking g(0)") argu <- fvnames(Stat, ".x") rvals <- Stat[[argu]] if(rvals[1L] == 0 && (is.null(rmin) || rmin == 0)) { if(verbose) splat("Ignoring g(0)") rmin <- rvals[2L] } } ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ if(verbose) splat("Invoking dppmFixAlgorithm") alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar) algorithm <- alg$algorithm } dots <- info$resolvedots(...) # determine initial values of parameters startpar <- info$checkpar(startpar) # fit theoret <- info[[statistic]] desc <- paste("minimum contrast fit of", info$descname) #' ............ experimental ......................... do.adjust <- spatstat.options("kppm.adjusted") if(do.adjust) { if(verbose) splat("Applying kppm adjustment") W <- Window(X) adjdata <- list(paircorr = info[["pcf"]], pairWcdf = distcdf(W), tohuman = NULL) adjfun <- function(theo, par, auxdata, ...) { with(auxdata, { if(!is.null(tohuman)) par <- tohuman(par) a <- as.numeric(stieltjes(paircorr, pairWcdf, par=par, ...)) return(theo/a) }) } adjustment <- list(fun=adjfun, auxdata=adjdata) } else adjustment <- NULL #' ............ experimental ......................... 
usecanonical <- spatstat.options("kppm.canonical") if(usecanonical) { if(verbose) splat("Converting to canonical parameters") tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar if(usecanonical) { htheo <- theoret startpar <- tocanonical(startpar) theoret <- function(par, ...) { htheo(tohuman(par), ...) } if(do.adjust) adjustment$auxdata$tohuman <- tohuman } #' ................................................... mcargs <- resolve.defaults(list(observed=Stat, theoretical=theoret, startpar=startpar, ctrl=ctrl, method=algorithm, fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=statistic, modelname=info$modelname), margs=dots$margs, model=dots$model, funaux=info$funaux, adjustment=adjustment), list(...)) if(isDPP && algorithm=="Brent" && changealgorithm) mcargs <- resolve.defaults(mcargs, list(lower=alg$lower, upper=alg$upper)) if(verbose) splat("Starting minimum contrast fit") mcfit <- do.call(mincontrast, mcargs) if(verbose) splat("Returned from minimum contrast fit") # extract fitted parameters and reshape if(!usecanonical) { optpar.canon <- NULL optpar.human <- mcfit$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- mcfit$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon) names(optpar.human) <- names(startpar.human) } mcfit$par <- optpar.human mcfit$par.canon <- optpar.canon # Return results for DPPs if(isDPP){ extra <- list(Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, lambda = lambda) attr(mcfit, "info") <- extra if(verbose) splat("Returning from clusterfit (DPP case)") return(mcfit) } ## Extra stuff for ordinary cluster/lgcp models ## imbue with meaning ## infer model parameters mcfit$modelpar <- info$interpret(optpar.human, lambda) mcfit$internal <- 
list(model=ifelse(isPCP, clusters, "lgcp")) mcfit$covmodel <- dots$covmodel if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- mcfit$par[["kappa"]] # mu = mean cluster size mu <- lambda/kappa } else { # LGCP: extract variance parameter sigma2 sigma2 <- mcfit$par[["sigma2"]] # mu = mean of log intensity mu <- log(lambda) - sigma2/2 } ## Parameter values (new format) mcfit$mu <- mu mcfit$clustpar <- info$checkpar(mcfit$par, old=FALSE) mcfit$clustargs <- info$checkclustargs(dots$margs, old=FALSE) ## The old fit fun that would have been used (DO WE NEED THIS?) FitFun <- paste0(tolower(clusters), ".est", statistic) extra <- list(FitFun = FitFun, Stat = Stat, StatFun = StatFun, StatName = StatName, modelname = info$modelabbrev, isPCP = isPCP, lambda = lambda) attr(mcfit, "info") <- extra if(verbose) splat("Returning from clusterfit") return(mcfit) } kppmComLik <- function(X, Xname, po, clusters, control, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ...) { W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax, what="ijd") # I <- cl$i # J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) sumweight <- sum(wIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at pairs of data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) # lambdaIJ <- lambda^2 # compute cdf of distance between two uniform random points in W g <- distcdf(W) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window # lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # lambda(x_i) * lambda(x_j) # lambdaIJ <- lambdaX[I] * lambdaX[J] # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM) # scaling constant is (integral of intensity)^2 gscale <- integral.im(lambdaM)^2 } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) #' ............ experimental ......................... 
usecanonical <- spatstat.options("kppm.canonical") if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar if(usecanonical) { pcftheo <- pcfun startpar <- tocanonical(startpar) pcfun <- function(par, ...) { pcftheo(tohuman(par), ...) } } # ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(paco)) # define objective function (with 'paco' in its environment) # This is the log composite likelihood minus the constant term # sum(log(lambdaIJ)) - npairs * log(gscale) obj <- function(par, objargs) { with(objargs, 2*(sum(log(paco(dIJ, par))) - sumweight * log(unlist(stieltjes(paco, g, par=par)))), enclos=objargs$envir) } } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, sumweight=sumweight, g=g, gscale=gscale, envir=environment(wpaco)) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log composite likelihood minus the constant term # sum(wIJ * log(lambdaIJ)) - sumweight * log(gscale) obj <- function(par, objargs) { with(objargs, 2*(sum(wIJ * log(paco(dIJ, par))) - sumweight * log(unlist(stieltjes(wpaco, g, par=par)))), enclos=objargs$envir) } } # arguments for optimization ctrl 
<- resolve.defaults(list(fnscale=-1), control, list(trace=0)) optargs <- list(par=startpar, fn=obj, objargs=objargs, control=ctrl, method=algorithm) ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar.human) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } # optimize it opt <- do.call(optim, optargs) # raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) # fitted parameters if(!usecanonical) { optpar.canon <- NULL optpar.human <- opt$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- opt$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon) names(optpar.human) <- names(startpar.human) } opt$par <- optpar.human opt$par.canon <- optpar.canon # Finish in DPP case if(!is.null(DPP)){ # all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value) # pack up clusters <- update(clusters, as.list(opt$par)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } # meaningful model parameters modelpar <- info$interpret(optpar.human, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar.human[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar.human[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "clik2", clfit = opt, 
weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value) # pack up result <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar.human, par.canon = optpar.canon, clustpar = info$checkpar(par=optpar.human, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs, modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } kppmPalmLik <- function(X, Xname, po, clusters, control, weightfun, rmax, algorithm="Nelder-Mead", DPP=NULL, ...) { W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax) # I <- cl$i J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) # sumweight <- sum(wIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) # sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched(as.mask, append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs ## Detect DPP usage isDPP <- inherits(clusters, "detpointprocfamily") # compute intensity at data points # and c.d.f. 
of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) lambdaJ <- rep(lambda, length(J)) # compute cdf of distance between a uniform random point in W # and a randomly-selected point in X g <- distcdf(X, M) # scaling constant is (integral of intensity) * (number of points) gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) lambdaJ <- lambdaX[J] # compute cdf of distance between a uniform random point in X # and a random point in W with density proportional to intensity function g <- distcdf(X, M, dV=lambdaM) # scaling constant is (integral of intensity) * (number of points) gscale <- integral.im(lambdaM) * npoints(X) } # Detect DPP model and change clusters and intensity correspondingly isDPP <- !is.null(DPP) if(isDPP){ tmp <- dppmFixIntensity(DPP, lambda, po) clusters <- tmp$clusters lambda <- tmp$lambda po <- tmp$po } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. # These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) #' ............ experimental ......................... 
usecanonical <- spatstat.options("kppm.canonical") if(usecanonical) { tocanonical <- info$tocanonical tohuman <- info$tohuman if(is.null(tocanonical) || is.null(tohuman)) { warning("Canonical parameters are not yet supported for this model") usecanonical <- FALSE } } startpar.human <- startpar if(usecanonical) { pcftheo <- pcfun startpar <- tocanonical(startpar) pcfun <- function(par, ...) { pcftheo(tohuman(par), ...) } } # ..................................................... # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, g=g, gscale=gscale, sumloglam=sum(log(lambdaJ)), envir=environment(paco)) # define objective function (with 'paco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, sumloglam + sum(log(paco(dIJ, par))) - gscale * unlist(stieltjes(paco, g, par=par)), enclos=objargs$envir) } } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, g=g, gscale=gscale, wsumloglam=sum(wIJ * log(lambdaJ)), envir=environment(wpaco)) # define objective function (with 'paco', 'wpaco' in its environment) # This is the log Palm likelihood obj <- function(par, objargs) { with(objargs, wsumloglam + sum(wIJ * log(paco(dIJ, par))) - gscale * unlist(stieltjes(wpaco, g, par=par)), enclos=objargs$envir) } } # arguments for optimization ctrl <- resolve.defaults(list(fnscale=-1), control, list(trace=0)) optargs <- list(par=startpar, fn=obj, objargs=objargs, control=ctrl, 
method=algorithm) ## DPP resolving algorithm and checking startpar changealgorithm <- length(startpar)==1 && algorithm=="Nelder-Mead" if(isDPP){ alg <- dppmFixAlgorithm(algorithm, changealgorithm, clusters, startpar.human) algorithm <- optargs$method <- alg$algorithm if(algorithm=="Brent" && changealgorithm){ optargs$lower <- alg$lower optargs$upper <- alg$upper } } # optimize it opt <- do.call(optim, optargs) # raise warning/error if something went wrong signalStatus(optimStatus(opt), errors.only=TRUE) # Extract optimal values of parameters if(!usecanonical) { optpar.canon <- NULL optpar.human <- opt$par names(optpar.human) <- names(startpar.human) } else { optpar.canon <- opt$par names(optpar.canon) <- names(startpar) optpar.human <- tohuman(optpar.canon) names(optpar.human) <- names(startpar.human) } # Finish in DPP case if(!is.null(DPP)){ opt$par <- optpar.human opt$par.canon <- optpar.canon # all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value) # pack up clusters <- update(clusters, as.list(optpar.human)) result <- list(Xname = Xname, X = X, stationary = stationary, fitted = clusters, modelname = modelname, po = po, lambda = lambda, Fit = Fit) return(result) } # meaningful model parameters modelpar <- info$interpret(optpar.human, lambda) # infer parameter 'mu' if(isPCP) { # Poisson cluster process: extract parent intensity kappa kappa <- optpar.human[["kappa"]] # mu = mean cluster size mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) } else { # LGCP: extract variance parameter sigma2 sigma2 <- optpar.human[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) } # all info that depends on the fitting method: Fit <- list(method = "palm", clfit = opt, weightfun = weightfun, rmax = rmax, objfun = obj, objargs = objargs, maxlogcl = opt$value) # pack up result <- 
list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = optpar.human, par.canon = optpar.canon, clustpar = info$checkpar(par=optpar.human, old=FALSE), clustargs = info$checkclustargs(clargs$margs, old=FALSE), #clargs$margs, modelpar = modelpar, covmodel = clargs, Fit = Fit) return(result) } improve.kppm <- local({ fnc <- function(r, eps, g){ (g(r) - 1)/(g(0) - 1) - eps} improve.kppm <- function(object, type=c("quasi", "wclik1", "clik1"), rmax = NULL, eps.rmax = 0.01, dimyx = 50, maxIter = 100, tolerance = 1e-06, fast = TRUE, vcov = FALSE, fast.vcov = FALSE, verbose = FALSE, save.internals = FALSE) { verifyclass(object, "kppm") type <- match.arg(type) gfun <- pcfmodel(object) X <- object$X win <- as.owin(X) ## simple (rectangular) grid quadrature scheme ## (using pixels with centers inside owin only) mask <- as.mask(win, dimyx = dimyx) wt <- pixellate(win, W = mask) wt <- wt[mask] Uxy <- rasterxy.mask(mask) U <- ppp(Uxy$x, Uxy$y, window = win, check=FALSE) U <- U[mask] # nU <- npoints(U) Yu <- pixellate(X, W = mask) Yu <- Yu[mask] ## covariates at quadrature points po <- object$po Z <- model.images(po, mask) Z <- sapply(Z, "[", i=U) ##obtain initial beta estimate using composite likelihood beta0 <- coef(po) ## determining the dependence range if (type != "clik1" && is.null(rmax)) { diamwin <- diameter(win) rmax <- if(fnc(diamwin, eps.rmax, gfun) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diameter(win), eps=eps.rmax, g=gfun)$root if(verbose) splat(paste0("type: ", type, ", ", "dependence range: ", rmax, ", ", "dimyx: ", dimyx, ", g(0) - 1:", gfun(0) -1)) } ## preparing the WCL case if (type == "wclik1") Kmax <- 2*pi * integrate(function(r){r * (gfun(r) - 1)}, lower=0, upper=rmax)$value * exp(c(Z %*% beta0)) ## the g()-1 matrix without tapering if (!fast || (vcov && !fast.vcov)){ if (verbose) cat("computing the g(u_i,u_j)-1 matrix ...") gminus1 <- 
matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) if (verbose) cat("..Done.\n") } if ( (fast && type == "quasi") | fast.vcov ){ if (verbose) cat("computing the sparse G-1 matrix ...\n") ## Non-zero gminus1 entries (when using tapering) cp <- crosspairs(U,U,rmax,what="ijd") if (verbose) cat("crosspairs done\n") Gtap <- (gfun(cp$d) - 1) if(vcov){ if(fast.vcov){ gminus1 <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=Gtap, dims=c(U$n, U$n)) } else{ if(fast) gminus1 <- matrix(gfun(c(pairdist(U))) - 1, U$n, U$n) } } if (verbose & type!="quasi") cat("..Done.\n") } if (type == "quasi" && fast){ mu0 <- exp(c(Z %*% beta0)) * wt mu0root <- sqrt(mu0) sparseG <- Matrix::sparseMatrix(i=cp$i, j=cp$j, x=mu0root[cp$i] * mu0root[cp$j] * Gtap, dims=c(U$n, U$n)) Rroot <- Matrix::Cholesky(sparseG, perm = TRUE, Imult = 1) ##Imult=1 means that we add 1*I if (verbose) cat("..Done.\n") } ## iterative weighted least squares/Fisher scoring bt <- beta0 noItr <- 1 repeat { mu <- exp(c(Z %*% bt)) * wt mu.root <- sqrt(mu) ## the core of estimating equation: ff=phi ## in case of ql, \phi=V^{-1}D=V_\mu^{-1/2}x where (G+I)x=V_\mu^{1/2} Z ff <- switch(type, clik1 = Z, wclik1= Z/(1 + Kmax), quasi = if(fast){ Matrix::solve(Rroot, mu.root * Z)/mu.root } else{ solve(diag(U$n) + t(gminus1 * mu), Z) } ) ##alternative ##R=chol(sparseG+sparseMatrix(i=c(1:U$n),j=c(1:U$n), ## x=rep(1,U$n),dims=c(U$n,U$n))) ##ff2 <- switch(type, ## clik1 = Z, ## wclik1= Z/(1 + Kmax), ## quasi = if (fast) ## solve(R,solve(t(R), mu.root * Z))/mu.root ## else solve(diag(U$n) + t(gminus1 * mu), Z)) ## print(summary(as.numeric(ff)-as.numeric(ff2))) ## the estimating equation: u_f(\beta) uf <- (Yu - mu) %*% ff ## inverse of minus expectation of Jacobian matrix: I_f Jinv <- solve(t(Z * mu) %*% ff) if(maxIter==0){ ## This is a built-in early exit for vcov internal calculations break } deltabt <- as.numeric(uf %*% Jinv) if (any(!is.finite(deltabt))) { warning(paste("Infinite value, NA or NaN appeared", "in the iterative weighted least squares 
algorithm.", "Returning the initial intensity estimate unchanged."), call.=FALSE) return(object) } ## updating the present estimate of \beta bt <- bt + deltabt if (verbose) splat(paste0("itr: ", noItr, ",\nu_f: ", as.numeric(uf), "\nbeta:", bt, "\ndeltabeta:", deltabt)) if (max(abs(deltabt/bt)) <= tolerance || max(abs(uf)) <= tolerance) break if (noItr > maxIter) stop("Maximum number of iterations reached without convergence.") noItr <- noItr + 1 } out <- object out$po$coef.orig <- beta0 out$po$coef <- bt loc <- if(is.sob(out$lambda)) as.mask(out$lambda) else mask out$lambda <- predict(out$po, locations = loc) out$improve <- list(type = type, rmax = rmax, dimyx = dimyx, fast = fast, fast.vcov = fast.vcov) if(save.internals){ out$improve <- append(out$improve, list(ff=ff, uf=uf, J.inv=Jinv)) } if(vcov){ if (verbose) cat("computing the asymptotic variance ...\n") ## variance of the estimation equation: Sigma_f = Var(u_f(bt)) trans <- if(fast) Matrix::t else t Sig <- trans(ff) %*% (ff * mu) + trans(ff * mu) %*% gminus1 %*% (ff * mu) ## note Abdollah's G does not have mu.root inside... ## the asymptotic variance of \beta: ## inverse of the Godambe information matrix out$vcov <- as.matrix(Jinv %*% Sig %*% Jinv) } return(out) } improve.kppm }) is.kppm <- function(x) { inherits(x, "kppm")} print.kppm <- print.dppm <- function(x, ...) 
{ isPCP <- x$isPCP # detect DPP isDPP <- inherits(x, "dppm") # handle outdated objects - which were all cluster processes if(!isDPP && is.null(isPCP)) isPCP <- TRUE terselevel <- spatstat.options('terse') digits <- getOption('digits') splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isDPP) "determinantal" else if(isPCP) "cluster" else "Cox", "point process model") Xname <- x$Xname if(waxlyrical('extras', terselevel) && nchar(Xname) < 20) { has.subset <- ("subset" %in% names(x$call)) splat("Fitted to", if(has.subset) "(a subset of)" else NULL, "point pattern dataset", sQuote(Xname)) } if(waxlyrical('gory', terselevel)) { switch(x$Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", x$Fit$StatName) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", x$Fit$rmax) if(!is.null(wtf <- x$Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } }, warning(paste("Unrecognised fitting method", sQuote(x$Fit$method))) ) } parbreak(terselevel) # ............... trend ......................... if(!(isDPP && is.null(x$fitted$intensity))) print(x$po, what="trend") # ..................... clusters ................ 
# DPP case if(isDPP){ splat("Fitted DPP model:") print(x$fitted) return(invisible(NULL)) } tableentry <- spatstatClusterModelInfo(x$clusters) splat(if(isPCP) "Cluster" else "Cox", "model:", tableentry$printmodelname(x)) cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pc <- x$par.canon if(!is.null(pc)) { splat("Fitted canonical parameters:") print(pc, digits=digits) } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } if(isDPP) { rx <- repul(x) splat(if(is.im(rx)) "(Average) strength" else "Strength", "of repulsion:", signif(mean(rx), 4)) } invisible(NULL) } plot.kppm <- local({ plotem <- function(x, ..., main=dmain, dmain) { plot(x, ..., main=main) } plot.kppm <- function(x, ..., what=c("intensity", "statistic", "cluster"), pause=interactive(), xname) { ## catch objectname from dots if present otherwise deparse x: if(missing(xname)) xname <- short.deparse(substitute(x)) nochoice <- missing(what) what <- pickoption("plot type", what, c(statistic="statistic", intensity="intensity", cluster="cluster"), multi=TRUE) ## handle older objects Fit <- x$Fit if(is.null(Fit)) { warning("kppm object is in outdated format") Fit <- x Fit$method <- "mincon" } ## Catch locations for clusters if given loc <- list(...)$locations inappropriate <- (nochoice & ((what == "intensity") & (x$stationary))) | ((what == "statistic") & (Fit$method != "mincon")) | ((what == "cluster") & 
(identical(x$isPCP, FALSE))) | ((what == "cluster") & (!x$stationary) & is.null(loc)) if(!nochoice && !x$stationary && "cluster" %in% what && is.null(loc)) stop("Please specify additional argument ", sQuote("locations"), " which will be passed to the function ", sQuote("clusterfield"), ".") if(any(inappropriate)) { what <- what[!inappropriate] if(length(what) == 0){ message("Nothing meaningful to plot. Exiting...") return(invisible(NULL)) } } pause <- pause && (length(what) > 1) if(pause) opa <- par(ask=TRUE) for(style in what) switch(style, intensity={ plotem(x$po, ..., dmain=c(xname, "Intensity"), how="image", se=FALSE) }, statistic={ plotem(Fit$mcfit, ..., dmain=c(xname, Fit$StatName)) }, cluster={ plotem(clusterfield(x, locations = loc, verbose=FALSE), ..., dmain=c(xname, "Fitted cluster")) }) if(pause) par(opa) return(invisible(NULL)) } plot.kppm }) predict.kppm <- predict.dppm <- function(object, ...) { se <- resolve.1.default(list(se=FALSE), list(...)) interval <- resolve.1.default(list(interval="none"), list(...)) if(se) warning("Standard error calculation assumes a Poisson process") if(interval != "none") warning(paste(interval, "interval calculation assumes a Poisson process")) predict(as.ppm(object), ...) } fitted.kppm <- fitted.dppm <- function(object, ...) { fitted(as.ppm(object), ...) } residuals.kppm <- residuals.dppm <- function(object, ...) { type <- resolve.1.default(list(type="raw"), list(...)) if(type != "raw") warning(paste("calculation of", type, "residuals", "assumes a Poisson process")) residuals(as.ppm(object), ...) } simulate.kppm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, retry=10, drop=FALSE) { starttime <- proc.time() verbose <- verbose && (nsim > 1) check.1.real(retry) # .... copied from simulate.lm .... 
if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. # determine window for simulation results if(!is.null(window)) { stopifnot(is.owin(window)) win <- window } else { win <- as.owin(object) } # .................................. # determine parameters mp <- as.list(object$modelpar) # parameter 'mu' # = parent intensity of cluster process # = mean log intensity of log-Gaussian Cox process if(is.null(covariates) && (object$stationary || is.null(window))) { # use existing 'mu' (scalar or image) mu <- object$mu } else { # recompute 'mu' using new data switch(object$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process kappa <- mp$kappa lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(lambda/kappa) }, LGCP={ # log-Gaussian Cox process sigma2 <- mp$sigma2 lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(log(lambda) - sigma2/2) }, stop(paste("Simulation of", sQuote(object$clusters), "processes is not yet implemented")) ) } # prepare data for execution out <- list() switch(object$clusters, Thomas={ kappa <- mp$kappa sigma <- mp$sigma cmd <- expression(rThomas(kappa,sigma,mu,win, ...)) dont.complain.about(kappa, sigma, mu) }, MatClust={ kappa <- mp$kappa r <- mp$R cmd <- expression(rMatClust(kappa,r,mu,win, ...)) dont.complain.about(kappa, r) }, Cauchy = { kappa <- mp$kappa omega <- mp$omega cmd <- expression(rCauchy(kappa, omega, mu, win, ...)) dont.complain.about(kappa, omega, mu) }, VarGamma = { kappa <- mp$kappa omega <- mp$omega nu.ker <- object$covmodel$margs$nu.ker cmd <- expression(rVarGamma(kappa, nu.ker, omega, mu, win, ...)) dont.complain.about(kappa, nu.ker, omega, mu) }, 
LGCP={ sigma2 <- mp$sigma2 alpha <- mp$alpha cm <- object$covmodel model <- cm$model margs <- cm$margs param <- append(list(var=sigma2, scale=alpha), margs) #' if(!is.im(mu)) { # model will be simulated in 'win' cmd <- expression(rLGCP(model=model, mu=mu, param=param, ..., win=win)) #' check that RandomFields package recognises parameter format rfmod <- try(rLGCP(model, mu=mu, param=param, win=win, ..., modelonly=TRUE)) } else { # model will be simulated in as.owin(mu), then change window cmd <- expression(rLGCP(model=model, mu=mu, param=param, ...)[win]) #' check that RandomFields package recognises parameter format rfmod <- try(rLGCP(model, mu=mu, param=param, ..., modelonly=TRUE)) } #' suppress warnings from code checker dont.complain.about(model, mu, param) #' check that model is recognised if(inherits(rfmod, "try-error")) stop(paste("Internal error in simulate.kppm:", "unable to build Random Fields model", "for log-Gaussian Cox process")) }) # run if(verbose) { cat(paste("Generating", nsim, "simulations... 
")) state <- list() } for(i in 1:nsim) { out[[i]] <- try(eval(cmd)) if(verbose) state <- progressreport(i, nsim, state=state) } # detect failures if(any(bad <- unlist(lapply(out, inherits, what="try-error")))) { nbad <- sum(bad) gripe <- paste(nbad, ngettext(nbad, "simulation was", "simulations were"), "unsuccessful") if(verbose) splat(gripe) if(retry <= 0) { fate <- "returned as NULL" out[bad] <- list(NULL) } else { if(verbose) cat("Retrying...") ntried <- 0 while(ntried < retry) { ntried <- ntried + 1 for(j in which(bad)) out[[j]] <- try(eval(cmd)) bad <- unlist(lapply(out, inherits, what="try-error")) nbad <- sum(bad) if(nbad == 0) break } if(verbose) cat("Done.\n") fate <- if(nbad == 0) "all recomputed" else paste(nbad, "simulations still unsuccessful") fate <- paste(fate, "after", ntried, ngettext(ntried, "further try", "further tries")) } warning(paste(gripe, fate, sep=": ")) } if(verbose) cat("Done.\n") #' pack up out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } formula.kppm <- formula.dppm <- function(x, ...) { formula(x$po, ...) } terms.kppm <- terms.dppm <- function(x, ...) { terms(x$po, ...) } labels.kppm <- labels.dppm <- function(object, ...) { labels(object$po, ...) } update.kppm <- function(object, ..., evaluate=TRUE) { argh <- list(...) 
nama <- names(argh) callframe <- object$callframe envir <- environment(terms(object)) #' look for a formula argument fmla <- formula(object) jf <- integer(0) if(!is.null(trend <- argh$trend)) { if(!can.be.formula(trend)) stop("Argument \"trend\" should be a formula") fmla <- newformula(formula(object), trend, callframe, envir) jf <- which(nama == "trend") } else if(any(isfo <- sapply(argh, can.be.formula))) { if(sum(isfo) > 1) { if(!is.null(nama)) isfo <- isfo & nzchar(nama) if(sum(isfo) > 1) stop(paste("Arguments not understood:", "there are two unnamed formula arguments")) } jf <- which(isfo) fmla <- argh[[jf]] fmla <- newformula(formula(object), fmla, callframe, envir) } #' look for a point pattern or quadscheme if(!is.null(X <- argh$X)) { if(!inherits(X, c("ppp", "quad"))) stop(paste("Argument X should be a formula,", "a point pattern or a quadrature scheme")) jX <- which(nama == "X") } else if(any(ispp <- sapply(argh, inherits, what=c("ppp", "quad")))) { if(sum(ispp) > 1) { if(!is.null(nama)) ispp <- ispp & nzchar(nama) if(sum(ispp) > 1) stop(paste("Arguments not understood:", "there are two unnamed point pattern/quadscheme arguments")) } jX <- which(ispp) X <- argh[[jX]] } else { X <- object$X jX <- integer(0) } Xexpr <- if(length(jX) > 0) sys.call()[[2L + jX]] else NULL #' remove arguments just recognised, if any jused <- c(jf, jX) if(length(jused) > 0) { argh <- argh[-jused] nama <- names(argh) } #' update the matched call thecall <- getCall(object) methodname <- as.character(thecall[[1L]]) switch(methodname, kppm.formula = { # original call has X = [formula with lhs] if(!is.null(Xexpr)) { lhs.of.formula(fmla) <- Xexpr } else if(is.null(lhs.of.formula(fmla))) { lhs.of.formula(fmla) <- as.name('.') } oldformula <- as.formula(getCall(object)$X) thecall$X <- newformula(oldformula, fmla, callframe, envir) }, { # original call has X = ppp and trend = [formula without lhs] oldformula <- as.formula(getCall(object)$trend %orifnull% (~1)) fom <- 
newformula(oldformula, fmla, callframe, envir) if(!is.null(Xexpr)) lhs.of.formula(fom) <- Xexpr if(is.null(lhs.of.formula(fom))) { # new call has same format thecall$trend <- fom if(length(jX) > 0) thecall$X <- X } else { # new call has formula with lhs thecall$trend <- NULL thecall$X <- fom } }) knownnames <- unique(c(names(formals(kppm.ppp)), names(formals(mincontrast)), names(formals(optim)))) knownnames <- setdiff(knownnames, c("X", "trend", "observed", "theoretical", "fn", "gr", "...")) ok <- nama %in% knownnames thecall <- replace(thecall, nama[ok], argh[ok]) thecall$formula <- NULL # artefact of 'step', etc thecall[[1L]] <- as.name("kppm") if(!evaluate) return(thecall) out <- eval(thecall, envir=parent.frame(), enclos=envir) #' update name of data if(length(jX) == 1) { mc <- match.call() Xlang <- mc[[2L+jX]] out$Xname <- short.deparse(Xlang) } #' return(out) } unitname.kppm <- unitname.dppm <- function(x) { return(unitname(x$X)) } "unitname<-.kppm" <- "unitname<-.dppm" <- function(x, value) { unitname(x$X) <- value if(!is.null(x$Fit$mcfit)) { unitname(x$Fit$mcfit) <- value } else if(is.null(x$Fit)) { warning("kppm object in outdated format") if(!is.null(x$mcfit)) unitname(x$mcfit) <- value } return(x) } as.fv.kppm <- as.fv.dppm <- function(x) { if(x$Fit$method == "mincon") return(as.fv(x$Fit$mcfit)) gobs <- pcfinhom(x$X, lambda=x, correction="good", update=FALSE) gfit <- (pcfmodel(x))(gobs$r) g <- bind.fv(gobs, data.frame(fit=gfit), "%s[fit](r)", "predicted %s for fitted model") return(g) } coef.kppm <- coef.dppm <- function(object, ...) { return(coef(object$po)) } Kmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="K") } pcfmodel.kppm <- function(model, ...) 
#' Body of 'pcfmodel.kppm' (its header is on the previous line):
#' return the theoretical pair correlation function of the fitted model.
{ Kpcf.kppm(model, what="pcf") }

#' Look up the theoretical K-function, pair correlation function, or
#' cluster kernel of a fitted cluster/Cox model.
#'   model : fitted model object
#'   what  : which summary function to extract ("K", "pcf" or "kernel")
#' Returns a function of distance r giving numeric values.
Kpcf.kppm <- function(model, what=c("K", "pcf", "kernel")) {
  what <- match.arg(what)
  # Extract function definition from internal table
  clusters <- model$clusters
  tableentry <- spatstatClusterModelInfo(clusters)
  if(is.null(tableentry))
    # NOTE(review): stop() concatenates its arguments with no separator,
    # so this message renders without spaces around the cluster name;
    # paste() was presumably intended — confirm before changing.
    stop("No information available for",
         sQuote(clusters), "cluster model")
  fun <- tableentry[[what]]
  if(is.null(fun))
    # NOTE(review): same missing-separator issue as above
    stop("No expression available for", what,
         "for", sQuote(clusters), "cluster model")
  # Extract model parameters
  par <- model$par
  # Extract auxiliary definitions (if applicable)
  funaux <- tableentry$funaux
  # Extract covariance model (if applicable)
  cm <- model$covmodel
  # note: 'model' is deliberately reused here as the covariance model name
  model <- cm$model
  margs <- cm$margs
  #
  # closure capturing the fitted parameters and covariance model
  f <- function(r) as.numeric(fun(par=par, rvals=r,
                                  funaux=funaux, model=model, margs=margs))
  return(f)
}

#' Stationarity flag recorded on the fitted object at fitting time.
is.stationary.kppm <- is.stationary.dppm <- function(x) {
  return(x$stationary)
}

#' A cluster/Cox model degenerates to a Poisson process when its clustering
#' parameter vanishes: mu identically 0 for Poisson cluster processes,
#' sigma2 = 0 for the log-Gaussian Cox process.
is.poisson.kppm <- function(x) {
  switch(x$clusters,
         Cauchy=,
         VarGamma=,
         Thomas=,
         MatClust={
           # Poisson cluster process
           # mu may be scalar or a pixel image; max(mu) == 0 means
           # identically zero (mu is nonnegative here — assumed, confirm)
           mu <- x$mu
           return(!is.null(mu) && (max(mu) == 0))
         },
         LGCP = {
           # log-Gaussian Cox process
           sigma2 <- x$par[["sigma2"]]
           return(sigma2 == 0)
         },
         # unknown cluster type: not recognisably Poisson
         return(FALSE))
}

# extract ppm component
as.ppm.kppm <- as.ppm.dppm <- function(object) {
  object$po
}

# other methods that pass through to 'ppm'
as.owin.kppm <- as.owin.dppm <- function(W, ...,
                                         from=c("points", "covariates"),
                                         fatal=TRUE) {
  from <- match.arg(from)
  as.owin(as.ppm(W), ..., from=from, fatal=fatal)
}

domain.kppm <- Window.kppm <- domain.dppm <- Window.dppm <-
  function(X, ..., from=c("points", "covariates")) {
    from <- match.arg(from)
    as.owin(X, from=from)
  }

#' Images of the canonical covariates, delegated to the ppm component.
#' (The closing brace of this function lies on the next line.)
model.images.kppm <- model.images.dppm <-
  function(object, W=as.owin(object), ...) {
    model.images(as.ppm(object), W=W, ...)
}

#' Design matrix of the fitted intensity model, delegated to the ppm component.
model.matrix.kppm <- model.matrix.dppm <-
  function(object,
           data=model.frame(object, na.action=NULL), ...,
           Q=NULL, keepNA=TRUE) {
    if(missing(data)) data <- NULL
    model.matrix(as.ppm(object), data=data, ..., Q=Q, keepNA=keepNA)
  }

#' Model frame, delegated to the ppm component.
#' (First argument is named 'formula' to match the model.frame generic.)
model.frame.kppm <- model.frame.dppm <- function(formula, ...) {
  model.frame(as.ppm(formula), ...)
}

#' Maximised composite log-likelihood; only stored when the model was
#' fitted by method='palm' or method='clik2'.
logLik.kppm <- logLik.dppm <- function(object, ...) {
  cl <- object$Fit$maxlogcl
  if(is.null(cl))
    stop(paste("logLik is only available for kppm objects fitted with",
               "method='palm' or method='clik2'"),
         call.=FALSE)
  # reuse the ppm logLik object to inherit class and d.f.,
  # then overwrite its value with the composite likelihood
  ll <- logLik(as.ppm(object)) # to inherit class and d.f.
  ll[] <- cl
  return(ll)
}

#' AIC based on the composite likelihood above.
AIC.kppm <- AIC.dppm <- function(object, ..., k=2) {
  cl <- logLik(object)
  df <- attr(cl, "df")
  return(- 2 * as.numeric(cl) + k * df)
}

#' extractAIC method (used by step() etc.): returns c(edf, AIC).
extractAIC.kppm <- extractAIC.dppm <- function (fit, scale = 0, k = 2, ...) {
  cl <- logLik(fit)
  edf <- attr(cl, "df")
  aic <- - 2 * as.numeric(cl) + k * edf
  return(c(edf, aic))
}

nobs.kppm <- nobs.dppm <- function(object, ...) { nobs(as.ppm(object)) }

#' Sibling probability of a cluster process (generic).
psib <- function(object) UseMethod("psib")

psib.kppm <- function(object) {
  clus <- object$clusters
  info <- spatstatClusterModelInfo(clus)
  if(!info$isPCP) {
    # defined only for Poisson cluster processes
    warning("The model is not a cluster process")
    return(NA)
  }
  g <- pcfmodel(object)
  # p = 1 - 1/g(0): probability that a typical point has at least one sibling
  p <- 1 - 1/g(0)
  return(p)
}
spatstat/R/rho2hat.R0000644000176200001440000002265513611503715014024 0ustar liggesusers#
# rho2hat.R
#
# Relative risk for pairs of covariate values
#
# $Revision: 1.25 $ $Date: 2016/07/15 10:21:12 $
#

#' Estimate point process intensity as a function of TWO covariates.
#'   object : point pattern, quadrature scheme, or fitted model (ppm)
#'   cov1, cov2 : covariates (functions, pixel images, or the strings "x","y")
#'   method : "ratio" (ratio of kernel estimates) or
#'            "reweight" (kernel estimate weighted by 1/reference)
#' (The function continues on the following lines.)
rho2hat <- function(object, cov1, cov2, ..., method=c("ratio", "reweight")) {
  cov1name <- short.deparse(substitute(cov1))
  cov2name <- short.deparse(substitute(cov2))
  callstring <- short.deparse(sys.call())
  method <- match.arg(method)
  # validate model
  if(is.ppp(object) || is.quad(object)) {
    # data pattern: reference is uniform; fit a stationary Poisson model
    model <- ppm(object, ~1, forcefit=TRUE)
    reference <- "area"
    modelcall <- NULL
  } else if(is.ppm(object)) {
    # fitted model supplied: reference is the fitted intensity
    model <- object
    reference <- "model"
    modelcall <- model$call
    if(is.null(getglmfit(model)))
      model <- update(model,
forcefit=TRUE) } else stop("object should be a point pattern or a point process model") # interpret string "x" or "y" as a coordinate function getxyfun <- function(s) { switch(s, x = { function(x,y) { x } }, y = { function(x,y) { y } }, stop(paste("Unrecognised covariate name", sQuote(s)))) } if(is.character(cov1) && length(cov1) == 1) { cov1name <- cov1 cov1 <- getxyfun(cov1name) } if(is.character(cov2) && length(cov2) == 1) { cov2name <- cov2 cov2 <- getxyfun(cov2name) } if( (cov1name == "x" && cov2name == "y") || (cov1name == "y" && cov2name == "x")) { # spatial relative risk isxy <- TRUE needflip <- (cov1name == "y" && cov2name == "x") X <- data.ppm(model) if(needflip) X <- flipxy(X) switch(method, ratio = { # ratio of smoothed intensity estimates den <- density(X, ...) sigma <- attr(den, "sigma") varcov <- attr(den, "varcov") W <- as.owin(den) if(!needflip) { lambda <- predict(model, locations=W) } else { lambda <- flipxy(predict(model, locations=flipxy(W))) } rslt <- switch(reference, area = { den }, model = { lam <- blur(lambda, sigma=sigma, varcov=varcov, normalise=TRUE) eval.im(den/lam) }) }, reweight = { # smoothed point pattern with weights = 1/reference W <- do.call.matched(as.mask, append(list(w=as.owin(X)), list(...))) if(!needflip) { lambda <- predict(model, locations=W) } else { lambda <- flipxy(predict(model, locations=flipxy(W))) } gstarX <- switch(reference, area = { rep.int(area(W), npoints(X)) }, model = { lambda[X] }) rslt <- density(X, ..., weights=1/gstarX) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) Z12points <- X r1 <- W$xrange r2 <- W$yrange lambda <- lambda[] } else { # general case isxy <- FALSE # harmonise covariates if(is.function(cov1) && is.im(cov2)) { cov1 <- as.im(cov1, W=cov2) } else if(is.im(cov1) && is.function(cov2)) { cov2 <- as.im(cov2, W=cov1) } # evaluate each covariate at data points and at pixels stuff1 <- evalCovar(model, cov1) stuff2 <- evalCovar(model, cov2) # unpack values1 <- stuff1$values values2 
<- stuff2$values # covariate values at each data point Z1X <- values1$ZX Z2X <- values2$ZX # covariate values at each pixel Z1values <- values1$Zvalues Z2values <- values2$Zvalues # model intensity lambda <- values1$lambda # ranges of each covariate r1 <- range(Z1X, Z1values, finite=TRUE) r2 <- range(Z2X, Z2values, finite=TRUE) scal <- function(x, r) { (x - r[1])/diff(r) } # scatterplot coordinates Z12points <- ppp(scal(Z1X, r1), scal(Z2X, r2), c(0,1), c(0,1)) Z12pixels <- ppp(scal(Z1values, r1), scal(Z2values, r2), c(0,1), c(0,1)) # normalising constants # nX <- length(Z1X) npixel <- length(lambda) areaW <- area(Window(model)) pixelarea <- areaW/npixel baseline <- if(reference == "area") rep.int(1, npixel) else lambda wts <- baseline * pixelarea switch(method, ratio = { # estimate intensities fhat <- density(Z12points, ...) sigma <- attr(fhat, "sigma") varcov <- attr(fhat, "varcov") ghat <- do.call(density.ppp, resolve.defaults(list(Z12pixels, weights=wts), list(...), list(sigma=sigma, varcov=varcov))) # compute ratio of smoothed densities rslt <- eval.im(fhat/ghat) }, reweight = { # compute smoothed intensity with weight = 1/reference ghat <- density(Z12pixels, weights=wts, ...) rslt <- density(Z12points, weights=1/ghat[Z12points], ...) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) } # add scale and label info attr(rslt, "stuff") <- list(isxy=isxy, cov1=cov1, cov2=cov2, cov1name=cov1name, cov2name=cov2name, r1=r1, r2=r2, reference=reference, lambda=lambda, modelcall=modelcall, callstring=callstring, Z12points=Z12points, sigma=sigma, varcov=varcov) class(rslt) <- c("rho2hat", class(rslt)) rslt } plot.rho2hat <- function(x, ..., do.points=FALSE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") # resolve "..." 
arguments rd <- resolve.defaults(list(...), list(add=FALSE, axes=!s$isxy, xlab=s$cov1name, ylab=s$cov2name)) # plot image plotparams <- graphicsPars("plot") do.call.matched(plot.im, resolve.defaults(list(x=x, axes=FALSE), list(...), list(main=xname, ribargs=list(axes=TRUE))), extrargs=c(plotparams, "add", "zlim", "breaks")) # add axes if(rd$axes) { axisparams <- graphicsPars("axis") Axis <- function(..., extrargs=axisparams) { do.call.matched(graphics::axis, resolve.defaults(list(...)), extrargs=extrargs) } if(s$isxy) { # for (x,y) plots the image is at the correct physical scale xr <- x$xrange yr <- x$yrange spak <- 0.05 * max(diff(xr), diff(yr)) Axis(side=1, ..., at=pretty(xr), pos=yr[1] - spak) Axis(side=2, ..., at=pretty(yr), pos=xr[1] - spak) } else { # for other plots the image was scaled to the unit square rx <- s$r1 ry <- s$r2 px <- pretty(rx) py <- pretty(ry) Axis(side=1, labels=px, at=(px - rx[1])/diff(rx), ...) Axis(side=2, labels=py, at=(py - ry[1])/diff(ry), ...) } title(xlab=rd$xlab) title(ylab=rd$ylab) } if(do.points) { do.call.matched(plot.ppp, resolve.defaults(list(x=s$Z12points, add=TRUE), list(...)), extrargs=c("pch", "col", "cols", "bg", "cex", "lwd", "lty")) } invisible(NULL) } print.rho2hat <- function(x, ...) 
#' Body of 'print.rho2hat' (its header is on the previous line):
#' describe the estimate, the reference baseline and the smoothing
#' parameters, then print the underlying values via NextMethod.
{
  s <- attr(x, "stuff")
  cat("Scatterplot intensity estimate (class rho2hat)\n")
  cat(paste("for the covariates", s$cov1name, "and", s$cov2name, "\n"))
  switch(s$reference,
         area=cat("Function values are absolute intensities\n"),
         model={
           cat("Function values are relative to fitted model\n")
           print(s$modelcall)
         })
  cat(paste("Call:", s$callstring, "\n"))
  if(s$isxy) {
    cat("Obtained by spatial smoothing of original data\n")
    cat("Smoothing parameters used by density.ppp:\n")
  } else {
    cat("Obtained by transforming to the unit square and smoothing\n")
    cat("Smoothing parameters (on unit square) used by density.ppp:\n")
  }
  if(!is.null(s$sigma)) cat(paste("\tsigma = ", signif(s$sigma, 5), "\n"))
  if(!is.null(s$varcov)) { cat("\tvarcov =\n") ; print(s$varcov) }
  cat("Intensity values:\n")
  NextMethod("print")
}

#' Predict method: evaluate the estimated rho2 surface back in the
#' spatial domain, yielding a pixel image of (relative) intensity.
#'   relative : if TRUE, do not multiply by the reference intensity
predict.rho2hat <- function(object, ..., relative=FALSE) {
  if(length(list(...)) > 0)
    warning("Additional arguments ignored in predict.rho2hat")
  # extract info
  s <- attr(object, "stuff")
  reference <- s$reference
  #' extract images of covariate
  Z1 <- s$cov1
  Z2 <- s$cov2
  if(!is.im(Z1)) Z1 <- as.im(Z1, Window(object))
  if(!is.im(Z2)) Z2 <- as.im(Z2, Window(object))
  #' rescale to [0,1]
  # same scaling as used when the estimate was built (ranges r1, r2)
  Z1 <- scaletointerval(Z1, xrange=s$r1)
  Z2 <- scaletointerval(Z2, xrange=s$r2)
  # extract pairs of covariate values
  ZZ <- pairs(Z1, Z2, plot=FALSE)
  # apply rho to Z
  YY <- safelookup(object, ppp(ZZ[,1], ZZ[,2], c(0,1), c(0,1)), warn=FALSE)
  # reform as image
  Y <- Z1
  Y[] <- YY
  # adjust to reference baseline
  if(!(relative || reference == "area")) {
    # absolute intensity = rho2 value * fitted baseline intensity
    lambda <- s$lambda
    Y <- Y * lambda
  }
  return(Y)
}
spatstat/R/dist2dpath.R0000644000176200001440000000415313333543254014517 0ustar liggesusers#
# dist2dpath.R
#
# $Revision: 1.10 $ $Date: 2017/06/05 10:31:58 $
#
# dist2dpath compute shortest path distances
#

#' All-pairs shortest-path distances on a weighted graph given as a
#' matrix of distances between adjacent vertices (Inf = not adjacent).
#'   method : "C" (registered native routine) or "interpreted" (pure R)
#' (The function continues on the following lines.)
dist2dpath <- function(dist, method="C") {
  ## given a matrix of distances between adjacent vertices
  ## (value = Inf if not adjacent)
  ## compute the matrix of shortest path distances
  stopifnot(is.matrix(dist) &&
isSymmetric(dist)) stopifnot(all(diag(dist) == 0)) findist <- dist[is.finite(dist)] if(any(findist < 0)) stop("Some distances are negative") ## n <- nrow(dist) if(n <= 1L) return(dist) cols <- col(dist) ## tol <- .Machine$double.eps posdist <- findist[findist > 0] if(length(posdist) > 0) { shortest <- min(posdist) tol2 <- shortest/max(n,1024) tol <- max(tol, tol2) } ## switch(method, interpreted={ dpathnew <- dpath <- dist changed <- TRUE while(changed) { for(j in 1:n) dpathnew[,j] <- apply(dpath + dist[j,][cols], 1L, min) unequal <- (dpathnew != dpath) changed <- any(unequal) & any(abs(dpathnew-dpath)[unequal] > tol) dpath <- dpathnew } }, C={ adj <- is.finite(dist) diag(adj) <- TRUE d <- dist d[!adj] <- -1 z <- .C("Ddist2dpath", nv=as.integer(n), d=as.double(d), adj=as.integer(adj), dpath=as.double(numeric(n*n)), tol=as.double(tol), niter=as.integer(integer(1L)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status == -1L) warning(paste("C algorithm did not converge to tolerance", tol, "after", z$niter, "iterations", "on", n, "vertices and", sum(adj) - n, "edges")) dpath <- matrix(z$dpath, n, n) ## value=-1 implies unreachable dpath[dpath < 0] <- Inf }, stop(paste("Unrecognised method", sQuote(method)))) return(dpath) } spatstat/R/plot.owin.R0000644000176200001440000002543113622670134014403 0ustar liggesusers# # plot.owin.S # # The 'plot' method for observation windows (class "owin") # # $Revision: 1.60 $ $Date: 2020/02/18 03:33:02 $ # # # plot.owin <- function(x, main, add=FALSE, ..., box, edge=0.04, type = c("w", "n"), show.all=!add, hatch=FALSE, hatchargs=list(), invert=FALSE, do.plot=TRUE, claim.title.space=FALSE, use.polypath=TRUE) { # # Function plot.owin. A method for plot. 
# if(missing(main)) main <- short.deparse(substitute(x)) W <- x verifyclass(W, "owin") if(!do.plot) return(invisible(as.rectangle(W))) type <- match.arg(type) if(missing(box) || is.null(box)) { box <- is.mask(W) && show.all } else stopifnot(is.logical(box) && length(box) == 1) #### pt <- prepareTitle(main) main <- pt$main nlines <- pt$nlines ######### xlim <- xr <- W$xrange ylim <- yr <- W$yrange #################################################### ## graphics parameters that can be overridden by user gparam <- resolve.defaults(list(...), par()) ## character expansion factors ## main title size = 'cex.main' * par(cex.main) * par(cex) ## user's graphics expansion factor (*multiplies* par) cex.main.user <- resolve.1.default(list(cex.main=1), list(...)) ## size of main title as multiple of par('cex') cex.main.rela <- cex.main.user * par('cex.main') ## absolute size cex.main.absol <- cex.main.rela * par('cex') if(!add) { ## new plot if(claim.title.space && nlines > 0) { ## allow space for main title (only in multi-panel plots) guesslinespace <- 0.07 * sqrt(diff(xr)^2 + diff(yr)^2) * cex.main.absol added <- (nlines + 1) * guesslinespace ylim[2] <- ylim[2] + added } ## set up plot with equal scales do.call.plotfun(plot.default, resolve.defaults(list(x=numeric(0), y=numeric(0), type="n"), list(...), list(xlim=xlim, ylim=ylim, ann=FALSE, axes=FALSE, asp=1.0, xaxs="i", yaxs="i", xlab="", ylab=""), .MatchNull=FALSE)) } if(show.all && nlines > 0) { ## add title if(claim.title.space) { mainheight <- sum(strheight(main, units="user", cex=cex.main.rela)) gapheight <- (strheight("b\nb", units="user", cex=cex.main.rela) - 2 * strheight("b", units="user", cex=cex.main.rela)) if(nlines > 1 && !is.expression(main)) main <- paste(main, collapse="\n") text(x=mean(xr), y=yr[2] + mainheight + 0.5 * gapheight, labels=main, cex=cex.main.rela, col=gparam$col.main, font=gparam$font.main) } else { title(main=main, cex=cex.main.rela, col=gparam$col.main, font=gparam$font.main) } } # Draw 
surrounding box if(box) do.call.plotfun(segments, resolve.defaults( list(x0=xr[c(1,2,2,1)], y0=yr[c(1,1,2,2)], x1=xr[c(2,2,1,1)], y1=yr[c(1,2,2,1)]), list(...))) # If type = "n", do not plot the window. if(type == "n") return(invisible(as.rectangle(W))) # Draw window switch(W$type, rectangle = { Wpoly <- as.polygonal(W) po <- Wpoly$bdry[[1]] do.call.plotfun(polygon, resolve.defaults(list(x=po), list(...)), extrargs="lwd") if(hatch) do.call(add.texture, append(list(W=W), hatchargs)) }, polygonal = { p <- W$bdry # Determine whether user wants to fill the interior col.poly <- resolve.defaults(list(...), list(col=NA))$col den.poly <- resolve.defaults(list(...), list(density=NULL))$density no.fill <- is.null(den.poly) && (is.null(col.poly) || is.na(col.poly)) # Determine whether we need to triangulate the interior. # If it is required to fill the interior, # this can be done directly using polygon() provided # there are no holes. Otherwise we must triangulate the interior. if(no.fill) triangulate <- FALSE else { # Determine whether there are any holes holes <- unlist(lapply(p, is.hole.xypolygon)) triangulate <- any(holes) } if(!triangulate) { # No triangulation required; # simply plot the polygons for(i in seq_along(p)) do.call.plotfun(polygon, resolve.defaults( list(x=p[[i]]), list(...)), extrargs="lwd") } else { # Try using polypath(): if(use.polypath && !(names(dev.cur()) %in% c("xfig","pictex","X11"))) { ppa <- owin2polypath(W) do.call.plotfun(polypath, resolve.defaults(ppa, list(border=col.poly), list(...))) } else { # decompose window into simply-connected pieces broken <- try(break.holes(W)) if(inherits(broken, "try-error")) { warning("Unable to plot filled polygons") } else { # Fill pieces with colour (and draw border in same colour) pp <- broken$bdry for(i in seq_len(length(pp))) do.call.plotfun(polygon, resolve.defaults(list(x=pp[[i]], border=col.poly), list(...))) } } # Now draw polygon boundaries for(i in seq_along(p)) do.call.plotfun(polygon, 
resolve.defaults( list(x=p[[i]]), list(density=0, col=NA), list(...)), extrargs="lwd") } if(hatch) do.call(add.texture, append(list(W=W), hatchargs)) }, mask = { # capture 'col' argument and ensure it's at least 2 values coldefault <- c(par("bg"), par("fg")) col <- resolve.defaults( list(...), spatstat.options("par.binary"), list(col=coldefault) )$col if(length(col) == 1) { col <- unique(c(par("bg"), col)) if(length(col) == 1) col <- c(par("fg"), col) } ## invert colours? if(invert) col <- rev(col) ## convert to greyscale? if(spatstat.options("monochrome")) col <- to.grey(col) do.call.matched(image.default, resolve.defaults( list(x=W$xcol, y=W$yrow, z=t(W$m), add=TRUE), list(col=col), list(...), spatstat.options("par.binary"), list(zlim=c(FALSE, TRUE)))) if(hatch) do.call(add.texture, append(list(W=W), hatchargs)) }, stop(paste("Don't know how to plot window of type", sQuote(W$type))) ) return(invisible(as.rectangle(W))) } break.holes <- local({ insect <- function(A, Box) { ## efficient version of intersect.owin which doesn't 'fix' the polygons a <- lapply(A$bdry, reverse.xypolygon) b <- lapply(as.polygonal(Box)$bdry, reverse.xypolygon) ab <- polyclip::polyclip(a, b, "intersection", fillA="nonzero", fillB="nonzero") if(length(ab)==0) return(emptywindow(Box)) # ensure correct polarity totarea <- sum(unlist(lapply(ab, Area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(Box$xrange, Box$yrange, poly=ab, check=FALSE, strict=FALSE, fix=FALSE, unitname=unitname(A)) return(AB) } break.holes <- function(x, splitby=NULL, depth=0, maxdepth=100) { if(is.null(splitby)) { ## first call: validate x stopifnot(is.owin(x)) splitby <- "x" } if(depth > maxdepth) stop("Unable to divide window into simply-connected pieces") p <- x$bdry holes <- unlist(lapply(p, is.hole.xypolygon)) if(!any(holes)) return(x) nholes <- sum(holes) maxdepth <- max(maxdepth, 4 * nholes) i <- min(which(holes)) p.i <- p[[i]] b <- as.rectangle(x) xr <- b$xrange yr <- b$yrange 
    # Split the window along a vertical (x) or horizontal (y) line through
    # the centre of the current hole, recurse on each half with the axis
    # alternated, then recombine the pieces.
    switch(splitby,
           x = {
             xsplit <- mean(range(p.i$x))
             left <- c(xr[1], xsplit)
             right <- c(xsplit, xr[2])
             xleft <- insect(x, owin(left, yr))
             xright <- insect(x, owin(right, yr))
             ## recurse
             xleft <- break.holes(xleft, splitby="y",
                                  depth=depth+1, maxdepth=maxdepth)
             xright <- break.holes(xright, splitby="y",
                                   depth=depth+1, maxdepth=maxdepth)
             ## recombine (without fusing polygons again!)
             result <- owin(xr, yr, poly=c(xleft$bdry, xright$bdry),
                            check=FALSE, strict=FALSE, fix=FALSE)
           },
           y = {
             ysplit <- mean(range(p.i$y))
             lower <- c(yr[1], ysplit)
             upper <- c(ysplit, yr[2])
             xlower <- insect(x, owin(xr, lower))
             xupper <- insect(x, owin(xr, upper))
             ## recurse
             xlower <- break.holes(xlower, splitby="x",
                                   depth=depth+1, maxdepth=maxdepth)
             xupper <- break.holes(xupper, splitby="x",
                                   depth=depth+1, maxdepth=maxdepth)
             ## recombine (without fusing polygons again!)
             result <- owin(xr, yr, poly=c(xlower$bdry, xupper$bdry),
                            check=FALSE, strict=FALSE, fix=FALSE)
           })
    return(result)
  }

  break.holes
})
spatstat/R/adaptive.density.R0000644000176200001440000000053113426455675015734 0ustar liggesusers#'
#' adaptive.density.R
#'
#' $Revision: 1.1 $ $Date: 2019/02/06 03:22:51 $
#'

#' Adaptive estimate of point process intensity:
#' dispatch to the Voronoi-based or adaptive-kernel estimator.
adaptive.density <- function(X, ..., method=c("voronoi", "kernel")) {
  method <- match.arg(method)
  result <- switch(method,
                   voronoi = densityVoronoi(X, ...),
                   kernel = densityAdaptiveKernel(X, ...))
  return(result)
}
spatstat/R/quantiledensity.R0000644000176200001440000000513313333543255015673 0ustar liggesusers#'
#' quantiledensity.R
#'
#' quantile method for class 'density'
#'
#' Also a CDF from a 'density'
#'
#' $Revision: 1.3 $ $Date: 2015/09/01 11:53:15 $

#' Quantiles of a kernel density estimate, obtained by numerically
#' integrating the density and inverting the resulting CDF.
#' (Definition continues on the following lines.)
quantile.density <- local({

  quantile.density <- function(x, probs = seq(0, 1, 0.25), names = TRUE,
                               ..., warn=TRUE) {
    stopifnot(inherits(x, "density"))
    #' check whether density estimate was restricted to an interval
    if(warn && is.call(cl <- x$call) && any(c("from", "to") %in% names(cl)))
      warning(paste("Density was normalised within the computed range",
                    "of x values",
                    prange(c(cl$from, cl$to))), call.=FALSE)
    #' validate probs
    eps <- 100 * .Machine$double.eps
    if(any((p.ok <- !is.na(probs)) & (probs < -eps | probs > 1 + eps)))
      stop("'probs' outside [0,1]")
    if (na.p <- any(!p.ok)) {
      # remember the original probs so NA entries can be reinstated at the end
      o.pr <- probs
      probs <- probs[p.ok]
      probs <- pmax(0, pmin(1, probs))
    }
    np <- length(probs)
    qs <- rep(NA_real_, np)
    if (np > 0) {
      #' extract density values
      xx <- x$x
      yy <- x$y
      nn <- length(xx)
      #' integrate, normalise
      # cumulative sum weighted by grid spacing, scaled to total mass 1
      Fx <- cumsum(yy * c(0, diff(xx)))
      Fx <- Fx/Fx[nn]
      #' quantile
      # smallest grid value at which the estimated CDF reaches each prob
      for(j in 1:np) {
        ii <- min(which(Fx >= probs[j]))
        if(!is.na(ii) && ii >= 1 && ii <= nn)
          qs[j] <- xx[ii]
      }
      if (names && np > 0L) {
        names(qs) <- format_perc(probs)
      }
    }
    if (na.p) {
      # reinsert NA probabilities in their original positions
      o.pr[p.ok] <- qs
      names(o.pr) <- rep("", length(o.pr))
      names(o.pr)[p.ok] <- names(qs)
      return(o.pr)
    } else return(qs)
  }

  # format a probability as a percentage label
  # (private copy of the unexported stats:::format_perc)
  format_perc <- function (x, digits = max(2L, getOption("digits")),
                           probability = TRUE,
                           use.fC = length(x) < 100, ...)
  {
    if (length(x)) {
      if (probability) x <- 100 * x
      paste0(if (use.fC)
               formatC(x, format = "fg", width = 1, digits = digits)
             else format(x, trim = TRUE, digits = digits, ...),
             "%")
    } else character(0)
  }

  quantile.density
})

#' Generic: cumulative distribution function associated with an object.
#' (The body of this generic lies on the next line.)
CDF <- function(f, ...)
{
  UseMethod("CDF")
}

#' CDF of a kernel density estimate: numerically integrate the density
#' and return a function computing the CDF by linear interpolation.
CDF.density <- function(f, ..., warn=TRUE) {
  stopifnot(inherits(f, "density"))
  #' check whether density estimate was restricted to an interval
  if(warn && is.call(cl <- f$call) && any(c("from", "to") %in% names(cl)))
    warning(paste("Density was normalised within the computed range",
                  "of x values", prange(c(cl$from, cl$to))),
            call.=FALSE)
  #' integrate
  xx <- f$x
  yy <- f$y
  nn <- length(xx)
  Fx <- cumsum(yy * c(0, diff(xx)))
  #' normalise
  Fx <- Fx/Fx[nn]
  #'
  # rule=2: clamp to the endpoint values (0 and 1) outside the grid range
  FF <- approxfun(xx, Fx, method="linear", rule=2)
  return(FF)
}
spatstat/R/circdensity.R0000644000176200001440000000275513333543254014777 0ustar liggesusers#'
#' circdensity.R
#'
#' Kernel smoothing for circular data
#'
#' $Revision: 1.3 $ $Date: 2014/12/04 06:49:20 $

#' Kernel density estimate for angular data: wrap the data onto
#' [0, FullCircle), replicate it over three adjacent periods, smooth on
#' the line, then keep only the central period.
circdensity <- function(x, sigma="nrd0", ...,
                        bw=NULL,
                        weights=NULL, unit=c("degree", "radian")) {
  xname <- short.deparse(substitute(x))
  missu <- missing(unit)
  # 'bw' is accepted as a synonym for 'sigma'
  if(missing(sigma) && !is.null(bw))
    sigma <- bw
  unit <- match.arg(unit)
  unit <- validate.angles(x, unit, missu)
  FullCircle <- switch(unit, degree = 360, radian = 2*pi)
  # character 'sigma' names a bandwidth selection rule
  if(is.character(sigma)) {
    sigma <- switch(sigma,
                    bcv = bw.bcv,
                    nrd = bw.nrd,
                    nrd0 = bw.nrd0,
                    SJ = bw.SJ,
                    ucv = bw.ucv,
                    get(paste0("bw.", sigma), mode="function"))
  }
  if(is.function(sigma)) {
    sigma <- sigma(x)
    if(!(is.numeric(sigma) && length(sigma) == 1L && sigma > 0))
      stop("Bandwidth selector should return a single positive number")
  }
  check.1.real(sigma)
  #' replicate data
  x <- x %% FullCircle
  xx <- c(x - FullCircle, x, x + FullCircle)
  #' replicate weights
  if(!is.null(weights)) {
    stopifnot(length(weights) == length(x))
    # divide by 3 so the replicated sample has the same total weight
    weights <- rep(weights, 3)/3
  }
  #' smooth
  z <- do.call(density.default,
               resolve.defaults(list(x=xx, bw=sigma, weights=weights),
                                list(...),
                                list(from=0, to=FullCircle)))
  # multiply by 3: only the central third of the mass lies in [0, FullCircle)
  z$y <- 3 * z$y
  z$data.name <- xname
  return(z)
}
spatstat/R/unnormdensity.R0000644000176200001440000000453613333543255015373 0ustar liggesusers#
# unnormdensity.R
#
# $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $
#

#' Unnormalised weighted kernel density estimate; weights may be
#' negative or of mixed sign.
#' (Formal arguments and body are on the following lines.)
unnormdensity <-
function(x, ..., weights=NULL) { if(any(!nzchar(names(list(...))))) stop("All arguments must be named (tag=value)") if(is.null(weights)) { out <- do.call.matched(density.default, c(list(x=x), list(...))) out$y <- length(x) * out$y } else if(all(weights == 0)) { # result is zero out <- do.call.matched(density.default, c(list(x=x), list(...))) out$y <- 0 * out$y } else if(all(weights >= 0)) { # all masses are nonnegative w <- weights totmass <- sum(w) out <- do.call.matched(density.default, c(list(x=x), list(...), list(weights=w/totmass))) out$y <- out$y * totmass } else if(all(weights <= 0)) { # all masses are nonpositive w <- (- weights) totmass <- sum(w) out <- do.call.matched(density.default, c(list(x=x), list(...), list(weights=w/totmass))) out$y <- out$y * (- totmass) } else { # mixture of positive and negative masses w <- weights wabs <- abs(w) wpos <- pmax.int(0, w) wneg <- - pmin.int(0, w) # determine bandwidth using absolute masses dabs <- do.call.matched(density.default, c(list(x=x), list(...), list(weights=wabs/sum(wabs)))) bw <- dabs$bw # compute densities for positive and negative masses separately outpos <- do.call.matched(density.default, resolve.defaults(list(x=x), list(bw=bw, adjust=1), list(weights=wpos/sum(wpos)), list(...), .StripNull=TRUE)) outneg <- do.call.matched(density.default, resolve.defaults(list(x=x), list(bw=bw, adjust=1), list(weights=wneg/sum(wneg)), list(...), .StripNull=TRUE)) # combine out <- outpos out$y <- sum(wpos) * outpos$y - sum(wneg) * outneg$y } out$call <- match.call() return(out) } spatstat/R/plot.fv.R0000644000176200001440000006576313333543255014060 0ustar liggesusers# # plot.fv.R (was: conspire.S) # # $Revision: 1.129 $ $Date: 2017/12/30 05:01:31 $ # # # conspire <- function(...) { # .Deprecated("plot.fv", package="spatstat") # plot.fv(...) 
# } plot.fv <- local({ hasonlyone <- function(x, amongst) { sum(all.vars(parse(text=x)) %in% amongst) == 1 } extendifvector <- function(a, n, nmore) { if(is.null(a)) return(a) if(length(a) == 1) return(a) return(c(a, rep(a[1], nmore))) } fixit <- function(a, n, a0, a00) { # 'a' is formal argument # 'a0' and 'a00' are default and fallback default # 'n' is number of values required if(is.null(a)) a <- if(!is.null(a0)) a0 else a00 if(length(a) == 1) return(rep.int(a, n)) else if(length(a) != n) stop(paste("Length of", short.deparse(substitute(a)), "does not match number of curves to be plotted")) else return(a) } pow10 <- function(x) { 10^x } clip.to.usr <- function() { usr <- par('usr') clip(usr[1], usr[2], usr[3], usr[4]) } plot.fv <- function(x, fmla, ..., subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) { xname <- if(is.language(substitute(x))) short.deparse(substitute(x)) else "" force(legendavoid) if(is.null(legend)) legend <- !add mathfont <- match.arg(mathfont) verifyclass(x, "fv") env.user <- parent.frame() indata <- as.data.frame(x) xlogscale <- (log %in% c("x", "xy", "yx")) ylogscale <- (log %in% c("y", "xy", "yx")) ## ---------------- determine plot formula ---------------- defaultplot <- missing(fmla) || is.null(fmla) if(defaultplot) fmla <- formula(x) ## This *is* the last possible moment, so... 
fmla <- as.formula(fmla, env=env.user) ## validate the variable names vars <- variablesinformula(fmla) reserved <- c(".", ".x", ".y", ".a", ".s") external <- !(vars %in% c(colnames(x), reserved)) if(any(external)) { sought <- vars[external] found <- unlist(lapply(sought, exists, envir=env.user, mode="numeric")) if(any(!found)) { nnot <- sum(!found) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!found])), ngettext(nnot, "was", "were"), "not found")) } else { ## validate the found variables externvars <- lapply(sought, get, envir=env.user) isnum <- sapply(externvars, is.numeric) len <- lengths(externvars) ok <- isnum & (len == 1 | len == nrow(x)) if(!all(ok)) { nnot <- sum(!ok) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!ok])), ngettext(nnot, "is", "are"), "not of the right format")) } } } ## Extract left hand side as given # lhs.original <- fmla[[2]] fmla.original <- fmla ## expand "." dotnames <- fvnames(x, ".") starnames <- fvnames(x, "*") umap <- fvexprmap(x) fmla <- eval(substitute(substitute(fom, um), list(fom=fmla, um=umap))) ## ------------------- extract data for plot --------------------- ## extract LHS and RHS of formula lhs <- fmla[[2]] rhs <- fmla[[3]] ## extract data lhsdata <- eval(lhs, envir=indata) rhsdata <- eval(rhs, envir=indata) ## reformat if(is.vector(lhsdata)) { lhsdata <- matrix(lhsdata, ncol=1) lhsvars <- all.vars(as.expression(lhs)) lhsvars <- lhsvars[lhsvars %in% names(x)] colnames(lhsdata) <- if(length(lhsvars) == 1) lhsvars else if(length(starnames) == 1 && (starnames %in% lhsvars)) starnames else paste(deparse(lhs), collapse="") } ## check lhs names exist lnames <- colnames(lhsdata) nc <- ncol(lhsdata) lnames0 <- paste("V", seq_len(nc), sep="") if(length(lnames) != nc) colnames(lhsdata) <- lnames0 else if(any(uhoh <- !nzchar(lnames))) colnames(lhsdata)[uhoh] <- lnames0[uhoh] lhs.names <- colnames(lhsdata) ## check whether each lhs column is associated with a single column of 
'x' ## that is one of the alternative versions of the function. ## This may be unreliable, as it depends on the ## column names assigned to lhsdata by eval() one.star <- unlist(lapply(lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, lhs.names, "") ## check rhs data if(is.matrix(rhsdata)) stop("rhs of formula should yield a vector") rhsdata <- as.numeric(rhsdata) nplots <- ncol(lhsdata) allind <- 1:nplots ## ---------- extra plots may be implied by 'shade' ----------------- extrashadevars <- NULL if(!is.null(shade)) { ## select columns by name or number names(allind) <- explicit.lhs.names shind <- try(allind[shade]) if(inherits(shind, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) if(any(nbg <- is.na(shind))) { ## columns not included in formula: add them morelhs <- try(as.matrix(indata[ , shade[nbg], drop=FALSE])) if(inherits(morelhs, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) nmore <- ncol(morelhs) extrashadevars <- colnames(morelhs) if(defaultplot) { success <- TRUE } else if("." %in% variablesinformula(fmla.original)) { ## evaluate lhs of formula, expanding "." 
to shade names u <- if(length(extrashadevars) == 1) as.name(extrashadevars) else { as.call(lapply(c("cbind", extrashadevars), as.name)) } ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) foo <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)), list(fom=fmla.original))) dont.complain.about(u, ux, uy) lhsnew <- foo[[2]] morelhs <- eval(lhsnew, envir=indata) success <- identical(colnames(morelhs), extrashadevars) } else if(is.name(lhs) && as.character(lhs) %in% names(indata)) { ## lhs is the name of a single column in x ## expand the LHS explicit.lhs.names <- c(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) success <- TRUE } else if(length(explicit.lhs.dotnames) > 1) { ## lhs = cbind(...) where ... are dotnames cbound <- paste0("cbind", paren(paste(explicit.lhs.dotnames, collapse=", "))) if(identical(deparse(lhs), cbound)) { success <- TRUE explicit.lhs.names <- union(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) } else success <- FALSE } else success <- FALSE if(success) { ## add these columns to the plotting data lhsdata <- cbind(lhsdata, morelhs) shind[nbg] <- nplots + seq_len(nmore) lty <- extendifvector(lty, nplots, nmore) col <- extendifvector(col, nplots, nmore) lwd <- extendifvector(lwd, nplots, nmore) nplots <- nplots + nmore ## update the names one.star <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, explicit.lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, explicit.lhs.names, "") } else { ## cannot add columns warning(paste("Shade", ngettext(sum(nbg), "column", "columns"), commasep(sQuote(shade[nbg])), "were missing from the plot formula, and were omitted")) shade <- NULL 
extrashadevars <- NULL } } } ## -------------------- determine plotting limits ---------------------- ## restrict data to subset if desired if(!is.null(subset)) { keep <- if(is.character(subset)) { eval(parse(text=subset), envir=indata) } else eval(subset, envir=indata) lhsdata <- lhsdata[keep, , drop=FALSE] rhsdata <- rhsdata[keep] } ## determine x and y limits and clip data to these limits if(is.null(xlim) && add) { ## x limits are determined by existing plot xlim <- par("usr")[1:2] } if(!is.null(xlim)) { ok <- !is.finite(rhsdata) | (xlim[1] <= rhsdata & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## if we're using the default argument, use its recommended range if(rhs == fvnames(x, ".x")) { xlim <- attr(x, "alim") %orifnull% range(as.vector(rhsdata), finite=TRUE) if(xlogscale && xlim[1] <= 0) xlim[1] <- min(rhsdata[is.finite(rhsdata) & rhsdata > 0], na.rm=TRUE) ok <- !is.finite(rhsdata) | (rhsdata >= xlim[1] & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## actual range of values to be plotted if(xlogscale) { ok <- is.finite(rhsdata) & (rhsdata > 0) & matrowany(lhsdata > 0) xlim <- range(rhsdata[ok]) } else { xlim <- range(rhsdata, na.rm=TRUE) } } } if(is.null(ylim)) { yok <- is.finite(lhsdata) if(ylogscale) yok <- yok & (lhsdata > 0) ylim <- range(lhsdata[yok],na.rm=TRUE) } if(!is.null(ylim.covers)) ylim <- range(ylim, ylim.covers) ## return x, y limits only? if(limitsonly) return(list(xlim=xlim, ylim=ylim)) ## ------------- work out how to label the plot -------------------- ## extract plot labels, substituting function name labl <- fvlabels(x, expand=TRUE) ## create plot label map (key -> algebraic expression) map <- fvlabelmap(x) ## ......... label for x axis .................. if(is.null(xlab)) { argname <- fvnames(x, ".x") if(as.character(fmla)[3] == argname) { ## The x axis variable is the default function argument. 
ArgString <- fvlabels(x, expand=TRUE)[[argname]] xexpr <- parse(text=ArgString) ## use specified font xexpr <- fontify(xexpr, mathfont) ## Add name of unit of length? ax <- summary(unitname(x))$axis if(is.null(ax)) { xlab <- xexpr } else { xlab <- expression(VAR ~ COMMENT) xlab[[1]][[2]] <- xexpr[[1]] xlab[[1]][[3]] <- ax } } else { ## map ident to label xlab <- eval(substitute(substitute(rh, mp), list(rh=rhs, mp=map))) ## use specified font xlab <- fontify(xlab, mathfont) } } if(is.language(xlab) && !is.expression(xlab)) xlab <- as.expression(xlab) ## ......... label for y axis ................... leftside <- lhs if(ncol(lhsdata) > 1 || length(dotnames) == 1) { ## For labelling purposes only, simplify the LHS by ## replacing 'cbind(.....)' by '.' ## even if not all columns are included. leftside <- paste(as.expression(leftside)) eln <- explicit.lhs.dotnames eln <- eln[nzchar(eln)] cb <- if(length(eln) == 1) eln else { paste("cbind(", paste(eln, collapse=", "), ")", sep="") } compactleftside <- gsub(cb, ".", leftside, fixed=TRUE) ## Separately expand "." to cbind(.....) ## and ".x", ".y" to their real names dotdot <- c(dotnames, extrashadevars) cball <- if(length(dotdot) == 1) dotdot else { paste("cbind(", paste(dotdot, collapse=", "), ")", sep="") } expandleftside <- gsub(".x", fvnames(x, ".x"), leftside, fixed=TRUE) expandleftside <- gsub(".y", fvnames(x, ".y"), expandleftside, fixed=TRUE) expandleftside <- gsubdot(cball, expandleftside) ## convert back to language compactleftside <- parse(text=compactleftside)[[1]] expandleftside <- parse(text=expandleftside)[[1]] } else { compactleftside <- expandleftside <- leftside } ## construct label for y axis if(is.null(ylab)) { yl <- attr(x, "yexp") if(defaultplot && !is.null(yl)) { ylab <- yl } else { ## replace "." 
and short identifiers by plot labels ylab <- eval(substitute(substitute(le, mp), list(le=compactleftside, mp=map))) } } if(is.language(ylab)) { ## use specified font ylab <- fontify(ylab, mathfont) ## ensure it's an expression if(!is.expression(ylab)) ylab <- as.expression(ylab) } ## ------------------ start plotting --------------------------- ## create new plot if(!add) do.call(plot.default, resolve.defaults(list(xlim, ylim, type="n", log=log), list(xlab=xlab, ylab=ylab), list(...), list(main=xname))) ## handle 'type' = "n" giventype <- resolve.defaults(list(...), list(type=NA))$type if(identical(giventype, "n")) return(invisible(NULL)) ## process lty, col, lwd arguments opt0 <- spatstat.options("par.fv") lty <- fixit(lty, nplots, opt0$lty, 1:nplots) col <- fixit(col, nplots, opt0$col, 1:nplots) lwd <- fixit(lwd, nplots, opt0$lwd, 1) ## convert to greyscale? if(spatstat.options("monochrome")) col <- to.grey(col) if(!is.null(shade)) { ## shade region between critical boundaries ## extract relevant columns for shaded bands shdata <- lhsdata[, shind] if(!is.matrix(shdata) || ncol(shdata) != 2) stop("The argument shade should select two columns of x") ## truncate infinite values to plot limits if(any(isinf <- is.infinite(shdata))) { if(is.null(ylim)) { warning("Unable to truncate infinite values to the plot area") } else { shdata[isinf & (shdata == Inf)] <- ylim[2] shdata[isinf & (shdata == -Inf)] <- ylim[1] } } ## determine limits of shading shdata1 <- shdata[,1] shdata2 <- shdata[,2] ## plot grey polygon xpoly <- c(rhsdata, rev(rhsdata)) ypoly <- c(shdata1, rev(shdata2)) miss1 <- !is.finite(shdata1) miss2 <- !is.finite(shdata2) if(!any(broken <- (miss1 | miss2))) { ## single polygon clip.to.usr() polygon(xpoly, ypoly, border=shadecol, col=shadecol) } else { ## interrupted dat <- data.frame(rhsdata=rhsdata, shdata1=shdata1, shdata2=shdata2) serial <- cumsum(broken) lapply(split(dat, serial), function(z) { with(z, { xp <- c(rhsdata, rev(rhsdata)) yp <- c(shdata1, 
rev(shdata2)) clip.to.usr() polygon(xp, yp, border=shadecol, col=shadecol) }) }) ## save for use in placing legend okp <- !c(broken, rev(broken)) xpoly <- xpoly[okp] ypoly <- ypoly[okp] } ## overwrite graphical parameters lty[shind] <- 1 ## try to preserve the same type of colour specification if(is.character(col) && is.character(shadecol)) { ## character representations col[shind] <- shadecol } else if(is.numeric(col) && !is.na(sc <- paletteindex(shadecol))) { ## indices in colour palette col[shind] <- sc } else { ## convert colours to hexadecimal and edit relevant values col <- col2hex(col) col[shind] <- col2hex(shadecol) } ## remove these columns from further plotting allind <- allind[-shind] ## } else xpoly <- ypoly <- numeric(0) ## ----------------- plot lines ------------------------------ for(i in allind) { clip.to.usr() lines(rhsdata, lhsdata[,i], lty=lty[i], col=col[i], lwd=lwd[i]) } if(nplots == 1) return(invisible(NULL)) ## ---------------- determine legend ------------------------- key <- colnames(lhsdata) mat <- match(key, names(x)) keyok <- !is.na(mat) matok <- mat[keyok] legdesc <- rep.int("constructed variable", length(key)) legdesc[keyok] <- attr(x, "desc")[matok] leglabl <- lnames0 leglabl[keyok] <- labl[matok] ylab <- attr(x, "ylab") if(!is.null(ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) legdesc <- sprintf(legdesc, ylab) } ## compute legend info legtxt <- key if(legendmath) { legtxt <- leglabl if(defaultplot) { ## try to convert individual labels to expressions fancy <- try(parse(text=leglabl), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } else { ## try to navigate the parse tree fancy <- try(fvlegend(x, expandleftside), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } } if(is.expression(legtxt) || is.language(legtxt) || all(sapply(legtxt, is.language))) legtxt <- fontify(legtxt, mathfont) ## --------------- handle legend plotting ----------------------------- if(identical(legend, TRUE)) { ## 
legend will be plotted ## Basic parameters of legend legendxpref <- if(identical(legendpos, "float")) NULL else legendpos optparfv <- spatstat.options("par.fv")$legendargs %orifnull% list() legendspec <- resolve.defaults(legendargs, list(lty=lty, col=col, lwd=lwd), optparfv, list(x=legendxpref, legend=legtxt, inset=0.05, y.intersp=if(legendmath) 1.3 else 1), .StripNull=TRUE) tB <- dev.capabilities()$transparentBackground if(!any(names(legendspec) == "bg") && !is.na(tB) && !identical(tB, "no")) legendspec$bg <- "transparent" if(legendavoid || identical(legendpos, "float")) { ## Automatic determination of legend position ## Assemble data for all plot objects linedata <- list() xmap <- if(xlogscale) log10 else identity ymap <- if(ylogscale) log10 else identity inv.xmap <- if(xlogscale) pow10 else identity inv.ymap <- if(ylogscale) pow10 else identity for(i in seq_along(allind)) linedata[[i]] <- list(x=xmap(rhsdata), y=ymap(lhsdata[,i])) polydata <- if(length(xpoly) > 0) list(x=xmap(xpoly), y=ymap(ypoly)) else NULL #' ensure xlim, ylim define a box boxXlim <- if(diff(xlim) > 0) xlim else par('usr')[1:2] boxYlim <- if(diff(ylim) > 0) ylim else par('usr')[3:4] #' objects <- assemble.plot.objects(xmap(boxXlim), ymap(boxYlim), lines=linedata, polygon=polydata) ## find best position to avoid them legendbest <- findbestlegendpos(objects, preference=legendpos, legendspec=legendspec) ## handle log scale if((xlogscale || ylogscale) && checkfields(legendbest, c("x", "xjust", "yjust"))) { ## back-transform x, y coordinates legendbest$x$x <- inv.xmap(legendbest$x$x) legendbest$x$y <- inv.ymap(legendbest$x$y) } } else legendbest <- list() ## ********** plot legend ************************* if(!is.null(legend) && legend) do.call(graphics::legend, resolve.defaults(legendargs, legendbest, legendspec, .StripNull=TRUE)) } ## convert labels back to character labl <- paste.expr(legtxt) labl <- gsub(" ", "", labl) ## return legend info df <- data.frame(lty=lty, col=col, key=key, 
label=labl, meaning=legdesc, row.names=key) return(invisible(df)) } plot.fv }) assemble.plot.objects <- function(xlim, ylim, ..., lines=NULL, polygon=NULL) { # Take data that would have been passed to the commands 'lines' and 'polygon' # and form corresponding geometrical objects. objects <- list() if(!is.null(lines)) { if(is.psp(lines)) { objects <- list(lines) } else { if(checkfields(lines, c("x", "y"))) { lines <- list(lines) } else if(!all(unlist(lapply(lines, checkfields, L=c("x", "y"))))) stop("lines should be a psp object, a list(x,y) or a list of list(x,y)") W <- owin(xlim, ylim) for(i in seq_along(lines)) { lines.i <- lines[[i]] x.i <- lines.i$x y.i <- lines.i$y n <- length(x.i) if(length(y.i) != n) stop(paste(paste("In lines[[", i, "]]", sep=""), "the vectors x and y have unequal length")) if(!all(ok <- (is.finite(x.i) & is.finite(y.i)))) { x.i <- x.i[ok] y.i <- y.i[ok] n <- sum(ok) } segs.i <- psp(x.i[-n], y.i[-n], x.i[-1], y.i[-1], W, check=FALSE) objects <- append(objects, list(segs.i)) } } } if(!is.null(polygon)) { # Add filled polygon pol <- polygon[c("x", "y")] ok <- with(pol, is.finite(x) & is.finite(y)) if(!all(ok)) pol <- with(pol, list(x=x[ok], y=y[ok])) if(Area.xypolygon(pol) < 0) pol <- lapply(pol, rev) P <- try(owin(poly=pol, xrange=xlim, yrange=ylim, check=FALSE)) if(!inherits(P, "try-error")) objects <- append(objects, list(P)) } return(objects) } findbestlegendpos <- local({ # Given a list of geometrical objects, find the best position # to avoid them. 
thefunction <- function(objects, show=FALSE, aspect=1, bdryok=TRUE, preference="float", verbose=FALSE, legendspec=NULL) { # find bounding box W <- do.call(boundingbox, lapply(objects, as.rectangle)) # convert to common box objects <- lapply(objects, rebound, rect=W) # comp # rescale x and y axes so that bounding box has aspect ratio 'aspect' aspectW <- with(W, diff(yrange)/diff(xrange)) s <- aspect/aspectW mat <- diag(c(1, s)) invmat <- diag(c(1, 1/s)) scaled.objects <- lapply(objects, affine, mat=mat) scaledW <- affine(W, mat=mat) if(verbose) { cat("Scaled space:\n") print(scaledW) } # pixellate the scaled objects pix.scal.objects <- lapply(scaled.objects, asma) # apply distance transforms in scaled space D1 <- distmap(pix.scal.objects[[1]]) Dlist <- lapply(pix.scal.objects, distmap, xy=list(x=D1$xcol, y=D1$yrow)) # distance transform of superposition D <- im.apply(Dlist, min) if(!bdryok) { # include distance to boundary B <- attr(D1, "bdry") D <- eval.im(pmin.int(D, B)) } if(show) { plot(affine(D, mat=invmat), add=TRUE) lapply(lapply(scaled.objects, affine, mat=invmat), plot, add=TRUE) } if(preference != "float") { # evaluate preferred location (check for collision) if(!is.null(legendspec)) { # pretend to plot the legend as specified legout <- do.call(graphics::legend, append(legendspec, list(plot=FALSE))) # determine bounding box legbox <- with(legout$rect, owin(c(left, left+w), c(top-h, top))) scaledlegbox <- affine(legbox, mat=mat) # check for collision Dmin <- min(D[scaledlegbox]) if(Dmin >= 0.02) { # no collision: stay at preferred location. Exit. return(list(x=preference)) } # collision occurred! } else { # no legend information. 
# Pretend legend is 15% of plot width and height xr <- scaledW$xrange yr <- scaledW$yrange testloc <- switch(preference, topleft = c(xr[1],yr[2]), top = c(mean(xr), yr[2]), topright = c(xr[2], yr[2]), right = c(xr[2], mean(yr)), bottomright = c(xr[2], yr[1]), bottom = c(mean(xr), yr[1]), bottomleft = c(xr[1], yr[1]), left = c(xr[1], mean(yr)), center = c(mean(xr), mean(yr)), NULL) if(!is.null(testloc)) { # look up distance value at preferred location testpat <- ppp(x=testloc[1], y=testloc[2], xr, yr, check=FALSE) val <- safelookup(D, testpat) crit <- 0.15 * min(diff(xr), diff(yr)) if(verbose) cat(paste("val=",val, ", crit=", crit, "\n")) if(val > crit) { # no collision: stay at preferred location. Exit. return(list(x=preference)) } # collision occurred! } } # collision occurred! } # find location of max locmax <- which(D$v == max(D), arr.ind=TRUE) locmax <- unname(locmax[1,]) pos <- list(x=D$xcol[locmax[2]], y=D$yrow[locmax[1]]) pos <- affinexy(pos, mat=invmat) if(show) points(pos) # determine justification of legend relative to this point # to avoid crossing edges of plot xrel <- (pos$x - W$xrange[1])/diff(W$xrange) yrel <- (pos$y - W$yrange[1])/diff(W$yrange) xjust <- if(xrel < 0.1) 0 else if(xrel > 0.9) 1 else 0.5 yjust <- if(yrel < 0.1) 0 else if(yrel > 0.9) 1 else 0.5 # out <- list(x=pos, xjust=xjust, yjust=yjust) return(out) } asma <- function(z) { if(is.owin(z)) as.mask(z) else if(is.psp(z)) as.mask.psp(z) else NULL } callit <- function(...) { rslt <- try(thefunction(...)) if(!inherits(rslt, "try-error")) return(rslt) return(list()) } callit }) spatstat/R/randomsets.R0000644000176200001440000000077413525477105014641 0ustar liggesusers#' #' randomsets.R #' #' Generation of random sets #' #' $Revision: 1.2 $ $Date: 2019/08/16 07:53:05 $ rthinclumps <- function(W, p, ...) 
{ check.1.real(p) if(badprobability(p, TRUE)) stop("p must be a valid probability between 0 and 1", call.=FALSE) if(!(is.im(W) || is.owin(W))) stop("W should be a window or pixel image", call.=FALSE) clumps <- connected(W, ...) keep <- (runif(length(levels(clumps))) < p) retained <- eval.im(keep[clumps]) return(solutionset(retained)) } spatstat/R/simulate.detPPF.R0000644000176200001440000003703213424276302015415 0ustar liggesusers## simulate.detPPF.R ## $Revision: 1.7 $ $Date: 2019/01/29 05:21:22 $ ## ## This file contains functions to simulate DPP models. ## Two simulation functions are visible: ## - simulate.detpointprocfamily (most useful) ## - rdpp (more generic workhorse function -- actually the real workhorse is the locally defined rdppp) ## ## Furthermore the auxilliary function dppeigen is defined here. rdpp <- local({ ## Generates an empty point pattern emptyppx <- function(W, simplify = TRUE){ W <- as.boxx(W) r <- W$ranges d <- ncol(r) if(simplify){ if(d==2) return(ppp(numeric(0), numeric(0), window=as.owin(W))) if(d==3) return(pp3(numeric(0), numeric(0), numeric(0), W)) } rslt <- replicate(d, numeric(0), simplify=FALSE) names(rslt) <- paste("x",1:d,sep="") rslt <- as.data.frame(rslt) return(ppx(rslt, domain = W, coord.type= rep("spatial", d))) } rdppp <- function(index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, given = NULL, given_max_volume = 0.5, ...){ ## Check arguments: if (!(is.logical(debug))) stop(paste(sQuote("debug"), "must be TRUE or FALSE")) if (!is.numeric(reject_max)||reject_max<=1) stop(paste(sQuote("reject_max"), "must be a numeric greater than 1")) if (!is.numeric(progress)||reject_max<1) stop(paste(sQuote("progress"), "must be a numeric greater than or equal to 1")) index <- as.matrix(index) d <- ncol(index) window <- as.boxx(window) ranges <- window$ranges boxlengths <- as.numeric(ranges[2L, ] - ranges[1L, ]) if(ncol(ranges)!=d) stop("The dimension differs from the 
number of columns in index") if(basis != "fourierbasis"){ warning("Non Fourier basis probably doesn't work correctly! Fourier is assumed for bounds in rejection sampling.") userbasis <- get(basis) if (!(is.function(userbasis))) stop(paste(sQuote("basis"), "must be a function")) tmp <- userbasis(ranges[1,,drop=FALSE], index, window) if (!(is.numeric(tmp) || is.complex(tmp))) stop(paste("Output of", sQuote("basis"), "must be numeric or complex")) basis <- function(x, k, boxlengths){ userbasis(x, k, boxx(lapply(boxlengths, function(x) list(c(0,x))))) } } else{ basis <- fourierbasisraw } ## Number of points to simulate: n <- nrow(index) ## Resolve `given` for pseudo conditional simulation if(!is.null(given)){ # Make sure `given` is a list of point patterns if(is.ppp(given) || is.pp3(given) || is.ppx(given)){ given <- list(given) } stopifnot(all(sapply(given, function(x){ is.ppp(x) || is.pp3(x) || is.ppx(x)}))) # Check that the window (or its boundingbox) is inside the simulation window Wgiven <- lapply(given, function(x) as.boxx(domain(x))) stopifnot(all(sapply(Wgiven, function(w){ all(w$ranges[1,] >= ranges[1,]) && all(w$ranges[2,] <= ranges[2,]) }))) stopifnot(sum(sapply(Wgiven, volume))0) cat(paste("Simulating", n, "points:\n")) ## Main for loop over number of points: for(i in (n-1):1){ ## Print progress: if(progress>0) progressreport(n-i, n, every=progress) ## Aux. variable to count number of rejection steps: tries <- 0 # Debug info: if(debug){ rejected <- matrix(NA,reject_max,d) } repeat{ ## Proposed point: newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) if(!is.null(given)){ if(i>(n-ngiven)){ newx <- coordsgiven[n-i+1,,drop=FALSE] } else{ while(any(sapply(Wgiven, function(w) inside.boxx(as.hyperframe(newx), w = w)))) newx <- matrix(runif(d,as.numeric(ranges[1,]),as.numeric(ranges[2,])),ncol=d) } } ## Basis functions eval. 
at proposed point: v <- as.vector(basis(newx, index, boxlengths)) ## Vector of projection weights (has length n-i) wei <- t(v)%*%estar if(!is.null(given) && i>(n-ngiven)){ break } ## Accept probability: # tmp <- prod(ranges[2,]-ranges[1,])/n*(sum(abs(v)^2)-sum(abs(wei)^2)) tmp <- 1-prod(ranges[2,]-ranges[1,])/n*(sum(abs(wei)^2)) ## If proposal is accepted the loop is broken: if(runif(1)reject_max){ stop(paste("Rejection sampling failed reject_max =",reject_max,"times in a row")) } ## Increase the count of rejection steps: tries <- tries+1 # Debug info: if(debug){ rejected[tries,] <- newx } } ## END OF REJECTION LOOP # Record the accepted point: x[n-i+1,] <- newx # Debug info: if(debug){ if(tries==0){ rej <- empty } else{ rej <- ppx(rejected[1:tries,,drop=FALSE],window, simplify=TRUE) } debugList[[n-i+1]] = list( old=ppx(x[1:(n-i),,drop=FALSE],window, simplify=TRUE), accepted=ppx(newx,window,simplify=TRUE), rejected=rej, index=index, estar = estar) } ## If it is the last point exit the main loop: if(i==1){break} ## Calculate orthogonal vector for Gram-Schmidt procedure: # w <- v - rowSums(matrix(wei,n,n-i,byrow=TRUE)*e[,1:(n-i)]) w <- v - colSums(t(e)*as.vector(wei)) ## Record normalized version in the Gram-Schmidt matrices: enew <- w/sqrt(sum(abs(w)^2)) e <- cbind(e, enew) estar <- cbind(estar,Conj(enew)) } ## END OF MAIN FOR LOOP # Save points as point pattern: X <- ppx(x, window, simplify = TRUE) # Debug info: if(debug){ attr(X, "dpp") <- list(debug=debugList) } if(progress>0) cat(" Done!\n") return(X) } rdpp <- function(eig, index, basis = "fourierbasis", window = boxx(rep(list(0:1), ncol(index))), reject_max = 1e4, progress = 0, debug = FALSE, ...){ window2d <- NULL if (is.owin(window)) window2d <- window sampleindex <- as.matrix(index[rbinom(nrow(index), 1, eig)==1, ]) X <- rdppp(sampleindex, basis=basis, window=window, reject_max=reject_max, progress=progress, debug=debug, ...) 
if(!is.null(window2d)) X <- X[window2d] return(X) } rdpp } ) simulate.dppm <- simulate.detpointprocfamily <- function(object, nsim = 1, seed = NULL, ..., W = NULL, trunc = .99, correction = "periodic", rbord = reach(object) # parallel = FALSE ){ # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. if(inherits(object, "dppm")){ if(is.null(W)) W <- Window(object$X) object <- object$fitted } if(!inherits(object, "detpointprocfamily")) stop("The model to simulate must be of class detpointprocfamily") if(length(tmp <- object$freepar)>0) stop(paste("The model to simulate must be completely specified. The following parameters are unspecified:", tmp)) if(!valid(object)) stop("The model is invalid. Please change parameter values to get a valid model") if(!is.numeric(nsim)||nsim<1) stop(paste(sQuote("nsim"), "must be a numeric greater than or equal to 1")) nsim <- floor(nsim) dim <- dim(object) basis <- object$basis ####### BACKDOOR TO SPHERICAL CASE ######## if(!is.null(spherefun <- object$sim_engine)){ sphereSimEngine <- get(spherefun) rslt <- sphereSimEngine(object, trunc, nsim, ...) 
attr(rslt, "seed") <- RNGstate return(rslt) } ########################################### # Check stationarity and window compatibility (if 'W' and 'thin' both are present) statmodel <- is.null(thin <- object$thin) if(is.null(W)){ if(!statmodel) W <- domain(thin) } Wowin <- if(is.owin(W)) W else NULL if(is.null(W)){ W <- boxx(rep(list(0:1), dim)) } else{ W <- as.boxx(W, warn.owin = FALSE) } if(!statmodel){ if(!is.subset.owin(Wowin,thin)) stop("The window of simulation is not contained in the window of the inhomogeneous intensity.") } r <- W$ranges if(dim!=ncol(r)) stop(paste("The dimension of the window:", ncol(r), "is inconsistent with the dimension of the model:", dim)) Wscale <- as.numeric(r[2,]-r[1,]) Wcenter <- as.numeric(colMeans(r)) if(correction=="border"){ if(!is.numeric(rbord)||any(rbord<0)) stop(paste(sQuote("rbord"), "must be a non-negative numeric")) borderscale <- pmin((Wscale+2*rbord)/Wscale, 2) Wscale <- borderscale*Wscale } ## lambda <- intensity(object) tmp <- dppeigen(object, trunc, Wscale) trunc <- tmp$trunc prec <- tmp$prec n <- length(tmp$eig) indexlist <- replicate(nsim, {x <- as.matrix(tmp$index[rbinom(n, 1, tmp$eig)==1, ]); gc(); x}, simplify = FALSE) rm(tmp) gc() onesim <- function(i, win=NULL){ X <- rdpp(1, indexlist[[i]], basis = basis, window = boxx(rep(list(c(-.5,.5)), dim)), ...) 
a <- attr(X, "dpp") a <- c(a, list(prec = prec, trunc = trunc)) if(correction=="border"){ if(dim!=2) stop("Border correction only implemented for dimension 2 at the moment.") X <- X[affine.owin(as.owin(X), mat = diag(1/borderscale))] } if(is.ppp(X)){ X <- affine(X, matrix(c(Wscale[1],0,0,Wscale[2]), 2, 2), Wcenter) if(!is.null(win)) X <- X[win] } else{ X <- ppx(X$data, domain = as.boxx(X$domain), coord.type = rep("spatial", dim)) X$data <- as.hyperframe(as.data.frame(X$data)*matrix(Wscale, nrow(X$data), ncol(X$data), byrow = TRUE)) X$domain$ranges <- X$domain$ranges*matrix(Wscale, 2, dim, byrow = TRUE) + matrix(Wcenter, 2, dim, byrow = TRUE) X <- ppx(X$data, X$domain, simplify = TRUE) } attr(X, "dpp") <- a return(X) } if(nsim==1){ rslt <- onesim(1,win=Wowin) if(!statmodel) rslt <- rthin(rslt, P=thin) } else{ ######## Old code for parallel simulation ######### # if(is.logical(parallel)){ # cl.cores <- if(parallel) NULL else 1 # } else{ # cl.cores <- parallel # } # rslt <- detlapply(1:nsim, onesim, cl.cores=cl.cores, win=Wowin) ################################################### rslt <- lapply(1:nsim, onesim, win=Wowin) if(!statmodel) rslt <- lapply(rslt, rthin, P=thin) names(rslt) <- paste("Simulation", 1:nsim) rslt <- if(dim == 2) as.solist(rslt) else as.anylist(rslt) } attr(rslt, "seed") <- RNGstate return(rslt) } dppeigen <- function(model, trunc, Wscale, stationary = FALSE){ dim <- dim(model) if(stationary && dim!=2) stop("Stationarity can only be exploited in dimension 2 at the moment.") Wscale <- as.numeric(Wscale) check.nvector(Wscale, dim, things="dimensions") ## Calculate expected number of points if the intensity is a parameter expnum <- NULL lambdaname <- model$intensity if(!is.null(lambdaname)) expnum <- getElement(model$fixedpar, lambdaname)*prod(Wscale) ## Get the maximal truncation in each dimension maxtrunc <- spatstat.options("dpp.maxmatrix")^(1/dim) ## Extract spectral density specden <- dppspecden(model) truncrange <- 
dppspecdenrange(model)*max(Wscale) if(trunc>=1){ ## Integer truncation fixed by user. if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ trunc <- floor(trunc) index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } else{ ## Integer truncation calculated from user-specified precision. if(is.null(expnum)) stop("Cannot calculate truncation adaptively in a model without intensity parameter. Please specify trunc directly as a positive integer.") prec0 <- trunc trunc <- 1 prec <- 0 ## cat("truncation is being calculated adaptively. 
Current truncation:\n") while(prec<=prec0 && (2*trunc)<=maxtrunc && trunc<=truncrange){ trunc <- 2*trunc if(stationary){ ## Coordinates on axes: index1a <- c(rep(0,trunc),1:trunc) index2a <- c(1:trunc,rep(0,trunc)) ## Coordinates of ordinary points: index1 <- rep(1:trunc,trunc) index2 <- rep(1:trunc,each=trunc) ## Spectral densities: eigo <- specden(0) eiga <- specden(sqrt((index1a/Wscale[1])^2+(index2a/Wscale[2])^2)) eig <- specden(sqrt((index1/Wscale[1])^2+(index2/Wscale[2])^2)) prec <- (eigo+2*sum(eiga)+4*sum(eig))/expnum } else{ index <- do.call(expand.grid, replicate(dim, seq(-trunc,trunc), simplify=FALSE)) indexscaled <- index*matrix(1/Wscale, nrow(index), ncol(index), byrow = TRUE) if(model$isotropic){ eig <- specden(sqrt(rowSums(indexscaled^2))) } else{ eig <- specden(indexscaled) } prec <- sum(eig)/expnum } } ## cat("\n") if(prec tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) ## result$method <- c("Monte Carlo test of fitted Gibbs model", paste("based on", nsim, "repetitions of"), sub("Spatial", "spatial", result$method)) return(result) } PoissonTest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE, gibbsok=FALSE, modelname, covname) { if(missing(modelname)) modelname <- short.deparse(substitute(model)) if(missing(covname)) covname <- short.deparse(substitute(covariate)) test <- match.arg(test) stopifnot(is.mppm(model)) if(!gibbsok && !is.poisson.mppm(model)) stop("Only implemented for Poisson models") ## extract things from model data <- model$data npat <- model$npat Y <- data.mppm(model) if(fast) { ## extract original quadrature schemes and convert to point patterns QQ <- quad.mppm(model) PP <- lapply(QQ, union.quad) Zweights <- lapply(QQ, w.quad) } else Zweights <- list() ## `evaluate' covariate if(verbose) cat("Extracting covariate...") if(identical(covariate, "x")) covariate <- xcoord if(identical(covariate, 
"y")) covariate <- ycoord if(is.character(covariate)) { ## extract covariate with this name from data used to fit model if(!(covariate %in% names(data))) stop(paste("Model does not contain a covariate called", dQuote(covariate))) covname <- covariate covariate <- data[, covname, drop=TRUE] } else if(inherits(covariate, c("listof", "anylist"))) { if(length(covariate) != npat) stop(paste("Length of list of covariate values does not match", "number of point patterns in data of original model")) } else if(is.hyperframe(covariate)) { ## extract first column covariate <- covariate[,1L, drop=TRUE] if(length(covariate) != npat) stop(paste("Number of rows of covariate hyperframe does not match", "number of point patterns in data of original model")) } else if(is.function(covariate) || is.im(covariate)) { ## replicate to make a list covariate <- as.anylist(rep(list(covariate), npat)) } else stop(paste("Format of argument", sQuote("covariates"), "not understood")) if(verbose) { cat("done.\nComputing statistics for each pattern...") pstate <- list() } ## compile information for test from each row Zvalues <- ZX <- Win <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) XI <- Y[[i]] if(fast) PI <- PP[[i]] else WI <- XI$window covariateI <- covariate[[i]] if(is.im(covariateI)) { type <- "im" ## evaluate at data points ZXI <- if(interpolate) interp.im(covariateI, XI$x, XI$y) else covariateI[XI] if(fast) { ## covariate values for quadrature points ZI <- covariateI[PI] } else { ## covariate image inside window ZI <- covariateI[WI, drop=FALSE] ## corresponding mask WI <- as.owin(ZI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else if(is.function(covariateI)) { type <- "function" ## evaluate exactly at data points ZXI <- covariateI(XI$x, XI$y) if(fast) { ## covariate values for quadrature points ZI <- covariateI(PI$x, PI$y) } else { ## window WI <- as.mask(WI) ## covariate image inside window ZI <- as.im(covariateI, 
W=WI) ## pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else stop("covariate should be an image or a function(x,y)") ZX[[i]] <- ZXI if(fast) Zvalues[[i]] <- ZI else { Win[[i]] <- WI ## values of covariate in window Zvalues[[i]] <- allpixelvalues(ZI) } } if(verbose) cat("done.\nComputing predicted intensity...") ## compute predicted intensities trend <- if(fast) fitted(model, type="trend") else predict(model, type="trend", locations=Win, verbose=verbose)$trend if(verbose) cat("done.\nExtracting...") ## extract relevant values lambda <- if(fast) trend else lapply(trend, allpixelvalues) if(verbose) cat("done.\nPerforming test...") ## flatten to vectors lambda <- unlist(lambda) Zweights <- unlist(Zweights) Zvalues <- unlist(Zvalues) ZX <- unlist(ZX) if(length(lambda) != length(Zvalues)) stop("Internal error: mismatch between predicted values and Z values") if(length(Zvalues) != length(Zweights)) stop("Internal error: mismatch between Z values and Z weights") lambda <- lambda * Zweights ## form weighted cdf of Z values in window FZ <- ewcdf(Zvalues, lambda/sum(lambda)) ## Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } ## make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) ## evaluate at data points if(!jitter) U <- FZ(ZX) else { ## jitter observed values to avoid ties grain <- min(diff(sortunique(ZX)))/8 jit <- runif(length(ZX), min=0, max=grain) sgn <- sample(c(-1L,1L), length(ZX), replace=TRUE) sgn[ZX==min(xxx)] <- 1L sgn[ZX==max(xxx)] <- -1L U <- FZ(ZX + sgn*jit) } ## Test uniformity result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test") 
  ## label the test result with the covariate and model names
  result$data.name <-
    paste("predicted cdf of covariate",
          sQuote(paste(covname, collapse="")),
          "evaluated at data points of", sQuote(modelname))
  if(verbose) cat("done.\n")
  class(result) <- c("cdftest", class(result))
  ## save the ingredients of the test so that plot methods can
  ## reconstruct the fitted cdf and the transformed data values
  attr(result, "prep") <- list(Zvalues = Zvalues, lambda = lambda,
                               ZX = ZX, FZ = FZ, U = U, type = type)
  attr(result, "info") <- list(modelname = modelname, covname = covname)
  return(result)
}

cdf.test.mppm
})

spatstat/R/multistrauss.R0000644000176200001440000002043713333543255015234 0ustar liggesusers#
#
#
#    multistrauss.S
#
#    $Revision: 2.23 $	$Date: 2015/03/31 03:57:11 $
#
#    The multitype Strauss process
#
#    MultiStrauss()    create an instance of the multitype Strauss process
#                 [an object of class 'interact']
#
# -------------------------------------------------------------------
#

MultiStrauss <- local({

  # ......... define interaction potential

  ## MSpotential: pairwise interaction potential of the multitype Strauss
  ## process.  Evaluated for every pair (data point X[i], quadrature point
  ## U[j]) and every unordered pair of types, returning a logical array
  ## z[i,j,k] which is TRUE when the pair interacts under interaction k.
  MSpotential <- function(d, tx, tu, par) {
     # arguments:
     # d[i,j] distance between points X[i] and U[j]
     # tx[i]  type (mark) of point X[i]
     # tu[j]  type (mark) of point U[j]
     #
     # get matrix of interaction radii r[ , ]
     r <- par$radii
     #
     # get possible marks and validate
     if(!is.factor(tx) || !is.factor(tu))
       stop("marks of data and dummy points must be factor variables")
     lx <- levels(tx)
     lu <- levels(tu)
     if(length(lx) != length(lu) || any(lx != lu))
       stop("marks of data and dummy points do not have same possible levels")
     if(!identical(lx, par$types))
       stop("data and model do not have the same possible levels of marks")
     if(!identical(lu, par$types))
       stop("dummy points and model do not have the same possible levels of marks")
     # ensure factor levels are acceptable for column names (etc)
     lxname <- make.names(lx, unique=TRUE)
     # list all UNORDERED pairs of types to be checked
     # (the interaction must be symmetric in type, and scored as such)
     # NA entries of 'radii' switch the corresponding interaction off
     uptri <- (row(r) <= col(r)) & !is.na(r)
     mark1 <- (lx[row(r)])[uptri]
     mark2 <- (lx[col(r)])[uptri]
     # corresponding names
     mark1name <- (lxname[row(r)])[uptri]
     mark2name <- (lxname[col(r)])[uptri]
     ## canonical variable name for each unordered pair of types,
     ## e.g. "markAxB" for the interaction between types A and B
     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
     vname <- paste("mark", vname, sep="")
     npairs <- length(vname)
     # list all ORDERED pairs of types to be checked
     # (to save writing the same code twice)
     different <- mark1 != mark2
     mark1o <- c(mark1, mark2[different])
     mark2o <- c(mark2, mark1[different])
     nordpairs <- length(mark1o)
     # unordered pair corresponding to each ordered pair
     ucode <- c(1:npairs, (1:npairs)[different])
     #
     # create logical array for result
     z <- array(FALSE, dim=c(dim(d), npairs),
                dimnames=list(character(0), character(0), vname))
     # go....
     if(length(z) > 0) {
       # assemble the relevant interaction distance for each pair of points
       rxu <- r[ tx, tu ]
       # apply relevant threshold to each pair of points
       str <- (d <= rxu)
       # assign str[i,j] -> z[i,j,k] where k is relevant interaction code
       for(i in 1:nordpairs) {
         # data points with mark m1
         Xsub <- (tx == mark1o[i])
         # quadrature points with mark m2
         Qsub <- (tu == mark2o[i])
         # assign
         z[Xsub, Qsub, ucode[i]] <- str[Xsub, Qsub]
       }
     }
     return(z)
   }
   #### end of 'pot' function ####

  # ........ auxiliary functions ..............
  ## delMS: delete the interactions indicated by 'which' (set their radii
  ## to NA); if nothing remains, the model degenerates to a Poisson process.
  delMS <- function(which, types, radii) {
    radii[which] <- NA
    if(all(is.na(radii))) return(Poisson())
    return(MultiStrauss(types, radii))
  }

  # Set up basic object except for family and parameters
  BlankMSobject <-
  list(
    name     = "Multitype Strauss process",
    creator  = "MultiStrauss",
    family   = "pairwise.family", # evaluated later
    pot      = MSpotential,
    par      = list(types=NULL, radii = NULL), # to be filled in later
    parnames = c("possible types", "interaction distances"),
    pardesc  = c("vector of possible types", "matrix of hardcore distances"),
    hasInf   = FALSE,
    ## selfstart: fill in 'types' from the data pattern if not yet specified
    selfstart = function(X, self) {
      if(!is.null(self$par$types)) return(self)
      types <- levels(marks(X))
      MultiStrauss(types=types,radii=self$par$radii)
    },
    ## init: validate the 'types' and 'radii' parameters
    init = function(self) {
      types <- self$par$types
      if(!is.null(types)) {
        radii <- self$par$radii
        nt <- length(types)
        MultiPair.checkmatrix(radii, nt, sQuote("radii"))
        if(length(types) == 0)
          stop(paste("The", sQuote("types"),"argument should be",
                     "either NULL or a vector of all possible types"))
        if(anyNA(types))
          stop("NA's not allowed in types")
        if(is.factor(types)) {
          types <- levels(types)
        } else {
          types <- levels(factor(types, levels=types))
        }
      }
    },
    update = NULL, # default OK
    ## print: display the interaction parameters
    print = function(self) {
      radii <- self$par$radii
      types <- self$par$types
      if(waxlyrical('gory')) {
        splat(nrow(radii), "types of points")
        if(!is.null(types)) {
          splat("Possible types: ")
          print(noquote(types))
        } else splat("Possible types:\t not yet determined")
      }
      cat("Interaction radii:\n")
      print(signif(radii, getOption("digits")))
      invisible()
    },
    ## interpret: map fitted canonical coefficients to the symmetric
    ## matrix of interaction parameters gamma[i,j] = exp(coef)
    interpret = function(coeffs, self) {
      # get possible types
      typ <- self$par$types
      ntypes <- length(typ)
      # get matrix of Strauss interaction radii
      r <- self$par$radii
      # list all unordered pairs of types
      uptri <- (row(r) <= col(r)) & (!is.na(r))
      index1 <- (row(r))[uptri]
      index2 <- (col(r))[uptri]
      npairs <- length(index1)
      # extract canonical parameters; shape them into a matrix
      gammas <- matrix(, ntypes, ntypes)
      dimnames(gammas) <- list(typ, typ)
      expcoef <- exp(coeffs)
      # fill both triangles: the interaction is symmetric in type
      gammas[ cbind(index1, index2) ] <- expcoef
      gammas[ cbind(index2, index1) ] <- expcoef
      #
      return(list(param=list(gammas=gammas),
                  inames="interaction parameters gamma_ij",
                  printable=dround(gammas)))
    },
    ## valid: the fitted model is valid iff every estimated gamma is
    ## finite and at most 1 (the Strauss process requires gamma <= 1)
    valid = function(coeffs, self) {
       # interaction parameters gamma[i,j]
       gamma <- (self$interpret)(coeffs, self)$param$gammas
       # interaction radii
       radii <- self$par$radii
       # parameters to estimate
       required <- !is.na(radii)
       gr <- gamma[required]
       return(all(is.finite(gr) & gr <= 1))
    },
    ## project: map an invalid fitted model to the nearest valid model(s)
    ## by deleting offending interactions
    project = function(coeffs, self) {
      # interaction parameters gamma[i,j]
      gamma <- (self$interpret)(coeffs, self)$param$gammas
      # interaction radii and types
      radii <- self$par$radii
      types <- self$par$types
      # problems?
      required <- !is.na(radii)
      okgamma  <- is.finite(gamma) & (gamma <= 1)
      naughty  <- required & !okgamma
      #
      if(!any(naughty)) return(NULL)
      if(spatstat.options("project.fast")) {
        # remove ALL naughty terms simultaneously
        return(delMS(naughty, types, radii))
      } else {
        # present a list of candidates, one per offending unordered pair
        rn <- row(naughty)
        cn <- col(naughty)
        uptri <- (rn <= cn)
        upn <- uptri & naughty
        rowidx <- as.vector(rn[upn])
        colidx <- as.vector(cn[upn])
        # 2-row index matrix selecting both (i,j) and (j,i) entries
        matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) }
        mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex)
        inters <- lapply(mats, delMS, types=types, radii=radii)
        return(inters)
      }
    },
    ## irange: range of interaction, i.e. the largest radius whose
    ## interaction is (numerically) active
    irange = function(self, coeffs=NA, epsilon=0, ...)
    {
      r <- self$par$radii
      active <- !is.na(r)
      ## an interaction is inactive if its fitted gamma is (numerically) 1
      if(any(!is.na(coeffs))) {
        gamma <- (self$interpret)(coeffs, self)$param$gammas
        gamma[is.na(gamma)] <- 1
        active <- active & (abs(log(gamma)) > epsilon)
      }
      if(any(active)) return(max(r[active])) else return(0)
    },
    version=NULL # to be added
  )

  class(BlankMSobject) <- "interact"

  # finally create main function

  ## MultiStrauss: user-level constructor.
  ##   radii - matrix of interaction radii (NA = no interaction)
  ##   types - optional vector of all possible types
  MultiStrauss <- function(radii, types=NULL) {
    if((missing(radii) || !is.matrix(radii)) && is.matrix(types)) {
      ## old syntax: (types=NULL, radii)
      radii <- types
      types <- NULL
    }
    ## a zero radius means "no interaction"
    radii[radii == 0] <- NA
    out <- instantiate.interact(BlankMSobject, list(types=types, radii = radii))
    if(!is.null(types))
      dimnames(out$par$radii) <- list(types, types)
    return(out)
  }

  MultiStrauss <- intermaker(MultiStrauss, BlankMSobject)

  MultiStrauss
})

spatstat/R/sdr.R0000644000176200001440000002214413614463173013244 0ustar liggesusers#'
#'   sdr.R
#'
#'  Sufficient Dimension Reduction
#'
#'  Matlab original: Yongtao Guan
#'  Translated to R by: Suman Rakshit
#'  Adapted for spatstat: Adrian Baddeley
#'
#'  GNU Public Licence 2.0 || 3.0
#'
#'  $Revision: 1.15 $ $Date: 2020/01/30 05:10:49 $
#'

## sdr: generic for sufficient dimension reduction of spatial covariates
sdr <- function(X, covariates, ...) {
  UseMethod("sdr")
}

sdr.ppp <- local({

  ## sdr.ppp: method for point patterns.
  ##   X          - point pattern (class "ppp")
  ##   covariates - list of pixel images, one per covariate
  ##   method     - estimator of the central subspace
  ##   Dim1, Dim2 - target dimensions (Dim2 used by "TSE" only)
  ##   predict    - if TRUE, also return the reduced covariates as images
  sdr.ppp <- function(X, covariates,
                      method=c("DR", "NNIR", "SAVE", "SIR", "TSE"),
                      Dim1=1, Dim2=1, predict=FALSE, ...) {
    stopifnot(is.ppp(X))
    method <- match.arg(method)
    trap.extra.arguments(...)
    #' ensure 'covariates' is a list of compatible images
    if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im)))
      stop("Argument 'covariates' must be a list of images")
    nc <- length(covariates)
    if(nc == 0) stop("Need at least one covariate!")
    if(nc < Dim1 + (method == "TSE") * Dim2)
      stop(paste(if(method == "TSE") "Dim1 + Dim2" else "Dim1",
                 "must not exceed the number of covariates"),
           call.=FALSE)
    if(nc > 1 && !do.call(compatible, unname(covariates)))
      covariates <- do.call(harmonise, covariates)
    #' extract corresponding pixel values including NA's
    Ypixval <- sapply(lapply(covariates, as.matrix), as.vector)
    #' compute sample mean and covariance matrix
    m <- colMeans(Ypixval, na.rm=TRUE)
    V <- cov(Ypixval, use="complete")
    #' evaluate each image at point data locations
    YX <- sapply(covariates, safelook, Y=X)
    #' apply precomputed standardisation
    Zx <- t(t(YX) - m) %*% matrixinvsqrt(V)
    #' ready
    coordsX <- coords(X)
    result <- switch(method,
                     DR   = calc.DR(COV=V, z=Zx, Dim=Dim1),
                     NNIR = calc.NNIR(COV=V, z=Zx, pos=coordsX, Dim=Dim1),
                     SAVE = calc.SAVE(COV=V, z=Zx, Dim=Dim1),
                     SIR  = calc.SIR(COV=V, z=Zx),
                     TSE  = calc.TSE(COV=V, z=Zx, pos=coordsX,
                                     Dim1=Dim1, Dim2=Dim2)
                     )
    #' attach covariate names to the basis matrix
    covnames <- names(covariates) %orifnull% paste0("Y", 1:nc)
    dimnames(result$B) <- list(covnames, paste0("B", 1:ncol(result$B)))
    if(method == "TSE") {
      result$M1 <- namez(result$M1)
      result$M2 <- namez(result$M2)
    } else {
      result$M <- namez(result$M)
    }
    if(predict)
      result$Y <- sdrPredict(covariates, result$B)
    return(result)
  }

  ## safelook: image lookup tolerating points on pixel boundaries
  safelook <- function(Z, Y, ...) {
    safelookup(Z, Y, ...)
} namez <- function(M, prefix="Z") { dimnames(M) <- list(paste0(prefix, 1:nrow(M)), paste0(prefix, 1:ncol(M))) return(M) } sdr.ppp }) sdrPredict <- function(covariates, B) { if(!is.matrix(B)) { if(is.list(B) && is.matrix(BB <- B$B)) B <- BB else stop("B should be a matrix, or the result of a call to sdr()", call.=FALSE) } if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im))) stop("Argument 'covariates' must be a list of images") stopifnot(nrow(B) == length(covariates)) result <- vector(mode="list", length=ncol(B)) for(j in seq_along(result)) { cj <- as.list(B[,j]) Zj <- mapply("*", cj, covariates, SIMPLIFY=FALSE) result[[j]] <- im.apply(Zj, sum) } names(result) <- colnames(B) return(as.solist(result)) } ##............ DR (Directional Regression) .......................... calc.DR <- function(COV, z, Dim){ ## Description: Naive Directional Regression Method ## Input: ## COV - cov{X(s)} ## z - standardized X(s) on SPP locations ## Dim - the CS dimension ## Output: ## B - the estimated CS basis ## M - the kernel matrix ss <- nrow(z) ncov <- ncol(z) ## M1 <- (t(z) %*% z)/ss - diag(1,ncov) M1 <- crossprod(z)/ss - diag(1,ncov) M1 <- M1 %*% M1 # the SAVE kernel covMean <- matrix(colMeans(z),ncol=1) M2 <- covMean %*% t(covMean) M3 <- M2 * (base::norm(covMean, type="2"))^2 # the SIR kernel M2 <- M2 %*% M2 # the SIR-2 kernel M <- (M1 + M2 + M3)/3 # the DR kernel SVD <- svd(M) B <- SVD$u[,1:Dim] B <- matrixinvsqrt(COV) %*% B # back to original scale return(list(B=B, M=M)) } ## ............ NNIR (Nearest Neighbor Inverse Regression) ........... 
calc.NNIR <- function(COV, z, pos, Dim) {
  ## Nearest Neighbour Inverse Regression.
  ##   COV - covariance matrix of X(s)
  ##   z   - standardised X(s) at the point pattern locations
  ##   pos - coordinates of the point pattern events
  ##   Dim - dimension of the central subspace
  ## Returns list(B, M): basis estimate and kernel matrix.
  n <- nrow(z)
  neighbour <- nnwhich(pos)              # nearest neighbour of each point
  dirs <- z - z[neighbour, , drop=FALSE] # empirical directions
  ## kernel = inverse of the mean outer product of the directions
  M <- solve(sumouter(dirs)/n)
  B <- matrixinvsqrt(COV) %*% svd(M)$u[, 1:Dim, drop=FALSE]
  return(list(B=B, M=M))
}

## ........... SAVE (Sliced Average Variance Estimation) ...........

calc.SAVE <- function(COV, z, Dim){
  ## Sliced Average Variance Estimation.
  ##   COV - covariance matrix of X(s)
  ##   z   - standardised X(s) at the point pattern locations
  ##   Dim - dimension of the central subspace
  ## Returns list(B, M): basis estimate and kernel matrix.
  p <- ncol(z)
  dev <- diag(1,p) - cov(z)
  M <- dev %*% dev
  B <- matrixinvsqrt(COV) %*% (svd(M)$u[,1:Dim])
  return(list(B=B, M=M))
}

##.......... SIR (Sliced Inverse Regression) ......................

calc.SIR <- function(COV, z){
  ## Sliced Inverse Regression (one-dimensional estimate).
  ##   COV - covariance matrix of X(s)
  ##   z   - standardised X(s) at the point pattern locations
  ## Returns list(B, M): unit-length basis vector and kernel matrix.
  zbar <- colMeans(z)
  direction <- matrixinvsqrt(COV) %*% zbar      # SIR estimate
  B <- direction/sqrt(sum(direction^2))         # normalise to unit length
  M <- zbar %*% t(zbar)                         # kernel matrix
  return(list(B=B, M=M))
}

## ............. TSE (Two-Step Estimation) ....................

calc.TSE <- function(COV, z, pos, Dim1, Dim2) {
  ## Two-Step Estimation: DR for S1, then constrained NNIR for S2.
  ##   COV  - covariance matrix of X(s)
  ##   z    - standardised X(s) at the point pattern locations
  ##   pos  - coordinates of the point pattern events
  ##   Dim1 - dimension of S1;  Dim2 - dimension of S2
  ## Returns list(B, M1, M2): B's first Dim1 columns estimate S1 and the
  ## remaining Dim2 columns estimate S2 (a zero column marks a null space);
  ## M1 is the DR kernel; M2 the (possibly constrained) NNIR kernel.
  p <- ncol(z)
  fitDR   <- calc.DR(COV, z, p)          # full-dimensional DR estimate
  fitNNIR <- calc.NNIR(COV, z, pos, p)   # full-dimensional NNIR estimate
  M1 <- fitDR$M
  M2 <- fitNNIR$M
  if(Dim1 > 0) {
    ## leading DR directions estimate S1
    B1 <- svd(M1)$u[ , 1:Dim1, drop=FALSE]
    ## project the NNIR kernel onto the orthogonal complement of S1
    Q <- diag(1, p) - B1 %*% solve(crossprod(B1)) %*% t(B1)
    M2 <- Q %*% M2 %*% Q
  } else {
    B1 <- matrix(0, p, 1)
  }
  if(Dim2 > 0) {
    B2 <- svd(M2)$u[ , 1:Dim2, drop=FALSE]  # basis from the updated M2
  } else {
    B2 <- matrix(0, p, 1)
  }
  B <- matrixinvsqrt(COV) %*% cbind(B1,B2)
  return(list(B=B, M1=M1, M2=M2))
}

## ////////////////// ADDITIONAL FUNCTIONS /////////////////////

subspaceDistance <- function(B0,B1) {
  ## ======================================================== #
  ## Evaluate the distance between the two linear spaces S(B0) and S(B1).
  ## The measure used is the one proposed by Li et al. (2004).
## ======================================================== # stopifnot(is.matrix(B0)) stopifnot(is.matrix(B1)) ## Proj0 <- B0 %*% solve((t(B0) %*% B0)) %*% t(B0) # Proj matrix on S(B0) Proj0 <- B0 %*% solve(crossprod(B0)) %*% t(B0) # Proj matrix on S(B0) lam <- svd(B1) # check whether B1 is singular U <- lam$u D <- lam$d # V <- lam$v B2 <- U[, D > 1e-09] # keep non-singular directions Proj1 <- B2 %*% solve((t(B2) %*% B2)) %*% t(B2) # Proj matrix on S(B.hat) Svd <- svd(Proj0 - Proj1) # Do svd for P0-P1 dist <- max(abs(Svd$d)) # Get the maximum absolute svd value return(dist) } dimhat <- function(M){ #' Description: Maximum Descent Estimator for CS Dim #' Input: #' M - the estimated kernel matrix #' Output: #' dimhat - the estimated CS dim (assume dim>0) stopifnot(is.matrix(M)) ncov <- ncol(M) # predictor dimension maxdim <- max((ncov-1), 5) # maximum structure dimension SVD <- svd(M) # svd of kernel matrix lam <- SVD$d eps <- 1e-06 lam <- lam + rep(eps,ncov) # add ridge effect lam1 <- lam[-ncov] lam2 <- lam[-1] dif <- lam1/lam2 dif <- dif[1 : maxdim] # the magnitude of drop retval <- which.max(dif) # find Maximum Descent estimator return(retval) } spatstat/R/sigtrace.R0000644000176200001440000001430413333543255014252 0ustar liggesusers# # sigtrace.R # # $Revision: 1.10 $ $Date: 2016/02/11 09:36:11 $ # # Significance traces # dclf.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=2) mad.sigtrace <- function(X, ...) 
  mctest.sigtrace(X, ..., exponent=Inf)

## mctest.sigtrace: significance trace of a Monte Carlo deviation test.
##   X           - point pattern, fitted model, or precomputed envelope
##   fun         - summary function (ignored if X already has an envelope)
##   exponent    - exponent of the deviation statistic (2=DCLF, Inf=MAD)
##   interpolate - use kernel-smoothed (interpolated) p-values
##   alpha       - significance threshold drawn on the plot
##   confint     - add a confidence interval for the p-value
##   rmin        - left endpoint of the interval of r values
## Returns an "fv" object giving the p-value as a function of R.
mctest.sigtrace <- function(X, fun=Lest, ...,
                            exponent=1, interpolate=FALSE, alpha=0.05,
                            confint=TRUE, rmin=0) {
  check.1.real(exponent)
  explain.ifnot(exponent >= 0)
  ## if X already contains an envelope, reuse its summary function
  if(missing(fun) && inherits(X, c("envelope", "hasenvelope"))) fun <- NULL
  ## deviation statistics for data and simulations, for each interval [rmin, R]
  Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent)
  R       <- Z$R
  devdata <- Z$devdata
  devsim  <- Z$devsim
  result <- mctestSigtraceEngine(R, devdata, devsim,
                                 interpolate=interpolate,
                                 confint=confint, alpha=alpha,
                                 exponent=exponent, unitname=unitname(X))
  result <- hasenvelope(result, Z$envelope) # envelope may be NULL
  return(result)
}

mctestSigtraceEngine <- local({

  ## mctestSigtraceEngine: compute the p-value trace from deviation values.
  ##   R       - right endpoints of the intervals
  ##   devdata - observed deviation for each R
  ##   devsim  - matrix of simulated deviations (rows = R, cols = simulations)
  mctestSigtraceEngine <- function(R, devdata, devsim, ...,
                                   interpolate=FALSE, confint=TRUE,
                                   alpha=0.05, exponent=2, unitname=NULL) {
    nsim <- ncol(devsim)
    if(!interpolate) {
      #' Monte Carlo p-value: rank of the observed deviation among the
      #' simulated ones (ties counted half)
      datarank <- apply(devdata < devsim, 1, sum) +
        apply(devdata == devsim, 1, sum)/2 + 1
      pvalue <- datarank/(nsim+1)
    } else {
      #' interpolated p-value (kernel-smoothed tail probability)
      devs <- cbind(devdata, devsim)
      pvalue <- apply(devs, 1, rowwise.interp.tailprob)
    }
    if(!confint) {
      #' create fv object without confidence interval
      p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha),
              argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R,
              desc = c("Interval endpoint R",
                       "calculated p-value %s",
                       "threshold for significance"),
              labl=c("R", "%s(R)", paste(alpha)),
              unitname = unitname, fname = "p")
      fvnames(p, ".") <- c("pest", "alpha")
    } else {
      # confidence interval
      if(!interpolate) {
        #' Agresti-Coull confidence interval for a binomial proportion
        successes <- datarank - 1
        trials    <- nsim
        z <- qnorm(1 - (1-0.95)/2)
        nplus <- trials + z^2
        pplus <- (successes + z^2/2)/nplus
        sigmaplus <- sqrt(pplus * (1-pplus)/nplus)
        lo <- pplus - z * sigmaplus
        hi <- pplus + z * sigmaplus
      } else {
        #' confidence interval by delta method
        pSE <- apply(devs, 1, rowwise.se)
        z <- qnorm(1 - (1-0.95)/2)
        lo <- pmax(0, pvalue - z * pSE)
        hi <- pmin(1, pvalue + z * pSE)
      }
      #' create fv object with confidence interval
      p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha, lo=lo, hi=hi),
              argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R,
              desc = c("Interval endpoint R",
                       "calculated p-value %s",
                       "threshold for significance",
                       "lower 95%% limit for p-value",
                       "upper 95%% limit for p-value"),
              labl=c("R", "%s(R)", paste(alpha), "lo(R)", "hi(R)"),
              unitname = unitname, fname = "p")
      fvnames(p, ".") <- c("pest", "alpha", "lo", "hi")
      fvnames(p, ".s") <- c("lo", "hi")
    }
    return(p)
  }

  ## interpolated p-value: smoothed tail probability of q among values x
  interpol.tailprob <- function(x, q) {
    sigma <- bw.nrd0(x)
    mean(pnorm(q, mean=x, sd=sigma, lower.tail=FALSE))
  }
  ## row layout is c(observed, simulated...)
  rowwise.interp.tailprob <- function(x) {
    interpol.tailprob(x[-1], x[1])
  }
  ## estimated SE of the interpolated p-value
  interpol.se <- function(x, q) {
    sigma <- bw.nrd0(x)
    z <- density(x, sigma)
    v <- mean(z$y * pnorm(q, mean=z$x, sd=sigma, lower.tail=FALSE)^2) *
      diff(range(z$x))
    sqrt(v)/length(x)
  }
  rowwise.se <- function(x) {
    interpol.se(x[-1], x[1])
  }

  mctestSigtraceEngine
})

## dg.sigtrace: significance trace of the Dao-Genton adjusted Monte Carlo
## test: p-value traces are computed for the data (nsim simulations) and
## for each first-level simulation (nsimsub simulations each), and the
## data's trace is ranked among the simulated ones.
dg.sigtrace <- function(X, fun=Lest, ...,
                        exponent=2, nsim=19, nsimsub=nsim-1,
                        alternative=c("two.sided", "less", "greater"),
                        rmin=0, leaveout=1,
                        interpolate=FALSE, confint=TRUE, alpha=0.05,
                        savefuns=FALSE, savepatterns=FALSE,
                        verbose=FALSE) {
  alternative <- match.arg(alternative)
  ## simulations are evaluated in the caller's frame
  env.here <- sys.frame(sys.nframe())
  if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub))
    stop("nsim and nsimsub must be relatively prime")
  ## generate or extract simulated patterns and functions
  if(verbose) cat("Generating first-level data...")
  E <- envelope(X, fun=fun, ..., nsim=nsim,
                savepatterns=TRUE, savefuns=TRUE,
                verbose=verbose, envir.simul=env.here)
  ## get first level MC test significance trace
  if(verbose) cat("Computing significance trace...")
  T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim,
                        exponent=exponent, rmin=rmin,
                        alternative=alternative,
                        leaveout=leaveout,
                        interpolate=interpolate,
                        confint=FALSE, verbose=verbose, ...)
  R    <- T1$R
  phat <- T1$pest
  ## second level traces, one per simulated pattern
  if(verbose) cat(" Done.\nGenerating second-level data... [silently] ..")
  Pat <- attr(E, "simpatterns")
  T2list <- lapply(Pat,
                   mctest.sigtrace,
                   fun=fun, nsim=nsimsub,
                   exponent=exponent, rmin=rmin,
                   alternative=alternative,
                   leaveout=leaveout,
                   interpolate=interpolate,
                   confint=FALSE, verbose=FALSE, ...)
  phati <- sapply(T2list, getElement, name="pest")
  ## Dao-Genton p-value: rank phat among phati (negated so that small
  ## p-values count as extreme)
  if(verbose) cat(" Computing significance trace...")
  result <- mctestSigtraceEngine(R, -phat, -phati,
                                 interpolate=FALSE,
                                 confint=confint,
                                 exponent=exponent,
                                 alpha=alpha,
                                 unitname=unitname(X))
  if(verbose) cat(" Done.\n")
  if(savefuns || savepatterns)
    result <- hasenvelope(result, E)
  return(result)
}
spatstat/R/lurking.R0000644000176200001440000004707213415511724014131 0ustar liggesusers#  Lurking variable plot for arbitrary covariate.
#
#
# $Revision: 1.64 $ $Date: 2019/01/10 00:21:28 $
#

## lurking: generic for lurking variable plots
lurking <- function(object, ...)
{ UseMethod("lurking") } lurking.ppp <- lurking.ppm <- local({ cumsumna <- function(x) { cumsum(ifelse(is.na(x), 0, x)) } ## main function Lurking.ppm <- function(object, covariate, type="eem", cumulative=TRUE, ..., plot.it=TRUE, plot.sd=is.poisson(object), clipwindow=default.clipwindow(object), rv = NULL, envelope=FALSE, nsim=39, nrank=1, typename, covname, oldstyle=FALSE, check=TRUE, verbose=TRUE, nx=128, splineargs=list(spar=0.5), internal=NULL) { cl <- match.call() ## validate object if(is.ppp(object)) { X <- object object <- ppm(X ~1, forcefit=TRUE) dont.complain.about(X) } else verifyclass(object, "ppm") ## default name for covariate if(missing(covname) || is.null(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else NULL } ## handle secret data internal <- resolve.defaults(internal, list(saveworking=FALSE, Fisher=NULL, covrange=NULL)) saveworking <- internal$saveworking Fisher <- internal$Fisher # possibly from a larger model covrange <- internal$covrange if(!identical(envelope, FALSE)) { ## compute simulation envelope Xsim <- NULL if(!identical(envelope, TRUE)) { ## some kind of object Y <- envelope if(is.list(Y) && all(sapply(Y, is.ppp))) { Xsim <- Y envelope <- TRUE } else if(inherits(Y, "envelope")) { Xsim <- attr(Y, "simpatterns") if(is.null(Xsim)) stop("envelope does not contain simulated point patterns") envelope <- TRUE } else stop("Unrecognised format of argument: envelope") nXsim <- length(Xsim) if(missing(nsim) && (nXsim < nsim)) { warning(paste("Only", nXsim, "simulated patterns available")) nsim <- nXsim } } } ## may need to refit the model if(plot.sd && is.null(getglmfit(object))) object <- update(object, forcefit=TRUE, use.internal=TRUE) ## match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", 
inverse="inverse-lambda residuals", pearson="Pearson residuals") ## extract spatial locations Q <- quad.ppm(object) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) ## subset of quadrature points used to fit model subQset <- getglmsubset(object) if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q)) ################################################################# ## compute the covariate if(is.im(covariate)) { covvalues <- covariate[quadpoints, drop=FALSE] covrange <- covrange %orifnull% range(covariate, finite=TRUE) } else if(is.vector(covariate) && is.numeric(covariate)) { covvalues <- covariate covrange <- covrange %orifnull% range(covariate, finite=TRUE) if(length(covvalues) != quadpoints$n) stop("Length of covariate vector,", length(covvalues), "!=", quadpoints$n, ", number of quadrature points") } else if(is.expression(covariate)) { ## Expression involving covariates in the model glmdata <- getglmdata(object) ## Fix special cases if(is.null(glmdata)) { ## default glmdata <- data.frame(x=quadpoints$x, y=quadpoints$y) if(is.marked(quadpoints)) glmdata$marks <- marks(quadpoints) } ## ensure x and y are in data frame if(!all(c("x","y") %in% names(glmdata))) { glmdata$x <- quadpoints$x glmdata$y <- quadpoints$y } if(!is.null(object$covariates)) { ## Expression may involve an external covariate that's not used in model neednames <- all.vars(covariate) if(!all(neednames %in% colnames(glmdata))) { moredata <- mpl.get.covariates(object$covariates, quadpoints, covfunargs=object$covfunargs) use <- !(names(moredata) %in% colnames(glmdata)) glmdata <- cbind(glmdata, moredata[,use,drop=FALSE]) } } ## Evaluate expression sp <- parent.frame() covvalues <- eval(covariate, envir= glmdata, enclos=sp) covrange <- covrange %orifnull% range(covvalues, finite=TRUE) if(!is.numeric(covvalues)) stop("The evaluated covariate is not numeric") } else stop(paste("The", sQuote("covariate"), "should be either", "a pixel image, an expression or a numeric 
vector")) ################################################################# ## Secret exit if(identical(internal$getrange, TRUE)) return(covrange) ################################################################# ## Validate covariate values nbg <- is.na(covvalues) if(any(offending <- nbg & subQset)) { if(is.im(covariate)) warning(paste(sum(offending), "out of", length(offending), "quadrature points discarded because", ngettext(sum(offending), "it lies", "they lie"), "outside the domain of the covariate image")) else warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is NA", "they are NA"))) } ## remove points ok <- !nbg & subQset Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] ## adjust Z <- is.data(Q) wts <- w.quad(Q) if(any(is.infinite(covvalues) | is.nan(covvalues))) stop("covariate contains Inf or NaN values") ## Quadrature points marked by covariate value covq <- quadpoints %mark% as.numeric(covvalues) ################################################################ ## Residuals/marks attached to appropriate locations. ## Stoyan-Grabarnik weights are attached to the data points only. ## Others (residuals) are attached to all quadrature points. resvalues <- if(!is.null(rv)) rv else if(type=="eem") eem(object, check=check) else residuals.ppm(object, type=type, check=check) if(inherits(resvalues, "msr")) { ## signed or vector-valued measure resvalues <- resvalues$val if(ncol(as.matrix(resvalues)) > 1) stop("Not implemented for vector measures; use [.msr to split into separate components") } if(type != "eem") resvalues <- resvalues[ok] res <- (if(type == "eem") datapoints else quadpoints) %mark% as.numeric(resvalues) ## ... 
and the same locations marked by the covariate covres <- if(type == "eem") covq[Z] else covq ## NAMES OF THINGS ## name of the covariate if(is.null(covname)) covname <- if(is.expression(covariate)) covariate else "covariate" ## type of residual/mark if(missing(typename)) typename <- if(!is.null(rv)) "rv" else "" ####################################################################### ## START ANALYSIS ## Clip to subwindow if needed clip <- (!is.poisson.ppm(object) || !missing(clipwindow)) && !is.null(clipwindow) if(clip) { covq <- covq[clipwindow] res <- res[clipwindow] covres <- covres[clipwindow] clipquad <- inside.owin(quadpoints$x, quadpoints$y, clipwindow) wts <- wts[ clipquad ] } ## ----------------------------------------------------------------------- ## (A) EMPIRICAL CUMULATIVE FUNCTION ## based on data points if type="eem", otherwise on quadrature points ## Reorder the data/quad points in order of increasing covariate value ## and then compute the cumulative sum of their residuals/marks markscovres <- marks(covres) o <- fave.order(markscovres) covsort <- markscovres[o] cummark <- cumsumna(marks(res)[o]) if(anyDuplicated(covsort)) { right <- !duplicated(covsort, fromLast=TRUE) covsort <- covsort[right] cummark <- cummark[right] } ## we'll plot(covsort, cummark) in the cumulative case ## (B) THEORETICAL MEAN CUMULATIVE FUNCTION ## based on all quadrature points ## Range of covariate values covqmarks <- marks(covq) covrange <- covrange %orifnull% range(covqmarks, na.rm=TRUE) if(diff(covrange) > 0) { ## Suitable breakpoints cvalues <- seq(from=covrange[1L], to=covrange[2L], length.out=nx) csmall <- cvalues[1L] - diff(cvalues[1:2]) cbreaks <- c(csmall, cvalues) ## cumulative area as function of covariate values covclass <- cut(covqmarks, breaks=cbreaks) increm <- tapply(wts, covclass, sum) cumarea <- cumsumna(increm) } else { ## Covariate is constant cvalues <- covrange[1L] covclass <- factor(rep(1, length(wts))) cumarea <- increm <- sum(wts) } ## compute 
theoretical mean (when model is true) mean0 <- if(type == "eem") cumarea else numeric(length(cumarea)) ## we'll plot(cvalues, mean0) in the cumulative case ## (A'),(B') DERIVATIVES OF (A) AND (B) ## Required if cumulative=FALSE ## Estimated by spline smoothing (with x values jittered) if(!cumulative) { ## fit smoothing spline to (A) ss <- do.call(smooth.spline, append(list(covsort, cummark), splineargs) ) ## estimate derivative of (A) derivmark <- predict(ss, covsort, deriv=1)$y ## similarly for (B) ss <- do.call(smooth.spline, append(list(cvalues, mean0), splineargs) ) derivmean <- predict(ss, cvalues, deriv=1)$y } ## ----------------------------------------------------------------------- ## Store what will be plotted if(cumulative) { empirical <- data.frame(covariate=covsort, value=cummark) theoretical <- data.frame(covariate=cvalues, mean=mean0) } else { empirical <- data.frame(covariate=covsort, value=derivmark) theoretical <- data.frame(covariate=cvalues, mean=derivmean) } ## ------------------------------------------------------------------------ ## (C) STANDARD DEVIATION if desired ## (currently implemented only for Poisson) ## (currently implemented only for cumulative case) if(plot.sd && !is.poisson.ppm(object)) warning(paste("standard deviation is calculated for Poisson model;", "not valid for this model")) if(plot.sd && cumulative) { ## Fitted intensity at quadrature points lambda <- fitted.ppm(object, type="trend", check=check) lambda <- lambda[ok] ## Fisher information for coefficients asymp <- vcov(object,what="internals") Fisher <- Fisher %orifnull% asymp$fisher ## Local sufficient statistic at quadrature points suff <- asymp$suff suff <- suff[ok, ,drop=FALSE] ## Clip if required if(clip) { lambda <- lambda[clipquad] suff <- suff[clipquad, , drop=FALSE] ## suff is a matrix } ## First term: integral of lambda^(2p+1) switch(type, pearson={ varI <- cumarea }, raw={ ## Compute sum of w*lambda for quadrature points in each interval dvar <- tapply(wts * 
lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }, inverse=, ## same as eem eem={ ## Compute sum of w/lambda for quadrature points in each interval dvar <- tapply(wts / lambda, covclass, sum) ## tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 ## Cumulate varI <- cumsum(dvar) }) ## variance-covariance matrix of coefficients V <- try(solve(Fisher), silent=TRUE) if(inherits(V, "try-error")) { warning("Fisher information is singular; reverting to oldstyle=TRUE") oldstyle <- TRUE } if(any(dim(V) != ncol(suff))) { #' drop rows and columns nama <- colnames(suff) V <- V[nama, nama, drop=FALSE] } working <- NULL ## Second term: B' V B if(oldstyle) { varII <- 0 if(saveworking) working <- data.frame(varI=varI) } else { ## lamp = lambda^(p + 1) lamp <- switch(type, raw = lambda, pearson = sqrt(lambda), inverse =, eem = as.integer(lambda > 0)) ## Compute sum of w * lamp * suff for quad points in intervals Bcontrib <- as.vector(wts * lamp) * suff dB <- matrix(, nrow=length(cumarea), ncol=ncol(Bcontrib), dimnames=list(NULL, colnames(suff))) for(j in seq_len(ncol(dB))) dB[,j] <- tapply(Bcontrib[,j], covclass, sum, na.rm=TRUE) ## tapply() returns NA when the table is empty dB[is.na(dB)] <- 0 ## Cumulate columns B <- apply(dB, 2, cumsum) if(!is.matrix(B)) B <- matrix(B, nrow=1) ## compute B' V B for each i varII <- quadform(B, V) ## was: varII <- diag(B %*% V %*% t(B)) if(saveworking) working <- cbind(data.frame(varI=varI, varII=varII), as.data.frame(B)) } ## ## variance of residuals varR <- varI - varII ## trap numerical errors nbg <- (varR < 0) if(any(nbg)) { ran <- range(varR) varR[nbg] <- 0 relerr <- abs(ran[1L]/ran[2L]) nerr <- sum(nbg) if(relerr > 1e-6) { warning(paste(nerr, "negative", ngettext(nerr, "value (", "values (min="), signif(ran[1L], 4), ")", "of residual variance reset to zero", "(out of", length(varR), "values)")) } } theoretical$sd <- sqrt(varR) } ## if(envelope) { ## 
compute envelopes by simulation cl$plot.it <- FALSE cl$envelope <- FALSE cl$rv <- NULL if(is.null(Xsim)) Xsim <- simulate(object, nsim=nsim, progress=verbose) values <- NULL if(verbose) { cat("Processing.. ") state <- list() } for(i in seq_len(nsim)) { cl$object <- update(object, Xsim[[i]]) result.i <- eval(cl, parent.frame()) ## interpolate empirical values onto common sequence f.i <- with(result.i$empirical, approxfun(covariate, value, rule=2)) val.i <- f.i(theoretical$covariate) values <- cbind(values, val.i) if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") hilo <- if(nrank == 1) apply(values, 1, range) else apply(values, 1, orderstats, k=c(nrank, nsim-nrank+1)) theoretical$upper <- hilo[1L,] theoretical$lower <- hilo[2L,] } ## ---------------- RETURN COORDINATES ---------------------------- stuff <- list(empirical=empirical, theoretical=theoretical) attr(stuff, "info") <- list(typename=typename, cumulative=cumulative, covrange=covrange, covname=covname, oldstyle=oldstyle) if(saveworking) attr(stuff, "working") <- working class(stuff) <- "lurk" ## --------------- PLOT THEM ---------------------------------- if(plot.it) { plot(stuff, ...) 
return(invisible(stuff)) } else { return(stuff) } } Lurking.ppm }) # plot a lurk object plot.lurk <- function(x, ..., shade="grey") { xplus <- append(x, attr(x, "info")) with(xplus, { ## work out plot range mr <- range(0, empirical$value, theoretical$mean, na.rm=TRUE) if(!is.null(theoretical$sd)) mr <- range(mr, theoretical$mean + 2 * theoretical$sd, theoretical$mean - 2 * theoretical$sd, na.rm=TRUE) if(!is.null(theoretical$upper)) mr <- range(mr, theoretical$upper, theoretical$lower, na.rm=TRUE) ## start plot vname <- paste(if(cumulative)"cumulative" else "marginal", typename) do.call(plot, resolve.defaults( list(covrange, mr), list(type="n"), list(...), list(xlab=covname, ylab=vname))) ## Envelopes if(!is.null(theoretical$upper)) { Upper <- theoretical$upper Lower <- theoretical$lower } else if(!is.null(theoretical$sd)) { Upper <- with(theoretical, mean+2*sd) Lower <- with(theoretical, mean-2*sd) } else Upper <- Lower <- NULL if(!is.null(Upper) && !is.null(Lower)) { xx <- theoretical$covariate if(!is.null(shade)) { ## shaded envelope region shadecol <- if(is.colour(shade)) shade else "grey" xx <- c(xx, rev(xx)) yy <- c(Upper, rev(Lower)) do.call.matched(polygon, resolve.defaults(list(x=xx, y=yy), list(...), list(border=shadecol, col=shadecol))) } else { do.call(lines, resolve.defaults( list(x = xx, y=Upper), list(...), list(lty=3))) do.call(lines, resolve.defaults( list(x = xx, y = Lower), list(...), list(lty=3))) } } ## Empirical lines(value ~ covariate, empirical, ...) ## Theoretical mean do.call(lines, resolve.defaults( list(mean ~ covariate, theoretical), list(...), list(lty=2))) }) return(invisible(NULL)) } #' print a lurk object print.lurk <- function(x, ...) 
{ splat("Lurking variable plot (object of class 'lurk')") info <- attr(x, "info") with(info, { splat("Residual type: ", typename) splat("Covariate on horizontal axis: ", covname) splat("Range of covariate values: ", prange(covrange)) splat(if(cumulative) "Cumulative" else "Non-cumulative", "plot") }) has.bands <- !is.null(x$theoretical$upper) has.sd <- !is.null(x$theoretical$sd) if(!has.bands && !has.sd) { splat("No confidence bands computed") } else { splat("Includes", if(has.sd) "standard deviation for" else NULL, "confidence bands") if(!is.null(info$oldstyle)) splat("Variance calculation:", if(info$oldstyle) "old" else "new", "style") } return(invisible(NULL)) } spatstat/R/plot.fasp.R0000644000176200001440000001324113333543255014356 0ustar liggesusers# # plot.fasp.R # # $Revision: 1.29 $ $Date: 2016/02/11 10:17:12 $ # plot.fasp <- function(x, formule=NULL, ..., subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) { # plot dimensions which <- x$which if(transpose) which <- t(which) nrows <- nrow(which) ncols <- ncol(which) # Determine the overall title of the plot if(banner) { if(!is.null(title)) overall <- title else if(!is.null(x$title)) overall <- x$title else { if(prod(dim(which)) > 1) overall <- "Array of diagnostic functions" else overall <- "Diagnostic function" if(is.null(x$dataname)) overall <- paste(overall,".",sep="") else overall <- paste(overall," for ",x$dataname,".",sep="") } if(length(overall) > 1) overall <- paste(overall, collapse="\n") nlines <- if(!is.character(overall)) 1 else length(unlist(strsplit(overall, "\n"))) } # If no formula is given, look for a default formula in x: defaultplot <- is.null(formule) if(defaultplot && !is.null(x$default.formula)) formule <- x$default.formula if(!is.null(formule)) { # ensure formulae are given as character strings. 
formule <- FormatFaspFormulae(formule, "formule") # Number of formulae should match number of functions. nf <- length(formule) nfun <- length(x$fns) if(nf == 1 && nfun > 1) formule <- rep.int(formule, nfun) else if(nf != nfun) stop(paste("Wrong number of entries in", sQuote("formule"))) } # Check on the length of the subset argument. ns <- length(subset) if(ns > 1) { if(ns != length(x$fns)) stop("Wrong number of entries in subset argument.\n") msub <- TRUE } else msub <- FALSE # compute common x, y axis limits for all plots ? xlim <- ylim <- NULL if(samex || samey) { cat("Computing limits\n") # call plot.fv to determine plot limits of each panel for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(!is.na(k)) { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset lims <- plot(fun, fmla, subset=sub, limitsonly=TRUE) # update the limits if(samex) xlim <- range(xlim, lims$xlim) if(samey) ylim <- range(ylim, lims$ylim) } } } } ############################################################# # Set up the plot layout n <- nrows * ncols # panels 1..n = plot panels codes <- matrix(seq_len(n), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) # annotation as chosen if(outerlabels) { # column headings colhead.codes <- max(codes) + (1:ncols) colhead.height <- 0.2 codes <- rbind(colhead.codes, codes) heights <- c(colhead.height, heights) # row headings rowhead.codes <- max(codes) + (1:nrows) rowhead.width <- 0.2 codes <- cbind(c(0,rowhead.codes), codes) widths <- c(rowhead.width, widths) } if(banner) { # overall banner top.code <- max(codes) + 1 top.height <- 0.1 * (1+nlines) codes <- rbind(top.code, codes) heights <- c(top.height, heights) } # declare layout layout(codes, widths=widths, heights=heights) ############################################################ # Plot the function panels # # determine annotation colNames <- colnames(which) rowNames <- rownames(which) nrc 
<- max(nrows, ncols) ann.def <- par("ann") && (nrc <= 3) # determine margin around each panel if(is.null(mar.panel)) mar.panel <- if(nrc > 3 && outerlabels) rep.int(1/nrc, 4) else par("mar") opa <- par(mar=mar.panel, xpd=TRUE) # # plot each function for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(is.na(k)) plot(0,0,type='n',xlim=c(0,1), ylim=c(0,1),axes=FALSE,xlab='',ylab='', ...) else { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset main <- if(outerlabels) "" else if(nrows == 1) colNames[j] else if(ncols == 1) rowNames[i] else paren(paste(rowNames[i], colNames[j], sep=",")) do.call(plot, resolve.defaults(list(x=fun, fmla=fmla, subset=sub), list(...), list(xlim=xlim, ylim=ylim, main=main, legend=legend), list(ann=ann.def, axes=ann.def, frame.plot=TRUE))) } } } ############################################################ # # Annotation as selected if(outerlabels) { par(mar=rep.int(0,4), xpd=TRUE) # Plot the column headers for(j in 1:ncols) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,colNames[j], cex=cex.outerlabels) } # Plot the row labels for(i in 1:nrows) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,rowNames[i], srt=90, cex=cex.outerlabels) } } if(banner) { par(mar=rep.int(0,4), xpd=TRUE) # plot the banner plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0, overall, cex=cex) } # revert layout(1) par(opa) return(invisible(NULL)) } spatstat/R/morisita.R0000644000176200001440000000237113333543255014301 0ustar liggesusers# # morisita.R # # $Revision: 1.2 $ $Date: 2016/02/11 10:17:12 $ # miplot <- function(X, ...) 
{ Xname <- short.deparse(substitute(X)) X <- as.ppp(X) W <- X$window N <- X$n if(W$type != "rectangle") stop("Window of X is not a rectangle - Morisita index undefined") a <- min(diff(W$xrange), diff(W$yrange)) maxnquad <- floor(a/mean(nndist(X))) if(maxnquad <= 1) stop("Not enough points for a Morisita plot") mindex <- numeric(maxnquad) for(nquad in 1:maxnquad) { qq <- quadratcount(X, nquad, nquad) tt <- as.vector(as.table(qq)) mindex[nquad] <- length(tt) * sum(tt * (tt-1))/(N*(N-1)) } quadsize <- diameter(W)/(1:maxnquad) ok <- (quadsize <= a) quadsize <- quadsize[ok] mindex <- mindex[ok] unitinfo <- summary(unitname(W))$axis do.call(plot.default, resolve.defaults(list(quadsize, mindex), list(...), list(xlim=c(0,max(quadsize)), ylim=c(0,max(1, mindex)), xlab=paste("Diameter of quadrat", unitinfo), ylab="Morisita index", main=paste("Morisita plot for", Xname)))) abline(h=1, lty=2) return(invisible(NULL)) } spatstat/R/colourtables.R0000644000176200001440000004515713573571003015157 0ustar liggesusers# # colourtables.R # # support for colour maps and other lookup tables # # $Revision: 1.46 $ $Date: 2019/12/10 01:08:19 $ # colourmap <- function(col, ..., range=NULL, breaks=NULL, inputs=NULL, gamma=1) { if(nargs() == 0) { ## null colour map f <- lut() } else { ## validate colour data col2hex(col) ## store without conversion f <- lut(col, ..., range=range, breaks=breaks, inputs=inputs, gamma=gamma) } class(f) <- c("colourmap", class(f)) f } lut <- function(outputs, ..., range=NULL, breaks=NULL, inputs=NULL, gamma=1) { if(nargs() == 0) { ## null lookup table f <- function(x, what="value"){NULL} class(f) <- c("lut", class(f)) attr(f, "stuff") <- list(n=0) return(f) } if(is.null(gamma)) gamma <- 1 n <- length(outputs) given <- c(!is.null(range), !is.null(breaks), !is.null(inputs)) names(given) <- nama <- c("range", "breaks", "inputs") ngiven <- sum(given) if(ngiven == 0L) stop(paste("One of the arguments", commasep(sQuote(nama), "or"), "should be given")) if(ngiven > 1L) { 
offending <- nama[given] stop(paste("The arguments", commasep(sQuote(offending)), "are incompatible")) } if(!is.null(inputs)) { #' discrete set of input values mapped to output values if(n == 1L) { #' constant output n <- length(inputs) outputs <- rep(outputs, n) } else stopifnot(length(inputs) == length(outputs)) stuff <- list(n=n, discrete=TRUE, inputs=inputs, outputs=outputs) f <- function(x, what="value") { m <- match(x, stuff$inputs) if(what == "index") return(m) cout <- stuff$outputs[m] return(cout) } } else { #' range of numbers, or date/time interval, mapped to colours #' determine type of domain timeclasses <- c("Date", "POSIXt") is.time <- inherits(range, timeclasses) || inherits(breaks, timeclasses) #' determine breaks if(is.null(breaks)) { breaks <- gammabreaks(range, n + 1L, gamma) gamma.used <- gamma } else { stopifnot(length(breaks) >= 2) if(length(outputs) == 1L) { n <- length(breaks) - 1L outputs <- rep(outputs, n) } else stopifnot(length(breaks) == length(outputs) + 1L) if(!all(diff(breaks) > 0)) stop("breaks must be increasing") gamma.used <- NULL } stuff <- list(n=n, discrete=FALSE, breaks=breaks, outputs=outputs, gamma=gamma.used) #' use appropriate function if(is.time) { f <- function(x, what="value") { x <- as.vector(as.numeric(x)) z <- findInterval(x, stuff$breaks, rightmost.closed=TRUE) if(what == "index") return(z) cout <- stuff$outputs[z] return(cout) } } else { f <- function(x, what="value") { stopifnot(is.numeric(x)) x <- as.vector(x) z <- findInterval(x, stuff$breaks, rightmost.closed=TRUE) if(what == "index") return(z) cout <- stuff$outputs[z] return(cout) } } } attr(f, "stuff") <- stuff class(f) <- c("lut", class(f)) f } print.lut <- function(x, ...) 
{
  ## Print method for lookup tables (class 'lut').
  ## Colour maps (class 'colourmap') inherit from 'lut' and are
  ## dispatched here via print.colourmap below; only labels differ.
  if(inherits(x, "colourmap")) {
    tablename <- "Colour map"
    outputname <- "colour"
  } else {
    tablename <- "Lookup table"
    outputname <- "output"
  }
  ## The map's internal data live in the function's "stuff" attribute.
  stuff <- attr(x, "stuff")
  n <- stuff$n
  if(n == 0) {
    ## Null map: nothing to tabulate
    cat(paste("Null", tablename, "\n"))
    return(invisible(NULL))
  }
  if(stuff$discrete) {
    ## discrete inputs: show input -> output pairs
    cat(paste(tablename, "for discrete set of input values\n"))
    out <- data.frame(input=stuff$inputs, output=stuff$outputs)
  } else {
    ## numeric domain: label each half-open interval [b_i, b_{i+1})
    ## (the last interval is closed on the right, hence "]")
    b <- stuff$breaks
    cat(paste(tablename, "for the range", prange(b[c(1L,n+1L)]), "\n"))
    leftend <- rep("[", n)
    rightend <- c(rep(")", n-1), "]")
    inames <- paste(leftend, b[-(n+1L)], ", ", b[-1L], rightend, sep="")
    out <- data.frame(interval=inames, output=stuff$outputs)
  }
  colnames(out)[2L] <- outputname
  print(out)
  ## report the gamma correction exponent when one was used to
  ## generate the breakpoints (see gammabreaks in lut())
  if(!is.null(gamma <- stuff$gamma) && gamma != 1)
    cat(paste("Generated using gamma =", gamma, "\n"))
  invisible(NULL)
}

## Printing a colour map is identical to printing a lookup table;
## print.lut inspects the class itself to choose its labels.
print.colourmap <- function(x, ...) {
  NextMethod("print")
}

## Summary method: return the internal "stuff" list augmented with
## display labels, as an object of class 'summary.lut'
## (printed by print.summary.lut below).
summary.lut <- function(object, ...) {
  s <- attr(object, "stuff")
  if(inherits(object, "colourmap")) {
    s$tablename <- "Colour map"
    s$outputname <- "colour"
  } else {
    s$tablename <- "Lookup table"
    s$outputname <- "output"
  }
  class(s) <- "summary.lut"
  return(s)
}

print.summary.lut <- function(x, ...)
{ n <- x$n if(n == 0) { cat(paste("Null", x$tablename, "\n")) return(invisible(NULL)) } if(x$discrete) { cat(paste(x$tablename, "for discrete set of input values\n")) out <- data.frame(input=x$inputs, output=x$outputs) } else { b <- x$breaks cat(paste(x$tablename, "for the range", prange(b[c(1L,n+1L)]), "\n")) leftend <- rep("[", n) rightend <- c(rep(")", n-1L), "]") inames <- paste(leftend, b[-(n+1L)], ", ", b[-1L], rightend, sep="") out <- data.frame(interval=inames, output=x$outputs) } colnames(out)[2L] <- x$outputname print(out) } plot.colourmap <- local({ # recognised additional arguments to image.default() and axis() imageparams <- c("main", "asp", "sub", "axes", "ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub") axisparams <- c("cex", "cex.axis", "cex.lab", "col.axis", "col.lab", "font.axis", "font.lab", "las", "mgp", "xaxp", "yaxp", "tck", "tcl", "xpd") linmap <- function(x, from, to) { dFrom <- as.numeric(diff(from)) dTo <- as.numeric(diff(to)) b <- dTo/dFrom if(is.nan(b)) b <- 0 if(!is.finite(b)) stop("Internal error: Cannot map zero width interval") to[1L] + b * (x - from[1L]) } ensurenumeric <- function(x) { if(is.numeric(x)) x else as.numeric(x) } # rules to determine the ribbon dimensions when one dimension is given widthrule <- function(heightrange, separate, n, gap) { dh <- diff(heightrange) if(separate || dh == 0) 1 else dh/10 } heightrule <- function(widthrange, separate, n, gap) { dw <- diff(widthrange) if(dw == 0) 1 else (dw * (if(separate) (n + (n-1)*gap) else 10)) } plot.colourmap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE, labelmap=NULL, gap=0.25, add=FALSE, increasing=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) stuff <- attr(x, "stuff") col <- stuff$outputs n <- stuff$n if(n == 0) { ## Null map return(invisible(NULL)) } discrete <- stuff$discrete if(discrete) { check.1.real(gap, 
"In plot.colourmap") explain.ifnot(gap >= 0, "In plot.colourmap") } separate <- discrete && (gap > 0) if(is.null(labelmap)) { labelmap <- function(x) x } else if(is.numeric(labelmap) && length(labelmap) == 1L && !discrete) { labscal <- labelmap labelmap <- function(x) { x * labscal } } else stopifnot(is.function(labelmap)) if(is.null(increasing)) increasing <- !(discrete && vertical) reverse <- !increasing #' determine pixel entries 'v' and colour map breakpoints 'bks' #' to be passed to 'image.default' trivial <- FALSE if(!discrete) { # real numbers: continuous ribbon bks <- stuff$breaks rr <- range(bks) trivial <- (diff(rr) == 0) v <- if(trivial) rr[1] else seq(from=rr[1L], to=rr[2L], length.out=max(n+1L, 1024)) } else if(!separate) { # discrete values: blocks of colour, run together v <- (1:n) - 0.5 bks <- 0:n rr <- c(0,n) } else { # discrete values: separate blocks of colour vleft <- (1+gap) * (0:(n-1L)) vright <- vleft + 1 v <- vleft + 0.5 rr <- c(0, n + (n-1)*gap) } # determine position of ribbon or blocks of colour if(is.null(xlim) && is.null(ylim)) { u <- widthrule(rr, separate, n, gap) if(!vertical) { xlim <- rr ylim <- c(0,u) } else { xlim <- c(0,u) ylim <- rr } } else if(is.null(ylim)) { if(!vertical) ylim <- c(0, widthrule(xlim, separate, n, gap)) else ylim <- c(0, heightrule(xlim, separate, n, gap)) } else if(is.null(xlim)) { if(!vertical) xlim <- c(0, heightrule(ylim, separate, n, gap)) else xlim <- c(0, widthrule(ylim, separate, n, gap)) } # .......... initialise plot ............................... if(!add) do.call.matched(plot.default, resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) if(separate) { # ................ plot separate blocks of colour ................. 
if(reverse) col <- rev(col) if(!vertical) { # horizontal arrangement of blocks xleft <- linmap(vleft, rr, xlim) xright <- linmap(vright, rr, xlim) y <- ylim z <- matrix(1, 1L, 1L) for(i in 1:n) { x <- c(xleft[i], xright[i]) do.call.matched(image.default, resolve.defaults(list(x=ensurenumeric(x), y=ensurenumeric(y), z=z, add=TRUE), list(...), list(col=col[i])), extrargs=imageparams) } } else { # vertical arrangement of blocks x <- xlim ylow <- linmap(vleft, rr, ylim) yupp <- linmap(vright, rr, ylim) z <- matrix(1, 1L, 1L) for(i in 1:n) { y <- c(ylow[i], yupp[i]) do.call.matched(image.default, resolve.defaults(list(x=ensurenumeric(x), y=ensurenumeric(y), z=z, add=TRUE), list(...), list(col=col[i])), extrargs=imageparams) } } } else { # ................... plot ribbon image ............................. if(!vertical) { # horizontal colour ribbon x <- linmap(v, rr, xlim) y <- ylim z <- matrix(v, ncol=1L) } else { # vertical colour ribbon y <- linmap(v, rr, ylim) z <- matrix(v, nrow=1L) x <- xlim } #' deal with Date or integer values x <- ensurenumeric(x) if(!trivial) { if(any(diff(x) == 0)) x <- seq(from=x[1L], to=x[length(x)], length.out=length(x)) y <- ensurenumeric(y) if(any(diff(y) == 0)) y <- seq(from=y[1L], to=y[length(y)], length.out=length(y)) bks <- ensurenumeric(bks) if(any(diff(bks) <= 0)) { ok <- (diff(bks) > 0) bks <- bks[ok] col <- col[ok] } } if(reverse) col <- rev(col) do.call.matched(image.default, resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(breaks=ensurenumeric(bks), col=col)), extrargs=imageparams) } if(axis) { # ................. draw annotation .................. 
if(!vertical) { # add horizontal axis/annotation if(discrete) { la <- paste(labelmap(stuff$inputs)) at <- linmap(v, rr, xlim) } else { la <- prettyinside(rr) at <- linmap(la, rr, xlim) la <- labelmap(la) } if(reverse) at <- rev(at) # default axis position is below the ribbon (side=1) sidecode <- resolve.1.default("side", list(...), list(side=1L)) if(!(sidecode %in% c(1L,3L))) warning(paste("side =", sidecode, "is not consistent with horizontal orientation")) pos <- c(ylim[1L], xlim[1L], ylim[2L], xlim[2L])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side = 1L, pos = pos, at = ensurenumeric(at)), list(labels=la, lwd=lwd0)), extrargs=axisparams) } else { # add vertical axis if(discrete) { la <- paste(labelmap(stuff$inputs)) at <- linmap(v, rr, ylim) } else { la <- prettyinside(rr) at <- linmap(la, rr, ylim) la <- labelmap(la) } if(reverse) at <- rev(at) # default axis position is to the right of ribbon (side=4) sidecode <- resolve.1.default("side", list(...), list(side=4)) if(!(sidecode %in% c(2L,4L))) warning(paste("side =", sidecode, "is not consistent with vertical orientation")) pos <- c(ylim[1L], xlim[1L], ylim[2L], xlim[2L])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw labels horizontally if plotting separate blocks las0 <- if(separate) 1 else 0 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side=4, pos=pos, at=ensurenumeric(at)), list(labels=la, lwd=lwd0, las=las0)), extrargs=axisparams) } } invisible(NULL) } plot.colourmap }) # Interpolate a colourmap or lookup table defined on real numbers interp.colourmap <- function(m, n=512) { if(!inherits(m, "colourmap")) stop("m should be a colourmap") st <- attr(m, "stuff") if(st$discrete) { # discrete set of input values mapped to colours xknots <- st$inputs # Ensure the inputs are real numbers 
if(!is.numeric(xknots)) stop("Cannot interpolate: inputs are not numerical values") } else { # interval of real line, chopped into intervals, mapped to colours # Find midpoints of intervals bks <- st$breaks nb <- length(bks) xknots <- (bks[-1L] + bks[-nb])/2 } # corresponding colours in hsv coordinates yknots.hsv <- rgb2hsva(col2rgb(st$outputs, alpha=TRUE)) # transform 'hue' from polar to cartesian coordinate # divide domain into n equal intervals xrange <- range(xknots) xbreaks <- seq(xrange[1L], xrange[2L], length=n+1L) xx <- (xbreaks[-1L] + xbreaks[-(n+1L)])/2 # interpolate saturation and value in hsv coordinates yy.sat <- approx(x=xknots, y=yknots.hsv["s", ], xout=xx)$y yy.val <- approx(x=xknots, y=yknots.hsv["v", ], xout=xx)$y # interpolate hue by first transforming polar to cartesian coordinate yknots.hue <- 2 * pi * yknots.hsv["h", ] yy.huex <- approx(x=xknots, y=cos(yknots.hue), xout=xx)$y yy.huey <- approx(x=xknots, y=sin(yknots.hue), xout=xx)$y yy.hue <- (atan2(yy.huey, yy.huex)/(2 * pi)) %% 1 # handle transparency yknots.alpha <- yknots.hsv["alpha", ] if(all(yknots.alpha == 1)) { ## opaque colours: form using hue, sat, val yy <- hsv(yy.hue, yy.sat, yy.val) } else { ## transparent colours: interpolate alpha yy.alpha <- approx(x=xknots, y=yknots.alpha, xout=xx)$y ## form colours using hue, sat, val, alpha yy <- hsv(yy.hue, yy.sat, yy.val, yy.alpha) } # done f <- colourmap(yy, breaks=xbreaks) return(f) } interp.colours <- function(x, length.out=512) { y <- colourmap(x, range=c(0,1)) z <- interp.colourmap(y, length.out) oo <- attr(z, "stuff")$outputs return(oo) } tweak.colourmap <- local({ is.hex <- function(z) { is.character(z) && all(nchar(z, keepNA=TRUE) %in% c(7L,9L)) && identical(substr(z, 1L, 7L), substr(col2hex(z), 1L, 7L)) } tweak.colourmap <- function(m, col, ..., inputs=NULL, range=NULL) { if(!inherits(m, "colourmap")) stop("m should be a colourmap") if(is.null(inputs) && is.null(range)) stop("Specify either inputs or range") if(!is.null(inputs) && 
       !is.null(range)) stop("Do not specify both inputs and range")
    ## determine indices of colours to be changed
    if(!is.null(inputs)) {
      ## map each given input value to its index in the colour table
      ix <- m(inputs, what="index")
    } else {
      if(!(is.numeric(range) && length(range) == 2 && diff(range) > 0))
        stop("range should be a numeric vector of length 2 giving (min, max)")
      if(length(col2hex(col)) != 1L)
        stop("When range is given, col should be a single colour value")
      ## indices of all intervals covered by the given range
      ixr <- m(range, what="index")
      ix <- (ixr[1L]):(ixr[2L])
    }
    ## reassign colours
    st <- attr(m, "stuff")
    outputs <- st$outputs
    result.hex <- FALSE
    if(is.hex(outputs)) {
      ## convert replacement data to hex
      col <- col2hex(col)
      result.hex <- TRUE
    } else if(is.hex(col)) {
      ## convert existing data to hex
      outputs <- col2hex(outputs)
      result.hex <- TRUE
    } else if(!(is.character(outputs) && is.character(col))) {
      ## unrecognised format - convert both to hex
      outputs <- col2hex(outputs)
      col <- col2hex(col)
      result.hex <- TRUE
    }
    if(result.hex) {
      ## hex codes may be 7 or 9 characters (#RRGGBB or #RRGGBBAA);
      ## pad shorter codes with "FF" (opaque) so lengths are consistent
      outlen <- nchar(outputs)
      collen <- nchar(col)
      if(length(unique(c(outlen, collen))) > 1L) {
        ## convert all to 9 characters
        if(any(bad <- (outlen == 7)))
          outputs[bad] <- paste0(outputs[bad], "FF")
        if(any(bad <- (collen == 7)))
          col[bad] <- paste0(col[bad], "FF")
      }
    }
    ## Finally, replace
    outputs[ix] <- col
    st$outputs <- outputs
    attr(m, "stuff") <- st
    ## also update the copy of 'stuff' captured in the closure
    ## environment of m, so that calls m(...) see the new colours
    assign("stuff", st, envir=environment(m))
    return(m)
  }

  tweak.colourmap
})

## Extract the vector of output colours from a colour map.
colouroutputs <- function(x) {
  stopifnot(inherits(x, "colourmap"))
  attr(x, "stuff")$outputs
}

## Replacement function: reassign the output colours of a colour map,
## updating both the attribute and the closure's copy of 'stuff'
## (see tweak.colourmap above for the same synchronisation).
"colouroutputs<-" <- function(x, value) {
  stopifnot(inherits(x, "colourmap"))
  st <- attr(x, "stuff")
  col2hex(value) # validates colours
  st$outputs[] <- value
  attr(x, "stuff") <- st
  assign("stuff", st, envir=environment(x))
  return(x)
}
spatstat/R/evalcovar.R0000644000176200001440000004240313623714544014437 0ustar liggesusers#'
#' evalcovar.R
#'
#' evaluate covariate values at data points and at pixels
#'
#' $Revision: 1.33 $ $Date: 2020/02/21 08:45:16 $
#'
evalCovar <- function(model, covariate, ...)
{ UseMethod("evalCovar") } evalCovar.ppm <- local({ evalCovar.ppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), dimyx=NULL, eps=NULL, interpolate=TRUE, jitter=TRUE, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels ispois <- is.poisson(model) csr <- ispois && is.stationary(model) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Qname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=ispois, spacename="two dimensions") X <- data.ppm(model) W <- as.owin(model) #' explicit control of pixel resolution if(!is.null(dimyx) || !is.null(eps)) W <- as.mask(W, dimyx=dimyx, eps=eps) if(!is.null(subset)) { #' restrict to subset if required X <- X[subset] W <- W[subset, drop=FALSE] } #' evaluate covariate if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(model)) { #' ................... unmarked ....................... 
if(is.im(covariate)) { type <- "im" if(!interpolate) { #' look up covariate values ZX <- safelookup(covariate, X) } else { #' evaluate at data points by interpolation ZX <- interp.im(covariate, X$x, X$y) #' fix boundary glitches if(any(uhoh <- is.na(ZX))) ZX[uhoh] <- safelookup(covariate, X[uhoh]) } #' covariate values for pixels inside window Z <- covariate[W, drop=FALSE] #' corresponding mask W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- covariate(X$x, X$y) if(!all(is.finite(ZX))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' values of covariate in window Zvalues <- as.vector(Z[W, drop=TRUE]) #' corresponding fitted [conditional] intensity values lambda <- as.vector(predict(model, locations=W, type=lambdatype)[W, drop=TRUE]) #' pixel area (constant) pixelarea <- with(Z, xstep * ystep) } else { #' ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(X, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), times=length(possmarks)) names(covariate) <- as.character(possmarks) } #' if(is.list(covariate) && all(sapply(covariate, is.im))) { #' list of images type <- "im" if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' evaluate covariate at each data point ZX <- numeric(npts) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' look up covariate values values <- safelookup(covariate, X) } else { #' interpolate values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, X[ii][uhoh]) } ZX[ii] <- values } #' restrict covariate images to window Z <- solapply(covariate, "[", i=W, drop=FALSE) #' extract pixel locations and pixel values Zframes <- lapply(Z, as.data.frame) #' covariate values at each pixel inside window Zvalues <- unlist(lapply(Zframes, getElement, name="value")) #' pixel locations locn <- lapply(Zframes, getxy) #' tack on mark values for(k in seq_along(possmarks)) locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k])) loc <- do.call(rbind, locn) #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=loc, type=lambdatype) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- functioncaller(x=X$x, y=X$y, m=marx, f=covariate, ...) #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) } #' same window W <- as.mask(W) #' covariate in window Z <- list() for(k in seq_along(possmarks)) Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W, ...) 
Zvalues <- unlist(lapply(Z, pixelvalues)) #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=W, type=lambdatype) lambda <- unlist(lapply(lambda, pixelvalues)) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' collapse function body to single string covname <- singlestring(covname) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... #' apply jittering to avoid ties if(jitter) { nX <- length(ZX) dZ <- 0.3 * quantile(diff(sortunique(c(ZX, Zvalues))), 1/min(20, nX)) ZX <- ZX + rnorm(nX, sd=dZ) Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' lambda image(s) lambdaimage <- predict(model, locations=W, type=lambdatype) #' wrap up values <- list(Zimage = Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info)) } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} pixarea <- function(z) { z$xstep * z$ystep } npixdefined <- function(z) { sum(!is.na(z$v)) } pixelvalues <- function(z) { as.data.frame(z)[,3L] } getxy <- function(z) { z[,c("x","y")] } functioncaller <- function(x,y,m,f,...) 
{ nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") value <- if(nf == 2) f(x,y) else if(nf == 3) f(x,y,m) else f(x,y,m,...) return(value) } evalCovar.ppm }) evalCovar.lppm <- local({ evalCovar.lppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), eps=NULL, nd=1000, interpolate=TRUE, jitter=TRUE, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels ispois <- is.poisson(model) csr <- ispois && is.stationary(model) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Xname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=ispois, spacename="linear network") #' convert character covariate to function if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } #' extract model components X <- model$X fit <- model$fit #' L <- as.linnet(X) Q <- quad.ppm(fit) #' restrict to subset if required if(!is.null(subset)) { X <- X[subset] Q <- Q[subset] } isdat <- is.data(Q) U <- union.quad(Q) wt <- w.quad(Q) #' evaluate covariate if(!is.marked(model)) { #' ................... unmarked ....................... 
if(is.im(covariate)) { if(is.linim(covariate)) { type <- "linim" Zimage <- covariate } else { type <- "im" Zimage <- as.linim(covariate, L) } if(!interpolate) { #' look up covariate values at quadrature points Zvalues <- safelookup(covariate, U) } else { #' evaluate at quadrature points by interpolation Zvalues <- interp.im(covariate, U$x, U$y) #' fix boundary glitches if(any(uhoh <- is.na(Zvalues))) Zvalues[uhoh] <- safelookup(covariate, U[uhoh]) } #' extract data values ZX <- Zvalues[isdat] } else if(is.function(covariate)) { type <- "function" Zimage <- as.linim(covariate, L) #' evaluate exactly at quadrature points Zvalues <- covariate(U$x, U$y) if(!all(is.finite(Zvalues))) warning("covariate function returned NA or Inf values") #' extract data values ZX <- Zvalues[isdat] #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' corresponding fitted [conditional] intensity values lambda <- as.vector(predict(model, locations=U, type=lambdatype)) } else { #' ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(U, dfok=FALSE) possmarks <- levels(marx) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), length(possmarks)) names(covariate) <- possmarks } #' if(is.list(covariate) && all(sapply(covariate, is.im))) { #' list of images if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' determine type of data islinim <- sapply(covariate, is.linim) type <- if(all(islinim)) "linim" else "im" Zimage <- as.solist(covariate) Zimage[!islinim] <- lapply(Zimage[!islinim], as.linim, L=L) #' evaluate covariate at each data point by interpolation Zvalues <- numeric(npoints(U)) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' direct lookup values <- safelookup(covariate.k, U[ii]) } else { #' interpolation values <- interp.im(covariate.k, x=U$x[ii], y=U$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, U[ii][uhoh]) } Zvalues[ii] <- values } #' extract data values ZX <- Zvalues[isdat] #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=U, type=lambdatype) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at quadrature points Zvalues <- functioncaller(x=U$x, y=U$y, m=marx, f=covariate, ...) #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) 
} #' extract data values ZX <- Zvalues[isdat] #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=U, type=lambdatype) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' images Zimage <- list() for(k in seq_along(possmarks)) Zimage[[k]] <- as.linim(functioncaller, L=L, m=possmarks[k], f=covariate) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... #' apply jittering to avoid ties if(jitter) { nX <- length(ZX) dZ <- 0.3 * quantile(diff(sortunique(c(ZX, Zvalues))), 1/min(20, nX)) ZX <- ZX + rnorm(nX, sd=dZ) Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' lambda image(s) lambdaimage <- predict(model, type=lambdatype) #' restrict image to subset if(!is.null(subset)) { Zimage <- applySubset(Zimage, subset) lambdaimage <- applySubset(lambdaimage, subset) } #' wrap up values <- list(Zimage = Zimage, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = wt, ZX = ZX, type = type) return(list(values=values, info=info)) } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} functioncaller <- function(x,y,m,f,...) 
{ nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") value <- if(nf == 2) f(x,y) else if(nf == 3) f(x,y,m) else f(x,y,m,...) return(value) } applySubset <- function(X, subset) { if(is.im(X)) return(X[subset, drop=FALSE]) if(is.imlist(X)) return(solapply(X, "[", i=subset, drop=FALSE)) return(NULL) } evalCovar.lppm }) spatstat/R/density.lpp.R0000644000176200001440000002617213562463706014736 0ustar liggesusers#' #' density.lpp.R #' #' Method for 'density' for lpp objects #' #' Copyright (C) 2017 Greg McSwiggan and Adrian Baddeley #' density.lpp <- function(x, sigma=NULL, ..., weights=NULL, distance=c("path", "euclidean"), kernel="gaussian", continuous=TRUE, epsilon=1e-6, verbose=TRUE, debug=FALSE, savehistory=TRUE, old=FALSE) { stopifnot(inherits(x, "lpp")) distance <- match.arg(distance) if(distance == "euclidean") return(densityQuick.lpp(x, sigma, ..., kernel=kernel, weights=weights)) kernel <- match.kernel(kernel) if(continuous && (kernel == "gaussian") && !old) return(PDEdensityLPP(x, sigma, ..., weights=weights)) L <- as.linnet(x) # weights np <- npoints(x) if(is.null(weights)) { weights <- rep(1, np) } else { stopifnot(is.numeric(weights)) check.nvector(weights, np, oneok=TRUE) if(length(weights) == 1L) weights <- rep(weights, np) } # pixellate linear network Llines <- as.psp(L) linemask <- as.mask.psp(Llines, ...) 
## Continuation of density.lpp: discretise the network, deposit the
## kernel contribution of each data point on its own segment, and
## initialise the stack of kernel 'tails' to be propagated through
## the network vertices.
lineimage <- as.im(linemask, value=0)
# extract pixel centres
xx <- raster.x(linemask)
yy <- raster.y(linemask)
mm <- linemask$m
xx <- as.vector(xx[mm])
yy <- as.vector(yy[mm])
pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE)
pixdf <- data.frame(xc=xx, yc=yy)
# project pixel centres onto lines
p2s <- project2segment(pixelcentres, Llines)
projloc <- as.data.frame(p2s$Xproj)
projmap <- as.data.frame(p2s[c("mapXY", "tp")])
projdata <- cbind(pixdf, projloc, projmap)
# initialise pixel values
values <- rep(0, nrow(pixdf))
# Extract local coordinates of data
n <- npoints(x)
coo <- coords(x)
seg <- coo$seg
tp <- coo$tp
# lengths of network segments
Llengths <- lengths.psp(Llines)
# initialise stack
stack <- data.frame(seg=integer(0), from=logical(0),
                    distance=numeric(0), weight=numeric(0),
                    generation=integer(0))
# process each data point
for(i in seq_len(n)) {
  segi <- seg[i]
  tpi <- tp[i]
  len <- Llengths[segi]
  # evaluate kernel on segment containing x[i]
  relevant <- (projmap$mapXY == segi)
  # BUG FIX: scale the direct contribution by weights[i], consistent
  # with the stack entries below (weight = weights[i]) and the mass
  # accounting abs(weight) * pkernel(...); previously unweighted.
  values[relevant] <- values[relevant] +
    weights[i] * dkernel(len * (projmap$tp[relevant] - tpi),
                         kernel=kernel, sd=sigma)
  # push the two tails onto the stack
  stack <- rbind(data.frame(seg = c(segi, segi),
                            from = c(TRUE, FALSE),
                            distance = len * c(tpi, 1-tpi),
                            weight = rep(weights[i], 2L),
                            generation = rep(1L, 2)),
                 stack)
}
Lfrom <- L$from
Lto <- L$to
if(verbose) niter <- 0
if(savehistory)
  history <- data.frame(iter=integer(0), qlen=integer(0),
                        totmass=numeric(0), maxmass=numeric(0))
lastgen <- resolve.1.default(list(lastgen=Inf), list(...))
sortgen <- resolve.1.default(list(sortgen=FALSE), list(...))
sortgen <- sortgen || is.finite(lastgen)
## process the stack
while(nrow(stack) > 0) {
  if(debug) print(stack)
  # remaining (upper-tail) kernel mass carried by each stack entry
  masses <- with(stack,
                 abs(weight) * pkernel(distance,
                                       kernel=kernel, sd=sigma,
                                       lower.tail=FALSE))
  totmass <- sum(masses)
  maxmass <- max(masses)
  if(savehistory)
    history <- rbind(history,
                     data.frame(iter=nrow(history)+1L,
                                qlen=nrow(stack),
                                totmass=totmass,
                                maxmass=maxmass))
## Continuation of density.lpp: pop kernel tails off the stack,
## propagate them across the vertex onto each incident segment,
## and accumulate the density contributions until the stack drains.
if(verbose) {
  niter <- niter + 1L
  cat(paste("Iteration", niter, "\tStack length", nrow(stack), "\n"))
  cat(paste("Total stack mass", totmass, "\tMaximum", maxmass, "\n"))
}
# trim tails whose remaining mass is negligible
tiny <- (masses < epsilon)
if(any(tiny)) {
  if(verbose) {
    ntiny <- sum(tiny)
    cat(paste("Removing", ntiny,
              "tiny", ngettext(ntiny, "tail", "tails"), "\n"))
  }
  stack <- stack[!tiny, ]
}
if(nrow(stack) == 0) break;
# pop the top of the stack
H <- stack[1L, , drop=FALSE]
stack <- stack[-1L, , drop=FALSE]
# segment and vertex
Hseg <- H$seg
Hvert <- if(H$from) Lfrom[Hseg] else Lto[Hseg]
Hdist <- H$distance
Hgen <- H$generation
## finished processing?
if(Hgen > lastgen) break;
# find all segments incident to this vertex
incident <- which((Lfrom == Hvert) | (Lto == Hvert))
degree <- length(incident)
# exclude reflecting paths?
if(!continuous)
  incident <- setdiff(incident, Hseg)
for(J in incident) {
  lenJ <- Llengths[J]
  # determine whether Hvert is the 'to' or 'from' endpoint of segment J
  H.is.from <- (Lfrom[J] == Hvert)
  # update weight
  if(continuous) {
    Jweight <- H$weight * (2/degree - (J == Hseg))
  } else {
    Jweight <- H$weight/(degree-1)
  }
  # increment density on segment
  relevant <- (projmap$mapXY == J)
  tp.rel <- projmap$tp[relevant]
  d.rel <- lenJ * (if(H.is.from) tp.rel else (1 - tp.rel))
  values[relevant] <- values[relevant] +
    Jweight * dkernel(d.rel + Hdist, kernel=kernel, sd=sigma)
  # push other end of segment onto stack
  stack <- rbind(data.frame(seg = J,
                            from = !(H.is.from),
                            distance = lenJ + Hdist,
                            weight = Jweight,
                            generation = Hgen + 1L),
                 stack)
  if(sortgen)
    stack <- stack[order(stack$generation), , drop=FALSE]
  # BUG FIX: this print was unconditional (leftover debugging output,
  # executed on every pass); guard it with the 'debug' flag like the
  # print at the top of the while loop.
  if(debug) print(stack)
}
}
# attach values to nearest pixels
Z <- lineimage
Z[pixelcentres] <- values
# attach exact line position data
df <- cbind(projdata, values)
out <- linim(L, Z, df=df)
if(savehistory)
  attr(out, "history") <- history
return(out)
}

density.splitppx <- function(x, sigma=NULL, ...)
{
  ## density method for split point patterns: every component
  ## must be a point pattern on a linear network (lpp)
  if(!all(sapply(x, is.lpp)))
    stop("Only implemented for patterns on a linear network")
  solapply(x, density.lpp, sigma=sigma, ...)
}

#' Kernel density estimate on a linear network computed by
#' finite-difference time stepping (see FDMKERNEL below).
#' 'dx' is the spatial sample spacing, 'dt' the time step;
#' defaults are chosen from the network geometry and a stability bound.
PDEdensityLPP <- function(x, sigma, ..., weights=NULL,
                          dx=NULL, dt=NULL, iterMax=1e6,
                          fun=FALSE,
                          finespacing=FALSE, finedata=finespacing) {
  stopifnot(is.lpp(x))
  L <- as.linnet(x)
  check.1.real(sigma)
  check.finite(sigma)
  if(!is.null(weights))
    check.nvector(weights, npoints(x))
  if(is.null(dx)) {
    #' default rule for spacing of sample points
    lenths <- lengths.psp(as.psp(L))
    lbar <- mean(lenths)
    nseg <- length(lenths)
    ltot <- lbar * nseg
    if(finespacing) {
      #' specify 30 steps per segment, on average
      dx <- lbar/30
    } else {
      #' use pixel size
      argh <- list(...)
      W <- Frame(x)
      eps <- if(!is.null(argh$eps)) {
               min(argh$eps)
             } else if(!is.null(argh$dimyx)) {
               min(sidelengths(W)/argh$dimyx)
             } else if(!is.null(argh$xy)) {
               with(as.mask(W, xy=argh$xy), min(xstep, ystep))
             } else min(sidelengths(W)/spatstat.options("npixel"))
      dx <- max(eps/1.4, lbar/30)
    }
    #' round dx so that the total length is an integer number of steps
    D <- ceiling(ltot/dx)
    D <- min(D, .Machine$integer.max)
    dx <- ltot/D
  }
  #' stability bound on the time step, driven by the maximum
  #' combined degree of the endpoints of any segment
  verdeg <- vertexdegree(L)
  amb <- max(verdeg[L$from] + verdeg[L$to])
  dtmax <- min(0.95 * (dx^2)/amb, sigma^2/(2 * 10), sigma * dx/6)
  if(is.null(dt)) {
    dt <- dtmax
  } else if(dt > dtmax) {
    stop(paste("dt is too large: maximum value", dtmax), call.=FALSE)
  }
  a <- FDMKERNEL(lppobj=x, sigma=sigma, dtx=dx, dtt=dt,
                 weights=weights,
                 iterMax=iterMax, sparse=TRUE,
                 stepnames=list(time="dt", space="dx"))
  f <- a$kernel_fun
  if(fun) {
    #' return the estimate as a function on the network
    result <- f
  } else if(!finespacing) {
    #' standard resolution: convert to a pixel image on the network
    result <- as.linim(f, ...)
  } else {
    #' fine spacing: build the linim directly from the sample points
    Z <- as.im(as.linim(f, ...))
    df <- a$df
    colnames(df)[colnames(df) == "seg"] <- "mapXY"
    ij <- nearest.valid.pixel(df$x, df$y, Z)
    xy <- data.frame(xc = Z$xcol[ij$col], yc = Z$yrow[ij$row])
    df <- cbind(xy, df)
    result <- linim(domain(f), Z, restrict=FALSE, df=df)
  }
  attr(result, "sigma") <- sigma
  attr(result, "dx") <- a$deltax
  attr(result, "dt") <- a$deltat
  return(result)
}

# Greg's code

#' Finite-difference solver: subdivide each segment into pieces of
#' length ~ dtx ('lixellate'), place the (weighted) data mass on the
#' resulting vertices as the initial condition U0, then iterate a
#' linear update M = sigma^2/(2*dtt) times. NOTE(review): presumably
#' this integrates a diffusion/heat equation on the network — confirm
#' against the accompanying reference.
FDMKERNEL <- function(lppobj, sigma, dtt, weights=NULL,
                      iterMax=5000, sparse=FALSE, dtx,
                      stepnames=list(time="dtt", space="dtx")) {
  net2 <- as.linnet(lppobj)
  # ends1 <- net2$lines$ends
  lenfs <- lengths.psp(as.psp(net2))
  seg_in_lengths <- pmax(1, round(lenfs/dtx))
  new_lpp <- lixellate(lppobj, nsplit=seg_in_lengths)
  net_nodes <- as.linnet(new_lpp)
  vvv <- as.data.frame(vertices(net_nodes))
  vertco_new <- vvv[, c("x", "y")]
  vertseg_new <- vvv$segcoarse # marks
  verttp_new <- vvv$tpcoarse # marks
  if(npoints(lppobj) == 0) {
    #' no data: initial condition is identically zero
    U0 <- numeric(npoints(net_nodes$vertices))
  } else {
    #' distribute the mass of each data point between the two
    #' endpoints of its (subdivided) segment, linearly in tp
    tp1 <- as.numeric(new_lpp$data$tp)
    tp2 <- as.vector(rbind(1 - tp1, tp1))
    newseg <- as.integer(new_lpp$data$seg)
    vert_init_events1 <- as.vector(rbind(net_nodes$from[newseg],
                                         net_nodes$to[newseg]))
    highest_vert <- npoints(net_nodes$vertices)
    vert_numbers <- seq_len(highest_vert)
    ff <- factor(vert_init_events1, levels=vert_numbers)
    ww <- if(is.null(weights)) tp2 else (rep(weights, each=2) * tp2)
    ww <- ww/dtx
    U0 <- tapply(ww, ff, sum)
    U0[is.na(U0)] <- 0
  }
  #' number of time iterations
  M <- round((sigma^2)/(2*dtt))
  if(M < 10)
    stop(paste("No of time iterations must be > 10; decrease time step",
               stepnames[["time"]]))
  if(M > iterMax)
    stop(paste0("No of time iterations = ", M,
                " exceeds maximum number iterMax = ", iterMax,
                "; increase time step ", stepnames[["time"]],
                ", or increase iterMax"))
  #' build the update matrix A2 from the vertex adjacency matrix
  alpha <- dtt/(dtx^2)
  A1 <- net_nodes$m *1
  # ml <- nrow(net_nodes$m)
  degree <- colSums(A1)
  dmax <- max(degree)
  A2 <- A1 * alpha
  diag(A2) <- 1 - alpha * degree
  #' explicit scheme is stable only if dmax * alpha <= 1
  if(1 - dmax*alpha < 0)
    stop(paste0("Algorithm is unstable: alpha = ", stepnames[["time"]], "/",
stepnames[["space"]], "^2 = ", alpha, " does not satisfy (dmax * alpha <= 1)", " where DMAX = highest vertex degree = ", dmax, "; decrease time step ", stepnames[["time"]], ", or increase spacing ", stepnames[["space"]])) if(npoints(lppobj) > 0) { v <- as.numeric(U0) for(j in 1:M) v <- A2 %*% v finalU <- as.numeric(v) } else finalU <- U0 vert_new <- cbind(vertco_new, vertseg_new, verttp_new) colnames(vert_new) <- c("x", "y", "seg", "tp") Nodes <- lpp(vert_new, net2, check=FALSE) nodemap <- nnfun(Nodes) interpUxyst <- function(x, y, seg, tp) { finalU[nodemap(x,y,seg,tp)] } interpU <- linfun(interpUxyst, net2) df <- cbind(vert_new, data.frame(values=finalU)) out <- list(kernel_fun = interpU, df = df, deltax = dtx, deltat = dtt) return(out) } spatstat/R/quasirandom.R0000644000176200001440000000266313333543255015001 0ustar liggesusers## ## quasirandom.R ## ## Quasi-random sequence generators ## ## $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $ ## vdCorput <- function(n, base) { stopifnot(is.prime(base)) z <- .C("Corput", base=as.integer(base), n=as.integer(n), result=as.double(numeric(n)), PACKAGE = "spatstat") return(z$result) } Halton <- function(n, bases=c(2,3), raw=FALSE, simplify=TRUE) { d <- length(bases) if(d==2 && !raw && simplify) return(ppp(vdCorput(n, bases[1]), vdCorput(n, bases[2]), window=owin(), check=FALSE)) z <- matrix(, nrow=n, ncol=d) for(j in 1:d) z[,j] <- vdCorput(n, bases[j]) if(raw || d < 2) return(z) b <- do.call(boxx, rep(list(c(0,1)), d)) return(ppx(z, b, simplify=simplify)) } Hammersley <- function(n, bases=2, raw=FALSE, simplify=TRUE) { d <- length(bases) + 1 z <- cbind(Halton(n, bases, raw=TRUE), (1:n)/n) dimnames(z) <- NULL if(raw || d < 2) return(z) b <- do.call(boxx, rep(list(c(0,1)), d)) return(ppx(z, b, simplify=simplify)) } rQuasi <- function(n, W, type=c("Halton", "Hammersley"), ...) 
{ R <- as.rectangle(W) type <- match.arg(type) X <- switch(type, Halton=Halton(n, ...), Hammersley=Hammersley(n, ...)) Y <- ppp(R$xrange[1] + diff(R$xrange) * X$x, R$yrange[1] + diff(R$yrange) * X$y, window=R, check=FALSE) if(!is.rectangle(W)) Y <- Y[W] return(Y) } spatstat/R/mppm.R0000644000176200001440000006175713603031477013436 0ustar liggesusers# # mppm.R # # $Revision: 1.93 $ $Date: 2020/01/01 05:33:32 $ # mppm <- local({ mppm <- function(formula, data, interaction=Poisson(), ..., iformula=NULL, #%^!ifdef RANDOMEFFECTS random=NULL, #%^!endif weights=NULL, use.gam=FALSE, #%^!ifdef RANDOMEFFECTS reltol.pql=1e-3, #%^!endif gcontrol=list() ) { ## remember call cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") ## Validate arguments if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) stopifnot(is.hyperframe(data)) data.sumry <- summary(data, brief=TRUE) npat <- data.sumry$ncases if(npat == 0) stop(paste("Hyperframe", sQuote("data"), "has zero rows")) if(!is.null(iformula) && !inherits(iformula, "formula")) stop(paste("Argument", sQuote("iformula"), "should be a formula or NULL")) #%^!ifdef RANDOMEFFECTS if(has.random <- !is.null(random)) { if(!inherits(random, "formula")) stop(paste(sQuote("random"), "should be a formula or NULL")) if(use.gam) stop("Sorry, random effects are not available in GAMs") } #%^!endif if(! 
(is.interact(interaction) || is.hyperframe(interaction))) stop(paste("The argument", sQuote("interaction"), "should be a point process interaction object (class", dQuote("interact"), "), or a hyperframe containing such objects", sep="")) if(is.null(weights)) { weights <- rep(1, npat) } else { check.nvector(weights, npat, things="rows of data", oneok=TRUE) if(length(weights) == 1L) weights <- rep(weights, npat) } backdoor <- list(...)$backdoor if(is.null(backdoor) || !is.logical(backdoor)) backdoor <- FALSE ############## HANDLE FORMULAS ############################ ##------ Trend Formula ------------------ ## check all variables in trend formula are recognised checkvars(formula, data.sumry$col.names, extra=c("x","y","id","marks"), bname="data") ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") allvars <- variablesinformula(trend) ## --- Interaction formula ----- ## names of interactions as they may appear in formulae itags <- if(is.hyperframe(interaction)) names(interaction) else "Interaction" ninteract <- length(itags) ## ensure `iformula' is a formula without a LHS ## and determine which columns of `interaction' are actually used if(is.null(iformula)) { if(ninteract > 1) stop(paste("interaction hyperframe has more than 1 column;", "you must specify the choice of interaction", "using argument", sQuote("iformula"))) iused <- TRUE iformula <- as.formula(paste("~", itags)) } else { if(length(iformula) > 2) stop(paste("The interaction formula", sQuote("iformula"), "should not have a left hand side")) ## valid variables in `iformula' are interactions and data frame columns permitted <- paste(sQuote("interaction"), "or permitted name in", 
sQuote("data")) checkvars(iformula, itags, extra=c(data.sumry$dfnames, "id"), bname=permitted) ivars <- variablesinformula(iformula) ## check which columns of `interaction' are actually used iused <- itags %in% ivars if(sum(iused) == 0) stop("No interaction specified in iformula") ## OK allvars <- c(allvars, ivars) } #%^!ifdef RANDOMEFFECTS ## --- Random effects formula ---- if(!is.null(random)) { if(length(random) > 2) stop(paste("The random effects formula", sQuote("random"), "should not have a left hand side")) checkvars(random, itags, extra=c(data.sumry$col.names, "x", "y", "id"), bname="either data or interaction") allvars <- c(allvars, variablesinformula(random)) } #%^!endif ## ---- variables required (on RHS of one of the above formulae) ----- allvars <- unique(allvars) ######## EXTRACT DATA ##################################### ## Insert extra variable 'id' data <- cbind.hyperframe(data, id=factor(1:npat)) data.sumry <- summary(data, brief=TRUE) allvars <- unique(c(allvars, "id")) ## Extract the list of responses (point pattern/quadscheme) Y <- data[, Yname, drop=TRUE] if(npat == 1) Y <- solist(Y) Yclass <- data.sumry$classes[Yname] if(Yclass == "ppp") { ## convert to quadrature schemes, for efficiency's sake Y <- solapply(Y, quadscheme) ## Ydescrip <- "point patterns" ## not used } else if(Yclass == "quad") { Y <- as.solist(Y) ## Ydescrip <- "quadrature schemes" ## not used } else { stop(paste("Column", dQuote(Yname), "of data", "does not consist of point patterns (class ppp)", "nor of quadrature schemes (class quad)"), call.=FALSE) } ## Extract sub-hyperframe of data named in formulae datanames <- names(data) used.cov.names <- allvars[allvars %in% datanames] has.covar <- (length(used.cov.names) > 0) if(has.covar) { dfvar <- used.cov.names %in% data.sumry$dfnames imvar <- data.sumry$types[used.cov.names] == "im" if(any(nbg <- !(dfvar | imvar))) stop(paste("Inappropriate format for", ngettext(sum(nbg), "covariate", "covariates"), 
paste(sQuote(used.cov.names[nbg]), collapse=", "), ": should contain image objects or vector/factor")) covariates.hf <- data[, used.cov.names, drop=FALSE] has.design <- any(dfvar) dfvarnames <- used.cov.names[dfvar] datadf <- if(has.design) as.data.frame(covariates.hf, discard=TRUE, warn=FALSE) else NULL if(has.design) { ## check for NA's in design covariates # if(any(nbg <- apply(is.na(datadf), 2, any))) if(any(nbg <- matcolany(is.na(datadf)))) stop(paste("There are NA's in the", ngettext(sum(nbg), "covariate", "covariates"), commasep(dQuote(names(datadf)[nbg])))) } } else { has.design <- FALSE datadf <- NULL } ############### INTERACTION ################################### ## ensure `interaction' is a hyperframe of `interact' objects ## with the right number of rows. ## All entries in a column must represent the same process ## (possibly with different values of the irregular parameters). ## Extract the names of the point processes. if(is.interact(interaction)) { ninteract <- 1 processes <- list(Interaction=interaction$name) interaction <- hyperframe(Interaction=interaction, id=1:npat)[,1] constant <- c(Interaction=TRUE) } else if(is.hyperframe(interaction)) { inter.sumry <- summary(interaction) ninteract <- inter.sumry$nvars ## ensure it has the same number of rows as 'data' nr <- inter.sumry$ncases if(nr == 1 && npat > 1) { interaction <- cbind.hyperframe(id=1:npat, interaction)[,-1] inter.sumry <- summary(interaction) } else if(nr != npat) stop(paste("Number of rows in", sQuote("interaction"), "=", nr, "!=", npat, "=", "number of rows in", sQuote("data"))) ## check all columns contain interaction objects ok <- (inter.sumry$classes == "interact") if(!all(ok)) { nbg <- names(interaction)[!ok] nn <- sum(!ok) stop(paste(ngettext(nn, "Column", "Columns"), paste(sQuote(nbg), collapse=", "), ngettext(nn, "does", "do"), "not consist of interaction objects")) } ## all entries in a column must represent the same process type ## (with possibly different values of the 
irregular parameters) ok <- unlist(lapply(as.list(interaction), consistentname)) if(!all(ok)) { nbg <- names(interaction)[!ok] stop(paste("Different interactions may not appear in a single column.", "Violated by", paste(sQuote(nbg), collapse=", "))) } processes <- lapply(as.list(interaction), firstname) ## determine whether all entries in a column are EXACTLY the same ## (=> have the same parameters) constant <- (inter.sumry$storage == "hyperatom") if(any(!constant)) { others <- interaction[,!constant] constant[!constant] <- sapply(lapply(as.list(others), unique), length) == 1 } } ## check for trivial (Poisson) interactions trivial <- unlist(lapply(as.list(interaction), allpoisson)) ## check that iformula does not combine two interactions on one row nondfnames <- datanames[!(datanames %in% data.sumry$dfnames)] ip <- impliedpresence(itags, iformula, datadf, nondfnames) if(any(rowSums(ip) > 1)) stop("iformula invokes more than one interaction on a single row") ## #################### BERMAN-TURNER DEVICE ######################### ## ## set up list to contain the glm variable names for each interaction. 
Vnamelist <- rep(list(character(0)), ninteract) names(Vnamelist) <- itags ## set up list to contain 'IsOffset' Isoffsetlist <- rep(list(logical(0)), ninteract) names(Isoffsetlist) <- itags #### ## ---------------- L O O P --------------------------------- for(i in 1:npat) { ## extract responses and covariates for presentation to ppm() Yi <- Y[[i]] covariates <- if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.design) { ## convert each data frame value to an image covariates[dfvarnames] <- lapply(as.list(as.data.frame(covariates[dfvarnames])), as.im, W=Yi$data$window) } ## Generate data frame and glm info for this point pattern ## First the trend covariates prep0 <- bt.frame(Yi, trend, Poisson(), ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam) glmdat <- prep0$glmdata ## now the nontrivial interaction terms for(j in (1:ninteract)[iused & !trivial]) { inter <- interaction[i,j,drop=TRUE] if(!is.null(ss <- inter$selfstart)) interaction[i,j] <- inter <- ss(Yi$data, inter) prepj <- bt.frame(Yi, ~1, inter, ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam, vnamebase=itags[j], vnameprefix=itags[j]) ## store GLM variable names & check consistency vnameij <- prepj$Vnames if(i == 1) Vnamelist[[j]] <- vnameij else if(!identical(vnameij, Vnamelist[[j]])) stop("Internal error: Unexpected conflict in glm variable names") ## store offset indicator vectors isoffset.ij <- prepj$IsOffset if(i == 1) Isoffsetlist[[j]] <- isoffset.ij else if(!identical(isoffset.ij, Isoffsetlist[[j]])) stop("Internal error: Unexpected conflict in offset indicators") ## GLM data frame for this interaction glmdatj <- prepj$glmdata if(nrow(glmdatj) != nrow(glmdat)) stop("Internal error: differing numbers of rows in glm data frame") iterms.ij <- glmdatj[vnameij] subset.ij <- glmdatj$.mpl.SUBSET ## tack on columns of interaction terms glmdat <- cbind(glmdat, iterms.ij) ## update subset (quadrature points where cif is positive) glmdat$.mpl.SUBSET <- 
glmdat$.mpl.SUBSET & subset.ij } ## assemble the Mother Of All Data Frames if(i == 1) { moadf <- glmdat } else { ## There may be new or missing columns recognised <- names(glmdat) %in% names(moadf) if(any(!recognised)) { newnames <- names(glmdat)[!recognised] zeroes <- as.data.frame(matrix(0, nrow(moadf), length(newnames))) names(zeroes) <- newnames moadf <- cbind(moadf, zeroes) } provided <- names(moadf) %in% names(glmdat) if(any(!provided)) { absentnames <- names(moadf)[!provided] zeroes <- as.data.frame(matrix(0, nrow(glmdat), length(absentnames))) names(zeroes) <- absentnames glmdat <- cbind(glmdat, zeroes) } ## Ensure factor columns are consistent m.isfac <- sapply(as.list(glmdat), is.factor) g.isfac <- sapply(as.list(glmdat), is.factor) if(any(uhoh <- (m.isfac != g.isfac))) errorInconsistentRows("values (factor and non-factor)", colnames(moadf)[uhoh]) if(any(m.isfac)) { m.levels <- lapply(as.list(moadf)[m.isfac], levels) g.levels <- lapply(as.list(glmdat)[g.isfac], levels) clash <- !mapply(identical, x=m.levels, y=g.levels) if(any(clash)) errorInconsistentRows("factor levels", (colnames(moadf)[m.isfac])[clash]) } ## Finally they are compatible moadf <- rbind(moadf, glmdat) } } ## ---------------- E N D o f L O O P -------------------------- ## ## add case weights moadf$caseweight <- weights[moadf$id] ## ## backdoor exit - Berman-Turner frame only - used by predict.mppm if(backdoor) return(moadf) ## ## ## -------------------------------------------------------------------- ## ## Construct the glm formula for the Berman-Turner device ## ## Get trend part from the last-computed prep0 fmla <- prep0$trendfmla ## Tack on the RHS of the interaction formula if(!all(trivial)) fmla <- paste(fmla, "+", as.character(iformula)[[2]]) ## Make it a formula fmla <- as.formula(fmla) ## Ensure that each interaction name is recognised. 
## ## To the user, an interaction is identified by its `tag' name ## (default tag: "Interaction") ## ## Internally, an interaction is fitted using its sufficient statistic ## which may be 0, 1 or k-dimensional. ## The column names of the sufficient statistic are the Vnames ## returned from ppm. ## The Poisson process is a special case: it is 0-dimensional (no Vnames). ## ## For k-dimensional sufficient statistics, we modify the formulae, ## replacing the interaction name by (vname1 + vname2 + .... + vnamek) ## for(j in (1:ninteract)[iused]) { vnames <- Vnamelist[[j]] tag <- itags[j] isoffset <- Isoffsetlist[[j]] if(any(isoffset)) { ## enclose names of offset variables in 'offset()' vnames[isoffset] <- paste("offset(", vnames[isoffset], ")", sep="") } if(trivial[j]) ## Poisson case: add a column of zeroes moadf[[tag]] <- 0 else if(!identical(vnames, tag)) { if(length(vnames) == 1) ## tag to be replaced by vname vn <- paste("~", vnames[1]) else ## tag to be replaced by (vname1 + vname2 + .... 
+ vnamek) vn <- paste("~(", paste(vnames, collapse=" + "), ")") ## pull out formula representation of RHS vnr <- as.formula(vn)[[2]] ## make substitution rule: list(=) vnsub <- list(vnr) names(vnsub) <- tag ## perform substitution in trend formula fmla <- eval(substitute(substitute(fom, vnsub), list(fom=fmla))) #%^!ifdef RANDOMEFFECTS ## perform substitution in random effects formula if(has.random && tag %in% variablesinformula(random)) random <- eval(substitute(substitute(fom, vnsub), list(fom=random))) #%^!endif } } fmla <- as.formula(fmla) ## Fix scoping problem assign("glmmsubset", moadf$.mpl.SUBSET, envir=environment(fmla)) for(nama in colnames(moadf)) assign(nama, moadf[[nama]], envir=environment(fmla)) ## Satisfy package checker glmmsubset <- .mpl.SUBSET <- moadf$.mpl.SUBSET .mpl.W <- moadf$.mpl.W caseweight <- moadf$caseweight ## ---------------- FIT THE MODEL ------------------------------------ want.trend <- prep0$info$want.trend if(want.trend && use.gam) { fitter <- "gam" ctrl <- do.call(gam.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W * caseweight, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) #%^!ifdef RANDOMEFFECTS } else if(!is.null(random)) { fitter <- "glmmPQL" ctrl <- do.call(lmeControl, resolve.defaults(gcontrol, list(maxIter=50))) attr(fmla, "ctrl") <- ctrl # very strange way to pass argument fixed <- 42 # to satisfy package checker FIT <- hackglmmPQL(fmla, random=random, family=quasi(link=log, variance=mu), weights=.mpl.W * caseweight, data=moadf, subset=glmmsubset, control=attr(fixed, "ctrl"), reltol=reltol.pql) deviants <- -2 * logLik(FIT) #%^!endif } else { fitter <- "glm" ctrl <- do.call(glm.control, resolve.defaults(gcontrol, list(maxit=50))) FIT <- glm(fmla, family=quasi(link="log", variance="mu"), weights=.mpl.W * caseweight, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=ctrl) deviants <- deviance(FIT) } env <- 
list2env(moadf, parent=sys.frame(sys.nframe())) environment(FIT$terms) <- env ## maximised log-pseudolikelihood W <- with(moadf, .mpl.W * caseweight) SUBSET <- moadf$.mpl.SUBSET Z <- (moadf$.mpl.Y != 0) maxlogpl <- -(deviants/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) ## ## ---------------- PACK UP THE RESULT -------------------------------- ## result <- list(Call = list(callstring=callstring, cl=cl), Info = list( #%^!ifdef RANDOMEFFECTS has.random=has.random, #%^!endif has.covar=has.covar, has.design=has.design, Yname=Yname, used.cov.names=used.cov.names, allvars=allvars, names.data=names(data), is.df.column=(data.sumry$storage == "dfcolumn"), rownames=row.names(data), correction=prep0$info$correction, rbord=prep0$info$rbord ), Fit= list( fitter=fitter, use.gam=use.gam, fmla=fmla, FIT=FIT, moadf=moadf, Vnamelist=Vnamelist ), Inter = list( ninteract=ninteract, interaction=interaction, iformula=iformula, iused=iused, itags=itags, processes=processes, trivial=trivial, constant=constant ), formula=formula, trend=trend, iformula=iformula, #%^!ifdef RANDOMEFFECTS random=random, #%^!endif npat=npat, data=data, Y=Y, maxlogpl=maxlogpl, datadf=datadf) class(result) <- c("mppm", class(result)) return(result) } # helper functions checkvars <- function(f, b, extra=NULL, bname=short.deparse(substitute(b))){ fname <- short.deparse(substitute(f)) fvars <- variablesinformula(f) bvars <- if(is.character(b)) b else names(b) bvars <- c(bvars, extra) nbg <- !(fvars %in% bvars) if(any(nbg)) { nn <- sum(nbg) stop(paste(ngettext(nn, "Variable", "Variables"), commasep(dQuote(fvars[nbg])), "in", fname, ngettext(nn, "is not one of the", "are not"), "names in", bname)) } return(NULL) } consistentname <- function(x) { xnames <- unlist(lapply(x, getElement, name="name")) return(length(unique(xnames)) == 1) } firstname <- function(z) { z[[1]]$name } allpoisson <- function(x) all(sapply(x, is.poisson.interact)) marklevels <- function(x) { levels(marks(x)) } errorInconsistentRows <- 
function(what, offending) { stop(paste("There are inconsistent", what, "for the", ngettext(length(offending), "variable", "variables"), commasep(sQuote(offending)), "between different rows of the hyperframe 'data'"), call.=FALSE) } mppm }) is.mppm <- function(x) { inherits(x, "mppm") } coef.mppm <- function(object, ...) { coef(object$Fit$FIT) } #%^!ifdef RANDOMEFFECTS fixef.mppm <- function(object, ...) { if(object$Fit$fitter == "glmmPQL") fixef(object$Fit$FIT) else coef(object$Fit$FIT) } ranef.mppm <- function(object, ...) { if(object$Fit$fitter == "glmmPQL") ranef(object$Fit$FIT) else as.data.frame(matrix(, nrow=object$npat, ncol=0)) } #%^!endif print.mppm <- function(x, ...) { print(summary(x, ..., brief=TRUE)) } is.poisson.mppm <- function(x) { trivial <- x$Inter$trivial iused <- x$Inter$iused all(trivial[iused]) } quad.mppm <- function(x) { as.solist(x$Y) } data.mppm <- function(x) { solapply(x$Y, getElement, name="data") } windows.mppm <- function(x) { solapply(data.mppm(x), Window) } logLik.mppm <- function(object, ..., warn=TRUE) { if(warn && !is.poisson.mppm(object)) warning(paste("log likelihood is not available for non-Poisson model;", "log-pseudolikelihood returned")) ll <- object$maxlogpl #%^!ifdef RANDOMEFFECTS attr(ll, "df") <- length(fixef(object)) #%^!else # attr(ll, "df") <- length(coef(object)) #%^!endif class(ll) <- "logLik" return(ll) } AIC.mppm <- function(object, ..., k=2, takeuchi=TRUE) { ll <- logLik(object, warn=FALSE) pen <- attr(ll, "df") if(takeuchi && !is.poisson(object)) { vv <- vcov(object, what="all") J <- vv$fisher H <- vv$internals$A1 ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J JiH <- try(solve(H, J), silent=TRUE) if(!inherits(JiH, "try-error")) pen <- sum(diag(JiH)) } return(- 2 * as.numeric(ll) + k * pen) } extractAIC.mppm <- function(fit, scale = 0, k = 2, ..., takeuchi = TRUE) { edf <- length(coef(fit)) aic <- AIC(fit, k = k, takeuchi = takeuchi) c(edf, aic) } getCall.mppm <- function(x, ...) 
{ x$Call$cl } terms.mppm <- function(x, ...) { terms(formula(x)) } nobs.mppm <- function(object, ...) { sum(sapply(data.mppm(object), npoints)) } simulate.mppm <- function(object, nsim=1, ..., verbose=TRUE) { subs <- subfits(object) nr <- length(subs) sims <- list() if(verbose) { splat("Generating simulated realisations of", nr, "models..") state <- list() } for(irow in seq_len(nr)) { sims[[irow]] <- do.call(simulate, resolve.defaults(list(object=subs[[irow]], nsim=nsim, drop=FALSE), list(...), list(progress=FALSE))) if(verbose) state <- progressreport(irow, nr, state=state) } sim1list <- lapply(sims, "[[", i=1) h <- hyperframe("Sim1"=sim1list) if(nsim > 1) { for(j in 2:nsim) { simjlist <- lapply(sims, "[[", i=j) hj <- hyperframe(Sim=simjlist) names(hj) <- paste0("Sim", j) h <- cbind(h, hj) } } return(h) } model.matrix.mppm <- function(object, ..., keepNA=TRUE, separate=FALSE) { FIT <- object$Fit$FIT df <- object$Fit$moadf environment(FIT) <- list2env(df) if(keepNA) { mm <- model.matrix(FIT, ..., subset=NULL, na.action=NULL) if(nrow(mm) != nrow(df)) stop("Internal error: model matrix has wrong number of rows", call.=FALSE) } else { mm <- model.matrix(FIT, ...) 
ok <- complete.cases(df) & df$.mpl.SUBSET if(nrow(mm) != sum(ok)) stop("Internal error: model matrix has wrong number of rows", call.=FALSE) df <- df[ok, , drop=FALSE] } if(separate) { id <- df$id mm <- split.data.frame(mm, id) # see help(split) } return(mm) } spatstat/R/ord.R0000644000176200001440000000231013613216544013226 0ustar liggesusers# # # ord.S # # $Revision: 1.9 $ $Date: 2020/01/26 04:32:04 $ # # Ord process with user-supplied potential # # Ord() create an instance of the Ord process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Ord <- local({ BlankOrd <- list( name = "Ord process with user-defined potential", creator = "Ord", family = "ord.family", pot = NULL, par = NULL, parnames = NULL, hasInf = NA, init = NULL, update = function(self, ...){ do.call(Ord, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=NULL ) class(BlankOrd) <- "interact" Ord <- function(pot, name) { out <- instantiate.interact(BlankOrd) out$pot <- pot if(!missing(name)) out$name <- name return(out) } Ord <- intermaker(Ord, BlankOrd) }) spatstat/R/plot.anylist.R0000644000176200001440000005265313536314547015127 0ustar liggesusers## ## plot.anylist.R ## ## Plotting functions for 'solist', 'anylist', 'imlist' ## and legacy class 'listof' ## ## $Revision: 1.29 $ $Date: 2019/09/11 11:52:28 $ ## plot.anylist <- plot.solist <- plot.listof <- local({ ## auxiliary functions classes.with.do.plot <- c("im", "ppp", "psp", "msr", "layered", "tess") classes.with.multiplot <- c("ppp", "lpp", "msr", "tess", "leverage.ppm", "influence.ppm") has.multiplot <- function(x) { inherits(x, classes.with.multiplot) || (is.function(x) && "multiplot" %in% names(formals(x))) } extraplot <- function(nnn, x, ..., add=FALSE, extrargs=list(), panel.args=NULL, plotcommand="plot") { argh <- list(...) 
if(has.multiplot(x) && identical(plotcommand,"plot")) argh <- c(argh, list(multiplot=FALSE)) if(!is.null(panel.args)) { xtra <- if(is.function(panel.args)) panel.args(nnn) else panel.args if(!is.list(xtra)) stop(paste0("panel.args", if(is.function(panel.args)) "(i)" else "", " should be a list")) argh <- resolve.defaults(xtra, argh) } if(length(extrargs) > 0) argh <- resolve.defaults(argh, extrargs) ## some plot commands don't recognise 'add' if(add) argh <- append(argh, list(add=TRUE)) do.call(plotcommand, append(list(x=x), argh)) } exec.or.plot <- function(cmd, i, xi, ..., extrargs=list(), add=FALSE) { if(is.null(cmd)) return(NULL) argh <- resolve.defaults(list(...), extrargs, ## some plot commands don't recognise 'add' if(add) list(add=TRUE) else NULL, if(has.multiplot(cmd)) list(multiplot=FALSE) else NULL) if(is.function(cmd)) { do.call(cmd, resolve.defaults(list(i, xi), argh)) } else { do.call(plot, resolve.defaults(list(cmd), argh)) } } exec.or.plotshift <- function(cmd, i, xi, ..., vec=vec, extrargs=list(), add=FALSE) { if(is.null(cmd)) return(NULL) argh <- resolve.defaults(list(...), extrargs, ## some plot commands don't recognise 'add' if(add) list(add=TRUE) else NULL, if(has.multiplot(cmd)) list(multiplot=FALSE) else NULL) if(is.function(cmd)) { do.call(cmd, resolve.defaults(list(i, xi), argh)) } else { cmd <- shift(cmd, vec) do.call(plot, resolve.defaults(list(cmd), argh)) } } ## bounding box, including ribbon for images, legend for point patterns getplotbox <- function(x, ..., do.plot, plotcommand="plot", multiplot) { if(inherits(x, classes.with.do.plot)) { if(identical(plotcommand, "plot")) { y <- if(has.multiplot(x)) plot(x, ..., multiplot=FALSE, do.plot=FALSE) else plot(x, ..., do.plot=FALSE) return(as.owin(y)) } else if(identical(plotcommand, "contour")) { y <- contour(x, ..., do.plot=FALSE) return(as.owin(y)) } else { plc <- plotcommand if(is.character(plc)) plc <- get(plc) if(!is.function(plc)) stop("Unrecognised plot function") if("do.plot" %in% 
names(args(plc))) { if(has.multiplot(plc)) { y <- do.call(plc, list(x=x, ..., multiplot=FALSE, do.plot=FALSE)) } else { y <- do.call(plc, list(x=x, ..., do.plot=FALSE)) } return(as.owin(y)) } } } return(try(as.rectangle(x), silent=TRUE)) } # calculate bounding boxes for each panel using intended arguments! getPlotBoxes <- function(xlist, ..., panel.args=NULL, extrargs=list()) { userargs <- list(...) n <- length(xlist) result <- vector(length=n, mode="list") for(i in seq_len(n)) { pai <- if(is.function(panel.args)) panel.args(i) else list() argh <- resolve.defaults(pai, userargs, extrargs) result[[i]] <- do.call(getplotbox, append(list(x=xlist[[i]]), argh)) } return(result) } is.shiftable <- function(x) { if(is.null(x)) return(TRUE) if(is.function(x)) return(FALSE) y <- try(as.rectangle(x), silent=TRUE) return(!inherits(y, "try-error")) } maxassigned <- function(i, values) max(-1, values[i[i > 0]]) plot.anylist <- function(x, ..., main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), hsep = 0, vsep = 0, panel.begin=NULL, panel.end=NULL, panel.args=NULL, panel.begin.args=NULL, panel.end.args=NULL, panel.vpad = 0.2, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2, equal.scales=FALSE, halign=FALSE, valign=FALSE ) { xname <- short.deparse(substitute(x)) ## recursively expand entries which are 'anylist' etc while(any(sapply(x, inherits, what="anylist"))) x <- as.solist(expandSpecialLists(x, "anylist"), demote=TRUE) isSo <- inherits(x, "solist") isIm <- inherits(x, "imlist") || (isSo && all(unlist(lapply(x, is.im)))) ## `boomerang despatch' cl <- match.call() if(missing(plotcommand) && isIm) { cl[[1]] <- as.name("image.imlist") parenv <- sys.parent() return(invisible(eval(cl, envir=parenv))) } if(isSo) { allfv <- somefv <- FALSE } else { isfv <- unlist(lapply(x, is.fv)) allfv <- all(isfv) somefv <- any(isfv) } ## panel margins if(!missing(mar.panel)) { nm <- length(mar.panel) if(nm == 1) 
mar.panel <- rep(mar.panel, 4) else if(nm == 2) mar.panel <- rep(mar.panel, 2) else if(nm != 4) stop("mar.panel should have length 1, 2 or 4") } else if(somefv) { ## change default mar.panel <- 0.25+c(4,4,2,2) } n <- length(x) names(x) <- good.names(names(x), "Component_", 1:n) if(is.null(main.panel)) main.panel <- names(x) else { if(!is.expression(main.panel)) main.panel <- as.character(main.panel) nmp <- length(main.panel) if(nmp == 1) main.panel <- rep.int(main.panel, n) else if(nmp != n) stop("Incorrect length for main.panel") } if(allfv && equal.scales) { ## all entries are 'fv' objects: determine their plot limits fvlims <- lapply(x, plot, ..., limitsonly=TRUE) ## establish common x,y limits for all panels xlim <- range(unlist(lapply(fvlims, getElement, name="xlim"))) ylim <- range(unlist(lapply(fvlims, getElement, name="ylim"))) extrargs <- list(xlim=xlim, ylim=ylim) } else extrargs <- list() extrargs.begin <- resolve.defaults(panel.begin.args, extrargs) extrargs.end <- resolve.defaults(panel.end.args, extrargs) if(!arrange) { ## sequence of plots result <- vector(mode="list", length=n) for(i in 1:n) { xi <- x[[i]] exec.or.plot(panel.begin, i, xi, main=main.panel[i], extrargs=extrargs.begin) result[[i]] <- extraplot(i, xi, ..., add=!is.null(panel.begin), main=main.panel[i], panel.args=panel.args, extrargs=extrargs, plotcommand=plotcommand) %orifnull% list() exec.or.plot(panel.end, i, xi, add=TRUE, extrargs=extrargs.end) } if(!is.null(adorn.left)) warning("adorn.left was ignored because arrange=FALSE") if(!is.null(adorn.right)) warning("adorn.right was ignored because arrange=FALSE") if(!is.null(adorn.top)) warning("adorn.top was ignored because arrange=FALSE") if(!is.null(adorn.bottom)) warning("adorn.bottom was ignored because arrange=FALSE") return(invisible(result)) } ## ARRAY of plots ## decide whether to plot a main header main <- if(!missing(main) && !is.null(main)) main else xname if(!is.character(main)) { ## main title could be an expression nlines 
<- 1 banner <- TRUE } else { ## main title is character string/vector, possibly "" banner <- any(nzchar(main)) if(length(main) > 1) main <- paste(main, collapse="\n") nlines <- length(unlist(strsplit(main, "\n"))) } ## determine arrangement of plots ## arrange like mfrow(nrows, ncols) plus a banner at the top if(is.null(nrows) && is.null(ncols)) { nrows <- as.integer(floor(sqrt(n))) ncols <- as.integer(ceiling(n/nrows)) } else if(!is.null(nrows) && is.null(ncols)) ncols <- as.integer(ceiling(n/nrows)) else if(is.null(nrows) && !is.null(ncols)) nrows <- as.integer(ceiling(n/ncols)) else stopifnot(nrows * ncols >= length(x)) nblank <- ncols * nrows - n if(allfv || list(plotcommand) %in% list("persp", persp)) { ## Function plots do not have physical 'size' sizes.known <- FALSE } else { ## Determine dimensions of objects ## (including space for colour ribbons, if they are images) boxes <- getPlotBoxes(x, ..., plotcommand=plotcommand, panel.args=panel.args, extrargs=extrargs) sizes.known <- !any(sapply(boxes, inherits, what="try-error")) if(sizes.known) { extrargs <- resolve.defaults(extrargs, list(claim.title.space=TRUE)) boxes <- getPlotBoxes(x, ..., plotcommand=plotcommand, panel.args=panel.args, extrargs=extrargs) } if(equal.scales && !sizes.known) { warning("Ignored equal.scales=TRUE; scales could not be determined") equal.scales <- FALSE } } if(sizes.known) { ## determine size of each panel if(equal.scales) { ## do not rescale panels scaledboxes <- boxes } else { ## rescale panels sides <- lapply(boxes, sidelengths) bwidths <- unlist(lapply(sides, "[", 1)) bheights <- unlist(lapply(sides, "[", 2)) ## Force equal heights, unless there is only one column scales <- if(ncols > 1) 1/bheights else 1/bwidths if(all(is.finite(scales))) { scaledboxes <- vector(mode="list", length=n) for(i in 1:n) scaledboxes[[i]] <- scalardilate(boxes[[i]], scales[i]) } else { #' uh-oh equal.scales <- sizes.known <- FALSE scaledboxes <- boxes } } } ## determine whether to display all 
objects in one enormous plot ## Precondition is that everything has a spatial bounding box single.plot <- equal.scales && sizes.known if(equal.scales && !single.plot && !allfv) warning("equal.scales=TRUE ignored ", "because bounding boxes ", "could not be determined", call.=FALSE) ## enforce alignment by expanding boxes if(halign) { if(!equal.scales) warning("halign=TRUE ignored because equal.scales=FALSE") ## x coordinates align in each column xr <- range(sapply(scaledboxes, getElement, name="xrange")) scaledboxes <- lapply(scaledboxes, "[[<-", i="xrange", value=xr) } if(valign) { if(!equal.scales) warning("valign=TRUE ignored because equal.scales=FALSE") ## y coordinates align in each column yr <- range(sapply(scaledboxes, getElement, name="yrange")) scaledboxes <- lapply(scaledboxes, "[[<-", i="yrange", value=yr) } ## set up layout mat <- matrix(c(seq_len(n), integer(nblank)), byrow=TRUE, ncol=ncols, nrow=nrows) if(sizes.known) { boxsides <- lapply(scaledboxes, sidelengths) xwidths <- sapply(boxsides, "[", i=1) xheights <- sapply(boxsides, "[", i=2) heights <- apply(mat, 1, maxassigned, values=xheights) widths <- apply(mat, 2, maxassigned, values=xwidths) } else { heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) } #' negative heights/widths arise if a row/column is not used. meanheight <- mean(heights[heights > 0]) meanwidth <- mean(widths[heights > 0]) heights[heights <= 0] <- meanheight widths[widths <= 0] <- meanwidth nall <- n ## if(single.plot) { ## ......... create a single plot .................. 
## determine sizes ht <- max(heights) wd <- max(widths) marpar <- mar.panel * c(ht, wd, ht, wd)/6 vsep <- vsep * ht/6 hsep <- hsep * wd/6 mainheight <- any(nzchar(main.panel)) * ht/5 ewidths <- marpar[2] + widths + marpar[4] eheights <- marpar[1] + heights + marpar[3] + mainheight Width <- sum(ewidths) + hsep * (length(ewidths) - 1) Height <- sum(eheights) + vsep * (length(eheights) - 1) bigbox <- owin(c(0, Width), c(0, Height)) ox <- marpar[2] + cumsum(c(0, ewidths + hsep))[1:ncols] oy <- marpar[1] + cumsum(c(0, rev(eheights) + vsep))[nrows:1] panelorigin <- as.matrix(expand.grid(x=ox, y=oy)) ## initialise, with banner cex <- resolve.1.default(list(cex.title=1.5), list(...))/par('cex.main') plot(bigbox, type="n", main=main, cex.main=cex) ## plot individual objects result <- vector(mode="list", length=n) for(i in 1:n) { ## determine shift vector that moves bottom left corner of spatial box ## to bottom left corner of target area on plot device vec <- panelorigin[i,] - with(scaledboxes[[i]], c(xrange[1], yrange[1])) ## shift panel contents xi <- x[[i]] xishift <- shift(xi, vec) ## let rip if(!is.null(panel.begin)) exec.or.plotshift(panel.begin, i, xishift, add=TRUE, main=main.panel[i], show.all=TRUE, extrargs=extrargs.begin, vec=vec) result[[i]] <- extraplot(i, xishift, ..., add=TRUE, show.all=is.null(panel.begin), main=main.panel[i], extrargs=extrargs, panel.args=panel.args, plotcommand=plotcommand) %orifnull% list() exec.or.plotshift(panel.end, i, xishift, add=TRUE, extrargs=extrargs.end, vec=vec) } return(invisible(result)) } ## ................. multiple logical plots using 'layout' .............. 
## adjust panel margins to accommodate desired extra separation mar.panel <- pmax(0, mar.panel + c(vsep, hsep, vsep, hsep)/2) ## increase heights to accommodate panel titles if(sizes.known && any(nzchar(main.panel))) heights <- heights * (1 + panel.vpad) ## check for adornment if(!is.null(adorn.left)) { ## add margin at left, of width adorn.size * meanwidth nall <- i.left <- n+1 mat <- cbind(i.left, mat) widths <- c(adorn.size * meanwidth, widths) } if(!is.null(adorn.right)) { ## add margin at right, of width adorn.size * meanwidth nall <- i.right <- nall+1 mat <- cbind(mat, i.right) widths <- c(widths, adorn.size * meanwidth) } if(!is.null(adorn.bottom)) { ## add margin at bottom, of height adorn.size * meanheight nall <- i.bottom <- nall+1 mat <- rbind(mat, i.bottom) heights <- c(heights, adorn.size * meanheight) } if(!is.null(adorn.top)) { ## add margin at top, of height adorn.size * meanheight nall <- i.top <- nall + 1 mat <- rbind(i.top, mat) heights <- c(adorn.size * meanheight, heights) } if(banner) { ## Increment existing panel numbers ## New panel 1 is the banner panels <- (mat > 0) mat[panels] <- mat[panels] + 1 mat <- rbind(1, mat) heights <- c(0.1 * meanheight * (1 + nlines), heights) } ## declare layout layout(mat, heights=heights, widths=widths, respect=sizes.known) ## start output ..... ## .... 
plot banner if(banner) { opa <- par(mar=rep.int(0,4), xpd=TRUE) plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.1.default(list(cex.title=1.5), list(...))/par('cex') text(0,0,main, cex=cex) } ## plot panels npa <- par(mar=mar.panel) if(!banner) opa <- npa result <- vector(mode="list", length=n) for(i in 1:n) { xi <- x[[i]] exec.or.plot(panel.begin, i, xi, main=main.panel[i], extrargs=extrargs.begin) result <- extraplot(i, xi, ..., add = !is.null(panel.begin), main = main.panel[i], extrargs=extrargs, panel.args=panel.args, plotcommand=plotcommand) %orifnull% list() exec.or.plot(panel.end, i, xi, add=TRUE, extrargs=extrargs.end) } ## adornments if(nall > n) { par(mar=rep.int(0,4), xpd=TRUE) if(!is.null(adorn.left)) adorn.left() if(!is.null(adorn.right)) adorn.right() if(!is.null(adorn.bottom)) adorn.bottom() if(!is.null(adorn.top)) adorn.top() } ## revert layout(1) par(opa) return(invisible(result)) } plot.anylist }) contour.imlist <- contour.listof <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) do.call(plot.solist, resolve.defaults(list(x=x, plotcommand="contour"), list(...), list(main=xname))) } plot.imlist <- local({ plot.imlist <- function(x, ..., plotcommand="image", equal.ribbon = FALSE, ribmar=NULL) { xname <- short.deparse(substitute(x)) if(missing(plotcommand) && any(sapply(x, inherits, what=c("linim", "linfun")))) plotcommand <- "plot" if(equal.ribbon && (list(plotcommand) %in% list("image", "plot", image, plot))) { out <- imagecommon(x, ..., xname=xname, ribmar=ribmar) } else { out <- do.call(plot.solist, resolve.defaults(list(x=x, plotcommand=plotcommand), list(...), list(main=xname))) } return(invisible(out)) } imagecommon <- function(x, ..., xname, zlim=NULL, ribbon=TRUE, ribside=c("right", "left", "bottom", "top"), ribsep=NULL, ribwid=0.5, ribn=1024, ribscale=NULL, ribargs=list(), ribmar = NULL, mar.panel = c(2,1,1,2)) { if(missing(xname)) xname <- short.deparse(substitute(x)) ribside <- match.arg(ribside) stopifnot(is.list(ribargs)) if(!is.null(ribsep)) warning("Argument ribsep is not yet implemented for image arrays") ## determine range of values if(is.null(zlim)) zlim <- range(unlist(lapply(x, range))) ## determine common colour map imcolmap <- plot.im(x[[1]], do.plot=FALSE, zlim=zlim, ..., ribn=ribn) ## plot ribbon? 
if(!ribbon) { ribadorn <- list() } else { ## determine plot arguments for colour ribbon vertical <- (ribside %in% c("right", "left")) scaleinfo <- if(!is.null(ribscale)) list(labelmap=ribscale) else list() sidecode <- match(ribside, c("bottom", "left", "top", "right")) ribstuff <- c(list(x=imcolmap, main="", vertical=vertical), ribargs, scaleinfo, list(side=sidecode)) if (is.null(mar.panel)) mar.panel <- c(2, 1, 1, 2) if (length(mar.panel) != 4) mar.panel <- rep(mar.panel, 4)[1:4] if (is.null(ribmar)) { ribmar <- mar.panel/2 newmar <- c(2, 0) switch(ribside, left = { ribmar[c(2, 4)] <- newmar }, right = { ribmar[c(4, 2)] <- newmar }, bottom = { ribmar[c(1, 3)] <- newmar }, top = { ribmar[c(3, 1)] <- newmar } ) } ## bespoke function executed to plot colour ribbon do.ribbon <- function() { opa <- par(mar=ribmar) do.call(plot, ribstuff) par(opa) } ## ribbon plot function encoded as 'adorn' argument ribadorn <- list(adorn=do.ribbon, adorn.size=ribwid) names(ribadorn)[1] <- paste("adorn", ribside, sep=".") } ## result <- do.call(plot.solist, resolve.defaults(list(x=x, plotcommand="image"), list(...), list(mar.panel=mar.panel, main=xname, col=imcolmap, zlim=zlim, ribbon=FALSE), ribadorn)) return(invisible(result)) } plot.imlist }) image.imlist <- image.listof <- function(x, ..., equal.ribbon = FALSE, ribmar=NULL) { plc <- resolve.1.default(list(plotcommand="image"), list(...)) if(list(plc) %in% list("image", "plot", image, plot)) { out <- plot.imlist(x, ..., plotcommand="image", equal.ribbon=equal.ribbon, ribmar=ribmar) } else { out <- plot.solist(x, ..., ribmar=ribmar) } return(invisible(out)) } spatstat/R/timed.R0000644000176200001440000000625113333543255013555 0ustar liggesusers#' #' timed.R #' #' Timed objects #' #' $Revision: 1.3 $ $Date: 2017/07/31 01:08:55 $ timed <- function(x, ..., starttime=NULL, timetaken=NULL) { if(is.null(starttime) && is.null(timetaken)) # time starts now. 
starttime <- proc.time() # evaluate expression if any object <- x if(is.null(timetaken)) timetaken <- proc.time() - starttime if(!inherits(object, "timed")) class(object) <- c("timed", class(object)) attr(object, "timetaken") <- timetaken return(object) } print.timed <- function(x, ...) { # strip the timing information and print the rest. taken <- attr(x, "timetaken") cx <- class(x) attr(x, "timetaken") <- NULL class(x) <- cx[cx != "timed"] NextMethod("print") # Now print the timing info cat(paste("\nTime taken:", codetime(taken), "\n")) return(invisible(NULL)) } timeTaken <- function(..., warn=TRUE) { allargs <- list(...) hastime <- sapply(allargs, inherits, what="timed") if(!any(hastime)) { if(warn) warning("Data did not contain timing information", call.=FALSE) return(NULL) } if(warn && !all(hastime)) warning("Some arguments did not contain timing information", call.=FALSE) times <- sapply(allargs[hastime], attr, which="timetaken") tottime <- rowSums(times) class(tottime) <- "proc_time" return(tottime) } #' .............. codetime .................................... 
#' Basic utility for converting times in seconds to text strings codetime <- local({ uname <- c("min", "hours", "days", "years", "thousand years", "million years", "billion years") u1name <- c("min", "hour", "day", "year", "thousand years", "million years", "billion years") multiple <- c(60, 60, 24, 365, 1e3, 1e3, 1e3) codehms <- function(x) { sgn <- if(x < 0) "-" else "" x <- round(abs(x)) hours <- x %/% 3600 mins <- (x %/% 60) %% 60 secs <- x %% 60 h <- if(hours > 0) paste(hours, ":", sep="") else "" started <- (hours > 0) m <- if(mins > 0) { paste(if(mins < 10 && started) "0" else "", mins, ":", sep="") } else if(started) "00:" else "" started <- started | (mins > 0) s <- if(secs > 0) { paste(if(secs < 10 && started) "0" else "", secs, sep="") } else if(started) "00" else "0" if(!started) s <- paste(s, "sec") paste(sgn, h, m, s, sep="") } codetime <- function(x, hms=TRUE, what=c("elapsed","user","system")) { if(inherits(x, "proc_time")) { what <- match.arg(what) x <- summary(x)[[match(what, c("user", "system", "elapsed"))]] } if(!is.numeric(x) || length(x) != 1) stop("codetime: x must be a proc_time object or a single number") sgn <- if(x < 0) "-" else "" x <- abs(x) if(x < 60) return(paste(sgn, signif(x, 3), " sec", sep="")) # more than 1 minute: round to whole number of seconds x <- round(x) if(hms && (x < 60 * 60 * 24)) return(paste(sgn, codehms(x), sep="")) u <- u1 <- "sec" for(k in seq_along(multiple)) { if(x >= multiple[k]) { x <- x/multiple[k] u <- uname[k] u1 <- u1name[k] } else break } xx <- round(x, 1) ux <- if(xx == 1) u1 else u paste(sgn, xx, " ", ux, sep="") } codetime }) spatstat/R/bw.CvL.R0000644000176200001440000000202313544333563013542 0ustar liggesusers#' #' bandwidth selection method of Cronie and Van Lieshout #' #' $Revision: 1.1 $ $Date: 2019/09/30 08:01:20 $ bw.CvL <- function(X, ..., srange=NULL, ns=16, sigma=NULL, warn=TRUE){ stopifnot(is.ppp(X)) W <- Window(X) areaW <- area.owin(W) if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && 
is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) check.range(srange) else { nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(W)/2) } sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) } cv <- numeric(ns) for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma = si, at = "points", leaveoneout = FALSE, edge = FALSE) cv[i] <- ( sum(1/lamx) - areaW )^2 } result <- bw.optim(cv, sigma, iopt=which.min(cv), creator="bw.CvL", criterion="Cronie and van Lieshout", warnextreme=warn, hargnames="srange", unitname=unitname(X)) return(result) } spatstat/R/sysdata.rda0000644000176200001440000027676013624161312014477 0ustar liggesusers7zXZi"6!X4P])TW"nRʟKMd[_;zlI{eqon %V +[LFcG HrX-j]i1lh&U3Dvp#}}ӣ֚n^qfnXRuCFȴ ъPSn$ Hw f3VOdB.%k|єm[ULiICD{F 8z91YҊoL:Z~'|r?5cU(dc S@J-Υه/D;QVC4&!2Ď]U8)Q}F6t.N6tg 2w-L+6e7QK h>rFz fY(͠ Z@}Jދbee7>}-g T7ܺ]vw%@e&&>FI JIY7bPSfe ݘMόEm-Pm9t8 :Fߢ}C|h)J ꆗlc2iDprD^j ie:Xc$~ sk!ЯT n\nGV<őoJ%EF_J lۀ nws+$yyq )>Z9?iNs;_o*Z*(e P0迨v.XT9<3Z(#xHyQ1:OII d͹HFFuLl[i Q{JZFher d&;} u*M؈`V? xnxPNF4e7: MUԋ{r .7\W$I.CG)\+lz"+ƲR+S;DudQ+J$B@<\3lRbr0MRu"*U#E4eޙ$ T||2{%r0<|.V$A%E$bRALﲵTLפJ6MOE}q! 
T8x(;'>L<}뜰gW0/M4qy?i>sjyr}(F,l60r߻[)3Vuj"~ş60͚k%x;ڻԱI ~V~0:5\ ?$XVT]jatnq\pݞw=[R =0W\jwd;S?MՄ>4t-c\VI/8e7Y澺4dDvUJWl=,LD!ؓ޳U Hw+PzUAO{:қߩÕVUu=WFe|E Wq6Sqm*7]yb+(`0WM8Π3'jCFj$Lߗ]J͵enBn4q`*Lc *|: Y!utx6Dؚ!cxD NMZȤwUoZVDWXG`RtK8- ;e,RY١ ߽aޙu ^{"Lh;e98uޛo(̍cZ-J <3O†(sRCm*y s+W3i b;Zx$ʈ!=!-1'2v/&O|ww&81ΆnPX,[-rk !?s*puLcRyLS(bG{&Оm2L) y3@9{%Ze鵇(pi\ŻtIa иJtAޱ^زeAaDQ{B۩s&*a x{4;}K{ys [Izז Tbu }9ڟjav6 HJՔ sg a_ ׳RVʜW\*Kjs> VL Ѳ`qRZ:PU`$2NGQ,Jy"$dKxyfح.[}oZF1o?2\敾Xm`ձ!V ؕ K0`rC47P π=,i#}+0`bڃݺj2iө#ľIɯ!u:8Fݢ @Ic¦s;#Nwvкo{`0b )z>h|ϜI 1ט9L#9Wy9PL \(&NT4U~S]}&)WL{{XˠIױo*& cs e t7iFwj*C9AF\=t.?35W`0*?̵G.|&JUͰ8:]ϜrbxdlBcǁJ5v/KY\c)\1X&}w6UMbe;)cB"΃Wad ; mWgCb{=WBs$춣~8)\| ѸD%sx=CEzO97] uyZ(]4o=" n5Lct\0bv *>(,A{D9g/O:X6]?!R7}gU' Lg|Y4\i, A+h-iU'] -lC[XDJĩgd' Esl}f/D#'И9ۜ;5cmq]jN^ܠ4Z:lQj(m&E˓QOZ\RCgGCvg$OYAߞrwo4I]hEe]TWW" `/n Y?L)jI$ɋj(+h^;CiX` n|ΘDw5:n:3nh+K4, HV{vq6O%,4'@Z% mȔHb0Ԇ(rK0֛8\G?֑d-F"_ Yܮ9R4D1PfR>k&bLOyU0P汿:~ 598|#7a[ La*|Wȝ^oڼ#@ |jO>AMDQmFN6M͵X՟ $1֊TF,ɪ$z/LqU$4P"e}TdV̐:ҫEV82oKfԠg*WK =Ff%lB`! 7N-Ug1xDԶh3VȽ<"㗿aWq3gy$%8ƃT !aӂ621Ekm k8C2EB >t%!L}/MH}QhP! wķ:dʍ#/[aK<#+(L)FlX%l.djCtYmel2uU$HTo:w\[p^ U^j)XK K}I߮ ɀʚ~D腲[mFWWc%4 uG,ӗD~ƦAhkݹyhFё =:uC #_&r䦣cWr;jn8&4L9 ;pnp( /8¶pm6jiN4>D+d~3SÀl7Ѐ B:Q%f_uP,ا, vO2Yռ|fqD|FdhW-Z7,m~># '5[\WxG#OC i%̸X}XvVRL3;HMj|iq@aajCæSEH\KhCS)3@M݇8lOv\.Wb@1E]+I? 
ʝam@r7MՌoE$C؝XU QHp??RՋWhd&rr Q"RBͯ0H+#x~L&@񪉅JT1+&v ngzf9ǝm,!sPb|^ĉ|W%acO|$L}ķ|u|w"Xbl[d!7zeHfEINpw= kWQËNy~at{]YTwEܴ(m-k4e?48Z"7y q>JC;/G"} @CF?E%GPr{[BL*-*oxrZvH_IWx'M=1foR|}V2 ↧0R'4*!A5yQ̇ƻђ,(_)fb?(^| cde܉X;TOG7e-Q]3PM뼅SK,cu&&?rW jrh/mų}b]JْTgWX0.>9l;aJ<=N/\|"l J ]/u<@Lo3@JN!M%7Okυ}%a{ǩ nc5]6*&0[-vrF~OAG\;NoձtJJxOw7$vH,U)q] LRK },Gw9otL3_GqG Xh7|8>X^ eE[QK)&A;}Y!nG?,[8d@:sQ:tUW,>B433D61԰Ldizȅv;;)c; )r gw1?8} cQ9Ť ^ktp3 &?_nII$=o:QʧY`}hDm{rh7s`#pL+4\0~w]3ۛm]Kig0@HsI0> 0~ Y؄+ދ7x3cU&?ʪmv `&\Yt)>Y::m_JD ,O8c_kjDܞj2ڔ꬞q#1Cl9hItug+%zb?L˺IP{I͊ -hBzV # <#D$X\I/F |5}y~qnlsii.^~Dm_cӀDN"S_d}c=#v=P-fڥ}K7/cnw FC60?])VT5"^+wdi.|oh+bsJArӰC T:9| pEˊM9jb{+)0i/)T!f5f}xj͠H]4|6>-&XC zmLwGP9M͵:l7 ɄjF=/aϭiQU _*:=-g^_vYXp7r nM9 ~|-=kýuM\KAm!Sg5e݈E4z_\oH Z&X.HoT֐PLR">։ Jښ  D ,.ij9Oz4 QAT r eUj["T'o@Y vd,&hb+AQ0Ck?zta:^+'t%.2cE5x`~D6* +CʴBM~?тory%6$o-0_AoSj C;"{*3l6cfSغ}8ͺ&@ǖgՅsYn2>52ZޱT-?h҈)xEEeHPYϧCT^|1T ׁKA= J4?Ij <|fp5~EJ9&xDi=o H }8{a>cdܛE@iGaNE#9!%ġ'hc D N?&|Q&G+ lꢇ٧ʉdE̝X 8 ̚=2tPB8MgInlV c(4/V_~o'4P(h4@z!sX7Sc[e9_Xl aq7 mC~,VJk=EP{]]ؾ }.O1 s 5nqjaFDԒ֗)ugn$rN#!ղ7%2:0NvUl\/;USK$^Nőq= 'mx#sg-pu #sWbG)6t#9O007:'pk)}9nF׼³fISf+ȴ}NG01= =ᱹȾ^#X8ϹYeHFDpe墳 8(^UV|s U1&H% Y-ڵLjx޹m#џ!}rE #+`%AsVq8V{a^&_z.#9W)n*;~eG4ἰ-_pwGQJTC ec,tɴ-0%σ4}=/4=}-p]. O0^mxJ?]*DrjT4zR0@KT5晫2ɲطPpƣ`?諞])eg u4WޘѲjϬ v~{ 7aO89BEuҿU ީ:_$f$%_~}Lqʇbsf>:{¾Am TjKw'hJAP9cf>󮕏:۬RoW'fUp/[,gs.v/o"GYx*7\&CJegT .ǚEPBݳ4cG'K$ȡ^cOeU<@zw1}o8? Mtn 8αc3hk\:궴Y0F0bՅyǡ: 2kl5-ː(58Y1,kΚPV.;)'[I( IS6{>dQI Y[@[Z!(C pv>NWqc`LuC*:n_z E[ xzV' ΄kf,i6L`Zſ<F#n c.I.) 0HRn GewT,luO?RSަ2>nҋSAQhs!t1oԉhM© [H{ߓva,tci bɒSN-.:5JdsUyA6?$XeAZ3. SMkz~_hqP&:DgKDb%=(lqf' lv!m^75E&皑=lԹ ]jкذiZ4pcfy/Teq| `1 ~/o`ZMeE%T;C7eF8#mOجj8j jaqP MdЫ m~hzcH!Ha~LS |7: k+HMZYݞd0?L~.`ɿ^( )Khі)&5ךe|qDIu<ɢ\ѿ$,#>+CN.e @L?)RC|yj.ShLJM=2+/`v/, =ZzBO|vhг1FZښ~e󑅌ZxX]0S78+8́۵J^sK//soCWKL]{CeP=:u(9ieMď(B. 
CP~MlqVhM&іU 5#!Mު8ʛԺa1;/d~sdfSH%s+ jND!BpzA<& iI|ə)U1$ǥ'nxfhͧ[$-}o)4~Ԗo!|ĘTͳpK oG¬ÀlJ܁IOv0P7jI̖a&!:\ى"C=KB%v  @&E9?I^NNPx%ˤM~KjNC\]j rbkM*FUBʷ»I!]Iav0YxۉRӪb7k>I#a~ޚYÜgϳP27a\~}HT*|]%0qYr=%}ꤿw7ތ*կx{c Fej*x&Hlb=$LZ[Gdꚕ[к1/W 쨅B\co|KX3`I(U! G^?tyܞ=U.q  *#‚0it 88АۊM~UxADF$TڇjXH kx{SQXq>1ޖ4fc9W|4kU؉g p?$2>7-4Q]%¶_pL--=GuDfGQPL.1c7KrܳP̺BIg` ;!CI/*ћ\y;CꪵM y qkNn)MRE{r:Y)e K(&5+D6Lw&P +t̃e/ !h13]~*.XT &b=FyN KB[jZDTHX. %H/2Bw?ɟUvD$ ~gezKjq>DYza0m " *CFӷ ;憜>h7'p  $P |eR۸: V7㉚ux j/|F:(ӕ J(hcʈe\ Sd&!m6Ќc\H EV?{ jjjt LT Ymww_i ?8Xd%L՟Π^I,x N:g/#W` J澥2_kD:I>oYda(* yWD^$k.@-_r=px nf}ÐI2)0_&Cy +˱;ka |.{w#ʬbpW铄QB.ޛD*D(jn Ԩ^hȂQ"\ކ^=^El2W#H(5ѻ#Qt,ro/{K}"9`3&?niEw,g,Huy)j1,ޅsv4I'j-h x偰d ](Y2WšP:wu=սjO="_`loR2Oщ0 @wEQOj <,Wq7+#R(/񲀺r3Y\'Uc\uL˲$seRC8sg۷yq4W; W ljo1bx^' 8MZb\?q}zfq`*?l}hmd7%Q`½V53ߗBJ/Bk.6]%D7pYKGkIMmT&h}Ω.Y(ZɡJ۟tB9zVU8PgUnE[dN['y[mkSI S 2I(TQ]Ojwې#a/^>^fRy)ozҴN@Sz! *xYgPgS7DZuEҡ"rsERy2IR9KKaD[GZd.b q}fX)ZU(mps8OUCӍ<ǝsӔZƐ]enDZ/ɚjI8৫zQԅ@bwTZe? 0aqNSu&ެLZEg{&҈Cv_5P;gcޤDZQ%HW3Kv^J*XVEF"E>.x84Cq7!˗$縴5yKgU(~xRM*mΨ} 7߽xt^:7^} "يXG̔t}Z,5ifc\C=Iq./x'v="YR$e>S ɟZ=Iq/нҋr%nʆݎtPn .|8#6ؚee{Ы3],Fsm#I~ " 6D* /.W(8$G)t5Y4u%`r0xɲrXɂD/J#p {}؊# \M@MkqdUO:TTe%R*s5)I\V@O/+dZ`Cjw)RGI@.ߤF }_ȣCWpC3|9tͬ۔F_gO`VRkmY"gK |z%0E_sVZqG8~q\PpԁR V~1I 2cVo|8PVXED=\Մ5Z\6JYQZ\9uf|R]⋑L8eԂKQ8/G#|Y3v5Ә[*U4Z˃V6:L}B5bGBAkiIz*L^{$e juRU\4-j_FU/ʥQUiξ'8ˠK>p6|_mq‰ rƿPLh_~DPW<4&Mb:ᥨoOE,|9SGK;j^LBԊuw=~B31ػ>)Ɯf ru sjoN >BueltRD>?l[}NYPFƻg65FVZ,ܡ/$~8P#n)mXD{gC1Djj0w/MRbhkBtDRHܴ .]Hcו(Ƥ|x)`aJ wiO,R{8)}Mq3J>Tl}Ku5@ns&<9|tgci*4V EK<|ܬ;Ҟ~fZi-Iɦ|!6*l+!Dëx`锭]-~N3H l8:lJc.)͜ldAsjGg8RiO q['!2c-tŔ2ByF h~G1n;umi ;rA kɰloT.ϣ<ΥڱQb82顥i_!&x#7 (@>yQ+:`=ZM`ĹmBpǖ -pnyѱžel8+0F\qqۄX$!皯E״-\gL^s&U0==dCp ^ZOqvLWtyApFl;vIT g!g[8y7q"LNvKW"EBgJ@3jXmfpGFDj+Qg=`X ϼ<m}+ԾvhoHc թ\)Dd}+Mћ.1idӤHS+қ7pNt;A>fe8# OT:o1x4zb(ߒ}iM3ú _۫BR1yoL.uu qDbڢz#h[jr 1xY# bUg7WZuӵa߱tní P#(y7e}'$ (@Ø׉B{U g)(0Z9d"+ &B@]& /D9Bw zIODӣc x5z'#xv՞:/Tq6-"k&B`~֕!~gv*瑆)1.bA`7ȑs0/a~.,rR'3px'6Ȍa\;z%Үl૵kO'4}osLigW$s"^Q>Zf{ 凂.1u QkdGVR0݌O_<Z' |ķN1^sV [Dj0KࠈK5Nv%1zr?o M\]Jc%J7U2R 
<0d>`(]Xc}#r !?9ظ@X{C:HX0U.~ߡ7Hv R>|#E$f&<Ybx?+F w?g}cA`P 8TA0k)4驨ցpr*?fHSAK3/(LN9ii6?LW|ݱ9b#V싍vu@7{u8Zu_cPk")+FgSmv ;M8]f;e+ے^/_Q5WmRJ:łG:(}3u~U#0,,:PN Lf-4%D f0J$-,&r(kR1/*P F3{$:ׄZEQlOX_[][aEqaytqf(4 2ⲝUPHkّ4w0C5 h]v.~{'L*t뉽Nɟyo&)|,0AB"eO.ܽN_۝Qe(v.!>fca:A& ݋NLIǶ=O榆1Z){:+5<}FOn҇=DQlG^SѰǥWv}"=-S4Ց:\n8PwC=`\/(zso)/i_uvj^•\,~fĒN4*FO&"`n`t'"<†P lTxT_ L_Me@_&HRj &œFOH61*MvjjteTbJDp8l|dxU;5dZv6,R`t@&4֒ޅW~`|S/f&";-t02O'gpNm;eг>w `}π;* ;L}<8Nl]̞sc nmWռT(pQp_*W#y0C ce33k@}7ʛ놣K]1CwϮeo6ݳ*R/ϲz8(yT6%ߩ }HDURHQ}M_aAm)&-K-@TzLg\?w}0ϼy0y-h4Y=pK`Ijnᢉ[1K.=iS1P5wk=@qlS(G@T31Fs{fGX|A#m1EW}o΢p[4\i^Ys D?q}rEKcp*;jbO"@JHm+2ǒި~;Vq^"YH d;\ rx[6bHt>L5d47M\k:I*/roť?"M%O:%56-Nô*#yA9L~Hee}P=}>z8ÙX[_P=j JZyCb5 ]qXڮ'|Lg=-W9eԅfzɿw >@8ώ| ܆|)|W#d2Y; 敿4X5FjϋNaҟ \x9!lJ"QЌ8S/=xl !LP}*#U@/~)e2*EF,?ѵa}kmePd"(AsR08 1{rE=D"uBJ˜`R<wGxwjJ,FvpSͣtE7*|:-Jo EoG0D= ^|+.m|X3t0Ļmrr/ivWS-}& D<"a-)l_@fEKJVrtrK8 7OBb{]Ki_#ư ?'jWpl[^R^禃`U=_=i< z<){ p 2~R*#*g ʬ/9DСbҺ٭w-mJ`F9CA3סwH HD&7b,mű;r"!z<!|`ӾIy"f5@)9;ԶfseA;3U:=BZV;e+xw~rBXUB?A?Xf^ G Cv,@+Oc<{,OVc{4۫B\8"N5T\ȮDV80c{Rx8HP*ˎ'ҽX%Ɔi F]eOG+R6I* ȆywjY7Yy1jnW]~PNb) lcS%uOZ^4ZY!&8b2`2jr^q")w[t`jYS6ȽmŐ3P;7tc\B {㺛ny;g.N/Y?]clZwHN9p=?D?@CD-sv">.bW`$GU]#խܙk0CC?Ÿ\P6{Z)q3v2ujɕcwѐ+ Zqo_4lsk6*%BRg!PNx5]8mLNl2m0 8m~qbB7ͥe\j31:Po|ϪDFWJ۸QN3ZcP0 b~ -1xec-Q,+fQG7COM%!be$٧t g5]'XOсFf#-pL?8o|yC ^ffeN@;i.}(nH(j6\ 1Y+ }X{ꢗ[ G7걖S/0&z߯a!(`w 5rK07 ^ ٭З"}s]f]tG)qKf,t|##"6H?*/[NjLᄂm: sw"Yxbico"5LնCciΫ4-qKa='r&[7j\^|tyB@-{+0I JrAÌ ,Q2T˵&$x=0  +eE4w}Yuҏ}؇9Q'ou^i ;݀C8y9SA=KpہQ^Eq!a 7k/=YE! C6@)+v)߸D!2Ql,rE(ׯ=lS x@f` u}'8R?=E"'L.bUQx_$kNbNPZԴo{y0ia,M2r@3GhV: ک  l"QjcA-{HS_[@Rjn#Xj-lQ1tӄue,.AՔDﴲw~lFZ?O^!uy! 
e0l*9oUJ#Iq>S@ohnq$Low<_6rA+VB 9?tGt=$b|U죒OJ'ohe׻'_Yvᇷ)wO 󞄇οղdnχ@MppRYO]LE)˻HEfyib/EVIK+Po1mb:ʇ:b2$Q܍Xkc<9C`va,Ȯ= p,J:?f,zR(JO%ZNɈX7aZkC%Q_JtFX),!Do9t2utLTtS.GjN~WSlw:v (''{%ҹvG/{z@C*~ 2i Nؖ9l '{k] Ԋ}߳ /X'CE6kUnT7(ݝ>Țcjs{k~ Y]|2vI8iYdޣlWT\vY/p<] k7<6mA0l?IǑ;*Ft8+}jԱM3ݖNQ*R3;5gXs3t}65XXrqvK -$VH?ymAc̶'] w=SMF fc37v&Mz;:쟱aՉrM {}+DdXqY V#FyWY\״Ł-âeI4VMw⁸Ui8"wVcq FZ$=M11@݉V#ArP+ i%2zkY%O}n5#}d4.:/Y*'[)rzߟ I>/S1΍`FY9P$;9^t`jmVl:(@z?BcGJo=pV.k|k=Kbz7|(il&?E%ڊw[ydI Jn7mvR.3R\U/$;$x 5ʆ7QF|pɀ?`z%$v ^8|q}~a5w,KR~q$l^2-Sufsgs֦SL[ @ n#,_ V 32 qnW&megn~;da@yh c$=6$q+o7D*=1_*]*_PU?ufn@ NQC%YQplU^*8Vv>&y5^GL(Jܿ3C6ՋE5t'٢ALvKWu\ɠi𢦗M1uK?lx$ֈeX=_Ynn[&BB/3\hZ#N}2-$"H*3S%-F?MO}L!{ԼCW*I{UE٤Ч㯃#5WH{ؔٺ \b ]ô4&򑳣:wSƖKxatFn[ daQPx#hH'=wq|Z+=.d.%A)Qf80v*}j^!&?Kro-]t)`w(m'ujb?^BR0͜]m-@oJ!i۪h"s۝w}``jOx{WyfmU#q '`V᪖zIM82&p /z{р|5")'\>1x$]K,CYZԘ874K=i 7x~P4y4Ա 14|h{ dzڊJ)s-67RS5M\#r&,[jL3@Xm{xme ^E[9$u3%In ҁtfKGlN] z jCVsh.M㊭B[VE30Z,6-j-1|Ld~Wo7jۉܴCg4s}^f-CDJZ.:N~,M/3nUC+}(J(>٘6x沢ROmĭbR/К9l&,&)Ɨ $D5tHdNz5T&4-֗swSRƄ{Tͦyei7'U^5zt=$Na=2ggUӺQ? 
LEL S8iWNW S!q1xVP) eZo{'8iUg҅E-j|oh?j ,H?'v-355 IoaW-nW >T=*df3\d@;7CvLǸâ/b6ZI筕6aPTbCN^>D M[rs4+("{POp+z* iH۴rv1>2)8^ZWQ4.{&c  P?˕]e|L߆ >G7Cgy.f5X_(fk>aKdYB 8٥[;I%+"Jud#KCtD'Z8tH'Ȅ(**ZUձȀ~?b-˃#7,!0vݐMwoYiz1=em08gZNUÑSؔ[ڿ0gTn%I1MmEB- b\MdCɡ|]'7Ƹ(KlH/khWb)Nݢ큿RI ' ~vYp팀llT<`Xia>HވFk{[5=cA$@pxf%sXe bqObۄ8Dmk3RКƆ60+ |m g2@^E-J&=*UpMNF\ܛ2v@Dx4G|gi틏n|l,JHaYm z, |ZyOKaxAEfõn^pPx2 0'K!vב@hez%Bun$N }N}A*@ωFU5 nb[ e钚:uln.N謂~7 :chuDn2-`dOL؍D˷-u'.,Gw )dzN"8U:ʰ3Y%}m',&F3?X ن4}xTUjEgy+aBƐ.2]|ʉi.LH`EUEB/)J,@Kޯ\iюT.T+3Yge4/jwLP5b]er$!IB۸[jL[Mg6яk]ig!NRPdl=aE{ր7UL7ACC/oW)|{_>_Q$*&r ڰ_GU.w:]wͳRUr>{*#Y(T{a2i0ؖfq`bMCSPH?A~%"4@qYв͗XeH!\GA'wyYFSIןZQ"*9ԅmJSnzFIKh.L 3eGA0+6t~U*P!=jk@Qoі[3EƘPМ |pK;/#~`USRO:SPdhf[- S`o.~ent43$ǀ}9PYV^IihhGIN)O&%c~C_h͊yިݽR|wr?a =p~hd|~w:k _^khw PeO 1X [I9Kۦ_bZW8 #ٸ˵|~Ԣ<+mr=Cn\Tk5`VOh~.DdF*8s-[p0Joߊabpԃ Y1ǾE{ə`AGƊɴ盚ekZgFv4jM0&?BrjQ5vO q =U+?;y{npPZD'"ng0%(aAhdRsi,Y axik*Ī9sxn%mR]/h?oXqIX /~y9遘?YP# 6DL$K"XfP O37UclAe~A-\ES: 0~G<x=wmNHyV@9KB/C 7쿭37ocN[ցGuPd>)fLzSROYxD4G`:<. %Yq~IGQ XQJnyBz|L~y.˸^,/ Uふ~,-OᚾôOv֊F>l 1] +w''{У_Y֭H6ˆlcD[$nN%J(SA7^旞ǦTc7M*RNݙDٮMt٥f1=plV 0>AZ! Q濻d2ʉI÷dTj!)Sb5bgz.齶BC}>\w;`N=e!jȃX6-oob'~{Ė*]dvh6ٱ,)8 BZw0pZ.s.! Dɭ:|!F!D<: k cvE[hP)3G&w z&)p|i8>L^5BU%R tkEP^+zc"hݜ_<~btB~h7>Ҕd3_E&b|oba\k3a^ml16T?rcuÁe~ ,d?[<=a6k#wQ\D1 }]/ܻ2ŰNÆ㺐tBo%<|V&ѭ?0SIC4  r g$g /޺ǭx(j/F>KC QmKq`oEۦy\FY"+p)VLyo`DFs,ssڰCxHl &8ˈD8 ɃK^|F-]#=@K1*V,jqA.s}XVπVDqalsB[8܀KRĩl`!5a<]/'üQfX AY&R= \mWkl:l`#-/pxm\WG~<=^{X\8"EXlD-7n7 q1sP\+ I˞G/g߿79Y%D ؃Pԗx aqsxkyRzn#6[ ~<pc\R,}^ZX59]^gF( A] ՜]>'s ]նIW$K*0'xr5!YGZ;R)| FÃek#6OIC+.PWyD)=@@H;G(ߋh2R%eUԾ32}8k߃#qFHYߎsL$`Az,(O#䁷+n֠)M͈WBszُlCh{ptXc'un9)R) ]ih?l SE m|%ր jcRf^oloUY}!XBS~*#Z'mrj0jH5i1%[``5L J2nwD%7x, DtP~eǿU.UR pG߾;wթq4 A y_I&w U]e ʦC30p ֯h^/{Zo_*% ȴgs-R?6ɋ<>E_XG31ssQ3`ڐZuv]|n7+:Yq*r"= cDU奴5 mi@d ES1-H[PT@?n)kR̶Y;B e<%jJaP 3r%g?;EaA{. 큵x~:klGt|J][ D|5Fc}>R"]aэ4+e[xOD"Flj$OHڹg 5ƀ#4ɷ$˘˻QP" G PȼP"r-̊~eW?:cW`xm&TTڷ?Zj L7pu~4^?38 '>c x slQ3r! 
q0TD3O33vAYWj??kJF|牮ZKRˈΗ+q<@o'*Myp+,h` ?w=֗1T{4bKi{Yv~LЄ]=p$Bf8ts%}QIỲQŖNO9ds~BZd@DJ5ąHZd[2jT\63NXd|~@!/pScz?Oi AǦ:2O>&{-}K4 A/x=1R `5q]\Z^ ?$[if+ʪsuwZ$^gF&_ߛvUdes.Jș<(=C:hc6s;0-ׯ =Tumr]A׃VBkc4}b̏#KK^/x69II7oUNR ɘ.v=?MV:5`³;Y n9/-&|/ 0hN r s:HΡEqܘCSESm9ؗxPZu ʎ/G"3BMTf:0R?VtX)O#dVҗ%4|O=89$˕ 2]P`?\ON{Fvg0X&h@)ty@k<ح K3j$Wq%W 隹D+Lmjk+?*nMNal <vgxyoN8FI3xAMԇm1XyhnPO7!2*1ʑp=2BʘB+ ]1gFj =K j&Η78ysHjy4S/E-c6TW&IS:JGzY~jOV~=XC{rx @֏9˱5CWo̫u4KSSY\%= ;u?*FgAfc.Na|]MižUei`ڑlHE2 Wd6O\tG#V )AtUAīs=E;P'mD$XZJ]OcH[@82]rB^ѝJٖmzΏ@\UVQK7PND1/E2I9x7cΈ_RŽB Z,b{yy jJeI^VgA*^zIb]4B考+{g?!lYyOk TllMH0’s;bP?@ (#F,Jvu129j<4XǺǦM뗈Tv+B C>T:$i=4Mt_:dg$qu}z{<~:ZJZ鮺ݩǓ(:bZ0H)TJ`? ^Ӛe$yYVM h)127Iup%4>##Jn+\,1z9_]?~n*-մO+AV|S$ҙ<I qPJuq2g:u=#7?ܮ-ntK-%s9 9qUAK7V'y4 . JK2繪d pĥ;3 c<;$f,[++w$Zi5?gD1<ٷ3k4CũWm^$NA.XG̞#XLsNwL.>)"z,oL `{^ơDZF }/ң 6~*tH5%3juܨܐP qTK;}Jo5jmQ~(6:r1ض^h*qaT'5sM5F(M!Ǧg5I=Ҵ]q @)/z0 C]z€8n¨8Eբڽ%w7~݊ZXk)tA.F (XS- wR!$ٺ' ڊ 7jpBp85Fp,Ⲇw6> N:7pMR cQ MA{Xsn+k9nr൸CXmE>~M e.p9-N.XH_,3WE'Hh8ޞU9-J۞_g•1Q%m} q1ϒuu$9k:їˬcV3LӑUࡩtu˶0 pX>ނx(P)ˌoqJonEJ1G=G1ىp3aHUه6 >e{^Y$d$ͧKhxH1<ǜt0Oǭy܄.7b{*G {4d- e4/5RX'a*e 0]"~!U ~vrϛNk+y요|?YCJjF)X@(k.5]Q d-Y٨KhTBS <8@w}֨21 ޅrT ﲓ zٽǭ E(>îR>ٖ`rr60&YHMlcΗ|w 6 s D rJ+!}9u)ѦwϜ ((%\bxcЃ mP-kl][q?@qY0FY9ΗyvDS(Kv"^Q>@U TUHVJo$ DzP}th[f#ۛ lt[$%1g k.?mkfy%W=<*ls1HT3$HzZ.:4d埃2íխ3ܐ02GԿ89Ts?[5OG;@Xy''a|$/e)8xCQAƨny6 Pk7'Tݢ:xE$4^=DzQ#H?Kx svXd:o|XÞ'|F`': {B :`V.N8{/%"rNQc^Z\'%>9+|*@)Xt%q_I]A/#,πu깪/ wLȽ%ǵ49>9b w:NݿPU3R[H'y]~9S3^6 # *U2]VV6F;V:E/PEЬQ8k4juD˥T-* V.E[U葌8Ǹ` m򞎽Ž-}4Y#佴'FF}< O;qsW׮ pZWiZe A|Q|ZvJNPSh)Ei\utȈ$]2gY}HE Nx 0a2syvgZY3߉‰ (GD4=&ҧ4B^q 3],?FwΐQ zh&OdQWLHlv%/.g*8ʾD>!U[="o?z!Me*xq GBډ9hLwaJРBȨhV9_-"(aS`:"^JZɟl Pfjll5~8p lGS b9klJ#z.#PVM.QOccEW{TeqPJ꒣?58:gBo z,Dڢ(#ImGh=p6V'_;Y܎Gv/),Z.V^wmLm!Gl&hZM}(J 8{Rw[O6OQOy-v! 
">K&%+xn #CYgbɘFibV鳁9Oή_̢2m-a4mr|R[jKZ_OV21)>79rlr2gq򗡷HOB MV> ~ONo&+zGG /xWFxQa "A>BㅪÆ_0 <\脀:mߏW 3upR k:Wn")OAAcW%%E$=FfCťḲC*b) +awࡤHJ;y3@ kEe/A dxSl#҅{VɕAb4)mARnP:=o&ZA?!T2-Fj [B# >V3;3%6h \݄+3}QHreDV݅Rœл84ߊe~I)nԎxKd-rl>, ~~j&+~ ]w!;3$21rS@cZ*lm d Au TZaڇc*O2ۣOsx~BЏZ,C/:fw Oy3䚮AxTfv_r@)H%7|^]U!}P)$=ۧg~zdsM=0{yon( W-)H=Ga;ԂC7ݔ)aݫy 4% (В}ImNm1 o xKTҿU6BWC7X08`2W>;(dQ߾?"ȎȢ@*xג_c::g$IOgR4'ƹ+ p 2_Ď3;}2SPL0,!/9kByW.YݪeVƐ+FT~I Hv˖VZ[ x$jPTa xu`$f sp*Dw^r]ć)3NdpJa=qXl3\"#Q6޾6roˆ /Ɂ#UĢz\\O0bcruyklj \.%|WwqJJz3ZdqY`Hi]ǡ' ;޻#oؘ-p>G$8۫(U0\`RIx{|+]@"p2X5Mȕ1ӄ@8ɑKJ.壳[IpT >-"veϤW[5UbF("#Ki% sdyAFm`x=?pӮi 3j n-@Pk~ͅ((J5c=Q u|S1R/YiQm~,p~E]%ݜ: ><emdxs"ZR4euS=FptģM؞VGL2jڲMs^UTߞEw,6h0->M9cQOMPYD&_GWpij5c3T7`Ped*4i7.Aڼb1RԈ(Rx Q/zoz}qepӺ=:>&n,!ٛe9glrjm+AxRn P|lT2[ui]S~Vj;Zf,UwLrM2.L #Cַ*qcLSwIn((6H4=+rc6]SFaskDtX#h4:4P~B9N>!^_?MH;d9d<(mE&bR0)kxYA,4j^}P|XƲbHoWhϧxY=PK5 pųFVt;S_PsO89\B(oTAwrl]鍻+MFic_%~J\y|@iwULhHL+k/Pze>@ H7TO^@Q [8Bꏫ,jq6 !s t{uSk=6彸wPC1=u6}NU*/gF"|9$NvX6@&R$ Wm0uE[ æ/SVWc伩ھ?:G{phC.}=KVcxrL`fhFMfD9ڥy6=r2鞒-O猼,?H CHk$iشVbp}߾FTH崬nq3SȐ~O$y8>Jb@><*!=Nҟq߾FB@ܐq͑.%ZF^Z'0" [ >7 t-B{E2>[ :r#G=Q`݄D7`l&_6>|g3; Oq%(%TgՂ e??e:gZR(Ux V9W q] 5-bG)thn)Ɇ"v.7܍|˫ӥ'P)Sy^3U˹;cm_`rfJZf*-lש#'*.KEWCJBI?!$p觞#L*K4FdM4ץ9qY#JE䷙2ePĉNejPDbVmg\6MhZ!;MuC}HyY>uZlqh 0{-L|~do (p["̛l`8c|:PGu5TU|uyz #d vIi,&wKw&wy-pb_J[9yjT&Іӽ&AWW6[@G񥹣7pcccؒLlٻZZcX2iؐzҞ7z/rUh04;ņc15=*lr̝a?csnʭJ]WDx2ɨ][XM'Yl5Cf^*)־0rnG SX%>:>R JP+f)hxTbp7~Dea.8@&k)k|p~[ )+hI P6SW܅ d\rHPI-afGvkcH78T;`SLzW2 ~F22b$,YLEjXCW/]XFMy?6K,~b>zE +xY:\zu5 1#EwX4z4ZFX㕖7 5qR^/gDŒ*9 Sp#}~y0U&Rc :o'rΓ=-8U&:e);{cM$%F޻A7g~7=$y33LUI|E7&HW 5D A}VvM]R ra߶]_ t*_@LPBE3&՜=tNזC4yuB$s*K衶hehVI+h7w&C}eќ +]_ن7Ɣ*@g 9䜧Z|ȪO=wɤ,+oJ=j({9\=U% E: *}$uzKӎ0Ŷp |5}:BG~a$4ͨ y|fKq7S_G@@>'ijnO{Hvu[ǤhSّz mL_n;&DՀL,'VŬwH"\yv.y|-:N ~9|SP,>W<픋% ['/31*54>r/s/@4{(9N^̆NF­HHEcTIuⱁNm)]yi0FCb*lZse۾`3rP)jWm!F+NzlPS .DiNaD)c8k+wB>l85 g RoUr$=a*C=-7[ؔȨV/7= o2tҝ:3מP?ƃf)o9$Z?+{^2lA5F:Sf~kʷ#:7k.qdAMSI.ܫZ m=A @!o nJ).B߉]%ͻwѹ:NE#ɬhny:Rq~MlxX2cgY$4I17wToϰfV.xsOWě8'S2;n|% 
+nAU&Ra%Ũ5R,9Cmw33ul{E-͎"y:Wic01W&zyzq{l~aK3h|8H\-SbB;HB[uV۹UEL [nu(Z3ºoH-yċ6Tc{߸ݍļe )r*vg9 !iszr(lN:zص]P4`J["ұzx>vgI[X>d}iO"tS YOa&PB[C@ɑߩd>Ӈrezs*AXݭ"jّl]Rk8&^HS߆yl')^𰭜 R.bW}a?8DLlK1ǘb:'Fz5ŖN# 6SXx"y @٤޾GC.mIxmteC?B`') &7o'tef v2ò{)lu Y#Nj$JsIG+4F8 ?mWNc>u~LG[mb0';)Z+ $yuTe~M<]kDm 3^nc4~ *)nCfsc>`=ۊ KC!0ݲRwW,ކ-EL0nOS \9AIbپ+mhZv% m޵G[[ڿׅ1"P=u;78*؉j1@hsT]Ps5:`V[?-WxbtYx ._\Oϝܱ0M"vie5,.C?c lG a֗T`>?tqtNN?=~;Jyld`Ţ5(81]9iCN(*OS3dv5+vنLz!d{ATf ע?8 O0ԭNЧL` xu413D[: l0KIh`G6N].]DLѧw'A: \ÙW,4ۛ}ժ?6N@TeD)xo-/^)~iw-}g:@r|9hCTB? W8po>o u} *z؅`%G$vUÉqLbnzIԅ24\@,М, V&_Cv0ɗ?7 ̀x|ݝUӕ`^ܢ?4%R٩%t4W>X*)T%Ӱqo}H,* -}փ`ӫ|:8} 12*Xj׹d'IG-JBkrDwPv8a)PlV.IHeًcEԨ!į0ySjg +Ȃ7a |OuCm.p:y m*=s6\MgI7vn'uԷ1YGL$s>%ԢԊ }r<ΜS{んD;N{Sw\q^sf$ /T-Z"]nsVYɟ*H* F*zrׯ,Z5ulfP瞊~NUk{ԕcU-kpSZYR/NP 7;K r4d#YZj.Fg̋XC\̼sͫp_-[p}=m8??'H\ c1 lZ>uxbXDЊĭb9 l@WGƐ(wE 0M6r:a4/XiZuǴm5&U¤)Ro]J"s[hE-{y@'Q< i$o 3dcHɮ=uJF[2*sjd?^mL4RxG^j`!'vlAE5r9.ޚg-ծO46~YuAY@E6 *Ι,G0W?,6̦ zi6_mL+M3zǁxEO( 0Yy-en|"l־"+ۭB#4K8E2\^>&agfwU%tn_DI>}= O O-{zW1l;Xt%5*Z=otHmm'!Z,]%һA+=(iEp.X<;vi5XCgx$'E`^/JI8e0z"G>L}*3ƻ^l]C+w0z$_NiMPcTZlzݣGm%Ah7!#o`Lr9)J6.Y6zrKVmoJJiLa );E淓Ȼ9×H v:Wxγ3q+[_FmcѪ~-"hŶ%* :POzjhe<3ƜU`Zuf֩٠w5aE K ;+g\ svf;7XapՐk!,ݗ]# :"iˬnn_̓bfBto2;mm5 h>ZȲ5*;1!CyG5X;s8:<#؏G| *gIޥ Ixo7hw0QX= 7Ft%G] Y@Nu.rFη#43AoD U$ 6CӥF9dJIؐY(J}I8s"htźm2je`u㖕DNGIU.k=;lZ[hBsxV\p4LC#d@K,{'؂}E1Xl:a%_NE̩"OO54dyVi Id^A:Vg#g \6PyMڷ9=jv #\RɘyvXٿaPVUvVY{sX/!J}#[@`AЁ87Dܡ!*ѠHI=1?Ye@ݖ|\=RkyC&r^@m ge {'ب9&E/BD 5[NU8 DU(.^[!lxS+]~|a>OVE00擯zBfK|p|#g pϔ{¶#Cǭո kNJ[FOtGH.;9;j88Y#a>(uQ-i8)#'.X' |.eWe5\: VZ5p{-,#Ҳ;.!dS R*S@: 3ᳰ@͝ glCpL%[fmP{S !rOz2P@9xQ: -owMPI;FrR2IM5-u&ͩ Es%&p !ñۻzcAV!@h-+MDi C}\w0+Vx&BA0p{eK:, s jh7t}mpE)rhbU&KztU+M2/8Cm%ʸ^ SuvDJw"#|n\9$ ENؖr' =p{^҈['f +Aq-MQXmfzܝb)*AaHsfAd<$EtA.9 i!P5N+^84լeIR4)CyZxkWYF<@%%x$X44$!jENhX*R6PAjDw]eEqg.BdxktI;!nX3"^ɨ CEV5t"hmhxA'Mf#\.vml7=X?&Q$OE~TЗ+_ɨG($$mmS8U OrP.% 媷|Ljuw{!ˆ yв"S+x*Ҭvho7i1JZt v~FE`1acr^nQ|+-?q:vG\ZwN69j}I) dy8A?3fȥe x?"f,:\m'癎YԃXgur!8T|JgLp‰] ;ˋSk!p-(k x{(vخ4FO"s펗A\Љj3 
Hsx$͕pX0w8}M_-AOpk|GtPlpuz-oCSH7HBdexvڟ %'\w9NQcS{5s㖠>eƛ#gQaDO9ݴ cdˀXG=8p*/T gaKruEg 4E:]@qoKGz@kITfrQϤdE3n׾-jED]T V0&Om#`8B$a艂ʢWZ*5滹3EǞS o%)6䀋4qAg V#__' _gu]'Ov:'$6阘)׍8Yϙ'(Z;<|J' bR!T&$fo4{CoÒa_WvHqnY1P?̕ǧU]H">nauyҷvp­ #NWL"ahztjٰ Ι5%12\~Cאu`|ֿ((n^ÝU?#C=53|~z&ȋok9˷7emE'vS~\{y8"L1H$.<[=@ j/) 8oc,` c1I j 8*T,bGǰ inqPe#.;EF?]V c^e=y"6QJ^ aO2F' ӍjCole+kg~Lt}estV)yN'ׄTz|I$Uw$?KU^n~"2[W f+#6v ፼Pd1-nYZeyP4kz]tMB*M(IXft4L30nU]} p8||/(7ѣRI9B7S8Z ,~%g6_f_;WJSS?z,ndЧ=.*JBܵ`23,L A▗~gBC\JyBO׊4)bQ̚[H{*⛒t@pUd.NN?4LrSm)X \jMɟ}M979R}γk<祷=^e8IeEc(F fҝ0Î/5q8ZIp܈x?m8(?Y4; k(hFsї/X=E\1R$kXc"Jܽ(B=E9 EʩפѓtxMEחlB(3.{S%OYw߷3C@yuP~%J $,~)͛ggN%;޸+HĦa+ G|@Fܞ:l &p}i"1F@${Y N-)+"k6Gh@enK_y¸L"Em@ WA7>g|{%l  Z txj̀'6, ̓lQA2#Nb'P["4ǶT L&yмXe1)Y),1E*1\?`C9/܅ ˵Kr8_rkNzZ 7Q ."'mmO3dkX! ؙŠ@{LO߭Wo7ȹVo1-(qEHr>Gk FhBQ$ I5o([D6J\Eߡ49v__I +B5&8 >ׅf}?S])[= A ݕk>Uj DI~ePKŒ#~$b#Hq+ԸX\00G>H_;kBG3 w;<;L>epPg5_wx0M{&*Usfo>w@`T-7WvFK‡ *#rT]`QڌqsBSjVYZN\CM7r:е<@1EqMȳ#Bp)1o2D3Sf ϧ]6ZcrS^k0kSh Yv-$%oh mߟM.޿գp Gh4)e (MGB?h l 䛫!eb+HJxec]cMHk~ TyLM{|0B= ]a!DkF+bY3hvCŎ> f{0ʢ/!ҵT\tFltq> :q/-_$2?qP Lb3na !sS^J4L Tqrê'F~Yg\T,rs̠)u8|~χ.Z ٗf~7d@݊fzpm)T͝bUG<.rctkp-z8@@w ;[r"Y$bRTZs$Q [lK /Ic+3_ؾVk2Z7%e&jw"gRuVtaR\5pQ|NF 9N`}*#J`Qc=EnDܟAZлXc$gPMVltD,QF#E7pجsln R&DDzjRSHZ %h^{Zsvח|єҋ$=ZSjX>đ\]̜%_Vۃ"1ً3%^F==`X-]*m;cO0O̭_q&Hpl}yv̖cjwڪbIyk$ -Xҧ?BE #@DžQ9yUQYFyWNw-X|i|;̦1I5Aݳ4k 8oͤ`gm&TOq2M!p  38~ aP4s)֨i;W wP-0"9M{/5z0} ك o$7mk~>6)Ou+jNʂL&+r1PFS?'э! 
a_q|]2yHS;@\vw#aV A֫GX*P@0ӟ"FzfS.Q?5=J+vLÚArqJw/E(E;2=F[4H9܌ZH_W#M3&<Ĭ=ʹ59ە;c p>>gDy q[uSEAk{%8Cǣ K zGQ{T rb ̦_i2Ϗ{XaXx`c_ڛZ~ҿ5N.T2hM'p7c4fEuzz|'Zxc߁<2p!kg`,-lb-> 20]'iyvh/9&[D9Ԩ(=6+l2"*QSR{ 3_% u1=rƒg14HQ{lp3D3ɬc0Uǃ`a;R;V9kQQ`#t~Dij./u7֒’mU e:*~RFglrz5%J-,*;vr,X2] #G"u6!⭃Sq!qg1{7{Qn;뛭$Vg$V 2f=ge" =6ݽq`9Vч#_ nt.Ϲ_jօDw6MցF/&ѯ`PkoRZv䧘Y|@R3F~c(li4$ǗEIQZ-:o,JA.hcp/q O xeu&A~8u4J?5co| !ϒtmIw.T):Wcx+ky#+CoQ:V\eYyM 맘p)t=o$LPOH:FUDļAZ~8@B :S\+/y1'Ch} :B ~T'c |9 A숥l/>n^=B.&2LR؈oG(iȕE|{ KL{8zДǛHJ2޾2 3{Rfy^Lxpdc!Y#gK.DwCU^q@)a&veRM u6QWiUeOTX;zҫuy [o8m0$%IFm'71t٘>Hu7}b 6OKK9@a=Ί̤0EIg˚'UNԮt,8 P iUXW;7^=OyR:JjiS u޳rIDNCDq}O # =kq~!vsUWO7IZU}Zqو 2uas{L@egH(ڐS9&Ci*K!ik @;Xp\צr>> 8p%`Y~-PF 2x0, "F,-U(eAY?o |ϔ9`zԲ ֮!0HHWԎi&A=$"nh ৕xyj:2f4V2WΚo~H>igt|>v Ujxi5g`BtYdr˱I &Sa+~gMREd2Xt+WOCWҧqr_:9jbvb֭.}:q3zI# :(BٸFJl'=SH/%]|74W*>fe>ߧ5qB JSs\)Z%lm#*@i 0T2RibҠf:S:9Ao?oU_jFQ=\{q˳_[w ;1?w34k;Jv R6:ߨ.2[=dϥGm2𢹌MXh`'!BݐѹKxrPL$hA#'6r.y5] . aQXfq q 6 1+no)z|FK=3~ 䔐*9#G }E9TFQodA!h,")V!F'pR.BNo3Vk0SZ?x@BnFO&WXYpBFYuzCb5vpLRzT;.ύ LP]CE N~%ӭi#b A $P}wg -[*5 ?s ~bJJkء/m[Ð b /Hvì`Qrv^+ ,MjR.O2&6(QLqpԌBR/F svB5B%blCa\q1htW@B;g'0B61r)CLs2WX/:IEMlx[5˙ϼ%Y`$^3tUk觞=M cTʯ.^ #Gn"td#2+>>y~S11Ar?.Tr x@4@pZY#z-dtnc3w H=D&y @a{f@DEꝝ5}U}&!`P@""!M꛹e4&H/|9@uDprj72BmRUOVQBt]W!1v38ѵWq1%3PVnDM7"35) PBɆU%K`px (I qXkoW+9=*447:jbK-Cy6F)jwή/2=zIwI:FhɬL:lx]6=jj ]Z0>"O]c;\bʍXE Qbrо ꚼګeG @ZV}0L\6=9Ѱ\\^5k,bAl+jPb >'xM6$N!jj@s)pf za>]7Lq4ua,mS@cKMdyTlSꩌXY Ig@Nd+(b8O 9n59@gyo/D@4‹d*t(e1P {wv ~Vty mE`,Ǹ;&&h3&86Mö_0 q[ȁhz xCncА3#ô~l^@|-yG`Ɖ joyAKʆ8|^'+CJ?@ 1i`¡%! }X!IF5Q6Z <7rS-_ `i|5ۤ=@nGMZ7uX"klb)z4 Yn %N `ʍSpϖVWR˱!a=Bg+U8s[ P>_o ; l\2g()凖M[O{)5EȲ&벛eb~ 7.\f/'[I6g:F)>oFOn vl`QbOBW|\ nʶh 3M&Cd,s %?6Fu5D,{bhFSBDp&3?ؿ6Ӊ>Ka  |SThG{}Y"jV#cC=#f_U5 wn)Hc샒 cn4P^ Scwb@Gd0+~Z^:'fA(Ջϝ6pBXTМӰ"#z.q!]yWEv90)AKY¢(U-rt~גY=<@Kx,=tMnd[ N0; ͬDҧ}:Mj%w B = s0:M5Ԥcy >(]9LI$໅iadu"Vb [kMEG8J,/ :_h̵*.YOպژzPfnhC:iM uA c6f $#流x~y>QU,n*{4"gi9A:` 7 pN GM xǒ jV J}ؖԄ@VAPo#SgTWIKd"^A["̷'us鸑T=Cr "j5USeVLePiIOfJigHL(xO#~f ]xj? 
K.6}.~eFֽۺ$z!cX"m(?$h ЬZ4tV> ~^ikqtH"9Zi9%[;-Y֯oPjx Qy{ Պ9'ZciiDP4OW,N5PHJ !#xg*m7+h=jp'(CwȘ㶃$~mݬN$h&k FtAC(c-%ATvQ#!Ӣi{B? Rzohpz'jK}:*;䃅{mn\ɈHڗj.2D¢jP'6[8#&#?H ,skd *rąXفHnTW T7 O:#1J- |:$a~g57a/41v\a\7CD} 3x+@0B<{ĎCũ`ф[l&t wO}3XiX'}mϒƏ%m,p2sk% 5ȩt LK_l QGBpcLYۮý<œUToL%lPJ|m[YsJ~LTR}P[_/=RleX8'Qn譀Hߢ/`4r-YE/jĽ (Z0R7K~M-2}2k۫d:O!jRq 3&Ep0+j!e&Y'/-ͦ)/ cC@(P 2pjoKGx_iQ;>@B%BvK(7[4y.0ަpEjrWܩ";7lPiA^dٜ { Qdlk.jFI _1r[KuvJKBYwhL)˦ Q2κ,KӖ ʜVTMx@Efx8X刌4?",)$"Ju͎ R,gڙńfc}ƅ2q݂ۚqU_GUE5W2 *GOL_pե,F)[a ZQNq7z9f7$!iHyNˑ2=G )ud<2+ ɫ{fr;#VH'U \\}*(ܙ:xgǁܭBZOuOGhÍj=J% oSodFOv*0xjhӗ _)"զ[,D"4{ qsqUXmrk斆a{ߵkCX%ؖ% I֥Q9{ lJa@!_Jg%u )T7͈’ "(h)zsn/ T sck#:tP)dLը4؝+Ev`#azVRX3Dg:4Yk4[b.A4ڄɡ+.ʠ'*Pa!}27Ѳ;3:hPvWĄp9>#8q-qcv8W1IJ纤Y 6A83l ] B/Mר$Evf(WZbF+xV(uOU(` ^WoM6J|}R BNNi-x`C 6VƥxU5M4o0E֡]upoa6+ZAKI^o{̃\ה`1=j.{[fA]eLxƞjY[r:^ W=?tq6d{DFofA4ſ.OZ !g AkAmȵYOUorKa>fiyQ0m[yHq&$ ĐCGݘߠ4B&Z)$mpHiH:9Wx1]Cat^,Pnj9F4$\Ɓ-%q!`OhEa dTF@@9YxF%q\D*Rl{H!R/u2>dt}$? χJ%㲐0Gfl%4􅠘άuMr"qد38I~t/jK;Py8ir]K qtlXWyڴ4<63a?g*!h^I C,]sՆgRȩkW{jIafd4`_w'Сvlw\5W$=>ػs1>;s1C'_n}b//7Cԭi+OVeVEphWJR161E i9âbJ](ϭ`ݪ$mUt `g~@=%tx<mpHޑk4GVU9HQ:莑*QZ_4R[LnIIJP? 
oNܤwE1WspөN9Sv,׎u7zX1E *dqdbKQw},dqh2j[>E7J#u~{ Χ“EI6j7,K>.'u#=,( P e_:.: A<ٳ_mk&&aL>] 6R!ڨV?qK5kTF 2:ϻC8r3*Q.}܋B}өZ顣1Ȩ+V7ejV"t3>0 %9szзf" @,ۊkC9z&q*XO&_+⛴ܦ)< 4^ H1, Nb;D߽wrTN5_%w J $׽BW$+ݺrw"@BKkcBf[Mѩjq71V]68ű I X&е@:+M {ga@mM0C>[~UG;Qq}JJ)QfCQ!/3 X ]5-x$nw lBdl;gS*0uM:\r @YrA?1b& df-.D5Q[q`Jw}WH]N =GgBوdݎ8|\hDM49Lz'BILp`/Ba*kRSP:;:,&T]יj\ eS Ҥm 3/8@) Ssj[7O\ЩƯt5nl:a#L :CoA^%&Ŭ ;9@=K|/a[Ё4YOGor>m5č+ <:$b=q@c#)@*DF:I k頴4QGש;8= 2iE>$U’ՖCG6M؊G]I)pj{*8E \qyG{oE=NYAr=|'1;2knP^{q [q.:ZNtH;\ZB֫(4V%CQGA2/T6`]!B@JB }a`qړhFNw K_[>]kaOj(8ğ]ЫRƺ0csG(nkj/GnpCoG)q:B/—N}tVňY౉2FJ9fu|+Z2Or~=^Ro[7A LI$CN(+/b>f3*KPbd(1C6ac嘑zN VjC+#ݾ ATv,u O ΂_6a]AM 'a#^ \$P[t ^'9Z uoN)s)us1נإ)ҩ)x;ūxNRG4뽡GnpPꒉ$3rtB~&f !D}|{ hM>Y@rRgd;}qoDX6"8`9ⱫJ:K fMC; HK2B *H ]S0.ADMFD>Z@9#$jw)u`'{n%q_|o~y& FjjJ<'ٓ%G$Z^C=`Y,o?>&s܅N{v.^h6gY{l pK8:tVIG=K0NJKCtV]J8K>ېO|o ZU2.Κn߄e:+k]# 픆~iGG$ {@3z 4}`9퀏.IkC2M$߼bRE7πmA˦q"Z!,hiʈi"bvӼ#V9Ci6-}sYsgm|37a5|<fv栎Kfu $V ]ܒ8VS rT1Al1t> y̸`(@UUI@9סl5pf d '"I1%*Em'aw0Nc{AnA0CX`hXc£s4N.V+¥Dimw*}?Q=FYz<[4w#qq2!^H|ޓH%dQu ^Camd9WdM\@p(`tt \oj}0RW=tCӶƪX Y`(za$o]BYa곪j5uRl\'0bW*Yܖq\՛rtbB Dh DH&C")Q&F7fT2I$:Ӯx! 
L^ɚ'Fev5xo~Q呫-g]U}G.du]lvc9?յ2u:am(Wq1U's&A qj(a >Fk%o1ڽ#9{h@_hdQ:=&pL8XU43&'7ɾ h^suy_4=˻; ~:>K\QB^NlۖZ7Jq`e&@ؗ8Xa10n^EU׍x[-3l“D8R/s!$}${GA6u + q|/O1j'K6bl#ܮ 4+bR`&9/(DBOX`36cI>W~>vƟ\ejB_G }~z`:s rmob#$:ފG~k\OΞXv=[Y2lsOL` ʹPxf;9.H џwC ȑOigv)(r&Ub$q"T% 3y=O}ػ 7 ;J[d|7G;tzڞQ=k1;\ :2R{PrC9{&zC}dC6"{Iҭkl֍X ZL݄& tbț55[bΡi0+5Ui4q ڟ*V0W;6έ(Ȍ^6;lUԤ_`SJ{̏q0TCIGuyFÛ˥ g8 i)WMO73;}TBQl3 ҃ՋCZszWWFN>mnhlIF=0F/=5|w\ #C3Ď6 eL"f-8Duaxrf6?yz^6Vk"KuUY'49wǠK< "1v[n-jl Ɵ}*rF_v`F $V#jG;{Je_ghA=xܳt ^-t^*93s7<0\]L#NJ6Ş^cV𙭯Six1QhE6Kg Fg`Q,RɔZUB٪K'E9ߘIЌzt^)؊OBFu[tߎ_pFM6}{|zS xa8pxtD0Rj  jff 8MVSV~v3VqŏgpUo76t4X ]V,DGEatM?ڗʷ E .g|FF ΣUĪG,J8kPYpa/,a4YbA5.yoy )}s7AL&'׃hi7)>u=τ,c6{?TLKv_<7.I ͯlQmB9bqٳ;+^xY{n(CDG7T<␣ S̘tBq<hBA a[|yt4Eʠ#o )nJ~ Ag,3hK^?JlA;^*LGX1H[4(MkjTJi@{ť~.0J3T1A3lM<JшPUN_5#:g&Yyg!2Oe]Arn;pW8 mc(\ƘU*dxlzg5 JH,hI#ܼ؎w 7V1\t/x=D%NVlI|3 P84n1Q#uf$(>~_P } OdބbH66ZQ} ]Nҋ46%Wx)lYzmŢNw&%"bw9Acmvps+IU;$5 cz$Ƭ^ܢI5-.ېLwZ۴aޭ:w!-y8hB9kJUҏN.q/u }3mԩz/8Rj/c20;6EFM_n_*>_w-nYΜ%F^VD<48txȘjQ8BjE}41^\`~f? `rF爼F٧؁Adp%| .䗓= qrAIj0y;Y~lɬeh G^F;;X#ڿ^o^R|!cXUú7@"%IDR %FN@51$ B r%R+NuG㢱6f ptYEߤ!8:upjF#-!U&(Dagy|Ӯ N+RMJK^`OQR3R(դlL-$s$"ۡxTk)+\e!]">"fͬ^ƈ('S8K@,m$K*+ 4Cߓ 3$x70ɍXehѱԀCWO UqB6^ mvdl\4NSzS=֎PņMcd{'B^)Z̗)6Xe>a)j,⋐}=Pz'ɠȁ;g/ Ȉ~eC/]P''n0<|w{ϣɩc&!GSy9c'^W10&<9^"G.Է24k {.&(qXq u3O3$|`Zד^_j s[݂ƄYXGiYxu7АwԦDc=q^ht/7/Jj,rTe+/}?}fـSj&ǂt%5x>EmA8MөňIvCQt+4V6ϵ4?GsH-_.3AߩƔtCnHwƿsF {.;KwNGYzE<Yj%i 3DlOoO|6+Lr޻.p)RP?K&U18>Gn_k=z$r,!tU'O3è8.xEf] 9SW">;ۖD$~WpGB&WlFR L\q^gw.̯"VZe\ f5.'rMpqՄU F0$xUhnPX6'To+', ]+O`&h71nGa@zxX[Y+aݺIEXklؤnswJ!|9k҄O8ȶXJ\5DeEhNS4K:/m7RL>q'ޚ,FS,n ๯<51pTKt9_Pd<*%ol2zb{-\51=_Ç7/cP@e1lTAD"GlO;H>֜`OSsȈ)ȏǵSl-9!-{o[_)ן)}F v>EȺY9`?0TS[78ʫ+1z?:zBF7TX72$gS,}ANE@hǪe XɛXV0gF{ LUф!Q\kW(Y*Ȉ4ٖV?-h9WXNi=hGFmTS*~F#pp\2sAc;'Ea3Kf AHƒz8Hۄk~TvxF? 
fZ^.[_3JB탟B`8ErkM;^ƎXzFRaUݣjCl7O3wf!KNy&H8sGRSlwpb.%~K=C9]N7uHh;f?0GLh=Br#F1u-p/ɏ4/kaN-QQCP}is5l}xσ5c(?qn+㦬;!5JЦDg ʾÊҲys5*Ɵ|>ۭYLgn}JMy PuTeaIn/֫N$P<-'t>/=KX}HH-zP M7 ++ox珮=ZSb:{v1X䭃5#0Pt܇mSR=ɛdB;x+4Oq+znz"cM\F C }Jϰ tݳCP*'(X]k֘}OLk& .J?)Xi$ %s2-c$~ZxfO LE|hLSy!ܤlH]kxu5ˣ2sזi$hKmST Q}ݿLr=#Y3J٧ A#<xg_&b3t| #yWvV63IA=n1&Gyx'p(4m7k qKۮn׬Fm*DضƒNGxNu 9W*[e@#:1(cV*HkܦMn7l XUQJ?>0i[ :,*;FZn(Gw;p۶'7ŎG++!><@˔s:8=CӔ=|y˽PڒR<v1G9hmiqi-H*Ir`Wv٩[!^lqJ5* !?w/6|H=` V.>)B6T<|5T =ho7ɗ*T3R8vֶ'@KFqpI)Gr+U:ZxhI_a D+;S*Oh*MeQfQ<-!v*=U}]d8՗ACo%xSYA`лjqW<![35,E\2P6Z"md_MZ5$pw 貳Ze QNzz+ꐳsRNj >b%5U#1rA%+DN shH^%-d`! ]:O+f$a6CoA`rla\D{f^󬬋oWߓoip@Xя`DA9AތST& /'΄Q \(3;M BSawùѪڴeZe=M*7`p#9p8rJfC JGTτhnbʽM XMD=EJcG9p;b'"N|Cy$/p{ <#bڛMYa rECE(J~_'EkKY@>'W&[iRFej+J&,)x~ѿH۹ۭ/c.S4ÝKjv0g Ξ]]R5w>O?Д@` $ ?3;jޛ^4$bQQ0G3S^_>n&/sm^LkX0v劣$cf(LbNC&E6X=@nع34 x8"N tHҰ?D?!ĺfX3I v~TtH#v ڿ)!gty߲ iuEkF#Oرx%dd[kzmt6>=%e }!wzLjk.3|C ,c BE&OXy]{ʿjţRB/[%лڽ?#wxǡH[Nj2R`w,ϼw=Y"(:!þ|b9*>b44\Y]9Gz $8/JcQT[.1Qh(Ż7:Nx*]zge[Q)<ϵd,;)#D7ꁢ vt,bzlMRUQ?ձU#q?5xG OQhr{t| ~*+'v Ͼs?PaJ^q Zb ]-8hメSCp'3Jtnĸ*f\, )XdO+Ro| U U&CRDV:+K&X!-zւvWhmlc7=(͍y}HEo\9)kAsYkVlyvfl\%'8Ӟn7hO %&g˅LoL=34RBWl(bZ裚RL"ֿ2*ĬSچWFMe5!{S2vC^uW$ZҦnwa=[WN<<[X|0d7O+Z6rC}noT/ך.\z13B}{:gř 0d)zq+ 4VoRTnGsS)|[x17DϪvc|4 -"E8}UZ\8H FB|SlfZŔ/ q{; u'qC`o-$9&hkcޥ.Nɬ! -ܹaowQb>brpRq,w"*>$1 8 |jMlk_=6D©7is{c笨AT4Kq]a%Ɵ?pnDZ<}/vGwˋt#Rs =dM)b8/{8i3"\@xD$=ةSP8zE\g*Y9A RAO BٽZjatH1C̸RJďFVD@-|^S:B3y&mC&Ώ |yU>WΥ /q򐹱,/~.~8r*o!$cˤ859S* 3e+b;(|cBwL2BVRm &6 ^?Ѩ&A!w/g<0LU\Ō,|/}h}GR״5٭ 4w"Qm}d,ɳ,u(~{G;|r8}qPXpJ>evaT\{ q4צpOQQK.b >nr | Lz($3άo6J()]OkbRjtsZYJ ľXpxIu5s (Gf T6HD q/5xU"z(VWDVqǠA\aʟWC"Om"{0-_*e~*e6[hr2"07Iefxjܲ '{J `)o(IԹhh6R@}/ ;?AL80y ]YDqq?Vp)E~v#0V/]=˼rJ#E:\;UY@! v72J;栉+uQ>C, S77,>b J,'s E qY\Ќ̢6d{5|r_4x.K}q}yB(%w mWHzHv׫r? 
smijd֒$.l:"[-p^|f[qŠrs>&ԶKZhaLPpа*uhmRvC;%PVi5OI|fz&/yl]ZRk\CK~^AݪX1G ش5No `q*[$Ut`i*' 9L2UDj[ }4wҮj*b׭!1$zܰU+{KALtxj^sC#I:TI ڍސsӳ̧WV)YHHqHz oONFДc4E6 0`Cnb k  8X oT9eǪ1KhՇ\1AݮZB<-?`Tub J>^m/A3,BT^c~gXwpVV.#CqeO1QgL'"sHrm]9 ɱ&=9Efgh 2~sFx WSA gχW<""c'M1Z$j4Z<6#IG-j܉d㻨p+ Kf6WwdxZ~׀:X΁-:D..bQŇ1 ڒ!uyʽ3x#vukǟ} &;[ )lf|N: _Q\-֠T_M_~EA37WtS{Q_tƷCh %X[i!5FR5z[ypIhLoW<7~K1wBc+(:w ^@O3zoJZCCYnיp,aɭ}§[ IY1~oOS Eϲz>XTNۤsr9vEL-w75jX6+)@@٢$VexYKsfSFl*|kk>O#m\U(~.@11SEԢ9[=jJҸHꙮ{,dM876Ŵx(n䌦j愮JYH'8UnL0B[ c]]WgxIZ"71=W,2߂TN֜Sٲy鋆LN1IbxbYC$߆{?]\Kl>5gۧ$)& ;fmc](U{Kʤ#` QWn5`'~YeHpgL?%*tk 04Or"˘( u%sjDhmC/e)SDs_]6iBkҧ hE";;]]QmíCtK(Sq&pʷW;ȃ [FXz,R:$zCu 0P0׽Ok>!1\ `<l*&}0s΍~(Wă`c4ljs(EXN#3jpBBg֢,<%WW`i 0tE4y vpFVY3$Tv{>On:vV7DxL'T✊9|$Eo΄~[eim] ,#N-Xgs=!^k/-Iۡp. m za5}0ƚzd:&M9+=YrS׌gbd{ƲˤVD9hmmk҂SYA6Qಆ6Q~wEt7V[;pYes dwy\<ԿFӔ(qp˾lI葔Ԏ0ZB /Qb@wΣ$v8u!`槆GyeOI[x;է]q曹vDޝd-aʻ6 ęM~p)xK!xa5GK)X 9aa:e;@~$46z 7c', @eՌWup@ͳ%HjS!jtu1!rdZECV] lS_ STCZ *ʖel0Ku(t͞ve4U!)ij2Cj5ŭe%dz( 6E4]n|G;Ol_1\J=\峾9^ҏu 9KXg j=m=OUY0 O{^s+.^Z >};E2g( F0#EMXCV3R-;e;>'t-#/RTdnau-L` hڣZ,F'm%>F^]&7T2S5Ys/ə xY<ܽpȢ.n܌j,ǐ~} 7WO,dQe~"_gDU.Q|X|jdlOA].NIq,'w+˵U;J~\嗉_xy Tjφ\7G6Fwo M6Qdp' [V>ʏLWi  0b9NޚL#h aH}8C%d'V~;N1N{ q'Dn%c|UkEEJˎij&>9n5MxR/-|}↉7 LUi[V=Jm$6?!^RC}{ JTaiw?cز(8+!)gЀ@#{hkb}#c$Q/-__ETG̒EӚr} $tYhjrhծ173fs̥ЗqS:_JD5}IxfA푯H'9sw>7ld\ m+2($ pp$s75 OJ%mFAIH.ϷmO Gob:MK}'i 1 7RJ=_<`|Y~Qbe`jE|- %s䞟u B?" 
My o@l@zssFM 3o6q #lI^]gJ{񣸙 Y/dW6Q?RW0F-ԧv^w=5, `B0~bAT5 \Iu~7>b'9#:$SR̯KBH%t*@&q) !uvM^D}eҲy?G1g?`QC|Q#$Cn[dHs Ҡ27qgʣ'1OltYGe⟌( GULHI)LzOZ$JJ0l IEUH("ii\oSC%QU؃FŬ^tpRs+Æ,475$ :r"3'Z `F*(GDpMt䢛㔲4vo~B0HӲ#+tt@*l[5(2 '˕TJ {,dl  mlP vwRvkmv& 7 +- BfP](X8'p 1 lR]3soZvvP,m畯ۈ*sπ-Dt@!㛍T lezSs[iOD<;3Y M!:?n f,,P/zȇU9!ͰfQ'@J$N *2&;0-zUYD_[K.84Cx@/ 8G\o|I?i]9'vQoZ /fіp.Dv X|Cgo;We.Cymj#thcVB Aw[2tjl9>g|0uM Ľyr6{ H~ ^^,R)._ѣ>%f8d,6p}aH**Lw꼳9QXdpElqN@[@iƊkofndPN_Qv|l}aTa1uztSvEM Yx#_C@a}cyIgqn,DZ&DhЇq 얬pgU #/uwM^< K2U\x}Gq9̵wZψE(ZDДH@ _=jdb ,5H\g#4^`PS "# u;+`P@ 6bI_8lN`tۥ UƁ}C <377e*&:uZDAᣚ=0gU%GirtW.aʨ%:%_xui̞õ.lùk@_*9-^'?@7È@;t>4̜HiG#H5''F=j  B\TS?zz<(Uί64^"mo2bq?նYEh5j&,hurR"j(UOcD-C?xp^`^[?LB%< 91W# Gld:K9 2bw豜^Lj2P~ܻLBN=UwRg;&"AQϼ^ =QsK|V@L^Zp) :ݧMΒ o/fmq2&Iщu)F<,5R_©4Fο*Fl2 zI5f"#+#ri=cB e ~޸ w^rEJ#Tg.!jeu3ݏaT }+8c2I҅zKHf0-ZRL!ƅH--dAZz0]~y AiȬCGG:qMGa9|"/-,@˖TYڑN)FFq)Jr.RCD"NyyOp gZ}z}FuzL[a\[ sWB+e=l:_{VlQ"1RM*SP9n+,GFA"DD%xT"|G[8HJ3U ϵ,StV% 9Y$GM9%3Aϩ :v,$ u$cLER7 db!@a)} 'ͮ|Ӱ])VN=L`_<'o";P"Ş_LͯOo`j/NA- H%ur g~ qG=8Hwvz8:꼮}4N_IV>AGXW3^1,Xl6U!MlC+E=+N!\52$A5mCʩࡍ HW]?Q|kC WJE*FpvۦV~5z%YGCU)_]BZ#g@q34ˎĄl)tb7]Ô`S?;w1v)~85\`&|h"GiQu:CG-lmSgG.OR(!O)| #AKsǩgd# \^d፯zA+7V,|MIEIg?kz\ʲ^v$ϙsvH~MA& wmw:?I>o䐂]Qa5xl/!GoE*W囏wRhFN5ěEb~TM s]ۭ`Ðwd٬7W0MPX; $Jx2E04oWRԀPP'\kPx@e*Wgshn}j -k57 -vԢԎ ϕ6}B~)`,u~sa7&A7KYs s\Xn י6j S{Ht6L3Cٳ`MCZF)=A44u,ʀ(alzaӪEiDGh-GGtwrBnMhbC ɢȩ)7VD5 ad&{-nJ)WqӪ)V~7.NSAIu-eԻ?^14pL}A-0BOE|OH偦S:' )@7b`7'IMV(t_&5tVt tkYy 1̟pS/k88WۓCZF͖?\PEDcxE%h̨s}#)vBſ,4hVDsNm84?6/'c,}BG ׽"sD )u0'EJ$tZWx>kV)!"Iy>]ضv!]*]-$hWSⓆ`?Wt;)`{|F"޻Oan Ԛ j1KVe?NkS]NH֒Y06M_o>eO"x]~YE4L]bl9@"3jEJmP;|F3R }0_g/[ތTpk<QKayn_biubjʄ~q eY{ÿ7QO뿋zqv1.R 3xюGAێ"/E]?.`oǫ8ZI/h YnxC> Ĥ><,s(1|GH.Stln/kk-Y:+T7h?j3LSdY=_`0@!gya8\_4Qk{vߣsJ=قrC~%^Vm9r% ٖaj~zo`%Pq4.Fܪ16Ϸ( o1`{b*[Ļ)-ن7 53XF&0J[ P"fICI |@:qFz0&ih#_iwN#Yz ߇o :Xz24M7R`cqIF}꿲;;Lx.zN0_R̦oc*v?.w@q]^32V= kipoK70+|_KgK=UYgkyqWYk5N&-~ Aˌ֧b{8k0w `8 zM܍Ncg^ZH{4#Ņ•%ʠNT38y$7~ do)~'z6AeG=UR}/=W-XD<s/3Vwto 
2ccdNq6WJH4f<$5n}}@r{v!&HiTP2RR̟̿9w^#G~O*5XMm¢AS]պh36!+h@Wx'.A{Ǻ&m7Pva{9c8וy5IY` b<5eXjb13+k,uNHbM{nȸ|6^HLuuT躯sQnRܾ6ol‚w{!p1w R A)]Jt=]M%z!гۈǫʴAf,Aܴ hכݪPWy庵XO ]b=ޜCp~d΁<>R{{%(Dxn=Հn-b۝3pQ;tx~9ߤџYywk }w_$ kn-xei rɰ( v+""a/&PMSڀlM"(,MX)r8@z9ݙQ~Vӯq; T%l!reSP¬k u)+WՃ.&!pG>aL6rZE^U>Cm{ޛdA^-@}w ?M$+6e_҉O嘣\mBL{!:Zj}4{;T&}"ΖpfeF/2l>lDl&ǯ\ TN&M+Ј=Z كv<4#U]Z?@mN@jKթ&m\GXdI_uVFx<&'q 0HO"/&as/XOj!qv2`ĵpHC.5aT {wk6R(Xq7֏fcH)A+jНAI70z5zx?~q) T&WEfO9ר8!-[t*ג0#rE.\%#}kx#]օaz3ΰL% z>;q̮~%~P,1 _O^#&ںz"u;Sm`E9Y93;;E;jMukI f&jJ`SLn.X1!,S7rxS7؂F3gz,Bv]/ 3iK̽rzhȫx*%yH039MD^HB`qp|A~5_lQh2}T#UM~),:fŦXs3\$슞':/=u(M| fx-sn[HIh%'<4ܘu^5ߥdk^hbKsVܗWqkB.,|i@Qјf$6CW3 mEIZ&I=맇P'7mzaT%AyLͦ((T0*b*BWW k $/dH.=Hn>a`P7A*C@4BwxQ+zEF("`~,aa_zpInH6?gj,3wXB$Œ͸)PMpڟ*k,Uw>Ez4[2 b:}o$o3nLԜZkҐ@pA B%kFp45O_\Z,57hMpbt)&KR]ݤ#o C' اE7ՍԒ7Dt?E-64ubA2 <0R&fiZZt dVׄa_x4,*%uDO pSQW4V{\}]ٖqY{rV:2hCK/R3\Ύ#*d7}j(wBJ[8#=\:;y_8+)DrŲș S/9_Enj&BEDlb6gxdYk w/+ь&0& 8p܊UBX{Edobκ;)uWU킭P}9Vmd@ny`x"A-Aθ,E&.B8 ':m尽|EG5b`S"("ko% +zmݨWLhUI0jZjG5Lm|m3mCGiQDKNeWVp\^UC#6؟(ß֕K HH0eeh73GL)/b=077Sgy75GY܄`6Ь\ז ,&P[UwsM}1PWiӭFVTHMp8W=2G mPG*zpZt#k'8"3IO@3%8`?svpЬn#i+Pq$V)ڟ'irPpTu@++ڏ2!6ŀiG.Z]޿lBCmsH.0 YZspatstat/R/scriptUtils.R0000644000176200001440000000301713333543255014775 0ustar liggesusers## scriptUtils.R ## $Revision: 1.6 $ $Date: 2017/11/14 06:42:02 $ ## slick way to use precomputed data ## If the named file exists, it is loaded, giving access to the data. ## Otherwise, 'expr' is evaluated, and all objects created ## are saved in the designated file, for loading next time. 
## reload.or.compute: lazy computation with on-disk caching.
## If 'filename' exists (and force=FALSE), the saved objects are loaded
## into the environment 'destination'; otherwise 'expr' is evaluated,
## the objects it creates are saved to 'filename' and also assigned
## into 'destination'.
## Returns, invisibly, the character vector of object names.
reload.or.compute <- function(filename, expr, objects=NULL,
                              destination=parent.frame(),
                              force=FALSE) {
  stopifnot(is.character(filename) && length(filename) == 1)
  if(force || !file.exists(filename)) {
    ## evaluate 'expr' in a fresh environment
    ## (the new environment parents the calling frame, so 'expr' can
    ## still read variables defined by the caller)
    ee <- as.expression(substitute(expr))
    en <- new.env()
    local(eval(ee), envir=en)
    ## default is to save all objects that were created
    if(is.null(objects))
      objects <- ls(envir=en)
    ## save them in the designated file
    ## (evalq so that save() resolves the object names inside 'en')
    evalq(save(list=objects, file=filename, compress=TRUE), envir=en)
    ## assign them into the parent frame
    for(i in seq_along(objects))
      assign(objects[i], get(objects[i], envir=en), envir=destination)
    result <- objects
  } else {
    ## cached file exists: load it, then warn about any requested
    ## objects that were not found in the file
    result <- load(filename, envir=destination)
    if(!all(ok <- (objects %in% result))) {
      nbad <- sum(!ok)
      warning(paste(ngettext(nbad, "object", "objects"),
                    commasep(sQuote(objects[!ok])),
                    ngettext(nbad, "was", "were"),
                    "not present in data file", dQuote(filename)),
              call.=FALSE)
    }
  }
  return(invisible(result))
}
spatstat/R/vblogistic.R0000644000176200001440000002134313454003737014617 0ustar liggesusers#' Variational Bayesian Logistic regression
#'
#' author: Tuomas Rajala < tuomas.rajala a iki.fi >
#'
#' Copyright (C) Tuomas Rajala 2014
#' GNU Public License GPL 2.0 | 3.0
#'
#' Special version for 'spatstat'
#'
#' $Revision: 1.6 $ $Date: 2019/04/12 03:34:48 $
#'
####################################################
#' Used inside ppm
#' Builds the model frame and design matrix from the formula
#' (standard stats::glm idiom) and delegates the fit to vblogit().
vblogit.fmla <- function(formula, offset, data, subset, weights,
                         verbose=FALSE, epsilon=0.01, ...) {
  ## standard model-frame construction: keep only the relevant
  ## arguments of the call and re-evaluate as stats::model.frame
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data", "subset", "offset"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1L]] <- quote(stats::model.frame)
  mf <- eval(mf, parent.frame())
  mt <- attr(mf, "terms")
  offset <- model.offset(mf)
  y <- model.response(mf, "any")
  X <- model.matrix(mt, mf)
  colnames(X)[1] <- "(Intercept)"
  Vnames <- colnames(X)
  #' then we fit:
  fit <- vblogit(y=y, X=X, offset=offset, verb=verbose, eps=epsilon, ...)
#' names(fit$coefficients) <- names(fit$coef) <- Vnames #' add some variables to conform to summary.ppm fit$se <- sqrt(diag(as.matrix(fit$S))) fit$call <- match.call(expand.dots=FALSE) fit$formula <- formula fit$method <- "vblogit" fit$model <- mf fit$terms <- mt fit$offset <- offset fit$data <- data fit$xlevels <- .getXlevels(mt, mf) fit } ################################################### # the fitting function: vblogit <- local({ ## helper functions needed: lambda <- function(x) { -tanh(x/2)/(4*x) } mygamma <- function(x) { x/2 - log(1+exp(x)) + x*tanh(x/2)/4 } vblogit <- function(y, X, offset, eps=1e-2, m0, S0, S0i, xi0, verb=FALSE, maxiter=1000, ...) { ## Logistic regression using JJ96 idea. Ormeron00 notation. ## p(y, w, t) = p(y | w) p(w | t) p(t) ## ## Y ~ Bern(logit(Xw + offset)) ## w ~ N(m0, S0) iid ## ## "*0" are fixed priors. ## cat2 <- if(verb) cat else function(...) NULL varnames <- colnames(data.frame(as.matrix(X[1:2,]))) ## Write N <- length(y) K <- ncol(X) #' #' #' offset if(missing('offset')) offset <- 0 if(length(offset) eps & (iter<-iter+1) <= maxiter le_hist <- c(le_hist, le) cat2("diff:", devi, " \r") } if(iter == maxiter) warning("Maximum iteration limit reached.") cat2("\n") ## done. 
Compile: est <- list(m=m, S=S, Si=Si, xi=xi, lambda_xi=la) #' Marginal evidence est$logLik <- le #' Compute max logLik with the Bernoulli model; #' this should be what glm gives: est$logLik_ML <- as.numeric( t(y)%*%(X%*%m+offset) - sum( log( 1 + exp(X%*%m+offset)) ) ) #' Max loglik with the approximation est$logLik_ML2 <- as.numeric( t(y)%*%(X%*%m + offset) + t(m)%*%t(X*la)%*%X%*%m - 0.5*sum(X%*%m) + sum(mygamma(xi)) + 2*t(offset*la)%*%X%*%m + t(offset*la)%*%offset - 0.5 * sum(offset) ) #' some additional parts, like in glm output est$coefficients <- est$m[,1] names(est$coefficients) <- varnames est$call <- sys.call() est$converged <- !(maxiter==iter) #' more additional stuff est$logp_hist <- le_hist est$parameters <- list(eps=eps, maxiter=maxiter) est$priors <- list(m=m0, S=S0) est$iterations <- iter class(est) <- "vblogit" ## return est } vblogit }) ################################################### #' Predict method predict.vblogit <- local({ sigmoid <- function(e) 1/(1+exp(-e)) predict.vblogit <- function(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, ...) 
{
  type <- match.arg(type)
  ## only posterior predictive probabilities are implemented
  if(type != "response") stop("type not supported.")
  if(missing(newdata)) {
    stop("not implemented.")
  } else{ # newdata
    #' build the new covariate matrix, inspired by predict.lm
    tt <- terms(object)
    Terms <- delete.response(tt)
    m <- model.frame(Terms, newdata, na.action = na.action,
                     xlev = object$xlevels)
    X <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
    ## reconstruct the offset: offset() terms in the formula plus any
    ## offset= argument given in the original fitting call
    offset <- rep(0, nrow(X))
    if (!is.null(off.num <- attr(tt, "offset")))
      for (i in off.num) offset <- offset + eval(attr(tt,
                                                      "variables")[[i + 1]], newdata)
    if (!is.null(object$call$offset))
      offset <- offset + eval(object$call$offset, newdata)
    #' predict using probit approximation to logit-function
    mu <- object$m
    S <- object$S
    mua <- as.numeric(X%*%mu)+offset
    #' was: s2a <- diag(X%*%S%*%t(X) )
    s2a <- quadform(X, S)
    predictor <- sigmoid( as.numeric( mua/sqrt(1+pi*s2a/8) ) )
    names(predictor) <- rownames(X)
  }
  predictor
}
predict.vblogit
})

# ###################################################
# print method
## Concise console display of a fitted vblogit object.
print.vblogit <- function(x, ...) {
  splat("Variational Bayes logistic regression fit")
  cat("\nCall: ")
  print(x$call)
  cat("\nCoefficients:\n")
  print(x$coefficients)
  cat("\n")
  splat("Log-likelihood:", x$logLik)
  splat("Converged:", x$converged)
  splat("Convergence threshold:", x$parameters$eps)
  splat("Iterations / max:", x$iterations, "/", x$parameters$maxiter)
  splat("* Caution: the estimates are conditional on convergence.")
  invisible(NULL)
}

####################################################
# vblogit family method
## the model is Bernoulli with logit link, i.e. the binomial family
family.vblogit <- function(object, ...) binomial()

####################################################
#' vblogit fit summary method
summary.vblogit <- function(object, ...)
{ splat("Variational Bayes logistic regression fit") cat("\nCall: ") print(object$call) splat("\nCoefficients and posterior 95% central regions:") vna <- names(object$coefficients) s <- sqrt(diag(object$S)) q0 <- qnorm(c(0.025, 0.975)) m <- as.numeric(object$m) df <- data.frame(estimate=m, "low 0.05"=m+s*q0[1], "high 97.5"=m+s*q0[2], "prior mean"=object$priors$m, "prior var"=diag(object$priors$S)) rownames(df) <- vna print(df) cat("\n") splat("Lower bound for log-likelihood:", object$logLik) invisible(NULL) } #################################################### # Coef coef.vblogit <- function(object, ...) object$coefficients #################################################### # Log-evidence logLik.vblogit <- function(object, ...) { object$logLik } spatstat/R/plot3d.R0000644000176200001440000001651213553533332013660 0ustar liggesusers#' perspective plot of 3D #' #' $Revision: 1.7 $ $Date: 2019/10/22 07:53:14 $ #' project3Dhom <- local({ check3dvector <- function(x) { xname <- deparse(substitute(x)) if(!(is.numeric(x) && length(x) == 3)) stop(paste(xname, "should be a numeric vector of length 3"), call.=FALSE) return(NULL) } normalise <- function(x) { len <- sqrt(sum(x^2)) if(len == 0) stop("Attempted to normalise a vector of length 0") return(x/len) } innerprod <- function(a, b) sum(a*b) crossprod <- function(u, v) { c(u[2] * v[3] - u[3] * v[2], -(u[1] * v[3] - u[3] * v[1]), u[1] * v[2] - u[2] * v[1]) } project3Dhom <- function(xyz, eye=c(0,-3,1), org=c(0,0,0), vert=c(0,0,1)) { ## xyz: data to be projected (matrix n * 3) stopifnot(is.matrix(xyz) && ncol(xyz) == 3) ## eye: eye position (x,y,z) check3dvector(eye) ## org: origin (x,y,z) becomes middle of projection plane check3dvector(org) ## vert: unit vector in direction to become the 'vertical' if(!missing(vert)) { check3dvector(vert) vert <- normalise(vert) } ## vector pointing into screen vin <- normalise(org - eye) ## projection of vertical onto screen vup <- normalise(vert - innerprod(vert, vin) * vin) ## 
horizontal axis in screen vhoriz <- crossprod(vin, vup) ## # dbg <- FALSE # if(dbg) { # cat("vin=") # print(vin) # cat("vup=") # print(vup) # cat("vhoriz=") # print(vhoriz) # } ## homogeneous coordinates hom <- t(t(xyz) - eye) %*% cbind(vhoriz, vup, vin) colnames(hom) <- c("x", "y", "d") return(hom) } project3Dhom }) plot3Dpoints <- local({ plot3Dpoints <- function(xyz, eye=c(2,-3,2), org=c(0,0,0), ..., type=c("p", "n", "h"), xlim=c(0,1), ylim=c(0,1), zlim=c(0,1), add=FALSE, box=TRUE, main, cex=par('cex'), box.back=list(col="pink"), box.front=list(col="blue", lwd=2) ) { if(missing(main)) main <- short.deparse(substitute(xyz)) type <- match.arg(type) #' if(is.null(box.back) || (is.logical(box.back) && box.back)) box.back <- list(col="pink") if(is.null(box.front) || (is.logical(box.front) && box.front)) box.front <- list(col="blue", lwd=2) stopifnot(is.list(box.back) || is.logical(box.back)) stopifnot(is.list(box.front) || is.logical(box.front)) #' stopifnot(is.matrix(xyz) && ncol(xyz) == 3) if(nrow(xyz) > 0) { if(missing(xlim)) xlim <- range(pretty(xyz[,1])) if(missing(ylim)) ylim <- range(pretty(xyz[,2])) if(missing(zlim)) zlim <- range(pretty(xyz[,3])) if(missing(org)) org <- c(mean(xlim), mean(ylim), mean(zlim)) } if(!add) { #' initialise plot bb <- plot3Dbox(xlim, ylim, zlim, eye=eye, org=org, do.plot=FALSE) plot(bb$xlim, bb$ylim, axes=FALSE, asp=1, type="n", xlab="", ylab="", main=main) } if(is.list(box.back)) { #' plot rear of box do.call(plot3DboxPart, resolve.defaults(list(xlim=xlim, ylim=ylim, zlim=zlim, eye=eye, org=org, part="back"), box.back, list(...))) } if(type != "n") { #' plot points uv <- project3Dhom(xyz, eye=eye, org=org) uv <- as.data.frame(uv) dord <- order(uv$d, decreasing=TRUE) uv <- uv[dord, , drop=FALSE] #' capture graphics arguments which might be vectors grarg <- list(..., cex=cex) grarg <- grarg[names(grarg) %in% parsAll] if(any(lengths(grarg) > 1L)) { grarg <- as.data.frame(grarg, stringsAsFactors=FALSE) grarg <- grarg[dord, , 
drop=FALSE] grarg <- as.list(grarg) } #' draw segments if(type == "h") { xy0 <- cbind(xyz[,1:2,drop=FALSE], zlim[1]) uv0 <- as.data.frame(project3Dhom(xy0, eye=eye, org=org)) uv0 <- uv0[dord, , drop=FALSE] segargs <- grarg[names(grarg) %in% parsSegments] do.call(segments, append(list(x0=with(uv0, x/d), y0=with(uv0, y/d), x1=with(uv, x/d), y1=with(uv, y/d)), segargs)) } #' draw points ptargs <- grarg[names(grarg) %in% parsPoints] ptargs$cex <- ptargs$cex * with(uv, min(d)/d) do.call(points, c(list(x=with(uv, x/d), y=with(uv, y/d)), ptargs)) } if(is.list(box.front)) do.call(plot3DboxPart, resolve.defaults(list(xlim=xlim, ylim=ylim, zlim=zlim, eye=eye, org=org, part="front"), box.front, list(...))) return(invisible(NULL)) } vertexind <- data.frame(i=rep(1:2,4), j=rep(rep(1:2,each=2),2), k=rep(1:2, each=4)) edgepairs <- data.frame(from=c(1, 1, 2, 3, 1, 2, 5, 3, 5, 4, 6, 7), to = c(2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8)) vertexfrom <- vertexind[edgepairs$from,] vertexto <- vertexind[edgepairs$to,] parsPoints <- c("cex", "col", "fg", "bg", "pch", "lwd") parsSegments <- c("col", "lwd", "lty") parsAll <- union(parsPoints, parsSegments) hamming <- function(a, b) sum(abs(a-b)) ## determine projected positions of box vertices ## and optionally plot the box plot3Dbox <- function(xlim=c(0,1), ylim=xlim, zlim=ylim, eye=c(0,-3,1), org=c(0,0,0), do.plot=TRUE) { fromxyz <- with(vertexfrom, cbind(xlim[i], ylim[j], zlim[k])) toxyz <- with(vertexto, cbind(xlim[i], ylim[j], zlim[k])) fromuv <- project3Dhom(fromxyz, eye=eye, org=org) touv <- project3Dhom(toxyz, eye=eye, org=org) xfrom <- fromuv[,1]/fromuv[,3] xto <- touv[,1]/touv[,3] yfrom <- fromuv[,2]/fromuv[,3] yto <- touv[,2]/touv[,3] if(do.plot) segments(xfrom, yfrom, xto, yto) return(invisible(list(xlim=range(xfrom, xto), ylim=range(yfrom, yto)))) } ## plot either back or front of box plot3DboxPart <- function(xlim=c(0,1), ylim=xlim, zlim=ylim, eye=c(0,-3,1), org=c(0,0,0), part=c("front", "back"), ...) 
{ part <- match.arg(part) boxvert <- with(vertexind, cbind(xlim[i], ylim[j], zlim[k])) pvert <- project3Dhom(boxvert, eye=eye, org=org) xyvert <- pvert[,c("x","y")]/pvert[,"d"] ## find vertex which is furthest away nback <- which.max(pvert[,"d"]) nearback <- with(edgepairs, (from==nback) | (to==nback)) ind <- if(part == "back") nearback else !nearback ## draw lines with(edgepairs[ind,], do.call.matched(segments, list(x0=xyvert[from, 1], y0=xyvert[from, 2], x1=xyvert[to, 1], y1=xyvert[to, 2], ...))) } plot3Dpoints }) spatstat/R/summary.kppm.R0000644000176200001440000001066413552260741015120 0ustar liggesusers#' #' summary.kppm.R #' #' $Revision: 1.6 $ $Date: 2018/11/10 09:43:05 $ #' summary.kppm <- function(object, ..., quick=FALSE) { nama <- names(object) result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))] ## handle old format if(is.null(result$isPCP)) result$isPCP <- TRUE ## summarise trend component result$trend <- summary(as.ppm(object), ..., quick=quick) if(identical(quick, FALSE)) { theta <- coef(object) if(length(theta) > 0) { vc <- vcov(object, matrix.action="warn") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } if(!is.null(se)) { two <- qnorm(0.975) lo <- theta - two * se hi <- theta + two * se zval <- theta/se pval <- 2 * pnorm(abs(zval), lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1), labels=c("***", "**", "*", " "), include.lowest=TRUE) ## table of coefficient estimates with SE and 95% CI result$coefs.SE.CI <- data.frame(Estimate=theta, S.E.=se, CI95.lo=lo, CI95.hi=hi, Ztest=psig, Zval=zval) } } } class(result) <- "summary.kppm" return(result) } coef.summary.kppm <- function(object, ...) { return(object$coefs.SE.CI) } print.summary.kppm <- function(x, ...) 
{ terselevel <- spatstat.options('terse') digits <- getOption('digits') isPCP <- x$isPCP splat(if(x$stationary) "Stationary" else "Inhomogeneous", if(isPCP) "cluster" else "Cox", "point process model") if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20) splat("Fitted to point pattern dataset", sQuote(x$Xname)) Fit <- x$Fit if(waxlyrical('gory', terselevel)) { switch(Fit$method, mincon = { splat("Fitted by minimum contrast") splat("\tSummary statistic:", Fit$StatName) print(Fit$mcfit) }, clik =, clik2 = { splat("Fitted by maximum second order composite likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(optimStatus(Fit$clfit)) }, palm = { splat("Fitted by maximum Palm likelihood") splat("\trmax =", Fit$rmax) if(!is.null(wtf <- Fit$weightfun)) { a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf) splat("\tweight function:", a) } printStatus(optimStatus(Fit$clfit)) }, warning(paste("Unrecognised fitting method", sQuote(Fit$method))) ) } # ............... trend ......................... parbreak() splat("----------- TREND MODEL -----") print(x$trend, ...) # ..................... clusters ................ 
tableentry <- spatstatClusterModelInfo(x$clusters) parbreak() splat("-----------", if(isPCP) "CLUSTER" else "COX", "MODEL", "-----------") splat("Model:", tableentry$printmodelname(x)) parbreak() cm <- x$covmodel if(!isPCP) { # Covariance model - LGCP only splat("\tCovariance model:", cm$model) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\tCovariance parameters:", paste(tagvalue, collapse=", ")) } } pa <- x$clustpar if (!is.null(pa)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(pa, digits=digits) } if(!is.null(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(!is.im(mu)) paste(signif(mu, digits), "points") else "[pixel image]") } else { splat("Fitted mean of log of random intensity:", if(!is.im(mu)) signif(mu, digits) else "[pixel image]") } } # table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { parbreak() splat("Final standard error and CI") splat("(allowing for correlation of", if(isPCP) "cluster" else "Cox", "process):") print(cose) } invisible(NULL) } spatstat/R/pairsat.family.R0000644000176200001440000002071313333543255015375 0ustar liggesusers# # # pairsat.family.S # # $Revision: 1.44 $ $Date: 2016/02/11 09:36:11 $ # # The saturated pairwise interaction family of point process models # # (an extension of Geyer's saturation process to all pairwise interactions) # # pairsat.family: object of class 'isf' # defining saturated pairwise interaction # # # ------------------------------------------------------------------- # pairsat.family <- list( name = "saturated pairwise", print = function(self) { cat("Saturated pairwise interaction family\n") }, eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, halfway=FALSE) { # # This is the eval function for the `pairsat' family. 
# # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairsat' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms # applying the saturation threshold. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ######################################################################## # # POTENTIAL: # The pair potential function 'pairpot' will be called as # pairpot(M, potpars) where M is a matrix of interpoint distances. # It must return a matrix with the same dimensions as M # or an array with its first two dimensions the same as the dimensions of M. # # NOTE: # Note the Geyer saturation threshold must be given in 'potpars$sat' ########################################################################## # coercion should be unnecessary, but this is useful for debugging X <- as.ppp(X) U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window # saturation parameter(s) saturate <- potpars$sat # interaction distance of corresponding pairwise interaction PairReach <- if(!is.null(Reach) && is.finite(Reach)) Reach/2 else NULL if(is.null(saturate)) { # pairwise interaction V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach, precomputed=precomputed, savecomputed=savecomputed) return(V) } # first ensure all data points are included in the quadrature points nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points originalrows <- seq_len(nU) nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=FALSE) # correspondingly augment the list of equal pairs newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # compute the pair potentials POT and the unsaturated potential sums V V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach) POT <- attr(V, "POT") computed <- attr(V, "computed") # could be NULL # # V is a matrix with rows = quadrature points, # columns = coordinates of potential # POT is an array with rows = data points # columns = quadrature points # planes = coordinates of potential ################################################################# ################## saturation part ############################## ################################################################# # check dimensions and ensure 'saturate' is a vector ns <- length(saturate) np <- ncol(V) if(ns == 1 && np > 1) saturate <- rep.int(saturate, np) else if(ns != np) stop("Length of vector of saturation parameters is incompatible with the pair potential", call.=FALSE) # 
replicate as a matrix and as an array saturate2 <- array(saturate[slice.index(V, 2)], dim=dim(V)) saturate3 <- array(saturate[slice.index(POT, 3)], dim=dim(POT)) # # (a) compute SATURATED potential sums V.sat <- pmin(V, saturate2) if(halfway) return(V.sat) # # (b) compute effect of addition/deletion of dummy/data point j # on the UNSATURATED potential sum of each data point i # # Identify data points is.data <- seq_len(npoints(U)) %in% EqualPairs[,2] # logical vector corresp. to rows of V # Extract potential sums for data points only V.data <- V[is.data, , drop=FALSE] # replicate them so that V.dat.rep[i,j,k] = V.data[i, k] V.dat.rep <- aperm(array(V.data, dim=c(dim(V.data), U$n)), c(1,3,2)) # make a logical array col.is.data[i,j,k] = is.data[j] col.is.data <- array(is.data[slice.index(POT, 2)], dim=dim(POT)) # compute value of unsaturated potential sum for each data point i # obtained after addition/deletion of each dummy/data point j if(!(correction %in% c("isotropic", "Ripley"))) { dV <- ifelseNegPos(col.is.data, POT) ## equivalent to ifelse(col.is.data, -POT, POT) } else { ## Weighted potential is not exactly symmetric dV <- POT dV[col.is.data] <- - aperm(POT[ , is.data, , drop=FALSE], c(2,1,3)) } V.after <- V.dat.rep + dV # # # (c) difference of SATURATED potential sums for each data point i # before & after increment/decrement of each dummy/data point j # # saturated values after increment/decrement V.after.sat <- array(pmin.int(saturate3, V.after), dim=dim(V.after)) # saturated values before V.dat.rep.sat <- array(pmin.int(saturate3, V.dat.rep), dim=dim(V.dat.rep)) # difference V.delta <- V.after.sat - V.dat.rep.sat V.delta <- ifelseNegPos(col.is.data, V.delta) # # (d) Sum (c) over all data points i V.delta.sum <- apply(V.delta, c(2,3), sum) # # (e) Result V <- V.sat + V.delta.sum ########################################## # remove rows corresponding to supplementary points if(somemissing) V <- V[originalrows, , drop=FALSE] ### tack on the saved computations 
from pairwise.family$eval if(savecomputed) attr(V, "computed") <- computed return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairsat.family$suffstat") { # for saturated pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"saturated pairwise")) stop("Model is not a saturated pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Empty <- X[integer(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat", halfway=TRUE) # halfway=TRUE is passed to pairsat.family$eval # and yields matrix of saturated potential sums # take only those terms that contribute to the pseudolikelihood mom <- mom[contribute, , drop=FALSE] result <- apply(mom, 2, sum) return(result) } ######### end of function $suffstat ) ######### end of list class(pairsat.family) <- "isf" spatstat/R/is.subset.owin.R0000644000176200001440000000435513436145403015345 0ustar liggesusers# # is.subset.owin.R # # $Revision: 1.16 $ $Date: 2019/03/01 06:02:27 $ # # Determine whether a window is a subset of another window # # is.subset.owin() # is.subset.owin <- local({ is.subset.owin <- function(A, B) { A <- as.owin(A) B <- as.owin(B) if(identical(A, B)) return(TRUE) A <- rescue.rectangle(A) B <- rescue.rectangle(B) if(is.rectangle(B)) { # Some cases can be resolved using convexity of B # (1) A is also a rectangle if(is.rectangle(A)) { xx <- A$xrange[c(1L,2L,2L,1L)] yy <- A$yrange[c(1L,1L,2L,2L)] ok <- inside.owin(xx, yy, B) return(all(ok)) } # (2) A is polygonal # Then A is a subset of B iff, # for every constituent polygon of A with positive sign, # the vertices are all in B if(is.polygonal(A)) { ok <- unlist(lapply(A$bdry, okpolygon, 
B=B)) return(all(ok)) } # (3) Feeling lucky # Test whether the bounding box of A is a subset of B # Then a fortiori, A is a subset of B AA <- boundingbox(A) if(is.subset.owin(AA, B)) return(TRUE) } if(!is.mask(A) && !is.mask(B)) { ## rectangles or polygonal domains if(!all(inside.owin(vertices(A), , B))) return(FALSE) ## all vertices of A are inside B. if(is.convex(B)) return(TRUE) ## check for boundary crossings bx <- crossing.psp(edges(A), edges(B)) if(npoints(bx) > 0) return(FALSE) ## Absence of boundary crossings is sufficient if B has no holes if(length(B$bdry) == 1 || !any(sapply(B$bdry, is.hole.xypolygon))) return(TRUE) ## Compare area of intersection with area of putative subset ## (these are subject to numerical rounding error) areaA <- area(A) if(overlap.owin(A,B) >= areaA || overlap.owin(B,A) >= areaA) return(TRUE) ## continue... } # Discretise a <- as.mask(A) b <- as.mask(B) rxy <- rasterxy.mask(a, drop=TRUE) xx <- rxy$x yy <- rxy$y ok <- inside.owin(xx, yy, b) return(all(ok)) } okpolygon <- function(a, B) { if(Area.xypolygon(a) < 0) return(TRUE) ok <- inside.owin(a$x, a$y, B) return(all(ok)) } is.subset.owin }) spatstat/R/colourschemes.R0000644000176200001440000000350413575343014015323 0ustar liggesusers# # colourschemes.R # # $Revision: 1.5 $ $Date: 2019/12/15 05:25:53 $ # beachcolourmap <- function(range, ...) { col <- beachcolours(range, ...) 
z <- colourmap(col, range=range) return(z) } beachcolours <- function(range, sealevel = 0, monochrome=FALSE, ncolours=if(monochrome) 16 else 64, nbeach=1) { check.range(range) stopifnot(all(is.finite(range))) check.1.real(sealevel) range <- range(c(sealevel,range)) check.1.integer(ncolours) stopifnot(ncolours >= 3) check.1.integer(nbeach) stopifnot(nbeach >= 0) stopifnot(nbeach <= ncolours + 2) if(monochrome) return(grey(seq(from=0,to=1,length.out=ncolours))) depths <- range[1L] peaks <- range[2L] dv <- diff(range)/(ncolours - 1L) epsilon <- nbeach * dv/2 lowtide <- max(sealevel - epsilon, depths) hightide <- min(sealevel + epsilon, peaks) nsea <- max(0L, floor((lowtide - depths)/dv)) nland <- max(0L, floor((peaks - hightide)/dv)) discrep <- nsea + nland + nbeach - ncolours if(discrep != 0) { dd <- abs(discrep) ss <- as.integer(-sign(discrep)) smallhalf <- dd/2L largehalf <- dd - smallhalf if(nsea < nland) { nsea <- nsea + ss * smallhalf nland <- nland + ss * largehalf } else { nland <- nland + ss * smallhalf nsea <- nsea + ss * largehalf } if(nsea + nland + nbeach != ncolours) warning("Internal error: incorrect adjustment of length in beachcolours") } colours <- character(0) if(nsea > 0) colours <- rev(rainbow(nsea, start=3/6,end=4/6)) # cyan/blue if(nbeach > 0) colours <- c(colours, rev(rainbow(nbeach, start=3/12,end=5/12))) # green if(nland > 0) colours <- c(colours, rev(rainbow(nland, start=0, end=1/6))) # red/yellow return(colours) } spatstat/R/nndist.R0000644000176200001440000002531513606020442013743 0ustar liggesusers# # nndist.R # # nearest neighbour distances (nndist) and identifiers (nnwhich) # # $Revision: 1.12 $ $Date: 2020/01/10 06:37:05 $ # nndist <- function(X, ...) 
{ UseMethod("nndist") } nndist.ppp <- local({ nndist.ppp <- function(X, ..., k=1, by=NULL, method="C") { verifyclass(X, "ppp") trap.extra.arguments(..., .Context="In nndist.ppp") if(is.null(by)) # usual case return(nndist.default(X$x, X$y, k=k, by=by, method=method)) return(nndistby(X, k=k, by=by)) } nndistby <- function(X, k, by) { # split by factor idX <- seq_len(npoints(X)) Y <- split(X %mark% idX, f=by, un=FALSE) distY <- lapply(Y, nndistsub, XX=X, iX=idX, k=k) result <- do.call(cbind, distY) return(result) } nndistsub <- function(Z, XX, iX, k) { nncross(XX, Z, iX=iX, iY=marks(Z), k=k, what="dist") } nndist.ppp }) nndist.default <- function(X, Y=NULL, ..., k=1, by=NULL, method="C") { # computes the vector of nearest-neighbour distances # for the pattern of points (x[i],y[i]) # xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y # validate n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") method <- match.arg(method, c("C", "interpreted", "test")) # other arguments ignored trap.extra.arguments(..., .Context="In nndist.default") # split by factor ? 
if(!is.null(by)) { X <- as.ppp(xy, W=boundingbox) return(nndist(X, by=by, k=k)) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # calculate k-nn distances for k <= kmaxcalc if(kmaxcalc == 1) { # calculate nearest neighbour distance only switch(method, test = , interpreted={ # matrix of squared distances between all pairs of points sq <- function(a, b) { (a-b)^2 } squd <- outer(x, x, sq) + outer(y, y, sq) # reset diagonal to a large value so it is excluded from minimum diag(squd) <- Inf # nearest neighbour distances nnd <- sqrt(apply(squd,1,min)) }, C={ nnd<-numeric(n) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("nndistsort", n= as.integer(n), x= as.double(x[o]), y= as.double(y[o]), nnd= as.double(nnd), as.double(big), PACKAGE = "spatstat") nnd[o] <- z$nnd }, stop(paste("Unrecognised method", sQuote(method))) ) } else { # case kmaxcalc > 1 switch(method, test = , interpreted={ if(n <= 1000 && method == "interpreted") { # form n x n matrix of squared distances D2 <- pairdist.default(x, y, method=method, squared=TRUE) # find k'th smallest squared distance diag(D2) <- Inf NND2 <- t(apply(D2, 1, sort))[, 1:kmaxcalc] nnd <- sqrt(NND2) } else { # avoid creating huge matrix # handle one row of D at a time NND2 <- matrix(numeric(n * kmaxcalc), nrow=n, ncol=kmaxcalc) for(i in seq_len(n)) { D2i <- (x - x[i])^2 + (y - y[i])^2 D2i[i] <- Inf NND2[i,] <- orderstats(D2i, k=1:kmaxcalc) } nnd <- sqrt(NND2) } 
}, C={ nnd<-numeric(n * kmaxcalc) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("knndsort", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), nnd = as.double(nnd), huge = as.double(big), PACKAGE = "spatstat") nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(z$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) }, stop(paste("Unrecognised method", sQuote(method))) ) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf infs <- matrix(Inf, nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(kmax > 1) colnames(nnd) <- paste0("dist.", 1:kmax) if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich <- function(X, ...) { UseMethod("nnwhich") } nnwhich.ppp <- local({ nnwhich.ppp <- function(X, ..., k=1, by=NULL, method="C") { verifyclass(X, "ppp") trap.extra.arguments(..., .Context="In nnwhich.ppp") if(is.null(by)) return(nnwhich.default(X$x, X$y, k=k, method=method)) return(nnwhichby(X, k=k, by=by)) } nnwhichby <- function(X, k, by) { # split by factor idX <- seq_len(npoints(X)) Y <- split(X %mark% idX, f=by, un=FALSE) whichY <- lapply(Y, nnwhichsub, XX=X, iX=idX, k=k) result <- do.call(cbind, whichY) return(result) } nnwhichsub <- function(Z, XX, iX, k) { # marks(Z) gives original serial numbers of subset Z iY <- marks(Z) Zid <- nncross(XX, Z, iX=iX, iY=iY, k=k, what="which") nk <- length(k) if(nk == 1) { Yid <- iY[Zid] } else { Zid <- as.vector(as.matrix(Zid)) Yid <- iY[Zid] Yid <- data.frame(which=matrix(Yid, ncol=nk)) } return(Yid) } nnwhich.ppp }) nnwhich.default <- function(X, Y=NULL, ..., k=1, by=NULL, method="C") { # identifies nearest neighbour of each point in # the pattern of points (x[i],y[i]) # xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y # validate n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") method <- match.arg(method, c("C", "interpreted", "test")) # other arguments ignored trap.extra.arguments(..., 
.Context="In nnwhich.default") # split by factor ? if(!is.null(by)) { X <- as.ppp(xy, W=boundingbox) return(nnwhich(X, by=by, k=k)) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # special cases if(n <= 1) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1) { # identify nearest neighbour only switch(method, test = , interpreted={ # matrix of squared distances between all pairs of points sq <- function(a, b) { (a-b)^2 } squd <- outer(x, x, sq) + outer(y, y, sq) # reset diagonal to a large value so it is excluded from minimum diag(squd) <- Inf # nearest neighbours nnw <- apply(squd,1,which.min) }, C={ nnw <- integer(n) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("nnwhichsort", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- z$nnwhich # sic if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] }, stop(paste("Unrecognised method", sQuote(method))) ) } else { # case kmaxcalc > 1 switch(method, test = , interpreted={ if(n <= 1000 && method == "interpreted") { # form n x n matrix of squared distances D2 <- pairdist.default(x, y, method=method, squared=TRUE) # find k'th smallest squared distance diag(D2) <- Inf nnw <- t(apply(D2, 1, fave.order))[, 1:kmaxcalc] } else { # avoid creating huge matrix 
# handle one row of D at a time nnw <- matrix(as.integer(NA), nrow=n, ncol=kmaxcalc) for(i in seq_len(n)) { D2i <- (x - x[i])^2 + (y - y[i])^2 D2i[i] <- Inf nnw[i,] <- fave.order(D2i)[1:kmaxcalc] } } }, C={ nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) z<- .C("knnwhich", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- z$nnwhich # sic witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) }, stop(paste("Unrecognised method", sQuote(method))) ) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(as.numeric(NA), nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(kmax > 1) colnames(nnw) <- paste0("which.", 1:kmax) if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } spatstat/R/pairs.im.R0000644000176200001440000001043413606336175014177 0ustar liggesusers# # pairs.im.R # # $Revision: 1.16 $ $Date: 2020/01/11 12:15:24 $ # pairs.listof <- pairs.solist <- function(..., plot=TRUE) { argh <- expandSpecialLists(list(...), special=c("solist", "listof")) haslines <- any(sapply(argh, inherits, what="linim")) names(argh) <- good.names(names(argh), "V", seq_along(argh)) if(haslines) { do.call(pairs.linim, append(argh, list(plot=plot))) } else { do.call(pairs.im, append(argh, list(plot=plot))) } } pairs.im <- function(..., plot=TRUE) { argh <- list(...) 
cl <- match.call() ## unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1]] if(is.list(arg1) && all(unlist(lapply(arg1, is.im)))) argh <- arg1 } ## identify which arguments are images isim <- unlist(lapply(argh, is.im)) nim <- sum(isim) if(nim == 0) stop("No images provided") ## separate image arguments from others imlist <- argh[isim] rest <- argh[!isim] ## determine image names for plotting imnames <- argh$labels %orifnull% names(imlist) if(length(imnames) != nim || !all(nzchar(imnames))) { #' names not given explicitly callednames <- paste(cl)[c(FALSE, isim, FALSE)] backupnames <- paste0("V", seq_len(nim)) if(length(callednames) != nim) { callednames <- backupnames } else if(any(toolong <- (nchar(callednames) > 15))) { callednames[toolong] <- backupnames[toolong] } imnames <- good.names(imnames, good.names(callednames, backupnames)) } ## if(nim == 1) { ## one image: plot histogram do.call(hist, append(list(imlist[[1L]], xname=imnames[1L], plot=plot), rest)) ## save pixel values Z <- imlist[[1]] pixvals <- list(Z[]) names(pixvals) <- imnames } else { ## extract pixel rasters and reconcile them imwins <- lapply(imlist, as.owin) names(imwins) <- NULL rasta <- do.call(intersect.owin, imwins) ## extract image pixel values on common raster pixvals <- lapply(imlist, "[.im", i=rasta, raster=rasta, drop=TRUE) } ## combine into data frame pixdf <- do.call(data.frame, pixvals) ## pairs plot if(plot && nim > 1) do.call(pairs, resolve.defaults(list(x=pixdf), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels class(pixdf) <- c("plotpairsim", class(pixdf)) return(invisible(pixdf)) } plot.plotpairsim <- function(x, ...) 
{
  ## Body of plot.plotpairsim (signature is on the previous line).
  ## Redisplays the pixel data saved by pairs.im():
  ## a histogram when there is a single image, a pairs plot otherwise.
  xname <- short.deparse(substitute(x))
  x <- as.data.frame(x)
  if(ncol(x) == 1) {
    ## only one column of pixel values: show its histogram
    do.call(hist.default,
            resolve.defaults(list(x=x[,1]),
                             list(...),
                             list(main=xname)))
  } else {
    ## several columns: scatterplot matrix of pixel values
    do.call(pairs.default,
            resolve.defaults(list(x=x),
                             list(...),
                             list(pch=".")))
  }
  return(invisible(NULL))
}

## Print method for the data frame of pixel values returned by pairs.im()
print.plotpairsim <- function(x, ...) {
  cat("Object of class plotpairsim\n")
  cat(paste("contains pixel data for", commasep(sQuote(colnames(x))),
            "\n"))
  return(invisible(NULL))
}

## Panel function for pairs(): kernel density image of the (x,y) values,
## with both coordinates rescaled to the unit square.
## 'sigma' is the smoothing bandwidth passed to density.ppp.
panel.image <- function(x, y, ..., sigma=NULL) {
  ## save and restore the coordinate system of the panel
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  xx <- scaletointerval(x)
  yy <- scaletointerval(y)
  p <- ppp(xx, yy, window=square(1), check=FALSE)
  plot(density(p, sigma=sigma), add=TRUE, ...)
}

## Panel function for pairs(): contour plot of the kernel density
## of the (x,y) values, rescaled to the unit square as above.
panel.contour <- function(x, y, ..., sigma=NULL) {
  ## save and restore the coordinate system of the panel
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  xx <- scaletointerval(x)
  yy <- scaletointerval(y)
  p <- ppp(xx, yy, window=square(1), check=FALSE)
  Z <- density(p, sigma=sigma)
  do.call(contour,
          resolve.defaults(list(x=Z, add=TRUE),
                           list(...),
                           list(drawlabels=FALSE)))
}

## Diagonal panel function for pairs(): histogram of the marginal values.
## (The body continues on the next line of the file.)
panel.histogram <- function(x, ...)
{ usr <- par("usr"); on.exit(par(usr)) par(usr = c(usr[1:2], 0, 1.5) ) h <- hist(x, plot = FALSE) breaks <- h$breaks; nB <- length(breaks) y <- h$counts; y <- y/max(y) do.call(rect, resolve.defaults(list(xleft = breaks[-nB], ybottom = 0, xright = breaks[-1], ytop = y), list(...), list(col="grey"))) } spatstat/R/metricPdt.R0000644000176200001440000001513113556707650014413 0ustar liggesusers#' #' metricPdt.R #' #' Metric distance transform of pixel mask #' #' $Revision: 1.5 $ $Date: 2019/11/01 01:34:30 $ rectdistmap <- function(X, asp=1.0, npasses=1, verbose=FALSE) { w <- as.mask(X) check.1.real(asp) check.1.integer(npasses) stopifnot(asp > 0) #' ensure grid has suitable aspect ratio dx <- w$xstep dy <- w$ystep a <- dy/(asp*dx) if(verbose) splat("grid aspect", signif(a, 3)) refined <- (a > 1.2 || a < 0.8) if(refined) { flipped <- (a < 1) if(flipped) a <- 1/a n <- if(a > 10) 1 else if(a > 6) 2 else if(a > 4) 4 else 12 an <- if(n > 1) round(a * n) else ceiling(a) k <- c(an, n)/greatest.common.divisor(an, n) if(flipped) k <- rev(k) woriginal <- w w <- as.owin(w, dimyx=k * dim(w)) if(verbose) { splat("Grid expansion", k[1], "x", k[2]) splat("Adjusted grid aspect", (a * k[2])/k[1]) } } #' nr <- w$dim[1L] nc <- w$dim[2L] #' input image will be padded out with a margin of width 2 on all sides mr <- mc <- 2L #' full dimensions of padded image Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc #' output image (subset): rows & columns (R indexing) rmin <- mr + 1L rmax <- Nnr - mr cmin <- mc + 1L cmax <- Nnc - mc #' do padding x <- matrix(FALSE, nrow=Nnr, ncol=Nnc) x[rmin:rmax, cmin:cmax] <- w$m #' compute distmap res <- .C("mdtPOrect", as.double(w$xrange[1L]), as.double(w$yrange[1L]), as.double(w$xrange[2L]), as.double(w$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), inp = as.integer(t(x)), asp = as.double(asp), npasses = as.integer(npasses), distances = as.double (double(N)), rows = as.integer(integer(N)), cols = 
as.integer(integer(N)), PACKAGE = "spatstat") dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] result <- as.im(dist, w) if(refined) result <- as.im(result, W=woriginal) # rows <- matrix(res$rows, # ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] # cols <- matrix(res$cols, # ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] # convert from C to R indexing # rows <- rows + 1L - as.integer(mr) # cols <- cols + 1L - as.integer(mc) # return(list(d=dist,row=rows,col=cols,b=bdist, w=w)) edge <- TRUE if(edge) { #' calculate distance transform to boundary y <- x y[] <- TRUE y[rmin:rmax, cmin:cmax] <- FALSE y[rmin, ] <- TRUE y[rmax, ] <- TRUE y[, cmin] <- TRUE y[, cmax] <- TRUE #' compute distmap bres <- .C("mdtPOrect", as.double(w$xrange[1L]), as.double(w$yrange[1L]), as.double(w$xrange[2L]), as.double(w$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), inp = as.integer(t(y)), asp = as.double(asp), npasses = as.integer(npasses), distances = as.double (double(N)), rows = as.integer(integer(N)), cols = as.integer(integer(N)), PACKAGE = "spatstat") bdist <- matrix(bres$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdist <- as.im(bdist, w) if(refined) bdist <- as.im(bdist, W=woriginal) attr(result, "bdist") <- bdist } return(result) } rectcontact <- function(X, ..., asp=1.0, npasses=4, eps=NULL, r=NULL, breaks=NULL, correction=c("rs", "km")) { verifyclass(X, "im") rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(list(...)$checkspacing) testme <- isTRUE(list(...)$testme) check.1.real(asp) stopifnot(asp > 0) if(X$type != "logical") stop("X should be a logical-valued image") if(!missing(eps)) X <- as.im(X, eps=eps) W <- as.mask(X) # the region that is defined Y <- solutionset(X) # the region that is TRUE fullframe <- all(W$m) emptyframe <- !any(W$m) ## histogram breakpoints rmaxdefault <- rmax.rule("F", W) breaks <- handle.r.b.args(r, breaks, 
W, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(testme || (rorbgiven && checkspacing)) check.finespacing(rvals, if(is.null(eps)) NULL else eps/4, W, rmaxdefault=if(rorbgiven) NULL else rmaxdefault, action="fatal", rname="r", context="in rectcontact(X, r)") correction <- pickoption("correction", correction, c(border="rs", rs="rs", KM="km", km="km", Kaplan="km", best="km"), multi=TRUE) ## compute distances and censoring distances if(!emptyframe) { dist <- rectdistmap(Y, asp, npasses=npasses) if(fullframe) { bdry <- attr(dist, "bdist") } else { bdry <- rectdistmap(complement.owin(W), asp, npasses=npasses) } #' extract corresponding values dist <- dist[W, drop=TRUE, rescue=FALSE] bdry <- bdry[W, drop=TRUE, rescue=FALSE] ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) } ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km) tags <- c("rs", "km")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s")[selection] if(emptyframe) { df <- as.data.frame(matrix(0, length(rvals), length(tags))) names(df) <- tags } else { df <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) df <- as.data.frame(df[tags]) } ## create fv object df <- cbind(data.frame(r=rvals), df) Z <- fv(df, "r", quote(H(r)), if(want.km) "km" else "rs", . ~ r, c(0,rmax), c("r", labels), c("distance argument r", descr), fname="H") fvnames(Z, ".") <- rev(fvnames(Z, ".")) attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.95])) attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } spatstat/R/distfun.R0000644000176200001440000001471713333543254014134 0ustar liggesusers# # distfun.R # # distance function (returns a function of x,y) # # $Revision: 1.27 $ $Date: 2018/04/23 05:12:30 $ # distfun <- function(X, ...) 
{
  UseMethod("distfun")
}

## Method for point patterns:
## returns a function(x,y) giving the distance from (x,y)
## to the k-th nearest point of X.
distfun.ppp <- function(X, ..., k=1, undef=Inf) {
  # this line forces X to be bound
  stopifnot(is.ppp(X))
  stopifnot(length(k) == 1)
  force(undef)
  ## closure evaluating the distance at query locations;
  ## 'undef' is returned when X has fewer than k points
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    if(npoints(X) < k) rep(undef, length(Y$x)) else
      nncross(Y, X, what="dist", k=k)
  }
  attr(g, "Xclass") <- "ppp"
  g <- funxy(g, as.rectangle(as.owin(X)))
  attr(g, "k") <- k
  ## remember the construction arguments so the distfun
  ## can be rebuilt after geometrical transformations
  attr(g, "extrargs") <- list(k=k, undef=undef)
  class(g) <- c("distfun", class(g))
  return(g)
}

## Method for line segment patterns:
## distance to the nearest segment of X.
distfun.psp <- function(X, ...) {
  # this line forces X to be bound
  stopifnot(is.psp(X))
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    nncross(Y, X, what="dist")
  }
  attr(g, "Xclass") <- "psp"
  g <- funxy(g, as.rectangle(as.owin(X)))
  class(g) <- c("distfun", class(g))
  attr(g, "extrargs") <- list()
  return(g)
}

## Method for windows:
## distance to the window boundary, defined to be zero inside the
## window (or zero outside it, when invert=TRUE).
distfun.owin <- function(X, ..., invert=FALSE) {
  # this line forces X to be bound
  stopifnot(is.owin(X))
  force(invert)
  #
  P <- edges(X)
  #
  g <- function(x,y=NULL) {
    Y <- xy.coords(x, y)
    inside <- inside.owin(Y$x, Y$y, X)
    D <- nncross(Y, P, what="dist")
    out <- if(!invert) ifelseAX(inside, 0, D) else ifelseXB(inside, D, 0)
    return(out)
  }
  attr(g, "Xclass") <- "owin"
  g <- funxy(g, as.rectangle(as.owin(X)))
  attr(g, "extrargs") <- list(invert=invert)
  class(g) <- c("distfun", class(g))
  return(g)
}

## Extract the window of definition of a distfun:
## the original object X is retrieved from the closure's environment.
as.owin.distfun <- function(W, ..., fatal=TRUE) {
  X <- get("X", envir=environment(W))
  result <- if(is.owin(X)) as.rectangle(X) else as.owin(X, ..., fatal=fatal)
  return(result)
}

## Domain/Window accessors (the body is on the next line of the file)
domain.distfun <- Window.distfun <- function(X, ...)
{ as.owin(X) }

## Convert a distfun to a pixel image.
## When approx=TRUE and the defaults allow it (no explicit window W,
## first nearest neighbour only), the fast pixel algorithms
## distmap()/nnmap() are used instead of evaluating the function
## at every pixel centre.
##   na.replace - optional value substituted for NA pixels.
as.im.distfun <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL,
                          na.replace=NULL, approx=TRUE) {
  k <- attr(X, "k")
  if(approx && is.null(W) && (is.null(k) || (k == 1))) {
    # use 'distmap' for speed
    env <- environment(X)
    Xdata <- get("X", envir=env)
    args <- list(X=Xdata, eps=eps, dimyx=dimyx, xy=xy)
    if(is.owin(Xdata)) {
      args <- append(args, list(invert = get("invert", envir=env)))
    }
    D <- do.call(distmap, args = args)
    ## BUG FIX: previously 'D$v[is.null(D$v)] <- na.replace'.
    ## is.null() applied to the pixel matrix returns a single FALSE,
    ## so the replacement was a silent no-op on this fast path
    ## (the other two branches pass na.replace through correctly).
    ## The intention is clearly to overwrite NA pixel values.
    if(!is.null(na.replace)) D$v[is.na(D$v)] <- na.replace
  } else if(identical(attr(X, "Xclass"), "ppp")) {
    # point pattern --- use nngrid/knngrid
    env <- environment(X)
    Xdata <- get("X", envir=env)
    D <- nnmap(Xdata, W=W, what="dist", k=k, eps=eps, dimyx=dimyx, xy=xy,
               na.replace=na.replace, ...)
  } else {
    # evaluate function at pixel centres
    D <- as.im.function(X, W=W, eps=eps, dimyx=dimyx, xy=xy,
                        na.replace=na.replace)
  }
  return(D)
}

## Print a brief description of a distfun
print.distfun <- function(x, ...) {
  xtype <- attr(x, "Xclass")
  typestring <- switch(xtype,
                       ppp="point pattern",
                       psp="line segment pattern",
                       owin="window",
                       "unrecognised object")
  objname <- switch(xtype,
                    ppp="point",
                    psp="line segment",
                    "object")
  splat("Distance function for", typestring)
  X <- get("X", envir=environment(x))
  print(X)
  if(!is.null(k <- attr(x, "k")) && k > 1)
    splat("Distance to", ordinal(k), "nearest", objname,
          "will be computed")
  return(invisible(NULL))
}

## Summary method: collect the type of the underlying object,
## the window of definition, and a summary of the pixel values.
summary.distfun <- function(object, ...) {
  xtype <- attr(object, "Xclass")
  w <- as.owin(object)
  fundef <- attr(object, "f")
  attr(fundef, "Xclass") <- NULL
  X <- get("X", envir=environment(object))
  z <- list(xtype = xtype,
            k = attr(object, "k") %orifnull% 1,
            Xsumry = summary(X),
            values = summary(as.im(object, ...)),
            wintype = w$type,
            frame = Frame(w),
            units = unitname(w))
  class(z) <- "summary.distfun"
  return(z)
}

## Print method for summary.distfun
## (the body is on the next line of the file)
print.summary.distfun <- function(x, ...)
{
  ## Body of print.summary.distfun (signature is on the previous line).
  ## Prints the textual summary assembled by summary.distfun.
  typestring <- switch(x$xtype,
                       ppp="point pattern",
                       psp="line segment pattern",
                       owin="window",
                       "unrecognised object")
  objname <- switch(x$xtype,
                    ppp="point",
                    psp="line segment",
                    "object")
  splat("Distance function for", typestring)
  if(x$k > 1)
    splat("Distance to", ordinal(x$k), "nearest", objname,
          "will be computed")
  windesc <- switch(x$wintype,
                    rectangle="the rectangle",
                    polygonal="a polygonal window inside the frame",
                    mask="a binary mask in the rectangle")
  unitinfo <- summary(x$units)
  sigdig <- getOption('digits')
  splat("defined in", windesc,
        prange(signif(x$frame$xrange, sigdig)),
        "x",
        prange(signif(x$frame$yrange, sigdig)),
        unitinfo$plural,
        unitinfo$explain
        )
  v <- x$values
  splat("\nDistance function values:")
  splat("\trange =", prange(signif(v$range, sigdig)))
  # splat("\tintegral =", signif(v$integral, sigdig))
  splat("\tmean =", signif(v$mean, sigdig))
  invisible(NULL)
}

## Geometrical transformations of a 'distfun':
## apply the transformation to the underlying object, then rebuild the
## distance function from it using the saved construction arguments.
## '.Generic' names the particular transformation invoked by S3 dispatch.
shift.distfun <- rotate.distfun <- scalardilate.distfun <-
  affine.distfun <- function(X, ...) {
  f <- X
  extrargs <- attr(f, "extrargs")
  if(is.null(extrargs))
    stop(paste("distfun object has outdated format;",
               "cannot apply geometrical transformation"),
         call.=FALSE)
  Y <- get("X", envir=environment(f))
  Ynew <- do.call(.Generic, list(Y, ...))
  fnew <- do.call(distfun, append(list(Ynew), extrargs))
  return(fnew)
}

## As above, for the transformations that take no further arguments.
flipxy.distfun <- reflect.distfun <- function(X) {
  f <- X
  extrargs <- attr(f, "extrargs")
  if(is.null(extrargs))
    stop(paste("distfun object has outdated format;",
               "cannot apply geometrical transformation"),
         call.=FALSE)
  Y <- get("X", envir=environment(f))
  Ynew <- do.call(.Generic, list(Y))
  fnew <- do.call(distfun, append(list(Ynew), extrargs))
  return(fnew)
}

## Change the unit of length of a 'distfun' by rescaling the
## underlying object and rebuilding the distance function.
## (The final assignment is completed on the next line of the file.)
rescale.distfun <- function(X, s, unitname) {
  if(missing(s)) s <- NULL
  if(missing(unitname)) unitname <- NULL
  f <- X
  Y <- get("X", envir=environment(f))
  Ynew <- rescale(Y, s, unitname)
  extrargs <- attr(f, "extrargs")
  if(is.null(extrargs))
    stop(paste("distfun object has outdated format;",
               "cannot rescale it"),
         call.=FALSE)
  fnew <-
do.call(distfun, append(list(Ynew), extrargs)) return(fnew) } spatstat/R/bw.abram.R0000644000176200001440000000343213460256005014135 0ustar liggesusers#' #' bw.abram.R #' #' Abramson bandwidths #' #' $Revision: 1.8 $ $Date: 2019/04/25 05:34:49 $ #' bw.abram <- function(X, h0, ..., at=c("points", "pixels"), hp=h0, pilot=NULL, trim=5, smoother=density.ppp){ stopifnot(is.ppp(X)) at <- match.arg(at) if(missing(h0) || is.null(h0)) { h0 <- bw.ppl(X) } else { check.1.real(h0) stopifnot(h0 > 0) } check.1.real(trim) stopifnot(trim > 0) pilot.data <- X imwin <- as.im(Window(X), ...) if(is.im(pilot)){ if(!compatible.im(imwin,pilot)) stop("'X' and 'pilot' have incompatible spatial domains", call.=FALSE) #' clip the worst small values away pilot[pilot<=0] <- min(pilot[pilot>0]) } else if(is.ppp(pilot)){ if(!compatible.im(imwin,as.im(Window(pilot), ...))) stop("'X' and 'pilot' have incompatible spatial domains", call.=FALSE) pilot.data <- pilot } else if(!is.null(pilot)) stop("if supplied, 'pilot' must be a pixel image or a point pattern", call.=FALSE) if(!is.im(pilot)) { if(is.character(smoother)) { smoother <- get(smoother, mode="function") } else stopifnot(is.function(smoother)) pilot <- smoother(pilot.data,sigma=hp,positive=TRUE,...) } pilot <- pilot/integral(pilot) # scale to probability density pilotvalues <- safelookup(pilot, pilot.data, warn=FALSE) ## geometric mean re-scaler (Silverman, 1986; ch 5). gamma <- exp(mean(log(pilotvalues[pilotvalues > 0])))^(-0.5) switch(at, points = { pilot.X <- safelookup(pilot,X,warn=FALSE) bw <- h0 * pmin((pilot.X^(-0.5))/gamma,trim) }, pixels = { bw <- eval.im(h0 * pmin((pilot^(-0.5))/gamma, trim)) }) return(bw) } spatstat/R/anova.ppm.R0000644000176200001440000002745013614463173014360 0ustar liggesusers# # anova.ppm.R # # $Revision: 1.28 $ $Date: 2020/01/30 05:05:52 $ # anova.ppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) 
NULL nquad <- function(x) { if(is.quad(x)) n.quad(x) else 0 } fmlaString <- function(z) { paste(as.expression(formula(z))) } interString <- function(z) { as.interact(z)$creator } anova.ppm <- function(object, ..., test=NULL, adjust=TRUE, warn=TRUE, fine=FALSE) { gripe <- if(warn) do.gripe else dont.gripe if(!is.null(test)) { test <- match.arg(test, c("Chisq", "LRT", "Rao", "score", "F", "Cp")) if(test == "score") test <- "Rao" if(!(test %in% c("Chisq", "LRT", "Rao"))) stop("test=", dQuote(test), "is not yet implemented") } ## trap outmoded usage argh <- list(...) if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) if(!all(sapply(objex, is.ppm))) stop(paste("Arguments must all be", sQuote("ppm"), "objects")) ## all models Poisson? pois <- all(sapply(objex, is.poisson.ppm)) gibbs <- !pois ## any models fitted by ippm? newton <- any(sapply(objex, inherits, what="ippm")) if(gibbs && !is.null(test) && test == "Rao") stop("Score test is only implemented for Poisson models", call.=FALSE) ## handle anova for a single object expandedfrom1 <- FALSE if(length(objex) == 1 && (gibbs || newton)) { ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) objex[[n]] <- update(object, fmla) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## all models fitted by same method? fitmethod <- unique(sapply(objex, getElement, name="method")) if(length(fitmethod) > 1) stop(paste("Models were fitted by different methods", commasep(sQuote(fitmethod)), "- comparison is not possible")) ## fitted by MPL or logistic? 
if(!(fitmethod %in% c("mpl", "logi"))) stop(paste("Not implemented for models fitted by method=", sQuote(fitmethod))) logi <- (fitmethod == "logi") refitargs <- list() fitz <- NULL ## fitted to same quadscheme using same edge correction? if(length(objex) > 1) { ## same data? datas <- lapply(objex, data.ppm) samedata <- all(sapply(datas[-1L], identical, y=datas[[1L]])) if(!samedata) stop("Models were fitted to different datasets") ## same dummy points? quads <- lapply(objex, quad.ppm) samequad <- all(sapply(quads[-1L], identical, y=quads[[1L]])) if(!samequad) { gripe("Models were re-fitted using a common quadrature scheme") sizes <- sapply(quads, nquad) imax <- which.max(sizes) bigQ <- quads[[imax]] refitargs$Q <- bigQ } ## same edge correction? corrxn <- unique(sapply(objex, getElement, name="correction")) if(length(corrxn) > 1) stop(paste("Models were fitting using different edge corrections", commasep(sQuote(corrxn)))) if(corrxn == "border") { rbord <- unique(sapply(objex, getElement, name="rbord")) if(length(rbord) > 1) { gripe("Models were re-fitted using a common value of 'rbord'") refitargs$rbord <- max(rbord) } } ## Extract glmfit objects fitz <- lapply(objex, getglmfit) ## Any trivial models? 
(uniform Poisson) trivial <- sapply(fitz, is.null) if(any(trivial)) refitargs$forcefit <- TRUE ## force all non-trivial models to be fitted using same method ## (all using GLM or all using GAM) isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") refitargs$use.gam <- TRUE refitargs$forcefit <- TRUE } ## finally refit models if(length(refitargs) > 0) { objex <- do.call(lapply, append(list(X=objex, FUN=update), refitargs)) fitz <- lapply(objex, getglmfit) } } ## Ensure GLM/GAM objects all use the same 'subset' subz <- lapply(objex, getglmsubset) if(length(unique(subz)) > 1) { subsub <- Reduce("&", subz) fitz <- lapply(fitz, refittosubset, sub=subsub) gripe("Models were re-fitted after discarding quadrature points", "that were illegal under some of the models") } ## If any models were fitted by ippm we need to correct the df if(newton) { nfree <- sapply(lapply(objex, logLik), attr, which="df") ncanonical <- lengths(lapply(objex, coef)) nextra <- nfree - ncanonical if(is.null(fitz)) fitz <- lapply(objex, getglmfit) for(i in seq_along(fitz)) if(nextra[i] != 0) fitz[[i]]$df.residual <- fitz[[i]]$df.residual - nextra[i] } ## Finally do the appropriate ANOVA if(is.null(fitz)) fitz <- lapply(objex, getglmfit) result <- do.call(anova, append(fitz, list(test=test, dispersion=1))) ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model obj1 <- objex[[1L]] ss <- getglmsubset(obj1) nq <- if(!is.null(ss)) sum(ss) else n.quad(quad.ppm(obj1)) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. 
Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) ## change names of models fmlae <- sapply(objex, fmlaString) intrx <- sapply(objex, interString) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, "\t", intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1L]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && gibbs) { fitz <- lapply(objex, getglmfit) usegam <- any(sapply(fitz, inherits, what="gam")) if(usegam) { gripe("Deviance adjustment is not available for gam fits;", "unadjusted composite deviance calculated.") } else { ## issue warning, if not already given if(warn) warn.once("anovaAdjust", "anova.ppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to 
match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="internals", new.coef=thetaDot, fine=fine) J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) H <- if(!logi) JH$A1 else JH$Slog G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- residuals(bigger, type="score", new.coef=thetaDot, drop=TRUE) U <- integral.msr(Res) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ## ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo HooUo <- Hoo %*% Uo ScoreStat <- t(HooUo) %*% solve(Goo) %*% HooUo ## cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) cfac[i] <- ScoreStat/(t(HooUo) %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } if(newton) { ## calculation does not include 'covfunargs' cfa <- lapply(lapply(objex, getElement, name="covfunargs"), names) cfa <- unique(unlist(cfa)) action <- if(adjust && gibbs) "Adjustment to composite likelihood" else if(test == "Rao") "Score test calculation" else NULL if(!is.null(action)) gripe(action, "does not account for", "irregular trend parameters (covfunargs)", commasep(sQuote(cfa))) } } return(result) } refittosubset <- function(fut, sub) { etf <- environment(terms(fut)) gd <- get("glmdata", envir=etf) 
gd$.mpl.SUBSET <- sub assign("glmdata", gd, envir=etf) up <- update(fut, evaluate=FALSE) eval(up, envir=etf) } anova.ppm }) spatstat/R/persp.im.R0000644000176200001440000002511413333543255014207 0ustar liggesusers## ## persp.im.R ## ## 'persp' method for image objects ## plus annotation ## ## $Revision: 1.20 $ $Date: 2016/09/01 05:49:42 $ ## persp.im <- local({ persp.im <- function(x, ..., colmap=NULL, colin=x, apron=FALSE, visible=FALSE) { xname <- deparse(substitute(x)) xinfo <- summary(x) if(xinfo$type == "factor") stop("Perspective plot is inappropriate for factor-valued image") ## check whether 'col' was specified when 'colmap' was intended Col <- list(...)$col if(is.null(colmap) && !is.null(Col) && !is.matrix(Col) && length(Col) != 1) warning("Argument col is not a matrix. Did you mean colmap?") if(!missing(colin)) { ## separate image to determine colours verifyclass(colin, "im") if(!compatible(colin, x)) { ## resample 'colin' onto grid of 'x' colin <- as.im(colin, W=x) } if(is.null(colmap)) colmap <- spatstat.options("image.colfun")(128) } pop <- spatstat.options("par.persp") ## if(is.function(colmap) && !inherits(colmap, "colourmap")) { ## coerce to a 'colourmap' if possible clim <- range(colin, finite=TRUE) if(names(formals(colmap))[1] == "n") { colval <- colmap(128) colmap <- colourmap(colval, range=clim) } else { ## colour map determined by a rule (e.g. 'beachcolours') colmap <- invokeColourmapRule(colmap, colin, zlim=clim, colargs=list(...)) if(is.null(colmap)) stop("Unrecognised syntax for colour function") } } ## colour map? 
if(is.null(colmap)) { colinfo <- list(col=NULL) } else if(inherits(colmap, "colourmap")) { ## colour map object ## apply colour function to image data colval <- eval.im(colmap(colin)) colval <- t(as.matrix(colval)) ## strip one row and column for input to persp.default colval <- colval[-1, -1] ## replace NA by arbitrary value isna <- is.na(colval) if(any(isna)) { stuff <- attr(colmap, "stuff") colvalues <- stuff$outputs colval[isna] <- colvalues[1] } ## pass colour matrix (and suppress lines) colinfo <- list(col=colval, border=NA) } else { ## interpret 'colmap' as colour map if(is.list(colmap) && all(c("breaks", "col") %in% names(colmap))) { breaks <- colmap$breaks colvalues <- colmap$col } else if(is.vector(colmap)) { colvalues <- colmap breaks <- quantile(colin, seq(from=0,to=1,length.out=length(colvalues)+1)) if(!all(ok <- !duplicated(breaks))) { breaks <- breaks[ok] colvalues <- colvalues[ok[-1]] } } else warning("Unrecognised format for colour map") ## apply colour map to image values colid <- cut.im(colin, breaks=breaks, include.lowest=TRUE) colval <- eval.im(colvalues[unclass(colid)]) colval <- t(as.matrix(colval)) # nr <- nrow(colval) # nc <- ncol(colval) ## strip one row and column for input to persp.default colval <- colval[-1, -1] colval[is.na(colval)] <- colvalues[1] ## pass colour matrix (and suppress lines) colinfo <- list(col=colval, border=NA) } if(apron) { ## add an 'apron' zlim <- list(...)$zlim bottom <- if(!is.null(zlim)) zlim[1] else min(x) x <- na.handle.im(x, na.replace=bottom) x <- padimage(x, bottom) xinfo <- summary(x) if(is.matrix(colval <- colinfo$col)) { colval <- matrix(col2hex(colval), nrow(colval), ncol(colval)) grijs <- col2hex("lightgrey") colval <- cbind(grijs, rbind(grijs, colval, grijs), grijs) colinfo$col <- colval } } if(spatstat.options("monochrome")) colinfo$col <- to.grey(colinfo$col) ## get reasonable z scale while fixing x:y aspect ratio if(xinfo$type %in% c("integer", "real")) { zrange <- xinfo$range if(diff(zrange) > 0) 
{ xbox <- as.rectangle(x) zscale <- 0.5 * mean(diff(xbox$xrange), diff(xbox$yrange))/diff(zrange) zlim <- zrange } else { zscale <- NULL mx <- xinfo$mean zlim <- mx + c(-1,1) * if(mx == 0) 0.1 else min(abs(mx), 1) } } else zscale <- zlim <- NULL dotargs <- list(...) if(spatstat.options("monochrome")) dotargs <- col.args.to.grey(dotargs) yargh <- resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)), dotargs, pop, colinfo, list(xlab="x", ylab="y", zlab=xname), list(scale=FALSE, expand=zscale, zlim=zlim), list(main=xname), .StripNull=TRUE) jawab <- do.call.matched(persp, yargh, funargs=graphicsPars("persp")) attr(jawab, "expand") <- yargh$expand if(visible) attr(jawab, "visible") <- perspvis(x, M=jawab) return(invisible(jawab)) } diffit <- function(x) { y <- diff(x) return(c(y[1], y)) } perspvis <- function(X, ..., M=NULL) { stopifnot(is.im(X)) ## determine perspective matrix if(is.null(M)) M <- persp(X, ...) ## project the coordinates ## onto (x,y) plane of plot and z axis pointing out of it xy <- rasterxy.im(X, drop=TRUE) z <- X[drop=TRUE] xyz <- cbind(xy, z) v <- cbind(xyz, 1) %*% M pxyz <- v[,1:3]/v[,4] px <- pxyz[,1] py <- pxyz[,2] pz <- pxyz[,3] ## determine greatest possible difference in 'depth' in one pixel step PZ <- as.matrix(X) ok <- !is.na(PZ) PZ[ok] <- pz maxslip <- max(0, abs(apply(PZ, 1, diff)), abs(apply(PZ, 2, diff)), na.rm=TRUE) ## determine which pixels are in front d <- ceiling(dim(X)/2) jx <- cut(px, breaks=d[2]) iy <- cut(py, breaks=d[1]) zmax <- tapply(pz, list(iy,jx), max) isvis <- infront <- (pz > zmax[cbind(iy,jx)] - 2 * maxslip) ## if(TRUE) { ## Additionally check whether unit normal to surface is pointing to viewer Xmat <- as.matrix(X) dzdx <- cbind(0, t(apply(Xmat, 1, diff)))/X$xstep dzdy <- rbind(0, apply(Xmat, 2, diff))/X$ystep dzdx <- as.vector(dzdx[ok]) dzdy <- as.vector(dzdy[ok]) ## unscaled normal is (-dzdx, -dzdy, 1) if(FALSE) { ## THIS DOESN'T WORK - not sure why. 
## rescale so that length is half diameter of pixel fac <- sqrt(X$xstep^2 + X$ystep^2)/(2 * sqrt(dzdx^2+dzdy^2+1)) ## add to spatial coordinates xyzplus <- xyz + fac * cbind(-dzdx, -dzdy, 1) ## transform vplus <- cbind(xyzplus, 1) %*% M pplus <- vplus[,1:3]/vplus[,4] ## determine whether normal is pointing toward viewer deltaz <- pplus[,3] - pz isvis <- infront & (deltaz > 0) } else { theta <- atan2(M[2,1],M[1,1]) + pi/2 phi <- - atan2(M[3,3], M[3,2]) ## check agreement ## cat(paste("Guess: theta=", theta * 180/pi, "\n")) ## cat(paste("Guess: phi=", phi * 180/pi, "\n")) ## view vector viewer <- cos(phi) * c(cos(theta), sin(theta), 0) + c(0, 0, sin(phi)) ## inner product dotprod <- -dzdx * viewer[1] - dzdy * viewer[2] + viewer[3] isvis <- infront & (dotprod < 0) } } ## put into image Y <- eval.im(X > 0) Y[] <- isvis ## replace 'NA' by 'FALSE' if(anyNA(Y)) Y <- as.im(Y, na.replace=FALSE) return(Y) } persp.im }) perspPoints <- function(x, y=NULL, ..., Z, M) { xy <- xy.coords(x, y) stopifnot(is.im(Z)) X <- as.ppp(xy, W=Frame(Z)) if(!(is.matrix(M) && all(dim(M) == 4))) stop("M should be a 4 x 4 matrix, returned from persp()") V <- attr(M, "visible") if(is.null(V)) { warning(paste("M does not contain visibility information;", "it should be recomputed by persp() with visible=TRUE")) } else { ## restrict to visible points VX <- V[X, drop=FALSE] VX[is.na(VX)] <- FALSE X <- X[VX] } #' determine heights ZX <- Z[X, drop=FALSE] # may contain NA #' transform and plot points(trans3d(X$x, X$y, ZX, M), ...) 
} perspSegments <- local({ perspSegments <- function(x0, y0=NULL, x1=NULL, y1=NULL, ..., Z, M) { stopifnot(is.im(Z)) if(!(is.matrix(M) && all(dim(M) == 4))) stop("M should be a 4 x 4 matrix, returned from persp()") V <- attr(M, "visible") if(is.null(V)) warning(paste("M does not contain visibility information;", "it should be recomputed by persp() with visible=TRUE")) if(is.psp(X <- x0) && is.null(y0) && is.null(x1) && is.null(y1)) { eX <- X$ends # nX <- nrow(eX) } else { # nX <- length(x0) check.nvector(x0, naok=TRUE) check.nvector(y0, naok=TRUE) check.nvector(x1, naok=TRUE) check.nvector(y1, naok=TRUE) eX <- cbind(x0, y0, x1, y1) } if(is.null(V)) { Y <- eX } else { ## chop segments to length of single pixel eps <- with(Z, min(xstep,ystep)) Y <- do.call(rbind, lapply(as.data.frame(t(eX)), chopsegment, eps=eps)) ## determine which segments are visible yleft <- list(x=Y[,1], y=Y[,2]) yright <- list(x=Y[,3], y=Y[,4]) ok <- V[yleft, drop=FALSE] & V[yright, drop=FALSE] ok[is.na(ok)] <- FALSE Y <- Y[ok, ,drop=FALSE] } if(nrow(Y) == 0) return(invisible(NULL)) ## map to projected plane x0y0 <- trans3d(Y[,1], Y[,2], Z[list(x=Y[,1],y=Y[,2]), drop=FALSE], M) x1y1 <- trans3d(Y[,3], Y[,4], Z[list(x=Y[,3],y=Y[,4]), drop=FALSE], M) segments(x0y0$x, x0y0$y, x1y1$x, x1y1$y, ...) } chopsegment <- function(x, eps) { len2 <- (x[3] - x[1])^2 + (x[4] - x[2])^2 if(len2 <= eps^2) return(x) n <- ceiling(sqrt(len2)/eps) b <- (1:n)/n a <- (0:(n-1))/n return(cbind(x[1] + a * (x[3]-x[1]), x[2] + a * (x[4]-x[2]), x[1] + b * (x[3]-x[1]), x[2] + b * (x[4]-x[2]))) } perspSegments }) perspLines <- function(x, y=NULL, ..., Z, M) { xy <- xy.coords(x, y) n <- length(xy$x) perspSegments(x[-n], y[-n], x[-1], y[-1], Z=Z, M=M, ...) 
}

perspContour <- function(Z, M, ...,
                         nlevels=10, levels=pretty(range(Z), nlevels)) {
  ## Draw contour lines of the pixel image Z on an existing persp() plot.
  cl <- contourLines(x=Z$xcol, y=Z$yrow, z=t(Z$v),
                     nlevels=nlevels, levels=levels)
  for(i in seq_along(cl)) {
    cli <- cl[[i]]
    perspLines(cli$x, cli$y, ..., Z=Z, M=M)
  }
  invisible(NULL)
}
spatstat/R/densitylppVoronoi.R0000644000176200001440000001553213544333563016227 0ustar liggesusers#'
#'  densitylppVoronoi.R
#'
#'  densityVoronoi.lpp
#'
#'  $Revision: 1.12 $  $Date: 2019/08/12 08:34:19 $
#'

densityVoronoi.lpp <- function(X, f = 1, ..., nrep = 1, verbose = TRUE){
  ## (Smoothed) Voronoi intensity estimator on a linear network.
  ## f is the retention probability for independent thinning.
  # Check input
  stopifnot(is.lpp(X))
  check.1.real(f)
  if(badprobability(f)) stop("f should be a probability between 0 and 1")
  check.1.integer(nrep)
  stopifnot(nrep >= 1)
  #' secret argument
  what <- resolve.1.default(list(what="image"), list(...))
  what <- match.arg(what, c("image", "function"))
  if(f == 0 || npoints(X) == 0) {
    #' uniform estimate
    lambdabar <- intensity(unmark(X))
    fun <- function(x, y, seg, tp) { rep(lambdabar, length(seg)) }
    g <- linfun(fun, domain(X))
    if(what == "image") g <- as.linim(g, ...)
    return(g)
  }
  if(f == 1) {
    #' Voronoi estimate
    if(!anyDuplicated(X)) {
      tes <- lineardirichlet(X)
      num <- 1
    } else {
      um <- uniquemap(X)
      first <- (um == seq_along(um))
      UX <- X[first]
      tes <- lineardirichlet(UX)
      num <- as.integer(table(factor(um, levels=um[first])))
    }
    v <- tile.lengths(tes)
    g <- as.linfun(tes, values=num/v, navalue=0)
    if(what == "image") g <- as.linim(g, ...)
    return(g)
  }
  #' Smoothed Voronoi estimate.
  #' For each repetition calculate Dirichlet tessellation;
  #' save information in a list of dataframes; and save the
  #' corresponding intensity values (i.e. inverse tile lengths)
  #' in a list of vectors.
  dflist <- tilevalueslist <- vector("list", nrep)
  blankentry <- data.frame(seg = integer(0),
                           t0 = numeric(0), t1 = numeric(0),
                           tile = integer(0))
  for (i in 1:nrep) {
    Xthin <- rthin(X, f)
    if(npoints(Xthin) == 0){
      tilevalueslist[[i]] <- 0
      dflist[[i]] <- blankentry
    } else {
      if(!anyDuplicated(Xthin)) {
        tes <- lineardirichlet(Xthin)
        num <- 1
      } else {
        um <- uniquemap(Xthin)
        first <- (um == seq_along(um))
        UXthin <- Xthin[first]
        tes <- lineardirichlet(UXthin)
        num <- as.integer(table(factor(um, levels=um[first])))
      }
      v <- tile.lengths(tes)
      tilevalueslist[[i]] <- num/v
      dflist[[i]] <- tes$df
    }
  }
  #' Make the result into a function on the linear network
  fun <- function(x, y, seg, tp) {
    result <- numeric(length(seg))
    for(j in 1:nrep){
      dfj <- dflist[[j]]
      if(nrow(dfj) > 0) {
        #' classify query points by tessellation
        k <- lineartileindex(seg, tp, dfj)
        #' add Voronoi estimate
        lamj <- tilevalueslist[[j]]
        if(!anyNA(k)) {
          result <- result + lamj[k]
        } else {
          ok <- !is.na(k)
          result[ok] <- result[ok] + lamj[k[ok]]
        }
      }
    }
    return(result/(nrep*f))
  }
  g <- linfun(fun, domain(X))
  if(what == "image") g <- as.linim(g, ...)
  return(g)
}

bw.voronoi <- function(X, ..., probrange = c(0.2,0.8), nprob = 10,
                       prob = NULL, nrep = 100, verbose = TRUE, warn=TRUE){
  ## Select the thinning probability for the smoothed Voronoi estimator
  ## by likelihood cross-validation.
  stopifnot(is.lpp(X))
  trap.extra.arguments(..., .Context="in bw.voronoi")
  if(!is.null(prob)) {
    stopifnot(is.numeric(prob) && is.vector(prob))
    nprob <- length(prob)
  } else {
    check.range(probrange)
    prob <- seq(from=probrange[1L], to=probrange[2L], length.out=nprob)
  }
  check.1.integer(nrep)
  nX <- npoints(X)
  cooX <- coords(X)
  segX <- cooX$seg
  tpX <- cooX$tp
  if(nX == 0) return(max(prob))
  if(verbose) {
    cat("Performing", nrep, "replicates...
") pstate <- list() } lamhat <- array(, dim=c(nX, nprob, nrep)) for(irep in seq_len(nrep)) { if(verbose) pstate <- progressreport(irep, nrep, state=pstate) U <- runif(nX) for(j in seq_len(nprob)) { pj <- prob[j] retain <- (U <= pj) if(any(retain)) { Xp <- X[retain] #' compute leave-one-out estimates for points in Xp lamhat[retain, j, irep] <- looVoronoiLPP(Xp)/pj #' compute leave-one-out estimates for other points if(any(extra <- !retain)) { tess <- lineardirichlet(Xp) idx <- as.integer(lineartileindex(segX[extra], tpX[extra], tess)) lamhat[extra, j, irep] <- 1/(pj * tile.lengths(tess)[idx]) } } else lamhat[,j,irep] <- 0 } } lamhat <- apply(lamhat, c(1,2), mean) cv <- colSums(log(lamhat)) result <- bw.optim(cv, prob, iopt=which.max(cv), creator="bw.voronoi", criterion="Likelihood Cross-Validation", warnextreme=warn, hargnames=c("probrange", "prob"), unitname=NULL) return(result) } looVoronoiLPP <- function(X) { #' Compute leave-one-out Voronoi intensity estimate #' Hacked from 'lineardirichlet' nX <- npoints(X) if(nX == 0) return(numeric(0)) #' Unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] nuX <- npoints(uX) #' trivial case if(nuX <= 1) return(rep(1/volume(domain(X)), nX)) #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' nearest neighbour of each data point, in sorted unique pattern nnid <- nnwhich(uX[oo]) #' for each data point Y[i] in the sorted pattern Y, #' find the label of the tile that will cover Y[i] when Y[i] is removed neighlab <- coUXord$lab[nnid] #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths.psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' For 
each vertex of network, find nearest and second-nearest data points a <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=2) vnndist <- a$vnndist vnnwhich <- a$vnnwhich vnnlab <- coUXord$lab[vnnwhich] # index into original data pattern vnnlab <- matrix(vnnlab, ncol=2) #' compute result for each unique point lenf <- numeric(nuX) for(i in seq_len(nuX)) { #' compute Dirichlet tessellation WITHOUT point i coo.i <- coUXord[-i, , drop=FALSE] usenearest <- (vnnwhich[,1] != i) vnd <- ifelse(usenearest, vnndist[,1], vnndist[,2]) vnw <- ifelse(usenearest, vnnwhich[,1], vnnwhich[,2]) vnl <- ifelse(usenearest, vnnlab[,1], vnnlab[,2]) adjust <- (vnw > i) vnw[adjust] <- vnw[adjust] - 1L df <- ldtEngine(nv, ns, from, to, seglen, huge, coo.i, vnd, vnw, vnl) #' tile label of nearest neighbour neigh <- neighlab[i] #' find tile length associated with nearest neighbour of point i lenf[i] <- with(df, sum((tile == neigh) * seglen[seg] * (t1-t0))) } #' put back in correct place result <- numeric(npoints(X)) result[ii[oo]] <- 1/lenf return(result) } spatstat/R/softcore.R0000644000176200001440000000727413333543255014305 0ustar liggesusers# # # softcore.S # # $Revision: 2.16 $ $Date: 2018/03/15 07:37:41 $ # # Soft core processes. 
#
#
#    Softcore()  create an instance of a soft core process
#                [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#

Softcore <- local({

  ## Template interaction object; 'family', 'par' and 'version'
  ## are filled in later by instantiate.interact().
  BlankSoftcore <-
    list(
      name     = "Soft core process",
      creator  = "Softcore",
      family   = "pairwise.family", # evaluated later
      pot      = function(d, par) {
        ## pairwise log-potential at distance d
        sig0 <- par$sigma0
        if(is.na(sig0)) {
          p <- -d^(-2/par$kappa)
        } else {
          # expand around sigma0 and set large negative numbers to -Inf
          drat <- d/sig0
          p <- -drat^(-2/par$kappa)
          p[p < -25] <- -Inf
        }
        return(p)
      },
      par      = list(kappa = NULL, sigma0=NA), # filled in later
      parnames = c("Exponent kappa", "Initial approximation to sigma"),
      hasInf   = TRUE,
      selfstart = function(X, self) {
        # self starter for Softcore
        if(npoints(X) < 2) {
          # not enough points to make any decisions
          return(self)
        }
        md <- minnndist(X)
        if(md == 0) {
          warning(paste("Pattern contains duplicated points:",
                        "impossible under Softcore model"))
          return(self)
        }
        kappa <- self$par$kappa
        if(!is.na(sigma0 <- self$par$sigma0)) {
          # value fixed by user or previous invocation
          # check it
          if((md/sigma0)^(-2/kappa) > 25)
            warning(paste("Initial approximation sigma0 is too large;",
                          "some data points will have zero probability"))
          return(self)
        }
        # take sigma0 = minimum interpoint distance
        Softcore(kappa=kappa, sigma0=md)
      },
      init = function(self) {
        kappa <- self$par$kappa
        if(!is.numeric(kappa) || length(kappa) != 1 ||
           kappa <= 0 || kappa >= 1)
          stop(paste("Exponent kappa must be a",
                     "positive number less than 1"))
      },
      update = NULL,  # default OK
      print = NULL,   # default OK
      interpret = function(coeffs, self) {
        ## translate fitted canonical coefficient into sigma
        theta <- as.numeric(coeffs[1])
        sigma <- theta^(self$par$kappa/2)
        if(!is.na(sig0 <- self$par$sigma0))
          sigma <- sigma * sig0
        return(list(param=list(sigma=sigma),
                    inames="interaction parameter sigma",
                    printable=signif(sigma)))
      },
      valid = function(coeffs, self) {
        theta <- coeffs[1]
        return(is.finite(theta) && (theta >= 0))
      },
      project = function(coeffs, self) {
        if((self$valid)(coeffs, self)) return(NULL) else
        return(Poisson())
      },
      irange = function(self, coeffs=NA, epsilon=0, ...) {
        # distance d beyond which log(interaction factor) <= epsilon
        if(anyNA(coeffs) || epsilon == 0) return(Inf)
        theta <- as.numeric(coeffs[1])
        kappa <- self$par$kappa
        sig0 <- self$par$sigma0
        if(is.na(sig0)) sig0 <- 1
        return(sig0 * (theta/epsilon)^(kappa/2))
      },
      Mayer=function(coeffs, self) {
        # second Mayer cluster integral
        kappa <- self$par$kappa
        sigma <- (self$interpret)(coeffs, self)$param$sigma
        return(pi * (sigma^2) * gamma(1 - kappa))
      },
      version=NULL # filled in later
    )
  class(BlankSoftcore) <- "interact"

  ## user-callable constructor
  Softcore <- function(kappa, sigma0=NA) {
    instantiate.interact(BlankSoftcore, list(kappa=kappa, sigma0=sigma0))
  }

  Softcore <- intermaker(Softcore, BlankSoftcore)

  Softcore
})
spatstat/R/Gcom.R0000644000176200001440000001534013602573674013346 0ustar liggesusers#
#	Gcom.R
#
#	Model compensator of G
#
#	$Revision: 1.9 $	$Date: 2018/10/19 03:29:05 $
#
################################################################################
#

Gcom <- function(object, r=NULL, breaks=NULL, ...,
                 correction=c("border", "Hanisch"),
                 conditional=!is.poisson(object),
                 restrict=FALSE,
                 model=NULL,
                 trend=~1, interaction=Poisson(),
                 rbord=reach(interaction),
                 ppmcorrection="border",
                 truecoef=NULL, hi.res=NULL) {
  ## Model compensator of the nearest-neighbour distance function G.
  ## 'object' may be a fitted model (ppm) or a point pattern / quadrature scheme.
  if(is.ppm(object)) {
    fit <- object
  } else if(is.ppp(object) || is.quad(object)) {
    if(is.ppp(object)) object <- quadscheme(object, ...)
if(!is.null(model)) {
      fit <- update(model, Q=object, forcefit=TRUE)
    } else {
      fit <- ppm(object, trend=trend, interaction=interaction,
                 rbord=rbord, forcefit=TRUE)
    }
  } else
    stop("object should be a fitted point process model or a point pattern")
  if(missing(conditional) || is.null(conditional))
    conditional <- !is.poisson(fit)
  restrict <- isTRUE(restrict)
  if(restrict && !conditional) {
    warning("restrict=TRUE ignored because conditional=FALSE", call.=FALSE)
    restrict <- FALSE
  }
  #  rfixed <- !is.null(r) || !is.null(breaks)
  # selection of edge corrections
  #  correction.given <- !missing(correction) && !is.null(correction)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             Hanisch="Hanisch",
                             hanisch="Hanisch",
                             best="Hanisch"),
                           multi=TRUE)
  # Extract data and quadrature points
  Q <- quad.ppm(fit, drop=FALSE)
  X <- data.ppm(fit)
  Win <- X$window
  # edge correction algorithm
  algo <- if(!conditional) "classical" else
          if(restrict) "restricted" else "reweighted"
  # conditioning on border region?
  if(!conditional) {
    Wfree <- Win
  } else {
    rbord <- fit$rbord
    Wfree <- erosion(Win, rbord)
    if(restrict) {
      retain <- inside.owin(union.quad(Q), , Wfree)
      Q <- Q[Wfree]
      X <- X[Wfree]
      Win <- Wfree
    }
  }
  # Extract quadrature info
  U <- union.quad(Q)
  Z <- is.data(Q) # indicator data/dummy
  #  E <- equalsfun.quad(Q)
  WQ <- w.quad(Q) # quadrature weights
  # basic statistics
  npts <- npoints(X)
  areaW <- area(Win)
  lambda <- npts/areaW
  # quadrature points used
  USED <- if(algo == "reweighted") (bdist.points(U) > rbord)
          else rep.int(TRUE, U$n)
  # adjustments to account for restricted domain
  if(conditional && spatstat.options("eroded.intensity")) {
    npts.used <- sum(Z & USED)
    area.used <- sum(WQ[USED])
    lambda.used <- npts.used/area.used
  } else {
    npts.used <- npts
    area.used <- areaW
    lambda.used <- lambda
  }
  # determine breakpoints for r values
  rmaxdefault <- rmax.rule("G", if(restrict) Wfree else Win, lambda)
  breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault)
  rvals <- breaks$r
  rmax <- breaks$max
  # residuals
  resid <- residuals(fit, type="raw", drop=FALSE,
                     new.coef=truecoef, quad=hi.res)
  rescts <- with(resid, "continuous")
  if(restrict) {
    # keep only data inside Wfree
    rescts <- rescts[retain]
  }
  # absolute weight for continuous integrals
  # wc <- -rescts
  # nearest neighbours (quadrature point to data point)
  nn <- nncross(U, X, seq(U$n), seq(X$n))
  dIJ <- nn$dist
  I <- seq(U$n)
  # J <- nn$which
  DD <- Z <- (I <= X$n) # TRUE for data points
  wcIJ <- -rescts
  # determine whether a quadrature point will be used in integral
  okI <- USED[I]
  # initialise fv object
  r <- breaks$r
  df <- data.frame(r=r, pois=1 - exp(-pi * lambda.used * r^2))
  G <- fv(df, "r", substitute(G(r), NULL), "pois",
          . ~ r, alim=c(0, rmax),
          labl=c("r", "%s[pois](r)"),
          desc=c("distance argument r", "theoretical Poisson %s"),
          fname="G")
  # distance to boundary
  b <- bI <- bdist.points(U)
  dotnames <- character(0)
  # Border method
  if("border" %in% correction) {
    # reduced sample for G(r) of data only
    ZUSED <- Z & USED
    RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[ZUSED], breaks)
    Gb <- RSX$numerator/RSX$denom.count
    G <- bind.fv(G, data.frame(border=Gb), "hat(%s)[bord](r)",
                 "border-corrected nonparametric estimate of %s",
                 "border")
    # reduced sample for adjustment integral
    RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[ZUSED],
                  rep.int(1, sum(ZUSED)), breaks, fatal=FALSE)
    Gbcom <- RSD$numerator/(1 + RSD$denominator)
    G <- bind.fv(G, data.frame(bcom=Gbcom), "bold(C)~hat(%s)[bord](r)",
                 "model compensator of border-corrected %s",
                 "bcom")
    dotnames <- c("border", "bcom", "pois")
  }
  # Hanisch correction for data
  if("Hanisch" %in% correction) {
    nnd <- dIJ[DD & okI]
    bdry <- bI[DD & okI]
    # weights
    ea <- eroded.areas(Win, rvals)
    if(algo == "reweighted") {
      # replace weight(r) by weight(max(rbord,r))
      ea[rvals < rbord] <- eroded.areas(Win, rbord)
    }
    # compute
    x <- nnd[nnd <= bdry]
    h <- whist(x[x <= rmax], breaks=breaks$val)
    H <- (1/lambda.used) * cumsum(h/ea)
    # glue on
    G <- bind.fv(G, data.frame(han=H), "hat(%s)[han](r)",
                 "Hanisch correction estimate of %s",
                 "han")
    # Hanisch correction for adjustment integral
    nnd <- dIJ[okI]
    bdry <- bI[okI]
    wt <- wcIJ[okI]
    x <- nnd[nnd <= bdry]
    wt <- wt[nnd <= bdry]
    h <- whist(x[x <= rmax], breaks=breaks$val, weights=wt[x <= rmax])
    lambdaplus <- (npts.used + 1)/area.used
    Hint <- (1/lambdaplus) * cumsum(h/ea)
    # glue on
    G <- bind.fv(G, data.frame(hcom=Hint), "bold(C)~hat(%s)[han](r)",
                 "model compensator of Hanisch-corrected %s",
                 "hcom")
    # pseudovariance for Hanisch residual
    Hvar <- (1/lambdaplus^2) * cumsum(h/ea^2)
    G <- bind.fv(G, data.frame(hvar=Hvar), "bold(C)^2~hat(%s)[han](r)",
                 "Poincare variance for Hanisch corrected %s",
                 "hcom")
    # default plot does not show all components
    dotnames <- c("han", "hcom", dotnames)
  }
  # compute sensible 'alim'
  endpoint <- function(y, r, f) { min(r[y >= f * max(y)]) }
  amax <- endpoint(G$pois, G$r, 0.99)
  if(length(dotnames) > 0)
    amax <- max(amax,
                unlist(lapply(as.data.frame(G)[, dotnames, drop=FALSE],
                              endpoint, r=r, f=0.9)))
  attr(G, "alim") <- c(0, amax)
  #
  fvnames(G, ".") <- dotnames
  unitname(G) <- unitname(X)
  # secret tag used by 'Gres'
  attr(G, "maker") <- "Gcom"
  return(G)
}
spatstat/R/news.R0000644000176200001440000000105013422272135013412 0ustar liggesusers#
#  news.R
#
#  News and warnings
#

latest.news <- function(package="spatstat", doBrowse=FALSE, major=TRUE) {
  ## Display the NEWS entries for the current (major) version of a package.
  ## get version number
  v <- read.dcf(file=system.file("DESCRIPTION", package=package),
                fields="Version")
  if(major) {
    ## the current major version
    vp <- package_version(v)
    vv <- unlist(vp)
    v <- paste0(vv[1], ".", vv[2])
  }
  ne <- eval(substitute(news(Version >= v0, package=package),
                        list(v0=v)))
  page(ne, method="print", doBrowse=doBrowse)
  return(invisible(ne))
}

class(latest.news) <- "autoexec"
spatstat/R/lineardisc.R0000644000176200001440000002114513577325560014576 0ustar liggesusers#
#
#  disc.R
#
#  $Revision: 1.32 $ $Date: 2019/12/21 04:14:48 $
#
#  Compute the disc of radius r in a linear network
#
#

lineardisc <- function(L, x=locator(1), r,
                       plotit=TRUE,
                       cols=c("blue", "red", "green"), add=TRUE) {
  # L is the linear network (object of class "linnet")
  # x is the centre point of the disc
  # r is the radius of the disc
  #
  stopifnot(inherits(L, "linnet"))
  check.1.real(r)
  if(L$sparse) {
    message("Converting linear network to non-sparse representation..")
    L <- as.linnet(L, sparse=FALSE)
    message("Done.")
  }
  lines <- L$lines
  vertices <- L$vertices
  lengths <- lengths.psp(lines)
  win <- L$window
  marx <- marks(lines)
  ##
  if(missing(x) || is.null(x)) x <- clickppp(1, win, add=TRUE)
  if(is.lpp(x) && identical(L, domain(x))) {
    ## extract local coordinates
    stopifnot(npoints(x) == 1)
    coo <- coords(x)
    startsegment <- coo$seg
    startfraction <- coo$tp
  } else {
    ## interpret x
## ... as a 2D location
    x <- as.ppp(x, win)
    stopifnot(npoints(x) == 1)
    ## project x to nearest segment
    pro <- project2segment(x, lines)
    ## which segment?
    startsegment <- pro$mapXY
    ## parametric position of x along this segment
    startfraction <- pro$tp
  }
  ## vertices at each end of this segment
  A <- L$from[startsegment]
  B <- L$to[startsegment]
  # distances from x to A and B
  dxA <- startfraction * lengths[startsegment]
  dxB <- (1-startfraction) * lengths[startsegment]
  # is r large enough to reach both A and B?
  startfilled <- (max(dxA, dxB) <= r)
  # compute vector of shortest path distances from x to each vertex j,
  # going through A:
  dxAv <- dxA + L$dpath[A,]
  # going through B:
  dxBv <- dxB + L$dpath[B,]
  # going either through A or through B:
  dxv <- pmin.int(dxAv, dxBv)
  # Thus dxv[j] is the shortest path distance from x to vertex j.
  #
  # Determine which vertices are inside the disc of radius r
  covered <- (dxv <= r)
  # Thus covered[j] is TRUE if the j-th vertex is inside the disc.
  #
  # Determine which line segments are completely inside the disc
  #
  from <- L$from
  to   <- L$to
  # ( a line segment is inside the disc if the shortest distance
  # from x to one of its endpoints, plus the length of the segment,
  # is less than r ....
  allinside <- (dxv[from] + lengths <= r) | (dxv[to] + lengths <= r)
  # ... or alternatively, if the sum of the
  # two residual distances exceeds the length of the segment )
  residfrom <- pmax.int(0, r - dxv[from])
  residto   <- pmax.int(0, r - dxv[to])
  allinside <- allinside | (residfrom + residto >= lengths)
  # start segment is special
  allinside[startsegment] <- startfilled
  # Thus allinside[k] is TRUE if the k-th segment is inside the disc
  # Collect all these segments
  disclines <- lines[allinside]
  #
  # Determine which line segments cross the boundary of the disc
  boundary <- (covered[from] | covered[to]) & !allinside
  # For each of these, calculate the remaining distance at each end
  resid.from <- ifelseXB(boundary, pmax.int(r - dxv[from], 0), 0)
  resid.to   <- ifelseXB(boundary, pmax.int(r - dxv[to],   0), 0)
  # Where the remaining distance is nonzero, create segment and endpoint
  okfrom <- (resid.from > 0)
  okfrom[startsegment] <- FALSE
  if(any(okfrom)) {
    v0 <- vertices[from[okfrom]]
    v1 <- vertices[to[okfrom]]
    tp <- (resid.from/lengths)[okfrom]
    vfrom <- ppp((1-tp)*v0$x + tp*v1$x,
                 (1-tp)*v0$y + tp*v1$y,
                 window=win)
    extralinesfrom <- as.psp(from=v0, to=vfrom)
    if(!is.null(marx)) marks(extralinesfrom) <- marx %msub% okfrom
  } else vfrom <- extralinesfrom <- NULL
  #
  okto <- (resid.to > 0)
  okto[startsegment] <- FALSE
  if(any(okto)) {
    v0 <- vertices[to[okto]]
    v1 <- vertices[from[okto]]
    tp <- (resid.to/lengths)[okto]
    vto <- ppp((1-tp)*v0$x + tp*v1$x,
               (1-tp)*v0$y + tp*v1$y,
               window=win)
    extralinesto <- as.psp(from=v0, to=vto)
    if(!is.null(marx)) marks(extralinesto) <- marx %msub% okto
  } else vto <- extralinesto <- NULL
  #
  # deal with special case where start segment is not fully covered
  if(!startfilled) {
    vA <- vertices[A]
    vB <- vertices[B]
    rfrac <- r/lengths[startsegment]
    tleft <- pmax.int(startfraction-rfrac, 0)
    tright <- pmin.int(startfraction+rfrac, 1)
    vleft <- ppp((1-tleft) * vA$x + tleft * vB$x,
                 (1-tleft) * vA$y + tleft * vB$y,
                 window=win)
    vright <- ppp((1-tright) * vA$x + tright * vB$x,
                  (1-tright) * vA$y + tright * vB$y,
                  window=win)
    startline <- as.psp(from=vleft, to=vright)
    if(!is.null(marx)) marks(startline) <- marx %msub% startsegment
    startends <- superimpose(if(!covered[A]) vleft else NULL,
                             if(!covered[B]) vright else NULL)
  } else startline <- startends <- NULL
  #
  # combine all lines
  disclines <- superimpose(disclines,
                           extralinesfrom, extralinesto, startline,
                           W=win, check=FALSE)
  # combine all disc endpoints
  discends <- superimpose(vfrom, vto, vertices[dxv == r], startends,
                          W=win, check=FALSE)
  #
  if(plotit) {
    if(!add || dev.cur() == 1) plot(L, main="")
    plot(as.ppp(x), add=TRUE, cols=cols[1L], pch=16)
    plot(disclines, add=TRUE, col=cols[2L], lwd=2)
    plot(discends, add=TRUE, col=cols[3L], pch=16)
  }
  return(list(lines=disclines, endpoints=discends))
}

countends <- function(L, x=locator(1), r, toler=NULL, internal=list()) {
  # Count the endpoints of the disc of radius r[i] centred at x[i].
  # L is the linear network (object of class "linnet")
  # x is the centre point of the disc
  # r is the radius of the disc
  #
  stopifnot(inherits(L, "linnet"))
  sparse <- L$sparse %orifnull% is.null(L$dpath)
  if(sparse)
    stop(paste("countends() does not support linear networks",
               "that are stored in sparse matrix format.",
               "Please convert the data using as.linnet(sparse=FALSE)"),
         call.=FALSE)
  # get x
  if(missing(x)) x <- clickppp(1, Window(L), add=TRUE)
  if(!inherits(x, "lpp")) x <- as.lpp(x, L=L)
  np <- npoints(x)
  if(length(r) != np)
    stop("Length of vector r does not match number of points in x")
  ## determine whether network is connected
  iscon <- internal$is.connected %orifnull% is.connected(L)
  if(!iscon) {
    #' disconnected network - split into components
    result <- numeric(np)
    lab <- internal$connected.labels %orifnull% connected(L, what="labels")
    subsets <- split(seq_len(nvertices(L)), factor(lab))
    for(subi in subsets) {
      xi <- thinNetwork(x, retainvertices=subi)
      witch <- which(attr(xi, "retainpoints"))
      ok <- is.finite(r[witch])
      witchok <- witch[ok]
      result[witchok] <- countends(domain(xi), xi[ok], r[witchok],
                                   toler=toler,
                                   internal=list(is.connected=TRUE))
    }
    return(result)
  }
  lines <- L$lines
  vertices <- L$vertices
  lengths <- lengths.psp(lines)
  dpath <- L$dpath
  nv <- vertices$n
  ns <- lines$n
  #
  if(!spatstat.options("Ccountends")) {
    #' interpreted code
    result <- integer(np)
    for(i in seq_len(np))
      result[i] <- npoints(lineardisc(L, x[i], r[i], plotit=FALSE)$endpoints)
    return(result)
  }
  # extract coordinates
  coo <- coords(x)
  #' which segment
  startsegment <- coo$seg
  # parametric position of x along this segment
  startfraction <- coo$tp
  # convert indices to C
  seg0 <- startsegment - 1L
  from0 <- L$from - 1L
  to0 <- L$to - 1L
  # determine numerical tolerance
  if(is.null(toler)) {
    toler <- default.linnet.tolerance(L)
  } else {
    check.1.real(toler)
    stopifnot(toler > 0)
  }
  zz <- .C("Ccountends",
           np = as.integer(np),
           f = as.double(startfraction),
           seg = as.integer(seg0),
           r = as.double(r),
           nv = as.integer(nv),
           xv = as.double(vertices$x),
           yv = as.double(vertices$y),
           ns = as.integer(ns),
           from = as.integer(from0),
           to = as.integer(to0),
           dpath = as.double(dpath),
           lengths = as.double(lengths),
           toler=as.double(toler),
           nendpoints = as.integer(integer(np)),
           PACKAGE = "spatstat")
  zz$nendpoints
}

default.linnet.tolerance <- function(L) {
  # L could be a linnet or psp
  if(!is.null(toler <- L$toler)) return(toler)
  len2 <- lengths.psp(as.psp(L), squared=TRUE)
  len2pos <- len2[len2 > 0]
  toler <- if(length(len2pos) == 0) 0 else (0.001 * sqrt(min(len2pos)))
  toler <- makeLinnetTolerance(toler)
  return(toler)
}

makeLinnetTolerance <- function(toler) {
  ## clamp tolerance away from zero, ignoring NA/Inf entries
  max(sqrt(.Machine$double.xmin), toler[is.finite(toler)], na.rm=TRUE)
}
spatstat/R/quadratcount.R0000644000176200001440000001577613333543255015175 0ustar liggesusers#
#  quadratcount.R
#
#  $Revision: 1.57 $  $Date: 2016/08/15 03:05:15 $
#

quadratcount <- function(X, ...) {
  UseMethod("quadratcount")
}

quadratcount.splitppp <- function(X, ...) {
  solapply(X, quadratcount, ...)
}

quadratcount.ppp <- function(X, nx=5, ny=nx, ...,
                             xbreaks=NULL, ybreaks=NULL, tess=NULL) {
  ## Count points of X in quadrats (rectangular grid or tessellation).
  verifyclass(X, "ppp")
  W <- X$window
  if(is.null(tess)) {
    # rectangular boundaries
    if(!is.numeric(nx))
      stop("nx should be numeric")
    # start with rectangular tessellation
    tess <- quadrats(as.rectangle(W),
                     nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks)
    # fast code for counting points in rectangular grid
    Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid)
    #
    if(W$type != "rectangle") {
      # intersections of rectangles with window including empty intersections
      tess <- quadrats(X, nx=nx, ny=ny,
                       xbreaks=xbreaks, ybreaks=ybreaks, keepempty=TRUE)
      # now delete the empty quadrats and the corresponding counts
      nonempty <- !tiles.empty(tess)
      # WAS: nonempty <- !unlist(lapply(tiles(tess), is.empty))
      if(!any(nonempty))
        stop("All tiles are empty")
      if(!all(nonempty)) {
        # ntiles <- sum(nonempty)
        tess <- tess[nonempty]
        Xcount <- t(Xcount)[nonempty]
        # matrices and tables are in row-major order,
        # tiles in a rectangular tessellation are in column-major order
        Xcount <- array(Xcount,
                        dimnames=list(tile=tilenames(tess)))
        class(Xcount) <- "table"
      }
    }
  } else {
    # user-supplied tessellation
    if(!inherits(tess, "tess")) {
      tess <- try(as.tess(tess), silent=TRUE)
      if(inherits(tess, "try-error"))
        stop("The argument tess should be a tessellation", call.=FALSE)
    }
    if(tess$type == "rect") {
      # fast code for counting points in rectangular grid
      Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid)
    } else {
      # quadrats are another type of tessellation
      Y <- cut(X, tess)
      if(anyNA(marks(Y)))
        warning("Tessellation does not contain all the points of X")
      Xcount <- table(tile=marks(Y))
    }
  }
  attr(Xcount, "tess") <- tess
  class(Xcount) <- c("quadratcount", class(Xcount))
  return(Xcount)
}

plot.quadratcount <- function(x, ..., add=FALSE,
                              entries=as.vector(t(as.table(x))),
                              dx=0, dy=0, show.tiles=TRUE,
                              textargs = list()) {
  xname <- short.deparse(substitute(x))
  tess <- attr(x, "tess")
  # add=FALSE, show.tiles=TRUE  => plot tiles + numbers
  # add=FALSE, show.tiles=FALSE => plot window (add=FALSE) + numbers
  # add=TRUE,  show.tiles=TRUE  => plot tiles (add=TRUE) + numbers
  # add=TRUE,  show.tiles=FALSE => plot numbers
  if(show.tiles || !add) {
    context <- if(show.tiles) tess else as.owin(tess)
    do.call(plot,
            resolve.defaults(list(context, add=add),
                             list(...),
                             list(main=xname),
                             .StripNull=TRUE))
  }
  if(!is.null(entries)) {
    labels <- paste(as.vector(entries))
    til <- tiles(tess)
    incircles <- lapply(til, incircle)
    x0 <- sapply(incircles, getElement, name="x")
    y0 <- sapply(incircles, getElement, name="y")
    ra <- sapply(incircles, getElement, name="r")
    do.call.matched(text.default,
                    resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra),
                                     list(labels=labels),
                                     textargs,
                                     list(...)),
                    funargs=graphicsPars("text"))
  }
  return(invisible(NULL))
}

rectquadrat.breaks <- function(xr, yr, nx=5, ny=nx,
                               xbreaks=NULL, ybreaks=NULL) {
  ## Compute or validate breakpoints spanning the ranges xr, yr.
  if(is.null(xbreaks))
    xbreaks <- seq(from=xr[1], to=xr[2], length.out=nx+1)
  else if(min(xbreaks) > xr[1] || max(xbreaks) < xr[2])
    stop("xbreaks do not span the range of x coordinates in the window")
  if(is.null(ybreaks))
    ybreaks <- seq(from=yr[1], to=yr[2], length.out=ny+1)
  else if(min(ybreaks) > yr[1] || max(ybreaks) < yr[2])
    stop("ybreaks do not span the range of y coordinates in the window")
  return(list(xbreaks=xbreaks, ybreaks=ybreaks))
}

rectquadrat.countEngine <- function(x, y, xbreaks, ybreaks, weights) {
  ## Tabulate (weighted) counts of (x,y) in a rectangular grid.
  if(length(x) > 0) {
    # check validity of breaks
    if(!all(inside.range(range(x), range(xbreaks))))
      stop("xbreaks do not span the actual range of x coordinates in data")
    if(!all(inside.range(range(y), range(ybreaks))))
      stop("ybreaks do not span the actual range of y coordinates in data")
  }
  # WAS:
  #  xg <- cut(x, breaks=xbreaks, include.lowest=TRUE)
  #  yg <- cut(y, breaks=ybreaks, include.lowest=TRUE)
  xg <- fastFindInterval(x, xbreaks, labels=TRUE)
  yg <- fastFindInterval(y, ybreaks, labels=TRUE)
  if(missing(weights)) {
    sumz <- table(list(y=yg, x=xg))
  } else {
    # was:
    #  sumz <- tapply(weights, list(y=yg, x=xg), sum)
    #  if(any(nbg <- is.na(sumz)))
    #    sumz[nbg] <- 0
    sumz <- tapplysum(weights, list(y=yg, x=xg), do.names=TRUE)
  }
  # reverse order of y
  sumz <- sumz[rev(seq_len(nrow(sumz))), ]
  sumz <- as.table(sumz)
  #
  attr(sumz, "xbreaks") <- xbreaks
  attr(sumz, "ybreaks") <- ybreaks
  return(sumz)
}

quadrats <- function(X, nx=5, ny=nx, xbreaks = NULL, ybreaks = NULL,
                     keepempty=FALSE) {
  ## Divide the window of X into rectangular quadrats (a tessellation).
  W <- as.owin(X)
  xr <- W$xrange
  yr <- W$yrange
  b <- rectquadrat.breaks(xr, yr, nx, ny, xbreaks, ybreaks)
  # rectangular tiles
  Z <- tess(xgrid=b$xbreaks, ygrid=b$ybreaks, unitname=unitname(W))
  if(W$type != "rectangle") {
    # intersect rectangular tiles with window W
    if(!keepempty) {
      Z <- intersect.tess(Z, W)
    } else {
      til <- tiles(Z)
      for(i in seq_along(til))
        til[[i]] <- intersect.owin(til[[i]], W)
      Z <- tess(tiles=til, window=W, keepempty=TRUE)
    }
  }
  return(Z)
}

as.tess.quadratcount <- function(X) {
  return(attr(X, "tess"))
}

as.owin.quadratcount <- function(W, ..., fatal=TRUE) {
  return(as.owin(as.tess(W), ..., fatal=fatal))
}

domain.quadratcount <- Window.quadratcount <- function(X, ...) { as.owin(X) }

intensity.quadratcount <- function(X, ..., image=FALSE) {
  ## Convert quadrat counts to intensities (counts per unit area).
  Y <- as.tess(X)
  a <- tile.areas(Y)
  ## in the rectangular case, tiles are indexed in column-major order
  if(Y$type == "rect" && length(dim(X)) > 1)
    a <- matrix(a, byrow=TRUE, nrow(X), ncol(X))
  lambda <- X/a
  if(!image) {
    trap.extra.arguments(...)
    class(lambda) <- "table"
    attr(lambda, "tess") <- NULL
    return(lambda)
  }
  ## again to handle rectangular case
  lambda <- as.vector(t(lambda))
  tileid <- as.im(Y, ...)
  result <- eval.im(lambda[tileid])
  return(result)
}

## The shift method is undocumented.
## It is only needed in plot.listof / plot.solist / plot.layered

shift.quadratcount <- function(X, ...) {
  attr(X, "tess") <- te <- shift(attr(X, "tess"), ...)
  attr(X, "lastshift") <- getlastshift(te)
  return(X)
}
spatstat/R/unique.ppp.R0000644000176200001440000001455413471210456014561 0ustar liggesusers#
#   unique.ppp.R
#
# $Revision: 1.37 $  $Date: 2019/05/22 09:04:57 $
#
# Methods for 'multiplicity' co-authored by Sebastian Meyer
# Copyright 2013 Adrian Baddeley and Sebastian Meyer

unique.ppp <- function(x, ..., warn=FALSE) {
  ## Remove duplicated points from a point pattern.
  verifyclass(x, "ppp")
  dupe <- duplicated.ppp(x, ...)
  if(!any(dupe)) return(x)
  if(warn) warning(paste(sum(dupe), "duplicated points were removed"),
                   call.=FALSE)
  return(x[!dupe])
}

duplicated.ppp <- function(x, ..., rule=c("spatstat", "deldir", "unmark")) {
  ## Identify duplicated points (optionally taking marks into account).
  verifyclass(x, "ppp")
  rule <- match.arg(rule)
  if(rule == "deldir")
    return(deldir::duplicatedxy(x))
  n <- npoints(x)
  xloc <- unmark(x)
  if(!anyDuplicated(xloc))
    return(logical(n)) # i.e. vector of FALSE
  if(rule == "unmark") x <- xloc
  switch(markformat(x),
         none = {
           #' unmarked points
           u <- uniquemap(x)
           result <- (u != seq_along(u))
         },
         vector = {
           #' marked points - convert mark to integer
           m <- marks(x)
           if(is.factor(m)) {
             marks(x) <- as.integer(m)
           } else {
             um <- unique(m)
             marks(x) <- match(m, um)
           }
           result <- duplicated(as.data.frame(x))
         },
         dataframe = {
           result <- duplicated(as.data.frame(x))
         },
         # the following are currently not supported
         hyperframe = {
           result <- duplicated(as.data.frame(x))
         },
         list = {
           result <- duplicated(as.data.frame(as.hyperframe(x)))
         },
         stop(paste("Unknown mark type", sQuote(markformat(x))))
         )
  return(result)
}

anyDuplicated.ppp <- function(x, ...)
{ #' first check duplication of coordinates using fast code n <- npoints(x) if(n <= 1) return(FALSE) xx <- x$x yy <- x$y o <- order(xx, seq_len(n)) anydupXY <- .C("anydupxy", n=as.integer(n), x=as.double(xx[o]), y=as.double(yy[o]), anydup=as.integer(integer(1)), PACKAGE="spatstat")$anydup anydupXY && (!is.marked(x) || anyDuplicated(as.data.frame(x), ...)) } ## utility to check whether two rows are identical IdenticalRowPair <- function(i,j, a, b=a) { #' i and j are row indices (single integers) ai <- a[i,] bj <- b[j,] row.names(ai) <- row.names(bj) <- NULL identical(ai, bj) } ## vectorised IdenticalRows <- function(i, j, a, b=a) { #' i and j are row index vectors of equal length #' result[k] = identical( a[i[k],] , b[j[k],] ) Mo <- if(missing(b)) list(a=a) else list(a=a, b=b) mapply(IdenticalRowPair, i=i, j=j, MoreArgs=Mo, SIMPLIFY=TRUE, USE.NAMES=FALSE) } ## .......... multiplicity ............. multiplicity <- function(x) { UseMethod("multiplicity") } multiplicity.ppp <- function(x) { verifyclass(x, "ppp") np <- npoints(x) if(np == 0) return(integer(0)) cl <- closepairs(x, 0, what="indices") I <- cl$i J <- cl$j if(length(I) == 0) return(rep.int(1L, np)) switch(markformat(x), none = { }, vector = { marx <- as.data.frame(marks(x)) agree <- IdenticalRows(I, J, marx) I <- I[agree] J <- J[agree] }, dataframe = { marx <- marks(x) agree <- IdenticalRows(I, J, marx) I <- I[agree] J <- J[agree] }, hyperframe = { marx <- as.data.frame(marks(x)) # possibly discards columns agree <- IdenticalRows(I, J, marx) I <- I[agree] J <- J[agree] }, list = stop("Not implemented for lists of marks") ) if(length(I) == 0) return(rep.int(1L, np)) JbyI <- split(J, factor(I, levels=1:np)) result <- 1 + lengths(JbyI) return(result) } multiplicity.data.frame <- function (x) { if(all(unlist(lapply(x, is.numeric)))) return(multiplicityNumeric(as.matrix(x))) ## result template (vector of 1's) result <- setNames(rep.int(1L, nrow(x)), rownames(x)) ## check for duplicates (works for data frames, 
### multiplicity method for NUMERIC arrays, data frames, and vectors.
### For each row (or entry) it counts how many rows (entries) are equal
### to it, by checking for zero interpoint distances.
multiplicityNumeric <- function(x) {
  if(!anyDuplicated(x)) {
    ## all entries distinct -> every multiplicity is 1
    nx <- NROW(x)
    labels <- if (length(dim(x))) rownames(x) else names(x)
    if (is.null(labels)) labels <- seq_len(nx)
    return(setNames(rep.int(1L, nx), labels))
  }
  ## manhattan distance is cheaper than euclidean
  ## and is zero exactly when two rows are equal
  zerodist <- as.matrix(dist(x, method="manhattan")) == 0
  result <- as.integer(rowSums(zerodist))
  ## as.integer() discards the names; supply default labels
  if(is.null(names(result)))
    names(result) <- seq_along(result)
  return(result)
}
#
# nnmap.R
#
#  nnmap: for each pixel of a grid covering W, compute the distance to
#  and/or the index of the k-th nearest point of the pattern X.
#
#  X           point pattern (ppp)
#  k           single positive integer, or vector of positive integers
#  what        "dist", "which", or both
#  W           window determining the pixel raster (default: window of X)
#  is.sorted.X TRUE if the points of X are already sorted by the
#              coordinate named in 'sortby' (skips the internal sort)
#  sortby      coordinate used for sorting ("range" and "var" choose
#              x or y automatically from the data)
#
#  Returns a pixel image, or a (nested) list of images, unless the
#  undocumented argument outputarray=TRUE requests raw arrays.
#
nnmap <- function(X, k=1, what = c("dist", "which"),
                  ...,
                  W=as.owin(X),
                  is.sorted.X=FALSE,
                  sortby=c("range", "var", "x", "y")) {
  stopifnot(is.ppp(X))
  sortby <- match.arg(sortby)
  # secret argument: return raw arrays instead of images
  outputarray <- resolve.1.default("outputarray", ..., outputarray=FALSE)
  W <- as.owin(W %orifnull% X)
  # 'huge' is an upper bound on any distance, passed to the C routines
  huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(W)))
  what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
  want.dist  <- "dist" %in% what
  want.which <- "which" %in% what
  want.both  <- want.dist && want.which
  if(!missing(k)) {
    # k can be a single integer or an integer vector
    if(length(k) == 0)
      stop("k is an empty vector")
    else if(length(k) == 1) {
      if(k != round(k) || k <= 0)
        stop("k is not a positive integer")
    } else {
      if(any(k != round(k)) || any(k <= 0))
        stop(paste("some entries of the vector",
                   sQuote("k"), "are not positive integers"))
    }
  }
  k <- as.integer(k)
  kmax <- max(k)
  nk <- length(k)
  # note whether W is `really' a rectangle
  # (if not, results are clipped to W at the end)
  isrect <- is.rectangle(rescue.rectangle(W))
  # set up pixel array; '...' may carry resolution arguments for as.mask
  M <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W)))
  Mdim <- M$dim
  nxcol <- Mdim[2]
  nyrow <- Mdim[1]
  npixel <- nxcol * nyrow
  nX <- npoints(X)
  if(nX == 0) {
    # trivial - avoid potential problems in C code
    NND <- if(want.dist) array(Inf, dim=c(nk, Mdim)) else 0
    NNW <- if(want.which) array(NA_integer_, dim=c(nk, Mdim)) else 0
  } else {
    # usual case
    if(is.sorted.X && !(sortby %in% c("x", "y")))
      stop(paste("If data are already sorted,",
                 "the sorting coordinate must be specified explicitly",
                 "using sortby = \"x\" or \"y\""))
    # decide whether to sort on x or y coordinate
    switch(sortby,
           range = {
             s <- sidelengths(as.rectangle(X))
             sortby.y <- (s[1] < s[2])
           },
           var = {
             sortby.y <- (var(X$x) < var(X$y))
           },
           x={ sortby.y <- FALSE},
           y={ sortby.y <- TRUE}
           )
    # The C code expects points to be sorted by x coordinate.
    # If sorting on y, swap the roles of x and y everywhere,
    # and undo the swap after the C call.
    if(sortby.y) {
      oldM <- M
      X <- flipxy(X)
      W <- flipxy(W)
      M <- flipxy(M)
      Mdim <- M$dim
      nxcol <- Mdim[2]
      nyrow <- Mdim[1]
    }
    xx <- X$x
    yy <- X$y
    # sort only if needed
    if(!is.sorted.X){
      oX <- fave.order(xx)
      xx <- xx[oX]
      yy <- yy[oX]
    }
    # number of neighbours that are well-defined
    kmaxcalc <- min(nX, kmax)
    # prepare to call C code: output buffers
    # (length-1 dummies when that output is not wanted)
    nndv <- if(want.dist) numeric(npixel * kmaxcalc) else numeric(1)
    nnwh <- if(want.which) integer(npixel * kmaxcalc) else integer(1)
    # ............. call C code ............................
    if(kmaxcalc == 1) {
      zz <- .C("nnGinterface",
               nx = as.integer(nxcol),
               x0 = as.double(M$xcol[1]),
               xstep = as.double(M$xstep),
               ny = as.integer(nyrow),
               y0 = as.double(M$yrow[1]),
               ystep = as.double(M$ystep),
               np = as.integer(nX),
               xp = as.double(xx),
               yp = as.double(yy),
               wantdist = as.integer(want.dist),
               wantwhich = as.integer(want.which),
               nnd = as.double(nndv),
               nnwhich = as.integer(nnwh),
               huge = as.double(huge),
               PACKAGE = "spatstat")
    } else {
      zz <- .C("knnGinterface",
               nx = as.integer(nxcol),
               x0 = as.double(M$xcol[1]),
               xstep = as.double(M$xstep),
               ny = as.integer(nyrow),
               y0 = as.double(M$yrow[1]),
               ystep = as.double(M$ystep),
               np = as.integer(nX),
               xp = as.double(xx),
               yp = as.double(yy),
               kmax = as.integer(kmaxcalc),
               wantdist = as.integer(want.dist),
               wantwhich = as.integer(want.which),
               nnd = as.double(nndv),
               nnwhich = as.integer(nnwh),
               huge = as.double(huge),
               PACKAGE = "spatstat")
    }
    # extract results
    nnW <- zz$nnwhich
    nnD <- zz$nnd
    # map index 0 to NA
    # (0 is the C code's value for 'no such neighbour')
    if(want.which && any(uhoh <- (nnW == 0))) {
      nnW[uhoh] <- NA
      if(want.dist) nnD[uhoh] <- Inf
    }
    # reinterpret indices in original ordering
    if(!is.sorted.X) nnW <- oX[nnW]
    # reform as arrays: plane i holds the i-th nearest neighbour
    NND <- if(want.dist) array(nnD, dim=c(kmaxcalc, Mdim)) else 0
    NNW <- if(want.which) array(nnW, dim=c(kmaxcalc, Mdim)) else 0
    if(sortby.y) {
      # flip x and y back again
      if(want.dist) NND <- aperm(NND, c(1, 3, 2))
      if(want.which) NNW <- aperm(NNW, c(1, 3, 2))
      M <- oldM
      Mdim <- dim(M)
    }
    # the return value should correspond to the original vector k
    if(kmax > kmaxcalc) {
      # pad with NA / Inf
      # (requested neighbour orders beyond the number of data points)
      if(want.dist) {
        NNDcalc <- NND
        NND <- array(Inf, dim=c(kmax, Mdim))
        NND[1:kmaxcalc, , ] <- NNDcalc
      }
      if(want.which) {
        NNWcalc <- NNW
        NNW <- array(NA_integer_, dim=c(kmax, Mdim))
        NNW[1:kmaxcalc, , ] <- NNWcalc
      }
    }
    if(length(k) < kmax) {
      # select only the specified planes
      if(want.dist) NND <- NND[k, , , drop=FALSE]
      if(want.which) NNW <- NNW[k, , , drop=FALSE]
    }
  }
  # secret backdoor
  if(outputarray) {
    # return result as an array or pair of arrays
    result <- if(want.both) { list(dist=NND, which=NNW) } else
              if(want.dist) NND else NNW
    attr(result, "pixarea") <- with(M, xstep * ystep)
    return(result)
  }
  # format result as a list of images
  result <- list()
  if(want.dist) {
    dlist <- list()
    for(i in 1:nk) {
      DI <- as.im(NND[i,,], M)
      if(!isrect) DI <- DI[M, drop=FALSE]
      dlist[[i]] <- DI
    }
    names(dlist) <- k
    result[["dist"]] <- if(nk > 1) dlist else dlist[[1]]
  }
  if(want.which) {
    wlist <- list()
    for(i in 1:nk) {
      WI <- as.im(NNW[i,,], M)
      if(!isrect) WI <- WI[M, drop=FALSE]
      wlist[[i]] <- WI
    }
    names(wlist) <- k
    result[["which"]] <- if(nk > 1) wlist else wlist[[1]]
  }
  if(!want.both) result <- result[[1]]
  return(result)
}
#' Monte Carlo test of spatial segregation for a multitype point pattern.
#'
#'  X        multitype point pattern
#'  ...      additional arguments passed to relrisk()
#'  nsim     number of simulated patterns (default 19)
#'  permute  if TRUE, simulations randomly permute the existing marks;
#'           if FALSE, marks are resampled (see rlabel)
#'  verbose  print progress messages
#'  Xname    name of the dataset, used in the printed result
#'
#'  The test statistic T is the mean squared difference between the
#'  leave-nothing-out estimated type probabilities at the data points
#'  (relrisk(..., at="points")) and the overall type frequencies.
#'  Returns an object of class "htest".
segregation.test.ppp <- function(X, ..., nsim=19, permute=TRUE,
                                 verbose=TRUE, Xname) {
  if(missing(Xname))
    Xname <- short.deparse(substitute(X))
  stopifnot(is.ppp(X))
  stopifnot(is.multitype(X))
  #' arguments of relrisk() that would change the meaning of the statistic
  #' are forbidden here
  verboten <- c("at", "relative", "se", "leaveoneout",
                "casecontrol", "case", "control")
  if(any(nyet <- (verboten %in% names(list(...)))))
    stop(paste(ngettext(sum(nyet), "Argument", "Arguments"),
               commasep(sQuote(verboten[nyet])),
               "cannot be used"))
  #' overall fraction of points of each type
  lam <- intensity(X)
  pbar <- lam/sum(lam)
  np <- npoints(X)
  nt <- length(pbar)
  #' replicate the type frequencies as one row per data point,
  #' matching the layout of relrisk(..., at="points")
  pbar <- matrix(pbar, byrow=TRUE, nrow=np, ncol=nt)
  if(verbose) cat("Computing observed value... ")
  phat <- relrisk(X, at="points", ...)
  obs <- mean((phat-pbar)^2)
  if(verbose) {
    cat("Done.\nComputing simulated values... ")
    pstate <- list()
  }
  sim <- numeric(nsim)
  for(i in 1:nsim) {
    #' simulate under the null hypothesis of random labelling
    Xsim <- rlabel(X, permute=permute)
    phatsim <- relrisk(Xsim, at="points", ...)
    if(permute) pbarsim <- pbar else {
      #' resampled marks: recompute the type frequencies
      lamsim <- intensity(Xsim)
      pbarsim <- lamsim/sum(lamsim)
      pbarsim <- matrix(pbarsim, byrow=TRUE, nrow=np, ncol=nt)
    }
    sim[i] <- mean((phatsim - pbarsim)^2)
    if(verbose) pstate <- progressreport(i, nsim, state=pstate)
  }
  #' standard Monte Carlo p-value including the observed value
  p.value <- (1+sum(sim >= obs))/(1+nsim)
  names(obs) <- "T"
  out <- list(statistic=obs,
              p.value=p.value,
              method="Monte Carlo test of spatial segregation of types",
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
dkernel <- function(x, kernel="gaussian", mean=0, sd=1) {
  ## Probability density of the smoothing kernel, regarded as a
  ## distribution with the given mean and standard deviation 'sd',
  ## evaluated at the numeric vector 'x'.
  kernel <- match.kernel(kernel)
  stopifnot(is.numeric(x))
  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
  ## rescaling factor: the standard form of each kernel has
  ## support [-1,1] (gaussian: standard normal)
  halfwidth <- sd * kernel.factor(kernel)
  z <- abs(x - mean)/halfwidth
  f <- switch(kernel,
              gaussian     = dnorm(z),
              rectangular  = ifelse(z < 1, 1/2, 0),
              triangular   = ifelse(z < 1, 1 - z, 0),
              epanechnikov = ifelse(z < 1, (3/4) * (1 - z^2), 0),
              biweight     = ifelse(z < 1, (15/16) * (1 - z^2)^2, 0),
              cosine       = ifelse(z < 1, (1 + cos(pi * z))/2, 0),
              optcosine    = ifelse(z < 1, (pi/4) * cos(pi * z/2), 0))
  ## renormalise for the change of scale
  f/halfwidth
}
#' qkernel
#'    Quantile function of the smoothing kernel, regarded as a probability
#'    distribution with the given mean and standard deviation 'sd'.
#'    p          vector of probabilities
#'    lower.tail if FALSE, probabilities are P(X > x) rather than P(X <= x)
qkernel <- function(p, kernel="gaussian", mean=0, sd=1, lower.tail=TRUE) {
  kernel <- match.kernel(kernel)
  stopifnot(is.numeric(p))
  stopifnot(is.numeric(sd) && length(sd) == 1 && sd > 0)
  # rescaling factor (standard form of the kernel has support [-1,1])
  a <- sd * kernel.factor(kernel)
  # Handle the upper tail ONCE, here.
  # Everything below computes a lower-tail quantile of the flipped p.
  if(!lower.tail) p <- 1 - p
  y <- switch(kernel,
              gaussian = {
                # bug fix: 'p' was already complemented above, so passing
                # lower.tail=lower.tail here applied the complement twice,
                # returning the lower-tail quantile when the upper tail
                # was requested
                qnorm(p)
              },
              rectangular = {
                # bug fix: same double-complement error as in the
                # gaussian case
                qunif(p, min=-1, max=1)
              },
              triangular = {
                ifelse(p < 1/2, sqrt(2 * p) - 1, 1 - sqrt(2 * (1-p)))
              },
              epanechnikov = {
                # invert the cdf (2 + 3y - y^3)/4 using `polyroot'
                yy <- numeric(n <- length(p))
                yy[p == 0] <- -1
                yy[p == 1] <- 1
                inside <- (p != 0) & (p != 1)
                # coefficients of polynomial (2 + 3 y - y^3)/4
                z <- c(2, 3, 0, -1)/4
                for(i in seq(n)[inside]) {
                  sol <- polyroot(z - c(p[i], 0, 0, 0))
                  # keep the single real root in [-1, 1]
                  ok <- abs(Im(sol)) < 1e-6
                  realpart <- Re(sol)
                  ok <- ok & (abs(realpart) <= 1)
                  if(sum(ok) != 1)
                    stop(paste("Internal error:", sum(ok),
                               "roots of polynomial"))
                  yy[i] <- realpart[ok]
                }
                yy
              },
              biweight = {
                # invert the cdf using `polyroot'
                yy <- numeric(n <- length(p))
                yy[p == 0] <- -1
                yy[p == 1] <- 1
                inside <- (p != 0) & (p != 1)
                # coefficients of polynomial (8 + 15 y - 10 y^3 + 3 y^5)/16
                z <- c(8, 15, 0, -10, 0, 3)/16
                for(i in seq(n)[inside]) {
                  sol <- polyroot(z - c(p[i], 0, 0, 0, 0, 0))
                  # keep the single real root in [-1, 1]
                  ok <- abs(Im(sol)) < 1e-6
                  realpart <- Re(sol)
                  ok <- ok & (abs(realpart) <= 1)
                  if(sum(ok) != 1)
                    stop(paste("Internal error:", sum(ok),
                               "roots of polynomial"))
                  yy[i] <- realpart[ok]
                }
                yy
              },
              cosine = {
                # invert the cdf numerically using `uniroot'
                g <- function(y, pval) {
                  (y + sin(pi * y)/pi + 1)/2 - pval
                }
                yy <- numeric(n <- length(p))
                yy[p == 0] <- -1
                yy[p == 1] <- 1
                inside <- (p != 0) & (p != 1)
                for(i in seq(n)[inside])
                  yy[i] <- uniroot(g, c(-1,1), pval=p[i])$root
                yy
              },
              optcosine = {
                # cdf (sin(pi y/2) + 1)/2 inverts in closed form
                (2/pi) * asin(2 * p - 1)
              })
  # map the standard-form quantile back to the requested scale
  return(mean + a * y)
}
Math.im <- function(x, ...){
  ## Apply a 'Math' group generic (sqrt, exp, log, abs, floor, ...)
  ## pixelwise to the image 'x', returning an image on the same grid.
  pixels <- do.call(.Generic, list(x$v, ...))
  ## rebuild the image, preserving grid geometry and unit of length
  im(pixels,
     xcol = x$xcol, yrow = x$yrow,
     xrange = x$xrange, yrange = x$yrange,
     unitname = unitname(x))
}
Complex.im <- function(z){
  ## Apply a 'Complex' group generic (Re, Im, Mod, Arg, Conj)
  ## pixelwise to the image 'z', returning an image on the same grid.
  pixels <- do.call(.Generic, list(z=z$v))
  ## rebuild the image, preserving grid geometry and unit of length
  im(pixels,
     xcol = z$xcol, yrow = z$yrow,
     xrange = z$xrange, yrange = z$yrange,
     unitname = unitname(z))
}
Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- formula[[2]] trend <- rhs <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") varnames <- unique(variablesinformula(trend)) specials <- c("x", "y", "logpixelarea") covnames <- varnames[!(varnames %in% specials)] # add 'splitby' to covariate names if(!is.null(splitby)) { if(!is.character(splitby) || length(splitby) != 1) stop("splitby should be a single character string") covnames <- unique(c(covnames, splitby)) } CallInfo$responsename <- Yname CallInfo$varnames <- varnames CallInfo$covnames <- covnames # Parent environment parenv <- environment(formula) ######## FIND DATA AND RESHAPE ####################### Data <- slr.prepare(CallInfo, parenv, data, dataAtPoints, splitby) # W <- Data$W df <- Data$df ######## FIT MODEL ############################### dformula <- formula if(offset) { # insert offset term in formula rhs <- paste(as.character(rhs), collapse=" ") rhs <- paste(c(rhs, "offset(logpixelarea)"), collapse="+") dformula <- as.formula(paste(Yname, rhs)) } linkname <- link FIT <- glm(dformula, family=binomial(link=linkname), data=df, na.action=na.exclude) result <- list(call = CallInfo$cl, CallInfo = CallInfo, Data = Data, Fit = list(FIT=FIT, dformula=dformula), terms = terms(formula)) class(result) <- c("slrm", class(result)) return(result) } ################ UTILITY TO FIND AND RESHAPE DATA ################# slr.prepare <- function(CallInfo, envir, data, dataAtPoints=NULL, splitby=NULL, clip=TRUE) { # CallInfo is produced by slrm() # envir is parent environment of model formula # data is 'data' argument that takes precedence over 'envir' # 'clip' is TRUE if the data should be clipped to the domain of Y Yname <- CallInfo$responsename # varnames <- CallInfo$varnames covnames <- 
CallInfo$covnames dotargs <- CallInfo$dotargs # getobj <- function(nama, env, dat) { if(!is.null(dat) && !is.null(x <- dat[[nama]])) return(x) else return(get(nama, envir=env)) } # Get the response point pattern Y Y <- getobj(Yname, envir, data) if(!is.ppp(Y)) stop(paste("The response", sQuote(Yname), "must be a point pattern")) # if(!is.null(dataAtPoints)) { dataAtPoints <- as.data.frame(dataAtPoints) if(nrow(dataAtPoints) != npoints(Y)) stop(paste("dataAtPoints should have one row for each point in", dQuote(Yname))) } # Find the covariates ncov <- length(covnames) covlist <- lapply(as.list(covnames), getobj, env = envir, dat=data) names(covlist) <- covnames # Each covariate should be an image, a window, a function, or a single number if(ncov == 0) { isim <- isowin <- ismask <- isfun <- isnum <- isspatial <- israster <- logical(0) } else { isim <- sapply(covlist, is.im) isowin <- sapply(covlist, is.owin) ismask <- sapply(covlist, is.mask) isfun <- sapply(covlist, is.function) isspatial <- isim | isowin | isfun israster <- isim | ismask isnum <- sapply(covlist, is.numeric) & (lengths(covlist) == 1) } if(!all(ok <- (isspatial | isnum))) { n <- sum(!ok) stop(paste(ngettext(n, "The argument", "Each of the arguments"), commasep(sQuote(covnames[!ok])), "should be either an image, a window, or a single number")) } # 'splitby' if(!is.null(splitby)) { splitwin <- covlist[[splitby]] if(!is.owin(splitwin)) stop("The splitting covariate must be a window") # ensure it is a polygonal window covlist[[splitby]] <- splitwin <- as.polygonal(splitwin) # delete splitting covariate from lists to be processed issplit <- (covnames == splitby) isspatial[issplit] <- FALSE israster[issplit] <- FALSE } # # nnum <- sum(isnum) # nspatial <- sum(isspatial) nraster <- sum(israster) # numlist <- covlist[isnum] spatiallist <- covlist[isspatial] rasterlist <- covlist[israster] # numnames <- names(numlist) spatialnames <- names(spatiallist) # rasternames <- names(rasterlist) # ######## CONVERT TO 
RASTER DATA ############################### convert <- function(x,W) { if(is.im(x) || is.function(x)) return(as.im(x,W)) if(is.owin(x)) return(as.im(x, W, value=TRUE, na.replace=FALSE)) return(NULL) } # determine spatial domain & common resolution: convert all data to it if(length(dotargs) > 0 || nraster == 0) { # Pixel resolution is determined by explicit arguments if(clip) { # Window extent is determined by response point pattern D <- as.owin(Y) } else { # Window extent is union of domains of data domains <- lapply(append(spatiallist, list(Y)), as.owin) D <- do.call(union.owin, domains) } # Create template mask W <- do.call(as.mask, append(list(D), dotargs)) # Convert all spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } else { # Pixel resolution is determined implicitly by covariate data W <- do.call(commonGrid, rasterlist) if(clip) { # Restrict data to spatial extent of response point pattern W <- intersect.owin(W, as.owin(Y)) } # Adjust spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } # images containing coordinate values xcoordim <- as.im(function(x,y){x}, W=W) ycoordim <- as.im(function(x,y){y}, W=W) # # create a list of covariate images, with names as in formula covimages <- append(list(x=xcoordim, y=ycoordim), spatiallist) basepixelarea <- W$xstep * W$ystep ######## ASSEMBLE DATA FRAME ############################### if(is.null(splitby)) { df <- slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, basepixelarea) sumYloga <- Y$n * log(basepixelarea) serial <- attr(df, "serial") } else { # fractional pixel areas pixsplit <- pixellate(splitwin, W) splitpixelarea <- as.vector(as.matrix(pixsplit)) # determine which points of Y are inside/outside window ins <- inside.owin(Y$x, Y$y, splitwin) # split processing dfIN <- slrAssemblePixelData(Y[ins], Yname, W, covimages, dataAtPoints[ins, ], splitpixelarea) serialIN <- attr(dfIN, "serial") dfIN[[splitby]] <- TRUE dfOUT <- 
#' slrAssemblePixelData
#'    Build the pixel-level data frame used to fit a spatial logistic
#'    regression.
#'    Y            point pattern of responses
#'    Yname        column name to use for the 0/1 response
#'    W            pixel raster (mask)
#'    covimages    list of covariate images, including 'x' and 'y'
#'                 coordinate images
#'    dataAtPoints optional data frame of exact covariate values at the
#'                 points of Y (overrides the pixel values there)
#'    pixelarea    pixel area: a single number, or a vector with one
#'                 entry per pixel (fractional areas)
#'    Returns a data frame with one row per pixel and an attribute
#'    "serial" giving the pixel serial numbers retained.
slrAssemblePixelData <- function(Y, Yname, W, covimages,
                                 dataAtPoints, pixelarea) {
  # pixellate point pattern: response is 1 on pixels containing
  # at least one point of Y, 0 elsewhere
  Z <- pixellate(Y, W=W)
  Z <- eval.im(as.integer(Z>0))
  # overwrite pixel entries for data points using exact values
  # coordinates
  xcoordim <- covimages[["x"]]
  ycoordim <- covimages[["y"]]
  xcoordim[Y] <- Y$x
  ycoordim[Y] <- Y$y
  covimages[["x"]] <- xcoordim
  covimages[["y"]] <- ycoordim
  # overwrite pixel entries with exact covariate values, where supplied
  if(!is.null(dataAtPoints)) {
    enames <- colnames(dataAtPoints)
    relevant <- enames %in% names(covimages)
    for(v in enames[relevant]) {
      cova <- covimages[[v]]
      cova[Y] <- dataAtPoints[, v, drop=TRUE]
      covimages[[v]] <- cova
    }
  }
  # assemble list of all images, response first
  Ylist <- list(Z)
  names(Ylist) <- Yname
  allimages <- append(Ylist, covimages)
  # extract pixel values of each image
  # (restoring factor levels for factor-valued images)
  pixelvalues <- function(z) {
    v <- as.vector(as.matrix(z))
    if(z$type != "factor") return(v)
    lev <- levels(z)
    return(factor(v, levels=seq_along(lev), labels=lev))
  }
  pixdata <- lapply(allimages, pixelvalues)
  df <- as.data.frame(pixdata)
  serial <- seq_len(nrow(df))
  # add log(pixel area) column, used as an offset in the fit
  if(length(pixelarea) == 1) {
    df <- cbind(df, logpixelarea=log(pixelarea))
  } else {
    # fractional areas: discard pixels with zero area
    ok <- (pixelarea > 0)
    df <- cbind(df[ok, ], logpixelarea=log(pixelarea[ok]))
    serial <- serial[ok]
  }
  attr(df, "serial") <- serial
  return(df)
}

#' is.slrm: test whether an object is a fitted spatial logistic regression
is.slrm <- function(x) {
  inherits(x, "slrm")
}
logLik.slrm <- function(object, ..., adjust=TRUE) {
  ## Log-likelihood of a fitted spatial logistic regression model.
  ## With adjust=TRUE (the default) the binomial log-likelihood of the
  ## underlying glm is adjusted by subtracting the stored sum of
  ## log pixel areas, giving the point-process log-likelihood.
  value <- -deviance(object$Fit$FIT)/2
  if(adjust)
    value <- value - object$Data$sumYloga
  attr(value, "df") <- length(coef(object))
  class(value) <- "logLik"
  value
}
labels.slrm <- function(object, ...) {
  ## Labels of the model terms that were actually fitted,
  ## i.e. terms contributing at least one finite coefficient.
  termlabels <- attr(terms(object), "term.labels")
  if(length(termlabels) == 0)
    return(character(0))
  estimates <- coef(object)
  ## the 'assign' attribute maps each model-matrix column to a term;
  ## the value 0 denotes the intercept, which has no term label
  whichterm <- attr(model.matrix(object), "assign")
  usable <- is.finite(estimates) & (whichterm > 0)
  termlabels[unique(whichterm[usable])]
}
model.matrix.slrm <- function(object,..., keepNA=TRUE) {
  ## Design matrix of the fitted spatial logistic regression.
  ## With keepNA=TRUE (the default), rows that the glm fit dropped
  ## because of NA covariate values are reinstated as rows of NA's,
  ## so the result has one row per row of the pixel data frame.
  FIT <- object$Fit$FIT
  mm <- model.matrix(FIT, ...)
  if(!keepNA)
    return(mm)
  df <- object$Data$df
  comp <- complete.cases(df)
  if(all(comp))
    return(mm)
  if(sum(comp) != nrow(mm))
    stop("Internal error in patching NA's")
  ## Bug fix: the padded matrix previously lost its dimnames, so callers
  ## could no longer address columns by name. Preserve the column names
  ## (and use NA_real_ so the matrix is numeric, like model.matrix output).
  mmplus <- matrix(NA_real_, nrow(df), ncol(mm),
                   dimnames=list(NULL, colnames(mm)))
  mmplus[comp, ] <- mm
  return(mmplus)
}
is.stationary.slrm <- function(x) {
  ## A fitted slrm is stationary iff its trend formula is trivial (~1).
  righthand <- formula(x)[c(1, 3)]   # drop the response, keep '~' and RHS
  identical.formulae(righthand, ~1)
}
")) pstate <- list() } for(i in 1:nsim) { out[[i]] <- rpoispp(lambda, lmax=lmax) if(verbose) pstate <- progressreport(i, nsim, state=pstate) } #' pack up out <- simulationresult(out, nsim, drop) out <- timed(out, starttime=starttime) attr(out, "seed") <- RNGstate return(out) } spatstat/R/psstA.R0000644000176200001440000001143313333543255013543 0ustar liggesusers# # psstA.R # # Pseudoscore residual for unnormalised F (area-interaction) # # $Revision: 1.7 $ $Date: 2014/11/11 02:31:44 $ # ################################################################################ # psstA <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", correction="all", truecoef=NULL, hi.res=NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) { if(is.ppm(object)) fit <- object else if(is.ppp(object) || is.quad(object)) { # convert to quadscheme if(is.ppp(object)) object <- quadscheme(object, ...) # fit model if(!is.null(model)) fit <- update(model, Q=object, forcefit=TRUE) else if(ppmcorrection == "border") fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) else fit <- ppm(object, trend=trend, interaction=interaction, correction=ppmcorrection, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) bX <- bdist.points(X) USEDX <- (bX > rbord) } else { USED <- rep.int(TRUE, U$n) USEDX <- rep.int(TRUE, X$n) } # basic statistics Win <- Window(X) npts <- 
npoints(X) areaW <- area(Win) lambda <- npts/areaW # determine breakpoints for r values rmaxdefault <- rmax.rule("F", Win, lambda) if(rfixed) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) else { # create fairly coarse 'r' values r <- seq(0, rmaxdefault, length=nr) breaks <- breakpts.from.r(r) } rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw", drop=FALSE, new.coef=truecoef, quad=hi.res) # rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[A](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[A]") # # for efficiency, compute the largest value of distance transform Dmax <- 0 for(i in 1:npts) { Di <- distmap(X[-i]) Dimax <- summary(Di)$max Dmax <- max(Dmax, Dimax) } Rmax <- min(max(rvals), Dmax * 1.1) nontrivial <- (rvals <= Rmax) trivialzeroes <- numeric(sum(!nontrivial)) # pseudosum Ax <- areaLoss.grid(X, rvals[nontrivial], subset=USEDX, ngrid=ngrid) C1 <- apply(Ax, 2, sum) C1 <- c(C1, trivialzeroes) # pseudocompensator OK <- USED & !Z Au <- areaGain.grid(U[OK], X, rvals[nontrivial], W=Win, ngrid=ngrid) lamu <- matrix(wc[OK], nrow=nrow(Au), ncol=ncol(Au)) C2 <- apply(lamu * Au, 2, sum) C2 <- c(C2, trivialzeroes) # pseudoscore residual Ctot <- C1 - C2 # tack on ans <- bind.fv(ans, data.frame(dat=C1, com=C2, res=Ctot), c("Sigma~Delta~V[A](r)", "bold(C)~Delta~V[A](r)", "%s(r)"), c("data pseudosum (contribution to %s)", "model pseudocompensator (contribution to %s)", "pseudoscore residual %s"), "res") # # pseudovariance # (skipped if called by envelope() etc) # if(correction == "all") { lamX <- matrix(wc[USED & Z], nrow=nrow(Ax), ncol=ncol(Ax)) Var <- apply(lamu * Au^2, 2, sum) + apply(lamX * Ax^2, 2, sum) Var <- c(Var, trivialzeroes) # two-sigma limits TwoSig <- 
2 * sqrt(Var) # tack on ans <- bind.fv(ans, data.frame(var=Var, up=TwoSig, lo=-TwoSig), c("bold(C)^2~Delta~V[A](r)", "%s[up](r)", "%s[lo](r)"), c("pseudovariance of %s", "upper 2sigma critical limit for %s", "lower 2sigma critical limit for %s"), "res") fvnames(ans, ".") <- c("res", "up", "lo", "theo") } unitname(ans) <- unitname(fit) # return(ans) } spatstat/R/diagram.R0000644000176200001440000002702413377673631014072 0ustar liggesusers## ## diagram.R ## ## Simple objects for the elements of a diagram (text, arrows etc) ## that are compatible with plot.layered and plot.solist ## ## $Revision: 1.14 $ $Date: 2018/11/29 05:20:09 $ # ......... internal class 'diagramobj' supports other classes ......... diagramobj <- function(X, ...) { if(inherits(try(Frame(X), silent=TRUE), "try-error")) stop("X is not a spatial object") a <- list(...) if(sum(nzchar(names(a))) != length(a)) stop("All extra arguments must be named") attributes(X) <- append(attributes(X), a) class(X) <- c("diagramobj", class(X)) return(X) } "[.diagramobj" <- function(x, ...) { y <- NextMethod("[") attributes(y) <- attributes(x) return(y) } shift.diagramobj <- function(X, ...) { y <- NextMethod("shift") attributes(y) <- attributes(X) return(y) } scalardilate.diagramobj <- function(X, f, ...) { y <- NextMethod("scalardilate") attributes(y) <- attributes(X) return(y) } # .............. user-accessible classes ................ # ......... (these only need a creator and a plot method) ...... ## ........... text ................. textstring <- function(x, y, txt=NULL, ...) 
{ if(is.ppp(x) && missing(y)) { X <- x Window(X) <- boundingbox(x) } else { if(missing(y) && checkfields(x, c("x", "y"))) { y <- x$y x <- x$x stopifnot(length(x) == length(y)) } X <- ppp(x, y, window=owin(range(x),range(y))) } marks(X) <- txt Y <- diagramobj(X, otherargs=list(...)) class(Y) <- c("textstring", class(Y)) return(Y) } plot.textstring <- function(x, ..., do.plot=TRUE) { txt <- marks(x) otha <- attr(x, "otherargs") if(do.plot) do.call.matched(text.default, resolve.defaults(list(...), list(x=x$x, y=x$y, labels=txt), otha), funargs=graphicsPars("text")) return(invisible(Frame(x))) } print.textstring <- function(x, ...) { splat("Text string object") txt <- marks(x) if(npoints(x) == 1) { splat("Text:", dQuote(txt)) splat("Coordinates:", paren(paste(as.vector(coords(x)), collapse=", "))) } else { splat("Text:") print(txt) splat("Coordinates:") print(coords(x)) } return(invisible(NULL)) } ## ........... 'yardstick' to display scale information ................ yardstick <- function(x0, y0, x1, y1, txt=NULL, ...) { nomore <- missing(y0) && missing(x1) && missing(y1) if(is.ppp(x0) && nomore) { if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points") X <- x0 } else if(is.psp(x0) && nomore) { if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment") X <- endpoints.psp(x0) } else { xx <- c(x0, x1) yy <- c(y0, y1) B <- boundingbox(list(x=xx, y=yy)) X <- ppp(xx, yy, window=B, check=FALSE) } Window(X) <- boundingbox(X) Y <- diagramobj(X, txt=txt, otherargs=list(...)) class(Y) <- c("yardstick", class(Y)) return(Y) } plot.yardstick <- local({ mysegments <- function(x0, y0, x1, y1, ..., moreargs=list()) { ## ignore unrecognised arguments without whingeing do.call.matched(segments, resolve.defaults(list(x0=x0, y0=y0, x1=x1, y1=y1), list(...), moreargs), extrargs=c("col", "lty", "lwd", "xpd", "lend")) } myarrows <- function(x0, y0, x1, y1, ..., left=TRUE, right=TRUE, angle=20, frac=0.25, main, show.all, add) { mysegments(x0, y0, x1, y1, ...) 
if(left || right) { ang <- angle * pi/180 co <- cos(ang) si <- sin(ang) dx <- x1-x0 dy <- y1-y0 le <- sqrt(dx^2 + dy^2) rot <- matrix(c(dx, dy, -dy, dx)/le, 2, 2) arlen <- frac * le up <- arlen * (rot %*% c(co, si)) lo <- arlen * (rot %*% c(co, -si)) if(left) { mysegments(x0, y0, x0+up[1L], y0+up[2L], ...) mysegments(x0, y0, x0+lo[1L], y0+lo[2L], ...) } if(right) { mysegments(x1, y1, x1-up[1L], y1-up[2L], ...) mysegments(x1, y1, x1-lo[1L], y1-lo[2L], ...) } } return(invisible(NULL)) } plot.yardstick <- function(x, ..., angle=20, frac=1/8, split=FALSE, shrink=1/4, pos=NULL, txt.args=list(), txt.shift=c(0,0), do.plot=TRUE) { if(do.plot) { txt <- attr(x, "txt") argh <- resolve.defaults(list(...), attr(x, "otherargs")) A <- as.numeric(coords(x)[1L,]) B <- as.numeric(coords(x)[2L,]) M <- (A+B)/2 if(!split) { ## double-headed arrow myarrows(A[1L], A[2L], B[1L], y1=B[2L], angle=angle, frac=frac, moreargs=argh) if(is.null(pos) && !("adj" %in% names(txt.args))) pos <- if(abs(A[1L] - B[1L]) < abs(A[2L] - B[2L])) 4 else 3 } else { ## two single-headed arrows with text dM <- (shrink/2) * (B - A) AM <- M - dM BM <- M + dM newfrac <- frac/((1-shrink)/2) myarrows(AM[1L], AM[2L], A[1L], A[2L], angle=angle, frac=newfrac, left=FALSE, moreargs=argh) myarrows(BM[1L], BM[2L], B[1L], B[2L], angle=angle, frac=newfrac, left=FALSE, moreargs=argh) } if(is.null(txt.shift)) txt.shift <- rep(0, 2) else txt.shift <- ensure2vector(unlist(txt.shift)) do.call.matched(text.default, resolve.defaults(list(x=M[1L] + txt.shift[1L], y=M[2L] + txt.shift[2L]), txt.args, list(labels=txt, pos=pos), argh, .MatchNull=FALSE), funargs=graphicsPars("text")) } return(invisible(Window(x))) } plot.yardstick }) print.yardstick <- function(x, ...) 
{ splat("Yardstick") if(!is.null(txt <- attr(x, "txt"))) splat("Text:", txt) ui <- summary(unitname(x)) splat("Length:", pairdist(x)[1L,2L], ui$plural, ui$explain) splat("Midpoint:", paren(paste(signif(c(mean(x$x), mean(x$y)), 3), collapse=", "))) dx <- diff(range(x$x)) dy <- diff(range(x$y)) orient <- if(dx == 0) "vertical" else if(dy == 0) "horizontal" else paste(atan2(dy, dx) * 180/pi, "degrees") splat("Orientation:", orient) return(invisible(NULL)) } ## code to draw a decent-looking arrow in spatstat diagrams ## (works in layered objects) ## The name 'onearrow' is used because R contains ## hidden functions [.arrow, length.arrow onearrow <- function(x0, y0, x1, y1, txt=NULL, ...) { nomore <- missing(y0) && missing(x1) && missing(y1) if(is.ppp(x0) && nomore) { if(npoints(x0) != 2) stop("x0 should consist of exactly 2 points") X <- x0 } else if(is.psp(x0) && nomore) { if(nobjects(x0) != 1) stop("x0 should consist of exactly 1 segment") X <- endpoints.psp(x0) } else { xx <- c(x0, x1) yy <- c(y0, y1) B <- boundingbox(list(x=xx, y=yy)) X <- ppp(xx, yy, window=B, check=FALSE) } Window(X) <- boundingbox(X) Y <- diagramobj(X, txt=txt, otherargs=list(...)) class(Y) <- c("onearrow", class(Y)) return(Y) } print.onearrow <- function(x, ...) 
{ splat("Single arrow from", paren(paste0(x$x[1], ", ", x$y[1])), "to", paren(paste0(x$x[2], ", ", x$y[2]))) if(!is.null(txt <- attr(x, "txt"))) splat("Text:", sQuote(txt)) if(length(oa <- attr(x, "otherargs"))) { cat("Graphical parameters:\n") print(unlist(oa)) } return(invisible(NULL)) } plot.onearrow <- function(x, ..., add=FALSE, main="", retract=0.05, headfraction=0.25, headangle=12, # degrees headnick=0.1, # fraction of head length col.head=NA, lwd.head=lwd, lwd=1, col=1, zap=FALSE, zapfraction=0.07, pch=1, cex=1, do.plot=TRUE, do.points=FALSE, show.all=!add) { result <- plot.ppp(x, main=main, add=add, pch=pch, cex=cex, do.plot=do.plot && do.points, show.all=show.all) if(do.plot && !do.points && !add) plot(Frame(x), main="", type="n") txt <- attr(x, "txt") ## resolve formal arguments with those stored in the object saved <- attr(x, "otherargs") if(missing(col)) col <- saved[["col"]] %orifnull% col if(missing(lwd)) lwd <- saved[["lwd"]] %orifnull% lwd if(missing(pch)) pch <- saved[["pch"]] %orifnull% pch if(missing(cex)) cex <- saved[["cex"]] %orifnull% cex if(missing(col.head)) col.head <- saved[["col.head"]] %orifnull% col.head if(missing(lwd.head)) lwd.head <- saved[["lwd.head"]] %orifnull% lwd.head if(missing(retract)) retract <- saved[["retract"]] %orifnull% retract if(missing(headfraction)) headfraction <- saved[["headfraction"]] %orifnull% headfraction if(missing(headangle)) headangle <- saved[["headangle"]] %orifnull% headangle if(missing(headnick)) headnick <- saved[["headnick"]] %orifnull% headnick if(missing(zap)) zap <- saved[["zap"]] %orifnull% zap if(missing(zapfraction)) zapfraction <- saved[["zapfraction"]] %orifnull% zapfraction argh <- list(col=col, lwd=lwd, cex=cex, pch=pch, ...) 
## calculate A <- as.numeric(coords(x)[1L,]) B <- as.numeric(coords(x)[2L,]) V <- B - A AR <- A + retract * V BR <- B - retract * V H <- B - headfraction * V HN <- H + headnick * headfraction * V headlength <- headfraction * sqrt(sum(V^2)) halfwidth <- headlength * tan((headangle/2) * pi/180) alpha <- atan2(V[2L], V[1L]) + pi/2 U <- c(cos(alpha), sin(alpha)) HL <- H + halfwidth * U HR <- H - halfwidth * U Head <- rbind(HN, HR, BR, HL, HN) objHead <- owin(poly=Head[1:4,]) parHead <- resolve.defaults(list(col=col.head, lwd=lwd.head), argh) if(do.plot && !is.na(col.head)) do.call.matched(polygon, append(list(x=Head), parHead)) if(!zap) { Tail <- AR } else { M <- (AR+HN)/2 dM <- (zapfraction/2) * (1-headfraction) * V dM <- dM + c(-dM[2L], dM[1L]) ML <- M + dM MR <- M - dM Tail <- rbind(MR, ML, AR) } parLines <- argh if(do.plot) do.call.matched(lines, append(list(x=rbind(Head, Tail)), parLines), extrargs=c("col", "lwd", "lty", "xpd", "lend")) HT <- rbind(Head, Tail) W <- owin(range(HT[,1]), range(HT[,2])) nht <- nrow(HT) HT <- cbind(HT[-nht, , drop=FALSE], HT[-1, , drop=FALSE]) objLines <- as.psp(HT, window=W) if(do.plot && !is.null(txt <- attr(x, "txt"))) { H <- (A+B)/2 do.call.matched(text.default, resolve.defaults( list(x=H[1L], y=H[2L]), argh, list(labels=txt, pos=3 + (V[2L] != 0))), funargs=graphicsPars("text")) } attr(result, "objects") <- layered(Head=objHead, Lines=objLines, plotargs=list(parHead, parLines)) return(invisible(result)) } spatstat/R/lohboot.R0000644000176200001440000003041313504040153014103 0ustar liggesusers# # lohboot.R # # $Revision: 1.24 $ $Date: 2019/06/24 03:15:26 $ # # Loh's bootstrap CI's for local pcf, local K etc # spatstatLocalFunctionInfo <- function(key) { ## This table has to be built on the fly. TheTable <- list( pcf = list(Global=pcf, Local=localpcf, L=FALSE, inhom=FALSE, indices=0), Kest = list(Global=Kest, Local=localK, L=FALSE, inhom=FALSE, indices=0), Lest = list(Global=Lest, Local=localK, # stet! 
L=TRUE, inhom=FALSE, indices=0), pcfinhom = list(Global=pcfinhom, Local=localpcfinhom, L=FALSE, inhom=TRUE, indices=0), Kinhom = list(Global=Kinhom, Local=localKinhom, L=FALSE, inhom=TRUE, indices=0), Linhom = list(Global=Linhom, Local=localKinhom, # stet! L=TRUE, inhom=TRUE, indices=0), Kcross = list(Global=Kcross, Local=localKcross, L=FALSE, inhom=FALSE, indices=2), Lcross = list(Global=Lcross, Local=localKcross, # stet! L=TRUE, inhom=FALSE, indices=2), Kdot = list(Global=Kdot, Local=localKdot, L=FALSE, inhom=FALSE, indices=1), Ldot = list(Global=Ldot, Local=localKdot, # stet! L=TRUE, inhom=FALSE, indices=1), Kcross.inhom = list(Global=Kcross.inhom, Local=localKcross.inhom, L=FALSE, inhom=TRUE, indices=2), Lcross.inhom = list(Global=Lcross.inhom, Local=localKcross.inhom, # stet! L=TRUE, inhom=TRUE, indices=2) ) if(length(key) != 1) stop("Argument must be a single character string or function", call.=FALSE) nama <- names(TheTable) pos <- if(is.character(key)) { match(key, nama) } else if(is.function(key)) { match(list(key), lapply(TheTable, getElement, name="Global")) } else NULL if(is.na(pos)) return(NULL) out <- TheTable[[pos]] out$GlobalName <- nama[pos] return(out) } lohboot <- function(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom", "Kcross", "Lcross", "Kdot", "Ldot", "Kcross.inhom", "Lcross.inhom"), ..., block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) { stopifnot(is.ppp(X)) ## validate 'fun' fun.name <- short.deparse(substitute(fun)) if(is.character(fun)) fun <- match.arg(fun) info <- spatstatLocalFunctionInfo(fun) if(is.null(info)) stop(paste("Loh's bootstrap is not supported for the function", sQuote(fun.name)), call.=FALSE) fun <- info$GlobalName localfun <- info$Local # validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence if(!global) { probs <- c(alpha/2, 1-alpha/2) rank <- nsim * probs[2L] } else { probs <- 1-alpha rank <- nsim * 
probs } if(abs(rank - round(rank)) > 0.001) warning(paste("confidence level", confidence, "corresponds to a non-integer rank", paren(rank), "so quantiles will be interpolated")) ## compute local functions f <- localfun(X, ...) theo <- f$theo ## parse edge correction info correction <- attr(f, "correction") switch(correction, none = { ckey <- clab <- "un" cadj <- "uncorrected" }, border = { ckey <- "border" clab <- "bord" cadj <- "border-corrected" }, translate = { ckey <- clab <- "trans" cadj <- "translation-corrected" }, isotropic = { ckey <- clab <- "iso" cadj <- "Ripley isotropic corrected" }) ## determine indices for Kcross etc types <- levels(marks(X)) from <- resolve.1.default(list(from=types[1]), list(...)) to <- resolve.1.default(list(to=types[2]), list(...)) fromName <- make.parseable(paste(from)) toName <- make.parseable(paste(to)) ## TEMPORARY HACK for cross/dot functions. ## Uses a possibly temporary attribute to overwrite X with only "from" points. if(info$indices > 0) { X <- attr(f, "Xfrom") } # first n columns are the local pcfs (etc) for the n points of X n <- npoints(X) y <- as.matrix(as.data.frame(f))[, 1:n] nr <- nrow(y) ## ---------- Modification by Christophe Biscio ----------------- ## (some re-coding by Adrian) if(!block) { ## Adrian's wrong code ## average local statistics ymean <- .rowMeans(y, na.rm=TRUE, nr, n) ## resample ystar <- matrix(, nrow=nr, ncol=nsim) for(i in 1:nsim) { ## resample n points with replacement ind <- sample(n, replace=TRUE) ## average their local statistics ystar[,i] <- .rowMeans(y[,ind], nr, n, na.rm=TRUE) } } else { ## Correct block bootstrap as described by Loh. 
W <- Window(X) GridTess <- quadrats(boundingbox(W), nx = nx, ny =ny) ## Classify points of X into grid tiles BlockIndex <- tileindex(X$x, X$y, GridTess) ## Use only 'full' blocks if(!is.rectangle(W)) { blocks <- tiles(GridTess) fullblocks <- sapply(blocks, is.subset.owin, B = W) if(sum(fullblocks)<2) stop("Not enough blocks are fully contained in the window", call.=FALSE) warning(paste("For non-rectangular windows,", "only blocks fully contained in the window are used:", paste(sum(fullblocks), "were used and", sum(!fullblocks), "were ignored.") ), call.=FALSE) ## blocks <- blocks[fullblocks] ## adjust classification of points of X indexmap <- cumsum(fullblocks) indexmap[!fullblocks] <- NA BlockIndex <- indexmap[BlockIndex] ## adjust total number of points n <- sum(!is.na(BlockIndex)) BlockFactor <- factor(BlockIndex, levels=unique(indexmap[!is.na(indexmap)])) } else BlockFactor <- factor(BlockIndex) nmarks <- length(levels(BlockFactor)) ## Average the local function values in each block ymarks <- by(t(y), BlockFactor, colSums, na.rm=TRUE, simplify=FALSE) ## Ensure empty data yield zero if(any(isempty <- sapply(ymarks, is.null))) ymarks[isempty] <- rep(list(numeric(nr)), sum(isempty)) ymarks <- as.matrix(do.call(cbind, ymarks)) * nmarks/n ## average all the marks ymean <- .rowMeans(ymarks, na.rm=TRUE, nr, nmarks) ## Average the marks in each block ystar <- matrix(, nrow=nr, ncol=nsim) for(i in 1:nsim) { ## resample nblocks blocks with replacement ind <- sample( nmarks , replace=TRUE) ## average their local function values ystar[,i] <- .rowMeans(ymarks[,ind], nr, nmarks, na.rm=TRUE) } } ## compute quantiles if(!global) { ## pointwise quantiles hilo <- apply(ystar, 1, quantile, probs=probs, na.rm=TRUE, type=type) ## Ripley's K function correction proposed by Loh if(Vcorrection && (fun=="Kest" || fun=="Kinhom")) { Vcov=sqrt(1+2*pi*n*(f$r)^2/area.owin(W)) hilo[1L,] <- ymean+(ymean-hilo[1L,]) / Vcov hilo[2L,] <- ymean+(ymean-hilo[2L,]) / Vcov hilo <- hilo[2:1,] # switch 
index so hilo[1,] is lower bound basicboot <- FALSE # The basic bootstrap interval is already used. Ensure that I do not modify hilo } ## So-called "basic bootstrap interval" proposed in Loh's paper; ## the intervals are asymptotically the same if(basicboot) { hilo[1L,] <- 2*ymean-hilo[1L,] hilo[2L,] <- 2*ymean-hilo[2L,] hilo <- hilo[c(2,1),] # switch index so hilo[1,] is lower bound } } else { ## quantiles of deviation ydif <- sweep(ystar, 1, ymean) ydev <- apply(abs(ydif), 2, max, na.rm=TRUE) crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type) hilo <- rbind(ymean - crit, ymean + crit) } ## ============= End Modification by Christophe Biscio =================== ## Transform to L function if required if(info$L) { theo <- sqrt(theo/pi) ymean <- sqrt(ymean/pi) hilo <- sqrt(hilo/pi) warn.once("lohbootLfun", "The calculation of confidence intervals for L functions", "in lohboot() has changed in spatstat 1.60-0 and later;", "they are now computed by transforming the confidence intervals", "for the corresponding K functions.") } ## create fv object df <- data.frame(r=f$r, theo=theo, ymean, lo=hilo[1L,], hi=hilo[2L,]) colnames(df)[3L] <- ckey CIlevel <- paste(100 * confidence, "%% confidence", sep="") desc <- c("distance argument r", "theoretical Poisson %s", paste(cadj, "estimate of %s"), paste("lower", CIlevel, "limit for %s"), paste("upper", CIlevel, "limit for %s")) switch(fun, pcf={ fname <- "g" yexp <- ylab <- quote(g(r)) }, Kest={ fname <- "K" yexp <- ylab <- quote(K(r)) }, Lest={ fname <- "L" yexp <- ylab <- quote(L(r)) }, pcfinhom={ fname <- c("g", "inhom") yexp <- ylab <- quote(g[inhom](r)) }, Kinhom={ fname <- c("K", "inhom") yexp <- ylab <- quote(K[inhom](r)) }, Linhom={ fname <- c("L", "inhom") yexp <- ylab <- quote(L[inhom](r)) }, Kcross={ fname <- c("K", paste0("list(", fromName, ",", toName, ")")) ylab <- substitute(K[fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(K[list(fra,til)](r), list(fra=fromName,til=toName)) }, Lcross={ fname 
<- c("L", paste0("list(", fromName, ",", toName, ")")) ylab <- substitute(L[fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(L[list(fra,til)](r), list(fra=fromName,til=toName)) }, Kdot={ fname <- c("K", paste0(fromName, "~ symbol(\"\\267\")")) ylab <- substitute(K[fra ~ dot](r), list(fra=fromName)) yexp <- substitute(K[fra ~ symbol("\267")](r), list(fra=fromName)) }, Ldot={ fname <- c("L", paste0(fromName, "~ symbol(\"\\267\")")) ylab <- substitute(L[fra ~ dot](r), list(fra=fromName)) yexp <- substitute(L[fra ~ symbol("\267")](r), list(fra=fromName)) }, Kcross.inhom={ fname <- c("K", paste0("list(inhom,", fromName, ",", toName, ")")) ylab <- substitute(K[inhom,fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(K[list(inhom,fra,til)](r), list(fra=fromName,til=toName)) }, Lcross.inhom={ fname <- c("L", paste0("list(inhom,", fromName, ",", toName, ")")) ylab <- substitute(L[inhom,fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(L[list(inhom,fra,til)](r), list(fra=fromName,til=toName)) }) labl <- c("r", makefvlabel(NULL, NULL, fname, "pois"), makefvlabel(NULL, "hat", fname, clab), makefvlabel(NULL, "hat", fname, "loCI"), makefvlabel(NULL, "hat", fname, "hiCI")) g <- fv(df, "r", ylab=ylab, ckey, , c(0, max(f$r)), labl, desc, fname=fname, yexp=yexp) formula(g) <- . 
~ r fvnames(g, ".") <- c(ckey, "theo", "hi", "lo") fvnames(g, ".s") <- c("hi", "lo") unitname(g) <- unitname(X) g } spatstat/R/linalg.R0000644000176200001440000001532613333543255013724 0ustar liggesusers# # linalg.R # # Linear Algebra # # $Revision: 1.24 $ $Date: 2017/12/06 07:37:02 $ # sumouter <- function(x, w=NULL, y=x) { #' compute matrix sum_i (w[i] * outer(x[i,], y[i,])) stopifnot(is.matrix(x)) weighted <- !is.null(w) symmetric <- missing(y) || identical(x,y) if(weighted) { if(length(dim(w)) > 1) stop("w should be a vector") w <- as.numeric(w) check.nvector(w, nrow(x), things="rows of x") } if(!symmetric) { stopifnot(is.matrix(y)) stopifnot(nrow(x) == nrow(y)) } #' transpose (compute outer squares of columns) tx <- t(x) if(!symmetric) ty <- t(y) #' check for NA etc ok <- apply(is.finite(tx), 2, all) if(!symmetric) ok <- ok & apply(is.finite(ty), 2, all) if(weighted) ok <- ok & is.finite(w) #' remove NA etc if(!all(ok)) { tx <- tx[ , ok, drop=FALSE] if(!symmetric) ty <- ty[ , ok, drop=FALSE] if(weighted) w <- w[ok] } #' call C code if(symmetric) { n <- ncol(tx) p <- nrow(tx) if(is.null(w)) { zz <- .C("Csumouter", x=as.double(tx), n=as.integer(n), p=as.integer(p), y=as.double(numeric(p * p)), PACKAGE = "spatstat") } else { zz <- .C("Cwsumouter", x=as.double(tx), n=as.integer(n), p=as.integer(p), w=as.double(w), y=as.double(numeric(p * p)), PACKAGE = "spatstat") } out <- matrix(zz$y, p, p) if(!is.null(nama <- colnames(x))) dimnames(out) <- list(nama, nama) } else { n <- ncol(tx) px <- nrow(tx) py <- nrow(ty) if(is.null(w)) { zz <- .C("Csum2outer", x=as.double(tx), y=as.double(ty), n=as.integer(n), px=as.integer(px), py=as.integer(py), z=as.double(numeric(px * py)), PACKAGE = "spatstat") } else { zz <- .C("Cwsum2outer", x=as.double(tx), y=as.double(ty), n=as.integer(n), px=as.integer(px), py=as.integer(py), w=as.double(w), z=as.double(numeric(px * py))) } out <- matrix(zz$z, px, py) namx <- colnames(x) namy <- colnames(y) if(!is.null(namx) || !is.null(namy)) 
dimnames(out) <- list(namx, namy) } return(out) } quadform <- function(x, v) { #' compute vector of values y[i] = x[i, ] %*% v %*% t(x[i,]) stopifnot(is.matrix(x)) p <- ncol(x) n <- nrow(x) nama <- rownames(x) # transpose (evaluate quadratic form for each column) tx <- t(x) ok <- apply(is.finite(tx), 2, all) allok <- all(ok) if(!allok) { tx <- tx[ , ok, drop=FALSE] n <- ncol(tx) } if(missing(v)) { v <- diag(rep.int(1, p)) } else { stopifnot(is.matrix(v)) if(nrow(v) != ncol(v)) stop("v should be a square matrix") stopifnot(ncol(x) == nrow(v)) } z <- .C("Cquadform", x=as.double(tx), n=as.integer(n), p=as.integer(p), v=as.double(v), y=as.double(numeric(n)), PACKAGE = "spatstat") result <- z$y names(result) <- nama[ok] if(allok) return(result) fullresult <- rep.int(NA_real_, length(ok)) fullresult[ok] <- result names(fullresult) <- nama return(fullresult) } bilinearform <- function(x, v, y) { #' compute vector of values z[i] = x[i, ] %*% v %*% t(y[i,]) stopifnot(is.matrix(x)) stopifnot(is.matrix(y)) stopifnot(identical(dim(x), dim(y))) p <- ncol(x) n <- nrow(x) nama <- rownames(x) # transpose (evaluate quadratic form for each column) tx <- t(x) ty <- t(y) ok <- matcolall(is.finite(tx)) & matcolall(is.finite(ty)) allok <- all(ok) if(!allok) { tx <- tx[ , ok, drop=FALSE] ty <- ty[ , ok, drop=FALSE] n <- ncol(tx) } if(missing(v)) { v <- diag(rep.int(1, p)) } else { stopifnot(is.matrix(v)) if(nrow(v) != ncol(v)) stop("v should be a square matrix") stopifnot(ncol(x) == nrow(v)) } z <- .C("Cbiform", x=as.double(tx), y=as.double(ty), n=as.integer(n), p=as.integer(p), v=as.double(v), z=as.double(numeric(n)), PACKAGE = "spatstat") result <- z$z names(result) <- nama[ok] if(allok) return(result) fullresult <- rep.int(NA_real_, length(ok)) fullresult[ok] <- result names(fullresult) <- nama return(fullresult) } sumsymouter <- function(x, w=NULL) { ## x is a 3D array ## w is a matrix ## Computes the sum of outer(x[,i,j], x[,j,i]) * w[i,j] over all pairs i != j if(inherits(x, 
c("sparseSlab", "sparse3Darray")) && (is.null(w) || inherits(w, "sparseMatrix"))) return(sumsymouterSparse(x, w)) x <- as.array(x) stopifnot(length(dim(x)) == 3) if(dim(x)[2L] != dim(x)[3L]) stop("The second and third dimensions of x should be equal") if(!is.null(w)) { w <- as.matrix(w) if(!all(dim(w) == dim(x)[-1L])) stop("Dimensions of w should match the second and third dimensions of x") } p <- dim(x)[1L] n <- dim(x)[2L] if(is.null(w)) { zz <- .C("Csumsymouter", x = as.double(x), p = as.integer(p), n = as.integer(n), y = as.double(numeric(p * p)), PACKAGE = "spatstat") } else { zz <- .C("Cwsumsymouter", x = as.double(x), w = as.double(w), p = as.integer(p), n = as.integer(n), y = as.double(numeric(p * p)), PACKAGE = "spatstat") } matrix(zz$y, p, p) } checksolve <- function(M, action, descrip, target="") { Mname <- short.deparse(substitute(M)) Minv <- try(solve(M), silent=(action=="silent")) if(!inherits(Minv, "try-error")) return(Minv) if(missing(descrip)) descrip <- paste("the matrix", sQuote(Mname)) whinge <- paste0("Cannot compute ", target, ": ", descrip, " is singular") switch(action, fatal=stop(whinge, call.=FALSE), warn= warning(whinge, call.=FALSE), silent={}) return(NULL) } check.mat.mul <- function(A, B, Acols="columns of A", Brows="rows of B", fatal=TRUE) { # check whether A %*% B would be valid: if not, print a useful message if(!is.matrix(A)) A <- matrix(A, nrow=1, dimnames=list(NULL, names(A))) if(!is.matrix(B)) B <- matrix(B, ncol=1, dimnames=list(names(B), NULL)) nA <- ncol(A) nB <- nrow(B) if(nA == nB) return(TRUE) if(!fatal) return(FALSE) if(any(nzchar(Anames <- colnames(A)))) message(paste0("Names of ", Acols, ": ", commasep(Anames))) if(any(nzchar(Bnames <- rownames(B)))) message(paste0("Names of ", Brows, ": ", commasep(Bnames))) stop(paste("Internal error: number of", Acols, paren(nA), "does not match number of", Brows, paren(nB)), call.=FALSE) } spatstat/R/setcov.R0000644000176200001440000000640113333543255013753 0ustar liggesusers# # # 
setcov.R # # $Revision: 1.16 $ $Date: 2018/08/07 11:44:46 $ # # Compute the set covariance function of a window # or the (noncentred) spatial covariance function of an image # setcov <- function(W, V=W, ...) { W <- as.owin(W) # pixel approximation mW <- as.mask(W, ...) Z <- as.im(mW, na.replace=0) if(missing(V)) return(imcov(Z)) # cross-covariance V <- as.owin(V) mV <- as.mask(V, ...) Z2 <- as.im(mV, na.replace=0) imcov(Z, Z2) } imcov <- function(X, Y=X) { if(missing(Y)) Y <- NULL convolve.im(X, Y, reflectX = FALSE, reflectY=TRUE) } convolve.im <- function(X, Y=X, ..., reflectX=FALSE, reflectY=FALSE) { stopifnot(is.im(X)) have.Y <- !missing(Y) && !is.null(Y) crosscov <- have.Y || reflectX || reflectY trap.extra.arguments(..., .Context="In convolve.im") #' Check whether Fastest Fourier Transform in the West is available west <- fftwAvailable() #' if(have.Y) { # cross-covariance stopifnot(is.im(Y)) Xbox <- as.rectangle(X) Ybox <- as.rectangle(Y) # first shift images to common midpoint, to reduce storage Xmid <- centroid.owin(Xbox) Ymid <- centroid.owin(Ybox) svec <- as.numeric(Xmid) - as.numeric(Ymid) Y <- shift(Y, svec) # ensure images are compatible XY <- harmonise.im(X=X, Y=Y) X <- XY$X Y <- XY$Y } else { # Y is missing or NULL Y <- X Xbox <- Ybox <- as.rectangle(X) } M <- X$v M[is.na(M)] <- 0 xstep <- X$xstep ystep <- X$ystep # pad with zeroes nr <- nrow(M) nc <- ncol(M) Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft2D(Mpad, west=west) if(!crosscov) { # compute convolution square G <- fft2D(fM^2, inverse=TRUE, west=west)/lengthMpad } else { # compute set cross-covariance or convolution by FFT N <- Y$v N[is.na(N)] <- 0 Npad <- matrix(0, ncol=2*nc, nrow=2*nr) Npad[1:nr, 1:nc] <- N fN <- fft2D(Npad, west=west) if(reflectY) fN <- Conj(fN) if(reflectX) fM <- Conj(fM) G <- fft2D(fM * fN, inverse=TRUE, west=west)/lengthMpad } # cat(paste("maximum imaginary part=", max(Im(G)), "\n")) G <- Mod(G) * xstep * ystep 
if(reflectX != reflectY) { # Currently G[i,j] corresponds to a vector shift of # dy = (i-1) mod nr, dx = (j-1) mod nc. # Rearrange this periodic function so that # the origin of translations (0,0) is at matrix position (nr,nc) # NB this introduces an extra row and column G <- G[ ((-nr):nr) %% (2 * nr) + 1, (-nc):nc %% (2*nc) + 1] } # Determine spatial domain of full raster image XB <- as.rectangle(X) YB <- as.rectangle(Y) # undo shift if(have.Y) YB <- shift(YB, -svec) # reflect if(reflectX) XB <- reflect(XB) if(reflectY) YB <- reflect(YB) # Minkowski sum of covering boxes xran <- XB$xrange + YB$xrange yran <- XB$yrange + YB$yrange # Declare spatial domain out <- im(G, xrange = xran, yrange=yran) if(crosscov) { # restrict to actual spatial domain of function if(reflectX) Xbox <- reflect(Xbox) if(reflectY) Ybox <- reflect(Ybox) # Minkowski sum xran <- Xbox$xrange + Ybox$xrange yran <- Xbox$yrange + Ybox$yrange XYbox <- owin(xran, yran) out <- out[XYbox, rescue=TRUE] } unitname(out) <- unitname(X) return(out) } spatstat/R/Jinhom.R0000644000176200001440000003203113434220165013664 0ustar liggesusers# # Jinhom.R # # $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $ # Ginhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("G", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # 
negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Ginhom.update", "The behaviour of Ginhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Ginhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) if(lmin >= min(lambdaX)) stop("lmin must be smaller than all values of lambda") } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin 
<- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, 0.95 * min(lambdaX)) } ## Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio if(warn.bias) { ra <- range(lratio) if(ra[1] < 1e-6 || ra[2] > 1 - 1e-6) warning(paste("Possible bias: range of values of lmin/lambdaX is", prange(signif(ra, 5))), call.=FALSE) } ## sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # compute local cumulative products z <- .C("locprod", n = as.integer(npts), x = as.double(xord), y = as.double(yord), v = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(npts * nr)), PACKAGE = "spatstat") ans <- matrix(z$ans, nrow=nr, ncol=npts) # revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=npts) loccumprod[, oX] <- ans # border correction bX <- bdist.points(X) ok <- outer(r, bX, "<=") denom <- .rowSums(ok, nr, npts) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, npts) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom](r)), "theo", NULL, c(0,rmax), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("G", "inhom"), ratio=ratio) G <- bind.ratfv(G, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(G) <- . 
~ r fvnames(G, ".") <- c("bord", "theo") unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) if(danger) attr(G, "dangerous") <- dangerous if(savelambda) { attr(G, "lambda") <- lambdaX attr(G, "lmin") <- lmin } return(G) } Finhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("F", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) { model <- lambda if(!update) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Finhom.update", "The behaviour of Finhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Finhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a 
function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) if(lmin >= min(lambdaX)) stop("lmin must be smaller than all values of lambda") } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, 0.95 * min(lambdaX)) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio if(warn.bias) { ra <- range(lratio) if(ra[1] < 1e-6 || ra[2] > 1 - 1e-6) warning(paste("Possible bias: range of values of lmin/lambdaX is", prange(signif(ra, 5))), call.=FALSE) } ## sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # determine pixel grid and compute distance to boundary M <- do.call.matched(as.mask, append(list(w=W), list(...))) bM <- bdist.pixels(M, style="matrix") bM <- as.vector(bM) # x, y coordinates of pixels are already sorted by increasing x xM <- as.vector(rasterx.mask(M)) yM <- as.vector(rastery.mask(M)) nM <- length(xM) # compute local cumulative products z <- .C("locxprod", ntest = as.integer(nM), xtest = 
as.double(xM), ytest = as.double(yM), ndata = as.integer(npts), xdata = as.double(xord), ydata = as.double(yord), vdata = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nM * nr)), PACKAGE = "spatstat") loccumprod <- matrix(z$ans, nrow=nr, ncol=nM) # border correction ok <- outer(r, bM, "<=") denom <- .rowSums(ok, nr, nM) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, nM) # pack up Fdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) FX <- ratfv(Fdf, NULL, theo.denom, "r", quote(F[inhom](r)), "theo", NULL, c(0,rmax), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("F", "inhom"), ratio=ratio) FX <- bind.ratfv(FX, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(FX) <- . ~ r fvnames(FX, ".") <- c("bord", "theo") unitname(FX) <- unitname(X) if(ratio) FX <- conform.ratfv(FX) if(danger) attr(FX, "dangerous") <- dangerous if(savelambda) { attr(FX, "lambda") <- lambdaX attr(FX, "lmin") <- lmin } return(FX) } Jinhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { if(missing(update) & (is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda))) warn.once(key="Jinhom.update", "The behaviour of Jinhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Jinhom)") ## compute inhomogeneous G (including determination of r and lmin) GX <- Ginhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, breaks=breaks, ratio=FALSE, update=update, warn.bias=warn.bias, savelambda=TRUE) ## extract auxiliary values to be used for Finhom r <- GX$r lmin <- attr(GX, "lmin") lambdaX <- attr(GX, "lambda") ## compute inhomogeneous J using previously-determined values FX <- Finhom(X, lambda=lambdaX, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, ratio=FALSE, 
update=update, warn.bias=FALSE, savelambda=FALSE) ## evaluate inhomogeneous J function JX <- eval.fv((1-GX)/(1-FX)) # relabel the fv object JX <- rebadge.fv(JX, quote(J[inhom](r)), c("J","inhom"), names(JX), new.labl=attr(GX, "labl")) # tack on extra info attr(JX, "G") <- GX attr(JX, "F") <- FX attr(JX, "dangerous") <- attr(GX, "dangerous") if(savelambda) { attr(JX, "lmin") <- lmin attr(JX, "lambda") <- lambdaX } return(JX) } spatstat/R/subset.R0000644000176200001440000000646613354565756014005 0ustar liggesusers## ## subset.R ## ## Methods for 'subset' ## ## $Revision: 1.6 $ $Date: 2018/10/02 03:50:44 $ subset.ppp <- function(x, subset, select, drop=FALSE, ...) { stopifnot(is.ppp(x)) w <- as.owin(x) y <- as.data.frame(x) r <- if (missing(subset)) { rep_len(TRUE, nrow(y)) } else { e <- substitute(subset) r <- eval(e, y, parent.frame()) if(!is.logical(r)) r <- ppsubset(x, r, "subset", fatal=TRUE) r & !is.na(r) } vars <- if (missing(select)) { TRUE } else { ## create an environment in which column names are mapped to their positions nl <- as.list(seq_along(y)) names(nl) <- names(y) if(length(nl) > 3) { ## multiple columns of marks: add the name 'marks' nl <- append(nl, list(marks=3:length(nl))) } eval(substitute(select), nl, parent.frame()) } ## ensure columns include coordinates nama <- names(y) names(nama) <- nama vars <- union(c("x", "y"), nama[vars]) ## take subset z <- y[r, vars, drop = FALSE] ## reinstate as point pattern out <- as.ppp(z, W=w, check=FALSE) if(drop) out <- out[drop=TRUE] return(out) } subset.pp3 <- subset.lpp <- subset.ppx <- function(x, subset, select, drop=FALSE, ...) 
{ y <- as.data.frame(x) r <- if (missing(subset)) rep_len(TRUE, nrow(y)) else { e <- substitute(subset) r <- eval(e, y, parent.frame()) if(!is.logical(r)) r <- ppsubset(x, r, "subset", fatal=TRUE) r & !is.na(r) } vars <- if (missing(select)) TRUE else { ## create an environment in which column names are mapped to their positions nl <- as.list(seq_along(y)) names(nl) <- names(y) if(!("marks" %in% names(y)) && any(ismark <- (x$ctype == "mark"))) { ## add the symbol 'marks' nl <- append(nl, list(marks=which(ismark))) } eval(substitute(select), nl, parent.frame()) } ## ensure columns include coordinates nama <- names(y) names(nama) <- nama vars <- union(names(coords(x)), nama[vars]) ## take subset z <- y[r, vars, drop = FALSE] ## reinstate as point pattern ctype <- as.character(x$ctype)[match(vars, nama)] out <- ppx(z, domain=x$domain, coord.type=ctype) ## drop unused factor levels if(drop) out <- out[drop=TRUE] ## reinstate class class(out) <- class(x) return(out) } subset.psp <- function(x, subset, select, drop=FALSE, ...) 
{ stopifnot(is.psp(x)) w <- Window(x) y <- as.data.frame(x) r <- if (missing(subset)) { rep_len(TRUE, nrow(y)) } else { e <- substitute(subset) r <- eval(e, y, parent.frame()) if(!is.logical(r)) stop("Argument 'subset' should be a logical vector", call.=FALSE) r & !is.na(r) } vars <- if (missing(select)) { TRUE } else { ## create an environment in which column names are mapped to their positions nl <- as.list(seq_along(y)) names(nl) <- names(y) if(length(nl) > 3) { ## multiple columns of marks: add the name 'marks' nl <- append(nl, list(marks=3:length(nl))) } eval(substitute(select), nl, parent.frame()) } ## ensure columns include coordinates nama <- names(y) names(nama) <- nama vars <- union(c("x0", "y0", "x1", "y1"), nama[vars]) ## take subset z <- y[r, vars, drop = FALSE] ## reinstate as line segment pattern out <- as.psp(z, window=w, check=FALSE) if(drop) out <- out[drop=TRUE] return(out) } spatstat/R/bc.R0000644000176200001440000000405313333543254013034 0ustar liggesusers#' bc.R #' #' Bias correction techniques #' #' $Revision: 1.2 $ $Date: 2016/09/15 02:21:15 $ bc <- function(fit, ...) 
{ UseMethod("bc") } bc.ppm <- function(fit, ..., nfine=256) { stopifnot(is.ppm(fit)) # theta0 <- coef(fit) nc <- length(theta0) # X <- data.ppm(fit) Z <- is.data(quad.ppm(fit)) # evaluate sufficient statistic at data points sufX <- model.matrix(fit)[Z, ] if(ncol(sufX) != nc) stop("Internal error: model.matrix does not match coef(model)") # predict on fine grid finemask <- as.mask(as.owin(fit), dimyx=nfine) lamF <- predict(fit, type="cif", locations=finemask) sufF <- model.images(fit, W=finemask) if(length(sufF) != nc) stop("Internal error: model.images does not match coef(model)") # edge correction if(fit$correction == "border" && ((rbord <- fit$rbord) > 0)) { b <- bdist.pixels(finemask) bX <- bdist.points(X) excludeU <- eval.im(b < rbord) retainX <- (bX >= rbord) sufX <- sufX[retainX, , drop=FALSE] } else { excludeU <- FALSE } # compute fine approximation to score scoreX <- colSums(sufX) scoreW <- numeric(nc) for(k in seq_len(nc)) { S <- sufF[[k]] # infinite values of S may occur and correspond to zero cif Slam <- eval.im(ifelse(is.infinite(S) | excludeU, 0, S * lamF)) scoreW[k] <- integral.im(Slam) } score <- scoreX - scoreW # Newton-Raphson Iinv <- vcov(fit, hessian=TRUE) theta <- theta0 + Iinv %*% score theta <- theta[ , 1L, drop=TRUE] # # return(list(theta0=theta0, theta=theta)) return(theta) } # Richardson extrapolation (generic) rex <- function(x, r=2, k=1, recursive=FALSE) { # x should be a matrix # whose columns are successive estimates of a parameter vector # obtained using "grid step sizes" t, t/r, t/r^2, ... 
# Estimate from step size t is assumed to converge at rate t^k if(!is.matrix(x)) x <- matrix(x, nrow=1) if(ncol(x) <= 1) return(x) rk <- r^k y <- (rk * x[, -1L, drop=FALSE] - x[, -ncol(x), drop=FALSE])/(rk - 1) if(recursive) y <- rex(y, r=r, k=k+1, recursive=TRUE) return(y) } spatstat/R/funxy.R0000644000176200001440000001145313417031473013621 0ustar liggesusers# # funxy.R # # Class of functions of x,y location with a spatial domain # # $Revision: 1.19 $ $Date: 2018/02/26 01:41:27 $ # spatstat.xy.coords <- function(x,y) { if(missing(y) || is.null(y)) { xy <- if(is.ppp(x) || is.lpp(x) || is.quad(x)) coords(x) else if(checkfields(x, c("x", "y"))) x else stop("Argument y is missing", call.=FALSE) x <- xy$x y <- xy$y } xy.coords(x,y)[c("x","y")] } funxy <- function(f, W=NULL) { stopifnot(is.function(f)) stopifnot(is.owin(W)) if(!identical(names(formals(f))[1:2], c("x", "y"))) stop("The first two arguments of f should be named x and y", call.=FALSE) if(is.primitive(f)) stop("Not implemented for primitive functions", call.=FALSE) ## copy 'f' including formals, environment, attributes h <- f ## make new function body: ## paste body of 'f' into last line of 'spatstat.xy.coords' b <- body(spatstat.xy.coords) b[[length(b)]] <- body(f) ## transplant the body body(h) <- b ## reinstate attributes attributes(h) <- attributes(f) ## stamp it class(h) <- c("funxy", class(h)) attr(h, "W") <- W attr(h, "f") <- f return(h) } print.funxy <- function(x, ...) { nama <- names(formals(x)) splat(paste0("function", paren(paste(nama,collapse=","))), "of class", sQuote("funxy")) print(as.owin(x)) splat("\nOriginal function definition:") print(attr(x, "f")) } summary.funxy <- function(object, ...) { w <- Window(object) z <- list(argues = names(formals(object)), fundef = attr(object, "f"), values = summary(as.im(object, ...)), wintype = w$type, frame = Frame(w), units = unitname(w)) class(z) <- "summary.funxy" return(z) } print.summary.funxy <- function(x, ...) 
{ sigdig <- getOption('digits') splat(paste0("function", paren(paste(x$argues,collapse=","))), "of class", sQuote("funxy")) windesc <- switch(x$wintype, rectangle="the rectangle", polygonal="a polygonal window inside the frame", mask="a binary mask in the rectangle") unitinfo <- summary(x$units) splat("defined in", windesc, prange(signif(x$frame$xrange, sigdig)), "x", prange(signif(x$frame$yrange, sigdig)), unitinfo$plural, unitinfo$explain ) splat("\nOriginal function definition:") print(x$fundef) v <- x$values splat("\nFunction values are", v$type) switch(v$type, integer=, real={ splat("\trange =", prange(signif(v$range, sigdig))) splat("\tintegral =", signif(v$integral, sigdig)) splat("\tmean =", signif(v$mean, sigdig)) }, factor={ print(v$table) }, complex={ splat("\trange: Real", prange(signif(v$Re$range, sigdig)), "Imaginary", prange(signif(v$Im$range, sigdig))) # splat("\tintegral =", signif(v$integral, sigdig)) splat("\tmean =", signif(v$mean, sigdig)) }, { print(v$summary) }) invisible(NULL) } as.owin.funxy <- function(W, ..., fatal=TRUE) { W <- attr(W, "W") as.owin(W, ..., fatal=fatal) } domain.funxy <- Window.funxy <- function(X, ...) { as.owin(X) } # Note that 'distfun' (and other classes inheriting from funxy) # has a method for as.owin that takes precedence over as.owin.funxy # and this will affect the behaviour of the following plot methods # because 'distfun' does not have its own plot method. plot.funxy <- function(x, ...) { xname <- short.deparse(substitute(x)) W <- as.owin(x) do.call(do.as.im, resolve.defaults(list(x, action="plot"), list(...), list(main=xname, W=W))) invisible(NULL) } contour.funxy <- function(x, ...) { xname <- deparse(substitute(x)) W <- as.owin(x) do.call(do.as.im, resolve.defaults(list(x, action="contour"), list(...), list(main=xname, W=W))) invisible(NULL) } persp.funxy <- function(x, ...) 
{ xname <- deparse(substitute(x)) zlab <- substitute(expression(f(x,y)), list(f=as.name(xname))) W <- as.rectangle(as.owin(x)) do.call(do.as.im, resolve.defaults(list(x, action="persp"), list(...), list(main=xname, W=W, zlab=zlab))) invisible(NULL) } hist.funxy <- function(x, ..., xname) { if(missing(xname) || is.null(xname)) xname <- short.deparse(substitute(x)) a <- do.call.matched(as.im, list(X=x, ...), c("X", "W", "dimyx", "eps", "xy", "na.replace", "strict"), sieve=TRUE) Z <- a$result do.call(hist.im, append(list(x=Z, xname=xname), a$otherargs)) } spatstat/R/leverage.R0000644000176200001440000012436713615771735014267 0ustar liggesusers# # leverage.R # # leverage and influence # # $Revision: 1.117 $ $Date: 2020/02/03 10:06:52 $ # leverage <- function(model, ...) { UseMethod("leverage") } leverage.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="leverage", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$leverage) } influence.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="influence", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$influence) } dfbetas.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=NULL) { fitname <- short.deparse(substitute(model)) a <- ppmInfluence(model, what="dfbetas", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ..., fitname=fitname) return(a$dfbetas) } ppmInfluence <- function(fit, what=c("leverage", "influence", "dfbetas"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, fitname=NULL) { stuff <- ppmInfluenceEngine(fit, what=what, ..., iScore=iScore, iHessian=iHessian, iArgs=iArgs, drop=drop, fitname=fitname) fnam <- c("fitname", "fit.is.poisson") result <- list() if("lev" %in% names(stuff)) { lev 
<- stuff[c(fnam, "lev")] class(lev) <- "leverage.ppm" result$leverage <- lev } if("infl" %in% names(stuff)) { infl <- stuff[c(fnam, "infl")] class(infl) <- "influence.ppm" result$influence <- infl } if(!is.null(dfb <- stuff$dfbetas)) { attr(dfb, "info") <- stuff[fnam] result$dfbetas <- dfb } other <- setdiff(names(stuff), c("lev", "infl", "dfbetas")) result[other] <- stuff[other] class(result) <- "ppmInfluence" return(result) } leverage.ppmInfluence <- function(model, ...) { model$leverage } influence.ppmInfluence <- function(model, ...) { model$influence } dfbetas.ppmInfluence <- function(model, ...) { model$dfbetas } avenndist <- function(X) mean(nndist(unique(X))) ## ............... main workhorse .................................... ppmInfluenceEngine <- function(fit, what=c("leverage", "influence", "dfbetas", "score", "derivatives", "increments", "all"), ..., iScore=NULL, iHessian=NULL, iArgs=NULL, drop=FALSE, method=c("C", "interpreted"), fine=FALSE, precomputed=list(), sparseOK=TRUE, fitname=NULL, multitypeOK=FALSE, entrywise = TRUE, matrix.action = c("warn", "fatal", "silent"), dimyx=NULL, eps=NULL, geomsmooth = TRUE) { if(is.null(fitname)) fitname <- short.deparse(substitute(fit)) ## type of calculation to be performed method <- match.arg(method) what <- match.arg(what, several.ok=TRUE) if("all" %in% what) what <- c("leverage", "influence", "dfbetas", "score", "derivatives", "increments") matrix.action <- match.arg(matrix.action) influencecalc <- any(what %in% c("leverage", "influence", "dfbetas")) hesscalc <- influencecalc || any(what == "derivatives") sparse <- sparseOK target <- paste(what, collapse=",") ## ........... collect information about the model ................. 
stopifnot(is.ppm(fit)) #' ensure object contains GLM fit if(!hasglmfit(fit)) { fit <- update(fit, forcefit=TRUE) precomputed <- list() } #' type of interpoint interaction fit.is.poisson <- is.poisson(fit) hasInf <- !fit.is.poisson && !identical(fit$interaction$hasInf, FALSE) #' estimating function fitmethod <- fit$method logi <- (fitmethod == "logi") pseudo <- (fitmethod == "mpl") if(!logi && !pseudo) { warning(paste("Model was fitted with method =", dQuote(fitmethod), "but is treated as having been fitted by maximum", if(fit.is.poisson) "likelihood" else "pseudolikelihood", "for leverage/influence calculation"), call.=FALSE) pseudo <- TRUE } ## Detect presence of irregular parameters if(is.null(iArgs)) iArgs <- fit$covfunargs gotScore <- !is.null(iScore) gotHess <- !is.null(iHessian) needHess <- gotScore && hesscalc # may be updated later if(!gotHess && needHess) stop("Must supply iHessian", call.=FALSE) #' ................... evaluate basic terms .................... ## extract values from model, using precomputed values if given theta <- precomputed$coef %orifnull% coef(fit) lampos <- precomputed$lambda %orifnull% fitted(fit, ignore.hardcore=hasInf, check=FALSE) mom <- precomputed$mom %orifnull% model.matrix(fit, splitInf=hasInf) ## 'lampos' is positive part of cif ## 'lam' is full model cif including zeroes lam <- lampos zerocif <- attr(mom, "-Inf") %orifnull% logical(nrow(mom)) anyzerocif <- any(zerocif) if(hasInf && anyzerocif) lam[zerocif] <- 0 p <- length(theta) Q <- quad.ppm(fit) w <- w.quad(Q) loc <- union.quad(Q) isdata <- is.data(Q) mt <- is.multitype(loc) if(length(w) != length(lam)) stop(paste("Internal error: length(w) = ", length(w), "!=", length(lam), "= length(lam)"), call.=FALSE) ## smoothing bandwidth and resolution for smoothed images of densities smallsigma <- if(!mt) avenndist(loc) else max(sapply(split(loc), avenndist)) ## previously used 'maxnndist' instead of 'avenndist' if(is.null(dimyx) && is.null(eps)) eps <- 
sqrt(prod(sidelengths(Frame(loc))))/256 #' ............... evaluate Hessian of regular parameters ................ ## domain of composite likelihood ## (e.g. eroded window in border correction) inside <- getglmsubset(fit) %orifnull% rep(TRUE, npoints(loc)) ## extract negative Hessian matrix of regular part of log composite likelihood ## hess = negative Hessian H ## fgrad = Fisher-scoring-like gradient G = estimate of E[H] if(logi) { ## .............. logistic composite likelihood ...................... ## Intensity of dummy points rho <- fit$Q$param$rho %orifnull% intensity(as.ppp(fit$Q)) logiprob <- lampos / (lampos + rho) vclist <- vcov(fit, what = "internals", fine=fine, matrix.action="silent") hess <- vclist$Slog fgrad <- vclist$fisher invhess <- if(is.null(hess)) NULL else checksolve(hess, "silent") invfgrad <- if(is.null(fgrad)) NULL else checksolve(fgrad, "silent") if(is.null(invhess) || is.null(invfgrad)) { #' use more expensive estimate of variance terms vclist <- vcov(fit, what = "internals", fine=TRUE, matrix.action=matrix.action) hess <- vclist$Slog fgrad <- vclist$fisher #' try again - exit if really singular invhess <- checksolve(hess, matrix.action, "Hessian", target) invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } # vc <- invhess %*% (vclist$Sigma1log+vclist$Sigma2log) %*% invhess } else { ## .............. likelihood or pseudolikelihood .................... invfgrad <- vcov(fit, hessian=TRUE, fine=fine, matrix.action="silent") fgrad <- hess <- if(is.null(invfgrad) || anyNA(invfgrad)) NULL else checksolve(invfgrad, "silent") if(is.null(fgrad)) { invfgrad <- vcov(fit, hessian=TRUE, fine=TRUE, matrix.action=matrix.action) fgrad <- hess <- checksolve(invfgrad, matrix.action, "Hessian", target) } } #' ............... augment Hessian ................... 
## evaluate additional (`irregular') components of score, if any iscoremat <- ppmDerivatives(fit, "gradient", iScore, loc, covfunargs=iArgs) gotScore <- !is.null(iscoremat) needHess <- gotScore && hesscalc if(!gotScore) { REG <- 1:ncol(mom) } else { ## count regular and irregular parameters nreg <- ncol(mom) nirr <- ncol(iscoremat) ## add extra columns to model matrix mom <- cbind(mom, iscoremat) REG <- 1:nreg IRR <- nreg + 1:nirr ## evaluate additional (`irregular') entries of Hessian ihessmat <- if(!needHess) NULL else ppmDerivatives(fit, "hessian", iHessian, loc, covfunargs=iArgs) if(gotHess <- !is.null(ihessmat)) { ## recompute negative Hessian of log PL and its mean fgrad <- hessextra <- matrix(0, ncol(mom), ncol(mom)) } else if(needHess && length(iArgs)) { nami <- names(iArgs) stop(paste("Unable to compute iHess, the", ngettext(length(nami), "component", "components"), "of the Hessian matrix for the irregular", ngettext(length(nami), "parameter", "parameters"), commasep(sQuote(names(iArgs)))), call.=FALSE) } if(pseudo) { ## .............. likelihood or pseudolikelihood .................... 
switch(method, interpreted = { for(i in seq(loc$n)) { # weight for integrand wti <- lam[i] * w[i] if(all(is.finite(wti))) { # integral of outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * wti if(all(is.finite(v1))) fgrad <- fgrad + v1 # integral of Hessian # contributions nonzero for irregular parameters if(gotHess) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) * wti if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } } # subtract sum over data points if(gotHess) { for(i in which(isdata)) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] - v2 } hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { wlam <- lam * w fgrad <- sumouter(mom, wlam) if(gotHess) { # integral term isfin <- is.finite(wlam) & matrowall(is.finite(ihessmat)) vintegral <- if(all(isfin)) wlam %*% ihessmat else wlam[isfin] %*% ihessmat[isfin,, drop=FALSE] # sum over data points vdata <- .colSums(ihessmat[isdata, , drop=FALSE], sum(isdata), ncol(ihessmat), na.rm=TRUE) vcontrib <- vintegral - vdata hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } else { ## .............. logistic composite likelihood .................... 
switch(method, interpreted = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) for(i in seq(loc$n)) { ## outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * oweight[i] if(all(is.finite(v1))) fgrad <- fgrad + v1 ## Hessian term ## contributions nonzero for irregular parameters if(gotHess) { v2 <- hweight[i] * matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[IRR, IRR] <- hessextra[IRR, IRR] + v2 } } if(gotHess) { hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }, C = { oweight <- logiprob * (1 - logiprob) hweight <- ifelse(isdata, -(1 - logiprob), logiprob) fgrad <- sumouter(mom, oweight) if(gotHess) { # Hessian term isfin <- is.finite(hweight) & matrowall(is.finite(ihessmat)) vcontrib <- if(all(isfin)) hweight %*% ihessmat else hweight[isfin] %*% ihessmat[isfin,, drop=FALSE] hessextra[IRR, IRR] <- hessextra[IRR, IRR] + matrix(vcontrib, nirr, nirr) hess <- fgrad + hessextra invhess <- checksolve(hess, matrix.action, "Hessian", target) } else { invhess <- hess <- NULL } }) } invfgrad <- checksolve(fgrad, matrix.action, "gradient matrix", target) } if(!needHess) { if(pseudo){ hess <- fgrad invhess <- invfgrad } } # ok <- NULL if(drop) { ok <- complete.cases(mom) if(all(ok)) { ok <- NULL } else { if((nbad <- sum(isdata[!ok])) > 0) warning(paste("NA value of canonical statistic at", nbad, ngettext(nbad, "data point", "data points")), call.=FALSE) Q <- Q[ok] mom <- mom[ok, , drop=FALSE] loc <- loc[ok] lam <- lam[ok] w <- w[ok] isdata <- isdata[ok] inside <- inside[ok] } } # ........ start assembling result ..................... 
result <- list(fitname=fitname, fit.is.poisson=fit.is.poisson) if(any(c("score", "derivatives") %in% what)) { ## calculate the composite score rawmean <- if(logi) logiprob else (lam * w) rawresid <- isdata - rawmean score <- matrix(rawresid, nrow=1) %*% mom if("score" %in% what) result$score <- score if("derivatives" %in% what) result$deriv <- list(mom=mom, score=score, fgrad=fgrad, invfgrad=invfgrad, hess=hess, invhess=invhess) if(all(what %in% c("score", "derivatives"))) return(result) } ## ::::::::::::::: compute second order terms ::::::::::::: ## >>> set model matrix to zero outside the domain <<< mom[!inside, ] <- 0 ## compute effect of adding/deleting each quadrature point if(fit.is.poisson) { ## ........ Poisson case .................................. eff <- mom ddS <- ddSintegrand <- NULL } else { ## ........ Gibbs case .................................... ## initialise eff <- mom ## second order interaction terms ## columns index the point being added/deleted ## rows index the points affected ## goal is to compute these effect matrices: eff.data <- eff.back <- matrix(0, nrow(eff), ncol(eff), dimnames=dimnames(eff)) ## U <- union.quad(Q) nU <- npoints(U) ## decide whether to split into blocks nX <- Q$data$n nD <- Q$dummy$n bls <- quadBlockSizes(nX, nD, p, announce=TRUE) nblocks <- bls$nblocks nperblock <- bls$nperblock ## if(nblocks > 1 && ("increments" %in% what)) { warning("Oversize quadrature scheme: cannot return array of increments", call.=FALSE) what <- setdiff(what, "increments") } R <- reach(fit) ## indices into original quadrature scheme whichok <- if(!is.null(ok)) which(ok) else seq_len(nX+nD) whichokdata <- whichok[isdata] whichokdummy <- whichok[!isdata] ## {{{{{{{{{{{{{ L O O P }}}}}}}}}}}}} ## loop for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) # corresponding subset of original quadrature scheme if(!is.null(ok) || nblocks > 1) { ## subset for which we will compute the effect 
current <- c(whichokdata, whichokdummy[first:last]) ## find neighbours, needed for calculation other <- setdiff(seq_len(nU), current) crx <- crosspairs(U[current], U[other], R, what="indices") nabers <- other[unique(crx$j)] ## subset actually requested requested <- c(current, nabers) ## corresponding stuff ('B' for block) isdataB <- isdata[requested] changesignB <- ifelse(isdataB, -1, 1) zerocifB <- zerocif[requested] anyzerocifB <- any(zerocifB) momB <- mom[requested, , drop=FALSE] lamB <- lam[requested] #' unused: #' insideB <- inside[requested] #' lamposB <- lampos[requested] if(logi) logiprobB <- logiprob[requested] wB <- w[requested] currentB <- seq_along(current) } else { requested <- NULL isdataB <- isdata changesignB <- ifelse(isdataB, -1, 1) zerocifB <- zerocif anyzerocifB <- anyzerocif momB <- mom lamB <- lam #' unused: #' insideB <- inside #' lamposB <- lampos if(logi) logiprobB <- logiprob wB <- w } ## compute second order terms ## ddS[i,j, ] = Delta_i Delta_j S(x) ddS <- deltasuffstat(fit, restrict = "first", dataonly=FALSE, quadsub=requested, sparseOK=sparse, splitInf=hasInf, force=TRUE, warn.forced=TRUE) ## if(is.null(ddS)) { warning("Second order interaction terms are not implemented", " for this model; they are treated as zero", call.=FALSE) break } else { sparse <- inherits(ddS, "sparse3Darray") if(hasInf) { deltaInf <- attr(ddS, "deltaInf") hasInf <- !is.null(deltaInf) if(hasInf) sparse <- sparse && inherits(deltaInf, "sparseMatrix") } if(gotScore) { ## add extra planes of zeroes to second-order model matrix ## (zero because the irregular components are part of the trend) paddim <- c(dim(ddS)[1:2], nirr) if(!sparse) { ddS <- abind::abind(ddS, array(0, dim=paddim), along=3) } else { ddS <- bind.sparse3Darray(ddS, sparse3Darray(dims=paddim), along=3) } } } ## ^^^^^^^^^^^^^^^^^ second term in DeltaScore ^^^^^^^^^^^^^^^^^^^^ ## effect of addition/deletion of U[j] ## on score contribution from data points (sum automatically restricted to ## interior 
for border correction by earlier call to ## deltasuffstat(..., restrict = "first")) ddSX <- ddS[isdataB, , , drop=FALSE] eff.data.B <- marginSums(ddSX, c(2,3)) ## check if any quadrature points have zero conditional intensity; ## these do not contribute to this term if(anyzerocifB) eff.data.B[zerocifB, ] <- 0 ## save results for current subset of quadrature points if(is.null(requested)) { eff.data <- eff.data.B } else { eff.data[current,] <- as.matrix(eff.data.B[currentB,,drop=FALSE]) } ## rm(ddSX, eff.data.B) ## ^^^^^^^^^^^^^^^^^ third term in DeltaScore ^^^^^^^^^^^^^^^^^^^^ ## effect of addition/deletion of U[j] on integral term in score if(!sparse) { ## ::::::::::::::: full arrays, simpler code ::::::::::::::::::: if(pseudo) { ## --------------- likelihood or pseudolikelihood ----------- ## model matrix after addition/deletion of each U[j] ## mombefore[i,j,] <- mom[i,] di <- dim(ddS) mombefore <- array(apply(momB, 2, rep, times=di[2]), dim=di) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] momafter <- mombefore + momchange ## effect of addition/deletion of U[j] on lambda(U[i], X) if(gotScore) { lamratio <- exp(tensor::tensor(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else { lamratio <- exp(tensor::tensor(momchange, theta, 3, 1)) } lamratio <- array(lamratio, dim=dim(momafter)) if(!hasInf) { #' cif is positive ddSintegrand <- lamB * (momafter * lamratio - mombefore) } else { #' cif can be zero zerobefore <- matrix(zerocifB, di[1], di[2]) zerochange <- deltaInf * 1L zerochange[ , isdataB] <- - zerochange[ , isdataB] zeroafter <- zerobefore + zerochange momZbefore <- mombefore momZbefore[ , zerocifB, ] <- 0 IJK <- unname(which(array(zeroafter == 1, dim=di), arr.ind=TRUE)) momZafter <- momafter momZafter[IJK] <- 0 momZchange <- momZafter- momZbefore ddSintegrand <- lamB * (momZafter * lamratio - momZbefore) } rm(momchange, mombefore, momafter, lamratio) } else { ## --------------- logistic composite likelihood ----------- stop("Non-sparse 
method is not implemented for method = 'logi'!") } gc() } else { ## :::::::::::::::::: sparse arrays :::::::::::::::::::::::: if(logi) { ## --------------- logistic composite likelihood ----------- ## Delta suff. stat. with sign change for data/dummy (sparse3Darray) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## Evaluate theta^T %*% ddS (with sign -1/+1 according to data/dummy) ## as triplet sparse matrix if(gotScore){ momchangeeffect <- tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1) } else{ momchangeeffect <- tenseur(momchange, theta, 3, 1) } ## Copy to each slice momchangeeffect <- expandSparse(momchangeeffect, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(momchangeeffect) ## Entrywise calculations below momchange <- as.numeric(momchange[ijk]) mombefore <- mom[cbind(ijk$i,ijk$k)] momafter <- mombefore + momchange ## Transform to change in probability expchange <- exp(momchangeeffect$x) lamBi <- lamB[ijk$i] logiprobBi <- logiprobB[ijk$i] changesignBj <- changesignB[ijk$j] pchange <- changesignBj*(lamBi * expchange / (lamBi*expchange + rho) - logiprobBi) ## Note: changesignBj * momchange == as.numeric(ddS[ijk]) if(!hasInf) { #' cif is positive ddSintegrand <- momafter * pchange + logiprobBi * changesignBj * momchange } else { #' cif can be zero isdataBj <- isdataB[ijk$j] zerobefore <- as.logical(zerocifB[ijk$i]) zerochange <- as.logical(deltaInf[cbind(ijk$i, ijk$j)]) zerochange[isdataBj] <- - zerochange[isdataBj] zeroafter <- zerobefore + zerochange momZbefore <- ifelse(zerobefore, 0, mombefore) momZafter <- ifelse(zeroafter, 0, momafter) momZchange <- momZafter - momZbefore ddSintegrand <- momZafter * pchange + logiprobBi * changesignBj * momZchange } ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else{ ## --------------- likelihood or pseudolikelihood ----------- if(entrywise) { ## ...... sparse arrays, using explicit indices ...... 
momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] if(gotScore){ lamratiominus1 <- expm1(tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tenseur(momchange, theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ijk <- SparseIndices(lamratiominus1) ## Everything entrywise with ijk now: # lamratiominus1 <- lamratiominus1[cbind(ijk$i, ijk$j)] lamratiominus1 <- as.numeric(lamratiominus1$x) momchange <- as.numeric(momchange[ijk]) mombefore <- momB[cbind(ijk$i, ijk$k)] momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- lamB[ijk$i] if(!hasInf) { #' cif is positive ddSintegrand <- lamarray * (momafter * lamratiominus1 + momchange) } else { #' cif can be zero isdataBj <- isdataB[ijk$j] zerobefore <- as.logical(zerocifB[ijk$i]) zerochange <- as.logical(deltaInf[cbind(ijk$i, ijk$j)]) zerochange[isdataBj] <- - zerochange[isdataBj] zeroafter <- zerobefore + zerochange momZbefore <- ifelse(zerobefore, 0, mombefore) momZafter <- ifelse(zeroafter, 0, momafter) momZchange <- momZafter - momZbefore ddSintegrand <- lamarray*(momZafter*lamratiominus1 + momZchange) } ddSintegrand <- sparse3Darray(i = ijk$i, j = ijk$j, k = ijk$k, x = ddSintegrand, dims = dim(ddS)) } else { ## ...... sparse array code ...... ## Entries are required only for pairs i,j which interact. 
## mombefore[i,j,] <- mom[i,] mombefore <- mapSparseEntries(ddS, 1, momB, conform=TRUE, across=3) momchange <- ddS momchange[ , isdataB, ] <- - momchange[, isdataB, ] ## momafter <- evalSparse3Dentrywise(mombefore + momchange) momafter <- mombefore + momchange ## lamarray[i,j,k] <- lam[i] lamarray <- mapSparseEntries(ddS, 1, lamB, conform=TRUE, across=3) if(gotScore){ lamratiominus1 <- expm1(tenseur(momchange[,,REG,drop=FALSE], theta, 3, 1)) } else{ lamratiominus1 <- expm1(tenseur(momchange,theta, 3, 1)) } lamratiominus1 <- expandSparse(lamratiominus1, n = dim(ddS)[3], across = 3) ## ddSintegrand <- evalSparse3Dentrywise(lamarray * (momafter* lamratiominus1 + momchange)) if(!hasInf) { #' cif is positive ddSintegrand <- lamarray * (momafter* lamratiominus1 + momchange) } else { #' cif has zeroes zerobefore <- mapSparseEntries(ddS, 1, zerocifB, conform=TRUE, across=3) zerochange <- mapSparseEntries(ddS, 1, deltaInf, conform=TRUE, across=2) zerochange[,isdataB,] <- - zerochange[,isdataB,] zeroafter <- zerobefore + zerochange momZbefore <- mombefore momZafter <- momafter momZbefore[ , zerocifB , ] <- 0 IJK <- SparseIndices(zeroafter) momZafter[IJK] <- 0 momZchange <- momZafter - momZbefore ddSintegrand <- lamarray*(momZafter*lamratiominus1 + momZchange) } } rm(lamratiominus1, lamarray, momafter) } rm(momchange, mombefore) } if(anyzerocifB) { ddSintegrand[zerocifB,,] <- 0 ddSintegrand[,zerocifB,] <- 0 } ## integrate if(logi){ # eff.back.B <- tenseur(ddSintegrand, rep(1, length(wB)), 1, 1) eff.back.B <- marginSums(ddSintegrand, c(2,3)) } else{ eff.back.B <- changesignB * tenseur(ddSintegrand, wB, 1, 1) } ## save contribution if(is.null(requested)) { eff.back <- eff.back.B } else { eff.back[current,] <- as.matrix(eff.back.B[currentB, , drop=FALSE]) } } ## {{{{{{{{{{{{{ E N D O F L O O P }}}}}}}}}}}}} ## total eff <- eff + eff.data - eff.back eff <- as.matrix(eff) } if("increments" %in% what) { result$increm <- list(ddS=ddS, ddSintegrand=ddSintegrand, isdata=isdata, wQ=w) 
} if(!any(c("leverage", "influence", "dfbetas") %in% what)) return(result) # ............ compute leverage, influence, dfbetas .............. if(!is.matrix(invhess)) stop("Internal error: inverse Hessian not available", call.=FALSE) # compute basic contribution from each quadrature point nloc <- npoints(loc) switch(method, interpreted = { b <- numeric(nloc) for(i in seq(nloc)) { effi <- eff[i,, drop=FALSE] momi <- mom[i,, drop=FALSE] b[i] <- momi %*% invhess %*% t(effi) } }, C = { b <- bilinearform(mom, invhess, eff) }) # .......... leverage ............. if("leverage" %in% what) { ## values of leverage (diagonal) at points of 'loc' h <- b * lam ok <- is.finite(h) geomsmooth <- geomsmooth && all(h[!isdata & ok] >= 0) if(mt) h <- data.frame(leverage=h, type=marks(loc)) levval <- (loc %mark% h)[ok] levvaldum <- levval[!isdata[ok]] if(!mt) { levsmo <- Smooth(levvaldum, sigma=smallsigma, geometric=geomsmooth, dimyx=dimyx, eps=eps) levnearest <- nnmark(levvaldum, dimyx=dimyx, eps=eps) } else { levsplitdum <- split(levvaldum, reduce=TRUE) levsmo <- Smooth(levsplitdum, sigma=smallsigma, geometric=geomsmooth, dimyx=dimyx, eps=eps) levnearest <- solapply(levsplitdum, nnmark, dimyx=dimyx, eps=eps) } ## mean level if(fit.is.poisson) { a <- area(Window(loc)) * markspace.integral(loc) levmean <- p/a } else { levmean <- if(!mt) mean(levnearest) else mean(sapply(levnearest, mean)) } lev <- list(val=levval, smo=levsmo, ave=levmean, nearest=levnearest) result$lev <- lev } # .......... influence ............. 
if("influence" %in% what) { if(logi){ X <- loc effX <- as.numeric(isdata) * eff - mom * (inside * logiprob) } else{ # values of influence at data points X <- loc[isdata] effX <- eff[isdata, ,drop=FALSE] } M <- (1/p) * quadform(effX, invhess) if(logi || is.multitype(X)) { # result will have several columns of marks M <- data.frame(influence=M) if(logi) M$isdata <- factor(isdata, levels = c(TRUE, FALSE), labels = c("data", "dummy")) if(is.multitype(X)) M$type <- marks(X) } V <- X %mark% M result$infl <- V } # .......... dfbetas ............. if("dfbetas" %in% what) { if(logi){ M <- as.numeric(isdata) * eff - mom * (inside * logiprob) M <- t(invhess %*% t(M)) Mdum <- M Mdum[isdata,] <- 0 Mdum <- Mdum / w.quad(Q) DFB <- msr(Q, M[isdata, ], Mdum) } else { vex <- invhess %*% t(mom) dex <- invhess %*% t(eff) switch(method, interpreted = { dis <- con <- matrix(0, nloc, ncol(mom)) for(i in seq(nloc)) { vexi <- vex[,i, drop=FALSE] dexi <- dex[,i, drop=FALSE] dis[i, ] <- if(isdata[i]) dexi else 0 con[i, ] <- if(inside[i]) (- lam[i] * vexi) else 0 } }, C = { dis <- t(dex) dis[!isdata,] <- 0 con <- - lam * t(vex) con[(lam == 0 | !inside), ] <- 0 }) colnames(dis) <- colnames(con) <- colnames(mom) DFB <- msr(Q, dis[isdata, ], con) } #' add smooth component DFB <- augment.msr(DFB, sigma=smallsigma, dimyx=dimyx, eps=eps) result$dfbetas <- DFB } return(result) } ## >>>>>>>>>>>>>>>>>>>>>>> HELPER FUNCTIONS <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< ## extract derivatives from covariate functions ## WARNING: these are not the score components in general ppmDerivatives <- function(fit, what=c("gradient", "hessian"), Dcovfun=NULL, loc, covfunargs=list()) { what <- match.arg(what) if(!is.null(Dcovfun)) { ## use provided function Dcov to compute derivatives Dvalues <- mpl.get.covariates(Dcovfun, loc, covfunargs=covfunargs) result <- as.matrix(as.data.frame(Dvalues)) return(result) } ## any irregular parameters? 
if(length(covfunargs) == 0) return(NULL) ## Try to extract derivatives from covariate functions ## This often works if the functions were created by symbolic differentiation fvalues <- mpl.get.covariates(fit$covariates, loc, covfunargs=covfunargs, need.deriv=TRUE) Dlist <- attr(fvalues, "derivatives")[[what]] if(length(Dlist) == 0) return(NULL) switch(what, gradient = { result <- do.call(cbind, unname(lapply(Dlist, as.data.frame))) result <- as.matrix(result) }, hessian = { ## construct array containing Hessian matrices biga <- do.call(blockdiagarray, Dlist) ## flatten matrices result <- matrix(biga, nrow=dim(biga)[1L]) }) return(result) } ## >>>>>>>>>>>>>>>> PLOT METHODS <<<<<<<<<<<<<<<<<<<<< plot.leverage.ppm <- function(x, ..., what=c("smooth", "nearest", "exact"), showcut=TRUE, args.cut=list(drawlabels=FALSE), multiplot=TRUE) { what <- match.arg(what) fitname <- x$fitname defaultmain <- paste("Leverage for", fitname) y <- x$lev if(what == "exact") { #' plot exact quadrature locations and leverage values z <- do.call(plot, resolve.defaults(list(x=y$val, multiplot=multiplot), list(...), list(main=defaultmain))) return(invisible(z)) } smo <- as.im(x, what=what) if(is.null(smo)) return(invisible(NULL)) ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- do.call(harmonise.im, unname(smo)) ## smo <- Reduce("+", smo) smo <- im.apply(smo, sum, check=FALSE) defaultmain <- c(defaultmain, "(sum over all types of point)") } args.contour <- resolve.defaults(args.cut, list(levels=ave)) cutinfo <- list(addcontour=showcut, args.contour=args.contour) if(is.im(smo)) { do.call(plot.im, resolve.defaults(list(smo), cutinfo, list(...), list(main=defaultmain))) } else if(inherits(smo, "imlist")) { do.call(plot.solist, resolve.defaults(list(smo), cutinfo, list(...), list(main=defaultmain))) } invisible(NULL) } persp.leverage.ppm <- function(x, ..., what=c("smooth", "nearest"), main, zlab="leverage") { if(missing(main)) main <- 
deparse(substitute(x)) what <- match.arg(what) y <- as.im(x, what=what) if(is.null(y)) return(invisible(NULL)) if(is.im(y)) return(persp(y, main=main, ..., zlab=zlab)) pa <- par(ask=TRUE) lapply(y, persp, main=main, ..., zlab=zlab) par(pa) return(invisible(NULL)) } contour.leverage.ppm <- function(x, ..., what=c("smooth", "nearest"), showcut=TRUE, args.cut=list(col=3, lwd=3, drawlabels=FALSE), multiplot=TRUE) { defaultmain <- paste("Leverage for", x$fitname) smo <- as.im(x, what=what) y <- x$lev ave <- y$ave if(!multiplot && inherits(smo, "imlist")) { ave <- ave * length(smo) smo <- do.call(harmonise.im, unname(smo)) ## smo <- Reduce("+", smo) smo <- im.apply(smo, sum, check=FALSE) defaultmain <- c(defaultmain, "(sum over all types of point)") } argh1 <- resolve.defaults(list(...), list(main=defaultmain)) argh2 <- resolve.defaults(args.cut, list(levels=ave), list(...)) if(is.im(smo)) { #' single panel out <- do.call(contour, append(list(x=smo), argh1)) if(showcut) do.call(contour, append(list(x=smo, add=TRUE), argh2)) } else if(inherits(smo, "imlist")) { #' multiple panels argh <- append(list(x=smo, plotcommand ="contour"), argh1) if(showcut) { argh <- append(argh, list(panel.end=function(i, y, ...) 
contour(y, ...), panel.end.args=argh2)) } out <- do.call(plot.solist, argh) } else { warning("Unrecognised format") out <- NULL } return(invisible(out)) } plot.influence.ppm <- function(x, ..., multiplot=TRUE) { fitname <- x$fitname defaultmain <- paste("Influence for", fitname) y <- x$infl if(multiplot && isTRUE(ncol(marks(y)) > 1)) { # apart from the influence value, there may be additional columns of marks # containing factors: {type of point}, { data vs dummy in logistic case } ma <- as.data.frame(marks(y)) fax <- sapply(ma, is.factor) nfax <- sum(fax) if(nfax == 1) { # split on first available factor, and remove this factor y <- split(y, reduce=TRUE) } else if(nfax > 1) { # several factors: split according to them all, and remove them all f.all <- do.call(interaction, ma[fax]) z <- y %mark% ma[,!fax] y <- split(z, f.all) } } do.call(plot, resolve.defaults(list(y), list(...), list(main=defaultmain, multiplot=multiplot, which.marks=1))) } ## >>>>>>>>>>>>>>>> CONVERSION METHODS <<<<<<<<<<<<<<<<<<<<< as.im.leverage.ppm <- function(X, ..., what=c("smooth", "nearest")) { what <- match.arg(what) y <- switch(what, smooth = X$lev$smo, nearest = X$lev$nearest) if(is.null(y)) warning(paste("Data for", sQuote(what), "image are not available:", "please recompute the leverage using the current spatstat"), call.=FALSE) return(y) # could be either an image or a list of images } as.function.leverage.ppm <- function(x, ...) { X <- x$lev$val S <- ssf(unmark(X), marks(X)) return(as.function(S)) } as.ppp.influence.ppm <- function(X, ...) { return(X$infl) } as.owin.leverage.ppm <- function(W, ..., fatal=TRUE) { y <- as.im(W) if(inherits(y, "imlist")) y <- y[[1L]] as.owin(y, ..., fatal=fatal) } as.owin.influence.ppm <- function(W, ..., fatal=TRUE) { as.owin(as.ppp(W), ..., fatal=fatal) } domain.leverage.ppm <- domain.influence.ppm <- Window.leverage.ppm <- Window.influence.ppm <- function(X, ...) 
{ as.owin(X) }

## >>>>>>>>>>>>>>>> PRINT METHODS <<<<<<<<<<<<<<<<<<<<<

## Print method for 'leverage.ppm' objects.
## Shows the exact leverage values at the quadrature points, the
## smoothed leverage image(s), and -- for Poisson models only --
## the theoretical average leverage value.
print.leverage.ppm <- function(x, ...) {
  splat("Point process leverage function")
  fitname <- x$fitname
  splat("for model:", fitname)
  lev <- x$lev
  splat("\nExact values:")
  print(lev$val)
  splat("\nSmoothed values:")
  print(lev$smo)
  ## for compatibility we retain the x$fit usage
  if(x$fit.is.poisson %orifnull% is.poisson(x$fit))
    splat("\nAverage value:", lev$ave)
  return(invisible(NULL))
}

## Print method for 'influence.ppm' objects.
print.influence.ppm <- function(x, ...) {
  splat("Point process influence measure")
  fitname <- x$fitname
  splat("for model:", fitname)
  splat("\nExact values:")
  print(x$infl)
  return(invisible(NULL))
}

## >>>>>>>>>>>>>>>> SUBSET METHODS <<<<<<<<<<<<<<<<<<<<<

## Restrict a leverage object to a subset/window 'i'.
## Both the exact values and the smoothed image(s) are restricted;
## if 'update' is TRUE the average value is recomputed from the
## restricted smoothed image(s).
"[.leverage.ppm" <- function(x, i, ..., update=TRUE) {
  if(missing(i)) return(x)
  y <- x$lev
  smoi <- if(is.im(y$smo)) y$smo[i, ...] else solapply(y$smo, "[", i=i, ...)
  ## if the subset operation did not yield image(s), just return the result
  if(!inherits(smoi, c("im", "imlist"))) return(smoi)
  y$smo <- smoi
  y$val <- y$val[i, ...]
  if(update)
    y$ave <- if(is.im(smoi)) mean(smoi) else mean(sapply(smoi, mean))
  x$lev <- y
  return(x)
}

## Restrict an influence object: delegates to the "[" method for the
## underlying marked point pattern of influence values.
"[.influence.ppm" <- function(x, i, ...) {
  if(missing(i)) return(x)
  y <- x$infl[i, ...]
  if(!is.ppp(y)) return(y)
  x$infl <- y
  return(x)
}

## >>>>>>>>>>>>>>>> SMOOTHING, INTEGRATION <<<<<<<<<<<<<<<<<<<<<

## Integral of the leverage function over a spatial domain,
## computed from the nearest-neighbour interpolated image(s).
integral.leverage.ppm <- function(f, domain=NULL, ...) {
  y <- as.im(f, what="nearest")
  z <- if(is.im(y)) {
    integral(y, domain=domain, ...)
  } else if(is.solist(y)) {
    sapply(y, integral, domain=domain, ...)
  } else stop("Internal format is not understood")
  if(length(dim(z))) z <- t(z)
  return(z)
}

## Integral (column sums of the marks) of the influence measure.
## If 'domain' is a tessellation, one value is returned per tile.
integral.influence.ppm <- function(f, domain=NULL, ...) {
  if(!is.null(domain)) {
    if(is.tess(domain)) {
      z <- sapply(tiles(domain), integral, f=f)
      if(length(dim(z))) z <- t(z)
      return(z)
    }
    f <- f[domain]
  }
  #' actual computation
  y <- as.ppp(f)
  return(colSums(as.matrix(marks(y))))
}

## Mean leverage value, from the nearest-neighbour image(s).
mean.leverage.ppm <- function(x, ...) {
  y <- as.im(x, what="nearest")
  mean(y, ...)
}

## Kernel-smooth the exact leverage values.
Smooth.leverage.ppm <- function(X, ...) Smooth(X$lev$val, ...)
## Kernel-smooth the influence values (a marked point pattern).
Smooth.influence.ppm <- function(X, ...)
  Smooth(as.ppp(X), ...)

## >>>>>>>>>>>>>>>> GEOMETRICAL OPERATIONS <<<<<<<<<<<<<<<<<<<<<

## Apply a vector shift to a leverage object: the exact values and
## the smoothed image(s) are shifted by the same vector, and the
## shift is recorded for later retrieval via getlastshift().
shift.leverage.ppm <- function(X, ...) {
  vec <- getlastshift(shift(as.owin(X), ...))
  X$lev$val <- shift(X$lev$val, vec=vec)
  smo <- X$lev$smo
  X$lev$smo <-
    if(is.im(smo)) shift(smo, vec=vec) else solapply(smo, shift, vec=vec)
  return(putlastshift(X, vec))
}

## Apply a vector shift to an influence object.
shift.influence.ppm <- function(X, ...) {
  X$infl <- shift(X$infl, ...)
  return(putlastshift(X, getlastshift(X$infl)))
}
spatstat/R/pixellate.R0000644000176200001440000001554413357331223014443 0ustar liggesusers#
# pixellate.R
#
# $Revision: 1.25 $ $Date: 2017/11/15 07:23:16 $
#
# pixellate         convert an object to a pixel image
#
# pixellate.ppp     convert a point pattern to a pixel image
#                   (pixel value = number of points in pixel)
#
# pixellate.owin    convert a window to a pixel image
#                   (pixel value = area of intersection with pixel)
#

## Generic: convert an object to a pixel image.
pixellate <- function(x, ...) {
  UseMethod("pixellate")
}

## Pixellate a point pattern: each pixel value is the number of points
## (or the sum of 'weights') attributed to the pixel.
## Arguments:
##   W                  pixel raster / window (defaults to Window(x))
##   weights            optional vector, matrix or data frame of weights,
##                      one (row) per point; a matrix/data frame yields
##                      a list of images, one per column
##   padzero            if TRUE, pixels outside the window get 0, not NA
##   fractional         if TRUE, split each point's mass over the 4
##                      surrounding pixel centres
##   preserve           if TRUE, renormalise so each point's total mass
##                      inside the window is 1 (no-op on rectangles)
##   DivideByPixelArea  if TRUE, divide values by the pixel area
##   savemap            if TRUE, attach the point-to-pixel map attribute
pixellate.ppp <- function(x, W=NULL, ..., weights=NULL,
                          padzero=FALSE, fractional=FALSE, preserve=FALSE,
                          DivideByPixelArea=FALSE, savemap=FALSE) {
  verifyclass(x, "ppp")

  if(is.null(W))
    W <- Window(x)

  isrect <- is.rectangle(W)
  ## 'preserve' is relevant only for non-rectangular windows
  preserve <- preserve && !isrect

  ## result is an integer count only in the simplest case
  iscount <- is.null(weights) && !fractional && !preserve

  W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W)))

  nx <- npoints(x)

  ## extract the pixel raster geometry
  insideW <- W$m
  dimW <- W$dim
  nr <- dimW[1L]
  nc <- dimW[2L]
  xcolW <- W$xcol
  yrowW <- W$yrow
  xrangeW <- W$xrange
  yrangeW <- W$yrange
  unitsW <- unitname(W)
  # multiple columns of weights?
if(is.data.frame(weights) || is.matrix(weights)) {
    ## matrix/data frame of weights: one column per weighting,
    ## one row per data point; result will be a list of k images
    k <- ncol(weights)
    stopifnot(nrow(weights) == npoints(x))
    weights <- if(k == 1) as.vector(weights) else as.data.frame(weights)
  } else {
    k <- 1
    if(length(weights) == 0) weights <- NULL else
      stopifnot(length(weights) == npoints(x) || length(weights) == 1)
    ## a single weight is recycled to all points
    if(length(weights) == 1)
      weights <- rep(weights, npoints(x))
  }
  # handle empty point pattern
  if(nx == 0) {
    zerovalue <- if(iscount) 0L else as.double(0)
    zeroimage <- as.im(zerovalue, W)
    if(padzero) # map NA to 0
      zeroimage <- na.handle.im(zeroimage, zerovalue)
    result <- zeroimage
    if(k > 1) {
      result <- as.solist(rep(list(zeroimage), k))
      names(result) <- colnames(weights)
    }
    return(result)
  }
  # map points to pixels
  xx <- x$x
  yy <- x$y
  if(!fractional) {
    #' map (x,y) to nearest raster point
    pixels <- if(preserve)
                nearest.valid.pixel(xx, yy, W)
              else
                nearest.raster.point(xx, yy, W)
    rowfac <- factor(pixels$row, levels=1:nr)
    colfac <- factor(pixels$col, levels=1:nc)
  } else {
    #' attribute fractional weights to the 4 pixel centres surrounding (x,y)
    #' find surrounding pixel centres
    jj <- findInterval(xx, xcolW, rightmost.closed=TRUE)
    ii <- findInterval(yy, yrowW, rightmost.closed=TRUE)
    ## NOTE(review): 'jj' indexes columns (positions in xcolW) but is
    ## clamped below by 'nr' (the number of rows), while 'ii' indexes
    ## rows but is clamped by 'nc' (the number of columns). This looks
    ## like a row/column swap that is harmless only when the raster is
    ## square -- confirm against upstream spatstat before relying on
    ## the fractional=TRUE case for non-square rasters.
    jleft <- pmax(jj, 1)
    jright <- pmin(jj + 1, nr)
    ibot <- pmax(ii, 1)
    itop <- pmin(ii+1, nc)
    #' compute fractional weights
    wleft <- pmin(1, abs(xcolW[jright] - xx)/W$xstep)
    wright <- 1 - wleft
    wbot <- pmin(1, abs(yrowW[itop] - yy)/W$ystep)
    wtop <- 1 - wbot
    #' pack together
    ww <- c(wleft * wbot, wleft * wtop, wright * wbot, wright * wtop)
    rowfac <- factor(c(ibot, itop, ibot, itop), levels=1:nr)
    colfac <- factor(c(jleft, jleft, jright, jright), levels=1:nc)
    if(preserve) {
      #' normalise fractions for each data point to sum to 1 inside window
      ok <- insideW[cbind(as.integer(rowfac), as.integer(colfac))]
      wwok <- ww * ok
      denom <- .colSums(wwok, 4, nx, na.rm=TRUE)
      recip <- ifelse(denom == 0, 1, 1/denom)
      ww <- wwok * rep(recip, each=4)
    }
    #' data weights must be replicated
    if(is.null(weights)) {
      weights <- ww
    } else if(k == 1) {
      weights <- ww * rep(weights, 4)
    } else {
      weights <- ww * apply(weights, 2, rep, times=4)
    }
  }
  #' sum weights
  if(is.null(weights)) {
    ## unweighted: a plain cross-tabulation of pixel counts
    ta <- table(row = rowfac, col = colfac)
  } else if(k == 1) {
    ta <- tapplysum(weights, list(row = rowfac, col=colfac))
  } else {
    ta <- list()
    for(j in 1:k) {
      ta[[j]] <- tapplysum(weights[,j], list(row = rowfac, col=colfac))
    }
  }
  #' divide by pixel area?
  if(DivideByPixelArea) {
    pixelarea <- W$xstep * W$ystep
    if(k == 1) {
      ta <- ta/pixelarea
    } else {
      ta <- lapply(ta, "/", e2=pixelarea)
    }
  }
  # pack up as image(s)
  if(k == 1) {
    # single image
    # clip to window of data
    if(!padzero)
      ta[!insideW] <- NA
    out <- im(ta, xcol = xcolW, yrow = yrowW,
              xrange = xrangeW, yrange = yrangeW, unitname=unitsW)
  } else {
    # case k > 1
    # create template image to reduce overhead
    template <- im(ta[[1L]], xcol = xcolW, yrow = yrowW,
                   xrange = xrangeW, yrange = yrangeW, unitname=unitsW)
    out <- list()
    for(j in 1:k) {
      taj <- ta[[j]]
      # clip to window of data
      if(!padzero)
        taj[!insideW] <- NA
      # copy template and reassign pixel values
      outj <- template
      outj$v <- taj
      # store
      out[[j]] <- outj
    }
    out <- as.solist(out)
    names(out) <- names(weights)
  }
  if(savemap)
    attr(out, "map") <- cbind(row=as.integer(rowfac),
                              col=as.integer(colfac))
  return(out)
}

## Pixellate a window: each pixel value is the area of the
## intersection of the (polygonal) window with that pixel.
pixellate.owin <- function(x, W=NULL, ..., DivideByPixelArea=FALSE) {
  stopifnot(is.owin(x))
  P <- as.polygonal(x)
  R <- as.rectangle(x)
  if(is.null(W))
    W <- R
  else if(!is.subset.owin(R, as.rectangle(W)))
    stop("W does not cover the domain of x")
  W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W)))
  ## compute
  Zmat <- polytileareaEngine(P, W$xrange, W$yrange,
                             nx=W$dim[2L], ny=W$dim[1L],
                             DivideByPixelArea)
  ## convert to image
  Z <- im(Zmat, xcol=W$xcol, yrow=W$yrow,
          xrange=W$xrange, yrange=W$yrange, unitname=unitname(W))
  return(Z)
}

## Workhorse for pixellate.owin: for each pixel of an nx * ny raster
## over (xrange, yrange), compute the area of intersection with the
## polygonal window P, via the C routine 'poly2imA'.
polytileareaEngine <- function(P, xrange, yrange, nx, ny,
                               DivideByPixelArea=FALSE) {
  x0 <- xrange[1L]
  y0 <- yrange[1L]
  dx <- diff(xrange)/nx
  dy <- diff(yrange)/ny
  # process each component
polygon Z <- matrix(0.0, ny, nx) B <- P$bdry for(i in seq_along(B)) { PP <- B[[i]] # transform so that pixels become unit squares QQ <- affinexypolygon(PP, vec = c(-x0, -y0)) RR <- affinexypolygon(QQ, mat = diag(1/c(dx, dy))) # xx <- RR$x yy <- RR$y nn <- length(xx) # close polygon xx <- c(xx, xx[1L]) yy <- c(yy, yy[1L]) nn <- nn+1 # call C routine zz <- .C("poly2imA", ncol=as.integer(nx), nrow=as.integer(ny), xpoly=as.double(xx), ypoly=as.double(yy), npoly=as.integer(nn), out=as.double(numeric(nx * ny)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(zz$status != 0) stop("Internal error") # increment output Z[] <- Z[] + zz$out } if(!DivideByPixelArea) { #' revert to original scale pixelarea <- dx * dy Z <- Z * pixelarea } return(Z) } spatstat/R/plot.plotppm.R0000644000176200001440000001050713333543255015122 0ustar liggesusers# # plot.plotppm.R # # engine of plot method for ppm # # $Revision: 1.20 $ $Date: 2016/12/30 01:44:07 $ # # plot.plotppm <- function(x,data=NULL,trend=TRUE,cif=TRUE,se=TRUE, pause=interactive(), how=c("persp","image","contour"), ..., pppargs=list()) { verifyclass(x,"plotppm") # determine main plotting actions superimposed <- !is.null(data) if(!missing(trend) && (trend & is.null(x[["trend"]]))) stop("No trend to plot.\n") trend <- trend & !is.null(x[["trend"]]) if(!missing(cif) && (cif & is.null(x[["cif"]]))) stop("No cif to plot.\n") cif <- cif & !is.null(x[["cif"]]) if(!missing(se) && (se & is.null(x[["se"]]))) stop("No SE to plot.\n") se <- se & !is.null(x[["se"]]) surftypes <- c("trend", "cif", "se")[c(trend, cif, se)] # marked point process? 
mrkvals <- attr(x,"mrkvals") marked <- (length(mrkvals) > 1) if(marked) data.marks <- marks(data) if(marked & superimposed) { data.types <- levels(data.marks) if(any(sort(data.types) != sort(mrkvals))) stop(paste("Data marks are different from mark", "values for argument x.\n")) } # plotting style howmat <- outer(how, c("persp", "image", "contour"), "==") howmatch <- matrowany(howmat) if (any(!howmatch)) stop(paste("unrecognised option", how[!howmatch])) # no pause required for single display if(missing(pause) || is.null(pause)) { nplots <- length(surftypes) * length(mrkvals) pause <- interactive() && (nplots != 1) } # start plotting if(pause) oldpar <- par(ask = TRUE) on.exit(if(pause) par(oldpar)) for(ttt in surftypes) { xs <- x[[ttt]] for (i in seq_along(mrkvals)) { level <- mrkvals[i] main <- paste(if(ttt == "se") "Estimated" else "Fitted", ttt, if(marked) paste("\n mark =", level) else NULL) for (style in how) { switch(style, persp = { do.call(persp, resolve.defaults(list(xs[[i]]), list(...), spatstat.options("par.persp"), list(xlab="x", zlab=ttt, main=main))) }, image = { do.call(image, resolve.defaults(list(xs[[i]]), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data do.call(plot.ppp, append(list(x=X, add=TRUE), pppargs)) } }, contour = { do.call(contour, resolve.defaults(list(xs[[i]]), list(...), list(main=main))) if(superimposed) { X <- if(marked) data[data.marks == level] else data do.call(plot.ppp, append(list(x=X, add=TRUE), pppargs)) } }, { stop(paste("Unrecognised plot style", style)) }) } } } return(invisible()) } print.plotppm <- function(x, ...) 
{
  ## Print method for 'plotppm' objects (precomputed plotting data for
  ## a fitted point process model): describes the fitted trend and the
  ## fitted conditional intensity components, if present.
  verifyclass(x, "plotppm")
  trend <- x$trend
  cif <- x$cif
  mrkvals <- attr(x, "mrkvals")
  ntypes <- length(mrkvals)
  ## treated as unmarked when there is only one mark value
  unmarked <- (ntypes == 1 )
  cat(paste("Object of class", sQuote("plotppm"), "\n"))
  if(unmarked)
    cat("Computed for an unmarked point process\n")
  else {
    cat("Computed for a marked point process, with mark values:\n")
    print(mrkvals)
  }
  cat("Contains the following components:\n")
  if(!is.null(trend)) {
    cat("\n$trend:\tFitted trend.\n")
    if(unmarked) {
      cat("A list containing 1 image\n")
      print(trend[[1]], ...)
    } else {
      cat(paste("A list of", ntypes, "images\n"))
      cat("Typical details:\n")
      print(trend[[1]], ...)
    }
  }
  if(!is.null(cif)) {
    cat("\n$cif:\tFitted conditional intensity.\n")
    if(unmarked) {
      cat("A list containing 1 image\n")
      print(cif[[1]], ...)
    } else {
      cat(paste("A list of", ntypes, "images\n"))
      cat("Typical details:\n")
      print(cif[[1]], ...)
    }
  }
  invisible(NULL)
}
spatstat/R/simplepanel.R0000644000176200001440000001647713360317101014764 0ustar liggesusers#
# simplepanel.R
#
# A simple, robust point & click interface
# used in rmh visual debugger.
#
# $Revision: 1.14 $ $Date: 2016/04/25 02:34:40 $
#

## Constructor for 'simplepanel' objects.
## Arguments:
##   title    panel title string
##   B        bounding rectangle of the entire panel
##   boxes    list of rectangles (buttons), each lying inside B
##   clicks   list of click-handler functions, one per box; each must
##            return TRUE to continue the interaction or FALSE to stop
##   redraws  optional list of redraw functions, one per box
##            (NULL entries fall back to dflt.redraw)
##   exit     optional function invoked when the interaction ends
##   env      environment in which the handlers operate
simplepanel <- function(title, B, boxes, clicks,
                        redraws=NULL, exit=NULL, env) {
  stopifnot(is.rectangle(B))
  stopifnot(is.list(boxes))
  if(!all(unlist(lapply(boxes, is.rectangle))))
    stop("some of the boxes are not rectangles")
  if(!all(unlist(lapply(boxes, is.subset.owin, B=B))))
    stop("Some boxes do not lie inside the bounding box B")
  stopifnot(is.list(clicks) && length(clicks) == length(boxes))
  if(!all(unlist(lapply(clicks, is.function))))
    stop("clicks must be a list of functions")
  if(is.null(redraws)) {
    redraws <- rep.int(list(dflt.redraw), length(boxes))
  } else {
    stopifnot(is.list(redraws) && length(redraws) == length(boxes))
    ## fill in missing redraw functions with the default
    if(any(isnul <- unlist(lapply(redraws, is.null))))
      redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul))
    if(!all(unlist(lapply(redraws, is.function))))
      stop("redraws must be a list of functions")
  }
  if(is.null(exit)) {
    exit <- function(...) { NULL}
  } else stopifnot(is.function(exit))
  stopifnot(is.environment(env))
  ## determine a display name for each button:
  ## box name, else click-handler name, else "Button j"
  n <- length(boxes)
  bnames <- names(boxes) %orifnull% rep("", n)
  cnames <- names(clicks) %orifnull% rep("", n)
  dnames <- paste("Button", seq_len(n))
  nama <- ifelse(nzchar(bnames), bnames, ifelse(nzchar(cnames), cnames, dnames))
  out <- list(title=title, B=B,
              nama=nama, boxes=boxes,
              clicks=clicks, redraws=redraws,
              exit=exit, env=env)
  class(out) <- c("simplepanel", class(out))
  return(out)
}

## Extend an existing panel by adding new buttons along one side.
## 'len' is the width/height of the added strip (defaults to half the
## corresponding side of the current bounding box); 'aspect' is the
## button aspect ratio, derived from the longest button name if absent.
grow.simplepanel <- function(P, side=c("right","left","top","bottom"),
                             len=NULL,
                             new.clicks, new.redraws=NULL, ...,
                             aspect) {
  verifyclass(P, "simplepanel")
  side <- match.arg(side)
  stopifnot(is.list(new.clicks))
  if(!all(unlist(lapply(new.clicks, is.function))))
    stop("new.clicks must be a list of functions")
  if(is.null(new.redraws)) {
    new.redraws <- rep.int(list(dflt.redraw), length(new.clicks))
  } else {
    stopifnot(is.list(new.redraws) && length(new.redraws) == length(new.clicks))
    ## fill in missing redraw functions with the default
    if(any(isnul <- sapply(new.redraws, is.null)))
      new.redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul))
if(!all(unlist(lapply(new.redraws, is.function)))) stop("new.redraws must be a list of functions") } if(missing(aspect) || is.null(aspect)) { # determine aspect ratio from length of longest text string n <- length(new.clicks) nama <- names(new.clicks) if(sum(nzchar(nama)) != n) nama <- names(new.redraws) if(sum(nzchar(nama)) != n) nama <- paste("Box", seq_len(n)) aspect <- 3/max(4, nchar(nama)) } B <- P$B n <- length(new.clicks) switch(side, right={ new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2 extraspace <- owin(B$xrange[2] + c(0, new.width), B$yrange) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect) }, left={ new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2 extraspace <- owin(B$xrange[1] - c(new.width, 0), B$yrange) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect) }, top={ new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2 extraspace <- owin(B$xrange, B$yrange[2] + c(0, new.height)) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect, horizontal=TRUE) }, bottom={ new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2 extraspace <- owin(B$xrange, B$yrange[1] - c(new.height, 0)) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect, horizontal=TRUE) }) with(P, simplepanel(title, boundingbox(B, extraspace), append(boxes, new.boxes), append(clicks, new.clicks), append(redraws, new.redraws), exit, env)) } redraw.simplepanel <- function(P, verbose=FALSE) { verifyclass(P, "simplepanel") if(verbose) cat("Redrawing entire panel\n") with(P, { # ntitle <- sum(nzchar(title)) plot(B, type="n", main=title) for(j in seq_along(nama)) (redraws[[j]])(boxes[[j]], nama[j], env) }) invisible(NULL) } clear.simplepanel <- function(P) { verifyclass(P, "simplepanel") plot(P$B, main="") invisible(NULL) } run.simplepanel <- function(P, popup=TRUE, verbose=FALSE) { verifyclass(P, "simplepanel") if(popup) dev.new() ntitle <- sum(nzchar(P$title)) opa <- par(mar=c(0,0,ntitle+0.2,0),ask=FALSE) with(P, { # interaction 
loop more <- TRUE while(more) { redraw.simplepanel(P, verbose=verbose) xy <- spatstatLocator(1) if(is.null(xy)) { if(verbose) cat("No (x,y) coordinates\n") break } found <- FALSE for(j in seq_along(boxes)) { if(inside.owin(xy$x, xy$y, boxes[[j]])) { found <- TRUE if(verbose) cat(paste("Caught click on", sQuote(nama[j]), "\n")) more <- (clicks[[j]])(env, xy) if(!is.logical(more) || length(more) != 1) { warning(paste("Click function for", sQuote(nama[j]), "did not return TRUE/FALSE")) more <- FALSE } if(verbose) cat(if(more) "Continuing\n" else "Terminating\n") break } } if(verbose && !found) cat(paste("Coordinates", paren(paste(xy, collapse=",")), "not matched to any box\n")) } }) if(verbose) cat("Calling exit function\n") rslt <- with(P, exit(env)) # revert to original graphics parameters par(opa) # close popup window? if(popup) dev.off() # return value of 'exit' function return(rslt) } layout.boxes <- function(B, n, horizontal=FALSE, aspect=0.5, usefrac=0.9){ # make n boxes in B stopifnot(is.rectangle(B)) stopifnot(n > 0) width <- sidelengths(B)[1] height <- sidelengths(B)[2] if(!horizontal) { heightshare <- height/n useheight <- min(width * aspect, heightshare * usefrac) usewidth <- min(useheight /aspect, width * usefrac) lostwidth <- width - usewidth lostheightshare <- heightshare - useheight template <- owin(c(0, usewidth), c(0, useheight)) boxes <- list() boxes[[1]] <- shift(template, c(B$xrange[1]+lostwidth/2, B$yrange[1] + lostheightshare/2)) if(n > 1) for(j in 2:n) boxes[[j]] <- shift(boxes[[j-1]], c(0, heightshare)) } else { boxes <- layout.boxes(flipxy(B), n, horizontal=FALSE, aspect=1/aspect, usefrac=usefrac) boxes <- lapply(boxes, flipxy) } return(boxes) } # default redraw function for control buttons dflt.redraw <- function(button, name, env) { plot(button, add=TRUE, border="pink") text(centroid.owin(button), labels=name) } print.simplepanel <- function(x, ...) 
{ nama <- x$nama cat("simplepanel object\n") cat(paste("\tTitle:", sQuote(x$title), "\n")) cat("\tPanel names:") for(i in seq_along(nama)) { if(i %% 6 == 1) cat("\n\t") cat(paste0(sQuote(nama[i]), " ")) } cat("\n") return(invisible(NULL)) } spatstat/R/concom.R0000644000176200001440000000742513333543254013734 0ustar liggesusers# # # concom.R # # $Revision: 1.5 $ $Date: 2018/03/15 07:37:41 $ # # The connected component interaction # # Concom() create an instance of the connected component interaction # [an object of class 'interact'] # # ------------------------------------------------------------------- # Concom <- local({ connectedlabels <- function(X, R) { connected(X, R, internal=TRUE) } countcompo <- function(X, R) { length(unique(connectedlabels(X, R))) } # change in number of components when point i is deleted cocoDel <- function(X, R, subset=seq_len(npoints(X))) { n <- length(subset) ans <- integer(n) if(n > 0) { cX <- countcompo(X, R) for(i in 1:n) ans[i] = countcompo(X[-subset[i]], R) - cX } return(ans) } # change in number of components when new point is added cocoAdd <- function(U, X, R) { U <- as.ppp(U, W=as.owin(X)) nU <- npoints(U) cr <- crosspairs(U, X, R, what="indices") lab <- connectedlabels(X, R) hitcomp <- tapply(X=lab[cr$j], INDEX=factor(cr$i, levels=1:nU), FUN=unique, simplify=FALSE) nhit <- unname(lengths(hitcomp)) change <- 1L - nhit return(change) } # connected component potential cocopot <- function(X,U,EqualPairs,pars,correction, ...) 
{ bad <- !(correction %in% c("border", "none")) if((nbad <- sum(bad)) > 0) warning(paste("The", ngettext(nbad, "correction", "corrections"), commasep(sQuote(correction[!ok])), ngettext(nbad, "is", "are"), "not implemented")) n <- U$n answer <- numeric(n) r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") dummies <- !(seq_len(n) %in% EqualPairs[,2L]) if(sum(dummies) > 0) answer[dummies] <- -cocoAdd(U[dummies], X, r) ii <- EqualPairs[,1L] jj <- EqualPairs[,2L] answer[jj] <- cocoDel(X, r, subset=ii) return(answer + 1L) } # template object without family, par, version BlankCoco <- list( name = "Connected component process", creator = "Concom", family = "inforder.family", # evaluated later pot = cocopot, par = list(r = NULL), # to be filled in parnames = "distance threshold", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1L || r <= 0) stop("distance threshold r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1L]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", printable=signif(eta))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ if(anyNA(coeffs)) return(Inf) logeta <- coeffs[1L] if(abs(logeta) <= epsilon) return(0) else return(Inf) }, version=NULL # to be added ) class(BlankCoco) <- "interact" Concom <- function(r) { instantiate.interact(BlankCoco, list(r=r)) } Concom <- intermaker(Concom, BlankCoco) Concom }) spatstat/R/randomseg.R0000644000176200001440000000443513333543255014434 0ustar liggesusers# # randomseg.R # # $Revision: 1.12 $ $Date: 2016/12/01 09:32:41 $ # rpoisline <- function(lambda, win=owin()) { win <- as.owin(win) # determine circumcircle xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # generate poisson lines through circumcircle n <- rpois(1, lambda * 2 * pi * rmax) if(n == 0) { X <- psp(numeric(0), numeric(0), numeric(0), numeric(0), marks=integer(0), window=win) attr(X, "lines") <- infline(p=numeric(0), theta=numeric(0)) attr(X, "linemap") <- integer(0) return(X) } theta <- runif(n, max= 2 * pi) p <- runif(n, max=rmax) # compute intersection points with circle q <- sqrt(rmax^2 - p^2) co <- cos(theta) si <- sin(theta) X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, marks = seq_len(n), window=boundbox, check=FALSE) # infinite lines L <- infline(p = p + xmid * co + ymid * si, theta = theta) # clip to window X <- X[win] # append info linemap <- as.integer(marks(X)) X <- unmark(X) attr(X, "lines") <- L attr(X, "linemap") <- linemap return(X) } rlinegrid <- function(angle=45, spacing=0.1, win=owin()) { win <- as.owin(win) # determine circumcircle width <- diff(win$xrange) height <- diff(win$yrange) rmax <- sqrt(width^2 + height^2)/2 xmid <- mean(win$xrange) ymid <- mean(win$yrange) # generate randomly-displaced grid of lines through circumcircle u <- runif(1, min=0, max=spacing) - rmax if(u >= rmax) return(psp(numeric(0), numeric(0), numeric(0), 
numeric(0), window=win, check=FALSE)) p <- seq(from=u, to=rmax, by=spacing) # compute intersection points with circle q <- sqrt(rmax^2 - p^2) theta <- pi * ((angle - 90)/180) co <- cos(theta) si <- sin(theta) X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, window=owin(xmid+c(-1,1)*rmax, ymid+c(-1,1)*rmax), check=FALSE) # clip to window X <- X[win] return(X) } spatstat/R/hasclose.R0000644000176200001440000001234713333543255014257 0ustar liggesusers#' #' hasclose.R #' #' Determine whether each point has a close neighbour #' #' $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $ has.close <- function(X, r, Y=NULL, ...) { UseMethod("has.close") } has.close.default <- function(X, r, Y=NULL, ..., periodic=FALSE) { trap.extra.arguments(...) if(!periodic) { nd <- if(is.null(Y)) nndist(X) else nncross(X, Y, what="dist") return(nd <= r) } if(is.null(Y)) { pd <- pairdist(X, periodic=TRUE) diag(pd) <- Inf } else { pd <- crossdist(X, Y, periodic=TRUE) } # return(apply(pd <= r, 1, any)) return(matrowany(pd <= r)) } has.close.ppp <- function(X, r, Y=NULL, ..., periodic=FALSE, sorted=FALSE) { trap.extra.arguments(...) 
nX <- npoints(X) if(nX <= 1) return(logical(nX)) #' sort by increasing x coordinate cX <- coords(X) if(!sorted) { oo <- order(cX$x) cX <- cX[oo,,drop=FALSE] } if(is.null(Y)) { if(!periodic) { zz <- .C("hasXclose", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { b <- sidelengths(Frame(X)) zz <- .C("hasXpclose", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), r = as.double(r), b = as.double(b), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } else { stopifnot(is.ppp(Y)) nY <- npoints(Y) if(nY == 0) return(logical(nX)) cY <- coords(Y) #' sort Y by increasing x coordinate if(!sorted) { ooY <- order(cY$x) cY <- cY[ooY, , drop=FALSE] } if(!periodic) { zz <- .C("hasXYclose", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { bX <- sidelengths(Frame(X)) bY <- sidelengths(Frame(Y)) if(any(bX != bY)) warning("Windows are not equal: periodic distance may be erroneous") zz <- .C("hasXYpclose", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), r = as.double(r), b = as.double(bX), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } tt <- as.logical(zz$t) if(sorted) return(tt) #' reinstate original order ans <- logical(nX) ans[oo] <- tt return(ans) } has.close.pp3 <- function(X, r, Y=NULL, ..., periodic=FALSE, sorted=FALSE) { trap.extra.arguments(...) 
nX <- npoints(X) if(nX <= 1) return(logical(nX)) cX <- coords(X) if(!sorted) { #' sort by increasing x coordinate oo <- order(cX$x) cX <- cX[oo,,drop=FALSE] } if(is.null(Y)) { if(!periodic) { zz <- .C("hasX3close", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), z = as.double(cX$z), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { b <- sidelengths(as.box3(X)) zz <- .C("hasX3pclose", n = as.integer(nX), x = as.double(cX$x), y = as.double(cX$y), z = as.double(cX$z), r = as.double(r), b = as.double(b), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } else { stopifnot(is.pp3(Y)) nY <- npoints(Y) if(nY == 0) return(logical(nX)) cY <- coords(Y) if(!sorted) { #' sort Y by increasing x coordinate ooY <- order(cY$x) cY <- cY[ooY, , drop=FALSE] } if(!periodic) { zz <- .C("hasXY3close", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), z1 = as.double(cX$z), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), z2 = as.double(cY$z), r = as.double(r), t = as.integer(integer(nX)), PACKAGE = "spatstat") } else { bX <- sidelengths(as.box3(X)) bY <- sidelengths(as.box3(Y)) if(any(bX != bY)) warning("Domains are not equal: periodic distance may be erroneous") zz <- .C("hasXY3pclose", n1 = as.integer(nX), x1 = as.double(cX$x), y1 = as.double(cX$y), z1 = as.double(cX$z), n2 = as.integer(nY), x2 = as.double(cY$x), y2 = as.double(cY$y), z2 = as.double(cY$z), r = as.double(r), b = as.double(bX), t = as.integer(integer(nX)), PACKAGE = "spatstat") } } tt <- as.logical(zz$t) if(sorted) return(tt) #' reinstate original order ans <- logical(nX) ans[oo] <- tt return(ans) } spatstat/R/multistrhard.R0000644000176200001440000003107713333543255015201 0ustar liggesusers# # # multistrhard.S # # $Revision: 2.39 $ $Date: 2018/03/15 07:37:41 $ # # The multitype Strauss/hardcore process # # MultiStraussHard() # create an instance of the multitype Strauss/ harcore # point process # [an object of class 'interact'] # # 
------------------------------------------------------------------- # doMultiStraussHard <- local({ # ........ define potential ...................... MSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be counted # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be counted # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply the relevant interaction distance to each pair of points rxu <- r[ tx, tu ] str <- (d < rxu) str[is.na(str)] <- FALSE # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } return(z) } # ............... end of potential function ................... # .......... auxiliary functions ................. delMSH <- function(which, types, iradii, hradii, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified MultiStraussHard with fewer gamma parameters return(MultiStraussHard(types, iradii, hradii)) } else if(any(!ihc)) { # no gamma interactions left, but some active hard cores return(MultiHard(types, hradii)) } else return(Poisson()) } # ........................................................... 
# Set up basic object except for family and parameters BlankMSHobject <- list( name = "Multitype Strauss Hardcore process", creator = "MultiStraussHard", family = "pairwise.family", # evaluated later pot = MSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL), # to be added parnames = c("possible types", "interaction distances", "hardcore distances"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii if(!is.null(types) && !is.null(hradii)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) } MultiStraussHard(types=types,hradii=hradii,iradii=self$par$iradii) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii # hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii")) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii")) } ina <- is.na(iradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) if(!is.null(hradii)) { hna <- is.na(hradii) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") } }, update = NULL, # default OK print = function(self) { types <- self$par$types iradii <- self$par$iradii 
hradii <- self$par$hradii nt <- nrow(iradii) if(waxlyrical('gory')) { splat(nt, "types of points") if(!is.null(types)) { splat("Possible types:") print(noquote(types)) } else splat("Possible types:\t not yet determined") } splat("Interaction radii:") dig <- getOption("digits") print(signif(iradii, dig)) if(!is.null(hradii)) { splat("Hardcore radii:") print(signif(hradii, dig)) } else splat("Hardcore radii: not yet determined") invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii # list all relevant unordered pairs of types uptri <- (row(r) <= col(r)) & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=dround(gammas))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # Check that we managed to estimate all required parameters required <- !is.na(iradii) if(!all(is.finite(gamma[required]))) return(FALSE) # Check that the model is integrable # inactive hard cores ... ihc <- (is.na(hradii) | hradii == 0) # .. 
must have gamma <= 1 return(all(gamma[required & ihc] <= 1)) }, project = function(coeffs, self) { # types types <- self$par$types # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # required gamma parameters required <- !is.na(iradii) # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems gammavalid <- is.finite(gamma) & (activehard | gamma <= 1) naughty <- required & !gammavalid if(!any(naughty)) return(NULL) # if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMSH(naughty, types, iradii, hradii, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) # matindex <- function(v) { matrix(c(v, rev(v)), # ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMSH, types=types, iradii=iradii, hradii=hradii, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) hactive <- !is.na(h) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankMSHobject) <- "interact" matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } # Finally define MultiStraussHard function doMultiStraussHard <- function(iradii, hradii=NULL, types=NULL) { iradii[iradii == 0] <- NA if(!is.null(hradii)) hradii[hradii == 0] <- NA out <- instantiate.interact(BlankMSHobject, list(types=types, iradii = iradii, hradii = hradii)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } doMultiStraussHard }) MultiStraussHard <- local({ MultiStraussHard <- function(iradii, hradii, types=NULL) { ## try new syntax newcall <- match.call() newcall[[1]] <- as.name('doMultiStraussHard') out <- try(eval(newcall, parent.frame()), silent=TRUE) if(is.interact(out)) return(out) ## try old syntax oldcall <- match.call(function(types=NULL, iradii, hradii) {}) oldcall[[1]] <- as.name('doMultiStraussHard') out <- try(eval(oldcall, parent.frame()), silent=TRUE) if(is.interact(out)) return(out) ## Syntax is wrong: generate error using new syntax rules if(missing(hradii)) hradii <- NULL doMultiStraussHard(iradii=iradii, hradii=hradii, types=types) } BlankMSHobject <- get("BlankMSHobject", envir=environment(doMultiStraussHard)) MultiStraussHard <- intermaker(MultiStraussHard, BlankMSHobject) MultiStraussHard }) spatstat/R/rcelllpp.R0000644000176200001440000000447413557002462014273 0ustar liggesusers#' rcelllpp.R #' #' Analogue of Baddeley-Silverman cell process for linear network. 
#' #' (plus analogue of Switzer's process) #' #' $Revision: 1.2 $ $Date: 2019/11/01 10:13:02 $ rcelllpp <- local({ rcelllpp <- function(L, lambda, rnumgen=NULL, ..., saveid=FALSE) { if(inherits(L, "lintess")) { LT <- L L <- as.linnet(LT) } else if(inherits(L, "linnet")) { #' default tessellation: each segment is a tile ns <- nsegments(L) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=1:ns) LT <- lintess(L, df) } else stop("L should be a linnet or lintess") #' extract list of tiles df <- LT$df #' add required data df$len <- lengths.psp(as.psp(L))[df$seg] #' generate random points st <- by(df, df$tile, addpoints, lambda=lambda, rnumgen=rnumgen, ...) st <- Reduce(rbind, st) X <- lpp(st, L) if(saveid) attr(X, "cellid") <- marks(cut(X, LT)) return(X) } addpoints <- function(df, lambda=1, rnumgen=NULL, ...) { #' take a subset of the data frame representing one tile of the tessellation #' Add random points in this subset. piecelengths <- df$len tilelength <- sum(piecelengths) mu <- tilelength * lambda n <- if(is.null(rnumgen)) rcellnumber(1, mu=mu) else rnumgen(1, mu, ...) if(n == 0) return(data.frame(seg=integer(0), tp=numeric(0))) u <- runif(n, max=tilelength) csp <- c(0, cumsum(piecelengths)) i <- findInterval(u, csp, rightmost.closed=TRUE, all.inside=TRUE) seg <- df$seg[i] tp <- df$t0[i] + (df$t1 - df$t0)[i] * (u - csp[i])/piecelengths[i] return(data.frame(seg=seg, tp=tp)) } rcelllpp }) rSwitzerlpp <- local({ rSwitzerlpp <- function(L, lambdacut, rintens=rexp, ..., cuts=c("points", "lines")) { stopifnot(inherits(L, "linnet")) cuts <- match.arg(cuts) switch(cuts, points = { X <- rpoislpp(lambdacut, L) LT <- divide.linnet(X) }, lines = { X <- rpoisline(lambdacut, L) X <- attr(X, "lines") LT <- chop.linnet(L, X) }) Z <- rcelllpp(LT, 1, rNswitzer, rintens=rintens, ...) attr(Z, "breaks") <- X return(Z) } rNswitzer <- function(n, mu, rintens=rexp, ...) 
{ rpois(n, mu * rintens(n, ...)) } rSwitzerlpp }) spatstat/R/disc.R0000644000176200001440000000700313333543254013370 0ustar liggesusers## ## disc.R ## ## discs and ellipses ## ## $Revision: 1.18 $ $Date: 2017/01/15 05:25:16 $ ## disc <- local({ indic <- function(x,y,x0,y0,r) { as.integer((x-x0)^2 + (y-y0)^2 < r^2) } disc <- function(radius=1, centre=c(0,0), ..., mask=FALSE, npoly=128, delta=NULL) { check.1.real(radius) stopifnot(radius > 0) centre <- as2vector(centre) if(!missing(npoly) && !is.null(npoly) && !is.null(delta)) stop("Specify either npoly or delta") if(!missing(npoly) && !is.null(npoly)) { stopifnot(length(npoly) == 1) stopifnot(npoly >= 3) } else if(!is.null(delta)) { check.1.real(delta) stopifnot(delta > 0) npoly <- pmax(16, ceiling(2 * pi * radius/delta)) } else npoly <- 128 if(!mask) { theta <- seq(from=0, to=2*pi, length.out=npoly+1)[-(npoly+1L)] x <- centre[1L] + radius * cos(theta) y <- centre[2L] + radius * sin(theta) W <- owin(poly=list(x=x, y=y), check=FALSE) } else { xr <- centre[1L] + radius * c(-1,1) yr <- centre[2L] + radius * c(-1,1) B <- owin(xr,yr) IW <- as.im(indic, B, x0=centre[1L], y0=centre[2L], r=radius, ...) 
W <- levelset(IW, 1, "==") } return(W) } disc }) hexagon <- function(edge=1, centre=c(0,0), ..., align=c("bottom", "top", "left", "right", "no")) { regularpolygon(6, edge, centre, align=align) } regularpolygon <- function(n, edge=1, centre=c(0,0), ..., align=c("bottom", "top", "left", "right", "no")) { check.1.integer(n) check.1.real(edge) stopifnot(n >= 3) stopifnot(edge > 0) align <- match.arg(align) theta <- 2 * pi/n radius <- edge/(2 * sin(theta/2)) result <- disc(radius, centre, npoly=n, mask=FALSE) if(align != "no") { k <- switch(align, bottom = 3/4, top = 1/4, left = 1/2, right = 1) alpha <- theta * (1/2 - (k * n) %% 1) result <- rotate(result, -alpha) } Frame(result) <- boundingbox(result) return(result) } ellipse <- local({ indic <- function(x,y,x0,y0,a,b,co,si){ x <- x-x0 y <- y-y0 as.integer(((x*co + y*si)/a)^2 + ((-x*si + y*co)/b)^2 < 1) } ellipse <- function(a, b, centre=c(0,0), phi=0, ..., mask=FALSE, npoly = 128) { ## Czechs: stopifnot(length(a) == 1) stopifnot(a > 0) stopifnot(length(b) == 1) stopifnot(b > 0) centre <- as2vector(centre) stopifnot(length(phi) == 1) stopifnot(length(npoly) == 1) stopifnot(npoly > 2) ## Rotator cuff: co <- cos(phi) si <- sin(phi) ## Mask: if(mask) { ## Thetas maximizing x and y. tx <- atan(-b*tan(phi)/a) ty <- atan(b/(a*tan(phi))) ## Maximal x and y (for centre = c(0,0)). xm <- a*co*cos(tx) - b*si*sin(tx) ym <- a*si*cos(ty) + b*co*sin(ty) ## Range of x and y. xr <- xm*c(-1,1)+centre[1L] yr <- ym*c(-1,1)+centre[2L] ## Wrecked-angle to contain the mask. B <- as.mask(owin(xr,yr),...) ## Build the mask as a level set. IW <- as.im(indic, B, x0=centre[1L], y0=centre[2L], a=a, b=b, co=co, si=si) return(levelset(IW, 1, "==")) } ## Polygonal. 
## Build "horizontal" ellipse centred at 0: theta <- seq(0, 2 * pi, length = npoly+1)[-(npoly+1L)] xh <- a * cos(theta) yh <- b * sin(theta) ## Rotate through angle phi and shift centre: x <- centre[1L] + co*xh - si*yh y <- centre[2L] + si*xh + co*yh owin(poly=list(x = x, y = y)) } ellipse }) spatstat/R/pcfmulti.R0000644000176200001440000001641513333543255014301 0ustar liggesusers# # pcfmulti.R # # $Revision: 1.8 $ $Date: 2016/09/21 07:28:58 $ # # multitype pair correlation functions # pcfcross <- function(X, i, j, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r","d")) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) ## marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) ## result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname) ## iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(g[i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste(iname, jname, sep=",")))), new.yexp=substitute(g[list(i,j)](r), list(i=iname,j=jname))) return(result) } pcfdot <- function(X, i, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r", "d")) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- "points" result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(g[i ~ dot](r), list(i=iname)), c("g", paste0(iname, "~symbol(\"\\267\")")), new.yexp=substitute(g[i ~ symbol("\267")](r), list(i=iname))) return(result) } pcfmulti <- function(X, I, J, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) divisor <- match.arg(divisor) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) ## .......... indices I and J ............................. I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] # lambdaI <- nI/areaW lambdaJ <- nJ/areaW nIJ <- sum(I & J) lambdaIJarea <- (nI * nJ - nIJ)/areaW ## ........... kernel bandwidth and support ......................... 
if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambdaJ) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambdaJ) } ########## r values ############################ # handle argument r rmaxdefault <- rmax.rule("K", win, lambdaJ) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(I,J)") yexp <- quote(g[list(I,J)](r)) out <- fv(df, "r", quote(g[I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "Pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=yexp) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# ## compute pairwise distances ## identify close pairs of points what <- if(any(correction == "translate")) "all" else "ijd" close <- crosspairs(XI, XJ, rmax+hmax, what=what) ## map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] ## eliminate any identical pairs if(nIJ > 0) { ok <- (iX != jX) if(!all(ok)) close <- as.list(as.data.frame(close)[ok, , drop=FALSE]) } ## extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i # jcloseJ <- close$j ###### compute ####### if(any(correction=="translate")) { # translation correction 
edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) gT <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } ## sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) return(out) } spatstat/R/ppp.R0000644000176200001440000005075313333543255013260 0ustar liggesusers# # ppp.R # # A class 'ppp' to define point patterns # observed in arbitrary windows in two dimensions. # # $Revision: 4.112 $ $Date: 2018/06/03 09:17:56 $ # # A point pattern contains the following entries: # # $window: an object of class 'owin' # defining the observation window # # $n: the number of points (for efficiency) # # $x: # $y: vectors of length n giving the Cartesian # coordinates of the points. # # It may also contain the entry: # # $marks: a vector of length n # whose entries are interpreted as the # 'marks' attached to the corresponding points. # #-------------------------------------------------------------------------- ppp <- function(x, y, ..., window, marks, check=TRUE, checkdup=check, drop=TRUE) { # Constructs an object of class 'ppp' # if(!missing(window)) verifyclass(window, "owin") else window <- owin(...) 
  ## Interior of ppp(): validate coordinates, discard non-finite values,
  ## reject points outside the window, and begin coercing the marks.
  ## An empty pattern is allowed: both coordinate vectors absent,
  ## or both of length zero.
  if((missing(x) && missing(y)) || (length(x) == 0 && length(y) == 0))
    x <- y <- numeric(0)
  n <- length(x)
  if(length(y) != n)
    stop("coordinate vectors x and y are not of equal length")
  # validate x, y coordinates
  stopifnot(is.numeric(x))
  stopifnot(is.numeric(y))
  ## flag coordinates that are NA, NaN or infinite
  good <- is.finite(x) & is.finite(y)
  if(naughty <- !all(good)) {
    #' bad values will be discarded
    ## report NA/NaN values and infinite values separately
    nbad <- sum(!good)
    nna <- sum(is.na(x) | is.na(y))
    ninf <- nbad - nna
    if(nna > 0)
      warning(paste(nna, "out of", n,
                    ngettext(n, "point", "points"),
                    "had NA or NaN coordinate values, and",
                    ngettext(nna, "was", "were"),
                    "discarded"))
    if(ninf > 0)
      warning(paste(ninf, "out of", n,
                    ngettext(n, "point", "points"),
                    "had infinite coordinate values, and",
                    ngettext(ninf, "was", "were"),
                    "discarded"))
    #' chuck out
    x <- x[good]
    y <- y[good]
    n <- sum(good)
  }
  ## discard any names attached to the coordinate vectors
  names(x) <- NULL
  names(y) <- NULL
  # check (x,y) points lie inside window
  if(check && n > 0) {
    ok <- inside.owin(x, y, window)
    nout <- sum(!ok)
    if(nout > 0) {
      warning(paste(nout,
                    ngettext(nout, "point was", "points were"),
                    "rejected as lying outside the specified window"),
              call.=FALSE)
      ## construct a window large enough to contain the rejected points:
      ## Ripley-Rasson estimate (may be NULL) expanded to cover the
      ## bounding box of the data and the specified window
      rr <- ripras(x,y)
      bb <- boundingbox(x,y)
      bb <- boundingbox(rr, bb, window)
      rejectwindow <- if(!is.null(rr)) rebound.owin(rr, bb) else bb
      ## rejected points are kept as a separate pattern,
      ## attached later as attr(, "rejects")
      rejects <- ppp(x[!ok], y[!ok], window=rejectwindow, check=FALSE)
      # discard illegal points
      x <- x[ok]
      y <- y[ok]
      n <- length(x)
    }
  } else nout <- 0
  # initialise ppp object
  pp <- list(window=window, n=n, x=x, y=y)
  # coerce marks to appropriate format
  if(missing(marks)) marks <- NULL
  if(is.hyperframe(marks))
    stop("Hyperframes of marks are not implemented for ppp objects; use ppx")
  if(is.matrix(marks)) marks <- as.data.frame(marks)
  ## drop dimensions?
if(drop && is.data.frame(marks)) { nc <- ncol(marks) if(nc == 0) marks <- NULL else if(nc == 1) marks <- marks[,,drop=TRUE] } # attach marks if(is.null(marks)) { # no marks pp$markformat <- "none" } else if(is.data.frame(marks)) { # data frame of marks pp$markformat <- "dataframe" if(naughty) { #' remove marks attached to discarded points with non-finite coordinates marks <- marks[good, ] } if(nout > 0) { #' sequester marks of points falling outside window marks(rejects) <- marks[!ok,] marks <- marks[ok, ] } if(nrow(marks) != n) stop("number of rows of marks != length of x and y") pp$marks <- marks } else { # should be a vector or factor # To recognise vector, strip attributes isspecial <- is.factor(marks) || inherits(marks, "POSIXt") || inherits(marks, "Date") if(!isspecial) attributes(marks) <- NULL if(!(is.vector(marks) || isspecial)) stop("Format of marks not understood") # OK, it's a vector or factor pp$markformat <- "vector" if(naughty) { #' remove marks attached to discarded points with non-finite coordinates marks <- marks[good] } if(nout > 0) { #' sequester marks of points falling outside window marks(rejects) <- marks[!ok] marks <- marks[ok] } if(length(marks) != n) stop("length of marks vector != length of x and y") names(marks) <- NULL pp$marks <- marks } class(pp) <- "ppp" if(checkdup && anyDuplicated(pp)) warning("data contain duplicated points", call.=FALSE) if(nout > 0) attr(pp, "rejects") <- rejects pp } # #-------------------------------------------------------------------------- # is.ppp <- function(x) { inherits(x, "ppp") } # #-------------------------------------------------------------------------- # as.ppp <- function(X, ..., fatal=TRUE) { UseMethod("as.ppp") } as.ppp.ppp <- function(X, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=FALSE))$check return(ppp(X$x, X$y, window=X$window, marks=X$marks, check=check)) } as.ppp.quad <- function(X, ..., fatal=TRUE) { return(union.quad(X)) } as.ppp.data.frame <- function(X, W = 
NULL, ..., fatal=TRUE) { X <- as.data.frame(X) #' swim against the tidyverse check <- resolve.defaults(list(...), list(check=TRUE))$check if(ncol(X) < 2) return(complaining("X must have at least two columns", fatal, value=NULL)) if(is.null(W)) return(complaining("x,y coords given but no window specified", fatal, value=NULL)) # columns 1 and 2 are assumed to be coordinates # marks from other columns marx <- if(ncol(X) > 2) X[, -(1:2)] else NULL if(is.function(W)) Z <- cobble.xy(X[,1], X[,2], W, fatal, marks=marx, check=check) else { win <- as.owin(W) Z <- ppp(X[,1], X[,2], window = win, marks=marx, check=check) } return(Z) } as.ppp.matrix <- function(X, W = NULL, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=TRUE))$check if(!verifyclass(X, "matrix", fatal=fatal) || !is.numeric(X)) return(complaining("X must be a numeric matrix", fatal, value=NULL)) if(ncol(X) < 2) return(complaining("X must have at least two columns", fatal, value=NULL)) if(is.null(W)) return(complaining("x,y coords given but no window specified", fatal, value=NULL)) if(is.function(W)) Z <- cobble.xy(X[,1], X[,2], W, fatal) else { win <- as.owin(W) Z <- ppp(X[,1], X[,2], window = win, check=check) } # add marks from other columns if(ncol(X) > 2) marks(Z) <- X[, -(1:2)] return(Z) } as.ppp.default <- function(X, W=NULL, ..., fatal=TRUE) { # tries to coerce data X to a point pattern # X may be: # 1. a structure with entries x, y, xl, xu, yl, yu # 2. a structure with entries x, y, area where # 'area' has entries xl, xu, yl, yu # 3. a structure with entries x, y # 4. a vector of length 2, interpreted as a single point. # The second argument W is coerced to an object of class 'owin' by the # function "as.owin" in window.S # If X also has an entry X$marks # then this will be interpreted as the marks vector for the pattern. 
# check <- resolve.defaults(list(...), list(check=TRUE))$check if(checkfields(X, c("x", "y", "xl", "xu", "yl", "yu"))) { xrange <- c(X$xl, X$xu) yrange <- c(X$yl, X$yu) if(is.null(X$marks)) Z <- ppp(X$x, X$y, xrange, yrange, check=check) else Z <- ppp(X$x, X$y, xrange, yrange, marks=X$marks, check=check) return(Z) } else if(checkfields(X, c("x", "y", "area")) && checkfields(X$area, c("xl", "xu", "yl", "yu"))) { win <- as.owin(X$area) if (is.null(X$marks)) Z <- ppp(X$x, X$y, window=win, check=check) else Z <- ppp(X$x, X$y, window=win, marks = X$marks, check=check) return(Z) } else if(checkfields(X, c("x", "y"))) { if(is.function(W)) return(cobble.xy(X$x, X$y, W, fatal)) if(is.null(W)) { if(fatal) stop("x,y coords given but no window specified") else return(NULL) } win <- as.owin(W) if(is.null(X$marks)) Z <- ppp(X$x, X$y, window=win, check=check) else Z <- ppp(X$x, X$y, window=win, marks=X$marks, check=check) return(Z) } else if(is.vector(X) && length(X) == 2) { win <- as.owin(W) Z <- ppp(X[1], X[2], window=win, check=check) return(Z) } else { if(fatal) stop("Can't interpret X as a point pattern") else return(NULL) } } cobble.xy <- function(x, y, f=ripras, fatal=TRUE, ...) 
{
  ## body of cobble.xy (header on preceding line): apply the user-supplied
  ## window-making function f to the coordinates, verify it returned a
  ## window, and construct the point pattern
  if(!is.function(f))
    stop("f is not a function")
  w <- f(x,y)
  if(!is.owin(w)) {
    gripe <- "Supplied function f did not return an owin object"
    if(fatal) stop(gripe)
    else {
      warning(gripe)
      return(NULL)
    }
  }
  return(ppp(x, y, window=w, ...))
}

# --------------------------------------------------------------

## Subset operator for point patterns.
##   i may be a window (owin), a logical-valued pixel image (im),
##   or an ordinary subset index.
##   drop=TRUE also drops unused factor levels in the marks.
##   clip=TRUE intersects a window/image subset with the original window.
"[.ppp" <-
  function(x, i, j, drop=FALSE, ..., clip=FALSE) {
    verifyclass(x, "ppp")
    if(!missing(i)) {
      if(inherits(i, "owin")) {
        # i is a window
        window <- i
        if(clip) window <- intersect.owin(window, x$window)
        if(is.vanilla(unitname(window)))
          unitname(window) <- unitname(x)
        ok <- inside.owin(x$x, x$y, window)
        x <- ppp(x$x[ok], x$y[ok], window=window, #SIC
                 marks=marksubset(x$marks, ok), check=FALSE)
      } else if(inherits(i, "im")) {
        # i is an image
        if(i$type != "logical")
          stop(paste("Subset operator X[i] undefined",
                     "when i is a pixel image",
                     "unless it has logical values"), call.=FALSE)
        # convert logical image to window
        e <- sys.frame(sys.nframe())
        window <- solutionset(i, e)
        if(clip) window <- intersect.owin(window, x$window)
        ok <- inside.owin(x$x, x$y, window)
        x <- ppp(x$x[ok], x$y[ok], window=window, #SIC
                 marks=marksubset(x$marks, ok), check=FALSE)
      } else {
        # assume i is a subset index
        nx <- x$n
        if(nx == 0) return(x)
        subset <- seq_len(nx)[i]
        if(anyNA(subset))
          stop("Index out of bounds in [.ppp", call.=FALSE)
        x <- ppp(x$x[subset], x$y[subset], window=x$window,
                 marks=marksubset(x$marks, subset), check=FALSE)
      }
    }
    if(!missing(j))
      x <- x[j] # invokes code above
    if(drop) {
      #' drop unused factor levels
      mx <- x$marks
      switch(markformat(mx),
             none = { },
             vector = {
               if(is.factor(mx))
                 marks(x) <- factor(mx) # this preserves order of levels
             },
             dataframe = {
               #' must be an actual data frame, not a matrix
               if(is.data.frame(mx)) {
                 ml <- as.list(mx)
                 isfac <- sapply(ml, is.factor)
                 if(any(isfac)) {
                   mx[, isfac] <- as.data.frame(lapply(ml[isfac], factor))
                   ## BUG FIX: the refactored data frame was previously
                   ## computed but never assigned back to x, so unused
                   ## factor levels were not actually dropped for
                   ## data-frame marks
                   marks(x) <- mx
                 }
               }
             },
             hyperframe = { })
    }
    return(x)
  }

# ------------------------------------------------------------------
#
#
scanpp <- function(filename,
window, header=TRUE, dir="", factor.marks = NULL, ...) { filename <- if(dir=="") filename else paste(dir, filename, sep=.Platform$file.sep) df <- read.table(filename, header=header, stringsAsFactors = is.null(factor.marks)) if(header) { # check whether there are columns named 'x' and 'y' colnames <- dimnames(df)[[2]] xycolumns <- match(c("x", "y"), colnames, 0) named <- all(xycolumns > 0) } else { named <- FALSE } if(named) { x <- df$x y <- df$y } else { # assume x, y given in columns 1, 2 respectively x <- df[,1] y <- df[,2] xycolumns <- c(1,2) } if(ncol(df) == 2) X <- ppp(x, y, window=window) else { # Catch old argument "multitype": dots <- list(...) multi <- charmatch(names(dots), "multitype") argindex <- which(!is.na(multi)) if(length(argindex)>0){ if(missing(factor.marks)){ factor.marks <- dots[[argindex]] ignored <- "" } else{ ignored <- paste(" and it is ignored since", sQuote("factor.marks"), "is also supplied") } warning("It appears you have called scanpp ", " with (something partially matching) ", " the deprecated argument ", paste0(sQuote("multitype"), ignored, "."), " Please change to the new syntax.") } marks <- df[ , -xycolumns, drop=FALSE] if(any(factor.marks)){ # Find indices to convert to factors (recycling to obtain correct length) factorid <- (1:ncol(marks))[factor.marks] # Convert relevant columns to factors marks[,factorid] <- lapply(marks[,factorid,drop=FALSE], factor) } X <- ppp(x, y, window=window, marks = marks) } X } #------------------------------------------------------------------- "markspace.integral" <- function(X) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) return(1) if(is.multitype(X)) return(length(levels(marks(X)))) else stop("Don't know how to compute total mass of mark space") } #------------------------------------------------------------------- print.ppp <- function(x, ...) 
{ verifyclass(x, "ppp") ism <- is.marked(x, dfok=TRUE) nx <- x$n splat(if(ism) "Marked planar" else "Planar", "point pattern:", nx, ngettext(nx, "point", "points")) if(ism) { mks <- marks(x, dfok=TRUE) if(is.data.frame(mks)) { ## data frame of marks exhibitStringList("Mark variables:", names(mks)) } else { ## vector of marks if(is.factor(mks)) { exhibitStringList("Multitype, with levels =", levels(mks)) } else { ## Numeric, or could be dates if(inherits(mks, "Date")) { splat("marks are dates, of class", sQuote("Date")) } else if(inherits(mks, "POSIXt")) { splat("marks are dates, of class", sQuote("POSIXt")) } else { splat(paste0("marks are", if(is.numeric(mks)) " numeric," else NULL), "of storage type ", sQuote(typeof(mks))) } } } } print(x$window) terselevel <- spatstat.options('terse') if(waxlyrical('errors', terselevel) && !is.null(rejects <- attr(x, "rejects"))) { nrejects <- rejects$n splat("***", nrejects, ngettext(nrejects, "illegal point", "illegal points"), "stored in", paste0("attr(,", dQuote("rejects"), ")"), "***") } if(waxlyrical('extras', terselevel) && !is.null(info <- attr(x, "info")) && inherits(info, "rmhInfoList")) splat("Pattern was generated by", if(is.poisson(info$model)) "Poisson" else "Metropolis-Hastings", "simulation.") return(invisible(NULL)) } summary.ppp <- function(object, ..., checkdup=TRUE) { verifyclass(object, "ppp") result <- list() result$is.marked <- is.marked(object, dfok=TRUE) result$n <- object$n result$window <- summary(object$window) result$intensity <- result$n/result$window$area if(checkdup) { result$nduplicated <- sum(duplicated(object)) result$rounding <- rounding(object) } if(result$is.marked) { mks <- marks(object, dfok=TRUE) if(result$multiple.marks <- is.data.frame(mks)) { result$marknames <- names(mks) result$is.numeric <- FALSE result$marktype <- "dataframe" result$is.multitype <- FALSE } else { result$is.numeric <- is.numeric(mks) result$marknames <- "marks" result$marktype <- typeof(mks) result$is.multitype <- 
is.multitype(object) } if(result$is.multitype) { tm <- as.vector(table(mks)) tfp <- data.frame(frequency=tm, proportion=tm/sum(tm), intensity=tm/result$window$area, row.names=levels(mks)) result$marks <- tfp } else result$marks <- summary(mks) } class(result) <- "summary.ppp" if(!is.null(rejects <- attr(object, "rejects"))) result$rejects <- rejects$n if(!is.null(info <- attr(object, "info")) && inherits(info, "rmhInfoList")) result$rmhinfo <- info return(result) } print.summary.ppp <- function(x, ..., dp=getOption("digits")) { verifyclass(x, "summary.ppp") terselevel <- spatstat.options("terse") splat(if(x$is.marked) "Marked planar" else "Planar", "point pattern: ", x$n, "points") oneline <- resolve.defaults(list(...), list(oneline=FALSE))$oneline if(oneline) return(invisible(NULL)) unitinfo <- summary(x$window$units) splat("Average intensity", signif(x$intensity,dp), "points per square", unitinfo$singular, unitinfo$explain) ndup <- x$nduplicated if(waxlyrical('extras', terselevel) && !is.null(ndup) && (ndup > 0)) { parbreak(terselevel) splat("*Pattern contains duplicated points*") } rndg <- x$rounding if(waxlyrical('gory', terselevel) && !is.null(rndg)) { cat("\n") if(rndg >= 1) { cat("Coordinates are", "given to", rndg, "decimal", ngettext(rndg, "place", "places"), fill=TRUE) if(rndg <= 3) { cat("i.e. rounded to", "the nearest", "multiple of", 10^(-rndg), unitinfo$plural, unitinfo$explain, fill=TRUE) } } else if(rndg == 0) { cat("Coordinates are", "integers", fill=TRUE) cat("i.e. 
rounded to", "the nearest", unitinfo$singular, unitinfo$explain, fill=TRUE) } else { cat("Coordinates are", "multiples of", 10^(-rndg), unitinfo$plural, unitinfo$explain, fill=TRUE) } parbreak(terselevel) } if(x$is.marked) { if(x$multiple.marks) { splat("Mark variables:", commasep(x$marknames, ", ")) cat("Summary:\n") print(x$marks) } else if(x$is.multitype) { cat("Multitype:\n") print(signif(x$marks,dp)) } else { splat("marks are ", if(x$is.numeric) "numeric, ", "of type ", sQuote(x$marktype), sep="") cat("Summary:\n") print(x$marks) } parbreak(terselevel) } if(waxlyrical('extras', terselevel)) print(x$window) if(waxlyrical('errors', terselevel) && !is.null(nrejects <- x$rejects)) { parbreak(terselevel) splat("***", nrejects, ngettext(nrejects, "illegal point", "illegal points"), "stored in", paste("attr(,", dQuote("rejects"), ")", sep=""), "***") } if(waxlyrical('gory', terselevel) && !is.null(info <- x$rmhinfo)) { cat("\nPattern was generated by", "Metropolis-Hastings algorithm rmh", fill=TRUE) print(info) } return(invisible(x)) } # --------------------------------------------------------------- identify.ppp <- function(x, ...) { verifyclass(x, "ppp") id <- identify(x$x, x$y, ...) if(!is.marked(x)) return(id) marks <- as.data.frame(x)[id, -(1:2)] out <- cbind(data.frame(id=id), marks) row.names(out) <- NULL return(out) } rebound <- function(x, rect) { UseMethod("rebound") } rebound.ppp <- function(x, rect) { verifyclass(x, "ppp") x$window <- rebound.owin(x$window, rect) return(x) } as.data.frame.ppp <- function(x, row.names=NULL, ...) 
{ df <- data.frame(x=x$x, y=x$y, row.names=row.names) marx <- marks(x, dfok=TRUE) if(is.null(marx)) return(df) if(is.data.frame(marx)) df <- cbind(df, marx) else df <- data.frame(df, marks=marx) return(df) } is.empty.ppp <- function(x) { return(x$n == 0) } npoints <- function(x) { UseMethod("npoints") } nobjects <- function(x) { UseMethod("nobjects") } nobjects.ppp <- npoints.ppp <- function(x) { x$n } domain.ppp <- Window.ppp <- function(X, ...) { as.owin(X) } "Window<-.ppp" <- function(X, ..., value) { verifyclass(value, "owin") return(X[value]) } "Frame<-.ppp" <- function(X, value) { Frame(Window(X)) <- value return(X) } spatstat/R/envelope.R0000644000176200001440000024231113575046500014266 0ustar liggesusers# # envelope.R # # computes simulation envelopes # # $Revision: 2.107 $ $Date: 2019/12/14 01:58:20 $ # envelope <- function(Y, fun, ...) { UseMethod("envelope") } # ................................................................. # A 'simulation recipe' contains the following variables # # type = Type of simulation # "csr": uniform Poisson process # "rmh": simulated realisation of fitted Gibbs or Poisson model # "kppm": simulated realisation of fitted cluster model # "expr": result of evaluating a user-supplied expression # "list": user-supplied list of point patterns # # expr = expression that is repeatedly evaluated to generate simulations # # envir = environment in which to evaluate the expression `expr' # # 'csr' = TRUE iff the model is (known to be) uniform Poisson # # pois = TRUE if model is known to be Poisson # # constraints = additional information about simulation (e.g. 'with fixed n') # # ................................................................... 
## Construct a 'simulation recipe': a complete specification of how
## simulated point patterns are to be generated inside envelope().
## Components (see the comment block above for details):
##   type        character: "csr", "rmh", "kppm", "expr" or "list"
##   expr        expression evaluated repeatedly to generate one simulation
##   envir       environment in which 'expr' is evaluated
##   csr         TRUE iff the model is (known to be) uniform Poisson
##   pois        TRUE iff the model is known to be Poisson (defaults to csr)
##   constraints explanatory string, e.g. "with fixed number of points"
simulrecipe <- function(type, expr, envir, csr, pois=csr, constraints="") {
  ## CSR is a special case of a Poisson model,
  ## so csr=TRUE with pois=FALSE is inconsistent
  if(csr && !pois)
    warning("Internal error: csr=TRUE but pois=FALSE")
  out <- list(type=type,
              expr=expr,
              envir=envir,
              csr=csr,
              pois=pois,
              constraints=constraints)
  class(out) <- "simulrecipe"
  out
}

## Simulation envelopes of a summary function for a point pattern.
## This method builds a simulation recipe appropriate to the arguments
## (user-supplied 'simulate', CSR, or binomial with fixed n and/or
## fixed marks) and delegates the computation to envelopeEngine().
envelope.ppp <- function(Y, fun=Kest, nsim=99, nrank=1, ...,
                         funargs=list(), funYargs=funargs,
                         simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
                         verbose=TRUE, clipdata=TRUE,
                         transform=NULL, global=FALSE, ginterval=NULL,
                         use.theory=NULL,
                         alternative=c("two.sided", "less", "greater"),
                         scale=NULL, clamp=FALSE,
                         savefuns=FALSE, savepatterns=FALSE,
                         nsim2=nsim,
                         VARIANCE=FALSE, nSD=2,
                         Yname=NULL,
                         maxnerr=nsim, rejectNA=FALSE, silent=FALSE,
                         do.pwrong=FALSE, envir.simul=NULL) {
  cl <- short.deparse(sys.call())
  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
  if(is.null(fun)) fun <- Kest
  ## environment in which user-supplied simulation expressions are evaluated
  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
  envir.here <- sys.frame(sys.nframe())
  ismarked <- is.marked(Y)
  ismulti <- is.multitype(Y)
  ## fixing the marks only makes sense for a marked pattern
  fix.marks <- fix.marks && ismarked
  if(!is.null(simulate)) {
    # ...................................................
    # Simulations are determined by 'simulate' argument
    if(fix.n || fix.marks)
      warning("fix.n and fix.marks were ignored, because 'simulate' was given")
    # Processing is deferred to envelopeEngine
    simrecipe <- simulate
    # Data pattern is argument Y
    X <- Y
  } else if(!fix.n && !fix.marks) {
    # ...................................................
# Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y, checkdup=FALSE) Yintens <- sY$intensity nY <- npoints(Y) Ywin <- Y$window Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { # unmarked point pattern expression(rpoispp(Yintens, win=Ywin)) } else if(is.null(dim(Ymarx))) { # single column of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { # multiple columns of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, , drop=FALSE] }) } dont.complain.about(Yintens, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE) } else if(fix.marks) { # ................................................... # Data pattern is argument Y X <- Y # Realisations of binomial process # with fixed number of points and fixed marks # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- expression(runifpoint(nY, Ywin) %mark% Ymarx) # suppress warnings from code checkers dont.complain.about(nY, Ywin, Ymarx) # simulation constraints (explanatory string) constraints <- if(ismulti) "with fixed number of points of each type" else "with fixed number of points and fixed marks" # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = constraints) } else { # ................................................... 
# Data pattern is argument Y X <- Y # Realisations of binomial process # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { ## unmarked expression(runifpoint(nY, Ywin)) } else if(is.null(dim(Ymarx))) { ## single column of marks expression({ A <- runifpoint(nY, Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { ## multiple columns of marks expression({ A <- runifpoint(nY, Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, ,drop=FALSE] }) } dont.complain.about(nY, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = "with fixed number of points") } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.ppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else 
parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- data.ppm(Y) if(is.null(simulate)) { # ................................................... # Simulated realisations of the fitted model Y # will be generated pois <- is.poisson(Y) csr <- is.stationary(Y) && pois type <- if(csr) "csr" else "rmh" # Set up parameters for rmh rmodel <- rmhmodel(Y, verbose=FALSE) if(is.null(start)) start <- list(n.start=npoints(X)) rstart <- rmhstart(start) rcontr <- rmhcontrol(control) if(fix.marks) { rcontr <- update(rcontr, fixall=TRUE, p=1, expand=1) nst <- if(is.multitype(X)) table(marks(X)) else npoints(X) rstart <- update(rstart, n.start=nst) constraints <- "with fixed number of points of each type" } else if(fix.n) { rcontr <- update(rcontr, p=1, expand=1) rstart <- update(rstart, n.start=X$n) constraints <- "with fixed number of points" } else constraints <- "" # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=FALSE) # expression that will be evaluated simexpr <- expression(rmhEngine(rmhinfolist, verbose=FALSE)) dont.complain.about(rmhinfolist) # evaluate in THIS environment simrecipe <- simulrecipe(type = type, expr = simexpr, envir = envir.here, csr = csr, pois = pois, constraints = constraints) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.kppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- Y$X if(is.null(simulate)) { # Simulated realisations of the fitted model Y # will be generated using simulate.kppm kmodel <- Y # expression that will be evaluated simexpr <- expression(simulate(kmodel)[[1L]]) dont.complain.about(kmodel) # evaluate in THIS environment simrecipe <- simulrecipe(type = "kppm", expr = simexpr, envir = envir.here, csr = FALSE, pois = FALSE) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } ## ................................................................. ## Engine for simulating and computing envelopes ## ................................................................. # # X is the data point pattern, which could be ppp, pp3, ppx etc # X determines the class of pattern expected from the simulations # envelopeEngine <- function(X, fun, simul, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, saveresultof=NULL, weights=NULL, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, maxerr.action=c("fatal", "warn", "null"), internal=NULL, cl=NULL, envir.user=envir.user, expected.arg="r", do.pwrong=FALSE, foreignclass=NULL, collectrubbish=FALSE) { # envir.here <- sys.frame(sys.nframe()) alternative <- match.arg(alternative) maxerr.action <- match.arg(maxerr.action) foreignclass <- as.character(foreignclass) if(length(foreignclass) != 0 && clipdata) { warning(paste("Ignoring clipdata=TRUE:", "I don't know how to clip objects of class", sQuote(paste(foreignclass, collapse=",")))) clipdata <- FALSE } # ---------------------------------------------------------- # Determine Simulation # 
---------------------------------------------------------- # Identify class of patterns to be simulated, from data pattern X Xclass <- if(is.ppp(X)) "ppp" else if(is.pp3(X)) "pp3" else if(is.ppx(X)) "ppx" else if(inherits(X, foreignclass)) foreignclass else stop("Unrecognised class of point pattern") Xobjectname <- paste("point pattern of class", sQuote(Xclass)) # Option to use weighted average if(use.weights <- !is.null(weights)) { # weight can be either a numeric vector or a function if(is.numeric(weights)) { compute.weights <- FALSE weightfun <- NULL } else if(is.function(weights)) { compute.weights <- TRUE weightfun <- weights weights <- NULL } else stop("weights should be either a function or a numeric vector") } else compute.weights <- FALSE # Undocumented option to generate patterns only. patterns.only <- identical(internal$eject, "patterns") # Undocumented option to evaluate 'something' for each pattern if(savevalues <- !is.null(saveresultof)) { stopifnot(is.function(saveresultof)) SavedValues <- list() } # Identify type of simulation from argument 'simul' if(inherits(simul, "simulrecipe")) { # .................................................. # simulation recipe is given simtype <- simul$type simexpr <- simul$expr envir <- simul$envir csr <- simul$csr pois <- simul$pois constraints <- simul$constraints } else { # ................................................... 
# simulation is specified by argument `simulate' to envelope() simulate <- simul # which should be an expression, or a list of point patterns, # or an envelope object, or a function to be applied to the data csr <- FALSE # override if(!is.null(icsr <- internal$csr)) csr <- icsr pois <- csr constraints <- "" # model <- NULL if(inherits(simulate, "envelope")) { # envelope object: see if it contains stored point patterns simpat <- attr(simulate, "simpatterns") if(!is.null(simpat)) simulate <- simpat else stop(paste("The argument", sQuote("simulate"), "is an envelope object but does not contain", "any saved point patterns.")) } if(is.expression(simulate)) { ## The user-supplied expression 'simulate' will be evaluated repeatedly simtype <- "expr" simexpr <- simulate envir <- envir.user } else if(is.function(simulate)) { ## User-supplied function 'simulate' will be repeatedly evaluated on X simtype <- "func" simexpr <- expression(simulate(X)) envir <- envir.here } else if(is.list(simulate) && all(sapply(simulate, inherits, what=Xclass))) { # The user-supplied list of point patterns will be used simtype <- "list" SimDataList <- simulate # expression that will be evaluated simexpr <- expression(SimDataList[[i+nerr]]) dont.complain.about(SimDataList) envir <- envir.here # ensure that `i' is defined i <- 1L nerr <- 0L maxnerr <- min(length(SimDataList)-nsim, maxnerr) # any messages? 
if(!is.null(mess <- attr(simulate, "internal"))) { # determine whether these point patterns are realisations of CSR csr <- !is.null(mc <- mess$csr) && mc } } else stop(paste(sQuote("simulate"), "should be an expression,", "or a list of point patterns of the same kind as X")) } # ------------------------------------------------------------------- # Determine clipping window # ------------------------------------------------------------------ if(clipdata) { # Generate one realisation Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), func=stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) # Extract window clipwin <- Xsim$window if(!is.subset.owin(clipwin, X$window)) warning("Window containing simulated patterns is not a subset of data window") } # ------------------------------------------------------------------ # Summary function to be applied # ------------------------------------------------------------------ if(is.null(fun)) stop("Internal error: fun is NULL") # Name of function, for error messages fname <- if(is.name(substitute(fun))) short.deparse(substitute(fun)) else if(is.character(fun)) fun else "fun" fname <- sQuote(fname) # R function to apply if(is.character(fun)) { gotfun <- try(get(fun, mode="function")) if(inherits(gotfun, "try-error")) stop(paste("Could not find a function named", sQuote(fun))) fun <- gotfun } else if(!is.function(fun)) stop(paste("unrecognised format for function", fname)) fargs <- names(formals(fun)) 
if(!any(c(expected.arg, "...") %in% fargs)) stop(paste(fname, "should have", ngettext(length(expected.arg), "an argument", "arguments"), "named", commasep(sQuote(expected.arg)), "or a", sQuote("..."), "argument")) usecorrection <- any(c("correction", "...") %in% fargs) # --------------------------------------------------------------------- # validate other arguments if((nrank %% 1) != 0) stop("nrank must be an integer") if((nsim %% 1) != 0) stop("nsim must be an integer") stopifnot(nrank > 0 && nrank < nsim/2) rgiven <- any(expected.arg %in% names(list(...))) if(tran <- !is.null(transform)) { stopifnot(is.expression(transform)) # prepare expressions to be evaluated each time transform.funX <- inject.expr("with(funX,.)", transform) transform.funXsim <- inject.expr("with(funXsim,.)", transform) # .... old code using 'eval.fv' ...... # transform.funX <- dotexpr.to.call(transform, "funX", "eval.fv") # transform.funXsim <- dotexpr.to.call(transform, "funXsim", "eval.fv") # 'transform.funX' and 'transform.funXsim' are unevaluated calls to eval.fv } if(!is.null(ginterval)) stopifnot(is.numeric(ginterval) && length(ginterval) == 2) # --------------------------------------------------------------------- # Evaluate function for data pattern X # ------------------------------------------------------------------ Xarg <- if(!clipdata) X else X[clipwin] corrx <- if(usecorrection) list(correction="best") else NULL funX <- do.call(fun, resolve.defaults(list(Xarg), list(...), funYargs, corrx)) if(!inherits(funX, "fv")) stop(paste("The function", fname, "must return an object of class", sQuote("fv"))) ## catch 'conservation' parameters conserveargs <- attr(funX, "conserve") if(!is.null(conserveargs) && !any(c("conserve", "...") %in% fargs)) stop(paste("In this usage, the function", fname, "should have an argument named 'conserve' or '...'")) ## warn about 'dangerous' arguments if(!is.null(dang <- attr(funX, "dangerous")) && any(uhoh <- dang %in% names(list(...)))) { nuh <- sum(uhoh) 
warning(paste("Envelope may be invalid;", ngettext(nuh, "argument", "arguments"), commasep(sQuote(dang[uhoh])), ngettext(nuh, "appears", "appear"), "to have been fixed."), call.=FALSE) } argname <- fvnames(funX, ".x") valname <- fvnames(funX, ".y") has.theo <- "theo" %in% fvnames(funX, "*") csr.theo <- csr && has.theo use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo) if(tran) { # extract only the recommended value if(use.theory) funX <- funX[, c(argname, valname, "theo")] else funX <- funX[, c(argname, valname)] # apply the transformation to it funX <- eval(transform.funX) } rvals <- funX[[argname]] # fX <- funX[[valname]] # default domain over which to maximise alim <- attr(funX, "alim") if(global && is.null(ginterval)) ginterval <- if(rgiven || is.null(alim)) range(rvals) else alim #-------------------------------------------------------------------- # Determine number of simulations # ------------------------------------------------------------------ # ## determine whether dual simulations are required ## (one set of simulations to calculate the theoretical mean, ## another independent set of simulations to obtain the critical point.) 
dual <- (global && !use.theory && !VARIANCE) Nsim <- if(!dual) nsim else (nsim + nsim2) # if taking data from a list of point patterns, # check there are enough of them if(simtype == "list" && Nsim > length(SimDataList)) stop(paste("Number of simulations", paren(if(!dual) paste(nsim) else paste(nsim, "+", nsim2, "=", Nsim) ), "exceeds number of point pattern datasets supplied")) # Undocumented secret exit # ------------------------------------------------------------------ if(patterns.only) { # generate simulated realisations and return only these patterns if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", expr = "simulations by evaluating expression", func = "simulations by evaluating function", list = "point patterns from list", "simulated realisations") if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" splat(action, Nsim, descrip, explan, "...") } XsimList <- list() # start simulation loop sstate <- list() for(i in 1:Nsim) { if(verbose) sstate <- progressreport(i, Nsim, state=sstate) Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr={ stop(paste("Internal error:", Xobjectname, "not generated")) }, rmh={ stop(paste("Internal error: rmh did not return an", Xobjectname)) }, kppm={ stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)) }, expr={ stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)) }, func={ stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)) }, list={ stop(paste("Internal error: list entry was not an", Xobjectname)) }, stop(paste("Internal error:", 
Xobjectname, "not generated")) ) XsimList[[i]] <- Xsim } if(verbose) { cat(paste("Done.\n")) flush.console() } attr(XsimList, "internal") <- list(csr=csr) return(XsimList) } # capture main decision parameters envelopeInfo <- list(call=cl, Yname=Yname, valname=valname, csr=csr, csr.theo=csr.theo, use.theory=use.theory, pois=pois, simtype=simtype, constraints=constraints, nrank=nrank, nsim=nsim, Nsim=Nsim, global=global, ginterval=ginterval, dual=dual, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, alternative=alternative, scale=scale, clamp=clamp, use.weights=use.weights, do.pwrong=do.pwrong) # ---------------------------------------- ######### SIMULATE ####################### # ---------------------------------------- if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", expr = "simulations by evaluating expression", func = "simulations by evaluating function", list = "point patterns from list", "simulated patterns") if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" splat(action, Nsim, descrip, explan, "...") } # determine whether simulated point patterns should be saved catchpatterns <- savepatterns && simtype != "list" Caughtpatterns <- list() # allocate space for computed function values nrvals <- length(rvals) simvals <- matrix(, nrow=nrvals, ncol=Nsim) # allocate space for weights to be computed if(compute.weights) weights <- numeric(Nsim) # inferred values of function argument 'r' or equivalent parameters if(identical(expected.arg, "r")) { # Kest, etc inferred.r.args <- list(r=rvals) } else if(identical(expected.arg, c("rmax", "nrval"))) { # K3est, etc inferred.r.args <- list(rmax=max(rvals), 
nrval=length(rvals)) } else stop(paste("Don't know how to infer values of", commasep(expected.arg))) # arguments for function when applied to simulated patterns funargs <- resolve.defaults(funargs, inferred.r.args, list(...), conserveargs, if(usecorrection) list(correction="best") else NULL) # reject simulated pattern if function values are all NA (etc) rejectNA <- isTRUE(rejectNA) # start simulation loop nerr <- 0 gaveup <- FALSE if(verbose) pstate <- list() for(i in 1:Nsim) { ## safely generate a random pattern and apply function success <- FALSE while(!success && !gaveup) { Xsim <- eval(simexpr, envir=envir) ## check valid point pattern if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error:", "simulate.kppm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), func=stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) if(catchpatterns) Caughtpatterns[[i]] <- Xsim if(savevalues) SavedValues[[i]] <- saveresultof(Xsim) if(compute.weights) { wti <- weightfun(Xsim) if(!is.numeric(wti)) stop("weightfun did not return a numeric value") if(length(wti) != 1L) stop("weightfun should return a single numeric value") weights[i] <- wti } ## apply function safely funXsim <- try(do.call(fun, c(list(Xsim), funargs)), silent=silent) success <- !inherits(funXsim, "try-error") && inherits(funXsim, "fv") && (!rejectNA || any(is.finite(funXsim[[valname]]))) if(!success) { #' error in computing summary function nerr <- nerr + 1L if(nerr > maxnerr) { gaveup <- TRUE errtype <- if(rejectNA) "fatal errors or NA function values" if(simtype == "list") { whinge <- paste("Exceeded 
maximum possible number of errors", "when evaluating summary function:", length(SimDataList), "patterns provided,", nsim, "patterns required,", nerr, ngettext(nerr, "pattern", "pattern"), "rejected due to", errtype) } else { whinge <- paste("Exceeded maximum permissible number of", errtype, paren(paste("maxnerr =", maxnerr)), "when evaluating summary function", "for simulated point patterns") } switch(maxerr.action, fatal = stop(whinge, call.=FALSE), warn = warning(whinge, call.=FALSE), null = {}) } else if(!silent) cat("[retrying]\n") } #' ..... end of while(!success) ................ } if(gaveup) break; # exit loop now ## sanity checks if(i == 1L) { if(!inherits(funXsim, "fv")) stop(paste("When applied to a simulated pattern, the function", fname, "did not return an object of class", sQuote("fv"))) argname.sim <- fvnames(funXsim, ".x") valname.sim <- fvnames(funXsim, ".y") if(argname.sim != argname) stop(paste("The objects returned by", fname, "when applied to a simulated pattern", "and to the data pattern", "are incompatible. They have different argument names", sQuote(argname.sim), "and", sQuote(argname), "respectively")) if(valname.sim != valname) stop(paste("When", fname, "is applied to a simulated pattern", "it provides an estimate named", sQuote(valname.sim), "whereas the estimate for the data pattern is named", sQuote(valname), ". 
Try using the argument", sQuote("correction"), "to make them compatible")) rfunX <- with(funX, ".x") rfunXsim <- with(funXsim, ".x") if(!identical(rfunX, rfunXsim)) stop(paste("When", fname, "is applied to a simulated pattern,", "the values of the argument", sQuote(argname.sim), "are different from those used for the data.")) } if(tran) { # extract only the recommended value if(use.theory) funXsim <- funXsim[, c(argname, valname, "theo")] else funXsim <- funXsim[, c(argname, valname)] # apply the transformation to it funXsim <- eval(transform.funXsim) } # extract the values for simulation i simvals.i <- funXsim[[valname]] if(length(simvals.i) != nrvals) stop("Vectors of function values have incompatible lengths") simvals[ , i] <- funXsim[[valname]] if(verbose) pstate <- progressreport(i, Nsim, state=pstate) if(collectrubbish) { rm(Xsim) rm(funXsim) gc() } } ## end simulation loop if(verbose) { cat("\nDone.\n") flush.console() } # ........................................................... # save functions and/or patterns if so commanded if(!gaveup) { if(savefuns) { alldata <- cbind(rvals, simvals) simnames <- paste("sim", 1:Nsim, sep="") colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=attr(funX, "ylab"), valu="sim1", fmla= deparse(. 
~ r), alim=attr(funX, "alim"), labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:Nsim, sep="")), fname=attr(funX, "fname"), yexp=attr(funX, "yexp"), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames } if(savepatterns) SimPats <- if(simtype == "list") SimDataList else Caughtpatterns } ######### COMPUTE ENVELOPES ####################### etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" if(dual) { jsim <- 1:nsim jsim.mean <- nsim + 1:nsim2 } else { jsim <- jsim.mean <- NULL } result <- envelope.matrix(simvals, funX=funX, jsim=jsim, jsim.mean=jsim.mean, type=etype, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, nrank=nrank, ginterval=ginterval, nSD=nSD, Yname=Yname, do.pwrong=do.pwrong, weights=weights, gaveup=gaveup) ## tack on envelope information attr(result, "einfo") <- resolve.defaults(envelopeInfo, attr(result, "einfo")) if(!gaveup) { ## tack on functions and/or patterns if so commanded if(savefuns) attr(result, "simfuns") <- SimFuns if(savepatterns) { attr(result, "simpatterns") <- SimPats attr(result, "datapattern") <- X } ## undocumented - tack on values of some other quantity if(savevalues) { attr(result, "simvalues") <- SavedValues attr(result, "datavalue") <- saveresultof(X) } } ## save function weights if(use.weights) attr(result, "weights") <- weights return(result) } plot.envelope <- function(x, ..., main) { if(missing(main)) main <- short.deparse(substitute(x)) shade.given <- ("shade" %in% names(list(...))) shade.implied <- !is.null(fvnames(x, ".s")) if(!(shade.given || shade.implied)) { # ensure x has default 'shade' attribute # (in case x was produced by an older version of spatstat) if(all(c("lo", "hi") %in% colnames(x))) fvnames(x, ".s") <- c("lo", "hi") else warning("Unable to determine shading for envelope") } NextMethod("plot", main=main) } print.envelope <- function(x, ...) 
{ e <- attr(x, "einfo") g <- e$global csr <- e$csr pois <- e$pois if(is.null(pois)) pois <- csr simtype <- e$simtype constraints <- e$constraints nr <- e$nrank nsim <- e$nsim V <- e$VARIANCE fname <- flat.deparse(attr(x, "ylab")) type <- if(V) paste("Pointwise", e$nSD, "sigma") else if(g) "Simultaneous" else "Pointwise" splat(type, "critical envelopes for", fname, "\nand observed value for", sQuote(e$Yname)) if(!is.null(valname <- e$valname) && waxlyrical('extras')) splat("Edge correction:", dQuote(valname)) ## determine *actual* type of simulation descrip <- if(csr) "simulations of CSR" else if(!is.null(simtype)) { switch(simtype, csr="simulations of CSR", rmh=paste("simulations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm="simulations of fitted cluster model", expr="evaluations of user-supplied expression", func="evaluations of user-supplied function", list="point pattern datasets in user-supplied list", funs="columns of user-supplied data") } else "simulations of fitted model" if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) # splat("Obtained from", nsim, descrip) # if(waxlyrical('extras')) { dual <- isTRUE(e$dual) usetheory <- isTRUE(e$use.theory) hownull <- if(usetheory) { "(known exactly)" } else if(dual) { paste("(estimated from a separate set of", e$nsim2, "simulations)") } else NULL formodel <- if(csr) "for CSR" else NULL if(g) { splat("Envelope based on maximum deviation of", fname, "from null value", formodel, hownull) } else if(dual) { splat("Null value of", fname, formodel, hownull) } if(!is.null(attr(x, "simfuns"))) splat("(All simulated function values are stored)") if(!is.null(attr(x, "simpatterns"))) splat("(All simulated point patterns are stored)") } splat("Alternative:", e$alternative) if(!V && waxlyrical('extras')) { ## significance interpretation! 
alpha <- if(g) { nr/(nsim+1) } else { 2 * nr/(nsim+1) } splat("Significance level of", if(g) "simultaneous" else "pointwise", "Monte Carlo test:", paste0(if(g) nr else 2 * nr, "/", nsim+1), "=", signif(alpha, 3)) } if(waxlyrical('gory') && !is.null(pwrong <- attr(x, "pwrong"))) { splat("\t[Estimated significance level of pointwise excursions:", paste0("pwrong=", signif(pwrong, 3), "]")) } NextMethod("print") } summary.envelope <- function(object, ...) { e <- attr(object, "einfo") g <- e$global V <- e$VARIANCE nr <- e$nrank nsim <- e$nsim csr <- e$csr pois <- e$pois if(is.null(pois)) pois <- csr simtype <- e$simtype constraints <- e$constraints alternative <- e$alternative use.theory <- e$use.theory has.theo <- "theo" %in% fvnames(object, "*") csr.theo <- csr && has.theo use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo) fname <- deparse(attr(object, "ylab")) type <- if(V) paste("Pointwise", e$nSD, "sigma") else if(g) "Simultaneous" else "Pointwise" splat(type, "critical envelopes for", fname, "\nand observed value for", sQuote(e$Yname)) # determine *actual* type of simulation descrip <- if(csr) "simulations of CSR" else if(!is.null(simtype)) { switch(simtype, csr="simulations of CSR", rmh=paste("simulations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm="simulations of fitted cluster model", expr="evaluations of user-supplied expression", func="evaluations of user-supplied function", list="point pattern datasets in user-supplied list", funs="columns of user-supplied data", "simulated point patterns") } else "simulations of fitted model" if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) # splat("Obtained from", nsim, descrip) # if(waxlyrical('extras')) { if(!is.null(e$dual) && e$dual) splat("Theoretical (i.e. 
null) mean value of", fname, "estimated from a separate set of", e$nsim2, "simulations") if(!is.null(attr(object, "simfuns"))) splat("(All", nsim, "simulated function values", "are stored in attr(,", dQuote("simfuns"), ") )") if(!is.null(attr(object, "simpatterns"))) splat("(All", nsim, "simulated point patterns", "are stored in attr(,", dQuote("simpatterns"), ") )") } # splat("Alternative:", alternative) if(V) { # nSD envelopes splat(switch(alternative, two.sided = "Envelopes", "Critical boundary"), "computed as sample mean", switch(alternative, two.sided="plus/minus", less="minus", greater="plus"), e$nSD, "sample standard deviations") } else { # critical envelopes lo.ord <- if(nr == 1L) "minimum" else paste(ordinal(nr), "smallest") hi.ord <- if(nr == 1L) "maximum" else paste(ordinal(nr), "largest") if(g) splat(switch(alternative, two.sided = "Envelopes", "Critical boundary"), "computed as", if(use.theory) "theoretical curve" else "mean of simulations", switch(alternative, two.sided="plus/minus", less="minus", greater="plus"), hi.ord, "simulated value of maximum", switch(alternative, two.sided="absolute", less="negative", greater="positive"), "deviation") else { if(alternative != "less") splat("Upper envelope: pointwise", hi.ord, "of simulated curves") if(alternative != "greater") splat("Lower envelope: pointwise", lo.ord, "of simulated curves") } symmetric <- (alternative == "two.sided") && !g alpha <- if(!symmetric) { nr/(nsim+1) } else { 2 * nr/(nsim+1) } splat("Significance level of Monte Carlo test:", paste0(if(!symmetric) nr else 2 * nr, "/", nsim+1), "=", alpha) } splat("Data:", e$Yname) return(invisible(NULL)) } # envelope.matrix # core functionality to compute envelope values # theory = funX[["theo"]] # observed = fX envelope.matrix <- function(Y, ..., rvals=NULL, observed=NULL, theory=NULL, funX=NULL, nsim=NULL, nsim2=NULL, jsim=NULL, jsim.mean=NULL, type=c("pointwise", "global", "variance"), alternative=c("two.sided", "less", "greater"), scale = NULL, 
clamp=FALSE, csr=FALSE, use.theory = csr, nrank=1, ginterval=NULL, nSD=2, savefuns=FALSE, check=TRUE, Yname=NULL, do.pwrong=FALSE, weights=NULL, precomputed=NULL, gaveup=FALSE) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) type <- match.arg(type) alternative <- match.arg(alternative) if(!is.null(funX)) stopifnot(is.fv(funX)) pwrong <- NULL use.weights <- !is.null(weights) cheat <- !is.null(precomputed) if(is.null(rvals) && is.null(observed) && !is.null(funX)) { ## assume funX is summary function for observed data rvals <- with(funX, .x) observed <- with(funX, .y) theory <- if(use.theory) (theory %orifnull% funX[["theo"]]) else NULL if(check) stopifnot(nrow(funX) == nrow(Y)) } else if(check) { ## validate vectors of data if(is.null(rvals)) stop("rvals must be supplied") if(is.null(observed)) stop("observed must be supplied") stopifnot(length(rvals) == nrow(Y)) stopifnot(length(observed) == length(rvals)) } use.theory <- use.theory && !is.null(theory) if(use.theory && check) stopifnot(length(theory) == length(rvals)) simvals <- Y fX <- observed atr <- if(!is.null(funX)) attributes(funX) else list(alim=range(rvals), ylab=quote(f(r)), yexp=quote(f(r)), fname="f") fname <- atr$fname NAvector <- rep(NA_real_, length(rvals)) if(!cheat) { ## ................ standard calculation ..................... ## validate weights if(use.weights && !gaveup) check.nvector(weights, ncol(simvals), things="simulated functions", naok=TRUE) ## determine numbers of columns used Ncol <- if(!gaveup) ncol(simvals) else Inf if(Ncol < 2) stop("Need at least 2 columns of function values") ## all columns are used unless 'nsim' or 'jsim' given. 
if(!(is.null(nsim) && is.null(jsim))) { if(is.null(jsim)) { jsim <- 1:nsim } else if(is.null(nsim)) { nsim <- length(jsim) } else stopifnot(length(jsim) == nsim) if(nsim > Ncol) stop(paste(nsim, "simulations are not available; only", Ncol, "columns provided")) } ## nsim2 or jsim.mean may be given, and imply dual calculation if(!(is.null(nsim2) && is.null(jsim.mean))) { if(is.null(jsim.mean)) { jsim.mean <- setdiff(seq_len(Ncol), jsim)[1:nsim2] } else if(is.null(nsim2)) { nsim2 <- length(jsim.mean) } else stopifnot(length(jsim.mean) == nsim2) if(nsim + nsim2 > Ncol) stop(paste(nsim, "+", nsim2, "=", nsim+nsim2, "simulations are not available; only", Ncol, "columns provided")) if(length(intersect(jsim, jsim.mean))) warning("Internal warning: Indices in jsim and jsim.mean overlap") } restrict.columns <- !is.null(jsim) dual <- !is.null(jsim.mean) } else { ## ................ precomputed values .................. ## validate weights if(use.weights) check.nvector(weights, nsim, things="simulations", naok=TRUE) restrict.columns <- FALSE dual <- FALSE } shadenames <- NULL nsim.mean <- NULL switch(type, pointwise = { ## ....... POINTWISE ENVELOPES ............................... 
if(gaveup) { lo <- hi <- NAvector } else if(cheat) { stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi } else { simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[,jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(nrank == 1L) { lohi <- apply(simvals, 1L, range) } else { lohi <- apply(simvals, 1L, # function(x, n) { sort(x)[n] }, orderstats, k=c(nrank, nsim-nrank+1L)) } lo <- lohi[1L,] hi <- lohi[2L,] } lo.name <- "lower pointwise envelope of %s from simulations" hi.name <- "upper pointwise envelope of %s from simulations" ## if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper limit" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower limit" }) if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) } else { m <- if(gaveup) NAvector else if(cheat) precomputed$mmean else if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) results <- data.frame(r=rvals, obs=fX, mmean=m, lo=lo, hi=hi) } shadenames <- c("lo", "hi") if(do.pwrong) { ## estimate the p-value for the 'wrong test' if(gaveup) { pwrong <- NA_real_ } else if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { dataranks <- t(apply(simvals, 1, rank, ties.method="random")) upper.signif <- (dataranks <= nrank) lower.signif <- (dataranks >= nsim-nrank+1L) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | upper.signif) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } }, global = { ## ..... SIMULTANEOUS ENVELOPES .......................... if(gaveup) { lo <- hi <- reference <- NAvector } else if(cheat) { ## ... use precomputed values .. 
stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi if(use.theory) { reference <- theory } else { stopifnot(checkfields(precomputed, "mmean")) reference <- precomputed$mmean } domain <- rep.int(TRUE, length(rvals)) } else { ## ... normal case: compute envelopes from simulations if(!is.null(ginterval)) { domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L]) funX <- funX[domain, ] simvals <- simvals[domain, ] } else domain <- rep.int(TRUE, length(rvals)) simvals[is.infinite(simvals)] <- NA if(use.theory) { reference <- theory[domain] if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } } else if(dual) { # Estimate the mean from one set of columns # Form envelopes from another set of columns simvals.mean <- simvals[, jsim.mean] # mmean <- reference <- if(!use.weights) apply(simvals.mean, 1L, mean, na.rm=TRUE) else apply(simvals.mean, 1L, weighted.mean, w=weights[jsim.mean], na.rm=TRUE) nsim.mean <- ncol(simvals.mean) # retain only columns used for envelope simvals <- simvals[, jsim] } else { # Compute the mean and envelopes using the same data if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } # mmean <- reference <- if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) } nsim <- ncol(simvals) # compute deviations deviations <- sweep(simvals, 1L, reference) deviations <- switch(alternative, two.sided = abs(deviations), greater = if(clamp) pmax(0, deviations) else deviations, less = if(clamp) pmax(0, -deviations) else (-deviations)) deviations <- matrix(deviations, nrow=nrow(simvals), ncol=ncol(simvals)) ## rescale ? 
sc <- 1 if(!is.null(scale)) { stopifnot(is.function(scale)) sc <- scale(rvals) sname <- "scale(r)" ans <- check.nvector(sc, length(rvals), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (sc <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[rvals > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) sc[bad] <- 1 } deviations <- sweep(deviations, 1L, sc, "/") } ## compute max (scaled) deviations suprema <- apply(deviations, 2L, max, na.rm=TRUE) # ranked deviations dmax <- sort(suprema)[nsim-nrank+1L] # simultaneous bands lo <- reference - sc * dmax hi <- reference + sc * dmax } lo.name <- "lower critical boundary for %s" hi.name <- "upper critical boundary for %s" if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" }) if(use.theory) results <- data.frame(r=rvals[domain], obs=fX[domain], theo=reference, lo=lo, hi=hi) else results <- data.frame(r=rvals[domain], obs=fX[domain], mmean=reference, lo=lo, hi=hi) shadenames <- c("lo", "hi") if(do.pwrong) warning(paste("Argument", sQuote("do.pwrong=TRUE"), "ignored;", "it is not relevant to global envelopes")) }, variance={ ## ....... POINTWISE MEAN, VARIANCE etc ...................... if(gaveup) { Ef <- varf <- NAvector } else if(cheat) { # .... use precomputed values .... stopifnot(checkfields(precomputed, c("Ef", "varf"))) Ef <- precomputed$Ef varf <- precomputed$varf } else { ## .... 
normal case: compute from simulations simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(!use.weights) { Ef <- apply(simvals, 1L, mean, na.rm=TRUE) varf <- apply(simvals, 1L, var, na.rm=TRUE) } else { Ef <- apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) varf <- apply(simvals, 1L, weighted.var, w=weights, na.rm=TRUE) } } if(gaveup) { sd <- stdres <- lo <- hi <- loCI <- hiCI <- NAvector } else { ## derived quantities sd <- sqrt(varf) stdres <- (fX-Ef)/sd stdres[!is.finite(stdres)] <- NA ## critical limits lo <- Ef - nSD * sd hi <- Ef + nSD * sd ## confidence interval loCI <- Ef - nSD * sd/sqrt(nsim) hiCI <- Ef + nSD * sd/sqrt(nsim) } lo.name <- paste("lower", nSD, "sigma critical limit for %s") hi.name <- paste("upper", nSD, "sigma critical limit for %s") loCI.name <- paste("lower", nSD, "sigma confidence bound", "for mean of simulated %s") hiCI.name <- paste("upper", nSD, "sigma confidence bound", "for mean of simulated %s") ## if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- hiCI <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" hiCI.name <- "infinite upper confidence limit" }, greater = { lo <- loCI <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" loCI.name <- "infinite lower confidence limit" }) ## put together if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(mmean=Ef, var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, NULL, fname, "loCI") hiCIlabel <- if(alternative == "less" && !gaveup) "infinity" else makefvlabel(NULL, NULL, fname, "hiCI") mslabl <- c(makefvlabel(NULL, "bar", fname), makefvlabel("var", "hat", fname), makefvlabel("res", "hat", fname), makefvlabel("stdres", "hat", fname), loCIlabel, hiCIlabel) wted <- 
if(use.weights) "weighted " else NULL msdesc <- c(paste0(wted, "sample mean of %s from simulations"), paste0(wted, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } else { results <- data.frame(r=rvals, obs=fX, mmean=Ef, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, NULL, fname, "loCI") hiCIlabel <- if(alternative == "less" && !gaveup) "infinity" else makefvlabel(NULL, NULL, fname, "hiCI") mslabl <- c(makefvlabel("var", "hat", fname), makefvlabel("res", "hat", fname), makefvlabel("stdres", "hat", fname), loCIlabel, hiCIlabel) msdesc <- c(paste0(if(use.weights) "weighted " else NULL, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } if(do.pwrong) { ## estimate the p-value for the 'wrong test' if(gaveup) { pwrong <- NA_real_ } else if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { upper.signif <- (simvals > hi) lower.signif <- (simvals < lo) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | upper.signif) # is.signif.somewhere <- apply(is.signif, 2, any) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } } ) ############ WRAP UP ######################### if(use.theory) { # reference is computed curve `theo' reflabl <- makefvlabel(NULL, NULL, fname, "theo") refdesc <- paste0("theoretical value of %s", if(csr) " for CSR" else NULL) } else { # reference is sample mean of simulations reflabl <- makefvlabel(NULL, "bar", fname) refdesc <- paste0(if(use.weights) "weighted " else NULL, "sample mean of %s from simulations") } lolabl <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, "hat", fname, "lo") hilabl <- 
if(alternative == "less"&& !gaveup) "infinity" else makefvlabel(NULL, "hat", fname, "hi") result <- fv(results, argu="r", ylab=atr$ylab, valu="obs", fmla= deparse(. ~ r), alim=intersect.ranges(atr$alim, range(results$r)), labl=c("r", makefvlabel(NULL, "hat", fname, "obs"), reflabl, lolabl, hilabl), desc=c("distance argument r", "observed value of %s for data pattern", refdesc, lo.name, hi.name), fname=atr$fname, yexp =atr$yexp) # columns to be plotted by default dotty <- c("obs", if(use.theory) "theo" else "mmean", "hi", "lo") if(type == "variance") { # add more stuff result <- bind.fv(result, morestuff, mslabl, msdesc) if(use.theory) dotty <- c(dotty, "mmean") } fvnames(result, ".") <- dotty fvnames(result, ".s") <- shadenames unitname(result) <- unitname(funX) class(result) <- c("envelope", class(result)) # tack on envelope information attr(result, "einfo") <- list(global = (type =="global"), ginterval = ginterval, alternative=alternative, scale = scale, clamp = clamp, csr = csr, use.theory = use.theory, csr.theo = csr && use.theory, simtype = "funs", constraints = "", nrank = nrank, nsim = nsim, VARIANCE = (type == "variance"), nSD = nSD, valname = NULL, dual = dual, nsim = nsim, nsim2 = nsim.mean, Yname = Yname, do.pwrong=do.pwrong, use.weights=use.weights, gaveup = gaveup) # tack on saved functions if(savefuns && !gaveup) { nSim <- ncol(Y) alldata <- cbind(rvals, Y) simnames <- paste("sim", 1:nSim, sep="") colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=atr$ylab, valu="sim1", fmla= deparse(. 
~ r), alim=atr$alim, labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:nSim, sep="")), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } if(do.pwrong) attr(result, "pwrong") <- pwrong if(use.weights) attr(result, "weights") <- weights return(result) } envelope.envelope <- function(Y, fun=NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) { Yname <- short.deparse(substitute(Y)) stopifnot(inherits(Y, "envelope")) Yorig <- Y aargh <- list(...) X <- attr(Y, "datapattern") sf <- attr(Y, "simfuns") sp <- attr(Y, "simpatterns") wt <- attr(Y, "weights") einfo <- attr(Y, "einfo") csr <- aargh$internal$csr %orifnull% einfo$csr if(is.null(fun) && is.null(sf)) { # No simulated functions - must compute them from simulated patterns if(is.null(sp)) stop(paste("Cannot compute envelope:", "Y does not contain simulated functions", "(was not generated with savefuns=TRUE)", "and does not contain simulated patterns", "(was not generated with savepatterns=TRUE)")) # set default fun=Kest fun <- Kest } if(!is.null(fun)) { # apply new function # point patterns are required if(is.null(sp)) stop(paste("Object Y does not contain simulated point patterns", "(attribute", dQuote("simpatterns"), ");", "cannot apply a new", sQuote("fun"))) if(is.null(X)) stop(paste("Cannot apply a new", sQuote("fun"), "; object Y generated by an older version of spatstat")) ## send signal if simulations were CSR internal <- aargh$internal if(csr) { if(is.null(internal)) internal <- list() internal$csr <- TRUE } ## compute new envelope result <- do.call(envelope, resolve.defaults(list(Y=X, fun=fun, simulate=sp), aargh, list(transform=transform, global=global, VARIANCE=VARIANCE, internal=internal, Yname=Yname, nsim=einfo$nsim, nsim2=einfo$nsim2, weights=wt), .StripNull=TRUE)) } else { #' compute new envelope with existing simulated functions if(is.null(sf)) stop(paste("Y does not contain a", dQuote("simfuns"), "attribute", "(it was not 
generated with savefuns=TRUE)")) if(!is.null(transform)) { # Apply transformation to Y and sf stopifnot(is.expression(transform)) ## cc <- dotexpr.to.call(transform, "Y", "eval.fv") cc <- inject.expr("with(Y, .)", transform) Y <- eval(cc) ## cc <- dotexpr.to.call(transform, "sf", "eval.fv") cc <- inject.expr("with(sf, .)", transform) sf <- eval(cc) } #' catch discrepancy between domains of observed and simulated functions if(nrow(sf) != nrow(Y)) { rrsim <- sf[[fvnames(sf, ".x")]] rrobs <- Y[[fvnames(Y, ".x")]] ra <- intersect.ranges(range(rrsim), range(rrobs)) delta <- min(mean(diff(rrsim)), mean(diff(rrobs)))/2 oksim <- (rrsim >= ra[1] - delta) & (rrsim <= ra[2] + delta) okobs <- (rrobs >= ra[1] - delta) & (rrobs <= ra[2] + delta) if(sum(oksim) != sum(okobs)) stop("Internal error: Unable to reconcile the domains", "of the observed and simulated functions", call.=FALSE) if(mean(abs(rrsim[oksim] - rrobs[okobs])) > delta) stop("Internal error: Unable to reconcile the r values", "of the observed and simulated functions", call.=FALSE) sf <- sf[oksim, ,drop=FALSE] Y <- Y[okobs, ,drop=FALSE] } # extract simulated function values df <- as.data.frame(sf) rname <- fvnames(sf, ".x") df <- df[, (names(df) != rname)] # interface with 'envelope.matrix' etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" result <- do.call(envelope.matrix, resolve.defaults(list(Y=as.matrix(df)), aargh, list(type=etype, csr=csr, funX=Y, Yname=Yname, weights=wt), .StripNull=TRUE)) } if(!is.null(transform)) { # post-process labels labl <- attr(result, "labl") dnames <- colnames(result) dnames <- dnames[dnames %in% fvnames(result, ".")] # expand "." 
ud <- as.call(lapply(c("cbind", dnames), as.name)) dont.complain.about(ud) expandtransform <- eval(substitute(substitute(tr, list(.=ud)), list(tr=transform[[1L]]))) # compute new labels attr(result, "fname") <- attr(Yorig, "fname") mathlabl <- as.character(fvlegend(result, expandtransform)) # match labels to columns evars <- all.vars(expandtransform) used.dotnames <- evars[evars %in% dnames] mathmap <- match(colnames(result), used.dotnames) okmath <- !is.na(mathmap) # update appropriate labels labl[okmath] <- mathlabl[mathmap[okmath]] attr(result, "labl") <- labl } # Tack on envelope info copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Yorig, "einfo")[copyacross] attr(result, "einfo")$csr <- csr # Save data return(result) } pool.envelope <- local({ pool.envelope <- function(..., savefuns=FALSE, savepatterns=FALSE) { Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") Elist <- unname(list(...)) nE <- length(Elist) if(nE == 0) return(NULL) #' ........ validate envelopes ..................... #' All arguments must be envelopes notenv <- !unlist(lapply(Elist, inherits, what="envelope")) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("envelope")) stop(why) } ## Only one envelope? if(nE == 1) return(Elist[[1L]]) ## envelopes must be compatible ok <- do.call(compatible, Elist) if(!ok) stop("Envelopes are not compatible") ## ... reconcile parameters in different envelopes ....... 
eilist <- lapply(Elist, attr, which="einfo") global <- resolveEinfo(eilist, "global", FALSE) ginterval <- resolveEinfo(eilist, "ginterval", NULL, atomic=FALSE) VARIANCE <- resolveEinfo(eilist, "VARIANCE", FALSE) alternative <- resolveEinfo(eilist, "alternative", FALSE) scale <- resolveEinfo(eilist, "scale", NULL, atomic=FALSE) clamp <- resolveEinfo(eilist, "clamp", FALSE) resolveEinfo(eilist, "simtype", "funs", "Envelopes were generated using different types of simulation") resolveEinfo(eilist, "constraints", "", "Envelopes were generated using different types of conditioning") resolveEinfo(eilist, "csr.theo", FALSE, NULL) csr <- resolveEinfo(eilist, "csr", FALSE, NULL) use.weights <- resolveEinfo(eilist, "use.weights" , FALSE, "Weights were used in some, but not all, envelopes: they will be ignored") use.theory <- resolveEinfo(eilist, "use.theory", csr, NULL) ## weights <- if(use.weights) unlist(lapply(Elist, attr, which="weights")) else NULL type <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" ## ........ validate saved functions ..................... 
if(savefuns || !VARIANCE) { ## Individual simulated functions are required SFlist <- lapply(Elist, attr, which="simfuns") isnul <- unlist(lapply(SFlist, is.null)) if(any(isnul)) { n <- sum(isnul) comply <- if(!VARIANCE) "compute the envelope:" else "save the simulated functions:" why <- paste("Cannot", comply, ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simfuns"), "attribute", "(not generated with savefuns=TRUE)") stop(why) } ## Simulated functions must be the same function fnames <- unique(lapply(SFlist, attr, which="fname")) if(length(fnames) > 1L) { fnames <- unlist(lapply(fnames, flatfname)) stop(paste("Envelope objects contain values", "of different functions:", commasep(sQuote(fnames)))) } ## vectors of r values must be identical rlist <- lapply(SFlist, getrvals) rvals <- rlist[[1L]] samer <- unlist(lapply(rlist, identical, y=rvals)) if(!all(samer)) stop(paste("Simulated function values are not compatible", "(different values of function argument)")) ## Extract function values and assemble into one matrix matlist <- lapply(SFlist, getdotvals) SFmatrix <- do.call(cbind, matlist) } ## compute pooled envelope switch(type, pointwise = { result <- envelope(SFmatrix, funX=Elist[[1L]], type=type, alternative=alternative, clamp=clamp, csr=csr, use.theory=use.theory, Yname=Yname, weights=weights, savefuns=savefuns) }, global = { simfunmatrix <- if(is.null(ginterval)) SFmatrix else { ## savefuns have not yet been clipped to ginterval ## while envelope data have been clipped. 
domain <- (rvals >= ginterval[1L]) & (rvals <= ginterval[2L]) SFmatrix[domain, , drop=FALSE] } result <- envelope(simfunmatrix, funX=Elist[[1L]], type=type, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, ginterval=ginterval, Yname=Yname, weights=weights, savefuns=savefuns) }, variance = { ## Pool sample means and variances nsims <- unlist(lapply(eilist, getElement, name="nsim")) mmeans <- lapply(Elist, getElement, name="mmean") vars <- lapply(Elist, getElement, name="var") mmeans <- matrix(unlist(mmeans), ncol=nE) vars <- matrix(unlist(vars), ncol=nE) if(!use.weights) { w.mean <- nsims d.mean <- sum(nsims) w.var <- nsims - 1 d.var <- sum(nsims) - 1 } else { weightlist <- lapply(Elist, attr, which="weights") w.mean <- unlist(lapply(weightlist, sum)) d.mean <- sum(w.mean) ssw <- unlist(lapply(weightlist, meansqfrac)) ## meansqfrac : function(x) {sum((x/sum(x))^2)})) w.var <- w.mean * (1 - ssw) d.var <- d.mean * (1 - sum(ssw)) } poolmmean <- as.numeric(mmeans %*% matrix(w.mean/d.mean, ncol=1L)) within <- vars %*% matrix(w.var, ncol=1L) between <- ((mmeans - poolmmean[])^2) %*% matrix(w.mean, ncol=1L) poolvar <- as.numeric((within + between)/d.var) ## feed precomputed data to envelope.matrix pc <- list(Ef=poolmmean[], varf=poolvar[]) nsim <- sum(nsims) result <- envelope.matrix(NULL, funX=Elist[[1L]], type=type, alternative=alternative, csr=csr, Yname=Yname, weights=weights, savefuns=savefuns, nsim=nsim, precomputed=pc) }) ## Copy envelope info that is not handled by envelope.matrix copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Elist[[1L]], "einfo")[copyacross] ## ..............saved patterns ..................... 
if(savepatterns) { SPlist <- lapply(Elist, attr, which="simpatterns") isnul <- unlist(lapply(SPlist, is.null)) if(any(isnul)) { n <- sum(isnul) why <- paste("Cannot save the simulated patterns:", ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simpatterns"), "attribute", "(not generated with savepatterns=TRUE)") warning(why) } else { attr(result, "simpatterns") <- Reduce(append, SPlist) } } ## ..............saved summary functions ................ if(savefuns) { alldata <- cbind(rvals, SFmatrix) Nsim <- ncol(SFmatrix) simnames <- paste0("sim", 1:Nsim) colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SFtemplate <- SFlist[[1L]] SimFuns <- fv(alldata, argu="r", ylab=attr(SFtemplate, "ylab"), valu="sim1", fmla= deparse(. ~ r), alim=attr(SFtemplate, "alim"), labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:Nsim, sep="")), fname=attr(SFtemplate, "fname"), yexp=attr(SFtemplate, "yexp"), unitname=unitname(SFtemplate)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } dotnames <- lapply(Elist, fvnames, a=".") dn <- dotnames[[1L]] if(all(unlist(lapply(dotnames, identical, y=dn)))) fvnames(result, ".") <- dn shadenames <- lapply(Elist, fvnames, a=".s") sh <- shadenames[[1L]] if(all(unlist(lapply(shadenames, identical, y=sh)))) fvnames(result, ".s") <- sh return(result) } getrvals <- function(z) { as.matrix(z)[, fvnames(z, ".x")] } getdotvals <- function(z) { as.matrix(z)[, fvnames(z, "."), drop=FALSE] } meansqfrac <- function(x) {sum((x/sum(x))^2)} pool.envelope }) # resolve matching entries in different envelope objects # x is a list of envelope info objects resolveEinfo <- function(x, what, fallback, warn, atomic=TRUE) { if(atomic) { y <- unique(unlist(lapply(x, getElement, name=what))) if(length(y) == 1L) return(y) } else { y <- unique(lapply(x, getElement, name=what)) if(length(y) == 1L) return(y[[1L]]) } if(missing(warn)) warn <- 
paste("Envelopes were generated using different values", "of argument", paste(sQuote(what), ";", sep=""), "reverting to default value") if(!is.null(warn)) warning(warn, call.=FALSE) return(fallback) } as.data.frame.envelope <- function(x, ..., simfuns=FALSE) { if(simfuns && !is.null(sf <- attr(x, "simfuns"))) { # tack on the simulated functions as well y <- as.data.frame(bind.fv(x, sf, clip=TRUE)) return(y) } NextMethod("as.data.frame") } spatstat/R/Iest.R0000644000176200001440000000517613556450414013365 0ustar liggesusers# Iest.R # # I function # # $Revision: 1.16 $ $Date: 2019/10/31 03:01:26 $ # # # Iest <- local({ Iest <- function(X, ..., eps=NULL, r = NULL, breaks = NULL, correction=NULL) { X <- as.ppp(X) if(!is.multitype(X)) stop("Only applicable to multitype point patterns") marx <- marks(X, dfok=FALSE) ntypes <- length(levels(marx)) Y <- unmark(split(X)) ## relative proportions ni <- sapply(Y, npoints) fi <- ni/sum(ni) ## J function of pattern regardless of type Jdotdot <- Jest(unmark(X), correction=correction, r=r, eps=eps, breaks=breaks, ...) rvals <- Jdotdot$r ## J function of subpattern of each type i Jii <- lapply(Y, Jest, r=rvals, correction=correction, eps=eps, ...) nrvals <- lengths(lapply(Jii, getElement, name="r")) if(length(unique(nrvals)) != 1 || nrvals[1] != length(rvals)) stop("Internal error: J function objects have different lengths") ## initialise fv object alim <- attr(Jdotdot, "alim") Z <- fv(data.frame(r=rvals, theo=0), "r", substitute(I(r), NULL), "theo", . 
~ r, alim, c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="I") ## Estimates of each type namii <- unlist(lapply(Jii, names)) namdd <- names(Jdotdot) bothnames <- namii[namii %in% namdd] if("un" %in% bothnames) { Jun <- matrix(extract(Jii, "un"), nrow=ntypes, byrow=TRUE) Iun <- apply(fi * Jun, 2, sum) - Jdotdot$un Z <- bind.fv(Z, data.frame(un=Iun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- matrix(extract(Jii, "rs"), nrow=ntypes, byrow=TRUE) Irs <- apply(fi * Jrs, 2, sum) - Jdotdot$rs Z <- bind.fv(Z, data.frame(rs=Irs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") } if("han" %in% bothnames) { Jhan <- matrix(extract(Jii, "han"), nrow=ntypes, byrow=TRUE) Ihan <- apply(fi * Jhan, 2, sum) - Jdotdot$han Z <- bind.fv(Z, data.frame(han=Ihan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- matrix(extract(Jii, "km"), nrow=ntypes, byrow=TRUE) Ikm <- apply(fi * Jkm, 2, sum) - Jdotdot$km Z <- bind.fv(Z, data.frame(km=Ikm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") } unitname(Z) <- unitname(X) attr(Z, "conserve") <- attr(Jdotdot, "conserve") return(Z) } extract <- function(Zlist, what) sapply(Zlist, "[[", i=what) Iest }) spatstat/R/minkowski.R0000644000176200001440000000476013333543255014471 0ustar liggesusers#' #' minkowski.R #' #' Minkowski Sum and related operations #' #' $Revision: 1.7 $ $Date: 2017/06/05 10:31:58 $ "%(+)%" <- MinkowskiSum <- local({ MinkowskiSum <- function(A, B) { if(is.ppp(A)) return(UnionOfShifts(B, A)) if(is.ppp(B)) return(UnionOfShifts(A, B)) ## extract lists of simply-connected polygons AA <- simplepolygons(A) BB <- simplepolygons(B) ## determine common resolution for polyclip operations eps <- mean(c(sidelengths(Frame(A)), sidelengths(Frame(B))))/2^30 ## compute Minkowski sums of pieces pieces <- NULL for(b in BB) pieces <- append(pieces, lapply(AA, MinkSumConnected, b=b, eps=eps)) ## form union in 
one step, to avoid artefacts result <- union.owin(solapply(pieces, poly2owin)) return(result) } poly2owin <- function(z) owin(poly=z, check=FALSE) MinkSumConnected <- function(a, b, eps) { ## a and b are list(x,y) simply-connected polygons out <- polyclip::polyminkowski(a, b, x0=0, y0=0, eps=eps) if(length(out) == 1) return(out) ispos <- (sapply(out, Area.xypolygon) >= 0) if(sum(ispos) > 1) { stop("Internal error: result of sumconnected is not simply connected", call.=FALSE) } return(out[ispos]) } simplepolygons <- function(A) { if(is.psp(A)) return(psp2poly(A)) ## convert to owin, then polygonal A <- as.polygonal(A) ## separate into simply-connected pieces AA <- break.holes(A)$bdry return(AA) } ## handle segment patterns as well psp2poly <- function(X) apply(as.matrix(X$ends), 1, seg2poly) seg2poly <- function(z) with(as.list(z), list(x=c(x0, x1, x0), y=c(y0,y1,y0))) ## UnionOfShifts <- function(X, V) { #' compute the union or superposition of copies of X by vectors in V v <- as.matrix(coords(V)) n <- nrow(v) Y <- vector(mode="list", length=n) for(i in seq_len(n)) Y[[i]] <- shift(X, v[i,]) Y <- as.solist(Y) if(is.owin(X)) { Z <- union.owin(Y) } else { #' X is a pattern of objects in a window W <- MinkowskiSum(Window(X), Window(V)) Z <- superimpose(Y, W=W) } return(Z) } MinkowskiSum }) dilationAny <- function(A, B) { MinkowskiSum(A, reflect(B)) } "%(-)%" <- erosionAny <- function(A, B) { D <- Frame(A) Dplus <- grow.rectangle(D, 0.1 * shortside(D)) Ac <- complement.owin(A, Dplus) AcB <- MinkowskiSum(Ac, reflect(B)) if(is.subset.owin(D, AcB)) return(emptywindow(D)) C <- complement.owin(AcB[Dplus], Dplus)[D] return(C) } spatstat/R/listof.R0000644000176200001440000000214713333543255013753 0ustar liggesusers# # listof.R # # Methods for class `listof' # # plot.listof is defined in plot.splitppp.R # "[<-.listof" <- function(x, i, value) { # invoke list method class(x) <- "list" x[i] <- value # then make it a 'listof' object too class(x) <- c("listof", class(x)) x } 
summary.listof <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.listof" x } print.summary.listof <- function(x, ...) { class(x) <- "listof" print(x) invisible(NULL) } listof <- function(...) { # warn.once("listof", # "The class listof will be Deprecated", # "in future versions of spatstat.", # "Use anylist or solist") stuff <- list(...) class(stuff) <- c("listof", class(stuff)) return(stuff) } as.listof <- function(x) { if(!is.list(x)) x <- list(x) if(!inherits(x, "listof")) class(x) <- c("listof", class(x)) # warn.once("listof", # "The class listof will be Deprecated", # "in future versions of spatstat.", # "Use anylist or solist") return(x) } as.layered.listof <- function(X) { layered(LayerList=X) } spatstat/R/rags.R0000644000176200001440000000511313333543255013403 0ustar liggesusers#' #' rags.R #' #' Alternating Gibbs Sampler #' #' $Revision: 1.6 $ $Date: 2016/11/29 05:01:51 $ #' #' Initial implementation for multitype hard core process #' without interaction within types rags <- function(model, ..., ncycles=100) { if(!is.list(model)) stop("Argument 'model' should be a list") if(!all(c("beta", "hradii") %in% names(model))) stop("Argument 'model' should have entries 'beta' and 'hradii'") do.call(ragsMultiHard, append(model, list(..., ncycles=ncycles))) } ragsMultiHard <- function(beta, hradii, ..., types=NULL, bmax=NULL, periodic=FALSE, ncycles=100) { ## validate beta by generating first proposal points Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types) ntypes <- length(levels(marks(Xprop))) check.nmatrix(hradii, ntypes, things="types of points") if(any(is.finite(dh <- diag(hradii)) & dh > 0)) stop("Interaction between points of the same type is not permitted") ## initial state empty X <- Xprop[integer(0)] Y <- split(X) ## for(cycle in 1:ncycles) { if(cycle > 1) Xprop <- rmpoispp(lambda=beta, lmax=bmax, ..., types=types) Xprop <- Xprop[order(coords(Xprop)$x)] Yprop <- split(Xprop) for(i in 1:ntypes) { Xi <- Yprop[[i]] ok 
<- TRUE for(j in (1:ntypes)[-i]) { if(!any(ok)) break; ok <- ok & !has.close(Xi, hradii[i,j], Y[[j]], sorted=TRUE, periodic=periodic) } Y[[i]] <- Xi[ok] } } Z <- do.call(superimpose, Y) return(Z) } ragsAreaInter <- function(beta, eta, r, ..., win=NULL, bmax=NULL, periodic=FALSE, ncycles=100) { check.1.real(eta) check.1.real(r) if(r == 0 || eta == 1) return(rpoispp(beta, win=win, lmax=bmax, ...)) if(eta < 1) stop("Alternating Gibbs algorithm requires eta >= 1", call.=FALSE) if(is.function(beta)) { beta <- as.im(beta, W=win, ...) } else if(is.numeric(beta)) { check.1.real(beta) stopifnot(beta >= 0) } else if(!is.im(beta)) { stop("beta should be a number, a pixel image, or a function(x,y)", call.=FALSE) } if(is.im(beta) && is.null(win)) win <- as.owin(beta) kappa <- beta * eta loggamma <- log(eta)/(pi * r^2) bmax <- if(is.null(bmax)) NULL else c(max(kappa), loggamma) B <- if(is.numeric(beta)) c(kappa, loggamma) else solist(kappa, as.im(loggamma, W=win)) H <- matrix(c(0,r,r,0), 2, 2) Y <- ragsMultiHard(B, H, types=1:2, bmax=bmax, periodic=periodic, ncycles=ncycles) X <- split(Y)[[1]] return(X) } spatstat/R/plot.im.R0000644000176200001440000007742313623714544014051 0ustar liggesusers# # plot.im.R # # $Revision: 1.137 $ $Date: 2020/02/21 08:45:54 $ # # Plotting code for pixel images # # plot.im # image.im # contour.im # ########################################################################### plot.im <- local({ ## auxiliary functions image.doit <- function(imagedata, ..., extrargs=graphicsPars("image"), W, workaround=FALSE) { aarg <- resolve.defaults(...) 
add <- resolve.1.default(list(add=FALSE), aarg) show.all <- resolve.1.default(list(show.all=!add), aarg) addcontour <- resolve.1.default(list(addcontour=FALSE), aarg) args.contour <- resolve.1.default(list(args.contour=list()), aarg) ## if(add && show.all) { ## set up the window space *with* the main title ## using the same code as plot.owin, for consistency do.call.matched(plot.owin, resolve.defaults(list(x=W, type="n"), aarg), extrargs=graphicsPars("owin")) } if(workaround && identical(aarg$useRaster, TRUE)) { #' workaround for bug 16035 #' detect reversed coordinates usr <- par('usr') xrev <- (diff(usr[1:2]) < 0) yrev <- (diff(usr[3:4]) < 0) if(xrev || yrev) { #' flip matrix of pixel values, because the device driver does not z <- imagedata$z d <- dim(z) # z is in the orientation expected for image.default if(xrev) z <- z[d[1]:1, , drop=FALSE] if(yrev) z <- z[ , d[2]:1, drop=FALSE] imagedata$z <- z } } extrargs <- setdiff(extrargs, c("claim.title.space", "box")) z <- do.call.matched(image.default, append(imagedata, aarg), extrargs=extrargs) if(addcontour) do.call(do.contour, resolve.defaults(imagedata, list(add=TRUE), args.contour, list(col=par('fg')), aarg, .StripNull=TRUE)) return(z) } do.contour <- function(x, y, z, ..., drawlabels=TRUE) { nx <- length(x) ny <- length(y) nz <- dim(z) if(nx > nz[1]) { if(nz[1] == 1) { z <- rbind(z, z) nz <- dim(z) drawlabels <- FALSE } else { x <- (x[-1] + x[-nx])/2 nx <- nx-1 } } if(ny > nz[2]) { if(nz[2] == 1) { z <- cbind(z, z) nz <- dim(z) drawlabels <- FALSE } else { y <- (y[-1] + y[-ny])/2 ny <- ny-1 } } do.call.matched(contour.default, list(x=x, y=y, z=z, ..., drawlabels=drawlabels)) } do.box.etc <- function(bb, add, argh) { do.call(box.etc, append(list(bb=bb, add=add), argh)) } box.etc <- function(bb, ..., add=FALSE, box=!add, axes=FALSE, ann=FALSE, xlab="", ylab="") { # axes for image xr <- bb$xrange yr <- bb$yrange if(box) rect(xr[1], yr[1], xr[2], yr[2]) if(axes) { px <- prettyinside(xr) py <- prettyinside(yr) 
do.call.plotfun(graphics::axis, resolve.defaults( list(side=1, at=px), list(...), list(pos=yr[1])), extrargs=graphicsPars("axis")) do.call.plotfun(graphics::axis, resolve.defaults( list(side=2, at=py), list(...), list(pos=xr[1])), extrargs=graphicsPars("axis")) } ## axis labels xlab, ylab if(ann) { dox <- any(nzchar(xlab)) doy <- any(nzchar(ylab)) line0 <- if(axes) 1 else 0 if(dox || doy) { mtargs <- resolve.defaults(list(...), list(line=line0)) if(dox) do.call.matched(mtext, append(list(text=xlab, side=1), mtargs)) if(doy) do.call.matched(mtext, append(list(text=ylab, side=2), mtargs)) } } } clamp <- function(x, v, tol=0.02 * diff(v)) { ok <- (x >= v[1] - tol) & (x <= v[2] + tol) x[ok] } cellbreaks <- function(x, dx) { nx <- length(x) seq(x[1] - dx/2, x[nx] + dx/2, length.out=nx+1) } log10orNA <- function(x) { y <- rep(NA_real_, length(x)) ok <- !is.na(x) & (x > 0) y[ok] <- log10(x[ok]) return(y) } Ticks <- function(usr, log=FALSE, nint=NULL, ..., clip=TRUE) { #' modification of grDevices::axisTicks #' constrains ticks to be inside the specified range if clip=TRUE #' accepts nint=NULL as if it were missing z <- if(is.null(nint)) axisTicks(usr=usr, log=log, ...) else axisTicks(usr=usr, log=log, nint=nint, ...) 
if(clip) { zlimits <- if(log) 10^usr else usr z <- z[inside.range(z, zlimits)] } return(z) } numericalRange <- function(x, zlim=NULL) { xr <- suppressWarnings(range(x, finite=TRUE)) if(!all(is.finite(xr))) warning("All pixel values are NA", call.=FALSE) if(!is.null(zlim)) xr <- suppressWarnings(range(xr, zlim, finite=TRUE)) if(!all(is.finite(xr))) { warning("Cannot determine range of values for colour map", call.=FALSE) xr <- c(0,0) } return(xr) } # main function PlotIm <- function(x, ..., main, add=FALSE, clipwin=NULL, col=NULL, valuesAreColours=NULL, log=FALSE, ncolours=256, gamma=1, ribbon=show.all, show.all=!add, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), riblab=NULL, colargs=list(), useRaster=NULL, workaround=FALSE, zap=1, do.plot=TRUE) { if(missing(main)) main <- short.deparse(substitute(x)) verifyclass(x, "im") if(x$type == "complex") { cl <- match.call() cl$x <- solist(Re=Re(x), Im=Im(x), Mod=Mod(x), Arg=Arg(x)) cl[[1]] <- as.name('plot') cl$main <- main out <- eval(cl, parent.frame()) return(invisible(out)) } ribside <- match.arg(ribside) col.given <- !is.null(col) dotargs <- list(...) 
stopifnot(is.list(ribargs)) user.ticks <- ribargs$at user.nint <- ribargs$nint if(!is.null(clipwin)) { x <- x[as.rectangle(clipwin)] if(!is.rectangle(clipwin)) x <- x[clipwin, drop=FALSE] } zlim <- dotargs$zlim x <- repair.image.xycoords(x) xtype <- x$type xbox <- as.rectangle(x) do.log <- identical(log, TRUE) if(do.log && !(x$type %in% c("real", "integer"))) stop(paste("Log transform is undefined for an image of type", sQuote(xtype))) # determine whether pixel values are to be treated as colours if(!is.null(valuesAreColours)) { # argument given - validate stopifnot(is.logical(valuesAreColours)) if(valuesAreColours) { ## pixel values must be factor or character if(!xtype %in% c("factor", "character")) { if(do.plot) warning(paste("Pixel values of type", sQuote(xtype), "are not interpretable as colours")) valuesAreColours <- FALSE } else if(col.given) { ## colour info provided: contradictory if(do.plot) warning(paste("Pixel values are taken to be colour values,", "because valuesAreColours=TRUE;", "the colour map (argument col) is ignored"), call.=FALSE) col <- NULL } if(do.log && do.plot) warning(paste("Pixel values are taken to be colour values,", "because valuesAreColours=TRUE;", "the argument log=TRUE is ignored"), call.=FALSE) } } else if(col.given) { # argument 'col' controls colours valuesAreColours <- FALSE } else if(spatstat.options("monochrome")) { valuesAreColours <- FALSE } else { ## default : determine whether pixel values are colours strings <- switch(xtype, character = { as.vector(x$v) }, factor = { levels(x) }, { NULL }) valuesAreColours <- is.character(strings) && !inherits(try(col2rgb(strings), silent=TRUE), "try-error") if(valuesAreColours && do.plot) splat("Interpreting pixel values as colours", "(valuesAreColours=TRUE)") } # if(valuesAreColours) { # colour-valued images are plotted using the code for factor images # with the colour map equal to the levels of the factor switch(xtype, factor = { col <- levels(x) }, character = { x <- 
eval.im(factor(x)) xtype <- "factor" col <- levels(x) }, { if(do.plot) warning(paste("Pixel values of type", sQuote(xtype), "are not interpretable as colours")) }) # colours not suitable for ribbon ribbon <- FALSE } # transform pixel values to log scale? if(do.log) { rx <- range(x, finite=TRUE) if(all(rx > 0)) { x <- eval.im(log10(x)) } else { if(do.plot && any(rx < 0)) warning(paste("Negative pixel values", "omitted from logarithmic colour map;", "range of values =", prange(rx)), call.=FALSE) if(do.plot && !all(rx < 0)) warning("Zero pixel values omitted from logarithmic colour map", call.=FALSE) x <- eval.im(log10orNA(x)) } xtype <- x$type Log <- log10 Exp <- function(x) { 10^x } } else { Log <- Exp <- function(x) { x } } imagebreaks <- NULL # ribbonvalues <- ribbonbreaks <- NULL ribbonvalues <- NULL ## NOW DETERMINE THE COLOUR MAP colfun <- colmap <- NULL if(valuesAreColours) { ## pixel values are colours; set of colours was determined earlier colmap <- colourmap(col=col, inputs=col) } else if(!col.given) { ## no colour information given: use default colfun <- spatstat.options("image.colfun") } else if(inherits(col, "colourmap")) { ## Bob's your uncle colmap <- col } else if(is.function(col)) { ## Some kind of function determining a colour map if(names(formals(col))[1] == "n") { ## function(n) -> colour values colfun <- col } else { ## colour map determined by a rule (e.g. 
'beachcolours') colmap <- invokeColourmapRule(col, x, zlim=zlim, colargs=colargs) if(is.null(colmap)) stop("Unrecognised syntax for colour function") } } switch(xtype, real = { vrange <- numericalRange(x, zlim) if(!is.null(colmap)) { # explicit colour map s <- summary(colmap) if(s$discrete) stop("Discrete colour map is not applicable to real values") imagebreaks <- s$breaks vrange <- range(imagebreaks) col <- s$outputs } trivial <- (diff(vrange) <= zap * .Machine$double.eps) #' ribbonvalues: domain of colour map (pixel values) #' ribbonrange: (min, max) of pixel values in image #' nominalrange: range of values shown on ribbon #' nominalmarks: values shown on ribbon at tick marks #' ribbonticks: pixel values of tick marks #' ribbonlabels: text displayed at tick marks ribbonvalues <- if(trivial) vrange[1] else seq(from=vrange[1], to=vrange[2], length.out=ribn) ribbonrange <- vrange nominalrange <- Log(ribscale * Exp(ribbonrange)) nominalmarks <- if(trivial) nominalrange[1] else (user.ticks %orifnull% Ticks(nominalrange, log=do.log, nint=user.nint)) ribbonticks <- Log(nominalmarks/ribscale) ribbonlabels <- paste(nominalmarks) }, integer = { values <- as.vector(x$v) values <- values[!is.na(values)] uv <- unique(values) vrange <- numericalRange(uv, zlim) nvalues <- length(uv) trivial <- (nvalues < 2) if(!trivial){ nominalrange <- Log(ribscale * Exp(vrange)) if(!is.null(user.ticks)) { nominalmarks <- user.ticks } else { nominalmarks <- Ticks(nominalrange, log=do.log, nint = user.nint) nominalmarks <- nominalmarks[nominalmarks %% 1 == 0] } ribbonticks <- Log(nominalmarks/ribscale) ribbonlabels <- paste(nominalmarks) if(!do.log && isTRUE(all.equal(ribbonticks, vrange[1]:vrange[2]))) { # each possible pixel value will appear in ribbon ribbonvalues <- vrange[1]:vrange[2] imagebreaks <- c(ribbonvalues - 0.5, vrange[2] + 0.5) ribbonrange <- range(imagebreaks) ribbonticks <- ribbonvalues ribbonlabels <- paste(ribbonticks * ribscale) } else { # not all possible values will 
appear in ribbon ribn <- min(ribn, diff(vrange)+1) ribbonvalues <- seq(from=vrange[1], to=vrange[2], length.out=ribn) ribbonrange <- vrange } } if(!is.null(colmap)) { # explicit colour map s <- summary(colmap) imagebreaks <- if(!s$discrete) s$breaks else c(s$inputs[1] - 0.5, s$inputs + 0.5) col <- s$outputs } }, logical = { values <- as.integer(as.vector(x$v)) values <- values[!is.na(values)] uv <- unique(values) trivial <- (length(uv) < 2) vrange <- c(0,1) imagebreaks <- c(-0.5, 0.5, 1.5) ribbonvalues <- c(0,1) ribbonrange <- range(imagebreaks) # ribbonbreaks <- imagebreaks ribbonticks <- user.ticks %orifnull% ribbonvalues ribbonlabels <- c("FALSE", "TRUE") if(!is.null(colmap)) col <- colmap(c(FALSE,TRUE)) }, factor = { lev <- levels(x) nvalues <- length(lev) trivial <- (nvalues < 2) # ensure all factor levels plotted separately fac <- factor(lev, levels=lev) intlev <- as.integer(fac) imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5) ribbonvalues <- intlev ribbonrange <- range(imagebreaks) # ribbonbreaks <- imagebreaks ribbonticks <- user.ticks %orifnull% ribbonvalues ribbonlabels <- paste(lev) vrange <- range(intlev) if(!is.null(colmap) && !valuesAreColours) col <- colmap(fac) }, character = { x <- eval.im(factor(x)) lev <- levels(x) nvalues <- length(lev) trivial <- (nvalues < 2) # ensure all factor levels plotted separately fac <- factor(lev, levels=lev) intlev <- as.integer(fac) imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5) ribbonvalues <- intlev ribbonrange <- range(imagebreaks) # ribbonbreaks <- imagebreaks ribbonticks <- user.ticks %orifnull% ribbonvalues ribbonlabels <- paste(lev) vrange <- range(intlev) if(!is.null(colmap)) col <- colmap(fac) }, stop(paste("Do not know how to plot image of type", sQuote(xtype))) ) ## Compute colour values to be passed to image.default if(!is.null(colmap)) { ## Explicit colour map object colourinfo <- list(breaks=imagebreaks, col=col) } else if(!is.null(colfun)) { ## Function colfun(n) if(trivial) ncolours <- 1 
colourinfo <- if(is.null(imagebreaks)) list(col=colfun(ncolours)) else list(breaks=imagebreaks, col=colfun(length(imagebreaks) - 1L)) } else if(col.given) { ## Colour values if(inherits(try(col2rgb(col), silent=TRUE), "try-error")) stop("Unable to interpret argument col as colour values") if(is.null(imagebreaks)) { colourinfo <- list(col=col) } else { nintervals <- length(imagebreaks) - 1 colourinfo <- list(breaks=imagebreaks, col=col) if(length(col) != nintervals) stop(paste("Length of argument", dQuote("col"), paren(paste(length(col))), "does not match the number of distinct values", paren(paste(nintervals)))) } } else stop("Internal error: unable to determine colour values") if(spatstat.options("monochrome")) { ## transform to grey scale colourinfo$col <- to.grey(colourinfo$col) } # colour map to be returned (invisibly) i.col <- colourinfo$col i.bks <- colourinfo$breaks output.colmap <- if(is.null(i.col)) NULL else if(inherits(i.col, "colourmap")) i.col else if(valuesAreColours) colourmap(col=i.col, inputs=i.col) else switch(xtype, integer=, real= { if(!is.null(i.bks)) { colourmap(col=i.col, breaks=i.bks) } else colourmap(col=i.col, range=vrange, gamma=gamma) }, logical={ colourmap(col=i.col, inputs=c(FALSE,TRUE)) }, character=, factor={ colourmap(col=i.col, inputs=lev) }, NULL) ## gamma correction soc <- summary(output.colmap) if(!is.null(gamma <- soc$gamma) && gamma != 1) colourinfo$breaks <- soc$breaks ## ........ decide whether to use rasterImage ......... 
## get device capabilities ## (this will start a graphics device if none is active) rasterable <- dev.capabilities()$rasterImage if(is.null(rasterable)) rasterable <- "no" ## can.use.raster <- switch(rasterable, yes=TRUE, no=FALSE, "non-missing"=!anyNA(x$v), FALSE) if(is.null(useRaster)) { useRaster <- can.use.raster } else if(useRaster && !can.use.raster) { whinge <- "useRaster=TRUE is not supported by the graphics device" if(rasterable == "non-missing") whinge <- paste(whinge, "for images with NA values") warning(whinge, call.=FALSE) } ## ........ start plotting ................. if(!identical(ribbon, TRUE) || trivial) { ## no ribbon wanted attr(output.colmap, "bbox") <- as.rectangle(x) if(!do.plot) return(output.colmap) ## plot image without ribbon image.doit(imagedata=list(x=cellbreaks(x$xcol, x$xstep), y=cellbreaks(x$yrow, x$ystep), z=t(x$v)), W=xbox, workaround=workaround, ## list(axes=FALSE, xlab="",ylab=""), dotargs, list(useRaster=useRaster, add=add, show.all=show.all), colourinfo, list(zlim=vrange), list(asp = 1, main = main)) ## if(add && show.all) ## fakemaintitle(x, main, dotargs) do.box.etc(Frame(x), add, dotargs) return(invisible(output.colmap)) } # determine plot region bb <- owin(x$xrange, x$yrange) Width <- diff(bb$xrange) Height <- diff(bb$yrange) Size <- max(Width, Height) switch(ribside, right={ # ribbon to right of image bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * Size, bb$yrange) rib.iside <- 4 }, left={ # ribbon to left of image bb.rib <- owin(bb$xrange[1] - c(ribsep+ribwid, ribsep) * Size, bb$yrange) rib.iside <- 2 }, top={ # ribbon above image bb.rib <- owin(bb$xrange, bb$yrange[2] + c(ribsep, ribsep+ribwid) * Size) rib.iside <- 3 }, bottom={ # ribbon below image bb.rib <- owin(bb$xrange, bb$yrange[1] - c(ribsep+ribwid, ribsep) * Size) rib.iside <- 1 }) bb.all <- boundingbox(bb.rib, bb) attr(output.colmap, "bbox") <- bb.all attr(output.colmap, "bbox.legend") <- bb.rib attr(output.colmap, "side.legend") <- rib.iside 
if(!do.plot) return(output.colmap) pt <- prepareTitle(main) if(!add) { ## establish coordinate system do.call.plotfun(plot.owin, resolve.defaults(list(x=bb.all, type="n", main=pt$blank), dotargs), extrargs=graphicsPars("owin")) } if(show.all) { ## plot title centred over main image area 'bb' do.call.plotfun(plot.owin, resolve.defaults(list(x=bb, type="n", main=main, add=TRUE, show.all=TRUE), dotargs), extrargs=graphicsPars("owin")) main <- "" } # plot image image.doit(imagedata=list(x=cellbreaks(x$xcol, x$xstep), y=cellbreaks(x$yrow, x$ystep), z=t(x$v)), W=xbox, workaround=workaround, list(add=TRUE, show.all=show.all), list(axes=FALSE, xlab="", ylab=""), dotargs, list(useRaster=useRaster), colourinfo, list(zlim=vrange), list(asp = 1, main = main)) ## if(add && show.all) ## fakemaintitle(bb.all, main, ...) # box or axes for image do.box.etc(bb, add, dotargs) # plot ribbon image containing the range of image values rib.npixel <- length(ribbonvalues) + 1 switch(ribside, left=, right={ # vertical ribbon rib.xcoords <- bb.rib$xrange rib.ycoords <- seq(from=bb.rib$yrange[1], to=bb.rib$yrange[2], length.out=rib.npixel) rib.z <- matrix(ribbonvalues, ncol=1) rib.useRaster <- useRaster }, top=, bottom={ # horizontal ribbon rib.ycoords <- bb.rib$yrange rib.xcoords <- seq(from=bb.rib$xrange[1], to=bb.rib$xrange[2], length.out=rib.npixel) rib.z <- matrix(ribbonvalues, nrow=1) # bug workaround rib.useRaster <- FALSE }) image.doit(imagedata=list(x=rib.xcoords, y=rib.ycoords, z=t(rib.z)), W=bb.rib, workaround=workaround, list(add=TRUE, show.all=show.all), ribargs, list(useRaster=rib.useRaster), list(main="", sub="", xlab="", ylab=""), dotargs, colourinfo) # box around ribbon? 
resol <- resolve.defaults(ribargs, dotargs) if(!identical(resol$box, FALSE)) plot(as.owin(bb.rib), add=TRUE) # scale axis for ribbon image ribaxis <- !(identical(resol$axes, FALSE) || identical(resol$ann, FALSE)) if(ribaxis) { ribaxis.iside <- rib.iside ## check for user-supplied xlim, ylim with reverse order ll <- resolve.defaults(ribargs, dotargs, list(xlim=NULL, ylim=NULL)) xlimflip <- is.numeric(ll$xlim) && (diff(ll$xlim) < 0) ylimflip <- is.numeric(ll$ylim) && (diff(ll$ylim) < 0) if(xlimflip) ribaxis.iside <- c(1, 4, 3, 2)[ribaxis.iside] if(ylimflip) ribaxis.iside <- c(3, 2, 1, 4)[ribaxis.iside] ## axisargs <- list(side=ribaxis.iside, labels=ribbonlabels) switch(ribside, right={ scal <- diff(bb.rib$yrange)/diff(ribbonrange) at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$xrange[2], yaxp=c(bb.rib$yrange, length(ribbonticks))) }, left={ scal <- diff(bb.rib$yrange)/diff(ribbonrange) at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$xrange[1], yaxp=c(bb.rib$yrange, length(ribbonticks))) }, top={ scal <- diff(bb.rib$xrange)/diff(ribbonrange) at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$yrange[2], xaxp=c(bb.rib$xrange, length(ribbonticks))) }, bottom={ scal <- diff(bb.rib$xrange)/diff(ribbonrange) at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$yrange[1], xaxp=c(bb.rib$xrange, length(ribbonticks))) }) do.call.plotfun(graphics::axis, resolve.defaults(axisargs, ribargs, dotargs, posargs), extrargs=graphicsPars("axis")) } if(!is.null(riblab)) { riblabel <- if(is.list(riblab)) riblab else list(text=riblab) riblabel$side <- rib.iside do.call(mtext, riblabel) } # return(invisible(output.colmap)) } PlotIm }) invokeColourmapRule <- 
function(colfun, x, ..., zlim=NULL, colargs=list()) {
  ## utility for handling special functions that generate colour maps
  ## either
  ##      function(... range)  -> colourmap
  ##      function(... inputs) -> colourmap
  ## Arguments:
  ##   colfun  - user-supplied function expected to yield a colour map
  ##   x       - pixel image (class "im") whose values will be mapped
  ##   zlim    - optional numeric range combined with the range of pixel values
  ##   colargs - additional arguments passed to 'colfun'
  ## Returns a 'colourmap' object, or NULL if 'colfun' does not conform.
  stopifnot(is.im(x))
  stopifnot(is.function(colfun))
  colargnames <- names(formals(colfun))
  ## Convert it to a 'colourmap'
  colmap <- NULL
  xtype <- x$type
  if(xtype %in% c("real", "integer") && "range" %in% colargnames) {
    ## function(range) -> colourmap
    ## combine the observed finite range of pixel values with any zlim given
    vrange <- range(range(x, finite=TRUE), zlim)
    ## call colfun defensively: it may reject these arguments
    cvals <- try(do.call.matched(colfun,
                                 append(list(range=vrange), colargs)),
                 silent=TRUE)
    if(!inherits(cvals, "try-error")) {
      ## accept either a ready-made colourmap or a vector of colour names
      colmap <- if(inherits(cvals, "colourmap")) cvals else
                if(is.character(cvals)) colourmap(cvals, range=vrange) else NULL
    }
  } else if(xtype != "real" && "inputs" %in% colargnames) {
    ## function(inputs) -> colourmap
    ## enumerate the possible discrete pixel values
    vpossible <- switch(xtype,
                        logical = c(FALSE, TRUE),
                        factor = levels(x),
                        unique(as.matrix(x)))
    ## only attempt a discrete map when the value set is reasonably small
    if(!is.null(vpossible) && length(vpossible) < 256) {
      cvals <- try(do.call.matched(colfun,
                                   append(list(inputs=vpossible), colargs)),
                   silent=TRUE)
      if(!inherits(cvals, "try-error")) {
        colmap <- if(inherits(cvals, "colourmap")) cvals else
                  if(is.character(cvals)) colourmap(cvals, inputs=vpossible) else NULL
      }
    }
  }
  return(colmap)
}

########################################################################

## image() for pixel images is the same operation as plot()
image.im <- plot.im

######################################################################

## contour plot of a pixel image (class "im")
## 'col' may be ordinary colour values or a 'colourmap' applied to contour levels
contour.im <- function (x, ..., main, axes=FALSE, add=FALSE, col=par("fg"),
                        clipwin=NULL, show.all=!add, do.plot=TRUE) {
  defaultmain <- deparse(substitute(x))
  dotargs <- list(...)
  bb <- Frame(x)
  ## return value: the bounding frame, carrying a "bbox" attribute
  result <- bb
  attr(result, "bbox") <- bb
  if(!do.plot) return(result)
  ## main title: default resolved against spatstat.options("par.contour")
  sop <- spatstat.options("par.contour")
  if(missing(main))
    main <- resolve.1.default(list(main=defaultmain), sop)
  pt <- prepareTitle(main)
  ## plotting parameters
  if(missing(add)) {
    force(add) ## use default in formal arguments, unless overridden
    add <- resolve.1.default(list(add=add), sop)
  }
  if(missing(axes)) {
    force(axes)
    axes <- resolve.1.default(list(axes=axes), sop)
  }
  ## never draw axes when adding to an existing plot
  axes <- axes && !add
  ## fallback plain colour for frame/title when 'col' is a colourmap
  col0 <- if(inherits(col, "colourmap")) par("fg") else col
  ## clip to subset
  if(!is.null(clipwin))
    x <- x[clipwin, drop=FALSE]
  #' start plotting
  if(!add) {
    ## new plot - establish coordinate system
    if(axes && show.all) {
      #' standard plot initialisation in base graphics
      do.call.plotfun(plot.default,
                      resolve.defaults(
                        list(x = range(x$xcol),
                             y = range(x$yrow),
                             type = "n"),
                        list(...),
                        list(asp = 1, xlab = "x", ylab = "y",
                             col = col0, main = main)))
    } else {
      #' plot invisible bounding box
      do.call.plotfun(plot.owin,
                      resolve.defaults(list(x=bb, type="n", main=pt$blank),
                                       dotargs),
                      extrargs=graphicsPars("owin"))
    }
  }
  if(show.all && !axes) {
    ## plot title centred over contour region
    do.call.plotfun(plot.owin,
                    resolve.defaults(list(x=bb, main=main,
                                          add=TRUE, show.all=TRUE),
                                     dotargs,
                                     list(col.main=col0)),
                    extrargs=graphicsPars("owin"))
  }
  #' plot contour lines
  if(!inherits(col, "colourmap")) {
    ## ordinary colour values: delegate to contour.default
    do.call.plotfun(contour.default,
                    resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)),
                                     list(add=TRUE, col=col),
                                     list(...)))
  } else {
    ## 'col' is a colourmap: compute the contour lines explicitly,
    ## then draw each one in the colour mapped from its level
    clin <- do.call.matched(contourLines,
                            append(list(x=x$xcol, y=x$yrow, z=t(x$v)),
                                   list(...)))
    linpar <- graphicsPars("lines")
    for(i in seq_along(clin)) {
      lini <- clin[[i]]
      levi <- lini$level
      coli <- col(levi)
      argi <- resolve.defaults(lini[c("x", "y")],
                               list(...),
                               list(col=coli))
      do.call.matched(lines.default, argi, extrargs=linpar)
    }
  }
  return(invisible(result))
}
spatstat/R/bw.diggle.R0000644000176200001440000000527413544333563014324 0ustar liggesusers##
##
bw.diggle.R
##
##   bandwidth selection rule bw.diggle (for density.ppp)
##
##  $Revision: 1.7 $ $Date: 2019/09/30 07:45:33 $
##

bw.diggle <- local({

  #' integrand
  #' phi(x, h): geometric overlap term for the uniform kernel on a disc
  #' of radius h; returns zeros when h <= 0
  phi <- function(x,h) {
    if(h <= 0) return(numeric(length(x)))
    y <- pmax.int(0, pmin.int(1, x/(2 * h)))
    4 * pi * h^2 * (acos(y) - y * sqrt(1 - y^2))
  }

  #' secret option for debugging
  #' extracts method="C" (default) or "interpreted" from the dots
  mf <- function(..., method=c("C", "interpreted")) { match.arg(method) }

  #' Berman-Diggle cross-validation bandwidth selector.
  #'   X          - point pattern (class "ppp")
  #'   correction - edge correction passed to Kest
  #'   hmax       - optional upper limit on the bandwidth scale
  #'   nr         - number of r values at which K is evaluated
  #'   warn       - warn if the optimum lies at an extreme (via bw.optim)
  #' Returns an object of class "bw.optim" (minimiser of the CV criterion M).
  bw.diggle <- function(X, ..., correction="good", hmax=NULL,
                        nr=512, warn=TRUE) {
    stopifnot(is.ppp(X))
    method <- mf(...)
    W <- Window(X)
    lambda <- npoints(X)/area(W)
    rmax <- if(!is.null(hmax)) (4 * hmax) else rmax.rule("K", W, lambda)
    r <- seq(0, rmax, length=nr)
    K <- Kest(X, r=r, correction=correction)
    yname <- fvnames(K, ".y")
    K <- K[, c("r", yname)]
    ## check that K values can be passed to C code
    if(any(bad <- !is.finite(K[[yname]]))) {
      ## throw out bad values
      lastgood <- min(which(bad)) - 1L
      if(lastgood < 2L)
        stop("K function yields too many NA/NaN values")
      K <- K[1:lastgood, ]
    }
    rvals <- K$r
    ## evaluation of M(r) requires K(2r)
    rmax2 <- max(rvals)/2
    if(!is.null(alim <- attr(K, "alim"))) rmax2 <- min(alim[2L], rmax2)
    ok <- (rvals <= rmax2)
    switch(method,
           interpreted = {
             ## slow reference implementation in pure R
             rvals <- rvals[ok]
             nr <- length(rvals)
             J <- numeric(nr)
             for(i in 1:nr)
               J[i] <- stieltjes(phi, K, h=rvals[i])[[yname]]/(2 * pi)
           },
           C = {
             ## fast path: compiled routine operating on increments of K
             nr <- length(rvals)
             nrmax <- sum(ok)
             dK <- diff(K[[yname]])
             ndK <- length(dK)
             z <- .C("digberJ",
                     r=as.double(rvals),
                     dK=as.double(dK),
                     nr=as.integer(nr),
                     nrmax=as.integer(nrmax),
                     ndK=as.integer(ndK),
                     J=as.double(numeric(nrmax)),
                     PACKAGE = "spatstat")
             J <- z$J
             rvals <- rvals[ok]
           })
    pir2 <- pi * rvals^2
    ## M(r): the cross-validation criterion to be minimised
    M <- (1/lambda - 2 * K[[yname]][ok])/pir2 + J/pir2^2
    ## This calculation was for the uniform kernel on B(0,h)
    ## Convert to standard deviation of (one-dimensional marginal) kernel
    sigma <- rvals/2
    result <- bw.optim(M, sigma,
                       creator="bw.diggle",
                       criterion="Berman-Diggle Cross-Validation",
                       J=J,
                       lambda=lambda,
                       warnextreme=warn,
                       hargnames="hmax",
                       unitname=unitname(X))
    return(result)
  }

  ## value of local({...}) is the selector function itself
  bw.diggle
})
spatstat/R/envelopeArray.R0000644000176200001440000000536413333543255015273 0ustar liggesusers#
#   envelopeArray.R
#
#   $Revision: 1.1 $ $Date: 2017/06/05 10:31:58 $
#
#
## Compute an array of simulation envelopes, one for each entry of the
## function array (class "fasp") produced by 'fun' applied to X.
##   X        - data to which 'fun' and 'envelope' are applied
##   fun      - function (or its name) returning an object of class "fasp"
##   dataname - name of X used in labels (defaults to deparsed argument)
##   verb     - print progress for each [i,j] cell
##   reuse    - if TRUE, generate the simulated patterns once and reuse
##              them for every cell of the array
envelopeArray <- function(X, fun, ..., dataname=NULL,verb=FALSE,reuse=TRUE) {
  #'
  if(is.null(dataname))
    dataname <- short.deparse(substitute(X))
  #' determine function name
  f.is.name <- is.name(substitute(fun))
  fname <- if(f.is.name) paste(as.name(substitute(fun)))
           else if(is.character(fun)) fun
           else sQuote("fun")
  #' determine function to be called
  if(is.character(fun)) {
    fun <- get(fun, mode="function")
  } else if(!is.function(fun))
    stop(paste(sQuote("fun"), "should be a function or a character string"))
  #' Apply function to data pattern, to test it
  #' and to determine array dimensions, margin labels etc.
  fX <- do.call.matched(fun, append(list(X), list(...)), matchfirst=TRUE)
  if(!inherits(fX, "fasp"))
    stop("function did not return an object of class 'fasp'")
  d <- dim(fX)
  ## 'witch' maps array position [i,j] to a serial number
  witch <- matrix(1:prod(d), nrow=d[1L], ncol=d[2L],
                  dimnames=dimnames(fX))
  #' make function that extracts [i,j] entry of result
  ijfun <- function(X, ..., i=1, j=1, expectdim=d) {
    fX <- fun(X, ...)
    ## guard: 'fun' must consistently return a fasp of the same shape
    if(!inherits(fX, "fasp"))
      stop("function did not return an object of class 'fasp'")
    if(!all(dim(fX) == expectdim))
      stop("function returned an array with different dimensions")
    return(fX[i,j])
  }

  # ------------ start computing -------------------------------
  if(reuse) {
    ## generate the simulated patterns once ("eject" them from envelope)
    ## so they can be reused for every cell of the array
    L <- do.call(spatstat::envelope,
                 resolve.defaults(
                   list(X, fun=ijfun),
                   list(internal=list(eject="patterns")),
                   list(...),
                   list(verbose=verb)))
    intern <- attr(L, "internal")
  } else intern <- L <- NULL

  # compute function array and build up 'fasp' object
  fns <- list()
  k <- 0
  for(i in 1:nrow(witch)) {
    for(j in 1:ncol(witch)) {
      if(verb) cat("i =",i,"j =",j,"\n")
      ## envelope for the [i,j] entry, reusing simulations if available
      currentfv <- do.call(spatstat::envelope,
                           resolve.defaults(
                             list(X, ijfun),
                             list(simulate=L, internal=intern),
                             list(verbose=FALSE),
                             list(...),
                             list(Yname=dataname),
                             list(i=i, j=j)))
      k <- k+1
      fns[[k]] <- as.fv(currentfv)
    }
  }

  # wrap up into 'fasp' object
  title <- paste("array of envelopes of", fname, "for", dataname)
  rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname,
               title=title, checkfv=FALSE)
  return(rslt)
}
spatstat/R/Kres.R0000644000176200001440000000526713333543254013364 0ustar liggesusers#
#	Kres.R
#
#	Residual K
#
#	$Revision: 1.3 $	$Date: 2013/04/25 06:37:43 $
#
#############################################################################
#
## Residual K function.
##   object - a fitted model / point pattern / quadrature scheme
##            (passed to Kcom), or the "fv" output of Kcom itself
## Returns an "fv" object containing residual versions of the K function
## for each edge correction present in the Kcom result.
Kres <- function(object, ...) {
  if(!is.fv(object)) {
    # usual case where 'object' is a ppm, ppp or quad
    K <- Kcom(object, ...)
  } else {
    # case where 'object' is the output of 'Kcom'
    a <- attr(object, "maker")
    if(is.null(a) || a != "Kcom")
      stop("fv object was not created by Kcom")
    K <- object
    if(length(list(...)) > 0)
      warning("Extra arguments ignored")
  }
  # initialise fv object
  # theoretical value is identically 0 (perfect fit leaves no residual)
  df <- data.frame(r=K$r, theo=numeric(length(K$r)))
  desc <- c("distance argument r",
            "value 0 corresponding to perfect fit")
  ans <- fv(df, "r", substitute(bold(R)~hat(K)(r), NULL),
            "theo", .
~ r ,
            attr(K, "alim"), c("r","bold(R)~%s[theo](r)"), desc,
            fname="K")
  # add residual functions
  # each residual is (empirical estimate) - (compensator) from Kcom
  nam <- names(K)
  if("border" %in% nam)
    ans <- bind.fv(ans,
                   data.frame(bres=with(K, border-bcom)),
                   "bold(R)~hat(%s)[bord](r)",
                   "residual function %s based on border correction",
                   "bres")
  if(all(c("trans","tcom") %in% nam))
    ans <- bind.fv(ans,
                   data.frame(tres=with(K, trans-tcom)),
                   "bold(R)~hat(%s)[trans](r)",
                   "residual function %s based on translation correction",
                   "tres")
  if(all(c("iso","icom") %in% nam))
    ans <- bind.fv(ans,
                   data.frame(ires=with(K, iso-icom)),
                   "bold(R)~hat(%s)[iso](r)",
                   "residual function %s based on isotropic correction",
                   "ires")
  if("ivar" %in% nam) {
    ## variance information present: add pseudo-variance, pseudo-SD,
    ## critical bands and the standardised residual for the isotropic case
    savedotnames <- fvnames(ans, ".")
    ans <- bind.fv(ans,
                   as.data.frame(K)[, c("ivar", "isd", "ihi", "ilo")],
                   c("bold(C)^2~hat(%s)[iso](r)",
                     "sqrt(bold(C)^2~hat(%s)[iso](r))",
                     "bold(R)~hat(%s)[Hi](r)",
                     "bold(R)~hat(%s)[Lo](r)"),
                   c("pseudovariance of isotropic-corrected residual %s",
                     "pseudo-SD of isotropic-corrected residual %s",
                     "upper critical band for isotropic-corrected residual %s",
                     "lower critical band for isotropic-corrected residual %s"),
                   "ires")
    ans <- bind.fv(ans,
                   data.frame(istdres=with(ans, ires/isd)),
                   "bold(T)~hat(%s)[iso](r)",
                   "standardised isotropic-corrected residual %s",
                   "ires")
    fvnames(ans, ".") <- c(savedotnames, c("ihi", "ilo"))
  }
  unitname(ans) <- unitname(K)
  return(ans)
}
spatstat/R/rshift.psp.R0000644000176200001440000000257413564441573014561 0ustar liggesusers#
#  rshift.psp.R
#
#  $Revision: 1.7 $  $Date: 2019/11/18 06:22:50 $
#
## Random shift of a line segment pattern (class "psp"),
## optionally applied independently within groups.
## Currently restricted to rectangular windows and edge="erode".
rshift.psp <- function(X, ..., group=NULL, which=NULL) {
  verifyclass(X, "psp")
  # process arguments
  W <- rescue.rectangle(X$window)
  arglist <- handle.rshift.args(W, ..., edgedefault="erode")
  radius <- arglist$radius
  width <- arglist$width
  height <- arglist$height
  edge <- arglist$edge
  clip <- arglist$clip
  if(W$type != "rectangle")
    stop("Not yet implemented for non-rectangular windows")
  if(edge != "erode")
    stop(paste("Only implemented for edge=", dQuote("erode")))
  # split
into groups if(is.null(group)) Y <- list(X) else { stopifnot(is.factor(group)) stopifnot(length(group) == X$n) Y <- lapply(levels(group), function(l, Z, group) {Z[group == l]}, Z=X, group=group) } ############ loop ################ result <- NULL for(i in seq_along(Y)) { Z <- Y[[i]] # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate segments Zsh <- shift(Z, c(jump$x, jump$y)) Zsh$window <- W # append to result result <- append.psp(result, Zsh) } # clip if(!is.null(clip)) result <- result[clip] return(result) } spatstat/R/FGmultiInhom.R0000644000176200001440000001721413333543254015015 0ustar liggesusers#' #' FGmultiInhom.R #' #' inhomogeneous multitype G and F functions #' #' Original code by Ottmar Cronie and Marie-Colette van Lieshout #' #' Rewritten for spatstat by Adrian Baddeley #' #' GmultiInhom #' FmultiInhom #' #' $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $ GmultiInhom <- function(X, I, J, lambda=NULL, lambdaI=NULL, lambdaJ=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI=NULL, ratio=FALSE){ if(!is.ppp(X) || !is.marked(X)) stop("X should be a marked point pattern") W <- Window(X) nX <- npoints(X) #' handle r argument rmax <- rmax.rule("G", W, intensity(X)) bks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmax) r <- bks$r rmax <- bks$max nr <- length(r) #' Accept any kind of index for I; convert it to a logical index I <- ppsubset(X, I) if(is.null(I)) stop("I must be a valid subset index") XI <- X[I] nI <- sum(I) if (nI == 0) stop("No points satisfy condition I") if(!is.null(ReferenceMeasureMarkSetI)) { check.1.real(ReferenceMeasureMarkSetI) stopifnot(ReferenceMeasureMarkSetI >= 0) } #' likewise for J if(missing(J) || is.null(J)) { J <- rep(TRUE, nX) } else { J <- ppsubset(X, J) } XJ <- X[J] nJ <- sum(J) if (nJ == 0) stop("No points satisfy condition J") #' supply either lambda, or lambdaI and lambdaJ lam.given 
<- !is.null(lambda) lamIJ.given <- !is.null(lambdaI) || !is.null(lambdaJ) if(lam.given == lamIJ.given || is.null(lambdaI) != is.null(lambdaJ)) stop(paste("Supply either a vector lambda of length equal to npoints(X),", "or two vectors lambdaI, lambdaJ of lengths", "equal to npoints(X[I]) and npoints(X[J]) respectively"), call.=FALSE) if(lamIJ.given) { #' lambdaI and lambdaJ given check.nvector(lambdaI, nI, things="points of X[I]") stopifnot(all(lambdaI > 0)) check.nvector(lambdaJ, nJ, things="points of X[J]") stopifnot(all(lambdaJ > 0)) if(is.null(lambdamin)){ stop(paste("Supply lambdamin - a single positive number which is", "smaller than the values in lambdaJ"), call.=FALSE) } check.1.real(lambdamin) stopifnot(lambdamin > 0) stopifnot(lambdamin <= min(lambdaJ)) } else { #' lambda given check.nvector(lambda, nX, things="points of X") stopifnot(all(lambda > 0)) lambdaI <- lambda[I] lambdaJ <- lambda[J] if(is.null(lambdamin)){ stop(paste("Supply lambdamin - a single positive number which is", "smaller than the values in lambda"), call.=FALSE) } check.1.real(lambdamin) stopifnot(lambdamin > 0) stopifnot(lambdamin <= min(lambda)) } #' Calculate 1/lambda(x_i,y_i,m_i)) #' for all (x_i,y_i,m_i) with m_i in I invlambdaI <- 1/lambdaI #' Calculate (1 - lambda_min/lambda(x_i,y_i,m_i)) #' for all (x_i,y_i,m_i) with m_i in J Coeff <- 1-(lambdamin/lambdaJ) ## CoeffMatrix <- matrix(rep(Coeff,times=nI), nrow=nI, byrow=TRUE) #' distances ## DistanceXItoXJ <- crossdist(XI,XJ) #' eroded areas and boundary distances areaWr <- eroded.areas(W, r) bdistXI <- bdist.points(XI) #' for each point x in XI, determine largest r such that x \in W-r ibI <- fastFindInterval(bdistXI, r, labels=TRUE) #' count of points inside W-r for each r ## NumberEroded <- revcumsum(table(ibI)) #' denominator #' sum invlambdaI for all points x \in W-r DenominatorN <- c(sum(invlambdaI), revcumsum(natozero(tapply(invlambdaI, ibI, sum)))) if(!is.null(ReferenceMeasureMarkSetI)) DenominatorA <- areaWr * 
ReferenceMeasureMarkSetI #' local products of weights #' sort data points in order of increasing x coordinate xxI <- XI$x yyI <- XI$y oXI <- fave.order(xxI) xIord <- xxI[oXI] yIord <- yyI[oXI] #' xxJ <- XJ$x yyJ <- XJ$y vvJ <- Coeff oXJ <- fave.order(xxJ) xJord <- xxJ[oXJ] yJord <- yyJ[oXJ] vJord <- vvJ[oXJ] # compute local cumulative products z <- .C("locxprod", ntest = as.integer(nI), xtest = as.double(xIord), ytest = as.double(yIord), ndata = as.integer(nJ), xdata = as.double(xJord), ydata = as.double(yJord), vdata = as.double(vJord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nI * nr)), PACKAGE = "spatstat") ans <- matrix(z$ans, nrow=nr, ncol=nI) #' revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=nI) loccumprod[, oXI] <- ans #' border correction outside <- outer(r, bdistXI, ">") loccumprod[outside] <- 0 #' weight by 1/lambdaI wlcp <- loccumprod * matrix(invlambdaI, byrow=TRUE, nr, nI) #' sum over I for each fixed r numer <- .rowSums(wlcp, nr, nI) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lambdamin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(nI, nr) fname <- c("G", "list(inhom,I,J)") G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom, I, J](r)), "theo", NULL, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(G[list(inhom,I,J)](r)), ratio=ratio) # add border corrected (Hamilton Principle) estimate G <- bind.ratfv(G, data.frame(bord=DenominatorN-numer), DenominatorN, makefvlabel(NULL, "hat", fname, "bord"), "border estimate of %s", "bord", ratio=ratio) fvnames(G, ".") <- c("bord", "theo") # add modified border corrected (non-Hamilton-Principle) estimate if(!is.null(ReferenceMeasureMarkSetI)) { G <- bind.ratfv(G, data.frame(bordm=DenominatorA-numer), DenominatorA, makefvlabel(NULL, "hat", fname, "bordm"), "modified border estimate of %s", "bordm", ratio=ratio) fvnames(G, ".") <- c("bord", "bordm", "theo") } # formula(G) <- . 
~ r unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) return(G) } #' marked inhomogeneous F FmultiInhom <- function(X, J, lambda=NULL,lambdaJ=NULL, lambdamin=NULL, ..., r=NULL) { if(!is.ppp(X) || !is.marked(X)) stop("X should be a marked point pattern") nX <- npoints(X) #' Accept any kind of index for J; convert it to a logical index J <- ppsubset(X, J) if(is.null(J)) stop("J must be a valid subset index") XJ <- X[J] nJ <- sum(J) if (nJ == 0) stop("No points satisfy condition J") if(is.null(lambda) == is.null(lambdaJ)) stop(paste("Supply either a vector lambda of length equal to npoints(X),", "or a vector lambdaJ of length equal to npoints(X[J])"), call.=FALSE) if(is.null(lambdamin)) stop("Supply a value for lambdamin", call.=FALSE) check.1.real(lambdamin) if(!is.null(lambda)) { check.nvector(lambda, nX) stopifnot(all(lambda > 0)) stopifnot(lambdamin <= min(lambda[J])) lambdaJ <- lambda[J] } else { check.nvector(lambdaJ, nJ) stopifnot(all(lambdaJ > 0)) stopifnot(lambdamin <= min(lambdaJ)) } FJ <- Finhom(XJ, lambda=lambdaJ, lmin=lambdamin, r=r) FJ <- rebadge.fv(FJ, new.ylab = quote(F[inhom, J](r)), new.fname = c("F", "list(inhom,J)"), new.yexp = quote(F[list(inhom,J)](r))) return(FJ) } spatstat/R/Kmulti.inhom.R0000644000176200001440000004064513551505525015036 0ustar liggesusers# # Kmulti.inhom.S # # $Revision: 1.52 $ $Date: 2019/05/24 10:21:38 $ # # # ------------------------------------------------------------------------ Lcross.inhom <- function(X, i, j, ..., correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] if(missing(correction)) correction <- NULL K <- Kcross.inhom(X, i, j, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) # relabel the fv object L <- rebadge.fv(L, substitute(L[inhom,i,j](r), list(i=iname,j=jname)), c("L", paste0("list", 
paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(L[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } Ldot.inhom <- function(X, i, ..., correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(correction)) correction <- NULL K <- Kdot.inhom(X, i, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[inhom, i ~ dot](r), list(i=iname)), c("L", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(L[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } "Kcross.inhom" <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) K <- Kmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIJ, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(K, substitute(K[inhom,i,j](r), list(i=iname,j=jname)), c("K", paste0("list", paren(paste("inhom", i, j, sep=",")))), 
new.yexp=substitute(K[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(K, "dangerous") return(result) } "Kdot.inhom" <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points Iname <- paste("points with mark i =", i) Jname <- paste("points") K <- Kmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIdot, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) result <- rebadge.fv(K, substitute(K[inhom, i ~ dot](r), list(i=iname)), c("K", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(K[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(K, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } "Kmulti.inhom"<- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") dflt <- list(Iname="points satisfying condition I", Jname="points satisfying condition J", miss.update=missing(update), miss.leave=missing(leaveoneout)) extrargs <- resolve.defaults(list(...), dflt) if(length(extrargs) > length(dflt)) warning("Additional arguments unrecognised") Iname <- 
extrargs$Iname Jname <- extrargs$Jname miss.update <- extrargs$miss.update miss.leave <- extrargs$miss.leave npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # validate edge correction correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) # validate I, J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") XI <- X[I] XJ <- X[J] nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) # r values rmaxdefault <- rmax.rule("K", W, nJ/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE ## intensity data if(!is.null(lambdaX)) { ## Intensity values for all points of X if(!is.null(lambdaI)) warning("lambdaI was ignored, because lambdaX was given", call.=FALSE) if(!is.null(lambdaJ)) warning("lambdaJ was ignored, because lambdaX was given", call.=FALSE) if(is.im(lambdaX)) { ## Look up intensity values lambdaI <- safelookup(lambdaX, X[I]) lambdaJ <- safelookup(lambdaX, X[J]) } else if(is.function(lambdaX)) { ## evaluate function at locations lambdaI <- lambdaX(XI$x, XI$y) lambdaJ <- lambdaX(XJ$x, XJ$y) } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) { ## vector of intensity values if(length(lambdaX) != npts) stop(paste("The length of", sQuote("lambdaX"), "should equal the number of points of X")) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } else if(is.ppm(lambdaX) || is.kppm(lambdaX) || is.dppm(lambdaX)) { ## 
point process model provides intensity model <- lambdaX if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] dangerI <- dangerJ <- FALSE dangerous <- "lambdaIJ" if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste("Argument lambdaX is not understood:", "it should be a numeric vector,", "an image, a function(x,y)", "or a fitted point process model (ppm, kppm or dppm)")) } else { ## lambdaI, lambdaJ expected if(is.null(lambdaI)) { ## estimate intensity dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") lambdaI <- density(X[I], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaI)) { ## look up intensity values lambdaI <- safelookup(lambdaI, X[I]) } else if(is.function(lambdaI)) { ## evaluate function at locations lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { ## validate intensity vector if(length(lambdaI) != nI) stop(paste("The length of", sQuote("lambdaI"), "should equal the number of", Iname)) } else if(is.ppm(lambdaI) || is.kppm(lambdaI) || is.dppm(lambdaI)) { ## point process model provides intensity model <- lambdaI if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { #' kppm or dppm model <- update(model, 
X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaI <- lambdaX[I] dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaI"), "should be a vector or an image")) if(is.null(lambdaJ)) { ## estimate intensity dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") lambdaJ <- density(X[J], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaJ)) { ## look up intensity values lambdaJ <- safelookup(lambdaJ, X[J]) } else if(is.function(lambdaJ)) { ## evaluate function at locations XJ <- X[J] lambdaJ <- lambdaJ(XJ$x, XJ$y) } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) { ## validate intensity vector if(length(lambdaJ) != nJ) stop(paste("The length of", sQuote("lambdaJ"), "should equal the number of", Jname)) } else if(is.ppm(lambdaJ) || is.kppm(lambdaJ) || is.dppm(lambdaJ)) { ## point process model provides intensity model <- lambdaJ if(!update) { ## just use intensity of fitted model lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) if(leaveoneout && !miss.leave) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaJ <- lambdaX[J] dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") if(miss.update) warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) } ## Weight for each pair if(!is.null(lambdaIJ)) { dangerIJ <- TRUE dangerous <- union(dangerous, "lambdaIJ") 
if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iname)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jname)) } else { dangerIJ <- FALSE } danger <- dangerI || dangerJ || dangerIJ # Recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") fname <- c("K", "list(inhom,I,J)") K <- fv(K, "r", quote(K[inhom, I, J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(K[list(inhom,I,J)](r))) # identify close pairs of points close <- crosspairs(XI, XJ, max(r), what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair if(is.null(lambdaIJ)) weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) else weight <- 1/lambdaIJ[cbind(icloseI, jcloseJ)] # Compute estimates by each of the selected edge corrections. 
if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(XI) bI <- b[icloseI] # apply reduced sample algorithm RS <- Kwtsum(dclose, bI, weight, b, 1/lambdaI, breaks) if(any(correction == "border")) { Kb <- RS$ratio K <- bind.fv(K, data.frame(border=Kb), makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border") } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) K <- bind.fv(K, data.frame(bord.modif=Kbm), makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "translate")) { ## translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Ktrans <- cumsum(wh)/areaW rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Kiso <- cumsum(wh)/areaW rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), makefvlabel(NULL, "hat", fname, "iso"), "Ripley isotropic correction estimate of %s", "iso") } ## default is to display them all formula(K) <- . 
~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } spatstat/R/bugtable.R0000644000176200001440000000622113623712055014233 0ustar liggesusers#' #' bugtable.R #' #' $Revision: 1.6 $ $Date: 2020/02/21 08:16:43 $ bugfixes <- function(sinceversion=NULL, sincedate=NULL, package="spatstat", show=TRUE) { if(package == "spatstat" && ("book" %in% list(sinceversion, sincedate))) { ## special usage sinceversion <- "1.42-1" sincedate <- NULL } if("all" %in% list(sinceversion, sincedate)) { ## no date constraint a <- eval(substitute(news(grepl("^BUG", Category), package=package))) } else if(!is.null(sincedate) && package != "spatstat") { #' another package: news items after specified date ne <- news(package=package) if(is.null(ne) || is.null(ne$Date) || anyNA(ne$Date)) stop(paste(if(is.null(ne)) "News" else "Date", "information is not available for package", sQuote(package)), call.=FALSE) a <- eval(substitute(news(Date >= SD & grepl("^BUG", Category), package=package), list(SD=sincedate))) } else { #' determine a corresponding version number if(is.null(sinceversion) && is.null(sincedate)) { #' default is latest version dfile <- system.file("DESCRIPTION", package=package) sinceversion <- read.dcf(file=dfile, fields="Version") } else if(!is.null(sincedate) && package == "spatstat") { #' read spatstat release history table fname <- system.file("doc", "packagesizes.txt", package="spatstat") p <- read.table(fname, header=TRUE, stringsAsFactors=FALSE) #' find earliest package version on or after the given date imin <- with(p, min(which(as.Date(date) >= sincedate))) sinceversion <- p[imin, "version"] } a <- eval(substitute(news(Version >= sv & grepl("^BUG", Category), package=package), list(sv=sinceversion))) } if(!is.data.frame(a) || nrow(a) == 0) { if(show) message("No bugs reported") return(invisible(NULL)) } #' split each entry into lines alines <- strsplit(a$Text, "\n") #' extract first line f <- unname(sapply(alines, "[", i=1L)) #' extract 
body b <- unname(lapply(alines, "[", i=-1L)) b <- unname(sapply(b, paste, collapse="\n")) #' extract header from first line h <- unname(sapply(strsplit(f, ":"), "[", i=1L)) h <- unname(sapply(strsplit(h, ","), "[", i=1L)) h <- unname(sapply(strsplit(h, " "), "[", i=1L)) #' sort by header oo <- order(h, f) #' rebuild z <- data.frame(Header=h[oo], Firstline=f[oo], Body=b[oo], Version=a$Version[oo], stringsAsFactors=FALSE) class(z) <- c("bugtable", class(z)) if(!show) return(z) page(z, method="print") return(invisible(z)) } class(bugfixes) <- "autoexec" print.bugtable <- function(x, ...) { hprev <- "" for(i in seq_len(nrow(x))) { h <- x$Header[i] f <- x$Firstline[i] if(h != hprev) { # new main header cat("\n***", h, "***\n", fill=TRUE) } cat(x$Version[i], ":", f, fill=TRUE) cat(x$Body[i], "\n", fill=TRUE) hprev <- h } return(invisible(NULL)) } spatstat/R/hierstrauss.R0000644000176200001440000002110613333543255015023 0ustar liggesusers## ## hierstrauss.R ## ## $Revision: 1.10 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical Strauss process ## ## HierStrauss() create an instance of the hierarchical Strauss process ## [an object of class 'interact'] ## ## ------------------------------------------------------------------- ## HierStrauss <- local({ # ......... 
define interaction potential HSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. 
delHS <- function(which, types, radii, archy) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(HierStrauss(types=types, radii=radii, archy=archy)) } # Set up basic object except for family and parameters BlankHSobject <- list( name = "Hierarchical Strauss process", creator = "HierStrauss", family = "hierpair.family", # evaluated later pot = HSpotential, par = list(types=NULL, radii=NULL, archy=NULL), # filled in later parnames = c("possible types", "interaction distances", "hierarchical order"), hasInf = FALSE, selfstart = function(X, self) { if(!is.null(self$par$types) && !is.null(self$par$archy)) return(self) types <- self$par$types %orifnull% levels(marks(X)) archy <- self$par$archy %orifnull% types HierStrauss(types=types,radii=self$par$radii,archy=archy) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii"), asymmok=TRUE) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(radii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") splat("Interaction radii:") print(hiermat(radii, self$par$archy)) invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrix of Strauss interaction radii r <- 
self$par$radii # list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(round(gammas, 4), self$par$archy))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interaction parameters must be non-explosive d <- diag(rep(TRUE, nrow(radii))) return(all(gamma[required & d] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types archy <- self$par$archy # problems? uptri <- archy$relation required <- !is.na(radii) & uptri okgamma <- !uptri | (is.finite(gamma) & (gamma <= 1)) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHS(naughty, types, radii, archy)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHS, types=types, radii=radii, archy=archy) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$radii active <- !is.na(r) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankHSobject) <- "interact" # finally create main function HierStrauss <- function(radii, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } radii[radii == 0] <- NA out <- instantiate.interact(BlankHSobject, list(types=types, radii=radii, archy=archy)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } HierStrauss <- intermaker(HierStrauss, BlankHSobject) HierStrauss }) spatstat/R/rescue.rectangle.R0000644000176200001440000000137513353334550015704 0ustar liggesusers# # rescue.rectangle.R # # $Revision: 1.7 $ $Date: 2018/09/28 05:05:49 $ # rescue.rectangle <- function(W) { verifyclass(W, "owin") if(W$type == "mask" && all(W$m)) return(owin(W$xrange, W$yrange, unitname=unitname(W))) if(W$type == "polygonal" && length(W$bdry) == 1) { x <- W$bdry[[1]]$x y <- W$bdry[[1]]$y if(length(x) == 4 && length(y) == 4) { # could be a rectangle ux <- veryunique(x) uy <- veryunique(y) if(length(ux) == 2 && length(uy) == 2) return(owin(ux,uy, unitname=unitname(W))) } } return(W) } veryunique <- function(z) { uz <- sortunique(z) epsilon <- 2 * .Machine$double.eps * diff(range(uz)) close <- (diff(uz) <= epsilon) uz <- uz[c(TRUE, !close)] return(uz) } spatstat/R/linfun.R0000644000176200001440000000755513517767230013763 0ustar liggesusers# # linfun.R # # Class of functions of location on a linear network # # $Revision: 1.14 $ $Date: 2019/07/30 07:12:50 $ # linfun <- function(f, L) { stopifnot(is.function(f)) stopifnot(inherits(L, "linnet")) fargs <- names(formals(f)) needargs <- c("x", "y", "seg", "tp") if(!all(needargs %in% fargs)) stop(paste("Function must 
have formal arguments", commasep(sQuote(needargs))), call.=FALSE) otherfargs <- setdiff(fargs, needargs) g <- function(...) { argh <- list(...) extra <- names(argh) %in% otherfargs if(!any(extra)) { X <- as.lpp(..., L=L) value <- do.call(f, as.list(coords(X))) } else { extrargs <- argh[extra] mainargs <- argh[!extra] X <- do.call(as.lpp, append(mainargs, list(L=L))) value <- do.call(f, append(as.list(coords(X)), extrargs)) } return(value) } class(g) <- c("linfun", class(g)) attr(g, "L") <- L attr(g, "f") <- f return(g) } print.linfun <- function(x, ...) { L <- as.linnet(x) if(!is.null(explain <- attr(x, "explain"))) { explain(x) } else { splat("Function on linear network:") print(attr(x, "f"), ...) splat("Function domain:") print(L) } invisible(NULL) } summary.linfun <- function(object, ...) { print(object, ...) } as.linim.linfun <- function(X, L=domain(X), ..., eps = NULL, dimyx = NULL, xy = NULL, delta=NULL) { if(is.null(L)) L <- domain(X) #' create template typical <- X(runiflpp(1, L), ...) if(length(typical) != 1) stop(paste("The function must return a single value", "when applied to a single point")) Y <- as.linim(typical, L, eps=eps, dimyx=dimyx, xy=xy, delta=delta) # extract coordinates of sample points along network df <- attr(Y, "df") coo <- df[, c("x", "y", "mapXY", "tp")] colnames(coo)[3L] <- "seg" # evaluate function at sample points vals <- do.call(X, append(as.list(coo), list(...))) # write values in data frame df$values <- vals attr(Y, "df") <- df #' overwrite values in pixel array Y$v[] <- NA pix <- nearest.raster.point(df$xc, df$yc, Y) Y$v[cbind(pix$row, pix$col)] <- vals #' return(Y) } as.data.frame.linfun <- function(x, ...) { as.data.frame(as.linim(x, ...)) } as.linfun.linim <- function(X, ...) 
{ trap.extra.arguments(..., .Context="as.linfun.linim") ## extract info L <- as.linnet(X) df <- attr(X, "df") ## function values and corresponding locations values <- df$values locations <- with(df, as.lpp(x=x, y=y, seg=mapXY, tp=tp, L=L)) ## Function that maps any spatial location to the nearest data location nearestloc <- nnfun(locations) ## Function that reads value at nearest data location f <- function(x, y, seg, tp) { values[nearestloc(x,y,seg,tp)] } g <- linfun(f, L) return(g) } plot.linfun <- function(x, ..., L=NULL, main) { if(missing(main)) main <- short.deparse(substitute(x)) if(is.null(L)) L <- as.linnet(x) argh <- list(...) fargnames <- get("otherfargs", envir=environment(x)) resolution <- c("eps", "dimyx", "xy", "delta") convert <- names(argh) %in% c(fargnames, resolution) Z <- do.call(as.linim, append(list(x, L=L), argh[convert])) rslt <- do.call(plot.linim, append(list(Z, main=main), argh[!convert])) return(invisible(rslt)) } as.owin.linfun <- function(W, ...) { as.owin(as.linnet(W)) } domain.linfun <- as.linnet.linfun <- function(X, ...) { attr(X, "L") } as.function.linfun <- function(x, ...) { nax <- names(attributes(x)) if(!is.null(nax)) { retain <- (nax == "srcref") attributes(x)[!retain] <- NULL } return(x) } integral.linfun <- function(f, domain=NULL, ..., delta) { if(missing(delta)) delta <- NULL integral(as.linim(f, delta=delta), domain=domain, ...) } as.linfun <- function(X, ...) { UseMethod("as.linfun") } as.linfun.linfun <- function(X, ...) 
{ return(X) } spatstat/R/quickndirty.R0000644000176200001440000001142213513112236015004 0ustar liggesusers#' #' quick-and-dirty KDE for points on a network #' #' Copyright (C) 2019 Adrian Baddeley, Suman Rakshit and Tilman Davies #' #' $Revision: 1.4 $ $Date: 2019/07/15 14:58:34 $ densityQuick.lpp <- function(X, sigma=NULL, ..., kernel="gaussian", at=c("pixels", "points"), what=c("estimate", "se", "var"), leaveoneout=TRUE, diggle = FALSE, edge2D = FALSE, weights=NULL, positive=FALSE) { #' kernel density estimation stopifnot(is.lpp(X)) what <- match.arg(what) if(is.function(sigma)) sigma <- sigma(X) qkdeEngine(X=X, sigma=sigma, kernel=kernel, at=at, what=what, leaveoneout=leaveoneout, diggle=diggle, edge2D=edge2D, weights=weights, positive=positive, ...) } qkdeEngine <- function(X, sigma=NULL, ..., at=c("pixels", "points"), what=c("estimate", "se", "var"), leaveoneout=TRUE, diggle = FALSE, raw=FALSE, edge2D = FALSE, edge = edge2D, weights=NULL, varcov=NULL, positive=FALSE, shortcut=TRUE, precomputed=NULL, savecomputed=FALSE) { stopifnot(is.lpp(X)) at <- match.arg(at) what <- match.arg(what) L <- domain(X) S <- as.psp(L) XX <- as.ppp(X) stuff <- resolve.2D.kernel(x=XX, sigma=sigma, varcov=varcov, ...) 
sigma <- stuff$sigma varcov <- stuff$varcov switch(what, estimate = { if(shortcut) { PS <- precomputed$PS %orifnull% pixellate(S, ..., DivideByPixelArea=TRUE) KS <- blur(PS, sigma, normalise=edge2D, bleed=FALSE, ..., varcov=varcov) } else { KS <- density(S, sigma, ..., edge=edge2D, varcov=varcov) } if(diggle && !raw) weights <- (weights %orifnull% 1) / KS[XX] KX <- density(XX, sigma, ..., weights=weights, at=at, leaveoneout=leaveoneout, edge=edge2D, diggle=FALSE, positive=FALSE, varcov=varcov) }, se = , var= { tau <- taumat <- NULL if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(ensure2vector(sigma))) tau <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) taumat <- varcov/2 } if(shortcut) { PS <- precomputed$PS %orifnull% pixellate(S, ..., DivideByPixelArea=TRUE) KS <- blur(PS, sigma, normalise=edge2D, bleed=FALSE, varcov=varcov)^2 } else { KS <- density(S, sigma, ..., edge=edge2D, varcov=varcov)^2 } if(diggle && !raw) weights <- (weights %orifnull% 1) / KS[XX] KX <- varconst * density(XX, sigma=tau, ..., weights=weights, at=at, leaveoneout=leaveoneout, edge=edge2D, diggle=FALSE, positive=FALSE, varcov=taumat) }) switch(at, points = { result <- if(diggle || raw) KX else (KX/(KS[XX])) if(positive) result <- pmax(result, .Machine$double.xmin) if(savecomputed) { #' save geometry info for re-use savedstuff <- list(PS=PS, M=solutionset(PS > 0), df=NULL) } }, pixels = { Z <- if(diggle || raw) KX else (KX/KS) M <- if(shortcut) { precomputed$M %orifnull% solutionset(PS > 0) } else as.mask.psp(S, KS) Z <- Z[M, drop=FALSE] #' build linim object, using precomputed sample points if available result <- linim(L, Z, restrict=FALSE, df=precomputed$df) if(positive) result <- eval.linim(pmax(result, .Machine$double.xmin)) if(savecomputed) { #' save geometry info for re-use dfg <- attr(result, "df") dfg <- dfg[, colnames(dfg) != "values"] savedstuff <- list(PS=PS, M=M, df=dfg) } }) if(what == "se") result <- sqrt(result) attr(result, "sigma") <- sigma 
attr(result, "varcov") <- varcov if(raw) attr(result, "denominator") <- KS if(savecomputed) attr(result, "savedstuff") <- savedstuff return(result) } spatstat/R/kmrs.R0000644000176200001440000001721613333543255013432 0ustar liggesusers# # kmrs.S # # S code for Kaplan-Meier, Reduced Sample and Hanisch # estimates of a distribution function # from _histograms_ of censored data. # # kaplan.meier() # reduced.sample() # km.rs() # # $Revision: 3.26 $ $Date: 2013/06/27 08:59:16 $ # # The functions in this file produce vectors `km' and `rs' # where km[k] and rs[k] are estimates of F(breaks[k+1]), # i.e. an estimate of the c.d.f. at the RIGHT endpoint of the interval. # "kaplan.meier" <- function(obs, nco, breaks, upperobs=0) { # obs: histogram of all observations : min(T_i,C_i) # nco: histogram of noncensored observations : T_i such that T_i <= C_i # breaks: breakpoints (vector or 'breakpts' object, see breaks.S) # upperobs: number of observations beyond rightmost breakpoint # breaks <- as.breakpts(breaks) n <- length(obs) if(n != length(nco)) stop("lengths of histograms do not match") check.hist.lengths(nco, breaks) # # # reverse cumulative histogram of observations d <- revcumsum(obs) + upperobs # # product integrand s <- ifelseXB(d > 0, 1 - nco/d, 1) # km <- 1 - cumprod(s) # km has length n; km[i] is an estimate of F(r) for r=breaks[i+1] # widths <- diff(breaks$val) lambda <- numeric(n) pos <- (s > 0) lambda[pos] <- -log(s[pos])/widths[pos] # lambda has length n; lambda[i] is an estimate of # the average of \lambda(r) over the interval (breaks[i],breaks[i+1]). 
# return(list(km=km, lambda=lambda)) } "reduced.sample" <- function(nco, cen, ncc, show=FALSE, uppercen=0) # nco: histogram of noncensored observations: T_i such that T_i <= C_i # cen: histogram of all censoring times: C_i # ncc: histogram of censoring times for noncensored obs: # C_i such that T_i <= C_i # # Then nco[k] = #{i: T_i <= C_i, T_i \in I_k} # cen[k] = #{i: C_i \in I_k} # ncc[k] = #{i: T_i <= C_i, C_i \in I_k}. # # The intervals I_k must span an interval [0,R] beginning at 0. # If this interval did not include all censoring times, # then `uppercen' must be the number of censoring times # that were not counted in 'cen'. { n <- length(nco) if(n != length(cen) || n != length(ncc)) stop("histogram lengths do not match") # # denominator: reverse cumulative histogram of censoring times # denom(r) = #{i : C_i >= r} # We compute # cc[k] = #{i: C_i > breaks[k]} # except that > becomes >= for k=0. # cc <- revcumsum(cen) + uppercen # # # numerator # #{i: T_i <= r <= C_i } # = #{i: T_i <= r, T_i <= C_i} - #{i: C_i < r, T_i <= C_i} # We compute # u[k] = #{i: T_i <= C_i, T_i <= breaks[k+1]} # - #{i: T_i <= C_i, C_i <= breaks[k]} # = #{i: T_i <= C_i, C_i > breaks[k], T_i <= breaks[k+1]} # this ensures that numerator and denominator are # comparable, u[k] <= cc[k] always. # u <- cumsum(nco) - c(0,cumsum(ncc)[1:(n-1)]) rs <- u/cc # # Hence rs[k] = u[k]/cc[k] is an estimator of F(r) # for r = breaks[k+1], i.e. for the right hand end of the interval. 
# if(!show) return(rs) else return(list(rs=rs, numerator=u, denominator=cc)) } "km.rs" <- function(o, cc, d, breaks) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val # compile histograms (breakpoints may not span data) obs <- whist( o, breaks=bval) nco <- whist( o[d], breaks=bval) cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) # number of observations exceeding largest breakpoint upperobs <- attr(obs, "high") uppercen <- attr(cen, "high") # go km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # return(list(rs=rs, km=km$km, hazard=km$lambda, r=breaks$r, breaks=bval)) } "km.rs.opt" <- function(o, cc, d, breaks, KM=TRUE, RS=TRUE) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val out <- list(r=breaks$r, breaks=bval) if(KM || RS) nco <- whist( o[d], breaks=bval) if(KM) { obs <- whist( o, breaks=bval) upperobs <- attr(obs, "high") km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) out <- append(list(km=km$km, hazard=km$lambda), out) } if(RS) { cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) uppercen <- attr(cen, "high") rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) out <- append(list(rs=rs), out) } return(out) } censtimeCDFest <- function(o, cc, d, breaks, ..., KM=TRUE, RS=TRUE, HAN=TRUE, RAW=TRUE, han.denom=NULL, tt=NULL, pmax=0.9) { # Histogram-based estimation of cumulative distribution function # of lifetimes subject to censoring. 
# o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # han.denom: denominator (eroded area) for each value of r # tt: uncensored lifetimes T_i, if known breaks <- as.breakpts(breaks) bval <- breaks$val rval <- breaks$r rmax <- breaks$max # Kaplan-Meier and/or Reduced Sample out <- km.rs.opt(o, cc, d, breaks, KM=KM, RS=RS) # convert to data frame out$breaks <- NULL df <- as.data.frame(out) # Raw ecdf of observed lifetimes if available if(RAW && !is.null(tt)) { h <- whist(tt[tt <= rmax], breaks=bval) df <- cbind(df, data.frame(raw=cumsum(h)/length(tt))) } # Hanisch if(HAN) { if(is.null(han.denom)) stop("Internal error: missing denominator for Hanisch estimator") if(length(han.denom) != length(rval)) stop(paste("Internal error:", "length(han.denom) =", length(han.denom), "!=", length(rval), "= length(rvals)")) # uncensored distances x <- o[d] # calculate Hanisch estimator h <- whist(x[x <= rmax], breaks=bval) H <- cumsum(h/han.denom) df <- cbind(df, data.frame(han=H/max(H[is.finite(H)]))) } # determine appropriate plotting range bestest <- if(KM) "km" else if(HAN) "han" else if(RS) "rs" else "raw" alim <- range(df$r[df[[bestest]] <= pmax]) # convert to fv object nama <- c("r", "km", "hazard", "han", "rs", "raw") avail <- c(TRUE, KM, KM, HAN, RS, RAW) iscdf <- c(FALSE, TRUE, FALSE, TRUE, TRUE, TRUE) labl <- c("r", "hat(%s)[km](r)", "lambda(r)", "hat(%s)[han](r)", "hat(%s)[bord](r)", "hat(%s)[raw](r)")[avail] desc <- c("distance argument r", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)", "Hanisch estimate of %s", "border corrected estimate of %s", "uncorrected estimate of %s")[avail] df <- df[, nama[avail]] Z <- fv(df, "r", substitute(CDF(r), NULL), bestest, . 
~ r, alim, labl, desc, fname="CDF") fvnames(Z, ".") <- nama[iscdf & avail] return(Z) } # simple interface for students and code development compileCDF <- function(D, B, r, ..., han.denom=NULL, check=TRUE) { han <- !is.null(han.denom) breaks <- breakpts.from.r(r) if(check) { stopifnot(length(D) == length(B) && all(D >= 0) && all(B >= 0)) if(han) stopifnot(length(han.denom) == length(r)) } D <- as.vector(D) B <- as.vector(B) # observed (censored) lifetimes o <- pmin.int(D, B) # censoring indicators d <- (D <= B) # go result <- censtimeCDFest(o, B, d, breaks, HAN=han, han.denom=han.denom, RAW=TRUE, tt=D) result <- rebadge.fv(result, new.fname="compileCDF") } spatstat/R/diagnoseppm.R0000644000176200001440000003457113545236327014773 0ustar liggesusers# # diagnoseppm.R # # Makes diagnostic plots based on residuals or energy weights # # $Revision: 1.44 $ $Date: 2019/10/02 10:33:21 $ # diagnose.ppm.engine <- function(object, ..., type="eem", typename, opt, sigma=NULL, rbord=reach(object), compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, rv=NULL, oldstyle=FALSE, splineargs = list(spar=0.5), verbose=TRUE) { if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # quadrature points Q <- quad.ppm(object) U <- union.quad(Q) Qweights <- w.quad(Q) # -------------- Calculate residuals/weights ------------------- # Discretised residuals if(type == "eem") { residval <- if(!is.null(rv)) rv else eem(object, check=FALSE) residval <- as.numeric(residval) X <- data.ppm(object) Y <- X %mark% residval } else { if(!is.null(rv) && !inherits(rv, "msr")) stop("rv should be a measure (object of class msr)") residobj <- if(!is.null(rv)) rv else residuals.ppm(object, type=type, check=FALSE) residval <- with(residobj, "increment") if(ncol(as.matrix(residval)) > 1L) stop("Not implemented for vector-valued residuals; use [.msr to split into separate components") Y <- U %mark% residval } # Atoms and density of measure Ymass <- NULL 
Ycts <- NULL Ydens <- NULL if(compute.cts) { if(type == "eem") { Ymass <- Y Ycts <- U %mark% (-1) Ydens <- as.im(-1, Y$window) } else { atoms <- with(residobj, "is.atom") masses <- with(residobj, "discrete") cts <- with(residobj, "density") if(!is.null(atoms) && !is.null(masses) && !is.null(cts)) { Ymass <- (U %mark% masses)[atoms] Ycts <- U %mark% cts # remove NAs (as opposed to zero cif points) if(!all(ok <- is.finite(cts))) { U <- U[ok] Ycts <- Ycts[ok] cts <- cts[ok] Qweights <- Qweights[ok] } # interpolate continuous part to yield an image for plotting if(type == "inverse" && all(cts != 0)) { Ydens <- as.im(-1, Y$window) } else if(is.stationary.ppm(object) && is.poisson.ppm(object)) { # all values of `cts' will be equal Ydens <- as.im(cts[1L], Y$window) } else { smallsigma <- maxnndist(Ycts) Ujitter <- U Ujitter$x <- U$x + runif(U$n, -smallsigma, smallsigma) Ujitter$y <- U$y + runif(U$n, -smallsigma, smallsigma) Ydens <- Smooth(Ujitter %mark% marks(Ycts), sigma=smallsigma, weights=Qweights, edge=TRUE, ...) } } } } #---------------- Erode window --------------------------------- # ## Compute windows W <- Y$window # Erode window if required clip <- !is.null(rbord) && is.finite(rbord) && (rbord > 0) if(clip) { Wclip <- erosion.owin(W, rbord) Yclip <- Y[Wclip] Qweightsclip <- Qweights[inside.owin(U, , Wclip)] if(!is.null(Ycts)) Ycts <- Ycts[Wclip] if(!is.null(Ydens)) Ydens <- Ydens[Wclip, drop=FALSE] } else { Wclip <- W Yclip <- Y } # ------------ start collecting results ------------------------- result <- list(type=type, clip=clip, Y=Y, W=W, Yclip=Yclip, Ymass=Ymass, Ycts=Ycts, Ydens=Ydens) # ------------- smoothed field ------------------------------ Z <- NULL if(opt$smooth | opt$xcumul | opt$ycumul | opt$xmargin | opt$ymargin) { if(is.null(sigma)) sigma <- 0.1 * diameter(Wclip) Z <- density.ppp(Yclip, sigma, weights=Yclip$marks, edge=TRUE, ...) 
} if(opt$smooth) { result$smooth <- list(Z = Z, sigma=sigma) if(type == "pearson") result$smooth$sdp <- 1/(2 * sigma * sqrt(pi)) } # -------------- marginals of smoothed field ------------------------ if(opt$xmargin) { xZ <- apply(Z$v, 2, sum, na.rm=TRUE) * Z$xstep if(type == "eem") ExZ <- colSums(!is.na(Z$v)) * Z$xstep else ExZ <- numeric(length(xZ)) result$xmargin <- list(x=Z$xcol, xZ=xZ, ExZ=ExZ) } if(opt$ymargin) { yZ <- apply(Z$v, 1L, sum, na.rm=TRUE) * Z$ystep if(type == "eem") EyZ <- rowSums(!is.na(Z$v)) * Z$ystep else EyZ <- numeric(length(yZ)) result$ymargin <- list(y=Z$yrow, yZ=yZ, EyZ=EyZ) } # -------------- cumulative (lurking variable) plots -------------- ## precompute simulated patterns for envelopes if(identical(envelope, TRUE)) envelope <- simulate(object, nsim=nsim, progress=verbose) if(opt$xcumul) result$xcumul <- lurking(object, covariate=expression(x), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="x coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) if(opt$ycumul) result$ycumul <- lurking(object, covariate=expression(y), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, envelope=envelope, nsim=nsim, nrank=nrank, plot.it=FALSE, typename=typename, covname="y coordinate", oldstyle=oldstyle, check=FALSE, splineargs=splineargs, ...) 
# -------------- summary numbers -------------- if(opt$sum) result$sum <- list(marksum=sum(Yclip$marks, na.rm=TRUE), areaWclip=area(Wclip), areaquad=if(clip) sum(Qweightsclip) else sum(Qweights), range=if(!is.null(Z)) range(Z) else NULL) return(invisible(result)) } ######################################################################## diagnose.ppm <- function(object, ..., type="raw", which="all", sigma=NULL, rbord =reach(object), cumulative=TRUE, plot.it = TRUE, rv = NULL, compute.sd=is.poisson(object), compute.cts=TRUE, envelope=FALSE, nsim=39, nrank=1, typename, check=TRUE, repair=TRUE, oldstyle=FALSE, splineargs=list(spar=0.5)) { asked.newstyle <- !missing(oldstyle) && !oldstyle if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # check whether model originally came from replicated data is.subfit <- (object$method == "mppm") Coefs <- coef(object) if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } else if(compute.sd && is.null(getglmfit(object))) { object <- update(object, forcefit=TRUE, use.internal=TRUE) object <- tweak.coefs(object, Coefs) } # ------------- Interpret arguments -------------------------- # Edge-effect avoidance if(missing(rbord) && !is.finite(rbord)) { ## Model has infinite reach ## Use correction rule employed when fitting rbord <- if(object$correction == "border") object$rbord else 0 } # match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") # 'which' is multiple choice with exact matching optionlist <- c("all", "marks", "smooth", "x", "y", "sum") if(!all(m <- which 
%in% optionlist)) stop(paste("Unrecognised choice(s) of", paste(sQuote("which"), ":", sep=""), paste(which[!m], collapse=", "))) opt <- list() opt$all <- "all" %in% which opt$marks <- ("marks" %in% which) | opt$all opt$smooth <- ("smooth" %in% which) | opt$all opt$xmargin <- (("x" %in% which) | opt$all) && !cumulative opt$ymargin <- (("y" %in% which) | opt$all) && !cumulative opt$xcumul <- (("x" %in% which) | opt$all) && cumulative opt$ycumul <- (("y" %in% which) | opt$all) && cumulative opt$sum <- ("sum" %in% which) | opt$all # compute and plot estimated standard deviations? # yes for Poisson, no for other models, unless overridden if(!missing(compute.sd)) plot.sd <- compute.sd else plot.sd <- list(...)$plot.sd if(is.null(plot.sd)) plot.sd <- is.poisson.ppm(object) if(missing(compute.sd)) compute.sd <- plot.sd # default for mppm objects is oldstyle=TRUE if(compute.sd && is.subfit) { if(!asked.newstyle) { # silently change default oldstyle <- TRUE } else { stop(paste("Variance calculation for a subfit of an mppm object", "is only implemented for oldstyle=TRUE"), call.=FALSE) } } # interpolate the density of the residual measure? if(missing(compute.cts)) { plot.neg <- resolve.defaults(list(...), formals(plot.diagppm)["plot.neg"])$plot.neg # only if it is needed for the mark plot compute.cts <- opt$marks && (plot.neg != "discrete") } # ------- DO THE CALCULATIONS ----------------------------------- RES <- diagnose.ppm.engine(object, type=type, typename=typename, opt=opt, sigma=sigma, rbord=rbord, compute.sd=compute.sd, compute.cts=compute.cts, envelope=envelope, nsim=nsim, nrank=nrank, rv=rv, oldstyle=oldstyle, splineargs=splineargs, ...) RES$typename <- typename RES$opt <- opt RES$compute.sd <- compute.sd RES$compute.cts <- compute.cts class(RES) <- "diagppm" # ------- PLOT -------------------------------------------------- if(plot.it) plot(RES, ...) 
return(RES) } plot.diagppm <- function(x, ..., which, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), plot.sd, spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL) { opt <- x$opt plot.neg <- match.arg(plot.neg) plot.smooth <- match.arg(plot.smooth) if(!missing(which)) { witches <- c("all", "marks", "smooth", "x", "y", "sum") unknown <- is.na(match(which, witches)) if(any(unknown)) warning(paste("Unrecognised", ngettext(sum(unknown), "option", "options"), "which =", commasep(sQuote(which[unknown])), ": valid options are", commasep(sQuote(witches))), call.=FALSE) oldopt <- opt newopt <- list() newopt$all <- "all" %in% which newopt$marks <- ("marks" %in% which) | newopt$all newopt$smooth <- ("smooth" %in% which) | newopt$all newopt$xmargin <- (("x" %in% which) | newopt$all) && oldopt$xmargin newopt$ymargin <- (("y" %in% which) | newopt$all) && oldopt$ymargin newopt$xcumul <- (("x" %in% which) | newopt$all) && oldopt$xcumul newopt$ycumul <- (("y" %in% which) | newopt$all) && oldopt$ycumul newopt$sum <- ("sum" %in% which) | newopt$all illegal <- (unlist(newopt) > unlist(oldopt)) if(any(illegal)) { offending <- paste(names(newopt)[illegal], collapse=", ") whinge <- paste("cannot display the following components;\n", "they were not computed: - \n", offending, "\n") stop(whinge) } opt <- newopt } if(missing(plot.sd)) { plot.sd <- x$compute.sd } else if(plot.sd && !(x$compute.sd)) { warning("can't plot standard deviations; they were not computed") plot.sd <- FALSE } if(!(x$compute.cts) && (plot.neg != "discrete") && (opt$marks || opt$all)) { if(!missing(plot.neg)) warning("can't plot continuous component of residuals; it was not computed") plot.neg <- "discrete" } if(opt$all) resid4plot(RES=x, plot.neg=plot.neg, plot.smooth=plot.smooth, spacing=spacing, outer=outer, srange=srange, monochrome=monochrome, main=main, ...) 
else resid1plot(RES=x, opt=opt, plot.neg=plot.neg, plot.smooth=plot.smooth, srange=srange, monochrome=monochrome, main=main, ...) } print.diagppm <- function(x, ...) { opt <- x$opt typename <- x$typename splat("Model diagnostics", paren(typename)) splat("Diagnostics available:") optkey <- list(all="four-panel plot", marks=paste("mark plot", if(!x$compute.cts) "(discrete representation only)" else NULL), smooth="smoothed residual field", xmargin="x marginal density", ymargin="y marginal density", xcumul="x cumulative residuals", ycumul="y cumulative residuals", sum="sum of all residuals") avail <- unlist(optkey[names(opt)[unlist(opt)]]) names(avail) <- NULL cat(paste("\t", paste(avail, collapse="\n\t"), "\n", sep="")) if(opt$sum) { xs <- x$sum windowname <- if(x$clip) "clipped window" else "entire window" splat("sum of", typename, "in", windowname, "=", signif(sum(xs$marksum),4)) splat("area of", windowname, "=", signif(xs$areaWclip, 4)) splat("quadrature area =", signif(xs$areaquad, 4)) } if(opt$smooth) { splat("range of smoothed field = ", prange(signif(range(x$smooth$Z),4))) if(!is.null(sdp <- x$smooth$sdp)) splat("Null standard deviation of smoothed Pearson residual field:", signif(sdp, 4)) } return(invisible(NULL)) } spatstat/R/hermite.R0000644000176200001440000000417013333543255014106 0ustar liggesusers## ## hermite.R ## ## Gauss-Hermite quadrature ## ## $Revision: 1.5 $ $Date: 2017/02/07 07:35:32 $ ## HermiteCoefs <- function(order) { ## compute coefficients of Hermite polynomial (unnormalised) x <- 1 if(order > 0) for(n in 1:order) x <- c(0, 2 * x) - c(((0:(n-1)) * x)[-1L], 0, 0) return(x) } gauss.hermite <- function(f, mu=0, sd=1, ..., order=5) { stopifnot(is.function(f)) stopifnot(length(mu) == 1) stopifnot(length(sd) == 1) ## Hermite polynomial coefficients (un-normalised) Hn <- HermiteCoefs(order) Hn1 <- HermiteCoefs(order-1) ## quadrature points x <- sort(Re(polyroot(Hn))) ## weights Hn1x <- matrix(Hn1, nrow=1) %*% t(outer(x, 0:(order-1), "^")) w <- 
2^(order-1) * factorial(order) * sqrt(pi)/(order * Hn1x)^2
  ## adjust the standard Gauss-Hermite rule (integration against exp(-t^2))
  ## to integration against the Normal(mu, sd^2) density
  ww <- w/sqrt(pi)
  xx <- mu + sd * sqrt(2) * x
  ## compute the quadrature sum approximating E[f(X)] for X ~ N(mu, sd^2)
  ans <- 0
  for(i in seq_along(x))
    ans <- ans + ww[i] * f(xx[i], ...)
  return(ans)
}

## Probability mass function of the mixed Poisson distribution whose
## intensity is invlink(Z) with Z ~ Normal(mu, sd^2).
## The mixture integral is evaluated by Gauss-Hermite quadrature.
dmixpois <- local({
  dpoisG <- function(x, ..., k, g) dpois(k, g(x))
  function(x, mu, sd, invlink=exp, GHorder=5)
    gauss.hermite(dpoisG, mu=mu, sd=sd, g=invlink, k=x, order=GHorder)
})

## Cumulative distribution function of the same mixed Poisson distribution.
pmixpois <- local({
  ppoisG <- function(x, ..., q, g, lot)
    ppois(q, g(x), lower.tail=lot)
  function(q, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5)
    gauss.hermite(ppoisG, mu=mu, sd=sd, g=invlink, q=q,
                  order=GHorder, lot=lower.tail)
})

## Quantile function of the mixed Poisson distribution:
## tabulates pmixpois() on a range of integers covering the
## requested probabilities, then looks up each quantile.
qmixpois <- function(p, mu, sd, invlink=exp, lower.tail = TRUE, GHorder=5) {
  ## Guess upper and lower limits for the integer support to tabulate
  pmin <- min(p, 1-p)/2
  lam.hi <- invlink(qnorm(pmin, mean=max(mu), sd=max(sd), lower.tail=FALSE))
  lam.lo <- invlink(qnorm(pmin, mean=min(mu), sd=max(sd), lower.tail=TRUE))
  kmin <- qpois(pmin, lam.lo, lower.tail=TRUE)
  kmax <- qpois(pmin, lam.hi, lower.tail=FALSE)
  kk <- kmin:kmax
  ## tabulate the CDF once, then invert it for each probability
  pp <- pmixpois(kk, mu, sd, invlink, lower.tail=TRUE, GHorder)
  ans <- if(lower.tail) kk[findInterval(p, pp, all.inside=TRUE)] else
         rev(kk)[findInterval(1-p, rev(1-pp), all.inside=TRUE)]
  return(ans)
}

## Random generation from the mixed Poisson distribution:
## simulate the latent Normal variable, transform to an intensity,
## then draw a Poisson count.
rmixpois <- function(n, mu, sd, invlink=exp) {
  lam <- invlink(rnorm(n, mean=mu, sd=sd))
  y <- rpois(n, lam)
  return(y)
}
spatstat/R/pcf.R0000644000176200001440000003016213430742567013226 0ustar liggesusers#
#   pcf.R
#
#   $Revision: 1.68 $   $Date: 2019/02/13 07:21:23 $
#
#
#  calculate pair correlation function
#  from point pattern (pcf.ppp)
#  or from estimate of K or Kcross (pcf.fv)
#  or from fasp object
#
#
pcf <- function(X, ...)
{ UseMethod("pcf") } pcf.ppp <- function(X, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r", "d"), var.approx=FALSE, domain=NULL, ratio=FALSE, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- Window(X) areaW <- area(win) npts <- npoints(X) lambda <- npts/areaW lambda2area <- areaW * lambda^2 kernel <- match.kernel(kernel) rmaxdefault <- rmax.rule("K", win, lambda) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, win)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick pcfdot() into doing it indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) g <- pcfdot(X %mark% indom, i="TRUE", r=r, correction=correction, kernel=kernel, bw=bw, stoyan=stoyan, divisor=divisor, ...) if(!ratio) { ## relabel g <- rebadge.fv(g, quote(g(r)), "g") } else { ## construct ratfv object denom <- sum(indom == "TRUE") * lambda g <- ratfv(as.data.frame(g), NULL, denom, "r", quote(g(r)), "theo", NULL, c(0, rmaxdefault), attr(g, "labl"), attr(g, "desc"), fname="g", ratio=TRUE) } unitname(g) <- unitname(X) if(var.approx) warning("var.approx is not implemented when 'domain' is given") return(g) } correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="translate", best="best", none="none"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) # bandwidth if(is.null(bw) && (kernel == "epanechnikov")) { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambda) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: 
guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambda) } ########## r values ############################ # handle arguments r and breaks breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax), .StripNull = TRUE) ################################################# # compute pairwise distances if(npts > 1) { needall <- any(correction %in% c("translate", "isotropic")) if(is.null(close)) { what <- if(needall) "all" else "ijd" close <- closepairs(X, rmax + hmax, what=what) } else { #' check 'close' has correct format needed <- if(!needall) c("i", "j", "d") else c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- ratfv(df, NULL, lambda2area, "r", quote(g(r)), "theo", NULL, alim, c("r","%s[Pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="g", ratio=ratio) ###### compute ####### bw.used <- NULL if(any(correction=="none")) { #' uncorrected if(npts > 1) { kdenN <- sewpcf(dIJ, 1, denargs, lambda2area, divisor) gN <- kdenN$g bw.used <- attr(kdenN, "bw") } else gN <- undefined if(!ratio) { out <- bind.fv(out, data.frame(un=gN), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } else { out <- bind.ratfv(out, data.frame(un=gN * lambda2area), lambda2area, "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } } if(any(correction=="translate")) { # translation correction if(npts > 1) { edgewt <- edge.Trans(dx=close$dx, dy=close$dy, 
W=win, paired=TRUE) kdenT <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gT <- kdenT$g bw.used <- attr(kdenT, "bw") } else gT <- undefined if(!ratio) { out <- bind.fv(out, data.frame(trans=gT), "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } else { out <- bind.ratfv(out, data.frame(trans=gT * lambda2area), lambda2area, "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { XI <- ppp(close$xi, close$yi, window=win, check=FALSE) edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) kdenR <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gR <- kdenR$g bw.used <- attr(kdenR, "bw") } else gR <- undefined if(!ratio) { out <- bind.fv(out, data.frame(iso=gR), "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } else { out <- bind.ratfv(out, data.frame(iso=gR * lambda2area), lambda2area, "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } ## variance approximation ## Illian et al 2008 p 234 equation 4.3.42 if(var.approx) { gr <- if(any(correction == "isotropic")) gR else gT # integral of squared kernel intk2 <- kernel.squint(kernel, bw.used) # isotropised set covariance of window gWbar <- as.function(rotmean(setcov(win), result="fv")) vest <- gr * intk2/(pi * r * gWbar(r) * lambda^2) if(!ratio) { out <- bind.fv(out, data.frame(v=vest), "v(r)", "approximate variance of %s", "v") } else { vden <- rep((npts-1)^2, length(vest)) vnum <- vden * vest out <- bind.ratfv(out, data.frame(v=vnum), data.frame(v=vden), "v(r)", "approximate variance of %s", "v") } } ## Finish off ## default is to display all corrections formula(out) <- . 
~ r
  fvnames(out, ".") <- setdiff(rev(colnames(out)), c("r", "v"))
  ##
  unitname(out) <- unitname(X)
  ## copy to other components
  if(ratio)
    out <- conform.ratfv(out)
  ## record the kernel bandwidth actually used for smoothing
  attr(out, "bw") <- bw.used
  return(out)
}

# Smoothing Estimate of Weighted Pair Correlation
# d = vector of relevant distances
# w = vector of edge correction weights (in normal use)
# denargs = arguments to density.default
# lambda2area = constant lambda^2 * areaW (in normal use)
#
# Kernel-smooths the weighted pair distances to produce an estimate
# g(r) of the pair correlation function. Returns data.frame(r, g)
# with the bandwidth actually used attached as attribute "bw".
sewpcf <- function(d, w, denargs, lambda2area, divisor=c("r","d")) {
  divisor <- match.arg(divisor)
  nw <- length(w)
  if(nw != length(d) && nw != 1)
    stop("Internal error: incorrect length of weights vector in sewpcf")
  if(divisor == "d") {
    ## divide each contribution by its pair distance;
    ## contributions at d = 0 become infinite and are removed with a warning
    w <- w/d
    if(!all(good <- is.finite(w))) {
      nbad <- sum(!good)
      warning(paste(nbad, "infinite or NA",
                    ngettext(nbad, "contribution was", "contributions were"),
                    "deleted from pcf estimate"))
      d <- d[good]
      w <- w[good]
    }
  }
  if(nw == 1) {
    #' weights are equal
    kden <- do.call.matched(density.default,
                            append(list(x=d), denargs))
    wtot <- length(d)
  } else {
    #' weighted
    wtot <- sum(w)
    kden <- do.call.matched(density.default,
                            append(list(x=d, weights=w/wtot), denargs))
  }
  r <- kden$x
  ## undo the unit-mass normalisation performed by density.default
  y <- kden$y * wtot
  if(divisor == "r")
    y <- y/r
  g <- y/(2 * pi * lambda2area)
  result <- data.frame(r=r,g=g)
  attr(result, "bw") <- kden$bw
  return(result)
}

#
#---------- OTHER METHODS FOR pcf --------------------
#

## pcf method for function arrays (class "fasp"):
## applies pcf.fv to each function in the array and relabels the result
"pcf.fasp" <- function(X, ..., method="c") {
  verifyclass(X, "fasp")
  Y <- X
  Y$title <- paste("Array of pair correlation functions",
                   if(!is.null(X$dataname)) "for",
                   X$dataname)
  # go to work on each function
  for(i in seq_along(X$fns)) {
    Xi <- X$fns[[i]]
    PCFi <- pcf.fv(Xi, ..., method=method)
    Y$fns[[i]] <- PCFi
    if(is.fv(PCFi))
      Y$default.formula[[i]] <- formula(PCFi)
  }
  return(Y)
}

pcf.fv <- local({

  ## call 'fun' passing only those entries of 'argue'
  ## that match a named formal argument of 'fun'
  callmatched <- function(fun, argue) {
    formalnames <- names(formals(fun))
    formalnames <- formalnames[formalnames != "..."]
    do.call(fun, argue[names(argue) %in% formalnames])
  }

  ## derive the pair correlation function g(r) from an estimate of K(r)
  ## by spline smoothing and numerical differentiation
  pcf.fv <- function(X, ..., method="c") {
    verifyclass(X, "fv")
# extract r and the recommended estimate of K r <- with(X, .x) K <- with(X, .y) alim <- attr(X, "alim") # remove NA's ok <- !is.na(K) K <- K[ok] r <- r[ok] switch(method, a = { ss <- callmatched(smooth.spline, list(x=r, y=K, ...)) dK <- predict(ss, r, deriv=1)$y g <- dK/(2 * pi * r) }, b = { y <- K/(2 * pi * r) y[!is.finite(y)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=y, ...)) dy <- predict(ss, r, deriv=1)$y g <- dy + y/r }, c = { z <- K/(pi * r^2) z[!is.finite(z)] <- 1 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- (r/2) * dz + z }, d = { z <- sqrt(K) z[!is.finite(z)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- z * dz/(pi * r) }, stop(paste("unrecognised method", sQuote(method))) ) # pack result into "fv" data frame Z <- fv(data.frame(r=r, theo=rep.int(1, length(r)), pcf=g), "r", substitute(g(r), NULL), "pcf", . ~ r, alim, c("r", "%s[pois](r)", "%s(r)"), c("distance argument r", "theoretical Poisson value of %s", "estimate of %s by numerical differentiation"), fname="g") unitname(Z) <- unitname(X) return(Z) } pcf.fv }) spatstat/R/addvar.R0000644000176200001440000003065313333543254013716 0ustar liggesusers# # addvar.R # # added variable plot # # $Revision: 1.11 $ $Date: 2016/10/23 10:36:58 $ # addvar <- function(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) { if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(is.marked(model)) stop("Sorry, this is not yet implemented for marked models") if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call # extract spatial 
locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- n.quad(Q) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Pearson residuals from point process model yr <- residuals(model, type="Pearson") yresid <- with(yr, "increment") # averaged (then sum with weight 'wts') yresid <- yresid/wts ################################################################# # Covariates # # covariate data frame df <- getglmdata(model) if(!all(c("x", "y") %in% names(df))) { xy <- as.data.frame(quadpoints) notxy <- !(colnames(df) %in% c("x", "y")) other <- df[, notxy] df <- cbind(xy, other) } # avail.covars <- names(df) # covariates used in model used.covars <- model.covariates(model) fitted.covars <- model.covariates(model, offset=FALSE) # ################################################################# # identify the covariate # if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate if(covname %in% fitted.covars) stop(paste("covariate named", dQuote(covname), "is already used in model")) covvalues <- evalCovariate(covariate, quadpoints) # validate covvalues if(is.null(covvalues)) stop("Unable to extract covariate values") else if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) # tack onto data frame covdf <- data.frame(covvalues) names(covdf) <- covname df <- cbind(df, covdf) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # if(covname %in% 
fitted.covars) stop(paste("covariate", dQuote(covname), "already used in model")) # if(!(covname %in% avail.covars)) stop(paste("covariate", dQuote(covname), "not available")) # covvalues <- df[, covname] } ################################################################ # Pearson residuals from weighted linear regression of new covariate on others rhs <- formula(model) fo <- as.formula(paste(covname, paste(rhs, collapse=" "))) fit <- lm(fo, data=df, weights=lam * wts) xresid <- residuals(fit, type="pearson")/sqrt(wts) if(crosscheck) { message("Cross-checking...") X <- model.matrix(fo, data=df) V <- diag(lam * wts) sqrtV <- diag(sqrt(lam * wts)) Info <- t(X) %*% V %*% X H <- sqrtV %*% X %*% solve(Info) %*% t(X) %*% sqrtV nQ <- length(lam) Id <- diag(1, nQ, nQ) xresid.pearson <- (Id - H) %*% sqrtV %*% covvalues xresid.correct <- xresid.pearson/sqrt(wts) abserr <- max(abs(xresid - xresid.correct), na.rm=TRUE) relerr <- abserr/diff(range(xresid.correct, finite=TRUE)) if(is.finite(relerr) && relerr > 0.01) { warning("Large relative error in residual computation") } message("Done.") } # experiment suggests residuals(fit, "pearson") == xresid.correct # and residuals(fit) equivalent to # covvalues - X %*% solve(t(X) %*% V %*% X) %*% t(X) %*% V %*% covvalues ################################################################# # check for NA's etc # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg <- !is.finite(xresid) | !is.finite(yresid) if(any(offending <- nbg & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values ok <- !nbg & operative Q <- Q[ok] xresid <- xresid[ok] yresid <- yresid[ok] covvalues <- covvalues[ok] df <- df[ok, ] lam <- lam[ok] wts <- wts[ok] Z <- Z[ok] 
insubregion <- insubregion[ok] #################################################### # assemble data for smoothing xx <- xresid yy <- yresid ww <- wts if(makefrom <- is.null(from)) from <- min(xresid) if(maketo <- is.null(to)) to <- max(xresid) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(xx, weights=yy * ww, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(xx[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator xx <- xx[insubregion] yy <- yy[insubregion] ww <- ww[insubregion] lam <- lam[insubregion] Z <- Z[insubregion] if(makefrom) from <- min(xx) if(maketo) to <- max(xx) numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(xx,weights=ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
#################################################### # Determine recommended plot range xr <- range(xresid[Z], finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x ratio <- function(y, x) { ifelseXB(x != 0, y/x, NA) } yyy <- ratio(numfun(xxx), denfun(xxx)) # Null variance estimation # smooth with weight 1 and smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(xx,weights=ww, bw=tau,adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) vvv <- ratio(varnumfun(xxx), 2 * sigma * sqrt(pi) * denfun(xxx)^2) safesqrt <- function(x) { ok <- is.finite(x) & (x >= 0) y <- rep.int(NA_real_, length(x)) y[ok] <- sqrt(x[ok]) return(y) } twosd <- 2 * safesqrt(vvv) # pack into fv object rslt <- data.frame(rcov=xxx, rpts=yyy, theo=0, var=vvv, hi=twosd, lo=-twosd) nuc <- length(used.covars) if(nuc == 0) { given <- givenlab <- 1 } else if(nuc == 1) { given <- givenlab <- used.covars } else { given <- commasep(used.covars, ", ") givenlab <- paste("list", paren(given)) } given <- paste("|", given) xlab <- sprintf("r(paste(%s, '|', %s))", covname, givenlab) ylab <- sprintf("r(paste(points, '|', %s))", givenlab) yexpr <- parse(text=ylab)[[1L]] desc <- c(paste("Pearson residual of covariate", covname, given), paste("Smoothed Pearson residual of point process", given), "Null expected value of point process residual", "Null variance of point process residual", "Upper limit of pointwise 5%% significance band", "Lower limit of pointwise 5%% significance band") rslt <- fv(rslt, argu="rcov", ylab=yexpr, valu="rpts", fmla= (. 
~ rcov), alim=alim, labl=c(xlab, "%s", "0", "bold(var) ~ %s", "%s[hi]", "%s[lo]"), desc=desc, fname=ylab) attr(rslt, "dotnames") <- c("rpts", "theo", "hi", "lo") # data associated with quadrature points reserved <- (substr(colnames(df), 1L, 4L) == ".mpl") isxy <- colnames(df) %in% c("x", "y") dfpublic <- cbind(df[, !(reserved | isxy)], data.frame(xresid, yresid)) attr(rslt, "spatial") <- union.quad(Q) %mark% dfpublic # auxiliary data attr(rslt, "stuff") <- list(covname = covname, xresid = xresid, yresid = yresid, covvalues = covvalues, wts = wts, bw = bw, adjust = adjust, sigma = sigma, used.covars = used.covars, modelcall = modelcall, callstring = callstring, xlim = c(from, to), xlab = xlab, ylab = ylab, lmcoef = coef(fit), bw.input = bw.input, bw.restrict = bw.restrict, restricted = !is.null(subregion)) # finish class(rslt) <- c("addvar", class(rslt)) return(rslt) } print.addvar <- function(x, ...) { cat("Added variable plot diagnostic (class addvar)\n") s <- attr(x, "stuff") mc <- paste(s$modelcall, collapse="") cat(paste("for the covariate", dQuote(s$covname), "for the fitted model:", if(nchar(mc) <= 30) "" else "\n\t", mc, "\n\n")) if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n\n")) NextMethod("print") } plot.addvar <- function(x, ..., do.points=FALSE) { xname <- deparse(substitute(x)) s <- attr(x, "stuff") # covname <- s$covname xresid <- s$xresid yresid <- s$yresid # adjust y limits if intending to plot points as well ylimcover <- if(do.points) range(yresid, finite=TRUE) else NULL # do.call(plot.fv, resolve.defaults(list(x), list(...), list(main=xname, shade=c("hi", "lo"), legend=FALSE, ylim.covers=ylimcover))) # plot points if(do.points) do.call(points, resolve.defaults(list(x=xresid, y=yresid), list(...), list(pch=3, cex=0.5))) return(invisible(x)) } 
spatstat/R/summary.quad.R0000644000176200001440000001142313333543255015076 0ustar liggesusers#
# summary.quad.R
#
# summary() method for class "quad"
#
# $Revision: 1.12 $ $Date: 2018/07/06 02:05:31 $
#
summary.quad <- local({

  ## summarise a vector of quadrature weights (range and total);
  ## returns NULL for an empty vector
  sumriz <- function(ww) {
    if(length(ww) > 0)
      return(list(range=range(ww), sum=sum(ww)))
    else
      return(NULL)
  }

  ## Build a "summary.quad" object: summaries of the data and dummy
  ## point patterns, a textual description of how the dummy points
  ## were generated, and summaries of the quadrature weights.
  summary.quad <- function(object, ..., checkdup=FALSE) {
    verifyclass(object, "quad")
    X <- object$data
    D <- object$dummy
    s <- list(
         logi = inherits(object, "logiquad"),
         data  = summary.ppp(X, checkdup=checkdup),
         dummy = summary.ppp(D, checkdup=checkdup),
         param = object$param)
    ## make description of dummy point arrangement
    dpar <- object$param$dummy
    ## NB: safe even if dpar is NULL (NULL$orig$eps is NULL in R)
    eps.given <- dpar$orig$eps # could be NULL
    eps.actual <- NULL
    if(is.null(dpar)) {
      descrip <- "(provided manually)"
    } else if(is.character(dmethod <- dpar$method)) {
      descrip <- dmethod
    } else if(identical(dpar$quasi, TRUE)) {
      descrip <- paste(npoints(D), "quasirandom dummy points",
                       "plus 4 corner points")
      eps.actual <- 1/(2 * sqrt(intensity(D)))
    } else if(!is.null(nd <- dpar$nd)) {
      nd <- ensure2vector(nd)
      eps.actual <- unique(sidelengths(Frame(D))/nd)
      if(identical(dpar$random, TRUE)) {
        descrip <- paste("systematic random dummy points in",
                         nd[1], "x", nd[2], "grid",
                         "plus 4 corner points")
      } else {
        descrip <- paste(nd[1], "x", nd[2],
                         "grid of dummy points, plus 4 corner points")
      }
    } else
      descrip <- "(rule for creating dummy points not understood)"
    if(!is.null(eps.actual)) {
      ## report the achieved dummy spacing, in the units of the pattern,
      ## alongside any spacing that was originally requested
      uD <- unitname(D)
      s$resolution <- numberwithunit(eps.actual, uD)
      if(!is.null(eps.given)) {
        descrip2 <- paste("dummy spacing:",
                          format(eps.given %unit% uD),
                          "requested,",
                          format(eps.actual %unit% uD),
                          "actual")
      } else {
        descrip2 <- paste("dummy spacing:",
                          format(eps.actual %unit% uD))
      }
      descrip <- c(descrip, descrip2)
    }
    s$descrip <- descrip
    ## summarise quadrature weights: overall, data points, dummy points
    w <- object$w
    Z <- is.data(object)
    s$w <- list(all   = sumriz(w),
                data  = sumriz(w[Z]),
                dummy = sumriz(w[!Z]))
    class(s) <- "summary.quad"
    return(s)
  }

  summary.quad
})

print.summary.quad <- local({
summariseweights <- function(ww, blah, dp=3) { cat(paste(blah, ":\n\t", sep="")) if(is.null(ww)) { cat("(None)\n") return() } splat(paste0("range: ", "[", paste(signif(ww$range, digits=dp), collapse=", "), "]\t", "total: ", signif(ww$sum, digits=dp))) } print.summary.quad <- function(x, ..., dp=3) { splat("Quadrature scheme (Berman-Turner) = data + dummy + weights") pa <- x$param if(is.null(pa)) splat("created by an unknown function.") parbreak() splat("Data pattern:") print(x$data, dp=dp) parbreak() splat("Dummy quadrature points:") ## How they were computed splat(x$descrip, indent=5) parbreak() ## What arguments were given if(!is.null(orig <- pa$dummy$orig)) splat("Original dummy parameters:", paste0(names(orig), "=", orig, collapse=", ")) ## Description of the dummy points print(x$dummy, dp=dp) splat("Quadrature weights:") ## How they were computed if(!is.null(pa)) { wpar <- pa$weight if(is.null(wpar)) splat("(values provided manually)", indent=5) else if(is.character(wmethod <- wpar$method)) { switch(wmethod, grid = { splat("(counting weights based on", wpar$ntile[1], "x", wpar$ntile[2], "array of rectangular tiles)", indent=5) }, dirichlet = { splat("(Dirichlet tile areas, computed", if(wpar$exact) "exactly)" else "by pixel approximation)", indent=5) }, splat(wmethod, indent=5) ) } else splat("(rule for creating dummy points not understood)") } if(waxlyrical('extras')) { summariseweights(x$w$all, "All weights", dp) summariseweights(x$w$data, "Weights on data points", dp) summariseweights(x$w$dummy, "Weights on dummy points", dp) } return(invisible(NULL)) } print.summary.quad }) print.quad <- function(x, ...) 
{ logi <- inherits(x, "logiquad") splat("Quadrature scheme", paren(if(logi) "logistic" else "Berman-Turner")) splat(x$data$n, "data points,", x$dummy$n, "dummy points") if(waxlyrical('extras')) { sx <- summary(x) splat(sx$descrip, indent=5) } splat("Total weight", sum(x$w), indent=5) return(invisible(NULL)) } spatstat/R/exactPdt.R0000644000176200001440000000430713333543255014227 0ustar liggesusers# # exactPdt.R # R function exactPdt() for exact distance transform of pixel image # # $Revision: 4.17 $ $Date: 2017/06/05 10:31:58 $ # "exactPdt"<- function(w) { verifyclass(w, "owin") if(w$type != "mask") stop(paste("Input must be a window of type", sQuote("mask"))) # nr <- w$dim[1L] nc <- w$dim[2L] # input image will be padded out with a margin of width 2 on all sides mr <- mc <- 2L # full dimensions of padded image Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc # output image (subset): rows & columns (R indexing) rmin <- mr + 1L rmax <- Nnr - mr cmin <- mc + 1L cmax <- Nnc - mc # do padding x <- matrix(FALSE, nrow=Nnr, ncol=Nnc) x[rmin:rmax, cmin:cmax] <- w$m # res <- .C("ps_exact_dt_R", as.double(w$xrange[1L]), as.double(w$yrange[1L]), as.double(w$xrange[2L]), as.double(w$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), inp = as.integer(t(x)), distances = as.double (double(N)), rows = as.integer(integer(N)), cols = as.integer(integer(N)), boundary = as.double (double(N)), PACKAGE = "spatstat") dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] rows <- matrix(res$rows, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] cols <- matrix(res$cols, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdist<- matrix(res$boundary, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] # convert from C to R indexing rows <- rows + 1L - as.integer(mr) cols <- cols + 1L - as.integer(mc) return(list(d=dist,row=rows,col=cols,b=bdist, w=w)) } project2set <- function(X, W, ...) 
{ stopifnot(is.ppp(X)) W <- as.mask(W, ...) eW <- exactPdt(W) ## grid location of X XX <- nearest.raster.point(X$x, X$y, W) ijX <- cbind(XX$row, XX$col) ## look up values of 'eW' at this location iY <- eW$row[ijX] jY <- eW$col[ijX] ## convert to spatial coordinates Y <- ppp(W$xcol[jY], W$yrow[iY], window=W) return(Y) } spatstat/R/ord.family.R0000644000176200001440000001020213333543255014506 0ustar liggesusers# # # ord.family.S # # $Revision: 1.17 $ $Date: 2015/10/21 09:06:57 $ # # The Ord model (family of point process models) # # ord.family: object of class 'isf' defining Ord model structure # # # ------------------------------------------------------------------- # ord.family <- list( name = "ord", print = function(self) { cat("Ord model family\n") }, eval = function(X, U, EqualPairs, pot, pars, ...) { # # This auxiliary function is not meant to be called by the user. # It computes the distances between points, # evaluates the pair potential and applies edge corrections. # # Arguments: # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function function(d, p) # pars auxiliary parameters for pot list(......) # ... IGNORED # # Value: # matrix of values of the potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # # Note: # The potential function 'pot' will be called as # pot(M, pars) where M is a vector of tile areas. 
# It must return a vector of the same length as M # or a matrix with number of rows equal to the length of M ########################################################################## nX <- npoints(X) nU <- length(U$x) # number of data + dummy points seqX <- seq_len(nX) seqU <- seq_len(nU) # determine which points in the combined list are data points if(length(EqualPairs) > 0) is.data <- seqU %in% EqualPairs[,2] else is.data <- rep.int(FALSE, nU) ############################################################################# # First compute Dirichlet tessellation of data # and its total potential (which could be vector-valued) ############################################################################# marks(X) <- NULL Wdata <- dirichletWeights(X) # sic - these are the tile areas. Pdata <- pot(Wdata, pars) summa <- function(P) { if(is.matrix(P)) matrowsum(P) else if(is.vector(P) || length(dim(P))==1 ) sum(P) else stop("Don't know how to take row sums of this object") } total.data.potential <- summa(Pdata) # Initialise V dimpot <- dim(Pdata)[-1] # dimension of each value of the potential function # (= numeric(0) if potential is a scalar) dimV <- c(nU, dimpot) if(length(dimV) == 1) dimV <- c(dimV, 1) V <- array(0, dim=dimV) rowV <- array(seqU, dim=dimV) #################### Next, evaluate V for the data points. ############### # For each data point, compute Dirichlet tessellation # of the data with this point removed. # Compute difference of total potential. ############################################################################# for(j in seq_len(nX)) { # Dirichlet tessellation of data without point j Wminus <- dirichletWeights(X[-j]) # regressor is the difference in total potential V[rowV == j] <- total.data.potential - summa(pot(Wminus, pars)) } #################### Next, evaluate V for the dummy points ################ # For each dummy point, compute Dirichlet tessellation # of (data points together with this dummy point) only. 
# Take difference of total potential. ############################################################################# for(j in seqU[!is.data]) { Xplus <- superimpose(X, list(x=U$x[j], y=U$y[j]), W=X$window) # compute Dirichlet tessellation (of these points only!) Wplus <- dirichletWeights(Xplus) # regressor is difference in total potential V[rowV == j] <- summa(pot(Wplus, pars)) - total.data.potential } cat("dim(V) = \n") print(dim(V)) return(V) } ######### end of function $eval ) ######### end of list class(ord.family) <- "isf" spatstat/R/nnclean.R0000644000176200001440000001732213333543255014072 0ustar liggesusers# # nnclean.R # # Nearest-neighbour clutter removal # # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # # $Revision: 1.16 $ $Date: 2016/02/11 10:17:12 $ # nnclean <- function(X, k, ...) { UseMethod("nnclean") } nnclean.pp3 <- function(X, k, ..., convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) stopifnot(inherits(X, "pp3")) validposint(k, "nnclean.pp3") kthNND <- nndist(X, k=k) # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(kthNND, k=k), list(...), list(d=3, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # tack results onto point pattern as marks pp <- em$probs zz <- factor(em$z, levels=c(0,1)) levels(zz) <- c("noise", "feature") mm <- hyperframe(prob=pp, label=zz) marks(X) <- cbind(marks(X), mm) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nnclean.ppp <- function(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by 
Adrian Baddeley Xname <- short.deparse(substitute(X)) validposint(k, "nnclean.ppp") if(!edge.correct) { # compute vector of k-th nearest neighbour distances kthNND <- nndist(X, k=k) } else { # replicate data periodically # (ensuring original points are listed first) Xbox <- X[as.rectangle(X)] Xpand <- periodify(Xbox, ix=c(0,-1,1), iy=c(0,-1,1), check=FALSE) # trim to margin W <- expand.owin(X$window, (1+2*wrap)^2) Xpand <- Xpand[W] kthNND <- nndist(Xpand, k=k) } # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(kthNND, k=k), list(...), list(d=2, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # extract results pp <- em$probs zz <- em$z zz <- factor(zz, levels=c(0,1)) levels(zz) <- c("noise", "feature") df <- data.frame(class=zz,prob=pp) if(edge.correct) { # trim back to original point pattern df <- df[seq_len(X$n), ] } # tack on marx <- marks(X, dfok=TRUE) if(is.null(marx)) marks(X, dfok=TRUE) <- df else marks(X, dfok=TRUE) <- cbind(df, marx) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nncleanEngine <- function(kthNND, k, d, ..., tol = 0.001, maxit = 50, plothist = FALSE, lineargs = list(), verbose=TRUE, Xname="X") { ## Adapted from statlib file NNclean.q ## Authors: Simon Byers and Adrian Raftery ## Adapted for spatstat by Adrian Baddeley n <- length(kthNND) ## Undocumented extension by Adrian Baddeley 2014 ## Allow different dimensions in feature and noise. ## d[1] is cluster dimension. d <- ensure2vector(d) alpha.d <- (2. * pi^(d/2.))/(d * gamma(d/2.)) # raise to power d for efficiency kNNDpowd1 <- kthNND^(d[1]) kNNDpowd2 <- kthNND^(d[2]) # # Now use kthNND in E-M algorithm # First set up starting guesses. # # probs <- numeric(n) thresh <- (min(kthNND) + diff(range(kthNND))/3.) 
high <- (kthNND > thresh) delta <- as.integer(high) p <- 0.5 lambda1 <- k/(alpha.d[1] * mean(kNNDpowd1[!high])) lambda2 <- k/(alpha.d[2] * mean(kNNDpowd2[ high])) loglik.old <- 0. loglik.new <- 1. # # Iterator starts here, # Z <- !kthNND niter <- 0 while(abs(loglik.new - loglik.old)/(1 + abs(loglik.new)) > tol) { if(niter >= maxit) { warning(paste("E-M algorithm failed to converge in", maxit, ngettext(maxit, "iteration", "iterations")), call.=FALSE) break } niter <- niter + 1 # E - step f1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) f2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) delta[!Z] <- (p * f1)/(p * f1 + (1 - p) * f2) delta[Z] <- 0 # M - step sumdelta <- sum(delta) negdelta <- 1. - delta p <- sumdelta/n lambda1 <- (k * sumdelta)/(alpha.d[1] * sum(kNNDpowd1 * delta)) lambda2 <- (k * (n - sumdelta))/(alpha.d[2] * sum(kNNDpowd2 * negdelta)) # evaluate marginal loglikelihood loglik.old <- loglik.new loglik.new <- sum( - p * lambda1 * alpha.d[1] * (kNNDpowd1 * delta) - (1. - p) * lambda2 * alpha.d[2] * (kNNDpowd2 * negdelta) + delta * k * log(lambda1 * alpha.d[1]) + negdelta * k * log(lambda2 * alpha.d[2])) if(verbose) cat(paste("Iteration", niter, "\tlogLik =", loglik.new, "\tp =", signif(p,4), "\n")) } if(plothist) { dotargs <- list(...) 
if(spatstat.options('monochrome')) dotargs <- col.args.to.grey(dotargs) ## compute plot limits to include both histogram and density xlim <- c(0, max(kthNND)) H <- do.call(hist, resolve.defaults(list(kthNND, plot=FALSE, warn.unused=FALSE), dotargs, list(nclass=40))) barheights <- H$density support <- seq(from=xlim[1], to=xlim[2], length.out = 200) fittedy <- p * dknn(support, lambda=lambda1, k = k, d = d[1]) + (1 - p) * dknn(support, lambda=lambda2, k = k, d = d[2]) ylim <- range(c(0, barheights, fittedy)) xlab <- paste("Distance to", ordinal(k), "nearest neighbour") ## now plot it (unless overridden by plot=FALSE) reallyplot <- resolve.1.default("plot", list(...), list(plot=TRUE)) H <- do.call(hist, resolve.defaults(list(kthNND, probability=TRUE), dotargs, list(plot=TRUE, warn.unused=reallyplot, nclass=40, xlim = xlim, ylim=ylim, xlab = xlab, ylab = "Probability density", axes = TRUE, main=""))) H$xname <- xlab if(reallyplot) { box() lineargs <- resolve.defaults(lineargs, list(col="green", lwd=2)) if(spatstat.options("monochrome")) lineargs <- col.args.to.grey(lineargs) do.call(lines, append(list(x=support, y=fittedy), lineargs)) } } # delta1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) delta2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) probs[!Z] <- delta1/(delta1 + delta2) probs[Z] <- 1 # if(verbose) { cat("Estimated parameters:\n") cat(paste("p [cluster] =", signif(p, 5), "\n")) cat(paste("lambda [cluster] =", signif(lambda1, 5), "\n")) cat(paste("lambda [noise] =", signif(lambda2, 5), "\n")) } # # z will be the classifications. 1= in cluster. 0= in noise. 
# return(list(z = round(probs), probs = probs, lambda1 = lambda1, lambda2 = lambda2, p = p, kthNND = kthNND, d=d, n=n, k=k, niter = niter, maxit = maxit, converged = (niter >= maxit), hist=if(plothist) H else NULL)) } spatstat/R/psp2pix.R0000644000176200001440000000673213333543255014064 0ustar liggesusers# # psp2pix.R # # $Revision: 1.12 $ $Date: 2017/11/15 07:21:21 $ # # as.mask.psp <- function(x, W=NULL, ...) { L <- as.psp(x) if(is.null(W)) W <- as.owin(L) else W <- as.owin(W) W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W))) ends <- L$ends nseg <- nrow(ends) if(nseg == 0) { # empty W$m[] <- FALSE return(W) } x0 <- (ends$x0 - W$xrange[1])/W$xstep x1 <- (ends$x1 - W$xrange[1])/W$xstep y0 <- (ends$y0 - W$yrange[1])/W$ystep y1 <- (ends$y1 - W$yrange[1])/W$ystep nr <- W$dim[1] nc <- W$dim[2] zz <- .C("seg2pixI", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), nx=as.integer(nc), ny=as.integer(nr), out=as.integer(integer(nr * nc)), PACKAGE = "spatstat") mm <- matrix(zz$out, nr, nc) # intersect with existing window W$m <- W$m & mm W } pixellate.psp <- function(x, W=NULL, ..., weights=NULL, what=c("length", "number"), DivideByPixelArea=FALSE) { L <- as.psp(x) what <- match.arg(what) if(is.null(W)) W <- as.owin(L) else W <- as.owin(W) W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=W))) Z <- as.im(W) ends <- L$ends nseg <- nrow(ends) if(nseg == 0) { # empty Z$v[] <- 0 return(Z) } if(is.null(weights)) weights <- rep.int(1, nseg) else { if(!is.numeric(weights)) stop("weights must be numeric") if(anyNA(weights)) stop("weights must not be NA") if(!all(is.finite(weights))) stop("weights must not be infinite") if(length(weights) == 1) weights <- rep.int(weights, nseg) else if(length(weights) != nseg) stop(paste("weights vector has length", length(weights), "but there are", nseg, "line segments")) } x0 <- (ends$x0 - Z$xrange[1])/Z$xstep x1 <- (ends$x1 - Z$xrange[1])/Z$xstep y0 <- (ends$y0 - 
Z$yrange[1])/Z$ystep y1 <- (ends$y1 - Z$yrange[1])/Z$ystep nr <- Z$dim[1] nc <- Z$dim[2] switch(what, length = { zz <- .C("seg2pixL", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), weights=as.double(weights), pixwidth=as.double(Z$xstep), pixheight=as.double(Z$ystep), nx=as.integer(nc), ny=as.integer(nr), out=as.double(numeric(nr * nc)), PACKAGE = "spatstat") }, number = { zz <- .C("seg2pixN", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), w=as.double(weights), nx=as.integer(nc), ny=as.integer(nr), out=as.double(numeric(nr * nc)), PACKAGE = "spatstat") }) mm <- matrix(zz$out, nr, nc) ## intersect with existing window mm[is.na(Z$v)] <- NA ## if(DivideByPixelArea) { pixelarea <- W$xstep * W$ystep mm <- mm/pixelarea } ## pack up Z$v <- mm return(Z) } spatstat/R/linearpcf.R0000644000176200001440000001274513606253516014424 0ustar liggesusers# # linearpcf.R # # $Revision: 1.29 $ $Date: 2020/01/11 04:23:16 $ # # pair correlation function for point pattern on linear network # # linearpcf <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # compute denom <- np * (np - 1)/lengthL g <- linearpcfengine(X, r=r, ..., denom=denom, correction=correction, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") correction <- attr(g, "correction") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(g[L](r)) fname <- c("g", "L") }, none = { ylab <- quote(g[net](r)) fname <- c("g", "net") }) g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname) # reattach bandwidth attr(g, "bw") <- bw attr(g, "correction") <- correction return(g) } linearpcfinhom <- function(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, 
ratio=FALSE) { stopifnot(inherits(X, "lpp")) loo.given <- !missing(leaveoneout) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) if(is.null(lambda)) linearpcf(X, r=r, ..., correction=correction, ratio=ratio) if(normalise) { check.1.real(normpower) stopifnot(normpower >= 1) } # extract info about pattern lengthL <- volume(domain(X)) # lambdaX <- getlambda.lpp(lambda, X, ..., update=update, leaveoneout=leaveoneout, loo.given=loo.given, lambdaname="lambda") # invlam <- 1/lambdaX invlam2 <- outer(invlam, invlam, "*") denom <- if(!normalise) lengthL else if(normpower == 1) sum(invlam) else lengthL * (sum(invlam)/lengthL)^normpower g <- linearpcfengine(X, ..., r=r, reweight=invlam2, denom=denom, correction=correction, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") correction <- attr(g, "correction") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(g[L, inhom](r)) fname <- c("g", "list(L, inhom)") }, none = { ylab <- quote(g[net, inhom](r)) fname <- c("g", "list(net, inhom)") }) g <- rebadge.fv(g, new.fname=fname, new.ylab=ylab) # reattach bandwidth attr(g, "bw") <- bw attr(g, "correction") <- correction attr(g, "dangerous") <- attr(lambdaX, "dangerous") return(g) } linearpcfengine <- function(X, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", ratio=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # type <- if(correction == "Ang") "L" else "net" fname <- c("g", type) ylab <- substitute(g[type](r), list(type=type)) # if(np < 2) { # no pairs to count: return zero function zeroes <- numeric(length(r)) df <- data.frame(r = r, est = zeroes) g <- ratfv(df, NULL, 0, "r", ylab, "est", . 
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname, ratio=ratio) if(correction == "Ang") { # tack on theoretical value g <- bind.ratfv(g, quotient = data.frame(theo=r), denominator = 0, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s", ratio=ratio) } attr(g, "correction") <- correction return(g) } # compute pairwise distances D <- pairdist(X) #--- compile into pcf --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(D, r, denom=denom, fname=fname, ratio=ratio) unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } if(correction == "none") { edgewt <- 1 } else { ## inverse m weights (Wei's correction) ## determine tolerance toler <- default.linnet.tolerance(L) ## compute m[i,j] m <- DoCountEnds(X, D, toler) edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(D, r, weights=wt, denom=denom, ..., fname=fname, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") # tack on theoretical value g <- bind.ratfv(g, quotient = data.frame(theo=rep.int(1,length(r))), denominator = denom, labl = makefvlabel(NULL, NULL, fname, "pois"), desc = "theoretical Poisson %s", ratio = ratio) # tweak unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # tack on bandwidth again attr(g, "bw") <- bw attr(g, "correction") <- correction return(g) } spatstat/R/bw.ppl.R0000644000176200001440000000715613544333563013665 0ustar liggesusers#' #' bw.ppl.R #' #' Likelihood cross-validation for kernel smoother of point pattern #' #' bw.ppl class ppp #' bw.lppl class lpp #' #' $Revision: 1.11 $ $Date: 2019/09/30 07:48:05 $ #' bw.ppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, weights=NULL, shortcut=FALSE, warn=TRUE) { stopifnot(is.ppp(X)) if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) check.range(srange) 
else { nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2) } sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) } cv <- numeric(ns) if(shortcut) { for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights, ...) cv[i] <- sum(log(lamx)) } } else { for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights, ...) lam <- density(X, sigma=si, weights=weights, ...) cv[i] <- sum(log(lamx)) - integral.im(lam) } } result <- bw.optim(cv, sigma, iopt=which.max(cv), creator="bw.ppl", criterion="Likelihood Cross-Validation", warnextreme=warn, hargnames="srange", unitname=unitname(X)) return(result) } bw.lppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, weights=NULL, distance="euclidean", shortcut=FALSE, warn=TRUE) { stopifnot(is.lpp(X)) if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) check.range(srange) else { dd <- diameter(Frame(X)) ss <- bw.scott.iso(X) srange <- range(c(ss/10, ss*5, dd/5)) } sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) } cv <- numeric(ns) if(shortcut) { #' omit calculation of integral term #' precompute the geometry data lam1 <- density(X, sigma=sigma[1], weights=weights, distance=distance, ..., savecomputed=TRUE) precooked <- attr(lam1, "savedstuff") for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights, distance=distance, precomputed=precooked, ...) lamx <- pmax(0, lamx) cv[i] <- sum(log(lamx)) } } else { #' full calculation precooked <- NULL cooking <- TRUE for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights, distance=distance, precomputed=precooked, ...) lam <- density(X, sigma=si, weights=weights, distance=distance, precomputed=precooked, savecomputed=cooking, ...) 
if(cooking) { #' save geometry info for re-use in subsequent iterations precooked <- attr(lam, "savedstuff") cooking <- FALSE } lamx <- pmax(0, lamx) cv[i] <- sum(log(lamx)) - integral(lam) } } result <- bw.optim(cv, sigma, iopt=which.max(cv), creator="bw.lppl", criterion="Likelihood Cross-Validation", warnextreme=warn, hargnames="srange", unitname=unitname(X)) return(result) } spatstat/R/derivfv.R0000644000176200001440000001040013353334550014105 0ustar liggesusers# # derivfv.R # # differentiation for fv objects # # $Revision: 1.7 $ $Date: 2018/09/28 05:12:08 $ # deriv.fv <- local({ derivative <- function(y, r, ...) { ss <- smooth.spline(r, y, ...) predict(ss, r, deriv=1)$y } deriv.fv <- function(expr, which="*", ..., method=c("spline", "numeric"), kinks=NULL, periodic=FALSE, Dperiodic=periodic) { f <- expr method <- match.arg(method) ## select columns ## if(length(which) == 1L && which %in% .Spatstat.FvAbbrev) { if(length(which) == 1L) { if(which == ".x") stop("Cannot smooth the function argument") which <- fvnames(f, which) } if(any(nbg <- !(which %in% names(f)))) stop(paste("Unrecognised column", ngettext(sum(nbg), "name", "names"), commasep(sQuote(which[nbg])), "in argument", sQuote("which"))) relevant <- names(f) %in% which ## get rname <- fvnames(f, ".x") df <- as.data.frame(f) rpos <- which(colnames(df) == rname) rvals <- df[,rpos] yvals <- df[,relevant,drop=FALSE] nr <- length(rvals) ## if(Dperiodic) { ## Derivative should be periodic ## Recycle data to imitate periodicity DR <- diff(range(rvals)) rvals <- c(rvals[-nr] - DR, rvals, rvals[-1L] + DR) yleft <- yvals[-nr, , drop=FALSE] yright <- yvals[-1L, , drop=FALSE] if(!periodic) { ## original data are not periodic (e.g. 
cdf of angular variable) ## but derivative must be periodic jump <- matrix(as.numeric(yvals[nr,] - yvals[1L, ]), nr-1L, ncol(yvals), byrow=TRUE) yleft <- yleft - jump yright <- yright + jump } yvals <- rbind(yleft, yvals, yright) actual <- nr:(2*nr - 1L) NR <- length(rvals) } else { NR <- nr actual <- 1:nr } ## cut x axis into intervals? if(is.null(kinks)) { cutx <- factor(rep(1, NR)) } else { rr <- range(rvals) if(periodic) kinks <- c(kinks-DR, kinks, kinks+DR) breaks <- sortunique(kinks) if(breaks[1L] > rr[1L]) breaks <- c(rr[1L], breaks) if(max(breaks) < rr[2L]) breaks <- c(breaks, rr[2L]) cutx <- cut(rvals, breaks=breaks, include.lowest=TRUE) } ## process for(segment in levels(cutx)) { ii <- (cutx == segment) yy <- yvals[ii, , drop=FALSE] switch(method, numeric = { dydx <- apply(yy, 2, diff)/diff(rvals[ii]) nd <- nrow(dydx) dydx <- rbind(dydx, dydx[nd, ]) }, spline = { dydx <- apply(yy, 2, derivative, r=rvals[ii], ...) }) df[ii[actual], relevant] <- dydx[ actual, ] } ## pack up result <- f result[,] <- df ## tweak name of function if(!is.null(yl <- attr(f, "ylab"))) attr(result, "ylab") <- substitute(bold(D)~Fx, list(Fx=yl)) if(!is.null(ye <- attr(f, "yexp"))) attr(result, "yexp") <- substitute(bold(D)~Fx, list(Fx=ye)) ## tweak mathematical labels attr(result, "labl")[relevant] <- paste0("bold(D)~", attr(f, "labl")[relevant]) return(result) } deriv.fv }) increment.fv <- function(f, delta) { stopifnot(is.fv(f)) check.1.real(delta) stopifnot(delta > 0) half <- delta/2 xx <- with(f, .x) ynames <- fvnames(f, ".") yy <- as.data.frame(lapply(ynames, function(a, xx, f, h) { g <- as.function(f, value=a) g(xx+h)-g(xx-h) }, xx=xx, f=f, h=half)) Y <- f Y[,ynames] <- yy ## tweak name of function if(!is.null(yl <- attr(f, "ylab"))) attr(Y, "ylab") <- substitute(Delta~Fx, list(Fx=yl)) if(!is.null(ye <- attr(f, "yexp"))) attr(Y, "yexp") <- substitute(Delta~Fx, list(Fx=ye)) ## tweak mathematical labels relevant <- colnames(Y) %in% ynames attr(Y, "labl")[relevant] <- 
paste0("Delta~", attr(f, "labl")[relevant]) ## tweak recommended range attr(Y, "alim") <- intersect.ranges(attr(f, "alim"), range(xx) + c(1,-1)*half) return(Y) } spatstat/R/lintess.R0000644000176200001440000003360313557001571014133 0ustar liggesusers#' #' lintess.R #' #' Tessellations on a Linear Network #' #' $Revision: 1.41 $ $Date: 2019/09/17 07:23:27 $ #' lintess <- function(L, df, marks=NULL) { verifyclass(L, "linnet") if(missing(df) || is.null(df)) { # tessellation consisting of a single tile ns <- nsegments(L) df <- data.frame(seg=seq_len(ns), t0=0, t1=1, tile=factor(1)) return(lintess(L, df, marks)) } #' validate 'df' stopifnot(is.data.frame(df)) dfnames <- colnames(df) needed <- c("seg", "t0", "t1", "tile") if(any(bad <- is.na(match(needed, dfnames)))) stop(paste(ngettext(sum(bad), "Column", "Columns"), commasep(sQuote(needed[bad])), "missing from data frame"), call.=FALSE) #' straighten out df <- df[, needed] df$seg <- as.integer(df$seg) df$tile <- as.factor(df$tile) if(any(reversed <- with(df, t1 < t0))) df[reversed, c("t0", "t1")] <- df[reversed, c("t1", "t0")] with(df, { segU <- sortunique(seg) segN <- seq_len(nsegments(L)) if(length(omitted <- setdiff(segN, segU)) > 0) stop(paste(ngettext(length(omitted), "Segment", "Segments"), commasep(omitted), "omitted from data"), call.=FALSE) if(length(unknown <- setdiff(segU, segN)) > 0) stop(paste(ngettext(length(unknown), "Segment", "Segments"), commasep(unknown), ngettext(length(unknown), "do not", "does not"), "exist in the network"), call.=FALSE) pieces <- split(df, seg) for(piece in pieces) { t0 <- piece$t0 t1 <- piece$t1 thedata <- paste("Data for segment", piece$seg[[1L]]) if(!any(t0 == 0)) stop(paste(thedata, "do not contain an entry with t0 = 0"), call.=FALSE) if(!any(t1 == 1)) stop(paste(thedata, "do not contain an entry with t1 = 1"), call.=FALSE) if(any(t1 < 1 & is.na(match(t1, t0))) | any(t0 > 0 & is.na(match(t0, t1)))) stop(paste(thedata, "are inconsistent"), call.=FALSE) } }) #' validate marks 
if(!is.null(marks)) { marks <- as.data.frame(marks) nr <- nrow(marks) nt <- length(levels(df$tile)) if(nr == 1L) { marks <- marks[rep(1, nt), , drop=FALSE] row.names(marks) <- 1:nt nr <- nt } else if(nr != nt) { stop(paste("Wrong number of", ngettext(ncol(marks), "mark values:", "rows of mark values:"), nr, "should be", nt), call.=FALSE) } } out <- list(L=L, df=df, marks=marks) class(out) <- c("lintess", class(out)) return(out) } print.lintess <- function(x, ...) { splat("Tessellation on a linear network") nt <- length(levels(x$df$tile)) splat(nt, ngettext(nt, "tile", "tiles")) if(anyNA(x$df$tile)) splat("[An additional tile is labelled NA]") if(!is.null(marx <- x$marks)) { mvt <- markvaluetype(marx) if(length(mvt) == 1) { splat("Tessellation has", mvt, "marks") } else { splat("Tessellation has mark variables", commasep(paste(colnames(marx), paren(mvt)))) } } return(invisible(NULL)) } nobjects.lintess <- function(x) { length(levels(x$df$tile)) } tile.lengths <- function(x) { if(!inherits(x, "lintess")) stop("x should be a tessellation on a linear network (class 'lintess')", call.=FALSE) seglen <- lengths.psp(as.psp(x$L)) df <- x$df df$fraglen <- with(df, seglen[seg] * (t1-t0)) tilelen <- with(df, tapplysum(fraglen, list(tile))) return(tilelen) } tilenames.lintess <- function(x) { levels(x$df$tile) } "tilenames<-.lintess" <- function(x, value) { levels(x$df$tile) <- value return(x) } marks.lintess <- function(x, ...) { x$marks } "marks<-.lintess" <- function(x, ..., value) { if(!is.null(value)) { value <- as.data.frame(value) nt <- length(levels(x$df$tile)) if(nrow(value) != nt) stop(paste("replacement value for marks has wrong length:", nrow(value), "should be", nt), call.=FALSE) rownames(value) <- NULL if(ncol(value) == 1) colnames(value) <- "marks" } x$marks <- value return(x) } unmark.lintess <- function(X) { X$marks <- NULL return(X) } summary.lintess <- function(object, ...) 
{ df <- object$df lev <- levels(df$tile) nt <- length(lev) nr <- nrow(df) seglen <- lengths.psp(as.psp(object$L)) df$fraglen <- with(df, seglen[seg] * (t1-t0)) tilelen <- with(df, tapplysum(fraglen, list(tile))) hasna <- anyNA(df$tile) nalen <- if(hasna) (sum(seglen) - sum(tilelen)) else 0 marx <- object$marks if(!is.null(marx)) { mvt <- markvaluetype(marx) names(mvt) <- colnames(marx) marx <- summary(marx) } else mvt <- NULL y <- list(nt=nt, nr=nr, lev=lev, seglen=seglen, tilelen=tilelen, hasna=hasna, nalen=nalen, marx=marx, mvt=mvt) class(y) <- c("summary.lintess", class(y)) return(y) } print.summary.lintess <- function(x, ...) { splat("Tessellation on a linear network") with(x, { splat(nt, "tiles") if(hasna) splat("[An additional tile is labelled NA]") if(nt <= 30) { splat("Tile labels:", paste(lev, collapse=" ")) splat("Tile lengths:") print(signif(tilelen, 4)) } else { splat("Tile lengths (summary):") print(summary(tilelen)) } if(hasna) splat("Tile labelled NA has length", nalen) if(!is.null(marx)) { splat("Tessellation is marked") if(length(mvt) == 1) { splat("Marks are of type", sQuote(mvt)) } else { splat("Mark variables:", commasep(paste(names(mvt), paren(unname(mvt))))) } splat("Summary of marks:") print(marx) } }) return(invisible(NULL)) } plot.lintess <- local({ plot.lintess <- function(x, ..., main, add=FALSE, style=c("colour", "width", "image"), col=NULL, values=marks(x), ribbon=TRUE, ribargs=list(), multiplot=TRUE, do.plot=TRUE ) { if(missing(main)) main <- short.deparse(substitute(x)) style <- match.arg(style) df <- x$df ntiles <- length(levels(df$tile)) #' Associate 'values' with tiles if(markformat(values) == "hyperframe") values <- as.data.frame(values) #' automatic warning switch(markformat(values), none = { #' no values assigned. #' default is tile name tn <- tilenames(x) values <- factor(tn, levels=tn) }, vector = { #' vector of values. 
#' validate length of vector check.anyvector(values, ntiles, things="tiles") }, dataframe = { #' data frame or matrix of values. values <- as.data.frame(values) if(nrow(values) != ntiles) stop(paste("Number of rows of values =", nrow(values), "!=", ntiles, "= number of tiles"), call.=FALSE) if(multiplot && ncol(values) > 1 && !add) { #' Multiple Panel Plot result <- multi.plot.lintess(x, ..., style=style, main=main, do.plot=do.plot, ribbon=ribbon, ribargs=ribargs, col=col) return(invisible(result)) } if(ncol(values) > 1) warning("Using only the first column of values") values <- values[,1] }, stop("Format of values is not understood") ) #' Single Panel Plot if(style == "image") { z <- plot(as.linfun(x, values=values), main=main, ..., add=add, do.plot=do.plot, ribbon=ribbon, ribargs=ribargs, col=col) return(invisible(z)) } #' convert to marked psp object L <- as.linnet(x) from <- L$from[df$seg] to <- L$to[df$seg] V <- vertices(L) vx <- V$x vy <- V$y segdata <- with(df, list(x0=vx[from] * (1-t0) + vx[to] * t0, y0=vy[from] * (1-t0) + vy[to] * t0, x1=vx[from] * (1-t1) + vx[to] * t1, y1=vy[from] * (1-t1) + vy[to] * t1, marks=values[as.integer(tile)])) S <- as.psp(segdata, window=Window(L)) cmap <- plot(S, style=style, add=add, do.plot=do.plot, main=main, ribbon=ribbon, ribargs=ribargs, col=col, ...) return(invisible(cmap)) } multi.plot.lintess <- function(x, ..., zlim=NULL, col=NULL, equal.ribbon=FALSE) { if(equal.ribbon && is.null(zlim) && !inherits(col, "colourmap")) zlim <- range(marks(x)) if(!is.null(zlim)) { result <- plot(unstack(x), ..., zlim=zlim, col=col) } else { result <- plot(unstack(x), ..., col=col) } return(invisible(result)) } plot.lintess }) unstack.lintess <- function(x, ...) { marx <- marks(x) if(is.null(marx) || is.null(dim(marx)) || ncol(marx) <= 1) return(solist(x)) ux <- unmark(x) y <- solapply(as.list(marx), setmarks, x=ux) return(y) } as.owin.lintess <- function(W, ...) { as.owin(as.linnet(W), ...) } Window.lintess <- function(X, ...) 
## (body of 'Window.lintess', whose header precedes this chunk):
## the 2D window is that of the underlying linear network
{ as.owin(as.linnet(X)) }

## extract the linear network carrying the tessellation
domain.lintess <- as.linnet.lintess <- function(X, ...) { X$L }

## Convert a 'lintess' to a data frame with one row per fragment
## (piece of a tile lying within one network segment): columns
## seg, t0, t1, tile, plus the tile's mark values as extra columns.
as.data.frame.lintess <- function(x, ...) {
  df <- x$df
  if(!is.null(marx <- marks(x))) {
    marx <- as.data.frame(marx)
    if(ncol(marx) == 1L) colnames(marx) <- "marks"
    ## marks are stored one row per tile;
    ## replicate them to one row per fragment via the tile index
    marx <- marx[as.integer(df$tile), , drop=FALSE]
    df <- cbind(df, marx)
  }
  df <- as.data.frame(df, ...)
  return(df)
}

## lineartileindex:
##   Given query locations on the network, specified by segment index 'seg'
##   and fractional position 'tp' along that segment, determine which tile
##   of the tessellation 'Z' contains each location.
##   'Z' may be a 'lintess' object or its underlying data frame
##   (columns seg, t0, t1, tile).
##   Returns a factor (one entry per query point) whose levels are the tile
##   names, with NA where no fragment covers the query location.
lineartileindex <- function(seg, tp, Z, method=c("encode", "C", "interpreted")) {
  method <- match.arg(method)
  df <- if(inherits(Z, "lintess")) Z$df else
        if(is.data.frame(Z)) Z else
        stop("Format of Z is unrecognised")
  switch(method,
         interpreted = {
           ## slow reference implementation: linear search per query point
           n <- length(seg)
           #' extract tessellation data
           tilenames <- levels(df$tile)
           answer <- factor(rep(NA_integer_, n),
                            levels=seq_along(tilenames),
                            labels=tilenames)
           for(i in seq_along(seg)) {
             tpi <- tp[i]
             segi <- seg[i]
             ## fragments lying in the same segment as the query point
             j <- which(df$seg == segi)
             ## fragments whose interval [t0,t1] covers the query position
             kk <- which(df[j, "t0"] <= tpi & df[j, "t1"] >= tpi)
             answer[i] <- if(length(kk) == 0) NA else df[j[min(kk)], "tile"]
           }
         },
         encode = {
           ## Encode each location (seg, tp) as the single number seg-1+tp,
           ## so the whole network maps to an interval of the real line and
           ## one call to findInterval() resolves all queries at once.
           #' encode locations as numeric
           loc <- seg - 1 + tp
           #' extract tessellation data and sort them
           df <- df[order(df$seg, df$t0), , drop=FALSE]
           m <- nrow(df)
           #' encode breakpoints as numeric
           bks <- with(df, c(seg - 1 + t0, seg[m]))
           #' which piece contains each query point
           jj <- findInterval(loc, bks,
                              left.open=TRUE,
                              all.inside=TRUE,
                              rightmost.closed=TRUE)
           answer <- df$tile[jj]
         },
         C = {
           ## delegate the search to compiled code
           #' sort query data
           oo <- order(seg, tp)
           seg <- seg[oo]
           tp <- tp[oo]
           n <- length(seg)
           #' extract tessellation data and sort them
           df <- df[order(df$seg, df$t0), , drop=FALSE]
           m <- nrow(df)
           #' handle factor
           dftile <- df$tile
           tilecode <- as.integer(dftile)
           tilenames <- levels(dftile)
           #' launch
           z <- .C("lintileindex",
                   n = as.integer(n),
                   seg = as.integer(seg),
                   tp = as.double(tp),
                   dfn = as.integer(m),
                   dfseg = as.integer(df$seg),
                   dft0 = as.double(df$t0),
                   dft1 = as.double(df$t1),
                   dftile = as.integer(tilecode),
                   answer = as.integer(integer(n)),
                   PACKAGE="spatstat")
           z <- z$answer
           ## a returned code of 0 is mapped to NA
           ## (presumably 'no tile found' in the C routine -- defined in init.c)
           z[z == 0] <- NA
           ## undo the sorting of the query points
           answer <- integer(n)
           answer[oo] <-
z answer <- factor(answer, levels=seq_along(tilenames), labels=tilenames) }) return(answer) } as.linfun.lintess <- local({ as.linfun.lintess <- function(X, ..., values=marks(X), navalue=NA) { Xdf <- X$df tilenames <- levels(Xdf$tile) value.is.tile <- is.null(values) if(value.is.tile) { tilevalues <- factor(tilenames, levels=tilenames) } else { if(!is.null(dim(values))) values <- values[,1] if(length(values) != length(tilenames)) stop("Length of 'values' should equal the number of tiles", call.=FALSE) tilevalues <- values } f <- function(x, y, seg, tp) { k <- as.integer(lineartileindex(seg, tp, Xdf)) if(!anyNA(k)) { result <- tilevalues[k] } else { ok <- !is.na(k) result <- rep(navalue, length(seg)) result[ok] <- tilevalues[k[ok]] } return(result) } g <- linfun(f, X$L) attr(g, "explain") <- uitleggen return(g) } uitleggen <- function(x, ...) { envf <- environment(attr(x, "f")) Xdf <- get("Xdf", envir=envf) value.is.tile <- get("value.is.tile", envir=envf) %orifnull% FALSE if(value.is.tile) { valuename <- "the tile index" } else { tilevalues <- get("tilevalues", envir=envf) valuetype <- typeof(tilevalues) valuename <- paste("a value", paren(sQuote(valuetype)), "associated with each tile") } splat("Function on a network, associated with a tessellation,") splat("\treturns", valuename) nt <- length(levels(Xdf$tile)) splat("Tessellation has", nt, ngettext(nt, "tile", "tiles")) splat("Function domain:") print(as.linnet(x)) return(invisible(NULL)) } as.linfun.lintess }) spatstat/R/densityVoronoi.R0000644000176200001440000000476513460231502015504 0ustar liggesusers#' #' densityVoronoi.R #' #' $Revision: 1.18 $ $Date: 2019/04/25 04:03:11 $ #' densityVoronoi <- function(X, ...) 
## (body of the S3 generic 'densityVoronoi', whose header precedes this chunk)
{ UseMethod("densityVoronoi") }

## Intensity estimate of a point pattern based on the Dirichlet/Voronoi
## tessellation of a thinned copy of the pattern.
##   X        point pattern (class "ppp")
##   f        retention fraction in [0,1]: fraction of points used to
##            build the tessellation
##   counting if TRUE, tile intensities are based on counts of the
##            non-retained points in each tile; if FALSE, on tile areas only
##   fixed    if TRUE, exactly floor(f*nX) points are sampled without
##            replacement; if FALSE, independent random thinning
##            (via thinjump -- defined elsewhere; presumably retention
##            probability f)
##   nrep     number of independent replicates to average
##   verbose  print progress messages
## Returns a pixel image (class "im") of estimated intensity.
densityVoronoi.ppp <- function(X, f=1, ..., counting=FALSE, fixed=FALSE,
                               nrep=1, verbose=TRUE) {
  stopifnot(is.ppp(X))
  nX <- npoints(X)
  check.1.real(f)
  if(badprobability(f))
    stop("f should be a probability between 0 and 1")
  check.1.integer(nrep)
  stopifnot(nrep >= 1)
  duped <- anyDuplicated(X)
  ##
  ntess <- floor(f * nX)
  if(ntess == 0) {
    ## naive estimate of intensity
    ## (no points retained: constant intensity = npoints/area)
    if(f > 0 && verbose)
      splat("Tiny threshold: returning uniform intensity estimate")
    W <- X$window
    lam <- nX/area(W)
    return(as.im(lam, W, ...))
  }
  if(ntess == nX) {
    ## Voronoi/Dirichlet estimate
    ## (all points retained: classical estimate 1/area on each tile)
    if(!duped) {
      tes <- dirichlet(X)
      tesim <- nnmap(X, what="which", ...)
      num <- 1   # each tile contains exactly one point
    } else {
      ## duplicated points: tessellate the unique points, then count
      ## how many data points coincide with each unique point
      UX <- unique(X)
      tes <- dirichlet(UX)
      tesim <- nnmap(UX, what="which", ...)
      idx <- nncross(X, UX, what="which")
      num <- as.integer(table(factor(idx, levels=seq_len(npoints(UX)))))
    }
    lam <- num/tile.areas(tes)
    out <- eval.im(lam[tesim])
    return(out)
  }
  if(nrep > 1) {
    ## estimate is the average of nrep randomised estimates
    total <- 0
    if(verbose)
      cat(paste("Computing", nrep, "intensity estimates..."))
    state <- list()
    for(i in seq_len(nrep)) {
      estimate <- densityVoronoi(X, f, ...,
                                 counting=counting, fixed=fixed, nrep=1)
      total <- eval.im(total + estimate)
      if(verbose) state <- progressreport(i, nrep, state=state)
    }
    if(verbose) cat("Done.\n")
    average <- eval.im(total/nrep)
    return(average)
  }
  ## perform thinning
  if(!fixed) {
    ## random thinning; tessfrac is the expected retained fraction
    itess <- thinjump(nX, f)
    tessfrac <- f
  } else {
    ## fixed-size sample without replacement
    itess <- sample(seq_len(nX), ntess, replace=FALSE)
    tessfrac <- as.numeric(ntess)/nX
  }
  Xtess <- X[itess]
  if(duped) Xtess <- unique(Xtess)
  ## make tessellation
  tes <- dirichlet(Xtess)
  ## estimate intensity in each tile
  if(!counting) {
    ## each tile carries unit mass, inflated by 1/(retained fraction)
    tilemass <- 1
    expansion <- 1/tessfrac
  } else {
    ## count the non-retained points falling in each tile,
    ## inflated by 1/(non-retained fraction)
    ## NOTE(review): relies on tileindex() returning a factor with one
    ## level per tile, so that table() covers empty tiles -- confirm
    Xcount <- X[-itess]
    tilemap <- tileindex(Xcount$x, Xcount$y, tes)
    tilemass <- as.numeric(table(tilemap))
    expansion <- 1/(1-tessfrac)
  }
  lam <- expansion * tilemass/tile.areas(tes)
  ## estimate of intensity at each location
  tesim <- nnmap(Xtess, what="which", ...)
out <- eval.im(lam[tesim]) return(out) } spatstat/R/rppm.R0000644000176200001440000000722013333543255013426 0ustar liggesusers#' #' rppm.R #' #' Recursive Partitioning for Point Process Models #' #' $Revision: 1.12 $ $Date: 2017/06/05 10:31:58 $ rppm <- function(..., rpargs=list()) { ## do the equivalent of ppm(...) cl <- match.call() cl[[1]] <- as.name('ppm') if("rpargs" %in% names(cl)) cl$rpargs <- NULL cl$forcefit <- TRUE pfit <- eval(cl, envir=parent.frame()) ## if(!is.poisson(pfit)) warning("Interpoint interaction will be ignored", call.=FALSE) df <- getglmdata(pfit) gf <- getglmfit(pfit) sf <- getglmsubset(pfit) rp <- do.call(rpart, resolve.defaults(list(formula=formula(gf), data=df, subset=sf, weights=df$.mpl.W), rpargs, list(method="poisson"))) result <- list(pfit=pfit, rp=rp) class(result) <- c("rppm", class(result)) return(result) } # undocumented as.ppm.rppm <- function(object) { object$pfit } print.rppm <- function(x, ...) { splat("Point process model with recursive partitioning") splat("Data:", sQuote(x$pfit$Qname)) splat("Covariates:", commasep(sQuote(variablesinformula(formula(x$pfit))))) splat("Regression tree:") print(x$rp) invisible(NULL) } plot.rppm <- local({ argsPlotRpart <- c("x", "uniform", "branch", "compress", "margin", "minbranch") argsTextRpart <- c("splits", "label", "FUN", "all", "pretty", "digits", "use.n", "fancy", "fwidth", "fheight", "bg", "minlength") plot.rppm <- function(x, ..., what=c("tree", "spatial"), treeplot=NULL) { xname <- short.deparse(substitute(x)) what <- match.arg(what) switch(what, tree = { if(is.function(treeplot)) return(treeplot(x$rp, ...)) out <- do.call.matched(plot, list(x=x$rp, ...), funargs=argsPlotRpart, extrargs=graphicsPars("plot")) # note: plot.rpart does not pass arguments to 'lines' do.call.matched(text, list(x=x$rp, ...), funargs=argsTextRpart, extrargs=graphicsPars("text")) }, spatial = { p <- predict(x) out <- do.call("plot", resolve.defaults(list(x=p), list(...), list(main=xname))) }) 
return(invisible(out)) } plot.rppm }) #' prune method prune.rppm <- function(tree, ...) { tree$rp <- rpart::prune(tree$rp, ...) return(tree) } #' predict method predict.rppm <- function(object, ...) { model <- object$pfit tree <- object$rp #' assemble covariates for prediction, using rules of predict.ppm co <- predict(model, ..., type="covariates", check=FALSE, repair=FALSE) newdata <- co$newdata masque <- co$mask #' perform prediction using the tree pred <- predict(tree, newdata=co$newdata) #' pack up appropriately if(is.null(masque)) return(pred) imago <- as.im(masque, value=1.0) if(!is.marked(model)) { out <- imago out[] <- pred } else { lev <- levels(marks(data.ppm(model))) nlev <- length(lev) out <- rep(list(imago), nlev) names(out) <- lev splitpred <- split(pred, newdata$marks) for(i in seq_len(nlev)) out[[i]][] <- splitpred[[i]] out <- as.solist(out) } return(out) } fitted.rppm <- function(object, ...) { predict(object, locations=data.ppm(object$pfit)) } spatstat/R/rmhexpand.R0000644000176200001440000001454313333543255014444 0ustar liggesusers# # rmhexpand.R # # Rules/data for expanding the simulation window in rmh # # $Revision: 1.8 $ $Date: 2016/02/11 10:17:12 $ # # Establish names and rules for each type of expansion RmhExpandRule <- local({ .RmhExpandTable <- list(area=list(descrip ="Area expansion factor", minval = 1, expands = function(x) { unname(x) > 1 }), length=list(descrip ="Length expansion factor", minval = 1, expands = function(x) { unname(x) > 1 }), distance=list(descrip="Expansion buffer distance", minval = 0, expands = function(x) { unname(x) > 0 })) RmhExpandRule <- function(nama) { if(length(nama) == 0) nama <- "area" if(length(nama) > 1) stop("Internal error: too many names in RmhExpandRule", call.=FALSE) if(!(nama %in% names(.RmhExpandTable))) stop(paste("Internal error: unrecognised expansion type", sQuote(nama)), call.=FALSE) return(.RmhExpandTable[[nama]]) } RmhExpandRule }) rmhexpand <- function(x=NULL, ..., area=NULL, length=NULL, 
distance=NULL) { trap.extra.arguments(..., .Context="In rmhexpand") # check for incompatibility n <- (!is.null(x)) + (!is.null(area)) + (!is.null(length)) + (!is.null(distance)) if(n > 1) stop("Only one argument should be given") # absorb other arguments into 'x' if(is.null(x) && n > 0) { if(!is.null(area)) x <- c(area=area) if(!is.null(length)) x <- c(length=length) if(!is.null(distance)) x <- c(distance=distance) } if(is.null(x)) { # No expansion rule supplied. # Use spatstat default, indicating that the user did not choose it. force.exp <- force.noexp <- FALSE x <- spatstat.options("expand") x <- rmhexpand(x)$expand } else { # process x if(inherits(x, "rmhexpand")) return(x) if(is.owin(x)) { force.exp <- TRUE force.noexp <- FALSE } else { # expecting c(name=value) or list(name=value) if(is.list(x)) x <- unlist(x) if(!is.numeric(x)) stop(paste("Expansion argument must be either", "a number, a window, or NULL.\n")) # x is numeric check.1.real(x, "In rmhexpand(x)") explain.ifnot(is.finite(x), "In rmhexpand(x)") # an unlabelled numeric value is interpreted as an area expansion factor if(!any(nzchar(names(x)))) names(x) <- "area" # validate rule <- RmhExpandRule(names(x)) if(x < rule$minval) { warning(paste(rule$descrip, "<", rule$minval, "has been reset to", rule$minval), call.=FALSE) x[] <- rule$minval } force.exp <- rule$expands(x) force.noexp <- !force.exp } } result <- list(expand=x, force.exp=force.exp, force.noexp=force.noexp) class(result) <- "rmhexpand" return(result) } .no.expansion <- list(expand=c(area=1), force.exp=FALSE, force.noexp=TRUE) class(.no.expansion) <- "rmhexpand" print.rmhexpand <- function(x, ..., prefix=TRUE) { if(prefix) cat("Expand the simulation window? ") if(x$force.noexp) { cat("No.\n") } else { if(x$force.exp) cat("Yes:\n") else cat("Not determined. 
Default is:\n") y <- x$expand if(is.null(y)) { print(rmhexpand(spatstat.options("expand")), prefix=FALSE) } else if(is.numeric(y)) { descrip <- RmhExpandRule(names(y))$descrip cat(paste("\t", descrip, unname(y), "\n")) } else { print(y) } } return(invisible(NULL)) } summary.rmhexpand <- function(object, ...) { decided <- with(object, force.exp || force.noexp) ex <- object$expand if(is.null(ex)) ex <- rmhexpand(spatstat.options("expand"))$expand if(is.owin(ex)) { willexpand <- TRUE descrip <- "Window" } else if(is.numeric(ex)) { rule <- RmhExpandRule(names(ex)) descrip <- rule$descrip willexpand <- if(object$force.exp) TRUE else if(object$force.noexp) FALSE else (unname(ex) > rule$minval) } else stop("Internal error: unrecognised format in summary.rmhexpand", call.=FALSE) out <- list(rule.decided=decided, window.decided=decided && is.owin(ex), expand=ex, descrip=descrip, willexpand=willexpand) class(out) <- "summary.rmhexpand" return(out) } print.summary.rmhexpand <- function(x, ...) { cat("Expansion rule\n") ex <- x$expand if(x$window.decided) { cat("Window is decided.\n") print(ex) } else { if(x$rule.decided) { cat("Rule is decided.\n") } else { cat("Rule is not decided.\nDefault is:\n") } if(!x$willexpand) { cat("No expansion\n") } else { if(is.numeric(ex)) cat(paste(x$descrip, ex, "\n")) else print(ex) } } return(invisible(NULL)) } expand.owin <- function(W, ...) { ex <- list(...) 
if(length(ex) > 1) stop("Too many arguments") # get an rmhexpand object if(inherits(ex[[1]], "rmhexpand")) { ex <- ex[[1]] } else ex <- do.call(rmhexpand, ex) f <- ex$expand if(is.null(f)) return(W) if(is.owin(f)) return(f) if(!is.numeric(f)) stop("Format not understood") switch(names(f), area = { if(f == 1) return(W) bb <- boundingbox(W) xr <- bb$xrange yr <- bb$yrange fff <- (sqrt(f) - 1)/2 Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr)) }, length = { if(f == 1) return(W) bb <- boundingbox(W) xr <- bb$xrange yr <- bb$yrange fff <- (f - 1)/2 Wexp <- grow.rectangle(bb, fff * diff(xr), fff * diff(yr)) }, distance = { if(f == 0) return(W) Wexp <- if(is.rectangle(W)) grow.rectangle(W, f) else dilation(W, f) }, stop("Internal error: unrecognised type") ) return(Wexp) } will.expand <- function(x) { stopifnot(inherits(x, "rmhexpand")) if(x$force.exp) return(TRUE) if(x$force.noexp) return(FALSE) return(summary(x)$willexpand) } is.expandable <- function(x) { UseMethod("is.expandable") } change.default.expand <- function(x, newdefault) { stopifnot(inherits(x, "rmhexpand")) decided <- with(x, force.exp || force.noexp) if(!decided) x$expand <- rmhexpand(newdefault)$expand return(x) } spatstat/R/pppmatch.R0000644000176200001440000007333313515521652014273 0ustar liggesusers# # pppmatch.R # # $Revision: 1.25 $ $Date: 2018/10/26 08:06:28 $ # # Code by Dominic Schuhmacher # # # ----------------------------------------------------------------- # The standard functions for the new class pppmatching # # Objects of class pppmatching consist of two point patterns pp1 and pp2, # and either an adjacency matrix ((i,j)-th entry 1 if i-th point of pp1 and j-th # point of pp2 are matched, 0 otherwise) for "full point matchings" or # a "generalized adjacency matrix" (or flow matrix; positive values are # no longer limited to 1, (i,j)-th entry gives the "flow" between # the i-th point of pp1 and the j-th point of pp2) for "fractional matchings". 
# Optional elements are the type # of the matching, the cutoff value for distances in R^2, the order # of averages taken, and the resulting distance for the matching. # Currently recognized types are "spa" (subpattern assignment, # where dummy points at maximal dist are introduced if cardinalities differ), # "ace" (assignment if cardinalities equal, where dist is maximal if cards differ), # and "mat" (mass transfer, fractional matching that belongs to the # Wasserstein distance obtained if point patterns are normalized to probability measures). # ----------------------------------------------------------------- pppmatching <- function(X, Y, am, type = NULL, cutoff = NULL, q = NULL, mdist = NULL) { verifyclass(X, "ppp") verifyclass(Y, "ppp") n1 <- X$n n2 <- Y$n am <- as.matrix(am) if (length(am) == 0) { if (min(n1,n2) == 0) am <- matrix(am, nrow=n1, ncol=n2) else stop("Adjacency matrix does not have the right dimensions") } if (dim(am)[1] != n1 || dim(am)[2] != n2) stop("Adjacency matrix does not have the right dimensions") am <- matrix(as.numeric(am), n1, n2) #am <- apply(am, c(1,2), as.numeric) res <- list("pp1" = X, "pp2" = Y, "matrix" = am, "type" = type, "cutoff" = cutoff, "q" = q, "distance" = mdist) class(res) <- "pppmatching" res } plot.pppmatching <- function(x, addmatch = NULL, main = NULL, ..., adjust=1) { if (is.null(main)) main <- short.deparse(substitute(x)) pp1 <- x$pp1 pp2 <- x$pp2 do.call.matched(plot.owin, list(x=pp1$window, main = main, ...), extrargs=graphicsPars("owin")) here <- which((x$matrix > 0), arr.ind = TRUE) if (!is.null(addmatch)) { stopifnot(is.matrix(addmatch)) addhere <- which((addmatch > 0), arr.ind = TRUE) seg <- as.psp(from=pp1[addhere[,1]], to=pp2[addhere[,2]]) plot(seg, add=TRUE, lty = 2, col="gray70") } if (length(here) > 0) { seg <- as.psp(from=pp1[here[,1]], to=pp2[here[,2]]) marks(seg) <- x$matrix[here] plot(seg, add=TRUE, ..., style="width", adjust=adjust) } plot(x$pp1, add=TRUE, pch=20, col=2, ...) 
plot(x$pp2, add=TRUE, pch=20, col=4, ...) return(invisible(NULL)) } print.pppmatching <- function(x, ...) { n1 <- x$pp1$n n2 <- x$pp2$n if (is.null(x$type) || is.null(x$q) || is.null(x$cutoff)) splat("Generic matching of two planar point patterns") else splat(x$type, "-", x$q, " matching of two planar point patterns (cutoff = ", x$cutoff, ")", sep = "") splat("pp1:", n1, ngettext(n1, "point", "points")) splat("pp2:", n2, ngettext(n2, "point", "points")) print(Window(x$pp1)) npair <- sum(x$matrix > 0) if (npair == 0) splat("matching is empty") else { if (any(x$matrix != trunc(x$matrix))) splat("fractional matching,", npair, ngettext(npair, "flow", "flows")) else splat("point matching,", npair, ngettext(npair, "line", "lines")) } if (!is.null(x$distance)) splat("distance:", x$distance) return(invisible(NULL)) } summary.pppmatching <- function(object, ...) { X <- object$pp1 Y <- object$pp2 n1 <- X$n n2 <- Y$n if (is.null(object$type) || is.null(object$q) || is.null(object$cutoff)) splat("Generic matching of two planar point patterns") else splat(object$type, "-", object$q, " matching of two planar point patterns (cutoff = ", object$cutoff, ")", sep = "") splat("pp1:", n1, ngettext(n1, "point", "points")) splat("pp2:", n2, ngettext(n2, "point", "points")) print(Window(X)) npair <- sum(object$matrix > 0) if (npair == 0) splat("matching is empty") else { if (any(object$matrix != trunc(object$matrix))) { splat("fractional matching,", npair, ngettext(npair, "flow", "flows")) } else { splat("point matching,", npair, ngettext(npair, "line", "lines")) rowsum <- rowSums(object$matrix) colsum <- colSums(object$matrix) lt <- ifelse(min(rowsum) >= 1, TRUE, FALSE) ru <- ifelse(max(rowsum) <= 1, TRUE, FALSE) rt <- ifelse(min(colsum) >= 1, TRUE, FALSE) lu <- ifelse(max(colsum) <= 1, TRUE, FALSE) if (lt && ru && rt && lu) splat("matching is 1-1") else if (any(lt, ru, rt, lu)) { splat("matching is", ifelse(lt, " left-total", ""), ifelse(lu, " left-unique", ""), ifelse(rt, " 
right-total", ""), ifelse(ru, " right-unique", ""), sep="") } } } if (!is.null(object$distance)) splat("distance:", object$distance) return(invisible(NULL)) } # ----------------------------------------------------------------- # matchingdist computes the distance associated with a certain kind of matching. # Any of the arguments type, cutoff and order (if supplied) override the # the corresponding arguments in the matching. # This function is useful for verifying the distance element of an # object of class pppmatching as well as for comparing different # (typically non-optimal) matchings. # ----------------------------------------------------------------- matchingdist <- function(matching, type = NULL, cutoff = NULL, q = NULL) { verifyclass(matching, "pppmatching") if (is.null(type)) if (is.null(matching$type)) stop("Type of matching unknown. Distance cannot be computed") else type <- matching$type if (is.null(cutoff)) if (is.null(matching$cutoff)) stop("Cutoff value unknown. Distance cannot be computed") else cutoff <- matching$cutoff if (is.null(q)) if (is.null(matching$q)) stop("Order unknown. 
Distance cannot be computed") else q <- matching$q X <- matching$pp1 Y <- matching$pp2 n1 <- X$n n2 <- Y$n Lpexpect <- function(x, w, p) { f <- max(x) return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p))) } if (type == "spa") { n <- max(n1,n2) # divisor for Lpexpect if (n == 0) return(0) else if (min(n1,n2) == 0) return(cutoff) shortdim <- which.min(c(n1,n2)) shortsum <- apply(matching$matrix, shortdim, sum) if (any(shortsum != 1)) warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality") # dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) }) dfix <- pmin(crossdist(X,Y), cutoff) if (is.finite(q)) resdist <- (Lpexpect(dfix, matching$matrix/n, q)^q + abs(n2-n1)/n * cutoff^q)^(1/q) else resdist <- ifelse(n1==n2, max(dfix[matching$matrix > 0]), cutoff) } else if (type == "ace") { n <- n1 # divisor for Lpexpect if (n1 != n2) return(cutoff) if (n == 0) return(0) rowsum <- rowSums(matching$matrix) colsum <- colSums(matching$matrix) if (any(c(rowsum, colsum) != 1)) warning("matching is not 1-1") # dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) }) dfix <- pmin(crossdist(X,Y), cutoff) if (is.finite(q)) resdist <- Lpexpect(dfix, matching$matrix/n, q) else resdist <- max(dfix[matching$matrix > 0]) } else if (type == "mat") { n <- min(n1,n2) # divisor for Lpexpect if (min(n1,n2) == 0) return(NaN) shortdim <- which.min(c(n1,n2)) shortsum <- apply(matching$matrix, shortdim, sum) if (any(shortsum != 1)) warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality") # dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) }) dfix <- pmin(crossdist(X,Y), cutoff) if (is.finite(q)) resdist <- Lpexpect(dfix, matching$matrix/n, q) else resdist <- max(dfix[matching$matrix > 0]) } else stop(paste("Unrecognised type", sQuote(type))) return(resdist) } # ----------------------------------------------------------------- # The main function for computation of distances 
and finding optimal # matchings between point patterns: pppdist # ----------------------------------------------------------------- # # pppdist uses several helper functions not normally called by the user # # The arguments of pppdist are # # x and y of class ppp (the two point patterns for which we want to compute # a distance) # The type of distance to be computed; any one of "spa" (default), "ace", "mat". # For details of this and the following two arguments see above (description # for class "pppmatching") # cutoff and order q of the distance # Set matching to TRUE if the full point matching (including distance) # should be returned; otherwise only the distance is returned # If ccode is FALSE R code is used where available. This may be useful if q # is high (say above 10) and severe warning messages pop up. R can # (on most machines) deal with a higher number of significant digits per # number than C (at least with the code used below) # precision should only be entered by advanced users. Empirically reasonable defaults # are used otherwise. As a rule of thumb, if ccode is TRUE, precision should # be the highest value that does not give an error (typically 9); if ccode # is FALSE, precision should be balanced (typically between 10 and 100) in # such a way that the sum of the number of zeroes and pseudo-zeroes given in the # warning messages is minimal # approximation: if q = Inf, by the distance of which order should # the true distance be approximated. If approximation is Inf, brute force # computation is used, which is only practicable for point patterns with # very few points (see also the remarks just before the pppdist.prohorov # function below). 
# show.rprimal=TRUE shows at each stage of the algorithm what the current restricted # primal problem and its solution are (algorithm jumps between restricted primal # and dual problem until the solution to the restricted primal (a partial # matching of the point patterns) is a full matching) # timelag gives the number of seconds of pause added each time a solution to # the current restricted primal is found (has only an effect if show.primal=TRUE) # ----------------------------------------------------------------- pppdist <- function(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE, ccode = TRUE, auction = TRUE, precision = NULL, approximation = 10, show.rprimal = FALSE, timelag = 0) { verifyclass(X, "ppp") verifyclass(Y, "ppp") if (!ccode && type == "mat") { warning("R code is not available for type = ", dQuote("mat"), ". C code is used instead") ccode <- TRUE } if (!ccode && is.infinite(q) && is.infinite(approximation)) { warning("R code is not available for q = Inf and approximation = Inf. C code is used instead") ccode <- TRUE } if (ccode && is.infinite(q) && is.infinite(approximation) && type == "spa" && X$n != Y$n) { warning("approximation = Inf not available for type = ", dQuote("spa"), " and point patterns with differing cardinalities") approximation <- 10 } if (is.infinite(q) && is.infinite(approximation) && type == "mat") { warning("approximation = Inf not available for type = ", dQuote("mat")) approximation <- 10 } if (show.rprimal) { ccode <- FALSE auction <- FALSE if (type != "ace"){ warning("show.rprimal = TRUE not available for type = ", dQuote(type), ". 
Type is changed to ", dQuote("ace")) type <- "ace" } } if (is.null(precision)) { if (ccode) precision <- trunc(log10(.Machine$integer.max)) else { db <- .Machine$double.base minprec <- trunc(log10(.Machine$double.base^.Machine$double.digits)) if (is.finite(q)) precision <- min(max(minprec,2*q), (.Machine$double.max.exp-1)*log(db)/log(10)) else precision <- min(max(minprec,2*approximation), (.Machine$double.max.exp-1)*log(db)/log(10)) } } if (type == "spa") { if (X$n == 0 && Y$n == 0) { if (!matching) return(0) else { return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0)) } } n1 <- X$n n2 <- Y$n n <- max(n1,n2) dfix <- matrix(cutoff,n,n) if (min(n1,n2) > 0) dfix[1:n1,1:n2] <- crossdist(X,Y) # d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) }) d <- dfix <- pmin(dfix,cutoff) if (is.infinite(q)) { if (n1 == n2 || matching) return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode, auction, precision, approximation)) else return(cutoff) # in the case n1 != n2 the distance is clear, and in a sense any # matching would be correct. We go here the extra mile and call # pppdist.prohorov in order to find (approximate) the matching # that is intuitively most interesting (i.e. 
the one that # pairs the points of the # smaller cardinality point pattern with the points of the larger # cardinality point pattern in such a way that the maximal pairing distance # is minimal (for q < Inf the q-th order pairing distance before the introduction # of dummy points is automatically minimal if it is minimal after the # introduction of dummy points) # which would be the case for the obtained pairing if q < Inf } } else if (type == "ace") { if (X$n != Y$n) { if (!matching) return(cutoff) else { return(pppmatching(X, Y, matrix(0, nrow=X$n, ncol=Y$n), type, cutoff, q, cutoff)) } } if (X$n == 0) { if (!matching) return(0) else { return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0)) } } n <- n1 <- n2 <- X$n dfix <- crossdist(X,Y) # d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) }) d <- dfix <- pmin(dfix, cutoff) if (is.infinite(q)) return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode, auction, precision, approximation)) } else if (type == "mat") { if (!ccode) warning("R code is not available for type = ", dQuote("mat"), ". C code is used instead") if (auction) warning("Auction algorithm is not available for type = ", dQuote("mat"), ". 
Primal-dual algorithm is used instead") return(pppdist.mat(X, Y, cutoff, q, matching, precision, approximation)) } else stop(paste("Unrecognised type", sQuote(type))) d <- d/max(d) d <- round((d^q)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if(nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances")) if(ccode & any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") if(!ccode) { if (any(is.infinite(d))) stop("Inf obtained, while taking the q-th powers of distances") maxd <- max(d) npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits) if (npszeroes > 0) warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"), "introduced, while taking the q-th powers of distances")) # a pseudo-zero is a value that is positive but contributes nothing to the # q-th order average because it is too small compared to the other values } Lpmean <- function(x, p) { f <- max(x) return(ifelse(f==0, 0, f * mean((x/f)^p)^(1/p))) } if (show.rprimal && type == "ace") { assig <- acedist.show(X, Y, n, d, timelag) am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } else if (ccode) { if (auction) { dupper <- max(d)/10 lasteps <- 1/(n+1) epsfac <- 10 epsvec <- lasteps ## Bertsekas: from dupper/2 to 1/(n+1) divide repeatedly by a constant while (lasteps < dupper) { lasteps <- lasteps*epsfac epsvec <- c(epsvec,lasteps) } epsvec <- rev(epsvec)[-1] neps <- length(epsvec) stopifnot(neps >= 1) d <- max(d)-d ## auctionbf uses a "desire matrix" res <- .C("auctionbf", as.integer(d), as.integer(n), pers_to_obj = as.integer(rep(-1,n)), price = as.double(rep(0,n)), profit = as.double(rep(0,n)), as.integer(neps), as.double(epsvec), PACKAGE = "spatstat") am <- matrix(0, n, n) am[cbind(1:n,res$pers_to_obj+1)] <- 1 } else { res <- .C("dwpure", as.integer(d), as.integer(rep.int(1,n)), as.integer(rep.int(1,n)), as.integer(n), as.integer(n), 
                  flowmatrix = as.integer(integer(n^2)),
                  PACKAGE = "spatstat")
        am <- matrix(res$flowmatrix, n, n)
      }
    } else {
      # pure-R fallback: Hungarian-type algorithm on the rounded cost matrix
      assig <- acedist.noshow(X, Y, n, d)
      am <- matrix(0, n, n)
      am[cbind(1:n, assig[1:n])] <- 1
    }
    # resdist = L^q mean of the matched (uncapped) distances
    resdist <- Lpmean(dfix[am == 1], q)
    if (!matching) return(resdist) else {
      amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2))
      # previous line solves various problems associated with min(n1,n2) = 0 or = 1
      return(pppmatching(X, Y, amsmall, type, cutoff, q, resdist))
    }
}
#
#
#
# ===========================================================
# ===========================================================
#   Anything below:
#   Internal functions usually not to be called by user
# ===========================================================
# ===========================================================
#
#
# Primal-dual (Hungarian-type) assignment with animated display.
# Called from pppdist if show.rprimal is true.
# X, Y : point patterns being matched (used only for plotting)
# n    : size of the (square) cost matrix d
# d    : integer-rounded cost matrix
# timelag : pause (seconds) between plot updates
# Returns the assignment vector: assignment[i] = column matched to row i.
#
acedist.show <- function(X, Y, n, d, timelag = 0) {
  plot(pppmatching(X, Y, matrix(0, n, n)))
  # initialization of dual variables
  # (subtract row minima, then column minima, so every row/column has a zero)
  u <- apply(d, 1, min)
  d <- d - u
  v <- apply(d, 2, min)
  d <- d - rep(v, each=n)
  # the main loop
  feasible <- FALSE
  while (!feasible) {
    rpsol <- maxflow(d)   # rpsol = restricted primal, solution
    am <- matrix(0, n, n)
    for (i in 1:n) {
      if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE
    }
    Sys.sleep(timelag)
    # channelmat marks admissible (zero-cost) pairs not currently matched
    channelmat <- (d == 0 & !am)
    plot(pppmatching(X, Y, am), addmatch = channelmat)
    # if the solution of the restricted primal is not feasible for
    # the original primal, update dual variables
    if (min(rpsol$assignment) == -1) {
      w1 <- which(rpsol$fi_rowlab > -1)
      w2 <- which(rpsol$fi_collab == -1)
      subtractor <- min(d[w1, w2])
      d[w1,] <- d[w1,] - subtractor
      d[,-w2] <- d[,-w2] + subtractor
    }
    # otherwise break the loop
    else {
      feasible <- TRUE
    }
  }
  return(rpsol$assignment)
}
#
# R-version of hungarian algo without the pictures
# useful if q is large
# Same contract as acedist.show, minus the plotting and time lag.
#
acedist.noshow <- function(X, Y, n, d) {
  # initialization of dual variables
  u <- apply(d, 1, min)
  d <- d - u
  v <- apply(d, 2, min)
  d <- d - rep(v, each=n)
  # the main loop
  feasible <- FALSE
  while (!feasible) {
    rpsol <- maxflow(d)   # rpsol = restricted primal, solution
    # ~~~~~~~~~ deleted by AJB ~~~~~~~~~~~~~~~~~
    #    am <- matrix(0, n, n)
    #    for (i in 1:n) {
    #      if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE
    #    }
    #    channelmat <- (d == 0 & !am)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    # if the solution of the restricted primal is not feasible for
    # the original primal, update dual variables
    if (min(rpsol$assignment) == -1) {
      w1 <- which(rpsol$fi_rowlab > -1)
      w2 <- which(rpsol$fi_collab == -1)
      subtractor <- min(d[w1, w2])
      d[w1,] <- d[w1,] - subtractor
      d[,-w2] <- d[,-w2] + subtractor
    }
    # otherwise break the loop
    else {
      feasible <- TRUE
    }
  }
  return(rpsol$assignment)
}
#
# Solution of restricted primal:
# maximum-cardinality matching on the zero entries of the cost matrix,
# by an augmenting-path labeling procedure.
# costm : square cost matrix; each row must contain at least one zero.
# Returns a list with
#   assignment : column matched to each row (-1 = unmatched),
#   fi_rowlab, fi_collab : final row/column labels (-1 = unlabeled,
#                          0 = source label), used for the dual update.
#
maxflow <- function(costm) {
  stopifnot(is.matrix(costm))
  stopifnot(nrow(costm) == ncol(costm))
  if(!all(apply(costm == 0, 1, any)))
    stop("Each row of the cost matrix must contain a zero")
  m <- dim(costm)[1]   # cost matrix is square m * m
  assignment <- rep.int(-1, m)   # -1 means no pp2-point assigned to i-th pp1-point
  ## initial assignment or rowlabel <- source label (= 0) where not possible
  for (i in 1:m) {
    j <- match(0, costm[i,])
    if (!(j %in% assignment)) assignment[i] <- j
  }
  newlabelfound <- TRUE
  while (newlabelfound) {
    rowlab <- rep.int(-1, m)   # -1 means no label given, 0 stands for source label
    collab <- rep.int(-1, m)
    rowlab <- ifelse(assignment == -1, 0, rowlab)
    ## column and row labeling procedure until either breakthrough occurs
    ## (which means that there is a better point assignment, i.e. one that
    ## creates more point pairs than the current one (flow can be increased))
    ## or no more labeling is possible
    breakthrough <- -1
    while (newlabelfound && breakthrough == -1) {
      newlabelfound <- FALSE
      # label columns reachable through admissible (zero-cost) entries
      for (i in 1:m) {
        if (rowlab[i] != -1) {
          for (j in 1:m) {
            if (costm[i,j] == 0 && collab[j] == -1) {
              collab[j] <- i
              newlabelfound <- TRUE
              if (!(j %in% assignment) && breakthrough == -1) breakthrough <- j
            }
          }
        }
      }
      # label rows whose current match was just column-labeled
      for (j in 1:m) {
        if (collab[j] != -1) {
          for (i in 1:m) {
            if (assignment[i] == j && rowlab[i] == -1) {
              rowlab[i] <- j
              newlabelfound <- TRUE
            }
          }
        }
      }
    }
    ## if the while-loop was left due to breakthrough,
    ## reassign points (i.e. redirect flow) and restart labeling procedure
    if (breakthrough != -1) {
      l <- breakthrough
      while (l != 0) {
        k <- collab[l]
        assignment[k] <- l
        l <- rowlab[k]
      }
    }
  }
  ## the outermost while-loop is left, no more labels can be given; hence
  ## the maximal number of points are paired given the current restriction
  ## (flow is maximal given the current graph)
  return(list("assignment"=assignment, "fi_rowlab"=rowlab, "fi_collab"=collab))
}
#
# Prohorov distance computation/approximation (called if q = Inf in pppdist
# and type = "spa" or "ace")
# Exact brute force computation of distance if approximation = Inf,
# scales very badly, should not be used for cardinality n larger than 10-12
# Approximation by order q distance gives often (if the warning messages
# are not too extreme) the right matching and therefore the exact Prohorov distance,
# but in very rare cases the result can be very wrong. However, it is always
# an exact upper bound of the Prohorov distance (since based on *a* pairing
# as opposed to optimal pairing).
#
# Prohorov-type distance between point patterns X and Y.
# n      : size of the (padded, square) matching problem
# dfix   : n x n matrix of cutoff-capped distances
# type   : matching type label, passed through to pppmatching
# ccode  : use compiled C routines (auctionbf/dwpure); otherwise pure R
# auction: with ccode, choose Bertsekas' auction algorithm over dwpure
# precision, approximation : rounding / order-q approximation controls
# Returns either the scalar distance (matching=FALSE) or a pppmatching object.
#
pppdist.prohorov <- function(X, Y, n, dfix, type, cutoff = 1, matching = TRUE,
  ccode = TRUE, auction = TRUE, precision = 9, approximation = 10) {
  n1 <- X$n
  n2 <- Y$n
  # work on distances rescaled to [0,1]
  d <- dfix/max(dfix)
  if (is.finite(approximation)) {
    # approximate the q = Inf distance by the order-q distance
    warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation))
    d <- round((d^approximation)*(10^precision))
    nzeroes <- sum(d == 0 & dfix > 0)
    if (nzeroes > 0)
      warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"),
        "introduced, while rounding distances"))
    if (ccode) {
      if (any(d > .Machine$integer.max))
        stop("integer overflow, while rounding the q-th powers of distances")
      if (auction) {
        dupper <- max(d)/10
        lasteps <- 1/(n+1)
        epsfac <- 10
        epsvec <- lasteps
        ## Bertsekas: from dupper/2 to 1/(n+1) divide repeatedly by a constant
        while (lasteps < dupper) {
          lasteps <- lasteps*epsfac
          epsvec <- c(epsvec,lasteps)
        }
        epsvec <- rev(epsvec)[-1]
        neps <- length(epsvec)
        stopifnot(neps >= 1)
        d <- max(d)-d
        ## auctionbf uses a "desire matrix"
        res <- .C("auctionbf",
                  as.integer(d),
                  as.integer(n),
                  pers_to_obj = as.integer(rep(-1,n)),
                  price = as.double(rep(0,n)),
                  profit = as.double(rep(0,n)),
                  as.integer(neps),
                  as.double(epsvec),
                  PACKAGE = "spatstat")
        am <- matrix(0, n, n)
        am[cbind(1:n,res$pers_to_obj+1)] <- 1
      } else {
        res <- .C("dwpure",
                  as.integer(d),
                  as.integer(rep.int(1,n)),
                  as.integer(rep.int(1,n)),
                  as.integer(n),
                  as.integer(n),
                  flowmatrix = as.integer(integer(n^2)),
                  PACKAGE = "spatstat")
        am <- matrix(res$flowmatrix, n, n)
      }
    } else {
      if (any(is.infinite(d)))
        stop("Inf obtained, while taking the q-th powers of distances")
      maxd <- max(d)
      # a pseudo-zero contributes nothing to the order-q average
      # because it is too small relative to the largest value
      npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits)
      if (npszeroes > 0)
        warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"),
          "introduced, while taking the q-th powers of distances"))
      assig <- acedist.noshow(X, Y, n, d)
      am <- matrix(0, n, n)
      am[cbind(1:n, assig[1:n])] <- 1
    }
  } else {
    # exact computation: round the plain (unpowered) distances
    d <- round(d*(10^precision))
    nzeroes <- sum(d == 0 & dfix > 0)
    if (nzeroes > 0)
      warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"),
        "introduced, while rounding distances"))
    # NOTE(review): message mentions "q-th powers" but this branch rounds
    # plain distances -- legacy wording, left unchanged here
    if (any(d > .Machine$integer.max))
      stop("integer overflow, while rounding the q-th powers of distances")
    res <- .C("dinfty_R",
              as.integer(d),
              as.integer(n),
              assignment = as.integer(rep.int(-1,n)),
              PACKAGE = "spatstat")
    assig <- res$assignment
    am <- matrix(0, n, n)
    am[cbind(1:n, assig[1:n])] <- 1
  }
  # with unequal cardinalities the Prohorov-type distance is the cutoff
  if (n1 == n2) resdist <- max(dfix[am == 1]) else resdist <- cutoff
  if (!matching) return(resdist) else {
    amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2))
    ## previous line solves various problems associated with min(n1,n2) = 0 or = 1
    return(pppmatching(X, Y, amsmall, type, cutoff, Inf, resdist))
  }
}
#
# Computation of "pure Wasserstein distance" for any q (called if type="mat"
# in pppdist, no matter if q finite or not).
# If q = Inf, approximation using ccode is enforced
# (approximation == Inf is not allowed here).
#
pppdist.mat <- function(X, Y, cutoff = 1, q = 1, matching = TRUE,
  precision = 9, approximation = 10) {
  n1 <- X$n
  n2 <- Y$n
  n <- min(n1,n2)
  # degenerate case: one pattern is empty
  if (n == 0) {
    if (!matching) return(NaN)
    else return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), "mat", cutoff, q, NaN))
  }
  dfix <- crossdist(X,Y)
  #  d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) })
  d <- dfix <- pmin(dfix, cutoff)
  d <- d/max(d)
  if (is.infinite(q)) {
    if (is.infinite(approximation)) stop("approximation = Inf")
    warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation))
    d <- round((d^approximation)*(10^precision))
    nzeroes <- sum(d == 0 & dfix > 0)
    if (nzeroes > 0)
      warning(paste(nzeroes, "zeroes introduced, while rounding distances"))
    if (any(d > .Machine$integer.max))
      stop("integer overflow, while rounding the q-th powers of distances")
    # balance unequal cardinalities by fractional masses n2/gcd and n1/gcd
    gcd <- greatest.common.divisor(n1,n2)
    mass1 <- n2/gcd
    mass2 <- n1/gcd
    res <- .C("dwpure",
              as.integer(d),
              as.integer(rep.int(mass1,n1)),
as.integer(rep.int(mass2,n2)), as.integer(n1), as.integer(n2), flowmatrix = as.integer(integer(n1*n2)), PACKAGE = "spatstat") am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2) resdist <- max(dfix[am > 0]) } else { d <- round((d^q)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if(nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances")) if(any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") gcd <- greatest.common.divisor(n1,n2) mass1 <- n2/gcd mass2 <- n1/gcd Lpexpect <- function(x, w, p) { f <- max(x) return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p))) } res <- .C("dwpure", as.integer(d), as.integer(rep.int(mass1,n1)), as.integer(rep.int(mass2,n2)), as.integer(n1), as.integer(n2), flowmatrix = as.integer(integer(n1*n2)), PACKAGE = "spatstat") am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2) # our "adjacency matrix" in this case is standardized to have # rowsum 1 if n1 <= n2 and colsum 1 if n1 >= n2 resdist <- Lpexpect(dfix, am/n, q) } if (!matching) return(resdist) else { amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2)) # previous line solves various problems associated with min(n1,n2) = 0 or = 1 return(pppmatching(X, Y, amsmall, "mat", cutoff, q, resdist)) } } spatstat/R/detpointprocfamilyfun.R0000644000176200001440000004200613333543254017075 0ustar liggesusers## detpointprocfamilyfun.R ## ## $Revision: 1.5 $ $Date: 2015/10/19 02:27:17 $ ## ## This file contains the function `detpointprocfamilyfun' ## to define new DPP model family functions ## and a print method for class `detpointprocfamilyfun' ## as well as the currently defined ## - dppBessel ## - dppCauchy ## - dppGauss ## - dppMatern ## - dppPowerExp detpointprocfamilyfun <- local({ names_formals <- function(f, dots = FALSE){ nam <- names(formals(f)) if(!dots) nam <- nam[nam!="..."] return(nam) } detpointprocfamilyfun <- function(kernel=NULL, 
specden=NULL, basis="fourierbasis", convkernel=NULL, Kfun=NULL, valid=NULL, intensity=NULL, dim=2, name="User-defined", isotropic=TRUE, range=NULL, parbounds=NULL, specdenrange=NULL, startpar=NULL, ...) { ## Check which functions are given, check them for sanity and ## extract argument names and other stuff given <- NULL if(!is.null(kernel)){ if(!is.function(kernel)) stop("If kernel is given it must be a function.") given <- "kernel" kernelnames <- names_formals(kernel) if(length(kernelnames)<1L) stop("kernel function must have at least one argument") kernelnames <- kernelnames[-1L] } if(!is.null(specden)){ if(!is.function(specden)) stop("If specden is given it must be a function.") given <- c(given, "specden") specdennames <- names_formals(specden) if(length(specdennames)<1L) stop("specden function must have at least one argument") specdennames <- specdennames[-1L] } if(is.null(given)) stop("At least one of kernel or specden must be provided.") if(length(given)==2){ if(!setequal(kernelnames,specdennames)) stop("argument names of kernel and specden must match.") } if(is.element("kernel",given)){ parnames <- kernelnames } else{ parnames <- specdennames } if(!is.null(convkernel)){ given <- c(given,"convkernel") if(!is.function(convkernel)||length(formals(convkernel))<2) stop("If convkernel is given it must be a function with at least two arguments.") if(!setequal(parnames,names_formals(convkernel)[-(1:2)])) stop("argument names of convkernel must match argument names of kernel and/or specden.") } if(!is.null(Kfun)){ given <- c(given,"Kfun") if(!is.function(Kfun)||length(formals(Kfun))<1L) stop("If Kfun is given it must be a function with at least one arguments.") if(!setequal(parnames,names_formals(Kfun)[-1L])) stop("argument names of Kfun must match argument names of kernel and/or specden.") } if(!is.null(valid)){ if(!(is.function(valid)&&setequal(parnames,names_formals(valid)))) stop("argument names of valid must match argument names of kernel and/or specden.") } 
else{ warning("No function for checking parameter validity provided. ANY numerical value for the parameters will be accepted.") } if(!is.null(intensity)&&!(is.character(intensity)&&length(intensity)==1L&&is.element(intensity, parnames))) stop("argument intensity must be NULL or have length one, be of class character and match a parameter name") if(!(is.character(dim)|is.numeric(dim))|length(dim)!=1L) stop("argument dim must have length one and be of class character or numeric") if(is.character(dim)){ if(!is.element(dim, parnames)) stop("When dim is a character it must agree with one of the parameter names of the model") } else{ dim <- round(dim) if(dim<1L) stop("When dim is a numeric it must be a positive integer") } ## Catch extra unknown args (will be appended to output object below). dots <- list(...) ## Create output object. out <- function(...){ caller <- match.call()[[1L]] caller <- eval(substitute(caller), parent.frame()) fixedpar <- list(...) nam <- names(fixedpar) if(length(fixedpar)>0&&is.null(nam)) stop(paste("Named arguments are required. Please supply parameter values in a", sQuote("tag=value"), "form")) match <- is.element(nam, parnames) if(sum(!match)>0) warning(paste("Not all supplied argument(s) make sense. Valid arguments are: ", paste(parnames, collapse = ", "), ". 
The following supplied argument(s) will be ignored: ", paste(nam[!match], collapse = ", "), sep = "")) fixedpar <- fixedpar[match] ## Code to always fix the dimension to a numeric when calling the function ####### if(is.character(dim) && !is.element(dim,names(fixedpar))){ dimpar <- structure(list(2), .Names=dim) fixedpar <- c(fixedpar, dimpar) } ## Detect inhomogeneous intensity (an image), and replace by max and an image for thinning thin <- NULL if(!is.null(intensity)){ lambda <- getElement(fixedpar, intensity) if(is.im(lambda)){ lambdamax <- max(lambda) thin <- lambda/lambdamax fixedpar[[intensity]] <- lambdamax } } obj <- list(fixedpar = fixedpar, freepar = parnames[!is.element(parnames,names(fixedpar))], kernel = kernel, specden = specden, convkernel = convkernel, intensity = intensity, thin = thin, dim = dim, name = name, range = range, valid = valid, parbounds = parbounds, specdenrange = specdenrange, startpar = startpar, isotropic = isotropic, caller = caller, basis = basis ) obj <- append(obj, dots) class(obj) <- "detpointprocfamily" return(obj) } class(out) <- c("detpointprocfamilyfun", "pointprocfamilyfun", class(out)) attr(out, "parnames") <- parnames attr(out, "name") <- name return(out) } detpointprocfamilyfun } ) print.detpointprocfamilyfun <- function(x, ...){ cat(paste(attr(x, "name"), "determinantal point process model family\n")) cat("The parameters of the family are:\n") cat(attr(x, "parnames"), sep = ", ") cat("\n") invisible(NULL) } dppBessel <- detpointprocfamilyfun( name="Bessel", kernel=function(x, lambda, alpha, sigma, d){ a <- 0.5*(sigma+d) y <- abs(x/alpha) # Kernel: lambda*2^a*gamma(a+1)*besselJ(2*y*sqrt(a),a) / (2*y*sqrt(a))^a logrslt <- log(lambda) + a*log(2) + lgamma(a+1) - a*log(2*y*sqrt(a)) rslt <- exp(logrslt) * besselJ(2*y*sqrt(a), a) rslt[x==0] <- lambda return(rslt) }, specden=function(x, lambda, alpha, sigma, d){ a <- sigma+d # specden: 
lambda*(2*pi)^(d/2)*alpha^d*gamma(0.5*a+1)/a^(d/2)/gamma(sigma/2+1)*(1-2*pi^2*alpha^2*x^2/a)^(sigma/2) logrslt <- log(lambda) + (d/2)*log(2*pi) + d*log(alpha) + lgamma(0.5*a+1) logrslt <- logrslt - (d/2)*log(a) - lgamma(sigma/2+1) tmp <- 1-2*pi^2*alpha^2*x^2/a warnopt <- options(warn=-1) logrslt <- logrslt + ifelse(tmp<0, -Inf, (sigma/2)*log(tmp)) options(warnopt) return(exp(logrslt)) }, specdenrange=function(model){ p <- model$fixedpar sqrt((p$sigma+p$d)/(2*pi^2*p$alpha^2)) }, valid=function(lambda, alpha, sigma, d){ a <- sigma+d OK <- lambda>0 && alpha>0 && d>=1 && sigma>=0 if(!OK) return(FALSE) ## Upper bound for alpha (using log-scale) lognum <- log(a^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log( lambda*(2*pi^(0.5*d))) + lgamma(0.5*a+1) logalphamax <- (1/d) * (lognum - logdenom) return(OK && log(alpha) <= logalphamax) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, sigma, d){ lognum <- log((sigma+d)^(0.5*d)) + lgamma(0.5*sigma+1) logdenom <- log(2*pi^(0.5*d)) + lgamma(0.5*(sigma+d)+1) switch(name, lambda = c(0, exp(lognum - log( alpha^d) - logdenom)) , alpha = c(0, exp((1/d) * (lognum - log(lambda) - logdenom))), sigma = c(0, switch(as.character(d), "2"=Inf, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("d" %in% model$freepar){ model <- update(model, d=spatdim(X)) } if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("sigma" %in% model$freepar){ sigma <- 2 while(!is.na(OK <- valid(model <- update(model, sigma=sigma)))&&!OK) sigma <- sigma/2 rslt <- c(rslt, "sigma" = sigma) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppCauchy <- detpointprocfamilyfun( name="Cauchy", 
kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda * (1+(x/alpha)^2)^(-nu-d/2) rslt[x==0] <- lambda return(rslt) }, specden=function(x, lambda, alpha, nu, d){ y <- 2*x*alpha*pi rslt <- lambda * y^nu * besselK(x = y, nu = nu) * (sqrt(pi)*alpha)^d * exp((1-nu)*log(2) - lgamma(nu+d/2)) rslt[x==0] <- lambda * exp(lgamma(nu) - lgamma(nu+d/2)) * (sqrt(pi)*alpha)^d return(rslt) }, Kfun = function(x, lambda, alpha, nu, d){ rslt <- pi*x^2 - pi*alpha^2/(2*nu+1) * (1 - (alpha^2/(alpha^2+x^2))^(2*nu+1)) rslt[rslt<0] <- 0 return(rslt) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha * sqrt((1-bound)^(-1/(2*nu+d))-1)) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu+d/2)/(gamma(nu)*(sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu+d/2)-lgamma(nu))/lambda)^(1/d)/sqrt(pi)), ## nu bound only implemented for d = 2. 
nu = c(switch(as.character(d), "2"=pi*lambda*alpha^2, NA), Inf), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppGauss <- detpointprocfamilyfun( name="Gaussian", kernel=function(x, lambda, alpha, d){ rslt <- lambda*exp(-(x/alpha)^2) return(rslt) }, specden=function(x, lambda, alpha, d){ lambda * (sqrt(pi)*alpha)^d * exp(-(x*alpha*pi)^2) }, convkernel=function(x, k, lambda, alpha, d){ logres <- k*log(lambda*pi*alpha^2) - log(pi*k*alpha^2) - x^2/(k*alpha^2) return(exp(logres)) }, Kfun = function(x, lambda, alpha, d){ pi*x^2 - pi*alpha^2/2*(1-exp(-2*x^2/alpha^2)) }, valid=function(lambda, alpha, d){ lambda>0 && alpha>0 && d>=1 && lambda <= (sqrt(pi)*alpha)^(-d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, bound = .99){ if(missing(alpha)) stop("The parameter alpha is missing.") if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") return(alpha*sqrt(-log(sqrt(1-bound)))) }, parbounds=function(name, lambda, alpha, d){ switch(name, lambda = c(0, (sqrt(pi)*alpha)^(-d)), alpha = c(0, lambda^(-1/d)/sqrt(pi)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) rslt <- c(rslt, "lambda" = lambda) model <- update(model, lambda=lambda) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] rslt <- c(rslt, "alpha" = alpha) } 
return(rslt) } ) dppMatern <- detpointprocfamilyfun( name="Whittle-Matern", kernel=function(x, lambda, alpha, nu, d){ rslt <- lambda*2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) rslt[x==0] <- lambda return(rslt) }, specden=function(x, lambda, alpha, nu, d){ lambda * exp(lgamma(nu+d/2) - lgamma(nu)) * (2*sqrt(pi)*alpha)^d * (1+(2*x*alpha*pi)^2)^(-nu-d/2) }, convkernel=function(x, k, lambda, alpha, nu, d){ nu2 <- k*(nu+d/2)-d/2 logres <- (nu2)*log(x/alpha) + log(besselK(x = x/alpha, nu = nu2, expon.scaled = TRUE)) - x/alpha logres[x == 0] <- (nu2-1)*log(2) + lgamma(nu2) logres <- logres + k*log(lambda) + k*(lgamma(nu+d/2)-lgamma(nu)) + (d*k-d+1-nu2)*log(2) + d*(k-1)*log(sqrt(pi)*alpha) - lgamma(nu2+d/2) index <- which(logres == Inf) logres[index] <- -Inf return(exp(logres)) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! lambda>0 && alpha>0 && nu>0 && nu<=50 && d>=1 && lambda <= gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d) }, isotropic=TRUE, intensity="lambda", dim="d", range=function(alpha, nu, d, bound = .99, exact = FALSE){ if(missing(alpha)) stop("The parameter alpha is missing.") if(missing(nu)) stop("The parameter nu is missing.") if(missing(d)) stop("The parameter d (giving the dimension) is missing.") if(!is.logical(exact)) stop("Argument exact must be a logical.") if(!exact&&d==2) return(alpha * sqrt(8*nu)) ## range suggested by Haavard Rue et al. if(!(is.numeric(bound)&&bound>0&&bound<1)) stop("Argument bound must be a numeric between 0 and 1.") fun <- function(x) sqrt(1-bound)-2^(1-nu) / gamma(nu) * ((x/alpha)^nu) * besselK(x = x/alpha, nu = nu) return(uniroot(fun, c(sqrt(.Machine$double.eps),1e3*alpha*sqrt(nu)))$root) }, parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, gamma(nu)/(gamma(nu+d/2)*(2*sqrt(pi)*alpha)^d)), alpha = c(0, (exp(lgamma(nu)-lgamma(nu+d/2))/lambda)^(1/d)/2/sqrt(pi)), ## nu bound only implemented for d = 2 and d = 4. 
nu = c(0, switch(as.character(d), "2"=1/(4*pi*lambda*alpha^2), "4"=sqrt(1/4+1/(lambda*16*pi*pi*alpha^4))-1/2, NA)), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) dppPowerExp <- detpointprocfamilyfun( name="Power Exponential Spectral", specden=function(x, lambda, alpha, nu, d){ lambda * gamma(d/2+1) * alpha^d / (pi^(d/2)*gamma(d/nu+1)) * exp(-(alpha*x)^nu) }, valid=function(lambda, alpha, nu, d){ ## Note the upper bound on nu for numerical stability! 
lambda>0 && alpha>0 && nu>0 && nu<=20 && d>=1 && lambda <= pi^(d/2)*gamma(d/nu+1) / (gamma(1+d/2)*alpha^d) }, isotropic=TRUE, intensity="lambda", dim="d", parbounds=function(name, lambda, alpha, nu, d){ switch(name, lambda = c(0, pi^(d/2)*gamma(d/nu+1) / (gamma(d/2+1)*alpha^d)), alpha = c(0, (pi^(d/2)*gamma(d/nu+1) / (lambda * gamma(d/2+1)))^(1/d)), nu = c(NA, NA), stop("Parameter name misspecified") ) }, startpar=function(model, X){ rslt <- NULL if("lambda" %in% model$freepar){ lambda <- intensity(X) while(!is.na(OK <- valid(model <- update(model, lambda=lambda)))&&!OK) lambda <- lambda/2 rslt <- c(rslt, "lambda" = lambda) } if("nu" %in% model$freepar){ nu <- 2 while(!is.na(OK <- valid(model <- update(model, nu=nu)))&&!OK) nu <- nu/2 rslt <- c(rslt, "nu" = nu) } if("alpha" %in% model$freepar){ alpha <- .8*dppparbounds(model, "alpha")[2L] while(!is.na(OK <- valid(model <- update(model, alpha=alpha)))&&!OK){ alpha <- alpha/2 } rslt <- c(rslt, "alpha" = alpha) } return(rslt) } ) spatstat/R/istat.R0000644000176200001440000001430713333543255013600 0ustar liggesusers# # interactive analysis of point patterns # # $Revision: 1.23 $ $Date: 2015/10/21 09:06:57 $ # # istat <- function(x, xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "ppp") kraever("rpanel") # generate simulations of CSR for use in envelopes simx <- envelope(x, fun=NULL, nsim=39, verbose=FALSE, internal=list(csr=TRUE, eject="patterns")) # initial value of smoothing parameter sigma0 <- with(x$window, min(diff(xrange),diff(yrange)))/8 # create panel p <- rpanel::rp.control(title=paste("istat(", xname, ")", sep=""), panelname="istat", size=c(600,400), x=x, # point pattern xname=xname, # name of point pattern simx=simx, # simulated realisations of CSR stat="data", envel="none", sigma=sigma0) # Split panel into two halves # Left half of panel: display # Right half of panel: controls rpanel::rp.grid(p, "gdisplay", pos=list(row=0,column=0), width=400, height=400) rpanel::rp.grid(p, 
"gcontrols", pos=list(row=0,column=1), width=200, height=400) #----- Display side ------------ # This line is to placate the package checker mytkr2 <- NULL rpanel::rp.tkrplot(p, mytkr2, do.istat, pos=list(row=0,column=0,grid="gdisplay")) redraw <- function(panel) { rpanel::rp.tkrreplot(panel, mytkr2) panel } #----- Control side ------------ nextrow <- 0 pozzie <- function(n=nextrow,s='w') list(row=n,column=0,grid="gcontrols",sticky=s) # choice of summary statistic ftable <- c(data="data", density="kernel smoothed", Kest="K-function", Lest="L-function", pcf="pair correlation", Kinhom="inhomogeneous K", Linhom="inhomogeneous L", Fest="empty space function F", Gest="nearest neighbour function G", Jest="J-function") fvals <- names(ftable) flabs <- as.character(ftable) stat <- NULL rpanel::rp.radiogroup(p, stat, vals=fvals, labels=flabs, title="statistic", action=redraw, pos=pozzie(0)) nextrow <- 1 # envelopes? envel <- NULL evals <- c("none", "pointwise", "simultaneous") elabs <- c("No simulation envelopes", "Pointwise envelopes under CSR", "Simultaneous envelopes under CSR") rpanel::rp.radiogroup(p, envel, vals=evals, labels=elabs, title="Simulation envelopes", action=redraw, pos=pozzie(nextrow)) nextrow <- nextrow + 1 # smoothing parameters sigma <- NULL rect <- as.rectangle(x$window) winwid <- min(abs(diff(rect$xrange)), abs(diff(rect$yrange))) rpanel::rp.slider(p, sigma, winwid/80, winwid/2, action=redraw, title="sigma", initval=winwid/8, showvalue=TRUE, pos=pozzie(nextrow, '')) nextrow <- nextrow + 1 pcfbw <- pcfbwinit <- 0.15/sqrt(5 * x$n/area(x$window)) rpanel::rp.slider(p, pcfbw, pcfbwinit/10, 4 * pcfbwinit, action=redraw, title="bw", initval=pcfbwinit, showvalue=TRUE, pos=pozzie(nextrow, '')) nextrow <- nextrow + 1 # button to print a summary at console rpanel::rp.button(p, title="Print summary information", action=function(panel) { print(summary(panel$x)); panel}, pos=pozzie(nextrow)) nextrow <- nextrow + 1 # quit button rpanel::rp.button(p, title="Quit", 
quitbutton=TRUE, action= function(panel) { panel }, pos=pozzie(nextrow)) invisible(NULL) } # function that updates the plot when the control panel is operated do.istat <- function(panel) { x <- panel$x xname <- panel$xname envel <- panel$envel stat <- panel$stat sigma <- panel$sigma simx <- panel$simx if(stat=="data") { plot(x, main=xname) return(panel) } out <- switch(envel, none=switch(stat, density=density(x, sigma=sigma), Kest=Kest(x), Lest=Lest(x), pcf=pcf(x, bw=panel$pcfbw), Kinhom=Kinhom(x, sigma=sigma), Linhom=Linhom(x, sigma=sigma), Fest=Fest(x), Gest=Gest(x), Jest=Jest(x)), pointwise=switch(stat, density=density(x, sigma=sigma), Kest=envelope(x, Kest, nsim=39, simulate=simx), Lest=envelope(x, Lest, nsim=39, simulate=simx), pcf=envelope(x, pcf, bw=panel$pcfbw, nsim=39, simulate=simx), Kinhom=envelope(x, Kinhom, nsim=39, sigma=sigma, simulate=simx), Linhom=envelope(x, Linhom, nsim=39, sigma=sigma, simulate=simx), Fest=envelope(x, Fest, nsim=39, simulate=simx), Gest=envelope(x, Gest, nsim=39, simulate=simx), Jest=envelope(x, Jest, nsim=39, simulate=simx)), simultaneous=switch(stat, density=density(x, sigma=sigma), Kest=envelope(x, Kest, nsim=19, global=TRUE, simulate=simx), Lest=envelope(x, Lest, nsim=19, global=TRUE, simulate=simx), pcf=envelope(x, pcf, bw=panel$pcfbw, nsim=19, global=TRUE, simulate=simx), Kinhom=envelope(x, Kinhom, nsim=19, sigma=sigma, global=TRUE, simulate=simx), Linhom=envelope(x, Linhom, nsim=19, sigma=sigma, global=TRUE, simulate=simx), Fest=envelope(x, Fest, nsim=19, global=TRUE, simulate=simx), Gest=envelope(x, Gest, nsim=19, global=TRUE, simulate=simx), Jest=envelope(x, Jest, nsim=19, global=TRUE, simulate=simx)) ) # plot it if(stat %in% c("density", "Kinhom", "Linhom")) { plot(out, main=paste(stat, "(", xname, ", sigma)", sep="")) if(stat == "density") points(x) } else if(stat == "pcf") plot(out, main=paste("pcf(", xname, ", bw)", sep="")) else plot(out, main=paste(stat, "(", xname, ")", sep="")) return(panel) } 
spatstat/R/fgk3.R0000644000176200001440000003765113433376244013310 0ustar liggesusers#
# $Revision: 1.27 $ $Date: 2019/02/21 01:18:21 $
#
# Estimates of F, G and K for three-dimensional point patterns
#
#
# ............ user interface .............................
#
# K3est: estimate of the three-dimensional K-function of X (a "pp3" pattern),
# with translation and/or isotropic edge correction, optionally as a
# ratio object (ratio=TRUE).
K3est <- function(X, ..., rmax=NULL, nrval=128,
                  correction=c("translation", "isotropic"),
                  ratio=FALSE) {
  stopifnot(inherits(X, "pp3"))
  correction <- pickoption("correction", correction,
                           c(translation="translation",
                             trans="translation",
                             isotropic="isotropic",
                             iso="isotropic",
                             best="isotropic"),
                           multi=TRUE)
  trap.extra.arguments(..., .Context="In K3est")
  B <- X$domain
  if(is.null(rmax)) rmax <- diameter(B)/2
  r <- seq(from=0, to=rmax, length.out=nrval)
  np <- npoints(X)
  # common denominator for the ratio representation
  denom <- np * (np-1)/volume(B)
  # this will be the output data frame
  K <- data.frame(r=r, theo= (4/3) * pi * r^3)
  desc <- c("distance argument r",
            "theoretical Poisson %s")
  K <- ratfv(K, NULL, denom,
             "r", quote(K[3](r)),
             "theo", NULL, c(0,rmax/2),
             c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "3"),
             ratio=ratio)
  # extract the x,y,z ranges as a vector of length 6
  flatbox <- unlist(B[1:3])
  # extract coordinates
  coo <- coords(X)
  if(any(correction %in% "translation")) {
    u <- k3engine(coo$x, coo$y, coo$z, flatbox,
                  rmax=rmax, nrval=nrval, correction="translation")
    K <- bind.ratfv(K,
                    data.frame(trans=u$num), u$denom,
                    "{hat(%s)[%s]^{trans}}(r)",
                    "translation-corrected estimate of %s",
                    "trans",
                    ratio=ratio)
  }
  if(any(correction %in% "isotropic")) {
    u <- k3engine(coo$x, coo$y, coo$z, flatbox,
                  rmax=rmax, nrval=nrval, correction="isotropic")
    K <- bind.ratfv(K,
                    data.frame(iso=u$num), u$denom,
                    "{hat(%s)[%s]^{iso}}(r)",
                    "isotropic-corrected estimate of %s",
                    "iso",
                    ratio=ratio)
  }
  # default is to display them all
  formula(K) <- . ~ r
  unitname(K) <- unitname(X)
  return(K)
}

# G3est: estimate of the three-dimensional nearest-neighbour distance
# distribution function G of X, with border ("rs"), Kaplan-Meier ("km")
# and/or Hanisch ("han") corrections for censoring.
G3est <- function(X, ..., rmax=NULL, nrval=128,
                  correction=c("rs", "km", "Hanisch")) {
  stopifnot(inherits(X, "pp3"))
  correction <- pickoption("correction", correction,
                           c(rs="rs",
                             border="rs",
                             km="km",
                             KM="km",
                             Hanisch="han",
                             hanisch="han",
                             best="km"),
                           multi=TRUE)
  trap.extra.arguments(..., .Context="In G3est")
  B <- X$domain
  if(is.null(rmax)) rmax <- diameter(B)/2
  r <- seq(from=0, to=rmax, length.out=nrval)
  coo <- coords(X)
  lambda <- nrow(coo)/volume(B)
  # this will be the output data frame
  G <- data.frame(r=r, theo= 1 - exp( - lambda * (4/3) * pi * r^3))
  desc <- c("distance argument r",
            "theoretical Poisson %s")
  G <- fv(G, "r", substitute(G3(r), NULL),
          "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="G3")
  # extract the x,y,z ranges as a vector of length 6
  flatbox <- unlist(B[1:3])
  # collect four histograms for censored data
  u <- g3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval)
  if("rs" %in% correction)
    G <- bind.fv(G, data.frame(rs=u$rs), "%s[rs](r)",
                 "reduced sample estimate of %s", "rs")
  if("km" %in% correction)
    G <- bind.fv(G, data.frame(km=u$km), "%s[km](r)",
                 "Kaplan-Meier estimate of %s", "km")
  if("han" %in% correction)
    G <- bind.fv(G, data.frame(han=u$han), "%s[han](r)",
                 "Normalised Hanisch estimate of %s", "han")
  # default is to display them all
  formula(G) <- . ~ r
  unitname(G) <- unitname(X)
  return(G)
}

# F3est: estimate of the three-dimensional empty space function F of X,
# computed on a voxel grid of side length vside (default: chosen so the
# grid has about spatstat.options("nvoxel") voxels and the shortest box
# side is a whole number of voxels).
F3est <- function(X, ..., rmax=NULL, nrval=128, vside=NULL,
                  correction=c("rs", "km", "cs"),
                  sphere=c("fudge", "ideal", "digital")) {
  stopifnot(inherits(X, "pp3"))
  sphere <- match.arg(sphere)
  correction <- pickoption("correction", correction,
                           c(rs="rs",
                             border="rs",
                             km="km",
                             KM="km",
                             Kaplan="km",
                             cs="cs",
                             CS="cs",
                             best="km"),
                           multi=TRUE)
  trap.extra.arguments(..., .Context="In F3est")
  B <- X$domain
  if(is.null(rmax)) rmax <- diameter(B)/2
  r <- seq(from=0, to=rmax, length.out=nrval)
  coo <- coords(X)
  vol <- volume(B)
  lambda <- nrow(coo)/vol
  # determine voxel size
  # BUGFIX: was 'if(missing(vside))' -- with the declared default vside=NULL
  # an explicit call F3est(X, vside=NULL) would bypass this block and pass
  # NULL down to f3Cengine; test the value itself instead.
  if(is.null(vside)) {
    voxvol <- vol/spatstat.options("nvoxel")
    vside <- voxvol^(1/3)
    # ensure the shortest side is a whole number of voxels
    s <- shortside(B)
    m <- ceiling(s/vside)
    vside <- s/m
  }
  # compute theoretical value
  switch(sphere,
         ideal = {
           volsph <- (4/3) * pi * r^3
           spherename <- "ideal sphere"
         },
         fudge = {
           volsph <- 0.78 * (4/3) * pi * r^3
           spherename <- "approximate sphere"
         },
         digital = {
           volsph <- digital.volume(c(0, rmax), nrval, vside)
           spherename <- "digital sphere"
         })
  theo.desc <- paste("theoretical Poisson %s using", spherename)
  # this will be the output data frame
  FF <- data.frame(r = r,
                   theo = 1 - exp( - lambda * volsph))
  desc <- c("distance argument r", theo.desc)
  labl <- c("r","%s[pois](r)")
  FF <- fv(FF, "r", substitute(F3(r), NULL),
           "theo", , c(0,rmax/2), labl, desc, fname="F3")
  # extract the x,y,z ranges as a vector of length 6
  flatbox <- unlist(B[1:3])
  # go
  u <- f3Cengine(coo$x, coo$y, coo$z, flatbox,
                 rmax=rmax, nrval=nrval, vside=vside)
  if("rs" %in% correction)
    FF <- bind.fv(FF, data.frame(rs=u$rs), "%s[rs](r)",
                  "reduced sample estimate of %s", "rs")
  if("km" %in% correction)
    FF <- bind.fv(FF, data.frame(km=u$km), "%s[km](r)",
                  "Kaplan-Meier estimate of %s", "km")
  if("cs" %in% correction)
    FF <- bind.fv(FF, data.frame(cs=u$cs), "%s[cs](r)",
                  "Chiu-Stoyan estimate of %s", "cs")
  # default is to display them all
  formula(FF) <- . ~ r
  unitname(FF) <- unitname(X)
  return(FF)
}

# pcf3est: kernel estimate of the three-dimensional pair correlation
# function of X, with Epanechnikov bandwidth delta (default chosen from
# the estimated intensity) and optional small-r bias correction.
pcf3est <- function(X, ..., rmax=NULL, nrval=128,
                    correction=c("translation", "isotropic"),
                    delta=NULL, adjust=1, biascorrect=TRUE) {
  stopifnot(inherits(X, "pp3"))
  correction <- pickoption("correction", correction,
                           c(translation="translation",
                             trans="translation",
                             isotropic="isotropic",
                             iso="isotropic",
                             best="isotropic"),
                           multi=TRUE)
  trap.extra.arguments(..., .Context="In pcf3est")
  B <- X$domain
  if(is.null(rmax)) rmax <- diameter(B)/2
  r <- seq(from=0, to=rmax, length.out=nrval)
  if(is.null(delta)) {
    lambda <- npoints(X)/volume(B)
    delta <- adjust * 0.26/lambda^(1/3)
  }
  if(biascorrect) {
    # bias correction
    rondel <- r/delta
    biasbit <- ifelseAX(rondel > 1, 1, (3/4)*(rondel + 2/3 - (1/3)*rondel^3))
  }
  # this will be the output data frame
  g <- data.frame(r=r, theo=rep.int(1, length(r)))
  desc <- c("distance argument r",
            "theoretical Poisson %s")
  g <- fv(g, "r", quote(g[3](r)),
          "theo", , c(0,rmax/2),
          c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("g", "3"))
  # extract the x,y,z ranges as a vector of length 6
  flatbox <- unlist(B[1:3])
  # extract coordinates
  coo <- coords(X)
  if(any(correction %in% "translation")) {
    u <- pcf3engine(coo$x, coo$y, coo$z, flatbox,
                    rmax=rmax, nrval=nrval, correction="translation",
                    delta=delta)
    gt <- u$f
    if(biascorrect) gt <- gt/biasbit
    g <- bind.fv(g, data.frame(trans=gt),
                 "{hat(%s)[%s]^{trans}}(r)",
                 "translation-corrected estimate of %s",
                 "trans")
  }
  if(any(correction %in% "isotropic")) {
    u <- pcf3engine(coo$x, coo$y, coo$z, flatbox,
                    rmax=rmax, nrval=nrval, correction="isotropic",
                    delta=delta)
    gi <- u$f
    if(biascorrect) gi <- gi/biasbit
    g <- bind.fv(g, data.frame(iso=gi),
                 "{hat(%s)[%s]^{iso}}(r)",
                 "isotropic-corrected estimate of %s",
                 "iso")
  }
  # default is to display them all
  formula(g) <- . ~ r
  unitname(g) <- unitname(X)
  attr(g, "delta") <- delta
  return(g)
}
# ............ low level code ..............................
#
# k3engine: low-level interface to the C routine 'RcallK3' which computes
# the 3D K-function on a box; 'correction' selects translation (0) or
# isotropic (1) edge correction. Returns raw numerator/denominator too.
k3engine <- function(x, y, z, box=c(0,1,0,1,0,1),
                     rmax=1, nrval=100, correction="translation") {
  code <- switch(correction, translation=0, isotropic=1)
  # NOTE: argument order must match the C signature of RcallK3 exactly
  res <- .C("RcallK3",
            as.double(x), as.double(y), as.double(z),
            as.integer(length(x)),
            as.double(box[1L]), as.double(box[2L]),
            as.double(box[3L]), as.double(box[4L]),
            as.double(box[5L]), as.double(box[6L]),
            as.double(0), as.double(rmax),
            as.integer(nrval),
            f = as.double(numeric(nrval)),
            num = as.double(numeric(nrval)),
            denom = as.double(numeric(nrval)),
            as.integer(code),
            PACKAGE = "spatstat")
  return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom,
              correction=correction))
}
#
#
#
# g3engine: low-level interface to the C routine 'RcallG3' (3D nearest
# neighbour distance distribution), minus-sampling (1) or Hanisch (3).
g3engine <- function(x, y, z, box=c(0,1,0,1,0,1),
                     rmax=1, nrval=10, correction="Hanisch G3") {
  code <- switch(correction, "minus sampling"=1, "Hanisch G3"=3)
  res <- .C("RcallG3",
            as.double(x), as.double(y), as.double(z),
            as.integer(length(x)),
            as.double(box[1L]), as.double(box[2L]),
            as.double(box[3L]), as.double(box[4L]),
            as.double(box[5L]), as.double(box[6L]),
            as.double(0), as.double(rmax),
            as.integer(nrval),
            f = as.double(numeric(nrval)),
            num = as.double(numeric(nrval)),
            denom = as.double(numeric(nrval)),
            as.integer(code),
            PACKAGE = "spatstat")
  return(list(range = c(0, rmax), f = res$f, num=res$num, denom=res$denom,
              correction=correction))
}
#
#
# f3engine: low-level interface to the C routine 'RcallF3' (3D empty space
# function on a voxel grid of side 'vside'); integer counts are converted to
# a ratio, defaulting to 1 where the denominator is zero.
f3engine <- function(x, y, z, box=c(0,1,0,1,0,1),
                     vside=0.05, range=c(0,1.414), nval=25,
                     correction="minus sampling") {
  #
  code <- switch(correction, "minus sampling"=1, no=0)
  res <- .C("RcallF3",
            as.double(x), as.double(y), as.double(z),
            as.integer(length(x)),
            as.double(box[1L]), as.double(box[2L]),
            as.double(box[3L]), as.double(box[4L]),
            as.double(box[5L]), as.double(box[6L]),
            as.double(vside),
            as.double(range[1L]), as.double(range[2L]),
            m=as.integer(nval),
            num = as.integer(integer(nval)),
            denom = as.integer(integer(nval)),
            as.integer(code),
            PACKAGE = "spatstat")
  r <- seq(from=range[1L], to=range[2L], length.out=nval)
  f <- with(res, ifelseXB(denom > 0, num/denom, 1))
  return(list(r = r, f = f, num=res$num, denom=res$denom,
              correction=correction))
}

# f3Cengine: interface to 'RcallF3cen' which returns censored-data histograms
# (obs/nco/cen/ncc) for the empty space function; post-processes them into
# reduced-sample, Kaplan-Meier and Chiu-Stoyan estimates.
f3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1),
                      vside=0.05, rmax=1, nrval=25) {
  #
  res <- .C("RcallF3cen",
            as.double(x), as.double(y), as.double(z),
            as.integer(length(x)),
            as.double(box[1L]), as.double(box[2L]),
            as.double(box[3L]), as.double(box[4L]),
            as.double(box[5L]), as.double(box[6L]),
            as.double(vside),
            as.double(0), as.double(rmax),
            m=as.integer(nrval),
            obs = as.integer(integer(nrval)),
            nco = as.integer(integer(nrval)),
            cen = as.integer(integer(nrval)),
            ncc = as.integer(integer(nrval)),
            upperobs = as.integer(integer(1L)),
            uppercen = as.integer(integer(1L)),
            PACKAGE = "spatstat")
  r <- seq(from=0, to=rmax, length.out=nrval)
  #
  obs <- res$obs
  nco <- res$nco
  cen <- res$cen
  ncc <- res$ncc
  upperobs <- res$upperobs
  uppercen <- res$uppercen
  #
  breaks <- breakpts.from.r(r)
  km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
  rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen)
  #
  # Chiu-Stoyan estimate: cumulative uncensored counts / eroded volumes,
  # normalised by the largest finite value
  ero <- eroded.volumes(as.box3(box), r)
  H <- cumsum(nco/ero)
  cs <- H/max(H[is.finite(H)])
  #
  return(list(rs=rs, km=km$km, hazard=km$lambda, cs=cs, r=r))
}

# g3Cengine: interface to 'RcallG3cen'; like f3Cengine but for the nearest
# neighbour distance distribution, yielding a Hanisch estimate instead of cs.
g3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1),
                      rmax=1, nrval=25) {
  #
  res <- .C("RcallG3cen",
            as.double(x), as.double(y), as.double(z),
            as.integer(length(x)),
            as.double(box[1L]), as.double(box[2L]),
            as.double(box[3L]), as.double(box[4L]),
            as.double(box[5L]), as.double(box[6L]),
            as.double(0), as.double(rmax),
            m=as.integer(nrval),
            obs = as.integer(integer(nrval)),
            nco = as.integer(integer(nrval)),
            cen = as.integer(integer(nrval)),
            ncc = as.integer(integer(nrval)),
            upperobs = as.integer(integer(1L)),
            uppercen = as.integer(integer(1L)),
            PACKAGE = "spatstat")
  r <- seq(from=0, to=rmax, length.out=nrval)
  #
  obs <- res$obs
  nco <- res$nco
  cen <- res$cen
  ncc <- res$ncc
  upperobs <- res$upperobs
  uppercen <- res$uppercen
  #
  breaks <- breakpts.from.r(r)
  km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs)
  rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen)
  #
  ero <- eroded.volumes(as.box3(box), r)
  H <- cumsum(nco/ero)
  han <- H/max(H[is.finite(H)])
  return(list(rs=rs, km=km$km, hazard=km$lambda, han=han, r=r))
}

# pcf3engine: interface to 'Rcallpcf3' (3D pair correlation function),
# with kernel bandwidth 'delta'; correction: translation (0) / isotropic (1).
pcf3engine <- function(x, y, z, box=c(0,1,0,1,0,1),
                       rmax=1, nrval=100,
                       correction="translation", delta=rmax/10) {
  code <- switch(correction, translation=0, isotropic=1)
  res <- .C("Rcallpcf3",
            as.double(x), as.double(y), as.double(z),
            as.integer(length(x)),
            as.double(box[1L]), as.double(box[2L]),
            as.double(box[3L]), as.double(box[4L]),
            as.double(box[5L]), as.double(box[6L]),
            as.double(0), as.double(rmax),
            as.integer(nrval),
            f = as.double(numeric(nrval)),
            num = as.double(numeric(nrval)),
            denom = as.double(numeric(nrval)),
            method=as.integer(code),
            delta=as.double(delta),
            PACKAGE = "spatstat")
  return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom,
              correction=correction))
}
#
# ------------------------------------------------------------
#  volume of a sphere (exact and approximate)
#
# sphere.volume: exact volume (4/3) pi r^3 on a grid of nval radii
sphere.volume <- function(range=c(0,1.414), nval=10) {
  rr <- seq(from=range[1L], to=range[2L], length.out=nval)
  return( (4/3) * pi * rr^3)
}

digital.volume <- function(range=c(0, 1.414), nval=25,
                           vside= 0.05) {
  #	Calculate number of points in digital sphere
  #	by performing distance transform for a single point
  #	in the middle of a suitably large box
  #
  #	This takes EIGHT TIMES AS LONG as the corresponding empirical F-hat !!!
  #
  w <- 2 * range[2L] + 2 * vside
  #
  dvol <- .C("RcallF3",
             as.double(w/2), as.double(w/2), as.double(w/2),
             as.integer(1L),
             as.double(0), as.double(w),
             as.double(0), as.double(w),
             as.double(0), as.double(w),
             as.double(vside),
             as.double(range[1L]), as.double(range[2L]),
             as.integer(nval),
             num = as.integer(integer(nval)),
             denom = as.integer(integer(nval)),
             as.integer(0),
             PACKAGE = "spatstat")$num
  #
  # convert voxel counts to volumes
  (vside^3) * dvol
}
spatstat/R/aaaa.R0000644000176200001440000000250713333543254013335 0ustar liggesusers#'
#' aaaa.R
#'
#' Code that must be read before the rest of the R code in spatstat
#'
#' $Revision: 1.4 $ $Date: 2014/12/10 10:34:53 $
#' ...................................................................
#' intermaker:
#' Class structure for functions like 'Strauss'
#' so they print a nice description.
#'

intermaker <- function(f, blank) {
  # f is the creator function like 'Strauss'
  class(f) <- c("intermaker", class(f))
  # blank is the prototype interaction object: extract some fields
  desired <- c("creator", "name", "par", "parnames", "pardesc")
  avail <- desired[desired %in% names(blank)]
  attr(f, "b") <- blank[avail]
  return(f)
}

# print method for "intermaker": describes the creator function and,
# when parameter descriptions are available, explains each argument.
print.intermaker <- function(x, ...) {
  b <- attr(x, "b")
  argh <- names(formals(x))
  explain <- NULL
  if(length(argh) > 0) {
    desc <- b$pardesc %orifnull% b$parnames
    namep <- names(b$par)
    if(length(desc) == length(namep) && all(argh %in% namep)) {
      names(desc) <- namep
      explain <- paste(", where",
                       commasep(paste(sQuote(argh), "is the", desc[argh])))
    }
  }
  blah <- paste0("Function ", b$creator,
                 paren(paste(argh, collapse=", ")),
                 ": creates the interpoint interaction of the ",
                 b$name,
                 explain)
  splat(blah)
  return(invisible(NULL))
}
spatstat/R/Math.imlist.R0000644000176200001440000000156613333543254014647 0ustar liggesusers##
##   Math.imlist.R
##
##   $Revision: 1.4 $ $Date: 2017/08/15 03:46:57 $
##

# Group generic 'Math' (abs, sqrt, exp, ...) applied to each image in a list
Math.imlist <- function(x, ...){
  solapply(x, .Generic, ...)
}

# Group generic 'Complex' (Re, Im, Mod, ...) applied to each image in a list
Complex.imlist <- function(z){
  solapply(z, .Generic)
}

# Group generic 'Summary' (max, min, sum, ...) over all images in the list;
# names are stripped from image arguments so do.call does not misinterpret them
Summary.imlist <- function(..., na.rm=TRUE){
  argh <- expandSpecialLists(list(...))
  if(length(names(argh)) > 0) {
    isim <- sapply(argh, is.im)
    names(argh)[isim] <- ""
  }
  do.call(.Generic, c(argh, list(na.rm=na.rm)))
}

# Group generic 'Ops' (+, -, *, ==, ...) for image lists:
# unary, imlist-with-imlist (elementwise), or imlist-with-other operand
Ops.imlist <- function(e1,e2=NULL){
  if(nargs() == 1L) {
    #' unary operation
    return(solapply(e1, .Generic))
  }
  #' binary operation
  if(inherits(e2, "imlist")) {
    #' two image lists - must have equal length
    v <- mapply(.Generic, unname(e1), unname(e2), SIMPLIFY=FALSE)
    names(v) <- names(e1)
    return(as.solist(v))
  }
  #' other binary operation e.g. imlist + constant, imlist + im
  return(solapply(e1, .Generic, e2=e2))
}
spatstat/R/deltametric.R0000644000176200001440000000136713333543254014752 0ustar liggesusers#
#   deltametric.R
#
#   Delta metric
#
#   $Revision: 1.4 $ $Date: 2014/10/24 00:22:30 $
#

# deltametric: Baddeley's delta-metric between two spatial objects A and B,
# based on the L^p distance (cutoff c) between their distance transforms.
deltametric <- function(A, B, p=2, c=Inf, ...) {
  stopifnot(is.numeric(p) && length(p) == 1L && p > 0)
  # ensure frames are identical
  bb <- boundingbox(as.rectangle(A), as.rectangle(B))
  # enforce identical frames
  A <- rebound(A, bb)
  B <- rebound(B, bb)
  # compute distance functions
  dA <- distmap(A, ...)
  dB <- distmap(B, ...)
  if(!is.infinite(c)) {
    # truncate distances at cutoff c
    dA <- eval.im(pmin.int(dA, c))
    dB <- eval.im(pmin.int(dB, c))
  }
  if(is.infinite(p)) {
    # L^infinity
    Z <- eval.im(abs(dA-dB))
    delta <- summary(Z)$max
  } else {
    # L^p
    Z <- eval.im(abs(dA-dB)^p)
    iZ <- summary(Z)$mean
    delta <- iZ^(1/p)
  }
  return(delta)
}
spatstat/R/envelope3.R0000644000176200001440000000560513551001745014350 0ustar liggesusers#
#   envelope3.R
#
#   simulation envelopes for pp3
#
#   $Revision: 1.13 $    $Date: 2016/04/25 02:34:40 $
#

# envelope method for 3D point patterns (class "pp3"): simulates CSR with
# the intensity of Y (or uses 'simulate') and delegates to envelopeEngine.
envelope.pp3 <-
  function(Y, fun=K3est, nsim=99, nrank=1, ...,
           funargs=list(), funYargs=funargs,
           simulate=NULL, verbose=TRUE,
           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
           alternative=c("two.sided", "less", "greater"),
           scale=NULL, clamp=FALSE,
           savefuns=FALSE, savepatterns=FALSE,
           nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL,
           maxnerr=nsim, rejectNA=FALSE, silent=FALSE,
           do.pwrong=FALSE, envir.simul=NULL) {
  cl <- short.deparse(sys.call())
  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
  if(is.null(fun)) fun <- K3est
  if("clipdata" %in% names(list(...)))
    stop(paste("The argument", sQuote("clipdata"),
               "is not available for envelope.pp3"))
  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
  envir.here <- sys.frame(sys.nframe())
  if(is.null(simulate)) {
    # ...................................................
    # Realisations of complete spatial randomness
    # will be generated by rpoispp
    # Data pattern X is argument Y
    # Data pattern determines intensity of Poisson process
    X <- Y
    sY <- summary(Y)
    Yintens <- sY$intensity
    Ydomain <- Y$domain
    # expression that will be evaluated
    simexpr <-
      if(!is.marked(Y)) {
        # unmarked point pattern
        expression(rpoispp3(Yintens, domain=Ydomain))
      } else {
        stop("Sorry, simulation of marked 3D point patterns is not yet implemented")
      }
    # suppress warnings from code checkers
    dont.complain.about(Yintens, Ydomain)
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = "csr",
                             expr = simexpr,
                             envir = envir.here,
                             csr   = TRUE)
  } else {
    # ...................................................
    # Simulations are determined by 'simulate' argument
    # Processing is deferred to envelopeEngine
    simrecipe <- simulate
    # Data pattern is argument Y
    X <- Y
  }
  envelopeEngine(X=X, fun=fun, simul=simrecipe,
                 nsim=nsim, nrank=nrank, ...,
                 funargs=funargs, funYargs=funYargs,
                 verbose=verbose, clipdata=FALSE,
                 transform=transform,
                 global=global, ginterval=ginterval, use.theory=use.theory,
                 alternative=alternative, scale=scale, clamp=clamp,
                 savefuns=savefuns, savepatterns=savepatterns,
                 nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD,
                 Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA,
                 silent=silent,
                 cl=cl, envir.user=envir.user,
                 expected.arg=c("rmax", "nrval"),
                 do.pwrong=do.pwrong)
}
spatstat/R/breakpts.R0000644000176200001440000001543713556707650014274 0ustar liggesusers#
#	breakpts.S
#
#	A simple class definition for the specification
#	of histogram breakpoints in the special form we need them.
#
#	even.breaks()
#
#	$Revision: 1.24 $	$Date: 2019/11/01 01:07:53 $
#
#
#	Other functions in this directory use the standard Splus function
#	hist() to compute histograms of distance values.
#	One argument of hist() is the vector 'breaks'
#	of breakpoints for the histogram cells.
#
#	The breakpoints must
#	(a) span the range of the data
#	(b) be given in increasing order
#	(c) satisfy breaks[2] = 0,
#
#	The function make.even.breaks() will create suitable breakpoints.
#
#	Condition (c) means that the first histogram cell has
#	*right* endpoint equal to 0.
#
#	Since all our distance values are nonnegative, the effect of (c) is
#	that the first histogram cell counts the distance values which are
#	exactly equal to 0. Hence F(0), the probability P{X = 0},
#	is estimated without a discretisation bias.
#
#	We assume the histograms have followed the default counting rule
#	in hist(), which is such that the k-th entry of the histogram
#	counts the number of data values in
#	I_k = ( breaks[k],breaks[k+1] ]	for k > 1
#	I_1 = [ breaks[1],breaks[2] ]
#
#	The implementations of estimators of c.d.f's in this directory
#	produce vectors of length = length(breaks)-1
#	with value[k] = estimate of F(breaks[k+1]),
#	i.e. value[k] is an estimate of the c.d.f. at the RIGHT endpoint
#	of the kth histogram cell.
#
#	An object of class 'breakpts' contains:
#
#	$val	the actual breakpoints
#	$max	the maximum value (= last breakpoint)
#	$ncells	total number of histogram cells
#	$r	right endpoints, r = val[-1]
#	$even	logical = TRUE if cells known to be evenly spaced
#	$npos	number of histogram cells on the positive halfline
#		= length(val) - 2,
#		or NULL if cells not evenly spaced
#	$step	histogram cell width
#		or NULL if cells not evenly spaced
#
# --------------------------------------------------------------------

# breakpts: low-level constructor for the "breakpts" class (no validation)
breakpts <- function(val, maxi, even=FALSE, npos=NULL, step=NULL) {
  out <- list(val=val, max=maxi, ncells=length(val)-1L, r = val[-1L],
              even=even, npos=npos, step=step)
  class(out) <- "breakpts"
  out
}

# scalardilate method: rescale all distance-valued fields by factor f
scalardilate.breakpts <- function(X, f, ...)
{
  out <- with(X, list(val = f*val,
                      max = f*max,
                      ncells = ncells,
                      r = f*r,
                      even = even,
                      npos = npos,
                      step = f*step))
  class(out) <- "breakpts"
  out
}

# make.even.breaks: construct evenly-spaced breakpoints on [0, bmax],
# specified either by the number of positive cells 'npos' or the step 'bstep'.
# A leading cell [-bstep, 0] is prepended so that F(0) is estimated exactly.
"make.even.breaks" <- function(bmax, npos, bstep) {
  if(bmax <= 0)
    stop("bmax must be positive")
  if(missing(bstep) && missing(npos))
    stop(paste("Must specify either", sQuote("bstep"),
               "or", sQuote("npos")))
  if(!missing(npos)) {
    bstep <- bmax/npos
    val <- seq(from=0, to=bmax, length.out=npos+1L)
    val <- c(-bstep,val)
    right <- bmax
  } else {
    npos <- ceiling(bmax/bstep)
    right <- bstep * npos
    val <- seq(from=0, to=right, length.out=npos+1L)
    val <- c(-bstep,val)
  }
  breakpts(val, right, TRUE, npos, bstep)
}

# as.breakpts: convert various specifications to a "breakpts" object.
# Accepts an existing breakpts object, a numeric vector of breakpoints,
# two unnamed arguments (bmax, npos), or named arguments 'max' and 'npos'.
"as.breakpts" <- function(...) {
  XL <- list(...)
  if(length(XL) == 1L) {
    # single argument
    X <- XL[[1L]]
    # BUG FIX: was '!is.null(class(X)) && class(X) == "breakpts"' which
    # fails when class(X) has length > 1 (error in R >= 4.2) and misses
    # subclasses; inherits() is the correct S3 class test.
    if(inherits(X, "breakpts"))
      # X already in correct form
      return(X)
    if(is.vector(X) && length(X) > 2) {
      # it's a vector
      if(X[2L] != 0)
        stop("breakpoints do not satisfy breaks[2] = 0")
      # The following test for equal spacing is used in hist.default
      steps <- diff(X)
      if(diff(range(steps)) < 1e-07 * mean(steps))
        # equally spaced
        return(breakpts(X, max(X), TRUE, length(X)-2, steps[1L]))
      else
        # unknown spacing
        return(breakpts(X, max(X), FALSE))
    }
  } else {
    # There are multiple arguments.
    # exactly two arguments - interpret as even.breaks()
    if(length(XL) == 2)
      return(make.even.breaks(XL[[1L]], XL[[2L]]))
    # two arguments 'max' and 'npos'
    if(!is.null(XL$max) && !is.null(XL$npos))
      return(make.even.breaks(XL$max, XL$npos))
    # otherwise
    stop("Don't know how to convert these data to breakpoints")
  }
  # never reached
}

# check.hist.lengths: verify that a histogram vector matches the number of
# cells implied by a "breakpts" object; stops with a message otherwise.
check.hist.lengths <- function(hist, breaks) {
  verifyclass(breaks, "breakpts")
  nh <- length(hist)
  nb <- breaks$ncells
  if(nh != nb)
    stop(paste("Length of histogram =", nh,
               "not equal to number of histogram cells =", nb))
}

# breakpts.from.r: build breakpoints from a vector of right endpoints r,
# which must start at 0 and be strictly increasing; a leading cell of the
# same width as the first interval is prepended.
breakpts.from.r <- function(r) {
  if(!is.numeric(r) && !is.vector(r))
    stop("r must be a numeric vector")
  if(length(r) < 2)
    stop(paste("r has length", length(r),
               "- must be at least 2"))
  if(r[1L] != 0)
    stop("First r value must be 0")
  if(any(diff(r) <= 0))
    stop("successive values of r must be increasing")
  dr <- r[2L] - r[1L]
  b <- c(-dr, r)
  return(as.breakpts(b))
}

# handle.r.b.args: resolve the user-supplied 'r' or 'breaks' argument
# (at most one may be given) into a "breakpts" object, otherwise choose
# a default spacing from the window resolution and default maximum radius.
handle.r.b.args <- function(r=NULL, breaks=NULL, window, pixeps=NULL,
                            rmaxdefault=NULL) {
  if(!is.null(r) && !is.null(breaks))
    stop(paste("Do not specify both",
               sQuote("r"), "and", sQuote("breaks")))
  if(!is.null(breaks)) {
    breaks <- as.breakpts(breaks)
  } else if(!is.null(r)) {
    breaks <- breakpts.from.r(r)
  } else {
    #' determine rmax
    #' ignore infinite or NA values of rmaxdefault
    if(!isTRUE(is.finite(rmaxdefault)))
      rmaxdefault <- NULL
    rmax <- rmaxdefault %orifnull% diameter(Frame(window))
    #' determine spacing
    if(is.null(pixeps)) {
      pixeps <-
        if(is.mask(window)) min(window$xstep, window$ystep) else rmax/128
    }
    rstep <- pixeps/4
    breaks <- make.even.breaks(rmax, bstep=rstep)
  }
  return(breaks)
}

# check.finespacing: verify that the vector r is finely spaced enough
# (spacing <= eps, within 1% tolerance); on failure, stop, warn or stay
# silent according to 'action'. Returns TRUE/FALSE.
check.finespacing <- function(r, eps=NULL, win=NULL,
                              rmaxdefault = max(r),
                              context="",
                              action=c("fatal", "warn", "silent"),
                              rname) {
  if(missing(rname)) rname <- deparse(substitute(r))
  action <- match.arg(action)
  if(is.null(eps)) {
    b <- handle.r.b.args(window=win, rmaxdefault=rmaxdefault)
    eps <- b$step
  }
  dr <- max(diff(r))
  if(dr > eps * 1.01) {
    whinge <- paste(context, "the successive",
                    rname, "values must be finely spaced:",
                    "given spacing =",
                    paste0(signif(dr, 5), ";"),
                    "required spacing <= ",
                    signif(eps, 3))
    switch(action,
           fatal = stop(whinge, call.=FALSE),
           warn = warning(whinge, call.=FALSE),
           silent = {})
    return(FALSE)
  }
  return(TRUE)
}
spatstat/R/markcorr.R0000644000176200001440000006733613623714544014305 0ustar liggesusers##
##
##    markcorr.R
##
##    $Revision: 1.84 $	$Date: 2020/02/21 08:46:31 $
##
##    Estimate the mark correlation function
##    and related functions
##
## ------------------------------------------------------------------------

# markvario: mark variogram gamma(r), computed via markcorr with the
# test function f(m1,m2) = (m1-m2)^2 / 2.
markvario <- local({

  halfsquarediff <- function(m1, m2) { ((m1-m2)^2)/2 }

  assigntheo <- function(x, value) { x$theo <- value; return(x) }

  markvario <- function(X, correction=c("isotropic", "Ripley", "translate"),
                        r=NULL, method="density", ..., normalise=FALSE) {
    m <- onecolumn(marks(X))
    if(!is.numeric(m))
      stop("Marks are not numeric")
    if(missing(correction))
      correction <- NULL
    ## compute reference value Ef
    weights <- pointweights(X, ..., parent=parent.frame())
    Ef <- if(is.null(weights)) var(m) else weighted.var(m, weights)
    ## Compute estimates
    v <- markcorr(X, f=halfsquarediff,
                  r=r, correction=correction, method=method,
                  normalise=normalise, ...,
                  internal=list(Ef=Ef))
    if(is.fv(v)) v <- anylist(v)
    ## adjust theoretical value and fix labels
    theoval <- if(normalise) 1 else var(m)
    for(i in seq_len(length(v))) {
      v[[i]]$theo <- theoval
      v[[i]] <- rebadge.fv(v[[i]], quote(gamma(r)), "gamma")
    }
    if(length(v) == 1) v <- v[[1]]
    return(v)
  }

  markvario
})

# markconnect: mark connection function p_{ij}(r) for a multitype pattern,
# computed via markcorr with the indicator test function 1{m1==i, m2==j}.
markconnect <- local({

  indicateij <- function(m1, m2, i, j) { (m1 == i) & (m2 == j) }

  markconnect <- function(X, i, j, r=NULL,
                          correction=c("isotropic", "Ripley", "translate"),
                          method="density", ..., normalise=FALSE) {
    stopifnot(is.ppp(X) && is.multitype(X))
    if(missing(correction))
      correction <- NULL
    marx <- marks(X)
    lev <- levels(marx)
    if(missing(i)) i <- lev[1]
    if(missing(j)) j <- lev[2]
    ## compute reference value Ef
    weights <- pointweights(X, ...,
parent=parent.frame())
    Ef <- if(is.null(weights)) mean(marx == i) * mean(marx == j) else
          mean(weights * (marx == i)) * mean(weights * (marx == j))
    ## compute estimates
    p <- markcorr(X, f=indicateij, r=r,
                  correction=correction, method=method,
                  ...,
                  fargs=list(i=i, j=j),
                  normalise=normalise,
                  internal=list(Ef=Ef))
    ## alter theoretical value and fix labels
    if(!normalise) {
      pipj <- mean(marx==i) * mean(marx==j)
      p$theo <- pipj
    } else {
      p$theo <- 1
    }
    p <- rebadge.fv(p,
                    new.ylab=substitute(p[i,j](r),
                      list(i=paste(i),j=paste(j))),
                    new.fname=c("p", paste0("list(", i, ",", j, ")")),
                    new.yexp=substitute(p[list(i,j)](r),
                      list(i=paste(i),j=paste(j))))
    return(p)
  }

  markconnect
})

# Emark: conditional mean E(r) of the mark given a point at distance r,
# computed via markcorr with the test function f(m1,m2) = m1.
Emark <- local({

  f1 <- function(m1, m2) { m1 }

  Emark <- function(X, r=NULL,
                    correction=c("isotropic", "Ripley", "translate"),
                    method="density", ..., normalise=FALSE) {
    stopifnot(is.ppp(X) && is.marked(X))
    marx <- marks(X)
    isvec <- is.vector(marx) && is.numeric(marx)
    isdf <- is.data.frame(marx) && all(sapply(as.list(marx), is.numeric))
    if(!(isvec || isdf))
      stop("All marks of X should be numeric")
    if(missing(correction))
      correction <- NULL
    E <- markcorr(X, f1, r=r,
                  correction=correction, method=method,
                  ..., normalise=normalise)
    if(isvec) {
      E <- rebadge.fv(E, quote(E(r)), "E")
    } else {
      E[] <- lapply(E, rebadge.fv, new.ylab=quote(E(r)), new.fname="E")
    }
    return(E)
  }

  Emark
})

# Vmark: conditional variance V(r) = E[M^2 | r] - E[M | r]^2, built from
# Emark and a second-moment markcorr with f(m1,m2) = m1^2.
Vmark <- local({

  f2 <- function(m1, m2) { m1^2 }

  Vmark <- function(X, r=NULL,
                    correction=c("isotropic", "Ripley", "translate"),
                    method="density", ..., normalise=FALSE) {
    if(missing(correction))
      correction <- NULL
    E <- Emark(X, r=r, correction=correction, method=method, ...,
               normalise=FALSE)
    E2 <- markcorr(X, f2, r=E$r, correction=correction, method=method,
                   ..., normalise=FALSE)
    if(normalise)
      sig2 <- var(marks(X))
    if(is.fv(E)) {
      E <- list(E)
      E2 <- list(E2)
    }
    V <- list()
    for(i in seq_along(E)) {
      Ei <- E[[i]]
      E2i <- E2[[i]]
      Vi <- eval.fv(E2i - Ei^2)
      if(normalise)
        Vi <- eval.fv(Vi/sig2[i,i])
      Vi <- rebadge.fv(Vi, quote(V(r)), "V")
      attr(Vi, "labl") <- attr(Ei, "labl")
      V[[i]] <- Vi
    }
    if(length(V) == 1) return(V[[1]])
    V <- as.anylist(V)
    names(V) <- colnames(marks(X))
    return(V)
  }

  Vmark
})

############## workhorses 'markcorr' and 'markcorrint' ####################

# Kmark (alias markcorrint, deprecated name): mark-weighted K-function.
markcorrint <- Kmark <-
  function(X, f=NULL, r=NULL,
           correction=c("isotropic", "Ripley", "translate"), ...,
           f1=NULL, normalise=TRUE, returnL=FALSE, fargs=NULL) {
  ## Computes the analogue of Kest(X)
  ## where each pair (x_i,x_j) is weighted by w(m_i,m_j)
  ##
  ## If multiplicative=TRUE then w(u,v) = f(u) f(v)
  ## If multiplicative=FALSE then w(u,v) = f(u, v)
  ##
  stopifnot(is.ppp(X) && is.marked(X))
  is.marked(X, dfok=FALSE)
  W <- Window(X)
  ##
  # warn when called under the deprecated name 'markcorrint'
  if(identical(sys.call()[[1]], as.name('markcorrint')))
    warn.once('markcorrint',
              "markcorrint will be deprecated in future versions of spatstat;",
              "use the equivalent function Kmark")
  ## validate test function
  h <- check.testfun(f, f1, X)
  f     <- h$f
  f1    <- h$f1
  ftype <- h$ftype
  multiplicative <- ftype %in% c("mul", "product")
  ##
  ## check corrections
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("isotropic", "Ripley", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=TRUE)
  correction <- implemented.for.K(correction, W$type, correction.given)
  isborder <- correction %in% c("border", "bord.modif")
  # border corrections require the separable form f(u,v) = f1(u) f1(v)
  if(any(isborder) && !multiplicative) {
    whinge <- paste("Border correction is not valid unless",
                    "test function is of the form f(u,v) = f1(u)*f1(v)")
    correction <- correction[!isborder]
    if(length(correction) == 0)
      stop(whinge)
    else
      warning(whinge)
  }
  ## estimated intensity
  lambda <- intensity(X)
  mX <- marks(X)
  # delegate to Kinhom with reciprocal-lambda weights built from the marks
  switch(ftype,
         mul={
           wt <- mX/lambda
           K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ...,
                       renormalise=FALSE)
           Ef2 <- mean(mX)^2
         },
         equ={
           fXX <- outer(mX, mX, "==")
           wt <- fXX/lambda^2
           K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ...,
                       renormalise=FALSE)
           mtable <- table(mX)
           Ef2 <- sum(mtable^2)/length(mX)^2
         },
         product={
           f1X <- do.call(f1, append(list(mX), fargs))
           wt <- f1X/lambda
           K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ...,
                       renormalise=FALSE)
           Ef2 <- mean(f1X)^2
         },
         general={
           fXX <- do.call(outer, append(list(mX, mX, f), fargs))
           wt <- fXX/lambda^2
           K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ...,
                       renormalise=FALSE)
           Ef2 <- mean(fXX)
         })
  K$theo <- K$theo * Ef2
  labl <- attr(K, "labl")
  if(normalise)
    K <- eval.fv(K/Ef2)
  if(returnL)
    K <- eval.fv(sqrt(K/pi))
  attr(K, "labl") <- labl
  # choose the mathematical label matching the normalise/returnL combination
  if(normalise && !returnL) {
    ylab <- quote(K[f](r))
    fnam <- c("K", "f")
  } else if(normalise && returnL) {
    ylab <- quote(L[f](r))
    fnam <- c("L", "f")
  } else if(!normalise && !returnL) {
    ylab <- quote(C[f](r))
    fnam <- c("C", "f")
  } else {
    ylab <- quote(sqrt(C[f](r)/pi))
    fnam <- "sqrt(C[f]/pi)"
  }
  K <- rebadge.fv(K, ylab, fnam)
  return(K)
}

# markcorr: mark correlation function k_f(r) with general test function f.
# For data-frame marks, recurses over columns and returns an "anylist".
markcorr <- function(X, f = function(m1, m2) { m1 * m2}, r=NULL,
                     correction=c("isotropic", "Ripley", "translate"),
                     method="density", ...,
                     weights=NULL, f1=NULL,
                     normalise=TRUE, fargs=NULL, internal=NULL) {
  ## mark correlation function with test function f
  stopifnot(is.ppp(X) && is.marked(X))
  nX <- npoints(X)
  ## set defaults to NULL
  if(missing(f)) f <- NULL
  if(missing(correction)) correction <- NULL
  ## handle data frame of marks
  marx <- marks(X, dfok=TRUE)
  if(is.data.frame(marx)) {
    nc <- ncol(marx)
    result <- list()
    for(j in 1:nc) {
      Xj <- X %mark% marx[,j]
      result[[j]] <- markcorr(Xj, f=f, r=r,
                              correction=correction, method=method,
                              ..., weights=weights, f1=f1,
                              normalise=normalise, fargs=fargs)
    }
    result <- as.anylist(result)
    names(result) <- colnames(marx)
    return(result)
  }
  ## weights
  if(unweighted <- is.null(weights)) {
    weights <- rep(1, nX)
  } else {
    weights <- pointweights(X, weights=weights, parent=parent.frame())
    stopifnot(all(weights > 0))
  }
  ## validate test function
  h <-
check.testfun(f, f1, X)
  f     <- h$f
  f1    <- h$f1
  ftype <- h$ftype
  ##
  ##
  npts <- npoints(X)
  W <- X$window
  ## determine r values
  rmaxdefault <- rmax.rule("K", W, npts/area(W))
  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  if(length(method) > 1)
    stop("Select only one method, please")
  if(method=="density" && !breaks$even)
    stop(paste("Evenly spaced r values are required if method=",
               sQuote("density"), sep=""))
  ## available selection of edge corrections depends on window
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("isotropic", "Ripley", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=TRUE)
  correction <- implemented.for.K(correction, W$type, correction.given)
  ## Denominator
  ## Ef = Ef(M,M') when M, M' are independent
  ## Optionally provided by other code
  Ef <- internal$Ef
  if(is.null(Ef)) {
    ## Apply f to every possible pair of marks, and average
    Ef <- switch(ftype,
                 mul = {
                   mean(marx * weights)^2
                 },
                 equ = {
                   if(unweighted) {
                     mtable <- table(marx)
                   } else {
                     mtable <- tapply(weights, marx, sum)
                     mtable[is.na(mtable)] <- 0
                   }
                   sum(mtable^2)/nX^2
                 },
                 product={
                   f1m <- do.call(f1, append(list(marx), fargs))
                   mean(f1m * weights)^2
                 },
                 general = {
                   mcross <- if(is.null(fargs)) {
                     outer(marx, marx, f)
                   } else {
                     do.call(outer, append(list(marx,marx,f),fargs))
                   }
                   if(unweighted) {
                     mean(mcross)
                   } else {
                     wcross <- outer(weights, weights, "*")
                     mean(mcross * wcross)
                   }
                 },
                 stop("Internal error: invalid ftype"))
  }
  if(normalise) {
    theory <- 1
    Efdenom <- Ef
  } else {
    theory <- Ef
    Efdenom <- 1
  }
  if(normalise) {
    ## check validity of denominator
    if(Efdenom == 0)
      stop("Cannot normalise the mark correlation; the denominator is zero")
    else if(Efdenom < 0)
      warning(paste("Problem when normalising the mark correlation:",
                    "the denominator is negative"))
  }
  ## this will be the output data frame
  result <- data.frame(r=r, theo= rep.int(theory,length(r)))
  desc <- c("distance argument r",
            "theoretical value (independent marks) for %s")
  alim <- c(0, min(rmax, rmaxdefault))
  ## determine conventional name of function
  if(ftype %in% c("mul", "equ")) {
    if(normalise) {
      ylab <- quote(k[mm](r))
      fnam <- c("k", "mm")
    } else {
      ylab <- quote(c[mm](r))
      fnam <- c("c", "mm")
    }
  } else {
    if(normalise) {
      ylab <- quote(k[f](r))
      fnam <- c("k", "f")
    } else {
      ylab <- quote(c[f](r))
      fnam <- c("c", "f")
    }
  }
  result <- fv(result, "r", ylab, "theo", , alim,
               c("r","{%s[%s]^{iid}}(r)"), desc, fname=fnam)
  ## find close pairs of points
  close <- closepairs(X, rmax)
  dIJ <- close$d
  I <- close$i
  J <- close$j
  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
  ## apply f to marks of close pairs of points
  ##
  mI <- marx[I]
  mJ <- marx[J]
  ff <- switch(ftype,
               mul = mI * mJ,
               equ = (mI == mJ),
               product={
                 if(is.null(fargs)) {
                   fI <- f1(mI)
                   fJ <- f1(mJ)
                 } else {
                   fI <- do.call(f1, append(list(mI), fargs))
                   fJ <- do.call(f1, append(list(mJ), fargs))
                 }
                 fI * fJ
               },
               general={
                 if(is.null(fargs))
                   f(marx[I], marx[J])
                 else
                   do.call(f, append(list(marx[I], marx[J]), fargs))
               })
  ## check values of f(M1, M2)
  if(is.logical(ff))
    ff <- as.numeric(ff)
  else if(!is.numeric(ff))
    stop("function f did not return numeric values")
  if(anyNA(ff))
    switch(ftype,
           mul=,
           equ=stop("some marks were NA"),
           product=,
           general=stop("function f returned some NA values"))
  if(any(ff < 0))
    switch(ftype,
           mul=,
           equ=stop("negative marks are not permitted"),
           product=,
           general=stop("negative values of function f are not permitted"))
  ## weights
  if(!unweighted)
    ff <- ff * weights[I] * weights[J]
  #### Compute estimates ##############
  if(any(correction == "none")) {
    ## uncorrected estimate
    edgewt <- rep.int(1, length(dIJ))
    ## get smoothed estimate of mark covariance
    Mnone <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
    result <- bind.fv(result,
                      data.frame(un=Mnone),
                      "{hat(%s)[%s]^{un}}(r)",
                      "uncorrected estimate of %s",
                      "un")
  }
  if(any(correction == "translate")) {
    ## translation correction
    XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
    edgewt <- edge.Trans(XI, XJ, paired=TRUE)
    ## get smoothed estimate of mark covariance
    Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
    result <- bind.fv(result,
                      data.frame(trans=Mtrans),
                      "{hat(%s)[%s]^{trans}}(r)",
                      "translation-corrected estimate of %s",
                      "trans")
  }
  if(any(correction == "isotropic")) {
    ## Ripley isotropic correction
    edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
    ## get smoothed estimate of mark covariance
    Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
    result <- bind.fv(result,
                      data.frame(iso=Miso),
                      "{hat(%s)[%s]^{iso}}(r)",
                      "Ripley isotropic correction estimate of %s",
                      "iso")
  }
  ## which corrections have been computed?
  nama2 <- names(result)
  corrxns <- rev(nama2[nama2 != "r"])
  ## default is to display them all
  formula(result) <- (. ~ r)
  fvnames(result, ".") <- corrxns
  ##
  unitname(result) <- unitname(X)
  return(result)
}

## mark cross-correlation function

# markcrosscorr: mark cross-correlation functions for every ordered pair of
# mark columns (factors are expanded into dummy variables); returns a "fasp"
# array of functions.
markcrosscorr <- function(X, r=NULL,
                          correction=c("isotropic", "Ripley", "translate"),
                          method="density", ...,
                          normalise=TRUE, Xname=NULL) {
  if(missing(Xname))
    Xname <- short.deparse(substitute(X))
  stopifnot(is.ppp(X) && is.marked(X))
  npts <- npoints(X)
  W <- Window(X)
  ## available selection of edge corrections depends on window
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("isotropic", "Ripley", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=TRUE)
  correction <- implemented.for.K(correction, W$type, correction.given)
  ## determine r values
  rmaxdefault <- rmax.rule("K", W, npts/area(W))
  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  ## find close pairs of points
  close <- closepairs(X, rmax)
  dIJ <- close$d
  I <- close$i
  J <- close$j
  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
  ## determine estimation method
  if(length(method) > 1)
    stop("Select only one method, please")
  if(method=="density" && !breaks$even)
    stop(paste("Evenly spaced r values are required if method=",
               sQuote("density"), sep=""))
  ## ensure marks are a data frame
  marx <- marks(X, dfok=TRUE)
  if(!is.data.frame(marx))
    marx <- data.frame(marks=marx)
  ## convert factor marks to dummy variables
  while(any(isfac <- sapply(marx, is.factor))) {
    i <- min(which(isfac))
    mari <- marx[,i]
    levi <- levels(mari)
    nami <- colnames(marx)[i]
    dumi <- 1 * outer(mari, levi, "==")
    colnames(dumi) <- paste0(nami, levi)
    marx <- as.data.frame(append(marx[,-i,drop=FALSE],
                                 list(dumi), after=i-1))
  }
  nc <- ncol(marx)
  nama <- colnames(marx)
  ## loop over all pairs of columns
  funs <- list()
  for(i in 1:nc) {
    marxi <- marx[,i]
    namei <- nama[i]
    for(j in 1:nc) {
      marxj <- marx[,j]
      namej <- nama[j]
      ## Denominator
      ## Ef = E M M' = EM EM'
      ## when M, M' are independent from the respective columns
      Ef <- mean(marxi) * mean(marxj)
      if(normalise) {
        theory <- 1
        Efdenom <- Ef
        ## check validity of denominator
        if(Efdenom == 0)
          stop(paste("Cannot normalise the mark correlation for",
                     namei, "x", namej,
                     "because the denominator is zero"),
               call.=FALSE)
        else if(Efdenom < 0)
          warning(paste("Problem when normalising the mark correlation for",
                        namei, "x", namej,
                        "- the denominator is negative"),
                  call.=FALSE)
      } else {
        theory <- Ef
        Efdenom <- 1
      }
      ## this will be the output data frame
      df.ij <- data.frame(r=r, theo= rep.int(theory,length(r)))
      desc <- c("distance argument r",
                "theoretical value (independent marks) for %s")
      alim <- c(0, min(rmax, rmaxdefault))
      ## determine conventional name of function
      mimj <- as.name(paste0(namei,".",namej))
      if(normalise) {
        ylab <- substitute(k[mm](r), list(mm=mimj))
        fnam <- c("k", as.character(mimj))
      } else {
        ylab <- substitute(c[mm](r), list(mm=mimj))
        fnam <- c("c", as.character(mimj))
      }
      fun.ij <- fv(df.ij, "r", ylab, "theo", , alim,
                   c("r","{%s[%s]^{ind}}(r)"), desc, fname=fnam)
      mI <- marxi[I]
      mJ <- marxj[J]
      ff <- mI * mJ
      ## check values of f(M1, M2)
      if(anyNA(ff))
        stop("some marks were NA", call.=FALSE)
      if(any(ff < 0))
        stop("negative marks are not permitted")
      ## Compute estimates ##############
      if(any(correction == "translate")) {
        ## translation correction
        XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
        edgewt <- edge.Trans(XI, XJ, paired=TRUE)
        ## get smoothed estimate of mark covariance
        Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
        fun.ij <- bind.fv(fun.ij,
                          data.frame(trans=Mtrans),
                          "{hat(%s)[%s]^{trans}}(r)",
                          "translation-corrected estimate of %s",
                          "trans")
      }
      if(any(correction == "isotropic")) {
        ## Ripley isotropic correction
        edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
        ## get smoothed estimate of mark covariance
        Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...)
        fun.ij <- bind.fv(fun.ij,
                          data.frame(iso=Miso),
                          "{hat(%s)[%s]^{iso}}(r)",
                          "Ripley isotropic correction estimate of %s",
                          "iso")
      }
      ## which corrections have been computed?
      nama2 <- names(fun.ij)
      corrxns <- rev(nama2[nama2 != "r"])
      ## default is to display them all
      formula(fun.ij) <- (. ~ r)
      fvnames(fun.ij, ".") <- corrxns
      ##
      unitname(fun.ij) <- unitname(X)
      funs <- append(funs, list(fun.ij))
    }
  }
  # matrix mapping array entries to list positions in 'funs'
  witch <- matrix(1:(nc^2), nc, nc, byrow=TRUE)
  header <- paste("Mark cross-correlation functions for", Xname)
  answer <- fasp(funs, witch, rowNames=nama, colNames=nama,
                 title=header, dataname=Xname)
  return(answer)
}

sewsmod <- function(d, ff, wt, Ef, rvals, method="smrep", ...,
                    nwtsteps=500) {
  ## Smooth Estimate of Weighted Second Moment Density
  ## (engine for computing mark correlations, etc)
  ## ------
  ## Vectors containing one entry for each (close) pair of points
  ## d  = interpoint distance
  ## ff = f(M1, M2) where M1, M2 are marks at the two points
  ## wt = edge correction weight
  ## -----
  ## Ef = E[f(M, M')] where M, M' are independent random marks
  ##
  d <- as.vector(d)
  ff <- as.vector(ff)
  wt <- as.vector(wt)
  switch(method,
         density={
           fw <- ff * wt
           sum.fw <- sum(fw)
           sum.wt <- sum(wt)
           ## smooth estimate of kappa_f
           est <- density(d, weights=fw/sum.fw,
                          from=min(rvals), to=max(rvals),
                          n=length(rvals), ...)$y
           numerator <- est * sum.fw
           ## smooth estimate of kappa_1
           est0 <- density(d, weights=wt/sum.wt,
                           from=min(rvals), to=max(rvals),
                           n=length(rvals), ...)$y
           denominator <- est0 * Ef * sum.wt
           result <- numerator/denominator
         },
         sm={
           ## This is slow!
oldopt <- options(warn=-1) smok <- requireNamespace("sm") options(oldopt) if(!smok) stop(paste("Option method=sm requires package sm,", "which is not available")) ## smooth estimate of kappa_f fw <- ff * wt est <- sm::sm.density(d, weights=fw, eval.points=rvals, display="none", nbins=0, ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(d, weights=wt, eval.points=rvals, display="none", nbins=0, ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, smrep={ oldopt <- options(warn=-1) smok <- requireNamespace("sm") options(oldopt) if(!smok) stop(paste("Option method=smrep requires package sm,", "which is not available")) hstuff <- resolve.defaults(list(...), list(hmult=1, h.weights=NA)) if(hstuff$hmult == 1 && all(is.na(hstuff$h.weights))) warning("default smoothing parameter may be inappropriate") ## use replication to effect the weights (it's faster) nw <- round(nwtsteps * wt/max(wt)) drep.w <- rep.int(d, nw) fw <- ff * wt nfw <- round(nwtsteps * fw/max(fw)) drep.fw <- rep.int(d, nfw) ## smooth estimate of kappa_f est <- sm::sm.density(drep.fw, eval.points=rvals, display="none", ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(drep.w, eval.points=rvals, display="none", ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, loess = { ## set up data frame df <- data.frame(d=d, ff=ff, wt=wt) ## fit curve to numerator using loess fitobj <- loess(ff ~ d, data=df, weights=wt, ...) 
## evaluate fitted curve at desired r values Eff <- predict(fitobj, newdata=data.frame(d=rvals)) ## normalise: ## denominator is the sample mean of all ff[i,j], ## an estimate of E(ff(M1,M2)) for M1,M2 independent marks result <- Eff/Ef }, ) return(result) } ############## user interface bits ################################## check.testfun <- local({ fmul <- function(m1, m2) { m1 * m2 } fequ <- function(m1, m2) { m1 == m2 } f1id <- function(m) { m } check.testfun <- function(f=NULL, f1=NULL, X) { ## Validate f or f1 as a test function for point pattern X ## Determine function type 'ftype' ## ("mul", "equ", "product" or "general") if(is.null(f) && is.null(f1)) { ## no functions given ## default depends on kind of marks if(is.multitype(X)) { f <- fequ ftype <- "equ" } else { f1 <- f1id ftype <- "mul" } } else if(!is.null(f1)) { ## f1 given ## specifies test function of the form f(u,v) = f1(u) f1(v) if(!is.null(f)) warning("argument f ignored (overridden by f1)") stopifnot(is.function(f1)) ftype <- "product" } else { ## f given if(is.character(fname <- f)) { switch(fname, "mul" = { f1 <- f1id ftype <- "mul" }, "equ" = { f <- fequ ftype <- "equ" }, { f <- get(fname) ftype <- "general" }) } else if(is.function(f)) { ftype <- if(isTRUE(all.equal(f, fmul))) "mul" else if(isTRUE(all.equal(f, fequ))) "equ" else "general" if(ftype == "mul" && is.multitype(X)) stop(paste("Inappropriate choice of function f;", "point pattern is multitype;", "types cannot be multiplied.")) } else stop("Argument f must be a function or the name of a function") } return(list(f=f, f1=f1, ftype=ftype)) } check.testfun }) spatstat/R/edges2triangles.R0000644000176200001440000000764713351661107015544 0ustar liggesusers# # edges2triangles.R # # $Revision: 1.16 $ $Date: 2018/09/23 09:21:22 $ # edges2triangles <- function(iedge, jedge, nvert=max(iedge, jedge), ..., check=TRUE, friendly=rep(TRUE, nvert)) { usefriends <- !missing(friendly) if(check) { stopifnot(length(iedge) == length(jedge)) 
stopifnot(all(iedge > 0)) stopifnot(all(jedge > 0)) if(!missing(nvert)) { stopifnot(all(iedge <= nvert)) stopifnot(all(jedge <= nvert)) } if(usefriends) { stopifnot(is.logical(friendly)) stopifnot(length(friendly) == nvert) usefriends <- !all(friendly) } } # zero length data, or not enough to make triangles if(length(iedge) < 3) return(matrix(integer(0), nrow=0, ncol=3, dimnames=list(NULL, c("i", "j", "k")))) # sort in increasing order of 'iedge' oi <- fave.order(iedge) iedge <- iedge[oi] jedge <- jedge[oi] # call C storage.mode(nvert) <- storage.mode(iedge) <- storage.mode(jedge) <- "integer" if(usefriends) { fr <- as.logical(friendly) storage.mode(fr) <- "integer" zz <- .Call("trioxgraph", nv=nvert, iedge=iedge, jedge=jedge, friendly=fr, PACKAGE="spatstat") } else if(spatstat.options("fast.trigraph")) { zz <- .Call("triograph", nv=nvert, iedge=iedge, jedge=jedge, PACKAGE="spatstat") } else { #' testing purposes only zz <- .Call("trigraph", nv=nvert, iedge=iedge, jedge=jedge, PACKAGE="spatstat") } mat <- as.matrix(as.data.frame(zz, col.names=c("i", "j", "k"))) return(mat) } # compute triangle diameters as well trianglediameters <- function(iedge, jedge, edgelength, ..., nvert=max(iedge, jedge), dmax=Inf, check=TRUE) { if(check) { stopifnot(length(iedge) == length(jedge)) stopifnot(length(iedge) == length(edgelength)) stopifnot(all(iedge > 0)) stopifnot(all(jedge > 0)) if(!missing(nvert)) { stopifnot(all(iedge <= nvert)) stopifnot(all(jedge <= nvert)) } if(is.finite(dmax)) check.1.real(dmax) } # zero length data if(length(iedge) == 0 || dmax < 0) return(data.frame(i=integer(0), j=integer(0), k=integer(0), diam=numeric(0))) # call C storage.mode(nvert) <- storage.mode(iedge) <- storage.mode(jedge) <- "integer" storage.mode(edgelength) <- "double" if(is.infinite(dmax)) { zz <- .Call("triDgraph", nv=nvert, iedge=iedge, jedge=jedge, edgelength=edgelength, PACKAGE = "spatstat") } else { storage.mode(dmax) <- "double" zz <- .Call("triDRgraph", nv=nvert, iedge=iedge, 
jedge=jedge, edgelength=edgelength, dmax=dmax,
                PACKAGE = "spatstat")
  }
  ## label the columns: triangle vertices (i,j,k) and triangle diameter
  df <- as.data.frame(zz)
  colnames(df) <- c("i", "j", "k", "diam")
  return(df)
}

## Find all triples of points of X that are mutually closer than rmax
## (i.e. triangles of the close-pairs graph with diameter <= rmax).
closetriples <- function(X, rmax) {
  a <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE)
  tri <- trianglediameters(a$i, a$j, a$d, nvert=npoints(X), dmax=rmax)
  return(tri)
}

# extract 'vees', i.e. triples (i, j, k) where i ~ j and i ~ k
edges2vees <- function(iedge, jedge, nvert=max(iedge, jedge), ...,
                       check=TRUE) {
  if(check) {
    ## validate the edge index vectors: equal length, positive,
    ## and (if nvert was supplied) within range
    stopifnot(length(iedge) == length(jedge))
    stopifnot(all(iedge > 0))
    stopifnot(all(jedge > 0))
    if(!missing(nvert)) {
      stopifnot(all(iedge <= nvert))
      stopifnot(all(jedge <= nvert))
    }
  }
  # zero length data, or not enough to make vees
  if(length(iedge) < 2)
    return(data.frame(i=numeric(0), j=numeric(0), k=numeric(0)))
  # call
  vees <- .Call("graphVees",
                nv = nvert,
                iedge = iedge,
                jedge = jedge,
                PACKAGE="spatstat")
  names(vees) <- c("i", "j", "k")
  vees <- as.data.frame(vees)
  return(vees)
}
spatstat/R/dgs.R0000644000176200001440000000724713333543254013235 0ustar liggesusers#
#
#    dgs.R
#
#    $Revision: 1.12 $ $Date: 2018/03/19 14:41:54 $
#
#    Diggle-Gates-Stibbard process
#
#
# -------------------------------------------------------------------
#

DiggleGatesStibbard <- local({

  # .......... auxiliary functions ................

  ## Compute, for each point of X, the sum of DGS pairwise interaction
  ## terms against the points of Y, excluding self-pairs (matched by id).
  dgsTerms <- function(X, Y, idX, idY, rho) {
    stopifnot(is.numeric(rho))
    # sort in increasing order of x coordinate
    # (required by the C routine's sweep algorithm)
    oX <- fave.order(X$x)
    oY <- fave.order(Y$x)
    Xsort <- X[oX]
    Ysort <- Y[oY]
    idXsort <- idX[oX]
    idYsort <- idY[oY]
    nX <- npoints(X)
    nY <- npoints(Y)
    # call C routine
    out <- .C("Ediggatsti",
              nnsource = as.integer(nX),
              xsource  = as.double(Xsort$x),
              ysource  = as.double(Xsort$y),
              idsource = as.integer(idXsort),
              nntarget = as.integer(nY),
              xtarget  = as.double(Ysort$x),
              ytarget  = as.double(Ysort$y),
              idtarget = as.integer(idYsort),
              rrho     = as.double(rho),
              values   = as.double(double(nX)),
              PACKAGE  = "spatstat")
    ## un-sort the results back to the original ordering of X
    answer <- numeric(nX)
    answer[oX] <- out$values
    return(answer)
  }

  # ......
template object ...................... BlankDGS <- list( name = "Diggle-Gates-Stibbard process", creator = "DiggleGatesStibbard", family = "pairwise.family", # evaluated later pot = function(d, par) { rho <- par$rho v <- log(sin((pi/2) * d/rho)^2) v[ d > par$rho ] <- 0 attr(v, "IsOffset") <- TRUE v }, par = list(rho = NULL), # to be filled in later parnames = "interaction range", hasInf = TRUE, init = function(self) { rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1L || rho <= 0) stop("interaction range rho must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { rho <- self$par$rho return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) 
{ # fast evaluator for DiggleGatesStibbard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGatesStibbard") dont.complain.about(splitInf) rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1L, npoints(U)) idU[EqualPairs[,2L]] <- EqualPairs[,1L] v <- dgsTerms(U, X, idU, idX, rho) v <- matrix(v, ncol=1L) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho return((pi/2 - 2/pi) * rho^2) } ) class(BlankDGS) <- "interact" DiggleGatesStibbard <- function(rho) { instantiate.interact(BlankDGS, list(rho = rho)) } DiggleGatesStibbard <- intermaker(DiggleGatesStibbard, BlankDGS) DiggleGatesStibbard }) spatstat/R/util.R0000644000176200001440000003063213616011600013414 0ustar liggesusers# # util.R miscellaneous utilities # # $Revision: 1.242 $ $Date: 2020/02/03 11:14:58 $ # # common invocation of matrixsample rastersample <- function(X, Y) { stopifnot(is.im(X) || is.mask(X)) stopifnot(is.im(Y) || is.mask(Y)) phase <- c((Y$yrow[1] - X$yrow[1])/X$ystep, (Y$xcol[1] - X$xcol[1])/X$xstep) scale <- c(Y$ystep/X$ystep, Y$xstep/X$xstep) if(is.im(X)) { # resample an image if(!is.im(Y)) Y <- as.im(Y) Xtype <- X$type Xv <- X$v # handle factor-valued image as integer if(Xtype == "factor") Xv <- array(as.integer(Xv), dim=X$dim) # resample naval <- switch(Xtype, factor=, integer= NA_integer_, logical = as.logical(NA_integer_), real = NA_real_, complex = NA_complex_, character = NA_character_, NA) Y$v <- matrixsample(Xv, Y$dim, phase=phase, scale=scale, na.value=naval) # inherit pixel data type from X Y$type <- Xtype if(Xtype == "factor") { lev <- levels(X) Y$v <- factor(Y$v, labels=lev, levels=seq_along(lev)) dim(Y$v) <- Y$dim } } else { # resample a mask if(!is.mask(Y)) Y <- as.mask(Y) Y$m <- matrixsample(X$m, Y$dim, phase=phase, scale=scale, na.value=FALSE) } return(Y) } pointgrid <- function(W, ngrid) { W <- 
as.owin(W)
  ## discretise the window as a binary pixel mask
  masque <- as.mask(W, dimyx=ngrid)
  ## coordinates of the pixel centres lying inside W
  rxy <- rasterxy.mask(masque, drop=TRUE)
  xx <- rxy$x
  yy <- rxy$y
  return(ppp(xx, yy, W))
}

## Extract a single column of mark values from marks 'm'.
## Stops if there are no marks; for a data frame of marks,
## only the first column is returned.
onecolumn <- function(m) {
  switch(markformat(m),
         none=stop("No marks provided"),
         vector=m,
         dataframe=m[,1, drop=TRUE],
         NA)
}

## Check whether an n * m binary mask would exceed the maximum
## permitted size spatstat.options("maxmatrix").
## Returns TRUE if acceptable; otherwise stops (fatal=TRUE),
## warns (silent=FALSE), or silently returns FALSE.
checkbigmatrix <- function(n, m, fatal=FALSE, silent=FALSE) {
  ## multiply as doubles to avoid integer overflow for large n, m
  nm <- as.numeric(n) * as.numeric(m)
  if(nm <= spatstat.options("maxmatrix"))
    return(TRUE)
  whinge <- paste("Attempted to create binary mask with",
                  n, "*", m, "=", nm, "entries")
  if(fatal) stop(whinge, call.=FALSE)
  if(!silent) warning(whinge, call.=FALSE)
  return(FALSE)
}

## ........... progress reports .....................

progressreport <- local({

  ## Save a state variable, either in the supplied 'state' list or
  ## (if state is NULL) in spatstat's package-global variables.
  Put <- function(name, value, state) {
    if(is.null(state)) {
      putSpatstatVariable(paste0("Spatstat.", name), value)
    } else {
      state[[name]] <- value
    }
    return(state)
  }

  ## Retrieve a state variable previously saved by Put().
  Get <- function(name, state) {
    if(is.null(state)) {
      value <- getSpatstatVariable(paste0("Spatstat.", name))
    } else {
      value <- state[[name]]
    }
    return(value)
  }

  IterationsPerLine <- function(charsperline, n, every, tick,
                                showtime, showevery) {
    # Calculate number of iterations that triggers a newline.
    # A dot is printed every 'tick' iterations
    # Iteration number is printed every 'every' iterations.
    # If showtime=TRUE, the time is shown every 'showevery' iterations
    # where showevery \in {1, every, n}.
chars.report <- max(1, ceiling(log10(n))) if(showtime) { chars.time <- nchar(' [etd 12:00:00] ') timesperreport <- if(showevery == 1) every else if(showevery == every) 1 else 0 chars.report <- chars.report + timesperreport * chars.time } chars.ticks <- floor((every-1)/tick) chars.block <- chars.report + chars.ticks nblocks <- max(1, floor(charsperline/chars.block)) nperline <- nblocks * every leftover <- charsperline - nblocks * chars.block if(leftover > 0) nperline <- nperline + min(leftover * tick, showevery - 1) return(nperline) } progressreport <- function(i, n, every=min(100,max(1, ceiling(n/100))), tick=1, nperline=NULL, charsperline=getOption("width"), style=spatstat.options("progress"), showtime=NULL, state=NULL) { missevery <- missing(every) nperline.fixed <- !is.null(nperline) showtime.optional <- is.null(showtime) if(showtime.optional) showtime <- FALSE # initialise only if(i > n) { warning(paste("progressreport called with i =", i, "> n =", n)) return(invisible(NULL)) } if(style == "tk" && !requireNamespace("tcltk")) { warning("tcltk is unavailable; switching to style='txtbar'", call.=FALSE) style <- "txtbar" } if(is.null(state) && style != "tty") stop(paste("Argument 'state' is required when style =",sQuote(style)), call.=FALSE) switch(style, txtbar={ if(i == 1) { ## initialise text bar state <- Put("ProgressBar", txtProgressBar(1, n, 1, style=3), state) } else { ## get text bar pbar <- Get("ProgressBar", state) ## update setTxtProgressBar(pbar, i) if(i == n) { close(pbar) state <- Put("ProgressBar", NULL, state) } } }, tk={ requireNamespace("tcltk") if(i == 1) { ## initialise text bar state <- Put("ProgressBar", tcltk::tkProgressBar(title="progress", min=0, max=n, width=300), state) } else { ## get text bar pbar <- Get("ProgressBar", state) ## update tcltk::setTkProgressBar(pbar, i, label=paste0(round(100 * i/n), "%")) if(i == n) { close(pbar) state <- Put("ProgressBar", NULL, state) } } }, tty={ now <- proc.time() if(i == 1 || is.null(state)) { ## 
Initialise stuff if(missevery && every > 1 && n > 10) every <- niceround(every) showevery <- if(showtime) every else n if(!nperline.fixed) nperline <- IterationsPerLine(charsperline, n, every, tick, showtime, showevery) state <- Put("ProgressData", list(every=every, tick=tick, nperline=nperline, starttime=now, showtime=showtime, showevery=showevery, nperline.fixed=nperline.fixed, showtime.optional=showtime.optional), state) } else { pd <- Get("ProgressData", state) if(is.null(pd)) stop(paste("progressreport called with i =", i, "before i = 1")) every <- pd$every tick <- pd$tick nperline <- pd$nperline showtime <- pd$showtime showevery <- pd$showevery showtime.optional <- pd$showtime.optional nperline.fixed <- pd$nperline.fixed if(i < n) { if(showtime || showtime.optional) { ## estimate time remaining starttime <- pd$starttime elapsed <- now - starttime elapsed <- unname(elapsed[3]) rate <- elapsed/(i-1) remaining <- rate * (n-i) if(!showtime) { ## show time remaining if.. if(rate > 20) { ## .. rate is very slow showtime <- TRUE showevery <- 1 } else if(remaining > 180) { ## ... 
## ... more than 3 minutes remaining
                  showtime <- TRUE
                  showevery <- every
                  ## show time at least about once a minute
                  aminute <- ceiling(60/rate)
                  if(aminute < showevery)
                    showevery <- min(niceround(aminute), showevery)
                }
                # update number of iterations per line
                if(showtime && !nperline.fixed)
                  nperline <- IterationsPerLine(charsperline,
                                                n, every, tick,
                                                showtime, showevery)
              }
            }
            ## save updated settings for subsequent calls
            state <- Put("ProgressData",
                         list(every=every, tick=tick, nperline=nperline,
                              starttime=starttime,
                              showtime=showtime,
                              showevery=showevery,
                              nperline.fixed=nperline.fixed,
                              showtime.optional=showtime.optional),
                         state)
          }
        }
        ## emit the progress output for iteration i
        if(i == n)
          cat(paste(" ", n, ".\n", sep=""))
        else if(every == 1 || i <= 3)
          cat(paste(i, ",", if(i %% nperline == 0) "\n" else " ", sep=""))
        else {
          if(i %% every == 0)
            cat(i)
          else if(i %% tick == 0)
            cat(".")
          if(i %% nperline == 0)
            cat("\n")
        }
        ## optionally display estimated time to completion ("etd")
        if(i < n && showtime && (i %% showevery == 0)) {
          st <- paste("etd", codetime(round(remaining)))
          st <- paren(st, "[")
          cat(paste("", st, ""))
        }
        flush.console()
      },
      stop(paste("Unrecognised option for style:", dQuote(style)))
    )
    return(invisible(state))
  }

  progressreport
})

## Multiply x by a, but only where x is finite and nonzero.
multiply.only.finite.entries <- function(x, a) {
  # In ppm a potential value that is -Inf must remain -Inf
  # and a potential value that is 0 multiplied by NA remains 0
  y <- x
  ok <- is.finite(x) & (x != 0)
  y[ok] <- a * x[ok]
  return(y)
}

## print names and version numbers of libraries loaded
sessionLibs <- function() {
  a <- sessionInfo()
  ## extract version number of each attached (non-base) package
  b <- unlist(lapply(a$otherPkgs, getElement, name="Version"))
  g <- rbind(names(b), unname(b))
  d <- apply(g, 2, paste, collapse=" ")
  if(length(d) > 0) {
    cat("Libraries loaded:\n")
    for(di in d) cat(paste("\t", di, "\n"))
  } else cat("Libraries loaded: None\n")
  return(invisible(d))
}

# ..................
prepareTitle <- function(main) {
  ## Count the number of lines in a main title
  ## Convert title to a form usable by plot.owin
  if(is.expression(main)) {
    nlines <- 1
  } else {
    main <- paste(main)
    ## break at newline
    main <- unlist(strsplit(main, "\n"))
    nlines <- if(sum(nchar(main)) == 0) 0 else length(main)
  }
  return(list(main=main,
              nlines=nlines,
              blank=rep(' ', nlines)))
}

## Check that the installed version of package 'pkg' is at least 'ver'.
## Stops with an error if not (when fatal=TRUE); otherwise returns FALSE.
requireversion <- function(pkg, ver, fatal=TRUE) {
  pkgname <- deparse(substitute(pkg))
  ## strip quotes so both requireversion(foo, v) and
  ## requireversion("foo", v) are accepted
  pkgname <- gsub("\"", "", pkgname)
  pkgname <- gsub("'", "", pkgname)
  v <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
                fields="Version")
  ok <- (package_version(v) >= ver)
  if(!ok && fatal)
    stop(paste("Package", sQuote(pkgname),
               "is out of date: version >=", ver, "is needed"),
         call.=FALSE)
  return(if(ok) invisible(TRUE) else FALSE)
}

## Print a prominently-formatted diagnostic message.
spatstatDiagnostic <- function(msg) {
  cat("-----------------------------\n")
  cat(paste(" >>> Spatstat Diagnostic: ", msg, "<<<\n"))
  cat("-----------------------------\n")
  invisible(NULL)
}

## Determine whether all elements of the list 'x' are identical,
## or, if 'entry' is given, whether x[[i]][[entry]] is the same
## for all i.
allElementsIdentical <- function(x, entry=NULL) {
  if(length(x) <= 1) return(TRUE)
  if(is.null(entry)) {
    x1 <- x[[1]]
    for(i in 2:length(x))
      if(!identical(x[[i]], x1)) return(FALSE)
  } else {
    e1 <- x[[1]][[entry]]
    for(i in 2:length(x))
      if(!identical(x[[i]][[entry]], e1)) return(FALSE)
  }
  return(TRUE)
}

representativeRows <- function(x) {
  # select a unique representative of each equivalence class of rows,
  # in a numeric matrix or data frame of numeric values.
  ord <- do.call(order, as.list(as.data.frame(x)))
  y <- x[ord, , drop=FALSE]   # rows sorted lexicographically
  dy <- apply(y, 2, diff)     # differences between successive sorted rows
  answer <- logical(nrow(y))
  ## a row is the representative iff it differs from its predecessor
  answer[ord] <- c(TRUE, !matrowall(dy == 0))
  return(answer)
}
spatstat/R/intensity.R0000644000176200001440000002127613613547031014502 0ustar liggesusers#
#
# intensity.R
#
# Code related to intensity and intensity approximations
#
#  $Revision: 1.22 $ $Date: 2020/01/27 09:17:20 $
#

## generic
intensity <- function(X, ...)
{ UseMethod("intensity") }

## Intensity of a point pattern: points per unit area of the window,
## tabulated by mark level if the pattern is multitype.
## Optional 'weights' give a weighted intensity (sum of weights / area).
intensity.ppp <- function(X, ..., weights=NULL) {
  n <- npoints(X)
  a <- area(Window(X))
  if(is.null(weights)) {
    ## unweighted case - for efficiency
    if(is.multitype(X)) {
      mks <- marks(X)
      answer <- as.vector(table(mks))/a
      names(answer) <- levels(mks)
    } else answer <- n/a
    return(answer)
  }
  ## weighted case
  weights <- pointweights(X, weights=weights, parent=parent.frame())
  if(is.multitype(X)) {
    mks <- marks(X)
    answer <- as.vector(tapply(weights, mks, sum))/a
    ## types with no points yield NA from tapply; set intensity to zero
    answer[is.na(answer)] <- 0
    names(answer) <- levels(mks)
  } else {
    answer <- sum(weights)/a
  }
  return(answer)
}

## Intensity of each component pattern of a split point pattern.
intensity.splitppp <- function(X, ..., weights=NULL) {
  if(is.null(weights))
    return(sapply(X, intensity.ppp))
  if(is.expression(weights))
    return(sapply(X, intensity.ppp, weights=weights))
  if(is.numeric(weights)) {
    ## numeric weights are split in parallel with the point pattern
    fsplit <- attr(X, "fsplit")
    n <- length(fsplit)
    check.nvector(weights, n)
    result <- mapply(intensity.ppp, X, weights=split(weights, fsplit))
    result <- simplify2array(result, higher=FALSE)
    return(result)
  }
  stop("Unrecognised format for weights")
}

## Intensity of a fitted point process model.
## Exact for Poisson models; uses the Poisson-saddlepoint
## approximation for Gibbs models.
intensity.ppm <- function(X, ...) {
  if(!identical(valid.ppm(X), TRUE)) {
    warning("Model is invalid - projecting it")
    X <- project.ppm(X)
  }
  if(is.poisson(X)) {
    if(is.stationary(X)) {
      # stationary univariate/multivariate Poisson
      sX <- summary(X, quick="no variances")
      lam <- sX$trend$value
      if(sX$multitype && sX$no.trend) {
        ## trend is ~1; lam should be replicated for each mark
        lev <- levels(marks(data.ppm(X)))
        lam <- rep(lam, length(lev))
        names(lam) <- lev
      }
      return(lam)
    }
    # Nonstationary Poisson
    return(predict(X, ...))
  }
  # Gibbs process
  if(is.multitype(X))
    stop("Not yet implemented for multitype Gibbs processes")
  # Compute first order term
  if(is.stationary(X)) {
    ## activity parameter
    sX <- summary(X, quick="no variances")
    beta <- sX$trend$value
  } else {
    ## activity function (or values of it, depending on '...')
    beta <- predict(X, ...)
} ## apply approximation lambda <- PoisSaddle(beta, fitin(X)) return(lambda) } PoisSaddle <- function(beta, fi) { ## apply Poisson-Saddlepoint approximation ## given first order term and fitted interaction stopifnot(inherits(fi, "fii")) inte <- as.interact(fi) if(identical(inte$family$name, "pairwise")) return(PoisSaddlePairwise(beta, fi)) if(identical(inte$name, "Geyer saturation process")) return(PoisSaddleGeyer(beta, fi)) if(identical(inte$name, "Area-interaction process")) return(PoisSaddleArea(beta, fi)) stop(paste("Intensity approximation is not yet available for", inte$name), call.=FALSE) } PoisSaddlePairwise <- function(beta, fi) { inte <- as.interact(fi) Mayer <- inte$Mayer if(is.null(Mayer)) stop(paste("Sorry, not yet implemented for", inte$name)) # interaction coefficients co <- with(fi, coefs[Vnames[!IsOffset]]) # compute second Mayer cluster integral G <- Mayer(co, inte) if(is.null(G) || !is.finite(G)) stop("Internal error in computing Mayer cluster integral") if(G < 0) stop(paste("Unable to apply Poisson-saddlepoint approximation:", "Mayer cluster integral is negative")) ## solve if(is.im(beta)) { lambda <- if(G == 0) beta else eval.im(LambertW(G * beta)/G) } else { lambda <- if(G == 0) beta else (LambertW(G * beta)/G) if(length(lambda) == 1) lambda <- unname(lambda) } return(lambda) } # Lambert's W-function LambertW <- local({ yexpyminusx <- function(y,x){y*exp(y)-x} W <- function(x) { result <- rep.int(NA_real_, length(x)) ok <- is.finite(x) & (x >= 0) if(requireNamespace("gsl", quietly=TRUE)) { result[ok] <- gsl::lambert_W0(x[ok]) } else { for(i in which(ok)) result[i] <- uniroot(yexpyminusx, c(0, x[i]), x=x[i])$root } return(result) } W }) PoisSaddleGeyer <- local({ PoisSaddleGeyer <- function(beta, fi) { gamma <- summary(fi)$sensible$param$gamma if(gamma == 1) return(beta) inte <- as.interact(fi) sat <- inte$par$sat R <- inte$par$r #' get probability distribution of Geyer statistic under reference model z <- Spatstat.Geyer.Nulldist # from sysdata 
    if(is.na(m <- match(sat, z$sat)))
      stop(paste("Sorry, the Poisson-saddlepoint approximation",
                 "is not implemented for Geyer models with sat =", sat),
           call.=FALSE)
    probmatrix <- z$prob[[m]]
    ## largest value of the saturated statistic with nonzero probability
    maxachievable <- max(which(colSums(probmatrix) > 0)) - 1
    gammarange <- sort(c(1, gamma^maxachievable))
    #' apply approximation
    betavalues <- beta[]
    nvalues <- length(betavalues)
    lambdavalues <- numeric(nvalues)
    for(i in seq_len(nvalues)) {
      beta.i <- betavalues[i]
      ## bracketing interval for the root search
      ra <- beta.i * gammarange
      lambdavalues[i] <- uniroot(diffapproxGeyer, ra, beta=beta.i,
                                 gamma=gamma, R=R, sat=sat,
                                 probmatrix=probmatrix)$root
    }
    #' return result in same format as 'beta'
    lambda <- beta
    lambda[] <- lambdavalues
    if(length(lambda) == 1) lambda <- unname(lambda)
    return(lambda)
  }

  ## residual whose root (in lambda) solves the
  ## self-consistency equation lambda = E[...]
  diffapproxGeyer <- function(lambda, beta, gamma, R, sat, probmatrix) {
    lambda - approxEpoisGeyerT(lambda, beta, gamma, R, sat, probmatrix)
  }

  approxEpoisGeyerT <- function(lambda, beta=1, gamma=1, R=1, sat=1,
                                probmatrix) {
    #' Compute approximation to E_Pois(lambda) Lambda(0,X) for Geyer
    #' ('probmatrix' contains distribution of geyerT(0, Z_n) for each n,
    #' where 'sat' is given, and Z_n is runifdisc(n, radius=2*R).
    possT <- 0:(ncol(probmatrix)-1)
    possN <- 0:(nrow(probmatrix)-1)
    pN <- dpois(possN, lambda * pi * (2*R)^2)
    EgamT <- pN %*% probmatrix %*% (gamma^possT)
    #' assume that, for n > max(possN),
    #' distribution of T is concentrated on T=sat
    EgamT <- EgamT + (gamma^sat) * (1-sum(pN))
    return(beta * EgamT)
  }

  PoisSaddleGeyer
})

PoisSaddleArea <- local({

  PoisSaddleArea <- function(beta, fi) {
    ## fitted interaction parameter eta
    eta <- summary(fi)$sensible$param$eta
    ## eta = 1 is the Poisson case: intensity equals beta
    if(eta == 1) return(beta)
    etarange <- range(c(eta^2, 1.1, 0.9))
    inte <- as.interact(fi)
    R <- inte$par$r
    #' reference distribution of canonical sufficient statistic
    zeroprob <- Spatstat.Area.Zeroprob
    areaquant <- Spatstat.Area.Quantiles
    # expectation of eta^A_n for each n = 0, 1, ....
EetaAn <- c(1/eta, zeroprob + (1-zeroprob) * colMeans((eta^(-areaquant)))) #' compute approximation betavalues <- beta[] nvalues <- length(betavalues) lambdavalues <- numeric(nvalues) for(i in seq_len(nvalues)) { beta.i <- betavalues[i] ra <- beta.i * etarange lambdavalues[i] <- uniroot(diffapproxArea, ra, beta=beta.i, eta=eta, r=R, EetaAn=EetaAn)$root } #' return result in same format as 'beta' lambda <- beta lambda[] <- lambdavalues if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } diffapproxArea <- function(lambda, beta, eta, r, EetaAn) { lambda - approxEpoisArea(lambda, beta, eta, r, EetaAn) } approxEpoisArea <- function(lambda, beta=1, eta=1, r=1, EetaAn) { #' Compute approximation to E_Pois(lambda) Lambda(0,X) for AreaInter mu <- lambda * pi * (2*r)^2 zeta <- pi^2/2 - 1 theta <- -log(eta) zetatheta <- zeta * theta #' contribution from tabulated values Nmax <- length(EetaAn) - 1L possN <- 0:Nmax qN <- dpois(possN, mu) # expectation of eta^A when N ~ poisson (truncated) EetaA <- sum(qN * EetaAn) #' asymptotics for quite large n Nbig <- qpois(0.999, mu) qn <- 0 if(Nbig > Nmax) { n <- (Nmax+1):Nbig #' asymptotic mean uncovered area conditional on this being positive mstarn <- (16/((n+3)^2)) * exp(n * (1/4 - log(4/3))) ztm <- zetatheta * mstarn ok <- (ztm < 1) if(!any(ok)) { Nbig <- Nmax qn <- 0 } else { if(!all(ok)) { Nbig <- max(which(!ok)) - 1 n <- (Nmax+1):Nbig ztm <- ztm[1:((Nbig-Nmax)+1)] } qn <- dpois(n, mu) #' asymptotic probability of complete coverage pstarn <- 1 - pmin(1, 3 * (1 + n^2/(16*pi)) * exp(-n/4)) Estarn <- (1 - ztm)^(-1/zeta) EetaA <- EetaA + sum(qn * (pstarn + (1-pstarn) * Estarn)) } } #' for very large n, assume complete coverage, so A = 0 EetaA <- EetaA + 1 - sum(qN) - sum(qn) return(beta * eta * EetaA) } PoisSaddleArea }) spatstat/R/hardcore.R0000644000176200001440000001000613333543255014233 0ustar liggesusers# # # hardcore.S # # $Revision: 1.15 $ $Date: 2018/03/19 14:44:53 $ # # The Hard core process # # Hardcore() create 
an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hardcore <- local({ BlankHardcore <- list( name = "Hard core process", creator = "Hardcore", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 0 * d v[ d <= par$hc ] <- (-Inf) attr(v, "IsOffset") <- TRUE v }, par = list(hc = NULL), # filled in later parnames = "hard core distance", hasInf = TRUE, selfstart = function(X, self) { # self starter for Hardcore nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Hardcore model")) return(self) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Hardcore(hc = hcX) }, init = function(self) { hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc) && !(is.numeric(hc) && hc > 0)) stop("hard core distance hc must be a positive number, or NA") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { hc <- self$par$hc return(hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) 
{ # fast evaluator for Hardcore interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Hardcore") hc <- potpars$hc # call evaluator for Strauss process counts <- strausscounts(U, X, hc, EqualPairs) forbid <- (counts != 0) if(!splitInf) { ## usual case v <- matrix(ifelseAB(forbid, -Inf, 0), ncol=1L) } else { ## separate hard core v <- matrix(0, nrow=npoints(U), ncol=1L) attr(v, "-Inf") <- forbid } attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral hc <- self$par$hc return(pi * hc^2) }, Percy=function(d, coeffs, par, ...) { ## term used in Percus-Yevick type approximation H <- par$hc t <- abs(d/(2*H)) t <- pmin.int(t, 1) y <- 2 * H^2 * (pi - acos(t) + t * sqrt(1 - t^2)) return(y) } ) class(BlankHardcore) <- "interact" Hardcore <- function(hc=NA) { instantiate.interact(BlankHardcore, list(hc=hc)) } Hardcore <- intermaker(Hardcore, BlankHardcore) Hardcore }) spatstat/R/compileK.R0000644000176200001440000000766613333543254014230 0ustar liggesusers# compileK # # Function to take a matrix of pairwise distances # and compile a 'K' function in the format required by spatstat. # # $Revision: 1.10 $ $Date: 2018/07/21 04:05:36 $ # ------------------------------------------------------------------- compileK <- function(D, r, weights=NULL, denom=1, check=TRUE, ratio=FALSE, fname="K") { # process r values breaks <- breakpts.from.r(r) rmax <- breaks$max r <- breaks$r # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? 
compilepcf <- function(D, r, weights=NULL, denom=1, check=TRUE,
                       endcorrect=TRUE, ratio=FALSE, ..., fname="g") {
  ## Take a matrix D of pairwise distances and compile a kernel estimate
  ## of the pair correlation function, in spatstat's 'fv' format.
  ##   D          symmetric matrix of nonnegative pairwise distances
  ##   r          vector of distance values; must be evenly spaced
  ##   weights    optional matrix of pairwise weights, same dimensions as D
  ##   denom      denominator used to normalise the estimate
  ##   check      if TRUE, validate D
  ##   endcorrect if TRUE, apply an end correction near r = 0
  ##   ratio      if TRUE, return a 'ratfv' object storing the numerator
  ##              and denominator separately
  ##   fname      function name used in plot labels
  ## Returns an 'fv' object; attribute "bw" records the kernel bandwidth.
  # process r values
  breaks <- breakpts.from.r(r)
  if(!breaks$even)
    stop("compilepcf: r values must be evenly spaced", call.=FALSE)
  r <- breaks$r
  rmax <- breaks$max
  # check that D is a symmetric matrix with nonnegative entries
  if(check)
    stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0))
  # ignore the diagonal; throw away any D values greater than rmax
  ok <- (D <= rmax & D > 0)
  Dvalues <- D[ok]
  # weights?
  if(!is.null(weights)) {
    stopifnot(is.matrix(weights) && all(dim(weights)==dim(D)))
    wvalues <- weights[ok]
    totwt <- sum(wvalues)
    normwvalues <- wvalues/totwt
  } else {
    nv <- length(Dvalues)
    normwvalues <- rep.int(1/nv, nv)
    totwt <- nv
  }
  # form kernel estimate of the density of pairwise distances
  rmin <- min(r)
  rmax <- max(r)
  nr <- length(r)
  den <- density(Dvalues, weights=normwvalues,
                 from=rmin, to=rmax, n=nr, ...)
  gval <- den$y * totwt
  # normalise
  gval <- gval/denom
  # edge effect correction at r = 0
  if(endcorrect) {
    # kernel-smooth a uniform sample on [rmin, rmax] with the same
    # bandwidth, to compensate for mass lost beyond the endpoints
    one <- do.call(density,
                   resolve.defaults(
                     list(seq(rmin,rmax,length=512)),
                     list(bw=den$bw, adjust=1),
                     list(from=rmin, to=rmax, n=nr),
                     list(...)))
    onefun <- approxfun(one$x, one$y, rule=2)
    gval <- gval /((rmax-rmin) * onefun(den$x))
  }
  # wrap it up as an 'fv' object for use in spatstat
  df <- data.frame(r=r, est=gval)
  if(!ratio) {
    g <- fv(df, "r", quote(g(r)), "est",
            . ~ r , c(0,rmax),
            c("r", makefvlabel(NULL, "hat", fname)),
            c("distance argument r", "estimated %s"),
            fname=fname)
  } else {
    num <- data.frame(r=r, est=gval * denom)
    # BUG FIX: use a distinct name for the denominator data frame.
    # Previously this branch reassigned 'den', clobbering the density
    # object, so that 'attr(g, "bw") <- den$bw' below assigned NULL
    # (removing the attribute) whenever ratio=TRUE.
    dend <- data.frame(r=r, est=denom)
    g <- ratfv(df=NULL, numer=num, denom=dend,
               "r", quote(g(r)), "est",
               . ~ r , c(0,rmax),
               c("r", makefvlabel(NULL, "hat", fname)),
               c("distance argument r", "estimated %s"),
               fname=fname)
  }
  # record the kernel bandwidth actually used
  attr(g, "bw") <- den$bw
  return(g)
}
countingweights <- function(id, areas, check=TRUE) {
  ## Counting weights for a generic tiling scheme.
  ##   id:    tile membership of each quadrature point (values in 1:K)
  ##   areas: areas of the K tiles
  ## Each point in tile j receives weight
  ##   areas[j] / (number of quadrature points in tile j).
  id <- as.integer(id)
  ## tally the quadrature points falling in each tile
  pointcount <- tabulate(id, nbins=length(areas))
  ## share each tile's area equally among the points it contains
  ## (pointcount[id] >= 1 always, since tile id[k] contains point k)
  w <- as.vector(areas[id] / pointcount[id])
  ##
  ## that's it; but check for funny business
  ##
  if(check) {
    emptytile <- (pointcount == 0)
    zerotile  <- (areas == 0)
    ## tiles of positive area containing no quadrature points:
    ## their area is unrepresented in the weights
    if(any(emptytile & !zerotile)) {
      lostpc <- round(100 * (1 - sum(w)/sum(areas)), 1)
      if(lostpc >= 1)
        warning(paste("some tiles with positive area",
                      "do not contain any quadrature points:",
                      "relative error =", paste0(lostpc, "%")))
    }
    ## tiles of zero area containing quadrature points: zero weights
    if(any(zerotile & !emptytile)) {
      warning("Some tiles with zero area contain quadrature points")
      warning("Some weights are zero")
      attr(w, "zeroes") <- zerotile[id]
    }
  }
  names(w) <- NULL
  return(w)
}
# # 'npix' determines the dimensions of the pixel raster used to # approximate tile areas. X <- as.ppp(X, window) x <- X$x y <- X$y win <- X$window # determine number of tiles if(is.null(ntile)) ntile <- default.ntile(X) if(length(ntile) == 1) ntile <- rep.int(ntile, 2) nx <- ntile[1] ny <- ntile[2] if(verbose) cat(paste("grid weights for a", nx, "x", ny, "grid of tiles\n")) ## determine pixel resolution in case it is required if(!is.null(npix)) { npix <- ensure2vector(npix) } else { npix <- pmax(rev(spatstat.options("npixel")), c(nx, ny)) if(is.mask(win)) npix <- pmax(npix, rev(dim(win))) } if(is.null(areas)) { # compute tile areas switch(win$type, rectangle = { nxy <- nx * ny tilearea <- area(win)/nxy areas <- rep.int(tilearea, nxy) zeroareas <- rep(FALSE, nxy) }, polygonal = { areamat <- polytileareaEngine(win, win$xrange, win$yrange, nx, ny) ## convert from 'im' to 'gridindex' ordering areas <- as.vector(t(areamat)) zeroareas <- (areas == 0) if(verbose) splat("Split polygonal window of area", area(win), "into", nx, "x", ny, "grid of tiles", "of total area", sum(areas)) }, mask = { win <- as.mask(win, dimyx=rev(npix)) if(verbose) splat("Converting mask dimensions to", npix[1], "x", npix[2], "pixels") ## extract pixel coordinates inside window rxy <- rasterxy.mask(win, drop=TRUE) xx <- rxy$x yy <- rxy$y ## classify all pixels into tiles pixelid <- gridindex(xx, yy, win$xrange, win$yrange, nx, ny)$index pixelid <- factor(pixelid, levels=seq_len(nx * ny)) ## compute digital areas of tiles tilepixels <- as.vector(table(pixelid)) pixelarea <- win$xstep * win$ystep areas <- tilepixels * pixelarea zeroareas <- (tilepixels == 0) } ) } else zeroareas <- (areas == 0) id <- gridindex(x, y, win$xrange, win$yrange, nx, ny)$index if(win$type != "rectangle" && any(uhoh <- zeroareas[id])) { # this can happen: the tile has digital area zero # but contains a data/dummy point slivers <- unique(id[uhoh]) switch(win$type, mask = { offence <- "digital area zero" epsa <- pixelarea/2 
dirichletWeights <- function(X, window = NULL, exact=TRUE, ...) {
  #'
  #' Compute quadrature weights based on the Dirichlet (Voronoi)
  #' tessellation of the window induced by the point pattern X.
  #' The weight of each point is the area of its Dirichlet tile.
  #'
  #' NOTE: X should contain both data and dummy points,
  #' if you need these weights for the B-T-B method.
  #'
  #' Arguments X and (optionally) 'window' are interpreted as a
  #' point pattern.
  #'
  #' If the window is a rectangle, we invoke Rolf Turner's "deldir"
  #' package to compute the areas of the tiles of the Dirichlet
  #' tessellation of the window frame induced by the points.
  #' [NOTE: the functionality of deldir to create dummy points
  #' is NOT used. ]
  #' if exact=TRUE compute the exact areas, using "deldir"
  #' if exact=FALSE compute the digital areas using exactdt()
  #'
  #' If the window is a mask, we compute the digital area of
  #' each tile of the Dirichlet tessellation by counting pixels.
  #'
  #'
  #'
  X <- as.ppp(X, window)
  #' for inexact computation, discretise a polygonal window first
  if(!exact && is.polygonal(Window(X)))
    Window(X) <- as.mask(Window(X))
  #' compute tile areas
  w <- dirichletAreas(X)
  #' zero areas can occur due to discretisation or weird geometry
  zeroes <- (w == 0)
  if(any(zeroes)) {
    #' compute weights for subset of points with nonzero tiles
    nX <- npoints(X)
    Xnew <- X[!zeroes]
    wnew <- dirichletAreas(Xnew)
    w <- numeric(nX)
    w[!zeroes] <- wnew
    #' map deleted points to nearest retained point
    jj <- nncross(X[zeroes], Xnew, what="which")
    #' map retained points to themselves
    ii <- Xseq <- seq_len(nX)
    ii[zeroes] <- (ii[!zeroes])[jj]
    #' redistribute weights: each retained point's tile area is shared
    #' equally between itself and the deleted points mapped to it
    nshare <- table(factor(ii, levels=Xseq))
    w <- w[ii]/nshare[ii]
  }
  #' attach information about weight construction parameters
  attr(w, "weight.parameters") <- list(method="dirichlet", exact=exact)
  return(w)
}
"offset")) && ((noo <- length(oo)) > 0)) { # entries of 'oo' index the list of variables in terms object vv <- attr(tt, "variables") offdep <- matrix(FALSE, noo, length(covars)) offnms <- character(noo) for(i in seq_len(noo)) { offseti <- languageEl(vv, oo[i] + 1) offdep[i, ] <- covars %in% all.vars(offseti) offnms[i] <- deparse(offseti) } rownames(offdep) <- offnms colnames(offdep) <- covars attr(depends, "offset") <- offdep } return(depends) } model.is.additive <- function(object) { dep <- model.depends(object) hit <- t(dep) %*% dep diag(hit) <- 0 ok <- all(hit == 0) return(ok) } model.covariates <- function(object, fitted=TRUE, offset=TRUE) { md <- model.depends(object) nm <- colnames(md) keep <- rep.int(FALSE, length(nm)) # variables used in formula with coefficients if(fitted) keep <- apply(md, 2, any) # variables used in offset if(offset) { oo <- attr(md, "offset") if(!is.null(oo)) keep <- keep | apply(oo, 2, any) } return(nm[keep]) } has.offset.term <- function(object) { # model terms tt <- terms(object) oo <- attr(tt, "offset") return(!is.null(oo) && (length(oo) > 0)) } has.offset <- function(object) { has.offset.term(object) || !is.null(model.offset(model.frame(object))) } check.separable <- function(dmat, covname, isconstant, fatal=TRUE) { #' Determine whether the effect of 'covname' is separable from other terms. 
#' dmat = model.depends(model) #' Find covariates entangled with 'covname' in the model relevant <- dmat[, covname] othercov <- (colnames(dmat) != covname) conflict <- dmat[relevant, othercov, drop=FALSE] if(!any(conflict)) return(TRUE) #' names of entangled covariates entangled <- colnames(conflict)[matcolany(conflict)] #' not problematic if constant if(is.null(names(isconstant))) names(isconstant) <- colnames(dmat) ok <- unlist(isconstant[entangled]) conflict[ , ok] <- FALSE if(!any(conflict)) return(TRUE) #' there are conflicts conflictterms <- matrowany(conflict) conflictcovs <- matcolany(conflict) whinge <- paste("The covariate", sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms]))) if(fatal) stop(whinge, call.=FALSE) warning(whinge, call.=FALSE) return(FALSE) } spatstat/R/marks.R0000644000176200001440000002624513536625724013604 0ustar liggesusers# # marks.R # # $Revision: 1.46 $ $Date: 2019/09/13 04:48:17 $ # # stuff for handling marks # # marks <- function(x, ...) { UseMethod("marks") } marks.default <- function(x, ...) 
marks.ppp <- function(x, ..., dfok=TRUE, drop=TRUE) {
  ## Extract the marks of a point pattern.
  ## dfok: permit data-frame/matrix marks (temporary switch while the
  ##       code is converted to accept data frames of marks)
  ## drop: reduce a single-column data frame or matrix to a vector
  marx <- x$marks
  ## simple (or absent) marks: return unchanged
  if(!is.data.frame(marx) && !is.matrix(marx))
    return(marx)
  if(!dfok)
    stop("Sorry, not implemented when the marks are a data frame")
  if(drop && ncol(marx) == 1)
    marx <- marx[, 1, drop=TRUE]
  return(marx)
}
markformat.default <- function(x) {
  ## Classify the storage format of a collection of mark values:
  ## one of "none", "vector", "dataframe", "hyperframe" or "list".
  if(is.null(x))
    return("none")
  if(is.null(dim(x))) {
    ## dimensionless objects: atomic vectors, factors, and
    ## date/time vectors all count as a vector of marks
    vectorial <- is.vector(x) || is.factor(x) || is.atomic(x) ||
                 inherits(x, "POSIXt") || inherits(x, "Date")
    if(vectorial)
      return("vector")
  }
  if(is.data.frame(x) || is.matrix(x))
    return("dataframe")
  if(is.hyperframe(x))
    return("hyperframe")
  if(inherits(x, c("solist", "anylist", "listof")))
    return("list")
  stop("Mark format not understood")
}
unmark.ppp <- function(X) {
  ## Strip the marks from a point pattern,
  ## resetting the stored mark format to "none".
  X[["marks"]] <- NULL
  X[["markformat"]] <- "none"
  X
}
}, stop("Internal error: unrecognised format of marks")) } markappend <- function(...) { # combine marks from any number of patterns marxlist <- list(...) # check on compatibility of marks mkfmt <- sapply(marxlist,markformat) if(length(ufm <- unique(mkfmt))>1) stop(paste("Cannot append marks of different formats:", commasep(sQuote(ufm))), call.=FALSE) mkfmt <- mkfmt[1] # combine the marks switch(mkfmt, none = { return(NULL) }, vector = { marxlist <- lapply(marxlist, function(x){as.data.frame.vector(x,nm="v1")}) marx <- do.call(rbind, marxlist)[,1] return(marx) }, hyperframe =, dataframe = { # check compatibility of data frames # (this is redundant but gives more helpful message) nama <- lapply(marxlist, names) dims <- lengths(nama) if(length(unique(dims)) != 1) stop("Data frames of marks have different column dimensions.") samenames <- unlist(lapply(nama, function(x,y) { identical(x,y) }, y=nama[[1]])) if(!all(samenames)) stop("Data frames of marks have different names.\n") marx <- do.call(rbind, marxlist) return(marx) }, list = { marx <- do.call(c, marxlist) marx <- as.solist(marx, demote=TRUE) return(marx) }) stop("Unrecognised mark format") } markcbind <- function(...) { # cbind several columns of marks marxlist <- list(...) 
numeric.columns <- local({

  ## Extract only the columns of (passably) numeric data from a data
  ## frame. Logical columns may be converted to integer; any other
  ## non-numeric column is discarded or replaced by NA values,
  ## according to 'others'.

  ## map one column to its numeric version, or NULL to drop it
  coerce.col <- function(z, logi, other) {
    if(is.numeric(z)) return(z)
    if(logi && is.logical(z)) return(as.integer(z))
    switch(other,
           na=rep.int(NA_real_, length(z)),
           discard=NULL,
           NULL)
  }

  numeric.columns <- function(M, logical=TRUE, others=c("discard", "na")) {
    others <- match.arg(others)
    M <- as.data.frame(M)
    ## single-column input is treated as unnamed
    if(ncol(M) == 1)
      colnames(M) <- NULL
    converted <- lapply(M, coerce.col, logi=logical, other=others)
    dropped <- vapply(converted, is.null, logical(1))
    if(all(dropped)) {
      #' all columns have been removed;
      #' return a data frame with no columns
      return(as.data.frame(matrix(, nrow=nrow(M), ncol=0)))
    }
    result <- do.call(data.frame, converted[!dropped])
    if(ncol(M) == 1 && ncol(result) == 1)
      colnames(result) <- NULL
    return(result)
  }

  numeric.columns
})
"were"), "converted to integer codes") warning(whinge, call.=FALSE) } marx[fax] <- as.data.frame(lapply(marx[fax], as.integer)) return(X %mark% marx) } } return(X) } #' for 'print' methods markvaluetype <- function(x) { if(is.hyperframe(x)) return(unclass(x)$vclass) if(!is.null(dim(x))) x <- as.data.frame(x) if(is.data.frame(x)) return(sapply(x, markvaluetype)) if(inherits(x, c("POSIXt", "Date"))) return("date-time") if(is.factor(x)) return("factor") return(typeof(x)) } spatstat/R/lintessmakers.R0000644000176200001440000001525313557001571015337 0ustar liggesusers#' #' lintessmakers.R #' #' Creation of linear tessellations #' and intersections between lintess objects #' #' $Revision: 1.2 $ $Date: 2019/11/01 09:53:54 $ #' divide.linnet <- local({ #' Divide a linear network into tiles demarcated by #' the points of a point pattern divide.linnet <- function(X) { stopifnot(is.lpp(X)) L <- as.linnet(X) coo <- coords(X) #' add identifiers of endpoints coo$from <- L$from[coo$seg] coo$to <- L$to[coo$seg] #' group data by segment, sort by increasing 'tp' coo <- coo[with(coo, order(seg, tp)), , drop=FALSE] bits <- split(coo, coo$seg) #' expand as a sequence of intervals bits <- lapply(bits, expanddata) #' reassemble as data frame df <- Reduce(rbind, bits) #' find all undivided segments other <- setdiff(seq_len(nsegments(L)), unique(coo$seg)) #' add a single line for each undivided segment if(length(other) > 0) df <- rbind(df, data.frame(seg=other, t0=0, t1=1, from=L$from[other], to=L$to[other])) #' We now have a tessellation #' Sort again df <- df[with(df, order(seg, t0)), , drop=FALSE] #' Now identify connected components #' Two intervals are connected if they share an endpoint #' that is a vertex of the network. 
# Common refinement of two tessellations X and Y on the same linear
# network: each tile of the result is the intersection of one tile of X
# with one tile of Y, labelled "Xtile:Ytile". Marks attached to the
# tiles of X and/or Y are carried over to the corresponding tiles of
# the result.
intersect.lintess <- function(X, Y) {
  # common refinement of two tessellations on linear network
  verifyclass(X, "lintess")
  verifyclass(Y, "lintess")
  if(!identical(as.linnet(X), as.linnet(Y)))
    stop("X and Y must be defined on the same linear network")
  L <- as.linnet(X)
  ns <- nsegments(L)
  marX <- marks(X)
  marY <- marks(Y)
  # work directly on the data frames of (seg, t0, t1, tile) intervals
  X <- X$df
  Y <- Y$df
  XY <- data.frame(seg=integer(0), t0=numeric(0), t1=numeric(0),
                   tile=character(0))
  # NOTE(review): the rbind inside the triple loop grows XY one row at
  # a time (quadratic); acceptable for small tessellations
  for(seg in seq_len(ns)) {
    # intervals of X and Y lying on this network segment
    xx <- X[X$seg == seg, , drop=FALSE]
    yy <- Y[Y$seg == seg, , drop=FALSE]
    nxx <- nrow(xx)
    nyy <- nrow(yy)
    if(nxx > 0 && nyy > 0) {
      # intersect every X interval with every Y interval
      for(i in 1:nxx) {
        xxi <- xx[i,,drop=FALSE]
        xr <- with(xxi, c(t0, t1))
        for(j in 1:nyy) {
          yyj <- yy[j,,drop=FALSE]
          yr <- with(yyj, c(t0, t1))
          zz <- intersect.ranges(xr, yr, fatal=FALSE)
          if(!is.null(zz)) {
            # non-empty overlap: new tile labelled by both parents
            XY <- rbind(XY, data.frame(seg=seg, t0=zz[1], t1=zz[2],
                                       tile=paste0(xxi$tile, ":", yyj$tile)))
          }
        }
      }
    }
  }
  out <- lintess(L, XY)
  if(!is.null(marX) || !is.null(marY)) {
    ## associate marks with TILES
    ## match each composite label back to its (X tile, Y tile) pair
    XYtiles <- levels(out$df$tile)
    posstiles <- outer(levels(X$tile), levels(Y$tile), paste, sep=":")
    m <- match(XYtiles, as.character(posstiles))
    if(anyNA(m))
      stop("Internal error in matching tile labels")
    # row/column indices recover the parent tile in X and in Y
    xid <- as.integer(row(posstiles))[m]
    yid <- as.integer(col(posstiles))[m]
    marXid <- marksubset(marX, xid)
    marYid <- marksubset(marY, yid)
    if(is.null(marX)) {
      marks(out) <- marYid
    } else if(is.null(marY)) {
      marks(out) <- marXid
    } else {
      # both tessellations are marked: combine side by side
      if(identical(ncol(marX), 1L)) colnames(marXid) <- "marksX"
      if(identical(ncol(marY), 1L)) colnames(marYid) <- "marksY"
      marks(out) <- data.frame(marksX=marXid, marksY=marYid)
    }
  }
  return(out)
}
#' Variance of the number of points N(B) of a fitted point process
#' model in a region B -- or of a weighted integral when B is a pixel
#' image or function -- computed from the model's intensity and pair
#' correlation function.
#'   model: fitted model supporting pcfmodel() and predict()
#'   B:     window of class 'owin', pixel image 'im', or weight function
#'   dimyx: optional pixel resolution used for prediction
varcount <- function(model, B, ..., dimyx=NULL) {
  stopifnot(is.owin(B) || is.im(B) || is.function(B))
  g <- pcfmodel(model)
  if(!is.function(g))
    stop("Pair correlation function cannot be computed")
  if(is.owin(B)) {
    #' B is a plain region: unweighted count
    lambdaB <- predict(model, locations=B, ngrid=dimyx, type="intensity")
    v <- varcountEngine(g, B, lambdaB)
  } else {
    #' B is a weight function or image: convert to an image f
    #' and integrate over its domain
    f <- if(is.im(B)) B else as.im(B, W=as.owin(model), ..., dimyx=dimyx)
    B <- as.owin(f)
    lambdaB <- predict(model, locations=B, type="intensity")
    v <- varcountEngine(g, B, lambdaB, f)
  }
  return(v)
}
# Matrix of pairwise distances between the points of a point pattern X.
# If periodic=TRUE, distances are measured on the torus obtained by
# identifying opposite edges of the (necessarily rectangular) window.
#   method:  "C" or "interpreted" (passed to pairdist.default)
#   squared: if TRUE, return squared distances
pairdist.ppp <- function(X, ..., periodic=FALSE, method="C", squared=FALSE) {
  verifyclass(X, "ppp")
  if(!periodic)
    return(pairdist.default(X$x, X$y, method=method, squared=squared))
  # periodic (toroidal) case: only meaningful in a rectangle
  W <- X$window
  if(W$type != "rectangle")
    stop(paste("periodic edge correction can't be applied",
               "in a non-rectangular window"))
  # the period in each direction is the side length of the rectangle
  wide <- diff(W$xrange)
  high <- diff(W$yrange)
  return(pairdist.default(X$x, X$y, period=c(wide,high),
                          method=method, squared=squared))
}
+ dy2 } if(squared) dout <- d2 else dout <- sqrt(d2) }, C={ d <- numeric( n * n) if(!periodic) { z<- .C("Cpairdist", n = as.integer(n), x= as.double(x), y= as.double(y), squared=as.integer(squared), d= as.double(d), PACKAGE = "spatstat") } else { z <- .C("CpairPdist", n = as.integer(n), x= as.double(x), y= as.double(y), xwidth=as.double(wide), yheight=as.double(high), squared = as.integer(squared), d= as.double(d), PACKAGE = "spatstat") } dout <- matrix(z$d, nrow=n, ncol=n) }, stop(paste("Unrecognised method", sQuote(method))) ) return(dout) } crossdist <- function(X, Y, ...) { UseMethod("crossdist") } crossdist.ppp <- function(X, Y, ..., periodic=FALSE, method="C", squared=FALSE) { verifyclass(X, "ppp") Y <- as.ppp(Y) if(!periodic) return(crossdist.default(X$x, X$y, Y$x, Y$y, method=method, squared=squared)) # periodic case WX <- X$window WY <- Y$window if(WX$type != "rectangle" || WY$type != "rectangle") stop(paste("periodic edge correction can't be applied", "in non-rectangular windows")) if(!is.subset.owin(WX,WY) || !is.subset.owin(WY, WX)) stop(paste("periodic edge correction is not implemented", "for the case when X and Y lie in different rectangles")) wide <- diff(WX$xrange) high <- diff(WX$yrange) return(crossdist.default(X$x, X$y, Y$x, Y$y, period=c(wide,high), method=method, squared=squared)) } crossdist.default <- function(X, Y, x2, y2, ..., period=NULL, method="C", squared=FALSE) { x1 <- X y1 <- Y # returns matrix[i,j] = distance from (x1[i],y1[i]) to (x2[j],y2[j]) if(length(x1) != length(y1)) stop("lengths of x and y do not match") if(length(x2) != length(y2)) stop("lengths of x2 and y2 do not match") n1 <- length(x1) n2 <- length(x2) if(n1 == 0 || n2 == 0) return(matrix(numeric(0), nrow=n1, ncol=n2)) if((periodic<- !is.null(period))) { stopifnot(is.numeric(period)) stopifnot(length(period) == 2 || length(period) == 1) stopifnot(all(period > 0)) if(length(period) == 1L) period <- rep.int(period, 2) wide <- period[1L] high <- period[2L] } switch(method, 
interpreted = { X1 <- matrix(rep.int(x1, n2), ncol = n2) Y1 <- matrix(rep.int(y1, n2), ncol = n2) X2 <- matrix(rep.int(x2, n1), ncol = n1) Y2 <- matrix(rep.int(y2, n1), ncol = n1) if(!periodic) d2 <- (X1 - t(X2))^2 + (Y1 - t(Y2))^2 else { dx <- X1 - t(X2) dy <- Y1 - t(Y2) dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - wide)^2) dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2) d2 <- dx2 + dy2 } return(if(squared) d2 else sqrt(d2)) }, C = { if(!periodic) { z<- .C("Ccrossdist", nfrom = as.integer(n1), xfrom = as.double(x1), yfrom = as.double(y1), nto = as.integer(n2), xto = as.double(x2), yto = as.double(y2), squared = as.integer(squared), d = as.double(matrix(0, nrow=n1, ncol=n2)), PACKAGE = "spatstat") } else { z<- .C("CcrossPdist", nfrom = as.integer(n1), xfrom = as.double(x1), yfrom = as.double(y1), nto = as.integer(n2), xto = as.double(x2), yto = as.double(y2), xwidth = as.double(wide), yheight = as.double(high), squared = as.integer(squared), d = as.double(matrix(0, nrow=n1, ncol=n2)), PACKAGE = "spatstat") } return(matrix(z$d, nrow=n1, ncol=n2)) }, stop(paste("Unrecognised method", method)) ) } spatstat/R/Tstat.R0000644000176200001440000002107313333543254013550 0ustar liggesusers# # tstat.R Estimation of T function # # $Revision: 1.12 $ $Date: 2018/07/02 15:45:48 $ # Tstat <- local({ # helper functions diffrange <- function(z) diff(range(z, na.rm=TRUE)) edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) { triid <- as.matrix(triid) ntri <- nrow(triid) if(ntri == 0) return(numeric(0)) W <- rescue.rectangle(as.owin(X)) if(W$type != "rectangle") stop("Translation correction is only implemented for rectangular windows") x <- matrix(X$x[triid], nrow=ntri) y <- matrix(X$y[triid], nrow=ntri) dx <- apply(x, 1, diffrange) dy <- apply(y, 1, diffrange) wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high/((wide - dx) * (high - dy)) weight <- pmin.int(trim, weight) return(weight) } # helper function implemented.for.T <- function(correction, 
#
# Tstat.R
#
# Estimation of the third-order summary function T(r):
# counts triangles of data points with diameter <= r.
#

Tstat <- local({

  # range of a numeric vector, ignoring NAs
  diffrange <- function(z) diff(range(z, na.rm=TRUE))

  # Translation edge-correction weight for each triangle.
  # 'triid': matrix with one row per triangle (vertex indices into X).
  # Weights are capped at 'trim' to avoid numerical blow-up.
  edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) {
    triid <- as.matrix(triid)
    ntri <- nrow(triid)
    if(ntri == 0) return(numeric(0))
    W <- rescue.rectangle(as.owin(X))
    if(W$type != "rectangle")
      stop("Translation correction is only implemented for rectangular windows")
    x <- matrix(X$x[triid], nrow=ntri)
    y <- matrix(X$y[triid], nrow=ntri)
    dx <- apply(x, 1, diffrange)
    dy <- apply(y, 1, diffrange)
    wide <- diff(W$xrange)
    high <- diff(W$yrange)
    # reciprocal of the fraction of translations keeping the triangle in W
    weight <- wide * high/((wide - dx) * (high - dy))
    weight <- pmin.int(trim, weight)
    return(weight)
  }

  # Restrict requested edge corrections to those implemented for this
  # window type; warn/stop only if the user explicitly asked for a
  # correction that must be discarded.
  implemented.for.T <- function(correction, windowtype, explicit) {
    rect <- (windowtype == "rectangle")
    if(any(correction == "best")) {
      # select best available correction
      correction <- if(rect) "translate" else "border"
    } else {
      # available selection of edge corrections depends on window
      if(!rect) {
        tra <- (correction == "translate")
        if(any(tra)) {
          whinge <- "Translation correction is only implemented for rectangular windows"
          if(explicit) {
            if(all(tra)) stop(whinge) else warning(whinge)
          }
          correction <- correction[!tra]
        }
      }
    }
    return(correction)
  }

  # .......... main function ....................
  Tstat <- function(X, ..., r=NULL, rmax=NULL,
                    correction=c("border", "translate"),
                    ratio=FALSE, verbose=TRUE)
  {
    verifyclass(X, "ppp")
    # rfixed <- !is.null(r)
    npts <- npoints(X)
    W <- Window(X)
    areaW <- area(W)
    # intensity and second/third factorial moment intensities
    lambda <- npts/areaW
    lambda2 <- (npts * (npts - 1))/(areaW^2)
    lambda3 <- (npts * (npts - 1) * (npts - 2))/(areaW^3)
    rmaxdefault <- if(!is.null(rmax)) rmax else rmax.rule("K", W, lambda)
    breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
    r <- breaks$r
    rmax <- breaks$max
    # choose correction(s)
    correction.given <- !missing(correction) && !is.null(correction)
    if(!correction.given)
      correction <- c("border", "bord.modif", "translate")
    correction <- pickoption("correction", correction,
                             c(none="none",
                               border="border",
                               "bord.modif"="bord.modif",
                               trans="translate",
                               translate="translate",
                               translation="translate",
                               best="best"),
                             multi=TRUE)
    correction <- implemented.for.T(correction, W$type, correction.given)
    # recommended range of r values
    alim <- c(0, min(rmax, rmaxdefault))
    # this will be the output data frame ('theo' = Poisson benchmark)
    TT <- data.frame(r=r, theo= (pi/2) * (pi - 3 * sqrt(3)/4) * r^4)
    desc <- c("distance argument r",
              "theoretical Poisson %s")
    TT <- fv(TT, "r", quote(T(r)),
             "theo", , alim, c("r","%s[pois](r)"), desc, fname="T")
    # save numerator and denominator?
    if(ratio) {
      denom <- lambda2 * areaW
      numT <- eval.fv(denom * TT)
      denT <- eval.fv(denom + TT * 0)
      attributes(numT) <- attributes(denT) <- attributes(TT)
      attr(numT, "desc")[2] <- "numerator for theoretical Poisson %s"
      attr(denT, "desc")[2] <- "denominator for theoretical Poisson %s"
    }
    # identify all close pairs
    rmax <- max(r)
    close <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE)
    I <- close$i
    J <- close$j
    DIJ <- close$d
    nI <- length(I)
    # estimate computation time (empirical formula)
    if(verbose) {
      nTmax <- nI * (nI-1) /2
      esttime <- exp(1.25 * log(nTmax) - 21.5)
      message(paste("Searching", nTmax, "potential triangles;",
                    "estimated time", codetime(esttime)))
    }
    # find triangles with their diameters
    tri <- trianglediameters(I, J, DIJ, nvert=npts)
    stopifnot(identical(colnames(tri), c("i", "j", "k", "diam")))
    # reassemble so each triangle appears 3 times, once for each vertex
    II <- with(tri, c(i, j, k))
    DD <- with(tri, rep.int(diam, 3))
    if(any(correction == "none")) {
      # uncorrected! For demonstration purposes only!
      wh <- whist(DD, breaks$val)  # no weights
      numTun <- cumsum(wh)
      denTun <- lambda3 * areaW
      # uncorrected estimate of T
      Tun <- numTun/denTun
      TT <- bind.fv(TT, data.frame(un=Tun), "hat(%s)[un](r)",
                    "uncorrected estimate of %s",
                    "un")
      if(ratio) {
        # save numerator and denominator
        numT <- bind.fv(numT, data.frame(un=numTun), "hat(%s)[un](r)",
                        "numerator of uncorrected estimate of %s",
                        "un")
        denT <- bind.fv(denT, data.frame(un=denTun), "hat(%s)[un](r)",
                        "denominator of uncorrected estimate of %s",
                        "un")
      }
    }
    if(any(correction == "border" | correction == "bord.modif")) {
      # border method
      # Compute distances to boundary
      b <- bdist.points(X)
      bI <- b[II]
      # apply reduced sample algorithm
      RS <- Kount(DD, bI, b, breaks)
      if(any(correction == "bord.modif")) {
        # modified border correction
        denom.area <- eroded.areas(W, r)
        numTbm <- RS$numerator
        denTbm <- lambda3 * denom.area
        Tbm <- numTbm/denTbm
        TT <- bind.fv(TT, data.frame(bord.modif=Tbm), "hat(%s)[bordm](r)",
                      "modified border-corrected estimate of %s",
                      "bord.modif")
        if(ratio) {
          numT <- bind.fv(numT, data.frame(bord.modif=numTbm),
                          "hat(%s)[bordm](r)",
                          "numerator of modified border-corrected estimate of %s",
                          "bord.modif")
          denT <- bind.fv(denT, data.frame(bord.modif=denTbm),
                          "hat(%s)[bordm](r)",
                          "denominator of modified border-corrected estimate of %s",
                          "bord.modif")
        }
      }
      if(any(correction == "border")) {
        numTb <- RS$numerator
        denTb <- lambda2 * RS$denom.count
        Tb <- numTb/denTb
        TT <- bind.fv(TT, data.frame(border=Tb), "hat(%s)[bord](r)",
                      "border-corrected estimate of %s",
                      "border")
        if(ratio) {
          numT <- bind.fv(numT, data.frame(border=numTb), "hat(%s)[bord](r)",
                          "numerator of border-corrected estimate of %s",
                          "border")
          denT <- bind.fv(denT, data.frame(border=denTb), "hat(%s)[bord](r)",
                          "denominator of border-corrected estimate of %s",
                          "border")
        }
      }
    }
    if(any(correction == "translate")) {
      # translation correction
      # apply to triangle list
      edgewt <- edgetri.Trans(X, tri[, 1:3])
      wh <- whist(tri$diam, breaks$val, edgewt)
      numTtrans <- 3 * cumsum(wh)
      denTtrans <- lambda3 * areaW
      Ttrans <- numTtrans/denTtrans
      # estimate is unreliable beyond half the window diameter
      h <- diameter(W)/2
      Ttrans[r >= h] <- NA
      TT <- bind.fv(TT, data.frame(trans=Ttrans), "hat(%s)[trans](r)",
                    "translation-corrected estimate of %s",
                    "trans")
      if(ratio) {
        numT <- bind.fv(numT, data.frame(trans=numTtrans), "hat(%s)[trans](r)",
                        "numerator of translation-corrected estimate of %s",
                        "trans")
        denT <- bind.fv(denT, data.frame(trans=denTtrans), "hat(%s)[trans](r)",
                        "denominator of translation-corrected estimate of %s",
                        "trans")
      }
    }
    # default plot will display all edge corrections
    formula(TT) <- . ~ r
    unitname(TT) <- unitname(X)
    #
    if(ratio) {
      # finish up numerator & denominator
      formula(numT) <- formula(denT) <- . ~ r
      unitname(numT) <- unitname(denT) <- unitname(TT)
      # tack on to result
      TT <- rat(TT, numT, denT, check=FALSE)
    }
    return(TT)
  }

  Tstat
})
RandomFields 'model' object rfmodel <- do.call(modgen, append(as.list(param), list(...))) if(!inherits(rfmodel, "RMmodel")) stop("Unable to create RandomFields model object", call.=FALSE) ## secret exit if(modelonly) return(rfmodel) ## simulation window win.given <- !is.null(win) mu.image <- is.im(mu) win <- if(win.given) as.owin(win) else if(mu.image) as.owin(mu) else owin() if(win.given && mu.image && !is.subset.owin(win, as.owin(mu))) stop(paste("The spatial domain of the pixel image", sQuote("mu"), "does not cover the simulation window", sQuote("win"))) ## convert win to a mask w <- as.mask(w=win, eps=eps, dimyx=dimyx, xy=xy) xcol <- w$xcol yrow <- w$yrow dimw <- w$dim ## evaluate 'mu' at pixels of mask if(is.numeric(mu)) { muxy <- mu } else { xy <- rasterxy.mask(w, drop=FALSE) xx <- xy$x yy <- xy$y muxy <- if (is.function(mu)) mu(xx,yy) else lookup.im(mu, xx, yy, naok=TRUE, strict=TRUE) muxy[is.na(muxy)] <- -Inf } ## corresponding image template Lambda <- as.im(w) ## generate 'nsim' realisations of a zero-mean Gaussian random field Z spc <- RandomFields::RFoptions()$general$spConform if(spc) RandomFields::RFoptions(spConform=FALSE) z <- RandomFields::RFsimulate(rfmodel, xcol, yrow, grid = TRUE, n=nsim) if(spc) RandomFields::RFoptions(spConform=TRUE) ## ensure 3D array if(length(dim(z)) == 2) z <- array(z, dim=c(dim(z), 1)) ## generate realisations of LGCP result <- vector(mode="list", length=nsim) for(i in 1:nsim) { ## Extract i-th realisation of Z; convert to log-Gaussian image Lambda$v[] <- exp(muxy + z[,,i]) ## generate Poisson points X <- rpoispp(Lambda)[win] ## if(saveLambda) attr(X, "Lambda") <- Lambda result[[i]] <- X } if(drop && nsim == 1) return(result[[1]]) names(result) <- paste("Simulation", 1:nsim) return(as.solist(result)) } rLGCP }) spatstat/R/poisson.R0000644000176200001440000000172713333543255014150 0ustar liggesusers# # # poisson.S # # $Revision: 1.8 $ $Date: 2015/10/21 09:06:57 $ # # The Poisson process # # Poisson() create an object of 
class 'interact' describing # the (null) interpoint interaction structure # of the Poisson process. # # # ------------------------------------------------------------------- # Poisson <- local({ BlankPoisson <- list( name = "Poisson process", creator = "Poisson", family = NULL, pot = NULL, par = NULL, parnames = NULL, init = function(...) { }, update = function(...) { }, print = function(self) { cat("Poisson process\n") invisible() }, valid = function(...) { TRUE }, project = function(...) NULL, irange = function(...) { 0 }, version=NULL ) class(BlankPoisson) <- "interact" Poisson <- function() { BlankPoisson } Poisson <- intermaker(Poisson, BlankPoisson) Poisson }) spatstat/R/linearpcfmulti.R0000644000176200001440000002223313606253516015470 0ustar liggesusers# # linearpcfmulti.R # # $Revision: 1.15 $ $Date: 2020/01/11 04:36:59 $ # # pair correlation functions for multitype point pattern on linear network # # linearpcfdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) 
#
# linearpcfmulti.R
#
# Pair correlation functions for multitype point patterns
# on a linear network.
#

linearpcfdot <- function(X, i, r=NULL, ..., correction="Ang") {
  # pcf between points of type i and ALL points of X ("dot" function)
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i) || is.null(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  I <- (marx == i)
  J <- rep(TRUE, npoints(X))  # i.e. all points
  result <- linearpcfmulti(X, I, J,
                           r=r, correction=correction, ...)
  # relabel the result as a "dot" function
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L" else "net"
  result <- rebadge.as.dotfun(result, "g", type, i)
  attr(result, "correction") <- correction
  return(result)
}

linearpcfcross <- function(X, i, j, r=NULL, ..., correction="Ang") {
  # pcf between points of type i and points of type j ("cross" function)
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i) || is.null(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  if(missing(j) || is.null(j)) j <- lev[2L] else
    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
  #
  if(i == j) {
    # same type: ordinary pcf of the sub-pattern
    result <- linearpcf(X[marx == i], r=r, correction=correction, ...)
  } else {
    I <- (marx == i)
    J <- (marx == j)
    result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...)
  }
  # rebrand
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L" else "net"
  result <- rebadge.as.crossfun(result, "g", type, i, j)
  attr(result, "correction") <- correction
  return(result)
}

linearpcfmulti <- function(X, I, J, r=NULL, ..., correction="Ang") {
  # pcf between the subsets of X selected by logical vectors I and J
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  # extract info about pattern
  np <- npoints(X)
  lengthL <- volume(domain(X))
  # validate I, J
  if(!is.logical(I) || !is.logical(J))
    stop("I and J must be logical vectors")
  if(length(I) != np || length(J) != np)
    stop(paste("The length of I and J must equal",
               "the number of points in the pattern"))
  if(!any(I)) stop("no points satisfy I")
  # if(!any(J)) stop("no points satisfy J")
  nI <- sum(I)
  nJ <- sum(J)
  nIandJ <- sum(I & J)
  # lambdaI <- nI/lengthL
  # lambdaJ <- nJ/lengthL
  # compute pcf; denominator = number of distinct (I,J) pairs per unit length
  denom <- (nI * nJ - nIandJ)/lengthL
  g <- linearPCFmultiEngine(X, I, J, r=r, denom=denom,
                            correction=correction, ...)
  # set appropriate y axis label
  correction <- attr(g, "correction")
  type <- if(correction == "Ang") "L" else "net"
  g <- rebadge.as.crossfun(g, "g", type, "I", "J")
  attr(g, "correction") <- correction
  return(g)
}

# ................ inhomogeneous ............................

linearpcfdot.inhom <- function(X, i, lambdaI, lambdadot,
                               r=NULL, ..., correction="Ang",
                               normalise=TRUE) {
  # inhomogeneous "dot" pcf: type i versus all points,
  # weighted by reciprocal intensities
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  I <- (marx == i)
  J <- rep(TRUE, npoints(X))  # i.e. all points
  # compute
  result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdadot,
                                 r=r, correction=correction,
                                 normalise=normalise, ...)
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  result <- rebadge.as.dotfun(result, "g", type, i)
  attr(result, "correction") <- correction
  return(result)
}

linearpcfcross.inhom <- function(X, i, j, lambdaI, lambdaJ,
                                 r=NULL, ..., correction="Ang",
                                 normalise=TRUE) {
  # inhomogeneous "cross" pcf: type i versus type j,
  # weighted by reciprocal intensities
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  if(missing(j)) j <- lev[2L] else
    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
  #
  if(i == j) {
    # same type: ordinary inhomogeneous pcf of the sub-pattern
    I <- (marx == i)
    result <- linearpcfinhom(X[I], lambda=lambdaI, r=r,
                             correction=correction, normalise=normalise, ...)
  } else {
    I <- (marx == i)
    J <- (marx == j)
    result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ,
                                   r=r, correction=correction,
                                   normalise=normalise, ...)
  }
  # rebrand
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  result <- rebadge.as.crossfun(result, "g", type, i, j)
  attr(result, "correction") <- correction
  return(result)
}

linearpcfmulti.inhom <- function(X, I, J, lambdaI, lambdaJ,
                                 r=NULL, ...,
                                 correction="Ang", normalise=TRUE) {
  # inhomogeneous pcf between subsets I and J, with intensity weights
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  # extract info about pattern
  np <- npoints(X)
  lengthL <- volume(domain(X))
  # validate I, J
  if(!is.logical(I) || !is.logical(J))
    stop("I and J must be logical vectors")
  if(length(I) != np || length(J) != np)
    stop(paste("The length of I and J must equal",
               "the number of points in the pattern"))
  if(!any(I)) stop("no points satisfy I")
  # validate lambda vectors
  lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...)
  lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...)
  # compute pcf: weight each (i,j) pair by 1/(lambdaI[i]*lambdaJ[j])
  weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*")
  denom <- if(!normalise) lengthL else sum(1/lambdaI)
  g <- linearPCFmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom,
                            correction=correction, ...)
  # set appropriate y axis label
  correction <- attr(g, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  g <- rebadge.as.crossfun(g, "g", type, "I", "J")
  attr(g, "correction") <- correction
  # propagate warnings about estimated intensities
  attr(g, "dangerous") <- union(attr(lambdaI, "dangerous"),
                                attr(lambdaJ, "dangerous"))
  return(g)
}
# .............. internal ...............................

linearPCFmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1,
                                 correction="Ang", showworking=FALSE) {
  # Workhorse shared by linearpcfmulti and linearpcfmulti.inhom:
  # computes the (optionally reweighted) pair correlation estimate
  # from the cross-distances between the subsets X[I] and X[J].
  # ensure distance information is present
  X <- as.lpp(X, sparse=FALSE)
  # extract info about pattern
  np <- npoints(X)
  # extract linear network
  L <- domain(X)
  W <- Window(L)
  # determine r values
  rmaxdefault <- 0.98 * boundingradius(L)
  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  # labels depend on edge correction ("L" = Ang, "net" = none)
  if(correction == "Ang") {
    fname <- c("g", "list(L, I, J)")
    ylab <- quote(g[L,I,J](r))
  } else {
    fname <- c("g", "list(net, I, J)")
    ylab <- quote(g[net,I,J](r))
  }
  #
  if(np < 2) {
    # no pairs to count: return zero function
    zeroes <- rep(0, length(r))
    df <- data.frame(r = r, est = zeroes)
    g <- fv(df, "r", ylab,
            "est", . ~ r, c(0, rmax),
            c("r", makefvlabel(NULL, "hat", fname)),
            c("distance argument r", "estimated %s"),
            fname = fname)
    unitname(g) <- unitname(X)
    attr(g, "correction") <- correction
    return(g)
  }
  #
  ## nI <- sum(I)
  ## nJ <- sum(J)
  ## whichI <- which(I)
  ## whichJ <- which(J)
  clash <- I & J
  has.clash <- any(clash)
  ## compute pairwise distances
  DIJ <- crossdist(X[I], X[J], check=FALSE)
  if(has.clash) {
    ## exclude pairs of identical points from consideration
    Iclash <- which(clash[I])
    Jclash <- which(clash[J])
    DIJ[cbind(Iclash,Jclash)] <- Inf
  }
  #--- compile into pair correlation function ---
  if(correction == "none" && is.null(reweight)) {
    # no weights (Okabe-Yamada)
    g <- compilepcf(DIJ, r, denom=denom, check=FALSE, fname=fname)
    g <- rebadge.as.crossfun(g, "g", "net", "I", "J")
    unitname(g) <- unitname(X)
    attr(g, "correction") <- correction
    return(g)
  }
  if(correction == "none") {
    edgewt <- 1
  } else {
    ## inverse m weights (Ang's correction)
    ## determine tolerance
    toler <- default.linnet.tolerance(L)
    ## compute m[i,j]
    m <- DoCountCrossEnds(X, I, J, DIJ, toler)
    edgewt <- 1/m
  }
  # compute pcf (edge weights times optional intensity reweighting)
  wt <- if(!is.null(reweight)) edgewt * reweight else edgewt
  g <- compilepcf(DIJ, r, weights=wt, denom=denom, check=FALSE, ...,
                  fname=fname)
  ## rebadge and tweak
  g <- rebadge.as.crossfun(g, "g", "L", "I", "J")
  fname <- attr(g, "fname")
  # tack on theoretical value (pcf = 1 under Poisson)
  g <- bind.fv(g, data.frame(theo=rep(1,length(r))),
               makefvlabel(NULL, NULL, fname, "pois"),
               "theoretical Poisson %s")
  unitname(g) <- unitname(X)
  fvnames(g, ".") <- rev(fvnames(g, "."))
  # show working
  if(showworking)
    attr(g, "working") <- list(DIJ=DIJ, wt=wt)
  attr(g, "correction") <- correction
  return(g)
}

## clusterfunctions.R
##
## Contains the generic functions:
##  - clusterkernel
##  - clusterfield
##  - clusterradius
##

clusterkernel <- function(model, ...) {
  # Generic: the cluster offspring density as a function of displacement.
  UseMethod("clusterkernel")
}

clusterkernel.kppm <- function(model, ...) {
  # Kernel of a fitted cluster model, as a function of (x, y) displacement.
  kernelR <- Kpcf.kppm(model, what = "kernel")
  f <- function(x, y = 0, ...){
    kernelR(sqrt(x^2+y^2))
  }
  return(f)
}

clusterkernel.character <- function(model, ...){
  # Kernel of a named cluster model type (Poisson cluster processes only).
  info <- spatstatClusterModelInfo(model, onlyPCP = TRUE)
  internalkernel <- info$kernel
  dots <- list(...)
  par <- c(kappa = 1, scale = dots$scale)
  par <- info$checkpar(par, old = TRUE)
  nam <- info$clustargsnames
  margs <- NULL
  if(!is.null(nam))
    margs <- dots[nam]
  f <- function(x, y = 0, ...){
    internalkernel(par = par, rvals = sqrt(x^2+y^2), margs = margs)
  }
  return(f)
}

clusterfield <- function(model, locations = NULL, ...) {
  # Generic: superposition of cluster kernels centred at 'locations'.
  UseMethod("clusterfield")
}

clusterfield.kppm <- function(model, locations = NULL, ...) {
  # Cluster field of a fitted model; defaults to a single parent
  # at the window centroid when the model is stationary.
  f <- clusterkernel(model)
  if(is.null(locations)){
    if(!is.stationary(model))
      stop("The model is non-stationary. The argument ",
           sQuote("locations"), " must be given.")
    locations <- centroid.owin(Window(model), as.ppp = TRUE)
  }
  clusterfield.function(f, locations, ..., mu = model$mu)
}
} clusterfield.function <- function(model, locations = NULL, ..., mu = NULL) { if(is.null(locations)){ locations <- ppp(.5, .5, window=square(1)) } if(!inherits(locations, "ppp")) stop("Argument ", sQuote("locations"), " must be a point pattern (ppp).") if("sigma" %in% names(list(...)) && "sigma" %in% names(formals(model))) warning("Currently ", sQuote("sigma"), "cannot be passed as an extra argument to the kernel function. ", "Please redefine the kernel function to use another argument name.") rslt <- density(locations, kernel=model, ...) if(is.null(mu)) return(rslt) mu <- as.im(mu, W=rslt) if(min(mu)<0) stop("Cluster reference intensity ", sQuote("mu"), " is negative.") return(rslt*mu) } clusterradius <- function(model, ...){ UseMethod("clusterradius") } clusterradius.character <- function(model, ..., thresh = NULL, precision = FALSE){ info <- spatstatClusterModelInfo(model, onlyPCP = TRUE) rmax <- info$range(..., thresh = thresh) if(precision){ ddist <- function(r) info$ddist(r, ...) prec <- integrate(ddist, 0, rmax) attr(rmax, "prec") <- prec } return(rmax) } clusterradius.kppm <- function(model, ..., thresh = NULL, precision = FALSE){ a <- list(model = model$clusters, thresh = thresh, precision = precision) a <- append(a, as.list(c(model$clustpar, model$clustargs))) do.call(clusterradius.character, a) } reach.kppm <- function(x, ..., epsilon) { thresh <- if(missing(epsilon)) NULL else epsilon 2 * clusterradius.kppm(x, ..., thresh=thresh) } spatstat/R/round.R0000644000176200001440000000165013433151224013570 0ustar liggesusers# # round.R # # discretisation of coordinates # # $Revision: 1.6 $ $Date: 2019/02/20 03:34:50 $ round.ppp <- round.pp3 <- round.ppx <- function(x, digits=0) { coords(x) <- round(as.matrix(coords(x)), digits=digits) return(x) } rounding <- function(x) { UseMethod("rounding") } rounding.ppp <- rounding.pp3 <- rounding.ppx <- function(x) { rounding(as.matrix(coords(x))) } rounding.default <- function(x) { # works for numeric, complex, matrix 
#
#
# ordthresh.S
#
# Ord process with threshold potential.
#
# OrdThresh() creates an instance of the Ord process
# [an object of class 'interact'] with threshold potential.
#
# -------------------------------------------------------------------
#

OrdThresh <- local({

  BlankOrdThresh <- list(
    name = "Ord process with threshold potential",
    creator = "OrdThresh",
    family = "ord.family",
    # potential: 1 if the tile statistic d is below the threshold r
    pot = function(d, par) { (d <= par$r) },
    par = list(r = NULL),
    parnames = "threshold distance",
    hasInf = FALSE,
    init = function(self) {
      r <- self$par$r
      if(!is.numeric(r) || length(r) != 1 || r <= 0)
        stop("threshold distance r must be a positive number")
    },
    update = NULL,  # default OK
    print = NULL,   # default OK
    # convert fitted coefficient to the interaction parameter gamma
    interpret = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      gamma <- exp(loggamma)
      return(list(param=list(gamma=gamma),
                  inames="interaction parameter gamma",
                  printable=dround(gamma)))
    },
    valid = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      is.finite(loggamma)
    },
    # project an invalid fit to the nearest valid model (Poisson)
    project = function(coeffs, self) {
      if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
    },
    # Ord interactions have unbounded range
    irange = function(...) { return(Inf) },
    version=NULL
  )
  class(BlankOrdThresh) <- "interact"

  OrdThresh <- function(r) {
    instantiate.interact(BlankOrdThresh, list(r=r))
  }

  OrdThresh <- intermaker(OrdThresh, BlankOrdThresh)

  OrdThresh
})

#
# distan3D.R
#
# Interpoint distances for 3D point patterns (class "pp3").
# Methods for pairdist, nndist, nnwhich, crossdist.
#

pairdist.pp3 <- function(X, ..., periodic=FALSE, squared=FALSE) {
  # n x n matrix of pairwise distances within a 3D point pattern;
  # periodic=TRUE wraps on the torus defined by the enclosing box.
  verifyclass(X, "pp3")
  # extract point coordinates
  xyz <- coords(X)
  n <- nrow(xyz)
  x <- xyz$x
  y <- xyz$y
  z <- xyz$z
  #
  # special cases
  if(n == 0)
    return(matrix(numeric(0), nrow=0, ncol=0))
  else if(n == 1L)
    return(matrix(0,nrow=1L,ncol=1L))
  #
  if(!periodic) {
    Cout <- .C("D3pairdist",
               n = as.integer(n),
               x = as.double(x),
               y = as.double(y),
               z = as.double(z),
               squared = as.integer(squared),
               d = as.double(numeric(n*n)),
               PACKAGE = "spatstat")
  } else {
    b <- as.box3(X)
    wide <- diff(b$xrange)
    high <- diff(b$yrange)
    deep <- diff(b$zrange)
    Cout <- .C("D3pairPdist",
               n = as.integer(n),
               x = as.double(x),
               y = as.double(y),
               z = as.double(z),
               xwidth=as.double(wide),
               yheight=as.double(high),
               zdepth=as.double(deep),
               squared = as.integer(squared),
               d= as.double(numeric(n*n)),
               PACKAGE = "spatstat")
  }
  dout <- matrix(Cout$d, nrow=n, ncol=n)
  return(dout)
}
nndist.pp3 <- function(X, ..., k=1) {
  # k-th nearest neighbour distances in a 3D point pattern.
  # 'k' may be a single integer or an integer vector;
  # returns a vector (single k) or matrix (vector k).
  verifyclass(X, "pp3")
  if((narg <- length(list(...))) > 0)
    warning(paste(narg, "unrecognised",
                  ngettext(narg, "argument was", "arguments were"),
                  "ignored"))
  # extract point coordinates
  xyz <- coords(X)
  n <- nrow(xyz)
  x <- xyz$x
  y <- xyz$y
  z <- xyz$z
  # k can be a single integer or an integer vector
  if(length(k) == 0)
    stop("k is an empty vector")
  else if(length(k) == 1L) {
    if(k != round(k) || k <= 0)
      stop("k is not a positive integer")
  } else {
    if(any(k != round(k)) || any(k <= 0))
      stop(paste("some entries of the vector",
                 sQuote("k"), "are not positive integers"))
  }
  k <- as.integer(k)
  kmax <- max(k)
  # trivial cases
  if(n <= 1L) {
    # empty pattern => return numeric(0)
    # or pattern with only 1 point => return Inf
    nnd <- matrix(Inf, nrow=n, ncol=kmax)
    nnd <- nnd[,k, drop=TRUE]
    return(nnd)
  }
  # number of neighbours that are well-defined
  kmaxcalc <- min(n-1L, kmax)
  # calculate k-nn distances for k <= kmaxcalc
  # (points are sorted by z before the C call, for efficiency)
  if(kmaxcalc == 1L) {
    # calculate nearest neighbour distance only
    nnd<-numeric(n)
    o <- fave.order(z)
    big <- sqrt(.Machine$double.xmax)
    Cout <- .C("nnd3D",
               n= as.integer(n),
               x= as.double(x[o]),
               y= as.double(y[o]),
               z= as.double(z[o]),
               nnd= as.double(nnd),
               nnwhich = as.integer(integer(1L)),
               huge=as.double(big),
               PACKAGE = "spatstat")
    nnd[o] <- Cout$nnd
  } else {
    # case kmaxcalc > 1
    nnd<-numeric(n * kmaxcalc)
    o <- fave.order(z)
    big <- sqrt(.Machine$double.xmax)
    Cout <- .C("knnd3D",
               n = as.integer(n),
               kmax = as.integer(kmaxcalc),
               x = as.double(x[o]),
               y = as.double(y[o]),
               z = as.double(z[o]),
               nnd = as.double(nnd),
               nnwhich = as.integer(integer(1L)),
               huge = as.double(big),
               PACKAGE = "spatstat")
    nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc)
    nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE)
  }
  # post-processing
  if(kmax > kmaxcalc) {
    # add columns of Inf's for undefined neighbour orders
    infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc)
    nnd <- cbind(nnd, infs)
  }
  if(length(k) < kmax) {
    # select only the specified columns
    nnd <- nnd[, k, drop=TRUE]
  }
  return(nnd)
}

nnwhich.pp3 <- function(X, ..., k=1) {
  # Indices of the k-th nearest neighbours in a 3D point pattern.
  verifyclass(X, "pp3")
  if((narg <- length(list(...))) > 0)
    warning(paste(narg, "unrecognised",
                  ngettext(narg, "argument was", "arguments were"),
                  "ignored"))
  # k can be a single integer or an integer vector
  if(length(k) == 0)
    stop("k is an empty vector")
  else if(length(k) == 1L) {
    if(k != round(k) || k <= 0)
      stop("k is not a positive integer")
  } else {
    if(any(k != round(k)) || any(k <= 0))
      stop(paste("some entries of the vector",
                 sQuote("k"), "are not positive integers"))
  }
  k <- as.integer(k)
  kmax <- max(k)
  # extract point coordinates
  xyz <- coords(X)
  n <- nrow(xyz)
  x <- xyz$x
  y <- xyz$y
  z <- xyz$z
  # special cases
  if(n <= 1L) {
    # empty pattern => return integer(0)
    # or pattern with only 1 point => return NA
    nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax)
    nnw <- nnw[,k, drop=TRUE]
    return(nnw)
  }
  # number of neighbours that are well-defined
  kmaxcalc <- min(n-1L, kmax)
  # identify k-nn for k <= kmaxcalc
  if(kmaxcalc == 1L) {
    # identify nearest neighbour only
    nnw <- integer(n)
    o <- fave.order(z)
    big <- sqrt(.Machine$double.xmax)
    Cout <- .C("nnw3D",
               n = as.integer(n),
               x = as.double(x[o]),
               y = as.double(y[o]),
               z = as.double(z[o]),
               nnd = as.double(numeric(1L)),
               nnwhich = as.integer(nnw),
               huge = as.double(big),
               PACKAGE = "spatstat")
    # [sic] Conversion from C to R indexing is done in C code.
    witch <- Cout$nnwhich
    if(any(witch <= 0))
      stop("Internal error: illegal index returned from C code")
    if(any(witch > n))
      stop("Internal error: index returned from C code exceeds n")
    # map back from sorted order to original ordering
    nnw[o] <- o[witch]
  } else {
    # case kmaxcalc > 1
    nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc)
    o <- fave.order(z)
    big <- sqrt(.Machine$double.xmax)
    Cout <- .C("knnw3D",
               n = as.integer(n),
               kmax = as.integer(kmaxcalc),
               x = as.double(x[o]),
               y = as.double(y[o]),
               z = as.double(z[o]),
               nnd = as.double(numeric(1L)),
               nnwhich = as.integer(nnw),
               huge = as.double(big),
               PACKAGE = "spatstat")
    # [sic] Conversion from C to R indexing is done in C code.
    witch <- Cout$nnwhich
    witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE)
    if(any(witch <= 0))
      stop("Internal error: illegal index returned from C code")
    if(any(witch > n))
      stop("Internal error: index returned from C code exceeds n")
    # convert back to original ordering
    nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc)
  }
  # post-processing
  if(kmax > kmaxcalc) {
    # add columns of NA's for undefined neighbour orders
    nas <- matrix(as.integer(NA), nrow=n, ncol=kmax-kmaxcalc)
    nnw <- cbind(nnw, nas)
  }
  if(length(k) < kmax) {
    # select only the specified columns
    nnw <- nnw[, k, drop=TRUE]
  }
  return(nnw)
}

crossdist.pp3 <- function(X, Y, ..., periodic=FALSE, squared=FALSE) {
  # Matrix of distances from each point of X to each point of Y (both pp3).
  verifyclass(X, "pp3")
  verifyclass(Y, "pp3")
  cX <- coords(X)
  cY <- coords(Y)
  nX <- nrow(cX)
  nY <- nrow(cY)
  if(nX == 0 || nY == 0)
    return(matrix(numeric(0), nrow=nX, ncol=nY))
  if(!periodic) {
    Cout <- .C("D3crossdist",
               nfrom = as.integer(nX),
               xfrom = as.double(cX$x),
               yfrom = as.double(cX$y),
               zfrom = as.double(cX$z),
               nto = as.integer(nY),
               xto = as.double(cY$x),
               yto = as.double(cY$y),
               zto = as.double(cY$z),
               squared = as.integer(squared),
               d = as.double(matrix(0, nrow=nX, ncol=nY)),
               PACKAGE = "spatstat")
  } else {
    # periodic: wrap on the torus defined by X's enclosing box
    b <- as.box3(X)
    wide <- diff(b$xrange)
    high <- diff(b$yrange)
    deep <- diff(b$zrange)
    Cout <- .C("D3crossPdist",
               nfrom = as.integer(nX),
               xfrom = as.double(cX$x),
               yfrom = as.double(cX$y),
               zfrom = as.double(cX$z),
               nto = as.integer(nY),
               xto = as.double(cY$x),
               yto = as.double(cY$y),
               zto = as.double(cY$z),
               xwidth = as.double(wide),
               yheight = as.double(high),
               zheight = as.double(deep),
               squared = as.integer(squared),
               d = as.double(matrix(0, nrow=nX, ncol=nY)),
               PACKAGE = "spatstat")
  }
  return(matrix(Cout$d, nrow=nX, ncol=nY))
}
function(X, ...) { verifyclass(X, "ppp") w <- X$window if(spatstat.options("exactdt.checks.data")) { ## check validity of ppp structure bb <- as.rectangle(w) xr <- bb$xrange yr <- bb$yrange rx <- range(X$x) ry <- range(X$y) if(rx[1L] < xr[1L] || rx[2L] > xr[2L]) die("x-coordinates out of bounds") if(ry[1L] < yr[1L] || ry[2L] > yr[2L]) die("y-coordinates out of bounds") if(length(X$x) != length(X$y)) die("x and y vectors have different length") if(length(X$x) != X$n) die("length of x,y vectors does not match n") } w <- as.mask(w, ...) ## dimensions of result nr <- w$dim[1L] nc <- w$dim[2L] ## margins in C array mr <- 2 mc <- 2 ## full dimensions of allocated storage Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc ## output rows & columns (R indexing) rmin <- mr + 1 rmax <- Nnr - mr cmin <- mc + 1 cmax <- Nnc - mc ## go res <- .C("exact_dt_R", as.double(X$x), as.double(X$y), as.integer(X$n), as.double(w$xrange[1L]), as.double(w$yrange[1L]), as.double(w$xrange[2L]), as.double(w$yrange[2L]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), distances = as.double(double(N)), indices = as.integer(integer(N)), boundary = as.double(double(N)), PACKAGE = "spatstat") ## extract dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] inde <- matrix(res$indices, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdry <- matrix(res$boundary, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] ## convert index from C to R indexing inde <- inde + 1L return(list(d = dist, i = inde, b = bdry, w=w)) } exactdt }) spatstat/R/matrixpower.R0000644000176200001440000000457113333543255015037 0ustar liggesusers#' #' matrixpower.R #' #' $Revision: 1.1 $ $Date: 2016/11/13 01:50:51 $ #' matrixsqrt <- function(x, complexOK=TRUE) { ## matrix square root if(length(dim(x)) != 2) stop("x must be a matrix") if(!is.matrix(x)) x <- as.matrix(x) if(missing(complexOK) && is.complex(x)) complexOK <- TRUE if(!complexOK) 
stopifnot(is.numeric(x)) else stopifnot(is.numeric(x) || is.complex(x)) e <- eigen(x) values <- e$values vectors <- e$vectors if(any(values < 0)) { if(complexOK) values <- as.complex(values) else stop("matrix has negative eigenvalues: square root is complex", call.=FALSE) } y <- vectors %*% diag(sqrt(values)) %*% t(vectors) if(!is.null(dn <- dimnames(x))) dimnames(y) <- rev(dn) return(y) } matrixinvsqrt <- function(x, complexOK=TRUE) { ## matrix inverse square root if(length(dim(x)) != 2) stop("x must be a matrix") if(!is.matrix(x)) x <- as.matrix(x) if(missing(complexOK) && is.complex(x)) complexOK <- TRUE if(!complexOK) stopifnot(is.numeric(x)) else stopifnot(is.numeric(x) || is.complex(x)) e <- eigen(x) values <- e$values vectors <- e$vectors if(any(values == 0)) stop("matrix is singular; cannot compute inverse square root", call.=FALSE) if(any(values < 0)) { if(complexOK) values <- as.complex(values) else stop("matrix has negative eigenvalues: inverse square root is complex", call.=FALSE) } y <- vectors %*% diag(1/sqrt(values)) %*% t(vectors) if(!is.null(dn <- dimnames(x))) dimnames(y) <- rev(dn) return(y) } matrixpower <- function(x, power, complexOK=TRUE) { check.1.real(power) if(length(dim(x)) != 2) stop("x must be a matrix") if(!is.matrix(x)) x <- as.matrix(x) if(missing(complexOK) && is.complex(x)) complexOK <- TRUE if(!complexOK) stopifnot(is.numeric(x)) else stopifnot(is.numeric(x) || is.complex(x)) e <- eigen(x) values <- e$values vectors <- e$vectors if(any(values == 0) && power < 0) stop("matrix is singular; cannot compute negative power", call.=FALSE) if(any(values < 0) && (power != ceiling(power))) { if(complexOK) values <- as.complex(values) else stop("matrix has negative eigenvalues: result is complex", call.=FALSE) } y <- vectors %*% diag(values^power) %*% t(vectors) if(!is.null(dn <- dimnames(x))) dimnames(y) <- rev(dn) return(y) } spatstat/R/predictmppm.R0000644000176200001440000003066513611545442015004 0ustar liggesusers# # predictmppm.R # # 
## predict.mppm
##   Prediction method for fitted multiple-point-process models ("mppm").
##
##   Arguments:
##     object     fitted model produced by mppm().
##     newdata    optional data frame or hyperframe of covariates;
##                defaults to the data used to fit the model.
##     type       "trend" and/or "cif" (alias "lambda").
##     ngrid      grid resolution when predicting on a pixel grid.
##     locations  optional prediction locations: data frame, list of
##                point patterns / quadschemes, or list of windows/masks.
##     verbose    print progress messages.
##
##   Returns a data frame (when newdata is a data frame) or a hyperframe
##   with one row per pattern, containing columns 'trend' and/or 'cif'
##   holding pixel images or marked point patterns.
predict.mppm <- local({

  predict.mppm <- function(object, ..., newdata=NULL, type=c("trend", "cif"),
                           ngrid=40, locations=NULL, verbose=FALSE) {
    ##
    ## 'object' is the output of mppm()
    ##
    model <- object
    verifyclass(model, "mppm")
    ##
    ##
    ## 'type'
    type <- pickoption("type", type,
                       c(trend="trend", lambda="cif", cif="cif"), multi=TRUE)
    want.trend <- "trend" %in% type
    want.cif <- "cif" %in% type
    ## undocumented argument 'selfcheck' enables internal consistency checks
    selfcheck <- resolve.defaults(list(...), list(selfcheck=FALSE))$selfcheck
    ##
    ##
    if(verbose)
      cat("Inspecting arguments...")
    ##
    ## 'newdata'
    use.olddata <- is.null(newdata)
    if(use.olddata) {
      newdata <- model$data
      newdataname <- "Original data"
    } else {
      stopifnot(is.data.frame(newdata) || is.hyperframe(newdata))
      newdataname <- sQuote("newdata")
    }
    ##
    ##
    ## Locations for prediction
    ## (classify the format of 'locations' to decide the prediction mode)
    if(is.hyperframe(locations))
      locations <- locations[,1,drop=TRUE]
    if(is.list(locations))
      cls <- unique(sapply(locations, class))
    loctype <-
      if(is.null(locations)) "null" else
      if(is.data.frame(locations)) "data.frame" else
      if(is.list(locations)) {
        if(any(c("ppp", "quad") %in% cls)) "points" else
        if("owin" %in% cls) {
          if(all(sapply(locations, is.mask))) "mask" else "window"
        } else "unknown"
      } else "unknown"
    need.grid <- switch(loctype,
                        null      =TRUE,
                        data.frame=FALSE,
                        points    =FALSE,
                        mask      =FALSE,
                        window    =TRUE,
                        unknown   =stop("Unrecognised format for locations"))
    make.image <- need.grid || (loctype == "mask")
    ##
    ## coordinate variables that must be available at prediction locations
    locationvars <- c("x", "y", "id")
    ##
    ##
    if(verbose)
      cat("done.\nDetermining locations for prediction...")
    if(need.grid) {
      ## prediction on a grid is required
      if(is.data.frame(newdata))
        stop(paste("Cannot predict model on a grid;",
                   newdataname, "are a data frame"))
    } else {
      ## prediction at `locations' is required
      if(is.hyperframe(newdata)) {
        ## check consistency between locations and newdata
        nloc <- length(locations)
        nnew <- summary(newdata)$ncases
        if(nloc != nnew)
          stop(paste("Length of argument", sQuote("locations"), paren(nloc),
                     "does not match number of rows in",
                     newdataname, paren(nnew)))
      } else {
        ## newdata is a data frame
        if(!is.data.frame(locations))
          stop(paste(newdataname,
                     "is a data frame; locations must be a data frame"))
        else {
          stopifnot(nrow(locations) == nrow(newdata))
          dup <- names(newdata) %in% names(locations)
          if(any(dup))
            for(nam in names(newdata)[dup])
              if(!isTRUE(all.equal(newdata[,nam], locations[,nam])))
                stop(paste("The data frames newdata and locations",
                           "both have a column called", sQuote(nam),
                           "but the entries differ"))
          nbg <- !(locationvars %in% c(names(newdata),names(locations)))
          if(any(nbg))
            stop(paste(ngettext(sum(nbg), "Variable", "Variables"),
                       commasep(locationvars[nbg]),
                       "not provided"))
          ## merge the two data frames
          newdata <- cbind(newdata[,!dup], locations)
          locations <- NULL
        }
      }
    }
    if(verbose)
      cat("done.\n Constructing data for prediction...")
    ##
    ##
    ## extract fitted glm/gam/glmm object
    FIT <- model$Fit$FIT
    ## extract names of interaction variables
    Vnamelist <- model$Fit$Vnamelist
    vnames <- unlist(Vnamelist)
    ##
    ##
    ## newdata is data frame
    if(is.data.frame(newdata)) {
      if(verbose)
        cat("(data frame)...")
      if(need.grid)
        stop("Cannot predict model on a grid; newdata is a data frame")
      ## use newdata as covariates
      nbg <- !(locationvars %in% names(newdata))
      if(any(nbg))
        stop(paste(ngettext(sum(nbg), "variable", "variables"),
                   commasep(locationvars[nbg]),
                   "not provided"))
      ## create output data frame
      answer <- as.data.frame(matrix(, nrow=nrow(newdata), ncol=0),
                              row.names=row.names(newdata))
      if(want.trend) {
        ## add interaction components, set to zero (if any)
        if(length(vnames) > 0)
          newdata[, vnames] <- 0
        ## compute fitted values
        answer$trend <- Predict(FIT, newdata=newdata, type="response")
      }
      if(want.cif) {
        if(is.poisson(object)) {
          ## cif = trend
          answer$cif <- if(want.trend) answer$trend else
                        Predict(FIT, newdata=newdata, type="response")
        } else {
          warning("Not yet implemented (computation of cif in data frame case)")
          ## split data frame by 'id'
          ## compute interaction components using existing point patterns
          ## compute fitted values
        }
      }
      return(answer)
    }
    ## newdata is a hyperframe
    if(verbose)
      cat("(hyperframe)...")
    sumry <- summary(newdata)
    npat.new <- sumry$ncases
    ## name of response point pattern in model
    Yname <- model$Info$Yname
    ##
    ## Determine response point patterns if known.
    ## Extract from newdata if available
    ## Otherwise from the original data if appropriate
    if(verbose)
      cat("(responses)...")
    ## NOTE(review): 'data' in the branch below is not defined in this
    ## scope -- presumably it should be 'model$data'; if that branch is
    ## ever taken as written, subsetting the base function data() would
    ## fail.  Confirm before relying on this fallback.
    Y <- if(Yname %in% sumry$col.names)
           newdata[, Yname, drop=TRUE, strip=FALSE]
         else if(npat.new == model$npat)
           data[, Yname, drop=TRUE, strip=FALSE]
         else NULL
    ##
    if(want.cif && is.null(Y))
      stop(paste("Cannot compute cif:",
                 "newdata does not contain column", dQuote(Yname),
                 "of response point patterns"))
    ##
    ## Determine windows for prediction
    if(verbose)
      cat("(windows)...")
    Wins <- if(!need.grid)
              lapply(locations, as.owin, fatal=FALSE)
            else if(!is.null(Y))
              lapply(Y, as.owin, fatal=FALSE)
            else NULL
    if(is.null(Wins) || any(sapply(Wins, is.null)))
      stop("Cannot determine windows where predictions should be made")
    ##
    ##
    if(is.null(Y)) {
      ## only want trend; empty patterns will do
      Y <- lapply(Wins, emptypattern)
    }
    ## ensure Y contains data points only
    if(is.quad(Y[[1]]))
      Y <- lapply(Y, getElement, name="data")
    ## Determine locations for prediction
    if(need.grid) {
      ## Generate grids of dummy locations
      if(verbose)
        cat("(grids)...")
      Gridded <- lapply(Wins, gridsample, ngrid=ngrid)
      Dummies <- lapply(Gridded, getElement, name="D")
      Templates <- lapply(Gridded, getElement, name="I")
    } else {
      ## locations are given somehow
      if(verbose)
        cat("(locations)...")
      if(loctype == "points")
        Dummies <- locations
      else if(loctype == "mask") {
        Dummies <- lapply(locations, punctify)
        Templates <- lapply(locations, as.im)
      } else
        stop("Internal error: illegal loctype")
    }
    ## Pack into quadschemes
    if(verbose)
      cat("(quadschemes)...")
    Quads <- list()
    for(i in seq(npat.new))
      Quads[[i]] <- quad(data=Y[[i]], dummy=Dummies[[i]])
    ## Insert quadschemes into newdata
    newdata[, Yname] <- Quads
    ## Determine interactions to be used
    if(verbose)
      cat("(interactions)...")
    interactions <- model$Inter$interaction
    ninter <- if(is.hyperframe(interactions)) nrow(interactions) else 1
    nnew <- nrow(newdata)
    if(ninter != nnew && ninter != 1) {
      if(!all(model$Inter$constant))
        stop(paste("Number of rows of newdata", paren(nnew),
                   "does not match number of interactions in model",
                   paren(ninter)))
      interactions <- interactions[1, ]
    }
    ## compute the Berman-Turner frame
    ## ('backdoor=TRUE' makes mppm return the frame instead of fitting)
    if(verbose)
      cat("done.\nStarting prediction...(Berman-Turner frame)...")
    moadf <- mppm(formula     = model$formula,
                  data        = newdata,
                  interaction = interactions,
                  iformula    = model$iformula,
#%^!ifdef RANDOMEFFECTS
                  random      = model$random,
#%^!endif
                  use.gam     = model$Fit$use.gam,
                  correction  = model$Info$correction,
                  rbord       = model$Info$rbord,
                  backdoor    = TRUE)
    ## compute fitted values
    if(verbose)
      cat("(glm prediction)...")
    values <- moadf[, c("x", "y", "id")]
    if(want.cif)
      values$cif <- Predict(FIT, newdata=moadf, type="response")
    if(want.trend) {
      if(length(vnames) == 0) {
        ## Poisson model: trend = cif
        values$trend <- if(want.cif) values$cif else
                        Predict(FIT, newdata=moadf, type="response")
      } else {
        ## zero the interaction components
        moadf[, vnames] <- 0
        ## compute fitted values
        values$trend <- Predict(FIT, newdata=moadf, type="response")
      }
    }
    if(verbose)
      cat("done.\nReshaping results...")
    ##
    ## Reshape results
    ## separate answers for each image
    values <- split(values, values$id)
    ##
    Trends <- list()
    Lambdas <- list()
    if(!make.image) {
      if(verbose)
        cat("(marked point patterns)...")
      ## values become marks attached to locations
      for(i in seq(npat.new)) {
        Val <- values[[i]]
        Loc <- Dummies[[i]]
        ## keep only the dummy (prediction) points of the quadscheme
        isdum <- !is.data(Quads[[i]])
        if(selfcheck)
          if(length(isdum) != length(Val$trend))
            stop("Internal error: mismatch between data frame and locations")
        if(want.trend)
          Trends[[i]] <- Loc %mark% (Val$trend[isdum])
        if(want.cif)
          Lambdas[[i]] <- Loc %mark% (Val$cif[isdum])
      }
    } else {
      if(verbose)
        cat("(pixel images)...")
      ## assign values to pixel images
      for(i in seq(npat.new)) {
        values.i <- values[[i]]
        Q.i <- Quads[[i]]
        values.i <- values.i[!is.data(Q.i), ]
        Template.i <- Templates[[i]]
        ok.i <- !is.na(Template.i$v)
        if(sum(ok.i) != nrow(values.i))
          stop("Internal error: mismatch between data frame and image")
        if(selfcheck) {
          ## verify that pixel raster coordinates match the data frame
          dx <- rasterx.im(Template.i)[ok.i] - values.i$x
          dy <- rastery.im(Template.i)[ok.i] - values.i$y
          cat(paste("i=", i,
                    "range(dx) =", paste(range(dx), collapse=", "),
                    "range(dy) =", paste(range(dy), collapse=", "),
                    "\n"))
        }
        if(want.trend) {
          Trend.i <- Template.i
          Trend.i$v[ok.i] <- values.i$trend
          Trends[[i]] <- Trend.i
        }
        if(want.cif) {
          Lambda.i <- Template.i
          Lambda.i$v[ok.i] <- values.i$cif
          Lambdas[[i]] <- Lambda.i
        }
      }
    }
    if(verbose)
      cat("done.\n")
    ## answer is a hyperframe
    Answer <- hyperframe(id=factor(levels(moadf$id)),
                         row.names=sumry$row.names)
    if(want.trend)
      Answer$trend <- Trends
    if(want.cif)
      Answer$cif <- Lambdas
    return(Answer)
  }

  ## helper functions

  ## an empty point pattern in window 'w'
  emptypattern <- function(w) { ppp(numeric(0), numeric(0), window=w) }

  ## discretise window W at resolution 'ngrid' and return both the
  ## dummy points (pixel centres inside W) and the template image
  gridsample <- function(W, ngrid) {
    masque <- as.mask(W, dimyx=ngrid)
    xx <- raster.x(masque)
    yy <- raster.y(masque)
    xpredict <- xx[masque$m]
    ypredict <- yy[masque$m]
    Dummy <- ppp(xpredict, ypredict, window=W)
    Image <- as.im(masque)
    return(list(D=Dummy, I=Image))
  }

  ## convert a binary mask M to the point pattern of its TRUE pixel centres
  punctify <- function(M) {
    xx <- raster.x(M)
    yy <- raster.y(M)
    xpredict <- xx[M$m]
    ypredict <- yy[M$m]
    return(ppp(xpredict, ypredict, window=M))
  }

  ## uniform wrapper around predict() for glm/gam/glmmPQL fits
  ## (glmmPQL's predict method has no 'type' argument, so the inverse
  ##  link is applied explicitly)
  Predict <- function(object, newdata, type=c("link", "response")) {
    type <- match.arg(type)
    if(inherits(object, "glmmPQL")) {
      class(object) <- class(object)[-1L]
      pred <- predict(object, newdata=newdata)
      if(type == "response")
        pred <- object$family$linkinv(pred)
    } else {
      pred <- predict(object, newdata=newdata, type=type)
    }
    return(as.numeric(pred))
  }

  predict.mppm
})
plot.mppm <- function(x, ..., trend=TRUE, cif=FALSE, se=FALSE, how=c("image", "contour", "persp")) { xname <- deparse(substitute(x)) how <- match.arg(how) subs <- subfits(x) arglist <- resolve.defaults(list(x=subs, how=how), list(...), list(main=xname)) if(trend) do.call(plot, c(arglist, list(trend=TRUE, cif=FALSE, se=FALSE))) if(cif) do.call(plot, c(arglist, list(trend=FALSE, cif=TRUE, se=FALSE))) if(se) do.call(plot, c(arglist, list(trend=FALSE, cif=FALSE, se=TRUE))) invisible(NULL) } spatstat/R/clickppp.R0000644000176200001440000000476713563450107014270 0ustar liggesusers#' Dominic Schuhmacher's idea #' #' $Revision: 1.17 $ $Date: 2019/11/15 07:12:52 $ #' clickppp <- local({ clickppp <- function(n=NULL, win=square(1), types=NULL, ..., add=FALSE, main=NULL, hook=NULL) { win <- as.owin(win) instructions <- if(!is.null(n)) paste("click", n, "times in window") else paste("add points: click left mouse button in window\n", "exit: press ESC or another mouse button") if(is.null(main)) main <- instructions #### single type ######################### if(is.null(types)) { plot(win, add=add, main=main, invert=TRUE) if(!is.null(hook)) plot(hook, add=TRUE) splat("Ready to click..") if(!is.null(n)) xy <- spatstatLocator(n=n, ...) else xy <- spatstatLocator(...) #' check whether all points lie inside window if((nout <- sum(!inside.owin(xy$x, xy$y, win))) > 0) { warning(paste(nout, ngettext(nout, "point", "points"), "lying outside specified window; window was expanded")) win <- boundingbox(win, xy) } X <- ppp(xy$x, xy$y, window=win) return(X) } ##### multitype ####################### ftypes <- factor(types, levels=types) #' input points of type 1 X <- getem(ftypes[1L], instructions, n=n, win=win, add=add, ..., pch=1) X <- X %mark% ftypes[1L] #' input points of types 2, 3, ... 
in turn naughty <- FALSE for(i in 2:length(types)) { Xi <- getem(ftypes[i], instructions, n=n, win=win, add=add, ..., hook=X, pch=i) Xi <- Xi %mark% ftypes[i] if(!naughty && identical(Xi$window, win)) { #' normal case X <- superimpose(X, Xi, W=win) } else { #' User has clicked outside original window. naughty <- TRUE #' Use bounding box for simplicity bb <- boundingbox(Xi$window, X$window) X <- superimpose(X, Xi, W=bb) } } if(!add) { if(!naughty) plot(X, main="Final pattern") else { plot(X$window, main="Final pattern (in expanded window)", invert=TRUE) plot(win, add=TRUE, invert=TRUE) plot(X, add=TRUE) } } return(X) } getem <- function(i, instr, ...) { main <- paste("Points of type", sQuote(i), "\n", instr) do.call(clickppp, resolve.defaults(list(...), list(main=main))) } clickppp }) clickdist <- function() { a <- spatstatLocator(2) return(pairdist(a)[1L,2L]) } spatstat/R/sparsecommon.R0000644000176200001440000002205313602535575015164 0ustar liggesusers#' #' sparsecommon.R #' #' Utilities for sparse arrays #' #' $Revision: 1.15 $ $Date: 2019/12/31 03:07:33 $ #' #' .............. completely generic .................... inside3Darray <- function(d, i, j, k) { stopifnot(length(d) == 3) if(length(dim(i)) == 2 && missing(j) && missing(k)) { stopifnot(ncol(i) == 3) j <- i[,2] k <- i[,3] i <- i[,1] } ans <- inside.range(i, c(1, d[1])) & inside.range(j, c(1, d[2])) & inside.range(k, c(1, d[3])) return(ans) } #' .............. depends on Matrix package ................ sparseVectorCumul <- function(x, i, length) { #' extension of 'sparseVector' to allow repeated indices #' (the corresponding entries are added) z <- tapply(x, list(factor(i, levels=1:length)), sum) z <- z[!is.na(z)] sparseVector(i=as.integer(names(z)), x=as.numeric(z), length=length) } #' .............. code that mentions sparse3Darray ................ 
## expandSparse
##   Replicate a sparse vector or sparse matrix 'x' n times, forming a
##   sparse matrix or sparse 3D array in which every slice along the
##   'across' dimension is identical to 'x'.
##   Default is across = length(dim(x)) + 1 (a new trailing dimension).
expandSparse <- function(x, n, across) {
  #' x is a sparse vector/matrix; replicate it 'n' times
  #' and form a sparse matrix/array
  #' in which each slice along the 'across' dimension is identical to 'x'
  #' Default is across = length(dim(x)) + 1
  check.1.integer(n)
  stopifnot(n >= 1)
  dimx <- dim(x)
  if(is.null(dimx)) {
    ## vector-like: determine length from sparse or dense representation
    if(inherits(x, "sparseVector")) dimx <- x@length
    else if(is.vector(x)) dimx <- length(x)
    else stop("Format of x is not understood", call.=FALSE)
  }
  nd <- length(dimx)
  if(missing(across)) across <- nd + 1L
  else {
    check.1.integer(across)
    if(!(across %in% (1:(nd+1L))))
      stop(paste("Argument 'across' must be an integer from 1 to", nd+1L),
           call.=FALSE)
  }
  if(nd == 1) {
    ## vector -> matrix
    if(inherits(x, "sparseVector")) {
      m <- length(x@x)
      ## 'across' selects whether replication runs over rows (1) or columns (2)
      y <- switch(across,
                  sparseMatrix(i=rep(1:n, times=m),
                               j=rep(x@i, each=n),
                               x=rep(x@x, each=n),
                               dims=c(n, dimx)),
                  sparseMatrix(i=rep(x@i, each=n),
                               j=rep(1:n, times=m),
                               x=rep(x@x, each=n),
                               dims=c(dimx, n)))
    } else {
      ## dense vector: outer() broadcasts the values into a matrix
      y <- switch(across,
                  outer(1:n, x, function(a,b) b),
                  outer(x, 1:n, function(a,b) a))
    }
  } else if(nd == 2) {
    ## matrix -> 3D array
    if(inherits(x, "sparseMatrix")) {
      ## triplet form exposes the (i,j,x) slots directly (0-based indices)
      z <- as(x, "TsparseMatrix")
      m <- length(z@x)
      y <- switch(across,
                  sparse3Darray(i=rep(1:n, times=m),
                                j=rep(z@i + 1L, each=n),
                                k=rep(z@j + 1L, each=n),
                                x=rep(z@x, each=n),
                                dims=c(n, dimx)),
                  sparse3Darray(i=rep(z@i + 1L, each=n),
                                j=rep(1:n, times=m),
                                k=rep(z@j + 1L, each=n),
                                x=rep(z@x, each=n),
                                dims=c(dimx[1], n, dimx[2])),
                  sparse3Darray(i=rep(z@i + 1L, each=n),
                                j=rep(z@j + 1L, each=n),
                                k=rep(1:n, times=m),
                                x=rep(z@x, each=n),
                                dims=c(dimx, n)))
    } else stop("Not yet implemented for full arrays")
  } else stop("Not implemented for arrays of more than 2 dimensions",
              call.=FALSE)
  return(y)
}

## mapSparseEntries
##   Replace the NONZERO entries of a sparse vector, matrix or 3D array
##   'x' by values[l], where l is the index of the entry along dimension
##   'margin'.  'values' may be a (sparse) vector matching that margin,
##   a single value, or (3D case) a matrix indexed by (margin, across).
##   With conform=TRUE (3D case) the sparsity pattern is first made
##   identical in every slice along the 'across' dimension.
mapSparseEntries <- function(x, margin, values, conform=TRUE, across) {
  # replace the NONZERO entries of sparse vector, matrix or array
  # by values[l] where l is one of the slice indices
  dimx <- dim(x)
  if(is.null(dimx)) {
    if(inherits(x, "sparseVector")) dimx <- x@length
    else if(is.vector(x)) dimx <- length(x)
    else stop("Format of x is not understood", call.=FALSE)
  }
  if(length(dimx) == 1) {
    ## ----- one-dimensional case -----
    x <- as(x, "sparseVector")
    i <- x@i
    if(length(i) == 0) {
      # no entries
      return(x)
    }
    if(!missing(margin) && !is.null(margin))
      stopifnot(margin == 1)
    check.anySparseVector(values, dimx, things="entries", oneok=TRUE)
    nv <- if(inherits(values, "sparseVector")) values@length else length(values)
    ## a single value is recycled to every entry
    yvalues <- if(nv > 1) as.vector(values[i]) else rep(values[1], length(i))
    y <- sparseVector(i=i, x=yvalues, length=dimx)
    return(y)
  }
  if(inherits(x, "sparseMatrix")) {
    ## ----- two-dimensional case -----
    x <- as(x, Class="TsparseMatrix")
    if(length(x@i) == 0) {
      # no entries
      return(x)
    }
    check.1.integer(margin)
    stopifnot(margin %in% 1:2)
    check.anySparseVector(values, dimx[margin],
                          things=c("rows","columns")[margin],
                          oneok=TRUE)
    nv <- if(inherits(values, "sparseVector")) values@length else length(values)
    ## convert 0-based triplet indices to 1-based R indices
    i <- x@i + 1L
    j <- x@j + 1L
    yindex <- switch(margin, i, j)
    yvalues <- if(nv > 1) values[yindex] else rep(values[1], length(yindex))
    y <- sparseMatrix(i=i, j=j, x=yvalues, dims=dimx, dimnames=dimnames(x))
    ## new values may be zero; keep the matrix genuinely sparse
    y <- drop0(y)
    return(y)
  }
  if(inherits(x, "sparse3Darray")) {
    ## ----- three-dimensional case -----
    if(length(x$i) == 0) {
      # no entries
      return(x)
    }
    ijk <- cbind(i=x$i, j=x$j, k=x$k)
    if(conform) {
      #' ensure common pattern of sparse values
      #' in each slice on 'across' margin
      force(across)
      nslice <- dimx[across]
      #' pick one representative of each equivalence class
      ## ---- old code ---------
      ## dup <- duplicated(ijk[,-across,drop=FALSE])
      ## ijk <- ijk[!dup, , drop=FALSE]
      ## ---------------------
      use <- representativeRows(ijk[,-across,drop=FALSE])
      ijk <- ijk[use, , drop=FALSE]
      ##
      npattern <- nrow(ijk)
      #' repeat this pattern in each 'across' slice
      ijk <- apply(ijk, 2, rep, times=nslice)
      ijk[, across] <- rep(seq_len(nslice), each=npattern)
    }
    if(is.vector(values) || inherits(values, "sparseVector")) {
      # vector of values matching margin extent
      check.anySparseVector(values, dimx[margin],
                            things=c("rows","columns","planes")[margin],
                            oneok=TRUE)
      nv <- if(inherits(values, "sparseVector")) values@length else
            length(values)
      yindex <- ijk[,margin]
      yvalues <- if(nv > 1) values[yindex] else rep(values[1], length(yindex))
      y <- sparse3Darray(i=ijk[,1], j=ijk[,2], k=ijk[,3],
                         x=yvalues,
                         dims=dimx, dimnames=dimnames(x))
      return(y)
    } else if(is.matrix(values) || inherits(values, "sparseMatrix")) {
      #' matrix of values.
      force(across)
      stopifnot(across != margin)
      #' rows of matrix must match 'margin'
      if(nrow(values) != dimx[margin])
        stop(paste("Number of rows of values",
                   paren(nrow(values)),
                   "does not match array size in margin",
                   paren(dimx[margin])),
             call.=FALSE)
      #' columns of matrix must match 'across'
      if(ncol(values) != dimx[across])
        stop(paste("Number of columns of values",
                   paren(ncol(values)),
                   "does not match array size in 'across'",
                   paren(dimx[across])),
             call.=FALSE)
      # map
      ## each sparse entry picks values[margin-index, across-index]
      yindex <- ijk[,margin]
      zindex <- ijk[,across]
      y <- sparse3Darray(i=ijk[,1], j=ijk[,2], k=ijk[,3],
                         x=values[cbind(yindex,zindex)],
                         dims=dimx, dimnames=dimnames(x))
      return(y)
    } else stop("Format of values not understood", call.=FALSE)
  }
  stop("Format of x not understood", call.=FALSE)
}

## applySparseEntries
##   Apply a vectorised function 'f' to the nonzero entries of 'x' only,
##   where 'x' is a sparse matrix, a sparse 3D array, or any other object
##   (in which case f is applied to the whole of x).
##   'f' must return exactly as many values as it is given.
applySparseEntries <- local({

  applySparseEntries <- function(x, f, ...) {
    ## apply vectorised function 'f' only to the nonzero entries of 'x'
    if(inherits(x, "sparseMatrix")) {
      x <- applytoxslot(x, f, ...)
    } else if(inherits(x, "sparse3Darray")) {
      x <- applytoxentry(x, f, ...)
    } else {
      x <- f(x, ...)
    }
    return(x)
  }

  ## sparseMatrix case: transform the S4 'x' slot in place
  applytoxslot <- function(x, f, ...) {
    xx <- x@x
    n <- length(xx)
    xx <- f(xx, ...)
    if(length(xx) != n)
      stop(paste("Function f returned the wrong number of values:",
                 length(xx), "instead of", n),
           call.=FALSE)
    x@x <- xx
    return(x)
  }

  ## sparse3Darray case: transform the list entry 'x' in place
  applytoxentry <- function(x, f, ...) {
    xx <- x$x
    n <- length(xx)
    xx <- f(xx, ...)
    if(length(xx) != n)
      stop(paste("Function f returned the wrong number of values:",
                 length(xx), "instead of", n),
           call.=FALSE)
    x$x <- xx
    return(x)
  }

  applySparseEntries
})

## check.anySparseVector
##   Validate that 'v' is a vector, factor or sparse vector of values,
##   one for each of 'npoints' things (or a single value, if oneok=TRUE).
##   On failure: stops if fatal=TRUE; otherwise optionally warns, and
##   returns FALSE with the message attached as attribute "whinge".
##   Returns TRUE on success.
check.anySparseVector <- function(v, npoints=NULL, fatal=TRUE,
                                  things="data points",
                                  naok=FALSE, warn=FALSE,
                                  vname, oneok=FALSE) {
  # vector, factor or sparse vector of values for each point/thing
  if(missing(vname))
    vname <- sQuote(deparse(substitute(v)))
  whinge <- NULL
  isVector <- is.atomic(v) && is.null(dim(v))
  isSparse <- inherits(v, "sparseVector")
  nv <- if(isSparse) v@length else length(v)
  if(!isVector && !isSparse)
    whinge <- paste(vname, "is not a vector, factor or sparse vector")
  else if(!(is.null(npoints) || (nv == npoints)) && !(oneok && nv == 1))
    whinge <- paste("The length of", vname,
                    paren(paste0("=", nv)),
                    "should equal the number of", things,
                    paren(paste0("=", npoints)))
  else if(!naok && anyNA(v))
    whinge <- paste("Some values of", vname, "are NA or NaN")
  #
  if(!is.null(whinge)) {
    if(fatal) stop(whinge)
    if(warn) warning(whinge)
    ans <- FALSE
    attr(ans, "whinge") <- whinge
    return(ans)
  }
  return(TRUE)
}
match.arg(bw.input) # validate model modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) # extract spatial locations Q <- quad.ppm(model) # datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- npoints(quadpoints) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Inverse lambda residuals rx <- residuals(model, type="inverse") resid <- with(rx, "increment") ################################################################# # identify the covariate # if(length(covariate) == 0) stop("No covariate specified") covtype <- "unknown" if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate covtype <- "external" beta <- 0 covvalues <- evalCovariate(covariate, quadpoints) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # 'original covariates' orig.covars <- variablesinformula(formula(model)) # 'canonical covariates' canon.covars <- names(coef(model)) # offsets offset.covars <- offsetsinformula(formula(model)) # if(covname %in% orig.covars) { # one of the original covariates covtype <- "original" covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } else if(covname %in% canon.covars) { # one of the canonical covariates covtype <- "canonical" mm <- model.matrix(model) covvalues <- mm[, covname] ## extract the corresponding coefficient beta <- coef(model)[[covname]] } else if(covname %in% offset.covars) { # an offset term only covtype <- "offset" mf <- model.frame(model, subset=rep.int(TRUE, 
n.quad(Q))) if(!(covname %in% colnames(mf))) stop(paste("Internal error: offset term", covname, "not found in model frame")) covvalues <- mf[, covname] ## fixed coefficient (not an estimated parameter) beta <- 1 } else{ # must be an external covariate (i.e. not used in fitted model) covtype <- "external" beta <- 0 covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } } # validate covvalues # if(is.null(covvalues)) stop("Unable to extract covariate values") if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) vtype <- typeof(covvalues) switch(vtype, real=, double = { }, integer = { warning("Covariate is integer-valued") }, stop(paste("Cannot handle covariate of type", sQuote(vtype)))) ################################################################# # Compute covariate effect if(covtype != "original") { effect <- beta * covvalues mediator <- covtype effectfundata <- list(beta=beta) effectFun <- function(x) { (effectfundata$beta) * x } isoffset <- (covtype == "offset") names(isoffset) <- covname } else { ## `original' covariate (passed as argument to ppm) ## may determine one or more canonical covariates and/or offsets origcovdf <- getppmOriginalCovariates(model)[insubregion, , drop=FALSE] isconstant <- lapply(origcovdf, function(z) { length(unique(z)) == 1 }) ## ## Initialise termnames <- character(0) termbetas <- numeric(0) isoffset <- logical(0) mediator <- character(0) effect <- 0 effectFun <- function(x) { effectFun.can(x) + effectFun.off(x) } effectFun.can <- effectFun.off <- function(x) { 0 * x } ## Identify relevant canonical covariates dmat <- model.depends(model) if(!(covname %in% colnames(dmat))) stop("Internal error: cannot match covariate names") relevant <- dmat[, covname] if(any(relevant)) { # original covariate determines one or more canonical covariates mediator <- "canonical" ## check whether covariate 
is separable check.separable(dmat, covname, isconstant) ## Extract information about relevant model terms termnames <- rownames(dmat)[relevant] isoffset <- rep.int(FALSE, length(termnames)) names(isoffset) <- termnames ## Extract relevant canonical covariates mm <- model.matrix(model) termvalues <- mm[, relevant, drop=FALSE] ## extract corresponding coefficients termbetas <- coef(model)[relevant] ## evaluate model effect effect <- as.numeric(termvalues %*% termbetas) ## check length if(length(effect) != npoints(quadpoints)) stop(paste("Internal error: number of values of fitted effect =", length(effect), "!=", npoints(quadpoints), "= number of quadrature points")) ## Trap loglinear case if(length(termnames) == 1 && identical(termnames, covname)) { covtype <- "canonical" beta <- termbetas } ## construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] #' set interaction terms to zero if(length(Vnames <- model$internal$Vnames)) defaultdata[,Vnames] <- 0 gf <- getglmfit(model) effectfundata.can <- list(covname=covname, fmla = rhs.of.formula(formula(gf)), termbetas = termbetas, defaultdata = defaultdata, relevant = relevant, termnames = termnames) effectFun.can <- function(x) { d <- effectfundata.can # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model matrix m <- model.matrix(d$fmla, df) # check it conforms to expected structure if(!identical(colnames(m)[d$relevant], d$termnames)) stop("Internal error: mismatch in term names in effectFun") me <- m[, d$relevant, drop=FALSE] y <- me %*% as.matrix(d$termbetas, ncol=1) return(y) } } if(!is.null(offmat <- attr(dmat, "offset")) && any(relevant <- offmat[, covname])) { ## covariate appears in a model offset term mediator <- c(mediator, "offset") ## check whether covariate is separable check.separable(offmat, 
covname, isconstant) ## collect information about relevant offset offnames <- rownames(offmat)[relevant] termnames <- c(termnames, offnames) noff <- length(offnames) termbetas <- c(termbetas, rep.int(1, noff)) isoffset <- c(isoffset, rep.int(TRUE, noff)) names(termbetas) <- names(isoffset) <- termnames ## extract values of relevant offset mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(any(nbg <- !(offnames %in% colnames(mf)))) stop(paste("Internal error:", ngettext(sum(nbg), "offset term", "offset terms"), offnames[nbg], "not found in model frame")) effex <- mf[, offnames, drop=FALSE] effect <- effect + rowSums(effex) # # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] #' set interaction terms to zero if(length(Vnames <- model$internal$Vnames)) defaultdata[,Vnames] <- 0 gf <- getglmfit(model) effectfundata.off <- list(covname=covname, fmla = rhs.of.formula(formula(gf)), defaultdata = defaultdata, offnames = offnames) effectFun.off <- function(x) { d <- effectfundata.off # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model FRAME mf <- model.frame(d$fmla, df) # check it conforms to expected structure if(!all(d$offnames %in% colnames(mf))) stop("Internal error: mismatch in term names in effectFun") moff <- mf[, d$offnames, drop=FALSE] y <- rowSums(moff) return(y) } } if(length(termnames) == 0) { # Sanity clause # (everyone knows there ain't no Sanity Clause...) warning(paste("Internal error: could not find any", "canonical covariates or offset terms", "that depended on the covariate", sQuote(covname))) # Assume it's an external covariate (i.e. 
not used in fitted model) covtype <- "external" beta <- 0 effect <- beta * covvalues effectFun <- function(x) { 0 * x } isoffset <- FALSE names(isoffset) <- covname } } #### Canonical covariates and coefficients switch(covtype, original={ cancovs <- termnames canbeta <- termbetas }, offset = , canonical={ cancovs <- covname canbeta <- beta }, external={ cancovs <- canbeta <- NA }) ################################################################# # Validate covariate values # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg.cov <- !is.finite(covvalues) if(any(offending <- nbg.cov & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } nbg.eff <- !is.finite(effect) if(any(offending <- nbg.eff & operative)) { warning(paste(sum(offending), "out of", length(offending), "values of fitted effect discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values nbg <- nbg.cov | nbg.eff ok <- !nbg & operative if(sum(ok) < 2) { warning("Not enough data; returning NULL") return(NULL) } if(!all(ok)) { Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] resid <- resid[ok] lam <- lam[ok] effect <- effect[ok] insubregion <- insubregion[ok] Z <- Z[ok] wts <- wts[ok] } #################################################### # assemble data for smoothing x <- covvalues y <- resid/wts if(smooth.effect) y <- y + effect w <- wts # if(makefrom <- is.null(from)) from <- min(x) if(maketo <- is.null(to)) to <- max(x) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(x, 
weights=w*y, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(x[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator if(sum(insubregion) < 2) { warning("Not enough useable data in subregion; returning NULL") return(NULL) } x <- x[insubregion] y <- y[insubregion] w <- w[insubregion] Z <- Z[insubregion] lam <- lam[insubregion] if(makefrom) from <- min(x) if(maketo) to <- max(x) numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(x, weights=w, bw=sigma, adjust=1, n=n,from=from,to=to, ...) #################################################### # Determine recommended plot range alim <- c(from, to) nZ <- sum(Z) if(nZ > 5) { xr <- range(as.vector(x[Z]), finite=TRUE) alimx <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, alimx) } #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x yyy <- numfun(xxx)/denfun(xxx) # variance estimation # smooth 1/lambda(u) with smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(x, weights=w/lam, bw=tau, adjust=1, n=n,from=from,to=to, ...) 
varnumfun <- interpolate(varnumer) varestxxx <- varnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sd <- sqrt(varestxxx) # alternative estimate of variance using data points only if(nZ > 1) { varXnumer <- unnormdensity(x[Z], weights=1/lam[Z]^2, bw=tau, adjust=1, n=n,from=from,to=to, ...) varXnumfun <- interpolate(varXnumer) varXestxxx <- varXnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sdX <- sqrt(varXestxxx) } else sdX <- rep(NA, length(xxx)) # fitted effect effxxx <- effectFun(xxx) # add fitted effect of covariate, if not added before smoothing if(!smooth.effect) yyy <- yyy + effxxx #################################################### # pack into fv object df <- data.frame(xxx=xxx, h =yyy, varh=varestxxx, hi=yyy+2*sd, lo=yyy-2*sd, hiX=yyy+2*sdX, loX=yyy-2*sdX, fit=effxxx) # remove any funny characters in name of covariate (e.g. if it is an offset) Covname <- make.names(covname) names(df)[1] <- Covname desc <- c(paste("covariate", sQuote(covname)), "Smoothed partial residual", "Variance", "Upper limit of pointwise 5%% significance band (integral)", "Lower limit of pointwise 5%% significance band (integral)", "Upper limit of pointwise 5%% significance band (sum)", "Lower limit of pointwise 5%% significance band (sum)", paste("Parametric fitted effect of", sQuote(covname))) rslt <- fv(df, argu=Covname, ylab=substitute(h(X), list(X=as.name(covname))), valu="h", fmla= as.formula(paste(". 
~ ", Covname)), alim=alim, labl=c(covname, paste("%s", paren(covname), sep=""), paste("var", paren(covname), sep=""), paste("hi", paren(covname), sep=""), paste("lo", paren(covname), sep=""), paste("hiX", paren(covname), sep=""), paste("loX", paren(covname), sep=""), paste("fit", paren(covname), sep="")), desc=desc, fname="h", yexp=as.expression(substitute(hat(h)(X), list(X=covname)))) attr(rslt, "dotnames") <- c("h", "hi", "lo", "fit") fvnames(rslt, ".s") <- c("hi", "lo") # add special class data class(rslt) <- c("parres", class(rslt)) attr(rslt, "stuff") <- list(covname = paste(covname, collapse=""), covtype = covtype, mediator = mediator, cancovs = cancovs, canbeta = canbeta, isoffset = isoffset, modelname = modelname, modelcall = modelcall, callstring = callstring, sigma = sigma, smooth.effect = smooth.effect, restricted = !is.null(subregion), bw.input = bw.input) return(rslt) } print.parres <- function(x, ...) { cat("Transformation diagnostic (class parres)\n") s <- attr(x, "stuff") cat(paste("for the", s$covtype, "covariate", sQuote(s$covname), if(s$covtype != "external") "in" else "for", "the fitted model", if(nchar(s$modelcall) < 30) "" else "\n\t", s$modelcall, "\n")) switch(s$covtype, original={ cancovs <- s$cancovs med <- s$mediator isoffset <- s$isoffset if(is.null(isoffset)) isoffset <- rep.int(FALSE, length(cancovs)) ncc <- length(cancovs) nfitted <- sum(!isoffset) noff <- sum(isoffset) explainfitted <- explainoff <- character(0) if(noff > 0) explainoff <- paste("offset", ngettext(noff, "term", "terms"), commasep(dQuote(cancovs[isoffset]))) if(nfitted > 0) explainfitted <- paste( paste(med[med != "offset"], collapse=" and "), ngettext(nfitted, "term", "terms"), commasep(dQuote(cancovs[!isoffset]))) splat("Fitted effect: ", if(ncc > 1) "sum of" else NULL, paste(c(explainfitted, explainoff), collapse=" and ")) }, external={ cat("Note: effect estimate not justified by delta method\n") }, offset={}, canonical={}) # earlier versions were equivalent to 
# restricted=FALSE   (continuation of the comment broken at the chunk boundary)
  ## Report whether the diagnostic was restricted to a subregion,
  ## then the original call and the bandwidth actually used.
  if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n")
  cat(paste("Call:", s$callstring, "\n"))
  cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n"))
  # earlier versions were equivalent to smooth.effect=TRUE
  sme <- !identical(s$smooth.effect, FALSE)
  ## Describe which of the two algorithms produced the curve:
  ## smoothing (effect + residual) together, or smoothing the residual only.
  if(sme) {
    cat("Algorithm: smooth(effect + residual)\n\n")
  } else {
    cat("Algorithm: effect + smooth(residual)\n\n")
  }
  NextMethod("print")
}

## Plot method for class 'parres'.
## Delegates to plot.fv, defaulting the title to the object's name and
## shading the pointwise significance band between the "hi" and "lo" curves.
plot.parres <- function(x, ...) {
  xname <- deparse(substitute(x))
  do.call(plot.fv,
          resolve.defaults(list(x),
                           list(...),
                           list(main=xname,
                                shade=c("hi", "lo"))))
}
spatstat/R/randomlpp.R0000644000176200001440000000704713613547031014450 0ustar liggesusers#
# random.R
#
# Random point pattern generators for a linear network
#
# $Revision: 1.12 $ $Date: 2020/01/05 02:46:04 $
#

## Simulate a Poisson point process on a linear network.
## lambda : intensity; may be a 'linim'/'linfun' (or a list of them),
##          in which case the network L can be inferred from it.
## L      : the linear network (class 'linnet'); optional when inferable.
## nsim   : number of simulated patterns; drop : return a bare pattern when nsim=1.
rpoislpp <- function(lambda, L, ..., nsim=1, drop=TRUE) {
  if(missing(L) || is.null(L)) {
    ## Infer the network from lambda when L is not supplied
    if(inherits(lambda, c("linim", "linfun"))) {
      L <- as.linnet(lambda)
    } else if(all(sapply(lambda, inherits, what=c("linim", "linfun")))) {
      ## lambda is a list of functions/images: all must share one network
      L <- unique(lapply(lambda, as.linnet))
      if(length(L) > 1)
        stop("All entries of lambda must be defined on the same network")
      L <- L[[1L]]
    } else stop("L is missing", call.=FALSE)
  } else verifyclass(L, "linnet")
  result <- vector(mode="list", length=nsim)
  S <- as.psp(L)
  ## shortcut: with a single simulation and drop=TRUE, return immediately
  bugout <- (nsim == 1) && drop
  for(i in seq_len(nsim)) {
    ## simulate on the segment pattern, then wrap as a point pattern on L
    X <- datagen.rpoisppOnLines(lambda, S, ...)
    Y <- lpp(X, L)
    if(bugout) return(Y)
    result[[i]] <- Y
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

## Simulate n independent uniform random points on a linear network L.
## Same nsim/drop conventions as rpoislpp above.
runiflpp <- function(n, L, nsim=1, drop=TRUE) {
  verifyclass(L, "linnet")
  result <- vector(mode="list", length=nsim)
  S <- as.psp(L)
  ## shortcut: with a single simulation and drop=TRUE, return immediately
  bugout <- (nsim == 1) && drop
  for(i in seq_len(nsim)) {
    X <- datagen.runifpointOnLines(n, S)
    Y <- lpp(X, L)
    if(bugout) return(Y)
    result[[i]] <- Y
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

## Simulate n random points on a linear network with probability density
## proportional to f (a 'linfun' or 'linim', or a list of them for
## multitype patterns).  Body continues in the next chunk.
rlpp <- function(n, f, ..., nsim=1, drop=TRUE) {
  ## reduce the function case to the pixel-image case
  if(inherits(f, "linfun")) f <- as.linim(f, ...)
  ## --- interior of rlpp: dispatch multitype cases, then sample ---
  ismulti <- FALSE
  if(length(n) > 1 && inherits(f, "linim")) {
    ## several counts but one density: replicate the density per type
    f <- rep(list(f), length(n))
    ismulti <- TRUE
  } else if(!inherits(f, "linim") && is.list(f) &&
            all(sapply(f, inherits, what=c("linim", "linfun")))) {
    #' f is a list of densities for each type of point
    if(length(n) == 1) {
      n <- rep(n, length(f))
    } else stopifnot(length(n) == length(f))
    ismulti <- TRUE
  }
  if(ismulti) {
    ## recurse once per type, then superimpose the per-type patterns
    ## within each simulation
    Y <- mapply(rlpp, n=as.list(n), f=f,
                MoreArgs=list(nsim=nsim, drop=FALSE, ...),
                SIMPLIFY=FALSE)
    names(Y) <- names(f) %orifnull% as.character(seq(along=f))
    Z <- do.call(mapply, c(list(superimpose), Y, list(SIMPLIFY=FALSE)))
    result <- simulationresult(Z, nsim, drop)
    return(result)
  }
  if(!inherits(f, "linim"))
    stop("f should be a linfun or linim object")
  if(length(n) > 1) {
    ## vector n with a single density: treat as multitype with equal densities
    flist <- rep(list(f), length(n))
    return(rlpp(n, flist, nsim=nsim, drop=drop, ...))
  }
  check.1.integer(nsim)
  if(nsim <= 0) return(list())
  #' extract data
  L <- as.linnet(f)
  df <- attr(f, "df")
  seglen <- lengths.psp(as.psp(L))
  #' sort into segments, left-to-right within segments
  df <- df[order(df$mapXY, df$tp), , drop=FALSE]
  nr <- nrow(df)
  fvals <- df$values
  if(anyNA(fvals)) stop("f has some NA values")
  if(min(fvals) < 0) stop("f has some negative values")
  #' find interval corresponding to each sample point
  ## Each sample point of f owns the sub-interval of its segment bounded by
  ## midpoints to its neighbours (or the segment ends 0 and 1).
  sameseg <- (diff(df$mapXY) == 0)
  sharenext <- c(sameseg, FALSE)
  shareprevious <- c(FALSE, sameseg)
  tcur <- df$tp
  tnext <- c(tcur[-1], NA)
  tprev <- c(NA, tcur[-nr])
  tleft <- ifelse(shareprevious, (tcur + tprev)/2, 0)
  tright <- ifelse(sharenext, (tcur + tnext)/2, 1)
  #' compute probability of each interval
  ## probability mass = density value * physical length of the interval
  probs <- fvals * (tright - tleft) * seglen[df$mapXY]
  probs <- probs/sum(probs)
  #'
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    #' sample intervals and place point uniformly in each interval
    ii <- sample.int(nr, size=n, replace=TRUE, prob=probs)
    seg <- df[ii, "mapXY"]
    tp <- runif(n, tleft[ii], tright[ii])
    result[[isim]] <- as.lpp(seg=seg, tp=tp, L=L)
  }
  result <- simulationresult(result, nsim, drop)
return(result) } spatstat/R/options.R0000644000176200001440000005454613613216544014157 0ustar liggesusers# # options.R # # Spatstat options and other internal states # # $Revision: 1.85 $ $Date: 2020/01/24 04:46:33 $ # # .spEnv <- new.env() putSpatstatVariable <- function(name, value) { assign(name, value, envir=.spEnv) } getSpatstatVariable <- function(name) { get(name, envir=.spEnv) } existsSpatstatVariable <- function(name) { exists(name, envir=.spEnv) } putSpatstatVariable("Spatstat.Options", list()) putSpatstatVariable("Spatstat.ProgressBar", NULL) putSpatstatVariable("Spatstat.ProgressData", NULL) putSpatstatVariable("warnedkeys", character(0)) ## Kovesi's uniform colour map, row 29, linear 'bmy' putSpatstatVariable("DefaultImageColours", c("#000C7D", "#000D7E", "#000D80", "#000E81", "#000E83", "#000E85", "#000F86", "#000F88", "#00108A", "#00108B", "#00118D", "#00118F", "#001190", "#001292", "#001293", "#001295", "#001396", "#001398", "#001399", "#00149A", "#00149C", "#00149D", "#00149E", "#00159F", "#0015A0", "#0015A1", "#0015A2", "#0015A3", "#0015A4", "#0016A5", "#0016A6", "#0016A6", "#0016A7", "#0016A8", "#0016A8", "#0016A8", "#0A16A9", "#1516A9", "#1D15A9", "#2315A9", "#2915A9", "#2F15A8", "#3414A8", "#3914A7", "#3E13A6", "#4313A5", "#4712A4", "#4C12A3", "#5011A2", "#5311A1", "#5710A0", "#5A0F9F", "#5E0F9E", "#610E9E", "#640E9D", "#670D9C", "#6A0D9B", "#6C0C9A", "#6F0B99", "#720B98", "#740A98", "#770A97", "#790996", "#7C0896", "#7E0895", "#800794", "#810794", "#840693", "#860692", "#880692", "#8A0591", "#8C0591", "#8E0490", "#900490", "#92048F", "#94038F", "#96038E", "#98038E", "#9A028D", "#9C028D", "#9E028D", "#A0018C", "#A2018C", "#A4018B", "#A6018B", "#A8008A", "#AA008A", "#AB0089", "#AD0089", "#AF0088", "#B10088", "#B30087", "#B50087", "#B70086", "#B80086", "#BA0086", "#BC0085", "#BE0085", "#C00084", "#C20084", "#C30083", "#C50083", "#C70082", "#C90082", "#CB0081", "#CD0081", "#CE0080", "#D00080", "#D20080", "#D40080", "#D5007F", "#D7007F", "#D9007E", 
"#DA007E", "#DC007D", "#DD007C", "#DF017C", "#E1027B", "#E2047B", "#E4067A", "#E5087A", "#E70B79", "#E80D78", "#E91078", "#EB1277", "#EC1477", "#ED1676", "#EF1875", "#F01A75", "#F11C74", "#F31E73", "#F42073", "#F52272", "#F62471", "#F72671", "#F82870", "#FA2A6F", "#FB2C6F", "#FC2E6E", "#FD306D", "#FE326C", "#FE346C", "#FE366B", "#FE386A", "#FE3A6A", "#FE3D69", "#FE3F68", "#FE4167", "#FE4366", "#FE4566", "#FE4765", "#FE4964", "#FE4B63", "#FE4D62", "#FE5062", "#FE5261", "#FE5460", "#FE565F", "#FE585E", "#FE5A5D", "#FE5D5C", "#FE5F5B", "#FE615B", "#FE635A", "#FE6559", "#FE6758", "#FE6A57", "#FE6C56", "#FE6E55", "#FE7054", "#FE7253", "#FE7452", "#FE7651", "#FE7850", "#FE7A4E", "#FE7C4D", "#FE7E4C", "#FE7F4B", "#FE804A", "#FE8249", "#FE8448", "#FE8647", "#FE8745", "#FE8944", "#FE8B43", "#FE8D42", "#FE8E40", "#FE903F", "#FE923E", "#FE943C", "#FE953B", "#FE9739", "#FE9938", "#FE9A36", "#FE9C35", "#FE9E33", "#FE9F32", "#FEA130", "#FEA22F", "#FEA42E", "#FEA52C", "#FEA72B", "#FEA82A", "#FEAA29", "#FEAB28", "#FEAD27", "#FEAE26", "#FEB026", "#FEB125", "#FEB324", "#FEB423", "#FEB523", "#FEB722", "#FEB822", "#FEBA21", "#FEBB20", "#FEBC20", "#FEBE1F", "#FEBF1F", "#FEC11F", "#FEC21E", "#FEC31E", "#FEC51E", "#FEC61D", "#FEC71D", "#FEC91D", "#FECA1D", "#FECB1D", "#FECD1D", "#FECE1C", "#FECF1C", "#FED11C", "#FED21C", "#FED31C", "#FED51C", "#FED61D", "#FED71D", "#FED91D", "#FEDA1D", "#FEDB1D", "#FEDD1D", "#FEDE1E", "#FEDF1E", "#FEE11E", "#FEE21E", "#FEE31F", "#FEE51F", "#FEE61F", "#FEE720", "#FEE820", "#FEEA21", "#FEEB21", "#FEEC22", "#FEEE22", "#FEEF23", "#FEF023")) warn.once <- function(key, ...) 
{ warned <- getSpatstatVariable("warnedkeys") if(!(key %in% warned)) { warning(paste(...), call.=FALSE) putSpatstatVariable("warnedkeys", c(warned, key)) } return(invisible(NULL)) } ".Spat.Stat.Opt.Table" <- list( areainter.polygonal = list( ## use polygonal calculations in AreaInter default=FALSE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), checkpolygons = list( ## superseded superseded=TRUE, default=FALSE, check=function(x) { warning("spatstat.options('checkpolygons') will be ignored in future versions of spatstat", call.=FALSE) return(is.logical(x) && length(x) == 1) }, valid="a single logical value" ), checksegments = list( ## default value of 'check' for psp objects default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), closepairs.newcode=list( ## use new code for 'closepairs' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), closepairs.altcode=list( ## use alternative code for 'closepairs' default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), crossing.psp.useCall=list( ## use new code for 'crossing.psp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), crosspairs.newcode=list( ## use new code for 'crosspairs' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), densityTransform=list( ## use experimental new C routines for 'density.ppp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), densityC=list( ## use C routines for 'density.ppp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), dpp.maxmatrix=list( ## maximum size of matrix in dppeigen default=2^24, # 16,777,216 check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x > 1024) }, 
valid="a single integer, greater than 1024" ), exactdt.checks.data=list( ## whether 'exactdt' checks validity of return value default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), expand=list( ## default area expansion factor default=2, check=function(x) { is.numeric(x) && length(x) == 1 && all(x > 1) }, valid="a single numeric value, greater than 1" ), expand.polynom=list( ## whether to expand polynom() in ppm formulae default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fasteval=list( ## whether to use 'fasteval' code if available default="on", check=function(x) { x %in% c("off", "on", "test") }, valid="one of the strings \'off\', \'on\' or \'test\'" ), fastpois=list( # whether to use fast algorithm for rpoispp() when lambda is an image default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fastthin=list( # whether to use fast C algorithm for rthin() when P is constant default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fastK.lgcp=list( ## whether to cut a few corners in 'lgcp.estK' default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fast.trigraph=list( ## whether to use C function triograph or trigraph in edges2triangles default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), fixpolygons = list( ## whether to repair polygons automatically default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), gpclib=list( ## defunct! 
superseded=TRUE, default=FALSE, check=function(x) { message("gpclib is no longer needed") return(TRUE) }, valid="a single logical value" ), huge.npoints=list( ## threshold to trigger a warning from rpoispp default=1e6, check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x > 1024) }, valid="a single integer, greater than 1024" ), image.colfun=list( ## default colour scheme for plot.im # default=function(n){topo.colors(n)}, default=function(n) { z <- getSpatstatVariable("DefaultImageColours") interp.colours(z, n) }, check=function(x) { if(!is.function(x) || length(formals(x)) == 0) return(FALSE) y <- x(42) if(length(y) != 42 || !is.character(y)) return(FALSE) z <- try(col2rgb(y), silent=TRUE) return(!inherits(z, "try-error")) }, valid="a function f(n) that returns character strings, interpretable as colours" ), Kcom.remove.zeroes=list( ## whether Kcom removes zero distances default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), maxedgewt=list( ## maximum edge correction weight default=100, check=function(x){ is.numeric(x) && length(x) == 1 && is.finite(x) && all(x >= 1) }, valid="a finite numeric value, not less than 1" ), maxmatrix=list( ## maximum size of matrix of pairs of points in mpl.R default=2^24, # 16,777,216 check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x > 1024) }, valid="a single integer, greater than 1024" ), monochrome = list( ## switch for monochrome colour scheme default=FALSE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), n.bandwidth=list( ## number of values of bandwidth to try in bandwidth selection default=32, check=function(x) { is.numeric(x) && (length(x) == 1) && all(x == ceiling(x)) && all(x > 2) }, valid="a single integer, greater than 2" ), ndummy.min=list( ## minimum grid size for dummy points default=32, check=function(x) { is.numeric(x) && length(x) <= 2 && all(x == ceiling(x)) && 
all(x > 1) }, valid="a single integer or a pair of integers, greater than 1" ), ngrid.disc=list( ## number of grid points used to calculate area in area-interaction default=128, check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x > 1) }, valid="a single integer, greater than 1" ), npixel=list( ## default pixel dimensions default=128, check=function(x){ is.numeric(x) && (length(x) %in% c(1,2)) && all(is.finite(x)) && all(x == ceiling(x)) && all(x > 1) }, valid="an integer, or a pair of integers, greater than 1" ), nvoxel=list( ## default total number of voxels default=2^22, check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x > 2^12) }, valid="a single integer, greater than 2^12" ), old.morpho.psp=list( ## use old code for morphological operations default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), par.binary=list( ## default graphics parameters for masks default=list(), check=is.list, valid="a list" ), par.contour=list( ## default graphics parameters for 'contour' default=list(), check=is.list, valid="a list" ), par.fv=list( ## default graphics parameters for 'plot.fv' default=list(), check=is.list, valid="a list" ), par.persp=list( ## default graphics parameters for 'persp' plots default=list(), check=is.list, valid="a list" ), par.points=list( ## default graphics parameters for 'points' default=list(), check=is.list, valid="a list" ), par.pp3=list( ## default graphics parameters for 'plot.pp3' default=list(), check=is.list, valid="a list" ), print.ppm.SE=list( ## under what conditions to print estimated SE in print.ppm default="poisson", check=function(x) { is.character(x) && length(x) == 1 && all(x %in% c("always", "poisson", "never")) }, valid="one of the strings \'always\', \'poisson\' or \'never\'" ), progress = list( ## how to display progress reports default="tty", check=function(x){ all(x %in% c("tty", "tk", "txtbar")) }, valid="one of the 
strings 'tty', 'tk' or 'txtbar'" ), project.fast=list( ## whether to cut corners when projecting an invalid ppm object default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), psstA.ngrid=list( ## size of point grid for computing areas in psstA default=32, check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x >= 8) }, valid="a single integer, greater than or equal to 8" ), psstA.nr=list( ## number of 'r' values to consider in psstA default=30, check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x >= 4) }, valid="a single integer, greater than or equal to 4" ), psstG.remove.zeroes=list( ## whether to remove zero distances in psstG default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), eroded.intensity=list( ## whether to compute intensity estimate in eroded window ## e.g. for Kcom, Gcom default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), rmh.nrep=list( ## default value of parameter 'nrep' in rmh default=5e5, check=function(x) { is.numeric(x) && length(x) == 1 && all(x == ceiling(x)) && all(x > 0) }, valid="a single integer, greater than 0" ), rmh.p=list( ## default value of parameter 'p' in rmh default=0.9, check=function(x) { is.numeric(x) && length(x) == 1 && all(x >= 0) && all(x <= 1) }, valid="a single numerical value, between 0 and 1" ), rmh.q=list( ## default value of parameter 'q' in rmh default=0.9, check=function(x) { is.numeric(x) && length(x) == 1 && all(x > 0) && all(x < 1) }, valid="a single numerical value, strictly between 0 and 1" ), scalable = list( ## whether certain calculations in ppm should be scalable default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), selfcrossing.psp.useCall=list( ## whether to use new code in selfcrossing.psp default=TRUE, check=function(x) { is.logical(x) && length(x) 
== 1 }, valid="a single logical value" ), terse = list( ## Level of terseness in printed output (higher => more terse) default=0, check=function(x) { length(x) == 1 && all(x %in% 0:4) }, valid="an integer between 0 and 4" ), transparent=list( ## whether to allow transparent colours in default colour maps default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), units.paren=list( default="(", check=function(x) { is.character(x) && (length(x) == 1) && all(x %in% c("(", "[", "{", "")) }, valid="one of the strings '(', '[', '{' or '' " ), use.Krect=list( ## whether to use function Krect in Kest(X) when window is rectangle default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cwhist=list( ## whether to use C code for whist default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cbdrymask=list( ## whether to use C code for bdry.mask default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), kppm.canonical=list( ## whether to use 'canonical' parameters in kppm default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), kppm.adjusted=list( ## experimental default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), check.rpanel.loaded=list( # internal debugging default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), check.RandomFields.loaded=list( # this is working OK so no need to check unless debugging default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), check.RandomFieldsUtils.loaded=list( # this is working OK so no need to check unless debugging default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Clinequad = list( # use C code for 'linequad' default=TRUE, 
check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Ccountends = list( # use C code for 'countends' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Clinearradius = list( # use C code for 'boundingradius.linnet' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cnndistlpp = list( # use C code for 'nndist.lpp'/'nnwhich.lpp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), Cnncrosslpp = list( # use C code for 'nncross.lpp' default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), developer = list( # general purpose; user is a developer; use experimental code, etc default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ) ) # end of options list reset.spatstat.options <- function() { Spatstat.Options <- lapply(.Spat.Stat.Opt.Table, getElement, name="default") putSpatstatVariable("Spatstat.Options", Spatstat.Options) invisible(Spatstat.Options) } reset.spatstat.options() spatstat.options <- local({ spatstat.options <- function (...) { Spatstat.Options <- getSpatstatVariable("Spatstat.Options") called <- list(...) if(length(called) == 0) { # return all options, except superseded ones allofem <- .Spat.Stat.Opt.Table[names(Spatstat.Options)] retain <- sapply(lapply(allofem, getElement, name="superseded"), is.null) return(Spatstat.Options[retain]) } if(is.null(names(called)) && length(called)==1) { # spatstat.options(x) x <- called[[1]] if(is.null(x)) return(Spatstat.Options) # spatstat.options(NULL) if(is.list(x)) called <- x } if(is.null(names(called))) { # spatstat.options("par1", "par2", ...) 
      ## --- interior of spatstat.options: unnamed arguments are queries ---
      ischar <- unlist(lapply(called, is.character))
      if(all(ischar)) {
        ## query one or more options by name
        choices <- unlist(called)
        ok <- choices %in% names(Spatstat.Options)
        if(!all(ok))
          stop(paste("Unrecognised option(s):", called[!ok]))
        ## single query returns the value itself; multiple return a list
        if(length(called) == 1)
          return(Spatstat.Options[[choices]])
        else
          return(Spatstat.Options[choices])
      } else {
        ## non-character unnamed arguments are a usage error
        wrong <- called[!ischar]
        offending <- sapply(wrong, ShortDeparse)
        offending <- paste(offending, collapse=",")
        stop(paste("Unrecognised mode of argument(s) [", offending,
                   "]: should be character string or name=value pair"))
      }
    }
    ## spatstat.options(name=value, name2=value2,...)
    assignto <- names(called)
    if (is.null(assignto) || !all(nzchar(assignto)))
      stop("options must all be identified by name=value")
    recog <- assignto %in% names(.Spat.Stat.Opt.Table)
    if(!all(recog))
      stop(paste("Unrecognised option(s):", assignto[!recog]))
    ## validate new values
    ## each option entry in the table carries a check() predicate and a
    ## human-readable description of valid values
    for(i in seq_along(assignto)) {
      nama <- assignto[i]
      valo <- called[[i]]
      entry <- .Spat.Stat.Opt.Table[[nama]]
      ok <- entry$check(valo)
      if(!ok)
        stop(paste("Parameter", dQuote(nama), "should be", entry$valid))
    }
    ## reassign
    changed <- Spatstat.Options[assignto]
    Spatstat.Options[assignto] <- called
    putSpatstatVariable("Spatstat.Options", Spatstat.Options)
    ## return previous values of the changed options, invisibly
    ## (mirrors the behaviour of base R options())
    invisible(changed)
  }
  ## Deparse an argument compactly for error messages
  ShortDeparse <- function(x) {
    y <- x
    dont.complain.about(y)
    short.deparse(substitute(y))
  }
  spatstat.options
})
spatstat/R/quantess.R0000644000176200001440000002164513551744250014320 0ustar liggesusers#' quantess.R
#'
#' Quantile Tessellation
#'
#' $Revision: 1.22 $ $Date: 2019/10/17 01:49:01 $

## Generic: divide a window/pattern/image M into n tiles containing
## equal shares of the covariate Z (quantile tessellation).
quantess <- function(M, Z, n, ...) {
  UseMethod("quantess")
}

## Method for windows (class 'owin').
## Z may be a function/image, or one of the strings "x","y","rad","ang"
## naming a coordinate; type is passed to quantile(); origin anchors the
## polar coordinates; eps controls pixel resolution.
quantess.owin <- function(M, Z, n, ..., type=2, origin=c(0,0), eps=NULL) {
  W <- as.owin(M)
  B <- boundingbox(W)
  ## any extra arguments may specify a tessellation to intersect with
  tcross <- MinimalTess(W, ...)
  ## --- interior of quantess.owin ---
  force(n)
  if(!is.character(Z)) {
    ## Z is a function or image: pixellate it on W
    Zim <- as.im(Z, W, eps=eps)
    Zrange <- range(Zim)
  } else {
    Z <- match.arg(Z, c("x", "y", "rad", "ang"))
    if(Z %in% c("x", "y") && is.rectangle(W)) {
      ## exact solution for cartesian coordinates in a rectangle:
      ## equal-width quadrat strips are the quantile tiles
      out <- switch(Z,
                    x={ quadrats(W, nx=n, ny=1) },
                    y={ quadrats(W, nx=1, ny=n) })
      if(!is.null(tcross)) out <- intersect.tess(out, tcross)
      return(out)
    }
    ## build the coordinate function and its range for this window
    a <- qtPrepareCoordinate(Z, W, origin)
    Zfun <- a$Zfun
    Zrange <- a$Zrange
    Zim <- as.im(Zfun, W, eps=eps)
  }
  ## break the covariate range at its n-quantiles;
  ## widen the end breaks so the extremes are included
  qZ <- quantile(Zim, probs=(0:n)/n, type=type)
  qZ[1] <- min(qZ[1], Zrange[1])
  qZ[n+1] <- max(qZ[n+1], Zrange[2])
  if(is.polygonal(W) && is.character(Z)) {
    ## polygonal window + coordinate covariate: cut with exact
    ## geometric strips (cartesian or polar) rather than pixels
    R <- Frame(W)
    strips <- switch(Z,
                     x = tess(xgrid=qZ, ygrid=R$yrange),
                     y = tess(xgrid=R$xrange, ygrid=qZ),
                     rad = polartess(B, radii=qZ, origin=origin),
                     ang = polartess(B, angles=qZ, origin=origin))
    out <- intersect.tess(strips, tess(tiles=list(W)))
    tilenames(out) <- makeCutLabels(qZ, include.lowest=TRUE)
  } else {
    ## general case: cut the pixel image at the quantile breaks
    ZC <- cut(Zim, breaks=qZ, include.lowest=TRUE)
    out <- tess(image=ZC)
  }
  if(!is.null(tcross)) out <- intersect.tess(out, tcross)
  return(out)
}

## Build the coordinate function Zfun and its range over window W for a
## named coordinate covariate: "x", "y", "rad" (distance from origin) or
## "ang" (angle about origin).  Returns list(Zrange, Zfun).
qtPrepareCoordinate <- function(covname, W, origin=c(0,0)) {
  switch(covname,
         x={
           Zfun <- function(x,y){x}
           Zrange <- boundingbox(W)$xrange
         },
         y={
           Zfun <- function(x,y){y}
           Zrange <- boundingbox(W)$yrange
         },
         rad={
           origin <- interpretAsOrigin(origin, W)
           ## Euclidean distance from the origin
           Zfun <- function(x,y) {
             sqrt((x-origin[1])^2+(y-origin[2])^2)
           }
           V <- vertices(W)
           Zrange <- range(Zfun(V$x, V$y))
         },
         ang={
           origin <- interpretAsOrigin(origin, W)
           Zstart <- 0
           ## angle in [0, 2*pi), optionally shifted below zero when the
           ## occupied arc straddles the positive x-axis (Zstart < 0)
           Zfun <- function(x,y) {
             angle <- atan2(y-origin[2], x-origin[1]) %% (2*pi)
             if(Zstart < 0) {
               negangle <- angle - 2*pi
               angle <- ifelse(negangle >= Zstart, negangle, angle)
             }
             return(angle)
           }
           ## find the union of angular arcs subtended by the boundary edges
           S <- as.data.frame(edges(W))
           a <- Zfun(S[,"x0"], S[,"y0"])
           b <- Zfun(S[,"x1"], S[,"y1"])
           bmina <- b - a
           ## orient each edge's arc anticlockwise (shorter way round)
           swap <- (bmina > pi) | (bmina < 0 & bmina > -pi)
           arcs <- cbind(ifelse(swap, b, a), ifelse(swap, a, b))
           arcs <- lapply(apply(arcs, 1, list), unlist)
           Zunion <- circunion(arcs)
           Zrange <- c(Zunion[[1]][1], Zunion[[length(Zunion)]][2])
           if(diff(Zrange) < 0) {
             #' first interval straddles the positive x-axis
             Zstart <- Zrange[1] <- Zrange[1] - 2*pi
           }
         })
  return(list(Zrange=Zrange, Zfun=Zfun))
}

#' Method for point patterns ('ppp'): quantiles are computed so that
#' the tiles contain (approximately) equal numbers of points of M.
quantess.ppp <- function(M, Z, n, ..., type=2, origin=c(0,0), eps=NULL) {
  W <- as.owin(M)
  B <- boundingbox(W)
  tcross <- MinimalTess(W, ...)
  force(n)
  if(!is.character(Z)) {
    Zim <- as.im(Z, W, eps=eps)
    ## covariate values at the data points (exact if Z is a function)
    ZM <- if(is.function(Z)) Z(M$x, M$y) else Zim[M]
    Zrange <- range(range(Zim), ZM)
  } else {
    Z <- match.arg(Z, c("x", "y", "rad", "ang"))
    if(Z %in% c("x", "y") && is.rectangle(W)) {
      ## rectangle split by a coordinate: use exact point quantiles
      switch(Z,
             x={
               qx <- quantile(M$x, probs=(1:(n-1))/n, type=type)
               qx <- c(W$xrange[1], qx, W$xrange[2])
               out <- tess(xgrid=qx, ygrid=W$yrange)
             },
             y={
               qy <- quantile(M$y, probs=(1:(n-1))/n, type=type)
               qy <- c(W$yrange[1], qy, W$yrange[2])
               out <- tess(xgrid=W$xrange, ygrid=qy)
             })
      if(!is.null(tcross)) out <- intersect.tess(out, tcross)
      return(out)
    }
    a <- qtPrepareCoordinate(Z, W, origin)
    Zrange <- a$Zrange
    Zfun <- a$Zfun
    ZM <- Zfun(M$x, M$y)
    Zrange <- range(Zrange, range(ZM))
    Zim <- as.im(Zfun, W, eps=eps)
  }
  ## NOTE(review): breaks are quantiles of the pixel image Zim, not of
  ## the point values ZM, in this branch - presumably intentional since
  ## the tiles are defined through the image; confirm against docs.
  qZ <- quantile(Zim, probs=(0:n)/n, type=type)
  qZ[1] <- min(qZ[1], Zrange[1])
  qZ[n+1] <- max(qZ[n+1], Zrange[2])
  if(is.polygonal(W) && is.character(Z)) {
    ## polygonal window, coordinate covariate: exact geometric strips
    R <- Frame(W)
    strips <- switch(Z,
                     x = tess(xgrid=qZ, ygrid=R$yrange),
                     y = tess(xgrid=R$xrange, ygrid=qZ),
                     rad = polartess(B, radii=qZ, origin=origin),
                     ang = polartess(B, angles=qZ, origin=origin))
    out <- intersect.tess(strips, tess(tiles=list(W)))
    tilenames(out) <- makeCutLabels(qZ, include.lowest=TRUE)
  } else {
    ZC <- cut(Zim, breaks=qZ, include.lowest=TRUE)
    out <- tess(image=ZC)
  }
  if(!is.null(tcross)) out <- intersect.tess(out, tcross)
  return(out)
}

#' Method for pixel images ('im'): M is interpreted as a weight image;
#' the quantiles are weighted quantiles of Z with weights proportional
#' to M, via an empirical weighted CDF.
quantess.im <- function(M, Z, n, ..., type=2, origin=c(0,0)) {
  W <- Window(M)
  tcross <- MinimalTess(W, ...)
  force(n)
  if(!(type %in% c(1,2)))
    stop("Only quantiles of type 1 and 2 are implemented for quantess.im")
  if(is.character(Z)) {
    Z <- match.arg(Z, c("x", "y", "rad", "ang"))
    a <- qtPrepareCoordinate(Z, W, origin)
    Z <- a$Zfun
    Zrange <- a$Zrange
  } else Zrange <- NULL
  ## put M and Z on a common raster, restricted to W
  MZ <- harmonise(M=M, Z=Z)
  M <- MZ$M[W, drop=FALSE]
  Z <- MZ$Z[W, drop=FALSE]
  Zrange <- range(c(range(Z), Zrange))
  ## weighted empirical CDF of Z with weights proportional to M
  Fun <- ewcdf(Z[], weights=M[]/sum(M[]))
  qZ <- quantile(Fun, probs=(1:(n-1))/n, type=type)
  qZ <- c(Zrange[1], qZ, Zrange[2])
  ZC <- cut(Z, breaks=qZ, include.lowest=TRUE)
  out <- tess(image=ZC)
  tilenames(out) <- makeCutLabels(qZ, include.lowest=TRUE)
  if(!is.null(tcross)) out <- intersect.tess(out, tcross)
  return(out)
}

MinimalTess <- function(W, ...) {
  ## find the minimal tessellation of W consistent with the arguments
  ## Returns NULL if ... contains no arguments recognised by
  ## quadrats() or tess().
  argh <- list(...)
  v <- NULL
  if(length(argh)) {
    nama <- names(argh)
    known <- union(names(formals(quadrats)), names(formals(tess)))
    recognised <- !is.na(match(nama, known))
    if(any(recognised)) {
      if(any(c("nx", "ny") %in% nama)) {
        ## quadrat counts specified
        v <- do.call(quadrats,
                     resolve.defaults(list(X=W),
                                      argh[recognised],
                                      list(nx=1, ny=1)))
      } else if(any(c("xbreaks", "ybreaks") %in% nama)) {
        ## explicit break points specified
        v <- do.call(quadrats,
                     resolve.defaults(list(X=W),
                                      argh[recognised],
                                      list(xbreaks=W$xrange,
                                           ybreaks=W$yrange)))
      } else {
        v <- do.call(tess,
                     resolve.defaults(argh[recognised],
                                      list(window=W, keepempty=TRUE)))
      }
    }
  }
  return(v)
}

#' Nested split of a point pattern X by several splitting criteria
#' given in ... (factors, tessellations, windows, images, or arguments
#' recognised by MinimalTess). Continues below.
nestsplit <- function(X, ...) {
  stopifnot(is.ppp(X))
  flist <- list(...)
  ## which of the ... arguments can be used directly by split.ppp
  cansplit <- sapply(flist, inherits,
                     what=c("factor", "tess", "owin", "im", "character"))
  splitted <- lapply(flist[cansplit], split, x=X)
  splitters <- lapply(splitted, attr, which="fsplit")
  if(any(!cansplit)) {
    ## remaining arguments define a tessellation via MinimalTess;
    ## insert it at the position of the first unusable argument
    extra <- do.call(MinimalTess,
                     append(list(W=Window(X)), flist[!cansplit]))
    pos <- min(which(!cansplit))
    ns <- length(splitters)
    if(pos > ns) {
      splitters <- append(splitters, list(extra))
    } else {
      before <- splitters[seq_len(pos-1)]
      after <- splitters[pos:ns]
      splitters <- c(before, list(extra), after)
    }
  }
  ns <- length(splitters)
  if(ns == 0) return(X)
  if(ns == 1) return(split(X, splitters[[1]]))
  if(ns > 2) stop("Nesting depths greater than 2 are not yet implemented")
  names(splitters) <- good.names(names(splitters), paste0("f", 1:ns))
  ## each splitter is either a factor or a tessellation
  fax1 <- is.factor(sp1 <- splitters[[1]])
  fax2 <- is.factor(sp2 <- splitters[[2]])
  lev1 <- if(fax1) levels(sp1) else seq_len(sp1$n)
  lev2 <- if(fax2) levels(sp2) else seq_len(sp2$n)
  if(!fax1 && !fax2) {
    ## two tessellations
    marks(sp1) <- factor(lev1, levels=lev1)
    marks(sp2) <- factor(lev2, levels=lev2)
    sp12 <- intersect.tess(sp1, sp2, keepmarks=TRUE)
    pats <- split(X, sp12)
    f1 <- marks(sp12)[,1]
    f2 <- marks(sp12)[,2]
  } else {
    if(fax1 && fax2) {
      ## two grouping factors
      Xsp1 <- split(X, sp1)
      sp2.1 <- split(sp2, sp1)
      ll <- mapply(split, Xsp1, sp2.1, SIMPLIFY=FALSE)
    } else if(fax1 && !fax2) {
      ## grouping factor and tessellation
      Xsp1 <- split(X, sp1)
      ll <- lapply(Xsp1, split, f=sp2)
    } else if(!fax1 && fax2) {
      ## tessellation and grouping factor
      Xsp1 <- split(X, sp1)
      sp2.1 <- split(sp2, attr(Xsp1, "fgroup"))
      ll <- mapply(split, Xsp1, sp2.1, SIMPLIFY=FALSE)
    }
    neach <- lengths(ll)
    f1 <- rep(factor(lev1, levels=lev1), neach)
    f2 <- rep(factor(lev2, levels=lev2), length(Xsp1))
    pats <- do.call(c, unname(ll))
  }
  ## result is a hyperframe: one row per (level1, level2) combination
  h <- hyperframe(pts=pats, f1=f1, f2=f2)
  names(h)[2:3] <- names(splitters)
  return(h)
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/Ksector.R0000644000176200001440000001722613333543254014070 0ustar liggesusers#
# Ksector.R Estimation of 'sector K function'
#
# $Revision: 1.5 $ $Date: 2014/11/10 10:41:14 $
#

#' Sector K function: the K function restricted to pairs whose
#' direction lies in the angular sector [begin, end].
#' Returns an 'fv' object (or ratio object if ratio=TRUE).
Ksector <- function(X, begin=0, end=360, ...,
                    units=c("degrees", "radians"),
                    r=NULL, breaks=NULL,
                    correction=c("border", "isotropic", "Ripley", "translate"),
                    domain = NULL, ratio=FALSE,
                    verbose=TRUE)
{
  verifyclass(X, "ppp")
#  rfixed <- !is.null(r) || !is.null(breaks)
  npts <- npoints(X)
  W <- Window(X)
  areaW <- area(W)
  lambda <- npts/areaW
  lambda2 <- (npts * (npts - 1))/(areaW^2)
  rmaxdefault <- rmax.rule("K", W, lambda)
  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  if(!is.null(domain)) {
    ## restrict the 'first point' of each pair to a subdomain
    domain <- as.owin(domain)
    stopifnot(is.subset.owin(domain, Window(X)))
    areaW <- area(domain)
  }
  units <- match.arg(units)
  ## convert the sector limits to radians (BEGIN, END)
  switch(units,
         radians = {
           if(missing(end)) end <- 2 * pi
           check.1.real(begin)
           check.1.real(end)
           check.in.range(begin, c(-pi, 2*pi))
           check.in.range(end, c(0, 2*pi))
           stopifnot(begin < end)
           stopifnot((end - begin) <= 2 * pi)
           BEGIN <- begin
           END   <- end
           Bname <- simplenumber(begin/pi, "pi") %orifnull% signif(begin, 3)
           Ename <- simplenumber(end/pi, "pi") %orifnull% signif(end, 3)
         },
         degrees = {
           check.1.real(begin)
           check.1.real(end)
           check.in.range(begin, c(-90, 360))
           check.in.range(end, c(0, 360))
           stopifnot(begin < end)
           stopifnot((end - begin) <= 360)
           if(verbose && (end - begin) <= 2 * pi)
             warning("Very small interval in degrees: did you mean radians?")
           BEGIN <- pi* (begin/180)
           END   <- pi * (end/180)
           Bname <- signif(begin, 3)
           Ename <- signif(end, 3)
         })
  ## choose correction(s)
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("border", "isotropic", "Ripley", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             good="good",
                             best="best"),
                           multi=TRUE)
#  best.wanted <- ("best" %in% correction)
  ## replace 'good' by the optimal choice for this size of dataset
  if("good" %in% correction)
    correction[correction == "good"] <- good.correction.K(X)
  ## retain only corrections that are implemented for the window
  correction <- implemented.for.K(correction, W$type, correction.given)
  ## recommended range of r values
  alim <- c(0, min(rmax, rmaxdefault))
  ## labels
  subscripts <- paste("sector", Bname, Ename, sep=",")
  ylabel <- paste("K[", subscripts, "]")
  ylab <- eval(parse(text=paste("quote(", ylabel, ")")))
#  ylab <- parse(text=paste("K[sector,", Bname, ",", Ename, "]"))
#  yexp <- substitute(K[list(sector,B,E)](r),
#                     list(B=Bname, E=Ename))
  yexp <- parse(text=paste("K[list(", subscripts, ")]"))
  fname <- c("K", paste("list", paren(subscripts)))
  ## this will be the output data frame
  ## theoretical Poisson value: sector fraction of pi*r^2
  Kdf <- data.frame(r=r, theo = ((END-BEGIN)/2) * r^2)
  desc <- c("distance argument r", "theoretical Poisson %s")
  denom <- lambda2 * areaW
  K <- ratfv(Kdf, NULL, denom,
             "r", ylab = ylab,
             valu = "theo", fmla = NULL, alim =alim,
             labl = c("r","{%s[%s]^{pois}}(r)"), desc = desc,
             fname=fname, yexp=yexp, ratio=ratio)
  ## identify all close pairs
  rmax <- max(r)
  close <- as.data.frame(closepairs(X, rmax))
  if(!is.null(domain)) {
    ## restrict to pairs with first point in 'domain'
    indom <- with(close, inside.owin(xi, yi, domain))
    close <- close[indom, , drop=FALSE]
  }
  ## select pairs in angular range
  ang <- with(close, atan2(dy, dx)) %% (2*pi)
  if(BEGIN >= 0) {
    ## 0 <= begin < end
    ok <- (BEGIN <= ang) & (ang <= END)
  } else {
    ## begin < 0 <= end
    ok <- (ang >= 2 * pi + BEGIN) | (ang <= END)
  }
  close <- close[ok, , drop=FALSE]
  ## pairwise distances
  DIJ <- close$d
  if(any(correction == "none")) {
    # uncorrected! For demonstration purposes only!
    wh <- whist(DIJ, breaks$val)  # no weights
    numKun <- cumsum(wh)
    denKun <- lambda2 * areaW
    # uncorrected estimate of K
    K <- bind.ratfv(K,
                    data.frame(un=numKun), denKun,
                    "{hat(%s)[%s]^{un}}(r)",
                    "uncorrected estimate of %s",
                    "un",
                    ratio=ratio)
  }
  if(any(correction == "border" | correction == "bord.modif")) {
    # border method
    # Compute distances to boundary
    b <- bdist.points(X)
    I <- close$i
    bI <- b[I]
    if(!is.null(domain))
      b <- b[inside.owin(X, , w=domain)]
    # apply reduced sample algorithm
    RS <- Kount(DIJ, bI, b, breaks)
    if(any(correction == "bord.modif")) {
      # modified border correction
      denom.area <- eroded.areas(W, r, subset=domain)
      numKbm <- RS$numerator
      denKbm <- lambda2 * denom.area
      K <- bind.ratfv(K,
                      data.frame(bord.modif=numKbm),
                      data.frame(bord.modif=denKbm),
                      "{hat(%s)[%s]^{bordm}}(r)",
                      "modified border-corrected estimate of %s",
                      "bord.modif",
                      ratio=ratio)
    }
    if(any(correction == "border")) {
      numKb <- RS$numerator
      denKb <- lambda * RS$denom.count
      K <- bind.ratfv(K,
                      data.frame(border=numKb),
                      data.frame(border=denKb),
                      "{hat(%s)[%s]^{bord}}(r)",
                      "border-corrected estimate of %s",
                      "border",
                      ratio=ratio)
    }
  }
  if(any(correction == "translate")) {
    ## Ohser-Stoyan translation correction
    edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE)
    wh <- whist(DIJ, breaks$val, edgewt)
    numKtrans <- cumsum(wh)
    denKtrans <- lambda2 * areaW
    ## estimator is unreliable beyond half the frame diameter
    h <- diameter(as.rectangle(W))/2
    numKtrans[r >= h] <- NA
    K <- bind.ratfv(K,
                    data.frame(trans=numKtrans), denKtrans,
                    "{hat(%s)[%s]^{trans}}(r)",
                    "translation-corrected estimate of %s",
                    "trans",
                    ratio=ratio)
  }
  if(any(correction == "isotropic")) {
    ## Ripley isotropic correction
    XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
    edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1))
    wh <- whist(DIJ, breaks$val, edgewt)
    numKiso <- cumsum(wh)
    denKiso <- lambda2 * areaW
    ## estimator is unreliable beyond half the window diameter
    h <- diameter(W)/2
    numKiso[r >= h] <- NA
    K <- bind.ratfv(K,
                    data.frame(iso=numKiso), denKiso,
                    "{hat(%s)[%s]^{iso}}(r)",
                    "Ripley isotropic correction estimate of %s",
                    "iso",
                    ratio=ratio)
  }
  #
  # default plot will display all edge corrections
  formula(K) <- . ~ r
  nama <- rev(colnames(K))
  nama <- nama[!(nama %in% c("r", "rip", "ls"))]
  fvnames(K, ".") <- nama
  unitname(K) <- unitname(X)
  # copy to other components
  if(ratio)
    K <- conform.ratfv(K)
  return(K)
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/fitted.mppm.R0000644000176200001440000000346513603021745014701 0ustar liggesusers#
#  fitted.mppm.R
#
# method for 'fitted' for mppm objects
#
#   $Revision: 1.3 $   $Date: 2020/01/01 04:43:21 $
#

#' Fitted intensity ("lambda") or trend values for an mppm
#' (fitted point process model for several point patterns).
#' Returns a list of vectors, split by pattern identifier.
fitted.mppm <- function(object, ..., type="lambda", dataonly=FALSE) {
#  sumry <- summary(object)
  type <- pickoption("type", type, c(lambda="lambda",
                                     cif ="lambda",
                                     trend ="trend"),
                     multi=FALSE, exact=FALSE)
  # extract fitted model object and data frame
  glmfit <- object$Fit$FIT
  glmdata <- object$Fit$moadf
  # interaction names
  Vnames <- unlist(object$Fit$Vnamelist)
  interacting <- (length(Vnames) > 0)
  # row identifier
  id <- glmdata$id
  # Modification of `glmdata' may be required
  if(interacting)
    switch(type,
           trend={
             # zero the interaction statistics
             glmdata[ , Vnames] <- 0
           },
           lambda={
             # Find any dummy points with zero conditional intensity
             forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf)
             # exclude from predict.glm
             glmdata <- glmdata[!forbid, ]
           })
  # Compute predicted [conditional] intensity values
  values <- predict(glmfit, newdata=glmdata, type="response")
  # Note: the `newdata' argument is necessary in order to obtain
  # predictions at all quadrature points. If it is omitted then
  # we would only get predictions at the quadrature points j
  # where glmdata$SUBSET[j]=TRUE.
  if(interacting && type=="lambda") {
    # reinsert zeroes for the dummy points excluded above
    vals <- numeric(length(forbid))
    vals[forbid] <- 0
    vals[!forbid] <- values
    values <- vals
  }
  names(values) <- NULL
  if(dataonly) {
    # extract only data values
    isdata <- (glmdata$.mpl.Y != 0)
    values <- values[isdata]
    id <- id[isdata]
  }
  ## split by pattern identifier: one vector per point pattern
  return(split(values, id))
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/pairdistlpp.R0000644000176200001440000000560713333543255015012 0ustar liggesusers#
# pairdistlpp.R
#
#  $Revision: 1.12 $ $Date: 2017/06/05 10:31:58 $
#
#
#  pairdist.lpp
#   Calculates the shortest-path distance between each pair of points
#   in a point pattern on a linear network.
#

#' Matrix of shortest-path distances between all pairs of points of X,
#' an 'lpp' (point pattern on a linear network).
#' method="C" uses compiled code; "interpreted" is the reference R loop.
pairdist.lpp <- function(X, ..., method="C") {
  stopifnot(inherits(X, "lpp"))
  stopifnot(method %in% c("C", "interpreted"))
  #
  n <- npoints(X)
  ## initialise: Inf off-diagonal (unreachable), 0 on the diagonal
  pairdistmat <- matrix(Inf,n,n)
  diag(pairdistmat) <- 0
  #
  L <- as.linnet(X, sparse=FALSE)
  #
  if(any(is.infinite(L$dpath))) {
    #' disconnected network
    ## handle each connected component separately, recursively
    lab <- connected(L, what="labels")
    subsets <- split(seq_len(nvertices(L)), lab)
    for(i in seq_along(subsets)) {
      Xi <- thinNetwork(X, retainvertices=subsets[[i]])
      witch <- attr(Xi, "retainpoints")
      pairdistmat[witch, witch] <- pairdist.lpp(Xi, method=method)
    }
    return(pairdistmat)
  }
  #
  Y <- as.ppp(X)
  Lvert <- L$vertices
  from <- L$from
  to <- L$to
  dpath <- L$dpath
  # nearest segment for each point
  pro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg
  if(method == "interpreted") {
    # loop through all pairs of data points
    for (i in 1:(n-1)) {
      proi <- pro[i]
      Xi <- Y[i]
      nbi1 <- from[proi]
      nbi2 <- to[proi]
      vi1 <- Lvert[nbi1]
      vi2 <- Lvert[nbi2]
      dXi1 <- crossdist(Xi, vi1)
      dXi2 <- crossdist(Xi, vi2)
      for (j in (i+1):n) {
        Xj <- Y[j]
        proj <- pro[j]
        if(proi == proj) {
          # points i and j lie on the same segment
          # use Euclidean distance
          d <- crossdist(Xi, Xj)
        } else {
          # shortest path from i to j passes through ends of segments
          nbj1 <- from[proj]
          nbj2 <- to[proj]
          vj1 <- Lvert[nbj1]
          vj2 <- Lvert[nbj2]
          # Calculate shortest of 4 possible paths from i to j
          d1Xj <- crossdist(vj1,Xj)
          d2Xj <- crossdist(vj2,Xj)
          d11 <- dXi1 + dpath[nbi1,nbj1] + d1Xj
          d12 <- dXi1 + dpath[nbi1,nbj2] + d2Xj
          d21 <- dXi2 + dpath[nbi2,nbj1] + d1Xj
          d22 <- dXi2 + dpath[nbi2,nbj2] + d2Xj
          d <- min(d11,d12,d21,d22)
        }
        # store result (matrix is symmetric)
        pairdistmat[i,j] <- pairdistmat[j,i] <- d
      }
    }
  } else {
    # C code
    # convert indices to start at 0
    from0 <- from - 1L
    to0 <- to - 1L
    segmap <- pro - 1L
    ## NOTE(review): 'ns' is passed with as.double() while the other
    ## counts use as.integer(); the C routine presumably expects an
    ## integer segment count - confirm against linpairdist's signature.
    zz <- .C("linpairdist",
             np = as.integer(n),
             xp = as.double(Y$x),
             yp = as.double(Y$y),
             nv = as.integer(Lvert$n),
             xv = as.double(Lvert$x),
             yv = as.double(Lvert$y),
             ns = as.double(L$n),
             from = as.integer(from0),
             to = as.integer(to0),
             dpath = as.double(dpath),
             segmap = as.integer(segmap),
             answer = as.double(numeric(n*n)),
             PACKAGE = "spatstat")
    pairdistmat <- matrix(zz$answer, n, n)
  }
  return(pairdistmat)
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/smoothfv.R0000644000176200001440000000323113333543255014313 0ustar liggesusers#
#  smoothfv.R
#
#   $Revision: 1.14 $  $Date: 2017/12/30 05:14:18 $
#
# smooth.fv <- function(x, which="*", ...,
#                       method=c("smooth.spline", "loess"),
#                       xinterval=NULL) {
#   .Deprecated("Smooth.fv", package="spatstat",
#               msg="smooth.fv is deprecated: use the generic Smooth with a capital S")
#   Smooth(x, which=which, ..., method=method, xinterval=xinterval)
# }

#' Smooth the selected columns of a function table ('fv' object),
#' using smooth.spline() or loess(); non-finite entries and values
#' outside 'xinterval' are left untouched.
Smooth.fv <- function(X, which="*", ...,
                      method=c("smooth.spline", "loess"),
                      xinterval=NULL) {
  x <- X
  stopifnot(is.character(which))
  method <- match.arg(method)
  if(!is.null(xinterval))
    check.range(xinterval)
  if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) {
    ## expand abbreviations like "*" to actual column names
    if(which == ".x")
      stop("Cannot smooth the function argument")
    which <- fvnames(x, which)
  }
  if(any(nbg <- !(which %in% names(x))))
    stop(paste("Unrecognised column",
               ngettext(sum(nbg), "name", "names"),
               commasep(sQuote(which[nbg])),
               "in argument", sQuote("which")))
  xx <- x[[fvnames(x, ".x")]]
  # process each column of function values
  for(ynam in which) {
    yy <- x[[ynam]]
    ok <- is.finite(yy)
    if(!is.null(xinterval))
      ok <- ok & inside.range(xx, xinterval)
    switch(method,
           smooth.spline = {
             ss <- smooth.spline(xx[ok], yy[ok], ...)
             yhat <- predict(ss, xx[ok])$y
           },
           loess = {
             df <- data.frame(x=xx[ok], y=yy[ok])
             lo <- loess(y ~ x, df, ...)
             yhat <- predict(lo, df[,"x", drop=FALSE])
           })
    yy[ok] <- yhat
    x[[ynam]] <- yy
  }
  return(x)
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/randomImage.R0000644000176200001440000000057313333543255014677 0ustar liggesusers#'
#'   randomImage.R
#'
#'   Functions for generating random images
#'
#'   $Revision: 1.1 $  $Date: 2015/03/23 10:44:04 $
#'

#' Pixel image of independent random noise on window w,
#' with pixel values generated by rgen (default runif);
#' extra arguments are split between as.mask() and rgen.
rnoise <- function(rgen=runif, w=square(1), ...) {
  a <- do.call.matched(as.mask, list(w=w, ...), sieve=TRUE)
  W <- a$result
  argh <- a$otherargs
  Z <- as.im(W)
  ## number of pixels inside the window
  n <- sum(W$m)
  Z[] <- do.call(rgen, append(list(n=n), argh))
  return(Z)
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/dffit.R0000644000176200001440000000212213333543254013542 0ustar liggesusers#'
#'  dffit.R
#'
#'  $Revision: 1.1 $ $Date: 2018/04/19 05:04:59 $
#'
#'  Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2018

#' Generic: DFFIT influence diagnostic for fitted models.
dffit <- function(object, ...) UseMethod("dffit")

#' Method for fitted point process models ('ppm'):
#' DFFIT = dfbetas weighted by the model matrix.  Continues below.
dffit.ppm <- function(object, ..., collapse=FALSE, dfb=NULL) {
  db <- dfb %orifnull% dfbetas(object, ...)
  Z <- model.matrix(object, drop=FALSE, irregular=TRUE)
  if(!all(dim(db) == dim(Z))) {
    #' internal error - mismatch in quadrature points - fall back
    U <- db$loc
    Z <- sapply(model.images(object, irregular=TRUE), "[", i=U)
  }
  #' ensure 0 * (-Inf) = 0
  if(any(a <- (db$val == 0) & (Z == -Inf))) Z[a] <- 0
  #' smoothed density must be handled separately
  sm <- attr(db, "smoothdensity")
  attr(db, "smoothdensity") <- NULL
  #' do the main calculation
  Y <- db * Z
  #' also calculate the smoothed density
  if(!is.null(sm)) {
    ZZ <- model.images(object, irregular=TRUE)
    HH <- mapply(harmonise, ZZ=ZZ, sm=sm)
    sm <- mapply("*", e1=HH$sm, e2=HH$ZZ, SIMPLIFY=FALSE)
    attr(Y, "smoothdensity") <- as.solist(sm)
  }
  ## optionally sum the contributions over coefficients
  if(collapse) Y <- Reduce("+", unstack(Y))
  return(Y)
}
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/multihard.R0000644000176200001440000001446313333543255014450 0ustar liggesusers#
#
#    multihard.R
#
#    $Revision: 1.18 $ $Date: 2018/03/15 07:37:41 $
#
#    The Hard core process
#
#    Hardcore()    create an instance of the Hard Core process
#                 [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#

MultiHard <- local({

  # .... multitype hard core potential
  MHpotential <- function(d, tx, tu, par) {
     # arguments:
     # d[i,j] distance between points X[i] and U[j]
     # tx[i]  type (mark) of point X[i]
     # tu[i]  type (mark) of point U[j]
     #
     # get matrices of interaction radii
     h <- par$hradii
     # get possible marks and validate
     if(!is.factor(tx) || !is.factor(tu))
	stop("marks of data and dummy points must be factor variables")
     lx <- levels(tx)
     lu <- levels(tu)
     if(length(lx) != length(lu) || any(lx != lu))
	stop("marks of data and dummy points do not have same possible levels")
     if(!identical(lx, par$types))
        stop("data and model do not have the same possible levels of marks")
     if(!identical(lu, par$types))
        stop("dummy points and model do not have the same possible levels of marks")
     # ensure factor levels are acceptable for column names (etc)
     lxname <- make.names(lx, unique=TRUE)
     # list all UNORDERED pairs of types to be checked
     # (the interaction must be symmetric in type, and scored as such)
     uptri <- (row(h) <= col(h)) & (!is.na(h))
     mark1 <- (lx[row(h)])[uptri]
     mark2 <- (lx[col(h)])[uptri]
     # corresponding names
     mark1name <- (lxname[row(h)])[uptri]
     mark2name <- (lxname[col(h)])[uptri]
     vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x")
     vname <- paste("mark", vname, sep="")
     npairs <- length(vname)
     # list all ORDERED pairs of types to be checked
     # (to save writing the same code twice)
     different <- mark1 != mark2
     mark1o <- c(mark1, mark2[different])
     mark2o <- c(mark2, mark1[different])
     nordpairs <- length(mark1o)
     # unordered pair corresponding to each ordered pair
     ucode <- c(1:npairs, (1:npairs)[different])
     #
     # create numeric array for result
     z <- array(0, dim=c(dim(d), npairs),
                dimnames=list(character(0), character(0), vname))
     # go....
     if(length(z) > 0) {
       # apply the relevant hard core distance to each pair of points
       hxu <- h[ tx, tu ]
       forbid <- (d < hxu)
       forbid[is.na(forbid)] <- FALSE
       # form the potential: -Inf where the hard core is violated
       value <- array(0, dim=dim(d))
       value[forbid] <- -Inf
       # assign value[i,j] -> z[i,j,k] where k is relevant interaction code
       for(i in 1:nordpairs) {
         # data points with mark m1
         Xsub <- (tx == mark1o[i])
         # quadrature points with mark m2
         Qsub <- (tu == mark2o[i])
         # assign
         z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub]
       }
     }
     attr(z, "IsOffset") <- TRUE
     return(z)
   }
   #### end of 'pot' function ####

  # ............ template object ...................
  BlankMH <- list(
    name     = "Multitype Hardcore process",
    creator  = "MultiHard",
    family   = "pairwise.family", # evaluated later
    pot      = MHpotential,
    par      = list(types=NULL, hradii = NULL), # filled in later
    parnames = c("possible types", "hardcore distances"),
    pardesc  = c("vector of possible types",
                 "matrix of hardcore distances"),
    hasInf   = TRUE,
    ## self-starter: fill in types and hardcore radii from the data
    selfstart = function(X, self) {
      types <- self$par$types
      hradii <- self$par$hradii
      if(!is.null(types) && !is.null(hradii)) return(self)
      if(is.null(types)) types <- levels(marks(X))
      if(is.null(hradii)) {
        ## default radii: shrunken minimum nearest-neighbour distances
        ## between each pair of types
        marx <- marks(X)
        d <- nndist(X, by=marx)
        h <- aggregate(d, by=list(from=marx), min)
        h <- as.matrix(h[, -1, drop=FALSE])
        m <- table(marx)
        mm <- outer(m, m, pmin)
        hradii <- h * mm/(mm+1)
        dimnames(hradii) <- list(types, types)
      }
      MultiHard(types=types,hradii=hradii)
    },
    ## validate the parameters
    init = function(self) {
      types <- self$par$types
      if(!is.null(types)) {
        h <- self$par$hradii
        nt <- length(types)
        if(!is.null(h)) MultiPair.checkmatrix(h, nt, sQuote("hradii"))
        if(length(types) == 0)
          stop(paste("The", sQuote("types"), "argument should be",
                     "either NULL or a vector of all possible types"))
        if(anyNA(types))
          stop("NA's not allowed in types")
        if(is.factor(types)) {
          types <- levels(types)
        } else {
          types <- levels(factor(types, levels=types))
        }
      }
    },
    update = NULL,  # default OK
    print = function(self) {
         h <- self$par$hradii
         if(waxlyrical('gory')) {
           if(!is.null(h)) splat(nrow(h), "types of points")
           types <- self$par$types
           if(!is.null(types)) {
             splat("Possible types:")
             print(noquote(types))
           } else splat("Possible types:\t not yet determined")
         }
         if(!is.null(h)) {
           splat("Hardcore radii:")
           print(signif(h, getOption("digits")))
         } else splat("Hardcore radii:\t not yet determined")
         invisible()
    },
    interpret = function(coeffs, self) {
      # there are no regular parameters (woo-hoo!)
      return(NULL)
    },
    valid = function(coeffs, self) {
      return(TRUE)
    },
    project = function(coeffs, self) {
      return(NULL)
    },
    ## interaction range: the largest hardcore distance
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      h <- self$par$hradii
      return(max(0, h, na.rm=TRUE))
    },
    version=NULL # fix later
  )
  class(BlankMH) <- "interact"

  ## Constructor visible to the user.
  MultiHard <- function(hradii=NULL, types=NULL) {
    if((missing(hradii) || !is.matrix(hradii)) && is.matrix(types)) {
      ## old syntax: (types=NULL, hradii)
      hradii <- types
      types <- NULL
    }
    ## zero radii mean 'no interaction': encode as NA
    if(!is.null(hradii)) hradii[hradii == 0] <- NA
    out <- instantiate.interact(BlankMH, list(types=types, hradii = hradii))
    if(!is.null(types))
      dimnames(out$par$hradii) <- list(types, types)
    return(out)
  }

  MultiHard <- intermaker(MultiHard, BlankMH)

  MultiHard
})
## NOTE(review): the next line is raw tar-archive member-header residue
## (file boundary in the package archive), not R code.
spatstat/R/pairwise.family.R0000644000176200001440000004751313333543255015564 0ustar liggesusers#
#
#
#    pairwise.family.S
#
#    $Revision: 1.71 $ $Date: 2018/04/06 08:55:03 $
#
#    The pairwise interaction family of point process models
#
#    pairwise.family:      object of class 'isf' defining pairwise interaction
#
#
# -------------------------------------------------------------------
#

pairwise.family <-
  list(
         name  = "pairwise",
         print = function(self) {
              cat("Pairwise interaction family\n")
         },
         plot = function(fint, ..., d=NULL, plotit=TRUE) {
           verifyclass(fint, "fii")
           inter <- fint$interaction
           unitz <- unitname(fint)
           if(is.null(inter) || is.null(inter$family)
              || inter$family$name != "pairwise")
             stop("Tried to plot the wrong kind of interaction")
           # get fitted coefficients of interaction terms
           # and set coefficients of offset terms to 1
           Vnames <- fint$Vnames
           IsOffset
<- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) { # compute potential function as `fv' object dd <- matrix(d, ncol=1) p <- pairpot(dd, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=ylim))) return(invisible(fun)) } else{ # compute each potential and store in `fasp' object if(!is.factor(types)) types <- factor(types, levels=types) m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- 
vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { # relevant position in matrix ijpos <- i + (j-1) * m which[i,j] <- ijpos # extract values of potential yy <- y[tx == types[i], j] # make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1"), unitname=unitz) # } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(funz), list(...), list(ylim=ylim))) return(invisible(funz)) } }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { # # This is the eval function for the `pairwise' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairwise' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) 
# correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # # The pair potential function 'pairpot' should be either # pairpot(d, par) [for potentials that don't depend on marks] # or # pairpot(d, tx, tu, par) [for potentials that do depend on mark] # where d is a matrix of interpoint distances, # tx is the vector of types for the data points, # tu is the vector of types for all quadrature points # and # par is a list of parameters for the potential. # # The additional argument 'splitInf' is also permitted. # # It must return a matrix with the same dimensions as d # or an array with its first two dimensions the same as the dimensions of d. pt <- PairPotentialType(pairpot) # includes validation of pair potential ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) { POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) } else { POT <- do.call.matched(pairpot, list(d=M, tx=marks(X), tu=marks(U), par=potpars)) } # Determine whether each component of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) #' positive case if(splitInf) { IsNegInf <- (POT == -Inf) POT[IsNegInf] <- 0 } # handle corrections if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong 
dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 0) { nplanes <- dim(POT)[3] for(k in 1:nplanes) { POT[cbind(EqualPairs, k)] <- 0 if(splitInf) IsNegInf[cbind(EqualPairs, k)] <- FALSE } } # reattach the negative infinity for re-use by special code if(splitInf) attr(POT, "IsNegInf") <- IsNegInf # Return just the pair potential? if(pot.only) return(POT) # Sum the pairwise potentials over data points for each quadrature point V <- apply(POT, c(2,3), sum) # Handle positive case if(splitInf) attr(V, "-Inf") <- apply(IsNegInf, 2, any) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairwise.family$suffstat") { # for pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"pairwise")) stop("Model is not a pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[numeric(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] # parameters order2 <- names(coef(model)) %in% 
model$internal$Vnames order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions can be determined from INxIN o1terms <- momINxIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { #' Sufficient statistic for second order conditional intensity #' for pairwise interaction processes #' Equivalent to evaluating pair potential. if(is.ppp(X)) { seqX <- seq_len(npoints(X)) E <- cbind(seqX, seqX) R <- reach(inte) result <- pairwise.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) M <- attr(result, "IsNegInf") if(sparseOK) result <- as.sparse3Darray(result) if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") #' collapse vector-valued potential, yielding a matrix M <- apply(M, c(1,2), any) if(!is.matrix(M)) M <- matrix(M, nrow=nX) #' count conflicts hits <- colSums(M) #' hits[j] == 1 implies that X[j] violates hard core with only one X[i] #' and therefore changes status if X[i] is deleted. 
deltaInf <- M deltaInf[, hits != 1] <- FALSE if(sparseOK) deltaInf <- as(deltaInf, "sparseMatrix") #' attr(result, "deltaInf") <- deltaInf } } else if(is.quad(X)) { U <- union.quad(X) izdat <- is.data(X) nU <- npoints(U) nX <- npoints(X$data) seqU <- seq_len(nU) E <- cbind(seqU, seqU) R <- reach(inte) result <- pairwise.family$eval(U,U,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) M <- attr(result, "IsNegInf") if(sparseOK) result <- as.sparse3Darray(result) if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") #' consider conflicts with data points MXU <- M[izdat, , , drop=FALSE] #' collapse vector-valued potential, yielding a matrix MXU <- apply(MXU, c(1,2), any) if(!is.matrix(MXU)) MXU <- matrix(MXU, nrow=nX) #' count data points conflicting with each quadrature point nhitdata <- colSums(MXU) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], deltaInf <- apply(M, c(1,2), any) deltaInf[izdat, nhitdata != 1] <- FALSE #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. 
deltaInf[!izdat, nhitdata != 0] <- FALSE #' if(sparseOK) deltaInf <- as(deltaInf, "sparseMatrix") #' attr(result, "deltaInf") <- deltaInf } } return(result) } ######### end of function $delta2 ) ######### end of list class(pairwise.family) <- "isf" # externally visible PairPotentialType <- function(pairpot) { stopifnot(is.function(pairpot)) fop <- names(formals(pairpot)) v <- match(list(fop), list(c("d", "par"), c("d", "tx", "tu", "par"))) if(is.na(v)) stop("Formal arguments of pair potential function are not understood", call.=FALSE) marked <- (v == 2) return(list(marked=marked)) } evalPairPotential <- function(X, P, E, pairpot, potpars, R) { # Evaluate pair potential without edge correction weights nX <- npoints(X) nP <- npoints(P) pt <- PairPotentialType(pairpot) # includes validation # determine dimension of potential, etc fakePOT <- do.call.matched(pairpot, list(d=matrix(, 0, 0), tx=marks(X)[integer(0)], tu=marks(P)[integer(0)], par=potpars)) IsOffset <- attr(fakePOT, "IsOffset") fakePOT <- ensure3Darray(fakePOT) Vnames <- dimnames(fakePOT)[[3]] p <- dim(fakePOT)[3] # Identify close pairs X[i], P[j] cl <- crosspairs(X, P, R, what="ijd") I <- cl$i J <- cl$j D <- matrix(cl$d, ncol=1) # deal with empty cases if(nX == 0 || nP == 0 || length(I) == 0) { di <- c(nX, nP, p) dn <- list(NULL, NULL, Vnames) result <- array(0, dim=di, dimnames=dn) attr(result, "IsOffset") <- IsOffset return(result) } # evaluate potential for close pairs # POT is a 1-column matrix or array, with rows corresponding to close pairs if(!pt$marked) { # unmarked POT <- do.call.matched(pairpot, list(d=D, par=potpars)) IsOffset <- attr(POT, "IsOffset") } else { # marked marX <- marks(X) marP <- marks(P) if(!identical(levels(marX), levels(marP))) stop("Internal error: marks of X and P have different levels") types <- levels(marX) mI <- marX[I] mJ <- marP[J] POT <- NULL # split data by type of P[j] for(k in types) { relevant <- which(mJ == k) if(length(relevant) > 0) { fk <- factor(k, levels=types) 
POTk <- do.call.matched(pairpot, list(d=D[relevant, , drop=FALSE], tx=mI[relevant], tu=fk, par=potpars)) POTk <- ensure3Darray(POTk) if(is.null(POT)) { #' use first result of 'pairpot' to determine dimension POT <- array(, dim=c(length(I), 1, dim(POTk)[3])) #' capture information about offsets, and names of interaction terms IsOffset <- attr(POTk, "IsOffset") Vnames <- dimnames(POTk)[[3]] } # insert values just computed POT[relevant, , ] <- POTk } } } POT <- ensure3Darray(POT) p <- dim(POT)[3] # create result array result <- array(0, dim=c(npoints(X), npoints(P), p), dimnames=list(NULL, NULL, Vnames)) # insert results II <- rep(I, p) JJ <- rep(J, p) KK <- rep(1:p, each=length(I)) IJK <- cbind(II, JJ, KK) result[IJK] <- POT # finally identify identical pairs and set value to 0 if(length(E) > 0) { E.rep <- apply(E, 2, rep, times=p) p.rep <- rep(1:p, each=nrow(E)) result[cbind(E.rep, p.rep)] <- 0 } attr(result, "IsOffset") <- IsOffset return(result) } spatstat/R/rmhstart.R0000644000176200001440000000473413333543255014323 0ustar liggesusers# # # rmhstart.R # # $Revision: 1.12 $ $Date: 2016/02/11 10:17:12 $ # # rmhstart <- function(start, ...) { UseMethod("rmhstart") } rmhstart.rmhstart <- function(start, ...) { return(start) } rmhstart.list <- function(start, ...) 
{ st <- do.call.matched(rmhstart.default, start) return(st) } rmhstart.default <- function(start=NULL, ..., n.start=NULL, x.start=NULL) { if(!is.null(start) || length(list(...)) > 0) stop("Syntax should be rmhstart(n.start) or rmhstart(x.start)") ngiven <- !is.null(n.start) xgiven <- !is.null(x.start) # n.start and x.start are incompatible if(ngiven && xgiven) stop("Give only one of the arguments n.start and x.start") given <- if(ngiven) "n" else if(xgiven) "x" else "none" # Validate arguments if(ngiven && !is.numeric(n.start)) stop("n.start should be numeric") if(xgiven) { # We can't check x.start properly because we don't have the relevant window # Just check that it is INTERPRETABLE as a point pattern xx <- as.ppp(x.start, W=ripras, fatal=FALSE) if(is.null(xx)) stop(paste("x.start should be a point pattern object,", "or coordinate data in a format recognised by as.ppp")) } else xx <- NULL ################################################################### # return augmented list out <- list(n.start=n.start, x.start=x.start, given=given, xx=xx) class(out) <- c("rmhstart", class(out)) return(out) } print.rmhstart <- function(x, ...) { verifyclass(x, "rmhstart") cat("Metropolis-Hastings algorithm starting parameters\n") cat("Initial state: ") switch(x$given, none={ cat("not given\n") }, x = { cat("given as x.start\n") if(is.ppp(x$x.start)) print(x$x.start) else cat(paste("(x,y) coordinates of", x$xx$n, "points (window unspecified)\n")) cat("\n") }, n = { n.start <- x$n.start nstring <- if(length(n.start) == 1) paste(n.start) else paste("(", paste(n.start, collapse=","), ")", sep="") cat(paste("number fixed at n.start =", nstring, "\n")) } ) } update.rmhstart <- function(object, ...) 
{ do.call.matched(rmhstart.default, resolve.defaults(list(...), as.list(object), .StripNull=TRUE)) } spatstat/R/rasterfilter.R0000644000176200001440000000216313333543255015157 0ustar liggesusers#' #' rasterfilter.R #' #' raster filters implemented directly #' #' $Revision: 1.5 $ $Date: 2017/11/18 07:17:18 $ #' rasterfilter <- function(X, f) { X <- as.im(X) dimX <- dim(X) f <- as.matrix(f) if(!all(dim(f) == 3)) stop("f should be a 3 x 3 matrix or image") #' handle NA v <- as.double(X$v) if(hasna <- anyNA(v)) { isna <- is.na(v) v[isna] <- 0 } #' compute z <- .C("raster3filter", nx = as.integer(dimX[2]), ny = as.integer(dimX[1]), a = as.double(v), w = as.double(f), b = as.double(numeric(prod(dimX))), PACKAGE="spatstat") z <- z$b #' handle NA if(hasna) z[isna] <- NA # replace X[] <- z return(X) } #' antialiasing smudge <- function(X) { stopifnot(is.im(X)) xstep <- X$xstep ystep <- X$ystep #' choose a very small bandwidth sigma <- min(xstep, ystep)/2 #' match variance: 2 p step^2 = sigma^2 px <- sigma^2/(2 * xstep^2) py <- sigma^2/(2 * ystep^2) f <- outer(c(py, 1-2*py, py), c(px, 1-2*px, px), "*") #' compute Z <- rasterfilter(X, f) attr(Z, "sigma") <- sigma return(Z) } spatstat/R/effectfun.R0000644000176200001440000001505013612230501014400 0ustar liggesusers# # effectfun.R # # $Revision: 1.25 $ $Date: 2020/01/23 05:36:51 $ # effectfun <- local({ okclasses <- c("ppm", "kppm", "lppm", "dppm", "rppm", "profilepl") effectfun <- function(model, covname, ..., se.fit=FALSE, nvalues=256) { if(!inherits(model, okclasses)) stop(paste("First argument 'model' should be a fitted model of class", commasep(sQuote(okclasses), " or ")), call.=FALSE) orig.model <- model model <- as.ppm(model) dotargs <- list(...) 
#' determine names of covariates involved intern.names <- if(is.marked.ppm(model)) c("x", "y", "marks") else c("x", "y") needed.names <- variablesinformula(rhs.of.formula(formula(model))) #' check for clashes/quirks if("lambda" %in% needed.names) { if(is.dppm(orig.model) && ( identical.formulae(formula(model), ~offset(log(lambda))-1) || identical.formulae(formula(model), ~log(lambda)-1) )) stop("effectfun is not defined for a DPP model with fixed intensity", call.=FALSE) intensityname <- setdiff(c("Lambda", "intensity"), needed.names)[1] } else intensityname <- "lambda" ## validate the relevant covariate if(missing(covname) || is.null(covname)) { mc <- model.covariates(model) if(length(mc) == 1) covname <- mc else stop("covname must be provided") } if(!is.character(covname)) stop("covname should be a character string", call.=FALSE) if(length(covname) != 1L) stop("covname should be a single character string", call.=FALSE) # check that fixed values for all other covariates are provided given.covs <- names(dotargs) if(any(uhoh <- !(needed.names %in% c(given.covs, covname)))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "A value for the covariate", "Values for the covariates"), commasep(dQuote(needed.names[uhoh])), "must be provided (as", ngettext(nuh, "an argument", "arguments"), "to effectfun)")) } #' establish type and range of covariate values check.1.integer(nvalues) stopifnot(nvalues >= 128) N0 <- nvalues if(covname == "x") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$xrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "y") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$yrange Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) } else if(covname == "marks") { covtype <- "factor" Zvals <- levels(marks(data.ppm(model))) } else { # covariate is external if(is.data.frame(covdf <- model$covariates) && (covname %in% names(covdf))) { Z <- covdf[,covname] covtype <- typeof(Z) if(covtype == "double") covtype <- "real" 
switch(covtype, real={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, integer={ Zr <- range(Z) Zvals <- seq(from=Zr[1L], to=Zr[2L], by=ceiling((diff(Zr)+1)/N0)) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } else { Z <- getdataobjects(covname, environment(formula(model)), model$covariates)[[1L]] if(is.null(Z)) stop(paste("Cannot find covariate", sQuote(covname)), call.=FALSE) # convert to image if(!is.im(Z)) Z <- as.im(Z, W=as.owin(model)) covtype <- Z$type switch(covtype, real={ Zr <- summary(Z)$range Zvals <- seq(from=Zr[1L], to=Zr[2L], length.out=N0) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } } # set up data frames of fake data for predict method # First set up default, constant value for each covariate N <- length(Zvals) fakeloc <- resolve.defaults(dotargs, list(x=0, y=0))[c("x","y")] if(is.marked.ppm(model)) { if("marks" %in% names(dotargs)) { fakeloc$marks <- dotargs$marks dotargs <- dotargs[names(dotargs) != "marks"] } else { lev <- levels(marks(data.ppm(model))) fakeloc$marks <- lev[1L] } } fakeloc <- lapply(fakeloc, padout, N=N) fakecov <- lapply(dotargs, padout, N=N) # Overwrite value for covariate of interest if(covname %in% intern.names) fakeloc[[covname]] <- Zvals else fakecov[[covname]] <- Zvals # convert to data frame fakeloc <- do.call(data.frame, fakeloc) fakecov <- if(length(fakecov) > 0) do.call(data.frame, fakecov) else NULL # # Now predict pred <- predict(orig.model, locations=fakeloc, covariates=fakecov, se=se.fit) if(!se.fit) lambda <- pred else { lambda <- pred$estimate se <- pred$se sedf <- data.frame(se =se, hi = lambda + 2 * se, lo = lambda - 2 * se) } # dfin <- if(!is.null(fakecov)) cbind(fakeloc, fakecov) else fakeloc dfin <- dfin[covname] dflam <- data.frame(lambda=lambda) names(dflam) <- intensityname df <- cbind(dfin, dflam) # 
if(covtype != "real") { result <- df if(se.fit) result <- cbind(result, sedf) } else { bc <- paren(covname) result <- fv(df, argu=covname, ylab=substitute(lambda(X), list(X=as.name(covname), lambda=as.name(intensityname))), labl=c(covname, paste("hat(%s)", bc)), valu=intensityname, alim=Zr, desc=c(paste("value of covariate", covname), "fitted intensity"), fname=intensityname) if(se.fit) { result <- bind.fv(result, sedf, labl=c(paste("se[%s]", bc), paste("%s[hi]", bc), paste("%s[lo]", bc)), desc=c("standard error of fitted trend", "upper limit of pointwise 95%% CI for trend", "lower limit of pointwise 95%% CI for trend")) fvnames(result, ".") <- c(intensityname, "hi", "lo") fvnames(result, ".s") <- c("hi", "lo") formula(result) <- paste(". ~ ", covname) } } return(result) } padout <- function(x,N) { rep.int(x[1L],N) } effectfun }) spatstat/R/psp.R0000644000176200001440000006141713564441573013270 0ustar liggesusers# # psp.R # # $Revision: 1.101 $ $Date: 2019/11/18 07:05:03 $ # # Class "psp" of planar line segment patterns # # ################################################# # creator ################################################# psp <- function(x0, y0, x1, y1, window, marks=NULL, check=spatstat.options("checksegments")) { stopifnot(is.numeric(x0)) stopifnot(is.numeric(y0)) stopifnot(is.numeric(x1)) stopifnot(is.numeric(y1)) stopifnot(is.vector(x0)) stopifnot(is.vector(y0)) stopifnot(is.vector(x1)) stopifnot(is.vector(y1)) stopifnot(length(x0) == length(y0)) stopifnot(length(x1) == length(y1)) stopifnot(length(x0) == length(x1)) ends <- data.frame(x0=x0,y0=y0,x1=x1,y1=y1) if(!missing(window)) verifyclass(window,"owin") if(check) { ok <- inside.owin(x0,y0, window) & inside.owin(x1,y1,window) if((nerr <- sum(!ok)) > 0) stop(paste(nerr, ngettext(nerr, "segment does not", "segments do not"), "lie entirely inside the window.\n"), call.=FALSE) } out <- list(ends=ends, window=window, n = nrow(ends)) # add marks if any if(!is.null(marks)) { if(is.matrix(marks)) marks <- 
as.data.frame(marks) if(is.data.frame(marks)) { omf <- "dataframe" nmarks <- nrow(marks) rownames(marks) <- seq_len(nmarks) whinge <- "The number of rows of marks" } else { omf <- "vector" names(marks) <- NULL nmarks <- length(marks) whinge <- "The length of the marks vector" } if(nmarks != out$n) stop(paste(whinge, "!= length of x and y.\n")) out$marks <- marks out$markformat <- omf } else { out$markformat <- "none" } class(out) <- c("psp", class(out)) return(out) } ###################################################### # conversion ###################################################### is.psp <- function(x) { inherits(x, "psp") } as.psp <- function(x, ..., from=NULL, to=NULL) { # special case: two point patterns if(is.null(from) != is.null(to)) stop(paste("If one of", sQuote("from"), "and", sQuote("to"), "is specified, then both must be specified.\n")) if(!is.null(from) && !is.null(to)) { verifyclass(from, "ppp") verifyclass(to, "ppp") if(from$n != to$n) stop(paste("The point patterns", sQuote("from"), "and", sQuote("to"), "have different numbers of points.\n")) uni <- union.owin(from$window, to$window) Y <- do.call(psp, resolve.defaults(list(from$x, from$y, to$x, to$y), list(...), list(window=uni))) return(Y) } UseMethod("as.psp") } as.psp.psp <- function(x, ..., check=FALSE, fatal=TRUE) { if(!verifyclass(x, "psp", fatal=fatal)) return(NULL) ends <- x$ends psp(ends$x0, ends$y0, ends$x1, ends$y1, window=x$window, marks=x$marks, check=check) } as.psp.data.frame <- function(x, ..., window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { window <- suppressWarnings(as.owin(window,fatal=FALSE)) if(!is.owin(window)) { if(fatal) stop("Cannot interpret \"window\" as an object of class owin.\n") return(NULL) } if(checkfields(x, "marks")) { if(is.null(marks)) marks <- x$marks else warning(paste("Column named \"marks\" ignored;\n", "argument named \"marks\" has precedence.\n",sep="")) x$marks <- NULL } if(checkfields(x, c("x0", "y0", "x1", "y1"))) { 
out <- psp(x$x0, x$y0, x$x1, x$y1, window=window, check=check) x <- x[-match(c("x0","y0","x1","y1"),names(x))] } else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) { rr <- x$length/2 dx <- cos(x$angle) * rr dy <- sin(x$angle) * rr bb <- boundingbox(window) rmax <- max(rr) bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax) pattern <- psp(x$xmid - dx, x$ymid - dy, x$xmid + dx, x$ymid + dy, window=bigbox,check=FALSE) out <- pattern[window] x <- x[-match(c("xmid","ymid","length","angle"),names(x))] } else if(ncol(x) >= 4) { out <- psp(x[,1], x[,2], x[,3], x[,4], window=window, check=check) x <- x[-(1:4)] } else { ## data not understood if(fatal) stop("Unable to interpret x as a line segment pattern.", call.=FALSE) return(NULL) } if(ncol(x) > 0) { #' additional columns of mark data in 'x' if(is.null(marks)) { marks <- x } else { warning(paste("Additional columns in x were ignored", "because argument 'marks' takes precedence"), call.=FALSE) } } if(!is.null(marks)) { #' SUPPRESSED: if(identical(ncol(marks), 1L)) marks <- marks[,1L] #' assign marks directly to avoid infinite recursion out$marks <- marks out$markformat <- markformat(marks) } return(out) } as.psp.matrix <- function(x, ..., window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { x <- as.data.frame(x) as.psp(x,...,window=window,marks=marks,check=check,fatal=fatal) } as.psp.default <- function(x, ..., window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { if(checkfields(x,"marks")) { if(is.null(marks)) marks <- x$marks else warning(paste("Component of \"x\" named \"marks\" ignored;\n", "argument named \"marks\" has precedence.\n",sep="")) } if(checkfields(x, c("x0", "y0", "x1", "y1"))) return(psp(x$x0, x$y0, x$x1, x$y1, window=window, marks=marks, check=check)) else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) { rr <- x$length/2 dx <- cos(x$angle) * rr dy <- sin(x$angle) * rr window <- as.owin(window) bb <- 
boundingbox(window) rmax <- max(rr) bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax) pattern <- psp(x$x - dx, x$y - dy, x$x + dx, x$y + dy, window=bigbox, marks=marks, check=FALSE) clipped <- pattern[window] return(clipped) } else if(fatal) stop("Unable to interpret x as a line segment pattern") return(NULL) } as.psp.owin <- function(x, ..., window=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { .Deprecated("edges", package="spatstat") edges(x, ..., window=window, check=check) } edges <- function(x, ..., window=NULL, check=FALSE) { x <- as.owin(x) if(is.null(window)) window <- as.rectangle(x) x <- as.polygonal(x) x0 <- y0 <- x1 <- y1 <- numeric(0) bdry <- x$bdry for(i in seq_along(bdry)) { po <- bdry[[i]] ni <- length(po$x) nxt <- c(2:ni, 1) x0 <- c(x0, po$x) y0 <- c(y0, po$y) x1 <- c(x1, po$x[nxt]) y1 <- c(y1, po$y[nxt]) } out <- psp(x0, y0, x1, y1, window=window, check=check) return(out) } xypolygon2psp <- function(p, w, check=spatstat.options("checksegments")) { verify.xypolygon(p) n <- length(p$x) nxt <- c(2:n, 1) return(psp(p$x, p$y, p$x[nxt], p$y[nxt], window=w, check=check)) } ################# as.data.frame.psp <- function(x, row.names=NULL, ...) 
{ df <- as.data.frame(x$ends, row.names=row.names) if(is.marked(x)) df <- cbind(df, if(x$markformat=="dataframe") marks(x) else data.frame(marks=marks(x))) return(df) } ####### manipulation ########################## append.psp <- function(A,B) { if(is.null(A) && (is.psp(B) || is.null(B))) return(B) if(is.null(B) && is.psp(A)) return(A) verifyclass(A, "psp") verifyclass(B, "psp") stopifnot(identical(A$window, B$window)) marks <- marks(A) %mapp% marks(B) ends <- rbind(A$ends, B$ends) out <- as.psp(ends,window=A$window,marks=marks,check=FALSE) return(out) } rebound.psp <- function(x, rect) { verifyclass(x, "psp") x$window <- rebound.owin(x$window, rect) return(x) } ################################################# # marks ################################################# is.marked.psp <- function(X, ...) { marx <- marks(X, ...) return(!is.null(marx)) } marks.psp <- function(x, ..., dfok = TRUE) { # data frames of marks are as of 19/March 2011 implemented for psp ma <- x$marks if ((is.data.frame(ma) || is.matrix(ma)) && !dfok) stop("Sorry, not implemented when the marks are a data frame.\n") return(ma) } "marks<-.psp" <- function(x, ..., value) { stopifnot(is.psp(x)) if(is.null(value)) { return(unmark(x)) } m <- value if(!(is.vector(m) || is.factor(m) || is.data.frame(m) || is.matrix(m))) stop("Incorrect format for marks") if (is.hyperframe(m)) stop("Hyperframes of marks are not supported in psp objects.\n") nseg <- nsegments(x) if (!is.data.frame(m) && !is.matrix(m)) { if (length(m) == 1) m <- rep.int(m, nseg) else if (nseg == 0) m <- rep.int(m, 0) else if (length(m) != nseg) stop("Number of marks != number of line segments.\n") marx <- m } else { m <- as.data.frame(m) if (ncol(m) == 0) { marx <- NULL } else { if (nrow(m) == nseg) { marx <- m } else { if (nrow(m) == 1 || nseg == 0) { marx <- as.data.frame(lapply(as.list(m), rep.int, times=nseg)) } else stop("Number of rows of data frame != number of points.\n") } } } Y <- as.psp(x$ends, window = x$window, marks = 
marx, check = FALSE) return(Y) } markformat.psp <- function(x) { mf <- x$markformat if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } unmark.psp <- function(X) { X$marks <- NULL X$markformat <- "none" return(X) } ################################################# # plot and print methods ################################################# plot.psp <- function(x, ..., main, add=FALSE, show.all=!add, show.window=show.all, which.marks=1, style=c("colour", "width", "none"), col=NULL, ribbon=show.all, ribsep=0.15, ribwid=0.05, ribn=1024, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) verifyclass(x, "psp") #' n <- nsegments(x) marx <- marks(x) #' style <- match.arg(style) use.marks <- !is.null(marx) && (n != 0) && (style != "none") #' if(use.marks && style == "width") { #' plot marks as line width #' temporary cheat using plot.linfun L <- linnet(endpoints.psp(x), edges=cbind(2*(1:n)-1, 2*(1:n)), sparse=TRUE) if(length(dim(marx))) marx <- marx[,which.marks] f <- function(x,y,seg,tp, values=marx) { values[seg] } g <- linfun(f, L) out <- plot(g, style="width", ..., main=main, add=add, col=col, show.all=show.all, show.window=show.window, do.plot=do.plot) return(invisible(out)) } #' plot marks as colours, if present do.ribbon <- identical(ribbon, TRUE) && use.marks ## ## .... initialise plot; draw observation window ...... 
owinpars <- setdiff(graphicsPars("owin"), "col") if(!do.ribbon) { ## window of x only bb.all <- as.rectangle(as.owin(x)) if(do.plot && (!add || show.window)) do.call.plotfun(plot.owin, resolve.defaults(list(x=x$window, main=if(show.all) main else "", add=add, type = if(show.window) "w" else "n", show.all=show.all), list(...)), extrargs=owinpars) } else { ## enlarged window with room for colour ribbon ## x at left, ribbon at right bb <- as.rectangle(as.owin(x)) xwidth <- diff(bb$xrange) xheight <- diff(bb$yrange) xsize <- max(xwidth, xheight) bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * xsize, bb$yrange) bb.all <- boundingbox(bb.rib, bb) if(do.plot) { pt <- prepareTitle(main) ## establish coordinate system if(!add) do.call.plotfun(plot.owin, resolve.defaults(list(x=bb.all, type="n", main=pt$blank), list(...)), extrargs=owinpars) ## now plot window of x ## with title centred on this window if(show.window) { do.call.plotfun(plot.owin, resolve.defaults(list(x=x$window, add=TRUE, main=main, show.all=TRUE), list(...)), extrargs=owinpars) ## title done. 
main <- "" } } } # plot segments if(n == 0) { result <- symbolmap() attr(result, "bbox") <- bb.all return(invisible(result)) } ## determine colours if any colmap <- NULL if(use.marks) { ## use colours marx <- as.data.frame(marx)[, which.marks] if(is.character(marx) || length(unique(marx)) == 1) marx <- factor(marx) if(is.null(col)) { ## no colour info: use default colour palette nc <- if(is.factor(marx)) { length(levels(marx)) } else { min(256, length(unique(marx))) } colfun <- spatstat.options("image.colfun") col <- colfun(nc) } ## determine colour map if(inherits(col, "colourmap")) { colmap <- colourmap } else if(is.colour(col)) { ## colour values given; create colour map if(is.factor(marx)) { lev <- levels(marx) colmap <- colourmap(col=col, inputs=factor(lev)) } else { if(!all(is.finite(marx))) warning("Some mark values are infinite or NaN or NA") colmap <- colourmap(col=col, range=range(marx, finite=TRUE)) } } else stop("Format of argument 'col' is not recognised") #' map the mark values to colours col <- colmap(marx) } ## convert to greyscale? if(spatstat.options("monochrome")) { col <- to.grey(col) colmap <- to.grey(colmap) } if(do.plot) { ## plot segments do.call.plotfun(segments, resolve.defaults(as.list(x$ends), list(...), list(col=col), .MatchNull=FALSE, .StripNull=TRUE), extrargs=names(par())) ## plot ribbon if(do.ribbon) plot(colmap, vertical=TRUE, add=TRUE, xlim=bb.rib$xrange, ylim=bb.rib$yrange) } # return colour map result <- colmap %orifnull% colourmap() attr(result, "bbox") <- bb.all return(invisible(result)) } print.psp <- function(x, ...) 
{ verifyclass(x, "psp") n <- x$n ism <- is.marked(x, dfok = TRUE) splat(if(ism) "marked" else NULL, "planar line segment pattern:", n, ngettext(n, "line segment", "line segments")) if(ism) { mks <- marks(x, dfok = TRUE) if(is.data.frame(mks)) { splat("Mark variables: ", paste(names(mks), collapse = ", ")) } else { if(is.factor(mks)) { splat("multitype, with levels =", paste(levels(mks), collapse = "\t")) } else { splat("marks are", if(is.numeric(mks)) "numeric," else NULL, "of type", sQuote(typeof(mks))) } } } print(x$window) return(invisible(NULL)) } unitname.psp <- function(x) { return(unitname(x$window)) } "unitname<-.psp" <- function(x, value) { w <- x$window unitname(w) <- value x$window <- w return(x) } #################################################### # summary information #################################################### endpoints.psp <- function(x, which="both") { verifyclass(x, "psp") ends <- x$ends n <- x$n switch(which, both={ first <- second <- rep.int(TRUE, n) }, first={ first <- rep.int(TRUE, n) second <- rep.int(FALSE, n) }, second={ first <- rep.int(FALSE, n) second <- rep.int(TRUE, n) }, left={ first <- (ends$x0 < ends$x1) second <- !first }, right={ first <- (ends$x0 > ends$x1) second <- !first }, lower={ first <- (ends$y0 < ends$y1) second <- !first }, upper={ first <- (ends$y0 > ends$y1) second <- !first }, stop(paste("Unrecognised option: which=", sQuote(which))) ) ok <- rbind(first, second) xmat <- rbind(ends$x0, ends$x1) ymat <- rbind(ends$y0, ends$y1) idmat <- col(ok) xx <- as.vector(xmat[ok]) yy <- as.vector(ymat[ok]) id <- as.vector(idmat[ok]) result <- ppp(xx, yy, window=x$window, check=FALSE) attr(result, "id") <- id return(result) } midpoints.psp <- function(x) { verifyclass(x, "psp") xm <- eval(expression((x0+x1)/2), envir=x$ends) ym <- eval(expression((y0+y1)/2), envir=x$ends) win <- x$window ok <- inside.owin(xm, ym, win) if(any(!ok)) { warning(paste("Some segment midpoints lie outside the original window;", "window replaced 
by bounding box")) win <- boundingbox(win) } ppp(x=xm, y=ym, window=win, check=FALSE) } lengths.psp <- function(x, squared=FALSE) { verifyclass(x, "psp") lengths2 <- eval(expression((x1-x0)^2 + (y1-y0)^2), envir=x$ends) return(if(squared) lengths2 else sqrt(lengths2)) } angles.psp <- function(x, directed=FALSE) { verifyclass(x, "psp") a <- eval(expression(atan2(y1-y0, x1-x0)), envir=x$ends) if(!directed) a <- a %% pi return(a) } summary.psp <- function(object, ...) { verifyclass(object, "psp") len <- lengths.psp(object) out <- list(n = object$n, len = summary(len), totlen = sum(len), ang= summary(angles.psp(object)), w = summary.owin(object$window), marks=if(is.null(object$marks)) NULL else summary(object$marks), unitinfo=summary(unitname(object))) class(out) <- c("summary.psp", class(out)) return(out) } print.summary.psp <- function(x, ...) { cat(paste(x$n, "line segments\n")) cat("Lengths:\n") print(x$len) unitblurb <- paste(x$unitinfo$plural, x$unitinfo$explain) cat(paste("Total length:", x$totlen, unitblurb, "\n")) cat(paste("Length per unit area:", x$totlen/x$w$area, "\n")) cat("Angles (radians):\n") print(x$ang) print(x$w) if(!is.null(x$marks)) { cat("Marks:\n") print(x$marks) } return(invisible(NULL)) } extrapolate.psp <- function(x, ...) 
{ verifyclass(x, "psp") theta <- (angles.psp(x) + pi/2) %% (2*pi) p <- with(x$ends, x1*cos(theta) + y1 * sin(theta)) result <- infline(p=p, theta=theta) return(result) } ######################################################## # subsets ######################################################## "[.psp" <- function(x, i, j, drop, ..., fragments=TRUE) { verifyclass(x, "psp") if(missing(i) && missing(j)) return(x) if(!missing(i)) { style <- if(inherits(i, "owin")) "window" else "index" switch(style, window={ x <- clip.psp(x, window=i, check=FALSE, fragments=fragments) }, index={ enz <- x$ends[i, ] win <- x$window marx <- marksubset(x$marks, i, markformat(x)) x <- with(enz, psp(x0, y0, x1, y1, window=win, marks=marx, check=FALSE)) }) } if(!missing(j)) x <- x[j] # invokes code above return(x) } #################################################### # affine transformations #################################################### affine.psp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "psp") W <- affine.owin(X$window, mat=mat, vec=vec, ...) E <- X$ends ends0 <- affinexy(list(x=E$x0,y=E$y0), mat=mat, vec=vec) ends1 <- affinexy(list(x=E$x1,y=E$y1), mat=mat, vec=vec) psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W, marks=marks(X, dfok=TRUE), check=FALSE) } shift.psp <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "psp") W <- Window(X) if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; argument origin has precedence") locn <- interpretAsOrigin(origin, W) vec <- -locn } # perform shift W <- shift.owin(W, vec=vec, ...) E <- X$ends ends0 <- shiftxy(list(x=E$x0,y=E$y0), vec=vec, ...) ends1 <- shiftxy(list(x=E$x1,y=E$y1), vec=vec, ...) 
  # (continuation of shift.psp) assemble the shifted pattern
  Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W,
           marks=marks(X, dfok=TRUE), check=FALSE)
  # tack on shift vector
  attr(Y, "lastshift") <- vec
  return(Y)
}

# Rotate a segment pattern by 'angle' radians (via rotxy).
#   centre - optional centre of rotation: implemented by shifting the
#            pattern so 'centre' sits at the origin, rotating, then
#            shifting back by the recorded "lastshift" vector.
rotate.psp <- function(X, angle=pi/2, ..., centre=NULL) {
  verifyclass(X, "psp")
  if(!is.null(centre)) {
    X <- shift(X, origin=centre)
    negorigin <- getlastshift(X)
  } else negorigin <- NULL
  W <- rotate.owin(X$window, angle=angle, ...)
  E <- X$ends
  ends0 <- rotxy(list(x=E$x0,y=E$y0), angle=angle)
  ends1 <- rotxy(list(x=E$x1,y=E$y1), angle=angle)
  Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W,
           marks=marks(X, dfok=TRUE), check=FALSE)
  if(!is.null(negorigin))
    Y <- shift(Y, -negorigin)
  return(Y)
}

# Does the pattern contain no segments?
is.empty.psp <- function(x) { return(x$n == 0) }

# Interactive identification of segments (analogue of identify for points).
# Repeatedly reads a clicked location via spatstatLocator, maps it to the
# nearest segment (project2segment), and if plot=TRUE writes that segment's
# label at its midpoint. Stops after n distinct selections or when the
# locator is interrupted; returns the vector of selected segment indices.
identify.psp <- function(x, ..., labels=seq_len(nsegments(x)),
                         n=nsegments(x), plot=TRUE) {
  Y <- x
  W <- as.owin(Y)
  mids <- midpoints.psp(Y)
  # label position code for graphics::text ('pos': 1=below, 2=left,
  # 3=above, 4=right), chosen from the quadrant of the segment angle
  poz <- c(1, 2,4, 3)[(floor(angles.psp(Y)/(pi/4)) %% 4) + 1L]
  if(!(is.numeric(n) && (length(n) == 1) && (n %% 1 == 0) && (n >= 0)))
    stop("n should be a single integer")
  out <- integer(0)
  while(length(out) < n) {
    xy <- spatstatLocator(1)
    # check for interrupt exit
    if(length(xy$x) == 0)
      return(out)
    # find nearest segment
    X <- ppp(xy$x, xy$y, window=W)
    ident <- project2segment(X, Y)$mapXY
    # add to list
    if(ident %in% out) {
      cat(paste("Segment", ident, "already selected\n"))
    } else {
      if(plot) {
        # Display
        mi <- mids[ident]
        li <- labels[ident]
        po <- poz[ident]
        do.call.matched(graphics::text.default,
                        resolve.defaults(list(x=mi$x, y=mi$y, labels=li),
                                         list(...),
                                         list(pos=po)))
      }
      out <- c(out, ident)
    }
  }
  # exit if max n reached
  return(out)
}

# Generic: number of segments in an object.
nsegments <- function(x) {
  UseMethod("nsegments")
}

# For a "psp" object the segment count is stored directly in x$n.
nobjects.psp <- nsegments.psp <- function(x) { x$n }

# Convert a segment pattern to the point pattern of both endpoints of
# every segment; marks are duplicated so each endpoint inherits the
# mark of its segment. ('fatal' is accepted but not consulted here.)
as.ppp.psp <- function (X, ..., fatal=TRUE) {
  Y <- endpoints.psp(X, which="both")
  m <- marks(X)
  marks(Y) <- markappend(m, m)
  return(Y)
}

domain.psp <- Window.psp <- function(X, ...)
{
  # domain.psp / Window.psp (body): the window containing the pattern
  as.owin(X)
}

# Assign a new window to a segment pattern; the pattern is clipped to
# the new window by the "[" operator.
"Window<-.psp" <- function(X, ..., value) {
  verifyclass(value, "owin")
  X[value]
}

# Interactive editing: edit the segment coordinates as a data frame,
# then rebuild the psp object in the original window.
edit.psp <- function(name, ...) {
  x <- name
  y <- edit(as.data.frame(x), ...)
  xnew <- as.psp(y, window=Window(x))
  return(xnew)
}

# text method: write text at the midpoint of each segment, positioned
# according to the segment's orientation
# (position codes as in 'pos' of graphics::text).
text.psp <- function(x, ...) {
  mids <- midpoints.psp(x)
  poz <- c(1, 2,4, 3)[(floor(angles.psp(x)/(pi/4)) %% 4) + 1L]
  do.call.matched(graphics::text.default,
                  resolve.defaults(list(x=mids$x, y=mids$y),
                                   list(...),
                                   list(pos=poz),
                                   .StripNull=TRUE))
  return(invisible(NULL))
}

# intensity of a segment pattern: total (optionally weighted) segment
# length per unit area of the window; for multitype patterns, a vector
# of intensities, one per mark level (levels with no segments get 0).
#   weights - either a numeric vector with one weight per segment, or
#             an expression evaluated in the data frame of coordinates
#             and marks
intensity.psp <- function(X, ..., weights=NULL) {
  len <- lengths.psp(X)
  a <- area(Window(X))
  if(is.null(weights)) {
    ## unweighted case - for efficiency
    if(is.multitype(X)) {
      mks <- marks(X)
      answer <- tapply(len, mks, sum)/a
      answer[is.na(answer)] <- 0
      names(answer) <- levels(mks)
    } else answer <- sum(len)/a
    return(answer)
  }
  ## weighted case
  if(is.numeric(weights)) {
    check.nvector(weights, nsegments(X), things="segments")
  } else if(is.expression(weights)) {
    # evaluate expression in data frame of coordinates and marks
    df <- as.data.frame(X)
    pf <- parent.frame()
    eval.weights <- try(eval(weights, envir=df, enclos=pf))
    if(inherits(eval.weights, "try-error"))
      stop("Unable to evaluate expression for weights", call.=FALSE)
    if(!check.nvector(eval.weights, nsegments(X), fatal=FALSE, warn=TRUE))
      stop("Result of evaluating the expression for weights has wrong format")
    weights <- eval.weights
  } else stop("Unrecognised format for argument 'weights'")
  ##
  if(is.multitype(X)) {
    mks <- marks(X)
    answer <- as.vector(tapply(weights * len, mks, sum))/a
    answer[is.na(answer)] <- 0
    names(answer) <- levels(mks)
  } else {
    # NOTE(review): this branch uses sum(weights)/a while the multitype
    # branch above uses sum(weights * len) - looks inconsistent;
    # confirm which semantics are intended.
    answer <- sum(weights)/a
  }
  return(answer)
}
spatstat/R/strauss.R0000644000176200001440000001570013333543255014156 0ustar liggesusers#
#
#    strauss.R
#
#    $Revision: 2.43 $	$Date: 2018/03/15 07:37:41 $
#
#    The Strauss process
#
#    Strauss()    create an instance of the Strauss process
#                 [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#
Strauss <- local({

  # create blank template object without family and pars
  # (an 'interact' template: each component is either a constant or a
  #  callback invoked by the model-fitting machinery)
  BlankStrauss <-
  list(
    name     = "Strauss process",
    creator  = "Strauss",
    family   = "pairwise.family", # evaluated later
    # pair potential: indicator that the interpoint distance is <= r
    pot      = function(d, par) {
      d <= par$r
    },
    par      = list(r = NULL), # to be filled in
    parnames = "interaction distance",
    hasInf   = FALSE,
    # validate the irregular parameter r at creation time
    init     = function(self) {
      r <- self$par$r
      if(!is.numeric(r) || length(r) != 1 || r <= 0)
        stop("interaction distance r must be a positive number")
    },
    update = NULL, # default OK
    print = NULL,  # default OK
    # map the fitted canonical coefficient (log gamma) to the
    # human-readable interaction parameter gamma
    interpret = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      gamma <- exp(loggamma)
      return(list(param=list(gamma=gamma),
                  inames="interaction parameter gamma",
                  printable=dround(gamma)))
    },
    # fitted coefficients are valid iff log(gamma) <= 0, i.e. gamma <= 1
    valid = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      return(is.finite(loggamma) && (loggamma <= 0))
    },
    # projection onto valid models: an invalid Strauss fit
    # falls back to a Poisson process
    project = function(coeffs, self) {
      if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
    },
    # range of interaction: r, or 0 when the fitted interaction is
    # numerically negligible (|log gamma| <= epsilon)
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      r <- self$par$r
      if(anyNA(coeffs))
        return(r)
      loggamma <- coeffs[1]
      if(abs(loggamma) <= epsilon)
        return(0)
      else
        return(r)
    },
    version=NULL, # to be filled in
    # fast evaluation is available for the border correction only
    can.do.fast=function(X,correction,par) {
      return(all(correction %in% c("border", "none")))
    },
    fasteval=function(X,U,EqualPairs,pairpot,potpars,correction,
                      splitInf=FALSE, ...) {
      #' fast evaluator for Strauss interaction
      dont.complain.about(splitInf)
      if(!all(correction %in% c("border", "none")))
        return(NULL)
      if(spatstat.options("fasteval") == "test")
        message("Using fast eval for Strauss")
      r <- potpars$r
      # count r-close neighbours of each quadrature point
      answer <- strausscounts(U, X, r, EqualPairs)
      return(matrix(answer, ncol=1))
    },
    Mayer=function(coeffs, self) {
      # second Mayer cluster integral
      gamma <- exp(as.numeric(coeffs[1]))
      r <- self$par$r
      return((1-gamma) * pi * r^2)
    },
    Percy=function(d, coeffs, par, ...)
    {
      ## term used in Percus-Yevick type approximation
      gamma <- exp(as.numeric(coeffs[1]))
      R <- par$r
      t <- abs(d/(2*R))
      t <- pmin.int(t, 1)
      y <- 2 * R^2 * (pi * (1-gamma)
                      - (1-gamma)^2 * (acos(t) - t * sqrt(1 - t^2)))
      return(y)
    },
    # second-order sensitivity: (sparse) matrix of contributions from
    # all r-close pairs, weighted by the chosen edge correction
    delta2 = function(X, inte, correction, ..., sparseOK=FALSE) {
      r <- inte$par$r
      X <- as.ppp(X) # algorithm is the same for data and dummy points
      nX <- npoints(X)
      cl <- weightedclosepairs(X, r, correction=correction, what="indices")
      if(is.null(cl)) return(NULL)
      v <- sparseMatrix(i=cl$i, j=cl$j, x=cl$weight, dims=c(nX, nX))
      if(!sparseOK)
        v <- as.matrix(v)
      return(v)
    }
  )
  class(BlankStrauss) <- "interact"

  # Finally define main function
  # Strauss(r): create a Strauss interaction with interaction distance r
  Strauss <- function(r) {
    instantiate.interact(BlankStrauss, list(r=r))
  }
  Strauss <- intermaker(Strauss, BlankStrauss)
  Strauss
})

# generally accessible functions

# strausscounts: for each point of U, the number of points of X lying
# within distance r, after subtracting the pairs listed in EqualPairs
# (second column of EqualPairs indexes the points of U to be adjusted).
strausscounts <- function(U, X, r, EqualPairs=NULL) {
  answer <- crosspaircounts(U,X,r)
  # subtract counts of identical pairs
  if(length(EqualPairs) > 0) {
    nU <- npoints(U)
    idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU)))
    answer <- answer - idcount
  }
  return(answer)
}

# crosspaircounts: for each point of X, count the points of Y within
# distance r. Both patterns are sorted by x coordinate before calling
# the C routine Ccrosspaircounts; the result is returned in the
# original (unsorted) order of X.
crosspaircounts <- function(X, Y, r) {
  stopifnot(is.ppp(X))
  stopifnot(is.numeric(r) && length(r) == 1)
  stopifnot(is.finite(r))
  stopifnot(r >= 0)
  # sort in increasing order of x coordinate
  oX <- fave.order(X$x)
  oY <- fave.order(Y$x)
  Xsort <- X[oX]
  Ysort <- Y[oY]
  nX <- npoints(X)
  nY <- npoints(Y)
  # call C routine
  out <- .C("Ccrosspaircounts",
            nnsource = as.integer(nX),
            xsource = as.double(Xsort$x),
            ysource = as.double(Xsort$y),
            nntarget = as.integer(nY),
            xtarget = as.double(Ysort$x),
            ytarget = as.double(Ysort$y),
            rrmax = as.double(r),
            counts = as.integer(integer(nX)),
            PACKAGE = "spatstat")
  answer <- integer(nX)
  answer[oX] <- out$counts
  return(answer)
}

# closepaircounts: for each point of X, count points of X within
# distance r via the C routine Cclosepaircounts
# (presumably excluding the point itself - confirm against init.c).
closepaircounts <- function(X, r) {
  stopifnot(is.ppp(X))
  stopifnot(is.numeric(r) && length(r) == 1)
  stopifnot(is.finite(r))
  stopifnot(r >= 0)
  # sort in increasing order of x coordinate
  oX <- fave.order(X$x)
  Xsort <- X[oX]
  nX <-
npoints(X) # call C routine out <- .C("Cclosepaircounts", nxy = as.integer(nX), x = as.double(Xsort$x), y = as.double(Xsort$y), rmaxi = as.double(r), counts = as.integer(integer(nX)), PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$counts return(answer) } weightedclosepairs <- function(X, r, correction, what=c("all", "indices", "ijd")) { what <- match.arg(what) ## return list(i,j,..,weight) for all r-close pairs switch(correction, none = , border = { cl <- closepairs(X, r, what=what) weight <- rep(1, length(cl$i)) }, isotropic = , Ripley = { if(what == "indices") { cl <- closepairs(X, r, what="ijd") weight <- edge.Ripley(X[cl$i], cl$d) cl <- cl[c("i", "j")] } else { cl <- closepairs(X, r, what=what) weight <- edge.Ripley(X[cl$i], cl$d) } }, translate = { cl <- closepairs(X, r, what="all") weight <- edge.Trans(dx = cl$dx, dy = cl$dy, W = Window(X), paired=TRUE) switch(what, indices = { cl <- cl[c("i", "j")] }, ijd = { cl <- cl[c("i", "j", "d")] }, all = { }) }, periodic = { cl <- closepairs(X, r, what=what, periodic=TRUE) weight <- rep(1, length(cl$i)) }, { warning(paste("Unrecognised correction", sQuote(correction)), call.=FALSE) return(NULL) } ) result <- append(cl, list(weight=as.numeric(weight))) return(result) } spatstat/R/relrisk.ppm.R0000644000176200001440000003307413415053027014716 0ustar liggesusers## ## relrisk.ppm.R ## ## $Revision: 1.9 $ $Date: 2019/01/08 07:44:07 $ ## relrisk.ppm <- local({ relrisk.ppm <- function(X, ..., at=c("pixels", "points"), relative=FALSE, se=FALSE, casecontrol=TRUE, control=1, case, ngrid=NULL, window=NULL) { stopifnot(is.ppm(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) if(!relative && (control.given || case.given)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE")) } 
model <- X Y <- data.ppm(model) types <- levels(marks(Y)) ntypes <- length(types) # np <- length(coef(model)) ## compute probabilities or risks if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } switch(at, pixels= { ## estimate is a single image ## compute images of intensities of each mark lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities.. 
## total intensity (image) lambda.all <- im.apply(lambda.each, sum, check=FALSE) ## WAS: lambda.all <- Reduce("+", lambda.each) if(!se) { result <- lambda.each[[icase]]/lambda.all result <- killglitches(result) } else { probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) estimate <- probs[[icase]] SE <- SEprobPixels(model, probs)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } else { ## relative risks lambda.ctrl <- lambda.each[[icontrol]] if(!se) { result <- lambda.each[[icase]]/lambda.ctrl result <- killglitches(result) } else { risks <- lapply(lambda.each, "/", e2=lambda.ctrl) risks <- as.solist(lapply(risks, killglitches)) estimate <- risks[[icase]] SE <- SErelriskPixels(model, risks, icontrol)[[icase]] SE <- killglitches(SE) result <- list(estimate=estimate, SE=SE) } } }, points={ ## compute intensities of each type Ycase <- unmark(Y) %mark% factor(types[icase], levels=types) Yctrl <- unmark(Y) %mark% factor(types[icontrol], levels=types) lambda.case <- predict(model, locations=Ycase) lambda.ctrl <- predict(model, locations=Yctrl) if(!relative) { ## compute probabilities ## total intensity lambda.all <- lambda.case + lambda.ctrl prob.case <- lambda.case/lambda.all if(!se) { result <- prob.case } else { probs <- matrix(, length(prob.case), 2) probs[,icase] <- prob.case probs[,icontrol] <- 1 - prob.case SE <- SEprobPoints(model, probs)[,icase] result <- list(estimate=prob.case, SE=SE) } } else { ## compute relative risks risk.case <- lambda.case/lambda.ctrl if(!se) { result <- risk.case } else { risks <- matrix(, length(risk.case), 2) risks[,icase] <- risk.case risks[,icontrol] <- 1 SE <- SErelriskPoints(model, risks, icontrol)[,icase] result <- list(estimate=risk.case, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else 
if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ ## estimate is a list of images ## Compute images of intensities of each type lambda.each <- predict(model, ngrid=ngrid, window=window) if(!relative) { ## compute probabilities... ## image of total intensity lambda.all <- im.apply(lambda.each, sum, check=FALSE) ## WAS: lambda.all <- Reduce("+", lambda.each) probs <- lapply(lambda.each, "/", e2=lambda.all) probs <- as.solist(lapply(probs, killglitches)) if(!se) { result <- probs } else { SE <- SEprobPixels(model, probs) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lapply(lambda.each, "/", e2=lambda.each[[icontrol]]) risks <- as.solist(lapply(risks, killglitches)) if(!se) { result <- risks } else { SE <- SErelriskPixels(model, risks, icontrol) SE <- as.solist(lapply(SE, killglitches)) result <- list(estimate=risks, SE=SE) } } }, points = { ## matrix of intensities of each type at each point ## rows=locations, cols=types lambda.each <- sapply(types, predictfortype, loc=unmark(Y), model=model, types=types) if(!relative) { ## compute probabilities lambda.all <- rowSums(lambda.each) probs <- lambda.each/lambda.all if(!se) { result <- probs } else { SE <- SEprobPoints(model, probs) result <- list(estimate=probs, SE=SE) } } else { ## compute relative risks risks <- lambda.each/lambda.each[,icontrol] if(!se) { result <- risks } else { SE <- SErelriskPoints(model, risks, icontrol) result <- list(estimate=risks, SE=SE) } } }) } return(result) } modmats <- function(model) { # model matrices for data locations for each possible mark QM <- quad.ppm(model) Y <- QM$data suppressWarnings({ QR <- quadscheme.replicated(Y, unmark(Y[FALSE])) }) sourceid <- QR$param$sourceid ## canonical covariates mm <- model.matrix(model, Q=QR) ## mm 
is a matrix with one column for canonical covariate ## and one row for each marked point in QR. mm <- cbind(data.frame(".s"=sourceid, ".m"=marks(QR)), mm) ## Split by marks ss <- split(mm, mm$.m) ## Reorganise into compatible matrices zz <- lapply(ss, reorg) return(zz) } reorg <- function(x) { z <- x rownames(z) <- NULL z[x$.s, ] <- z return(z[,-(1:2), drop=FALSE]) } SErelriskPoints <- function(model, riskvalues, icontrol) { ## riskvalues is a matrix with rows=data locations, cols=types types <- colnames(riskvalues) ntypes <- length(types) ## S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having one row per data location dS.um <- lapply(S.um, "-", e2=S.um[[icontrol]]) R.um <- mapply("*", dS.um, as.list(as.data.frame(riskvalues)), SIMPLIFY=FALSE) ## likewise R.um is a list of matrices ## vc <- vcov(model) VAR <- lapply(R.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } msubtract <- function(z1, z2) mapply("-", e1=z1, e2=z2, SIMPLIFY=FALSE) mmultiply <- function(z1, z2) solapply(z1, "*", e2=z2) SErelriskPixels <- function(model, riskvalues, icontrol) { ## riskvalues is an imlist types <- names(riskvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate dS.um <- lapply(S.um, msubtract, z2=S.um[,icontrol,drop=TRUE]) R.um <- mapply(mmultiply, z1=dS.um, z2=riskvalues, SIMPLIFY=FALSE) VAR <- vector(mode="list", length=ntypes) ntypes <- length(types) vc <- vcov(model) ncoef <- nrow(vc) for(type in 1:ntypes) { v <- 0 Rum <- R.um[[type]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Rum[[i]] * vc[i,j] * Rum[[j]] } } VAR[[type]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) return(SE) } SEprobPixels <- function(model, probvalues) { ## probvalues is an imlist types <- 
names(probvalues) ntypes <- length(types) ## canonical covariates S.um <- model.images(model) ## S.um is a hyperframe with one column for each mark value ## and one row for each canonical covariate ncoef <- length(coef(model)) Sbar.u <- vector(mode="list", length=ncoef) for(k in 1:ncoef) { A <- mapply("*", e1=S.um[k,,drop=TRUE], e2=probvalues, SIMPLIFY=FALSE) Sbar.u[[k]] <- im.apply(A, sum) } ## Sbar.u is a list of images, one for each canonical covariate Sdif.um <- lapply(as.list(S.um), msubtract, z2=Sbar.u) ## Sdif.um is a list of lists of images. ## List of length ntypes, ## each entry being an imlist of length ncoef P.um <- mapply(mmultiply, Sdif.um, probvalues, SIMPLIFY=FALSE) ## P.um is same format as Sdif.um vc <- vcov(model) ncoef <- nrow(vc) VAR <- vector(mode="list", length=ntypes) for(m in 1:ntypes) { v <- 0 Pum <- P.um[[m]] for(i in 1:ncoef) { for(j in 1:ncoef) { v <- v + Pum[[i]] * vc[i,j] * Pum[[j]] } } VAR[[m]] <- v } names(VAR) <- types VAR <- as.solist(VAR) SE <- as.solist(lapply(VAR, sqrt)) } SEprobPoints <- function(model, probvalues) { ## probvalues is a matrix with row=location and column=type types <- colnames(probvalues) ntypes <- length(types) ## canonical covariates S.um <- modmats(model) S.um <- lapply(S.um, as.matrix) ## S.um is a list of matrices, one for each possible type, ## each matrix having rows=locations and cols=covariates ## Weight each matrix by its mark probabilities SW <- mapply("*", e1=S.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## average them Sbar.u <- Reduce("+", SW) ## Sbar.u is a matrix with rows=locations and cols=covariates Sdif.um <- lapply(S.um, "-", e2=Sbar.u) ## Sdif.um is a list of matrices like S.um P.um <- mapply("*", e1=Sdif.um, e2=as.list(as.data.frame(probvalues)), SIMPLIFY=FALSE) ## P.um likewise vc <- vcov(model) VAR <- lapply(P.um, quadform, v=vc) VAR <- do.call(cbind, VAR) SE <- sqrt(VAR) colnames(SE) <- types return(SE) } predictfortype <- function(type, model, types, loc) { 
predict(model, locations=loc %mark% factor(type, levels=types)) } killglitches <- function(z, eps=.Machine$double.eps) { ra <- range(z, finite=TRUE) if(max(abs(ra)) < eps) { z[] <- 0 return(z) } if(diff(ra) < eps) z[] <- mean(z, na.rm=TRUE) return(z) } relrisk.ppm }) spatstat/R/edgeRipley.R0000644000176200001440000001627113541331472014544 0ustar liggesusers# # edgeRipley.R # # $Revision: 1.18 $ $Date: 2019/09/21 05:19:19 $ # # Ripley isotropic edge correction weights # # edge.Ripley(X, r, W) compute isotropic correction weights # for centres X[i], radii r[i,j], window W # # To estimate the K-function see the idiom in "Kest.S" # ####################################################################### edge.Ripley <- local({ small <- function(x) { abs(x) < .Machine$double.eps } hang <- function(d, r) { nr <- nrow(r) nc <- ncol(r) answer <- matrix(0, nrow=nr, ncol=nc) # replicate d[i] over j index d <- matrix(d, nrow=nr, ncol=nc) hit <- (d < r) answer[hit] <- acos(d[hit]/r[hit]) answer } edge.Ripley <- function(X, r, W=Window(X), method=c("C", "interpreted"), maxweight=100, internal=list()) { # X is a point pattern, or equivalent X <- as.ppp(X, W) W <- X$window method <- match.arg(method) debug <- resolve.1.default(list(debug=FALSE), internal) repair <- resolve.1.default(list(repair=TRUE), internal) switch(W$type, rectangle={}, polygonal={ if(method != "C") stop(paste("Ripley isotropic correction for polygonal windows", "requires method = ", dQuote("C"))) }, mask={ stop(paste("sorry, Ripley isotropic correction", "is not implemented for binary masks")) } ) n <- npoints(X) if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { if(length(r) != n) stop("length of r is incompatible with the number of points in X") r <- matrix(r, nrow=n) } # Nr <- nrow(r) Nc <- ncol(r) if(Nr * Nc == 0) return(r) ########## x <- X$x y <- X$y switch(method, interpreted = { ######## interpreted R code for rectangular case 
######### # perpendicular distance from point to each edge of rectangle # L = left, R = right, D = down, U = up dL <- x - W$xrange[1L] dR <- W$xrange[2L] - x dD <- y - W$yrange[1L] dU <- W$yrange[2L] - y # detect whether any points are corners of the rectangle corner <- (small(dL) + small(dR) + small(dD) + small(dU) >= 2) # angle between (a) perpendicular to edge of rectangle # and (b) line from point to corner of rectangle bLU <- atan2(dU, dL) bLD <- atan2(dD, dL) bRU <- atan2(dU, dR) bRD <- atan2(dD, dR) bUL <- atan2(dL, dU) bUR <- atan2(dR, dU) bDL <- atan2(dL, dD) bDR <- atan2(dR, dD) # The above are all vectors [i] # Now we compute matrices [i,j] # half the angle subtended by the intersection between # the circle of radius r[i,j] centred on point i # and each edge of the rectangle (prolonged to an infinite line) aL <- hang(dL, r) aR <- hang(dR, r) aD <- hang(dD, r) aU <- hang(dU, r) # apply maxima # note: a* are matrices; b** are vectors; # b** are implicitly replicated over j index cL <- pmin.int(aL, bLU) + pmin.int(aL, bLD) cR <- pmin.int(aR, bRU) + pmin.int(aR, bRD) cU <- pmin.int(aU, bUL) + pmin.int(aU, bUR) cD <- pmin.int(aD, bDL) + pmin.int(aD, bDR) # total exterior angle ext <- cL + cR + cU + cD # add pi/2 for corners if(any(corner)) ext[corner,] <- ext[corner,] + pi/2 # OK, now compute weight weight <- 1 / (1 - ext/(2 * pi)) }, C = { ############ C code ############################# switch(W$type, rectangle={ z <- .C("ripleybox", nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1L]), ymin=as.double(W$yrange[1L]), xmax=as.double(W$xrange[2L]), ymax=as.double(W$yrange[2L]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), PACKAGE = "spatstat") weight <- matrix(z$out, nrow=Nr, ncol=Nc) }, polygonal={ Y <- edges(W) bd <- bdist.points(X) if(!debug) { z <- .C("ripleypoly", nc=as.integer(n), xc=as.double(x), yc=as.double(y), bd=as.double(bd), nr=as.integer(Nc), 
rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE = "spatstat") } else { z <- .C("rippolDebug", nc=as.integer(n), xc=as.double(x), yc=as.double(y), bd=as.double(bd), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE = "spatstat") } angles <- matrix(z$out, nrow = Nr, ncol = Nc) weight <- 2 * pi/angles } ) } ) ## eliminate wild values if(repair) weight <- matrix(pmax.int(1, pmin.int(maxweight, weight)), nrow=Nr, ncol=Nc) return(weight) } edge.Ripley }) rmax.Ripley <- function(W) { W <- as.owin(W) if(is.rectangle(W)) return(boundingradius(W)) if(is.polygonal(W) && length(W$bdry) == 1L) return(boundingradius(W)) ## could have multiple connected components pieces <- tiles(tess(image=connected(W))) answer <- sapply(pieces, boundingradius) return(as.numeric(answer)) } spatstat/R/hierpair.family.R0000644000176200001440000003150013433151224015521 0ustar liggesusers# # # hierpair.family.R # # $Revision: 1.11 $ $Date: 2019/02/20 03:34:50 $ # # The family of hierarchical pairwise interactions # # # ------------------------------------------------------------------- # hierpair.family <- list( name = "hierpair", print = function(self) { splat("Hierarchical pairwise interaction family") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hierpair") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- 
inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) stop("Unable to determine types of points") if(!is.factor(types)) types <- factor(types, levels=types) ## compute each potential and store in `fasp' object m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3L] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3L])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { ## relevant position in matrix ijpos <- i + (j-1L) * m which[i,j] <- ijpos ## extract values of potential yy <- y[tx == types[i], j] ## make fv object fns[[ijpos]] <- fv(data.frame(r=d, h=yy, one=1), "r", quote(h(r)), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1")) } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call(plot.fasp, resolve.defaults(list(funz), list(...), list(ylim=ylim, ylab="Pairwise interaction", xlab="Distance"))) return(invisible(funz)) }, # end of function `plot' # ---------------------------------------------------- eval = 
function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { ## ## This is the eval function for the `hierpair' family. ## fop <- names(formals(pairpot)) if(isTRUE(all.equal(fop, c("d", "par")))) marx <- FALSE else if(isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) marx <- TRUE else stop("Formal arguments of pair potential function are not understood") ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- FALSE && (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) else if(!marx) POT <- pairpot(M, potpars) else POT <- pairpot(M, marks(X), marks(U), potpars) # Determine whether each column of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 
0) { nplanes <- dim(POT)[3L] for(k in 1:nplanes) POT[cbind(EqualPairs, k)] <- 0 } # Return just the pair potential? if(pot.only) return(POT) # Sum the pairwise potentials V <- apply(POT, c(2,3), sum) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="hierpair.family$suffstat") { # for hierarchical pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"hierpair")) stop("Model is not a hierarchical pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[integer(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix at data points which contribute to the pseudolikelihood momIN <- partialModelMatrix(X, Empty, model, "suffstat")[contribute, , drop=FALSE] # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + seq_len(Xin$n) momINxOUT <- mom[indx, , drop=FALSE] ## determine which canonical covariates are true second-order terms ## eg 'mark1x1' typ <- levels(marks(X)) vn <- paste0("mark", typ, "x", typ) order2 <- names(coef(model)) %in% vn order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order 
contributions (including 'mark1x2' etc) o1terms <- momIN[ , order1, drop=FALSE] o1sum <- colSums(o1terms) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, order2, drop=FALSE] o2sum <- colSums(o2termsINxIN)/2 + colSums(o2termsINxOUT) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ...) { # Sufficient statistic for second order conditional intensity # for hierarchical pairwise interaction processes # Equivalent to evaluating pair potential. if(is.ppp(X)) { seqX <- seq_len(npoints(X)) E <- cbind(seqX, seqX) R <- reach(inte) POT <- hierpair.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) result <- aperm(POT, c(2,1,3)) M <- attr(POT, "IsNegInf") if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") M <- aperm(M, c(2,1,3)) #' collapse vector-valued potential, yielding a matrix M <- apply(M, c(1,2), any) if(!is.matrix(M)) M <- matrix(M, nrow=nX) #' count conflicts hits <- colSums(M) #' hits[j] == 1 implies that X[j] violates hard core with only one X[i] #' and therefore changes status if X[i] is deleted. 
deltaInf <- M deltaInf[, hits != 1] <- FALSE attr(result, "deltaInf") <- deltaInf } } else if(is.quad(X)) { U <- union.quad(X) izdat <- is.data(X) nU <- npoints(U) nX <- npoints(X$data) seqU <- seq_len(nU) E <- cbind(seqU, seqU) R <- reach(inte) POT <- hierpair.family$eval(U,U,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R, splitInf=TRUE) result <- aperm(POT, c(2,1,3)) M <- attr(POT, "IsNegInf") if(!is.null(M)) { #' validate if(length(dim(M)) != 3) stop("Internal error: IsNegInf is not a 3D array") M <- aperm(M, c(2,1,3)) #' consider conflicts with data points MXU <- M[izdat, , , drop=FALSE] #' collapse vector-valued potential, yielding a matrix MXU <- apply(MXU, c(1,2), any) if(!is.matrix(MXU)) MXU <- matrix(MXU, nrow=nX) #' count data points conflicting with each quadrature point nhitdata <- colSums(MXU) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], deltaInf <- apply(M, c(1,2), any) deltaInf[izdat, nhitdata != 1] <- FALSE #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. deltaInf[!izdat, nhitdata != 0] <- FALSE attr(result, "deltaInf") <- deltaInf } } return(result) } ######### end of function $delta2 ) ######### end of list class(hierpair.family) <- "isf" spatstat/R/weightedStats.R0000644000176200001440000000477413333543255015302 0ustar liggesusers#' #' weightedStats.R #' #' weighted versions of hist, var, median, quantile #' #' $Revision: 1.3 $ $Date: 2017/06/05 10:31:58 $ #' #' #' whist weighted histogram #' whist <- function(x, breaks, weights=NULL) { N <- length(breaks) if(length(x) == 0) h <- numeric(N+1) else { # classify data into histogram cells (breaks need not span range of data) cell <- findInterval(x, breaks, rightmost.closed=TRUE) # values of 'cell' range from 0 to N. 
    ## cells are numbered 0..N, so there are N+1 histogram bins
    nb <- N + 1L
    if(is.null(weights)) {
      ## histogram (unweighted): count points per cell
      h <- tabulate(cell+1L, nbins=nb)
    } else {
      ## weighted histogram: sum the weights in each cell
      if(!spatstat.options("Cwhist")) {
        ## interpreted R implementation
        cell <- factor(cell, levels=0:N)
        h <- unlist(lapply(split(weights, cell), sum, na.rm=TRUE))
      } else {
        ## fast C implementation
        h <- .Call("Cwhist",
                   as.integer(cell), as.double(weights), as.integer(nb),
                   PACKAGE = "spatstat")
      }
    }
  }
  h <- as.numeric(h)
  ## return counts for the N-1 internal bins;
  ## mass below the first break and above the last break is
  ## reported separately in the "low"/"high" attributes
  y <- h[2:N]
  attr(y, "low") <- h[1]
  attr(y, "high") <- h[N+1]
  return(y)
}

#' wrapper for computing weighted variance of a vector
#' Note: this includes a factor 1 - sum(v^2) in the denominator
#' where v = w/sum(w). See help(cov.wt)
#' Returns NA_real_ when NAs are present and na.rm=FALSE.
weighted.var <- function(x, w, na.rm=TRUE) {
  bad <- is.na(w) | is.na(x)
  if(any(bad)) {
    if(!na.rm) return(NA_real_)
    ok <- !bad
    x <- x[ok]
    w <- w[ok]
  }
  ## cov.wt computes the weighted covariance matrix; [] drops dimnames
  cov.wt(matrix(x, ncol=1),w)$cov[]
}

#' weighted median: the 0.5 weighted quantile, with the "50%" name stripped
weighted.median <- function(x, w, na.rm=TRUE) {
  unname(weighted.quantile(x, probs=0.5, w=w, na.rm=na.rm))
}

#' weighted quantile
#' Computes quantiles of x with respect to the weighted cdf
#' Fx = cumsum(w)/sum(w), with linear interpolation between
#' consecutive distinct x values.
#' NOTE(review): the na.rm argument is not consulted here -
#' NA entries of x or w are always removed; confirm whether
#' na.rm=FALSE should instead propagate NA as in weighted.var.
weighted.quantile <- function(x, w, probs=seq(0,1,0.25), na.rm=TRUE) {
  x <- as.numeric(as.vector(x))
  w <- as.numeric(as.vector(w))
  if(anyNA(x) || anyNA(w)) {
    ok <- !(is.na(x) | is.na(w))
    x <- x[ok]
    w <- w[ok]
  }
  stopifnot(all(w >= 0))
  if(all(w == 0)) stop("All weights are zero", call.=FALSE)
  #' sort data and form the weighted empirical cdf
  oo <- order(x)
  x <- x[oo]
  w <- w[oo]
  Fx <- cumsum(w)/sum(w)
  #' invert the cdf at each requested probability
  result <- numeric(length(probs))
  for(i in seq_along(result)) {
    p <- probs[i]
    lefties <- which(Fx <= p)
    if(length(lefties) == 0) {
      ## p falls below the first cdf jump: return the minimum
      result[i] <- x[1]
    } else {
      left <- max(lefties)
      result[i] <- x[left]
      if(Fx[left] < p && left < length(x)) {
        ## interpolate linearly between the bracketing data values
        right <- left+1
        y <- x[left] + (x[right]-x[left]) * (p-Fx[left])/(Fx[right]-Fx[left])
        if(is.finite(y)) result[i] <- y
      }
    }
  }
  ## label results like stats::quantile, e.g. "25%"
  names(result) <- paste0(format(100 * probs, trim = TRUE), "%")
  return(result)
}
spatstat/R/Kmodel.R0000644000176200001440000000032413333543254013660 0ustar liggesusers#
# Kmodel.R
#
# Kmodel and pcfmodel
#
# Generic functions: methods are defined elsewhere for fitted models
#
# $Revision: 1.1 $ $Date: 2011/05/30 14:02:21 $
#

## generic: K function of a fitted model
Kmodel <- function(model, ...) {
  UseMethod("Kmodel")
}

## generic: pair correlation function of a fitted model
pcfmodel <- function(model, ...)
{ UseMethod("pcfmodel") } spatstat/R/hybrid.family.R0000644000176200001440000001563613333543255015223 0ustar liggesusers# # hybrid.family.R # # $Revision: 1.13 $ $Date: 2018/03/15 08:47:20 $ # # Hybrid interactions # # hybrid.family: object of class 'isf' defining pairwise interaction # # ------------------------------------------------------------------- # hybrid.family <- list( name = "hybrid", print = function(self) { cat("Hybrid interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE, separate=FALSE) { # plot hybrid interaction if possible verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hybrid") stop("Tried to plot the wrong kind of interaction") if(is.null(d)) { # compute reach and determine max distance for plots dmax <- 1.25 * reach(inter) if(!is.finite(dmax)) { # interaction has infinite reach # Are plot limits specified? xlim <- resolve.defaults(list(...), list(xlim=c(0, Inf))) if(all(is.finite(xlim))) dmax <- max(xlim) else stop("Interaction has infinite reach; need to specify xlim or d") } d <- seq(0, dmax, length=256) } # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # extract the component interactions interlist <- inter$par # check that they are all pairwise interactions families <- unlist(lapply(interlist, interactionfamilyname)) if(!separate && !all(families == "pairwise")) { warning(paste("Cannot compute the resultant function;", "not all components are pairwise interactions;", "plotting each component separately")) separate <- TRUE } # deal with each interaction ninter <- length(interlist) results <- list() for(i in 1:ninter) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. 
<- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. prefixlength <- nchar(nameI.) Vprefix <- substr(Vnames, 1, prefixlength) relevant <- (Vprefix == nameI.) # construct fii object for this component fitinI <- fii(interI, coeff[relevant], Vnames[relevant], IsOffset[relevant]) # convert to fv object a <- plot(fitinI, ..., d=d, plotit=FALSE) aa <- list(a) names(aa) <- nameI results <- append(results, aa) } # computation of resultant is only implemented for fv objects if(!separate && !all(unlist(lapply(results, is.fv)))) { warning(paste("Cannot compute the resultant function;", "not all interaction components yielded an fv object;", "plotting separate results for each component")) separate <- TRUE } # return separate 'fv' or 'fasp' objects if required results <- as.anylist(results) if(separate) { if(plotit) { main0 <- "Pairwise interaction components" do.call(plot, resolve.defaults(list(results), list(...), list(main=main0))) } return(invisible(results)) } # multiply together to obtain resultant pairwise interaction ans <- results[[1L]] if(ninter >= 2) { for(i in 2:ninter) { Fi <- results[[i]] ans <- eval.fv(ans * Fi) } copyover <- c("ylab", "yexp", "labl", "desc", "fname") attributes(ans)[copyover] <- attributes(results[[1L]])[copyover] } main0 <- "Resultant pairwise interaction" if(plotit) do.call(plot, resolve.defaults(list(ans), list(...), list(main=main0))) return(invisible(ans)) }, eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # `pot' is ignored; `pars' is a list of interactions nU <- length(U$x) V <- matrix(, nU, 0) IsOffset <- logical(0) for(i in 1:length(pars)) { # extract i-th component interaction interI <- pars[[i]] nameI <- names(pars)[[i]] # compute potential for i-th component VI <- evalInteraction(X, U, EqualPairs, interI, correction, ...) 
if(ncol(VI) > 0) { if(ncol(VI) > 1 && is.null(colnames(VI))) # make up names colnames(VI) <- paste("Interaction", seq(ncol(VI)), sep=".") # prefix label with name of i-th component colnames(VI) <- paste(nameI, dimnames(VI)[[2L]], sep=".") # handle IsOffset offI <- attr(VI, "IsOffset") if(is.null(offI)) offI <- rep.int(FALSE, ncol(VI)) # tack on IsOffset <- c(IsOffset, offI) # append to matrix V V <- cbind(V, VI) } } if(any(IsOffset)) attr(V, "IsOffset") <- IsOffset return(V) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { ## Sufficient statistic for second order conditional intensity result <- NULL deltaInf <- FALSE interlist <- inte$par for(ii in interlist) { v <- NULL ## look for 'delta2' in component interaction 'ii' if(!is.null(delta2 <- ii$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) ## look for 'delta2' in family of component 'ii' if(is.null(v) && !is.null(delta2 <- ii$family$delta2) && is.function(delta2)) v <- delta2(X, ii, correction, sparseOK=sparseOK) if(is.null(v)) { ## no special algorithm available: generic algorithm needed return(NULL) } if(is.null(result)) { result <- v } else if(inherits(v, c("sparse3Darray", "sparseMatrix"))) { result <- bind.sparse3Darray(result, v, along=3) } else { result <- abind::abind(as.array(result), v, along=3) } deltaInf <- deltaInf | (attr(v, "deltaInf") %orifnull% FALSE) } if(length(dim(deltaInf))) attr(result, "deltaInf") <- deltaInf return(result) }, suffstat = NULL ) class(hybrid.family) <- "isf" spatstat/R/cdftest.R0000644000176200001440000003642013602557473014116 0ustar liggesusers# # cdftest.R # # $Revision: 2.21 $ $Date: 2019/12/31 05:16:01 $ # # cdf.test <- function(...) 
{ UseMethod("cdf.test") }

## cdf.test method for point patterns (class 'ppp').
## Fits a null model (CSR; or CSRI for multitype patterns) to X
## and delegates to spatialCDFtest, which compares the observed and
## predicted distributions of the covariate values.
cdf.test.ppp <- function(X, covariate, test=c("ks", "cvm", "ad"), ...,
                         interpolate=TRUE, jitter=TRUE) {
  ## capture argument names for use in printed output
  Xname <- short.deparse(substitute(X))
  covname <- singlestring(short.deparse(substitute(covariate)))
  test <- match.arg(test)
  if(is.character(covariate)) covname <- covariate
  if(!is.marked(X, dfok=TRUE)) {
    # unmarked
    model <- ppm(X)
    modelname <- "CSR"
  } else if(is.multitype(X)) {
    # multitype
    mf <- summary(X)$marks$frequency
    if(all(mf > 0)) {
      ## CSRI: independent Poisson processes, one intensity per type
      model <- ppm(X ~marks)
      modelname <- "CSRI"
    } else {
      warning("Ignoring marks, because some mark values have zero frequency")
      X <- unmark(X)
      model <- ppm(X)
      modelname <- "CSR"
    }
  } else {
    # marked - general case: marks are not handled; drop them
    X <- unmark(X)
    warning("marks ignored")
    model <- ppm(X)
    modelname <- "CSR"
  }
  do.call(spatialCDFtest,
          resolve.defaults(list(model, covariate, test=test),
                           list(interpolate=interpolate, jitter=jitter),
                           list(...),
                           list(modelname=modelname, covname=covname,
                                dataname=Xname)))
}

## cdf.test method for fitted Gibbs/Poisson models (class 'ppm').
## For non-Poisson models spatialCDFtest runs a Monte Carlo test
## with nsim simulations.
cdf.test.ppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ...,
                         interpolate=TRUE, jitter=TRUE,
                         nsim=99, verbose=TRUE) {
  modelname <- short.deparse(substitute(model))
  covname <- singlestring(short.deparse(substitute(covariate)))
  test <- match.arg(test)
  verifyclass(model, "ppm")
  if(is.character(covariate)) covname <- covariate
  ## a stationary Poisson model is CSR by definition
  if(is.poisson(model) && is.stationary(model))
    modelname <- "CSR"
  do.call(spatialCDFtest,
          resolve.defaults(list(model, covariate, test=test),
                           list(interpolate=interpolate, jitter=jitter,
                                nsim=nsim, verbose=verbose),
                           list(...),
                           list(modelname=modelname, covname=covname)))
}

## cdf.test method for point patterns on a linear network (class 'lpp').
## Mirrors cdf.test.ppp, using lppm instead of ppm.
cdf.test.lpp <- function(X, covariate, test=c("ks", "cvm", "ad"), ...,
                         interpolate=TRUE, jitter=TRUE) {
  Xname <- short.deparse(substitute(X))
  covname <- singlestring(short.deparse(substitute(covariate)))
  test <- match.arg(test)
  if(is.character(covariate)) covname <- covariate
  if(!is.marked(X, dfok=TRUE)) {
    # unmarked
    model <- lppm(X)
    modelname <- "CSR"
  } else if(is.multitype(X)) {
    # multitype
    mf <- table(marks(X))
    if(all(mf > 0))
      {
        ## CSRI on a network: one intensity per mark type
        model <- lppm(X ~ marks)
        modelname <- "CSRI"
      } else {
        warning("Ignoring marks, because some mark values have zero frequency")
        X <- unmark(X)
        model <- lppm(X)
        modelname <- "CSR"
      }
  } else {
    # marked - general case: marks are not handled; drop them
    X <- unmark(X)
    warning("marks ignored")
    model <- lppm(X)
    modelname <- "CSR"
  }
  do.call(spatialCDFtest,
          resolve.defaults(list(model, covariate, test=test),
                           list(interpolate=interpolate, jitter=jitter),
                           list(...),
                           list(modelname=modelname, covname=covname,
                                dataname=Xname)))
}

## cdf.test method for fitted point process models on a network
## (class 'lppm'). Mirrors cdf.test.ppm.
cdf.test.lppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ...,
                          interpolate=TRUE, jitter=TRUE,
                          nsim=99, verbose=TRUE) {
  modelname <- short.deparse(substitute(model))
  covname <- singlestring(short.deparse(substitute(covariate)))
  test <- match.arg(test)
  verifyclass(model, "lppm")
  if(is.character(covariate)) covname <- covariate
  ## a stationary Poisson model is CSR by definition
  if(is.poisson(model) && is.stationary(model))
    modelname <- "CSR"
  do.call(spatialCDFtest,
          resolve.defaults(list(model, covariate, test=test),
                           list(interpolate=interpolate, jitter=jitter,
                                nsim=nsim, verbose=verbose),
                           list(...),
                           list(modelname=modelname, covname=covname)))
}

## cdf.test method for fitted spatial logistic regression models
## (class 'slrm'). The fitted probabilities play the role of the
## intensity weights when forming the predicted cdf of the covariate.
cdf.test.slrm <- function(model, covariate, test=c("ks", "cvm", "ad"), ...,
                          modelname=NULL, covname=NULL) {
  # get names
  if(is.null(modelname))
    modelname <- short.deparse(substitute(model))
  if(is.null(covname))
    covname <- short.deparse(substitute(covariate))
  dataname <- model$CallInfo$responsename
  test <- match.arg(test)
  #
  stopifnot(is.slrm(model))
  stopifnot(is.im(covariate))
  # extract data
  prob <- fitted(model)
  covim <- as.im(covariate, W=as.owin(prob))
  probvalu <- as.matrix(prob)
  covvalu <- as.matrix(covim)
  ## keep only pixels where both images are defined
  ok <- !is.na(probvalu) & !is.na(covvalu)
  probvalu <- as.vector(probvalu[ok])
  covvalu <- as.vector(covvalu[ok])
  # compile weighted cdf's
  ## predicted cdf of covariate, weighted by fitted probabilities
  FZ <- ewcdf(covvalu, probvalu/sum(probvalu))
  X <- model$Data$response
  ## covariate values at the data points
  ZX <- safelookup(covim, X)
  # Ensure support of cdf includes the range of the data
  xxx <- knots(FZ)
  yyy <- FZ(xxx)
  if(min(xxx) > min(ZX)) {
    xxx <- c(min(ZX), xxx)
    yyy <- c(0, yyy)
} if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } # make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) # now apply cdf U <- FZ(ZX) # Test uniformity of transformed values result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") # modify the 'htest' entries result$method <- paste("Spatial", testname, "test of", "inhomogeneous Poisson process", "in two dimensions") result$data.name <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n and transformed to uniform distribution under", sQuote(modelname)) # additional class 'cdftest' class(result) <- c("cdftest", class(result)) attr(result, "prep") <- list(Zvalues=covvalu, ZX=ZX, FZ=FZ, FZX=ecdf(ZX), U=U) attr(result, "info") <- list(modelname=modelname, covname=covname, dataname=dataname, csr=FALSE) return(result) } #............. helper functions ........................# spatialCDFtest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., dimyx=NULL, eps=NULL, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE, modelname=NULL, covname=NULL, dataname=NULL) { ## conduct test based on comparison of CDF's of covariate values test <- match.arg(test) ## compute the essential data fra <- spatialCDFframe(model, covariate, dimyx=dimyx, eps=eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) ## calculate the test statistic result <- spatialCDFtestCalc(fra, test=test, ...) if(is.poisson(model)) return(result) ## Gibbs model: perform Monte Carlo test result$poisson.p.value <- pobs <- result$p.value result$poisson.statistic <- tobs <- result$statistic Xsim <- simulate(model, nsim=nsim, progress=verbose) sim.pvals <- sim.stats <- numeric(nsim) if(verbose) { cat("Processing.. 
") state <- list() } for(i in seq_len(nsim)) { model.i <- update(model, Xsim[[i]]) fra.i <- spatialCDFframe(model.i, covariate, dimyx=dimyx, eps=eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) res.i <- spatialCDFtestCalc(fra.i, test=test, ..., details=FALSE) sim.pvals[i] <- res.i$p.value sim.stats[i] <- res.i$statistic if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") result$sim.pvals <- sim.pvals result$sim.stats <- sim.stats ## Monte Carlo p-value ## For tied p-values, first compare values of test statistics ## (because p = 0 may occur due to rounding) ## otherwise resolve ties by randomisation nless <- sum(sim.pvals < pobs) nplus <- sum(sim.pvals == pobs & sim.stats > tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) ## modify the 'htest' entries testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Monte Carlo spatial", testname, "test", "of Gibbs process in", fra$info$spacename) return(result) } spatialCDFtestCalc <- function(fra, test=c("ks", "cvm", "ad"), ..., details=TRUE) { test <- match.arg(test) values <- fra$values info <- fra$info ## Test uniformity of transformed values U <- values$U result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) # shortcut for internal use only if(!details) return(result) ## add a full explanation, internal data, etc. 
## modify the 'htest' entries csr <- info$csr ispois <- info$ispois modelname <- if(csr) "CSR" else if(ispois) "inhomogeneous Poisson process" else "Gibbs process" testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test of", modelname, "in", info$spacename) result$data.name <- paste("covariate", sQuote(singlestring(info$covname)), "evaluated at points of", sQuote(info$dataname), "\n and transformed to uniform distribution under", if(csr) info$modelname else sQuote(info$modelname)) ## include internal data attr(result, "frame") <- fra ## additional class 'cdftest' class(result) <- c("cdftest", class(result)) return(result) } spatialCDFframe <- function(model, covariate, ..., jitter=TRUE) { # evaluate CDF of covariate values at data points and at pixels stuff <- evalCovar(model, covariate, ..., jitter=jitter) # extract values <- stuff$values # info <- stuff$info Zvalues <- values$Zvalues lambda <- values$lambda weights <- values$weights ZX <- values$ZX # compute empirical cdf of Z values at points of X FZX <- ecdf(ZX) # form weighted cdf of Z values in window wts <- lambda * weights sumwts <- sum(wts) FZ <- ewcdf(Zvalues, wts/sumwts) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) minZX <- min(ZX, na.rm=TRUE) minxxx <- min(xxx, na.rm=TRUE) if(minxxx > minZX) { xxx <- c(minZX, xxx) yyy <- c(0, yyy) } maxZX <- max(ZX, na.rm=TRUE) maxxxx <- max(xxx, na.rm=TRUE) if(maxxxx < maxZX) { xxx <- c(xxx, maxZX) yyy <- c(yyy, 1) } # make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) # now apply cdf U <- FZ(ZX) if(jitter) { ## Z values have already been jittered, but this does not guarantee ## that U values are distinct nU <- length(U) U <- U + runif(nU, -1, 1)/max(100, 2*nU) U <- pmax(0, pmin(1, U)) } # pack up stuff$values$FZ <- FZ stuff$values$FZX <- FZX stuff$values$U <- U stuff$values$EN <- sumwts ## integral of intensity 
= expected number of pts class(stuff) <- "spatialCDFframe" return(stuff) } plot.cdftest <- function(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend=TRUE) { style <- match.arg(style) fram <- attr(x, "frame") if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style values <- attr(x, "prep") info <- attr(x, "info") } # cdf of covariate Z over window FZ <- values$FZ # cdf of covariate values at data points FZX <- values$FZX # blurb covname <- info$covname covdescrip <- switch(covname, x="x coordinate", y="y coordinate", paste("covariate", dQuote(covname))) # plot it switch(style, cdf={ # plot both cdf's superimposed qZ <- get("x", environment(FZ)) pZ <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of", covdescrip), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=qZ, y=pZ, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) if(do.legend) legend("topleft", c("observed", "expected"), lwd=c(lwd,lwd0), col=c(col2hex(col), col2hex(col0)), lty=c(lty2char(lty),lty2char(lty0))) }, PP={ # plot FZX o (FZ)^{-1} pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) p0 <- FZ(qX) do.call(plot.default, resolve.defaults( list(x=p0, y=pX), list(...), list(col=col), list(xlim=c(0,1), ylim=c(0,1), xlab="Theoretical probability", ylab="Observed probability", main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }, QQ={ # plot (FZX)^{-1} o FZ pZ <- get("y", environment(FZ)) qZ <- get("x", environment(FZ)) FZinverse <- approxfun(pZ, qZ, rule=2) pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) qZX <- FZinverse(pX) Zrange <- range(qZ, qX, qZX) xlab <- paste("Theoretical quantile of", covname) ylab <- paste("Observed quantile of", covname) do.call(plot.default, resolve.defaults( 
                       list(x=qZX, y=qX),
                       list(...),
                       list(col=col),
                       list(xlim=Zrange, ylim=Zrange,
                            xlab=xlab, ylab=ylab, main="")))
           ## reference line: perfect agreement of quantiles
           abline(0,1, lwd=lwd0, col=col0, lty=lty0)
         })
  return(invisible(NULL))
}
spatstat/R/hierarchy.R0000644000176200001440000000257413333543255014435 0ustar liggesusers## hierarchy.R
##
## Support functions for hierarchical interactions
##
## $Revision: 1.1 $ $Date: 2015/05/26 08:39:56 $

## Construct an object of class 'hierarchicalordering' representing an
## ordering of the types s. The argument i may be an existing
## hierarchicalordering (relabelled in place), a character vector of
## type names, or a vector of indices into s.
hierarchicalordering <- function(i, s) {
  s <- as.character(s)
  if(inherits(i, "hierarchicalordering")) {
    ## already a hierarchical ordering: just replace the labels
    if(length(s) != length(i$labels))
      stop("Tried to change the number of types in the hierarchical order")
    i$labels <- s
    return(i)
  }
  n <- length(s)
  ## match entries of i against type names (if character) or positions
  possible <- if(is.character(i)) s else seq_len(n)
  j <- match(i, possible)
  if(any(uhoh <- is.na(j)))
    stop(paste("Unrecognised", ngettext(sum(uhoh), "level", "levels"),
               commasep(sQuote(i[uhoh])),
               "amongst possible levels", commasep(sQuote(s))))
  if(length(j) < n)
    stop("Ordering is incomplete")
  ord <- order(j)
  ## m is a scratch n x n matrix used only for row()/col() indices
  m <- matrix(, n, n)
  ## relation[a,b] is TRUE when type a precedes (or equals) type b
  ## in the hierarchical order
  rel <- matrix(ord[row(m)] <= ord[col(m)], n, n)
  dimnames(rel) <- list(s, s)
  x <- list(indices=j, ordering=ord, labels=s, relation=rel)
  class(x) <- "hierarchicalordering"
  x
}

## Print method: displays the ordering as "a ~> b ~> c"
print.hierarchicalordering <- function(x, ...) {
  splat(x$labels[x$indices], collapse=" ~> ")
  invisible(NULL)
}

## Format a matrix for printing, blanking out NA entries and
## (when h is a hierarchical ordering) entries not permitted
## by the ordering relation.
hiermat <- function (x, h) {
  stopifnot(is.matrix(x))
  isna <- is.na(x)
  x[] <- as.character(x)
  x[isna] <- ""
  if(inherits(h, "hierarchicalordering")) ## allows h to be NULL, etc
    x[!(h$relation)] <- ""
  return(noquote(x))
}
spatstat/R/pairpiece.R0000644000176200001440000001046413333543255014415 0ustar liggesusers#
#
#    pairpiece.S
#
#    $Revision: 1.23 $	$Date: 2018/03/15 07:37:41 $
#
#    A pairwise interaction process with piecewise constant potential
#
#    PairPiece()   create an instance of the process
#                 [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#

PairPiece <- local({

  # .... auxiliary functions ........
delP <- function(i, r) { r <- r[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Strauss(r)) return(PairPiece(r)) } # ..... template .......... BlankPairPiece <- list( name = "Piecewise constant pairwise interaction process", creator = "PairPiece", family = "pairwise.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL), # filled in later parnames = "interaction thresholds", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas if(!all(is.finite(gamma))) return(FALSE) return(all(gamma <= 1) || gamma[1] == 0) }, project = function(coeffs, self){ # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction thresholds r[i] r <- self$par$r # check for NA or Inf bad <- !is.finite(gamma) # gamma > 1 forbidden unless hard core ishard <- is.finite(gamma[1]) && (gamma[1] == 0) if(!ishard) bad <- bad | (gamma > 1) if(!any(bad)) return(NULL) if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delP(firstbad, r)) } else { # consider all candidate submodels subs <- 
lapply(which(bad), delP, r=r) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(all(is.na(coeffs))) return(max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) if(!any(active)) return(0) else return(max(r[active])) }, Mayer=function(coeffs, self) { # second Mayer cluster integral r <- self$par$r gamma <- (self$interpret)(coeffs, self)$param$gammas # areas of annuli between r[i-1], r[i] areas <- pi * diff(c(0,r)^2) return(sum(areas * (1-gamma))) }, version=NULL # filled in later ) class(BlankPairPiece) <- "interact" PairPiece <- function(r) { instantiate.interact(BlankPairPiece, list(r=r)) } PairPiece <- intermaker(PairPiece, BlankPairPiece) PairPiece }) spatstat/R/simulatelppm.R0000644000176200001440000000157213333543255015170 0ustar liggesusers## ## simulatelppm.R ## ## Simulation of lppm objects ## ## $Revision: 1.7 $ $Date: 2018/05/12 16:14:05 $ ## simulate.lppm <- function(object, nsim=1, ..., new.coef=NULL, progress=(nsim > 1), drop=FALSE) { starttime <- proc.time() if(!is.poisson(object$fit)) stop("Simulation of non-Poisson models is not yet implemented") lambda <- predict(object, ..., new.coef=new.coef) lmax <- if(is.im(lambda)) max(lambda) else unlist(lapply(lambda, max)) L <- as.linnet(object) result <- vector(mode="list", length=nsim) pstate <- list() for(i in seq_len(nsim)) { if(progress) pstate <- progressreport(i, nsim, state=pstate) result[[i]] <- rpoislpp(lambda, L, lmax=lmax) } result <- simulationresult(result, nsim, drop) result <- timed(result, starttime=starttime) return(result) } spatstat/R/alltypes.R0000644000176200001440000001613413443662267014320 0ustar liggesusers# # alltypes.R # # $Revision: 1.37 $ $Date: 2019/03/18 09:22:24 $ # # alltypes <- function(X, fun="K", ..., dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) { # # Function 'alltypes' --- calculates a summary function for # each type, or each pair of types, in a 
multitype point pattern # if(is.ppp(X)) classname <- "ppp" else if(is.lpp(X)) classname <- "lpp" else stop("X should be a ppp or lpp object") if(is.null(dataname)) dataname <- short.deparse(substitute(X)) # -------------------------------------------------------------------- # First inspect marks if(!is.marked(X)) { nmarks <- 0 marklabels <- "" } else { if(!is.multitype(X)) stop("the marks must be a factor") # ensure type names are parseable (for mathematical labels) levels(marks(X)) <- make.parseable(levels(marks(X))) mks <- marks(X) ma <- levels(mks) nmarks <- length(ma) marklabels <- paste(ma) } # --------------------------------------------------------------------- # determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") # --------------------------------------------------------------------- # determine function to be called if(is.function(fun)) { estimator <- fun } else if(is.character(fun)) { # First try matching one of the standard abbreviations K, G etc estimator <- getSumFun(fun, classname, (nmarks > 0), fatal=FALSE) if(is.null(estimator)) estimator <- get(fun, mode="function") } else stop(paste(sQuote("fun"), "should be a function or a character string")) # ------------------------------------------------------------------ # determine how the function shall be called. 
  #
  ## count how many of the type arguments 'i','j' the estimator accepts:
  ## 0 = unmarked-style function, 1 = Gcross-style, 2 = Kcross-style
  indices.expected <- sum(c("i", "j") %in% names(formals(estimator)))
  ## if the estimator takes no type arguments but there are several types,
  ## apply it separately to each sub-pattern of split(X)
  apply.to.split <- (indices.expected == 0 && nmarks > 1)
  if(apply.to.split)
    ppsplit <- split(X)
  # --------------------------------------------------------------------
  # determine array dimensions and margin labels
  witch <-
    if(nmarks == 0)
      matrix(1L, nrow=1L, ncol=1L,
             dimnames=list("",""))
    else if (nmarks == 1)
      matrix(1L, nrow=1L, ncol=1L,
             dimnames=list(marklabels, marklabels))
    else if(indices.expected != 2)
      ## one row per type (functions of a single type i)
      matrix(1L:nmarks, nrow=nmarks, ncol=1L,
             dimnames=list(marklabels, ""))
    else
      ## full array of ordered pairs of types (i,j)
      matrix(1L:(nmarks^2),ncol=nmarks,nrow=nmarks, byrow=TRUE,
             dimnames=list(marklabels, marklabels))
  # ------------ start computing -------------------------------
  # if computing envelopes, first generate simulated patterns
  # using undocumented feature of envelope()
  if(envelope && reuse) {
    ## eject="patterns" makes envelope() return the simulated patterns
    ## so they can be reused for every cell of the array
    L <- do.call(spatstat::envelope,
                 resolve.defaults(
                   list(X, fun=estimator),
                   list(internal=list(eject="patterns")),
                   list(...),
                   switch(1L+indices.expected,
                          NULL,
                          list(i=ma[1L]),
                          list(i=ma[1L], j=ma[2L]),
                          NULL),
                   list(verbose=verb)))
    intern <- attr(L, "internal")
  } else intern <- L <- NULL
  # compute function array and build up 'fasp' object
  fns <- list()
  k <- 0
  ## suppress per-simulation error messages unless verbose
  maxerr.action <- if(verb) "warn" else "null"
  for(i in 1L:nrow(witch)) {
    ## data for this row: a sub-pattern if splitting, else the whole pattern
    Y <- if(apply.to.split) ppsplit[[i]] else X
    for(j in 1L:ncol(witch)) {
      if(verb) cat("i =",i,"j =",j,"\n")
      ## call the estimator with the appropriate number of type arguments,
      ## or wrap it in envelope() when envelopes were requested
      currentfv <-
        if(!envelope)
          switch(1L+indices.expected,
                 estimator(Y, ...),
                 estimator(Y, i=ma[i], ...),
                 estimator(Y, i=ma[i], j=ma[j], ...))
        else
          do.call(spatstat::envelope,
                  resolve.defaults(
                    list(Y, estimator),
                    list(simulate=L, internal=intern),
                    list(verbose=FALSE),
                    list(...),
                    list(Yname=dataname,
                         silent=TRUE,
                         maxerr.action=maxerr.action),
                    switch(1L+indices.expected,
                           NULL,
                           list(i=ma[i]),
                           list(i=ma[i], j=ma[j]),
                           NULL)))
      k <- k+1
      fns[[k]] <- as.fv(currentfv)
    }
  }
  ## detect envelope computations that were abandoned due to errors
  einfo <- lapply(fns, attr, which="einfo")
  gaveup <- sapply(lapply(einfo, getElement, name="gaveup"), isTRUE)
  if(any(gaveup)) {
    ng <- sum(gaveup)
warning(paste(ng, "out of", length(fns), "envelopes", ngettext(ng, "was", "were"), "not computed, due to errors in evaluating", "the summary functions for simulated patterns")) } # wrap up into 'fasp' object title <- paste(if(nmarks > 1) "array of " else NULL, if(envelope) "envelopes of " else NULL, fname, if(nmarks <= 1) " function " else " functions ", "for ", dataname, ".", sep="") rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } # Lookup table for standard abbreviations of functions getSumFun <- local({ ftable <- rbind( data.frame(class="ppp", marked=FALSE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full=c("Fest", "Gest", "Jest", "Kest", "Lest", "pcf"), stringsAsFactors=FALSE), data.frame(class="ppp", marked=TRUE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full= c("Fest", "Gcross", "Jcross", "Kcross", "Lcross", "pcfcross"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=FALSE, abbrev=c("K", "pcf"), full=c("linearK", "linearpcf"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=TRUE, abbrev=c("K", "pcf"), full=c("linearKcross", "linearpcfcross"), stringsAsFactors=FALSE) ) getfun <- function(abbreviation, classname, ismarked, fatal=TRUE) { matches <- with(ftable, which(abbrev == abbreviation & class == classname & marked == ismarked)) if(length(matches) == 0) { if(!fatal) return(NULL) stop(paste("No match to function abbreviation", sQuote(abbreviation), "for class", sQuote(classname))) } if(length(matches) > 1) stop("Ambiguous function name") fullname <- ftable$full[matches] get(fullname, mode="function") } getfun }) spatstat/R/rmhmodel.ppm.R0000644000176200001440000003321513333543255015055 0ustar liggesusers# # rmhmodel.ppm.R # # convert ppm object into format palatable to rmh.default # # $Revision: 2.64 $ $Date: 2017/06/05 10:31:58 $ # # .Spatstat.rmhinfo # rmhmodel.ppm() # .Spatstat.Rmhinfo <- list( "Multitype Hardcore process" = function(coeffs, inte) { # hard core radii r[i,j] hradii 
<- inte$par[["hradii"]] return(list(cif='multihard', par=list(hradii=hradii), ntypes=ncol(hradii))) }, "Lennard-Jones process" = function(coeffs, inte) { pa <- inte$interpret(coeffs,inte)$param sigma <- pa[["sigma"]] epsilon <- pa[["epsilon"]] return(list(cif='lennard', par=list(sigma=sigma, epsilon=epsilon), ntypes=1)) }, "Fiksel process" = function(coeffs, inte) { hc <- inte$par[["hc"]] r <- inte$par[["r"]] kappa <- inte$par[["kappa"]] a <- inte$interpret(coeffs,inte)$param$a return(list(cif='fiksel', par=list(r=r,hc=hc,kappa=kappa,a=a), ntypes=1)) }, "Diggle-Gates-Stibbard process" = function(coeffs, inte) { rho <- inte$par[["rho"]] return(list(cif='dgs', par=list(rho=rho), ntypes=1)) }, "Diggle-Gratton process" = function(coeffs, inte) { kappa <- inte$interpret(coeffs,inte)$param$kappa delta <- inte$par[["delta"]] rho <- inte$par[["rho"]] return(list(cif='diggra', par=list(kappa=kappa,delta=delta,rho=rho), ntypes=1)) }, "Hard core process" = function(coeffs, inte) { hc <- inte$par[["hc"]] return(list(cif='hardcore', par=list(hc=hc), ntypes=1)) }, "Geyer saturation process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] sat <- inte$par[["sat"]] return(list(cif='geyer', par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Soft core process" = function(coeffs, inte) { kappa <- inte$par[["kappa"]] sigma <- inte$interpret(coeffs,inte)$param$sigma return(list(cif="sftcr", par=list(sigma=sigma,kappa=kappa), ntypes=1)) }, "Strauss process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif = "strauss", par = list(gamma = gamma, r = r), ntypes=1)) }, "Strauss - hard core process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] hc <- inte$par[["hc"]] return(list(cif='straush', par=list(gamma=gamma,r=r,hc=hc), ntypes=1)) }, "Triplets process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma 
r <- inte$par[["r"]] return(list(cif = "triplets", par = list(gamma = gamma, r = r), ntypes=1)) }, "Penttinen process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par[["r"]] return(list(cif='penttinen', par=list(gamma=gamma, r=r), ntypes=1)) }, "Multitype Strauss process" = function(coeffs, inte) { # interaction radii r[i,j] radii <- inte$par[["radii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straussm', par=list(gamma=gamma,radii=radii), ntypes=ncol(radii))) }, "Multitype Strauss Hardcore process" = function(coeffs, inte) { # interaction radii r[i,j] iradii <- inte$par[["iradii"]] # hard core radii r[i,j] hradii <- inte$par[["hradii"]] # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straushm', par=list(gamma=gamma,iradii=iradii,hradii=hradii), ntypes=ncol(iradii))) }, "Piecewise constant pairwise interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] gamma <- (inte$interpret)(coeffs, inte)$param$gammas h <- stepfun(r, c(gamma, 1)) return(list(cif='lookup', par=list(h=h), ntypes=1)) }, "Area-interaction process" = function(coeffs, inte) { r <- inte$par[["r"]] eta <- (inte$interpret)(coeffs, inte)$param$eta return(list(cif='areaint', par=list(eta=eta,r=r), ntypes=1)) }, "hybrid Geyer process" = function(coeffs, inte) { r <- inte$par[["r"]] sat <- inte$par[["sat"]] gamma <- (inte$interpret)(coeffs,inte)$param$gammas return(list(cif='badgey',par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Hybrid interaction"= function(coeffs, inte){ # for hybrids, $par is a list of the component interactions interlist <- inte$par # check for Poisson components ispois <- unlist(lapply(interlist, is.poisson)) if(all(ispois)) { # reduces to Poisson Z <- list(cif='poisson', par=list()) return(Z) } else if(any(ispois)) { # remove Poisson components interlist <- interlist[!ispois] } # N <- length(interlist) cifs 
<- character(N) pars <- vector(mode="list", length=N) ntyp <- integer(N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("Simulation of a hybrid-of-hybrid interaction is not implemented") # get RMH mapping for I-th component siminfoI <- .Spatstat.Rmhinfo[[interI$name]] if(is.null(siminfoI)) stop(paste("Simulation of a fitted", sQuote(interI$name), "has not yet been implemented"), call.=FALSE) # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) # extract coefficients # (there may be none, if this interaction is an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to 'siminfoI' if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # compute RMH info ZI <- siminfoI(coeffsI, interI) cifs[i] <- ZI$cif pars[[i]] <- ZI$par ntyp[i] <- ZI$ntypes } nt <- unique(ntyp[ntyp != 1]) if(length(nt) > 1) stop(paste("Hybrid components have different numbers of types:", commasep(nt))) if(N == 1) { # single cif: revert to original format: par is a list of parameters Z <- list(cif=cifs[1], par=pars[[1]], ntypes=ntyp) } else { # hybrid cif: par is a list of lists of parameters Z <- list(cif=cifs, par=pars, ntypes=ntyp) } return(Z) } ) # OTHER MODELS not yet implemented: # # # interaction object rmh.default # ------------------ ----------- # # OrdThresh # rmhmodel.ppm <- function(model, w, ..., verbose=TRUE, project=TRUE, control=rmhcontrol(), new.coef=NULL) { ## converts ppm object `model' into format palatable to rmh.default verifyclass(model, "ppm") argh <- list(...) 
if(!is.null(new.coef)) model <- tweak.coefs(model, new.coef) ## Ensure the fitted model is valid ## (i.e. exists mathematically as a point process) if(!valid.ppm(model)) { if(project) { if(verbose) cat("Model is invalid - projecting it\n") model <- project.ppm(model, fatal=TRUE) } else stop("The fitted model is not a valid point process") } if(verbose) cat("Extracting model information...") ## Extract essential information Y <- summary(model, quick="no variances") if(Y$marked && !Y$multitype) stop("Not implemented for marked point processes other than multitype") if(Y$uses.covars && is.data.frame(model$covariates)) stop(paste("This model cannot be simulated, because the", "covariate values were given as a data frame.")) ## enforce defaults for `control' control <- rmhcontrol(control) ## adjust to peculiarities of model control <- rmhResolveControl(control, model) ######## Interpoint interaction if(Y$poisson) { Z <- list(cif="poisson", par=list()) # par is filled in later } else { ## First check version number of ppm object if(Y$antiquated) stop(paste("This model was fitted by a very old version", "of the package: spatstat", Y$version, "; simulation is not possible.", "Re-fit the model using your original code")) else if(Y$old) warning(paste("This model was fitted by an old version", "of the package: spatstat", Y$version, ". 
Re-fit the model using update.ppm", "or your original code")) ## Extract the interpoint interaction object inte <- Y$entries$interaction ## Determine whether the model can be simulated using rmh siminfo <- .Spatstat.Rmhinfo[[inte$name]] if(is.null(siminfo)) stop(paste("Simulation of a fitted", sQuote(inte$name), "has not yet been implemented")) ## Get fitted model's canonical coefficients coeffs <- Y$entries$coef if(newstyle.coeff.handling(inte)) { ## extract only the interaction coefficients Vnames <- Y$entries$Vnames IsOffset <- Y$entries$IsOffset coeffs <- coeffs[Vnames[!IsOffset]] } ## Translate the model to the format required by rmh.default Z <- siminfo(coeffs, inte) if(is.null(Z)) stop("The model cannot be simulated") else if(is.null(Z$cif)) stop(paste("Internal error: no cif returned from .Spatstat.Rmhinfo")) } ## Don't forget the types if(Y$multitype && is.null(Z$types)) Z$types <- levels(Y$entries$marks) ######## Window for result if(missing(w) || is.null(w)) { ## check for outdated argument name 'win' if(!is.na(m <- match("win", names(argh)))) { warning("Argument 'win' to rmhmodel.ppm is deprecated; use 'w'") w <- argh[[m]] argh <- argh[-m] } else w <- Y$entries$data$window } Z$w <- w ######## Expanded window for simulation? 
## rmhResolveExpansion: decide the window in which simulation will
## actually take place, given the requested expansion rule in 'control'
## and any covariate images that limit how far we can expand.
## Returns list(wsim = simulation window, expanded = logical flag).
rmhResolveExpansion <- function(win, control, imagelist, itype="covariate") {
  ex <- control$expand
  ## NOTE: ex$force.noexp is implied by !will.expand(ex),
  ## so no separate check is needed here.
  if(!will.expand(ex))
    return(list(wsim=win, expanded=FALSE))
  ## candidate expanded window
  bigwin <- expand.owin(win, ex)
  ## keep only genuine pixel images from the supplied list
  ims <- imagelist[unlist(lapply(imagelist, is.im))]
  if(length(ims) == 0) {
    ## no image data: unlimited expansion is feasible
    return(list(wsim=bigwin, expanded=TRUE))
  }
  ## expansion is limited to the common domain of the image data
  domains <- lapply(ims, as.owin)
  common <- do.call(intersect.owin, unname(domains))
  if(!is.subset.owin(bigwin, common)) {
    ## proposed window exceeds the image data
    if(ex$force.exp)
      stop(paste("Cannot expand the simulation window,",
                 "because the", itype, "images do not cover",
                 "the expanded window"), call.=FALSE)
    ## fall back to the largest feasible window
    bigwin <- intersect.owin(bigwin, common)
  }
  return(list(wsim=bigwin, expanded=TRUE))
}
## is.cadlag: test whether a step function is right-continuous
## ("cadlag" = continue a droite, limites a gauche).
## A step function is cadlag when its value at every jump point
## equals its value just to the right of that jump.
is.cadlag <- function (s) {
  if(!is.stepfun(s)) stop("s is not a step function.\n")
  jumps <- knots(s)
  nj <- length(jumps)
  ## midpoints between successive jumps, plus one point past the last jump
  nextjump <- c(jumps[-1L], jumps[nj] + 1)
  mids <- (jumps + nextjump)/2
  ## compare value at each jump with value just after it
  isTRUE(all.equal(s(jumps), s(mids)))
}
## bdist.points: distance from each point of a point pattern X
## to the boundary of its window.
## Returns a numeric vector with one entry per point.
bdist.points <- function(X) {
  verifyclass(X, "ppp")
  if(X$n == 0)
    return(numeric(0))
  px <- X$x
  py <- X$y
  W <- X$window
  result <- switch(W$type,
    rectangle = {
      ## distance to the nearest of the four sides
      x0 <- min(W$xrange)
      x1 <- max(W$xrange)
      y0 <- min(W$yrange)
      y1 <- max(W$yrange)
      pmin.int(px - x0, x1 - px, py - y0, y1 - py)
    },
    polygonal = {
      ## minimum distance from each point to any boundary edge
      distppllmin(cbind(px, py), edges(W)$ends)$min.d
    },
    mask = {
      ## look up the boundary distance of the nearest raster pixel
      bmat <- bdist.pixels(W, style="matrix")
      ij <- nearest.raster.point(px, py, W)
      bmat[cbind(ij$row, ij$col)]
    },
    stop("Unrecognised window type", W$type))
  return(result)
}
type", w$type) ) # reshape it b <- matrix(b, nrow=masque$dim[1L], ncol=masque$dim[2L]) switch(style, coords={ # format which can be plotted by image(), persp() etc return(list(x=masque$xcol, y=masque$yrow, z=t(b))) }, matrix={ # return matrix (for internal use by package) return(b) }, image={ bim <- im(b, xcol=masque$xcol, yrow=masque$yrow, unitname=unitname(masque)) return(bim) }, stop(paste("Unrecognised option for style:", style))) } erodemask <- function(w, r, strict=FALSE) { # erode a binary image mask without changing any other entries verifyclass(w, "owin") if(w$type != "mask") stop(paste("window w is not of type", sQuote("mask"))) if(!is.numeric(r) || length(r) != 1L) stop("r must be a single number") if(r < 0) stop("r must be nonnegative") bb <- bdist.pixels(w, style="matrix") if(r > max(bb)) warning("eroded mask is empty") if(identical(strict, TRUE)) w$m <- (bb > r) else w$m <- (bb >= r) return(w) } "Frame<-.owin" <- function(X, value) { stopifnot(is.rectangle(value)) W <- Frame(X) if(!is.subset.owin(W, value)) W <- intersect.owin(W, value) rebound.owin(X, value) } rebound.owin <- local({ rebound.owin <- function(x, rect) { w <- x verifyclass(rect, "owin") if(is.empty(w)) return(emptywindow(rect)) verifyclass(w, "owin") if(!is.subset.owin(as.rectangle(w), rect)) { bb <- boundingbox(w) if(!is.subset.owin(bb, rect)) stop(paste("The new rectangle", sQuote("rect"), "does not contain the window", sQuote("win"))) } xr <- rect$xrange yr <- rect$yrange ## determine unitname uu <- list(unitname(x), unitname(rect)) uu <- unique(uu[sapply(uu, is.vanilla)]) if(length(uu) > 1) { warning("Arguments of rebound.owin have incompatible unitnames", call.=FALSE) uu <- list() } un <- if(length(uu)) uu[[1]] else NULL ## switch(w$type, rectangle={ return(owin(xr, yr, poly=list(x=w$xrange[c(1L,2L,2L,1L)], y=w$yrange[c(1L,1L,2L,2L)]), unitname = un, check=FALSE)) }, polygonal={ return(owin(xr, yr, poly=w$bdry, unitname=un, check=FALSE)) }, mask={ xcol <- newseq(w$xcol, xr) yrow 
## Generic constructor for Metropolis-Hastings control parameters.
rmhcontrol <- function(...) {
  UseMethod("rmhcontrol")
}

## An existing 'rmhcontrol' object passes through unchanged.
rmhcontrol.rmhcontrol <- function(...) {
  arglist <- list(...)
  if(length(arglist) != 1)
    stop("Arguments not understood")
  return(arglist[[1]])
}

## A plain list is unpacked and handed on to rmhcontrol.default:
## a single unnamed list is treated as the full set of parameters,
## otherwise the named entries are matched individually.
rmhcontrol.list <- function(...) {
  arglist <- list(...)
  tags <- names(arglist)
  if(length(arglist) == 1 && !any(nzchar(tags))) {
    do.call(rmhcontrol.default, arglist[[1]])
  } else {
    do.call.matched(rmhcontrol.default, arglist)
  }
}
nargh <- length(argh) if(nargh > 0) { # allow rmhcontrol(NULL), otherwise flag an error if(!(nargh == 1 && is.null(argh[[1]]))) stop(paste("Unrecognised arguments to rmhcontrol;", "valid arguments are listed in help(rmhcontrol.default)")) } # impose default values if(missing(p)) p <- spatstat.options("rmh.p") if(missing(q)) q <- spatstat.options("rmh.q") if(missing(nrep)) nrep <- spatstat.options("rmh.nrep") # validate arguments if(!is.numeric(p) || length(p) != 1 || p < 0 || p > 1) stop("p should be a number in [0,1]") if(!is.numeric(q) || length(q) != 1 || q < 0 || q > 1) stop("q should be a number in [0,1]") if(!is.numeric(nrep) || length(nrep) != 1 || nrep < 1) stop("nrep should be an integer >= 1") nrep <- as.integer(nrep) if(!is.numeric(nverb) || length(nverb) != 1 || nverb < 0 || nverb > nrep) stop("nverb should be an integer <= nrep") nverb <- as.integer(nverb) if(!is.logical(fixall) || length(fixall) != 1) stop("fixall should be a logical value") if(!is.null(periodic) && (!is.logical(periodic) || length(periodic) != 1)) stop(paste(sQuote("periodic"), "should be a logical value or NULL")) if(saving <- !is.null(nsave)) { nsave <- as.integer(as.vector(nsave)) if(length(nsave) == 1L) { if(nsave <= 0) stop("nsave should be a positive integer") stopifnot(nsave < nrep) } else { stopifnot(all(nsave > 0)) stopifnot(sum(nsave) <= nrep) } if(missing(nburn) || is.null(nburn)) { nburn <- min(nsave[1], nrep-sum(nsave)) } else { check.1.integer(nburn) stopifnot(nburn + sum(nsave) <= nrep) } } stopifnot(is.logical(track)) pstage <- match.arg(pstage) ################################################################# # Conditioning on point configuration # # condtype = "none": no conditioning # condtype = "Palm": conditioning on the presence of specified points # condtype = "window": conditioning on the configuration in a subwindow # if(is.null(x.cond)) { condtype <- "none" n.cond <- NULL } else if(is.ppp(x.cond)) { condtype <- "window" n.cond <- x.cond$n } else 
if(is.data.frame(x.cond)) { if(ncol(x.cond) %in% c(2,3)) { condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of columns in data frame x.cond") } else if(is.list(x.cond)) { if(length(x.cond) %in% c(2,3)) { x.cond <- as.data.frame(x.cond) condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of components in list x.cond") } else stop("Unrecognised format for x.cond") if(condtype == "Palm" && n.cond == 0) { warning(paste("Ignored empty configuration x.cond;", "conditional (Palm) simulation given an empty point pattern", "is equivalent to unconditional simulation"), call.=FALSE) condtype <- "none" x.cond <- NULL n.cond <- NULL } ################################################################# # Fixing the number of points? # # fixcode = 1 <--> no conditioning # fixcode = 2 <--> conditioning on n = number of points # fixcode = 3 <--> conditioning on the number of points of each type. fixcode <- 2 - (p<1) + fixall - fixall*(p<1) fixing <- switch(fixcode, "none", "n.total", "n.each.type") # Warn about silly combination if(fixall && p < 1) warning("fixall = TRUE conflicts with p < 1. Ignored.", call.=FALSE) ############################################################### # `expand' determines expansion of the simulation window expand <- rmhexpand(expand) # No expansion is permitted if we are conditioning on the # number of points if(fixing != "none") { if(expand$force.exp) stop(paste("When conditioning on the number of points,", "no expansion may be done."), call.=FALSE) # no expansion expand <- .no.expansion } ################################################################### # return augmented list out <- list(p=p, q=q, nrep=nrep, nverb=nverb, expand=expand, periodic=periodic, ptypes=ptypes, fixall=fixall, fixcode=fixcode, fixing=fixing, condtype=condtype, x.cond=x.cond, saving=saving, nsave=nsave, nburn=nburn, track=track, pstage=pstage) class(out) <- c("rmhcontrol", class(out)) return(out) } print.rmhcontrol <- function(x, ...) 
{ verifyclass(x, "rmhcontrol") splat("Metropolis-Hastings algorithm control parameters") splat("Probability of shift proposal: p =", x$p) if(x$fixing == "none") { splat("Conditional probability of death proposal: q =", x$q) if(!is.null(x$ptypes)) { splat("Birth proposal probabilities for each type of point:") print(x$ptypes) } } switch(x$fixing, none={}, n.total=splat("The total number of points is fixed"), n.each.type=splat("The number of points of each type is fixed")) switch(x$condtype, none={}, window={ splat("Conditional simulation given the", "configuration in a subwindow") print(x$x.cond$window) }, Palm={ splat("Conditional simulation of Palm type") }) splat("Number of M-H iterations: nrep =", x$nrep) if(x$saving) { nsave <- x$nsave len <- length(nsave) howmany <- if(len == 1L) nsave else if(len < 5L) commasep(nsave) else paste(paste(nsave[1:5], collapse=", "), "[...]") splat("After a burn-in of", x$nburn, "iterations,", "save point pattern after every", howmany, "iterations.") } pstage <- x$pstage %orifnull% "start" hdr <- "Generate random proposal points:" switch(pstage, start = splat(hdr, "at start of simulations."), block = splat(hdr, "before each block of", if(length(x$nsave) == 1L) x$nsave else "", "iterations.")) cat(paste("Track proposal type and acceptance/rejection?", if(x$track) "yes" else "no", "\n")) if(x$nverb > 0) cat(paste("Progress report every nverb=", x$nverb, "iterations\n")) else cat("No progress reports (nverb = 0).\n") # invoke print.rmhexpand print(x$expand) cat("Periodic edge correction? ") if(is.null(x$periodic)) cat("Not yet determined.\n") else if(x$periodic) cat("Yes.\n") else cat("No.\n") # return(invisible(NULL)) } default.rmhcontrol <- function(model, w=NULL) { # set default for 'expand' return(rmhcontrol(expand=default.expand(model, w=w))) } update.rmhcontrol <- function(object, ...) 
## pickoption: match a user-supplied string (or strings) against a table
## of recognised option names, with optional partial matching.
##   what        : noun used in error messages (e.g. "option", "correction")
##   key         : the user's choice(s)
##   keymap      : named vector mapping recognised names to internal codes
##   exact       : require exact (not partial) matching
##   list.on.err : print the valid options when matching fails
##   die         : stop (TRUE) or return NULL (FALSE) on failure
##   multi       : allow several choices at once
##   allow.all   : when multi=TRUE, the keyword "all" selects every option
pickoption <- function(what="option", key, keymap, ...,
                       exact=FALSE, list.on.err=TRUE, die=TRUE,
                       multi=FALSE, allow.all=TRUE) {
  keyname <- short.deparse(substitute(key))
  if(!is.character(key))
    stop(paste(keyname,
               "must be a character string",
               if(multi) "or strings" else NULL))
  if(length(key) == 0)
    stop(paste("Argument", sQuote(keyname), "has length zero"))
  key <- unique(key)
  if(!multi && length(key) > 1)
    stop(paste("Must specify only one", what, sQuote(keyname)))
  ## "all" is only meaningful when multiple selections are permitted
  allow.all <- allow.all && multi
  ## translate each key to a position in the keymap
  id <-
    if(allow.all && "all" %in% key) {
      seq_along(keymap)
    } else if(exact) {
      match(key, names(keymap), nomatch=NA)
    } else {
      pmatch(key, names(keymap), nomatch=NA)
    }
  nbg <- is.na(id)
  if(any(nbg)) {
    ## at least one key had no match in the table
    whinge <- paste("unrecognised", what,
                    paste(dQuote(key[nbg]), collapse=", "),
                    "in argument", sQuote(keyname))
    if(list.on.err) {
      cat(paste(whinge, "\n", "Options are:"),
          paste(dQuote(names(keymap)), collapse=","), "\n")
    }
    if(die) stop(whinge, call.=FALSE) else return(NULL)
  }
  key <- unique(keymap[id])
  names(key) <- NULL
  return(key)
}
diggraterms <- function(X, Y, idX, idY, delta, rho, splitInf=FALSE) { stopifnot(is.numeric(delta)) stopifnot(is.numeric(rho)) stopifnot(delta < rho) ## sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) ## call C routine if(!splitInf) { ## usual case: allow cif to be zero because of hard core out <- .C("Ediggra", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), values = as.double(double(nX)), PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values } else { ## split off the hard core terms and return them separately out <- .C("ESdiggra", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), positive = as.double(double(nX)), hardcore = as.integer(integer(nX)), PACKAGE = "spatstat") answer <- integer(nX) hardcore <- logical(nX) answer[oX] <- out$positive hardcore[oX] <- as.logical(out$hardcore) attr(answer, "hardcore") <- hardcore } return(answer) } # .......... template object .......... 
## Interaction template for the Diggle-Gratton pairwise interaction.
## Filled in by instantiate.interact() when the user calls DiggleGratton().
BlankDG <- list(
  name = "Diggle-Gratton process",
  creator = "DiggleGratton",
  family = "pairwise.family",  # evaluated later
  ## pairwise potential: log of the interaction function h(d)
  pot = function(d, par) {
    delta <- par$delta
    rho <- par$rho
    above <- (d > rho)
    inrange <- (!above) & (d > delta)
    h <- above + inrange * (d - delta)/(rho - delta)
    return(log(h))
  },
  par = list(delta=NULL, rho=NULL),  # to be filled in later
  parnames = list("lower limit delta", "upper limit rho"),
  hasInf = TRUE,
  selfstart = function(X, self) {
    # self starter for DiggleGratton:
    # choose a default hard core distance from the data if none was given
    nX <- npoints(X)
    if(nX < 2) {
      # not enough points to make any decisions
      return(self)
    }
    md <- minnndist(X)
    if(!is.na(delta <- self$par$delta)) {
      # value fixed by user or previous invocation
      # check it
      if(md < delta)
        warning(paste("Hard core distance delta is too large;",
                      "some data points will have zero probability"))
      return(self)
    }
    if(md == 0)
      warning(paste("Pattern contains duplicated points:",
                    "hard core distance delta must be zero"))
    # take hard core = minimum interpoint distance * n/(n+1)
    deltaX <- md * nX/(nX+1)
    DiggleGratton(delta=deltaX, rho=self$par$rho)
  },
  init = function(self) {
    ## validate the irregular parameters delta and rho
    delta <- self$par$delta
    rho <- self$par$rho
    if(!is.numeric(rho) || length(rho) != 1L)
      stop("upper limit rho must be a single number")
    stopifnot(is.finite(rho))
    if(!is.na(delta)) {
      if(!is.numeric(delta) || length(delta) != 1L)
        stop("lower limit delta must be a single number")
      stopifnot(delta >= 0)
      stopifnot(rho > delta)
    } else stopifnot(rho >= 0)
  },
  update = NULL,  # default OK
  print = NULL,   # default OK
  interpret = function(coeffs, self) {
    ## translate the fitted canonical coefficient into the exponent kappa
    kappa <- as.numeric(coeffs[1L])
    return(list(param=list(kappa=kappa),
                inames="exponent kappa",
                printable=dround(kappa)))
  },
  valid = function(coeffs, self) {
    kappa <- as.numeric(coeffs[1L])
    return(is.finite(kappa) && (kappa >= 0))
  },
  project = function(coeffs, self) {
    ## return NULL if valid, otherwise the nearest valid model (Poisson)
    kappa <- as.numeric(coeffs[1L])
    if(is.finite(kappa) && (kappa >= 0))
      return(NULL)
    return(Poisson())
  },
  irange = function(self, coeffs=NA, epsilon=0, ...) {
    ## interaction range: rho in general, delta if kappa is negligible
    rho <- self$par$rho
    if(all(is.na(coeffs)))
      return(rho)
    kappa <- coeffs[1L]
    delta <- self$par$delta
    if(abs(kappa) <= epsilon)
      return(delta) else return(rho)
  },
  version=NULL,  # evaluated later
  # fast evaluation is available for the border correction only
  can.do.fast=function(X,correction,par) {
    return(all(correction %in% c("border", "none")))
  },
  fasteval=function(X,U,EqualPairs,pairpot,potpars,correction,
                    splitInf=FALSE, ...) {
    ## fast evaluator for DiggleGratton interaction
    if(!all(correction %in% c("border", "none")))
      return(NULL)
    if(spatstat.options("fasteval") == "test")
      message("Using fast eval for DiggleGratton")
    delta <- potpars$delta
    rho <- potpars$rho
    idX <- seq_len(npoints(X))
    idU <- rep.int(-1L, npoints(U))
    idU[EqualPairs[,2L]] <- EqualPairs[,1L]
    values <- diggraterms(U, X, idU, idX, delta, rho, splitInf)
    result <- log(pmax.int(0, values))
    result <- matrix(result, ncol=1L)
    ## BUG FIX: the condition was '!splitInf', but diggraterms() only
    ## attaches the "hardcore" attribute when splitInf=TRUE, so the
    ## old test always assigned NULL and the hard-core indicator was
    ## dropped precisely when the caller requested it.
    if(splitInf)
      attr(result, "-Inf") <- attr(values, "hardcore")
    return(result)
  },
  Mayer=function(coeffs, self) {
    # second Mayer cluster integral
    rho <- self$par$rho
    delta <- self$par$delta
    width <- rho - delta
    kappa <- coeffs[1L]
    ans <- pi * (rho^2
                 - 2 * rho * width/(kappa + 1)
                 + 2 * width^2/((kappa + 1) * (kappa + 2)))
    return(ans)
  }
)

class(BlankDG) <- "interact"

## user-callable creator function
DiggleGratton <- function(delta=NA, rho) {
  instantiate.interact(BlankDG, list(delta=delta, rho=rho))
}

DiggleGratton <- intermaker(DiggleGratton, BlankDG)

DiggleGratton
})
spatstat/R/smoothfun.R0000644000176200001440000000427013344406331014467 0ustar liggesusers##
##  smoothfun.R
##
##  Exact 'funxy' counterpart of Smooth.ppp
##
##  $Revision: 1.9 $ $Date: 2018/09/07 05:29:50 $

Smoothfun <- function(X, ...)
{
  UseMethod("Smoothfun")
}

## Build a function(x,y) object that evaluates the mark-smoothing
## estimate of X at arbitrary query locations, exactly (no pixel
## discretisation), via smoothcrossEngine.
Smoothfun.ppp <- function(X, sigma=NULL, ...,
                          weights=NULL, edge=TRUE, diggle=FALSE) {
  verifyclass(X, "ppp")
  if(!is.marked(X, dfok=TRUE))
    stop("X should be a marked point pattern")
  ## handle weights now
  weightsgiven <- !missing(weights) && !is.null(weights)
  if(weightsgiven) {
    # convert to numeric
    if(is.im(weights)) {
      weights <- safelookup(weights, X) # includes warning if NA
    } else if(is.expression(weights))
      weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame())
    if(length(weights) == 0) weightsgiven <- FALSE
  }
  if(weightsgiven) {
    check.nvector(weights, npoints(X))
  } else weights <- NULL
  ##
  X <- coerce.marks.numeric(X)
  ## 'stuff' is captured in the closure 'g' below and is also retrieved
  ## later (by name) from environment(g), e.g. in as.im.Smoothfun
  stuff <- list(Xdata=X, values=marks(X), weights=weights,
                edge=edge, diggle=diggle, ...)
  ##
  ## determine smoothing parameters
  ker <- resolve.2D.kernel(sigma=sigma, ...,
                           x=X, bwfun=bw.smoothppp, allow.zero=TRUE)
  stuff[c("sigma", "varcov")] <- ker[c("sigma", "varcov")]
  ##
  g <- function(x, y=NULL) {
    Y <- xy.coords(x, y)[c("x", "y")]
    Xquery <- as.ppp(Y, Window(stuff$Xdata))
    do.call(smoothcrossEngine, append(list(Xquery=Xquery), stuff))
  }
  g <- funxy(g, as.rectangle(as.owin(X)))
  class(g) <- c("Smoothfun", class(g))
  return(g)
}

## print method: shows the data pattern captured in the closure
print.Smoothfun <- function(x, ...) {
  cat("function(x,y)", "which returns", "values", "interpolated from",
      fill=TRUE)
  X <- get("X", envir=environment(x))
  print(X, ...)
  return(invisible(NULL))
}

## Method for as.im
## (enables plot.funxy, persp.funxy, contour.funxy to work for this class)

as.im.Smoothfun <- function(X, W=Window(X), ..., approx=TRUE) {
  stuff <- get("stuff", envir=environment(X))
  if(!approx) {
    #' evaluate exactly at grid points
    result <- as.im.function(X, W=W, ...)
  } else {
    #' faster, approximate evaluation using FFT
    ## NOTE(review): 'stuff' has a component 'Xdata' but none named 'X';
    ## 'stuff$X' reads 'Xdata' by partial matching, while the assignment
    ## creates a NEW component 'X' -- confirm this call to Smooth works
    ## as intended.
    if(!is.null(W)) stuff$X <- stuff$X[W]
    result <- do.call(Smooth, resolve.defaults(list(...), stuff))
  }
  return(result)
}
spatstat/R/smooth.ppp.R0000644000176200001440000011205413616150144014554 0ustar liggesusers#
#  smooth.ppp.R
#
#  Smooth the marks of a point pattern
#
#  $Revision: 1.74 $  $Date: 2020/02/04 01:55:03 $
#

# smooth.ppp <- function(X, ..., weights=rep(1, npoints(X)), at="pixels") {
#   .Deprecated("Smooth.ppp", package="spatstat",
#      msg="smooth.ppp is deprecated: use the generic Smooth with a capital S")
#   Smooth(X, ..., weights=weights, at=at)
# }

Smooth <- function(X, ...) {
  UseMethod("Smooth")
}

Smooth.solist <- function(X, ...) {
  solapply(X, Smooth, ...)
}

## Nadaraya-Watson kernel smoothing of the marks of a point pattern,
## returning either a pixel image ('at="pixels"') or values at the
## data points ('at="points"').
Smooth.ppp <- function(X, sigma=NULL, ...,
                       weights=rep(1, npoints(X)), at="pixels",
                       adjust=1, varcov=NULL,
                       edge=TRUE, diggle=FALSE,
                       kernel="gaussian",
                       scalekernel=is.character(kernel),
                       geometric=FALSE) {
  verifyclass(X, "ppp")
  if(!is.marked(X, dfok=TRUE, na.action="fatal"))
    stop("X should be a marked point pattern", call.=FALSE)
  X <- coerce.marks.numeric(X)
  if(!all(is.finite(as.matrix(marks(X)))))
    stop("Some mark values are Inf, NaN or NA", call.=FALSE)
  at <- pickoption("output location type", at,
                   c(pixels="pixels", points="points"))
  ## trivial case: empty pattern
  if(npoints(X) == 0) {
    cn <- colnames(marks(X))
    nc <- length(cn)
    switch(at,
           points = {
             result <- if(nc == 0) numeric(0) else
                       matrix(, 0, nc, dimnames=list(NULL, cn))
           },
           pixels = {
             result <- as.im(NA_real_, Window(X))
             if(nc) {
               result <- as.solist(rep(list(result), nc))
               names(result) <- cn
             }
           })
    return(result)
  }
  ## ensure weights are numeric
  if(weightsgiven <- !missing(weights) && !is.null(weights)) {
    pa <- parent.frame()
    weights <- pointweights(X, weights=weights, parent=pa)
    weightsgiven <- !is.null(weights)
  } else weights <- NULL
  ## geometric mean smoothing: delegate entirely
  if(geometric)
    return(ExpSmoothLog(X, sigma=sigma, ..., at=at,
                        adjust=adjust, varcov=varcov,
                        kernel=kernel, scalekernel=scalekernel,
                        weights=weights, edge=edge, diggle=diggle))
  ## determine smoothing parameters
  if(scalekernel) {
    ker <- resolve.2D.kernel(sigma=sigma, ...,
                             adjust=adjust, varcov=varcov,
                             kernel=kernel,
                             x=X, bwfun=bw.smoothppp, allow.zero=TRUE)
    sigma <- ker$sigma
    varcov <- ker$varcov
    adjust <- 1
  }
  ## infinite bandwidth: estimate is spatially constant
  if(bandwidth.is.infinite(sigma)) {
    #' uniform estimate
    nX <- npoints(X)
    if(is.null(weights)) weights <- rep(1, nX)
    marx <- marks(X)
    single <- is.null(dim(marx))
    wtmark <- weights * marx
    totwt <- sum(weights)
    totwtmark <- if(single) sum(wtmark) else colSums(wtmark)
    W <- Window(X)
    switch(at,
           pixels = {
             result <- solapply(totwtmark/totwt, as.im, W=W, ...)
             names(result) <- colnames(marx)
             if(single) result <- result[[1L]]
           },
           points = {
             denominator <- rep(totwt, nX)
             numerator <- rep(totwtmark, each=nX)
             if(!single) numerator <- matrix(numerator, nrow=nX)
             ## NOTE(review): argument order here differs from the
             ## resolve.1.default("leaveoneout", ...) call below -- verify
             leaveoneout <- resolve.1.default(list(leaveoneout=TRUE),
                                              list(...))
             if(leaveoneout) {
               numerator <- numerator - wtmark
               denominator <- denominator - weights
             }
             result <- numerator/denominator
             if(!single) colnames(result) <- colnames(marx)
           })
    return(result)
  }
  ## Diggle's edge correction?
  if(diggle && !edge)
    warning("Option diggle=TRUE overridden by edge=FALSE")
  diggle <- diggle && edge
  ##
  ## cutoff distance (beyond which the kernel value is treated as zero)
  cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov,
                           scalekernel=scalekernel, adjust=adjust, ...,
                           fatal=TRUE)
  ##
  if(cutoff < minnndist(X)) {
    # very small bandwidth
    leaveoneout <- resolve.1.default("leaveoneout", list(...),
                                     list(leaveoneout=TRUE))
    if(!leaveoneout && at=="points") {
      warning(paste("Bandwidth is close to zero:",
                    "original values returned"))
      Y <- marks(X)
    } else {
      warning(paste("Bandwidth is close to zero:",
                    "nearest-neighbour interpolation performed"))
      Y <- nnmark(X, ..., k=1, at=at)
    }
    return(Y)
  }
  if(diggle) {
    ## absorb Diggle edge correction into weights vector
    edg <- second.moment.calc(X, sigma, what="edge", ...,
                              varcov=varcov, adjust=adjust,
                              kernel=kernel, scalekernel=scalekernel)
    ei <- safelookup(edg, X, warn=FALSE)
    weights <- if(weightsgiven) weights/ei else 1/ei
    weights[!is.finite(weights)] <- 0
    weightsgiven <- TRUE
  }
  ## rescale weights to avoid numerical gremlins
  if(weightsgiven && ((mw <- median(abs(weights))) > 0))
    weights <- weights/mw
  ## calculate...
  marx <- marks(X)
  uhoh <- NULL
  if(!is.data.frame(marx)) {
    # ........ vector of marks ...................
    values <- marx
    if(is.factor(values))
      warning("Factor valued marks were converted to integers", call.=FALSE)
    values <- as.numeric(values)
    ## detect constant values
    ra <- range(values, na.rm=TRUE)
    if(diff(ra) == 0) {
      switch(at,
             points = {
               result <- values
             },
             pixels = {
               M <- do.call.matched(as.mask, list(w=as.owin(X), ...))
               result <- as.im(ra[1], M)
             })
    } else {
      switch(at,
             points={
               result <- do.call(smoothpointsEngine,
                                 resolve.defaults(list(x=X,
                                                       values=values,
                                                       weights=weights,
                                                       sigma=sigma,
                                                       varcov=varcov,
                                                       kernel=kernel,
                                                       scalekernel=scalekernel,
                                                       edge=FALSE),
                                                  list(...)))
             },
             pixels={
               ## ratio-of-densities estimate on the pixel grid
               values.weights <- if(weightsgiven) values * weights else values
               numerator <- do.call(density.ppp,
                                    resolve.defaults(list(x=X,
                                                          at="pixels",
                                                          weights = values.weights,
                                                          sigma=sigma,
                                                          varcov=varcov,
                                                          kernel=kernel,
                                                          scalekernel=scalekernel,
                                                          edge=FALSE),
                                                     list(...)))
               denominator <- do.call(density.ppp,
                                      resolve.defaults(list(x=X,
                                                            at="pixels",
                                                            weights = weights,
                                                            sigma=sigma,
                                                            varcov=varcov,
                                                            kernel=kernel,
                                                            scalekernel=scalekernel,
                                                            edge=FALSE),
                                                       list(...)))
               result <- eval.im(numerator/denominator)
               ## trap small values of denominator
               ## trap NaN and +/- Inf values of result, but not NA
               eps <- .Machine$double.eps
               nbg <- eval.im(is.infinite(result) | is.nan(result) |
                              (denominator < eps))
               if(any(as.matrix(nbg), na.rm=TRUE)) {
                 warning(paste("Numerical underflow detected:",
                               "sigma is probably too small"),
                         call.=FALSE)
                 uhoh <- unique(c(uhoh, "underflow"))
                 ## l'Hopital's rule
                 distX <- distmap(X, xy=numerator)
                 whichnn <- attr(distX, "index")
                 nnvalues <- eval.im(values[whichnn])
                 result[nbg] <- nnvalues[nbg]
               }
               ## NOTE(review): this overwrites any "underflow" entry
               ## recorded just above -- confirm intended
               uhoh <- attr(numerator, "warnings")
             })
    }
  } else {
    ## ......... data frame of marks ..................
    ## convert to numerical values
    if(any(sapply(as.list(marx), is.factor)))
      warning("Factor columns of marks were converted to integers",
              call.=FALSE)
    marx <- asNumericMatrix(marx)
    ## detect constant columns
    ra <- apply(marx, 2, range, na.rm=TRUE)
    isconst <- (apply(ra, 2, diff) == 0)
    if(anyisconst <- any(isconst)) {
      oldmarx <- marx
      # oldX <- X
      marx <- marx[, !isconst]
      X <- X %mark% marx
    }
    if(any(!isconst)) {
      ## compute denominator
      denominator <- do.call(density.ppp,
                             resolve.defaults(list(x=X,
                                                   at=at,
                                                   weights = weights,
                                                   sigma=sigma,
                                                   varcov=varcov,
                                                   kernel=kernel,
                                                   scalekernel=scalekernel,
                                                   edge=FALSE),
                                              list(...)))
      ## compute numerator for each column of marks
      marx.weights <- if(weightsgiven) marx * weights else marx
      numerators <- do.call(density.ppp,
                            resolve.defaults(list(x=X,
                                                  at=at,
                                                  weights = marx.weights,
                                                  sigma=sigma,
                                                  varcov=varcov,
                                                  kernel=kernel,
                                                  scalekernel=scalekernel,
                                                  edge=FALSE),
                                             list(...)))
      uhoh <- attr(numerators, "warnings")
      ## calculate ratios
      switch(at,
             points={
               if(is.null(uhoh)) {
                 ## numerators is a matrix (or may have dropped to vector)
                 if(is.data.frame(numerators)) {
                   numerators <- as.matrix(numerators)
                 } else if(!is.matrix(numerators)) {
                   numerators <- matrix(unlist(numerators),
                                        nrow=npoints(X))
                 }
                 ratio <- numerators/denominator
                 if(any(badpoints <- matrowany(!is.finite(ratio)))) {
                   whichnnX <- nnwhich(X)
                   ratio[badpoints,] <-
                     as.matrix(marx[whichnnX[badpoints], , drop=FALSE])
                 }
               } else {
                 warning("returning original values")
                 ratio <- marx
               }
               result <- as.data.frame(ratio)
               colnames(result) <- colnames(marx)
             },
             pixels={
               ## numerators is a list of images (or may have dropped to 'im')
               if(is.im(numerators)) numerators <- list(numerators)
               result <- solapply(numerators, "/", e2=denominator)
               eps <- .Machine$double.eps
               denOK <- eval.im(denominator >= eps)
               if(!is.null(uhoh) || !all(denOK)) {
                 ## compute nearest neighbour map on same raster
                 distX <- distmap(X, xy=denominator)
                 whichnnX <- attr(distX, "index")
                 ## fix images
                 allgood <- TRUE
                 for(j in 1:length(result)) {
                   ratj <-
                     result[[j]]
                   valj <- marx[,j]
                   goodj <- eval.im(is.finite(ratj) & denOK)
                   ## NOTE(review): check the arguments of eval.im here --
                   ## this looks like it may be intended as an ifelse-style
                   ## replacement of bad pixels by nearest-neighbour marks
                   result[[j]] <- eval.im(goodj, ratj, valj[whichnnX])
                   allgood <- allgood && all(goodj)
                 }
                 if(!allgood) {
                   warning(paste("Numerical underflow detected:",
                                 "sigma is probably too small"),
                           call.=FALSE)
                   uhoh <- unique(c(uhoh, "underflow"))
                 }
               }
               names(result) <- colnames(marx)
             })
    } else result <- NULL
    ## re-insert the constant columns that were removed earlier
    if(anyisconst) {
      partresult <- result
      switch(at,
             points = {
               nX <- npoints(X)
               result <- matrix(, nX, ncol(oldmarx))
               if(length(partresult) > 0)
                 result[,!isconst] <- as.matrix(partresult)
               result[,isconst] <- rep(ra[1,isconst], each=nX)
               colnames(result) <- colnames(oldmarx)
             },
             pixels = {
               result <- vector(mode="list", length=ncol(oldmarx))
               if(length(partresult) > 0) {
                 result[!isconst] <- partresult
                 M <- as.owin(partresult[[1]])
               } else {
                 M <- do.call.matched(as.mask, list(w=as.owin(X), ...))
               }
               result[isconst] <- lapply(ra[1, isconst], as.im, W=M)
               result <- as.solist(result)
               names(result) <- colnames(oldmarx)
             })
    }
  }
  ## wrap up
  attr(result, "sigma") <- sigma
  attr(result, "varcov") <- varcov
  attr(result, "warnings") <- uhoh
  return(result)
}

## Workhorse for Smooth.ppp(..., at="points"): leave-one-out kernel
## smoothing of a VECTOR of values at the data points, mostly in C.
smoothpointsEngine <- function(x, values, sigma, ...,
                               kernel="gaussian",
                               scalekernel=is.character(kernel),
                               weights=NULL, varcov=NULL,
                               leaveoneout=TRUE,
                               sorted=FALSE, cutoff=NULL) {
  debugging <- spatstat.options("developer")
  stopifnot(is.logical(leaveoneout))
  if(!is.null(dim(values)))
    stop("Internal error: smoothpointsEngine does not support multidimensional values")
  #' detect constant values
  if(diff(range(values, na.rm=TRUE)) == 0) {
    result <- values
    attr(result, "sigma") <- sigma
    attr(result, "varcov") <- varcov
    return(result)
  }
  validate2Dkernel(kernel)
  if(is.character(kernel)) kernel <- match2DkernelName(kernel)
  isgauss <- identical(kernel, "gaussian")
  ## Handle weights that are meant to be null
  if(length(weights) == 0) weights <- NULL
  ## infinite bandwidth: spatially constant estimate
  if(bandwidth.is.infinite(sigma)) {
    #' uniform estimate
    nX <- npoints(x)
    if(is.null(weights)) weights <- rep(1, nX)
    wtval <- weights * values
    totwt <- sum(weights)
    totwtval <- sum(wtval)
    denominator <- rep(totwt, nX)
    numerator <- rep(totwtval, nX)
    if(leaveoneout) {
      numerator <- numerator - wtval
      denominator <- denominator - weights
    }
    result <- numerator/denominator
    return(result)
  }
  ## cutoff distance (beyond which the kernel value is treated as zero)
  ## NB: input argument 'cutoff' is either NULL or
  ##     an absolute distance (if scalekernel=FALSE)
  ##     a number of standard deviations (if scalekernel=TRUE)
  cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov,
                           scalekernel=scalekernel, cutoff=cutoff,
                           fatal=TRUE)
  ## cutoff is now an absolute distance
  if(debugging)
    cat(paste("cutoff=", cutoff, "\n"))
  # detect very small bandwidth
  nnd <- nndist(x)
  nnrange <- range(nnd)
  if(cutoff < nnrange[1]) {
    if(leaveoneout && (npoints(x) > 1)) {
      warning("Very small bandwidth; values of nearest neighbours returned")
      result <- values[nnwhich(x)]
    } else {
      warning("Very small bandwidth; original values returned")
      result <- values
    }
    attr(result, "sigma") <- sigma
    attr(result, "varcov") <- varcov
    attr(result, "warnings") <- "underflow"
    return(result)
  }
  if(leaveoneout) {
    # ensure cutoff includes at least one point
    cutoff <- max(1.1 * nnrange[2], cutoff)
  }
  sd <- if(is.null(varcov)) sigma else sqrt(max(eigen(varcov)$values))
  if(isgauss &&
     spatstat.options("densityTransform") && spatstat.options("densityC")) {
    ## .................. experimental C code .....................
    if(debugging)
      cat('Using experimental code!\n')
    npts <- npoints(x)
    result <- numeric(npts)
    ## transform to standard coordinates
    xx <- x$x
    yy <- x$y
    if(is.null(varcov)) {
      xx <- xx/(sqrt(2) * sigma)
      yy <- yy/(sqrt(2) * sigma)
    } else {
      Sinv <- solve(varcov)
      xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2)
      xx <- xy[,1]
      yy <- xy[,2]
      sorted <- FALSE
    }
    ## cutoff in standard coordinates
    cutoff <- cutoff/(sqrt(2) * sd)
    ## sort into increasing order of x coordinate (required by C code)
    if(!sorted) {
      oo <- fave.order(xx)
      xx <- xx[oo]
      yy <- yy[oo]
      vv <- values[oo]
    } else {
      vv <- values
    }
    if(is.null(weights)) {
      zz <- .C("Gsmoopt",
               nxy = as.integer(npts),
               x = as.double(xx),
               y = as.double(yy),
               v = as.double(vv),
               self = as.integer(!leaveoneout),
               rmaxi = as.double(cutoff),
               result = as.double(double(npts)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[oo] <- zz$result
    } else {
      wtsort <- weights[oo]
      zz <- .C("Gwtsmoopt",
               nxy = as.integer(npts),
               x = as.double(xx),
               y = as.double(yy),
               v = as.double(vv),
               self = as.integer(!leaveoneout),
               rmaxi = as.double(cutoff),
               weight = as.double(wtsort),
               result = as.double(double(npts)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[oo] <- zz$result
    }
    if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
      # NaN or +/-Inf can occur if bandwidth is small
      # Use mark of nearest neighbour (by l'Hopital's rule)
      result[nbg] <- values[nnwhich(x)[nbg]]
    }
  } else if(isgauss && spatstat.options("densityC")) {
    # .................. C code ...........................
    if(debugging)
      cat('Using standard code.\n')
    npts <- npoints(x)
    result <- numeric(npts)
    # sort into increasing order of x coordinate (required by C code)
    if(sorted) {
      xx <- x$x
      yy <- x$y
      vv <- values
    } else {
      oo <- fave.order(x$x)
      xx <- x$x[oo]
      yy <- x$y[oo]
      vv <- values[oo]
    }
    if(is.null(varcov)) {
      # isotropic kernel
      if(is.null(weights)) {
        zz <- .C("smoopt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 v = as.double(vv),
                 self = as.integer(!leaveoneout),
                 rmaxi = as.double(cutoff),
                 sig = as.double(sd),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      } else {
        wtsort <- weights[oo]
        zz <- .C("wtsmoopt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 v = as.double(vv),
                 self = as.integer(!leaveoneout),
                 rmaxi = as.double(cutoff),
                 sig = as.double(sd),
                 weight = as.double(wtsort),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      }
    } else {
      # anisotropic kernel
      Sinv <- solve(varcov)
      flatSinv <- as.vector(t(Sinv))
      if(is.null(weights)) {
        zz <- .C("asmoopt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 v = as.double(vv),
                 self = as.integer(!leaveoneout),
                 rmaxi = as.double(cutoff),
                 sinv = as.double(flatSinv),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      } else {
        wtsort <- weights[oo]
        zz <- .C("awtsmoopt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 v = as.double(vv),
                 self = as.integer(!leaveoneout),
                 rmaxi = as.double(cutoff),
                 sinv = as.double(flatSinv),
                 weight = as.double(wtsort),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      }
    }
    if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
      # NaN or +/-Inf can occur if bandwidth is small
      # Use mark of nearest neighbour (by l'Hopital's rule)
      result[nbg] <- values[nnwhich(x)[nbg]]
    }
  } else {
    #' Either a non-Gaussian kernel or using older, partly interpreted code
    #' compute weighted densities
    if(is.null(weights)) {
      # weights are implicitly equal to 1
      numerator <- do.call(density.ppp,
                           resolve.defaults(list(x=x, at="points",
                                                 weights = values,
                                                 sigma=sigma, varcov=varcov,
                                                 leaveoneout=leaveoneout,
                                                 sorted=sorted,
                                                 kernel=kernel,
                                                 scalekernel=scalekernel),
                                            list(...),
                                            list(edge=FALSE)))
      denominator <- do.call(density.ppp,
                             resolve.defaults(list(x=x, at="points",
                                                   sigma=sigma, varcov=varcov,
                                                   leaveoneout=leaveoneout,
                                                   sorted=sorted,
                                                   kernel=kernel,
                                                   scalekernel=scalekernel),
                                              list(...),
                                              list(edge=FALSE)))
    } else {
      numerator <- do.call(density.ppp,
                           resolve.defaults(list(x=x, at="points",
                                                 weights = values * weights,
                                                 sigma=sigma, varcov=varcov,
                                                 leaveoneout=leaveoneout,
                                                 sorted=sorted,
                                                 kernel=kernel,
                                                 scalekernel=scalekernel),
                                            list(...),
                                            list(edge=FALSE)))
      denominator <- do.call(density.ppp,
                             resolve.defaults(list(x=x, at="points",
                                                   weights = weights,
                                                   sigma=sigma, varcov=varcov,
                                                   leaveoneout=leaveoneout,
                                                   sorted=sorted,
                                                   kernel=kernel,
                                                   scalekernel=scalekernel),
                                              list(...),
                                              list(edge=FALSE)))
    }
    if(is.null(uhoh <- attr(numerator, "warnings"))) {
      result <- numerator/denominator
      result <- ifelseXB(is.finite(result), result, NA_real_)
    } else {
      warning("returning original values")
      result <- values
      attr(result, "warnings") <- uhoh
    }
  }
  # pack up and return
  attr(result, "sigma") <- sigma
  attr(result, "varcov") <- varcov
  return(result)
}

## smoothed mark mean: simple wrapper around Smooth
markmean <- function(X, ...) {
  stopifnot(is.marked(X))
  Y <- Smooth(X, ...)
  return(Y)
}

## smoothed mark variance: E[m^2] - (E[m])^2, both smoothed with the
## SAME bandwidth (taken from the attributes of the first smooth)
markvar <- function(X, sigma=NULL, ..., weights=NULL, varcov=NULL) {
  stopifnot(is.marked(X))
  if(is.expression(weights))
    weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame())
  E1 <- Smooth(X, sigma=sigma, varcov=varcov, weights=weights, ...)
  X2 <- X %mark% marks(X)^2
  ## ensure smoothing bandwidth is the same!
  sigma <- attr(E1, "sigma")
  varcov <- attr(E1, "varcov")
  E2 <- Smooth(X2, sigma=sigma, varcov=varcov, weights=weights, ...)
  V <- eval.im(E2 - E1^2)
  return(V)
}

## Least-squares cross-validated bandwidth for mark smoothing.
bw.smoothppp <- function(X, nh=spatstat.options("n.bandwidth"),
                         hmin=NULL, hmax=NULL, warn=TRUE,
                         kernel="gaussian") {
  stopifnot(is.ppp(X))
  stopifnot(is.marked(X))
  X <- coerce.marks.numeric(X)
  # rearrange in ascending order of x-coordinate (for C code)
  X <- X[fave.order(X$x)]
  #
  marx <- marks(X)
  dimmarx <- dim(marx)
  if(!is.null(dimmarx))
    marx <- as.matrix(as.data.frame(marx))
  # determine a range of bandwidth values
  #
  n <- npoints(X)
  if(is.null(hmin) || is.null(hmax)) {
    W <- Window(X)
    # a <- area(W)
    d <- diameter(as.rectangle(W))
    # Stoyan's rule of thumb
    stoyan <- bw.stoyan(X)
    # rule of thumb based on nearest-neighbour distances
    nnd <- nndist(X)
    nnd <- nnd[nnd > 0]
    if(is.null(hmin)) {
      hmin <- max(1.1 * min(nnd), stoyan/5)
      hmin <- min(d/8, hmin)
    }
    if(is.null(hmax)) {
      hmax <- max(stoyan * 20, 3 * mean(nnd), hmin * 2)
      hmax <- min(d/2, hmax)
    }
  } else stopifnot(hmin < hmax)
  #
  h <- geomseq(from=hmin, to=hmax, length.out=nh)
  cv <- numeric(nh)
  #
  # compute cross-validation criterion: leave-one-out prediction error
  for(i in seq_len(nh)) {
    yhat <- Smooth(X, sigma=h[i], at="points", leaveoneout=TRUE,
                   kernel=kernel, sorted=TRUE)
    if(!is.null(dimmarx))
      yhat <- as.matrix(as.data.frame(yhat))
    cv[i] <- mean((marx - yhat)^2)
  }
  # optimize
  result <- bw.optim(cv, h, hname="sigma",
                     creator="bw.smoothppp",
                     criterion="Least Squares Cross-Validation",
                     warnextreme=warn, hargnames=c("hmin", "hmax"),
                     unitname=unitname(X))
  return(result)
}

## Kernel smoothing of values attached to 'Xdata', evaluated at the
## locations of 'Xquery' (cross-smoothing). Used by Smoothfun.
smoothcrossEngine <- function(Xdata, Xquery, values, sigma, ...,
                              weights=NULL, varcov=NULL,
                              kernel="gaussian",
                              scalekernel=is.character(kernel),
                              sorted=FALSE, cutoff=NULL) {
  validate2Dkernel(kernel)
  if(is.character(kernel)) kernel <- match2DkernelName(kernel)
  isgauss <- identical(kernel, "gaussian") && scalekernel
  if(!is.null(dim(weights)))
    stop("weights must be a vector")
  ndata <- npoints(Xdata)
  nquery <- npoints(Xquery)
  if(nquery == 0 || ndata == 0) {
    if(is.null(dim(values)))
      return(rep(NA_real_, nquery))
    nuttin <- matrix(NA_real_, nrow=nquery, ncol=ncol(values))
    colnames(nuttin) <- colnames(values)
    return(nuttin)
  }
  # validate weights
  if(is.matrix(values) || is.data.frame(values)) {
    k <- ncol(values)
    stopifnot(nrow(values) == npoints(Xdata))
    values <- as.data.frame(values)
  } else {
    k <- 1L
    stopifnot(length(values) == npoints(Xdata) ||
              length(values) == 1)
    if(length(values) == 1L)
      values <- rep(values, ndata)
  }
  ## infinite bandwidth: spatially constant estimate
  if(bandwidth.is.infinite(sigma)) {
    #' uniform estimate
    if(is.null(weights)) weights <- rep(1, ndata)
    single <- is.null(dim(values))
    wtval <- weights * values
    totwt <- sum(weights)
    totwtval <- if(single) sum(wtval) else colSums(wtval)
    denominator <- rep(totwt, nquery)
    numerator <- rep(totwtval, each=nquery)
    if(!single) numerator <- matrix(numerator, nrow=nquery)
    result <- numerator/denominator
    if(!single) colnames(result) <- colnames(values)
    return(result)
  }
  ## cutoff distance (beyond which the kernel value is treated as zero)
  ## NB: input argument 'cutoff' is either NULL or
  ##     an absolute distance (if scalekernel=FALSE)
  ##     a number of standard deviations (if scalekernel=TRUE)
  cutoff.orig <- cutoff
  cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov,
                           scalekernel=scalekernel, cutoff=cutoff,
                           fatal=TRUE)
  ## cutoff is now an absolute distance
  ## detect very small bandwidth
  nnc <- nncross(Xquery, Xdata)
  if(cutoff < min(nnc$dist)) {
    if(ndata > 1) {
      warning("Very small bandwidth; values of nearest neighbours returned")
      nw <- nnc$which
      result <- if(k == 1) values[nw] else values[nw,,drop=FALSE]
    } else {
      warning("Very small bandwidth; original values returned")
      result <- values
    }
    attr(result, "sigma") <- sigma
    attr(result, "varcov") <- varcov
    attr(result, "warnings") <- "underflow"
    return(result)
  }
  ## Handle weights that are meant to be null
  if(length(weights) == 0) weights <- NULL
  if(!isgauss) {
    ## .................. non-Gaussian kernel ........................
    close <- crosspairs(Xdata, Xquery, cutoff)
    kerij <- evaluate2Dkernel(kernel, close$dx, close$dy,
                              sigma=sigma, varcov=varcov,
                              scalekernel=scalekernel, ...)
    ## sum the (weighted) contributions
    i <- close$i  # data point
    j <- close$j  # query point
    jfac <- factor(j, levels=seq_len(nquery))
    wkerij <- if(is.null(weights)) kerij else kerij * weights[i]
    denominator <- tapplysum(wkerij, list(jfac))
    if(k == 1L) {
      contribij <- wkerij * values[i]
      numerator <- tapplysum(contribij, list(jfac))
      result <- numerator/denominator
    } else {
      result <- matrix(, nrow=nquery, ncol=k)
      for(kk in 1:k) {
        contribij <- wkerij * values[i, kk]
        numeratorkk <- tapplysum(contribij, list(jfac))
        result[,kk] <- numeratorkk/denominator
      }
    }
    ## trap bad values
    if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
      ## NaN or +/-Inf can occur if bandwidth is small
      ## Use value at nearest neighbour (by l'Hopital's rule)
      nnw <- nnc$which
      if(k == 1L) {
        result[nbg] <- values[nnw[nbg]]
      } else {
        bad <- which(nbg, arr.ind=TRUE)
        badrow <- bad[,"row"]
        badcol <- bad[,"col"]
        result[nbg] <- values[cbind(nnw[badrow], badcol)]
      }
    }
    attr(result, "sigma") <- sigma
    attr(result, "varcov") <- varcov
    return(result)
  }
  ## .................. Gaussian kernel henceforth ........................
  ## handle multiple columns of values: recurse column by column
  if(is.matrix(values) || is.data.frame(values)) {
    k <- ncol(values)
    stopifnot(nrow(values) == npoints(Xdata))
    values <- as.data.frame(values)
    result <- matrix(, nquery, k)
    colnames(result) <- colnames(values)
    if(!sorted) {
      ood <- fave.order(Xdata$x)
      Xdata <- Xdata[ood]
      values <- values[ood, ]
      ooq <- fave.order(Xquery$x)
      Xquery <- Xquery[ooq]
    }
    for(j in 1:k)
      result[,j] <- smoothcrossEngine(Xdata, Xquery, values[,j],
                                      sigma=sigma, varcov=varcov,
                                      weights=weights,
                                      kernel=kernel, scalekernel=scalekernel,
                                      cutoff=cutoff.orig, sorted=TRUE,
                                      ...)
    if(!sorted) {
      sortresult <- result
      result[ooq,] <- sortresult
    }
    attr(result, "sigma") <- sigma
    attr(result, "varcov") <- varcov
    return(result)
  }
  ## values must be a vector
  stopifnot(length(values) == npoints(Xdata) ||
            length(values) == 1)
  if(length(values) == 1)
    values <- rep(values, ndata)
  result <- numeric(nquery)
  ## coordinates and values
  xq <- Xquery$x
  yq <- Xquery$y
  xd <- Xdata$x
  yd <- Xdata$y
  vd <- values
  if(!sorted) {
    ## sort into increasing order of x coordinate (required by C code)
    ooq <- fave.order(Xquery$x)
    xq <- xq[ooq]
    yq <- yq[ooq]
    ood <- fave.order(Xdata$x)
    xd <- xd[ood]
    yd <- yd[ood]
    vd <- vd[ood]
  }
  sd <- if(is.null(varcov)) sigma else sqrt(min(eigen(varcov)$values))
  if(is.null(varcov)) {
    ## isotropic kernel
    if(is.null(weights)) {
      zz <- .C("crsmoopt",
               nquery = as.integer(nquery),
               xq = as.double(xq),
               yq = as.double(yq),
               ndata = as.integer(ndata),
               xd = as.double(xd),
               yd = as.double(yd),
               vd = as.double(vd),
               rmaxi = as.double(cutoff),
               sig = as.double(sd),
               result = as.double(double(nquery)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[ooq] <- zz$result
    } else {
      wtsort <- if(sorted) weights else weights[ood]
      zz <- .C("wtcrsmoopt",
               nquery = as.integer(nquery),
               xq = as.double(xq),
               yq = as.double(yq),
               ndata = as.integer(ndata),
               xd = as.double(xd),
               yd = as.double(yd),
               vd = as.double(vd),
               wd = as.double(wtsort),
               rmaxi = as.double(cutoff),
               sig = as.double(sd),
               result = as.double(double(nquery)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[ooq] <- zz$result
    }
  } else {
    # anisotropic kernel
    Sinv <- solve(varcov)
    flatSinv <- as.vector(t(Sinv))
    if(is.null(weights)) {
      zz <- .C("acrsmoopt",
               nquery = as.integer(nquery),
               xq = as.double(xq),
               yq = as.double(yq),
               ndata = as.integer(ndata),
               xd = as.double(xd),
               yd = as.double(yd),
               vd = as.double(vd),
               rmaxi = as.double(cutoff),
               sinv = as.double(flatSinv),
               result = as.double(double(nquery)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[ooq] <- zz$result
    } else {
      wtsort <- if(sorted) weights else weights[ood]
      zz <- .C("awtcrsmoopt",
               nquery = as.integer(nquery),
               xq = as.double(xq),
               yq = as.double(yq),
               ndata = as.integer(ndata),
               xd = as.double(xd),
               yd = as.double(yd),
               vd = as.double(vd),
               wd = as.double(wtsort),
               rmaxi = as.double(cutoff),
               sinv = as.double(flatSinv),
               result = as.double(double(nquery)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[ooq] <- zz$result
    }
  }
  if(any(nbg <- (is.infinite(result) | is.nan(result)))) {
    # NaN or +/-Inf can occur if bandwidth is small
    # Use mark of nearest neighbour (by l'Hopital's rule)
    result[nbg] <- values[nnc$which[nbg]]
  }
  # pack up and return
  attr(result, "sigma") <- sigma
  attr(result, "varcov") <- varcov
  return(result)
}

## Geometric-mean smoothing: smooth log(marks) and exponentiate.
## Marks must be non-negative; zero marks are handled separately.
ExpSmoothLog <- function(X, ..., at=c("pixels", "points"), weights=NULL) {
  verifyclass(X, "ppp")
  at <- match.arg(at)
  if(!is.null(weights))
    check.nvector(weights, npoints(X))
  X <- coerce.marks.numeric(X)
  marx <- marks(X)
  d <- dim(marx)
  if(!is.null(d) && d[2] > 1) {
    ## several columns of marks: handle each column separately
    switch(at,
           points = {
             Z <- lapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights)
             Z <- do.call(data.frame, Z)
           },
           pixels = {
             Z <- solapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights)
           })
    return(Z)
  }
  # vector or single column of numeric marks
  v <- as.numeric(marx)
  vmin <- min(v)
  if(vmin < 0)
    stop("Negative values in geometric mean smoothing", call.=FALSE)
  Y <- X %mark% log(v)
  if(vmin > 0) {
    Z <- Smooth(Y, ..., at=at, weights=weights)
  } else {
    ## some marks are zero, so log marks are -Inf:
    ## smooth only the finite values, fill the rest with -Inf
    yok <- is.finite(marks(Y))
    YOK <- Y[yok]
    weightsOK <- if(is.null(weights)) NULL else weights[yok]
    switch(at,
           points = {
             Z <- rep(-Inf, npoints(X))
             Z[yok] <- Smooth(YOK, ..., at=at, weights=weightsOK)
           },
           pixels = {
             isfinite <- nnmark(Y %mark% yok, ...)
             support <- solutionset(isfinite)
             Window(YOK) <- support
             Z <- as.im(-Inf, W=Window(Y), ...)
             Z[support] <- Smooth(YOK, ..., at=at, weights=weightsOK)[]
           })
  }
  return(exp(Z))
}
spatstat/R/as.im.R0000644000176200001440000002436513611503715013464 0ustar liggesusers#
#    as.im.R
#
#    conversion to class "im"
#
#    $Revision: 1.58 $   $Date: 2020/01/21 04:50:04 $
#
#    as.im()
#

as.im <- function(X, ...) {
  UseMethod("as.im")
}

## Convert an image to an image, possibly changing the pixel raster
## (via W/eps/dimyx/xy) and replacing NA values.
as.im.im <- function(X, W=NULL, ...,
                     eps=NULL, dimyx=NULL, xy=NULL,
                     na.replace=NULL) {
  X <- repair.old.factor.image(X)
  nopar <- is.null(eps) && is.null(dimyx) && is.null(xy)
  if(is.null(W)) {
    if(nopar) {
      X <- repair.image.xycoords(X)
      X <- na.handle.im(X, na.replace)
      return(X)
    }
    # pixel raster determined by dimyx etc
    W <- as.mask(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy)
    # invoke as.im.owin
    Y <- as.im(W)
  } else if(is.mask(W) || is.im(W) || !nopar) {
    #' raster information is present in { W, eps, dimyx, xy }
    Y <- as.im(W, eps=eps, dimyx=dimyx, xy=xy)
  } else {
    #' use existing raster information in X
    return(X[W, drop=FALSE, tight=TRUE])
  }
  # resample X onto raster of Y
  Y <- rastersample(X, Y)
  return(na.handle.im(Y, na.replace))
}

## Convert a window to an image: pixels inside X get 'value',
## pixels outside get NA (or 'na.replace').
as.im.owin <- function(X, W=NULL, ...,
                       eps=NULL, dimyx=NULL, xy=NULL,
                       na.replace=NULL, value=1) {
  if(!(is.null(eps) && is.null(dimyx) && is.null(xy))) {
    # raster dimensions determined by dimyx etc
    # convert X to a mask
    M <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy)
    # convert mask to image
    d <- M$dim
    v <- matrix(value, d[1L], d[2L])
    m <- M$m
    v[!m] <- if(is.null(na.replace)) NA else na.replace
    out <- im(v, M$xcol, M$yrow,
              xrange=M$xrange, yrange=M$yrange, unitname=unitname(X))
    return(out)
  }
  if(!is.null(W) && is.owin(W) && W$type == "mask") {
    # raster dimensions determined by W
    # convert W to zero image
    d <- W$dim
    Z <- im(matrix(0, d[1L], d[2L]), W$xcol, W$yrow, unitname=unitname(X))
    # adjust values to indicator of X
    Z[X] <- 1
    if(missing(value) && is.null(na.replace)) {
      # done
      out <- Z
    } else {
      # map {0, 1} to {na.replace, value}
      v <- matrix(ifelseAB(Z$v == 0, na.replace, value), d[1L], d[2L])
      out <- im(v, W$xcol, W$yrow, unitname=unitname(X))
    }
    return(out)
  }
  if(X$type == "mask") {
    # raster dimensions determined by X
    # convert X to image
    d <- X$dim
    v <- matrix(value, d[1L], d[2L])
    m <- X$m
    v[!m] <- if(is.null(na.replace)) NA else na.replace
    out <- im(v, xcol=X$xcol, yrow=X$yrow,
              xrange=X$xrange, yrange=X$yrange, unitname=unitname(X))
    return(out)
  }
  # X is not a mask.
  # W is either missing, or is not a mask.
  # Convert X to a image using default settings
  M <- as.mask(X)
  # convert mask to image
  d <- M$dim
  v <- matrix(value, d[1L], d[2L])
  m <- M$m
  v[!m] <- if(is.null(na.replace)) NA else na.replace
  out <- im(v, M$xcol, M$yrow, unitname=unitname(X))
  return(out)
}

as.im.funxy <- function(X, W=Window(X), ...) {
  as.im.function(X, W=W, ...)
}

## Evaluate a function(x,y) at every pixel of a raster over W;
## may return a list of images if the function is vector-valued.
as.im.function <- function(X, W=NULL, ...,
                           eps=NULL, dimyx=NULL, xy=NULL,
                           na.replace=NULL,
                           stringsAsFactors=default.stringsAsFactors(),
                           strict=FALSE, drop=TRUE) {
  f <- X
  if(is.null(W)) stop("A window W is required")
  W <- as.owin(W)
  W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy)
  m <- W$m
  funnywindow <- !all(m)
  xx <- as.vector(rasterx.mask(W))
  yy <- as.vector(rastery.mask(W))
  argh <- list(...)
  ## 'strict': pass only arguments that f actually accepts
  if(strict) argh <- argh[names(argh) %in% names(formals(f))]
  #' evaluate function value at each pixel
  if(!funnywindow) {
    values <- do.call(f, append(list(xx, yy), argh))
    slices <- as.list(as.data.frame(values,
                                    stringsAsFactors=stringsAsFactors))
    ns <- length(slices)
  } else {
    #' evaluate only inside window
    inside <- as.vector(m)
    values.inside <- do.call(f, append(list(xx[inside], yy[inside]), argh))
    slices.inside <- as.list(as.data.frame(values.inside,
                                           stringsAsFactors=stringsAsFactors))
    ns <- length(slices.inside)
    #' pad out
    msize <- length(m)
    slices <- vector(mode="list", length=ns)
    for(i in seq_len(ns)) {
      slice.inside.i <- slices.inside[[i]]
      #' create space for full matrix
      slice.i <- vector(mode=typeof(slice.inside.i), length=msize)
      if(is.factor(slice.inside.i))
        slice.i <- factor(slice.i, levels=levels(slice.inside.i))
      #' copy values, assigning NA outside window
      slice.i[inside] <- slice.inside.i
      slice.i[!inside] <- NA
      #'
      slices[[i]] <- slice.i
    }
  }
  outlist <- vector(mode="list", length=ns)
  nc <- length(W$xcol)
  nr <- length(W$yrow)
  for(i in seq_len(ns)) {
    if(nr == 1 || nc == 1) {
      #' exception: can't determine pixel width/height from centres
      mat.i <- matrix(slices[[i]], nr, nc)
      levels(mat.i) <- levels(slices[[i]])
      out.i <- im(mat.i,
                  xrange=W$xrange, yrange=W$yrange, unitname=unitname(W))
    } else {
      out.i <- im(slices[[i]], W$xcol, W$yrow, unitname=unitname(W))
    }
    outlist[[i]] <- na.handle.im(out.i, na.replace)
  }
  if(ns == 1 && drop)
    return(outlist[[1L]])
  return(as.imlist(outlist))
}

## Convert an expression in x and y to an image, by wrapping it
## in a function evaluated in the calling frame.
as.im.expression <- function(X, W=NULL, ...) {
  e <- parent.frame()
  f <- function(x,y, ...) eval(X, envir=list(x=x, y=y), enclos=e)
  as.im(f, W=W, ...)
}

as.im.matrix <- function(X, W=NULL, ...) {
  nr <- nrow(X)
  nc <- ncol(X)
  if(is.null(W))
    return(im(X, ...))
  W <- as.owin(W)
  if(W$type == "mask") {
    xcol <- W$xcol
    yrow <- W$yrow
    # pixel coordinate information
    if(length(xcol) == nc && length(yrow) == nr)
      return(im(X, xcol, yrow, unitname=unitname(W)))
  }
  # range information
  R <- as.rectangle(W)
  xrange <- R$xrange
  yrange <- R$yrange
  return(im(X, xrange=xrange, yrange=yrange, unitname=unitname(W)))
}

## Fallback: constant values, and lists in the image.default
## convention list(x, y, z).
as.im.default <- function(X, W=NULL, ...,
                          eps=NULL, dimyx=NULL, xy=NULL,
                          na.replace=NULL) {
  if((is.vector(X) || is.factor(X)) && length(X) == 1) {
    # numerical value: interpret as constant function
    xvalue <- X
    X <- function(xx, yy, ...) { rep.int(xvalue, length(xx)) }
    return(as.im(X, W, ..., eps=eps, dimyx=dimyx, xy=xy,
                 na.replace=na.replace))
  }
  if(is.list(X) && checkfields(X, c("x","y","z"))) {
    stopifnot(is.matrix(X$z))
    z <- X$z
    y <- X$y
    x <- X$x
    # Usual S convention as in contour.default() and image.default()
    # Rows of z correspond to x values.
    nr <- nrow(z)
    nc <- ncol(z)
    lx <- length(x)
    ly <- length(y)
    if(lx == nr + 1)
      x <- (x[-1L] + x[-lx])/2
    else if(lx != nr)
      stop("length of x coordinate vector does not match number of rows of z")
    if(ly == nc + 1)
      y <- (y[-1L] + y[-ly])/2
    else if(ly != nc)
      stop("length of y coordinate vector does not match number of columns of z")
    # convert to class "im"
    out <- im(t(z), x, y)
    # now apply W and dimyx if present
    if(is.null(W) && !(is.null(eps) && is.null(dimyx) && is.null(xy)))
      out <- as.im(out, eps=eps, dimyx=dimyx, xy=xy)
    else if(!is.null(W))
      out <- as.im(out, W=W, eps=eps, dimyx=dimyx, xy=xy)
    return(na.handle.im(out, na.replace))
  }
  stop("Can't convert X to a pixel image")
}

as.im.ppp <- function(X, ...)
{ pixellate(X, ..., weights=NULL, zeropad=FALSE) } as.im.data.frame <- function(X, ..., step, fatal=TRUE, drop=TRUE) { if(missing(step)) { xstep <- ystep <- NULL } else { step <- ensure2vector(step) xstep <- step[1L] ystep <- step[2L] } if(ncol(X) < 3) { whinge <- "Argument 'X' must have at least 3 columns of data" if(fatal) stop(whinge) warning(whinge) return(NULL) } ## extract (x,y) coordinates mch <- matchNameOrPosition(c("x", "y", "z"), names(X)) x <- X[, mch[1L]] y <- X[, mch[2L]] z <- X[, -mch[1:2], drop=FALSE] ## unique x,y coordinates xx <- sortunique(x) yy <- sortunique(y) jj <- match(x, xx) ii <- match(y, yy) iijj <- cbind(ii, jj) ## make matrix (for incomplete x, y sequence) ok <- checkbigmatrix(length(xx), length(yy), fatal=fatal) if(!ok) return(NULL) mm <- matrix(NA, length(yy), length(xx)) ## ensure xx and yy are complete equally-spaced sequences fx <- fillseq(xx, step=xstep) fy <- fillseq(yy, step=ystep) xcol <- fx[[1L]] yrow <- fy[[1L]] ## trap very large matrices ok <- checkbigmatrix(length(xcol), length(yrow), fatal=fatal) if(!ok) return(NULL) ## mapping from xx to xcol, yy to yrow jjj <- fx[[2L]] iii <- fy[[2L]] ## make matrix for full sequence m <- matrix(NA, length(yrow), length(xcol)) ## run through columns of pixel values nz <- ncol(z) result <- vector(mode="list", length=nz) names(result) <- colnames(z) for(k in seq_len(nz)) { zk <- z[,k] mm[] <- RNA <- RelevantNA(zk) mm[iijj] <- zk m[] <- RNA m[iii,jjj] <- mm lev <- levels(zk) mo <- if(is.null(lev)) m else factor(m, levels=seq_along(lev), labels=lev) result[[k]] <- im(mat=mo, xcol=xcol, yrow=yrow) } if(nz == 1 && drop) result <- result[[1L]] return(result) } # convert to image from some other format, then do something do.as.im <- function(x, action, ..., W = NULL, eps = NULL, dimyx = NULL, xy = NULL, na.replace = NULL) { Z <- as.im(x, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace) Y <- do.call(action, list(Z, ...)) return(Y) } na.handle.im <- function(X, na.replace) { 
if(is.null(na.replace)) return(X) if(length(na.replace) != 1) stop("na.replace should be a single value") if(X$type == "factor") { lev <- levels(X) newlev <- union(lev, na.replace) if(length(newlev) > length(lev)) levels(X) <- newlev } X$v[is.na(X$v)] <- na.replace return(X) } repair.old.factor.image <- function(x) { # convert from old to new representation of factor images if(x$type != "factor") return(x) v <- x$v isold <- !is.null(lev <- attr(x, "levels")) isnew <- is.factor(v) && is.matrix(v) if(isnew) return(x) if(!isold) stop("Internal error: unrecognised format for factor-valued image") v <- factor(v, levels=lev) dim(v) <- x$dim x$v <- v return(x) } repair.image.xycoords <- function(x) { v <- x$v if(is.null(dim(v))) dim(v) <- c(length(x$yrow), length(x$xcol)) im(v, xrange=x$xrange, yrange=x$yrange, unitname=unitname(x)) } spatstat/R/closepairs.R0000644000176200001440000005624113555526342014627 0ustar liggesusers# # closepairs.R # # $Revision: 1.44 $ $Date: 2019/10/28 08:48:47 $ # # simply extract the r-close pairs from a dataset # # Less memory-hungry for large patterns # closepairs <- function(X, rmax, ...) { UseMethod("closepairs") } closepairs.ppp <- function(X, rmax, twice=TRUE, what=c("all", "indices", "ijd"), distinct=TRUE, neat=TRUE, periodic=FALSE, ...) 
{ verifyclass(X, "ppp") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L) stopifnot(is.finite(rmax)) stopifnot(rmax >= 0) ordered <- list(...)$ordered if(missing(twice) && !is.null(ordered)) { warning("Obsolete argument 'ordered' has been replaced by 'twice'") twice <- ordered } if(periodic && !is.rectangle(Window(X))) warning("Periodic edge correction applied in non-rectangular window", call.=FALSE) npts <- npoints(X) null.answer <- switch(what, all = { list(i=integer(0), j=integer(0), xi=numeric(0), yi=numeric(0), xj=numeric(0), yj=numeric(0), dx=numeric(0), dy=numeric(0), d=numeric(0)) }, indices = { list(i=integer(0), j=integer(0)) }, ijd = { list(i=integer(0), j=integer(0), d=numeric(0)) }) if(npts == 0) return(null.answer) ## sort points by increasing x coordinate if(!periodic) { oo <- fave.order(X$x) Xsort <- X[oo] } ## First make an OVERESTIMATE of the number of unordered pairs nsize <- list(...)$nsize # secret option to test memory overflow code if(!is.null(nsize)) { splat("Using nsize =", nsize) } else { #' normal usage nsize <- ceiling(2 * pi * (npts^2) * (rmax^2)/area(Window(X))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning("Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } } ## Now extract pairs if(periodic) { ## special algorithm for periodic distance got.twice <- TRUE x <- X$x y <- X$y r <- rmax p <- sidelengths(Frame(X)) ng <- nsize storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(p) <- "double" storage.mode(ng) <- "integer" z <- .Call("closePpair", xx=x, yy=y, pp=p, rr=r, nguess=ng, PACKAGE="spatstat") i <- z[[1L]] j <- z[[2L]] d <- z[[3L]] if(what == "all") { xi <- x[i] yi <- y[i] xj <- x[j] yj <- y[j] dx <- xj - xi dy <- yj - yi } } else if(spatstat.options("closepairs.newcode")) { # ------------------- use new faster code --------------------- # fast algorithms collect each distinct pair 
only once got.twice <- FALSE ng <- nsize # x <- Xsort$x y <- Xsort$y r <- rmax storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("Vclosepairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 9) stop("Internal error: incorrect format returned from Vclosepairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] xi <- z[[3L]] yi <- z[[4L]] xj <- z[[5L]] yj <- z[[6L]] dx <- z[[7L]] dy <- z[[8L]] d <- z[[9L]] }, indices = { z <- .Call("VcloseIJpairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 2) stop("Internal error: incorrect format returned from VcloseIJpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] }, ijd = { z <- .Call("VcloseIJDpairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) stop("Internal error: incorrect format returned from VcloseIJDpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] d <- z[[3L]] }) } else if(spatstat.options("closepairs.altcode")) { #' experimental alternative code got.twice <- FALSE ng <- nsize # x <- Xsort$x y <- Xsort$y r <- rmax storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("altVclosepairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 9) stop("Internal error: incorrect format returned from altVclosepairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] xi <- z[[3L]] yi <- z[[4L]] xj <- z[[5L]] yj <- z[[6L]] dx <- z[[7L]] dy <- z[[8L]] d <- z[[9L]] }, indices = { z <- .Call("altVcloseIJpairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 2) stop("Internal error: incorrect format returned from altVcloseIJpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] }, ijd = { z <- .Call("altVcloseIJDpairs", xx=x, yy=y, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) stop("Internal error: 
incorrect format returned from altVcloseIJDpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] d <- z[[3L]] }) } else { # ------------------- use older code -------------------------- if(!distinct) { ii <- seq_len(npts) xx <- X$x yy <- X$y zeroes <- rep(0, npts) null.answer <- switch(what, all = { list(i=ii, j=ii, xi=xx, yi=yy, xj=xx, yj=yy, dx=zeroes, dy=zeroes, d=zeroes) }, indices = { list(i=ii, j=ii) }, ijd = { list(i=ii, j=ii, d=zeroes) }) } got.twice <- TRUE nsize <- nsize * 2 z <- .C("Fclosepairs", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1L)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) { # Guess was insufficient # Obtain an OVERCOUNT of the number of pairs # (to work around gcc bug #323) rmaxplus <- 1.25 * rmax nsize <- .C("paircount", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), rmaxi=as.double(rmaxplus), count=as.integer(integer(1L)), PACKAGE = "spatstat")$count if(nsize <= 0) return(null.answer) # add a bit more for safety nsize <- ceiling(1.1 * nsize) + 2 * npts # now extract points z <- .C("Fclosepairs", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1L)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) 
stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize)) } # trim vectors to the length indicated npairs <- z$nout if(npairs <= 0) return(null.answer) actual <- seq_len(npairs) i <- z$iout[actual] # sic j <- z$jout[actual] switch(what, indices={}, all={ xi <- z$xiout[actual] yi <- z$yiout[actual] xj <- z$xjout[actual] yj <- z$yjout[actual] dx <- z$dxout[actual] dy <- z$dyout[actual] d <- z$dout[actual] }, ijd = { d <- z$dout[actual] }) # ------------------- end code switch ------------------------ } if(!periodic) { ## convert i,j indices to original sequence i <- oo[i] j <- oo[j] } if(twice) { ## both (i, j) and (j, i) should be returned if(!got.twice) { ## duplication required iold <- i jold <- j i <- c(iold, jold) j <- c(jold, iold) switch(what, indices = { }, ijd = { d <- rep(d, 2) }, all = { xinew <- c(xi, xj) yinew <- c(yi, yj) xjnew <- c(xj, xi) yjnew <- c(yj, yi) xi <- xinew yi <- yinew xj <- xjnew yj <- yjnew dx <- c(dx, -dx) dy <- c(dy, -dy) d <- rep(d, 2) }) } } else { ## only one of (i, j) and (j, i) should be returned if(got.twice) { ## remove duplication ok <- (i < j) i <- i[ok] j <- j[ok] switch(what, indices = { }, all = { xi <- xi[ok] yi <- yi[ok] xj <- xj[ok] yj <- yj[ok] dx <- dx[ok] dy <- dy[ok] d <- d[ok] }, ijd = { d <- d[ok] }) } else if(neat) { ## enforce i < j swap <- (i > j) tmp <- i[swap] i[swap] <- j[swap] j[swap] <- tmp if(what == "all") { xinew <- ifelse(swap, xj, xi) yinew <- ifelse(swap, yj, yi) xjnew <- ifelse(swap, xi, xj) yjnew <- ifelse(swap, yi, yj) xi <- xinew yi <- yinew xj <- xjnew yj <- yjnew dx[swap] <- -dx[swap] dy[swap] <- -dy[swap] } } ## otherwise no action required } ## add pairs of identical points? 
if(!distinct) { ii <- seq_len(npts) xx <- X$x yy <- X$y zeroes <- rep(0, npts) i <- c(i, ii) j <- c(j, ii) switch(what, ijd={ d <- c(d, zeroes) }, all = { xi <- c(xi, xx) yi <- c(yi, yy) xj <- c(xj, xx) yi <- c(yi, yy) dx <- c(dx, zeroes) dy <- c(dy, zeroes) d <- c(d, zeroes) }) } ## done switch(what, all = { answer <- list(i=i, j=j, xi=xi, yi=yi, xj=xj, yj=yj, dx=dx, dy=dy, d=d) }, indices = { answer <- list(i = i, j = j) }, ijd = { answer <- list(i=i, j=j, d=d) }) return(answer) } ####################### crosspairs <- function(X, Y, rmax, ...) { UseMethod("crosspairs") } crosspairs.ppp <- function(X, Y, rmax, what=c("all", "indices", "ijd"), ...) { verifyclass(X, "ppp") verifyclass(Y, "ppp") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L && rmax >= 0) null.answer <- switch(what, all = { list(i=integer(0), j=integer(0), xi=numeric(0), yi=numeric(0), xj=numeric(0), yj=numeric(0), dx=numeric(0), dy=numeric(0), d=numeric(0)) }, indices = { list(i=integer(0), j=integer(0)) }, ijd = { list(i=integer(0), j=integer(0), d=numeric(0)) }) nX <- npoints(X) nY <- npoints(Y) if(nX == 0 || nY == 0) return(null.answer) # order patterns by increasing x coordinate ooX <- fave.order(X$x) Xsort <- X[ooX] ooY <- fave.order(Y$x) Ysort <- Y[ooY] if(spatstat.options("crosspairs.newcode")) { # ------------------- use new faster code --------------------- # First (over)estimate the number of pairs nsize <- ceiling(2 * pi * (rmax^2) * nX * nY/area(Window(Y))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning( "Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } # .Call Xx <- Xsort$x Xy <- Xsort$y Yx <- Ysort$x Yy <- Ysort$y r <- rmax ng <- nsize storage.mode(Xx) <- storage.mode(Xy) <- "double" storage.mode(Yx) <- storage.mode(Yy) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("Vcrosspairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, 
nguess=ng, PACKAGE = "spatstat") if(length(z) != 9) stop("Internal error: incorrect format returned from Vcrosspairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] xi <- z[[3L]] yi <- z[[4L]] xj <- z[[5L]] yj <- z[[6L]] dx <- z[[7L]] dy <- z[[8L]] d <- z[[9L]] }, indices = { z <- .Call("VcrossIJpairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 2) stop("Internal error: incorrect format returned from VcrossIJpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] }, ijd = { z <- .Call("VcrossIJDpairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) stop("Internal error: incorrect format returned from VcrossIJDpairs") i <- z[[1L]] # NB no increment required j <- z[[2L]] d <- z[[3L]] }) } else { # Older code # obtain upper estimate of number of pairs # (to work around gcc bug 323) rmaxplus <- 1.25 * rmax nsize <- .C("crosscount", nn1=as.integer(X$n), x1=as.double(Xsort$x), y1=as.double(Xsort$y), nn2=as.integer(Ysort$n), x2=as.double(Ysort$x), y2=as.double(Ysort$y), rmaxi=as.double(rmaxplus), count=as.integer(integer(1L)), PACKAGE = "spatstat")$count if(nsize <= 0) return(null.answer) # allow slightly more space to work around gcc bug #323 nsize <- ceiling(1.1 * nsize) + X$n + Y$n # now extract pairs z <- .C("Fcrosspairs", nn1=as.integer(X$n), x1=as.double(Xsort$x), y1=as.double(Xsort$y), nn2=as.integer(Y$n), x2=as.double(Ysort$x), y2=as.double(Ysort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1L)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop(paste("Internal error: C routine complains that insufficient space was 
allocated:", nsize)) # trim vectors to the length indicated npairs <- z$nout if(npairs <= 0) return(null.answer) actual <- seq_len(npairs) i <- z$iout[actual] # sic j <- z$jout[actual] xi <- z$xiout[actual] yi <- z$yiout[actual] xj <- z$xjout[actual] yj <- z$yjout[actual] dx <- z$dxout[actual] dy <- z$dyout[actual] d <- z$dout[actual] } # convert i,j indices to original sequences i <- ooX[i] j <- ooY[j] # done switch(what, all = { answer <- list(i=i, j=j, xi=xi, yi=yi, xj=xj, yj=yj, dx=dx, dy=dy, d=d) }, indices = { answer <- list(i=i, j=j) }, ijd = { answer <- list(i=i, j=j, d=d) }) return(answer) } closethresh <- function(X, R, S, twice=TRUE, ...) { # list all R-close pairs # and indicate which of them are S-close (S < R) # so that results are consistent with closepairs(X,S) verifyclass(X, "ppp") stopifnot(is.numeric(R) && length(R) == 1L && R >= 0) stopifnot(is.numeric(S) && length(S) == 1L && S >= 0) stopifnot(S < R) ordered <- list(...)$ordered if(missing(twice) && !is.null(ordered)) { warning("Obsolete argument 'ordered' has been replaced by 'twice'") twice <- ordered } npts <- npoints(X) if(npts == 0) return(list(i=integer(0), j=integer(0), t=logical(0))) # sort points by increasing x coordinate oo <- fave.order(X$x) Xsort <- X[oo] # First make an OVERESTIMATE of the number of pairs nsize <- ceiling(4 * pi * (npts^2) * (R^2)/area(Window(X))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning("Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } # Now extract pairs x <- Xsort$x y <- Xsort$y r <- R s <- S ng <- nsize storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(s) <- "double" storage.mode(ng) <- "integer" z <- .Call("Vclosethresh", xx=x, yy=y, rr=r, ss=s, nguess=ng, PACKAGE = "spatstat") if(length(z) != 3) stop("Internal error: incorrect format returned from Vclosethresh") i <- z[[1L]] # NB no increment required j <- z[[2L]] th <- 
as.logical(z[[3L]]) # convert i,j indices to original sequence i <- oo[i] j <- oo[j] # fast C code only returns i < j if(twice) { iold <- i jold <- j i <- c(iold, jold) j <- c(jold, iold) th <- rep(th, 2) } # done return(list(i=i, j=j, th=th)) } crosspairquad <- function(Q, rmax, what=c("all", "indices")) { # find all close pairs X[i], U[j] stopifnot(inherits(Q, "quad")) what <- match.arg(what) X <- Q$data D <- Q$dummy clX <- closepairs(X=X, rmax=rmax, what=what) clXD <- crosspairs(X=X, Y=D, rmax=rmax, what=what) # convert all indices to serial numbers in union.quad(Q) # assumes data are listed first clXD$j <- npoints(X) + clXD$j result <- as.list(rbind(as.data.frame(clX), as.data.frame(clXD))) return(result) } tweak.closepairs <- function(cl, rmax, i, deltax, deltay, deltaz) { stopifnot(is.list(cl)) stopifnot(all(c("i", "j") %in% names(cl))) if(!any(c("xi", "dx") %in% names(cl))) stop("Insufficient data to update closepairs list") check.1.real(rmax) check.1.integer(i) check.1.real(deltax) check.1.real(deltay) if("dz" %in% names(cl)) check.1.real(deltaz) else { deltaz <- NULL } hit.i <- (cl$i == i) hit.j <- (cl$j == i) if(any(hit.i | hit.j)) { mm <- hit.i & !hit.j if(any(mm)) { cl$xi[mm] <- cl$xi[mm] + deltax cl$yi[mm] <- cl$yi[mm] + deltay cl$dx[mm] <- cl$dx[mm] - deltax cl$dy[mm] <- cl$dy[mm] - deltay if(is.null(deltaz)) { cl$d[mm] <- sqrt(cl$dx[mm]^2 + cl$dy[mm]^2) } else { cl$zi[mm] <- cl$zi[mm] + deltaz cl$dz[mm] <- cl$dz[mm] - deltaz cl$d[mm] <- sqrt(cl$dx[mm]^2 + cl$dy[mm]^2 + cl$dz[mm]^2) } } mm <- hit.j & !hit.i if(any(mm)) { cl$xj[mm] <- cl$xj[mm] + deltax cl$yj[mm] <- cl$yj[mm] + deltay cl$dx[mm] <- cl$dx[mm] + deltax cl$dy[mm] <- cl$dy[mm] + deltay if(is.null(deltaz)) { cl$d[mm] <- sqrt(cl$dx[mm]^2 + cl$dy[mm]^2) } else { cl$zj[mm] <- cl$zj[mm] + deltaz cl$dz[mm] <- cl$dz[mm] + deltaz cl$d[mm] <- sqrt(cl$dx[mm]^2 + cl$dy[mm]^2 + cl$dz[mm]^2) } } mm <- hit.i & hit.j if(any(mm)) { cl$xi[mm] <- cl$xi[mm] + deltax cl$xj[mm] <- cl$xj[mm] + deltax cl$yi[mm] 
<- cl$yi[mm] + deltay cl$yj[mm] <- cl$yj[mm] + deltay if(!is.null(deltaz)) { cl$zi[mm] <- cl$zi[mm] + deltaz cl$zj[mm] <- cl$zj[mm] + deltaz } } if(any(lost <- (cl$d > rmax))) cl <- as.list(as.data.frame(cl)[!lost, , drop=FALSE]) } return(cl) } spatstat/R/distfunlpp.R0000644000176200001440000000173313333543254014642 0ustar liggesusers# # distfunlpp.R # # method for 'distfun' for class 'lpp' # # $Revision: 1.3 $ $Date: 2018/04/23 04:52:17 $ # distfun.lpp <- local({ distfun.lpp <- function(X, ..., k=1) { stopifnot(inherits(X, "lpp")) force(X) force(k) stopifnot(length(k) == 1) L <- as.linnet(X) f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { # L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) d <- nncross.lpp(Y, X, what="dist", k=k) return(d) } f <- linfun(f, L) assign("k", k, envir=environment(f)) assign("X", X, envir=environment(f)) attr(f, "explain") <- uitleggen attr(f, "extrargs") <- list(k=k) return(f) } uitleggen <- function(x, ...) { splat("Distance function for lpp object") envx <- environment(x) k <- get("k", envir=envx) if(k != 1L) splat("Yields distance to", ordinal(k), "nearest point") X <- get("X", envir=envx) print(X) } distfun.lpp }) spatstat/R/ripras.R0000644000176200001440000000266713333543255013762 0ustar liggesusers# # ripras.S Ripley-Rasson estimator of domain # # # $Revision: 1.14 $ $Date: 2014/10/24 00:22:30 $ # # # # #------------------------------------- bounding.box.xy <- function(x, y=NULL) { xy <- xy.coords(x,y) if(length(xy$x) == 0) return(NULL) owin(range(xy$x), range(xy$y), check=FALSE) } convexhull.xy <- function(x, y=NULL) { xy <- xy.coords(x, y) x <- xy$x y <- xy$y if(length(x) < 3) return(NULL) h <- rev(chull(x, y)) # must be anticlockwise if(length(h) < 3) return(NULL) w <- owin(poly=list(x=x[h], y=y[h]), check=FALSE) return(w) } ripras <- function(x, y=NULL, shape="convex", f) { xy <- xy.coords(x, y) n <- length(xy$x) w <- switch(shape, convex = convexhull.xy(xy), rectangle = boundingbox(xy), 
stop(paste("Unrecognised option: shape=", dQuote(shape)))) if(is.null(w)) return(NULL) # expansion factor if(!missing(f)) stopifnot(is.numeric(f) && length(f) == 1 && f >= 1) else switch(shape, convex = { # number of vertices m <- summary(w)$nvertices f <- if(m < n) 1/sqrt(1 - m/n) else 2 }, rectangle = { f <- (n+1)/(n-1) }) # centroid ce <- unlist(centroid.owin(w)) # shift centroid to origin W <- shift(w, -ce) # rescale W <- affine(W, mat=diag(c(f,f))) # shift origin to centroid W <- shift(W, ce) return(W) } spatstat/R/plot.ppm.R0000644000176200001440000000542413333543255014225 0ustar liggesusers# # plot.ppm.S # # $Revision: 2.12 $ $Date: 2016/06/11 08:02:17 $ # # plot.ppm() # Plot a point process model fitted by ppm(). # # # plot.ppm <- function(x, ngrid = c(40,40), superimpose = TRUE, trend=TRUE, cif=TRUE, se=TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it=TRUE, locations=NULL, covariates=NULL, ...) { model <- x # Plot a point process model fitted by ppm(). # verifyclass(model, "ppm") # # find out what kind of model it is # mod <- summary(model, quick="entries") stationary <- mod$stationary poisson <- mod$poisson marked <- mod$marked multitype <- mod$multitype data <- mod$entries$data if(marked) { if(!multitype) stop("Not implemented for general marked point processes") else mrkvals <- levels(marks(data)) } else mrkvals <- 1 # ntypes <- length(mrkvals) # # Interpret options # ----------------- # # Whether to plot trend, cif, se if(!trend && !cif && !se) { cat(paste("Nothing plotted;", sQuote("trend"), ",", sQuote("cif"), "and", sQuote("se"), "are all FALSE\n")) return(invisible(NULL)) } # Suppress uninteresting plots # unless explicitly instructed otherwise if(missing(trend)) trend <- !stationary if(missing(cif)) cif <- !poisson if(missing(se)) se <- poisson && !stationary else if(se && !poisson) { warning(paste("standard error calculation", "is only implemented for Poisson models")) se <- FALSE } if(!trend && !cif && !se) { cat("Nothing 
plotted -- all plots selected are flat surfaces.\n") return(invisible(NULL)) } # # style of plot: suppress pseudo-default # if(missing(how)) how <- "image" # # # Do the prediction # ------------------ out <- list() surftypes <- c("trend","cif","se")[c(trend,cif,se)] ng <- if(missing(ngrid) && !missing(locations)) NULL else ngrid for (ttt in surftypes) { p <- predict(model, ngrid=ng, locations=locations, covariates=covariates, type = ttt, getoutofjail=TRUE) # permit outdated usage type="se" if(is.im(p)) p <- list(p) out[[ttt]] <- p } # Make it a plotppm object # ------------------------ class(out) <- "plotppm" attr(out, "mrkvals") <- mrkvals # Actually plot it if required # ---------------------------- if(plot.it) { if(!superimpose) data <- NULL if(missing(pause)) pause <- NULL plot(out,data=data,trend=trend,cif=cif,se=se,how=how,pause=pause, ...) } return(invisible(out)) } spatstat/R/lennard.R0000644000176200001440000000707013333543255014076 0ustar liggesusers# # # lennard.R # # $Revision: 1.22 $ $Date: 2018/03/15 07:37:41 $ # # Lennard-Jones potential # # # ------------------------------------------------------------------- # LennardJones <- local({ BlankLJ <- list( name = "Lennard-Jones process", creator = "LennardJones", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { d6 <- d^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) } else { # expand around sig0 and set large numbers to Inf drat <- d/sig0 d6 <- drat^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) small <- (drat < 1/4) small <- array(c(small, small), dim=c(dim(d), 2)) p[small] <- -Inf big <- (drat > 4) big <- array(c(big, big), dim=c(dim(d), 2)) p[big] <- 0 } return(p) }, par = list(sigma0=NULL), # filled in later parnames = "Initial approximation to sigma", hasInf = TRUE, selfstart = function(X, self) { # self starter for Lennard Jones # attempt to set value of 'sigma0' if(!is.na(self$par$sigma0)) { # value fixed by user or previous invocation 
return(self) } if(npoints(X) < 2) { # not enough points return(self) } s0 <- minnndist(X) if(s0 == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Lennard-Jones model")) s0 <- mean(nndist(X)) if(s0 == 0) return(self) } LennardJones(s0) }, init = function(...){}, # do nothing update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta1 <- as.numeric(coeffs[1L]) theta2 <- as.numeric(coeffs[2L]) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 if(sign(theta1) * sign(theta2) == 1) { sigma <- sig0 * (theta1/theta2)^(1/6) epsilon <- (theta2^2)/(4 * theta1) } else { sigma <- NA epsilon <- NA } return(list(param=list(sigma=sigma, epsilon=epsilon), inames="interaction parameters", printable=signif(c(sigma=sigma,epsilon=epsilon)))) }, valid = function(coeffs, self) { p <- unlist(self$interpret(coeffs, self)$param) return(all(is.finite(p) & (p > 0))) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ if(anyNA(coeffs) || epsilon == 0) return(Inf) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 theta1 <- abs(coeffs[1L]) theta2 <- abs(coeffs[2L]) return(sig0 * max((theta1/epsilon)^(1/12), (theta2/epsilon)^(1/6))) }, version=NULL # filled in later ) class(BlankLJ) <- "interact" LennardJones <- function(sigma0=NA) { if(is.null(sigma0) || !is.finite(sigma0)) sigma0 <- NA instantiate.interact(BlankLJ, list(sigma0=sigma0)) } LennardJones <- intermaker(LennardJones, BlankLJ) LennardJones }) spatstat/R/saturated.R0000644000176200001440000000333113433151224014433 0ustar liggesusers# # # saturated.S # # $Revision: 1.10 $ $Date: 2019/02/20 03:34:50 $ # # Saturated pairwise process with user-supplied potential # # Saturated() create a saturated pairwise process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Saturated <- function(pot, name) { if(missing(name)) name <- "Saturated process with user-defined potential" fop <- names(formals(pot)) if(!isTRUE(all.equal(fop, c("d", "par"))) && !isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) out <- list( name = name, creator = "Saturated", family = pairsat.family, pot = pot, par = NULL, parnames = NULL, hasInf = NA, init = NULL, update = function(self, ...){ do.call(Saturated, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } Saturated <- intermaker(Saturated, list(creator="Saturated", name="saturated process with user-defined potential", par=formals(Saturated), parnames=list("the potential", "the name of the interaction"))) spatstat/R/summary.im.R0000644000176200001440000000757113333543255014562 0ustar liggesusers# # summary.im.R # # 
summary() method for class "im" # # $Revision: 1.21 $ $Date: 2016/09/01 02:31:52 $ # # summary.im() # print.summary.im() # print.im() # summary.im <- function(object, ...) { verifyclass(object, "im") x <- object y <- unclass(x)[c("dim", "xstep", "ystep")] pixelarea <- y$xstep * y$ystep # extract image values v <- x$v inside <- !is.na(v) v <- v[inside] # type of values? y$type <- x$type # factor-valued? lev <- levels(x) if(!is.null(lev) && !is.factor(v)) v <- factor(v, levels=seq_along(lev), labels=lev) switch(x$type, integer=, real={ y$mean <- mv <- mean(v) y$integral <- mv * length(v) * pixelarea y$range <- ra <- range(v) y$min <- ra[1] y$max <- ra[2] }, factor={ y$levels <- lev y$table <- table(v, dnn="") }, complex={ y$mean <- mv <- mean(v) y$integral <- mv * length(v) * pixelarea rr <- range(Re(v)) y$Re <- list(range=rr, min=rr[1], max=rr[2]) ri <- range(Im(v)) y$Im <- list(range=ri, min=ri[1], max=ri[2]) }, { # another unknown type pixelvalues <- v y$summary <- summary(pixelvalues) }) # summarise pixel raster win <- as.owin(x) y$window <- summary.owin(win) y$fullgrid <- (rescue.rectangle(win)$type == "rectangle") y$units <- unitname(x) class(y) <- "summary.im" return(y) } print.summary.im <- function(x, ...) 
{ verifyclass(x, "summary.im") splat(paste0(x$type, "-valued"), "pixel image") unitinfo <- summary(x$units) pluralunits <- unitinfo$plural sigdig <- getOption('digits') di <- x$dim win <- x$window splat(di[1], "x", di[2], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(x$window$xrange, sigdig)), "x", prange(signif(x$window$yrange, sigdig)), unitinfo$plural, unitinfo$explain) splat("dimensions of each pixel:", signif(x$xstep, 3), "x", signif(x$ystep, sigdig), pluralunits) if(!is.null(explain <- unitinfo$explain)) splat(explain) fullgrid <- x$fullgrid if(fullgrid) { splat("Image is defined on the full rectangular grid") whatpart <- "Frame" } else { splat("Image is defined on a subset of the rectangular grid") whatpart <- "Subset" } splat(whatpart, "area =", win$area, "square", pluralunits) if(!fullgrid) { af <- signif(win$areafraction, min(3, sigdig)) splat(whatpart, "area fraction =", af) } if(fullgrid) splat("Pixel values") else splat("Pixel values (inside window):") switch(x$type, integer=, real={ splat("\trange =", prange(signif(x$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, factor={ print(x$table) }, complex={ splat("\trange: Real", prange(signif(x$Re$range, sigdig)), "Imaginary", prange(signif(x$Im$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, { print(x$summary) }) return(invisible(NULL)) } print.im <- function(x, ...) 
{ splat(paste0(x$type, "-valued"), "pixel image") if(x$type == "factor") { splat("factor levels:") print(levels(x)) } sigdig <- min(5, getOption('digits')) unitinfo <- summary(unitname(x)) di <- x$dim splat(di[1], "x", di[2], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(zapsmall(x$xrange), sigdig)), "x", prange(signif(zapsmall(x$yrange), sigdig)), unitinfo$plural, unitinfo$explain) return(invisible(NULL)) } spatstat/R/linearmrkcon.R0000644000176200001440000000307613333543255015141 0ustar liggesusers# # linearmrkcon.R # # mark connection function & mark equality function for linear networks # # $Revision: 1.4 $ $Date: 2017/02/07 08:12:05 $ # linearmarkconnect <- function(X, i, j, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # pcfij <- linearpcfcross(X, i, j, r=r, ...) pcfall <- linearpcf(X, r=r, ...) qi <- mean(marx == i) qj <- mean(marx == j) result <- eval.fv(qi * qj * pcfij/pcfall) # rebrand result <- rebadge.as.crossfun(result, "p", "L", i, j) attr(result, "labl") <- attr(pcfij, "labl") return(result) } linearmarkequal <- local({ linearmarkequal <- function(X, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") ## ensure distance information is present X <- as.lpp(X, sparse=FALSE) lev <- levels(marks(X)) v <- list() for(l in lev) v[[l]] <- linearmarkconnect(X, l, l, r=r, ...) 
result <- Reduce(addfuns, v) result <-rebadge.fv(result, quote(p[L](r)), new.fname=c("p", "L")) attr(result, "labl") <- attr(v[[1L]], "labl") return(result) } addfuns <- function(f1, f2) eval.fv(f1 + f2) linearmarkequal }) spatstat/R/fiksel.R0000644000176200001440000001407113333543255013727 0ustar liggesusers# # # fiksel.R # # $Revision: 1.18 $ $Date: 2018/03/15 07:37:41 $ # # Fiksel interaction # # ee Stoyan Kendall Mcke 1987 p 161 # # ------------------------------------------------------------------- # Fiksel <- local({ # ......... auxiliary functions ........... fikselterms <- function(U, X, r, kappa, EqualPairs=NULL) { answer <- crossfikselterms(U, X, r, kappa) nU <- npoints(U) # subtract contrinbutions from identical pairs (exp(-0) = 1 for each) if(length(EqualPairs) > 0) { idcount <- as.integer(table(factor(EqualPairs[,2L], levels=1:nU))) answer <- answer - idcount } return(answer) } crossfikselterms <- function(X, Y, r, kappa) { stopifnot(is.numeric(r)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine out <- .C("Efiksel", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), kkappa = as.double(kappa), values = as.double(double(nX)), PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # ........ template object .............. 
BlankFiksel <- list( name = "Fiksel process", creator = "Fiksel", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- (d <= par$r) * exp( - d * par$kappa) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL, kappa=NULL), # filled in later parnames = c("interaction distance", "hard core distance", "rate parameter"), hasInf = TRUE, selfstart = function(X, self) { # self starter for Fiksel nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- minnndist(X) if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } if(md == 0) warning(paste("Pattern contains duplicated points:", "hard core must be zero")) # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Fiksel(r=self$par$r, hc = hcX, kappa=self$par$kappa) }, init = function(self) { r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa check.1.real(r) check.1.real(kappa) if(!is.na(hc)) { check.1.real(hc) stopifnot(hc > 0) stopifnot(r > hc) } else stopifnot(r > 0) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { a <- as.numeric(coeffs[1L]) return(list(param=list(a=a), inames="interaction strength a", printable=signif(a))) }, valid = function(coeffs, self) { a <- (self$interpret)(coeffs, self)$param$a return(is.finite(a)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r hc <- self$par$hc if(anyNA(coeffs)) return(r) a <- coeffs[1L] if(abs(a) <= epsilon) return(hc) else return(r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ...) { ## fast evaluator for Fiksel interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Fiksel") r <- potpars$r hc <- potpars$hc kappa <- potpars$kappa hclose <- (strausscounts(U, X, hc, EqualPairs) != 0) fikselbit <- fikselterms(U, X, r, kappa, EqualPairs) if(!splitInf) { answer <- ifelseAX(hclose, -Inf, fikselbit) answer <- matrix(answer, ncol=1L) } else { answer <- fikselbit answer <- matrix(answer, ncol=1L) attr(answer, "-Inf") <- hclose } return(answer) }, Mayer=function(coeffs, self) { # second Mayer cluster integral a <- as.numeric(coeffs[1L]) r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa f <- function(x, kappa, a){ 2 * pi * x * (1 - exp(a * exp(-x * kappa))) } hardbit <- integrate(f=f, lower=hc, upper=r, a=a, kappa=kappa) mess <- hardbit[["message"]] if(!identical(mess, "OK")) { warning(mess) return(NA) } return(pi * hc^2 + hardbit$value) } ) class(BlankFiksel) <- "interact" Fiksel <- function(r, hc=NA, kappa) { instantiate.interact(BlankFiksel, list(r = r, hc = hc, kappa=kappa)) } Fiksel <- intermaker(Fiksel, BlankFiksel) Fiksel }) spatstat/R/reach.R0000644000176200001440000000162713333543255013537 0ustar liggesusers# # reach.R # # $Revision: 1.8 $ $Date: 2007/10/24 09:41:15 $ # reach <- function(x, ...) { UseMethod("reach") } reach.interact <- function(x, ...) 
{ verifyclass(x, "interact") irange <- x$irange if(is.null(irange)) return(Inf) if(!is.function(irange)) stop("Internal error - x$irange is not a function") ir <- irange(x) if(is.na(ir)) ir <- Inf return(ir) } reach.ppm <- function(x, ..., epsilon=0) { verifyclass(x, "ppm") # Poisson case if(is.poisson.ppm(x)) return(0) # extract info inte <- x$interaction coeffs <- coef(x) if(newstyle.coeff.handling(inte)) { # extract only interaction coefficients Vnames <- x$internal$Vnames coeffs <- coeffs[Vnames] } # apply 'irange' function irange <- inte$irange if(is.null(irange)) return(Inf) ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } spatstat/R/scanstat.R0000644000176200001440000002520713333543255014275 0ustar liggesusers## ## scanstat.R ## ## Spatial scan statistics ## ## $Revision: 1.17 $ $Date: 2017/06/05 10:31:58 $ ## scanmeasure <- function(X, ...){ UseMethod("scanmeasure") } scanmeasure.ppp <- function(X, r, ..., method=c("counts", "fft")) { method <- match.arg(method) check.1.real(r) ## enclosing window R <- as.rectangle(as.owin(X)) ## determine pixel resolution M <- as.mask(R, ...) ## expand domain to include centres of all circles intersecting R W <- grow.mask(M, r) ## switch(method, counts = { ## direct calculation using C code ## get new dimensions dimyx <- W$dim xr <- W$xrange yr <- W$yrange nr <- dimyx[1] nc <- dimyx[2] ## n <- npoints(X) zz <- .C("scantrans", x=as.double(X$x), y=as.double(X$y), n=as.integer(n), xmin=as.double(xr[1]), ymin=as.double(yr[1]), xmax=as.double(xr[2]), ymax=as.double(yr[2]), nr=as.integer(nr), nc=as.integer(nc), R=as.double(r), counts=as.integer(numeric(prod(dimyx))), PACKAGE = "spatstat") zzz <- matrix(zz$counts, nrow=dimyx[1], ncol=dimyx[2], byrow=TRUE) Z <- im(zzz, xrange=xr, yrange=yr, unitname=unitname(X)) }, fft = { ## Previous version of scanmeasure.ppp had ## Y <- pixellate(X, ..., padzero=TRUE) ## but this is liable to Gibbs phenomena. 
## Instead, convolve with small Gaussian (sd = 1 pixel width) sigma <- with(W, unique(c(xstep, ystep))) Y <- density(X, ..., sigma=sigma) ## invoke scanmeasure.im Z <- scanmeasure(Y, r) Z <- eval.im(as.integer(round(Z))) }) return(Z) } scanmeasure.im <- function(X, r, ...) { D <- disc(radius=r) eps <- with(X, c(xstep,ystep)) if(any(eps >= 2 * r)) return(eval.im(X * pi * r^2)) D <- as.im(as.mask(D, eps=eps)) Z <- imcov(X, D) return(Z) } scanPoisLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) ll <- nlogn(nZ, muZ) + nlogn(nZco, muZco) - nlogn(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanBinomLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) logbin <- function(k, n) { nlogn(k, n) + nlogn(n-k, n) } ll <- logbin(nZ, muZ) + logbin(nZco, muZco) - logbin(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanLRTS <- function(X, r, ..., method=c("poisson", "binomial"), baseline=NULL, case=2, alternative=c("greater", "less", "two.sided"), saveopt = FALSE, Xmask=NULL) { stopifnot(is.ppp(X)) stopifnot(check.nvector(r)) method <- match.arg(method) alternative <- match.arg(alternative) if(is.null(Xmask)) Xmask <- as.mask(as.owin(X), ...) 
switch(method, poisson={ Y <- X if(is.null(baseline)) { mu <- as.im(Xmask, value=1) } else if(is.ppm(baseline)) { if(is.marked(baseline)) stop("baseline is a marked point process: not supported") mu <- predict(baseline, locations=Xmask) } else if(is.im(baseline) || is.function(baseline)) { mu <- as.im(baseline, W=Xmask) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) nG <- npoints(Y) }, binomial={ stopifnot(is.multitype(X)) lev <- levels(marks(X)) if(length(lev) != 2) warning("X should usually be a bivariate (2-type) point pattern") if(!is.null(baseline)) stop("baseline is not supported in the binomial case") if(is.character(case) && !(case %in% lev)) stop(paste("Unrecognised label for cases:", sQuote(case))) if(is.numeric(case) && !(case %in% seq_along(lev))) stop(paste("Undefined level:", case)) Y <- split(X)[[case]] nG <- npoints(Y) mu <- unmark(X) }) ## The following line ensures that the same pixel resolution information ## is passed to the two calls to 'scanmeasure' below Y$window <- Xmask ## nr <- length(r) lrts <- vector(mode="list", length=nr) for(i in 1:nr) { ri <- r[i] nZ <- scanmeasure(Y, ri) muZ <- scanmeasure(mu, ri) if(!compatible.im(nZ, muZ)) { ha <- harmonise.im(nZ, muZ) nZ <- ha[[1]] muZ <- ha[[2]] } switch(method, poisson = { muG <- integral.im(mu) lrts[[i]] <- eval.im(scanPoisLRTS(nZ, nG, muZ, muG, alternative)) }, binomial = { muG <- npoints(mu) lrts[[i]] <- eval.im(scanBinomLRTS(nZ, nG, muZ, muG, alternative)) }) } if(length(lrts) == 1) { result <- lrts[[1]] } else { result <- im.apply(lrts, max) if(saveopt) attr(result, "iopt") <- im.apply(lrts, which.max) } return(result) } scan.test <- function(X, r, ..., method=c("poisson", "binomial"), nsim = 19, baseline=NULL, case = 2, alternative=c("greater", "less", "two.sided"), verbose=TRUE) { dataname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) method <- match.arg(method) alternative <- match.arg(alternative) stopifnot(is.numeric(r)) 
check.1.real(nsim) if(!(round(nsim) == nsim && nsim > 1)) stop("nsim should be an integer > 1") regionname <- paste("circles of radius", if(length(r) == 1) r else paste("between", min(r), "and", max(r))) ## ## compute observed loglikelihood function ## This also validates the arguments. obsLRTS <- scanLRTS(X=X, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ..., saveopt=TRUE) obs <- max(obsLRTS) sim <- numeric(nsim) ## determine how to simulate switch(method, binomial={ methodname <- c("Spatial scan test", "Null hypothesis: constant relative risk", paste("Candidate cluster regions:", regionname), "Likelihood: binomial", paste("Monte Carlo p-value based on", nsim, "simulations")) lev <- levels(marks(X)) names(lev) <- lev casename <- lev[case] counted <- paste("points with mark", sQuote(casename), "inside cluster region") simexpr <- expression(rlabel(X)) }, poisson={ counted <- paste("points inside cluster region") X <- unmark(X) Xwin <- as.owin(X) Xmask <- as.mask(Xwin, ...) 
if(is.null(baseline)) { nullname <- "Complete Spatial Randomness (CSR)" lambda <- intensity(X) simexpr <- expression(runifpoispp(lambda, Xwin)) dont.complain.about(lambda) } else if(is.ppm(baseline)) { nullname <- baseline$callstring rmhstuff <- rmh(baseline, preponly=TRUE, verbose=FALSE) simexpr <- expression(rmhEngine(rmhstuff)) dont.complain.about(rmhstuff) } else if(is.im(baseline) || is.function(baseline)) { nullname <- "Poisson process with intensity proportional to baseline" base <- as.im(baseline, W=Xmask) alpha <- npoints(X)/integral.im(base) lambda <- eval.im(alpha * base) simexpr <- expression(rpoispp(lambda)) dont.complain.about(lambda) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) methodname <- c("Spatial scan test", paste("Null hypothesis:", nullname), paste("Candidate cluster regions:", regionname), "Likelihood: Poisson", paste("Monte Carlo p-value based on", nsim, "simulations")) }) if(verbose) { cat("Simulating...") pstate <- list() } for(i in 1:nsim) { if(verbose) pstate <- progressreport(i, nsim, state=pstate) Xsim <- eval(simexpr) simLRTS <- scanLRTS(X=Xsim, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ...) 
sim[i] <- max(simLRTS) } pval <- mean(c(sim,obs) >= obs, na.rm=TRUE) names(obs) <- "maxLRTS" nm.alternative <- switch(alternative, greater="Excess of", less="Deficit of", two.sided="Two-sided: excess or deficit of", stop("Unknown alternative")) nm.alternative <- paste(nm.alternative, counted) result <- list(statistic = obs, p.value = pval, alternative = nm.alternative, method = methodname, data.name = dataname) class(result) <- c("scan.test", "htest") attr(result, "obsLRTS") <- obsLRTS attr(result, "X") <- X attr(result, "r") <- r return(result) } plot.scan.test <- function(x, ..., what=c("statistic", "radius"), do.window=TRUE) { xname <- short.deparse(substitute(x)) what <- match.arg(what) Z <- as.im(x, what=what) do.call(plot, resolve.defaults(list(x=Z), list(...), list(main=xname))) if(do.window) { X <- attr(x, "X") plot(as.owin(X), add=TRUE, invert=TRUE) } invisible(NULL) } as.im.scan.test <- function(X, ..., what=c("statistic", "radius")) { Y <- attr(X, "obsLRTS") what <- match.arg(what) if(what == "radius") { iopt <- attr(Y, "iopt") r <- attr(X, "r") Y <- eval.im(r[iopt]) } return(as.im(Y, ...)) } spatstat/R/interact.R0000644000176200001440000002567613613216544014277 0ustar liggesusers# # interact.S # # # $Revision: 1.29 $ $Date: 2020/01/26 04:25:08 $ # # Class 'interact' representing the interpoint interaction # of a point process model # (e.g. Strauss process with a given threshold r) # # Class 'isf' representing a generic interaction structure # (e.g. pairwise interactions) # # These do NOT specify the "trend" part of the model, # only the "interaction" component. # # The analogy is: # # glm() ppm() # # model formula trend formula # # family interaction # # That is, the 'systematic' trend part of a point process # model is specified by a 'trend' formula argument to ppm(), # and the interpoint interaction is specified as an 'interact' # object. # # You only need to know about these classes if you want to # implement a new point process model. 
# # THE DISTINCTION: # An object of class 'isf' describes an interaction structure # e.g. pairwise interaction, triple interaction, # pairwise-with-saturation, Dirichlet interaction. # Think of it as determining the "order" of interaction # but not the specific interaction potential function. # # An object of class 'interact' completely defines the interpoint # interactions in a specific point process model, except for the # regular parameters of the interaction, which are to be estimated # by ppm() or otherwise. An 'interact' object specifies the values # of all the 'nuisance' or 'irregular' parameters. An example # is the Strauss process with a given, fixed threshold r # but with the parameters beta and gamma undetermined. # # DETAILS: # # An object of class 'isf' contains the following: # # $name Name of the interaction structure # e.g. "pairwise" # # $print How to 'print()' this object # [A function; invoked by the 'print' method # 'print.isf()'] # # $eval A function which evaluates the canonical # sufficient statistic for an interaction # of this general class (e.g. any pairwise # interaction.) # # If lambda(u,X) denotes the conditional intensity at a point u # for the point pattern X, then we assume # log lambda(u, X) = theta . S(u,X) # where theta is the vector of regular parameters, # and we call S(u,X) the sufficient statistic. # # A typical calling sequence for the $eval function is # # (f$eval)(X, U, E, potentials, potargs, correction) # # where X is the data point pattern, U is the list of points u # at which the sufficient statistic S(u,X) is to be evaluated, # E is a logical matrix equivalent to (X[i] == U[j]), # $potentials defines the specific potential function(s) and # $potargs contains any nuisance/irregular parameters of these # potentials [the $potargs are passed to the $potentials without # needing to be understood by $eval.] # $correction is the name of the edge correction method. 
# # # An object of class 'interact' contains the following: # # # $name Name of the specific potential # e.g. "Strauss" # # $family Object of class "isf" describing # the interaction structure # # $pot The interaction potential function(s) # -- usually a function or list of functions. # (passed as an argument to $family$eval) # # $par list of any nuisance/irregular parameters # (passed as an argument to $family$eval) # # $parnames vector of long names/descriptions # of the parameters in 'par' # # $init() initialisation action # or NULL indicating none required # # $update() A function to modify $par # [Invoked by 'update.interact()'] # or NULL indicating a default action # # $print How to 'print()' this object # [Invoked by 'print' method 'print.interact()'] # or NULL indicating a default action # # -------------------------------------------------------------------------- print.isf <- function(x, ...) { if(is.null(x)) return(invisible(NULL)) verifyclass(x, "isf") if(!is.null(x$print)) (x$print)(x) invisible(NULL) } print.interact <- function(x, ..., family, brief=FALSE, banner=TRUE) { verifyclass(x, "interact") if(missing(family)) family <- waxlyrical('extras') #' Print name of model if(banner) { if(family && !brief && !is.null(xf <- x$family)) print.isf(xf) splat(if(!brief) "Interaction:" else NULL, x$name, sep="") } # Now print the parameters if(!is.null(x$print)) { (x$print)(x) } else { # default # just print the parameter names and their values pwords <- x$parnames parval <- x$par pwords <- paste(toupper(substring(pwords, 1, 1)), substring(pwords, 2), sep="") isnum <- sapply(parval, is.numeric) parval[isnum] <- lapply(parval[isnum], signif, digits=getOption("digits")) splat(paste(paste0(pwords, ":\t", parval), collapse="\n")) } invisible(NULL) } is.interact <- function(x) { inherits(x, "interact") } update.interact <- function(object, ...) { verifyclass(object, "interact") if(!is.null(object$update)) (object$update)(object, ...) 
else { # Default # First update the version if(outdated.interact(object)) object <- reincarnate.interact(object) # just match the arguments in "..." # with those in object$par and update them want <- list(...) if(length(want) > 0) { m <- match(names(want),names(object$par)) nbg <- is.na(m) if(any(nbg)) { which <- paste((names(want))[nbg]) warning(paste("Arguments not matched: ", which)) } m <- m[!nbg] object$par[m] <- want } # call object's own initialisation routine if(!is.null(object$init)) (object$init)(object) object } } is.poisson.interact <- function(x) { verifyclass(x, "interact") is.null(x$family) } parameters.interact <- function(model, ...) { model$par } # Test whether interact object was made by an older version of spatstat outdated.interact <- function(object) { ver <- object$version older <- is.null(ver) || (package_version(ver) < versionstring.spatstat()) return(older) } # Test whether the functions in the interaction object # expect the coefficient vector to contain ALL coefficients, # or only the interaction coefficients. # This change was introduced in 1.11-0, at the same time # as interact objects were given version numbers. 
newstyle.coeff.handling <- function(object) { stopifnot(inherits(object, "interact")) ver <- object$version old <- is.null(ver) || (package_version(ver) < "1.11") return(!old) } # ###### # # Re-create an interact object in the current version of spatstat # # reincarnate.interact <- function(object) { # re-creates an interact object in the current version of spatstat if(!is.null(object$update)) { newobject <- (object$update)(object) return(newobject) } par <- object$par # pot <- object$pot name <- object$name # get creator function creator <- object$creator if(is.null(creator)) { # old version: look up list creator <- .Spatstat.Old.InteractionList[[name]] if(is.null(creator)) stop(paste("Don't know how to update", sQuote(name), "to current version of spatstat")) } if(is.character(creator)) creator <- get(creator) if(!is.function(creator) && !is.expression(creator)) stop("Internal error: creator is not a function or expression") # call creator if(is.expression(creator)) newobject <- eval(creator) else { # creator is a function # It's assumed that the creator function's arguments are # either identical to components of 'par' (the usual case) # or to one of the components of the object itself (Ord, Saturated) # or to printfun=object$print (Pairwise). 
argnames <- names(formals(creator)) available <- append(par, object) available <- append(available, list(printfun=object$print)) ok <- argnames %in% names(available) if(!all(ok)) stop(paste("Internal error:", ngettext(sum(!ok), "argument", "arguments"), paste(sQuote(argnames[!ok]), collapse=", "), "in creator function were not understood")) newobject <- do.call(creator, available[argnames]) } if(!inherits(newobject, "interact")) stop("Internal error: creator did not return an object of class interact") return(newobject) } # This list is necessary to deal with older formats of 'interact' objects # which did not include the creator name .Spatstat.Old.InteractionList <- list("Diggle-Gratton process" = "DiggleGratton", "Geyer saturation process" = "Geyer", "Lennard-Jones potential" = "LennardJones", "Multitype Strauss process" = "MultiStrauss", "Multitype Strauss Hardcore process" = "MultiStraussHard", "Ord process with threshold potential"="OrdThresh", "Piecewise constant pairwise interaction process"="PairPiece", "Poisson process" = "Poisson", "Strauss process" = "Strauss", "Strauss - hard core process" = "StraussHard", "Soft core process" = "Softcore", # weird ones: "Ord process with user-defined potential" = expression(Ord(object$pot)), "Saturated process with user-defined potential" =expression(Saturated(object$pot)), "user-defined pairwise interaction process"= expression( Pairwise(object$pot, par=object$par, parnames=object$parnames, printfun=object$print)) ) as.interact <- function(object) { UseMethod("as.interact") } as.interact.interact <- function(object) { verifyclass(object, "interact") return(object) } interactionfamilyname <- function(x) { if(inherits(x, "isf")) return(x$name) x <- as.interact(x) return(x$family$name) } #### internal code for streamlining initialisation of interactions # # x should be a partially-completed 'interact' object # instantiate.interact <- function(x, par=NULL) { if(is.character(x$family)) x$family <- get(x$family) # set 
parameter values x$par <- par # validate parameters if(!is.null(x$init)) x$init(x) x$version <- versionstring.spatstat() return(x) } spatstat/R/bermantest.R0000644000176200001440000002516313333543254014621 0ustar liggesusers# # bermantest.R # # Test statistics from Berman (1986) # # $Revision: 1.21 $ $Date: 2018/01/17 08:46:51 $ # # berman.test <- function(...) { UseMethod("berman.test") } berman.test.ppp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) do.call(bermantestEngine, resolve.defaults(list(ppm(X), covariate, which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } berman.test.ppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate verifyclass(model, "ppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(model, covariate, which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Qname))) } berman.test.lpp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) do.call(bermantestEngine, resolve.defaults(list(lppm(X), covariate, which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } berman.test.lppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate verifyclass(model, "lppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(model, covariate, which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Xname))) } bermantestEngine <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ..., modelname, covname, dataname="") { csr <- is.poisson(model) && is.stationary(model) if(missing(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(missing(covname)) { covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate } which <- match.arg(which) alternative <- match.arg(alternative) if(!is.poisson(model)) stop("Only implemented for Poisson point process models") #' compute required data fram <- spatialCDFframe(model, covariate, ..., modelname=modelname, covname=covname, dataname=dataname) #' evaluate berman test statistic result <- bermantestCalc(fram, which=which, alternative=alternative) return(result) } bermantestCalc <- function(fram, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ which <- match.arg(which) alternative <- match.arg(alternative) verifyclass(fram, "spatialCDFframe") fvalues <- fram$values info <- fram$info ## values of covariate at data points ZX <- fvalues$ZX ## transformed to Unif[0,1] under H0 U <- fvalues$U ## values of covariate at pixels Zvalues <- fvalues$Zvalues ## corresponding pixel areas/weights weights <- fvalues$weights ## intensity of model lambda <- fvalues$lambda ## names modelname <- info$modelname dataname <- info$dataname covname <- info$covname switch(which, Z1={ #......... Berman Z1 statistic ..................... method <- paste("Berman Z1 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) # sum of covariate values at data points Sn <- sum(ZX) # predicted mean and variance lamwt <- lambda * weights En <- sum(lamwt) ESn <- sum(lamwt * Zvalues) varSn <- sum(lamwt * Zvalues^2) # working, for plot method working <- list(meanZX=mean(ZX), meanZ=ESn/En) # standardise statistic <- (Sn - ESn)/sqrt(varSn) names(statistic) <- "Z1" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="mean value of covariate at random points is less than predicted under model", greater="mean value of covariate at random points is greater than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname)) }, Z2={ #......... Berman Z2 statistic ..................... 
method <- paste("Berman Z2 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) npts <- length(ZX) statistic <- sqrt(12/npts) * (sum(U) - npts/2) working <- list(meanU=mean(U)) names(statistic) <- "Z2" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="covariate values at random points have lower quantiles than predicted under model", greater="covariate values at random points have higher quantiles than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n\t", "and transformed to uniform distribution under", if(info$csr) modelname else sQuote(modelname)) }) out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=method, which=which, working=working, data.name=valuename, fram=fram) class(out) <- c("htest", "bermantest") return(out) } plot.bermantest <- function(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) { fram <- x$fram if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style ks <- x$ks values <- attr(ks, "prep") info <- attr(ks, "info") } work <- x$working op <- options(useFancyQuotes=FALSE) switch(x$which, Z1={ # plot cdf's of Z FZ <- values$FZ xxx <- get("x", environment(FZ)) yyy <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z1 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=xxx, y=yyy, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) FZX <- values$FZX if(is.null(FZX)) FZX <- ecdf(values$ZX) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) abline(v=work$meanZ, lwd=lwd0,col=col0, 
lty=lty0, xpd=FALSE) abline(v=work$meanZX, lwd=lwd,col=col, lty=lty, xpd=FALSE) }, Z2={ # plot cdf of U U <- values$U cdfU <- ecdf(U) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z2 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call(plot.ecdf, resolve.defaults( list(cdfU), list(...), list(do.points=FALSE, asp=1), list(xlim=c(0,1), ylim=c(0,1)), list(lwd=lwd, col=col, lty=lty), list(xlab="U", ylab="relative frequency"), list(main=main))) abline(0,1,lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=0.5, lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=work$meanU, lwd=lwd,col=col,lty=lty, xpd=FALSE) }) options(op) return(invisible(NULL)) } spatstat/R/circarcs.R0000644000176200001440000000717713572371066014260 0ustar liggesusers#' #' circarcs.R #' #' Circular Arcs #' #' An interval on the circle is specified by [left, right] #' meaning the arc starting at 'left' going anticlockwise until 'right'. #' Here 'left' and 'right' are angles in radians (mod 2*pi) #' from the positive x-axis. 
#'
#' $Revision: 1.5 $ $Date: 2019/12/06 06:15:29 $

#' check.arc: validate an arc specification.
#' An arc is a numeric vector c(left, right) of angles in radians.
#' Returns TRUE if valid; otherwise stops (if fatal=TRUE) or returns FALSE.
check.arc <- function(arc, fatal=TRUE) {
  if(is.numeric(arc) && length(arc) == 2) return(TRUE)
  if(fatal) stop("arc should be a numeric vector of length 2")
  return(FALSE)
}

#' inside.arc: test which angles 'theta' lie on the given arc.
#' Both 'theta' and the arc endpoints are reduced mod 2*pi before
#' comparison; returns a logical vector parallel to 'theta'.
inside.arc <- function(theta, arc) {
  check.arc(arc)
  arc <- arc %% (2*pi)
  theta <- theta %% (2*pi)
  if(arc[1] <= arc[2]) {
    #' arc does not cross the positive x-axis:
    #' membership is the ordinary interval test
    result <- (arc[1] <= theta) & (theta <= arc[2])
  } else {
    #' arc crosses the positive x-axis:
    #' membership is the complement-style (wrap-around) test
    result <- (arc[1] <= theta) | (theta <= arc[2])
  }
  return(result)
}

#' circunion: union of a list of circular arcs,
#' returned as a list of disjoint arcs c(left, right).
#' Returns list() for the empty union and list(c(0, 2*pi)) for the
#' full circle.
#' Strategy: collect all arc endpoints (mod 2*pi); between consecutive
#' endpoints membership in the union is constant, so testing the
#' midpoint of each gap classifies the whole gap.
circunion <- function(arcs) {
  stopifnot(is.list(arcs))
  nothing <- list()
  everything <- list(c(0, 2*pi))
  if(length(arcs) == 0) return(nothing)
  lapply(arcs, check.arc)
  #' extract all endpoints
  allends <- unlist(arcs)
  allends <- sortunique(as.numeric(allends) %% (2*pi))
  #' compute midpoints between each successive pair of endpoints (mod 2pi)
  midpts <- allends + diff(c(allends, allends[1] + 2*pi))/2
  #' determine which midpoints lie inside one of the arcs
  midinside <- Reduce("|", lapply(arcs, inside.arc, theta=midpts))
  #' angle 0 is handled separately since the component containing it
  #' may wrap around the positive x-axis
  zeroinside <- any(sapply(arcs, inside.arc, theta=0))
  if(!any(midinside) && !zeroinside) return(nothing)
  if(all(midinside) && zeroinside) return(everything)
  result <- nothing
  if(zeroinside) {
    #' First deal with the connected component containing 0
    #' Scan clockwise from 2*pi for left endpoint of interval
    n <- length(midinside)
    ileft <- (max(which(!midinside)) %% n) + 1L
    aleft <- allends[ileft]
    #' then anticlockwise for right endpoint
    iright <- min(which(!midinside))
    aright <- allends[iright]
    #' save this interval
    result <- append(result, list(c(aleft, aright)))
    #' remove data from consideration
    seqn <- seq_len(n)
    retain <- seqn > iright & seqn < (ileft-1L)
    midinside <- midinside[retain]
    allends <- allends[retain]
  }
  #' Now scan anticlockwise for first midpoint that is inside the union
  while(any(midinside)) {
    ileft <- min(which(midinside))
    toright <- (seq_along(midinside) > ileft)
    iright <- min(c(length(allends),
which(!midinside & toright))) aleft <- allends[ileft] aright <- allends[iright] #' save this interval result <- append(result, list(c(aleft, aright))) #' throw away points that are not endpoints of the union midinside <- midinside[seq_along(midinside) > iright] allends <- allends[seq_along(allends) > iright] } return(result) } # plotarc <- function(arc, ..., add=TRUE, lwd=3, rad=1){ # if(!add || is.null(dev.list())) # plot(disc(), main="") # if(diff(arc) < 0) # arc[2] <- arc[2] + 2*pi # ang <- seq(arc[1], arc[2], by=0.01) # lines(rad * cos(ang), rad * sin(ang), ..., lwd=lwd) # } # # plotarcs <- function(arcs, ..., rad=1, jitter=FALSE, add=FALSE) { # if(length(rad) == 1) rad <- rep(rad, length(arcs)) # if(jitter) rad <- rad * seq(0.9, 1.05, length=length(rad)) # rad <- as.list(rad) # if(!add) plot(disc(), main="") # mapply(plotarc, arc=arcs,rad=rad, MoreArgs=list(...)) # invisible(NULL) # } # # runifarc <- function(n=1, maxlen=pi) { # replicate(n, runif(1, 0, 2*pi) + c(0, runif(1, 0, maxlen)), simplify=FALSE) # } # # tryit <- function(n=5, maxlen=pi) { # a <- runifarc(n, maxlen=maxlen) # plotarcs(circunion(a), col=3, jitter=FALSE, lwd=6) # plotarcs(a, jitter=TRUE, lwd=2, add=TRUE) # } spatstat/R/clip.psp.R0000644000176200001440000001761613333543254014211 0ustar liggesusers# # clip.psp.R # # $Revision: 1.23 $ $Date: 2018/01/23 02:40:14 $ # # ######################################################## # clipping operation (for subset) ######################################################## clip.psp <- function(x, window, check=TRUE, fragments=TRUE) { verifyclass(x, "psp") verifyclass(window, "owin") if(is.vanilla(unitname(window))) unitname(window) <- unitname(x) if(check && !is.subset.owin(window, x$window)) warning("The clipping window is not a subset of the window containing the line segment pattern x") if(x$n == 0) { emptypattern <- psp(numeric(0), numeric(0), numeric(0), numeric(0), window=window, marks=x$marks) return(emptypattern) } switch(window$type, 
rectangle={ result <- cliprect.psp(x, window, fragments=fragments) }, polygonal={ result <- clippoly.psp(x, window, fragments=fragments) }, mask={ result <- clippoly.psp(x, as.polygonal(window), fragments=fragments) result$window <- window }) return(result) } ##### # # clipping to a rectangle # cliprect.psp <- local({ cliprect.psp <- function(x, window, fragments=TRUE) { verifyclass(x, "psp") verifyclass(window, "owin") ends <- x$ends marx <- marks(x, dfok=TRUE) #' find segments which are entirely inside the window #' (by convexity) in0 <- inside.owin(ends$x0, ends$y0, window) in1 <- inside.owin(ends$x1, ends$y1, window) ok <- in0 & in1 #' if all segments are inside, return them if(all(ok)) return(as.psp(ends, window=window, marks=marx, check=FALSE)) #' otherwise, store those segments which are inside the window ends.inside <- ends[ok, , drop=FALSE] marks.inside <- marx %msub% ok x.inside <- as.psp(ends.inside, window=window, marks=marks.inside, check=FALSE) if(!fragments) return(x.inside) #' now consider the rest ends <- ends[!ok, , drop=FALSE] in0 <- in0[!ok] in1 <- in1[!ok] marx <- marx %msub% (!ok) #' first clip segments to the range x \in [xmin, xmax] #' use parametric coordinates tx <- cbind(ifelse0NA(between(ends$x0, window$xrange)), ifelse1NA(between(ends$x1, window$xrange)), tvalue(ends$x0, ends$x1, window$xrange[1L]), tvalue(ends$x0, ends$x1, window$xrange[2L])) #' discard segments which do not lie in the x range nx <- apply(!is.na(tx), 1L, sum) ok <- (nx >= 2) if(!any(ok)) return(x.inside) ends <- ends[ok, , drop=FALSE] tx <- tx[ok, , drop=FALSE] in0 <- in0[ok] in1 <- in1[ok] marx <- marx %msub% ok #' Clip the segments to the x range tmin <- apply(tx, 1L, min, na.rm=TRUE) tmax <- apply(tx, 1L, max, na.rm=TRUE) dx <- ends$x1 - ends$x0 dy <- ends$y1 - ends$y0 ends.xclipped <- data.frame(x0=ends$x0 + tmin * dx, y0=ends$y0 + tmin * dy, x1=ends$x0 + tmax * dx, y1=ends$y0 + tmax * dy) #' Now clip the segments to the range y \in [ymin, ymax] ends <- 
ends.xclipped in0 <- inside.owin(ends$x0, ends$y0, window) in1 <- inside.owin(ends$x1, ends$y1, window) ty <- cbind(ifelse0NA(in0), ifelse1NA(in1), tvalue(ends$y0, ends$y1, window$yrange[1L]), tvalue(ends$y0, ends$y1, window$yrange[2L])) #' discard segments which do not lie in the y range ny <- apply(!is.na(ty), 1L, sum) ok <- (ny >= 2) if(!any(ok)) return(x.inside) ends <- ends[ok, , drop=FALSE] ty <- ty[ok, , drop=FALSE] in0 <- in0[ok] in1 <- in1[ok] marx <- marx %msub% ok #' Clip the segments to the y range tmin <- apply(ty, 1L, min, na.rm=TRUE) tmax <- apply(ty, 1L, max, na.rm=TRUE) dx <- ends$x1 - ends$x0 dy <- ends$y1 - ends$y0 ends.clipped <- data.frame(x0=ends$x0 + tmin * dx, y0=ends$y0 + tmin * dy, x1=ends$x0 + tmax * dx, y1=ends$y0 + tmax * dy) marks.clipped <- marx #' OK - segments clipped #' Put them together with the unclipped ones ends.all <- rbind(ends.inside, ends.clipped) marks.all <- marks.inside %mapp% marks.clipped as.psp(ends.all, window=window, marks=marks.all, check=FALSE) } small <- function(x) { abs(x) <= .Machine$double.eps } tvalue <- function(z0, z1, zt) { y1 <- z1 - z0 yt <- zt - z0 tval <- ifelseAX(small(y1), 0.5, yt/y1) betwee <- (yt * (zt - z1)) <= 0 result <- ifelseXB(betwee, tval, NA) return(result) } between <- function(x, r) { ((x-r[1L]) * (x-r[2L])) <= 0 } cliprect.psp }) ############################ # # clipping to a polygonal window # clippoly.psp <- function(s, window, fragments=TRUE) { verifyclass(s, "psp") verifyclass(window, "owin") stopifnot(window$type == "polygonal") marx <- marks(s) has.marks <- !is.null(marx) eps <- .Machine$double.eps # find the intersection points between segments and window edges ns <- s$n es <- s$ends x0s <- es$x0 y0s <- es$y0 x1s <- es$x1 y1s <- es$y1 dxs <- x1s - x0s dys <- y1s - y0s bdry <- edges(window) nw <- bdry$n ew <- bdry$ends x0w <- ew$x0 y0w <- ew$y0 dxw <- ew$x1 - ew$x0 dyw <- ew$y1 - ew$y0 out <- .C("xysegint", na=as.integer(ns), x0a=as.double(x0s), y0a=as.double(y0s), 
dxa=as.double(dxs), dya=as.double(dys), nb=as.integer(nw), x0b=as.double(x0w), y0b=as.double(y0w), dxb=as.double(dxw), dyb=as.double(dyw), eps=as.double(eps), xx=as.double(numeric(ns * nw)), yy=as.double(numeric(ns * nw)), ta=as.double(numeric(ns * nw)), tb=as.double(numeric(ns * nw)), ok=as.integer(integer(ns * nw)), PACKAGE = "spatstat") hitting <- (matrix(out$ok, ns, nw) != 0) ts <- matrix(out$ta, ns, nw) anyhit <- matrowany(hitting) if(!fragments) { #' retain only segments that avoid the boundary entirely leftin <- inside.owin(es$x0, es$y0, window) rightin <- inside.owin(es$x1, es$y1, window) ok <- !anyhit & leftin & rightin return(as.psp(es[ok,,drop=FALSE], window=window, marks=marx %msub% ok, check=FALSE)) } # form all the chopped segments (whether in or out) #' initially empty chopx0 <- chopy0 <- chopx1 <- chopy1 <- numeric(0) chopmarks <- marx %msub% integer(0) for(seg in seq_len(ns)) { #' coordinates of segment number 'seg' segx0 <- x0s[seg] segy0 <- y0s[seg] segx1 <- x1s[seg] segy1 <- y1s[seg] if(has.marks) segmarks <- marx %msub% seg if(!anyhit[seg]) { #' no intersection with boundary - add single segment chopx0 <- c(chopx0, segx0) chopy0 <- c(chopy0, segy0) chopx1 <- c(chopx1, segx1) chopy1 <- c(chopy1, segy1) if(has.marks) chopmarks <- chopmarks %mapp% segmarks } else { #' crosses boundary - add several pieces tvals <- ts[seg,] tvals <- sort(tvals[hitting[seg,]]) dx <- segx1 - segx0 dy <- segy1 - segy0 chopx0 <- c(chopx0, segx0 + c(0,tvals) * dx) chopy0 <- c(chopy0, segy0 + c(0,tvals) * dy) chopx1 <- c(chopx1, segx0 + c(tvals,1) * dx) chopy1 <- c(chopy1, segy0 + c(tvals,1) * dy) if(has.marks) { npieces <- length(tvals) + 1L chopmarks <- chopmarks %mapp% (segmarks %mrep% npieces) } } } chopped <- psp(chopx0, chopy0, chopx1, chopy1, window=boundingbox(Window(s), window), marks=chopmarks) # select those chopped segments which are inside the window mid <- midpoints.psp(chopped) ins <- inside.owin(mid$x, mid$y, window) retained <- chopped[ins] 
retained$window <- window return(retained) } spatstat/R/morphology.R0000644000176200001440000002424413606002167014647 0ustar liggesusers# # morphology.R # # dilation, erosion, opening, closing # # generic functions # and methods for owin, psp, ppp # # $Revision: 1.32 $ $Date: 2020/01/10 04:42:31 $ # # ............ generic ............................ erosion <- function(w, r, ...) { UseMethod("erosion") } dilation <- function(w, r, ...) { UseMethod("dilation") } closing <- function(w, r, ...) { UseMethod("closing") } opening <- function(w, r, ...) { UseMethod("opening") } # ............ methods for class 'owin' ............................ # DELETED # erode.owin <- function(...) { # .Deprecated("erosion.owin", package="spatstat") # erosion.owin(...) # } erosion.owin <- function(w, r, shrink.frame=TRUE, ..., strict=FALSE, polygonal=NULL) { verifyclass(w, "owin") validradius(r, "erosion") if(r == 0 && !strict) return(w) xr <- w$xrange yr <- w$yrange if(2 * r >= max(diff(xr), diff(yr))) stop("erosion distance r too large for frame of window") # compute the dimensions of the eroded frame exr <- xr + c(r, -r) eyr <- yr + c(r, -r) ebox <- list(x=exr[c(1,2,2,1)], y=eyr[c(1,1,2,2)]) ismask <- is.mask(w) if(is.empty(w)) return(emptywindow(ebox)) # determine type of computation if(is.null(polygonal)) polygonal <- !ismask else { stopifnot(is.logical(polygonal)) if(polygonal && ismask) { # try to convert w <- as.polygonal(w) if(is.mask(w)) polygonal <- FALSE } } if(is.rectangle(w) && polygonal) { # result is a smaller rectangle if(shrink.frame) { return(owin(exr, eyr)) # type 'rectangle' } else { return(owin(xr, yr, poly=ebox, check=FALSE)) # type 'polygonal' } } if(polygonal) { # compute polygonal region using polyclip package pnew <- polyclip::polyoffset(w$bdry, -r, jointype="round") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, Area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) if(shrink.frame) { return(owin(poly=pnew, check=FALSE)) } 
else { return(owin( xr, yr, poly=pnew, check=FALSE)) } } # otherwise erode the window in pixel image form if(w$type == "mask") wnew <- erodemask(w, r, strict=strict) else { D <- distmap(w, invert=TRUE, ...) wnew <- levelset(D, r, if(strict) ">" else ">=") } if(shrink.frame) { # trim off some rows & columns of pixel raster keepcol <- (wnew$xcol >= exr[1] & wnew$xcol <= exr[2]) keeprow <- (wnew$yrow >= eyr[1] & wnew$yrow <= eyr[2]) wnew$xcol <- wnew$xcol[keepcol] wnew$yrow <- wnew$yrow[keeprow] wnew$dim <- c(sum(keeprow), sum(keepcol)) wnew$m <- wnew$m[keeprow, keepcol] wnew$xrange <- exr wnew$yrange <- eyr } return(wnew) } # DELETED # dilate.owin <- function(...) { # .Deprecated("dilation.owin", package="spatstat") # dilation.owin(...) # } dilation.owin <- function(w, r, ..., polygonal=NULL, tight=TRUE) { verifyclass(w, "owin") validradius(r, "dilation") if(r == 0) return(w) ismask <- is.mask(w) if(is.empty(w)) return(w) # determine type of computation if(is.null(polygonal)) { polygonal <- !ismask } else stopifnot(is.logical(polygonal)) if(polygonal) { # convert to polygonal w <- as.polygonal(w) if(!is.polygonal(w)) polygonal <- FALSE } # bounding frame bb <- if(tight) boundingbox(w) else as.rectangle(w) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { # compute pixel approximation epsilon <- sqrt(w$xstep^2 + w$ystep^2) r <- max(r, epsilon) w <- rebound.owin(w, newbox) distant <- distmap(w, ...) 
dil <- levelset(distant, r, "<=") return(dil) } else { # compute polygonal region using polyclip package pnew <- polyclip::polyoffset(w$bdry, r, jointype="round") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, Area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) # determine bounding frame, convert to owin if(tight) { out <- owin(poly=pnew, check=FALSE) } else { out <- owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE) } return(out) } } closing.owin <- function(w, r, ..., polygonal=NULL) { if(missing(r)) stop("r is required") validradius(r, "closing") wplus <- dilation.owin(w, r, ..., polygonal=polygonal, tight=FALSE) if(is.empty(wplus)) return(wplus) wclose <- erosion.owin(wplus, r, strict=TRUE) b <- as.rectangle(w) wclose <- rebound.owin(wclose[b], b) return(wclose) } opening.owin <- function(w, r, ..., polygonal=NULL) { if(missing(r)) stop("r is required") validradius(r, "opening") wminus <- erosion.owin(w, r, ..., polygonal=polygonal, shrink.frame=FALSE) if(is.empty(wminus)) return(wminus) wopen <- dilation.owin(wminus, r, tight=FALSE) b <- as.rectangle(w) wopen <- rebound.owin(wopen[b], b) return(wopen) } border <- function(w, r, outside=FALSE, ...) { w <- as.owin(w) if(!outside) { e <- erosion(w, r, ...) b <- setminus.owin(w, e) } else { d <- dilation(w, r, ...) b <- setminus.owin(d, w) } return(b) } # ............ methods for class 'psp' ............................ dilation.psp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) { verifyclass(w, "psp") x <- w validradius(r, "dilation") if(r == 0) return(w) if(is.empty(x)) return(emptywindow(as.owin(w))) # bounding frame bb <- if(tight) boundingbox(x) else as.rectangle(x) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { x <- rebound.psp(x, newbox) distant <- distmap(x, ...) 
dil <- levelset(distant, r, "<=")
    ## pixel case: dilation is the set where the distance map is <= r
    return(dil)
  } else if(spatstat.options("old.morpho.psp")) {
    # old code for polygonal case
    ends <- x$ends
    angles <- angles.psp(x, directed=TRUE)
    # lengths <- lengths.psp(x)
    out <- NULL
    # dilate individual segments
    ## sample angles on a half circle, dropping the two endpoints
    ## (they coincide with the ends of the straight sides)
    halfcircle <- seq(from=0, to=pi, length.out=128)[-c(1,128)]
    for(i in seq_len(x$n)) {
      seg <- ends[i,]
      co <- cos(angles[i])
      si <- sin(angles[i])
      # draw sausage around i-th segment:
      # two sides offset perpendicular to the segment by +/- r,
      # joined by semicircular caps at each endpoint
      xx <- c(seg$x0, seg$x1) + r * si
      yy <- c(seg$y0, seg$y1) - r * co
      rightcircle <- angles[i] - pi/2 + halfcircle
      xx <- c(xx, seg$x1 + r * cos(rightcircle))
      yy <- c(yy, seg$y1 + r * sin(rightcircle))
      xx <- c(xx, c(seg$x1, seg$x0) - r * si)
      yy <- c(yy, c(seg$y1, seg$y0) + r * co)
      leftcircle <- angles[i] + pi/2 + halfcircle
      xx <- c(xx, seg$x0 + r * cos(leftcircle))
      yy <- c(yy, seg$y0 + r * sin(leftcircle))
      sausage <- owin(newbox$xrange, newbox$yrange,
                      poly=list(x=xx, y=yy), check=FALSE)
      # add to set (incremental union; out=NULL on first pass)
      out <- union.owin(out, sausage, ...)
    }
    return(out)
  } else {
    # new code using 'polyclip' package
    # convert to list of list(x,y)
    ends <- as.matrix(x$ends)
    n <- nrow(ends)
    plines <- vector(mode="list", length=n)
    for(i in 1:n)
      plines[[i]] <- list(x=ends[i, c("x0","x1")],
                          y=ends[i, c("y0","y1")])
    # call: offset each open polyline by r with round joins/ends,
    # giving the dilation directly
    pnew <- polyclip::polylineoffset(plines, r,
                                     jointype="round", endtype="openround")
    # ensure correct polarity
    ## negative total signed area means the polygons are traversed
    ## in the wrong orientation; reverse them
    totarea <- sum(unlist(lapply(pnew, Area.xypolygon)))
    if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon)
    # convert to owin object
    out <- if(tight) owin(poly=pnew, check=FALSE) else owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE)
    return(out)
  }
}

#' Morphological closing of a line segment pattern:
#' dilation by distance r followed by erosion by r.
#' Result is clipped back to the original frame.
closing.psp <- function(w, r, ..., polygonal=TRUE) {
  if(missing(r)) stop("r is required")
  validradius(r, "closing")
  wplus <- dilation.psp(w, r, ..., polygonal=polygonal, tight=FALSE)
  if(is.empty(wplus)) return(emptywindow(as.owin(w)))
  wclose <- erosion.owin(wplus, r, strict=TRUE)
  wclose <- rebound.owin(wclose, as.rectangle(w))
  return(wclose)
}

#' Morphological erosion of a line segment pattern:
#' segments have empty interior, so the result is w itself for r = 0
#' and the empty window for any r > 0 (via idorempty).
erosion.psp <- function(w, r, ...)
{
  ## erosion of a segment pattern: identity for r = 0, else empty
  idorempty(w, r, "erosion")
}

#' Morphological opening of a line segment pattern.
#' Degenerate: identity for r = 0, otherwise the empty window.
opening.psp <- function(w, r, ...) {
  idorempty(w, r,"opening")
}

# ............ methods for class 'ppp' ............................

#' Morphological dilation of a point pattern:
#' the union of discs of radius r centred at the points.
dilation.ppp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) {
  verifyclass(w, "ppp")
  validradius(r, "dilation")
  X <- w
  ## trivial radius: nothing to do
  if(r == 0) return(X)
  ## no points: dilation is empty
  if(is.empty(w)) return(emptywindow(as.owin(w)))
  ## enclosing frame, expanded by slightly more than r so the
  ## dilated set is not clipped by rounding error
  frame <- if(tight) boundingbox(X) else as.rectangle(X)
  newbox <- grow.rectangle(frame, r * (1 + 1e-6))
  if(!polygonal) {
    ## pixel approximation: threshold the distance map at r
    Window(X) <- newbox
    return(levelset(distmap(X, ...), r, "<="))
  }
  ## polygonal approximation: one disc per point, then set union
  coo <- coords(X)
  template <- disc(r, c(0,0), ...)
  balls <- lapply(seq_len(npoints(X)),
                  function(i) shift(template, vec=coo[i,]))
  class(balls) <- c("solist", class(balls))
  union.owin(balls)
}

#' Morphological closing of a point pattern:
#' dilation by r followed by erosion by r, clipped to the
#' original frame.
closing.ppp <- function(w, r, ..., polygonal=TRUE) {
  if(missing(r)) stop("r is required")
  validradius(r, "closing")
  ## too few points: closing is empty
  if(is.empty(w) || w$n <= 3) return(emptywindow(as.owin(w)))
  ## discard isolated points (no neighbour within 2r):
  ## their dilated discs vanish again under erosion
  keep <- (nndist(w) <= 2 * r)
  if(sum(keep) <= 3) return(emptywindow(as.owin(w)))
  w <- w[keep]
  grown <- dilation.ppp(w, r, ..., polygonal=polygonal, tight=FALSE)
  shrunk <- erosion.owin(grown, r, strict=TRUE)
  rebound.owin(shrunk, as.rectangle(w))
}

#' Erosion of a point pattern: points have empty interior,
#' so the result is w for r = 0 and the empty window otherwise.
erosion.ppp <- function(w, r, ...) {
  idorempty(w, r, "erosion")
}

#' Opening of a point pattern: same degeneracy as erosion.
opening.ppp <- function(w, r, ...) {
  idorempty(w, r,"opening")
}

# ............ utilities ............................
validradius <- local({ validradius <- function(r, caller="morphological operator") { # rname <- short.deparse(substitute(r)) if(!is.numeric(r) || length(r) != 1) groan("radius r must be a single number", caller) if(r < 0) groan("radius r must be nonnegative", caller) return(TRUE) } groan <- function(whinge, caller) { stop(paste("for", paste(caller, ",", sep=""), whinge), call.=FALSE) } validradius }) idorempty <- function(w, r, caller="morphological operator") { validradius(r, caller) if(r == 0) return(w) else return(emptywindow(w)) } spatstat/R/rotate.R0000644000176200001440000000524613333543255013754 0ustar liggesusers# # rotate.S # # $Revision: 1.21 $ $Date: 2014/10/24 00:22:30 $ # rotxy <- function(X, angle=pi/2) { co <- cos(angle) si <- sin(angle) list(x = co * X$x - si * X$y, y = si * X$x + co * X$y) } rotxypolygon <- function(p, angle=pi/2) { p[c("x","y")] <- rotxy(p, angle=angle) # area and hole status are invariant under rotation return(p) } rotate <- function(X, ...) { UseMethod("rotate") } rotate.owin <- function(X, angle=pi/2, ..., rescue=TRUE, centre=NULL) { verifyclass(X, "owin") if(!is.null(centre)) { ## rotation about designated centre X <- shift(X, origin=centre) negorig <- getlastshift(X) } else negorig <- NULL switch(X$type, rectangle={ # convert rectangle to polygon P <- owin(X$xrange, X$yrange, poly= list(x=X$xrange[c(1,2,2,1)], y=X$yrange[c(1,1,2,2)]), unitname=unitname(X)) # call polygonal case Y <- rotate.owin(P, angle, rescue=rescue) }, polygonal={ # First rotate the polygonal boundaries bdry <- lapply(X$bdry, rotxypolygon, angle=angle) # wrap up Y <- owin(poly=bdry, check=FALSE, unitname=unitname(X)) if(rescue) Y <- rescue.rectangle(Y) }, mask={ newframe <- boundingbox(rotxy(corners(X), angle)) Y <- if(length(list(...)) > 0) as.mask(newframe, ...) 
else as.mask(newframe, eps=with(X, min(xstep, ystep))) pixelxy <- rasterxy.mask(Y) xybefore <- rotxy(pixelxy, -angle) Y$m[] <- with(xybefore, inside.owin(x, y, X)) Y <- intersect.owin(Y, boundingbox(Y)) if(rescue) Y <- rescue.rectangle(Y) unitname(Y) <- unitname(X) }, stop("Unrecognised window type") ) if(!is.null(negorig)) Y <- shift(Y, -negorig) return(Y) } rotate.ppp <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "ppp") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL r <- rotxy(X, angle) w <- rotate.owin(X$window, angle, ...) Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rotate.im <- function(X, angle=pi/2, ..., centre=NULL) { if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL co <- cos(angle) si <- sin(angle) m <- matrix(c(co,si,-si,co), nrow=2, ncol=2) Y <- affine(X, mat=m) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } spatstat/R/psstG.R0000644000176200001440000001340013362252414013541 0ustar liggesusers# # psstG.R # # Pseudoscore residual for unnormalised G (saturation process) # # $Revision: 1.10 $ $Date: 2018/10/19 03:29:29 $ # ################################################################################ # psstG <- function(object, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) fit <- object else if(is.ppp(object) || is.quad(object)) { # convert to quadscheme if(is.ppp(object)) object <- quadscheme(object, ...) 
# fit model if(!is.null(model)) fit <- update(model, Q=object, forcefit=TRUE) else fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED)) { # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) # resval <- with(res, "increment") rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[S](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[S]") # First phase: ................................................. 
# nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) # excludes identical pairs dIJ <- nn$dist I <- seq(U$n) J <- nn$which DD <- (I <= X$n) # TRUE for data points wcIJ <- wc okI <- USED[I] # histogram of nndist for data points only (without edge correction) Bsum <- cumsum(whist(dIJ[DD & okI], breaks$val)) # weighted histogram of nncross (without edge correction) Bint <- cumsum(whist(dIJ[okI], breaks$val, wcIJ[okI])) # residual Bres <- Bsum - Bint # tack on ans <- bind.fv(ans, data.frame(dat1=Bsum, com1=Bint, res1=Bres), c("%s[dat1](r)", "%s[com1](r)", "%s[res1](r)"), c("phase 1 pseudosum (contribution to %s)", "phase 1 pseudocompensator (contribution to %s)", "phase 1 pseudoresidual (contribution to %s)")) # Second phase: ................................................ # close pairs (quadrature point to data point) close <- crosspairs(U, X, rmax, what="ijd") dIJ <- close$d I <- close$i J <- close$j # UI <- U[I] # XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) okI <- USED[I] # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] # nearest and second-nearest neighbour distances in X nn1 <- nndist(X) nn2 <- nndist(X, k=2) nn1J <- nn1[J] nn2J <- nn2[J] # weird use of the reduced sample estimator # data sum: RSX <- Kount(dIJ[DD & okI], nn2J[DD & okI], nn2J[ZI & okI], breaks) Csum <- RSX$numerator # integral: if(spatstat.options("psstG.remove.zeroes")) okE <- okI & !EIJ else okE <- okI RSD <- Kwtsum(dIJ[okE], nn1J[okE], wcIJ[okE], nn1, rep.int(1, length(nn1)), breaks, fatal=FALSE) Cint <- RSD$numerator # Cres <- Bres + Csum - Cint # tack on ans <- bind.fv(ans, data.frame(dat2=Csum, com2=Cint, res2=Cres, dat=Bsum+Csum, com=Bint+Cint, res=Bres+Cres), c("%s[dat2](r)", "%s[com2](r)", "%s[res2](r)", 
"Sigma~Delta~V[S](r)", "bold(C)~Delta~V[S](r)", "bold(R)~Delta~V[S](r)"), c("phase 2 pseudosum (contribution to %s)", "phase 2 pseudocompensator (contribution to %s)", "phase 2 pseudoresidual (contribution to %s)", "pseudosum (contribution to %s)", "pseudocompensator (contribution to %s)", "pseudoresidual function %s"), "res") # restrict choice of curves in default plot fvnames(ans, ".") <- c("dat", "com", "res", "theo") # return(ans) } spatstat/R/fv.R0000644000176200001440000013416213574033371013071 0ustar liggesusers## ## ## fv.R ## ## class "fv" of function value objects ## ## $Revision: 1.155 $ $Date: 2019/12/10 07:18:00 $ ## ## ## An "fv" object represents one or more related functions ## of the same argument, such as different estimates of the K function. ## ## It is a data.frame with additional attributes ## ## argu column name of the function argument (typically "r") ## ## valu column name of the recommended function ## ## ylab generic label for y axis e.g. K(r) ## ## fmla default plot formula ## ## alim recommended range of function argument ## ## labl recommended xlab/ylab for each column ## ## desc longer description for each column ## ## unitname name of unit of length for 'r' ## ## shade (optional) column names of upper & lower limits ## of shading - typically a confidence interval ## ## Objects of this class are returned by Kest(), etc ## ################################################################## ## creator fv <- function(x, argu="r", ylab=NULL, valu, fmla=NULL, alim=NULL, labl=names(x), desc=NULL, unitname=NULL, fname=NULL, yexp=ylab) { stopifnot(is.data.frame(x)) ## check arguments stopifnot(is.character(argu)) if(!is.null(ylab)) stopifnot(is.character(ylab) || is.language(ylab)) if(!missing(yexp)) { if(is.null(yexp)) yexp <- ylab else stopifnot(is.language(yexp)) } stopifnot(is.character(valu)) if(!(argu %in% names(x))) stop(paste(sQuote("argu"), "must be the name of a column of x")) if(!(valu %in% names(x))) stop(paste(sQuote("valu"), 
"must be the name of a column of x")) if(is.null(fmla)) fmla <- paste(valu, "~", argu) else if(inherits(fmla, "formula")) { ## convert formula to string fmla <- flat.deparse(fmla) } else if(!is.character(fmla)) stop(paste(sQuote("fmla"), "should be a formula or a string")) if(missing(alim)) { ## Note: if alim is given as NULL, it is not changed. argue <- x[[argu]] alim <- range(argue[is.finite(argue)]) } else if(!is.null(alim)) { if(!is.numeric(alim) || length(alim) != 2) stop(paste(sQuote("alim"), "should be a vector of length 2")) } if(!is.character(labl)) stop(paste(sQuote("labl"), "should be a vector of strings")) stopifnot(length(labl) == ncol(x)) if(is.null(desc)) desc <- character(ncol(x)) else { stopifnot(is.character(desc)) stopifnot(length(desc) == ncol(x)) nbg <- is.na(desc) if(any(nbg)) desc[nbg] <- "" } if(!is.null(fname)) stopifnot(is.character(fname) && length(fname) %in% 1:2) ## pack attributes attr(x, "argu") <- argu attr(x, "valu") <- valu attr(x, "ylab") <- ylab attr(x, "yexp") <- yexp attr(x, "fmla") <- fmla attr(x, "alim") <- alim attr(x, "labl") <- labl attr(x, "desc") <- desc attr(x, "units") <- as.unitname(unitname) attr(x, "fname") <- fname attr(x, "dotnames") <- NULL attr(x, "shade") <- NULL ## class(x) <- c("fv", class(x)) return(x) } .Spatstat.FvAttrib <- c( "argu", "valu", "ylab", "yexp", "fmla", "alim", "labl", "desc", "units", "fname", "dotnames", "shade") as.data.frame.fv <- function(x, ...) 
{
  stopifnot(is.fv(x))
  ## remove the fv-specific attributes, leaving a plain data frame
  fva <- .Spatstat.FvAttrib
  attributes(x)[fva] <- NULL
  class(x) <- "data.frame"
  x
}

## Test whether an object is a function-value table ('fv').
is.fv <- function(x) {
  inherits(x, "fv")
}

##

## Generic conversion to class 'fv'.
as.fv <- function(x) { UseMethod("as.fv") }

as.fv.fv <- function(x) x

## A data frame is interpreted as: first column = function argument,
## second column = function values.
as.fv.data.frame <- function(x) {
  if(ncol(x) < 2) stop("Need at least 2 columns")
  return(fv(x, names(x)[1L], , names(x)[2L]))
}

## A matrix is first coerced to a data frame; missing column names
## are filled in as V1, V2, ...
as.fv.matrix <- function(x) {
  y <- as.data.frame(x)
  if(any(bad <- is.na(names(y))))
    names(y)[bad] <- paste0("V", which(bad))
  return(as.fv.data.frame(y))
}

## other methods for as.fv are described in the files for the relevant classes.

## Reduce an fv object to its essential structure, discarding
## any extra attributes attached by other code.
vanilla.fv <- function(x) {
  ## remove everything except basic fv characteristics
  retain <- c("names", "row.names", .Spatstat.FvAttrib)
  attributes(x) <- attributes(x)[retain]
  class(x) <- c("fv", "data.frame")
  return(x)
}

## Print method: displays a table of column names, plot labels and
## descriptions, plus (optionally) the plot formula and argument range.
print.fv <- local({

  ## helpers for fitting words into a line width (currently unused below)
  maxwords <- function(z, m) { max(0, which(cumsum(nchar(z) + 1) <= m+1)) }
  usewords <- function(z, n) paste(z[1:n], collapse=" ")

  print.fv <- function(x, ..., tight=FALSE) {
    verifyclass(x, "fv")
    ## how verbose should the output be?
    terselevel <- spatstat.options("terse")
    showlabels <- waxlyrical('space', terselevel)
    showextras <- waxlyrical('extras', terselevel)
    nama <- names(x)
    a <- attributes(x)
    if(!is.null(ylab <- a$ylab)) {
      if(is.language(ylab))
        ylab <- flat.deparse(ylab)
    }
    if(!inherits(x, "envelope")) {
      splat("Function value object",
            paren(paste("class", sQuote("fv"))))
      if(!is.null(ylab)) {
        xlab <- fvlabels(x, expand=TRUE)[[a$argu]]
        splat("for the function", xlab, "->", ylab)
      }
    }
    ## Descriptions ..
    desc <- a$desc
    ## .. may require insertion of ylab
    if(!is.null(ylab))
      desc <- sprintf(desc, ylab)
    ## Labels ..
    labl <- fvlabels(x, expand=TRUE)
    ## Avoid overrunning text margin
    maxlinewidth <- options('width')[[1L]]
    key.width <- max(nchar(nama))
    labl.width <- if(showlabels) max(nchar(labl), nchar("Math.label")) else 0
    desc.width <- max(nchar(desc), nchar("Description"))
    fullwidth <- key.width + labl.width + desc.width + 2
    if(fullwidth > maxlinewidth && tight) {
      ## try shortening the descriptions so that it all fits on one line
      spaceleft <- maxlinewidth - (key.width + labl.width + 2)
      desc <- truncline(desc, spaceleft)
      desc.width <- max(nchar(desc), nchar("Description"))
      fullwidth <- key.width + labl.width + desc.width + 2
    }
    spaceleft <- maxlinewidth - (key.width + 1)
    if(desc.width > spaceleft) {
      ## Descriptions need to be truncated to max line width
      desc <- truncline(desc, spaceleft)
      desc.width <- max(nchar(desc), nchar("Description"))
      fullwidth <- key.width + labl.width + desc.width + 2
    }
    if(showextras) {
      fullwidth <- pmin(maxlinewidth, fullwidth)
      fullline <- paste0(rep(".", fullwidth), collapse="")
      cat(fullline, fill=TRUE)
    }
    df <- data.frame(Math.label=labl,
                     Description=desc,
                     row.names=nama,
                     stringsAsFactors=FALSE)
    if(!showlabels) df <- df[,-1L,drop=FALSE]
    print(df, right=FALSE)
    ##
    if(showextras) {
      cat(fullline, fill=TRUE)
      splat("Default plot formula: ",
            flat.deparse(as.formula(a$fmla)))
      splat("where", dQuote("."), "stands for",
            commasep(sQuote(fvnames(x, ".")), ", "))
      if(!is.null(a$shade))
        splat("Columns", commasep(sQuote(a$shade)),
              "will be plotted as shading (by default)")
      alim <- a$alim
      splat("Recommended range of argument",
            paste0(a$argu, ":"),
            if(!is.null(alim)) prange(signif(alim, 5)) else "not specified")
      rang <- signif(range(with(x, .x)), 5)
      splat("Available range", "of argument",
            paste0(a$argu, ":"), prange(rang))
      ledge <- summary(unitname(x))$legend
      if(!is.null(ledge))
        splat(ledge)
    }
    return(invisible(NULL))
  }

  print.fv
})

## manipulating the names in fv objects

## Recognised column-name abbreviations:
## .x = function argument, .y = preferred value, .s = shading columns,
## .  = columns selected for plotting, * = all value columns (reversed),
## .a = all value columns.
.Spatstat.FvAbbrev <- c(
  ".x",
  ".y",
  ".s",
  ".",
  "*",
  ".a")

## Resolve an abbreviation 'a' to the corresponding column name(s) of X.
fvnames <- function(X, a=".") {
verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop("argument a must be a character string") switch(a, ".y"={ return(attr(X, "valu")) }, ".x"={ return(attr(X, "argu")) }, ".s"={ return(attr(X, "shade")) }, "." = { ## The specified 'dotnames' dn <- attr(X, "dotnames") if(is.null(dn)) dn <- fvnames(X, "*") return(dn) }, ".a"={ ## all column names other than the function argument allvars <- names(X) argu <- attr(X, "argu") nam <- allvars[allvars != argu] return(nam) }, "*"={ ## Not documented at user level ## All column names other than the function argument ## IN REVERSE ORDER allvars <- names(X) argu <- attr(X, "argu") nam <- allvars[allvars != argu] nam <- rev(nam) # NB return(nam) }, { if(a %in% names(X)) return(a) stop(paste("Unrecognised abbreviation", dQuote(a))) } ) } "fvnames<-" <- function(X, a=".", value) { verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop(paste("argument", sQuote("a"), "must be a character string")) ## special cases if(a == "." && length(value) == 0) { ## clear the dotnames attr(X, "dotnames") <- NULL return(X) } if(a == ".a" || a == "*") { warning("Nothing changed; use names(X) <- value to change names", call.=FALSE) return(X) } ## validate the names switch(a, ".x"=, ".y"={ if(!is.character(value) || length(value) != 1) stop("value should be a single string") }, ".s"={ if(!is.character(value) || length(value) != 2) stop("value should be a vector of 2 character strings") }, "."={ if(!is.character(value)) stop("value should be a character vector") }, stop(paste("Unrecognised abbreviation", dQuote(a))) ) ## check the names match existing column names tags <- names(X) if(any(nbg <- !(value %in% tags))) stop(paste(ngettext(sum(nbg), "The string", "The strings"), commasep(dQuote(value[nbg])), ngettext(sum(nbg), "does not match the name of any column of X", "do not match the names of any columns of X"))) ## reassign names switch(a, ".x"={ attr(X, "argu") <- value }, ".y"={ attr(X, "valu") <- value }, ".s"={ attr(X, 
"shade") <- value }, "."={ attr(X, "dotnames") <- value }) return(X) } "names<-.fv" <- function(x, value) { nama <- colnames(x) indx <- which(nama == fvnames(x, ".x")) indy <- which(nama == fvnames(x, ".y")) inds <- which(nama %in% fvnames(x, ".s")) ind. <- which(nama %in% fvnames(x, ".")) ## rename columns of data frame x <- NextMethod("names<-") ## adjust other tags fvnames(x, ".x") <- value[indx] fvnames(x, ".y") <- value[indy] fvnames(x, ".") <- value[ind.] if(length(inds) > 0) fvnames(x, ".s") <- value[inds] namemap <- setNames(lapply(value, as.name), nama) formula(x) <- flat.deparse(eval(substitute(substitute(fom, um), list(fom=as.formula(formula(x)), um=namemap)))) return(x) } fvlabels <- function(x, expand=FALSE) { lab <- attr(x, "labl") if(expand && !is.null(fname <- attr(x, "fname"))) { ## expand plot labels using function name nstrings <- max(substringcount("%s", lab)) ## pad with blanks nextra <- nstrings - length(fname) if(nextra > 0) fname <- c(fname, rep("", nextra)) ## render lab <- do.call(sprintf, append(list(lab), as.list(fname))) } ## remove empty space lab <- gsub(" ", "", lab) names(lab) <- names(x) return(lab) } "fvlabels<-" <- function(x, value) { stopifnot(is.fv(x)) stopifnot(is.character(value)) stopifnot(length(value) == length(fvlabels(x))) attr(x, "labl") <- value return(x) } flatfname <- function(x) { fn <- if(is.character(x)) x else attr(x, "fname") if(length(fn) > 1) fn <- paste0(fn[1L], "[", paste(fn[-1L], collapse=" "), "]") as.name(fn) } makefvlabel <- function(op=NULL, accent=NULL, fname, sub=NULL, argname="r") { ## de facto standardised label a <- "%s" if(!is.null(accent)) a <- paste0(accent, paren(a)) ## eg hat(%s) if(!is.null(op)) a <- paste0("bold", paren(op), "~", a) ## eg bold(var)~hat(%s) if(is.null(sub)) { if(length(fname) != 1) { a <- paste0(a, "[%s]") a <- paren(a, "{") } } else { if(length(fname) == 1) { a <- paste0(a, paren(sub, "[")) } else { a <- paste0(a, paren("%s", "["), "^", paren(sub, "{")) a <- paren(a, "{") } 
  }
  ## append the argument, e.g. "(r)"
  a <- paste0(a, paren(argname))
  return(a)
}

## Build a map from column identifiers to plotmath expressions,
## used when constructing legends.
fvlabelmap <- local({
  ## parse a label string into a language object; fall back to a
  ## syntactically-valid name if parsing fails
  magic <- function(x) {
    subx <- paste("substitute(", x, ", NULL)")
    out <- try(eval(parse(text=subx)), silent=TRUE)
    if(inherits(out, "try-error"))
      out <- as.name(make.names(subx))
    out
  }

  fvlabelmap <- function(x, dot=TRUE) {
    labl <- fvlabels(x, expand=TRUE)
    ## construct mapping from identifiers to labels
    map <- as.list(labl)
    map <- lapply(map, magic)
    names(map) <- colnames(x)
    if(dot) {
      ## also map "." and ".a" to name of target function
      if(!is.null(ye <- attr(x, "yexp")))
        map <- append(map, list("."=ye, ".a"=ye))
      ## map other fvnames to their corresponding labels
      map <- append(map, list(".x"=map[[fvnames(x, ".x")]],
                              ".y"=map[[fvnames(x, ".y")]]))
      if(!is.null(fvnames(x, ".s"))) {
        shex <- unname(map[fvnames(x, ".s")])
        shadexpr <- substitute(c(A,B), list(A=shex[[1L]], B=shex[[2L]]))
        map <- append(map, list(".s" = shadexpr))
      }
    }
    return(map)
  }

  fvlabelmap
})

## map from abbreviations to expressions involving the column names,
## for use in eval(substitute(...))
fvexprmap <- function(x) {
  dotnames <- fvnames(x, ".")
  u <- if(length(dotnames) == 1) as.name(dotnames) else
       as.call(lapply(c("cbind", dotnames), as.name))
  ux <- as.name(fvnames(x, ".x"))
  uy <- as.name(fvnames(x, ".y"))
  umap <- list(.=u, .a=u, .x=ux, .y=uy)
  if(!is.null(fvnames(x, ".s"))) {
    shnm <- fvnames(x, ".s")
    shadexpr <- substitute(cbind(A,B), list(A=as.name(shnm[1L]),
                                            B=as.name(shnm[2L])))
    umap <- append(umap, list(.s = shadexpr))
  }
  return(umap)
}

fvlegend <- local({

  fvlegend <- function(object, elang) {
    ## Compute mathematical legend(s) for column(s) in fv object
    ## transformed by language expression 'elang'.
    ## The expression must already be in 'expanded' form.
    ## The result is an expression, or expression vector.
    ## The j-th entry of the vector is an expression for the
    ## j-th column of function values.
    ee <- distributecbind(as.expression(elang))
    map <- fvlabelmap(object, dot = TRUE)
    eout <- as.expression(lapply(ee, invokemap, map=map))
    return(eout)
  }

  ## substitute the label map into a single expression
  invokemap <- function(ei, map) {
    eval(substitute(substitute(e, mp), list(e = ei, mp = map)))
  }

  fvlegend
})

## Append the columns of y (an fv object, matrix or data frame)
## to the fv object x, checking that the function arguments agree.
## If clip=TRUE, x and y are first restricted to a common domain.
bind.fv <- function(x, y, labl=NULL, desc=NULL, preferred=NULL, clip=FALSE) {
  verifyclass(x, "fv")
  ax <- attributes(x)
  if(is.fv(y)) {
    ## y is already an fv object
    ay <- attributes(y)
    if(!identical(ax$fname, ay$fname)) {
      ## x and y represent different functions
      ## expand the labels separately
      fvlabels(x) <- fvlabels(x, expand=TRUE)
      fvlabels(y) <- fvlabels(y, expand=TRUE)
      ax <- attributes(x)
      ay <- attributes(y)
    }
    ## check compatibility of 'r' values
    xr <- ax$argu
    yr <- ay$argu
    rx <- x[[xr]]
    ry <- y[[yr]]
    if(length(rx) != length(ry)) {
      if(!clip)
        stop("fv objects x and y have incompatible domains")
      # restrict both objects to a common domain
      ra <- intersect.ranges(range(rx), range(ry))
      x <- x[inside.range(rx, ra), ]
      y <- y[inside.range(ry, ra), ]
      rx <- x[[xr]]
      ry <- y[[yr]]
    }
    if(length(rx) != length(ry) ||
       max(abs(rx-ry)) > .Machine$double.eps)
      stop("fv objects x and y have incompatible values of r")
    ## reduce y to data frame and strip off 'r' values
    ystrip <- as.data.frame(y)
    yrpos <- which(colnames(ystrip) == yr)
    ystrip <- ystrip[, -yrpos, drop=FALSE]
    ## determine descriptors
    if(is.null(labl)) labl <- attr(y, "labl")[-yrpos]
    if(is.null(desc)) desc <- attr(y, "desc")[-yrpos]
    ##
    y <- ystrip
  } else {
    ## y is a matrix or data frame
    y <- as.data.frame(y)
  }
  ## check for duplicated column names
  allnames <- c(colnames(x), colnames(y))
  if(any(dup <- duplicated(allnames))) {
    nbg <- unique(allnames[dup])
    nn <- length(nbg)
    warning(paste("The column",
                  ngettext(nn, "name", "names"),
                  commasep(sQuote(nbg)),
                  ngettext(nn, "was", "were"),
                  "duplicated. 
Unique names were generated"))
    allnames <- make.names(allnames, unique=TRUE, allow_ = FALSE)
    colnames(y) <- allnames[ncol(x) + seq_len(ncol(y))]
  }
  if(is.null(labl))
    labl <- paste("%s[", colnames(y), "](r)", sep="")
  else if(length(labl) != ncol(y))
    stop(paste("length of", sQuote("labl"),
               "does not match number of columns of y"))
  if(is.null(desc))
    desc <- character(ncol(y))
  else if(length(desc) != ncol(y))
    stop(paste("length of", sQuote("desc"),
               "does not match number of columns of y"))
  if(is.null(preferred))
    preferred <- ax$valu
  ## assemble the combined fv object
  xy <- cbind(as.data.frame(x), y)
  z <- fv(xy, ax$argu, ax$ylab, preferred, ax$fmla, ax$alim,
          c(ax$labl, labl),
          c(ax$desc, desc),
          unitname=unitname(x),
          fname=ax$fname,
          yexp=ax$yexp)
  return(z)
}

## Combine several fv objects column-wise by repeated bind.fv.
cbind.fv <- function(...) {
  a <- list(...)
  n <- length(a)
  if(n == 0)
    return(NULL)
  if(n == 1) {
    ## single argument - extract it
    a <- a[[1L]]
    ## could be an fv object
    if(is.fv(a))
      return(a)
    n <- length(a)
  }
  z <- a[[1L]]
  if(!is.fv(z))
    stop("First argument should be an object of class fv")
  if(n > 1)
    for(i in 2:n)
      z <- bind.fv(z, a[[i]])
  return(z)
}

## Collapse several fv objects into one, keeping the columns named in
## 'same' once and the columns named in 'different' once per object
## (prefixed by the object's name).
collapse.anylist <-
collapse.fv <- local({

  collapse.fv <- function(object, ..., same=NULL, different=NULL) {
    if(is.fv(object)) {
      x <- list(object, ...)
    } else if(inherits(object, "anylist")) {
      x <- append(object, list(...))
    } else if(is.list(object) && all(sapply(object, is.fv))) {
      x <- append(object, list(...))
    } else stop("Format not understood")
    if(!all(unlist(lapply(x, is.fv))))
      stop("arguments should be objects of class fv")
    if(is.null(same)) same <- character(0)
    if(is.null(different)) different <- character(0)
    if(anyDuplicated(c(same, different)))
      stop(paste("The arguments", sQuote("same"), "and", sQuote("different"),
                 "should not have entries in common"))
    either <- c(same, different)
    ## validate
    if(length(either) == 0)
      stop(paste("At least one column of values must be selected",
                 "using the arguments", sQuote("same"), "and",
                 sQuote("different")))
    nbg <- unique(unlist(lapply(x, missingnames, expected=either)))
    if((nbad <- length(nbg)) > 0)
      stop(paste(ngettext(nbad, "The name", "The names"),
                 commasep(sQuote(nbg)),
                 ngettext(nbad, "is", "are"),
                 "not present in the function objects"))
    ## names for different versions
    versionnames <- names(x)
    if(is.null(versionnames))
      versionnames <- paste("x", seq_along(x), sep="")
    shortnames <- abbreviate(versionnames, minlength=12)
    ## extract the common values
    y <- x[[1L]]
    xname <- fvnames(y, ".x")
    yname <- fvnames(y, ".y")
    if(length(same) == 0) {
      ## The column of 'preferred values' .y cannot be deleted
      ## retain .y for now and delete it later.
z <- y[, c(xname, yname)] } else { if(!(yname %in% same)) fvnames(y, ".y") <- same[1L] z <- y[, c(xname, same)] } dotnames <- same ## now merge the different values if(length(different)) { for(i in seq_along(x)) { ## extract values for i-th object xi <- x[[i]] wanted <- (names(xi) %in% different) if(any(wanted)) { y <- as.data.frame(xi)[, wanted, drop=FALSE] desc <- attr(xi, "desc")[wanted] labl <- attr(xi, "labl")[wanted] ## relabel prefix <- shortnames[i] preamble <- versionnames[i] names(y) <- if(ncol(y) == 1) prefix else paste(prefix,names(y),sep="") dotnames <- c(dotnames, names(y)) ## glue onto fv object z <- bind.fv(z, y, labl=paste(prefix, labl, sep="~"), desc=paste(preamble, desc)) } } } if(length(same) == 0) { ## remove the second column which was retained earlier fvnames(z, ".y") <- names(z)[3L] z <- z[, -2L] } fvnames(z, ".") <- dotnames return(z) } missingnames <- function(z, expected) { expected[!(expected %in% names(z))] } collapse.fv }) ## rename one of the columns of an fv object tweak.fv.entry <- function(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) { hit <- (names(x) == current.tag) if(!any(hit)) return(x) ## update descriptions of column i <- min(which(hit)) if(!is.null(new.labl)) attr(x, "labl")[i] <- new.labl if(!is.null(new.desc)) attr(x, "desc")[i] <- new.desc ## adjust column tag if(!is.null(new.tag)) { names(x)[i] <- new.tag ## update dotnames dn <- fvnames(x, ".") if(current.tag %in% dn ) { dn[dn == current.tag] <- new.tag fvnames(x, ".") <- dn } ## if the tweaked column is the preferred value, adjust accordingly if(attr(x, "valu") == current.tag) attr(x, "valu") <- new.tag ## if the tweaked column is the function argument, adjust accordingly if(attr(x, "argu") == current.tag) attr(x, "valu") <- new.tag } return(x) } ## change some or all of the auxiliary text in an fv object rebadge.fv <- function(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp=new.ylab, new.dotnames, new.preferred, new.formula, new.tags) { 
  ## update the y-axis label and its plotmath form
  if(!missing(new.ylab))
    attr(x, "ylab") <- new.ylab
  if(!missing(new.yexp) || !missing(new.ylab))
    attr(x, "yexp") <- new.yexp
  if(!missing(new.fname))
    attr(x, "fname") <- new.fname
  ## per-column updates: 'tags' selects columns, and the corresponding
  ## entries of new.desc/new.labl/new.tags are applied
  if(!missing(tags) && !(missing(new.desc) && missing(new.labl) && missing(new.tags))) {
    nama <- names(x)
    desc <- attr(x, "desc")
    labl <- attr(x, "labl")
    valu <- attr(x, "valu")
    for(i in seq_along(tags))
      if(!is.na(m <- match(tags[i], nama))) {
        if(!missing(new.desc)) desc[m] <- new.desc[i]
        if(!missing(new.labl)) labl[m] <- new.labl[i]
        if(!missing(new.tags)) {
          names(x)[m] <- new.tags[i]
          if(tags[i] == valu)
            attr(x, "valu") <- new.tags[i]
        }
      }
    attr(x, "desc") <- desc
    attr(x, "labl") <- labl
  }
  if(!missing(new.dotnames))
    fvnames(x, ".") <- new.dotnames
  if(!missing(new.preferred)) {
    stopifnot(new.preferred %in% names(x))
    attr(x, "valu") <- new.preferred
  }
  if(!missing(new.formula))
    formula(x) <- new.formula
  return(x)
}

## common invocations to label a function like Kdot or Kcross

## Relabel x as a cross-type function main[i,j](r), e.g. Kcross.
rebadge.as.crossfun <- function(x, main, sub=NULL, i, j) {
  i <- make.parseable(i)
  j <- make.parseable(j)
  if(is.null(sub)) {
    ylab <- substitute(main[i, j](r),
                       list(main=main, i=i, j=j))
    fname <- c(main, paste0("list", paren(paste(i, j, sep=","))))
    yexp <- substitute(main[list(i, j)](r),
                       list(main=main, i=i, j=j))
  } else {
    ylab <- substitute(main[sub, i, j](r),
                       list(main=main, sub=sub, i=i, j=j))
    fname <- c(main, paste0("list", paren(paste(sub, i, j, sep=","))))
    yexp <- substitute(main[list(sub, i, j)](r),
                       list(main=main, sub=sub, i=i, j=j))
  }
  y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp)
  return(y)
}

## Relabel x as a dot-type function main[i ~ .](r), e.g. Kdot.
## "\267" is the centred-dot symbol in the plotmath 'symbol' font.
rebadge.as.dotfun <- function(x, main, sub=NULL, i) {
  i <- make.parseable(i)
  if(is.null(sub)) {
    ylab <- substitute(main[i ~ dot](r),
                       list(main=main, i=i))
    fname <- c(main, paste0(i, "~symbol(\"\\267\")"))
    yexp <- substitute(main[i ~ symbol("\267")](r),
                       list(main=main, i=i))
  } else {
    ylab <- substitute(main[sub, i ~ dot](r),
                       list(main=main, sub=sub, i=i))
    fname <- c(main, paste0("list", paren(paste0(sub, ",", i,
                                                 "~symbol(\"\\267\")"))))
    yexp <- substitute(main[list(sub, i ~ symbol("\267"))](r),
                       list(main=main, sub=sub, i=i))
  }
  y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp)
  return(y)
}

## even simpler wrapper for rebadge.fv
rename.fv <- function(x, fname, ylab, yexp=ylab) {
  stopifnot(is.fv(x))
  stopifnot(is.character(fname) && (length(fname) %in% 1:2))
  argu <- fvnames(x, ".x")
  ## default ylab is fname(argu) or fname[sub](argu)
  if(missing(ylab) || is.null(ylab))
    ylab <- switch(length(fname),
                   substitute(fn(argu),
                              list(fn=as.name(fname),
                                   argu=as.name(argu))),
                   substitute(fn[fsub](argu),
                              list(fn=as.name(fname[1]),
                                   fsub=as.name(fname[2]),
                                   argu=as.name(argu))))
  if(missing(yexp) || is.null(yexp))
    yexp <- ylab
  y <- rebadge.fv(x, new.fname=fname, new.ylab=ylab, new.yexp=yexp)
  return(y)
}

## subset extraction operator
"[.fv" <- function(x, i, j, ..., drop=FALSE) {
  igiven <- !missing(i)
  jgiven <- !missing(j)
  y <- as.data.frame(x)
  if(igiven && jgiven)
    z <- y[i, j, drop=drop]
  else if(igiven)
    z <- y[i, , drop=drop]
  else if(jgiven)
    z <- y[ , j, drop=drop]
  else z <- y
  ## return only the selected values as a data frame or vector.
  if(drop) return(z)
  if(!jgiven)
    selected <- seq_len(ncol(x))
  else {
    nameindices <- seq_along(names(x))
    names(nameindices) <- names(x)
    selected <- as.vector(nameindices[j])
  }
  # validate choice of selected/dropped columns
  nama <- names(z)
  argu <- attr(x, "argu")
  if(!(argu %in% nama))
    stop(paste("The function argument", sQuote(argu), "must not be removed"))
  valu <- attr(x, "valu")
  if(!(valu %in% nama))
    stop(paste("The default column of function values",
               sQuote(valu), "must not be removed"))
  # if the plot formula involves explicit mention of dropped columns,
  # replace it by a generic formula
  fmla <- as.formula(attr(x, "fmla"))
  if(!all(variablesinformula(fmla) %in% nama))
    fmla <- as.formula(.
                       ~ .x, env=environment(fmla))
  ## If range of argument was implicitly changed, adjust "alim"
  alim <- attr(x, "alim")
  rang <- range(z[[argu]])
  alim <- intersect.ranges(alim, rang, fatal=FALSE)
  ## rebuild an fv object from the selected rows/columns,
  ## carrying over labels and descriptions of the selected columns
  result <- fv(z, argu=attr(x, "argu"),
               ylab=attr(x, "ylab"),
               valu=attr(x, "valu"),
               fmla=fmla,
               alim=alim,
               labl=attr(x, "labl")[selected],
               desc=attr(x, "desc")[selected],
               unitname=attr(x, "units"),
               fname=attr(x,"fname"),
               yexp=attr(x, "yexp"))
  ## carry over preferred names, if possible
  dotn <- fvnames(x, ".")
  fvnames(result, ".") <- dotn[dotn %in% colnames(result)]
  shad <- fvnames(x, ".s")
  if(!is.null(shad) && all(shad %in% colnames(result)))
    fvnames(result, ".s") <- shad
  return(result)
}

## Subset and column replacement methods
## to guard against deletion of columns

"[<-.fv" <- function(x, i, j, value) {
  if(!missing(j)) {
    ## check for alterations to structure of object
    if((is.character(j) && !all(j %in% colnames(x))) ||
       (is.numeric(j) && any(j > ncol(x))))
      stop("Use bind.fv to add new columns to an object of class fv")
    if(is.null(value) && missing(i)) {
      ## column(s) will be removed
      co <- seq_len(ncol(x))
      names(co) <- colnames(x)
      keepcol <- setdiff(co, co[j])
      return(x[ , keepcol, drop=FALSE])
    }
  }
  NextMethod("[<-")
}

## Assigning NULL removes a column; assigning to a new name appends
## a column via bind.fv (which keeps the fv attributes consistent).
"$<-.fv" <- function(x, name, value) {
  j <- which(colnames(x) == name)
  if(is.null(value)) {
    ## column will be removed
    if(length(j) != 0)
      return(x[, -j, drop=FALSE])
    return(x)
  }
  if(length(j) == 0) {
    ## new column
    df <- data.frame(1:nrow(x), value)[,-1L,drop=FALSE]
    colnames(df) <- name
    y <- bind.fv(x, df, desc=paste("Additional variable", sQuote(name)))
    return(y)
  }
  NextMethod("$<-")
}

## method for 'formula'
## Returns the default plot formula (stored as a character string).
formula.fv <- function(x, ...)
{
  attr(x, "fmla")
}

# new generic
"formula<-" <- function(x, ..., value) {
  UseMethod("formula<-")
}

## Set the default plot formula; NULL restores '.y ~ .x'.
"formula<-.fv" <- function(x, ..., value) {
  if(is.null(value))
    value <- paste(fvnames(x, ".y"), "~", fvnames(x, ".x"))
  else if(inherits(value, "formula")) {
    ## convert formula to string
    value <- flat.deparse(value)
  } else if(!is.character(value))
    stop("Assignment value should be a formula or a string")
  attr(x, "fmla") <- value
  return(x)
}

## method for with()
## Evaluate an expression in the frame of the function table, with
## the abbreviations ".", ".x", ".y", ".s", ".a" expanded to columns.
with.fv <- function(data, expr, ..., fun=NULL, enclos=NULL) {
  if(any(names(list(...)) == "drop"))
    stop("Outdated argument 'drop' used in with.fv")
  cl <- short.deparse(sys.call())
  verifyclass(data, "fv")
  if(is.null(enclos))
    enclos <- parent.frame()
  ## convert syntactic expression to 'expression' object
  #  e <- as.expression(substitute(expr))
  ## convert syntactic expression to call
  elang <- substitute(expr)
  ## map "." etc to names of columns of data
  datanames <- names(data)
  xname <- fvnames(data, ".x")
  yname <- fvnames(data, ".y")
  ux <- as.name(xname)
  uy <- as.name(yname)
  dnames <- datanames[datanames %in% fvnames(data, ".")]
  ud <- as.call(lapply(c("cbind", dnames), as.name))
  anames <- datanames[datanames %in% fvnames(data, ".a")]
  ua <- as.call(lapply(c("cbind", anames), as.name))
  if(!is.null(fvnames(data, ".s"))) {
    snames <- datanames[datanames %in% fvnames(data, ".s")]
    us <- as.call(lapply(c("cbind", snames), as.name))
  } else us <- NULL
  expandelang <- eval(substitute(substitute(ee,
                                            list(.=ud, .x=ux, .y=uy,
                                                 .s=us, .a=ua)),
                                 list(ee=elang)))
  dont.complain.about(ua, ud, us, ux, uy)
  evars <- all.vars(expandelang)
  used.dotnames <- evars[evars %in% dnames]
  ## evaluate expression
  datadf <- as.data.frame(data)
  results <- eval(expandelang, as.list(datadf), enclos=enclos)
  ## --------------------
  ## commanded to return numerical values only?
if(!is.null(fun) && !fun) return(results) if(!is.matrix(results) && !is.data.frame(results)) { ## result is a vector if(is.null(fun)) fun <- FALSE if(!fun || length(results) != nrow(datadf)) return(results) results <- matrix(results, ncol=1) } else { ## result is a matrix or data frame if(is.null(fun)) fun <- TRUE if(!fun || nrow(results) != nrow(datadf)) return(results) } ## result is a matrix or data frame of the right dimensions ## make a new fv object ## ensure columns of results have names if(is.null(colnames(results))) colnames(results) <- paste("col", seq_len(ncol(results)), sep="") resultnames <- colnames(results) ## get values of function argument xvalues <- datadf[[xname]] ## tack onto result matrix results <- cbind(xvalues, results) colnames(results) <- c(xname, resultnames) results <- data.frame(results) ## check for alteration of column names oldnames <- resultnames resultnames <- colnames(results)[-1L] if(any(resultnames != oldnames)) warning("some column names were illegal and have been changed") ## determine mapping (if any) from columns of output to columns of input namemap <- match(colnames(results), names(datadf)) okmap <- !is.na(namemap) ## Build up fv object ## decide which of the columns should be the preferred value newyname <- if(yname %in% resultnames) yname else resultnames[1L] ## construct default plot formula fmla <- flat.deparse(as.formula(paste(". ~", xname))) dotnames <- resultnames ## construct description strings desc <- character(ncol(results)) desc[okmap] <- attr(data, "desc")[namemap[okmap]] desc[!okmap] <- paste("Computed value", resultnames[!okmap]) ## function name (fname) and mathematical expression for function (yexp) oldyexp <- attr(data, "yexp") oldfname <- attr(data, "fname") if(is.null(oldyexp)) { fname <- cl yexp <- substitute(f(xname), list(f=as.name(fname), xname=as.name(xname))) } else { ## map 'cbind(....)' to "." 
for name of function only cb <- paste("cbind(", paste(used.dotnames, collapse=","), ")", sep="") compresselang <- gsub(cb, ".", flat.deparse(expandelang), fixed=TRUE) compresselang <- as.formula(paste(compresselang, "~1"))[[2L]] ## construct mapping using original function name labmap <- fvlabelmap(data, dot=TRUE) labmap[["."]] <- oldyexp yexp <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap))) labmap2 <- labmap labmap2[["."]] <- as.name(oldfname) fname <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap2))) fname <- paren(flat.deparse(fname)) } ## construct mathematical labels mathlabl <- as.character(fvlegend(data, expandelang)) mathlabl <- gsub("[[:space:]]+", " ", mathlabl) labl <- colnames(results) mathmap <- match(labl, used.dotnames) okmath <- !is.na(mathmap) labl[okmath] <- mathlabl[mathmap[okmath]] ## form fv object and return out <- fv(results, argu=xname, valu=newyname, labl=labl, desc=desc, alim=attr(data, "alim"), fmla=fmla, unitname=unitname(data), fname=fname, yexp=yexp, ylab=yexp) fvnames(out, ".") <- dotnames return(out) } ## method for 'range' range.fv <- local({ getValues <- function(x) { xdat <- as.matrix(as.data.frame(x)) yall <- fvnames(x, ".") vals <- xdat[, yall] return(as.vector(vals)) } range.fv <- function(..., na.rm=TRUE, finite=na.rm) { aarg <- list(...) isfun <- sapply(aarg, is.fv) if(any(isfun)) aarg[isfun] <- lapply(aarg[isfun], getValues) z <- do.call(range, append(aarg, list(na.rm=na.rm, finite=finite))) return(z) } range.fv }) min.fv <- function(..., na.rm=TRUE, finite=na.rm) { range(..., na.rm=TRUE, finite=na.rm)[1L] } max.fv <- function(..., na.rm=TRUE, finite=na.rm) { range(..., na.rm=TRUE, finite=na.rm)[2L] } ## stieltjes integration for fv objects stieltjes <- function(f, M, ...) { ## stieltjes integral of f(x) dM(x) stopifnot(is.function(f)) if(is.stepfun(M)) { envM <- environment(M) #' jump locations x <- get("x", envir=envM) #' values of integrand fx <- f(x, ...) 
    #' jump amounts
    xx <- c(-Inf, (x[-1L] + x[-length(x)])/2, Inf)
    dM <- diff(M(xx))
    #' integrate f(x) dM(x)
    f.dM <- fx * dM
    result <- sum(f.dM[is.finite(f.dM)])
    return(list(result))
  } else if(is.fv(M)) {
    ## integration variable
    argu <- attr(M, "argu")
    x <- M[[argu]]
    ## values of integrand
    fx <- f(x, ...)
    ## estimates of measure: every column except the argument
    valuenames <- names(M) [names(M) != argu]
    Mother <- as.data.frame(M)[, valuenames]
    Mother <- as.matrix(Mother, nrow=nrow(M))
    ## increments of measure
    dM <- apply(Mother, 2, diff)
    dM <- rbind(dM, 0)
    ## integrate f(x) dM(x)
    f.dM <- fx * dM
    f.dM[!is.finite(f.dM)] <- 0
    results <- colSums(f.dM)
    results <- as.list(results)
    names(results) <- valuenames
    return(results)
  } else stop("M must be an object of class fv or stepfun")
}

## Attach prefixes to the tags, labels and descriptions of the
## selected columns of an fv object (e.g. "hi", "lo" variants).
prefixfv <- function(x, tagprefix="", descprefix="", lablprefix=tagprefix,
                     whichtags=fvnames(x, "*")) {
  ## attach a prefix to fv information
  stopifnot(is.fv(x))
  att <- attributes(x)
  relevant <- names(x) %in% whichtags
  oldtags <- names(x)[relevant]
  newtags <- paste(tagprefix, oldtags, sep="")
  newlabl <- paste(lablprefix, att$labl[relevant], sep="")
  newdesc <- paste(descprefix, att$desc[relevant])
  y <- rebadge.fv(x, tags=oldtags,
                  new.desc=newdesc,
                  new.labl=newlabl,
                  new.tags=newtags)
  return(y)
}

reconcile.fv <- local({

  reconcile.fv <- function(...) {
    ## reconcile several fv objects by finding the columns they share in common
    z <- list(...)
    if(!all(unlist(lapply(z, is.fv)))) {
      ## also accept a single list of fv objects
      if(length(z) == 1 && is.list(z[[1L]]) &&
         all(unlist(lapply(z[[1L]], is.fv))))
        z <- z[[1L]]
      else
        stop("all arguments should be fv objects")
    }
    n <- length(z)
    if(n <= 1) return(z)
    ## find columns that are common to all estimates
    keepcolumns <- names(z[[1L]])
    keepvalues <- fvnames(z[[1L]], "*")
    for(i in 2:n) {
      keepcolumns <- intersect(keepcolumns, names(z[[i]]))
      keepvalues <- intersect(keepvalues, fvnames(z[[i]], "*"))
    }
    if(length(keepvalues) == 0)
      stop("cannot reconcile fv objects: they have no columns in common")
    ## determine name of the 'preferred' column
    prefs <- unlist(lapply(z, fvnames, a=".y"))
    prefskeep <- prefs[prefs %in% keepvalues]
    if(length(prefskeep) > 0) {
      ## pick the most popular
      chosen <- unique(prefskeep)[which.max(table(prefskeep))]
    } else {
      ## drat - pick a value arbitrarily
      chosen <- keepvalues[1L]
    }
    z <- lapply(z, rebadge.fv, new.preferred=chosen)
    z <- lapply(z, "[.fv", j=keepcolumns)
    ## also clip to the same r values
    rmax <- min(sapply(z, maxrval))
    z <- lapply(z, cliprmax, rmax=rmax)
    return(z)
  }

  ## largest value of the function argument
  maxrval <- function(x) { max(with(x, .x)) }
  ## restrict to argument values <= rmax
  cliprmax <- function(x, rmax) { x[ with(x, .x) <= rmax, ] }

  reconcile.fv
})

## Convert an fv object to an ordinary R function that interpolates
## the tabulated values (optionally extrapolating beyond the range).
as.function.fv <- function(x, ..., value=".y", extrapolate=FALSE) {
  trap.extra.arguments(...)
  value.orig <- value
  ## extract function argument
  xx <- with(x, .x)
  ## extract all function values
  yy <- as.data.frame(x)[, fvnames(x, "*"), drop=FALSE]
  ## determine which value(s) to supply
  if(!is.character(value))
    stop("value should be a string or vector specifying columns of x")
  if(!all(value %in% colnames(yy))) {
    ## allow abbreviations like ".y" to be used as 'value'
    expandvalue <- try(fvnames(x, value))
    if(!inherits(expandvalue, "try-error")) {
      value <- expandvalue
    } else stop("Unable to determine columns of x")
  }
  yy <- yy[,value, drop=FALSE]
  argname <- fvnames(x, ".x")
  ## determine extrapolation rule (1=NA, 2=most extreme value)
  stopifnot(is.logical(extrapolate))
  stopifnot(length(extrapolate) %in% 1:2)
  endrule <- 1 + extrapolate
  ## make function(s)
  if(length(value) == 1 && !identical(value.orig, "*")) {
    ## make a single 'approxfun' and return it
    f <- approxfun(xx, yy[,,drop=TRUE], rule=endrule)
    ## magic: rename the formal argument of f to the fv argument name
    names(formals(f))[1L] <- argname
    body(f)[[4L]] <- as.name(argname)
  } else {
    ## make a list of 'approxfuns' with different function values
    funs <- lapply(yy, approxfun, x = xx, rule = endrule)
    ## return a function which selects the appropriate 'approxfun' and executes
    f <- function(xxxx, what=value) {
      what <- match.arg(what)
      funs[[what]](xxxx)
    }
    ## recast function definition
    ## ('any sufficiently advanced technology is
    ## indistinguishable from magic' -- Arthur C. Clarke)
    formals(f)[[2L]] <- value
    names(formals(f))[1L] <- argname
    ##    body(f)[[3L]][[2L]] <- as.name(argname)
    body(f) <- eval(substitute(substitute(z,
                                          list(xxxx=as.name(argname))),
                               list(z=body(f))))
  }
  class(f) <- c("fvfun", class(f))
  attr(f, "fname") <- attr(x, "fname")
  attr(f, "yexp") <- attr(x, "yexp")
  return(f)
}

## Print method for functions created by as.function.fv.
print.fvfun <- function(x, ...) {
  y <- args(x)
  yexp <- as.expression(attr(x, "yexp"))
  body(y) <- as.name(paste("Returns interpolated value of", yexp))
  print(y, ...)
  return(invisible(NULL))
}

findcbind <- function(root, depth=0, maxdepth=1000) {
  ## recursive search through a parse tree to find calls to 'cbind'
  ## returns a list of integer index vectors locating each cbind call
  if(depth > maxdepth) stop("Reached maximum depth")
  if(length(root) == 1) return(NULL)
  if(identical(as.name(root[[1L]]), as.name("cbind"))) return(list(numeric(0)))
  out <- NULL
  for(i in 2:length(root)) {
    di <- findcbind(root[[i]], depth+1, maxdepth)
    if(!is.null(di))
      out <- append(out, lapply(di, append, values=i, after=FALSE))
  }
  return(out)
}

## binary/unary operators recognised when deciding whether a cbind
## argument needs to be parenthesised
.MathOpNames <- c("+", "-", "*", "/",
                  "^", "%%", "%/%",
                  "&", "|", "!",
                  "==", "!=", "<", "<=", ">=", ">")

distributecbind <- local({

  distributecbind <- function(x) {
    ## x is an expression involving a call to 'cbind'
    ## return a vector of expressions, each obtained by replacing 'cbind(...)'
    ## by one of its arguments in turn.
    stopifnot(typeof(x) == "expression")
    xlang <- x[[1L]]
    locations <- findcbind(xlang)
    if(length(locations) == 0)
      return(x)
    ## cbind might occur more than once
    ## check that the number of arguments is the same each time
    narg <- unique(sapply(locations, nargs.in.expr, e=xlang))
    if(length(narg) > 1)
      return(NULL)
    out <- NULL
    if(narg > 0) {
      for(i in 1:narg) {
        ## make a version of the expression
        ## in which cbind() is replaced by its i'th argument
        fakexlang <- xlang
        for(loc in locations) {
          if(length(loc) > 0) {
            ## usual case: 'loc' is integer vector representing nested index
            cbindcall <- xlang[[loc]]
            ## extract i-th argument
            argi <- cbindcall[[i+1]]
            ## if argument is an expression, enclose it in parentheses
            if(length(argi) > 1 && paste(argi[[1L]]) %in% .MathOpNames)
              argi <- substitute((x), list(x=argi))
            ## replace cbind call by its i-th argument
            fakexlang[[loc]] <- argi
          } else {
            ## special case: 'loc' = integer(0) representing xlang itself
            cbindcall <- xlang
            ## extract i-th argument
            argi <- cbindcall[[i+1L]]
            ## replace cbind call by its i-th argument
            fakexlang <- cbindcall[[i+1L]]
          }
        }
        ## add to final expression
        out <- c(out, as.expression(fakexlang))
      }
    }
    return(out)
  }

  ## number of arguments of the call at position 'loc' within e
  nargs.in.expr <- function(loc, e) {
    n <- if(length(loc) > 0) length(e[[loc]]) else length(e)
    return(n - 1L)
  }

  distributecbind
})

## Form a new 'fv' object as a ratio
## Either 'df' (the ratio values) or 'numer' (the numerator) is given;
## the missing part is reconstructed from 'denom'.  When ratio=TRUE
## the result is a 'rat' object carrying numerator and denominator.
ratfv <- function(df, numer, denom, ..., ratio=TRUE) {
  ## Determine y
  if(!missing(df) && !is.null(df)) {
    y <- fv(df, ...)
    num <- NULL
  } else {
    ## Compute numer/denom
    ## Numerator must be a data frame
    num <- fv(numer, ...)
    ## Denominator may be a data frame or a constant
    force(denom)
    y <- eval.fv(num/denom)
    ## relabel
    y <- fv(as.data.frame(y), ...)
  }
  if(!ratio)
    return(y)
  if(is.null(num)) {
    ## Compute num = y * denom
    ## Denominator may be a data frame or a constant
    force(denom)
    num <- eval.fv(y * denom)
    ## ditch labels
    num <- fv(as.data.frame(num), ...)
  }
  ## make denominator an fv object
  if(is.data.frame(denom)) {
    den <- fv(denom, ...)
  } else {
    ## scalar
    check.1.real(denom, "Unless it is a data frame,")
    ## replicate it in all the data columns
    dendf <- as.data.frame(num)
    valuecols <- (names(num) != fvnames(num, ".x"))
    dendf[, valuecols] <- denom
    den <- fv(dendf, ...)
  }
  ## tweak the descriptions
  ok <- (names(y) != fvnames(y, ".x"))
  attr(num, "desc")[ok] <- paste("numerator of", attr(num, "desc")[ok])
  attr(den, "desc")[ok] <- paste("denominator of", attr(den, "desc")[ok])
  ## form ratio object
  y <- rat(y, num, den, check=FALSE)
  return(y)
}

## Tack new column(s) onto a ratio fv object
bind.ratfv <- function(x, numerator=NULL, denominator=NULL,
                       labl = NULL, desc = NULL, preferred = NULL,
                       ratio=TRUE, quotient=NULL) {
  if(ratio && !inherits(x, "rat"))
    stop("ratio=TRUE is set, but x has no ratio information", call.=FALSE)
  ## any two of numerator, denominator, quotient determine the third
  if(is.null(numerator) && !is.null(denominator) && !is.null(quotient))
    numerator <- quotient * denominator
  if(is.null(denominator) && inherits(numerator, "rat")) {
    ## extract numerator & denominator from ratio object
    both <- numerator
    denominator <- attr(both, "denominator")
    usenames <- fvnames(both, ".a")
    numerator <- as.data.frame(both)[,usenames,drop=FALSE]
    denominator <- as.data.frame(denominator)[,usenames,drop=FALSE]
    ## labels default to those of ratio object
    ma <- match(usenames, colnames(both))
    if(is.null(labl)) labl <- attr(both, "labl")[ma]
    if(is.null(desc)) desc <- attr(both, "desc")[ma]
  }
  # calculate ratio
  #   The argument 'quotient' is rarely needed
  #   except to avoid 0/0 or to improve accuracy
  if(is.null(quotient))
    quotient <- numerator/denominator
  # bind new column to x
  y <- bind.fv(x, quotient,
               labl=labl, desc=desc, preferred=preferred)
  if(!ratio)
    return(y)
  ## convert scalar denominator to data frame
  if(!is.data.frame(denominator)) {
    if(!is.numeric(denominator) || !is.vector(denominator))
      stop("Denominator should be a data frame or a numeric vector")
    nd <- length(denominator)
    if(nd != 1 && nd != nrow(x))
      stop("Denominator has wrong length")
    dvalue <- denominator
    denominator <- numerator
    denominator[] <- dvalue
  }
  ## Now fuse with x
  num <- attr(x, "numerator")
  den <- attr(x, "denominator")
  num <- bind.fv(num, numerator,
                 labl=labl, desc=paste("numerator of", desc),
                 preferred=preferred)
  den <- bind.fv(den,
denominator, labl=labl, desc=paste("denominator of", desc), preferred=preferred) y <- rat(y, num, den, check=FALSE) return(y) } conform.ratfv <- function(x) { ## harmonise display properties in components of a ratio stopifnot(inherits(x, "rat"), is.fv(x)) num <- attr(x, "numerator") den <- attr(x, "denominator") formula(num) <- formula(den) <- formula(x) fvnames(num, ".") <- fvnames(den, ".") <- fvnames(x, ".") unitname(num) <- unitname(den) <- unitname(x) attr(x, "numerator") <- num attr(x, "denominator") <- den return(x) } spatstat/R/First.R0000644000176200001440000000142713572330665013546 0ustar liggesusers# First.R # # $Revision: 1.48 $ $Date: 2019/12/06 01:38:23 $ # .onLoad <- function(...) reset.spatstat.options() .onAttach <- function(libname, pkgname) { store.versionstring.spatstat() ver <- versionstring.spatstat() ## descfile <- system.file("DESCRIPTION", package="spatstat") nickfile <- system.file("doc", "Nickname.txt", package="spatstat") ni <- scan(file=nickfile, what=character(), n=1, quiet=TRUE) msg <- paste("\nspatstat", ver, " ", paren(paste("nickname:", sQuote(ni))), "\nFor an introduction to spatstat, type", sQuote("beginner"), "\n") packageStartupMessage(msg) cur <- versioncurrency.spatstat() if(!is.null(cur)) packageStartupMessage(paste("\nNote:", cur)) invisible(NULL) } spatstat/R/edit.R0000644000176200001440000000133313333543255013374 0ustar liggesusers## edit.R ## ## Methods for 'edit' ## ## $Revision: 1.3 $ $Date: 2015/04/19 06:14:21 $ edit.ppp <- local({ edit.ppp <- function(name, ...) { X <- name df <- as.data.frame(X) df <- as.data.frame(lapply(df, as.num.or.char)) Y <- edit(df, ...) Z <- as.ppp(Y, W=Window(X)) return(Z) } as.num.or.char <- function(x) { if (is.character(x)) x else if (is.numeric(x)) { storage.mode(x) <- "double" x } else as.character(x) } edit.ppp }) edit.im <- function(name, ...) { X <- name M <- transmat(as.matrix(X), from="spatstat", to="European") Y <- as.data.frame(M) Z <- edit(Y, ...) 
X[] <- transmat(as.matrix(Z), from="European", to="spatstat") return(X) } spatstat/R/replace.ppp.R0000644000176200001440000000354413333543255014666 0ustar liggesusers# # replace.ppp.R # "[<-.ppp" <- function(x, i, j, value) { verifyclass(x, "ppp") verifyclass(value, "ppp") if(missing(i) && missing(j)) return(value) if(missing(i)) { message("The use of argument j in [<-.ppp is deprecated; use argument i") # invoke code below x[j] <- value return(x) } xmf <- markformat(x) vmf <- markformat(value) if(xmf != vmf) { if(xmf == "none") stop("Replacement points are marked, but x is not marked") else if(vmf == "none") stop("Replacement points have no marks, but x is marked") else stop("Format of marks in replacement is incompatible with original") } if(inherits(i, "owin")) { win <- i vok <- inside.owin(value$x, value$y, win) if(!all(vok)) { warning("Replacement points outside the specified window were deleted") value <- value[vok] } # convert to vector index i <- inside.owin(x$x, x$y, win) } if(!is.vector(i)) stop("Unrecognised format for subset index i") # vector index # determine index subset n <- x$n SUB <- seq_len(n)[i] # anything to replace? if(length(SUB) == 0) return(x) # sanity checks if(anyNA(SUB)) stop("Invalid subset: the resulting subscripts include NAs") # exact replacement of this subset? 
if(value$n == length(SUB)) { x$x[SUB] <- value$x x$y[SUB] <- value$y switch(xmf, none={}, list=, vector={ x$marks[SUB] <- value$marks }, dataframe={ x$marks[SUB,] <- value$marks }) } else x <- superimpose(x[-SUB], value, W=x$window) if(!missing(j)) { warning("The use of argument j in [<-.ppp is deprecated; use argument i") # invoke code above x[j] <- value } return(x) } spatstat/R/markmark.R0000644000176200001440000000415713401202452014246 0ustar liggesusers#' #' markmark.R #' #' Mark-mark scatterplot #' #' $Revision: 1.7 $ $Date: 2018/12/03 10:26:38 $ markmarkscatter <- function(X, rmax, ..., col=NULL, symap=NULL, transform=I, jit=FALSE) { if(!is.ppp(X) && !is.pp3(X) && !is.ppx(X)) stop("X should be a point pattern", call.=FALSE) if(npoints(X) == 0) { warning("Empty point pattern; no plot generated.", call.=FALSE) return(invisible(NULL)) } stopifnot(is.marked(X)) marx <- numeric.columns(marks(X)) nc <- ncol(marx) if(nc == 0) stop("No marks are numeric", call.=FALSE) if(nc > 1) warning("Multiple columns of numeric marks: using the first column", call.=FALSE) marx <- marx[,1,drop=TRUE] transformed <- !missing(transform) marx <- transform(marx) if(jit) marx <- jitter(marx, factor=2.5) if(is.ppp(X) || is.pp3(X)) { cl <- closepairs(X, rmax, what="ijd") } else { D <- pairdist(X) ij <- which(D <= rmax, arr.ind=TRUE) cl <- list(i=ij[,1], j=ij[,2], d=as.numeric(D[ij])) } mi <- marx[cl$i] mj <- marx[cl$j] d <- cl$d ra <- range(marx) Y <- ppp(mi, mj, ra, ra, marks=d, check=FALSE) nY <- npoints(Y) Y <- Y[order(d, decreasing=TRUE)] if(is.null(symap)) { if(is.null(col)) col <- grey(seq(0.9, 0, length.out=128)) if(nY > 0) { rd <- c(0, max(d)) symap <- symbolmap(cols=col, range=rd, size=1, pch=16) } } plot(Y, ..., symap=symap, main="", leg.side="right") axis(1) axis(2) mname <- if(jit && transformed) "Jittered, transformed mark" else if(jit) "Jittered mark" else if(transformed) "Transformed mark" else "Mark" title(xlab=paste(mname, "of first point"), ylab=paste(mname, "of second 
point")) if(nY >= 2) { mbar2 <- mean(marx)^2 msd2 <- sqrt(2 * var(marx)) hyperbola <- function(x) { mbar2/x } bandline1 <- function(x) { x + msd2 } bandline2 <- function(x) { x - msd2 } curve(hyperbola, from=mbar2/ra[2], to=ra[2], add=TRUE) curve(bandline1, from=ra[1], to=ra[2]-msd2, add=TRUE) curve(bandline2, from=ra[1]+msd2, to=ra[2], add=TRUE) } return(invisible(NULL)) } spatstat/R/nnmark.R0000644000176200001440000000234413333543255013740 0ustar liggesusers# # nnmark.R # # $Revision: 1.7 $ $Date: 2018/02/14 08:00:59 $ nnmark <- local({ nnmark <- function(X, ..., k=1, at=c("pixels", "points")) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) at <- match.arg(at) mX <- marks(X) switch(at, pixels = { Y <- nnmap(X, k=k, what="which", ...) switch(markformat(X), vector={ result <- eval.im(mX[Y]) }, dataframe = { mX <- as.list(as.data.frame(mX)) result <- solapply(mX, lookedup, indeximage=Y) }, stop("Marks must be a vector or dataframe")) }, points = { Y <- nnwhich(X, k=k) switch(markformat(X), vector={ result <- mX[Y] }, dataframe = { result <- mX[Y,, drop=FALSE] row.names(result) <- NULL }, stop("Marks must be a vector or dataframe")) }) return(result) } lookedup <- function(xvals, indeximage) eval.im(xvals[indeximage]) nnmark }) spatstat/R/transmat.R0000644000176200001440000000364413333543255014307 0ustar liggesusers## transmat.R ## ## transform matrices between different spatial indexing conventions ## ## $Revision: 1.1 $ $Date: 2015/03/04 07:13:10 $ transmat <- local({ euro <- matrix(c(0,-1,1,0), 2, 2) spat <- matrix(c(0,1,1,0), 2, 2) cart <- diag(c(1,1)) dimnames(euro) <- dimnames(spat) <- dimnames(cart) <- list(c("x","y"), c("i","j")) known <- list(spatstat=spat, cartesian=cart, Cartesian=cart, european=euro, European=euro) cmap <- list(x=c(1,0), y=c(0,1), i=c(1,0), j=c(0,1)) maptocoef <- function(s) { e <- parse(text=s)[[1]] eval(eval(substitute(substitute(f, cmap), list(f=e)))) } as.convention <- function(x) { if(is.character(x) && length(x) == 1) { k <- 
pmatch(x, names(known)) if(is.na(k)) stop(paste("Unrecognised convention", sQuote(x)), call.=FALSE) return(known[[k]]) } if(is.list(x) && is.character(unlist(x))) { xx <- lapply(x, maptocoef) if(all(c("x", "y") %in% names(xx))) z <- rbind(xx$x, xx$y) else if(all(c("i", "j") %in% names(xx))) z <- cbind(xx$x, xx$y) else stop("entries should be named i,j or x,y", call.=FALSE) dimnames(z) <- list(c("x","y"), c("i","j")) if(!(all(z == 0 | z == 1 | z == -1) && all(rowSums(abs(z)) == 1) && all(colSums(abs(z)) == 1))) stop("Illegal convention", call.=FALSE) return(z) } stop("Unrecognised format for spatial convention", call.=FALSE) } transmat <- function(m, from, to) { m <- as.matrix(m) from <- as.convention(from) to <- as.convention(to) conv <- solve(from) %*% to flip <- apply(conv == -1, 2, any) if(flip[["i"]]) m <- m[nrow(m):1, , drop=FALSE] if(flip[["j"]]) m <- m[ , ncol(m):1, drop=FALSE] if(all(diag(conv) == 0)) m <- t(m) return(m) } transmat }) spatstat/R/progress.R0000644000176200001440000002616413333543255014324 0ustar liggesusers# # progress.R # # $Revision: 1.21 $ $Date: 2016/04/25 02:34:40 $ # # progress plots (envelope representations) # dclf.progress <- function(X, ...) mctest.progress(X, ..., exponent=2) mad.progress <- function(X, ...) 
mctest.progress(X, ..., exponent=Inf) mctest.progress <- local({ smoothquantile <- function(z, alpha) { min(quantile(density(z), 1-alpha), max(z)) } silentmax <- function(z) { if(all(is.nan(z))) return(NaN) z <- z[is.finite(z)] if(length(z) == 0) return(NA) else return(max(z)) } mctest.progress <- function(X, fun=Lest, ..., exponent=1, nrank=1, interpolate=FALSE, alpha, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, "envelope")) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim nsim <- ncol(devsim) # determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } alphastring <- paste(100 * alpha, "%%", sep="") # compute critical values critval <- if(interpolate) apply(devsim, 1, smoothquantile, alpha=alpha) else if(nrank == 1) apply(devsim, 1, silentmax) else apply(devsim, 1, orderstats, k=nrank, decreasing=TRUE) # create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=critval, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . 
~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, alphastring, "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") p <- hasenvelope(p, Z$envelope) # envelope may be NULL return(p) } mctest.progress }) # Do not call this function. # Performs underlying computations envelopeProgressData <- local({ envelopeProgressData <- function(X, fun=Lest, ..., exponent=1, alternative=c("two.sided", "less", "greater"), leaveout=1, scale=NULL, clamp=FALSE, normalize=FALSE, deflate=FALSE, rmin=0, save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE) { alternative <- match.arg(alternative) if(!(leaveout %in% 0:2)) stop("Argument leaveout should equal 0, 1 or 2") ## compute or extract simulated functions X <- envelope(X, fun=fun, ..., alternative=alternative, savefuns=TRUE, savepatterns=savepatterns) Y <- attr(X, "simfuns") ## extract values R <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1] nsim <- ncol(sim) ## choose function as reference has.theo <- ("theo" %in% names(X)) use.theo <- identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { # theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { # theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- with(X, mmean) } else { ## use sample mean of simulations *and* observed reference <- (nsim * with(X, mmean) + obs)/(nsim + 1) } } ## restrict range if(rmin > 0) { if(sum(R >= rmin) < 2) stop("rmin is too large for the available range of r values") nskip <- sum(R < rmin) } else nskip <- 0 ## determine rescaling if any if(is.null(scale)) { scaling <- NULL scr <- 1 } else if(is.function(scale)) { scaling <- scale(R) sname <- "scale(r)" ans <- 
check.nvector(scaling, length(R), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[R > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } scr <- scaling } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) ## compute test statistics if(is.infinite(exponent)) { ## MAD devdata <- cummaxskip(ddat, nskip) devsim <- apply(dsim, 2, cummaxskip, nskip=nskip) if(deflate) { devdata <- scr * devdata devsim <- scr * devsim } testname <- "Maximum absolute deviation test" } else { dR <- c(0, diff(R)) if(clamp || (alternative == "two.sided")) { ## deviations are nonnegative devdata <- cumsumskip(dR * ddat^exponent, nskip) devsim <- apply(dR * dsim^exponent, 2, cumsumskip, nskip=nskip) } else { ## sign of deviations should be retained devdata <- cumsumskip(dR * sign(ddat) * abs(ddat)^exponent, nskip=nskip) devsim <- apply(dR * sign(dsim) * abs(dsim)^exponent, 2, cumsumskip, nskip=nskip) } if(normalize) { devdata <- devdata/R devsim <- sweep(devsim, 1, R, "/") } if(deflate) { devdata <- scr * sign(devdata) * abs(devdata)^(1/exponent) devsim <- scr * sign(devsim) * abs(devsim)^(1/exponent) } testname <- if(exponent == 2) "Diggle-Cressie-Loosmore-Ford test" else if(exponent == 1) "Integral absolute deviation test" else paste("Integrated", ordinal(exponent), "Power Deviation test") } result <- list(R=R, devdata=devdata, devsim=devsim, testname=testname, scaleR=scr, clamp=clamp) if(save.envelope) result$envelope <- X 
return(result) } cumsumskip <- function(x, nskip=0) { if(nskip == 0) cumsum(x) else c(rep(NA, nskip), cumsum(x[-seq_len(nskip)])) } cummaxskip <- function(x, nskip=0) { if(nskip == 0) cummax(x) else c(rep(NA, nskip), cummax(x[-seq_len(nskip)])) } envelopeProgressData }) dg.progress <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, nrank=1, alpha, leaveout=1, interpolate=FALSE, rmin=0, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { env.here <- sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } if(verbose) cat("Computing first-level test data...") ## generate or extract simulated patterns and functions E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, envir.simul=env.here) ## get progress data PD <- envelopeProgressData(E, fun=fun, ..., rmin=rmin, nsim=nsim, exponent=exponent, leaveout=leaveout, verbose=FALSE) ## get first level MC test significance trace T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, leaveout=leaveout, interpolate=interpolate, rmin=rmin, confint=FALSE, verbose=FALSE, ...) R <- T1$R phat <- T1$pest if(verbose) { cat("Done.\nComputing second-level data... 
") state <- list() } ## second level traces simpat <- attr(E, "simpatterns") phat2 <- matrix(, length(R), nsim) for(j in seq_len(nsim)) { simj <- simpat[[j]] sigj <- mctest.sigtrace(simj, fun=fun, nsim=nsimsub, exponent=exponent, interpolate=interpolate, leaveout=leaveout, rmin=rmin, confint=FALSE, verbose=FALSE, ...) phat2[,j] <- sigj$pest if(verbose) state <- progressreport(j, nsim, state=state) } if(verbose) cat("Done.\n") ## Dao-Genton procedure dgcritrank <- 1 + rowSums(phat > phat2) dgcritrank <- pmin(dgcritrank, nsim) devsim.sort <- t(apply(PD$devsim, 1, sort, decreasing=TRUE, na.last=TRUE)) ii <- cbind(seq_along(dgcritrank), dgcritrank) devcrit <- devsim.sort[ii] devdata <- PD$devdata ## create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=devcrit, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . 
~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, paste0(100 * alpha, "%%"), "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") if(savefuns || savepatterns) p <- hasenvelope(p, E) return(p) } spatstat/R/ppx.R0000644000176200001440000003713213515012141013247 0ustar liggesusers# # ppx.R # # class of general point patterns in any dimension # # $Revision: 1.63 $ $Date: 2019/07/21 06:33:14 $ # ppx <- local({ ctype.table <- c("spatial", "temporal", "local", "mark") ctype.real <- c(TRUE, TRUE, FALSE, FALSE) ppx <- function(data, domain=NULL, coord.type=NULL, simplify=FALSE) { data <- as.hyperframe(data) # columns suitable for spatial coordinates suitable <- with(unclass(data), vtype == "dfcolumn" & (vclass == "numeric" | vclass == "integer")) if(is.null(coord.type)) { # assume all suitable columns of data are spatial coordinates # and all other columns are marks. 
ctype <- ifelse(suitable, "spatial", "mark") } else { stopifnot(is.character(coord.type)) stopifnot(length(coord.type) == ncol(data)) ctypeid <- pmatch(coord.type, ctype.table, duplicates.ok=TRUE) # validate if(any(uhoh <- is.na(ctypeid))) stop(paste("Unrecognised coordinate", ngettext(sum(uhoh), "type", "types"), commasep(sQuote(coord.type[uhoh])))) if(any(uhoh <- (!suitable & ctype.real[ctypeid]))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "Coordinate", "Coordinates"), commasep(sQuote(names(data)[uhoh])), ngettext(nuh, "does not", "do not"), "contain real numbers")) } ctype <- ctype.table[ctypeid] } ctype <- factor(ctype, levels=ctype.table) # if(simplify && all(ctype == "spatial")) { # attempt to reduce to ppp or pp3 d <- length(ctype) if(d == 2) { ow <- try(as.owin(domain), silent=TRUE) if(!inherits(ow, "try-error")) { X <- try(as.ppp(as.data.frame(data), W=ow)) if(!inherits(X, "try-error")) return(X) } } else if(d == 3) { bx <- try(as.box3(domain), silent=TRUE) if(!inherits(bx, "try-error")) { m <- as.matrix(as.data.frame(data)) X <- try(pp3(m[,1], m[,2], m[,3], bx)) if(!inherits(X, "try-error")) return(X) } } } out <- list(data=data, ctype=ctype, domain=domain) class(out) <- "ppx" return(out) } ppx }) is.ppx <- function(x) { inherits(x, "ppx") } nobjects.ppx <- npoints.ppx <- function(x) { nrow(x$data) } print.ppx <- function(x, ...) 
{ cat("Multidimensional point pattern\n") sd <- summary(x$data) np <- sd$ncases nama <- sd$col.names cat(paste(np, ngettext(np, "point", "points"), "\n")) if(any(iscoord <- (x$ctype == "spatial"))) cat(paste(sum(iscoord), "-dimensional space coordinates ", paren(paste(nama[iscoord], collapse=",")), "\n", sep="")) if(any(istime <- (x$ctype == "temporal"))) cat(paste(sum(istime), "-dimensional time coordinates ", paren(paste(nama[istime], collapse=",")), "\n", sep="")) if(any(islocal <- (x$ctype == "local"))) cat(paste(sum(islocal), ngettext(sum(islocal), "column", "columns"), "of local coordinates:", commasep(sQuote(nama[islocal])), "\n")) if(any(ismark <- (x$ctype == "mark"))) cat(paste(sum(ismark), ngettext(sum(ismark), "column", "columns"), "of marks:", commasep(sQuote(nama[ismark])), "\n")) if(!is.null(x$domain)) { cat("Domain:\n\t") print(x$domain) } invisible(NULL) } summary.ppx <- function(object, ...) { object } plot.ppx <- function(x, ...) { xname <- short.deparse(substitute(x)) coo <- coords(x, local=FALSE) dom <- x$domain m <- ncol(coo) if(m == 1) { coo <- coo[,1] ran <- diff(range(coo)) ylim <- c(-1,1) * ran/20 do.call(plot.default, resolve.defaults(list(coo, numeric(length(coo))), list(...), list(asp=1, ylim=ylim, axes=FALSE, xlab="", ylab=""))) axis(1, pos=ylim[1]) } else if(m == 2) { if(is.null(dom)) { # plot x, y coordinates only nama <- names(coo) do.call.matched(plot.default, resolve.defaults(list(x=coo[,1], y=coo[,2], asp=1), list(...), list(main=xname), list(xlab=nama[1], ylab=nama[2]))) } else { add <- resolve.defaults(list(...), list(add=FALSE))$add if(!add) { # plot domain, whatever it is do.call(plot, resolve.defaults(list(dom), list(...), list(main=xname))) } # convert to ppp x2 <- ppp(coo[,1], coo[,2], window=as.owin(dom), marks=as.data.frame(marks(x)), check=FALSE) # invoke plot.ppp return(do.call(plot, resolve.defaults(list(x2), list(add=TRUE), list(...)))) } } else if(m == 3) { # convert to pp3 if(is.null(dom)) dom <- 
box3(range(coo[,1]), range(coo[,2]), range(coo[,3])) x3 <- pp3(coo[,1], coo[,2], coo[,3], dom) # invoke plot.pp3 nama <- names(coo) do.call(plot, resolve.defaults(list(x3), list(...), list(main=xname), list(xlab=nama[1], ylab=nama[2], zlab=nama[3]))) } else stop(paste("Don't know how to plot a general point pattern in", ncol(coo), "dimensions")) return(invisible(NULL)) } "[.ppx" <- function (x, i, drop=FALSE, ...) { da <- x$data dom <- x$domain if(!missing(i)) { if(inherits(i, c("boxx", "box3"))) { dom <- i i <- inside.boxx(da, w=i) } da <- da[i, , drop=FALSE] } out <- list(data=da, ctype=x$ctype, domain=dom) class(out) <- "ppx" if(drop) { # remove unused factor levels mo <- marks(out) switch(markformat(mo), none = { }, vector = { if(is.factor(mo)) marks(out) <- factor(mo) }, dataframe = { isfac <- sapply(mo, is.factor) if(any(isfac)) mo[, isfac] <- lapply(mo[, isfac], factor) marks(out) <- mo }, hyperframe = { lmo <- as.list(mo) isfac <- sapply(lmo, is.factor) if(any(isfac)) mo[, isfac] <- as.hyperframe(lapply(lmo[isfac], factor)) marks(out) <- mo }) } return(out) } domain <- function(X, ...) { UseMethod("domain") } domain.ppx <- function(X, ...) { X$domain } coords <- function(x, ...) { UseMethod("coords") } coords.ppx <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE) { ctype <- x$ctype chosen <- (ctype == "spatial" & spatial) | (ctype == "temporal" & temporal) | (ctype == "local" & local) as.data.frame(x$data[, chosen, drop=FALSE]) } coords.ppp <- function(x, ...) 
{ data.frame(x=x$x,y=x$y) } "coords<-" <- function(x, ..., value) { UseMethod("coords<-") } "coords<-.ppp" <- function(x, ..., value) { win <- x$window if(is.null(value)) { # empty pattern return(ppp(window=win)) } value <- as.data.frame(value) if(ncol(value) != 2) stop("Expecting a 2-column matrix or data frame, or two vectors") result <- as.ppp(value, win) marks(result) <- marks(x) return(result) } "coords<-.ppx" <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE, value) { ctype <- x$ctype chosen <- (ctype == "spatial" & spatial) | (ctype == "temporal" & temporal) | (ctype == "local" & local) x$data[, chosen] <- value return(x) } as.hyperframe.ppx <- function(x, ...) { x$data } as.data.frame.ppx <- function(x, ...) { as.data.frame(x$data, ...) } as.matrix.ppx <- function(x, ...) { as.matrix(as.data.frame(x, ...)) } marks.ppx <- function(x, ..., drop=TRUE) { ctype <- x$ctype chosen <- (ctype == "mark") if(!any(chosen)) return(NULL) x$data[, chosen, drop=drop] } "marks<-.ppx" <- function(x, ..., value) { ctype <- x$ctype retain <- (ctype != "mark") coorddata <- x$data[, retain, drop=FALSE] if(is.null(value)) { newdata <- coorddata newctype <- ctype[retain] } else { if(is.matrix(value) && nrow(value) == nrow(x$data)) { # assume matrix is to be treated as data frame value <- as.data.frame(value) } if(!is.data.frame(value) && !is.hyperframe(value)) value <- hyperframe(marks=value) if(is.hyperframe(value) || is.hyperframe(coorddata)) { value <- as.hyperframe(value) coorddata <- as.hyperframe(coorddata) } if(ncol(value) == 0) { newdata <- coorddata newctype <- ctype[retain] } else { if(nrow(coorddata) == 0) value <- value[integer(0), , drop=FALSE] newdata <- cbind(coorddata, value) newctype <- factor(c(as.character(ctype[retain]), rep.int("mark", ncol(value))), levels=levels(ctype)) } } out <- list(data=newdata, ctype=newctype, domain=x$domain) class(out) <- class(x) return(out) } unmark.ppx <- function(X) { marks(X) <- NULL return(X) } markformat.ppx <- 
function(x) { mf <- x$markformat if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } boxx <- function(..., unitname=NULL) { if(length(list(...)) == 0) stop("No data") ranges <- data.frame(...) nama <- names(list(...)) if(is.null(nama) || !all(nzchar(nama))) names(ranges) <- paste("x", 1:ncol(ranges),sep="") if(nrow(ranges) != 2) stop("Data should be vectors of length 2") if(any(unlist(lapply(ranges, diff)) <= 0)) stop("Illegal range: Second element <= first element") out <- list(ranges=ranges, units=as.unitname(unitname)) class(out) <- "boxx" return(out) } as.boxx <- function(..., warn.owin = TRUE) { a <- list(...) n <- length(a) if (n == 0) stop("No arguments given") if (n == 1) { a <- a[[1]] if (inherits(a, "boxx")) return(a) if (inherits(a, "box3")) return(boxx(a$xrange, a$yrange, a$zrange, unitname = as.unitname(a$units))) if (inherits(a, "owin")) { if (!is.rectangle(a) && warn.owin) warning("The owin object does not appear to be rectangular - the bounding box is used!") return(boxx(a$xrange, a$yrange, unitname = as.unitname(a$units))) } if (is.numeric(a)) { if ((length(a)%%2) == 0) return(boxx(split(a, rep(1:(length(a)/2), each = 2)))) stop(paste("Don't know how to interpret", length(a), "numbers as a box")) } if (!is.list(a)) stop("Don't know how to interpret data as a box") } return(do.call(boxx, a)) } print.boxx <- function(x, ...) 
{ m <- ncol(x$ranges) cat(paste(m, "-dimensional box:\n", sep="")) bracket <- function(z) paste("[", paste(signif(z, 5), collapse=", "), "]", sep="") v <- paste(unlist(lapply(x$ranges, bracket)), collapse=" x ") s <- summary(unitname(x)) cat(paste(v, s$plural, s$explain, "\n")) invisible(NULL) } unitname.boxx <- function(x) { as.unitname(x$units) } "unitname<-.boxx" <- function(x, value) { x$units <- as.unitname(value) return(x) } unitname.ppx <- function(x) { unitname(x$domain) } "unitname<-.ppx" <- function(x, value) { d <- x$domain unitname(d) <- value x$domain <- d return(x) } as.owin.boxx <- function(W, ..., fatal=TRUE) { ra <- W$ranges if(length(ra) == 2) return(owin(ra[[1]], ra[[2]])) if(fatal) stop(paste("Cannot interpret box of dimension", length(ra), "as a window")) return(NULL) } sidelengths.boxx <- function(x) { stopifnot(inherits(x, "boxx")) y <- unlist(lapply(x$ranges, diff)) return(y) } volume.boxx <- function(x) { prod(sidelengths(x)) } diameter.boxx <- function(x) { d <- sqrt(sum(sidelengths(x)^2)) return(d) } shortside.boxx <- function(x) { return(min(sidelengths(x))) } eroded.volumes.boxx <- local({ eroded.volumes.boxx <- function(x, r) { len <- sidelengths(x) ero <- sapply(as.list(len), erode1side, r=r) apply(ero, 1, prod) } erode1side <- function(z, r) { pmax.int(0, z - 2 * r)} eroded.volumes.boxx }) runifpointx <- function(n, domain, nsim=1, drop=TRUE) { check.1.integer(n) check.1.integer(nsim) stopifnot(inherits(domain, "boxx")) ra <- domain$ranges d <- length(ra) result <- vector(mode="list", length=nsim) for(i in 1:nsim) { if(n == 0) { coo <- matrix(numeric(0), nrow=0, ncol=d) } else { coo <- mapply(runif, n=rep(n, d), min=ra[1,], max=ra[2,]) if(!is.matrix(coo)) coo <- matrix(coo, ncol=d) } colnames(coo) <- colnames(ra) df <- as.data.frame(coo) result[[i]] <- ppx(df, domain, coord.type=rep("s", d)) } if(nsim == 1 && drop) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } rpoisppx 
<- function(lambda, domain, nsim=1, drop=TRUE) {
  ## (body of rpoisppx; the name binding precedes this chunk)
  ## Poisson number of points with mean lambda * volume, then uniform locations.
  stopifnot(inherits(domain, "boxx"))
  stopifnot(is.numeric(lambda) && length(lambda) == 1 && lambda >= 0)
  n <- rpois(nsim, lambda * volume.boxx(domain))
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim)
    result[[i]] <- runifpointx(n[i], domain)
  if(nsim == 1 && drop) return(result[[1]])
  result <- as.anylist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

## Remove duplicated points from a 'ppx', optionally warning
unique.ppx <- function(x, ..., warn=FALSE) {
  dup <- duplicated(x, ...)
  if(!any(dup)) return(x)
  if(warn) warning(paste(sum(dup), "duplicated points were removed"),
                   call.=FALSE)
  y <- x[!dup]
  return(y)
}

## Logical vector flagging duplicated points (duplicate coordinate rows)
duplicated.ppx <- function(x, ...) {
  dup <- duplicated(as.data.frame(x), ...)
  return(dup)
}

## Index of first duplicated point, or 0 if none (cf. base anyDuplicated)
anyDuplicated.ppx <- function(x, ...) {
  anyDuplicated(as.data.frame(x), ...)
}

## Multiplicity (number of identical copies) of each point
multiplicity.ppx <- function(x) {
  mul <- multiplicity(as.data.frame(x))
  return(mul)
}

## Average intensity of a 'ppx': points (per type, if multitype)
## divided by the volume of the domain
intensity.ppx <- function(X, ...) {
  if(!is.multitype(X)) {
    n <- npoints(X)
  } else {
    mks <- marks(X)
    n <- as.vector(table(mks))
    names(n) <- levels(mks)
  }
  v <- volume(domain(X))
  return(n/v)
}

## Expand a 'boxx' by margins 'left' and 'right' on each side;
## margins are recycled to the dimension of the box
grow.boxx <- function(W, left, right = left){
  W <- as.boxx(W)
  ra <- W$ranges
  d <- length(ra)
  if(any(left < 0) || any(right < 0))
    stop("values of left and right margin must be nonnegative.")
  if(length(left)==1)
    left <- rep(left, d)
  if(length(right)==1)
    right <- rep(right, d)
  if(length(left)!=d || length(right)!=d){
    stop("left and right margin must be either of length 1 or the dimension of the boxx.")
  }
  W$ranges[1,] <- ra[1,]-left
  W$ranges[2,] <- ra[2,]+right
  return(W)
}

## Test whether points lie inside a 'boxx'.
## Coordinates are given either as separate vectors in '...',
## or as a single 'ppx' or 'hyperframe' argument.
inside.boxx <- function(..., w = NULL){
  if(is.null(w))
    stop("Please provide a boxx using the named argument w.")
  w <- as.boxx(w)
  dat <- list(...)
  if(length(dat)==1){
    dat1 <- dat[[1]]
    if(inherits(dat1, "ppx"))
      dat <- coords(dat1)
    if(inherits(dat1, "hyperframe"))
      dat <- as.data.frame(dat1)
  }
  ra <- w$ranges
  if(length(ra)!=length(dat))
    stop("Mismatch between dimension of boxx and number of coordinate vectors.")
  ## Check coord. vectors have equal length
  n <- length(dat[[1]])
  if(any(lengths(dat)!=n))
    stop("Coordinate vectors have unequal length.")
  ## point is inside iff every coordinate is inside its range
  index <- rep(TRUE, n)
  for(i in seq_along(ra)){
    index <- index & inside.range(dat[[i]], ra[[i]])
  }
  return(index)
}

## Spatial dimension of an object; if intrinsic=TRUE, linear networks
## count as 1-dimensional and sphere objects as 2-dimensional
spatdim <- function(X, intrinsic=FALSE) {
  if(intrinsic) {
    if(inherits(X, c("lpp", "linnet", "linim", "linfun", "lintess"))) return(1L)
    if(inherits(X, c("s2pp", "s2", "s2region"))) return(2L)
  }
  if(is.sob(X)) 2L else
  if(inherits(X, "box3")) 3L else
  if(inherits(X, "boxx")) length(X$ranges) else
  if(is.ppx(X)) as.integer(sum(X$ctype == "spatial")) else
  NA_integer_
}
spatstat/R/fardist.R0000644000176200001440000000330213333543255014101 0ustar liggesusers##
## fardist.R
##
## Farthest distance to boundary
##
## $Revision: 1.11 $ $Date: 2017/06/05 10:31:58 $

## Generic: distance from each location to the farthest boundary vertex
fardist <- function(X, ...) {
  UseMethod("fardist")
}

## Method for windows: pixel image of farthest distance (or squared distance)
## from each grid location to the window's boundary vertices, computed in C
fardist.owin <- function(X, ..., squared=FALSE) {
  verifyclass(X, "owin")
  M <- as.mask(X, ...)
  V <- if(is.mask(X)) vertices(M) else vertices(X)
  ## grid geometry passed to the C routines
  nx <- dim(M)[2L]
  ny <- dim(M)[1L]
  x0 <- M$xcol[1L]
  y0 <- M$yrow[1L]
  xstep <- M$xstep
  ystep <- M$ystep
  if(squared) {
    z <- .C("fardist2grid",
            nx = as.integer(nx),
            x0 = as.double(x0),
            xstep = as.double(xstep),
            ny = as.integer(ny),
            y0 = as.double(y0),
            ystep = as.double(ystep),
            np = as.integer(length(V$x)),
            xp = as.double(V$x),
            yp = as.double(V$y),
            dfar = as.double(numeric(nx * ny)),
            PACKAGE = "spatstat")
  } else {
    z <- .C("fardistgrid",
            nx = as.integer(nx),
            x0 = as.double(x0),
            xstep = as.double(xstep),
            ny = as.integer(ny),
            y0 = as.double(y0),
            ystep = as.double(ystep),
            np = as.integer(length(V$x)),
            xp = as.double(V$x),
            yp = as.double(V$y),
            dfar = as.double(numeric(nx * ny)),
            PACKAGE = "spatstat")
  }
  out <- im(z$dfar, xcol=M$xcol, yrow=M$yrow,
            xrange=M$xrange, yrange=M$yrange,
            unitname=unitname(M))
  ## restrict to the original (non-rectangular) window if necessary
  if(!is.rectangle(X))
    out <- out[X, drop=FALSE]
  return(out)
}

## Method for point patterns: farthest distance from each data point
## to the vertices of the window
## (definition continues in the next chunk)
fardist.ppp <- function(X, ..., squared=FALSE) {
  verifyclass(X, "ppp")
  V <- vertices(Window(X))
  D2 <- crossdist(X$x, X$y, V$x, V$y, squared=TRUE)
## (tail of fardist.ppp: take the largest squared vertex distance per point)
  D2max <- apply(D2, 1L, max)
  if(squared) return(D2max) else return(sqrt(D2max))
}
spatstat/R/quadrattest.R0000644000176200001440000004471413532404142015012 0ustar liggesusers#
#  quadrattest.R
#
#  $Revision: 1.62 $ $Date: 2019/08/31 05:07:15 $
#

## Generic: chi-squared / Monte Carlo goodness-of-fit test based on
## quadrat counts
quadrat.test <- function(X, ...) {
  UseMethod("quadrat.test")
}

## Method for point patterns: test CSR (or a given intensity 'lambda')
## using quadrat counts; delegates to quadrat.testEngine
quadrat.test.ppp <-
  function(X, nx=5, ny=nx,
           alternative = c("two.sided", "regular", "clustered"),
           method = c("Chisq", "MonteCarlo"),
           conditional=TRUE, CR=1,
           lambda=NULL, df.est=NULL,
           ...,
           xbreaks=NULL, ybreaks=NULL, tess=NULL,
           nsim=1999)
{
  Xname <- short.deparse(substitute(X))
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  do.call(quadrat.testEngine,
          resolve.defaults(list(X, nx=nx, ny=ny,
                                alternative=alternative,
                                method=method,
                                conditional=conditional,
                                CR=CR,
                                fit=lambda,
                                df.est=df.est,
                                xbreaks=xbreaks,
                                ybreaks=ybreaks,
                                tess=tess,
                                nsim=nsim),
                           list(...),
                           list(Xname=Xname, fitname="CSR")))
}

## Method for split point patterns: test each component then pool
quadrat.test.splitppp <-
  function(X, ..., df=NULL, df.est=NULL, Xname=NULL)
{
  if(is.null(Xname))
    Xname <- short.deparse(substitute(X))
  pool.quadrattest(lapply(X, quadrat.test.ppp, ...),
                   df=df, df.est=df.est, Xname=Xname)
}

## Method for fitted models: goodness-of-fit test of a fitted
## (unmarked) Poisson model, using the model's fitted intensity
quadrat.test.ppm <-
  function(X, nx=5, ny=nx,
           alternative = c("two.sided", "regular", "clustered"),
           method=c("Chisq", "MonteCarlo"),
           conditional=TRUE, CR=1, df.est=NULL,
           ...,
           xbreaks=NULL, ybreaks=NULL, tess=NULL,
           nsim=1999)
{
  fitname <- short.deparse(substitute(X))
  dataname <- paste("data from", fitname)
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  if(!is.poisson.ppm(X))
    stop("Test is only defined for Poisson point process models")
  if(is.marked(X))
    stop("Sorry, not yet implemented for marked point process models")
  do.call(quadrat.testEngine,
          resolve.defaults(list(data.ppm(X), nx=nx, ny=ny,
                                alternative=alternative,
                                method=method,
                                conditional=conditional,
                                CR=CR,
                                xbreaks=xbreaks,
                                ybreaks=ybreaks,
                                tess=tess,
                                nsim=nsim,
                                fit=X,
                                df.est=df.est),
                           list(...),
                           list(Xname=dataname, fitname=fitname)))
}

## Method for precomputed quadrat counts
## (definition continues in the next chunk)
quadrat.test.quadratcount <-
function(X,
           alternative = c("two.sided", "regular", "clustered"),
           method=c("Chisq", "MonteCarlo"),
           conditional=TRUE, CR=1,
           lambda=NULL, df.est=NULL,
           ...,
           nsim=1999)
{
  ## (body of quadrat.test.quadratcount; name binding precedes this chunk)
  trap.extra.arguments(...)
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  quadrat.testEngine(Xcount=X,
                     alternative=alternative,
                     fit=lambda, df.est=df.est,
                     method=method, conditional=conditional, CR=CR,
                     nsim=nsim)
}

## Workhorse for all quadrat.test methods.
## Computes observed and expected quadrat counts under the null model
## ('fit': NULL for CSR, an image/function giving an intensity, or a
## fitted Poisson ppm), then calls X2testEngine.
quadrat.testEngine <- function(X, nx, ny,
                               alternative = c("two.sided", "regular", "clustered"),
                               method=c("Chisq", "MonteCarlo"),
                               conditional=TRUE, CR=1,
                               ...,
                               nsim=1999,
                               Xcount=NULL,
                               xbreaks=NULL, ybreaks=NULL, tess=NULL,
                               fit=NULL, df.est=NULL,
                               Xname=NULL, fitname=NULL) {
  trap.extra.arguments(...)
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  if(method == "MonteCarlo") {
    check.1.real(nsim)
    explain.ifnot(nsim > 0)
  }
  if(!is.null(df.est)) check.1.integer(df.est)
  if(is.null(Xcount))
    Xcount <- quadratcount(X, nx=nx, ny=ny,
                           xbreaks=xbreaks, ybreaks=ybreaks,
                           tess=tess)
  tess <- attr(Xcount, "tess")
  ## determine expected values under model
  normalised <- FALSE
  if(is.null(fit)) {
    ## null hypothesis: complete spatial randomness;
    ## expected counts proportional to tile areas
    nullname <- "CSR"
    if(tess$type == "rect")
      areas <- outer(diff(tess$xgrid), diff(tess$ygrid), "*")
    else
      areas <- unlist(lapply(tiles(tess), area))
    fitmeans <- sum(Xcount) * areas/sum(areas)
    normalised <- TRUE
    df <- switch(method,
                 Chisq = length(fitmeans) - 1,
                 MonteCarlo = NULL)
  } else if(is.im(fit) || inherits(fit, "funxy")) {
    ## null hypothesis: Poisson process with the given intensity image;
    ## expected counts proportional to integrals of intensity over tiles
    nullname <- "Poisson process with given intensity"
    fit <- as.im(fit, W=Window(tess))
    areas <- integral(fit, tess)
    fitmeans <- sum(Xcount) * areas/sum(areas)
    normalised <- TRUE
    df <- switch(method,
                 Chisq = length(fitmeans) - df.est %orifnull% 1,
                 MonteCarlo = NULL)
  } else {
    ## null hypothesis: a fitted Poisson point process model
    if(!is.ppm(fit))
      stop("fit should be a ppm object")
    if(!is.poisson.ppm(fit))
      stop("Quadrat test only supported for Poisson point process models")
    if(is.marked(fit))
      stop("Sorry, not yet implemented for marked point process models")
    nullname <- paste("fitted Poisson model", sQuote(fitname))
    Q <- quad.ppm(fit, drop=TRUE)
    ww <- w.quad(Q)
    lambda <- fitted(fit, drop=TRUE)
    masses <- lambda * ww
    # sum weights of quadrature points in each tile
    if(tess$type == "rect") {
      xx <- x.quad(Q)
      yy <- y.quad(Q)
      xbreaks <- tess$xgrid
      ybreaks <- tess$ygrid
      fitmeans <- rectquadrat.countEngine(xx, yy, xbreaks, ybreaks,
                                          weights=masses)
      fitmeans <- as.vector(t(fitmeans))
    } else {
      V <- tileindex(as.ppp(Q), Z=tess)
      fitmeans <- tapplysum(masses, list(tile=V))
    }
    switch(method,
           Chisq = {
             df <- length(fitmeans) - df.est %orifnull% length(coef(fit))
             if(df < 1)
               stop(paste("Not enough quadrats: degrees of freedom df =", df))
           },
           MonteCarlo = {
             df <- NA
           })
  }
  ## assemble data for test
  OBS <- as.vector(t(as.table(Xcount)))
  EXP <- as.vector(fitmeans)
  if(!normalised)
    EXP <- EXP * sum(OBS)/sum(EXP)
  ## label it
  switch(method,
         Chisq = {
           if(CR == 1) {
             testname <- "Chi-squared test"
             reference <- statname <- NULL
           } else {
             testname <- CressieReadTestName(CR)
             statname <- paste("Test statistic:", CressieReadName(CR))
             reference <- "(p-value obtained from chi-squared distribution)"
           }
         },
         MonteCarlo = {
           testname <- paste(if(conditional) "Conditional" else "Unconditional",
                             "Monte Carlo test")
           statname <- paste("Test statistic:", CressieReadName(CR))
           reference <- NULL
         })
  testblurb <- paste(testname, "of", nullname, "using quadrat counts")
  testblurb <- c(testblurb, statname, reference)
  #' perform test
  result <- X2testEngine(OBS, EXP,
                         method=method, df=df, nsim=nsim,
                         conditional=conditional, CR=CR,
                         alternative=alternative,
                         testname=testblurb, dataname=Xname)
  class(result) <- c("quadrattest", class(result))
  attr(result, "quadratcount") <- Xcount
  return(result)
}

## Cressie-Read power divergence statistic for observed vs expected counts;
## lambda = 1 gives Pearson X2, 0 the likelihood ratio G2, etc.
CressieReadStatistic <- function(OBS, EXP, lambda=1,
                                 normalise=FALSE, named=TRUE) {
  if(normalise) EXP <- sum(OBS) * EXP/sum(EXP)
  y <- if(lambda == 1) sum((OBS - EXP)^2/EXP) else
       if(lambda == 0) 2 * sum(ifelse(OBS > 0, OBS * log(OBS/EXP), 0)) else
       if(lambda == -1) 2 * sum(EXP * log(EXP/OBS)) else
       (2/(lambda * (lambda + 1))) *
         sum(ifelse(OBS > 0, OBS * ((OBS/EXP)^lambda - 1), 0))
  names(y) <- if(named) CressieReadSymbol(lambda) else NULL
  return(y)
}

## Conventional symbol for the Cressie-Read statistic at exponent 'lambda'
CressieReadSymbol <- function(lambda) {
  if(lambda == 1) "X2" else
  if(lambda == 0) "G2" else
  if(lambda == -1/2) "T2" else
  if(lambda == -1) "GM2" else
  if(lambda == -2) "NM2" else "CR"
}

## Human-readable name of the Cressie-Read statistic
CressieReadName <- function(lambda) {
  if(lambda == 1) "Pearson X2 statistic" else
  if(lambda == 0) "likelihood ratio test statistic G2" else
  if(lambda == -1/2) "Freeman-Tukey statistic T2" else
  if(lambda == -1) "modified likelihood ratio test statistic GM2" else
  if(lambda == -2) "Neyman modified X2 statistic NM2" else
  paste("Cressie-Read statistic",
        paren(paste("lambda =",
                    if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda)
              )
        )
}

## Human-readable name of the test based on the Cressie-Read statistic
CressieReadTestName <- function(lambda) {
  if(lambda == 1) "Chi-squared test" else
  if(lambda == 0) "Likelihood ratio test" else
  if(lambda == -1/2) "Freeman-Tukey test" else
  if(lambda == -1) "Modified likelihood ratio test" else
  if(lambda == -2) "Neyman modified chi-squared test" else
  paste("Cressie-Read power divergence test",
        paren(paste("lambda =",
                    if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda)
              )
        )
}

## Perform the chi-squared or Monte Carlo test given observed and
## expected counts; returns an object of class 'htest'
X2testEngine <- function(OBS, EXP, ...,
                         method=c("Chisq", "MonteCarlo"),
                         CR=1, df=NULL, nsim=NULL,
                         conditional, alternative,
                         testname, dataname) {
  method <- match.arg(method)
  if(method == "Chisq" && any(EXP < 5))
    warning(paste("Some expected counts are small;",
                  "chi^2 approximation may be inaccurate"),
            call.=FALSE)
  X2 <- CressieReadStatistic(OBS, EXP, CR)
  # conduct test
  switch(method,
         Chisq = {
           if(!is.null(df)) names(df) <- "df"
           pup <- pchisq(X2, df, lower.tail=FALSE)
           plo <- pchisq(X2, df, lower.tail=TRUE)
           PVAL <- switch(alternative,
                          regular = plo,
                          clustered = pup,
                          two.sided = 2 * min(pup, plo))
         },
         MonteCarlo = {
           nsim <- as.integer(nsim)
           if(conditional) {
             ## conditional on the total count: multinomial simulation
             npts <- sum(OBS)
             p <- EXP/sum(EXP)
             SIM <- rmultinom(n=nsim,size=npts,prob=p)
           } else {
             ## unconditional: independent Poisson counts
             ne <- length(EXP)
             SIM <- matrix(rpois(nsim*ne,EXP),nrow=ne)
           }
           simstats <- apply(SIM, 2, CressieReadStatistic,
                             EXP=EXP, lambda=CR, normalise=!conditional)
           ## break ties in the simulated statistics
           if(anyDuplicated(simstats))
             simstats <- jitter(simstats)
           phi <- (1 + sum(simstats >= X2))/(1+nsim)
           plo <- (1 + sum(simstats <= X2))/(1+nsim)
           PVAL <- switch(alternative,
                          clustered = phi,
                          regular = plo,
                          two.sided = min(1, 2 * min(phi,plo)))
         })
  result <- structure(list(statistic = X2,
                           parameter = df,
                           p.value = PVAL,
                           method = testname,
                           data.name = dataname,
                           alternative = alternative,
                           observed = OBS,
                           expected = EXP,
                           residuals = (OBS - EXP)/sqrt(EXP),
                           CR = CR,
                           method.key = method),
                      class = "htest")
  return(result)
}

## Print method for 'quadrattest' objects: htest printout plus the
## quadrat layout (or layouts, for a pooled test)
print.quadrattest <- function(x, ...) {
  NextMethod("print")
  single <- is.atomicQtest(x)
  if(!single)
    splat("Pooled test")
  if(waxlyrical('gory')) {
    if(single) {
      cat("Quadrats: ")
    } else {
      splat("Quadrats of component tests:")
    }
    do.call(print,
            resolve.defaults(list(x=as.tess(x)),
                             list(...),
                             list(brief=TRUE)))
  }
  return(invisible(NULL))
}

## Plot method: draw the tessellation with observed count, expected count
## and Pearson residual printed inside each tile
plot.quadrattest <- local({

  plot.quadrattest <- function(x, ..., textargs=list()) {
    xname <- short.deparse(substitute(x))
    if(!is.atomicQtest(x)) {
      # pooled test - plot the original tests
      tests <- extractAtomicQtests(x)
      do.call(plot,
              resolve.defaults(list(x=tests),
                               list(...),
                               list(main=xname)))
      return(invisible(NULL))
    }
    Xcount <- attr(x, "quadratcount")
    # plot tessellation
    tess <- as.tess(Xcount)
    do.call(plot.tess,
            resolve.defaults(list(tess),
                             list(...),
                             list(main=xname)))
    # compute locations for text
    til <- tiles(tess)
    ok <- sapply(til, haspositivearea)
    incircles <- lapply(til[ok], incircle)
    x0 <- sapply(incircles, getElement, name="x")
    y0 <- sapply(incircles, getElement, name="y")
    ra <- sapply(incircles, getElement, name="r")
    # plot observed counts
    # NOTE(review): sqrt(2)/2 is cos(45 degrees), not cos(30) as the
    # variable name suggests - confirm the intended text offsets
    cos30 <- sqrt(2)/2
    sin30 <- 1/2
    f <- 0.4
    dotext(-f * cos30, f * sin30,
           as.vector(t(as.table(Xcount)))[ok],
           x0, y0, ra, textargs, adj=c(1,0), ...)
    # plot expected counts
    dotext(f * cos30, f * sin30,
           round(x$expected,1)[ok],
           x0, y0, ra, textargs, adj=c(0,0), ...)
    # plot Pearson residuals
    dotext(0, -f, signif(x$residuals,2)[ok],
           x0, y0, ra, textargs, ...)
return(invisible(NULL))
  }

  ## write one batch of labels at an offset (dx,dy)*radius inside each tile
  dotext <- function(dx, dy, values, x0, y0, ra, textargs, ...) {
    do.call.matched(text.default,
                    resolve.defaults(list(x=x0 + dx * ra,
                                          y = y0 + dy * ra),
                                     list(labels=paste(as.vector(values))),
                                     textargs,
                                     list(...)),
                    funargs=graphicsPars("text"))
  }

  ## does this tile exist and have positive area?
  haspositivearea <- function(x) { !is.null(x) && area(x) > 0 }

  plot.quadrattest
})

######## pooling multiple quadrat tests into a quadrat test

## Combine several 'quadrattest' objects (given individually or as one
## list) into a single pooled test on the union of their quadrats
pool.quadrattest <- function(...,
                             df=NULL, df.est=NULL, nsim=1999,
                             Xname=NULL, CR=NULL) {
  argh <- list(...)
  # NOTE(review): '+' of two logicals makes this condition TRUE when
  # EITHER argument is supplied, not only when both are - confirm
  # whether '&&' was intended
  if(!is.null(df) + !is.null(df.est))
    stop("Arguments df and df.est are incompatible")
  if(all(unlist(lapply(argh, inherits, what="quadrattest")))) {
    # Each argument is a quadrattest object
    tests <- argh
  } else if(length(argh) == 1 &&
            is.list(arg1 <- argh[[1]]) &&
            all(unlist(lapply(arg1, inherits, "quadrattest")))) {
    # There is just one argument, which is a list of quadrattests
    tests <- arg1
  } else stop("Each entry in the list must be a quadrat test")
  # data from all cells in all tests
  OBS <- unlist(lapply(tests, getElement, name="observed"))
  EXP <- unlist(lapply(tests, getElement, name="expected"))
  # RES <- unlist(lapply(tests, getElement, name="residuals"))
  # STA <- unlist(lapply(tests, getElement, name="statistic"))
  # information about each test
  Mkey <- unlist(lapply(tests, getElement, name="method.key"))
  Testname <- lapply(tests, getElement, name="method")
  Alternative <- unlist(lapply(tests, getElement, name="alternative"))
  Conditional <- unlist(lapply(tests, getElement, name="conditional"))
  # name of data
  if(is.null(Xname)) {
    Nam <- unlist(lapply(tests, getElement, name="data.name"))
    Xname <- commasep(sQuote(Nam))
  }
  # name of test
  testname <- unique(Testname)
  method.key <- unique(Mkey)
  if(length(testname) > 1)
    stop(paste("Cannot combine different types of tests:",
               commasep(sQuote(method.key))))
  testname <- testname[[1]]
  # alternative hypothesis
  alternative <- unique(Alternative)
  if(length(alternative) > 1)
    stop(paste("Cannot combine tests with different alternatives:",
               commasep(sQuote(alternative))))
  # conditional tests
  conditional <- any(Conditional)
  if(conditional)
    stop("Sorry, not implemented for conditional tests")
  # Cressie-Read exponent
  if(is.null(CR)) {
    CR <- unlist(lapply(tests, getElement, name="CR"))
    CR <- unique(CR)
    if(length(CR) > 1) {
      warning("Tests used different values of CR; assuming CR=1")
      CR <- 1
    }
  }
  if(method.key == "Chisq") {
    # determine degrees of freedom
    if(is.null(df)) {
      if(!is.null(df.est)) {
        # total number of observations minus number of fitted parameters
        df <- length(OBS) - df.est
      } else {
        # total degrees of freedom of tests
        # implicitly assumes independence of tests
        PAR <- unlist(lapply(tests, getElement, name="parameter"))
        df <- sum(PAR)
      }
    }
    # validate df
    if(df < 1)
      stop(paste("Degrees of freedom = ", df))
    names(df) <- "df"
  }
  # perform test
  result <- X2testEngine(OBS, EXP,
                         method=method.key, df=df, nsim=nsim,
                         conditional=conditional, CR=CR,
                         alternative=alternative,
                         testname=testname, dataname=Xname)
  # add info
  class(result) <- c("quadrattest", class(result))
  attr(result, "tests") <- as.solist(tests)
  # there is no quadratcount attribute
  return(result)
}

## Is x a single (non-pooled) quadrat test?
is.atomicQtest <- function(x) {
  inherits(x, "quadrattest") && is.null(attr(x, "tests"))
}

## Recursively flatten a (possibly pooled) quadrat test into the list
## of its atomic component tests
extractAtomicQtests <- function(x) {
  if(is.atomicQtest(x)) return(list(x))
  stopifnot(inherits(x, "quadrattest"))
  tests <- attr(x, "tests")
  y <- lapply(tests, extractAtomicQtests)
  z <- do.call(c, y)
  return(as.solist(z))
}

## Tessellation(s) underlying a quadrat test: one tessellation for an
## atomic test, a list of tessellations for a pooled test
as.tess.quadrattest <- function(X) {
  if(is.atomicQtest(X)) {
    Y <- attr(X, "quadratcount")
    return(as.tess(Y))
  }
  tests <- extractAtomicQtests(X)
  return(as.solist(lapply(tests, as.tess.quadrattest)))
}

## Window of an atomic quadrat test; pooled tests span several windows
## and cannot be converted
as.owin.quadrattest <- function(W, ..., fatal=TRUE) {
  if(is.atomicQtest(W))
    return(as.owin(as.tess(W), ..., fatal=fatal))
  gezeur <- paste("Cannot convert quadrat test result to a window;",
                  "it contains data for several windows")
  if(fatal) stop(gezeur) else warning(gezeur)
  return(NULL)
}

## Domain/Window accessors for quadrat tests
## (definition continues in the next chunk)
domain.quadrattest <- Window.quadrattest <-
  function(X, ...) { as.owin(X) }

## The shift method is undocumented.
## It is only needed in plot.listof etc
shift.quadrattest <- function(X, ...) {
  if(is.atomicQtest(X)) {
    attr(X, "quadratcount") <- qc <- shift(attr(X, "quadratcount"), ...)
    attr(X, "lastshift") <- getlastshift(qc)
  } else {
    tests <- extractAtomicQtests(X)
    attr(X, "tests") <- te <- lapply(tests, shift, ...)
    attr(X, "lastshift") <- getlastshift(te[[1]])
  }
  return(X)
}
spatstat/R/newformula.R0000644000176200001440000000102713333543255014626 0ustar liggesusers#'
#' newformula.R
#'
#' $Revision: 1.2 $ $Date: 2017/09/29 09:08:51 $
#'
#' Update formula and expand polynomial

## Apply the change formula 'change' to the formula 'old', evaluating
## each in its own environment, optionally expanding polynom() terms
newformula <- function(old, change, eold, enew) {
  old <- if(is.null(old)) ~1 else eval(old, eold)
  change <- if(is.null(change)) ~1 else eval(change, enew)
  old <- as.formula(old, env=eold)
  change <- as.formula(change, env=enew)
  if(spatstat.options("expand.polynom")) {
    old <- expand.polynom(old)
    change <- expand.polynom(change)
  }
  answer <- update.formula(old, change)
  return(answer)
}
spatstat/R/nncross3D.R0000644000176200001440000001615513333543255014333 0ustar liggesusers#
#   nncross3D.R
#
#   $Revision: 1.8 $  $Date: 2017/06/05 10:31:58 $
#
#  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013
#  Licence: GNU Public Licence >= 2

## For each point of the 3D pattern X, find the k-th nearest point of Y
## (distance and/or index), optionally excluding identical points via
## the id vectors iX, iY.  The computation is delegated to C code that
## requires points sorted by one coordinate.
nncross.pp3 <- function(X, Y, iX=NULL, iY=NULL,
                        what = c("dist", "which"),
                        ...,
                        k = 1,
                        sortby=c("range", "var", "x", "y", "z"),
                        is.sorted.X = FALSE,
                        is.sorted.Y = FALSE) {
  stopifnot(is.pp3(Y))
  sortby <- match.arg(sortby)
  what   <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
  want.dist  <- "dist" %in% what
  want.which <- "which" %in% what
  want.both  <- want.dist && want.which
  if(!missing(k)) {
    # k can be a single integer or an integer vector
    if(length(k) == 0)
      stop("k is an empty vector")
    else if(length(k) == 1) {
      if(k != round(k) || k <= 0)
        stop("k is not a positive integer")
    } else {
      if(any(k != round(k)) || any(k <= 0))
        stop(paste("some entries of the vector",
                   sQuote("k"), "are not positive integers"))
    }
  }
  k <- as.integer(k)
  kmax <- max(k)
  nk <- length(k)
  # trivial cases
  nX <- npoints(X)
  nY <- nobjects(Y)
  # deal with null cases
  if(nX == 0)
    return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk),
                              which=matrix(0L, nrow=0, ncol=nk))[what]))
  if(nY == 0)
    return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk),
                              which=matrix(NA, nrow=nX, ncol=nk))[what]))
  if(is.null(iX) != is.null(iY))
    stop("If one of iX, iY is given, then both must be given")
  exclude <- (!is.null(iX) || !is.null(iY))
  if(exclude) {
    stopifnot(is.integer(iX) && is.integer(iY))
    if(length(iX) != nX)
      stop("length of iX does not match the number of points in X")
    if(length(iY) != nY)
      stop("length of iY does not match the number of points in Y")
  }
  if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y", "z")))
    stop(paste("If data are already sorted,",
               "the sorting coordinate must be specified explicitly",
               "using sortby = \"x\" or \"y\" or \"z\""))
  # decide which coordinate to sort on
  switch(sortby,
         range = {
           s <- sidelengths(as.box3(Y))
           sortcoord <- c("x", "y", "z")[which.min(s)]
         },
         var = {
           v <- apply(coords(Y), 2, var)
           sortcoord <- c("x", "y", "z")[which.min(v)]
         },
         x={ sortcoord <- "x" },
         y={ sortcoord <- "y" },
         z={ sortcoord <- "z" }
         )
  # The C code expects points to be sorted by z coordinate.
  XX <- coords(X)
  YY <- coords(Y)
  switch(sortcoord,
         x = {
           # rotate x axis to z axis
           XX <- XX[, c(3,2,1)]
           YY <- YY[, c(3,2,1)]
         },
         y = {
           # rotate y axis to z axis
           XX <- XX[, c(3,1,2)]
           YY <- YY[, c(3,1,2)]
         },
         z = { })
  # sort only if needed
  if(!is.sorted.X){
    oX <- fave.order(XX[,3])
    XX <- XX[oX, , drop=FALSE]
    if(exclude) iX <- iX[oX]
  }
  if (!is.sorted.Y){
    oY <- fave.order(YY[,3])
    YY <- YY[oY, , drop=FALSE]
    if(exclude) iY <- iY[oY]
  }
  # number of neighbours that are well-defined
  kmaxcalc <- min(nY, kmax)
  if(kmaxcalc == 1) {
    # ............... single nearest neighbour ..................
    # call C code
    nndv <- if(want.dist) numeric(nX) else numeric(1)
    nnwh <- if(want.which) integer(nX) else integer(1)
    if(!exclude) iX <- iY <- integer(1)
    ## upper bound on any distance, used as a sentinel in C
    huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y)))
    z <- .C("nnX3Dinterface",
            n1=as.integer(nX),
            x1=as.double(XX[,1]),
            y1=as.double(XX[,2]),
            z1=as.double(XX[,3]),
            id1=as.integer(iX),
            n2=as.integer(nY),
            x2=as.double(YY[,1]),
            y2=as.double(YY[,2]),
            z2=as.double(YY[,3]),
            id2=as.integer(iY),
            exclude = as.integer(exclude),
            wantdist = as.integer(want.dist),
            wantwhich = as.integer(want.which),
            nnd=as.double(nndv),
            nnwhich=as.integer(nnwh),
            huge=as.double(huge),
            PACKAGE = "spatstat")
    if(want.which) {
      # conversion to R indexing is done in C code
      nnwcode <- z$nnwhich
      if(any(uhoh <- (nnwcode == 0))) {
        warning("Internal error: NA's produced in nncross()$which")
        nnwcode[uhoh] <- NA
      }
    }
    # reinterpret in original ordering
    if(is.sorted.X){
      if(want.dist) nndv <- z$nnd
      if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode]
    } else {
      if(want.dist) nndv[oX] <- z$nnd
      if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode]
    }
    if(want.both) return(data.frame(dist=nndv, which=nnwh))
    return(if(want.dist) nndv else nnwh)
  } else {
    # ............... k nearest neighbours ..................
    # call C code
    nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1)
    nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1)
    if(!exclude) iX <- iY <- integer(1)
    huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y)))
    z <- .C("knnX3Dinterface",
            n1=as.integer(nX),
            x1=as.double(XX[,1]),
            y1=as.double(XX[,2]),
            z1=as.double(XX[,3]),
            id1=as.integer(iX),
            n2=as.integer(nY),
            x2=as.double(YY[,1]),
            y2=as.double(YY[,2]),
            z2=as.double(YY[,3]),
            id2=as.integer(iY),
            kmax=as.integer(kmaxcalc),
            exclude = as.integer(exclude),
            wantdist = as.integer(want.dist),
            wantwhich = as.integer(want.which),
            nnd=as.double(nndv),
            nnwhich=as.integer(nnwh),
            huge=as.double(huge),
            PACKAGE = "spatstat")
    # extract results
    nnD <- z$nnd
    nnW <- z$nnwhich
    # map 0 to NA
    if(want.which && any(uhoh <- (nnW == 0))) {
      nnW[uhoh] <- NA
      if(want.dist) nnD[uhoh] <- Inf
    }
    # reinterpret indices in original ordering
    if(!is.sorted.Y) nnW <- oY[nnW]
    # reform as matrices
    NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
    NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
    if(!is.sorted.X){
      # rearrange rows to correspond to original ordering of points
      if(want.dist) NND[oX, ] <- NND
      if(want.which) NNW[oX, ] <- NNW
    }
    # the return value should correspond to the original vector k
    if(kmax > kmaxcalc) {
      # add columns of NA / Inf
      kextra <- kmax - kmaxcalc
      if(want.dist)
        NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra))
      if(want.which)
        NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra))
    }
    if(length(k) < kmax) {
      # select only the specified columns
      if(want.dist) NND <- NND[, k, drop=TRUE]
      if(want.which) NNW <- NNW[, k, drop=TRUE]
    }
    result <- as.data.frame(list(dist=NND, which=NNW)[what])
    if(ncol(result) == 1)
      result <- result[, , drop=TRUE]
    return(result)
  }
}
spatstat/R/ppmclass.R0000644000176200001440000007605113333543255014300 0ustar liggesusers#
#	ppmclass.R
#
#	Class 'ppm' representing fitted point process models.
#
#
#	$Revision: 2.145 $	$Date: 2018/06/27 04:12:06 $
#
#       An object of class 'ppm' contains the following:
#
#            $method           model-fitting method (currently "mpl")
#
#            $coef             vector of fitted regular parameters
#                              as given by coef(glm(....))
#
#            $trend            the trend formula
#                              or NULL
#
#            $interaction      the interaction family
#                              (an object of class 'interact') or NULL
#
#            $Q                the quadrature scheme used
#
#            $maxlogpl         the maximised value of log pseudolikelihood
#
#            $internal         list of internal calculation results
#
#            $correction       name of edge correction method used
#            $rbord            erosion distance for border correction (or NULL)
#
#            $the.call         the originating call to ppm()
#
#            $the.version      version of mpl() which yielded the fit
#
#
#------------------------------------------------------------------------

## Is x a fitted point process model?
is.ppm <- function(x) { inherits(x, "ppm") }

## Print method for fitted point process models.
## 'what' selects which sections to print; the standard-error section is
## computed only when requested or when spatstat.options allow it.
print.ppm <-
  function(x, ...,
           what=c("all", "model", "trend", "interaction", "se", "errors")) {
  verifyclass(x, "ppm")
  misswhat <- missing(what)
  opts <- c("model", "trend", "interaction", "se", "errors")
  what <- match.arg(what, c("all", opts), several.ok=TRUE)
  if("all" %in% what) what <- opts
  np <- length(coef(x))
  terselevel <- spatstat.options("terse")
  digits <- getOption('digits')
  # If SE was explicitly requested, calculate it.
  # Otherwise, do it only if the model is Poisson (by default)
  do.SE <- force.no.SE <- force.SE <- FALSE
  if(np == 0) {
    force.no.SE <- TRUE
  } else if(!is.null(x$internal$VB)) {
    force.no.SE <- TRUE
  } else if(!misswhat && ("se" %in% what)) {
    force.SE <- TRUE
  } else switch(spatstat.options("print.ppm.SE"),
                always = { force.SE <- TRUE },
                never  = { force.no.SE <- TRUE },
                poisson = {
                  do.SE <- is.poisson(x) &&
                           !identical(x$fitter, "gam") &&
                           (!is.null(x$varcov) || x$method != "logi") &&
                           waxlyrical("extras", terselevel)
                })
  do.SE <- (do.SE || force.SE) && !force.no.SE
  s <- summary.ppm(x, quick=if(do.SE) FALSE else "no variances")
  notrend    <- s$no.trend
  # stationary <- s$stationary
  poisson    <- s$poisson
  markeddata <- s$marked
  multitype  <- s$multitype
  # markedpoisson <- poisson && markeddata
  csr <- poisson && notrend && !markeddata
  special <- csr && all(c("model", "trend") %in% what)
  if(special) {
    ## ---------- Trivial/special cases -----------------------
    splat("Stationary Poisson process")
    cat("Intensity:", signif(s$trend$value, digits), fill=TRUE)
  } else {
    ## ----------- Print model type -------------------
    if("model" %in% what) {
      splat(s$name)
      parbreak(terselevel)
      if(markeddata) mrk <- s$entries$marks
      if(multitype) {
        splat(paste("Possible marks:", commasep(sQuote(levels(mrk)))))
        parbreak(terselevel)
      }
    }
    ## ----- trend --------------------------
    if("trend" %in% what) {
      if(!notrend) {
        splat("Log", if(poisson) "intensity: " else "trend: ",
              pasteFormula(s$trend$formula))
        parbreak(terselevel)
      }
      if(waxlyrical('space', terselevel) || !do.SE) {
        ## print trend coefficients, unless redundant and space is tight
        tv <- s$trend$value
        if(length(tv) == 0)
          splat("[No trend coefficients]")
        else {
          thead <- paste0(s$trend$label, ":")
          if(is.list(tv)) {
            splat(thead)
            for(i in seq_along(tv))
              print(tv[[i]])
          } else if(is.numeric(tv) && length(tv) == 1) {
            ## single number: append to end of current line
            tvn <- names(tv)
            tveq <- if(is.null(tvn)) "\t" else paste(" ", tvn, "= ")
            splat(paste0(thead, tveq, signif(tv, digits)))
          } else {
            ## some other format
            splat(thead)
            print(tv)
          }
        }
        parbreak(terselevel)
      }
    }
    ## ----- irregular parameters supplied via covfunargs -----
    if(waxlyrical("space", terselevel) &&
       !is.null(cfa <- s$covfunargs) && length(cfa) > 0) {
      cfafitter <- s$cfafitter
      if(is.null(cfafitter)) {
        cat("Covariate", "function", "arguments", "(covfunargs)",
            "provided:", fill=TRUE)
      } else {
        cat("Irregular", "parameters", "(covfunargs)",
            "fitted", "by", paste0(sQuote(cfafitter), ":"),
            fill=TRUE)
      }
      for(i in seq_along(cfa)) {
        cat(paste(names(cfa)[i], "= "))
        cfai <- cfa[[i]]
        if(is.numeric(cfai) && length(cfai) == 1) {
          cfai <- signif(cfai, digits)
          cat(paste(cfai, "\n"))
        } else print(cfai)
      }
    }
  }
  # ---- Interaction ----------------------------
  if("interaction" %in% what) {
    if(!poisson) {
      print(s$interaction, family=FALSE, banner=FALSE,
            brief=!waxlyrical("extras"))
      parbreak(terselevel)
    }
  }
  # ----- parameter estimates with SE and 95% CI --------------------
  if(waxlyrical("extras", terselevel) &&
     ("se" %in% what) && (np > 0)) {
    if(!is.null(cose <- s$coefs.SE.CI)) {
      print(cose, digits=digits)
    } else if(do.SE) {
      # standard error calculation failed
      splat("Standard errors unavailable; variance-covariance matrix is singular")
    } else if(!force.no.SE) {
      # standard error was voluntarily omitted
      if(waxlyrical('space', terselevel))
        splat("For standard errors, type coef(summary(x))\n")
    }
  }
  # ---- Warnings issued in mpl.prepare  ---------------------
  if(waxlyrical("errors", terselevel) && "errors" %in% what) {
    probs <- s$problems
    if(!is.null(probs) && is.list(probs) && (length(probs) > 0))
      lapply(probs,
             function(x) {
               if(is.list(x) && !is.null(p <- x$print))
                 splat(paste("Problem:\n", p, "\n\n"))
             })
    if(s$old)
      warning(paste("Model fitted by old spatstat version", s$version))
    # ---- Algorithm status ----------------------------
    fitter <- s$fitter
    converged <- s$converged
    if(!is.null(fitter) && fitter %in% c("glm", "gam") && !converged)
      splat("*** Fitting algorithm for", sQuote(fitter),
            "did not converge ***")
  }
  if(waxlyrical("extras", terselevel) && s$projected) {
    parbreak()
    splat("Fit was emended to obtain a valid point process model")
  }
  if(identical(s$valid, FALSE) && waxlyrical("errors", terselevel)) {
    parbreak()
    splat("*** Model is not valid ***")
    if(!all(is.finite(s$entries$coef))) {
      splat("*** Some coefficients are NA or Inf ***")
    } else {
      splat("*** Interaction parameters are outside valid range ***")
    }
  } else if(is.na(s$valid) && waxlyrical("extras", terselevel)) {
    parbreak()
    splat("[Validity of model could not be checked]")
  }
  return(invisible(NULL))
}

## Extract the quadrature scheme of a fitted model (also accepts kppm
## and lppm wrappers); optionally drop invalid points and/or clip to
## the border-corrected window
quad.ppm <- function(object, drop=FALSE, clip=FALSE) {
  if(!is.ppm(object)) {
    if(is.kppm(object)) object <- object$po else
    if(is.lppm(object)) object <- object$fit else
    stop("object is not of class ppm, kppm or lppm")
  }
  Q <- object$Q
  if(is.null(Q)) return(Q)
  if(drop || clip) {
    ok <- getglmsubset(object)
    if(!is.null(ok)) Q <- Q[ok]
  }
  if(clip && object$correction == "border") {
    Wminus <- erosion(as.owin(object), object$rbord)
    Q <- Q[Wminus]
  }
  return(Q)
}

## Data point pattern to which the model was fitted
data.ppm <- function(object) {
  verifyclass(object, "ppm")
  object$Q$data
}

## Dummy points of the quadrature scheme
dummy.ppm <- function(object, drop=FALSE) {
  return(quad.ppm(object, drop=drop)$dummy)
}

# method for 'coef'
coef.ppm <- function(object, ...)
{
  verifyclass(object, "ppm")
  object$coef
}

## Was an internal glm/gam fit stored with the model?
hasglmfit <- function(object) {
  return(!is.null(object$internal$glmfit))
}

## Retrieve the internal glm/gam fit, substituting the model's own
## coefficients when the fitting method was not "mpl"
getglmfit <- function(object) {
  verifyclass(object, "ppm")
  glmfit <- object$internal$glmfit
  if(is.null(glmfit))
    return(NULL)
  if(object$method != "mpl")
    glmfit$coefficients <- object$coef
  return(glmfit)
}

## Data frame used in the internal glm/gam fit, optionally restricted
## to the rows actually used in fitting
getglmdata <- function(object, drop=FALSE) {
  verifyclass(object, "ppm")
  gd <- object$internal$glmdata
  if(!drop) return(gd)
  return(gd[getglmsubset(object), , drop=FALSE])
}

## Logical subset of quadrature points used in the internal fit
## (the flag column differs between "logi" and "mpl" methods)
getglmsubset <- function(object) {
  gd <- object$internal$glmdata
  if(object$method=="logi")
    return(gd$.logi.ok)
  return(gd$.mpl.SUBSET)
}

getppmdatasubset <- function(object) {
  # Equivalent to getglmsubset(object)[is.data(quad.ppm(object))]
  # but also works for models fitted exactly, etc
  #
  if(object$method %in% c("mpl", "ho")) {
    sub <- getglmsubset(object)
    if(!is.null(sub)) {
      Z <- is.data(quad.ppm(object))
      return(sub[Z])
    }
  }
  ## fall back: reconstruct the subset from the border correction
  X <- data.ppm(object)
  sub <- if(object$correction == "border") {
    (bdist.points(X) >= object$rbord)
  } else rep(TRUE, npoints(X))
  return(sub)
}

## Data frame of the original covariate values at the quadrature points
getppmOriginalCovariates <- function(object) {
  df <- as.data.frame(as.ppp(quad.ppm(object)))
  cova <- object$covariates
  if(length(cova) > 0) {
    df2 <- mpl.get.covariates(object$covariates,
                              union.quad(quad.ppm(object)),
                              "quadrature points",
                              object$covfunargs)
    df <- cbind(df, df2)
  }
  return(df)
}

# ??? method for 'effects' ???

## Generic: is a fitted model a valid point process?
valid <- function(object, ...) {
  UseMethod("valid")
}

## Method for ppm objects
## (body continues in the next chunk)
valid.ppm <- function(object, warn=TRUE, ...)
{ verifyclass(object, "ppm") coeffs <- coef(object) # ensure all coefficients are fitted, and finite if(!all(is.finite(coeffs))) return(FALSE) # inspect interaction inte <- object$interaction if(is.poisson(object)) return(TRUE) # Poisson process # extract fitted interaction coefficients Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset Icoeffs <- coeffs[Vnames[!IsOffset]] # check interaction checker <- inte$valid if(is.null(checker) || !newstyle.coeff.handling(inte)) { if(warn) warning("Internal error: unable to check validity of model") return(NA) } answer <- checker(Icoeffs, inte) return(answer) } emend <- function(object, ...) { UseMethod("emend") } emend.ppm <- project.ppm <- local({ tracemessage <- function(depth, ...) { if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") splat(paste0(spacer, marker, " ", paste(...))) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } emend.ppm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "ppm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.ppm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # First ensure trend coefficients are all finite coeffs <- coef(object) # Which coefficients are trend coefficients coefnames <- names(coeffs) internames <- object$internal$Vnames trendnames <- coefnames[!(coefnames %in% internames)] # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") istrend <- (coef2term > 0) & (coefnames %in% trendnames) # Identify non-finite trend coefficients bad <- istrend & 
!is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } # Now handle interaction inte <- object$interaction if(is.null(inte)) { tracemessage(td, "No interaction to check.") leaving(td) return(object) } tracemessage(td, "Inspecting interaction terms.") proj <- inte$project if(is.null(proj)) { whinge <- "Internal error: interaction has no projection operator" if(fatal) stop(whinge) warning(whinge) leaving(td) return(object) } # ensure the same edge correction is used! 
correction <- object$correction rbord <- object$rbord # apply projection coef.orig <- coeffs <- coef(object) Vnames <- object$internal$Vnames Icoeffs <- coeffs[Vnames] change <- proj(Icoeffs, inte) if(is.null(change)) { tracemessage(td, "Interaction does not need updating.") leaving(td) return(object) } tracemessage(td, "Interaction is not valid.") if(is.numeric(change)) { tracemessage(td, "Interaction coefficients updated without re-fitting.") # old style: 'project' returned a vector of updated coefficients Icoeffs <- change # tweak interaction coefficients object$coef[Vnames] <- Icoeffs # recompute fitted interaction object$fitin <- NULL object$fitin <- fitin(object) } else if(is.interact(change)) { # new style: 'project' returns an interaction if(trace) { tracemessage(td, "Interaction changed to:") print(change) } # refit the whole model # (using the same edge correction) # (and the same quadrature scheme) newobject <- update(object, interaction=change, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- emend.ppm(newobject, fatal=fatal, trace=tdnext) object <- newobject } else if(is.list(change) && all(unlist(lapply(change, is.interact)))) { # new style: 'project' returns a list of candidate interactions nchange <- length(change) tracemessage(td, "Considering", nchange, ngettext(nchange, "submodel", "submodels")) bestobject <- NULL for(i in seq_len(nchange)) { change.i <- change[[i]] if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate submodel, with interaction:") print(change.i) } # refit the whole model object.i <- update(object, interaction=change.i, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate updated model:") print(object.i) } # recurse object.i <- emend.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- 
logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } # end loop through submodels if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } object <- bestobject } else stop("Internal error: unrecognised format of update") object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } emend.ppm }) # more methods deviance.ppm <- function(object, ...) { satlogpl <- object$satlogpl if(is.null(satlogpl)) { object <- update(object, forcefit=TRUE) satlogpl <- object$satlogpl } if(is.null(satlogpl) || !is.finite(satlogpl)) return(NA) ll <- do.call(logLik, resolve.defaults(list(object=object, absolute=FALSE), list(...))) ll <- as.numeric(ll) 2 * (satlogpl - ll) } logLik.ppm <- function(object, ..., new.coef=NULL, warn=TRUE, absolute=FALSE) { if(!is.poisson.ppm(object) && warn) warn.once("ppmLogLik", "log likelihood is not available for non-Poisson model;", "log pseudolikelihood returned") ## degrees of freedom nip <- if(!inherits(object, "ippm")) 0 else length(attr(object$covfunargs, "free")) df <- length(coef(object)) + nip ## compute adjustment constant if(absolute && object$method %in% c("exact", "mpl", "ho")) { X <- data.ppm(object) W <- Window(X) areaW <- if(object$correction == "border" && object$rbord > 0) eroded.areas(W, object$rbord) else area(W) constant <- areaW * markspace.integral(X) } else constant <- 0 ## if(is.null(new.coef)) { ## extract from object ll <- object$maxlogpl + constant attr(ll, "df") <- df class(ll) <- "logLik" return(ll) } ## recompute for new parameter values method <- object$method if(method == "exact") method <- update(method, forcefit=TRUE) Q <- quad.ppm(object, drop=TRUE) Z <- is.data(Q) cif <- fitted(object, type="cif", new.coef=new.coef, drop=TRUE) cifdata <- cif[Z] switch(method, mpl=, exact=, ho = { w <- w.quad(Q) ll <- sum(log(cifdata[cifdata > 0])) - sum(w * cif) }, 
## (continuation of the logLik.ppm switch over fitting methods)
logi=,
           VBlogi={
             ## logistic-regression likelihood with baseline intensity B
             B <- getglmdata(object, drop=TRUE)$.logi.B
             p <- cif/(B+cif)
             ll <- sum(log(p/(1-p))[Z]) + sum(log(1-p)) + sum(log(B[Z]))
           },
           stop(paste("Internal error: unrecognised ppm method:",
                      dQuote(method)))
           )
  ll <- ll + constant
  attr(ll, "df") <- df
  class(ll) <- "logLik"
  return(ll)
}

## Generic: pseudo-R^2 for fitted models.
pseudoR2 <- function(object, ...) {
  UseMethod("pseudoR2")
}

## Pseudo-R^2 for a fitted ppm, based on deviance:
## 1 - deviance(model)/deviance(null model).
## If keepoffset=TRUE, offset terms are retained in the null model.
pseudoR2.ppm <- function(object, ..., keepoffset=TRUE) {
  dres <- deviance(object, ..., warn=FALSE)
  nullfmla <- . ~ 1
  if(keepoffset && has.offset.term(object)) {
    ## carry the offset terms over to the null model formula
    off <- attr(model.depends(object), "offset")
    offterms <- row.names(off)[apply(off, 1, any)]
    if(length(offterms)) {
      nullrhs <- paste(offterms, collapse=" + ")
      nullfmla <- as.formula(paste(". ~ ", nullrhs))
    }
  }
  nullmod <- update(object, nullfmla, forcefit=TRUE)
  dnul <- deviance(nullmod, warn=FALSE)
  return(1 - dres/dnul)
}

## Trend formula of the fitted model.
formula.ppm <- function(x, ...) {
  return(x$trend)
}

## Terms object of the fitted trend.
terms.ppm <- function(x, ...) {
  terms(x$terms, ...)
}

## Labels of the trend terms whose fitted coefficients are all finite.
labels.ppm <- function(object, ...) {
  # extract fitted trend coefficients
  co <- coef(object)
  Vnames <- object$internal$Vnames
  is.trend <- !(names(co) %in% Vnames)
  # model terms
  tt <- terms(object)
  lab <- attr(tt, "term.labels")
  if(length(lab) == 0)
    return(character(0))
  # model matrix
  mm <- model.matrix(object)
  ass <- attr(mm, "assign")
  # 'ass' associates coefficients with model terms
  # except ass == 0 for the Intercept
  coef.ok <- is.finite(co)
  relevant <- (ass > 0) & is.trend
  okterms <- unique(ass[coef.ok & relevant])
  return(lab[okterms])
}

## AIC for a fitted ppm.
## For non-Poisson models (fitted by pseudolikelihood), takeuchi=TRUE
## replaces the parameter count by the Takeuchi penalty trace(J H^{-1})
## computed from the variance internals.
AIC.ppm <- function(object, ..., k=2, takeuchi=TRUE) {
  ll <- logLik(object, warn=FALSE)
  pen <- attr(ll, "df")
  if(takeuchi && !is.poisson(object)) {
    vv <- vcov(object, what="internals")
    logi <- (object$method == "logi")
    J <- with(vv, if(!logi) Sigma else (Sigma1log+Sigma2log))
    H <- with(vv, if(!logi) A1 else Slog)
    ## Takeuchi penalty = trace of J H^{-1} = trace of H^{-1} J
    JiH <- try(solve(H, J), silent=TRUE)
    if(!inherits(JiH, "try-error"))
      pen <- sum(diag(JiH))
  }
  return(- 2 * as.numeric(ll) + k * pen)
}
## extractAIC method for ppm: returns c(equivalent df, AIC),
## the format expected by step() and drop1().
extractAIC.ppm <- function (fit, scale = 0, k = 2, ..., takeuchi=TRUE)
{
  edf <- length(coef(fit))
  aic <- AIC(fit, k=k, takeuchi=takeuchi)
  c(edf, aic)
}

#
# method for model.frame

## Model frame of the internal glm/gam fit.
## If no glm fit is stored, the model is re-fitted with forcefit=TRUE.
model.frame.ppm <- function(formula, ...) {
  ## first argument is really a ppm object (generic requires name 'formula')
  object <- formula
  gf <- getglmfit(object)
  if(is.null(gf)) {
    warning("Model re-fitted with forcefit=TRUE")
    object <- update(object, forcefit=TRUE)
    gf <- getglmfit(object)
  }
  argh <- resolve.defaults(list(formula=gf),
                           list(...),
                           list(data = getglmdata(object),
                                subset = TRUE))
  result <- switch(object$fitter,
                   gam = do.call(modelFrameGam, argh),
                   do.call(model.frame, argh))
  return(result)
}

#' a hacked version of model.frame.glm that works for gam objects (mgcv)
modelFrameGam <- function(formula, ...) {
  dots <- list(...)
  nargs <- dots[match(c("data", "na.action", "subset"), names(dots), 0L)]
  if (length(nargs) || is.null(formula$model)) {
    ## rebuild the model frame by re-evaluating the original gam call
    fcall <- formula$call
    # fcall$method <- "model.frame"
    fcall[[1L]] <- quote(mgcv::gam)
    fcall[names(nargs)] <- nargs
    env <- environment(formula$terms)
    if (is.null(env)) env <- parent.frame()
    refut <- eval(fcall, env)
    refut$model
  } else formula$model
}

#
# method for model.matrix

## model.matrix method for ppm: delegates to PPMmodelmatrix.
model.matrix.ppm <- function(object,
                             data=model.frame(object, na.action=NULL),
                             ...,
                             Q=NULL, keepNA=TRUE) {
  if(missing(data)) data <- NULL
  PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA)
}

## model.matrix method for ippm (ppm with irregular parameters);
## irregular=TRUE appends the irregular score components.
model.matrix.ippm <- function(object,
                              data=model.frame(object, na.action=NULL),
                              ...,
                              Q=NULL, keepNA=TRUE, irregular=FALSE) {
  if(missing(data)) data <- NULL
  PPMmodelmatrix(object, data=data, ..., Q=Q, keepNA=keepNA,
                 irregular=irregular)
}

## Workhorse for the model.matrix methods above.
## (Definition continues beyond this chunk.)
PPMmodelmatrix <- function(object, data = NULL, ..., subset, Q=NULL,
                           keepNA=TRUE, irregular=FALSE, splitInf=FALSE) {
  # handles ppm and ippm
  data.given <- !is.null(data)
  irregular <- irregular && inherits(object, "ippm") &&
               !is.null(object$iScore)
  if(splitInf && !data.given && is.null(Q)) {
    #' force re-computation
    Q <- quad.ppm(object)
  }
  if(!is.null(Q)) {
    if(data.given)
      stop("Arguments Q and data are incompatible")
if(!inherits(Q, c("ppp", "quad"))) stop("Q should be a point pattern or quadrature scheme") if(is.ppp(Q)) Q <- quad(Q, Q[FALSE]) ## construct Berman-Turner frame needed <- c("trend", "interaction", "covariates", "covfunargs", "correction", "rbord") bt <- do.call(bt.frame, c(list(Q), object[needed], list(splitInf=splitInf))) forbid <- bt$forbid ## compute model matrix mf <- model.frame(bt$fmla, bt$glmdata, ...) mm <- model.matrix(bt$fmla, mf, ...) ass <- attr(mm, "assign") if(irregular) { ## add irregular score components U <- union.quad(Q) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) } ## subset if(!missing(subset)) { ok <- eval(substitute(subset), envir=bt$glmdata) mm <- mm[ok, , drop=FALSE] if(!is.null(forbid)) forbid <- forbid[ok] } ## remove NA's ? if(!keepNA) { ok <- complete.cases(mm) mm <- mm[ok, , drop=FALSE] if(!is.null(forbid)) forbid <- forbid[ok] } attr(mm, "assign") <- ass attr(mm, "-Inf") <- forbid return(mm) } #' extract GLM fit gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("internal error: unable to extract a glm fit") } if(data.given) { #' new data. Must contain the Berman-Turner variables as well. 
bt <- list(.mpl.Y=1, .mpl.W=1, .mpl.SUBSET=TRUE) if(any(forgot <- !(names(bt) %in% names(data)))) data <- do.call(cbind, append(list(data), bt[forgot])) mm <- model.matrix(gf, data=data, ..., subset=NULL) ass <- attr(mm, "assign") if(irregular) { ## add irregular score components mi <- sapply(object$iScore, do.call, args=append(list(x=data$x, y=data$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) attr(mm, "assign") <- ass } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } scrambled <- object$scrambled %orifnull% FALSE ## if TRUE, this object was produced by 'subfits' using jittered covariate if(!keepNA && !irregular && !scrambled) { # extract model matrix of glm fit object # restricting to its 'subset' mm <- model.matrix(gf, ...) if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign return(mm) } ## extract model matrix for all cases gd <- getglmdata(object, drop=FALSE) if(!scrambled) { ## 'gf' was fitted to correct data. Use internals. 
mm <- model.matrix(gf, ..., subset=NULL, na.action=NULL) ass <- attr(mm, "assign") } else { ## 'gf' was originally fitted using jittered data: ## Use correct data given by 'gd' ## Temporarily add scrambled data to avoid singular matrices etc gds <- object$internal$glmdata.scrambled gdplus <- rbind(gd, gds) mm <- model.matrix(gf, ..., data=gdplus, subset=NULL, na.action=NULL) ass <- attr(mm, "assign") ## Now remove rows corresponding to scrambled data mm <- mm[seq_len(nrow(gd)), , drop=FALSE] attr(mm, "assign") <- ass } cn <- colnames(mm) if(nrow(mm) != nrow(gd)) { # can occur if covariates include NA's or interaction is -Inf insubset <- getglmsubset(object) isna <- is.na(insubset) | !insubset if(sum(isna) + nrow(mm) == nrow(gd)) { # insert rows of NA's mmplus <- matrix( , nrow(gd), ncol(mm)) mmplus[isna, ] <- NA mmplus[!isna, ] <- mm mm <- mmplus attr(mm, "assign") <- ass } else stop("internal error: model matrix does not match glm data frame") } if(irregular) { ## add irregular score components U <- union.quad(quad.ppm(object, drop=FALSE)) mi <- sapply(object$iScore, do.call, args=append(list(x=U$x, y=U$y), object$covfunargs), envir=environment(terms(object))) if(nrow(mi) != nrow(mm)) stop("Internal error: incorrect number of rows in iScore") mm <- cbind(mm, mi) attr(mm, "assign") <- ass cn <- c(cn, colnames(mi)) } ## subset if(!missing(subset)) { ok <- eval(substitute(subset), envir=gd) mm <- mm[ok, , drop=FALSE] attr(mm, "assign") <- ass } ## remove NA's if(!keepNA) { mm <- mm[complete.cases(mm), , drop=FALSE] attr(mm, "assign") <- ass } if(inherits(gf, "gam")) attr(mm, "assign") <- gf$assign colnames(mm) <- cn return(mm) } model.images <- function(object, ...) { UseMethod("model.images") } model.images.ppm <- function(object, W=as.owin(object), ...) 
{
  ## Body of model.images.ppm (header on the previous line):
  ## compute pixel images of the canonical covariates of the fitted model.
  X <- data.ppm(object)
  # irregular <- resolve.1.default(list(irregular=FALSE), list(...))
  ## make a quadscheme with a dummy point at every pixel
  Q <- pixelquad(X, W)
  ## compute model matrix
  mm <- model.matrix(object, Q=Q, ...)
  ## retain only the entries for dummy points (pixels)
  mm <- mm[!is.data(Q), , drop=FALSE]
  mm <- as.data.frame(mm)
  ## create template image
  Z <- as.im(attr(Q, "M"))
  ok <- !is.na(Z$v)
  ## make images
  imagenames <- colnames(mm)
  if(!is.multitype(object)) {
    result <- lapply(as.list(mm), replace, list=ok, x=Z)
    result <- as.solist(result)
    names(result) <- imagenames
  } else {
    ## multitype model: one set of images per mark level
    marx <- marks(Q$dummy)
    mmsplit <- split(mm, marx)
    result <- vector(mode="list", length=length(mmsplit))
    for(i in seq_along(mmsplit))
      result[[i]] <- as.solist(lapply(as.list(mmsplit[[i]]),
                                      replace, list=ok, x=Z))
    names(result) <- names(mmsplit)
    result <- do.call(hyperframe, result)
    row.names(result) <- imagenames
  }
  return(result)
}

## Unit of length, taken from the stored quadrature scheme.
unitname.ppm <- function(x) {
  return(unitname(x$Q))
}

## Assign the unit of length.
"unitname<-.ppm" <- function(x, value) {
  unitname(x$Q) <- value
  return(x)
}

## Number of data points (header only; body on the next line).
nobs.ppm <- function(object, ...)
{
  ## Body of nobs.ppm (header on the previous line).
  npoints(data.ppm(object))
}

## Extract the interpoint interaction of a fitted model
## (Poisson() if none is stored).
as.interact.ppm <- function(object) {
  verifyclass(object, "ppm")
  inte <- object$interaction
  if(is.null(inte))
    inte <- Poisson()
  return(inte)
}

## Generic: coerce to a fitted point process model.
as.ppm <- function(object) {
  UseMethod("as.ppm")
}

as.ppm.ppm <- function(object) {
  object
}

## method for as.owin
## Window of the model: either the window of the data points, or its
## intersection with the domains of all pixel-image covariates.
as.owin.ppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) {
  if(!verifyclass(W, "ppm", fatal=fatal))
    return(NULL)
  from <- match.arg(from)
  datawin <- as.owin(data.ppm(W))
  if(from == "points")
    return(datawin)
  covs <- W$covariates
  isim <- unlist(lapply(covs, is.im))
  if(!any(isim))
    return(datawin)
  cwins <- lapply(covs[isim], as.owin)
  covwin <- do.call(intersect.owin, unname(cwins))
  result <- intersect.owin(covwin, datawin)
  return(result)
}

## domain and Window methods delegate to as.owin.ppm.
domain.ppm <- Window.ppm <- function(X, ..., from=c("points", "covariates")) {
  from <- match.arg(from)
  as.owin(X, ..., from=from)
}

## change the coefficients in a ppm or other model
tweak.coefs <- function(model, new.coef) {
  if(is.null(new.coef)) return(model)
  co <- coef(model)
  check.nvector(new.coef, length(co), things="coefficients")
  ## keep the original coefficients for reference
  model$coef.orig <- co
  model$coef <- new.coef
  return(model)
}
spatstat/R/distanxD.R0000644000176200001440000003006513571674174014242 0ustar liggesusers#
# distanxD.R
#
# $Revision: 1.14 $ $Date: 2019/12/04 09:08:47 $
#
# Interpoint distances for multidimensional points
#
# Methods for pairdist, nndist, nnwhich, crossdist
#

## Matrix of pairwise distances between the points of a
## multidimensional point pattern (class "ppx").
pairdist.ppx <- function(X, ...) {
  verifyclass(X, "ppx")
  # extract point coordinates
  coo <- as.matrix(coords(X, ...))
  n <- nrow(coo)
  if(n == 0)
    return(matrix(numeric(0), nrow=0, ncol=0))
  return(as.matrix(dist(coo)))
}

## Distances between two multidimensional patterns
## (header only; body on the next line).
crossdist.ppx <- function(X, Y, ...)
{ verifyclass(X, "ppx") verifyclass(Y, "ppx") # extract point coordinates cooX <- as.matrix(coords(X, ...)) cooY <- as.matrix(coords(Y, ...)) nX <- nrow(cooX) nY <- nrow(cooY) if(ncol(cooX) != ncol(cooY)) stop("X and Y have different dimensions (different numbers of coordinates)") if(nX == 0 || nY == 0) return(matrix(numeric(0), nrow=nX, ncol=nY)) coo <- rbind(cooX, cooY) dis <- as.matrix(dist(coo)) ans <- dis[1:nX, nX + (1:nY)] return(ans) } nndist.ppx <- function(X, ..., k=1) { verifyclass(X, "ppx") # extract point coordinates coo <- as.matrix(coords(X, ...)) n <- nrow(coo) m <- ncol(coo) if(m == 0) { warning("nndist.ppx: Zero-dimensional coordinates: returning NA") if(length(k) == 1L) return(rep.int(NA_real_, n)) else return(matrix(NA_real_, n, length(k))) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1L) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1L, kmax) # calculate k-nn distances for k <= kmaxcalc if(kmaxcalc == 1L) { # calculate nearest neighbour distance only nnd<-numeric(n) o <- fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("nndMD", n= as.integer(n), m=as.integer(m), x= as.double(t(coo[o,])), nnd= as.double(nnd), as.double(big), PACKAGE = "spatstat") nnd[o] <- Cout$nnd } else { # case kmaxcalc > 1 nnd<-numeric(n * kmaxcalc) o <- fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("knndMD", n = as.integer(n), m = as.integer(m), kmax = as.integer(kmaxcalc), x = as.double(t(coo[o,])), nnd = as.double(nnd), huge = 
as.double(big), PACKAGE = "spatstat") nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf's infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich.ppx <- function(X, ..., k=1) { verifyclass(X, "ppx") # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # extract point coordinates coo <- coords(X, ...) n <- nrow(coo) m <- ncol(coo) if(m == 0) { warning("nnwhich.ppx: Zero-dimensional coordinates: returning NA") if(length(k) == 1L) return(rep.int(NA_real_, n)) else return(matrix(NA_real_, n, length(k))) } # special cases if(n <= 1L) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(NA_integer_, nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- min(n-1L, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1L) { # identify nearest neighbour only nnw <- integer(n) o <- fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("nnwMD", n = as.integer(n), m = as.integer(m), x = as.double(t(coo[o,])), nnd = as.double(numeric(n)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- Cout$nnwhich if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] } else { # case kmaxcalc > 1 nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- 
fave.order(coo[,1L]) big <- sqrt(.Machine$double.xmax) Cout <- .C("knnwMD", n = as.integer(n), m = as.integer(m), kmax = as.integer(kmaxcalc), x = as.double(t(coo[o,])), nnd = as.double(numeric(n * kmaxcalc)), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") witch <- Cout$nnwhich witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(NA_integer_, nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } nncross.ppx <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k = 1) { verifyclass(X, "ppx") verifyclass(Y, "ppx") what <- match.arg(what, several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what ## k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1L) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) nk <- length(k) ## extract point coordinates cooX <- as.matrix(coords(X, ...)) nX <- nrow(cooX) m <- ncol(cooX) cooY <- as.matrix(coords(Y, ...)) nY <- nrow(cooY) mY <- ncol(cooY) ## check dimensions if(mY != m) stop(paste("Point patterns have different spatial dimensions:", m, "!=", mY), call.=FALSE) if(m == 0) { warning("nncross.ppx: Zero-dimensional coordinates: returning NA") if(nk == 1L) { NND <- if(want.dist) rep.int(NA_real_, nX) else 0 NNW <- if(want.which) rep.int(NA_integer_, nX) else 0 } else { NND <- if(want.dist) matrix(NA_real_, 
nX, nk) else 0 NNW <- if(want.which) matrix(NA_integer_, nX, nk) else 0 } return(packupNNdata(NND, NNW, what, k)) } ## trivial cases if(nX == 0L || nY == 0L) { NND <- matrix(Inf, nrow=nX, ncol=nk) NNW <- matrix(NA_integer_, nrow=nX, ncol=nk) return(packupNNdata(NND, NNW, what, k)) } ## exclusion arguments if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } ## number of neighbours that are well-defined kmaxcalc <- min(nY, kmax) ## find k-nearest neighbours for k <= kmaxcalc oX <- fave.order(cooX[,1L]) oY <- fave.order(cooY[,1L]) big <- sqrt(.Machine$double.xmax) if(kmaxcalc == 1L) { ## find nearest neighbour only nnd <- numeric(nX) nnw <- integer(nX) if(!exclude) { Cout <- .C("nnXwMD", m =as.integer(m), n1 = as.integer(nX), x1 = as.double(t(cooX[oX,])), n2 = as.integer(nY), x2 = as.double(t(cooY[oY,])), nnd = as.double(nnd), nnwhich = as.integer(nnw), as.double(big), PACKAGE = "spatstat") } else { Cout <- .C("nnXxMD", m =as.integer(m), n1 = as.integer(nX), x1 = as.double(t(cooX[oX,])), id1 = as.integer(iX[oX]), n2 = as.integer(nY), x2 = as.double(t(cooY[oY,])), id2 = as.integer(iY[oY]), nnd = as.double(nnd), nnwhich = as.integer(nnw), as.double(big), PACKAGE = "spatstat") } if(want.dist) nnd[oX] <- Cout$nnd if(want.which) { witch <- Cout$nnwhich if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > nY)) stop("Internal error: index returned from C code exceeds npoints(Y)") nnw[oX] <- oY[witch] } } else { ## k-nearest nnd <- matrix(0, nX, kmaxcalc) nnw <- matrix(0L, nX, kmaxcalc) if(!exclude) { Cout <- .C("knnXwMD", m = as.integer(m), n1 = as.integer(nX), x1 = as.double(t(cooX[oX,])), n2 = as.integer(nY), x2 = 
as.double(t(cooY[oY,])), kmax = as.integer(kmaxcalc), nnd = as.double(nnd), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") } else { Cout <- .C("knnXxMD", m = as.integer(m), n1 = as.integer(nX), x1 = as.double(t(cooX[oX,])), id1 = as.integer(iX[oX]), n2 = as.integer(nY), x2 = as.double(t(cooY[oY,])), id2 = as.integer(iY[oY]), kmax = as.integer(kmaxcalc), nnd = as.double(nnd), nnwhich = as.integer(nnw), huge = as.double(big), PACKAGE = "spatstat") } dust <- Cout$nnd witch <- Cout$nnwhich if(any(notfound <- (witch <= 0 | witch > nY))) { dust[notfound] <- Inf witch[notfound] <- NA } nnd[oX, ] <- matrix(dust, nrow=nX, ncol=kmaxcalc, byrow=TRUE) nnw[oX, ] <- matrix(oY[witch], nrow=nX, ncol=kmaxcalc, byrow=TRUE) } ## post-processing if(kmax > kmaxcalc) { ## add columns of Inf's/NA's if(want.dist) { infs <- matrix(as.numeric(Inf), nrow=nX, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(want.which) { nas <- matrix(NA_integer_, nrow=nX, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } } if(length(k) < kmax) { ## select only the specified columns if(want.dist) nnd <- nnd[, k, drop=TRUE] if(want.which) nnw <- nnw[, k, drop=TRUE] } return(packupNNdata(nnd, nnw, what, k)) } packupNNdata <- function(NND, NNW, what, k) { result <- as.data.frame(list(dist=NND, which=NNW)[what]) if(max(k) > 1L) { colnames(result) <- c(if("dist" %in% what) paste0("dist.", k) else NULL, if("which" %in% what) paste0("which.",k) else NULL) } if(ncol(result) == 1L) result <- result[, , drop=TRUE] return(result) } spatstat/R/quadratmtest.R0000644000176200001440000000067113333543255015171 0ustar liggesusers# # method for 'quadrat.test' for class mppm # # $Revision: 1.8 $ $Date: 2015/08/12 07:29:17 $ # quadrat.test.mppm <- function(X, ...) 
{
  ## Body of quadrat.test.mppm (header on the previous line):
  ## apply quadrat.test.ppm to each sub-model and pool the results.
  Xname <- short.deparse(substitute(X))
  if(!is.poisson.mppm(X))
    stop("Model is not a Poisson point process")
  subs <- subfits(X)
  tests <- anylapply(subs, quadrat.test.ppm, ..., fitname=Xname)
  df.est <- length(coef(X))
  return(pool.quadrattest(tests, Xname=Xname, df.est=df.est))
}
spatstat/R/nnfun.R0000644000176200001440000000717513610471501013575 0ustar liggesusers#
# nnfun.R
#
#   nearest neighbour function (returns a function of x,y)
#
#   $Revision: 1.9 $ $Date: 2020/01/18 02:52:11 $
#

## Generic: nearest-neighbour function.
nnfun <- function(X, ...) {
  UseMethod("nnfun")
}

## nnfun method for 2D point patterns: returns a function(x,y) giving,
## for each query location, the index (value="index") or mark
## (value="mark") of the k-th nearest point of X.
nnfun.ppp <- function(X, ..., k=1, value=c("index", "mark")) {
  # this line forces X to be bound
  stopifnot(is.ppp(X))
  if(length(k) != 1)
    stop("k should be a single integer")
  value <- match.arg(value)
  switch(value,
         index = {
           gi <- function(x,y=NULL) {
             Y <- xy.coords(x, y)[c("x", "y")]
             nncross(Y, X, what="which", k=k)
           }
           attr(gi, "Xclass") <- "ppp"
           g <- funxy(gi, as.rectangle(as.owin(X)))
         },
         mark = {
           stopifnot(is.marked(X))
           marx <- as.data.frame(marks(X))[,1]
           gm <- function(x,y=NULL) {
             Y <- xy.coords(x, y)[c("x", "y")]
             marx[nncross(Y, X, what="which", k=k)]
           }
           attr(gm, "Xclass") <- "ppp"
           g <- funxy(gm, as.rectangle(as.owin(X)))
         })
  class(g) <- c("nnfun", class(g))
  return(g)
}

## nnfun method for line segment patterns (nearest segment).
nnfun.psp <- function(X, ..., value=c("index", "mark")) {
  # this line forces X to be bound
  stopifnot(is.psp(X))
  value <- match.arg(value)
  switch(value,
         index = {
           gi <- function(x,y=NULL) {
             Y <- xy.coords(x, y)[c("x", "y")]
             nncross(Y, X, what="which")
           }
           attr(gi, "Xclass") <- "psp"
           g <- funxy(gi, as.rectangle(as.owin(X)))
         },
         mark = {
           stopifnot(is.marked(X))
           marx <- as.data.frame(marks(X))[,1]
           gm <- function(x,y=NULL) {
             Y <- xy.coords(x, y)[c("x", "y")]
             marx[nncross(Y, X, what="which")]
           }
           attr(gm, "Xclass") <- "psp"
           g <- funxy(gm, as.rectangle(as.owin(X)))
         })
  class(g) <- c("nnfun", class(g))
  return(g)
}

## Window of the underlying pattern, retrieved from the closure of W.
as.owin.nnfun <- function(W, ..., fatal=TRUE) {
  X <- get("X", envir=environment(W))
  as.owin(X, ..., fatal=fatal)
}

## domain and Window methods (header only; body on the next line).
domain.nnfun <- Window.nnfun <- function(X, ...)
{ as.owin(X) } as.im.nnfun <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, approx=TRUE) { if(approx && is.null(W)) { env <- environment(X) Xdata <- get("X", envir=env) if(is.ppp(Xdata)) { #' fast approximation is supported only for point patterns k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1L]] value <- mget("value", envir=env, ifnotfound=list(NULL))[[1L]] Z <- nnmap(Xdata, k=k, what="which", eps=eps, dimyx=dimyx, xy=xy) if(identical(value, "mark")) { marx <- get("marx", envir=env) Z <- eval.im(marx[Z]) } if(!is.null(na.replace)) Z$v[is.null(Z$v)] <- na.replace return(Z) } } if(is.null(W)) W <- Window(X) result <- as.im.function(X, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace, ...) return(result) } print.nnfun <- function(x, ...) { env <- environment(x) X <- get("X", envir=env) k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1L]] v <- mget("value", envir=env, ifnotfound=list(NULL))[[1L]] xtype <- attr(x, "Xclass") typestring <- switch(xtype, ppp="point pattern", psp="line segment pattern", paste("object of class", sQuote(xtype))) Kth <- if(k == 1) "Nearest" else paste0(ordinal(k), "-Nearest") cat(paste(Kth, "Neighbour", if(is.null(v)) "Index" else "Mark", "function for ", typestring, "\n")) print(X) return(invisible(NULL)) } spatstat/R/rotmean.R0000644000176200001440000000316013406354173014114 0ustar liggesusers## ## rotmean.R ## ## rotational average of pixel values ## ## $Revision: 1.11 $ $Date: 2018/12/19 05:29:34 $ rotmean <- function(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im")) { if(missing(Xname)) Xname <- sensiblevarname(short.deparse(substitute(X)), "X") trap.extra.arguments(..., .Context="rotmean") stopifnot(is.im(X)) if(!missing(origin)) { X <- shift(X, origin=origin) backshift <- -getlastshift(X) } else { backshift <- NULL } result <- match.arg(result) rmax <- with(vertices(Frame(X)), sqrt(max(x^2+y^2))) Xunpad <- X if(padzero) X <- padimage(na.handle.im(X, 0), 0, 
W=square(c(-1,1)*rmax)) Xdata <- as.data.frame(X) values <- Xdata$value radii <- with(Xdata, sqrt(x^2+y^2)) ra <- pmin(range(radii), rmax) ## eps <- sqrt(X$xstep^2 + X$ystep^2) a <- unnormdensity(radii, from=ra[1], to=ra[2]) b <- unnormdensity(radii, weights=values, from=ra[1], to=ra[2], bw=a$bw) df <- data.frame(r=a$x, f=b$y/a$y) FUN <- fv(df, argu="r", ylab=substitute(bar(X)(r), list(X=as.name(Xname))), valu="f", fmla=(. ~ r), alim=ra, labl=c("r", "%s(r)"), desc=c("distance argument r", "rotational average"), unitname=unitname(X), fname=paste0("bar", paren(Xname))) attr(FUN, "dotnames") <- "f" if(result == "fv") return(FUN) ## compute image FUN <- as.function(FUN) XX <- as.im(Xunpad, na.replace=1) IM <- as.im(function(x,y,FUN){ FUN(sqrt(x^2+y^2)) }, XX, FUN=FUN) if(!is.null(backshift)) IM <- shift(IM,backshift) return(IM) } spatstat/R/nnfromvertex.R0000644000176200001440000000776013333543255015216 0ustar liggesusers#' nnfromvertex.R #' #' Nearest data point to each vertex of a network #' #' $Revision: 1.2 $ $Date: 2017/09/23 04:56:45 $ #' nnfromvertex <- function(X, what=c("dist", "which"), k=1) { stopifnot(is.lpp(X)) what <- match.arg(what, several.ok=TRUE) nX <- npoints(X) nv <- nvertices(domain(X)) #' k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) #' Initialise results nnd <- matrix(Inf, nrow=nv, ncol=kmax) nnw <- matrix(NA_integer_, nrow=nv, ncol=kmax) colnames(nnd) <- colnames(nnw) <- 1:kmax #' Trivial cases if(nX > 0) { #' Unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, 
tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths.psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' .............................................. #' number of neighbours that are well-defined kmaxcalc <- min(nX, kmax) #' calculate k-nn distances and identifiers for 1 <= k <= kmaxcalc z <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=kmaxcalc) vnndist <- z$vnndist vnnwhich <- z$vnnwhich #' map identifiers back to original data pattern vnnwhich <- coUXord$lab[vnnwhich] #' insert results in correct places nnd[, 1:kmaxcalc] <- vnndist nnw[, 1:kmaxcalc] <- vnnwhich } #' extract required values nnd <- nnd[,k, drop=TRUE] nnw <- nnw[,k, drop=TRUE] if(identical(what, "dist")) return(nnd) if(identical(what, "which")) return(nnw) return(cbind(data.frame(dist=nnd), data.frame(which=nnw))) } vnnFind <- function(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=1) { #' Find data point nearest to each vertex of network #' Assumed 'seg' is sorted in increasing order #' 'tp' is increasing within 'seg' nX <- length(seg) from0 <- from - 1L to0 <- to - 1L seg0 <- seg - 1L #' if(kmax == 1) { z <- .C("Clinvwhichdist", np = as.integer(nX), sp = as.integer(seg0), tp = as.double(tp), nv = as.integer(nv), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(nv)), which = as.integer(integer(nv)), PACKAGE = "spatstat") } else { z <- .C("linvknndist", kmax = as.integer(kmax), nq = as.integer(nX), sq = as.integer(seg0), tq = as.double(tp), nv = as.integer(nv), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = 
as.double(numeric(kmax * nv)), which = as.integer(integer(kmax * nv)), PACKAGE = "spatstat") } vnndist <- z$dist vnnwhich <- z$which + 1L vnnwhich[vnnwhich == 0] <- NA # possible if network is disconnected if(kmax > 1) { vnndist <- matrix(vnndist, ncol=kmax, byrow=TRUE) vnnwhich <- matrix(vnnwhich, ncol=kmax, byrow=TRUE) } return(list(vnndist=vnndist, vnnwhich=vnnwhich)) } spatstat/R/rlabel.R0000644000176200001440000000230013460231502013671 0ustar liggesusers# # rlabel.R # # random (re)labelling # # $Revision: 1.12 $ $Date: 2019/04/25 02:59:12 $ # # rlabel <- function(X, labels=marks(X), permute=TRUE, nsim=1, drop=TRUE) { stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X) || is.psp(X)) if(is.null(labels)) stop("labels not given and marks not present") nthings <- nobjects(X) things <- if(is.psp(X)) "segments" else "points" if(is.vector(labels) || is.factor(labels)) { nlabels <- length(labels) if(permute && (nlabels != nthings)) stop(paste("length of labels vector does not match number of", things)) Y <- replicate(nsim, X %mark% sample(labels, nthings, replace=!permute), simplify=FALSE) } else if(is.data.frame(labels) || is.hyperframe(labels)) { nlabels <- nrow(labels) if(permute && (nlabels != nthings)) stop(paste("number of rows of data frame does not match number of", things)) Y <- replicate(nsim, X %mark% labels[sample(1:nlabels, nthings, replace=!permute), ,drop=FALSE], simplify=FALSE) } else stop("Format of labels argument is not understood") return(simulationresult(Y, nsim, drop)) } spatstat/R/inforder.family.R0000644000176200001440000000653013333543255015543 0ustar liggesusers# # # inforder.family.R # # $Revision: 1.2 $ $Date: 2010/07/10 10:22:09 $ # # Family of `infinite-order' point process models # # inforder.family: object of class 'isf' # # # ------------------------------------------------------------------- # inforder.family <- list( name = "inforder", print = function(self) { cat("Family of infinite-order interactions\n") }, plot = NULL, # 
---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `inforder' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `inforder' family of interactions with infinite order, # there are no structures common to all interactions. # So this function simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) 
if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(inforder.family) <- "isf" spatstat/R/quadratresample.R0000644000176200001440000000223213333543255015640 0ustar liggesusers# # quadratresample.R # # resample a point pattern by resampling quadrats # # $Revision: 1.7 $ $Date: 2015/10/21 09:06:57 $ # quadratresample <- function(X, nx, ny=nx, ..., replace=FALSE, nsamples=1, verbose=(nsamples > 1)) { stopifnot(is.ppp(X)) if(X$window$type != "rectangle") stop("Resampling is only implemented for rectangular windows") # create tessellation A <- quadrats(X, nx=nx, ny=ny) # split data over tessellation B <- split(X, A) nq <- length(B) # determine bottom left corner of each tile V <- lapply(B, framebottomleft) out <- list() if(verbose) { cat("Generating resampled patterns...") pstate <- list() } for(i in 1:nsamples) { # resample tiles ind <- sample(1:nq, nq, replace=replace) Xresampled <- X Bresampled <- B for(j in 1:nq) { k <- ind[j] Bresampled[[j]] <- shift(B[[k]], unlist(V[[j]]) - unlist(V[[k]])) } split(Xresampled, A) <- Bresampled out[[i]] <- Xresampled if(verbose) pstate <- progressreport(i, nsamples, state=pstate) } if(nsamples == 1) return(out[[1]]) return(as.solist(out)) } spatstat/R/versions.R0000644000176200001440000000551713570111653014323 0ustar liggesusers# # versions.R # # version numbers # # $Revision: 1.13 $ $Date: 2019/11/29 03:38:28 $ # ##################### # Extract version string from 
ppm object versionstring.ppm <- function(object) { verifyclass(object, "ppm") v <- object$version if(is.null(v) || !is.list(v)) v <- list(major=1, minor=3, release=4) vs <- paste(v$major, ".", v$minor, "-", v$release, sep="") return(vs) } # Extract version string from interact object versionstring.interact <- function(object) { verifyclass(object, "interact") v <- object$version return(v) # NULL before 1.11-0 } # Get version number of current spatstat installation # This is now saved in the spatstat cache environment # rather than read from file every time versionstring.spatstat <- function() { if(!existsSpatstatVariable("SpatstatVersion")) store.versionstring.spatstat() getSpatstatVariable("SpatstatVersion") } store.versionstring.spatstat <- function() { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat"), fields="Version") vs <- as.character(vs) putSpatstatVariable("SpatstatVersion", vs) } # Extract major and minor versions only. majorminorversion <- function(v) { vp <- package_version(v) return(package_version(paste(vp$major, vp$minor, sep="."))) } versioncurrency.spatstat <- function(today=Sys.Date(), checkR=TRUE) { ## check version currency using dates msg <- NULL if(checkR && exists("getRversion") && getRversion() >= "3.2.2") { ## check version of R rv <- R.Version() rdate <- with(rv, ISOdate(year, month, day)) if(today - as.Date(rdate) > 365) { ## R version is really old; just warn about this msg <- paste(rv$version.string, "is more than a year old;", "we strongly recommend upgrading to the latest version") } } if(is.null(msg)) { ## check version of spatstat descfile <- system.file("DESCRIPTION", package="spatstat") packdate <- as.Date(read.dcf(file=descfile, fields="Date")) elapsed <- today - packdate if(elapsed > 75) { if(elapsed > 365) { n <- floor(elapsed/365) unit <- "year" sowhat <- "we strongly recommend upgrading to the latest version." 
} else if(elapsed > 100) { n <- floor(elapsed/30) unit <- "month" sowhat <- "we recommend upgrading to the latest version." } else { n <- floor(elapsed/7) unit <- "week" sowhat <- "a newer version should be available." } expired <- if(n == 1) paste("a", unit) else paste(n, paste0(unit, "s")) ver <- versionstring.spatstat() msg <- paste("spatstat version", ver, "is out of date by more than", paste0(expired, ";"), sowhat) } } return(msg) } # legacy function RandomFieldsSafe <- function() { TRUE } spatstat/R/Kest.R0000644000176200001440000010341713620223030013344 0ustar liggesusers# # Kest.R Estimation of K function # # $Revision: 5.129 $ $Date: 2020/02/10 09:44:52 $ # # # -------- functions ---------------------------------------- # Kest() compute estimate of K # using various edge corrections # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lest" <- function(X, ..., correction) { if(missing(correction)) correction <- NULL K <- Kest(X, ..., correction=correction) L <- eval.fv(sqrt(K/pi), dotonly=FALSE) # handle variance estimates if(any(varcols <- colnames(K) %in% c("rip", "ls"))) { r <- with(L, .x) L[,varcols] <- as.data.frame(K)[,varcols]/(2 * pi * r)^2 # fix 0/0 n <- npoints(X) A <- area(Window(X)) if(any(colnames(K) == "rip")) L[r == 0, "rip"] <- (2 * A/(n-1)^2)/(4 * pi) 
if(any(colnames(K) == "ls")) L[r == 0, "ls"] <- (2 * A/(n * (n-1)))/(4 * pi) } # relabel the fv object L <- rebadge.fv(L, quote(L(r)), "L", names(K), new.labl=attr(K, "labl")) # return(L) } "Kest"<- function(X, ..., r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) && !is.null(nlarge) rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- X$window areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, W)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick Kdot() into doing it indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) Kd <- Kdot(X %mark% indom, i="TRUE", r=r, breaks=breaks, correction=correction, ratio=ratio) # relabel and exit Kd <- rebadge.fv(Kd, quote(K(r)), "K") return(Kd) } rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda) if(is.infinite(rmaxdefault)) rmaxdefault <- diameter(W) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", rigid="rigid", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) # replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) # retain only corrections that are implemented for the window correction <- 
implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked <- correction.fast || (nlarge.given && large.n.trigger) if(asked && !can.do.fast) warning("r values not evenly spaced - cannot use efficient code") if(will.do.fast) { # determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { # some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } # restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) if(bord) Kb <- Kborder.engine(X, max(r), length(r), correction, ratio=ratio) if(none) Kn <- Knone.engine(X, max(r), length(r), ratio=ratio) if(bord && none) { Kn <- Kn[ , names(Kn) != "theo"] yn <- fvnames(Kb, ".y") Kbn <- if(!ratio) bind.fv(Kb, Kn, preferred=yn) else bind.ratfv(Kb, Kn, preferred=yn) return(Kbn) } if(bord) return(Kb) if(none) return(Kn) } do.fast.rectangle <- can.do.fast && is.rectangle(W) && spatstat.options("use.Krect") && !any(correction == "rigid") if(do.fast.rectangle) { 
########################################### ## Fast code for rectangular window ########################################### K <- Krect.engine(X, rmax, length(r), correction, ratio=ratio) attr(K, "alim") <- alim } else { ########################################### ## Slower code ########################################### ## this will be the output data frame Kdf <- data.frame(r=r, theo = pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- lambda2 * areaW K <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, alim, c("r","%s[pois](r)"), desc, fname="K", ratio=ratio) ## identify all close pairs rmax <- max(r) what <- if(any(correction %in% c("translate", "isotropic"))) "all" else "ijd" close <- closepairs(X, rmax, what=what) DIJ <- close$d ## precompute set covariance of window gW <- NULL if(any(correction %in% c("translate", "rigid", "isotropic"))) gW <- setcov(W) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambda2 * areaW ## uncorrected estimate of K K <- bind.ratfv(K, data.frame(un=numKun), denKun, "hat(%s)[un](r)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { ## border method ## Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] ## apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { ## modified border correction denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- lambda2 * denom.area K <- bind.ratfv(K, data.frame(bord.modif=numKbm), data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambda * RS$denom.count K <- bind.ratfv(K, data.frame(border=numKb), data.frame(border=denKb), "hat(%s)[bord](r)", "border-corrected estimate of %s", 
"border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE, gW = gW, give.rmax=TRUE) wh <- whist(DIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambda2 * areaW h <- attr(edgewt, "rmax") numKtrans[r >= h] <- NA K <- bind.ratfv(K, data.frame(trans=numKtrans), denKtrans, "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "rigid")) { ## Ohser-Stoyan rigid motion correction CW <- rotmean(gW) edgewt <- areaW/as.function(CW)(DIJ) wh <- whist(DIJ, breaks$val, edgewt) numKrigid <- cumsum(wh) denKrigid <- lambda2 * areaW h <- rmax.Rigid(X, gW) #sic: X not W numKrigid[r >= h] <- NA K <- bind.ratfv(K, data.frame(rigid=numKrigid), denKrigid, "hat(%s)[rigid](r)", "rigid motion-corrected estimate of %s", "rigid", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambda2 * areaW h <- boundingradius(W) numKiso[r >= h] <- NA K <- bind.ratfv(K, data.frame(iso=numKiso), denKiso, "hat(%s)[iso](r)", "Ripley isotropic correction estimate of %s", "iso", ratio=ratio) } } ############################# ## VARIANCE APPROXIMATION ############################# if(var.approx) { ## Compute variance approximations A <- areaW P <- perimeter(W) n <- npts ## Ripley asymptotic approximation rip <- 2 * ((A/(n-1))^2) * (pi * r^2/A + 0.96 * P * r^3/A^2 + 0.13 * (n/A) * P * r^5/A^2) if(!ratio) { K <- bind.fv(K, data.frame(rip=rip), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } else { den <- (n-1)^2 ripnum <- den * rip ripden <- rep.int(den, length(rip)) K <- bind.ratfv(K, data.frame(rip=ripnum), data.frame(rip=ripden), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } if(W$type == "rectangle") { # 
Lotwick-Silverman a1r <- (0.21 * P * r^3 + 1.3 * r^4)/A^2 a2r <- (0.24 * P * r^5 + 2.62 * r^6)/A^3 # contains correction to typo on p52 of Diggle 2003 # cf Lotwick & Silverman 1982 eq (5) br <- (pi * r^2/A) * (1 - pi * r^2/A) + (1.0716 * P * r^3 + 2.2375 * r^4)/A^2 ls <- (A^2) * (2 * br - a1r + (n-2) * a2r)/(n*(n-1)) # add column if(!ratio) { K <- bind.fv(K, data.frame(ls=ls), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } else { den <- n*(n-1) lsnum <- ls * den lsden <- rep.int(den, length(ls)) K <- bind.ratfv(K, data.frame(ls=lsnum), data.frame(ls=lsden), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } } } ### FINISH OFF ##### ## default plot will display all edge corrections formula(K) <- . ~ r nama <- rev(colnames(K)) fvnames(K, ".") <- setdiff(nama, c("r", "rip", "ls")) ## unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } ################################################################ ############# SUPPORTING ALGORITHMS ########################### ################################################################ Kount <- function(dIJ, bI, b, breaks) { # # "internal" routine to compute border-correction estimate of K or Kij # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # b: vector of ALL distances to window boundary # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # histogram of noncensored distances nco <- whist(dIJ[uncen], breaks$val) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], breaks$val) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, breaks$val) # count censoring times beyond rightmost breakpoint uppercen <- sum(b > max(breaks$val)) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract 
results numerator <- RS$numerator denom.count <- RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denom.count) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denom.count=denom.count)) } #### interface to C code for border method Kborder.engine <- function(X, rmax, nr=100, correction=c("border", "bord.modif"), weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator denom <- lambda2 * areaW numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # boundary distances b <- bdist.points(Xsort) # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C("KborderI", nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), denom=as.integer(integer(nr)), PACKAGE = "spatstat") } else { # no - need double precision storage res <- .C("KborderD", nxy=as.integer(npts), 
x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE = "spatstat") } if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$numer denKbm <- lambda2 * denom.area bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if("border" %in% correction) { numKb <- res$numer denKb <- lambda * res$denom bord <- numKb/denKb Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C("Kwborder", nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE = "spatstat") if("border" %in% correction) { bord <- res$numer/res$denom Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", 
"border") if(ratio) { numK <- bind.fv(numK, data.frame(border=res$numer), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=res$denom), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } if("bord.modif" %in% correction) { numKbm <- res$numer denKbm <- eroded.areas(W, r) bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . 
~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } Knone.engine <- function(X, rmax, nr=100, weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) denom <- lambda2 * areaW if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C("KnoneI", nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), PACKAGE = "spatstat") } else { # no - need double precision storage res <- .C("KnoneD", nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE = "spatstat") } numKun <- res$numer denKun <- denom # = lambda2 * areaW Kun <- numKun/denKun } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- 
as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C("Kwnone", nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE = "spatstat") numKun <- res$numer denKun <- sum(weights) Kun <- numKun/denKun } # tack on to fv object Kfv <- bind.fv(Kfv, data.frame(un=Kun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { numK <- bind.fv(numK, data.frame(un=numKun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . ~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } rmax.rule <- function(fun="K", W, lambda) { if(gotW <- !missing(W)) verifyclass(W, "owin") if(gotL <- !missing(lambda)) lambda <- as.numeric(lambda) # can be vector gotall <- gotW && gotL switch(fun, K = { ## Ripley's Rule ripley <- if(gotW) shortside(Frame(W))/4 else Inf ## Count at most 1000 neighbours per point rlarge <- if(gotL) sqrt(1000 /(pi * lambda)) else Inf rmax <- min(rlarge, ripley) }, Kscaled = { ## rule of thumb for Kscaled rdiam <- if(gotall) diameter(Frame(W))/2 * sqrt(lambda) else Inf rmax <- min(10, rdiam) }, F = , G = , J = { # rule of thumb rdiam <- if(gotW) diameter(Frame(W))/2 else Inf # Poisson process has F(rlarge) = 1 - 10^(-5) rlarge <- if(gotL) sqrt(log(1e5)/(pi * lambda)) else Inf rmax <- min(rlarge, rdiam) }, stop(paste("Unrecognised function type", sQuote(fun))) ) return(rmax) } implemented.for.K <- function(correction, windowtype, explicit) { pixels <- (windowtype == 
"mask") if(any(correction == "best")) { # select best available correction correction[correction == "best"] <- if(!pixels) "isotropic" else "translate" } else { # available selection of edge corrections depends on window if(pixels) { iso <- (correction == "isotropic") if(any(iso)) { whinge <- "Isotropic correction not implemented for binary masks" if(explicit) { if(all(iso)) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) } correction <- correction[!iso] } } } return(correction) } good.correction.K <- function(X) { nX <- npoints(X) W <- as.owin(X) avail <- c("none", if(nX < 1e5) "border" else NULL, if(nX < 3000)"translate" else NULL, if(nX < 1000 && !is.mask(W)) "isotropic" else NULL) chosen <- rev(avail)[1] return(chosen) } Krect.engine <- function(X, rmax, nr=100, correction, weights=NULL, ratio=FALSE, fname="K", use.integers=TRUE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) width <- sidelengths(W)[1] height <- sidelengths(W)[2] lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) if(weighted <- !is.null(weights)) { ## coerce weights to a vector if(is.numeric(weights)) { check.nvector(weights, npts) } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } } # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(weighted) areaW else (lambda2 * areaW) Kfv <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, ratio=ratio) ####### prepare data ############ if(!all(correction == "translate")) { ## Ensure rectangle has its bottom left corner at the origin if(W$xrange[1] != 0 || W$yrange[1] != 0) { X <- shift(X, origin="bottomleft") W <- as.owin(X) } } ## sort in ascending 
order of x coordinate orderX <- fave.order(X$x) x <- X$x[orderX] y <- X$y[orderX] if(weighted) wt <- weights[orderX] ## establish algorithm parameters doIso <- "isotropic" %in% correction doTrans <- "translate" %in% correction doBord <- any(c("border", "bord.modif") %in% correction) doUnco <- "none" %in% correction trimedge <- spatstat.options("maxedgewt") ## allocate space for results ziso <- numeric(if(doIso) nr else 1L) ztrans <- numeric(if(doTrans) nr else 1L) ## call the C code if(weighted) { ## weighted version zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C("KrectWtd", width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(wt), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE = "spatstat") } else if(use.integers && npts < sqrt(.Machine$integer.max)) { ## unweighted ## numerator of border correction can be stored as an integer ## use faster integer arithmetic zbnumer <- integer(if(doBord) nr else 1L) zbdenom <- integer(if(doBord) nr else 1L) zunco <- integer(if(doUnco) nr else 1L) res <- .C("KrectInt", width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.integer(zbnumer), bdenom=as.integer(zbdenom), unco=as.integer(zunco), PACKAGE = "spatstat") } else { ## unweighted ## need double precision storage zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 
1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C("KrectDbl", width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE = "spatstat") } ## Process corrections in reverse order of priority ## Uncorrected estimate if("none" %in% correction) { numKun <- res$unco denKun <- if(weighted) areaW else (lambda2 * areaW) Kfv <- bind.ratfv(Kfv, data.frame(un=numKun), denKun, makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un", ratio=ratio) } ## Modified border correction if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$bnumer denKbm <- if(weighted) denom.area else (lambda2 * denom.area) Kfv <- bind.ratfv(Kfv, data.frame(bord.modif=numKbm), denKbm, makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } ## Border correction if("border" %in% correction) { numKb <- res$bnumer denKb <- if(weighted) res$bdenom else lambda * res$bdenom Kfv <- bind.ratfv(Kfv, data.frame(border=numKb), denKb, makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border", ratio=ratio) } ## translation correction if("translate" %in% correction) { numKtrans <- res$trans denKtrans <- if(weighted) areaW else (lambda2 * areaW) h <- diameter(as.rectangle(W))/2 numKtrans[r >= h] <- NA Kfv <- bind.ratfv(Kfv, data.frame(trans=numKtrans), denKtrans, makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans", ratio=ratio) } ## isotropic correction if("isotropic" %in% correction) { numKiso <- res$iso denKiso <- if(weighted) areaW else (lambda2 * areaW) h <- diameter(as.rectangle(W))/2 numKiso[r 
>= h] <- NA
    Kfv <- bind.ratfv(Kfv,
                      data.frame(iso=numKiso), denKiso,
                      makefvlabel(NULL, "hat", fname, "iso"),
                      "isotropic-corrected estimate of %s",
                      "iso",
                      ratio=ratio)
  }
  ##
  # default is to display them all
  formula(Kfv) <- . ~ r
  unitname(Kfv) <- unitname(X)
  if(ratio)
    Kfv <- conform.ratfv(Kfv)
  return(Kfv)
}
spatstat/R/ewcdf.R0000644000176200001440000001056513551474402013545 0ustar liggesusers#
#     ewcdf.R
#
#     $Revision: 1.19 $  $Date: 2019/10/14 08:40:51 $
#
#  With contributions from Kevin Ummel
#

# ewcdf: weighted generalisation of stats::ecdf.
#   x         numeric data vector (NA entries are removed)
#   weights   optional nonnegative numeric weights, one per entry of x,
#             or a single value to be recycled; NULL means unweighted
#   normalise if TRUE, rescale the cumulative weights so total mass is 1
#   adjust    multiplier applied to the cumulative weights
#             (only used when normalise=FALSE)
# Returns a right-continuous step function of class "ewcdf"
# (also inheriting "ecdf" when normalise=TRUE) and "stepfun".
ewcdf <- function(x, weights=NULL, normalise=TRUE, adjust=1)
{
  nx <- length(x)
  nw <- length(weights)
  weighted <- (nw > 0)
  if(weighted) {
    ## validate weights: numeric, nonnegative, length nx or 1 (recycled)
    check.nvector(weights, things="entries of x", oneok=TRUE)
    stopifnot(all(weights >= 0))
    if(nw == 1) weights <- rep(weights, nx)
  }
  ## remove NA's
  nbg <- is.na(x)
  x <- x[!nbg]
  if(weighted) weights <- weights[!nbg]
  n <- length(x)
  if (n < 1)
    stop("'x' must have 1 or more non-missing values")
  ## sort in increasing order of x value
  if(!weighted) {
    x <- sort(x)
    w <- rep(1, n)
  } else {
    ox <- fave.order(x)
    x <- x[ox]
    w <- weights[ox]
  }
  ## find jump locations and match
  rl <- rle(x)
  vals <- rl$values
  if(!weighted) {
    ## unweighted: jump height at each distinct value is its multiplicity
    wmatch <- rl$lengths
  } else {
    ## weighted: sum the weights attached to each distinct value,
    ## delegated to C code for speed
    nv <- length(vals)
    wmatch <- .C("tabsumweight",
                 nx=as.integer(n),
                 x=as.double(x),
                 w=as.double(w),
                 nv=as.integer(nv),
                 v=as.double(vals),
                 z=as.double(numeric(nv)),
                 PACKAGE="spatstat")$z
  }
  ## cumulative weight in each interval
  cumwt <- cumsum(wmatch)
  totwt <- sum(wmatch)
  ## rescale ?
  if(normalise) {
    cumwt <- cumwt/totwt
    totwt <- 1
  } else if(adjust != 1) {
    cumwt <- adjust * cumwt
    totwt <- adjust * totwt
  }
  ## make function: piecewise-constant, right-continuous (f = 0)
  rval <- approxfun(vals, cumwt,
                    method = "constant", yleft = 0, yright = totwt,
                    f = 0, ties = "ordered")
  class(rval) <- c("ewcdf",
                   if(normalise) "ecdf" else NULL,
                   "stepfun",
                   class(rval))
  ## stash the sorted weights in the closure environment;
  ## print.ewcdf retrieves them via get("w", envir=environment(x))
  assign("w", w, envir=environment(rval))
  attr(rval, "call") <- sys.call()
  return(rval)
}

# Hacked from stats:::print.ecdf
print.ewcdf <- function (x, digits = getOption("digits") - 2L, ...)
{ cat("Weighted empirical CDF \nCall: ") print(attr(x, "call"), ...) env <- environment(x) xx <- get("x", envir=env) ww <- get("w", envir=env) n <- length(xx) i1 <- 1L:min(3L, n) i2 <- if (n >= 4L) max(4L, n - 1L):n else integer() numform <- function(x) paste(formatC(x, digits = digits), collapse = ", ") cat(" x[1:", n, "] = ", numform(xx[i1]), if (n > 3L) ", ", if (n > 5L) " ..., ", numform(xx[i2]), "\n", sep = "") cat(" weights[1:", n, "] = ", numform(ww[i1]), if (n > 3L) ", ", if (n > 5L) " ..., ", numform(ww[i2]), "\n", sep = "") invisible(x) } quantile.ewcdf <- function(x, probs=seq(0,1,0.25), names=TRUE, ..., normalise=TRUE, type=1) { trap.extra.arguments(..., .Context="quantile.ewcdf") if(!(type %in% c(1,2))) stop("Only quantiles of type 1 and 2 are implemented", call.=FALSE) env <- environment(x) xx <- get("x", envir=env) n <- length(xx) Fxx <- get("y", envir=env) maxFxx <- max(Fxx) eps <- 100 * .Machine$double.eps if(normalise) { Fxx <- Fxx/maxFxx maxp <- 1 } else { maxp <- maxFxx } if(any((p.ok <- !is.na(probs)) & (probs/maxp < -eps | probs/maxp > 1 + eps))) { allowed <- if(normalise) "[0,1]" else paste("permitted range", prange(c(0, maxp))) stop(paste("'probs' outside", allowed), call.=FALSE) } if (na.p <- any(!p.ok)) { o.pr <- probs probs <- probs[p.ok] probs <- pmax(0, pmin(maxp, probs)) } np <- length(probs) if (n > 0 && np > 0) { qs <- numeric(np) if(type == 1) { ## right-continuous inverse for(k in 1:np) qs[k] <- xx[min(which(Fxx >= probs[k]))] } else { ## average of left and right continuous for(k in 1:np) { pk <- probs[k] ik <- min(which(Fxx >= probs[k])) qs[k] <- if(Fxx[ik] > pk) (xx[ik] + xx[ik-1L])/2 else xx[ik] } } } else { qs <- rep(NA_real_, np) } if (names && np > 0L) { dig <- max(2L, getOption("digits")) if(normalise) { probnames <- if(np < 100) formatC(100 * probs, format="fg", width=1, digits=dig) else format(100 * probs, trim = TRUE, digits = dig) names(qs) <- paste0(probnames, "%") } else { names(qs) <- if(np < 100) formatC(probs, 
format="fg", width=1, digits=dig) else
      format(probs, trim=TRUE, digits=dig)
    }
  }
  if (na.p) {
    ## reinsert results at the positions of the non-NA probabilities
    o.pr[p.ok] <- qs
    names(o.pr) <- rep("NA", length(o.pr))
    names(o.pr)[p.ok] <- names(qs)
    o.pr
  } else qs
}
spatstat/R/Math.linim.R0000644000176200001440000000332113546513700014446 0ustar liggesusers##
##   Math.linim.R
##
##   $Revision: 1.7 $ $Date: 2019/10/07 01:27:14 $
##

## Group generic "Ops" (arithmetic, comparison, logical operators)
## for 'linim' objects (pixel images on a linear network).
## Builds the operator call as text and hands it to eval.linim,
## which evaluates it on both the raster and the network data.
Ops.linim <- function(e1,e2=NULL){
  ## nargs() == 1 means a unary operator, e.g. "-x" or "!x"
  unary <- nargs() == 1L
  if(unary){
    if(!is.element(.Generic, c("!", "-", "+")))
      stop("Unary usage is undefined for this operation for images.")
    callstring <- paste(.Generic, "e1")
  } else {
    callstring <- paste("e1", .Generic, "e2")
  }
  expr <- parse(text = callstring)
  return(do.call(eval.linim, list(expr = expr)))
}

## Group generic "Math" (sqrt, exp, log, abs, ...) for 'linim'.
## Applies the generic to the pixel raster AND to the
## sample values stored in the "df" attribute, then reassembles.
Math.linim <- function(x, ...){
  m <- do.call(.Generic, list(x[,,drop=FALSE], ...))
  Z <- im(m,
          xcol = x$xcol,
          yrow = x$yrow,
          xrange = x$xrange,
          yrange = x$yrange,
          unitname = unitname(x))
  ## transform the network sample values in parallel with the raster
  df <- attr(x, "df")
  df$values <- do.call(.Generic, list(df$values, ...))
  L <- attr(x, "L")
  rslt <- linim(L, Z, df=df, restrict=FALSE)
  return(rslt)
}

## Group generic "Summary" (max, min, range, sum, prod, any, all)
## for 'linim'. Pools pixel values and (except for sum/prod, where the
## df samples would double-count pixel mass) the df sample values.
Summary.linim <- function(..., na.rm, finite){
  if(missing(finite)) finite <- FALSE
  if(missing(na.rm)) na.rm <- FALSE
  argh <- list(...)
  ## pixel values of each image argument
  values <- lapply(argh, "[")
  dfvalues <- if(is.element(.Generic, c("sum", "prod"))) list() else
              lapply(lapply(argh, attr, which="df"), getElement, name="values")
  vals <- as.numeric(unlist(c(values, dfvalues)))
  if(finite) {
    vals <- vals[is.finite(vals)]
  } else if(na.rm) {
    vals <- vals[!is.na(vals)]
  }
  do.call(.Generic, list(vals))
}

## Group generic "Complex" (Re, Im, Mod, Arg, Conj) for 'linim'.
Complex.linim <- function(z){
  L <- attr(z, "L")
  df <- attr(z, "df")
  m <- do.call(.Generic, list(z=z[,,drop=FALSE]))
  Z <- im(m,
          xcol = z$xcol,
          yrow = z$yrow,
          xrange = z$xrange,
          yrange = z$yrange,
          unitname = unitname(z))
  df$values <- do.call(.Generic, list(z=df$values))
  rslt <- linim(L, Z, df=df, restrict=FALSE)
  return(rslt)
}
spatstat/R/flipxy.R0000644000176200001440000000247213333543255013767 0ustar liggesusers#
#    flipxy.R
#
#    flip x and y coordinates
#
#  $Revision: 1.3 $ $Date: 2017/02/07 07:22:47 $
#

## generic: interchange the roles of the x and y coordinates
flipxy <- function(X) {
  UseMethod("flipxy")
}

## point pattern: swap coordinate vectors and flip the window
flipxy.ppp <- function(X) {
  stopifnot(is.ppp(X))
  ppp(X$y, X$x,
      marks=X$marks,
      window=flipxy(X$window),
      unitname=unitname(X),
      check=FALSE)
}

flipxypolygon <- function(p) {
  # flip x and y coordinates, and reinstate anticlockwise order
  # (a plain swap reverses orientation, so traverse vertices backwards)
  oldy <- p$y
  p$y <- rev(p$x)
  p$x <- rev(oldy)
  # area and hole status unchanged
  return(p)
}

## window: handle each representation separately
flipxy.owin <- function(X) {
  verifyclass(X, "owin")
  switch(X$type,
         rectangle={
           W <- owin(X$yrange, X$xrange, unitname=unitname(X))
         },
         polygonal={
           bdry <- lapply(X$bdry, flipxypolygon)
           W <- owin(poly=bdry, check=FALSE, unitname=unitname(X))
         },
         mask={
           ## transpose the logical matrix and swap the coordinate vectors
           W <- owin(mask=t(X$m),
                     xy=list(x=X$yrow, y=X$xcol),
                     unitname=unitname(X))
         },
         stop("Unrecognised window type")
         )
  return(W)
}

## segment pattern: swap endpoint columns (x0,y0,x1,y1) -> (y0,x0,y1,x1)
flipxy.psp <- function(X) {
  stopifnot(is.psp(X))
  flipends <- (X$ends)[, c(2L,1L,4L,3L), drop=FALSE]
  as.psp(flipends,
         window=flipxy(X$window),
         marks=X$marks,
         unitname=unitname(X),
         check=FALSE)
}

## pixel image: transpose the value matrix and swap pixel coordinates
flipxy.im <- function(X) {
  im(t(X$v), xcol=X$yrow, yrow=X$xcol, unitname=unitname(X))
}
spatstat/R/nndistlpp.R0000644000176200001440000005160613613704607014473 0ustar liggesusers#
#  nndistlpp.R
#
# $Revision: 1.25 $ $Date:
2020/01/28 01:01:57 $ # # Methods for nndist, nnwhich, nncross for linear networks # # nndist.lpp # Calculates the nearest neighbour distances in the shortest-path metric # for a point pattern on a linear network. nndist.lpp <- function(X, ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) n <- npoints(X) k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) L <- as.linnet(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected result <- matrix(Inf, n, length(k)) # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) relevant <- attr(Xi, "retainpoints") result[relevant, ] <- nndist.lpp(Xi, k=k, method=method) } return(result) } } toomany <- (kmax >= n-1) if(toomany) { ## not enough points to define kmax nearest neighbours result <- matrix(Inf, nrow=n, ncol=kmax) if(n <= 1) return(result[,k,drop=TRUE]) ## reduce kmax to feasible value kmax <- n-1 kuse <- k[k <= kmax] } else { kuse <- k } Y <- as.ppp(X) sparse <- identical(L$sparse, TRUE) ## find nearest segment for each point ## This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg) if(method == "interpreted") { ## interpreted code D <- pairdist(X, method="interpreted") diag(D) <- Inf ans <- if(kmax == 1) apply(D, 1, min) else t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE] } else if(!sparse && kmax == 1) { # C code for non-sparse network Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L nseg <- length(from0) # upper bound on interpoint distance huge <- max(dpath) + 2 * 
max(lengths.psp(Lseg)) # space for result ans <- double(n) # call C zz <- .C("linnndist", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), answer = as.double(ans), PACKAGE = "spatstat") ans <- zz$answer } else if(spatstat.options('Cnndistlpp')) { ## use new C routine Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to ## nseg <- length(from) seglen <- lengths.psp(Lseg) ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L tp <- loco$tp ## sort by segment index oo <- order(segmap, tp) segmap <- segmap[oo] tp <- tp[oo] # upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) #' kmax1 <- kmax + 1L zz <- .C("linknnd", kmax = as.integer(kmax1), np = as.integer(n), sp = as.integer(segmap), tp = as.double(tp), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), nndist = as.double(numeric(n * kmax1)), nnwhich = as.integer(integer(n * kmax1)), PACKAGE = "spatstat") ans <- matrix(, n, kmax1) ans[oo, ] <- matrix(zz$nndist, n, kmax1, byrow=TRUE) # drop first column which is zero corresponding to j = i ans <- ans[, -1, drop=FALSE] colnames(ans) <- paste0("dist.", 1:ncol(ans)) ans <- ans[,kuse] } else { ## use fast code for nncross ans <- nncross(X, X, what="dist", k=kuse+1) if(is.matrix(ans) || is.data.frame(ans)) colnames(ans) <- paste0("dist.", kuse) } if(!is.null(dim(ans))) { ans <- as.matrix(ans) rownames(ans) <- NULL } if(!toomany) return(ans) result[, kuse] <- as.matrix(ans) colnames(result) <- paste0("dist.", 1:ncol(result)) return(result[,k]) } # nnwhich.lpp # Identifies the nearest neighbours in 
the shortest-path metric
# for a point pattern on a linear network.
#
# nnwhich.lpp:
#   For each point of X, find the index of its k-th nearest neighbour
#   in the shortest-path metric on the network.
#   k may be a vector; result is a vector (single k) or a matrix with
#   one column per requested k. NA marks undefined neighbours
#   (too few points, or point in a different network component).
#   method = "C" uses compiled code; "interpreted" uses pairdist().
nnwhich.lpp <- function(X, ..., k=1, method="C") {
  stopifnot(inherits(X, "lpp"))
  stopifnot(method %in% c("C", "interpreted"))
  k <- as.integer(k)
  stopifnot(all(k > 0))
  kmax <- max(k)
  n <- npoints(X)
  L <- as.linnet(X)
  if(is.null(br <- L$boundingradius) || is.infinite(br)) {
    # network may be disconnected
    lab <- connected(L, what="labels")
    if(length(levels(lab)) > 1L) {
      # network is disconnected
      result <- matrix(NA_integer_, n, length(k))
      # handle each connected component separately;
      # points in different components keep the initial NA entries
      subsets <- split(seq_len(nvertices(L)), lab)
      for(i in seq_along(subsets)) {
        Xi <- thinNetwork(X, retainvertices=subsets[[i]])
        relevant <- attr(Xi, "retainpoints")
        result[relevant, ] <- nnwhich.lpp(Xi, k=k, method=method)
      }
      return(result)
    }
  }
  toomany <- (kmax >= n-1)
  if(toomany) {
    ## not enough points to define kmax nearest neighbours
    result <- matrix(NA_integer_, nrow=n, ncol=kmax)
    if(n <= 1) return(result[,k,drop=TRUE])
    ## reduce kmax to feasible value
    kmax <- n-1
    kuse <- k[k <= kmax]
  } else {
    kuse <- k
  }
  #
  Y <- as.ppp(X)
  sparse <- identical(L$sparse, TRUE)
  ## find nearest segment for each point
  ## This is given by local coordinates, if available (spatstat >= 1.28-0)
  loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)
  ## NOTE(review): in the fallback branch below, 'Lseg' is not yet defined
  ## at this point (it is assigned later inside the method branches), so the
  ## nearestsegment() fallback would fail; presumably loco$seg is always
  ## non-NULL for data in the current format -- confirm.
  pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg)
  if(method == "interpreted") {
    ## interpreted code: full shortest-path distance matrix
    D <- pairdist(X, method="interpreted")
    diag(D) <- Inf
    nnw <- if(kmax == 1) apply(D, 1, which.min) else
           t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE]
  } else if(!sparse && kmax == 1) {
    # C code for non-sparse network
    ##
    Lseg <- L$lines
    Lvert <- L$vertices
    from <- L$from
    to <- L$to
    dpath <- L$dpath
    ## convert indices to start at 0
    from0 <- from - 1L
    to0 <- to - 1L
    segmap <- pro - 1L
    nseg <- length(from0)
    # upper bound on interpoint distance
    huge <- max(dpath) + 2 * max(lengths.psp(Lseg))
    # space for result
    nnd <- double(n)
    nnw <- integer(n)
    # call C
    zz <- .C("linnnwhich",
             np = as.integer(n),
             xp = as.double(Y$x),
             yp = as.double(Y$y),
             nv = as.integer(Lvert$n),
             xv = as.double(Lvert$x),
             yv = as.double(Lvert$y),
             ns = as.integer(nseg),
             from = as.integer(from0),
             to = as.integer(to0),
             dpath = as.double(dpath),
             segmap = as.integer(segmap),
             huge = as.double(huge),
             nndist = as.double(nnd),
             nnwhich = as.integer(nnw),
             PACKAGE = "spatstat")
    # convert C indexing to R indexing
    nnw <- zz$nnwhich + 1L
    # any zeroes occur if points have no neighbours.
    nnw[nnw == 0] <- NA
  } else if(spatstat.options('Cnndistlpp')) {
    ## use new C routine
    Lseg <- L$lines
    Lvert <- L$vertices
    from <- L$from
    to <- L$to
    ##
    nseg <- length(from)
    seglen <- lengths.psp(Lseg)
    ## convert indices to start at 0
    from0 <- from - 1L
    to0 <- to - 1L
    segmap <- pro - 1L
    tp <- loco$tp
    ## sort by segment index (required by the C routine)
    oo <- order(segmap, tp)
    segmap <- segmap[oo]
    tp <- tp[oo]
    # upper bound on interpoint distance
    huge <- sum(seglen)
    #' numerical tolerance
    tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20)
    #' compute k+1 neighbours because each point finds itself first
    kmax1 <- kmax + 1L
    zz <- .C("linknnd",
             kmax = as.integer(kmax1),
             np = as.integer(n),
             sp = as.integer(segmap),
             tp = as.double(tp),
             nv = as.integer(Lvert$n),
             ns = as.integer(nseg),
             from = as.integer(from0),
             to = as.integer(to0),
             seglen = as.double(seglen),
             huge = as.double(huge),
             tol = as.double(tol),
             nndist = as.double(numeric(n * kmax1)),
             nnwhich = as.integer(integer(n * kmax1)),
             PACKAGE = "spatstat")
    ## undo the sorting: map sorted positions back to original order
    nnw <- matrix(, n, kmax1)
    nnw[oo, ] <- matrix(oo[zz$nnwhich + 1L], n, kmax1, byrow=TRUE)
    # drop first column which is j = i
    nnw <- nnw[, -1, drop=FALSE]
    colnames(nnw) <- paste0("which.", 1:ncol(nnw))
    nnw <- nnw[,kuse]
  } else {
    ## use fast code for nncross
    ## (k+1 because X's nearest neighbour in X is the point itself)
    nnw <- nncross(X, X, what="which", k=kuse+1)
    if(is.matrix(nnw) || is.data.frame(nnw))
      colnames(nnw) <- paste0("which.", kuse)
  }
  if(!is.null(dim(nnw))) {
    nnw <- as.matrix(nnw)
    rownames(nnw) <- NULL
  }
  if(!toomany) return(nnw)
  ## pad out columns for the infeasible values of k with NA
  result[, kuse] <- as.matrix(nnw)
  colnames(result) <- paste0("which.", 1:ncol(result))
  return(result[,k])
}
# nncross.lpp
# Identifies the nearest
neighbours in the shortest-path metric # from one point pattern on a linear network to ANOTHER pattern # on the SAME network. # nncross.lpp <- local({ nncross.lpp <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(inherits(Y, "lpp")) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) stopifnot(method %in% c("C", "interpreted")) if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) check <- resolve.defaults(list(...), list(check=TRUE))$check if(check && !identical(as.linnet(X, sparse=TRUE), as.linnet(Y, sparse=TRUE))) stop("X and Y are on different linear networks") # internal use only format <- resolve.defaults(list(...), list(format="data.frame"))$format nX <- npoints(X) nY <- npoints(Y) L <- domain(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) nndistmat <- if("dist" %in% what) matrix(Inf, nX, length(k)) else NULL nnwhichmat <- if("which" %in% what) matrix(NA_integer_, nX, length(k)) else NULL for(i in seq_along(subsets)) { subi <- subsets[[i]] Xi <- thinNetwork(X, retainvertices=subi) useX <- attr(Xi, "retainpoints") Yi <- thinNetwork(Y, retainvertices=subi) useY <- attr(Yi, "retainpoints") z <- nncross.lpp(Xi, Yi, iX = iX[useX], iY=iY[useY], what=what, k=k, method=method, format="list") if("dist" %in% what) nndistmat[useX, ] <- z$dist if("which" %in% what) nnwhichmat[useX, ] <- which(useY)[z$which] } return(shapedresult(dist=nndistmat, which=nnwhichmat, what=what, format=format)) } } koriginal <- k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) #' decide which algorithm to use #' fast C algorithm fast <- (method == "C") && (spatstat.options("Cnncrosslpp") || (kmax 
> 1)) #' slower C algorithm for exclusion case for k=1 excludeinC <- exclude && (method == "C") && !fast && (k == 1) excludeinR <- exclude && !excludeinC if(excludeinR) { #' compute k+1 neighbours in C, then filter in R kmax <- kmax+1 k <- 1:kmax } toomany <- (kmax > nY) if(toomany) { paddist <- matrix(Inf, nX, kmax) padwhich <- matrix(NA_integer_, nX, kmax) kmax <- nY kuse <- k[k <= kmax] } else { kuse <- k } if(length(kuse) == 0) { # None of the required values are defined nnd <- paddist nnw <- padwhich maxk <- max(k) colnames(nnd) <- paste0("dist.", seq_len(maxk)) colnames(nnd) <- paste0("dist.", seq_len(maxk)) nnd <- nnd[,k,drop=TRUE] nnw <- nnw[,k,drop=TRUE] return(shapedresult(dist=nnd, which=nnw, what=what, format=format)) } need.dist <- ("dist" %in% what) || excludeinR need.which <- ("which" %in% what) || excludeinR if(!fast) { ## require dpath matrix Xsparse <- identical(domain(X)$sparse, TRUE) Ysparse <- identical(domain(Y)$sparse, TRUE) L <- if(!Xsparse && Ysparse) as.linnet(X) else if(Xsparse && !Ysparse) as.linnet(Y) else as.linnet(X, sparse=FALSE) } else L <- as.linnet(X) # nX <- npoints(X) nY <- npoints(Y) P <- as.ppp(X) Q <- as.ppp(Y) # Lvert <- L$vertices from <- L$from to <- L$to if(fast) { seglengths <- lengths.psp(as.psp(L)) } else { dpath <- L$dpath } # deal with null cases if(nX == 0) return(shapedresult(dist=numeric(0), which=integer(0), what=what, format=format)) if(nY == 0) return(shapedresult(dist=rep(Inf, nX), which=rep(NA_integer_, nX), what=what, format=format)) # find nearest segment for each point Xcoords <- coords(X) Ycoords <- coords(Y) Xpro <- Xcoords$seg Ypro <- Ycoords$seg # handle serial numbers if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if(method == "interpreted") { ## interpreted code D <- crossdist(X, Y, method="interpreted") if(exclude) 
D[outer(iX, iY, "==")] <- Inf nnd <- nnw <- NULL if(need.dist) { nnd <- if(kmax == 1) apply(D, 1, min) else t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE] } if(need.which) { nnw <- if(kmax == 1) apply(D, 1, which.min) else t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE] } } else { ## C code ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L nseg <- length(from0) Xsegmap <- Xpro - 1L Ysegmap <- Ypro - 1L ## upper bound on interpoint distance huge <- if(!fast) { max(dpath) + 2 * diameter(Frame(L)) } else { sum(seglengths) } ## space for result nnd <- double(nX * kmax) nnw <- integer(nX * kmax) ## call C if(fast) { ## experimental faster code ooX <- order(Xsegmap) ooY <- order(Ysegmap) tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) if(kmax > 1) { zz <- .C("linknncross", kmax = as.integer(kmax), np = as.integer(nX), sp = as.integer(Xsegmap[ooX]), tp = as.double(Xcoords$tp[ooX]), nq = as.integer(nY), sq = as.integer(Ysegmap[ooY]), tq = as.double(Ycoords$tp[ooY]), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglengths), huge = as.double(huge), tol = as.double(tol), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") zznd <- matrix(zz$nndist, ncol=kmax, byrow=TRUE) zznw <- matrix(zz$nnwhich + 1L, ncol=kmax, byrow=TRUE) if(any(notfound <- (zznw == 0))) { zznd[notfound] <- NA zznw[notfound] <- NA } nnd <- matrix(nnd, nX, kmax) nnw <- matrix(nnw, nX, kmax) nnd[ooX, ] <- zznd nnw[ooX, ] <- ooY[zznw] colnames(nnd) <- colnames(nnw) <- seq_len(kmax) if(!identical(kuse, seq_len(kmax))) { nnd <- nnd[,kuse,drop=FALSE] nnw <- nnw[,kuse,drop=FALSE] if(length(kuse) == 1) { colnames(nnd) <- paste0("dist.", kuse) colnames(nnw) <- paste0("which.", kuse) } } } else { zz <- .C("linSnndwhich", np = as.integer(nX), sp = as.integer(Xsegmap[ooX]), tp = as.double(Xcoords$tp[ooX]), nq = as.integer(nY), sq = as.integer(Ysegmap[ooY]), tq = as.double(Ycoords$tp[ooY]), nv = 
as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglengths), huge = as.double(huge), tol = as.double(tol), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") zznd <- zz$nndist zznw <- zz$nnwhich + 1L if(any(notfound <- (zznw == 0))) { zznd[notfound] <- NA zznw[notfound] <- NA } nnd[ooX] <- zznd nnw[ooX] <- ooY[zznw] } } else { ## slower code requiring dpath matrix if(!excludeinC) { zz <- .C("linndcross", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") nnd <- zz$nndist nnw <- zz$nnwhich + 1L } else { ## excluding certain pairs (k=1) zz <- .C("linndxcross", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), idP = as.integer(iX), idQ = as.integer(iY), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE = "spatstat") nnd <- zz$nndist nnw <- zz$nnwhich + 1L } ## any zeroes occur if points have no neighbours. nnw[nnw == 0] <- NA } } if(toomany) { ## Nearest neighbours were undefined for some large values of k. 
## Insert results obtained for valid 'k' back into matrix of NA/Inf if(need.dist) { paddist[,kuse] <- as.matrix(nnd) nnd <- paddist } if(need.which) { padwhich[,kuse] <- as.matrix(nnw) nnw <- padwhich } } if(excludeinR) { ## now find neighbours that don't have the same id number if(!is.matrix(nnw)) nnw <- as.matrix(nnw, ncol=1) if(!is.matrix(nnd)) nnd <- as.matrix(nnd, ncol=1) avoid <- matrix(iX[as.vector(row(nnw))] != iY[as.vector(nnw)], nrow=nrow(nnw), ncol=ncol(nnw)) colind <- apply(avoid, 1, whichcoltrue, m=seq_len(ncol(avoid)-1)) colind <- if(is.matrix(colind)) t(colind) else matrix(colind, ncol=1) rowcol <- cbind(as.vector(row(colind)), as.vector(colind)) nnd <- matrix(nnd[rowcol], nrow=nX) nnw <- matrix(nnw[rowcol], nrow=nX) nnd <- nnd[,koriginal] nnw <- nnw[,koriginal] } return(shapedresult(dist=nnd, which=nnw, what=what, format=format)) } whichcoltrue <- function(x, m) which(x)[m] shapedresult <- function(dist, which, what=c("dist", "which"), format="data.frame") { #' idiom to return result in correct format result <- list(dist=dist, which=which)[what] if(format == "data.frame") result <- as.data.frame(result)[,,drop=TRUE] return(result) } nncross.lpp }) spatstat/R/satpiece.R0000644000176200001440000001120013333543255014236 0ustar liggesusers# # # satpiece.S # # $Revision: 1.17 $ $Date: 2018/03/15 07:37:41 $ # # Saturated pairwise interaction process with piecewise constant potential # # SatPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # SatPiece <- local({ # ..... auxiliary functions ...... delSP <- function(i, r, sat) { r <- r[-i] sat <- sat[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(SatPiece(r, sat)) } # ....... template object .......... 
BlankSatPiece <- list( name = "piecewise constant Saturated pairwise interaction process", creator = "SatPiece", family = "pairsat.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction thresholds", "saturation parameters"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(any(ceiling(sat) != floor(sat))) warning("saturation parameter has a non-integer value") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r ok <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(ok)) return(NULL) if(!any(ok)) return(Poisson()) bad <- !ok if(spatstat.options("project.fast") || sum(bad) == 1) { # 
remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delSP(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delSP, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL # added later ) class(BlankSatPiece) <- "interact" SatPiece <- function(r, sat) { instantiate.interact(BlankSatPiece, list(r=r, sat=sat)) } SatPiece <- intermaker(SatPiece, BlankSatPiece) SatPiece }) spatstat/R/infline.R0000644000176200001440000001551613333543255014103 0ustar liggesusers# # infline.R # # Infinite lines # # $Revision: 1.28 $ $Date: 2017/02/07 07:47:20 $ # infline <- function(a=NULL, b=NULL, h=NULL, v=NULL, p=NULL, theta=NULL) { if(is.null(a) != is.null(b)) stop("invalid specification of a,b") if(is.null(p) != is.null(theta)) stop("invalid specification of p,theta") if(!is.null(h)) out <- data.frame(a=h, b=0, h=h, v=NA, p=h, theta=pi/2) else if(!is.null(v)) out <- data.frame(a=NA,b=NA,h=NA,v=v,p=v,theta=ifelseAB(v < 0, pi, 0)) else if(!is.null(a)) { # a, b specified z <- data.frame(a=a,b=b) a <- z$a b <- z$b theta <- ifelseAX(b == 0, pi/2, atan(-1/b)) theta <- theta %% pi p <- a * sin(theta) out <- data.frame(a=a, b=b, h=ifelseXB(b==0, a, NA), v=NA, p=p, theta=theta) } else if(!is.null(p)) { # p, theta specified z <- data.frame(p=p,theta=theta) p <- z$p theta <- z$theta theta <- theta %% (2*pi) if(any(reverse <- (theta >= pi))) { theta[reverse] <- theta[reverse] - pi p[reverse] <- -p[reverse] } vert <- (theta == 0) horz <- (cos(theta) == 0) gene <- !(vert | horz) v <- ifelseXB(vert, p, NA) h <- ifelseXB(horz, p, NA) a <- ifelseXB(gene, p/sin(theta), NA) b <- ifelseXB(gene, -cos(theta)/sin(theta), 
NA) out <- data.frame(a=a,b=b,h=h,v=v,p=p,theta=theta) } else stop("No data given!") class(out) <- c("infline", class(out)) return(out) } is.infline <- function(x) { inherits(x, "infline") } plot.infline <- function(x, ...) { for(i in seq_len(nrow(x))) { xi <- as.list(x[i, 1:4]) xi[sapply(xi, is.na)] <- NULL do.call(abline, append(xi, list(...))) } return(invisible(NULL)) } print.infline <- function(x, ...) { n <- nrow(x) splat(n, "infinite", ngettext(n, "line", "lines")) print(as.data.frame(x), ...) return(invisible(NULL)) } clip.infline <- function(L, win) { # clip a set of infinite straight lines to a window win <- as.owin(win) stopifnot(inherits(L, "infline")) nL <- nrow(L) if(nL == 0) return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win)) seqL <- seq_len(nL) # determine circumcircle of win xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # convert line coordinates to origin (xmid, ymid) p <- L$p theta <- L$theta co <- cos(theta) si <- sin(theta) p <- p - xmid * co - ymid * si # compute intersection points with circumcircle hit <- (abs(p) < rmax) if(!any(hit)) return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win)) p <- p[hit] theta <- theta[hit] q <- sqrt(rmax^2 - p^2) co <- co[hit] si <- si[hit] id <- seqL[hit] X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, marks = factor(id, levels=seqL), window=boundbox, check=FALSE) # clip to window X <- X[win] return(X) } chop.tess <- function(X, L) { stopifnot(is.infline(L)) stopifnot(is.tess(X)||is.owin(X)) X <- as.tess(X) if(X$type == "image") { Xim <- X$image xr <- Xim$xrange yr <- Xim$yrange # extract matrices of pixel values and x, y coordinates Zmat <- as.integer(as.matrix(Xim)) xmat <- rasterx.im(Xim) ymat <- rastery.im(Xim) # process lines for(i in 
seq_len(nrow(L))) { # line i chops window into two pieces if(!is.na(h <- L[i, "h"])) { # horizontal line if(h > yr[1L] && h < yr[2L]) Zmat <- 2 * Zmat + (ymat > h) } else if(!is.na(v <- L[i, "v"])) { # vertical line if(v > xr[1L] && v < xr[2L]) Zmat <- 2 * Zmat + (xmat < v) } else { # generic line y = a + bx a <- L[i, "a"] b <- L[i, "b"] Zmat <- 2 * Zmat + (ymat > a + b * xmat) } } # Now just put back as factor image Zim <- im(Zmat, xcol=Xim$xcol, yrow=Xim$yrow, unitname=unitname(Xim)) Z <- tess(image=Zim) return(Z) } #---- polygonal computation -------- # get bounding box B <- as.rectangle(as.owin(X)) xr <- B$xrange yr <- B$yrange # get coordinates for(i in seq_len(nrow(L))) { # line i chops box B into two pieces if(!is.na(h <- L[i, "h"])) { # horizontal line if(h < yr[1L] || h > yr[2L]) Z <- NULL else { lower <- owin(xr, c(yr[1L], h)) upper <- owin(xr, c(h, yr[2L])) Z <- tess(tiles=list(lower,upper), window=B) } } else if(!is.na(v <- L[i, "v"])) { # vertical line if(v < xr[1L] || v > xr[2L]) Z <- NULL else { left <- owin(c(xr[1L], v), yr) right <- owin(c(v, xr[2L]), yr) Z <- tess(tiles=list(left,right), window=B) } } else { # generic line a <- L[i, "a"] b <- L[i, "b"] # Intersect with extended left and right sides of B yleft <- a + b * xr[1L] yright <- a + b * xr[2L] ylo <- min(yleft, yright, yr[1L]) - 1 yhi <- max(yleft, yright, yr[2L]) + 1 lower <- owin(poly=list(x=xr[c(1L,1L,2L,2L)], y=c(yleft,ylo,ylo,yright))) upper <- owin(poly=list(x=xr[c(1L,2L,2L,1L)], y=c(yleft,yright,yhi,yhi))) Bplus <- owin(xr, c(ylo, yhi), unitname=unitname(B)) Z <- tess(tiles=list(lower,upper), window=Bplus) } # intersect this simple tessellation with X if(!is.null(Z)) { X <- intersect.tess(X, Z) tilenames(X) <- paste("Tile", seq_len(length(tiles(X)))) } } return(X) } whichhalfplane <- function(L, x, y=NULL) { verifyclass(L, "infline") xy <- xy.coords(x, y) x <- xy$x y <- xy$y m <- length(x) n <- nrow(L) Z <- matrix(as.logical(NA_integer_), n, m) for(i in seq_len(n)) { if(!is.na(h <- 
L[i, "h"])) { #' horizontal line Z[i,] <- (y < h) } else if(!is.na(v <- L[i, "v"])) { #' vertical line Z[i,] <- (x < v) } else { #' generic line y = a + bx a <- L[i, "a"] b <- L[i, "b"] Z[i,] <- (y < a + b * x) } } return(Z) } rotate.infline <- function(X, angle=pi/2, ...) { if(nrow(X) == 0) return(X) Y <- with(X, infline(p = p, theta=theta + angle)) return(Y) } shift.infline <- function(X, vec=c(0,0), ...) { if(nrow(X) == 0) return(X) vec <- as2vector(vec) Y <- with(X, infline(p = p + vec[1L] * cos(theta) + vec[2L] * sin(theta), theta=theta)) return(Y) } reflect.infline <- function(X) { if(nrow(X) == 0) return(X) Y <- with(X, infline(p = p, theta=(theta + pi) %% (2 * pi))) return(Y) } flipxy.infline <- function(X) { if(nrow(X) == 0) return(X) Y <- with(X, infline(p = p, theta=(pi/2 - theta) %% (2 * pi))) return(Y) } spatstat/R/linequad.R0000644000176200001440000002253313333543255014256 0ustar liggesusers# # linequad.R # # $Revision: 1.14 $ $Date: 2017/06/05 10:31:58 $ # # create quadscheme for a pattern of points lying *on* line segments linequad <- function(X, Y, ..., eps=NULL, nd=1000, random=FALSE) { epsgiven <- !is.null(eps) if(is.lpp(X)) { # extract local coordinates from lpp object coo <- coords(X) mapXY <- coo$seg tp <- coo$tp Xproj <- as.ppp(X) if(!missing(Y) && !is.null(Y)) warning("Argument Y ignored when X is an lpp object") Y <- as.psp(X) } else if(is.ppp(X)) { # project data points onto segments stopifnot(is.psp(Y)) v <- project2segment(X, Y) Xproj <- v$Xproj mapXY <- v$mapXY tp <- v$tp } else stop("X should be an object of class lpp or ppp") # handle multitype ismulti <- is.multitype(X) if(is.marked(X) && !ismulti) stop("Not implemented for marked patterns") if(ismulti) { marx <- marks(X) flev <- factor(levels(marx)) } # win <- as.owin(Y) len <- lengths.psp(Y) nseg <- length(len) if(is.null(eps)) { stopifnot(is.numeric(nd) && length(nd) == 1L & is.finite(nd) && nd > 0) eps <- sum(len)/nd } else stopifnot(is.numeric(eps) && length(eps) == 1L && 
is.finite(eps) && eps > 0) ## if(is.lpp(X) && spatstat.options('Clinequad')) { L <- as.linnet(X) W <- Frame(L) V <- vertices(L) nV <- npoints(V) coordsV <- coords(V) coordsX <- coords(X) nX <- npoints(X) ooX <- order(coordsX$seg) ndumeach <- ceiling(len/eps) + 1L ndummax <- sum(ndumeach) maxdataperseg <- max(table(factor(coordsX$seg, levels=1:nsegments(L)))) maxscratch <- max(ndumeach) + maxdataperseg if(!ismulti) { if(!random) { z <- .C("Clinequad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ndat = as.integer(nX), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } else { z <- .C("ClineRquad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ndat = as.integer(nX), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } seqdum <- seq_len(z$ndum) dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], window=W, check=FALSE)) wdum <- z$wdum[seqdum] wdat <- numeric(nX) wdat[ooX] <- z$wdat dat <- as.ppp(X) } else { ntypes <- length(flev) ndummax <- ntypes * (ndummax + nX) maxscratch <- ntypes * maxscratch if(!random) { z <- .C("ClineMquad", ns = as.integer(nseg), from = 
as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ntypes = as.integer(ntypes), ndat = as.integer(nX), xdat = as.double(coordsX$x), ydat = as.double(coordsX$y), mdat = as.integer(as.integer(marx)-1L), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), mdum = as.integer(integer(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } else { z <- .C("ClineRMquad", ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ntypes = as.integer(ntypes), ndat = as.integer(nX), xdat = as.double(coordsX$x), ydat = as.double(coordsX$y), mdat = as.integer(as.integer(marx)-1L), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), mdum = as.integer(integer(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE = "spatstat") } seqdum <- seq_len(z$ndum) marques <- factor(z$mdum[seqdum] + 1L, labels=flev) dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], marks=marques, window=W, check=FALSE)) wdum <- z$wdum[seqdum] wdat <- numeric(nX) wdat[ooX] <- z$wdat dat <- as.ppp(X) } } else { ## older, interpreted code ## initialise quad scheme dat <- dum <- ppp(numeric(0), numeric(0), window=win) wdat <- wdum <- numeric(0) if(ismulti) marks(dat) <- marks(dum) <- marx[integer(0)] ## consider each segment in turn YY <- as.data.frame(Y) for(i in 
1:nseg) { ## divide segment into pieces of length eps ## with shorter bits at each end leni <- len[i] nwhole <- floor(leni/eps) if(leni/eps - nwhole < 0.5 && nwhole > 2) nwhole <- nwhole - 1 rump <- (leni - nwhole * eps)/2 brks <- c(0, rump + (0:nwhole) * eps, leni) nbrks <- length(brks) ## dummy points at middle of each piece sdum <- (brks[-1L] + brks[-nbrks])/2 x <- with(YY, x0[i] + (sdum/leni) * (x1[i]-x0[i])) y <- with(YY, y0[i] + (sdum/leni) * (y1[i]-y0[i])) newdum <- list(x=x, y=y) ndum <- length(sdum) IDdum <- 1:ndum ## relevant data points relevant <- (mapXY == i) newdat <- Xproj[relevant] sdat <- leni * tp[relevant] IDdat <- findInterval(sdat, brks, rightmost.closed=TRUE, all.inside=TRUE) ## determine weights w <- countingweights(id=c(IDdum, IDdat), areas=diff(brks)) wnewdum <- w[1:ndum] wnewdat <- w[-(1:ndum)] ## if(!ismulti) { ## unmarked pattern dat <- superimpose(dat, newdat, W=win, check=FALSE) dum <- superimpose(dum, newdum, W=win, check=FALSE) wdat <- c(wdat, wnewdat) wdum <- c(wdum, wnewdum) } else { ## marked point pattern ## attach correct marks to data points marks(newdat) <- marx[relevant] dat <- superimpose(dat, newdat, W=win, check=FALSE) wdat <- c(wdat, wnewdat) newdum <- as.ppp(newdum, W=win, check=FALSE) ## replicate dummy points with each mark ## also add points at data locations with other marks for(k in seq_len(length(flev))) { le <- flev[k] avoid <- (marks(newdat) != le) dum <- superimpose(dum, newdum %mark% le, newdat[avoid] %mark% le, W=win, check=FALSE) wdum <- c(wdum, wnewdum, wnewdat[avoid]) } } } } ## save parameters dmethod <- paste("Equally spaced along each segment at spacing eps =", signif(eps, 4), summary(unitname(X))$plural) if(!epsgiven) dmethod <- paste0(dmethod, "\nOriginal parameter nd = ", nd) wmethod <- "Counting weights based on segment length" param <- list(dummy = list(method=dmethod), weight = list(method=wmethod)) ## make quad scheme Qout <- quad(dat, dum, c(wdat, wdum), param=param) ## silently attach lines 
attr(Qout, "lines") <- Y return(Qout) } spatstat/R/geyer.R0000644000176200001440000003477213333543255013577 0ustar liggesusers# # # geyer.S # # $Revision: 2.42 $ $Date: 2018/03/15 07:37:41 $ # # Geyer's saturation process # # Geyer() create an instance of Geyer's saturation process # [an object of class 'interact'] # # Geyer <- local({ # .......... template .......... BlankGeyer <- list( name = "Geyer saturation process", creator = "Geyer", family = "pairsat.family", # evaluated later pot = function(d, par) { (d <= par$r) # same as for Strauss }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction distance","saturation parameter"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") if(!is.numeric(sat) || length(sat) != 1 || sat < 0) stop("saturation parameter sat must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Geyer saturation process")) stop("Tried to plot the wrong kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r sat <- inter$par$sat xlim <- resolve.1.default(list(xlim=c(0, 1.25 * r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * sat * (d <= r)) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), 
list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=dround(gamma))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1L]) sat <- self$par$sat return(is.finite(loggamma) && (is.finite(sat) || loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(any(!is.na(coeffs))) { loggamma <- coeffs[1L] if(!is.na(loggamma) && (abs(loggamma) <= epsilon)) return(0) } return(2 * r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, splitInf=FALSE, ..., halfway=FALSE, check=TRUE) { #' fast evaluator for Geyer interaction dont.complain.about(splitInf) if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Geyer") r <- potpars$r sat <- potpars$sat # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=check) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # determine saturated pair counts counts <- strausscounts(U, X, r, 
EqualPairs) satcounts <- pmin.int(sat, counts) satcounts <- matrix(satcounts, ncol=1) if(halfway) { # trapdoor used by suffstat() answer <- satcounts } else if(sat == Inf) { # no saturation: fast code answer <- 2 * satcounts } else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs) answer <- satcounts + change answer <- matrix(answer, ncol=1) } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) }, delta2 = function(X,inte,correction, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # h(X[i] | X) - h(X[i] | X[-j]) # Geyer interaction if(correction == "periodic") return(NULL) r <- inte$par$r sat <- inte$par$sat result <- geyerdelta2(X, r, sat, correction=correction, sparseOK=sparseOK) return(result) } ) class(BlankGeyer) <- "interact" Geyer <- function(r, sat) { instantiate.interact(BlankGeyer, list(r = r, sat=sat)) } Geyer <- intermaker(Geyer, BlankGeyer) Geyer }) # ........... externally visible auxiliary functions ......... 
geyercounts <- function(U, X, r, sat, Xcounts, EqualPairs) { # evaluate effect of adding dummy point or deleting data point # on saturated counts of other data points stopifnot(is.numeric(r)) stopifnot(is.numeric(sat)) # for C calls we need finite numbers stopifnot(is.finite(r)) stopifnot(is.finite(sat)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oU <- fave.order(U$x) Xsort <- X[oX] Usort <- U[oU] nX <- npoints(X) nU <- npoints(U) Xcountsort <- Xcounts[oX] # inverse: data point i has sorted position i' = rankX[i] rankX <- integer(nX) rankX[oX] <- seq_len(nX) rankU <- integer(nU) rankU[oU] <- seq_len(nU) # map from quadrature points to data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xsortindex <- rankX[Xindex] Usortindex <- rankU[Uindex] Cmap <- rep.int(-1L, nU) Cmap[Usortindex] <- Xsortindex - 1L # call C routine zz <- .C("Egeyer", nnquad = as.integer(nU), xquad = as.double(Usort$x), yquad = as.double(Usort$y), quadtodata = as.integer(Cmap), nndata = as.integer(nX), xdata = as.double(Xsort$x), ydata = as.double(Xsort$y), tdata = as.integer(Xcountsort), rrmax = as.double(r), ssat = as.double(sat), result = as.double(numeric(nU)), PACKAGE = "spatstat") result <- zz$result[rankU] return(result) } geyerdelta2 <- local({ geyerdelta2 <- function(X, r, sat, ..., sparseOK=FALSE, correction="none") { ## Sufficient statistic for second order conditional intensity ## Geyer model stopifnot(is.numeric(sat) && length(sat) == 1 && sat > 0) ## X could be a ppp or quad. if(is.ppp(X)) { # evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. 
h(X[i]|X) - h(X[i]|X[-j]) where h is first order cif statistic return(geydelppp(X, r, sat, correction, sparseOK)) } else if(is.quad(X)) { # evaluate \Delta_{u_i} \Delta_{u_j} S(x) for quadrature points u_i, u_j return(geydelquad(X, r, sat, correction, sparseOK)) } else stop("Internal error: X should be a ppp or quad object") } geydelppp <- function(X, r, sat, correction, sparseOK) { # initialise nX <- npoints(X) result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX, nX)) ## identify all r-close pairs (ordered pairs i ~ j) unweighted <- correction %in% c("border", "none") if(unweighted) { a <- closepairs(X, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) ## count number of r-neighbours for each point tvals <- table(factor(I, levels=1:nX)) } else { a <- weightedclosepairs(X, r, correction=correction, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) wIJ <- a$weight wmatrix <- sparseMatrix(i=I,j=J,x=wIJ, dims=c(nX,nX)) wJI <- wmatrix[cbind(J,I)] ## total edge-correction weight over r-neighbours for each point tvals <- tapplysum(wIJ, list(factor(I, levels=1:nX))) } # Compute direct part # (arising when i~j) tI <- tvals[I] tJ <- tvals[J] if(unweighted) { result[IJ] <- pmin(sat, tI) - pmin(sat, tI - 1L) + pmin(sat, tJ) - pmin(sat, tJ - 1L) } else { result[IJ] <- pmin(sat, tI) - pmin(sat, tI - wIJ) + pmin(sat, tJ) - pmin(sat, tJ - wJI) } # Compute indirect part # (arising when i~k and j~k for another point k) # First find all such triples ord <- (I < J) vees <- edges2vees(I[ord], J[ord], nX) # evaluate contribution of (k, i, j) II <- vees$j JJ <- vees$k KK <- vees$i tKK <- tvals[KK] if(unweighted) { contribKK <- pmin(sat, tKK) - 2 * pmin(sat, tKK-1L) + pmin(sat, tKK-2L) } else { wKKII <- wmatrix[cbind(KK, II)] wKKJJ <- wmatrix[cbind(KK, JJ)] contribKK <- ( pmin(sat, tKK) - pmin(sat, tKK-wKKII) - pmin(sat, tKK-wKKJJ) + pmin(sat, tKK-wKKII-wKKJJ) ) } # for each (i, j), sum the contributions over k 
if(!sparseOK) { II <- factor(II, levels=1:nX) JJ <- factor(JJ, levels=1:nX) # was: # delta3 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta3[is.na(delta3)] <- 0 delta3 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta3 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nX, nX)) } # symmetrise and combine result <- result + delta3 + t(delta3) return(result) } geydelquad <- function(Q, r, sat, correction, sparseOK) { Z <- is.data(Q) U <- union.quad(Q) nU <- npoints(U) nX <- npoints(Q$data) result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU, nU)) ## identify all r-close pairs U[i], U[j] unweighted <- correction %in% c("none", "border") if(unweighted) { a <- closepairs(U, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) } else { a <- weightedclosepairs(U, r, correction=correction, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) wIJ <- a$weight wmatrix <- sparseMatrix(i=I, j=J, x=wIJ, dims=c(nU,nU)) wJI <- wmatrix[cbind(J,I)] } ## tag which ones are data points zI <- Z[I] zJ <- Z[J] # count t(U[i], X) IzJ <- I[zJ] JzJ <- J[zJ] if(unweighted) { tvals <- table(factor(IzJ, levels=1:nU)) } else { wzJ <- wIJ[zJ] tvals <- tapplysum(wzJ, list(factor(IzJ, levels=1:nU))) } ## Compute direct part ## (arising when U[i]~U[j]) tI <- tvals[I] tJ <- tvals[J] if(unweighted) { tIJ <- tI - zJ tJI <- tJ - zI result[IJ] <- pmin(sat, tIJ + 1L) - pmin(sat, tIJ) + pmin(sat, tJI + 1L) - pmin(sat, tJI) } else { tIJ <- tI - zJ * wIJ tJI <- tJ - zI * wJI result[IJ] <- pmin(sat, tIJ + wIJ) - pmin(sat, tIJ) + pmin(sat, tJI + wJI) - pmin(sat, tJI) } # Compute indirect part # (arising when U[i]~X[k] and U[j]~X[k] for another point X[k]) # First find all such triples # Group close pairs X[k] ~ U[j] by index k spl <- split(IzJ, factor(JzJ, levels=1:nX)) grlen <- lengths(spl) # Assemble list of triples U[i], X[k], U[j] # by expanding each pair U[i], X[k] JJ <- unlist(spl[JzJ]) II <- rep(IzJ, grlen[JzJ]) KK <- rep(JzJ, 
grlen[JzJ]) # remove identical pairs i = j ok <- II != JJ II <- II[ok] JJ <- JJ[ok] KK <- KK[ok] # evaluate contribution of each triple tKK <- tvals[KK] zII <- Z[II] zJJ <- Z[JJ] if(unweighted) { tKIJ <- tKK - zII - zJJ contribKK <- pmin(sat, tKIJ + 2L) - 2 * pmin(sat, tKIJ + 1L) + pmin(sat, tKIJ) } else { wKKII <- wmatrix[cbind(KK, II)] wKKJJ <- wmatrix[cbind(KK, JJ)] tKIJ <- tKK - zII * wKKII - zJJ * wKKJJ contribKK <- (pmin(sat, tKIJ + wKKII + wKKJJ) - pmin(sat, tKIJ + wKKII) - pmin(sat, tKIJ + wKKJJ) + pmin(sat, tKIJ)) } # for each (i, j), sum the contributions over k if(!sparseOK) { II <- factor(II, levels=1:nU) JJ <- factor(JJ, levels=1:nU) # was: # delta4 <- tapply(contribKK, list(I=II, J=JJ), sum) # delta4[is.na(delta4)] <- 0 delta4 <- tapplysum(contribKK, list(I=II, J=JJ)) } else { delta4 <- sparseMatrix(i=II, j=JJ, x=contribKK, dims=c(nU, nU)) } # combine result <- result + delta4 return(result) } geyerdelta2 }) spatstat/R/fasp.R0000644000176200001440000001367213333543255013411 0ustar liggesusers# # fasp.R # # $Revision: 1.35 $ $Date: 2017/02/07 07:22:47 $ # # #----------------------------------------------------------------------------- # # creator fasp <- function(fns, which, formulae=NULL, dataname=NULL, title=NULL, rowNames=NULL, colNames=NULL, checkfv=TRUE) { stopifnot(is.list(fns)) stopifnot(is.matrix(which)) stopifnot(length(fns) == length(which)) n <- length(which) if(checkfv) for(i in seq_len(n)) if(!is.fv(fns[[i]])) stop(paste("fns[[", i, "]] is not an fv object", sep="")) # set row and column labels if(!is.null(rowNames)) rownames(which) <- rowNames if(!is.null(colNames)) colnames(which) <- colNames if(!is.null(formulae)) { # verify format and convert to character vector formulae <- FormatFaspFormulae(formulae, "formulae") # ensure length matches length of "fns" if(length(formulae) == 1L && n > 1L) # single formula - replicate it formulae <- rep.int(formulae, n) else stopifnot(length(formulae) == length(which)) } rslt <- list(fns=fns, 
which=which, default.formula=formulae, dataname=dataname, title=title) class(rslt) <- "fasp" return(rslt) } # subset extraction operator "[.fasp" <- function(x, I, J, drop=TRUE, ...) { verifyclass(x, "fasp") m <- nrow(x$which) n <- ncol(x$which) if(missing(I)) I <- 1:m if(missing(J)) J <- 1:n if(!is.vector(I) || !is.vector(J)) stop("Subset operator is only implemented for vector indices") # determine index subset for lists 'fns', 'titles' etc included <- rep.int(FALSE, length(x$fns)) w <- as.vector(x$which[I,J]) if(length(w) == 0) stop("result is empty") included[w] <- TRUE # if only one cell selected, and drop=TRUE: if((sum(included) == 1L) && drop) return(x$fns[included][[1L]]) # determine positions in shortened lists whichIJ <- x$which[I,J,drop=FALSE] newk <- cumsum(included) newwhich <- matrix(newk[whichIJ], ncol=ncol(whichIJ), nrow=nrow(whichIJ)) rownames(newwhich) <- rownames(x$which)[I] colnames(newwhich) <- colnames(x$which)[J] # default plotting formulae - could be NULL deform <- x$default.formula # create new fasp object Y <- fasp(fns = x$fns[included], formulae = if(!is.null(deform)) deform[included] else NULL, which = newwhich, dataname = x$dataname, title = x$title) return(Y) } dim.fasp <- function(x) { dim(x$which) } # print method print.fasp <- function(x, ...) { verifyclass(x, "fasp") cat(paste("Function array (class", sQuote("fasp"), ")\n")) dim <- dim(x$which) cat(paste("Dimensions: ", dim[1L], "x", dim[2L], "\n")) cat(paste("Title:", if(is.null(x$title)) "(None)" else x$title, "\n")) invisible(NULL) } # other methods as.fv.fasp <- function(x) do.call(cbind.fv, x$fns) dimnames.fasp <- function(x) { return(dimnames(x$which)) } "dimnames<-.fasp" <- function(x, value) { w <- x$which dimnames(w) <- value x$which <- w return(x) } pool.fasp <- local({ pool.fasp <- function(...) { Alist <- list(...) 
Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") nA <- length(Alist) if(nA == 0) return(NULL) ## validate.... ## All arguments must be fasp objects notfasp <- !unlist(lapply(Alist, inherits, what="fasp")) if(any(notfasp)) { n <- sum(notfasp) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notfasp)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("fasp")) stop(why) } ## All arguments must have envelopes notenv <- !unlist(lapply(Alist, has.env)) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "contain envelope data") stop(why) } if(nA == 1L) return(Alist[[1L]]) ## All arguments must have the same dimensions witches <- lapply(Alist, getElement, name="which") witch1 <- witches[[1L]] same <- unlist(lapply(witches, identical, y=witch1)) if(!all(same)) stop("Function arrays do not have the same array dimensions") ## OK. ## Pool envelopes at each position result <- Alist[[1L]] fns <- result$fns for(k in seq_along(fns)) { funks <- lapply(Alist, extractfun, k=k) fnk <- do.call(pool.envelope, funks) attr(fnk, "einfo")$Yname <- Yname fns[[k]] <- fnk } result$fns <- fns return(result) } has.env <- function(z) { all(unlist(lapply(z$fns, inherits, what="envelope"))) } extractfun <- function(z, k) { z$fns[[k]] } pool.fasp }) ## other functions FormatFaspFormulae <- local({ zapit <- function(x, argname) { if(inherits(x, "formula")) deparse(x) else if(is.character(x)) x else stop(paste("The entries of", sQuote(argname), "must be formula objects or strings")) } FormatFaspFormulae <- function(f, argname) { ## f should be a single formula object, a list of formula objects, ## a character vector, or a list containing formulae and strings. ## It will be converted to a character vector. 
result <- if(is.character(f)) f else if(inherits(f, "formula")) deparse(f) else if(is.list(f)) unlist(lapply(f, zapit, argname=argname)) else stop(paste(sQuote(argname), "should be a formula, a list of formulae,", "or a character vector")) return(result) } FormatFaspFormulae }) spatstat/R/dummify.R0000644000176200001440000000153413333543254014123 0ustar liggesusers# # dummify.R # # Convert a factor to a matrix of dummy variables, etc. # # $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $ # dummify <- function(x) { if(is.matrix(x) || is.data.frame(x)) { x <- as.data.frame(x) y <- do.call(data.frame, lapply(x, dummify)) return(as.matrix(y)) } # x is 1-dimensional if(is.complex(x)) return(as.matrix(data.frame(Re=Re(x), Im=Im(x)))) # convert factors etc if(is.character(x)) x <- factor(x) if(is.logical(x)) x <- factor(x, levels=c(FALSE,TRUE)) if(is.factor(x)) { # convert to dummy variables nx <- length(x) lev <- levels(x) y <- matrix(0L, nrow=nx, ncol=length(lev)) colnames(y) <- lev y[cbind(seq_len(nx), as.integer(x))] <- 1L return(y) } # convert to numeric y <- as.numeric(x) if(!is.matrix(y)) y <- matrix(y, ncol=1) return(y) } spatstat/R/clusterset.R0000644000176200001440000000432313333543254014645 0ustar liggesusers# # clusterset.R # # Allard-Fraley estimator of cluster region # # $Revision: 1.12 $ $Date: 2016/02/16 01:39:12 $ # clusterset <- function(X, what=c("marks", "domain"), ..., verbose=TRUE, fast=FALSE, exact=!fast) { stopifnot(is.ppp(X)) what <- match.arg(what, several.ok=TRUE) if(!missing(exact)) stopifnot(is.logical(exact)) if(fast && exact) stop("fast=TRUE is incompatible with exact=TRUE") # compute duplication exactly as in deldir, or the universe will explode X <- unique(unmark(X), rule="deldir", warn=TRUE) n <- npoints(X) W <- as.owin(X) # discretised Dirichlet tessellation if(verbose) cat("Computing Dirichlet tessellation...") if(fast || !exact) cellid <- as.im(nnfun(X), ...) 
# compute tile areas if(fast) { a <- table(factor(as.vector(as.matrix(cellid)), levels=1:n)) if(verbose) cat("done.\n") a <- a + 0.5 A <- sum(a) } else { d <- dirichlet(X) if(verbose) cat("done.\n") D <- tiles(d) suppressWarnings(id <- as.integer(names(D))) if(anyNA(id) && ("marks" %in% what)) stop("Unable to map Dirichlet tiles to data points") A <- area(W) a <- unlist(lapply(D, area)) } # determine optimal selection of tiles ntile <- length(a) o <- order(a) b <- cumsum(a[o]) m <- seq_len(ntile) logl <- -n * log(n) + m * log(m/b) + (n-m) * log((n-m)/(A-b)) mopt <- which.max(logl) picked <- o[seq_len(mopt)] ## map tiles to points if(!fast) picked <- id[picked] ## logical vector is.picked <- rep.int(FALSE, n) is.picked[picked] <- TRUE # construct result out <- list(marks=NULL, domain=NULL) if("marks" %in% what) { ## label points yesno <- factor(ifelse(is.picked, "yes", "no"), levels=c("no", "yes")) out$marks <- X %mark% yesno } if("domain" %in% what) { if(verbose) cat("Computing cluster set...") if(exact) { domain <- do.call(union.owin, unname(D[is.picked])) domain <- rebound.owin(domain, as.rectangle(W)) } else { domain <- eval.im(is.picked[cellid]) } out$domain <- domain if(verbose) cat("done.\n") } out <- if(length(what) == 1L) out[[what]] else out return(out) } spatstat/R/triplets.R0000644000176200001440000001250113333543255014314 0ustar liggesusers# # # triplets.R # # $Revision: 1.18 $ $Date: 2018/03/15 07:37:41 $ # # The triplets interaction # # Triplets() create an instance of the triplets process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Triplets <- local({ DebugTriplets <- FALSE # define triplet potential TripletPotential <- function(X,U,EqualPairs,pars,correction, ...) 
  {
    ## X: data points; U: quadrature points; EqualPairs: (X index, U index)
    ## pairs identifying data points among quadrature points.
    ## Returns, for each quadrature point, the number of r-close triangles
    ## it would belong to if added to the data pattern.
    if(!all(ok <- correction %in% c("border", "none"))) {
      nbad <- sum(bad <- !ok)
      warning(paste(ngettext(nbad, "Correction", "Corrections"),
                    commasep(sQuote(correction[bad])),
                    ngettext(nbad, "is unavailable and was ignored",
                             "are unavailable and were ignored")))
    }
    # check that all points of X are included in U
    nX <- npoints(X)
    nU <- npoints(U)
    XinU <- if(length(EqualPairs) == 0) integer(0) else EqualPairs[,1]
    missX <- which(table(factor(XinU, levels=1:nX)) == 0)
    if((nmiss <- length(missX)) > 0) {
      # add missing points to (the end of) U
      U <- superimpose(U, X[missX], W=as.owin(X), check=FALSE)
      EqualPairs <- rbind(EqualPairs, cbind(missX, nU + 1:nmiss))
      nU <- nU + nmiss
    }
    iXX <- EqualPairs[,1]
    iXU <- EqualPairs[,2]
    # construct map from X index to U index
    mapXU <- integer(nX)
    mapXU[iXX] <- iXU
    # construct map from U index to X index
    mapUX <- rep.int(NA_integer_, nU)
    mapUX[iXU] <- iXX
    # logical vector identifying which quadrature points are in X
    isdata <- rep.int(FALSE, nU)
    isdata[iXU] <- TRUE
    # identify all close pairs u, x
    r <- pars$r
    cp <- crosspairs(U, X, r, what="indices")
    if(DebugTriplets)
      cat(paste("crosspairs at distance", r, "yields", length(cp$i), "pairs\n"))
    IU <- cp$i
    J <- cp$j
    # map X index to U index
    JU <- mapXU[J]
    # Each (Xi, Xj) pair will appear twice - eliminate duplicates
    dupX <- isdata[IU] & isdata[JU] & (IU > JU)
    retain <- !dupX
    IU <- IU[retain]
    JU <- JU[retain]
    if(DebugTriplets)
      cat(paste(sum(dupX), "duplicate pairs removed\n"))
    # find all triangles
    tri <- edges2triangles(IU, JU, nU, friendly=isdata)
    if(DebugTriplets)
      cat(paste(nrow(tri), "triangles identified\n"))
    if(nrow(tri) == 0) {
      # there are no triangles; return vector of zeroes
      return(rep.int(0, nU-nmiss))
    }
    # count triangles containing a given quadrature point
    tcount <- apply(tri, 2,
                    function(x, n) { table(factor(x, levels=1:n)) },
                    n=nU)
    tcount <- .rowSums(tcount, nrow(tcount), ncol(tcount))
    # select triangles consisting only of data points
    triX <- matrix(mapUX[tri], nrow=nrow(tri))
    isX <- matrowall(!is.na(triX))
    triX <- triX[isX, , drop=FALSE]
    #
    if(nrow(triX) > 0) {
      # count triangles of data points containing each given data point
      tXcount <- apply(triX, 2,
                       function(x, n) { table(factor(x, levels=1:n)) },
                       n=nX)
      tXcount <- .rowSums(tXcount, nrow(tXcount), ncol(tXcount))
    } else {
      # there are no triangles of data points
      tXcount <- rep.int(0, nX)
    }
    # for data points use the data-only triangle count
    answer <- tcount
    answer[iXU] <- tXcount[iXX]
    if(DebugTriplets)
      cat(paste("Max suff stat: data ", max(tXcount),
                ", dummy ", max(tcount[isdata]), "\n", sep=""))
    # truncate to original size (drop points appended above)
    if(nmiss > 0)
      answer <- answer[-((nU-nmiss+1):nU)]
    return(answer)
  }

  # set up basic 'triplets' object except for family and parameters
  BlankTripletsObject <-
    list(
      name     = "Triplets process",
      creator  = "Triplets",
      family   = "triplet.family", # evaluated later
      pot      = TripletPotential,
      par      = list(r=NULL), # filled in later
      parnames = "interaction distance",
      hasInf   = FALSE,
      init     = function(self) {
        ## validate the interaction distance
        r <- self$par$r
        if(!is.numeric(r) || length(r) != 1 || r <= 0)
          stop("interaction distance r must be a positive number")
      },
      update = NULL, # default OK
      print = NULL,  # default OK
      interpret = function(coeffs, self) {
        ## back-transform fitted coefficient to interaction parameter gamma
        loggamma <- as.numeric(coeffs[1])
        gamma <- exp(loggamma)
        return(list(param=list(gamma=gamma),
                    inames="interaction parameter gamma",
                    printable=dround(gamma)))
      },
      valid = function(coeffs, self) {
        ## the process is well-defined only for gamma <= 1 (inhibition)
        gamma <- ((self$interpret)(coeffs, self))$param$gamma
        return(is.finite(gamma) && (gamma <= 1))
      },
      project = function(coeffs, self) {
        ## nearest valid model: Poisson, if coefficients are invalid
        if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
      },
      irange = function(self, coeffs=NA, epsilon=0, ...)
      {
        ## interaction range: r unless the interaction is effectively null
        r <- self$par$r
        if(anyNA(coeffs))
          return(r)
        loggamma <- coeffs[1]
        if(abs(loggamma) <= epsilon)
          return(0)
        else
          return(r)
      },
      version=NULL # to be added
    )
  class(BlankTripletsObject) <- "interact"

  # define Triplets function: fill in the interaction distance r
  Triplets <- function(r) {
    instantiate.interact(BlankTripletsObject, list(r=r))
  }

  Triplets <- intermaker(Triplets, BlankTripletsObject)

  Triplets
})
spatstat/R/linnetsurgery.R0000644000176200001440000002102213555761570015366 0ustar liggesusers#'
#' linnetsurgery.R
#'
#' Surgery on linear networks and related objects
#'
#' $Revision: 1.22 $ $Date: 2019/10/29 07:00:02 $
#'

#' insertVertices: insert new vertices (given as an lpp or as arguments
#' acceptable to as.lpp) into a linear network L (or the network of an
#' lpp object), splitting the segments that contain them.
#' The result carries attribute "id": the new serial numbers of the
#' inserted vertices.
insertVertices <- function(L, ...) {
  if(!inherits(L, c("lpp", "linnet")))
    stop("L should be a linear network (linnet) or point pattern (lpp)",
         call.=FALSE)
  if(haspoints <- is.lpp(L)) {
    ## remember the data points; operate on the underlying network
    X <- L
    L <- as.linnet(L)
    cooXnew <- cooXold <- coords(X)
    segXold <- cooXold$seg
    tpXold <- cooXold$tp
  }
  ## validate new vertices
  V <- as.lpp(..., L=L)
  if(!identical(as.linnet(L, sparse=TRUE), as.linnet(V, sparse=TRUE)))
    stop("New vertices must lie on exactly the same network as L")
  if(npoints(V) == 0) {
    ## nothing to insert
    attr(L, "id") <- integer(0)
    if(!haspoints) {
      return(L)
    } else {
      X$domain <- L
      return(X)
    }
  }
  ## extract new vertex coordinates
  co <- coords(V)
  seg <- co$seg
  tp <- co$tp
  ## determine which segments will be split,
  ## and compute new serial numbers for the un-split segments
  splitsegments <- sortunique(seg)
  notsplit <- rep(TRUE, nsegments(L))
  notsplit[splitsegments] <- FALSE
  segmap <- cumsum(notsplit)
  nunsplit <- sum(notsplit)
  ## existing vertices
  v <- L$vertices
  n <- npoints(v)
  ## initialise running totals of added vertices/edges
  nadd <- 0
  vadd <- list(x=numeric(0), y=numeric(0))
  fromadd <- toadd <- id <- integer(0)
  ## split segments containing new vertices
  for(theseg in splitsegments) {
    ## find new vertices lying on segment 'theseg'
    i <- L$from[theseg]
    j <- L$to[theseg]
    those <- (seg == theseg)
    idthose <- which(those)
    ## order the new vertices along this segment
    tt <- tp[those]
    oo <- order(tt)
    tt <- tt[oo]
    idadd <- idthose[oo]
    ## make new vertices by linear interpolation along the segment
    nnew <- length(tt)
    xnew <- with(v, x[i] + tt * diff(x[c(i,j)]))
    ynew <- with(v, y[i] + tt * diff(y[c(i,j)]))
    vnew <- list(x=xnew, y=ynew)
    ## make new edges: chain i -> new vertices -> j
    kk <- n + nadd + (1:nnew)
    fromnew <- c(i, kk)
    tonew <- c(kk, j)
    nnewseg <- nnew + 1
    ## add new vertices and edges to running total
    nadd <- nadd + nnew
    vadd <- concatxy(vadd, vnew)
    fromadd <- c(fromadd, fromnew)
    toadd <- c(toadd, tonew)
    id <- c(id, idadd)
    ## handle data points if any: remap (seg, tp) onto the sub-segments
    if(haspoints && any(relevant <- (segXold == theseg))) {
      tx <- tpXold[relevant]
      ttt <- c(0, tt, 1)
      m <- findInterval(tx, ttt, rightmost.closed=TRUE, all.inside=TRUE)
      t0 <- ttt[m]
      t1 <- ttt[m+1L]
      tpXnew <- (tx - t0)/(t1-t0)
      tpXnew <- pmin(1, pmax(0, tpXnew))
      ## serial number of the first sub-segment of 'theseg' in the new network
      n0 <- nunsplit + length(fromadd) - nnewseg
      segXnew <- n0 + m
      cooXnew$seg[relevant] <- segXnew
      cooXnew$tp[relevant] <- tpXnew
    }
  }
  ## assemble the new network: unsplit edges first, then the new sub-edges
  newfrom <- c(L$from[-splitsegments], fromadd)
  newto <- c(L$to[-splitsegments], toadd)
  newv <- superimpose(v, vadd, check=FALSE)
  Lnew <- linnet(newv, edges=cbind(newfrom, newto),
                 sparse=identical(L$sparse, TRUE))
  ## newid[k] = vertex serial number of the k-th requested new vertex
  newid <- integer(nadd)
  newid[id] <- n + 1:nadd
  attr(Lnew, "id") <- newid
  if(!haspoints)
    return(Lnew)
  ## adjust segment id for data points on segments that were not split
  Xnotsplit <- notsplit[segXold]
  cooXnew$seg[Xnotsplit] <- segmap[segXold[Xnotsplit]]
  Xnew <- lpp(cooXnew, Lnew)
  marks(Xnew) <- marks(X)
  attr(Xnew, "id") <- newid
  return(Xnew)
}

#' joinVertices: add new edges joining existing vertices 'from' and 'to'
#' (or a 2-column matrix given as 'from') to the network.
joinVertices <- function(L, from, to) {
  if(!inherits(L, c("lpp", "linnet")))
    stop("L should be a linear network (linnet) or point pattern (lpp)",
         call.=FALSE)
  if(haspoints <- is.lpp(L)) {
    X <- L
    L <- as.linnet(L)
    Xdf <- as.data.frame(X)
  }
  ## allow a single 2-column matrix of (from, to) pairs
  if((missing(to) || is.null(to)) && !is.null(dim(from)) && ncol(from) == 2) {
    to <- from[,2]
    from <- from[,1]
  }
  newfrom <- as.integer(from)
  newto <- as.integer(to)
  edges <- cbind(c(L$from, newfrom), c(L$to, newto))
  Lnew <- linnet(vertices(L), edges=edges, sparse=L$sparse)
  if(!is.null(L$toler)) Lnew$toler <- L$toler
  if(!haspoints)
    return(Lnew)
  ## re-attach the data points to the modified network
  X <- lpp(Xdf, Lnew)
  return(X)
}

#' repairNetwork: enforce the network invariants from <= to,
#' no duplicate edges, no loops; adjust lpp coordinates accordingly.
repairNetwork <- function(X) {
  if(!inherits(X, c("linnet", "lpp")))
    stop("X should be a linnet or lpp object", call.=FALSE)
  L <- as.linnet(X)
  from <- L$from
  to <- L$to
  reverse <- (from > to)
  if(any(reverse)) {
    ## flip edges so that from <= to, swapping segment endpoints too
    newfrom <- ifelse(reverse, to, from)
    newto <- ifelse(reverse, from, to)
    from <- L$from <- newfrom
    to <- L$to <- newto
    L$lines$ends[reverse,] <- L$lines$ends[reverse, c(3,4,1,2)]
    if(is.lpp(X)) {
      X$domain <- L
    } else {
      X <- L
    }
  }
  ## identify duplicated edges and loops (from == to)
  edgepairs <- cbind(from, to)
  retainedges <- !duplicated(as.data.frame(edgepairs)) & (from != to)
  keepall <- all(retainedges)
  if(is.lpp(X) && (!keepall || any(reverse))) {
    #' adjust segment coordinates
    cooX <- coords(X) # hyperframe, may include marks
    oldseg <- as.integer(unlist(cooX$seg))
    oldtp <- as.numeric(unlist(cooX$tp))
    if(keepall) {
      newseg <- oldseg
    } else {
      ## remap points on a duplicate edge to its first occurrence
      segmap <- uniquemap(as.data.frame(edgepairs))
      newseg <- segmap[oldseg]
    }
    ## reflect tp where the segment orientation was flipped
    newtp <- ifelse(reverse[oldseg], 1 - oldtp, oldtp)
    cooX$seg <- newseg
    cooX$tp <- newtp
    coords(X) <- cooX
  }
  if(keepall)
    return(X)
  Y <- thinNetwork(X, retainedges=retainedges)
  return(Y)
}

thinNetwork <- function(X, retainvertices, retainedges) {
  ## thin a network by retaining only the specified edges and/or vertices
  if(!inherits(X, c("linnet", "lpp")))
    stop("X should be a linnet or lpp object", call.=FALSE)
  gotvert <- !missing(retainvertices)
  gotedge <- !missing(retainedges)
  if(!gotedge && !gotvert)
    return(X)
  L <- as.linnet(X)
  from <- L$from
  to <- L$to
  V <- L$vertices
  sparse <- identical(L$sparse, TRUE)
  #' determine which edges/vertices are to be retained
  #' (indices are converted to logical vectors)
  edgesFALSE <- logical(nsegments(L))
  verticesFALSE <- logical(npoints(V))
  if(!gotedge) {
    retainedges <- edgesFALSE
  } else if(!is.logical(retainedges)) {
    z <- edgesFALSE
    z[retainedges] <- TRUE
    retainedges <- z
  }
  if(!gotvert) {
    retainvertices <- verticesFALSE
  } else if(!is.logical(retainvertices)) {
    z <- verticesFALSE
    z[retainvertices] <- TRUE
    retainvertices <- z
  }
  if(gotvert && !gotedge) {
    ## retain all edges between retained vertices
    retainedges <- retainvertices[from] & retainvertices[to]
  } else if(gotedge) {
    ## retain vertices required for the retained edges
    retainvertices[from[retainedges]] <- TRUE
    retainvertices[to[retainedges]] <- TRUE
  }
  ## assign new serial numbers to vertices, and recode
  Vsub <- V[retainvertices]
  newserial <- cumsum(retainvertices)
  newfrom <- newserial[from[retainedges]]
  newto <- newserial[to[retainedges]]
  ## remove duplicate segments (after normalising orientation from <= to)
  reverse <- (newfrom > newto)
  edgepairs <- cbind(ifelse(reverse, newto, newfrom),
                     ifelse(reverse, newfrom, newto))
  nontrivial <- (newfrom != newto) & !duplicated(edgepairs)
  edgepairs <- edgepairs[nontrivial,,drop=FALSE]
  reverse <- reverse[nontrivial]
  ## extract relevant subset of network
  Lsub <- linnet(Vsub, edges=edgepairs, sparse=sparse)
  ## tack on information about subset
  attr(Lsub, "retainvertices") <- retainvertices
  attr(Lsub, "retainedges") <- retainedges
  ## done?
  if(inherits(X, "linnet"))
    return(Lsub)
  ## X is an lpp object
  ## Find data points that lie on accepted segments
  dat <- X$data # hyperframe, may include marks
  ok <- retainedges[unlist(dat$seg)]
  dsub <- dat[ok, , drop=FALSE]
  ## compute new serial numbers for retained segments
  segmap <- cumsum(retainedges)
  oldseg <- as.integer(unlist(dsub$seg))
  dsub$seg <- newseg <- segmap[oldseg]
  ## adjust tp coordinate if segment endpoints were reversed
  if(any(revseg <- reverse[newseg])) {
    tp <- as.numeric(unlist(dsub$tp))
    dsub$tp[revseg] <- 1 - tp[revseg]
  }
  # make new lpp object
  Y <- ppx(data=dsub, domain=Lsub, coord.type=as.character(X$ctype))
  class(Y) <- c("lpp", class(Y))
  ## tack on information about subset
  attr(Y, "retainpoints") <- ok
  return(Y)
}

#' validate.lpp.coords: consistency check for the internal (seg, tp)
#' coordinates of an lpp object, by re-projecting the spatial coordinates
#' onto the network and comparing. Returns TRUE/FALSE; if fatal=TRUE,
#' stops instead of returning FALSE.
validate.lpp.coords <- function(X, fatal=TRUE, context="") {
  ## check for mangled internal data
  proj <- project2segment(as.ppp(X), as.psp(as.linnet(X)))
  seg.claimed <- coords(X)$seg
  seg.mapped <- proj$mapXY
  if(any(seg.claimed != seg.mapped)) {
    whinge <- paste("Incorrect segment id", context)
    if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE)
    return(FALSE)
  }
  tp.claimed <- coords(X)$tp
  tp.mapped <- proj$tp
  ## tolerate small numerical discrepancies in tp
  v <- max(abs(tp.claimed - tp.mapped))
  if(v > 0.01) {
    whinge <- paste("Incorrect 'tp' coordinate",
                    paren(paste("max discrepancy", v)),
                    context)
    if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE)
    return(FALSE)
  }
  return(TRUE)
}
spatstat/R/nncorr.R0000644000176200001440000001154413421506136013750 0ustar liggesusers#
# nncorr.R
#
# $Revision: 1.12 $ $Date: 2019/01/22 03:08:57 $
#

## nnmean: mean of the mark of the k-th nearest neighbour, with a border
## correction; returns unnormalised and normalised (by the overall mean
## mark) values, one column per numeric mark variable.
nnmean <- function(X, k=1, na.action="warn") {
  stopifnot(is.ppp(X))
  if(!is.marked(X, na.action=na.action))
    stop("X must be a marked point pattern", call.=FALSE)
  if(k %% 1 != 0 || length(k) != 1 || k <= 0)
    stop("k should be a single integer greater than 0", call.=FALSE)
  m <- numeric.columns(marks(X), logical=TRUE, others="na")
  ## default result (all NA) used when the computation is impossible
  nana <- rep(NA_real_, ncol(m))
  ans <- rbind(unnormalised=nana,
               normalised=nana)
  ##
  if(all(is.na(m))) {
    warning("non-numeric marks; results are NA", call.=FALSE)
  } else if(k >= npoints(X)) {
    warning(paste("Not enough points to compute k-th nearest neighbours",
                  paste0(paren(paste0("n = ", npoints(X), ", k = ", k)), ";"),
                  "results are NA"),
            call.=FALSE)
  } else {
    nnid <- nnwhich(X, k=k)
    ## border correction: use only points whose k-th neighbour distance
    ## does not exceed their distance to the window boundary
    ok <- (nndist(X, k=k) <= bdist.points(X))
    if(!any(ok, na.rm=TRUE)) {
      warning("insufficient data remaining after border correction; results are NA")
    } else {
      numer <- sapply(as.data.frame(m[nnid[ok], ]), mean, na.rm=TRUE)
      denom <- sapply(as.data.frame(m), mean, na.rm=TRUE)
      ans <- rbind(unnormalised=numer,
                   normalised =numer/denom)
    }
  }
  ## simplify to a vector when there is a single mark column
  if(ncol(ans) == 1) ans <- ans[,1,drop=TRUE]
  return(ans)
}

## nnvario: nearest-neighbour variogram-type index, computed via nncorr
## with the squared-half-difference test function.
nnvario <- local({

  nnvario <- function(X, k=1, na.action="warn") {
    stopifnot(is.ppp(X))
    if(!is.marked(X, na.action=na.action))
      stop("X must be a marked point pattern", call.=FALSE)
    m <- numeric.columns(marks(X), logical=TRUE, others="na")
    if(all(is.na(m)))
      warning("non-numeric marks; results are NA", call.=FALSE)
    ans <- nncorr(X %mark% m, sqdif, k=k,
                  denominator=diag(var(m)),
                  na.action="ignore")
    return(ans)
  }

  ## test function: half the squared mark difference (variogram kernel)
  sqdif <- function(m1,m2) { ((m1-m2)^2)/2 }

  nnvario
})

## nncorr: nearest-neighbour mark correlation/covariance-type indices.
## f is a test function f(m1, m2) applied to the marks of each point and
## its k-th nearest neighbour (border-corrected); returns unnormalised,
## normalised, and (for the product test function) Pearson/Kendall/
## Spearman correlation values. Handles multiple mark columns recursively.
nncorr <- function(X,
                   f = function(m1,m2) { m1 * m2},
                   k = 1,
                   ...,
                   use = "all.obs",
                   method = c("pearson", "kendall", "spearman"),
                   denominator=NULL,
                   na.action="warn") {
  stopifnot(is.ppp(X))
  if(!is.marked(X, na.action=na.action))
    stop("X must be a marked point pattern", call.=FALSE)
  if(k %% 1 != 0 || length(k) != 1 || k <= 0)
    stop("k should be a single integer greater than 0", call.=FALSE)
  if(k >= npoints(X))
    stop("Not enough points to compute k-th nearest neighbours")
  m <- as.data.frame(marks(X))
  nv <- ncol(m)
  if(nv == 1) colnames(m) <- ""
  #
  if(missing(method) || is.null(method))
    method <- "pearson"
  #
  if(missing(f)) f <- NULL
  if(!is.null(f) && !is.function(f)) {
    if(nv == 1) stop("f should be a function")
    # could be a list of functions, one per mark column
    if(!(is.list(f) && all(unlist(lapply(f, is.function)))))
      stop("f should be a function or a list of functions")
    if(length(f) != nv)
      stop("Length of list f does not match number of mark variables")
  }
  # optional denominator(s)
  if(!is.null(denominator) && !(length(denominator) %in% c(1, nv)))
    stop("Denominator has incorrect length")
  # multi-dimensional case: recurse over mark columns
  if(nv > 1) {
    # replicate things
    if(is.function(f)) f <- rep.int(list(f), nv)
    if(length(denominator) <= 1) denominator <- rep.int(list(denominator), nv)
    #
    result <- matrix(NA, nrow=3, ncol=nv)
    outnames <- c("unnormalised", "normalised", "correlation")
    dimnames(result) <- list(outnames, colnames(m))
    for(j in 1:nv) {
      mj <- m[,j, drop=FALSE]
      denj <- denominator[[j]]
      nncj <- nncorr(X %mark% mj, f=f[[j]], k=k,
                     use=use, method=method, denominator=denj)
      kj <- length(nncj)
      result[1:kj,j] <- nncj
    }
    ## drop the 'correlation' row if no column produced one
    if(all(is.na(result[3, ])))
      result <- result[1:2, ]
    return(result)
  }
  # one-dimensional
  m <- m[,1,drop=TRUE]
  # select 'f' appropriately for X
  chk <- check.testfun(f, X=X)
  f <- chk$f
  ftype <- chk$ftype
  # denominator: expected value of f for independent random marks
  Efmm <-
    if(!is.null(denominator)) denominator else
    switch(ftype,
           mul={ mean(m)^2 },
           equ={ sum(table(m)^2)/length(m)^2 },
           general={ mean(outer(m, m, f, ...)) })
  # border method: keep points whose k-th neighbour is closer than the boundary
  nn <- nnwhich(X, k=k)
  ok <- (nndist(X, k=k) <= bdist.points(X))
  if(!any(ok))
    stop("Insufficient data")
  mY <- m[nn[ok]]
  mX <- m[ok]
  Efmk <- switch(ftype,
                 mul = { mean(mX * mY, ...) },
                 equ = { mean(mX == mY, ...) },
                 general = { mean(f(mX, mY, ...)) })
  #
  answer <- c(unnormalised=Efmk,
              normalised=Efmk/Efmm)
  if(ftype == "mul") {
    ## classical correlation only meaningful for the product test function
    classic <- cor(mX, mY, use=use, method=method)
    answer <- c(answer, correlation=classic)
  }
  return(answer)
}
spatstat/R/polygood.R0000644000176200001440000001354713552235742014313 0ustar liggesusers#'
#' polygood.R
#'
#' Check validity of polygon data
#'
#' $Revision: 1.2 $ $Date: 2017/06/05 10:31:58 $
#'

#' check validity of a polygonal owin: duplicated vertices,
#' self-intersection, duplication of the outer frame, and crossings
#' between different boundary polygons. Returns a logical with
#' attributes "notes" (messages) and "err" (unique error categories).
owinpolycheck <- function(W, verbose=TRUE) {
  verifyclass(W, "owin")
  stopifnot(W$type == "polygonal")
  # extract stuff
  B <- W$bdry
  npoly <- length(B)
  outerframe <- owin(W$xrange, W$yrange)
  # can't use as.rectangle here; we're still checking validity
  boxarea.mineps <- area.owin(outerframe) * (1 - 0.00001)
  # detect very large datasets (report progress polygon-by-polygon)
  BS <- object.size(B)
  blowbyblow <- verbose && (BS > 1e4 || npoly > 20)
  #
  answer <- TRUE
  notes <- character(0)
  err <- character(0)
  # check for duplicated points, self-intersection, outer frame
  if(blowbyblow) {
    cat(paste("Checking", npoly, ngettext(npoly, "polygon...", "polygons...")))
    pstate <- list()
  }
  dup <- self <- is.box <- logical(npoly)
  for(i in 1:npoly) {
    if(blowbyblow && npoly > 1L)
      pstate <- progressreport(i, npoly, state=pstate)
    Bi <- B[[i]]
    # check for duplicated vertices
    dup[i] <- as.logical(anyDuplicated(ppp(Bi$x, Bi$y,
                                           window=outerframe, check=FALSE)))
    if(dup[i] && blowbyblow)
      message(paste("Polygon", i, "contains duplicated vertices"))
    # check for self-intersection
    self[i] <- xypolyselfint(B[[i]], proper=TRUE, yesorno=TRUE)
    if(self[i] && blowbyblow)
      message(paste("Polygon", i, "is self-intersecting"))
    # check whether one of the current boundary polygons
    # is the bounding box itself
    # (with + sign)
    is.box[i] <- (length(Bi$x) == 4) && (Area.xypolygon(Bi) >= boxarea.mineps)
  }
  if(blowbyblow) cat("done.\n")
  ## summarise duplicated-vertex failures
  if((ndup <- sum(dup)) > 0) {
    whinge <- paste(ngettext(ndup, "Polygon", "Polygons"),
                    if(npoly == 1L) NULL else commasep(which(dup)),
                    ngettext(ndup, "contains", "contain"),
                    "duplicated vertices")
    notes <- c(notes, whinge)
    err <- c(err, "duplicated vertices")
    if(verbose) message(whinge)
    answer <- FALSE
  }
  ## summarise self-intersection failures
  if((nself <- sum(self)) > 0) {
    whinge <- paste(ngettext(nself, "Polygon", "Polygons"),
                    if(npoly == 1L) NULL else commasep(which(self)),
                    ngettext(nself, "is", "are"),
                    "self-intersecting")
    notes <- c(notes, whinge)
    if(verbose) message(whinge)
    err <- c(err, "self-intersection")
    answer <- FALSE
  }
  ## at most one polygon may coincide with the outer frame
  if(sum(is.box) > 1L) {
    answer <- FALSE
    whinge <- paste("Polygons",
                    commasep(which(is.box)),
                    "coincide with the outer frame")
    notes <- c(notes, whinge)
    err <- c(err, "polygons duplicating the outer frame")
  }
  # check for crossings between different polygons
  cross <- matrix(FALSE, npoly, npoly)
  if(npoly > 1L) {
    if(blowbyblow) {
      cat(paste("Checking for cross-intersection between",
                npoly, "polygons..."))
      pstate <- list()
    }
    P <- lapply(B, xypolygon2psp, w=outerframe, check=FALSE)
    ## pairwise crossing test (frame polygons are exempt)
    for(i in seq_len(npoly-1L)) {
      if(blowbyblow)
        pstate <- progressreport(i, npoly-1L, state=pstate)
      Pi <- P[[i]]
      for(j in (i+1L):npoly) {
        crosses <- if(is.box[i] || is.box[j]) FALSE else {
          anycrossing.psp(Pi, P[[j]])
        }
        cross[i,j] <- cross[j,i] <- crosses
        if(crosses) {
          answer <- FALSE
          whinge <- paste("Polygons", i, "and", j, "cross over")
          notes <- c(notes, whinge)
          if(verbose) message(whinge)
          err <- c(err, "overlaps between polygons")
        }
      }
    }
    if(blowbyblow) cat("done.\n")
  }
  err <- unique(err)
  attr(answer, "notes") <- notes
  attr(answer, "err") <- err
  return(answer)
}

#' check for self-intersections in an xypolygon.
#' If yesorno=TRUE, returns a single logical via the C routine 'xypsi';
#' otherwise returns the list of intersection points via 'Cxypolyselfint'.
#' proper=TRUE ignores intersections occurring exactly at shared vertices.
xypolyselfint <- function(p, eps=.Machine$double.eps,
                          proper=FALSE, yesorno=FALSE,
                          checkinternal=FALSE) {
  verify.xypolygon(p)
  n <- length(p$x)
  verbose <- (n > 1000)
  if(verbose)
    cat(paste("[Checking polygon with", n, "edges..."))
  ## edge vectors (dx, dy), wrapping around to the first vertex
  x0 <- p$x
  y0 <- p$y
  dx <- diff(x0[c(1:n,1L)])
  dy <- diff(y0[c(1:n,1L)])
  if(yesorno) {
    # get a yes-or-no answer
    answer <- .C("xypsi",
                 n=as.integer(n),
                 x0=as.double(x0),
                 y0=as.double(y0),
                 dx=as.double(dx),
                 dy=as.double(dy),
                 xsep=as.double(2 * max(abs(dx))),
                 ysep=as.double(2 * max(abs(dy))),
                 eps=as.double(eps),
                 proper=as.integer(proper),
                 answer=as.integer(integer(1L)),
                 PACKAGE = "spatstat")$answer
    if(verbose) cat("]\n")
    return(answer != 0)
  }
  ## full n x n computation of pairwise edge intersections
  out <- .C("Cxypolyselfint",
            n=as.integer(n),
            x0=as.double(x0),
            y0=as.double(y0),
            dx=as.double(dx),
            dy=as.double(dy),
            eps=as.double(eps),
            xx=as.double(numeric(n^2)),
            yy=as.double(numeric(n^2)),
            ti=as.double(numeric(n^2)),
            tj=as.double(numeric(n^2)),
            ok=as.integer(integer(n^2)),
            PACKAGE = "spatstat")
  uhoh <- (matrix(out$ok, n, n) != 0)
  if(proper) {
    # ignore cases where two vertices coincide
    ti <- matrix(out$ti, n, n)[uhoh]
    tj <- matrix(out$tj, n, n)[uhoh]
    i.is.vertex <- (abs(ti) < eps) | (abs(ti - 1) < eps)
    j.is.vertex <- (abs(tj) < eps) | (abs(tj - 1) < eps)
    dup <- i.is.vertex & j.is.vertex
    uhoh[uhoh] <- !dup
  }
  if(checkinternal && any(uhoh != t(uhoh)))
    warning("Internal error: incidence matrix is not symmetric")
  ## report each intersection once (upper triangle)
  xx <- matrix(out$xx, n, n)
  yy <- matrix(out$yy, n, n)
  uptri <- (row(uhoh) < col(uhoh))
  xx <- as.vector(xx[uhoh & uptri])
  yy <- as.vector(yy[uhoh & uptri])
  result <- list(x=xx, y=yy)
  if(verbose) cat("]\n")
  return(result)
}
spatstat/R/lpp.R0000644000176200001440000005110513555737605013256 0ustar liggesusers#
# lpp.R
#
# $Revision: 1.66 $ $Date: 2019/10/29 04:16:59 $
#
# Class "lpp" of point patterns on linear networks

## lpp: create a point pattern on a linear network L.
## X may supply local coordinates (seg, tp), spatial coordinates (x, y),
## or both; missing coordinates are reconstructed/projected.
lpp <- function(X, L, ...)
{
  stopifnot(inherits(L, "linnet"))
  if(missing(X) || is.null(X)) {
    ## empty pattern
    df <- data.frame(x=numeric(0), y=numeric(0))
    lo <- data.frame(seg=integer(0), tp=numeric(0))
  } else {
    localnames <- c("seg", "tp")
    spatialnames <- c("x", "y")
    allcoordnames <- c(spatialnames, localnames)
    if(is.matrix(X)) X <- as.data.frame(X)
    if(checkfields(X, localnames)) {
      #' X includes at least local coordinates
      X <- as.data.frame(X)
      #' validate local coordinates
      if(nrow(X) > 0) {
        nedge <- nsegments(L)
        if(with(X, any(seg < 1 | seg > nedge)))
          stop("Segment index coordinate 'seg' exceeds bounds")
        if(with(X, any(tp < 0 | tp > 1)))
          stop("Local coordinate 'tp' outside [0,1]")
      }
      if(!checkfields(X, spatialnames)) {
        #' data give local coordinates only
        #' reconstruct x,y coordinates from local coordinates
        Y <- local2lpp(L, X$seg, X$tp, df.only=TRUE)
        X[,spatialnames] <- Y[,spatialnames,drop=FALSE]
      }
      #' local coordinates
      lo <- X[ , localnames, drop=FALSE]
      #' spatial coords and marks
      marknames <- setdiff(names(X), allcoordnames)
      df <- X[, c(spatialnames, marknames), drop=FALSE]
    } else {
      #' local coordinates must be computed from spatial coordinates
      if(!is.ppp(X))
        X <- as.ppp(X, W=L$window, ...)
      #' project to segment
      pro <- project2segment(X, as.psp(L))
      #' projected points (spatial coordinates and marks)
      df <- as.data.frame(pro$Xproj)
      #' local coordinates
      lo <- data.frame(seg=pro$mapXY, tp=pro$tp)
    }
  }
  # combine spatial, local, marks;
  # ctype labels each column: "s"patial, "l"ocal, "m"ark
  nmark <- ncol(df) - 2
  if(nmark == 0) {
    df <- cbind(df, lo)
    ctype <- c(rep("s", 2), rep("l", 2))
  } else {
    df <- cbind(df[,1:2], lo, df[, -(1:2), drop=FALSE])
    ctype <- c(rep("s", 2), rep("l", 2), rep("m", nmark))
  }
  out <- ppx(data=df, domain=L, coord.type=ctype)
  class(out) <- c("lpp", class(out))
  return(out)
}

## print method for class "lpp"
print.lpp <- function(x, ...)
{
  stopifnot(inherits(x, "lpp"))
  splat("Point pattern on linear network")
  sd <- summary(x$data)
  np <- sd$ncases
  nama <- sd$col.names
  splat(np, ngettext(np, "point", "points"))
  ## check for unusual coordinates
  ctype <- x$ctype
  nam.m <- nama[ctype == "mark"]
  nam.t <- nama[ctype == "temporal"]
  nam.c <- setdiff(nama[ctype == "spatial"], c("x","y"))
  nam.l <- setdiff(nama[ctype == "local"], c("seg", "tp"))
  if(length(nam.c) > 0)
    splat("Additional spatial coordinates", commasep(sQuote(nam.c)))
  if(length(nam.l) > 0)
    splat("Additional local coordinates", commasep(sQuote(nam.l)))
  if(length(nam.t) > 0)
    splat("Additional temporal coordinates", commasep(sQuote(nam.t)))
  if((nmarks <- length(nam.m)) > 0) {
    if(nmarks > 1) {
      splat(nmarks, "columns of marks:", commasep(sQuote(nam.m)))
    } else {
      marx <- marks(x)
      if(is.factor(marx)) {
        exhibitStringList("Multitype, with possible types:", levels(marx))
      } else splat("Marks of type", sQuote(typeof(marx)))
    }
  }
  print(x$domain, ...)
  return(invisible(NULL))
}

## plot method for class "lpp": plots the network then the points.
## Multiple mark columns generate an array of plots unless suppressed.
plot.lpp <- function(x, ..., main, add=FALSE,
                     use.marks=TRUE, which.marks=NULL,
                     show.all=!add, show.window=FALSE, show.network=TRUE,
                     do.plot=TRUE, multiplot=TRUE) {
  if(missing(main))
    main <- short.deparse(substitute(x))
  ## Handle multiple columns of marks as separate plots
  ## (unless add=TRUE or which.marks selects a single column
  ##  or multiplot = FALSE)
  mx <- marks(x)
  if(use.marks && !is.null(dim(mx))) {
    implied.all <- is.null(which.marks)
    want.several <- implied.all || !is.null(dim(mx <- mx[,which.marks,drop=TRUE]))
    do.several <- want.several && !add && multiplot
    if(want.several)
      mx <- as.data.frame(mx) #' ditch hyperframe columns
    if(do.several) {
      ## generate one plot for each column of marks
      y <- solapply(mx, setmarks, x=x)
      out <- do.call(plot,
                     c(list(x=y, main=main, do.plot=do.plot,
                            show.window=show.window),
                       list(...)))
      return(invisible(out))
    }
    if(is.null(which.marks)) {
      which.marks <- 1
      if(do.plot) message("Plotting the first column of marks")
    }
  }
  ## single plot
  ## determine space required, including legend
  P <- as.ppp(x)
  a <- plot(P, ..., do.plot=FALSE, use.marks=use.marks, which.marks=which.marks)
  if(!do.plot) return(a)
  ## initialise graphics space
  if(!add) {
    if(show.window) {
      plot(Window(P), main=main, invert=TRUE, ...)
    } else {
      b <- attr(a, "bbox")
      plot(b, type="n", main=main, ..., show.all=FALSE)
    }
  }
  ## plot linear network
  if(show.network) {
    L <- as.linnet(x)
    do.call.matched(plot.linnet,
                    resolve.defaults(list(x=L, add=TRUE),
                                     list(...)),
                    extrargs=c("lty", "lwd", "col"))
  }
  ## plot points, legend, title
  ans <- do.call.matched(plot.ppp,
                         c(list(x=P, add=TRUE, main=main,
                                use.marks=use.marks, which.marks=which.marks,
                                show.all=show.all, show.window=FALSE),
                           list(...)),
                         extrargs=c("shape", "size", "pch", "cex",
                                    "fg", "bg", "cols", "lty", "lwd", "etch",
                                    "cex.main", "col.main", "line", "outer", "sub"))
  return(invisible(ans))
}

## summary method for class "lpp": network summary augmented with
## point count, intensity per unit length, and a summary of the marks.
summary.lpp <- function(object, ...) {
  stopifnot(inherits(object, "lpp"))
  L <- object$domain
  result <- summary(L)
  np <- npoints(object)
  result$npoints <- np <- npoints(object)
  result$intensity <- np/result$totlength
  result$is.marked <- is.marked(object)
  result$is.multitype <- is.multitype(object)
  mks <- marks(object)
  result$markformat <- mkf <- markformat(mks)
  switch(mkf,
         none = {
           result$multiple.marks <- FALSE
         },
         vector = {
           result$multiple.marks <- FALSE
           if(result$is.multitype) {
             ## frequency table of types, with per-type intensities
             tm <- as.vector(table(mks))
             tfp <- data.frame(frequency=tm,
                               proportion=tm/sum(tm),
                               intensity=tm/result$totlength,
                               row.names=levels(mks))
             result$marks <- tfp
             result$is.numeric <- FALSE
           } else {
             result$marks <- summary(mks)
             result$is.numeric <- is.numeric(mks)
           }
           result$marknames <- "marks"
           result$marktype <- typeof(mks)
         },
         dataframe = ,
         hyperframe = {
           result$multiple.marks <- TRUE
           result$marknames <- names(mks)
           result$is.numeric <- FALSE
           result$marktype <- mkf
           result$is.multitype <- FALSE
           result$marks <- summary(mks)
         })
  class(result) <- "summary.lpp"
  return(result)
}

## print method for the summary object produced above
print.summary.lpp <- function(x, ...)
{
  what <- if(x$is.multitype) "Multitype point pattern" else
          if(x$is.marked) "Marked point pattern" else "Point pattern"
  splat(what, "on linear network")
  splat(x$npoints, "points")
  splat("Linear network with",
        x$nvert, "vertices and",
        x$nline, "lines")
  u <- x$unitinfo
  dig <- getOption('digits')
  splat("Total length", signif(x$totlength, dig), u$plural, u$explain)
  splat("Average intensity", signif(x$intensity, dig),
        "points per", if(u$vanilla) "unit length" else u$singular)
  if(x$is.marked) {
    if(x$multiple.marks) {
      splat("Mark variables:", commasep(x$marknames, ", "))
      cat("Summary of marks:\n")
      print(x$marks)
    } else if(x$is.multitype) {
      cat("Types of points:\n")
      print(signif(x$marks,dig))
    } else {
      splat("marks are ",
            if(x$is.numeric) "numeric, ",
            "of type ", sQuote(x$marktype),
            sep="")
      cat("Summary:\n")
      print(x$marks)
    }
  } else splat("Unmarked")
  print(x$win, prefix="Enclosing window: ")
  invisible(NULL)
}

## intensity: points per unit length of network (per type if multitype)
intensity.lpp <- function(X, ...) {
  len <- sum(lengths.psp(as.psp(as.linnet(X))))
  if(is.multitype(X)) table(marks(X))/len else npoints(X)/len
}

is.lpp <- function(x) {
  inherits(x, "lpp")
}

## is.multitype: TRUE iff marks are a single factor (with NA handling
## controlled by na.action: "warn", "fatal" or "ignore")
is.multitype.lpp <- function(X, na.action="warn", ...)
{
  marx <- marks(X)
  if(is.null(marx))
    return(FALSE)
  if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1)
    return(FALSE)
  if(!is.factor(marx))
    return(FALSE)
  if((length(marx) > 0) && anyNA(marx))
    switch(na.action,
           warn = {
             warning(paste("some mark values are NA in the point pattern",
                           short.deparse(substitute(X))))
           },
           fatal = {
             return(FALSE)
           },
           ignore = {}
           )
  return(TRUE)
}

## as.lpp: flexible converter to class "lpp" from an existing lpp,
## a ppp, spatial coordinates (x, y), or local coordinates (seg, tp)
as.lpp <- function(x=NULL, y=NULL, seg=NULL, tp=NULL, ...,
                   marks=NULL, L=NULL, check=FALSE, sparse) {
  nomore <- is.null(y) && is.null(seg) && is.null(tp)
  if(inherits(x, "lpp") && nomore) {
    X <- x
    if(!missing(sparse) && !is.null(sparse))
      X$domain <- as.linnet(domain(X), sparse=sparse)
  } else {
    if(!inherits(L, "linnet"))
      stop("L should be a linear network")
    if(!missing(sparse) && !is.null(sparse))
      L <- as.linnet(L, sparse=sparse)
    if(is.ppp(x) && nomore) {
      X <- lpp(x, L)
    } else if(is.null(x) && is.null(y) && !is.null(seg) && !is.null(tp)){
      ## local coordinates only
      X <- lpp(data.frame(seg=seg, tp=tp), L=L)
    } else {
      if(is.numeric(x) && length(x) == 2 && is.null(y)) {
        ## single point given as c(x, y)
        xy <- list(x=x[1L], y=x[2L])
      } else {
        xy <- xy.coords(x,y)[c("x", "y")]
      }
      if(!is.null(seg) && !is.null(tp)) {
        # add segment map information
        xy <- append(xy, list(seg=seg, tp=tp))
      } else {
        # convert to ppp, typically suppressing check mechanism
        xy <- as.ppp(xy, W=as.owin(L), check=check)
      }
      X <- lpp(xy, L)
    }
  }
  if(!is.null(marks))
    marks(X) <- marks
  return(X)
}

## as.ppp: discard the network, keeping spatial coordinates and marks
as.ppp.lpp <- function(X, ..., fatal=TRUE) {
  verifyclass(X, "lpp", fatal=fatal)
  L <- X$domain
  Y <- as.ppp(coords(X, temporal=FALSE, local=FALSE),
              W=L$window, check=FALSE)
  if(!is.null(marx <- marks(X))) {
    if(is.hyperframe(marx)) marx <- as.data.frame(marx)
    marks(Y) <- marx
  }
  return(Y)
}

Window.lpp <- function(X, ...) { as.owin(X) }

## window assignment; check=TRUE restricts the pattern to the new window
"Window<-.lpp" <- function(X, ..., check=TRUE, value) {
  if(check) {
    X <- X[value]
  } else {
    Window(X$domain, check=FALSE) <- value
  }
  return(X)
}

as.owin.lpp <- function(W, ..., fatal=TRUE) {
  as.owin(as.ppp(W, ..., fatal=fatal))
}

domain.lpp <- function(X, ...)
{ as.linnet(X) }

## extract the linear network, optionally converting sparse representation
as.linnet.lpp <- function(X, ..., fatal=TRUE, sparse) {
  verifyclass(X, "lpp", fatal=fatal)
  L <- X$domain
  if(!missing(sparse))
    L <- as.linnet(L, sparse=sparse)
  return(L)
}

unitname.lpp <- function(x) {
  u <- unitname(x$domain)
  return(u)
}

"unitname<-.lpp" <- function(x, value) {
  w <- x$domain
  unitname(w) <- value
  x$domain <- w
  return(x)
}

"marks<-.lpp" <- function(x, ..., value) {
  NextMethod("marks<-")
}

unmark.lpp <- function(X) {
  NextMethod("unmark")
}

## extract the line segments of the underlying network
as.psp.lpp <- function(x, ..., fatal=TRUE){
  verifyclass(x, "lpp", fatal=fatal)
  return(x$domain$lines)
}

nsegments.lpp <- function(x) {
  return(x$domain$lines$n)
}

## local2lpp: build an lpp from local coordinates (seg, tp) on network L,
## computing spatial coordinates by interpolation unless supplied in X.
## df.only=TRUE returns the coordinate data frame without building the lpp.
local2lpp <- function(L, seg, tp, X=NULL, df.only=FALSE) {
  stopifnot(inherits(L, "linnet"))
  if(is.null(X)) {
    # map to (x,y)
    Ldf <- as.data.frame(L$lines)
    dx <- with(Ldf, x1-x0)
    dy <- with(Ldf, y1-y0)
    x <- with(Ldf, x0[seg] + tp * dx[seg])
    y <- with(Ldf, y0[seg] + tp * dy[seg])
  } else {
    x <- X$x
    y <- X$y
  }
  # compile into data frame
  data <- data.frame(x=x, y=y, seg=seg, tp=tp)
  if(df.only) return(data)
  ctype <- c("s", "s", "l", "l")
  out <- ppx(data=data, domain=L, coord.type=ctype)
  class(out) <- c("lpp", class(out))
  return(out)
}

####################################################
# subset extractor
####################################################

## "[.lpp": i is a row index (or window); j is a window restricting the
## network; snip=TRUE cuts segments at the window boundary.
"[.lpp" <- function (x, i, j, drop=FALSE, ..., snip=TRUE) {
  if(!missing(i) && !is.null(i)) {
    if(is.owin(i)) {
      # spatial domain: call code for 'j'
      xi <- x[,i,snip=snip]
    } else {
      # usual row-type index
      da <- x$data
      daij <- da[i, , drop=FALSE]
      xi <- ppx(data=daij, domain=x$domain, coord.type=as.character(x$ctype))
      if(drop)
        xi <- xi[drop=TRUE] # call [.ppx to remove unused factor levels
      class(xi) <- c("lpp", class(xi))
    }
    x <- xi
  }
  if(missing(j) || is.null(j))
    return(x)
  stopifnot(is.owin(j))
  x <- repairNetwork(x)
  w <- j
  L <- x$domain
  if(is.vanilla(unitname(w)))
    unitname(w) <- unitname(x)
  # Find vertices that lie inside 'w'
  vertinside <- inside.owin(L$vertices, w=w)
  from <- L$from
  to <- L$to
  if(snip) {
## For efficiency, first restrict network to relevant segments. ## Find segments EITHER OF whose endpoints lie in 'w' okedge <- vertinside[from] | vertinside[to] ## extract relevant subset of network graph x <- thinNetwork(x, retainedges=okedge) ## Now add vertices at crossing points with boundary of 'w' b <- crossing.psp(as.psp(L), edges(w)) x <- insertVertices(x, unique(b)) boundarypoints <- attr(x, "id") ## update data L <- x$domain from <- L$from to <- L$to vertinside <- inside.owin(L$vertices, w=w) vertinside[boundarypoints] <- TRUE } ## find segments whose endpoints BOTH lie in 'w' edgeinside <- vertinside[from] & vertinside[to] ## extract relevant subset of network xnew <- thinNetwork(x, retainedges=edgeinside) ## adjust window without checking Window(xnew, check=FALSE) <- w return(xnew) } #################################################### # affine transformations #################################################### scalardilate.lpp <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- X Y$data$x <- f * as.numeric(X$data$x) Y$data$y <- f * as.numeric(X$data$y) Y$domain <- scalardilate(X$domain, f) return(Y) } affine.lpp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "lpp") Y <- X Y$data[, c("x","y")] <- affinexy(X$data[, c("x","y")], mat=mat, vec=vec) Y$domain <- affine(X$domain, mat=mat, vec=vec, ...) 
return(Y) } shift.lpp <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "lpp") Y <- X Y$domain <- if(missing(vec)) { shift(X$domain, ..., origin=origin) } else { shift(X$domain, vec=vec, ..., origin=origin) } vec <- getlastshift(Y$domain) Y$data[, c("x","y")] <- shiftxy(X$data[, c("x","y")], vec=vec) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } rotate.lpp <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "lpp") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$data[, c("x","y")] <- rotxy(X$data[, c("x","y")], angle=angle) Y$domain <- rotate(X$domain, angle=angle, ...) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rescale.lpp <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } superimpose.lpp <- function(..., L=NULL) { objects <- list(...) 
if(!is.null(L) && !inherits(L, "linnet")) stop("L should be a linear network") if(length(objects) == 0) { if(is.null(L)) return(NULL) emptyX <- lpp(list(x=numeric(0), y=numeric(0)), L) return(emptyX) } islpp <- unlist(lapply(objects, is.lpp)) if(is.null(L) && !any(islpp)) stop("Cannot determine linear network: no lpp objects given") nets <- unique(lapply(objects[islpp], as.linnet)) if(length(nets) > 1) stop("Point patterns are defined on different linear networks") if(!is.null(L)) { nets <- unique(append(nets, list(L))) if(length(nets) > 1) stop("Argument L is a different linear network") } L <- nets[[1L]] ## convert list(x,y) to linear network, etc if(any(!islpp)) objects[!islpp] <- lapply(objects[!islpp], lpp, L=L) ## concatenate coordinates locns <- do.call(rbind, lapply(objects, coords)) ## concatenate marks (or use names of arguments) marx <- superimposeMarks(objects, sapply(objects, npoints)) ## make combined pattern Y <- lpp(locns, L) marks(Y) <- marx return(Y) } # # interactive plot for lpp objects # iplot.lpp <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) stopifnot(is.lpp(x)) ## predigest L <- domain(x) v <- vertices(L) deg <- vertexdegree(L) dv <- textstring(v, txt=paste(deg)) y <- layered(lines=as.psp(L), vertices=v, degree=dv, points=as.ppp(x)) iplot(y, ..., xname=xname, visible=c(TRUE, FALSE, FALSE, TRUE)) } identify.lpp <- function(x, ...) { verifyclass(x, "lpp") P <- as.ppp(x) id <- identify(P$x, P$y, ...) if(!is.marked(x)) return(id) marks <- as.data.frame(P)[id, -(1:2)] out <- cbind(data.frame(id=id), marks) row.names(out) <- NULL return(out) } cut.lpp <- function(x, z=marks(x), ...) 
{ if(missing(z) || is.null(z)) { z <- marks(x, dfok=TRUE) if(is.null(z)) stop("no data for grouping: z is missing, and x has no marks") } else { #' special objects if(inherits(z, "linim")) { z <- z[x, drop=FALSE] } else if(inherits(z, "linfun")) { z <- z(x) } else if(inherits(z, "lintess")) { z <- (as.linfun(z))(x) } } if(is.character(z)) { if(length(z) == npoints(x)) { # interpret as a factor z <- factor(z) } else if((length(z) == 1) && (z %in% colnames(df <- as.data.frame(x)))) { # interpret as the name of a column of marks or a coordinate zname <- z z <- df[, zname] if(zname == "seg") z <- factor(z) } else stop("format of argument z not understood") } switch(markformat(z), none = stop("No data for grouping"), vector = { stopifnot(length(z) == npoints(x)) g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) }, dataframe = , hyperframe = { stopifnot(nrow(z) == npoints(x)) #' extract atomic data z <- as.data.frame(z) if(ncol(z) < 1) stop("No suitable data for grouping") #' take first column of atomic data z <- z[,1L,drop=TRUE] g <- if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) }, list = stop("Don't know how to cut according to a list")) stop("Format of z not understood") } points.lpp <- function(x, ...) { points(coords(x, spatial=TRUE, local=FALSE), ...) 
}

#' connected.lpp
#'
#' Connected components of a point pattern on a linear network.
#'
#' If dismantle=TRUE (the default), the *network* is first broken into its
#' connected components; each piece is then processed recursively with
#' dismantle=FALSE.  If dismantle=FALSE, points are labelled by
#' R-connectivity: two points belong to the same component when they are
#' joined by a chain of points in which consecutive points lie at most
#' distance R apart (shortest-path distance on the network, via pairdist).
#'
#' Value: a marked pattern (marks = component labels, a factor), or a
#' solist of such patterns when the network itself is disconnected.
connected.lpp <- function(X, R=Inf, ..., dismantle=TRUE) {
  if(!dismantle) {
    if(is.infinite(R)) {
      ## R = Inf: every pair is "close", so there is a single component
      Y <- X %mark% factor(1)
      attr(Y, "retainpoints") <- attr(X, "retainpoints")
      return(Y)
    }
    check.1.real(R)
    stopifnot(R >= 0)
    nv <- npoints(X)
    ## adjacency matrix of the R-closeness graph on the points
    close <- (pairdist(X) <= R)
    diag(close) <- FALSE
    ij <- which(close, arr.ind=TRUE)
    ## 0-based edge lists for the C routine
    ie <- ij[,1] - 1L
    je <- ij[,2] - 1L
    ne <- length(ie)
    ## connected-component labelling in C
    zz <- .C("cocoGraph",
             nv=as.integer(nv),
             ne=as.integer(ne),
             ie=as.integer(ie),
             je=as.integer(je),
             label=as.integer(integer(nv)),
             status=as.integer(integer(1L)),
             PACKAGE = "spatstat")
    if(zz$status != 0)
      stop("Internal error: connected.lpp did not converge")  # message previously said 'connected.ppp'
    lab <- zz$label + 1L
    # Renumber labels sequentially
    lab <- as.integer(factor(lab))
    # Convert labels to factor
    lab <- factor(lab)
    # Apply to points
    Y <- X %mark% lab
    attr(Y, "retainpoints") <- attr(X, "retainpoints")
    return(Y)
  }
  # first break the *network* into connected components
  L <- domain(X)
  lab <- connected(L, what="labels")
  if(length(levels(lab)) == 1) {
    ## network is already connected
    XX <- solist(X)
  } else {
    ## one sub-pattern per connected component of the network
    subsets <- split(seq_len(nvertices(L)), lab)
    XX <- solist()
    for(i in seq_along(subsets))
      XX[[i]] <- thinNetwork(X, retainvertices=subsets[[i]])
  }
  # now find R-connected components in each dismantled piece
  YY <- solapply(XX, connected.lpp, R=R, dismantle=FALSE)
  if(length(YY) == 1) YY <- YY[[1]]
  return(YY)
}

#' text.lpp: method for 'text' — draw text at the point coordinates
text.lpp <- function(x, ...) {
  co <- coords(x)
  graphics::text.default(x=co$x, y=co$y, ...)
}
spatstat/R/lindirichlet.R0000644000176200001440000001201613333543255015121 0ustar liggesusers#' lindirichlet.R
#'
#'  Dirichlet tessellation on a linear network
#'
#'  $Revision: 1.9 $ $Date: 2017/11/04 03:49:18 $

#' lineardirichlet
#'
#' Compute the Dirichlet (nearest-point / Voronoi) tessellation on a linear
#' network generated by the points of an lpp object X.  Duplicated points are
#' collapsed first; tiles are indexed by the original sequence index of each
#' unique point.  Returns a 'lintess' object.
lineardirichlet <- function(X) {
  stopifnot(is.lpp(X))
  #' unique points, remembering original sequence
  ii <- which(!duplicated(X))
  uX <- X[ii]
  #' local coordinates (segment id and position along segment in [0,1])
  coUX <- coords(uX)[, c("seg", "tp")]
  #' add label from original sequence index
  coUX$lab <- ii
  #' reorder by segment, then by position along the segment
  oo <- with(coUX, order(seg, tp))
  coUXord <- coUX[oo, , drop=FALSE]
  seg <- coUXord$seg
  tp <- coUXord$tp
  #' network data
  L <- domain(X)
  nv <- nvertices(L)
  ns <- nsegments(L)
  seglen <- lengths.psp(as.psp(L))
  from <- L$from
  to <- L$to
  #' upper bound on interpoint distance (total network length)
  huge <- sum(seglen)
  #' numerical tolerance for nnwhich
  tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20)
  #' Find data point (in sorted pattern) nearest to each vertex of network
  a <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol)
  vnndist <- a$vnndist
  vnnwhich <- a$vnnwhich
  #' index back into original data pattern
  vnnlab <- coUXord$lab[vnnwhich]
  #' compute Dirichlet tessellation (data frame of tile pieces per segment)
  df <- ldtEngine(nv, ns, from, to, seglen, huge,
                  coUXord,
                  vnndist, vnnwhich, vnnlab)
  return(lintess(L, df))
}

#' ldtEngine
#'
#' Workhorse for lineardirichlet: builds the tessellation data frame with
#' one row per (segment, sub-interval) piece, columns seg, t0, t1, tile.
#' Arguments: network description (nv, ns, from, to, seglen, huge),
#' sorted unique point coordinates (coUXord, with columns seg, tp, lab),
#' and for each network vertex the distance to / identity of the nearest
#' data point (vnndist, vnnwhich, vnnlab).
ldtEngine <- function(nv, ns, from, to, seglen, huge,  # network
                      coUXord, # point coordinates, sorted
                      vnndist, vnnwhich, # nearest data point for each vertex
                      vnnlab) {
  #' initialise tessellation data
  seg <- integer(0)
  t0 <- numeric(0)
  t1 <- numeric(0)
  tile <- integer(0)
  #' split point data by segment, discarding segments which contain no points
  fseg <- factor(coUXord$seg, levels=1:ns)
  blist <- split(coUXord, fseg, drop=TRUE)
  #' process each segment containing data points
  for(b in blist) {
    n <- nrow(b)
    #' which segment?
    sygmund <- b$seg[[1L]]
    lenf <- seglen[sygmund]
    #' segment endpoints (vertex indices)
    A <- from[sygmund]
    B <- to[sygmund]
    #' data points (from X) closest to endpoints
    jA <- vnnlab[A]
    jB <- vnnlab[B]
    dA <- vnndist[A]
    dB <- vnndist[B]
    #' data points (along segment) closest to endpoints
    iA <- b$lab[1L]
    iB <- b$lab[n]
    #' splits between consecutive data points: midpoints in tp-coordinates
    btp <- b$tp
    tcut <- if(n < 2) numeric(0) else (btp[-1] + btp[-n])/2
    labs <- b$lab
    #' consider left endpoint
    if(jA == iA) {
      #' leftmost data point covers left endpoint
      tcut <- c(0, tcut)
    } else {
      #' cut between left endpoint and leftmost data point
      #' dA1 = distance from endpoint A to leftmost data point on this segment
      dA1 <- lenf * btp[1L]
      dx <- (dA1 - dA)/2
      if(dx > 0) {
        #' expected!  boundary lies dx inside the segment (tp units = dx/lenf)
        tx <- dx/lenf
        tcut <- c(0, tx, tcut)
        labs <- c(jA, labs)
      } else {
        #' unexpected (numerical anomaly): no extra cut
        tcut <- c(0, tcut)
      }
    }
    #' consider right endpoint
    if(jB == iB) {
      #' rightmost data point covers right endpoint
      tcut <- c(tcut, 1)
    } else {
      #' cut between right endpoint and rightmost data point
      dB1 <- lenf * (1 - btp[n])
      dx <- (dB1 - dB)/2
      if(dx > 0) {
        #' expected!
        tx <- 1 - dx/lenf
        tcut <- c(tcut, tx, 1)
        labs <- c(labs, jB)
      } else {
        #' unexpected
        tcut <- c(tcut, 1)
      }
    }
    #' append the m-1 sub-intervals [tcut[k], tcut[k+1]] for this segment
    m <- length(tcut)
    seg <- c(seg, rep(sygmund, m-1L))
    t0 <- c(t0, tcut[-m])
    t1 <- c(t1, tcut[-1L])
    tile <- c(tile, labs)
  }
  df <- data.frame(seg=seg, t0=t0, t1=t1, tile=tile)
  #' now deal with segments having no data points
  unloved <- (table(fseg) == 0)
  if(any(unloved)) {
    #' pre-allocate: each empty segment yields at most 2 pieces
    unlovedt0 <- rep(0, 2*sum(unloved))
    unlovedt1 <- rep(1, 2*sum(unloved))
    unlovedseg <- unlovedtile <- rep(-1, 2*sum(unloved))
    counter <- 0
    for(sygmund in which(unloved)) {
      counter <- counter + 1
      lenf <- seglen[sygmund]
      #' segment endpoints
      A <- from[sygmund]
      B <- to[sygmund]
      #' data points (from X) closest to endpoints
      jA <- vnnlab[A]
      jB <- vnnlab[B]
      dA <- vnndist[A]
      dB <- vnndist[B]
      if(is.na(jA) || is.na(jB) || jA == jB) {
        #' entire segment is covered by one tile
        unlovedtile[counter] <- if(is.na(jA)) jB else jA
        unlovedseg[counter] <- sygmund
      } else {
        #' split somewhere: equidistance point between the two nearest tiles
        tx <- (dB - dA + lenf)/(2 * lenf)
        if(tx >= 0
           && tx <= 1) {
          #' split point lies within the segment: two pieces, one per tile
          unlovedseg[counter] <- sygmund
          unlovedtile[counter] <- jA
          unlovedt1[counter] <- tx
          counter <- counter + 1
          unlovedseg[counter] <- sygmund
          unlovedtile[counter] <- jB
          unlovedt0[counter] <- tx
        } else if(tx < 0) { # weird
          #' split point falls outside [0,1]: assign whole segment to jB
          unlovedseg[counter] <- sygmund
          unlovedtile[counter] <- jB
        } else { # weird
          #' tx > 1: assign whole segment to jA
          unlovedseg[counter] <- sygmund
          unlovedtile[counter] <- jA
        }
      }
    }
    #' keep only the slots actually filled (counter <= 2*sum(unloved))
    newdf <- data.frame(seg = unlovedseg[1:counter],
                        t0 = unlovedt0[1:counter],
                        t1 = unlovedt1[1:counter],
                        tile = unlovedtile[1:counter])
    df <- rbind(df, newdf)
  }
  return(df)
}
spatstat/R/areainter.R0000644000176200001440000003142013333543254014420 0ustar liggesusers#
#
#    areainter.R
#
#    $Revision: 1.48 $	$Date: 2018/03/15 07:07:19 $
#
#    The area interaction
#
#    AreaInter()    create an instance of the area-interaction process
#                 [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#

AreaInter <- local({

  #' area-interaction conditional intensity potential
  #' corresponds to potential -C(x) = n(x) - A(x)/\pi r^2

  areapot <- function(X,U,EqualPairs,pars,correction, ..., W=as.owin(X)) {
    #' W is the window to which computations of area will be clipped.
    #' W=NULL is permissible, meaning no clipping.
uhoh <- !(correction %in% c("border", "none")) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste(ngettext(nuh, "Correction", "Corrections"), commasep(sQuote(correction[uhoh])), ngettext(nuh, "is not supported and was ignored", "are not supported and were ignored"))) } r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") #' if(Poly <- spatstat.options('areainter.polygonal')) { if(is.mask(W)) W <- as.polygonal(W) if(is.mask(Window(X))) Window(X) <- as.polygonal(Window(X)) if(is.mask(Window(U))) Window(U) <- as.polygonal(Window(U)) } n <- U$n areas <- numeric(n) #' dummy points dummies <- setdiff(seq_len(n), EqualPairs[,2L]) if(length(dummies)) areas[dummies] <- areaGain(U[dummies], X, r, W=W, exact=Poly) #' data points represented in U ii <- EqualPairs[,1L] jj <- EqualPairs[,2L] inborder <- (bdist.points(X[ii]) <= r) # sic #' points in border region need clipping if(any(inborder)) areas[jj[inborder]] <- areaLoss(X, r, subset=ii[inborder], exact=Poly) #' points in eroded region do not necessarily if(any(ineroded <- !inborder)) { areas[jj[ineroded]] <- areaLoss(X, r, subset=ii[ineroded], W=W, exact=Poly) } return(1 - areas/(pi * r^2)) } #' fractional area of overlap of two unit discs at distance 2 * z discOverlap <- function(z) { z <- pmax(pmin(z, 1), -1) (2/pi) * (acos(z) - z * sqrt(1 - z^2)) } # template object without family, par, version BlankAI <- list( name = "Area-interaction process", creator = "AreaInter", family = "inforder.family", # evaluated later pot = areapot, par = list(r = NULL), # to be filled in parnames = "disc radius", hasInf = FALSE, init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("disc radius r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction unitz <- unitname(fint) if(!identical(inter$name, "Area-interaction process")) stop("Tried to plot the wrong 
kind of interaction") #' fitted interaction coefficient theta <- fint$coefs[fint$Vnames] #' interaction radius r <- inter$par$r xlim <- resolve.1.default(list(xlim=c(0, 1.25 * 2*r)), list(...)) rmax <- max(xlim, d) if(is.null(d)) { d <- seq(from=0, to=rmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) } #' compute interaction between two points at distance d y <- exp(theta * discOverlap(d/(2 * r))) #' compute `fv' object fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "maximal interaction h(r)", "reference value 1"), unitname=unitz) if(plotit) do.call(plot.fv, resolve.defaults(list(fun), list(...), list(ylim=range(0,1,y)))) return(invisible(fun)) }, #' end of function 'plot' interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1L]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", printable=signif(eta))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(anyNA(coeffs)) return(2 * r) logeta <- coeffs[1L] if(abs(logeta) <= epsilon) return(0) else return(2 * r) }, delta2 = function(X, inte, correction, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r areadelta2(X, r, sparseOK=sparseOK) }, version=NULL # to be added ) class(BlankAI) <- "interact" AreaInter <- function(r) { instantiate.interact(BlankAI, list(r=r)) } AreaInter <- intermaker(AreaInter, BlankAI) AreaInter }) areadelta2 <- local({ areadelta2 <- function(X, r, ..., sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(is.ppp(X)) return(areadelppp(X, r, ..., sparseOK=sparseOK)) else if(is.quad(X)) return(areadelquad(X, r, sparseOK=sparseOK)) else stop("internal error: X should be a ppp or quad object") } areadelppp <- function(X, r, algorithm=c("C", "nncross", "nnmap"), sparseOK=FALSE) { # Evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. 
h(X[i]|X) - h(X[i]|X[-j]) # where h is first order cif statistic algorithm <- match.arg(algorithm) nX <- npoints(X) sparseOK <- sparseOK result <- if(!sparseOK) matrix(0, nX, nX) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nX,nX)) if(nX < 2) return(result) if(algorithm == "C") { # use special purpose C routine # called once for each interacting pair of points xx <- X$x yy <- X$y cl <- closepairs(X, 2 * r, what="indices", twice=FALSE, neat=FALSE) I <- cl$i J <- cl$j eps <- r/spatstat.options("ngrid.disc") for(k in seq_along(I)) { i <- I[k] j <- J[k] # all neighbours of i Ki <- union(J[I==i], I[J==i]) # all neighbours of j Kj <- union(J[I==j], I[J==j]) # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C("delta2area", xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE = "spatstat") result[i,j] <- result[j,i] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } # non-C algorithms # confine attention to points which are interacting relevant <- (nndist(X) <= 2 * r) if(!all(relevant)) { if(any(relevant)) { # call self on subset Dok <- areadelppp(X[relevant], r, algorithm, sparseOK=sparseOK) result[relevant,relevant] <- Dok } return(result) } # .............. algorithm using interpreted code ........... 
# sort pattern in increasing order of x sortX <- (algorithm == "nnmap") if(sortX) { oX <- fave.order(X$x) X <- X[oX] } # area calculation may be restricted to window W for efficiency W <- as.owin(X) U <- as.rectangle(W) # decide pixel resolution eps <- r/spatstat.options("ngrid.disc") npix <- prod(ceiling(sidelengths(U)/eps)) if(npix <= 2^20) { # do it all in one go tile <- list(NULL) } else { # divide into rectangular tiles B <- as.rectangle(W) ntile0 <- ceiling(npix/(2^20)) tile0area <- area(B)/ntile0 tile0side <- sqrt(tile0area) nx <- ceiling(sidelengths(B)[1L]/tile0side) ny <- ceiling(sidelengths(B)[2L]/tile0side) tile <- tiles(quadrats(B, nx, ny)) } result <- matrix(0, nX, nX) for(i in seq_len(length(tile))) { # form pixel grid Ti <- tile[[i]] Wi <- if(is.null(Ti)) W else intersect.owin(W, Ti) if(algorithm == "nncross") { # Trusted, slow algorithm using nncross Z <- as.mask(Wi, eps=eps) G <- as.ppp(rasterxy.mask(Z), U, check=FALSE) # compute 3 nearest neighbours in X of each grid point v <- nncross(G, X, k=1:3) # select pixels which have exactly 2 neighbours within distance r ok <- with(v, dist.3 > r & dist.2 <= r) if(any(ok)) { v <- v[ok, , drop=FALSE] # accumulate pixel counts -> areas counts <- with(v, table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX))) pixarea <- with(Z, xstep * ystep) result <- result + pixarea * (counts + t(counts)) } } else { # Faster algorithm using nnmap # compute 3 nearest neighbours in X of each grid point stuff <- nnmap(X, k=1:3, W=Wi, eps=eps, is.sorted.X=TRUE, sortby="x", outputarray=TRUE) dist.2 <- stuff$dist[2L,,] dist.3 <- stuff$dist[3L,,] which.1 <- stuff$which[1L,,] which.2 <- stuff$which[2L,,] ok <- (dist.3 > r & dist.2 <= r) if(any(ok)) { which.1 <- as.vector(which.1[ok]) which.2 <- as.vector(which.2[ok]) counts <- table(i=factor(which.1, levels=1L:nX), j=factor(which.2, levels=1L:nX)) pixarea <- attr(stuff, "pixarea") result <- result + pixarea * (counts + t(counts)) } } } if(sortX) { # map back to 
original ordering result[oX, oX] <- result } # normalise result <- result/(pi * r^2) return(result) } areadelquad <- function(Q, r, sparseOK=FALSE) { # Sufficient statistic for second order conditional intensity # Area-interaction model # Evaluate \Delta_{u_j} \Delta_{u_i} S(x) for quadrature points # answer is area(b(u[i],r) \cap b(u[j],r)\setminus \bigcup_k b(x[k],r)) # where k ranges over all indices that are not equivalent to u[i,j] U <- union.quad(Q) Z <- is.data(Q) nU <- npoints(U) xx <- U$x yy <- U$y # identify all close pairs of quadrature points cl <- closepairs(U, 2 * r, what="indices") I <- cl$i J <- cl$j # find neighbours in X of each quadrature point zJ <- Z[J] neigh <- split(J[zJ], factor(I[zJ], levels=1L:nU)) # result <- if(!sparseOK) matrix(0, nU, nU) else sparseMatrix(i=integer(0), j=integer(0), x=numeric(0), dims=c(nU,nU)) eps <- r/spatstat.options("ngrid.disc") # for(k in seq_along(I)) { i <- I[k] j <- J[k] # all points of X close to U[i] Ki <- neigh[[i]] # all points of X close to U[j] Kj <- neigh[[j]] # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code z <- .C("delta2area", xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1L)), PACKAGE = "spatstat") result[i,j] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } areadelta2 }) spatstat/R/triplet.family.R0000644000176200001440000000640113333543255015413 0ustar liggesusers# # # triplet.family.R # # $Revision: 1.1 $ $Date: 2011/11/05 07:18:51 $ # # Family of `third-order' point process models # # triplet.family: object of class 'isf' # # # ------------------------------------------------------------------- # triplet.family <- list( name = "triplet", print = function(self) { cat("Family of third-order interactions\n") }, plot = NULL, # 
---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `triplet' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # This function is currently modelled on 'inforder.family'. # It simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) 
    ## Validate the potential's return value: it must be a matrix with one
    ## row per point of U (a vector is promoted to a one-column matrix).
    if(is.matrix(POT)) {
      if(nrow(POT) != U$n)
        stop("Internal error: the potential returned a matrix with the wrong number of rows")
    } else if(is.array(POT) && length(dim(POT)) > 2)
      stop("Internal error: the potential returned an array with more than 2 dimensions")
    else if(is.vector(POT)) {
      if(length(POT) != U$n)
        stop("Internal error: the potential returned a vector with the wrong length")
      POT <- matrix(POT, ncol=1)
    } else
      stop("Internal error: the return value from the potential is not understood")
    return(POT)
  },
  ######### end of function $eval
  suffstat = NULL
  ######### end of function $suffstat
)
######### end of list

class(triplet.family) <- "isf"
spatstat/R/unstack.R0000644000176200001440000000343313536343214014120 0ustar liggesusers#'
#'   unstack.R
#'
#'   Methods for generic 'unstack'
#'
#'   $Revision: 1.5 $  $Date: 2019/09/12 04:05:58 $

#' unstack methods for ppp/psp/lpp/tess:
#' split a pattern with data-frame marks into a solist of patterns,
#' one per mark column; a pattern with no mark dimensions is returned
#' as a singleton solist.
unstack.ppp <- unstack.psp <- unstack.lpp <- unstack.tess <- function(x, ...) {
  trap.extra.arguments(...)
  marx <- marks(x)
  d <- dim(marx)
  if(is.null(d)) return(solist(x))
  #' one copy of the unmarked pattern per mark column
  y <- rep(list(unmark(x)), d[2])
  for(j in seq_along(y)) marks(y[[j]]) <- marx[,j,drop=FALSE]
  names(y) <- colnames(marx)
  return(as.solist(y))
}

#' unstack method for measures ('msr'):
#' one measure per column, carrying over the matching smoothed-density image
#' (attribute "smoothdensity") when available.
unstack.msr <- function(x, ...) {
  trap.extra.arguments(...)
  d <- dim(x)
  if(is.null(d)) return(solist(x))
  smo <- attr(x, "smoothdensity")
  if(!inherits(smo, "imlist")) smo <- NULL
  nc <- d[2]
  y <- vector(mode="list", length=nc)
  for(j in seq_len(nc)) {
    xj <- x[,j,drop=FALSE]
    if(!is.null(smo)) attr(xj, "smoothdensity") <- smo[[j]]
    y[[j]] <- xj
  }
  names(y) <- colnames(x)
  return(as.solist(y))
}

#' unstackFilter: helper used by the list methods below —
#' wrap non-unstackable classes in a singleton solist, unstack anything else.
unstackFilter <- function(x) {
  ## deal with a whole swag of classes that do not need to be unstacked
  nonvectorclasses <- c("im", "owin", "quad",
                        "quadratcount", "quadrattest",
                        "funxy", "distfun", "nnfun",
                        "linnet", "linfun",
                        "influence.ppm", "leverage.ppm")
  y <- if(inherits(x, nonvectorclasses)) solist(x) else unstack(x)
  return(y)
}

#' unstack method for solist: unstack each element, then flatten.
unstack.solist <- function(x, ...) {
  trap.extra.arguments(...)
y <- lapply(x, unstackFilter) z <- as.solist(unlist(y, recursive=FALSE)) return(z) } unstack.layered <- function(x, ...) { trap.extra.arguments(...) y <- lapply(x, unstackFilter) ny <- lengths(y) nx <- length(ny) if(all(ny == 1) || nx == 0) return(solist(x)) pax <- layerplotargs(x) pay <- rep(pax, times=ny) z <- unlist(y, recursive=FALSE) z <- layered(LayerList=z, plotargs=pay) return(z) } spatstat/R/quadclass.R0000644000176200001440000002067313417031473014434 0ustar liggesusers# # quadclass.S # # Class 'quad' to define quadrature schemes # in (rectangular) windows in two dimensions. # # $Revision: 4.28 $ $Date: 2019/01/14 06:44:41 $ # # An object of class 'quad' contains the following entries: # # $data: an object of class 'ppp' # defining the OBSERVATION window, # giving the locations (& marks) of the data points. # # $dummy: object of class 'ppp' # defining the QUADRATURE window, # giving the locations (& marks) of the dummy points. # # $w: vector giving the nonnegative weights for the # data and dummy points (data first, followed by dummy) # # w may also have an attribute attr(w, "zeroes") # equivalent to (w == 0). If this is absent # then all points are known to have positive weights. # # $param: # parameters that were used to compute the weights # and possibly to create the dummy points (see below). # # The combined (data+dummy) vectors of x, y coordinates of the points, # and their weights, are extracted using standard functions # x.quad(), y.quad(), w.quad() etc. # # ---------------------------------------------------------------------- # Note about parameters: # # If the quadrature scheme was created by quadscheme(), # then $param contains # # $param$weight # list containing the values of all parameters # actually used to compute the weights. 
# # $param$dummy # list containing the values of all parameters # actually used to construct the dummy pattern # via default.dummy(); # or NULL if the dummy pattern was provided externally # # $param$sourceid # vector mapping the quadrature points to the # original data and dummy points. # # If you constructed the quadrature scheme manually, this # structure may not be present. # #------------------------------------------------------------- quad <- function(data, dummy, w, param=NULL) { data <- as.ppp(data) dummy <- as.ppp(dummy) n <- data$n + dummy$n if(missing(w)) w <- rep.int(1, n) else { w <- as.vector(w) if(length(w) != n) stop("length of weights vector w is not equal to total number of points") } if(is.null(attr(w, "zeroes")) && any( w == 0)) attr(w, "zeroes") <- (w == 0) Q <- list(data=data, dummy=dummy, w=w, param=param) class(Q) <- "quad" invisible(Q) } is.quad <- function(x) { inherits(x, "quad") } # ------------------ extractor functions ---------------------- x.quad <- function(Q) { verifyclass(Q, "quad") c(Q$data$x, Q$dummy$x) } y.quad <- function(Q) { verifyclass(Q, "quad") c(Q$data$y, Q$dummy$y) } coords.quad <- function(x, ...) { data.frame(x=x.quad(x), y=y.quad(x)) } w.quad <- function(Q) { verifyclass(Q, "quad") Q$w } param.quad <- function(Q) { verifyclass(Q, "quad") Q$param } n.quad <- function(Q) { verifyclass(Q, "quad") Q$data$n + Q$dummy$n } marks.quad <- function(x, dfok=FALSE, ...) { verifyclass(x, "quad") dat <- x$data dum <- x$dummy if(dfok) warning("ignored dfok = TRUE; not implemented") mdat <- marks(dat, dfok=FALSE, ...) mdum <- marks(dum, dfok=FALSE, ...) if(is.null(mdat) && is.null(mdum)) return(NULL) if(is.null(mdat)) mdat <- rep.int(NA_integer_, dat$n) if(is.null(mdum)) mdum <- rep.int(NA_integer_, dum$n) if(is.factor(mdat) && is.factor(mdum)) { mall <- cat.factor(mdat, mdum) } else mall <- c(mdat, mdum) return(mall) } is.marked.quad <- function(X, na.action="warn", ...) { marx <- marks(X, ...) 
if(is.null(marx)) return(FALSE) if(anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(TRUE) } is.multitype.quad <- function(X, na.action="warn", ...) { marx <- marks(X, ...) if(is.null(marx)) return(FALSE) if(anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(!is.data.frame(marx) && is.factor(marx)) } is.data <- function(Q) { verifyclass(Q, "quad") return(c(rep.int(TRUE, Q$data$n), rep.int(FALSE, Q$dummy$n))) } equals.quad <- function(Q) { # return matrix E such that E[i,j] = (X[i] == U[j]) # where X = Q$data and U = union.quad(Q) n <- Q$data$n m <- Q$dummy$n E <- matrix(FALSE, nrow=n, ncol=n+m) diag(E) <- TRUE E } equalsfun.quad <- function(Q) { stopifnot(inherits(Q, "quad")) return(function(i,j) { i == j }) } equalpairs.quad <- function(Q) { # return two-column matrix E such that # X[E[i,1]] == U[E[i,2]] for all i # where X = Q$data and U = union.quad(Q) n <- Q$data$n return(matrix(rep.int(seq_len(n),2), ncol=2)) } union.quad <- function(Q) { verifyclass(Q, "quad") ppp(x= c(Q$data$x, Q$dummy$x), y= c(Q$data$y, Q$dummy$y), window=Q$dummy$window, marks=marks.quad(Q), check=FALSE) } # # Plot a quadrature scheme # # plot.quad <- function(x, ..., main, add=FALSE, dum=list(), tiles=FALSE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) verifyclass(x, "quad") data <- x$data dummy <- x$dummy # determine plot parameters for dummy points dum <- resolve.defaults(dum, list(pch=".", add=TRUE)) tt <- NULL if(tiles) { # show tiles that determined the weights wp <- x$param$weight tt <- NULL if(is.null(wp) || is.null(wp$method)) { warning("Tile information is not available") } else { switch(wp$method, grid = { ntile <- wp$ntile tt <- quadrats(as.owin(x), ntile[1], ntile[2]) }, dirichlet = { U 
<- union.quad(x) if(wp$exact) { tt <- dirichlet(U) } else { win <- as.mask(as.owin(U)) tileid <- im(exactdt(U)$i, win$xcol, win$yrow, win$xrange, win$yrange) tt <- tess(image=tileid[win, drop=FALSE]) } }, warning("Unrecognised 'method' for tile weights") ) } } pixeltiles <- !is.null(tt) && tt$type == "image" tileargs <- resolve.defaults(list(x=tt, main=main, add=add), list(...), if(!pixeltiles) list(col="grey") else NULL) if(!is.marked(data)) { if(!is.null(tt)) { do.call(plot, tileargs) add <- TRUE } plot(data, main=main, add=add, ...) do.call(plot, append(list(x=dummy), dum)) } else if(is.multitype(data) && !add) { oldpar <- par(ask = interactive() && (.Device %in% c("X11", "GTK", "windows", "Macintosh"))) on.exit(par(oldpar)) data.marks <- marks(data) dummy.marks <- marks(dummy) types <- levels(data.marks) for(k in types) { add <- FALSE if(!is.null(tt)) { do.call(plot, tileargs) add <- TRUE } maink <- paste(main, "\n mark = ", k, sep="") plot(unmark(data[data.marks == k]), main=maink, add=add, ...) do.call(plot, append(list(x=unmark(dummy[dummy.marks == k])), dum)) } } else { if(!is.null(tt)) { do.call(plot, tileargs) add <- TRUE } plot(data, ..., main=main, add=add) do.call(plot, append(list(x=dummy), dum)) } invisible(NULL) } # subset operator "[.quad" <- function(x, ...) { U <- union.quad(x) Z <- is.data(x) w <- w.quad(x) # determine serial numbers of points to be included V <- U %mark% seq_len(U$n) i <- marks(V[...]) # extract corresponding subsets of vectors Z <- Z[i] w <- w[i] # take subset of points, using any type of subset index U <- U[...] # stick together quad(U[Z], U[!Z], w) } domain.quad <- Window.quad <- function(X, ...) 
{ as.owin(X) } "Window<-.quad" <- function(X, ..., value) { verifyclass(value, "owin") return(X[value]) } unitname.quad <- function(x) { return(unitname(x$data)) } "unitname<-.quad" <- function(x, value) { unitname(x$data) <- value unitname(x$dummy) <- value return(x) } spatstat/R/Kcom.R0000644000176200001440000003150313602573674013351 0ustar liggesusers# # Kcom.R # # model compensated K-function # # $Revision: 1.16 $ $Date: 2018/10/19 03:20:51 $ # Kcom <- local({ Kcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "isotropic", "translate"), conditional=!is.poisson(object), restrict=FALSE, model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), compute.var=TRUE, truecoef=NULL, hi.res=NULL) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) restrict <- isTRUE(restrict) if(restrict && !conditional) { warning("restrict=TRUE ignored because conditional=FALSE", call.=FALSE) restrict <- FALSE } # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and window Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # selection of edge corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", ripley="isotropic", trans="translation", translate="translation", translation="translation", best="best"), multi=TRUE) correction <- implemented.for.K(correction, Win$type, correction.given) opt <- list(bord = any(correction == "border"), tran = any(correction == "translation"), ripl = 
any(correction == "isotropic")) if(sum(unlist(opt)) == 0) stop("No corrections selected") # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) # Throw away boundary data Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # basic statistics npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW lambda2 <- npts * (npts - 1)/(areaW^2) # adjustments to account for restricted domain of pseudolikelihood if(algo == "reweighted") { npts.used <- sum(Z & USED) area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # lambda2.used <- npts.used * (npts.used - 1)/(area.used^2) } else { npts.used <- npts area.used <- areaW # lambda.used <- lambda # lambda2.used <- lambda2 } # 'r' values rmaxdefault <- rmax.rule("K", if(restrict) Wfree else Win, npts/areaW) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) r <- breaks$r # nr <- length(r) rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame K <- data.frame(r=r, pois=pi * r^2) desc <- c("distance argument r", "expected %s for CSR") K <- fv(K, "r", substitute(K(r), NULL), "pois", , alim, c("r","%s[pois](r)"), desc, fname="K") ############### start computing ################## # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) resval <- with(resid, "increment") rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree resval <- resval[retain] rescts <- rescts[retain] } # close pairs of 
points # (quadrature point to data point) clos <- crosspairs(U, X, rmax, what="ijd") dIJ <- clos$d I <- clos$i J <- clos$j UI <- U[I] XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only # nDD <- sum(DD) # determine whether a quadrature point will be used in integral okI <- USED[I] if(spatstat.options("Kcom.remove.zeroes")) okI <- okI & !EIJ # residual weights # wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] #################################################### if(opt$bord) { # border method # Compute distances to boundary # (in restricted case, the window of U has been adjusted) b <- bdist.points(U) bI <- b[I] # reduced sample for K(r) of data only RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks) # Kb <- RSX$numerator/(lambda.used * RSX$denom.count) Kb <- RSX$numerator/(lambda * RSX$denom.count) K <- bind.fv(K, data.frame(border=Kb), "hat(%s)[bord](r)", nzpaste(algo, "border-corrected nonparametric estimate of %s"), "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[Z & USED], rep.int(1, npts.used), breaks, fatal=FALSE) # lambdaU <- (npts.used + 1)/area.used lambdaU <- (npts + 1)/areaW Kb <- RSD$numerator/((RSD$denominator + 1) * lambdaU) K <- bind.fv(K, data.frame(bcom=Kb), "bold(C)~hat(%s)[bord](r)", nzpaste("model compensator of", algo, "border-corrected %s"), "border") } if(opt$tran) { # translation correction edgewt <- switch(algo, classical = edge.Trans(UI, XJ, paired=TRUE), restricted = edge.Trans(UI, XJ, paired=TRUE), reweighted = edge.Trans.modif(UI, XJ, Win, Wfree, paired=TRUE)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) Ktrans <- cumsum(whDD)/(lambda2 * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, 
data.frame(trans=Ktrans), "hat(%s)[trans](r)", nzpaste(algo, "translation-corrected nonparametric estimate of %s"), "trans") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Ktrans <- cumsum(wh)/(lambda2U * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(tcom=Ktrans), "bold(C)~hat(%s)[trans](r)", nzpaste("model compensator of", algo, "translation-corrected %s"), "trans") } if(opt$ripl) { # Ripley isotropic correction edgewt <- edge.Ripley(UI, matrix(dIJ, ncol=1)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) # Kiso <- cumsum(whDD)/(lambda2.used * area.used) Kiso <- cumsum(whDD)/(lambda2 * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s)[iso](r)", nzpaste(algo, "isotropic-corrected nonparametric estimate of %s"), "iso") # lambda2U <- (npts.used + 1) * npts.used/(area.used^2) lambda2U <- (npts + 1) * npts/(areaW^2) Kiso <- cumsum(wh)/(lambda2U * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(icom=Kiso), "bold(C)~hat(%s)[iso](r)", nzpaste("model compensator of", algo, "isotropic-corrected %s"), "iso") # if(compute.var) { savedotnames <- fvnames(K, ".") # compute contribution to compensator from each quadrature point dOK <- dIJ[okI] eOK <- edgewt[okI] iOK <- I[okI] denom <- lambda2U * area.used variso <- varsumiso <- 0 * Kiso for(i in sortunique(iOK)) { relevant <- (iOK == i) tincrem <- whist(dOK[relevant], breaks$val, eOK[relevant]) localterm <- cumsum(tincrem)/denom variso <- variso + wc[i] * localterm^2 if(Z[i]) varsumiso <- varsumiso + localterm^2 } sdiso <- sqrt(variso) K <- bind.fv(K, data.frame(ivar=variso, isd =sdiso, ihi = 2*sdiso, ilo = -2*sdiso, ivarsum=varsumiso), c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[hi](r)", "bold(R)~hat(%s)[lo](r)", "hat(C)^2~hat(%s)[iso](r)"), c("Poincare variance of isotropic-corrected %s", "sqrt(Poincare variance) of 
isotropic-corrected %s",
                     "upper critical band for isotropic-corrected %s",
                     "lower critical band for isotropic-corrected %s",
                     "data estimate of Poincare variance of %s"),
                   "iso")
      # fvnames(K, ".") <- c(savedotnames, "isd")
      fvnames(K, ".") <- savedotnames
    }
  }
  # default is to display all corrections
  formula(K) <- . ~ r
  unitname(K) <- unitname(X)
  # secret tag used by 'Kres'
  attr(K, "maker") <- "Kcom"
  return(K)
}

# `reweighted' translation edge correction

edge.Trans.modif <- function(X, Y=X, WX=X$window, WY=Y$window,
                             exact=FALSE, paired=FALSE,
                             trim=spatstat.options("maxedgewt")) {
  ## Computes the translation edge correction factor
  ##    f = area(WY)/area(intersect.owin(WY, shift(WX, X[i] - Y[j])))
  ## allowing the windows WX, WY of the two patterns to differ,
  ## as required by the 'reweighted' algorithm in Kcom.
  ## If paired=TRUE, only the pairs (X[i], Y[i]) are evaluated and a
  ## vector is returned; otherwise a full nX x nY matrix of weights.
  X <- as.ppp(X, WX)
  W <- X$window
  x <- X$x
  y <- X$y
  Y <- as.ppp(Y, WY)
  xx <- Y$x
  yy <- Y$y
  nX <- npoints(X)
  nY <- npoints(Y)
  if(paired && (nX != nY))
    stop("X and Y should have equal length when paired=TRUE")
  # For irregular polygons, exact evaluation is very slow;
  # so use pixel approximation, unless exact=TRUE
  if(!exact) {
    if(WX$type == "polygonal") WX <- as.mask(WX)
    if(WY$type == "polygonal") WY <- as.mask(WY)  # bug fix: was as.mask(WX)
  }
  typeX <- WX$type
  typeY <- WY$type
  if(typeX == "rectangle" && typeY == "rectangle") {
    # Fast code for this case
    if(!paired) {
      DX <- abs(outer(x,xx,"-"))
      DY <- abs(outer(y,yy,"-"))
    } else {
      DX <- abs(xx - x)
      DY <- abs(yy - y)
    }
    A <- WX$xrange
    B <- WX$yrange
    a <- WY$xrange
    b <- WY$yrange
    # compute width and height of intersection
    wide <- pmin.int(a[2], A[2]+DX) - pmax(a[1], A[1]+DX)
    high <- pmin.int(b[2], B[2]+DY) - pmax(b[1], B[1]+DY)
    # edge correction weight: area(WY) / area of intersection
    weight <- diff(a) * diff(b) / (wide * high)
    if(!paired)
      weight <- matrix(weight, nrow=X$n, ncol=Y$n)
  } else if(typeX %in% c("rectangle", "polygonal") &&
            typeY %in% c("rectangle", "polygonal")) {
    # This code is SLOW
    WX <- as.polygonal(WX)
    WY <- as.polygonal(WY)
    ## NOTE(review): the formula above has numerator area(WY), but here
    ## a = area(W) = area(WX); these agree only when WX == WY.
    ## Left unchanged pending confirmation of intended behaviour.
    a <- area(W)
    if(!paired) {
      weight <- matrix(, nrow=nX, ncol=nY)
      if(nX > 0 && nY > 0) {
        for(i in seq_len(nX)) {
          X.i <- c(x[i], y[i])
          for(j in seq_len(nY)) {
            shiftvector <- X.i -
c(xx[j],yy[j]) WXshift <- shift(WX, shiftvector) b <- overlap.owin(WY, WXshift) weight[i,j] <- a/b } } } } else { nX <- npoints(X) weight <- numeric(nX) if(nX > 0) { for(i in seq_len(nX)) { shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i]) WXshift <- shift(WX, shiftvector) b <- overlap.owin(WY, WXshift) weight[i] <- a/b } } } } else { WX <- as.mask(WX) WY <- as.mask(WY) # make difference vectors if(!paired) { DX <- outer(x,xx,"-") DY <- outer(y,yy,"-") } else { DX <- x - xx DY <- y - yy } # compute set cross-covariance g <- setcov(WY,WX) # evaluate set cross-covariance at these vectors gvalues <- lookup.im(g, as.vector(DX), as.vector(DY), naok=TRUE, strict=FALSE) weight <- area(WY)/gvalues } # clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=X$n, ncol=Y$n) return(weight) } Kcom }) spatstat/R/levelset.R0000644000176200001440000000235313333543255014275 0ustar liggesusers# levelset.R # # $Revision: 1.5 $ $Date: 2015/01/15 07:10:37 $ # # level set of an image levelset <- function(X, thresh, compare="<=") { # force X and thresh to be evaluated in this frame verifyclass(X, "im") thresh <- thresh switch(compare, "<" = { A <- eval.im(X < thresh) }, ">" = { A <- eval.im(X > thresh) }, "<=" = { A <- eval.im(X <= thresh) }, ">=" = { A <- eval.im(X >= thresh) }, "==" = { A <- eval.im(X == thresh) }, "!=" = { A <- eval.im(X != thresh) }, stop(paste("unrecognised comparison operator", sQuote(compare)))) W <- as.owin(eval.im(ifelse1NA(A))) return(W) } # compute owin containing all pixels where image expression is TRUE solutionset <- function(..., envir) { if(missing(envir)) envir <- parent.frame() A <- try(eval.im(..., envir=envir), silent=TRUE) if(inherits(A, "try-error")) A <- try(eval(..., envir=envir), silent=TRUE) if(inherits(A, "try-error")) stop("Unable to evaluate expression") if(!is.im(A)) stop("Evaluating the expression did not yield a pixel image") if(A$type != "logical") stop("Evaluating the expression did 
not yield a logical-valued image") W <- as.owin(eval.im(ifelse1NA(A))) return(W) } spatstat/R/mincontrast.R0000644000176200001440000007374413570043051015017 0ustar liggesusers# # mincontrast.R # # Functions for estimation by minimum contrast # ################## base ################################ mincontrast <- local({ ## objective function (in a format that is re-usable by other code) contrast.objective <- function(par, objargs, ...) { with(objargs, { theo <- theoretical(par=par, rvals, ...) if(!is.vector(theo) || !is.numeric(theo)) stop("theoretical function did not return a numeric vector") if(length(theo) != nrvals) stop("theoretical function did not return the correct number of values") ## experimental if(!is.null(adjustment)) { theo <- adjustment$fun(theo=theo, par=par, auxdata=adjustment$auxdata, ...) if(!is.vector(theo) || !is.numeric(theo)) stop("adjustment did not return a numeric vector") if(length(theo) != nrvals) stop("adjustment did not return the correct number of values") } ## discrep <- (abs(theo^qq - obsq))^pp value <- mean(discrep) value <- min(value, .Machine$double.xmax) return(value) }) } mincontrast <- function(observed, theoretical, startpar, ..., ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL), adjustment=NULL) { verifyclass(observed, "fv") stopifnot(is.function(theoretical)) if(!any("par" %in% names(formals(theoretical)))) stop(paste("Theoretical function does not include an argument called", sQuote("par"))) ## enforce defaults ctrl <- resolve.defaults(ctrl, list(q = 1/4, p = 2, rmin=NULL, rmax=NULL)) fvlab <- resolve.defaults(fvlab, list(label=NULL, desc="minimum contrast fit")) explain <- resolve.defaults(explain, list(dataname=NULL, modelname=NULL, fname=NULL)) ## extract vector of r values argu <- fvnames(observed, ".x") rvals <- observed[[argu]] ## determine range of r values rmin <- ctrl$rmin rmax <- ctrl$rmax 
if(!is.null(rmin) && !is.null(rmax)) stopifnot(rmin < rmax && rmin >= 0) else { alim <- attr(observed, "alim") %orifnull% range(rvals) if(is.null(rmax)) rmax <- alim[2] if(is.null(rmin)) { rmin <- alim[1] if(rmin == 0 && identical(explain$fname,"g")) rmin <- rmax/1e3 # avoid artefacts at zero in pcf } } ## extract vector of observed values of statistic valu <- fvnames(observed, ".y") obs <- observed[[valu]] ## restrict to [rmin, rmax] if(max(rvals) < rmax) stop(paste("rmax=", signif(rmax,4), "exceeds the range of available data", "= [", signif(min(rvals),4), ",", signif(max(rvals),4), "]")) sub <- (rvals >= rmin) & (rvals <= rmax) rvals <- rvals[sub] obs <- obs[sub] ## sanity clause if(!all(ok <- is.finite(obs))) { whinge <- paste("Some values of the empirical function", sQuote(explain$fname), "were infinite or NA.") iMAX <- max(which(ok)) iMIN <- min(which(!ok)) + 1 if(iMAX > iMIN && all(ok[iMIN:iMAX])) { rmin <- rvals[iMIN] rmax <- rvals[iMAX] obs <- obs[iMIN:iMAX] rvals <- rvals[iMIN:iMAX] sub[sub] <- ok warning(paste(whinge, "Range of r values was reset to", prange(c(rmin, rmax))), call.=FALSE) } else stop(paste(whinge, "Please choose a narrower range [rmin, rmax]"), call.=FALSE) } ## pack data into a list objargs <- list(theoretical = theoretical, rvals = rvals, nrvals = length(rvals), obsq = obs^(ctrl$q), ## for efficiency qq = ctrl$q, pp = ctrl$p, rmin = rmin, rmax = rmax, adjustment = adjustment) ## go minimum <- optim(startpar, fn=contrast.objective, objargs=objargs, ...) ## if convergence failed, issue a warning signalStatus(optimStatus(minimum), errors.only=TRUE) ## evaluate the fitted theoretical curve fittheo <- theoretical(minimum$par, rvals, ...) ## pack it up as an `fv' object label <- fvlab$label %orifnull% "%s[fit](r)" desc <- fvlab$desc fitfv <- bind.fv(observed[sub, ], data.frame(fit=fittheo), label, desc) if(!is.null(adjustment)) { adjtheo <- adjustment$fun(theo=fittheo, par=minimum$par, auxdata=adjustment$auxdata, ...) 
fitfv <- bind.fv(fitfv, data.frame(adjfit=adjtheo), "%s[adjfit](r)", paste("adjusted", desc)) } result <- list(par = minimum$par, fit = fitfv, opt = minimum, ctrl = list(p=ctrl$p,q=ctrl$q,rmin=rmin,rmax=rmax), info = explain, startpar = startpar, objfun = contrast.objective, objargs = objargs, dotargs = list(...)) class(result) <- c("minconfit", class(result)) return(result) } mincontrast }) print.minconfit <- function(x, ...) { terselevel <- spatstat.options('terse') digits <- getOption('digits') ## explanatory cat(paste("Minimum contrast fit ", "(", "object of class ", dQuote("minconfit"), ")", "\n", sep="")) mo <- x$info$modelname fu <- x$info$fname da <- x$info$dataname cm <- x$covmodel if(!is.null(mo)) cat("Model:", mo, fill=TRUE) if(!is.null(cm)) { ## Covariance/kernel model and nuisance parameters cat("\t", cm$type, "model:", cm$model, fill=TRUE) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) splat("\t", cm$type, "parameters:", paste(tagvalue, collapse=", ")) } } if(!is.null(fu) && !is.null(da)) splat("Fitted by matching theoretical", fu, "function to", da) else { if(!is.null(fu)) splat(" based on", fu) if(!is.null(da)) splat(" fitted to", da) } if(waxlyrical('space', terselevel)) cat("\n") ## Values splat("Internal parameters fitted by minimum contrast ($par):") print(x$par, ...) 
if(waxlyrical('space', terselevel)) cat("\n") ## Handling new parameters isPCP <- x$isPCP %orifnull% x$internal$model!="lgcp" cpar <- x$clustpar if (!is.null(cpar)) { splat("Fitted", if(isPCP) "cluster" else "covariance", "parameters:") print(cpar, digits=digits) } else{ ## Old modelpar field if necessary mp <- x$modelpar if(!is.null(mp)) { splat("Derived parameters of", if(!is.null(mo)) mo else "model", "($modelpar):") print(mp) } } if(length(mu <- x$mu)) { if(isPCP) { splat("Mean cluster size: ", if(is.numeric(mu)) paste(signif(mu, digits), "points") else if(is.im(mu)) "[pixel image]" else "[unknown]") } else { splat("Fitted mean of log of random intensity: ", if(is.numeric(mu)) signif(mu, digits) else if(is.im(mu)) "[pixel image]" else "[unknown]") } } if(waxlyrical('space', terselevel)) cat("\n") ## Diagnostics printStatus(optimStatus(x$opt)) ## Starting values if(waxlyrical('gory', terselevel)){ cat("\n") splat("Starting values of parameters:") print(x$startpar) ## Algorithm parameters ct <- x$ctrl splat("Domain of integration:", "[", signif(ct$rmin,4), ",", signif(ct$rmax,4), "]") splat("Exponents:", "p=", paste(signif(ct$p, 3), ",", sep=""), "q=", signif(ct$q,3)) } invisible(NULL) } plot.minconfit <- function(x, ...) 
{
  xname <- short.deparse(substitute(x))
  do.call(plot.fv,
          resolve.defaults(list(x$fit),
                           list(...),
                           list(main=xname)))
}

unitname.minconfit <- function(x) {
  ## Unit of length is carried by the fitted fv object.
  unitname(x$fit)
}

"unitname<-.minconfit" <- function(x, value) {
  unitname(x$fit) <- value
  return(x)
}

as.fv.minconfit <- function(x) x$fit

###### convergence status of 'optim' object

optimStatus <- function(x, call=NULL) {
  ## Translate the 'convergence' code returned by stats::optim()
  ## into a condition object (message/warning/error) which can be
  ## signalled (signalStatus) or printed (printStatus) later.
  cgce <- x$convergence
  neval <- x$counts[["function"]]
  switch(paste(cgce),
         "0" = {
           simpleMessage(
             paste("Converged successfully after", neval,
                   "function evaluations"),
             call)
         },
         "1" = simpleWarning(
           paste("Iteration limit maxit was reached after", neval,
                 "function evaluations"),
           call),
         "10" = simpleWarning("Nelder-Mead simplex was degenerate", call),
         ## codes 51 and 52 are issued only by optim(method="L-BFGS-B");
         ## bug fix: the method name was previously misspelt "L-BGFS-B"
         "51"= {
           simpleWarning(
             paste("Warning message from L-BFGS-B method:",
                   sQuote(x$message)),
             call)
         },
         "52"={
           simpleError(
             paste("Error message from L-BFGS-B method:",
                   sQuote(x$message)),
             call)
         },
         simpleWarning(paste("Unrecognised error code", cgce), call)
         )
}

#' general code for collecting status reports

signalStatus <- function(x, errors.only=FALSE) {
  ## Re-signal a condition object through the normal condition system.
  ## Errors always stop; warnings always warn; messages are emitted
  ## only when errors.only=FALSE.
  stopifnot(inherits(x, "condition"))
  if(inherits(x, "error")) stop(x)
  if(inherits(x, "warning")) warning(x)
  if(inherits(x, "message") && !errors.only) message(x)
  return(invisible(NULL))
}

printStatus <- function(x, errors.only=FALSE) {
  ## Print a condition's message, prefixed by its severity.
  ## Plain messages are suppressed when errors.only=TRUE.
  prefix <- if(inherits(x, "error")) "error: " else
            if(inherits(x, "warning")) "warning: " else NULL
  if(!is.null(prefix) || !errors.only)
    cat(paste(prefix, conditionMessage(x), "\n", sep=""))
  return(invisible(NULL))
}

accumulateStatus <- function(x, stats=NULL) {
  ## Add condition 'x' to the tally 'stats' (a list of distinct
  ## condition objects plus their occurrence counts), coalescing
  ## conditions that are identical().
  values      <- stats$values      %orifnull% list()
  frequencies <- stats$frequencies %orifnull% integer(0)
  if(inherits(x, c("error", "warning", "message"))) {
    same <- unlist(lapply(values, identical, y=x))
    if(any(same)) {
      i <- min(which(same))
      frequencies[i] <- frequencies[i] + 1L
    } else {
      values <- append(values, list(x))
      frequencies <- c(frequencies, 1L)
    }
  }
  stats <- list(values=values, frequencies=frequencies)
  return(stats)
}
printStatusList <- function(stats) {
  ## Print an accumulated tally of conditions (built by accumulateStatus),
  ## each followed by the number of times it occurred.
  with(stats,
       {
         for(i in seq_along(values)) {
           printStatus(values[[i]])
           fi <- frequencies[i]
           splat("\t", paren(paste(fi, ngettext(fi, "time", "times"))))
         }
       }
       )
  invisible(NULL)
}

############### applications (specific models) ##################

getdataname <- function(defaultvalue, ..., dataname=NULL) {
  ## Return 'dataname' if it was supplied among the dots, else the default.
  if(!is.null(dataname)) dataname else defaultvalue
}

thomas.estK <- function(X, startpar=c(kappa=1,scale=1),
                        lambda=NULL,
                        q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
  ## Fit a Thomas cluster process by minimum contrast, matching the
  ## theoretical K-function to an empirical one.
  ## X: either an 'fv' object containing an estimated K-function,
  ##    or a point pattern ('ppp') from which Kest(X) is computed.
  ## lambda: point process intensity; estimated from X when X is a ppp.
  ## q, p, rmin, rmax: contrast criterion parameters passed to mincontrast.
  ## Returns a 'minconfit' object.
  dataname <- getdataname(short.deparse(substitute(X), 20), ...)
  if(inherits(X, "fv")) {
    K <- X
    if(!identical(attr(K, "fname")[1], "K"))
      warning("Argument X does not appear to be a K-function")
  } else if(inherits(X, "ppp")) {
    K <- Kest(X)
    dataname <- paste("Kest(", dataname, ")", sep="")
    # estimate the intensity from the data pattern if not supplied
    if(is.null(lambda))
      lambda <- summary(X)$intensity
  } else
    stop("Unrecognised format for argument X")
  ## model-specific ingredients (theoretical K, parameter checking)
  info <- spatstatClusterModelInfo("Thomas")
  startpar <- info$checkpar(startpar)
  theoret <- info$K
  result <- mincontrast(K, theoret, startpar,
                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
                        fvlab=list(label="%s[fit](r)",
                                   desc="minimum contrast fit of Thomas process"),
                        explain=list(dataname=dataname,
                                     fname=attr(K, "fname"),
                                     modelname="Thomas process"),
                        ...)
  ## imbue with meaning
  par <- result$par
  names(par) <- c("kappa", "sigma2")
  result$par <- par
  ## infer meaningful model parameters
  result$modelpar <- info$interpret(par, lambda)
  result$internal <- list(model="Thomas")
  ## add new parametrisation to object
  result$clustpar <- info$checkpar(par, old=FALSE)
  return(result)
}

lgcp.estK <- function(X, startpar=c(var=1,scale=1),
                      covmodel=list(model="exponential"),
                      lambda=NULL,
                      q=1/4, p=2, rmin=NULL, rmax=NULL, ...) {
  dataname <- getdataname(short.deparse(substitute(X), 20), ...)
if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar) ## digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } matclust.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar) theoret <- info$K funaux <- info$funaux result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } ## versions using pcf (suggested by Jan Wild) thomas.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Thomas") startpar <- info$checkpar(startpar) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list( label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list( dataname=dataname, fname=attr(g, "fname"), modelname="Thomas process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } matclust.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("MatClust") startpar <- info$checkpar(startpar) theoret <- info$pcf funaux <- info$funaux ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) ## imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } lgcp.estpcf <- function(X, startpar=c(var=1,scale=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("LGCP") startpar <- info$checkpar(startpar) ## digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$pcf result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) ## imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } cauchy.estK <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!identical(attr(K, "fname")[1], "K")) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar) theoret <- info$K desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } cauchy.estpcf <- function(X, startpar=c(kappa=1,scale=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## omega: scale parameter of Cauchy kernel function ## eta: scale parameter of Cauchy pair correlation function ## eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") info <- spatstatClusterModelInfo("Cauchy") startpar <- info$checkpar(startpar) theoret <- info$pcf ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Cauchy process"), ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) return(result) } ## user-callable resolve.vargamma.shape <- function(..., nu.ker=NULL, nu.pcf=NULL, default = FALSE) { if(is.null(nu.ker) && is.null(nu.pcf)){ if(!default) stop("Must specify either nu.ker or nu.pcf", call.=FALSE) nu.ker <- -1/4 } if(!is.null(nu.ker) && !is.null(nu.pcf)) stop("Only one of nu.ker and nu.pcf should be specified", call.=FALSE) if(!is.null(nu.ker)) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 } else { check.1.real(nu.pcf) stopifnot(nu.pcf > 0) nu.ker <- (nu.pcf - 1)/2 } return(list(nu.ker=nu.ker, nu.pcf=nu.pcf)) } vargamma.estK <- function(X, startpar=c(kappa=1,scale=1), nu = -1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) 
{
  ## nu.ker: smoothness parameter of Variance Gamma kernel function
  ## omega: scale parameter of kernel function
  ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function
  ## eta: scale parameter of Variance Gamma pair correlation function
  ## nu.pcf = 2 * nu.ker + 1 and eta = omega
  dataname <- getdataname(short.deparse(substitute(X), 20), ...)
  ## dispatch on the type of X: a K-function or a point pattern
  if(inherits(X, "fv")) {
    K <- X
    if(!identical(attr(K, "fname")[1], "K"))
      warning("Argument X does not appear to be a K-function")
  } else if(inherits(X, "ppp")) {
    ## estimate K and take the empirical intensity as default lambda
    K <- Kest(X)
    dataname <- paste("Kest(", dataname, ")", sep="")
    if(is.null(lambda))
      lambda <- summary(X)$intensity
  } else stop("Unrecognised format for argument X")
  ## Catch old nu.ker/nu.pcf syntax and resolve nu-value.
  dots <- list(...)
  if(missing(nu)){
    ## default = TRUE: falls back to nu.ker = -1/4 when neither is supplied
    nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker,
                                 nu.pcf=dots$nu.pcf,
                                 default = TRUE)$nu.ker
  }
  check.1.real(nu)
  stopifnot(nu > -1/2)
  ## look up the VarGamma model recipe (theoretical K, parameter handling)
  info <- spatstatClusterModelInfo("VarGamma")
  startpar <- info$checkpar(startpar)
  theoret <- info$K
  ## test validity of parameter nu and digest
  ph <- info$parhandler
  cmodel <- ph(nu.ker=nu)
  margs <- cmodel$margs
  desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel"
  ## fit by minimum contrast; margs carries the fixed shape parameters
  result <- mincontrast(K, theoret, startpar,
                        ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax),
                        fvlab=list(label="%s[fit](r)",
                                   desc=desc),
                        explain=list(dataname=dataname,
                                     fname=attr(K, "fname"),
                                     modelname="Variance Gamma process"),
                        margs=margs,
                        ...)
## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } vargamma.estpcf <- function(X, startpar=c(kappa=1,scale=1), nu=-1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { ## nu.ker: smoothness parameter of Variance Gamma kernel function ## omega: scale parameter of kernel function ## nu.pcf: smoothness parameter of Variance Gamma pair correlation function ## eta: scale parameter of Variance Gamma pair correlation function ## nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(inherits(X, "fv")) { g <- X if(!identical(attr(g, "fname")[1], "g")) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call(pcf.ppp, append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") ## Catch old nu.ker/nu.pcf syntax and resolve nu-value. dots <- list(...) 
if(missing(nu)){ ## nutmp <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent=TRUE) ## if(!inherits(nutmp, "try-error")) nu <- nutmp nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf, default = TRUE)$nu.ker } check.1.real(nu) stopifnot(nu > -1/2) info <- spatstatClusterModelInfo("VarGamma") startpar <- info$checkpar(startpar) theoret <- info$pcf ## test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu) margs <- cmodel$margs ## avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Variance Gamma process"), margs=margs, ...) ## imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel ## infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") ## add new parametrisation to object result$clustpar <- info$checkpar(par, old=FALSE) result$clustargs <- info$checkclustargs(cmodel$margs, old=FALSE) return(result) } spatstat/R/clusterinfo.R0000644000176200001440000006511413402725253015010 0ustar liggesusers## lookup table of explicitly-known K functions and pcf ## and algorithms for computing sensible starting parameters .Spatstat.ClusterModelInfoTable <- list( Thomas=list( ## Thomas process: old par = (kappa, sigma2) (internally used everywhere) ## Thomas process: new par = (kappa, scale) (officially recommended for input/output) modelname = "Thomas process", # In modelname field of mincon fv obj. descname = "Thomas process", # In desc field of mincon fv obj. modelabbrev = "Thomas process", # In fitted obj. 
printmodelname = function(...) "Thomas process", # Used by print.kppm
parnames = c("kappa", "sigma2"),
clustargsnames = NULL,
## Validate/convert the parameter vector.
## old=TRUE  : return internal parametrisation (kappa, sigma2)
## old=FALSE : return user-facing parametrisation (kappa, scale), scale = sqrt(sigma2)
## Accepts either (kappa, sigma2) or (kappa, scale) on input.
checkpar = function(par, old = TRUE){
  if(is.null(par))
    par <- c(kappa=1,scale=1)
  if(any(par<=0))
    stop("par values must be positive.")
  nam <- check.named.vector(par, c("kappa","sigma2"), onError="null")
  if(is.null(nam)) {
    ## input was in (kappa, scale) form: convert scale -> sigma2
    check.named.vector(par, c("kappa","scale"))
    names(par)[2L] <- "sigma2"
    par[2L] <- par[2L]^2
  }
  if(!old){
    ## convert back to user-facing (kappa, scale)
    names(par)[2L] <- "scale"
    par[2L] <- sqrt(par[2L])
  }
  return(par)
},
checkclustargs = function(margs, old = TRUE) list(),
resolvedots = function(...){
  return(list(...))
},
# density function for the distance to offspring
# (2*pi*r times the isotropic Gaussian density, i.e. a Rayleigh-type density in r)
ddist = function(r, scale, ...) {
  2 * pi * r * dnorm(r, 0, scale)/sqrt(2*pi*scale^2)
},
## Practical range of clusters
range = function(...){
  dots <- list(...)
  par <- dots$par
  # Choose the first of the possible supplied values for scale:
  scale <- c(dots$scale, dots$par[["scale"]], dots$sigma, dots$par[["sigma"]])[1L]
  if(is.null(scale))
    stop("Argument ", sQuote("scale"), " must be given.")
  thresh <- dots$thresh
  if(!is.null(thresh)){
    ## The squared length of isotropic Gaussian (sigma)
    ## is exponential with mean 2 sigma^2
    rmax <- scale * sqrt(2 * qexp(thresh, lower.tail=FALSE))
    ## old code
    ## ddist <- .Spatstat.ClusterModelInfoTable$Thomas$ddist
    ## kernel0 <- clusterkernel("Thomas", scale = scale)(0,0)
    ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0
    ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root
  } else{
    ## default: four standard deviations
    rmax <- 4*scale
  }
  return(rmax)
},
## kernel density (2D), evaluated at distances rvals; par is internal (kappa, sigma2)
kernel = function(par, rvals, ...)
{
  ## par is internal (kappa, sigma2); 2D isotropic Gaussian kernel density
  scale <- sqrt(par[2L])
  dnorm(rvals, 0, scale)/sqrt(2*pi*scale^2)
},
isPCP=TRUE,
## K-function
## Closed form for the (modified) Thomas process with par = (kappa, sigma2)
K = function(par,rvals, ...){
  ## invalid (non-positive) parameters: return Inf so mincontrast rejects them
  if(any(par <= 0))
    return(rep.int(Inf, length(rvals)))
  pi*rvals^2+(1-exp(-rvals^2/(4*par[2L])))/par[1L]
},
## pair correlation function
pcf= function(par,rvals, ...){
  if(any(par <= 0))
    return(rep.int(Inf, length(rvals)))
  1 + exp(-rvals^2/(4 * par[2L]))/(4 * pi * par[1L] * par[2L])
},
## sensible starting parameters
## kappa from the observed intensity; sigma2 from mean nearest-neighbour distance
selfstart = function(X) {
  kappa <- intensity(X)
  sigma2 <- 4 * mean(nndist(X))^2
  c(kappa=kappa, sigma2=sigma2)
},
## meaningful model parameters
## mu (mean cluster size) requires a scalar intensity estimate lambda
interpret = function(par, lambda) {
  kappa <- par[["kappa"]]
  sigma <- sqrt(par[["sigma2"]])
  mu <- if(is.numeric(lambda) && length(lambda) == 1)
          lambda/kappa else NA
  c(kappa=kappa, sigma=sigma, mu=mu)
},
## Experimental: convert to/from canonical cluster parameters
tocanonical = function(par) {
  kappa <- par[[1L]]
  sigma2 <- par[[2L]]
  c(strength=1/(4 * pi * kappa * sigma2), scale=sqrt(sigma2))
},
tohuman = function(can) {
  strength <- can[[1L]]
  scale <- can[[2L]]
  sigma2 <- scale^2
  c(kappa=1/(4 * pi * strength * sigma2), sigma2=sigma2)
}
),
## ...............................................
MatClust=list(
  ## Matern Cluster process: old par = (kappa, R) (internally used everywhere)
  ## Matern Cluster process: new par = (kappa, scale) (officially recommended for input/output)
  modelname = "Matern cluster process", # In modelname field of mincon fv obj.
  descname = "Matern cluster process", # In desc field of mincon fv obj.
  modelabbrev = "Matern cluster process", # In fitted obj.
  printmodelname = function(...)
"Matern cluster process", # Used by print.kppm parnames = c("kappa", "R"), clustargsnames = NULL, checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","R"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "R" } if(!old){ names(par)[2L] <- "scale" } return(par) }, # density function for the distance to offspring ddist = function(r, scale, ...) { ifelse(r>scale, 0, 2 * r / scale^2) }, ## Practical range of clusters range = function(...){ dots <- list(...) par <- dots$par # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]], dots$R, dots$par[["R"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") if(!is.null(dots$thresh)) warning("Argument ", sQuote("thresh"), " is ignored for Matern Cluster model") return(scale) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ return(list(...)) }, kernel = function(par, rvals, ...) 
{ scale <- par[2L] ifelse(rvals>scale, 0, 1/(pi*scale^2)) }, isPCP=TRUE, K = function(par,rvals, ..., funaux){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) kappa <- par[1L] R <- par[2L] Hfun <- funaux$Hfun y <- pi * rvals^2 + (1/kappa) * Hfun(rvals/(2 * R)) return(y) }, pcf= function(par,rvals, ..., funaux){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) kappa <- par[1L] R <- par[2L] g <- funaux$g y <- 1 + (1/(pi * kappa * R^2)) * g(rvals/(2 * R)) return(y) }, funaux=list( Hfun=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 1 z <- zz[ok] h[ok] <- 2 + (1/pi) * ( (8 * z^2 - 4) * acos(z) - 2 * asin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2) ) return(h) }, DOH=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 0 z <- zz[ok] h[ok] <- (16/pi) * (z * acos(z) - (z^2) * sqrt(1 - z^2)) return(h) }, ## g(z) = DOH(z)/z has a limit at z=0. g=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 0 z <- zz[ok] h[ok] <- (2/pi) * (acos(z) - z * sqrt(1 - z^2)) return(h) }), ## sensible starting paramters selfstart = function(X) { kappa <- intensity(X) R <- 2 * mean(nndist(X)) c(kappa=kappa, R=R) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] R <- par[["R"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, R=R, mu=mu) } ), ## ............................................... Cauchy=list( ## Neyman-Scott with Cauchy clusters: old par = (kappa, eta2) (internally used everywhere) ## Neyman-Scott with Cauchy clusters: new par = (kappa, scale) (officially recommended for input/output) modelname = "Neyman-Scott process with Cauchy kernel", # In modelname field of mincon fv obj. descname = "Neyman-Scott process with Cauchy kernel", # In desc field of mincon fv obj. modelabbrev = "Cauchy process", # In fitted obj. printmodelname = function(...) 
"Cauchy process", # Used by print.kppm parnames = c("kappa", "eta2"), clustargsnames = NULL, checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","eta2"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "eta2" par[2L] <- (2*par[2L])^2 } if(!old){ names(par)[2L] <- "scale" par[2L] <- sqrt(par[2L])/2 } return(par) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ return(list(...)) }, # density function for the distance to offspring ddist = function(r, scale, ...) { r/(scale^2) * (1 + (r / scale)^2)^(-3/2) }, ## Practical range of clusters range = function(...){ dots <- list(...) # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") thresh <- dots$thresh %orifnull% 0.01 ## integral of ddist(r) dr is 1 - (1+(r/scale)^2)^(-1/2) ## solve for integral = 1-thresh: rmax <- scale * sqrt(1/thresh^2 - 1) ## old code ## ddist <- .Spatstat.ClusterModelInfoTable$Cauchy$ddist ## kernel0 <- clusterkernel("Cauchy", scale = scale)(0,0) ## f <- function(r) ddist(r, scale = scale)-thresh*kernel0 ## rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root return(rmax) }, kernel = function(par, rvals, ...) 
{ scale <- sqrt(par[2L])/2 1/(2*pi*scale^2)*((1 + (rvals/scale)^2)^(-3/2)) }, isPCP=TRUE, K = function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) pi*rvals^2 + (1 - 1/sqrt(1 + rvals^2/par[2L]))/par[1L] }, pcf= function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) 1 + ((1 + rvals^2/par[2L])^(-1.5))/(2 * pi * par[2L] * par[1L]) }, selfstart = function(X) { kappa <- intensity(X) eta2 <- 4 * mean(nndist(X))^2 c(kappa = kappa, eta2 = eta2) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- sqrt(par[["eta2"]])/2 mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), ## ............................................... VarGamma=list( ## Neyman-Scott with VarianceGamma/Bessel clusters: old par = (kappa, eta) (internally used everywhere) ## Neyman-Scott with VarianceGamma/Bessel clusters: new par = (kappa, scale) (officially recommended for input/output) modelname = "Neyman-Scott process with Variance Gamma kernel", # In modelname field of mincon fv obj. descname = "Neyman-Scott process with Variance Gamma kernel", # In desc field of mincon fv obj. modelabbrev = "Variance Gamma process", # In fitted obj. 
printmodelname = function(obj){ # Used by print.kppm paste0("Variance Gamma process (nu=", signif(obj$clustargs[["nu"]], 2), ")") }, parnames = c("kappa", "eta"), clustargsnames = "nu", checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(kappa=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("kappa","eta"), onError="null") if(is.null(nam)) { check.named.vector(par, c("kappa","scale")) names(par)[2L] <- "eta" } if(!old) names(par)[2L] <- "scale" return(par) }, checkclustargs = function(margs, old = TRUE){ if(!old) margs <- list(nu=margs$nu.ker) return(margs) }, resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) out <- list() nu <- dots$nu if(is.null(nu)){ nu <- try(resolve.vargamma.shape(nu.ker=dots$nu.ker, nu.pcf=dots$nu.pcf)$nu.ker, silent = TRUE) if(inherits(nu, "try-error")) nu <- -1/4 } else { check.1.real(nu) stopifnot(nu > -1/2) } out$margs <- list(nu.ker=nu, nu.pcf=2*nu+1) out$covmodel <- list(type="Kernel", model="VarGamma", margs=out$margs) return(out) }, # density function for the distance to offspring ddist = function(r, scale, nu, ...) { numer <- ((r/scale)^(nu+1)) * besselK(r/scale, nu) numer[r==0] <- 0 denom <- (2^nu) * scale * gamma(nu + 1) numer/denom }, ## Practical range of clusters range = function(...){ dots <- list(...) # Choose the first of the possible supplied values for scale: scale <- c(dots$scale, dots$par[["scale"]])[1L] if(is.null(scale)) stop("Argument ", sQuote("scale"), " must be given.") # Find value of nu: extra <- .Spatstat.ClusterModelInfoTable$VarGamma$resolvedots(...) 
nu <- .Spatstat.ClusterModelInfoTable$VarGamma$checkclustargs(extra$margs, old=FALSE)$nu if(is.null(nu)) stop("Argument ", sQuote("nu"), " must be given.") thresh <- dots$thresh if(is.null(thresh)) thresh <- .001 ddist <- .Spatstat.ClusterModelInfoTable$VarGamma$ddist f1 <- function(rmx) { integrate(ddist, 0, rmx, scale=scale, nu=nu)$value - (1 - thresh) } f <- Vectorize(f1) ## old code ## kernel0 <- clusterkernel("VarGamma", scale = scale, nu = nu)(0,0) ## f <- function(r) ddist(r, scale = scale, nu = nu) - thresh*kernel0 rmax <- uniroot(f, lower = scale, upper = 1000 * scale)$root return(rmax) }, ## kernel function in polar coordinates (no angular argument). kernel = function(par, rvals, ..., margs) { scale <- as.numeric(par[2L]) nu <- margs$nu if(is.null(nu)) stop("Argument ", sQuote("nu"), " is missing.") numer <- ((rvals/scale)^nu) * besselK(rvals/scale, nu) numer[rvals==0] <- ifelse(nu>0, 2^(nu-1)*gamma(nu), Inf) denom <- pi * (2^(nu+1)) * scale^2 * gamma(nu + 1) numer/denom }, isPCP=TRUE, K = local({ ## K function requires integration of pair correlation xgx <- function(x, par, nu.pcf) { ## x * pcf(x) without check on par values numer <- (x/par[2L])^nu.pcf * besselK(x/par[2L], nu.pcf) denom <- 2^(nu.pcf+1) * pi * par[2L]^2 * par[1L] * gamma(nu.pcf + 1) return(x * (1 + numer/denom)) } vargammaK <- function(par,rvals, ..., margs){ ## margs = list(.. nu.pcf.. ) if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf out <- numeric(length(rvals)) ok <- (rvals > 0) rvalsok <- rvals[ok] outok <- numeric(sum(ok)) for (i in 1:length(rvalsok)) outok[i] <- 2 * pi * integrate(xgx, lower=0, upper=rvalsok[i], par=par, nu.pcf=nu.pcf)$value out[ok] <- outok return(out) } ## Initiated integration in sub-subintervals, but it is unfinished! ## vargammaK <- function(par,rvals, ..., margs){ ## ## margs = list(.. nu.pcf.. 
) ## if(any(par <= 0)) ## return(rep.int(Inf, length(rvals))) ## nu.pcf <- margs$nu.pcf ## out <- numeric(length(rvals)) ## out[1L] <- if(rvals[1L] == 0) 0 else ## integrate(xgx, lower=0, upper=rvals[1L], ## par = par, nu.pcf=nu.pcf)$value ## for (i in 2:length(rvals)) { ## delta <- integrate(xgx, ## lower=rvals[i-1L], upper=rvals[i], ## par=par, nu.pcf=nu.pcf) ## out[i]=out[i-1L]+delta$value ## } ## return(out) ## } vargammaK }), ## end of 'local' pcf= function(par,rvals, ..., margs){ ## margs = list(..nu.pcf..) if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf sig2 <- 1 / (4 * pi * (par[2L]^2) * nu.pcf * par[1L]) denom <- 2^(nu.pcf - 1) * gamma(nu.pcf) rr <- rvals / par[2L] ## Matern correlation function fr <- ifelseXB(rr > 0, (rr^nu.pcf) * besselK(rr, nu.pcf) / denom, 1) return(1 + sig2 * fr) }, parhandler = function(..., nu.ker = -1/4) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 return(list(type="Kernel", model="VarGamma", margs=list(nu.ker=nu.ker, nu.pcf=nu.pcf))) }, ## sensible starting values selfstart = function(X) { kappa <- intensity(X) eta <- 2 * mean(nndist(X)) c(kappa=kappa, eta=eta) }, ## meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- par[["eta"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), ## ............................................... LGCP=list( ## Log Gaussian Cox process: old par = (sigma2, alpha) (internally used everywhere) ## Log Gaussian Cox process: new par = (var, scale) (officially recommended for input/output) modelname = "Log-Gaussian Cox process", # In modelname field of mincon fv obj. descname = "LGCP", # In desc field of mincon fv obj. modelabbrev = "log-Gaussian Cox process", # In fitted obj. printmodelname = function(...) 
"log-Gaussian Cox process", # Used by print.kppm parnames = c("sigma2", "alpha"), checkpar = function(par, old = TRUE){ if(is.null(par)) par <- c(var=1,scale=1) if(any(par<=0)) stop("par values must be positive.") nam <- check.named.vector(par, c("sigma2","alpha"), onError="null") if(is.null(nam)) { check.named.vector(par, c("var","scale")) names(par) <- c("sigma2", "alpha") } if(!old) names(par) <- c("var", "scale") return(par) }, checkclustargs = function(margs, old = TRUE) return(margs), resolvedots = function(...){ ## resolve dots for kppm and friends allowing for old/new par syntax dots <- list(...) nam <- names(dots) out <- list() cmod <- dots$covmodel model <- cmod$model %orifnull% dots$model %orifnull% "exponential" margs <- NULL if(!identical(model, "exponential")) { ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) attr(model, "modgen") <- modgen if(is.null(cmod)){ margsnam <- names(formals(modgen)) margsnam <- margsnam[!(margsnam %in% c("var", "scale"))] margs <- dots[nam %in% margsnam] } else{ margs <- cmod[names(cmod)!="model"] } } if(length(margs)==0) { margs <- NULL } else { ## detect anisotropic model if("Aniso" %in% names(margs)) stop("Anisotropic covariance models cannot be used", call.=FALSE) } out$margs <- margs out$model <- model out$covmodel <- list(type="Covariance", model=model, margs=margs) return(out) }, isPCP=FALSE, ## calls relevant covariance function from RandomFields package K = function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { ## For efficiency and to avoid need for RandomFields package integrand <- function(r,par,...) 
2*pi*r*exp(par[1L]*exp(-r/par[2L])) } else { kraeverRandomFields() integrand <- function(r,par,model,margs) { modgen <- attr(model, "modgen") if(length(margs) == 0) { mod <- modgen(var=par[1L], scale=par[2L]) } else { mod <- do.call(modgen, append(list(var=par[1L], scale=par[2L]), margs)) } 2*pi *r *exp(RandomFields::RFcov(model=mod, x=r)) } } nr <- length(rvals) th <- numeric(nr) if(spatstat.options("fastK.lgcp")) { ## integrate using Simpson's rule fvals <- integrand(r=rvals, par=par, model=model, margs=margs) th[1L] <- rvals[1L] * fvals[1L]/2 if(nr > 1) for(i in 2:nr) th[i] <- th[i-1L] + (rvals[i] - rvals[i-1L]) * (fvals[i] + fvals[i-1L])/2 } else { ## integrate using 'integrate' th[1L] <- if(rvals[1L] == 0) 0 else integrate(integrand,lower=0,upper=rvals[1L], par=par,model=model,margs=margs)$value for (i in 2:length(rvals)) { delta <- integrate(integrand, lower=rvals[i-1L],upper=rvals[i], par=par,model=model,margs=margs) th[i]=th[i-1L]+delta$value } } return(th) }, pcf= function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { ## For efficiency and to avoid need for RandomFields package gtheo <- exp(par[1L]*exp(-rvals/par[2L])) } else { kraeverRandomFields() modgen <- attr(model, "modgen") if(length(margs) == 0) { mod <- modgen(var=par[1L], scale=par[2L]) } else { mod <- do.call(modgen, append(list(var=par[1L], scale=par[2L]), margs)) } gtheo <- exp(RandomFields::RFcov(model=mod, x=rvals)) } return(gtheo) }, parhandler=function(model = "exponential", ...) { if(!is.character(model)) stop("Covariance function model should be specified by name") margs <- c(...) 
if(!identical(model, "exponential")) { ## get the 'model generator' modgen <- getRandomFieldsModelGen(model) attr(model, "modgen") <- modgen } return(list(type="Covariance", model=model, margs=margs)) }, ## sensible starting values selfstart = function(X) { alpha <- 2 * mean(nndist(X)) c(sigma2=1, alpha=alpha) }, ## meaningful model parameters interpret = function(par, lambda) { sigma2 <- par[["sigma2"]] alpha <- par[["alpha"]] mu <- if(is.numeric(lambda) && length(lambda) == 1 && lambda > 0) log(lambda) - sigma2/2 else NA c(sigma2=sigma2, alpha=alpha, mu=mu) } ) ) spatstatClusterModelInfo <- function(name, onlyPCP = FALSE) { if(inherits(name, "detpointprocfamily")) return(spatstatDPPModelInfo(name)) if(!is.character(name) || length(name) != 1) stop("Argument must be a single character string", call.=FALSE) TheTable <- .Spatstat.ClusterModelInfoTable nama2 <- names(TheTable) if(onlyPCP){ ok <- sapply(TheTable, getElement, name="isPCP") nama2 <- nama2[ok] } if(!(name %in% nama2)) stop(paste(sQuote(name), "is not recognised;", "valid names are", commasep(sQuote(nama2))), call.=FALSE) out <- TheTable[[name]] return(out) } spatstat/R/pool.R0000644000176200001440000000647113333543255013430 0ustar liggesusers#' #' pool.R #' #' $Revision: 1.5 $ $Date: 2017/06/05 10:31:58 $ pool <- function(...) { UseMethod("pool") } pool.fv <- local({ Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Cmul <- function(A, f) { force(A); force(f); eval.fv(f * A, relabel=FALSE) } pool.fv <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) 
## --- main body of pool.fv: pool several fv objects by a
## --- (optionally weighted) ratio-of-sums estimate, with a
## --- delta-method variance and a two-sigma confidence band.
## (helpers Square/Add/Cmul are defined in the enclosing local() scope)
n <- narg <- length(argh)
if(narg == 0) return(NULL)
## a single function pools to itself
if(narg == 1) return(argh[[1]])
## validate
isfv <- unlist(lapply(argh, is.fv))
if(!all(isfv))
  stop("All arguments must be fv objects")
## force all functions onto a common r-grid / column set
argh <- do.call(harmonise, append(argh, list(strict=TRUE)))
template <- vanilla.fv(argh[[1]])
## compute products
if(!is.null(weights)) {
  check.nvector(weights, narg, things="Functions")
  Y <- Map(Cmul, argh, weights)      # w_i * f_i
  XY <- Map(Cmul, argh, weights^2)   # w_i^2 * f_i  (for the covariance term)
  sumX <- sum(weights)
  sumX2 <- sum(weights^2)
} else {
  ## default: weights=1
  Y <- XY <- argh
  sumX <- sumX2 <- narg
}
## sum
sumY <- Reduce(Add, Y)
attributes(sumY) <- attributes(template)
## ratio-of-sums
Ratio <- eval.fv(sumY/sumX, relabel=FALSE)
if(variance) {
  ## variance calculation
  ## sample moments of the weights (X) and weighted functions (Y)
  meanX <- sumX/n
  meanY <- eval.fv(sumY/n, relabel=FALSE)
  sumY2 <- Reduce(Add, lapply(Y, Square))
  varX <- (sumX2 - n * meanX^2)/(n-1)
  varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE)
  sumXY <- Reduce(Add, XY)
  covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE)
  ## variance by delta method
  ## relative variance of the ratio, clipped at zero pointwise
  relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2
                             - 2 * covXY/(meanX * meanY)),
                    relabel=FALSE)
  Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE)
  ## two sigma CI
  hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE)
  loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE)
}
## tweak labels of main estimate
attributes(Ratio) <- attributes(template)
if(relabel)
  Ratio <- prefixfv(Ratio,
                    tagprefix="pool",
                    descprefix="pooled ",
                    lablprefix="")
if(!variance) return(Ratio)
## tweak labels of variance terms
attributes(Variance) <- attributes(template)
Variance <- prefixfv(Variance,
                     tagprefix="var",
                     descprefix="delta-method variance estimate of ",
                     lablprefix="bold(var)~")
attributes(hiCI) <- attributes(loCI) <- attributes(template)
hiCI <- prefixfv(hiCI,
                 tagprefix="hi",
                 descprefix="upper limit of two-sigma CI based on ",
                 lablprefix="bold(hi)~")
loCI <- prefixfv(loCI,
                 tagprefix="lo",
                 descprefix="lower limit of two-sigma CI based on ",
lablprefix="bold(lo)~") ## glue together result <- Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) ## don't plot variances, by default fvnames(result, ".") <- setdiff(fvnames(result, "."), fvnames(Variance, ".")) return(result) } pool.fv }) spatstat/R/terse.R0000644000176200001440000000251413333543255013573 0ustar liggesusers## terse.R ## ## code to control terseness and layout of printed output ## ## $Revision: 1.11 $ $Date: 2016/09/23 02:07:24 $ ## ## paragraph break in long output e.g. ppm parbreak <- function(terse = spatstat.options("terse")) { if(waxlyrical('space', terse)) cat("\n") return(invisible(NULL)) } waxlyrical <- local({ ## Values of spatstat.options('terse'): ## 0 default ## 1 suppress obvious wastage e.g. 'gory details' ## 2 contract space between paragraphs in long output ## 3 suppress extras e.g. standard errors and CI ## 4 suppress error messages eg failed to converge TerseCutoff <- list(gory=1, space=2, extras=3, errors=4) waxlyrical <- function(type, terse = spatstat.options("terse")) { if(!(type %in% names(TerseCutoff))) stop(paste("Internal error: unrecognised permission request", sQuote(type)), call.=TRUE) return(terse < TerseCutoff[[type]]) } waxlyrical }) ruletextline <- function(ch="-", n=getOption('width'), terse=spatstat.options('terse')) { if(waxlyrical('space', terse)) { chn <- paste(rep(ch, n), collapse="") chn <- substr(chn, 1, n) cat(chn, fill=TRUE) } return(invisible(NULL)) } spatstat/R/marktable.R0000644000176200001440000000272113333543255014413 0ustar liggesusers# # marktable.R # # Tabulate mark frequencies in neighbourhood of each point # for multitype point patterns # # $Revision: 1.7 $ $Date: 2015/03/25 03:43:35 $ # # Requested by Ian Robertson "marktable" <- function(X, R, N, exclude=TRUE, collapse=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=FALSE)) stop("point pattern has no marks") gotR <- !missing(R) && !is.null(R) gotN <- !missing(N) && !is.null(N) if(gotN == gotR) stop("Exactly one of the arguments N and R 
should be given") stopifnot(is.logical(exclude) && length(exclude) == 1) m <- marks(X) if(!is.factor(m)) stop("marks must be a factor") if(gotR) { stopifnot(is.numeric(R) && length(R) == 1 && R > 0) #' identify close pairs p <- closepairs(X,R,what="indices") pi <- p$i pj <- p$j if(!exclude) { #' add identical pairs n <- X$n pi <- c(pi, 1:n) pj <- c(pj, 1:n) } } else { stopifnot(is.numeric(N) && length(N) == 1) ii <- seq_len(npoints(X)) nn <- nnwhich(X, k=1:N) if(N == 1) nn <- matrix(nn, ncol=1) if(!exclude) nn <- cbind(ii, nn) pi <- as.vector(row(nn)) pj <- as.vector(nn) } #' tabulate if(!collapse) { ## table for each point i <- factor(pi, levels=seq_len(npoints(X))) mj <- m[pj] mat <- table(point=i, mark=mj) } else { #' table by type mi <- m[pi] mj <- m[pj] mat <- table(point=mi, neighbour=mj) } return(mat) } spatstat/R/twostage.R0000644000176200001440000002736513547263746014334 0ustar liggesusers## ## twostage.R ## ## Two-stage Monte Carlo tests and envelopes ## ## $Revision: 1.17 $ $Date: 2019/10/09 04:52:11 $ ## bits.test <- function(X, ..., exponent=2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsim, reuse=FALSE, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Balanced Independent Two-stage Test") } dg.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse=TRUE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsimsub, reuse=reuse, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, 
savepatterns=savepatterns, verbose=verbose, testblurb="Dao-Genton adjusted goodness-of-fit test") } twostage.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim, alternative=c("two.sided", "less", "greater"), reuse=FALSE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE, badXfatal=TRUE, testblurb="Two-stage Monte Carlo test") { Xname <- short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X) ## first-stage p-value if(verbose) cat("Applying first-stage test to original data... ") tX <- envelopeTest(X, ..., nsim=if(reuse) nsim else nsimsub, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=savepatterns || reuse, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) pX <- tX$p.value ## check special case afortiori <- !interpolate && !reuse && (nsimsub < nsim) && (pX == (1/(nsim+1)) || pX == 1) if(afortiori) { ## result is determined padj <- pX pY <- NULL } else { ## result is not yet determined if(!reuse) { if(verbose) cat("Repeating first-stage test... ") tXX <- envelopeTest(X, ..., nsim=nsim, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) ## extract simulated patterns Ylist <- attr(attr(tXX, "envelope"), "simpatterns") } else { Ylist <- attr(attr(tX, "envelope"), "simpatterns") } if(verbose) cat("Done.\n") ## apply same test to each simulated pattern if(verbose) cat(paste("Running second-stage tests on", nsim, "simulated patterns... ")) pY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] ## if X is a model, fit it to Yi. Otherwise the implicit model is CSR. 
## --- continuation of twostage.test: second-stage loop body ---
## if X is a model, fit it to the simulated pattern; otherwise the
## implicit model is CSR
if(Xismodel) Yi <- update(X, Yi)
tYi <- envelopeTest(Yi, ...,
                    nsim=nsimsub,
                    alternative=alternative,
                    leaveout=leaveout,
                    interpolate=interpolate,
                    exponent=exponent,
                    savepatterns=TRUE,
                    verbose=FALSE,
                    badXfatal=FALSE,
                    envir.simul=env.here)
pY[i] <- tYi$p.value
}
pY <- sort(pY)
## compute adjusted p-value
padj <- (1 + sum(pY <= pX))/(1+nsim)
}
# pack up
method <- tX$method
method <- c(testblurb,
            paste("based on", method[1L]),
            paste("First stage:", method[2L]),
            method[-(1:2)],
            if(afortiori) {
              paren(paste("Second stage was omitted: p0 =", pX,
                          "implies p-value =", padj))
            } else if(reuse) {
              paste("Second stage: nested, ", nsimsub,
                    "simulations for each first-stage simulation")
            } else {
              paste("Second stage:", nsim, "*", nsimsub,
                    "nested simulations independent of first stage")
            }
            )
names(pX) <- "p0"
result <- structure(list(statistic = pX,
                         p.value = padj,
                         method = method,
                         data.name = Xname),
                    class="htest")
attr(result, "rinterval") <- attr(tX, "rinterval")
attr(result, "pX") <- pX
attr(result, "pY") <- pY
if(savefuns || savepatterns)
  result <- hasenvelope(result, attr(tX, "envelope"))
return(result)
}

## Dao-Genton adjusted simulation envelope.
## Wrapper for twostage.envelope with nested ('reuse=TRUE') second-stage
## simulations: nsim first-stage simulations, nsimsub (default nsim-1)
## second-stage simulations each.
dg.envelope <- function(X, ..., nsim=19, nsimsub=nsim-1,
                        nrank=1,
                        alternative=c("two.sided", "less", "greater"),
                        leaveout=1, interpolate = FALSE,
                        savefuns=FALSE, savepatterns=FALSE,
                        verbose=TRUE) {
  twostage.envelope(X, ..., nsim=nsim, nsimsub=nsimsub, reuse=TRUE,
                    nrank=nrank, alternative=match.arg(alternative),
                    leaveout=leaveout, interpolate=interpolate,
                    savefuns=savefuns, savepatterns=savepatterns,
                    verbose=verbose,
                    ## BUG FIX: was testlabel="bits" (copied from
                    ## bits.envelope below), which made verbose output of
                    ## the Dao-Genton envelope print "bits.rank"/"bits.crit"
                    testlabel="dg")
}

## Balanced Independent Two-Stage simulation envelope.
## Wrapper for twostage.envelope with independent ('reuse=FALSE')
## second-stage simulations, nsimsub = nsim.
bits.envelope <- function(X, ..., nsim=19,
                          nrank=1,
                          alternative=c("two.sided", "less", "greater"),
                          leaveout=1, interpolate = FALSE,
                          savefuns=FALSE, savepatterns=FALSE,
                          verbose=TRUE) {
  twostage.envelope(X, ..., nsim=nsim, nsimsub=nsim, reuse=FALSE,
                    nrank=nrank, alternative=match.arg(alternative),
                    leaveout=leaveout, interpolate=interpolate,
                    savefuns=savefuns, savepatterns=savepatterns,
                    verbose=verbose, testlabel="bits")
}

## Workhorse for dg.envelope and bits.envelope.
twostage.envelope <-
## twostage.envelope (body; name bound on the preceding line):
##   Two-stage Monte Carlo simulation envelope.  Stage 1 runs
##   envelopeTest() (exponent=Inf, i.e. a MAD/global deviation test) on
##   the data; stage 2 replays the test on each first-stage simulated
##   pattern to find a critical rank/deviation, which is then converted
##   into upper/lower envelope boundaries on the fv object.
##   testlabel is used only to label verbose diagnostic output.
function(X, ..., nsim=19, nsimsub=nsim,
         nrank=1,
         alternative=c("two.sided", "less", "greater"),
         reuse=FALSE,
         leaveout=1,
         interpolate = FALSE,
         savefuns=FALSE, savepatterns=FALSE,
         verbose=TRUE,
         badXfatal=TRUE,
         testlabel="twostage") {
#  Xname <- short.deparse(substitute(X))
  alternative <- match.arg(alternative)
  env.here <- sys.frame(sys.nframe())
  ## if X is a fitted model, second-stage patterns must be re-fitted below
  Xismodel <- is.ppm(X) || is.kppm(X) || is.lppm(X) || is.slrm(X)
  ############## first stage ##################################
  if(verbose)
    cat("Applying first-stage test to original data... ")
  tX <- envelopeTest(X, ...,
                     alternative=alternative,
                     leaveout=leaveout,
                     interpolate = interpolate,
                     nsim=if(reuse) nsim else nsimsub,
                     nrank=nrank,
                     exponent=Inf,
                     savepatterns=TRUE,
                     savefuns=TRUE,
                     verbose=FALSE,
                     badXfatal=badXfatal,
                     envir.simul=env.here)
  if(verbose) cat("Done.\n")
  envX <- attr(tX, "envelope")
  if(!reuse) {
    ## independent second stage: generate a fresh set of nsim patterns
    if(verbose)
      cat("Repeating first-stage test... ")
    tXX <- envelopeTest(X, ...,
                        alternative=alternative,
                        leaveout=leaveout,
                        interpolate = interpolate,
                        nsim=nsim,
                        nrank=nrank,
                        exponent=Inf,
                        savepatterns=TRUE,
                        savefuns=TRUE,
                        verbose=FALSE,
                        badXfatal=badXfatal,
                        envir.simul=env.here)
    ## extract simulated patterns
    Ylist <- attr(attr(tXX, "envelope"), "simpatterns")
  } else {
    Ylist <- attr(attr(tX, "envelope"), "simpatterns")
  }
  if(verbose) cat("Done.\n")
  ############## second stage #################################
  ## apply same test to each simulated pattern
  if(verbose)
    cat(paste("Running tests on", nsim, "simulated patterns... "))
  pvalY <- numeric(nsim)
  for(i in 1:nsim) {
    if(verbose) progressreport(i, nsim)
    Yi <- Ylist[[i]]
    # if X is a model, fit it to Yi. Otherwise the implicit model is CSR.
    if(Xismodel) Yi <- update(X, Yi)
    tYi <- envelopeTest(Yi, ...,
                        alternative=alternative,
                        leaveout=leaveout,
                        interpolate = interpolate,
                        save.interpolant = FALSE,
                        nsim=nsimsub,
                        nrank=nrank,
                        exponent=Inf,
                        savepatterns=TRUE,
                        verbose=FALSE,
                        badXfatal=FALSE,
                        envir.simul=env.here)
    pvalY[i] <- tYi$p.value
  }
  ## Find critical deviation
  if(!interpolate) {
    ## find critical rank 'l' (second-stage p-values back-transformed
    ## to ranks under nsimsub+1 possible values)
    rankY <- pvalY * (nsimsub + 1)
    twostage.rank <- orderstats(rankY, k=nrank)
    if(verbose)
      cat(paste0(testlabel, ".rank"), "=", twostage.rank, fill=TRUE)
    ## extract deviation values from top-level simulation
    simdev <- attr(tX, "statistics")[["sim"]]
    ## find critical deviation
    twostage.crit <- orderstats(simdev, decreasing=TRUE, k=twostage.rank)
    if(verbose)
      cat(paste0(testlabel, ".crit"), "=", twostage.crit, fill=TRUE)
  } else {
    ## interpolated case: work with the estimated density of the statistic
    ## compute estimated cdf of t
    fhat <- attr(tX, "density")[c("x", "y")]
    fhat$z <- with(fhat, cumsum(y)/sum(y))  # 'within' upsets package checker
    ## find critical (second stage) p-value
    pcrit <- orderstats(pvalY, k=nrank)
    ## compute corresponding upper quantile of estimated density of t
    twostage.crit <- with(fhat, { min(x[z >= 1 - pcrit]) })
  }
  ## make fv object, for now
  ## reference curve: theoretical value if available, else simulation mean
  refname <- if("theo" %in% names(envX)) "theo" else "mmean"
  fname <- attr(envX, "fname")
  result <- (as.fv(envX))[, c(fvnames(envX, ".x"),
                              fvnames(envX, ".y"),
                              refname)]
  refval <- envX[[refname]]
  ## envelope boundaries = reference curve +/- critical deviation
  newdata <- data.frame(hi=refval + twostage.crit,
                        lo=refval - twostage.crit)
  newlabl <- c(makefvlabel(NULL, NULL, fname, "hi"),
               makefvlabel(NULL, NULL, fname, "lo"))
  alpha <- nrank/(nsim+1)
  ## "%%" survives the later sprintf substitution as a literal "%"
  alphatext <- paste0(100*alpha, "%%")
  newdesc <- c(paste("upper", alphatext, "critical boundary for %s"),
               paste("lower", alphatext, "critical boundary for %s"))
  ## one-sided alternatives get an infinite boundary on the unused side
  switch(alternative,
         two.sided = { },
         less = {
           newdata$hi <- Inf
           newlabl[1L] <- "infinity"
           newdesc[1L] <- "infinite upper limit"
         },
         greater = {
           newdata$lo <- -Inf
           newlabl[2L] <- "infinity"
           newdesc[2L] <- "infinite lower limit"
         })
  result <- bind.fv(result, newdata, newlabl, newdesc)
fvnames(result, ".") <- rev(fvnames(result, ".")) fvnames(result, ".s") <- c("lo", "hi") if(savefuns || savepatterns) result <- hasenvelope(result, envX) return(result) } spatstat/R/blur.R0000644000176200001440000001116313616220203013402 0ustar liggesusers# # blur.R # # apply Gaussian blur to an image # # $Revision: 1.23 $ $Date: 2020/02/04 07:05:28 $ # fillNA <- function(x, value=0) { stopifnot(is.im(x)) v <- x$v v[is.na(v)] <- value x$v <- v return(x) } Smooth.im <- function(X, sigma=NULL, ..., kernel="gaussian", normalise=FALSE, bleed=TRUE, varcov=NULL) { blur(X, sigma=sigma, ..., kernel=kernel, normalise=normalise, bleed=bleed, varcov=varcov) } blur <- function(x, sigma=NULL, ..., kernel="gaussian", normalise=FALSE, bleed=TRUE, varcov=NULL) { stopifnot(is.im(x)) # determine smoothing kernel sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if (sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1, 2)) stopifnot(all(sigma > 0)) } if (varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov) == 2) ngiven <- varcov.given + sigma.given switch(ngiven + 1L, { sigma <- (1/8) * min(diff(x$xrange), diff(x$yrange)) }, { if (sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if (!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # replace NA's in image raster by zeroes X <- fillNA(x, 0) # convolve with Gaussian Y <- second.moment.calc(X, sigma=sigma, ..., kernel=kernel, varcov=varcov, what="smooth") # if no bleeding, we restrict data to the original boundary if(!bleed) Y$v[is.na(x$v)] <- NA # if(!normalise) return(Y) # normalisation: # convert original image to window (0/1 image) Xone <- x isna <- is.na(x$v) Xone$v[isna] <- 0 Xone$v[!isna] <- 1 # convolve with Gaussian Ydenom <- second.moment.calc(Xone, sigma=sigma, ..., kernel=kernel, varcov=varcov, what="smooth") # normalise Z <- eval.im(Y/Ydenom) return(Z) } safelookup <- 
## safelookup (body; 'safelookup <-' is bound on the preceding line):
##   Robust lookup of pixel image Z at the points of a point pattern x.
##   NA lookups are repaired in three escalating passes:
##     1. nearest valid neighbouring pixel,
##     2. (numeric images only) Gaussian blur extrapolation near the edge,
##     3. projection to the nearest pixel inside the domain at any distance.
##   Stops with an internal error if any NA survives all three passes.
function(Z, x, factor=2, warn=TRUE) {
  #' x is a ppp
  #' evaluates Z[x], replacing any NA's by blur(Z)[x]
  Zvals <- Z[x, drop=FALSE]
  isna <- is.na(Zvals)
  if(!any(isna))
    return(Zvals)
  #' First pass - look up values at neighbouring pixels if valid
  Xbad <- x[isna]
  rc <- nearest.valid.pixel(Xbad$x, Xbad$y, Z)
  Nvals <- Z$v[cbind(rc$row, rc$col)]
  fixed <- !is.na(Nvals)
  Zvals[isna] <- Nvals
  if(all(fixed))
    return(Zvals)
  ## shrink the NA mask to the still-unresolved points
  isna[isna] <- !fixed
  #' Second pass
  Xbad <- x[isna]
  #' expand domain of Z
  RX <- as.rectangle(x)
  RZ <- as.rectangle(Z)
  bb <- boundingbox(RX, RZ)
  pixdiam <- sqrt(Z$xstep^2 + Z$ystep^2)
  big <- grow.rectangle(bb, 2 * pixdiam)
  Z <- rebound.im(Z, big)
  #' blur extrapolation is meaningless for factor-valued images
  isfac <- (Z$type == "factor")
  if(!isfac) {
    #' Numerical extrapolation: blur by a few pixels
    Zblur <- blur(Z, factor * pixdiam, bleed=TRUE, normalise=TRUE)
    Bvals <- Zblur[Xbad, drop=FALSE]
    Zvals[isna] <- Bvals
    fixed <- !is.na(Bvals)
    if(warn && any(fixed))
      warning(paste("Values for", sum(fixed),
                    "points lying slightly outside the pixel image domain",
                    "were estimated by convolution"),
              call.=FALSE)
    if(all(fixed))
      return(Zvals)
    isna[isna] <- notfixed <- !fixed
    Xbad <- Xbad[notfixed]
  }
  #' Third pass
  #' last resort: project to nearest pixel at any distance
  W <- as.mask(Z)
  eW <- exactPdt(W)
  ## discretise points of Xbad
  Gbad <- nearest.raster.point(Xbad$x, Xbad$y, W)
  ijGbad <- cbind(Gbad$row, Gbad$col)
  ## find nearest pixels inside domain
  iclosest <- eW$row[ijGbad]
  jclosest <- eW$col[ijGbad]
  ## look up values of Z
  Cvals <- Z$v[cbind(iclosest, jclosest)]
  fixed <- !is.na(Cvals)
  Zvals[isna] <- Cvals
  if(warn && any(fixed))
    warning(paste(if(isfac) "Categorical values" else "Values",
                  "for", sum(fixed), "points lying",
                  if(isfac) "outside" else "far outside",
                  "the pixel image domain",
                  "were estimated by projection to the nearest pixel"),
            call.=FALSE)
  if(!all(fixed))
    stop(paste("Internal error:", sum(!fixed),
               "pixel values were NA, even after projection"),
         call.=FALSE)
  return(Zvals)
}

## nearestValue: fill every undefined (NA) pixel of an image with the
## value of the nearest defined pixel (via the exact distance transform).
nearestValue <- function(X) {
  #' for each raster
location, look up the nearest defined pixel value X <- as.im(X) if(!anyNA(X)) return(X) Y <- X ## copy dimensions, value type, units etc etc W <- as.mask(X) eW <- exactPdt(W) iclosest <- as.vector(eW$row) jclosest <- as.vector(eW$col) ## look up values of Z Y$v[] <- X$v[cbind(iclosest, jclosest)] return(Y) } spatstat/R/Fest.R0000644000176200001440000001427513556707650013371 0ustar liggesusers# # Fest.R # # Computes estimates of the empty space function # # $Revision: 4.47 $ $Date: 2019/11/01 01:32:28 $ # Fhazard <- function(X, ...) { Z <- Fest(X, ...) if(!any(names(Z) == "km")) stop("Kaplan-Meier estimator 'km' is required for hazard rate") conserve <- attr(Z, "conserve") ## strip off Poisson F Z <- Z[, (colnames(Z) != "theo")] ## relabel the fv object Z <- rebadge.fv(Z, new.ylab=quote(h(r)), new.fname="h", tags=c("hazard", "theohaz"), new.tags=c("hazard", "theo"), new.labl=c("hat(%s)[km](r)", "%s[pois](r)"), new.desc=c( "Kaplan-Meier estimate of %s", "theoretical Poisson %s"), new.dotnames=c("hazard", "theo"), new.preferred="hazard") ## strip off unwanted bits Z <- Z[, c("r", "hazard", "theo")] attr(Z, "conserve") <- conserve return(Z) } Fest <- function(X, ..., eps = NULL, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(list(...)$checkspacing) testme <- isTRUE(list(...)$testme) ## Intensity estimate W <- Window(X) npts <- npoints(X) lambda <- npts/area(W) ## Discretise window dwin <- as.mask(W, eps=eps) dX <- ppp(X$x, X$y, window=dwin, check=FALSE) ## Histogram breakpoints rmaxdefault <- rmax.rule("F", dwin, lambda) breaks <- handle.r.b.args(r, breaks, dwin, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(testme || (rorbgiven && checkspacing)) check.finespacing(rvals, if(is.null(eps)) NULL else eps/4, dwin, rmaxdefault=rmaxdefault, action="fatal", rname="r", 
                    context="in Fest(X, r)")
  ## choose correction(s)
  if(is.null(correction)) {
    correction <- c("rs", "km", "cs")
  } else correction <- pickoption("correction", correction,
                                  c(none="none",
                                    border="rs",
                                    rs="rs",
                                    KM="km",
                                    km="km",
                                    Kaplan="km",
                                    cs="cs",
                                    ChiuStoyan="cs",
                                    Hanisch="cs",
                                    han="cs",
                                    best="km"),
                                  multi=TRUE)
  ## initialise fv object with the theoretical Poisson empty-space function
  ## F_pois(r) = 1 - exp(-lambda * pi * r^2)
  df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2))
  Z <- fv(df, "r", substitute(F(r), NULL), "theo", . ~ r,
          c(0,rmax),
          c("r", "%s[pois](r)"),
          c("distance argument r", "theoretical Poisson %s"),
          fname="F")
  nr <- length(rvals)
  zeroes <- numeric(nr)
  ## compute distances and censoring distances
  if(X$window$type == "rectangle") {
    ## original data were in a rectangle
    ## output of exactdt() is sufficient
    e <- exactdt(dX)
    dist <- e$d
    bdry <- e$b
    if(!is.null(domain)) {
      ## restrict to pixels inside the sub-domain
      ok <- inside.owin(raster.xy(e$w), , domain)
      dist <- dist[ok]
      bdry <- bdry[ok]
    }
  } else {
    ## window is irregular..
    # Distance transform & boundary distance for all pixels
    e <- exactdt(dX)
    b <- bdist.pixels(dX$window, style="matrix")
    ## select only those pixels inside mask
    mm <- dwin$m
    if(!is.null(domain)) {
      ok <- inside.owin(raster.xy(e$w), , domain)
      mm <- as.vector(mm) & ok
    }
    dist <- e$d[mm]
    bdry <- b[mm]
  }
  ## censoring indicators: distance observed iff it does not exceed
  ## the distance to the window boundary
  d <- (dist <= bdry)
  ## observed distances
  o <- pmin.int(dist, bdry)
  ## start calculating estimates of F
  if("none" %in% correction) {
    ## UNCORRECTED e.d.f.
of empty space distances if(npts == 0) edf <- zeroes else { hh <- hist(dist[dist <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(dist) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("cs" %in% correction) { ## Chiu-Stoyan correction if(npts == 0) cs <- zeroes else { ## uncensored distances x <- dist[d] ## weights a <- eroded.areas(W, rvals) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts H <- cumsum(h/a) cs <- H/max(H[is.finite(H)]) } ## add to fv object Z <- bind.fv(Z, data.frame(cs=cs), "hat(%s)[cs](r)", "Chiu-Stoyan estimate of %s", "cs") } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km, want.km, want.km) tags <- c("rs", "km", "hazard", "theohaz")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard h(r)")[selection] if(npts == 0) { result <- as.data.frame(matrix(0, nr, length(tags))) names(result) <- tags } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) result$theohaz <- 2 * pi * lambda * rvals result <- as.data.frame(result[tags]) } ## add to fv object Z <- bind.fv(Z, result, labels, descr, if(want.km) "km" else "rs") } ## wrap up unitname(Z) <- unitname(X) ## remove 'hazard' from the dotnames nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) ## determine recommended plot range attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.9])) ## arguments to be used in envelope, etc attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } spatstat/R/exactMPLEstrauss.R0000644000176200001440000000400313333543255015653 
0ustar liggesusers# # exactMPLEstrauss.R # # 'exact' MPLE for stationary Strauss process # # $Revision: 1.6 $ $Date: 2014/11/10 07:39:41 $ # exactMPLEstrauss <- local({ # main function exactMPLEstrauss <- function(X, R, ngrid=2048, plotit=FALSE, project=TRUE) { # n <- npoints(X) W <- as.owin(X) # border correction WminR <- erosion(W, R) bR <- (bdist.points(X) >= R) nR <- sum(bR) # evaluate neighbour counts for data points Tcounts <- crosspaircounts(X, X, R) - 1L sumT <- sum(Tcounts[bR]) # determine the coefficients a_k for k = 0, 1, ... Z <- scanmeasure(X, R, dimyx=ngrid) Z <- Z[WminR, drop=FALSE] kcounts <- tabulate(as.vector(Z$v) + 1L) pixarea <- with(Z, xstep * ystep) A <- kcounts * pixarea # find optimal log(gamma) op <- optim(log(0.5), lpl, sco, method="L-BFGS-B", control=list(fnscale=-1), lower=-Inf, upper=if(project) 0 else Inf, A=A, sumT=sumT, nR=nR) loggamma <- op$par # plot? if(plotit) { x <- seq(log(1e-4), if(project) 0 else log(1e4), length=512) plot(x, lpl(x, A, sumT, nR), type="l", xlab=expression(log(gamma)), ylab=expression(log(PL(gamma)))) abline(v=loggamma, lty=3) } # derive optimal beta kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, loggamma)) beta <- nR/polypart logbeta <- log(beta) result <- c(logbeta, loggamma) names(result) <- c("(Intercept)", "Interaction") return(result) } # helper functions (vectorised) # log pseudolikelihood lpl <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <-length(A) - 1L polypart <- A %*% exp(outer(0:kmax, theta)) nR * (log(nR) - log(polypart) - 1) + theta * sumT } # pseudoscore sco <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <- length(A) - 1L kseq <- 0:kmax mat <- exp(outer(kseq, theta)) polypart <- A %*% mat Dpolypart <- (A * kseq) %*% mat sumT - nR * Dpolypart/polypart } exactMPLEstrauss }) spatstat/R/strausshard.R0000644000176200001440000001611213333543255015013 0ustar liggesusers# # # strausshard.S # # $Revision: 2.37 $ $Date: 2018/05/02 09:38:36 $ # # The Strauss/hard core process # # 
StraussHard()    create an instance of the Strauss-hardcore process
#                [an object of class 'interact']
#
#
# -------------------------------------------------------------------
#

StraussHard <- local({

  ## Template 'interact' object for the Strauss/hard-core pairwise
  ## interaction: pair potential is -Inf within the hard core distance hc,
  ## 1 between hc and the interaction distance r, 0 beyond r.
  BlankStraussHard <-
  list(
    name     = "Strauss - hard core process",
    creator  = "StraussHard",
    family   = "pairwise.family",  # evaluated later
    ## pair potential as a function of interpoint distance d
    pot      = function(d, par) {
      v <- (d <= par$r)
      v[ d <= par$hc ] <- (-Inf)
      v
    },
    par      = list(r = NULL, hc = NULL),  # filled in later
    parnames = c("interaction distance",
                 "hard core distance"),
    hasInf   = TRUE,
    selfstart = function(X, self) {
      # self starter for StraussHard
      # chooses a default hard core distance from the data pattern X
      nX <- npoints(X)
      if(nX < 2) {
        # not enough points to make any decisions
        return(self)
      }
      r <- self$par$r
      md <- minnndist(X)
      if(md == 0) {
        warning(paste("Pattern contains duplicated points:",
                      "hard core must be zero"))
        return(StraussHard(r=r, hc=0))
      }
      if(!is.na(hc <- self$par$hc)) {
        # value fixed by user or previous invocation
        # check it
        if(md < hc)
          warning(paste("Hard core distance is too large;",
                        "some data points will have zero probability"))
        return(self)
      }
      # take hc = minimum interpoint distance * n/(n+1)
      hcX <- md * nX/(nX+1)
      StraussHard(r=r, hc = hcX)
    },
    ## validate the parameters r and hc
    init = function(self) {
      r <- self$par$r
      hc <- self$par$hc
      if(length(hc) != 1)
        stop("hard core distance must be a single value")
      if(!is.na(hc)) {
        if(!is.numeric(hc) || hc <= 0)
          stop("hard core distance hc must be a positive number, or NA")
        if(!is.numeric(r) || length(r) != 1 || r <= hc)
          stop("interaction distance r must be a number greater than hc")
      }
    },
    update = NULL,  # default OK
    print = NULL,   # default OK
    ## map fitted coefficient (log gamma) to the interaction parameter
    interpret = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      gamma <- exp(loggamma)
      return(list(param=list(gamma=gamma),
                  inames="interaction parameter gamma",
                  printable=dround(gamma)))
    },
    valid = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      return(is.finite(loggamma))
    },
    ## project an invalid fit to the nearest valid model
    project = function(coeffs, self) {
      loggamma <- as.numeric(coeffs[1])
      if(is.finite(loggamma))
        return(NULL)
      hc <- self$par$hc
      if(hc > 0) return(Hardcore(hc)) else return(Poisson())
    },
    ## effective interaction range given the fitted coefficients
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      r <- self$par$r
      hc <- self$par$hc
      if(anyNA(coeffs))
        return(r)
      loggamma <- coeffs[1]
      if(abs(loggamma) <= epsilon)
        return(hc)
      else
        return(r)
    },
    version=NULL,  # evaluated later
    # fast evaluation is available for the border correction only
    can.do.fast=function(X,correction,par) {
      return(all(correction %in% c("border", "none")))
    },
    fasteval=function(X,U,EqualPairs,pairpot,potpars,correction,
                      splitInf=FALSE, ...) {
      #' fast evaluator for StraussHard interaction
      if(!all(correction %in% c("border", "none")))
        return(NULL)
      if(spatstat.options("fasteval") == "test")
        message("Using fast eval for StraussHard")
      r <- potpars$r
      hc <- potpars$hc
      hclose <- (strausscounts(U, X, hc, EqualPairs) != 0)
      rclose <- strausscounts(U, X, r, EqualPairs)
      if(!splitInf) {
        answer <- ifelseAX(hclose, -Inf, rclose)
        answer <- matrix(answer, ncol=1)
      } else {
        ## report the -Inf (hard core) contributions separately
        answer <- ifelseAX(hclose, 0, rclose)
        answer <- matrix(answer, ncol=1)
        attr(answer, "-Inf") <- hclose
      }
      return(answer)
    },
    Mayer=function(coeffs, self) {
      # second Mayer cluster integral
      gamma <- exp(as.numeric(coeffs[1]))
      r <- self$par$r
      hc <- self$par$hc
      return(pi * (hc^2 + (1-gamma) * (r^2 - hc^2)))
    },
    ## second-order variation of the conditional intensity
    delta2 = function(X, inte, correction, ..., sparseOK=FALSE) {
      r <- inte$par$r
      hc <- inte$par$hc
      #' positive part
      U <- as.ppp(X)
      nU <- npoints(U)
      cl <- weightedclosepairs(U, r, correction=correction, what="indices")
      if(is.null(cl))  # can't handle edge correction
        return(NULL)
      v <- sparseMatrix(i=cl$i, j=cl$j, x=cl$weight, dims=c(nU, nU))
      #' hard core part
      hcl <- closepairs(U, hc, what="indices")
      ihit <- hcl$i
      jhit <- hcl$j
      vh <- NULL
      if(is.ppp(X)) {
        #' count conflicts between data points
        nhit <- as.integer(table(factor(jhit, levels=seq_len(nU))))
        #' for a conflicting pair X[i], X[j],
        #' status of X[j] will change when X[i] is deleted
        #' iff X[j] is only in conflict with X[i]
        changes <- (nhit == 1)
        if(any(changes)) {
          changesJ <- changes[jhit]
          vh <-
sparseMatrix(i=ihit[changesJ], j=jhit[changesJ], x=TRUE, dims=c(nU, nU)) } } else if(is.quad(X)) { #' count conflicts with existing data points izdat <- is.data(X) hitdata <- izdat[ihit] nhitdata <- as.integer(table(factor(jhit[hitdata], levels=seq_len(nU)))) #' for a conflicting pair U[i], U[j], #' status of U[j] will change when U[i] is added/deleted #' iff EITHER #' U[i] = X[i] is a data point and #' U[j] is only in conflict with X[i], #' OR #' U[i] is a dummy point, #' U[j] has no conflicts with X. changesJ <- (hitdata & (nhitdata[jhit] == 1)) | (!hitdata & (nhitdata[jhit] == 0)) if(any(changesJ)) vh <- sparseMatrix(i=ihit[changesJ], j=jhit[changesJ], x=TRUE, dims=c(nU, nU)) } else stop("X should be a ppp or quad object") # pack up if(!sparseOK) { v <- as.matrix(v) if(!is.null(vh)) vh <- as.matrix(vh) } attr(v, "deltaInf") <- vh return(v) } ) class(BlankStraussHard) <- "interact" StraussHard <- function(r, hc=NA) { instantiate.interact(BlankStraussHard, list(r=r, hc=hc)) } StraussHard <- intermaker(StraussHard, BlankStraussHard) StraussHard }) spatstat/R/boundingbox.R0000644000176200001440000001462413333543254014773 0ustar liggesusers## ## boundingbox.R ## ## $Revision: 1.11 $ $Date: 2017/12/30 05:04:44 $ # bounding.box <- function(...) { # .Deprecated("boundingbox", "spatstat") # boundingbox(...) # } boundingbox <- function(...) { ## remove any NULL arguments arglist <- list(...) if(any(isnull <- sapply(arglist, is.null))) { if(length(arglist[!isnull])) return(do.call(boundingbox, arglist[!isnull])) stop("No non-null arguments given.\n") } UseMethod("boundingbox") } boundingbox.solist <- function(...) { argh <- list(...) issl <- sapply(argh, inherits, what="solist") yarg <- c(do.call(c, argh[issl]), argh[!issl]) do.call(bbEngine, yarg) } boundingbox.ppp <- boundingbox.psp <- boundingbox.owin <- boundingbox.list <- boundingbox.linnet <- boundingbox.lpp <- boundingbox.im <- function(...) { bbEngine(...) 
}

## recognise.spatstat.type:
##   Classify an object for bounding-box dispatch.  Returns one of the
##   strings "ppp","psp","owin","im","lpp","linnet", "listxy" (a plain
##   list(x, y) of equal-length numeric vectors), "as.owin" (anything
##   convertible by as.owin), or "unknown".
recognise.spatstat.type <- local({

  knowntypes <- c("ppp","psp","owin","im", "lpp", "linnet")

  function(x) {
    for(kt in knowntypes)
      if(inherits(x, kt)) return(kt)
    if(is.list(x) &&
       checkfields(x, c("x", "y")) &&
       is.numeric(x$x) && is.numeric(x$y) &&
       is.vector(x$x) && is.vector(x$y) &&
       length(x$x) == length(x$y))
      return("listxy")
    ## fall back: anything that as.owin() can digest
    aso <- try(as.owin(x), silent=TRUE)
    if(!inherits(aso, "try-error")) return("as.owin")
    return("unknown")
  }
})

## bbEngine:
##   Workhorse behind the boundingbox() methods.  Accepts any mixture of
##   spatial objects and coordinate vectors; with several arguments it
##   returns the bounding box of their individual bounding boxes, with a
##   single argument it computes a tight bounding box for that object.
bbEngine <- local({

  bb.listxy <- function(X) owin(range(X$x), range(X$y))

  bb.linnet <- function(X) boundingbox(vertices(X))

  bb.lpp <- function(X) boundingbox(as.ppp(X))

  bbEngine <- function(...) {
    wins <- list(...)
    ## first detect any numeric vector arguments
    if(any(isnumvec <- unlist(lapply(wins, is.vector)) &
                       unlist(lapply(wins, is.numeric)))) {
      ## invoke default method on these arguments
      bb <- do.call(boundingbox, wins[isnumvec])
      ## repack
      wins <- append(wins[!isnumvec], list(bb))
    }
    if(length(wins) > 1) {
      ## multiple arguments -- compute bounding box for each argument.
      objtype <- unlist(lapply(wins, recognise.spatstat.type))
      nbad <- sum(objtype == "unknown")
      if(nbad > 0) {
        whinge <- paste("Function boundingbox called with",
                        nbad,"unrecognised",
                        ngettext(nbad,"argument","arguments"))
        stop(whinge, call.=FALSE)
      }
      ## reduce each recognised type to a box/window first
      if(any(isppp <- (objtype == "ppp")))
        wins[isppp] <- lapply(wins[isppp], boundingbox)
      if(any(islistxy <- (objtype == "listxy")))
        wins[islistxy] <- lapply(wins[islistxy], bb.listxy)
      if(any(isnet <- (objtype == "linnet")))
        wins[isnet] <- lapply(wins[isnet], bb.linnet)
      if(any(islpp <- (objtype == "lpp")))
        wins[islpp] <- lapply(wins[islpp], bb.lpp)
      ## then convert all windows to owin
      wins <- lapply(wins, as.owin)
      ## then take bounding box of each window
      boxes <- lapply(wins, boundingbox)
      ## discard NULL values
      isnull <- unlist(lapply(boxes, is.null))
      boxes <- boxes[!isnull]
      ## take bounding box of these boxes
      xrange <- range(unlist(lapply(boxes, getElement, name="xrange")))
      yrange <- range(unlist(lapply(boxes, getElement, name="yrange")))
      W <- owin(xrange, yrange)
      ## If all of the windows have a common unit name, give
      ## that unit name to the bounding box.
      youse <- unique(t(sapply(boxes,unitname)))
      if(nrow(youse)==1) {
        ute <- unlist(youse[1L,])
        unitname(W) <- ute
      }
      return(W)
    }
    ## single argument
    w <- wins[[1L]]
    if(is.null(w)) return(NULL)
    wtype <- recognise.spatstat.type(w)
    ## point pattern?
    if(wtype == "ppp")
      return(boundingbox(coords(w)))
    ## line segment pattern?
    if(wtype == "psp")
      return(boundingbox(endpoints.psp(w)))
    ## list(x,y)
    if(wtype == "listxy")
      return(bb.listxy(w))
    ## NOTE(review): in the next two branches the assignment 'w <- ...'
    ## is dead code -- return() exits immediately.  Harmless but odd.
    if(wtype == "linnet")
      w <- return(bb.linnet(w))
    if(wtype == "lpp")
      w <- return(bb.lpp(w))
    ## convert to window
    w <- as.owin(w)
    ## determine a tight bounding box for the window w
    switch(w$type,
           rectangle = {
             return(w)
           },
           polygonal = {
             bdry <- w$bdry
             if(length(bdry) == 0) return(NULL)
             xr <- range(unlist(lapply(bdry, rangeofx)))
             yr <- range(unlist(lapply(bdry, rangeofy)))
             return(owin(xr, yr, unitname=unitname(w)))
           },
           mask = {
             ## half a pixel margin around the occupied raster cells
             m <- w$m
             x <- rasterx.mask(w)
             y <- rastery.mask(w)
             xr <- range(x[m]) + c(-1,1) * w$xstep/2
             yr <- range(y[m]) + c(-1,1) * w$ystep/2
             return(owin(xr, yr, unitname=unitname(w)))
           },
           stop("unrecognised window type", w$type)
           )
  }

  rangeofx <- function(a) range(a$x)
  rangeofy <- function(a) range(a$y)

  bbEngine
})

## boundingbox.default:
##   Default method: handles a pair of raw coordinate vectors and/or
##   objects coercible to windows.
boundingbox.default <- local({

  bb.listxy <- function(X) owin(range(X$x), range(X$y))

  boundingbox.default <- function(...) {
    arglist <- list(...)
    bb <- NULL
    if(length(arglist) == 0)
      return(bb)
    ## handle numeric vector arguments
    if(any(isnumvec <- unlist(lapply(arglist, is.vector)) &
                       unlist(lapply(arglist, is.numeric)))) {
      nvec <- sum(isnumvec)
      if(nvec != 2)
        stop(paste("boundingbox.default expects 2 numeric vectors:",
                   nvec, "were supplied"),
             call.=FALSE)
      vecs <- arglist[isnumvec]
      x <- vecs[[1L]]
      y <- vecs[[2L]]
      bb <- if(length(x) == length(y)) owin(range(x), range(y)) else NULL
      arglist <- arglist[!isnumvec]
    }
    if(length(arglist) == 0)
      return(bb)
    ## other objects are present
    objtype <- unlist(lapply(arglist, recognise.spatstat.type))
    ## Unrecognised?
    nbad <- sum(objtype == "unknown")
    if(nbad > 0) {
      whinge <- paste("Function boundingbox called with",
                      nbad,"unrecognised",
                      ngettext(nbad,"argument","arguments"))
      stop(whinge, call.=FALSE)
    }
    if(any(aso <- (objtype == "as.owin"))) {
      ## promote objects to owin (to avoid infinite recursion!)
arglist[aso] <- lapply(arglist[aso], as.owin) } if(any(lxy <- (objtype == "listxy"))) { ## handle list(x,y) objects arglist[lxy] <- lapply(arglist[lxy], bb.listxy) } result <- do.call(boundingbox, if(is.null(bb)) arglist else append(list(bb), arglist)) return(result) } boundingbox.default }) spatstat/R/eval.fv.R0000644000176200001440000002361013573571003014010 0ustar liggesusers# # eval.fv.R # # # eval.fv() Evaluate expressions involving fv objects # # compatible.fv() Check whether two fv objects are compatible # # $Revision: 1.40 $ $Date: 2019/12/10 00:20:22 $ # eval.fv <- local({ # main function eval.fv <- function(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) { # convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") # get the actual variables if(missing(envir)) { envir <- parent.frame() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames # find out which ones are fv objects fvs <- unlist(lapply(vars, is.fv)) nfuns <- sum(fvs) if(nfuns == 0) stop("No fv objects in this expression") # extract them funs <- vars[fvs] # restrict to columns identified by 'dotnames' if(dotonly) funs <- lapply(funs, restrict.to.dot) # map names if instructed if(!is.null(equiv)) funs <- lapply(funs, mapnames, map=equiv) # test whether the fv objects are compatible if(nfuns > 1L && !(do.call(compatible, unname(funs)))) { warning(paste(if(nfuns > 2) "some of" else NULL, "the functions", commasep(sQuote(names(funs))), "were not compatible: enforcing compatibility")) funs <- do.call(harmonise, append(funs, list(strict=TRUE))) } # copy first object as template result <- funs[[1L]] ## ensure 'conservation' info is retained conserve <- 
unname(lapply(funs, attr, which="conserve"))
    if(any(present <- !sapply(conserve, is.null))) {
      conserve <- do.call(resolve.defaults, conserve[present])
      attr(result, "conserve") <- conserve
    }
    ## remove potential ratio info
    class(result) <- setdiff(class(result), "rat")
    attr(result, "numerator") <- attr(result, "denominator") <- NULL

    labl <- attr(result, "labl")
    origdotnames   <- fvnames(result, ".")
    origshadenames <- fvnames(result, ".s")
    # determine which function estimates are supplied
    argname <- fvnames(result, ".x")
    nam <- names(result)
    ynames <- nam[nam != argname]
    # for each function estimate, evaluate expression
    for(yn in ynames) {
      # extract corresponding estimates from each fv object
      funvalues <- lapply(funs, "[[", i=yn)
      # insert into list of argument values
      vars[fvs] <- funvalues
      # evaluate
      result[[yn]] <- eval(e, vars, enclos=envir)
    }
    if(!relabel)
      return(result)
    # determine mathematical labels.
    # 'yexp' determines y axis label
    # 'ylab' determines y label in printing and description
    # 'fname' is sprintf-ed into 'labl' for legend
    yexps <- lapply(funs, attr, which="yexp")
    ylabs <- lapply(funs, attr, which="ylab")
    fnames <- lapply(funs, getfname)
    # Repair 'fname' attributes if blank
    blank <- unlist(lapply(fnames, isblank))
    if(any(blank)) {
      # Set function names to be object names as used in the expression
      for(i in which(blank))
        attr(funs[[i]], "fname") <- fnames[[i]] <- names(funs)[i]
    }
    # Remove duplicated names
    # Typically occurs when combining several K functions, etc.
    # Tweak fv objects so their function names are their object names
    # as used in the expression
    if(anyDuplicated(fnames)) {
      newfnames <- names(funs)
      for(i in 1:nfuns)
        funs[[i]] <- rebadge.fv(funs[[i]], new.fname=newfnames[i])
      fnames <- newfnames
    }
    if(anyDuplicated(ylabs)) {
      flatnames <- lapply(funs, flatfname)
      for(i in 1:nfuns) {
        new.ylab <- substitute(f(r), list(f=flatnames[[i]]))
        funs[[i]] <- rebadge.fv(funs[[i]], new.ylab=new.ylab)
      }
      ylabs <- lapply(funs, attr, which="ylab")
    }
    if(anyDuplicated(yexps)) {
      newfnames <- names(funs)
      for(i in 1:nfuns) {
        new.yexp <- substitute(f(r), list(f=as.name(newfnames[i])))
        funs[[i]] <- rebadge.fv(funs[[i]], new.yexp=new.yexp)
      }
      yexps <- lapply(funs, attr, which="yexp")
    }
    # now compute y axis labels for the result
    # (substitute the per-function labels into the user's expression)
    attr(result, "yexp") <- eval(substitute(substitute(e, yexps),
                                            list(e=elang)))
    attr(result, "ylab") <- eval(substitute(substitute(e, ylabs),
                                            list(e=elang)))
    # compute fname equivalent to expression
    if(nfuns > 1L) {
      # take original expression
      the.fname <- paren(flatten(deparse(elang)))
    } else if(nzchar(oldname <- flatfname(funs[[1L]]))) {
      # replace object name in expression by its function name
      namemap <- list(as.name(oldname))
      names(namemap) <- names(funs)[1L]
      the.fname <- deparse(eval(substitute(substitute(e, namemap),
                                           list(e=elang))))
    } else the.fname <- names(funs)[1L]
    attr(result, "fname") <- the.fname
    # now compute the [modified] y labels
    labelmaps <- lapply(funs, fvlabelmap, dot=FALSE)
    for(yn in ynames) {
      # labels for corresponding columns of each argument
      funlabels <- lapply(labelmaps, "[[", i=yn)
      # form expression involving these columns
      labl[match(yn, names(result))] <-
        flatten(deparse(eval(substitute(substitute(e, f),
                                        list(e=elang, f=funlabels)))))
    }
    attr(result, "labl") <- labl
    # copy dotnames and shade names from template
    fvnames(result, ".") <- origdotnames[origdotnames %in% names(result)]
    if(!is.null(origshadenames) && all(origshadenames %in% names(result)))
      fvnames(result, ".s") <- origshadenames
    return(result)
  }

  # helper functions
  ## keep only the argument column, dot names and shade names of an fv
  restrict.to.dot <- function(z) {
    argu <- fvnames(z, ".x")
    dotn <- fvnames(z, ".")
    shadn <- fvnames(z, ".s")
    ok <- colnames(z) %in% unique(c(argu, dotn, shadn))
    return(z[, ok])
  }
  getfname <- function(x) { if(!is.null(y <- attr(x, "fname"))) y else "" }
  flatten <- function(x) { paste(x, collapse=" ") }
  ## rename columns according to an equivalence map
  mapnames <- function(x, map=NULL) {
    colnames(x) <- mapstrings(colnames(x), map=map)
    fvnames(x, ".y") <- mapstrings(fvnames(x, ".y"), map=map)
    return(x)
  }
  isblank <- function(z) { !any(nzchar(z)) }

  eval.fv
})

## generic: test whether objects are mutually compatible
compatible <- function(A, B, ...) {
  UseMethod("compatible")
}

## compatible.fv:
##   Two fv objects are compatible when they share the same function
##   argument, (optionally) the same column names, and the same grid
##   of argument values.  Recurses over additional arguments.
compatible.fv <- local({

  approx.equal <- function(x, y) { max(abs(x-y)) <= .Machine$double.eps }

  compatible.fv <- function(A, B, ..., samenames=TRUE) {
    verifyclass(A, "fv")
    if(missing(B)) {
      answer <- if(length(list(...)) == 0) TRUE else compatible(A, ...)
      return(answer)
    }
    verifyclass(B, "fv")
    ## is the function argument the same?
    samearg <- (fvnames(A, ".x") == fvnames(B, ".x"))
    if(!samearg)
      return(FALSE)
    if(samenames) {
      ## are all columns the same, and in the same order?
      namesmatch <-
        isTRUE(all.equal(names(A),names(B))) &&
        samearg &&
        (fvnames(A, ".y") == fvnames(B, ".y"))
      if(!namesmatch)
        return(FALSE)
    }
    ## are 'r' values the same ?
    rA <- with(A, .x)
    rB <- with(B, .x)
    rmatch <- (length(rA) == length(rB)) && approx.equal(rA, rB)
    if(!rmatch)
      return(FALSE)
    ## A and B are compatible
    if(length(list(...)) == 0)
      return(TRUE)
    ## recursion
    return(compatible.fv(B, ...))
  }

  compatible.fv
})

# force a list of images to be compatible with regard to 'x' values

harmonize <- harmonise <- function(...) {
  UseMethod("harmonise")
}

## harmonise.fv:
##   Resample a collection of fv objects onto a common argument grid
##   (the overlap of their ranges, at the finest available resolution).
harmonize.fv <- harmonise.fv <- local({

  harmonise.fv <- function(..., strict=FALSE) {
    argh <- list(...)
n <- length(argh) if(n == 0) return(argh) if(n == 1) { a1 <- argh[[1L]] if(is.fv(a1)) return(argh) if(is.list(a1) && all(sapply(a1, is.fv))) { argh <- a1 n <- length(argh) } } isfv <- sapply(argh, is.fv) if(!all(isfv)) stop("All arguments must be fv objects") if(n == 1) return(argh[[1L]]) ## determine range of argument ranges <- lapply(argh, argumentrange) xrange <- c(max(unlist(lapply(ranges, min))), min(unlist(lapply(ranges, max)))) if(diff(xrange) < 0) stop("No overlap in ranges of argument") if(strict) { ## find common column names and keep these keepnames <- Reduce(intersect, lapply(argh, colnames)) argh <- lapply(argh, "[", j=keepnames) } ## determine finest resolution xsteps <- sapply(argh, argumentstep) finest <- which.min(xsteps) ## extract argument values xx <- with(argh[[finest]], .x) xx <- xx[xrange[1L] <= xx & xx <= xrange[2L]] xrange <- range(xx) ## convert each fv object to a function funs <- lapply(argh, as.function, value="*") ## evaluate at common argument result <- vector(mode="list", length=n) for(i in 1:n) { ai <- argh[[i]] fi <- funs[[i]] xxval <- list(xx=xx) names(xxval) <- fvnames(ai, ".x") starnames <- fvnames(ai, "*") ## ensure they are given in same order as current columns starnames <- colnames(ai)[colnames(ai) %in% starnames] yyval <- lapply(starnames, function(v,xx,fi) fi(xx, v), xx=xx, fi=fi) names(yyval) <- starnames ri <- do.call(data.frame, append(xxval, yyval)) fva <- .Spatstat.FvAttrib attributes(ri)[fva] <- attributes(ai)[fva] class(ri) <- c("fv", class(ri)) attr(ri, "alim") <- intersect.ranges(attr(ai, "alim"), xrange) result[[i]] <- ri } names(result) <- names(argh) return(result) } argumentrange <- function(f) { range(with(f, .x)) } argumentstep <- function(f) { mean(diff(with(f, .x))) } harmonise.fv }) spatstat/R/discarea.R0000644000176200001440000000557413433151224014225 0ustar liggesusers# # discarea.R # # $Revision: 1.21 $ $Date: 2019/02/20 03:34:50 $ # # # Compute area of intersection between a disc and a window, # 
#' discpartarea:
#'   For each point X[i] and each radius r[i,j], compute the area of the
#'   intersection between the disc of radius r[i,j] centred at X[i] and the
#'   window W. Returns an n x nr matrix of areas.
#'   X : point pattern (ppp) or coercible; r : vector or n-row matrix of radii;
#'   W : window of observation (defaults to the window of X).
#'   The computation is delegated to the C routine 'discareapoly'.
discpartarea <- function(X, r, W=as.owin(X)) {
  if(!missing(W)) {
    verifyclass(W, "owin")
    if(!inherits(X, "ppp"))
      X <- as.ppp(X, W)
  }
  verifyclass(X, "ppp")
  n <- X$n
  if(is.matrix(r) && nrow(r) != n)
    stop("the number of rows of r should match the number of points in X")
  if(!is.matrix(r)) {
    # recycle the radius vector into one row per point
    nr <- length(r)
    r <- matrix(r, nrow=n, ncol=nr, byrow=TRUE)
  } else {
    nr <- ncol(r)
  }
  W <- as.polygonal(W)
  # convert polygon to line segments
  Y <- edges(W)
  # remove vertical segments (contribution is zero)
  vert <- (Y$ends$x1 == Y$ends$x0)
  Y <- Y[!vert]
  # go
  z <- .C("discareapoly",
          nc=as.integer(n),
          xc=as.double(X$x),
          yc=as.double(X$y),
          nr=as.integer(nr),
          rmat=as.double(r),
          nseg=as.integer(Y$n),
          x0=as.double(Y$ends$x0),
          y0=as.double(Y$ends$y0),
          x1=as.double(Y$ends$x1),
          y1=as.double(Y$ends$y1),
          eps=as.double(.Machine$double.eps),
          out=as.double(numeric(length(r))),
          PACKAGE = "spatstat")
  areas <- matrix(z$out, n, nr)
  return(areas)
}

# Compute area of dilation of point pattern
# using Dirichlet tessellation or distmap
# (areas of other dilations using distmap)

#' dilated.areas:
#'   Area of the dilation of the set X by each distance in r.
#'   If exact=FALSE, the area is approximated by counting pixels of the
#'   distance map below each r. If exact=TRUE (ppp only), exact areas are
#'   computed by splitting into Dirichlet tiles and summing disc-window
#'   intersection areas via discpartarea().
#'   Returns a numeric vector of length(r).
dilated.areas <- function(X, r, W=as.owin(X), ...,
                          constrained=TRUE, exact=FALSE) {
  if(is.matrix(r)) {
    if(sum(dim(r) > 1) > 1L)
      stop("r should be a vector or single value")
    r <- as.vector(r)
  }
  if(exact && !is.ppp(X)) {
    exact <- FALSE
    warning("Option exact=TRUE is only available for ppp objects")
  }
  if(!constrained || is.null(W)) {
    # unconstrained dilation: enlarge the frame so no dilation is clipped
    bb <- as.rectangle(X)
    W <- grow.rectangle(bb, max(r))
    if(is.owin(X))
      X <- rebound.owin(X, W)
    else
      X$window <- W
  } else
    W <- as.owin(W)
  if(!exact) {
    # approximate via the distance transform:
    # area(dilation by r) ~ (number of pixels with distance <= r) * pixel area
    D <- distmap(X, ...)
    pixelarea <- D$xstep * D$ystep
    Dvals <- D[W, drop=TRUE]
    if(is.im(Dvals))
      Dvals <- as.vector(as.matrix(Dvals))
    Dvals <- Dvals[!is.na(Dvals)]
    rr <- c(-1, r)
    h <- cumsum(whist(Dvals, rr))
    return(h * pixelarea)
  }
  # exact computation for a point pattern
  npts <- npoints(X)
  nr <- length(r)
  if(npts == 0)
    return(numeric(nr))
  else if(npts == 1L)
    return(discpartarea(X, r, W))
  samebox <- (W$type == "rectangle") &&
             isTRUE(all.equal(W, as.owin(X)))
  needclip <- constrained && !samebox
  X <- unique(X)
  dd <- dirichlet(X)
  til <- tiles(dd)
  #' some data points may not have a tile
  whichpoint <- as.integer(names(til))
  partareas <- matrix(0, length(til), nr)
  for(j in seq_along(til)) {
    Tj <- til[[j]]
    if(needclip)
      Tj <- intersect.owin(Tj, W)
    i <- whichpoint[j]
    # within its own Dirichlet tile, the dilation of X is the disc around X[i]
    partareas[j,] <- discpartarea(X[i], r, Tj)
  }
  return(colSums(partareas))
}
spatstat/R/interactions.R0000644000176200001440000002112413333543255015151 0ustar liggesusers
#
# interactions.R
#
# Works out which interaction is in force for a given point pattern
#
# $Revision: 1.25 $ $Date: 2016/04/25 02:34:40 $
#
#
#' impliedpresence:
#'   For each row of the data frame df, determine whether each variable
#'   named in 'tags' actually affects the model matrix implied by 'formula'.
#'   Returns an nrow(df) x length(tags) logical matrix; entry [i,j] is TRUE
#'   when toggling tags[j] from 0 to 1 changes the model matrix for row i.
impliedpresence <- function(tags, formula, df, extranames=character(0)) {
  # Determines, for each row of the data frame df,
  # whether the variable called tags[j] is required in the formula
  stopifnot(is.data.frame(df))
  stopifnot(inherits(formula, "formula"))
  stopifnot(is.character(tags))
  stopifnot(is.character(extranames))
  #
  allvars <- variablesinformula(formula)
  if(any(tags %in% names(df)))
    stop(paste(sQuote("tags"),
               "conflicts with the name of a column of",
               sQuote("df")))
  if(any(extranames %in% names(df)))
    stop(paste(sQuote("extranames"),
               "conflicts with the name of a column of",
               sQuote("df")))
  # answer is a matrix
  nvars <- length(tags)
  nrows <- nrow(df)
  answer <- matrix(TRUE, nrows, nvars)
  # expand data frame with zeroes for each tags and extranames
  for(v in unique(c(tags, extranames)))
    df[ , v] <- 0
  # loop
  for(i in seq(nrow(df))) {
    # make a fake data frame for the formula
    # using the data frame entries from row i
    # (includes 0 values for all other variables)
    pseudat <- df[i, , drop=FALSE]
    # use this to construct a fake model matrix
    mof0 <- model.frame(formula, pseudat)
    mom0 <- model.matrix(formula, mof0)
    for(j in seq(nvars)) {
      # Reset the variable called tags[j] to 1
      pseudatj <- pseudat
      pseudatj[ , tags[j]] <- 1
      # Now create the fake model matrix
      mofj <- model.frame(formula, pseudatj)
      momj <- model.matrix(formula, mofj)
      # Compare the two matrices
      answer[i,j] <- any(momj != mom0)
    }
  }
  return(answer)
}

#' active.interactions:
#'   For a fitted mppm object, determine which interpoint interactions are
#'   active in each row of the hyperframe. Returns a logical matrix with one
#'   row per pattern and one column per interaction.
#'   (The '#%^!ifdef RANDOMEFFECTS' markers are package preprocessor
#'   directives; do not remove them.)
active.interactions <- function(object) {
  stopifnot(inherits(object, "mppm"))
  interaction <- object$Inter$interaction
  iformula <- object$iformula
  nenv <- new.env()
  environment(iformula) <- nenv
  #%^!ifdef RANDOMEFFECTS
  random <- object$random
  if(!is.null(random))
    environment(random) <- nenv
  #%^!endif
  itags <- object$Inter$itags
  # The following are currently unused
  # ninter <- object$Inter$ninter
  # iused <- object$Inter$iused
  # trivial <- object$Inter$trivial
  # names of variables
  dat <- object$data
  datanames <- names(dat)
  dfnames <- summary(dat)$dfnames
  nondfnames <- datanames[!(datanames %in% dfnames)]
  nondfnames <- union(nondfnames, c("x", "y"))
  # extract data-frame values
  dfdata <- as.data.frame(dat[, dfnames, drop=FALSE], warn=FALSE)
  # determine which interaction(s) are in force
  answer <- impliedpresence(itags, iformula, dfdata, nondfnames)
  #%^!ifdef RANDOMEFFECTS
  if(!is.null(random)) {
    if("|" %in% all.names(random)) {
      ## hack since model.matrix doesn't handle "|" as desired
      rnd <- gsub("|", "/", pasteFormula(random), fixed=TRUE)
      random <- as.formula(rnd, env=environment(random))
    }
    answer2 <- impliedpresence(itags, random, dfdata, nondfnames)
    answer <- answer | answer2
  }
  #%^!endif
  colnames(answer) <- names(interaction)
  return(answer)
}

#' impliedcoefficients:
#'   For a fitted mppm object and one interaction tag, compute the
#'   coefficient effectively applied to each canonical interaction variable
#'   (vname) in each point pattern, by differencing the linear predictor
#'   with the vname set to 1 versus 0.
#'   Returns a matrix: one row per pattern, one column per vname.
impliedcoefficients <- function(object, tag) {
  stopifnot(inherits(object, "mppm"))
  stopifnot(is.character(tag) && length(tag) == 1)
  fitobj <- object$Fit$FIT
  Vnamelist <- object$Fit$Vnamelist
  has.random <- object$Info$has.random
  # Not currently used:
  # fitter <- object$Fit$fitter
  # interaction <- object$Inter$interaction
  # ninteract <- object$Inter$ninteract
  # trivial <- object$Inter$trivial
  # iused <- object$Inter$iused
  itags <- object$Inter$itags
  if(!(tag %in% itags))
    stop(paste("Argument", dQuote("tag"),
               "is not one of the interaction names"))
  # (0) Set up
  # Identify the columns of the glm data frame
  # that are associated with this interpoint interaction
  vnames <- Vnamelist[[tag]]
  if(!is.character(vnames))
    stop("Internal error - wrong format for vnames")
  # Check atomic type of each covariate
  Moadf <- as.list(object$Fit$moadf)
  islog <- sapply(Moadf, is.logical)
  isnum <- sapply(Moadf, is.numeric)
  isfac <- sapply(Moadf, is.factor)
  # Interaction variables must be numeric or logical
  if(any(bad <- !(isnum | islog)[vnames]))
    stop(paste("Internal error: the",
               ngettext(sum(bad), "variable", "variables"),
               commasep(sQuote(vnames[bad])),
               "should be numeric or logical"),
         call.=FALSE)
  # The answer is a matrix of coefficients,
  # with one row for each point pattern,
  # and one column for each vname
  answer <- matrix(, nrow=object$npat, ncol=length(vnames))
  colnames(answer) <- vnames
  # (1) make a data frame of covariates
  # Names of all columns in glm data frame
  allnames <- names(Moadf)
  # Extract the design covariates
  df <- as.data.frame(object$data, warn=FALSE)
  # Names of all covariates other than design covariates
  othernames <- allnames[!(allnames %in% names(df))]
  # Add columns in which all other covariates are set to 0, FALSE, etc
  for(v in othernames) {
    df[, v] <- if(isnum[[v]]) 0 else
               if(islog[[v]]) FALSE else
               if(isfac[[v]]) {
                 # first level of the factor, keeping the full level set
                 lev <- levels(Moadf[[v]])
                 factor(lev[1], levels=lev)
               } else sort(unique(Moadf[[v]]))[1]
  }
  # (2) evaluate linear predictor
  Coefs <- if(!has.random) coef(fitobj) else fixef(fitobj)
  opt <- options(warn= -1)
  # eta0 <- predict(fitobj, newdata=df, type="link")
  eta0 <- GLMpredict(fitobj, data=df, coefs=Coefs,
                     changecoef=TRUE, type="link")
  options(opt)
  # (3) for each vname in turn,
  # set the value of the vname to 1 and predict again
  for(j in seq_along(vnames)) {
    vnj <- vnames[j]
    df[[vnj]] <- 1
    opt <- options(warn= -1)
    # etaj <- predict(fitobj, newdata=df, type="link")
    etaj <- GLMpredict(fitobj, data=df, coefs=Coefs,
                       changecoef=TRUE, type="link")
    options(opt)
    # implied coefficient = change in linear predictor per unit of vname
    answer[ ,j] <- etaj - eta0
    # set the value of this vname back to 0
    df[[vnj]] <- 0
  }
  return(answer)
}

#' illegal.iformula:
#'   Validate an interaction formula for mppm.
#'   Returns NULL when the formula is legal, otherwise a character string
#'   describing the first problem found.
illegal.iformula <- local({

  illegal.iformula <- function(ifmla, itags, dfvarnames) {
    ## THIS IS TOO STRINGENT!
    ## Check validity of the interaction formula.
    ## ifmla is the formula.
    ## itags is the character vector of interaction names.
    ## Check whether the occurrences of `itags' in `iformula' are valid:
    ## e.g. no functions applied to `itags[i]'.
    ## Returns NULL if legal, otherwise a character string
    stopifnot(inherits(ifmla, "formula"))
    stopifnot(is.character(itags))
    ## formula must not have a LHS
    if(length(ifmla) > 2)
      return("iformula must not have a left-hand side")
    ## variables in formula must be interaction tags or data frame variables
    varsinf <- variablesinformula(ifmla)
    if(!all(ok <- varsinf %in% c(itags, dfvarnames)))
      return(paste(
        ngettext(sum(!ok), "variable", "variables"),
        paste(dQuote(varsinf[!ok]), collapse=", "),
        "not allowed in iformula"))
    ## if formula uses no interaction tags, it's trivial
    if(!any(itags %in% variablesinformula(ifmla)))
      return(NULL)
    ## create terms object
    tt <- attributes(terms(ifmla))
    ## extract all variables appearing in the formula
    vars <- as.list(tt$variables)[-1]
    ## nvars <- length(vars)
    varexprs <- lapply(vars, as.expression)
    varstrings <- sapply(varexprs, paste)
    ## Each variable may be a name or an expression
    v.is.name <- sapply(vars, is.name)
    ## a term may be an expression like sin(x), poly(x,y,degree=2)
    v.args <- lapply(varexprs, all.vars)
    ## v.n.args <- sapply(v.args, length)
    v.has.itag <- sapply(lapply(v.args, "%in%", x=itags), any)
    ## interaction tags may only appear as names, not in functions
    if(any(nbg <- v.has.itag & !v.is.name))
      return(paste("interaction tags may not appear inside a function:",
                   paste(dQuote(varstrings[nbg]), collapse=", ")))
    ## Interaction between two itags is not defined
    ## Inspect the higher-order terms
    fax <- tt$factors
    if(prod(dim(fax)) == 0)
      return(NULL)
    ## rows are first order terms, columns are terms of order >= 1
    fvars <- rownames(fax)
    fterms <- colnames(fax)
    fv.args <- lapply(fvars, variablesintext)
    ft.args <- lapply(fterms, variables.in.term,
                      factors=fax, varnamelist=fv.args)
    ft.itags <- lapply(ft.args, intersect, y=itags)
    if(any(lengths(ft.itags) > 1))
      return("Interaction between itags is not defined")
    return(NULL)
  }

  # collect the variable names used by one term of the formula
  variables.in.term <- function(term, factors, varnamelist) {
    basis <- (factors[, term] != 0)
    unlist(varnamelist[basis])
  }

  illegal.iformula
})
spatstat/R/plot.ppp.R0000644000176200001440000004461713353334550014225 0ustar liggesusers
#
#  plot.ppp.R
#
#  $Revision: 1.95 $   $Date: 2018/03/22 00:46:59 $
#
#
#--------------------------------------------------------------------------

#' plot.ppp:
#'   Plot method for point patterns (class "ppp").
#'   Helpers (inside local()):
#'     transparencyfun      - default alpha for large numbers of points
#'     default.symap.points - construct the default symbol map for the marks
#'     default.charmap      - default plotting characters for discrete marks
plot.ppp <- local({

  # alpha level: fully opaque up to 100 points, then fading exponentially
  transparencyfun <- function(n) {
    if(n <= 100) 1 else (0.2 + 0.8 * exp(-(n-100)/1000))
  }

  ## determine symbol map for marks of points
  default.symap.points <- function(x, ...,
                                   chars=NULL, cols=NULL,
                                   maxsize=NULL, meansize=NULL,
                                   markscale=NULL,
                                   markrange=NULL, marklevels=NULL) {
    marx <- marks(x)
    if(is.null(marx)) {
      ## null or constant symbol map
      ## consider using transparent colours
      if(is.null(cols) &&
         !any(c("col", "fg", "bg") %in% names(list(...))) &&
         (nx <- npoints(x)) > 100 &&
         identical(dev.capabilities()$semiTransparency, TRUE) &&
         spatstat.options("transparent"))
        cols <- rgb(0,0,0,transparencyfun(nx))
      return(symbolmap(..., chars=chars, cols=cols))
    }
    if(!is.null(dim(marx)))
      stop("Internal error: multivariate marks in default.symap.points")
    argnames <- names(list(...))
    shapegiven <- "shape" %in% argnames
    chargiven <- (!is.null(chars)) || ("pch" %in% argnames)
    assumecircles <- !(shapegiven || chargiven)
    sizegiven <- ("size" %in% argnames) ||
                 (("cex" %in% argnames) && !shapegiven)
    if(inherits(marx, c("Date", "POSIXt"))) {
      ## ......... marks are dates or date/times .....................
      timerange <- range(marx, na.rm=TRUE)
      shapedefault <- if(!assumecircles) list() else list(shape="circles")
      if(sizegiven) {
        g <- do.call(symbolmap,
                     resolve.defaults(list(range=timerange),
                                      list(...),
                                      shapedefault,
                                      list(chars=chars, cols=cols)))
        return(g)
      }
      ## attempt to determine a scale for the marks
      y <- scaletointerval(marx, 0, 1, timerange)
      y <- y[is.finite(y)]
      if(length(y) == 0)
        return(symbolmap(..., chars=chars, cols=cols))
      scal <- mark.scale.default(y, as.owin(x),
                                 markscale=markscale,
                                 maxsize=maxsize,
                                 meansize=meansize,
                                 characters=chargiven)
      if(is.na(scal))
        return(symbolmap(..., chars=chars, cols=cols))
      ## scale determined
      sizefun <- function(x, scal=1) {
        (scal/2) * scaletointerval(x, 0, 1, timerange)
      }
      formals(sizefun)[[2]] <- scal
      ## ensures value of 'scal' is printed
      ##
      g <- do.call(symbolmap,
                   resolve.defaults(list(range=timerange),
                                    list(...),
                                    shapedefault,
                                    list(size=sizefun)))
      return(g)
    }
    if(is.numeric(marx)) {
      ## ............. marks are numeric values ...................
      marx <- marx[is.finite(marx)]
      if(is.null(markrange)) {
        #' usual case
        if(length(marx) == 0)
          return(symbolmap(..., chars=chars, cols=cols))
        markrange <- range(marx)
      } else {
        if(!all(inside.range(marx, markrange)))
          warning("markrange does not encompass the range of mark values",
                  call.=FALSE)
      }
      ##
      if(sizegiven) {
        g <- do.call(symbolmap,
                     resolve.defaults(list(range=markrange),
                                      list(...),
                                      if(assumecircles) list(shape="circles") else list(),
                                      list(chars=chars, cols=cols)))
        return(g)
      }
      ## attempt to determine a scale for the marks
      if(all(markrange == 0))
        return(symbolmap(..., chars=chars, cols=cols))
      scal <- mark.scale.default(marx, as.owin(x),
                                 markscale=markscale,
                                 maxsize=maxsize,
                                 meansize=meansize,
                                 characters=chargiven)
      if(is.na(scal))
        return(symbolmap(..., chars=chars, cols=cols))
      ## scale determined
      if(markrange[1] >= 0) {
        ## all marks are nonnegative
        shapedefault <- if(!assumecircles) list() else list(shape="circles")
        cexfun <- function(x, scal=1) { scal * x }
        circfun <- function(x, scal=1) { scal * x }
        formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal
        sizedefault <- if(sizegiven) list() else
                       if(chargiven) list(cex=cexfun) else
                       list(size=circfun)
      } else {
        ## some marks are negative
        shapedefault <- if(!assumecircles) list() else
          list(shape=function(x) { ifelse(x >= 0, "circles", "squares") })
        cexfun <- function(x, scal=1) { scal * abs(x) }
        circfun <- function(x, scal=1) { scal * abs(x) }
        formals(cexfun)[[2]] <- formals(circfun)[[2]] <- scal
        sizedefault <- if(sizegiven) list() else
                       if(chargiven) list(cex=cexfun) else
                       list(size=circfun)
      }
      g <- do.call(symbolmap,
                   resolve.defaults(list(range=markrange),
                                    list(...),
                                    shapedefault,
                                    sizedefault,
                                    list(chars=chars, cols=cols)))
      return(g)
    }
    ## ........... non-numeric marks .........................
    um <- marklevels %orifnull%
          if(is.factor(marx)) levels(marx) else sortunique(marx)
    ntypes <- length(um)
    if(!is.null(cols))
      cols <- rep.int(cols, ntypes)[1:ntypes]
    if(shapegiven && sizegiven) {
      #' values mapped to symbols (shape and size specified)
      g <- symbolmap(inputs=um, ..., cols=cols)
    } else if(!shapegiven) {
      #' values mapped to 'pch'
      chars <- default.charmap(ntypes, chars)
      g <- symbolmap(inputs=um, ..., chars=chars, cols=cols)
    } else {
      #' values mapped to symbols
      #' determine size
      scal <- mark.scale.default(rep(1, npoints(x)), Window(x),
                                 maxsize=maxsize, meansize=meansize,
                                 characters=FALSE)
      g <- symbolmap(inputs=um, ..., size=scal, cols=cols)
    }
    return(g)
  }

  # default plotting characters for n mark levels:
  # digits 1..25, then single letters, then letters recycled with a warning
  default.charmap <- function(n, ch=NULL) {
    if(!is.null(ch))
      return(rep.int(ch, n)[1:n])
    if(n <= 25)
      return(1:n)
    ltr <- c(letters, LETTERS)
    if(n <= 52)
      return(ltr[1:n])
    ## wrapped sequence of letters
    warning("Too many types to display every type as a different character")
    return(ltr[1 + (0:(n - 1) %% 52)])
  }

  ## main function
  plot.ppp <- function(x, main, ...,
                       clipwin=NULL,
                       chars=NULL, cols=NULL,
                       use.marks=TRUE, which.marks=NULL,
                       add=FALSE,
                       type=c("p", "n"),
                       legend=TRUE,
                       leg.side=c("left", "bottom", "top", "right"),
                       leg.args=list(),
                       symap=NULL,
                       maxsize=NULL, meansize=NULL, markscale=NULL,
                       zap=0.01,
                       show.window=show.all, show.all=!add,
                       do.plot=TRUE, multiplot=TRUE) {
    if(missing(main))
      main <- short.deparse(substitute(x))
    type <- match.arg(type)
    if(missing(legend))
      legend <- (type == "p")
#    if(!missing(maxsize) || !missing(markscale) || !missing(meansize))
#      warn.once("circlescale",
#                "Interpretation of arguments maxsize and markscale",
#                "has changed (in spatstat version 1.37-0 and later).",
#                "Size of a circle is now measured by its diameter.")
    if(clipped <- !is.null(clipwin)) {
      # clip the pattern to 'clipwin'; keep the visible window edges
      stopifnot(is.owin(clipwin))
      W <- Window(x)
      clippy <- if(is.mask(W)) intersect.owin(W, clipwin) else edges(W)[clipwin]
      x <- x[clipwin]
    } else clippy <- NULL
    ## sensible default position
    legend <- legend && show.all
    if(legend) {
      leg.side <- match.arg(leg.side)
      vertical <- (leg.side %in% c("left", "right"))
    }
#    if(type == "n" || npoints(x) == 0) {
#      ## plot the window only
#      xwindow <- x$window
#      if(do.plot)
#        do.call(plot.owin,
#                resolve.defaults(list(xwindow),
#                                 list(...),
#                                 list(main=main, invert=TRUE, add=add,
#                                      type=if(show.window) "w" else "n")))
#      if(is.null(symap)) symap <- symbolmap()
#      attr(symap, "bbox") <- as.rectangle(xwindow)
#      return(invisible(symap))
#    }
    ## ................................................................
    ## Handle multiple columns of marks as separate plots
    ## (unless add=TRUE or which.marks selects a single column
    ##  or multipage = FALSE)
    if(use.marks && is.data.frame(mx <- marks(x))) {
      implied.all <- is.null(which.marks)
      want.several <- implied.all || is.data.frame(mx <- mx[,which.marks])
      do.several <- want.several && !add && multiplot
      if(do.several) {
        ## generate one plot for each column of marks
        y <- solapply(mx, setmarks, x=x)
        out <- do.call(plot,
                       resolve.defaults(list(x=y, main=main,
                                             show.window=show.window && !clipped,
                                             do.plot=do.plot,
                                             type=type),
                                        list(...),
                                        list(equal.scales=TRUE),
                                        list(panel.end=clippy),
                                        list(legend=legend,
                                             leg.side=leg.side,
                                             leg.args=leg.args),
                                        list(chars=chars, cols=cols,
                                             maxsize=maxsize,
                                             meansize=meansize,
                                             markscale=markscale,
                                             zap=zap)))
        return(invisible(out))
      }
      if(is.null(which.marks)) {
        which.marks <- 1
        if(do.plot) message("Plotting the first column of marks")
      }
    }
    ## ............... unmarked, or single column of marks ....................
    ## Determine symbol map and mark values to be used
    y <- x
    if(!is.marked(x, na.action="ignore") || !use.marks) {
      ## Marks are not mapped.
      marx <- NULL
      if(is.null(symap))
        symap <- default.symap.points(unmark(x), ..., chars=chars, cols=cols)
    } else {
      ## Marked point pattern
      marx <- marks(y, dfok=TRUE)
      if(is.data.frame(marx)) {
        ## select column or take first column
        marx <- marx[, which.marks]
        y <- setmarks(y, marx)
      }
      if(npoints(y) > 0) {
        ok <- complete.cases(as.data.frame(y))
        if(!any(ok)) {
          warning("All mark values are NA; plotting locations only.")
          if(is.null(symap))
            symap <- default.symap.points(unmark(x), ...,
                                          chars=chars, cols=cols)
        } else if(any(!ok)) {
          warning(paste("Some marks are NA;",
                        "corresponding points are omitted."))
          x <- x[ok]
          y <- y[ok]
          marx <- marks(y)
        }
      }
      ## apply default symbol map
      if(is.null(symap))
        symap <- default.symap.points(y,
                                      chars=chars, cols=cols,
                                      maxsize=maxsize,
                                      meansize=meansize,
                                      markscale=markscale,
                                      ...)
    }
#    gtype <- symbolmaptype(symap)
    ## Determine bounding box for main plot
    BB <- as.rectangle(x)
    sick <- inherits(x, "ppp") && !is.null(rejects <- attr(x, "rejects"))
    if(sick) {
      ## Get relevant parameters
      par.direct <- list(main=main, use.marks=use.marks,
                         maxsize=maxsize, meansize=meansize,
                         markscale=markscale)
      par.rejects <- resolve.1.default(list(par.rejects=list(pch="+")),
                                       list(...))
      par.all <- resolve.defaults(par.rejects, par.direct)
      rw <- resolve.defaults(list(...), list(rejectwindow=NULL))$rejectwindow
      ## determine window for rejects
      rwin <- if(is.null(rw)) rejects$window else
              if(is.logical(rw) && rw) rejects$window else
              if(inherits(rw, "owin")) rw else
              if(is.character(rw)) {
                switch(rw,
                       box={boundingbox(rejects, x)},
                       ripras={ripras(c(rejects$x, x$x),
                                      c(rejects$y, x$y))},
                       stop(paste("Unrecognised option: rejectwindow=", rw)))
              } else stop("Unrecognised format for rejectwindow")
      if(is.null(rwin))
        stop("Selected window for rejects pattern is NULL")
      BB <- boundingbox(BB, as.rectangle(rwin))
    }
    ## Augment bounding box with space for legend, if appropriate
    legend <- legend && (symbolmaptype(symap) != "constant")
    if(legend) {
      ## guess maximum size of symbols
      maxsize <- invoke.symbolmap(symap, symbolmapdomain(symap),
                                  corners(as.rectangle(x)),
                                  add=add, do.plot=FALSE)
      sizeguess <- if(maxsize <= 0) NULL else (1.5 * maxsize)
      leg.args <- append(list(side=leg.side, vertical=vertical), leg.args)
      ## draw up layout
      legbox <- do.call.matched(plan.legend.layout,
                                append(list(B=BB,
                                            size = sizeguess,
                                            started=FALSE,
                                            map=symap),
                                       leg.args))
      ## bounding box for everything
      BB <- legbox$A
    }
    ## return now if not plotting
    attr(symap, "bbox") <- BB
    if(!do.plot)
      return(invisible(symap))
    ## ............. start plotting .......................
    pt <- prepareTitle(main)
    main <- pt$main
    nlines <- pt$nlines
    # reserve vertical space for the title without drawing it yet
    blankmain <- if(nlines == 0) "" else rep(" ", nlines)
    rez <- resolve.defaults(list(...), list(cex.main=1, xlim=NULL, ylim=NULL))
    plot(BB, type="n", add=add, main=blankmain,
         show.all=show.all, cex.main=rez$cex.main,
         xlim=rez$xlim, ylim=rez$ylim)
    if(sick) {
      if(show.window) {
        ## plot windows
        if(!is.null(rw)) {
          ## plot window for rejects
          rwinpardefault <- list(lty=2,lwd=1,border=1)
          rwinpars <-
            resolve.defaults(par.rejects, rwinpardefault)[names(rwinpardefault)]
          do.call(plot.owin, append(list(rwin, add=TRUE), rwinpars))
        }
        ## plot window of main pattern
        if(!clipped) {
          do.call(plot.owin,
                  resolve.defaults(list(x$window, add=TRUE),
                                   list(...),
                                   list(invert=TRUE)))
        } else plot(clippy, add=TRUE, ...)
      }
      if(type != "n") {
        ## plot reject points
        do.call(plot.ppp, append(list(rejects, add=TRUE), par.all))
        warning(paste(rejects$n, "illegal points also plotted"))
      }
      ## the rest is added
      add <- TRUE
    }
    ## Now convert to bona fide point pattern
    x <- as.ppp(x)
    xwindow <- x$window
    ## Plot observation window (or at least the main title)
    do.call(plot.owin,
            resolve.defaults(list(x=xwindow, add=TRUE, main=main,
                                  type=if(show.window && !clipped) "w" else "n",
                                  show.all=show.all),
                             list(...),
                             list(invert=TRUE)))
    ## If clipped, plot visible part of original window
    if(show.window && clipped)
      plot(clippy, add=TRUE, ...)
#    else if(show.all) fakemaintitle(as.rectangle(xwindow), main, ...)
    if(type != "n") {
      ## plot symbols ##
      invoke.symbolmap(symap, marx, x, add=TRUE)
    }
    ## add legend
    if(legend) {
      b <- legbox$b
      legendmap <- if(length(leg.args) == 0) symap else
                   do.call(update, append(list(object=symap), leg.args))
      do.call(plot,
              append(list(x=legendmap, main="", add=TRUE,
                          xlim=b$xrange, ylim=b$yrange),
                     leg.args))
    }
    return(invisible(symap))
  }

  plot.ppp
})

#' mark.scale.default:
#'   Determine the physical scale factor for plotting symbols whose size is
#'   proportional to the mark values 'marx', inside window 'w'.
#'   At most one of markscale / maxsize / meansize may be given; if none is,
#'   spatstat.options("par.points") supplies defaults, else a default maxsize
#'   is guessed from the window size and number of points.
#'   Returns the scale factor (a 'cex' multiplier when characters=TRUE),
#'   or NA when the marks are all (numerically) zero.
mark.scale.default <- function(marx, w, ...,
                               markscale=NULL, maxsize=NULL, meansize=NULL,
                               characters=FALSE) {
  ## establish values of markscale, maxsize, meansize
  ngiven <- (!is.null(markscale)) +
            (!is.null(maxsize)) +
            (!is.null(meansize))
  if(ngiven > 1)
    stop("Only one of the arguments markscale, maxsize, meansize",
         " should be given",
         call.=FALSE)
  if(ngiven == 0) {
    ## if ALL are absent, enforce the spatstat defaults
    ## (which could also be null)
    pop <- spatstat.options("par.points")
    markscale <- pop$markscale
    maxsize <- pop$maxsize
    meansize <- pop$meansize
  }
  ## Now check whether markscale is fixed
  if(!is.null(markscale)) {
    stopifnot(markscale > 0)
    return(markscale)
  }
  # Usual case: markscale is to be determined from maximum/mean physical size
  if(is.null(maxsize) && is.null(meansize)) {
    ## compute default value of 'maxsize'
    ## guess appropriate max physical size of symbols
    bb <- as.rectangle(w)
    maxsize <- 1.4/sqrt(pi * length(marx)/area(bb))
    maxsize <- min(maxsize, diameter(bb) * 0.07)
    ## updated: maxsize now represents *diameter*
    maxsize <- 2 * maxsize
  } else {
    if(!is.null(maxsize)) stopifnot(maxsize > 0) else stopifnot(meansize > 0)
  }
  # Examine mark values
  absmarx <- abs(marx)
  maxabs <- max(absmarx)
  tiny <- (maxabs < 4 * .Machine$double.eps)
  if(tiny)
    return(NA)
  ## finally determine physical scale for symbols
  if(!is.null(maxsize)) {
    scal <- maxsize/maxabs
  } else {
    meanabs <- mean(absmarx)
    scal <- meansize/meanabs
  }
  if(!characters)
    return(scal)
  ## if using characters ('pch') we need to
  ## convert physical sizes to 'cex' values
  charsize <- max(sidelengths(as.rectangle(w)))/40
  return(scal/charsize)
}
fakemaintitle <- function(bb, main, ...) { ## Try to imitate effect of 'title(main=main)' above a specified box if(!any(nzchar(main))) return(invisible(NULL)) bb <- as.rectangle(bb) x0 <- mean(bb$xrange) y0 <- bb$yrange[2] + length(main) * diff(bb$yrange)/12 parnames <- c('cex.main', 'col.main', 'font.main') parlist <- par(parnames) parlist <- resolve.defaults(list(...), parlist)[parnames] names(parlist) <- c('cex', 'col', 'font') do.call.matched(text.default, resolve.defaults(list(x=x0, y=y0, labels=main), parlist, list(...)), funargs=graphicsPars("text")) return(invisible(NULL)) } text.ppp <- function(x, ...) { graphics::text.default(x=x$x, y=x$y, ...) } spatstat/R/images.R0000644000176200001440000011145313532403242013711 0ustar liggesusers# # images.R # # $Revision: 1.157 $ $Date: 2019/08/31 05:19:39 $ # # The class "im" of raster images # # im() object creator # # is.im() tests class membership # # rasterx.im(), rastery.im() # raster X and Y coordinates # # nearest.pixel() # lookup.im() # facilities for looking up pixel values # ################################################################ ######## basic support for class "im" ################################################################ # # creator im <- function(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), xrange=NULL, yrange=NULL, unitname=NULL) { typ <- typeof(mat) if(typ == "double") typ <- "real" miss.xcol <- missing(xcol) miss.yrow <- missing(yrow) # determine dimensions if(!is.null(dim(mat))) { nr <- nrow(mat) nc <- ncol(mat) if(is.na(nc)) { #' handle one-dimensional tables nc <- 1 nr <- length(mat) if(missing(xcol)) xcol <- seq_len(nc) } if(length(xcol) != nc) stop("Length of xcol does not match ncol(mat)") if(length(yrow) != nr) stop("Length of yrow does not match nrow(mat)") } else { if(miss.xcol || miss.yrow) stop(paste(sQuote("mat"), "is not a matrix and I can't guess its dimensions")) stopifnot(length(mat) == length(xcol) * length(yrow)) nc <- length(xcol) nr <- length(yrow) } # 
deal with factor case if(is.factor(mat)) { typ <- "factor" } else if(!is.null(lev <- levels(mat))) { typ <- "factor" mat <- factor(mat, levels=lev) } # Ensure 'mat' is a matrix (without destroying factor information) if(!is.matrix(mat)) dim(mat) <- c(nr, nc) # set up coordinates if((miss.xcol || length(xcol) <= 1) && !is.null(xrange) ) { # use 'xrange' xstep <- diff(xrange)/nc xcol <- seq(from=xrange[1L] + xstep/2, to=xrange[2L] - xstep/2, length.out=nc) } else if(length(xcol) > 1) { # use 'xcol' # ensure spacing is constant xcol <- seq(from=min(xcol), to=max(xcol), length.out=length(xcol)) xstep <- diff(xcol)[1L] xrange <- range(xcol) + c(-1,1) * xstep/2 } else stop("Cannot determine pixel width") if((miss.yrow || length(yrow) <= 1) && !is.null(yrange)) { # use 'yrange' ystep <- diff(yrange)/nr yrow <- seq(from=yrange[1L] + ystep/2, to=yrange[2L] - ystep/2, length.out=nr) } else if(length(yrow) > 1) { # use 'yrow' # ensure spacing is constant yrow <- seq(from=min(yrow), to=max(yrow), length.out=length(yrow)) ystep <- diff(yrow)[1L] yrange <- range(yrow) + c(-1,1) * ystep/2 } else stop("Cannot determine pixel height") unitname <- as.unitname(unitname) out <- list(v = mat, dim = c(nr, nc), xrange = xrange, yrange = yrange, xstep = xstep, ystep = ystep, xcol = xcol, yrow = yrow, type = typ, units = unitname) class(out) <- "im" return(out) } is.im <- function(x) { inherits(x,"im") } levels.im <- function(x) { levels(x$v) } "levels<-.im" <- function(x, value) { if(x$type != "factor") stop("image is not factor-valued") levels(x$v) <- value x } ################################################################ ######## methods for class "im" ################################################################ shift.im <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "im") if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; argument origin has precedence") locn <- interpretAsOrigin(origin, Window(X)) vec <- -locn } vec <- as2vector(vec) X$xrange 
<- X$xrange + vec[1L]
  X$yrange <- X$yrange + vec[2L]
  X$xcol <- X$xcol + vec[1L]
  X$yrow <- X$yrow + vec[2L]
  attr(X, "lastshift") <- vec
  return(X)
}

## Replace the enclosing frame (bounding rectangle) of an image.
## 'value' must be a rectangle; the image is first expanded if the
## new frame does not contain the current one, then cropped/returned
## on the new frame.
"Frame<-.im" <- function(X, value) {
  stopifnot(is.rectangle(value))
  if(!is.subset.owin(value, Frame(X))) {
    ## first expand
    X <- X[value, drop=FALSE]
  }
  X[value, drop=TRUE]
}

## Subset extraction method for pixel images.
## Supports spatial indices (owin, logical im, linnet, ppp, or anything
## convertible to ppp) as well as ordinary matrix-style indexing.
##   drop   - if TRUE, drop NA pixel values / return a vector where applicable
##   tight  - trim the result to the smallest enclosing rectangle
##   raster - optional pixel raster on which to resample the result
##   rescue - if TRUE and the index is a rectangle, return an image
"[.im" <- local({

  ## helpers for interval arithmetic on x/y ranges
  disjoint <- function(r, s) { (r[2L] < s[1L]) || (r[1L] > s[2L]) }
  clip     <- function(r, s) { c(max(r[1L],s[1L]), min(r[2L],s[2L])) }
  inrange  <- function(x, r) { (x >= r[1L]) & (x <= r[2L]) }

  Extract.im <- function(x, i, j, ..., drop=TRUE, tight=FALSE,
                         raster=NULL, rescue=is.owin(i)) {
    ## detect 'blank' arguments like second argument in x[i, ]
    ## (sys.call() counts blank slots, match.call() does not)
    ngiven <- length(sys.call())
    nmatched <- length(match.call())
    nblank <- ngiven - nmatched
    itype <- if(missing(i)) "missing" else "given"
    jtype <- if(missing(j)) "missing" else "given"
    if(nblank == 1) {
      if(!missing(i)) jtype <- "blank"
      if(!missing(j)) itype <- "blank"
    } else if(nblank == 2) {
      itype <- jtype <- "blank"
    }
    ## rescue default only applies when a spatial index 'i' was supplied
    if(missing(rescue) && itype != "given") rescue <- FALSE
    if(itype == "missing" && jtype == "missing") {
      ## no indices: return entire image (or its pixel values)
      out <- if(is.null(raster)) x else as.im(raster)
      xy <- expand.grid(y=out$yrow,x=out$xcol)
      if(!is.null(raster)) {
        ## resample image on new pixel raster
        values <- lookup.im(x, xy$x, xy$y, naok=TRUE)
        out <- im(values, out$xcol, out$yrow, unitname=unitname(out))
      }
      if(!drop) return(out) else {
        v <- out$v
        return(v[!is.na(v)])
      }
    }
    if(itype == "given") {
      ## .................................................................
      ## Try spatial index
      ## .................................................................
      if(verifyclass(i, "owin", fatal=FALSE)) {
        if(jtype == "given") warning("Argument j ignored")
        ## 'i' is a window
        ## if drop = FALSE, just set values outside window to NA
        ## if drop = TRUE, extract values for all pixels inside window
        ##    as an image (if 'i' is a rectangle)
        ##    or as a vector (otherwise)
        ## determine pixel raster for output
        if(!is.null(raster)) {
          out <- as.im(raster)
          do.resample <- TRUE
        } else if(is.subset.owin(i, as.owin(x))) {
          out <- x
          do.resample <- FALSE
        } else {
          ## new window does not contain data window: expand it
          bb <- boundingbox(as.rectangle(i), as.rectangle(x))
          rr <- if(is.mask(i)) i else x
          xcol <- prolongseq(rr$xcol, bb$xrange, rr$xstep)
          yrow <- prolongseq(rr$yrow, bb$yrange, rr$ystep)
          out <- list(xcol=xcol, yrow=yrow)
          do.resample <- TRUE
        }
        xy <- expand.grid(y=out$yrow,x=out$xcol)
        if(do.resample) {
          ## resample image on new pixel raster
          values <- lookup.im(x, xy$x, xy$y, naok=TRUE)
          out <- im(values, out$xcol, out$yrow, unitname=unitname(out))
        }
        inside <- inside.owin(xy$x, xy$y, i)
        if(!drop) {
          ## set other pixels to NA and return image
          out$v[!inside] <- NA
          if(!tight) return(out)
        } else if(!(rescue && i$type == "rectangle")) {
          ## return pixel values
          values <- out$v[inside]
          return(values)
        }
        ## return image in smaller rectangle
        if(disjoint(i$xrange, x$xrange) || disjoint(i$yrange, x$yrange))
          ## empty intersection
          return(numeric(0))
        xr <- clip(i$xrange, x$xrange)
        yr <- clip(i$yrange, x$yrange)
        colsub <- inrange(out$xcol, xr)
        rowsub <- inrange(out$yrow, yr)
        ncolsub <- sum(colsub)
        nrowsub <- sum(rowsub)
        if(ncolsub == 0 || nrowsub == 0)
          return(numeric(0))
        ## when only one row/column survives, specify the range instead
        ## of a single coordinate so im() can determine the pixel size
        marg <- list(mat=out$v[rowsub, colsub, drop=FALSE],
                     unitname=unitname(x))
        xarg <- if(ncolsub > 1) list(xcol = out$xcol[colsub]) else list(xrange=xr)
        yarg <- if(nrowsub > 1) list(yrow = out$yrow[rowsub]) else list(yrange=yr)
        result <- do.call(im, c(marg, xarg, yarg))
        return(result)
      }
      if(verifyclass(i, "im", fatal=FALSE)) {
        if(jtype == "given") warning("Argument j ignored")
        ## logical images OK
        if(i$type == "logical") {
          ## convert to window (TRUE -> inside, FALSE -> NA)
          w <- as.owin(eval.im(ifelse1NA(i)))
          return(x[w, drop=drop, ..., raster=raster])
        } else stop("Subset argument \'i\' is an image, but not of logical type")
      }
      if(inherits(i, "linnet")) {
        #' linear network: restrict the image to a mask of the network
        if(jtype == "given") warning("Argument j ignored")
        W <- raster %orifnull% as.owin(x)
        M <- as.mask.psp(as.psp(i), W=W, ...)
        xM <- x[M, drop=drop]
        if(is.im(xM)) xM <- linim(i, xM)
        return(xM)
      }
      if(is.ppp(i)) {
        ## 'i' is a point pattern
        if(jtype == "given") warning("Argument j ignored")
        ## Look up the greyscale values for the points of the pattern
        values <- lookup.im(x, i$x, i$y, naok=TRUE)
        if(drop) values <- values[!is.na(values)]
        if(length(values) == 0)
          ## ensure the zero-length vector is of the right type
          values <- switch(x$type,
                           factor={ factor(, levels=levels(x)) },
                           integer = { integer(0) },
                           logical = { logical(0) },
                           real = { numeric(0) },
                           complex = { complex(0) },
                           character = { character(0) },
                           { values }
                           )
        return(values)
      }
    }
    ## ............... not a spatial index .............................
    ## Try indexing as a matrix
    ## Construct a matrix index call for possible re-use
    M <- as.matrix(x)
    ## suppress warnings from code checkers
    dont.complain.about(M)
    ##
    ycall <- switch(itype,
                    given = {
                      switch(jtype,
                             given   = quote(M[i, j, drop=FALSE]),
                             blank   = quote(M[i,  , drop=FALSE]),
                             missing = quote(M[i, drop=FALSE]))
                    },
                    blank = {
                      switch(jtype,
                             given   = quote(M[ , j, drop=FALSE]),
                             blank   = quote(M[ ,  , drop=FALSE]),
                             missing = quote(M[ , drop=FALSE]))
                    },
                    missing = {
                      switch(jtype,
                             given   = quote(M[j=j, drop=FALSE]),
                             blank   = quote(M[j= , drop=FALSE]),
                             missing = quote(M[ drop=FALSE]))
                    })
    ## try it
    y <- try(eval(as.call(ycall)), silent=TRUE)
    if(!inherits(y, "try-error")) {
      ## valid subset index for a matrix
      if(rescue) {
        ## check whether it's a rectangular block, in correct order;
        ## apply the same index call to row/column number matrices
        RR <- row(x$v)
        CC <- col(x$v)
        rcall <- ycall
        rcall[[2L]] <- quote(RR)
        ccall <- ycall
        ccall[[2L]] <- quote(CC)
        rr <- eval(as.call(rcall))
        cc <- eval(as.call(ccall))
        rseq <- sortunique(as.vector(rr))
        cseq <- sortunique(as.vector(cc))
        if(all(diff(rseq) == 1) && all(diff(cseq) == 1) &&
           (length(rr) == length(rseq) * length(cseq)) &&
           all(rr == RR[rseq, cseq]) && all(cc == CC[rseq,cseq])) {
          ## yes - make image
          dim(y) <- c(length(rseq), length(cseq))
          Y <- x
          Y$v <- y
          Y$dim <- dim(y)
          Y$xcol <- x$xcol[cseq]
          Y$yrow <- x$yrow[rseq]
          Y$xrange <- range(Y$xcol) + c(-1,1) * x$xstep/2
          Y$yrange <- range(Y$yrow) + c(-1,1) * x$ystep/2
          return(Y)
        }
      }
      ## return pixel values (possibly as matrix)
      return(y)
    }
    ## Last chance!
    if(itype == "given" &&
       !is.matrix(i) &&
       !is.null(ip <- as.ppp(i, W=as.owin(x), fatal=FALSE, check=FALSE))) {
      ## 'i' is convertible to a point pattern
      ## Look up the greyscale values for the points of the pattern
      values <- lookup.im(x, ip$x, ip$y, naok=TRUE)
      if(drop) values <- values[!is.na(values)]
      if(length(values) == 0)
        ## ensure the zero-length vector is of the right type
        values <- switch(x$type,
                         factor={ factor(, levels=levels(x)) },
                         integer = { integer(0) },
                         logical = { logical(0) },
                         real = { numeric(0) },
                         complex = { complex(0) },
                         character = { character(0) },
                         { values }
                         )
      return(values)
    }
    stop("The subset operation is undefined for this type of index")
  }

  Extract.im
})

## Recompute internal metadata ('type') of an image after its pixel
## matrix has been manipulated directly.
update.im <- function(object, ...) {
  ## update internal structure of image after manipulation
  X <- object
  mat <- X$v
  typ <- typeof(mat)
  if(typ == "double") typ <- "real"
  ## deal with factor case
  if(is.factor(mat)) {
    typ <- "factor"
  } else if(!is.null(lev <- levels(mat))) {
    ## integer matrix carrying a 'levels' attribute: restore factor form
    typ <- "factor"
    X$v <- factor(mat, levels=lev)
  }
  X$type <- typ
  return(X)
}

## Subset replacement method for pixel images.
## Accepts the same spatial indices as "[.im" (owin, logical im, ppp,
## or anything convertible to ppp) and ordinary matrix-style indices.
"[<-.im" <- function(x, i, j, ..., drop=TRUE, value) {
  # detect 'blank' arguments like second argument of x[i, ]
  ngiven <- length(sys.call())
  nmatched <- length(match.call())
  nblank <- ngiven - nmatched
  itype <- if(missing(i)) "missing" else "given"
  jtype <- if(missing(j)) "missing" else "given"
  if(nblank == 1) {
    if(!missing(i)) jtype <- "blank"
    if(!missing(j)) itype <- "blank"
  } else if(nblank == 2) {
    itype <- jtype <- "blank"
  }
  X <- x
  W <- as.owin(X)
  stopifnot(is.im(value) || is.vector(value) || is.matrix(value) ||
            is.array(value) || is.factor(value))
  if(is.im(value)) value <- value$v
  if(itype == "missing" && jtype == "missing") {
    #' no index provided
    #' set all pixels to 'value'
    #' (if drop=TRUE, this applies only to pixels inside the window)
    v <- X$v
    if(!is.factor(value)) {
      if(!drop) {
        v[] <- value
      } else {
        v[!is.na(v)] <- value
      }
    } else {
      ## factor replacement: rebuild integer codes then re-factor
      ## NOTE(review): matrix(NA_integer_, ncol(v), nrow(v)) passes
      ## ncol(v) as nrow and nrow(v) as ncol - looks transposed;
      ## harmless only for square images. Verify against upstream.
      vnew <- matrix(NA_integer_, ncol(v), nrow(v))
      if(!drop) {
        vnew[] <- as.integer(value)
      } else {
        vnew[!is.na(v)]
<- as.integer(value)
      }
      v <- factor(vnew, labels=levels(value))
    }
    X$v <- v
    return(update(X))
  }
  if(itype == "given") {
    # ..................... Try a spatial index ....................
    if(verifyclass(i, "owin", fatal=FALSE)) {
      if(jtype == "given") warning("Index j ignored")
      # 'i' is a window: assign to all raster points falling inside it
      if(is.empty(i)) return(X)
      rxy <- rasterxy.mask(W)
      xx <- rxy$x
      yy <- rxy$y
      ok <- inside.owin(xx, yy, i)
      X$v[ok] <- value
      X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
      return(update(X))
    }
    if(verifyclass(i, "im", fatal=FALSE) && i$type == "logical") {
      if(jtype == "given") warning("Index j ignored")
      # convert logical vector to window where entries are TRUE
      i <- as.owin(eval.im(ifelse1NA(i)))
      # continue as above
      rxy <- rasterxy.mask(W)
      xx <- rxy$x
      yy <- rxy$y
      ok <- inside.owin(xx, yy, i)
      X$v[ok] <- value
      X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
      return(update(X))
    }
    if(is.ppp(i)) {
      # 'i' is a point pattern: assign at the pixels nearest each point
      if(jtype == "given") warning("Index j ignored")
      nv <- length(value)
      np <- npoints(i)
      if(nv != np && nv != 1)
        stop("Length of replacement value != number of point locations")
      # test whether all points are inside window FRAME
      ok <- inside.owin(i$x, i$y, as.rectangle(W))
      if(any(!ok)) {
        warning("Some points are outside the outer frame of the image")
        if(nv == np) value <- value[ok]
        i <- i[ok]
      }
      if(npoints(i) > 0) {
        # determine row & column positions for each point
        loc <- nearest.pixel(i$x, i$y, X)
        # set values
        X$v[cbind(loc$row, loc$col)] <- value
      }
      X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
      return(update(X))
    }
  }
  # .................. 'i' is not a spatial index ....................
  # Construct a matrix replacement call mirroring the supplied indices
  ycall <- switch(itype,
                  given = {
                    switch(jtype,
                           given   = quote(X$v[i, j] <- value),
                           blank   = quote(X$v[i,  ] <- value),
                           missing = quote(X$v[i] <- value))
                  },
                  blank = {
                    switch(jtype,
                           given   = quote(X$v[ , j] <- value),
                           blank   = quote(X$v[ ,  ] <- value),
                           missing = quote(X$v[ ] <- value))
                  },
                  missing = {
                    switch(jtype,
                           given   = quote(X$v[j=j] <- value),
                           blank   = quote(X$v[j= ] <- value),
                           missing = quote(X$v[] <- value))
                  })
  # try it
  litmus <- try(eval(as.call(ycall)), silent=TRUE)
  if(!inherits(litmus, "try-error")){
    X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
    return(update(X))
  }
  # Last chance! coerce 'i' to a point pattern if possible
  if(itype == "given" &&
     !is.matrix(i) &&
     !is.null(ip <- as.ppp(i, W=W, fatal=FALSE, check=TRUE))) {
    # 'i' is convertible to a point pattern
    if(jtype == "given") warning("Index j ignored")
    nv <- length(value)
    np <- npoints(ip)
    if(nv != np && nv != 1)
      stop("Length of replacement value != number of point locations")
    # test whether all points are inside window FRAME
    ok <- inside.owin(ip$x, ip$y, as.rectangle(W))
    if(any(!ok)) {
      warning("Some points are outside the outer frame of the image")
      if(nv == np) value <- value[ok]
      ip <- ip[ok]
    }
    if(npoints(ip) > 0) {
      # determine row & column positions for each point
      loc <- nearest.pixel(ip$x, ip$y, X)
      # set values
      X$v[cbind(loc$row, loc$col)] <- value
    }
    X$type <- ifelse(is.factor(X$v), "factor", typeof(X$v))
    return(update(X))
  }
  stop("The subset operation is undefined for this type of index")
}

################################################################
########             other tools
################################################################
#
# This function is similar to nearest.raster.point except for
# the third argument 'im' and the different idiom for calculating
# row & column - which could be used in nearest.raster.point()

## Map (x,y) coordinates to the (row, col) indices of the nearest pixel
## centre in image/mask Z, clamped to the raster limits.
nearest.pixel <- function(x,y, Z) {
  stopifnot(is.im(Z) || is.mask(Z))
  if(length(x) > 0) {
    nr <- Z$dim[1L]
    nc <- Z$dim[2L]
    cc <- as.integer(round(1 + (x - Z$xcol[1L])/Z$xstep))
    rr
<- as.integer(round(1 + (y - Z$yrow[1L])/Z$ystep)) cc <- pmax.int(1L,pmin.int(cc, nc)) rr <- pmax.int(1L,pmin.int(rr, nr)) } else cc <- rr <- integer(0) return(list(row=rr, col=cc)) } # Explores the 3 x 3 neighbourhood of nearest.pixel # and finds the nearest pixel that is not NA nearest.valid.pixel <- function(x, y, Z, method=c("C","interpreted"), nsearch=1) { method <- match.arg(method) switch(method, interpreted = { rc <- nearest.pixel(x,y,Z) # checks that Z is an 'im' or 'mask' rr <- rc$row cc <- rc$col #' check whether any pixels are outside image domain inside <- as.owin(Z)$m miss <- !inside[cbind(rr, cc)] if(!any(miss)) return(rc) #' for offending pixels, explore 3 x 3 neighbourhood nr <- Z$dim[1L] nc <- Z$dim[2L] xcol <- Z$xcol yrow <- Z$yrow searching <- (-nsearch):(nsearch) for(i in which(miss)) { rows <- rr[i] + searching cols <- cc[i] + searching rows <- unique(pmax.int(1L, pmin.int(rows, nr))) cols <- unique(pmax.int(1L, pmin.int(cols, nc))) rcp <- expand.grid(row=rows, col=cols) ok <- inside[as.matrix(rcp)] if(any(ok)) { #' At least one of the neighbours is valid #' Find the closest one rcp <- rcp[ok,] dsq <- with(rcp, (x[i] - xcol[col])^2 + (y[i] - yrow[row])^2) j <- which.min(dsq) rc$row[i] <- rcp$row[j] rc$col[i] <- rcp$col[j] } } }, C = { stopifnot(is.im(Z) || is.mask(Z)) n <- length(x) if(n == 0) { cc <- rr <- integer(0) } else { nr <- Z$dim[1L] nc <- Z$dim[2L] xscaled <- (x - Z$xcol[1])/Z$xstep yscaled <- (y - Z$yrow[1])/Z$ystep aspect <- Z$ystep/Z$xstep inside <- as.owin(Z)$m zz <- .C("nearestvalidpixel", n = as.integer(n), x = as.double(xscaled), y = as.double(yscaled), nr = as.integer(nr), nc = as.integer(nc), aspect = as.double(aspect), z = as.integer(inside), nsearch = as.integer(nsearch), rr = as.integer(integer(n)), cc = as.integer(integer(n)), PACKAGE="spatstat") rr <- zz$rr + 1L cc <- zz$cc + 1L if(any(bad <- (rr == 0 | cc == 0))) { rr[bad] <- NA cc[bad] <- NA } } rc <- list(row=rr, col=cc) }) return(rc) } # This function is a 
# generalisation of inside.owin()
# to images other than binary-valued images.

## Look up the pixel values of image Z at locations (x,y).
## 'x' may also be a list/data.frame with components x and y.
##   naok   - if FALSE, warn when any NA values are produced
##   strict - if TRUE use nearest.pixel (may land on an NA pixel);
##            otherwise use nearest.valid.pixel
lookup.im <- function(Z, x, y, naok=FALSE, strict=TRUE) {
  verifyclass(Z, "im")
  if(Z$type == "factor")
    Z <- repair.old.factor.image(Z)
  if((missing(y) || is.null(y)) && all(c("x", "y") %in% names(x))) {
    y <- x$y
    x <- x$x
  }
  if(length(x) != length(y))
    stop("x and y must be numeric vectors of equal length")
  # initialise answer to NA, with the same storage mode as the image
  if(Z$type != "factor") {
    niets <- NA
    mode(niets) <- mode(Z$v)
  } else {
    niets <- factor(NA, levels=levels(Z))
  }
  value <- rep.int(niets, length(x))
  # test whether inside bounding rectangle (with numerical tolerance)
  xr <- Z$xrange
  yr <- Z$yrange
  eps <- sqrt(.Machine$double.eps)
  frameok <- (x >= xr[1L] - eps) & (x <= xr[2L] + eps) &
             (y >= yr[1L] - eps) & (y <= yr[2L] + eps)
  if(!any(frameok)) {
    # all points OUTSIDE range - no further work needed
    if(!naok) warning("Internal error: all values NA")
    return(value) # all NA
  }
  # consider only those points which are inside the frame
  xf <- x[frameok]
  yf <- y[frameok]
  # map locations to raster (row,col) coordinates
  if(strict)
    loc <- nearest.pixel(xf,yf,Z)
  else
    loc <- nearest.valid.pixel(xf,yf,Z)
  # look up image values
  vf <- Z$v[cbind(loc$row, loc$col)]
  # insert into answer
  value[frameok] <- vf
  if(!naok && anyNA(value))
    warning("Internal error: NA's generated")
  return(value)
}

## low level: matrices of the x (resp. y) coordinate of every pixel
rasterx.im <- function(x) {
  verifyclass(x, "im")
  xx <- x$xcol
  matrix(xx[col(x)], ncol=ncol(x), nrow=nrow(x))
}

rastery.im <- function(x) {
  verifyclass(x, "im")
  yy <- x$yrow
  matrix(yy[row(x)], ncol=ncol(x), nrow=nrow(x))
}

## two-column matrix of (x,y) coordinates of every pixel;
## if drop=TRUE, only pixels with non-NA values are returned
rasterxy.im <- function(x, drop=FALSE) {
  verifyclass(x, "im")
  xx <- x$xcol
  yy <- x$yrow
  ans <- cbind(x=as.vector(xx[col(x)]),
               y=as.vector(yy[row(x)]))
  if(drop) {
    ok <- as.vector(!is.na(x$v))
    ans <- ans[ok, , drop=FALSE]
  }
  return(ans)
}

## user interface: accept either a window (mask) or an image
raster.x <- function(w, drop=FALSE) {
  if(is.owin(w)) return(rasterx.mask(w, drop=drop))
  if(!is.im(w)) stop("w should be a window or an image")
  x <- w$xcol[col(w)]
  x <- if(drop) x[!is.na(w$v), drop=TRUE] else array(x, dim=w$dim)
  return(x)
}

raster.y <- function(w, drop=FALSE) {
  if(is.owin(w)) return(rastery.mask(w, drop=drop))
  if(!is.im(w)) stop("w should be a window or an image")
  y <- w$yrow[row(w)]
  y <- if(drop) y[!is.na(w$v), drop=TRUE] else array(y, dim=w$dim)
  return(y)
}

raster.xy <- function(w, drop=FALSE) {
  if(is.owin(w)) return(rasterxy.mask(w, drop=drop))
  if(!is.im(w)) stop("w should be a window or an image")
  x <- w$xcol[col(w)]
  y <- w$yrow[row(w)]
  if(drop) {
    ok <- !is.na(w$v)
    x <- x[ok, drop=TRUE]
    y <- y[ok, drop=TRUE]
  }
  return(list(x=as.numeric(x), y=as.numeric(y)))
}

##############
# methods for other functions

## sorting/ranking support: coerce to numeric vector of pixel values
xtfrm.im <- function(x) { as.numeric(as.matrix.im(x)) }

## pixel matrix of the image
as.matrix.im <- function(x, ...) {
  return(x$v)
}

## pixel values as a (nrow x ncol x 1) array by default
as.array.im <- function(x, ...) {
  m <- as.matrix(x)
  a <- do.call(array,
               resolve.defaults(list(m),
                                list(...),
                                list(dim=c(dim(m), 1))))
  return(a)
}

## data frame of (x, y, value) for all non-NA pixels
as.data.frame.im <- function(x, ...) {
  verifyclass(x, "im")
  v <- x$v
  xx <- x$xcol[col(v)]
  yy <- x$yrow[row(v)]
  ok <- !is.na(v)
  xx <- as.vector(xx[ok])
  yy <- as.vector(yy[ok])
  # extract pixel values without losing factor info
  vv <- v[ok]
  dim(vv) <- NULL
  #
  data.frame(x=xx, y=yy, value=vv, ...)
}

## mean of pixel values (x[drop=na.rm] extracts the values)
mean.im <- function(x, trim=0, na.rm=TRUE, ...) {
  verifyclass(x, "im")
  xvalues <- x[drop=na.rm]
  return(mean(xvalues, trim=trim, na.rm=na.rm))
}

## arguments of generic 'median' will change in R 3.4:
## define the method with whichever formal arguments the installed
## generic has (with or without '...')
median.im <- if("..." %in% names(formals(median))) {
  function(x, na.rm=TRUE, ...) {
    verifyclass(x, "im")
    xvalues <- x[drop=na.rm]
    return(median(xvalues, ...))
  }
} else {
  function(x, na.rm=TRUE) {
    verifyclass(x, "im")
    xvalues <- x[drop=na.rm]
    return(median(xvalues))
  }
}

## point pattern of location(s) where the image attains its maximum;
## first=TRUE returns only the first (in column-major order)
where.max <- function(x, first=TRUE) {
  stopifnot(is.im(x))
  if(first) {
    ## find the first maximum
    v <- x$v
    locn <- which.max(as.vector(v)) # ignores NA, NaN
    locrow <- as.vector(row(v))[locn]
    loccol <- as.vector(col(v))[locn]
  } else {
    ## find all maxima
    xmax <- max(x)
    M <- solutionset(x == xmax)
    loc <- which(M$m, arr.ind=TRUE)
    locrow <- loc[,1L]
    loccol <- loc[,2L]
  }
  xx <- x$xcol[loccol]
  yy <- x$yrow[locrow]
  return(ppp(x=xx, y=yy, window=Window(x)))
}

## point pattern of location(s) where the image attains its minimum
where.min <- function(x, first=TRUE) {
  stopifnot(is.im(x))
  if(first) {
    ## find the first minimum
    v <- x$v
    locn <- which.min(as.vector(v)) # ignores NA, NaN
    locrow <- as.vector(row(v))[locn]
    loccol <- as.vector(col(v))[locn]
  } else {
    ## find all minima
    xmin <- min(x)
    M <- solutionset(x == xmin)
    loc <- which(M$m, arr.ind=TRUE)
    locrow <- loc[,1L]
    loccol <- loc[,2L]
  }
  xx <- x$xcol[loccol]
  yy <- x$yrow[locrow]
  return(ppp(x=xx, y=yy, window=Window(x)))
}

## the following ensures that 'sd' works
as.double.im <- function(x, ...) {
  as.double(x[], ...)
}

##

## Histogram (or, for factor/logical images, a barplot) of pixel values.
## Returns a "histogram" object, or a "barplotdata" object which can be
## re-plotted with plot.barplotdata().
hist.im <- function(x, ..., probability=FALSE, xname) {
  if(missing(xname) || is.null(xname))
    xname <- short.deparse(substitute(x))
  verifyclass(x, "im")
  main <- paste("Histogram of", xname)
  # default plot arguments
  # extract pixel values
  values <- as.matrix(x)
  dim(values) <- NULL
  # barplot or histogram
  if(x$type %in% c("logical", "factor")) {
    # barplot
    tab <- table(values)
    probs <- tab/sum(tab)
    if(probability) {
      heights <- probs
      ylab <- "Probability"
    } else {
      heights <- tab
      ylab <- "Number of pixels"
    }
    mids <- do.call(barplot,
                    resolve.defaults(list(heights),
                                     list(...),
                                     list(xlab=paste("Pixel value"),
                                          ylab=ylab,
                                          main=main)))
    out <- list(counts=tab, probs=probs,
                heights=heights, mids=mids, xname=xname)
    class(out) <- "barplotdata"
  } else {
    # histogram
    values <- values[!is.na(values)]
    plotit <- resolve.defaults(list(...), list(plot=TRUE))$plot
    if(plotit) {
      ylab <- if(probability) "Probability density" else "Number of pixels"
      out <- do.call(hist.default,
                     resolve.defaults(list(values),
                                      list(...),
                                      list(freq=!probability,
                                           xlab="Pixel value",
                                           ylab=ylab,
                                           main=main)))
      out$xname <- xname
    } else {
      # plot.default whinges if `probability' given when plot=FALSE
      out <- do.call(hist.default,
                     resolve.defaults(list(values),
                                      list(...)))
      # hack!
      out$xname <- xname
    }
  }
  return(invisible(out))
}

## Re-plot the result of hist.im applied to a factor/logical image.
plot.barplotdata <- function(x, ...) {
  do.call(barplot,
          resolve.defaults(list(height=x$heights),
                           list(...),
                           list(main=paste("Histogram of ", x$xname))))
}

## Discretise a numeric image into a factor image using cut().
cut.im <- function(x, ...) {
  verifyclass(x, "im")
  typ <- x$type
  if(typ %in% c("factor", "logical", "character"))
    stop(paste0("cut.im is not defined for ", typ, "-valued images"),
         call.=FALSE)
  vcut <- cut(as.numeric(as.matrix(x)), ...)
  return(im(vcut,
            xcol=x$xcol, yrow=x$yrow,
            xrange=x$xrange, yrange=x$yrange,
            unitname=unitname(x)))
}

## Sample quantiles of the pixel values (NA pixels removed by default).
quantile.im <- function(x, ...) {
  verifyclass(x, "im")
  q <- do.call(quantile,
               resolve.defaults(list(as.numeric(as.matrix(x))),
                                list(...),
                                list(na.rm=TRUE)))
  return(q)
}

## Generic for integrating a function-like object over its domain.
integral <- function(f, domain=NULL, ...) {
  UseMethod("integral")
}

## Integral of a pixel image: sum of pixel values times pixel area.
## If 'domain' is a tessellation, integrate over each tile separately.
integral.im <- function(f, domain=NULL, ...) {
  verifyclass(f, "im")
  typ <- f$type
  if(!any(typ == c("integer", "real", "complex", "logical")))
    stop(paste("Don't know how to integrate an image of type", sQuote(typ)))
  if(!is.null(domain)) {
    if(is.tess(domain))
      return(sapply(tiles(domain), integral.im, f=f))
    f <- f[domain, drop=FALSE, tight=TRUE]
  }
  a <- with(f, sum(v, na.rm=TRUE) * xstep * ystep)
  return(a)
}

## Logical vector flagging the points of X at which every image in
## Zlist has a non-NA value.
conform.imagelist <- function(X, Zlist) {
  # determine points of X where all images in Zlist are defined
  ok <- rep.int(TRUE, length(X$x))
  for(i in seq_along(Zlist)) {
    Zi <- Zlist[[i]]
    ZiX <- Zi[X, drop=FALSE]
    ok <- ok & !is.na(ZiX)
  }
  return(ok)
}

## Split an image into sub-images over the tiles of a tessellation,
## or over the levels of a factor-valued image.
split.im <- function(x, f, ..., drop=FALSE) {
  stopifnot(is.im(x))
  if(inherits(f, "tess"))
    subsets <- tiles(f)
  else if(is.im(f)) {
    if(f$type != "factor")
      f <- eval.im(factor(f))
    subsets <- tiles(tess(image=f))
  } else
    stop("f should be a tessellation or a factor-valued image")
  if(!is.subset.owin(as.owin(x), as.owin(f)))
    stop("f does not cover the window of x")
  n <- length(subsets)
  out <- vector(mode="list", length=n)
  names(out) <- names(subsets)
  for(i in 1:n)
    out[[i]] <- x[subsets[[i]], drop=drop]
  if(drop)
    return(out)
  else
    return(as.solist(out))
}

## Apply FUN to each sub-image obtained by splitting 'data' by INDICES.
by.im <- function(data, INDICES, FUN, ...) {
  stopifnot(is.im(data))
  V <- split(data, INDICES)
  U <- lapply(V, FUN, ...)
  return(as.solist(U, demote=TRUE))
}

## Embed an image in a larger rectangle 'rect', padding with NA pixels
## while preserving the original pixel grid alignment.
rebound.im <- function(x, rect) {
  stopifnot(is.im(x))
  stopifnot(is.owin(rect))
  rect <- as.rectangle(rect)
  stopifnot(is.subset.owin(as.rectangle(x), rect))
  # compute number of extra rows/columns
  dx <- x$xstep
  nleft  <- max(0, floor((x$xrange[1L]-rect$xrange[1L])/dx))
  nright <- max(0, floor((rect$xrange[2L]-x$xrange[2L])/dx))
  dy <- x$ystep
  nbot <- max(0, floor((x$yrange[1L]-rect$yrange[1L])/dy))
  ntop <- max(0, floor((rect$yrange[2L]-x$yrange[2L])/dy))
  # determine exact x and y ranges (to preserve original pixel locations)
  xrange.new <- x$xrange + c(-nleft, nright) * dx
  yrange.new <- x$yrange + c(-nbot,  ntop)   * dy
  # expand pixel data matrix, choosing an NA of the matching storage type
  nr <- x$dim[1L]
  nc <- x$dim[2L]
  nrnew <- nbot  + nr + ntop
  ncnew <- nleft + nc + nright
  naval <- switch(x$type,
                  factor=,
                  integer=NA_integer_,
                  real=NA_real_,
                  character=NA_character_,
                  complex=NA_complex_,
                  NA)
  vnew <- matrix(naval, nrnew, ncnew)
  if(x$type != "factor") {
    vnew[nbot + (1:nr), nleft + (1:nc)] <- x$v
  } else {
    ## factors are stored as integer codes, then re-factored
    ## NOTE(review): factor(vnew, labels=levels(x)) assumes every level
    ## of x occurs in vnew; if some levels are absent the labels length
    ## will not match - verify against upstream behaviour.
    vnew[nbot + (1:nr), nleft + (1:nc)] <- as.integer(x$v)
    vnew <- factor(vnew, labels=levels(x))
    dim(vnew) <- c(nrnew, ncnew)
  }
  # build new image object
  xnew <- im(vnew,
             xrange = xrange.new,
             yrange = yrange.new,
             unitname = unitname(x))
  return(xnew)
}

## Sorted vector of pixel values.
sort.im <- function(x, ...) {
  verifyclass(x, "im")
  sort(as.vector(as.matrix(x)), ...)
}

## Dimensions of the pixel array (rows, columns).
dim.im <- function(x) { x$dim }

# colour images

## Combine three (or four) images of colour channel values into a single
## factor-valued image of RGB(A) colour strings.  If autoscale=TRUE each
## channel is first rescaled to [0, maxColorValue].
rgbim <- function(R, G, B, A=NULL, maxColorValue=255, autoscale=FALSE) {
  if(autoscale) {
    R <- scaletointerval(R, 0, maxColorValue)
    G <- scaletointerval(G, 0, maxColorValue)
    B <- scaletointerval(B, 0, maxColorValue)
    if(!is.null(A)) A <- scaletointerval(A, 0, maxColorValue)
  }
  Z <- eval.im(factor(rgbNA(as.vector(R), as.vector(G), as.vector(B),
                            as.vector(A),
                            maxColorValue=maxColorValue)))
  return(Z)
}

## Analogue of rgbim for hue/saturation/value channels (each in [0,1]).
hsvim <- function(H, S, V, A=NULL, autoscale=FALSE) {
  if(autoscale) {
    H <- scaletointerval(H, 0, 1)
    S <- scaletointerval(S, 0, 1)
    V <- scaletointerval(V, 0, 1)
    if(!is.null(A)) A <- scaletointerval(A, 0, 1)
  }
  Z <- eval.im(factor(hsvNA(as.vector(H), as.vector(S), as.vector(V),
                            as.vector(A))))
  return(Z)
}

## Generic: rescale values linearly to the interval [from, to].
scaletointerval <- function(x, from=0, to=1, xrange=range(x)) {
  UseMethod("scaletointerval")
}

scaletointerval.default <- function(x, from=0, to=1, xrange=range(x)) {
  x <- as.numeric(x)
  ## recompute the range with na.rm=TRUE when it was not user-supplied
  rr <- if(missing(xrange)) range(x, na.rm=TRUE) else as.numeric(xrange)
  b <- as.numeric(to - from)/diff(rr)
  if(is.finite(b)) {
    y <- from + b * (x - rr[1L])
  } else {
    ## degenerate range: map everything to the midpoint (preserving NAs)
    y <- (from+to)/2 + 0 * x
  }
  ## clamp to [from, to] to guard against rounding
  y[] <- pmin(pmax(y[], from), to)
  return(y)
}

scaletointerval.im <- function(x, from=0, to=1, xrange=range(x)) {
  v <- scaletointerval(x$v, from, to, xrange=xrange)
  y <- im(v, x$xcol, x$yrow, x$xrange, x$yrange, unitname(x))
  return(y)
}

## Apply base::zapsmall to the pixel values.
zapsmall.im <- function(x, digits) {
  if(missing(digits))
    return(eval.im(zapsmall(x)))
  return(eval.im(zapsmall(x, digits=digits)))
}

## The domain/window of an image is the set of non-NA pixels.
domain.im <- Window.im <- function(X, ...) { as.owin(X) }

## Assign a new window: pixels outside 'value' become NA.
"Window<-.im" <- function(X, ..., value) {
  verifyclass(value, "owin")
  X[value, drop=FALSE]
}

## Pad an image with a constant value, either by 'n' pixels on every
## side, or out to cover the window W (mutually exclusive arguments).
padimage <- function(X, value=NA, n=1, W=NULL) {
  stopifnot(is.im(X))
  stopifnot(length(value) == 1)
  if(!missing(n) && !is.null(W))
    stop("Arguments n and W are incompatible", call.=FALSE)
  padW <- !is.null(W)
  if(isfac <- (X$type == "factor")) {
    ## handle factors: work on integer codes, restore levels at the end
    levX <- levels(X)
    if(is.factor(value)) {
      stopifnot(identical(levels(X), levels(value)))
    } else {
      value <- factor(value, levels=levX)
    }
    X <- eval.im(as.integer(X))
    value <- as.integer(value)
  }
  if(!padW) {
    ## pad by 'n' pixels (n may be length 1 or recycled to 4 sides)
    nn <- rep(n, 4)
    nleft   <- nn[1L]
    nright  <- nn[2L]
    nbottom <- nn[3L]
    ntop    <- nn[4L]
  } else {
    ## pad out to window W
    FX <- Frame(X)
    B <- boundingbox(Frame(W), FX)
    nleft   <- max(1, round((FX$xrange[1L] - B$xrange[1L])/X$xstep))
    nright  <- max(1, round((B$xrange[2L] - FX$xrange[2L])/X$xstep))
    nbottom <- max(1, round((FX$yrange[1L] - B$yrange[1L])/X$ystep))
    ntop    <- max(1, round((B$yrange[2L] - FX$yrange[2L])/X$ystep))
  }
  mX <- as.matrix(X)
  dd <- dim(mX)
  mX <- cbind(matrix(value, dd[1L], nleft, byrow=TRUE),
              as.matrix(X),
              matrix(value, dd[1L], nright, byrow=TRUE))
  dd <- dim(mX)
  ## NOTE(review): rev(value) is a no-op here since value has length 1
  ## (enforced above) - presumably a leftover from a vector-valued draft.
  mX <- rbind(matrix(rev(value), nbottom, dd[2L]),
              mX,
              matrix(value, ntop, dd[2L]))
  ## extend the pixel centre coordinates and ranges accordingly
  xcol <- with(X, c(xcol[1L] - (nleft:1) * xstep,
                    xcol,
                    xcol[length(xcol)] + (1:nright) * xstep))
  yrow <- with(X, c(yrow[1L] - (nbottom:1) * ystep,
                    yrow,
                    yrow[length(yrow)] + (1:ntop) * ystep))
  xr <- with(X, xrange + c(-nleft, nright) * xstep)
  yr <- with(X, yrange + c(-nbottom, ntop) * ystep)
  Y <- im(mX, xcol=xcol, yrow=yrow, xrange=xr, yrange=yr,
          unitname=unitname(X))
  if(isfac)
    Y <- eval.im(factor(Y, levels=seq_along(levX), labels=levX))
  if(padW && !is.rectangle(W))
    Y <- Y[W, drop=FALSE]
  return(Y)
}

## Convert an image to a function (x,y) that looks up pixel values.
as.function.im <- function(x, ...)
{ Z <- x f <- function(x,y) { Z[list(x=x, y=y)] } g <- funxy(f, Window(x)) return(g) } anyNA.im <- function(x, recursive=FALSE) { anyNA(x$v) } ZeroValue <- function(x) { UseMethod("ZeroValue") } ZeroValue.im <- function(x) { lev <- levels(x) z <- switch(x$type, factor = factor(lev[1L], levels=lev), integer = integer(1L), logical = logical(1L), real = numeric(1L), complex = complex(1L), character = character(1L), x$v[!is.na(x$v),drop=TRUE][1]) return(z) } spatstat/R/studpermutest.R0000644000176200001440000005627713551737041015420 0ustar liggesusers#' #' studpermtest.R #' #' Original by Ute Hahn 2014 #' #' $Revision: 1.10 $ $Date: 2019/10/16 02:36:54 $ #' #' Studentized permutation test for comparison of grouped point patterns; #' functions to generate these grouped point patterns; #' wrapper for test of reweighted second order stationarity. #' #' studpermu.test #' studentized permutation test for grouped point patterns #' interpreted version, random permutations only. #' A group needs to contain at least two point patterns with at least minpoints each. # #' X the data, may be a list of lists of point patterns, or a hyperframe #' formula if X is a hyperframe, relates point patterns to factor variables that #' determine the groups. If missing, the first column of X that contains #' a factor variable is used. #' summaryfunction the function used in the test #' ... additional arguments for summaryfunction #' rinterval r-interval where summaryfunction is evaluated. If NULL, the #' interval is calculated from spatstat defaults #' (intersection for all patterns) #' nperm number of random permutations #' use.Tbar use the alternative test statistic, for summary functions with #' roughly constant variance, such as K/r or L #' minpoints the minimum number of points a pattern needs to have. Patterns #' with fewer points are not used. 
#' rsteps    discretization steps of the r-interval
#' r         arguments at which to evaluate summaryfunction, overrides rinterval
#'           Should normally not be given, replace by rinterval instead,
#'           this allows r_0 > 0. Also, there is no plausibility check for r so far
#' arguments.in.data  if TRUE, individual extra arguments to summary function that
#'           change are taken from X (which has to be a hyperframe then).
#'           Assumes that the first argument of summaryfunction always is the
#'           point pattern.
#'           This is meant for internal purposes (automatisation)
#
#' returns an object of classes htest and studpermutest, that can be plotted. The
#' plot shows the summary functions for the groups (and the means if requested)

studpermu.test <- local({

  ## main entry point: studentized permutation test (Hahn 2012) comparing
  ## groups of point patterns via a summary function
  studpermu.test <- function (X, formula, summaryfunction = Kest,
                              ...,
                              rinterval = NULL, nperm = 999,
                              use.Tbar = FALSE, # the alternative statistic, use with K/r or L
                              minpoints = 20,
                              rsteps = 128,
                              r = NULL,
                              arguments.in.data = FALSE) {
    #' ---- the loooong preliminaries -------
    #' ---- argument checking paranoia ----
    if (arguments.in.data & !is.hyperframe(X))
      stop(paste("X needs to be a hyperframe",
                 "if arguments for summary function are to be retrieved"),
           call.=FALSE)
    stopifnot(is.function(summaryfunction))
    #' there could be more...
    #' first prepare the data
    if(is.hyperframe(X)) {
      if(dim(X)[2] < 2)
        stop(paste("Hyperframe X needs to contain at least 2 columns,",
                   "one for patterns, one indicating groups"),
             call.=FALSE)
      data <- X # renaming for later.
      ## classify the columns: which could be factors, which are patterns
      Xclass <- unclass(X)$vclass
      factorcandidate <-
        Xclass %in% c("integer", "numeric", "character", "factor")
      ppcandidate <- Xclass == "ppp"
      names(factorcandidate) <- names(ppcandidate) <- names(Xclass) <-
        Xnames <- names(X)
      if(all(!factorcandidate) || all(!ppcandidate))
        stop(paste("Hyperframe X needs to contain at least a column",
                   "with point patterns, and one indicating groups"),
             call.=FALSE)
      if(!missing(formula)){
        #' safety precautions ;-)
        if(!inherits(formula, "formula"))
          stop(paste("Argument", dQuote("formula"), "should be a formula"))
        if (length(formula) < 3)
          stop(paste("Argument", sQuote("formula"),
                     "must have a left hand side"))
        rhs <- rhs.of.formula(formula)
        ppname <- formula[[2]]
        if (!is.name(ppname))
          stop("Left hand side of formula should be a single name")
        ppname <- paste(ppname)
        if(!ppcandidate[ppname])
          stop(paste("Left hand side of formula",
                     "should be the name of a column of point patterns"),
               call.=FALSE)
        groupvars <- all.vars(as.expression(rhs))
        if(!all(groupvars %in% Xnames) || any(!factorcandidate[groupvars]))
          stop(paste("Not all variables on right hand side of formula",
                     "can be interpreted as factors"),
               call.=FALSE)
        #' make the groups to be compared
        group <- interaction(lapply(
          as.data.frame(data[ , groupvars, drop=FALSE]), factor))
        #' rename the point patterns, needs the patch
        newnames <- Xnames
        newnames[Xnames == ppname] <- "pp"
        names(data) <- newnames
        data$group <- group
      } else {
        #' No formula supplied.
        #' Choose first ppp column and first factor column to make pp and groups
        thepp <- which.max(ppcandidate)
        thegroup <- which.max(factorcandidate)
        #' fake formula for output of test result
        formula <- as.formula(paste(Xnames[thepp],"~", Xnames[thegroup]))
        newnames <- Xnames
        newnames[thepp] <- "pp"
        newnames[thegroup] <- "group"
        names(data) <- newnames
        data$group <- as.factor(data$group)
      }
    } else {
      #' X is not a hyperframe, but hopefully a list of ppp
      if(!is.list(X))
        stop("X should be a hyperframe or a list of lists of point patterns")
      if (!is.list(X[[1]]) || !is.ppp(X[[1]][[1]]))
        stop("X is a list, but not a list of lists of point patterns")
      nams <- names(X)
      if(is.null(nams)) nams <- paste("group", seq_along(X))
      pp <- list()
      group <- NULL
      for (i in seq_along(X)) {
        pp <- c(pp, X[[i]])
        group <- c(group, rep(nams[i], length(X[[i]])))
      }
      group <- as.factor(group)
      data <- hyperframe(pp = pp, group = group)
      ppname <- "pp"
    }
    framename <- short.deparse(substitute(X))
    fooname <- short.deparse(substitute(summaryfunction))
    #' sorting out the patterns that contain too few points
    OK <- sapply(data$pp, npoints) >= minpoints
    if((nbad <- sum(!OK)) > 0)
      warning(paste(nbad, "patterns have been discarded",
                    "because they contained fewer than",
                    minpoints, "points"),
              call.=FALSE)
    data <- data[OK, ,drop=FALSE]
    pp <- data$pp
    #' ---- the groups,
    #' or what remains after discarding the poor patterns with few points -----
    #' check if at least two observations in each group
    groupi <- as.integer(data$group)
    ngroups <- max(groupi)
    if(ngroups < 2)
      stop(paste("Sorry, after discarding patterns with fewer than",
                 minpoints, "points,",
                 if(ngroups < 1) "nothing" else "only one group",
                 "is left over.",
                 "\n- nothing to compare, take a break!"),
           call.=FALSE)
    lev <- 1:ngroups
    m <- as.vector(table(groupi))
    ## NOTE(review): the condition m < 3 enforces >= 3 patterns per group,
    ## but the message says "at least two" - verify the intended threshold.
    if (any(m < 3))
      stop(paste("Data groups need to contain at least two patterns;",
                 "\nafter discarding those with fewer than",
                 minpoints, "points, the remaining group sizes are",
                 commasep(m)),
           call.=FALSE)
    #' check if number of possible outcomes is small
    #' WAS: npossible <- factorial(sum(m))/prod(factorial(m))/prod(factorial(table(m)))
    lognpossible <- lgamma(sum(m)+1)-sum(lgamma(m+1))-sum(lgamma(table(m)+1))
    if (lognpossible < log(max(100, nperm)))
      warning("Don't expect exact results - group sizes are too small")
    #' --------- real preliminaries now ------
    #' get interval for arguments
    if(!is.null(r)){
      rinterval <- range(r)
      rsteps <- length(r)
    } else if (is.null(rinterval)) {
      ## derive a default r-interval from spatstat's rmax rule,
      ## intersected over all patterns
      foochar <- substr(fooname, 1, 1)
      if (foochar %in% c("p", "L")) foochar <- "K"
      if (fooname %in% c("Kscaled", "Lscaled")) foochar <- "Kscaled"
      rinterval <- c(0, min(with(data,
                                 rmax.rule(foochar, Window(pp),
                                           intensity(pp)))))
    }
    ranger <- diff(range(rinterval))
    #' r sequence needs to start at 0 for Kest and such
    rr <- r %orifnull% seq(0, rinterval[2], length.out = rsteps + 1)
    taker <- rr >= rinterval[1] & rr <= rinterval[2] # used for testing
    #' now estimate the summary function, finally...
    #' TO DO!!!! Match function call of summary function with data columns!
    #' use arguments.in.data, if applicable. This is for inhomogeneous summary
    #' functions
    #' Force all calls to summaryfunction to use the same edge correction,
    #' rather than allowing correction to depend on npoints
    needcorx <- "correction" %in% names(formals(summaryfunction))
    gavecorx <- "correction" %in% names(list(...))
    corx <- if(needcorx && !gavecorx) "best" else NULL
    #' --------- retrieve arguments for summary function from data, if present
    fvlist <- if(arguments.in.data) {
                #' use arguments in hyperframe 'data' as well as explicit arguments
                if(is.null(corx)) {
                  multicall(summaryfunction, pp, data, r = rr, ...)
                } else {
                  multicall(summaryfunction, pp, data, r = rr, ...,
                            correction=corx)
                }
              } else {
                #' use explicit arguments only
                if(is.null(corx)) {
                  with(data, summaryfunction(pp, r = rr, ...))
                } else {
                  with(data, summaryfunction(pp, r = rr, ...,
                                             correction=corx))
                }
              }
    fvtemplate <- fvlist[[1]]
    valu <- attr(fvtemplate, "valu")
    argu <- attr(fvtemplate, "argu")
    ## matrix of estimated summary-function values, one column per pattern,
    ## restricted to the test interval
    foar <- sapply(lapply(fvlist, "[[", valu), "[", taker)
    #' --------- the real stuff --------------
    #' function that calculates the discrepancy
    #' slow version
    combs <- combn(lev, 2)
    #' --------- now do the real real stuff :-) -------------
    #' generate "simulated values" from random permutations.
    #' possible improvement for future work:
    #' If the number of all permutations (combis) is small,
    #' first generate all permutations and then
    #' sample from them to improve precision
    predigested <- list(lev=lev, foar=foar, m=m, combs=combs,
                        rrr=rr[taker], ranger=ranger)
    if(use.Tbar) {
      Tobs <- Tbarstat(groupi, predigested)
      Tsim <- replicate(nperm, Tbarstat(sample(groupi), predigested))
    } else {
      Tobs <- Tstat(groupi, predigested)
      Tsim <- replicate(nperm, Tstat(sample(groupi), predigested))
    }
    names(Tobs) <- if(use.Tbar) "Tbar" else "T"
    ## Monte Carlo p-value with the usual +1 correction
    pval <- (1 + sum(Tobs < Tsim))/(1 + nperm)
    #' ----- making a test object -----
    method <- c("Studentized permutation test for grouped point patterns",
                if(is.hyperframe(X)) pasteFormula(formula) else NULL,
                choptext(ngroups, "groups:",
                         paste(levels(data$group), collapse=", ")),
                choptext("summary function:", paste0(fooname, ","),
                         "evaluated on r in", prange(rinterval)),
                choptext("test statistic:",
                         if(use.Tbar) "Tbar," else "T,",
                         nperm, "random permutations"))
    fooshort <- switch(fooname,
                       pcf = "pair correlation ",
                       Kinhom = "inhomogeneous K-",
                       Linhom = "inhomogeneous L-",
                       Kscaled = "locally scaled K-",
                       Lscaled = "locally scaled L-",
                       paste(substr(fooname, 1, 1),"-",sep=""))
    alternative <- c(paste("not the same ",fooshort,"function", sep=""))
    testerg <- list(statistic = Tobs,
                    p.value = pval,
                    alternative = alternative,
                    method = method,
                    data.name = framename)
    class(testerg) <- c("studpermutest", "htest")
    #' Add things for plotting
    #' prepare the fvlist, so that it only contains the estimates used,
    fvs <- lapply(fvlist, "[.fv", j=c(argu, valu))
    #' with rinterval as alim
    fvs <- lapply(fvs, "attr<-", which="alim", value=rinterval)
    testerg$curves <- hyperframe(fvs = fvs, groups = data$group)
    fvtheo <- fvlist[[1]]
    fvnames(fvtheo, ".y") <- "theo"
    attr(fvtheo, "alim") <- rinterval
    testerg$curvtheo <- fvtheo[ , c(argu, "theo")]
    #' group means
    grmn <- lapply(lev, splitmean, ind=groupi, f=foar)
    testerg$groupmeans <- lapply(grmn, makefv, xvals=rr[taker],
                                 template=fvtheo)
    return(testerg)
  }

  ## pointwise mean of the summary functions in group l
  splitmean <- function(l, ind, f) {
    apply(f[ , ind == l], 1, mean)
  }

  ## pointwise variance of the group-l curves, divided by group size
  ## (i.e. the variance of the group mean)
  splitvarn <- function(l, ind, f, m) {
    apply(f[ , ind == l], 1, var) / m[l]
  }

  ## pointwise studentized squared difference between groups i[1], i[2]
  studentstat <- function(i, grmean, grvar) {
    (grmean[, i[1]] - grmean[, i[2]])^2 / (grvar[i[1],] + grvar[i[2], ])
  }

  ## test statistic T: sum over group pairs of the integrated
  ## studentized discrepancy
  Tstat <- function (ind = groupi, predigested) {
    #' predigested should be a list with entries lev, foar, m, combs, rrr
    with(predigested, {
      grmean <- sapply(lev, splitmean, ind=ind, f=foar)
      grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m))
      y <- apply(combs, 2, studentstat, grmean=grmean, grvar=grvar)
      sum(apply(y, 2, trapint, x = rrr))
    })
  }

  ## integrated squared difference studentized by interval-averaged variances
  intstudent <- function(i, rrr, grmean, meangrvar) {
    trapint(rrr, (grmean[, i[1]] - grmean[, i[2]])^2 /
                 (meangrvar[i[1]] + meangrvar[i[2]]))
  }

  ## alternative statistic Tbar: variances are averaged over the
  ## r-interval before studentizing (for roughly constant-variance
  ## summary functions such as K/r or L)
  Tbarstat <- function (ind = groupi, predigested) {
    #' predigested should be a list
    #' with entries lev, foar, m, combs, rrr, ranger
    with(predigested, {
      grmean <- sapply(lev, splitmean, ind=ind, f=foar)
      grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m))
      meangrvar <- apply(grvar, 1, trapint, x = rrr)/ranger
      sum(apply(combs, 2, intstudent,
                rrr=rrr, grmean=grmean, meangrvar=meangrvar))
      #' trapint(rr[taker], grvar[i[1],] + grvar[i[2], ]))))
    })
  }

  ## wrap a vector of y-values into an fv object, copying metadata
  ## (labels, formula, alim) from a template fv
  makefv <- function(yvals, xvals, template) {
    fdf <- data.frame(r = xvals, y = yvals)
    argu <- fvnames(template, ".x")
    valu <- fvnames(template, ".y")
    names(fdf) <- c(argu,valu)
    fv(fdf, argu = argu, ylab = attr(template, "ylab"),
       valu = valu, fmla = attr(template,"fmla"),
       alim = attr(template, "alim"))
  }

  #' Trapezoidal rule approximation to integral
  #' ------- trapezoidal rule, with handling of NAs:
  #' NA values are simply ignored ----
  trapint <- function(x, y) {
    nonan <- !is.na(y)
    nn <- sum(nonan)
    if(nn < 2L) return(0)
    Y <- y[nonan]
    X <- x[nonan]
    0.5 * sum( (Y[-1] + Y[-nn]) * diff(X))
  }

  #' call foo(x, further arguments) repeatedly
  #' further arguments are taken from hyperframe H and ...
  multicall <- function(foo, x, H, ...){
    stopifnot(is.hyperframe(H))
    if (is.hyperframe(x)) {
      x <- as.list(x)[[1]]
    } else if(!is.list(x))
      stop("in multicall: x should be a hyperframe or list", call.=FALSE)
    #' check if same length
    nrows <- dim(H)[1]
    if (length(x) != nrows)
      stop(paste("in multicall: x and H need to have",
                 "the same number of rows or list elements"),
           call.=FALSE)
    dotargs <- list(...)
    hnames <- names(H)
    argnames <- names(formals(foo)) #' always assume first argument is given
    ppname <- argnames[1]
    argnames <- argnames[-1]
    ## partial-match explicit '...' arguments against foo's formals;
    ## hyperframe columns may supply any remaining arguments
    dotmatch <- pmatch(names(dotargs), argnames)
    dotmatched <- dotmatch[!is.na(dotmatch)]
    dotuseargs <- dotargs[!is.na(dotmatch)]
    restargs <- if(length(dotmatched) >0) argnames[-dotmatched] else argnames
    hmatch <- pmatch(hnames, restargs)
    huse <- !is.na(hmatch)
    lapply(seq_len(nrows), function (i)
      do.call(foo, c(list(x[[i]]),
                     as.list(H[i, huse, drop=TRUE, strip=FALSE]),
                     dotargs)))
  }

  studpermu.test
})

#' ------------------- plot studpermutest ---------------------------------------
#
#' plot.studpermutest
#' plot the functions that were used in studperm.test
#' also plot group means, if requested
#
#' x      a studpermtest object, the test result
#' fmla   a plot formula as in plot.fv, should be generic, using "." for values
#' ...
further plot parameters #' col, lty, lwd parameter (vectors) for plotting the individual summary functions, #' according to group, if vectors #' col.theo, lty.theo, lwd.theo if not all are NULL, the "theo" curve is also plotted #' lwd.mean a multiplyer for the line width of the group means. #' if NULL, group means are not plotted, defaults to NULL #' lty.mean, col.mean selbsterklaerend #' separately generate a separate plot for each group (then no legends are plotted) #' meanonly do not plot individual summary functions #' legend if TRUE, and plots are not separate, plot a legend #' legendpos ... #' lbox if TRUE, draw box around legend. Defaults to FALSE #' add ... plot.studpermutest <- local({ plot.studpermutest <- function(x, fmla, ..., lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if(meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if(meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox=FALSE, add = FALSE) { stopifnot(inherits(x, "studpermutest")) env.user <- parent.frame() curvlists <- split(x$curves, x$curves$groups) ngroups <- length(curvlists) gnames <- names(curvlists) #' check if theoretical functions shall be plottet plottheo <- !(is.null(lty.theo) & is.null(col.theo) & is.null(lwd.theo)) #' prepare plot parameters for groups if (is.null(lty)) lty <- 1:ngroups if (is.null(col)) col <- 1:ngroups if (is.null(lwd)) lwd <- par("lwd") if (is.null(col.mean)) col.mean <- col if (is.null(lty.mean)) lty.mean <- lty lty <- rep(lty, length.out = ngroups) col <- rep(col, length.out = ngroups) lwd <- rep(lwd, length.out = ngroups) col.mean <- rep(col.mean, length.out = ngroups) lty.mean <- rep(lty.mean, length.out = ngroups) if (plottheo){ if (is.null(lty.theo)) lty.theo <- ngroups + 1#par("lty") if (is.null(col.theo)) col.theo <- ngroups + 1 #par("col") if (is.null(lwd.theo)) lwd.theo <- par("lwd") } #' 
transporting the formula in ... unfortunately does not work #' for the axis labels, because the fvs contain only one variable. #' Have to knit them self if (is.null(ylab)) { if (!missing(fmla)) { #' puha. det bliver noget lappevaerk. fmla <- as.formula(fmla, env=env.user) map <- fvlabelmap(x$curvtheo) lhs <- lhs.of.formula(as.formula(fmla)) ylab <- eval(substitute(substitute(le, mp), list(le = lhs, mp = map))) } else ylab <- attr(x$curvtheo, "yexp") } if (missing(fmla)) fmla <- attr(x$curvtheo, "fmla") if(!is.null(lwd.mean)) lwd.Mean <- lwd.mean*lwd if(separately) { for (i in seq_along(gnames)) { if(!meanonly) plot.fvlist(curvlists[[i]]$fvs, fmla, ..., col = col[i], lwd = lwd[i], lty= lty[i], xlim = xlim, ylim = ylim, ylab = ylab, main = gnames[i]) if (!is.null(lwd.mean)) plot(x$groupmeans[[i]], fmla, ..., col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], main = gnames[i], add = !meanonly, ylim = ylim) if (plottheo) plot(x$curvtheo, fmla, ..., add = TRUE, col = col.theo, lwd = lwd.theo, lty = lty.theo) } } else { #' ---- TODO SIMPLIFY! 
they should all have the same x-range, #' just check y-range ---- lims <- if (meanonly) { plot.fvlist(x$groupmeans, fmla,..., limitsonly = TRUE) } else { as.data.frame(apply(sapply(curvlists, function(C) plot.fvlist(C$fvs, fmla,..., limitsonly = TRUE)), 1, range)) } if(is.null(xlim)) xlim <- lims$xlim if(is.null(ylim)) ylim <- lims$ylim iadd <- add for (i in seq_along(gnames)) { if(!meanonly) plot.fvlist(curvlists[[i]]$fvs, fmla, ..., col = col[i], lwd = lwd[i], lty= lty[i], xlim = xlim, ylim = ylim, ylab= ylab, main = main, add = iadd) iadd <- iadd | !meanonly if (!is.null(lwd.mean)) plot(x$groupmeans[[i]], fmla, ..., col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], add = iadd, xlim = xlim, ylim = ylim, ylab= ylab, main=main) if (plottheo) plot(x$curvtheo, fmla, ..., add = TRUE, col = col.theo, lwd = lwd.theo, lty = lty.theo, xlim = xlim, ylim = ylim, ylab= ylab, main=main) iadd <- TRUE } if(legend) { if(meanonly) { lwd <- lwd.Mean col <- col.mean lty <- lty.mean } if(plottheo){ gnames <- c(gnames, "Poisson mean") col <- c(col, col.theo) lty <- c(lty, lty.theo) lwd <- c(lwd, lwd.theo) } legend(legendpos, gnames, col = col, lty = lty, lwd = lwd, bty=ifelse(lbox, "o", "n")) } } return(invisible(NULL)) } #' ------------------ Helper function---------------- #' flist: list of fv, with plot method plot.fvlist <- function(x, fmla, ..., xlim=NULL, ylim=NULL, add = FALSE, limitsonly = FALSE, main=NULL){ #' no safety precautions if (missing(fmla)) fmla <- attr(x[[1]], "fmla") if (!add | limitsonly) { lims <- sapply(x, plot, fmla, ..., limitsonly = TRUE) if(is.null(xlim)) xlim = range(unlist(lims[1,])) if(is.null(ylim)) ylim = range(unlist(lims[2,])) lims=list(xlim=xlim, ylim=ylim) if(limitsonly) return(lims) plot(x[[1]], fmla, ..., xlim = xlim, ylim = ylim, main = main) } else plot(x[[1]], fmla,..., add=T) for (foo in x[-1]) plot(foo, fmla, ..., add=T) } plot.studpermutest }) spatstat/R/covering.R0000644000176200001440000000212613602545263014264 0ustar 
liggesusers#'
#' covering.R
#'
#' $Revision: 1.4 $ $Date: 2019/12/31 03:57:05 $
#'
#' covering(): compute a point pattern such that every location in the
#' window W lies within distance r of one of the points (an r-covering of W).
covering <- function(W, r, ..., giveup=1000) {
  W <- as.owin(W)
  ## compute distance to boundary
  D <- distmap(W, invert=TRUE, ...)
  D <- D[W, drop=FALSE]
  M <- as.owin(D)
  pixstep <- max(M$xstep, M$ystep)
  ## very small distances:
  ## if r is below the pixel resolution, the pixel grid itself
  ## is the finest covering we can represent
  if(r <= pixstep) {
    warning("r is smaller than the pixel resolution: returning pixel centres",
            call.=FALSE)
    xy <- rasterxy.mask(M, drop=TRUE)
    return(ppp(xy$x, xy$y, window=W, check=FALSE))
  }
  ## find the point of W farthest from the boundary
  X <- where.max(D)
  ## build a hexagonal grid through this point
  ## (spacing shrunk to 2r/3 for non-convex windows, minus one pixel
  ##  step as a safety margin against discretisation error)
  ruse <- if(is.convex(W)) r else (r * 2/3)
  ruse <- max(pixstep, ruse - pixstep)
  H <- hexgrid(W, ruse, offset=c(X$x, X$y), origin=c(0,0))
  if(npoints(H) == 0) H <- X
  ## this may not suffice if W is irregular:
  ## greedily add the farthest-uncovered location until covered
  for(i in 1:giveup) {
    DH <- distmap(H)
    if(max(DH) < ruse && npoints(H) > 0) return(H)
    Hnew <- where.max(DH)
    H <- superimpose(H, Hnew, W=W)
  }
  stop(paste("Failed to converge after adding", giveup, "points"),
       call.=FALSE)
}
spatstat/R/rat.R0000644000176200001440000001262213606002167013233 0ustar liggesusers#
# rat.R
#
# Ratio objects
#
# Numerator and denominator are stored as attributes
#
# $Revision: 1.12 $ $Date: 2020/01/10 03:10:03 $
#

# rat(): attach 'numerator' and 'denominator' as attributes of 'ratio'
# and prepend class "rat".  When check=TRUE all three objects must be
# mutually compatible (in the sense of compatible()).
rat <- function(ratio, numerator, denominator, check=TRUE) {
  if(check) {
    stopifnot(compatible(numerator, denominator))
    stopifnot(compatible(ratio, denominator))
  }
  attr(ratio, "numerator") <- numerator
  attr(ratio, "denominator") <- denominator
  class(ratio) <- unique(c("rat", class(ratio)))
  return(ratio)
}

# print method: print the underlying object, then flag the extra info
print.rat <- function(x, ...) {
  NextMethod("print")
  cat("[Contains ratio information]\n")
  return(invisible(NULL))
}

compatible.rat <- function(A, B, ...)
{ NextMethod("compatible") } pool.rat <- local({ Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Mul <- function(A,B){ force(A); force(B); eval.fv(A*B, relabel=FALSE) } pool.rat <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) n <- narg <- length(argh) if(narg == 0) return(NULL) if(narg == 1) return(argh[[1]]) ## israt <- unlist(lapply(argh, inherits, what="rat")) if(any(bad <- !israt)) { nbad <- sum(bad) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "contain ratio (numerator/denominator) information")) } isfv <- unlist(lapply(argh, is.fv)) if(!all(isfv)) stop("All arguments must be fv objects") ## extract template <- vanilla.fv(argh[[1]]) Y <- lapply(argh, attr, which="numerator") X <- lapply(argh, attr, which="denominator") X <- do.call(harmonise, X) Y <- do.call(harmonise, Y) templateX <- vanilla.fv(X[[1]]) templateY <- vanilla.fv(Y[[1]]) ## compute products if(!is.null(weights)) { check.nvector(weights, narg, things="Functions") X <- Map(Mul, X, weights) Y <- Map(Mul, Y, weights) } ## sum sumX <- Reduce(Add, X) sumY <- Reduce(Add, Y) attributes(sumX) <- attributes(templateX) attributes(sumY) <- attributes(templateY) ## ratio-of-sums Ratio <- eval.fv(sumY/sumX, relabel=FALSE) attributes(Ratio) <- attributes(template) ## variance calculation if(variance) { meanX <- eval.fv(sumX/n, relabel=FALSE) meanY <- eval.fv(sumY/n, relabel=FALSE) sumX2 <- Reduce(Add, lapply(X, Square)) sumY2 <- Reduce(Add, lapply(Y, Square)) varX <- eval.fv((sumX2 - n * meanX^2)/(n-1), relabel=FALSE) varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE) XY <- Map(Mul, X, Y) sumXY <- Reduce(Add, XY) covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE) ## variance by delta method relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2 - 2 * covXY/(meanX * meanY)), relabel=FALSE) Variance <- 
eval.fv(Ratio^2 * relvar/n, relabel=FALSE) attributes(Variance) <- attributes(template) ## two sigma CI hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE) loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE) attributes(hiCI) <- attributes(loCI) <- attributes(template) } ## dress up if(relabel) { Ratio <- prefixfv(Ratio, tagprefix="pool", descprefix="pooled ", lablprefix="") if(variance) { Variance <- prefixfv(Variance, tagprefix="var", descprefix="delta-method variance estimate of ", lablprefix="bold(var)~") hiCI <- prefixfv(hiCI, tagprefix="hi", descprefix="upper limit of two-sigma CI based on ", lablprefix="bold(hi)~") loCI <- prefixfv(loCI, tagprefix="lo", descprefix="lower limit of two-sigma CI based on ", lablprefix="bold(lo)~") } } result <- if(!variance) Ratio else Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) return(result) } pool.rat }) adjust.ratfv <- function(f, columns=fvnames(f, "*"), numfactor=1, denfactor=1) { stopifnot(is.fv(f)) f[,columns] <- (numfactor/denfactor) * as.data.frame(f)[,columns] if(numfactor != 1 && !is.null(num <- attr(f, "numerator"))) { num[,columns] <- numfactor * as.data.frame(num)[,columns] attr(f, "numerator") <- num } if(denfactor != 1 && !is.null(den <- attr(f, "denominator"))) { den[,columns] <- denfactor * as.data.frame(den)[,columns] attr(f, "denominator") <- den } return(f) } tweak.ratfv.entry <- function(x, ...) { # apply same tweak to function, numerator and denominator. x <- tweak.fv.entry(x, ...) if(!is.null(num <- attr(x, "numerator"))) attr(x, "numerator") <- tweak.fv.entry(num, ...) if(!is.null(den <- attr(x, "denominator"))) attr(x, "denominator") <- tweak.fv.entry(den, ...) return(x) } "[.rat" <- function(x, ...) { if(!is.fv(x)) stop("Not yet implemented for non-fv ratios") num <- attr(x, "numerator") den <- attr(x, "denominator") class(x) <- "fv" x <- x[...] den <- den[...] num <- num[...] 
attr(x, "numerator") <- num attr(x, "denominator") <- den class(x) <- unique(c("rat", class(x))) return(x) } spatstat/R/solist.R0000644000176200001440000001407213536320135013763 0ustar liggesusers## ## solist.R ## ## Methods for class `solist' (spatial object list) ## ## and related classes 'anylist', 'ppplist', 'imlist' ## ## plot.solist is defined in plot.solist.R ## ## $Revision: 1.20 $ $Date: 2019/09/12 01:30:50 $ anylist <- function(...) { x <- list(...) class(x) <- c("anylist", "listof", class(x)) return(x) } print.anylist <- function (x, ...) { ll <- length(x) if(ll == 0) { splat("(Zero length list)") return(invisible(NULL)) } nn <- names(x) if (length(nn) != ll) nn <- paste("Component", seq.int(ll)) spaceok <- waxlyrical('space') for (i in seq_len(ll)) { splat(paste0(nn[i], ":")) print(x[[i]], ...) if(spaceok && i < ll) cat("\n") } return(invisible(NULL)) } as.anylist <- function(x) { if(inherits(x, "anylist")) return(x) if(!is.list(x)) x <- list(x) class(x) <- c("anylist", "listof", class(x)) return(x) } "[.anylist" <- function(x, i, ...) { cl <- oldClass(x) ## invoke list method y <- NextMethod("[") if(length(y) == 0) return(list()) class(y) <- cl return(y) } "[<-.anylist" <- function(x, i, value) { as.anylist(NextMethod("[<-")) } summary.anylist <- function(object, ...) { as.anylist(lapply(object, summary, ...)) } pool.anylist <- function(x, ...) { do.call(pool, append(x, list(...))) } ## .................... solist ............................. is.sob <- local({ ## test whether x is a spatial object suitable for solist sobjectclasses <- c("ppp", "psp", "im", "owin", "quad", "tess", "msr", "quadratcount", "quadrattest", "layered", "funxy", "distfun", "nnfun", "lpp", "linnet", "linfun", "lintess", "influence.ppm", "leverage.ppm") # Note 'linim' inherits 'im' # 'dfbetas.ppm' inherits 'msr' is.sob <- function(x) { inherits(x, what=sobjectclasses) } is.sob }) solist <- function(..., check=TRUE, promote=TRUE, demote=FALSE, .NameBase) { stuff <- list(...) 
if(length(stuff) && !missing(.NameBase) && !any(nzchar(names(stuff)))) names(stuff) <- paste(.NameBase, seq_along(stuff)) if((check || demote) && !all(sapply(stuff, is.sob))) { if(demote) return(as.anylist(stuff)) stop("Some arguments of solist() are not 2D spatial objects") } class(stuff) <- c("solist", "anylist", "listof", class(stuff)) if(promote) { if(all(unlist(lapply(stuff, is.ppp)))) { class(stuff) <- c("ppplist", class(stuff)) } else if(all(unlist(lapply(stuff, is.im)))) { class(stuff) <- c("imlist", class(stuff)) } } return(stuff) } as.solist <- function(x, ...) { if(inherits(x, "solist") && length(list(...)) == 0) { #' wipe superfluous info if(inherits(x, "ppplist")) attributes(x)[c("fsplit", "fgroup")] <- NULL class(x) <- c("solist", "anylist", "listof") return(x) } #' needs to be enclosed in list() ? if(!is.list(x) || (is.sob(x) && !inherits(x, "layered"))) x <- list(x) return(do.call(solist, append(x, list(...)))) } is.solist <- function(x) inherits(x, "solist") print.solist <- function (x, ...) { what <- if(inherits(x, "ppplist")) "point patterns" else if(inherits(x, "imlist")) "pixel images" else "spatial objects" splat(paste("List of", what)) parbreak() NextMethod("print") } "[.solist" <- function(x, i, ...) { cl <- oldClass(x) if(!missing(i) && is.owin(i)) { ## spatial subset y <- lapply(unclass(x), "[", i=i, ...) } else { ## invoke list method y <- NextMethod("[") } if(length(y) == 0) return(list()) class(y) <- cl return(y) } "[<-.solist" <- function(x, i, value) { ## invoke list method y <- NextMethod("[<-") ## check again return(do.call(solist, y)) } summary.solist <- function(object, ...) { x <- lapply(object, summary, ...) attr(x, "otype") <- if(inherits(object, "ppplist")) "ppp" else if(inherits(object, "imlist")) "im" else "" class(x) <- c("summary.solist", "anylist") x } print.summary.solist <- function(x, ...) 
{
  ## x is a summary.solist; 'otype' records the common class of the entries
  what <- switch(attr(x, "otype"),
                 ppp="point patterns",
                 im="pixel images",
                 "spatial objects")
  splat("Summary of", length(x), what)
  parbreak()
  NextMethod("print")
}

# convert a solist to a 'layered' object with one layer per entry
as.layered.solist <- function(X) {
  layered(LayerList=X)
}

#' ----- ppplist and imlist ----------------------------
#' for efficiency only

#' coerce x to a list of point patterns ('ppplist');
#' when check=TRUE, validate via as.solist() that all entries are ppp
as.ppplist <- function(x, check=TRUE) {
  if(check) {
    x <- as.solist(x, promote=TRUE, check=TRUE)
    if(!inherits(x, "ppplist"))
      stop("some entries are not point patterns")
  }
  class(x) <- unique(c("ppplist", "solist", "anylist", "listof", class(x)))
  return(x)
}

is.ppplist <- function(x) inherits(x, "ppplist")

#' coerce x to a list of pixel images ('imlist');
#' when check=TRUE, validate via as.solist() that all entries are im
as.imlist <- function(x, check=TRUE) {
  if(check) {
    x <- as.solist(x, promote=TRUE, check=TRUE)
    if(!inherits(x, "imlist"))
      stop("some entries are not images")
  }
  class(x) <- unique(c("imlist", "solist", "anylist", "listof", class(x)))
  return(x)
}

is.imlist <- function(x) inherits(x, "imlist")

# --------------- counterparts of 'lapply' --------------------

# lapply, returning the results as an 'anylist'
anylapply <- function(X, FUN, ...) {
  v <- lapply(X, FUN, ...)
  return(as.anylist(v))
}

# lapply, returning the results as a 'solist'
# (check/promote/demote are passed to as.solist)
solapply <- function(X, FUN, ..., check=TRUE, promote=TRUE, demote=FALSE) {
  v <- lapply(X, FUN, ...)
u <- as.solist(v, check=check, promote=promote, demote=demote) return(u) } density.ppplist <- function(x, ..., se=FALSE) { y <- lapply(x, density, ..., se=se) if(!se) return(as.solist(y, demote=TRUE)) y.est <- lapply(y, getElement, name="estimate") y.se <- lapply(y, getElement, name="SE") z <- list(estimate = as.solist(y.est, demote=TRUE), SE = as.solist(y.se, demote=TRUE)) return(z) } expandSpecialLists <- function(x, special="solist") { ## x is a list which may include entries which are lists, of class 'special' ## unlist these entries only hit <- sapply(x, inherits, what=special) if(!any(hit)) return(x) # wrap each *non*-special entry in list() x[!hit] <- lapply(x[!hit], list) # now strip one layer of list() from all entries return(unlist(x, recursive=FALSE)) } spatstat/R/pp3.R0000644000176200001440000001777413421754003013162 0ustar liggesusers# # pp3.R # # class of three-dimensional point patterns in rectangular boxes # # $Revision: 1.30 $ $Date: 2019/01/23 02:41:43 $ # box3 <- function(xrange=c(0,1), yrange=xrange, zrange=yrange, unitname=NULL) { stopifnot(is.numeric(xrange) && length(xrange) == 2 && diff(xrange) > 0) stopifnot(is.numeric(yrange) && length(yrange) == 2 && diff(yrange) > 0) stopifnot(is.numeric(zrange) && length(zrange) == 2 && diff(zrange) > 0) out <- list(xrange=xrange, yrange=yrange, zrange=zrange, units=as.unitname(unitname)) class(out) <- "box3" return(out) } as.box3 <- function(...) { a <- list(...) 
n <- length(a)
  if(n == 0) stop("No arguments given")
  if(n == 1) {
    ## single argument: dispatch on its class
    a <- a[[1]]
    if(inherits(a, "box3")) return(a)
    if(inherits(a, "pp3")) return(a$domain)
    if(inherits(a, "boxx")){
      ## a boxx is acceptable only if it is three-dimensional
      if(ncol(a$ranges)==3)
        return(box3(a$ranges[,1], a$ranges[,2], a$ranges[,3]))
      stop("Supplied boxx object does not have dimension three")
    }
    if(inherits(a, "ppx")) return(as.box3(a$domain))
    if(is.numeric(a)) {
      ## six numbers interpreted as c(xmin,xmax, ymin,ymax, zmin,zmax)
      if(length(a) == 6)
        return(box3(a[1:2], a[3:4], a[5:6]))
      stop(paste("Don't know how to interpret", length(a),
                 "numbers as a box"))
    }
    if(!is.list(a))
      stop("Don't know how to interpret data as a box")
  }
  ## otherwise pass the arguments straight to box3()
  return(do.call(box3, a))
}

# print method for box3, e.g. "Box: [0, 1] x [0, 1] x [0, 1] units ..."
print.box3 <- function(x, ...) {
  bracket <- function(z) paste("[",
                               paste(signif(z, 5), collapse=", "),
                               "]", sep="")
  v <- paste(unlist(lapply(x[1:3], bracket)), collapse=" x ")
  s <- summary(unitname(x))
  splat("Box:", v, s$plural, s$explain)
  invisible(NULL)
}

unitname.box3 <- function(x) { as.unitname(x$units) }

"unitname<-.box3" <- function(x, value) {
  x$units <- as.unitname(value)
  return(x)
}

# expand a box3 by 'left' (and 'right') on each side, via the boxx methods
grow.box3 <- function(W, left, right=left) {
  as.box3(grow.boxx(as.boxx(W), left, right))
}

eroded.volumes <- function(x, r) { UseMethod("eroded.volumes") }

# volume of the box eroded by distance r in each dimension
# (clamped at zero when r exceeds half a side length)
eroded.volumes.box3 <- function(x, r) {
  b <- as.box3(x)
  ax <- pmax.int(0, diff(b$xrange) - 2 * r)
  ay <- pmax.int(0, diff(b$yrange) - 2 * r)
  az <- pmax.int(0, diff(b$zrange) - 2 * r)
  ax * ay * az
}

shortside <- function(x) { UseMethod("shortside") }

shortside.box3 <- function(x) { min(sidelengths(x)) }

sidelengths <- function(x) { UseMethod("sidelengths") }

# lengths of the three sides of the box
sidelengths.box3 <- function(x) {
  with(x, c(diff(xrange), diff(yrange), diff(zrange)))
}

# smallest box3 containing all of the given boxes (or coercible objects)
bounding.box3 <- function(...) {
  wins <- list(...)
boxes <- lapply(wins, as.box3) xr <- range(unlist(lapply(boxes, getElement, name="xrange"))) yr <- range(unlist(lapply(boxes, getElement, name="yrange"))) zr <- range(unlist(lapply(boxes, getElement, name="zrange"))) box3(xr, yr, zr) } pp3 <- function(x, y, z, ..., marks=NULL) { stopifnot(is.numeric(x)) stopifnot(is.numeric(y)) stopifnot(is.numeric(z)) b <- as.box3(...) out <- ppx(data=data.frame(x=x,y=y,z=z), domain=b) class(out) <- c("pp3", class(out)) if(!is.null(marks)) marks(out) <- marks return(out) } domain.pp3 <- function(X, ...) { X$domain } is.pp3 <- function(x) { inherits(x, "pp3") } npoints.pp3 <- function(x) { nrow(x$data) } print.pp3 <- function(x, ...) { ism <- is.marked(x, dfok=TRUE) nx <- npoints(x) splat(if(ism) "Marked three-dimensional" else "Three-dimensional", "point pattern:", nx, ngettext(nx, "point", "points")) if(ism) { mks <- marks(x, dfok=TRUE) if(is.data.frame(mks) | is.hyperframe(mks)) { ## data frame of marks exhibitStringList("Mark variables:", names(mks)) } else { ## vector of marks if(is.factor(mks)) { exhibitStringList("Multitype, with levels =", levels(mks)) } else { ## Numeric, or could be dates if(inherits(mks, "Date")) { splat("marks are dates, of class", sQuote("Date")) } else if(inherits(mks, "POSIXt")) { splat("marks are dates, of class", sQuote("POSIXt")) } else { splat(paste0("marks are", if(is.numeric(mks)) " numeric," else NULL), "of storage type ", sQuote(typeof(mks))) } } } } print(x$domain) invisible(NULL) } summary.pp3 <- function(object, ...) { sd <- summary(object$data) np <- sd$ncases dom <- object$domain v <- volume.box3(dom) u <- summary(unitname(dom)) intens <- np/v out <- list(np=np, sumdat=sd, dom=dom, v=v, u=u, intensity=intens) class(out) <- "summary.pp3" return(out) } print.summary.pp3 <- function(x, ...) 
{
  ## x is a summary.pp3 as constructed by summary.pp3()
  splat("Three-dimensional point pattern")
  splat(x$np, ngettext(x$np, "point", "points"))
  print(x$dom)
  u <- x$u
  v <- x$v
  splat("Volume", v, "cubic",
        if(v == 1) u$singular else u$plural,
        u$explain)
  splat("Average intensity", x$intensity,
        "points per cubic", u$singular, u$explain)
  invisible(NULL)
}

# plot a three-dimensional point pattern in perspective view
#   eye, org   : viewpoint and centre of view; when NULL they are
#                derived from the viewing angles theta, phi (degrees)
#   type       : "p" points, "n" no points (box only), "h" hair lines
#   box.back, box.front : graphics parameters for the rear/front box edges
plot.pp3 <- function(x, ..., eye=NULL, org=NULL, theta=25, phi=15,
                     type=c("p", "n", "h"),
                     box.back=list(col="pink"),
                     box.front=list(col="blue", lwd=2)) {
  xname <- short.deparse(substitute(x))
  type <- match.arg(type)
  # given arguments
  argh <- list(...)
  ## explicitly supplied box styles take precedence over those in '...'
  if(!missing(box.front)) argh$box.front <- box.front
  if(!missing(box.back)) argh$box.back <- box.back
  # Now apply formal defaults above
  formaldefaults <- list(box.front=box.front, box.back=box.back)
  #'
  coo <- as.matrix(coords(x))
  xlim <- x$domain$xrange
  ylim <- x$domain$yrange
  zlim <- x$domain$zrange
  if(is.null(org)) org <- c(mean(xlim), mean(ylim), mean(zlim))
  if(is.null(eye)) {
    ## convert viewing angles to radians and place the eye at
    ## distance 2*diameter(domain) from the centre of view
    theta <- theta * pi/180
    phi <- phi * pi/180
    d <- 2 * diameter(x$domain)
    eye <- org + d * c(cos(phi) * c(sin(theta), -cos(theta)), sin(phi))
  }
  deefolts <- spatstat.options('par.pp3')
  ## determine default eye position and centre of view
  do.call(plot3Dpoints,
          resolve.defaults(list(xyz=coo, eye=eye, org=org, type=type),
                           argh,
                           deefolts,
                           formaldefaults,
                           list(main=xname,
                                xlim=xlim, ylim=ylim, zlim=zlim)))
}

"[.pp3" <- function(x, i, drop=FALSE, ...)
{ answer <- NextMethod("[") if(is.ppx(answer)) class(answer) <- c("pp3", class(answer)) return(answer) } unitname.pp3 <- function(x) { unitname(x$domain) } "unitname<-.pp3" <- function(x, value) { d <- x$domain unitname(d) <- value x$domain <- d return(x) } diameter.box3 <- function(x) { stopifnot(inherits(x, "box3")) with(x, sqrt(diff(xrange)^2+diff(yrange)^2+diff(zrange)^2)) } volume <- function(x) { UseMethod("volume") } volume.box3 <- function(x) { stopifnot(inherits(x, "box3")) with(x, prod(diff(xrange), diff(yrange), diff(zrange))) } runifpoint3 <- function(n, domain=box3(), nsim=1, drop=TRUE) { domain <- as.box3(domain) result <- vector(mode="list", length=nsim) dd <- as.list(domain)[c("xrange", "yrange", "zrange")] for(i in 1:nsim) { x <- with(dd, runif(n, min=xrange[1], max=xrange[2])) y <- with(dd, runif(n, min=yrange[1], max=yrange[2])) z <- with(dd, runif(n, min=zrange[1], max=zrange[2])) result[[i]] <- pp3(x,y,z,domain) } if(drop && nsim == 1) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } rpoispp3 <- function(lambda, domain=box3(), nsim=1, drop=TRUE) { domain <- as.box3(domain) v <- volume(domain) if(!(is.numeric(lambda) && length(lambda) == 1)) stop("lambda must be a single numeric value") np <- rpois(nsim, lambda * v) dd <- as.list(domain)[c("xrange", "yrange", "zrange")] result <- vector(mode="list", length=nsim) for(i in 1:nsim) { ni <- np[i] x <- with(dd, runif(ni, min=xrange[1], max=xrange[2])) y <- with(dd, runif(ni, min=yrange[1], max=yrange[2])) z <- with(dd, runif(ni, min=zrange[1], max=zrange[2])) result[[i]] <- pp3(x,y,z,domain) } if(drop && nsim == 1) return(result[[1]]) result <- as.anylist(result) names(result) <- paste("Simulation", 1:nsim) return(result) } spatstat/R/rhohat.R0000644000176200001440000006553513613547031013747 0ustar liggesusers#' #' rhohat.R #' #' $Revision: 1.85 $ $Date: 2020/01/27 10:22:38 $ #' #' Non-parametric estimation of a transformation rho(z) 
determining #' the intensity function lambda(u) of a point process in terms of a #' spatial covariate Z(u) through lambda(u) = rho(Z(u)). #' More generally allows offsets etc. #' Copyright (c) Adrian Baddeley 2015-2019 #' GNU Public Licence GPL >= 2.0 rhohat <- function(object, covariate, ...) { UseMethod("rhohat") } rhohat.ppp <- rhohat.quad <- function(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, dimyx=NULL, eps=NULL, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # validate model if(is.null(baseline)) { model <- ppm(object ~1) reference <- "Lebesgue" } else { model <- ppm(object ~ offset(log(baseline))) reference <- "baseline" } modelcall <- NULL if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(data.ppm(model)) } else { covunits <- NULL } W <- Window(data.ppm(model)) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., subset=subset, weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, positiveCI=positiveCI, modelcall=modelcall, callstring=callstring) } rhohat.ppm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, 
smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, dimyx=NULL, eps=NULL, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 if("baseline" %in% names(list(...))) warning("Argument 'baseline' ignored: not available for rhohat.ppm") ## validate model model <- object reference <- "model" modelcall <- model$call if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(data.ppm(model)) } else { covunits <- NULL } W <- Window(data.ppm(model)) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, positiveCI=positiveCI, modelcall=modelcall, callstring=callstring) } rhohat.lpp <- rhohat.lppm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # 
## rhohatEngine
##
## Common back-end for the rhohat methods (rhohat.ppp/ppm/lpp/lppm).
## Evaluates the covariate at the data points and at pixels/quadrature
## points, assembles weights and normalising constants, then delegates
## the actual estimation to rhohatCalc.
##
## Arguments:
##   model      fitted model (or dummy model) supplying data and intensity
##   covariate  spatial covariate (function, image, or expression)
##   reference  reference measure: "Lebesgue", fitted "model", or "baseline"
##   volume     total volume (area of window, or total network length)
##   resolution list of resolution arguments passed through to evalCovar
##   ...        further arguments passed to rhohatCalc
## Returns: an object of class "rhohat" (an "fv" object), from rhohatCalc.
rhohatEngine <- function(model, covariate,
                         reference=c("Lebesgue", "model", "baseline"),
                         volume, ...,
                         subset=NULL, weights=NULL,
                         method=c("ratio", "reweight", "transform"),
                         horvitz=FALSE,
                         smoother=c("kernel", "local",
                                    "decreasing", "increasing"),
                         resolution=list(),
                         n=512, bw="nrd0", adjust=1, from=NULL, to=NULL,
                         bwref=bw, covname, covunits=NULL, confidence=0.95,
                         modelcall=NULL, callstring="rhohat") {
  reference <- match.arg(reference)
  # evaluate the covariate at data points and at pixels
  stuff <- do.call(evalCovar,
                   append(list(model=model,
                               covariate=covariate,
                               subset=subset),
                          resolution))
  # unpack
  values <- stuff$values
  # values at each data point
  ZX <- values$ZX
  lambdaX <- values$lambdaX
  # values at each pixel
  Zimage <- values$Zimage
  lambdaimage <- values$lambdaimage # could be multiple images
  # values at each pixel (for .ppp, .ppm) or quadrature point (for .lpp, .lppm)
  Zvalues <- values$Zvalues
  lambda <- values$lambda
  ## weights
  ## Coerce 'weights' to a numeric vector, one entry per data point:
  ## an image is looked up at the data points, a function is evaluated
  ## at their coordinates, a numeric vector is length-checked.
  ## NOTE(review): this uses data.ppm(model) even when the model came
  ## from the linear-network path (rhohat.lpp/lppm) -- confirm that
  ## data.ppm is applicable there when weights are supplied.
  if(!is.null(weights)) {
    X <- data.ppm(model)
    if(is.im(weights))
      weights <- safelookup(weights, X)
    else if(is.function(weights))
      weights <- weights(X$x, X$y)
    else if(is.numeric(weights) && is.vector(as.numeric(weights)))
      check.nvector(weights, npoints(X))
    else stop(paste(sQuote("weights"),
                    "should be a vector, a pixel image, or a function"))
  }
  # normalising constants
  # Under the Lebesgue reference (or Horvitz-Thompson weighting) the
  # denominator is just the volume; otherwise it is scaled by the mean
  # fitted intensity so that rho is relative to the model.
  denom <- volume *
    (if(reference == "Lebesgue" || horvitz) 1 else mean(lambda))
  # info
  # saved for later use by print/predict methods via attr(, "stuff")
  savestuff <- list(reference   = reference,
                    horvitz     = horvitz,
                    Zimage      = Zimage,
                    lambdaimage = lambdaimage)
  # calculate rho-hat
  result <- rhohatCalc(ZX, Zvalues, lambda, denom,
                       ...,
                       weights=weights, lambdaX=lambdaX,
                       method=method, horvitz=horvitz, smoother=smoother,
                       n=n, bw=bw, adjust=adjust, from=from, to=to,
                       bwref=bwref, covname=covname, confidence=confidence,
                       covunits=covunits,
                       modelcall=modelcall, callstring=callstring,
                       savestuff=savestuff)
  return(result)
}
horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing"), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI=(smoother == "local"), markovCI=TRUE, covunits = NULL, modelcall=NULL, callstring=NULL, savestuff=list()) { method <- match.arg(method) smoother <- match.arg(smoother) ## check availability of locfit package if(smoother == "local" && !requireNamespace("locfit", quietly=TRUE)) { warning(paste("In", paste0(dQuote(callstring), ":"), "package", sQuote("locfit"), "is not available;", "unable to perform local likelihood smoothing;", "using kernel smoothing instead"), call.=FALSE) smoother <- "kernel" } ## validate stopifnot(is.numeric(ZX)) stopifnot(is.numeric(Zvalues)) stopifnot(is.numeric(lambda)) stopifnot(length(lambda) == length(Zvalues)) stopifnot(all(is.finite(lambda))) check.1.real(denom) ## if(horvitz) { ## data points will be weighted by reciprocal of model intensity weights <- (weights %orifnull% 1)/lambdaX } ## normalising constants nX <- if(is.null(weights)) length(ZX) else sum(weights) kappahat <- nX/denom ## limits Zrange <- range(ZX, Zvalues) if(is.null(from)) from <- Zrange[1] if(is.null(to)) to <- Zrange[2] if(from > Zrange[1] || to < Zrange[2]) stop(paste("In", paste0(dQuote(callstring), ":"), "interval [from, to] =", prange(c(from,to)), "does not contain the range of data values =", prange(Zrange)), call.=FALSE) ## critical constant for CI's crit <- qnorm((1+confidence)/2) percentage <- paste(round(100 * confidence), "%%", sep="") CIblurb <- paste("pointwise", percentage, "confidence interval") ## estimate densities switch(smoother, kernel = { ## ............... kernel smoothing ...................... ## reference density (normalised) for calculation ghat <- density(Zvalues,weights=if(horvitz) NULL else lambda/sum(lambda), bw=bwref,adjust=adjust,n=n,from=from,to=to, ...) 
xxx <- ghat$x ghatfun <- interpolate(ghat) ## relative density switch(method, ratio={ ## compute ratio of smoothed densities fhat <- unnormdensity(ZX,weights=weights, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) fhatfun <- interpolate(fhat) Ghat.xxx <- denom * ghatfun(xxx) yyy <- fhatfun(xxx)/Ghat.xxx ## compute variance approximation sigma <- fhat$bw weights2 <- if(is.null(weights)) NULL else weights^2 fstar <- unnormdensity(ZX,weights=weights2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) fstarfun <- interpolate(fstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * fstarfun(xxx)/Ghat.xxx^2 }, reweight={ ## weight Z values by reciprocal of reference wt <- (weights %orifnull% 1)/(denom * ghatfun(ZX)) rhat <- unnormdensity(ZX, weights=wt, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) rhatfun <- interpolate(rhat) yyy <- rhatfun(xxx) ## compute variance approximation sigma <- rhat$bw rongstar <- unnormdensity(ZX, weights=wt^2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) rongstarfun <- interpolate(rongstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * rongstarfun(xxx) }, transform={ ## probability integral transform Gfun <- interpolate(ghat$x, cumsum(ghat$y)/sum(ghat$y)) GZX <- Gfun(ZX) ## smooth density on [0,1] qhat <- unnormdensity(GZX,weights=weights, bw=bw,adjust=adjust, n=n, from=0, to=1, ...) qhatfun <- interpolate(qhat) ## edge effect correction one <- density(seq(from=0,to=1,length.out=512), bw=qhat$bw, adjust=1, n=n,from=0, to=1, ...) onefun <- interpolate(one) ## apply to transformed values Gxxx <- Gfun(xxx) Dxxx <- denom * onefun(Gxxx) yyy <- qhatfun(Gxxx)/Dxxx ## compute variance approximation sigma <- qhat$bw weights2 <- if(is.null(weights)) NULL else weights^2 qstar <- unnormdensity(GZX,weights=weights2, bw=bw,adjust=adjust/sqrt(2), n=n,from=0, to=1, ...) 
qstarfun <- interpolate(qstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * qstarfun(Gxxx)/Dxxx^2 }) vvvname <- "Variance of estimator" vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="") sd <- sqrt(vvv) if(!positiveCI) { hi <- yyy + crit * sd lo <- yyy - crit * sd } else { sdlog <- ifelse(yyy > 0, sd/yyy, 0) sss <- exp(crit * sdlog) hi <- yyy * sss lo <- yyy / sss if(markovCI) { ## truncate extremely large confidence intervals ## using Markov's Inequality hi <- pmin(hi, yyy/(1-confidence)) } } }, local = { ## .................. local likelihood smoothing ....................... xlim <- c(from, to) xxx <- seq(from, to, length=n) ## reference density ghat <- LocfitRaw(Zvalues, weights=if(horvitz) NULL else lambda, xlim=xlim, ...) ggg <- predict(ghat, xxx) ## relative density switch(method, ratio={ ## compute ratio of smoothed densities fhat <- LocfitRaw(ZX, weights=weights, xlim=xlim, ...) fff <- predict(fhat, xxx) yyy <- kappahat * fff/ggg ## compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(fhat, xxx) + varlogN }, reweight={ ## weight Z values by reciprocal of reference wt <- (weights %orifnull% 1)/(denom * predict(ghat,ZX)) sumwt <- sum(wt) rhat <- LocfitRaw(ZX, weights=wt, xlim=xlim, ...) rrr <- predict(rhat, xxx) yyy <- sumwt * rrr ## compute approximation to variance of log rho-hat varsumwt <- mean(yyy /(denom * ggg)) * diff(xlim) varlogsumwt <- varsumwt/sumwt^2 vvv <- varlog(rhat, xxx) + varlogsumwt }, transform={ ## probability integral transform Gfun <- approxfun(xxx, cumsum(ggg)/sum(ggg), rule=2) GZX <- Gfun(ZX) ## smooth density on [0,1], end effect corrected qhat <- LocfitRaw(GZX, weights=weights, xlim=c(0,1), ...) 
## apply to transformed values Gxxx <- Gfun(xxx) qqq <- predict(qhat, Gxxx) yyy <- kappahat * qqq ## compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(qhat, Gxxx) + varlogN }) vvvname <- "Variance of log of estimator" vvvlabel <- paste("bold(Var)~log(hat(%s)", paren(covname), ")", sep="") sdlog <- sqrt(vvv) if(positiveCI) { sss <- exp(crit * sdlog) hi <- yyy * sss lo <- yyy / sss if(markovCI) { ## truncate extremely large confidence intervals ## using Markov's Inequality hi <- pmin(hi, yyy/(1-confidence)) } } else { hi <- yyy * (1 + crit * sdlog) lo <- yyy * (1 - crit * sdlog) } }, increasing = , decreasing = { ## .................. nonparametric maximum likelihood ............ if(is.null(weights)) weights <- rep(1, length(ZX)) #' observed (sorted) oX <- order(ZX) ZX <- ZX[oX] weights <- weights[oX] #' reference CDF G <- ewcdf(Zvalues, lambda) #' reference denominator ('area') at each observed value if(smoother == "decreasing") { areas <- denom * G(ZX) } else { areas <- denom * (1 - G(rev(ZX))) weights <- rev(weights) } #' maximum upper sets algorithm rho <- numeric(0) darea <- diff(c(0, areas)) dcount <- weights while(length(darea) > 0) { u <- cumsum(dcount)/cumsum(darea) if(any(bad <- !is.finite(u))) # divide by zero etc u[bad] <- max(u[!bad], 0) k <- which.max(u) rho <- c(rho, rep(u[k], k)) darea <- darea[-(1:k)] dcount <- dcount[-(1:k)] } rho <- c(rho, 0) if(smoother == "increasing") rho <- rev(rho) #' compute as a stepfun rhofun <- stepfun(x = ZX, y=rho, right=TRUE, f=1) #' evaluate on a grid xlim <- c(from, to) xxx <- seq(from, to, length=n) yyy <- rhofun(xxx) #' vvv <- hi <- lo <- NULL savestuff$rhofun <- rhofun }) ## pack into fv object df <- data.frame(xxx=xxx, rho=yyy) names(df)[1] <- covname desc <- c(paste("covariate", covname), "Estimated intensity") labl <- c(covname, paste("hat(%s)", paren(covname), sep="")) if(did.variance <- !is.null(vvv)) { df <- cbind(df, data.frame(var=vvv, hi=hi, lo=lo)) desc <- c(desc, vvvname, 
paste("Upper limit of", CIblurb), paste("Lower limit of", CIblurb)) labl <- c(labl, vvvlabel, paste("%s[hi]", paren(covname), sep=""), paste("%s[lo]", paren(covname), sep="")) } rslt <- fv(df, argu=covname, ylab=substitute(rho(X), list(X=as.name(covname))), valu="rho", fmla= as.formula(paste(". ~ ", covname)), alim=range(ZX), labl=labl, desc=desc, unitname=covunits, fname="rho", yexp=substitute(rho(X), list(X=as.name(covname)))) if(did.variance) { fvnames(rslt, ".") <- c("rho", "hi", "lo") fvnames(rslt, ".s") <- c("hi", "lo") } else fvnames(rslt, ".") <- "rho" ## pack up class(rslt) <- c("rhohat", class(rslt)) ## add info stuff <- list(modelcall = modelcall, callstring = callstring, sigma = switch(smoother, kernel=sigma, local=NULL), covname = paste(covname, collapse=""), ZX = ZX, lambda = lambda, method = method, smoother = smoother, confidence = confidence, positiveCI = positiveCI) attr(rslt, "stuff") <- append(stuff, savestuff) return(rslt) } rhohatCalc }) ## ........... end of 'rhohatCalc' ................................. print.rhohat <- function(x, ...) 
plot.rhohat <- function(x, ..., do.rug=TRUE) {
  ## Plot method for class 'rhohat'.
  ## Draws the estimated intensity function via plot.fv (shading the
  ## confidence band columns "hi"/"lo" when present) and, if do.rug=TRUE,
  ## adds a rug of covariate values observed at the data points.
  xname <- short.deparse(substitute(x))
  s <- attr(x, "stuff")
  covname <- s$covname
  ## Did the user *explicitly* ask for a rug?
  ## BUG FIX: previously 'identical(rug, TRUE)', which compared the
  ## graphics function 'rug' itself against TRUE and was always FALSE,
  ## so the "Unable to add rug plot" warning below could never be issued.
  asked.rug <- !missing(do.rug) && identical(do.rug, TRUE)
  ## shade the confidence limits if this object has them
  snam <- intersect(c("hi", "lo"), names(x))
  if(length(snam) == 0) snam <- NULL
  out <- do.call(plot.fv,
                 resolve.defaults(list(x=x),
                                  list(...),
                                  list(main=xname, shade=snam)))
  ## caller may be using plot.fv purely to compute axis limits
  if(identical(list(...)$limitsonly, TRUE))
    return(out)
  if(do.rug) {
    rugx <- ZX <- s$ZX
    # check whether it's the default plot
    argh <- list(...)
    isfo <- unlist(lapply(argh, inherits, what="formula"))
    if(any(isfo)) {
      # a plot formula was given; inspect RHS
      fmla <- argh[[min(which(isfo))]]
      rhs <- rhs.of.formula(fmla)
      vars <- variablesinformula(rhs)
      vars <- vars[vars %in% c(colnames(x), ".x", ".y")]
      if(length(vars) == 1 && vars %in% c(covname, ".x")) {
        # expression in terms of covariate:
        # evaluate it at the observed covariate values
        rhstr <- as.character(rhs)[2]
        dat <- list(ZX)
        names(dat) <- vars[1]
        rugx <- as.numeric(eval(parse(text=rhstr), dat))
      } else {
        ## cannot map the formula onto covariate values;
        ## warn only if the user explicitly requested the rug
        if(asked.rug) warning("Unable to add rug plot")
        rugx <- NULL
      }
    }
    if(!is.null(rugx)) {
      # restrict to x limits, if given
      if(!is.null(xlim <- list(...)$xlim))
        rugx <- rugx[rugx >= xlim[1] & rugx <= xlim[2]]
      # finally plot the rug
      if(length(rugx) > 0)
        rug(rugx)
    }
  }
  invisible(NULL)
}
simulate.rhohat <- function(object, nsim=1, ..., drop=TRUE) {
  ## Simulate Poisson point patterns whose intensity is the predicted
  ## intensity surface of the fitted 'rhohat' object.  If the predicted
  ## intensity lives on a linear network (a 'linim', or a list of them),
  ## simulation is performed on the network; otherwise in 2D space.
  trap.extra.arguments(..., .Context="in simulate.rhohat")
  lambda <- predict(object)
  on.network <-
    is.linim(lambda) ||
    (is.solist(lambda) && all(sapply(lambda, is.linim)))
  if(on.network) {
    rpoislpp(lambda, nsim=nsim, drop=drop)
  } else {
    rpoispp(lambda, nsim=nsim, drop=drop)
  }
}
rmax.Trans <- function(W, g=setcov(W)) {
  ## Maximum permissible 'r' for validity of the translation edge
  ## correction: the radius of the largest circle centred at the origin
  ## that fits inside the support of the set covariance of W.
  W <- as.owin(W)
  ## a rectangle has a closed-form answer
  if(is.rectangle(W)) return(shortside(W))
  if(is.null(g)) g <- setcov(W)
  ## threshold away numerical noise to recover the support of g,
  ## then measure the distance from the origin to its boundary
  tol <- 2 * max(1, max(g)) * .Machine$double.eps
  supp <- solutionset(g > tol)
  edge <- bdry.mask(supp)
  pts <- rasterxy.mask(edge, drop=TRUE)
  sqrt(min(pts$x^2 + pts$y^2))
}
Contact distribution for a random set # # Hest <- local({ Hest <- function(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE) { if(missing(W)) W <- NULL HestEngine(X, r=r, breaks=breaks, ..., W=W, correction=correction, conditional=conditional) } HestEngine <- function(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE, checkspacing=TRUE, testme=FALSE) { rorbgiven <- !is.null(r) || !is.null(breaks) if(is.ppp(X) || is.psp(X)) { XX <- X W0 <- Window(X) } else if(is.owin(X)) { XX <- X W0 <- Frame(X) } else if(is.im(X)) { if(!is.logical(ZeroValue(X))) stop("When X is an image, its pixel values should be logical values") XX <- solutionset(X) W0 <- Window(X) } else stop("X should be an object of class ppp, psp, owin or im") ## if(given.W <- !missing(W) && !is.null(W)) { stopifnot(is.owin(W)) if(!is.subset.owin(W, W0)) stop("W is not a subset of the observation window of X") } else { W <- W0 } ## handle corrections if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable ## compute distance map D <- distmap(XX, ...) pixeps <- with(D, min(xstep, ystep)) if(!given.W && !is.im(X)) { B <- attr(D, "bdry") } else { B <- distmap(W, invert=TRUE, ...) 
har <- harmonise(D=D, B=B) D <- har$D[W, drop=FALSE] B <- har$B[W, drop=FALSE] } ## histogram breakpoints dmax <- max(D) breaks <- handle.r.b.args(r, breaks, W, NULL, rmaxdefault=dmax) rval <- breaks$r if(testme || (rorbgiven && checkspacing)) check.finespacing(rval, rname="r", eps=pixeps/4, W, rmaxdefault=dmax, context="in Hest(X,r)", action="fatal") ## extract distances and censoring distances dist <- as.vector(as.matrix(D)) bdry <- as.vector(as.matrix(B)) ok <- !is.na(dist) & !is.na(bdry) dist <- dist[ok] bdry <- bdry[ok] ## delete zero distances if(is.owin(X) || is.im(X)) { pos <- (dist > 0) areafraction <- 1 - mean(pos) dist <- dist[pos] bdry <- bdry[pos] } ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(W, rval) else NULL, tt=dist) ## conditional on d > 0 ? if(is.owin(X) || is.im(X)) { if(conditional) { if(corx$km) Z$km <- condition(Z$km) if(corx$rs) Z$rs <- condition(Z$rs) if(corx$han) Z$han <- condition(Z$han) if(corx$none) Z$raw <- condition(Z$raw) } else { if(corx$km) Z$km <- reconstitute(Z$km, areafraction) if(corx$rs) Z$rs <- reconstitute(Z$rs, areafraction) if(corx$han) Z$han <- reconstitute(Z$han, areafraction) if(corx$none) Z$raw <- reconstitute(Z$raw, areafraction) } } ## relabel Z <- rebadge.fv(Z, substitute(H(r), NULL), "H") unitname(Z) <- unitname(X) attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } condition <- function(x) { (x - x[1])/(1-x[1]) } reconstitute <- function(x, p) { p + (1-p) * x } Hest }) spatstat/R/vcov.kppm.R0000644000176200001440000001217713333543255014402 0ustar liggesusers# # vcov.kppm # # vcov method for kppm objects # # Original code: Abdollah Jalilian # # $Revision: 1.11 $ $Date: 2018/02/15 03:28:11 $ # vcov.kppm <- function(object, ..., what=c("vcov", "corr", "fisher", "internals"), fast = NULL, rmax = NULL, eps.rmax = 
0.01, verbose = TRUE) { what <- match.arg(what) verifyclass(object, "kppm") fast.given <- !is.null(fast) #' secret argument (eg for testing) splitup <- resolve.1.default(list(splitup=FALSE), list(...)) #' if(is.null(object$improve)) { ## Normal composite likelihood (poisson) case ## extract composite likelihood results po <- object$po ## ensure it was fitted with quadscheme if(is.null(getglmfit(po))) { warning("Re-fitting model with forcefit=TRUE") po <- update(po, forcefit=TRUE) } ## extract quadrature scheme information Q <- quad.ppm(po) U <- union.quad(Q) nU <- npoints(U) wt <- w.quad(Q) ## compute fitted intensity values lambda <- fitted(po, type="lambda") ## extract covariate values Z <- model.matrix(po) ## evaluate integrand ff <- Z * lambda * wt ## extract pcf g <- pcfmodel(object) ## resolve options for algorithm maxmat <- spatstat.options("maxmatrix") if(!fast.given) { fast <- (nU^2 > maxmat) } else stopifnot(is.logical(fast)) ## attempt to compute large matrix: pair correlation function minus 1 if(!fast) { gminus1 <- there.is.no.try( matrix(g(c(pairdist(U))) - 1, nU, nU) ) } else { if(is.null(rmax)){ diamwin <- diameter(as.owin(U)) fnc <- get("fnc", envir = environment(improve.kppm)) rmax <- if(fnc(diamwin, eps.rmax, g) >= 0) diamwin else uniroot(fnc, lower = 0, upper = diamwin, eps=eps.rmax, g=g)$root } cp <- there.is.no.try( crosspairs(U,U,rmax,what="ijd") ) gminus1 <- if(is.null(cp)) NULL else sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(nU, nU)) } ## compute quadratic form if(!splitup && !is.null(gminus1)) { E <- t(ff) %*% gminus1 %*% ff } else { ## split calculation of (gminus1 %*% ff) into blocks nrowperblock <- max(1, floor(maxmat/nU)) nblocks <- ceiling(nU/nrowperblock) g1ff <- NULL if(verbose) { splat("Splitting large matrix calculation into", nblocks, "blocks") pstate <- list() } if(!fast) { for(k in seq_len(nblocks)) { if(verbose) pstate <- progressreport(k, nblocks, state=pstate) istart <- nrowperblock * (k-1) + 1 iend <- 
min(nrowperblock * k, nU) ii <- istart:iend gm1 <- matrix(g(c(crossdist(U[ii], U))) - 1, iend-istart+1, nU) g1ff <- rbind(g1ff, gm1 %*% ff) } } else { for(k in seq_len(nblocks)) { if(verbose) pstate <- progressreport(k, nblocks, state=pstate) istart <- nrowperblock * (k-1) + 1 iend <- min(nrowperblock * k, nU) ii <- istart:iend cp <- crosspairs(U[ii], U, rmax, what="ijd") gm1 <- sparseMatrix(i=cp$i, j=cp$j, x=g(cp$d) - 1, dims=c(iend-istart+1, nU)) g1ff <- rbind(g1ff, as.matrix(gm1 %*% ff)) } } E <- t(ff) %*% g1ff } ## asymptotic covariance matrix in the Poisson case J <- t(Z) %*% ff J.inv <- try(solve(J)) ## could be singular if(inherits(J.inv, "try-error")) { if(what == "internals") { return(list(ff=ff, J=J, E=E, J.inv=NULL)) } else { return(NULL) } } ## asymptotic covariance matrix in the clustered case vc <- J.inv + J.inv %*% E %*% J.inv } else { ## Case of quasi-likelihood (or other things from improve.kppm) run <- is.null(object$vcov) || (!is.null(fast) && (fast != object$improve$fast.vcov)) if(run){ ## Calculate vcov if it hasn't already been so ## or if option fast differs from fast.vcov args <- object$improve internal <- what=="internals" if(!is.null(fast)) args$fast.vcov <- fast object <- with(args, improve.kppm(object, type = type, rmax = rmax, dimyx = dimyx, fast = fast, vcov = TRUE, fast.vcov = fast.vcov, maxIter = 0, save.internals = internal)) } vc <- object$vcov } ## Convert from Matrix to ordinary matrix: vc <- as.matrix(vc) switch(what, vcov={ return(vc) }, corr={ sd <- sqrt(diag(vc)) co <- vc/outer(sd, sd, "*") return(co) }, fisher={ fish <- try(solve(vc)) if(inherits(fish, "try-error")) fish <- NULL return(fish) }, internals={ return(list(ff=ff, J=J, E=E, J.inv=J.inv, vc=vc)) }) stop(paste("Unrecognised option: what=", what)) } spatstat/R/nnorient.R0000644000176200001440000001024013354545742014306 0ustar liggesusers## ## nnorient.R ## ## nearest neighbour pair orientation distribution ## ## Function \vartheta(phi) defined in ## Illian et al 
(2008) equ (4.5.3) page 253 ## ## $Revision: 1.4 $ $Date: 2018/10/02 01:21:40 $ nnorient <- function(X, ..., cumulative=FALSE, correction, k = 1, unit=c("degree", "radian"), domain=NULL, ratio=FALSE) { stopifnot(is.ppp(X)) check.1.integer(k) stopifnot(k>=1) W <- Window(X) if(!is.null(domain)) stopifnot(is.subset.owin(domain, W)) unit <- match.arg(unit) switch(unit, degree = { FullCircle <- 360 Convert <- 180/pi }, radian = { FullCircle <- 2 * pi Convert <- 1 }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("bord.modif", "none") correction <- pickoption("correction", correction, c(none="none", bord.modif="bord.modif", good="good", best="best"), multi=TRUE) correction[correction %in% c("good", "best")] <- "bord.modif" ## process point pattern Xcoord <- coords(X) Ycoord <- Xcoord[nnwhich(X, k=k), ] if(!is.null(domain)) { inD <- inside.owin(Xcoord$x, Xcoord$y, domain) Xcoord <- Xcoord[inD,] Ycoord <- Ycoord[inD,] } dYX <- Ycoord-Xcoord ANGLE <- with(dYX, atan2(y, x) * Convert) %% FullCircle nangles <- length(ANGLE) ## initialise output object Nphi <- 512 breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1) phi <- breaks$r Odf <- data.frame(phi = phi, theo = (if(cumulative) phi else 1)/FullCircle) desc <- c("angle argument phi", "theoretical isotropic %s") NOletter <- if(cumulative) "Theta" else "vartheta" NOsymbol <- as.name(NOletter) NNO <- ratfv(Odf, NULL, denom=nangles, argu="phi", ylab=substitute(fn(phi), list(fn=NOsymbol)), valu="theo", fmla = . ~ phi, alim = c(0, FullCircle), c("phi", "{%s[%s]^{pois}}(phi)"), desc, fname=NOletter, yexp=substitute(fn(phi), list(fn=NOsymbol))) ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^ if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! 
if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate NNO <- bind.ratfv(NNO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if("bord.modif" %in% correction) { ## border type correction bX <- bdist.points(X) nndX <- nndist(X, k=k) if(!is.null(domain)) { bX <- bX[inD] nndX <- nndX[inD] } ok <- (nndX < bX) nok <- sum(ok) rr <- seq(0, max(bX), length=256) if(nok == 0) { num.bm <- numeric(Nphi) # i.e. rep(0, Nphi) } else { Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- Arf(bX) edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } } den.bm <- nok NNO <- bind.ratfv(NNO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } unitname(NNO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(NNO) } spatstat/R/update.ppm.R0000644000176200001440000003141113333543255014524 0ustar liggesusers# # update.ppm.R # # # $Revision: 1.61 $ $Date: 2017/10/04 03:51:04 $ # # # update.ppm <- local({ ## update point pattern dataset using either data or formula newpattern <- function(oldpattern, lhs, callframe, envir) { eval(eval(substitute(substitute(l, list("."=Q)), list(l=lhs, Q=oldpattern)), envir=as.list(envir), enclos=callframe), envir=as.list(envir), enclos=callframe) } update.ppm <- function(object, ..., fixdummy=TRUE, use.internal=NULL, envir=environment(terms(object))) { verifyclass(object, "ppm") new.callstring <- short.deparse(sys.call()) aargh <- list(...) 
if(inherits(object, "ippm")) { call <- object$dispatched$call callframe <- object$dispatched$callframe } else { call <- getCall(object) if(!is.call(call)) stop(paste("Internal error - getCall(object) is not of class", sQuote("call"))) callframe <- object$callframe } callfun <- as.character(call[[1]]) newstyle <- (callfun == "ppm.formula") oldstyle <- !newstyle ## Special cases ## (1) no new information given if(length(aargh) == 0 && !identical(use.internal, TRUE)) { result <- eval(call, as.list(envir), enclos=callframe) result$callframe <- callframe return(result) } ## (2) model can be updated using existing covariate data frame if(!identical(use.internal, FALSE) && ## single argument which is a formula (length(aargh) == 1) && inherits(fmla <- aargh[[1]], "formula") && is.null(lhs.of.formula(fmla)) && ## not a ppm.formula call oldstyle && ## fitted by mpl using glm/gam with(object, method == "mpl" && !is.null(fitter) && fitter %in% c("gam", "glm"))) { ## This is a dangerous hack! glmdata <- object$internal$glmdata ## check whether data for new variables are available ## (this doesn't work with things like 'pi') vars.available <- c(colnames(glmdata), names(object$covfunargs)) if(all(variablesinformula(fmla) %in% c(".", vars.available))) { ## we can update using internal data FIT <- object$internal$glmfit orig.env <- environment(FIT$terms) ## update formulae using "." 
rules trend <- newformula(object$trend, fmla, callframe, envir) fmla <- newformula(formula(FIT), fmla, callframe, envir) ## expand polynom() in formula if(spatstat.options("expand.polynom")) { fmla <- expand.polynom(fmla) trend <- expand.polynom(trend) } ## update GLM/GAM fit upd.glm.call <- update(FIT, fmla, evaluate=FALSE) FIT <- eval(upd.glm.call, envir=orig.env) environment(FIT$terms) <- orig.env object$internal$glmfit <- FIT ## update entries of object object$trend <- trend object$terms <- terms(fmla) object$coef <- co <- FIT$coef object$callstring <- new.callstring object$internal$fmla <- fmla ## if(is.finite(object$maxlogpl)) { ## Update maxlogpl provided it is finite ## (If the likelihood is infinite, this is due to the interaction; ## if we update the trend, the likelihood will remain infinite.) W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(object$Q) object$maxlogpl <- -(deviance(FIT)/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) } ## update the model call upd.call <- call upd.call$trend <- trend object$call <- upd.call ## update fitted interaction (depends on coefficients, if not Poisson) if(!is.null(inter <- object$interaction) && !is.poisson(inter)) object$fitin <- fii(inter, co, object$internal$Vnames, object$internal$IsOffset) ## if(is.stationary(object) && !is.marked(object)) { ## uniform Poisson if(eval(call$rename.intercept) %orifnull% TRUE) { names(object$coef) <- "log(lambda)" } } return(object) } } ## (3) Need to use internal data if(oldstyle) { ## decide whether to use internal data undecided <- is.null(use.internal) || !is.logical(use.internal) force.int <- !undecided && use.internal force.ext <- !undecided && !use.internal if(!force.int) { ## check for validity of format badformat <- damaged.ppm(object) } if(undecided) { use.internal <- badformat if(badformat) message("object format corrupted; repairing it") } else if(force.ext && badformat) warning("object format corrupted; try update(object, use.internal=TRUE)") 
if(use.internal) { ## reset the main arguments in the call using the internal data call$Q <- quad.ppm(object) namobj <- names(call) if("trend" %in% namobj) call$trend <- newformula(call$trend, object$trend, callframe, envir) if("interaction" %in% namobj) call$interaction <- object$interaction if("covariates" %in% namobj) call$covariates <- object$covariates } } ## General case. X.is.new <- FALSE ## First split named and unnamed arguments nama <- names(aargh) named <- if(is.null(nama)) rep.int(FALSE, length(aargh)) else nzchar(nama) namedargs <- aargh[named] unnamedargs <- aargh[!named] nama <- names(namedargs) ## Find the argument 'Q' by name or implicitly by class ## (including detection of conflicts) argQ <- NULL if(n <- sp.foundclasses(c("ppp", "quad"), unnamedargs, "Q", nama)) { argQ <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } if("Q" %in% nama) { argQ <- namedargs$Q nama <- setdiff(nama, "Q") namedargs <- namedargs[nama] } ## Deal with argument 'Q' which has several possible forms if(!is.null(argQ)) { X.is.new <- TRUE if(inherits(argQ, "formula")) { ## Q = X ~ trend if(newstyle) { ## update the formula call$Q <- newformula(call$Q, argQ, callframe, envir) } else { ## split into Q = X and trend = ~trend if(!is.null(lhs <- lhs.of.formula(argQ))) call$Q <- newpattern(call$Q, lhs, callframe, envir) call$trend <- newformula(call$trend, rhs.of.formula(eval(argQ)), callframe, envir) } } else { ## Q = X if(newstyle) { ## convert old call to old style fo <- as.formula(call$Q) Yexpr <- lhs.of.formula(fo) trend <- rhs.of.formula(fo) newcall <- call("ppm", Q=Yexpr, trend=trend) if(length(call) > 2) { whichQ <- which(names(call) == "Q") morecall <- call[-c(1, whichQ)] if((mc <- length(morecall)) > 0) { newcall[3 + 1:mc] <- morecall names(newcall)[3 + 1:mc] <- names(call)[-c(1, whichQ)] } } call <- newcall newstyle <- FALSE oldstyle <- TRUE } ## Now update the dataset call$Q <- argQ } } ## Find any formula arguments ## (including detection of conflicts) argfmla <- 
NULL if(n <- sp.foundclass("formula", unnamedargs, "trend", nama)) { argfmla <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } else if(n <- sp.foundclass("character", unnamedargs, "trend", nama)) { ## string that might be interpreted as a formula strg <- unnamedargs[[n]] if(!is.na(charmatch("~", strg))) { argfmla <- as.formula(strg) unnamedargs <- unnamedargs[-n] } } if("trend" %in% nama) { argfmla <- namedargs$trend nama <- setdiff(nama, "trend") namedargs <- namedargs[nama] } ## Handle new formula if(!is.null(argfmla)) { lhs <- lhs.of.formula(argfmla) if(newstyle) { ## ppm.formula: update the formula if(is.null(lhs)) { argfmla <- as.formula(paste(".", deparse(argfmla))) } else X.is.new <- TRUE call$Q <- newformula(call$Q, argfmla, callframe, envir) } else { ## ppm.ppp: update the trend and possibly the data if(is.null(lhs)) { ## assign new trend call$trend <- newformula(call$trend, argfmla, callframe, envir) } else { ## split into Q = X and trend = ~trend X.is.new <- TRUE call$Q <- newpattern(call$Q, lhs, callframe, envir) call$trend <- newformula(call$trend, rhs.of.formula(argfmla), callframe, envir) } } } if(length(namedargs) > 0) { ## any other named arguments that were also present in the original call ## override their original values. 
existing <- !is.na(match(nama, names(call))) for (a in nama[existing]) call[[a]] <- aargh[[a]] ## add any named arguments not present in the original call if (any(!existing)) { call <- c(as.list(call), namedargs[!existing]) call <- as.call(call) } } if(length(unnamedargs) > 0) { ## some further objects identified by their class if(n<- sp.foundclass("interact", unnamedargs, "interaction", nama)) { call$interaction <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } if(n <- sp.foundclasses(c("data.frame", "im"), unnamedargs, "covariates", nama)) { call$covariates <- unnamedargs[[n]] unnamedargs <- unnamedargs[-n] } } ## ************************************************************* ## ****** Special action when Q is a point pattern ************* ## ************************************************************* if(X.is.new && fixdummy && oldstyle && is.ppp(X <- eval(call$Q, as.list(envir), enclos=callframe)) && identical(Window(X), Window(data.ppm(object)))) { ## Instead of allowing default.dummy(X) to occur, ## explicitly create a quadrature scheme from X, ## using the same dummy points and weight parameters ## as were used in the fitted model Qold <- quad.ppm(object) if(is.marked(Qold)) { dpar <- Qold$param$dummy wpar <- Qold$param$weight Qnew <- do.call(quadscheme, append(list(X), append(dpar, wpar))) } else { Dum <- Qold$dummy wpar <- Qold$param$weight Qnew <- do.call(quadscheme, append(list(X, Dum), wpar)) } ## replace X by new Q call$Q <- Qnew } ## finally call ppm call[[1]] <- as.name('ppm') return(eval(call, as.list(envir), enclos=callframe)) } update.ppm }) sp.foundclass <- function(cname, inlist, formalname, argsgiven) { ok <- unlist(lapply(inlist, inherits, what=cname)) nok <- sum(ok) if(nok > 1) stop(paste("I am confused: there are two unnamed arguments", "of class", sQuote(cname))) if(nok == 0) return(0) absent <- !(formalname %in% argsgiven) if(!absent) stop(paste("I am confused: there is an unnamed argument", "of class", sQuote(cname), "which conflicts 
with the", "named argument", sQuote(formalname)))
  theposition <- seq_along(ok)[ok]
  return(theposition)
}

## Find the position, among the unnamed arguments 'inlist', of the single
## argument inheriting from one of the classes 'cnames', intended to match
## the formal argument 'formalname'.
## Returns 0 if no unnamed argument matches; otherwise the matching position.
## It is an error for unnamed arguments of two different classes to match,
## or (via sp.foundclass) for a match to coexist with an explicitly named
## argument 'formalname'.
sp.foundclasses <- function(cnames, inlist, formalname, argsgiven) {
  ncn <- length(cnames)
  ## positions found for each candidate class
  ## (coerced from logical to numeric on first assignment)
  pozzie <- logical(ncn)
  for(i in seq_len(ncn))
    pozzie[i] <- sp.foundclass(cnames[i], inlist, formalname, argsgiven)
  found <- (pozzie > 0)
  nfound <- sum(found)
  if(nfound == 0)
    return(0)
  else if(nfound == 1)
    return(pozzie[found])
  else
    stop(paste("I am confused: there are", nfound,
               "unnamed arguments of different classes (",
               ## bug fix: 'cnames' is a character vector, not a function;
               ## the original code wrote cnames(pozzie[found]), which would
               ## itself crash ("could not find function") whenever this
               ## error branch was reached. Subscript the matching names.
               paste(sQuote(cnames[found]), collapse=", "),
               ") which could be interpreted as",
               sQuote(formalname)))
}

## Guess whether a fitted ppm object's internal format has been damaged,
## e.g. by dump/restore. Returns TRUE if the object appears corrupted.
damaged.ppm <- function(object) {
  ## guess whether the object format has been damaged
  ## e.g. by dump/restore
  gf <- getglmfit(object)
  ## a fitted glm/gam must carry a valid 'terms' component
  badfit <- !is.null(gf) && !inherits(gf$terms, "terms")
  if(badfit)
    return(TRUE)
  ## escape clause for fake models
  if(identical(object$fake, TRUE))
    return(FALSE)
  ## otherwise it was made by ppm: check that the data argument 'Q'
  ## can still be reconstructed from the saved call
  Qcall <- object$call$Q
  cf <- object$callframe
  if(is.null(cf)) {
    ## Old format of ppm objects (no saved call frame)
    if(is.name(Qcall) && !exists(paste(Qcall)))
      return(TRUE)
    Q <- eval(Qcall)
  } else {
    ## New format of ppm objects: evaluate in the saved frame
    if(is.name(Qcall) && !exists(paste(Qcall), cf))
      return(TRUE)
    Q <- eval(Qcall, cf)
  }
  badQ <- is.null(Q) || !(inherits(Q, c("ppp", "quad", "formula")))
  return(badQ)
}
spatstat/R/Jest.R0000644000176200001440000000475413556452115013367 0ustar liggesusers# Jest.S
#
#	Usual invocation to compute J function
#	if F and G are not required
#
#	$Revision: 4.25 $	$Date: 2019/10/31 02:58:29 $
#
#
#
Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) {
  X <- as.ppp(X)
  W <- Window(X)
  brks <- handle.r.b.args(r, breaks, window=W, pixeps=eps,
                          rmaxdefault=rmax.rule("J", W, intensity(X)))
  checkspacing <- !isFALSE(list(...)$checkspacing)
  #' compute F and G
  FF <- Fest(X, eps, breaks=brks, correction=correction,
             checkspacing=checkspacing)
  G <- Gest(X, breaks=brks, correction=correction)
  # initialise fv object
  rvals <- FF$r
  rmax <-
max(rvals)
  ## assemble the fv object: grid of r values plus the theoretical
  ## value J(r) = 1 for a homogeneous Poisson process
  Z <- fv(data.frame(r=rvals, theo=1),
          "r", substitute(J(r), NULL), "theo", . ~ r,
          c(0,rmax),
          c("r", "%s[pois](r)"),
          c("distance argument r", "theoretical Poisson %s"),
          fname="J")
  # compute J function estimates
  # this has to be done manually because of the mismatch between names
  Fnames <- names(FF)
  Gnames <- names(G)
  bothnames <- intersect(Fnames, Gnames)
  ## each correction available in both F and G yields an estimate
  ## J = (1-G)/(1-F) (computed by the helper 'ratiotweak')
  if("raw" %in% bothnames) {
    Jun <- ratiotweak(1-G$raw, 1-FF$raw)
    Z <- bind.fv(Z, data.frame(un=Jun),
                 "hat(%s)[un](r)", "uncorrected estimate of %s", "un")
    ## restrict default plot range to where F is not too close to 1
    attr(Z, "alim") <- range(rvals[FF$raw <= 0.9])
  }
  if("rs" %in% bothnames) {
    Jrs <- ratiotweak(1-G$rs, 1-FF$rs)
    Z <- bind.fv(Z, data.frame(rs=Jrs),
                 "hat(%s)[rs](r)", "border corrected estimate of %s", "rs")
    attr(Z, "alim") <- range(rvals[FF$rs <= 0.9])
  }
  if("han" %in% Gnames && "cs" %in% Fnames) {
    ## Hanisch-style estimate pairs G's "han" with F's "cs" correction
    Jhan <- ratiotweak(1-G$han, 1-FF$cs)
    Z <- bind.fv(Z, data.frame(han=Jhan),
                 "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han")
    attr(Z, "alim") <- range(rvals[FF$cs <= 0.9])
  }
  if("km" %in% bothnames) {
    Jkm <- ratiotweak(1-G$km, 1-FF$km)
    Z <- bind.fv(Z, data.frame(km=Jkm),
                 "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km")
    attr(Z, "alim") <- range(rvals[FF$km <= 0.9])
  }
  if("hazard" %in% bothnames) {
    ## difference of hazards estimates the derivative of log J
    Jhaz <- G$hazard - FF$hazard
    Z <- bind.fv(Z, data.frame(hazard=Jhaz),
                 "hazard(r)", "Kaplan-Meier estimate of derivative of log(%s)")
  }
  # set default plotting values and order
  nama <- names(Z)
  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
  # add more info
  attr(Z, "F") <- FF
  attr(Z, "G") <- G
  attr(Z, "conserve") <- attr(FF, "conserve")
  unitname(Z) <- unitname(X)
  return(Z)
}
spatstat/R/hopskel.R0000644000176200001440000000500413333543255014113 0ustar liggesusers##
## hopskel.R
##  Hopkins-Skellam test
##
## $Revision: 1.2 $ $Date: 2014/09/23 08:24:36 $

## Hopkins-Skellam statistic A for a point pattern X:
## ratio of the mean squared nearest-neighbour distance within X
## to the mean squared distance from uniform random points to X.
## (Per hopskel.test below: A < 1 indicates clustering, A > 1 regularity.)
## Returns NA for patterns with fewer than 2 points.
## NOTE(review): the value is random (uses runifpoint) unless the RNG
## seed is fixed by the caller.
hopskel <- function(X) {
  stopifnot(is.ppp(X))
  n <- npoints(X)
  if(n < 2) return(NA)
  dX <- nndist(X)
  U <- runifpoint(n, Window(X))
  dU <- nncross(U, X, what="dist")
  A <- mean(dX^2)/mean(dU^2)
  return(A)
}
## Hopkins-Skellam test of Complete Spatial Randomness for a point pattern X.
## alternative: "less"/"clustered" means A < 1; "greater"/"regular" means A > 1.
## method "asymptotic" uses the F(2n, 2n) null distribution of the statistic A;
## method "MonteCarlo" simulates 'nsim' realisations of CSR with the same
## number of points. Returns an object of class "htest".
hopskel.test <- function(X, ...,
                         alternative=c("two.sided", "less", "greater",
                                       "clustered", "regular"),
                         method=c("asymptotic", "MonteCarlo"),
                         nsim=999
                         ) {
  Xname <- short.deparse(substitute(X))
  verifyclass(X, "ppp")
  W <- Window(X)
  n <- npoints(X)
  method <- match.arg(method)
  # alternative hypothesis: map the synonyms onto one-sided alternatives
  alternative <- match.arg(alternative)
  if(alternative == "clustered") alternative <- "less"
  if(alternative == "regular") alternative <- "greater"
  altblurb <-
    switch(alternative,
           two.sided="two-sided",
           less="clustered (A < 1)",
           greater="regular (A > 1)")
  ## compute observed value
  statistic <- hopskel(X)
  ## p-value
  switch(method,
         asymptotic = {
           ## F-distribution: under CSR, A ~ F(2n, 2n)
           nn <- 2 * n
           p.value <-
             switch(alternative,
                    less = pf(statistic, nn, nn, lower.tail=TRUE),
                    greater = pf(statistic, nn, nn, lower.tail=FALSE),
                    two.sided = 2 * pf(statistic, nn, nn,
                                       lower.tail=(statistic < 1)))
           ## NOTE(review): the doubled two-sided p-value is not capped at 1
           pvblurb <- "using F distribution"
         },
         MonteCarlo = {
           ## Monte Carlo p-value
           sims <- numeric(nsim)
           for(i in 1:nsim) {
             Xsim <- runifpoint(n, win=W)
             sims[i] <- hopskel(Xsim)
           }
           ## Bug fix: these p-value computations were previously placed
           ## INSIDE the simulation loop, re-summing the (partially filled)
           ## 'sims' vector at every iteration -- O(nsim^2) redundant work.
           ## Only the final iteration's value was ever used, so hoisting
           ## them out of the loop leaves the result unchanged.
           p.upper <- (1 + sum(sims >= statistic))/(1 + nsim)
           p.lower <- (1 + sum(sims <= statistic))/(1 + nsim)
           p.value <- switch(alternative,
                             less=p.lower,
                             greater=p.upper,
                             two.sided=2*min(p.lower, p.upper))
           pvblurb <- paste("Monte Carlo test based on", nsim,
                            "simulations of CSR with fixed n")
         })
  statistic <- as.numeric(statistic)
  names(statistic) <- "A"
  out <- list(statistic=statistic,
              p.value=p.value,
              alternative=altblurb,
              method=c("Hopkins-Skellam test of CSR", pvblurb),
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
spatstat/R/Jmulti.R0000644000176200001440000001375613556707650013727 0ustar liggesusers# Jmulti.S
#
#	Usual invocations to compute multitype J function(s)
#	if F and G are not required
#
#	$Revision: 4.43 $	$Date: 2019/11/01 01:35:30 $
#
#
#
"Jcross" <-
function(X, i, j, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) {
#
#	multitype J function J_{ij}(r)
#
#	X:		point pattern (an object of class 'ppp')
#	i, j:		types for which
#	J_{i,j}(r) is calculated
#	eps:		raster grid mesh size for distance transform
#			(unless specified by X$window)
#	r:		(optional) values of argument r
#	breaks:		(optional) breakpoints for argument r
#
  X <- as.ppp(X)
  if(!is.marked(X))
    stop(paste("point pattern has no", sQuote("marks")))
  stopifnot(is.multitype(X))
  checkspacing <- !isFALSE(list(...)$checkspacing)
#
  marx <- marks(X, dfok=FALSE)
  ## default types: first two levels of the mark factor
  if(missing(i)) i <- levels(marx)[1]
  if(missing(j)) j <- levels(marx)[2]
#
  I <- (marx == i)
  if(sum(I) == 0) stop(paste("No points have mark = ", i))
#
  if(i == j)
    ## same type on both sides: reduces to the ordinary J function
    result <- Jest(X[I], eps=eps, r=r, breaks=breaks,
                   correction=correction, checkspacing=checkspacing)
  else {
    J <- (marx == j)
    result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=TRUE,
                     correction=correction, checkspacing=checkspacing)
  }
  ## relabel the fv object with i,j-specific math labels
  conserve <- attr(result, "conserve")
  iname <- make.parseable(paste(i))
  jname <- make.parseable(paste(j))
  result <- rebadge.fv(result,
                       substitute(J[i,j](r), list(i=iname,j=jname)),
                       c("J", paste0("list(", iname, ",", jname, ")")),
                       new.yexp=substitute(J[list(i,j)](r),
                                           list(i=iname,j=jname)))
  attr(result, "conserve") <- conserve
  return(result)
}

"Jdot" <-
function(X, i, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) {
#
#	multitype J function J_{i\dot}(r):
#	points of type i versus points of all types
#
#	X:		point pattern (an object of class 'ppp')
#	i:		mark i for which we calculate J_{i\cdot}(r)
#	eps:		raster grid mesh size for distance transform
#			(unless specified by X$window)
#	r:		(optional) values of argument r
#	breaks:		(optional) breakpoints for argument r
#
  X <- as.ppp(X)
  if(!is.marked(X))
    stop(paste("point pattern has no", sQuote("marks")))
  stopifnot(is.multitype(X))
  checkspacing <- !isFALSE(list(...)$checkspacing)
#
  marx <- marks(X, dfok=FALSE)
  if(missing(i)) i <- levels(marx)[1]
#
  I <- (marx == i)
  if(sum(I) == 0) stop(paste("No points have mark = ", i))
  ## second subset is all points
  J <- rep.int(TRUE, X$n)
#
  result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=FALSE,
                   correction=correction, checkspacing=checkspacing)
  ## relabel with "i dot" math labels ("\267" is the centred-dot symbol)
  conserve <- attr(result, "conserve")
  iname <- make.parseable(paste(i))
  result <- rebadge.fv(result,
                       substitute(J[i ~ dot](r), list(i=iname)),
                       c("J", paste(iname, "~ symbol(\"\\267\")")),
                       new.yexp=substitute(J[i ~ symbol("\267")](r),
                                           list(i=iname)))
  attr(result, "conserve") <- conserve
  return(result)
}

"Jmulti" <-
function(X, I, J, eps=NULL, r=NULL, breaks=NULL, ...,
         disjoint=NULL, correction=NULL) {
#
#	multitype J function (generic engine)
#
#	X		marked point pattern (of class ppp)
#
#	I,J		logical vectors of length equal to the number of points
#			and identifying the two subsets of points to be
#			compared.
#
#	eps:		raster grid mesh size for distance transform
#			(unless specified by X$window)
#
#	r:		(optional) values of argument r
#	breaks:		(optional) breakpoints for argument r
#
#
  X <- as.ppp(X)
  W<- X$window
  I <- ppsubset(X, I)
  J <- ppsubset(X, J)
  if(is.null(I) || is.null(J))
    stop("I and J must be valid subset indices")
  XJ <- X[J]
  lambdaJ <- intensity(XJ)
  rmaxdefault <- rmax.rule("J", W, lambdaJ)
  brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)$val
  ## empty-space function of the J subset, and G function from I to J
  FJ <- Fest(XJ, eps, breaks=brks, correction=correction, ...)
  GIJ <- Gmulti(X, I, J, breaks=brks, disjoint=disjoint,
                correction=correction, ...)
  rvals <- FJ$r
  Fnames <- names(FJ)
  Gnames <- names(GIJ)
  bothnames <- Fnames[Fnames %in% Gnames]
  # initialise fv object
  alim <- attr(FJ, "alim")
  fname <- c("J", "list(I,J)")
  Z <- fv(data.frame(r=rvals, theo=1),
          "r", quote(J[I,J](r)),
          "theo", . ~ r, alim,
          c("r", makefvlabel(NULL, NULL, fname, "pois")),
          c("distance argument r", "theoretical Poisson %s"),
          fname=fname,
          yexp=quote(J[list(I,J)](r)))
  # add pieces manually
  ## elementwise ratio, with NA where the denominator vanishes
  ratio <- function(a, b) {
    result <- a/b
    result[ b == 0 ] <- NA
    result
  }
  ## each correction available in both F and G gives J = (1-G)/(1-F)
  if("raw" %in% bothnames) {
    Jun <- ratio(1-GIJ$raw, 1-FJ$raw)
    Z <- bind.fv(Z, data.frame(un=Jun),
                 makefvlabel(NULL, "hat", fname, "un"),
                 "uncorrected estimate of %s", "un")
  }
  if("rs" %in% bothnames) {
    Jrs <- ratio(1-GIJ$rs, 1-FJ$rs)
    Z <- bind.fv(Z, data.frame(rs=Jrs),
                 makefvlabel(NULL, "hat", fname, "rs"),
                 "border corrected estimate of %s", "rs")
  }
  if("han" %in% Gnames && "cs" %in% Fnames) {
    ## Hanisch-style estimate pairs G's "han" with F's "cs" correction
    Jhan <- ratio(1-GIJ$han, 1-FJ$cs)
    Z <- bind.fv(Z, data.frame(han=Jhan),
                 makefvlabel(NULL, "hat", fname, "han"),
                 "Hanisch-style estimate of %s", "han")
  }
  if("km" %in% bothnames) {
    Jkm <- ratio(1-GIJ$km, 1-FJ$km)
    Z <- bind.fv(Z, data.frame(km=Jkm),
                 makefvlabel(NULL, "hat", fname, "km"),
                 "Kaplan-Meier estimate of %s", "km")
    if("hazard" %in% names(GIJ) && "hazard" %in% names(FJ)) {
      ## difference of hazards estimates the derivative of log J
      Jhaz <- GIJ$hazard - FJ$hazard
      Z <- bind.fv(Z, data.frame(hazard=Jhaz),
                   "hazard(r)",
                   "Kaplan-Meier estimate of derivative of log(%s)")
    }
  }
  # set default plotting values and order
  nama <- names(Z)
  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
  # add other info
  attr(Z, "G") <- GIJ
  attr(Z, "F") <- FJ
  attr(Z, "conserve") <- attr(FJ, "conserve")
  unitname(Z) <- unitname(X)
  return(Z)
}
spatstat/R/interp.im.R0000644000176200001440000000444313333543255014361 0ustar liggesusers#
# interp.im.R
#
#  $Revision: 1.6 $  $Date: 2018/07/30 14:29:25 $
#

interp.im <- local({

  ## look up mat[rrr, ccc] elementwise, returning the corresponding
  ## entry of 'defaults' wherever the indices fall outside the matrix
  lukimyu <- function(ccc, rrr, mat, defaults) {
    dimm <- dim(mat)
    within <- (rrr >= 1 & rrr <= dimm[1L] & ccc >= 1 & ccc <= dimm[2L])
    result <- defaults
    result[within] <- mat[cbind(rrr[within], ccc[within])]
    result
  }

  interp.im <- function(Z, x, y=NULL, bilinear=FALSE) {
    stopifnot(is.im(Z))
    if(!is.null(levels(Z)))
      stop("Interpolation is undefined for factor-valued images")
    xy <- xy.coords(x, y)
    x <-
xy$x y <- xy$y ok <- inside.owin(x,y, as.owin(Z)) V <- Z$v ## get default lookup values (for boundary cases) fallback <- Z[ppp(x[ok], y[ok], window=as.rectangle(Z), check=FALSE)] ## Transform to grid coordinates ## so that pixel centres are at integer points, ## bottom left of image is (0,0) xx <- (x[ok] - Z$xcol[1L])/Z$xstep yy <- (y[ok] - Z$yrow[1L])/Z$ystep ## find grid point to left and below ## (may transgress boundary) xlower <- floor(xx) ylower <- floor(yy) cc <- as.integer(xlower) + 1L rr <- as.integer(ylower) + 1L dx <- xx - xlower dy <- yy - ylower if(bilinear) { ## 'orthodox' values <- ((1-dx) * (1-dy) * lukimyu(cc,rr,V,fallback) + dx * (1-dy) * lukimyu(cc+1,rr,V,fallback) + (1-dx) * dy * lukimyu(cc,rr+1,V,fallback) + dx * dy * lukimyu(cc+1,rr+1,V,fallback) ) } else { ## original & default ## determine whether (x,y) is above or below antidiagonal in square below <- (dx + dy <= 1) ## if below,interpolate Z(x,y) = (1-x-y)Z(0,0) + xZ(1,0) + yZ(0,1) ## if above,interpolate Z(x,y) = (x+y-1)Z(1,1) + (1-x)Z(0,1) + (1-y)Z(1,0) values <- ifelse(below, ( (1-dx-dy)*lukimyu(cc,rr,V,fallback) + dx*lukimyu(cc+1,rr,V,fallback) + dy*lukimyu(cc,rr+1,V,fallback) ), ( (dx+dy-1)*lukimyu(cc+1,rr+1,V,fallback) + (1-dx)*lukimyu(cc,rr+1,V,fallback) + (1-dy)*lukimyu(cc+1,rr,V,fallback) )) } result <- numeric(length(x)) result[ok] <- values result[!ok] <- NA return(result) } interp.im }) spatstat/R/rmhsnoop.R0000644000176200001440000005223713605234113014315 0ustar liggesusers# # rmhsnoop.R # # visual debug mechanism for rmh # # $Revision: 1.33 $ $Date: 2020/01/07 09:14:59 $ # # When rmh is called in visual debug mode (snooping = TRUE), # it calls e <- rmhSnoopEnv(...) to create an R environment 'e' # containing variables that will represent the current state # of the M-H algorithm with initial state X and model reach R. # # The environment 'e' is passed to the C routine xmethas. # This makes it possible for data to be exchanged between # the C and R code. 
# # When xmethas reaches the debugger's stopping time, # the current state of the simulation and the proposal # are copied from C into the R environment 'e'. # # Then to execute the visual display, the C code calls # 'eval' to execute the R function rmhsnoop(). # # The function rmhsnoop uses the 'simplepanel' class # to generate a plot showing the state of the simulation # and the proposal, and then wait for point-and-click input using # locator(). # # When rmhsnoop() exits, it returns an integer giving the # (user-specified) next stopping time. This is read back into # the C code. Then xmethas resumes simulations. # # I said it was simple! %^] rmhSnoopEnv <- function(Xinit, Wclip, R) { stopifnot(is.ppp(Xinit)) # Create an environment that will be accessible to R and C code e <- new.env() # initial state (point pattern) X <- Xinit assign("Wsim", as.owin(X), envir=e) assign("xcoords", coords(X)[,1], envir=e) assign("ycoords", coords(X)[,2], envir=e) if(is.multitype(X)) { mcodes <- as.integer(marks(X)) - 1L mlevels <- levels(marks(X)) assign("mcodes", mcodes, envir=e) assign("mlevels", mlevels, envir=e) } else { assign("mcodes", NULL, envir=e) assign("mlevels", NULL, envir=e) } # clipping window assign("Wclip", Wclip, envir=e) # reach of model (could be infinite) assign("R", R, envir=e) # current iteration number assign("irep", 0L, envir=e) # next iteration to be inspected assign("inxt", 1L, envir=e) # next transition to be inspected assign("tnxt", 1L, envir=e) # proposal type assign("proptype", NULL, envir=e) # outcome of proposal assign("itype", NULL, envir=e) # proposal location assign("proplocn", NULL, envir=e) # proposal mark assign("propmark", NULL, envir=e) # index of proposal point in existing pattern assign("propindx", NULL, envir=e) # Hastings ratio assign("numerator", NULL, envir=e) assign("denominator", NULL, envir=e) # Expression actually evaluated to execute visual debug # Expression is evaluated in the environment 'e' snoopexpr <- expression({ rslt <- 
rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R, xcoords=xcoords, ycoords=ycoords, mlevels=mlevels, mcodes=mcodes, irep=irep, itype=itype, proptype=proptype, proplocn=proplocn, propmark=propmark, propindx=propindx, numerator=numerator, denominator=denominator) inxt <- rslt$inxt tnxt <- rslt$tnxt itype <- if(rslt$accepted) rslt$itype else 0 storage.mode(tnxt) <- storage.mode(inxt) <- storage.mode(itype) <- "integer" }) assign("snoopexpr", snoopexpr, envir=e) # callback expression assign("callbackexpr", quote(eval(snoopexpr)), envir=e) return(e) } # visual debug display using base graphics rmhsnoop <- local({ rmhsnoop <- function(..., Wsim, Wclip, R, xcoords, ycoords, mlevels=NULL, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator, panel.only=FALSE) { trap.extra.arguments(..., .Context="In rmhsnoop") X <- ppp(xcoords, ycoords, window=Wsim) if(ismarked <- (length(mlevels) > 0)) marks(X) <- factor(mlevels[mcodes+1L], levels=mlevels) Wclip.orig <- Wclip # determine plot arguments if(is.mask(Wclip)) { parg.Wclip <- list(invert=TRUE, col="grey") } else { Wclip <- edges(Wclip) parg.Wclip <- list(lty=3, lwd=2, col="grey") } parg.birth <- list(cols="green", lwd=3) parg.death <- list(cols="red", lwd=3) parg.birthcircle <- list(col="green", lty=3) parg.deathcircle <- list(col="red", lty=3) # assemble a layered object representing the state and the proposal if(is.null(proptype)) { # initial state L <- layered(Wsim, Wclip, X) layerplotargs(L)$Wclip <- parg.Wclip accepted <- TRUE } else { accepted <- (itype == proptype) # add proposal info switch(decode.proptype(proptype), Reject= { propname <- "rejected" L <- layered(Wsim=Wsim, Wclip=Wclip, X=X) layerplotargs(L)$Wclip <- parg.Wclip }, Birth = { propname <- "birth proposal" U <- ppp(proplocn[1L], proplocn[2L], window=Wsim) if(ismarked) marks(U) <- factor(mlevels[propmark+1L], levels=mlevels) D <- if(is.finite(R) && R > 0) { edges(disc(R, proplocn))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, 
PrevState=X, Reach=D, NewPoint=U) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$NewPoint <- parg.birth }, Death = { propname <- "death proposal" # convert from C to R indexing propindx <- propindx + 1 XminI <- X[-propindx] XI <- X[propindx] D <- if(is.finite(R) && R > 0) { edges(disc(R, c(XI$x, XI$y)))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, RetainedPoints=XminI, Reach=D, Deletion=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$Reach <- parg.deathcircle layerplotargs(L)$Deletion <- parg.death }, Shift = { propname <- "shift proposal" # convert from C to R indexing propindx <- propindx + 1L # make objects XminI <- X[-propindx] XI <- X[propindx] U <- ppp(proplocn[1L], proplocn[2L], window=Wsim) if(ismarked) marks(U) <- factor(mlevels[propmark+1L], levels=mlevels) if(is.finite(R) && R > 0) { DU <- edges(disc(R, proplocn))[Wsim] DXI <- edges(disc(R, c(XI$x, XI$y)))[Wsim] } else { DU <- DXI <- NULL } # make layers L <- layered(Wsim=Wsim, Wclip=Wclip, OtherPoints=XminI, ReachAfter=DU, AfterShift=U, ReachBefore=DXI, BeforeShift=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$ReachAfter <- parg.birthcircle layerplotargs(L)$AfterShift <- parg.birth layerplotargs(L)$ReachBefore <- parg.deathcircle layerplotargs(L)$BeforeShift <- parg.death }, stop("Unrecognised proposal type") ) } header <- c(paste("Iteration", irep), propname, paste("Hastings ratio =", signif(numerator, 4), "/", signif(denominator, 4), "=", signif(numerator/denominator, 4))) info <- list(irep=irep, Wsim=Wsim, Wclip=Wclip.orig, X=X, proptype=proptype, proplocn=proplocn, propindx=propindx, propmark=propmark, mlevels=mlevels, accepted=accepted, numerator=numerator, denominator=denominator) inspectProposal(L, info, title=header, panel.only=panel.only) } decode.proptype <- function(n) { if(n < 0 || n > 3) stop(paste("Unrecognised proposal type:", n)) switch(n+1, "Reject", "Birth", "Death", "Shift") } encode.proptype <- function(s) { switch(s, Reject=0, Birth=1, Death=2, 
Shift=3) } inspectProposal <- function(X, info, ..., title, panel.only=FALSE) { if(missing(title)) title <- short.deparse(substitute(X)) if(!inherits(X, "layered")) X <- layered(X) lnames <- names(X) if(sum(nzchar(lnames)) != length(X)) lnames <- paste("Layer", seq_len(length(X))) # Find window and bounding box (validates X) W <- as.owin(X) BX <- as.rectangle(W) # Initialise environment for state variables etc # This environment is accessible to the panel button functions en <- new.env() assign("X", X, envir=en) assign("W", W, envir=en) assign("BX", BX, envir=en) assign("zoomfactor", 1L, envir=en) midX <- unlist(centroid.owin(BX)) assign("midX", midX, envir=en) assign("zoomcentre", midX, envir=en) assign("irep", info$irep, envir=en) assign("inxt", info$irep+1, envir=en) assign("tnxt", -1, envir=en) assign("accepted", info$accepted, envir=en) assign("proplocn", info$proplocn, envir=en) assign("info", info, envir=en) # Build interactive panel # Start with data panel P <- simplepanel(title, BX, list(Data=BX), list(Data=dataclickfun), list(Data=dataredrawfun), snoopexit, en) # Add pan buttons margin <- max(sidelengths(BX))/4 panelwidth <- sidelengths(BX)[1L]/2 P <- grow.simplepanel(P, "top", margin, navfuns["Up"], aspect=1) P <- grow.simplepanel(P, "bottom", margin, navfuns["Down"], aspect=1) P <- grow.simplepanel(P, "left", margin, navfuns["Left"], aspect=1) P <- grow.simplepanel(P, "right", margin, navfuns["Right"], aspect=1) # Zoom/Pan buttons at right P <- grow.simplepanel(P, "right", panelwidth, zoomfuns) # Accept/reject buttons at top P <- grow.simplepanel(P, "top", margin, accept.clicks, accept.redraws) # Dump/print buttons at bottom P <- grow.simplepanel(P, "bottom", margin, dumpfuns) # Jump controls at left maxchars <- max(4, nchar(names(jump.clicks))) P <- grow.simplepanel(P, "left", panelwidth * maxchars/6, jump.clicks) ## exit for debug/test code if(panel.only) return(P) ## go rslt <- run.simplepanel(P, popup=FALSE) clear.simplepanel(P) rm(en) return(rslt) 
} # button control functions zoomfuns <- rev(list( "Zoom In"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z * 2, envir=env) return(TRUE) }, "Zoom Out"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z / 2, envir=env) return(TRUE) }, "At Proposal"=function(env, xy) { proplocn <- get("proplocn", envir=env) assign("zoomcentre", proplocn, envir=env) return(TRUE) }, Reset=function(env, xy) { assign("zoomfactor", 1L, envir=env) midX <- get("midX", envir=env) assign("zoomcentre", midX, envir=env) return(TRUE) })) navfuns <- list( Left = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1L] stepsize <- (width/4)/zoom ce <- ce - c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Right = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1L] stepsize <- (width/4)/zoom ce <- ce + c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Up = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce + c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }, Down = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce - c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }) accept.clicks <- rev(list( Accept=function(env, xy) { assign("accepted", TRUE, envir=env) return(TRUE) }, Reject=function(env, xy) { assign("accepted", FALSE, envir=env) return(TRUE) })) accept.redraws <- rev(list( Accept=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE, col="green") } else { plot(button, 
add=TRUE) } text(centroid.owin(button), labels=name) }, Reject=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE) } else { plot(button, add=TRUE, col="pink") } text(centroid.owin(button), labels=name) })) jump.clicks <- rev(list( "Next Iteration"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1, envir=env) return(FALSE) }, "Skip 10"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+10, envir=env) return(FALSE) }, "Skip 100"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100, envir=env) return(FALSE) }, "Skip 1000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1000, envir=env) return(FALSE) }, "Skip 10,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+10000, envir=env) return(FALSE) }, "Skip 100,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100000, envir=env) return(FALSE) }, "Next Birth"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Birth"), envir=env) return(FALSE) }, "Next Death"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Death"), envir=env) return(FALSE) }, "Next Shift"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Shift"), envir=env) return(FALSE) }, "Exit Debugger"=function(env, xy) { assign("inxt", -1L, envir=env) return(FALSE) })) dataclickfun <- function(env, xy) { # function for handling clicks in the data window z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) midX <- get("midX", envir=env) ce <- ce + (unlist(xy) - midX)/z assign("zoomcentre", ce, envir=env) return(TRUE) } dataredrawfun <- function(button, name, env) { # redraw data window X <- get("X", envir=env) BX <- get("BX", envir=env) W <- get("W", envir=env) midX <- get("midX", envir=env) z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) scaleX <- 
shift(affine(shift(X, -ce), diag(c(z,z))), unlist(midX)) scaleW <- shift(affine(shift(W, -ce), diag(c(z,z))), unlist(midX)) scaleX <- scaleX[, BX] scaleW <- intersect.owin(scaleW, BX, fatal=FALSE) # redraw data in 'BX' if(!is.null(scaleW)) { if(z == 1 && is.rectangle(scaleW)) { plot(scaleW, add=TRUE, lwd=2) } else { plot(BX, add=TRUE, lty=3, border="red") if(!identical(BX, scaleW)) plot(scaleW, add=TRUE, invert=TRUE) } } if(!is.null(scaleX)) plot(scaleX, add=TRUE) invisible(NULL) } # functions to dump the current state, etc dumpfuns <- list( "Dump to file"=function(env, xy) { irep <- get("irep", envir=env) X <- get("X", envir=env) xname <- paste("dump", irep, sep="") assign(xname, X) fname <- paste(xname, ".rda", sep="") eval(substitute(save(x, file=y, compress=TRUE), list(x=xname, y=fname))) splat("Saved to", sQuote(fname)) return(TRUE) }, "Print Info"=function(env, xy) { info <- get("info", envir=env) will.accept <- get("accepted", envir=env) cat("\n\n------------------- \n") with(info, { splat("Iteration", irep) splat("Simulation window:") print(Wsim) splat("Clipping window:") print(Wclip) splat("Current state:") print(X) propname <- decode.proptype(proptype) splat("Proposal type:", propname) switch(propname, Reject = { }, Birth = { splat("Birth of new point at location", pastepoint(proplocn, propmark, mlevels)) }, Death = { Xi <- X[propindx] splat("Death of data point", propindx, "located at", pastepoint(Xi)) }, Shift = { Xi <- X[propindx] splat("Shift data point", propindx, "from current location", pastepoint(Xi), "to new location", pastepoint(proplocn, propmark, mlevels)) }) splat("Hastings ratio = ", numerator, "/", denominator, "=", numerator/denominator) splat("Fate of proposal:", if(will.accept) "Accepted" else "Rejected") return(TRUE) }) }) pastepoint <- function(X, markcode, marklevels) { if(is.ppp(X)) { xy <- coords(X) m <- if(is.marked(X)) dQuote(marks(X)) else NULL } else { xy <- X m <- if(length(marklevels)) dQuote(marklevels[markcode+1L]) else NULL 
} xy <- signif(as.numeric(xy), 6) paren(paste(c(xy, m), collapse=", ")) } # function to determine return value snoopexit <- function(env) { ans <- eval(quote(list(inxt=inxt, tnxt=tnxt, accepted=accepted)), envir=env) return(ans) } rmhsnoop }) # testit <- function() { # rmhsnoop(Wsim=owin(), Wclip=square(0.7), R=0.1, # xcoords=runif(40), # ycoords=runif(40), # mlevels=NULL, mcodes=NULL, # irep=3, itype=1, # proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, # numerator=42, denominator=24) # } spatstat/R/densityAdaptiveKernel.R0000644000176200001440000000505513440104523016740 0ustar liggesusers#' #' densityAdaptiveKernel.R #' #' $Revision: 1.4 $ $Date: 2019/03/07 02:58:08 $ #' #' #' Adaptive kernel smoothing via 3D FFT #' densityAdaptiveKernel <- function(X, ...) { UseMethod("densityAdaptiveKernel") } densityAdaptiveKernel.ppp <- function(X, bw, ..., weights=NULL, at=c("pixels", "points"), edge=TRUE, ngroups) { stopifnot(is.ppp(X)) at <- match.arg(at) nX <- npoints(X) if(nX == 0) switch(at, points = return(numeric(nX)), pixels = return(as.im(0, W=Window(X), ...))) if(missing(ngroups) || is.null(ngroups)) { ngroups <- max(1L, floor(sqrt(npoints(X)))) } else if(any(is.infinite(ngroups))) { ngroups <- nX } else { check.1.integer(ngroups) ngroups <- min(nX, ngroups) } if(weighted <- !is.null(weights)) { check.nvector(weights, nX, oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, nX) } else weights <- rep(1,nX) ## determine bandwidth for each data point if(missing(bw) || is.null(bw)) { bw <- do.call.matched(bw.abram, resolve.defaults(list(X=X, at="points"), list(...)), extrargs=names(args(as.mask))) } else if(is.numeric(bw)) { check.nvector(bw, nX, oneok=TRUE) if(length(bw) == 1) bw <- rep(bw, nX) } else if(is.im(bw)) { bw <- safelookup(bw, X, warn=FALSE) if(anyNA(bw)) stop("Some data points lie outside the domain of image 'bw'", call.=FALSE) } else if(inherits(bw, "funxy")) { bw <- bw(X) if(anyNA(bw)) stop("Some data points lie outside the domain of 
function 'bw'",
           call.=FALSE)
  } else stop("Argument 'bw' should be a numeric vector or a pixel image")
  #' divide bandwidths into groups
  #' (quantile breakpoints give roughly equal numbers of points per group)
  p <- seq(0,1,length=ngroups+1)
  qbands <- quantile(bw, p)
  groupid <- findInterval(bw,qbands,all.inside=TRUE)
  #' map to middle of group
  pmid <- (p[-1] + p[-length(p)])/2
  qmid <- quantile(bw, pmid)
  marks(X) <- if(weighted) weights else NULL
  group <- factor(groupid, levels=1:ngroups)
  Y <- split(X, group)
  ## smooth each bandwidth group with a fixed bandwidth (the group midpoint
  ## quantile), then recombine
  Z <- mapply(density.ppp,
              x=Y,
              sigma=as.list(qmid),
              weights=lapply(Y, marks),
              MoreArgs=list(edge=edge, at=at, ...),
              SIMPLIFY=FALSE)
  ZZ <- switch(at,
               pixels = im.apply(Z, "sum"),
               points = unsplit(Z, group))
  return(ZZ)
}
spatstat/R/harmonic.R0000644000176200001440000000327613333543255014257 0ustar liggesusers#
#
#    harmonic.R
#
#    $Revision: 1.2 $	$Date: 2004/01/07 08:57:39 $
#
#    harmonic()
#    Analogue of polynom() for harmonic functions only
#
# -------------------------------------------------------------------
#

# Build a matrix whose columns are a basis of harmonic polynomials in (x, y)
# up to degree n (n = 1, 2 or 3), for use in model formulae.
# Column names are constructed from the deparsed argument expressions.
harmonic <- function(x,y,n) {
  if(missing(n))
    stop("the order n must be specified")
  n <- as.integer(n)
  if(is.na(n) || n <= 0)
    stop("n must be a positive integer")
  if(n > 3)
    stop("Sorry, harmonic() is not implemented for degree > 3")
  ## deparse the argument expressions to make readable column names;
  ## parenthesise anything that is not a simple variable name
  namex <- deparse(substitute(x))
  namey <- deparse(substitute(y))
  if(!is.name(substitute(x)))
    namex <- paste("(", namex, ")", sep="")
  if(!is.name(substitute(y)))
    namey <- paste("(", namey, ")", sep="")
  switch(n,
         {
           ## degree 1: x, y
           result <- cbind(x, y)
           names <- c(namex, namey)
         },
         {
           ## degree 2: adds xy and x^2 - y^2
           result <- cbind(x, y, x*y, x^2-y^2)
           names <- c(namex, namey,
                      paste("(", namex, ".", namey, ")", sep=""),
                      paste("(", namex, "^2-", namey, "^2)", sep=""))
         },
         {
           ## degree 3: adds the degree-3 harmonics
           result <- cbind(x, y, x * y, x^2-y^2,
                           x^3 - 3 * x * y^2,
                           y^3 - 3 * x^2 * y)
           names <- c(namex, namey,
                      paste("(", namex, ".", namey, ")", sep=""),
                      paste("(", namex, "^2-", namey, "^2)", sep=""),
                      paste("(", namex, "^3-3", namex, ".", namey, "^2)",
                            sep=""),
                      paste("(", namey, "^3-3", namex, "^2.", namey, ")",
                            sep="")
                      )
         }
         )
  dimnames(result) <- list(NULL, names)
  return(result)
}
spatstat/R/sharpen.R0000644000176200001440000000406213333543255014111 0ustar liggesusers#
#   sharpen.R
#
#   $Revision: 1.6 $  $Date: 2013/08/29 03:52:17 $
#

sharpen <- function(X, ...) {
  UseMethod("sharpen")
}

# Choi-Hall data sharpening of a point pattern:
# each point is moved to the kernel-smoothed average of the coordinates of
# nearby points.  Optionally an edge-bias correction is applied.
sharpen.ppp <- function(X, sigma=NULL, ..., varcov=NULL,
                        edgecorrect=FALSE) {
  stopifnot(is.ppp(X))
  ## smoothed x and y coordinates evaluated at the data points
  Yx <- Smooth(X %mark% X$x, at="points", sigma=sigma, varcov=varcov,
               edge=TRUE)
  Yy <- Smooth(X %mark% X$y, at="points", sigma=sigma, varcov=varcov,
               edge=TRUE)
  # trap NaN etc
  nbad <- sum(!(is.finite(Yx) & is.finite(Yy)))
  if(nbad > 0)
    stop(paste(nbad, ngettext(nbad, "point is", "points are"),
               "undefined due to numerical problems;",
               "smoothing parameter is probably too small"))
  #
  W <- as.owin(X)
  if(edgecorrect) {
    # convolve x and y coordinate functions with kernel
    xim <- as.im(function(x,y){x}, W)
    yim <- as.im(function(x,y){y}, W)
    xblur <- blur(xim, sigma=sigma, varcov=varcov, normalise=TRUE, ...)
    yblur <- blur(yim, sigma=sigma, varcov=varcov, normalise=TRUE, ...)
    # evaluate at data locations
    xx <- safelookup(xblur, X, warn=FALSE)
    yy <- safelookup(yblur, X, warn=FALSE)
    # estimated vector bias of sharpening procedure
    xbias <- xx - X$x
    ybias <- yy - X$y
    # adjust
    Yx <- Yx - xbias
    Yy <- Yy - ybias
    # check this does not place points outside window
    if(any(uhoh <- !inside.owin(Yx, Yy, W))) {
      # determine mass of edge effect
      edgeim <- blur(as.im(W), sigma=sigma, varcov=varcov,
                     normalise=FALSE, ...)
      edg <- safelookup(edgeim, X[uhoh], warn=FALSE)
      # contract bias correction
      # (shrink the correction toward the original location in proportion
      #  to the edge mass, so the point stays inside the window)
      Yx[uhoh] <- (1 - edg) * X$x[uhoh] + edg * Yx[uhoh]
      Yy[uhoh] <- (1 - edg) * X$y[uhoh] + edg * Yy[uhoh]
    }
    # check again
    if(any(nbg <- !inside.owin(Yx, Yy, W))) {
      # give up
      Yx[nbg] <- X$x[nbg]
      Yy[nbg] <- X$y[nbg]
    }
  }
  # make point pattern
  Y <- ppp(Yx, Yy, marks=marks(X), window=W)
  # tack on smoothing information
  attr(Y, "sigma") <- sigma
  attr(Y, "varcov") <- varcov
  attr(Y, "edgecorrected") <- edgecorrect
  return(Y)
}
spatstat/R/beginner.R0000644000176200001440000000174113333543254014242 0ustar liggesusers#
#  beginner.R
#
#  Helpful information for beginners
#
#  $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $
#

# Objects of class "autoexec" run themselves when printed at the prompt.
print.autoexec <- function(x, ...) { x() }

# Display the beginner's introduction document shipped with the package.
beginner <- function(package="spatstat") {
  package <- as.character(substitute(package))
  RShowDoc("BEGINNER.txt", type="txt", package=package)
  return(invisible(NULL))
}
class(beginner) <- "autoexec"

# 'foo' is an easter egg: typing foo at the prompt prints an explanation
# of placeholder names.
foo <- local({
  fooText <- paste0("Error: object 'foo' not found.\n\n",
                    "'foo' is not a defined variable or function.\n",
                    "It is a placeholder name, which serves only to ",
                    "demonstrate a concept. It represents the name of ",
                    "any desired object or function. ",
                    "Other placeholder names popular with computer scientists ",
                    "are 'bar', 'foobar', 'qux' and 'mork'.")
  foo <- function() {
    splat(fooText)
    return(invisible(NULL))
  }
  class(foo) <- "autoexec"
  foo
})

plot.foo <- function(x, ...)
foo()
spatstat/R/indices.R0000644000176200001440000001661413567643063014074 0ustar liggesusers#'
#'   indices.R
#'
#'  Code for handling vector/array indices
#'
#'  $Revision: 1.11 $  $Date: 2019/11/28 03:10:14 $
#'

grokIndexVector <- function(ind, len, nama=NULL) {
  #' Parse any kind of index vector,
  #' returning
  #'     a logical index 'lo' (the subset of elements),
  #'     a positive integer index 'i' ( = which(lo) ),
  #'     the number 'n' of values required
  #'     the number 'nind' of values indexed
  #' and if appropriate
  #'     a character vector 's' of names
  #'     a mapping 'map' (matching 'ind' to 'i')
  #'
  #' There are two versions:
  #'     'strict' (confined to specified bounds 1:len
  #'               and specified names 'nama')
  #'     'full'   (allowing implied extension of array bounds)
  named <- !is.null(nama)
  if(missing(len) && named) len <- length(nama)
  force(len)
  # special cases
  if(is.null(ind)) {
    #' all entries (implied)
    return(list(strict=list(lo=rep(TRUE, len), i=seq_len(len), n=len,
                            s=nama, nind=len, map=NULL)))
  }
  if(length(ind) == 0) {
    #' no entries
    return(list(strict=list(lo=logical(len), i=integer(0), n=0L,
                            s=character(0), nind=0L, map=NULL)))
  }
  #' main cases
  if(is.logical(ind)) {
    # logical (subset) index into 1:len
    lo <- ind
    m <- length(lo)
    if(m < len) {
      #' recycle the short logical index up to length 'len'
      oldlo <- lo
      lo <- logical(len)
      lo[oldlo] <- TRUE
      m <- len
    }
    if(m == len) {
      n <- sum(lo)
      result <- list(strict=list(lo=lo, i=which(lo), n=n,
                                 s=nama[lo], nind=n, map=NULL))
      return(result)
    }
    #' new elements implied (index longer than 1:len)
    lostrict <- lo[1:len]
    newones <- (len+1L):m
    nstrict <- sum(lostrict)
    strict <- list(lo=lostrict, i=which(lostrict), n=nstrict,
                   s=nama[lostrict], nind=nstrict, map=NULL)
    nfull <- sum(lo)
    full <- list(newones=newones,
                 fullset=1:m,
                 lo=lo, i=which(lo), n=nfull,
                 s=if(named) c(nama, rep("", length(newones)))[lo] else NULL,
                 nind=nfull, map=NULL)
    return(list(strict=strict, full=full))
  }
  if(is.character(ind)) {
    #' character index into 'nama'
    #' order is important
    imap <- match(ind, nama)
    unknown <- is.na(imap)
    i <- sortunique(imap[!unknown])
    lo <- logical(len)
    lo[i] <- TRUE
    map <- match(imap, i)
    n <- length(ind)
    #' NOTE(review): 's' is indexed by 'map' (positions within 'i'),
    #' not by 'imap' (positions within 'nama') — verify intended.
    s <- nama[map]
    nind <- length(ind)
    if(identical(map, seq_along(map))) map <- NULL
    #' BUGFIX: 'nind' was previously an unnamed list element
    #' (list(..., s=s, nind, map=map)), so g$strict$nind returned NULL
    #' in this branch; now named, consistent with all other branches.
    strict <- list(lo=lo, i=i, n=n, s=s, nind=nind, map=map)
    if(!any(unknown))
      return(list(strict=strict))
    # some unrecognised strings: extend the name set
    newones <- unique(ind[unknown])
    fullset <- c(nama, newones)
    imapfull <- match(ind, fullset)
    ifull <- sortunique(imapfull)
    lofull <- logical(length(fullset))
    lofull[ifull] <- TRUE
    mapfull <- match(imapfull, ifull)
    nfull <- length(ind)
    sfull <- fullset[mapfull]
    if(identical(mapfull, seq_along(mapfull))) mapfull <- NULL
    full <- list(newones=newones, fullset=fullset,
                 lo=lofull, i=ifull, n=nfull, s=sfull,
                 nind=nind, map=mapfull)
    return(list(strict=strict, full=full))
  }
  if(is.numeric(ind)) {
    if(all(ind > 0)) {
      #' integer index into 1:len
      #' order is important
      ifull <- sortunique(ind)
      inside <- (ifull <= len)
      i <- ifull[inside]
      map <- match(ind, i)
      lo <- logical(len)
      lo[i] <- TRUE
      n <- length(ind)
      s <- nama[ind]
      if(identical(map, seq_along(map))) map <- NULL
      strict <- list(lo=lo, i=i, n=n, s=s, nind=length(i), map=map)
      if(all(inside))
        return(list(strict=strict))
      #' some entries exceed 'len': implied extension of bounds
      newones <- ifull[!inside]
      mapfull <- match(ind, ifull)
      fullset <- 1:max(ifull)
      lofull <- logical(length(fullset))
      lofull[ifull] <- TRUE
      nfull <- length(ind)
      sfull <- if(named) c(nama, rep("", length(newones)))[ind] else NULL
      if(identical(mapfull, seq_along(mapfull))) mapfull <- NULL
      return(list(strict=strict,
                  full=list(newones=newones, fullset=fullset,
                            lo=lofull, i=ifull, n=nfull, s=sfull,
                            nind=nfull, map=mapfull)))
    }
    if(all(ind < 0)) {
      #' exclusion index
      #' ignore indices outside bounds
      negind <- -ind
      negind <- negind[negind <= len]
      lo <- rep(TRUE, len)
      lo[negind] <- FALSE
      i <- which(lo)
      n <- length(i)
      map <- seq_len(n)
      return(list(strict=list(lo=lo, i=i, n=n, s=nama[i],
                              nind=n, map=map)))
    }
    stop("An integer index may not contain both negative and positive values",
         call.=FALSE)
  }
  stop("Unrecognised format for index", call.=FALSE)
}

#' g is the result of 'grokIndexVector'
strictIndexSequence <- function(g) { g$strict$i }

fullIndexSequence <- function(g) { g$full$i %orifnull% g$strict$i }

replacementIndex <- function(ii, stuff) {
  ## 'stuff' is predigested information about a subset index.
  ## Find the location in the original array
  ## whose value should be replaced by the 'ii'-th replacement value
  ## according to this info.
  if(length(stuff) == 0)
    stop("Internal error - no predigested info", call.=FALSE)
  with(stuff, {
    if(!is.null(map)) ii <- map[ii]
    i[ii]
  })
}

positiveIndex <- function(i, nama, len=length(nama)) {
  #' convert any kind of index to a positive integer sequence
  x <- seq_len(len)
  if(is.null(i)) return(x)
  stopifnot(is.vector(i))
  if(is.numeric(i) && !all(ok <- (abs(i) <= len))) {
    warning("Index values lie outside array bounds", call.=FALSE)
    i <- i[ok]
  }
  names(x) <- nama
  y <- x[i]
  return(unname(y))
}

logicalIndex <- function(i, nama, len=length(nama)) {
  #' convert any kind of index to a logical vector
  if(is.null(i)) return(rep(TRUE, len))
  stopifnot(is.vector(i))
  if(is.numeric(i) && !all(ok <- (abs(i) <= len))) {
    warning("Index values lie outside array bounds", call.=FALSE)
    i <- i[ok]
  }
  x <- logical(len)
  names(x) <- nama
  x[i] <- TRUE
  return(unname(x))
}

#' convert any appropriate subset index for any kind of point pattern
#' to a logical vector
ppsubset <- function(X, I, Iname, fatal=FALSE) {
  if(missing(Iname)) Iname <- deparse(substitute(I))
  # I could be a window or logical image
  if(is.im(I)) I <- solutionset(I)
  if((is.ppp(X) || is.lpp(X)) && is.owin(I)) {
    I <- inside.owin(X, w=I)
    return(I)
  }
  if((is.pp3(X) && inherits(I, "box3")) ||
     (is.ppx(X) && inherits(I, "boxx"))) {
    I <- inside.boxx(X, w=I)
    return(I)
  }
  # I could be a function to be applied to X
  if(is.function(I)) {
    I <- I(X)
    if(!is.vector(I)) {
      whinge <- paste("Function", sQuote(Iname), "did not return a vector")
      if(fatal) stop(whinge, call.=FALSE)
      warning(whinge, call.=FALSE)
      return(NULL)
    }
  }
  # I is now a subset index: convert to logical
  I <-
grokIndexVector(I, npoints(X))$strict$lo
  if(anyNA(I)) {
    #' illegal entries
    whinge <- paste("Indices in", sQuote(Iname), "exceed array limits")
    if(fatal) stop(whinge, call.=FALSE)
    warning(whinge, call.=FALSE)
    return(NULL)
  }
  return(I)
}
spatstat/R/measures.R0000644000176200001440000006564413606323512014305 0ustar liggesusers#
#   measures.R
#
#  signed/vector valued measures with atomic and diffuse components
#
#  $Revision: 1.90 $  $Date: 2020/01/11 10:41:21 $
#

# Constructor for objects of class "msr".
#   qscheme:  quadrature scheme (class "quad") supplying locations & weights
#   discrete: atomic masses, one per data point (vector or matrix)
#   density:  density of the diffuse component, one per quadrature point
# Scalar inputs are recycled; vector and matrix components are made
# conformable.  Returns a list of class "msr".
msr <- function(qscheme, discrete, density, check=TRUE) {
  if(!is.quad(qscheme))
    stop("qscheme should be a quadrature scheme")
  nquad <- n.quad(qscheme)
  U <- union.quad(qscheme)
  wt <- w.quad(qscheme)
  Z <- is.data(qscheme)    # logical: which quadrature points are data points
  ndata <- sum(Z)
  # ensure conformable vectors/matrices
  stopifnot(is.numeric(discrete) || is.logical(discrete))
  stopifnot(is.numeric(density))
  if(is.vector(discrete) && is.vector(density)) {
    ## scalar-valued measure
    # handle constants
    if(length(discrete) == 1)
      discrete <- rep.int(discrete, ndata)
    if(length(density) == 1)
      density <- rep.int(density, nquad)
    # check lengths
    if(check) {
      check.nvector(discrete, ndata, things="data points", naok=TRUE)
      check.nvector(density, nquad, things="quadrature points", naok=TRUE)
    }
    ## pad the atomic masses out to one entry per quadrature point
    discretepad <- numeric(nquad)
    discretepad[Z] <- discrete
  } else {
    ## vector-valued measure
    if(length(discrete) == 1 && is.matrix(density)) {
      # replicate constant 'discrete' component to matrix of correct size
      discrete <- matrix(discrete, ndata, ncol(density))
    } else if(length(density) == 1 && is.matrix(discrete)) {
      # replicate constant 'density' to matrix of correct size
      density <- matrix(density, nquad, ncol(discrete))
    } else {
      discrete <- as.matrix(discrete)
      density <- as.matrix(density)
    }
    if(check) {
      # check numbers of rows
      check.nmatrix(discrete, ndata, things="data points",
                    naok=TRUE, squarematrix=FALSE)
      check.nmatrix(density, nquad, things="quadrature points",
                    naok=TRUE, squarematrix=FALSE)
    }
    nd <- ncol(discrete)
    nc <- ncol(density)
    if(nd != nc) {
      if(nd == 1) {
        # replicate columns of discrete component
        discrete <- matrix(rep.int(discrete, nc), ndata, nc)
        colnames(discrete) <- colnames(density)
      } else if(nc == 1) {
        # replicate columns of density component
        density <- matrix(rep.int(density, nd), nquad, nd)
        colnames(density) <- colnames(discrete)
      } else
        stop(paste("Incompatible numbers of columns in",
                   sQuote("discrete"), paren(nd), "and",
                   sQuote("density"), paren(nc)))
    }
    discretepad <- matrix(0, nquad, max(nd, nc))
    discretepad[Z, ] <- discrete
    colnames(discretepad) <- colnames(density)
  }
  ##
  ## Discretised measure (value of measure for each quadrature tile)
  ##
  val <- discretepad + wt * density
  if(is.matrix(density)) colnames(val) <- colnames(density)
  ##
  ##
  if(check && !all(ok <- complete.cases(val))) {
    warning("Some infinite, NA or NaN increments were removed", call.=FALSE)
    ## multiplying by the logical 'ok' zeroes out the bad rows
    val <- ok * val
    discretepad <- ok * discretepad
    density <- ok * density
  }
  ## finished
  out <- list(loc = U,
              val = val,
              atoms = Z,
              discrete = discretepad,
              density = density,
              wt = wt)
  class(out) <- "msr"
  return(out)
}

# Stub: currently does nothing (returns NULL).
# NOTE(review): body is empty — presumably reserved for removing
# negligible entries; confirm before relying on it.
weed.msr <- function(x) {
}

# Translation table for usage of measures
#
#    e.g. res <- residuals(fit, ...)
#
#       OLD                               NEW
#     res[ ]                       res$val[ ]       with(res, "increment")
#     attr(res, "atoms")           res$atoms        with(res, "is.atom")
#     attr(res, "discrete")        res$discrete     with(res, "discrete")
#     attr(res, "continuous")      res$density      with(res, "density")
#     w.quad(quad.ppm(fit))        res$wt           with(res, "qweights")
#     union.quad(quad.ppm(fit))    res$loc          with(res, "qlocations")
#
# .................................................

with.msr <- function(data, expr, ...)
{ stopifnot(inherits(data, "msr")) stuff <- list(increment = data$val, is.atom = data$atoms, discrete = data$discrete, density = data$density, continuous = data$density * data$wt, qweights = data$wt, qlocations = data$loc, atoms = data$loc[data$atoms], atommass = marksubset(data$discrete, data$atoms)) y <- eval(substitute(expr), envir=stuff, enclos=parent.frame()) if(is.character(y) && length(y) == 1 && y %in% names(stuff)) y <- stuff[[y]] return(y) } print.msr <- function(x, ...) { xloc <- x$loc n <- npoints(xloc) d <- ncol(as.matrix(x$val)) splat(paste0(if(d == 1) "Scalar" else paste0(d, "-dimensional vector"), "-valued measure")) if(d > 1 && !is.null(cn <- colnames(x$val)) && waxlyrical("space")) splat("vector components:", commasep(sQuote(cn))) if(is.marked(xloc)) { splat("\tDefined on 2-dimensional space x marks") if(is.multitype(xloc)) exhibitStringList("\tPossible marks: ", levels(marks(xloc))) } if(waxlyrical("gory")) { splat("Approximated by", n, "quadrature points") print(as.owin(xloc)) splat(sum(x$atoms), "atoms") } if(waxlyrical("extras")) { splat("Total mass:") if(d == 1) { splat("discrete =", signif(sum(with(x, "discrete")), 5), " continuous =", signif(sum(with(x, "continuous")), 5), " total =", signif(sum(with(x, "increment")), 5)) } else { if(is.null(cn)) cn <- paste("component", 1:d) for(j in 1:d) { splat(paste0(cn[j], ":\t"), "discrete =", signif(sum(with(x, "discrete")[,j]), 5), " continuous =", signif(sum(with(x, "continuous")[,j]), 5), " total =", signif(sum(with(x, "increment")[,j]), 5)) } } } return(invisible(NULL)) } summary.msr <- function(object, ...) { print(object) } is.multitype.msr <- function(X, ...) { is.multitype(X$loc, ...) } is.marked.msr <- function(X, ...) { is.marked(X$loc, ...) } split.msr <- function(x, f, drop=FALSE, ...) 
{ xloc <- x$loc ## determine split using rules for split.ppp locsplit <- if(missing(f)) split(xloc, drop=drop) else split(xloc, f, drop=drop) ## extract grouping factor g <- attr(locsplit, "fgroup") ## split contributions to measure atomsplit <- split(x$atoms, g, drop=drop) # hyuk wtsplit <- split(x$wt, g, drop=drop) if(ncol(x) == 1) { ## scalar measure valsplit <- split(x$val, g, drop=drop) discsplit <- split(x$discrete, g, drop=drop) denssplit <- split(x$density, g, drop=drop) } else { ## vector measure valsplit <- lapply(split(as.data.frame(x$val), g, drop=drop), as.matrix) discsplit <- lapply(split(as.data.frame(x$discrete), g, drop=drop), as.matrix) denssplit <- lapply(split(as.data.frame(x$density), g, drop=drop), as.matrix) } ## form the component measures result <- mapply(list, loc=locsplit, val=valsplit, atoms=atomsplit, discrete=discsplit, density=denssplit, wt=wtsplit, SIMPLIFY=FALSE) names(result) <- names(locsplit) result <- lapply(result, "class<-", value="msr") if(drop && any(isnul <- (sapply(locsplit, npoints) == 0))) result[isnul] <- NULL result <- as.solist(result) return(result) } integral.msr <- function(f, domain=NULL, ...) 
{ stopifnot(inherits(f, "msr")) if(is.tess(domain)) { result <- sapply(tiles(domain), integral.msr, f = f) if(length(dim(result)) > 1) result <- t(result) return(result) } if(!is.null(domain)) f <- f[domain] y <- with(f, "increment") z <- if(is.matrix(y)) apply(y, 2, sum) else sum(y) return(z) } augment.msr <- function(x, ..., sigma, recompute=FALSE) { ## add a pixel image of the smoothed density component stopifnot(inherits(x, "msr")) if(!recompute && !is.null(attr(x, "smoothdensity"))) return(x) d <- ncol(as.matrix(x$val)) xloc <- x$loc W <- as.owin(xloc) mt <- is.multitype(xloc) if(missing(sigma)) { sigma <- if(!mt) avenndist(xloc) else max(sapply(split(xloc), avenndist)) if(sigma == 0) sigma <- max(bw.scott(xloc))/5 } if(mt) { ## multitype case - split by type, extract smoothed part, then sum y <- lapply(split(x), augment.msr, sigma=sigma, ...) z <- lapply(y, attr, which="smoothdensity") if((nc <- ncol(x)) == 1) { ## scalar valued smo <- im.apply(z, sum) ## WAS: z <- do.call(harmonise, unname(z)) ## smo <- Reduce("+", z) } else { ## vector valued smo <- vector(mode="list", length=nc) for(j in 1:nc) { zj <- lapply(z, "[[", i=j) smo[[j]] <- im.apply(zj, sum) ## WAS: zj <- do.call(harmonise, unname(zj)) ## smo[[j]] <- Reduce("+", zj) } smo <- as.solist(smo) } attr(smo, "sigma") <- sigma attr(x, "smoothdensity") <- smo return(x) } ## Single-type xdensity <- as.matrix(x$density) ## first weed out Inf, NA, NaN if(!all(ok <- complete.cases(xdensity))) xdensity <- ok * xdensity ## smooth density unless constant ra <- apply(xdensity, 2, range) varble <- apply(as.matrix(ra), 2, diff) > sqrt(.Machine$double.eps) ## if(d == 1) { smo <- if(!varble) as.im(mean(xdensity), W=W) else do.call(Smooth, resolve.defaults(list(X=xloc %mark% xdensity), list(...), list(sigma=sigma))) } else { smo <- vector(mode="list", length=d) names(smo) <- colnames(x) if(any(varble)) smo[varble] <- do.call(Smooth, resolve.defaults(list(X=xloc %mark% xdensity[,varble, drop=FALSE]), list(...), 
list(sigma=sigma))) if(any(!varble)) smo[!varble] <- lapply(apply(xdensity[, !varble, drop=FALSE], 2, mean), as.im, W=W) smo <- as.solist(smo) } attr(smo, "sigma") <- sigma attr(x, "smoothdensity") <- smo return(x) } update.msr <- function(object, ...) { #' reconcile internal data if(!is.null(smo <- attr(object, "smoothdensity"))) { sigma <- attr(smo, "sigma") object <- augment.msr(object, ..., sigma=sigma, recompute=TRUE) } return(object) } plot.msr <- function(x, ..., add=FALSE, how=c("image", "contour", "imagecontour"), main=NULL, do.plot=TRUE, multiplot=TRUE, massthresh=0, equal.markscale=FALSE, equal.ribbon=FALSE) { if(is.null(main)) main <- short.deparse(substitute(x)) how <- match.arg(how) if(!multiplot) { ## compress everything to a single panel x$loc <- unmark(x$loc) if(is.matrix(x$val)) x$val <- rowSums(x$val) if(is.matrix(x$discrete)) x$discrete <- rowSums(x$discrete) if(is.matrix(x$density)) x$density <- rowSums(x$density) if(!is.null(smo <- attr(x, "smoothdensity")) && inherits(smo, "solist")) attr(x, "smoothdensity") <- im.apply(smo, sum, check=FALSE) ## WAS: attr(x, "smoothdensity") <- Reduce("+", smo) } d <- dim(x)[2] k <- if(is.multitype(x)) length(levels(marks(x$loc))) else 1 ## multiple plot panels may be generated if(k == 1 && d == 1) { ## single plot y <- solist(x) } else if(k > 1 && d == 1) { ## multitype y <- split(x) } else if(k == 1 && d > 1) { ## vector-valued y <- unstack(x) } else if(k > 1 && d > 1) { ## both multitype and vector-valued y <- split(x) typenames <- names(y) vecnames <- colnames(x$val) y <- unstack(y) names(y) <- as.vector(t(outer(typenames, vecnames, paste, sep="."))) } #' ensure image of density is present y <- solapply(y, augment.msr) #' ready to plot if(length(y) > 1) { ## plot as an array of panels userarg <- list(...) 
rowcol <- list(nrows=k, ncols=d) if(any(c("nrows", "ncols") %in% names(userarg))) rowcol <- list() #' determine common scales if required scaleinfo <- list() if(equal.markscale) { W <- Window(x) #' extract vectors of atomic masses from each panel marx <- lapply(y, with, "atommass") #' make a separate scale calculation for each panel scales <- sapply(marx, mark.scale.default, w=W, ...) scaleinfo$markscale <- min(scales) scaleinfo$markrange <- range(unlist(marx)) } if(equal.ribbon) { images <- lapply(y, attr, which="smoothdensity") scaleinfo$zlim <- range(sapply(images, range)) } ## go result <- do.call(plot.solist, resolve.defaults(list(y), userarg, rowcol, scaleinfo, list(how=how, main=main, equal.scales=TRUE, halign=TRUE, valign=TRUE, claim.title.space=TRUE))) return(invisible(result)) } ## scalar measure x <- y[[1]] ## get atoms xatomic <- (x$loc %mark% x$discrete)[x$atoms] if(length(massthresh) && all(is.finite(massthresh))) { ## ignore atoms with absolute mass <= massthresh check.1.real(massthresh) xatomic <- xatomic[abs(marks(xatomic)) > massthresh] } xtra.im <- graphicsPars("image") xtra.pp <- setdiff(graphicsPars("ppp"), c("box", "col")) xtra.pp <- union(xtra.pp, c("markrange", "marklevels")) xtra.ow <- graphicsPars("owin") smo <- attr(x, "smoothdensity") ## do.image <- how %in% c("image", "imagecontour") do.contour <- how %in% c("contour", "imagecontour") ## allocate space for plot and legend using do.plot=FALSE mechanism pdata <- do.call.matched(plot.ppp, resolve.defaults(list(x=xatomic, do.plot=FALSE, main=main), list(...), list(show.all=TRUE)), extrargs=xtra.pp) result <- pdata bb <- attr(pdata, "bbox") if(do.image) { idata <- do.call.matched(plot.im, resolve.defaults(list(x=smo, main=main, do.plot=FALSE), list(...)), extrargs=xtra.im) result <- idata bb <- boundingbox(bb, attr(idata, "bbox")) } ## attr(result, "bbox") <- bb ## if(do.plot) { if(!add) { blankmain <- prepareTitle(main)$blank ## initialise plot do.call.matched(plot.owin, 
resolve.defaults(list(x=bb, type="n", main=blankmain), list(...)), extrargs=xtra.ow) } ## display density if(do.image) do.call.matched(plot.im, resolve.defaults(list(x=smo, add=TRUE), list(...), list(main=main, show.all=TRUE)), extrargs=xtra.im) if(do.contour) do.call.matched(contour.im, resolve.defaults(list(x=smo, add=TRUE), list(...), list(main=main, axes=FALSE, show.all=!do.image)), extrargs=c("zlim", "labels", "labcex", ## DO NOT ALLOW 'col' "drawlabels", "method", "vfont", "lty", "lwd", "claim.title.space")) ## display atoms do.call.matched(plot.ppp, resolve.defaults(list(x=xatomic, add=TRUE, main=""), list(...), list(show.all=TRUE)), extrargs=xtra.pp) } return(invisible(result)) } "[.msr" <- function(x, i, j, ...) { valu <- as.matrix(x$val) disc <- as.matrix(x$discrete) dens <- as.matrix(x$density) wt <- x$wt atoms <- x$atoms # if(!missing(j)) { valu <- valu[, j] disc <- disc[, j] dens <- dens[, j] } loc <- x$loc if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i, clip=TRUE] loc <- unmark(loci) id <- marks(loci) # extract valu <- valu[id, , drop=FALSE] disc <- disc[id, , drop=FALSE] dens <- dens[id, , drop=FALSE] wt <- wt[id] atoms <- atoms[id] } out <- list(loc=loc, val=valu, atoms=atoms, discrete=disc, density=dens, wt=wt) class(out) <- "msr" return(out) } dim.msr <- function(x) { dim(as.matrix(x$val)) } dimnames.msr <- function(x) { list(NULL, colnames(x$val)) } # smooth.msr <- function(X, ...) { # .Deprecated("Smooth.msr", package="spatstat", # msg="smooth.msr is deprecated: use the generic Smooth with a capital S") # Smooth(X, ...) # } Smooth.msr <- function(X, ..., drop=TRUE) { verifyclass(X, "msr") loc <- X$loc val <- X$val result <- density(loc, weights=val, ...) if(!drop && is.im(result)) result <- solist(result) return(result) } as.owin.msr <- function(W, ..., fatal=TRUE) { as.owin(W$loc, ..., fatal=fatal) } domain.msr <- Window.msr <- function(X, ...) 
{
  as.owin(X)
}

# Geometrical transformations of measures.
# Each method transforms the quadrature locations 'loc'; where the
# transformation changes areas, the density and quadrature weights are
# rescaled so that the total mass transforms correctly.

shift.msr <- function(X, ...) {
  X$loc <- Xloc <- shift(X$loc, ...)
  ## keep the cached smoothed-density image in step with the locations
  if(!is.null(smo <- attr(X, "smoothdensity")))
    attr(X, "smoothdensity") <- shift(smo, getlastshift(Xloc))
  putlastshift(X, getlastshift(Xloc))
}

# Convert a (possibly vector-valued) measure to a layered object,
# one layer per vector component.
as.layered.msr <- local({

  as.layered.msr <- function(X) {
    nc <- ncol(X)
    if(nc == 0) return(layered())
    if(nc == 1) return(layered(X))
    Y <- lapply(seq_len(nc), pickcol, x=X)
    names(Y) <- colnames(X)
    return(layered(LayerList=Y))
  }

  # extract the j-th vector component of measure x
  pickcol <- function(j,x) x[,j]

  as.layered.msr
})

unitname.msr <- function(x) unitname(x$loc)

"unitname<-.msr" <- function(x, value) {
  unitname(x$loc) <- value
  return(x)
}

scalardilate.msr <- function(X, f, ...) {
  X$loc <- scalardilate(X$loc, f, ...)
  ## area scales by f^2: density per unit area decreases,
  ## quadrature weights (tile areas) increase
  X$density <- X$density/f^2
  X$wt <- X$wt * f^2
  return(X)
}

rotate.msr <- function(X, angle=pi/2, ..., centre=NULL) {
  ## rotation is area-preserving: only locations change
  X$loc <- rotate(X$loc, angle=angle, ..., centre=centre)
  return(X)
}

flipxy.msr <- function(X) {
  ## reflection is area-preserving: only locations change
  X$loc <- flipxy(X$loc)
  return(X)
}

rescale.msr <- function(X, s, unitname) {
  if(missing(unitname)) unitname <- NULL
  ## default scale factor: convert to the declared unit of length
  if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier
  Y <- scalardilate(X, 1/s)
  unitname(Y) <- rescale(unitname(X), s, unitname)
  return(Y)
}

affine.msr <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) {
  X$loc <- affine(X$loc, mat=mat, vec=vec, ...)
detmat <- abs(det(mat)) X$density <- X$density/detmat X$wt <- X$wt * detmat return(X) } Ops.msr <- function(e1,e2=NULL){ vn <- c("val", "discrete", "density") if(nargs() == 1L) { #' unary operator if(!is.element(.Generic, c("+", "-"))) stop(paste("Unary operation", sQuote(paste0(.Generic, "A")), "is undefined for a measure A."), call.=FALSE) e1 <- unclass(e1) e1[vn] <- lapply(e1[vn], .Generic) if(!is.null(sm <- attr(e1, "smoothdensity"))) attr(e1, "smoothdensity") <- do.call(.Generic, sm) class(e1) <- "msr" return(e1) } else { #' binary operator m1 <- inherits(e1, "msr") m2 <- inherits(e2, "msr") if(m1 && m2) { if(!is.element(.Generic, c("+", "-"))) stop(paste("Operation", sQuote(paste0("A", .Generic, "B")), "is undefined for measures A, B"), call.=FALSE) k1 <- dim(e1)[2] k2 <- dim(e2)[2] if(k1 != k2) stop(paste("Operation", sQuote(paste0("A", .Generic, "B")), "is undefined because A, B have incompatible dimensions:", "A is", ngettext(k1, "scalar", paste0(k1, "-vector")), ", B is", ngettext(k2, "scalar", paste0(k2, "-vector"))), call.=FALSE) if(!identical(e1$loc, e2$loc)) { haha <- harmonise(e1, e2) e1 <- haha[[1L]] e2 <- haha[[2L]] } e1 <- unclass(e1) e2 <- unclass(e2) e1[vn] <- mapply(.Generic, e1[vn], e2[vn], SIMPLIFY=FALSE) class(e1) <- "msr" #' handle smoothed densities sm1 <- attr(e1, "smoothdensity") sm2 <- attr(e2, "smoothdensity") sm <- if(is.null(sm1) || is.null(sm2)) { NULL } else if(is.im(sm1) && is.im(sm2)) { do.call(.Generic, list(sm1, sm2)) } else if(is.im(sm1) && is.solist(sm2)) { mapply(.Generic, e2=sm2, MoreArgs=list(e1=sm1), SIMPLIFY=FALSE) } else if(is.solist(sm1) && is.im(sm2)) { mapply(.Generic, e1=sm1, MoreArgs=list(e2=sm2), SIMPLIFY=FALSE) } else if(is.solist(sm1) && is.solist(sm2)) { mapply(.Generic, e1=sm1, e2=sm2, SIMPLIFY=FALSE) } else NULL attr(e1, "smoothdensity") <- sm return(e1) } else if(m1 && is.numeric(e2)) { if(!is.element(.Generic, c("/", "*"))) stop(paste("Operation", sQuote(paste0("A", .Generic, "z")), "is undefined for a 
measure A and numeric z."), call.=FALSE) e1 <- unclass(e1) e1[vn] <- lapply(e1[vn], .Generic, e2=e2) class(e1) <- "msr" #' handle smoothed density sm1 <- attr(e1, "smoothdensity") sm <- if(is.null(sm1)) NULL else if(is.im(sm1)) do.call(.Generic, list(e1=sm1, e2=e2)) else if(is.solist(sm1)) solapply(sm1, .Generic, e2=e2) else NULL attr(e1, "smoothdensity") <- sm return(e1) } else if(m2 && is.numeric(e1)) { if(.Generic != "*") stop(paste("Operation", sQuote(paste0("z", .Generic, "A")), "is undefined for a measure A and numeric z."), call.=FALSE) e2 <- unclass(e2) e2[vn] <- lapply(e2[vn], .Generic, e1=e1) class(e2) <- "msr" #' handle smoothed density sm2 <- attr(e2, "smoothdensity") sm <- if(is.null(sm2)) NULL else if(is.im(sm2)) do.call(.Generic, list(e1=e1, e2=sm2)) else if(is.solist(sm2)) solapply(sm2, .Generic, e1=e1) else NULL attr(e2, "smoothdensity") <- sm return(e2) } stop(paste("Operation", sQuote(paste0("e1", .Generic, "e2")), "is undefined for this kind of data"), call.=FALSE) } } measurePositive <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- pmax(0, x$discrete) y$density <- pmax(0, x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } measureNegative <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- -pmin(0, x$discrete) y$density <- -pmin(0, x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } measureVariation <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$discrete <- abs(x$discrete) y$density <- abs(x$density) y$val <- y$discrete + y$wt * y$density y <- update(y) return(y) } totalVariation <- function(x) integral(measureVariation(x)) measureDiscrete <- function(x) { if(!inherits(x, "msr")) stop("x must be a measure", call.=FALSE) y <- x y$density[] <- 0 y$val <- y$discrete y <- update(y) return(y) } measureContinuous <- function(x) { if(!inherits(x, "msr")) 
stop("x must be a measure", call.=FALSE) y <- x y$discrete[] <- 0 y$val <- y$wt * y$density y <- update(y) return(y) } harmonise.msr <- local({ harmonise.msr <- function(...) { argz <- list(...) n <- length(argz) if(n == 0) return(argz) ismeasure <- sapply(argz, inherits, what="msr") if(!any(ismeasure)) stop("No measures supplied") if(!all(ismeasure)) stop("All arguments should be measures (objects of class msr)") if(n < 2) return(argz) result <- vector(mode="list", length=n) ## extract entries loclist <- lapply(argz, getElement, name="loc") atomlist <- lapply(argz, getElement, name="atoms") masslist <- lapply(argz, getElement, name="discrete") denslist <- lapply(argz, getElement, name="density") ## check for compatible dimensions of measure values dimen <- unique(sapply(argz, ncol)) if(length(dimen) > 1) stop("Measures have different dimensions:", commasep(sort(dimen))) ## check for marked points ismarked <- sapply(loclist, is.marked) if(any(ismarked) && !all(ismarked)) stop("Some, but not all, quadrature schemes are marked") ismarked <- all(ismarked) ## union of all quadrature points in all measures Uloc <- do.call(superimpose, append(unname(loclist), list(check=FALSE))) Uloc <- unique(Uloc) nU <- npoints(Uloc) ## match each quadrature set to the union ## and find nearest data point to each point in the union if(!ismarked) { matchlist <- lapply(loclist, nncross, Y=Uloc, what="which") nearlist <- lapply(loclist, ssorcnn, xx=Uloc, what="which") } else { stop("Not yet implemented for marked quadrature schemes") } ## nearest neighbour interpolation of density values of each argument ## onto the common quadrature set Udenslist <- mapply(extract, x=denslist, i=nearlist, SIMPLIFY=FALSE) ## initialise other bits noatoms <- logical(nU) zeromass <- if(dimen == 1) numeric(nU) else matrix(0, nU, dimen) Uatomlist <- rep(list(noatoms), n) Umasslist <- rep(list(zeromass), n) ## assign atoms in each argument Uatomlist <- mapply(subsetgets, x=Uatomlist, i=matchlist, 
value=atomlist, SIMPLIFY=FALSE) Umasslist <- mapply(subsetgets, x=Umasslist, i=matchlist, value=masslist, SIMPLIFY=FALSE) ## union of atoms isatom <- Reduce("|", Uatomlist) ## masses at atoms Umasslist <- lapply(Umasslist, extract, i=isatom) ## make common quadrature scheme UQ <- quadscheme(Uloc[isatom], Uloc[!isatom]) ## reorder density data correspondingly neworder <- c(which(isatom), which(!isatom)) Udenslist <- lapply(Udenslist, extract, i=neworder) ## make new measures result <- mapply(msr, MoreArgs=list(qscheme=UQ), discrete=Umasslist, density=Udenslist, SIMPLIFY=FALSE) names(result) <- names(argz) class(result) <- unique(c("solist", class(result))) return(result) } ssorcnn <- function(xx, yy, what) nncross(xx, yy, what=what) extract <- function(x, i) { if(is.matrix(x)) x[i, , drop=FALSE] else x[i] } subsetgets <- function(x, i, value) { if(is.matrix(x)) { x[i, ] <- value } else { x[i] <- value } return(x) } harmonise.msr }) spatstat/R/sparselinalg.R0000644000176200001440000002043513602532431015130 0ustar liggesusers#' #' sparselinalg.R #' #' Counterpart of linalg.R for sparse matrices/arrays #' #' #' $Revision: 1.11 $ $Date: 2019/12/31 02:19:08 $ marginSums <- function(X, MARGIN) { #' equivalent to apply(X, MARGIN, sum) if(length(MARGIN) == 0) return(sum(X)) if(is.array(X) || is.matrix(X)) return(apply(X, MARGIN, sum)) dimX <- dim(X) if(length(MARGIN) == length(dimX)) return(aperm(X, MARGIN)) if(any(huh <- (MARGIN < 0 | MARGIN > length(dimX)))) stop(paste(commasep(sQuote(paste0("MARGIN=", MARGIN[huh]))), ngettext(sum(huh), "is", "are"), "not defined"), call.=FALSE) df <- SparseEntries(X) # discard other indices nonmargin <- setdiff(seq_along(dimX), MARGIN) df <- df[ , -nonmargin, drop=FALSE] # implicitly accumulate result <- EntriesToSparse(df, dimX[MARGIN]) return(result) } tensor1x1 <- function(A, B) { ## equivalent of tensor(A, B, 1, 1) ## when A is a vector and B is a sparse array. 
stopifnot(length(dim(B)) == 3) A <- as.vector(as.matrix(A)) stopifnot(length(A) == dim(B)[1]) if(is.array(B)) { result <- tensor::tensor(A,B,1,1) } else if(inherits(B, "sparse3Darray")) { result <- sparseMatrix(i=B$j, j=B$k, x=B$x * A[B$i], # values for same (i,j) are summed dims=dim(B)[-1], dimnames=dimnames(B)[2:3]) result <- drop0(result) } else stop("Format of B not understood", call.=FALSE) return(result) } tenseur <- local({ tenseur <- function(A, B, alongA=integer(0), alongB=integer(0)) { #' full arrays? if(isfull(A) && isfull(B)) return(tensor::tensor(A=A, B=B, alongA=alongA, alongB=alongB)) #' check dimensions dimA <- dim(A) %orifnull% length(A) dnA <- dimnames(A) if(is.null(dnA)) dnA <- rep(list(NULL), length(dimA)) dimB <- dim(B) %orifnull% length(B) dnB <- dimnames(B) if(is.null(dnB)) dnB <- rep(list(NULL), length(dimB)) #' check 'along' if (length(alongA) != length(alongB)) stop("\"along\" vectors must be same length") mtch <- dimA[alongA] == dimB[alongB] if (any(is.na(mtch)) || !all(mtch)) stop("Mismatch in \"along\" dimensions") #' dimensions of result retainA <- !(seq_along(dimA) %in% alongA) retainB <- !(seq_along(dimB) %in% alongB) dimC <- c(dimA[retainA], dimB[retainB]) nC <- length(dimC) if(nC > 3) stop("Sorry, sparse arrays of more than 3 dimensions are not supported", call.=FALSE) #' fast code for special cases if(length(dimA) == 1 && length(alongA) == 1 && !isfull(B)) { BB <- SparseEntries(B) Bx <- BB[,ncol(BB)] ijk <- BB[,-ncol(BB),drop=FALSE] kalong <- ijk[,alongB] ABalong <- as.numeric(Bx * A[kalong]) ndimB <- ncol(ijk) switch(ndimB, { result <- sum(ABalong) }, { iout <- ijk[,-alongB] result <- sparseVectorCumul(i=iout, x=ABalong, # values aggregated by i length=dimC) }, { ijout <- ijk[,-alongB,drop=FALSE] result <- sparseMatrix(i=ijout[,1], j=ijout[,2], x=ABalong, # values aggregated by (i,j) dims=dimC, dimnames=dnB[-alongB]) result <- drop0(result) }) return(result) } if(length(dimB) == 1 && length(alongB) == 1 && !isfull(A)) { AA <- 
SparseEntries(A) Ax <- AA[,ncol(AA)] ijk <- AA[,-ncol(AA),drop=FALSE] kalong <- ijk[,alongA] ABalong <- as.numeric(Ax * B[kalong]) nA <- ncol(ijk) switch(nA, { result <- sum(ABalong) }, { iout <- ijk[,-alongA] result <- sparseVectorCumul(i=iout, x=ABalong, # values aggregated by i length=dimC) }, { ijout <- ijk[,-alongA,drop=FALSE] result <- sparseMatrix(i=ijout[,1], j=ijout[,2], x=ABalong, # values aggregated by (i,j) dims=dimC, dimnames=dnA[-alongA]) result <- drop0(result) }) return(result) } #' extract indices and values of nonzero entries dfA <- SparseEntries(A) dfB <- SparseEntries(B) #' assemble all tuples which contribute if(length(alongA) == 0) { #' outer product dfC <- outersparse(dfA, dfB) } else { if(length(alongA) == 1) { Acode <- dfA[,alongA] Bcode <- dfB[,alongB] } else { Along <- unname(as.list(dfA[,alongA, drop=FALSE])) Blong <- unname(as.list(dfB[,alongB, drop=FALSE])) Acode <- do.call(paste, append(Along, list(sep=","))) Bcode <- do.call(paste, append(Blong, list(sep=","))) } lev <- unique(c(Acode,Bcode)) Acode <- factor(Acode, levels=lev) Bcode <- factor(Bcode, levels=lev) splitA <- split(dfA, Acode) splitB <- split(dfB, Bcode) splitC <- mapply(outersparse, splitA, splitB, SIMPLIFY=FALSE) dfC <- rbindCompatibleDataFrames(splitC) } #' form product of contributing entries dfC$x <- with(dfC, A.x * B.x) #' retain only appropriate columns retain <- c(retainA, FALSE, retainB, FALSE, TRUE) dfC <- dfC[, retain, drop=FALSE] #' collect result result <- EntriesToSparse(dfC, dimC) return(result) } isfull <- function(z) { if(is.array(z) || is.matrix(z) || is.data.frame(z)) return(TRUE) if(inherits(z, c("sparseVector", "sparseMatrix", "sparse3Darray"))) return(FALSE) return(TRUE) } outersparse <- function(dfA, dfB) { if(is.null(dfA) || is.null(dfB)) return(NULL) IJ <- expand.grid(I=seq_len(nrow(dfA)), J=seq_len(nrow(dfB))) dfC <- with(IJ, cbind(A=dfA[I,,drop=FALSE], B=dfB[J,,drop=FALSE])) return(dfC) } tenseur }) sumsymouterSparse <- function(x, w=NULL, 
dbg=FALSE) { dimx <- dim(x) if(length(dimx) != 3) stop("x should be a 3D array") stopifnot(dim(x)[2] == dim(x)[3]) if(!is.null(w)) { stopifnot(inherits(w, "sparseMatrix")) stopifnot(all(dim(w) == dim(x)[2:3])) } m <- dimx[1] n <- dimx[2] if(inherits(x, "sparse3Darray")) { df <- data.frame(i = x$i - 1L, # need 0-based indices j = x$j - 1L, k = x$k - 1L, value = x$x) } else stop("x is not a recognised kind of sparse array") # trivial? if(nrow(df) < 2) { y <- matrix(0, m, m) dimnames(y) <- rep(dimnames(x)[1], 2) return(y) } # order by increasing j, then k oo <- with(df, order(j, k, i)) df <- df[oo, ] # now provide ordering by increasing k then j ff <- with(df, order(k,j,i)) # if(dbg) { cat("----------------- Data ---------------------\n") print(df) cat("-------------- Reordered data --------------\n") print(df[ff,]) cat("Calling......\n") } if(is.null(w)) { z <- .C("CspaSumSymOut", m = as.integer(m), n = as.integer(n), lenx = as.integer(nrow(df)), ix = as.integer(df$i), # indices are already 0-based jx = as.integer(df$j), kx = as.integer(df$k), x = as.double(df$value), flip = as.integer(ff - 1L), # convert 1-based to 0-based y = as.double(numeric(m * m)), PACKAGE = "spatstat") } else { # extract triplet representation of w w <- as(w, Class="TsparseMatrix") dfw <- data.frame(j=w@i, k=w@j, w=w@x) woo <- with(dfw, order(j, k)) dfw <- dfw[woo, , drop=FALSE] z <- .C("CspaWtSumSymOut", m = as.integer(m), n = as.integer(n), lenx = as.integer(nrow(df)), ix = as.integer(df$i), # indices are already 0-based jx = as.integer(df$j), kx = as.integer(df$k), x = as.double(df$value), flip = as.integer(ff - 1L), # convert 1-based to 0-based lenw = as.integer(nrow(dfw)), jw = as.integer(dfw$j), kw = as.integer(dfw$k), w = as.double(dfw$w), y = as.double(numeric(m * m)), PACKAGE = "spatstat") } y <- matrix(z$y, m, m) dimnames(y) <- rep(dimnames(x)[1], 2) return(y) } spatstat/R/ppqq.R0000644000176200001440000000737213333543255013441 0ustar liggesusers## ## ppqq.R ## ## P-P and Q-Q 
versions of fv objects ## PPversion <- local({ PPversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## set up inverse theoretical function f_0: 'theo' |-> 'r' xname <- fvnames(f, ".x") df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## evaluate f_0^{-1}(theo) for evenly-spaced grid of 'theo' values ra <- range(theo.table) theo.seq <- seq(from=ra[1], to=ra[2], length.out=nrow(df)) x.vals <- invfun(theo.seq) ## convert f to a function and evaluate at these 'r' values ynames <- setdiff(fvnames(f, columns), theo) ff <- as.function(f, value=ynames) y.vals <- lapply(ynames, evalselected, x=x.vals, f=ff) ## build data frame all.vals <- append(list(theo=theo.seq), y.vals) names(all.vals) <- c(theo, ynames) DF <- as.data.frame(all.vals) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.theo <- match(theo, cnames) i.yval <- match(ynames, cnames) ii <- c(i.theo, i.yval) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] result <- fv(DF, argu = theo, ylab = atr$ylab, valu = best, fmla = . 
~ .x, alim = ra, labl = atr$labl[ii], desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = atr$yexp) fvnames(result, ".") <- c(ynames, theo) return(result) } evalselected <- function(what, f, x){ f(x, what=what) } PPversion }) QQversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## extract relevant columns of data xname <- fvnames(f, ".x") ynames <- fvnames(f, columns) df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] y.table <- df[,ynames, drop=FALSE] ## set up inverse theoretical function f_0: 'theo' |-> 'r' invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## apply f_0^{-1} to tabulated function values z.table <- as.data.frame(lapply(y.table, invfun)) ## build data frame DF <- cbind(df[,xname,drop=FALSE], z.table) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.x <- match(xname, cnames) i.y <- match(ynames, cnames) ii <- c(i.x, i.y) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] if(versionstring.spatstat() < package_version("1.38-2")) { fvl <- fvlabels(f, expand=TRUE) theo.string <- fvl[colnames(f) == theo] } else { theo.string <- fvlabels(f, expand=TRUE)[[theo]] } ## remove '(r)' from outer function theo.string <- sub(paren(xname), "", theo.string, fixed=TRUE) theo.expr <- parse(text=theo.string) theo.lang <- theo.expr[[1]] ylab <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$ylab, THEO=theo.lang)) yexp <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$yexp, THEO=theo.lang)) oldlabl <- atr$labl labl.iy <- sprintf("{{%s}^{-1}}(%s)", theo.string, oldlabl[i.y]) labl.ii <- c(oldlabl[i.x], labl.iy) result <- fv(DF, argu = atr$argu, ylab = ylab, valu = best, fmla = . 
~ .x, alim = atr$alim, labl = labl.ii, desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = yexp) fvnames(result, ".") <- ynames unitname(result) <- unitname(f) return(result) } spatstat/R/pspcross.R0000644000176200001440000002111413424530017014313 0ustar liggesusers# # pspcross.R # # Intersections of line segments # # $Revision: 1.24 $ $Date: 2019/01/31 08:11:34 $ # # crossing.psp <- function(A,B,fatal=TRUE,details=FALSE) { verifyclass(A, "psp") verifyclass(B, "psp") # first check for intersection of windows ABW <- intersect.owin(A$window, B$window, fatal=fatal) if(is.null(ABW)) return(NULL) eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 useCall <- spatstat.options("crossing.psp.useCall") if(!useCall) { # old C routine out <- .C("xysegint", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), xx=as.double(numeric(na * nb)), yy=as.double(numeric(na * nb)), ta=as.double(numeric(na * nb)), tb=as.double(numeric(na * nb)), ok=as.integer(integer(na * nb)), PACKAGE = "spatstat") ok <- (matrix(out$ok, na, nb) != 0) xx <- matrix(out$xx, na, nb) yy <- matrix(out$yy, na, nb) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) if(details) { ia <- as.vector(row(ok)[ok]) jb <- as.vector(col(ok)[ok]) ta <- as.vector(matrix(out$ta, na, nb)[ok]) tb <- as.vector(matrix(out$tb, na, nb)[ok]) } } else { # new storage.mode(x0a) <- storage.mode(y0a) <- "double" storage.mode(dxa) <- storage.mode(dya) <- "double" storage.mode(x0b) <- storage.mode(y0b) <- "double" storage.mode(dxb) <- storage.mode(dyb) <- "double" storage.mode(eps) <- "double" out <- .Call("Cxysegint", x0a, y0a, dxa, dya, x0b, y0b, dxb, dyb, eps, PACKAGE="spatstat") xx <- out[[5]] yy <- out[[6]] 
if(details) { ia <- out[[1L]] + 1L jb <- out[[2L]] + 1L ta <- out[[3L]] tb <- out[[4L]] } } result <- ppp(xx, yy, window=ABW, check=FALSE) if(details) marks(result) <- data.frame(iA=ia, jB=jb, tA=ta, tB=tb) return(result) } test.crossing.psp <- function(A,B) { # return logical matrix specifying whether A[i] and B[j] cross verifyclass(A, "psp") verifyclass(B, "psp") eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 out <- .C("xysi", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), ok=as.integer(integer(na * nb)), PACKAGE = "spatstat") hit <- (matrix(out$ok, na, nb) != 0) return(hit) } anycrossing.psp <- function(A,B) { # equivalent to: any(test.crossing.psp(A,B)) # Test whether two psp objects have at least one crossing point verifyclass(A, "psp") verifyclass(B, "psp") eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 out <- .C("xysiANY", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), ok=as.integer(integer(1L)), PACKAGE = "spatstat") hit <- (out$ok != 0) return(hit) } selfcrossing.psp <- function(A) { verifyclass(A, "psp") eps <- .Machine$double.eps n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 useCall <- spatstat.options("selfcrossing.psp.useCall") if(!useCall) { # old C routine out <- .C("xysegXint", n=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), 
dy=as.double(dy), eps=as.double(eps), xx=as.double(numeric(n^2)), yy=as.double(numeric(n^2)), ti=as.double(numeric(n^2)), tj=as.double(numeric(n^2)), ok=as.integer(integer(n^2)), PACKAGE = "spatstat") ok <- (matrix(out$ok, n, n) != 0) xx <- matrix(out$xx, n, n) yy <- matrix(out$yy, n, n) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) } else { # new storage.mode(x0) <- storage.mode(y0) <- "double" storage.mode(dx) <- storage.mode(dy) <- "double" storage.mode(eps) <- "double" out <- .Call("CxysegXint", x0, y0, dx, dy, eps, PACKAGE="spatstat") xx <- out[[5L]] yy <- out[[6L]] } result <- ppp(xx, yy, window=A$window, check=FALSE) return(result) } test.selfcrossing.psp <- function(A) { verifyclass(A, "psp") eps <- .Machine$double.eps n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 out <- .C("xysxi", na=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), eps=as.double(eps), ok=as.integer(integer(n*n)), PACKAGE = "spatstat") hit <- (matrix(out$ok, n, n) != 0) return(hit) } selfcut.psp <- function(A, ..., eps) { stopifnot(is.psp(A)) n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 x1 <- eA$x1 y1 <- eA$y1 dx <- x1 - x0 dy <- y1 - y0 if(missing(eps) || is.null(eps)) { eps <- sqrt(.Machine$double.eps) * diameter(Frame(A)) } else { check.1.real(eps) stopifnot(eps >= 0) } ## identify self-crossings eps <- .Machine$double.eps storage.mode(x0) <- storage.mode(y0) <- "double" storage.mode(dx) <- storage.mode(dy) <- "double" storage.mode(eps) <- "double" zz <- .Call("CxysegXint", x0, y0, dx, dy, eps, PACKAGE = "spatstat") if(length(zz[[1]]) == 0) { ## no dissection required attr(A, "camefrom") <- seq_len(n) return(A) } ## names(zz) <- c("i", "j", "ti", "tj", "x", "y") icross <- zz$i + 1L jcross <- zz$j + 1L ticross <- zz$ti tjcross <- zz$tj xcross <- zz$x ycross <- zz$y ## which segments are split... gone <- unique(c(icross, jcross)) ## ... 
and which are not retained <- setdiff(seq_len(n), gone) ## initialise result ## start with all segments which are retained x0out <- x0[retained] y0out <- y0[retained] x1out <- x1[retained] y1out <- y1[retained] camefrom <- retained ## cut each segment using the *provided* values of x,y for(ii in gone) { ## assemble cuts through segment ii imatch <- which(icross == ii) jmatch <- which(jcross == ii) ijmatch <- c(imatch, jmatch) tt <- c(ticross[imatch], tjcross[jmatch]) xx <- xcross[ijmatch] yy <- ycross[ijmatch] # discard T-junctions ok <- (tt > 0 & tt < 1) tt <- tt[ok] xx <- xx[ok] yy <- yy[ok] # order the pieces ord <- order(tt) xx <- xx[ord] yy <- yy[ord] ## add endpoints of old segment xnew <- c(x0[ii], xx, x1[ii]) ynew <- c(y0[ii], yy, y1[ii]) ## append to result m <- length(xnew) x0out <- c(x0out, xnew[-m]) y0out <- c(y0out, ynew[-m]) x1out <- c(x1out, xnew[-1L]) y1out <- c(y1out, ynew[-1L]) camefrom <- c(camefrom, rep(ii, m-1L)) } marx <- marks(A) marxout <- if(is.null(marx)) NULL else as.data.frame(marx)[camefrom, , drop=FALSE] Y <- psp(x0out, y0out, x1out, y1out, window=Window(A), marks=marxout) if(eps > 0) { ok <- (lengths.psp(Y) > eps) if(!all(ok)) { Y <- Y[ok] camefrom <- camefrom[ok] } } attr(Y, "camefrom") <- camefrom return(Y) } spatstat/R/badgey.R0000644000176200001440000001616513333543254013712 0ustar liggesusers# # # badgey.S # # $Revision: 1.17 $ $Date: 2018/03/15 07:37:41 $ # # Hybrid Geyer process # # BadGey() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # BadGey <- local({ # ........... auxiliary functions .............. delBG <- function(i, r, sat) { r <- r[-i] if(length(r) == length(sat)) { r <- r[-i] sat <- sat[-i] } else if(length(sat) == 1) { r <- r[-i] } else stop("Mismatch in dimensions of arguments r and sat") nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(BadGey(r, sat)) } # .............. 
template .................... BlankBG <- list( name = "hybrid Geyer process", creator = "BadGey", family = "pairsat.family", # will be evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) for(i in 1:nr) out[,,i] <- (d <= r[i]) out }, par = list(r = NULL, sat=NULL), # to fill in later parnames = c("interaction radii", "saturation parameters"), hasInf = FALSE, init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction radii r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction radii r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[0,", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=dround(gn))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(anyNA(gamma)) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r good <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(good)) return(NULL) if(!any(good)) return(Poisson()) bad <- !good if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delBG(firstbad, r, sat)) } else { # consider all candidate submodels subs <- 
lapply(which(bad), delBG, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL, # to be added later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE) { # fast evaluator for BadGey interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for BadGey") r <- potpars$r sat <- potpars$sat # ensure r and sat have equal length if(length(r) != length(sat)) { if(length(r) == 1) r <- rep.int(r, length(sat)) else if(length(sat) == 1) sat <- rep.int(sat, length(r)) else stop("lengths of r and sat do not match") } # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1L] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } nterms <- length(r) answer <- matrix(, nrow=nU, ncol=nterms) for(k in 1:nterms) { # first determine saturated pair counts counts <- strausscounts(U, X, r[k], EqualPairs) satcounts <- pmin.int(sat[k], counts) # trapdoor used by suffstat() if(halfway) answer[,k] <- satcounts 
else if(sat[k] == Inf) answer[,k] <- 2 * satcounts else { # extract counts for data points Uindex <- EqualPairs[,2L] Xindex <- EqualPairs[,1L] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r[k], sat[k], Xcounts, EqualPairs) answer[,k] <- satcounts + change } } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) } ) class(BlankBG) <- "interact" BadGey <- function(r, sat) { instantiate.interact(BlankBG, list(r=r, sat=sat)) } BadGey <- intermaker(BadGey, BlankBG) BadGey }) spatstat/R/hasenvelope.R0000644000176200001440000000120513333543255014756 0ustar liggesusers#' #' hasenvelope.R #' #' A simple class of objects which contain additional envelope data #' #' $Revision: 1.1 $ $Date: 2015/10/05 06:20:31 $ hasenvelope <- function(X, E=NULL) { if(inherits(E, "envelope")) { attr(X, "envelope") <- E class(X) <- c("hasenvelope", class(X)) } return(X) } print.hasenvelope <- function(x, ...) 
{ NextMethod("print") splat("[Object contains simulation envelope data]") return(invisible(NULL)) } envelope.hasenvelope <- function(Y, ..., Yname=NULL) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) E <- attr(Y, "envelope") return(envelope(E, ..., Yname=Yname)) } spatstat/R/Gest.R0000644000176200001440000001004513333543254013350 0ustar liggesusers# # Gest.S # # Compute estimates of nearest neighbour distance distribution function G # # $Revision: 4.31 $ $Date: 2015/10/21 09:06:57 $ # ################################################################################ # "Gest" <- "nearest.neighbour" <- function(X, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) ## W <- X$window npts <- npoints(X) lambda <- npts/area(W) ## determine r values rmaxdefault <- rmax.rule("G", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max zeroes <- numeric(length(rvals)) ## choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "han") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", cs="han", ChiuStoyan="han", best="km"), multi=TRUE) ## compute nearest neighbour distances nnd <- nndist(X$x, X$y) ## distance to boundary bdry <- bdist.points(X) ## restrict to subset ? if(!is.null(domain)) { ok <- inside.owin(X, w=domain) nnd <- nnd[ok] bdry <- bdry[ok] } ## observations o <- pmin.int(nnd,bdry) ## censoring indicators d <- (nnd <= bdry) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(G(r), NULL), "theo", . 
~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="G") if("none" %in% correction) { ## UNCORRECTED e.d.f. of nearest neighbour distances: use with care if(npts <= 1) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("han" %in% correction) { if(npts <= 1) G <- zeroes else { ## uncensored distances x <- nnd[d] ## weights a <- eroded.areas(W, rvals, subset=domain) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } ## add to fv object Z <- bind.fv(Z, data.frame(han=G), "hat(%s)[han](r)", "Hanisch estimate of %s", "han") ## modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes, theohaz=zeroes) else { result <- km.rs(o, bdry, d, breaks) result$theohaz <- 2 * pi * lambda * rvals result <- as.data.frame(result[c("rs", "km", "hazard", "theohaz")]) } ## add to fv object Z <- bind.fv(Z, result, c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard function h(r)"), "km") ## modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) unitname(Z) <- unitname(X) return(Z) } spatstat/R/resid4plot.R0000644000176200001440000005774413575343014014560 0ustar liggesusers# # # Residual plots: # resid4plot four panels with matching coordinates # resid1plot one or more unrelated individual plots # resid1panel one panel of 
resid1plot # # $Revision: 1.37 $ $Date: 2019/12/15 05:26:58 $ # # resid4plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions do.call(fun, list(...)) } do.lines <- function(x, y, defaulty=1, ...) { do.call(lines, resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } resid4plot <- function(RES, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), spacing=0.1, outer=3, srange=NULL, monochrome=FALSE, main=NULL, xlab="x coordinate", ylab="y coordinate", rlab, col.neg=NULL, col.smooth=NULL, ...) { plot.neg <- match.arg(plot.neg) if(missing(rlab)) rlab <- NULL rlablines <- if(is.null(rlab)) 1 else sum(nzchar(rlab)) clip <- RES$clip Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type typename <- RES$typename Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] # set up 2 x 2 plot with space wide <- diff(W$xrange) high <- diff(W$yrange) space <- spacing * max(wide,high) width <- wide + space + wide height <- high + space + high outerspace <- outer * space outerRspace <- (outer - 1 + rlablines) * space plot(c(0, width) + c(-outerRspace, outerspace), c(0, height) + c(-outerspace, outerRspace), type="n", asp=1.0, axes=FALSE, xlab="", ylab="") # determine colour map for background nullvalue <- if(type == "eem") 1 else 0 if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(Yrange, Zrange, nullvalue), na.rm=TRUE) } else check.range(srange) backcols <- 
beachcolours(srange, nullvalue, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols # ------ plot residuals/marks (in top left panel) ------------ Xlowleft <- c(W$xrange[1],W$yrange[1]) vec <- c(0, high) + c(0, space) - Xlowleft # shift the original window Ws <- shift(W, vec) # shift the residuals Ys <- shift(Yclip,vec) # determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") # pre-plot the window(s) if(!redundant) { if(!clip) do.clean(plot, Ys$window, add=TRUE, ...) else do.clean(ploterodewin, Ws, Ys$window, add=TRUE, ...) } ## adjust position of legend associated with eroded window sep <- if(clip) Wclip$yrange[1] - W$yrange[1] else NULL ## decide whether mark scale should be shown showscale <- (type != "raw") switch(plot.neg, discrete={ neg <- (Ys$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize","meansize","markscale") %in% names(list(...)))) { plot(Ys[neg], add=TRUE, legend=FALSE, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) plot(Ys[neg], add=TRUE, legend=FALSE, maxsize=hackmax, ...) } ## plot positive masses at atoms plot(Ys[!neg], add=TRUE, leg.side="left", leg.args=list(sep=sep), show.all=TRUE, main="", ...) }, contour = { Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) Contour(Yds, add=TRUE, ...) do.call(plot, resolve.defaults(list(x=Yms, add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) }, imagecontour=, image={ Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) if(redundant) do.clean(ploterodeimage, Ws, Yds, rangeZ=srange, colsZ=col.neg, ...) else if(type != "eem") do.clean(image, Yds, add=TRUE, ribbon=FALSE, col=col.neg, zlim=srange, ...) if(plot.neg == "imagecontour") Contour(Yds, add=TRUE, ...) 
## plot positive masses at atoms do.call(plot, resolve.defaults(list(x=Yms, add=TRUE), list(...), list(use.marks=showscale, leg.side="left", show.all=TRUE, main="", leg.args=list(sep=sep)))) } ) # --------- plot smoothed surface (in bottom right panel) ------------ vec <- c(wide, 0) + c(space, 0) - Xlowleft Zs <- shift.im(Z, vec) switch(plot.smooth, image={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) }, contour={ Contour(Zs, add=TRUE, ...) }, persp={ warning("persp not available in 4-panel plot") }, imagecontour={ do.clean(image, Zs, add=TRUE, col=col.smooth, zlim=srange, ribbon=FALSE, ...) Contour(Zs, add=TRUE, ...) } ) lines(Zs$xrange[c(1,2,2,1,1)], Zs$yrange[c(1,1,2,2,1)]) # -------------- lurking variable plots ----------------------- # --------- lurking variable plot for x coordinate ------------------ # (cumulative or marginal) # in bottom left panel if(!is.null(RES$xmargin)) { a <- RES$xmargin observedV <- a$xZ observedX <- a$x theoreticalV <- a$ExZ theoreticalX <- a$x theoreticalSD <- theoreticalHI <- theoreticalLO <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$xcumul)) { a <- RES$xcumul observedX <- a$empirical$covariate observedV <- a$empirical$value theoreticalX <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pX <- pretty(theoreticalX) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { high * (y - rr[1])/diff(rr) } xscale <- function(x) { x - W$xrange[1] } if(!is.null(theoreticalHI)) do.call.matched(polygon, resolve.defaults( 
list(x=xscale(c(theoreticalX, rev(theoreticalX))), y=yscale(c(theoreticalHI, rev(theoreticalLO)))), list(...), list(col="grey", border=NA))) do.clean(do.lines, xscale(observedX), yscale(observedV), 1, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV + 2 * theoreticalSD), 3, ...) do.clean(do.lines, xscale(theoreticalX), yscale(theoreticalV - 2 * theoreticalSD), 3, ...) } axis(side=1, pos=0, at=xscale(pX), labels=pX) text(xscale(mean(theoreticalX)), - outerspace, xlab) axis(side=2, pos=0, at=yscale(pV), labels=pV) text(-outerRspace, yscale(mean(pV)), rlab, srt=90) # --------- lurking variable plot for y coordinate ------------------ # (cumulative or marginal) # in top right panel if(!is.null(RES$ymargin)) { a <- RES$ymargin observedV <- a$yZ observedY <- a$y theoreticalV <- a$EyZ theoreticalY <- a$y theoreticalSD <- NULL if(is.null(rlab)) rlab <- paste("marginal of", typename) } else if(!is.null(RES$ycumul)) { a <- RES$ycumul observedV <- a$empirical$value observedY <- a$empirical$covariate theoreticalY <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd theoreticalHI <- a$theoretical$upper theoreticalLO <- a$theoretical$lower if(is.null(rlab)) rlab <- paste("cumulative sum of", typename) } # pretty axis marks pY <- pretty(theoreticalY) rV <- range(0, observedV, theoreticalV, theoreticalHI, theoreticalLO) if(!is.null(theoreticalSD)) rV <- range(rV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD) pV <- pretty(rV) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { y - W$yrange[1] + high + space} xscale <- function(x) { wide + space + wide * (rr[2] - x)/diff(rr) } if(!is.null(theoreticalHI)) do.call.matched(polygon, resolve.defaults( list(x=xscale(c(theoreticalHI, rev(theoreticalLO))), y=yscale(c(theoreticalY, rev(theoreticalY)))), list(...), 
list(col="grey", border=NA))) do.clean(do.lines, xscale(observedV), yscale(observedY), 1, ...) do.clean(do.lines, xscale(theoreticalV), yscale(theoreticalY), 2, ...) if(!is.null(theoreticalSD)) { do.clean(do.lines, xscale(theoreticalV+2*theoreticalSD), yscale(theoreticalY), 3, ...) do.clean(do.lines, xscale(theoreticalV-2*theoreticalSD), yscale(theoreticalY), 3, ...) } axis(side=4, pos=width, at=yscale(pY), labels=pY) text(width + outerspace, yscale(mean(theoreticalY)), ylab, srt=90) axis(side=3, pos=height, at=xscale(pV), labels=pV) text(xscale(mean(pV)), height + outerRspace, rlab) # if(!is.null(main)) title(main=main) invisible(NULL) } resid4plot }) # # # Residual plot: single panel(s) # # resid1plot <- local({ Contour <- function(..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args) { ## avoid passing arguments of plot.ppp to contour.default contour(...) } do.clean <- function(fun, ..., pch, chars, cols, etch, size, maxsize, meansize, markscale, symap, zap, legend, leg.side, leg.args, nlevels, levels, labels, drawlabels, labcex) { ## avoid passing arguments of plot.ppp, contour.default to other functions do.call(fun, list(...)) } resid1plot <- function(RES, opt, plot.neg=c("image", "discrete", "contour", "imagecontour"), plot.smooth=c("imagecontour", "image", "contour", "persp"), srange=NULL, monochrome=FALSE, main=NULL, add=FALSE, show.all=!add, do.plot=TRUE, col.neg=NULL, col.smooth=NULL, ...) 
{ if(!any(unlist(opt[c("all", "marks", "smooth", "xmargin", "ymargin", "xcumul", "ycumul")]))) return(invisible(NULL)) if(!add && do.plot) { ## determine size of plot area by calling again with do.plot=FALSE cl <- match.call() cl$do.plot <- FALSE b <- eval(cl, parent.frame()) bb <- as.owin(b, fatal=FALSE) if(is.owin(bb)) { ## initialise plot area plot(bb, type="n", main="") force(show.all) add <- TRUE } } ## extract info clip <- RES$clip Y <- RES$Y Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] ## determine colour map if(opt$all || opt$marks || opt$smooth) { nullvalue <- if(type == "eem") 1 else 0 if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(Yrange, Zrange, nullvalue), na.rm=TRUE) } else check.range(srange) backcols <- beachcolours(srange, nullvalue, monochrome) if(is.null(col.neg)) col.neg <- backcols if(is.null(col.smooth)) col.smooth <- backcols } ## determine main heading if(is.null(main)) { prefix <- if(opt$marks) NULL else if(opt$smooth) "Smoothed" else if(opt$xcumul) "Lurking variable plot for x coordinate\n" else if(opt$ycumul) "Lurking variable plot for y coordinate\n" else if(opt$xmargin) "Lurking variable plot for x coordinate\n" else if(opt$ymargin) "Lurking variable plot for y coordinate\n" else NULL main <- paste(prefix, RES$typename) } ## ------------- residuals --------------------------------- if(opt$marks) { ## determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") ## pre-plot the window(s) if(redundant && !add) { z <- do.clean(plot, as.rectangle(W), box=FALSE, main="", do.plot=do.plot, ...) } else { if(!clip) z <- do.clean(plot, W, main="", add=add, show.all=show.all, do.plot=do.plot, ...) 
else z <- do.clean(ploterodewin, W, Wclip, main="", add=add, show.all=show.all, do.plot=do.plot, ...) } bb <- as.owin(z) switch(plot.neg, discrete={ neg <- (Y$marks < 0) ## plot negative masses of discretised measure as squares if(any(c("maxsize", "markscale") %in% names(list(...)))) { z <- plot(Y[neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) } else { hackmax <- 0.5 * sqrt(area(Wclip)/Yclip$n) z <- plot(Y[neg], add=TRUE, maxsize=hackmax, show.all=show.all, do.plot=do.plot, ...) } ## plot positive masses at atoms zp <- plot(Y[!neg], add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- boundingbox(bb, z, zp) }, contour = { z <- Contour(Ydens, add=TRUE, do.plot=do.plot, ...) bb <- boundingbox(bb, z) }, imagecontour=, image={ if(redundant) { z <- do.clean(ploterodeimage, W, Ydens, rangeZ=srange, colsZ=col.neg, add=add, show.all=show.all, main="", do.plot=do.plot, ...) } else if(type != "eem") { z <- do.clean(image, Ydens, col=col.neg, zlim=srange, ribbon=FALSE, add=TRUE, show.all=show.all, do.plot=do.plot, main="", ...) } bb <- boundingbox(bb, z) if(plot.neg == "imagecontour") { z <- Contour(Ydens, add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- boundingbox(bb, z) } ## decide whether mark scale should be shown showscale <- (type != "raw") ## plot positive masses at atoms z <- do.call(plot, resolve.defaults(list(x=Ymass, add=TRUE), list(...), list(use.marks=showscale, do.plot=do.plot))) bb <- boundingbox(bb, z) } ) if(do.plot && show.all) title(main=main) } # ------------- smooth ------------------------------------- if(opt$smooth) { if(!clip) { switch(plot.smooth, image={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) bb <- as.owin(z) }, contour={ z <- Contour(Z, main="", axes=FALSE, xlab="", ylab="", do.plot=do.plot, add=add, show.all=show.all, ...) 
bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main="", axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ z <- do.clean(image, Z, main="", axes=FALSE, xlab="", ylab="", col=col.smooth, zlim=srange, ribbon=FALSE, do.plot=do.plot, add=add, show.all=show.all, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) bb <- as.owin(z) } ) if(do.plot && show.all) title(main=main) } else { switch(plot.smooth, image={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) bb <- boundingbox(as.rectangle(W), z) }, contour={ do.clean(plot, W, main=main, do.plot=do.plot, add=add, show.all=show.all, ...) z <- Contour(Z, add=TRUE, show.all=show.all, do.plot=do.plot, ...) bb <- as.owin(z) }, persp={ if(do.plot) do.clean(persp, Z, main=main, axes=FALSE, xlab="", ylab="", ...) bb <- NULL }, imagecontour={ do.clean(plot, as.rectangle(W), box=FALSE, main=main, do.plot=do.plot, add=add, ...) z <- do.clean(ploterodeimage, W, Z, colsZ=col.smooth, rangeZ=srange, do.plot=do.plot, ...) Contour(Z, add=TRUE, do.plot=do.plot, ...) 
bb <- as.owin(z) } ) } } # ------------ cumulative x ----------------------------------------- if(opt$xcumul) { a <- RES$xcumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "x coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ cumulative y ----------------------------------------- if(opt$ycumul) { a <- RES$ycumul obs <- a$empirical theo <- a$theoretical do.clean(resid1panel, obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "y coordinate", "cumulative mark", main=main, ..., do.plot=do.plot) bb <- NULL } ## ------------ x margin ----------------------------------------- if(opt$xmargin) { a <- RES$xmargin do.clean(resid1panel, a$x, a$xZ, a$x, a$ExZ, NULL, "x coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } # ------------ y margin ----------------------------------------- if(opt$ymargin) { a <- RES$ymargin do.clean(resid1panel, a$y, a$yZ, a$y, a$EyZ, NULL, "y coordinate", "marginal of residuals", main=main, ..., do.plot=do.plot) bb <- NULL } attr(bb, "bbox") <- bb return(invisible(bb)) } resid1plot }) resid1panel <- local({ do.lines <- function(x, y, defaulty=1, ...) { do.call(lines, resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } resid1panel <- function(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab, ylab, ..., do.plot=TRUE) { if(!do.plot) return(NULL) ## work out plot range rX <- range(observedX, theoreticalX) rV <- range(c(0, observedV, theoreticalV)) if(!is.null(theoreticalSD)) rV <- range(c(rV, theoreticalV + 2*theoreticalSD, theoreticalV - 2*theoreticalSD)) ## argument handling ## start plot plot(rX, rV, type="n", xlab=xlab, ylab=ylab, ...) do.lines(observedX, observedV, 1, ...) do.lines(theoreticalX, theoreticalV, 2, ...) if(!is.null(theoreticalSD)) { do.lines(theoreticalX, theoreticalV + 2 * theoreticalSD, 3, ...) 
do.lines(theoreticalX, theoreticalV - 2 * theoreticalSD, 3, ...) } } resid1panel }) # # ploterodewin <- function(W1, W2, col.edge=grey(0.75), col.inside=rgb(1,0,0), do.plot=TRUE, ...) { ## internal use only ## W2 is assumed to be an erosion of W1 switch(W1$type, rectangle={ z <- plot(W1, ..., do.plot=do.plot) plot(W2, add=TRUE, lty=2, do.plot=do.plot) }, polygonal={ z <- plot(W1, ..., do.plot=do.plot) plot(W2, add=TRUE, lty=2, do.plot=do.plot) }, mask={ Z <- as.im(W1) x <- as.vector(rasterx.mask(W1)) y <- as.vector(rastery.mask(W1)) ok <- inside.owin(x, y, W2) Z$v[ok] <- 2 z <- plot(Z, ..., col=c(col.edge, col.inside), add=TRUE, ribbon=FALSE, do.plot=do.plot, show.all=TRUE) } ) return(z) } ploterodeimage <- function(W, Z, ..., Wcol=grey(0.75), rangeZ, colsZ, do.plot=TRUE) { # Internal use only # Image Z is assumed to live on a subset of mask W # colsZ are the colours for the values in the range 'rangeZ' if(!is.mask(W)) { if(do.plot) plot(W, add=TRUE) W <- as.mask(W) } # Extend the colour map to include an extra colour for pixels in W # (1) Add the desired colour of W to the colour map pseudocols <- c(Wcol, colsZ) # (2) Breakpoints bks <- seq(from=rangeZ[1], to=rangeZ[2], length=length(colsZ)+1) dZ <- diff(bks)[1] pseudobreaks <- c(rangeZ[1] - dZ, bks) # (3) Determine a fake value for pixels in W Wvalue <- rangeZ[1] - dZ/2 # Create composite image on W grid # (with W-pixels initialised to Wvalue) X <- as.im(Wvalue, W) # Look up Z-values of W-pixels xx <- as.vector(rasterx.mask(W)) yy <- as.vector(rastery.mask(W)) Zvalues <- lookup.im(Z, xx, yy, naok = TRUE, strict=FALSE) # Overwrite pixels in Z inZ <- !is.na(Zvalues) X$v[inZ] <- Zvalues[inZ] z <- image(X, ..., add=TRUE, ribbon=FALSE, col=pseudocols, breaks=pseudobreaks, do.plot=do.plot) out <- list(X, pseudocols, pseudobreaks) attr(out, "bbox") <- as.owin(z) return(out) } spatstat/R/lurkmppm.R0000644000176200001440000001511213427761007014317 0ustar liggesusers#' lurkmppm.R #' Lurking variable plot for mppm #' 
$Revision: 1.8 $ $Date: 2019/02/10 08:33:42 $ lurking.mppm <- local({ zerofun <- function(x) rep(0, length(x)) threshfun <- function(threshold, value) { force(threshold) force(value) function(x) { value * (x >= threshold) } } approxcumfun <- function(x, y) { stopifnot(length(x) == length(y)) n <- length(x) if(n == 0) return(zerofun) if(n == 1) return(threshfun(x, y)) return(approxfun(x=x, y=y, yleft=0, yright=y[n], rule=2)) } as.function.lurk <- function(x, ..., what=c("empirical", "theoretical")) { what <- match.arg(what) switch(what, empirical = { with(x$empirical, approxcumfun(covariate, value)) }, theoretical = { with(x$theoretical, approxcumfun(covariate, mean)) }) } acceptable <- function(x) { is.im(x) || is.numeric(x) || is.expression(x) } approxcumul <- function(yin, xin, xout) { if(length(yin) > 1) { z <- approx(x=xin, y=yin, xout=xout, rule=2)$y } else { z <- yin * (xout >= xin) } return(z) } interpolateworking <- function(object, xx) { #' extract working data (variance terms) #' and interpolate them at the specified covariate values xx w <- attr(object, "working") if(is.null(w)) return(NULL) w <- as.data.frame(w) covariate <- object$theoretical$covariate y <- apply(w, 2, approxcumul, xin=covariate, xout=xx) return(as.data.frame(y)) } multilurk <- function(object, covariate, type="eem", ..., separate=FALSE, plot.it=TRUE, covname, oldstyle=FALSE, nx=512, main="") { cl <- match.call() stopifnot(is.mppm(object)) if(missing(covname)) { co <- cl$covariate covname <- if(is.name(co)) as.character(co) else if(is.expression(co)) format(co[[1]]) else "covariate" } Fisher <- vcov(object, what="fisher") Vcov <- solve(Fisher) if(acceptable(covariate)) { cov.is.list <- FALSE } else { cov.is.list <- is.list(covariate) && length(covariate) == object$npat && all(sapply(covariate, acceptable)) if(!cov.is.list) stop(paste("Argument 'covariate' should be", "a pixel image, a numeric vector, an expression", "or a list of such arguments", "with one entry for each row of 
original data"), call.=FALSE) } #' pseudo fitted model for each row of data futs <- subfits(object) #' make lurking variable plot object for each row if(cov.is.list) { #' list of covariate arguments, one for each row of data lurks <- mapply(lurking.ppm, object=futs, covariate=covariate, MoreArgs=list(type=type, plot.it=FALSE, ..., internal=list(saveworking=TRUE, Fisher=Fisher), nx=nx, oldstyle=oldstyle, covname=covname), SIMPLIFY=FALSE) } else { #' One covariate argument to rule them all #' First determine range of covariate values covrange <- range(sapply(futs, lurking, covariate=covariate, type=type, internal=list(getrange=TRUE)), na.rm=TRUE) #' Now compute lurking variable plots lurks <- anylapply(futs, lurking, covariate=covariate, type=type, plot.it=FALSE, ..., internal=list(saveworking=TRUE, Fisher=Fisher, covrange=covrange), nx=nx, oldstyle=oldstyle, covname=covname) } if(separate) { #' separate lurking variable plots for each row if(plot.it) { do.call(plot, resolve.defaults(list(x=lurks), list(...), list(main=main, mar.panel=c(5,4,2,3)))) return(invisible(lurks)) } else { return(lurks) } } #' auxiliary info infos <- lapply(lurks, attr, which="info") #' range of covariate values covrange <- range(unlist(lapply(infos, getElement, name="covrange")), na.rm=TRUE) xx <- seq(covrange[1], covrange[2], length=nx) #' empirical part efuns <- lapply(lurks, as.function.lurk, what="empirical") vlist <- lapply(efuns, do.call, list(xx)) sumv <- Reduce("+", vlist) empirical <- data.frame(covariate=xx, value=sumv) #' similar for theoretical curves tfuns <- lapply(lurks, as.function.lurk, what="theoretical") vlist <- lapply(tfuns, do.call, list(xx)) sumv <- Reduce("+", vlist) theoretical <- data.frame(covariate=xx, mean=sumv) #' variance calculation if available wlist <- lapply(lurks, interpolateworking, xx=xx) if(!any(sapply(wlist, is.null))) { w <- Reduce("+", wlist) varI <- w$varI if(oldstyle) { theoretical$sd <- sqrt(varI) } else { Bnames <- setdiff(colnames(w), c("varI", 
"varII")) B <- as.matrix(w[, Bnames, drop=FALSE]) if(ncol(B) != nrow(Vcov)) { warning("Internal variance data are incomplete; reverting to oldstyle=TRUE") oldstyle <- TRUE theoretical$sd <- sqrt(varI) } else { varII <- quadform(B, Vcov) varR <- varI - varII ra <- range(varR, finite=TRUE) if(ra[1] < 0) { warning(paste("Negative values of residual variance!", "Range =", prange(signif(ra, 4))), call.=FALSE) varR <- pmax(0, varR) } theoretical$sd <- sqrt(varR) } } } ## form result result <- list(empirical=empirical, theoretical=theoretical) class(result) <- "lurk" ## copy info e.g. type of residual info <- infos[[1]] info$covrange <- covrange attr(result, "info") <- info if(plot.it) plot(result, ..., main=main) return(invisible(result)) } multilurk }) spatstat/R/ppm.R0000644000176200001440000002106413537661337013256 0ustar liggesusers# # $Revision: 1.58 $ $Date: 2017/10/04 04:10:33 $ # # ppm() # Fit a point process model to a two-dimensional point pattern # # ppm <- function(Q, ...) { UseMethod("ppm") } ppm.formula <- function(Q, interaction=NULL, ..., data=NULL, subset) { ## remember call callstring <- short.deparse(sys.call()) cl <- match.call() ## trap a common error to give a more informative message if(is.sob(data) || is.function(data)) stop(paste("The argument", sQuote("data"), "should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) ########### INTERPRET FORMULA ############################## if(!inherits(Q, "formula")) stop(paste("Argument 'Q' should be a formula")) formula <- Q ## check formula has LHS and RHS. 
Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2]] trend <- formula[c(1,3)] ## FIT ####################################### thecall <- if(missing(subset)) { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction) } else { call("ppm", Q=Yexpr, trend=trend, data=data, interaction=interaction, subset=substitute(subset)) } ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv) result$call <- cl result$callstring <- callstring result$callframe <- parent.frame() return(result) } ppm.quad <- ppm.ppp <- ppm.default <- function(Q, trend = ~1, interaction = Poisson(), ..., covariates = data, data = NULL, covfunargs = list(), subset, clipwin, correction="border", rbord = reach(interaction), use.gam=FALSE, method = "mpl", forcefit=FALSE, emend=project, project=FALSE, prior.mean = NULL, prior.var = NULL, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL ) { Qname <- short.deparse(substitute(Q)) subsetexpr <- if(!missing(subset)) substitute(subset) else NULL clipwin <- if(!missing(clipwin)) clipwin else NULL datalistname <- if(missing(covariates)) "data" else "covariates" if(!(method %in% c("mpl", "ho", "logi", "VBlogi"))) stop(paste("Unrecognised fitting method", sQuote(method))) if(!missing(emend) && !missing(project) && emend != project) stop("Conflicting options: emend != project") if(!is.null(prior.mean) | !is.null(prior.var)){ if(missing(method)) method <- "VBlogi" if(method!="VBlogi") stop("Prior specification only works with method ", sQuote("VBlogi")) } if(method=="VBlogi"){ VB <- TRUE method <- "logi" } else{ VB <- FALSE } if(is.sob(covariates) || is.function(covariates)) stop(paste("The argument", sQuote(datalistname), 
"should not be a spatial object;", "it should be a list of spatial objects"), call.=FALSE) if(inherits(Q, "logiquad")){ if(missing(method)) method <- "logi" if(method != "logi") stop(paste("Only method =", sQuote("logi"), "makes sense when Q is of type", sQuote("logiquad"))) } cl <- match.call() if(is.null(callstring)) callstring <- paste(short.deparse(sys.call()), collapse="") if(is.ppp(Q) && is.marked(Q) && !is.multitype(Q)) stop(paste("ppm is not yet implemented for marked point patterns,", "other than multitype patterns.")) if(!(is.ppp(Q) || is.quad(Q) || checkfields(Q, c("data", "dummy")))) { stop("Argument Q must be a point pattern or a quadrature scheme") } X <- if(is.ppp(Q)) Q else Q$data ## Validate interaction if(is.null(interaction)) { interaction <- Poisson() } else if(inherits(interaction, "intermaker")) { ## e.g. 'interaction=Hardcore': invoke it without arguments interaction <- (f <- interaction)() dont.complain.about(f) } else if(!is.interact(interaction)) stop("Argument 'interaction' must be an object of class 'interact'") ## Ensure interaction is fully defined if(!is.null(ss <- interaction$selfstart)) { # invoke selfstart mechanism to fix all parameters interaction <- ss(X, interaction) } if(inherits(trend, "formula")) { ## handle "." in formula, representing all variables in 'data' if("." %in% variablesinformula(trend)) { if(is.null(covariates)) stop("Cannot expand '.' 
since 'data' is not present", call.=FALSE) rhs <- paste(names(covariates), collapse=" + ") allmaineffects <- as.formula(paste("~", rhs)) environment(allmaineffects) <- environment(trend) trend <- update(allmaineffects, trend) } ## expand polynom() in formula if(spatstat.options("expand.polynom")) trend <- expand.polynom(trend) } # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # validate rbord if(correction == "border") { # rbord for border correction rbord.given <- !missing(rbord) && !is.null(rbord) if(is.null(rbord)) rbord <- reach(interaction) infin <- is.infinite(rbord) too.large <- infin || (eroded.areas(as.owin(X), rbord) == 0) if(too.large) { whinge <- paste(if(rbord.given) "rbord" else "the reach of this interaction", if(infin) "is infinite or unknown;" else "is too large for this window;", "please specify", if(rbord.given) "a smaller value of", "rbord, or use a different edge correction") stop(whinge) } } else { # rbord must be numeric to satisfy mpl.engine if(is.null(rbord)) rbord <- 0 } if(method == "logi") { fitLOGI <- logi.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, gcontrol=gcontrol, callstring=callstring, prior.mean=prior.mean, prior.var=prior.var, VB=VB, ...) 
fitLOGI$Qname <- Qname fitLOGI$call <- cl fitLOGI$callstring <- callstring fitLOGI$callframe <- parent.frame() if(emend && !valid.ppm(fitLOGI)) fitLOGI <- emend.ppm(fitLOGI) return(fitLOGI) } # fit by maximum pseudolikelihood fitMPL <- mpl.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, subsetexpr=subsetexpr, clipwin=clipwin, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, eps = eps, gcontrol=gcontrol, callstring=callstring, ...) fitMPL$Qname <- Qname if(!is.ppm(fitMPL)) { # internal use only - returns some other data return(fitMPL) } fitMPL$call <- cl fitMPL$callstring <- callstring fitMPL$callframe <- parent.frame() if(emend && !valid.ppm(fitMPL)) fitMPL <- emend.ppm(fitMPL) if(method == "mpl" || is.poisson.ppm(fitMPL)) return(fitMPL) fitHO <- ho.engine(fitMPL, nsim=nsim, nrmh=nrmh, start=start, control=control, verb=verb) if(is.null(fitHO)) return(fitMPL) if(emend && !valid.ppm(fitHO)) fitHO <- emend.ppm(fitHO) return(fitHO) } spatstat/R/rshift.R0000644000176200001440000001135313333543255013751 0ustar liggesusers# # rshift.R # # random shift with optional toroidal boundary # # $Revision: 1.18 $ $Date: 2017/12/30 05:39:13 $ # # rshift <- function(X, ...) 
{ UseMethod("rshift") } rshift.splitppp <- function(X, ..., which=seq_along(X)) { verifyclass(X, "splitppp") if("group" %in% names(list(...))) stop(paste("argument", sQuote("group"), "not implemented for splitppp objects")) if(is.null(which)) { iwhich <- which <- seq_along(X) } else { id <- seq_along(X) names(id) <- names(X) iwhich <- id[which] if(length(iwhich) == 0) stop(paste("Argument", sQuote("which"), "did not match any marks")) } # validate arguments and determine common clipping window arglist <- handle.rshift.args(X[[1]]$window, ..., edgedefault="torus") if(!is.null(clip <- arglist$clip)) { # clip the patterns that are not to be shifted if(length(iwhich) < length(X)) X[-iwhich] <- lapply(X[-iwhich], "[.ppp", i=clip) } # perform shift on selected patterns # (setting group = NULL ensures each pattern is not split further) shiftXsub <- do.call(lapply, append(list(X[iwhich], rshift.ppp, group=NULL), arglist)) # put back X[iwhich] <- shiftXsub return(X) } rshift.ppp <- function(X, ..., which=NULL, group) { verifyclass(X, "ppp") # validate arguments and determine common clipping window arglist <- handle.rshift.args(X$window, ..., edgedefault="torus") # default grouping # (NULL is not the default) # (NULL means all points shifted in parallel) if(missing(group)) group <- if(is.multitype(X)) marks(X) else NULL # if no grouping, use of `which' is undefined if(is.null(group) && !is.null(which)) stop(paste("Cannot apply argument", sQuote("which"), "; no grouping defined")) # if grouping, use split if(!is.null(group)) { Y <- split(X, group) split(X, group) <- do.call(rshift.splitppp, append(list(Y, which=which), arglist)) return(X) } # ungrouped point pattern # shift all points in parallel # recover arguments radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip W <- X$window W <- rescue.rectangle(W) if(W$type != "rectangle" && edge=="torus") stop("Torus (periodic) boundary is only meaningful for rectangular 
windows") # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate points x <- X$x + jump$x y <- X$y + jump$y # wrap points if(edge == "torus") { xr <- W$xrange yr <- W$yrange Wide <- diff(xr) High <- diff(yr) x <- xr[1] + (x - xr[1]) %% Wide y <- yr[1] + (y - yr[1]) %% High } # put back into point pattern X$x <- x X$y <- y # clip to window if(!is.null(clip)) X <- X[clip] return(X) } handle.rshift.args <- function(W, ..., radius=NULL, width=NULL, height=NULL, edge=NULL, clip=NULL, edgedefault) { verifyclass(W, "owin") W <- rescue.rectangle(W) if(length(aargh <- list(...)) > 0) stop(paste("Unrecognised arguments:", paste(names(aargh), collapse=","))) if(!is.null(radius)) { # radial generator if(!(is.null(width) && is.null(height))) stop(paste(sQuote("radius"), "is incompatible with", sQuote("width"), "and", sQuote("height"))) } else { # rectangular generator if(is.null(width) != is.null(height)) stop("Must specify both width and height, if one is specified") if(is.null(width)) width <- diff(W$xrange) if(is.null(height)) height <- diff(W$yrange) } if(is.null(edge)) edge <- edgedefault else if(!(edge %in% c("torus", "erode", "none"))) stop(paste("Unrecognised option erode=", sQuote(edge))) # determine whether clipping window is needed if(is.null(clip)) clip <- switch(edge, torus= NULL, none= W, erode={ if(!is.null(radius)) erosion.owin(W, radius) else if(W$type == "rectangle") trim.rectangle(W, width, height) else erosion.owin(W, max(width, height)) }) return(list(radius=radius, width=width, height=height, edge=edge, clip=clip)) } # rtoro <- function(X, which=NULL, radius=NULL, width=NULL, height=NULL) { # .Deprecated("rshift", package="spatstat") # rshift(X, which=which, radius=radius, width=width, height=height) # } spatstat/R/rescale.R0000644000176200001440000000321013427752145014065 0ustar liggesusers# # # rescale.R # # $Revision: 1.8 
$ $Date: 2019/02/10 06:42:26 $ # # rescale <- function(X, s, unitname) { UseMethod("rescale") } rescale.ppp <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- affine.ppp(X, mat=diag(c(1/s,1/s)), rescue=FALSE) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } rescale.owin <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- affine.owin(X, mat=diag(c(1/s,1/s)), rescue=FALSE) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } rescale.im <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- X Y$xrange <- X$xrange/s Y$yrange <- X$yrange/s Y$xstep <- X$xstep/s Y$ystep <- X$ystep/s Y$xcol <- X$xcol/s Y$yrow <- X$yrow/s unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } rescale.psp <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- affine.psp(X, mat=diag(c(1/s,1/s)), rescue=FALSE) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } rescale.unitname <- function(X, s, unitname) { if(!missing(unitname) && !is.null(unitname)) return(as.unitname(unitname)) if(summary(X)$vanilla) return(X) if(missing(s)) { X$multiplier <- 1 } else { if(!is.numeric(s) || length(s) != 1 || s <= 0) stop("s should be a positive number") X$multiplier <- s * X$multiplier } return(X) } spatstat/R/fryplot.R0000644000176200001440000000450013333543255014145 0ustar liggesusers# # fryplot.R # # $Revision: 1.15 $ $Date: 2017/02/07 07:22:47 $ # fryplot <- function(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) b <- as.rectangle(X) halfspan <- with(b, c(diff(xrange), diff(yrange))) if(!is.null(width)) { halfwidth <- ensure2vector(width)/2 halfspan <- pmin.int(halfspan, halfwidth) } bb <- 
owin(c(-1,1) * halfspan[1L], c(-1,1) * halfspan[2L]) Y <- frypoints(X, from=from, to=to, dmax=diameter(bb))[bb] do.call(plot.ppp, resolve.defaults(list(x=Y), list(...), list(main=paste("Fry plot of", Xname)))) if(axes) { lines(c(0,0), c(-1,1) * halfspan[1L]) lines(c(-1,1) * halfspan[2L], c(0,0)) } return(invisible(NULL)) } frypoints <- function(X, from=NULL, to=NULL, dmax=Inf) { X <- as.ppp(X) b <- as.rectangle(X) bb <- owin(c(-1,1) * diff(b$xrange), c(-1,1) * diff(b$yrange)) n <- X$n xx <- X$x yy <- X$y ## determine (dx, dy) for all relevant pairs if(is.null(from) && is.null(to)) { if(is.infinite(dmax)) { dx <- outer(xx, xx, "-") dy <- outer(yy, yy, "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] } else { cl <- closepairs(X, dmax) DX <- cl$dx DY <- cl$dy I <- cl$j ## sic: I is the index of the 'TO' element } } else { seqn <- seq_len(n) from <- if(is.null(from)) seqn else seqn[from] to <- if(is.null(to)) seqn else seqn[to] if(is.infinite(dmax)) { dx <- outer(xx[to], xx[from], "-") dy <- outer(yy[to], yy[from], "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE notsame <- notsame[to, from, drop=FALSE] DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] } else { cl <- crosspairs(X[from], X[to], dmax) ok <- with(cl, from[i] != to[j]) DX <- cl$dx[ok] DY <- cl$dy[ok] I <- cl$j[ok] } } ## form into point pattern Fry <- ppp(DX, DY, window=bb, check=FALSE) if(is.marked(X)) { marx <- as.data.frame(marks(X)) marxto <- if(is.null(to)) marx else marx[to, ,drop=FALSE] marks(Fry) <- marxto[I, ] } return(Fry) } spatstat/R/rmh.ppm.R0000644000176200001440000001223313553465042014032 0ustar liggesusers# # simulation of FITTED model # # $Revision: 1.36 $ $Date: 2019/10/22 02:31:58 $ # # rmh.ppm <- function(model, start = NULL, control = default.rmhcontrol(model, w=w), ..., w = NULL, project=TRUE, nsim=1, drop=TRUE, saveinfo=TRUE, verbose=TRUE, 
## simulate.ppm
##    Simulate realisations of a fitted Gibbs point process model ('ppm')
##    by Metropolis-Hastings (rmh).
## Arguments:
##    object     fitted model of class "ppm"
##    nsim       number of simulated patterns required
##    singlerun  if TRUE, generate all patterns from one long MCMC run,
##               saving every k-th state; if FALSE, run nsim independent chains
##    start, control, w, project, new.coef, verbose  passed to the rmh machinery
##    progress   print progress messages
##    drop       passed to simulationresult()
## Value: a list of simulated point patterns (wrapped by simulationresult,
##    with timing information attached by timed()).
## Fix: 'starttime' was assigned with '=', inconsistent with the '<-'
## assignment used everywhere else in this file.
simulate.ppm <- function(object, nsim=1, ..., singlerun=FALSE,
                         start = NULL,
                         control = default.rmhcontrol(object, w=w),
                         w = NULL,
                         project=TRUE, new.coef=NULL,
                         verbose=FALSE, progress=(nsim > 1),
                         drop=FALSE) {
  verifyclass(object, "ppm")
  argh <- list(...)
  if(nsim == 0) return(list())
  starttime <- proc.time()
  # set up control parameters
  if(missing(control) || is.null(control)) {
    rcontr <- default.rmhcontrol(object, w=w)
  } else {
    rcontr <- rmhcontrol(control)
  }
  if(singlerun) {
    # allow nsave, nburn to determine nrep
    nsave <- resolve.1.default("nsave", list(...), as.list(rcontr),
                               .MatchNull=FALSE)
    nburn <- resolve.1.default("nburn", list(...), as.list(rcontr),
                               list(nburn=nsave), .MatchNull=FALSE)
    if(!is.null(nsave)) {
      nrep <- nburn + (nsim-1) * sum(nsave)
      rcontr <- update(rcontr, nrep=nrep, nsave=nsave, nburn=nburn)
    }
  }
  # other overrides
  if(length(list(...)) > 0) rcontr <- update(rcontr, ...)
  # Set up model parameters for rmh
  rmodel <- rmhmodel(object, w=w, verbose=FALSE, project=TRUE,
                     control=rcontr, new.coef=new.coef)
  # default initial state: same number of points as the data
  if(is.null(start)) {
    datapattern <- data.ppm(object)
    start <- rmhstart(n.start=datapattern$n)
  }
  rstart <- rmhstart(start)
  #########
  if(singlerun && nsim > 1) {
    # //////////////////////////////////////////////////
    # execute one long run and save every k-th iteration
    if(is.null(rcontr$nsave)) {
      # determine spacing between subsamples
      if(!is.null(rcontr$nburn)) {
        nsave <- max(1, with(rcontr, floor((nrep - nburn)/(nsim-1))))
      } else {
        # assume nburn = 2 * nsave
        nsave <- max(1, with(rcontr, floor(nrep/(nsim+1))))
        nburn <- 2 * nsave
      }
      rcontr <- update(rcontr, nsave=nsave, nburn=nburn)
    }
    # check nrep is enough
    nrepmin <- with(rcontr, nburn + (nsim-1) * sum(nsave))
    if(rcontr$nrep < nrepmin)
      rcontr <- update(rcontr, nrep=nrepmin)
    # OK, run it
    if(progress) {
      cat(paste("Generating", nsim, "simulated patterns in a single run ... "))
      flush.console()
    }
    Y <- rmh(rmodel, rstart, rcontr, verbose=verbose)
    if(progress) cat("Done.\n")
    # extract sampled states
    out <- attr(Y, "saved")
    nout <- length(out)
    if(nout == nsim+1L && identical(names(out)[1], "Iteration_0")) {
      ## expected behaviour: first entry is initial state
      out <- out[-1L]
    } else if(nout != nsim) {
      stop(paste("Internal error: wrong number of simulations generated:",
                 nout, "!=", nsim))
    }
  } else {
    # //////////////////////////////////////////////////
    # execute 'nsim' independent runs
    out <- list()
    # pre-digest arguments
    rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=verbose)
    # go
    if(nsim > 0) {
      if(progress) {
        cat(paste("Generating", nsim, "simulated",
                  ngettext(nsim, "pattern", "patterns"), "..."))
        flush.console()
      }
      # call rmh
      # passing only arguments unrecognised by rmhcontrol
      known <- names(argh) %in% names(formals(rmhcontrol.default))
      fargs <- argh[!known]
      rmhargs <- append(list(InfoList=rmhinfolist, verbose=verbose), fargs)
      if(progress) pstate <- list()
      for(i in 1:nsim) {
        out[[i]] <- do.call(rmhEngine, rmhargs)
        if(progress) pstate <- progressreport(i, nsim, state=pstate)
      }
    }
  }
  out <- simulationresult(out, nsim, drop)
  out <- timed(out, starttime=starttime)
  return(out)
}
## Lscaled
##    L-function counterpart of Kscaled: L = sqrt(K/pi),
##    relabelled as an 'fv' object.
"Lscaled" <- function(...) {
  K <- Kscaled(...)
  L <- eval.fv(sqrt(pmax.int(K,0)/pi))
  # relabel the fv object
  L <- rebadge.fv(L, quote(L[scaled](r)), c("L","scaled"))
  attr(L, "labl") <- attr(K, "labl")
  return(L)
}

## Kscaled
##    Estimate the template K function of a locally-scaled point process.
##    Interpoint distances are rescaled by the local intensity:
##    scaled distance = |x_i - x_j| * (sqrt(lambda_i) + sqrt(lambda_j))/2.
## Arguments:
##    X            point pattern
##    lambda       intensity values: numeric vector, image, function or ppm;
##                 if missing, estimated by leave-one-out kernel smoothing
##    r, breaks    optional vector of (rescaled) distance values
##    rmax         maximum rescaled distance
##    correction   edge correction(s): border / isotropic / translate / none
##    renormalise, normpower   optional renormalisation of lambda
##    sigma, varcov  smoothing parameters passed to density.ppp
## Value: an 'fv' object containing the chosen estimates of K_scaled(r).
"Kscaled"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL,
                      rmax = 2.5,
                      correction=c("border", "isotropic", "translate"),
                      renormalise=FALSE, normpower=1,
                      sigma=NULL, varcov=NULL)
{
  verifyclass(X, "ppp")
  # rfixed <- !missing(r) || !missing(breaks)
  ## determine basic parameters
  W <- X$window
  npts <- X$n
  areaW <- area(W)
  halfdiameter <- diameter(W)/2
  ## match corrections
  correction.given <- !missing(correction) && !is.null(correction)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=TRUE)
  # best.wanted <- ("best" %in% correction)
  correction <- implemented.for.K(correction, W$type, correction.given)
  ###########################################################
  ## DETERMINE WEIGHTS AND VALIDATE
  ##
  if(missing(lambda)) {
    ## No intensity data provided
    ## Estimate density by leave-one-out kernel smoothing
    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
                      at="points", leaveoneout=TRUE)
    lambda <- as.numeric(lambda)
  } else {
    ## lambda values provided: evaluate at the data points if necessary
    if(is.im(lambda))
      lambda <- safelookup(lambda, X)
    else if(is.function(lambda))
      lambda <- lambda(X$x, X$y)
    else if(is.ppm(lambda))
      lambda <- safelookup(predict(lambda, type="trend"), X)
    else if(!is.numeric(lambda) || !is.null(dim(lambda)))
      stop(paste(sQuote("lambda"),
                 "should be a vector, a pixel image, a function or a ppm"))
    check.nvector(lambda, npts)
  }
  if(renormalise) {
    ## renormalise. Here we only need half the power ;-)
    check.1.real(normpower)
    stopifnot(normpower %in% 1:2)
    renorm.factor <- (areaW/sum(1/lambda))^(normpower/2)
    lambda <- lambda/renorm.factor
  }
  ## Calculate range of r values using max lambda
  sra <- sqrt(range(lambda))
  minrescale <- sra[1]
  maxrescale <- sra[2]
  ## convert arguments to absolute distances
  absr <- if(!is.null(r)) r/maxrescale else NULL
  absrmaxdefault <- min(rmax.rule("K", W), rmax/maxrescale)
  absbreaks <- if(!is.null(breaks)) scalardilate(breaks, 1/maxrescale) else NULL
  ## determine absolute distances
  absbreaks <- handle.r.b.args(absr, absbreaks, W, rmaxdefault=absrmaxdefault)
  absr <- absbreaks$r
  ## convert to rescaled distances
  breaks <- scalardilate(absbreaks, maxrescale)
  r <- breaks$r
  rmax <- breaks$max
  ## recommended range of scaled r values
  alim <- c(0, min(rmax, maxrescale * absrmaxdefault))
  ## reliability threshold: beyond this, estimates are set to NA below
  rthresh <- minrescale * halfdiameter
  ## maximum absolute distance ever needed
  maxabsdist <- min(rmax/minrescale, halfdiameter)
  ## this will be the output data frame
  K <- data.frame(r=r, theo= pi * r^2)
  desc <- c("distance argument r", "theoretical Poisson %s")
  K <- fv(K, "r", quote(K[scaled](r)),
          "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc,
          fname=c("K", "scaled"))
  ## identify all relevant close pairs
  ## (coordinates are only needed for translate/isotropic corrections)
  needXI <- any(correction %in% c("translate", "isotropic"))
  close <- closepairs(X, maxabsdist, what=if(needXI) "all" else "ijd")
  I <- close$i
  J <- close$j
  ## locally-scaled distances
  sqrtLambda <- sqrt(lambda)
  lamIJ <- (sqrtLambda[I] + sqrtLambda[J])/2
  absDIJ <- close$d
  DIJ <- absDIJ * lamIJ
  ## first point of each pair
  XI <- if(needXI) ppp(close$xi, close$yi, window=W, check=FALSE) else NULL
  if(any(correction == "none")) {
    ## uncorrected! For demonstration purposes only!
    wh <- whist(DIJ, breaks$val)  # no weights
    Kun <- cumsum(wh)/npts
    K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)",
                 "uncorrected estimate of %s",
                 "un")
  }
  if(any(correction == "border")) {
    ## border method
    ## Compute SCALED distances to boundary
    b <- bdist.points(X) * sqrtLambda
    bI <- b[I]
    ## apply reduced sample algorithm to scaled distances
    RS <- Kount(DIJ, bI, b, breaks)
    Kb <- RS$numerator/RS$denom.count
    Kb[r > rthresh] <- NA
    K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)",
                 "border-corrected estimate of %s",
                 "border")
  }
  if(any(correction == "translate")) {
    ## translation correction
    XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
    edgewt <- edge.Trans(XI, XJ, paired=TRUE)
    wh <- whist(DIJ, breaks$val, edgewt)
    Ktrans <- cumsum(wh)/npts
    Ktrans[r >= rthresh] <- NA
    K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)",
                 "translation-corrected estimate of %s",
                 "trans")
  }
  if(any(correction == "isotropic")) {
    ## Ripley isotropic correction (using UN-SCALED distances)
    edgewt <- edge.Ripley(XI, matrix(absDIJ, ncol=1))
    wh <- whist(DIJ, breaks$val, edgewt)
    Kiso <- cumsum(wh)/npts
    Kiso[r >= rthresh] <- NA
    K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)",
                 "Ripley isotropic correction estimate of %s",
                 "iso")
  }
  ## default plot will display all edge corrections
  formula(K) <- . ~ r
  nama <- rev(colnames(K))
  ## NOTE(review): "rip" and "ls" do not occur among the column names
  ## created above ("un","border","trans","iso") -- presumably legacy
  ## names; confirm whether this exclusion list is still correct.
  fvnames(K, ".") <- nama[!(nama %in% c("r", "rip", "ls"))]
  ##
  unitname(K) <- c("normalised unit", "normalised units")
  return(K)
}
rpanel:::rp.control.put(panel$panelname, panel) ## now redraw it redraw.iplot.layered(panel) } faster.layers <- function(x, visible) { if(any(islinnet <- unlist(lapply(x, inherits, what="linnet")))) { # convert linnet layers to psp, for efficiency x[islinnet] <- lapply(x[islinnet], as.psp) } repeat{ islpp <- unlist(lapply(x, inherits, what="lpp")) if(!any(islpp)) break # convert an lpp layer to two layers: psp and ppp, for efficiency ii <- min(which(islpp)) pl <- layerplotargs(x) n <- length(x) xpre <- if(ii == 1) NULL else x[1:ii] xpost <- if(ii == n) NULL else x[(ii+1L):n] ppre <- if(ii == 1) NULL else pl[1:ii] ppost <- if(ii == n) NULL else pl[(ii+1):n] a <- as.psp(as.linnet(x[[ii]])) b <- as.ppp(x[[ii]]) x <- layered(LayerList=c(xpre, list(a, b), xpost), plotargs=unname(c(ppre, pl[ii], pl[ii], ppost))) visible <- visible[if(ii == 1) c(1, seq_len(n)) else if(ii == n) c(seq_len(n), n) else c(1:(ii-1), ii, ii, (ii+1):n)] } attr(x, "visible") <- visible return(x) } iplot.layered <- function(x, ..., xname, visible) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "layered") if(missing(visible) || is.null(visible)) { visible <- rep(TRUE, length(x)) } else if(length(visible) == 1) { visible <- rep(visible, length(x)) } else stopifnot(length(visible) == length(x)) kraever("rpanel") x <- faster.layers(x, visible) visible <- attr(x, "visible") x <- freeze.colourmaps(x) bb <- as.rectangle(as.owin(x)) bbmid <- unlist(centroid.owin(bb)) lnames <- names(x) if(sum(nzchar(lnames)) != length(x)) lnames <- paste("Layer", seq_len(length(x))) ## p <- rpanel::rp.control(paste("iplot(", xname, ")", sep=""), x=x, w=as.owin(x), xname=xname, layernames=lnames, bb=bb, bbmid=bbmid, zoomfactor=1, zoomcentre=bbmid, which = visible, size=c(700, 400)) # Split panel into three # Left: plot controls # Middle: data # Right: navigation/zoom rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=0)) rpanel::rp.grid(p, "gdisplay", pos=list(row=0,column=1)) 
rpanel::rp.grid(p, "gnavigate", pos=list(row=0,column=2)) #----- Data display ------------ # This line is to placate the package checker mytkr <- NULL # Create data display panel rpanel::rp.tkrplot(p, mytkr, plotfun=do.iplot.layered, action=click.iplot.layered, pos=list(row=0,column=0,grid="gdisplay")) #----- Plot controls ------------ nextrow <- 0 pozzie <- function(n=nextrow, ...) append(list(row=n,column=0,grid="gcontrols"), list(...)) # main title rpanel::rp.textentry(p, xname, action=redraw.iplot.layered, title="Plot title", pos=pozzie(0)) nextrow <- 1 # select some layers nx <- length(x) which <- rep.int(TRUE, nx) if(nx > 1) { rpanel::rp.checkbox(p, which, labels=lnames, action=redraw.iplot.layered, title="Select layers to plot", pos=pozzie(nextrow), sticky="") nextrow <- nextrow + 1 } # button to print a summary at console rpanel::rp.button(p, title="Print summary information", pos=pozzie(nextrow), action=function(panel) { lapply(lapply(panel$x, summary), print) return(panel) }) # #----- Navigation controls ------------ nextrow <- 0 navpos <- function(n=nextrow,cc=0, ...) 
append(list(row=n,column=cc,grid="gnavigate"), list(...)) rpanel::rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce + c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce - c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) rpanel::rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce + c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce - c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor * 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor / 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- 1 panel$zoomcentre <- panel$bbmid CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""), action=redraw.iplot.layered) 
# do.iplot.layered
#    Redraw the data display when the control panel is operated.
#    'panel' is the rpanel state list (fields: x, w, which, zoomfactor,
#    zoomcentre, bb, bbmid, xname).  Returns the panel unchanged.
do.iplot.layered <- function(panel) {
  # scale and clip the pattern
  x <- panel$x[panel$which]     # only the layers selected for plotting
  w <- panel$w
  z <- panel$zoomfactor
  if(is.null(z)) z <- 1         # default: no zoom
  ce <- panel$zoomcentre
  bb <- panel$bb
  bbmid <- panel$bbmid
  # apply the zoom: move the zoom centre to the origin, dilate by the
  # zoom factor, then translate back to the midpoint of the frame
  scalex <- shift(scalardilate(shift(x, -ce), z), bbmid)
  scalew <- shift(scalardilate(shift(w, -ce), z), bbmid)
  # clip the transformed data and window to the original frame
  scalex <- scalex[, bb]
  scalew <- intersect.owin(scalew, bb, fatal=FALSE)
  # determine what is plotted under the clipped pattern
  blankargs <- list(type="n")
  dashargs <- list(lty=3, border="red")
  panel.begin <-
    if(is.null(scalew)) {
      # empty intersection; just create the plot space
      layered(bb, plotargs=list(blankargs))
    } else if(identical(bb, scalew)) {
      if(z == 1) {
        # original state
        # window is rectangular
        # plot the data window as a solid black rectangle
        layered(bb, scalew, plotargs=list(blankargs, list(lwd=2)))
      } else {
        # zoom view is entirely inside window
        # plot the clipping region as a red dashed rectangle
        layered(bb, plotargs=list(dashargs))
      }
    } else {
      # field of view is not a subset of window
      # plot the clipping region as a red dashed rectangle
      # Then add the data window
      layered(bb, scalew, plotargs=list(dashargs, list(invert=TRUE)))
    }
  # draw it
  opa <- par(ask=FALSE)
  plot(panel.begin, main=panel$xname)
  plot(scalex, add=TRUE)
  par(opa)     # restore previous graphics state
  panel
}
# clickjoin
#    Interactively add or delete edges between points of the pattern X.
# Arguments:
#    X     point pattern (class "ppp")
#    ...   graphics arguments passed to lines() when drawing an edge
#    add   logical: add to an existing plot of X?
#    m     optional logical adjacency matrix giving the initial edges
#    join  TRUE to create edges between clicked pairs, FALSE to delete them
# Value: the updated (symmetric) logical adjacency matrix.
clickjoin <- function(X, ..., add=TRUE, m=NULL, join=TRUE) {
  verifyclass(X, "ppp")
  if(!(is.logical(join) && length(join) == 1))
    stop("join should be a single logical value")
  plot(X, add=add, pch=16)
  if(is.null(m)) {
    ## start with no edges
    m <- matrix(FALSE, npoints(X), npoints(X))
  } else {
    ## validate the supplied adjacency matrix, then draw its edges
    stopifnot(is.matrix(m) && is.logical(m))
    stopifnot(all(dim(m) == npoints(X)))
    from <- as.vector(row(m)[m])
    to <- as.vector(col(m)[m])
    with(X, segments(x[from], y[from], x[to], y[to]))
  }
  ## repeatedly ask the user to identify two points;
  ## an empty selection terminates the loop
  while(TRUE) {
    twoid <- identify(X, plot=FALSE, n=2)
    n <- length(twoid)
    if(n == 0) break
    if(n == 2) {
      ## set/clear the edge symmetrically and display it
      m[twoid[1L],twoid[2L]] <- m[twoid[2L],twoid[1L]] <- join
      lines(X$x[twoid], X$y[twoid], ...)
    }
  }
  return(m)
}
} } return(m) } spatstat/R/eval.im.R0000644000176200001440000002231113457056116014003 0ustar liggesusers# # eval.im.R # # eval.im() Evaluate expressions involving images # # compatible.im() Check whether two images are compatible # # harmonise.im() Harmonise images # commonGrid() # # $Revision: 1.54 $ $Date: 2019/04/21 11:42:27 $ # eval.im <- local({ eval.im <- function(expr, envir, harmonize=TRUE, warn=TRUE) { e <- as.expression(substitute(expr)) ## get names of all variables in the expression varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") ## get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) ## WAS: vars <- lapply(as.list(varnames), get, envir=envir) ## WAS: funs <- lapply(as.list(funnames), get, envir=envir) ## ## find out which variables are images ims <- unlist(lapply(vars, is.im)) if(!any(ims)) stop("No images in this expression") images <- vars[ims] nimages <- length(images) ## test that the images are compatible if(!(do.call(compatible, unname(images)))) { whinge <- paste(if(nimages > 2) "some of" else NULL, "the images", commasep(sQuote(names(images))), if(!harmonize) "are" else "were", "not compatible") if(!harmonize) { stop(whinge, call.=FALSE) } else if(warn) { warning(whinge, call.=FALSE) } images <- do.call(harmonise.im, images) } ## trap a common error: using fv object as variable isfun <- unlist(lapply(vars, is.fv)) if(any(isfun)) stop("Cannot use objects of class fv as variables in eval.im") ## replace each image by its matrix of pixel values, and evaluate imagevalues <- lapply(images, getImValues) template <- images[[1L]] ## This bit has been repaired: 
## compatible.im
##    Decide whether two or more pixel images are compatible:
##    same array dimensions, raster coordinates agreeing to within
##    a tolerance, and compatible units of length.
compatible.im <- function(A, B, ..., tol=1e-6) {
  verifyclass(A, "im")
  ## a single image is trivially compatible
  if(missing(B)) return(TRUE)
  verifyclass(B, "im")
  ## pixel arrays must have identical dimensions
  if(!all(A$dim == B$dim)) return(FALSE)
  ## largest discrepancy in the raster geometry, each axis separately
  dx <- max(abs(A$xrange - B$xrange),
            abs(A$xstep  - B$xstep),
            abs(A$xcol   - B$xcol))
  dy <- max(abs(A$yrange - B$yrange),
            abs(A$ystep  - B$ystep),
            abs(A$yrow   - B$yrow))
  ## discrepancies are judged relative to the finer pixel size
  agree.x <- (dx < tol * min(A$xstep, B$xstep))
  agree.y <- (dy < tol * min(A$ystep, B$ystep))
  agree.u <- compatible.unitname(unitname(A), unitname(B))
  if(!(agree.x && agree.y && agree.u)) return(FALSE)
  ## A and B agree; recurse over any remaining images
  if(length(list(...)) > 0)
    return(compatible.im(B, ..., tol=tol))
  TRUE
}
result[isim] <- imgs Wtemplate <- imgs[[1L]] } else { ## test for compatible units un <- lapply(imgs, unitname) uok <- unlist(lapply(un, compatible.unitname, y=un[[1L]])) if(!all(uok)) stop("Images have incompatible units of length") ## find the image with the highest resolution xsteps <- unlist(lapply(imgs, getElement, name="xstep")) which.finest <- which.min(xsteps) finest <- imgs[[which.finest]] ## get the bounding box bb <- do.call(boundingbox, lapply(unname(imgs), as.rectangle)) if(!is.null(bb0)) bb <- boundingbox(bb, bb0) ## determine new raster coordinates xcol <- prolongseq(finest$xcol, bb$xrange) yrow <- prolongseq(finest$yrow, bb$yrange) xy <- list(x=xcol, y=yrow) ## resample all images on new raster newimgs <- lapply(imgs, as.im, xy=xy) result[isim] <- newimgs Wtemplate <- newimgs[[which.finest]] } ## convert other data to images if(any(notim <- !isim)) result[notim] <- lapply(argz[notim], as.im, W=as.mask(Wtemplate)) names(result) <- names(argz) return(result) } ## Return just the corresponding template window commonGrid <- local({ ## auxiliary function gettype <- function(x) { if(is.im(x) || is.mask(x)) "raster" else if(is.owin(x) || is.ppp(x) || is.psp(x)) "spatial" else "none" } commonGrid <- function(...) { argz <- list(...) 
## im.apply
##    Apply a function pixelwise across a list of compatible images.
## Arguments:
##    X               list of pixel images (or an 'imlist')
##    FUN             function, or name of a function, applied to the vector
##                    of values at each pixel; sum/mean/var/sd are recognised
##                    and computed by fast row operations
##    ...             additional arguments passed to FUN
##    fun.handles.na  if TRUE, FUN is trusted to deal with NA values itself
##    check           if TRUE, verify/harmonise image compatibility first
## Value: a single pixel image on the raster of X[[1]].
im.apply <- function(X, FUN, ..., fun.handles.na=FALSE, check=TRUE) {
  if(!inherits(X, "imlist")) {
    stopifnot(is.list(X))
    if(!all(sapply(X, is.im)))
      stop("All elements of X must be pixel images")
  }
  ## determine function to be applied
  fun <- if(is.character(FUN)) get(FUN, mode="function") else
         if(is.function(FUN)) FUN else stop("Unrecognised format for FUN")
  ## recognise the common summary functions for a fast path
  funcode <- match(list(fun),
                   list(base::sum,
                        base::mean,
                        base::mean.default,
                        stats::var,
                        stats::sd),
                   nomatch=0L)
  funtype <- c("general", "sum", "mean", "mean", "var", "sd")[funcode+1L]
  if(funcode != 0)
    na.rm <- resolve.1.default(list(na.rm=FALSE), list(...))
  ## ensure images are compatible
  if(check && !do.call(compatible, unname(X)))
    X <- do.call(harmonise.im, X)
  template <- X[[1L]]
  d <- dim(template)
  ## extract numerical values and convert to matrix with one column per image
  vals <- sapply(X, getElement, name="v")
  ## apply to all pixels ?
  full <- fun.handles.na || !anyNA(vals)
  if(!full) {
    ## NA present
    ok <- complete.cases(vals)
    if(!any(ok)) {
      ## empty result
      return(as.im(NA, W=template))
    }
    ## restrict to pixels where all data are non-NA
    vals <- vals[ok, , drop=FALSE]
  }
  n <- nrow(vals)
  ## calculate (fast row operations for the recognised functions)
  y <- switch(funtype,
              general = apply(vals, 1L, fun, ...),
              sum = rowSums(vals, na.rm=na.rm),
              mean = rowMeans(vals, na.rm = na.rm),
              sd = ,
              var = {
                ## var/sd via sums of values and squares
                sumx <- rowSums(vals, na.rm = na.rm)
                sumx2 <- rowSums(vals^2, na.rm = na.rm)
                if(!anyNA(vals)) {
                  m <- ncol(vals)
                  v <- (sumx2 - sumx^2/m)/(m-1)
                } else {
                  ## count non-missing entries in each row;
                  ## variance undefined for fewer than 2 values
                  m <- rowSums(!is.na(vals))
                  v <- ifelse(m < 2, NA, (sumx2 - sumx^2/m)/(m-1))
                }
                if(funtype == "var") v else sqrt(v)
              })
  if(funtype == "general" && length(y) != n)
    stop("FUN should yield one value per pixel")
  if(!full) {
    ## put the NA's back (preserving type of 'y')
    yfull <- rep(y[1L], prod(d))
    yfull[ok] <- y
    yfull[!ok] <- NA
    y <- yfull
  }
  ## pack up (preserving type of 'y')
  result <- im(y,
               xcol=template$xcol,
               yrow=template$yrow,
               xrange=template$xrange,
               yrange=template$yrange,
               unitname=template$unitname)
  return(result)
}
y=yr[2]), left = list(x=xr[1], y=mean(yr)), centre=, center = list(x=mean(xr), y=mean(yr)), stop(paste("Unrecognised location", sQuote(lname), "=", dQuote(loc)), call.=FALSE) ) } transect.im <- function(X, ..., from="bottomleft", to="topright", click=FALSE, add=FALSE) { Xname <- short.deparse(substitute(X)) Xname <- sensiblevarname(Xname, "X") stopifnot(is.im(X)) # determine transect position if(click) { # interactive if(!add) plot(X) from <- spatstatLocator(1) points(from) to <- spatstatLocator(1) points(to) segments(from$x, from$y, to$x, to$y) } else { # data defining a line segment R <- as.rectangle(X) from <- specify.location(from, R) to <- specify.location(to, R) } # create sample points along transect if(identical(from,to)) stop(paste(sQuote("from"), "and", sQuote("to"), "must be distinct points"), call.=FALSE) u <- seq(0,1,length=512) x <- from$x + u * (to$x - from$x) y <- from$y + u * (to$y - from$y) leng <- sqrt( (to$x - from$x)^2 + (to$y - from$y)^2) t <- u * leng # look up pixel values (may be NA) v <- X[list(x=x, y=y), drop=FALSE] # package into fv object df <- data.frame(t=t, v=v) colnames(df)[2] <- Xname fv(df, argu = "t", ylab = substitute(Xname(t), list(Xname=as.name(Xname))), valu=Xname, labl = c("t", "%s(t)"), desc = c("distance along transect", "pixel value of %s"), unitname = unitname(X), fname = Xname) } transect.im }) spatstat/R/objsurf.R0000644000176200001440000000751213333543255014126 0ustar liggesusers# # objsurf.R # # surface of the objective function for an M-estimator # # $Revision: 1.5 $ $Date: 2016/02/11 10:17:12 $ # objsurf <- function(x, ...) 
## objsurfEngine
##    Evaluate an objective function on a grid of parameter values
##    surrounding the optimal parameters, for a 2-parameter model.
## Arguments:
##    objfun   the objective function, called as objfun(par, objargs=...)
##    optpar   optimal parameter vector (must have length 2)
##    objargs  fixed arguments passed to objfun
##    dotargs  extra arguments appended to the apply() call
##    objname  label for the function values (see NOTE below)
##    ngrid    number of grid points in each direction (1 or 2 values)
##    ratio    grid spans optpar/ratio .. optpar*ratio (must exceed 1)
##    verbose  print progress messages
## Value: an object of class "objsurf": list(x, y, z) with attributes
##    "optpar" and "objname".
objsurfEngine <- function(objfun, optpar, objargs,
                          ...,
                          dotargs=list(),
                          objname="objective",
                          ngrid=32, ratio=1.5, verbose=TRUE) {
  trap.extra.arguments(...)
  if(!is.function(objfun))
    stop("Object is in an outdated format and needs to be re-fitted")
  npar <- length(optpar)
  if(npar != 2)
    stop("Only implemented for functions of 2 arguments")
  # create grid of parameter values
  ratio <- ensure2vector(ratio)
  ngrid <- ensure2vector(ngrid)
  stopifnot(all(ratio > 1))
  ## multiplicative range around the optimum in each coordinate
  xgrid <- seq(optpar[1]/ratio[1], optpar[1] * ratio[1], length=ngrid[1])
  ygrid <- seq(optpar[2]/ratio[2], optpar[2] * ratio[2], length=ngrid[2])
  pargrid <- expand.grid(xgrid, ygrid)
  colnames(pargrid) <- names(optpar)
  # evaluate the objective at every grid point (row of pargrid)
  if(verbose) cat(paste("Evaluating", nrow(pargrid), "function values..."))
  values <- do.call(apply,
                    append(list(pargrid, 1, objfun, objargs=objargs),
                           dotargs))
  if(verbose) cat("Done.\n")
  result <- list(x=xgrid, y=ygrid, z=matrix(values, ngrid[1], ngrid[2]))
  attr(result, "optpar") <- optpar
  ## NOTE(review): the 'objname' argument is accepted but ignored here --
  ## the attribute is hard-coded to "contrast".  Looks like a bug; fixing
  ## it would change the label printed by print.objsurf for existing
  ## callers, so confirm intended behaviour before changing.
  attr(result, "objname") <- "contrast"
  class(result) <- "objsurf"
  return(result)
}
## persp method for objective function surfaces.
## Draws a perspective plot of the surface; arguments supplied in '...'
## override the default axis labels and title.  Returns (invisibly)
## whatever persp() returns.
persp.objsurf <- function(x, ...) {
  surfname <- short.deparse(substitute(x))
  pars <- attr(x, "optpar")
  zname <- attr(x, "objname")
  axlab <- names(pars)
  ## defaults: axis labels from the parameter names, z label from the
  ## objective name, main title from the object's name
  defaultargs <- list(xlab=axlab[1], ylab=axlab[2],
                      zlab=zname, main=surfname)
  arglist <- resolve.defaults(list(x=x$x, y=x$y, z=x$z),
                              list(...),
                              defaultargs)
  result <- do.call(persp, arglist)
  invisible(result)
}
## dclf.test
##    Diggle-Cressie-Loosmore-Ford test: Monte Carlo test based on the
##    integrated squared deviation (exponent 2) of a summary function
##    from its reference value.  All real work is done by envelopeTest.
dclf.test <- function(X, ...,
                      alternative=c("two.sided", "less", "greater"),
                      rinterval=NULL, leaveout=1,
                      scale=NULL, clamp=FALSE, interpolate=FALSE) {
  ## capture the name of X for reporting
  Xname <- short.deparse(substitute(X))
  envelopeTest(X, ..., exponent=2, alternative=alternative,
               rinterval=rinterval, leaveout=leaveout,
               scale=scale, clamp=clamp, interpolate=interpolate,
               Xname=Xname)
}

## mad.test
##    Maximum absolute deviation test: same machinery as dclf.test
##    but using the supremum deviation (exponent Inf).
mad.test <- function(X, ...,
                     alternative=c("two.sided", "less", "greater"),
                     rinterval=NULL, leaveout=1,
                     scale=NULL, clamp=FALSE, interpolate=FALSE) {
  Xname <- short.deparse(substitute(X))
  envelopeTest(X, ..., exponent=Inf, alternative=alternative,
               rinterval=rinterval, leaveout=leaveout,
               scale=scale, clamp=clamp, interpolate=interpolate,
               Xname=Xname)
}
## Deviation: raw discrepancy between observed/simulated curve 'x' and
## the reference curve 'ref', adjusted for how 'ref' was computed.
##   leaveout = 0: 'ref' is independent of x; raw difference.
##   leaveout = 1: 'ref' is the mean of the n simulations and x itself;
##                 (x-ref)*(n+1)/n is algebraically the leave-one-out deviation.
##   leaveout = 2: 'ref' is the mean of the n simulations; 'xi' is the
##                 curve to be excluded when forming the jackknife mean.
Deviation <- function(x, ref, leaveout, n, xi=x) {
  if(leaveout == 0) return(x-ref)
  if(leaveout == 1) return((x-ref) * (n+1)/n)
  ## leave-two-out: mean of the simulations excluding 'xi'
  jackmean <- (n * ref - xi)/(n-1)
  return(x - jackmean)
}

## Evaluate signed or absolute deviation,
## taking account of alternative hypothesis and possible scaling
## (Large positive values always favorable to alternative)

RelevantDeviation <- local({

  ## pmax(0, x) but preserving matrix shape (pmax drops dim attributes)
  positivepart <- function(x) {
    d <- dim(x)
    y <- pmax(0, x)
    if(!is.null(d)) y <- matrix(y, d[1L], d[2L])
    return(y)
  }

  negativepart <- function(x) positivepart(-x)

  ## Map a raw deviation to the quantity that is large-positive under
  ## the alternative; optionally divide by 'scaling' first.
  RelevantDeviation <- function(x, alternative, clamp=FALSE, scaling=NULL) {
    if(!is.null(scaling)) x <- x/scaling
    switch(alternative,
           two.sided = abs(x),
           less = if(clamp) negativepart(x) else -x,
           greater = if(clamp) positivepart(x) else x)
  }

  RelevantDeviation
})

## workhorse function
##
## envelopeTest: engine behind dclf.test, mad.test and relatives.
## Computes observed and simulated summary functions via envelope(),
## measures each curve's deviation from a reference curve
## (theoretical value, or a simulation mean, depending on 'leaveout'),
## collapses the deviations to scalar statistics controlled by
## 'exponent' (2 = DCLF, Inf = MAD, 1 = integral, other = Lp),
## and returns a Monte Carlo p-value in an object of class "htest".
envelopeTest <- function(X, ...,
                         exponent=1,
                         alternative=c("two.sided", "less", "greater"),
                         rinterval=NULL,
                         leaveout=1,
                         scale=NULL,
                         clamp=FALSE,
                         tie.rule=c("randomise","mean"),
                         interpolate=FALSE,
                         save.interpolant = TRUE,
                         save.envelope = savefuns || savepatterns,
                         savefuns = FALSE,
                         savepatterns = FALSE,
                         Xname=NULL,
                         badXfatal=TRUE,
                         verbose=TRUE) {
  if(is.null(Xname)) Xname <- short.deparse(substitute(X))
  tie.rule <- match.arg(tie.rule)
  alternative <- match.arg(alternative)
  if(!(leaveout %in% 0:2))
    stop("Argument leaveout should equal 0, 1 or 2")
  ## evaluate default 'savefuns || savepatterns' before those args change
  force(save.envelope)
  check.1.real(exponent)
  explain.ifnot(exponent >= 0)
  deviationtype <- switch(alternative,
                          two.sided = "absolute",
                          greater = if(clamp) "positive" else "signed",
                          less = if(clamp) "negative" else "signed")
  deviationblurb <- paste(deviationtype, "deviation")
  ## compute or extract simulated functions
  X <- envelope(X, ..., savefuns=TRUE, savepatterns=savepatterns,
                Yname=Xname, verbose=verbose)
  Y <- attr(X, "simfuns")
  ## extract values
  r <- with(X, .x)
  obs <- with(X, .y)
  ## drop first column (the r values); remaining columns are simulated curves
  sim <- as.matrix(as.data.frame(Y))[, -1L]
  nsim <- ncol(sim)
  nr <- length(r)
  ## choose function as reference
  has.theo <- ("theo" %in% names(X))
  use.theo <- identical(attr(X, "einfo")$use.theory, TRUE)
  if(use.theo && !has.theo)
    warning("No theoretical function available; use.theory ignored")
  if(use.theo && has.theo) {
    theo.used <- TRUE
    reference <- with(X, theo)
    ## theoretical reference is exact, so no jackknife correction needed
    leaveout <- 0
  } else {
    theo.used <- FALSE
    if(leaveout == 2) {
      ## use sample mean of simulations only
      reference <- apply(sim, 1L, mean, na.rm=TRUE)
    } else {
      ## use sample mean of simulations *and* observed
      reference <- apply(cbind(sim, obs), 1L, mean, na.rm=TRUE)
    }
  }
  ## determine interval of r values for computation
  if(is.null(rinterval)) {
    rinterval <- range(r)
    ok <- rep(TRUE, nr)
    first <- 1L
  } else {
    #' argument 'rinterval' specified
    check.range(rinterval)
    if(max(r) < rinterval[2L]) {
      ## requested interval extends beyond the computed r values: trim it
      oldrinterval <- rinterval
      rinterval <- intersect.ranges(rinterval, range(r), fatal=FALSE)
      if(is.null(rinterval))
        stop(paste("The specified rinterval", prange(oldrinterval),
                   "has empty intersection",
                   "with the range of r values",
                   prange(range(r)),
                   "computed by the summary function"),
             call.=FALSE)
      if(verbose)
        warning(paste("The interval", prange(oldrinterval),
                      "is too large for the available data;",
                      "it has been trimmed to", prange(rinterval)))
    }
    ok <- (rinterval[1L] <= r & r <= rinterval[2L])
    first <- min(which(ok))
  }
  #' check for valid function values, and possibly adjust rinterval
  #' observed function values
  badr <- !is.finite(obs)
  if(badXfatal && all(badr))
    stop("Observed function values are all infinite, NA or NaN",
         call.=FALSE)
  if(any(badr[ok])) {
    if(badr[first] && !any(badr[ok][-1L])) {
      ## ditch smallest r value (usually zero)
      ok[first] <- FALSE
      first <- first + 1L
      rmin <- r[first]
      if(verbose)
        warning(paste("Some function values were infinite, NA or NaN",
                      "at distance r =", paste0(rinterval[1L], ";"),
                      "lower limit of r interval was reset to",
                      rmin, summary(unitname(X))$plural))
      rinterval[1] <- rmin
    } else {
      ## problem
      rbadmax <- paste(max(r[badr]), summary(unitname(X))$plural)
      warning(paste("Some function values were infinite, NA or NaN",
                    "at distances r up to",
                    paste0(rbadmax, "."),
                    "Consider specifying a shorter", sQuote("rinterval")))
    }
  }
  #' simulated function values
  badsim <- matcolall(!is.finite(sim[ok,,drop=FALSE]))
  if(all(badsim))
    stop(paste("Simulated function values are all infinite, NA or NaN.",
               "Check whether simulated patterns are empty"),
         call.=FALSE)
  if(any(badsim)) {
    warning(paste("In", sum(badsim), "out of", length(badsim),
                  "simulations,",
                  "the simulated function values were infinite, NA or NaN",
                  "at every distance r.",
                  "Check whether some simulated patterns are empty"),
            call.=FALSE)
  }
  #' finally trim data
  rok <- r[ok]
  obs <- obs[ok]
  ## NOTE(review): no drop=FALSE here, so 'sim' loses its matrix shape
  ## when only one r value survives -- presumably nr > 1 in practice; verify
  sim <- sim[ok, ]
  reference <- reference[ok]
  nr <- sum(ok)
  if(nr == 0) {
    ## rinterval is very short: pick nearest r value
    ## NOTE(review): obs/sim/reference were already subsetted by the old
    ## 'ok' above (to length 0); the new 'ok' index is not re-applied to
    ## them here -- confirm this branch is reachable and behaves as intended
    ok <- which.min(abs(r - mean(rinterval)))
    nr <- 1L
  }
  ## determine rescaling if any
  if(is.null(scale)) {
    scaling <- NULL
  } else if(is.function(scale)) {
    scaling <- scale(rok)
    sname <- "scale(r)"
    ans <- check.nvector(scaling, nr, things="values of r",
                         fatal=FALSE, vname=sname)
    if(!ans)
      stop(attr(ans, "whinge"), call.=FALSE)
    if(any(bad <- (scaling <= 0))) {
      ## issue a warning unless this only happens at r=0
      if(any(bad[rok > 0]))
        warning(paste("Some values of", sname,
                      "were negative or zero:",
                      "scale was reset to 1 for these values"),
                call.=FALSE)
      scaling[bad] <- 1
    }
  } else stop("Argument scale should be a function")
  ## compute deviations
  ## (sim[,1L] is the curve excluded in the leave-two-out case)
  rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1L])
  rawdevSim <- Deviation(sim, reference, leaveout, nsim)
  ## evaluate signed/absolute deviation relevant to alternative
  ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling)
  dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling)
  if(!all(is.finite(ddat)))
    warning("Some deviation values were Inf, NA or NaN")
  if(!all(is.finite(dsim)))
    warning("Some simulated deviations were Inf, NA or NaN")
  ## compute test statistic
  if(is.infinite(exponent)) {
    ## MAD
    devdata <- max(ddat,na.rm=TRUE)
    devsim <- apply(dsim, 2, max, na.rm=TRUE)
    names(devdata) <- "mad"
    testname <- paste("Maximum", deviationblurb, "test")
    statisticblurb <- paste("Maximum", deviationblurb)
  } else {
    ## length of the integration interval (trivial when only one r value)
    L <- if(nr > 1) diff(rinterval) else 1
    if(exponent == 2) {
      ## Cramer-von Mises
      ddat2 <- if(clamp) ddat^2 else (sign(ddat) * ddat^2)
      dsim2 <- if(clamp) dsim^2 else (sign(dsim) * dsim^2)
      devdata <- L * mean(ddat2, na.rm=TRUE)
      devsim <- L * .colMeans(dsim2, nr, nsim, na.rm=TRUE)
      names(devdata) <- "u"
      testname <- "Diggle-Cressie-Loosmore-Ford test"
      statisticblurb <- paste("Integral of squared", deviationblurb)
    } else if(exponent == 1) {
      ## integral absolute deviation
      devdata <- L * mean(ddat, na.rm=TRUE)
      devsim <- L * .colMeans(dsim, nr, nsim, na.rm=TRUE)
      names(devdata) <- "L1"
      testname <- paste("Integral", deviationblurb, "test")
      statisticblurb <- paste("Integral of", deviationblurb)
    } else {
      ## general p
      if(clamp) {
        ddatp <- ddat^exponent
        dsimp <- dsim^exponent
      } else {
        ## signed power preserves the sign of the deviation
        ddatp <- sign(ddat) * (abs(ddat)^exponent)
        dsimp <- sign(dsim) * (abs(dsim)^exponent)
      }
      devdata <- L * mean(ddatp, na.rm=TRUE)
      devsim <- L * .colMeans(dsimp, nr, nsim, na.rm=TRUE)
      names(devdata) <- "Lp"
      testname <- paste("Integrated", ordinal(exponent),
                        "Power Deviation test")
      statisticblurb <- paste("Integral of", ordinal(exponent),
                              "power of", deviationblurb)
    }
  }
  if(!interpolate) {
    ## standard Monte Carlo test
    ## compute rank and p-value
    datarank <- sum(devdata < devsim, na.rm=TRUE) + 1
    nties <- sum(devdata == devsim, na.rm=TRUE)
    if(nties > 0) {
      tierank <- switch(tie.rule,
                        mean = nties/2,
                        randomise = sample(1:nties, 1L))
      datarank <- datarank + tierank
      if(verbose) message("Ties were encountered")
    }
    pvalue <- datarank/(nsim+1)
    ## bookkeeping
    statistic <- data.frame(devdata, rank=datarank)
    colnames(statistic)[1L] <- names(devdata)
  } else {
    ## Dao-Genton style interpolation:
    ## p-value from a kernel density estimate of the simulated statistics
    fhat <- density(devsim, na.rm=TRUE)
    pvalue <- with(fhat, {
      if(max(x) <= devdata) 0 else
      mean(y[x >= devdata]) * (max(x) - devdata)
    })
    statistic <- data.frame(devdata)
    colnames(statistic)[1L] <- names(devdata)
    nties <- 0
  }
  ## assemble a human-readable description of the null model
  e <- attr(X, "einfo")
  nullmodel <-
    if(identical(e$csr, TRUE)) "CSR" else
    if(!is.null(e$simtype)) {
      switch(e$simtype,
             csr = "CSR",
             rmh = paste("fitted",
                         if(identical(e$pois, TRUE)) "Poisson" else "Gibbs",
                         "model"),
             kppm = "fitted cluster model",
             expr = "model simulated by evaluating expression",
             func = "model simulated by evaluating function",
             list = "model simulated by drawing patterns from a list",
             "unrecognised model")
    } else "unrecognised model"
  fname <- deparse(attr(X, "ylab"))
  uname <- with(summary(unitname(X)),
                if(!vanilla) paste(plural, explain) else NULL)
  testtype <- paste0(if(interpolate) "Interpolated " else NULL,
                     "Monte Carlo")
  scaleblurb <- if(is.null(scale)) NULL else
                paste("Scale function:", paste(deparse(scale), collapse=" "))
  refblurb <- if(theo.used) "theoretical" else "sample mean"
  leaveblurb <- if(leaveout == 0) paste("observed minus", refblurb) else
                if(leaveout == 1) "leave-one-out" else "leave-two-out"
  testname <- c(paste(testname, "of", nullmodel),
                paste(testtype, "test based on", nsim,
                      "simulations", e$constraints),
                paste("Summary function:", fname),
                paste("Reference function:", refblurb),
                paste("Alternative:", alternative),
                paste("Interval of distance values:",
                      prange(rinterval), uname),
                scaleblurb,
                paste("Test statistic:", statisticblurb),
                paste("Deviation =", leaveblurb)
                )
  ## package the result as a standard R hypothesis-test object
  result <- structure(list(statistic = statistic,
                           p.value = pvalue,
                           method = testname,
                           data.name = e$Yname),
                      class="htest")
  attr(result, "rinterval") <- rinterval
  if(save.interpolant && interpolate)
    attr(result, "density") <- fhat
  if(save.envelope) {
    result <- hasenvelope(result, X)
    attr(result, "statistics") <- list(data=devdata, sim=devsim)
    attr(result, "info") <- list(exponent=exponent,
                                 alternative=alternative,
                                 nties=nties,
                                 leaveout=leaveout,
                                 interpolate=interpolate,
                                 scale=scale,
                                 clamp=clamp,
                                 tie.rule=tie.rule,
                                 use.theo=use.theo)
  }
  return(result)
}
spatstat/R/polartess.R0000644000176200001440000000637413443161035014467 0ustar liggesusers#'
#' polartess.R
#'
#' Tessellation using polar coordinates #' #' $Revision: 1.4 $ $Date: 2019/03/16 05:36:40 $ polartess <- function(W, ..., nradial=NULL, nangular=NULL, radii=NULL, angles=NULL, origin=NULL, sep="x") { trap.extra.arguments(...) W <- as.owin(W) if(!is.null(origin)) { origin <- interpretAsOrigin(origin, W) W <- shift(W, -origin) } V <- vertices(Frame(W)) rmax <- sqrt(max(V$x^2 + V$y^2)) if(!is.null(radii)) { if(!is.null(nradial)) warning("nradial ignored because radii were specified") radii <- as.numeric(radii) stopifnot(length(radii) >= 2) stopifnot(all(radii >= 0)) if(sum(is.infinite(radii)) > 1 || !all(diff(radii) > 0)) stop("radii should be increasing") radnames <- paste(signif(radii, 4)) radii[is.infinite(radii)] <- 1.01 * rmax rmax <- max(radii) nradial <- length(radii) - 1L } else if(!is.null(nradial)) { check.1.integer(nradial) radii <- seq(0, rmax, length.out=nradial+1L) radnames <- paste(signif(radii, 4)) } nradii <- length(radii) if(!is.null(angles)) { if(!is.null(nangular)) warning("nangular ignored because angles were specified") angles <- as.numeric(angles) stopifnot(length(angles) >= 2) if(!all(diff(angles) > 0)) stop("angles should be increasing") if(diff(range(angles)) > 2 * pi + .Machine$double.eps) stop("The range of angles must not exceed 2 * pi") nangular <- length(angles) - 1L } else if(!is.null(nangular)) { check.1.integer(nangular) angles <- seq(0, 2*pi, length.out=nangular+1L) } nangles <- length(angles) #' build tessellations result <- as.tess(W) DD <- Dmax <- disc(rmax) if(!is.null(radii)) { rmin <- radii[1] if(rmin > 0) DD <- setminus.owin(DD, disc(rmin)) Dlist <- lapply(radii[radii > 0], disc) if(rmin == 0) Dlist <- append(list(NULL), Dlist) Tlist <- list() for(i in 1:nradial) Tlist <- append(Tlist, list(setminus.owin(Dlist[[i+1]], Dlist[[i]]))) names(Tlist) <- paste0("[", radnames[-nradii], ", ", radnames[-1L], c(rep(")", nradial-1L), "]")) Rtess <- tess(tiles=Tlist, window=DD) result <- intersect.tess(result, Rtess, sep=sep) } 
if(!is.null(angles)) { Tlist <- list() aa <- seq(min(angles), max(angles), length.out=256) aa <- sort(c(aa, angles)) xx <- rmax * cos(aa) yy <- rmax * sin(aa) for(i in 1:nangular) { jj <- (aa >= angles[i]) & (aa <= angles[i+1L]) Tlist[[i]] <- owin(poly=list(x=c(0, xx[jj]), y=c(0, yy[jj]))) } angnames <- lapply(angles/pi, simplenumber, unit="pi", multiply="") unknown <- sapply(angnames, is.null) angnames[unknown] <- paste(signif((angles/pi)[unknown], 4), "pi") angnames <- unlist(angnames) names(Tlist) <- paste0("[", angnames[-nangles], ", ", angnames[-1L], c(rep(")", nangular-1L), "]")) gap <- abs(1 - diff(range(angles))/(2*pi)) DDD <- if(gap < 0.01) Dmax else owin(poly=list(x=c(0, xx), y=c(0,yy))) Atess <- tess(tiles=Tlist, window=DDD) result <- intersect.tess(result, Atess, sep=sep) } if(!is.null(origin)) result <- shift(result, vec=origin) return(result) } spatstat/R/texture.R0000644000176200001440000003041413333543255014151 0ustar liggesusers## ## texture.R ## ## Texture plots and texture maps ## ## $Revision: 1.15 $ $Date: 2016/02/16 01:39:12 $ ### .................. basic graphics ............................. ## put hatching in a window add.texture <- function(W, texture=4, spacing=NULL, ...) 
{ if(is.data.frame(texture)) { ## texture = f(x) where f is a texturemap out <- do.call(add.texture, resolve.defaults(list(W=W, spacing=spacing), list(...), as.list(texture))) return(out) } ## texture should be an integer stopifnot(is.owin(W)) stopifnot(texture %in% 1:8) if(is.null(spacing)) { spacing <- diameter(as.rectangle(W))/50 } else { check.1.real(spacing) stopifnot(spacing > 0) } P <- L <- NULL switch(texture, { ## texture 1: graveyard P <- rsyst(W, dx=3*spacing) }, { ## texture 2: vertical lines L <- rlinegrid(90, spacing, W)[W] }, { ## texture 3: horizontal lines L <- rlinegrid(0, spacing, W)[W] }, { ## texture 4: forward slashes L <- rlinegrid(45, spacing, W)[W] }, { ## texture 5: back slashes L <- rlinegrid(135, spacing, W)[W] }, { ## texture 6: horiz/vert grid L0 <- rlinegrid(0, spacing, W)[W] L90 <- rlinegrid(90, spacing, W)[W] L <- superimpose(L0, L90, W=W, check=FALSE) }, { ## texture 7: diagonal grid L45 <- rlinegrid(45, spacing, W)[W] L135 <- rlinegrid(135, spacing, W)[W] L <- superimpose(L45, L135, W=W, check=FALSE) }, { ## texture 8: hexagons H <- hextess(W, spacing, offset=runifpoint(1, W)) H <- intersect.tess(H, W) do.call.matched(plot.tess, resolve.defaults(list(x=H, add=TRUE), list(...))) }) if(!is.null(P)) do.call.matched(plot.ppp, resolve.defaults(list(x=P, add=TRUE), list(...), list(chars=3, cex=0.2)), extrargs=c("lwd", "col", "cols", "pch")) if(!is.null(L)) do.call.matched(plot.psp, resolve.defaults(list(x=L, add=TRUE), list(...)), extrargs=c("lwd","lty","col")) return(invisible(NULL)) } ## .................. texture maps ................................ ## create a texture map texturemap <- function(inputs, textures, ...) { argh <- list(...) 
if(length(argh) > 0) { isnul <- unlist(lapply(argh, is.null)) argh <- argh[!isnul] } if(missing(textures) || is.null(textures)) textures <- seq_along(inputs) df <- do.call(data.frame, append(list(input=inputs, texture=textures), argh)) f <- function(x) { df[match(x, df$input), -1, drop=FALSE] } class(f) <- c("texturemap", class(f)) attr(f, "df") <- df return(f) } print.texturemap <- function(x, ...) { cat("Texture map\n") print(attr(x, "df")) return(invisible(NULL)) } ## plot a texture map plot.texturemap <- local({ ## recognised additional arguments to and axis() axisparams <- c("cex", "cex.axis", "cex.lab", "col.axis", "col.lab", "font.axis", "font.lab", "las", "mgp", "xaxp", "yaxp", "tck", "tcl", "xpd") # rules to determine the map dimensions when one dimension is given widthrule <- function(heightrange, separate, n, gap) { if(separate) 1 else diff(heightrange)/10 } heightrule <- function(widthrange, separate, n, gap) { (if(separate) (n + (n-1)*gap) else 10) * diff(widthrange) } plot.texturemap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE, labelmap=NULL, gap=0.25, spacing=NULL, add=FALSE) { if(missing(main)) main <- short.deparse(substitute(x)) df <- attr(x, "df") # textures <- df$textures n <- nrow(df) check.1.real(gap, "In plot.texturemap") explain.ifnot(gap >= 0, "In plot.texturemap") separate <- (gap > 0) if(is.null(labelmap)) { labelmap <- function(x) x } else stopifnot(is.function(labelmap)) ## determine rectangular window for display rr <- c(0, n + (n-1)*gap) if(is.null(xlim) && is.null(ylim)) { u <- widthrule(rr, separate, n, gap) if(!vertical) { xlim <- rr ylim <- c(0,u) } else { xlim <- c(0,u) ylim <- rr } } else if(is.null(ylim)) { if(!vertical) ylim <- c(0, widthrule(xlim, separate, n, gap)) else ylim <- c(0, heightrule(xlim, separate, n, gap)) } else if(is.null(xlim)) { if(!vertical) xlim <- c(0, heightrule(ylim, separate, n, gap)) else xlim <- c(0, widthrule(ylim, separate, n, gap)) } width <- diff(xlim) height <- 
diff(ylim) ## determine boxes to be filled with textures, if(vertical) { boxheight <- min(width, height/(n + (n-1) * gap)) vgap <- (height - n * boxheight)/(n-1) boxes <- list() for(i in 1:n) boxes[[i]] <- owin(xlim, ylim[1] + c(i-1, i) * boxheight + (i-1) * vgap) } else { boxwidth <- min(height, width/(n + (n-1) * gap)) hgap <- (width - n * boxwidth)/(n-1) boxes <- list() for(i in 1:n) boxes[[i]] <- owin(xlim[1] + c(i-1, i) * boxwidth + (i-1) * hgap, ylim) } boxsize <- shortside(boxes[[1]]) if(is.null(spacing)) spacing <- 0.1 * boxsize # .......... initialise plot ............................... if(!add) do.call.matched(plot.default, resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) ## ................ plot texture blocks ................. for(i in 1:n) { dfi <- df[i,,drop=FALSE] add.texture(W=boxes[[i]], texture=dfi, ..., spacing=spacing) plot(boxes[[i]], add=TRUE) } if(axis) { # ................. draw annotation .................. 
la <- paste(labelmap(df$input)) if(!vertical) { ## add horizontal axis/annotation at <- lapply(lapply(boxes, centroid.owin), "getElement", name="x") # default axis position is below the ribbon (side=1) sidecode <- resolve.1.default("side", list(...), list(side=1)) if(!(sidecode %in% c(1,3))) warning(paste("side =", sidecode, "is not consistent with horizontal orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side = 1, pos = pos, at = at), list(labels=la, lwd=lwd0)), extrargs=axisparams) } else { ## add vertical axis at <- lapply(lapply(boxes, centroid.owin), "getElement", name="y") # default axis position is to the right of ribbon (side=4) sidecode <- resolve.1.default("side", list(...), list(side=4)) if(!(sidecode %in% c(2,4))) warning(paste("side =", sidecode, "is not consistent with vertical orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw labels horizontally if plotting separate blocks las0 <- if(separate) 1 else 0 # draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side=4, pos=pos, at=at), list(labels=la, lwd=lwd0, las=las0)), extrargs=axisparams) } } invisible(NULL) } plot.texturemap }) ## plot a pixel image using textures textureplot <- local({ textureplot <- function(x, ..., main, add=FALSE, clipwin=NULL, do.plot=TRUE, border=NULL, col=NULL, lwd=NULL, lty=NULL, spacing=NULL, textures=1:8, legend=TRUE, leg.side=c("right", "left", "bottom", "top"), legsep=0.1, legwid=0.2) { if(missing(main)) main <- short.deparse(substitute(x)) if(!(is.im(x) || is.tess(x))) { x <- try(as.tess(x), silent=TRUE) if(inherits(x, "try-error")) x <- try(as.im(x), silent=TRUE) if(inherits(x, "try-error")) stop("x must be a pixel image or a tessellation", call.=FALSE) } 
leg.side <- match.arg(leg.side) if(!is.null(clipwin)) x <- x[clipwin, drop=FALSE] if(is.im(x)) { if(x$type != "factor") x <- eval.im(factor(x)) levX <- levels(x) } else { tilX <- tiles(x) levX <- names(tilX) } n <- length(levX) if(n > 8) stop("Too many factor levels or tiles: maximum is 8") ## determine texture map if(inherits(textures, "texturemap")) { tmap <- textures } else { stopifnot(all(textures %in% 1:8)) stopifnot(length(textures) >= n) mono <- spatstat.options("monochrome") col <- enforcelength(col, n, if(mono) 1 else 1:8) lwd <- if(is.null(lwd)) NULL else enforcelength(lwd, n, 1) lty <- if(is.null(lty)) NULL else enforcelength(lwd, n, 1) tmap <- texturemap(inputs=levX, textures=textures[1:n], col=col, lwd=lwd, lty=lty) } ## determine plot region bb <- as.rectangle(x) if(!legend) { bb.all <- bb } else { Size <- max(sidelengths(bb)) bb.leg <- switch(leg.side, right={ ## legend to right of image owin(bb$xrange[2] + c(legsep, legsep+legwid) * Size, bb$yrange) }, left={ ## legend to left of image owin(bb$xrange[1] - c(legsep+legwid, legsep) * Size, bb$yrange) }, top={ ## legend above image owin(bb$xrange, bb$yrange[2] + c(legsep, legsep+legwid) * Size) }, bottom={ ## legend below image owin(bb$xrange, bb$yrange[1] - c(legsep+legwid, legsep) * Size) }) iside <- match(leg.side, c("bottom", "left", "top", "right")) bb.all <- boundingbox(bb.leg, bb) } ## result <- tmap attr(result, "bbox") <- bb ## if(do.plot) { ## Plot textures if(!add) { plot(bb.all, type="n", main="") fakemaintitle(bb, main, ...) } if(is.null(spacing)) spacing <- diameter(as.rectangle(x))/50 areas <- if(is.im(x)) table(x$v) else tile.areas(x) for(i in which(areas > 0)) { Zi <- if(is.tess(x)) tilX[[i]] else levelset(x, levX[i], "==") Zi <- as.polygonal(Zi) if(is.null(border) || !is.na(border)) plot(Zi, add=TRUE, border=border) add.texture(Zi, texture=tmap(levX[i]), spacing=spacing, ...) 
} vertical <- leg.side %in% c("left", "right") if(legend) do.call(plot.texturemap, resolve.defaults(list(x=tmap, add=TRUE, vertical=vertical, side=iside, xlim=bb.leg$xrange, ylim=bb.leg$yrange, spacing=spacing), list(...))) } return(invisible(result)) } enforcelength <- function(x, n, x0) { if(is.null(x)) x <- x0 if(length(x) < n) x <- rep(x, n) return(x[1:n]) } textureplot }) spatstat/R/cut.ppp.R0000644000176200001440000000243213333543254014040 0ustar liggesusers# # cut.ppp.R # # cut method for ppp objects # # $Revision: 1.15 $ $Date: 2016/10/26 09:29:57 $ # cut.ppp <- function(x, z=marks(x), ...) { x <- as.ppp(x) if(missing(z) || is.null(z)) { z <- marks(x, dfok=TRUE) if(is.null(z)) stop("x has no marks to cut") } if(is.character(z)) { if(length(z) == npoints(x)) { # interpret as a factor z <- factor(z) } else if((length(z) == 1L) && (z %in% colnames(df <- as.data.frame(x)))) { # interpret as the name of a column of marks or coordinates z <- df[, z] } else stop("format of argument z not understood") } if(is.factor(z) || is.vector(z)) { stopifnot(length(z) == npoints(x)) g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) } if(is.data.frame(z) || is.matrix(z)) { stopifnot(nrow(z) == npoints(x)) # take first column z <- z[,1L] g <- if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) } if(is.im(z)) return(cut(x, z[x, drop=FALSE], ...)) if(is.owin(z)) { marks(x) <- factor(inside.owin(x$x, x$y, z), levels=c(FALSE, TRUE)) return(x) } if(is.tess(z)) { marks(x) <- tileindex(x$x, x$y, z) return(x) } stop("Format of z not understood") } spatstat/R/layered.R0000644000176200001440000002650713333543255014106 0ustar liggesusers# # layered.R # # Simple mechanism for layered plotting # # $Revision: 1.39 $ $Date: 2017/06/05 10:31:58 $ # layered <- function(..., plotargs=NULL, LayerList=NULL) { argh <- list(...) 
if(length(argh) > 0 && !is.null(LayerList)) stop("LayerList is incompatible with other arguments") out <- if(!is.null(LayerList)) LayerList else argh n <- length(out) if(sum(nzchar(names(out))) != n) names(out) <- paste("Layer", seq_len(n)) if(is.null(plotargs)) { plotargs <- rep.int(list(list()), n) } else { if(!is.list(plotargs)) stop("plotargs should be a list of lists") if(!all(unlist(lapply(plotargs, is.list)))) plotargs <- list(plotargs) np <- length(plotargs) if(np == 1) plotargs <- rep(plotargs, n) else if(np != n) stop("plotargs should have one component for each element of the list") } names(plotargs) <- names(out) attr(out, "plotargs") <- plotargs class(out) <- c("layered", class(out)) return(out) } print.layered <- function(x, ...) { splat("Layered object") if(length(x) == 0) splat("(no entries)") for(i in seq_along(x)) { cat(paste("\n", names(x)[i], ":\n", sep="")) print(x[[i]]) } pl <- layerplotargs(x) hasplot <- (lengths(pl) > 0) if(any(hasplot)) splat("Includes plot arguments for", commasep(names(pl)[hasplot])) invisible(NULL) } plot.layered <- function(x, ..., which=NULL, plotargs=NULL, add=FALSE, show.all=!add, main=NULL, do.plot=TRUE) { if(is.null(main)) main <- short.deparse(substitute(x)) n <- length(x) if(!is.null(plotargs)) { np <- length(plotargs) if(!(is.list(plotargs) && all(unlist(lapply(plotargs, is.list))))) stop("plotargs should be a list of lists") } ## select layers if(!is.null(which)) { x <- x[which] nw <- length(x) if(!is.null(plotargs)) { if(np == n) plotargs <- plotargs[which] else if(np == 1) plotargs <- rep(plotargs, nw) else if(np != nw) stop("plotargs should have one component for each layer to be plotted") } n <- nw } else if(!is.null(plotargs)) { if(np == 1) plotargs <- rep(plotargs, n) else if(np != n) stop("plotargs should have one component for each layer") } ## remove null layers if(any(isnul <- unlist(lapply(x, is.null)))) { x <- x[!isnul] if(!is.null(plotargs)) plotargs <- plotargs[!isnul] n <- length(x) } ## anything 
to plot? if(n == 0) return(invisible(NULL)) ## Merge plotting arguments xplotargs <- layerplotargs(x) if(is.null(plotargs)) { plotargs <- xplotargs } else if(length(xplotargs) > 0) { for(i in 1:n) plotargs[[i]] <- resolve.defaults(plotargs[[i]], xplotargs[[i]]) } ## Determine bounding box a <- plotEachLayer(x, ..., plotargs=plotargs, add=add, show.all=show.all, do.plot=FALSE) if(!do.plot) return(a) bb <- as.rectangle(as.owin(a)) ## Start plotting if(!add && !is.null(bb)) { ## initialise new plot using bounding box pt <- prepareTitle(main) plot(bb, type="n", main=pt$blank) add <- TRUE } # plot the layers out <- plotEachLayer(x, ..., main=main, plotargs=plotargs, add=add, show.all=show.all, do.plot=TRUE) return(invisible(out)) } plotEachLayer <- function(x, ..., main, plotargs, add, show.all, do.plot=TRUE) { main.given <- !missing(main) ## do.plot=TRUE => plot the layers ## do.plot=FALSE => determine bounding boxes out <- boxes <- list() nama <- names(x) firstlayer <- TRUE for(i in seq_along(x)) { xi <- x[[i]] if(length(xi) == 0) { # null layer - no plotting out[[i]] <- boxes[[i]] <- NULL } else { ## plot layer i on top of previous layers if any. ## By default, ## - show all graphic elements of the first component only; ## - show title 'firstmain' on first component; ## - do not show any component names. add.i <- add || !firstlayer if(main.given) { main.i <- if(firstlayer) main else "" } else { show.all.i <- resolve.1.default(list(show.all=FALSE), list(...), plotargs[[i]]) main.i <- if(show.all.i) nama[i] else "" } dflt <- list(main=main.i, show.all=show.all && firstlayer) pla.i <- plotargs[[i]] defaultplot <- !(".plot" %in% names(pla.i)) ## plot layer i, or just determine bounding box if(defaultplot && inherits(xi, c("ppp", "psp", "owin", "lpp", "linnet", "im", "msr", "layered"))) { ## plot method for 'xi' has argument 'do.plot'. 
mplf <- if(inherits(xi, c("ppp", "lpp"))) list(multiplot=FALSE) else list() out[[i]] <- outi <- do.call(plot, resolve.defaults(list(x=xi, add=add.i, do.plot=do.plot), list(...), mplf, pla.i, dflt)) boxes[[i]] <- as.rectangle(as.owin(outi)) } else { ## plot method for 'xi' does not have argument 'do.plot' if(do.plot) { if(defaultplot) { plotfun <- "plot" } else { plotfun <- pla.i[[".plot"]] pla.i <- pla.i[names(pla.i) != ".plot"] } out[[i]] <- outi <- do.call(plotfun, resolve.defaults(list(x=xi, add=add.i), list(...), pla.i, dflt)) } ## convert layer i to box boxi <- try(as.rectangle(xi), silent=TRUE) boxes[[i]] <- if(!inherits(boxi, "try-error")) boxi else NULL } firstlayer <- FALSE } } ## one box to bound them all if(!all(unlist(lapply(boxes, is.null)))) attr(out, "bbox") <- do.call(boundingbox, boxes) return(out) } "[.layered" <- function(x, i, j, drop=FALSE, ...) { i.given <- !missing(i) && !is.null(i) j.given <- !missing(j) && !is.null(j) if(!i.given && !j.given) return(x) p <- attr(x, "plotargs") x <- unclass(x) nx <- length(x) if(i.given) { if(is.owin(i)) { #' spatial window subset nonemp <- (lengths(x) != 0) x[nonemp] <- lapply(x[nonemp], "[", i=i, ...) } else { #' vector subset index x <- x[i] p <- p[i] nx <- length(x) } } if(j.given) { nonemp <- (lengths(x) != 0) x[nonemp] <- lapply(x[nonemp], "[", i=j, ...) 
  ## (tail of a definition that begins before this chunk -- kept verbatim)
  }
  if(drop && nx == 1)
    return(x[[1L]])
  y <- layered(LayerList=x, plotargs=p)
  return(y)
}

## Replace a single layer; assigning NULL deletes the layer.
"[[<-.layered" <- function(x, i, value) {
  x[i] <- if(!is.null(value)) list(value) else NULL
  return(x)
}

## Replace a subset of layers, keeping the plot arguments associated
## with each surviving layer (matched by layer name).
"[<-.layered" <- function(x, i, value) {
  p <- layerplotargs(x)
  ## invoke list method
  y <- x
  class(y) <- "list"
  y[i] <- value
  # make it a 'layered' object too
  class(y) <- c("layered", class(y))
  # update names and plotargs
  if(any(blank <- !nzchar(names(y)))) {
    ## newly-inserted layers have empty names: invent names for them
    ## and give them empty plot-argument lists
    names(y)[blank] <- paste("Layer", which(blank))
    pnew <- rep(list(list()), length(y))
    names(pnew) <- names(y)
    m <- match(names(y), names(x))
    mok <- !is.na(m)
    pnew[mok] <- p[m[mok]]
    layerplotargs(y) <- pnew
  } else layerplotargs(y) <- layerplotargs(x)[names(y)]
  return(y)
}

## Extract the list of per-layer plot arguments stored as an attribute.
layerplotargs <- function(L) {
  stopifnot(inherits(L, "layered"))
  attr(L, "plotargs")
}

## Assign per-layer plot arguments.
## 'value' may be a single list of arguments (recycled to all layers)
## or a list-of-lists, one entry per layer.
"layerplotargs<-" <- function(L, value) {
  if(!inherits(L, "layered"))
    L <- layered(L)
  if(!is.list(value))
    stop("Replacement value should be a list, or a list-of-lists")
  n <- length(L)
  if(!all(unlist(lapply(value, is.list))))
    value <- unname(rep(list(value), n))
  if(length(value) != n) {
    if(length(value) == 1) value <- unname(rep(value, n)) else
    stop("Replacement value is wrong length")
  }
  if(is.null(names(value)))
    names(value) <- names(L)
  else if(!identical(names(value), names(L)))
    stop("Mismatch in names of list elements")
  attr(L, "plotargs") <- value
  return(L)
}

applytolayers <- function(L, FUN, ...) {
  # Apply FUN to each **non-null** layer,
  # preserving the plot arguments
  pla <- layerplotargs(L)
  if(length(L) > 0) {
    ok <- !unlist(lapply(L, is.null))
    L[ok] <- lapply(L[ok], FUN, ...)
  }
  Z <- layered(LayerList=L, plotargs=pla)
  return(Z)
}

## Apply the same spatial shift to every layer.
## If extra arguments are given, the shift is computed on the first
## layer and the resulting vector is reused for all layers, so that
## data-dependent shifts (e.g. origin=...) stay consistent.
shift.layered <- function(X, vec=c(0,0), ...) {
  if(length(list(...)) > 0) {
    if(!missing(vec))
      warning("Argument vec ignored; overridden by other arguments")
    ## ensure the same shift is applied to all layers
    s <- shift(X[[1L]], ...)
    vec <- getlastshift(s)
  }
  Y <- applytolayers(X, shift, vec=vec)
  attr(Y, "lastshift") <- vec
  return(Y)
}

## Geometric transformations applied layer-by-layer.
affine.layered <- function(X, ...) {
  applytolayers(X, affine, ...)
}

## Rotate all layers; optional 'centre' is handled by shifting the
## origin to the centre, rotating, then shifting back.
rotate.layered <- function(X, ..., centre=NULL) {
  if(!is.null(centre)) {
    X <- shift(X, origin=centre)
    negorigin <- getlastshift(X)
  } else negorigin <- NULL
  Y <- applytolayers(X, rotate, ...)
  if(!is.null(negorigin))
    Y <- shift(Y, -negorigin)
  return(Y)
}

reflect.layered <- function(X) {
  applytolayers(X, reflect)
}

flipxy.layered <- function(X) {
  applytolayers(X, flipxy)
}

scalardilate.layered <- function(X, ...) {
  applytolayers(X, scalardilate, ...)
}

## Rescale all layers; missing arguments are passed as NULL so each
## layer's rescale method applies its own default.
rescale.layered <- function(X, s, unitname) {
  if(missing(s)) s <- NULL
  if(missing(unitname)) unitname <- NULL
  applytolayers(X, rescale, s=s, unitname=unitname)
}

## Convert a layered object to a window: take the union of the windows
## of all non-null layers (degenerate zero-width boxes are inflated
## slightly first so the union does not drop them).
as.owin.layered <- local({

  as.owin.layered <- function(W, ..., fatal=TRUE) {
    if(length(W) == 0) {
      if(fatal) stop("Layered object is empty: no window data")
      return(NULL)
    }
    ## remove null layers
    isnul <- unlist(lapply(W, is.null))
    W <- W[!isnul]
    if(length(W) == 0) {
      if(fatal) stop("Layered object has no window data")
      return(NULL)
    }
    Wlist <- lapply(unname(W), as.owin, ..., fatal=fatal)
    Wlist <- lapply(Wlist, rescue.rectangle)
    Wlist <- lapply(Wlist, puffbox)
    Z <- Wlist[[1L]]
    if(length(Wlist) > 1) {
      ## only compute the union if the windows actually differ
      same <- unlist(lapply(Wlist[-1L], identical, y=Z))
      if(!all(same))
        Z <- do.call(union.owin, Wlist)
    }
    return(Z)
  }

  puffbox <- function(W) {
    ## union.owin will delete boxes that have width zero or height zero
    ## so 'puff' them out slightly
    ss <- sidelengths(Frame(W))
    if(ss[1L] == 0) W$xrange <- W$xrange + 1e-6 * c(-1,1) * ss[2L]
    if(ss[2L] == 0) W$yrange <- W$yrange + 1e-6 * c(-1,1) * ss[1L]
    return(W)
  }

  as.owin.layered
})

## Window of a layered object is the union of the layer windows.
domain.layered <- Window.layered <- function(X, ...) { as.owin(X) }

## Generic converter to class 'layered'.
as.layered <- function(X) {
  UseMethod("as.layered")
}

## Default: a list of spatial objects becomes one layer per element;
## anything else becomes a single layer.
as.layered.default <- function(X) {
  if(is.list(X) && all(sapply(X, is.sob))) layered(LayerList=X) else layered(X)
}

## Point patterns: multitype patterns are split into one layer per type;
## patterns with a data frame of marks get one layer per mark column.
as.layered.ppp <- function(X) {
  if(!is.marked(X)) return(layered(X))
  if(is.multitype(X)) return(layered(LayerList=split(X)))
  mX <- marks(X)
  if(!is.null(d <- dim(mX)) && d[2L] > 1) {
    mx <- as.data.frame(marks(X))
    Y <- lapply(mx, setmarks, x=X)
    return(layered(LayerList=Y))
  }
  return(layered(X))
}
spatstat/R/GJfox.R0000644000176200001440000001035413623370540013464 0ustar liggesusers#
# GJfox.R
#
# Foxall G-function and J-function
#
# $Revision: 1.11 $ $Date: 2020/02/20 02:32:21 $
#

## Foxall G-function: empirical CDF of the distance from each point of X
## to the spatial object Y, with border-censoring corrections.
Gfox <- function(X, Y, r=NULL, breaks=NULL,
                 correction=c("km", "rs", "han"),
                 W=NULL, ...) {
  stopifnot(is.ppp(X))
  #' validate and resolve windows
  a <- resolve.foxall.window(X, Y, W)
  X <- a$X
  Y <- a$Y
  W <- a$W
  #'
  ## NOTE(review): the fallback default includes "cs", which does not
  ## appear in the pickoption table below -- confirm intended behaviour
  ## when correction=NULL is supplied.
  if(is.null(correction))
    correction <- c("rs", "km", "cs")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             raw="none",
                             border="rs",
                             rs="rs",
                             KM="km",
                             km="km",
                             Kaplan="km",
                             han="han",
                             Hanisch="han",
                             best="km"),
                           multi=TRUE)
  corxtable <- c("km", "rs", "han", "none")
  corx <- as.list(corxtable %in% correction)
  names(corx) <- corxtable
  ## compute distances and censoring distances
  D <- distfun(Y)
  dist <- D(X)
  bdry <- bdist.points(X[W]) # sic
  ## histogram breakpoints
  dmax <- max(dist)
  breaks <- handle.r.b.args(r, breaks, Window(X), NULL, rmaxdefault=dmax)
  rval <- breaks$r
  ## censoring indicators
  d <- (dist <= bdry)
  ## observed distances
  o <- pmin.int(dist, bdry)
  ## calculate estimates: treat distances as censored survival times
  Z <- censtimeCDFest(o, bdry, d, breaks,
                      KM=corx$km,
                      RS=corx$rs,
                      HAN=corx$han,
                      RAW=corx$none,
                      han.denom=if(corx$han) eroded.areas(Window(X), rval) else NULL,
                      tt=dist)
  ## relabel
  Z <- rebadge.fv(Z, quote(G[fox](r)), c("G", "fox"))
  unitname(Z) <- unitname(Y)
  return(Z)
}

## Foxall J-function: J = (1-G)/(1-H) where H is the spherical contact
## distribution of Y and G is the Foxall G-function of X relative to Y.
Jfox <- function(X, Y, r=NULL, breaks=NULL,
                 correction=c("km", "rs", "han"),
                 W=NULL, ..., warn.trim=TRUE) {
  ## validate and resolve windows
  a <- resolve.foxall.window(X, Y, W, isTRUE(warn.trim))
  X <- a$X
  Y <- a$Y
  W <- a$W
  ## process
  H <- Hest(Y, r=r, breaks=breaks, correction=correction, ..., W=W)
  G <- Gfox(X, Y, r=H$r, correction=correction, ..., W=W)
  ## derive J-function
  J <- eval.fv((1-G)/(1-H), dotonly=FALSE)
  ## correct calculation of hazard is different
  if("hazard" %in% names(J))
    J$hazard <- G$hazard - H$hazard
  ## base labels on 'J' rather than full expression
  attr(J, "labl") <- attr(H, "labl")
  ## add column of 1's
  J <- bind.fv(J, data.frame(theo=rep.int(1, nrow(J))),
               "%s[theo](r)",
               "theoretical value of %s for independence")
  ## rename
  J <- rebadge.fv(J, quote(J[fox](r)), c("J", "fox"))
  funs <- c("km", "han", "rs", "raw", "theo")
  fvnames(J, ".") <- funs[funs %in% names(J)]
  unitname(J) <- unitname(Y)
  attr(J, "conserve") <- attr(H, "conserve")
  return(J)
}

## Validate the pair (X, Y) and determine the estimation window W.
## Y may be a point pattern, line segment pattern, window or logical image;
## a logical image is converted to the window where it is TRUE.
## If the window of X is not contained in W, X is trimmed (with optional
## warning).  Returns list(X, Y, W).
resolve.foxall.window <- function(X, Y, W=NULL, warn.trim=TRUE) {
  if(!(is.ppp(Y) || is.psp(Y) || is.owin(Y) || is.im(Y)))
    stop("Y should be an object of class ppp, psp, owin or im")
  if(is.im(Y) && !is.logical(ZeroValue(Y)))
    stop("When Y is an image, its pixel values should be logical values")
  if(!identical(unitname(X), unitname(Y)))
    warning("X and Y are not in the same units")
  ## default window based on Y
  if(is.ppp(Y) || is.psp(Y)) {
    W0 <- Window(Y)
    W0describe <- "the observation window of Y"
  } else if(is.owin(Y)) {
    W0 <- Frame(Y)
    W0describe <- "the Frame of Y"
  } else if(is.im(Y)) {
    W0 <- Window(Y)
    W0describe <- "the observation window of Y"
    Y <- solutionset(Y)
  } else stop("Y should be an object of class ppp, psp, owin or im")
  ## actual window used for estimation
  if(!is.null(W)) {
    stopifnot(is.owin(W))
    if(!is.subset.owin(W, W0))
      stop(paste("W is not a subset of", W0describe))
    Wdescribe <- "W"
  } else {
    W <- W0
    Wdescribe <- W0describe
  }
  ## ensure compatible windows
  WX <- Window(X)
  if(!is.subset.owin(WX, W)) {
    if(warn.trim)
      warning(paste("Trimming the window of X to be a subset of", Wdescribe))
    WX <- intersect.owin(WX, W)
    if(area.owin(WX) == 0)
      stop("Trimmed window has zero area")
    X <- X[WX]
    if(npoints(X) == 0)
      stop("No points remaining after trimming window")
  }
  return(list(X=X, Y=Y, W=W))
}
spatstat/R/digestCovariates.R0000644000176200001440000000377713333543254015764 0ustar liggesusers#'
#' digestCovariates.R
#'
#' $Revision: 1.4 $ $Date: 2018/05/03 08:33:44 $
#'

is.scov <- function(x) {
  #' Determines whether x is a valid candidate for a spatial covariate
  #' A spatial object is OK if it can be coerced to a function
  if(inherits(x, c("im", "funxy", "owin", "tess", "ssf", "leverage.ppm")))
    return(TRUE)
  #' A function(x,y,...) is OK
  if(is.function(x) && identical(names(formals(x))[1:2], c("x", "y")))
    return(TRUE)
  #' A single character "x" or "y" is OK
  if(is.character(x) && length(x) == 1 && (x %in% c("x", "y")))
    return(TRUE)
  #' Can't handle input
  return(FALSE)
}

## Assumes each input (besides W) is a single covariate or a list of covariates
## Returns a `solist` with possibly a unitname attribute
digestCovariates <- function(..., W = NULL) {
  x <- list(...)
  #' Find individual covariates in list
  valid <- sapply(x, is.scov)
  covs <- x[valid]
  #' The remaining entries are assumed to be lists of covariates
  #' so we unlist them
  x <- unlist(x[!valid], recursive = FALSE)
  valid <- sapply(x, is.scov)
  if(!all(valid))
    stop("Couldn't interpret all input as spatial covariates.")
  covs <- append(covs, x)
  ## if a window is needed (some covariates are bare functions or "x"/"y")
  ## and none was supplied, use the bounding box of the spatial covariates
  if(any(needW <- !sapply(covs, is.sob))) {
    if(is.null(W)){
      boxes <- lapply(covs[!needW], Frame)
      W <- do.call(boundingbox, boxes)
    } else stopifnot(is.owin(W))
  }
  covunits <- vector("list", length(covs))
  # Now covs is a list of valid covariates we can loop through
  for(i in seq_along(covs)){
    covar <- covs[[i]]
    if(inherits(covar, "distfun"))
      covunits[[i]] <- unitname(covar)
    ## translate the shorthand "x"/"y" into coordinate functions
    if(is.character(covar) && length(covar) == 1 && (covar %in% c("x", "y"))) {
      covar <- if(covar == "x"){
        function(x,y) { x }
      } else{
        function(x,y) { y }
      }
      covunits[[i]] <- unitname(W)
    }
    ## wrap bare functions as funxy objects on the window W
    if(is.function(covar) && !inherits(covar, "funxy")){
      covar <- funxy(f = covar, W = W)
    }
    covs[[i]] <- covar
  }
  covs <- as.solist(covs)
  attr(covs, "covunits") <- covunits
  return(covs)
}
spatstat/R/residuals.mppm.R0000644000176200001440000000566413333543255015425 0ustar liggesusers#
# residuals.mppm.R
#
# computes residuals for fitted multiple point process model
#
#
# $Revision: 1.5 $ $Date: 2015/01/29 06:44:26 $
#

## Compute residual measures (raw, inverse-lambda or Pearson) for each
## point pattern in a fitted multiple point process model.
## Returns a 'solist' of signed measures, one per pattern.
residuals.mppm <- function(object, type="raw", ...,
                           fittedvalues = fitted.mppm(object)) {
  verifyclass(object, "mppm")
  userfitted <- !missing(fittedvalues)
  type <- pickoption("type", type,
                     c(inverse="inverse",
                       raw="raw",
                       pearson="pearson",
                       Pearson="pearson"))
  typenames <- c(inverse="inverse-lambda residuals",
                 raw="raw residuals",
                 pearson="Pearson residuals")
  typename <- typenames[[type]]
  # Extract quadrature points and weights
  Q <- quad.mppm(object)
  # U <- lapply(Q, union.quad) # quadrature point patterns
  Z <- unlist(lapply(Q, is.data)) # indicator data/dummy
  W <- unlist(lapply(Q, w.quad)) # quadrature weights
  # total number of quadrature points
  nquadrature <- length(W)
  # number of quadrature points in each pattern
  nU <- unlist(lapply(Q, n.quad))
  # number of rows of hyperframe
  npat <- object$npat
  # attribution of each quadrature point
  id <- factor(rep(1:npat, nU), levels=1:npat)
  # Compute fitted conditional intensity at quadrature points
  if(!is.list(fittedvalues) || length(fittedvalues) != npat)
    stop(paste(sQuote("fittedvalues"),
               "should be a list of length", npat,
               "containing vectors of fitted values"))
  lambda <- unlist(fittedvalues)
  # consistency check
  if(length(lambda) != nquadrature)
    stop(paste(if(!userfitted) "internal error:" else NULL,
               "number of fitted values", paren(length(lambda)),
               "does not match number of quadrature points",
               paren(nquadrature)))
  # indicator is 1 if lambda > 0
  # (adjusted for numerical behaviour of predict.glm)
  indicator <- (lambda > .Machine$double.eps)
  # Evaluate residual measure components:
  # 'discrete' is the atomic mass at each data point,
  # 'density' is the absolutely-continuous component at quadrature points
  discrete <- ifelse(Z,
                     switch(type,
                            raw     = 1,
                            inverse = 1/lambda,
                            pearson = 1/sqrt(lambda)
                            ),
                     0)
  density <- switch(type,
                    raw     = -lambda,
                    inverse = -indicator,
                    pearson = -indicator * sqrt(lambda))
  atoms <- as.logical(Z)
  # All components
  resdf <- data.frame(discrete=discrete,
                      density=density,
                      atoms=atoms)
  # Split residual data according to point pattern affiliation
  splitres <- split(resdf, id)
  # Associate with quadrature scheme
  reshf <- hyperframe(R=splitres, Q=Q)
  # Convert to signed measures
  answer <- with(reshf, msr(Q, R$discrete[R$atoms], R$density))
  # tag
  answer <- lapply(answer, "attr<-", which="type", value=type)
  answer <- lapply(answer, "attr<-", which="typename", value=typename)
  return(as.solist(answer))
}
spatstat/R/sparse3Darray.R0000644000176200001440000010167013602532431015170 0ustar liggesusers#'
#' sparse3Darray.R
#'
#' Sparse 3D arrays represented as list(i,j,k,x)
#'
#' $Revision: 1.39 $ $Date: 2019/12/31 01:03:10 $
#'

## Constructor: build a sparse 3D array from triples of indices (i,j,k)
## and values x.  With nonzero=TRUE zero entries are dropped;
## with strict=TRUE entries are also sorted into 'R order' (k,j,i)
## and duplicate positions are summed.
sparse3Darray <- function(i=integer(0), j=integer(0), k=integer(0),
                          x=numeric(0),
                          dims=c(max(i),max(j),max(k)), dimnames=NULL,
                          strict=FALSE, nonzero=FALSE) {
  dat <- data.frame(i, j, k, x)
  if(typeof(x) == "complex")
    warn.once("sparse.complex",
              "complex-valued sparse 3D arrays are supported in spatstat,",
              "but complex-valued sparse matrices",
              "are not yet supported by the Matrix package")
  stopifnot(length(dims) == 3)
  dims <- as.integer(dims)
  if(!all(i >= 1 & i <= dims[1])) stop("indices i are outside range")
  if(!all(j >= 1 & j <= dims[2])) stop("indices j are outside range")
  if(!all(k >= 1 & k <= dims[3])) stop("indices k are outside range")
  if(!is.null(dimnames)) {
    stopifnot(is.list(dimnames))
    stopifnot(length(dimnames) == 3)
    notnull <- !sapply(dimnames, is.null)
    dimnames[notnull] <- lapply(dimnames[notnull], as.character)
  }
  if(nonzero || strict) {
    #' drop zeroes
    ok <- (x != RelevantZero(x))
    dat <- dat[ok, , drop=FALSE]
  }
  if(strict) {
    #' arrange in 'R order'
    dat <- dat[with(dat, order(k,j,i)), , drop=FALSE]
    #' duplicates will be adjacent
    dup <- with(dat, c(FALSE, diff(i) == 0 & diff(j) == 0 & diff(k) == 0))
    if(any(dup)) {
      #' accumulate values at the same array location
      retain <- !dup
      newrow <- cumsum(retain)
      newx <- as(tapply(dat$x, newrow, sum), typeof(dat$x))
      newdat <- dat[retain,,drop=FALSE]
      newdat$x <- newx
      dat <- newdat
    }
  }
  result <- append(as.list(dat), list(dim=dims, dimnames=dimnames))
  class(result) <- "sparse3Darray"
  return(result)
}

## Coerce various objects to a sparse 3D array:
## sparse/dense matrices become (m x n x 1) arrays, sparse vectors and
## atomic vectors become (n x 1 x 1) arrays, a list of (sparse) matrices
## of identical dimensions is stacked along the third axis.
as.sparse3Darray <- function(x, ...) {
  if(inherits(x, "sparse3Darray")) {
    y <- x
  } else if(inherits(x, c("matrix", "sparseMatrix"))) {
    z <- as(x, Class="TsparseMatrix")
    dn <- dimnames(x)
    dn <- if(is.null(dn)) NULL else c(dn, list(NULL))
    ## 'one' recycles to give k=1 for every entry, or stays empty
    one <- if(length(z@i) > 0) 1L else integer(0)
    y <- sparse3Darray(i=z@i + 1L, j=z@j + 1L, k=one, x=z@x,
                       dims=c(dim(x), 1L), dimnames=dn)
  } else if(is.array(x)) {
    stopifnot(length(dim(x)) == 3)
    dimx <- dim(x)
    if(prod(dimx) == 0) {
      y <- sparse3Darray(, dims=dimx, dimnames=dimnames(x))
    } else {
      ## extract positions and values of nonzero entries
      ijk <- which(x != RelevantZero(x), arr.ind=TRUE)
      ijk <- cbind(as.data.frame(ijk), x[ijk])
      y <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L], x=ijk[,4L],
                         dims=dimx, dimnames=dimnames(x))
    }
  } else if(inherits(x, "sparseVector")) {
    one <- if(length(x@i) > 0) 1L else integer(0)
    y <- sparse3Darray(i=x@i, j=one, k=one, x=x@x,
                       dims=c(x@length, 1L, 1L))
  } else if(is.null(dim(x)) && is.atomic(x)) {
    n <- length(x)
    dn <- names(x)
    if(!is.null(dn)) dn <- list(dn, NULL, NULL)
    one <- if(n > 0) 1L else integer(0)
    y <- sparse3Darray(i=seq_len(n), j=one, k=one, x=x,
                       dims=c(n, 1L, 1L), dimnames=dn)
  } else if(is.list(x) && length(x) > 0) {
    n <- length(x)
    if(all(sapply(x, is.matrix))) {
      ## dense matrices: stack into a dense array, then sparsify
      z <- Reduce(abind, x)
      y <- as.sparse3Darray(z)
    } else if(all(sapply(x, inherits, what="sparseMatrix"))) {
      dimlist <- unique(lapply(x, dim))
      if(length(dimlist) > 1)
        stop("Dimensions of matrices do not match")
      dimx <- c(dimlist[[1L]], n)
      dnlist <- lapply(x, dimnames)
      isnul <- sapply(dnlist, is.null)
      dnlist <- unique(dnlist[!isnul])
      if(length(dnlist) > 1)
        stop("Dimnames of matrices do not match")
      dn <- if(length(dnlist) == 0) NULL else c(dnlist[[1L]], list(NULL))
      ## accumulate the triplet entries of each slice
      for(k in seq_len(n)) {
        mk <- as(x[[k]], "TsparseMatrix")
        kvalue <- if(length(mk@i) > 0) k else integer(0)
        dfk <- data.frame(i=mk@i + 1L, j=mk@j + 1L, k=kvalue, x=mk@x)
        df <- if(k == 1) dfk else rbind(df, dfk)
      }
      y <- sparse3Darray(i=df$i, j=df$j, k=df$k, x=df$x,
                         dims=dimx, dimnames=dn)
    } else {
      warning("I don't know how to convert a list to a sparse array")
      return(NULL)
    }
  } else {
    warning("I don't know how to convert x to a sparse array")
    return(NULL)
  }
  return(y)
}

dim.sparse3Darray <- function(x) { x$dim }

## Change the dimensions; existing entries must fit inside the new
## extents, and dimnames are truncated or discarded to match.
"dim<-.sparse3Darray" <- function(x, value) {
  stopifnot(length(value) == 3)
  if(!all(inside.range(x$i, c(1, value[1]))))
    stop("indices i are outside new range")
  if(!all(inside.range(x$j, c(1, value[2]))))
    stop("indices j are outside new range")
  if(!all(inside.range(x$k, c(1, value[3]))))
    stop("indices k are outside new range")
  dimx <- dim(x)
  x$dim <- value
  if(!is.null(dimnames(x))) {
    dn <- dimnames(x)
    for(n in 1:3) {
      if(value[n] < dimx[n])
        dn[[n]] <- dn[[n]][1:value[n]]
      else if(value[n] > dimx[n])
        dn[n] <- list(NULL)
    }
    dimnames(x) <- dn
  }
  return(x)
}

dimnames.sparse3Darray <- function(x) { x$dimnames }

"dimnames<-.sparse3Darray" <- function(x, value) {
  if(!is.list(value)) value <- list(value)
  ## a single vector of names is recycled to all three margins
  if(length(value) == 1) value <- rep(value, 3)
  x$dimnames <- value
  return(x)
}

## Print method: displays each slice [ , , k] as a sparse matrix.
## Complex-valued arrays are shown as separate real and imaginary parts.
print.sparse3Darray <- function(x, ...) {
  dimx <- dim(x)
  cat("Sparse 3D array of dimensions",
      paste(dimx, collapse="x"), fill=TRUE)
  if(prod(dimx) == 0)
    return(invisible(NULL))
  dn <- dimnames(x) %orifnull% rep(list(NULL), 3)
  d3 <- dimx[3]
  dn3 <- dn[[3]] %orifnull% as.character(seq_len(d3))
  df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
  pieces <- split(df, factor(df$k, levels=1:d3))
  dim2 <- dimx[1:2]
  dn2 <- dn[1:2]
  if(typeof(x$x) == "complex") {
    splat("Complex-valued")
    splat("\t\tReal component:")
    stuff <- capture.output(eval(Re(x)))
    cat(stuff[-1],sep="\n")
    splat("\n\n\t\tImaginary component:")
    stuff <- capture.output(eval(Im(x)))
    cat(stuff[-1],sep="\n")
  } else {
    for(k in seq_along(pieces)) {
      cat(paste0("\n\t[ , , ", dn3[k], "]\n\n"))
      Mi <- with(pieces[[k]],
                 sparseMatrix(i=i, j=j, x=x, dims=dim2, dimnames=dn2))
      stuff <- capture.output(eval(Mi))
      #' Remove 'sparse Matrix' header blurb
      stuff <- stuff[-1]
      if(is.blank(stuff[1])) stuff <- stuff[-1]
      cat(stuff, sep="\n")
    }
  }
  return(invisible(NULL))
}

## Permute the three margins by permuting the index columns (and,
## if resize=TRUE, the dimensions and dimnames).
aperm.sparse3Darray <- function(a, perm=NULL, resize=TRUE, ...) {
  if(is.null(perm)) return(a)
  stopifnot(length(perm) == 3)
  a <- unclass(a)
  a[c("i", "j", "k")] <- a[c("i", "j", "k")][perm]
  if(resize) {
    a$dim <- a$dim[perm]
    if(length(a$dimnames)==3)
      a$dimnames <- a$dimnames[perm]
  }
  class(a) <- c("sparse3Darray", class(a))
  return(a)
}

## Expand to a dense array, filling unset cells with the zero value
## of the appropriate storage mode.
as.array.sparse3Darray <- function(x, ...) {
  zerovalue <- vector(mode=typeof(x$x), length=1L)
  z <- array(zerovalue, dim=dim(x), dimnames=dimnames(x))
  z[cbind(x$i,x$j,x$k)] <- x$x
  return(z)
}

## Subset extraction x[i,j,k].  Supports a 3-column matrix index
## (returns a sparseVector of values) or vector indices on each margin
## (returns a sparse array/matrix/vector after optional dimension drop).
"[.sparse3Darray" <- local({

  Extract <- function(x, i,j,k, drop=TRUE, ...) {
    dimx <- dim(x)
    dn <- dimnames(x) %orifnull% rep(list(NULL), 3)
    if(!missing(i) && length(dim(i)) == 2) {
      ## matrix index
      i <- as.matrix(i)
      if(!(missing(j) && missing(k)))
        stop("If i is a matrix, j and k should not be given", call.=FALSE)
      if(ncol(i) != 3)
        stop("If i is a matrix, it should have 3 columns", call.=FALSE)
      ## start with vector of 'zero' answers of the correct type
      answer <- sparseVector(x=RelevantEmpty(x$x),
                             i=integer(0),
                             length=nrow(i))
      ## values outside array return NA
      if(anybad <- !all(good <- inside3Darray(dim(x), i))) {
        bad <- !good
        answer[bad] <- NA
      }
      ## if entire array is zero, there is nothing to match
      if(length(x$x) == 0)
        return(answer)
      ## restrict attention to entries inside array
      igood <- if(anybad) i[good, , drop=FALSE] else i
      ## match desired indices to sparse entries,
      ## ignoring margins of extent 1 (they cannot differ)
      varies <- (dimx > 1)
      nvary <- sum(varies)
      varying <- which(varies)
      if(nvary == 3) {
        mgood <- matchIntegerDataFrames(igood, cbind(x$i, x$j, x$k))
      } else if(nvary == 2) {
        ## effectively a sparse matrix
        ijk <- cbind(x$i, x$j, x$k)
        mgood <- matchIntegerDataFrames(igood[,varying,drop=FALSE],
                                        ijk[,varying,drop=FALSE])
      } else if(nvary == 1) {
        ## effectively a sparse vector
        mgood <- match(igood[,varying], switch(varying, x$i, x$j, x$k))
      } else {
        ## effectively a single value
        mgood <- 1 # since we know length(x$x) > 0
      }
      ## insert any found elements
      found <- logical(nrow(i))
      found[good] <- foundgood <- !is.na(mgood)
      answer[found] <- x$x[mgood[foundgood]]
      return(answer)
    }
    if(!(missing(i) && missing(j) && missing(k))) {
      ## vector indices: parse each margin's index
      I <- grokIndexVector(if(missing(i)) NULL else i, dimx[1], dn[[1]])
      J <- grokIndexVector(if(missing(j)) NULL else j, dimx[2], dn[[2]])
      K <- grokIndexVector(if(missing(k)) NULL else k, dimx[3], dn[[3]])
      IJK <- list(I,J,K)
      if(!all(sapply(lapply(IJK, getElement, name="full"), is.null))) {
        ## some indices exceed array bounds;
        ## result is a full array containing NA's
        fullindices <- lapply(IJK, fullIndexSequence)
        strictindices <- lapply(IJK, strictIndexSequence)
        result <- array(data=RelevantNA(x$x),
                        dim=lengths(fullindices))
        matches <- mapply(match, x=fullindices, table=strictindices)
        ok <- lapply(lapply(matches, is.na), "!")
        result[ok[[1]], ok[[2]], ok[[3]]] <-
          as.array(x)[matches[[1]][ok[[1]]],
                      matches[[2]][ok[[2]]],
                      matches[[3]][ok[[3]]]]
        if(drop) result <- result[,,,drop=TRUE]
        return(result)
      }
      IJK <- lapply(IJK, getElement, name="strict")
      I <- IJK[[1]]
      J <- IJK[[2]]
      K <- IJK[[3]]
      #' number of values to be returned along each margin
      newdims <- sapply(IJK, getElement, name="n")
      #' dimnames of return array
      newdn <- lapply(IJK, getElement, name="s")
      #' find all required data (not necessarily in required order)
      inI <- I$lo
      inJ <- J$lo
      inK <- K$lo
      df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
      use <- with(df, inI[i] & inJ[j] & inK[k])
      df <- df[use, ,drop=FALSE]
      #' contract sub-array to (1:n) * (1:m) * (1:l)
      df <- transform(df,
                      i = cumsum(inI)[i],
                      j = cumsum(inJ)[j],
                      k = cumsum(inK)[k])
      Imap <- I$map
      Jmap <- J$map
      Kmap <- K$map
      if(nrow(df) == 0 ||
         (is.null(Imap) && is.null(Jmap) && is.null(Kmap))) {
        ## return values are already in correct position
        outdf <- df
      } else {
        #' invert map to determine output positions (reorder/repeat entries)
        snI <- seq_len(I$n)
        snJ <- seq_len(J$n)
        snK <- seq_len(K$n)
        imap <- Imap %orifnull% snI
        jmap <- Jmap %orifnull% snJ
        kmap <- Kmap %orifnull% snK
        whichi <- split(seq_along(imap), factor(imap, levels=snI))
        whichj <- split(seq_along(jmap), factor(jmap, levels=snJ))
        whichk <- split(seq_along(kmap), factor(kmap, levels=snK))
        dat.i <- whichi[df$i]
        dat.j <- whichj[df$j]
        dat.k <- whichk[df$k]
        stuff <- mapply(expandwithdata,
                        i=dat.i, j=dat.j, k=dat.k, x=df$x,
                        SIMPLIFY=FALSE)
        outdf <- rbindCompatibleDataFrames(stuff)
      }
      x <- sparse3Darray(i=outdf$i, j=outdf$j, k=outdf$k, x=outdf$x,
                         dims=newdims, dimnames=newdn)
      dimx <- newdims
      dn <- newdn
    }
    if(drop) {
      ## drop margins of extent 1, mirroring base R semantics
      retain <- (dimx > 1)
      nretain <- sum(retain)
      if(nretain == 2) {
        #' result is a matrix
        retained <- which(retain)
        newi <- getElement(x, name=c("i","j","k")[ retained[1] ])
        newj <- getElement(x, name=c("i","j","k")[ retained[2] ])
        newdim <- dimx[retain]
        newdn <- dn[retain]
        return(sparseMatrix(i=newi, j=newj, x=x$x,
                            dims=newdim, dimnames=newdn))
      } else if(nretain == 1) {
        #' sparse vector
        retained <- which(retain)
        newi <- getElement(x, name=c("i","j","k")[retained])
        #' ensure 'strict'
        ord <- order(newi)
        newi <- newi[ord]
        newx <- x$x[ord]
        if(any(dup <- c(FALSE, diff(newi) == 0))) {
          retain <- !dup
          ii <- cumsum(retain)
          newi <- newi[retain]
          newx <- as(tapply(newx, ii, sum), typeof(newx))
        }
        x <- sparseVector(x=newx, i=newi, length=dimx[retained])
      } else if(nretain == 0) {
        #' single value
        x <- as.vector(as.array(x))
      }
    }
    return(x)
  }

  ## Cartesian product of output positions for one sparse entry,
  ## carrying its value x.
  expandwithdata <- function(i, j, k, x) {
    z <- expand.grid(i=i, j=j, k=k)
    if(nrow(z) > 0)
      z$x <- x
    return(z)
  }

  Extract
})

rbindCompatibleDataFrames <- function(x) {
  #' faster version of Reduce(rbind, x) when entries are known to be compatible
  nama2 <- colnames(x[[1]])
  y <- vector(mode="list", length=length(nama2))
  names(y) <- nama2
  for(nam in nama2)
    y[[nam]] <- unlist(lapply(x, getElement, name=nam))
  return(as.data.frame(y))
}

## Subset assignment x[i,j,k] <- value.  Handles a 3-column matrix
## index, out-of-bounds indices (array is extended), and replacement
## values that are constants, vectors, sparse matrices or 3D arrays.
"[<-.sparse3Darray" <- function(x, i, j, k, ..., value) {
  dimx <- dim(x)
  dn <- dimnames(x) %orifnull% rep(list(NULL), 3)
  #' interpret indices
  if(!missing(i) && length(dim(i)) == 2) {
    ## matrix index
    ijk <- as.matrix(i)
    if(!(missing(j) && missing(k)))
      stop("If i is a matrix, j and k should not be given", call.=FALSE)
    if(ncol(ijk) != 3)
      stop("If i is a matrix, it should have 3 columns", call.=FALSE)
    if(!all(inside3Darray(dimx, i)))
      stop("Some indices lie outside array limits", call.=FALSE)
    if(nrow(ijk) == 0)
      return(x) # no items to replace
    ## assemble data frame
    xdata <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
    ## match xdata into ijk (not necessarily the first match in original order)
    m <- matchIntegerDataFrames(xdata[,1:3,drop=FALSE], ijk)
    ## remove any matches, retaining only data that do not match 'i'
    xdata <- xdata[is.na(m), , drop=FALSE] # sic
    ## ensure replacement value is vector-like
    value <- as.vector(value)
    nv <- length(value)
    if(nv != nrow(i) && nv != 1)
      stop(paste("Number of items to replace", paren(nrow(i)),
                 "does not match number of items given", paren(nv)),
           call.=FALSE)
    vdata <- data.frame(i=ijk[,1], j=ijk[,2], k=ijk[,3], x=value)
    ## combine; strict=TRUE re-sorts and merges duplicates
    ydata <- rbind(xdata, vdata)
    y <- with(ydata,
              sparse3Darray(i=i,j=j,k=k,x=x,
                            dims=dimx, dimnames=dn, strict=TRUE))
    return(y)
  }
  I <- grokIndexVector(if(missing(i)) NULL else i, dimx[1], dn[[1]])
  J <- grokIndexVector(if(missing(j)) NULL else j, dimx[2], dn[[2]])
  K <- grokIndexVector(if(missing(k)) NULL else k, dimx[3], dn[[3]])
  IJK <- list(I,J,K)
  if(!all(sapply(lapply(IJK, getElement, name="full"), is.null))) {
    ## out-of-bounds assignment: grow the array (via a dense detour)
    warning("indices exceed array bounds; extending the array dimensions",
            call.=FALSE)
    fullindices <- lapply(IJK, fullIndexSequence)
    dnew <- pmax(dimx, sapply(fullindices, max))
    result <- array(data=RelevantZero(x$x), dim=dnew)
    result[cbind(x$i, x$j, x$k)] <- x$x
    result[fullindices[[1]], fullindices[[2]], fullindices[[3]]] <- value
    result <- as.sparse3Darray(result)
    return(result)
  }
  IJK <- lapply(IJK, getElement, name="strict")
  if(all(sapply(IJK, getElement, name="nind") == 0)) {
    # no elements are indexed
    return(x)
  }
  I <- IJK[[1]]
  J <- IJK[[2]]
  K <- IJK[[3]]
  #' extract current array entries
  xdata <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
  #' identify data volume that will be overwritten
  inI <- I$lo
  inJ <- J$lo
  inK <- K$lo
  #' remove data that will be overwritten
  retain <- !with(xdata, inI[i] & inJ[j] & inK[k])
  xdata <- xdata[retain,,drop=FALSE]
  #' expected dimensions of 'value' implied by indices
  dimVshould <- sapply(IJK, getElement, name="nind")
  dimV <- dim(value)
  if(length(dimV) == 3) {
    #' both source and destination are 3D
    if(all(dimVshould == dimV)) {
      #' replace 3D block by 3D block of same dimensions
      value <- as.sparse3Darray(value)
      vdata <- data.frame(i=value$i, j=value$j, k=value$k, x=value$x)
      # determine positions of replacement data in original array
      vdata <- transform(vdata,
                         i=replacementIndex(i, I),
                         j=replacementIndex(j, J),
                         k=replacementIndex(k, K))
    } else stop(paste("Replacement value has wrong dimensions:",
                      paste(dimV, collapse="x"),
                      "instead of",
                      paste(dimVshould, collapse="x")),
                call.=FALSE)
  } else if(is.null(dimV)) {
    #' replacement value is a vector or sparseVector
    value <- as(value, "sparseVector")
    iv <- value@i
    xv <- value@x
    nv <- value@length
    collapsing <- (dimVshould == 1)
    realdim <- sum(!collapsing)
    if(nv == 1) {
      #' replacement value is a constant
      value <- as.vector(value[1])
      if(identical(value, RelevantZero(x$x))) {
        #' assignment causes relevant entries to be set to zero;
        #' these entries have already been deleted from 'xdata';
        #' nothing to add
        vdata <- data.frame(i=integer(0), j=integer(0), k=integer(0),
                            x=x$x[integer(0)])
      } else {
        #' replicate the constant
        vdata <- expand.grid(i=I$i, j=J$i, k=K$i, x=as.vector(value[1]))
      }
    } else if(realdim == 0) {
      stop(paste("Replacement value has too many entries:",
                 nv, "instead of 1"),
           call.=FALSE)
    } else if(realdim == 1) {
      theindex <- which(!collapsing)
      # target slice is one-dimensional
      if(nv != dimVshould[theindex])
        stop(paste("Replacement value has wrong number of entries:",
                   nv, "instead of", dimVshould[theindex]),
             call.=FALSE)
      newpos <- replacementIndex(iv, IJK[[theindex]])
      vdata <- switch(theindex,
                      data.frame(i=newpos, j=J$i, k=K$i, x=xv),
                      data.frame(i=I$i, j=newpos, k=K$i, x=xv),
                      data.frame(i=I$i, j=J$i, k=newpos, x=xv))
    } else {
      # target slice is two-dimensional
      sdim <- dimVshould[!collapsing]
      sd1 <- sdim[1]
      sd2 <- sdim[2]
      if(nv != sd1)
        stop(paste("Length of replacement vector", paren(nv),
                   "does not match dimensions of array subset",
                   paren(paste(dimVshould, collapse="x"))),
             call.=FALSE)
      firstindex <- which(!collapsing)[1]
      secondindex <- which(!collapsing)[2]
      ## recycle the vector across the second varying margin
      pos1 <- replacementIndex(iv, IJK[[firstindex]])
      pos2 <- replacementIndex(seq_len(sd2), IJK[[secondindex]])
      xv <- rep(xv, sd2)
      pos2 <- rep(pos2, each=length(pos1))
      pos1 <- rep(pos1, sd2)
      pos3 <- if(length(pos1)) IJK[[which(collapsing)]]$i else integer(0)
      vdata <- data.frame(i=pos3, j=pos3, k=pos3, x=xv)
      vdata[,firstindex] <- pos1
      vdata[,secondindex] <- pos2
    }
  } else if(identical(dimVshould[dimVshould > 1], dimV[dimV > 1])) {
    #' lower dimensional sets of the same dimension
    value <- value[drop=TRUE]
    dimV <- dim(value)
    dropping <- (dimVshould == 1)
    if(length(dimV) == 2) {
      ## replacement is a (sparse) matrix filling a 2D slice
      value <- as(value, "TsparseMatrix")
      iv <- value@i + 1L
      jv <- value@j + 1L
      xv <- value@x
      firstindex <- which(!dropping)[1]
      secondindex <- which(!dropping)[2]
      pos1 <- replacementIndex(iv, IJK[[firstindex]])
      pos2 <- replacementIndex(jv, IJK[[secondindex]])
      pos3 <- if(length(pos1)) IJK[[which(dropping)]]$i else integer(0)
      vdata <- data.frame(i=pos3, j=pos3, k=pos3, x=xv)
      vdata[,firstindex] <- pos1
      vdata[,secondindex] <- pos2
    } else {
      ## replacement is a (sparse) vector filling a 1D slice
      value <- as(value, "sparseVector")
      iv <- value@i
      xv <- value@x
      vdata <- data.frame(i=if(dropping[1]) I$i else replacementIndex(iv, I),
                          j=if(dropping[2]) J$i else replacementIndex(iv, J),
                          k=if(dropping[3]) K$i else replacementIndex(iv, K),
                          x=xv)
    }
  } else stop(paste("Replacement value has wrong dimensions:",
                    paste(dimV, collapse="x"),
                    "instead of",
                    paste(dimVshould, collapse="x")),
              call.=FALSE)
  ## combine
  if(nrow(vdata) > 0)
    xdata <- rbind(xdata, vdata)
  y <- with(xdata,
            sparse3Darray(i=i,j=j,k=k,x=x,
                          dims=dimx, dimnames=dn, strict=TRUE))
  return(y)
}

## Concatenate two sparse 3D arrays along the margin 'along' (1, 2 or 3),
## analogous to abind().  Dimnames along the joined margin are padded
## with "" where absent.
bind.sparse3Darray <- function(A,B,along) {
  A <- as.sparse3Darray(A)
  B <- as.sparse3Darray(B)
  check.1.integer(along)
  stopifnot(along %in% 1:3)
  dimA <- dim(A)
  dimB <- dim(B)
  if(!all(dimA[-along] == dimB[-along]))
    stop("dimensions of A and B do not match")
  dimC <- dimA
  dimC[along] <- dimA[along] + dimB[along]
  # extract data
  Adf <- SparseEntries(A)
  Bdf <- SparseEntries(B)
  # realign 'B' coordinate
  Bdf[,along] <- Bdf[,along] + dimA[along]
  # combine
  C <- EntriesToSparse(rbind(Adf, Bdf), dimC)
  # add dimnames
  dnA <- dimnames(A)
  dnB <- dimnames(B)
  if(!is.null(dnA) || !is.null(dnB)) {
    if(length(dnA) != 3) dnA <- rep(list(NULL), 3)
    if(length(dnB) != 3) dnB <- rep(list(NULL), 3)
    dnC <- dnA
    dnC[[along]] <- c(dnA[[along]] %orifnull% rep("", dimA[along]),
                      dnB[[along]] %orifnull% rep("", dimB[along]))
    dimnames(C) <- dnC
  }
  return(C)
}

## NA can only occur among the stored entries, so checking x$x suffices.
anyNA.sparse3Darray <- function(x, recursive=FALSE) {
  anyNA(x$x)
}

## Helpers for type-appropriate 'zero', 'empty' and 'NA' values.
## RelevantZero: length-1 vector of the same storage mode as x
## (0, FALSE, "", 0+0i, ... depending on typeof(x)).
RelevantZero <- function(x) vector(mode=typeof(x), length=1L)
isRelevantZero <- function(x) identical(x, RelevantZero(x))
RelevantEmpty <- function(x) vector(mode=typeof(x), length=0L)
## Indexing one past the end of a length-1 vector yields NA of the
## matching mode.
RelevantNA <- function(x) { RelevantZero(x)[2] }

unionOfSparseIndices <- function(A, B) {
  #' A, B are data frames of indices i, j, k
  ijk <- unique(rbind(A, B))
  colnames(ijk) <- c("i", "j", "k")
  return(ijk)
}

## Group generic for arithmetic/comparison/logical operators.
## Strategy: keep the result sparse whenever op(zero, zero) is zero;
## otherwise expand to a dense array.  Handles scalar operands,
## identical extents, slices (one margin collapsed), and a vector
## matching the first extent.
Ops.sparse3Darray <- function(e1,e2=NULL){
  if(nargs() == 1L) {
    ## unary operators
    switch(.Generic,
           "!" = {
             ## !0 is TRUE, so the result is dense
             result <- do.call(.Generic, list(as.array(e1)))
           },
           "-" = ,
           "+" = {
             ## sign change preserves sparsity pattern
             result <- e1
             result$x <- do.call(.Generic, list(e1$x))
           },
           stop(paste("Unary", sQuote(.Generic),
                      "is undefined for sparse 3D arrays."),
                call.=FALSE))
    return(result)
  }
  # binary operation
  # Decide whether full or sparse
  elist <- list(e1, e2)
  isfull <- sapply(elist, inherits, what=c("matrix", "array"))
  if(any(isfull) &&
     any(sapply(lapply(elist[isfull], dim), prod) > 1)) {
    # full array
    n1 <- length(dim(e1))
    n2 <- length(dim(e2))
    e1 <- if(n1 == 3) as.array(e1) else
          if(n1 == 2) as.matrix(e1) else
          as.vector(as.matrix(as.array(e1)))
    e2 <- if(n2 == 3) as.array(e2) else
          if(n2 == 2) as.matrix(e2) else
          as.vector(as.matrix(as.array(e2)))
    result <- do.call(.Generic, list(e1, e2))
    return(result)
  }
  # sparse result (usually)
  e1 <- as.sparse3Darray(e1)
  e2 <- as.sparse3Darray(e2)
  dim1 <- dim(e1)
  dim2 <- dim(e2)
  mode1 <- typeof(e1$x)
  mode2 <- typeof(e2$x)
  zero1 <- vector(mode=mode1, length=1L)
  zero2 <- vector(mode=mode2, length=1L)
  if(prod(dim1) == 1) {
    ## e1 is constant
    e1 <- as.vector(as.array(e1))
    z12 <- do.call(.Generic, list(e1, zero2))
    if(!isRelevantZero(z12)) {
      # full matrix/array will be generated
      result <- do.call(.Generic, list(e1, as.array(e2)[drop=TRUE]))
    } else {
      # sparse
      result <- e2
      result$x <- do.call(.Generic, list(e1, e2$x))
    }
    return(result)
  }
  if(prod(dim2) == 1) {
    ## e2 is constant
    e2 <- as.vector(as.array(e2))
    z12 <- do.call(.Generic, list(zero1, e2))
    if(!isRelevantZero(z12)) {
      # full matrix/array will be generated
      result <- do.call(.Generic, list(as.array(e1)[drop=TRUE], e2))
    } else {
      # sparse
      result <- e1
      result$x <- do.call(.Generic, list(e1$x, e2))
    }
    return(result)
  }
  z12 <- do.call(.Generic, list(zero1, zero2))
  if(!isRelevantZero(z12)) {
    #' Result is an array
    e1 <- as.array(e1)
    e2 <- as.array(e2)
    result <- do.call(.Generic, list(e1, e2))
    return(result)
  }
  # Result is sparse
  if(identical(dim1, dim2)) {
    #' extents are identical
    ijk1 <- SparseIndices(e1)
    ijk2 <- SparseIndices(e2)
    if(identical(ijk1, ijk2)) {
      #' patterns of nonzero entries are identical
      ijk <- ijk1
      values <- do.call(.Generic, list(e1$x, e2$x))
    } else {
      #' different patterns of nonzero entries
      ijk <- unionOfSparseIndices(ijk1, ijk2)
      values <- as.vector(do.call(.Generic, list(e1[ijk], e2[ijk])))
    }
    dn <- dimnames(e1) %orifnull% dimnames(e2)
    result <- sparse3Darray(i=ijk$i, j=ijk$j, k=ijk$k, x=values,
                            dims=dim1, dimnames=dn, strict=TRUE)
    return(result)
  }
  drop1 <- (dim1 == 1)
  drop2 <- (dim2 == 1)
  if(!any(drop1 & !drop2) && identical(dim1[!drop2], dim2[!drop2])) {
    #' dim2 is a slice of dim1
    ijk1 <- data.frame(i=e1$i, j=e1$j, k=e1$k)
    ijk2 <- data.frame(i=e2$i, j=e2$j, k=e2$k)
    expanding <- which(drop2 & !drop1)
    if(length(expanding) == 1) {
      ## replicate the slice e2 along the expanding margin of e1
      n <- dim1[expanding]
      m <- nrow(ijk2)
      ijk2 <- as.data.frame(lapply(ijk2, rep, times=n))
      ijk2[,expanding] <- rep(seq_len(n), each=m)
      ijk <- unionOfSparseIndices(ijk1, ijk2)
      ijkdrop <- ijk
      if(nrow(ijkdrop) > 0) ijkdrop[,expanding] <- 1
      xout <- do.call(.Generic, list(e1[ijk], e2[ijkdrop]))
      result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L],
                              x=as.vector(xout),
                              dims=dim1, dimnames=dimnames(e1),
                              strict=TRUE)
      return(result)
    }
  }
  if(!any(drop2 & !drop1) && identical(dim2[!drop1], dim1[!drop1])) {
    #' dim1 is a slice of dim2
    ijk1 <- data.frame(i=e1$i, j=e1$j, k=e1$k)
    ijk2 <- data.frame(i=e2$i, j=e2$j, k=e2$k)
    expanding <- which(drop1 & !drop2)
    if(length(expanding) == 1) {
      ## replicate the slice e1 along the expanding margin of e2
      n <- dim2[expanding]
      m <- nrow(ijk1)
      ijk1 <- as.data.frame(lapply(ijk1, rep, times=n))
      ijk1[,expanding] <- rep(seq_len(n), each=m)
      ijk <- unionOfSparseIndices(ijk1, ijk2)
      ijkdrop <- ijk
      if(nrow(ijkdrop) > 0) ijkdrop[,expanding] <- 1L
      xout <- do.call(.Generic, list(e1[ijkdrop], e2[ijk]))
      result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L],
                              x=as.vector(xout),
                              dims=dim2, dimnames=dimnames(e2),
                              strict=TRUE)
      return(result)
    }
  }
  if(all(drop1[-1]) && dim1[1L] == dim2[1L]) {
    #' e1 is a (sparse) vector matching the first extent of e2
    if(.Generic %in% c("*", "&")) {
      # result is sparse: only positions where e2 is nonzero matter
      ijk <- data.frame(i=e2$i, j=e2$j, k=e2$k)
      ones <- rep(1L, nrow(ijk))
      i11 <- data.frame(i=e2$i, j=ones, k=ones)
      xout <- do.call(.Generic, list(e1[i11], e2[ijk]))
      result <- sparse3Darray(i=ijk[,1L], j=ijk[,2L], k=ijk[,3L],
                              x=as.vector(xout),
                              dims=dim2, dimnames=dimnames(e2),
                              strict=TRUE)
    } else {
      # result is full array
      e1 <- as.array(e1)[,,,drop=TRUE]
      e2 <- as.array(e2)
      result <- do.call(.Generic, list(e1, e2))
    }
    return(result)
  }
  stop(paste("Non-conformable arrays:",
             paste(dim1, collapse="x"),
             "and",
             paste(dim2, collapse="x")),
       call.=FALSE)
}

## Group generic for mathematical functions (sqrt, exp, log, ...).
## Sparse form is kept only if f(0)==0; otherwise expand to dense.
Math.sparse3Darray <- function(x, ...){
  z <- RelevantZero(x$x)
  fz <- do.call(.Generic, list(z))
  if(!isRelevantZero(fz)) {
    # result is a full array
    result <- do.call(.Generic, list(as.array(x), ...))
    return(result)
  }
  x$x <- do.call(.Generic, list(x$x))
  return(x)
}

## Group generic for complex functions (Re, Im, Mod, Conj, ...):
## same sparsity-preservation logic as Math.
Complex.sparse3Darray <- function(z) {
  oo <- RelevantZero(z$x)
  foo <- do.call(.Generic, list(z=oo))
  if(!isRelevantZero(foo)) {
    # result is a full array
    result <- do.call(.Generic, list(z=as.array(z)))
    return(result)
  }
  z$x <- do.call(.Generic, list(z=z$x))
  return(z)
}

## Group generic for max/min/range/sum/prod/any/all.
## Sparse arguments are replaced by their stored values, plus one zero
## per argument that has unstored (implicitly zero) cells.
Summary.sparse3Darray <- function(..., na.rm=FALSE) {
  argh <- list(...)
  is3D <- sapply(argh, inherits, what="sparse3Darray")
  if(any(is3D)) {
    xvalues <- lapply(argh[is3D], getElement, name="x")
    fullsizes <- sapply(lapply(argh[is3D], dim), prod)
    argh[is3D] <- xvalues
    #' zero entry should be appended if and only if there are any empty cells
    zeroes <- lapply(xvalues, RelevantZero)
    zeroes <- zeroes[lengths(xvalues) < fullsizes]
    argh <- append(argh, zeroes)
  }
  rslt <- do.call(.Generic, append(argh, list(na.rm=na.rm)))
  return(rslt)
}

SparseIndices <- function(x) {
  #' extract indices of entries of sparse vector/matrix/array
  nd <- length(dim(x))
  if(nd > 3)
    stop("Arrays of more than 3 dimensions are not supported", call.=FALSE)
  if(nd == 0 || nd == 1) {
    x <- as(x, "sparseVector")
    df <- data.frame(i=x@i)
  } else if(nd == 2) {
    x <- as(x, "TsparseMatrix")
    df <- data.frame(i=x@i + 1L, j=x@j + 1L)
  } else if(nd == 3) {
    x <- as.sparse3Darray(x)
    df <- data.frame(i=x$i, j=x$j, k=x$k)
  }
  return(df)
}

SparseEntries <- function(x) {
  #' extract entries of sparse vector/matrix/array
  nd <- length(dim(x))
  if(nd > 3)
    stop("Arrays of more than 3 dimensions are not supported", call.=FALSE)
  if(nd == 0 || nd == 1) {
    x <- as(x, "sparseVector")
    df <- data.frame(i=x@i, x=x@x)
  } else if(nd == 2) {
    x <- as(x, "TsparseMatrix")
    df <- data.frame(i=x@i + 1L, j=x@j + 1L, x=x@x)
  } else if(nd == 3) {
    x <- as.sparse3Darray(x)
    df <- data.frame(i=x$i, j=x$j, k=x$k, x=x$x)
  }
  return(df)
}

## (This definition runs past the end of the chunk -- kept verbatim)
EntriesToSparse <- function(df, dims) {
  #' convert data frame of indices and values
  #' to sparse vector/matrix/array
  nd <- length(dims)
  if(nd == 0)
    return(with(df, as(sum(x), typeof(x))))
  sn <- seq_len(nd)
  colnames(df)[sn] <- c("i","j","k")[sn]
  if(nd == 1) {
    #' sparse vector: duplicate entries not allowed
    df <- df[with(df, order(i)), , drop=FALSE]
    dup <- c(FALSE, with(df, diff(i) == 0))
    if(any(dup)) {
      #' accumulate values at the same array location
      first <- !dup
      newi <- cumsum(first)
      newx <- as(tapply(df$x, newi, sum), typeof(df$x))
      df <- data.frame(i=newi[first], x=newx)
    }
    result <- with(df,
sparseVector(i=i, x=x, length=dims)) } else if(nd == 2) { result <- with(df, sparseMatrix(i=i, j=j, x=x, dims=dims)) } else if(nd == 3) { result <- with(df, sparse3Darray(i=i, j=j, k=k, x=x, dims=dims)) } return(result) } evalSparse3Dentrywise <- function(expr, envir) { ## DANGER: this assumes all sparse arrays in the expression ## have the same pattern of nonzero elements! e <- as.expression(substitute(expr)) ## get names of all variables in the expression varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") ## get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) ## find out which variables are sparse3Darray isSpud <- sapply(vars, inherits, what="sparse3Darray") if(!any(isSpud)) stop("No sparse 3D arrays in this expression") spuds <- vars[isSpud] template <- spuds[[1L]] ## replace each array by its entries, and evaluate spudvalues <- lapply(spuds, getElement, name="x") ## minimal safety check if(length(unique(lengths(spudvalues))) > 1) stop("Different numbers of sparse entries", call.=FALSE) vars[isSpud] <- spudvalues v <- eval(e, append(vars, funs)) ## reshape as 3D array result <- sparse3Darray(x=v, i=template$i, j=template$j, k=template$k, dims=dim(template), dimnames=dimnames(template)) return(result) } spatstat/R/pointweights.R0000644000176200001440000000247313613547031015176 0ustar liggesusers#' pointweights.R #' #' get a valid vector of weights for a point pattern #' #' Argument 'weights' is usually passed from a user-level function #' It may be: #' a numeric vector #' a single number #' a function(x,y) #' a pixel image #' an expression involving the coordinates and marks 
#'
#' $Revision: 1.2 $ $Date: 2020/01/27 09:08:06 $

pointweights <- function(X, ..., weights=NULL, parent=NULL) {
  #' Coerce a user-supplied 'weights' argument into a numeric vector
  #' with one entry per point of X (or NULL if no weights were given).
  #' Accepts a single number, a numeric vector, a function(x,y),
  #' a pixel image, or an expression in the coordinates and marks;
  #' 'parent' is the enclosing environment for evaluating an expression.
  if(is.null(weights)) return(NULL)
  n <- npoints(X)
  if(is.numeric(weights) && is.vector(as.numeric(weights))) {
    #' constant or numeric vector: replicate a scalar to all points
    w <- if(length(weights) == 1) rep(weights, n) else weights
  } else if(is.im(weights)) {
    #' pixel image: look up values at the points (warns if NA)
    w <- safelookup(weights, X)
  } else if(is.function(weights)) {
    #' function of the coordinates
    w <- weights(X$x, X$y)
  } else if(is.expression(weights)) {
    #' evaluate expression in data frame of coordinates and marks
    df <- as.data.frame(X)
    w <- try(eval(weights, envir=df, enclos=parent))
    if(inherits(w, "try-error"))
      stop("Unable to evaluate expression for weights", call.=FALSE)
    if(length(w) == 0) return(NULL)
  } else stop(paste("Argument 'weights' should be",
                    "a numeric vector, a function, an image,",
                    "or an expression"),
              call.=FALSE)
  #' validate length before returning
  check.nvector(w, n)
  return(w)
}
spatstat/R/nearestsegment.R0000644000176200001440000000506113333543255015475 0ustar liggesusers#
# nearestsegment.R
#
# $Revision: 1.12 $ $Date: 2018/03/07 01:56:36 $
#
# Given a point pattern X and a line segment pattern Y,
# for each point x of X, determine which segment of Y is closest to x
# and find the point on Y closest to x.
#
#' For each point of X, return the index of the nearest segment of Y.
nearestsegment <- function(X,Y) {
  return(ppllengine(X,Y,"identify"))
}

#' For each point of X, project it onto the nearest segment of Y.
project2segment <- function(X, Y) {
  return(ppllengine(X,Y,"project"))
}

#' Workhorse for nearestsegment/project2segment.
#' X: point pattern (ppp); Y: segment pattern (psp).
#' action = "identify": return index of nearest segment per point;
#'          "distance": data frame with distance and index;
#'          "project" : list(Xproj, mapXY, d, tp) where Xproj is the
#'                      projected point pattern and tp the parametric
#'                      coordinate (0..1) along the chosen segment.
ppllengine <- function(X, Y, action="project", check=FALSE) {
  stopifnot(is.ppp(X))
  stopifnot(is.psp(Y))
  stopifnot(action %in% c("distance", "identify", "project"))
  # deal with empty patterns
  if(X$n == 0) {
    nowt <- numeric(0)
    none <- integer(0)
    switch(action,
           identify = return(none),
           distance = return(list(dist=nowt, which=none)),
           project = return(list(Xproj=X, mapXY=none, d=nowt, tp=nowt)))
  }
  if(Y$n == 0)
    stop("Segment pattern Y contains 0 segments; projection undefined")
  #
  XX <- as.matrix(as.data.frame(unmark(X)))
  YY <- as.matrix(as.data.frame(unmark(Y)))
  # determine which segment lies closest to each point
  # ('huge' is an upper bound used by the minimum-distance routine)
  huge <- max(diameter(as.rectangle(as.owin(X))),
              diameter(as.rectangle(as.owin(Y))))
  d <- distppllmin(XX, YY, huge^2)
  mapXY <- d$min.which
  if(action == "identify")
    return(mapXY)
  else if(action == "distance")
    return(data.frame(dist=d$min.d, which=mapXY))
  # combine relevant rows of data
  alldata <- as.data.frame(cbind(XX, YY[mapXY, ,drop=FALSE]))
  colnames(alldata) <- c("x", "y", "x0", "y0", "x1", "y1")
  # coordinate geometry
  dx <- with(alldata, x1-x0)
  dy <- with(alldata, y1-y0)
  leng <- sqrt(dx^2 + dy^2)
  # rotation sines & cosines (may include 0/0)
  co <- dx/leng
  si <- dy/leng
  # vector to point from first endpoint of segment
  xv <- with(alldata, x - x0)
  yv <- with(alldata, y - y0)
  # rotate coordinate system so that x axis is parallel to line segment
  xpr <- xv * co + yv * si
  # ypr <- - xv * si + yv * co
  # determine whether projection is an endpoint or interior point of segment
  # (xpr is non-finite for degenerate zero-length segments)
  ok <- is.finite(xpr)
  left <- !ok | (xpr <= 0)
  right <- ok & (xpr >= leng)
  # location of projected point in rotated coordinates
  # (ifelseAX/ifelseXY/ifelseXB: vectorised ifelse variants,
  #  presumably from spatstat.utils -- confirm)
  xr <- with(alldata, ifelseAX(left, 0, ifelseXY(right, leng, xpr)))
  # back to standard coordinates
  xproj <- with(alldata, x0 + ifelseXB(ok, xr * co, 0))
  yproj <- with(alldata, y0 + ifelseXB(ok, xr * si, 0))
  Xproj <- ppp(xproj, yproj, window=X$window, marks=X$marks, check=check)
  # parametric coordinates
  tp <- xr/leng
  tp[!is.finite(tp)] <- 0
  #
  return(list(Xproj=Xproj, mapXY=mapXY, d=d$min.d, tp=tp))
}
spatstat/R/bw.pcf.R0000644000176200001440000001372213544361606013635 0ustar liggesusers#'
#' bw.pcf.R
#'
#' $Revision: 1.5 $ $Date: 2019/09/30 07:51:52 $
#'
#' bandwidth selection for pcf
#' with least-squares cross-validation method
#'
#' Original code by: Rasmus Waagepetersen and Abdollah Jalilian
#'
#' References:
#' Guan, Y. (2007). A composite likelihood cross-validation approach in
#' selecting bandwidth for the estimation of the pair correlation function.
#' Scandinavian Journal of Statistics, 34(2), 336--346.
#' DOI: http://doi.org/10.1111/j.1467-9469.2006.00533.x
#' Guan, Y. (2007). A least-squares cross-validation bandwidth
#' selection approach in pair correlation function estimations.
#' Statistics & Probability Letters, 77(18), 1722--1729.
#' DOI: http://doi.org/10.1016/j.spl.2007.04.016

#' Select the smoothing bandwidth for pcf estimation by maximising a
#' cross-validation criterion (composite likelihood or least squares)
#' over the interval 'srange'. Returns a bw.optim object.
bw.pcf <- function(X, rmax=NULL, lambda=NULL, divisor="r",
                   kernel="epanechnikov", nr=10000, bias.correct=TRUE,
                   cv.method=c("compLik", "leastSQ"), simple=TRUE,
                   srange=NULL, ...,
                   verbose=FALSE, warn=TRUE)
{
  stopifnot(is.ppp(X))
  X <- unmark(X)
  win <- Window(X)
  areaW <- area(win)
  nX <- npoints(X)
  cv.method <- match.arg(cv.method)
  kernel <- match.kernel(kernel)
  #' maximum distance lag: rmax
  if (is.null(rmax))
    rmax <- rmax.rule("K", win, nX/areaW)
  if(is.null(srange)) srange <- c(0, rmax/4)
  #' number of subintervals for discretization of [0, rmax]: nr
  #' length of subintervals
  discr <- rmax / nr
  #' breaks of subintervals
  rs <- seq(0, rmax, length.out= nr + 1)
  #' closepairs distances: \\ u - v \\
  #' Pre-compute close pair distances for use in 'pcf'
  #' we need close pairs up to a distance rmax + smax
  #' where 'smax' is the maximum halfwidth of the support of the kernel
  smax <- srange[2] * (if(kernel == "gaussian") 2 else kernel.factor(kernel))
  cpfull <- closepairs(X, rmax + smax, what="all", twice=TRUE)
  #' For cross-validation, restrict close pairs to distance rmax
  ok <- (cpfull$d <= rmax)
  cp <- lapply(cpfull, "[", i=ok)
  ds <- cp$d
  #' determine which subinterval each close-pair distance falls in
  idx <- round(ds / discr) + 1L
  idx <- pmin.int(idx, nr+1L)
  #' translation edge correction factor: /W|/|W \cap W_{u-v}|
  edgewt <- edge.Trans(dx=cp$dx, dy=cp$dy, W=win, paired=TRUE)
  if(homogeneous <- is.null(lambda)) {
    #' homogeneous case: constant intensity estimate
    lambda <- nX/areaW
    lambda2area <- lambda^2 * areaW
    pcfargs <- list(X=X, r=rs, divisor=divisor, kernel=kernel,
                    correction="translate", close=cpfull)
    renorm.factor <- 1
  } else {
    # inhomogeneous case: lambda is assumed to be a numeric vector giving
    # the intensity at the points of the point pattern X
    check.nvector(lambda, nX)
    lambda2area <- lambda[cp$i] * lambda[cp$j] * areaW
    pcfargs <- list(X=X, lambda=lambda, r=rs,
                    divisor=divisor, kernel=kernel,
                    correction="translate", close=cpfull)
    renorm.factor <- (areaW/sum(1/lambda))
  }
  #' bundle the precomputed quantities into an environment so that
  #' CVforPCF can modify/read them cheaply during optimisation
  stuff <- list(cv.method=cv.method,
                kernel=kernel,
                homogeneous=homogeneous,
                bias.correct=bias.correct,
                simple = simple,
                discr=discr,
                rs=rs,
                cp=cp,
                ds=ds,
                idx=idx,
                edgewt=edgewt,
                pcfargs=pcfargs,
                lambda=lambda,
                lambda2area=lambda2area,
                renorm.factor=renorm.factor,
                show=verbose)
  stuff <- list2env(stuff)
  #' find optimum bandwidth
  z <- optimizeWithTrace(CVforPCF, srange, maximum=TRUE, stuff=stuff)
  #' pack up
  ox <- order(z$x)
  sigma <- z$x[ox]
  cv <- z$y[ox]
  criterion <- switch(cv.method,
                      compLik = "composite likelihood cross-validation",
                      leastSQ = "least squares cross-validation")
  result <- bw.optim(cv, sigma, which.max(cv),
                     criterion = criterion,
                     warnextreme=warn,
                     hargnames=c("rmax", "srange"),
                     unitname=unitname(X))
  return(result)
}

#' Cross-validation objective for bw.pcf: evaluate the chosen criterion
#' at bandwidth 'bw', using the precomputed quantities in the
#' environment 'stuff'.
CVforPCF <- function(bw, stuff) {
  stuff$bw <- bw
  with(stuff, {
    if(show) splat("bw=", bw)
    #' values of pair correlation at breaks of subintervals
    a <- append(pcfargs, list(bw=bw))
    grs <- if(homogeneous) do.call(pcf.ppp, a) else do.call(pcfinhom, a)
    grs <- grs$trans
    #' bias correction
    if (bias.correct) {
      grs <- grs / pkernel(rs, kernel, 0, bw)
      dcorrec <- pkernel(ds, kernel, 0, bw)
    } else {
      dcorrec <- 1
    }
    #' make sure that the estimated pair correlation at origin is finite
    if (!is.finite(grs[1]))
      grs[1] <- grs[2]
    #' approximate the pair correlation values at closepairs distances
    gds <- grs[idx]
    wt <- edgewt / (2 * pi * ds * lambda2area * dcorrec) * renorm.factor
    #' remove pairs to approximate the cross-validation term: g^{-(u, v)}
    if (simple) {
      gds <- gds - 2 * wt * dkernel(0, kernel, 0, bw)
    } else {
      cpi <- cp$i
      cpj <- cp$j
      #' NOTE(review): 1:length(ds) iterates over c(1,0) when ds is
      #' empty; seq_along(ds) would be safer -- confirm ds is nonempty here
      for (k in 1:length(ds)) {
        exclude <- (cpi == cpi[k]) | (cpj == cpj[k])
        gds[k] <- gds[k] - 2 * sum(wt[exclude] *
                                   dkernel(ds[k] - ds[exclude],
                                           kernel, 0, bw))
      }
    }
    #' remove negative and zero values
    gds <- pmax.int(.Machine$double.eps, gds)
    switch(cv.method,
           compLik={
             #' composite likelihood cross-validation
             #' the integral term: 2 \pi \int_{0}^{rmax} \hat g(r) r dr
             normconst <- 2 * pi * sum(grs * rs) * discr
             value <- mean(log(gds)) - log(normconst)
           },
           leastSQ={
             #' least squares cross-validation
             #' the integral term: 2 \pi \int_{0}^{rmax} \hat g^2(r) r dr
             normconst <- 2 * pi * sum(grs^2 * rs) * discr
             value <- 2 * sum(gds * edgewt / (lambda2area)) - normconst
           },
           stop("Unrecognised cross-validation method"))
    if(show) splat("value=", value)
    return(value)
  })
}
spatstat/R/localpcf.R0000644000176200001440000001336513503620200014223 0ustar liggesusers#
# localpcf.R
#
# $Revision: 1.23 $ $Date: 2019/06/23 06:38:19 $
#
#

#' Local (point-by-point) pair correlation function estimates,
#' assuming homogeneous intensity.
localpcf <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) {
  if(length(list(...)) > 0)
    warning("Additional arguments ignored")
  stopifnot(is.ppp(X))
  localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan)
}

#' Local pair correlation with inhomogeneous intensity 'lambda'
#' (resolved from an estimate if not supplied).
localpcfinhom <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15,
                          lambda=NULL, sigma=NULL, varcov=NULL,
                          update=TRUE, leaveoneout=TRUE) {
  stopifnot(is.ppp(X))
  a <- resolve.lambda(X, lambda, ...,
                      sigma=sigma, varcov=varcov,
                      update=update, leaveoneout=leaveoneout)
  result <- localpcfengine(X, ...,
                           delta=delta, rmax=rmax, nr=nr, stoyan=stoyan,
                           lambda=a$lambda)
  if(a$danger)
    attr(result, "dangerous") <- a$dangerous
  return(result)
}

#' Shared engine: compute the matrix of local pcf estimates, apply the
#' border correction, and wrap the columns in an 'fv' object.
localpcfengine <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15,
                           lambda=NULL) {
  m <- localpcfmatrix(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan,
                      lambda=lambda)
  r <- attr(m, "r")
  delta <- attr(m, "delta")
  nX <- npoints(X)
  if(nX == 0) {
    df <- data.frame(r=r, theo=rep.int(1, length(r)))
    nama <- desc <- labl <- NULL
  } else {
    # border correction: blank out estimates at distances beyond the
    # point's distance to the window boundary
    dbord <- bdist.points(X)
    m[r[row(m)] > dbord[col(m)]] <- NA
    #
    df <- data.frame(m, r=r, theo=rep.int(1, length(r)))
    icode <- unlist(lapply(seq_len(nX), numalign, nmax=nX))
    nama <- paste("est", icode, sep="")
    desc <- paste("estimate of %s for point", icode)
    labl <- paste("%s[", icode, "](r)", sep="")
  }
  names(df) <- c(nama, "r", "theo")
  desc <- c(desc, "distance argument r", "theoretical Poisson %s")
  labl <- c(labl, "r", "%s[pois](r)")
  # create fv object
  g <- fv(df, "r", quote(localg(r)),
          "theo", , c(0, max(r)), labl, desc, fname="localg")
  # default is to display them all
  formula(g) <- . ~ r
  fvnames(g, ".") <- names(df)[names(df) != "r"]
  unitname(g) <- unitname(X)
  attr(g, "delta") <- delta
  attr(g, "correction") <- "border"
  return(g)
}

#' Compute the nr x nY matrix of local pcf estimates via C code
#' ("locpcfx" unweighted, "locWpcfx" weighted by 1/lambda).
#' Points are sorted by x coordinate before the C call.
localpcfmatrix <- function(X, i=seq_len(npoints(X)), ...,
                           lambda = NULL,
                           delta=NULL, rmax=NULL,
                           nr=512, stoyan=0.15) {
  missi <- missing(i)
  weighted <- !is.null(lambda)
  nX <- npoints(X)
  nY <- if(missi) nX else length(seq_len(nX)[i])
  W <- as.owin(X)
  lambda.ave <- nX/area(W)
  # default kernel halfwidth follows Stoyan's rule of thumb
  if(is.null(delta))
    delta <- stoyan/sqrt(lambda.ave)
  if(is.null(rmax))
    rmax <- rmax.rule("K", W, lambda.ave)
  #
  if(nX == 0 || nY == 0) {
    out <- matrix(0, nr, 0)
  } else {
    # sort points in increasing order of x coordinate
    oX <- fave.order(X$x)
    Xsort <- X[oX]
    idXsort <- (1:nX)[oX]
    if(weighted) {
      lambdaXsort <- lambda[oX]
      weightXsort <- 1/lambdaXsort
    }
    if(missi) {
      Y <- X
      oY <- oX
      Ysort <- Xsort
      idYsort <- idXsort
    } else {
      # i is some kind of index
      Y <- X[i]
      idY <- (1:nX)[i]
      oY <- fave.order(Y$x)
      Ysort <- Y[oY]
      idYsort <- idY[oY]
    }
    nY <- npoints(Y)
    force(nr)
    # call C
    if(!weighted) {
      zz <- .C("locpcfx",
               nn1 = as.integer(nY),
               x1 = as.double(Ysort$x),
               y1 = as.double(Ysort$y),
               id1 = as.integer(idYsort),
               nn2 = as.integer(nX),
               x2 = as.double(Xsort$x),
               y2 = as.double(Xsort$y),
               id2 = as.integer(idXsort),
               nnr = as.integer(nr),
               rmaxi=as.double(rmax),
               del=as.double(delta),
               pcf=as.double(double(nr * nY)),
               PACKAGE = "spatstat")
    } else {
      zz <- .C("locWpcfx",
               nn1 = as.integer(nY),
               x1 = as.double(Ysort$x),
               y1 = as.double(Ysort$y),
               id1 = as.integer(idYsort),
               nn2 = as.integer(nX),
               x2 = as.double(Xsort$x),
               y2 = as.double(Xsort$y),
               id2 = as.integer(idXsort),
               w2 = as.double(weightXsort),
               nnr = as.integer(nr),
               rmaxi=as.double(rmax),
               del=as.double(delta),
               pcf=as.double(double(nr * nY)),
               PACKAGE = "spatstat")
    }
    out <- matrix(zz$pcf, nr, nY)
    # reorder columns to match original
    out[, oY] <- out
    # rescale
    out <- out/(2 * pi * if(!weighted) lambda.ave else 1)
  }
  # dress up
  attr(out, "r") <- seq(from=0, to=rmax, length.out=nr)
  attr(out, "delta") <- delta
  class(out) <- c("localpcfmatrix", class(out))
  return(out)
}

#' Print method: brief summary of a localpcfmatrix.
print.localpcfmatrix <- function(x, ...) {
  cat("Matrix of local pair correlation estimates\n")
  nc <- ncol(x)
  nr <- nrow(x)
  cat(paste("pcf estimates for", nc,
            ngettext(nc, "point", "points"), "\n"))
  rval <- attr(x, "r")
  cat(paste("r values from 0 to", max(rval), "in", nr, "steps\n"))
  return(invisible(NULL))
}

#' Plot method: one curve per point, via matplot.
plot.localpcfmatrix <- function(x, ...) {
  xname <- short.deparse(substitute(x))
  rval <- attr(x, "r")
  do.call(matplot,
          resolve.defaults(list(rval, x),
                           list(...),
                           list(type="l", main=xname,
                                xlab="r", ylab="pair correlation")))
}

#' Subset method: preserve the 'r' and 'delta' attributes and the class
#' (rows index r values, columns index points).
"[.localpcfmatrix" <- function(x, i, ...) {
  r <- attr(x, "r")
  delta <- attr(x, "delta")
  class(x) <- "matrix"
  if(missing(i)) {
    x <- x[ , ...]
  } else {
    x <- x[i, ...]
    if(is.matrix(i))
      return(x)
    r <- r[i]
  }
  if(!is.matrix(x))
    x <- matrix(x, nrow=length(r))
  attr(x, "r") <- r
  attr(x, "delta") <- delta
  class(x) <- c("localpcfmatrix", class(x))
  return(x)
}
spatstat/R/uniquemap.R0000644000176200001440000001173413613216544014460 0ustar liggesusers#'
#' uniquemap.R
#'
#' Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019
#' Licence: GNU Public Licence >= 2
#'
#' $Revision: 1.16 $ $Date: 2020/01/26 03:50:10 $

#' uniquemap(x): for each element of x, the index of the earliest
#' element that is identical to it (its own index if unique).
uniquemap <- function(x) { UseMethod("uniquemap") }

uniquemap.default <- function(x) {
  result <- seqn <- seq_along(x)
  if(length(x) <= 1) return(result)
  if(is.atomic(x) && (is.factor(x) || (is.vector(x) && is.numeric(x)))) {
    ## fast path: sort, then locate first occurrence of each run
    if(is.factor(x)) x <- as.integer(x)
    o <- order(x, seqn)
    isfirst <- c(TRUE, (diff(x[o]) != 0))
    omap <- cumsum(isfirst)
    result <- seqn
    result[o] <- o[isfirst][omap]
    return(result)
  }
  ## general path via duplicated()/match()
  dup <- duplicated(x)
  ux <- x[!dup]
  mapdup <- match(x[dup], ux)
  result[dup] <- which(!dup)[mapdup]
  return(result)
}

#' uniquemap for matrix rows.
uniquemap.matrix <- function(x) {
  n <- nrow(x)
  result <- seqn <- seq_len(n)
  if(n <= 1) return(result)
  #' faster algorithms for special cases
  nc <- ncol(x)
  if(nc == 1L) return(uniquemap(x[,1]))
  if(is.numeric(x)) {
    if(nc == 2L) {
      oo <- order(x[,1], x[,2], seqn)
      xx <- x[oo, , drop=FALSE]
      isfirst <- c(TRUE, (diff(xx[,1]) != 0) | (diff(xx[,2]) != 0))
    } else {
      ## y <- asplit(x, 2) would require R 3.6.0
      y <- split(as.vector(x), factor(as.vector(col(x)), levels=1:nc))
      oo <- do.call(order, append(unname(y), list(seqn)))
      xx <- x[oo, , drop=FALSE]
      isfirst <- c(TRUE, matrowany(apply(xx, 2, diff) != 0))
    }
    uniqueids <- seqn[oo][isfirst]
    lastunique <- cumsum(isfirst)
    result[oo] <- uniqueids[lastunique]
    return(result)
  }
  #' non-numeric matrix e.g. character: quadratic pairwise comparison
  if(!anyDuplicated(x)) return(result)
  dup <- duplicated(x)
  uni <- which(!dup)
  for(j in which(dup)) {
    for(i in uni[uni < j]) {
      if(IdenticalRowPair(i, j, x)) {
        result[j] <- i
        break
      }
    }
  }
  return(result)
}

#' uniquemap for data frame rows (same structure as the matrix method).
uniquemap.data.frame <- function(x) {
  n <- nrow(x)
  result <- seqn <- seq_len(n)
  if(n <= 1) return(result)
  #' faster algorithms for special cases
  nc <- ncol(x)
  if(nc == 1L) return(uniquemap(x[,1]))
  if(all(sapply(x, is.numeric))) {
    if(nc == 2L) {
      oo <- order(x[,1], x[,2], seqn)
      xx <- x[oo, , drop=FALSE]
      isfirst <- c(TRUE, (diff(xx[,1]) != 0) | (diff(xx[,2]) != 0))
    } else {
      oo <- do.call(order, append(unname(as.list(x)), list(seqn)))
      xx <- x[oo, , drop=FALSE]
      isfirst <- c(TRUE, matrowany(apply(xx, 2, diff) != 0))
    }
    uniqueids <- seqn[oo][isfirst]
    lastunique <- cumsum(isfirst)
    result[oo] <- uniqueids[lastunique]
    return(result)
  }
  #' general case
  if(!anyDuplicated(x)) return(result)
  dup <- duplicated(x)
  uni <- which(!dup)
  for(j in which(dup)) {
    for(i in uni[uni < j]) {
      if(IdenticalRowPair(i, j, x)) {
        result[j] <- i
        break
      }
    }
  }
  return(result)
}

#' uniquemap for point patterns: use C code when the marks are absent
#' or can be encoded as integers, otherwise fall back to R.
uniquemap.ppp <- function(x) {
  n <- npoints(x)
  seqn <- seq_len(n)
  if(n <= 1) return(seqn)
  marx <- marks(x)
  switch(markformat(marx),
         none = { useC <- TRUE },
         vector = {
           #' convert to integers if possible
           if(is.integer(marx) || is.factor(marx)) {
             marx <- as.integer(marx)
             useC <- TRUE
           } else {
             um <- unique(marx)
             ## integer encoding only if the codes fit comfortably
             if(length(um) <= 2^30) {
               marx <- match(marx, um)
               useC <- TRUE
             } else {
               useC <- FALSE
             }
           }
         },
         { useC <- FALSE })
  if(!useC) {
    #' first find duplicated spatial coordinates
    u <- uniquemap(unmark(x))
    #' add marks
    df <- cbind(data.frame(ind=seqn, uni=u), as.data.frame(marx))
    bb <- split(df, factor(u))
    #' consider each set of duplicated locations
    for(b in bb) {
      #' find unique rows of marks, as a list
      mrows <- lapply(seq_len(nrow(b)), function(i) b[i, -(1:2)])
      um <- unique(mrows)
      #' match other rows to them
      ma <- match(mrows, um)
      #' map to original index
      u[b$ind] <- b$ind[ma]
    }
    return(u)
  }
  #' unmarked or integer/factor marked: C implementation on x-sorted points
  xx <- x$x
  yy <- x$y
  o <- order(xx, seqn)
  if(is.null(marx)) {
    umap <- .C("uniqmapxy",
               n=as.integer(n),
               x=as.double(xx[o]),
               y=as.double(yy[o]),
               uniqmap=as.integer(integer(n)),
               PACKAGE="spatstat")$uniqmap
  } else {
    #' marks are (converted to) integers
    umap <- .C("uniqmap2M",
               n=as.integer(n),
               x=as.double(xx[o]),
               y=as.double(yy[o]),
               marks=as.integer(marx[o]),
               uniqmap=as.integer(integer(n)),
               PACKAGE="spatstat")$uniqmap
  }
  ## C code returns 0 for "no earlier duplicate"; map those to themselves
  nodup <- (umap == 0)
  umap[nodup] <- which(nodup)
  result <- integer(n)
  result[o] <- o[umap]
  return(result)
}

#' uniquemap for point patterns on a network: delegate to the
#' data-frame method when there are duplicated spatial locations.
uniquemap.lpp <- function(x) {
  n <- npoints(x)
  if(n <= 1 || !anyDuplicated(as.ppp(x)))
    return(seq_len(n))
  result <- uniquemap(as.data.frame(x))
  return(result)
}

#' uniquemap for general multidimensional point patterns.
uniquemap.ppx <- function(x) {
  uniquemap(as.data.frame(x))
}
spatstat/R/units.R0000644000176200001440000001344713433151224013612 0ustar liggesusers#
# Functions for extracting and setting the name of the unit of length
#
# $Revision: 1.29 $ $Date: 2019/02/20 03:34:50 $
#
#

#' unitname(x): extract the unit of length of an object.
unitname <- function(x) { UseMethod("unitname") }

unitname.owin <- function(x) {
  u <- as.unitname(x$units)
  return(u)
}

unitname.ppp <- function(x) {
  ## unit of a point pattern is stored on its window
  u <- as.unitname(x$window$units)
  return(u)
}

unitname.im <- function(x) {
  u <- as.unitname(x$units)
  return(u)
}

unitname.default <- function(x) { return(as.unitname(attr(x, "units"))) }

#' unitname(x) <- value: assign the unit of length.
"unitname<-" <- function(x, value) { UseMethod("unitname<-") }

"unitname<-.owin" <- function(x, value) {
  x$units <- as.unitname(value)
  return(x)
}

"unitname<-.ppp" <- function(x, value) {
  w <- x$window
  unitname(w) <- value
  x$window <- w
  return(x)
}

"unitname<-.im" <- function(x, value) {
  x$units <- as.unitname(value)
  return(x)
}

"unitname<-.default" <- function(x, value) {
  if(is.null(x)) return(x)
  attr(x, "units") <- as.unitname(value)
  return(x)
}

### class 'unitname'

#' Construct a 'unitname' object: singular name, plural name and a
#' positive numeric multiplier (one unit = mul * named units).
makeunitname <- function(sing="unit", plur="units", mul = 1) {
  if(!is.character(sing))
    stop("In unit name, first entry should be a character string")
  if(!is.character(plur))
    stop("In unit name, second entry should be a character string")
  mul <- try(as.numeric(mul), silent=TRUE)
  if(inherits(mul, "try-error"))
    stop("In unit name, third entry should be a number")
  if(length(mul) != 1 || mul <= 0)
    stop("In unit name, third entry should be a single positive number")
  u <- list(singular=sing, plural=plur, multiplier=mul)
  ## a multiplier only makes sense for a specifically named unit
  if(mul != 1 && (sing=="unit" || plur=="units"))
    stop(paste("A multiplier is not allowed",
               "if the unit does not have a specific name"))
  class(u) <- "unitname"
  return(u)
}

#' Coerce NULL / character / list input to a 'unitname' object.
as.unitname <- function(s) {
  if(inherits(s, "unitname")) return(s)
  s <- as.list(s)
  n <- length(s)
  if(n > 3)
    stop(paste("Unit name should be a character string,",
               "or a vector/list of 2 character strings,",
               "or a list(character, character, numeric)"))
  ## dispatch on length 0,1,2,3 (switch index is n+1)
  out <- switch(n+1,
                makeunitname(),
                makeunitname(s[[1]], s[[1]]),
                makeunitname(s[[1]], s[[2]]),
                makeunitname(s[[1]], s[[2]], s[[3]]))
  return(out)
}

print.unitname <- function(x, ...) {
  mul <- x$multiplier
  if(mul == 1)
    cat(paste(x$singular, "/", x$plural, "\n"))
  else
    cat(paste(mul, x$plural, "\n"))
  return(invisible(NULL))
}

as.character.unitname <- function(x, ...) {
  mul <- x$multiplier
  return(if(mul == 1) x$plural else paste(mul, x$plural))
}

#' TRUE if u is the anonymous default unit with multiplier 1.
is.vanilla <- function(u) {
  u <- as.unitname(u)
  z <- (u$singular == "unit") && (u$multiplier == 1)
  return(z)
}

#' Summarise a unitname: legend/axis/explanation strings used by
#' print and plot methods.
summary.unitname <- function(object, ...)
{
  x <- object
  scaled <- (x$multiplier != 1)
  named <- (x$singular != "unit")
  vanilla <- !named && !scaled
  out <-
    if(vanilla) {
      list(legend = NULL,
           axis = NULL,
           explain = NULL,
           singular = "unit",
           plural = "units")
    } else if(named & !scaled) {
      list(legend = paste("Unit of length: 1", x$singular),
           axis = paren(x$plural, type=spatstat.options('units.paren')),
           explain = NULL,
           singular = x$singular,
           plural = x$plural)
    } else {
      expanded <- paste(x$multiplier, x$plural)
      expla <- paren(paste("one unit =", expanded),
                     type=spatstat.options('units.paren'))
      list(legend = paste("Unit of length:", expanded),
           axis = expla,
           explain = expla,
           singular = "unit",
           plural = "units")
    }
  out <- append(out, list(scaled = scaled,
                          named = named,
                          vanilla = vanilla))
  class(out) <- "summary.unitname"
  return(out)
}

print.summary.unitname <- function(x, ...) {
  if(x$vanilla)
    cat("Unit of length (unnamed)\n")
  else
    cat(paste(x$legend, "\n"))
  invisible(NULL)
}

#' Test whether two or more unitnames are mutually compatible;
#' 'coerce' controls whether vanilla units match anything.
compatible.unitname <- function(A, B, ..., coerce=TRUE) {
  A <- as.unitname(A)
  if(missing(B)) return(TRUE)
  B <- as.unitname(B)
  # check for null units
  Anull <- summary(A)$vanilla
  Bnull <- summary(B)$vanilla
  # `coerce' determines whether `vanilla' units are compatible with other units
  coerce <- as.logical(coerce)
  #
  agree <- if(!Anull && !Bnull) isTRUE(all.equal(A,B)) else
           if(Anull && Bnull) TRUE else
           coerce
  #
  if(!agree) return(FALSE)
  # A and B agree
  if(length(list(...)) == 0) return(TRUE)
  # recursion
  return(compatible.unitname(B, ...))
}

#' Reconcile several unitnames into a common one (error if they clash);
#' single=TRUE returns one unitname instead of a list.
harmonize.unitname <- harmonise.unitname <-
  function(..., coerce=TRUE, single=FALSE) {
  argh <- list(...)
  n <- length(argh)
  if(n == 0) return(NULL)
  u <- lapply(argh, as.unitname)
  if(n == 1) return(if(single) u[[1L]] else u)
  if(coerce) {
    #' vanilla units are compatible with another unit
    s <- lapply(u, summary)
    v <- sapply(s, getElement, name="vanilla")
    if(all(v)) return(if(single) u[[1L]] else u)
    u <- u[!v]
  }
  z <- unique(u)
  if(length(z) > 1)
    stop("Unitnames are incompatible", call.=FALSE)
  if(single) return(z[[1]])
  z <- rep(z, n)
  names(z) <- names(argh)
  return(z)
}

# class 'numberwithunit': numeric value(s) with unit of length

numberwithunit <- function(x, u) {
  u <- as.unitname(u)
  x <- as.numeric(x)
  unitname(x) <- u
  class(x) <- c(class(x), "numberwithunit")
  return(x)
}

#' Infix constructor: 42 %unit% "km"
"%unit%" <- function(x, u) { numberwithunit(x, u) }

format.numberwithunit <- function(x, ..., collapse=" x ", modifier=NULL) {
  u <- summary(unitname(x))
  ## singular form only when every value equals 1
  uname <- if(all(x == 1)) u$singular else u$plural
  y <- format(as.numeric(x), ...)
  z <- pasteN(paste(y, collapse=collapse), modifier, uname, u$explain)
  return(z)
}

as.character.numberwithunit <- function(x, ...) {
  return(format(x))
}

print.numberwithunit <- function(x, ...) {
  cat(format(x, ...), fill=TRUE)
  return(invisible(NULL))
}
spatstat/R/Kmeasure.R0000644000176200001440000004315313556771766014240 0ustar liggesusers#
# Kmeasure.R
#
# $Revision: 1.70 $ $Date: 2019/11/01 08:56:38 $
#
# Kmeasure() compute an estimate of the second order moment measure
#
# Kest.fft() use Kmeasure() to form an estimate of the K-function
#
# second.moment.calc() underlying algorithm
#
# second.moment.engine() underlying underlying algorithm!
# Kmeasure <- function(X, sigma, edge=TRUE, ..., varcov=NULL) { stopifnot(is.ppp(X)) sigma.given <- !missing(sigma) && !is.null(sigma) varcov.given <- !is.null(varcov) ngiven <- sigma.given + varcov.given if(ngiven == 2) stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) if(ngiven == 0) stop(paste("Please specify smoothing bandwidth", sQuote("sigma"), "or", sQuote("varcov"))) if(varcov.given) { stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) sigma <- NULL } else { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1,2)) stopifnot(all(sigma > 0)) if(length(sigma) == 2) { varcov <- diag(sigma^2) sigma <- NULL } } second.moment.calc(x=X, sigma=sigma, edge=edge, what="Kmeasure", varcov=varcov, ...) } second.moment.calc <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., varcov=NULL, expand=FALSE, obswin, npts=NULL, debug=FALSE) { if(is.null(sigma) && is.null(varcov)) stop("must specify sigma or varcov") obswin.given <- !missing(obswin) what <- match.arg(what) sig <- if(!is.null(sigma)) sigma else max(c(diag(varcov), sqrt(det(varcov)))) xtype <- if(is.ppp(x)) "ppp" else if(is.im(x)) "im" else if(inherits(x, "imlist")) "imlist" else if(all(sapply(x, is.im))) "imlist" else stop("x should be a point pattern or a pixel image") nimages <- switch(xtype, ppp = 1, im = 1, imlist = length(x)) win <- if(nimages == 1) as.owin(x) else as.owin(x[[1]]) win <- rescue.rectangle(win) rec <- as.rectangle(win) across <- min(diff(rec$xrange), diff(rec$yrange)) # determine whether to expand window if(!expand || (6 * sig < across)) { if(!obswin.given) obswin <- NULL result <- second.moment.engine(x, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=obswin, npts=npts, varcov=varcov) return(result) } #' need to expand window wid <- (7 * sig - across)/2 bigger <- grow.rectangle(rec, wid) switch(xtype, ppp = { # pixellate first (to preserve pixel 
## (tail of Kmeasure(): the enclosing switch() on the class of 'x'
##  begins before this excerpt)
           resolution)
           X <- pixellate(x, ..., padzero=TRUE)
           np <- npoints(x)
         },
         im = {
           X <- x
           np <- NULL
         },
         imlist = {
           X <- x
           np <- NULL
         })
  ## Now expand the image(s) to the larger bounding frame 'bigger'
  if(nimages == 1) {
    X <- rebound.im(X, bigger)
    X <- na.handle.im(X, 0)
  } else {
    X <- lapply(X, rebound.im, rect=bigger)
    X <- lapply(X, na.handle.im, na.replace=0)
  }
  ## handle override arguments
  ow <- if(obswin.given) obswin else win # may be NULL if given
  if(!is.null(npts)) np <- npts
  ## Compute!
  out <- second.moment.engine(X, sigma=sigma, edge=edge,
                              what=what, debug=debug, ...,
                              obswin=ow, varcov=varcov, npts=np)
  ## Now clip the result(s) back to the appropriate domain:
  ## second-order quantities live on the shifted frame 'fbox',
  ## smoothed/edge images on the original window 'win'
  fbox <- shift(rec, origin="midpoint")
  if(nimages == 1) {
    result <-
      switch(what,
             kernel     = out[fbox],
             smooth     = out[win],
             Kmeasure   = out[fbox],
             Bartlett   = out[fbox],
             edge       = out[win],
             smoothedge = list(smooth=out$smooth[win],
                               edge  =out$edge[win]),
             all        = list(kernel=out$kernel[fbox],
                               smooth=out$smooth[win],
                               Kmeasure=out$Kmeasure[fbox],
                               Bartlett=out$Bartlett[fbox],
                               edge=out$edge[win]))
  } else {
    result <-
      switch(what,
             kernel     = out[fbox],
             smooth     = lapply(out, "[", i=win),
             Kmeasure   = lapply(out, "[", i=fbox),
             Bartlett   = lapply(out, "[", i=fbox),
             edge       = out[win],
             smoothedge = list(
               smooth = lapply(out$smooth, "[", i=win),
               edge   = out$edge[win]),
             all        = list(
               kernel=out$kernel[fbox],
               smooth=lapply(out$smooth, "[", i=win),
               Kmeasure=lapply(out$Kmeasure, "[", i=fbox),
               Bartlett=lapply(out$Bartlett, "[", i=fbox),
               edge=out$edge[win]))
  }
  return(result)
}

## Workhorse for Kmeasure() and friends.
## Computes, via 2D FFT on a zero-padded raster, whichever of the
## following 'what' requests: the smoothing kernel image, the smoothed
## data image, the Bartlett spectrum, the second moment measure,
## and/or the kernel-window edge correction.
## 'x' may be a point pattern, a pixel image, or a list of images.
second.moment.engine <-
  function(x, sigma=NULL, edge=TRUE,
           what=c("Kmeasure", "kernel", "smooth",
                  "Bartlett", "edge", "smoothedge", "all"),
           ...,
           kernel="gaussian",
           scalekernel=is.character(kernel),
           obswin = as.owin(x), varcov=NULL,
           npts=NULL, debug=FALSE) {
  what <- match.arg(what)
  validate2Dkernel(kernel)
  obswin.given <- !missing(obswin) && !is.null(obswin)
  is.second.order <- what %in% c("Kmeasure", "Bartlett", "all")
  needs.kernel <- what %in% c("kernel", "all", "Kmeasure")
  returns.several <- what %in% c("all", "smoothedge")
  ## check whether Fastest Fourier Transform in the West is available
  west <- fftwAvailable()
  if(returns.several)
    result <- list() # several results will be returned in a list
  if(is.ppp(x)) {
    ## convert list of points to mass distribution
    X <- pixellate(x, ..., padzero=TRUE)
    if(is.null(npts)) npts <- npoints(x)
  } else X <- x
  ## normalise input to a list of images 'Xlist'
  if(is.im(X)) {
    Xlist <- list(X)
    nimages <- 1
  } else if(all(unlist(lapply(X, is.im)))) {
    Xlist <- X
    X <- Xlist[[1]]
    nimages <- length(Xlist)
    blanklist <- vector(mode="list", length=nimages)
    names(blanklist) <- names(Xlist)
  } else stop("internal error: unrecognised format for x")
  unitsX <- unitname(X)
  xstep <- X$xstep
  ystep <- X$ystep
  ## ensure obswin has same bounding frame as X
  if(!obswin.given) {
    obswin <- Window(X)
  } else if(!identical(Frame(obswin), Frame(X))) {
    obswin <- rebound.owin(obswin, as.rectangle(X))
  }
  ## go to work
  Y <- X$v
  Ylist <- lapply(Xlist, getElement, name="v")
  ## pad with zeroes to double size in each dimension
  ## (avoids wrap-around artifacts in the circular convolution)
  nr <- nrow(Y)
  nc <- ncol(Y)
  Ypad <- matrix(0, ncol=2*nc, nrow=2*nr)
  Ypadlist <- rep(list(Ypad), nimages)
  for(i in 1:nimages)
    Ypadlist[[i]][1:nr, 1:nc] <- Ylist[[i]]
  Ypad <- Ypadlist[[1]]
  lengthYpad <- 4 * nc * nr
  ## corresponding coordinates
  xcol.pad <- X$xcol[1] + xstep * (0:(2*nc-1))
  yrow.pad <- X$yrow[1] + ystep * (0:(2*nr-1))
  ## compute kernel and its Fourier transform
  if(!needs.kernel && identical(kernel, "gaussian") &&
     is.numeric(sigma) && (length(sigma) == 1) &&
     spatstat.options('developer')) {
    ## compute Fourier transform of kernel directly (*experimental*)
    ii <- c(0:(nr-1), nr:1)
    jj <- c(0:(nc-1), nc:1)
    zz <- -sigma^2 * pi^2/2
    uu <- exp(zz * ii^2)
    vv <- exp(zz * jj^2)
    fK <- outer(uu, vv, "*")
  } else {
    ## set up kernel on the padded grid (wrapped-coordinate layout)
    xcol.ker <- xstep * c(0:(nc-1),-(nc:1))
    yrow.ker <- ystep * c(0:(nr-1),-(nr:1))
    #' kerpixarea <- xstep * ystep
    if(identical(kernel, "gaussian")) {
      if(!is.null(sigma)) {
        ## isotropic Gaussian, bandwidth 'sigma'
        densX.ker <- dnorm(xcol.ker, sd=sigma)
        densY.ker <- dnorm(yrow.ker, sd=sigma)
        #' WAS: Kern <- outer(densY.ker, densX.ker, "*") * kerpixarea
        Kern <- outer(densY.ker, densX.ker, "*")
        Kern <- Kern/sum(Kern)
      } else if(!is.null(varcov)) {
        ## anisotropic kernel
        Sinv <- solve(varcov)
        halfSinv <- Sinv/2
        #' WAS:
        #' detSigma <- det(varcov)
        #' constker <- kerpixarea/(2 * pi * sqrt(detSigma))
        xsq <- matrix((xcol.ker^2)[col(Ypad)], ncol=2*nc, nrow=2*nr)
        ysq <- matrix((yrow.ker^2)[row(Ypad)], ncol=2*nc, nrow=2*nr)
        xy <- outer(yrow.ker, xcol.ker, "*")
        #' WAS: Kern <- constker * exp(....
        Kern <- exp(-(xsq * halfSinv[1,1]
                      + xy * (halfSinv[1,2]+halfSinv[2,1])
                      + ysq * halfSinv[2,2]))
        Kern <- Kern/sum(Kern)
      } else
        stop("Must specify either sigma or varcov")
    } else {
      ## non-Gaussian kernel
      ## evaluate kernel at array of points
      xker <- as.vector(xcol.ker[col(Ypad)])
      yker <- as.vector(yrow.ker[row(Ypad)])
      #' WAS: Kern <- kerpixarea * evaluate2Dkernel(...
      Kern <- evaluate2Dkernel(kernel, xker, yker,
                               sigma=sigma, varcov=varcov,
                               scalekernel=scalekernel, ...)
      ## clamp infinite values; NA/NaN are fatal
      if(!all(ok <- is.finite(Kern))) {
        if(anyNA(Kern))
          stop("kernel function produces NA values")
        if(any(is.nan(Kern)))
          stop("kernel function produces NaN values")
        ra <- range(Kern[ok])
        Kern[Kern == Inf] <- ra[2]
        Kern[Kern == -Inf] <- ra[1]
      }
      Kern <- matrix(Kern, ncol=2*nc, nrow=2*nr)
      Kern <- Kern/sum(Kern)
    }
    if(what %in% c("kernel", "all")) {
      ## kernel will be returned
      ## first rearrange it into spatially sensible order (monotone x and y)
      rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1
      ctwist <- (-nc):(nc-1) %% (2*nc) + 1
      if(debug) {
        if(any(fave.order(xcol.ker) != rtwist))
          splat("something round the twist")
      }
      Kermit <- Kern[ rtwist, ctwist]
      ker <- im(Kermit, xcol.ker[ctwist], yrow.ker[ rtwist],
                unitname=unitsX)
      if(what == "kernel") return(ker) else result$kernel <- ker
    }
    ## convolve using fft
    fK <- fft2D(Kern, west=west)
  }
  if(what != "edge") {
    ## smoothed image(s): inverse FFT of (data transform * kernel transform)
    if(nimages == 1) {
      fY <- fft2D(Ypad, west=west)
      sm <- fft2D(fY * fK, inverse=TRUE, west=west)/lengthYpad
      if(debug) {
        splat("smooth: maximum imaginary part=",
              signif(max(Im(sm)),3))
        if(!is.null(npts))
          splat("smooth: mass error=",
                signif(sum(Mod(sm))-npts,3))
      }
    } else {
      fYlist <- smlist <- blanklist
      for(i in 1:nimages) {
        fYlist[[i]] <- fY.i <- fft2D(Ypadlist[[i]], west=west)
        smlist[[i]] <- sm.i <-
          fft2D(fY.i * fK, inverse=TRUE, west=west)/lengthYpad
        if(debug) {
          splat("smooth component", i,
                ": maximum imaginary part=",
                signif(max(Im(sm.i)),3))
          if(!is.null(npts))
            splat("smooth component", i,
                  ": mass error=",
                  signif(sum(Mod(sm.i))-npts,3))
        }
      }
    }
  }
  if(what %in% c("smooth", "all", "smoothedge")) {
    ## compute smoothed point pattern without edge correction
    if(nimages == 1) {
      smo <- im(Re(sm)[1:nr, 1:nc],
                xcol.pad[1:nc], yrow.pad[1:nr],
                unitname=unitsX)
      if(what == "smooth") {
        return(smo)
      } else {
        result$smooth <- smo
      }
    } else {
      smolist <- blanklist
      for(i in 1:nimages)
        smolist[[i]] <- im(Re(smlist[[i]])[1:nr, 1:nc],
                           xcol.pad[1:nc], yrow.pad[1:nr],
                           unitname=unitsX)
      smolist <- as.solist(smolist)
      if(what == "smooth") {
        return(smolist)
      } else {
        result$smooth <- smolist
      }
    }
  }
  if(is.second.order) {
    ## compute Bartlett spectrum
    if(nimages == 1) {
      bart <- BartCalc(fY, fK)  ## bart <- Mod(fY)^2 * fK
    } else {
      bartlist <- lapply(fYlist, BartCalc, fK=fK)
    }
  }
  if(what %in% c("Bartlett", "all")) {
    ## Bartlett spectrum will be returned
    ## rearrange into spatially sensible order (monotone x and y)
    rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1
    ctwist <- (-nc):(nc-1) %% (2*nc) + 1
    if(nimages == 1) {
      Bart <- bart[ rtwist, ctwist]
      Bartlett <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1))
      if(what == "Bartlett") return(Bartlett) else result$Bartlett <- Bartlett
    } else {
      Bartlist <- blanklist
      for(i in 1:nimages) {
        Bart <- (bartlist[[i]])[ rtwist, ctwist]
        Bartlist[[i]] <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1))
      }
      Bartlist <- as.solist(Bartlist)
      if(what == "Bartlett") return(Bartlist) else result$Bartlett <- Bartlist
    }
  }
  ####  ------- Second moment measure  --------------
  if(is.second.order) {
    if(nimages == 1) {
      mom <- fft2D(bart, inverse=TRUE, west=west)/lengthYpad
      if(debug) {
        splat("2nd moment measure: maximum imaginary part=",
              signif(max(Im(mom)),3))
        if(!is.null(npts))
          splat("2nd moment measure: mass error=",
                signif(sum(Mod(mom))-npts^2, 3))
      }
      mom <- Mod(mom)
      ## subtract (delta_0 * kernel) * npts
      if(is.null(npts))
        stop("Internal error: second moment measure requires npts")
      mom <- mom - npts* Kern
    } else {
      momlist <- blanklist
      for(i in 1:nimages) {
        mom.i <- fft2D(bartlist[[i]], inverse=TRUE, west=west)/lengthYpad
        if(debug) {
          splat("2nd moment measure: maximum imaginary part=",
                signif(max(Im(mom.i)),3))
          if(!is.null(npts))
            splat("2nd moment measure: mass error=",
                  signif(sum(Mod(mom.i))-npts^2, 3))
        }
        mom.i <- Mod(mom.i)
        ## subtract (delta_0 * kernel) * npts
        if(is.null(npts))
          stop("Internal error: second moment measure requires npts")
        mom.i <- mom.i - npts* Kern
        momlist[[i]] <- mom.i
      }
    }
  }
  ## edge correction
  if(edge || what %in% c("edge", "all", "smoothedge")) {
    M <- as.mask(obswin, xy=list(x=X$xcol, y=X$yrow))$m
    ## previous line ensures M has same dimensions and scale as Y
    Mpad <- matrix(0, ncol=2*nc, nrow=2*nr)
    Mpad[1:nr, 1:nc] <- M
    lengthMpad <- 4 * nc * nr
    fM <- fft2D(Mpad, west=west)
    if(edge && is.second.order) {
      ## compute kernel-smoothed set covariance
      ## apply edge correction
      co <- fft2D(Mod(fM)^2 * fK, inverse=TRUE, west=west)/lengthMpad
      co <- Mod(co)
      a <- sum(M)
      wt <- a/co
      ## cap the edge-correction weights
      me <- spatstat.options("maxedgewt")
      weight <- matrix(pmin.int(me, wt), ncol=2*nc, nrow=2*nr)
      ## apply edge correction to second moment measure
      if(nimages == 1) {
        mom <- mom * weight
        ## set to NA outside 'reasonable' region
        mom[wt > 10] <- NA
      } else {
        wgt10 <- (wt > 10)
        for(i in 1:nimages) {
          mom.i <- momlist[[i]]
          mom.i <- mom.i * weight
          ## set to NA outside 'reasonable' region
          mom.i[wgt10] <- NA
          momlist[[i]] <- mom.i
        }
      }
    }
  }
  if(is.second.order) {
    ## rearrange second moment measure
    ## into spatially sensible order (monotone x and y)
    rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1
    ctwist <- (-nc):(nc-1) %% (2*nc) + 1
    if(nimages == 1) {
      mom <- mom[ rtwist, ctwist]
    } else {
      momlist <- lapply(momlist, "[", i=rtwist, j=ctwist)
    }
    if(debug) {
      if(any(fave.order(xcol.ker) != rtwist))
        splat("internal error: something round the twist")
    }
  }
  if(what %in% c("edge", "all", "smoothedge")) {
    ## return convolution of window with kernel
    ## (evaluated inside window only)
    con <- fft2D(fM * fK, inverse=TRUE, west=west)/lengthMpad
    edg <- Mod(con[1:nr, 1:nc])
    edg <- im(edg, xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX)
    if(what == "edge") return(edg) else result$edge <- edg
  }
  if(what == "smoothedge") return(result)
  ## Second moment measure, density estimate
  ## Divide by number of points * lambda and convert mass to density
  pixarea <- xstep * ystep
  if(nimages == 1) {
    mom <- mom * area(obswin) / (pixarea * npts^2)
    ## this is the second moment measure
    mm <- im(mom, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX)
    if(what == "Kmeasure") return(mm) else result$Kmeasure <- mm
  } else {
    ccc <- area(obswin) / (pixarea * npts^2)
    mmlist <- blanklist
    for(i in 1:nimages) {
      mom.i <- momlist[[i]]
      mom.i <- mom.i * ccc
      ## this is the second moment measure
      mmlist[[i]] <- im(mom.i,
                        xcol.ker[ctwist], yrow.ker[rtwist],
                        unitname=unitsX)
    }
    mmlist <- as.solist(mmlist)
    if(what == "Kmeasure") return(mmlist) else result$Kmeasure <- mmlist
  }
  ## what = "all", so return all computed objects
  return(result)
}

## Bartlett spectrum, given the FFT of the data (fY) and kernel (fK)
BartCalc <- function(fY, fK) { Mod(fY)^2 * fK }

## Border-corrected estimate of the K function computed via FFT,
## by radially cumulating the second moment measure from Kmeasure().
Kest.fft <- function(X, sigma, r=NULL, ..., breaks=NULL) {
  verifyclass(X, "ppp")
  W <- Window(X)
  lambda <- npoints(X)/area(W)
  rmaxdefault <- rmax.rule("K", W, lambda)
  bk <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
  breaks <- bk$val
  rvalues <- bk$r
  u <- Kmeasure(X, sigma, ...)
  xx <- rasterx.im(u)
  yy <- rastery.im(u)
  rr <- sqrt(xx^2 + yy^2)
  ## weighted histogram of pixel distances, then cumulate
  tr <- whist(rr, breaks, u$v)
  K <- cumsum(tr) * with(u, xstep * ystep)
  ## estimate is unreliable beyond the first NA pixel distance
  rmax <- min(rr[is.na(u$v)])
  K[rvalues >= rmax] <- NA
  result <- data.frame(r=rvalues, theo=pi * rvalues^2, border=K)
  w <- X$window
  alim <- c(0, min(diff(w$xrange), diff(w$yrange))/4)
  out <- fv(result, "r", quote(K(r)), "border", . ~ r, alim,
            c("r", "%s[pois](r)", "hat(%s)[fb](r)"),
            c("distance argument r",
              "theoretical Poisson %s",
              "border-corrected FFT estimate of %s"),
            fname="K",
            unitname=unitname(X)
            )
  return(out)
}
spatstat/R/pointsonlines.R0000644000176200001440000000320513333543255015353 0ustar liggesusers#
# pointsonlines.R
#
# place points at regular intervals along line segments
#
# $Revision: 1.8 $ $Date: 2018/07/11 05:51:05 $
#

## Place points spaced (approximately) 'eps' apart along each segment
## of the line segment pattern X; returns a ppp with a "map" attribute
## giving (segment id, fractional position) for each point.
pointsOnLines <- function(X, eps=NULL, np=1000, shortok=TRUE) {
  stopifnot(is.psp(X))
  len <- lengths.psp(X)
  nseg <- length(len)
  if(is.null(eps)) {
    ## derive spacing from requested total number of points
    stopifnot(is.numeric(np) && length(np) == 1)
    stopifnot(is.finite(np) && np > 0)
    eps <- sum(len)/np
  } else {
    stopifnot(is.numeric(eps) && length(eps) == 1)
    stopifnot(is.finite(eps) && eps > 0)
  }
  ## initialise
  Xdf <- as.data.frame(X)
  xmid <- with(Xdf, (x0+x1)/2)
  ymid <- with(Xdf, (y0+y1)/2)
  ## handle very short segments
  ## allsegs <- 1:nseg
  if(any(short <- (len <= eps)) && shortok) {
    ## very short segments: use midpoints
    Z <- data.frame(x = xmid[short],
                    y = ymid[short],
                    seg = which(short),
                    tp = 0.5)
  } else Z <- data.frame(x=numeric(0), y=numeric(0),
                         seg=integer(0), tp=numeric(0))
  ## handle other segments
  for(i in (1:nseg)[!short]) {
    ## divide segment into pieces of length eps
    ## with shorter bits at each end
    leni <- len[i]
    nwhole <- floor(leni/eps)
    if(leni/eps - nwhole < 0.5 && nwhole > 2)
      nwhole <- nwhole - 1
    rump <- (leni - nwhole * eps)/2
    brks <- c(0, rump + (0:nwhole) * eps, leni)
    nbrks <- length(brks)
    ## points at middle of each piece
    ss <- (brks[-1] + brks[-nbrks])/2
    tp <- ss/leni
    x <- with(Xdf, x0[i] + tp * (x1[i]-x0[i]))
    y <- with(Xdf, y0[i] + tp * (y1[i]-y0[i]))
    Z <- rbind(Z, data.frame(x=x, y=y, seg=i, tp=tp))
  }
  result <- as.ppp(Z[,c("x","y")], W=X$window)
  attr(result, "map") <- Z[,c("seg", "tp")]
  return(result)
}
spatstat/R/reduceformula.R0000644000176200001440000000627413333543255015315 0ustar liggesusers#
# reduceformula.R
#
# $Revision: 1.7 $ $Date: 2016/12/30 01:44:07 $
#
# delete variable from formula
#
#......................................................
#

## Remove the variable 'deletevar' from the model formula 'fmla'.
## Returns the simplified formula, the unchanged formula if it does
## not involve 'deletevar', or NULL if it cannot simplify.
reduceformula <- function(fmla, deletevar, verbose=FALSE) {
  ## removes the variable `deletevar' from the formula `fmla'
  ## returns a simplified formula, or NULL if it can't simplify.
  stopifnot(inherits(fmla, "formula"))
  stopifnot(is.character(deletevar) && length(deletevar) == 1)
  if(!(deletevar %in% all.vars(as.expression(fmla)))) {
    if(verbose)
      message(paste("The formula does not involve",
                    dQuote(deletevar),
                    "and is therefore unchanged"))
    return(fmla)
  }
  ## left-hand side (response), if present
  lhs <- if(length(fmla) < 3) NULL else fmla[[2]]
  ## create terms object
  tt <- attributes(terms(fmla))
  ## formula.has.intercept <- (tt$intercept == 1)
  ## extract all variables appearing in the model
  vars <- as.list(tt$variables)[-1]
  nvars <- length(vars)
  varexprs <- lapply(vars, as.expression)
  varstrings <- sapply(varexprs, paste)
  ## identify any offsets
  offs <- tt$offset
  v.is.offset <- if(!is.null(offs)) (1:nvars) %in% offs else
                 rep(FALSE, nvars)
  ## remove the response
  repo <- tt$response
  if(repo != 0) {
    vars <- vars[-repo]
    varstrings <- varstrings[-repo]
    varexprs <- varexprs[-repo]
    v.is.offset <- v.is.offset[-repo]
  }
  ## a term may be a variable name
  ## v.is.name <- sapply(vars, is.name)
  ## a term may be an expression like sin(x), poly(x,y,degree=2)
  v.args <- lapply(varexprs, all.vars)
  matches.delete <- lapply(v.args, "==", deletevar)
  v.has.delete <- sapply(matches.delete, any)
  v.has.other <- !sapply(matches.delete, all)
  v.is.mixed <- v.has.delete & v.has.other
  ## we can't handle mixed terms like sin(x-d), poly(x,d)
  ## where d is to be deleted. Handling these would require
  ## knowledge about the functions sin and poly.
  if(any(v.is.mixed)) {
    nmixed <- sum(v.is.mixed)
    if(verbose)
      message(paste("Don't know how to reduce the",
                    ngettext(nmixed, "term", "terms"),
                    paste(dQuote(varstrings[v.is.mixed]),
                          collapse=",")))
    return(NULL)
  }
  ## OK. We have identified all first order terms to be deleted.
  condemned.names <- varstrings[v.has.delete]
  ## Determine the terms of all orders that include these first order terms
  ## (1) terms with model coefficients
  fax <- tt$factors
  if(prod(dim(fax)) == 0)
    retained.terms <- character(0) else {
    ## Rows are first order terms
    condemned.row <- rownames(fax) %in% condemned.names
    ## Columns are the terms of all orders
    allterms <- colnames(fax)
    ## Find all columns containing a 1 in a row that is to be deleted
    if(any(condemned.row)) {
      condemned.column <- matcolany(fax[condemned.row, , drop=FALSE] != 0)
      retained.terms <- allterms[!condemned.column]
    } else retained.terms <- allterms
  }
  ## (2) offsets if any
  if(any(v.is.offset))
    retained.terms <- c(retained.terms,
                        varstrings[v.is.offset & !v.has.delete])
  ## (3) intercept forced?
  if(length(retained.terms) == 0)
    retained.terms <- "1"
  ## OK. Cut-and-paste
  f <- paste(lhs, "~", paste(retained.terms, collapse=" + "))
  return(as.formula(f))
}
spatstat/R/profilepl.R0000644000176200001440000002761613606002167014452 0ustar liggesusers#
# profilepl.R
#
# $Revision: 1.46 $ $Date: 2020/01/10 03:27:35 $
#
#  computes profile log pseudolikelihood
#

profilepl <- local({

  ## Determine edge correction
  ## with partial matching, avoiding collisions with
  ## other arguments to ppm that have similar names.
  ## Extract the 'correction' argument from ... while soaking up
  ## similarly-named ppm arguments so partial matching cannot collide.
  getppmcorrection <- function(...,
                               correction = "border",
                               covariates = NULL,
                               covfunargs = NULL,
                               control = NULL) {
    return(correction)
  }

  ## TRUE if x is a single NA value
  isSingleNA <- function(x) { length(x) == 1 && is.na(x) }

  ## Fit the model for each row of the parameter data frame 's',
  ## recording the maximised log pseudolikelihood (or -AIC if aic=TRUE),
  ## and return the profile together with the optimal fitted model.
  profilepl <- function(s, f, ..., aic=FALSE, rbord=NULL,
                        verbose=TRUE, fast=TRUE) {
    callenv <- parent.frame()
    s <- as.data.frame(s)
    n <- nrow(s)
    stopifnot(is.function(f))
    ## validate 's'
    parms <- names(s)
    fargs <- names(formals(f))
    if(!all(fargs %in% parms)) {
      bad <- !(fargs %in% parms)
      ## an argument missing from 's' is forgiven if its default is NA
      forgiven <- sapply(formals(f)[bad], isSingleNA)
      if(!all(forgiven)) {
        slecht <- fargs[bad[!forgiven]]
        nsl <- length(slecht)
        stop(paste(ngettext(nsl, "Argument", "Arguments"),
                   commasep(sQuote(slecht)),
                   ngettext(nsl, "is", "are"),
                   "not provided in the data frame s"))
      }
    }
    ## extra columns in 's' are assumed to be parameters of covariate functions
    is.farg <- parms %in% fargs
    pass.cfa <- any(!is.farg)
    got.cfa <- "covfunargs" %in% names(list(...))
    if(pass.cfa && got.cfa)
      stop("Some columns in s are superfluous")
    ##
    criterion <- numeric(n)
    ## make a fake call
    pseudocall <- match.call()
    pseudocall[[1]] <- as.symbol("ppm")
    namcal <- names(pseudocall)
    ## remove arguments 's' and 'verbose'
    retain <- !(namcal %in% c("s", "verbose"))
    pseudocall <- pseudocall[retain]
    namcal <- namcal[retain]
    ## place 'f' argument third
    np <- length(pseudocall)
    fpos <- (1:np)[namcal == "f"]
    indices <- (1:np)[-fpos]
    if(length(indices) < 3) {
      indices <- c(indices, fpos)
    } else {
      indices <- c(indices[1:3], fpos, indices[-(1:3)])
    }
    pseudocall <- pseudocall[indices]
    namcal <- names(pseudocall)
    namcal[namcal=="f"] <- "interaction"
    names(pseudocall) <- namcal
    ## get correction
    correction <- getppmcorrection(...)
    if(correction == "border") {
      ## determine border correction distance
      if(is.null(rbord)) {
        ## compute rbord = max reach of interactions
        if(verbose) message("(computing rbord)")
        for(i in 1:n) {
          fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE]))
          if(!inherits(fi, "interact"))
            stop(paste("f did not yield an object of class",
                       sQuote("interact")))
          re <- reach(fi)
          if(is.null(rbord)) rbord <- re else
          if(rbord < re) rbord <- re
        }
      }
    }
    ## determine whether computations can be saved
    if(pass.cfa || got.cfa) {
      savecomp <- FALSE
    } else {
      Q <- do.call(ppm,
                   append(list(...), list(rbord=rbord, justQ=TRUE)),
                   envir=callenv)
      savecomp <- !oversize.quad(Q)
    }
    ## go
    gc()
    if(verbose) {
      message(paste("comparing", n, "models..."))
      pstate <- list()
    }
    for(i in 1:n) {
      if(verbose)
        pstate <- progressreport(i, n, state=pstate)
      fi <- do.call(f, as.list(s[i, is.farg, drop=FALSE]))
      if(!inherits(fi, "interact"))
        stop(paste("f did not yield an object of class",
                   sQuote("interact")))
      if(pass.cfa)
        cfai <- list(covfunargs=as.list(s[i, !is.farg, drop=FALSE]))
      ## fit model
      if(i == 1) {
        ## fit from scratch
        arg1 <- list(...,
                     interaction=fi,
                     rbord=rbord,
                     savecomputed=savecomp,
                     warn.illegal=FALSE,
                     callstring="",
                     skip.border=TRUE,
                     clip.interaction=!fast)
        if(pass.cfa) arg1 <- append(arg1, cfai)
        fiti <- do.call(ppm, arg1, envir=callenv)
        ## save intermediate computations (pairwise distances, etc)
        precomp <- fiti$internal$computed
        savedargs <- list(...,
                          rbord=rbord,
                          precomputed=precomp,
                          warn.illegal=FALSE,
                          callstring="",
                          skip.border=TRUE,
                          clip.interaction=!fast)
      } else {
        ## use precomputed data
        argi <- append(savedargs, list(interaction=fi))
        if(pass.cfa) argi <- append(argi, cfai)
        fiti <- do.call(ppm, argi, envir=callenv)
      }
      ## save log pl for each fit
      criterion[i] <- if(aic) -AIC(fiti) else
                      as.numeric(logLik(fiti, warn=FALSE))
      ## save fitted coefficients for each fit
      co <- coef(fiti)
      if(i == 1) {
        allcoef <- data.frame(matrix(co, nrow=1))
        names(allcoef) <- names(co)
      } else allcoef <- rbind(allcoef, co)
    }
    if(verbose) message("fitting optimal model...")
    opti <- which.max(criterion)
    gc()
    ## refit at the optimal parameter values
    optint <- do.call(f, as.list(s[opti, is.farg, drop=FALSE]))
    optarg <- list(..., interaction=optint, rbord=rbord)
    if(pass.cfa) {
      optcfa <- as.list(s[opti, !is.farg, drop=FALSE])
      attr(optcfa, "fitter") <- "profilepl"
      optarg <- append(optarg, list(covfunargs=optcfa))
    }
    optfit <- do.call(ppm, optarg, envir=callenv)
    if(verbose) message("done.")
    critname <- if(aic) "-AIC" else
                if(is.poisson(optfit)) "log l" else
                if(optfit$method == "logi") "log CL" else "log PL"
    result <- list(param=s,
                   prof=criterion,
                   critname=critname,
                   iopt=opti,
                   fit=optfit,
                   rbord=rbord,
                   fname=as.interact(optfit)$name,
                   allcoef=allcoef,
                   otherstuff=list(...),
                   pseudocall=pseudocall)
    class(result) <- c("profilepl", class(result))
    return(result)
  }

  profilepl
})

##
##   print method
##
print.profilepl <- function(x, ...) {
  head1 <- "profile log pseudolikelihood"
  head2 <- "for model: "
  psc <- paste(unlist(strsplitretain(format(x$pseudocall))),
               collapse=" ")
  if(nchar(psc) + nchar(head2) + 1 <= getOption('width')) {
    splat(head1)
    splat(head2, psc)
  } else {
    splat(head1, head2)
    splat(psc)
  }
  nparm <- ncol(x$param)
  if(waxlyrical('extras')) {
    corx <- x$fit$correction
    if(identical(corx, "border") && !is.null(x$rbord))
      splat("fitted with rbord =", x$rbord)
    splat("interaction:", x$fname)
    splat("irregular",
          ngettext(nparm, "parameter:", "parameters:\n"),
          paste(names(x$param), "in",
                unlist(lapply(lapply(as.list(x$param), range), prange)),
                collapse="\n"))
  }
  popt <- x$param[x$iopt,, drop=FALSE]
  splat("optimum",
        ngettext(nparm, "value", "values"),
        "of irregular",
        ngettext(nparm, "parameter: ", "parameters:\n"),
        commasep(paste(names(popt), "=", popt)))
  invisible(NULL)
}

##
##   summary method
##
summary.profilepl <- function(object, ...) {
  print(object)
  cat("\n\noptimal model:\n")
  print(object$fit)
}

## extract the optimal fitted model
as.ppm.profilepl <- function(object) {
  object$fit
}

fitin.profilepl <- function(object) {
  fitin(as.ppm(object))
}

predict.profilepl <- function(object, ...)
## (continuation of the 'residualfield' helper opened above)
      return(d$smooth$Z$v)
    }

  ## Q-Q plot of the smoothed residual field of a fitted model against
  ## simulations from the model, an expression, or supplied patterns.
  qqplot.ppm <-
    function(fit, nsim=100, expr=NULL, ..., type="raw",
             style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE,
             dimyx=NULL, nrep=if(fast) 5e4 else 1e5,
             control=update(default.rmhcontrol(fit), nrep=nrep),
             saveall=FALSE,
             monochrome=FALSE,
             limcol=if(monochrome) "black" else "red",
             maxerr=max(100, ceiling(nsim/10)),
             check=TRUE, repair=TRUE, envir.expr) {
      verifyclass(fit, "ppm")
      if(check && damaged.ppm(fit)) {
        if(!repair)
          stop("object format corrupted; try update(fit, use.internal=TRUE)")
        message("object format corrupted; repairing it.")
        fit <- update(fit, use.internal=TRUE)
      }
      if(fast) {
        ## temporarily coarsen the pixel raster for speed
        oldnpixel <- spatstat.options("npixel")
        if(is.null(dimyx))
          dimyx <- pmin(40, rev(oldnpixel))
        spatstat.options(npixel=rev(dimyx))
      }
      ################ How to evaluate residuals ##########################
      ## Quantiles of the residual field will be computed.
      ##  Data values
      dat <- residualfield(fit, type=type, ..., dimyx=dimyx)
      ################## How to perform simulations? #######################
      ## envir.call <- sys.parent()
      envir.here <- sys.frame(sys.nframe())
      ## extract.from.list <- FALSE
      inext <- 0 # to placate package checker
      dont.complain.about(inext)
      if(is.null(expr)) {
        ## We will simulate from the fitted model 'nsim' times
        ## and refit the model to these simulations
        simsource <- "fit"
        how.simulating <- "simulating from fitted model"
        ## prepare rmh arguments
        rcontrol <- rmhcontrol(control)
        rmodel <- rmhmodel(fit, control=rcontrol, project=FALSE,
                           verbose=verbose)
        rstart <- rmhstart(n.start=data.ppm(fit)$n)
        ## pre-digest arguments
        rmhinfolist <- rmh(rmodel, rstart, rcontrol, preponly=TRUE,
                           verbose=FALSE)
        ## expression to be evaluated each time
        expr <- expression(
          refit(fit, rmhEngine(rmhinfolist, verbose=FALSE)))
        envir.expr <- envir.here
        ## pacify code checkers
        dont.complain.about(rmhinfolist)
      } else if(is.expression(expr)) {
        simsource <- "expr"
        how.simulating <- paste("evaluating", sQuote("expr"))
        if(missing(envir.expr) || is.null(envir.expr))
          envir.expr <- parent.frame()
      } else if(inherits(expr, "envelope")) {
        ## use the point patterns stored in an envelope object
        simpat <- attr(expr, "simpatterns")
        if(!is.null(simpat) && all(sapply(simpat, is.ppp))) {
          expr <- expression(simpat[[inext]])
          envir.expr <- envir.here
          dont.complain.about(simpat)
          simsource <- "list"
          how.simulating <- "extracting point pattern from list"
        } else stop(paste("Argument", sQuote("expr"),
                          "is an envelope object,",
                          "but does not contain point patterns"),
                    call.=FALSE)
      } else if(is.list(expr) && all(sapply(expr, is.ppp))) {
        simpat <- expr
        expr <- expression(simpat[[inext]])
        envir.expr <- envir.here
        dont.complain.about(simpat)
        simsource <- "list"
        how.simulating <- "extracting point pattern from list"
      } else stop(paste(sQuote("expr"),
                        "should be an expression, or an envelope object,",
                        "or a list of point patterns"),
                  call.=FALSE)
      exprstring <- if(simsource == "expr") deparse(expr) else NULL
      ######  Perform simulations
      if(verbose) {
        cat(paste("Simulating", nsim, "realisations... "))
        pstate <- list()
      }
      simul.sizes <- numeric(nsim)
      isim <- 0
      ierr <- 0
      repeat {
        inext <- isim + 1
        ## protect from randomly-generated crashes in gam
        ei <- try(eval(expr, envir=envir.expr), silent=!verbose)
        if(inherits(ei, "try-error")) {
          ## error encountered in evaluating 'expr'
          ierr <- ierr + 1
          if(ierr > maxerr)
            stop(paste("Exceeded maximum of", maxerr,
                       "failures in", how.simulating,
                       "after generating only",
                       isim, "realisations")) else break
        } else {
          ## simulation successful
          isim <- isim + 1
          fiti <- if(simsource == "fit") ei else
                  if(is.ppm(ei)) ei else
                  if(is.ppp(ei)) refit(fit, ei) else
                  stop("result of eval(expr) is not a ppm or ppp object")
          ## diagnostic info
          simul.sizes[isim] <- data.ppm(fiti)$n
          ## compute residual field
          resi <- residualfield(fiti, type=type, ..., dimyx=dimyx)
          if(isim == 1)
            sim <- array(, dim=c(dim(resi), nsim))
          sim[,,isim] <- resi
          if(verbose)
            pstate <- progressreport(isim, nsim, state=pstate)
          if(isim >= nsim) break
        }
      }
      ###### Report diagnostics
      if(ierr > 0)
        cat(paste("\n\n**Alert:", ierr, "failures occurred in",
                  how.simulating, "\n\n"))
      nempty <- sum(simul.sizes == 0)
      if(nempty > 0)
        cat(paste("\n\n**Alert:", nempty, "out of", nsim,
                  "simulated patterns were empty.\n\n")) else
        cat(paste("\nDiagnostic info:\n",
                  "simulated patterns contained an average of",
                  mean(simul.sizes), "points.\n"))
      if(nempty == nsim)
        warning("All simulated patterns were empty")
      ############  Plot them
      switch(style,
             classical = {
               ## classical Q-Q plot of pooled simulated values vs data
               rr <- range(c(dat,sim))
               result <- qqplot(sim, dat,
                                xlim=rr, ylim=rr, asp=1.0,
                                xlab="Quantiles of simulation",
                                ylab="Quantiles of data",
                                plot.it=plot.it)
               title(sub=paste("Residuals:", type))
               abline(0,1, lty=2)
               result <- append(result,
                                list(data=dat, sim=sim,
                                     xlim=rr, ylim=rr,
                                     xlab="Quantiles of simulation",
                                     ylab="Quantiles of data",
                                     rtype=type,
                                     nsim=nsim,
                                     fit=fit,
                                     expr=exprstring,
                                     simsource = simsource
                                     )
                                )
             },
             mean = {
               ## compute quantiles corresponding to probabilities p[i]
               ## separately in each realisation.
               if(verbose) cat("Calculating quantiles...")
               if(fast) {
                 p <- ppoints(min(100,length(dat)), 3/8)
                 qsim <- apply(sim, 3, quantile, probs=p, na.rm=TRUE)
               } else {
                 qsim <- apply(sim, 3, sort, na.last=TRUE)
               }
               if(verbose) cat("averaging...")
               ## sample mean of each quantile
               meanq <- apply(qsim, 1, mean, na.rm=TRUE)
               ## et cetera
               varq <- apply(qsim, 1, var, na.rm=TRUE)
               sdq <- sqrt(varq)
               q.025 <- apply(qsim, 1, quantile, probs=0.025, na.rm=TRUE)
               q.975 <- apply(qsim, 1, quantile, probs=0.975, na.rm=TRUE)
               rr <- range(c(meanq,dat), na.rm=TRUE)
               dats <- if(fast) quantile(dat, probs=p, na.rm=TRUE) else
                       sort(dat, na.last=TRUE)
               if(verbose) cat("..Done.\n")
               if(plot.it) {
                 plot(meanq, dats,
                      xlab="Mean quantile of simulations",
                      ylab="data quantile",
                      xlim=rr, ylim=rr, asp=1.0)
                 abline(0,1)
                 lines(meanq, q.025, lty=2, col=limcol)
                 lines(meanq, q.975, lty=2, col=limcol)
                 title(sub=paste("Residuals:", type))
               }
               result <- list(x=meanq, y=dats, sdq=sdq,
                              q.025=q.025, q.975=q.975,
                              data=dat, sim=sim,
                              xlim=rr, ylim=rr,
                              xlab="Mean quantile of simulations",
                              ylab="data quantile",
                              rtype=type,
                              nsim=nsim,
                              fit=fit,
                              expr=exprstring,
                              simsource=simsource)
             },
             stop(paste("Unrecognised option for", sQuote("style")))
             )
      ## Throw out baggage if not wanted
      if(!saveall) {
        result$fit <- summary(fit, quick=TRUE)
        result$sim <- NULL
      }
      ## reset npixel
      if(fast)
        spatstat.options(npixel=oldnpixel)
      ##
      class(result) <- c("qqppm", class(result))
      return(invisible(result))
    }

  qqplot.ppm
})

plot.qqppm <- local({

  plot.qqppm <- function(x, ..., limits=TRUE,
                         monochrome=spatstat.options('monochrome'),
                         limcol=if(monochrome) "black" else "red") {
    stopifnot(inherits(x, "qqppm"))
    ## draw lines rather than points when there are many quantiles
    default.type <- if(length(x$x) > 150) "l" else "p"
    do.call(myplot,
            resolve.defaults(list(x, ..., type=default.type,
                                  limits=limits, limcol=limcol)))
    return(invisible(x))
  }

  myplot <- function(object,
                     xlab = object$xlab, ylab = object$ylab,
                     xlim = object$xlim, ylim = object$ylim,
                     asp = 1, type = default.type, ...,
                     limits=TRUE, limcol="red") {
    plot(object$x, object$y,
         xlab = xlab, ylab = ylab,
         xlim = xlim, ylim = ylim,
         asp = asp, type = type, ...)
    abline(0, 1)
    if(limits) {
      ## 2.5% and 97.5% simulation envelope, when available
      if(!is.null(object$q.025))
        lines(object$x, object$q.025, lty = 2, col=limcol)
      if(!is.null(object$q.975))
        lines(object$x, object$q.975, lty = 2, col=limcol)
    }
    title(sub=paste("Residuals:", object$rtype))
  }

  plot.qqppm
})

print.qqppm <- function(x, ...)
{
  stopifnot(inherits(x, "qqppm"))
  splat("Q-Q plot of point process residuals",
        "of type", sQuote(x$rtype), "\n",
        "based on", x$nsim, "simulations")
  simsource <- x$simsource
  if(is.null(simsource)) # old version
    simsource <- if(x$simulate.from.fit) "fit" else "expr"
  switch(simsource,
         fit = {
           fit <- x$fit
           sumfit <- if(is.ppm(fit)) summary(fit, quick=TRUE) else
                     if(inherits(fit, "summary.ppm")) fit else
                     list(name="(unrecognised format)")
           splat("\nSimulations from fitted model:", sumfit$name)
         },
         expr = {
           splat("Simulations obtained by evaluating the following expression:")
           print(x$expr)
         },
         list = {
           splat("Simulated point patterns were provided in a list")
         })
  invisible(NULL)
}
spatstat/R/areadiff.R0000644000176200001440000002422613421032737014212 0ustar liggesusers#
#
#     areadiff.R
#
#  $Revision: 1.38 $ $Date: 2019/01/20 08:46:55 $
#
# Computes sufficient statistic for area-interaction process
#
# Invokes areadiff.c
#

# areaLoss = area lost by removing X[i] from X
# (dispatches to exact Dirichlet algorithm or grid approximation)
areaLoss <- function(X, r, ...,
                     W=as.owin(X), subset=NULL,
                     exact=FALSE,
                     ngrid=spatstat.options("ngrid.disc")) {
  if(exact) areaLoss.diri(X, r, ..., W=W, subset=subset) else
            areaLoss.grid(X, r, ..., W=W, subset=subset, ngrid=ngrid)
}

# areaGain = area gained by adding u[i] to X
areaGain <- function(u, X, r, ..., W=as.owin(X), exact=FALSE,
                     ngrid=spatstat.options("ngrid.disc")) {
  if(exact) areaGain.diri(u, X, r, ..., W=W) else
            areaGain.grid(u, X, r, W=W, ..., ngrid=ngrid)
}

#////////////////////////////////////////////////////////////
#      algorithms using polygon geometry
#///////////////////////////////////////////////////////////

## Exact polygonal computation of areaLoss:
## area of disc(X[i], r) not covered by discs around the other points.
areaLoss.poly <- function(X, r, ..., W=as.owin(X), subset=NULL,
                          splitem=TRUE) {
  check.1.real(r)
  nX <- npoints(X)
  if(r <= 0 || nX == 0)
    return(numeric(nX))
  cooX <- coords(X)
  if(useW <- is.owin(W))
    W <- as.polygonal(W)
  #' initialise result
  result <- rep(pi * r^2, nX)
  wanted <- 1:nX
  if(!is.null(subset))
    wanted <- wanted[subset]
  #' split into connected components
  if(splitem) {
    Y <- connected(X, 2 * r)
    Z <- split(Y)
    V <- lapply(Z, areaLoss.poly, r=r, W=W, splitem=FALSE)
    return(unsplit(V, marks(Y))[wanted])
  }
  #' determine which pairs of points interact
  cl <- closepairs(X, 2 * r, what="indices")
  if(length(cl$i) == 0)
    return(result[wanted])
  #' determine scale parameters for polyclip
  p <- commonPolyclipArgs(Frame(X))
  #' template disc
  ball0 <- disc(r, c(0,0), ...)
  #' discs centred on data points
  balls <- vector(mode="list", length=nX)
  for(i in seq_len(nX))
    balls[[i]] <- shift(ball0, vec=cooX[i,])
  balls <- as.solist(balls, check=FALSE)
  #' start computin'
  for(i in wanted) {
    jj <- cl$j[cl$i == i]
    nn <- length(jj)
    if(nn > 0) {
      #' union of balls close to i
      u <- if(nn == 1) balls[[ jj ]] else union.owin(balls[jj], p=p)
      #' subtract from ball i
      v <- setminus.owin(balls[[i]], u)
      #' clip to window
      if(useW) v <- intersect.owin(v, W)
      #' compute
      result[i] <- area(v)
    }
  }
  return(result[wanted])
}

#////////////////////////////////////////////////////////////
#      algorithms using Dirichlet tessellation
#///////////////////////////////////////////////////////////

## Exact computation of areaLoss via the Delaunay/Dirichlet
## structure of the pattern (NOTE: continues beyond this excerpt).
areaLoss.diri <- function(X, r, ..., W=as.owin(X), subset=NULL) {
  stopifnot(is.ppp(X))
  npts <- npoints(X)
  if(is.matrix(r)) {
    if(sum(dim(r) > 1) > 1)
      stop("r should be a vector or single value")
    r <- as.vector(r)
  }
  nr <- length(r)
  if(npts == 0)
    return(matrix(, nrow=0, ncol=nr)) else
  if(npts == 1)
    return(matrix(discpartarea(X, r, W), nrow=1))
  #' set up output array
  indices <- 1L:npts
  if(!is.null(subset))
    indices <- indices[subset]
  out <- matrix(0, nrow=length(indices), ncol=nr)
  #' handle duplicate points
  retain <- !duplicated(X)
  getzero <- (multiplicity(X) > 1)
  uX <- X[retain]
  newserial <- cumsum(retain)
  # dirichlet neighbour relation in entire pattern
  w <- X$window
  dd <- deldir(uX$x, uX$y, rw=c(w$xrange, w$yrange))
  a <- dd$delsgs[,5L]
  b <- dd$delsgs[,6L]
  pir2 <- pi * r^2
  for(k in seq_along(indices)) {
    ind <- indices[k]
    if(!getzero[ind]) {
      #' find serial number in uX
      i <- newserial[ind]
      #' find all Delaunay neighbours of i
      jj
## areaGain.diri
## Exact computation of the area gained by adding each candidate point
## u[i] to the pattern X, for one or more radii r, using the Delaunay
## triangulation (deldir) to restrict attention to the points that can
## influence u[i]. The gain is obtained as the difference of exact
## dilated areas with and without the candidate point.
## Arguments:
##   u       candidate point(s), coerced to a ppp in window W
##   X       existing point pattern (ppp)
##   r       numeric vector of radii (a 1 x k or k x 1 matrix is tolerated)
##   W       window used for the dilated-area computation
##   verbose print progress messages
## Returns a matrix: rows index points of u, columns index radii.
areaGain.diri <- function(u, X, r, ..., W=as.owin(X), verbose=FALSE) {
  stopifnot(is.ppp(X))
  Y <- as.ppp(u, W=W)
  nX <- X$n
  nY <- Y$n
  if(is.matrix(r)) {
    ## accept a degenerate matrix but not a genuine 2-D array of radii
    if(sum(dim(r) > 1) > 1)
      stop("r should be a vector or single value")
    r <- as.vector(r)
  }
  nr <- length(r)
  ## no candidates: 0 x nr matrix
  if(nY == 0) return(matrix(, nrow=0, ncol=nr))
  ## empty pattern: every candidate gains a full disc
  if(nX == 0) return(matrix(pi * r^2, nrow=nY, ncol=nr, byrow=TRUE))
  if(verbose)
    splat("areaGain,", nY, ngettext(nY, "point,", "points,"),
          nr, ngettext(nr, "rvalue", "r values"))
  out <- matrix(0, nrow=nY, ncol=nr)
  pir2 <- pi * r^2
  wbox <- as.rectangle(as.owin(X))
  #
  state <- list()
  for(i in 1L:nY) {
    if(verbose) state <- progressreport(i, nY, state=state)
    ## place the candidate first so it is point 1 of V
    V <- superimpose(Y[i], X, W=wbox, check=FALSE)
    # Dirichlet neighbour relation for V
    dd <- deldir(V$x, V$y, rw=c(wbox$xrange, wbox$yrange))
    aa <- dd$delsgs[,5L]
    bb <- dd$delsgs[,6L]
    # find all Delaunay neighbours of Y[1] in V
    jj <- c(bb[aa==1L], aa[bb==1L])
    jj <- sortunique(jj)
    # extract only these points
    Zminus <- V[jj]
    Zplus <- V[c(1, jj)]
    # dilate
    aplus <- dilated.areas(Zplus, r, W, exact=TRUE)
    aminus <- dilated.areas(Zminus, r, W, exact=TRUE)
    areas <- aplus - aminus
    # area/(pi * r^2) must be in [0,1] and nonincreasing;
    # enforce this to clean up numerical error in the exact areas
    y <- ifelseAX(r == 0, 1, areas/pir2)
    y <- pmin.int(1, y)
    ok <- is.finite(y)
    y[ok] <- rev(cummax(rev(y[ok])))
    areas <- pmax.int(0, y * pir2)
    # save
    out[i,] <- areas
  }
  return(out)
}
## areaGain.grid
## Approximate computation of the area gained by adding each point u[i]
## to the pattern X, by grid counting in compiled C code
## (native routines "areadifs" / "areaBdif" from areadiff.c).
## Arguments:
##   u      candidate point(s), coerced to a ppp in the window of X
##   X      existing point pattern (ppp)
##   r      numeric vector of radii (finite, nonnegative)
##   W      optional constraint window; if an irregular (non-rectangular)
##          window, a distance-transform/set-covariance algorithm is used
##   ngrid  grid resolution for the C disc-counting routines
## Returns a matrix: rows index points of u, columns index radii.
areaGain.grid <- function(u, X, r, ...,
                          W=NULL, ngrid=spatstat.options("ngrid.disc")) {
  verifyclass(X, "ppp")
  u <- as.ppp(u, W=as.owin(X))
  stopifnot(is.numeric(r) && all(is.finite(r)) && all(r >= 0))
  #
  nu <- u$n
  nr <- length(r)
  if(nr == 0) return(numeric(0))
  rmax <- max(r)
  #
  constrain <- !is.null(W)
  if(constrain && (W$type != "rectangle")) {
    # Constrained to an irregular window
    # initialise to value for small-r
    result <- matrix(pi * r^2, nrow=nu, ncol=nr, byrow=TRUE)
    # vector of radii below which b(u,r) is disjoint from U(X,r)
    rcrit.u <- nncross(u, X, what="dist")/2
    rcrit.min <- min(rcrit.u)
    #' determine pixel resolution (capped at rmax/4)
    eps <- unclass(as.mask(Window(X), ...))[c("xstep", "ystep")]
    eps <- as.numeric(eps)
    eps <- eps * min(1, (rmax/4)/max(eps))
    #' Use distance transform and set covariance
    D <- distmap(X, eps=eps)
    DW <- D[W, drop=FALSE]
    # distance from (0,0) - thresholded to make digital discs
    discWin <- owin(c(-rmax,rmax),c(-rmax,rmax))
    discWin <- as.mask(discWin, eps=eps)
    rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W=discWin)
    #
    for(j in which(r > rcrit.min)) {
      # rj is above the critical radius rcrit.u[i] for at least one point u[i]
      rj <- r[j]
      if(any(above <- (rj > rcrit.u))) {
        ## uncovered region at distance > rj, convolved with disc of radius rj
        Uncovered <- levelset(DW, rj, ">")
        DiscRj <- levelset(rad, rj, "<=")
        AreaGainIm <- setcov(Uncovered, DiscRj)
        result[above, j] <- safelookup(AreaGainIm, u[above])
      }
    }
    return(result)
  }
  #
  # unconstrained, or constrained to a rectangle: use the C routines
  xx <- X$x
  yy <- X$y
  result <- matrix(, nrow=nu, ncol=nr)
  #
  for(i in 1L:nu) {
    # shift u[i] to origin
    xu <- u$x[i]
    yu <- u$y[i]
    xshift <- xx - xu
    yshift <- yy - yu
    # find points within distance 2 rmax of origin
    # (only these can intersect the disc around u[i])
    close <- (xshift^2 + yshift^2 < 4 * rmax^2)
    nclose <- sum(close)
    # invoke C routine
    if(!constrain) {
      z <- .C("areadifs",
              rad = as.double(r),
              nrads = as.integer(nr),
              x = as.double(xshift[close]),
              y = as.double(yshift[close]),
              nn = as.integer(nclose),
              ngrid = as.integer(ngrid),
              answer = as.double(numeric(nr)),
              PACKAGE = "spatstat")
      result[i,] <- z$answer
    } else {
      ## rectangle version: pass the window, translated so u[i] is the origin
      z <- .C("areaBdif",
              rad = as.double(r),
              nrads = as.integer(nr),
              x = as.double(xshift[close]),
              y = as.double(yshift[close]),
              nn = as.integer(nclose),
              ngrid = as.integer(ngrid),
              x0 = as.double(W$xrange[1L] - xu),
              y0 = as.double(W$yrange[1L] - yu),
              x1 = as.double(W$xrange[2L] - xu),
              y1 = as.double(W$yrange[2L] - yu),
              answer = as.double(numeric(nr)),
              PACKAGE = "spatstat")
      result[i,] <- z$answer
    }
  }
  return(result)
}
MultiPair.checkmatrix <- function(mat, n, matname,
                                  naok=TRUE, zerook=TRUE, asymmok=FALSE) {
  ## Validate an n x n parameter matrix for a multitype pairwise
  ## interaction; halts with an informative message at the first
  ## violation, otherwise returns NULL invisibly.
  ##   naok    : permit NA entries
  ##   zerook  : permit zero entries
  ##   asymmok : permit an asymmetric matrix
  if(missing(matname))
    matname <- short.deparse(substitute(mat))
  if(!is.matrix(mat))
    stop(paste(matname, "must be a matrix"))
  if(any(dim(mat) != rep.int(n,2)))
    stop(paste(matname, "must be a square matrix,",
               "of size", n, "x", n))
  missing.entries <- is.na(mat)
  if(!naok && any(missing.entries))
    stop(paste("NA entries not allowed in", matname))
  ## inspect only the non-missing entries for sign/zero constraints
  present <- mat[!missing.entries]
  if(any(present < 0))
    stop(paste("Negative entries not allowed in", matname))
  if(!zerook && any(present == 0))
    stop(paste("Zero entries not allowed in", matname))
  if(!asymmok && !isSymmetric(mat))
    stop(paste(matname, "must be a symmetric matrix"))
}
## localKengine
## Workhorse for localK / localL / localKinhom / localLinhom:
## computes the local (Getis-Franklin) K function estimate for every
## point of X, with the chosen edge correction.
## Arguments:
##   X          point pattern (ppp)
##   wantL      if TRUE, return L = sqrt(K/pi) instead of K
##   lambda     optional vector of intensity values at the points of X;
##              when supplied, pair contributions are weighted 1/lambda[j]
##   rmax       maximum r (default from rmax.rule)
##   correction "none", "translate"/"trans"/..., "isotropic"/"Ripley", "best"
##   verbose    print progress
##   rvalue     if supplied, return only the numeric vector of local values
##              at this single distance
## Returns an fv object with one column per data point
## (or a numeric vector when rvalue is given).
"localKengine" <- function(X, ..., wantL=FALSE, lambda=NULL, rmax = NULL,
                           correction="Ripley", verbose=TRUE, rvalue=NULL) {
  npts <- npoints(X)
  W <- X$window
  areaW <- area(W)
  lambda.ave <- npts/areaW
  ## (n-1)/|W|: intensity seen from a typical data point
  lambda1.ave <- (npts - 1)/areaW
  weighted <- !is.null(lambda)
  if(is.null(rvalue))
    rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda.ave)
  else {
    ## single-distance mode: make rvalue the last break point
    stopifnot(is.numeric(rvalue))
    stopifnot(length(rvalue) == 1)
    stopifnot(rvalue >= 0)
    rmaxdefault <- rvalue
  }
  breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  correction.given <- !missing(correction)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=FALSE)
  correction <- implemented.for.K(correction, W$type, correction.given)
  # recommended range of r values
  alim <- c(0, min(rmax, rmaxdefault))
  # identify all close pairs
  rmax <- max(r)
  close <- closepairs(X, rmax)
  DIJ <- close$d
  XI <- ppp(close$xi, close$yi, window=W, check=FALSE)
  I <- close$i
  if(weighted) {
    J <- close$j
    lambdaJ <- lambda[J]
    weightJ <- 1/lambdaJ
  }
  # initialise: one column of the data frame per data point
  df <- as.data.frame(matrix(NA, length(r), npts))
  labl <- desc <- character(npts)
  if(verbose) state <- list()
  switch(correction,
         none={
           # uncorrected! For demonstration purposes only!
           for(i in 1:npts) {
             ii <- (I == i)
             wh <- whist(DIJ[ii], breaks$val,
                         if(weighted) weightJ[ii] else NULL)  # no edge weights
             df[,i] <- cumsum(wh)
             icode <- numalign(i, npts)
             names(df)[i] <- paste("un", icode, sep="")
             labl[i] <- makefvlabel(NULL, "hat", character(2), icode)
             desc[i] <- paste("uncorrected estimate of %s",
                              "for point", icode)
             if(verbose) state <- progressreport(i, npts, state=state)
           }
           if(!weighted) df <- df/lambda1.ave
         },
         translate={
           # Translation correction
           XJ <- ppp(close$xj, close$yj, window=W, check=FALSE)
           edgewt <- edge.Trans(XI, XJ, paired=TRUE)
           if(weighted) edgewt <- edgewt * weightJ
           for(i in 1:npts) {
             ii <- (I == i)
             wh <- whist(DIJ[ii], breaks$val, edgewt[ii])
             Ktrans <- cumsum(wh)
             df[,i] <- Ktrans
             icode <- numalign(i, npts)
             names(df)[i] <- paste("trans", icode, sep="")
             labl[i] <- makefvlabel(NULL, "hat", character(2), icode)
             desc[i] <- paste("translation-corrected estimate of %s",
                              "for point", icode)
             if(verbose) state <- progressreport(i, npts, state=state)
           }
           if(!weighted) df <- df/lambda1.ave
           ## translation correction is unreliable beyond half the diameter
           h <- diameter(W)/2
           df[r >= h, ] <- NA
         },
         isotropic={
           # Ripley isotropic correction
           edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1))
           if(weighted) edgewt <- edgewt * weightJ
           for(i in 1:npts) {
             ii <- (I == i)
             wh <- whist(DIJ[ii], breaks$val, edgewt[ii])
             Kiso <- cumsum(wh)
             df[,i] <- Kiso
             icode <- numalign(i, npts)
             names(df)[i] <- paste("iso", icode, sep="")
             labl[i] <- makefvlabel(NULL, "hat", character(2), icode)
             desc[i] <- paste("Ripley isotropic correction estimate of %s",
                              "for point", icode)
             if(verbose) state <- progressreport(i, npts, state=state)
           }
           if(!weighted) df <- df/lambda1.ave
           ## isotropic correction is unreliable beyond half the diameter
           h <- diameter(W)/2
           df[r >= h, ] <- NA
         })
  # transform values if L required
  if(wantL)
    df <- sqrt(df/pi)
  # return vector of values at r=rvalue, if desired
  if(!is.null(rvalue)) {
    nr <- length(r)
    if(r[nr] != rvalue)
      stop("Internal error - rvalue not attained")
    return(as.numeric(df[nr,]))
  }
  # function value table required
  # add r and theo
  if(!wantL) {
    df <- cbind(df, data.frame(r=r, theo=pi * r^2))
    if(!weighted) {
      fnam <- c("K", "loc")
      yexp <- ylab <- quote(K[loc](r))
    } else {
      fnam <- c("K", "list(inhom,loc)")
      ylab <- quote(K[inhom,loc](r))
      yexp <- quote(K[list(inhom,loc)](r))
    }
  } else {
    df <- cbind(df, data.frame(r=r, theo=r))
    if(!weighted) {
      fnam <- c("L", "loc")
      yexp <- ylab <- quote(L[loc](r))
    } else {
      fnam <- c("L", "list(inhom,loc)")
      ylab <- quote(L[inhom,loc](r))
      yexp <- quote(L[list(inhom,loc)](r))
    }
  }
  desc <- c(desc, c("distance argument r", "theoretical Poisson %s"))
  labl <- c(labl, c("r", "{%s[%s]^{pois}}(r)"))
  # create fv object (note deliberately missing 5th argument)
  K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam, yexp=yexp)
  # default is to display them all
  formula(K) <- . ~ r
  unitname(K) <- unitname(X)
  attr(K, "correction") <- correction
  return(K)
}
rtemper <- function(model, invtemp, nrep, ..., track=FALSE, start=NULL,
                    verbose=FALSE) {
  ## Simulated tempering/annealing driver: run rmh() through a schedule
  ## of inverse temperatures, chaining each stage's final state into
  ## the next stage's initial state.
  ##   model   : rmh model specification
  ##   invtemp : vector of inverse temperatures, one per stage
  ##   nrep    : vector of iteration counts, one per stage
  ##   track   : accumulate the transition history across stages
  ## The data.frame() call also checks invtemp and nrep are compatible.
  schedule <- data.frame(invtemp, nrep)
  nstages <- nrow(schedule)
  Xcur <- NULL    # current simulated pattern, carried between stages
  hist <- NULL    # accumulated history rows (only when track=TRUE)
  for(stage in seq_len(nstages)) {
    if(verbose)
      cat(paste("Step", stage, "of", paste0(nstages, ":"),
                "Running", nrep[stage], "iterations",
                "at inverse temperature", signif(invtemp[stage], 4),
                "... "))
    hotmodel <- reheat(model, invtemp[stage])
    ## clip to the final window only at the last stage
    Xcur <- rmh(hotmodel, nrep=nrep[stage], ...,
                start=start,
                overrideXstart = Xcur,
                overrideclip = (stage != nstages),
                track=track,
                saveinfo = FALSE,
                verbose=FALSE)
    if(track)
      hist <- rbind(hist, attr(Xcur, "history"))
  }
  if(verbose) cat("Done.\n")
  if(track) attr(Xcur, "history") <- hist
  return(Xcur)
}
quadscheme <- function(data, dummy, method="grid", ...) {
  ## Generate a quadrature scheme from a data pattern and (optional)
  ## dummy pattern. Remaining arguments control how the dummy points
  ## and the quadrature weights are computed.
  data <- as.ppp(data)
  if(missing(dummy)) {
    ## create default dummy points, remembering the parameters used
    dummy <- default.dummy(data, method=method, ...)
    dumpar <- attr(dummy, "dummy.parameters")
    wtspar <- attr(dummy, "weight.parameters")
  } else if(is.ppp(dummy)) {
    ## may already carry parameters if created by default.dummy()
    dumpar <- attr(dummy, "dummy.parameters")
    wtspar <- attr(dummy, "weight.parameters")
  } else {
    ## coerce to ppp and confine to the data window
    dummy <- as.ppp(dummy, data$window, check=FALSE)
    dummy <- dummy[data$window]
    dumpar <- wtspar <- list()
  }
  ## arguments supplied directly here override attribute-borne ones
  wtspar <- resolve.defaults(list(method=method), list(...), wtspar)
  markeddata <- is.marked(data)
  markeddummy <- is.marked(dummy)
  if(markeddummy && !markeddata)
    stop("dummy points are marked but data are unmarked")
  if(markeddata && markeddummy)
    stop("marked data and marked dummy points -- sorry, this case is not implemented")
  ## dispatch to the appropriate constructor
  maker <- if(markeddata) quadscheme.replicated else quadscheme.spatial
  Q <- do.call(maker, append(list(data, dummy, check=FALSE), wtspar))
  ## record parameters used to make dummy points
  Q$param$dummy <- dumpar
  return(Q)
}
"quadscheme.replicated" <- function(data, dummy,
                                    method=c("grid", "dirichlet"), ...) {
  ##
  ## generate a quadrature scheme from data and dummy patterns.
  ##
  ## The 'method' may be "grid" or "dirichlet"
  ##
  ## '...' are passed to gridweights() or dirichletWeights()
  ##
  ## quadscheme.replicated:
  ## for multitype point patterns.
  ## Spatial weights are computed ignoring marks, then each dummy
  ## location is replicated once for every possible mark, and extra
  ## marked dummies are placed at data locations with the other marks.
  ##
  ## No two points in 'data'+'dummy' should have the same spatial location
  check <- resolve.defaults(list(...), list(check=TRUE))$check
  method <- match.arg(method)
  data <- as.ppp(data, check=check)
  dummy <- as.ppp(dummy, data$window, check=check)
  ## note data$window is the DEFAULT quadrature window
  ## unless otherwise specified in 'dummy'
  ndata <- data$n
  ndummy <- dummy$n
  if(!is.marked(data))
    stop("data pattern does not have marks")
  if(is.marked(dummy, dfok=TRUE) && npoints(dummy) > 0)
    warning("dummy points have marks --- ignored")
  ## first, ignore marks and compute spatial weights
  P <- quadscheme.spatial(unmark(data), dummy, method, ...)
  W <- w.quad(P)
  iz <- is.data(P)
  Wdat <- W[iz]     # weights of data locations
  Wdum <- W[!iz]    # weights of dummy locations
  ## find the set of all possible marks
  if(!is.multitype(data))
    stop("data pattern is not multitype")
  data.marks <- marks(data)
  markset <- levels(data.marks)
  nmarks <- length(markset)
  ## replicate dummy points, one copy for each possible mark
  ## -> dummy x {1,..,K}
  dumdum <- cartesian(dummy, markset)
  Wdumdum <- rep.int(Wdum, nmarks)
  ## source indices: which spatial location each replicate came from
  Idumdum <- rep.int(ndata + seq_len(ndummy), nmarks)
  ## also make dummy marked points at same locations as data points
  ## but with different marks
  dumdat <- cartesian(unmark(data), markset)
  Wdumdat <- rep.int(Wdat, nmarks)
  Mdumdat <- marks(dumdat)
  Idumdat <- rep.int(1:ndata, nmarks)
  Mrepdat <- rep.int(data.marks, nmarks)
  ## keep only combinations where the mark differs from the data mark
  ok <- (Mdumdat != Mrepdat)
  dumdat <- dumdat[ok,]
  Wdumdat <- Wdumdat[ok]
  Idumdat <- Idumdat[ok]
  ## combine the two dummy patterns
  dumb <- superimpose(dumdum, dumdat, W=dummy$window, check=FALSE)
  Wdumb <- c(Wdumdum, Wdumdat)
  Idumb <- c(Idumdum, Idumdat)
  ## record the quadrature parameters
  param <- list(weight = P$param$weight,
                dummy = NULL,
                sourceid=c(1:ndata, Idumb))
  ## wrap up
  Q <- quad(data, dumb, c(Wdat, Wdumb), param)
  return(Q)
}
pixelquad <- function(X, W=as.owin(X)) {
  ## make a quadscheme with a dummy point at every pixel
  ## (counting weights: each pixel's area is shared equally among the
  ## quadrature points falling in it)
  verifyclass(X, "ppp")
  ## convert window to mask if not already one
  W <- as.owin(W)
  M <- as.mask(W)
  MM <- M$m
  pixelarea <- M$xstep * M$ystep
  ## create pixel coordinates and corresponding row, column indices
  rxy <- rasterxy.mask(M, drop=TRUE)
  xx <- rxy$x
  yy <- rxy$y
  cc <- as.vector(col(MM)[MM])
  rr <- as.vector(row(MM)[MM])
  Nr <- M$dim[1]
  Nc <- M$dim[2]
  ## dummy point pattern: one point per pixel centre
  dum <- ppp(xx, yy, window=W, check=FALSE)
  ## discretise data points
  ij <- nearest.raster.point(X$x, X$y, M)
  ijrow <- ij$row
  ijcol <- ij$col
  if(!is.marked(X)) {
    ## tabulate pixel locations of data points
    Xtab <- table(row=factor(ijrow, levels=1:Nr),
                  col=factor(ijcol, levels=1:Nc))
    ## every pixel contains exactly one dummy point,
    ## so the total count of quadrature points in each pixel is:
    Qtab <- Xtab + 1
    ## compute counting weights for data points
    wdat <- 1/Qtab[cbind(ijrow, ijcol)]
    ## compute counting weights for dummy points
    wdum <- 1/Qtab[cbind(rr, cc)]
  } else {
    marx <- marks(X)
    ## tabulate pixel locations and marks of data points
    Xtab <- table(row=factor(ijrow, levels=1:Nr),
                  col=factor(ijcol, levels=1:Nc),
                  mark=marx)
    ## replicate dummy points (pixel centres) for each mark
    dum <- cartesian(dum, levels(marx))
    ## every marked pixel contains exactly one dummy point,
    ## so the total count of quadrature points in each marked pixel is:
    Qtab <- Xtab + 1
    ## compute counting weights for data points
    wdat <- 1/Qtab[cbind(ijrow, ijcol, as.integer(marx))]
    ## compute counting weights for dummy points
    ## (indexing matches the mark-major order produced by cartesian())
    nm <- length(levels(marx))
    wdum <- 1/Qtab[cbind(rep.int(rr, nm),
                         rep.int(cc, nm),
                         rep(1:nm, each=length(rr)))]
  }
  ## create quadrature scheme; weights are scaled by the pixel area
  wboth <- pixelarea * c(wdat, wdum)
  Q <- quad(X, dum, wboth)
  ## keep the mask used, for reference
  attr(Q, "M") <- M
  return(Q)
}
## split.ppx
## Divide a multidimensional point pattern (ppx) into sub-patterns,
## according to a factor f (defaulting to the marks of x).
## Arguments:
##   x    a ppx object
##   f    a factor, a logical vector, or the name of a column of marks
##   drop if TRUE, discard factor levels containing no points
##   un   if TRUE, remove marks from each sub-pattern
##        (default: TRUE when f was missing, unless marks are a
##         data frame / hyperframe)
## Returns an object of class c("splitppx", "anylist") with the
## splitting object stored in attribute "fsplit".
split.ppx <- function(x, f = marks(x), drop=FALSE, un=NULL, ...) {
  stopifnot(inherits(x, "ppx"))
  mf <- markformat(x)
  if(is.null(un))
    un <- missing(f) && !(mf %in% c("dataframe", "hyperframe"))
  if(missing(f)) {
    # f defaults to marks of x
    switch(mf,
           none={
             stop("f is missing and there are no marks")
           },
           vector={
             if(!is.multitype(x))
               stop("f is missing and the pattern is not multitype")
             f <- fsplit <- marks(x)
           },
           hyperframe=,
           dataframe={
             ## use the first factor-valued column of the marks
             f <- fsplit <- firstfactor(marks(x))
             if(is.null(f))
               stop("Marks do not include a factor")
           })
    splittype <- "factor"
  } else {
    # f was given
    fsplit <- f
    if(is.factor(f)) {
      splittype <- "factor"
    } else if(is.logical(f)) {
      splittype <- "factor"
      f <- factor(f)
    } else if(is.character(f) && length(f) == 1) {
      # f is the name of a column of marks
      marx <- marks(x)
      if((is.data.frame(marx) || is.hyperframe(marx)) && (f %in% names(marx))) {
        fsplit <- f <- as.factor(marx[ ,f,drop=TRUE])
      } else stop(paste("The name", sQuote(f),
                        "does not match any column of marks"))
      splittype <- "factor"
    } else stop(paste("f must be",
                      "a factor,",
                      "or the name of a column of marks"))
    if(length(f) != npoints(x))
      stop("length(f) must equal the number of points in x")
  }
  # At this point
  # 'f' is a factor that can be used to separate the points
  # 'fsplit' is the object (either a factor or a tessellation)
  # that determines the split (and can be "un-split")
  lev <- levels(f)
  if(drop) {
    # remove components that don't contain points
    retain <- (table(f) > 0)
    lev <- lev[retain]
    switch(splittype,
           factor = {
             # delete levels that don't occur
             fsplit <- factor(fsplit, levels=lev)
           },
           stop("Internal error: wrong format for fsplit"))
  }
  # split the data (points with NA in f are dropped from every group)
  out <- list()
  for(l in lev)
    out[[paste(l)]] <- x[!is.na(f) & (f == l)]
  if(un)
    out <- lapply(out, unmark)
  class(out) <- c("splitppx", "anylist", class(out))
  attr(out, "fsplit") <- fsplit
  return(out)
}
linearKdot <- function(X, i, r=NULL, ..., correction="Ang") {
  ## K-dot function for a multitype point pattern on a linear network:
  ## pairs from points of type i to points of any type.
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  markvalues <- marks(X)
  possible <- levels(markvalues)
  if(missing(i)) {
    i <- possible[1L]
  } else if(!(i %in% possible)) {
    stop(paste("i = ", i , "is not a valid mark"))
  }
  fromI <- (markvalues == i)
  everything <- rep(TRUE, npoints(X))  # 'dot': all points are targets
  result <- linearKmulti(X, fromI, everything,
                         r=r, correction=correction, ...)
  ## relabel according to the correction actually used
  usedcorrn <- attr(result, "correction")
  flavour <- if(usedcorrn == "Ang") "L" else "net"
  rebadge.as.dotfun(result, "K", flavour, i)
}
## Inhomogeneous K dot function K_{i.,inhom}(r) on a linear network.
## lambdaI: intensity of the type-i points; lambdadot: intensity of all points.
linearKdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ...,
                             correction="Ang", normalise=TRUE) {
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  I <- (marx == i)
  J <- rep(TRUE, npoints(X))  # i.e. all points
  # compute
  result <- linearKmulti.inhom(X, I, J, lambdaI, lambdadot, r=r,
                               correction=correction, normalise=normalise,
                               ...)
  ## relabel
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  result <- rebadge.as.dotfun(result, "K", type, i)
  attr(result, "correction") <- correction
  return(result)
}

## Inhomogeneous K cross function K_{ij,inhom}(r) on a linear network.
## lambdaI, lambdaJ: intensities of the type-i and type-j sub-patterns.
linearKcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ...,
                               correction="Ang", normalise=TRUE) {
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  marx <- marks(X)
  lev <- levels(marx)
  if(missing(i)) i <- lev[1L] else
    if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark"))
  if(missing(j)) j <- lev[2L] else
    if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark"))
  #
  if(i == j) {
    ## same type: reduces to the inhomogeneous K of the sub-pattern
    I <- (marx == i)
    result <- linearKinhom(X[I], lambda=lambdaI, r=r,
                           correction=correction, normalise=normalise, ...)
  } else {
    I <- (marx == i)
    J <- (marx == j)
    result <- linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r,
                                 correction=correction, normalise=normalise,
                                 ...)
  }
  # rebrand
  correction <- attr(result, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  result <- rebadge.as.crossfun(result, "K", type, i, j)
  attr(result, "correction") <- correction
  return(result)
}

## General inhomogeneous multitype K function for arbitrary index sets
## I, J (logical vectors over points of X).  Pair (i,j) contributions are
## weighted by 1/(lambdaI[i] * lambdaJ[j]).
linearKmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ...,
                               correction="Ang", normalise=TRUE) {
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  # extract info about pattern
  np <- npoints(X)
  lengthL <- volume(domain(X))
  #
  # validate I, J
  if(!is.logical(I) || !is.logical(J))
    stop("I and J must be logical vectors")
  if(length(I) != np || length(J) != np)
    stop(paste("The length of I and J must equal",
               "the number of points in the pattern"))
  if(!any(I)) stop("no points satisfy I")
  ## NOTE(review): no corresponding guard for J here either — see
  ## linearKmulti; confirm intended.
  # validate lambda vectors
  ## getlambda.lpp accepts a vector/function/image/fitted model and
  ## returns numeric intensity values at the selected points
  lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...)
  lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...)
  # compute K
  weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*")
  ## normalised estimate divides by sum(1/lambdaI)
  ## (asymmetric in I and J — presumably by design of the estimator)
  denom <- if(!normalise) lengthL else sum(1/lambdaI)
  K <- linearKmultiEngine(X, I, J, r=r,
                          reweight=weightsIJ, denom=denom,
                          correction=correction, ...)
  # set appropriate y axis label
  correction <- attr(K, "correction")
  type <- if(correction == "Ang") "L, inhom" else "net, inhom"
  K <- rebadge.as.crossfun(K, "K", type, "I", "J")
  # set markers for 'envelope'
  attr(K, "dangerous") <- union(attr(lambdaI, "dangerous"),
                                attr(lambdaJ, "dangerous"))
  attr(K, "correction") <- correction
  return(K)
}

# .............. internal ...............................
## Workhorse for the multitype/marked K functions on a linear network.
## I, J: logical index vectors; reweight: optional matrix of pair weights
## (e.g. inverse intensity products); denom: normalising denominator.
## Returns an 'fv' object with attribute "correction".
linearKmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1,
                               correction="Ang", showworking=FALSE) {
  # ensure distance information is present
  X <- as.lpp(X, sparse=FALSE)
  # extract info about pattern
  np <- npoints(X)
  # extract linear network
  L <- domain(X)
  W <- Window(L)
  # determine r values
  rmaxdefault <- 0.98 * boundingradius(L)
  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  #
  if(correction == "Ang") {
    fname <- c("K", "list(L, I, J)")
    ylab <- quote(K[L,I,J](r))
  } else {
    fname <- c("K", "list(net, I, J)")
    ylab <- quote(K[net,I,J](r))
  }
  #
  if(np < 2) {
    # no pairs to count: return zero function
    ## (tested on the total point count np, not on sum(I)/sum(J))
    zeroes <- rep(0, length(r))
    df <- data.frame(r = r, est = zeroes)
    K <- fv(df, "r", ylab, "est",
            . ~ r, c(0, rmax),
            c("r", makefvlabel(NULL, "hat", fname)),
            c("distance argument r", "estimated %s"),
            fname = fname)
    attr(K, "correction") <- correction
    return(K)
  }
  #
  ## nI <- sum(I)
  ## nJ <- sum(J)
  ## whichI <- which(I)
  ## whichJ <- which(J)
  ## points belonging to both I and J would pair with themselves
  clash <- I & J
  has.clash <- any(clash)
  ## compute pairwise distances
  DIJ <- crossdist(X[I], X[J], check=FALSE)
  if(has.clash) {
    ## exclude pairs of identical points from consideration
    ## by setting their distance to Inf (never counted at finite r)
    Iclash <- which(clash[I])
    Jclash <- which(clash[J])
    DIJ[cbind(Iclash,Jclash)] <- Inf
  }
  #--- compile into K function ---
  if(correction == "none" && is.null(reweight)) {
    # no weights (Okabe-Yamada)
    K <- compileK(DIJ, r, denom=denom, check=FALSE, fname=fname)
    K <- rebadge.as.crossfun(K, "K", "net", "I", "J")
    unitname(K) <- unitname(X)
    attr(K, "correction") <- correction
    return(K)
  }
  if(correction == "none") {
    edgewt <- 1
  } else {
    ## inverse m weights (Ang's correction)
    ## determine tolerance
    toler <- default.linnet.tolerance(L)
    ## compute m[i,j] = number of network points at distance DIJ[i,j]
    ## from point i (disc boundary count)
    m <- DoCountCrossEnds(X, I, J, DIJ, toler)
    edgewt <- 1/m
  }
  # compute K
  wt <- if(!is.null(reweight)) edgewt * reweight else edgewt
  K <- compileK(DIJ, r, weights=wt, denom=denom, check=FALSE, fname=fname)
  ## rebadge and tweak
  K <- rebadge.as.crossfun(K, "K", "L", "I", "J")
  fname <- attr(K, "fname")
  # tack on theoretical value
  K <- bind.fv(K, data.frame(theo=r),
               makefvlabel(NULL, NULL, fname, "pois"),
               "theoretical Poisson %s")
  ##
  unitname(K) <- unitname(X)
  ## reverse plotting order so the estimate is drawn on top
  fvnames(K, ".") <- rev(fvnames(K, "."))
  # show working
  if(showworking)
    attr(K, "working") <- list(DIJ=DIJ, wt=wt)
  attr(K, "correction") <- correction
  return(K)
}

## Compute the matrix m[i,k] of disc-boundary counts used in Ang's
## edge correction for cross/dot K functions:
## m[i,k] = number of points of the network lying at distance DIJ[i,k]
## from the i-th point of X[I], excluding the pair (i,k) when it is the
## same point.  Handles disconnected networks component by component.
DoCountCrossEnds <- function(X, I, J, DIJ, toler) {
  stopifnot(is.lpp(X))
  stopifnot(is.logical(I) && is.logical(J))
  stopifnot(is.matrix(DIJ))
  nI <- sum(I)
  nJ <- sum(J)
  whichI <- which(I)
  whichJ <- which(J)
  m <- matrix(1, nI, nJ)
  easy <- list(is.connected=TRUE)
  L <- domain(X)
  if(is.connected(L)) {
    ## network is connected
    for(k in seq_len(nJ)) {
      j <- whichJ[k]
      ## rows of column k corresponding to I-points other than point j
      I.j <- (whichI != j)
      i.j <- setdiff(whichI, j)
      m[I.j, k] <- countends(L, X[i.j], DIJ[I.j,k], toler=toler, internal=easy)
    }
  } else {
    ## network is disconnected - split into components
    vlab <- connected(L, what="labels")
    subsets <- split(seq_len(nvertices(L)), factor(vlab))
    for(s in subsets) {
      ## extract one component and the points falling in it
      Xs <- thinNetwork(X, retainvertices=s)
      ns <- npoints(Xs)
      if(ns >= 2) {
        Ls <- domain(Xs)
        ## identify which points of X are involved
        relevant <- attr(Xs, "retainpoints")
        Xindex <- which(relevant)
        ## classify them
        Isub <- I[relevant]
        ## Jsub <- J[relevant]
        ## identify relevant submatrix of DIJ
        rowsub <- relevant[I]
        colsub <- relevant[J]
        ## corresponding indices in X
        ## rowXindex <- whichI[rowsub]
        ## colXindex <- whichJ[colsub]
        ## handle
        for(k in which(colsub)) {
          j <- whichJ[k]
          ## I-rows in this component, excluding point j itself
          I.j <- rowsub & (whichI != j)
          i.j <- Isub & (Xindex != j)
          m[ I.j, k ] <- countends(Ls, Xs[i.j], DIJ[I.j, k],
                                   toler=toler, internal=easy)
        }
      }
    }
  }
  if(any(uhoh <- (m == 0) & is.finite(DIJ))) {
    ## a zero count would give an infinite edge weight; clamp to 1
    warning("Internal error: disc boundary count equal to zero")
    m[uhoh] <- 1
  }
  return(m)
}
spatstat/R/randomtess.R0000644000176200001440000000270113333543255014626 0ustar liggesusers
#
# randomtess.R
#
# Random tessellations
#
# $Revision: 1.7 $ $Date: 2015/10/21
# 09:06:57 $
#
# Poisson line tessellation

## Generate a realisation of the Poisson line tessellation with
## intensity 'lambda' inside window 'win'.
## Lines are generated through the circumscribing circle of the window,
## the induced tessellation is formed in the bounding box, then clipped.
## Returns a 'tess' object with the generating lines in attr(, "lines").
rpoislinetess <- function(lambda, win=owin()) {
  win <- as.owin(win)
  if(win$type == "mask")
    stop("Not implemented for masks")
  # determine circumcircle
  xr <- win$xrange
  yr <- win$yrange
  xmid <- mean(xr)
  ymid <- mean(yr)
  width <- diff(xr)
  height <- diff(yr)
  rmax <- sqrt(width^2 + height^2)/2
  boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax)
  # generate poisson lines through circumcircle
  ## expected number of lines hitting a disc of radius rmax is
  ## lambda * 2 * pi * rmax
  n <- rpois(1, lambda * 2 * pi * rmax)
  if(n == 0)
    return(tess(tiles=list(win)))
  ## each line parametrised by (p, theta): distance from centre and angle
  theta <- runif(n, max= 2 * pi)
  p <- runif(n, max=rmax)
  Y <- infline(p=p, theta=theta)
  # form the induced tessellation in bounding box
  Z <- chop.tess(boundbox, Y)
  # clip to window
  Z <- intersect.tess(Z, win)
  attr(Z, "lines") <- Y
  return(Z)
}

## Random subset of a tessellation: retain each tile independently with
## probability p and return the union of the retained tiles
## (NULL if no tile is retained).
rMosaicSet <- function(X, p=0.5) {
  stopifnot(is.tess(X))
  Y <- tiles(X)
  Y <- Y[runif(length(Y)) < p]
  if(length(Y) == 0) return(NULL)
  Z <- NULL
  for(i in seq_along(Y))
    Z <- union.owin(Z, Y[[i]])
  return(Z)
}

## Random field that is constant on each tile of the tessellation X:
## tile values are drawn by 'rgen' (called with the number of tiles,
## plus 'rgenargs').  Returns a pixel image.
rMosaicField <- function(X,
                         rgen=function(n) { sample(0:1, n, replace=TRUE)},
                         ...,
                         rgenargs=NULL
                         ) {
  stopifnot(is.tess(X))
  Y <- as.im(X, ...)
  ntiles <- length(levels(Y))
  values <- do.call(rgen, append(list(ntiles),rgenargs))
  ## look up each pixel's tile index in the vector of tile values
  Z <- eval.im(values[as.integer(Y)])
  return(Z)
}
spatstat/R/linearK.R0000644000176200001440000002513013606253516014036 0ustar liggesusers
#
# linearK
#
# $Revision: 1.56 $ $Date: 2020/01/11 04:23:26 $
#
# K function for point pattern on linear network
#

## K function for a point pattern on a linear network.
## correction = "Ang" (geometrically corrected, default) or "none"
## (uncorrected Okabe-Yamada estimate).
linearK <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) {
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  np <- npoints(X)
  lengthL <- volume(domain(X))
  ## denominator: number of ordered pairs per unit length of network
  denom <- np * (np - 1)/lengthL
  K <- linearKengine(X, r=r, ..., denom=denom,
                     correction=correction, ratio=ratio)
  correction <- attr(K, "correction")
  # set appropriate y axis label
  switch(correction,
         Ang = {
           ylab <- quote(K[L](r))
           fname <- c("K", "L")
         },
         none = {
           ylab <- quote(K[net](r))
           fname <- c("K", "net")
         })
  K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname)
  attr(K, "correction") <- correction
  return(K)
}

## Inhomogeneous K function on a linear network.
## lambda: intensity (vector, function, image, or fitted model);
## lambda = NULL falls back to the homogeneous linearK.
linearKinhom <- function(X, lambda=NULL, r=NULL, ...,
                         correction="Ang", normalise=TRUE, normpower=1,
                         update=TRUE, leaveoneout=TRUE, ratio=FALSE) {
  stopifnot(inherits(X, "lpp"))
  loo.given <- !missing(leaveoneout)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  ## BUG FIX: previously the homogeneous result was computed here but not
  ## returned, so execution fell through to getlambda.lpp(NULL, ...) which
  ## stopped with an error.  Return the homogeneous estimate immediately.
  if(is.null(lambda))
    return(linearK(X, r=r, ..., ratio=ratio, correction=correction))
  if(normalise) {
    check.1.real(normpower)
    stopifnot(normpower >= 1)
  }
  ## evaluate intensity at the data points
  lambdaX <- getlambda.lpp(lambda, X, ..., update=update,
                           leaveoneout=leaveoneout, loo.given=loo.given,
                           lambdaname="lambda")
  invlam <- 1/lambdaX
  invlam2 <- outer(invlam, invlam, "*")
  lengthL <- volume(domain(X))
  ## normalised denominator uses sum of inverse intensities
  ## (raised to normpower when normpower > 1)
  denom <- if(!normalise) lengthL else
           if(normpower == 1) sum(invlam) else
           lengthL * (sum(invlam)/lengthL)^normpower
  K <- linearKengine(X,
                     reweight=invlam2, denom=denom, r=r,
                     correction=correction, ratio=ratio, ...)
  # set appropriate y axis label
  ## ---- tail of linearKinhom (continued from previous chunk) ----
  correction <- attr(K, "correction")
  switch(correction,
         Ang = {
           ylab <- quote(K[L, inhom](r))
           yexp <- quote(K[list(L, "inhom")](r))
           fname <- c("K", "list(L, inhom)")
         },
         none = {
           ylab <- quote(K[net, inhom](r))
           yexp <- quote(K[list(net, "inhom")](r))
           fname <- c("K", "list(net, inhom)")
         })
  K <- rebadge.fv(K, new.fname=fname, new.ylab=ylab, new.yexp=yexp)
  attr(K, "correction") <- correction
  ## propagate 'dangerous' marker so envelope() can warn appropriately
  attr(K, "dangerous") <- attr(lambdaX, "dangerous")
  return(K)
}

## Evaluate an intensity specification 'lambda' at the points of X
## (optionally restricted by 'subset').  Accepts a fitted model
## (ppm/lppm), a numeric vector, a function, a linfun/linim, or a pixel
## image.  Returns a numeric vector of positive intensity values; the
## attribute "dangerous" is set (to the argument name) unless the values
## came from a model re-fitted to X, for use by envelope().
getlambda.lpp <- function(lambda, X, subset=NULL, ...,
                          update=TRUE, leaveoneout=TRUE, loo.given=TRUE,
                          lambdaname) {
  ## record whether 'update' was defaulted, before it is evaluated
  missup <- missing(update)
  if(missing(lambdaname))
    lambdaname <- deparse(substitute(lambda))
  Y <- if(is.null(subset)) X else X[subset]
  danger <- TRUE
  if(is.ppm(lambda) || is.lppm(lambda)) {
    ## fitted model
    if(update) {
      ## refit the model to the full dataset X
      lambda <- if(is.lppm(lambda)) update(lambda, X) else
                update(lambda, as.ppp(X))
      ## now evaluate
      lambdaX <- fitted(lambda, dataonly=TRUE, leaveoneout=leaveoneout)
      ## restrict if required
      lambdaY <- if(is.null(subset)) lambdaX else lambdaX[subset]
      ##
      danger <- FALSE
      if(missup)
        warn.once("lin.inhom.update",
                  "The behaviour of linearKinhom and similar functions",
                  "when lambda is an lppm object",
                  "has changed in spatstat 1.41-0,",
                  "and again in spatstat 1.52-0.",
                  "See help(linearKinhom)")
    } else {
      if(loo.given && leaveoneout)
        stop("leave-one-out calculation for fitted models is only available when update=TRUE", call.=FALSE)
      lambdaY <- predict(lambda, locations=as.data.frame(as.ppp(Y)))
    }
  } else {
    ## lambda is some other kind of object
    lambdaY <- if(is.vector(lambda)) lambda else
               if(inherits(lambda, "linfun")) lambda(Y, ...) else
               if(inherits(lambda, "linim")) lambda[Y, drop=FALSE] else
               if(is.function(lambda)) {
                 coo <- coords(Y)
                 do.call.matched(lambda, list(x=coo$x, y=coo$y, ...))
               } else
               if(is.im(lambda)) safelookup(lambda, as.ppp(Y)) else
               stop(paste(lambdaname, "should be",
                          "a numeric vector, function, pixel image, or fitted model"))
  }
  ## validate: numeric, one value per point, strictly positive
  if(!is.numeric(lambdaY))
    stop(paste("Values of", lambdaname, "are not numeric"))
  if((nv <- length(lambdaY)) != (np <- npoints(Y)))
     stop(paste("Obtained", nv, "values of", lambdaname,
                "but point pattern contains", np, "points"))
  if(any(lambdaY < 0))
    stop(paste("Negative values of", lambdaname, "obtained"))
  if(any(lambdaY == 0))
    stop(paste("Zero values of", lambdaname, "obtained"))
  if(danger)
    attr(lambdaY, "dangerous") <- lambdaname
  return(lambdaY)
}

## Workhorse for linearK/linearKinhom: computes the estimate from
## pairwise network distances, optionally with Ang's edge correction.
## (Definition continues in the next chunk.)
linearKengine <- function(X, ..., r=NULL, reweight=NULL, denom=1,
                          correction="Ang", ratio=FALSE, showworking=FALSE) {
  # ensure distance information is present
  X <- as.lpp(X, sparse=FALSE)
  # extract info about pattern
  np <- npoints(X)
  # extract linear network
  L <- domain(X)
  W <- Window(L)
  # determine r values
  rmaxdefault <- 0.98 * boundingradius(L)
  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  #
  type <- if(correction == "Ang") "L" else "net"
  fname <- c("K", type)
  ylab <- substitute(K[type](r), list(type=type))
  #
  if(np < 2) {
    # no pairs to count: return zero function
    zeroes <- numeric(length(r))
    df <- data.frame(r = r, est = zeroes)
    K <- ratfv(df, NULL, 0, "r", ylab, "est", .
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname, ratio=ratio) unitname(K) <- unitname(X) if(correction == "Ang") { # tack on theoretical value K <- bind.ratfv(K, quotient = data.frame(theo=r), denominator = 0, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s", ratio = ratio) } attr(K, "correction") <- correction return(K) } # compute pairwise distances D <- pairdist(X) #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(D, r, denom=denom, fname=fname, ratio=ratio) K <- rebadge.fv(K, ylab, fname) unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } if(correction == "none") { edgewt <- 1 } else { ## inverse m weights (Wei's correction) ## determine tolerance toler <- default.linnet.tolerance(L) ## compute m[i,j] m <- DoCountEnds(X, D, toler) edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(D, r, weights=wt, denom=denom, fname=fname, ratio=ratio) # tack on theoretical value if(ratio) { K <- bind.ratfv(K, quotient = data.frame(theo = r), denominator = denom, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s") } else { K <- bind.fv(K, data.frame(theo=r), makefvlabel(NULL, NULL, fname, "theo"), "theoretical Poisson %s") } K <- rebadge.fv(K, ylab, fname) unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(D=D, wt=wt) attr(K, "correction") <- correction return(K) } ApplyConnected <- function(X, Engine, r=NULL, ..., rule, auxdata=NULL) { # Apply 'Engine' to each connected component of domain(X) stopifnot(is.function(rule)) # Ensure distance information is present X <- as.lpp(X, sparse=FALSE) L <- domain(X) # check network connectivity br <- boundingradius(L) if(disco <- is.infinite(br)) { # disconnected network XX <- connected(X) LL <- 
lapply(XX, domain) br <- max(sapply(LL, boundingradius)) } else XX <- NULL # determine r values rmaxdefault <- 0.98 * br breaks <- handle.r.b.args(r, NULL, Window(L), rmaxdefault=rmaxdefault) r <- breaks$r if(!disco) { # single connected network stuff <- rule(X=X, auxdata=auxdata, ...) result <- do.call(Engine, append(list(X=X, r=r), stuff)) return(result) } # disconnected network nsub <- length(XX) results <- anylist() denoms <- numeric(nsub) for(i in seq_len(nsub)) { X.i <- XX[[i]] sub.i <- attr(X.i, "retainpoints") # identifies which points of X aux.i <- if(length(auxdata) == 0) NULL else lapply(auxdata, marksubset, index=sub.i) stuff.i <- rule(X=X.i, auxdata=aux.i, ...) denoms[i] <- stuff.i$denom %orifnull% 1 results[[i]] <- do.call(Engine, append(list(X=X.i, r=r), stuff.i)) } result <- do.call(pool, append(results, list(weights=denoms, relabel=FALSE, variance=FALSE))) return(result) } DoCountEnds <- function(X, D, toler) { stopifnot(is.lpp(X)) stopifnot(is.matrix(D)) nX <- npoints(X) if(nrow(D) != nX) stopifnot(nrow(D) == npoints(X)) if(ncol(D) != nX) stopifnot(ncol(D) == npoints(X)) m <- matrix(1, nX, nX) easy <- list(is.connected=TRUE) L <- domain(X) if(is.connected(L)) { ## network is connected for(j in 1:nX) { m[ -j, j] <- countends(L, X[-j], D[-j,j], toler=toler, internal=easy) } } else { ## network is disconnected - split into components vlab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), factor(vlab)) for(subi in subsets) { ## extract one component and the points falling in it Xsubi <- thinNetwork(X, retainvertices=subi) ni <- npoints(Xsubi) if(ni >= 2) { Lsubi <- domain(Xsubi) ## identify which points of X are involved imap <- which(attr(Xsubi, "retainpoints")) ## handle for(j in seq_len(ni)) { ij <- imap[j] i.j <- imap[-j] m[ i.j, ij ] <- countends(Lsubi, Xsubi[-j], D[i.j, ij], toler=toler, internal=easy) } } } } if(any(uhoh <- (m == 0) & is.finite(D))) { warning("Internal error: disc boundary count equal to zero") m[uhoh] <- 1 
} return(m) } spatstat/R/stienen.R0000644000176200001440000000363213461516003014111 0ustar liggesusers## stienen.R ## ## Stienen diagram with border correction ## ## $Revision: 1.8 $ $Date: 2015/10/21 09:06:57 $ stienen <- function(X, ..., bg="grey", border=list(bg=NULL)) { Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) if(npoints(X) <= 1) { do.call(plot, resolve.defaults(list(x=Window(X)), list(...), list(main=Xname))) return(invisible(NULL)) } d <- nndist(X) b <- bdist.points(X) Y <- X %mark% d gp <- union(graphicsPars("symbols"), "lwd") do.call.plotfun(plot.ppp, resolve.defaults(list(x=Y[b >= d], markscale=1), list(...), list(bg=bg), list(main=Xname)), extrargs=gp) if(!identical(border, FALSE)) { if(!is.list(border)) border <- list() do.call.plotfun(plot.ppp, resolve.defaults(list(x=Y[b < d], markscale=1, add=TRUE), border, list(...), list(bg=bg), list(cols=grey(0.5), lwd=2)), extrargs=gp) } return(invisible(NULL)) } stienenSet <- function(X, edge=TRUE) { stopifnot(is.ppp(X)) nnd <- nndist(X) if(!edge) { ok <- bdist.points(X) >= nnd X <- X[ok] nnd <- nnd[ok] } n <- npoints(X) if(n == 0) return(emptywindow(Window(X))) if(n == 1) return(Window(X)) rad <- nnd/2 if(!all(ok <- (rad > 0))) { eps <- min(rad[ok], shortside(Frame(X)))/100 rad <- pmax(rad, eps) } delta <- 2 * pi * max(rad)/128 Z <- disc(rad[1], X[1], delta=delta) for(i in 2:n) Z <- union.owin(Z, disc(rad[i], X[i], delta=delta)) return(Z) } spatstat/R/tess.R0000644000176200001440000010447113536721455013441 0ustar liggesusers# # tess.R # # support for tessellations # # $Revision: 1.97 $ $Date: 2019/09/13 04:29:08 $ # tess <- function(..., xgrid=NULL, ygrid=NULL, tiles=NULL, image=NULL, window=NULL, marks=NULL, keepempty=FALSE, unitname=NULL, check=TRUE) { uname <- unitname if(!is.null(window)) { window <- as.owin(window) if(is.null(uname)) uname <- unitname(window) } isrect <- !is.null(xgrid) && !is.null(ygrid) istiled <- !is.null(tiles) isimage <- !is.null(image) if(isrect + istiled + isimage 
!= 1) stop("Must specify either (xgrid, ygrid) or tiles or img") if(isrect) { stopifnot(is.numeric(xgrid) && all(diff(xgrid) > 0)) stopifnot(is.numeric(ygrid) && all(diff(ygrid) > 0)) if(!is.null(window)) warning("Argument 'window' ignored, because xgrid, grid are given") window <- owin(range(xgrid), range(ygrid), unitname=uname) ntiles <- (length(xgrid)-1) * (length(ygrid)-1) out <- list(type="rect", window=window, xgrid=xgrid, ygrid=ygrid, n=ntiles) } else if(istiled) { stopifnot(is.list(tiles)) if(check) { if(!all(sapply(tiles, is.owin))) stop("Tiles must be a list of owin objects") if(!is.null(uname)) { ## attach new unit name to each tile tiles <- lapply(tiles, "unitname<-", value=uname) } else { ## extract unit names from tiles, check agreement, use as unitname uu <- unique(lapply(tiles, unitname)) uu <- uu[!sapply(uu, is.null)] nun <- length(uu) if(nun > 1) stop("Tiles have inconsistent names for the unit of length") if(nun == 1) { ## use this unit name uname <- uu[[1]] if(!is.null(window)) unitname(window) <- uname } } } if(!keepempty && check) { # remove empty tiles isempty <- sapply(tiles, is.empty) if(all(isempty)) stop("All tiles are empty") if(any(isempty)) tiles <- tiles[!isempty] } ntiles <- length(tiles) nam <- names(tiles) lev <- if(!is.null(nam) && all(nzchar(nam))) nam else 1:ntiles if(is.null(window)) window <- do.call(union.owin, unname(tiles)) if(is.mask(window) || any(sapply(tiles, is.mask))) { # convert to pixel image tessellation Grid <- do.call(commonGrid, append(list(window), unname(tiles))) ima <- as.im(window, W=Grid) ima$v[] <- NA for(i in 1:ntiles) ima[tiles[[i]]] <- i ima <- ima[window, drop=FALSE] ima <- eval.im(factor(ima, levels=1:ntiles)) levels(ima) <- lev out <- list(type="image", window=window, image=ima, n=length(lev)) } else { # tile list window <- rescue.rectangle(window) out <- list(type="tiled", window=window, tiles=tiles, n=length(tiles)) } } else if(isimage) { # convert to factor valued image image <- as.im(image) 
if(!is.null(uname)) unitname(image) <- uname switch(image$type, logical={ # convert to factor if(keepempty) image <- eval.im(factor(image, levels=c(FALSE,TRUE))) else image <- eval.im(factor(image)) }, factor={ # eradicate unused levels if(!keepempty) image <- eval.im(factor(image)) }, { # convert to factor image <- eval.im(factor(image)) }) if(is.null(window)) window <- as.owin(image) out <- list(type="image", window=window, image=image, n=length(levels(image))) } else stop("Internal error: unrecognised format") ## add marks! if(!is.null(marks)) { marks <- as.data.frame(marks) if(nrow(marks) != out$n) stop(paste("wrong number of marks:", nrow(marks), "should be", out$n), call.=FALSE) out$marks <- marks } class(out) <- c("tess", class(out)) return(out) } is.tess <- function(x) { inherits(x, "tess") } print.tess <- function(x, ..., brief=FALSE) { full <- !brief if(full) cat("Tessellation\n") win <- x$window switch(x$type, rect={ if(full) { unitinfo <- summary(unitname(win)) if(equispaced(x$xgrid) && equispaced(x$ygrid)) splat("Tiles are equal rectangles, of dimension", signif(mean(diff(x$xgrid)), 5), "x", signif(mean(diff(x$ygrid)), 5), unitinfo$plural, " ", unitinfo$explain) else splat("Tiles are unequal rectangles") } splat(length(x$xgrid)-1, "by", length(x$ygrid)-1, "grid of tiles") }, tiled={ if(full) { if(win$type == "polygonal") splat("Tiles are irregular polygons") else splat("Tiles are windows of general type") } splat(length(x$tiles), "tiles (irregular windows)") }, image={ nlev <- length(levels(x$image)) if(full) { splat("Tessellation is determined by a factor-valued image with", nlev, "levels") } else splat(nlev, "tiles (levels of a pixel image)") }) if(!is.null(marx <- x$marks)) { m <- dim(marx)[2] %orifnull% 1 if(m == 1) splat("Tessellation is marked") else splat("Tessellation has", m, "columns of marks:", commasep(sQuote(colnames(marx)))) } if(full) print(win) invisible(NULL) } unitname.tess <- function(x) unitname(x$window) "unitname<-.tess" <- 
function(x, value) { unitname(x$window) <- value switch(x$type, rect={}, tiled={ x$tiles <- lapply(x$tiles, "unitname<-", value) }, image={ unitname(x$image) <- value }) return(x) } plot.tess <- local({ plotpars <- c("sub", "lty", "lwd", "cex.main", "col.main", "font.main", "cex.sub", "col.sub", "font.sub", "border") plot.tess <- function(x, ..., main, add=FALSE, show.all=!add, border=NULL, do.plot=TRUE, do.labels=FALSE, labels=tilenames(x), labelargs=list(), do.col=FALSE, values=marks(x), multiplot=TRUE, col=NULL, ribargs=list()) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ntiles <- x$n if(!do.col) { #' Plot tiles, with adornment y <- NULL result <- NULL bbox <- NULL need.legend <- FALSE } else { #' Fill tiles with colours determined by 'values' if(markformat(values) == "hyperframe") values <- as.data.frame(values) #' automatic warning #' Determine values associated with each tile switch(markformat(values), none = { #' no values assigned. #' default is tile name tn <- tilenames(x) values <- factor(tn, levels=tn) }, vector = { #' vector of values. #' validate length of vector check.anyvector(values, ntiles, things="tiles") }, dataframe = { #' data frame or matrix of values. 
values <- as.data.frame(values) if(nrow(values) != ntiles) stop(paste("Number of rows of values =", nrow(values), "!=", ntiles, "= number of tiles"), call.=FALSE) if(multiplot && ncol(values) > 1 && !add) { #' Multiple Panel Plot result <- multi.plot.tess(x, ..., main=main, show.all=show.all, border=border, do.plot=do.plot, do.labels=do.labels, labels=labels, labelargs=labelargs, do.col=do.col, col=col, ribargs=ribargs) return(invisible(result)) } if(ncol(values) > 1) warning("Using only the first column of values") values <- values[,1] }, stop("Format of values is not understood") ) #' Single Panel Plot #' Determine colour map and plan layout (including colour ribbon) #' using rules for pixel images y <- as.im(as.function(x, values=values)) result <- do.call(plot.im, resolve.defaults( list(x=y, do.plot=FALSE, show.all=show.all, add=add, main=main, col=col, ribargs=ribargs), list(...), list(valuesAreColours=FALSE) )) #' exit if not actually plotting if(!do.plot) return(invisible(result)) #' extract info colmap <- result bbox <- attr(result, "bbox") bbox.legend <- attr(result, "bbox.legend") need.legend <- !is.null(bbox.legend) } #' Start Plot #' initialise plot region if it is determined if(do.plot && !is.null(bbox) && !add) { plot(bbox, main=" ", type="n") add <- TRUE } switch(x$type, rect={ win <- x$window z <- do.call.matched(plot.owin, resolve.defaults(list(x=win, main=main, add=add, show.all=show.all, do.plot=do.plot), list(...)), extrargs=plotpars) if(is.null(result)) result <- z if(do.plot) { #' actually plot if(do.col) { #' fill tiles with colours colours <- colmap(values) til <- tiles(x) for(i in seq_len(x$n)) plot(til[[i]], add=TRUE, col=colours[i], border=border, main="", ...) 
} else { #' draw tile boundaries only xg <- x$xgrid yg <- x$ygrid do.call.matched(segments, resolve.defaults(list(x0=xg, y0=win$yrange[1], x1=xg, y1=win$yrange[2]), list(col=border), list(...), .StripNull=TRUE)) do.call.matched(segments, resolve.defaults(list(x0=win$xrange[1], y0=yg, x1=win$xrange[2], y1=yg), list(col=border), list(...), .StripNull=TRUE)) } } }, tiled={ z <- do.call.matched(plot.owin, resolve.defaults(list(x=x$window, main=main, add=add, show.all=show.all, do.plot=do.plot), list(...)), extrargs=plotpars) if(is.null(result)) result <- z if(do.plot) { #' plot each tile til <- tiles(x) if(!do.col) { #' border only lapply(til, plot.owin, ..., add=TRUE, border=border) } else { #' fill with colour colours <- colmap(values) mapply(plot.owin, x=til, col=colours, MoreArgs=list(add=TRUE, main="", border=border, ...)) } } }, image={ if(is.null(y)) y <- x$image result <- do.call(plot, resolve.defaults(list(y, add=add, main=main, show.all=show.all, do.plot=do.plot, col=col, ribargs=ribargs), list(...), list(valuesAreColours=FALSE))) need.legend <- FALSE }) if(do.plot && do.labels) { labels <- paste(as.vector(labels)) til <- tiles(x) incircles <- lapply(til, incircle) x0 <- sapply(incircles, getElement, name="x") y0 <- sapply(incircles, getElement, name="y") do.call.matched(text.default, resolve.defaults(list(x=x0, y = y0), list(labels=labels), labelargs), funargs=graphicsPars("text")) } if(do.plot && need.legend) { #' determine position of legend xlim <- bbox.legend$xrange ylim <- bbox.legend$yrange sidecode <- attr(colmap, "side.legend") vertical <- sidecode %in% c(2,4) do.call(plot.colourmap, resolve.defaults(list(x=colmap, add=TRUE, main="", xlim=xlim, ylim=ylim, side=sidecode, vertical=vertical), ribargs, list(...))) } return(invisible(result)) } multi.plot.tess <- function(x, ..., zlim=NULL, col=NULL, equal.ribbon=FALSE) { if(equal.ribbon && is.null(zlim) && !inherits(col, "colourmap")) zlim <- range(marks(x)) if(!is.null(zlim)) { result <- 
plot(unstack(x), ..., zlim=zlim, col=col) } else { result <- plot(unstack(x), ..., col=col) } return(invisible(result)) } plot.tess }) "[<-.tess" <- function(x, i, ..., value) { switch(x$type, rect=, tiled={ til <- tiles(x) til[i] <- value ok <- !unlist(lapply(til, is.null)) x <- tess(tiles=til[ok]) }, image={ stop("Cannot assign new values to subsets of a pixel image") }) return(x) } "[.tess" <- function(x, i, ...) { trap.extra.arguments(..., .Context="in [.tess") if(missing(i)) return(x) if(is.owin(i)) return(intersect.tess(x, i)) switch(x$type, rect=, tiled={ til <- tiles(x)[i] return(tess(tiles=til)) }, image={ img <- x$image oldlev <- levels(img) newlev <- unique(oldlev[i]) img <- eval.im(factor(img, levels=newlev)) return(tess(image=img)) }) } tiles <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect={ out <- list() xg <- x$xgrid yg <- x$ygrid nx <- length(xg) - 1 ny <- length(yg) - 1 for(j in rev(seq_len(ny))) { for(i in seq_len(nx)) { winij <- owin(xg[c(i,i+1)], yg[c(j,j+1)]) out <- append(out, list(winij)) } } }, tiled={ out <- x$tiles }, image={ out <- list() ima <- x$image lev <- levels(ima) for(i in seq_along(lev)) out[[i]] <- solutionset(ima == lev[i]) }) names(out) <- tilenames(x) out <- as.solist(out) return(out) } tiles.empty <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect = { nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 ans <- rep(FALSE, nx * ny) }, tiled = { ans <- sapply(x$tiles, is.empty) }, image = { ans <- (table(x$image[]) == 0) }) return(ans) } tilenames <- function(x) { UseMethod("tilenames") } tilenames.tess <- function(x) { switch(x$type, rect={ if(!is.null(x$tilenames)) { out <- x$tilenames } else { nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 ij <- expand.grid(1:nx, 1:ny) out <- paste0("Tile row ", ij[,2], ", col ", ij[,1]) } }, tiled={ out <- names(x$tiles) if(sum(nzchar(out)) != x$n) out <- paste("Tile", seq_len(x$n)) }, image={ out <- levels(x$image) } ) return(as.character(out)) } "tilenames<-" <- 
function(x, value) { UseMethod("tilenames<-") } "tilenames<-.tess" <- function(x, value) { if(!is.null(value)) { ## validate length value <- as.character(value) nv <- length(value) switch(x$type, rect = { nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 n <- nx * ny }, tiled = { n <- length(x$tiles) }, image = { n <- length(levels(x$image)) }) if(nv != n) stop("Replacement value has wrong length", paren(paste(nv, "instead of", n))) } switch(x$type, rect={ x$tilenames <- value }, tiled={ names(x$tiles) <- value }, image={ levels(x$image) <- value %orifnull% (1:n) } ) return(x) } marks.tess <- function(x, ...) { stopifnot(is.tess(x)) return(x$marks) } "marks<-.tess" <- function(x, ..., value) { stopifnot(is.tess(x)) if(!is.null(value)) { value <- as.data.frame(value) ntil <- x$n if(nrow(value) != ntil) stop(paste("replacement value for marks has wrong length:", nrow(value), "should be", ntil), call.=FALSE) rownames(value) <- NULL if(ncol(value) == 1) colnames(value) <- "marks" } x$marks <- value return(x) } unmark.tess <- function(X) { marks(X) <- NULL; return(X) } tile.areas <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect={ xg <- x$xgrid yg <- x$ygrid # nx <- length(xg) - 1 # ny <- length(yg) - 1 a <- outer(rev(diff(yg)), diff(xg), "*") a <- as.vector(t(a)) names(a) <- as.vector(t(tilenames(x))) }, tiled={ a <- unlist(lapply(x$tiles, area)) }, image={ z <- x$image a <- table(z$v) * z$xstep * z$ystep }) return(a) } as.im.tess <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { # if W is present, it may have to be converted if(!is.null(W)) { stopifnot(is.owin(W)) if(W$type != "mask") W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy) } switch(X$type, image={ out <- as.im(X$image, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace) }, tiled={ if(is.null(W)) W <- as.mask(as.owin(X), eps=eps, dimyx=dimyx, xy=xy) til <- X$tiles ntil <- length(til) nama <- names(til) if(is.null(nama) || !all(nzchar(nama))) nama <- paste(seq_len(ntil)) 
xy <- list(x=W$xcol, y=W$yrow) for(i in seq_len(ntil)) { indic <- as.mask(til[[i]], xy=xy) tag <- as.im(indic, value=i) if(i == 1) { out <- tag outv <- out$v } else { outv <- pmin.int(outv, tag$v, na.rm=TRUE) } } out <- im(factor(outv, levels=seq_len(ntil), labels=nama), out$xcol, out$yrow) unitname(out) <- unitname(W) }, rect={ if(is.null(W)) out <- as.im(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy) else out <- as.im(W) xg <- X$xgrid yg <- X$ygrid nrows <- length(yg) - 1 ncols <- length(xg) - 1 jx <- findInterval(out$xcol, xg, rightmost.closed=TRUE) iy <- findInterval(out$yrow, yg, rightmost.closed=TRUE) M <- as.matrix(out) Jcol <- jx[col(M)] Irow <- nrows - iy[row(M)] + 1 Ktile <- Jcol + ncols * (Irow - 1) Ktile <- factor(Ktile, levels=seq_len(nrows * ncols)) out <- im(Ktile, xcol=out$xcol, yrow=out$yrow, unitname=unitname(W)) } ) return(out) } nobjects.tess <- function(x) { switch(x$type, image = length(levels(x$image)), rect = (length(x$xgrid)-1) * (length(x$ygrid)-1), tiled = length(x$tiles)) } as.function.tess <- function(x, ..., values=NULL) { V <- x if(is.null(values)) { f <- function(x,y) { tileindex(x,y,V) } } else { if(length(values) != nobjects(x)) stop("Length of 'values' should equal the number of tiles", call.=FALSE) f <- function(x,y) { values[as.integer(tileindex(x,y,V))] } } g <- funxy(f, Window(V)) return(g) } tileindex <- function(x, y, Z) { stopifnot(is.tess(Z)) if((missing(y) || is.null(y)) && all(c("x", "y") %in% names(x))) { y <- x$y x <- x$x } stopifnot(length(x) == length(y)) switch(Z$type, rect={ jx <- findInterval(x, Z$xgrid, rightmost.closed=TRUE) iy <- findInterval(y, Z$ygrid, rightmost.closed=TRUE) nrows <- length(Z$ygrid) - 1 ncols <- length(Z$xgrid) - 1 iy[iy < 1 | iy > nrows] <- NA jx[jx < 1 | jx > ncols] <- NA jcol <- jx irow <- nrows - iy + 1 ktile <- jcol + ncols * (irow - 1) m <- factor(ktile, levels=seq_len(nrows*ncols)) ij <- expand.grid(j=seq_len(ncols),i=seq_len(nrows)) levels(m) <- paste("Tile row ", ij$i, ", col ", ij$j, 
sep="")
         },
         tiled={
           ## list-of-tiles case: assign each query point to a tile by
           ## testing the tiles one at a time, removing points from the
           ## work list as soon as they are assigned
           n <- length(x)
           todo <- seq_len(n)          # indices of points not yet assigned
           nt <- length(Z$tiles)
           m <- integer(n)             # tile index per point (0 = unassigned)
           for(i in 1:nt) {
             ti <- Z$tiles[[i]]
             hit <- inside.owin(x[todo], y[todo], ti)
             if(any(hit)) {
               m[todo[hit]] <- i
               todo <- todo[!hit]
             }
             if(length(todo) == 0)
               break
           }
           ## points falling in no tile become NA
           m[m == 0] <- NA
           nama <- names(Z$tiles)
           lev <- seq_len(nt)
           ## use the tile names as factor labels if all are present,
           ## otherwise default labels "Tile i"
           lab <- if(!is.null(nama) && all(nzchar(nama))) nama else
                  paste("Tile", lev)
           m <- factor(m, levels=lev, labels=lab)
         },
         image={
           ## pixel-image case: read off the factor value at each point
           Zim <- Z$image
           m <- lookup.im(Zim, x, y, naok=TRUE)
           if(anyNA(m)) {
             #' look up neighbouring pixels
             isna <- is.na(m)
             rc <- nearest.valid.pixel(x[isna], y[isna], Zim, nsearch=2)
             m[isna] <- Zim$v[cbind(rc$row, rc$col)]
           }
         }
  )
  return(m)
}

## Convert other kinds of objects to tessellations (generic).
as.tess <- function(X) { UseMethod("as.tess") }

## Method for "tess": rebuild the object keeping only the fields
## relevant to its representation type (drops extraneous entries).
as.tess.tess <- function(X) {
  fields <- switch(X$type,
                   rect={ c("xgrid", "ygrid") },
                   tiled={ "tiles" },
                   image={ "image" },
                   stop(paste("Unrecognised tessellation type",
                              sQuote(X$type))))
  fields <- c(c("type", "window", "n", "marks"), fields)
  X <- unclass(X)[fields]
  class(X) <- c("tess", class(X))
  return(X)
}

## Method for pixel images: tiles are the level sets of the image.
as.tess.im <- function(X) { return(tess(image = X)) }

## Method for lists: each element is coerced to a window and
## becomes one tile.
as.tess.list <- function(X) {
  W <- lapply(X, as.owin)
  return(tess(tiles=W))
}

## Method for windows: a tessellation consisting of a single tile.
as.tess.owin <- function(X) { return(tess(tiles=list(X))) }

## Extract the window containing the tessellation.
domain.tess <- Window.tess <- function(X, ...)
{ as.owin(X) } intersect.tess <- function(X, Y, ..., keepmarks=FALSE, sep="x") { X <- as.tess(X) check.1.string(sep) if(is.owin(Y)) { ## intersection of a tessellation with a window if(Y$type == "mask") { ## convert to pixel image result <- as.im(Y) Xtiles <- tiles(X) seqXtiles <- seq_along(Xtiles) for(i in seqXtiles) { tilei <- Xtiles[[i]] result[tilei] <- i } result <- result[Y, drop=FALSE] out <- tess(image=result, window=Y) if(keepmarks && !is.null(marx <- marks(X))) { #' identify non-empty tiles tab <- table(factor(result[], levels=seqXtiles)) marks(out) <- marksubset(marx, tab > 0) } return(out) } else { ## efficient code when Y is a window, retaining names of tiles of X Ztiles <- lapply(tiles(X), intersect.owin, B=Y, ..., fatal=FALSE) isempty <- sapply(Ztiles, is.empty) Ztiles <- Ztiles[!isempty] Xwin <- as.owin(X) Ywin <- Y Zwin <- intersect.owin(Xwin, Ywin) out <- tess(tiles=Ztiles, window=Zwin) if(keepmarks) { marx <- marks(X) if(!is.null(marx)) marx <- as.data.frame(marx)[!isempty, ] marks(out) <- marx } return(out) } } ## general case: intersection of two tessellations Y <- as.tess(Y) Xtiles <- tiles(X) Ytiles <- tiles(Y) Ztiles <- list() namesX <- tilenames(X) namesY <- tilenames(Y) if(keepmarks) { ## initialise the mark variables to be inherited from parent tessellations Xmarks <- as.data.frame(marks(X)) Ymarks <- as.data.frame(marks(Y)) gotXmarks <- (ncol(Xmarks) > 0) gotYmarks <- (ncol(Ymarks) > 0) if(gotXmarks && gotYmarks) { colnames(Xmarks) <- paste0("X", colnames(Xmarks)) colnames(Ymarks) <- paste0("Y", colnames(Ymarks)) } if(gotXmarks || gotYmarks) { marx <- if(gotXmarks && gotYmarks) { cbind(Xmarks[integer(0), , drop=FALSE], Ymarks[integer(0), , drop=FALSE]) } else if(gotXmarks) { Xmarks[integer(0), , drop=FALSE] } else { Ymarks[integer(0), , drop=FALSE] } } else keepmarks <- FALSE } ## now compute intersection tiles Xtrivial <- (length(Xtiles) == 1) for(i in seq_along(Xtiles)) { Xi <- Xtiles[[i]] Ti <- lapply(Ytiles, intersect.owin, B=Xi, 
..., fatal=FALSE) isempty <- sapply(Ti, is.empty) nonempty <- !isempty if(any(nonempty)) { Ti <- Ti[nonempty] names(Ti) <- if(Xtrivial) namesY[nonempty] else paste(namesX[i], namesY[nonempty], sep=sep) Ztiles <- append(Ztiles, Ti) if(keepmarks) { extra <- if(gotXmarks && gotYmarks) { data.frame(X=Xmarks[i, ,drop=FALSE], Y=Ymarks[nonempty, ,drop=FALSE], row.names=NULL) } else if(gotYmarks) { Ymarks[nonempty, ,drop=FALSE] } else { Xmarks[rep(i, sum(nonempty)), ,drop=FALSE] } marx <- rbind(marx, extra) } } } ## form tessellation object Xwin <- as.owin(X) Ywin <- as.owin(Y) Zwin <- intersect.owin(Xwin, Ywin) out <- tess(tiles=Ztiles, window=Zwin) if(keepmarks) marks(out) <- marx return(out) } venn.tess <- function(..., window=NULL) { argh <- list(...) nargh <- length(argh) if(nargh == 0) stop("No arguments given") iswin <- sapply(argh, is.owin) istes <- sapply(argh, is.tess) if(!all(iswin | istes)) stop("All arguments must be windows or tessellations", call.=FALSE) nama <- names(argh) if(sum(nzchar(nama)) < nargh) nama <- paste0("T", seq_len(nargh)) W <- window %orifnull% do.call(union.owin, unname(lapply(argh, as.owin))) for(i in seq_len(nargh)) { A <- argh[[i]] if(is.owin(A)) { Z <- list(A, Out=setminus.owin(W, A)) names(Z) <- paste0(c("", "not"), nama[i]) A <- tess(tiles=Z, window=W) } Y <- if(i == 1) A else intersect.tess(Y, A) } return(Y) } bdist.tiles <- local({ vdist <- function(x,w) { z <- as.ppp(vertices(x), W=w, check=FALSE) min(bdist.points(z)) } edist <- function(x,b) { xd <- crossdist(edges(x, check=FALSE), b, type="separation") min(xd) } bdist.tiles <- function(X) { if(!is.tess(X)) stop("X must be a tessellation") W <- as.owin(X) switch(X$type, rect=, tiled={ tt <- tiles(X) if(is.convex(W)) { # distance is minimised at a tile vertex d <- sapply(tt, vdist, w=W) } else { # coerce everything to polygons W <- as.polygonal(W) tt <- lapply(tt, as.polygonal) # compute min dist from tile edges to window edges d <- sapply(tt, edist, b=edges(W)) } }, image={ Xim <- 
X$image # compute boundary distance for each pixel bd <- bdist.pixels(as.owin(Xim), style="image") bd <- bd[W, drop=FALSE] # split over tiles bX <- split(bd, X) # compute minimum distance over each level of factor d <- sapply(bX, function(z) { summary(z)$min }) } ) return(d) } bdist.tiles }) ## ......... geometrical transformations .................. shift.tess <- function(X, ...) { Y <- X Y$window <- wY <- shift(X$window, ...) vec <- getlastshift(wY) switch(X$type, rect={ Y$xgrid <- Y$xgrid + vec[1] Y$ygrid <- Y$ygrid + vec[2] }, tiled={ Y$tiles <- lapply(Y$tiles, shift, vec=vec) }, image = { Y$image <- shift(Y$image, vec) }) attr(Y, "lastshift") <- vec return(Y) } affine.tess <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { Y <- X Y$window <- affine(X$window, mat=mat, vec=vec, ...) switch(Y$type, rect = { if(all(mat == diag(diag(mat)))) { ## result is rectangular Y$xgrid <- sort(mat[1,1] * X$xgrid + vec[1]) Y$ygrid <- sort(mat[2,2] * X$ygrid + vec[2]) } else { ## shear transformation; treat rectangles as general tiles Y <- tess(tiles=tiles(X), window=Y$window) Y$tiles <- lapply(Y$tiles, affine, mat=mat, vec=vec, ...) } }, tiled={ Y$tiles <- lapply(Y$tiles, affine, mat=mat, vec=vec, ...) }, image = { Y$image <- affine(Y$image, mat=mat, vec=vec, ...) }) return(Y) } reflect.tess <- function(X) { Y <- X Y$window <- reflect(Y$window) switch(X$type, rect = { Y$xgrid <- rev(- Y$xgrid) Y$ygrid <- rev(- Y$ygrid) }, tiled = { Y$tiles <- lapply(Y$tiles, reflect) }, image = { Y$image <- reflect(Y$image) }) return(Y) } flipxy.tess <- function(X) { Y <- X Y$window <- flipxy(Y$window) switch(X$type, rect = { Y$xgrid <- X$ygrid Y$ygrid <- X$xgrid }, tiled = { Y$tiles <- lapply(Y$tiles, flipxy) }, image = { Y$image <- flipxy(Y$image) }) return(Y) } scalardilate.tess <- function(X, f, ...) { Y <- X Y$window <- scalardilate(X$window, f, ...) switch(X$type, rect = { Y$xgrid <- f * Y$xgrid Y$ygrid <- f * Y$ygrid }, tiled = { Y$tiles <- lapply(Y$tiles, scalardilate, f=f, ...) 
}, image = { Y$image <- scalardilate(Y$image, f=f, ...) }) return(Y) } rotate.tess <- function(X, angle=pi/2, ..., centre=NULL) { if(angle %% (2 * pi) == 0) return(X) if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$window <- rotate(X$window, angle=angle, ...) switch(X$type, rect = { if(angle %% (pi/2) == 0) { ## result is rectangular co <- round(cos(angle)) si <- round(sin(angle)) Y$xgrid <- sort((if(co == 0) 0 else (co * X$xgrid)) - (if(si == 0) 0 else (si * X$ygrid))) Y$ygrid <- sort((if(si == 0) 0 else (si * X$xgrid)) + (if(co == 0) 0 else (co * X$ygrid))) } else { ## general tessellation Y <- tess(tiles=lapply(tiles(X), rotate, angle=angle, ...), window=Y$window) } }, tiled = { Y$tiles <- lapply(X$tiles, rotate, angle=angle, ...) }, image = { Y$image <- rotate(X$image, angle=angle, ...) }) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } as.data.frame.tess <- function(x, ...) { switch(x$type, rect =, tiled = { y <- lapply(tiles(x), as.data.frame, ...) z <- mapply(assignDFcolumn, x=y, value=tilenames(x), MoreArgs=list(name="Tile", ...), SIMPLIFY=FALSE) z <- do.call(rbind, z) row.names(z) <- NULL }, image = { z <- as.data.frame(x$image, ...) if(!is.na(m <- match("value", colnames(z)))) colnames(z)[m] <- "Tile" }, { z <- NULL warning("Unrecognised type of tessellation") }) return(z) } connected.tess <- function(X, ...) { Xim <- as.im(X, ...) 
X <- as.tess(Xim)
  tilesX <- tiles(X)
  namesX <- names(tilesX)
  shards <- lapply(tilesX, connected)  # list of factor images
  shardnames <- lapply(shards, levels)
  nshards <- lengths(shardnames)
  broken <- (nshards > 1)
  #' unbroken tiles keep their original tile names
  shardnames[!broken] <- namesX[!broken]
  #' shards of broken tiles are named "tilename[i] shard j"
  shardnames[broken] <- mapply(paste,
                               namesX[broken], "shard", shardnames[broken],
                               SIMPLIFY=FALSE)
  #' rename them
  shards <- mapply("levels<-", shards, shardnames, SIMPLIFY=FALSE)
  #' separate them
  shards <- lapply(lapply(shards, as.tess), tiles)
  shards <- unlist(shards, recursive=FALSE, use.names=FALSE)
  names(shards) <- unlist(shardnames)
  #' form tessellation
  result <- tess(tiles=shards, window=as.owin(Xim))
  result
}
spatstat/R/fourierbasis.R0000644000176200001440000000156713333543255015155 0ustar liggesusers#' fourierbasis.R
#' $Revision: 1.4 $ $Date: 2017/11/04 04:10:32 $

## Evaluate the Fourier basis functions with frequency indices 'k'
## (one row per basis function) at the points 'x' (one row per point)
## in the box 'win'.  Returns a complex matrix with one row per basis
## function and one column per point; the empty complex vector when
## either 'x' or 'k' has no rows.
fourierbasis <- function(x, k, win = boxx(rep(list(0:1), ncol(k)))) {
  x <- as.matrix(x)
  k <- as.matrix(k)
  ## scalar short-circuit '||' is the idiomatic form in an if() condition
  ## (was the vectorized '|')
  if (nrow(k) == 0 || nrow(x) == 0)
    return(complex())
  d <- ncol(x)
  if (ncol(k) != d)
    stop("Arguments x and k must have the same number of columns.")
  win <- as.boxx(win)
  ## side lengths of the box in each coordinate
  boxlengths <- as.numeric(win$ranges[2L, ] - win$ranges[1L, ])
  if (length(boxlengths) != d)
    stop("The box dimension differs from the number of columns in x and k")
  return(fourierbasisraw(x, k, boxlengths))
}

## Workhorse for fourierbasis(): computes
##   exp(2*pi*i * sum_j k_j x_j / L_j) / sqrt(prod(L))
## with no argument checking; 'x' and 'k' must be matrices with the
## same number of columns and 'boxlengths' the vector of L_j.
fourierbasisraw <- function(x, k, boxlengths) {
  two_pi_i <- 2 * pi * (0+1i)
  ## accumulate the inner product term-by-term via outer products,
  ## giving a (nrow(k) x nrow(x)) matrix
  rslt <- outer(k[, 1L], x[, 1L]/boxlengths[1L])
  d <- ncol(x)
  if (d > 1) {
    for (i in 2:d) {
      rslt <- rslt + outer(k[, i], x[, i]/boxlengths[i])
    }
  }
  return(exp(two_pi_i * rslt)/sqrt(prod(boxlengths)))
}
spatstat/R/residppm.R0000644000176200001440000000704313333543255014276 0ustar liggesusers#
# residppm.R
#
# computes residuals for fitted point process model
#
#
# $Revision: 1.25 $ $Date: 2017/12/07 03:03:48 $
#
residuals.ppm <- function(object, type="raw", ..., check=TRUE, drop=FALSE,
                          fittedvalues = NULL,
new.coef=NULL, dropcoef=FALSE, quad=NULL) { verifyclass(object, "ppm") trap.extra.arguments(..., .Context="In residuals.ppm") type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson", score="score")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals", score="score residuals") typename <- typenames[[type]] given.fitted <- !missing(fittedvalues) && !is.null(fittedvalues) # ................. determine fitted values ................. NewCoef <- NULL if(is.null(new.coef) && is.null(quad)) { # use 'object' without modification # validate 'object' if(check && !given.fitted && damaged.ppm(object)) stop("object format corrupted; try update(object, use.internal=TRUE)") } else { # determine a new set of model coefficients if(!is.null(new.coef)) { # use specified model parameters NewCoef <- new.coef } else { # estimate model parameters using a (presumably) denser set of dummy pts # Determine new quadrature scheme if(is.quad(quad)) hi.res.quad <- quad else if(is.ppp(quad)) hi.res.quad <- quadscheme(data=data.ppm(object), dummy=quad) else { # assume 'quad' is a list of arguments to 'quadscheme' hi.res.quad <- do.call(quadscheme, append(list(data.ppm(object)), quad)) } # refit the model with new quadscheme hi.res.fit <- update(object, hi.res.quad) NewCoef <- coef(hi.res.fit) } } #' now compute fitted values using new coefficients if(!given.fitted) fittedvalues <- fitted(object, drop=drop, check=check, new.coef=NewCoef, dropcoef=dropcoef) # ..................... compute residuals ..................... 
# Extract quadrature points and weights Q <- quad.ppm(object, drop=drop, clip=drop) # U <- union.quad(Q) # quadrature points Z <- is.data(Q) # indicator data/dummy # W <- w.quad(Q) # quadrature weights # Compute fitted conditional intensity at quadrature points lambda <- fittedvalues # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) if(type == "score") { # need the covariates X <- model.matrix(object) if(drop) { gs <- getglmsubset(object) ok <- !is.na(gs) & gs X <- X[ok, , drop=FALSE] } } # Evaluate residual measure components discrete <- switch(type, raw = rep.int(1, sum(Z)), inverse = 1/lambda[Z], pearson = 1/sqrt(lambda[Z]), score = X[Z, , drop=FALSE] ) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda), score = -lambda * X) # Residual measure (return value) res <- msr(Q, discrete, density) # name the residuals attr(res, "type") <- type attr(res, "typename") <- typename return(res) } spatstat/R/minnndist.R0000644000176200001440000000420613604235526014454 0ustar liggesusers## ## minnndist.R ## ## Fast versions of min(nndist(X)), max(nndist(X)) ## ## $Revision: 1.8 $ $Date: 2020/01/05 01:26:42 $ minnndist <- function(X, positive=FALSE, by=NULL) { stopifnot(is.ppp(X)) if(!is.null(by)) { stopifnot(length(by) == npoints(X)) if(positive) { retain <- !duplicated(X) X <- X[retain] by <- by[retain] } nn <- nndist(X, by=by) result <- aggregate(nn, by=list(from=by), min, drop=FALSE)[,-1,drop=FALSE] return(result) } n <- npoints(X) if(n <= 1) return(NA) x <- X$x y <- X$y o <- fave.order(y) big <- sqrt(.Machine$double.xmax) if(positive) { z <- .C("minPnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } else { z <- .C("minnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } 
return(sqrt(z$result)) } maxnndist <- function(X, positive=FALSE, by=NULL) { stopifnot(is.ppp(X)) if(!is.null(by)) { stopifnot(length(by) == npoints(X)) if(positive) { retain <- !duplicated(X) X <- X[retain] by <- by[retain] } nn <- nndist(X, by=by) result <- aggregate(nn, by=list(from=by), max, drop=FALSE)[,-1,drop=FALSE] return(result) } n <- npoints(X) if(n <= 1) return(NA) x <- X$x y <- X$y o <- fave.order(y) big <- sqrt(.Machine$double.xmax) if(positive) { z <- .C("maxPnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } else { z <- .C("maxnnd2", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), as.double(big), result = as.double(numeric(1)), PACKAGE = "spatstat") } return(sqrt(z$result)) } spatstat/R/summary.mppm.R0000644000176200001440000001667413333543255015132 0ustar liggesusers# # summary.mppm.R # # $Revision: 1.15 $ $Date: 2016/04/25 02:34:40 $ # summary.mppm <- function(object, ..., brief=FALSE) { # y will be the summary y <- object[c("Call", "Info", "Inter", "trend", "iformula", #%^!ifdef RANDOMEFFECTS "random", #%^!endif "npat", "maxlogpl")] y$brief <- brief Info <- object$Info Inter <- object$Inter FIT <- object$Fit$FIT moadf <- object$Fit$moadf y$Fit <- object$Fit[c("fitter", "use.gam", "fmla", "Vnamelist")] y$Fit$FIT <- summary(FIT) y$Fit$moadf <- list(nrow=nrow(moadf), colnames=colnames(moadf)) ninteract <- Inter$ninteract interaction <- Inter$interaction iused <- Inter$iused itags <- Inter$itags processnames <- Inter$processes constant <- Inter$constant trivial <- Inter$trivial npat <- y$npat iformula <- y$iformula #%^!ifdef RANDOMEFFECTS random <- y$random #%^!endif Vnamelist <- y$Fit$Vnamelist allVnames <- unlist(Vnamelist) poistags <- itags[trivial] # rownames <- y$Info$rownames switch(y$Fit$fitter, #%^!ifdef RANDOMEFFECTS glmmPQL={ y$coef <- co <- fixed.effects(FIT) systematic <- !(names(co) %in% c(allVnames, poistags)) y$coef.syst <- 
co[systematic] y$coef.rand <- random.effects(FIT) }, #%^!endif gam=, glm={ y$coef <- co <- coef(FIT) systematic <- !(names(co) %in% c(allVnames, poistags)) y$coef.syst <- co[systematic] }) # model depends on covariates y$depends.covar <- Info$has.covar && (length(Info$used.cov.names) > 0) #%^!ifdef RANDOMEFFECTS # random effects y$ranef <- if(Info$has.random) summary(FIT$modelStruct) else NULL #%^!endif ### Interactions # model is Poisson y$poisson <- all(trivial[iused]) # Determine how complicated the interactions are: #%^!ifdef RANDOMEFFECTS # (0) are there random effects involving the interactions randominteractions <- !is.null(random) && any(variablesinformula(random) %in% itags) #%^!endif # (1) is the interaction formula of the form ~ tag + tag + ... + tag isimple <- identical(sort(variablesinformula(iformula)), sort(termsinformula(iformula))) # (2) is it of the form ~tag trivialformula <- (isimple && ninteract == 1) # (3) is it of the form ~tag where the interaction is the same in each row #%^!ifdef RANDOMEFFECTS fixedinteraction <- (trivialformula && constant && !randominteractions) #%^!else # fixedinteraction <- trivialformula && constant #%^!endif ### Determine printing of interactions, accordingly ### iprint <- list() #%^!ifdef RANDOMEFFECTS if(randominteractions) { toohard <- TRUE printeachrow <- FALSE } else #%^!endif if(fixedinteraction) { # exactly the same interaction for all patterns interaction <- interaction[1,1,drop=TRUE] fi.all <- fii(interaction, co, Vnamelist[[1]]) iprint <- list("Interaction for all patterns"=fi.all) printeachrow <- FALSE toohard <- FALSE } else if(trivialformula) { # same type of process for all patterns pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE } else if(isimple && all(constant)) { # several interactions involved, each of which is the same for all patterns iprint <- list("Interaction formula"=iformula, "Interactions defined for each 
pattern"=NULL) for(j in (1:ninteract)[iused]) { name.j <- paste("Interaction", sQuote(itags[j])) int.j <- Inter$interaction[1,j,drop=TRUE] Vnames.j <- Vnamelist[[j]] fii.j <- fii(int.j, co, Vnames.j) extra.j <- list(fii.j) names(extra.j) <- name.j iprint <- append(iprint, extra.j) } printeachrow <- FALSE toohard <- FALSE } else { # general case # determine which interaction(s) are active on each row active <- active.interactions(object) if(ninteract > 1 || !all(active)) iprint <- list("Active interactions"=active) printeachrow <- TRUE toohard <- any(rowSums(active) > 1) } y$ikind <- list( #%^!ifdef RANDOMEFFECTS randominteractions=randominteractions, #%^!endif isimple =isimple, trivialformula =trivialformula, fixedinteraction =fixedinteraction, toohard =toohard, printeachrow =printeachrow) if(toohard) iprint <- append(iprint, list("(Sorry, cannot interpret fitted interactions)")) else if(printeachrow) { subs <- subfits(object, what="interactions") names(subs) <- paste("Interaction", 1:npat) iprint <- append(iprint, subs) } y$iprint <- iprint class(y) <- c("summary.mppm", class(list)) return(y) } print.summary.mppm <- function(x, ..., brief=x$brief) { # NB: x is an object of class "summary.mppm" npat <- x$npat # Inter <- x$Inter # ninteract <- Inter$ninteract # interaction <- Inter$interaction # iused <- Inter$iused # constant <- Inter$constant # iformula <- x$iformula # processnames <- Inter$processes # itags <- Inter$itags # trivial <- Inter$trivial #%^!ifdef RANDOMEFFECTS # random <- x$random #%^!endif FIT <- x$Fit$FIT # Vnamelist <- x$Fit$Vnamelist # allVnames <- unlist(Vnamelist) # poistags <- itags[trivial] terselevel <- spatstat.options("terse") # rownames <- x$Info$rownames splat("Point process model fitted to", npat, "point patterns") if(waxlyrical('gory', terselevel)) splat("Call:", x$Call$callstring) splat("Log trend formula:", pasteFormula(x$trend)) switch(x$Fit$fitter, #%^!ifdef RANDOMEFFECTS glmmPQL={ cat("Fixed effects:\n") print(x$coef.syst) 
cat("Random effects:\n") print(x$coef.rand) co <- fixed.effects(FIT) }, #%^!endif gam=, glm={ cat("Fitted trend coefficients:\n") print(x$coef.syst) co <- coef(FIT) }) if(!brief && waxlyrical('extras', terselevel)) { cat("All fitted coefficients:\n") print(co) } parbreak(terselevel) #%^!ifdef RANDOMEFFECTS if(!is.null(x$ranef)) { splat("Random effects summary:") print(x$ranef) parbreak(terselevel) } #%^!endif ### Print interaction information ### if(waxlyrical('extras', terselevel)) { iprint <- x$iprint nama <- names(iprint) %orifnull% rep("", length(iprint)) for(i in seq_along(iprint)) { nami <- nama[i] vali <- iprint[[i]] if(brief && is.matrix(vali)) vali <- paren(paste(nrow(vali), "x", ncol(vali), "matrix")) if(nami != "") { inline <- inherits(vali, "formula") || is.character(vali) || (brief && inherits(vali, "fii")) if(inline) cat(paste0(nami, ":\t")) else splat(paste0(nami, ":")) } if(!is.null(vali)) { if(inherits(vali, "fii")) { print(vali, tiny=brief) } else if(is.character(vali)) { splat(vali) } else { print(vali) } } parbreak(terselevel) } } if(!brief && waxlyrical('gory', terselevel)) { splat("--- Gory details: ---") splat("Combined data frame has", x$Fit$moadf$nrow, "rows") print(FIT) } invisible(NULL) } spatstat/R/random.R0000644000176200001440000007626613460231502013736 0ustar liggesusers## ## random.R ## ## Functions for generating random point patterns ## ## $Revision: 4.97 $ $Date: 2019/04/05 03:41:12 $ ## ## ## runifpoint() n i.i.d. 
## uniform random points ("binomial process")
##
## runifpoispp() uniform Poisson point process
##
## rpoispp() general Poisson point process (thinning method)
##
## rpoint() n independent random points (rejection/pixel list)
##
## rMaternI() Mat'ern model I
## rMaternII() Mat'ern model II
## rSSI() Simple Sequential Inhibition process
##
## rthin() independent random thinning
## rjitter() random perturbation
##
## Examples:
## u01 <- owin(0:1,0:1)
## plot(runifpoispp(100, u01))
## X <- rpoispp(function(x,y) {100 * (1-x/2)}, 100, u01)
## X <- rpoispp(function(x,y) {ifelse(x < 0.5, 100, 20)}, 100)
## plot(X)
## plot(rMaternI(100, 0.02))
## plot(rMaternII(100, 0.05))
##

## Package a list of simulated patterns for return to the user:
## when nsim = 1 and drop = TRUE, return the single pattern itself;
## otherwise return the list as a 'solist' if appropriate (else
## 'anylist') with entries named "<NameBase> 1", "<NameBase> 2", ...
simulationresult <- function(resultlist, nsim, drop, NameBase="Simulation") {
  if(nsim == 1 && drop)
    return(resultlist[[1L]])
  #' return 'solist' if appropriate, otherwise 'anylist'
  return(as.solist(resultlist, .NameBase=NameBase, demote=TRUE))
}

## Generate n i.i.d. uniform random points in a rectangle,
## replicated nsim times.  Assumes 'win' is a rectangle and performs
## no argument checking (internal helper).
runifrect <- function(n, win=owin(c(0,1),c(0,1)), nsim=1, drop=TRUE) {
  ## no checking
  xr <- win$xrange
  yr <- win$yrange
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    ## x and y coordinates are independent uniforms on the side ranges
    x <- runif(n, min=xr[1], max=xr[2])
    y <- runif(n, min=yr[1], max=yr[2])
    result[[isim]] <- ppp(x, y, window=win, check=FALSE)
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

## Generate n i.i.d. uniform random points in a disc.
runifdisc <- function(n, radius=1, centre=c(0,0), ..., nsim=1, drop=TRUE) {
  ## i.i.d. uniform points in the disc of radius r and centre (x,y)
  check.1.real(radius)
  stopifnot(radius > 0)
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  ## window of the result: disc() builds the disc (polygonal
  ## approximation by default; '...' is passed through)
  disque <- disc(centre=centre, radius=radius, ...)
twopi <- 2 * pi rad2 <- radius^2 result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { theta <- runif(n, min=0, max=twopi) s <- sqrt(runif(n, min=0, max=rad2)) result[[isim]] <- ppp(centre[1] + s * cos(theta), centre[2] + s * sin(theta), window=disque, check=FALSE) } result <- simulationresult(result, nsim, drop) return(result) } runifpoint <- function(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE, ..., nsim=1, drop=TRUE, ex=NULL) { if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(missing(n) && missing(win) && !is.null(ex)) { stopifnot(is.ppp(ex)) n <- npoints(ex) win <- Window(ex) } else { win <- as.owin(win) check.1.integer(n) stopifnot(n >= 0) } if(n == 0) { emp <- ppp(window=win) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) { whinge <- paste("Attempting to generate", n, "random points") message(whinge) warning(whinge, call.=FALSE) } } switch(win$type, rectangle = { return(runifrect(n, win, nsim=nsim, drop=drop)) }, mask = { dx <- win$xstep dy <- win$ystep ## extract pixel coordinates and probabilities rxy <- rasterxy.mask(win, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ## make a list of nsim point patterns result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## select pixels with equal probability id <- sample(seq_along(xpix), n, replace=TRUE) ## extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, min= -dy/2, max=dy/2) result[[isim]] <- ppp(x, y, window=win, check=FALSE) } }, polygonal={ ## make a list of nsim point patterns result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## rejection method ## initialise empty pattern x <- numeric(0) y <- numeric(0) X <- ppp(x, y, window=win) ## ## rectangle in which trial points will be generated box <- boundingbox(win) ## ntries <- 0 repeat { ntries <- ntries + 1 ## generate trial 
points in batches of n qq <- runifrect(n, box) ## retain those which are inside 'win' qq <- qq[win] ## add them to result X <- superimpose(X, qq, W=win, check=FALSE) ## if we have enough points, exit if(X$n > n) { result[[isim]] <- X[1:n] break } else if(X$n == n) { result[[isim]] <- X break } else if(ntries >= giveup) { ## otherwise get bored eventually stop(paste("Gave up after", giveup * n, "trials,", X$n, "points accepted")) } } } }, stop("Unrecognised window type") ) ## list of point patterns produced. result <- simulationresult(result, nsim, drop) return(result) } runifpoispp <- function(lambda, win = owin(c(0,1),c(0,1)), ..., nsim=1, drop=TRUE) { win <- as.owin(win) if(!is.numeric(lambda) || length(lambda) > 1 || !is.finite(lambda) || lambda < 0) stop("Intensity lambda must be a single finite number >= 0") if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(lambda == 0) { ## return empty pattern emp <- ppp(window=win) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } ## will generate Poisson process in enclosing rectangle and trim it box <- boundingbox(win) meanN <- lambda * area(box) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { n <- rpois(1, meanN) if(!is.finite(n)) stop(paste("Unable to generate Poisson process with a mean of", meanN, "points")) X <- runifpoint(n, box) ## trim to window if(win$type != "rectangle") X <- X[win] result[[isim]] <- X } result <- simulationresult(result, nsim, drop) return(result) } rpoint <- function(n, f, fmax=NULL, win=unit.square(), ..., giveup=1000,verbose=FALSE, nsim=1, drop=TRUE) { if(missing(f) || (is.numeric(f) && length(f) == 1)) ## uniform distribution return(runifpoint(n, win, giveup, nsim=nsim, drop=drop)) ## non-uniform distribution.... 
if(!is.function(f) && !is.im(f)) stop(paste(sQuote("f"), "must be either a function or an", sQuote("im"), "object")) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } if(is.im(f)) { ## ------------ PIXEL IMAGE --------------------- wf <- as.owin(f) if(n == 0) { ## return empty pattern(s) emp <- ppp(window=wf) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } w <- as.mask(wf) M <- w$m dx <- w$xstep dy <- w$ystep ## extract pixel coordinates and probabilities rxy <- rasterxy.mask(w, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ppix <- as.vector(f$v[M]) ## not normalised - OK ## result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## select pixels id <- sample(length(xpix), n, replace=TRUE, prob=ppix) ## extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, min= -dy/2, max=dy/2) result[[isim]] <- ppp(x, y, window=wf, check=FALSE) } result <- simulationresult(result, nsim, drop) return(result) } ## ------------ FUNCTION --------------------- ## Establish parameters for rejection method verifyclass(win, "owin") if(n == 0) { ## return empty pattern(s) emp <- ppp(window=win) result <- rep(list(emp), nsim) result <- simulationresult(result, nsim, drop) return(result) } if(is.null(fmax)) { ## compute approx maximum value of f imag <- as.im(f, win, ...) summ <- summary(imag) fmax <- summ$max + 0.05 * diff(summ$range) } irregular <- (win$type != "rectangle") box <- boundingbox(win) result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { ## initialise empty pattern X <- ppp(numeric(0), numeric(0), window=win) pbar <- 1 nremaining <- n totngen <- 0 ## generate uniform random points in batches ## and apply the rejection method. 
## Collect any points that are retained in X ntries <- 0 repeat{ ntries <- ntries + 1 ## proposal points ngen <- nremaining/pbar + 10 totngen <- totngen + ngen prop <- runifrect(ngen, box) if(irregular) prop <- prop[win] if(prop$n > 0) { fvalues <- f(prop$x, prop$y, ...) paccept <- fvalues/fmax u <- runif(prop$n) ## accepted points Y <- prop[u < paccept] if(Y$n > 0) { ## add to X X <- superimpose(X, Y, W=win, check=FALSE) nX <- X$n pbar <- nX/totngen nremaining <- n - nX if(nremaining <= 0) { ## we have enough! if(verbose) splat("acceptance rate = ", round(100 * pbar, 2), "%") result[[isim]] <- if(nX == n) X else X[1:n] break } } } if(ntries > giveup) stop(paste("Gave up after",giveup * n,"trials with", X$n, "points accepted")) } } result <- simulationresult(result, nsim, drop) return(result) } rpoispp <- function(lambda, lmax=NULL, win = owin(), ..., nsim=1, drop=TRUE, ex=NULL, warnwin=TRUE) { ## arguments: ## lambda intensity: constant, function(x,y,...) or image ## lmax maximum possible value of lambda(x,y,...) ## win default observation window (of class 'owin') ## ... arguments passed to lambda(x, y, ...) 
  ##     nsim    number of replicate simulations
  ## (body of rpoispp, continued)
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  if(missing(lambda) && is.null(lmax) && missing(win) && !is.null(ex)) {
    ## no intensity given: copy intensity and window from example pattern 'ex'
    lambda <- intensity(unmark(ex))
    win <- Window(ex)
  } else {
    ## validate lambda and lmax
    if(!(is.numeric(lambda) || is.function(lambda) || is.im(lambda)))
      stop(paste(sQuote("lambda"),
                 "must be a constant, a function or an image"))
    if(is.numeric(lambda) && !(length(lambda) == 1 && lambda >= 0))
      stop(paste(sQuote("lambda"),
                 "must be a single, nonnegative number"))
    if(!is.null(lmax)) {
      if(!is.numeric(lmax))
        stop("lmax should be a number")
      if(length(lmax) > 1)
        stop("lmax should be a single number")
    }
    if(is.im(lambda)) {
      ## a pixel image determines its own window
      if(warnwin && !missing(win))
        warning("Argument win ignored", call.=FALSE)
      win <- rescue.rectangle(as.owin(lambda))
    } else {
      win <- as.owin(win)
    }
  }
  if(is.numeric(lambda))
    ## uniform Poisson
    return(runifpoispp(lambda, win, nsim=nsim, drop=drop))
  ## inhomogeneous Poisson
  ## perform thinning of uniform Poisson
  ## determine upper bound
  if(is.null(lmax)) {
    ## add a 5% margin to guard against discretisation error
    imag <- as.im(lambda, win, ...)
    summ <- summary(imag)
    lmax <- summ$max + 0.05 * diff(summ$range)
  }
  if(is.function(lambda)) {
    ## function lambda
    #' runifpoispp checks 'lmax'
    result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE)
    #' result is a 'ppplist' with appropriate names
    ## thin each simulation with retention probability lambda/lmax
    for(isim in 1:nsim) {
      X <- result[[isim]]
      if(X$n > 0) {
        prob <- lambda(X$x, X$y, ...)/lmax
        u <- runif(X$n)
        retain <- (u <= prob)
        result[[isim]] <- X[retain]
      }
    }
    if(nsim == 1 && drop)
      result <- result[[1L]]
    return(result)
  }
  if(is.im(lambda)) {
    ## image lambda
    if(spatstat.options("fastpois")) {
      ## new code: sample pixels directly
      ## N ~ Poisson(integral of lambda); pixels chosen with prob ~ pixel value
      mu <- integral(lambda)
      dx <- lambda$xstep/2
      dy <- lambda$ystep/2
      df <- as.data.frame(lambda)
      npix <- nrow(df)
      lpix <- df$value
      result <- vector(mode="list", length=nsim)
      nn <- rpois(nsim, mu)
      if(!all(is.finite(nn)))
        stop(paste("Unable to generate Poisson process with a mean of",
                   mu, "points"))
      for(isim in seq_len(nsim)) {
        ni <- nn[isim]
        ii <- sample.int(npix, size=ni, replace=TRUE, prob=lpix)
        ## jitter each point uniformly within its pixel
        xx <- df$x[ii] + runif(ni, -dx, dx)
        yy <- df$y[ii] + runif(ni, -dy, dy)
        result[[isim]] <- ppp(xx, yy, window=win, check=FALSE)
      }
      result <- simulationresult(result, nsim, drop)
      return(result)
    } else {
      ## old code: thinning
      result <- runifpoispp(lmax, win, nsim=nsim, drop=FALSE)
      for(isim in 1:nsim) {
        X <- result[[isim]]
        if(X$n > 0) {
          prob <- lambda[X]/lmax
          u <- runif(X$n)
          retain <- (u <= prob)
          result[[isim]] <- X[retain]
        }
      }
      if(nsim == 1 && drop)
        return(result[[1L]])
      return(result)
    }
  }
  stop(paste(sQuote("lambda"), "must be a constant, a function or an image"))
}

## Matern Model I inhibition process (thin wrapper for rMaternInhibition)
rMaternI <- function(kappa, r, win = owin(c(0,1),c(0,1)),
                     stationary=TRUE, ..., nsim=1, drop=TRUE)
{
  rMaternInhibition(type=1,
                    kappa=kappa, r=r, win=win, stationary=stationary,
                    ..., nsim=nsim, drop=drop)
}

## Matern Model II inhibition process (thin wrapper for rMaternInhibition)
rMaternII <- function(kappa, r, win = owin(c(0,1),c(0,1)),
                      stationary=TRUE, ..., nsim=1, drop=TRUE)
{
  rMaternInhibition(type=2,
                    kappa=kappa, r=r, win=win, stationary=stationary,
                    ..., nsim=nsim, drop=drop)
}

rMaternInhibition <- function(type, kappa, r,
win = owin(c(0,1),c(0,1)), stationary=TRUE, ..., nsim=1, drop=TRUE) { stopifnot(is.numeric(r) && length(r) == 1) stopifnot(type %in% c(1,2)) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## Resolve window class if(!inherits(win, c("owin", "box3", "boxx"))) { givenwin <- win win <- try(as.owin(givenwin), silent = TRUE) if(inherits(win, "try-error")) win <- try(as.boxx(givenwin), silent = TRUE) if(inherits(win, "try-error")) stop("Could not coerce argument win to a window (owin, box3 or boxx).") } dimen <- spatdim(win) if(dimen == 2) { bigbox <- if(stationary) grow.rectangle(win, r) else win result <- rpoispp(kappa, win = bigbox, nsim = nsim, drop=FALSE) } else if(dimen == 3) { bigbox <- if(stationary) grow.box3(win, r) else win result <- rpoispp3(kappa, domain = bigbox, nsim = nsim, drop=FALSE) } else { bigbox <- if(stationary) grow.boxx(win, r) else win result <- rpoisppx(kappa, domain = bigbox, nsim = nsim, drop=FALSE) } for(isim in 1:nsim) { Y <- result[[isim]] nY <- npoints(Y) if(type == 1) { ## Matern Model I if(nY > 1) { d <- nndist(Y) Y <- Y[d > r] } } else { ## Matern Model II if(nY > 1) { ## matrix of squared pairwise distances d2 <- pairdist(Y, squared=TRUE) close <- (d2 <= r^2) ## random order 1:n age <- sample(seq_len(nY), nY, replace=FALSE) earlier <- outer(age, age, ">") conflict <- close & earlier ## delete <- apply(conflict, 1, any) delete <- matrowany(conflict) Y <- Y[!delete] } } if(stationary) Y <- Y[win] result[[isim]] <- Y } if(nsim == 1 && drop) return(result[[1L]]) if(is.owin(win)) result <- as.ppplist(result) return(result) } rSSI <- function(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL, ..., f=NULL, fmax=NULL, nsim=1, drop=TRUE) { win.given <- !missing(win) && !is.null(win) stopifnot(is.numeric(r) && length(r) == 1 && r >= 0) stopifnot(is.numeric(n) && length(n) == 1 && n >= 0) must.reach.n <- is.finite(n) if(!missing(nsim)) { check.1.integer(nsim) stopifnot(nsim >= 1) } ## if(!is.null(f)) { 
    ## optional non-uniform proposal density f (constant, image or function)
    stopifnot(is.numeric(f) || is.im(f) || is.function(f))
    if(is.null(fmax) && !is.numeric(f))
      fmax <- if(is.im(f)) max(f) else max(as.im(f, win))
  }
  ##
  result <- vector(mode="list", length=nsim)
  if(!win.given)
    win <- square(1)
  ## validate initial state
  if(is.null(x.init)) {
    ## start with empty pattern in specified window
    win <- as.owin(win)
    x.init <- ppp(numeric(0),numeric(0), window=win)
  } else {
    ## start with specified pattern
    stopifnot(is.ppp(x.init))
    if(!win.given) {
      win <- as.owin(x.init)
    } else {
      ## check compatibility of windows
      if(!identical(win, as.owin(x.init)))
        warning(paste("Argument", sQuote("win"),
                      "is not the same as the window of",
                      sQuote("x.init")))
      x.init.new <- x.init[win]
      if(npoints(x.init.new) == 0)
        stop(paste("No points of x.init lie inside the specified window",
                   sQuote("win")))
      nlost <- npoints(x.init) - npoints(x.init.new)
      if(nlost > 0)
        warning(paste(nlost, "out of", npoints(x.init),
                      "points of the pattern x.init",
                      "lay outside the specified window",
                      sQuote("win")))
      x.init <- x.init.new
    }
    if(n < npoints(x.init))
      stop(paste("x.init contains", npoints(x.init), "points",
                 "but a pattern containing only n =", n, "points",
                 "is required"))
    if(n == npoints(x.init)) {
      warning(paste("Initial state x.init already contains", n, "points;",
                    "no further points were added"))
      result <- rep(list(x.init), nsim)
      result <- simulationresult(result, nsim, drop)
      return(result)
    }
  }
  #' validate radius
  r2 <- r^2
  ## crude packing check: warn if n discs of diameter r cannot fit in win
  if(!is.infinite(n) && (n * pi * r2/4 > area(win)))
    warning(paste("Window is too small to fit", n, "points",
                  "at minimum separation", r))
  #' start simulation
  pstate <- list()
  for(isim in 1:nsim) {
    if(nsim > 1) pstate <- progressreport(isim, nsim, state=pstate)
    ## Simple Sequential Inhibition process
    ## fixed number of points
    ## Naive implementation, proposals are uniform
    X <- x.init
    ntries <- 0
    while(ntries < giveup) {
      ntries <- ntries + 1
      ## propose one point, uniformly or with density f
      qq <- if(is.null(f)) runifpoint(1, win) else rpoint(1, f, fmax, win)
      dx <- qq$x[1] - X$x
      dy <- qq$y[1] - X$y
      ## accept if at least r from all existing points;
      ## each acceptance resets the failure counter
      if(all(dx^2 + dy^2 > r2)) {
        X <- superimpose(X, qq, W=win, check=FALSE)
        ntries <- 0
      }
      if(X$n >= n)
        break
    }
    if(must.reach.n && X$n < n)
      warning(paste("Gave up after", giveup,
                    "attempts with only", X$n, "points placed out of", n))
    result[[isim]] <- X
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

rPoissonCluster <-
  function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)),
           ..., lmax=NULL, nsim=1, drop=TRUE, saveparents=TRUE)
{
  ## Generic Poisson cluster process
  ## Implementation for bounded cluster radius
  ##
  ## 'rcluster' is a function(x,y) that takes the coordinates
  ## (x,y) of the parent point and generates a list(x,y) of offspring
  ##
  ## "..." are arguments to be passed to 'rcluster()'
  ##
  ## Catch old argument name rmax for expand, and allow rmax to be
  ## passed to rcluster (and then be ignored)
  if(missing(expand) && !is.null(rmax <- list(...)$rmax)){
    expand <- rmax
    f <- rcluster
    rcluster <- function(..., rmax) f(...)
  }
  win <- as.owin(win)
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  ## Generate parents in dilated window
  frame <- boundingbox(win)
  dilated <- owin(frame$xrange + c(-expand, expand),
                  frame$yrange + c(-expand, expand))
  if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa)))
    stop(paste("The window in which the image",
               sQuote("kappa"),
               "is defined\n",
               "is not large enough to contain the dilation of the window",
               sQuote("win")))
  parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim)
  if(nsim == 1) parentlist <- list(parentlist)
  resultlist <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    parents <- parentlist[[isim]]
    result <- NULL
    ## generate clusters
    np <- parents$n
    if(np > 0) {
      xparent <- parents$x
      yparent <- parents$y
      for(i in seq_len(np)) {
        ## generate random offspring of i-th parent point
        cluster <- rcluster(xparent[i], yparent[i], ...)
        ## rcluster may return a bare list(x,y); coerce to ppp
        if(!inherits(cluster, "ppp"))
          cluster <- ppp(cluster$x, cluster$y, window=frame, check=FALSE)
        ## skip if cluster is empty
        if(cluster$n > 0) {
          ## trim to window
          cluster <- cluster[win]
          if(is.null(result)) {
            ## initialise offspring pattern and offspring-to-parent map
            result <- cluster
            parentid <- rep.int(1, cluster$n)
          } else {
            ## add to pattern
            result <- superimpose(result, cluster, W=win, check=FALSE)
            ## update offspring-to-parent map
            parentid <- c(parentid, rep.int(i, cluster$n))
          }
        }
      }
    } else {
      ## no parents - empty pattern
      result <- ppp(numeric(0), numeric(0), window=win)
      parentid <- integer(0)
    }
    if(saveparents) {
      attr(result, "parents") <- parents
      attr(result, "parentid") <- parentid
      attr(result, "expand") <- expand
    }
    resultlist[[isim]] <- result
  }
  result <- simulationresult(resultlist, nsim, drop)
  return(result)
}

rGaussPoisson <- local({

  rGaussPoisson <- function(kappa, r, p2, win=owin(c(0,1), c(0,1)),
                            ..., nsim=1, drop=TRUE) {
    ## Gauss-Poisson process
    ## (clusters of 1 or 2 points; p2 = probability of a 2-point cluster)
    result <- rPoissonCluster(kappa, 1.05 * r, oneortwo, win,
                              radius=r/2, p2=p2, nsim=nsim, drop=drop)
    return(result)
  }

  ## cluster generator: either one point at the parent location, or two
  ## points diametrically opposite on a circle of radius 'radius'
  oneortwo <- function(x0, y0, radius, p2) {
    if(runif(1) > p2)
      ## one point
      return(list(x=x0, y=y0))
    ## two points
    theta <- runif(1, min=0, max=2*pi)
    return(list(x=x0+c(-1,1)*radius*cos(theta),
                y=y0+c(-1,1)*radius*sin(theta)))
  }

  rGaussPoisson
})

## stratified random pattern: k uniform points in each cell of an
## nx * ny grid over the frame, clipped to the window
rstrat <- function(win=square(1), nx, ny=nx, k=1, nsim=1, drop=TRUE) {
  win <- as.owin(win)
  stopifnot(nx >= 1 && ny >= 1)
  stopifnot(k >= 1)
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    xy <- stratrand(win, nx, ny, k)
    Xbox <- ppp(xy$x, xy$y, win$xrange, win$yrange, check=FALSE)
    result[[isim]] <- Xbox[win]
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

## helper: determine a regular grid over ranges xr, yr from either
## cell counts (nx, ny) or cell spacings (dx, dy)
xy.grid <- function(xr, yr, nx, ny, dx, dy) {
  nx.given <- !is.null(nx)
  ny.given <- !is.null(ny)
  dx.given <- !is.null(dx)
  dy.given <- !is.null(dy)
  ## determine x grid
  if(nx.given && dx.given)
    stop("Do not give both nx and dx")
  if(nx.given) {
    stopifnot(nx >= 1)
    x0 <- seq(from=xr[1], to=xr[2], length.out=nx+1)
    dx <- diff(xr)/nx
  } else if(dx.given) {
    stopifnot(dx > 0)
    x0 <- seq(from=xr[1], to=xr[2], by=dx)
    nx <- length(x0) - 1
  } else stop("Need either nx or dx")
  ## determine y grid
  if(ny.given && dy.given)
    stop("Do not give both ny and dy")
  if(ny.given) {
    stopifnot(ny >= 1)
    y0 <- seq(from=yr[1], to=yr[2], length.out=ny+1)
    dy <- diff(yr)/ny
  } else {
    if(is.null(dy)) dy <- dx    # default: square cells
    stopifnot(dy > 0)
    y0 <- seq(from=yr[1], to=yr[2], by=dy)
    ny <- length(y0) - 1
  }
  return(list(x0=x0, y0=y0, nx=nx, ny=ny, dx=dx, dy=dy))
}

## systematic random ('grid') pattern: a regular grid with one
## common uniform random displacement
rsyst <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx,
                  nsim=1, drop=TRUE) {
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  win <- as.owin(win)
  xr <- win$xrange
  yr <- win$yrange
  ## determine grid coordinates
  if(missing(ny)) ny <- NULL
  if(missing(dy)) dy <- NULL
  g <- xy.grid(xr, yr, nx, ny, dx, dy)
  x0 <- g$x0
  y0 <- g$y0
  dx <- g$dx
  dy <- g$dy
  ## assemble grid and randomise location
  xy0 <- expand.grid(x=x0, y=y0)
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    x <- xy0$x + runif(1, min = 0, max = dx)
    y <- xy0$y + runif(1, min = 0, max = dy)
    Xbox <- ppp(x, y, xr, yr, check=FALSE)
    ## trim to window
    result[[isim]] <- Xbox[win]
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

rcellnumber <- local({

  ## random number of points in one cell of the cell process
  rcellnumber <- function(n, N=10, mu=1) {
    if(missing(mu) || mu == 1) {
      z <- rCellUnit(n=n, N=N)
    } else {
      z <- replicate(n, rCellCumul(x=mu, N=N))
    }
    return(z)
  }

  ## unit-mean case: returns 0, 1 or N with
  ## P(0) = 1/N and P(N) = 1/(N(N-1)), giving mean 1
  rCellUnit <- function(n, N=10) {
    if(!missing(N)) {
      if(round(N) != N) stop("N must be an integer")
      stopifnot(is.finite(N))
      stopifnot(N > 1)
    }
    u <- runif(n, min=0, max=1)
    p0 <- 1/N
    pN <- 1/(N * (N-1))
    k <- ifelse(u < p0, 0, ifelse(u < (1 - pN), 1, N))
    return(k)
  }

  ## general mean x: sum of ceiling(x) unit counts, the first of which is
  ## binomially thinned to achieve the fractional part of the mean
  rCellCumul <- function(x, N=10) {
    check.1.real(x)
    n <- ceiling(x)
    if(n <= 0) return(0)
    y <- rCellUnit(n=n, N=N)
    if(n == x) return(sum(y))
    p <- x - (n-1)
    z <- sum(y[-1]) + rbinom(1, size=y[1], prob=p)
    return(z)
  }
  rcellnumber
})

## cell process: each cell of a grid independently receives
## rcellnumber() points, uniformly distributed within the cell
rcell <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx,
                  N=10, nsim=1, drop=TRUE) {
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  win <- as.owin(win)
  xr <- win$xrange
  yr <- win$yrange
  ## determine grid coordinates
  if(missing(ny)) ny <- NULL
  if(missing(dy)) dy <- NULL
  g <- xy.grid(xr, yr, nx, ny, dx, dy)
  nx <- g$nx
  ny <- g$ny
  x0 <- g$x0
  y0 <- g$y0
  dx <- g$dx
  dy <- g$dy
  ## generate pattern(s)
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    x <- numeric(0)
    y <- numeric(0)
    for(ix in seq_len(nx))
      for(iy in seq_len(ny)) {
        nij <- rcellnumber(1, N)
        x <- c(x, x0[ix] + runif(nij, min=0, max=dx))
        y <- c(y, y0[iy] + runif(nij, min=0, max=dy))
      }
    Xbox <- ppp(x, y, xr, yr, check=FALSE)
    result[[isim]] <- Xbox[win]
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

thinjump <- function(n, p) {
  # equivalent to which(runif(n) < p) for constant p
  stopifnot(length(p) == 1)
  if(p <= 0) return(integer(0))
  if(p >= 1) return(seq_len(n))
  if(p > 0.5) {
    #' for retention prob > 0.5 we find the ones to discard instead
    discard <- thinjump(n, 1-p)
    retain <- if(length(discard)) -discard else seq_len(n)
    return(retain)
  }
  ## buffer size guess: expected count plus two standard deviations
  guessmaxlength <- ceiling(n * p + 2 * sqrt(n * p * (1-p)))
  i <- .Call("thinjumpequal", n, p, guessmaxlength,
             PACKAGE = "spatstat")
  return(i)
}

## independent random thinning of a point/segment pattern X;
## P may be a constant, a vector, a function(x,y,...) or a pixel image
rthin <- function(X, P, ..., nsim=1, drop=TRUE) {
  if(!(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X) || is.psp(X)))
    stop(paste("X should be a point pattern (class ppp, lpp, pp3 or ppx)",
               "or a line segment pattern (class psp)"),
         call.=FALSE)
  if(!missing(nsim)) {
    check.1.integer(nsim)
    stopifnot(nsim >= 1)
  }
  nX <- nobjects(X)
  if(nX == 0) {
    ## nothing to thin
    result <- rep(list(X), nsim)
    result <- simulationresult(result, nsim, drop)
    return(result)
  }
  if(is.numeric(P) && length(P) == 1 && spatstat.options("fastthin")) {
    # special algorithm for constant probability
    result <- vector(mode="list", length=nsim)
    for(isim in 1:nsim) {
      retain <- thinjump(nX, P)
      Y <- X[retain]
      ## also handle offspring-to-parent map if present
      if(!is.null(parentid <- attr(X, "parentid")))
        attr(Y, "parentid") <- parentid[retain]
      result[[isim]] <- Y
    }
    result <- simulationresult(result, nsim, drop)
    return(result)
  }
  if(is.numeric(P)) {
    ## vector of retention probabilities
    pX <- P
    if(length(pX) != nX) {
      if(length(pX) == 1)
        pX <- rep.int(pX, nX)
      else
        stop("Length of vector P does not match number of points of X")
    }
    if(anyNA(pX))
      stop("P contains NA's")
  } else if(is.function(P)) {
    ## function - evaluate it at points of X
    if(!(is.ppp(X) || is.lpp(X)))
      stop(paste("Don't know how to apply a function to an object of class",
                 commasep(sQuote(class(X)))),
           call.=FALSE)
    pX <- if(inherits(P, c("linfun", "funxy"))) P(X, ...) else
          P(X$x, X$y, ...)
    if(length(pX) != nX)
      stop("Function P returned a vector of incorrect length")
    if(!is.numeric(pX))
      stop("Function P returned non-numeric values")
    if(anyNA(pX))
      stop("Function P returned some NA values")
  } else if(is.im(P)) {
    ## image - look it up
    if(!(is.ppp(X) || is.lpp(X)))
      stop(paste("Don't know how to apply image values to an object of class",
                 commasep(sQuote(class(X)))),
           call.=FALSE)
    if(!(P$type %in% c("integer", "real")))
      stop("Values of image P should be numeric")
    pX <- P[X, drop=FALSE]
    if(anyNA(pX))
      stop("some points of X lie outside the domain of image P")
  } else
    stop("Unrecognised format for P")
  if(min(pX) < 0) stop("some probabilities are negative")
  if(max(pX) > 1) stop("some probabilities are greater than 1")
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    retain <- (runif(length(pX)) < pX)
    Y <- X[retain]
    ## also handle offspring-to-parent map if present
    if(!is.null(parentid <- attr(X, "parentid")))
      attr(Y, "parentid") <- parentid[retain]
    result[[isim]] <- Y
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

## rjitter
## displace each point of X by an independent uniform random vector
## in a disc of the given radius
rjitter <- function(X, radius, retry=TRUE, giveup=10000, ...,
                    nsim=1, drop=TRUE) {
  verifyclass(X, "ppp")
  check.1.integer(nsim)
  stopifnot(nsim >= 1)
  nX <- npoints(X)
  if(nX == 0) {
    result <- rep(list(X), nsim)
    result <- simulationresult(result, nsim, drop)
    return(result)
  }
  W <- X$window
  if(missing(radius) || is.null(radius))
    radius <- min(bw.stoyan(X), shortside(Frame(W)))
  result <- vector(mode="list", length=nsim)
  for(isim in 1:nsim) {
    if(!retry) {
      ## points outside window are lost
      D <- runifdisc(nX, radius=radius)
      xnew <- X$x + D$x
      ynew <- X$y + D$y
      ok <- inside.owin(xnew, ynew, W)
      result[[isim]] <- ppp(xnew[ok], ynew[ok], window=W, check=FALSE)
    } else {
      ## retry = TRUE: condition on points being inside window
      undone <- rep.int(TRUE, nX)
      triesleft <- giveup
      Xshift <- X
      while(any(undone)) {
        triesleft <- triesleft - 1
        if(triesleft <= 0)
          break
        Y <- Xshift[undone]
        D <- runifdisc(Y$n, radius=radius)
        xnew <- Y$x + D$x
        ynew <- Y$y + D$y
        ok <- inside.owin(xnew, ynew, W)
        if(any(ok)) {
          changed <- which(undone)[ok]
          Xshift$x[changed] <- xnew[ok]
          Xshift$y[changed] <- ynew[ok]
          undone[changed] <- FALSE
        }
      }
      result[[isim]] <- Xshift
    }
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}
spatstat/R/randomNS.R0000644000176200001440000003310013444125056014163 0ustar liggesusers
##
## randomNS.R
##
## simulating from Neyman-Scott processes
##
## $Revision: 1.26 $ $Date: 2019/03/19 08:32:39 $
##
## Original code for rCauchy and rVarGamma by Abdollah Jalilian
## Other code and modifications by Adrian Baddeley
## Bug fixes by Abdollah, Adrian, and Rolf Turner

rNeymanScott <-
  function(kappa, expand, rcluster, win = owin(c(0,1),c(0,1)),
           ..., lmax=NULL, nsim=1, drop=TRUE,
           nonempty=TRUE, saveparents=TRUE)
{
  ## Generic Neyman-Scott process
  ## Implementation for bounded cluster radius
  ##
  ## Catch old argument name rmax for expand
  if(missing(expand) && !is.null(rmax <- list(...)$rmax))
    expand <- rmax
  ## 'rcluster' may be
  ##
  ##     (1) a function(x,y, ...) that takes the coordinates
  ##         (x,y) of the parent point and generates a list(x,y) of offspring
  ##
  if(is.function(rcluster))
    return(rPoissonCluster(kappa, expand, rcluster, win, ...,
                           lmax=lmax, nsim=nsim, drop=drop,
                           saveparents=saveparents))
  ##     (2) a list(mu, f) where mu is a numeric value, function, or pixel image
  ##         and f is a function(n, ...) generating n i.i.d. offspring at 0,0
  if(!(is.list(rcluster) && length(rcluster) == 2))
    stop("rcluster should be either a function, or a list of two elements")
  win <- as.owin(win)
  mu <- rcluster[[1]]
  rdisplace <- rcluster[[2]]
  if(is.numeric(mu)) {
    ## homogeneous
    if(!(length(mu) == 1 && mu >= 0))
      stop("rcluster[[1]] should be a single nonnegative number")
    mumax <- mu
  } else if (is.im(mu) || is.function(mu)) {
    ## inhomogeneous
    if(is.function(mu)) mu <- as.im(mu, W=win, ..., strict=TRUE)
    mumax <- max(mu)
  } else stop("rcluster[[1]] should be a number, a function or a pixel image")
  if(!is.function(rdisplace))
    stop("rcluster[[2]] should be a function")
  ## Generate parents in dilated window
  frame <- boundingbox(win)
  dilated <- grow.rectangle(frame, expand)
  if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa)))
    stop(paste("The window in which the image",
               sQuote("kappa"),
               "is defined\n",
               "is not large enough to contain the dilation of the window",
               sQuote("win")))
  if(nonempty) {
    ## simulate only parents that have at least one offspring
    if(is.function(kappa)) {
      kappa <- as.im(kappa, W=dilated, ..., strict=TRUE)
      lmax <- NULL
    }
    ## intensity of parents with at least one offspring point
    kappa <- kappa * (1 - exp(-mumax))
  }
  ## generate
  parentlist <- rpoispp(kappa, lmax=lmax, win=dilated, nsim=nsim,
                        drop=FALSE, warnwin=FALSE)
  resultlist <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    parents <- parentlist[[i]]
    np <- npoints(parents)
    ## generate cluster sizes
    if(np == 0) {
      ## no parents - empty pattern
      result <- ppp(numeric(0), numeric(0), window=win)
      parentid <- integer(0)
    } else {
      if(!nonempty) {
        ## cluster sizes are Poisson
        csize <- rpois(np, mumax)
      } else {
        ## cluster sizes are Poisson conditional on > 0
        ## (inverse-CDF sampling restricted to u >= P(0))
        csize <- qpois(runif(np, min=dpois(0, mumax)), mumax)
      }
      noff <- sum(csize)
      xparent <- parents$x
      yparent <- parents$y
      x0 <- rep.int(xparent, csize)
      y0 <- rep.int(yparent, csize)
      ## invoke random generator
      dd <- rdisplace(noff, ...)
      mm <- if(is.ppp(dd)) marks(dd) else NULL
      ## validate
      xy <- xy.coords(dd)
      dx <- xy$x
      dy <- xy$y
      if(!(length(dx) == noff))
        stop("rcluster returned the wrong number of points")
      ## create offspring and offspring-to-parent map
      xoff <- x0 + dx
      yoff <- y0 + dy
      parentid <- rep.int(1:np, csize)
      ## trim to window
      retain <- inside.owin(xoff, yoff, win)
      if(is.im(mu))
        retain[retain] <- inside.owin(xoff[retain], yoff[retain], as.owin(mu))
      xoff <- xoff[retain]
      yoff <- yoff[retain]
      parentid <- parentid[retain]
      if(!is.null(mm)) mm <- marksubset(mm, retain)
      ## done
      result <- ppp(xoff, yoff, window=win, check=FALSE, marks=mm)
    }
    if(is.im(mu)) {
      ## inhomogeneously modulated clusters a la Waagepetersen
      P <- eval.im(mu/mumax)
      result <- rthin(result, P)
    }
    if(saveparents) {
      attr(result, "parents") <- parents
      attr(result, "parentid") <- parentid
      attr(result, "expand") <- expand
    }
    resultlist[[i]] <- result
  }
  result <- simulationresult(resultlist, nsim, drop)
  return(result)
}

rMatClust <- local({

  ## like runifdisc but returns only the coordinates
  rundisk <- function(n, radius) {
    R <- radius * sqrt(runif(n, min=0, max=1))
    Theta <- runif(n, min=0, max=2*pi)
    cbind(R * cos(Theta), R * sin(Theta))
  }

  rMatClust <- function(kappa, scale, mu, win = owin(c(0,1),c(0,1)),
                        nsim=1, drop=TRUE, saveLambda=FALSE, expand = scale,
                        ..., poisthresh=1e-6, saveparents=TRUE) {
    ## Matern Cluster Process with Poisson (mu) offspring distribution
    ## Catch old scale syntax (r)
    if(missing(scale)) scale <- list(...)$r
    check.1.real(scale)
    stopifnot(scale > 0)
    ## trap case of large clusters, close to Poisson
    kok <- is.numeric(kappa) || is.im(kappa)
    if(kok) {
      kappamax <- max(kappa)
    } else {
      kim <- as.im(kappa, W=win, ..., strict=TRUE)
      kra <- range(kim)
      kappamax <- kra[2] +
0.05 * diff(kra)
    }
    ## if clusters are dense relative to their size, the process is
    ## indistinguishable from Poisson: simulate directly
    if(1/(pi * kappamax * scale^2) < poisthresh) {
      kapmu <- mu * (if(kok) kappa else kim)
      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
      return(result)
    }
    result <- rNeymanScott(kappa, scale, list(mu, rundisk), win,
                           radius=scale, nsim=nsim, drop=FALSE,
                           saveparents = saveparents || saveLambda)
    if(saveLambda){
      ## attach the driving intensity computed from the parent points
      for(i in 1:nsim) {
        parents <- attr(result[[i]], "parents")
        Lambda <- clusterfield("MatClust", parents, scale=scale, mu=mu, ...)
        attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
      }
    }
    return(if(nsim == 1 && drop) result[[1]] else result)
  }

  rMatClust
})

rThomas <- local({

  ## random displacements: isotropic bivariate Normal
  gaus <- function(n, sigma) {
    matrix(rnorm(2 * n, mean=0, sd=sigma), ncol=2)
  }

  ## main function
  rThomas <- function(kappa, scale, mu, win = owin(c(0,1),c(0,1)),
                      nsim=1, drop=TRUE, saveLambda=FALSE, expand = 4*scale,
                      ..., poisthresh=1e-6, saveparents=TRUE) {
    ## Thomas process with Poisson(mu) number of offspring
    ## at isotropic Normal(0,sigma^2) displacements from parent
    ##
    ## Catch old scale syntax (sigma)
    if(missing(scale)) scale <- list(...)$sigma
    check.1.real(scale)
    stopifnot(scale > 0)
    ## trap case of large clusters, close to Poisson
    kok <- is.numeric(kappa) || is.im(kappa)
    if(kok) {
      kappamax <- max(kappa)
    } else {
      kim <- as.im(kappa, W=win, ..., strict=TRUE)
      kra <- range(kim)
      kappamax <- kra[2] + 0.05 * diff(kra)
    }
    if(1/(4*pi * kappamax * scale^2) < poisthresh) {
      ## effectively Poisson: simulate directly
      kapmu <- mu * (if(kok) kappa else kim)
      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
      return(result)
    }
    ## determine the maximum radius of clusters
    if(missing(expand))
      expand <- clusterradius("Thomas", scale = scale, ...)
    result <- rNeymanScott(kappa, expand, list(mu, gaus),
                           win, sigma=scale, nsim=nsim, drop=FALSE,
                           saveparents = saveparents || saveLambda)
    if(saveLambda){
      ## attach the driving intensity computed from the parent points
      for(i in 1:nsim) {
        parents <- attr(result[[i]], "parents")
        Lambda <- clusterfield("Thomas", parents, scale=scale, mu=mu, ...)
        attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
      }
    }
    return(if(nsim == 1 && drop) result[[1]] else result)
  }

  rThomas
})

## ================================================
## Neyman-Scott process with Cauchy kernel function
## ================================================

## scale / omega: scale parameter of Cauchy kernel function
## eta: scale parameter of Cauchy pair correlation function
## eta = 2 * omega

rCauchy <- local({

  ## simulate mixture of normals with inverse-gamma distributed variance
  rnmix.invgam <- function(n = 1, rate) {
    V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2)
    s <- 1/rgamma(n, shape=1/2, rate=rate)
    return(sqrt(s) * V)
  }

  ## main function
  rCauchy <- function (kappa, scale, mu, win = owin(), thresh = 0.001,
                       nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL,
                       ..., poisthresh=1e-6, saveparents=TRUE) {
    ## scale / omega: scale parameter of Cauchy kernel function
    ## eta: scale parameter of Cauchy pair correlation function
    ## Catch old scale syntax (omega)
    dots <- list(...)
    if(missing(scale)) scale <- dots$omega
    ## Catch old name 'eps' for 'thresh':
    if(missing(thresh))
      thresh <- dots$eps %orifnull% 0.001
    ## trap case of large clusters, close to Poisson
    kok <- is.numeric(kappa) || is.im(kappa)
    if(kok) {
      kappamax <- max(kappa)
    } else {
      kim <- as.im(kappa, W=win, ..., strict=TRUE)
      kra <- range(kim)
      kappamax <- kra[2] + 0.05 * diff(kra)
    }
    if(1/(pi * kappamax * scale^2) < poisthresh) {
      ## effectively Poisson: simulate directly
      kapmu <- mu * (if(kok) kappa else kim)
      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
      return(result)
    }
    ## determine the maximum radius of clusters
    if(missing(expand)){
      expand <- clusterradius("Cauchy", scale = scale, thresh = thresh, ...)
    } else if(!missing(thresh)){
      warning("Argument ", sQuote("thresh"),
              " is ignored when ", sQuote("expand"), " is given")
    }
    ## simulate
    result <- rNeymanScott(kappa, expand, list(mu, rnmix.invgam),
                           win, rate = scale^2/2, nsim=nsim, drop=FALSE,
                           saveparents = saveparents || saveLambda)
    ## correction from Abdollah: the rate is beta = omega^2 / 2 = eta^2 / 8.
    if(saveLambda){
      ## attach the driving intensity computed from the parent points
      for(i in 1:nsim) {
        parents <- attr(result[[i]], "parents")
        Lambda <- clusterfield("Cauchy", parents, scale=scale, mu=mu, ...)
        attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
      }
    }
    return(if(nsim == 1 && drop) result[[1]] else result)
  }

  rCauchy
})

##
## =================================================================
## Neyman-Scott process with Variance Gamma (Bessel) kernel function
## =================================================================

## nu.ker: smoothness parameter of Variance Gamma kernel function
## omega: scale parameter of kernel function
## nu.pcf: smoothness parameter of Variance Gamma pair correlation function
## eta: scale parameter of Variance Gamma pair correlation function
## nu.pcf = 2 * nu.ker + 1    and    eta = omega

rVarGamma <- local({

  ## simulates mixture of isotropic Normal points in 2D with gamma variances
  rnmix.gamma <- function(n = 1, shape, rate) {
    V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2)
    s <- rgamma(n, shape=shape, rate=rate)
    return(sqrt(s) * V)
  }

  ## main function
  rVarGamma <- function (kappa, nu, scale, mu, win = owin(), thresh = 0.001,
                         nsim=1, drop=TRUE, saveLambda=FALSE, expand = NULL,
                         ..., poisthresh=1e-6, saveparents=TRUE) {
    ## nu / nu.ker: smoothness parameter of Variance Gamma kernel function
    ## scale / omega: scale parameter of kernel function
    ## Catch old nu.ker/nu.pcf syntax and resolve nu-value.
    dots <- list(...)
    if(missing(nu)){
      nu <- resolve.vargamma.shape(nu.ker=dots$nu.ker,
                                   nu.pcf=dots$nu.pcf)$nu.ker
    } else{
      check.1.real(nu)
      stopifnot(nu > -1/2)
    }
    ## Catch old scale syntax (omega)
    if(missing(scale)) scale <- dots$omega
    ## Catch old name 'eps' for 'thresh':
    if(missthresh <- missing(thresh))
      thresh <- dots$eps %orifnull% 0.001
    ## trap case of large clusters, close to Poisson
    kok <- is.numeric(kappa) || is.im(kappa)
    if(kok) {
      kappamax <- max(kappa)
    } else {
      kim <- as.im(kappa, W=win, ..., strict=TRUE)
      kra <- range(kim)
      kappamax <- kra[2] + 0.05 * diff(kra)
    }
    if(1/(4 * pi * kappamax * scale^2) < poisthresh) {
      ## effectively Poisson: simulate directly
      kapmu <- mu * (if(kok) kappa else kim)
      result <- rpoispp(kapmu, win=win, nsim=nsim, drop=drop, warnwin=FALSE)
      return(result)
    }
    ## determine the maximum radius of clusters
    if(missing(expand)){
      expand <- clusterradius("VarGamma", scale = scale, nu = nu,
                              thresh = thresh, ...)
    } else if(!missthresh){
      warning("Argument ", sQuote("thresh"),
              " is ignored when ", sQuote("expand"), " is given")
    }
    ## simulate
    result <- rNeymanScott(kappa, expand, list(mu, rnmix.gamma), win,
                           ## WAS: shape = 2 * (nu.ker + 1)
                           shape = nu + 1,
                           rate = 1/(2 * scale^2),
                           nsim=nsim, drop=FALSE,
                           saveparents = saveparents || saveLambda)
    if(saveLambda){
      ## attach the driving intensity computed from the parent points
      for(i in 1:nsim) {
        parents <- attr(result[[i]], "parents")
        Lambda <- clusterfield("VarGamma", parents, scale=scale,
                               nu=nu, mu=mu, ...)
        attr(result[[i]], "Lambda") <- Lambda[win, drop=FALSE]
      }
    }
    return(if(nsim == 1 && drop) result[[1]] else result)
  }

  rVarGamma
})
spatstat/R/otherpackages.R0000644000176200001440000000525513431146321015266 0ustar liggesusers
#'
#' otherpackages.R
#'
#' Dealing with other packages
#'
#' $Revision: 1.18 $ $Date: 2019/02/14 02:02:11 $

## 2D FFT: use fftwtools when available, otherwise stats::fft
fft2D <- function(z, inverse=FALSE, west=fftwAvailable()) {
  if(west) return(fftwtools::fftw2d(data=z, inverse=inverse))
  return(stats::fft(z=z, inverse=inverse))
}

fftwAvailable <- function() {
  # including temporary check for recent version
  ok <- requireNamespace("fftwtools", quietly=TRUE)
  return(ok)
}

## ensure both RandomFields packages are available
kraeverRandomFields <- function() {
  kraever("RandomFieldsUtils")
  kraever("RandomFields")
  # should no longer be needed:
  # capture.output(RandomFieldsUtils:::.onLoad())
  # capture.output(RandomFields:::.onLoad())
  return(invisible(NULL))
}

# require a namespace and optionally check whether it is attached
kraever <- function(package, fatal=TRUE) {
  if(!requireNamespace(package, quietly=TRUE)) {
    if(fatal)
      stop(paste("The package", sQuote(package), "is required"),
           call.=FALSE)
    return(FALSE)
  }
  if(spatstat.options(paste("check", package, "loaded", sep=".")) &&
     !isNamespaceLoaded(package)){
    if(fatal)
      stop(paste("The package", sQuote(package),
                 "must be loaded: please type",
                 sQuote(paste0("library", paren(package)))),
           call.=FALSE)
    return(FALSE)
  }
  return(TRUE)
}

## map a model name (or RMmodelgenerator) to the corresponding
## RandomFields model generator function
getRandomFieldsModelGen <- function(model) {
  kraeverRandomFields()
  if(inherits(model, "RMmodelgenerator"))
    return(model)
  if(!is.character(model))
    stop(paste("'model' should be a character string",
               "or one of the functions in the RandomFields package",
               "with a name beginning 'RM'"),
         call.=FALSE)
  f <- switch(model,
              cauchy      = RandomFields::RMcauchy,
              exponential = ,
              exp         = RandomFields::RMexp,
              gencauchy   = RandomFields::RMgencauchy,
              gauss       = RandomFields::RMgauss,
              gneiting    = RandomFields::RMgneiting,
              matern      = RandomFields::RMmatern,
              nugget      = RandomFields::RMnugget,
              spheric     = RandomFields::RMspheric,
              stable      = RandomFields::RMstable,
              whittle     = RandomFields::RMwhittle,
              {
                ## fallback: look up RM<model> in the RandomFields namespace
                modgen <- try(getExportedValue("RandomFields",
                                               paste0("RM", model)),
                              silent=TRUE)
                if(inherits(modgen, "try-error") ||
                   !inherits(modgen, "RMmodelgenerator"))
                  stop(paste("Model", sQuote(model), "is not recognised"))
                modgen
              })
  if(!is.function(f))
    stop(paste0("Unable to retrieve RandomFields::RM", model))
  return(f)
}
spatstat/R/allstats.R0000644000176200001440000000220513333543254014274 0ustar liggesusers
#
#
#    allstats.R
#
#    $Revision: 1.18 $   $Date: 2016/02/11 10:17:12 $
#
#
allstats <- function(pp, ..., dataname=NULL,verb=FALSE) {
  #
  # Function allstats --- to calculate the F, G, K, and J functions
  # for an unmarked point pattern.
  #
  verifyclass(pp,"ppp")
  if(is.marked(pp))
    stop("This function is applicable only to unmarked patterns.\n")
  # estimate F, G and J
  if(verb) cat("Calculating F, G, J ...")
  Jout <- do.call.matched(Jest,list(X=pp, ...))
  if(verb) cat("ok.\n")
  # extract F, G and J
  Fout <- attr(Jout, "F")
  Gout <- attr(Jout, "G")
  attr(Jout, "F") <- NULL
  attr(Jout, "G") <- NULL
  fns <- list("F function"=Fout,
              "G function"=Gout,
              "J function"=Jout)
  # compute second moment function K
  if(verb) cat("Calculating K function...")
  Kout <- do.call.matched(Kest, list(X=pp, ...))
  fns <- append(fns, list("K function"=Kout))
  if(verb) cat("done.\n")
  # add title
  if(is.null(dataname))
    dataname <- short.deparse(substitute(pp))
  title <- paste("Four summary functions for ", dataname,".",sep="")
  attr(fns, "title") <- title
  #
  fns <- as.anylist(fns)
  return(fns)
}
spatstat/R/clarkevans.R0000644000176200001440000001531413333543254014603 0ustar liggesusers
## clarkevans.R
## Clark-Evans statistic and test
## $Revision: 1.17 $ $Date: 2015/10/19 05:03:37 $

## Clark-Evans index R = Dobs/Dpois, optionally edge-corrected
clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"),
                       clipregion=NULL)
{
  verifyclass(X, "ppp")
  W <- X$window
  # validate correction argument
  gavecorrection <- !missing(correction)
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Donnelly="Donnelly",
## clarkevans.test: hypothesis test of Complete Spatial Randomness (CSR)
## based on the Clark-Evans aggregation index R.
##
##   X           point pattern (object of class "ppp")
##   correction  a SINGLE edge correction: "none", "Donnelly", "guard", "cdf"
##   clipregion  guard region window (required when correction="guard")
##   alternative alternative hypothesis; "clustered" is a synonym of "less"
##               (R < 1) and "regular" of "greater" (R > 1)
##   nsim        number of CSR simulations for the Monte Carlo p-value
##
## When correction="none" and nsim is not given explicitly, an asymptotic
## Z-test is used; otherwise the p-value is obtained by Monte Carlo
## simulation of CSR with the same number of points.
## Returns an object of class "htest".
clarkevans.test <- function(X, ...,
                            correction="none",
                            clipregion=NULL,
                            alternative=c("two.sided", "less", "greater",
                                          "clustered", "regular"),
                            nsim=999
                            ) {
  Xname <- short.deparse(substitute(X))
  miss.nsim <- missing(nsim)   # remember: default nsim triggers Z-test path
  verifyclass(X, "ppp")
  W <- Window(X)
  nX <- npoints(X)
  # validate SINGLE correction
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Donnelly="Donnelly",
                             donnelly="Donnelly",
                             guard="guard",
                             cdf="cdf"))
  switch(correction,
         none={
           corrblurb <- "No edge correction"
         },
         Donnelly={
           ## Donnelly's adjustment is derived for rectangles only
           if(W$type != "rectangle")
             stop("Donnelly correction only available for rectangular windows")
           corrblurb <- "Donnelly correction"
         },
         guard={
           if(is.null(clipregion))
             stop("clipregion not specified")
           clipregion <- as.owin(clipregion)
           corrblurb <- "Guard correction"
         },
         cdf={
           corrblurb <- "CDF correction"
         })
  # alternative hypothesis
  if(missing(alternative) || is.null(alternative))
    alternative <- "two.sided"
  alternative <- pickoption("alternative", alternative,
                            c(two.sided="two.sided",
                              less="less",
                              clustered="less",
                              greater="greater",
                              regular="greater"))
  altblurb <- switch(alternative,
                     two.sided="two-sided",
                     less="clustered (R < 1)",
                     greater="regular (R > 1)")
  # compute observed value
  statistic <- clarkevansCalc(X, correction=correction,
                              clipregion=clipregion, working=TRUE)
  ## 'working' carries intermediate quantities (areaW, npts, Dobs, Dpois)
  working <- attr(statistic, "working")
  #
  if(correction == "none" && miss.nsim) {
    # standard Normal p-value
    ## SE of the mean nearest-neighbour distance under CSR
    SE <- with(working, sqrt(((4-pi)*areaW)/(4 * pi))/npts)
    Z <- with(working, (Dobs - Dpois)/SE)
    p.value <- switch(alternative,
                      less=pnorm(Z),
                      greater=1 - pnorm(Z),
                      two.sided= 2*(1-pnorm(abs(Z))))
    pvblurb <- "Z-test"
  } else {
    # Monte Carlo p-value
    ## simulate CSR with the same number of points in the same window
    sims <- numeric(nsim)
    for(i in 1:nsim) {
      Xsim <- runifpoint(nX, win=W)
      sims[i] <- clarkevansCalc(Xsim, correction=correction,
                                clipregion=clipregion)
    }
    ## standard (k+1)/(n+1) Monte Carlo p-values
    p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim)
    p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim)
    p.value <- switch(alternative,
                      less=p.lower,
                      greater=p.upper,
                      two.sided=min(1, 2*min(p.lower, p.upper)))
    pvblurb <- paste("Monte Carlo test based on",
                     nsim, "simulations of CSR with fixed n")
  }
  statistic <- as.numeric(statistic)
  names(statistic) <- "R"
  out <- list(statistic=statistic,
              p.value=p.value,
              alternative=altblurb,
              method=c("Clark-Evans test", corrblurb, pvblurb),
              data.name=Xname)
  class(out) <- "htest"
  return(out)
}
## applynbd: for each point of X, identify a neighbourhood of other points
## (those within distance R, the N closest points, and/or those satisfying
## 'criterion') and apply the function FUN to that neighbourhood.
##
##   X         point pattern (coerced by as.ppp)
##   FUN       function(Y, current, dists, dranks, ...) applied per point
##   N         number of closest points to include
##   R         inclusion radius
##   criterion function(d, drank) returning a logical vector
##   exclude   if TRUE, the current point itself is excluded from Y
## At least one of N, R, criterion must be given.
## Returns the vector/list of values produced by apply() over the points.
applynbd <- function(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, ...)
{
  if(is.null(N) && is.null(R) && is.null(criterion))
    stop(paste("must specify at least one of the arguments",
               commasep(sQuote(c("N","R","criterion")))))
  X <- as.ppp(X)
  npts <- npoints(X)
  # compute matrix of pairwise distances
  dist <- pairdist(X)
  # compute row ranks (avoid ties)
  ## drank[i,j] = rank of point j among distances from point i, minus 1;
  ## the point itself has distance 0, hence drank[i,i] = 0
  rankit <- function(x) {
    u <- numeric(length(x))
    u[fave.order(x)] <- seq_along(x)
    return(u)
  }
  drank <- t(apply(dist, 1L, rankit)) - 1L
  included <- matrix(TRUE, npts, npts)
  if(!is.null(R)) {
    # select points closer than R
    included <- included & (dist <= R)
  }
  if(!is.null(N)) {
    # select N closest points
    if(N < 1)
      stop("Value of N must be at least 1")
    ## when exclude=TRUE the point itself (drank 0) will be removed below,
    ## so one extra rank is allowed here to still yield N neighbours
    if(exclude)
      included <- included & (drank <= N)
    else
      included <- included & (drank <= N-1)
  }
  if(!is.null(criterion)) {
    # some funny criterion
    for(i in 1L:npts)
      included[i,] <- included[i,] & criterion(dist[i,], drank[i,])
  }
  if(exclude)
    diag(included) <- FALSE
  # bind into an array
  ## slices: 1 = inclusion flag, 2 = distance, 3 = distance rank,
  ## 4 = index of the current point (constant along each row)
  a <- array(c(included, dist, drank, row(included)),
             dim=c(npts,npts,4))
  # what to do with a[i, , ]
  ## for unmarked patterns, 'current' is passed as a coordinate pair;
  ## for marked patterns, 'current' is a one-point pattern
  if(!is.marked(X))
    go <- function(ai, Z, fun, ...) {
      which <- as.logical(ai[,1L])
      distances <- ai[,2L]
      dranks <- ai[,3L]
      here <- ai[1L,4L]
      fun(Y=Z[which],
          current=c(x=Z$x[here], y=Z$y[here]),
          dists=distances[which],
          dranks=dranks[which],
          ...)
    }
  else
    go <- function(ai, Z, fun, ...) {
      which <- as.logical(ai[,1L])
      distances <- ai[,2L]
      dranks <- ai[,3L]
      here <- ai[1L,4L]
      fun(Y=Z[which],
          current=Z[here],
          dists=distances[which],
          dranks=dranks[which],
          ...)
    }
  # do it
  result <- apply(a, 1, go, Z=X, fun=FUN, ...)
  return(result)
}
#' Print method for objects of class "indicfun"
#' (window indicator functions created by as.function.owin).
#' Shows the function signature, a one-line description, and the window.
print.indicfun <- function(x, ...) {
  ## the window is stored in the closure environment under the name "W"
  win <- get("W", envir=environment(x))
  argnames <- names(formals(x))
  signature <- paste0("function", paren(paste(argnames, collapse=",")))
  splat(signature)
  splat("Indicator function (returns 1 inside window, 0 outside)")
  print(win)
  invisible(NULL)
}
## density.psp: kernel-smoothed intensity estimate of a line segment
## pattern, using an isotropic Gaussian kernel of standard deviation
## 'sigma' integrated along each segment.
##
##   x      line segment pattern (object of class "psp")
##   sigma  smoothing bandwidth; defaults to 0.1 * diameter of the window
##   edge   if TRUE, apply edge correction
##   method computation method: "FFT" (blurred pixellation), "C"
##          (exact, compiled), or "interpreted" (exact, pure R)
##   at     optional window or point pattern at which to evaluate
## Returns a pixel image, or a numeric vector when 'at' is a point pattern.
density.psp <- function(x, sigma, ..., edge=TRUE,
                        method=c("FFT", "C", "interpreted"),
                        at=NULL) {
  verifyclass(x, "psp")
  method <- match.arg(method)
  w <- x$window
  n <- x$n
  len <- lengths.psp(x)
  ang <- angles.psp(x, directed=TRUE)
  ux <- unitname(x)
  if(missing(sigma))
    sigma <- 0.1 * diameter(w)
  #' determine locations for evaluation of density
  if(is.null(at)) {
    atype <- "window"
    w <- do.call.matched(as.mask,
                         resolve.defaults(list(w=w, ...)))
  } else if(is.owin(at)) {
    atype <- "window"
    w <- do.call.matched(as.mask,
                         resolve.defaults(list(w=at, ...)))
  } else {
    atype <- "points"
    atY <- try(as.ppp(at, W=w))
    if(inherits(atY, "try-error"))
      stop("Argument 'at' should be a window or a point pattern",
           call.=FALSE)
  }
  #' detect empty pattern
  if(n == 0 || all(len == 0))
    switch(atype,
           window = return(as.im(0, w)),
           points = return(rep(0, npoints(atY))))
  #' determine prediction coordinates
  switch(atype,
         window = {
           xy <- rasterxy.mask(w)
           xx <- xy$x
           yy <- xy$y
         },
         points = {
           xx <- atY$x
           yy <- atY$y
         })
  #' c o m p u t e
  switch(method,
         interpreted = {
           #' compute matrix contribution from each segment
           ## u1, u2 are coordinates of each query point relative to the
           ## segment: u1 along the segment direction, u2 perpendicular.
           ## dnorm(u2)*(pnorm(u1)-pnorm(u1-len)) is the closed-form
           ## integral of the Gaussian kernel along the segment.
           coz <- cos(ang)
           zin <- sin(ang)
           for(i in seq_len(n)) {
             en <- x$ends[i,]
             dx <- xx - en$x0
             dy <- yy - en$y0
             u1 <- dx * coz[i] + dy * zin[i]
             u2 <- - dx * zin[i] + dy * coz[i]
             value <- dnorm(u2, sd=sigma) *
               (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma))
             totvalue <- if(i == 1L) value else (value + totvalue)
           }
           dens <- switch(atype,
                          window = im(totvalue, w$xcol, w$yrow, unitname=ux),
                          points = totvalue)
         },
         C = {
           #' C implementation of the above
           xs <- x$ends$x0
           ys <- x$ends$y0
           xp <- as.numeric(as.vector(xx))
           yp <- as.numeric(as.vector(yy))
           np <- length(xp)
           z <- .C("segdens",
                   sigma = as.double(sigma),
                   ns = as.integer(n),
                   xs = as.double(xs),
                   ys = as.double(ys),
                   alps = as.double(ang),
                   lens = as.double(len),
                   np = as.integer(np),
                   xp = as.double(xp),
                   yp = as.double(yp),
                   z = as.double(numeric(np)),
                   PACKAGE = "spatstat")
           dens <- switch(atype,
                          window = im(z$z, w$xcol, w$yrow, unitname=ux),
                          points = z$z)
         },
         FFT = {
           ## approximate: pixellate the segments, then Gaussian blur
           Y <- pixellate(x, ..., DivideByPixelArea=TRUE)
           dens <- blur(Y, sigma, normalise=edge, bleed=FALSE, ...)
           if(atype == "points")
             dens <- dens[atY, drop=FALSE]
         })
  ## edge correction for the exact methods (FFT handles it via 'normalise')
  if(edge && method != "FFT") {
    edg <- second.moment.calc(midpoints.psp(x), sigma, what="edge", ...)
    switch(atype,
           window = {
             dens <- eval.im(dens/edg)
           },
           points = {
             edgY <- edg[atY, drop=FALSE]
             dens <- dens/edgY
           })
  }
  if(atype == "window")
    dens <- dens[x$window, drop=FALSE]
  attr(dens, "sigma") <- sigma
  return(dens)
}
## Penttinen: creator function for the Penttinen pairwise interaction
## point process model, built from a blank "interact" template.
## The interaction potential between two points at distance d is
## nonzero only for d < 2r (overlapping discs of radius r).
Penttinen <- local({

  # create blank template object without family and pars
  BlankAntti <-
    list(
      name     = "Penttinen process",
      creator  = "Penttinen",
      family   = "pairwise.family", # evaluated later
      ## pairwise potential as a function of distance d;
      ## z = d/(2r); (2/pi)*(acos(z) - z*sqrt(1-z^2)) — presumably the
      ## normalised overlap area of two discs of radius r (TODO confirm
      ## against Penttinen's definition); zero for d >= 2r
      pot      = function(d, par) {
        ans <- numeric(length(d))
        dim(ans) <- dim(d)
        zz <- d/(2 * par$r)
        ok <- (zz < 1)
        z <- zz[ok]
        ans[ok] <- (2/pi) * (acos(z) - z * sqrt(1-z^2))
        return(ans)
      },
      par      = list(r = NULL), # to be filled in
      parnames = "circle radius",
      hasInf   = FALSE,
      ## validate the irregular parameter r
      init     = function(self) {
        r <- self$par$r
        if(!is.numeric(r) || length(r) != 1 || r <= 0)
          stop("interaction distance r must be a positive number")
      },
      update = NULL, # default OK
      print = NULL,  # default OK
      ## translate the fitted canonical coefficient into gamma = exp(theta)
      interpret = function(coeffs, self) {
        theta <- as.numeric(coeffs[1])
        gamma <- exp(theta)
        return(list(param=list(gamma=gamma),
                    inames="interaction parameter gamma",
                    printable=dround(gamma)))
      },
      ## the model is valid (integrable) only for theta <= 0, i.e. gamma <= 1
      valid = function(coeffs, self) {
        theta <- as.numeric(coeffs[1])
        return(is.finite(theta) && (theta <= 0))
      },
      ## project an invalid fit to the nearest valid model (Poisson)
      project = function(coeffs, self) {
        if((self$valid)(coeffs, self)) return(NULL) else return(Poisson())
      },
      ## interaction range: 2r, or 0 when the coefficient is negligible
      irange = function(self, coeffs=NA, epsilon=0, ...)
      {
        r <- self$par$r
        if(anyNA(coeffs))
          return(2 * r)
        theta <- coeffs[1]
        if(abs(theta) <= epsilon) return(0) else return(2 * r)
      },
      version=NULL # to be filled in
    )
  class(BlankAntti) <- "interact"

  # Finally define main function
  Penttinen <- function(r) {
    instantiate.interact(BlankAntti, list(r=r))
  }
  Penttinen <- intermaker(Penttinen, BlankAntti)
  Penttinen
})
## dppparbounds: compute the valid range (lower, upper) of parameters of
## a determinantal point process model family.
##
##   model: object of class "detpointprocfamily" or "dppm"
##   name:  character vector of parameter names; defaults to all formal
##          arguments of the family's bounds function except "name"
##          itself and the dimension parameter
##   ...:   additional (fixed) parameter values passed to the bounds
##          function
##
## Returns a matrix with one row per requested parameter and columns
## "lower" and "upper". A row is (NA, NA) when the bounds cannot be
## computed for that parameter.
dppparbounds <- function(model, name, ...){
  if(inherits(model, "dppm")) model <- model$fitted
  if(!inherits(model, "detpointprocfamily"))
    stop("input model must be of class detpointprocfamily or dppm")
  fun <- model$parbounds
  nam <- names(formals(fun))
  if(missing(name)) name <- nam[!is.element(nam, c("name", model$dim))]
  rslt <- matrix(0, length(name), 2,
                 dimnames = list(name, c("lower", "upper")))
  for(nn in name){
    ## supply the model's fixed parameters (those recognised by the
    ## bounds function), any user-supplied values, and the target name
    tmp <- try(do.call(fun,
                       c(model$fixedpar[is.element(names(model$fixedpar),
                                                   nam)],
                         list(...),
                         list(name=nn))),
               silent=TRUE)
    ## BUGFIX: use inherits() rather than class(tmp)=="try-error";
    ## class() may return a vector of length > 1, which is an error
    ## in an if() condition in R >= 4.2
    if(inherits(tmp, "try-error")){
      rslt[nn,] <- c(NA, NA)
    } else {
      rslt[nn,] <- tmp
    }
  }
  rslt
}
## dppkernel: extract the kernel of a determinantal point process model
## as a function of distance. If the family has no closed-form kernel,
## fall back to the Fourier approximation (dppapproxkernel).
## All model parameters must be fixed.
dppkernel <- function(model, ...){
  if(inherits(model, "dppm"))
    model <- model$fitted
  kernfun <- model$kernel
  ## no analytic kernel: use the numerical approximation instead
  if(is.null(kernfun))
    return(dppapproxkernel(model, ...))
  if(length(model$freepar) > 0)
    stop("Cannot extract the kernel of a partially specified model. Please supply all parameters.")
  ## the first formal argument of the family kernel receives the distances;
  ## the remaining arguments are supplied from the fixed parameters
  argname <- names(formals(kernfun))[1L]
  fixed <- model$fixedpar
  kernel <- function(x){
    arglist <- c(structure(list(x), .Names=argname), fixed)
    do.call(kernfun, arglist)
  }
  return(kernel)
}
## dppapproxpcf: approximate pair correlation function of a determinantal
## point process model, derived from the approximate kernel K via
##   g(x) = 1 - (K(x)/K(0))^2 .
## The "dpp" attribute (approximation precision/truncation) is carried
## over from the kernel approximation.
dppapproxpcf <- function(model, trunc = .99, W = NULL){
  ker <- dppapproxkernel(model, trunc = trunc, W = W)
  pcf <- function(x){
    1 - (ker(x)/ker(0))^2
  }
  attr(pcf, "dpp") <- attr(ker, "dpp")
  return(pcf)
}
## intensity method for determinantal point process families.
## Returns the fixed value of the designated intensity parameter
## (multiplied by the thinning factor, if any), or NA when the model
## has no intensity parameter or its value is not fixed.
intensity.detpointprocfamily <- function(X, ...){
  parname <- X$intensity
  lambda <- if(is.null(parname)) NULL else getElement(X$fixedpar, parname)
  if(is.null(lambda))
    return(NA)
  thin <- X$thin
  if(!is.null(thin))
    lambda <- lambda * thin
  return(lambda)
}
ok <- inside.owin(xy, w=W) if((nbad <- sum(!ok)) > 0) warning(paste("Ignored", nbad, ngettext(nbad, "point", "points"), "outside window"), call.=FALSE) X <- as.lpp(xy$x[ok], xy$y[ok], L=L) return(X) } ##### multitype ####################### ftypes <- factor(types, levels=types) #' input points of type 1 X <- getem(ftypes[1L], instructions, n=n, L=L, add=add, ..., pch=1) X <- X %mark% ftypes[1L] #' input points of types 2, 3, ... in turn for(i in 2:length(types)) { Xi <- getem(ftypes[i], instructions, n=n, L=L, add=add, ..., hook=X, pch=i) Xi <- Xi %mark% ftypes[i] X <- superimpose(X, Xi, L=L) } if(!add) plot(X, main="Final pattern") return(X) } getem <- function(i, instr, ...) { main <- paste("Points of type", sQuote(i), "\n", instr) do.call(clicklpp, resolve.defaults(list(...), list(main=main))) } clicklpp }) spatstat/R/spatialcdf.R0000644000176200001440000000440513441715742014566 0ustar liggesusers## ## spatialcdf.R ## ## $Revision: 1.5 $ $Date: 2019/03/12 11:45:26 $ ## spatialcdf <- function(Z, weights=NULL, normalise=FALSE, ..., W=NULL, Zname=NULL) { Zdefaultname <- singlestring(short.deparse(substitute(Z))) if(is.character(Z) && length(Z) == 1) { if(is.null(Zname)) Zname <- Z switch(Zname, x={ Z <- function(x,y) { x } }, y={ Z <- function(x,y) { y } }, stop("Unrecognised covariate name") ) } if(is.null(Zname)) Zname <- Zdefaultname ## if(is.ppm(weights) || is.kppm(weights) || is.dppm(weights)) { Q <- quad.ppm(as.ppm(weights)) loc <- as.ppp(Q) df <- mpl.get.covariates(list(Z=Z), loc, covfunargs=list(...)) df$wt <- fitted(weights) * w.quad(Q) G <- with(df, ewcdf(Z, wt, normalise=normalise)) wtname <- if(normalise) "fraction of points" else "number of points" } else { if(is.null(W)) W <- as.owin(weights, fatal=FALSE) if(is.null(W)) W <- as.owin(Z, fatal=FALSE) if(is.null(W)) stop("No information specifying the spatial window") M <- as.mask(W, ...) 
## Plot method for spatial cumulative distribution functions.
## Default axis labels come from the attributes stored by spatialcdf():
## "Zname" (covariate name) and "ylab" (description of the weights).
plot.spatialcdf <- function(x, ..., xlab, ylab) {
  if(missing(xlab) || is.null(xlab))
    xlab <- attr(x, "Zname")
  if(missing(ylab) || is.null(ylab))
    ylab <- attr(x, "ylab")
  ## unweighted cdfs inherit "ecdf"; weighted ones are plain step functions
  plotter <- if(inherits(x, "ecdf")) plot.ecdf else plot.stepfun
  plotter(x, ..., xlab=xlab, ylab=ylab)
}
## AUC method for point patterns: area under the ROC curve of a
## spatial covariate as a predictor of point presence.
## Computed as the mean of the covariate's cumulative probability
## values U at the data points, under the null model fitted by ppm(X).
## If high=FALSE, low covariate values are treated as predicting presence.
auc.ppp <- function(X, covariate, ..., high=TRUE) {
  frame <- spatialCDFframe(ppm(X), covariate, ...)
  meanU <- mean(frame$values$U)
  if(high) meanU else 1 - meanU
}
## AUC method for fitted point process models on a linear network.
## Returns c(obs=..., theo=...): the observed AUC and the value
## expected under the fitted model.
auc.lppm <- function(X, ...) {
  stopifnot(inherits(X, "lppm"))
  model <- X
  if(is.multitype(model)) {
    ## multitype case: average the ROC curve ordinates (cheat)
    ro <- roc(model, ...)
    aobs <- with(ro, mean(fobs))
    atheo <- with(ro, mean(ftheo))
  } else {
    lambda <- predict(model, ...)
    Fl <- ecdf(lambda[])
    ## observed: mean rank of fitted intensity at the data points
    aobs <- mean(Fl(lambda[model$X]))
    ## theoretical: intensity-weighted mean rank of the fitted intensity
    atheo <- mean(lambda[] * Fl(lambda[])) / mean(lambda)
  }
  c(obs=aobs, theo=atheo)
}
# crossdist.lpp <- function(X, Y, ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) check <- resolve.defaults(list(...), list(check=TRUE))$check # nX <- npoints(X) nY <- npoints(Y) # L <- as.linnet(X, sparse=FALSE) if(check) { LY <- as.linnet(Y, sparse=FALSE) if(!identical(L, LY)) stop("X and Y are on different linear networks") } if(any(is.infinite(L$dpath))) { #' disconnected network lab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), lab) crossdistmat <- matrix(Inf,nX,nY) for(subi in subsets) { Xi <- thinNetwork(X, retainvertices=subi) Yi <- thinNetwork(Y, retainvertices=subi) whichX <- attr(Xi, "retainpoints") whichY <- attr(Yi, "retainpoints") crossdistmat[whichX, whichY] <- crossdist.lpp(Xi, Yi, method=method) } return(crossdistmat) } # network is connected P <- as.ppp(X) Q <- as.ppp(Y) # # Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # nearest segment for each point Xpro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg Ypro <- coords(Y, local=TRUE, spatial=FALSE, temporal=FALSE)$seg if(method == "interpreted") { # loop through all pairs of data points crossdistmat <- matrix(,nX,nY) for (i in 1:nX) { Xproi <- Xpro[i] Xi <- P[i] nbi1 <- from[Xproi] nbi2 <- to[Xproi] vi1 <- Lvert[nbi1] vi2 <- Lvert[nbi2] dXi1 <- crossdist(Xi, vi1) dXi2 <- crossdist(Xi, vi2) for (j in 1:nY) { Yj <- Q[j] Yproj <- Ypro[j] if(Xproi == Yproj) { # points i and j lie on the same segment # use Euclidean distance d <- crossdist(Xi, Yj) } else { # shortest path from i to j passes through ends of segments nbj1 <- from[Yproj] nbj2 <- to[Yproj] vj1 <- Lvert[nbj1] vj2 <- Lvert[nbj2] # Calculate shortest of 4 possible paths from i to j d1Yj <- crossdist(vj1,Yj) d2Yj <- crossdist(vj2,Yj) d11 <- dXi1 + dpath[nbi1,nbj1] + d1Yj d12 <- dXi1 + dpath[nbi1,nbj2] + d2Yj d21 <- dXi2 + dpath[nbi2,nbj1] + d1Yj d22 <- dXi2 + dpath[nbi2,nbj2] + d2Yj d <- min(d11,d12,d21,d22) } # store result 
## rmhmodel method for objects that are already rmhmodel objects.
## Rebuilds the object when it is stored in an outdated internal format,
## or when additional arguments are supplied; otherwise returns it as-is.
rmhmodel.rmhmodel <- function(model, ...) {
  ## Detect outdated internal format:
  ## C.par was replaced by C.beta and C.ipar in spatstat 1.22-3
  outdated <- !is.null(model$C.par)
  if(outdated)
    warning("Outdated internal format of rmhmodel object; rebuilding it")
  extras <- list(...)
  if(outdated || length(extras) > 0)
    model <- rmhmodel.list(unclass(model), ...)
  return(model)
}
= FALSE) ## Validate parameters if(is.null(cif)) stop("cif is missing or NULL") if(is.null(par)) stop("par is missing or NULL") if(!is.null(w)) w <- as.owin(w) if(!is.character(cif)) stop("cif should be a character string") betamultiplier <- 1 Ncif <- length(cif) if(Ncif > 1) { ## hybrid ## check for Poisson components ispois <- (cif == 'poisson') if(any(ispois)) { ## validate Poisson components Npois <- sum(ispois) poismodels <- vector(mode="list", length=Npois) parpois <- par[ispois] for(i in 1:Npois) poismodels[[i]] <- rmhmodel(cif='poisson', par=parpois[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) ## consolidate Poisson intensity parameters poisbetalist <- lapply(poismodels, getElement, name="C.beta") poisbeta <- Reduce("*", poisbetalist) if(all(ispois)) { ## model collapses to a Poisson process cif <- 'poisson' Ncif <- 1 par <- list(beta=poisbeta) betamultiplier <- 1 } else { ## remove Poisson components cif <- cif[!ispois] Ncif <- sum(!ispois) par <- par[!ispois] if(Ncif == 1) # revert to single-cif format par <- par[[1]] ## absorb beta parameters betamultiplier <- poisbeta } } } if(Ncif > 1) { ## genuine hybrid models <- vector(mode="list", length=Ncif) check <- vector(mode="list", length=Ncif) for(i in 1:Ncif) models[[i]] <- rmhmodel(cif=cif[i], par=par[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) C.id <- unlist(lapply(models, getElement, name="C.id")) C.betalist <- lapply(models, getElement, name="C.beta") C.iparlist <- lapply(models, getElement, name="C.ipar") ## absorb beta multiplier into beta parameter of first component C.betalist[[1]] <- C.betalist[[1]] * betamultiplier ## concatenate for use in C C.beta <- unlist(C.betalist) C.ipar <- unlist(C.iparlist) check <- lapply(models, getElement, name="check") maxr <- max(unlist(lapply(models, getElement, name="reach"))) ismulti <- unlist(lapply(models, getElement, name="multitype.interact")) multi <- any(ismulti) ## determine whether model exists integ <- unlist(lapply(models, 
getElement, name="integrable")) stabi <- unlist(lapply(models, getElement, name="stabilising")) integrable <- all(integ) || any(stabi) stabilising <- any(stabi) ## string explanations of conditions for validity expl <- lapply(models, getElement, name="explainvalid") integ.ex <- unlist(lapply(expl, getElement, name="integrable")) stabi.ex <- unlist(lapply(expl, getElement, name="stabilising")) stabi.oper <- !(stabi.ex %in% c("TRUE", "FALSE")) integ.oper <- !(integ.ex %in% c("TRUE", "FALSE")) compnames <- if(!anyDuplicated(C.id)) paste("cif", sQuote(C.id)) else paste("component", 1:Ncif, paren(sQuote(C.id))) if(!integrable && stopinvalid) { ## model is not integrable: explain why ifail <- !integ & integ.oper ireason <- paste(compnames[ifail], "should satisfy", paren(integ.ex[ifail], "{")) ireason <- verbalogic(ireason, "and") if(sum(ifail) <= 1) { ## There's only one offending cif, so stability is redundant sreason <- "FALSE" } else { sfail <- !stabi & stabi.oper sreason <- paste(compnames[sfail], "should satisfy", paren(stabi.ex[sfail], "{")) sreason <- verbalogic(sreason, "or") } reason <- verbalogic(c(ireason, sreason), "or") stop(paste("rmhmodel: hybrid model is not integrable; ", reason), call.=FALSE) } else { ## construct strings summarising conditions for validity if(!any(integ.oper)) ireason <- as.character(integrable) else { ireason <- paste(compnames[integ.oper], "should satisfy", paren(integ.ex[integ.oper], "{")) ireason <- verbalogic(ireason, "and") } if(!any(stabi.oper)) sreason <- as.character(stabilising) else { sreason <- paste(compnames[stabi.oper], "should satisfy", paren(stabi.ex[stabi.oper], "{")) sreason <- verbalogic(sreason, "or") } ireason <- verbalogic(c(ireason, sreason), "or") explainvalid <- list(integrable=ireason, stabilising=sreason) } out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, C.betalist=C.betalist, C.iparlist=C.iparlist, check=check, multitype.interact=multi, 
integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=maxr) class(out) <- c("rmhmodel", class(out)) return(out) } ## non-hybrid ## Check that this is a recognised model ## and look up the rules for this model rules <- spatstatRmhInfo(cif) ## Map the name of the cif from R to C ## (the names are normally identical in R and C, ## except "poisson" -> NA) C.id <- rules$C.id ## Check that the C name is recognised in C if(!is.na(C.id)) { z <- .C("knownCif", cifname=as.character(C.id), answer=as.integer(0), PACKAGE = "spatstat") ok <- as.logical(z$answer) if(!ok) stop(paste("Internal error: the cif", sQuote(C.id), "is not recognised in the C code")) } ## Validate the model parameters and reformat them check <- rules$parhandler checkedpar <- if(!rules$multitype) check(par) else if(!is.null(types)) check(par, types) else ## types vector not given - defer checking NULL if(!is.null(checkedpar)) { stopifnot(is.list(checkedpar)) stopifnot(!is.null(names(checkedpar)) && all(nzchar(names(checkedpar)))) stopifnot(names(checkedpar)[[1]] == "beta") C.beta <- unlist(checkedpar[[1]]) C.beta <- C.beta * betamultiplier C.ipar <- as.numeric(unlist(checkedpar[-1])) } else { C.beta <- C.ipar <- NULL } ## Determine whether model is integrable integrable <- rules$validity(par, "integrable") explainvalid <- rules$explainvalid if(!integrable && stopinvalid) stop(paste("rmhmodel: the model is not integrable; it should satisfy", explainvalid$integrable), call.=FALSE) ## Determine whether cif is stabilising ## (i.e. 
any hybrid including this cif will be integrable) stabilising <- rules$validity(par, "stabilising") ## Calculate reach of model mreach <- rules$reach(par) ################################################################### ## return augmented list out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, check= if(is.null(C.ipar)) check else NULL, multitype.interact=rules$multitype, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=mreach ) class(out) <- c("rmhmodel", class(out)) return(out) } rmhmodel.default }) print.rmhmodel <- function(x, ...) { verifyclass(x, "rmhmodel") splat("Metropolis-Hastings algorithm, model parameters\n") Ncif <- length(x$cif) splat("Conditional intensity:", if(Ncif == 1) "cif=" else "hybrid of cifs", commasep(sQuote(x$cif))) if(!is.null(x$types)) { if(length(x$types) == 1) splat("Univariate process.") else { cat("Multitype process with types =\n") print(x$types) if(!x$multitype.interact) splat("Interaction does not depend on type") } } else if(x$multitype.interact) { splat("Multitype process, types not yet specified.") } else { typ <- try(rmhResolveTypes(x, rmhstart(), rmhcontrol())) if(!inherits(typ, "try-error")) { ntyp <- length(typ) if(ntyp > 1) { splat("Data imply a multitype process with", ntyp, "types of points.") splat("Interaction does not depend on type.") } } } cat("\nNumerical parameters: par =\n") print(x$par) if(is.null(x$C.ipar)) splat("Parameters have not yet been checked for compatibility with types.") if(is.owin(x$w)) print(x$w) else splat("Window: not specified.") cat("\nTrend: ") tren <- x$trend if(is.null(tren)) { cat("none.\n") } else { if(is.list(tren)) cat(paste0("List of ", length(tren), ":\n")) print(tren) } if(!is.null(x$integrable) && !x$integrable) cat("\n*Warning: model is not integrable and cannot be simulated*\n") return(invisible(NULL)) } reach.rmhmodel <- function(x, ...) 
{ if(length(list(...)) == 0) return(x$reach) # reach must be recomputed cif <- x$cif Ncif <- length(cif) pars <- if(Ncif == 1) list(x$par) else x$par maxr <- 0 for(i in seq_len(Ncif)) { cif.i <- cif[i] par.i <- pars[[i]] rules <- spatstatRmhInfo(cif.i) rchfun <- rules$reach if(!is.function(rchfun)) stop(paste("Internal error: reach is unknown for cif=", sQuote(cif.i)), call.=FALSE) r.i <- rchfun(par.i, ...) maxr <- max(maxr, r.i, na.rm=TRUE) } return(maxr) } is.poisson.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") identical(x$cif, 'poisson') } is.stationary.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") tren <- x$trend return(is.null(tren) || is.numeric(tren)) } as.owin.rmhmodel <- function(W, ..., fatal=FALSE) { # W is the rmhmodel object. It contains a window w ans <- W$w if(is.owin(ans)) return(ans) if(fatal) stop("rmhmodel object does not contain a window") return(NULL) } domain.rmhmodel <- Window.rmhmodel <- function(X, ...) { as.owin(X) } is.expandable.rmhmodel <- local({ ok <- function(z) { is.null(z) || is.numeric(z) || is.function(z) } is.expandable.rmhmodel <- function(x) { tren <- x$tren ans <- if(!is.list(tren)) ok(tren) else all(sapply(tren, ok)) return(ans) } is.expandable.rmhmodel }) ##### Table of rules for handling rmh models ################## spatstatRmhInfo <- function(cifname) { rules <- .Spatstat.RmhTable[[cifname]] if(is.null(rules)) stop(paste("Unrecognised cif:", sQuote(cifname)), call.=FALSE) return(rules) } .Spatstat.RmhTable <- list( # # 0. Poisson (special case) # 'poisson'= list( C.id=NA, multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Poisson process" with(par, forbidNA(beta, ctxt)) par <- check.named.list(par, "beta", ctxt) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ...) { return(0) }, hardcore = function(par, ...) 
{ return(0) }, temper = function(par, invtemp) { return(par^invtemp) } ), # # 1. Strauss. # 'strauss'= list( C.id="strauss", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the strauss cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 2. Strauss with hardcore. # 'straush' = list( C.id="straush", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the straush cif" par <- check.named.list(par, c("beta","gamma","r","hc"), ctxt) # treat hc=NA as absence of hard core par <- within(par, if(is.na(hc)) { hc <- 0 } ) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- hc; gamma <- 1 } ) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(hc <= r, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc gamma <- par$gamma switch(kind, integrable=(hc > 0 || gamma <= 1), stabilising=(hc > 0) ) }, explainvalid=list( integrable="hc > 0 or gamma <= 1", stabilising="hc > 0"), reach = function(par, ...) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) h else r) }, hardcore = function(par, ..., epsilon=0) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else h) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 3. Softcore. # 'sftcr' = list( C.id="sftcr", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the sftcr cif" par <- check.named.list(par, c("beta","sigma","kappa"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(sigma >= 0, ctxt)) with(par, explain.ifnot(kappa >= 0 && kappa <= 1, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ..., epsilon=0) { if(epsilon==0) return(Inf) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/(epsilon^(kappa/2))) }, hardcore = function(par, ..., epsilon=0) { if(epsilon==0) return(0) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/((-log(epsilon))^(kappa/2))) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp sigma <- sigma * (invtemp^(kappa/2)) }) } ), # # 4. Multitype Strauss. 
# 'straussm' = list( C.id="straussm", multitype=TRUE, parhandler=function(par, types) { ctxt <- "For the straussm cif" par <- check.named.list(par, c("beta","gamma","radii"), ctxt) beta <- par$beta gamma <- par$gamma r <- par$radii ntypes <- length(types) check.finite(beta, ctxt) check.nvector(beta, ntypes, TRUE, "types") MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(r, ntypes, "par$radii") if(any(nar <- is.na(r))) { r[nar] <- 0 gamma[nar] <- 1 } check.finite(r, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(r >= 0), ctxt) par <- list(beta=beta, gamma=gamma, r=r) return(par) }, validity=function(par, kind) { gamma <- par$gamma radii <- par$radii dg <- diag(gamma) dr <- diag(radii) hard <-!is.na(dg) & (dg == 0) & !is.na(dr) & (dr > 0) operative <- !is.na(gamma) & !is.na(radii) & (radii > 0) switch(kind, stabilising=all(hard), integrable=all(hard) || all(gamma[operative] <= 1)) }, explainvalid=list( integrable=paste( "gamma[i,j] <= 1 for all i and j,", "or gamma[i,i] = 0 for all i"), stabilising="gamma[i,i] = 0 for all i"), reach = function(par, ...) { r <- par$radii g <- par$gamma operative <- ! (is.na(r) | (g == 1)) return(max(0, r[operative])) }, hardcore = function(par, ..., epsilon=0) { r <- par$radii g <- par$gamma return(max(0, r[!is.na(r) & g <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 5. Multitype Strauss with hardcore. 
# 'straushm' = list( C.id="straushm", multitype=TRUE, parhandler=function(par, types) { ctxt="For the straushm cif" par <- check.named.list(par, c("beta","gamma","iradii","hradii"), ctxt) beta <- par$beta gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(iradii, ntypes, "par$iradii") if(any(nar <- is.na(iradii))) { iradii[nar] <- 0 gamma[nar] <- 1 } check.finite(iradii, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") nah <- is.na(hradii) hradii[nah] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(iradii >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) comparable <- !nar & !nah explain.ifnot(all((iradii >= hradii)[comparable]), ctxt) par <- list(beta=beta,gamma=gamma,iradii=iradii,hradii=hradii) return(par) }, validity=function(par, kind) { gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii dh <- diag(hradii) dg <- diag(gamma) dr <- diag(iradii) hhard <- !is.na(dh) & (dh > 0) ihard <- !is.na(dr) & (dr > 0) & !is.na(dg) & (dg == 0) hard <- hhard | ihard operative <- !is.na(gamma) & !is.na(iradii) & (iradii > 0) switch(kind, stabilising=all(hard), integrable={ all(hard) || all(gamma[operative] <= 1) }) }, explainvalid=list( integrable=paste( "hradii[i,i] > 0 or gamma[i,i] = 0 for all i, or", "gamma[i,j] <= 1 for all i and j"), stabilising="hradii[i,i] > 0 or gamma[i,i] = 0 for all i"), reach=function(par, ...) { r <- par$iradii h <- par$hradii g <- par$gamma roperative <- ! (is.na(r) | (g == 1)) hoperative <- ! 
is.na(h) return(max(0, r[roperative], h[hoperative])) }, hardcore = function(par, ..., epsilon=0) { r <- par$radii h <- par$hradii g <- par$gamma return(max(h[!is.na(h)], r[!is.na(r) & g <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 6. Diggle-Gates-Stibbard interaction # (function number 1 from Diggle, Gates, and Stibbard) 'dgs' = list( C.id="dgs", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the dgs cif" par <- check.named.list(par, c("beta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) }, hardcore=function(par, ..., epsilon=0) { if(epsilon == 0) return(0) return(par[["rho"]] * (2/pi) * asin(sqrt(epsilon))) }, temper = NULL # not a loglinear model ), # # 7. Diggle-Gratton interaction # (function number 2 from Diggle, Gates, and Stibbard). 'diggra' = list( C.id="diggra", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the diggra cif" par <- check.named.list(par, c("beta","kappa","delta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(delta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(delta, ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(kappa >= 0, ctxt)) with(par, explain.ifnot(delta >= 0, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) with(par, explain.ifnot(delta < rho, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) }, hardcore=function(par, ..., epsilon=0) { return(par[["delta"]]) }, temper = function(par, invtemp) { within(par, { kappa <- kappa * invtemp }) }), # # 8. Geyer saturation model # 'geyer' = list( C.id="geyer", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the geyer cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(sat, ctxt)) par <- within(par, sat <- min(sat, .Machine$integer.max-100)) par <- within(par, if(is.na(gamma)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) 
{ r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else 2 * r) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) r else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 9. The ``lookup'' device. This permits simulating, at least # approximately, ANY pairwise interaction function model # with isotropic pair interaction (i.e. depending only on distance). # The pair interaction function is provided as a vector of # distances and corresponding function values which are used # as a ``lookup table'' by the C code. # 'lookup' = list( C.id="lookup", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the lookup cif" par <- check.named.list(par, c("beta","h"), ctxt, "r") with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) beta <- par[["beta"]] h.init <- par[["h"]] r <- par[["r"]] if(is.null(r)) { if(!is.stepfun(h.init)) stop(paste("For cif=lookup, if component r of", "par is absent then component h must", "be a stepfun object.")) if(!is.cadlag(h.init)) stop(paste("The lookup pairwise interaction step", "function must be right continuous,\n", "i.e. 
built using the default values of the", sQuote("f"), "and", sQuote("right"), "arguments for stepfun.")) r <- knots(h.init) h0 <- get("yleft",envir=environment(h.init)) h <- h.init(r) nlook <- length(r) if(!isTRUE(all.equal(h[nlook],1))) stop(paste("The lookup interaction step function", "must be equal to 1 for", dQuote("large"), "distances.")) if(r[1] <= 0) stop(paste("The first jump point (knot) of the lookup", "interaction step function must be", "strictly positive.")) h <- c(h0,h) } else { h <- h.init nlook <- length(r) if(length(h) != nlook) stop("Mismatch of lengths of h and r lookup vectors.") if(anyNA(r)) stop("Missing values not allowed in r lookup vector.") if(is.unsorted(r)) stop("The r lookup vector must be in increasing order.") if(r[1] <= 0) stop(paste("The first entry of the lookup vector r", "should be strictly positive.")) h <- c(h,1) } if(any(h < 0)) stop(paste("Negative values in the lookup", "pairwise interaction function.")) if(h[1] > 0 & any(h > 1)) stop(paste("Lookup pairwise interaction function does", "not define a valid point process.")) rmax <- r[nlook] r <- c(0,r) nlook <- nlook+1 deltar <- mean(diff(r)) if(isTRUE(all.equal(diff(r),rep.int(deltar,nlook-1)))) { par <- list(beta=beta,nlook=nlook, equisp=1, deltar=deltar,rmax=rmax, h=h) } else { par <- list(beta=beta,nlook=nlook, equisp=0, deltar=deltar,rmax=rmax, h=h, r=r) } return(par) }, validity=function(par, kind) { h <- par$h if(is.stepfun(h)) h <- eval(expression(c(yleft,y)),envir=environment(h)) switch(kind, integrable={ (h[1] == 0) || all(h <= 1) }, stabilising={ h[1] == 0 }) }, explainvalid=list( integrable="h[1] == 0 or h[i] <= 1 for all i", stabilising="h[1] == 0"), reach = function(par, ...) 
{ r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(0, r[h <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp h <- h^invtemp }) } ), # # 10. Area interaction # 'areaint'= list( C.id="areaint", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the areaint cif" par <- check.named.list(par, c("beta","eta","r"), ctxt) par <- within(par, if(is.na(r)) { r <- 0 }) with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(eta, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.finite(eta, ctxt)) with(par, check.finite(r, ctxt)) with(par, explain.ifnot(eta >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] eta <- par[["eta"]] return(if(eta == 1) 0 else (2 * r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] eta <- par[["eta"]] if(eta > epsilon) return(0) if(eta == 0) return(2 * r) # linear approximation return(2 * r * eta/epsilon) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp eta <- eta^invtemp }) } ), # # 11. The ``badgey'' (Baddeley-Geyer) model. # 'badgey' = list( C.id="badgey", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the badgey cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) par <- within(par, sat <- pmin(sat, .Machine$integer.max-100)) par <- within(par, gamma[is.na(gamma) | is.na(r)] <- 1) par <- within(par, r[is.na(r)] <- 0) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(all(gamma >= 0), ctxt)) with(par, explain.ifnot(all(r >= 0), ctxt)) with(par, explain.ifnot(all(sat >= 0), ctxt)) with(par, explain.ifnot(length(gamma) == length(r), ctxt)) gamma <- par[["gamma"]] r <- par[["r"]] sat <- par[["sat"]] if(length(sat)==1) sat <- rep.int(sat,length(gamma)) else explain.ifnot(length(sat) == length(gamma), ctxt) mmm <- cbind(gamma,r,sat) mmm <- mmm[fave.order(r),] ndisc <- length(r) par <- list(beta=par$beta,ndisc=ndisc,parms=as.vector(t(mmm))) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] gamma <- par[["gamma"]] operative <- (gamma != 1) return(if(!any(operative)) 0 else (2 * max(r[operative]))) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] gamma <- par[["gamma"]] return(max(0, r[gamma <= epsilon])) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 12. The hard core process 'hardcore' = list( C.id="hardcore", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the hardcore cif" par <- check.named.list(par, c("beta", "hc"), ctxt) par <- within(par, if(is.na(hc)) { hc <- 0 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc switch(kind, integrable=TRUE, stabilising=(hc > 0)) }, explainvalid=list(integrable="TRUE", stabilising="hc > 0"), reach = function(par, ...) { hc <- par[["hc"]] return(hc) }, hardcore = function(par, ...) { hc <- par[["hc"]] return(hc) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp }) } ), # # Lucky 13. Fiksel process 'fiksel' = list( C.id="fiksel", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Fiksel cif" par <- check.named.list(par, c("beta", "r", "hc", "kappa", "a"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(a, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(a, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(r > hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc a <- par$a switch(kind, integrable=(hc > 0 || a <= 0), stabilising=(hc > 0)) }, explainvalid=list( integrable="hc > 0 or a <= 0", stabilising="hc > 0"), reach = function(par, ...) { r <- par[["r"]] hc <- par[["hc"]] a <- par[["a"]] return(if(a != 0) r else hc) }, hardcore = function(par, ...) { return(par[["hc"]]) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp a <- a * invtemp }) } ), # # 14. Lennard-Jones 'lennard' = list( C.id="lennard", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the Lennard-Jones cif" par <- check.named.list(par, c("beta", "sigma", "epsilon"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(epsilon, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(epsilon, ctxt)) with(par, explain.ifnot(sigma > 0, ctxt)) with(par, explain.ifnot(epsilon > 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=(par$sigma > 0), stabilising=FALSE) }, explainvalid=list( integrable="sigma > 0", stabilising="FALSE"), reach = function(par, ...) { sigma <- par[["sigma"]] return(2.5 * sigma) }, hardcore = function(par, ...) { sigma <- par[["sigma"]] return(sigma/2.5) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp epsilon <- epsilon * invtemp }) } ), # # 15. Multitype hardcore. # 'multihard' = list( C.id="multihard", multitype=TRUE, parhandler=function(par, types) { ctxt="For the multihard cif" par <- check.named.list(par, c("beta","hradii"), ctxt) beta <- par$beta hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") hradii[is.na(hradii)] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) par <- list(beta=beta,hradii=hradii) return(par) }, validity=function(par, kind) { switch(kind, integrable=return(TRUE), stabilising={ hself <- diag(par$hradii) repel <- !is.na(hself) & (hself > 0) return(all(repel)) }) }, explainvalid=list( integrable="TRUE", stabilising="hradii[i,i] > 0 for all i"), reach=function(par, ...) { return(max(0, par$hradii, na.rm=TRUE)) }, hardcore=function(par, ..., epsilon=0) { return(max(0, par$hradii, na.rm=TRUE)) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp }) } ), # # 16. Triplets. 
# 'triplets'= list( C.id="triplets", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the triplets cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) }, hardcore = function(par, ...) { return(0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ), # # 17. Penttinen. # 'penttinen'= list( C.id="penttinen", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the penttinen cif" par <- check.named.list(par, c("beta", "gamma", "r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r > 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) 
{ r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else (2 * r)) }, hardcore = function(par, ..., epsilon=0) { r <- par[["r"]] g <- par[["gamma"]] return(if(g <= epsilon) (2 * r) else 0) }, temper = function(par, invtemp) { within(par, { beta <- beta^invtemp gamma <- gamma^invtemp }) } ) # end of list '.Spatstat.RmhTable' ) spatstat/R/eval.fasp.R0000644000176200001440000000560513333543255014334 0ustar liggesusers# # eval.fasp.R # # # eval.fasp() Evaluate expressions involving fasp objects # # compatible.fasp() Check whether two fasp objects are compatible # # $Revision: 1.11 $ $Date: 2016/02/11 10:17:12 $ # eval.fasp <- local({ eval.fasp <- function(expr, envir, dotonly=TRUE) { #' convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) #' convert syntactic expression to call ## elang <- substitute(expr) #' find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") ## get the actual variables if(missing(envir)) { envir <- sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames ## find out which ones are fasp objects isfasp <- unlist(lapply(vars, inherits, what="fasp")) if(!any(isfasp)) stop("No fasp objects in this expression") fasps <- vars[isfasp] nfasps <- length(fasps) ## test whether the fasp objects are compatible if(nfasps > 1L && !(do.call(compatible, unname(fasps)))) stop(paste(if(nfasps > 2) "some of" else NULL, "the objects", commasep(sQuote(names(fasps))), "are not compatible")) ## copy first object as template result <- fasps[[1L]] which <- result$which nr <- nrow(which) nc <- ncol(which) ## create environment for evaluation fenv <- new.env() ## for each [i,j] extract fv objects and evaluate expression for(i in seq_len(nr)) for(j in seq_len(nc)) { ## extract fv objects at position [i,j] funs <- lapply(fasps, getpanel, i=i, j=j) 
## insert into list of argument values vars[isfasp] <- funs ## assign them into the right environment for(k in seq_along(vars)) assign(varnames[k], vars[[k]], envir=fenv) ## evaluate resultij <- eval(substitute(eval.fv(ee,ff,dd), list(ee=e, ff=fenv, dd=dotonly))) ## insert back into fasp result$fns[[which[i,j] ]] <- resultij } result$title <- paste("Result of eval.fasp(", e, ")", sep="") return(result) } getpanel <- function(x, i, j) { as.fv(x[i,j]) } eval.fasp }) compatible.fasp <- function(A, B, ...) { verifyclass(A, "fasp") if(missing(B)) return(TRUE) verifyclass(B, "fasp") dimA <- dim(A$which) dimB <- dim(B$which) if(!all(dimA == dimB)) return(FALSE) for(i in seq_len(dimA[1L])) for(j in seq_len(dimA[2L])) { Aij <- as.fv(A[i,j]) Bij <- as.fv(B[i,j]) if(!compatible.fv(Aij, Bij)) return(FALSE) } # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.fasp(B, ...)) } spatstat/R/psst.R0000644000176200001440000001403313333543255013441 0ustar liggesusers# # psst.R # # Computes the GNZ contrast of delta-f for any function f # # $Revision: 1.9 $ $Date: 2015/07/11 08:19:26 $ # ################################################################################ # psst <- function(object, fun, r=NULL, breaks=NULL, ..., model=NULL, trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL, funargs=list(correction="best"), verbose=TRUE) { if(is.ppm(object)) { fit <- object } else if(is.ppp(object) || is.quad(object)) { if(is.ppp(object)) object <- quadscheme(object, ...) 
if(!is.null(model)) { fit <- update(model, Q=object, forcefit=TRUE) } else { fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, forcefit=TRUE) } } else stop("object should be a fitted point process model or a point pattern") # rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy # E <- equalsfun.quad(Q) # WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- Window(X) npts <- npoints(X) areaW <- area(Win) lambda <- npts/areaW # adjustments to account for restricted domain of pseudolikelihood # if(any(!USED) && spatstat.options("eroded.intensity")) { # XUSED <- USED[Z] # npts.used <- sum(Z & USED) # area.used <- sum(WQ[USED]) # lambda.used <- npts.used/area.used # } else { # XUSED <- rep.int(TRUE, npts) # npts.used <- npts # area.used <- areaW # lambda.used <- lambda # } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, new.coef=truecoef, quad=hi.res) rescts <- with(resid, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~S(r), NULL), "theo", . 
~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~S") # evaluate fun(X) for data fX <- do.call(fun, append(list(X, r=rvals), funargs)) fXunits <- unitname(fX) # Extract 'best' estimate only fX <- with(fX, .y) zero <- numeric(length(fX)) # sum over all quadrature points iused <- seq(U$n)[USED] nused <- length(iused) if(verbose) cat(paste("\nProcessing", nused, "quadrature points...")) # running sums & integrals sumX <- zero integ <- integ2 <- zero # template for X \cup {u} uX <- superimpose(U[1], X, W=Win, check=FALSE) Ux <- U$x Uy <- U$y # if(verbose) pstate <- list() # for(j in seq(nused)) { i <- iused[j] wi <- wc[i] if(Z[i]) { # data point fXi <- do.call(fun, append(list(X[-i], r=rvals), funargs)) fXi <- with(fXi, .y) deltaf <- fX - fXi sumX <- sumX + deltaf } else { # dummy point uX$x[1] <- Ux[i] uX$y[1] <- Uy[i] fuX <- do.call(fun, append(list(uX, r=rvals), funargs)) fuX <- with(fuX, .y) deltaf <- fuX - fX } integ <- integ + wi * deltaf integ2 <- integ2 + wi * deltaf^2 # if(j %% 500 == 0) { cat("[garbage ") gc() cat("collected]") } if(verbose) pstate <- progressreport(j, nused, state=pstate) } sdv <- sqrt(integ2) res <- sumX - integ ans <- bind.fv(ans, data.frame(dat=sumX, com=integ, var=integ2, sd=sdv, hi=2*sdv, lo=-2*sdv, res=res, stdres=res/sdv), c("Sigma~Delta~S(r)", "bold(C)~Delta~S(r)", "bold(C)^2~Delta~S(r)", "sqrt(bold(C)^2~Delta~S(r))", "%s[hi](r)", "%s[lo](r)", "bold(R)~Delta~S(r)", "bold(T)~Delta~S(r)"), c("data pseudosum (contribution to %s)", "model compensator (contribution to %s)", "pseudovariance of %s", "sqrt(pseudovariance) of %s", "upper 2 sigma critical band for %s", "lower 2 sigma critical band for %s", "pseudoresidual function %s", "standardised pseudoresidual function %s"), "res") fvnames(ans,".") <- c("res", "hi", "lo", "theo") unitname(ans) <- fXunits # return(ans) } npfun <- function(X, ..., r) { npts <- npoints(X) # initialise fv object df <- data.frame(r=r, theo=0, npoint=npts) desc <- c("distance argument r", 
"value 0", "value equal to number of points") ans <- fv(df, "r", substitute(npoints(r), NULL), "npoint", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="npoints") unitname(ans) <- unitname(X) return(ans) } nndcumfun <- function(X, ..., r) { nn <- nndist(X) bk <- breakpts.from.r(r) # nn <- nn[nn <= bdist.points(X)] h <- whist(nn, bk$val) # initialise fv object df <- data.frame(r=r, theo=0, obs=h) desc <- c("distance argument r", "value 0", "observed count") ans <- fv(df, "r", substitute(nndcount(r), NULL), "obs", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="nndcount") unitname(ans) <- unitname(X) return(ans) } spatstat/R/connected.R0000644000176200001440000001321013353334550014404 0ustar liggesusers# # connected.R # # connected component transform # # $Revision: 1.20 $ $Date: 2017/11/06 02:01:55 $ # # Interpreted code for pixel images by Julian Burgos # Rewritten in C by Adrian Baddeley # # Code for point patterns by Adrian Baddeley connected <- function(X, ...) { UseMethod("connected") } connected.im <- function(X, ..., background=NA, method="C") { W <- if(!is.na(background)) solutionset(X != background) else if(X$type == "logical") solutionset(X) else as.owin(X) connected.owin(W, method=method, ...) } connected.owin <- function(X, ..., method="C") { method <- pickoption("algorithm choice", method, c(C="C", interpreted="interpreted")) # convert X to binary mask X <- as.mask(X, ...) 
# Y <- X$m nr <- X$dim[1L] nc <- X$dim[2L] if(method == "C") { ################ COMPILED CODE ######################### # Pad border with FALSE M <- rbind(FALSE, Y, FALSE) M <- cbind(FALSE, M, FALSE) # assign unique label to each foreground pixel L <- M L[M] <- seq_len(sum(M)) L[!M] <- 0 # resolve labels z <- .C("cocoImage", mat=as.integer(t(L)), nr=as.integer(nr), nc=as.integer(nc), PACKAGE = "spatstat") # unpack Z <- matrix(z$mat, nr+2, nc+2, byrow=TRUE) } else { ################ INTERPRETED CODE ######################### # by Julian Burgos # # Pad border with zeros padY <- rbind(0, Y, 0) padY <- cbind(0, padY, 0) # Initialise Z <- matrix(0, nrow(padY), ncol(padY)) currentlab <- 1L todo <- as.vector(t(Y)) equiv <- NULL # ........ main loop .......................... while(any(todo)){ # pick first unresolved pixel one <- which(todo)[1L] onerow <- ceiling(one/nc) onecol <- one -((onerow-1L)*nc) parow=onerow+1L # Equivalent rows & column in padded matrix pacol=onecol+1L #Examine four previously scanned neighbors # (use padded matrix to avoid edge issues) nbrs <- rbind(c(parow-1L,pacol-1L), c(parow-1L,pacol), c(parow, pacol-1L), c(parow-1L,pacol+1L)) px <- sum(padY[nbrs]) if (px==0){ # no neighbours: new component Z[parow,pacol] <- currentlab currentlab <- currentlab+1L todo[one] <- FALSE } else if(px==1L) { # one neighbour: assign existing label labs <- unique(Z[nbrs], na.rm=TRUE) labs <- labs[labs != 0] Z[parow,pacol] <- labs[1L] currentlab <- max(Z)+1L todo[one] <- FALSE } else { # more than one neighbour: possible merger of labels labs <- unique(Z[nbrs], na.rm=TRUE) labs <- labs[labs != 0] labs <- sort(labs) equiv <- rbind(equiv,c(labs,rep.int(0,times=4-length(labs)))) Z[parow,pacol] <- labs[1L] currentlab <- max(Z)+1L todo[one] <- FALSE } } # ........... end of loop ............ # Resolve equivalences ................ 
if(length(equiv)>1L){ merges <- (equiv[,2L] > 1L) nmerge <- sum(merges) if(nmerge==1L) equiv <- equiv[which(merges), , drop=FALSE] else if(nmerge > 1L) { relevant <- (equiv[,2L] > 0) equiv <- equiv[relevant, , drop=FALSE] equiv <- equiv[fave.order(equiv[,1L]),] } for (i in 1:nrow(equiv)){ current <- equiv[i, 1L] for (j in 2:4){ twin <- equiv[i,j] if (twin>0){ # Change labels matrix Z[which(Z==twin)] <- current # Update equivalence table equiv[which(equiv==twin)] <- current } } } } } ########### COMMON CODE ############################ # Renumber labels sequentially mapped <- (Z != 0) usedlabs <- sortunique(as.vector(Z[mapped])) nlabs <- length(usedlabs) labtable <- cumsum(seq_len(max(usedlabs)) %in% usedlabs) Z[mapped] <- labtable[Z[mapped]] # banish zeroes Z[!mapped] <- NA # strip borders Z <- Z[2:(nrow(Z)-1L),2:(ncol(Z)-1L)] # dress up Z <- im(factor(Z, levels=1:nlabs), xcol=X$xcol, yrow=X$yrow, unitname=unitname(X)) return(Z) } connected.ppp <- connected.pp3 <- function(X, R, ...) { methodname <- if(is.ppp(X)) "connected.ppp" else if(is.pp3(X)) "connected.pp3" else stopifnot(is.ppp(X) || is.pp3(X)) check.1.real(R, paste("In", methodname)) stopifnot(R >= 0) internal <- resolve.1.default("internal", list(...), list(internal=FALSE)) nv <- npoints(X) cl <- closepairs(X, R, what="indices") ie <- cl$i - 1L je <- cl$j - 1L ne <- length(ie) zz <- .C("cocoGraph", nv=as.integer(nv), ne=as.integer(ne), ie=as.integer(ie), je=as.integer(je), label=as.integer(integer(nv)), status=as.integer(integer(1L)), PACKAGE = "spatstat") if(zz$status != 0) stop(paste("Internal error:", methodname, "did not converge"), call.=FALSE) if(internal) return(zz$label) lab <- zz$label + 1L # Renumber labels sequentially lab <- as.integer(factor(lab)) # Convert labels to factor lab <- factor(lab) # Apply to points Y <- X %mark% lab return(Y) } # ................................................. is.connected <- function(X, ...) { UseMethod("is.connected") } is.connected.default <- function(X, ...) 
{ y <- connected(X, ...) npieces <- length(levels(y)) if(npieces == 0) stop("Unable to determine connectedness") return(npieces == 1) } is.connected.ppp <- function(X, R, ...) { lab <- connected(X, R, internal=TRUE) npieces <- length(unique(lab)) return(npieces == 1) } spatstat/R/nndensity.R0000644000176200001440000000173113333543255014464 0ustar liggesusers# # nndensity.R # # Density estimation based on nn distance # # $Revision: 1.3 $ $Date: 2014/10/24 00:22:30 $ # nndensity <- function(x, ...) { UseMethod("nndensity") } nndensity.ppp <- function(x, k, ..., verbose=TRUE) { if(missing(k) || is.null(k)) { k <- round(sqrt(npoints(x))) if(verbose) cat(paste("k=", k, "\n")) } else if(k == 1) warning("k=1 will produce strange results") # distance to k-th nearest neighbour D <- nnmap(x, k=k, what="dist", ...) # area searched A <- eval.im(pi * D^2) # distance to boundary B <- bdist.pixels(as.owin(D)) # handle edge effects edge <- solutionset(B < D) # centres of all pixels where edge effect occurs xy <- rasterxy.mask(edge, drop=TRUE) # corresponding values of distance rr <- D[edge, drop=TRUE] # compute actual search area X <- as.ppp(xy, W=as.owin(x), check=FALSE) A[edge] <- discpartarea(X, matrix(rr, ncol=1)) # finally compute intensity estimate L <- eval.im(k/A) return(L) } spatstat/R/localKcross.R0000644000176200001440000005240313536347317014740 0ustar liggesusers#' #' localKcross.R #' #' original by Ege Rubak #' #' $Revision: 1.14 $ $Date: 2019/09/12 04:51:07 $ "localLcross" <- function(X, from, to, ..., rmax = NULL, correction = "Ripley") { localKcross(X, from, to, ..., rmax = rmax, correction = correction, wantL = TRUE) } "localLdot" <- function(X, from, ..., rmax = NULL, correction = "Ripley") { localKdot(X, from, ..., rmax = rmax, correction = correction, wantL = TRUE) } "localKcross" <- function(X, from, to, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be 
multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] if(missing(to)) to <- levels(marx)[2] I <- (marx == from) if(!any(I)) stop(paste("No points have mark =", from)) Iexplain <- paste("points having mark =", from) Ikey <- make.parseable(paste(from)) if(from == to) { ## use Kest result <- do.call(localK, resolve.defaults(list(X=X[I], rmax=rmax, correction=correction, verbose=verbose, rvalue=rvalue), list(...))) } else { J <- (marx == to) if(!any(J)) stop(paste("No points have mark =", to)) Jexplain <- paste("points having mark =", to) Jkey <- make.parseable(paste(to)) result <-localKmultiEngine(X, I, J, ..., Ikey=Ikey, Jkey=Jkey, Iexplain=Iexplain, Jexplain=Jexplain, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } return(result) } "localKdot" <- function(X, from, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] I <- (marx == from) J <- rep.int(TRUE, X$n) # i.e. all points Iexplain <- paste("points having mark =", from) Jexplain <- "points of any type" Ikey <- make.parseable(paste(from)) Jkey <- "." 
if(!any(I)) stop(paste("No points have mark =", from)) result <- localKmultiEngine(X, I, J, ..., Iexplain=Iexplain, Jexplain=Jexplain, Ikey=Ikey, Jkey=Jkey, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) attr(result, "indices") <- list(from=from) return(result) } "localKcross.inhom" <- function(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, ..., rmax = NULL, correction = "Ripley", sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(from)) from <- levels(marx)[1] if(missing(to)) to <- levels(marx)[2] I <- (marx == from) J <- (marx == to) Iexplain <- paste("points having mark =", from) Jexplain <- paste("points having mark =", to) Ikey <- make.parseable(paste(from)) Jkey <- make.parseable(paste(to)) K <- localKmultiEngine(X, I, J, lambdaFrom, lambdaTo, ..., rmax = rmax, Iexplain=Iexplain, Jexplain=Jexplain, Ikey=Ikey, Jkey=Jkey, correction=correction, sigma=sigma, varcov=varcov, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) attr(K, "indices") <- list(from=from, to=to) return(K) } localLcross.inhom <- function(X, from, to, lambdaFrom = NULL, lambdaTo = NULL, ..., rmax = NULL) { localKcross.inhom(X, from, to, lambdaFrom, lambdaTo, ..., rmax = rmax, wantL = TRUE) } "localKmultiEngine" <- function(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, ..., rmax = NULL, wantL=FALSE, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE, Iexplain="points satisfying condition I", Jexplain="points satisfying condition J", Ikey="I", Jkey="J", miss.update=missing(update), miss.leave=missing(leaveoneout)) { npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda.ave <- npts/areaW from <- ppsubset(X, from) to <- ppsubset(X, to) 
if(is.null(from) || is.null(to)) stop("from and to must be valid subset indices") if(!any(from)) stop("no points belong to subset from") if(!any(to)) stop("no points belong to subset to") X_from <- X[from] X_to <- X[to] n_from <- sum(from) n_to <- sum(to) lambdaFrom.ave <- n_from/areaW lambdaTo.ave <- n_to/areaW weighted <- !is.null(lambdaFrom) || !is.null(lambdaTo) || !is.null(lambdaX) if(weighted){ lambdas <- resolve.lambda.cross(X, from, to, lambdaFrom, lambdaTo, ..., lambdaX = lambdaX, sigma = sigma, varcov = varcov, leaveoneout = leaveoneout, update = update, Iexplain=Iexplain, Jexplain=Jexplain, miss.update=miss.update, miss.leave=miss.leave, caller = "localKcrossEngine") lambdaFrom <- lambdas$lambdaI lambdaTo <- lambdas$lambdaJ } if(is.null(rvalue)) rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- crosspairs(X_from, X_to, rmax) # close$i and close$j are serial numbers in X_from and X_to respectively; # map them to original serial numbers in X orig <- seq_len(npts) imap <- orig[from] jmap <- orig[to] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(from & to)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] close$xi <- close$xi[ok] close$xj <- close$xj[ok] close$yi <- close$yi[ok] close$yj <- close$yj[ok] } } # extract 
information for these pairs (relative to orderings of X_from, X_to) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i J <- close$j if(weighted) { ## lambdaI <- lambdaFrom[I] ## not used lambdaJ <- lambdaTo[J] ## weightI <- 1/lambdaI ## not used weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), n_from)) labl <- desc <- character(n_from) if(verbose) state <- list() switch(correction, none={ # uncorrected! For demonstration purposes only! for(i in 1:n_from) { ii <- (I == i) ## Below wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights Knone <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. if(weighted) Knone <- Knone * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Knone icode <- numalign(i, n_from) names(df)[i] <- paste("un", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:n_from) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. 
if(weighted) Ktrans <- Ktrans * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Ktrans icode <- numalign(i, n_from) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:n_from) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. if(weighted) Kiso <- Kiso * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Kiso icode <- numalign(i, n_from) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(df[nr,])) } ## function value table required ## add r and theo df <- cbind(df, data.frame(r=r, theo=if(wantL) r else (pi * r^2))) desc <- c(desc, c("distance argument r", "theoretical Poisson %s")) labl <- c(labl, c("r", "{%s[%s]^{pois}}(r)")) ## Handle 'dot' symbol if(identical(Jkey, ".")) { Jkeyname <- "symbol(\"\\267\")" Jkeylab <- quote(dot) Jkeyexpr <- quote(symbol("\267")) } else Jkeyname <- Jkeylab <- Jkeyexpr <- Jkey ## Determine fv labels if(!wantL) { if(!weighted) { fnam <- c("K", paste0("list(loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(K[loc,I,J](r), list(I=Ikey, J=Jkeylab)) 
yexp <- substitute(K[list(loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } else { fnam <- c("K", paste0("list(inhom,loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(K[inhom,loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(K[list(inhom,loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } } else { if(!weighted) { fnam <- c("L", paste0("list(loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(L[loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(L[list(loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } else { fnam <- c("L", paste0("list(inhom,loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(L[inhom,loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(L[list(inhom,loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } } # create fv object K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam, yexp=yexp) # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) attr(K, "correction") <- correction if(weighted && lambdas$danger) attr(K, "dangerous") <- lambdas$dangerous ### TEMPORARY HACK to save info about the "from" points attr(K, "Xfrom") <- X_from return(K) } resolve.lambda.cross <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., lambdaX=NULL, sigma=NULL, varcov=NULL, leaveoneout=TRUE, update=TRUE, lambdaIJ=NULL, Iexplain="points satisfying condition I", Jexplain="points satisfying condition J", miss.update=missing(update), miss.leave=missing(leaveoneout), caller="direct"){ dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE XI <- X[I] XJ <- X[J] nI <- npoints(XI) nJ <- npoints(XJ) lamIname <- short.deparse(substitute(lambdaI)) lamJname <- short.deparse(substitute(lambdaJ)) bothnames <- c(lamIname, lamJname) givenI <- !is.null(lambdaI) givenJ <- !is.null(lambdaJ) givenX <- !is.null(lambdaX) if(givenI != givenJ) { givenone <- bothnames[c(givenI, givenJ)] missedone <- setdiff(bothnames, givenone) stop(paste("If", givenone, "is given, then", missedone, "should also be given"), call.=FALSE) } if(givenX && givenI && givenJ) 
warning(paste(paste(sQuote(bothnames), collapse=" and "), "were ignored because", sQuote("lambdaX"), "was given"), call.=FALSE) if(givenX) { ## Intensity values for all points of X if(is.im(lambdaX)) { ## Look up intensity values lambdaI <- safelookup(lambdaX, XI) lambdaJ <- safelookup(lambdaX, XJ) } else if(is.imlist(lambdaX) && is.multitype(X) && length(lambdaX) == length(levels(marks(X)))) { ## Look up intensity values Y <- split(X) lamY <- mapply("[", x=lambdaX, i=Y, SIMPLIFY=FALSE) lamX <- unsplit(lamY, marks(X)) lambdaI <- lamX[I] lambdaJ <- lamX[J] } else if(is.function(lambdaX)) { ## evaluate function at locations if(!is.marked(X) || length(formals(lambdaX)) == 2) { lambdaI <- lambdaX(XI$x, XI$y) lambdaJ <- lambdaX(XJ$x, XJ$y) } else { lambdaI <- lambdaX(XI$x, XI$y, marks(XI)) lambdaJ <- lambdaX(XJ$x, XJ$y, marks(XJ)) } } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) { ## vector of intensity values if(length(lambdaX) != npoints(X)) stop(paste("The length of", sQuote("lambdaX"), "should equal the number of points of X")) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } else if(is.ppm(lambdaX) || is.kppm(lambdaX) || is.dppm(lambdaX)) { ## point process model provides intensity model <- lambdaX if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X if(is.ppm(model)) { model <- update(model, Q=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } else { model <- update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] dangerI <- dangerJ <- FALSE dangerous <- "lambdaIJ" if(miss.update & caller == "Kmulti.inhom") warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste("Argument lambdaX is not 
understood:", "it should be a numeric vector,", "an image, a function(x,y)", "or a fitted point process model (ppm, kppm or dppm)")) } else { ## lambdaI, lambdaJ expected if(!givenI) { ## estimate intensity dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaI)) { ## look up intensity values lambdaI <- safelookup(lambdaI, XI) } else if(is.function(lambdaI)) { ## evaluate function at locations lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { ## validate intensity vector check.nvector(lambdaI, nI, things=Iexplain) } else if(is.ppm(lambdaI) || is.kppm(lambdaI) || is.dppm(lambdaI)) { ## point process model provides intensity model <- lambdaI if(!update) { ## just use intensity of fitted model lambdaI <- predict(model, locations=XI, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaI <- lambdaX[I] dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") if(miss.update && caller == "Kmulti.inhom") warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaI"), "should be a vector or an image")) if(!givenJ) { ## estimate intensity dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaJ)) { ## look up intensity values lambdaJ <- safelookup(lambdaJ, XJ) } else if(is.function(lambdaJ)) { ## evaluate function at locations lambdaJ <- lambdaJ(XJ$x, XJ$y) } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) { ## validate intensity vector check.nvector(lambdaJ, nJ, things=Jexplain) } else 
if(is.ppm(lambdaJ) || is.kppm(lambdaJ) || is.dppm(lambdaJ)) { ## point process model provides intensity model <- lambdaJ if(!update) { ## just use intensity of fitted model lambdaJ <- predict(model, locations=XJ, type="trend") } else { ## re-fit model to data X model <- if(is.ppm(model)) update(model, Q=X) else update(model, X=X) lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaJ <- lambdaX[J] dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") if(miss.update & caller == "Kmulti.inhom") warn.once(key="Kmulti.inhom.update", "The behaviour of Kmulti.inhom when lambda is a ppm object", "has changed (in spatstat 1.45-3 and later).", "See help(Kmulti.inhom)") } } else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) } ## Weight for each pair if(!is.null(lambdaIJ)) { dangerIJ <- TRUE dangerous <- union(dangerous, "lambdaIJ") if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iexplain)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jexplain)) } else { dangerIJ <- FALSE } danger <- dangerI || dangerJ || dangerIJ return(list(lambdaI = lambdaI, lambdaJ = lambdaJ, lambdaIJ=lambdaIJ, danger = danger, dangerous = dangerous)) } spatstat/R/Gmulti.R0000644000176200001440000001625713333543254013722 0ustar liggesusers# Gmulti.S # # Compute estimates of nearest neighbour distance distribution functions # for multitype point patterns # # S functions: # Gcross G_{ij} # Gdot G_{i\bullet} # Gmulti (generic) # # $Revision: 4.43 $ $Date: 2015/10/21 09:06:57 $ # ################################################################################ "Gcross" <- function(X, i, j, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han")) { # computes G_{ij} estimates # # X marked point pattern (of class 'ppp') # i,j the two mark values to be compared # # r: (optional) values of argument r # breaks: (optional) breakpoints for 
argument r # X <- as.ppp(X) if(!is.marked(X, dfok=FALSE)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") if(i == j) result <- Gest(X[I], r=r, breaks=breaks, ...) else { J <- (marx == j) if(sum(J) == 0) stop("No points are of type j") result <- Gmulti(X, I, J, r=r, breaks=breaks, disjoint=FALSE, ..., correction=correction) } iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(G[i,j](r), list(i=iname, j=jname)), c("G", paste0("list(", iname, ",", jname, ")")), new.yexp=substitute(G[list(i,j)](r), list(i=iname,j=jname))) return(result) } "Gdot" <- function(X, i, r=NULL, breaks=NULL, ..., correction=c("km","rs","han")) { # Computes estimate of # G_{i\bullet}(t) = # P( a further point of pattern in B(0,t)| a type i point at 0 ) # # X marked point pattern (of class ppp) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") J <- rep.int(TRUE, X$n) # i.e. 
all points # result <- Gmulti(X, I, J, r, breaks, disjoint=FALSE, ..., correction=correction) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(G[i ~ dot](r), list(i=iname)), c("G", paste(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(G[i ~ symbol("\267")](r), list(i=iname))) return(result) } ########## "Gmulti" <- function(X, I, J, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=c("rs", "km", "han")) { # # engine for computing the estimate of G_{ij} or G_{i\bullet} # depending on selection of I, J # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # verifyclass(X, "ppp") W <- X$window npts <- npoints(X) areaW <- area(W) # check I and J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop("No points satisfy condition I") if(nJ == 0) stop("No points satisfy condition J") if(is.null(disjoint)) disjoint <- !any(I & J) # choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("rs", "km", "han") correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) # determine breakpoints for r values lamJ <- nJ/areaW rmaxdefault <- rmax.rule("G", W, lamJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) # brks <- breaks$val rmax <- breaks$max rvals <- breaks$r zeroes <- numeric(length(rvals)) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lamJ * pi * rvals^2)) fname <- c("G", "list(I,J)") Z <- fv(df, "r", quote(G[I,J](r)), "theo", . 
~ r, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(G[list(I,J)](r))) # "type I to type J" nearest neighbour distances XI <- X[I] XJ <- X[J] if(disjoint) nnd <- nncross(XI, XJ, what="dist") else { seqnp <- seq_len(npts) iX <- seqnp[I] iY <- seqnp[J] nnd <- nncross(XI, XJ, iX, iY, what="dist") } # distance to boundary from each type i point bdry <- bdist.points(XI) # observations o <- pmin.int(nnd,bdry) # censoring indicators d <- (nnd <= bdry) # # calculate estimates if("none" %in% correction) { # UNCORRECTED e.d.f. of nearest neighbour distances: use with care if(npts == 0) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), makefvlabel(NULL, "hat", fname, "raw"), "uncorrected estimate of %s", "raw") } if("han" %in% correction) { # Hanisch style estimator if(npts == 0) G <- zeroes else { # uncensored distances x <- nnd[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } # add to fv object Z <- bind.fv(Z, data.frame(han=G), makefvlabel(NULL, "hat", fname, "han"), "Hanisch estimate of %s", "han") # modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes) else { result <- km.rs(o, bdry, d, breaks) result <- as.data.frame(result[c("rs", "km", "hazard")]) } # add to fv object Z <- bind.fv(Z, result, c(makefvlabel(NULL, "hat", fname, "bord"), makefvlabel(NULL, "hat", fname, "km"), "hazard(r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)"), "km") # modify recommended plot range 
attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) unitname(Z) <- unitname(X) return(Z) } spatstat/R/parameters.R0000644000176200001440000000121313333543255014607 0ustar liggesusers## ## parameters.R ## ## $Revision: 1.2 $ $Date: 2015/05/08 04:27:15 $ ## parameters <- function(model, ...) { UseMethod("parameters") } parameters.ppm <- function(model, ...) { ss <- summary(model, quick="no variances") out <- c(list(trend=ss$trend$value), ss$covfunargs, ss$interaction$interaction$par, ss$interaction$sensible$param) return(out) } parameters.kppm <- function(model, ...) { ss <- summary(model, quick="no variances") out <- c(list(trend=ss$trend$trend$value), ss$covfunargs, ss$clustpar, ss$clustargs, list(mu=ss$mu)) return(out) } spatstat/R/covariates.R0000644000176200001440000000306713333543254014614 0ustar liggesusers# # covariates.R # # evaluate covariates # # $Revision: 1.3 $ $Date: 2015/10/21 09:06:57 $ # evalCovariate <- function(covariate, locations) { # evaluate covariate of any kind at specified locations covvalues <- if(is.im(covariate)) safelookup(covariate, locations) else if(is.function(covariate)) covariate(locations$x, locations$y) else if(is.numeric(covariate) || is.factor(covariate)) { if(length(covariate) == 1L) rep.int(covariate, length(locations$x)) else if(length(covariate) == length(locations$x)) covariate else stop("Inappropriate length for covariate vector") } else stop("Covariate should be an image, a function or a factor/numeric vector") return(covvalues) } ppmCovariates <- function(model) { # generate list of all covariates in ppm (excluding marks) stopifnot(is.ppm(model)) co <- as.list(model$covariates) xy <- list(x=function(x,y){x}, y=function(x,y){y}) coplus <- append(co, xy) return(as.anylist(coplus)) } findCovariate <- function(covname, scope, scopename=NULL) { # find the named covariate in the given ppm object or list if(is.ppm(scope)) { covlist <- 
ppmCovariates(scope) if(missing(scopename)) scopename <- "covariates in model" } else if(is.list(scope)) { covlist <- scope } else stop("scope should be a named list of covariates, or a ppm object") if(!(covname %in% names(covlist))) stop(paste("covariate", dQuote(covname), "not found", if(!is.null(scopename)) paste("amongst", scopename) else NULL)) covlist[[covname]] } spatstat/R/treebranches.R0000644000176200001440000001361613421006314015107 0ustar liggesusers#' #' treebranches.R #' #' Label branches in a tree #' #' $Revision: 1.5 $ $Date: 2019/01/20 05:26:51 $ #' compute branch labels for each *vertex* in the tree L treebranchlabels <- local({ treebranchlabels <- function(L, root=1) { stopifnot(inherits(L, "linnet")) stopifnot(length(root) == 1) V <- L$vertices #' M <- L$m #' assign label to each vertex e <- rep(NA_character_, npoints(V)) #' do root e[root] <- "" #' recurse descendtree(L, root, e) } descendtree <- function(L, at, labels, verbose=FALSE) { if(verbose) cat(paste("Descending from node", at, "\n")) below <- which(L$m[at, ] & is.na(labels)) while(length(below) == 1) { if(verbose) cat(paste("Line from", at, paren(labels[at]), "to", below, "\n")) labels[below] <- labels[at] at <- below below <- which(L$m[at, ] & is.na(labels)) } if(length(below) == 0) { if(verbose) cat("*\n") return(labels) } if(verbose) cat(paste("split into", length(below), "\n")) if(length(below) > 26) stop("Oops - degree > 27") labels[below] <- paste(labels[at], letters[1:length(below)], sep="") for(b in below) labels <- descendtree(L, b, labels) return(labels) } treebranchlabels }) #' Function which will return the branch label associated with #' any point on the network branchlabelfun <- function(L, root=1) { L <- as.linnet(L) vertexLabels <- treebranchlabels(L, root=root) labfrom <- vertexLabels[L$from] labto <- vertexLabels[L$to] segmentLabels <- ifelse(nchar(labfrom) < nchar(labto), labto, labfrom) f <- function(x, y, seg, tp) { segmentLabels[seg] } fL <- linfun(f, L) 
return(fL) } #' convenience function for use in model formulae begins <- function(x, firstbit) { stopifnot(is.character(firstbit) && length(firstbit) == 1) n <- nchar(firstbit) if(n == 0) rep(TRUE, length(x)) else (substr(x, 1, n) == firstbit) } #' extract the sub-tree for a particular label #' e.g. extractbranch(L, "a") extracts everything whose label begins with 'a' extractbranch <- function(X, ...) { UseMethod("extractbranch") } extractbranch.linnet <- function(X, code, labels, ..., which=NULL) { L <- X V <- L$vertices if(!is.null(which)) { stopifnot(is.logical(which)) if(length(which) != npoints(V)) stop("Argument 'which' is the wrong length") vin <- which } else { if(length(labels) != npoints(V)) stop("labels vector is the wrong length") #' which vertices are included #' (a) vertices with the right initial code vin <- (substr(labels, 1, nchar(code)) == code) #' (b) the apex isneighbour <- (rowSums(L$m[,vin]) > 0) apexcode <- if(nchar(code) > 1) substr(code, 1, nchar(code)-1) else "" vin <- vin | (isneighbour & (labels == apexcode)) } #' which edges are included ein <- vin[L$from] & vin[L$to] #' new serial numbers for vertices vId <- cumsum(vin) #' pack up sparse <- L$sparse out <- list(vertices=V[vin], m=L$m[vin,vin], lines=L$lines[ein], from=vId[L$from[ein]], to=vId[L$to[ein]], dpath=if(sparse) NULL else L$dpath[vin,vin], sparse=sparse, window=V$window) class(out) <- c("linnet", class(out)) #' pre-compute bounding radius if(sparse) out$boundingradius <- boundingradius(out) out$toler <- default.linnet.tolerance(out) attr(out, "which") <- vin return(out) } extractbranch.lpp <- function(X, code, labels, ..., which=NULL) { L <- as.linnet(X) #' make sub-network if(missing(code)) code <- NULL if(missing(labels)) labels <- NULL Lnew <- extractbranch(L, code, labels, which=which) #' which vertices are included vin <- attr(Lnew, "which") #' which edges are included ein <- vin[L$from] & vin[L$to] #' which data points are included xin <- ein[coords(X)$seg] #' new serial 
numbers for edges eId <- cumsum(ein) #' construct subset Xnew <- X[xin] Xnew$domain <- Lnew #' apply new serial numbers to segment map coords(Xnew)$seg <- eId[coords(Xnew)$seg] #' return(Xnew) } deletebranch <- function(X, ...) { UseMethod("deletebranch") } deletebranch.linnet <- function(X, code, labels, ...) { L <- X V <- L$vertices if(length(labels) != npoints(V)) stop("labels vector is the wrong length") #' which vertices are retained vkeep <- (substr(labels, 1, nchar(code)) != code) #' which edges are retained ekeep <- vkeep[L$from] & vkeep[L$to] #' new serial numbers for vertices vId <- cumsum(vkeep) #' pack up sparse <- L$sparse out <- list(vertices=V[vkeep], m=L$m[vkeep,vkeep], lines=L$lines[ekeep], from=vId[L$from[ekeep]], to=vId[L$to[ekeep]], dpath=if(sparse) NULL else L$dpath[vkeep,vkeep], sparse=sparse, window=V$window) class(out) <- c("linnet", class(out)) #' recompute bounding radius if(sparse) out$boundingradius <- boundingradius(out) out$toler <- default.linnet.tolerance(out) attr(out, "which") <- vkeep return(out) } deletebranch.lpp <- function(X, code, labels, ...) 
{ #' make sub-network L <- as.linnet(X) Lnew <- deletebranch(L, code=code, labels=labels) #' which vertices are retained vkeep <- attr(Lnew, "which") #' which edges are retained ekeep <- vkeep[L$from] & vkeep[L$to] #' which data points are retained xin <- ekeep[coords(X)$seg] #' new serial numbers for vertices # vId <- cumsum(vkeep) #' new serial numbers for edges eId <- cumsum(ekeep) #' construct subset Xnew <- X[xin] Xnew$domain <- Lnew #' apply new serial numbers to segment map coords(Xnew)$seg <- eId[coords(Xnew)$seg] #' return(Xnew) } treeprune <- function(X, root=1, level=0){ ## collect names of branches to be pruned tb <- treebranchlabels(as.linnet(X), root=root) keep <- (nchar(tb) <= level) Y <- extractbranch(X, which=keep) return(Y) } spatstat/R/logistic.R0000644000176200001440000003374413431506134014271 0ustar liggesusers## ## logistic.R ## ## $Revision: 1.26 $ $Date: 2019/02/15 09:46:41 $ ## ## Logistic composite likelihood method ## logi.engine <- function(Q, trend = ~1, interaction, ..., covariates=NULL, subsetexpr=NULL, clipwin=NULL, correction="border", rbord=reach(interaction), covfunargs=list(), allcovar=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, justQ = FALSE, savecomputed = FALSE, precomputed = NULL, VB=FALSE ){ if(is.null(trend)) trend <- ~1 if(is.null(interaction)) interaction <- Poisson() want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.poisson(interaction) want.subset <- !is.null(subsetexpr) # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # rbord applies only to border correction if(correction == "border") { check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") } else rbord <- 0 # backdoor stuff if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) 
if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } # create dummy points if(inherits(Q, "ppp")){ Xplus <- Q Q <- quadscheme.logi(Xplus, ...) D <- Q$dummy Dinfo <- Q$param } else if(checkfields(Q, c("data", "dummy"))) { Xplus <- Q$data D <- Q$dummy Dinfo <- Q$param if(is.null(Dinfo)){ Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D))) } Q <- quadscheme.logi(Xplus, D) } else stop("Format of object Q is not understood") ## clip to subset? if(!is.null(clipwin)) { if(is.data.frame(covariates)) { ok <- inside.owin(union.quad(Q), w=clipwin) covariates <- covariates[ok, , drop=FALSE] } Q <- Q[clipwin] Xplus <- Q$data D <- Q$dummy } if (justQ) return(Q) ### Dirty way of recording arguments so that the model can be refitted later (should probably be done using call, eval, envir, etc.): extraargs <- list(covfunargs = covfunargs, allcovar = allcovar, vnamebase = vnamebase, vnameprefix = vnameprefix) extraargs <- append(extraargs, list(...)) ## Dummy intensity if(correction == "border" && Dinfo$how=="grid"){ Dbord <- D[bdist.points(D)>=rbord] Dinfo$rho <- npoints(Dbord)/(eroded.areas(as.owin(Dbord), rbord)*markspace.integral(Dbord)) } rho <- Dinfo$rho ##Setting the B from Barker dynamics (relative to dummy intensity) B <- list(...)$Barker if(is.null(B)) B <- 1 B <- B*rho Dinfo <- append(Dinfo, list(B=B)) Dinfo <- append(Dinfo, list(extraargs=extraargs)) # Wplus <- as.owin(Xplus) nXplus <- npoints(Xplus) U <- superimpose(Xplus, D, W=Wplus, check=FALSE) # E <- equalpairs(U, Xplus, marked = is.marked(Xplus)) E <- cbind(1:nXplus, 1:nXplus) # computed <- if (savecomputed) list(X = Xplus, Q = Q, U = U) else list() # assemble covariate data frame if(want.trend || want.subset) { tvars <- variablesinformula(trend) if(want.subset) tvars <- union(tvars, 
all.vars(subsetexpr)) if(!is.data.frame(covariates)) { ## resolve 'external' covariates externalvars <- setdiff(tvars, c("x", "y", "marks")) tenv <- environment(trend) covariates <- getdataobjects(externalvars, tenv, covariates, fatal=TRUE) } wantxy <- c("x", "y") %in% tvars wantxy <- wantxy | rep.int(allcovar, 2) cvdf <- data.frame(x=U$x, y=U$y)[, wantxy, drop=FALSE] if(!is.null(covariates)) { df <- mpl.get.covariates(covariates, U, "quadrature points", covfunargs) cvdf <- cbind(cvdf, df) } wantmarks <- "marks" %in% tvars if(wantmarks) cvdf <- cbind(cvdf, marks = marks(U)) } else cvdf <- NULL # evaluate interaction sufficient statistics if (!is.null(ss <- interaction$selfstart)) interaction <- ss(Xplus, interaction) V <- evalInteraction(Xplus, U, E, interaction, correction, precomputed = precomputed, savecomputed = savecomputed) if(!is.matrix(V)) stop("evalInteraction did not return a matrix") if (savecomputed) computed <- append(computed, attr(V, "computed")) IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, ncol(V)) # determine names if(ncol(V) > 0) { Vnames <- colnames(V) if(is.null(Vnames)) { nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1L] else paste(vnamebase[2L], 1:nc, sep="") colnames(V) <- Vnames } else if(!is.null(vnameprefix)) { Vnames <- paste(vnameprefix, Vnames, sep="") colnames(V) <- Vnames } } else Vnames <- character(0) # combine all data glmdata <- as.data.frame(V) if(!is.null(cvdf)) glmdata <- cbind(glmdata, cvdf) # construct response and weights ok <- if(correction == "border") (bdist.points(U) >= rbord) else rep.int(TRUE, npoints(U)) # Keep only those quadrature points for which the # conditional intensity is nonzero. 
KEEP <- if(ncol(V)>0) matrowall(V != -Inf) else rep.int(TRUE, npoints(U)) ok <- ok & KEEP wei <- c(rep.int(1,npoints(Xplus)),rep.int(B/rho,npoints(D))) resp <- c(rep.int(1,npoints(Xplus)),rep.int(0,npoints(D))) ## User-defined subset: if(!is.null(subsetexpr)) { USERSUBSET <- eval(subsetexpr, glmdata, environment(trend)) ok <- ok & USERSUBSET } # add offset, subset and weights to data frame # using reserved names beginning with ".logi." glmdata <- cbind(glmdata, .logi.Y = resp, .logi.B = B, .logi.w = wei, .logi.ok =ok) # build glm formula # (reserved names begin with ".logi.") trendpart <- paste(as.character(trend), collapse=" ") fmla <- paste(".logi.Y ", trendpart) # Interaction terms if(want.inter) { VN <- Vnames # enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") fmla <- paste(c(fmla, VN), collapse="+") } # add offset intrinsic to logistic technique fmla <- paste(fmla, "offset(-log(.logi.B))", sep="+") fmla <- as.formula(fmla) # to satisfy package checker: .logi.B <- B .logi.w <- wei .logi.ok <- ok .logi.Y <- resp # suppress warnings from code checkers dont.complain.about(.logi.B, .logi.w, .logi.ok, .logi.Y) # go ##fit <- glm(fmla, data=glmdata, ## family=binomial(), subset = .logi.ok, weights = .logi.w) fit <- if(VB) vblogit.fmla(fmla, data = glmdata, subset = .logi.ok, weights = .logi.w, ...) else glm(fmla, data = glmdata, family = binomial(), subset = .logi.ok, weights = .logi.w) environment(fit$terms) <- sys.frame(sys.nframe()) ## Fitted coeffs co <- coef(fit) fitin <- fii(interaction, co, Vnames, IsOffset) ## Saturated log-likelihood: satlogpl <- sum(ok*resp*log(B)) ## Max. 
value of log-likelihood: maxlogpl <- logLik(fit) + satlogpl # Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2019/02/15 09:46:41 $") ## Compile results fit <- list(method = "logi", fitter = "glm", projected = FALSE, coef = co, trend = trend, interaction = interaction, fitin = fitin, Q = Q, maxlogpl = maxlogpl, satlogpl = satlogpl, internal = list(Vnames = Vnames, IsOffset=IsOffset, glmdata = glmdata, glmfit = fit, logistic = Dinfo, computed = computed, vnamebase=vnamebase, vnameprefix=vnameprefix, VB = if(VB) TRUE else NULL, priors = if(VB) fit$priors else NULL ), covariates = mpl.usable(covariates), covfunargs= covfunargs, subsetexpr = subsetexpr, correction = correction, rbord = rbord, fisher = NULL, varcov = NULL, # if(VB) fit$S else NULL, terms = terms(trend), version = the.version, problems = list() ) class(fit) <- "ppm" return(fit) } forbid.logi <- function(object) { if(object$method == "logi") stop("Sorry, this is not implemented for method=\'logi\'") return(invisible(NULL)) } logi.dummy <- function(X, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){ ## Resolving nd inspired by default.n.tiling if(is.null(nd)){ nd <- spatstat.options("ndummy.min") if(inherits(X, "ppp")) nd <- pmax(nd, 10 * ceiling(2 * sqrt(X$n)/10)) } nd <- ensure2vector(nd) marx <- is.multitype(X) if(marx) lev <- levels(marks(X)) if(marx && mark.repeat){ N <- length(lev) Dlist <- inDlist <- vector("list", N) } else{ N <- 1 } W <- as.owin(X) type <- match.arg(dummytype, c("stratrand", "binomial", "poisson", "grid", "transgrid")) B <- boundingbox(W) rho <- nd[1L]*nd[2L]/area(B) Dinfo <- list(nd=nd, rho=rho, how=type) ## Repeating dummy process for each mark type 1:N (only once if unmarked or mark.repeat = FALSE) for(i in 1:N){ switch(type, stratrand={ D <- as.ppp(stratrand(B, nd[1L], nd[2L]), W = B) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- 
paste(i,inD,sep="_") }, binomial={ D <- runifpoint(nd[1L]*nd[2L], win=B) D <- D[W] }, poisson={ D <- rpoispp(rho, win = W) }, grid={ D <- as.ppp(gridcenters(B, nd[1L], nd[2L]), W = B) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, transgrid={ D <- as.ppp(gridcenters(B, nd[1L], nd[2L]), W = B) dxy <- c(diff(D$window$xrange),diff(D$window$yrange))/(2*nd) coords(D) <- coords(D)+matrix(runif(2,-dxy,dxy),npoints(D),2,byrow=TRUE) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, stop("unknown dummy type")) if(marx && mark.repeat){ marks(D) <- factor(lev[i], levels = lev) Dlist[[i]] <- D if(type %in% c("stratrand","grid","transgrid")) inDlist[[i]] <- inD } } if(marx && mark.repeat){ inD <- Reduce(append, inDlist) D <- Reduce(superimpose, Dlist) } if(type %in% c("stratrand","grid","transgrid")) Dinfo <- append(Dinfo, list(inD=inD)) if(marx && !mark.repeat){ marks(D) <- sample(factor(lev, levels=lev), npoints(D), replace = TRUE) Dinfo$rho <- Dinfo$rho/length(lev) } attr(D, "dummy.parameters") <- Dinfo return(D) } quadscheme.logi <- function(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){ data <- as.ppp(data) ## If dummy is missing we generate dummy pattern with logi.dummy. if(missing(dummy)) dummy <- logi.dummy(data, dummytype, nd, mark.repeat, ...) 
Dinfo <- attr(dummy, "dummy.parameters") D <- as.ppp(dummy) if(is.null(Dinfo)) Dinfo <- list(how="given", rho=npoints(D)/(area(D)*markspace.integral(D))) ## Weights: n <- npoints(data)+npoints(D) w <- area(Window(data))/n Q <- quad(data, D, rep(w,n), param=Dinfo) class(Q) <- c("logiquad", class(Q)) return(Q) } summary.logiquad <- function(object, ..., checkdup=FALSE) { verifyclass(object, "logiquad") s <- list( data = summary.ppp(object$data, checkdup=checkdup), dummy = summary.ppp(object$dummy, checkdup=checkdup), param = object$param) class(s) <- "summary.logiquad" return(s) } print.summary.logiquad <- function(x, ..., dp=3) { cat("Quadrature scheme (logistic) = data + dummy\n") Dinfo <- x$param if(is.null(Dinfo)) cat("created by an unknown function.\n") cat("Data pattern:\n") print(x$data, dp=dp) cat("\n\nDummy pattern:\n") # How they were computed switch(Dinfo$how, stratrand={ cat(paste("(Stratified random dummy points,", paste(Dinfo$nd, collapse=" x "), "grid of cells)\n")) }, binomial={ cat("(Binomial dummy points)\n") }, poisson={ cat("(Poisson dummy points)\n") }, grid={ cat(paste("(Fixed grid of dummy points,", paste(Dinfo$nd, collapse=" x "), "grid)\n")) }, transgrid={ cat(paste("(Random translation of fixed grid of dummy points,", paste(Dinfo$nd, collapse=" x "), "grid)\n")) }, given=cat("(Dummy points given by user)\n") ) # Description of them print(x$dummy, dp=dp) return(invisible(NULL)) } spatstat/R/hybrid.R0000644000176200001440000002707413333543255013742 0ustar liggesusers# # # hybrid.R # # $Revision: 1.9 $ $Date: 2018/03/15 07:37:41 $ # # Hybrid of several interactions # # Hybrid() create a hybrid of several interactions # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hybrid <- local({ Hybrid <- function(...) { interlist <- list(...) 
n <- length(interlist) if(n == 0) stop("No arguments given") #' arguments may be interaction objects or ppm objects isinter <- unlist(lapply(interlist, is.interact)) isppm <- unlist(lapply(interlist, is.ppm)) if(any(nbg <- !(isinter | isppm))) stop(paste(ngettext(sum(nbg), "Argument", "Arguments"), paste(which(nbg), collapse=", "), ngettext(sum(nbg), "is not an interaction", "are not interactions"))) #' ensure the list contains only interaction objects if(any(isppm)) interlist[isppm] <- lapply(interlist[isppm], as.interact) #' recursively expand any components that are themselves hybrids while(any(ishybrid <- unlist(lapply(interlist, is.hybrid)))) { i <- min(which(ishybrid)) n <- length(interlist) expandi <- interlist[[i]]$par interlist <- c(if(i > 1) interlist[1:(i-1L)] else NULL, expandi, if(i < n) interlist[(i+1L):n] else NULL) } #' ncomponents <- length(interlist) if(ncomponents == 1) { #' single interaction - return it return(interlist[[1L]]) } #' ensure all components have names names(interlist) <- good.names(names(interlist), "HybridComponent", 1:ncomponents) #' check for infinite potential values haveInf <- lapply(interlist, getElement, name="hasInf") haveInf <- !sapply(haveInf, identical, y=FALSE) hasInf <- any(haveInf) #' build object out <- list( name = "Hybrid interaction", creator = "Hybrid", family = hybrid.family, pot = NULL, par = interlist, parnames = names(interlist), hasInf = hasInf, selfstart = function(X, self) { ilist <- self$par sslist <- lapply(ilist, getElement, name="selfstart") has.ss <- sapply(sslist, is.function) if(any(has.ss)) { ilist[has.ss] <- lapply(ilist[has.ss], invokeSelfStart, Y=X) self$par <- ilist } return(self) }, init = NULL, update = NULL, # default OK print = function(self, ..., family=FALSE, brief=FALSE) { if(family) print.isf(self$family) ncomponents <- length(self$par) clabs <- self$parnames splat("Hybrid of", ncomponents, "components:", commasep(sQuote(clabs))) for(i in 1:ncomponents) { splat(paste0(clabs[i], ":")) 
print(self$par[[i]], ..., family=family, brief=brief) } parbreak() return(invisible(NULL)) }, interpret = function(coeffs, self) { interlist <- self$par result <- list(param=list(), inames=character(0), printable=list()) for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) #' extract them if(any(relevant)) { Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) #' invoke the self-interpretation of interI interpretI <- interI$interpret if(is.function(interpretI)) { resultI <- interpretI(Crelevant, interI) paramI <- resultI$param prinI <- resultI$printable inamesI <- resultI$inames inamesI <- paste(nameI, inamesI) if(length(prinI) > 0) { result$param <- append(result$param, paramI) result$printable <- append(result$printable, list(prinI)) result$inames <- c(result$inames, inamesI) } } } } return(result) }, valid = function(coeffs, self) { #' check validity via mechanism used for 'rmhmodel' siminfo <- .Spatstat.Rmhinfo[["Hybrid interaction"]] Z <- siminfo(coeffs, self) cifs <- Z$cif pars <- Z$par ntypes <- Z$ntypes if((Ncif <- length(cifs)) == 1) { #' single cif pars <- append(pars, list(beta=rep.int(1, ntypes))) } else { for(i in 1:Ncif) pars[[i]] <- append(pars[[i]], list(beta=rep.int(1, ntypes[i]))) } RM <- rmhmodel(cif=cifs, par=pars, types=1:max(ntypes), stopinvalid=FALSE) return(RM$integrable) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) #' separate into components spl <- splitHybridInteraction(coeffs, self) interlist <- spl$interlist coeflist <- spl$coeflist #' compute projection for each component interaction Ncif <- length(interlist) projlist <- vector(mode="list", length=Ncif) nproj <- integer(Ncif) for(i in 
1:Ncif) { coefsI <- coeflist[[i]] interI <- interlist[[i]] if(!is.interact(interI)) stop("Internal error: interlist entry is not an interaction") projI <- interI$project if(is.null(projI)) stop(paste("Projection is not yet implemented for a", interI$name)) p <- projI(coefsI, interI) #' p can be NULL (indicating no projection required for interI) #' or a single interaction or a list of interactions. if(is.null(p)) { if(Ncif == 1) return(NULL) # no projection required p <- list(NULL) nproj[i] <- 0 } else if(is.interact(p)) { p <- list(p) nproj[i] <- 1L } else if(is.list(p) && all(unlist(lapply(p, is.interact)))) { nproj[i] <- length(p) } else stop("Internal error: result of projection had wrong format") projlist[[i]] <- p } #' for interaction i there are nproj[i] **new** interactions to try. if(all(nproj == 0)) return(NULL) if(spatstat.options("project.fast")) { #' Single interaction required. #' Extract first entry from each list #' (there should be only one entry, but...) qlist <- lapply(projlist, "[[", i=1L) #' replace NULL entries by corresponding original interactions isnul <- unlist(lapply(qlist, is.null)) if(all(isnul)) return(NULL) if(any(isnul)) qlist[isnul] <- interlist[isnul] names(qlist) <- names(interlist) #' build hybrid and return result <- do.call(Hybrid, qlist) return(result) } #' Full case result <- list() for(i in which(nproj > 0)) { ntry <- nproj[i] tries <- projlist[[i]] for(j in 1:ntry) { #' assemble list of component interactions for hybrid qlist <- interlist qlist[[i]] <- tries[[j]] #' eliminate Poisson ispois <- unlist(lapply(qlist, is.poisson)) if(all(ispois)) { #' collapse to single Poisson h <- Poisson() } else { if(any(ispois)) qlist <- qlist[!ispois] h <- do.call(Hybrid, qlist) } result <- append(result, list(h)) } } #' 'result' is a list of interactions, each a hybrid if(length(result) == 1) result <- result[[1L]] return(result) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ interlist <- self$par answer <- 0 for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") #' find coefficients with prefix that exactly matches nameI. if(all(is.na(coeffs))) Crelevant <- NA else { Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) #' extract them Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) } #' compute reach reachI <- interI$irange if(is.function(reachI)) { resultI <- reachI(interI, coeffs=Crelevant, epsilon=epsilon, ...) answer <- max(answer, resultI) } } return(answer) }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } invokeSelfStart <- function(inte, Y) { ss <- inte$selfstart if(!is.function(ss)) return(inte) return(ss(Y, inte)) } Hybrid }) is.hybrid <- function(x) { UseMethod("is.hybrid") } is.hybrid.interact <- function(x) { return(is.interact(x) && (x$name == "Hybrid interaction")) } is.hybrid.ppm <- function(x) { return(is.hybrid(as.interact(x))) } splitHybridInteraction <- function(coeffs, inte) { # For hybrids, $par is a list of the component interactions, # but coeffs is a numeric vector. # Split the coefficient vector into the relevant coeffs for each interaction interlist <- inte$par N <- length(interlist) coeflist <- vector(mode="list", length=N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("A hybrid-of-hybrid interactions is not implemented") # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
# extract coefficients # (there may be none, if this interaction is Poisson or an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to interaction if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # store coeflist[[i]] <- coeffsI } names(coeflist) <- names(interlist) return(list(coeflist=coeflist, interlist=interlist)) } Hybrid <- intermaker(Hybrid, list(creator="Hybrid", name="general hybrid Gibbs process", par=list("..."=42), parnames=list("any list of interactions"))) spatstat/R/subfits.R0000644000176200001440000004621113333543255014132 0ustar liggesusers# # # $Revision: 1.52 $ $Date: 2017/12/10 06:11:16 $ # # subfits.new <- local({ subfits.new <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models", "interactions", "basicmodels")) if(what == "interactions") return(subfits.old(object, what, verbose)) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") fitter <- object$Fit$fitter FIT <- object$Fit$FIT trend <- object$trend #%^!ifdef RANDOMEFFECTS random <- object$random #%^!endif info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist has.design <- info$has.design # has.random <- info$has.random announce("done.\n") ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") 
## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) #%^!ifdef RANDOMEFFECTS if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) #%^!endif ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up lists for results models <- rep(list(NULL), npat) interactions <- rep(list(NULL), npat) ## interactions announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. 
coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) interactions[[i]] <- fii(interi, coefs.avail, vni) } announce("Done!\n") names(interactions) <- rownames ## if(what=="interactions") return(interactions) ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname moadf <- object$Fit$moadf fmla <- object$Fit$fmla ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Construct template for fake ppm object spv <- package_version(versionstring.spatstat()) fake.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2017/12/10 06:11:16 $") fake.call <- call("cannot.update", Q=NULL, trend=trend, interaction=NULL, covariates=NULL, correction=object$Info$correction, rbord = object$Info$rbord) fakemodel <- list( method = "mpl", fitter = fitter, coef = coef(object), trend = object$trend, interaction = NULL, fitin = NULL, Q = NULL, maxlogpl = NA, internal = list(glmfit = FIT, glmdata = NULL, Vnames = NULL, fmla = fmla, computed = list()), covariates = NULL, correction = object$Info$correction, rbord = object$Info$rbord, version = fake.version, problems = list(), fisher = fisher, varcov = varcov, call = fake.call, callstring = "cannot.update()", fake = TRUE) class(fakemodel) <- "ppm" ## Loop through point patterns announce("Generating models for each row...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images covariates <- 
if(has.covar) covariates.hf[i, , drop=TRUE, strip=FALSE] else NULL if(has.covar && has.design) ## Convert each data frame covariate value to an image covariates[dfvar] <- lapply(covariates[dfvar], as.im, W=Wi) ## Extract relevant interaction finte <- interactions[[i]] inte <- finte$interaction if(is.poisson.interact(inte)) inte <- NULL Vnames <- finte$Vnames if(length(Vnames) == 0) Vnames <- NULL ## Construct fake ppm object fakemodel$interaction <- inte fakemodel$fitin <- finte fakemodel$Q <- Yi fakemodel$covariates <- covariates fakemodel$internal$glmdata <- moadf[moadf$id == i, ] fakemodel$internal$Vnames <- Vnames fake.call$Q <- Yi fake.call$covariates <- covariates fakemodel$call <- fake.call fakemodel$callstring <- short.deparse(fake.call) ## store in list models[[i]] <- fakemodel } announce("done.\n") names(models) <- rownames models <- as.anylist(models) return(models) } Announce <- function(...) cat(...) Ignore <- function(...) { NULL } subfits.new }) ## ///////////////////////////////////////////////////// subfits <- subfits.old <- local({ subfits.old <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) what <- match.arg(what, c("models","interactions", "basicmodels")) ## extract stuff announce <- if(verbose) Announce else Ignore announce("Extracting stuff...") trend <- object$trend random <- object$random use.gam <- object$Fit$use.gam info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist has.design <- info$has.design has.random <- info$has.random moadf <- object$Fit$moadf announce("done.\n") ## levels of any factors levelslist <- lapply(as.list(moadf), levelsAsFactor) isfactor <- !sapply(levelslist, is.null) ## fitted parameters coefs.full <- coef(object) if(is.null(dim(coefs.full))) { ## fixed effects model: replicate vector to matrix coefs.names <- names(coefs.full) coefs.full <- matrix(coefs.full, byrow=TRUE, nrow=npat, 
ncol=length(coefs.full), dimnames=list(NULL, coefs.names)) } else { ## random/mixed effects model: coerce to matrix coefs.names <- colnames(coefs.full) coefs.full <- as.matrix(coefs.full) } ## determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") ## exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) #%^!ifdef RANDOMEFFECTS if(!is.null(random) && any(variablesinformula(random) %in% itags)) stop(paste("subfits() is not yet implemented for models", "with random effects that involve", "the interpoint interactions")) #%^!endif ## implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") ## This code is currently not usable because the mapping is wrong reconcile <- FALSE if(reconcile) { ## determine which coefficients of main model are interaction terms announce("Identifying interaction coefficients...") md <- model.depends(object$Fit$FIT) usetags <- unlist(lapply(implcoef, colnames)) isVname <- apply(md[, usetags, drop=FALSE], 1, any) mainVnames <- row.names(md)[isVname] announce("done.\n") } ## Fisher information and vcov fisher <- varcov <- NULL if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } ## Extract data frame announce("Extracting data...") datadf <- object$datadf rownames <- object$Info$rownames announce("done.\n") ## set up list for results results <- rep(list(NULL), npat) if(what == "interactions") { announce("Determining interactions...") pstate <- list() for(i in 1:npat) { if(verbose) 
pstate <- progressreport(i, npat, state=pstate) ## Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson() else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] ## Find relevant coefficients coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. coefs.avail[names(coefs.implied)] <- coefs.implied } ## create fitted interaction with these coefficients vni <- if(nactive > 0) Vnamelist[[tagi]] else character(0) results[[i]] <- fii(interi, coefs.avail, vni) } announce("Done!\n") names(results) <- rownames return(results) } ## Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname ## deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] ## used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Loop through point patterns announce("Looping through rows...") pstate <- list() for(i in 1:npat) { if(verbose) pstate <- progressreport(i, npat, state=pstate) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window ## assemble relevant covariate images scrambled <- FALSE if(!has.covar) { covariates <- NULL } else { covariates <- covariates.hf[i, , drop=TRUE, strip=FALSE] if(has.design) { ## Convert each data frame covariate value to an image imrowi <- lapply(covariates[dfvar], as.im, W=Wi) ## Problem: constant covariate leads to singular fit ## --------------- Hack: --------------------------- scrambled <- TRUE ## Construct fake data by resampling from possible values covar.vals <- lapply(as.list(covariates[dfvar, drop=FALSE]), 
possible) fake.imrowi <- lapply(covar.vals, scramble, W=Wi, Y=Yi$data) ## insert fake data into covariates covariates[dfvar] <- fake.imrowi ## ------------------ end hack ---------------------------- } ## identify factor-valued spatial covariates spatialfactors <- !dfvar & isfactor[names(covariates)] if(any(spatialfactors)) { ## problem: factor levels may be dropped ## more fakery... scrambled <- TRUE spfnames <- names(spatialfactors)[spatialfactors] covariates[spatialfactors] <- lapply(levelslist[spfnames], scramble, W=Wi, Y=Yi$data) } } ## Fit ppm to data for case i only ## using relevant interaction acti <- active[i,] nactive <- sum(acti) if(nactive == 1){ interi <- interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] fiti <- PiPiM(Yi, trend, interi, covariates=covariates, allcovar=has.random, use.gam=use.gam, vnamebase=tagi, vnameprefix=tagi) } else { fiti <- PiPiM(Yi, trend, Poisson(), covariates=covariates, allcovar=has.random, use.gam=use.gam) } fiti$scrambled <- scrambled ## fiti determines which coefficients are required coefi.fitted <- fiti$coef coefnames.wanted <- coefnames.fitted <- names(coefi.fitted) ## reconcile interaction coefficient names if(reconcile) { coefnames.translated <- coefnames.wanted ma <- match(coefnames.fitted, fiti$internal$Vnames) hit <- !is.na(ma) if(any(hit)) coefnames.translated[hit] <- mainVnames[ ma[hit] ] } ## take the required coefficients from the full mppm fit coefs.avail <- coefs.full[i,] names(coefs.avail) <- coefs.names if(nactive == 1) { ic <- implcoef[[tagi]] coefs.implied <- ic[i, ,drop=TRUE] names(coefs.implied) <- colnames(ic) ## overwrite any existing values of coefficients; add new ones. 
coefs.avail[names(coefs.implied)] <- coefs.implied } ## check if(!all(coefnames.wanted %in% names(coefs.avail))) stop("Internal error: some fitted coefficients not accessible") coefi.new <- coefs.avail[coefnames.wanted] ## reset coefficients fiti$coef.orig <- coefi.fitted ## (detected by summary.ppm, predict.ppm) fiti$theta <- fiti$coef <- coefi.new fiti$method <- "mppm" ## ... and replace fake data by true data if(has.design) { fiti$internal$glmdata.scrambled <- gd <- fiti$internal$glmdata fixnames <- intersect(names(imrowi), colnames(gd)) for(nam in fixnames) { fiti$covariates[[nam]] <- imrowi[[nam]] fiti$internal$glmdata[[nam]] <- data[i, nam, drop=TRUE] } } ## Adjust rank of glm fit object # fiti$internal$glmfit$rank <- FIT$rank fiti$internal$glmfit$rank <- sum(is.finite(fiti$coef)) ## Fisher information and variance-covariance if known ## Extract submatrices for relevant parameters if(reconcile) { #' currently disabled because mapping is wrong if(!is.null(fisher)) { if(!reconcile) { fiti$fisher <- fisher[coefnames.wanted, coefnames.wanted, drop=FALSE] } else { fush <- fisher[coefnames.translated, coefnames.translated, drop=FALSE] dimnames(fush) <- list(coefnames.wanted, coefnames.wanted) fiti$fisher <- fush } } if(!is.null(varcov)) { if(!reconcile) { fiti$varcov <- varcov[coefnames.wanted, coefnames.wanted, drop=FALSE] } else { vc <- varcov[coefnames.translated, coefnames.translated, drop=FALSE] dimnames(vc) <- list(coefnames.wanted, coefnames.wanted) fiti$varcov <- vc } } } ## store in list results[[i]] <- fiti } announce("done.\n") names(results) <- rownames results <- as.anylist(results) return(results) } PiPiM <- function(Y, trend, inter, covariates, ..., allcovar=FALSE, use.gam=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL) { # This ensures that the model is fitted in a unique environment # so that it can be updated later. 
zclustermodel <- function(name="Thomas", ..., mu, kappa, scale) {
  ## Experimental constructor for a cluster point process model
  ## specification. All three model parameters must be given explicitly.
  if(missing(kappa)) stop("The parent intensity kappa must be given")
  if(missing(mu))    stop("The mean cluster size mu must be given")
  if(missing(scale)) stop("The cluster scale must be given")
  ## look up the rules table for this cluster model type
  info   <- spatstatClusterModelInfo(name)
  extras <- info$resolvedots(...)
  shape  <- info$checkclustargs(extras$margs, old=FALSE)
  model <- list(name      = name,
                rules     = info,
                par       = c(kappa=kappa, scale=scale),
                mu        = mu,
                clustargs = shape,
                other     = extras)
  class(model) <- "zclustermodel"
  return(model)
}
pcfmodel.zclustermodel <- function(model, ...) {
  ## Return the theoretical pair correlation function of the cluster
  ## model, as a function of interpoint distance r.
  pcfun  <- model$rules$pcf
  modpar <- model$par
  extras <- model$other
  auxbit <- model$rules["funaux"]
  function(r) {
    argh <- c(list(par=modpar, rvals=r), extras, auxbit)
    do.call(pcfun, argh)
  }
}
fii <- function(interaction=NULL, coefs=numeric(0), Vnames=character(0),
                IsOffset=NULL) {
  ## Create a 'fii' object: a fitted interpoint interaction.
  ## A NULL interaction means the Poisson (no-interaction) case.
  if(is.null(interaction))
    interaction <- Poisson()
  stopifnot(is.interact(interaction))
  ## a Poisson process has no interaction coefficients
  if(is.poisson.interact(interaction) && length(Vnames) > 0)
    stop("Coefficients inappropriate for Poisson process")
  if(is.null(IsOffset)) {
    IsOffset <- rep.int(FALSE, length(Vnames))
  } else {
    stopifnot(is.logical(IsOffset))
    stopifnot(length(IsOffset) == length(Vnames))
  }
  ## the interaction may take infinite values unless explicitly flagged FALSE
  result <- list(interaction = interaction,
                 coefs       = coefs,
                 Vnames      = Vnames,
                 IsOffset    = IsOffset,
                 hasInf      = !identical(interaction$hasInf, FALSE))
  class(result) <- c("fii", class(result))
  return(result)
}
print.fii <- function(x, ...) {
  ## Print method for 'fii': summarise, then delegate to
  ## print.summary.fii in brief mode (brief=TRUE takes precedence
  ## over any value supplied in ...).
  argh <- resolve.defaults(list(x=summary(x), brief=TRUE), list(...))
  do.call(print.summary.fii, argh)
  invisible(NULL)
}
reach.fii <- function(x, ..., epsilon=0) {
  ## Interaction range of a fitted interpoint interaction.
  ## A Poisson process has zero range; an unknown range is reported as Inf.
  interact <- x$interaction
  if(is.poisson.interact(interact))
    return(0)
  ## the interaction object supplies its own 'irange' function, if any
  rangefun <- interact$irange
  if(is.null(rangefun))
    return(Inf)
  ## evaluate the range at the fitted coefficients;
  ## newer interactions index coefficients by their internal Vnames
  beta <- if(newstyle.coeff.handling(interact)) x$coefs[x$Vnames] else x$coefs
  r <- rangefun(interact, beta, epsilon=epsilon)
  if(is.na(r)) Inf else r
}
tilecentroids <- function (W, nx, ny) {
  ## Centroids of the tiles in an nx * ny rectangular tiling of the
  ## window W. For a rectangular window the centroids are exactly the
  ## grid centres; otherwise they are approximated from the pixel raster.
  W <- as.owin(W)
  if(W$type == "rectangle")
    return(gridcentres(W, nx, ny))
  else {
    # approximate using pixel raster of the window
    W <- as.mask(W)
    rxy <- rasterxy.mask(W, drop=TRUE)
    xx <- rxy$x
    yy <- rxy$y
    ## Bug fix: the grid has nx columns and ny rows; previously 'nx' was
    ## passed twice here, so 'ny' was silently ignored unless nx == ny.
    ## (Compare cellmiddles(), which correctly passes nx, ny.)
    pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index
    ## average pixel coordinates within each tile
    x <- tapply(xx,pid,mean)
    y <- tapply(yy,pid,mean)
    return(list(x=x,y=y))
  }
}
<- ((x - xrange[1L])/dx) %% 1 bx <- dx * pmin(fx, 1-fx) bx } # main cellmiddles <- function (W, nx, ny, npix=NULL, distances=FALSE) { if(W$type == "rectangle") return(gridcentres(W, nx, ny)) # pixel approximation to window # This matches the pixel approximation used to compute tile areas # and ensures that dummy points are generated only inside those tiles # that have nonzero digital area M <- as.mask(W, dimyx=rev(npix)) xx <- as.vector(rasterx.mask(M, drop=TRUE)) yy <- as.vector(rastery.mask(M, drop=TRUE)) pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index # compute tile centroids xmid <- tapply(xx, pid, mean) ymid <- tapply(yy, pid, mean) # check whether they are inside window ok <- inside.owin(xmid, ymid, W) if(all(ok)) return(list(x=xmid, y=ymid)) # some problem tiles bad <- rep.int(TRUE, nx * ny) bad[as.integer(names(xmid))] <- !ok badpid <- bad[pid] if(!distances) { midpix <- tapply(seq_along(pid)[badpid], pid[badpid], middle) } else { # find 'middle' points using boundary distances Dlines <- im(outer(dcut(M$yrow,ny,M$yrange), dcut(M$xcol,nx,M$xrange), "pmin"), M$xcol, M$yrow, M$xrange, M$yrange) Dbdry <- bdist.pixels(M) Dtile <- eval.im(pmin(Dlines, Dbdry)) dtile <- as.vector(Dtile[M]) df <- data.frame(dtile=dtile, id=seq_along(dtile))[badpid, , drop=FALSE] midpix <- by(df, pid[badpid], midpixid) } xmid[!ok] <- xx[midpix] ymid[!ok] <- yy[midpix] return(list(x=xmid,y=ymid)) } midpixid <- function(z) { z$id[which.max(z$dtile)] } cellmiddles }) spokes <- function(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault=1) { # # Rolf Turner's "spokes" arrangement # # Places dummy points on radii of circles # emanating from each data point x[i], y[i] # # nrad: number of radii from each data point # nper: number of dummy points per radius # fctr: length of largest radius = fctr * M # where M is mean nearest neighbour distance in data # pat <- inherits(x,"ppp") if(pat) w <- x$w if(checkfields(x,c("x","y"))) { y <- x$y x <- x$x } M <- if(length(x) > 1) mean(nndist(x,y)) 
concatxy <- function(...) {
  ## Concatenate any number of list(x, y) coordinate structures into a
  ## single list(x, y). The x and y components must pair up exactly.
  pieces <- list(...)
  xs <- unlist(lapply(pieces, getElement, name="x"))
  ys <- unlist(lapply(pieces, getElement, name="y"))
  if(length(xs) != length(ys))
    stop("Internal error: lengths of x and y unequal")
  list(x=xs, y=ys)
}
minmultiple <- function(n, lo, hi) {
  ## Smallest multiple of n lying in [lo, hi] (and at least n itself);
  ## falls back to hi when n exceeds hi or no such multiple exists.
  if(lo > hi) {
    swap <- lo; lo <- hi; hi <- swap
  }
  if(n > hi)
    return(hi)
  candidates <- n * seq.int(floor(lo/n), ceiling(hi/n))
  keep <- candidates[candidates >= n & candidates >= lo & candidates <= hi]
  if(length(keep)) min(keep) else hi
}
(npix) # for data pattern X. # verifyclass(X, "ppp") win <- X$window pixels <- (win$type != "rectangle") if(nd.given <- !is.null(nd)) nd <- ensure2print(nd, verbose) if(ntile.given <- !is.null(ntile)) ntile <- ensure2print(ntile, verbose) if(npix.given <- !is.null(npix)) npix <- ensure2print(npix, verbose) if(pixels) sonpixel <- rev(ensure2print(spatstat.options("npixel"), verbose, "")) ndummy.min <- ensure2print(spatstat.options("ndummy.min"), verbose, "") ndminX <- pmax(ndummy.min, 10 * ceiling(2 * sqrt(X$n)/10)) ndminX <- ensure2vector(ndminX) if(!is.null(eps)) { eps <- ensure2print(eps, verbose) Xbox <- as.rectangle(as.owin(X)) sides <- with(Xbox, c(diff(xrange), diff(yrange))) ndminX <- pmax(ndminX, ceiling(sides/eps)) } # range of acceptable values for npix if(npix.given) Nmin <- Nmax <- npix else switch(win$type, rectangle = { Nmin <- ensure2vector(X$n) Nmax <- Inf }, polygonal = { Nmin <- sonpixel Nmax <- 4 * sonpixel }, mask={ nmask <- rev(win$dim) Nmin <- nmask Nmax <- pmax(2 * nmask, 4 * sonpixel) }) # determine values of nd and ntile if(nd.given && !ntile.given) { # ntile must be a divisor of nd if(any(nd > Nmax)) warning("number of dummy points nd exceeds maximum pixel dimensions") ntile <- min2div(nd, ndminX, nd) } else if(!nd.given && ntile.given) { # nd must be a multiple of ntile nd <- min2mul(ntile, ndminX, Nmin) if(any(nd >= Nmin)) nd <- ntile } else if(!nd.given && !ntile.given) { if(!pixels) { nd <- ntile <- ensure2vector(ndminX) if(verbose) cat(paste("nd and ntile default to", nd[1L], "*", nd[2L], "\n")) } else { # find suitable divisors of the number of pixels nd <- ntile <- min2div(Nmin, ndminX, Nmax) if(any(nd >= Nmin)) { # none suitable if(verbose) cat("No suitable divisor of pixel dimensions\n") nd <- ntile <- ndminX } } } else { # both nd, ntile were given if(any(ntile > nd)) warning("the number of tiles (ntile) exceeds the number of dummy points (nd)") } if(!ntile.given && quasi) { if(verbose) cat("Adjusting ntile because quasi=TRUE\n") 
ntile <- maxdiv(ntile, if(pixels) 2L else 1L) } if(!npix.given && pixels) npix <- min2mul(nd, Nmin, Nmax) if(verbose) { if(!quasi) cat(paste("dummy points:", paste0(if(random) "stratified random in" else NULL, "grid"), nd[1L], "x", nd[2L], "\n")) else cat(paste("dummy points:", nd[1L], "x", nd[2L], "=", prod(nd), "quasirandom points\n")) cat(paste("weighting tiles", ntile[1L], "x", ntile[2L], "\n")) if(pixels) cat(paste("pixel grid", npix[1L], "x", npix[2L], "\n")) } if(pixels) return(list(nd=nd, ntile=ntile, npix=npix)) else return(list(nd=nd, ntile=ntile, npix=npix)) } default.n.tiling }) spatstat/R/hackglmm.R0000644000176200001440000000656413333543255014245 0ustar liggesusers# hackglmm.R # $Revision: 1.5 $ $Date: 2017/02/07 07:35:32 $ hackglmmPQL <- function (fixed, random, family, data, correlation, weights, control, niter = 10, verbose = TRUE, subset, ..., reltol=1e-3) { if (is.character(family)) family <- get(family) if (is.function(family)) family <- family() if (is.null(family$family)) { print(family) stop("'family' not recognized") } m <- mcall <- Call <- match.call() nm <- names(m)[-1L] keep <- is.element(nm, c("weights", "data", "subset", "na.action")) for (i in nm[!keep]) m[[i]] <- NULL allvars <- if (is.list(random)) allvars <- c(all.vars(fixed), names(random), unlist(lapply(random, function(x) all.vars(formula(x))))) else c(all.vars(fixed), all.vars(random)) Terms <- if (missing(data)) terms(fixed) else terms(fixed, data = data) off <- attr(Terms, "offset") if (length(off <- attr(Terms, "offset"))) allvars <- c(allvars, as.character(attr(Terms, "variables"))[off + 1]) Call$fixed <- eval(fixed) Call$random <- eval(random) m$formula <- as.formula(paste("~", paste(allvars, collapse = "+"))) environment(m$formula) <- environment(fixed) m$drop.unused.levels <- TRUE m[[1L]] <- as.name("model.frame") mf <- eval.parent(m) off <- model.offset(mf) if (is.null(off)) off <- 0 w <- model.weights(mf) if (is.null(w)) w <- rep(1, nrow(mf)) wts <- mf$wts <- w 
if(missing(subset)) fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, ...) else { # hack to get around peculiar problem with `subset' argument glmmsubset <- eval(expression(subset), data) if(length(glmmsubset) != nrow(mf)) { if(sum(glmmsubset) != nrow(mf)) stop("Internal error: subset vector is wrong length") message("(Fixing subset index..)") glmmsubset <- glmmsubset[glmmsubset] } mf$glmmsubset <- glmmsubset fit0 <- glm(formula = fixed, family = family, data = mf, weights = wts, subset=glmmsubset, ...) } w <- fit0$prior.weights eta <- fit0$linear.predictor zz <- eta + fit0$residuals - off wz <- fit0$weights fam <- family nm <- names(mcall)[-1L] keep <- is.element(nm, c("fixed", "random", "data", "subset", "na.action", "control")) for (i in nm[!keep]) mcall[[i]] <- NULL fixed[[2L]] <- quote(zz) mcall[["fixed"]] <- fixed mcall[[1L]] <- as.name("lme") mcall$random <- random mcall$method <- "ML" if (!missing(correlation)) mcall$correlation <- correlation mcall$weights <- quote(varFixed(~invwt)) mf$zz <- zz mf$invwt <- 1/wz mcall$data <- mf for (i in 1:niter) { if (verbose) cat("iteration", i, "\n") fit <- eval(mcall) etaold <- eta eta <- fitted(fit) + off if (sum((eta - etaold)^2) < (reltol^2) * sum(eta^2)) break mu <- fam$linkinv(eta) mu.eta.val <- fam$mu.eta(eta) mf$zz <- eta + (fit0$y - mu)/mu.eta.val - off wz <- w * mu.eta.val^2/fam$variance(mu) mf$invwt <- 1/wz mcall$data <- mf } attributes(fit$logLik) <- NULL fit$call <- Call fit$family <- family fit$logLik <- as.numeric(NA) oldClass(fit) <- c("glmmPQL", oldClass(fit)) fit } spatstat/R/randommk.R0000644000176200001440000003672613333543255014275 0ustar liggesusers# # # randommk.R # # Random generators for MULTITYPE point processes # # $Revision: 1.39 $ $Date: 2018/05/07 04:34:35 $ # # rmpoispp() random marked Poisson pp # rmpoint() n independent random marked points # rmpoint.I.allim() ... 
## rmpoispp:
## Simulate a multitype Poisson point process. The intensity 'lambda' may be
## a single constant/function(x,y,m,...)/image (applied to every type),
## a numeric vector (one constant per type), or a list of
## constants/functions/images (one per type). Each type is simulated
## independently via rpoispp() and the results superimposed.
rmpoispp <- local({

  ## Argument checking
  is.numvector <- function(x) {is.numeric(x) && is.vector(x)}
  is.constant <- function(x) {is.numvector(x) && length(x) == 1}
  ## TRUE if x is a valid single intensity specifier; errors on negatives
  checkone <- function(x) {
    if(is.constant(x)) {
      if(x >= 0) return(TRUE) else stop("Intensity is negative!")
    }
    return(is.function(x) || is.im(x))
  }

  ## Ensure that m can be passed as a single value to function(x,y,m,...)
  slice.fun <- function(x,y,fun,mvalue, ...) {
    m <- if(length(mvalue) == 1) rep.int(mvalue, length(x)) else mvalue
    result <- fun(x,y,m, ...)
    return(result)
  }

  ## Main function
  rmpoispp <- function(lambda, lmax=NULL, win = owin(c(0,1),c(0,1)),
                       types, ..., nsim=1, drop=TRUE,
                       warnwin=!missing(win)) {
    ## arguments:
    ##     lambda  intensity:
    ##                constant, function(x,y,m,...), image,
    ##                vector, list of function(x,y,...) or list of images
    ##
    ##     lmax     maximum possible value of lambda
    ##                constant, vector, or list
    ##
    ##     win     default observation window (of class 'owin')
    ##
    ##     types    possible types for multitype pattern
    ##
    ##     ...     extra arguments passed to lambda()
    ##
    if(missing(types)) types <- NULL
    force(warnwin)
    ## multiple independent simulations: recurse with nsim=1
    if(nsim > 1) {
      result <- vector(mode="list", length=nsim)
      for(i in 1:nsim)
        result[[i]] <- rmpoispp(lambda, lmax, win, types, ...,
                                warnwin=warnwin)
      return(simulationresult(result, nsim, drop))
    }
    ## Validate arguments
    single.arg <- checkone(lambda)
    vector.arg <- !single.arg && is.numvector(lambda)
    list.arg <- !single.arg && is.list(lambda)
    if(! (single.arg || vector.arg || list.arg))
      stop(paste("argument", sQuote("lambda"), "not understood"))
    if(list.arg && !all(unlist(lapply(lambda, checkone))))
      stop(paste("Each entry in the list", sQuote("lambda"),
                 "must be either a constant, a function or an image"))
    if(vector.arg && any(lambda < 0))
      stop(paste("Some entries in the vector",
                 sQuote("lambda"), "are negative"))
    ## Determine & validate the set of possible types
    if(is.null(types)) {
      if(single.arg) {
        stop(paste(sQuote("types"), "must be given explicitly when",
                   sQuote("lambda"), "is a constant, a function or an image"))
      } else if(!is.null(nama <- names(lambda)) &&
                sum(nzchar(nama)) == length(lambda)) {
        ## use the names of lambda as the type labels
        types <- nama
      } else {
        types <- seq_along(lambda)
      }
    }
    ntypes <- length(types)
    if(!single.arg && (length(lambda) != ntypes))
      stop(paste("The lengths of", sQuote("lambda"),
                 "and", sQuote("types"), "do not match"))
    factortype <- factor(types, levels=types)
    ## Validate `lmax'
    if(! (is.null(lmax) || is.numvector(lmax) || is.list(lmax) ))
      stop(paste(sQuote("lmax"),
                 "should be a constant, a vector, a list or NULL"))
    ## coerce lmax to a vector, to save confusion
    ## (rep(NULL, n) is NULL, so maxes[i] yields NULL when lmax is absent,
    ## which is the value rpoispp expects)
    if(is.null(lmax))
      maxes <- rep(NULL, ntypes)
    else if(is.numvector(lmax) && length(lmax) == 1)
      maxes <- rep.int(lmax, ntypes)
    else if(length(lmax) != ntypes)
      stop(paste("The length of", sQuote("lmax"),
                 "does not match the number of possible types"))
    else if(is.list(lmax))
      maxes <- unlist(lmax)
    else maxes <- lmax
    ## coerce lambda to a list, to save confusion
    lam <- if(single.arg) rep(list(lambda), ntypes)
           else if(vector.arg) as.list(lambda) else lambda
    ## Simulate: one independent Poisson pattern per type
    for(i in 1:ntypes) {
      if(single.arg && is.function(lambda)) {
        ## call f(x,y,m, ...) with m fixed at the current type
        Y <- rpoispp(slice.fun, lmax=maxes[i], win=win,
                     fun=lambda, mvalue=types[i], ..., warnwin=warnwin)
      } else {
        ## call f(x,y, ...) or use other formats
        Y <- rpoispp(lam[[i]], lmax=maxes[i], win=win, ..., warnwin=warnwin)
      }
      Y <- Y %mark% factortype[i]
      X <- if(i == 1) Y else superimpose(X, Y, W=X$window, check=FALSE)
    }
    ## Randomly permute, just in case the order is important
    permu <- sample(X$n)
    X <- X[permu]
    return(simulationresult(list(X), 1, drop))
  }

  rmpoispp
})
## rmpoint:
## Generate a fixed number of independent random marked points.
## Three sampling models, chosen from the arguments:
##   Model I   : n is a single number, ptypes absent  -> types are sampled
##               with marginal probabilities derived from f
##   Model II  : n is a single number, ptypes given   -> types i.i.d. ptypes
##   Model III : n is a vector                        -> exactly n[i] points
##               of each type
## 'f' specifies the (unnormalised) density of location given type, in the
## same formats as 'lambda' in rmpoispp.
rmpoint <- local({

  ## argument validation
  is.numvector <- function(x) {is.numeric(x) && is.vector(x)}
  is.constant <- function(x) {is.numvector(x) && length(x) == 1}
  checkone <- function(x) {
    if(is.constant(x)) {
      if(x >= 0) return(TRUE) else stop("Intensity is negative!")
    }
    return(is.function(x) || is.im(x))
  }

  # integration..
  ## integrate f(x,y) over the window by discretising to an image
  integratexy <- function(f, win, ...) {
    imag <- as.im(f, W=win, ...)
    integral.im(imag)
  }

  ## create a counterpart of f(x,y,m) that works when m is a single value
  funwithfixedmark <- function(xx, yy, ..., m, fun) {
    mm <- rep.int(m, length(xx))
    fun(xx, yy, mm, ...)
  }

  ## integral of f(x,y,m) over locations, for one fixed mark m
  integratewithfixedmark <- function(m, fun, win, ...) {
    integratexy(funwithfixedmark, win=win, m=m, fun=fun, ...)
  }

  # Main function
  rmpoint <- function(n, f=1, fmax=NULL, win = unit.square(),
                      types, ptypes, ...,
                      giveup = 1000, verbose = FALSE,
                      nsim = 1, drop=TRUE) {
    if(!is.numeric(n))
      stop("n must be a scalar or vector")
    if(any(ceiling(n) != floor(n)))
      stop("n must be an integer or integers")
    if(any(n < 0))
      stop("n must be non-negative")
    if(missing(types)) types <- NULL
    if(missing(ptypes)) ptypes <- NULL
    ## multiple independent simulations: recurse with nsim=1
    if(nsim > 1) {
      result <- vector(mode="list", length=nsim)
      for(i in 1:nsim)
        result[[i]] <- rmpoint(n, f, fmax, win, types, ptypes, ...,
                               giveup=giveup, verbose=verbose)
      return(simulationresult(result, nsim, drop))
    }
    ## trivial case: no points requested
    if(sum(n) == 0) {
      nopoints <- ppp(x=numeric(0), y=numeric(0), window=win, check=FALSE)
      if(!is.null(types)) {
        nomarks <- factor(types[numeric(0)], levels=types)
        nopoints <- nopoints %mark% nomarks
      }
      return(simulationresult(list(nopoints), 1, drop))
    }
    #############
    Model <- if(length(n) == 1) {
      if(is.null(ptypes)) "I" else "II"
    } else "III"
    ############## Validate f argument
    single.arg <- checkone(f)
    vector.arg <- !single.arg && is.numvector(f)
    list.arg <- !single.arg && is.list(f)
    if(! (single.arg || vector.arg || list.arg))
      stop(paste("argument", sQuote("f"), "not understood"))
    if(list.arg && !all(unlist(lapply(f, checkone))))
      stop(paste("Each entry in the list", sQuote("f"),
                 "must be either a constant, a function or an image"))
    if(vector.arg && any(f < 0))
      stop(paste("Some entries in the vector",
                 sQuote("f"), "are negative"))
    ## cases where it's known that all types of points
    ## have the same conditional density of location (x,y)
    const.density <- vector.arg ||
      (list.arg && all(unlist(lapply(f, is.constant))))
    same.density <- const.density || (single.arg && !is.function(f))
    ################ Determine & validate the set of possible types
    if(is.null(types)) {
      if(single.arg && length(n) == 1)
        stop(paste(sQuote("types"), "must be given explicitly when",
                   sQuote("f"),
                   "is a single number, a function or an image and",
                   sQuote("n"), "is a single number"))
      else {
        ## deduce types from the names (or length) of n or f
        basis <- if(single.arg) n else f
        if(!is.null(nama <- names(basis)) &&
           sum(nzchar(nama)) == length(basis)) {
          types <- nama
        } else {
          types <- seq_along(basis)
        }
      }
    }
    ntypes <- length(types)
    if(!single.arg && (length(f) != ntypes))
      stop(paste("The lengths of", sQuote("f"),
                 "and", sQuote("types"), "do not match"))
    if(length(n) > 1 && ntypes != length(n))
      stop(paste("The lengths of", sQuote("n"),
                 "and", sQuote("types"), "do not match"))
    factortype <- factor(types, levels=types)
    ####################### Validate `fmax'
    if(! (is.null(fmax) || is.numvector(fmax) || is.list(fmax) ))
      stop(paste(sQuote("fmax"),
                 "should be a constant, a vector, a list or NULL"))
    ## coerce fmax to a vector, to save confusion
    ## (rep(NULL, n) is NULL; indexing NULL yields NULL, as rpoint expects)
    if(is.null(fmax))
      maxes <- rep(NULL, ntypes)
    else if(is.constant(fmax))
      maxes <- rep.int(fmax, ntypes)
    else if(length(fmax) != ntypes)
      stop(paste("The length of", sQuote("fmax"),
                 "does not match the number of possible types"))
    else if(is.list(fmax))
      maxes <- unlist(fmax)
    else maxes <- fmax
    ## coerce f to a list, to save confusion
    flist <- if(single.arg) rep(list(f), ntypes)
             else if(vector.arg) as.list(f) else f
    #################### START ##################################
    ## special algorithm for Model I when all f[[i]] are images
    if(Model == "I" && !same.density &&
       all(unlist(lapply(flist, is.im)))) {
      X <- rmpoint.I.allim(n, flist, types)
      return(simulationresult(list(X), 1, drop))
    }
    ## otherwise, first select types, then locations given types
    if(Model == "I") {
      ## Compute approximate marginal distribution of type
      ## (probability proportional to the integral of f over the window)
      if(vector.arg)
        ptypes <- f/sum(f)
      else if(list.arg) {
        fintegrals <- unlist(lapply(flist, integratexy, win=win, ...))
        ptypes <- fintegrals/sum(fintegrals)
      } else {
        ## single argument
        if(is.constant(f)) {
          ptypes <- rep.int(1/ntypes, ntypes)
        } else {
          ## f is a function (x,y,m)
          ## convert to images and integrate
          fintegrals <- unlist(lapply(types, integratewithfixedmark,
                                      win=win, fun=f, ...))
          ## normalise
          ptypes <- fintegrals/sum(fintegrals)
        }
      }
    }
    ## Generate marks
    if(Model == "I" || Model == "II") {
      ## i.i.d.: n marks with distribution 'ptypes'
      marques <- sample(factortype, n, prob=ptypes, replace=TRUE)
      nn <- table(marques)
    } else {
      ## multinomial: fixed number n[i] of types[i]
      repmarks <- factor(rep.int(types, n), levels=types)
      marques <- sample(repmarks)
      nn <- n
    }
    ntot <- sum(nn)
    ############## SIMULATE !!!  #########################
    ## If all types have the same conditional density of location,
    ## generate the locations using rpoint, and return.
    if(same.density) {
      X <- rpoint(ntot, flist[[1]], maxes[[1]], win=win, ...,
                  giveup=giveup, verbose=verbose)
      X <- X %mark% marques
      return(simulationresult(list(X), 1, drop))
    }
    ## Otherwise invoke rpoint() for each type separately,
    ## writing the locations back into a pre-allocated pattern
    ## so that point order matches the generated mark vector
    X <- ppp(numeric(ntot), numeric(ntot), window=win, marks=marques,
             check=FALSE)
    for(i in 1:ntypes) {
      if(verbose) cat(paste("Type", i, "\n"))
      if(single.arg && is.function(f)) {
        ## want to call f(x,y,m, ...)
        Y <- rpoint(nn[i], funwithfixedmark, fmax=maxes[i], win=win,
                    ..., m=factortype[i], fun=f,
                    giveup=giveup, verbose=verbose)
      } else {
        ## call f(x,y, ...) or use other formats
        Y <- rpoint(nn[i], flist[[i]], fmax=maxes[i], win=win,
                    ..., giveup=giveup, verbose=verbose)
      }
      Y <- Y %mark% factortype[i]
      X[marques == factortype[i]] <- Y
    }
    return(simulationresult(list(X), 1, drop))
  }

  rmpoint
})
if(same.density) { X <- rpoint(ntot, flist[[1]], maxes[[1]], win=win, ..., giveup=giveup, verbose=verbose) X <- X %mark% marques return(simulationresult(list(X), 1, drop)) } ## Otherwise invoke rpoint() for each type separately X <- ppp(numeric(ntot), numeric(ntot), window=win, marks=marques, check=FALSE) for(i in 1:ntypes) { if(verbose) cat(paste("Type", i, "\n")) if(single.arg && is.function(f)) { ## want to call f(x,y,m, ...) Y <- rpoint(nn[i], funwithfixedmark, fmax=maxes[i], win=win, ..., m=factortype[i], fun=f, giveup=giveup, verbose=verbose) } else { ## call f(x,y, ...) or use other formats Y <- rpoint(nn[i], flist[[i]], fmax=maxes[i], win=win, ..., giveup=giveup, verbose=verbose) } Y <- Y %mark% factortype[i] X[marques == factortype[i]] <- Y } return(simulationresult(list(X), 1, drop)) } rmpoint }) rmpoint.I.allim <- local({ ## Extract pixel coordinates and probabilities get.stuff <- function(imag) { w <- as.mask(as.owin(imag)) dx <- w$xstep dy <- w$ystep rxy <- rasterxy.mask(w, drop=TRUE) xpix <- rxy$x ypix <- rxy$y ppix <- as.vector(imag$v[w$m]) ## not normalised - OK npix <- length(xpix) return(list(xpix=xpix, ypix=ypix, ppix=ppix, dx=rep.int(dx,npix), dy=rep.int(dy, npix), npix=npix)) } rmpoint.I.allim <- function(n, f, types) { ## Internal use only! ## Generates random marked points (Model I *only*) ## when all f[[i]] are pixel images. 
## stuff <- lapply(f, get.stuff) ## Concatenate into loooong vectors xpix <- unlist(lapply(stuff, getElement, name="xpix")) ypix <- unlist(lapply(stuff, getElement, name="ypix")) ppix <- unlist(lapply(stuff, getElement, name="ppix")) dx <- unlist(lapply(stuff, getElement, name="dx")) dy <- unlist(lapply(stuff, getElement, name="dy")) ## replicate types numpix <- unlist(lapply(stuff, getElement, name="npix")) tpix <- rep.int(seq_along(types), numpix) ## ## sample pixels from union of all images ## npix <- sum(numpix) id <- sample(npix, n, replace=TRUE, prob=ppix) ## get pixel centre coordinates and randomise within pixel x <- xpix[id] + (runif(n) - 1/2) * dx[id] y <- ypix[id] + (runif(n) - 1/2) * dy[id] ## compute types marx <- factor(types[tpix[id]],levels=types) ## et voila! return(ppp(x, y, window=as.owin(f[[1]]), marks=marx, check=FALSE)) } rmpoint.I.allim }) ## ## wrapper for Rolf's function ## rpoint.multi <- function (n, f, fmax=NULL, marks = NULL, win = unit.square(), giveup = 1000, verbose = FALSE, warn=TRUE, nsim=1, drop=TRUE) { if(nsim > 1) { result <- vector(mode="list", length=nsim) for(i in 1:nsim) result[[i]] <- rpoint.multi(n, f, fmax, marks, win, giveup, verbose) return(simulationresult(result, nsim, drop)) } no.marks <- is.null(marks) || (is.factor(marks) && length(levels(marks)) == 1) if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) warning(paste("Attempting to generate", n, "random points")) } ## unmarked case if (no.marks) { X <- if(is.function(f)) { rpoint(n, f, fmax, win, giveup=giveup, verbose=verbose) } else { rpoint(n, f, fmax, giveup=giveup, verbose=verbose) } return(simulationresult(list(X), 1, drop)) } ## multitype case if(length(marks) != n) stop("length of marks vector != n") if(!is.factor(marks)) stop("marks should be a factor") types <- levels(marks) types <- factor(types, levels=types) ## generate required number of points of each type nums <- table(marks) X <- rmpoint(nums, f, fmax, win=win, types=types, 
giveup=giveup, verbose=verbose) if(any(table(marks(X)) != nums)) stop("Internal error: output of rmpoint illegal") ## reorder them to correspond to the desired 'marks' vector Y <- X Xmarks <- marks(X) for(ty in types) { to <- (marks == ty) from <- (Xmarks == ty) if(sum(to) != sum(from)) stop(paste("Internal error: mismatch for mark =", ty)) if(any(to)) { Y$x[to] <- X$x[from] Y$y[to] <- X$y[from] Y$marks[to] <- ty } } return(simulationresult(list(Y), 1, drop)) } spatstat/R/densityfun.R0000644000176200001440000000420113344370731014633 0ustar liggesusers## ## densityfun.R ## ## Exact 'funxy' counterpart of density.ppp ## ## $Revision: 1.8 $ $Date: 2018/09/02 07:47:31 $ densityfun <- function(X, ...) { UseMethod("densityfun") } densityfun.ppp <- function(X, sigma=NULL, ..., weights=NULL, edge=TRUE, diggle=FALSE) { verifyclass(X, "ppp") ## handle weights now weightsgiven <- !missing(weights) && !is.null(weights) if(weightsgiven) { # convert to numeric if(is.im(weights)) { weights <- safelookup(weights, X) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) if(length(weights) == 0) weightsgiven <- FALSE } if(weightsgiven) { check.nvector(weights, npoints(X)) } else weights <- NULL ## stuff <- list(Xdata=X, weights=weights, edge=edge, diggle=diggle, ...) ## ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.diggle, allow.zero=TRUE) stuff[c("sigma", "varcov")] <- ker[c("sigma", "varcov")] ## g <- function(x, y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] Xquery <- as.ppp(Y, Window(stuff$Xdata)) do.call(densitycrossEngine, append(list(Xquery=Xquery), stuff)) } g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("densityfun", class(g)) return(g) } print.densityfun <- function(x, ...) { cat("function(x,y)", "which returns", "kernel estimate of intensity for", fill=TRUE) X <- get("X", envir=environment(x)) print(X, ...) 
return(invisible(NULL)) } ## Method for as.im ## (enables plot.funxy, persp.funxy, contour.funxy to work for this class) as.im.densityfun <- function(X, W=Window(X), ..., approx=TRUE) { if(!approx) { #' evaluate exactly at grid points using as.im.funxy -> as.im.function result <- as.im.function(X, W=W, ...) } else { #' faster, approximate evaluation using FFT stuff <- get("stuff", envir=environment(X)) if(!missing(W)) stuff$X <- stuff$X[W] names(stuff)[names(stuff) == "X"] <- "x" result <- do.call(density, resolve.defaults(list(...), stuff)) } return(result) } spatstat/R/distcdf.R0000644000176200001440000000642213333543254014072 0ustar liggesusers#' #' distcdf.R #' #' cdf of |X1-X2| when X1,X2 are iid uniform in W, etc #' #' $Revision: 1.10 $ $Date: 2016/02/11 10:17:12 $ #' distcdf <- function(W, V=W, ..., dW=1, dV=dW, nr=1024, regularise=TRUE) { reflexive <- missing(V) && missing(dV) diffuse <- is.owin(W) && is.owin(V) uniformW <- identical(dW, 1) uniformV <- identical(dV, 1) uniform <- uniformW && uniformV if(is.owin(W)) { W <- as.mask(as.owin(W), ...) dW <- as.im(dW, W=W) } else if(is.ppp(W)) { if(uniformW) { #' discrete uniform distribution on W dW <- pixellate(W, ...) } else { #' dW should be a weight or vector of weights if(!is.vector(dW) || !is.numeric(dW)) stop("If W is a point pattern, dW should be a vector of weights") if(length(dW) == 1L) { dW <- rep(dW, npoints(W)) } else stopifnot(length(dW) == npoints(W)) dW <- pixellate(W, weights=dW, ...) } } else stop("W should be a point pattern or a window") if(is.owin(V)) { V <- as.mask(as.owin(V), ...) dV <- as.im(dV, W=V) } else if(is.ppp(V)) { if(uniformV) { #' discrete uniform distribution on V dV <- pixellate(V, ...) } else { #' dV should be a weight or vector of weights if(!is.vector(dV) || !is.numeric(dV)) stop("If V is a point pattern, dV should be a vector of weights") if(length(dV) == 1L) { dV <- rep(dV, npoints(V)) } else stopifnot(length(dV) == npoints(V)) dV <- pixellate(V, weights=dV, ...) 
} } else stop("V should be a point pattern or a window") if(!uniformW && min(dW) < 0) stop("Negative values encountered in dW") if(!uniformV && min(dV) < 0) stop("Negative values encountered in dV") #' compute if(diffuse && uniform) { #' uniform distributions on windows g <- if(reflexive) setcov(W, ...) else setcov(W, V, ...) } else { g <- if(reflexive) imcov(dW) else imcov(dW, dV) } r <- as.im(function(x,y) { sqrt(x^2 + y^2) }, g) rvals <- as.vector(as.matrix(r)) gvals <- as.vector(as.matrix(g)) rgrid <- seq(0, max(rvals), length=nr) dr <- max(rvals)/(nr-1) h <- whist(rvals, breaks=rgrid, weights=gvals/sum(gvals)) ch <- c(0,cumsum(h)) #' regularise at very short distances if(regularise) { sevenpix <- 7 * with(r, max(xstep, ystep)) ii <- round(sevenpix/dr) ch[1:ii] <- ch[ii] * (rgrid[1:ii]/rgrid[ii])^2 } #' ok result <- fv(data.frame(r=rgrid, f=ch), "r", quote(CDF(r)), "f", , range(rvals), c("r","%s(r)"), c("Interpoint distance","Cumulative probability"), fname="CDF") return(result) } bw.frac <- function(X, ..., f=1/4) { X <- as.owin(X) g <- distcdf(X, ...) r <- with(g, .x) Fr <- with(g, .y) iopt <- min(which(Fr >= f)) ropt <- r[iopt] attr(ropt, "f") <- f attr(ropt, "g") <- g class(ropt) <- c("bw.frac", class(ropt)) return(ropt) } print.bw.frac <- function(x, ...) { print(as.numeric(x), ...) } plot.bw.frac <- function(x, ...) 
## Warning text stored in the 'warnings' entry of every mask-type owin,
## reminding users of the row/column <-> y/x convention.
.Spatstat.Image.Warning <-
  c("Row index corresponds to increasing y coordinate; column to increasing x",
    "Transpose matrices to get the standard presentation in R",
    "Example: image(result$xcol,result$yrow,t(result$d))")

## owin:
## Constructor for observation windows (class 'owin').
## Builds one of three representations depending on the arguments:
##   - rectangle: only xrange, yrange given
##   - polygonal: 'poly' given (a list(x,y), matrix/data frame with 2
##     columns, or a list of such polygons; negative-area polygons are holes)
##   - mask: 'mask' given (logical matrix, or data frame of coordinates),
##     with pixel coordinates either supplied in 'xy' or derived from
##     xrange/yrange
## Hidden arguments in '...' (check, calculate, strict, fix) control the
## amount of validation and automatic repair of polygon data.
owin <- local({

  ## two-column matrix or data frame of vertex coordinates?
  isxy <- function(x) {
    (is.matrix(x) || is.data.frame(x)) && ncol(x) == 2
  }
  ## convert a two-column object to list(x, y)
  asxy <- function(xy) {
    list(x=xy[,1], y=xy[,2])
  }

  owin <- function(xrange=c(0,1), yrange=c(0,1),
                   ..., poly=NULL, mask=NULL, unitname=NULL, xy=NULL) {
    # trap a common abuse of syntax: owin(W) where W is already a window
    if(nargs() == 1 && !missing(xrange) && is.owin(xrange))
      return(xrange)
    unitname <- as.unitname(unitname)
    ## Exterminate ambiguities
    poly.given <- !is.null(poly)
    mask.given <- !is.null(mask)
    if(poly.given && mask.given)
      stop("Ambiguous -- both polygonal boundary and digital mask supplied")
    if(!is.null(xy) && !mask.given)
      warning("Argument xy ignored: it is only applicable when a mask is given")
    if(missing(xrange) != missing(yrange))
      stop("If one of xrange, yrange is specified then both must be.")
    if(!missing(xrange)) {
      xrange <- unname(xrange)
      yrange <- unname(yrange)
    }
    # convert data frames to vanilla lists
    if(poly.given) {
      if(is.data.frame(poly))
        poly <- as.list(poly)
      else if(is.list(poly) && any(unlist(lapply(poly, is.data.frame))))
        poly <- lapply(poly, as.list)
    }
    ## Hidden options controlling how much checking is performed
    check <- resolve.1.default(list(check=TRUE), list(...))
    calculate <- resolve.1.default(list(calculate=check), list(...))
    strict <- resolve.1.default(list(strict=spatstat.options("checkpolygons")),
                                list(...))
    fix <- resolve.1.default(list(fix=spatstat.options("fixpolygons")),
                             list(...))
    if(!poly.given && !mask.given) {
      ######### rectangle #################
      if(check) {
        if(!is.vector(xrange) || length(xrange) != 2 ||
           xrange[2L] < xrange[1L])
          stop("xrange should be a vector of length 2 giving (xmin, xmax)")
        if(!is.vector(yrange) || length(yrange) != 2 ||
           yrange[2L] < yrange[1L])
          stop("yrange should be a vector of length 2 giving (ymin, ymax)")
      }
      w <- list(type="rectangle", xrange=xrange, yrange=yrange,
                units=unitname)
      class(w) <- "owin"
      return(w)
    } else if(poly.given) {
      ######### polygonal boundary ########
      #
      if(length(poly) == 0) {
        # empty polygon
        if(check) {
          if(!is.vector(xrange) || length(xrange) != 2 ||
             xrange[2L] < xrange[1L])
            stop("xrange should be a vector of length 2 giving (xmin, xmax)")
          if(!is.vector(yrange) || length(yrange) != 2 ||
             yrange[2L] < yrange[1L])
            stop("yrange should be a vector of length 2 giving (ymin, ymax)")
        }
        w <- list(type="polygonal", xrange=xrange, yrange=yrange,
                  bdry=list(), units=unitname)
        class(w) <- "owin"
        return(w)
      }
      # convert matrix or data frame to list(x,y)
      if(isxy(poly)) {
        poly <- asxy(poly)
      } else if(is.list(poly) && all(unlist(lapply(poly, isxy)))) {
        poly <- lapply(poly, asxy)
      }
      # nonempty polygon
      # test whether it's a single polygon or multiple polygons
      if(verify.xypolygon(poly, fatal=FALSE))
        psingle <- TRUE
      else if(all(unlist(lapply(poly, verify.xypolygon, fatal=FALSE))))
        psingle <- FALSE
      else
        stop("poly must be either a list(x,y) or a list of list(x,y)")
      w.area <- NULL
      if(psingle) {
        # single boundary polygon: must have positive (anticlockwise) area
        bdry <- unname(list(poly))
        if(check || calculate) {
          w.area <- Area.xypolygon(poly)
          if(w.area < 0)
            stop(paste("Area of polygon is negative -",
                       "maybe traversed in wrong direction?"))
        }
      } else {
        # multiple boundary polygons: holes have negative area,
        # but the total must be positive
        bdry <- unname(poly)
        if(check || calculate) {
          w.area <- sapply(poly, Area.xypolygon)
          if(sum(w.area) < 0)
            stop(paste("Area of window is negative;\n",
                       "check that all polygons were traversed",
                       "in the right direction"))
        }
      }
      ## default xrange/yrange to the polygon's bounding box;
      ## otherwise verify the polygon lies inside the given frame
      actual.xrange <- range(unlist(lapply(bdry, getElement, name="x")))
      if(missing(xrange))
        xrange <- actual.xrange
      else if(check) {
        if(!is.vector(xrange) || length(xrange) != 2 ||
           xrange[2L] < xrange[1L])
          stop("xrange should be a vector of length 2 giving (xmin, xmax)")
        if(!all(xrange == range(c(xrange, actual.xrange))))
          stop("polygon's x coordinates outside xrange")
      }
      actual.yrange <- range(unlist(lapply(bdry, getElement, name="y")))
      if(missing(yrange))
        yrange <- actual.yrange
      else if(check) {
        if(!is.vector(yrange) || length(yrange) != 2 ||
           yrange[2L] < yrange[1L])
          stop("yrange should be a vector of length 2 giving (ymin, ymax)")
        if(!all(yrange == range(c(yrange, actual.yrange))))
          stop("polygon's y coordinates outside yrange")
      }
      if(!is.null(w.area)) {
        # tack on area and hole data
        holes <- (w.area < 0)
        for(i in seq_along(bdry))
          bdry[[i]] <- append(bdry[[i]],
                              list(area=w.area[i], hole=holes[i]))
      }
      w <- list(type="polygonal",
                xrange=xrange, yrange=yrange, bdry=bdry, units=unitname)
      class(w) <- "owin"
      if(check && strict) {
        ## strict checks on geometry (self-intersection etc)
        ok <- owinpolycheck(w)
        if(!ok) {
          errors <- attr(ok, "err")
          stop(paste("Polygon data contain", commasep(errors)))
        }
      }
      if(check && fix) {
        if(length(bdry) == 1 &&
           length(bx <- bdry[[1L]]$x) == 4 &&
           length(unique(bx)) == 2 &&
           length(unique(bdry[[1L]]$y)) == 2) {
          ## it's really a rectangle: just fix orientation if needed
          if(Area.xypolygon(bdry[[1L]]) < 0)
            w$bdry <- lapply(bdry, reverse.xypolygon)
        } else {
          ## repair polygon data by invoking polyclip
          ## to intersect polygon with larger-than-bounding rectangle
          ## (Streamlined version of intersect.owin)
          ww <- lapply(bdry, reverse.xypolygon)
          xrplus <- mean(xrange) + c(-1,1) * diff(xrange)
          yrplus <- mean(yrange) + c(-1,1) * diff(yrange)
          bignum <- (.Machine$integer.max^2)/2
          epsclip <- max(diff(xrange), diff(yrange))/bignum
          rr <- list(list(x=xrplus[c(1,2,2,1)], y=yrplus[c(2,2,1,1)]))
          bb <- polyclip::polyclip(ww, rr, "intersection",
                                   fillA="nonzero", fillB="nonzero",
                                   eps=epsclip)
          ## ensure correct polarity
          totarea <- sum(unlist(lapply(bb, Area.xypolygon)))
          if(totarea < 0)
            bb <- lapply(bb, reverse.xypolygon)
          w$bdry <- bb
        }
      }
      return(w)
    } else if(mask.given) {
      ######### digital mask #####################
      if(is.data.frame(mask) &&
         ncol(mask) %in% c(2,3) &&
         sum(sapply(mask, is.numeric)) == 2) {
        # data frame with 2 columns of coordinates:
        # delegate to the data.frame method
        return(as.owin(W=mask, xy=xy))
      }
      if(!is.matrix(mask))
        stop(paste(sQuote("mask"), "must be a matrix"))
      if(!is.logical(mask))
        stop(paste("The entries of", sQuote("mask"), "must be logical"))
      nc <- ncol(mask)
      nr <- nrow(mask)
      if(!is.null(xy)) {
        # pixel coordinates given explicitly
        # validate dimensions
        if(!is.list(xy) || !checkfields(xy, c("x","y")))
          stop("xy should be a list with entries x and y")
        xcol <- xy$x
        yrow <- xy$y
        if(length(xcol) != nc)
          stop(paste("length of xy$x =", length(xcol),
                     "!=", nc, "= number of columns of mask"))
        if(length(yrow) != nr)
          stop(paste("length of xy$y =", length(yrow),
                     "!=", nr, "= number of rows of mask"))
        # x and y should be evenly spaced
        if(!evenly.spaced(xcol))
          stop("xy$x is not evenly spaced")
        if(!evenly.spaced(yrow))
          stop("xy$y is not evenly spaced")
        # determine other parameters
        xstep <- diff(xcol)[1L]
        ystep <- diff(yrow)[1L]
        if(missing(xrange) && missing(yrange)) {
          ## frame extends half a pixel beyond the outermost pixel centres
          xrange <- range(xcol) + c(-1,1) * xstep/2
          yrange <- range(yrow) + c(-1,1) * ystep/2
        }
      } else {
        # determine pixel coordinates from xrange, yrange
        if(missing(xrange) && missing(yrange)) {
          # take pixels to be 1 x 1 unit
          xrange <- c(0,nc)
          yrange <- c(0,nr)
        } else if(check) {
          if(!is.vector(xrange) || length(xrange) != 2 ||
             xrange[2L] < xrange[1L])
            stop("xrange should be a vector of length 2 giving (xmin, xmax)")
          if(!is.vector(yrange) || length(yrange) != 2 ||
             yrange[2L] < yrange[1L])
            stop("yrange should be a vector of length 2 giving (ymin, ymax)")
        }
        xstep <- diff(xrange)/nc
        ystep <- diff(yrange)/nr
        ## pixel centre coordinates
        xcol <- seq(from=xrange[1L]+xstep/2, to=xrange[2L]-xstep/2,
                    length.out=nc)
        yrow <- seq(from=yrange[1L]+ystep/2, to=yrange[2L]-ystep/2,
                    length.out=nr)
      }
      out <- list(type = "mask",
                  xrange = unname(xrange),
                  yrange = unname(yrange),
                  dim = c(nr, nc),
                  xstep = unname(xstep),
                  ystep = unname(ystep),
                  warnings = .Spatstat.Image.Warning,
                  xcol = unname(xcol),
                  yrow = unname(yrow),
                  m = mask,
                  units = unitname)
      class(out) <- "owin"
      return(out)
    }
    # never reached
    NULL
  }

  owin
})
## is.owin: test whether x is an observation window
is.owin <- function(x) { inherits(x, "owin") }

## as.owin: generic converter to class 'owin'.
## Methods return NULL (instead of stopping) when fatal=FALSE.
as.owin <- function(W, ..., fatal=TRUE) {
  UseMethod("as.owin")
}

## as.owin.owin: rebuild the window through the owin() constructor
## (check=FALSE since the components are already validated)
as.owin.owin <- function(W, ..., fatal=TRUE) {
  if(verifyclass(W, "owin", fatal=fatal))
    return(owin(W$xrange, W$yrange, poly=W$bdry, mask=W$m,
                unitname=unitname(W), check=FALSE))
  else
    return(NULL)
}

## as.owin.ppp: window of a point pattern
as.owin.ppp <- function(W, ..., fatal=TRUE) {
  if(verifyclass(W, "ppp", fatal=fatal))
    return(W$window)
  else
    return(NULL)
}

## as.owin.quad: window of the data pattern of a quadrature scheme
as.owin.quad <- function(W, ..., fatal=TRUE) {
  if(verifyclass(W, "quad", fatal=fatal))
    return(W$data$window)
  else
    return(NULL)
}

## as.owin.im: mask window of a pixel image
## (pixels with non-NA values are inside the window)
as.owin.im <- function(W, ..., fatal=TRUE) {
  if(!verifyclass(W, "im", fatal=fatal))
    return(NULL)
  out <- list(type = "mask",
              xrange = W$xrange,
              yrange = W$yrange,
              dim = W$dim,
              xstep = W$xstep,
              ystep = W$ystep,
              warnings = .Spatstat.Image.Warning,
              xcol = W$xcol,
              yrow = W$yrow,
              m = !is.na(W$v),
              units = unitname(W))
  class(out) <- "owin"
  return(out)
}

## as.owin.psp: window of a line segment pattern
as.owin.psp <- function(W, ..., fatal=TRUE) {
  if(!verifyclass(W, "psp", fatal=fatal))
    return(NULL)
  return(W$window)
}

## as.owin.tess: window of a tessellation
as.owin.tess <- function(W, ..., fatal=TRUE) {
  if(!verifyclass(W, "tess", fatal=fatal))
    return(NULL)
  return(W$window)
}

## as.owin.data.frame:
## Convert a data frame of pixel information to a binary mask window.
## Two columns (x, y): rows are the coordinates of TRUE pixels.
## Three columns (x, y, z): z is a logical inside/outside indicator.
## 'step' optionally gives the pixel dimensions when the coordinate
## sequences are incomplete.
as.owin.data.frame <- function(W, ..., step, fatal=TRUE) {
  if(!verifyclass(W, "data.frame", fatal=fatal))
    return(NULL)
  if(missing(step)) {
    xstep <- ystep <- NULL
  } else {
    step <- ensure2vector(step)
    xstep <- step[1L]
    ystep <- step[2L]
  }
  if(!(ncol(W) %in% c(2,3))) {
    whinge <- "need exactly 2 or 3 columns of data"
    if(fatal) stop(whinge)
    warning(whinge)
    return(NULL)
  }
  if(twocol <- (ncol(W) == 2)) {
    # assume data is a list of TRUE pixels
    W <- cbind(W, TRUE)
  }
  ## identify the x, y, z columns by name or position
  mch <- matchNameOrPosition(c("x", "y", "z"), names(W))
  ix <- mch[1L]
  iy <- mch[2L]
  iz <- mch[3L]
  df <- data.frame(x=W[,ix], y=W[,iy], z=as.logical(W[,iz]))
  with(df, {
    xx <- sortunique(x)
    yy <- sortunique(y)
    jj <- match(x, xx)
    ii <- match(y, yy)
    ## make logical matrix (for incomplete x, y sequence)
    ok <- checkbigmatrix(length(xx), length(yy), fatal=fatal)
    if(!ok) return(NULL)
    mm <- matrix(FALSE, length(yy), length(xx))
    mm[cbind(ii,jj)] <- z
    ## ensure xx and yy are complete equally-spaced sequences
    fx <- fillseq(xx, step=xstep)
    fy <- fillseq(yy, step=ystep)
    xcol <- fx[[1L]]
    yrow <- fy[[1L]]
    ## trap very large matrices
    ok <- checkbigmatrix(length(xcol), length(yrow), fatal=fatal)
    if(!ok) return(NULL)
    ## mapping from xx to xcol, yy to yrow
    jjj <- fx[[2L]]
    iii <- fy[[2L]]
    ## make logical matrix for full sequence
    m <- matrix(FALSE, length(yrow), length(xcol))
    m[iii,jjj] <- mm
    ## make binary mask
    out <- owin(mask=m, xy=list(x=xcol, y=yrow))
    ## warn if area fraction is small: may be a misuse of as.owin
    if(twocol) {
      pcarea <- 100 * nrow(df)/prod(dim(m))
      if(pcarea < 1)
        warning(paste("Window occupies only",
                      paste0(signif(pcarea, 2), "%"),
                      "of frame area. Did you mean owin(poly=df) ?"),
                call.=FALSE)
    }
    return(out)
  })
}
if(is.vector(W) && is.numeric(W) && length(W) == 4) { Z <- owin(W[1:2], W[3:4]) return(Z) } else if(!is.null(Z <- attr(W, "bbox"))) { return(as.owin(Z, ..., fatal=fatal)) } else if(any(c("SpatialPolygons", "SpatialPolygonsDataFrame") %in% class(W))) { gripe <- "The package 'maptools' is needed to convert this data type" if(fatal) stop(gripe, call.=FALSE) else warning(gripe, call.=FALSE) return(NULL) } else { #' no idea if(fatal) stop("Can't interpret W as a window", call.=FALSE) return(NULL) } } # #----------------------------------------------------------------------------- # # Frame <- function(X) { UseMethod("Frame") } "Frame<-" <- function(X, value) { UseMethod("Frame<-") } Frame.default <- function(X) { as.rectangle(X) } "Frame<-.default" <- function(X, value) { Frame(Window(X)) <- value return(X) } ## ......................................................... as.rectangle <- function(w, ...) { if(inherits(w, "owin")) return(owin(w$xrange, w$yrange, unitname=unitname(w))) else if(inherits(w, "im")) return(owin(w$xrange, w$yrange, unitname=unitname(w))) else if(inherits(w, "layered")) return(do.call(boundingbox, unname(lapply(w, as.rectangle)))) else { w <- as.owin(w, ...) return(owin(w$xrange, w$yrange, unitname=unitname(w))) } } # #----------------------------------------------------------------------------- # as.mask <- function(w, eps=NULL, dimyx=NULL, xy=NULL) { # eps: grid mesh (pixel) size # dimyx: dimensions of pixel raster # xy: coordinates of pixel raster nonamedargs <- is.null(eps) && is.null(dimyx) && is.null(xy) uname <- as.unitname(NULL) if(!missing(w) && !is.null(w)) { if(is.data.frame(w)) return(owin(mask=w, xy=xy)) if(is.matrix(w)) { w <- as.data.frame(w) colnames(w) <- c("x", "y") return(owin(mask=w, xy=xy)) } w <- as.owin(w) uname <- unitname(w) } else { if(is.null(xy)) stop("If w is missing, xy is required") } # If it's already a mask, and no other arguments specified, # just return it. 
if(!missing(w) && w$type == "mask" && nonamedargs) return(w) ########################## # First determine pixel coordinates ########################## if(is.null(xy)) { # Pixel coordinates to be computed from other dimensions # First determine row & column dimensions if(!is.null(dimyx)) { dimyx <- ensure2vector(dimyx) nr <- dimyx[1L] nc <- dimyx[2L] } else { # use pixel size 'eps' if(!is.null(eps)) { eps <- ensure2vector(eps) nc <- diff(w$xrange)/eps[1L] nr <- diff(w$yrange)/eps[2L] if(nr < 1 || nc < 1) warning("pixel size parameter eps > size of window") nr <- ceiling(nr) nc <- ceiling(nc) } else { # use spatstat defaults np <- spatstat.options("npixel") if(length(np) == 1) nr <- nc <- np[1L] else { nr <- np[2L] nc <- np[1L] } } } if((mpix <- (nr * nc)/1048576) >= 10) { whinge <- paste("Creating", articlebeforenumber(mpix), paste0(round(mpix, 1), "-megapixel"), "window mask") message(whinge) warning(whinge, call.=FALSE) } # Initialise mask with all entries TRUE rasta <- owin(w$xrange, w$yrange, mask=matrix(TRUE, nr, nc)) } else { # # Pixel coordinates given explicitly: # xy is an image, a mask, or a list(x,y) # if(is.im(xy)) { rasta <- as.owin(xy) rasta$m[] <- TRUE } else if(is.owin(xy)) { if(xy$type != "mask") stop("argument xy does not contain raster coordinates.") rasta <- xy rasta$m[] <- TRUE } else { if(!checkfields(xy, c("x", "y"))) stop(paste(sQuote("xy"), "should be a list containing two vectors x and y")) x <- sortunique(xy$x) y <- sortunique(xy$y) # derive other parameters nr <- length(y) nc <- length(x) # check size if((mpix <- (nr * nc)/1048576) >= 10) { whinge <- paste("Creating", articlebeforenumber(mpix), paste0(round(mpix, 1), "-megapixel"), "window mask") message(whinge) warning(whinge, call.=FALSE) } # x and y pixel sizes dx <- diff(x) if(diff(range(dx)) > 0.01 * mean(dx)) stop("x coordinates must be evenly spaced") xstep <- mean(dx) dy <- diff(y) if(diff(range(dy)) > 0.01 * mean(dy)) stop("y coordinates must be evenly spaced") ystep <- mean(dy) 
xr <- range(x) yr <- range(y) xrange <- xr + xstep * c(-1,1)/2 yrange <- yr + ystep * c(-1,1)/2 # initialise mask with all entries TRUE rasta <- list(type = "mask", xrange = xrange, yrange = yrange, dim = c(nr, nc), xstep = xstep, ystep = ystep, warnings = .Spatstat.Image.Warning, xcol = seq(from=xr[1L], to=xr[2L], length.out=nc), yrow = seq(from=yr[1L], to=yr[2L], length.out=nr), m = matrix(TRUE, nr, nc), units = uname) class(rasta) <- "owin" } if(missing(w)) { # No more window information out <- rasta if(!(identical(x, xy$x) && identical(y, xy$y))) { ## xy is an enumeration of the TRUE pixels out$m[] <- FALSE ij <- cbind(i=match(xy$y, y), j=match(xy$x, x)) out$m[ij] <- TRUE } return(out) } } ################################ # Second, mask pixel raster with existing window ################################ switch(w$type, rectangle = { out <- rasta if(!all(w$xrange == rasta$xrange) || !all(w$yrange == rasta$yrange)) { xcol <- rasta$xcol yrow <- rasta$yrow wx <- w$xrange wy <- w$yrange badrow <- which(yrow > wy[2L] | yrow < wy[1L]) badcol <- which(xcol > wx[2L] | xcol < wx[1L]) out$m[badrow , ] <- FALSE out$m[ , badcol] <- FALSE } }, mask = { # resample existing mask on new raster out <- rastersample(w, rasta) }, polygonal = { # use C code out <- owinpoly2mask(w, rasta, FALSE) }) unitname(out) <- uname return(out) } as.matrix.owin <- function(x, ...) { m <- as.mask(x, ...) 
  return(m$m)
}

#
#
#-----------------------------------------------------------------------------
#
## as.polygonal(): convert any window to polygonal type.
## For a mask, each maximal vertical run of TRUE pixels in each column is
## covered by a slightly-enlarged rectangle, and the rectangles are fused
## with polyclip union operations.
as.polygonal <- function(W, repair=FALSE) {
  verifyclass(W, "owin")
  switch(W$type,
         rectangle = {
           xr <- W$xrange
           yr <- W$yrange
           return(owin(xr, yr, poly=list(x=xr[c(1,2,2,1)],y=yr[c(1,1,2,2)]),
                       unitname=unitname(W), check=FALSE))
         },
         polygonal = {
           if(repair)
             W <- owin(poly=W$bdry, unitname=unitname(W))
           return(W)
         },
         mask = {
           # This could take a while
           M <- W$m
           nr <- nrow(M)
           notM <- !M
           xcol <- W$xcol
           yrow <- W$yrow
           # half-pixel brackets, inflated 10% so adjacent rectangles overlap
           xbracket <- 1.1 * c(-1,1) * W$xstep/2
           ybracket <- 1.1 * c(-1,1) * W$ystep/2
           ## determine resolution for polyclip operations
           p <- list(x0 = xcol[1],
                     y0 = yrow[1],
                     eps = max(W$xstep, W$ystep)/(2^31))
           # identify runs of TRUE entries in each column
           start  <- M & rbind(TRUE, notM[-nr, ])
           finish <- M & rbind(notM[-1, ], TRUE)
           #' build result
           out <- NULL
           for(j in 1:ncol(M)) {
             xj <- xcol[j]
             # identify start and end positions in column j
             starts <- which(start[,j])
             finishes <- which(finish[,j])
             ns <- length(starts)
             nf <- length(finishes)
             if(ns != nf)
               stop(paste("Internal error: length(starts)=", ns,
                          ", length(finishes)=", nf))
             if(ns > 0) {
               for(k in 1:ns) {
                 yfrom <- yrow[starts[k]]
                 yto   <- yrow[finishes[k]]
                 yk <- sort(c(yfrom,yto))
                 #' make rectangle boundary in reversed orientation
                 xrect <- xj + xbracket
                 yrect <- yk + ybracket
                 recto <- list(list(x = xrect[c(1,2,2,1)],
                                    y = yrect[c(2,2,1,1)]))
                 #' add to result
                 if(is.null(out)) {
                   out <- recto
                 } else {
                   out <- polyclip::polyclip(out, recto, "union",
                                             fillA="nonzero",
                                             fillB="nonzero",
                                             eps = p$eps,
                                             x0  = p$x0,
                                             y0  = p$y0)
                 }
               }
             }
           }
           if(is.null(out)) return(emptywindow(Frame(W)))
           # ensure overall positive (anticlockwise) orientation
           totarea <- sum(sapply(out, Area.xypolygon))
           if(totarea < 0)
             out <- lapply(out, reverse.xypolygon)
           out <- owin(poly=out, check=FALSE, unitname=unitname(W))
           return(out)
         }
         )
}

#
# ----------------------------------------------------------------------

## type predicates for windows

is.polygonal <- function(w) {
  return(inherits(w, "owin") && (w$type == "polygonal"))
}

is.rectangle <- function(w) {
  return(inherits(w, "owin") && (w$type == "rectangle"))
}

is.mask <- function(w) {
  return(inherits(w, "owin") && (w$type == "mask"))
}

## validate.mask(): check that w is a binary mask;
## stop (fatal=TRUE) or warn and return FALSE otherwise.

validate.mask <- function(w, fatal=TRUE) {
  verifyclass(w, "owin", fatal=fatal)
  if(w$type == "mask")
    return(TRUE)
  if(fatal)
    stop(paste(short.deparse(substitute(w)), "is not a binary mask"))
  else {
    warning(paste(short.deparse(substitute(w)), "is not a binary mask"))
    return(FALSE)
  }
}

dim.owin <- function(x) { return(x$dim) } ## NULL unless it's a mask

## internal use only:
## rasterx.mask / rastery.mask / rasterxy.mask: pixel-centre coordinates
## of a mask, either as full arrays or (drop=TRUE) restricted to TRUE pixels.

rasterx.mask <- function(w, drop=FALSE) {
  validate.mask(w)
  x <- w$xcol[col(w)]
  x <- if(drop) x[w$m, drop=TRUE] else array(x, dim=w$dim)
  return(x)
}

rastery.mask <- function(w, drop=FALSE) {
  validate.mask(w)
  y <- w$yrow[row(w)]
  y <- if(drop) y[w$m, drop=TRUE] else array(y, dim=w$dim)
  return(y)
}

rasterxy.mask <- function(w, drop=FALSE) {
  validate.mask(w)
  x <- w$xcol[col(w)]
  y <- w$yrow[row(w)]
  if(drop) {
    m <- w$m
    x <- x[m, drop=TRUE]
    y <- y[m, drop=TRUE]
  }
  return(list(x=as.numeric(x),
              y=as.numeric(y)))
}

## nearest.raster.point(): map spatial locations (x,y) to the nearest
## pixel of mask/image w; returns (row,col) indices (indices=TRUE)
## or pixel-centre coordinates. Out-of-range points are clamped.

nearest.raster.point <- function(x,y,w, indices=TRUE) {
  stopifnot(is.mask(w) || is.im(w))
  nr <- w$dim[1L]
  nc <- w$dim[2L]
  if(length(x) == 0) {
    cc <- rr <- integer(0)
  } else {
    cc <- 1 + round((x - w$xcol[1L])/w$xstep)
    rr <- 1 + round((y - w$yrow[1L])/w$ystep)
    cc <- pmax.int(1,pmin.int(cc, nc))
    rr <- pmax.int(1,pmin.int(rr, nr))
  }
  if(indices)
    return(list(row=rr, col=cc))
  else
    return(list(x=w$xcol[cc], y=w$yrow[rr]))
}

## mask2df(): data frame of coordinates of the TRUE pixels of a mask.

mask2df <- function(w) {
  stopifnot(is.owin(w) && w$type == "mask")
  xx <- raster.x(w)
  yy <- raster.y(w)
  ok <- w$m
  xx <- as.vector(xx[ok])
  yy <- as.vector(yy[ok])
  return(data.frame(x=xx, y=yy))
}

#------------------------------------------------------------------

## complement.owin(): set complement of w within 'frame'.

complement.owin <- function(w, frame=as.rectangle(w)) {
  w <- as.owin(w)
  if(reframe <- !missing(frame)) {
    verifyclass(frame, "owin")
    w <- rebound.owin(w, frame)
    # if w was a rectangle, it's now polygonal
  }
  switch(w$type,
         mask = {
           w$m <- !(w$m)
         },
         rectangle = {
           # return empty window
           return(emptywindow(w))
         },
         polygonal = {
           bdry <- w$bdry
           if(length(bdry) == 0) {
             # w is empty
             return(frame)
           }
           # bounding box, in anticlockwise order
           box <- list(x=w$xrange[c(1,2,2,1)],
                       y=w$yrange[c(1,1,2,2)])
           boxarea <- Area.xypolygon(box)
           # first check whether one of the current boundary polygons
           # is the bounding box itself (with + sign)
           if(reframe)
             is.box <- rep.int(FALSE, length(bdry))
           else {
             nvert <- lengths(lapply(bdry, getElement, name="x"))
             areas <- sapply(bdry, Area.xypolygon)
             # tolerance factor for comparing areas
             boxarea.mineps <- boxarea * (0.99999)
             is.box <- (nvert == 4 & areas >= boxarea.mineps)
             if(sum(is.box) > 1)
               stop("Internal error: multiple copies of bounding box")
             if(all(is.box)) {
               return(emptywindow(box))
             }
           }
           # if box is present (with + sign), remove it
           if(any(is.box))
             bdry <- bdry[!is.box]
           # now reverse the direction of each polygon
           bdry <- lapply(bdry, reverse.xypolygon, adjust=TRUE)
           # if box was absent, add it
           if(!any(is.box))
             bdry <- c(bdry, list(box))   # sic
           # put back into w
           w$bdry <- bdry
         },
         stop("unrecognised window type", w$type)
         )
  return(w)
}

#-----------------------------------------------------------

## inside.owin(): logical vector, TRUE where (x,y) lies inside window w.
## x may also be a list/data frame with components x and y.

inside.owin <- function(x, y, w) {
  # test whether (x,y) is inside window w
  # x, y may be vectors
  if((missing(y) || is.null(y)) && all(c("x", "y") %in% names(x))) {
    y <- x$y
    x <- x$x
  }
  w <- as.owin(w)
  if(length(x)==0)
    return(logical(0))
  # test whether inside bounding rectangle
  # (with a small numerical tolerance at the boundary)
  xr <- w$xrange
  yr <- w$yrange
  eps <- sqrt(.Machine$double.eps)
  frameok <- (x >= xr[1L] - eps) & (x <= xr[2L] + eps) &
             (y >= yr[1L] - eps) & (y <= yr[2L] + eps)
  if(!any(frameok))  # all points OUTSIDE window - no further work needed
    return(frameok)
  ok <- frameok
  switch(w$type,
         rectangle = {
           return(ok)
         },
         polygonal = {
           ## check scale
           framesize <- max(diff(xr), diff(yr))
           if(framesize > 1e6 || framesize < 1e-6) {
             ## rescale to avoid numerical overflow
             scalefac <- as.numeric(framesize)/100
             w <- as.polygonal(rescale(w, scalefac))
             x <- x/scalefac
             y <- y/scalefac
           }
           xy <- list(x=x,y=y)
           bdry <- w$bdry
           # accumulate winding scores over all boundary polygons
           total <- numeric(length(x))
           on.bdry <- rep.int(FALSE, length(x))
           for(i in seq_along(bdry)) {
             score <- inside.xypolygon(xy, bdry[[i]], test01=FALSE)
             total <- total + score
             on.bdry <- on.bdry | attr(score, "on.boundary")
           }
           # any points identified as belonging to the boundary get score 1
           total[on.bdry] <- 1
           # check for sanity now..
           uhoh <- (total * (1-total) != 0)
           if(any(uhoh)) {
             nuh <- sum(uhoh)
             warning(paste("point-in-polygon test had difficulty with",
                           nuh, ngettext(nuh, "point", "points"),
                           "(total score not 0 or 1)"),
                     call.=FALSE)
             total[uhoh] <- 0
           }
           return(ok & (total != 0))
         },
         mask = {
           # consider only those points which are inside the frame
           xf <- x[frameok]
           yf <- y[frameok]
           # map locations to raster (row,col) coordinates
           loc <- nearest.raster.point(xf,yf,w)
           # look up mask values
           okf <- (w$m)[cbind(loc$row, loc$col)]
           # insert into 'ok' vector
           ok[frameok] <- okf
           return(ok)
         },
         stop("unrecognised window type", w$type)
         )
}

#-------------------------------------------------------------------------

## print method for windows

print.owin <- function(x, ..., prefix="window: ") {
  verifyclass(x, "owin")
  unitinfo <- summary(unitname(x))
  switch(x$type,
         rectangle={
           rectname <- paste0(prefix, "rectangle =")
         },
         polygonal={
           nonemp <- (length(x$bdry) != 0)
           splat(paste0(prefix, if(nonemp) "polygonal boundary" else "empty"))
           rectname <- "enclosing rectangle:"
         },
         mask={
           splat(paste0(prefix, "binary image mask"))
           di <- x$dim
           splat(di[1L], "x", di[2L], "pixel array (ny, nx)")
           rectname <- "enclosing rectangle:"
         }
         )
  splat(rectname,
        prange(zapsmall(x$xrange)),
        "x",
        prange(zapsmall(x$yrange)),
        unitinfo$plural,
        unitinfo$explain)
  invisible(NULL)
}

## summary method for windows: collects ranges, area, and
## type-specific details (polygon areas/vertices, pixel geometry)

summary.owin <- function(object, ...)
{
  verifyclass(object, "owin")
  result <- list(xrange=object$xrange,
                 yrange=object$yrange,
                 type=object$type,
                 area=area(object),
                 units=unitname(object))
  result$areafraction <- with(result, area/(diff(xrange) * diff(yrange)))
  switch(object$type,
         rectangle={
         },
         polygonal={
           poly <- object$bdry
           result$npoly <- npoly <- length(poly)
           if(npoly == 0) {
             result$areas <- result$nvertices <- numeric(0)
           } else if(npoly == 1) {
             result$areas <- Area.xypolygon(poly[[1L]])
             result$nvertices <- length(poly[[1L]]$x)
           } else {
             result$areas <- unlist(lapply(poly, Area.xypolygon))
             result$nvertices <- lengths(lapply(poly, getElement, name="x"))
           }
           # holes have negative signed area
           result$nhole <- sum(result$areas < 0)
         },
         mask={
           result$npixels <- object$dim
           result$xstep <- object$xstep
           result$ystep <- object$ystep
         }
         )
  class(result) <- "summary.owin"
  result
}

## print method for summary.owin objects

print.summary.owin <- function(x, ...) {
  verifyclass(x, "summary.owin")
  unitinfo <- summary(x$units)
  pluralunits <- unitinfo$plural
  singularunits <- unitinfo$singular
  switch(x$type,
         rectangle={
           rectname <- "Window: rectangle ="
         },
         polygonal={
           np <- x$npoly
           splat("Window: polygonal boundary")
           if(np == 0) {
             splat("window is empty")
           } else if(np == 1) {
             splat("single connected closed polygon with",
                   x$nvertices, "vertices")
           } else {
             nh <- x$nhole
             holy <- if(nh == 0) "(no holes)" else
                     if(nh == 1) "(1 hole)" else
                     paren(paste(nh, "holes"))
             splat(np, "separate polygons", holy)
             if(np > 0)
               print(data.frame(vertices=x$nvertices,
                                area=signif(x$areas, 6),
                                relative.area=signif(x$areas/x$area,3),
                                row.names=paste("polygon", 1:np,
                                  ifelse(x$areas < 0, "(hole)", "")
                                  )))
           }
           rectname <- "enclosing rectangle:"
         },
         mask={
           splat("binary image mask")
           di <- x$npixels
           splat(di[1L], "x", di[2L], "pixel array (ny, nx)")
           splat("pixel size:",
                 signif(x$xstep,3), "by", signif(x$ystep,3),
                 pluralunits)
           rectname <- "enclosing rectangle:"
         }
         )
  splat(rectname,
        prange(zapsmall(x$xrange)),
        "x",
        prange(zapsmall(x$yrange)),
        pluralunits)
  # also report width x height when the origin is not (0,0)
  if(x$xrange[1] != 0 || x$yrange[1] != 0) {
    width <- diff(x$xrange)
    height <- diff(x$yrange)
    blank <- paste(rep(" ", nchar(rectname)), collapse="")
    splat(blank, paren(paste(signif(width, 4), "x", signif(height, 4),
                             pluralunits)))
  }
  Area <- signif(x$area, 6)
  splat("Window area =", Area, "square",
        if(Area == 1) singularunits else pluralunits)
  if(!is.null(ledge <- unitinfo$legend))
    splat(ledge)
  if(x$type != "rectangle")
    splat("Fraction of frame area:", signif(x$areafraction, 3))
  return(invisible(x))
}

## as.data.frame method for windows:
## polygonal -> data frame of boundary vertices (with id and sign when
## there are several polygons); mask -> coordinates of pixels.

as.data.frame.owin <- function(x, ..., drop=TRUE) {
  stopifnot(is.owin(x))
  switch(x$type,
         rectangle = {
           x <- as.polygonal(x)
         },
         polygonal = {
         },
         mask = {
           xy <- rasterxy.mask(x, drop=drop)
           if(!drop)
             xy <- append(xy, list(inside=as.vector(x$m)))
           return(as.data.frame(xy, ...))
         })
  b <- x$bdry
  ishole <- sapply(b, is.hole.xypolygon)
  # sign = -1 for holes, +1 for outer boundaries
  sign <- (-1)^ishole
  b <- lapply(b, as.data.frame, ...)
  nb <- length(b)
  if(nb == 1)
    return(b[[1L]])
  dfs <- mapply(cbind, b, id=as.list(seq_len(nb)),
                sign=as.list(sign), SIMPLIFY=FALSE)
  df <- do.call(rbind, dfs)
  return(df)
}

## discretise(): replace the window of point pattern X by a mask,
## ensuring every data point still lies inside (pixels containing
## stranded points are forced to TRUE).

discretise <- function(X,eps=NULL,dimyx=NULL,xy=NULL) {
  verifyclass(X,"ppp")
  W <- X$window
  ok <- inside.owin(X$x,X$y,W)
  if(!all(ok))
    stop("There are points of X outside the window of X")
  all.null <- is.null(eps) & is.null(dimyx) & is.null(xy)
  if(W$type=="mask" & all.null) return(X)
  WM  <- as.mask(W,eps=eps,dimyx=dimyx,xy=xy)
  nok <- !inside.owin(X$x,X$y,WM)
  if(any(nok)) {
    ifix <- nearest.raster.point(X$x[nok],X$y[nok], WM)
    ifix <- cbind(ifix$row,ifix$col)
    WM$m[ifix] <- TRUE
  }
  X$window <- WM
  X
}

## pixelcentres(): point pattern of the centres of the TRUE pixels.

pixelcentres <- function (X, W=NULL,...) {
  X <- as.mask(as.owin(X), ...)
if(is.null(W)) W <- as.rectangle(X) Y <- as.ppp(raster.xy(X,drop=TRUE),W=W) return(Y) } owin2polypath <- function(w) { w <- as.polygonal(w) b <- w$bdry xvectors <- lapply(b, getElement, name="x") yvectors <- lapply(b, getElement, name="y") xx <- unlist(lapply(xvectors, append, values=NA, after=FALSE))[-1] yy <- unlist(lapply(yvectors, append, values=NA, after=FALSE))[-1] return(list(x=xx, y=yy)) } ## generics which extract and assign the window of some object Window <- function(X, ...) { UseMethod("Window") } "Window<-" <- function(X, ..., value) { UseMethod("Window<-") } spatstat/R/density.ppp.R0000644000176200001440000010301613365464476014741 0ustar liggesusers# # density.ppp.R # # Method for 'density' for point patterns # # $Revision: 1.106 $ $Date: 2018/10/29 01:25:21 $ # # ksmooth.ppp <- function(x, sigma, ..., edge=TRUE) { # .Deprecated("density.ppp", package="spatstat") # density.ppp(x, sigma, ..., edge=edge) # } density.ppp <- local({ density.ppp <- function(x, sigma=NULL, ..., weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE) { verifyclass(x, "ppp") output <- pickoption("output location type", at, c(pixels="pixels", points="points")) if(!identical(kernel, "gaussian")) { validate2Dkernel(kernel) ## kernel is only partly implemented! 
    if(se)
      stop("Standard errors are not implemented for non-Gaussian kernel")
    if(verbose && scalekernel &&
       (is.function(sigma) || (is.null(sigma) && is.null(varcov))))
      warning("Bandwidth selection will be based on Gaussian kernel")
  }
  ## resolve bandwidth: sigma (scalar/vector/function) or varcov matrix
  ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, x=x, adjust=adjust)
  sigma <- ker$sigma
  varcov <- ker$varcov
  ## sigma.is.infinite <- ker$infinite
  ## weights may be given as a pixel image or an expression in the marks
  if(is.im(weights)) {
    weights <- safelookup(weights, x) # includes warning if NA
  } else if(is.expression(weights))
    weights <- eval(weights, envir=as.data.frame(x), enclos=parent.frame())
  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
    weights <- NULL

  if(se) {
    # compute standard error
    SE <- denspppSEcalc(x, sigma=sigma, varcov=varcov,
                        ...,
                        weights=weights, edge=edge, at=output,
                        leaveoneout=leaveoneout,
                        adjust=adjust, diggle=diggle)
    if(positive)
      SE <- posify(SE)
  }

  ## infinite bandwidth
  if(bandwidth.is.infinite(sigma)) {
    #' uniform estimate
    nx <- npoints(x)
    single <- is.null(dim(weights))
    totwt <- if(is.null(weights)) nx else
             if(single) sum(weights) else colSums(weights)
    if(!edge) totwt <- 0 * totwt
    W <- Window(x)
    A <- area.owin(W)
    switch(output,
           pixels = {
             E <- solapply(totwt/A, as.im, W=W, ...)
             names(E) <- colnames(weights)
             if(single) E <- E[[1L]]
           },
           points = {
             numerator <- rep(totwt, each=nx)
             if(!single) numerator <- matrix(numerator, nrow=nx)
             if(leaveoneout && edge)
               numerator <- numerator - (weights %orifnull% 1)
             E <- numerator/A
             if(!single) colnames(E) <- colnames(weights)
           })
    result <- if(se) list(estimate=E, SE=SE) else E
    return(result)
  }

  if(output == "points") {
    # VALUES AT DATA POINTS ONLY
    result <- densitypointsEngine(x, sigma, varcov=varcov,
                                  kernel=kernel,
                                  scalekernel=scalekernel,
                                  weights=weights, edge=edge,
                                  leaveoneout=leaveoneout,
                                  diggle=diggle, ...)
    if(verbose && !is.null(uhoh <- attr(result, "warnings"))) {
      switch(uhoh,
             underflow=warning("underflow due to very small bandwidth"),
             warning(uhoh))
    }
    ## constrain values to be positive
    if(positive)
      result <- posify(result)
    if(se)
      result <- list(estimate=result, SE=SE)
    return(result)
  }

  # VALUES AT PIXELS
  if(!edge) {
    # no edge correction
    edg <- NULL
    raw <- second.moment.calc(x, sigma, what="smooth", ...,
                              kernel=kernel,
                              scalekernel=scalekernel,
                              weights=weights, varcov=varcov)
    raw <- divide.by.pixelarea(raw)
    smo <- raw
  } else if(!diggle) {
    # edge correction e(u): divide smoothed image by edge image
    both <- second.moment.calc(x, sigma, what="smoothedge", ...,
                               kernel=kernel,
                               scalekernel=scalekernel,
                               weights=weights, varcov=varcov)
    raw <- divide.by.pixelarea(both$smooth)
    edg <- both$edge
    smo <- if(is.im(raw)) eval.im(raw/edg) else
           lapply(raw, divideimage, denom=edg)
  } else {
    # edge correction e(x_i) (Diggle)
    edg <- second.moment.calc(x, sigma, what="edge", ...,
                              scalekernel=scalekernel,
                              kernel=kernel, varcov=varcov)
    wi <- 1/safelookup(edg, x, warn=FALSE)
    wi[!is.finite(wi)] <- 0
    # edge correction becomes weight attached to points
    if(is.null(weights)) {
      newweights <- wi
    } else if(is.matrix(weights) || is.data.frame(weights)) {
      stopifnot(nrow(weights) == npoints(x))
      newweights <- weights * wi
    } else {
      stopifnot(length(weights) == npoints(x))
      newweights <- weights * wi
    }
    raw <- second.moment.calc(x, sigma, what="smooth", ...,
                              kernel=kernel,
                              scalekernel=scalekernel,
                              weights=newweights, varcov=varcov)
    raw <- divide.by.pixelarea(raw)
    smo <- raw
  }

  # restrict to the window of the data
  result <- if(is.im(smo)) smo[x$window, drop=FALSE] else
            solapply(smo, "[", i=x$window, drop=FALSE)

  # internal use only
  spill <- resolve.1.default(list(spill=FALSE), list(...))
  if(spill)
    return(list(result=result, sigma=sigma, varcov=varcov,
                raw = raw, edg=edg))

  # constrain values to be positive
  if(positive)
    result <- posify(result)

  # normal return
  attr(result, "sigma") <- sigma
  attr(result, "varcov") <- varcov
  attr(result, "kernel") <- kernel
  if(se)
    result <- list(estimate=result, SE=SE)
  return(result)
}

## divide one image by another, pixelwise (helper for edge correction)
divideimage <- function(numer, denom) eval.im(numer/denom)

## posify(): clamp values to be strictly positive (>= .Machine$double.xmin),
## handling images, solists, numeric vectors and lists thereof.
posify <- function(x, eps=.Machine$double.xmin) {
  force(eps) # scalpel
  if(is.im(x)) return(eval.im(pmax(eps, x)))
  if(inherits(x, "solist")) return(solapply(x, posify, eps=eps))
  if(is.numeric(x)) return(pmax(eps, x))
  # data frame or list
  if(is.list(x) && all(sapply(x, is.numeric)))
    return(lapply(x, posify, eps=eps))
  warning("Internal error: posify did not recognise data format")
  return(x)
}

## convert pixel sums to densities by dividing by pixel area
divide.by.pixelarea <- function(x) {
  if(is.im(x)) {
    x$v <- x$v/(x$xstep * x$ystep)
  } else {
    for(i in seq_along(x))
      x[[i]]$v <- with(x[[i]], v/(xstep * ystep))
  }
  return(x)
}

## denspppSEcalc(): standard error of the kernel estimate.
## Uses the identity that the variance involves smoothing with the
## kernel of bandwidth sigma/sqrt(2) (varcov/2), scaled by varconst.
denspppSEcalc <- function(x, sigma, varcov, ...,
                          weights, edge, diggle, at) {
  ## Calculate standard error, rather than estimate
  nx <- npoints(x)
  if(bandwidth.is.infinite(sigma)) {
    #' special case - uniform
    single <- is.null(dim(weights))
    totwt2 <- if(is.null(weights)) nx else
              if(single) sum(weights^2) else colSums(weights^2)
    if(!edge) totwt2 <- 0 * totwt2
    W <- Window(x)
    A <- area.owin(W)
    switch(at,
           pixels = {
             V <- solapply(totwt2/A, as.im, W=W, ...)
             names(V) <- colnames(weights)
             if(single) V <- V[[1L]]
           },
           points = {
             numerator <- rep(totwt2, each=nx)
             if(!single) numerator <- matrix(numerator, nrow=nx)
             leaveoneout <- resolve.1.default(list(leaveoneout=TRUE), list(...))
             if(edge && leaveoneout)
               numerator <- numerator - (weights %orifnull% 1)^2
             V <- numerator/A
             if(!single) colnames(V) <- colnames(weights)
           })
    return(sqrt(V))
  }
  ## Usual case
  tau <- taumat <- NULL
  if(is.null(varcov)) {
    varconst <- 1/(4 * pi * prod(ensure2vector(sigma)))
    tau <- sigma/sqrt(2)
  } else {
    varconst <- 1/(4 * pi * sqrt(det(varcov)))
    taumat <- varcov/2
  }
  ## Calculate edge correction weights
  if(edge) {
    edgeim <- second.moment.calc(x, sigma, what="edge", ..., varcov=varcov)
    if(diggle || at == "points") {
      edgeX <- safelookup(edgeim, x, warn=FALSE)
      diggleX <- 1/edgeX
      diggleX[!is.finite(diggleX)] <- 0
    }
    edgeim <- edgeim[Window(x), drop=FALSE]
  }
  ## Perform smoothing
  if(!edge) {
    ## no edge correction
    V <- density(x, sigma=tau, varcov=taumat, ...,
                 weights=weights, edge=edge, diggle=diggle, at=at)
  } else if(!diggle) {
    ## edge correction e(u)
    V <- density(x, sigma=tau, varcov=taumat, ...,
                 weights=weights, edge=edge, diggle=diggle, at=at)
    V <- if(at == "pixels") (V/edgeim) else (V * diggleX)
  } else {
    ## Diggle edge correction e(x_i)
    wts <- diggleX * (weights %orifnull% 1)
    V <- density(x, sigma=tau, varcov=taumat, ...,
                 weights=wts, edge=edge, diggle=diggle, at=at)
  }
  V <- V * varconst
  return(sqrt(V))
}

density.ppp
})

## densitypointsEngine(): kernel estimate evaluated at the data points
## themselves, with optional leave-one-out and edge correction.
## Dispatches to compiled C routines when the kernel is Gaussian and the
## relevant spatstat.options are enabled; otherwise uses interpreted code
## based on close pairs.

densitypointsEngine <- function(x, sigma=NULL, ...,
                                kernel="gaussian",
                                scalekernel=is.character(kernel),
                                weights=NULL, edge=TRUE, varcov=NULL,
                                leaveoneout=TRUE,
                                diggle=FALSE,
                                sorted=FALSE,
                                spill=FALSE,
                                cutoff=NULL) {
  debugging <- spatstat.options("developer")
  stopifnot(is.logical(leaveoneout))
  validate2Dkernel(kernel)
  if(is.character(kernel)) kernel <- match2DkernelName(kernel)
  isgauss <- identical(kernel, "gaussian")
  if(isgauss) {
    ## constant factor in Gaussian density
    if(is.null(varcov)) {
      gaussconst <- 1/(2 * pi * sigma^2)
    } else {
      detSigma
 <- det(varcov)
      Sinv <- solve(varcov)
      gaussconst <- 1/(2 * pi * sqrt(detSigma))
    }
  }
  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
    weights <- NULL
  ## infinite bandwidth
  if(bandwidth.is.infinite(sigma)) {
    #' uniform estimate
    nx <- npoints(x)
    single <- is.null(dim(weights))
    totwt <- if(is.null(weights)) nx else
             if(single) sum(weights) else colSums(weights)
    if(!edge) totwt <- 0 * totwt
    W <- Window(x)
    A <- area.owin(W)
    numerator <- rep(totwt, each=nx)
    if(!single) numerator <- matrix(numerator, nrow=nx)
    if(leaveoneout && edge)
      numerator <- numerator - (weights %orifnull% 1)
    result <- numerator/A
    if(!single) colnames(result) <- colnames(weights)
    return(result)
  }
  ## cutoff distance (beyond which the kernel value is treated as zero)
  ## NB: input argument 'cutoff' is either NULL or
  ##     an absolute distance (if scalekernel=FALSE)
  ##     a number of standard deviations (if scalekernel=TRUE)
  cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov,
                           scalekernel=scalekernel, cutoff=cutoff,
                           fatal=TRUE)
  ## cutoff is now an absolute distance
  if(debugging)
    cat(paste("cutoff=", cutoff, "\n"))
  if(leaveoneout && npoints(x) > 1) {
    ## ensure each point has its closest neighbours within the cutoff
    nndmax <- maxnndist(x)
    cutoff <- max(2 * nndmax, cutoff)
    if(debugging)
      cat(paste("adjusted cutoff=", cutoff, "\n"))
  }
  # validate weights: k = number of weight columns
  if(is.null(weights)) {
    k <- 1L
  } else if(is.matrix(weights) || is.data.frame(weights)) {
    k <- ncol(weights)
    stopifnot(nrow(weights) == npoints(x))
    weights <- as.data.frame(weights)
    weightnames <- colnames(weights)
  } else {
    k <- 1L
    stopifnot(length(weights) == npoints(x) || length(weights) == 1L)
  }
  # evaluate edge correction weights at points
  if(edge) {
    win <- x$window
    if(isgauss && is.null(varcov) && win$type == "rectangle") {
      # evaluate Gaussian probabilities directly
      # (mass of the kernel inside the rectangle, per point)
      xr <- win$xrange
      yr <- win$yrange
      xx <- x$x
      yy <- x$y
      xprob <-
        pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma)
      yprob <-
        pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma)
      edgeweight <- xprob * yprob
    } else {
      edg <- second.moment.calc(x, sigma=sigma,
                                kernel=kernel, scalekernel=scalekernel,
                                what="edge", varcov=varcov, ...)
      edgeweight <- safelookup(edg, x, warn=FALSE)
    }
    if(diggle) {
      # Diggle edge correction
      # edgeweight is attached to each point
      if(is.null(weights)) {
        k <- 1L
        weights <- 1/edgeweight
      } else {
        weights <- weights/edgeweight
      }
    }
  }

  if(isgauss &&
     spatstat.options("densityTransform") && spatstat.options("densityC")) {
    ## .................. experimental C code .....................
    if(debugging)
      cat('Using experimental code!\n')
    npts <- npoints(x)
    result <- if(k == 1L) numeric(npts) else matrix(, npts, k)
    xx <- x$x
    yy <- x$y
    ## transform to standard coordinates
    ## (so that the Gaussian becomes isotropic with unit scale)
    if(is.null(varcov)) {
      xx <- xx/(sqrt(2) * sigma)
      yy <- yy/(sqrt(2) * sigma)
    } else {
      xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2)
      xx <- xy[,1L]
      yy <- xy[,2L]
      sorted <- FALSE
    }
    ## cutoff in standard coordinates
    sd <- sigma %orifnull% sqrt(min(eigen(varcov)$values))
    cutoff <- cutoff/(sqrt(2) * sd)
    ## sort into increasing order of x coordinate (required by C code)
    if(!sorted) {
      oo <- fave.order(xx)
      xx <- xx[oo]
      yy <- yy[oo]
    }
    if(is.null(weights)) {
      zz <- .C("Gdenspt",
               nxy = as.integer(npts),
               x = as.double(xx),
               y = as.double(yy),
               rmaxi = as.double(cutoff),
               result = as.double(double(npts)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[oo] <- zz$result
      result <- result * gaussconst
    } else if(k == 1L) {
      wtsort <- if(sorted) weights else weights[oo]
      zz <- .C("Gwtdenspt",
               nxy = as.integer(npts),
               x = as.double(xx),
               y = as.double(yy),
               rmaxi = as.double(cutoff),
               weight = as.double(wtsort),
               result = as.double(double(npts)),
               PACKAGE = "spatstat")
      if(sorted) result <- zz$result else result[oo] <- zz$result
      result <- result * gaussconst
    } else {
      ## matrix of weights: one C call per column
      wtsort <- if(sorted) weights else weights[oo, ]
      for(j in 1:k) {
        zz <- .C("Gwtdenspt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 rmaxi = as.double(cutoff),
                 weight = as.double(wtsort[,j]),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result
      }
      result <- result * gaussconst
    }
  } else if(isgauss && spatstat.options("densityC")) {
    # .................. C code ...........................
    if(debugging)
      cat('Using standard code.\n')
    npts <- npoints(x)
    result <- if(k == 1L) numeric(npts) else matrix(, npts, k)
    # sort into increasing order of x coordinate (required by C code)
    if(sorted) {
      xx <- x$x
      yy <- x$y
    } else {
      oo <- fave.order(x$x)
      xx <- x$x[oo]
      yy <- x$y[oo]
    }
    if(is.null(varcov)) {
      # isotropic kernel
      if(is.null(weights)) {
        zz <- .C("denspt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 rmaxi = as.double(cutoff),
                 sig = as.double(sigma),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      } else if(k == 1L) {
        wtsort <- if(sorted) weights else weights[oo]
        zz <- .C("wtdenspt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 rmaxi = as.double(cutoff),
                 sig = as.double(sigma),
                 weight = as.double(wtsort),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      } else {
        # matrix of weights
        wtsort <- if(sorted) weights else weights[oo, ]
        for(j in 1:k) {
          zz <- .C("wtdenspt",
                   nxy = as.integer(npts),
                   x = as.double(xx),
                   y = as.double(yy),
                   rmaxi = as.double(cutoff),
                   sig = as.double(sigma),
                   weight = as.double(wtsort[,j]),
                   result = as.double(double(npts)),
                   PACKAGE = "spatstat")
          if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result
        }
      }
    } else {
      # anisotropic kernel: pass flattened inverse covariance matrix
      flatSinv <- as.vector(t(Sinv))
      if(is.null(weights)) {
        zz <- .C("adenspt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 rmaxi = as.double(cutoff),
                 detsigma = as.double(detSigma),
                 sinv = as.double(flatSinv),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      } else if(k == 1L) {
        # vector of weights
        wtsort <- if(sorted) weights else weights[oo]
        zz <- .C("awtdenspt",
                 nxy = as.integer(npts),
                 x = as.double(xx),
                 y = as.double(yy),
                 rmaxi = as.double(cutoff),
                 detsigma = as.double(detSigma),
                 sinv = as.double(flatSinv),
                 weight = as.double(wtsort),
                 result = as.double(double(npts)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[oo] <- zz$result
      } else {
        # matrix of weights
        wtsort <- if(sorted) weights else weights[oo, ]
        for(j in 1:k) {
          zz <- .C("awtdenspt",
                   nxy = as.integer(npts),
                   x = as.double(xx),
                   y = as.double(yy),
                   rmaxi = as.double(cutoff),
                   detsigma = as.double(detSigma),
                   sinv = as.double(flatSinv),
                   weight = as.double(wtsort[,j]),
                   result = as.double(double(npts)),
                   PACKAGE = "spatstat")
          if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result
        }
      }
    }
  } else {
    # ..... interpreted code .........................................
    close <- closepairs(x, cutoff)
    i <- close$i
    j <- close$j
    d <- close$d
    npts <- npoints(x)
    result <- if(k == 1L) numeric(npts) else matrix(, npts, k)
    # evaluate contribution from each close pair (i,j)
    if(isgauss) {
      if(is.null(varcov)) {
        contrib <- gaussconst * exp(-d^2/(2 * sigma^2))
      } else {
        ## anisotropic kernel: quadratic form in the inverse covariance
        dx <- close$dx
        dy <- close$dy
        contrib <- gaussconst * exp(-(dx * (dx * Sinv[1L,1L] + dy * Sinv[1L,2L])
                                      + dy * (dx * Sinv[2L,1L]
                                              + dy * Sinv[2L,2L]))/2)
      }
    } else {
      contrib <- evaluate2Dkernel(kernel, close$dx, close$dy,
                                  sigma=sigma, varcov=varcov,
                                  scalekernel=scalekernel, ...)
    }
    ## sum (weighted) contributions
    ## query point i, data point j
    ifac <- factor(i, levels=1:npts)
    if(is.null(weights)) {
      result <- tapplysum(contrib, list(ifac))
    } else if(k == 1L) {
      wcontrib <- contrib * weights[j]
      result <- tapplysum(wcontrib, list(ifac))
    } else {
      for(kk in 1:k) {
        wcontribkk <- contrib * weights[j, kk]
        result[,kk] <- tapplysum(wcontribkk, list(ifac))
      }
    }
    #
  }
  # ----- contribution from point itself ----------------
  if(!leaveoneout) {
    #' add contribution from point itself
    if(isgauss) {
      self <- gaussconst
    } else {
      self <- evaluate2Dkernel(kernel, 0, 0,
                               sigma=sigma, varcov=varcov,
                               scalekernel=scalekernel, ...)
    }
    if(!is.null(weights))
      self <- self * weights
    result <- result + self
  }
  # ........  Edge correction ........................................
  # (in the Diggle case the correction was already folded into weights)
  if(edge && !diggle)
    result <- result/edgeweight

  # ............. validate .................................
  npts <- npoints(x)
  if(k == 1L) {
    result <- as.numeric(result)
    if(length(result) != npts)
      stop(paste("Internal error: incorrect number of lambda values",
                 "in leave-one-out method:",
                 "length(lambda) = ", length(result),
                 "!=", npts, "= npoints"))
    if(anyNA(result)) {
      nwrong <- sum(is.na(result))
      stop(paste("Internal error:", nwrong, "NA or NaN",
                 ngettext(nwrong, "value", "values"),
                 "generated in leave-one-out method"))
    }
  } else {
    if(ncol(result) != k)
      stop(paste("Internal error: incorrect number of columns returned:",
                 ncol(result), "!=", k))
    colnames(result) <- weightnames
    if(nrow(result) != npts)
      stop(paste("Internal error: incorrect number of rows of lambda values",
                 "in leave-one-out method:",
                 "nrow(lambda) = ", nrow(result),
                 "!=", npts, "= npoints"))
    if(anyNA(result)) {
      nwrong <- sum(!complete.cases(result))
      stop(paste("Internal error:", nwrong,
                 ngettext(nwrong, "row", "rows"),
                 "of NA values generated in leave-one-out method"))
    }
  }
  if(spill)
    return(list(result=result, sigma=sigma, varcov=varcov, edg=edgeweight))
  # tack on bandwidth
  attr(result, "sigma") <- sigma
  attr(result, "varcov") <- varcov
  #
return(result)
}

#' resolve.2D.kernel:
#' Reconcile the bandwidth arguments 'sigma' (scalar or length-2 vector of
#' standard deviations) and 'varcov' (2 x 2 variance-covariance matrix)
#' for a two-dimensional smoothing kernel, applied to point pattern 'x'.
#' 'sigma' may also be a bandwidth selection function.
#' Returns list(sigma, varcov, cutoff, warnings) in which exactly one of
#' sigma/varcov is non-NULL; cutoff is 8 standard deviations; 'warnings'
#' is "underflow" when the cutoff falls below 'mindist'.
resolve.2D.kernel <- function(..., sigma=NULL, varcov=NULL, x, mindist=NULL,
                              adjust=1, bwfun=NULL, allow.zero=FALSE) {
  ## a function given as 'sigma' is treated as a bandwidth selector
  if(is.function(sigma)) {
    bwfun <- sigma
    sigma <- NULL
  }
  if(is.null(sigma) && is.null(varcov) && !is.null(bwfun)) {
    # call bandwidth selection function
    bw <- do.call.matched(bwfun, resolve.defaults(list(X=x), list(...)))
    # interpret the result as either sigma or varcov
    if(!is.numeric(bw))
      stop("bandwidth selector returned a non-numeric result")
    if(length(bw) %in% c(1L,2L)) {
      sigma <- as.numeric(bw)
      if(!all(sigma > 0)) {
        gripe <- "bandwidth selector returned negative value(s)"
        if(allow.zero) warning(gripe) else stop(gripe)
      }
    } else if(is.matrix(bw) && nrow(bw) == 2 && ncol(bw) == 2) {
      varcov <- bw
      if(!all(eigen(varcov)$values > 0))
        stop("bandwidth selector returned matrix with negative eigenvalues")
    } else
      stop("bandwidth selector did not return a matrix or numeric value")
  }
  sigma.given <- !is.null(sigma)
  varcov.given <- !is.null(varcov)
  if(sigma.given) {
    stopifnot(is.numeric(sigma))
    stopifnot(length(sigma) %in% c(1L,2L))
    if(!allow.zero)
      stopifnot(all(sigma > 0))
  }
  if(varcov.given)
    stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 )
  # reconcile: branch on how many of sigma/varcov were supplied (0, 1 or 2)
  ngiven <- varcov.given + sigma.given
  switch(ngiven+1L,
         {
           # default: 1/8 of the shorter side of the bounding rectangle
           w <- x$window
           sigma <- (1/8) * shortside(as.rectangle(w))
         },
         {
           # one given: a length-2 sigma becomes a diagonal varcov
           if(sigma.given && length(sigma) == 2)
             varcov <- diag(sigma^2)
           if(!is.null(varcov))
             sigma <- NULL
         },
         {
           stop(paste("Give only one of the arguments",
                      sQuote("sigma"), "and", sQuote("varcov")))
         })
  # apply adjustments
  if(!is.null(sigma)) sigma <- adjust * sigma
  if(!is.null(varcov)) varcov <- (adjust^2) * varcov
  #
  sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov)))
  cutoff <- 8 * sd
  uhoh <- if(!is.null(mindist) && cutoff < mindist) "underflow" else NULL
  result <- list(sigma=sigma, varcov=varcov, cutoff=cutoff, warnings=uhoh)
  return(result)
}

#' densitycrossEngine:
#' kernel estimate, at the points of 'Xquery', of intensity based on the
#' points of 'Xdata'.  (Header continues on the next line.)
densitycrossEngine <- function(Xdata, Xquery, sigma=NULL, ..., kernel="gaussian",
scalekernel=is.character(kernel),
                               weights=NULL, edge=TRUE, varcov=NULL,
                               diggle=FALSE, sorted=FALSE, cutoff=NULL) {
  validate2Dkernel(kernel)
  if(is.character(kernel)) kernel <- match2DkernelName(kernel)
  isgauss <- identical(kernel, "gaussian") && scalekernel
  ## zero-length (or zero-row) weights are treated as absent
  if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0))
    weights <- NULL
  # validate weights; k = number of weight columns (1 for a vector)
  if(is.null(weights)) {
    k <- 1L
  } else if(is.matrix(weights) || is.data.frame(weights)) {
    k <- ncol(weights)
    stopifnot(nrow(weights) == npoints(Xdata))
    weights <- as.data.frame(weights)
    weightnames <- colnames(weights)
  } else {
    k <- 1L
    stopifnot(length(weights) == npoints(Xdata) || length(weights) == 1L)
  }
  #' infinite bandwidth
  if(bandwidth.is.infinite(sigma)) {
    #' uniform estimate: total weight spread over the data window
    single <- is.null(dim(weights))
    totwt <- if(is.null(weights)) npoints(Xdata) else
             if(single) sum(weights) else colSums(weights)
    if(!edge) totwt <- 0 * totwt
    lam <- totwt/area.owin(Window(Xdata))
    result <- if(single) rep(lam, npoints(Xquery)) else
              matrix(lam, npoints(Xquery), length(lam),
                     byrow=TRUE, dimnames=list(NULL, colnames(weights)))
    return(result)
  }
  # evaluate edge correction weights at points
  if(edge) {
    win <- Xdata$window
    if(diggle) {
      ## edge correction weights are attached to data points
      xedge <- Xdata
    } else {
      ## edge correction weights are applied at query points
      xedge <- Xquery
      if(!all(inside.owin(Xquery, , win)))
        stop(paste("Edge correction is not possible:",
                   "some query points lie outside the data window"),
             call.=FALSE)
    }
    if(isgauss && is.null(varcov) && win$type == "rectangle") {
      ## evaluate Gaussian probabilities directly
      xr <- win$xrange
      yr <- win$yrange
      xx <- xedge$x
      yy <- xedge$y
      xprob <-
        pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma)
      yprob <-
        pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma)
      edgeweight <- xprob * yprob
    } else {
      ## evaluate edge correction weights
      edg <- second.moment.calc(Xdata, what="edge",
                                kernel=kernel, scalekernel=scalekernel,
                                sigma=sigma, varcov=varcov)
      edgeweight <- safelookup(edg, xedge, warn=FALSE)
    }
    if(diggle) {
      ## Diggle edge correction
      ## edgeweight is attached to each data point
      if(is.null(weights)) {
        k <- 1L
        weights <- 1/edgeweight
      } else {
        weights <- weights/edgeweight
      }
    }
  }
  ## cutoff distance (beyond which the kernel value is treated as zero)
  ## NB: input argument 'cutoff' is either NULL or
  ##     an absolute distance (if scalekernel=FALSE)
  ##     a number of standard deviations (if scalekernel=TRUE)
  cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov,
                           scalekernel=scalekernel, cutoff=cutoff,
                           fatal=TRUE)
  ## cutoff is now an absolute distance
  ndata <- npoints(Xdata)
  nquery <- npoints(Xquery)
  if(!isgauss) {
    ## .................. non-Gaussian kernel ........................
    close <- crosspairs(Xdata, Xquery, cutoff)
    contrib <- evaluate2Dkernel(kernel, close$dx, close$dy,
                                sigma=sigma, varcov=varcov,
                                scalekernel=scalekernel, ...)
    ## sum the (weighted) contributions, grouped by query point j
    i <- close$i
    j <- close$j
    jfac <- factor(j, levels=seq_len(nquery))
    if(is.null(weights)) {
      result <- tapplysum(contrib, list(jfac))
    } else if(k == 1L) {
      wcontrib <- contrib * weights[i]
      result <- tapplysum(wcontrib, list(jfac))
    } else {
      result <- matrix(, nquery, k)
      for(kk in 1:k) {
        wcontribkk <- contrib * weights[i, kk]
        result[,kk] <- tapplysum(wcontribkk, list(jfac))
      }
    }
  } else {
    ## ................. Gaussian kernel ...................
    ## handled by C code; dispatch on isotropic/anisotropic and weight shape
    result <- if(k == 1L) numeric(nquery) else matrix(, nquery, k)
    ## coordinates
    xq <- Xquery$x
    yq <- Xquery$y
    xd <- Xdata$x
    yd <- Xdata$y
    if(!sorted) {
      ## sort into increasing order of x coordinate (required by C code)
      ooq <- fave.order(Xquery$x)
      xq <- xq[ooq]
      yq <- yq[ooq]
      ood <- fave.order(Xdata$x)
      xd <- xd[ood]
      yd <- yd[ood]
    }
    if(is.null(varcov)) {
      ## isotropic kernel
      if(is.null(weights)) {
        zz <- .C("crdenspt",
                 nquery = as.integer(nquery),
                 xq = as.double(xq),
                 yq = as.double(yq),
                 ndata = as.integer(ndata),
                 xd = as.double(xd),
                 yd = as.double(yd),
                 rmaxi = as.double(cutoff),
                 sig = as.double(sigma),
                 result = as.double(double(nquery)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[ooq] <- zz$result
      } else if(k == 1L) {
        ## vector of weights
        wtsort <- if(sorted) weights else weights[ood]
        zz <- .C("wtcrdenspt",
                 nquery = as.integer(nquery),
                 xq = as.double(xq),
                 yq = as.double(yq),
                 ndata = as.integer(ndata),
                 xd = as.double(xd),
                 yd = as.double(yd),
                 wd = as.double(wtsort),
                 rmaxi = as.double(cutoff),
                 sig = as.double(sigma),
                 result = as.double(double(nquery)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[ooq] <- zz$result
      } else {
        ## matrix of weights: one C call per column
        wtsort <- if(sorted) weights else weights[ood, ]
        for(j in 1:k) {
          zz <- .C("wtcrdenspt",
                   nquery = as.integer(nquery),
                   xq = as.double(xq),
                   yq = as.double(yq),
                   ndata = as.integer(ndata),
                   xd = as.double(xd),
                   yd = as.double(yd),
                   wd = as.double(wtsort[,j]),
                   rmaxi = as.double(cutoff),
                   sig = as.double(sigma),
                   result = as.double(double(nquery)),
                   PACKAGE = "spatstat")
          if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result
        }
        colnames(result) <- weightnames
      }
    } else {
      ## anisotropic kernel: pass det and flattened inverse of varcov
      detSigma <- det(varcov)
      Sinv <- solve(varcov)
      flatSinv <- as.vector(t(Sinv))
      if(is.null(weights)) {
        zz <- .C("acrdenspt",
                 nquery = as.integer(nquery),
                 xq = as.double(xq),
                 yq = as.double(yq),
                 ndata = as.integer(ndata),
                 xd = as.double(xd),
                 yd = as.double(yd),
                 rmaxi = as.double(cutoff),
                 detsigma = as.double(detSigma),
                 sinv = as.double(flatSinv),
                 result = as.double(double(nquery)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[ooq] <- zz$result
      } else if(k == 1L) {
        ## vector of weights
        wtsort <- if(sorted) weights else weights[ood]
        zz <- .C("awtcrdenspt",
                 nquery = as.integer(nquery),
                 xq = as.double(xq),
                 yq = as.double(yq),
                 ndata = as.integer(ndata),
                 xd = as.double(xd),
                 yd = as.double(yd),
                 wd = as.double(wtsort),
                 rmaxi = as.double(cutoff),
                 detsigma = as.double(detSigma),
                 sinv = as.double(flatSinv),
                 result = as.double(double(nquery)),
                 PACKAGE = "spatstat")
        if(sorted) result <- zz$result else result[ooq] <- zz$result
      } else {
        ## matrix of weights: one C call per column
        wtsort <- if(sorted) weights else weights[ood, ]
        for(j in 1:k) {
          zz <- .C("awtcrdenspt",
                   nquery = as.integer(nquery),
                   xq = as.double(xq),
                   yq = as.double(yq),
                   ndata = as.integer(ndata),
                   xd = as.double(xd),
                   yd = as.double(yd),
                   wd = as.double(wtsort[,j]),
                   rmaxi = as.double(cutoff),
                   detsigma = as.double(detSigma),
                   sinv = as.double(flatSinv),
                   result = as.double(double(nquery)),
                   PACKAGE = "spatstat")
          if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result
        }
        colnames(result) <- weightnames
      }
    }
  }
  # ........ Edge correction ........................................
  if(edge && !diggle)
    result <- result/edgeweight
  # tack on bandwidth
  attr(result, "sigma") <- sigma
  attr(result, "varcov") <- varcov
  #
  return(result)
}

## TRUE iff 'sigma' is non-empty and all its entries are Inf
bandwidth.is.infinite <- function(sigma) {
  sigma <- as.numeric(sigma)
  return((length(sigma) > 0) && all(sigma == Inf))
}

spatstat/R/pcfinhom.R0000644000176200001440000001650413471743361014263 0ustar liggesusers
#
# pcfinhom.R
#
# $Revision: 1.23 $ $Date: 2019/05/24 10:24:48 $
#
# inhomogeneous pair correlation function of point pattern
#
#
pcfinhom <- function(X, lambda=NULL, ..., r=NULL,
                     kernel="epanechnikov",
                     bw=NULL, stoyan=0.15,
                     correction=c("translate", "Ripley"),
                     divisor=c("r","d"),
                     renormalise=TRUE, normpower=1,
                     update=TRUE, leaveoneout=TRUE,
                     reciplambda=NULL,
                     sigma=NULL, varcov=NULL, close=NULL) {
  verifyclass(X, "ppp")
  # r.override <- !is.null(r)
  miss.update <- missing(update)
  win <- X$window
  areaW <- area(win)
  npts <- npoints(X)
  kernel <- match.kernel(kernel)
  correction.given <- !missing(correction)
  correction <- pickoption("correction", correction,
                           c(isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             good="good",
                             best="best"),
                           multi=TRUE)
  if("good" %in% correction)
    correction[correction == "good"] <- good.correction.K(X)
  correction <- implemented.for.K(correction, win$type, correction.given)
  divisor <- match.arg(divisor)
  ## bandwidth: 'hmax' is an upper bound on the kernel half-width
  if(is.null(bw) && kernel=="epanechnikov") {
    # Stoyan & Stoyan 1995, eq (15.16), page 285
    h <- stoyan /sqrt(npts/areaW)
    hmax <- h
    # conversion to standard deviation
    bw <- h/sqrt(5)
  } else if(is.numeric(bw)) {
    # standard deviation of kernel specified
    # upper bound on half-width
    hmax <- 3 * bw
  } else {
    # data-dependent bandwidth selection: guess upper bound on half-width
    hmax <- 2 * stoyan /sqrt(npts/areaW)
  }
  ########## intensity values #########################
  dangerous <- c("lambda", "reciplambda")
  danger <- TRUE
  if(npts == 0) {
    lambda <- reciplambda <- numeric(0)
    danger <- FALSE
  } else if(missing(lambda) && is.null(reciplambda)) {
    #
# No intensity data provided
    danger <- FALSE
    # Estimate density by leave-one-out kernel smoothing
    lambda <- density(X, ..., sigma=sigma, varcov=varcov,
                      at="points", leaveoneout=TRUE)
    lambda <- as.numeric(lambda)
    reciplambda <- 1/lambda
  } else if(!is.null(reciplambda)) {
    # 1/lambda values provided
    if(is.im(reciplambda))
      reciplambda <- safelookup(reciplambda, X)
    else if(is.function(reciplambda))
      reciplambda <- reciplambda(X$x, X$y)
    else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda)))
      check.nvector(reciplambda, npts)
    else stop(paste(sQuote("reciplambda"),
                    "should be a vector, a pixel image, or a function"))
  } else {
    # lambda values provided
    if(is.im(lambda))
      lambda <- safelookup(lambda, X)
    else if(is.ppm(lambda) || is.kppm(lambda) || is.dppm(lambda)) {
      ## lambda is a fitted model; optionally refit it to X first
      model <- lambda
      if(!update) {
        ## just use intensity of fitted model
        lambda <- predict(model, locations=X, type="trend")
      } else {
        if(is.ppm(model)) {
          model <- update(model, Q=X)
          lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
        } else {
          model <- update(model, X=X)
          lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout)
        }
        danger <- FALSE
        if(miss.update)
          warn.once(key="pcfinhom.update",
                    "The behaviour of pcfinhom when lambda is a ppm object",
                    "has changed (in spatstat 1.45-0 and later).",
                    "See help(pcfinhom)")
      }
    } else if(is.function(lambda))
      lambda <- lambda(X$x, X$y)
    else if(is.numeric(lambda) && is.vector(as.numeric(lambda)))
      check.nvector(lambda, npts)
    else stop(paste(sQuote("lambda"),
                    "should be a vector, a pixel image, a function, or a fitted model"))
    # evaluate reciprocal
    reciplambda <- 1/lambda
  }
  # renormalise
  if(renormalise && npts > 0) {
    check.1.real(normpower)
    stopifnot(normpower %in% 1:2)
    renorm.factor <- (areaW/sum(reciplambda))^normpower
  }
  ########## r values ############################
  # handle arguments r and breaks
  rmaxdefault <- rmax.rule("K", win, lambda)
  breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault)
  if(!(breaks$even))
    stop("r values must be evenly spaced")
  # extract r values
  r <- breaks$r
  rmax <- breaks$max
  # recommended range of r values for plotting
  alim <- c(0, min(rmax, rmaxdefault))
  ########## smoothing parameters for pcf ############################
  # arguments for 'density'
  denargs <- resolve.defaults(list(kernel=kernel, bw=bw),
                              list(...),
                              list(n=length(r), from=0, to=rmax))
  #################################################
  # compute pairwise distances
  if(npts > 1) {
    if(is.null(close)) {
      #' find close pairs
      close <- closepairs(X, rmax+hmax)
    } else {
      #' check 'close' has correct format
      needed <- c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d")
      if(any(is.na(match(needed, names(close)))))
        stop(paste("Argument", sQuote("close"),
                   "should have components named",
                   commasep(sQuote(needed))),
             call.=FALSE)
    }
    dIJ <- close$d
    I <- close$i
    J <- close$j
    XI <- ppp(close$xi, close$yi, window=win, check=FALSE)
    ## weight for each close pair: product of reciprocal intensities
    wIJ <- reciplambda[I] * reciplambda[J]
  } else {
    undefined <- rep(NaN, length(r))
  }
  # initialise fv object
  df <- data.frame(r=r, theo=rep.int(1,length(r)))
  out <- fv(df, "r",
            quote(g[inhom](r)), "theo", ,
            alim,
            c("r","{%s[%s]^{pois}}(r)"),
            c("distance argument r", "theoretical Poisson %s"),
            fname=c("g", "inhom"))
  ###### compute #######
  if(any(correction=="translate")) {
    # translation correction
    if(npts > 1) {
      XJ <- ppp(close$xj, close$yj, window=win, check=FALSE)
      edgewt <- edge.Trans(XI, XJ, paired=TRUE)
      gT <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g
      if(renormalise) gT <- gT * renorm.factor
    } else gT <- undefined
    out <- bind.fv(out,
                   data.frame(trans=gT),
                   "{hat(%s)[%s]^{Trans}}(r)",
                   "translation-corrected estimate of %s",
                   "trans")
  }
  if(any(correction=="isotropic")) {
    # Ripley isotropic correction
    if(npts > 1) {
      edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1))
      gR <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g
      if(renormalise) gR <- gR * renorm.factor
    } else gR <- undefined
    out <- bind.fv(out,
                   data.frame(iso=gR),
                   "{hat(%s)[%s]^{Ripley}}(r)",
                   "isotropic-corrected estimate of %s",
                   "iso")
  }
  # sanity check
  if(is.null(out)) {
    warning("Nothing computed - no edge corrections chosen")
    return(NULL)
  }
  # which corrections have been computed?
  corrxns <- rev(setdiff(names(out), "r"))
  # default is to display them all
  formula(out) <- . ~ r
  fvnames(out, ".") <- corrxns
  unitname(out) <- unitname(X)
  if(danger)
    attr(out, "dangerous") <- dangerous
  return(out)
}

spatstat/R/clickpoly.R0000644000176200001440000000417413333543254014445 0ustar liggesusers
#
# clickpoly.R
#
#
# $Revision: 1.10 $ $Date: 2015/10/21 09:06:57 $
#
#
## clickpoly:
## interactively build a polygonal window of 'np' polygons by clicking
## vertices in the current graphics device; returns an 'owin'.
clickpoly <- function(add=FALSE, nv=NULL, np=1, ...) {
  if((!add) | dev.cur() == 1L) {
    ## start a fresh unit-square canvas
    plot(0,0,type="n", xlab="", ylab="",
         xlim=c(0,1), ylim=c(0,1), asp=1.0, axes=FALSE)
    rect(0,0,1,1)
  }
  spatstatLocator(0) ## check locator is enabled
  gon <- list()
  stopifnot(np >= 1)
  #
  for(i in 1:np) {
    if(np > 1)
      cat(paste(".... Polygon number", i, ".....\n"))
    if(!is.null(nv))
      cat(paste("click", nv, "times in window\n"))
    else
      cat(paste("to add points: click left mouse button in window\n",
                " to exit: press ESC or click middle mouse button\n",
                "[The last point should NOT repeat the first point]\n"))
    xy <- do.call(spatstatLocator,
                  resolve.defaults(if(!is.null(nv)) list(n=nv) else list(),
                                   list(...),
                                   list(type="o")))
    ## ensure anticlockwise orientation (positive area)
    if(Area.xypolygon(xy) < 0)
      xy <- lapply(xy, rev)
    gon[[i]] <- xy
    plotPolygonBdry(owin(poly=xy), ...)
  }
  result <- owin(poly=gon)
  plotPolygonBdry(result, ...)
  return(result)
}

## clickbox:
## interactively create a rectangular window from two clicked corners.
clickbox <- function(add=TRUE, ...) {
  spatstatLocator(0) # check locator enabled
  cat("Click two corners of a box\n")
  if(!add)
    plot(owin(), main="Click two corners of a box")
  a <- try(spatstatLocator(1), silent=TRUE)
  if(inherits(a, "try-error")) {
    ## add=TRUE but there is no current plot
    plot.new()
    a <- spatstatLocator(1, ...)
  }
  abline(v=a$x)
  abline(h=a$y)
  b <- spatstatLocator(1, ...)
  abline(v=b$x)
  abline(h=b$y)
  ab <- concatxy(a, b)
  result <- owin(range(ab$x), range(ab$y))
  plotPolygonBdry(result, ...)
  return(result)
}

## plotPolygonBdry: draw the boundary of window 'x' on the current plot
plotPolygonBdry <- function(x, ...)
{
  # filter appropriate arguments
  argh <- list(...)
  polyPars <- union(graphicsPars("lines"), graphicsPars("owin"))
  polyargs <- argh[names(argh) %in% polyPars]
  # change 'col' to 'border'
  nama <- names(polyargs)
  if(any(nama == "col") && !any(nama == "border"))
    names(polyargs)[nama == "col"] <- "border"
  # plot
  do.call(plot.owin, append(list(x=x, add=TRUE), polyargs))
}

spatstat/R/laslett.R0000644000176200001440000002671713333543255014124 0ustar liggesusers
#' Calculating Laslett's transform
#' Original by Kassel Hingee
#' Adapted by Adrian Baddeley
#' Copyright (C) 2016 Kassel Hingee and Adrian Baddeley

# $Revision: 1.8 $ $Date: 2017/02/07 08:12:05 $

#' laslett:
#' compute Laslett's transform of a window or image 'X'.
#' Tangents of the requested 'type' are found by first rotating X so that
#' the problem becomes the "lower" case, then rotating the results back.
#' Dispatches to polyLaslett (exact, polygonal) or maskLaslett (discretised).
laslett <- function(X, ..., verbose=FALSE, plotit=TRUE, discretise=FALSE,
                    type = c("lower", "upper", "left", "right")){
  #' validate X and convert to a logical matrix
  type <- match.arg(type)
  oldX <- X
  if(is.im(X)) {
    X <- solutionset(X != 0)
  } else if(!is.owin(X))
    stop("X should be an image or a window", call.=FALSE)
  if(type != "lower") {
    #' reduce to the 'lower' case by rotation
    nrot <- match(type, c("right", "upper", "left"))
    theta <- nrot * pi/2
    X <- rotate(X, angle=-theta)
  }
  if(!discretise && (is.polygonal(X) || is.rectangle(X))) {
    result <- polyLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE)
  } else {
    result <- maskLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE)
  }
  if(type != "lower") {
    #' rotate back
    prods <- c("TanOld", "TanNew", "Rect")
    result[prods] <- lapply(result[prods], rotate, angle=theta)
  }
  if(plotit)
    plot(result, ...)
  result$type <- type
  return(result)
}

#' maskLaslett: Laslett transform of a binary mask.
maskLaslett <- local({

  ## running total of the entries of x strictly to the right of each position
  sumtoright <- function(x) { rev(cumsum(rev(x))) - x }

  maskLaslett <- function(X, ..., eps=NULL, dimyx=NULL, xy=NULL,
                          oldX=X, verbose=FALSE, plotit=TRUE) {
    if(is.null(oldX)) oldX <- X
    X <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy)
    unitX <- unitname(X)
    if(is.empty(X))
      stop("Empty window!")
    M <- as.matrix(X)
    #' ....... Compute transformed set ...................
    #' Total width of transformed set on each row
    TotFalse <- rowSums(!M)
    ## compute transformed set
    Laz <- (col(M) <= TotFalse[row(M)])
    Laz <- owin(mask=Laz, xrange=X$xrange, yrange=X$yrange, unitname=unitX)
    #' Largest sub-rectangle of transformed set
    width <- min(TotFalse) * X$xstep
    Rect <- owin(X$xrange[1L] + c(0, width), X$yrange, unitname=unitX)
    #' Along each horizontal line (row),
    #' compute a running count of FALSE pixels.
    #' This is the mapping for the set transform
    #' (the value at any pixel gives the new column number
    #' for the transformed pixel)
    CumulFalse <- t(apply(!M, 1L, cumsum))
    #' discard one column for consistency with other matrices below
    CumulFalse <- CumulFalse[,-1L,drop=FALSE]
    #' ....... Find lower tangent points .................
    #' compute discrete gradient in x direction
    G <- t(apply(M, 1, diff))
    #' detect entries, exits, changes
    Exit <- (G == -1)
    Enter <- (G == 1)
    Change <- Exit | Enter
    #' form a running total of the number of pixels inside X
    #' to the **right** of the current pixel
    FutureInside <- t(apply(M, 1, sumtoright))[,-1L,drop=FALSE]
    #' find locations of changes
    loc <- which(Change, arr.ind=TRUE)
    #' don't consider entries/exits in the bottom row
    ok <- (loc[,"row"] > 1)
    loc <- loc[ok, , drop=FALSE]
    #' corresponding locations on horizontal line below current line
    below <- cbind(loc[,"row"]-1L, loc[,"col"])
    #' look up data at these locations
    df <- data.frame(row=loc[,"row"],
                     col=loc[,"col"],
                     newcol=CumulFalse[loc],
                     Exit=Exit[loc],
                     Enter=Enter[loc],
                     InsideBelow=M[below],
                     FutureInsideBelow=FutureInside[below])
    #' identify candidates for tangents
    df$IsCandidate <-
      with(df, Enter & !InsideBelow & (newcol < TotFalse[row]))
    #' collect data for each horizontal line (row)
    #' then sort by increasing x (column) within each line.
    oo <- with(df, order(row, col))
    df <- df[oo, , drop=FALSE]
    #' divide data into one piece for each hztal line
    g <- split(df, df$row)
    #' Initialise empty list of tangent points
    tangents <- data.frame(row=integer(0), col=integer(0), newcol=integer(0))
    #' process each hztal line
    for(p in g) {
      tangents <- with(p, {
        candidates <- which(IsCandidate) # indices are row numbers in 'p'
        if(verbose) print(p)
        exits <- which(Exit)
        for(i in candidates) {
          if(verbose) cat(paste("candidate", i, "\n"))
          if(any(found <- (exits > i))) {
            j <- exits[min(which(found))]
            if(verbose) cat(paste("next exit:", j, "\n"))
            #' check no pixels inside X in row below between i and j
            if(FutureInsideBelow[i] == FutureInsideBelow[j]) {
              if(verbose)
                cat(paste("Tangent (1) at row=", row[i],
                          "col=", col[i], "\n"))
              tangents <- rbind(tangents,
                                data.frame(row=row[i], col=col[i],
                                           newcol=newcol[i]))
            }
          } else {
            #' no exits on this row
            if(verbose) cat("no subsequent exit\n")
            if(FutureInsideBelow[i] == 0) {
              if(verbose)
                cat(paste("Tangent (2) at row=", row[i],
                          "col=", col[i], "\n"))
              tangents <- rbind(tangents,
                                data.frame(row=row[i], col=col[i],
                                           newcol=newcol[i]))
            }
          }
        }
        if(verbose) cat("====\n")
        tangents
      })
    }
    ## convert pixel indices to coordinates
    tangents$oldx <- X$xcol[tangents$col]
    tangents$newx <- X$xcol[tangents$newcol]
    tangents$y    <- X$yrow[tangents$row]
    TanOld <- with(tangents, ppp(oldx, y, window=Frame(X), unitname=unitX))
    ## BUG FIX: 'unitname=unitX' was previously placed outside the ppp() call,
    ## making it an extra argument to with() where it was silently ignored.
    ## Pass it to ppp() so TanNew carries the unit name, as TanOld does.
    TanNew <- with(tangents, ppp(newx, y, window=Laz, unitname=unitX))
    result <- list(oldX=oldX,
                   TanOld=TanOld, TanNew=TanNew,
                   Rect=Rect,
                   df=tangents)
    class(result) <- c("laslett", class(result))
    if(plotit)
      plot(result, ...)
    return(result)
  }

  maskLaslett
})

print.laslett <- function(x, ...)
{
  ## print method for class 'laslett'
  cat("Laslett Transform\n")
  cat("\nOriginal object:\n")
  print(x$oldX)
  cat("\nTransformed set:\n")
  W <- Window(x$TanNew)
  print(W)
  unitinfo <- summary(unitname(W))
  cat("\nTransformed area:", area.owin(W),
      "square", unitinfo$plural, unitinfo$explain,
      fill=TRUE)
  cat("\n")
  type <- x$type %orifnull% "lower"
  cat(npoints(x$TanNew), type, "tangent points found.", fill=TRUE)
  return(invisible(NULL))
}

## plot method for class 'laslett':
## two panels, the original set with its tangent points and the
## transformed set with the mapped tangent points and maximal rectangle.
plot.laslett <- function(x, ...,
                         Xpars=list(box=TRUE, col="grey"),
                         pointpars=list(pch=3, cols="blue"),
                         rectpars=list(lty=3, border="green")) {
  Display <- with(x,
                  solist(Original=
                         layered(oldX, TanOld,
                                 plotargs=list(Xpars, pointpars)),
                         Transformed=
                         layered(TanNew, Rect,
                                 plotargs=list(pointpars, rectpars))))
  #' ignore arguments intended for as.mask
  argh <- list(...)
  if(any(bad <- names(argh) %in% c("eps", "dimyx", "xy")))
    argh <- argh[!bad]
  do.call(plot,
          resolve.defaults(list(x=Display),
                           argh,
                           list(main="", mar.panel=0, hsep=1,
                                equal.scales=TRUE)))
  return(invisible(NULL))
}

## polyLaslett:
## Laslett transform of a polygonal window, computed exactly by clipping
## horizontal segments through each vertex against the window.
polyLaslett <- function(X, ..., oldX=X, verbose=FALSE, plotit=TRUE) {
  X <- as.polygonal(X)
  if(is.empty(X))
    stop("Empty window!")
  unitX <- unitname(X)
  # expand frame slightly
  B <- Frame(X)
  B <- grow.rectangle(B, max(sidelengths(B))/8)
  x0 <- B$xrange[1L]
  x1 <- B$xrange[2L]
  # extract vertices
  v <- vertices(X)
  nv <- length(v$x)
  # .......... compute transformed set .....................
  # make horizontal segments from each vertex to sides of box
  left <- with(v, psp(rep(x0,nv), y, x, y,
                      window=B, marks=1:nv, check=FALSE))
  right <- with(v, psp(x, y, rep(x1,nv), y,
                       window=B, marks=1:nv, check=FALSE))
  # intersect each horizontal segment with the window
  if(verbose) cat("Processing", nv, "polygon vertices... ")
  clipleft <- clip.psp(left, X)
  clipright <- clip.psp(right, X)
  if(verbose) cat("Done.\n")
  # calculate lengths of clipped segments, and group by vertex.
  # marks indicate which hztal segment was the parent of each piece.
  lenleft <- tapply(lengths.psp(clipleft),
                    factor(marks(clipleft), levels=1:nv),
                    sum)
  lenright <- tapply(lengths.psp(clipright),
                     factor(marks(clipright), levels=1:nv),
                     sum)
  lenleft[is.na(lenleft)] <- 0
  lenright[is.na(lenright)] <- 0
  emptylenleft <- lengths.psp(left) - lenleft
  emptylenright <- lengths.psp(right) - lenright
  # The transformed polygon
  isrightmost <- (lenright == 0)
  yright <- v$y[isrightmost]
  xright <- x0 + (emptylenleft+emptylenright)[isrightmost]
  minxright <- min(xright) # right margin of largest rectangle
  ord <- order(yright)
  Ty <- yright[ord]
  Tx <- xright[ord]
  nT <- length(Ty)
  ## close the polygon back to the left edge if necessary
  if(Tx[nT] > x0) {
    Ty <- c(Ty, Ty[nT])
    Tx <- c(Tx, x0)
  }
  if(Tx[1L] > x0) {
    Ty <- c(Ty[1L], Ty)
    Tx <- c(x0, Tx)
  }
  TX <- owin(B$xrange, B$yrange, poly=list(x=Tx, y=Ty), check=FALSE)
  TX <- TX[Frame(X)]
  # .......... identify lower tangents .....................
  V <- as.ppp(v, W=Frame(X), unitname=unitX)
  is.candidate <- is.tangent <- logical(nv)
  # apply simple criteria for ruling in or out
  Plist <- X$bdry
  cumnv <- 0
  for(i in seq_along(Plist)) {
    P <- Plist[[i]]
    xx <- P$x
    yy <- P$y
    nn <- length(xx)
    #    xnext <- c(xx[-1L], xx[1L])
    ynext <- c(yy[-1L], yy[1L])
    #    xprev <- c(xx[nn], xx[-nn])
    yprev <- c(yy[nn], yy[-nn])
    ## local minimum in y is a candidate (inequalities differ for holes)
    is.candidate[cumnv + seq_len(nn)] <-
      if(!is.hole.xypolygon(P)) {
        (yprev > yy & ynext >= yy)
      } else {
        (yprev >= yy & ynext > yy)
      }
    cumnv <- cumnv + nn
  }
  ## was.candidate <- is.candidate
  #' reject candidates lying too close to boundary
  tooclose <- (bdist.points(V[is.candidate]) < diameter(Frame(V))/1000)
  is.candidate[is.candidate][tooclose] <- FALSE
  #' evaluate candidate points
  #' make tiny boxes around vertex
  candidates <- which(is.candidate)
  nc <- length(candidates)
  nnd <- nndist(V)
  if(verbose) {
    cat(paste("Processing", nc, "tangent candidates ... "))
    pstate <- list()
  }
  tiny <- .Machine$double.eps
  for(j in 1:nc) {
    i <- candidates[j]
    eps <- nnd[i]/16
    xi <- v$x[i]
    yi <- v$y[i]
    Below <- owin(xi + c(-eps,eps), yi + c(-eps, 0))
    #    Above <- owin(xi + c(-eps, eps), yi + c(0, eps))
    UpLeft <- owin(xi + c(-eps, 0), yi + c(0, eps))
    ## a lower tangent has (essentially) no window area just below,
    ## and not all of the area just up-and-left of the vertex
    is.tangent[i] <- (overlap.owin(X, Below) <= tiny) &&
                     (overlap.owin(X, UpLeft) < eps^2)
    if(verbose)
      pstate <- progressreport(j, nc, state=pstate)
  }
  if(verbose)
    cat(paste("Found", sum(is.tangent), "tangents\n"))
  TanOld <- V[is.tangent]
  ynew <- TanOld$y
  xnew <- x0 + emptylenleft[is.tangent]
  TanNew <- ppp(xnew, ynew, window=TX, check=FALSE, unitname=unitX)
  # maximal rectangle
  Rect <- owin(c(X$xrange[1L], minxright), X$yrange, unitname=unitX)
  #
  df <- data.frame(xold=TanOld$x, xnew=TanNew$x, y=TanNew$y)
  #
  result <- list(oldX=oldX,
                 TanOld=TanOld, TanNew=TanNew,
                 Rect=Rect,
                 df=df)
  class(result) <- c("laslett", class(result))
  if(plotit)
    plot(result, ...)
  return(result)
}

spatstat/R/percy.R0000644000176200001440000000542513333543255013577 0ustar liggesusers
## percus.R
##
## Percus-Yevick style approximations to pcf and K
##
## $Revision: 1.4 $ $Date: 2014/01/31 10:10:19 $

## pcfmodel method for fitted Gibbs models ('ppm')
pcfmodel.ppm <- local({

  pcfmodel.ppm <- function(model, ...)
{
    ## returns a function of r giving the approximate pair correlation
    ## of the fitted stationary pairwise-interaction model
    if(is.multitype(model))
      stop("Not yet implemented for multitype models")
    if(!is.stationary(model))
      stop("Model must be stationary")
    if(is.poisson(model))
      return(function(r) rep(1, length(r)))
    inte <- as.interact(model)
    if(inte$family$name != "pairwise")
      stop("Only implemented for pairwise-interaction models")
    lambda <- intensity(model)
    beta <- exp(coef(model)[1])
    par <- inte$par
    pot <- inte$pot
    f <- fitin(model)
    Vcoefs <- f$coefs[f$Vnames]
    Mayer <- inte$Mayer
    G <- Mayer(Vcoefs, inte)
    irange <- reach(inte, epsilon=1e-6)
    ## use the interaction's own Percy function if available,
    ## otherwise the generic pairwise computation
    G2fun <- inte$Percy
    testit <- resolve.1.default(list(testit=FALSE), list(...))
    if(testit || is.null(G2fun))
      G2fun <- pairwisePercy
    fun <- function(r) {
      pcfapprox(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange)
    }
    return(fun)
  }

  ## Percus-Yevick style approximation to g(r)
  pcfapprox <- function(r, beta, lambda, pot, par, Vcoefs, G, G2fun, irange) {
    as.numeric((beta/lambda)^2 *
               exp(logpairpot(r, pot, par, Vcoefs)
                   - lambda * G2fun(r, Vcoefs, par,
                                    pot=pot, irange=irange, G=G)))
  }

  ## log of the fitted pairwise interaction at distance r
  logpairpot <- function(r, pot, par, Vcoefs) {
    as.numeric(pot(matrix(r, ncol=1), par) %*% Vcoefs)
  }

  negpair <- function(x,y, pot, par, Vcoefs) {
    ## evaluate 1 - g(x,y)
    ## where g(x,y) is pair interaction between (0,0) and (x,y)
    1 - exp(logpairpot(sqrt(x^2+y^2), pot, par, Vcoefs))
  }

  ## generic second-order term: self-convolution of (1 - pair interaction),
  ## evaluated on a pixel grid and read off along the horizontal axis
  pairwisePercy <- function(r, Vcoefs, par, ..., G, pot, irange, dimyx=256) {
    S <- max(max(r), irange)
    ng <- as.im(negpair, square(c(-S,S)),
                pot=pot, par=par, Vcoefs=Vcoefs,
                dimyx=dimyx)
    ng2 <- convolve.im(ng)
    rr <- seq(min(r), max(r), length=dimyx[1])
    yy <- ng2[list(x=rr, y=rep.int(0, dimyx[1]))]
    zz <- 2 * G - yy
    z <- approx(rr, zz, r)$y
    return(z)
  }

  pcfmodel.ppm
})

## Kmodel method for fitted Gibbs models:
## K function obtained by integrating the approximate pcf
Kmodel.ppm <- local({

  Kmodel.ppm <- function(model, ...) {
    if(is.poisson(model))
      return(function(r) { pi * r^2 })
    pc <- pcfmodel(model, ...)
    K <- function(r) pcf2K(r, pc)
    return(K)
  }

  pcf2K <- function(r, pc) {
    ## integrate the pair correlation function to obtain the K-function
    if(length(r) == 1) {
      ## definite integral
      spcfs <- function(s) { s * pc(s) }
      y <- 2 * pi * integrate(spcfs, lower=0, upper=r)$value
    } else {
      ## indefinite integral
      rr <- seq(0, max(r), length=1025)
      dr <- max(r)/(length(rr) - 1)
      ff <- 2 * pi * rr * pc(rr)
      yy <- dr * cumsum(ff)
      y <- approx(rr, yy, r)$y
    }
    return(y)
  }

  Kmodel.ppm
})

spatstat/R/ssf.R0000644000176200001440000001573713362775427013261 0ustar liggesusers
#
# ssf.R
#
# spatially sampled functions
#
# $Revision: 1.19 $ $Date: 2018/03/07 03:31:17 $
#

## ssf: create a spatially sampled function:
## a point pattern 'loc' marked by a matrix of function values 'val'
## (one row per point).  Attribute "ok" flags rows with no missing values.
ssf <- function(loc, val) {
  stopifnot(is.ppp(loc))
  if(is.function(val))
    val <- val(loc$x, loc$y)
  if(is.data.frame(val))
    val <- as.matrix(val)
  if(!is.matrix(val))
    val <- matrix(val, ncol=1, dimnames=list(NULL, "value"))
  if(nrow(val) != npoints(loc))
    stop("Incompatible lengths")
  result <- loc %mark% val
  class(result) <- c("ssf", class(result))
  attr(result, "ok") <- complete.cases(val)
  return(result)
}

## print method for class 'ssf'
print.ssf <- function(x, ..., brief=FALSE) {
  if(brief) {
    splat("Spatial function sampled at", npoints(x), "locations")
  } else {
    splat("Spatially sampled function")
    cat("Locations:\n\t")
    print(unmark(x))
  }
  val <- marks(x)
  if(!is.matrix(val)) {
    d <- 1
    warning("Internal format error: val is not a matrix")
  } else d <- ncol(val)
  if(!brief) {
    type <- if(d == 1) "Scalar" else paste(d, "-vector", sep="")
    splat(type, "valued function")
  }
  if(d > 1 && !is.null(nama <- colnames(val)))
    splat("Component names:", commasep(sQuote(nama)))
  return(invisible(NULL))
}

summary.ssf <- function(object, ...) {
  z <- NextMethod("summary")
  class(z) <- c("summary.ssf", class(z))
  return(z)
}

print.summary.ssf <- function(x, ...) {
  splat("Spatially sampled function")
  cat("Locations:\n\t")
  NextMethod("print")
}

## image method: display the smoothed function values
image.ssf <- function(x, ...) {
  do.call("plot", resolve.defaults(list(x, how="smoothed"), list(...)))
}

## pixel image of function values by nearest-neighbour interpolation
as.im.ssf <- function(X, ...) nnmark(X, ...)
## as.function method:
## returns a function(x,y) giving the value of the nearest sample point
as.function.ssf <- function(x, ...) {
  X <- x
  mX <- marks(X)
  switch(markformat(X),
         vector = {
           g <- function(x, y=NULL) {
             Y <- xy.coords(x,y)[c("x","y")]
             J <- nncross(Y, X, what="which")
             result <- mX[J]
             return(unname(result))
           }
         },
         dataframe = {
           g <- function(x, y=NULL) {
             Y <- xy.coords(x,y)[c("x","y")]
             J <- nncross(Y, X, what="which")
             result <- mX[J,,drop=FALSE]
             row.names(result) <- NULL
             return(result)
           }
         },
         stop("Marks must be a vector or data.frame"))
  h <- funxy(g, Frame(X))
  return(h)
}

## plot method for class 'ssf'
plot.ssf <- function(x, ...,
                     how=c("smoothed", "nearest", "points"),
                     style = c("image", "contour", "imagecontour"),
                     sigma=NULL, contourargs=list()) {
  xname <- short.deparse(substitute(x))
  how <- match.arg(how)
  style <- match.arg(style)
  otherargs <- list(...)
  # convert to images
  y <- switch(how,
              points = as.ppp(x),
              nearest = nnmark(x),
              smoothed = Smooth(x, sigma=sigma)
              )
  # points plot
  if(how == "points") {
    out <- do.call("plot",
                   resolve.defaults(list(y),
                                    otherargs,
                                    list(main=xname)))
    if(is.null(out)) return(invisible(NULL))
    return(out)
  }
  # image plot
  switch(style,
         image = {
           out <- do.call("plot",
                          resolve.defaults(list(y),
                                           otherargs,
                                           list(main=xname)))
         },
         contour = {
           do.call("plot",
                   resolve.defaults(list(as.owin(x)),
                                    otherargs,
                                    list(main=xname)))
           do.call("contour",
                   resolve.defaults(list(y, add=TRUE),
                                    contourargs))
           out <- NULL
         },
         imagecontour = {
           out <- do.call("plot",
                          resolve.defaults(list(y),
                                           otherargs,
                                           list(main=xname)))
           do.call("contour",
                   resolve.defaults(list(y, add=TRUE),
                                    contourargs))
         })
  return(invisible(out))
}

## contour method: contours of the smoothed function
contour.ssf <- function(x, ..., main, sigma=NULL) {
  if(missing(main))
    main <- short.deparse(substitute(x))
  y <- Smooth(x, sigma=sigma)
  contour(y, ..., main=main)
  return(invisible(NULL))
}

## Smooth method:
## kernel-smooth the sampled values (rows with missing values dropped);
## default bandwidth 1.4 * max nearest-neighbour distance
Smooth.ssf <- function(X, ...) {
  stopifnot(inherits(X, "ssf"))
  ok <- attr(X, "ok")
  Y <- as.ppp(X)[ok]
  argh <- list(...)
  isnul <- as.logical(unlist(lapply(argh, is.null)))
  nonnularg <- argh[!isnul]
  sigma0 <- if(any(c("sigma", "varcov") %in% names(nonnularg)))
            NULL else 1.4 * max(nndist(X))
  Z <- do.call("Smooth.ppp",
               resolve.defaults(list(X = Y),
                                list(...),
                                list(sigma=sigma0),
                                .MatchNull=FALSE))
  # don't take NULL for an answer!
  return(Z)
}

## subset method: i selects sample points, j selects value columns
"[.ssf" <- function(x, i, j, ..., drop) {
  loc <- unmark(x)
  val <- marks(x)
  ok <- attr(x, "ok")
  #
  if(!missing(j))
    val <- val[, j, drop=FALSE]
  if(!missing(i)) {
    # use [.ppp to identify which points are retained
    locn <- loc %mark% seq_len(npoints(loc))
    loci <- locn[i]
    loc <- unmark(loci)
    id <- marks(loci)
    # extract
    val <- val[id, , drop=FALSE]
    ok <- ok[id]
  }
  out <- loc %mark% val
  class(out) <- c("ssf", class(out))
  attr(out, "ok") <- ok
  return(out)
}

as.ppp.ssf <- function(X, ...) {
  class(X) <- "ppp"
  attr(X, "ok") <- NULL
  return(X)
}

## marks method: always returns a matrix of values
marks.ssf <- function(x, ...) {
  val <- x$marks
  if(is.null(dim(val)))
    val <- matrix(val, ncol=1)
  if(is.data.frame(val))
    val <- as.matrix(val)
  return(val)
}

"marks<-.ssf" <- function(x, ..., value) {
  ssf(unmark(x), value)
}

unmark.ssf <- function(X) { unmark(as.ppp(X)) }

## evaluate an expression in the data frame of sampled values;
## results matching the number of points become a new 'ssf'
with.ssf <- function(data, ...) {
  loc <- as.ppp(data)
  val <- marks(data)
  newval <- with(as.data.frame(val), ...)
  if(length(newval) == npoints(loc) ||
     (is.matrix(newval) && nrow(newval) == npoints(loc)))
    return(ssf(loc, newval))
  return(newval)
}

## the following is NOT a method for 'apply' !!
apply.ssf <- function(X, ...) {
  loc <- as.ppp(X)
  val <- marks(X)
  newval <- apply(val, ...)
  if(length(newval) == npoints(loc) ||
     (is.matrix(newval) && nrow(newval) == npoints(loc)))
    return(ssf(loc, newval))
  return(newval)
}

range.ssf <- function(x, ...) range(marks(x), ...)
min.ssf <- function(x, ...) min(marks(x), ...)
max.ssf <- function(x, ...) max(marks(x), ...)
integral.ssf <- function(f, domain=NULL, ..., weights=attr(f, "weights")) { if(!is.null(weights)) { check.nvector(weights, npoints(f), oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, npoints(f)) } if(is.tess(domain)) { result <- sapply(tiles(domain), integral.ssf, f=f, weights=weights) if(length(dim(result)) > 1) result <- t(result) return(result) } if(!is.null(domain)) { ok <- inside.owin(f, w=domain) f <- f[ok,] if(!is.null(weights)) weights <- weights[ok] } y <- marks(f) if(is.null(weights)) { z <- if(!is.matrix(y)) mean(y, na.rm=TRUE) else colMeans(y, na.rm=TRUE) a <- area(Window(f)) } else { z <- if(!is.matrix(y)) weighted.mean(y, w=weights, na.rm=TRUE) else apply(y, 2, weighted.mean, w=weights, na.rm=TRUE) a <- sum(weights) } z[!is.finite(z)] <- 0 return(z * a) } spatstat/R/kernel2d.R0000644000176200001440000001055613342632403014156 0ustar liggesusers#' #' kernel2d.R #' #' Two-dimensional smoothing kernels #' #' $Revision: 1.14 $ $Date: 2018/09/01 09:09:34 $ #' .Spatstat.2D.KernelTable <- list( #' table entries: #' d = density of standardised kernel #' sd = standard deviation of x coordinate, for standardised kernel #' hw = halfwidth of support of standardised kernel gaussian=list( d = function(x,y, ...) { dnorm(x) * dnorm(y) }, sd = 1, hw = 8, symmetric = TRUE), epanechnikov=list( d = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) }, sd = 1/sqrt(6), hw = 1, symmetric = TRUE), quartic=list( d = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 }, sd = 1/sqrt(8), hw = 1, symmetric = TRUE), disc=list( d = function(x,y,...) 
{ (1/pi) * as.numeric(x^2 + y^2 <= 1) }, sd = 1/2, hw = 1, symmetric = TRUE) ) validate2Dkernel <- function(kernel, fatal=TRUE) { if(is.character(match2DkernelName(kernel))) return(TRUE) if(is.im(kernel) || is.function(kernel)) return(TRUE) if(!fatal) return(FALSE) if(is.character(kernel)) stop(paste("Unrecognised choice of kernel", sQuote(kernel), paren(paste("options are", commasep(sQuote(names(.Spatstat.2D.KernelTable)))))), call.=FALSE) stop(paste("kernel should be a character string,", "a pixel image, or a function (x,y)"), call.=FALSE) } match2DkernelName <- function(kernel) { if(!is.character(kernel) || length(kernel) != 1) return(NULL) nama <- names(.Spatstat.2D.KernelTable) m <- pmatch(kernel, nama) if(is.na(m)) return(NULL) return(nama[m]) } lookup2DkernelInfo <- function(kernel) { validate2Dkernel(kernel) kernel <- match2DkernelName(kernel) if(is.null(kernel)) return(NULL) return(.Spatstat.2D.KernelTable[[kernel]]) } evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel)) { info <- lookup2DkernelInfo(kernel) if(scalekernel) { ## kernel adjustment factor sdK <- if(is.character(kernel)) info$sd else 1 ## transform coordinates to x',y' such that kerfun(x', y') ## yields density k(x,y) at desired bandwidth if(is.null(varcov)) { rr <- sdK/sigma x <- x * rr y <- y * rr scalefactor <- rr^2 } else { SinvH <- matrixinvsqrt(varcov) rSinvH <- sdK * SinvH XY <- cbind(x, y) %*% rSinvH x <- XY[,1L] y <- XY[,2L] scalefactor <- det(rSinvH) } } ## now evaluate kernel if(is.character(kernel)) { kerfun <- info$d result <- kerfun(x, y) } else if(is.function(kernel)) { argh <- list(...) 
if(length(argh) > 0) argh <- argh[names(argh) %in% names(formals(kernel))] result <- do.call(kernel, append(list(x, y), argh)) if(anyNA(result)) stop("NA values returned from kernel function") if(length(result) != length(x)) stop("Kernel function returned the wrong number of values") } else if(is.im(kernel)) { result <- kernel[list(x=x, y=y)] if(anyNA(result) || length(result) != length(x)) stop("Domain of kernel image is not large enough") } else stop("Unrecognised format for kernel") if(scalekernel) result <- scalefactor * result return(result) } cutoff2Dkernel <- function(kernel, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel), cutoff=NULL, fatal=FALSE) { info <- lookup2DkernelInfo(kernel) ## if scalekernel = FALSE, 'cutoff' is an absolute distance ## if scalekernel = TRUE, 'cutoff' is expressed in number of s.d. if(scalekernel) { if(is.null(cutoff)) { ## template kernel's standard deviation sdK <- info$sd %orifnull% 1 ## template kernel's halfwidth hwK <- info$hw %orifnull% 8 ## cutoff for kernel with sd=1 cutoff <- hwK/sdK } ## required standard deviation if(!is.null(sigma)) { sig <- sigma } else if(!is.null(varcov)) { lam <- eigen(varcov)$values sig <- sqrt(max(lam)) } else stop("Cannot determine standard deviation") ## cutoff <- cutoff * sig } if(fatal && is.null(cutoff)) stop(paste("The argument", sQuote("cutoff"), "is required", "when a non-Gaussian kernel is specified", "and scalekernel=FALSE"), call.=FALSE) return(cutoff) } spatstat/R/hierhard.R0000644000176200001440000001477613333543255014254 0ustar liggesusers## ## hierhard.R ## ## $Revision: 1.4 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical hard core process ## ## ------------------------------------------------------------------- ## HierHard <- local({ # ......... 
define interaction potential HHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(h) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] ## corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- TRUE return(z) } #### end of 'pot' function #### # Set up basic object except for family and parameters BlankHHobject <- list( name = "Hierarchical hard core process", creator = "HierHard", family = "hierpair.family", # evaluated later pot = HHpotential, par = list(types=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of hardcore distances", "hierarchical order"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) if(is.null(hradii)) { marx <- marks(X) d <- nndist(X, by=marx) h <- aggregate(d, by=list(from=marx), min) h <- as.matrix(h[, -1L, drop=FALSE]) m <- table(marx) mm <- outer(m, m, pmin) hradii <- h * mm/(mm+1) dimnames(hradii) <- list(types, types) h[!(archy$relation)] <- NA } HierHard(types=types,hradii=hradii,archy=archy) }, init = function(self) { types <- self$par$types hradii <- self$par$hradii ## hradii could be NULL if(!is.null(types)) { if(!is.null(dim(types))) stop(paste("The", sQuote("types"), "argument should be a vector")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(anyNA(types)) stop("NA's not allowed in 
types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) if(!is.null(hradii)) MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE) } }, update = NULL, # default OK print = function(self) { hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy if(waxlyrical('gory')) splat(nrow(hradii), "types of points") if(!is.null(types) && !is.null(archy)) { if(waxlyrical('space')) { splat("Possible types and ordering:") } else cat("Hierarchy: ") print(archy) } else if(!is.null(types)) { (if(waxlyrical('space')) splat else cat)("Possible types: ") print(types) } else if(waxlyrical('gory')) splat("Possible types:\t not yet determined") if(!is.null(hradii)) { splat("Hardcore radii:") print(hiermat(dround(hradii), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, version=NULL # to be added ) class(BlankHHobject) <- "interact" # finally create main function HierHard <- function(hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } out <- instantiate.interact(BlankHHobject, list(types=types, hradii=hradii, archy=archy)) if(!is.null(types) && !is.null(out$par$hradii)) dimnames(out$par$hradii) <- list(types,types) return(out) } HierHard <- intermaker(HierHard, BlankHHobject) HierHard }) spatstat/R/pairwise.R0000644000176200001440000000505713433151224014271 0ustar liggesusers# # # pairwise.S # # $Revision: 1.12 $ $Date: 2019/02/20 03:32:22 $ # # Pairwise() create a user-defined pairwise interaction process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Pairwise <- function(pot, name = "user-defined pairwise interaction process", par = NULL, parnames=NULL, printfun) { fop <- names(formals(pot)) if(!isTRUE(all.equal(fop, c("d", "par"))) && !isTRUE(all.equal(fop, c("d", "tx", "tu", "par")))) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) if(!is.null(parnames)) { stopifnot(is.character(parnames)) if(is.null(par) || length(par) != length(parnames)) stop("par does not match parnames") } if(missing(printfun)) printfun <- function(self) { cat("Potential function:\n") print(self$pot) if(!is.null(parnames <- self$parnames)) { for(i in 1:length(parnames)) { cat(paste(parnames[i], ":\t")) pari <- self$par[[i]] if(is.numeric(pari) && length(pari) == 1) cat(pari, "\n") else print(pari) } } } out <- list( name = name, creator = "Pairwise", family = pairwise.family, pot = pot, par = par, parnames = parnames, hasInf = NA, init = NULL, update = function(self, ...){ do.call(Pairwise, resolve.defaults(list(...), list(pot=self$pot, name=self$name, 
par=self$par, parnames=self$parnames, printfun=self$print))) } , print = printfun, version = versionstring.spatstat() ) class(out) <- "interact" return(out) } Pairwise <- intermaker(Pairwise, list(creator="Pairwise", name="user-defined pairwise interaction process", par=formals(Pairwise), parnames=list("the potential", "the name of the interaction", "the list of parameters", "a description of each parameter", "an optional print function"))) spatstat/R/symbolmap.R0000644000176200001440000005741113606253516014463 0ustar liggesusers## ## symbolmap.R ## ## $Revision: 1.37 $ $Date: 2020/01/11 04:45:05 $ ## symbolmap <- local({ known.unknowns <- c("shape", "pch", "chars", "size", "cex", "direction", "arrowtype", "headlength", "headangle", "col", "cols", "fg", "bg", "lty", "lwd", "border", "fill", "etch") trycolourmap <- function(...) { try(colourmap(...), silent=TRUE) } symbolmap <- function(..., range=NULL, inputs=NULL) { if(!is.null(range) && !is.null(inputs)) stop("Arguments range and inputs are incompatible") ## graphics parameters parlist <- list(...) 
## remove unrecognised parameters and NULL values if(length(parlist) > 0) { ok <- names(parlist) %in% known.unknowns ok <- ok & !unlist(lapply(parlist, is.null)) parlist <- parlist[ok] } got.pars <- (length(parlist) > 0) parnames <- names(parlist) type <- if(is.null(inputs) && is.null(range)) "constant" else if(!is.null(inputs)) "discrete" else "continuous" if(got.pars) { ## validate parameters if(is.null(parnames) || !all(nzchar(parnames))) stop("All graphics parameters must have names") atomic <- unlist(lapply(parlist, is.atomic)) functions <- unlist(lapply(parlist, is.function)) lenfs <- lengths(parlist) constants <- atomic & (lenfs == 1) if(any(bad <- !(constants | functions))) { if(type == "discrete" && any(repairable <- atomic[bad])) { ## recycle data to desired length parlist[repairable] <- lapply(parlist[repairable], reptolength, n=length(inputs)) bad[repairable] <- FALSE } if(type == "continuous") { ## look for vectors of colour values iscol <- bad & sapply(parlist, is.colour) & (names(parlist) %in% c("cols", "col", "fg", "bg")) ## convert colour values to colour map if(any(iscol)) { cmap <- lapply(parlist[iscol], trycolourmap, range=range) success <- sapply(cmap, inherits, what="colourmap") iscol[iscol] <- success if(any(iscol)) { parlist[iscol] <- cmap[success] bad[iscol] <- FALSE functions[iscol] <- TRUE } } } nbad <- sum(bad) if(nbad > 0) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(sQuote(parnames[bad])), ngettext(nbad, "is neither a function nor a constant", "are neither functions nor constants"))) } if(type == "constant" && any(functions)) type <- "continuous" } switch(type, constant ={ ## set of constant graphics parameters defining a single symbol stuff <- list(type=type, parlist=parlist) ConstantValue <- as.data.frame(parlist, stringsAsFactors=FALSE) f <- function(x) ConstantValue }, discrete = { ## finite set of inputs mapped to symbols stuff <- list(type=type, inputs=inputs, parlist=parlist) f <- function(x) 
ApplyDiscreteSymbolMap(x, stuff) }, continuous = { got.shape <- "shape" %in% parnames got.size <- "size" %in% parnames got.cha <- any(c("pch", "chars") %in% parnames) ## interval of real line (etc) mapped to symbols or characters if(!got.cha) { ## mapped to symbols if(!got.shape) parlist$shape <- "circles" if(!got.size) stop("Parameter 'size' is missing") } rangetype <- if(is.null(range)) "numeric" else if(inherits(range, "POSIXt")) "datetime" else if(inherits(range, "Date")) "date" else if(is.numeric(range)) "numeric" else "unknown" stuff <- list(type=type, range=range, rangetype=rangetype, parlist=parlist) f <- function(x) ApplyContinuousSymbolMap(x, stuff) }) attr(f, "stuff") <- stuff class(f) <- c("symbolmap", class(f)) f } reptolength <- function(z, n) { rep.int(z, n)[1:n] } MapDiscrete <- function(f, x, i) { if(is.function(f)) f(x) else if(length(f) == 1) rep.int(f, length(x)) else f[i] } MapContinuous <- function(f, x) { if(is.function(f)) f(x) else rep.int(f, length(x)) } ApplyContinuousSymbolMap <- function(x, stuff) { with(stuff, { y <- as.data.frame(lapply(parlist, MapContinuous, x=x), stringsAsFactors=FALSE) return(y) }) } ApplyDiscreteSymbolMap <- function(x, stuff) { with(stuff, { ii <- match(x, inputs) if(anyNA(ii)) stop("Some values do not belong to the domain of the symbol map") y <- as.data.frame(lapply(parlist, MapDiscrete, x=x, i=ii), stringsAsFactors=FALSE) return(y) }) } symbolmap }) symbolmaptype <- function(x) { attr(x, "stuff")$type } symbolmapdomain <- function(x) { stuff <- attr(x, "stuff") d <- switch(stuff$type, constant = { integer(0) }, discrete = { stuff$inputs }, continuous = { stuff$range }) return(d) } update.symbolmap <- function(object, ...) { y <- attr(object, "stuff") oldargs <- append(y[["parlist"]], y[c("inputs", "range")]) do.call(symbolmap, resolve.defaults(list(...), oldargs)) } print.symbolmap <- function(x, ...) 
{ with(attr(x, "stuff"), { switch(type, constant = { if(length(parlist) == 0) { cat("Symbol map", "with no parameters", fill=TRUE) } else { cat("Symbol map", "with constant values", fill=TRUE) } }, discrete = { cat("Symbol map", "for discrete inputs:", fill=TRUE) print(inputs) }, continuous = { cat("Symbol map", "for", switch(rangetype, numeric="real numbers", date = "dates", datetime = "date/time values", unknown = "unrecognised data"), if(!is.null(range)) paste("in", prange(range)) else NULL, fill=TRUE) }) if(length(parlist) > 0) { for(i in seq_along(parlist)) { cat(paste0(names(parlist)[i], ": ")) pari <- parlist[[i]] if(!is.function(pari) && length(pari) == 1) cat(pari, fill=TRUE) else print(pari) } } return(invisible(NULL)) }) } ## Function which actually plots the symbols. ## Called by plot.ppp and plot.symbolmap ## Returns maximum size of symbols invoke.symbolmap <- local({ ## plot points, handling various arguments do.points <- function(x, y, ..., cex=size, size=NULL, col=cols, pch=chars, cols=NULL, chars=NULL, lwd=1, etch=FALSE, do.plot=TRUE) { if(do.plot) { if(length(cex) == 0) cex <- 1 if(length(col) == 0) col <- par("col") if(length(pch) == 0) pch <- 1 if(length(lwd) == 0) lwd <- 1 n <- length(x) if(length(cex) == 1) cex <- rep(cex, n) if(length(col) == 1) col <- rep(col, n) if(length(pch) == 1) pch <- rep(pch, 1) if(length(lwd) == 1) lwd <- rep(lwd, n) if(length(etch) == 1) etch <- rep(etch, n) ## infer which arguments are parallelised other <- append(list(...), list(cex=cex, pch=pch)) isvec <- (lengths(other) == n) other.fixed <- other[!isvec] other.vec <- other[isvec] ## if(any(i <- as.logical(etch))) { anti.col <- complementarycolour(col) anti.lwd <- if(is.numeric(etch)) etch else 2 * lwd do.call.matched(points.default, resolve.defaults(list(x=x[i], y=y[i]), other.fixed, lapply(other.vec, "[", i=i), list(col=anti.col[i], lwd=anti.lwd[i])), extrargs=c("col", "pch", "type", "bg", "cex", "lwd", "lty")) } do.call.matched(points.default, 
resolve.defaults(list(x=x, y=y), other, list(col=col, lwd=lwd)), extrargs=c("col", "pch", "type", "bg", "cex", "lwd", "lty")) } return(max(cex %orifnull% 1)) } ## plot symbols likewise do.symbols <- function(x, y, ..., shape, size=cex, cex=NULL, fg=col, col=cols, cols=NULL, lwd=1, etch=FALSE, do.plot=TRUE) { if(do.plot) { ## zap tiny sizes tiny <- (size < (max(size)/1000)) size[tiny] <- 0 ## collect arguments n <- length(x) if(length(lwd) == 1) lwd <- rep(lwd, n) if(length(etch) == 1) etch <- rep(etch, n) if(length(fg) == 0) fg <- rep(par("col"), n) else if(length(fg) == 1) fg <- rep(fg, n) other <- resolve.defaults(list(...), list(add=TRUE, inches=FALSE)) ## infer which arguments are parallelised isvec <- (lengths(other) == n) other.fixed <- other[!isvec] other.vec <- other[isvec] ## if(any(as.logical(etch))) { anti.fg <- complementarycolour(fg) anti.lwd <- if(is.numeric(etch)) etch else 2 * lwd } ## plot if(any(i <- (shape == "circles") & as.logical(etch))) do.call.matched(symbols, c(list(x=x[i], y=y[i], circles=size[i]/2), other.fixed, lapply(other.vec, "[", i=i), list(lwd=anti.lwd[i], fg=anti.fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "circles"))) do.call.matched(symbols, c(list(x=x[i], y=y[i], circles=size[i]/2), other.fixed, lapply(other.vec, "[", i=i), list(lwd=lwd[i], fg=fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "squares") & as.logical(etch))) do.call.matched(symbols, c(list(x=x[i], y=y[i], squares=size[i]), other.fixed, lapply(other.vec, "[", i=i), list(lwd=anti.lwd[i], fg=anti.fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "squares"))) do.call.matched(symbols, c(list(x=x[i], y=y[i], squares=size[i]), other.fixed, lapply(other.vec, "[", i=i), list(lwd=lwd[i], fg=fg[i])), extrargs=c("lwd", "lty")) if(any(i <- (shape == "arrows") & as.logical(etch))) do.call.matched(do.arrows, c(list(x=x[i], y=y[i], len=size[i]), other.fixed, lapply(other.vec, "[", i=i), list(lwd=anti.lwd[i], cols=anti.fg[i])), extrargs=c("cols", "col", 
"lwd", "lty")) if(any(i <- (shape == "arrows"))) do.call.matched(do.arrows, c(list(x=x[i], y=y[i], len=size[i]), other.fixed, lapply(other.vec, "[", i=i), list(lwd=lwd[i], cols=fg[i])), extrargs=c("cols", "col", "lwd", "lty")) } return(max(size)) } do.arrows <- function(x, y, len, direction=0, arrowtype=2, ..., headlength=len * 0.4, headangle=40, cols=col, col=par('fg'), lwd=1, lty=1) { #' vectorise all arguments df <- data.frame(x=x, y=y, len=len, direction=direction, arrowtype=arrowtype, headangle=headangle, cols=cols, lwd=lwd, lty=lty) with(df, { alpha <- direction * pi/180 dx <- len * cos(alpha)/2 dy <- len * sin(alpha)/2 x0 <- x - dx x1 <- x + dx y0 <- y - dy y1 <- y + dy segments(x0, y0, x1, y1, ..., col=cols, lty=lty, lwd=lwd) if(any(arrowtype != 0)) { halfangle <- (headangle/2) * pi/180 beta1 <- alpha + halfangle beta2 <- alpha - halfangle hx1 <- headlength * cos(beta1) hy1 <- headlength * sin(beta1) hx2 <- headlength * cos(beta2) hy2 <- headlength * sin(beta2) if(any(left <- (arrowtype %in% c(1,3)))) { segments(x0[left], y0[left], (x0 + hx1)[left], (y0 + hy1)[left], ..., col=cols[left], lwd=lwd[left], lty=lty[left]) segments(x0[left], y0[left], (x0 + hx2)[left], (y0 + hy2)[left], ..., col=cols[left], lwd=lwd[left], lty=lty[left]) } if(any(right <- (arrowtype %in% c(2,3)))) { segments(x1[right], y1[right], (x1 - hx1)[right], (y1 - hy1)[right], ..., col=cols[right], lwd=lwd[right], lty=lty[right]) segments(x1[right], y1[right], (x1 - hx2)[right], (y1 - hy2)[right], ..., col=cols[right], lwd=lwd[right], lty=lty[right]) } } }) return(invisible(NULL)) } ## main function invoke.symbolmap <- function(map, values, x=NULL, y=NULL, ..., add=FALSE, do.plot=TRUE, started = add && do.plot) { if(!inherits(map, "symbolmap")) stop("map should be an object of class 'symbolmap'") if(hasxy <- (!is.null(x) || !is.null(y))) { xy <- xy.coords(x, y) x <- xy$x y <- xy$y } ## function will return maximum size of symbols plotted. 
maxsize <- 0 if(do.plot && !add) plot(x, y, type="n", ...) force(values) g <- map(values) parnames <- colnames(g) if(do.plot) { xydf <- data.frame(x=x, y=y) if(nrow(xydf) == 0) return(invisible(maxsize)) g <- if(prod(dim(g)) == 0) xydf else do.call(data.frame, c(as.list(g), as.list(xydf), list(stringsAsFactors=FALSE))) } n <- nrow(g) ## figure out which function does the graphics job need.points <- any(c("pch", "chars") %in% parnames) need.symbols <- "shape" %in% parnames if(need.symbols && need.points) { worker <- with(g, ifelse(!is.na(shape), "symbols", "points")) } else if(need.symbols) { worker <- rep.int("symbols", n) } else { worker <- rep.int("points", n) } ## split data according to graphics function involved z <- split(g, factor(worker)) ## display using 'pch' zpoints <- z[["points"]] if(!is.null(zpoints) && nrow(zpoints) > 0) { ms <- do.call(do.points, resolve.defaults(as.list(zpoints), list(...), list(do.plot=do.plot))) ## value is max(cex) ## guess size of one character charsize <- if(started) max(par('cxy')) else if(hasxy) max(sidelengths(boundingbox(x,y))/40) else 1/40 maxsize <- max(maxsize, charsize * ms) } ## display using 'symbols' zsymbols <- z[["symbols"]] if(!is.null(zsymbols) && nrow(zsymbols) > 0) { ms <- do.call(do.symbols, resolve.defaults(as.list(zsymbols), list(...), list(do.plot=do.plot))) ## ms value is max physical size. 
maxsize <- max(maxsize, ms) } return(invisible(maxsize)) } invoke.symbolmap }) ## Display the symbol map itself (`legend' style) plot.symbolmap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, side=c("bottom", "left", "top", "right"), annotate=TRUE, labelmap=NULL, add=FALSE, nsymbols=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) miss.side <- missing(side) side <- match.arg(side) type <- symbolmaptype(x) map <- x stuff <- attr(map, "stuff") if(type == "constant" && length(stuff$parlist) == 0) return(invisible(NULL)) if(is.null(labelmap)) { labelmap <- function(x) x } else if(type == "continuous" && is.numeric(labelmap) && length(labelmap) == 1) { labscal <- labelmap labelmap <- function(x) { x * labscal } } else stopifnot(is.function(labelmap)) ## determine the 'example' input values and their graphical representations switch(type, constant = { vv <- NULL }, continuous = { ra <- stuff$range if(is.null(ra)) stop("Cannot plot symbolmap with an infinite range") vv <- if(is.null(nsymbols)) prettyinside(ra) else prettyinside(ra, n = nsymbols) if(is.numeric(vv)) vv <- signif(vv, 4) }, discrete = { vv <- if(is.null(nsymbols)) prettydiscrete(stuff$inputs) else prettydiscrete(stuff$inputs, n = nsymbols) if(vertical) vv <- rev(vv) }) nn <- length(vv) ## gg <- map(vv) ll <- paste(labelmap(vv)) ## determine position of plot and symbols if(add) { ## x and y limits must respect existing plot space usr <- par('usr') if(is.null(xlim)) xlim <- usr[1:2] if(is.null(ylim)) ylim <- usr[3:4] } else { ## create new plot maxdiam <- invoke.symbolmap(map, vv, do.plot=FALSE, started=FALSE) zz <- c(0, max(1, maxdiam)) if(is.null(xlim) && is.null(ylim)) { if(vertical) { xlim <- zz ylim <- length(vv) * zz } else { xlim <- length(vv) * zz ylim <- zz } } else if(is.null(ylim)) { ylim <- zz } else if(is.null(xlim)) { xlim <- zz } } ## .......... initialise plot ............................... 
if(!add) do.call.matched(plot.default, resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) ## maximum symbol diameter maxdiam <- invoke.symbolmap(map, vv, do.plot=FALSE, started=TRUE) ## .......... plot symbols .................... if(type == "constant") { xp <- mean(xlim) yp <- mean(ylim) } else if(vertical) { ## vertical arrangement xp <- rep(mean(xlim), nn) vskip <- 1.1 * max(maxdiam, 3 * max(strheight(labelmap(vv)))) if(diff(ylim) > nn * vskip) { yp <- (1:nn) * vskip yp <- yp - mean(yp) + mean(ylim) } else { z <- seq(ylim[1], ylim[2], length=nn+1) yp <- z[-1] - diff(z)/2 } } else { ## horizontal arrangement yp <- rep(mean(ylim), nn) hskip <- 1.1 * max(maxdiam, max(strwidth(labelmap(vv)))) if(diff(xlim) > nn * hskip) { xp <- (1:nn) * hskip xp <- xp - mean(xp) + mean(xlim) } else { z <- seq(xlim[1], xlim[2], length=nn+1) xp <- z[-1] - diff(z)/2 } } invoke.symbolmap(map, vv, xp, yp, ..., add=TRUE) ## ................. draw annotation .................. 
if(annotate && length(ll) > 0) { if(vertical) { ## default axis position is to the right if(miss.side) side <- "right" sidecode <- match(side, c("bottom", "left", "top", "right")) if(!(sidecode %in% c(2,4))) warning(paste("side =", sQuote(side), "is not consistent with vertical orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] ## draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side=sidecode, pos=pos, at=yp, labels=ll, tick=FALSE, las=1)), extrargs=graphicsPars("axis")) } else { ## default axis position is below if(miss.side) side <- "bottom" sidecode <- match(side, c("bottom", "left", "top", "right")) if(!(sidecode %in% c(1,3))) warning(paste("side =", sQuote(side), "is not consistent with horizontal orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] ## draw axis do.call.matched(graphics::axis, resolve.defaults(list(...), list(side = sidecode, pos = pos, at = xp, labels=ll, tick=FALSE)), extrargs=graphicsPars("axis")) } } return(invisible(NULL)) } plan.legend.layout <- function(B, ..., side=c("bottom", "left", "top", "right"), sep=NULL, size=NULL, sep.frac=0.05, size.frac=0.05, started=FALSE, map=NULL) { ## Determine size and position of a box containing legend or symbolmap ## attached to a plot in region 'B'. ## sep, size are absolute distances; ## sep.frac, size.frac are fractions of the maximum sidelength of B. 
side <- match.arg(side) B <- as.rectangle(B) Bsize <- max(sidelengths(B)) if(is.null(size)) { size <- size.frac * Bsize } else { check.1.real(size) stopifnot(size > 0) } if(is.null(sep)) { sep <- sep.frac * Bsize } else { check.1.real(sep) stopifnot(sep > 0) } if(is.null(map) || !inherits(map, "symbolmap")) { vv <- NULL textlength <- 8 } else { vv <- with(attr(map, "stuff"), if(type == "discrete") inputs else prettyinside(range)) textlength <- max(nchar(paste(vv))) } if(started && !is.null(vv)) { textwidth <- max(strwidth(vv)) textheight <- max(strheight(vv)) } else { ## the plot has not been initialised: guess character size charsize <- diff(if(side %in% c("left", "right")) B$yrange else B$xrange)/40 textwidth <- charsize * textlength textheight <- charsize } switch(side, right={ ## symbols to right of image b <- owin(B$xrange[2] + sep + c(0, size), B$yrange) ## text to right of symbols tt <- owin(b$xrange[2] + sep + c(0, textwidth), b$yrange) iside <- 4 }, left={ ## symbols to left of image b <- owin(B$xrange[1] - sep - c(size, 0), B$yrange) ## text to left of symbols tt <- owin(b$xrange[1] - sep - c(textwidth, 0), b$yrange) iside <- 2 }, top={ ## symbols above image b <- owin(B$xrange, B$yrange[2] + sep + c(0, size)) ## text above symbols tt <- owin(b$xrange, b$yrange[2] + 3* charsize + c(0, textheight)) iside <- 3 }, bottom={ ## symbols below image b <- owin(B$xrange, B$yrange[1] - sep - c(size, 0)) ## text below symbols tt <- owin(b$xrange, b$yrange[1] - 3 * charsize - c(textheight, 0)) iside <- 1 }) A <- boundingbox(B, b, tt) return(list(A=A, B=B, b=b, tt=tt, iside=iside, side=side, size=size, charsize=charsize, sep=sep)) } spatstat/R/hypersub.R0000644000176200001440000001446213526002746014316 0ustar liggesusers## ## hypersub.R ## ## ## subset operations for hyperframes ## ## $Revision: 1.28 $ $Date: 2019/08/17 13:48:03 $ ## "[.hyperframe" <- function(x, i, j, drop=FALSE, strip=drop, ...) 
## Tail of "[.hyperframe" -- the function header lies before this chunk.
## Subsets rows (i) and/or columns (j) of a hyperframe, optionally
## dropping dimensions: a 1x1 result may be returned as a single object,
## a single row as a list/solist, a single column as a vector or solist.
{ x <- unclass(x)
  if(!missing(i)) {
    ## subset the rows; hypercolumns are lists indexed in parallel with df
    y <- x
    y$df <- x$df[i, , drop=FALSE]
    y$ncases <- nrow(y$df)
    y$hypercolumns <- lapply(x$hypercolumns, "[", i=i)
    x <- y
  }
  if(!missing(j)) {
    ## subset the columns; 'patsy' maps any column index (name/number/logical)
    ## to positive integer positions
    y <- x
    patsy <- seq_len(y$nvars)
    names(patsy) <- y$vname
    jj <- patsy[j]
    names(jj) <- NULL
    y$nvars <- length(jj)
    y$vname <- vname <- x$vname[jj]
    y$vtype <- vtype <- x$vtype[jj]
    y$vclass <- x$vclass[jj]
    if(ncol(x$df) != 0)
      y$df <- x$df[ , vname[vtype == "dfcolumn"], drop=FALSE]
    y$hyperatoms <- x$hyperatoms[ vname[ vtype == "hyperatom" ]]
    y$hypercolumns <- x$hypercolumns[ vname [ vtype == "hypercolumn" ] ]
    x <- y
  }
  if(drop) {
    nrows <- x$ncases
    ncols <- x$nvars
    if(nrows == 1 && ncols == 1 && strip) {
      ## return a single object
      y <- switch(as.character(x$vtype),
                  dfcolumn    = x$df[, , drop=TRUE],
                  hypercolumn = (x$hypercolumns[[1L]])[[1L]],
                  hyperatom   = x$hyperatoms[[1L]])
      return(y)
    } else if(nrows == 1) {
      ## return the row as a vector or a list
      if(strip && all(x$vtype == "dfcolumn"))
        return(x$df[ , , drop=TRUE])
      n <- x$nvars
      y <- vector(mode="list", length=n)
      names(y) <- nama <- x$vname
      for(i in seq_len(n)) {
        nami <- nama[i]
        y[[i]] <- switch(as.character(x$vtype[i]),
                         dfcolumn    = x$df[ , nami, drop=TRUE],
                         hyperatom   = x$hyperatoms[[nami]],
                         hypercolumn = (x$hypercolumns[[nami]])[[1L]]
                         )
      }
      return(as.solist(y, demote=TRUE))
    } else if(ncols == 1) {
      ## return a column as an 'anylist'/'solist' or a vector
      switch(as.character(x$vtype),
             dfcolumn = {
               return(x$df[, , drop=TRUE])
             },
             hypercolumn = {
               y <- as.solist(x$hypercolumns[[1L]], demote=TRUE)
               names(y) <- row.names(x$df)
               return(y)
             },
             hyperatom = {
               ## replicate it to make a hypercolumn
               ha <- x$hyperatoms[1L]
               names(ha) <- NULL
               hc <- rep.int(ha, x$ncases)
               hc <- as.solist(hc, demote=TRUE)
               names(hc) <- row.names(x$df)
               return(hc)
             }
             )
    }
  }
  class(x) <- c("hyperframe", class(x))
  return(x)
}

## Extract a single column of a hyperframe by name, dropping to the
## underlying vector/list ($ always drops, but does not strip).
"$.hyperframe" <- function(x,name) {
  m <- match(name, unclass(x)$vname)
  if(is.na(m))
    return(NULL)
  return(x[, name, drop=TRUE, strip=FALSE])
}

## Replace or add a column of a hyperframe by name.
## A one-column hyperframe value is spliced in as that column;
## a wider hyperframe replaces 'name' by several columns via insertinlist;
## atomic vectors/factors become ordinary data-frame columns,
## anything else becomes a hypercolumn (list column).
"$<-.hyperframe" <- function(x, name, value)
{
  y <- as.list(x)
  if(is.hyperframe(value)) {
    if(ncol(value) == 1) {
      y[name] <- as.list(value)
    } else {
      y <- insertinlist(y, name, as.list(value))
    }
  } else {
    ## atomic vector/factor -> plain data frame column
    dfcol <- is.atomic(value) && (is.vector(value) || is.factor(value))
    if(!dfcol && !is.null(value))
      value <- as.list(value)
    y[[name]] <- value
  }
  ## rebuild the hyperframe, preserving row names
  z <- do.call(hyperframe,
               append(y, list(row.names=row.names(x),
                              stringsAsFactors=FALSE)))
  return(z)
}

## Subscripted assignment x[i, j] <- value for hyperframes.
## Handles the special case x[, j] <- value separately, otherwise
## converts indices to positive integers and replaces entries in a
## list representation before rebuilding the hyperframe.
"[<-.hyperframe" <- function (x, i, j, value) {
  sumry <- summary(x)
  colnam <- sumry$col.names
  dimx <- sumry$dim
  igiven <- !missing(i)
  jgiven <- !missing(j)
  if(!igiven) i <- seq_len(dimx[1L])
  if(!jgiven) j <- seq_len(dimx[2L])
  ## does each index select exactly one row/column?
  singlerow    <- ((is.integer(i) && length(i) == 1 && i > 0) ||
                   (is.character(i) && length(i) == 1) ||
                   (is.logical(i) && sum(i) == 1))
  singlecolumn <- ((is.integer(j) && length(j) == 1 && j > 0) ||
                   (is.character(j) && length(j) == 1) ||
                   (is.logical(j) && sum(j) == 1))
  if(!igiven && jgiven) {
    # x[, j] <- value
    if(singlecolumn) {
      # expecting single hypercolumn
      if(is.logical(j)) j <- names(x)[j]
      y <- get("$<-.hyperframe")(x, j, value)
    } else {
      # expecting hyperframe
      xlist <- as.list(x)
      xlist[j] <- as.list(as.hyperframe(value))
      # the above construction accepts all indices including extra entries
      y <- do.call(hyperframe,
                   append(xlist, list(row.names=row.names(x))))
    }
  } else {
    ## x[, ] <- value or x[i, ] <- value or x[i,j] <- value
    ## convert indices to positive integers
    rowseq <- seq_len(dimx[1L])
    colseq <- seq_len(dimx[2L])
    names(rowseq) <- row.names(x)
    names(colseq) <- colnam
    I <- rowseq[i]
    J <- colseq[j]
    ## convert to lists
    xlist <- as.list(x)
    if(singlerow && singlecolumn) {
      vlist <- list(anylist(value))
      nrowV <- ncolV <- 1
    } else {
      hv <- if(is.hyperframe(value)) value else
            as.hyperframe(as.solist(value, demote=TRUE))
      vlist <- as.list(hv)
      nrowV <- dim(hv)[1L]
      ncolV <- dim(hv)[2L]
    }
    if(nrowV != length(I)) {
      if(nrowV == 1) {
        ## replicate
        ## NOTE(review): 'times=nrowV' is 1 here, so this is a no-op;
        ## presumably 'times=length(I)' was intended -- verify upstream.
        vlist <- lapply(vlist, rep, times=nrowV)
      } else stop(paste("Replacement value has wrong number of rows:",
                        nrowV,
                        "should be", length(I)),
                  call.=FALSE)
    }
    if(ncolV != length(J)) {
      if(ncolV == 1) {
        ## replicate
        ## NOTE(review): 'times=ncolV' is 1 here, so this is a no-op;
        ## presumably 'times=length(J)' was intended -- verify upstream.
        vlist <- rep(vlist, times=ncolV)
      } else stop(paste("Replacement value has wrong number of columns:",
                        ncolV,
                        "should be", length(J)),
                  call.=FALSE)
    }
    ## replace entries
    for(k in seq_along(J)) {
      jj <- J[k]
      xlist[[jj]][I] <- vlist[[k]]
    }
    ## put back together
    y <- do.call(hyperframe,
                 append(xlist, list(row.names=row.names(x))))
  }
  return(y)
}

## split method for hyperframes: returns a list of row-subsets of x,
## one per level of the grouping factor f (rows located via a helper
## data frame of row ids, mirroring split.data.frame).
split.hyperframe <- local({

  split.hyperframe <- function(x, f, drop=FALSE, ...) {
    y <- data.frame(id=seq_len(nrow(x)))
    z <- split(y, f, drop=drop)
    z <- lapply(z, getElement, name="id")
    out <- lapply(z, indexi, x=x)
    return(out)
  }

  ## helper: extract rows i of hyperframe x
  indexi <- function(i, x) x[i,]

  split.hyperframe
})

## split<- method: writes the components of 'value' back into the
## row groups of x defined by f, recycling value cyclically.
"split<-.hyperframe" <- function(x, f, drop=FALSE, ..., value) {
  ix <- split(seq_len(nrow(x)), f, drop = drop, ...)
  n <- length(value)
  j <- 0
  for (i in ix) {
    j <- j%%n + 1L
    x[i, ] <- value[[j]]
  }
  x
}
spatstat/R/eem.R0000644000176200001440000000062213333543255013215 0ustar liggesusers# eem.R
#
# Computes the Stoyan-Grabarnik "exponential energy weights"
#
# $Revision: 1.4 $ $Date: 2008/07/25 19:51:05 $
#

## Stoyan-Grabarnik diagnostic weights for a fitted Gibbs model:
## 1/lambda evaluated at the data points of the quadrature scheme.
eem <- function(fit, check=TRUE) {
  verifyclass(fit, "ppm")
  lambda <- fitted.ppm(fit, check=check)
  Q <- quad.ppm(fit)
  Z <- is.data(Q)         # which quadrature points are data points
  eemarks <- 1/lambda[Z]
  attr(eemarks, "type") <- "eem"
  attr(eemarks, "typename") <- "exponential energy marks"
  return(eemarks)
}
spatstat/R/Kmulti.R0000644000176200001440000003137113551505525013721 0ustar liggesusers#
# Kmulti.S
#
# Compute estimates of cross-type K functions
# for multitype point patterns
#
# $Revision: 5.50 $ $Date: 2019/10/16 03:09:01 $
#
#
# -------- functions ----------------------------------------
# Kcross()   cross-type K function K_{ij}
#            between types i and j
#
# Kdot()     K_{i\bullet}
#            between type i and all points regardless of type
#
# Kmulti()   (generic)
#
#
# -------- standard arguments ------------------------------
# X          point pattern (of class 'ppp')
#            including 'marks' vector
# r          distance values at which to compute
# K
#
# -------- standard output ------------------------------
#      A data frame with columns named
#
#	r:		same as input
#
#	trans:		K function estimated by translation correction
#
#	iso:		K function estimated by Ripley isotropic correction
#
#	theo:		K function for Poisson ( = pi * r ^2 )
#
#	border:		K function estimated by border method
#			using standard formula (denominator = count of points)
#
#       bord.modif:	K function estimated by border method
#			using modified formula
#			(denominator = area of eroded window
#
# ------------------------------------------------------------------------

## Cross-type L function L_{ij}(r) = sqrt(K_{ij}(r)/pi),
## computed by transforming the result of Kcross and relabelling it.
"Lcross" <- function(X, i, j, ..., from, to, correction) {
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1]
  if(missing(j)) j <- if(!missing(to)) to else levels(marks(X))[2]
  if(missing(correction)) correction <- NULL
  K <- Kcross(X, i, j, ..., correction=correction)
  L <- eval.fv(sqrt(K/pi))
  # relabel the fv object
  iname <- make.parseable(paste(i))
  jname <- make.parseable(paste(j))
  L <- rebadge.fv(L,
                  substitute(L[i,j](r), list(i=iname,j=jname)),
                  c("L", paste0("list(", iname, ",", jname, ")")),
                  new.yexp=substitute(L[list(i,j)](r),
                                      list(i=iname,j=jname)))
  attr(L, "labl") <- attr(K, "labl")
  return(L)
}

## Dot-type L function L_{i.}(r) = sqrt(K_{i.}(r)/pi),
## computed by transforming the result of Kdot and relabelling it.
"Ldot" <- function(X, i, ..., from, correction) {
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1]
  if(missing(correction)) correction <- NULL
  K <- Kdot(X, i, ..., correction=correction)
  L <- eval.fv(sqrt(K/pi))
  # relabel the fv object
  iname <- make.parseable(paste(i))
  L <- rebadge.fv(L,
                  substitute(L[i ~ dot](r), list(i=iname)),
                  c("L", paste(iname, "~ symbol(\"\\267\")")),
                  new.yexp=substitute(L[i ~ symbol("\267")](r),
                                      list(i=iname)))
  attr(L, "labl") <- attr(K, "labl")
  return(L)
}

## Cross-type K function between marks i and j.
## Delegates to Kest when i == j, otherwise to Kmulti with the logical
## index vectors I, J selecting the two types; relabels the result.
"Kcross" <- function(X, i, j, r=NULL, breaks=NULL,
                     correction =c("border", "isotropic", "Ripley",
                                   "translate") ,
                     ..., ratio=FALSE, from, to)
{
  verifyclass(X, "ppp")
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  if(missing(correction)) correction <- NULL
  marx <- marks(X)
  if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1]
  if(missing(j)) j <- if(!missing(to)) to else levels(marx)[2]
  I <- (marx == i)
  if(!any(I)) stop(paste("No points have mark i =", i))
  if(i == j) {
    ## use Kest
    result <- do.call(Kest,
                      resolve.defaults(list(X=X[I], r=r, breaks=breaks,
                                            correction=correction,
                                            ratio=ratio),
                                       list(rmax=NULL), ## forbidden
                                       list(...)))
  } else {
    J <- (marx == j)
    if(!any(J)) stop(paste("No points have mark j =", j))
    result <- Kmulti(X, I, J,
                     r=r, breaks=breaks, correction=correction,
                     ratio=ratio, ...)
  }
  ## relabel as K[i,j]
  iname <- make.parseable(paste(i))
  jname <- make.parseable(paste(j))
  result <- rebadge.fv(result,
                       substitute(Kcross[i,j](r), list(i=iname,j=jname)),
                       c("K", paste0("list(", iname, ",", jname, ")")),
                       new.yexp=substitute(K[list(i,j)](r),
                                           list(i=iname,j=jname)))
  return(result)
}

## Dot-type K function K_{i.}: from points of type i to all points.
## Wrapper around Kmulti with J = all points; relabels the result.
"Kdot" <- function(X, i, r=NULL, breaks=NULL,
                   correction = c("border", "isotropic", "Ripley",
                                  "translate") ,
                   ..., ratio=FALSE, from) {
  verifyclass(X, "ppp")
  if(!is.multitype(X, dfok=FALSE))
    stop("Point pattern must be multitype")
  if(missing(correction)) correction <- NULL
  marx <- marks(X)
  if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1]
  I <- (marx == i)
  J <- rep.int(TRUE, X$n)  # i.e. all points
  if(!any(I)) stop(paste("No points have mark i =", i))
  result <- Kmulti(X, I, J,
                   r=r, breaks=breaks, correction=correction, ...,
                   ratio=ratio)
  ## relabel as K[i ~ dot]
  iname <- make.parseable(paste(i))
  result <- rebadge.fv(result,
                       substitute(K[i ~ dot](r), list(i=iname)),
                       c("K", paste0(iname, "~ symbol(\"\\267\")")),
                       new.yexp=substitute(K[i ~ symbol("\267")](r),
                                           list(i=iname)))
  return(result)
}

## Workhorse: multitype K function between two arbitrary subsets I, J
## of the pattern, with the standard edge corrections.
## (Function continues past this chunk: the remainder of the body,
## computing each edge-corrected estimate, follows.)
"Kmulti"<- function(X, I, J, r=NULL, breaks=NULL,
                    correction = c("border", "isotropic", "Ripley",
                                   "translate") ,
                    ..., ratio=FALSE)
{
  verifyclass(X, "ppp")
  npts <- npoints(X)
  W <- X$window
  areaW <- area(W)
  correction.given <- !missing(correction) && !is.null(correction)
  if(is.null(correction))
    correction <- c("border", "isotropic", "Ripley", "translate")
  ## map synonyms to canonical correction names
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=TRUE)
  correction <- implemented.for.K(correction, W$type, correction.given)
  ## validate subsets
  I <- ppsubset(X, I)
  J <- ppsubset(X, J)
  if(is.null(I) || is.null(J))
    stop("I and J must be valid subset indices")
  if(!any(I)) stop("no points belong to subset I")
  if(!any(J)) stop("no points belong to subset J")
  nI <- sum(I)
  nJ <- sum(J)
  lambdaI <- nI/areaW
  lambdaJ <- nJ/areaW
  # r values
  rmaxdefault <- rmax.rule("K", W, lambdaJ)
  breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  # recommended range of r values
  alim <- c(0, min(rmax, rmaxdefault))
  # this will be the output data frame
  # It will be given more columns later
  K <- data.frame(r=r, theo= pi * r^2)
  desc <- c("distance argument r",
            "theoretical Poisson %s")
  K <- fv(K, "r", quote(K[IJ](r)),
          "theo", , alim,
          c("r","{%s[%s]^{pois}}(r)"),
          desc,
          fname=c("K", "list(I,J)"),
          yexp=quote(K[list(I,J)](r)))
  # save numerator and denominator?
  ## (continuation of the body of "Kmulti" from the previous chunk)
  ## If ratio=TRUE, keep separate fv objects for the numerator and
  ## denominator of every estimate, to support pooling of estimates.
  if(ratio) {
    denom <- lambdaI * lambdaJ * areaW
    numK <- eval.fv(denom * K)
    denK <- eval.fv(denom + K * 0)
    attributes(numK) <- attributes(denK) <- attributes(K)
    attr(numK, "desc")[2] <- "numerator for theoretical Poisson %s"
    attr(denK, "desc")[2] <- "denominator for theoretical Poisson %s"
  }
  # find close pairs of points
  XI <- X[I]
  XJ <- X[J]
  close <- crosspairs(XI, XJ, max(r), what="ijd")
  # close$i and close$j are serial numbers in XI and XJ respectively;
  # map them to original serial numbers in X
  orig <- seq_len(npts)
  imap <- orig[I]
  jmap <- orig[J]
  iX <- imap[close$i]
  jX <- jmap[close$j]
  # eliminate any identical pairs (possible when I and J overlap)
  if(any(I & J)) {
    ok <- (iX != jX)
    if(!all(ok)) {
      close$i <- close$i[ok]
      close$j <- close$j[ok]
      close$d <- close$d[ok]
    }
  }
  # extract information for these pairs (relative to orderings of XI, XJ)
  dcloseIJ <- close$d
  icloseI <- close$i
  jcloseJ <- close$j
  # Compute estimates by each of the selected edge corrections.
  if(any(correction == "none")) {
    # uncorrected!
    wh <- whist(dcloseIJ, breaks$val)  # no weights
    numKun <- cumsum(wh)
    denKun <- lambdaI * lambdaJ * areaW
    Kun <- numKun/denKun
    K <- bind.fv(K, data.frame(un=Kun),
                 "{hat(%s)[%s]^{un}}(r)",
                 "uncorrected estimate of %s",
                 "un")
    if(ratio) {
      # save numerator and denominator
      numK <- bind.fv(numK, data.frame(un=numKun),
                      "{hat(%s)[%s]^{un}}(r)",
                      "numerator of uncorrected estimate of %s",
                      "un")
      denK <- bind.fv(denK, data.frame(un=denKun),
                      "{hat(%s)[%s]^{un}}(r)",
                      "denominator of uncorrected estimate of %s",
                      "un")
    }
  }
  if(any(correction == "border" | correction == "bord.modif")) {
    # border method
    # distance to boundary from each point of type I
    bI <- bdist.points(XI)
    # distance to boundary from first element of each (i, j) pair
    bcloseI <- bI[icloseI]
    # apply reduced sample algorithm
    RS <- Kount(dcloseIJ, bcloseI, bI, breaks)
    if(any(correction == "bord.modif")) {
      ## denominator = area of eroded window
      denom.area <- eroded.areas(W, r)
      numKbm <- RS$numerator
      denKbm <- denom.area * nI * nJ
      Kbm <- numKbm/denKbm
      K <- bind.fv(K, data.frame(bord.modif=Kbm),
                   "{hat(%s)[%s]^{bordm}}(r)",
                   "modified border-corrected estimate of %s",
                   "bord.modif")
      if(ratio) {
        numK <- bind.fv(numK, data.frame(bord.modif=numKbm),
                        "{hat(%s)[%s]^{bordm}}(r)",
                        "numerator of modified border-corrected estimate of %s",
                        "bord.modif")
        denK <- bind.fv(denK, data.frame(bord.modif=denKbm),
                        "{hat(%s)[%s]^{bordm}}(r)",
                        "denominator of modified border-corrected estimate of %s",
                        "bord.modif")
      }
    }
    if(any(correction == "border")) {
      ## denominator = count of points
      numKb <- RS$numerator
      denKb <- lambdaJ * RS$denom.count
      Kb <- numKb/denKb
      K <- bind.fv(K, data.frame(border=Kb),
                   "{hat(%s)[%s]^{bord}}(r)",
                   "border-corrected estimate of %s",
                   "border")
      if(ratio) {
        numK <- bind.fv(numK, data.frame(border=numKb),
                        "{hat(%s)[%s]^{bord}}(r)",
                        "numerator of border-corrected estimate of %s",
                        "border")
        denK <- bind.fv(denK, data.frame(border=denKb),
                        "{hat(%s)[%s]^{bord}}(r)",
                        "denominator of border-corrected estimate of %s",
                        "border")
      }
    }
  }
  if(any(correction == "translate")) {
    # translation correction
    edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE)
    wh <- whist(dcloseIJ, breaks$val, edgewt)
    numKtrans <- cumsum(wh)
    denKtrans <- lambdaI * lambdaJ * areaW
    Ktrans <- numKtrans/denKtrans
    ## estimate is unreliable beyond half the window diameter
    rmax <- diameter(W)/2
    Ktrans[r >= rmax] <- NA
    K <- bind.fv(K, data.frame(trans=Ktrans),
                 "{hat(%s)[%s]^{trans}}(r)",
                 "translation-corrected estimate of %s",
                 "trans")
    if(ratio) {
      numK <- bind.fv(numK, data.frame(trans=numKtrans),
                      "{hat(%s)[%s]^{trans}}(r)",
                      "numerator of translation-corrected estimate of %s",
                      "trans")
      denK <- bind.fv(denK, data.frame(trans=denKtrans),
                      "{hat(%s)[%s]^{trans}}(r)",
                      "denominator of translation-corrected estimate of %s",
                      "trans")
    }
  }
  if(any(correction == "isotropic")) {
    # Ripley isotropic correction
    edgewt <- edge.Ripley(XI[icloseI], matrix(dcloseIJ, ncol=1))
    wh <- whist(dcloseIJ, breaks$val, edgewt)
    numKiso <- cumsum(wh)
    denKiso <- lambdaI * lambdaJ * areaW
    Kiso <- numKiso/denKiso
    ## estimate is unreliable beyond half the window diameter
    rmax <- diameter(W)/2
    Kiso[r >= rmax] <- NA
    K <- bind.fv(K, data.frame(iso=Kiso),
                 "{hat(%s)[%s]^{iso}}(r)",
                 "Ripley isotropic correction estimate of %s",
                 "iso")
    if(ratio) {
      numK <- bind.fv(numK, data.frame(iso=numKiso),
                      "{hat(%s)[%s]^{iso}}(r)",
                      "numerator of Ripley isotropic correction estimate of %s",
                      "iso")
      denK <- bind.fv(denK, data.frame(iso=denKiso),
                      "{hat(%s)[%s]^{iso}}(r)",
                      "denominator of Ripley isotropic correction estimate of %s",
                      "iso")
    }
  }
  # default is to display them all
  formula(K) <- . ~ r
  unitname(K) <- unitname(X)
  if(ratio) {
    # finish up numerator & denominator
    formula(numK) <- formula(denK) <- . ~ r
    unitname(numK) <- unitname(denK) <- unitname(K)
    # tack on to result
    K <- rat(K, numK, denK, check=FALSE)
  }
  return(K)
}
spatstat/R/summary.dppm.R0000644000176200001440000000460313572330665015112 0ustar liggesusers#'
#' summary.dppm.R
#'
#'  $Revision: 1.4 $ $Date: 2019/12/06 01:35:46 $

## Summary method for fitted determinantal point process models:
## drops bulky components, records fitting info, summarises the trend
## (via the ppm representation) and the mean repulsion strength.
summary.dppm <- function(object, ..., quick=FALSE) {
  nama <- names(object)
  result <- unclass(object)[!(nama %in% c("X", "po", "call", "callframe"))]
  ## Fitting information
  result$has.subset <- "subset" %in% names(object$call)
  ## Summarise trend component
  result$trend <- summary(as.ppm(object), ..., quick=quick)
  ## repulsion strength
  result$repul <- mean(repul(object))
  #' pack up
  class(result) <- "summary.dppm"
  return(result)
}

## Print method for the summary; body follows in the next chunk.
print.summary.dppm <- function(x, ...)
## (body of print.summary.dppm, whose header closes the previous chunk)
## Prints the fitting method details, then the trend model,
## then the determinantal structure and repulsion strength.
{
  terselevel <- spatstat.options('terse')
  digits <- getOption('digits')
  splat(if(x$stationary) "Stationary" else "Inhomogeneous",
        "determinantal point process model")
  if(waxlyrical('extras', terselevel) && nchar(x$Xname) < 20)
    splat("Fitted to point pattern dataset", sQuote(x$Xname))
  Fit <- x$Fit
  if(waxlyrical('gory', terselevel)) {
    ## describe how the model was fitted
    switch(Fit$method,
           mincon = {
             splat("Fitted by minimum contrast")
             splat("\tSummary statistic:", Fit$StatName)
             print(Fit$mcfit)
           },
           clik =,
           clik2 = {
             splat("Fitted by maximum second order composite likelihood")
             splat("\trmax =", Fit$rmax)
             if(!is.null(wtf <- Fit$weightfun)) {
               a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf)
               splat("\tweight function:", a)
             }
             printStatus(optimStatus(Fit$clfit))
           },
           palm = {
             splat("Fitted by maximum Palm likelihood")
             splat("\trmax =", Fit$rmax)
             if(!is.null(wtf <- Fit$weightfun)) {
               a <- attr(wtf, "selfprint") %orifnull% pasteFormula(wtf)
               splat("\tweight function:", a)
             }
             printStatus(optimStatus(Fit$clfit))
           },
           warning(paste("Unrecognised fitting method",
                         sQuote(Fit$method)))
           )
  }
  # ...............  trend  .........................
  parbreak()
  splat("----------- TREND MODEL -----")
  print(x$trend, ...)
  # ..................... determinantal part  ................
  parbreak()
  splat("---------- DETERMINANTAL STRUCTURE -----------------")
  print(x$fitted)
  parbreak()
  splat(if(x$stationary) "Strength" else "(Average) strength",
        "of repulsion:", signif(x$repul, digits))
  return(invisible(NULL))
}
spatstat/R/distances.psp.R0000644000176200001440000001124013333543254015222 0ustar liggesusers#
#  distances.psp.R
#
#  Hausdorff distance and Euclidean separation for psp objects
#
#  $Revision: 1.11 $ $Date: 2015/10/21 09:06:57 $
#
#

## Pairwise distances between all segments of a single psp object,
## under either the Hausdorff metric or the separation (shortest
## Euclidean) distance.  Symmetrised from the asymmetric endpoint
## distances computed by AsymmDistance.psp.
pairdist.psp <- function(X, ..., method="C", type="Hausdorff") {
  verifyclass(X, "psp")
  if(X$n == 0)
    return(matrix(, 0, 0))
  type <- pickoption("type", type,
                     c(Hausdorff="Hausdorff",
                       hausdorff="Hausdorff",
                       separation="separation"))
  D12 <- AsymmDistance.psp(X, X, metric=type, method=method)
  switch(type,
         Hausdorff={
           # maximum is Hausdorff metric
           D <- array(pmax.int(D12, t(D12)), dim=dim(D12))
         },
         separation={
           # Take minimum of endpoint-to-segment distances
           D <- array(pmin.int(D12, t(D12)), dim=dim(D12))
           # Identify any pairs of segments which cross
           cross <- test.selfcrossing.psp(X)
           # Assign separation = 0 to such pairs
           D[cross] <- 0
         })
  return(D)
}

## Distances between each segment of X and each segment of Y,
## same metrics as pairdist.psp; combines the two asymmetric
## distance matrices DXY and DYX.
crossdist.psp <- function(X, Y, ..., method="C", type="Hausdorff") {
  verifyclass(X, "psp")
  Y <- as.psp(Y)
  if(X$n * Y$n == 0)
    return(matrix(, X$n, Y$n))
  type <- pickoption("type", type,
                     c(Hausdorff="Hausdorff",
                       hausdorff="Hausdorff",
                       separation="separation"))
  DXY <- AsymmDistance.psp(X, Y, metric=type, method=method)
  DYX <- AsymmDistance.psp(Y, X, metric=type, method=method)
  switch(type,
         Hausdorff={
           # maximum is Hausdorff metric
           D <- array(pmax.int(DXY, t(DYX)), dim=dim(DXY))
         },
         separation={
           # Take minimum of endpoint-to-segment distances
           D <- array(pmin.int(DXY, t(DYX)), dim=dim(DXY))
           # Identify pairs of segments which cross
           cross <- test.crossing.psp(X, Y)
           # Assign separation = 0 to such pairs
           D[cross] <- 0
         })
  return(D)
}

## k-th nearest neighbour distances between segments of X,
## computed from the full pairwise distance matrix.
nndist.psp <- function(X, ..., k=1, method="C") {
  verifyclass(X, "psp")
  if(!(is.vector(k) && all(k %% 1 == 0) && all(k >= 1)))
    stop("k should be a positive integer or integers")
  n <- nobjects(X)
  kmax <- max(k)
  lenk <- length(k)
  result <- if(lenk == 1) numeric(n) else matrix(, nrow=n, ncol=lenk)
  if(n == 0) return(result)
  if(kmax >= n) {
    # not enough objects
    # fill with Infinite values
    result[] <- Inf
    ## NOTE(review): 'kmax < n' is always FALSE inside this branch
    ## (we are here because kmax >= n); presumably 'k < n' was
    ## intended, to recompute the achievable orders -- verify upstream.
    if(any(ok <- (kmax < n))) {
      # compute the lower-order nnd's
      result[, ok] <- nndist.psp(X, ..., k=k[ok], method=method)
    }
    return(result)
  }
  # normal case:
  D <- pairdist.psp(X, ..., method=method)
  diag(D) <- Inf   # exclude self-distances
  if(kmax == 1L)
    NND <- apply(D, 1L, min)
  else
    NND <- t(apply(D, 1L, orderstats, k=k))[, , drop=TRUE]
  return(NND)
}

# ..... AsymmDistance.psp .....
#
# If metric="Hausdorff":
#    this function computes, for each pair of segments A = X[i] and B = Y[j],
#    the value  max_{a in A} d(a,B) = max_{a in A} min_{b in B} ||a-b||
#    which appears in the definition of the Hausdorff metric.
#    Since the distance function d(a,B) of a segment B is a convex function,
#    the maximum is achieved at an endpoint of A. So the algorithm
#    actually computes  h(A,B) = max (d(e_1,B), d(e_2,B)) where e_1, e_2
#    are the endpoints of A. And H(A,B) = max(h(A,B),h(B,A)).
#
# If metric="separation":
#    the function computes, for each pair of segments A = X[i] and B = Y[j],
#    the MINIMUM distance from an endpoint of A to any point of B.
#         t(A,B) = min (d(e_1,B), d(e_2,B))
#    where e_1, e_2 are the endpoints of A.
#    Define the separation distance
#        s(A,B) = min_{a in A} min_{b in B} ||a-b||.
#    The minimum (a*, b*) occurs either when a* is an endpoint of A,
#    or when b* is an endpoint of B, or when a* = b* (so A and B intersect).
#    (If A and B are parallel, the minimum is still achieved at an endpoint)
#    Thus s(A,B) = min(t(A,B), t(B,A)) unless A and B intersect.
## Asymmetric endpoint-to-segment distance matrix between two psp
## objects, used by pairdist.psp/crossdist.psp (see comment block above
## for the two metrics).  Computes distances from every endpoint of X
## to every segment of Y, then aggregates the two endpoints of each
## segment by max (Hausdorff) or min (separation).
AsymmDistance.psp <- function(X, Y, metric="Hausdorff",
                              method=c("C", "Fortran", "interpreted")) {
  method <- match.arg(method)
  # Extract endpoints of X
  EX <- endpoints.psp(X, "both")
  idX <- attr(EX, "id")    # which segment of X each endpoint belongs to
  # compute shortest dist from each endpoint of X to each segment of Y
  DPL <- distppll(cbind(EX$x,EX$y), Y$ends, mintype=0, method=method)
  # for each segment in X, maximise or minimise over the two endpoints
  Dist <- as.vector(DPL)
  Point <- as.vector(idX[row(DPL)])
  Segment <- as.vector(col(DPL))
  switch(metric,
         Hausdorff={
           DXY <- tapply(Dist, list(factor(Point), factor(Segment)), max)
         },
         separation={
           DXY <- tapply(Dist, list(factor(Point), factor(Segment)), min)
         })
  return(DXY)
}
spatstat/R/idw.R0000644000176200001440000001230713602774005013233 0ustar liggesusers#
#   idw.R
#
#  Inverse-distance weighted smoothing
#
#  $Revision: 1.12 $ $Date: 2020/01/01 01:30:57 $

## Inverse-distance weighted interpolation of the marks of X,
## either on a pixel grid (at="pixels") or at the data points
## themselves by leave-one-out (at="points"); optionally returns a
## standard error surface (se=TRUE).  Heavy lifting is done in C.
idw <- function(X, power=2, at=c("pixels", "points"), ..., se=FALSE) {
  stopifnot(is.ppp(X) && is.marked(X))
  at <- match.arg(at)
  marx <- marks(X)
  if(is.data.frame(marx)) {
    if((nc <- ncol(marx)) > 1) {
      ## multiple columns of marks - process one-by-one
      each <- vector(mode="list", length=nc)
      for(j in 1:nc)
        each[[j]] <- idw(X %mark% marx[,j],
                         power=power, at=at, ..., se=se)
      names(each) <- colnames(marx)
      ##
      if(!se) {
        ## estimates only
        switch(at,
               pixels = {
                 out <- as.solist(each)
               },
               points = {
                 out <- as.data.frame(each)
               }
               )
      } else {
        ## estimates and standard errors
        est <- lapply(each, getElement, name="estimate")
        SE <- lapply(each, getElement, name="SE")
        switch(at,
               pixels = {
                 out <- list(estimate = as.solist(est),
                             SE       = as.solist(SE))
               },
               points = {
                 out <- list(estimate = as.data.frame(est),
                             SE       = as.data.frame(SE))
               })
      }
      return(out)
    } else marx <- marx[,1L]
  }
  if(!is.numeric(marx))
    stop("Marks must be numeric")
  check.1.real(power)
  switch(at,
         pixels = {
           ## create grid
           W <- as.mask(as.owin(X), ...)
           dim <- W$dim
           npixels <- prod(dim)
           ## call C
           if(!se) {
             z <- .C("Cidw",
                     x = as.double(X$x),
                     y = as.double(X$y),
                     v = as.double(marx),
                     n = as.integer(npoints(X)),
                     xstart = as.double(W$xcol[1L]),
                     xstep = as.double(W$xstep),
                     nx = as.integer(dim[2L]),
                     ystart = as.double(W$yrow[1L]),
                     ystep = as.double(W$ystep),
                     ny = as.integer(dim[1L]),
                     power = as.double(power),
                     num = as.double(numeric(npixels)),
                     den = as.double(numeric(npixels)),
                     rat = as.double(numeric(npixels)),
                     PACKAGE = "spatstat")
             out <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W)
             out <- out[W, drop=FALSE]
           } else {
             ## variant also returning weighted second moments for SE
             z <- .C("Cidw2",
                     x = as.double(X$x),
                     y = as.double(X$y),
                     v = as.double(marx),
                     n = as.integer(npoints(X)),
                     xstart = as.double(W$xcol[1L]),
                     xstep = as.double(W$xstep),
                     nx = as.integer(dim[2L]),
                     ystart = as.double(W$yrow[1L]),
                     ystep = as.double(W$ystep),
                     ny = as.integer(dim[1L]),
                     power = as.double(power),
                     num = as.double(numeric(npixels)),
                     den = as.double(numeric(npixels)),
                     rat = as.double(numeric(npixels)),
                     mtwo = as.double(numeric(npixels)),
                     wtwo = as.double(numeric(npixels)),
                     PACKAGE = "spatstat")
             est <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W)
             est <- est[W, drop=FALSE]
             sumw <- z$den
             sumw2 <- z$wtwo
             m2 <- z$mtwo
             ## weighted-variance denominator; non-positive -> undefined
             varden <- sumw - sumw2/sumw
             varden[varden <= 0] <- NA
             SE <- sqrt(m2/varden)
             SE <- as.im(matrix(SE, dim[1L], dim[2L]), W=W)
             SE <- SE[W, drop=FALSE]
             out <- solist(estimate=est, SE=SE)
           }
         },
         points={
           ## leave-one-out estimates at the data points
           npts <- npoints(X)
           if(!se) {
             z <- .C("idwloo",
                     x = as.double(X$x),
                     y = as.double(X$y),
                     v = as.double(marx),
                     n = as.integer(npts),
                     power = as.double(power),
                     num = as.double(numeric(npts)),
                     den = as.double(numeric(npts)),
                     rat = as.double(numeric(npts)),
                     PACKAGE = "spatstat")
             out <- z$rat
           } else {
             z <- .C("idwloo2",
                     x = as.double(X$x),
                     y = as.double(X$y),
                     v = as.double(marx),
                     n = as.integer(npts),
                     power = as.double(power),
                     num = as.double(numeric(npts)),
                     den = as.double(numeric(npts)),
                     rat = as.double(numeric(npts)),
                     mtwo = as.double(numeric(npts)),
                     wtwo = as.double(numeric(npts)),
                     PACKAGE = "spatstat")
             est <- z$rat
             sumw <- z$den
             sumw2 <- z$wtwo
             m2 <- z$mtwo
             varden <- sumw - sumw2/sumw
             varden[varden <= 0] <- NA
             SE <- sqrt(m2/varden)
             out <- list(estimate=est, SE=SE)
           }
         })
  return(out)
}
spatstat/R/bw.optim.R0000644000176200001440000000734213544362122014210 0ustar liggesusers#
#   bw.optim.R
#
#  Class of optimised bandwidths
#  Plotting the object displays the optimisation criterion
#
#  $Revision: 1.30 $ $Date: 2019/09/30 11:17:34 $
#

## Constructor for objects of class "bw.optim": the optimal bandwidth
## h[iopt] carrying the full (h, cv) search profile as attributes.
## Warns if the optimum occurs at either end of the search interval.
bw.optim <- function(cv, h, iopt=which.min(cv), ...,
                     cvname, hname,
                     criterion="cross-validation",
                     warnextreme=TRUE, hargnames=NULL,
                     unitname=NULL) {
  if(missing(cvname) || is.null(cvname)) cvname <- deparse(substitute(cv))
  if(missing(hname) || is.null(hname)) hname <- deparse(substitute(h))
  stopifnot(is.numeric(cv))
  stopifnot(is.numeric(h))
  stopifnot(length(h) == length(cv))
  if(warnextreme && (iopt == length(h) || iopt == 1))
    warning(paste(criterion,
                  "criterion was optimised at",
                  if(iopt == 1) "left-hand" else "right-hand",
                  "end of interval",
                  paste0(prange(signif(range(h), 3)), ";"),
                  "use",
                  ngettext(length(hargnames), "argument", "arguments"),
                  paste(sQuote(hargnames), collapse=", "),
                  "to specify a wider interval for bandwidth",
                  sQuote(hname)),
            call.=FALSE)
  result <- h[iopt]
  attr(result, "cv") <- cv
  attr(result, "h") <- h
  attr(result, "iopt") <- iopt
  attr(result, "labels") <- list(hname=hname, cvname=cvname)
  attr(result, "info") <- list(...)
  attr(result, "criterion") <- criterion
  attr(result, "units") <- unitname
  class(result) <- "bw.optim"
  return(result)
}

## Print method: show the selected bandwidth, labelled by its name.
print.bw.optim <- function(x, ...) {
  y <- as.numeric(x)
  names(y) <- attr(x, "labels")$hname
  print(y, ...)
  return(invisible(NULL))
}

## Convert the search profile to a data frame (h, cv, extra info);
## header closes this chunk, body follows in the next.
as.data.frame.bw.optim <- function(x, ...)
## (body of as.data.frame.bw.optim, whose header closes the previous
## chunk) Assembles the (h, cv) search profile, appending any stored
## 'info' vectors of matching length.
{
  h <- attr(x, "h")
  cv <- attr(x, "cv")
  df <- data.frame(h, cv)
  labels <- attr(x, "labels")
  colnames(df) <- labels[c("hname", "cvname")]
  info <- attr(x, "info")
  if(length(info) > 0) {
    lenfs <- lengths(info)
    ## append only components conformable with the profile
    if(any(ok <- (lenfs == nrow(df)))) {
      df <- cbind(df, as.data.frame(info[ok]))
    }
  }
  return(df)
}

## Convert a bw.optim object to an fv (function value table) object,
## so the criterion curve can be plotted with plot.fv.
as.fv.bw.optim <- function(x) {
  # convert to fv object
  df <- as.data.frame(x)
  dfnames <- colnames(df)
  hname <- dfnames[1L]
  cvname <- dfnames[2L]
  descrip <- c("smoothing parameter",
               paste(attr(x, "criterion"), "criterion"))
  if(ncol(df) > 2)
    descrip <- c(descrip,
                 paste("Additional variable", sQuote(dfnames[-(1:2)])))
  labl <- c(hname, paste0(dfnames[-1L], paren(hname)))
  yexp <- substitute(CV(h),
                     list(CV=as.name(cvname), h=as.name(hname)))
  xfv <- fv(df,
            argu=hname,
            ylab=yexp,
            valu=cvname,
            labl=labl,
            desc=descrip,
            fname=cvname,
            yexp=yexp)
  fvnames(xfv, ".") <- cvname
  unitname(xfv) <- unitname(x)
  return(xfv)
}

## Plot the optimisation criterion against bandwidth, optionally
## marking the selected optimum with a vertical line.
plot.bw.optim <- function(x, ...,
                          showopt=TRUE, optargs=list(lty=3, col="blue")) {
  xname <- short.deparse(substitute(x))
  # convert to fv object
  xfv <- as.fv(x)
  # plot cross-validation criterion
  out <- do.call(plot.fv,
                 resolve.defaults(list(x=xfv),
                                  list(...),
                                  list(main=xname)))
  # Turn off 'showopt' if the x-variable is not the bandwidth
  if(missing(showopt)) {
    argh <- list(...)
    isfmla <- unlist(lapply(argh, inherits, what="formula"))
    if(any(isfmla)) {
      fmla <- argh[[min(which(isfmla))]]
      xvar <- deparse(rhs.of.formula(fmla, tilde=FALSE))
      if(!(identical(xvar, fvnames(xfv, ".x")) || identical(xvar, ".x")))
        showopt <- FALSE
    }
  }
  # show optimal value?
  if(showopt) {
    hoptim <- as.numeric(x)
    if(spatstat.options('monochrome'))
      optargs <- col.args.to.grey(optargs)
    do.call(abline, append(list(v=hoptim), optargs))
  }
  if(is.null(out)) return(invisible(NULL))
  return(out)
}
spatstat/R/affine.R0000644000176200001440000002470713433754666013720 0ustar liggesusers#
#	affine.R
#
#	$Revision: 1.53 $	$Date: 2019/02/22 10:36:51 $
#

## Apply the affine map Y = M X + V to a list of coordinates (x, y);
## with invert=TRUE, apply the inverse map instead.
affinexy <- function(X, mat=diag(c(1,1)), vec=c(0,0), invert=FALSE) {
  if(length(X$x) == 0 && length(X$y) == 0)
    return(list(x=numeric(0),y=numeric(0)))
  if(invert) {
    ## invert the transformation: X = M^{-1} (Y - V)
    mat <- invmat <- solve(mat)
    vec <- - as.numeric(invmat %*% vec)
  }
  # Y = M X + V
  ans <- mat %*% rbind(X$x, X$y) + matrix(vec, nrow=2L, ncol=length(X$x))
  return(list(x = ans[1L,],
              y = ans[2L,]))
}

## Apply an affine map to a polygon structure, rescaling its stored
## area by the determinant and restoring cyclic orientation if the
## map is orientation-reversing (negative determinant).
affinexypolygon <- function(p, mat=diag(c(1,1)), vec=c(0,0),
                             detmat=det(mat)) {
  # transform (x,y)
  p[c("x","y")] <- affinexy(p, mat=mat, vec=vec)
  # transform area
  if(!is.null(p$area))
    p$area <- p$area * detmat
  # if map has negative sign, cyclic order was reversed; correct it
  if(detmat < 0)
    p <- reverse.xypolygon(p, adjust=TRUE)
  return(p)
}

## Generic affine transformation; body follows in the next chunk.
"affine" <- function(X, ...)
## (dispatch body of the "affine" generic, whose header closes the
## previous chunk)
{
  UseMethod("affine")
}

## Affine transformation of a window (rectangle, polygon or mask).
## Diagonal maps of rectangles stay rectangles; otherwise rectangles
## are converted to polygons first.  Masks are transformed by mapping
## pixel centres back through the inverse transformation.
"affine.owin" <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...,
                          rescue=TRUE) {
  verifyclass(X, "owin")
  vec <- as2vector(vec)
  if(!is.matrix(mat) || any(dim(mat) != c(2,2)))
    stop(paste(sQuote("mat"), "should be a 2 x 2 matrix"))
  diagonalmatrix <- all(mat == diag(diag(mat)))
  ## a uniform scaling preserves the unit of length
  scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1)
  newunits <- if(scaletransform) unitname(X) else as.unitname(NULL)
  #
  switch(X$type,
         rectangle={
           if(diagonalmatrix) {
             # result is a rectangle
             Y <- owin(range(mat[1L,1L] * X$xrange + vec[1L]),
                       range(mat[2L,2L] * X$yrange + vec[2L]))
             unitname(Y) <- newunits
             return(Y)
           } else {
             # convert rectangle to polygon
             P <- as.polygonal(X)
             # call polygonal case
             return(affine.owin(P, mat, vec, rescue=rescue))
           }
         },
         polygonal={
           # Transform the polygonal boundaries
           bdry <- lapply(X$bdry, affinexypolygon, mat=mat, vec=vec,
                          detmat=det(mat))
           # Compile result
           W <- owin(poly=bdry, check=FALSE, unitname=newunits)
           # Result might be a rectangle: if so, convert to rectangle type
           if(rescue)
             W <- rescue.rectangle(W)
           return(W)
         },
         mask={
           # binary mask
           if(sqrt(abs(det(mat))) < .Machine$double.eps)
             stop("Matrix of linear transformation is singular")
           newframe <- boundingbox(affinexy(corners(X), mat, vec))
           W <- if(length(list(...)) > 0) as.mask(newframe, ...) else
                as.mask(newframe, eps=with(X, min(xstep, ystep)))
           pixelxy <- rasterxy.mask(W)
           ## map new pixel centres back to the original window
           xybefore <- affinexy(pixelxy, mat, vec, invert=TRUE)
           W$m[] <- with(xybefore, inside.owin(x, y, X))
           W <- intersect.owin(W, boundingbox(W))
           if(rescue)
             W <- rescue.rectangle(W)
           return(W)
         },
         stop("Unrecognised window type")
         )
}

## Affine transformation of a point pattern: transform the points
## and the window, preserving marks.
"affine.ppp" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) {
  verifyclass(X, "ppp")
  vec <- as2vector(vec)
  r <- affinexy(X, mat, vec)
  w <- affine.owin(X$window, mat, vec, ...)
  return(ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE))
}

## Affine transformation of a pixel image.
## Diagonal maps are applied directly to the row/column coordinates;
## general maps resample the original image at the preimages of the
## new pixel centres.
"affine.im" <- function(X,  mat=diag(c(1,1)), vec=c(0,0), ...)
{
  verifyclass(X, "im")
  vec <- as2vector(vec)
  if(!is.matrix(mat) || any(dim(mat) != c(2,2)))
    stop(paste(sQuote("mat"), "should be a 2 x 2 matrix"))
  # Inspect the determinant
  detmat <- det(mat)
  if(sqrt(abs(detmat)) < .Machine$double.eps)
    stop("Matrix of linear transformation is singular")
  #
  diagonalmatrix <- all(mat == diag(diag(mat)))
  scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1L)
  newunits <- if(scaletransform) unitname(X) else as.unitname(NULL)
  newpixels <- (length(list(...)) > 0)
  #
  if(diagonalmatrix && !newpixels) {
    # diagonal matrix: apply map to row and column locations
    v      <- X$v
    d      <- X$dim
    newbox <- affine(as.rectangle(X), mat=mat, vec=vec)
    xscale <- diag(mat)[1L]
    yscale <- diag(mat)[2L]
    xcol <- xscale * X$xcol + vec[1L]
    yrow <- yscale * X$yrow + vec[2L]
    if(xscale < 0) {
      # x scale is negative: reverse columns
      xcol <- rev(xcol)
      v <- v[, (d[2L]:1)]
    }
    if(yscale < 0) {
      # y scale is negative: reverse rows
      yrow <- rev(yrow)
      v <- v[(d[1L]:1), ]
    }
    Y <- im(v, xcol=xcol, yrow=yrow,
            xrange=newbox$xrange, yrange=newbox$yrange,
            unitname=newunits)
  } else {
    # general case
    # create box containing transformed image
    newframe <- boundingbox(affinexy(corners(X), mat, vec))
    W <- if(length(list(...)) > 0) as.mask(newframe, ...) else
         as.mask(newframe, eps=with(X, min(xstep, ystep)))
    unitname(W) <- newunits
    # raster for transformed image
    naval <- switch(X$type,
                    factor= ,
                    integer = NA_integer_,
                    logical = as.logical(NA_integer_),
                    real = NA_real_,
                    complex = NA_complex_,
                    character = NA_character_,
                    NA)
    Y <- as.im(W, value=naval)
    # preimages of pixels of transformed image
    xx <- as.vector(rasterx.im(Y))
    yy <- as.vector(rastery.im(Y))
    pre <- affinexy(list(x=xx, y=yy), mat, vec, invert=TRUE)
    # sample original image
    if(X$type != "factor") {
      Y$v[] <- lookup.im(X, pre$x, pre$y, naok=TRUE)
    } else {
      ## factor images: look up integer codes, then restore levels
      lab <- levels(X)
      lev <- seq_along(lab)
      Y$v[] <- lookup.im(eval.im(as.integer(X)), pre$x, pre$y, naok=TRUE)
      Y <- eval.im(factor(Y, levels=lev, labels=lab))
    }
  }
  return(Y)
}

### ---------------------- reflect ----------------------------------

## Generic: reflection through the origin.
reflect <- function(X) {
  UseMethod("reflect")
}

## Default method: reflection is the affine map with matrix -I.
reflect.default <- function(X) {
  affine(X, mat=diag(c(-1,-1)))
}

## Image method: reverse the pixel array and negate the coordinate
## ranges directly, avoiding resampling.
reflect.im <- function(X) {
  stopifnot(is.im(X))
  out <- with(X,
              list(v      = v[dim[1L]:1, dim[2L]:1],
                   dim    = dim,
                   xrange = rev(-xrange),
                   yrange = rev(-yrange),
                   xstep  = xstep,
                   ystep  = ystep,
                   xcol   = rev(-xcol),
                   yrow   = rev(-yrow),
                   type   = type,
                   units  = units))
  class(out) <- "im"
  return(out)
}

### ---------------------- shift ----------------------------------

## Generic vector translation; dispatch body follows in the next chunk.
"shift" <- function(X, ...)
{ UseMethod("shift") }

# Translate a coordinate list (components $x, $y) by the vector 'vec'.
# A NULL displacement is tolerated with a warning and treated as zero.
shiftxy <- function(X, vec=c(0,0)) {
  if(is.null(vec)) {
    warning("Null displacement vector; treated as zero")
    return(X)
  }
  list(x = X$x + vec[1L],
       y = X$y + vec[2L])
}

# Translate one polygon component of an owin boundary
shiftxypolygon <- function(p, vec=c(0,0)) {
  # transform (x,y), retaining other data
  p[c("x","y")] <- shiftxy(p, vec=vec)
  return(p)
}

# shift method for windows.
# If 'origin' is given, it overrides 'vec': the window is translated
# so that the designated origin point moves to (0,0).
shift.owin <- function(X, vec=c(0,0), ..., origin=NULL) {
  verifyclass(X, "owin")
  if(!is.null(origin)) {
    if(!missing(vec))
      warning("argument vec ignored; argument origin has precedence")
    locn <- interpretAsOrigin(origin, X)
    vec <- -locn
  }
  vec <- as2vector(vec)
  # Shift the bounding box
  X$xrange <- X$xrange + vec[1L]
  X$yrange <- X$yrange + vec[2L]
  switch(X$type,
         rectangle={ },
         polygonal={
           # Shift the polygonal boundaries
           X$bdry <- lapply(X$bdry, shiftxypolygon, vec=vec)
         },
         mask={
           # Shift the pixel coordinates
           X$xcol <- X$xcol + vec[1L]
           X$yrow <- X$yrow + vec[2L]
           # That's all --- the mask entries are unchanged
         },
         stop("Unrecognised window type")
         )
  # tack on shift vector (read back later by getlastshift)
  attr(X, "lastshift") <- vec
  # units are unchanged
  return(X)
}

# shift method for point patterns: shift the coordinates and the window,
# preserving marks. Same vec/origin precedence as shift.owin.
shift.ppp <- function(X, vec=c(0,0), ..., origin=NULL) {
  verifyclass(X, "ppp")
  if(!is.null(origin)) {
    if(!missing(vec))
      warning("argument vec ignored; argument origin has precedence")
    locn <- interpretAsOrigin(origin, Window(X))
    vec <- -locn
  }
  vec <- as2vector(vec)
  # perform shift
  r <- shiftxy(X, vec)
  w <- shift.owin(X$window, vec)
  Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE)
  # tack on shift vector
  attr(Y, "lastshift") <- vec
  return(Y)
}

# Retrieve the displacement recorded by the most recent shift().
# Used internally (e.g. by scalardilate) to undo or reuse a shift.
getlastshift <- function(X) {
  v <- attr(X, "lastshift")
  if(is.null(v))
    stop(paste("Internal error: shifted object of class",
               sQuote(as.character(class(X))[1L]),
               "does not have \"lastshift\" attribute"),
         call.=FALSE)
  if(!(is.numeric(v) && length(v) == 2L))
    stop("Internal error: \"lastshift\" attribute is not a vector",
         call.=FALSE)
  return(v)
}

# Record a displacement vector on an object (counterpart of getlastshift)
putlastshift <- function(X, vec) {
  attr(X, "lastshift") <- vec
  return(X)
}

# Convert an 'origin' specification (keyword string or coordinate pair)
# into a numeric (x,y) location inside/relative to the window W
interpretAsOrigin <- function(x, W) {
if(is.character(x)) {
    # keyword form: map a position name to a point of the frame of W
    x <- paste(x, collapse="")
    x <- match.arg(x, c("centroid", "midpoint",
                        "left", "right", "top", "bottom",
                        "bottomleft", "bottomright",
                        "topleft", "topright"))
    W <- as.owin(W)
    xr <- W$xrange
    yr <- W$yrange
    x <- switch(x,
                centroid = { unlist(centroid.owin(W)) },
                midpoint = { c(mean(xr), mean(yr)) },
                left = { c(xr[1L], mean(yr)) },
                right = { c(xr[2L], mean(yr)) },
                top = { c(mean(xr), yr[2L]) },
                bottom = { c(mean(xr), yr[1L]) },
                bottomleft = { c(xr[1L], yr[1L]) },
                bottomright = { c(xr[2L], yr[1L]) },
                topleft = { c(xr[1L], yr[2L]) },
                topright = { c(xr[2L], yr[2L]) },
                stop(paste("Unrecognised option",sQuote(x)), call.=FALSE))
  }
  # numeric form falls through unchanged
  return(as2vector(x))
}

### ---------------------- scalar dilation ---------------------------------

# Multiply all coordinates by a scalar factor f > 0: generic
scalardilate <- function(X, f, ...) {
  UseMethod("scalardilate")
}

# Default method: dilation about (0,0) via the affine map f*I
scalardilate.default <- function(X, f, ...) {
  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
  check.1.real(f, "In scalardilate(X,f)")
  stopifnot(is.finite(f) && f > 0)
  Y <- affine(X, mat=diag(c(f,f)))
  return(Y)
}

# Shared method for im/owin/psp/ppp: optionally dilate about a
# specified 'origin' point rather than (0,0). The object is first
# shifted so that 'origin' lies at (0,0) (shift records the
# displacement), then dilated and translated back via vec = -negorig.
scalardilate.im <- scalardilate.owin <- scalardilate.psp <-
  scalardilate.ppp <- function(X, f, ..., origin=NULL) {
  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
  check.1.real(f, "In scalardilate(X,f)")
  stopifnot(is.finite(f) && f > 0)
  if(!is.null(origin)) {
    X <- shift(X, origin=origin)
    negorig <- getlastshift(X)
  } else negorig <- c(0,0)
  Y <- affine(X, mat=diag(c(f, f)), vec = -negorig)
  return(Y)
}
spatstat/R/boundingcircle.R0000644000176200001440000000246013333543254015437 0ustar liggesusers#'
#' boundingcircle.R
#'
#' bounding circle and its centre
#'
#' $Revision: 1.6 $ $Date: 2017/06/05 10:31:58 $
#'

# Deprecated alias for boundingradius.
# Note: UseMethod("boundingradius") dispatches to boundingradius.* methods.
circumradius <- function(x, ...) {
  .Deprecated("boundingradius")
  UseMethod("boundingradius")
}

# Deprecated alias for boundingradius.owin
circumradius.owin <- function(x, ...) {
  .Deprecated("boundingradius.owin")
  boundingradius.owin(x, ...)
}

# Deprecated alias for boundingradius.ppp
circumradius.ppp <- function(x, ...) {
  .Deprecated("boundingradius.ppp")
  boundingradius.ppp(x, ...)
}

# Radius of the smallest enclosing circle: generic
boundingradius <- function(x, ...)
{
  UseMethod("boundingradius")
}

# Centre of the smallest enclosing circle: generic
boundingcentre <- function(x, ...) {
  UseMethod("boundingcentre")
}

# Smallest enclosing circle (as a disc window): generic
boundingcircle <- function(x, ...) {
  UseMethod("boundingcircle")
}

#' owin

# Bounding radius of a window: minimise, over candidate centres,
# the maximum (squared) distance to the window
boundingradius.owin <- function(x, ...) {
  sqrt(min(fardist(x, ..., squared=TRUE)))
}

# Bounding centre of a window: location minimising the far distance;
# result is a point pattern whose window is x itself
boundingcentre.owin <- function(x, ...) {
  z <- where.min(fardist(x, ..., squared=TRUE))
  Window(z) <- x
  return(z)
}

# Bounding circle of a window: disc at the bounding centre with the
# bounding radius
boundingcircle.owin <- function(x, ...) {
  d2 <- fardist(x, ..., squared=TRUE)
  z <- where.min(d2)
  r <- sqrt(min(d2))
  w <- disc(centre=z, radius=r)
  return(w)
}

#' ppp

# Point-pattern methods reduce to the convex hull (the bounding circle
# of a point set depends only on its hull)
boundingradius.ppp <- function(x, ...) {
  boundingradius(convexhull(x), ...)
}

boundingcentre.ppp <- function(x, ...) {
  z <- boundingcentre(convexhull(x), ...)
  Window(z) <- Window(x)
  return(z)
}

boundingcircle.ppp <- function(x, ...) {
  boundingcircle(convexhull(x), ...)
}
spatstat/R/randomonlines.R0000644000176200001440000001425213333543255015323 0ustar liggesusers#
# randomOnLines.R
#
# $Revision: 1.9 $ $Date: 2018/05/06 17:49:44 $
#
# Generate random points on specified lines
#

# Generate n uniform random points on a line segment pattern L.
# A named vector n produces a marked pattern with one mark level per name.
# Returns a ppp (or a list of nsim patterns; see simulationresult).
runifpointOnLines <- function(n, L, nsim=1, drop=TRUE) {
  if(!is.numeric(n) || any(n < 0) || any(n %% 1 != 0))
    stop("n should be a nonnegative integer or integers")
  if(!is.psp(L))
    L <- as.psp(L)
  W <- as.owin(L)
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    X <- datagen.runifpointOnLines(n, L)
    Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE)
    result[[i]] <- Y
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

# Workhorse for runifpointOnLines: returns a data frame with columns
# x, y, seg (segment index), tp (parametric position in [0,1] along
# the segment) and, for marked patterns, marks.
datagen.runifpointOnLines <- function(n, L) {
  stopifnot(is.psp(L))
  m <- length(n)
  ismarked <- (m > 1)
  if(m == 0 || (m == 1 && n == 0))
    return(data.frame(x=numeric(0), y=numeric(0),
                      seg=integer(0), tp=numeric(0)))
  # extract segment information
  len <- lengths.psp(L)
  sumlen <- sum(len)
  cumlen <- cumsum(len)
  cum0len <- c(0, cumlen)
  Ldf <- as.data.frame(L)
  x0 <- with(Ldf, x0)
  y0 <- with(Ldf, y0)
  dx <- with(Ldf, x1-x0)
  dy <- with(Ldf, y1-y0)
  # determine mark space
  if(ismarked) {
    markvalues <- names(n)
    if(sum(nzchar(markvalues))
< m) markvalues <- paste(1:m)
  }
  # initialise output data.frame
  out <- data.frame(x=numeric(0), y=numeric(0),
                    seg=integer(0), tp=numeric(0))
  if(ismarked)
    out <- cbind(out, data.frame(marks=character(0)))
  # generate points of each mark in turn
  for(j in 1:m) {
    if(n[[j]] > 0) {
      # generate random positions, uniform on total length of network
      uu <- runif(n[[j]], min=0, max=sumlen)
      # identify segment for each point
      kk <- findInterval(uu, cum0len,
                         rightmost.closed=TRUE, all.inside=TRUE)
      # parametric position along segment
      tt <- (uu - cum0len[kk])/len[kk]
      # guard against 0/0 on zero-length segments
      tt[!is.finite(tt)] <- 0
      # convert to (x,y)
      x <- x0[kk] + tt * dx[kk]
      y <- y0[kk] + tt * dy[kk]
      # assemble result
      if(!ismarked) {
        out <- data.frame(x=x, y=y, seg=kk, tp=tt)
      } else {
        outj <- data.frame(x=x, y=y, seg=kk, tp=tt, marks=markvalues[j])
        out <- rbind(out, outj)
      }
    }
  }
  if(ismarked)
    out$marks <- factor(out$marks, levels=markvalues)
  return(out)
}

# Generate a realisation of a uniform Poisson process on the segments
# of L with constant intensity lambda (points per unit length).
# lambda may be a named vector, giving one intensity per mark level.
runifpoisppOnLines <- function(lambda, L, nsim=1, drop=TRUE) {
  # BUG FIX: was !all(is.finite(lambda) && (lambda >= 0)).
  # The scalar operator '&&' only inspected lambda[1] (and is an error
  # for vectors of length > 1 in R >= 4.3), so invalid entries of a
  # vector-valued (marked) intensity escaped validation.
  # Use elementwise '&' so every component is checked.
  if(!is.numeric(lambda) || !all(is.finite(lambda) & (lambda >= 0)))
    stop("lambda should be a finite, nonnegative number or numbers")
  if(!is.psp(L))
    L <- as.psp(L)
  W <- as.owin(L)
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    X <- datagen.runifpoisppOnLines(lambda, L)
    Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE)
    result[[i]] <- Y
  }
  result <- simulationresult(result, nsim, drop)
  return(result)
}

# Workhorse for runifpoisppOnLines: draw Poisson counts with mean
# intensity * total length (one count per mark level), then place
# that many uniform points.
datagen.runifpoisppOnLines <- function(lambda, L) {
  stopifnot(is.psp(L))
  mu <- lambda * sum(lengths.psp(L))
  # rpois with a vector first argument draws length(mu) counts
  n <- rpois(rep.int(1, length(mu)), mu)
  if(length(n) > 1)
    names(n) <- names(lambda)
  df <- datagen.runifpointOnLines(n, L)
  return(df)
}

# Generate an inhomogeneous Poisson process on the segments of L.
# lambda may be numeric, a function(x,y), an image, or lists thereof
# (one entry per mark); lmax is an optional uniform upper bound used
# for rejection sampling (computed automatically if omitted).
rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., nsim=1, drop=TRUE) {
  if(!is.psp(L))
    L <- as.psp(L)
  W <- as.owin(L)
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    X <- datagen.rpoisppOnLines(lambda, L, lmax=lmax, ...)
Y <- ppp(X$x, X$y, marks=X$marks, window=W, check=FALSE) result[[i]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } datagen.rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., check=TRUE) { stopifnot(is.psp(L)) if(is.numeric(lambda)) return(datagen.runifpoisppOnLines(lambda, L)) # ensure lambda is a list if(is.function(lambda) || is.im(lambda)) lambda <- list(lambda) m <- length(lambda) # determine type of argument argtype <- if(all(unlist(lapply(lambda, is.im)))) "im" else if(all(unlist(lapply(lambda, is.function)))) "function" else stop(paste(sQuote("lambda"), "must be a numeric vector, a function, an image,", "a list of functions, or a list of images")) # check values of lambda if(argtype == "im") { for(j in seq_len(m)) { lamj <- lambda[[j]] if(!(lamj$type %in% c("real", "integer"))) stop("lambda must be numeric-valued or integer-valued") lrange <- range(lamj) if(any(is.infinite(lrange))) stop("Infinite pixel values not permitted") if(lrange[1] < 0) stop("Negative pixel values not permitted") } } # determine uniform bound if(!is.null(lmax)) { stopifnot(is.numeric(lmax)) if(length(lmax) != m) { if(length(lmax) == 1) { lmax <- rep.int(lmax, m) } else stop("Length of lmax does not match length of lambda") } } else { # compute lmax lmax <- numeric(m) for(j in seq_len(m)) { lamj <- lambda[[j]] if(is.function(lamj)) { X <- pointsOnLines(L, np=10000) lambdaX <- lamj(X$x, X$y, ...) lmax[j] <- max(lambdaX, na.rm=TRUE) } else if(is.im(lamj)) lmax[j] <- max(lamj) } if(!all(is.finite(lmax))) stop("Infinite values of lambda obtained") if(any(lmax < 0)) stop("Negative upper bound for lambda obtained") names(lmax) <- names(lambda) } # Lewis-Shedler (rejection) method Y <- datagen.runifpoisppOnLines(lmax, L) n <- nrow(Y) if(n == 0) return(Y) # evaluate lambda at each simulated point if(m == 1) { lambda <- lambda[[1]] markindex <- 1 if(is.function(lambda)) lambdaY <- lambda(Y$x, Y$y, ...) 
else lambdaY <- safelookup(lambda, as.ppp(Y, W=as.owin(L))) } else { lambdaY <- numeric(n) markindex <- as.integer(Y$marks) for(j in seq_len(m)) { lamj <- lambda[[j]] jrows <- (markindex == j) Yj <- Y[jrows, , drop=FALSE] if(is.function(lamj)) lambdaY[jrows] <- lamj(Yj$x, Yj$y, ...) else lambdaY[jrows] <- safelookup(lamj, as.ppp(Yj, W=as.owin(L))) } } lambdaY[is.na(lambdaY)] <- 0 # accept/reject pY <- lambdaY/lmax[markindex] if(check) { if(any(pY < 0)) warning("Negative values of lambda obtained") if(any(pY > 1)) warning("lmax is not an upper bound for lambda") } retain <- (runif(n) < pY) Y <- Y[retain, , drop=FALSE] return(Y) } spatstat/R/fitted.ppm.R0000644000176200001440000001257313333543255014531 0ustar liggesusers# # fitted.ppm.R # # method for 'fitted' for ppm objects # # $Revision: 1.18 $ $Date: 2018/03/19 14:29:48 $ # fitted.ppm <- function(object, ..., type="lambda", dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE, drop=FALSE, check=TRUE, repair=TRUE, ignore.hardcore=FALSE, dropcoef=FALSE) { verifyclass(object, "ppm") if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(leaveoneout) { ## Leave-one-out calculation for data points only if(missing(dataonly)) dataonly <- TRUE if(!dataonly) stop("Leave-one-out calculation requires dataonly=TRUE") if(!is.null(new.coef)) stop("Leave-one-out calculation requires new.coef=NULL") } coeffs <- adaptcoef(new.coef, coef(object), drop=dropcoef) uniform <- is.poisson.ppm(object) && no.trend.ppm(object) typelist <- c("lambda", "cif", "trend", "link") typevalu <- c("lambda", "lambda", "trend", "link") if(is.na(m <- pmatch(type, typelist))) stop(paste("Unrecognised choice of ", sQuote("type"), ": ", sQuote(type), sep="")) type <- typevalu[m] if(uniform) { lambda <- exp(coeffs[[1L]]) Q <- quad.ppm(object, drop=drop) lambda <- rep.int(lambda, n.quad(Q)) } else { 
glmdata <- getglmdata(object, drop=drop) glmfit <- getglmfit(object) Vnames <- object$internal$Vnames interacting <- (length(Vnames) != 0) # Modification of `glmdata' may be required if(interacting) switch(type, trend={ ## zero the interaction statistics glmdata[ , Vnames] <- 0 }, link=, lambda={ if(!ignore.hardcore) { ## Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) ## Exclude these locations from predict.glm glmdata <- glmdata[!forbid, ] } else { ## Compute positive part of cif Q <- quad.ppm(object, drop=drop) X <- Q[["data"]] U <- union.quad(Q) E <- equalpairs.quad(Q) eva <- evalInteraction(X, U, E, object$interaction, object$correction, splitInf=TRUE) forbid <- attr(eva, "-Inf") %orifnull% logical(npoints(U)) ## Use positive part of interaction if(ncol(eva) != length(Vnames)) stop(paste("Internal error: evalInteraction yielded", ncol(eva), "variables instead of", length(Vnames)), call.=FALSE) glmdata[,Vnames] <- as.data.frame(eva) } }) # Compute predicted [conditional] intensity values changecoef <- !is.null(new.coef) || (object$method != "mpl") lambda <- GLMpredict(glmfit, glmdata, coeffs, changecoef=changecoef, type = ifelse(type == "link", "link", "response")) # Note: the `newdata' argument is necessary in order to obtain # predictions at all quadrature points. If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. Assuming drop=FALSE. 
if(interacting && type=="lambda" && !ignore.hardcore) { # reinsert zeroes lam <- numeric(length(forbid)) lam[forbid] <- 0 lam[!forbid] <- lambda lambda <- lam } } if(dataonly) lambda <- lambda[is.data(quad.ppm(object))] if(leaveoneout) { ## Perform leverage calculation dfb <- dfbetas(object, multitypeOK=TRUE) delta <- with(dfb, 'discrete')[with(dfb, 'is.atom'),,drop=FALSE] ## adjust fitted value mom <- model.matrix(object)[is.data(quad.ppm(object)),,drop=FALSE] if(type == "trend" && !uniform && interacting) mom[, Vnames] <- 0 lambda <- lambda * exp(- rowSums(delta * mom)) } lambda <- unname(as.vector(lambda)) return(lambda) } adaptcoef <- function(new.coef, fitcoef, drop=FALSE) { ## a replacement for 'fitcoef' will be extracted from 'new.coef' if(is.null(new.coef)) { coeffs <- fitcoef } else if(length(new.coef) == length(fitcoef)) { coeffs <- new.coef } else { fitnames <- names(fitcoef) newnames <- names(new.coef) if(is.null(newnames) || is.null(fitnames)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef)), call.=FALSE) absentnames <- setdiff(fitnames, newnames) excessnames <- setdiff(newnames, fitnames) if((nab <- length(absentnames)) > 0) stop(paste(ngettext(nab, "Coefficient", "Coefficients"), commasep(sQuote(absentnames)), ngettext(nab, "is", "are"), "missing from new.coef"), call.=FALSE) if(!drop && ((nex <- length(excessnames)) > 0)) stop(paste(ngettext(nex, "Coefficient", "Coefficients"), commasep(sQuote(excessnames)), ngettext(nab, "is", "are"), "present in new.coef but not in coef(object)"), call.=FALSE) #' extract only the relevant coefficients coeffs <- new.coef[fitnames] } return(coeffs) } spatstat/R/linim.R0000644000176200001440000006557413616415060013574 0ustar liggesusers# # linim.R # # $Revision: 1.71 $ $Date: 2020/02/05 01:22:28 $ # # Image/function on a linear network # linim <- function(L, Z, ..., restrict=TRUE, df=NULL) { L <- as.linnet(L) stopifnot(is.im(Z)) class(Z) <- "im" # prevent unintended 
dispatch dfgiven <- !is.null(df) if(dfgiven) { stopifnot(is.data.frame(df)) neednames <- c("xc", "yc", "x", "y", "mapXY", "tp", "values") ok <- neednames %in% names(df) dfcomplete <- all(ok) if(!dfcomplete) { #' omission of "values" column is permissible, but not other columns mapnames <- setdiff(neednames, "values") if(!all(mapnames %in% names(df))) { nn <- sum(!ok) stop(paste(ngettext(nn, "A column", "Columns"), "named", commasep(sQuote(neednames[!ok])), ngettext(nn, "is", "are"), "missing from argument", sQuote("df"))) } } } if(restrict) { #' restrict image to pixels actually lying on the network M <- as.mask.psp(as.psp(L), Z) if(dfgiven) { #' ensure all mapped pixels are untouched pos <- nearest.pixel(df$xc, df$yc, Z) pos <- cbind(pos$row, pos$col) M$m[pos] <- TRUE } Z <- Z[M, drop=FALSE] } if(!dfgiven) { # compute the data frame of mapping information xx <- rasterx.im(Z) yy <- rastery.im(Z) mm <- !is.na(Z$v) xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(Z), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, as.psp(L)) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) # extract values values <- Z[pixelcentres] # bundle df <- cbind(pixdf, projloc, projmap, data.frame(values=values)) } else if(!dfcomplete) { #' look up values pixelcentres <- ppp(df$xc, df$yc, window=as.rectangle(Z), check=FALSE) df$values <- safelookup(Z, pixelcentres) } out <- Z attr(out, "L") <- L attr(out, "df") <- df class(out) <- c("linim", class(out)) return(out) } is.linim <- function(x) { inherits(x, "linim") } print.linim <- function(x, ...) 
{ splat("Image on linear network") L <- attr(x, "L") Lu <- summary(unitname(L)) nsample <- nrow(attr(x, "df")) print(L) NextMethod("print") if(!is.null(nsample)) splat(" Data frame:", nsample, "sample points along network", "\n", "Average density: one sample point per", signif(volume(L)/nsample, 3), Lu$plural, Lu$explain) return(invisible(NULL)) } summary.linim <- function(object, ...) { y <- NextMethod("summary") if("integral" %in% names(y)) y$integral <- integral(object) y$network <- summary(as.linnet(object)) class(y) <- c("summary.linim", class(y)) return(y) } print.summary.linim <- function(x, ...) { splat(paste0(x$type, "-valued"), "pixel image on a linear network") unitinfo <- summary(x$units) pluralunits <- unitinfo$plural sigdig <- getOption('digits') di <- x$dim win <- x$window splat(di[1L], "x", di[2L], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(win$xrange, sigdig)), "x", prange(signif(win$yrange, sigdig)), unitinfo$plural, unitinfo$explain) splat("dimensions of each pixel:", signif(x$xstep, 3), "x", signif(x$ystep, sigdig), pluralunits) if(!is.null(explain <- unitinfo$explain)) splat(explain) splat("Pixel values (on network):") switch(x$type, integer=, real={ splat("\trange =", prange(signif(x$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, factor={ print(x$table) }, complex={ splat("\trange: Real", prange(signif(x$Re$range, sigdig)), "Imaginary", prange(signif(x$Im$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, { print(x$summary) }) splat("Underlying network:") print(x$network) return(invisible(NULL)) } plot.linim <- local({ plot.linim <- function(x, ..., style=c("colour", "width"), scale, adjust=1, fatten=0, negative.args=list(col=2), legend=TRUE, leg.side=c("right", "left", "bottom", "top"), leg.sep=0.1, leg.wid=0.1, leg.args=list(), leg.scale=1, zlim, box=FALSE, do.plot=TRUE) { xname <- 
short.deparse(substitute(x)) style <- match.arg(style) leg.side <- match.arg(leg.side) check.1.real(leg.scale) if(!missing(fatten)) { check.1.real(fatten) if(fatten != 0 && style == "width") warning("Argument 'fatten' is ignored when style='width'", call.=FALSE) stopifnot(fatten >= 0) } if(missing(zlim) || is.null(zlim)) { zlim <- NULL zliminfo <- list() } else { check.range(zlim) stopifnot(all(is.finite(zlim))) zliminfo <- list(zlim=zlim) } ribstuff <- list(ribbon = legend, ribside = leg.side, ribsep = leg.sep, ribwid = leg.wid, ribargs = leg.args, ribscale = leg.scale) if(style == "colour" || !do.plot) { #' colour style: plot as pixel image if(fatten > 0) { #' first fatten the lines L <- attr(x, "L") S <- as.psp(L) D <- distmap(as.mask.psp(S, xy=x)) fatwin <- levelset(D, fatten) x <- nearestValue(x)[fatwin, drop=FALSE] } return(do.call(plot.im, resolve.defaults(list(x), list(...), ribstuff, zliminfo, list(main=xname, legend=legend, do.plot=do.plot, box=box)))) } #' width style L <- attr(x, "L") df <- attr(x, "df") Llines <- as.psp(L) W <- as.owin(L) #' ensure function values are numeric vals <- try(as.numeric(df$values)) if(inherits(vals, "try-error")) stop("Function values should be numeric: unable to convert them", call.=FALSE) #' convert non-finite values to zero width vals[!is.finite(vals)] <- 0 df$values <- vals #' plan layout if(legend) { #' use layout procedure in plot.im z <- do.call(plot.im, resolve.defaults(list(x, do.plot=FALSE, ribbon=TRUE), list(...), ribstuff, list(main=xname, valuesAreColours=FALSE))) bb.all <- attr(z, "bbox") bb.leg <- attr(z, "bbox.legend") } else { bb.all <- Frame(W) bb.leg <- NULL } legend <- !is.null(bb.leg) if(legend) { #' expand plot region to accommodate text annotation in legend if(leg.side %in% c("left", "right")) { delta <- 2 * sidelengths(bb.leg)[1] xmargin <- if(leg.side == "right") c(0, delta) else c(delta, 0) bb.all <- grow.rectangle(bb.all, xmargin=xmargin) } } #' initialise plot bb <- do.call.matched(plot.owin, 
resolve.defaults(list(x=bb.all, type="n"), list(...), list(main=xname)), extrargs="type") if(box) plot(Frame(W), add=TRUE) #' resolve graphics parameters for polygons names(negative.args) <- paste0(names(negative.args), ".neg") grafpar <- resolve.defaults(negative.args, list(...), list(col=1), .MatchNull=FALSE) #' rescale values to a plottable range if(is.null(zlim)) zlim <- range(x, finite=TRUE) vr <- range(0, zlim) if(missing(scale)) { maxsize <- mean(distmap(Llines))/2 scale <- maxsize/max(abs(vr)) } df$values <- adjust * scale * df$values/2 #' examine sign of values signtype <- if(vr[1] >= 0) "positive" else if(vr[2] <= 0) "negative" else "mixed" #' split data by segment mapXY <- factor(df$mapXY, levels=seq_len(Llines$n)) dfmap <- split(df, mapXY, drop=TRUE) #' sort each segment's data by position along segment dfmap <- lapply(dfmap, sortalongsegment) #' plot each segment's data Lperp <- angles.psp(Llines) + pi/2 Lfrom <- L$from Lto <- L$to Lvert <- L$vertices Ljoined <- (vertexdegree(L) > 1) #' precompute coordinates of dodecagon dodo <- disc(npoly=12)$bdry[[1L]] #' for(i in seq(length(dfmap))) { z <- dfmap[[i]] segid <- unique(z$mapXY)[1L] xx <- z$x yy <- z$y vv <- z$values #' add endpoints of segment ileft <- Lfrom[segid] iright <- Lto[segid] leftend <- Lvert[ileft] rightend <- Lvert[iright] xx <- c(leftend$x, xx, rightend$x) yy <- c(leftend$y, yy, rightend$y) vv <- c(vv[1L], vv, vv[length(vv)]) rleft <- vv[1L] rright <- vv[length(vv)] ## first add dodecagonal 'joints' if(Ljoined[ileft] && rleft != 0) drawSignedPoly(x=rleft * dodo$x + leftend$x, y=rleft * dodo$y + leftend$y, grafpar, sign(rleft)) if(Ljoined[iright] && rright != 0) drawSignedPoly(x=rright * dodo$x + rightend$x, y=rright * dodo$y + rightend$y, grafpar, sign(rright)) ## Now render main polygon ang <- Lperp[segid] switch(signtype, positive = drawseg(xx, yy, vv, ang, grafpar), negative = drawseg(xx, yy, vv, ang, grafpar), mixed = { ## find zero-crossings xing <- (diff(sign(vv)) != 0) ## 
excursions excu <- factor(c(0, cumsum(xing))) elist <- split(data.frame(xx=xx, yy=yy, vv=vv), excu) ## plot each excursion for(e in elist) with(e, drawseg(xx, yy, vv, ang, grafpar)) }) } result <- adjust * scale attr(result, "bbox") <- bb if(legend) { attr(result, "bbox.legend") <- bb.leg plotWidthMap(bb.leg = bb.leg, zlim = zlim, phys.scale = adjust * scale, leg.scale = leg.scale, leg.side = leg.side, leg.args = leg.args, grafpar = grafpar) } return(invisible(result)) } drawseg <- function(xx, yy, vv, ang, pars) { ## draw polygon around segment sgn <- sign(mean(vv)) xx <- c(xx, rev(xx)) yy <- c(yy, rev(yy)) vv <- c(vv, -rev(vv)) xx <- xx + cos(ang) * vv yy <- yy + sin(ang) * vv drawSignedPoly(xx, yy, pars, sgn) invisible(NULL) } plot.linim }) drawSignedPoly <- local({ ## internal function to plot line segments for style="width" ## with sign-dependent colours, etc pNames <- c("density", "angle", "border", "col", "lty") posnames <- paste(pNames, ".pos", sep="") negnames <- paste(pNames, ".neg", sep="") redub <- function(from, to, x) { #' rename entry x$from to x$to m <- match(from, names(x)) if(any(ok <- !is.na(m))) names(x)[m[ok]] <- to[ok] return(resolve.defaults(x)) } drawSignedPoly <- function(x, y, pars, sgn) { #' plot polygon using parameters appropriate to "sign" if(sgn >= 0) { pars <- redub(posnames, pNames, pars) } else { pars <- redub(negnames, pNames, pars) } pars <- pars[names(pars) %in% pNames] if(is.null(pars$border)) pars$border <- pars$col do.call(polygon, append(list(x=x, y=y), pars)) invisible(NULL) } drawSignedPoly }) ## internal function to plot the map of pixel values to line widths plotWidthMap <- function(bb.leg, zlim, phys.scale, leg.scale, leg.side, leg.args, grafpar) { ## get graphical arguments grafpar <- resolve.defaults(leg.args, grafpar) ## set up scale of typical pixel values gvals <- leg.args$at %orifnull% prettyinside(zlim) ## corresponding widths wvals <- phys.scale * gvals ## glyph positions ng <- length(gvals) xr <- bb.leg$xrange 
yr <- bb.leg$yrange switch(leg.side, right = , left = { y <- seq(yr[1], yr[2], length.out=ng+1L) y <- (y[-1L] + y[-(ng+1L)])/2 for(j in 1:ng) { xx <- xr[c(1L,2L,2L,1L)] yy <- (y[j] + c(-1,1) * wvals[j]/2)[c(1L,1L,2L,2L)] drawSignedPoly(x = xx, y = yy, grafpar, sign(wvals[j])) } }, bottom = , top = { x <- seq(xr[1], xr[2], length.out=ng+1L) x <- (x[-1L] + x[-(ng+1L)])/2 for(j in 1:ng) { xx <- (x[j] + c(-1,1) * wvals[j]/2)[c(1L,1L,2L,2L)] yy <- yr[c(1L,2L,2L,1L)] drawSignedPoly(x = xx, y = yy, grafpar, sign(wvals[j])) } }) ## add text labels glabs <- signif(leg.scale * gvals, 2) textpos <- switch(leg.side, right = list(x=xr[2], y=y, pos=4), left = list(x=xr[1], y=y, pos=2), bottom = list(x=x, y=yr[1], pos=1), top = list(x=x, y=yr[2], pos=3)) textargs <- resolve.defaults(textpos, leg.args, list(labels=glabs)) do.call.matched(text, textargs, extrargs=graphicsPars("text")) return(invisible(NULL)) } sortalongsegment <- function(df) { df[fave.order(df$tp), , drop=FALSE] } as.im.linim <- function(X, ...) { attr(X, "L") <- attr(X, "df") <- NULL class(X) <- "im" if(length(list(...)) > 0) X <- as.im(X, ...) return(X) } as.linim <- function(X, ...) 
{ UseMethod("as.linim") } as.linim.default <- function(X, L, ..., eps = NULL, dimyx = NULL, xy = NULL, delta = NULL) { stopifnot(inherits(L, "linnet")) Y <- as.im(X, W=Frame(L), ..., eps=eps, dimyx=dimyx, xy=xy) M <- as.mask.psp(as.psp(L), as.owin(Y)) Y[complement.owin(M)] <- NA df <- NULL if(!is.null(delta)) { df <- pointsAlongNetwork(L, delta) pix <- nearest.valid.pixel(df$x, df$y, Y) df$xc <- Y$xcol[pix$col] df$yc <- Y$yrow[pix$row] df$values <- Y$v[cbind(pix$row, pix$col)] df <- df[,c("xc", "yc", "x", "y", "seg", "tp", "values")] names(df)[names(df) == "seg"] <- "mapXY" } if(is.mask(WL <- Window(L)) && !all(sapply(list(eps, dimyx, xy), is.null))) Window(L, check=FALSE) <- as.mask(WL, eps=eps, dimyx=dimyx, xy=xy) out <- linim(L, Y, df=df, restrict=FALSE) return(out) } pointsAlongNetwork <- local({ pointsAlongNetwork <- function(L, delta) { #' sample points evenly spaced along each segment stopifnot(inherits(L, "linnet")) S <- as.psp(L) ns <- nsegments(S) seglen <- lengths.psp(S) ends <- as.data.frame(S) nsample <- pmax(1, ceiling(seglen/delta)) df <- NULL x0 <- ends$x0 y0 <- ends$y0 x1 <- ends$x1 y1 <- ends$y1 for(i in seq_len(ns)) { nn <- nsample[i] + 1L tcut <- seq(0, 1, length.out=nn) tp <- (tcut[-1] + tcut[-nn])/2 x <- x0[i] * (1-tp) + x1[i] * tp y <- y0[i] * (1-tp) + y1[i] * tp df <- rbind(df, data.frame(x=x, y=y, seg=i, tp=tp)) } return(df) } pointsAlongNetwork }) as.linim.linim <- function(X, ...) { if(length(list(...)) == 0) return(X) Y <- as.linim.default(X, as.linnet(X), ...) 
return(Y) } # analogue of eval.im eval.linim <- function(expr, envir, harmonize=TRUE, warn=TRUE) { sc <- sys.call() # Get names of all variables in the expression e <- as.expression(substitute(expr)) varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") # get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) # Find out which variables are (linear) images islinim <- unlist(lapply(vars, inherits, what="linim")) if(!any(islinim)) stop("There are no linear images (class linim) in this expression") # .................................... # Evaluate the pixel values using eval.im # .................................... sc[[1L]] <- as.name('eval.im') sc$envir <- envir Y <- eval(sc) # ......................................... # Then evaluate data frame entries if feasible # ......................................... 
dfY <- NULL linims <- vars[islinim] nlinims <- length(linims) dframes <- lapply(linims, attr, which="df") nets <- lapply(linims, attr, which="L") isim <- unlist(lapply(vars, is.im)) if(!any(isim & !islinim)) { # all images are 'linim' objects # Check that the images refer to the same linear network if(nlinims > 1) { agree <- unlist(lapply(nets[-1L], identical, y=nets[[1L]])) if(!all(agree)) stop(paste("Images do not refer to the same linear network")) } dfempty <- unlist(lapply(dframes, is.null)) if(!any(dfempty)) { # ensure data frames are compatible if(length(dframes) > 1 && ( length(unique(nr <- sapply(dframes, nrow))) > 1 || !allElementsIdentical(dframes, "seg") || !allElementsIdentical(dframes, "tp") )) { # find the one with finest spacing imax <- which.max(nr) # resample the others dframes[-imax] <- lapply(dframes[-imax], resampleNetworkDataFrame, template=dframes[[imax]]) } # replace each image variable by its data frame column of values vars[islinim] <- lapply(dframes, getElement, "values") # now evaluate expression Yvalues <- eval(e, append(vars, funs)) # pack up dfY <- dframes[[1L]] dfY$values <- Yvalues } } result <- linim(nets[[1L]], Y, df=dfY, restrict=FALSE) return(result) } resampleNetworkDataFrame <- function(df, template) { # resample 'df' at the points of 'template' invalues <- df$values insegment <- df$mapXY inteepee <- df$tp out <- template n <- nrow(out) outvalues <- vector(mode = typeof(invalues), length=n) outsegment <- out$mapXY outteepee <- out$tp for(i in seq_len(n)) { relevant <- which(insegment == outsegment[i]) if(length(relevant) > 0) { j <- which.min(abs(inteepee[relevant] - outteepee[i])) outvalues[i] <- invalues[relevant[j]] } } out$values <- outvalues return(out) } as.linnet.linim <- function(X, ...) 
{ attr(X, "L") }

# Subset method for images on a linear network.
# If the index is an lpp (points on the network), values are looked up
# at the query points via the stored data frame of sample points;
# otherwise the im subset method is applied and the network metadata
# (attributes "L" and "df") is clipped to the new window.
"[.linim" <- function(x, i, ..., drop=TRUE) {
  if(!missing(i) && is.lpp(i)) {
    n <- npoints(i)
    # result vector of the same storage type as the pixel values
    result <- vector(mode=typeof(x$v), length=n)
    if(is.factor(x$v)) {
      lev <- levels(x$v)
      result <- factor(result, levels=seq_along(lev), labels=lev)
    }
    if(n == 0) return(result)
    if(!is.null(df <- attr(x, "df"))) {
      #' use data frame of sample points along network
      knownseg <- df$mapXY
      knowntp <- df$tp
      knownval <- df$values
      #' extract local coordinates of query points
      coo <- coords(i)
      queryseg <- coo$seg
      querytp <- coo$tp
      #' match to nearest sample point (same segment, closest tp)
      for(j in 1:n) {
        relevant <- (knownseg == queryseg[j])
        if(!any(relevant)) {
          # no sample point on this segment
          result[j] <- NA
        } else {
          k <- which.min(abs(knowntp[relevant] - querytp[j]))
          result[j] <- knownval[relevant][k]
        }
      }
      if(drop && anyNA(result))
        result <- result[!is.na(result)]
      return(result)
    }
    #' give up and use pixel image
  }
  #' apply subset method for 'im'
  y <- NextMethod("[")
  if(!is.im(y)) return(y) # vector of pixel values
  class(y) <- unique(c("linim", class(y)))
  #' handle linear network info
  L <- attr(x, "L")
  df <- attr(x, "df")
  #' clip to new window
  W <- Window(y)
  LW <- L[W]
  df <- df[inside.owin(df$xc, df$yc, W), , drop=FALSE]
  #' update local coordinates in data frame
  # (segment indices change after clipping, so re-project)
  samplepoints <- ppp(df$x, df$y, window=Frame(W), check=FALSE)
  a <- project2segment(samplepoints, as.psp(LW))
  df$mapXY <- a$mapXY
  df$tp <- a$tp
  #' wrap up
  attr(y, "L") <- LW
  attr(y, "df") <- df
  return(y)
}

# Subset-assignment method for linim objects.
# Delegates to the im method, then propagates changed pixel values
# into the data frame of sample points and re-masks the image so that
# only pixels on the network (plus the sample-point pixels) are defined.
"[<-.linim" <- function(x, i, j, value) {
  y <- NextMethod("[<-")
  #' extract linear network info
  L <- attr(x, "L")
  df <- attr(x, "df")
  #' propagate *changed* pixel values to sample points
  pos <- nearest.pixel(df$xc, df$yc, y)
  pos <- cbind(pos$row, pos$col)
  yvalue <- y$v[pos]
  xvalue <- x$v[pos]
  changed <- (yvalue != xvalue)
  df$values[changed] <- yvalue[changed]
  #' restrict main pixel image to network
  m <- as.mask.psp(L, as.mask(y))$m
  m[pos] <- TRUE
  y$v[!m] <- NA
  #' package up
  attr(y, "L") <- L
  attr(y, "df") <- df
  class(y) <- unique(c("linim", class(y)))
  return(y)
}
## Integral of a pixel image on a linear network.
## The integral is approximated as sum over segments of
## (mean sample value on the segment) * (segment length).
## If 'domain' is a tessellation, returns one integral per tile.
integral.linim <- function(f, domain=NULL, ...){
  verifyclass(f, "linim")
  if(is.tess(domain)) {
    result <- sapply(tiles(domain), integral.linim, f = f)
    if(length(dim(result)) > 1) result <- t(result)
    return(result)
  }
  if(!is.null(domain)) f <- f[domain]
  #' extract data
  L <- as.linnet(f)
  ns <- nsegments(L)
  df <- attr(f, "df")
  vals <- df$values
  seg <- factor(df$mapXY, levels=1:ns)
  #' ensure each segment has at least one sample point
  nper <- table(seg)
  if(any(missed <- (nper == 0))) {
    missed <- unname(which(missed))
    mp <- midpoints.psp(as.psp(L)[missed])
    #' nearest pixel value
    valmid <- safelookup(f, mp)
    #' concatenate factors
    seg <- unlist(list(seg, factor(missed, levels=1:ns)))
    vals <- c(vals, valmid)
    #' update
    nper <- table(seg)
  }
  #' take average of data on each segment
  ## mu <- as.numeric(by(vals, seg, mean, ..., na.rm=TRUE))
  ## mu[is.na(mu)] <- 0
  num <- tapplysum(as.numeric(vals), list(seg), na.rm=TRUE)
  mu <- num/nper
  #' weighted sum
  len <- lengths.psp(as.psp(L))
  if(anyNA(vals)) {
    ## p <- as.numeric(by(!is.na(vals), seg, mean, ..., na.rm=TRUE))
    ## p[is.na(p)] <- 0
    # down-weight each segment by the fraction of non-NA sample values
    defined <- as.numeric(!is.na(vals))
    pnum <- tapplysum(defined, list(seg), na.rm=FALSE)
    p <- pnum/nper
    len <- len * p
  }
  return(sum(mu * len))
}

## Mean value of a 'linim' = integral divided by total network length.
mean.linim <- function(x, ...) {
  trap.extra.arguments(...)
  integral(x)/sum(lengths.psp(as.psp(as.linnet(x))))
}

## Quantiles of a 'linim', weighting each sample value by the length of
## network it represents (segment length / number of samples on segment).
quantile.linim <- function(x, probs = seq(0,1,0.25), ...) {
  verifyclass(x, "linim")
  #' extract data
  df <- attr(x, "df")
  L <- as.linnet(x)
  vals <- df$values
  #' count sample points on each segment
  seg <- factor(df$mapXY, levels=1:nsegments(L))
  nvals <- table(seg)
  #' calculate weights
  len <- lengths.psp(as.psp(L))
  iseg <- as.integer(seg)
  wts <- len[iseg]/nvals[iseg]
  return(weighted.quantile(vals, wts, probs))
}

## Median of a 'linim' via the weighted quantile method above.
median.linim <- function(x, ...) {
  trap.extra.arguments(...)
  return(unname(quantile(x, 0.5)))
}

## Vector shift of a 'linim': shifts pixel image, network and the
## sample-point coordinates in the data frame by the same vector.
shift.linim <- function (X, ...) {
  verifyclass(X, "linim")
  Z <- shift(as.im(X), ...)
  L <- shift(as.linnet(X), ...)
  v <- getlastshift(L)
  df <- attr(X, "df")
  # shift both the pixel-centre (xc,yc) and projected (x,y) coordinates
  df[,c("xc","yc")] <- shiftxy(df[,c("xc", "yc")], v)
  df[,c("x","y")] <- shiftxy(df[,c("x", "y")], v)
  Y <- linim(L, Z, df=df, restrict=FALSE)
  return(putlastshift(Y, v))
}

## Affine transformation of a 'linim' (same bookkeeping as shift.linim).
affine.linim <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) {
  Z <- affine(as.im(X), mat=mat, vec=vec, ...)
  L <- affine(as.linnet(X), mat=mat, vec=vec, ...)
  df <- attr(X, "df")
  df[,c("xc","yc")] <- affinexy(df[,c("xc", "yc")], mat=mat, vec=vec)
  df[,c("x","y")] <- affinexy(df[,c("x", "y")], mat=mat, vec=vec)
  Y <- linim(L, Z, df=df, restrict=FALSE)
  return(Y)
}

## Scalar dilation by factor 'f', optionally about a given origin,
## implemented as a shift followed by an affine scaling.
scalardilate.linim <- function(X, f, ..., origin=NULL) {
  trap.extra.arguments(..., .Context = "In scalardilate(X,f)")
  check.1.real(f, "In scalardilate(X,f)")
  stopifnot(is.finite(f) && f > 0)
  if (!is.null(origin)) {
    X <- shift(X, origin = origin)
    negorig <- getlastshift(X)
  } else negorig <- c(0, 0)
  Y <- affine(X, mat = diag(c(f, f)), vec = -negorig)
  return(Y)
}

## Convert a 'linim' to a data frame of its sample points,
## renaming the internal column 'mapXY' to the public name 'seg'.
as.data.frame.linim <- function(x, ...) {
  df <- attr(x, "df")
  if(!is.na(m <- match("mapXY", colnames(df))))
    colnames(df)[m] <- "seg"
  return(df)
}

## Scatterplot matrix for images on a linear network.
## All images are sampled at a common set of equally-spaced points on the
## network, then plotted with pairs.default (or hist for a single image).
pairs.linim <- function(..., plot=TRUE, eps=NULL) {
  argh <- list(...)
  cl <- match.call()
  ## unpack single argument which is a list of images
  if(length(argh) == 1) {
    arg1 <- argh[[1L]]
    if(is.list(arg1) && all(sapply(arg1, is.im)))
      argh <- arg1
  }
  ## identify which arguments are images
  isim <- sapply(argh, is.im)
  nim <- sum(isim)
  if(nim == 0)
    stop("No images provided")
  ## separate image arguments from others
  imlist <- argh[isim]
  rest <- argh[!isim]
  ## identify which arguments are images on a network
  islinim <- sapply(imlist, inherits, what="linim")
  if(!any(islinim)) # shouldn't be here
    return(pairs.im(argh, plot=plot))
  ## determine image names for plotting
  imnames <- argh$labels %orifnull% names(imlist)
  if(length(imnames) != nim || !all(nzchar(imnames))) {
    #' names not given explicitly
    # recover names from the call; fall back to V1, V2, ... when a
    # deparsed expression is missing or too long to use as a label
    callednames <- paste(cl)[c(FALSE, isim, FALSE)]
    backupnames <- paste0("V", seq_len(nim))
    if(length(callednames) != nim) {
      callednames <- backupnames
    } else if(any(toolong <- (nchar(callednames) > 15))) {
      callednames[toolong] <- backupnames[toolong]
    }
    imnames <- good.names(imnames, good.names(callednames, backupnames))
  }
  names(imlist) <- imnames
  ## choose resolution
  if(is.null(eps)) {
    # default spacing: finest pixel step among the supplied images
    xstep <- min(sapply(imlist, getElement, name="xstep"))
    ystep <- min(sapply(imlist, getElement, name="ystep"))
    eps <- min(xstep, ystep)
  }
  ## extract linear network
  Z1 <- imlist[[min(which(islinim))]]
  L <- as.linnet(Z1)
  ## construct equally-spaced sample points
  X <- pointsOnLines(as.psp(L), eps=eps)
  ## sample each image
  pixvals <- lapply(imlist, "[", i=X, drop=FALSE)
  pixdf <- as.data.frame(pixvals)
  ## pairs plot
  if(plot) {
    if(nim > 1) {
      do.call(pairs.default,
              resolve.defaults(list(x=pixdf),
                               rest,
                               list(labels=imnames, pch=".")))
      labels <- resolve.defaults(rest, list(labels=imnames))$labels
      colnames(pixdf) <- labels
    } else {
      do.call(hist.default,
              resolve.defaults(list(x=pixdf[,1L]),
                               rest,
                               list(xname=imnames[1L], xlab=imnames[1L])))
    }
  }
  class(pixdf) <- unique(c("plotpairsim", class(pixdf)))
  attr(pixdf, "eps") <- eps
  return(invisible(pixdf))
}
spatstat/R/rPerfect.R0000644000176200001440000002156113333543255014226 0ustar liggesusers#
#   Perfect Simulation
#
#   $Revision: 1.21 $ $Date: 2017/06/05 10:31:58 $
#
#   rStrauss
#   rHardcore
#   rStraussHard
#   rDiggleGratton
#   rDGS
#   rPenttinen
#
# Each generator below is a thin wrapper around a C routine (registered in
# the spatstat shared library) that performs the perfect simulation.
# Common structure:
#   - validate scalar parameters,
#   - touch the RNG once (runif(1)) before calling C — presumably so that
#     .Random.seed exists and is advanced (TODO confirm),
#   - expand the window (edge effects), simulate, clip back to 'W',
#   - return a single pattern (nsim=1, drop=TRUE) or a 'solist'.

## Perfect simulation of the Strauss process with intensity parameter
## 'beta', interaction parameter 'gamma' (0 <= gamma <= 1) and
## interaction radius 'R', in window 'W'.
rStrauss <- function(beta, gamma=1, R=0, W=owin(),
                     expand=TRUE, nsim=1, drop=TRUE) {
  if(!missing(W)) verifyclass(W, "owin")
  check.1.real(beta)
  check.1.real(gamma)
  check.1.real(R)
  check.finite(beta)
  check.finite(gamma)
  check.finite(R)
  stopifnot(beta > 0)
  stopifnot(gamma >= 0)
  stopifnot(gamma <= 1)
  stopifnot(R >= 0)
  runif(1)
  # simulate in a window expanded by the interaction range, then clip
  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
  xrange <- Wsim$xrange
  yrange <- Wsim$yrange
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    # coerce arguments to C 'double' before each call
    storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double"
    storage.mode(xrange) <- storage.mode(yrange) <- "double"
    z <- .Call("PerfectStrauss",
               beta, gamma, R, xrange, yrange,
               PACKAGE = "spatstat")
    X <- z[[1]]
    Y <- z[[2]]
    nout <- z[[3]]
    times <- c(start=z[[4]], end=z[[5]])
    if(nout<0)
      stop("internal error: copying failed in PerfectStrauss")
    seqn <- seq_len(nout)
    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
    if(attr(Wsim, "changed"))
      P <- P[W]
    # NOTE(review): only rStrauss records this timing attribute;
    # the other generators in this file do not.
    attr(P, "times") <- times
    if(nsim == 1 && drop)
      return(P)
    result[[i]] <- P
  }
  result <- as.solist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#  Perfect Simulation of Hardcore process

## Perfect simulation of the hard-core process with intensity 'beta'
## and hard-core distance 'R'.
rHardcore <- function(beta, R=0, W=owin(),
                      expand=TRUE, nsim=1, drop=TRUE) {
  if(!missing(W)) verifyclass(W, "owin")
  check.1.real(beta)
  check.1.real(R)
  check.finite(beta)
  check.finite(R)
  stopifnot(beta > 0)
  stopifnot(R >= 0)
  runif(1)
  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
  xrange <- Wsim$xrange
  yrange <- Wsim$yrange
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    storage.mode(beta) <- storage.mode(R) <- "double"
    storage.mode(xrange) <- storage.mode(yrange) <- "double"
    z <- .Call("PerfectHardcore",
               beta, R, xrange, yrange,
               PACKAGE = "spatstat")
    X <- z[[1]]
    Y <- z[[2]]
    nout <- z[[3]]
    if(nout<0)
      stop("internal error: copying failed in PerfectHardcore")
    seqn <- seq_len(nout)
    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
    if(attr(Wsim, "changed"))
      P <- P[W]
    if(nsim == 1 && drop)
      return(P)
    result[[i]] <- P
  }
  result <- as.solist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#
#  Perfect simulation of hybrid Strauss-Hardcore
#  provided gamma <= 1
#

## Perfect simulation of the hybrid Strauss/hard-core process:
## hard core at distance 'H', Strauss interaction 'gamma' up to 'R'.
## Only implemented for gamma <= 1 (the locally-stable case).
rStraussHard <- function(beta, gamma=1, R=0, H=0, W=owin(),
                         expand=TRUE, nsim=1, drop=TRUE) {
  if(!missing(W)) verifyclass(W, "owin")
  check.1.real(beta)
  check.1.real(gamma)
  check.1.real(R)
  check.1.real(H)
  check.finite(beta)
  check.finite(gamma)
  check.finite(R)
  check.finite(H)
  stopifnot(beta > 0)
  stopifnot(gamma >= 0)
  if(gamma > 1)
    stop("Sorry, perfect simulation is only implemented for gamma <= 1")
  stopifnot(R >= 0)
  stopifnot(H >= 0)
  stopifnot(H <= R)
  runif(1)
  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
  xrange <- Wsim$xrange
  yrange <- Wsim$yrange
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    storage.mode(beta) <- storage.mode(gamma) <-
      storage.mode(R) <- storage.mode(H) <- "double"
    storage.mode(xrange) <- storage.mode(yrange) <- "double"
    z <- .Call("PerfectStraussHard",
               beta, gamma, R, H, xrange, yrange,
               PACKAGE = "spatstat")
    X <- z[[1]]
    Y <- z[[2]]
    nout <- z[[3]]
    if(nout<0)
      stop("internal error: copying failed in PerfectStraussHard")
    seqn <- seq_len(nout)
    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
    if(attr(Wsim, "changed"))
      P <- P[W]
    if(nsim == 1 && drop)
      return(P)
    result[[i]] <- P
  }
  result <- as.solist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#
#  Perfect Simulation of Diggle-Gratton process
#

## Perfect simulation of the Diggle-Gratton process with hard core 'delta',
## interaction range 'rho' and exponent 'kappa'.
rDiggleGratton <- function(beta, delta, rho, kappa=1, W=owin(),
                           expand=TRUE, nsim=1, drop=TRUE) {
  if(!missing(W)) verifyclass(W, "owin")
  check.1.real(beta)
  check.1.real(delta)
  check.1.real(rho)
  check.1.real(kappa)
  check.finite(beta)
  check.finite(delta)
  check.finite(rho)
  check.finite(kappa)
  stopifnot(beta > 0)
  stopifnot(delta >= 0)
  stopifnot(rho >= 0)
  stopifnot(delta <= rho)
  stopifnot(kappa >= 0)
  runif(1)
  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho))
  xrange <- Wsim$xrange
  yrange <- Wsim$yrange
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    storage.mode(beta) <- "double"
    storage.mode(delta) <- storage.mode(rho) <- storage.mode(kappa) <- "double"
    storage.mode(xrange) <- storage.mode(yrange) <- "double"
    z <- .Call("PerfectDiggleGratton",
               beta, delta, rho, kappa, xrange, yrange,
               PACKAGE = "spatstat")
    X <- z[[1]]
    Y <- z[[2]]
    nout <- z[[3]]
    if(nout<0)
      stop("internal error: copying failed in PerfectDiggleGratton")
    seqn <- seq_len(nout)
    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
    if(attr(Wsim, "changed"))
      P <- P[W]
    if(nsim == 1 && drop)
      return(P)
    result[[i]] <- P
  }
  result <- as.solist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#
#  Perfect Simulation of Diggle-Gates-Stibbard process
#

## Perfect simulation of the Diggle-Gates-Stibbard process with
## interaction range 'rho'.
rDGS <- function(beta, rho, W=owin(),
                 expand=TRUE, nsim=1, drop=TRUE) {
  if(!missing(W)) verifyclass(W, "owin")
  check.1.real(beta)
  check.1.real(rho)
  check.finite(beta)
  check.finite(rho)
  stopifnot(beta > 0)
  stopifnot(rho >= 0)
  runif(1)
  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*rho))
  xrange <- Wsim$xrange
  yrange <- Wsim$yrange
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    storage.mode(beta) <- "double"
    storage.mode(rho) <- "double"
    storage.mode(xrange) <- storage.mode(yrange) <- "double"
    z <- .Call("PerfectDGS",
               beta, rho, xrange, yrange,
               PACKAGE = "spatstat")
    X <- z[[1]]
    Y <- z[[2]]
    nout <- z[[3]]
    if(nout<0)
      stop("internal error: copying failed in PerfectDGS")
    seqn <- seq_len(nout)
    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
    if(attr(Wsim, "changed"))
      P <- P[W]
    if(nsim == 1 && drop)
      return(P)
    result[[i]] <- P
  }
  result <- as.solist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

#
#  Perfect Simulation of Penttinen process
#

## Perfect simulation of the Penttinen process with interaction
## parameter 'gamma' (0 <= gamma <= 1) and interaction radius 'R'
## (note: 'R' has no default here, unlike rStrauss).
rPenttinen <- function(beta, gamma=1, R, W=owin(),
                       expand=TRUE, nsim=1, drop=TRUE) {
  if(!missing(W)) verifyclass(W, "owin")
  check.1.real(beta)
  check.1.real(gamma)
  check.1.real(R)
  check.finite(beta)
  check.finite(gamma)
  check.finite(R)
  stopifnot(beta > 0)
  stopifnot(gamma >= 0)
  stopifnot(gamma <= 1)
  stopifnot(R >= 0)
  runif(1)
  Wsim <- expandwinPerfect(W, expand, rmhexpand(distance=2*R))
  xrange <- Wsim$xrange
  yrange <- Wsim$yrange
  result <- vector(mode="list", length=nsim)
  for(i in 1:nsim) {
    storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double"
    storage.mode(xrange) <- storage.mode(yrange) <- "double"
    z <- .Call("PerfectPenttinen",
               beta, gamma, R, xrange, yrange,
               PACKAGE = "spatstat")
    X <- z[[1]]
    Y <- z[[2]]
    nout <- z[[3]]
    if(nout<0)
      stop("internal error: copying failed in PerfectPenttinen")
    seqn <- seq_len(nout)
    P <- ppp(X[seqn], Y[seqn], window=Wsim, check=FALSE)
    if(attr(Wsim, "changed"))
      P <- P[W]
    if(nsim == 1 && drop)
      return(P)
    result[[i]] <- P
  }
  result <- as.solist(result)
  names(result) <- paste("Simulation", 1:nsim)
  return(result)
}

##  .......  utilities .................................
expandwinPerfect <- function(W, expand, amount) { ## expand 'W' if expand=TRUE according to default 'amount' ## or expand 'W' using rmhexpand(expand) if(!is.logical(expand)) { amount <- rmhexpand(expand) expand <- TRUE } changed <- FALSE if(expand) { W <- expand.owin(W, amount) changed <- TRUE } if(!is.rectangle(W)) { W <- as.rectangle(W) changed <- TRUE warning(paste("Simulation will be performed in the containing rectangle", "and clipped to the original window."), call.=FALSE) } attr(W, "changed") <- changed return(W) } spatstat/R/envelopelpp.R0000644000176200001440000001752713551001745015007 0ustar liggesusers# # envelopelpp.R # # $Revision: 1.26 $ $Date: 2019/10/14 04:53:06 $ # # Envelopes for 'lpp' objects # # envelope.lpp <- function(Y, fun=linearK, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.lpp")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(!is.null(simulate)) { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else if(!fix.n && !fix.marks) { # ................................................... 
    # Realisations of complete spatial randomness
    # Data pattern X is argument Y
    # Data pattern determines intensity of Poisson process
    X <- Y
    nY <- npoints(Y)
    Yintens <- intensity(unmark(Y))
    Ymarx <- marks(Y)
    NETWORK <- Y$domain
    dont.complain.about(nY, Yintens, NETWORK)
    ## expression that will be evaluated
    ## (marks are resampled with replacement from the data marks)
    simexpr <- if(is.null(Ymarx)) {
      #' unmarked point pattern
      expression(rpoislpp(Yintens, NETWORK))
    } else if(is.null(dim(Ymarx))) {
      #' single column of marks
      expression({
        A <- rpoislpp(Yintens, NETWORK);
        j <- sample(nY, npoints(A), replace=TRUE);
        A %mark% Ymarx[j]
      })
    } else {
      #' multiple columns of marks
      expression({
        A <- rpoislpp(Yintens, NETWORK);
        j <- sample(nY, npoints(A), replace=TRUE);
        A %mark% Ymarx[j, , drop=FALSE]
      })
    }
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = "csr",
                             expr = simexpr,
                             envir = envir.here,
                             csr = TRUE)
  } else if(!fix.marks) {
    # Fixed number of points, but random locations and marks
    # Data pattern X is argument Y
    X <- Y
    nY <- npoints(Y)
    Ymarx <- marks(Y)
    NETWORK <- Y$domain
    dont.complain.about(nY, NETWORK)
    # expression that will be evaluated
    simexpr <- if(is.null(Ymarx)) {
      ## unmarked
      expression(runiflpp(nY, NETWORK))
    } else if(is.null(dim(Ymarx))) {
      ## single column of marks
      expression({
        A <- runiflpp(nY, NETWORK);
        j <- sample(nY, npoints(A), replace=TRUE);
        A %mark% Ymarx[j]
      })
    } else {
      ## multiple columns of marks
      expression({
        A <- runiflpp(nY, NETWORK);
        j <- sample(nY, npoints(A), replace=TRUE);
        A %mark% Ymarx[j, ,drop=FALSE]
      })
    }
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = "csr",
                             expr = simexpr,
                             envir = envir.here,
                             csr = TRUE)
  } else {
    # ...................................................
    # Randomised locations only;
    # fixed number of points and fixed marks
    # Data pattern X is argument Y
    X <- Y
    nY <- npoints(Y)
    Ymarx <- marks(Y)
    NETWORK <- Y$domain
    # expression that will be evaluated
    simexpr <- expression(runiflpp(nY, NETWORK) %mark% Ymarx)
    dont.complain.about(nY, Ymarx, NETWORK)
    # evaluate in THIS environment
    simrecipe <- simulrecipe(type = "csr",
                             expr = simexpr,
                             envir = envir.here,
                             csr = TRUE)
  }
  # hand over to the generic envelope machinery
  envelopeEngine(X=X, fun=fun, simul=simrecipe,
                 nsim=nsim, nrank=nrank, ...,
                 funargs=funargs, funYargs=funYargs,
                 verbose=verbose, clipdata=FALSE,
                 transform=transform,
                 global=global, ginterval=ginterval, use.theory=use.theory,
                 alternative=alternative, scale=scale, clamp=clamp,
                 savefuns=savefuns, savepatterns=savepatterns,
                 nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD,
                 Yname=Yname,
                 maxnerr=maxnerr, rejectNA=rejectNA, silent=silent,
                 cl=cl, envir.user=envir.user, do.pwrong=do.pwrong)
}

## Simulation envelopes for a fitted point process model on a linear
## network ('lppm'). Simulates from the fitted (Poisson) model,
## optionally conditioning on the number of points and/or the marks.
envelope.lppm <-
  function(Y, fun=linearK, nsim=99, nrank=1, ...,
           funargs=list(), funYargs=funargs,
           simulate=NULL, fix.n=FALSE, fix.marks=FALSE,
           verbose=TRUE,
           transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL,
           alternative=c("two.sided", "less", "greater"),
           scale=NULL, clamp=FALSE,
           savefuns=FALSE, savepatterns=FALSE,
           nsim2=nsim, VARIANCE=FALSE, nSD=2,
           Yname=NULL,
           maxnerr=nsim, rejectNA=FALSE, silent=FALSE,
           do.pwrong=FALSE, envir.simul=NULL) {
  cl <- short.deparse(sys.call())
  if(is.null(Yname)) Yname <- short.deparse(substitute(Y))
  if(is.null(fun)) fun <- linearK
  if("clipdata" %in% names(list(...)))
    # NOTE(review): message says "envelope.pp3" but this is envelope.lppm —
    # looks like a copy-paste slip; fix the string in a follow-up change.
    stop(paste("The argument", sQuote("clipdata"),
               "is not available for envelope.pp3"))
  envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame()
  envir.here <- sys.frame(sys.nframe())
  # Extract data pattern X from fitted model Y
  X <- data.lppm(Y)
  if(!is.null(simulate)) {
    # ...................................................
    # Simulations are determined by 'simulate' argument
    # Processing is deferred to envelopeEngine
    simrecipe <- simulate
  } else {
    ## ...................................................
    ## Simulation of the fitted model Y
    if(!is.poisson(Y))
      stop("Simulation of non-Poisson models is not yet implemented")
    MODEL <- Y
    NETWORK <- domain(X)
    lambdaFit <- predict(MODEL)
    Xmarx <- marks(X)
    # unmarked: total count; marked: count of each type
    nX <- if(!is.marked(X)) npoints(X) else table(marks(X))
    dont.complain.about(NETWORK, Xmarx, nX)
    #'
    if(!fix.n && !fix.marks) {
      #' Unconstrained simulations
      LMAX <- if(is.im(lambdaFit)) max(lambdaFit) else sapply(lambdaFit, max)
      dont.complain.about(LMAX)
      simexpr <- expression(rpoislpp(lambdaFit, NETWORK, lmax=LMAX))
    } else if(!fix.marks && is.marked(X)) {
      #' Fixed total number of points
      # allocate type counts multinomially, proportional to expected counts
      EN <- sapply(lambdaFit, integral)
      PROB <- EN/sum(EN)
      dont.complain.about(PROB)
      simexpr <- expression(
        rlpp(as.integer(rmultinom(1L, nX, PROB)), lambdaFit)
      )
    } else {
      #' Fixed number of points of each type
      simexpr <- expression(rlpp(nX, lambdaFit))
    }
    #' evaluate in THIS environment
    simrecipe <- simulrecipe(type = "lppm",
                             expr = simexpr,
                             envir = envir.here,
                             csr = FALSE)
  }
  envelopeEngine(X=X, fun=fun, simul=simrecipe,
                 nsim=nsim, nrank=nrank, ...,
                 funargs=funargs, funYargs=funYargs,
                 verbose=verbose, clipdata=FALSE,
                 transform=transform,
                 global=global, ginterval=ginterval, use.theory=use.theory,
                 alternative=alternative, scale=scale, clamp=clamp,
                 savefuns=savefuns, savepatterns=savepatterns,
                 nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD,
                 Yname=Yname,
                 maxnerr=maxnerr, rejectNA=rejectNA, silent=silent,
                 cl=cl, envir.user=envir.user, do.pwrong=do.pwrong)
}
spatstat/R/summary.ppm.R0000644000176200001440000004211513550020146014730 0ustar liggesusers#
#    summary.ppm.R
#
#    summary() method for class "ppm"
#
#    $Revision: 1.78 $   $Date: 2019/10/11 06:23:19 $
#
#    summary.ppm()
#    print.summary.ppm()
#
summary.ppm <- local({

  # classify the storage type of a covariate, for display purposes
  covtype <- function(x) {
    if(is.im(x)) "im" else
    if(is.function(x)) "function" else
    if(is.owin(x)) "owin" else
    if(is.numeric(x) && length(x) == 1) "number" else
    if(is.factor(x)) "factor" else
    if(is.integer(x)) "integer" else
    if(is.numeric(x)) "numeric" else
    storage.mode(x)
  }

  # names of a function's formal arguments after the first two,
  # excluding '...' (the 'extra' arguments of a covariate function)
  xargs <- function(f) {
    ar <- names(formals(f))[-(1:2)]
    return(ar[ar != "..."])
  }

  ## Build a 'summary.ppm' object describing a fitted point process model.
  ## 'quick' controls how much is computed: TRUE stops after the basic
  ## classification; "entries", "no prediction" and "no variances" stop at
  ## successively later stages. 'fine' is passed to vcov() for the SE table.
  summary.ppm <- function(object, ..., quick=FALSE, fine=FALSE) {
    verifyclass(object, "ppm")
    x <- object
    y <- list()
    class(y) <- "summary.ppm"
    #######  Extract main data components #########################
    QUAD <- object$Q
    DATA <- QUAD$data
    TREND <- x$trend
    INTERACT <- x$interaction
    if(is.null(INTERACT)) INTERACT <- Poisson()
    #######  Check version #########################
    mpl.ver <- versionstring.ppm(object)
    int.ver <- versionstring.interact(INTERACT)
    current <- versionstring.spatstat()
    virgin <- min(package_version(c(mpl.ver, int.ver)))
    y$antiquated <- antiquated <- (virgin <= package_version("1.5"))
    y$old <- (virgin < majorminorversion(current))
    y$version <- as.character(virgin)
    #######  Determine type of model ############################
    y$entries <- list()
    y$no.trend <- identical.formulae(TREND, NULL) ||
                  identical.formulae(TREND, ~1)
    y$trendvar <- trendvar <- variablesinformula(TREND)
    # a trend depending only on 'marks' is still spatially stationary
    y$stationary <- y$no.trend || all(trendvar == "marks")
    y$poisson <- is.poisson.interact(INTERACT)
    y$marked <- is.marked.ppp(DATA)
    y$multitype <- is.multitype.ppp(DATA)
    y$marktype <- if(y$multitype) "multitype" else
                  if(y$marked) "marked" else "unmarked"
    if(y$marked) y$entries$marks <- marks(DATA)
    y$name <- paste(if(y$stationary) "Stationary " else "Nonstationary ",
                    if(y$poisson) {
                      if(y$multitype) "multitype "
                      else if(y$marked) "marked "
                      else ""
                    },
                    INTERACT$name,
                    sep="")
    ######  Fitting algorithm ########################################
    y$method <- x$method
    y$VB <- x$internal$VB
    y$problems <- x$problems
    y$fitter <- if(!is.null(x$fitter)) x$fitter else "unknown"
    if(y$fitter %in% c("glm", "gam"))
      y$converged <- x$internal$glmfit$converged
    ######  Coefficients were changed after fit?
    #####################
    y$projected <- yproj <- identical(x$projected, TRUE)
    y$changedcoef <- yproj || !is.null(x$coef.orig)
    y$valid <- valid.ppm(x, warn=FALSE)
    ###### Extract fitted model coefficients #########################
    y$entries$coef <- COEFS <- x$coef
    y$coef.orig <- x$coef.orig
    y$entries$Vnames <- Vnames <- x$internal$Vnames
    y$entries$IsOffset <- x$internal$IsOffset
    ###### Extract fitted interaction and summarise  #################
    FITIN <- fitin(x)
    y$interaction <- summary(FITIN)
    # Exit here if quick=TRUE
    if(identical(quick, TRUE))
      return(y)
    ######  Does it have external covariates?  ####################
    # defaults
    y <- append(y, list(has.covars    = FALSE,
                        covnames      = character(0),
                        covars.used   = character(0),
                        uses.covars   = FALSE,
                        covars.are.df = FALSE,
                        expandable    = TRUE,
                        covar.type    = character(0),
                        covar.descrip = character(0),
                        has.funcs     = FALSE,
                        covfunargs    = NULL,
                        has.xargs     = FALSE,
                        xargmap       = NULL))
    class(y) <- "summary.ppm"
    if(!antiquated) {
      covars <- x$covariates
      y$has.covars <- hc <- !is.null(covars) && (length(covars) > 0)
      if(hc) {
        y$covnames <- names(covars)
        used <- (y$trendvar %in% names(covars))
        y$covars.used <- y$trendvar[used]
        y$uses.covars <- any(used)
        y$covars.are.df <- is.data.frame(covars)
        # describe covariates
        ctype <- unlist(lapply(covars, covtype))
        # model can be expanded to a larger window only if the covariates
        # it uses are functions or constants (not pixel images)
        y$expandable <- all(ctype[used] %in%c("function", "number"))
        names(ctype) <- names(covars)
        y$covar.type <- ctype
        y$covar.descrip <- ctype
        # are there any functions?
        y$has.funcs <- any(isfun <- (ctype == "function"))
        # do covariates depend on additional arguments?
        if(y$has.funcs) {
          y$covfunargs <- x$covfunargs
          y$cfafitter <- attr(x$covfunargs, "fitter")
          funs <- covars[isfun]
          fdescrip <- function(f) {
            if(inherits(f, "distfun")) return("distfun")
            alist <- paste(names(formals(f)), collapse=", ")
            paste("function(", alist, ")", sep="")
          }
          y$covar.descrip[isfun] <- unlist(lapply(funs, fdescrip))
          # find any extra arguments (after args 1 & 2) explicitly named
          fargs <- lapply(funs, xargs)
          nxargs <- lengths(fargs)
          y$has.xargs <- any(nxargs > 0)
          if(y$has.xargs) {
            # identify which function arguments are fixed in the call
            fmap <- data.frame(Covariate=rep.int(names(funs), nxargs),
                               Argument=unlist(fargs))
            fmap$Given <- (fmap$Argument %in% names(y$covfunargs))
            y$xargmap <- fmap
          }
        }
      }
    } else {
      # Antiquated format
      # Interpret the function call instead
      callexpr <- parse(text=x$call)
      callargs <- names(as.list(callexpr[[1]]))
      # Data frame of covariates was called 'data' in versions up to 1.4-x
      y$has.covars <- !is.null(callargs) && !is.na(pmatch("data", callargs))
      # conservative guess
      y$uses.covars <- y$has.covars
      y$covfunargs <- NULL
    }
    ######  Arguments in call ####################################
    y$args <- x[c("call", "correction", "rbord")]
    #######  Main data components #########################
    y$entries <- append(list(quad=QUAD,
                             data=DATA,
                             interaction=INTERACT),
                        y$entries)
    if(is.character(quick) && (quick == "entries"))
      return(y)
    ####### Summarise data ############################
    y$data <- summary(DATA, checkdup=FALSE)
    y$quad <- summary(QUAD, checkdup=FALSE)
    if(is.character(quick) && (quick == "no prediction"))
      return(y)
    ######  Trend component #########################
    y$trend <- list()
    y$trend$name <- if(y$poisson) "Intensity" else "Trend"
    y$trend$formula <- if(y$no.trend) NULL else TREND
    if(y$poisson && y$no.trend) {
      # uniform Poisson process
      y$trend$value <- exp(COEFS[[1]])
      y$trend$label <- switch(y$marktype,
                              unmarked="Uniform intensity",
                              multitype="Uniform intensity for each mark level",
                              marked="Uniform intensity in product space",
                              "")
    }
    else if(y$stationary) {
      # stationary
      switch(y$marktype,
             unmarked={
               # stationary non-poisson non-marked
               y$trend$label <- "First order term"
               y$trend$value <- c(beta=exp(COEFS[[1]]))
             },
             multitype={
               # stationary, multitype
               mrk <- marks(DATA)
               y$trend$label <-
                 if(y$poisson) "Intensities" else "First order terms"
               # Use predict.ppm to evaluate the fitted intensities
               lev <- factor(levels(mrk), levels=levels(mrk))
               nlev <- length(lev)
               marx <- list(x=rep.int(0, nlev),
                            y=rep.int(0, nlev),
                            marks=lev)
               betas <- predict(x, locations=marx, type="trend")
               names(betas) <- paste("beta_", as.character(lev), sep="")
               y$trend$value <- betas
             },
             marked={
               # stationary, marked
               y$trend$label <- "Fitted intensity coefficients"
               y$trend$value <- blankcoefnames(COEFS)
             })
    } else {
      # not stationary
      # extract trend terms without trying to understand them much
      if(is.null(Vnames))
        trendbits <- COEFS
      else {
        # drop coefficients belonging to the interaction (named in Vnames)
        agree <- outer(names(COEFS), Vnames, "==")
        whichbits <- matrowall(!agree)
        trendbits <- COEFS[whichbits]
      }
      y$trend$label <- ngettext(length(trendbits),
                                "Fitted trend coefficient",
                                "Fitted trend coefficients")
      y$trend$value <- blankcoefnames(trendbits)
    }
    # ----- parameters with SE --------------------------
    if(is.character(quick) && (quick == "no variances"))
      return(y)
    # Exit before SE for variational Bayes
    if(!is.null(x$internal$VB))
      return(y)
    if(length(COEFS) > 0) {
      # compute standard errors
      se <- x$internal$se
      if(is.null(se)) {
        vc <- vcov(x, fine=fine,
                   matrix.action="warn",
                   invalid.action="silent")
        if(!is.null(vc)) {
          se <- if(is.matrix(vc)) sqrt(diag(vc)) else
                if(length(vc) == 1) sqrt(vc) else NULL
        }
      }
      if(!is.null(se)) {
        two <- qnorm(0.975)
        lo <- COEFS - two * se
        hi <- COEFS + two * se
        zval <- COEFS/se
        pval <- 2 * pnorm(abs(zval), lower.tail=FALSE)
        psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1),
                    labels=c("***", "**", "*", " "),
                    include.lowest=TRUE)
        # table of coefficient estimates with SE and 95% CI
        y$coefs.SE.CI <- data.frame(Estimate=COEFS,
                                    S.E.=se,
                                    CI95.lo=lo,
                                    CI95.hi=hi,
                                    Ztest=psig,
                                    Zval=zval)
      }
    }
    return(y)
  }

  summary.ppm
})

## Extract the coefficient table (estimates, SE, CI) from a summary.ppm.
coef.summary.ppm <- function(object, ...) {
  object$coefs.SE.CI
}

## Print method for 'summary.ppm' objects.
## Prints model type, fitting method, quadrature scheme, trend,
## covariates, coefficient table, interaction, and diagnostics.
print.summary.ppm <- function(x, ...) {
  if(x$old)
    warning("Model was fitted by an older version of spatstat")
  if(is.null(x$args)) {
    # this is the quick version
    splat(x$name)
    return(invisible(NULL))
  }
  # otherwise - full details
  splat("Point process model")
  fitter <- if(!is.null(x$fitter)) x$fitter else "unknown"
  methodchosen <-
    if(is.null(x$method))
      "unspecified method"
    else if(fitter == "exact")
      "maximum likelihood"
    else
      switch(x$method,
             mpl={
               if(x$poisson) {
                 # Poisson process
                 "maximum likelihood (Berman-Turner approximation)"
               } else {
                 "maximum pseudolikelihood (Berman-Turner approximation)"
               }
             },
             logi={
               if(is.null(x$VB)){
                 if(x$poisson) {
                   # Poisson process
                   "maximum likelihood (logistic regression approximation)"
                 } else {
                   "maximum pseudolikelihood (logistic regression approximation)"
                 }
               } else {
                 "maximum posterior density (variational Bayes approximation)"
               }
             },
             ho="Huang-Ogata method (approximate maximum likelihood)",
             paste("unrecognised method", sQuote(x$method)))
  splat("Fitting method:", methodchosen)
  howfitted <- switch(fitter,
                      exact= "analytically",
                      gam  = "using gam()",
                      glm  = "using glm()",
                      ho   = NULL,
                      paste("using unrecognised fitter", sQuote(fitter)))
  if(!is.null(howfitted)) splat("Model was fitted", howfitted)
  if(fitter %in% c("glm", "gam")) {
    if(x$converged) splat("Algorithm converged")
    else splat("*** Algorithm did not converge ***")
  }
  if(x$projected)
    splat("Fit was projected to obtain a valid point process model")
  cat("Call:\n")
  print(x$args$call)
  if(x$old)
    splat("** Executed by old spatstat version", x$version, " **")
  splat("Edge correction:", dQuote(x$args$correction))
  if(x$args$correction == "border")
    splat("\t[border correction distance r =", x$args$rbord,"]")
  # print summary of quadrature scheme
  if(is.null(x$quad))
    return(invisible(NULL))
  ruletextline()
  print(x$quad)
  ## start printing trend information
  if(is.null(x$no.trend))
    return(invisible(NULL))
  ruletextline()
  splat("FITTED MODEL:")
  parbreak()
  # This bit is currently identical to print.ppm()
  # except for a bit more fanfare
  # and the inclusion of the 'gory details' bit
  notrend <-    x$no.trend
  # stationary <- x$stationary
  poisson <-    x$poisson
  markeddata <- x$marked
  multitype  <- x$multitype
  # markedpoisson <- poisson && markeddata
  # ----------- Print model type -------------------
  cat(x$name)
  cat("\n")
  if(markeddata) mrk <- x$entries$marks
  if(multitype) {
    splat("Possible marks:")
    cat(paste(levels(mrk)))
  }
  # ----- trend --------------------------
  if(length(x$trend) == 0)
    return(invisible(NULL))
  parbreak()
  splat(paste0("---- ", x$trend$name, ": ----"))
  parbreak()
  if(!notrend) {
    splat("Log", if(poisson) "intensity:" else "trend:",
          pasteFormula(x$trend$formula))
    if(x$uses.covars)
      splat("Model depends on external",
            ngettext(length(x$covars.used), "covariate", "covariates"),
            commasep(sQuote(x$covars.used)))
  }
  if(x$has.covars) {
    if(notrend || !x$uses.covars)
      splat("Model object contains external covariates")
    isdf <- identical(x$covars.are.df, TRUE)
    if(!is.null(cd <- x$covar.descrip)) {
      # print description of each covariate
      splat(paste0("Covariates provided",
                   if(isdf) " (in data frame)" else NULL,
                   ":"))
      namescd <- names(cd)
      for(i in seq_along(cd))
        splat(paste0("\t", namescd[i], ": ", cd[i]))
    }
    if(!is.null(cfa <- x$covfunargs) && length(cfa) > 0) {
      splat("Covariate function arguments (covfunargs) provided:")
      namescfa <- names(cfa)
      for(i in seq_along(cfa)) {
        cat(paste(namescfa[i], "= "))
        cfai <- cfa[[i]]
        if(is.numeric(cfai) && length(cfai) == 1) {
          cat(paste(cfai, "\n"))
        } else print(cfa[[i]])
      }
    }
  }
  parbreak()
  splat(paste0(x$trend$label, ":"))
  tv <- x$trend$value
  if(!is.list(tv))
    print(tv)
  else
    for(i in seq_along(tv))
      print(tv[[i]])
  # table of coefficient estimates with SE and 95% CI
  if(!is.null(cose <- x$coefs.SE.CI)) {
    cat("\n")
    print(cose)
  }
  # ---- Interaction ----------------------------
  if(!poisson) {
    parbreak()
    splat(" ---- Interaction: -----")
    parbreak()
    print(x$interaction)
  }
  ####### Gory details ###################################
  parbreak()
  splat("----------- gory details -----")
  parbreak()
  COEFS <- x$entries$coef
  splat("Fitted regular parameters (theta):")
  print(COEFS)
  parbreak()
  splat("Fitted exp(theta):")
  print(exp(unlist(COEFS)))
  ##### Warnings issued #######
  probs <- x$problems
  if(!is.null(probs) && is.list(probs) && (length(probs) > 0))
    lapply(probs,
           function(a) {
             if(is.list(a) && !is.null(p <- a$print))
               cat(paste("Problem:\n", p, "\n\n"))
           })
  vali <- x$valid
  if(identical(vali, FALSE) && waxlyrical("errors")) {
    parbreak()
    splat("*** Model is not valid ***")
    if(!all(is.finite(x$entries$coef))) {
      splat("*** Some coefficients are NA or Inf ***")
    } else {
      splat("*** Interaction parameters are outside valid range ***")
    }
  } else if(is.na(vali) && waxlyrical("extras")) {
    parbreak()
    splat("[Validity of model could not be checked]")
  }
  return(invisible(NULL))
}

## Does the fitted model have no spatial trend?
no.trend.ppm <- function(x) {
  summary.ppm(x, quick=TRUE)$no.trend
}

## Generic: is the model stationary?
is.stationary <- function(x) {
  UseMethod("is.stationary")
}

## Generic: is the model a Poisson process?
is.poisson <- function(x) {
  UseMethod("is.poisson")
}

## A ppm is stationary if it has no trend, or its trend depends
## only on the marks (not on spatial location).
is.stationary.ppm <- function(x) {
  TREND <- x$trend
  if(is.null(TREND) || identical.formulae(TREND, ~1))
    return(TRUE)
  if(all(variablesinformula(TREND) == "marks"))
    return(TRUE)
  return(FALSE)
}

## A ppm is Poisson if its interaction is absent or the Poisson interaction.
is.poisson.ppm <- function(x) {
  stopifnot(is.ppm(x))
  y <- x$interaction
  if(is.null(y)) y <- Poisson()
  is.poisson.interact(y)
}

## Does the data pattern of the fitted model carry marks?
is.marked.ppm <- function(X, ...) {
  summary.ppm(X, quick=TRUE)$marked
}

## Is the data pattern of the fitted model multitype?
is.multitype.ppm <- function(X, ...)
{ summary.ppm(X, quick=TRUE)$multitype }

# is.expandable.ppm: TRUE iff the 'expandable' entry of the model summary
# is identically TRUE (anything else, including NULL/NA, yields FALSE)
is.expandable.ppm <- function(x) {
  return(identical(summary(x, quick="entries")$expandable, TRUE))
}

blankcoefnames <- function(x) {
  # remove name labels from ppm coefficients
  # First decide whether there are 'labels within labels'
  unlabelled <- unlist(lapply(x, function(z) { is.null(names(z)) } ))
  if(all(unlabelled))
    value <- unlist(x)
  else {
    # keep entries that carry their own names; flatten the unlabelled ones
    value <- list()
    for(i in seq_along(x))
      value[[i]] <- if(unlabelled[i]) unlist(x[i]) else x[[i]]
  }
  return(value)
}
spatstat/R/iplot.R0000644000176200001440000002660113333543255013603 0ustar liggesusers#
# interactive plot for ppp objects using rpanel
#
# $Revision: 1.23 $ $Date: 2017/02/07 07:47:20 $
#
#
# Effect:
#    when the user types
#                 iplot(x)
#    a pop-up panel displays a standard plot of x and
#    buttons allowing control of the plot parameters.
# Coding:
#    The panel 'p' contains the following internal variables
#         x          Original point pattern
#         w          Window of point pattern
#         xname      Name of x (for main title)
#         mtype      Type of marks of x
#         bb         frame of x
#         bbmid      midpoint of frame
#    The following variables in 'p' are controlled by panel buttons etc
#         split      Logical: whether to split multitype pattern
#         pointmap   Plot character, or "marks" indicating that marks are used
#         zoomfactor Zoom factor
#         zoomcentre Centre point for zoom
#         charsize   Character expansion factor cex
#         markscale  Mark scale factor markscale
#
iplot <- function(x, ...)
{ UseMethod("iplot") } iplot.ppp <- local({ iplot.ppp <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "ppp") if(markformat(x) %in% c("hyperframe", "list")) marks(x) <- as.data.frame(as.hyperframe(marks(x))) if(markformat(x) == "dataframe" && ncol(marks(x)) > 1) { warning("Using only the first column of marks") marks(x) <- marks(x)[,1L] } mtype <- if(is.multitype(x)) "multitype" else if(is.marked(x)) "marked" else "unmarked" bb <- as.rectangle(as.owin(x)) bbmid <- unlist(centroid.owin(bb)) ## kraever("rpanel") ## p <- rpanel::rp.control(paste("iplot(", xname, ")", sep=""), x=x, w=as.owin(x), xname=xname, mtype=mtype, bb=bb, bbmid=bbmid, split=FALSE, pointmap=if(is.marked(x)) "marks" else "o", zoomfactor=1, zoomcentre=bbmid, size=c(700, 400)) # Split panel into three # Left: plot controls # Middle: data # Right: navigation/zoom rpanel::rp.grid(p, "gcontrols", pos=list(row=0,column=0)) rpanel::rp.grid(p, "gdisplay", pos=list(row=0,column=1)) rpanel::rp.grid(p, "gnavigate", pos=list(row=0,column=2)) #----- Data display ------------ # This line is to placate the package checker mytkr <- NULL # Create data display panel rpanel::rp.tkrplot(p, mytkr, plotfun=do.iplot.ppp, action=click.iplot.ppp, pos=list(row=0,column=0,grid="gdisplay")) #----- Plot controls ------------ nextrow <- 0 pozzie <- function(n=nextrow, ...) append(list(row=n,column=0,grid="gcontrols"), list(...)) # main title rpanel::rp.textentry(p, xname, action=redraw.iplot.ppp, title="Plot title", pos=pozzie(0)) nextrow <- 1 # split ? 
if(mtype == "multitype") { rpanel::rp.checkbox(p, split, initval=FALSE, title="Split according to marks", action=redraw.iplot.ppp, pos=pozzie(1)) nextrow <- 2 } # plot character or mark style ptvalues <- c("o", "bullet", "plus") ptlabels <- c("open circles", "filled circles", "crosshairs") if(is.marked(x)) { ptvalues <- c("marks", ptvalues) ptlabels <- if(mtype == "multitype") c("Symbols depending on mark", ptlabels) else c("Circles proportional to mark", ptlabels) } pointmap <- ptvalues[1L] rpanel::rp.radiogroup(p, pointmap, vals=ptvalues, labels=ptlabels, title="how to plot points", action=redraw.iplot.ppp, pos=pozzie(nextrow)) nextrow <- nextrow+1 # plot character size charsize <- 1 rpanel::rp.slider(p, charsize, 0, 5, action=redraw.iplot.ppp, title="symbol expansion factor (cex)", initval=1, showvalue=TRUE, pos=pozzie(nextrow, sticky="")) nextrow <- nextrow+1 # mark scale if(mtype == "marked") { marx <- x$marks marx <- marx[is.finite(marx)] scal <- mark.scale.default(marx, x$window) markscale <- scal rpanel::rp.slider(p, markscale, from=scal/10, to = 10*scal, action=redraw.iplot.ppp, initval=scal, title="mark scale factor (markscale)", showvalue=TRUE, pos=pozzie(nextrow)) nextrow <- nextrow+1 } # button to print a summary at console rpanel::rp.button(p, title="Print summary information", pos=pozzie(nextrow), action=function(panel) { print(summary(panel$x)); panel} ) # #----- Navigation controls ------------ nextrow <- 0 navpos <- function(n=nextrow,cc=0, ...) 
append(list(row=n,column=cc,grid="gnavigate"), list(...)) rpanel::rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce + c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce - c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) rpanel::rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1L] stepsize <- (width/4)/zo panel$zoomcentre <- ce + c(stepsize, 0) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2L] stepsize <- (height/4)/zo panel$zoomcentre <- ce - c(0, stepsize) CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor * 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor / 2 CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- 1 panel$zoomcentre <- panel$bbmid CommitAndRedraw(panel) return(panel) }) nextrow <- nextrow + 1 rpanel::rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""), action=redraw.iplot.ppp) nextrow 
<- nextrow+1 # quit button rpanel::rp.button(p, title="Quit", quitbutton=TRUE, pos=navpos(nextrow, 1, sticky=""), action= function(panel) { panel }) invisible(NULL) } # Function to redraw the whole shebang redraw.iplot.ppp <- function(panel) { rpanel::rp.tkrreplot(panel, mytkr) panel } # Function executed when data display is clicked click.iplot.ppp <- function(panel, x, y) { if(panel$split) { cat("Mouse interaction is not supported when the point pattern is split\n") } else { panel$zoomcentre <- panel$zoomcentre + (c(x,y) - panel$bbmid)/panel$zoomfactor CommitAndRedraw(panel) } return(panel) } # function that updates the plot when the control panel is operated do.iplot.ppp <- function(panel) { use.marks <- TRUE pch <- 16 switch(panel$pointmap, marks={ use.marks <- TRUE pch <- NULL }, o = { use.marks <- FALSE pch <- 1 }, bullet = { use.marks <- FALSE pch <- 16 }, plus = { use.marks <- FALSE pch <- 3 }) # scale and clip the pattern x <- panel$x w <- panel$w z <- panel$zoomfactor if(is.null(z)) z <- 1 ce <- panel$zoomcentre bb <- panel$bb bbmid <- panel$bbmid scalex <- shift(scalardilate(shift(x, -ce), z), bbmid) scalew <- shift(scalardilate(shift(w, -ce), z), bbmid) scalex <- scalex[, bb] scalew <- intersect.owin(scalew, bb, fatal=FALSE) # determine what is plotted under the clipped pattern blankargs <- list(type="n") dashargs <- list(lty=3, border="red") panel.begin <- if(is.null(scalew)) { # empty intersection; just create the plot space layered(bb, plotargs=list(blankargs)) } else if(identical(bb, scalew)) { if(z == 1) { # original state # window is rectangular # plot the data window as a solid black rectangle layered(bb, scalew, plotargs=list(blankargs, list(lwd=2))) } else { # zoom view is entirely inside window # plot the clipping region as a red dashed rectangle layered(bb, plotargs=list(dashargs)) } } else { # field of view is not a subset of window # plot the clipping region as a red dashed rectangle # Then add the data window layered(bb, scalew, 
plotargs=list(dashargs, list(invert=TRUE))) } # draw it # opa <- par(ask=FALSE) if(panel$mtype == "multitype" && panel$split) { scalex <- split(scalex, un=(panel$pointmap != "marks")) plot(scalex, main=panel$xname, use.marks=use.marks, pch=pch, cex=panel$charsize, panel.begin=panel.begin) } else { # draw scaled & clipped window plot(panel.begin, main=panel$xname) # add points if(panel$mtype == "marked" && panel$pointmap == "marks") { plot(scalex, add=TRUE, use.marks=use.marks, markscale=panel$markscale) } else { plot(scalex, add=TRUE, use.marks=use.marks, pch=pch, cex=panel$charsize) } } # par(opa) panel } CommitAndRedraw <- function(panel) { # hack to ensure that panel is immediately updated in rpanel kraever("rpanel") ## This is really a triple-colon! rpanel:::rp.control.put(panel$panelname, panel) # now redraw it redraw.iplot.ppp(panel) } iplot.ppp }) spatstat/R/lppm.R0000644000176200001440000002525313613547031013423 0ustar liggesusers# # lppm.R # # Point process models on a linear network # # $Revision: 1.44 $ $Date: 2018/05/02 02:04:45 $ # lppm <- function(X, ...) { UseMethod("lppm") } lppm.formula <- function(X, interaction=NULL, ..., data=NULL) { ## remember call callstring <- paste(short.deparse(sys.call()), collapse = "") cl <- match.call() ########### INTERPRET FORMULA ############################## if(!inherits(X, "formula")) stop(paste("Argument 'X' should be a formula")) formula <- X if(spatstat.options("expand.polynom")) formula <- expand.polynom(formula) ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2L]] trend <- formula[c(1L,3L)] ## FIT ####################################### thecall <- call("lppm", X=Yexpr, trend=trend, data=data, interaction=interaction) ncall <- length(thecall) argh <- list(...) 
nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv) result$call <- cl result$callstring <- callstring return(result) } lppm.lpp <- function(X, ..., eps=NULL, nd=1000, random=FALSE) { Xname <- short.deparse(substitute(X)) callstring <- paste(short.deparse(sys.call()), collapse = "") cl <- match.call() nama <- names(list(...)) resv <- c("method", "forcefit") if(any(clash <- resv %in% nama)) warning(paste(ngettext(sum(clash), "Argument", "Arguments"), commasep(sQuote(resv[clash])), "must not be used")) stopifnot(inherits(X, "lpp")) Q <- linequad(X, eps=eps, nd=nd, random=random) fit <- ppm(Q, ..., method="mpl", forcefit=TRUE) if(!is.poisson.ppm(fit)) warning("Non-Poisson models currently use Euclidean distance") out <- list(X=X, fit=fit, Xname=Xname, call=cl, callstring=callstring) class(out) <- "lppm" return(out) } is.lppm <- function(x) { inherits(x, "lppm") } # undocumented as.ppm.lppm <- function(object) { object$fit } fitted.lppm <- function(object, ..., dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE) { pfit <- object$fit v <- fitted(pfit, dataonly=dataonly, new.coef=new.coef, leaveoneout=leaveoneout) return(v) } predict.lppm <- function(object, ..., type="trend", locations=NULL, new.coef=NULL) { type <- pickoption("type", type, c(trend="trend", cif="cif", lambda="cif")) X <- object$X fit <- object$fit L <- as.linnet(X) if(!is.null(locations)) { #' locations given; return a vector/matrix of predicted values if(is.lpp(locations)) locations <- as.ppp(locations) values <- predict(fit, locations=locations, type=type, new.coef=new.coef) return(values) } # locations not given; want a pixel image # pixellate the lines Llines <- as.psp(L) linemask <- as.mask.psp(Llines, ...) 
lineimage <- as.im(linemask) # extract pixel centres xx <- rasterx.mask(linemask) yy <- rastery.mask(linemask) mm <- linemask$m xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, Llines) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) projdata <- cbind(pixdf, projloc, projmap) # predict at the projected points if(!is.multitype(fit)) { values <- predict(fit, locations=projloc, type=type, new.coef=new.coef) # map to nearest pixels Z <- lineimage Z[pixelcentres] <- values # attach exact line position data df <- cbind(projdata, values) out <- linim(L, Z, df=df, restrict=FALSE) } else { # predict for each type lev <- levels(marks(data.ppm(fit))) out <- list() for(k in seq(length(lev))) { markk <- factor(lev[k], levels=lev) locnk <- cbind(projloc, data.frame(marks=markk)) values <- predict(fit, locations=locnk, type=type, new.coef=new.coef) Z <- lineimage Z[pixelcentres] <- values df <- cbind(projdata, values) out[[k]] <- linim(L, Z, df=df, restrict=FALSE) } out <- as.solist(out) names(out) <- as.character(lev) } return(out) } coef.lppm <- function(object, ...) { coef(object$fit) } print.lppm <- function(x, ...) { splat("Point process model on linear network") print(x$fit) terselevel <- spatstat.options('terse') if(waxlyrical('extras', terselevel)) splat("Original data:", x$Xname) if(waxlyrical('gory', terselevel)) print(as.linnet(x)) return(invisible(NULL)) } summary.lppm <- function(object, ...) 
{
  # body of the summary method for 'lppm' (header precedes this chunk):
  # describes the model, prints the summary of the underlying ppm fit,
  # and adds detail according to spatstat.options('terse')
  splat("Point process model on linear network")
  print(summary(object$fit))
  terselevel <- spatstat.options('terse')
  if(waxlyrical('extras', terselevel))
    splat("Original data:", object$Xname)
  if(waxlyrical('gory', terselevel))
    print(summary(as.linnet(object)))
  return(invisible(NULL))
}

# plot method: plot the predicted surface of the fitted model
# (type = "trend" or "cif", passed to predict.lppm)
plot.lppm <- function(x, ..., type="trend") {
  xname <- short.deparse(substitute(x))
  y <- predict(x, type=type)
  do.call(plot, resolve.defaults(list(y), list(...), list(main=xname)))
}

# analysis of deviance for fitted models on a linear network;
# delegates to anova.ppm after extracting the underlying ppm fits
anova.lppm <- function(object, ..., test=NULL) {
  stuff <- list(object=object, ...)
  if(!is.na(hit <- match("override", names(stuff)))) {
    warning("Argument 'override' is outdated and was ignored")
    stuff <- stuff[-hit]
  }
  #' extract ppm objects where appropriate
  mod <- sapply(stuff, is.lppm)
  stuff[mod] <- lapply(stuff[mod], getElement, name="fit")
  #' analysis of deviance or adjusted composite deviance
  do.call(anova.ppm, append(stuff, list(test=test)))
}

# update method: re-fit the model, optionally replacing the data
# (an 'lpp' argument, converted to a quadrature scheme) and/or the formula
update.lppm <- function(object, ...) {
  stopifnot(inherits(object, "lppm"))
  X <- object$X
  fit <- object$fit
  Xname <- object$Xname
  callframe <- environment(formula(fit))
  aargh <- list(...)
  islpp <- sapply(aargh, is.lpp)
  if(any(islpp)) {
    # trap point pattern argument & convert to quadscheme
    ii <- which(islpp)
    if((npp <- length(ii)) > 1)
      stop(paste("Arguments not understood:", npp, "lpp objects given"))
    X <- aargh[[ii]]
    aargh[[ii]] <- linequad(X)
  }
  isfmla <- sapply(aargh, inherits, what="formula")
  if(any(isfmla)) {
    # trap formula pattern argument, update it, evaluate LHS if required
    jj <- which(isfmla)
    if((nf <- length(jj)) > 1)
      stop(paste("Arguments not understood:", nf, "formulae given"))
    fmla <- aargh[[jj]]
    fmla <- update(formula(object), fmla)
    if(!is.null(lhs <- lhs.of.formula(fmla))) {
      # evaluate the left-hand side with '.' bound to the current data
      X <- eval(lhs, envir=list2env(list("."=X), parent=callframe))
      Qpos <- if(any(islpp)) ii else (length(aargh) + 1L)
      aargh[[Qpos]] <- linequad(X)
    }
    aargh[[jj]] <- rhs.of.formula(fmla)
  }
  newfit <- do.call(update.ppm, append(list(fit), aargh), envir=callframe)
  if(!is.poisson.ppm(newfit))
    warning("Non-Poisson models currently use Euclidean distance")
  out <- list(X=X, fit=newfit, Xname=Xname)
  class(out) <- "lppm"
  return(out)
}

# methods that simply delegate to the underlying fitted ppm object
terms.lppm <- function(x, ...) { terms(x$fit, ...) }

logLik.lppm <- function(object, ...) { logLik(object$fit, ...) }

deviance.lppm <- function(object, ...) { as.numeric(-2 * logLik(object, ...)) }

# pseudo-R^2: 1 - deviance(model)/deviance(null model),
# optionally retaining offset terms in the null model
pseudoR2.lppm <- function(object, ..., keepoffset=TRUE) {
  dres <- deviance(object, ..., warn=FALSE)
  nullfmla <- . ~ 1
  if(keepoffset && has.offset.term(object)) {
    # keep offset terms on the right-hand side of the null formula
    off <- attr(model.depends(object), "offset")
    offterms <- row.names(off)[apply(off, 1, any)]
    if(length(offterms)) {
      nullrhs <- paste(offterms, collapse=" + ")
      nullfmla <- as.formula(paste(". ~ ", nullrhs))
    }
  }
  nullmod <- update(object, nullfmla, forcefit=TRUE)
  dnul <- deviance(nullmod, warn=FALSE)
  return(1 - dres/dnul)
}

formula.lppm <- function(x, ...) { formula(x$fit, ...) }

extractAIC.lppm <- function(fit, ...) { extractAIC(fit$fit, ...) }

# window of the model = window of the underlying linear network
as.owin.lppm <- function(W, ..., fatal=TRUE) {
  stopifnot(inherits(W, "lppm"))
  as.owin(as.linnet(W), ..., fatal=fatal)
}

Window.lppm <- function(X, ...)
{ as.owin(X) }

# the original data point pattern to which the model was fitted
data.lppm <- function(object) { object$X }

# compute pixel images of the canonical covariates of the model,
# restricted to the linear network L
model.images.lppm <- local({

  model.images.lppm <- function(object, L=as.linnet(object), ...) {
    stopifnot(inherits(object, "lppm"))
    stopifnot(inherits(L, "linnet"))
    # images over the enclosing rectangle, from the underlying ppm fit
    m <- model.images(object$fit, W=as.rectangle(L), ...)
    if(length(m)) {
      ## restrict images to L
      type <- if(is.hyperframe(m)) "hyperframe" else
              if(is.imlist(m)) "imlist" else
              if(is.list(m) && all(sapply(m, is.im))) "imlist" else
              stop("Internal error: model.images not understood", call.=FALSE)
      switch(type,
             imlist = {
               ## list of images: convert to list of linims
               ZL <- netmask(L, template=m[[1L]])
               m <- tolinims(m, L=L, imL=ZL)
             },
             hyperframe = {
               ## hyperframe, each column being a list of images
               ## extract columns
               rownam <- row.names(m)
               m <- as.list(m)
               ZL <- netmask(L, template=m[[1L]][[1L]])
               mm <- lapply(m, tolinims, L=L, imL=ZL)
               m <- do.call(hyperframe, mm)
               row.names(m) <- rownam
             })
    }
    return(m)
  }

  # pixel mask of the network, on the same raster as 'template'
  netmask <- function(L, template) {
    as.im(as.mask.psp(as.psp(L), xy=as.mask(template)))
  }

  # convert one image (or each image in a list) to a linim on L
  tolinim <- function(x, L, imL) linim(L, eval.im(x * imL), restrict=FALSE)

  tolinims <- function(x, L, imL) solapply(x, tolinim, L=L, imL=imL)

  model.images.lppm
})

model.matrix.lppm <- function(object,
                              data=model.frame(object, na.action=NULL),
                              ...,
                              keepNA=TRUE) {
  stopifnot(is.lppm(object))
  if(missing(data)) data <- NULL   # let the ppm method construct the frame
  model.matrix(object$fit, data=data, ..., keepNA=keepNA)
}

model.frame.lppm <- function(formula, ...) {
  stopifnot(inherits(formula, "lppm"))
  model.frame(formula$fit, ...)
}

# the linear network on which the model is defined
domain.lppm <- as.linnet.lppm <- function(X, ...) {
  as.linnet(X$X, ...)
}

nobs.lppm <- function(object, ...) { npoints(object$X) }

# property queries delegated to the underlying ppm fit
is.poisson.lppm <- function(x) { is.poisson(x$fit) }

is.stationary.lppm <- function(x) { is.stationary(x$fit) }

is.multitype.lppm <- function(X, ...) { is.multitype(X$fit) }

is.marked.lppm <- function(X, ...) { is.marked(X$fit) }

vcov.lppm <- function(object, ...)
{ if(!is.poisson(object)) stop("vcov.lppm is only implemented for Poisson models") vcov(object$fit, ...) } valid.lppm <- function(object, ...) { valid(object$fit, ...) } emend.lppm <- function(object, ...) { object$fit <- emend(object$fit, ...) return(object) } spatstat/R/pcfmulti.inhom.R0000644000176200001440000002216513333543255015411 0ustar liggesusers# # pcfmulti.inhom.R # # $Revision: 1.15 $ $Date: 2016/09/21 07:28:42 $ # # inhomogeneous multitype pair correlation functions # # pcfcross.inhom <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) g <- pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(g, substitute(g[inhom,i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(g[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(g, "dangerous") return(result) } pcfdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- paste("points") g <- pcfmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(g, substitute(g[inhom, i ~ dot](r), list(i=iname)), c("g", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(g[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(g, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } pcfmulti.inhom <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), sigma=NULL, varcov=NULL, Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) # bandwidth if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/areaW) } ########## indices I and J ######################## if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical 
vectors") if(length(I) != npts || length(J) != npts) stop(paste("The length of I and J must equal", "the number of points in the pattern")) nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] ########## intensity values ######################### dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE if(is.null(lambdaI)) { # Estimate density by leave-one-out kernel smoothing dangerI <- FALSE lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaI)) check.nvector(lambdaI, nI) else if(is.im(lambdaI)) lambdaI <- safelookup(lambdaI, XI) else if(is.function(lambdaI)) lambdaI <- lambdaI(XI$x, XI$y) else stop(paste(sQuote("lambdaI"), "should be a vector, a pixel image, or a function")) } if(is.null(lambdaJ)) { # Estimate density by leave-one-out kernel smoothing dangerJ <- FALSE lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaJ)) check.nvector(lambdaJ, nJ) else if(is.im(lambdaJ)) lambdaJ <- safelookup(lambdaJ, XJ) else if(is.function(lambdaJ)) lambdaJ <- lambdaJ(XJ$x, XJ$y) else stop(paste(sQuote("lambdaJ"), "should be a vector, a pixel image, or a function")) } danger <- dangerI || dangerJ ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, npts/areaW) breaks <- handle.r.b.args(r, breaks, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(inhom,I,J)") out <- fv(df, "r", quote(g[inhom,I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument 
r", "theoretical Poisson %s"), fname=fname, yexp=quote(g[list(inhom,I,J)](r))) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances # identify close pairs of points close <- crosspairs(XI, XJ, rmax+hmax, what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) gT <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . 
~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat/R/ho.R0000644000176200001440000000431213333543255013055 0ustar liggesusers# # ho.R # # Huang-Ogata method # # $Revision: 1.17 $ $Date: 2016/03/15 07:42:26 $ # ho.engine <- function(model, ..., nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE) { verifyclass(model, "ppm") if(is.null(start)) start <- list(n.start=data.ppm(model)$n) # check that the model can be simulated if(!valid.ppm(model)) { warning("Fitted model is invalid - cannot be simulated") return(NULL) } # compute the observed value of the sufficient statistic X <- data.ppm(model) sobs <- suffstat(model, X) # generate 'nsim' realisations of the fitted model # and compute the sufficient statistics of the model rmhinfolist <- rmh(model, start, control, preponly=TRUE, verbose=FALSE) if(verb) { cat("Simulating... ") state <- list() } ndone <- 0 while(ndone < nsim) { Xi <- rmhEngine(rmhinfolist, verbose=FALSE) v <- try(suffstat(model,Xi)) if(!inherits(v, "try-error")) { if(ndone == 0) svalues <- matrix(, nrow=nsim, ncol=length(v)) ndone <- ndone + 1 svalues[ndone, ] <- v } if(verb) state <- progressreport(ndone, nsim, state=state) } if(verb) cat("Done.\n\n") # calculate the sample mean and variance of the # sufficient statistic for the simulations smean <- apply(svalues, 2, mean, na.rm=TRUE) svar <- var(svalues, na.rm=TRUE) # value of canonical parameter from MPL fit theta0 <- coef(model) # Newton-Raphson update Vinverse <- solve(svar) theta <- theta0 + as.vector(Vinverse %*% (sobs - smean)) ## appropriate names nama <- names(theta0) if(!is.null(nama)) { names(theta) <- nama dimnames(svar) <- dimnames(Vinverse) <- list(nama, nama) } ## update model newmodel <- model newmodel$coef <- theta newmodel$coef.orig <- theta0 newmodel$method <- "ho" newmodel$fitter <- "ho" newmodel$fisher <- svar newmodel$varcov <- Vinverse # recompute fitted interaction newmodel$fitin <- 
NULL newmodel$fitin <- fitin(newmodel) ## update pseudolikelihood value using code in logLik.ppm newmodel$maxlogpl.orig <- model$maxlogpl newmodel$maxlogpl <- logLik(newmodel, new.coef=theta, warn=FALSE) ## return(newmodel) } spatstat/R/vcov.mppm.R0000644000176200001440000002257113550026760014401 0ustar liggesusers# Variance-covariance matrix for mppm objects # # $Revision: 1.20 $ $Date: 2019/10/11 07:09:16 $ # # vcov.mppm <- local({ errhandler <- function(whinge, err) { switch(err, fatal=stop(whinge), warn={ warning(whinge) return(NA) }, null= return(NULL), stop(paste("Unrecognised option: err=", dQuote(err)))) } vcov.mppm <- function(object, ..., what="vcov", err="fatal") { what <- match.arg(what, c("vcov", "corr", "fisher", "Fisher", "internals", "all")) if(what == "Fisher") what <- "fisher" if(is.poisson.mppm(object) && object$Fit$fitter == "glm") return(vcmPois(object, ..., what=what, err=err)) return(vcmGibbs(object, ..., what=what, err=err)) } vcmPois <- function(object, ..., what, err, nacoef.action=c("warn", "fatal", "silent") ) { #' legacy algorithm for Poisson case #' detect NA coefficients if(missing(nacoef.action) && !missing(err) && !is.null(err)) { nacoef.action <- err } else { nacoef.action <- match.arg(nacoef.action) } if(!all(is.finite(coef(object)))) { gripe <- "Cannot compute variance; some coefficients are NA, NaN or Inf" switch(nacoef.action, fatal = stop(gripe, call.=FALSE), warn = warning(gripe, call.=FALSE), silent = {}) return(NULL) } #' get to work gf <- object$Fit$FIT gd <- object$Fit$moadf wt <- gd$.mpl.W fi <- fitted(gf) fo <- object$trend if(is.null(fo)) fo <- (~1) mof <- model.frame(fo, gd) mom <- model.matrix(fo, mof) momnames <- dimnames(mom)[[2]] fisher <- sumouter(mom, fi * wt) dimnames(fisher) <- list(momnames, momnames) switch(what, fisher = { return(fisher) }, vcov = { vc <- try(solve(fisher), silent=(err == "null")) if(inherits(vc, "try-error")) return(errhandler("Fisher information is singular", err)) else return(vc) }, 
corr = {
           ## correlation matrix: invert Fisher information, then rescale
           co <- try(solve(fisher), silent=(err == "null"))
           if(inherits(co, "try-error"))
             return(errhandler("Fisher information is singular", err))
           sd <- sqrt(diag(co))
           return(co / outer(sd, sd, "*"))
         })
  }

  ## Variance calculation for Gibbs (non-Poisson, or non-glm-fitted) mppm.
  ## Returns the sandwich estimate  U (A1+A2+A3) U  where U = solve(A1),
  ## assembling the second-order matrices A2, A3 from the fitted submodels.
  vcmGibbs <- function(object, ..., what, err,
                       matrix.action=c("warn", "fatal", "silent"),
                       gam.action=c("warn", "fatal", "silent"),
                       logi.action=c("warn", "fatal", "silent"),
                       nacoef.action=c("warn", "fatal", "silent")
                       ) {
    if(!missing(err)) {
      ## 'err' overrides any action argument that was not given explicitly
      if(err == "null") err <- "silent"
      matrix.action <-
        if(missing(matrix.action)) err else match.arg(matrix.action)
      gam.action <- if(missing(gam.action)) err else match.arg(gam.action)
      logi.action <- if(missing(logi.action)) err else match.arg(logi.action)
      nacoef.action <-
        if(missing(nacoef.action)) err else match.arg(nacoef.action)
    } else {
      matrix.action <- match.arg(matrix.action)
      gam.action <- match.arg(gam.action)
      logi.action <- match.arg(logi.action)
      nacoef.action <- match.arg(nacoef.action)
    }
    #' detect NA coefficients
    if(!all(is.finite(as.matrix(coef(object))))) {
      gripe <- "Cannot compute variance; some coefficients are NA, NaN or Inf"
      switch(nacoef.action,
             fatal = stop(gripe, call.=FALSE),
             warn = warning(gripe, call.=FALSE),
             silent = {})
      return(NULL)
    }
    #' initialise accumulators for the second-order terms
    cnames <- names(fixed.effects(object))
    nc <- length(cnames)
    A2 <- A3 <- matrix(0, nc, nc, dimnames=list(cnames, cnames))
    #' (1) Compute matrix A1 directly
    glmdata <- object$Fit$moadf
    glmsub <- glmdata$.mpl.SUBSET
    wt <- glmdata$.mpl.W
    mom <- model.matrix(object)
    lam <- unlist(fitted(object))
    A1 <- sumouter(mom, lam * wt * glmsub)
    #' (2) compute A2 and A3 matrices of submodels
    subs <- subfits(object, what="basicmodels")
    n <- length(subs)
    guts <- lapply(subs, vcov, what="internals",
                   matrix.action=matrix.action,
                   gam.action=gam.action,
                   logi.action=logi.action,
                   dropcoef=TRUE,
                   ...)
    a2 <- lapply(guts, getElement, name="A2")
    a3 <- lapply(guts, getElement, name="A3")
    #' (3) map into full model
    #' Identify the (unique) active interaction in each row
    activeinter <- active.interactions(object)
    #' interaction names (in glmdata)
    Vnamelist <- object$Fit$Vnamelist
    #' Each a2[[i]] and a3[[i]] refer to this interaction (eg 'str')
    #' but may contribute to several coefficients of the full model
    #' e.g. 'str' -> str:id -> 'str', 'str:id2'
    #' Determine which canonical variables of full model are active in each row
    mats <- split.data.frame(mom, glmdata$id)
    activevars <- t(sapply(mats, notallzero))
    #' dependence map of canonical variables of full model
    #' on the original variables/interactions
    md <- model.depends(object$Fit$FIT)
    #' process each row, summing A2 and A3
    for(i in seq_len(n)) {
      #' the submodel in this row
      subi <- subs[[i]]
      #' contributes to second order terms only if non-Poisson
      if(!is.poisson(subi)) {
        cnames.i <- names(coef(subi))
        a2i <- a2[[i]]
        a3i <- a3[[i]]
        #' the (unique) tag name of the interaction in this model
        tagi <- colnames(activeinter)[activeinter[i,]]
        #' the corresponding variable name(s) in glmdata and coef(subi)
        vni <- Vnamelist[[tagi]]
        #' retain only the interaction rows & columns (the rest are zero anyway)
        e <- cnames.i %in% vni
        a2i <- a2i[e, e, drop=FALSE]
        a3i <- a3i[e, e, drop=FALSE]
        cnames.ie <- cnames.i[e]
        #' which coefficients of the full model are active in this row
        acti <- activevars[i,]
        #' for each interaction variable name in the submodel,
        #' find the coefficient(s) in the main model to which it contributes
        nie <- length(cnames.ie)
        cmap <- vector(mode="list", length=nie)
        names(cmap) <- cnames.ie
        for(j in seq_len(nie)) {
          cj <- cnames.ie[j]
          cmap[[j]] <- cnames[ md[,cj] & acti ]
        }
        #' all possible mappings
        maps <- do.call(expand.grid,
                        append(cmap, list(stringsAsFactors=FALSE)))
        nmaps <- nrow(maps)
        if(nmaps == 0) {
          warning("Internal error: Unable to map submodel to full model")
        } else {
          for(irow in 1:nmaps) {
            for(jcol in 1:nmaps) {
              cmi <- as.character(maps[irow,])
              cmj <- as.character(maps[jcol,])
              if(anyDuplicated(cmi) || anyDuplicated(cmj)) {
                warning("Internal error: duplicated labels in submodel map")
              } else if(!is.null(a2i)) {
                A2[cmi,cmj] <- A2[cmi,cmj] + a2i
                ## BUG FIX: previously added a2i here as well,
                ## so a3i was computed but never used and A3 duplicated A2.
                A3[cmi,cmj] <- A3[cmi,cmj] + a3i
              }
            }
          }
        }
      }
    }
    internals <- list(A1=A1, A2=A2, A3=A3)
    if(what %in% c("internals", "all"))
      internals <- c(internals, list(suff=mom))
    if(what %in% c("vcov", "corr", "all")) {
      #' variance-covariance matrix required
      U <- checksolve(A1, matrix.action, , "variance")
      vc <- if(is.null(U)) NULL else (U %*% (A1 + A2 + A3) %*% U)
    }
    out <- switch(what,
                  fisher = A1 + A2 + A3,
                  vcov = vc,
                  corr = {
                    if(is.null(vc)) return(NULL)
                    sd <- sqrt(diag(vc))
                    vc / outer(sd, sd, "*")
                  },
                  internals = internals,
                  all = list(internals=internals,
                             fisher=A1+A2+A3,
                             varcov=vc,
                             invgrad=A1)
                  )
    return(out)
  }

  ## Add matrix B into A, matching by column names when available.
  ## NOTE(review): not referenced in the visible portion of this file;
  ## retained for use elsewhere.
  addsubmatrix <- function(A, B, guessnames) {
    if(is.null(B)) return(A)
    if(is.null(colnames(B)) && !missing(guessnames)) {
      if(is.character(guessnames))
        guessnames <- list(guessnames, guessnames)
      if(all(lengths(guessnames) == dim(B)))
        colnames(B) <- guessnames
    }
    if(is.null(colnames(B))) {
      #' unusual
      if(!all(dim(A) == dim(B)))
        stop("Internal error: no column names, and matrices non-conformable")
      A <- A + B
      return(A)
    }
    j <- match(colnames(B), colnames(A))
    if(anyNA(j))
      stop("Internal error: unmatched column name(s)")
    A[j,j] <- A[j,j] + B
    return(A)
  }

  ## rbind B onto A, aligning columns of B with those of A by name.
  ## NOTE(review): not referenced in the visible portion of this file.
  bindsubmatrix <- function(A, B) {
    if(is.null(B)) return(A)
    if(is.null(colnames(B))) {
      if(ncol(A) != ncol(B))
        stop("Internal error: no column names, and matrices non-conformable")
      A <- rbind(A, B)
      return(A)
    }
    j <- match(colnames(B), colnames(A))
    if(anyNA(j))
      stop("Internal error: unmatched column name(s)")
    BB <- matrix(0, nrow(B), ncol(A))
    BB[,j] <- B
    A <- rbind(A, BB)
    return(A)
  }

  ## fill NULL entries of list A from the corresponding entries of B
  mergeAlternatives <- function(A, B) {
    okA <- !sapply(A, is.null)
    okB <- !sapply(B, is.null)
    if(any(override <- !okA & okB))
      A[override] <- B[override]
    return(A)
  }

  ## for each column, is any entry nonzero?
  notallzero <- function(df) { apply(df != 0, 2, any) }
vcov.mppm }) spatstat/R/hierstrhard.R0000644000176200001440000002664113333543255014777 0ustar liggesusers## ## hierstrhard.R ## ## $Revision: 1.5 $ $Date: 2018/03/15 07:37:41 $ ## ## The hierarchical Strauss-hard core process ## ## ------------------------------------------------------------------- ## HierStraussHard <- local({ # ......... define interaction potential HSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) ## list all ordered pairs of types to be checked uptri <- par$archy$relation & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] ## corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) ## create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { ## assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] ## apply relevant threshold to each pair of points str <- (d <= rxu) # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf ## score for(i in 1:npairs) { # data points with mark m1 Xsub <- (tx == mark1[i]) # quadrature points with mark m2 Qsub <- (tu == mark2[i]) # assign z[Xsub, Qsub, i] <- value[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. delHSH <- function(which, types, iradii, hradii, archy, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified HierStraussHard with fewer gamma parameters return(HierStraussHard(types=types, iradii=iradii, hradii=hradii, archy=archy)) } else if(any(!ihc)) { # ihc = inactive hard cores # no gamma interactions left, but some active hard cores return(HierHard(types=types, hradii=hradii, archy=archy)) } else return(Poisson()) } # Set up basic object except for family and parameters BlankHSHobject <- list( name = "Hierarchical Strauss-hard core process", creator = "HierStraussHard", family = "hierpair.family", # evaluated later pot = HSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL, archy=NULL), parnames = c("possible types", "interaction distances", "hardcore distances", "hierarchical order"), pardesc = c("vector of possible types", "matrix of interaction distances", "matrix of hardcore distances", "hierarchical order"), hasInf = TRUE, selfstart = function(X, self) { types <- self$par$types hradii <- self$par$hradii archy <- self$par$archy if(!is.null(types) && !is.null(hradii) && !is.null(archy)) return(self) if(is.null(types)) types <- levels(marks(X)) if(is.null(archy)) archy <- seq_len(length(types)) if(!inherits(archy, "hierarchicalordering")) archy <- hierarchicalordering(archy, types) 
if(is.null(hradii)) {
        ## self-start the hardcore radii from nearest-neighbour distances,
        ## shrunk by the factor mm/(mm+1) (mm = pairwise min type counts)
        marx <- marks(X)
        d <- nndist(X, by=marx)
        h <- aggregate(d, by=list(from=marx), min)
        h <- as.matrix(h[, -1L, drop=FALSE])
        m <- table(marx)
        mm <- outer(m, m, pmin)
        hradii <- h * mm/(mm+1)
        dimnames(hradii) <- list(types, types)
        ## BUG FIX: previously assigned NA into 'h', which is never used
        ## again, so the restriction to hierarchically-related pairs was
        ## silently skipped. Apply it to 'hradii' instead.
        hradii[!(archy$relation)] <- NA
      }
      HierStraussHard(types=types,hradii=hradii,
                      iradii=self$par$iradii, archy=archy)
    },
    init = function(self) {
      ## validate the stored parameters
      types <- self$par$types
      iradii <- self$par$iradii
      hradii <- self$par$hradii
      ## hradii could be NULL
      if(!is.null(types)) {
        if(!is.null(dim(types)))
          stop(paste("The", sQuote("types"),
                     "argument should be a vector"))
        if(length(types) == 0)
          stop(paste("The", sQuote("types"),"argument should be",
                     "either NULL or a vector of all possible types"))
        if(anyNA(types))
          stop("NA's not allowed in types")
        if(is.factor(types)) {
          types <- levels(types)
        } else {
          types <- levels(factor(types, levels=types))
        }
        nt <- length(types)
        MultiPair.checkmatrix(iradii, nt, sQuote("iradii"), asymmok=TRUE)
        if(!is.null(hradii))
          MultiPair.checkmatrix(hradii, nt, sQuote("hradii"), asymmok=TRUE)
      }
      ina <- is.na(iradii)
      if(all(ina))
        stop(paste("All entries of", sQuote("iradii"), "are NA"))
      if(!is.null(hradii)) {
        hna <- is.na(hradii)
        both <- !ina & !hna
        ## interaction distance must strictly exceed the hard core
        if(any(iradii[both] <= hradii[both]))
          stop("iradii must be larger than hradii")
      }
    },
    update = NULL, # default OK
    print = function(self) {
      ## pretty-print the interaction parameters
      iradii <- self$par$iradii
      hradii <- self$par$hradii
      types <- self$par$types
      archy <- self$par$archy
      if(waxlyrical('gory'))
        splat(nrow(iradii), "types of points")
      if(!is.null(types) && !is.null(archy)) {
        if(waxlyrical('space')) {
          splat("Possible types and ordering:")
        } else cat("Hierarchy: ")
        print(archy)
      } else if(!is.null(types)) {
        (if(waxlyrical('space')) splat else cat)("Possible types: ")
        print(types)
      } else if(waxlyrical('gory'))
        splat("Possible types:\t not yet determined")
      splat("Interaction radii:")
      dig <- getOption("digits")
      print(hiermat(signif(iradii, dig), archy))
      if(!is.null(hradii)) {
        splat("Hardcore radii:")
print(hiermat(signif(hradii, dig), archy)) } else splat("Hardcore radii: not yet determined") invisible(NULL) }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) ## get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii ## list all unordered pairs of types uptri <- self$par$archy$relation & !is.na(r) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(NA, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) gammas[ cbind(index1, index2) ] <- exp(coeffs) # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=hiermat(dround(gammas), self$par$archy))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # parameters to estimate required <- !is.na(iradii) & self$par$archy$relation # all required parameters must be finite if(!all(is.finite(gamma[required]))) return(FALSE) # DIAGONAL interactions must be non-explosive d <- diag(rep(TRUE, nrow(iradii))) activehard <- !is.na(hradii) & (hradii > 0) return(all(gamma[required & d & !activehard] <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii types <- self$par$types archy <- self$par$archy # active hard cores activehard <- !is.na(hradii) & (hradii > 0) ihc <- !activehard # problems? 
uptri <- archy$relation required <- !is.na(iradii) & uptri offdiag <- !diag(nrow(iradii)) gammavalid <- is.finite(gamma) & (activehard | offdiag | (gamma <= 1)) naughty <- required & !gammavalid # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delHSH(naughty, types, iradii, hradii, archy, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) ord <- self$par$archy$ordering uptri <- (ord[rn] <= ord[cn]) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matrix, ncol=2) inters <- lapply(mats, delHSH, types=types, iradii=iradii, hradii=hradii, archy=archy, ihc=ihc) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) & self$par$archy$relation hactive <- !is.na(h) & self$par$archy$relation if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankHSHobject) <- "interact" # finally create main function HierStraussHard <- function(iradii, hradii=NULL, types=NULL, archy=NULL) { if(!is.null(types)) { if(is.null(archy)) archy <- seq_len(length(types)) archy <- hierarchicalordering(archy, types) } iradii[iradii == 0] <- NA out <- instantiate.interact(BlankHSHobject, list(types=types, iradii=iradii, hradii=hradii, archy=archy)) if(!is.null(types)) { dn <- list(types, types) dimnames(out$par$iradii) <- dn if(!is.null(out$par$hradii)) dimnames(out$par$hradii) <- dn } return(out) } HierStraussHard <- intermaker(HierStraussHard, BlankHSHobject) HierStraussHard }) spatstat/R/deldir.R0000644000176200001440000002671013606020442013707 0ustar liggesusers#' #' deldir.R #' #' Interface to deldir package #' 
#' $Revision: 1.32 $ $Date: 2020/01/10 06:54:35 $ #' #' .............................................. #' Internal options #' deldir suggests spatstat (!!!) #' so we must save options here, not in spatstat.options .spst.triEnv <- new.env() assign("use.trigraf", TRUE, envir=.spst.triEnv) assign("use.trigrafS", TRUE, envir=.spst.triEnv) assign("debug.delaunay", FALSE, envir=.spst.triEnv) #' for testing purposes only spatstat.deldir.setopt <- function(use.trigrafS=TRUE, use.trigraf=TRUE, debug.delaunay=FALSE) { assign("use.trigrafS", use.trigrafS, envir=.spst.triEnv) assign("use.trigraf", use.trigraf, envir=.spst.triEnv) assign("debug.delaunay", debug.delaunay, envir=.spst.triEnv) return(invisible(NULL)) } #'.............................................. dirichlet <- local({ dirichlet <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir", warn=TRUE) nX <- npoints(X) w <- X$window if(nX == 0) return(NULL) if(nX == 1) return(as.tess(w)) dd <- safedeldir(X) if(is.null(dd)) return(NULL) pp <- lapply(tile.list(dd), df2poly) if(length(pp) == npoints(X)) names(pp) <- seq_len(npoints(X)) dir <- tess(tiles=pp, window=as.rectangle(w)) if(w$type != "rectangle") dir <- intersect.tess(dir, w) return(dir) } df2poly <- function(z) { owin(poly=z[c("x","y")]) } dirichlet }) delaunay <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir", warn=TRUE) nX <- npoints(X) if(nX < 3) return(NULL) w <- X$window dd <- safedeldir(X) if(is.null(dd)) return(NULL) a <- dd$delsgs[,5L] b <- dd$delsgs[,6L] use.trigraf <- get("use.trigraf", envir=.spst.triEnv) use.trigrafS <- get("use.trigrafS", envir=.spst.triEnv) debug.delaunay <- get("debug.delaunay", envir=.spst.triEnv) if(use.trigrafS) { # first ensure a[] < b[] swap <- (a > b) if(any(swap)) { oldb <- b b[swap] <- a[swap] a[swap] <- oldb[swap] } # next ensure a is sorted o <- order(a, b) a <- a[o] b <- b[o] # nv <- nX ne <- length(a) ntmax <- ne z <- .C("trigrafS", nv = as.integer(nv), ne = as.integer(ne), ie = 
as.integer(a), je = as.integer(b), ntmax = as.integer(ntmax), nt = as.integer(integer(1L)), it = as.integer(integer(ne)), jt = as.integer(integer(ne)), kt = as.integer(integer(ne)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop("Internal error: overflow in trigrafS") tlist <- with(z, cbind(it, jt, kt)[1:nt, ]) } else if(use.trigraf) { nv <- nX ne <- length(a) ntmax <- ne z <- .C("trigraf", nv = as.integer(nv), ne = as.integer(ne), ie = as.integer(a), je = as.integer(b), ntmax = as.integer(ntmax), nt = as.integer(integer(1L)), it = as.integer(integer(ntmax)), jt = as.integer(integer(ntmax)), kt = as.integer(integer(ntmax)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if(z$status != 0) stop("Internal error: overflow in trigraf") tlist <- with(z, cbind(it, jt, kt)[1:nt, ]) } else { tlist <- matrix(integer(0), 0, 3) for(i in seq_len(nX)) { # find all Delaunay neighbours of i jj <- c(b[a==i], a[b==i]) jj <- sortunique(jj) # select those with a higher index than i jj <- jj[jj > i] # find pairs of neighbours which are Delaunay neighbours # (thus, triangles where the first numbered vertex is i) if(length(jj) > 0) for(j in jj) { kk <- c(b[a == j], a[b == j]) kk <- kk[(kk %in% jj) & (kk > j)] if(length(kk) > 0) for(k in kk) # add (i,j,k) to list of triangles (i < j < k) tlist <- rbind(tlist, c(i, j, k)) } } } # At this point, `tlist' contains all triangles formed by the Delaunay edges, # with vertices given in ascending order i < j < k in the 3 columns of tlist. # Some of these triangles may not belong to the Delaunay triangulation. # They will be weeded out later. 
# Assemble coordinates of triangles x <- X$x y <- X$y xtri <- matrix(x[tlist], nrow(tlist), 3L) ytri <- matrix(y[tlist], nrow(tlist), 3L) # ensure triangle vertices are in anticlockwise order ztri <- ytri - min(y) dx <- cbind(xtri[,2L]-xtri[,1L], xtri[,3L]-xtri[,2L], xtri[,1L]-xtri[,3L]) zm <- cbind(ztri[,1L]+ztri[,2L], ztri[,2L]+ztri[,3L], ztri[,3L]+ztri[,1L]) negareas <- apply(dx * zm, 1L, sum) clockwise <- (negareas > 0) # if(any(clockwise)) { xc <- xtri[clockwise, , drop=FALSE] yc <- ytri[clockwise, , drop=FALSE] tc <- tlist[clockwise, , drop=FALSE] xtri[clockwise,] <- xc[,c(1L,3L,2L)] ytri[clockwise,] <- yc[,c(1L,3L,2L)] tlist[clockwise,] <- tc[, c(1L,3L,2L)] } # At this point, triangle vertices are listed in anticlockwise order. # The same directed edge (i, j) cannot appear twice. # To weed out invalid triangles, check for such duplication triedges <- rbind(tlist[, c(1L,2L)], tlist[, c(2L,3L)], tlist[, c(3L,1L)]) if(any(bad <- duplicated(triedges))) { badedges <- unique(triedges[bad, , drop=FALSE]) ntri <- nrow(tlist) triid <- rep.int(seq_len(ntri), 3) illegal <- rep.int(FALSE, ntri) for(j in seq_len(nrow(badedges))) { from <- badedges[j, 1L] to <- badedges[j, 2L] if(debug.delaunay) cat(paste("Suspect edge from vertex", from, "to vertex", to, "\n")) # find all triangles sharing this edge in this orientation sustri <- triid[(triedges[,1L] == from) & (triedges[,2L] == to)] if(debug.delaunay) cat(paste("\tInvestigating triangles", commasep(sustri), "\n")) # list all vertices associated with the suspect triangles susvert <- sortunique(as.vector(tlist[sustri, ])) if(debug.delaunay) cat(paste("\tInvestigating vertices", commasep(susvert), "\n")) xsusvert <- x[susvert] ysusvert <- y[susvert] # take each triangle in turn and check whether it contains a data point for(k in sustri) { if(!illegal[k] && any(inside.triangle(xsusvert, ysusvert, xtri[k,], ytri[k,]))) { if(debug.delaunay) cat(paste("Triangle", k, "is illegal\n")) illegal[k] <- TRUE } } } if(!any(illegal)) { 
if(debug.delaunay) cat("No illegal triangles found\n") } else { if(debug.delaunay) cat(paste("Removing", sum(illegal), "triangles\n")) tlist <- tlist[!illegal, , drop=FALSE] xtri <- xtri[!illegal, , drop=FALSE] ytri <- ytri[!illegal, , drop=FALSE] } } # make tile list tiles <- list() for(m in seq_len(nrow(tlist))) { p <- list(x=xtri[m,], y=ytri[m,]) tiles[[m]] <- owin(poly=p, check=FALSE) } wc <- convexhull.xy(x, y) del <- tess(tiles=tiles, window=wc) if(w$type != "rectangle") del <- intersect.tess(del, w) return(del) } delaunayDistance <- function(X) { stopifnot(is.ppp(X)) nX <- npoints(X) w <- as.owin(X) ok <- !duplicated(X, rule="deldir") Y <- X[ok] nY <- npoints(Y) if(nY < 3) return(matrix(Inf, nX, nX)) dd <- deldir(Y$x, Y$y, rw=c(w$xrange,w$yrange)) if(is.null(dd)) return(NULL) joins <- as.matrix(dd$delsgs[,5:6]) joins <- rbind(joins, joins[,2:1]) d <- matrix(-1L, nY, nY) diag(d) <- 0 d[joins] <- 1 adj <- matrix(FALSE, nY, nY) diag(adj) <- TRUE adj[joins] <- TRUE z <- .C("Idist2dpath", nv = as.integer(nY), d = as.integer(d), adj = as.integer(adj), dpath = as.integer(integer(nY * nY)), tol = as.integer(0), niter = as.integer(integer(1L)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if (z$status == -1L) warning(paste("graph connectivity algorithm did not converge after", z$niter, "iterations", "on", nY, "vertices and", sum(adj) - nY, "edges")) dpathY <- matrix(z$dpath, nY, nY) if(all(ok)) { dpathX <- dpathY } else { dpathX <- matrix(NA_integer_, nX, nX) dpathX[ok, ok] <- dpathY } return(dpathX) } safedeldir <- function(X) { rw <- with(X$window, c(xrange,yrange)) dd <- try(deldir(X$x, X$y, rw=rw)) if(!inherits(dd, "try-error") && inherits(dd, "deldir")) return(dd) warning("deldir failed; re-trying with slight perturbation of coordinates.", call.=FALSE) Y <- rjitter(X, mean(nndist(X))/100) dd <- try(deldir(Y$x, Y$y, rw=rw)) if(!inherits(dd, "try-error") && inherits(dd, "deldir")) return(dd) warning("deldir failed even after perturbation of 
coordinates.", call.=FALSE) return(NULL) } dirichletVertices <- function(X) { DT <- tiles(dirichlet(X)) xy <- do.call(concatxy, lapply(DT, vertices)) Y <- unique(ppp(xy$x, xy$y, window=Window(X), check=FALSE)) b <- bdist.points(Y) thresh <- diameter(Frame(X))/1000 Y <- Y[b > thresh] return(Y) } dirichletAreas <- function(X) { stopifnot(is.ppp(X)) X <- unmark(X) win <- Window(X) dup <- duplicated(X, rule="deldir") if((anydup <- any(dup))) { oldX <- X X <- X[!dup] } switch(win$type, rectangle = { rw <- c(win$xrange, win$yrange) dd <- deldir(X$x, X$y, dpl=NULL, rw=rw) w <- dd$summary[, 'dir.area'] }, polygonal = { w <- tile.areas(dirichlet(X)) }, mask = { #' Nearest data point to each pixel: tileid <- exactdt(X)$i #' Restrict to window (result is a vector - OK) tileid <- tileid[win$m] #' Count pixels in each tile id <- factor(tileid, levels=seq_len(X$n)) counts <- table(id) #' Convert to digital area pixelarea <- win$xstep * win$ystep w <- pixelarea * as.numeric(counts) }) if(!anydup) return(w) oldw <- numeric(npoints(oldX)) oldw[!dup] <- w return(oldw) } delaunayNetwork <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir") nX <- npoints(X) if(nX == 0) return(NULL) if(nX == 1L) return(linnet(X, !diag(TRUE))) if(nX == 2L) return(linnet(X, !diag(c(TRUE,TRUE)))) dd <- safedeldir(X) if(is.null(dd)) return(NULL) joins <- as.matrix(dd$delsgs[, 5:6]) return(linnet(X, edges=joins)) } dirichletEdges <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir") nX <- npoints(X) W <- Window(X) if(nX < 2) return(edges(W)) dd <- safedeldir(X) if(is.null(dd)) return(edges(W)) return(as.psp(dd$dirsgs[,1:4], window=W)) } dirichletNetwork <- function(X, ...) as.linnet(dirichletEdges(X), ...) ## deprecated older names # delaunay.distance <- function(...) { # .Deprecated("delaunayDistance", package="spatstat") # delaunayDistance(...) # } # delaunay.network <- function(...) { # .Deprecated("delaunayNetwork", package="spatstat") # delaunayNetwork(...) 
# } # dirichlet.edges <- function(...) { # .Deprecated("dirichletEdges", package="spatstat") # dirichletEdges(...) # } # dirichlet.network <- function(...) { # .Deprecated("dirichletNetwork", package="spatstat") # dirichletNetwork(...) # } # dirichlet.vertices <- function(...) { # .Deprecated("dirichletVertices", package="spatstat") # dirichletVertices(...) # } spatstat/R/superimpose.R0000644000176200001440000001363513347357261015037 0ustar liggesusers# superimpose.R # # $Revision: 1.37 $ $Date: 2017/12/30 05:03:15 $ # # ############################# superimpose <- function(...) { # remove any NULL arguments arglist <- list(...) if(any(isnull <- sapply(arglist, is.null))) return(do.call(superimpose, arglist[!isnull])) UseMethod("superimpose") } superimpose.default <- function(...) { argh <- list(...) #' First expand any arguments which are lists of objects argh <- expandSpecialLists(argh, "solist") #' Now dispatch if(any(sapply(argh, is.lpp)) || any(sapply(argh, inherits, what="linnet"))) return(do.call(superimpose.lpp, argh)) if(any(sapply(argh, is.psp))) return(do.call(superimpose.psp, argh)) #' default return(do.call(superimpose.ppp, argh)) } superimpose.ppp <- function(..., W=NULL, check=TRUE) { arglist <- list(...) # Check that all "..." 
arguments have x, y coordinates hasxy <- unlist(lapply(arglist, checkfields, L=c("x", "y"))) if(!all(hasxy)) { nbad <- sum(bad <- !hasxy) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "have components x and y"), call.=FALSE) } # concatenate lists of (x,y) coordinates XY <- do.call(concatxy, arglist) needcheck <- TRUE # determine whether there is any window information if(!is.owin(W)) { ## we have to compute the final window if(is.function(W)) { ## W is a function like bounding.box.xy or ripras ## Apply function to the x,y coordinates; it should return an owin WXY <- W(XY) if(!is.owin(WXY)) stop("Function W did not return an owin object", call.=FALSE) W <- WXY } else if(is.character(W)) { ## character string identifies a function pW <- pmatch(W, c("convex", "rectangle", "bbox", "none")) if(is.na(pW)) stop(paste("Unrecognised option W=", sQuote(W)), call.=FALSE) WXY <- switch(pW, convex=ripras(XY), rectangle=ripras(XY, shape="rectangle"), bbox=boundingbox(XY), none=NULL) # in these cases we don't need to verify that the points are inside. needcheck <- !is.null(WXY) if(!is.null(WXY)) W <- WXY } else if(is.null(W)) { if(any(isppp <- unlist(lapply(arglist, is.ppp)))) { ## extract windows from ppp objects wins <- unname(lapply(arglist[isppp], as.owin)) ## take union W <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins) } else { ## no window information return(XY) } } else stop("Argument W is not understood") } # extract the marks if any nobj <- lengths(lapply(arglist, getElement, name="x")) marx <- superimposeMarks(arglist, nobj) # ppp(XY$x, XY$y, window=W, marks=marx, check=check & needcheck) } superimpose.splitppp <- superimpose.ppplist <- function(..., W=NULL, check=TRUE) { arglist <- list(...) 
while(any(h <- sapply(arglist, inherits, what=c("splitppp", "ppplist")))) { i <- min(which(h)) arglist <- insertinlist(arglist, i, arglist[[i]]) } do.call(superimpose, append(arglist, list(W=W, check=check))) } superimpose.psp <- function(..., W=NULL, check=TRUE) { # superimpose any number of line segment patterns arglist <- list(...) misscheck <- missing(check) if(!all(sapply(arglist, is.psp))) stop("Patterns to be superimposed must all be psp objects", call.=FALSE) # extract segment coordinates matlist <- lapply(lapply(arglist, getElement, name="ends"), asNumericMatrix) # tack them together mat <- do.call(rbind, matlist) # determine whether there is any window information needcheck <- FALSE if(!is.owin(W)) { # we have to compute the final window WXY <- NULL # Wpsp <- NULL if(any(ispsp <- unlist(lapply(arglist, is.psp)))) { # extract windows from psp objects wins <- unname(lapply(arglist[ispsp], as.owin)) # take union Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins) } if(is.function(W) || is.character(W)) { # guess window from x, y coordinates XY <- list(x=cbind(mat[,1], mat[,3]), y=cbind(mat[,2], mat[,4])) if(is.function(W)) { # W is a function like bounding.box.xy or ripras # Apply function to the x,y coordinates; it should return an owin WXY <- W(XY) if(!is.owin(WXY)) stop("Function W did not return an owin object", call.=FALSE) } if(is.character(W)) { # character string identifies a function pW <- pmatch(W, c("convex", "rectangle", "bbox", "none")) if(is.na(pW)) stop(paste("Unrecognised option W=", sQuote(W)), call.=FALSE) WXY <- switch(pW, convex=ripras(XY), rectangle=ripras(XY, shape="rectangle"), bbox=boundingbox(XY), none=NULL) # in these cases we don't need to verify that the points are inside. 
needcheck <- !is.null(WXY) } } W <- union.owin(WXY, Wppp) } # extract marks, if any nobj <- sapply(arglist, nsegments) marx <- superimposeMarks(arglist, nobj) if(misscheck && !needcheck) check <- FALSE return(as.psp(mat, window=W, marks=marx, check=check)) } superimposeMarks <- function(arglist, nobj) { # combine marks from the objects in the argument list marxlist <- lapply(arglist, marks) marx <- do.call(markappend, unname(marxlist)) nama <- names(arglist) if(length(nama) == length(arglist) && all(nzchar(nama))) { # arguments are named: use names as (extra) marks newmarx <- factor(rep.int(nama, nobj), levels=nama) marx <- markcbind(marx, newmarx) if(ncol(marx) == 2) { ## component marks were not named: call them 'origMarks' colnames(marx) <- c("origMarks", "pattern") } else colnames(marx)[ncol(marx)] <- "pattern" } return(marx) } spatstat/R/vcov.ppm.R0000644000176200001440000017335113614463173014233 0ustar liggesusers## ## Asymptotic covariance & correlation matrices ## and Fisher information matrix ## for ppm objects ## ## $Revision: 1.133 $ $Date: 2020/01/30 05:13:43 $ ## vcov.ppm <- local({ vcov.ppm <- function(object, ..., what="vcov", verbose=TRUE, fine=FALSE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), nacoef.action=c("warn", "fatal", "silent"), hessian=FALSE) { verifyclass(object, "ppm") argh <- list(...) 
gam.action <- match.arg(gam.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) nacoef.action <- match.arg(nacoef.action) if(!all(is.finite(coef(object)))) { gripe <- "Cannot compute variance; model is not valid" switch(nacoef.action, fatal = stop(gripe, call.=FALSE), warn = warning(gripe, call.=FALSE), silent = {}) return(NULL) } if(missing(fine) && ("A1dummy" %in% names(argh))) { message("Argument 'A1dummy' has been replaced by 'fine'") fine <- as.logical(argh$A1dummy) } else fine <- as.logical(fine) stopifnot(length(what) == 1 && is.character(what)) what.options <- c("vcov", "corr", "fisher", "Fisher", "internals", "all") what.map <- c("vcov", "corr", "fisher", "fisher", "internals", "all") if(is.na(m <- pmatch(what, what.options))) stop(paste("Unrecognised option: what=", sQuote(what))) what <- what.map[m] ## No vcov for Variational Bayes if(!is.null(object$internal$VB)) stop("Variance calculations currently not possible for variational Bayes fit.") ## no parameters, no variance if(length(coef(object)) == 0) { result <- switch(what, vcov=, corr=, fisher= { matrix(, 0, 0) }, internals=, all={ list() }) return(result) } ## nonstandard calculations (hack) generic.triggers <- c("A1", "new.coef", "modmat", "matwt", "saveterms", "sparseOK") nonstandard <- any(generic.triggers %in% names(argh)) || fine # saveterms <- identical(resolve.1.default("saveterms", argh), TRUE) ## Fisher information *may* be contained in object fisher <- object$fisher varcov <- object$varcov ## Do we need to go into the guts? 
needguts <- nonstandard || (is.null(fisher) && what=="fisher") || (is.null(varcov) && what %in% c("vcov", "corr")) || (what %in% c("internals", "all")) ## In general it is not true that varcov = solve(fisher) ## because we might use different estimators, ## or the parameters might be a subset of the canonical parameter if(needguts) { ## warn if fitted model was obtained using GAM if(identical(object$fitter, "gam")) { switch(gam.action, fatal={ stop(paste("model was fitted by gam();", "execution halted because fatal=TRUE"), call.=FALSE) }, warn={ warning(paste("model was fitted by gam();", "asymptotic variance calculation ignores this"), call.=FALSE) }, silent={}) } ## ++++ perform main calculation ++++ if((is.poisson(object) || (hessian && what!="internals")) && object$method != "logi") { ## Poisson model, or Hessian of Gibbs model without internals results <- vcalcPois(object, ..., what=what, matrix.action=matrix.action, verbose=verbose, fisher=fisher) } else { ## Gibbs model results <- vcalcGibbs(object, ..., what=what, fine=fine, matrix.action=matrix.action, hessian = hessian) } varcov <- results$varcov fisher <- results$fisher internals <- results$internals } if(what %in% c("vcov", "corr") && is.null(varcov)) { ## Need variance-covariance matrix. if(!is.null(fisher) && is.poisson(object)) ## Derive from Fisher information varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") } out <- switch(what, fisher = fisher, vcov = varcov, corr = { if(is.null(varcov)) return(NULL) sd <- sqrt(diag(varcov)) varcov / outer(sd, sd, "*") }, internals = internals, all = results ) return(out) } ## ................ variance calculation for Poisson models ............. 
## vcalcPois:
##   Variance-covariance calculation for a Poisson point process model,
##   or the Hessian contribution for a Gibbs model.
##
## Arguments:
##   object        fitted model (class 'ppm')
##   what          which quantity to return:
##                 "vcov", "corr", "fisher", "internals" or "all"
##   matrix.action what to do if the Fisher information/gradient matrix
##                 cannot be inverted
##   nacoef.action what to do if some fitted coefficients are NA/NaN/Inf
##   method        "C" uses the fast compiled sumouter(); "interpreted"
##                 is a slow pure-R reference implementation
##   verbose       whether to warn when the model must be refitted
##   fisher        Fisher information matrix, if already known
##   modmat        model matrix (default: extracted from 'object')
##   matwt         optional numeric weights applied to rows of the
##                 model matrix (temporary hack; triggers the
##                 sandwich-style variance below)
##   new.coef      optional replacement coefficient vector
##   dropcoef      whether to tolerate a mismatch in coefficient length
##   saveterms     whether to save per-point terms in the internals
##
## Returns a list whose components depend on 'what', or NULL if the
## model is invalid and nacoef.action != "fatal".
vcalcPois <- function(object, ...,
                      what = c("vcov", "corr", "fisher", "internals", "all"),
                      matrix.action=c("warn", "fatal", "silent"),
                      nacoef.action=c("warn", "fatal", "silent"),
                      method=c("C", "interpreted"),
                      verbose=TRUE,
                      fisher=NULL,
                      modmat=model.matrix(object),
                      matwt=NULL,   # weights on rows of model matrix
                      new.coef=NULL,
                      dropcoef=FALSE,
                      saveterms=FALSE) {
  ## variance-covariance matrix of Poisson model,
  ## or Hessian of Gibbs model
  what <- match.arg(what)
  method <- match.arg(method)
  matrix.action <- match.arg(matrix.action)
  ## BUG FIX: 'nacoef.action' was previously never resolved by
  ## match.arg(), so when the invalid-model branch below was reached
  ## with the default argument, switch() received a length-3 vector
  ## and raised "EXPR must be a length 1 vector" instead of acting
  ## on the requested policy.
  nacoef.action <- match.arg(nacoef.action)
  if(reweighting <- !is.null(matwt))
    stopifnot(is.numeric(matwt) && is.vector(matwt))
  internals <- NULL
  ## nonstandard calculations require going through the model matrix
  ## even if the Fisher information is already stored in the object
  nonstandard <- reweighting || !is.null(new.coef) || saveterms
  ## detect invalid model
  if(!all(is.finite(coef(object)))) {
    gripe<-"Cannot compute variance; some coefficients are NA, NaN or infinite"
    switch(nacoef.action,
           fatal=stop(gripe, call.=FALSE),
           warn=warning(gripe, call.=FALSE),
           silent={})
    return(NULL)
  }
  ## compute Fisher information if not known
  if(is.null(fisher) || nonstandard) {
    gf <- getglmfit(object)
    ## we need a glm or gam
    if(is.null(gf)) {
      if(verbose) warning("Refitting the model using GLM/GAM")
      object <- update(object, forcefit=TRUE)
      gf <- getglmfit(object)
      if(is.null(gf))
        stop("Internal error - refitting did not yield a glm object")
    }
    ## compute fitted intensity and sufficient statistic
    ltype <- if(is.poisson(object)) "trend" else "lambda"
    lambda <- fitted(object, type=ltype, new.coef=new.coef,
                     dropcoef=dropcoef, check=FALSE)
    mom <- modmat
    nmom <- nrow(mom)
    Q <- quad.ppm(object)
    wt <- w.quad(Q)
    ok <- getglmsubset(object)
    Z <- is.data(Q)
    ## save them
    ## NOTE(review): only what == "internals" saves the full terms here;
    ## for what == "all" the fallback below stores just list(suff=modmat),
    ## so 'lambda', 'Z', 'ok' are not recorded even when saveterms=TRUE --
    ## presumably intentional, but worth confirming.
    if(what == "internals") {
      internals <-
        if(!saveterms) list(suff=mom) else
        list(suff=mom, mom=mom, lambda=lambda, Z=Z, ok=ok)
    }
    ## Now restrict all terms to the domain of the pseudolikelihood
    lambda <- lambda[ok]
    mom <- mom[ok, , drop=FALSE]
    wt <- wt[ok]
    Z <- Z[ok]
    ## apply weights to rows of model matrix - temporary hack
    if(reweighting) {
      nwt <- length(matwt)
      if(nwt == nmom) {
        ## matwt matches original quadrature scheme - trim it
        matwt <- matwt[ok]
      } else if(nwt != sum(ok))
        stop("Hack argument matwt has incompatible length")
      mom.orig <- mom
      mom <- matwt * mom
    }
    ## compute Fisher information
    switch(method,
           C = {
             fisher <- sumouter(mom, lambda * wt)
             if(reweighting) {
               gradient <- sumouter(mom.orig, matwt * lambda * wt)
             }
           },
           interpreted = {
             if(!reweighting) {
               fisher <- 0
               for(i in 1:nrow(mom)) {
                 ro <- mom[i, ]
                 v <- outer(ro, ro, "*") * lambda[i] * wt[i]
                 if(!anyNA(v))
                   fisher <- fisher + v
               }
               momnames <- dimnames(mom)[[2]]
               dimnames(fisher) <- list(momnames, momnames)
             } else {
               fisher <- gradient <- 0
               for(i in 1:nrow(mom)) {
                 ro <- mom[i, ]
                 ro0 <- mom.orig[i,]
                 ldu <- lambda[i] * wt[i]
                 v <- outer(ro, ro, "*") * ldu
                 v0 <- outer(ro0, ro0, "*") * matwt[i] * ldu
                 if(!anyNA(v))
                   fisher <- fisher + v
                 if(!anyNA(v0))
                   gradient <- gradient + v0
               }
               momnames <- dimnames(mom)[[2]]
               dn <- list(momnames, momnames)
               dimnames(fisher) <- dimnames(gradient) <- dn
             }
           })
  }
  if(what %in% c("all", "internals")) {
    ## Internals needed
    if(is.null(internals))
      internals <- list(suff = modmat)
    internals$fisher <- fisher
    if(reweighting)
      internals$gradient <- gradient
    ilist <- list(internals=internals)
  }
  if(what %in% c("all", "vcov", "corr")) {
    ## Variance-covariance matrix needed
    if(!reweighting) {
      ## Derive variance-covariance from Fisher info
      varcov <- checksolve(fisher, matrix.action,
                           "Fisher information matrix", "variance")
      vcovlist <- list(fisher=fisher, varcov=varcov)
    } else {
      ## Sandwich estimator: inverse gradient on both sides
      invgrad <- checksolve(gradient, matrix.action,
                            "gradient matrix", "variance")
      varcov <- if(is.null(invgrad)) NULL else
                invgrad %*% fisher %*% invgrad
      vcovlist <- list(fisher=fisher, varcov=varcov, invgrad=invgrad)
    }
  }
  result <- switch(what,
                   fisher = list(fisher=fisher),
                   vcov   = vcovlist,
                   corr   = vcovlist,
                   internals = ilist,
                   all = append(ilist, vcovlist))
  return(result)
}

## ...................... vcov calculation for Gibbs models ....................
vcalcGibbs <- function(fit, ..., fine=FALSE, what = c("vcov", "corr", "fisher", "internals", "all"), generic=FALSE) { what <- match.arg(what) if(missing(generic)) { ## Change default to TRUE in certain cases ## For logistic fits, use generic method by default if(fit$method == "logi") generic <- TRUE ## For 'difficult' interactions, use generic method by default fasterbygeneric <- c("Areainter") if(as.interact(fit)$creator %in% fasterbygeneric) generic <- TRUE } ## decide whether to use the generic algorithm generic.triggers <- c("A1", "hessian", "new.coef", "matwt", "saveterms", "sparseOK") use.generic <- generic || fine || !is.stationary(fit) || (fit$method == "logi" && ("marks" %in% variablesinformula(fit$trend))) || (fit$method != "logi" && has.offset(fit)) || (fit$method == "logi" && has.offset.term(fit)) || !(fit$correction == "border" && fit$rbord == reach(fit)) || any(generic.triggers %in% names(list(...))) || !identical(options("contrasts")[[1]], c(unordered="contr.treatment", ordered="contr.poly")) ## compute spill <- (what %in% c("all", "internals", "fisher")) spill.vc <- (what == "all") out <- if(use.generic) vcalcGibbsGeneral(fit, ..., fine=fine, spill=spill, spill.vc=spill.vc) else vcalcGibbsSpecial(fit, ..., spill=spill, spill.vc=spill.vc) switch(what, vcov = , corr = { ## out is the variance-covariance matrix; return it return(list(varcov=out)) }, fisher = { ## out is a list of internal data: extract the Fisher info Fmat <- with(out, if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) return(list(fisher=Fmat)) }, internals = { ## out is a list of internal data: return it ## (ensure model matrix is included) if(is.null(out$mom)) out$mom <- model.matrix(fit) return(list(internals=out)) }, all = { ## out is a list(internals, vc): return it ## (ensure model matrix is included) if(is.null(out$internals$mom)) out$internals$mom <- model.matrix(fit) ## ensure Fisher info is included if(is.null(out$internals$fisher)) { Fmat <- with(out$internals, 
if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) out$internals$fisher <- Fmat } return(out) }, ) return(NULL) } ## ...................... general algorithm ........................... vcalcGibbsGeneral <- function(model, ..., spill = FALSE, spill.vc = FALSE, na.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), algorithm=c("vectorclip", "vector", "basic"), A1 = NULL, fine = FALSE, hessian = FALSE, modmat = model.matrix(model), matwt = NULL, new.coef = NULL, dropcoef=FALSE, saveterms = FALSE, parallel = TRUE, sparseOK = FALSE ) { modmat.given <- !missing(modmat) na.action <- match.arg(na.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) algorithm <- match.arg(algorithm) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) spill <- spill || spill.vc saveterms <- spill && saveterms logi <- model$method=="logi" asked.parallel <- !missing(parallel) old.coef <- coef(model) use.coef <- adaptcoef(new.coef, old.coef, drop=dropcoef) if(modmat.given) { p <- ncol(modmat) pnames <- colnames(modmat) } else { p <- length(old.coef) pnames <- names(old.coef) } if(p == 0) { ## this probably can't happen if(!spill) return(matrix(, 0, 0)) else return(list()) } dnames <- list(pnames, pnames) # (may be revised later) internals <- list() ## sumobj <- summary(model, quick="entries") correction <- model$correction rbord <- model$rbord R <- reach(model, epsilon=1e-2) Q <- quad.ppm(model) D <- dummy.ppm(model) rho <- model$internal$logistic$rho #### If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(model) Z <- is.data(Q) W <- as.owin(model) areaW <- if(correction == "border") eroded.areas(W, rbord) else area(W) ## ## determine which quadrature points contributed to the ## sum/integral in the pseudolikelihood ## (e.g. 
some points may be excluded by the border correction) okall <- getglmsubset(model) ## conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) ## data and dummy: lamall <- fitted(model, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) if(anyNA(lamall)) { whinge <- "Some values of the fitted conditional intensity are NA" switch(na.action, fatal = { stop(whinge, call.=FALSE) }, warn = { warning(whinge, call.=FALSE) okall <- okall & !is.na(lamall) }, silent = { okall <- okall & !is.na(lamall) }) } ## data only: lam <- lamall[Z] ok <- okall[Z] nX <- npoints(X) ## sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) ## data and dummy: mall <- modmat if(ncol(mall) != length(pnames)) { if(!dropcoef) stop(paste("Internal error: dimension of sufficient statistic = ", ncol(mall), "does not match length of coefficient vector =", length(pnames)), call.=FALSE) p <- length(pnames) pnames <- colnames(mall) dnames <- list(pnames, pnames) } ## save if(saveterms) internals <- append(internals, list(mom=mall, lambda=lamall, Z=Z, ok=okall, matwt=matwt)) if(reweighting) { ## each column of the model matrix is multiplied by 'matwt' check.nvector(matwt, nrow(mall), things="quadrature points") mall.orig <- mall mall <- mall * matwt } ## subsets of model matrix mokall <- mall[okall, , drop=FALSE] ## data only: m <- mall[Z, , drop=FALSE] mok <- m[ok, , drop=FALSE] ## if(reweighting) { ## save unweighted versions mokall.orig <- mall.orig[okall, , drop=FALSE] m.orig <- mall.orig[Z, , drop=FALSE] mok.orig <- m.orig[ok, , drop=FALSE] ## matwtX <- matwt[Z] } ## ^^^^^^^^^^^^^^^^ First order (sensitivity) matrices A1, S ## logistic if(logi){ ## Sensitivity matrix S for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) dimnames(Slog) <- dnames ## A1 matrix for logistic case A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) dimnames(A1log) <- dnames } ## Sensitivity matrix for MPLE case (= A1) if(is.null(A1) || reweighting) { 
if(fine){ A1 <- sumouter(mokall, w = (lamall * w.quad(Q))[okall]) if(reweighting) gradient <- sumouter(mokall.orig, w=(matwt * lamall * w.quad(Q))[okall]) } else{ A1 <- sumouter(mok) if(reweighting) gradient <- sumouter(mok.orig, w=matwtX) } } else { stopifnot(is.matrix(A1)) if(!all(dim(A1) == p)) stop(paste("Matrix A1 has wrong dimensions:", prange(dim(A1)), "!=", prange(c(p, p)))) } dimnames(A1) <- dnames ## ^^^^^^^^^^ Second order interaction effects A2, A3 if(hessian) { ## interaction terms suppressed A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) } else { ## ^^^^^^^^^^^^^^^^^^^^ `parallel' evaluation need.loop <- TRUE if(parallel) { ## compute second order difference ## ddS[i,j,] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- deltasuffstat(model, restrict="pairs", force=FALSE, sparseOK=sparseOK) sparse <- inherits(ddS, "sparse3Darray") if(is.null(ddS)) { if(asked.parallel) warning("parallel option not available - reverting to loop") } else { need.loop <- FALSE ## rearrange so that ## ddS[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- aperm(ddS, c(3,2,1)) ## now compute sum_{i,j} for i != j ## outer(ddS[,i,j], ddS[,j,i]) ddSok <- ddS[ , ok, ok, drop=FALSE] A3 <- sumsymouter(ddSok) ## compute pairweight and other arrays if(sparse) { ## Entries are only required for pairs i,j which interact. 
## mom.array[ ,i,j] = h(X[i] | X) mom.array <- mapSparseEntries(ddS, margin=2, values=m, conform=TRUE, across=1) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## pairweight[i,j] = lambda(X[i] | X[-j] )/lambda( X[i] | X ) - 1 pairweight <- expm1(tensor1x1(-use.coef, ddS)) } else { ## mom.array[ ,i,j] = h(X[i] | X) mom.array <- array(t(m), dim=c(p, nX, nX)) ## momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS ## lamdel[i,j] = lambda(X[i] | X[-j]) lamdel <- matrix(lam, nX, nX) * exp(tensor::tensor(-use.coef, ddS, 1, 1)) ## pairweight[i,j] = lamdel[i,j]/lambda[i] - 1 pairweight <- lamdel / lam - 1 } ## now compute sum_{i,j} for i != j ## pairweight[i,j] * outer(momdel[,i,j], momdel[,j,i]) ## for data points that contributed to the pseudolikelihood momdelok <- momdel[ , ok, ok, drop=FALSE] pwok <- pairweight[ok, ok] if(anyNA(momdelok) || anyNA(pwok)) stop("Unable to compute variance: NA values present", call.=FALSE) A2 <- sumsymouter(momdelok, w=pwok) dimnames(A2) <- dimnames(A3) <- dnames if(logi){ if(!sparse) { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- array(lam, c(nX,nX,p)) lam.array <- aperm(lam.array, c(3,1,2)) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) lamdel.array <- array(lamdel, c(nX,nX,p)) lamdel.array <- aperm(lamdel.array, c(3,1,2)) momdellogi <- rho/(lamdel.array+rho)*momdel ddSlogi <- rho/(lam.array+rho)*mom.array - momdellogi } else { ## lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- mapSparseEntries(ddS, margin=2, lam, conform=TRUE, across=1) ## lamdel.array[,i,j] = lambda(X[i] | X[-j]) pairweight.array <- aperm(as.sparse3Darray(pairweight), c(3,1,2)) lamdel.array <- pairweight.array * lam.array + lam.array lamdel.logi <- applySparseEntries(lamdel.array, function(y,rho) { rho/(rho+y) }, rho=rho) lam.logi <- applySparseEntries(lam.array, function(y,rho) { rho/(rho+y) }, rho=rho) momdellogi <- momdel * lamdel.logi ddSlogi <- mom.array * lam.logi - momdellogi } momdellogiok <- momdellogi[ , ok, ok, drop=FALSE] A2log <- 
sumsymouter(momdellogiok, w=pwok) ddSlogiok <- ddSlogi[ , ok, ok, drop=FALSE] A3log <- sumsymouter(ddSlogiok) dimnames(A2log) <- dimnames(A3log) <- dnames } } } ## ^^^^^^^^^^^^^^^^^^^^ loop evaluation if(need.loop) { A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) if(saveterms) { ## *initialise* matrices ## lamdel[i,j] = lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)]) lamdel <- matrix(lam, nX, nX) ## momdel[ ,i,j] = h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]) momdel <- array(t(m), dim=c(p, nX, nX)) } ## identify close pairs if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j if(algorithm == "vectorclip") { cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................ 
## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## make an empty 'equalpairs' matrix nonE <- matrix(, nrow=0, ncol=2) ## Run through pairs switch(algorithm, basic={ for(i in unique(I)) { Xi <- X[i] Ji <- unique(J[I==i]) if((nJi <- length(Ji)) > 0) { for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] plamj.i <- predict(model, type="cif", locations=X[j], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] pmj.i <- partialModelMatrix(X.ij, X[j], model)[nX-1, ] ## conditional intensity and sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] plami.j <- predict(model, type="cif", locations=X[i], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi.j <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] ## if(reweighting) { pmj.i <- pmj.i * matwtX[j] pmi.j <- pmi.j * matwtX[i] } if(saveterms) { lamdel[i,j] <- plami.j momdel[ , i, j] <- pmi.j lamdel[j,i] <- plamj.i momdel[ , j, i] <- pmj.i } ## increment A2, A3 wt <- plami.j / lam[i] - 1 A2 <- A2 + wt * outer(pmi.j, pmj.i) if(logi) A2log <- A2log + wt * rho/(plami.j+rho) * rho/(plamj.i+rho) * outer(pmi.j, pmj.i) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj.i deltajSi <- m[i, ] - pmi.j A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/ (lam[j]+rho) - pmj.i/(plamj.i+rho)) deltajSilog <- rho*(m[i, ]/ (lam[i]+rho) - pmi.j/(plami.j+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vector={ ## --------- faster algorithm using vector functions -------- for(i in unique(I)) { Ji <- 
unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## all points other than X[i] X.i <- X[-i] ## index of XJi in X.i J.i <- Ji - (Ji > i) ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj=sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vectorclip={ ## 
--------- faster version of 'vector' algorithm ## -------- by removing non-interacting points of X for(i in unique(I)) { ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) ## compute conditional intensity ## lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] ## for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=E.i) ## corresponding values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] ## ## conditional intensity & sufficient statistic ## in reverse order ## lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] ## for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ] } ## if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } ## increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) ## delta sufficient statistic ## delta_i h(X[j] | X[-c(i,j)]) ## = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) ## = h(X[j] | X) - h(X[j] | X[-i]) ## delta_j h(X[i] | X[-c(i,j)]) ## = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) ## = h(X[i] | X) - h(X[i] | X[-j]) 
deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }) } } ## ......... end of loop computation ............... } #### Matrix Sigma Sigma <- A1+A2+A3 if(spill) { ## save internal data (with matrices unnormalised) internals <- c(internals, list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW), if(logi) list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog) else NULL, if(reweighting) list(gradient=gradient) else NULL, list(hessian = if(reweighting) gradient else if(logi) Slog else A1, fisher = Sigma), if(saveterms) list(lamdel=lamdel, momdel=momdel) else NULL) ## return internal data if no further calculation needed if(!spill.vc && !logi) return(internals) } ## ........... calculate variance/covariance matrix for MPL ......... if(!reweighting) { ## Normalise A1 <- A1/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry A1 <- (A1 + t(A1))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(A1, matrix.action, , "variance") } else { ## Normalise gradient <- gradient/areaW Sigma <- Sigma/areaW ## Enforce exact symmetry gradient <- (gradient + t(gradient))/2 Sigma <- (Sigma + t(Sigma))/2 ## calculate inverse negative Hessian U <- checksolve(gradient, matrix.action, , "variance") } ## compute variance-covariance vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW dimnames(vc.mpl) <- dnames ## return variance-covariance matrix, if model was fitted by MPL if(!logi) { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ###### Everything below is only computed for logistic fits ####### ## Matrix Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- model$internal$logistic$how if(how %in% c("given", "grid", 
"transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ## Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## Changed by ER 2013/06/14 to use the new quadscheme.logi ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = model$internal$logistic$nd) D2 <- Q2$dummy Q2$dummy$Dinfo <- D2$Dinfo Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=model$trend, interaction = model$interaction, method = model$method, correction = model$correction, rbord = model$rbord, covariates = model$covariates) arglist <- append(arglist, model$internal$logistic$extraargs) model2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(model2, check = FALSE, new.coef = new.coef, dropcoef=dropcoef) ## New model matrix mall2 <- model.matrix(model2) okall2 <- getglmsubset(model2) ## index vectors of stratrand cell indices of dummy points inD <- model$internal$logistic$inD inD2 <- model2$internal$logistic$inD ## Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- (bdist.points(D) >= R) ii2 <- (bdist.points(D2) >= R) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } ## OK points of 
dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) ## cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,,drop=FALSE][ok1,] mdum2 <- mall2[!Z2,,drop=FALSE][ok2,] ## finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) ## Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) Sigma2log <- crossprod(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) ## Attaching to Sigma2log calculated above dimnames(Sigma2log) <- dnames if(spill) { ## return internal data only (with matrices unnormalised) internals <- c(internals, list(Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } ## .. Calculate variance-covariance matrix for logistic fit ........... 
## normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## evaluate Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW dimnames(vc.logi) <- dnames ## if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } ## vcalcGibbs from Ege Rubak and J-F Coeurjolly ## 2013/06/14, modified by Ege to handle logistic case as well vcalcGibbsSpecial <- function(fit, ..., spill=FALSE, spill.vc=FALSE, special.alg = TRUE, matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent")) { matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) spill <- spill || spill.vc ## Interaction name: iname <- fit$interaction$name ## Does the model have marks which are in the trend? marx <- is.marked(fit) && ("marks" %in% variablesinformula(fit$trend)) ## The full data and window: Xplus <- data.ppm(fit) Wplus <- as.owin(Xplus) ## Fitted parameters and the parameter dimension p (later consiting of p1 trend param. 
and p2 interaction param.): theta <- coef(fit) p <- length(theta) ## Number of points: n <- npoints(Xplus) ## Using the faster algorithms for special cases if(special.alg && fit$method != "logi"){ param <- coef(fit) switch(iname, "Strauss process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, reach(fit$interaction), exp(coef(fit)[2]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Piecewise constant pairwise interaction process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, fit$interaction$par$r, exp(coef(fit)[-1]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Multitype Strauss process"={ matR <- fit$interaction$par$radii R <- c(matR[1,1], matR[1,2], matR[2,2]) ## Only implemented for 2 types with equal interaction range: if(ncol(matR)==2 && marx){ n <- length(theta) res <- vcovMultiStrauss(Xplus, R, exp(theta[c(n-2,n-1,n)]), matrix.action,spill=spill,spill.vc=spill.vc) if(!spill) { res <- contrastmatrix(res, 2) dimnames(res) <- list(names(theta), names(theta)) } return(res) } } ) } ## Matrix specifying equal points in the two patterns in the call to eval below: E <- matrix(rep.int(1:n, 2), ncol = 2) ## Eval. 
the interaction potential difference at all points (internal spatstat function): # V1 <- fit$interaction$family$eval(Xplus, Xplus, E, fit$interaction$pot, fit$interaction$par, fit$correction) oldopt <- NULL if(fit$interaction$family$name=="pairwise"){ oldopt <- spatstat.options(fasteval = "off") } V1 <- evalInteraction(Xplus, Xplus, E, as.interact(fit), fit$correction) spatstat.options(oldopt) ## Calculate parameter dimensions and correct the contrast type parameters: p2 <- ncol(V1) p1 <- p-p2 if(p1>1) theta[2:p1] <- theta[2:p1] + theta[1] ## V1 <- evalInteraction(Q, Xplus, union.quad(Q), fit$interaction, fit$correction) POT <- attr(V1, "POT") attr(V1, "POT") <- NULL ## Adding the constant potential as first column (one column per type for multitype): if(!marx){ V1 <- cbind(1, V1) colnames(V1) <- names(theta) } else{ lev <- levels(marks(Xplus)) ## Indicator matrix for mark type attached to V1: tmp <- matrix(marks(Xplus), nrow(V1), p1)==matrix(lev, nrow(V1), p-ncol(V1), byrow=TRUE) colnames(tmp) <- lev V1 <- cbind(tmp,V1) } ## Matrices for differences of potentials: E <- matrix(rep.int(1:(n-1), 2), ncol = 2) dV <- V2 <- array(0,dim=c(n,n,p)) for(k in 1:p1){ V2[,,k] <- matrix(V1[,k], n, n, byrow = FALSE) } for(k in (p1+1):p){ diag(V2[,,k]) <- V1[,k] } for(j in 1:n){ ## Fast evaluation for pairwise interaction processes: if(fit$interaction$family$name=="pairwise" && !is.null(POT)){ V2[-j,j,-(1:p1)] <- V1[-j,-(1:p1)]-POT[-j,j,] } else{ V2[-j,j,-(1:p1)] <- fit$interaction$family$eval(Xplus[-j], Xplus[-j], E, fit$interaction$pot, fit$interaction$par, fit$correction) ## Q <- quadscheme(Xplus[-j],emptyppp) ## V2[-j,j,-1] <- evalInteraction(Q, Xplus[-j], Xplus[-j], fit$interaction, fit$correction) } for(k in 1:p){ dV[,j,k] <- V1[,k] - V2[,j,k] } } ## Ratio of first and second order Papangelou - 1: frac <- 0*dV[,,1] for(k in (p1+1):p){ frac <- frac + dV[,,k]*theta[k] } frac <- exp(-frac)-1 ## In the rest we restrict attention to points in the interior: ## The interaction 
range: R <- reach(fit$interaction) ## The reduced window, area and point pattern: W<-erosion.owin(Wplus,R) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=R X <- Xplus[IntPoints] ## Making a logical matrix, I, indicating R-close pairs which are in the interior: D <- pairdist(Xplus) diag(D) <- Inf I <- (D<=R) & outer(IntPoints,IntPoints, "&") ## Matrix A1: A1 <- t(V1[IntPoints,])%*%V1[IntPoints,] ## Matrix A2: A2 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2[k,l] <- A2[l,k] <- sum(I*V2[,,k]*frac*t(V2[,,l])) } } ## Matrix A3: A3 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A3[k,l] <- A3[l,k] <- sum(I*dV[,,k]*t(dV[,,l])) } } ## Matrix Sigma (A1+A2+A3): Sigma<-A1+A2+A3 if(spill) { # save internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- list(names(theta), names(theta)) internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW) # return internal data, if model fitted by MPL if(!spill.vc && fit$method != "logi") return(internals) } # ......... Calculate variance-covariance matrix for MPL ........ 
# normalise A1 <- A1/areaW Sigma <- Sigma/areaW # evaluate U <- checksolve(A1, matrix.action, , "variance") vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW ## Convert to treatment contrasts if(marx) vc.mpl <- contrastmatrix(vc.mpl, p1) dimnames(vc.mpl) <- list(names(theta), names(theta)) # Return result for standard ppm method: if(fit$method!="logi") { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ######################################################################## ###### The remainder is only executed when the method is logistic ###### ######################################################################## ### Most of this is copy/pasted from vcalcGibbsGeneral correction <- fit$correction Q <- quad.ppm(fit) D <- dummy.ppm(fit) rho <- fit$internal$logistic$rho ## If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area(D)*markspace.integral(D)) X <- data.ppm(fit) Z <- is.data(Q) # determine which data points entered into the sum in the pseudolikelihood # (border correction, nonzero cif) # data and dummy: okall <- getglmsubset(fit) ## # data only: ## ok <- okall[Z] # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) # data and dummy: lamall <- fitted(fit, check = FALSE) ## # data only: ## lam <- lamall[Z] # sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) # data and dummy: mall <- model.matrix(fit) mokall <- mall[okall, , drop=FALSE] ## # data only: ## m <- mall[Z, , drop=FALSE] ## mok <- m[ok, , drop=FALSE] # Sensitivity matrix S and A1 matrix for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) ## Define W1, W2 and dW for the logistic method based on V1, V2 and dV (frac is unchanged) lambda1 <- exp(.rowSums(matrix(theta,n,p,byrow=TRUE)*V1, n, p)) W1 <- V1*rho/(lambda1+rho) lambda2 <- exp(apply(array(rep(theta,each=n*n),dim=c(n,n,p))*V2, c(1,2), 
sum)) W2 <- V2 dW <- dV for(k in 1:p){ W2[,,k] <- V2[,,k] * rho/(lambda2+rho) for(j in 1:n){ dW[,j,k] <- W1[,k] - W2[,j,k] } } ## Matrices A2log and A3log for the first component Sigma1log of the variance: A2log <- A3log <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2log[k,l] <- A2log[l,k] <- sum(I*W2[,,k]*frac*t(W2[,,l])) A3log[k,l] <- A3log[l,k] <- sum(I*dW[,,k]*t(dW[,,l])) } } A2log <- A2log A3log <- A3log ## First variance component Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- fit$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = fit$internal$logistic$nd) D2 <- Q2$dummy Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=fit$trend, interaction = fit$interaction, method = fit$method, correction = fit$correction, rbord = fit$rbord, covariates = fit$covariates) arglist <- append(arglist, fit$internal$logistic$extraargs) fit2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(fit2, check=FALSE) ## New model 
matrix mall2 <- model.matrix(fit2) okall2 <- getglmsubset(fit2) # index vectors of stratrand cell indices of dummy points inD <- fit$internal$logistic$inD inD2 <- fit2$internal$logistic$inD # Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- inside.owin(D, w = W) ii2 <- inside.owin(D2, w = W) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) # cif and suff. stat. for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,][ok1,] mdum2 <- mall2[!Z2,][ok2,] # finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) ## Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) Sigma2log <- crossprod(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) if(spill) { ## Attach dimnames to all matrices dimnames(Sigma2log) <- dimnames(Slog) <- dimnames(Sigma1log) <- dimnames(A1log) <- dimnames(A2log) <- dimnames(A3log) <- list(names(theta),names(theta)) # return internal data (with matrices unnormalised) internals <- c(internals, list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog, Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } # ....... Compute variance-covariance for logistic fit ............. 
# Normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## Finally the result is calculated: Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW # dimnames(vc.logi) <- list(names(theta), names(theta)) if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } vcovPairPiece <- function(Xplus, R, Gam, matrix.action, spill=FALSE, spill.vc=FALSE){ ## R is the vector of breaks (R[length(R)]= range of the pp. ## Gam is the vector of weights Rmax <- R[length(R)] ## Xplus : point process observed in W+R ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,Rmax) areaW <- area(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=Rmax X <- Xplus[IntPoints] nX <- npoints(X) nXplus <- npoints(Xplus) ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: Dplus<-pairdist(Xplus) D <- pairdist(X) diag(D) <- diag(Dplus) <- Inf ## logical matrix, I, indicating R-close pairs: p<-length(R) Tplus<-T<-matrix(0,X$n,p) I<-Iplus<-list() for (i in 1:p){ if (i==1){ Iplus[[1]]<- Dplus <=R[1] I[[1]] <- D<=R[1] } else { Iplus[[i]]<- ((Dplus>R[i-1]) & (Dplus <=R[i])) I[[i]] <- ((D>R[i-1]) & (D <=R[i])) } ## Vector T with the number of $R$-close neighbours to each point: Tplus[,i]<- .colSums(Iplus[[i]], nXplus, nXplus)[IntPoints] T[,i] <- .colSums(I[[i]], nX, nX) } ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,p+1,p+1) ## A1 and A3: A1[1,1] <- npoints(X) for (j in (2:(p+1))){ A1[1,j]<-A1[j,1]<-sum(Tplus[,j-1]) A3[j,j]<-sum(T[,j-1]) for (k in (2:(p+1))){ A1[j,k]<-sum(Tplus[,j-1] * Tplus[,k-1]) } } ## A2: for (j in (2:(p+1))){ A2[1,1]<-A2[1,1]+(Gam[j-1]^(-1)-1)*sum(T[,j-1]) for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] A2[1,j]<-A2[1,j]+(Gam[l-1]^(-1)-1)*sum(T[,l-1]*(vj) ) } 
A2[j,1]<-A2[1,j] for (k in (2:(p+1))){ for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] if (l==k) vk<-Tplus[,k-1]-1 else vk<-Tplus[,k-1] A2[j,k]<-A2[j,k]+ (Gam[l-1]^(-1)-1)*sum(I[[l-1]]*outer(vj,vk)) } } } Sigma<-A1+A2+A3 nam <- c("(Intercept)", names(Gam)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } vcovMultiStrauss <- function(Xplus, vecR, vecg, matrix.action, spill=FALSE, spill.vc=FALSE){ ## Xplus : marked Strauss point process ## with two types ## observed in W+R (R=max(R11,R12,R22)) ## vecg = estimated parameters of interaction parameters ## ordered as the output of ppm, i.e. vecg=(g11,g12,g22) ## vecR = range for the diff. 
strauss ordered a vecg(R11,R12,R22) R <- max(vecR) R11<-vecR[1];R12<-vecR[2];R22<-vecR[3] ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,R) areaW <- area(W) X1plus<-Xplus[Xplus$marks==levels(Xplus$marks)[1]] X2plus<-Xplus[Xplus$marks==levels(Xplus$marks)[2]] ## Interior points determined by bdist.points: IntPoints1 <- bdist.points(X1plus)>=R IntPoints2 <- bdist.points(X2plus)>=R X1 <- X1plus[IntPoints1] X2 <- X2plus[IntPoints2] nX1 <- npoints(X1) nX2 <- npoints(X2) nX1plus <- npoints(X1plus) nX2plus <- npoints(X2plus) ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: D1plus<-pairdist(X1plus) D1 <- pairdist(X1) diag(D1) <- diag(D1plus) <- Inf D2plus<-pairdist(X2plus) D2 <- pairdist(X2) diag(D2) <- diag(D2plus) <- Inf D12plus<-crossdist(X1,X2plus) T12plus<- .rowSums(D12plus<=R12, nX1, nX2plus) D21plus<-crossdist(X2,X1plus) T21plus<- .rowSums(D21plus<=R12, nX2, nX1plus) I12<-crossdist(X1,X2)<=R12 I21<-crossdist(X2,X1)<=R12 T12<- .rowSums(I12, nX1, nX2) T21<- .rowSums(I21, nX2, nX1) ## logical matrix, I, indicating R-close pairs: I1plus<- D1plus <=R11 I1 <- D1<=R11 I2plus<- D2plus <=R22 I2 <- D2<=R22 ## Vector T with the number of $R$-close neighbours to each point: T1plus<- .colSums(I1plus, nX1plus, nX1plus)[IntPoints1] T1 <- .colSums(I1, nX1, nX1) T2plus<- .colSums(I2plus, nX2plus, nX2plus)[IntPoints2] T2 <- .colSums(I2, nX2, nX2) ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,5,5) ## A1 is filled: A1[1,1]<-npoints(X1) A1[1,3]<-A1[3,1]<-sum(T1plus) A1[1,4]<-A1[4,1]<-sum(T12plus) A1[2,2]<-npoints(X2) A1[2,5]<-A1[5,2]<-sum(T2plus) A1[2,4]<-A1[4,2]<-sum(T21plus) A1[3,3]<-sum(T1plus*T1plus) A1[3,4]<-A1[4,3]<-sum(T1plus*T12plus) A1[5,5]<-sum(T2plus*T2plus) A1[4,5]<-A1[5,4]<-sum(T2plus*T21plus) A1[4,4]<-sum(T12plus*T12plus)+sum(T21plus*T21plus) ## A3 is filled: A3[3,3]<-sum(T1) A3[5,5]<-sum(T2) A3[4,4]<-sum(T12)+sum(T21) ## A2 is filled: 
gamInv<-vecg^(-1)-1 gi1<-gamInv[1];gi12<-gamInv[2];gi2<-gamInv[3] A2[1,1]<-sum(T1)*gi1 A2[1,2]<-A2[2,1]<-sum(T12)*gi12 A2[1,3]<-A2[3,1]<-sum(T1*(T1plus-1))*gi1 A2[1,5]<-A2[5,1]<-sum(T21*T2plus)*gi12 A2[1,4]<-A2[4,1]<-gi1*sum(T1*(T12plus))+gi12*sum(T21*(T21plus-1)) A2[2,2]<-sum(T2)*gi2 A2[2,3]<-A2[3,2]<-sum(T12*T1plus)*gi12 A2[2,5]<-A2[5,2]<-sum(T2*(T2plus-1))*gi2 A2[2,4]<-A2[4,2]<-gi2*sum(T2*(T21plus))+gi12*sum(T12*(T12plus-1)) A2[3,3]<-gi1*sum(I1*outer(T1plus-1,T1plus-1)) A2[3,5]<-A2[5,3]<- gi12*sum(I12*outer(T1plus,T2plus)) A2[3,4]<-A2[4,3]<-gi1*sum(I1*outer(T1plus-1,T12plus))+gi12*sum(I12*outer(T1plus,T21plus-1)) A2[5,5]<-gi2*sum(I2*outer(T2plus-1,T2plus-1)) A2[4,5]<-A2[5,4]<-gi2*sum(I2*outer(T2plus-1,T21plus))+gi12*sum(I21*outer(T2plus,T12plus-1)) A2[4,4]<-gi1*sum(I1*outer(T12plus,T12plus))+gi2*sum(I2*outer(T21plus,T21plus))+ gi12*sum(I12*outer(T12plus-1,T21plus-1))+gi12*sum(I21*outer(T21plus-1,T12plus-1)) Sigma<-A1+A2+A3 nam <- c(levels(marks(Xplus)), names(vecg)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } # Convert the first p rows & columns of variance matrix x # to variances of treatment contrasts contrastmatrix <- function(x,p){ mat <- x ## Correct column and row 1: for(i in 2:p){ mat[1,i] <- mat[i,1] <- x[1,i]-x[1,1] } ## Correct columns and rows 2,...,p: for(i in 2:p){ for(j in 2:p){ mat[i,j] <- x[1,1]-x[1,i]-x[1,j]+x[i,j] } for(j in (p+1):ncol(x)){ mat[i,j] <- mat[j,i] <- x[i,j]-x[1,j] } } mat } vcov.ppm } ) suffloc <- function(object) { 
verifyclass(object, "ppm") if(!is.poisson(object)) stop("Internals not available for Gibbs models") return(vcov(object, what="internals")$suff) } spatstat/R/defaultwin.R0000644000176200001440000000251513333543254014613 0ustar liggesusers# # # defaultwin.R # # $Revision: 1.10 $ $Date: 2015/10/21 09:06:57 $ # default.expand <- function(object, m=2, epsilon=1e-6, w=Window(object)) { stopifnot(is.ppm(object) || inherits(object, "rmhmodel")) # no expansion necessary if model is Poisson if(is.poisson(object)) return(.no.expansion) # default is no expansion if model is nonstationary if(!is.stationary(object)) return(.no.expansion) # Redundant since a non-expandable model is non-stationary # if(!is.expandable(object)) # return(.no.expansion) # rule is to expand data window by distance d = m * reach rr <- reach(object, epsilon=epsilon) if(!is.finite(rr)) return(rmhexpand()) if(!is.numeric(m) || length(m) != 1 || m < 1) stop("m should be a single number >= 1") mr <- m * rr rule <- rmhexpand(distance = mr) # if(is.owin(w)) { # apply rule to window wplus <- expand.owin(w, rule) # save as new expansion rule rule <- rmhexpand(wplus) } return(rule) } default.clipwindow <- function(object, epsilon=1e-6) { stopifnot(is.ppm(object) || inherits(object, "rmhmodel")) # data window w <- as.owin(object) if(is.null(w)) return(NULL) # interaction range of model rr <- reach(object, epsilon=epsilon) if(!is.finite(rr)) return(NULL) if(rr == 0) return(w) else return(erosion(w, rr)) } spatstat/R/split.ppp.R0000644000176200001440000002237713575343014014412 0ustar liggesusers# # split.ppp.R # # $Revision: 1.34 $ $Date: 2019/12/15 04:44:38 $ # # split.ppp and "split<-.ppp" # ######################################### split.ppp <- function(x, f = marks(x), drop=FALSE, un=NULL, reduce=FALSE, ...) 
{ verifyclass(x, "ppp") mf <- markformat(x) fgiven <- !missing(f) if(is.null(un)) { un <- !fgiven && (mf != "dataframe") } else un <- as.logical(un) if(!fgiven) { # f defaults to marks of x switch(mf, none={ stop("f is missing and there are no marks") }, vector={ if(!is.multitype(x)) stop("f is missing and the pattern is not multitype") f <- fsplit <- marks(x) }, dataframe={ f <- fsplit <- firstfactor(marks(x)) if(is.null(f)) stop("Data frame of marks contains no factors") }) splittype <- "factor" } else { # f was given fsplit <- f if(is.factor(f)) { splittype <- "factor" } else if(is.logical(f)) { splittype <- "factor" f <- factor(f) } else if(is.tess(f)) { # f is a tessellation: determine the grouping f <- marks(cut(x, fsplit)) splittype <- "tess" } else if(is.owin(f)) { # f is a window: coerce to a tessellation W <- as.owin(x) fsplit <- tess(tiles=list(fsplit, setminus.owin(W, fsplit)), window=W) f <- marks(cut(x, fsplit)) splittype <- "tess" } else if(is.im(f)) { # f is an image: coerce to a tessellation fsplit <- tess(image=f) f <- marks(cut(x, fsplit)) splittype <- "tess" } else if(is.character(f) && length(f) == 1) { # f is the name of a column of marks marx <- marks(x) if(is.data.frame(marx) && (f %in% names(marx))) fsplit <- f <- marx[[f]] else stop(paste("The name", sQuote(f), "does not match any column of marks")) splittype <- "factor" } else stop(paste("f must be", "a factor, a logical vector,", "a tessellation, a window, an image,", "or the name of a column of marks")) if(length(f) != npoints(x)) stop("length(f) must equal the number of points in x") } # At this point # 'f' is a factor that can be used to separate the points # 'fsplit' is the object (either a factor or a tessellation) # that determines the split (and can be "un-split") lev <- levels(f) if(drop) { # remove components that don't contain points retain <- (table(f) > 0) lev <- lev[retain] switch(splittype, tess = { # remove tiles that don't contain points fsplit <- fsplit[retain] }, factor 
= { # delete levels that don't occur fsplit <- factor(fsplit, levels=lev) }, stop("Internal error: wrong format for fsplit")) } ## remove marks that will not be retained if(un && reduce && mf == "dataframe") warning("Incompatible arguments un=TRUE and reduce=TRUE: assumed un=TRUE") if(un) { x <- unmark(x) } else if(reduce && !fgiven && mf == "dataframe") { # remove the column of marks that determined the split j <- findfirstfactor(marks(x)) if(!is.null(j)) marks(x) <- marks(x)[, -j] } ## split the data out <- list() fok <- !is.na(f) for(l in lev) out[[paste(l)]] <- x[fok & (f == l)] ## if(splittype == "tess") { til <- tiles(fsplit) for(i in seq_along(out)) out[[i]]$window <- til[[i]] } class(out) <- c("splitppp", "ppplist", "solist", class(out)) attr(out, "fsplit") <- fsplit attr(out, "fgroup") <- f return(out) } "split<-.ppp" <- function(x, f=marks(x), drop=FALSE, un=NULL, ..., value) { verifyclass(x, "ppp") W <- x$window fgiven <- !missing(f) mf <- markformat(x) # evaluate `un' before assigning value of 'f' if(is.null(un)) { un <- !fgiven && (mf != "dataframe") } else un <- as.logical(un) # validate assignment value stopifnot(is.list(value)) if(!all(unlist(lapply(value, is.ppp)))) stop(paste("Each entry of", sQuote("value"), "must be a point pattern")) ismark <- unlist(lapply(value, is.marked)) if(any(ismark) && !all(ismark)) stop(paste("Some entries of", sQuote("value"), "are marked, and others are unmarked")) vmarked <- all(ismark) # determine type of splitting if(missing(f)) { # f defaults to marks of x switch(mf, none={ stop("f is missing and there are no marks") }, vector={ if(!is.multitype(x)) stop("f is missing and the pattern is not multitype") f <- fsplit <- marks(x) }, dataframe={ f <- fsplit <- firstfactor(marks(x)) if(is.null(f)) stop("Data frame of marks contains no factors") }) } else { # f given fsplit <- f if(is.tess(f)) { # f is a tessellation: determine the grouping f <- marks(cut(x, fsplit)) } else if(is.im(f)) { # f is an image: determine the 
grouping fsplit <- tess(image=f) f <- marks(cut(x, fsplit)) } else if(is.character(f) && length(f) == 1) { # f is the name of a column of marks marx <- marks(x) if(is.data.frame(marx) && (f %in% names(marx))) fsplit <- f <- marx[[f]] else stop(paste("The name", sQuote(f), "does not match any column of marks")) } else if(is.logical(f)) { f <- factor(f) } else if(!is.factor(f)) stop(paste("f must be", "a factor, a logical vector, a tessellation, an image,", "or the name of a column of marks")) if(length(f) != x$n) stop("length(f) must equal the number of points in x") } # all.levels <- lev <- levels(f) if(!drop) levtype <- "levels of f" else { levtype <- "levels which f actually takes" # remove components that don't contain points lev <- lev[table(f) > 0] } if(length(value) != length(lev)) stop(paste("length of", sQuote("value"), "should equal the number of", levtype)) # ensure value[[i]] is associated with lev[i] if(!is.null(names(value))) { if(!all(names(value) %in% as.character(lev))) stop(paste("names of", sQuote("value"), "should be levels of f")) value <- value[lev] } names(value) <- NULL # restore the marks, if they were discarded if(un && is.marked(x)) { if(vmarked) warning(paste(sQuote("value"), "contains marked point patterns:", "this is inconsistent with un=TRUE; marks ignored.")) for(i in seq_along(value)) value[[i]] <- value[[i]] %mark% factor(lev[i], levels=all.levels) } # handle NA's in splitting factor if(any(isNA <- is.na(f))) { xNA <- x[isNA] if(un && is.marked(x)) xNA <- xNA %mark% factor(NA, levels=all.levels) value <- append(value, list(xNA)) } # put Humpty together again if(npoints(x) == length(f) && length(levels(f)) == length(value) && all(table(f) == sapply(value, npoints))) { ## exact correspondence out <- x for(i in seq_along(levels(f))) out[ f == lev[i] ] <- value[[i]] } else { out <- do.call(superimpose,c(value,list(W=W))) } return(out) } print.splitppp <- function(x, ...) 
{ f <- attr(x, "fsplit") what <- if(is.tess(f)) "tessellation" else if(is.factor(f)) "factor" else if(is.logical(f)) "logical vector" else typeof(f) cat(paste("Point pattern split by", what, "\n")) nam <- names(x) for(i in seq_along(x)) { cat(paste("\n", nam[i], ":\n", sep="")) print(x[[i]]) } return(invisible(NULL)) } summary.splitppp <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.splitppp" x } print.summary.splitppp <- function(x, ...) { class(x) <- "anylist" print(x) invisible(NULL) } "[.splitppp" <- function(x, ...) { f <- attr(x, "fsplit") # invoke list method on x class(x) <- "list" y <- x[...] # then make it a 'splitppp' object too class(y) <- c("splitppp", class(y)) if(is.tess(f)) { fsplit <- f[...] } else if(is.factor(f)) { lev <- levels(f) sublev <- lev[...] subf <- f[f %in% sublev] fsplit <- factor(subf, levels=lev) } else stop("Unknown splitting type") attr(y, "fsplit") <- fsplit y } "[<-.splitppp" <- function(x, ..., value) { if(!all(unlist(lapply(value, is.ppp)))) stop("replacement value must be a list of point patterns") f <- attr(x, "fsplit") # invoke list method class(x) <- "list" x[...] 
<- value # then make it a 'splitppp' object too class(x) <- c("splitppp", class(x)) if(is.tess(f)) { fsplit <- f } else if(is.factor(f)) { lev <- levels(f) fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev) } attr(x, "fsplit") <- fsplit x } density.splitppp <- function(x, ..., se=FALSE) { density.ppplist(x, ..., se=se) } plot.splitppp <- function(x, ..., main) { if(missing(main)) main <- short.deparse(substitute(x)) do.call(plot.solist, resolve.defaults(list(x=x, main=main), list(...), list(equal.scales=TRUE))) } as.layered.splitppp <- function(X) { do.call(layered, X) } spatstat/R/headtail.R0000644000176200001440000000103513333543255014221 0ustar liggesusers#' #' headtail.R #' #' Methods for head() and tail() #' #' $Revision: 1.1 $ $Date: 2016/12/20 01:11:29 $ head.tess <- head.psp <- head.ppx <- head.ppp <- function(x, n=6L, ...) { stopifnot(length(n) == 1L) xlen <- nobjects(x) n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen) x[seq_len(n)] } tail.tess <- tail.psp <- tail.ppx <- tail.ppp <- function (x, n = 6L, ...) { stopifnot(length(n) == 1L) xlen <- nobjects(x) n <- if (n < 0L) max(xlen + n, 0L) else min(n, xlen) x[seq.int(to = xlen, length.out = n)] } spatstat/R/Gres.R0000644000176200001440000000502513333543254013350 0ustar liggesusers# # Gres.R # # Residual G # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Gres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad G <- Gcom(object, ...) 
} else { # case where 'object' is the output of 'Gcom' a <- attr(object, "maker") if(is.null(a) || a != "Gcom") stop("fv object was not created by Gcom") G <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=G$r, theo=numeric(length(G$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(G)(r), NULL), "theo", . ~ r, attr(G, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="G") # add residual estimates nam <- names(G) if(all(c("border","bcom") %in% nam)) ans <- bind.fv(ans, data.frame(bres=with(G, border-bcom)), "bold(R)~hat(%s)[bord](r)", "border corrected residual of %s", "bres") if(all(c("han","hcom") %in% nam)) ans <- bind.fv(ans, data.frame(hres=with(G, han-hcom)), "bold(R)~hat(%s)[han](r)", "Hanisch corrected residual of %s", "hres") if("hvar" %in% nam) { savedotnames <- fvnames(ans, ".") hsd <- with(G, sqrt(hvar)) ans <- bind.fv(ans, data.frame(hvar=with(G, hvar), hsd = hsd, hi = 2*hsd, lo = -2*hsd), c("bold(C)^2~hat(%s)[han](r)", "sqrt(bold(C)^2~hat(%s)[han](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of Hanisch corrected residual %s", "pseudo-SD of Hanisch corrected residual %s", "upper critical band for Hanisch corrected residual %s", "lower critical band for Hanisch corrected residual %s"), "hres") ans <- bind.fv(ans, data.frame(hstdres=with(ans, hres/hsd)), "bold(T)~hat(%s)[han](r)", "standardised Hanisch-corrected residual %s", "hres") fvnames(ans, ".") <- c(savedotnames, c("hi", "lo")) } unitname(ans) <- unitname(G) return(ans) } spatstat/R/varblock.R0000644000176200001440000001307613333543255014261 0ustar liggesusers# # varblock.R # # Variance estimation using block subdivision # # $Revision: 1.20 $ $Date: 2016/12/30 01:44:50 $ # varblock <- local({ getrvalues <- function(z) { with(z, .x) } stepsize <- function(z) { mean(diff(z)) } dofun <- function(domain, fun, Xpp, ...) 
{ fun(Xpp, ..., domain=domain) } varblock <- function(X, fun=Kest, blocks=quadrats(X, nx=nx, ny=ny), ..., nx=3, ny=nx, confidence=0.95) { stopifnot(is.ppp(X)) stopifnot(is.tess(blocks)) stopifnot(is.function(fun) || is.character(fun)) if(is.character(fun)) fun <- get(fun, mode="function") ## validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence probs <- c(alpha/2, 1-alpha/2) ## determine whether 'fun' has an argument called 'domain' canrestrict <- ("domain" %in% names(formals(fun))) || samefunction(fun, pcf) || samefunction(fun, Lest) ## check there's at least one point in each block Y <- split(X, blocks) nums <- sapply(Y, npoints) blockok <- (nums > 0) if(some.zeroes <- any(!blockok)) warning("Some tiles contain no data: they are discarded") if(!canrestrict) { ## divide data into disjoint blocks if(some.zeroes) Y <- Y[blockok] n <- length(Y) if(n <= 1) stop("Need at least 2 blocks") ## apply 'fun' to each block if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) z <- lapply(Y, fun, ...) } else { ## need to ensure compatible fv objects z <- lapply(Y, fun, ...) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(Y, fun, ..., r=r) fX <- fun(X, ..., r=r) } } else { ## use 'domain' argument of 'fun' to compute contributions from each tile B <- tiles(blocks) if(some.zeroes) B <- B[blockok] n <- length(B) if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) 
z <- lapply(B, dofun, ..., fun=fun, Xpp=X) } else { ## need to ensure compatible fv objects z <- lapply(B, dofun, ..., fun=fun, Xpp=X) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(B, dofun, ..., fun=fun, Xpp=X, r=r) fX <- fun(X, ..., r=r) } } ## find columns that are common to all estimates zzz <- reconcile.fv(append(list(fX), z)) fX <- zzz[[1]] z <- zzz[-1] ## sample mean m <- meanlistfv(z) ## sample variance sqdev <- lapply(z, sqdev.fv, m=m) v <- meanlistfv(sqdev) v <- eval.fv(v * n/(n-1), dotonly=FALSE) ## sample standard deviation sd <- eval.fv(sqrt(v), dotonly=FALSE) ## upper and lower limits sem <- eval.fv(sd/sqrt(n), dotonly=FALSE) zcrit <- qnorm(probs) lower <- eval.fv(m + zcrit[1] * sem, dotonly=FALSE) upper <- eval.fv(m + zcrit[2] * sem, dotonly=FALSE) ## rebadge fva <- .Spatstat.FvAttrib fva <- fva[fva %in% names(attributes(fX))] attributes(m)[fva] <- attributes(v)[fva] <- attributes(sd)[fva] <- attributes(upper)[fva] <- attributes(lower)[fva] <- attributes(fX)[fva] m <- prefixfv(m, "mean", "sample mean of", "bold(mean)~") v <- prefixfv(v, "var", "estimated variance of", "bold(var)~") sd <- prefixfv(sd, "sd", "estimated standard deviation of", "bold(sd)~") CItext <- paste(c("lower", "upper"), paste0(100 * confidence, "%%"), "CI limit for") lower <- prefixfv(lower, "lo", CItext[1], "bold(lo)~") upper <- prefixfv(upper, "hi", CItext[2], "bold(hi)~") ## tack together out <- cbind(fX,m,v,sd,upper,lower) ## restrict r domain bad <- matrowall(!is.finite(as.matrix(as.data.frame(out)))) rmax <- max(getrvalues(out)[!bad]) alim <- c(0, rmax) if(!canrestrict) alim <- intersect.ranges(attr(out, "alim"), alim) attr(out, "alim") <- alim ## sensible default plot formula ybase <- fvnames(fX, ".y") xname <- fvnames(fX, ".x") tname <- intersect("theo", fvnames(fX, ".")) fvnames(out, ".y") <- yname <- paste0("mean", ybase) fvnames(out, ".s") <- snames <- paste0(c("lo", "hi"), 
ybase) fvnames(out, ".") <- c(yname, tname, snames) attr(out, "fmla") <- paste(". ~ ", xname) return(out) } sqdev.fv <- function(x,m){ eval.fv((x-m)^2, dotonly=FALSE) } varblock }) meanlistfv <- local({ getYmatrix <- function(x, yn=ynames) { as.matrix(as.data.frame(x)[,yn]) } meanlistfv <- function(z, ...) { ## compute sample mean of a list of fv objects if(!is.list(z) || !all(unlist(lapply(z, is.fv)))) stop("z should be a list of fv objects") if(!do.call(compatible, unname(z))) stop("Objects are not compatible") result <- template <- z[[1]] ## extract each object's function values as a matrix ynames <- fvnames(template, "*") matlist <- unname(lapply(z, getYmatrix, yn=ynames)) ## stack matrices into an array y <- do.call(abind, append(matlist, list(along=3))) ## take mean ymean <- apply(y, 1:2, mean, ...) result[,ynames] <- ymean return(result) } meanlistfv }) spatstat/R/nnfunlpp.R0000644000176200001440000000316713537661337014326 0ustar liggesusers# # nnfunlpp.R # # method for 'nnfun' for class 'lpp' # # $Revision: 1.3 $ $Date: 2019/09/16 10:14:18 $ # nnfun.lpp <- local({ nnfun.lpp <- function(X, ..., k=1, value=c("index", "mark")) { stopifnot(inherits(X, "lpp")) force(X) force(k) value <- match.arg(value) L <- as.linnet(X) switch(value, index = { fi <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { ## L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) i <- nncross.lpp(Y, X, what="which", k=k) return(i) } f <- linfun(fi, L) }, mark = { stopifnot(is.marked(X)) marx <- as.data.frame(marks(X))[,1] fm <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) i <- nncross.lpp(Y, X, what="which", k=k) return(marx[i]) } f <- linfun(fm, L) }) attr(f, "explain") <- uitleggen return(f) } uitleggen <- function(x, ...) 
{ env <- environment(attr(x, "f")) X <- get("X", envir=env) k <- get("k", envir=env) if(identical(k, 1)) { cat("Nearest-neighbour function for lpp object\n") } else { cat("k-th nearest neighbour function for lpp object\n") cat(paste("k =", commasep(k), "\n")) } print(X) v <- mget("value", envir=env, ifnotfound=list(NULL))[[1L]] splat("Function returns the", if(identical(v, "mark")) "mark value" else "index", "of the neighbour") } nnfun.lpp }) spatstat/R/dppm.R0000644000176200001440000001236713402725253013415 0ustar liggesusers#' #' dppm.R #' #' $Revision: 1.9 $ $Date: 2018/12/08 11:14:52 $ dppm <- function(formula, family, data=NULL, ..., startpar = NULL, method = c("mincon", "clik2", "palm"), weightfun=NULL, control=list(), algorithm="Nelder-Mead", statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { # Instantiate family if not already done. if(is.character(family)) family <- get(family, mode="function") if(inherits(family, "detpointprocfamilyfun")) { familyfun <- family family <- familyfun() } verifyclass(family, "detpointprocfamily") # Check for intensity as only unknown and exit (should be changed for likelihood method) if(length(family$freepar)==1 && (family$freepar %in% family$intensity)) stop("Only the intensity needs to be estimated. Please do this with ppm yourself.") # Detect missing rhs of 'formula' and fix if(inherits(formula, c("ppp", "quad"))){ Xname <- short.deparse(substitute(formula)) formula <- as.formula(paste(Xname, "~ 1")) } if(!inherits(formula, "formula")) stop(paste("Argument 'formula' should be a formula")) # kppm(formula, DPP = family, data = data, covariates = data, # startpar = startpar, method = method, weightfun = weightfun, # control = control, algorithm = algorithm, statistic = statistic, # statargs = statargs, rmax = rmax, covfunargs = covfunargs, # use.gam = use.gam, nd = nd, eps = eps, ...) 
thecall <- call("kppm", X=formula, DPP=family, data = data, covariates = data, startpar = startpar, method = method, weightfun = weightfun, control = control, algorithm = algorithm, statistic = statistic, statargs = statargs, rmax = rmax, covfunargs = covfunargs, use.gam = use.gam, nd = nd, eps = eps) ncall <- length(thecall) argh <- list(...) nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- parent.frame() if(!is.null(data)) callenv <- list2env(data, parent=callenv) result <- eval(thecall, envir=callenv, enclos=baseenv()) return(result) } ## Auxiliary function to mimic cluster models for DPPs in kppm code spatstatDPPModelInfo <- function(model){ out <- list( modelname = paste(model$name, "DPP"), # In modelname field of mincon fv obj. descname = paste(model$name, "DPP"), # In desc field of mincon fv obj. modelabbrev = paste(model$name, "DPP"), # In fitted obj. printmodelname = function(...) paste(model$name, "DPP"), # Used by print.kppm parnames = model$freepar, checkpar = function(par){ return(par) }, checkclustargs = function(margs, old = TRUE) list(), resolvedots = function(...){ return(list(...)) }, ## K-function K = function(par, rvals, ...){ if(length(par)==1 && is.null(names(par))) names(par) <- model$freepar mod <- update(model, as.list(par)) if(!valid(mod)){ return(rep(Inf, length(rvals))) } else{ return(Kmodel(mod)(rvals)) } }, ## pair correlation function pcf = function(par, rvals, ...){ if(length(par)==1 && is.null(names(par))) names(par) <- model$freepar mod <- update(model, as.list(par)) if(!valid(mod)){ return(rep(Inf, length(rvals))) } else{ return(pcfmodel(mod)(rvals)) } }, ## sensible starting parameters selfstart = function(X) { return(model$startpar(model, X)) } ) return(out) } ## Auxilliary function used for DPP stuff in kppm.R dppmFixIntensity <- function(DPP, lambda, po){ lambdaname <- DPP$intensity if(is.null(lambdaname)) warning("The model has no intensity 
parameter.\n",
            "Prediction from the fitted model is invalid ",
            "(but no warning or error will be given by predict.dppm).")
  ## Update model object with estimated intensity if it is a free model parameter
  if(lambdaname %in% DPP$freepar){
    clusters <- update(DPP, structure(list(lambda), .Names=lambdaname))
  } else{
    clusters <- DPP
    lambda <- intensity(clusters)
    ## Overwrite po object with fake version
    X <- po$Q$data
    dont.complain.about(X)
    po <- ppm(X~offset(log(lambda))-1)
    po$fitter <- "dppm"
    ## update pseudolikelihood value using code in logLik.ppm
    po$maxlogpl.orig <- po$maxlogpl
    po$maxlogpl <- logLik(po, warn=FALSE)
    #########################################
  }
  return(list(clusters=clusters, lambda=lambda, po=po))
}

## Auxiliary function used for DPP stuff in kppm.R.
## Chooses the optimisation algorithm for a one-dimensional parameter:
## "Brent" when finite box constraints are available, otherwise "BFGS".
dppmFixAlgorithm <- function(algorithm, changealgorithm, clusters, startpar){
  if(!setequal(clusters$freepar, names(startpar)))
    stop("Names of startpar vector does not match the free parameters of the model.")
  lower <- upper <- NULL
  if(changealgorithm){
    bb <- dppparbounds(clusters, names(startpar))
    if(all(is.finite(bb))){
      ## bounded 1-D problem: Brent with explicit bracket
      algorithm <- "Brent"
      lower <- bb[1L]
      upper <- bb[2L]
    } else{
      algorithm <- "BFGS"
    }
  }
  return(list(algorithm = algorithm, lower = lower, upper = upper))
}
spatstat/R/ippm.R0000644000176200001440000002272113425751500013414 0ustar liggesusers#
# ippm.R
#
# $Revision: 2.25 $ $Date: 2019/02/02 02:54:19 $
#
# Fisher scoring algorithm for irregular parameters in ppm trend
#
ippm <- local({

  ## arguments of ippm() that must NOT be passed on to ppm()
  chucknames <- c("iScore", "start", "nlm.args", "silent", "warn.unused")

  ## does function 'f' accept an argument named 'a'?
  hasarg <- function(f,a) { a %in% names(formals(f)) }

  ippm <- function(Q, ...,
                   iScore=NULL,
                   start=list(),
                   covfunargs=start,
                   nlm.args=list(stepmax=1/2),
                   silent=FALSE,
                   warn.unused=TRUE) {
    ## remember call
    cl <- match.call()
    callframe <- parent.frame()
    callstring <- short.deparse(sys.call())
    ## construct the corresponding ppm() call
    ppmcall <- cl[!(names(cl) %in% chucknames)]
    ppmcall[[1L]] <- as.name('ppm')
    ## validate
    if(!is.list(start))
      stop("start should be a list of initial values
for irregular parameters")
    if(length(start) == 0) {
      ## nothing irregular to optimise: fall straight through to ppm
      ppmcall <- ppmcall[names(ppmcall) != "covfunargs"]
      return(eval(ppmcall, callframe))
    }
    if(!is.null(iScore)) {
      ## user-supplied analytic score components must mirror 'start'
      if(!is.list(iScore) || length(iScore) != length(start))
        stop("iScore should be a list of the same length as start")
      stopifnot(identical(names(iScore), names(start)))
      if(!all(sapply(iScore, is.function)))
        stop("iScore should be a list of functions")
    }
    ## map the irregular parameters into covfunargs
    smap <- match(names(start), names(covfunargs))
    if(anyNA(smap))
      stop("variables in start should be a subset of variables in covfunargs")
    covfunargs[smap] <- start
    ## fit the initial model and extract information
    ppmcall$covfunargs <- covfunargs
    fit0 <- eval(ppmcall, callframe)
    # lpl0 <- fit0$maxlogpl
    # p <- length(coef(fit0))
    ## examine covariates and trend
    covariates <- fit0$covariates
    isfun <- sapply(covariates, is.function)
    covfuns <- covariates[isfun]
    ## determine which covariates depend on which irregular parameters
    pnames <- names(start)
    depmat <- matrix(FALSE, nrow=length(covfuns), ncol=length(pnames))
    rownames(depmat) <- names(covfuns)
    colnames(depmat) <- pnames
    for(j in 1:length(pnames))
      depmat[,j] <- sapply(covfuns, hasarg, pnames[j])
    ## find covariates that depend on ANY irregular parameter
    depvar <- rownames(depmat)[apply(depmat, 1L, any)]
    ## check that these covariates appear only in offset terms
    covnames.fitted <- model.covariates(fit0, fitted=TRUE, offset=FALSE)
    if(any(uhoh <- depvar %in% covnames.fitted))
      stop(paste(ngettext(sum(uhoh), "The covariate", "The covariates"),
                 commasep(sQuote(depvar[uhoh])),
                 "should appear only in offset terms"))
    ## check that every irregular parameter to be updated appears somewhere
    cov.names.offset <- model.covariates(fit0, fitted=FALSE, offset=TRUE)
    covfun.names.offset <- intersect(cov.names.offset, names(covfuns))
    usearg <- apply(depmat[covfun.names.offset, , drop=FALSE], 2L, any)
    if(!all(usearg)) {
      if(warn.unused) {
        nbad <- sum(!usearg)
        warning(paste("Cannot maximise over the irregular",
                      ngettext(nbad,
                               "parameter", "parameters"),
                      commasep(sQuote(names(usearg)[!usearg])),
                      ngettext(nbad, "because it is", "because they are"),
                      "not used in any term of the model"))
      }
      ## restrict the optimisation to the parameters actually used
      start <- start[usearg]
      if(!is.null(iScore)) iScore <- iScore[usearg]
      pnames <- names(start)
    }
    if(length(start) == 0) {
      ## nothing left to optimise: plain ppm fit
      ppmcall <- ppmcall[names(ppmcall) != "covfunargs"]
      return(eval(ppmcall, callframe))
    }
    ## parameters for objective function
    fdata <- list(fit0=fit0,
                  nreg=length(coef(fit0)),
                  covfunargs=covfunargs,
                  smap=smap,
                  pnames=pnames,
                  iScore=iScore)
    ## minimise objective
    startvec <- unlist(start)
    typsize <- abs(startvec)
    typsize <- pmax(typsize, min(typsize[typsize > 0]))
    g <- do.call(nlm,
                 resolve.defaults(list(f=objectivefun,
                                       p=startvec,
                                       thedata=fdata),
                                  nlm.args,
                                  list(typsize=typsize)))
    popt <- g$estimate
    ## detect error states reported by nlm
    icode <- g$code
    if(!silent && icode > 2) {
      errmess <- nlmcodes[[icode]]
      if(!is.null(errmess))
        warning(errmess)
      else
        warning("Unrecognised error code ", paste(icode),
                " returned from nlm", call.=FALSE)
    }
    ## return optimised model
    covfunargs[smap] <- popt
    attr(covfunargs, "fitter") <- "ippm"
    attr(covfunargs, "free") <- names(start)
    fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE)
    fit$dispatched <- fit[c("call", "callstring", "callframe")]
    fit$call <- cl
    fit$callstring <- callstring
    fit$callframe <- callframe
    fit$iScore <- iScore
    class(fit) <- c("ippm", class(fit))
    return(fit)
  }

  ## define objective function
  objectivefun <- function(param, thedata) {
    with(thedata, {
      ## fit model with current irregular parameters
      param <- as.list(param)
      names(param) <- pnames
      covfunargs[smap] <- param
      fit <- update(fit0, covfunargs=covfunargs, use.internal=TRUE)
      lpl <- logLik(fit, warn=FALSE)
      ## return negative logL because nlm performs *minimisation*
      value <- -as.numeric(lpl)
      if(!is.null(iScore)) {
        ## compute analytic derivatives
        stuff <- ppmInfluence(fit, what="score",
                              iScore=iScore, iArgs=param)
        score <- stuff$score
        if(length(score) == length(coef(fit)) + length(param))
attr(value, "gradient") <- -score[-(1:nreg), drop=FALSE]
        ## attr(value, "hessian") <- -hess[-(1:nreg), -(1:nreg), drop=FALSE]
      }
      return(value)
    })
  }

  ## from help(nlm): messages corresponding to nlm return codes 1..5
  nlmcodes <- list(c("Relative gradient is close to zero; ",
                     "current iterate is probably solution"),
                   c("Successive iterates are within tolerance; ",
                     "current iterate is probably solution"),
                   c("Last global step failed to locate a point ",
                     "lower than current estimate. ",
                     "Either current estimate is an approximate ",
                     "local minimum of the function ",
                     "or 'steptol' is too small"),
                   "Iteration limit exceeded",
                   c("Maximum step size 'stepmax' ",
                     "exceeded five consecutive times. ",
                     "Either the function is unbounded below, ",
                     "becomes asymptotic to a finite value ",
                     "from above in some direction, ",
                     "or 'stepmax' is too small"))

  ippm
})

## update method for ippm objects:
## merges new arguments (possibly including a replacement formula)
## into the original fitting call and re-evaluates it.
update.ippm <- local({

  ## combine an old and a new formula, evaluating each in its own environment
  newformula <- function(old, change, eold, enew) {
    old <- eval(old, eold)
    change <- eval(change, enew)
    old <- as.formula(old, env=eold)
    change <- as.formula(change, env=enew)
    update.formula(old, change)
  }

  update.ippm <- function(object, ..., envir=environment(terms(object))) {
    # call <- match.call()
    new.call <- old.call <- object$call
    old.callframe <- object$callframe
    Qold <- eval(old.call$Q, as.list(envir), enclos=old.callframe)
    argh <- list(...)
    if(any(isfmla <- sapply(argh, inherits, what="formula"))) {
      if(sum(isfmla) > 1)
        stop("Syntax not understood: several arguments are formulas")
      i <- min(which(isfmla))
      new.fmla <- argh[[i]]
      argh <- argh[-i]
      if(inherits(Qold, "formula")) {
        ## formula will replace 'Q'
        if(is.null(lhs.of.formula(new.fmla))) {
          ## one-sided formula: keep '.' as the left-hand side
          f <- (. ~ x)
          f[[3L]] <- new.fmla[[2L]]
          new.fmla <- f
        }
        new.call$Q <- newformula(Qold, new.fmla, old.callframe, envir)
      } else if(inherits(Qold, c("ppp", "quad"))) {
        ## formula will replace 'trend' and may replace 'Q'
        new.fmla <- newformula(formula(object), new.fmla, old.callframe, envir)
        if(!is.null(lhs <- lhs.of.formula(new.fmla))) {
          ## substitute '.' in the lhs by the old data, then evaluate
          newQ <- eval(eval(substitute(substitute(l, list("."=Q)),
                                       list(l=lhs, Q=Qold))),
                       envir=as.list(envir), enclos=old.callframe)
          new.call$Q <- newQ
        }
        new.fmla <- rhs.of.formula(new.fmla)
        if("trend" %in% names(old.call)) {
          new.call$trend <- new.fmla
        } else {
          ## find which argument in the original call was a formula
          wasfmla <- sapply(old.call, formulaic,
                            envir=as.list(envir),
                            enclos=old.callframe)
          if(any(wasfmla)) {
            new.call[[min(which(wasfmla))]] <- new.fmla
          } else {
            new.call$trend <- new.fmla
          }
        }
      }
    }
    ## silence the warnings about unused covfunargs (unless overruled)
    new.call$warn.unused <- FALSE
    ## other arguments
    if(length(argh) > 0) {
      nama <- names(argh)
      named <- if(is.null(nama)) rep(FALSE, length(argh)) else nzchar(nama)
      if(any(named)) new.call[nama[named]] <- argh[named]
      if(any(!named)) new.call[length(new.call) + 1:sum(!named)] <- argh[!named]
    }
    result <- eval(new.call, as.list(envir), enclos=old.callframe)
    return(result)
  }

  ## does 'z' evaluate to a formula (without stopping on error)?
  formulaic <- function(z, envir, enclos) {
    u <- try(eval(z, envir, enclos))
    return(inherits(u, "formula"))
  }

  update.ippm
})
spatstat/R/nncross.R0000644000176200001440000001616713604234557014142 0ustar liggesusers#
# nncross.R
#
#
# $Revision: 1.31 $ $Date: 2020/01/05 00:46:13 $
#
#  Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012
#  Licence: GNU Public Licence >= 2

## generic: nearest neighbours between two patterns
nncross <- function(X, Y, ...) {
  UseMethod("nncross")
}

nncross.default <- function(X, Y, ...) {
  ## coerce X to a point pattern (window computed by 'boundingbox')
  X <- as.ppp(X, W=boundingbox)
  nncross(X, Y, ...)
}

## nearest (or k-th nearest) neighbours from points of X to objects of Y
nncross.ppp <- function(X, Y, iX=NULL, iY=NULL,
                        what = c("dist", "which"),
                        ...,
                        k = 1,
                        sortby=c("range", "var", "x", "y"),
                        is.sorted.X = FALSE,
                        is.sorted.Y = FALSE) {
  stopifnot(is.ppp(Y) || is.psp(Y))
  sortby <- match.arg(sortby)
  what   <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE)
  want.dist  <- "dist" %in% what
  want.which <- "which" %in% what
  want.both  <- want.dist && want.which
  if(!missing(k)) {
    # k can be a single integer or an integer vector
    if(length(k) == 0)
      stop("k is an empty vector")
    else if(length(k) == 1) {
      if(k != round(k) || k <= 0)
        stop("k is not a positive integer")
    } else {
      if(any(k != round(k)) || any(k <= 0))
        stop(paste("some entries of the vector",
                   sQuote("k"), "are not positive integers"))
    }
  }
  k <- as.integer(k)
  kmax <- max(k)
  nk <- length(k)
  nX <- npoints(X)
  nY <- nobjects(Y)

  ## trivial cases
  if(nX == 0 || nY == 0) {
    result <- list(dist=matrix(Inf, nrow=nX, ncol=nk),
                   which=matrix(NA_integer_, nrow=nX, ncol=nk))[what]
    result <- as.data.frame(result)
    if(ncol(result) == 1)
      result <- result[, , drop=TRUE]
    return(result)
  }

  # Y is a line segment pattern
  if(is.psp(Y)) {
    if(!identical(k, 1L))
      stop("Sorry, the case k > 1 is not yet implemented for psp objects")
    return(ppllengine(X,Y,"distance")[, what])
  }

  # Y is a point pattern
  if(is.null(iX) != is.null(iY))
    stop("If one of iX, iY is given, then both must be given")
  exclude <- (!is.null(iX) || !is.null(iY))
  if(exclude) {
    stopifnot(is.integer(iX) && is.integer(iY))
    if(length(iX) != nX)
      stop("length of iX does not match the number of points in X")
    if(length(iY) != nY)
      stop("length of iY does not match the number of points in Y")
  }
  if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y")))
    stop(paste("If data are already sorted,",
               "the sorting coordinate must be specified explicitly",
               "using sortby = \"x\" or \"y\""))

  # decide whether to sort on x or y coordinate
  switch(sortby,
         range = {
           WY <- as.owin(Y)
           sortby.y <- (diff(WY$xrange) < diff(WY$yrange))
         },
         var = {
           sortby.y <- (var(Y$x) < var(Y$y))
         },
         x={ sortby.y <- FALSE},
         y={ sortby.y <- TRUE}
         )

  # The C code expects points to be sorted by y coordinate.
  if(sortby.y) {
    Xx <- X$x
    Xy <- X$y
    Yx <- Y$x
    Yy <- Y$y
  } else {
    Xx <- X$y
    Xy <- X$x
    Yx <- Y$y
    Yy <- Y$x
  }

  # sort only if needed
  if(!is.sorted.X){
    oX <- fave.order(Xy)
    Xx <- Xx[oX]
    Xy <- Xy[oX]
    if(exclude) iX <- iX[oX]
  }
  if (!is.sorted.Y){
    oY <- fave.order(Yy)
    Yx <- Yx[oY]
    Yy <- Yy[oY]
    if(exclude) iY <- iY[oY]
  }

  # number of neighbours that are well-defined
  kmaxcalc <- min(nY, kmax)

  if(kmaxcalc == 1) {
    # ............... single nearest neighbour ..................
    # call C code
    nndv <- if(want.dist) numeric(nX) else numeric(1)
    nnwh <- if(want.which) integer(nX) else integer(1)
    if(!exclude) iX <- iY <- integer(1)
    huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(Y)))
    z <- .C("nnXinterface",
            n1=as.integer(nX),
            x1=as.double(Xx),
            y1=as.double(Xy),
            id1=as.integer(iX),
            n2=as.integer(nY),
            x2=as.double(Yx),
            y2=as.double(Yy),
            id2=as.integer(iY),
            exclude = as.integer(exclude),
            wantdist = as.integer(want.dist),
            wantwhich = as.integer(want.which),
            nnd=as.double(nndv),
            nnwhich=as.integer(nnwh),
            huge=as.double(huge),
            PACKAGE = "spatstat")
    if(want.which) {
      nnwcode <- z$nnwhich #sic. C code now increments by 1
      if(any(uhoh <- (nnwcode == 0))) {
        warning("NA's produced in nncross()$which")
        nnwcode[uhoh] <- NA
      }
    }
    # reinterpret in original ordering
    if(is.sorted.X){
      if(want.dist) nndv <- z$nnd
      if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode]
    } else {
      if(want.dist) nndv[oX] <- z$nnd
      if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode]
    }
    if(want.both) return(data.frame(dist=nndv, which=nnwh))
    return(if(want.dist) nndv else nnwh)
  } else {
    # ............... k nearest neighbours ..................
    # call C code
    nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1)
    nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1)
    if(!exclude) iX <- iY <- integer(1)
    huge <- 1.1 * diameter(boundingbox(as.rectangle(X), as.rectangle(Y)))
    z <- .C("knnXinterface",
            n1=as.integer(nX),
            x1=as.double(Xx),
            y1=as.double(Xy),
            id1=as.integer(iX),
            n2=as.integer(nY),
            x2=as.double(Yx),
            y2=as.double(Yy),
            id2=as.integer(iY),
            kmax=as.integer(kmaxcalc),
            exclude = as.integer(exclude),
            wantdist = as.integer(want.dist),
            wantwhich = as.integer(want.which),
            nnd=as.double(nndv),
            nnwhich=as.integer(nnwh),
            huge=as.double(huge),
            PACKAGE = "spatstat")
    # extract results
    nnD <- z$nnd
    nnW <- z$nnwhich
    # map 0 to NA
    if(want.which && any(uhoh <- (nnW == 0))) {
      nnW[uhoh] <- NA
      if(want.dist) nnD[uhoh] <- Inf
    }
    # reinterpret indices in original ordering
    if(!is.sorted.Y) nnW <- oY[nnW]
    # reform as matrices
    NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
    NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0
    if(!is.sorted.X){
      # rearrange rows to correspond to original ordering of points
      if(want.dist) NND[oX, ] <- NND
      if(want.which) NNW[oX, ] <- NNW
    }
    # the return value should correspond to the original vector k
    if(kmax > kmaxcalc) {
      # add columns of NA / Inf
      kextra <- kmax - kmaxcalc
      if(want.dist)
        NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra))
      if(want.which)
        NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra))
    }
    if(length(k) < kmax) {
      # select only the specified columns
      if(want.dist) NND <- NND[, k, drop=TRUE]
      if(want.which) NNW <- NNW[, k, drop=TRUE]
    }
    result <- as.data.frame(list(dist=NND, which=NNW)[what])
    colnames(result) <- c(if(want.dist) paste0("dist.", k) else NULL,
                          if(want.which) paste0("which.",k) else NULL)
    if(ncol(result) == 1)
      result <- result[, , drop=TRUE]
    return(result)
  }
}
spatstat/R/mpl.R0000644000176200001440000017132613513607733013253 0ustar liggesusers#  mpl.R
#
#  $Revision: 5.232 $	$Date: 2019/02/20 03:34:50 $
#
#
#  mpl.engine()    Fit a point process model to a two-dimensional point pattern
#                  by maximum pseudolikelihood
#
#  mpl.prepare()   set up data for glm procedure
#
# -------------------------------------------------------------------
#
# "mpl" <- function(Q,
#          trend = ~1,
#          interaction = NULL,
#          data = NULL,
#          correction="border",
#          rbord = 0,
#          use.gam=FALSE) {
#    .Deprecated("ppm", package="spatstat")
#    ppm(Q=Q, trend=trend, interaction=interaction,
#        covariates=data, correction=correction, rbord=rbord,
#        use.gam=use.gam, method="mpl")
# }

mpl.engine <-
  function(Q,
           trend = ~1,
           interaction = NULL,
           ...,
           covariates = NULL,
           subsetexpr = NULL,
           clipwin = NULL,
           covfunargs = list(),
           correction="border",
           rbord = 0,
           use.gam=FALSE,
           gcontrol=list(),
           GLM=NULL,
           GLMfamily=NULL,
           GLMcontrol=NULL,
           famille=NULL,
           forcefit=FALSE,
           nd = NULL,
           ## BUG FIX: the default was 'eps = eps', a self-referential
           ## promise.  If 'eps' were ever forced without the caller
           ## supplying a value, R would raise
           ## "promise already under evaluation".  NULL means
           ## "unspecified", consistent with 'nd = NULL' above, and is
           ## backward compatible because existing callers (ppm) always
           ## pass 'eps' explicitly.
           eps = NULL,
           allcovar=FALSE,
           callstring="",
           precomputed=NULL,
           savecomputed=FALSE,
           preponly=FALSE,
           rename.intercept=TRUE,
           justQ = FALSE,
           weightfactor = NULL)
{
  ## name of any user-supplied GLM fitter, recorded in the result
  GLMname <- if(!missing(GLM)) short.deparse(substitute(GLM)) else NULL
  ## Extract precomputed data if available
  if(!is.null(precomputed$Q)) {
    Q <- precomputed$Q
    X <- precomputed$X
    P <- precomputed$U
  } else {
    ## Determine quadrature scheme from argument Q
    if(verifyclass(Q, "quad", fatal=FALSE)) {
      ## user-supplied quadrature scheme - validate it
      validate.quad(Q, fatal=TRUE, repair=FALSE, announce=TRUE)
      ## Extract data points
      X <- Q$data
    } else if(verifyclass(Q, "ppp", fatal = FALSE)) {
      ## point pattern - create default quadrature scheme
      X <- Q
      Q <- quadscheme(X, nd=nd, eps=eps, check=FALSE)
    } else
      stop("First argument Q should be a point pattern or a quadrature scheme")
    ## Data and dummy points together
    P <- union.quad(Q)
  }
  ## clip to subset?
if(!is.null(clipwin)) {
    ## restrict the data, dummy points and covariates to 'clipwin'
    if(is.data.frame(covariates))
      covariates <- covariates[inside.owin(P, w=clipwin), , drop=FALSE]
    Q <- Q[clipwin]
    X <- X[clipwin]
    P <- P[clipwin]
  }
  ## secret exit
  if(justQ) return(Q)
  ##
  computed <- if(savecomputed) list(X=X, Q=Q, U=P) else NULL
  ##
  ## Validate main arguments
  if(!is.null(trend) && !inherits(trend, "formula"))
    stop(paste("Argument", sQuote("trend"), "must be a formula"))
  if(!is.null(interaction) && !inherits(interaction, "interact"))
    stop(paste("Argument", sQuote("interaction"), "has incorrect format"))
  ##
  check.1.real(rbord, "In ppm")
  explain.ifnot(rbord >= 0, "In ppm")
  ## rbord applies only to border correction
  if(correction != "border") rbord <- 0
  ##
  covfunargs <- as.list(covfunargs)
  ##
  ## Interpret the call
  if(is.null(trend)) {
    trend <- ~1
    environment(trend) <- parent.frame()
  }
  want.trend <- !identical.formulae(trend, ~1)
  want.inter <- !is.null(interaction) && !is.null(interaction$family)
  ## Stamp with spatstat version number
  spv <- package_version(versionstring.spatstat())
  the.version <- list(major=spv$major,
                      minor=spv$minor,
                      release=spv$patchlevel,
                      date="$Date: 2019/02/20 03:34:50 $")
  if(want.inter) {
    ## ensure we're using the latest version of the interaction object
    if(outdated.interact(interaction))
      interaction <- update(interaction)
  }
  ##
  if(!want.trend && !want.inter &&
     !forcefit && !allcovar && is.null(subsetexpr)) {
    ## the model is the uniform Poisson process
    ## The MPLE (= MLE) can be evaluated directly
    npts <- npoints(X)
    W <- as.owin(X)
    if(correction == "border" && rbord > 0) {
      npts <- sum(bdist.points(X) >= rbord)
      areaW <- eroded.areas(W, rbord)
    } else {
      npts <- npoints(X)
      areaW <- area(W)
    }
    volume <- areaW * markspace.integral(X)
    lambda <- npts/volume
    ## fitted canonical coefficient
    co <- log(lambda)
    ## asymptotic variance of canonical coefficient
    varcov <- matrix(1/npts, 1, 1)
    fisher <- matrix(npts, 1, 1)
    se <- sqrt(1/npts)
    ## give names
    tag <- if(rename.intercept) "log(lambda)" else "(Intercept)"
    names(co) <- tag
    dimnames(varcov) <- dimnames(fisher) <- list(tag, tag)
    ## maximised log likelihood
    maxlogpl <- if(npts == 0) 0 else npts * (log(lambda) - 1)
    ##
    rslt <- list(
      method      = "mpl",
      fitter      = "exact",
      projected   = FALSE,
      coef        = co,
      trend       = trend,
      interaction = NULL,
      fitin       = fii(),
      Q           = Q,
      maxlogpl    = maxlogpl,
      satlogpl    = NULL,
      internal    = list(computed=computed, se=se),
      covariates  = mpl.usable(covariates),
      ## covariates are still retained!
      covfunargs  = covfunargs,
      subsetexpr  = NULL,
      correction  = correction,
      rbord       = rbord,
      terms       = terms(trend),
      fisher      = fisher,
      varcov      = varcov,
      version     = the.version,
      problems    = list())
    class(rslt) <- "ppm"
    return(rslt)
  }
  #################  P r e p a r e    D a t a   ######################
  prep <- mpl.prepare(Q, X, P, trend, interaction, covariates,
                      want.trend, want.inter, correction, rbord,
                      "quadrature points", callstring,
                      subsetexpr=subsetexpr,
                      allcovar=allcovar,
                      precomputed=precomputed,
                      savecomputed=savecomputed,
                      covfunargs=covfunargs,
                      weightfactor=weightfactor,
                      ...)
  ## back door
  if(preponly) {
    ## exit now, returning prepared data frame and internal information
    prep$info <- list(want.trend=want.trend,
                      want.inter=want.inter,
                      correction=correction,
                      rbord=rbord,
                      interaction=interaction)
    return(prep)
  }
  fmla <- prep$fmla
  glmdata <- prep$glmdata
  problems <- prep$problems
  likelihood.is.zero <- prep$likelihood.is.zero
  is.identifiable <- prep$is.identifiable
  computed <- resolve.defaults(prep$computed, computed)
  IsOffset <- prep$IsOffset
  ## update covariates (if they were resolved from the environment)
  if(!is.null(prep$covariates))
    covariates <- prep$covariates
  #################  F i t   i t   ####################################
  if(!is.identifiable)
    stop(paste("in", callstring, ":", problems$unidentifiable$print),
         call.=FALSE)
  ## to avoid problem with package checker
  .mpl.W <- glmdata$.mpl.W
  .mpl.SUBSET <- glmdata$.mpl.SUBSET
  ## determine algorithm control parameters
  if(is.null(gcontrol)) gcontrol <- list() else stopifnot(is.list(gcontrol))
  gcontrol <- if(!is.null(GLMcontrol)) do.call(GLMcontrol, gcontrol) else
              if(want.trend && use.gam) do.call(mgcv::gam.control, gcontrol) else
              do.call(stats::glm.control, gcontrol)
  ## Fit the generalized linear/additive model.
  if(is.null(GLM) && is.null(famille)) {
    ## the sanctioned technique, using `quasi' family
    if(want.trend && use.gam) {
      FIT  <- gam(fmla, family=quasi(link="log", variance="mu"),
                  weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET,
                  control=gcontrol)
      fittername <- "gam"
    } else {
      FIT  <- glm(fmla, family=quasi(link="log", variance="mu"),
                  weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET,
                  control=gcontrol, model=FALSE)
      fittername <- "glm"
    }
  } else if(!is.null(GLM)) {
    ## alternative GLM fitting function or penalised GLM etc
    fam <- GLMfamily %orifnull% quasi(link="log", variance="mu")
    FIT <- GLM(fmla, family=fam,
               weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET,
               control=gcontrol)
    fittername <- GLMname
  } else {
    ## experimentation only!
    if(is.function(famille)) famille <- famille()
    stopifnot(inherits(famille, "family"))
    if(want.trend && use.gam) {
      FIT  <- gam(fmla, family=famille,
                  weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET,
                  control=gcontrol)
      fittername <- "experimental"
    } else {
      FIT  <- glm(fmla, family=famille,
                  weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET,
                  control=gcontrol, model=FALSE)
      fittername <- "experimental"
    }
  }
  environment(FIT$terms) <- sys.frame(sys.nframe())
  ################  I n t e r p r e t    f i t   #######################
  ## Fitted coefficients
  co <- FIT$coef
  ## glm covariates
  W <- glmdata$.mpl.W
  SUBSET <- glmdata$.mpl.SUBSET
  Z <- is.data(Q)
  Vnames <- prep$Vnames
  ## saturated log pseudolikelihood
  satlogpl <- - (sum(log(W[Z & SUBSET])) + sum(Z & SUBSET))
  ## attained value of max log pseudolikelihood
  maxlogpl  <- if(likelihood.is.zero) -Inf else (satlogpl - deviance(FIT)/2)
  ## fitted interaction object
  fitin <- if(want.inter) fii(interaction, co, Vnames, IsOffset) else fii()
  unitname(fitin) <- unitname(X)
  ######################################################################
  ## Clean up & return
  rslt <- list(
    method       = "mpl",
    fitter       = fittername,
    projected    = FALSE,
    coef         = co,
    trend        = trend,
    interaction  = if(want.inter) interaction else NULL,
    fitin        = fitin,
    Q            = Q,
    maxlogpl     = maxlogpl,
    satlogpl     = satlogpl,
    internal     = list(glmfit=FIT, glmdata=glmdata, Vnames=Vnames,
                        IsOffset=IsOffset, fmla=fmla, computed=computed,
                        vnamebase=prep$vnamebase,
                        vnameprefix=prep$vnameprefix),
    covariates   = mpl.usable(covariates),
    covfunargs   = covfunargs,
    subsetexpr   = subsetexpr,
    correction   = correction,
    rbord        = rbord,
    terms        = terms(trend),
    version      = the.version,
    problems     = problems)
  class(rslt) <- "ppm"
  return(rslt)
}

##########################################################################
### /////////////////////////////////////////////////////////////////////
##########################################################################

mpl.prepare <- local({

  mpl.prepare <- function(Q, X, P, trend, interaction, covariates,
                          want.trend, want.inter, correction, rbord,
                          Pname="quadrature points", callstring="",
                          ...,
                          subsetexpr=NULL,
                          covfunargs=list(),
                          allcovar=FALSE,
                          precomputed=NULL, savecomputed=FALSE,
                          vnamebase=c("Interaction", "Interact."),
                          vnameprefix=NULL,
                          warn.illegal=TRUE,
                          warn.unidentifiable=TRUE,
                          weightfactor=NULL,
                          skip.border=FALSE,
                          clip.interaction=TRUE,
                          splitInf=FALSE) {
    ## Q: quadrature scheme
    ## X = data.quad(Q)
    ## P = union.quad(Q)
    if(missing(want.trend))
      want.trend <- !is.null(trend) && !identical.formulae(trend, ~1)
    if(missing(want.inter))
      want.inter <- !is.null(interaction) && !is.null(interaction$family)
    want.subset <- !is.null(subsetexpr)
    computed <- list()
    problems <- list()
    names.precomputed <- names(precomputed)
    likelihood.is.zero <- FALSE
    is.identifiable <- TRUE
    if(!missing(vnamebase)) {
      if(length(vnamebase) == 1)
        vnamebase <- rep.int(vnamebase, 2)
      if(!is.character(vnamebase) || length(vnamebase) != 2)
        stop("Internal error: illegal format of vnamebase")
    }
if(!is.null(vnameprefix)) {
      if(!is.character(vnameprefix) || length(vnameprefix) != 1)
        stop("Internal error: illegal format of vnameprefix")
    }

    ################ C o m p u t e     d a t a  ####################

    ## Extract covariate values
    updatecovariates <- FALSE
    covariates.df <- NULL
    if(allcovar || want.trend || want.subset) {
      if("covariates.df" %in% names.precomputed) {
        covariates.df <- precomputed$covariates.df
      } else {
        if(!is.data.frame(covariates)) {
          ## names of 'external' covariates to be found
          covnames <- variablesinformula(trend)
          if(want.subset)
            covnames <- union(covnames, all.vars(subsetexpr))
          if(allcovar)
            covnames <- union(covnames, names(covariates))
          covnames <- setdiff(covnames, c("x", "y", "marks"))
          ## resolve 'external' covariates
          tenv <- environment(trend)
          covariates <- getdataobjects(covnames, tenv, covariates, fatal=TRUE)
          updatecovariates <- any(attr(covariates, "external"))
        }
        ## extract values of covariates ('internal' and 'external')
        covariates.df <- mpl.get.covariates(covariates, P, Pname, covfunargs)
      }
      if(savecomputed)
        computed$covariates.df <- covariates.df
    }

    ## Form the weights and the ``response variable''.
    if("dotmplbase" %in% names.precomputed)
      .mpl <- precomputed$dotmplbase
    else {
      nQ <- n.quad(Q)
      wQ <- w.quad(Q)
      mQ <- marks.quad(Q)  ## is NULL for unmarked patterns
      zQ <- is.data(Q)
      yQ <- numeric(nQ)
      yQ[zQ] <- 1/wQ[zQ]
      zeroes <- attr(wQ, "zeroes")
      sQ <- if(is.null(zeroes)) rep.int(TRUE, nQ) else !zeroes
      ## tweak weights ONLY
      if(!is.null(weightfactor))
        wQ <- wQ * weightfactor
      ## pack up
      .mpl <- list(W      = wQ,
                   Z      = zQ,
                   Y      = yQ,
                   MARKS  = mQ,
                   SUBSET = sQ)
    }
    if(savecomputed)
      computed$dotmplbase <- .mpl
    glmdata <- data.frame(.mpl.W = .mpl$W,
                          .mpl.Y = .mpl$Y)

    ## count data and dummy points in specified subset
    izdat <- .mpl$Z[.mpl$SUBSET]
    ndata <- sum(izdat)
    # ndummy <- sum(!izdat)

    ## Determine the domain of integration for the pseudolikelihood.
    if(correction == "border") {
      bdP <- if("bdP" %in% names.precomputed) precomputed$bdP else
             bdist.points(P)
      if(savecomputed)
        computed$bdP <- bdP
      .mpl$DOMAIN <- (bdP >= rbord)
    }

    skip.border <- skip.border && (correction == "border")

    ####################### T r e n d ##############################

    internal.names <- c(".mpl.W", ".mpl.Y", ".mpl.Z", ".mpl.SUBSET",
                        "SUBSET", ".mpl")
    reserved.names <- c("x", "y", "marks", internal.names)
    if(allcovar || want.trend || want.subset) {
      trendvariables <- variablesinformula(trend)
      ## Check for use of internal names in trend
      cc <- check.clashes(internal.names, trendvariables, "the model formula")
      if(cc != "") stop(cc)
      if(want.subset) {
        subsetvariables <- all.vars(subsetexpr)
        ## BUG FIX: this previously re-checked 'trendvariables' (already
        ## checked immediately above), so the variables of the subset
        ## expression were never screened against the reserved internal
        ## names, despite the message naming "the subset expression".
        cc <- check.clashes(internal.names, subsetvariables,
                            "the subset expression")
        if(cc != "") stop(cc)
        trendvariables <- union(trendvariables, subsetvariables)
      }
      ## Standard variables
      if(allcovar || "x" %in% trendvariables)
        glmdata <- data.frame(glmdata, x=P$x)
      if(allcovar || "y" %in% trendvariables)
        glmdata <- data.frame(glmdata, y=P$y)
      if(("marks" %in% trendvariables) || !is.null(.mpl$MARKS)) {
        if(is.null(.mpl$MARKS))
          stop("Model formula depends on marks, but data do not have marks",
               call.=FALSE)
        glmdata <- data.frame(glmdata, marks=.mpl$MARKS)
      }
      ##
      ## Check covariates
      if(!is.null(covariates.df)) {
        ## Check for duplication of reserved names
        cc <- check.clashes(reserved.names, names(covariates),
                            sQuote("covariates"))
        if(cc != "") stop(cc)
        ## Take only those covariates that are named in the trend formula
        if(!allcovar)
          needed <- names(covariates.df) %in% trendvariables
        else
          needed <- rep.int(TRUE, ncol(covariates.df))
        if(any(needed)) {
          covariates.needed <- covariates.df[, needed, drop=FALSE]
          ##  Append to `glmdata'
          glmdata <- data.frame(glmdata,covariates.needed)
          ##  Ignore any quadrature points that have NA's in the covariates
          nbg <- is.na(covariates.needed)
          if(any(nbg)) {
            offending <- matcolany(nbg)
            covnames.na <- names(covariates.needed)[offending]
            quadpoints.na <-
matrowany(nbg) n.na <- sum(quadpoints.na) n.tot <- length(quadpoints.na) errate <- n.na/n.tot pcerror <- round(signif(100 * errate, 2), 2) complaint <- paste("Values of the", ngettext(length(covnames.na), "covariate", "covariates"), paste(sQuote(covnames.na), collapse=", "), "were NA or undefined at", paste(pcerror, "%", " (", n.na, " out of ", n.tot, ")", sep=""), "of the", Pname) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. = FALSE) .mpl$SUBSET <- .mpl$SUBSET & !quadpoints.na details <- list(covnames.na = covnames.na, quadpoints.na = quadpoints.na, print = complaint) problems <- append(problems, list(na.covariates=details)) } } } } ###################### I n t e r a c t i o n #################### Vnames <- NULL IsOffset <- NULL forbid <- NULL if(want.inter) { ## Form the matrix of "regression variables" V. ## The rows of V correspond to the rows of P (quadrature points) ## while the column(s) of V are the regression variables (log-potentials) E <- precomputed$E %orifnull% equalpairs.quad(Q) if(!skip.border) { ## usual case V <- evalInteraction(X, P, E, interaction, correction, ..., splitInf=splitInf, precomputed=precomputed, savecomputed=savecomputed) } else { ## evaluate only in eroded domain if(all(c("Esub", "Usub", "Retain") %in% names.precomputed)) { ## use precomputed data Psub <- precomputed$Usub Esub <- precomputed$Esub Retain <- precomputed$Retain } else { ## extract subset of quadrature points Retain <- .mpl$DOMAIN | is.data(Q) Psub <- P[Retain] ## map serial numbers in P to serial numbers in Psub Pmap <- cumsum(Retain) ## extract subset of equal-pairs matrix keepE <- Retain[ E[,2] ] Esub <- E[ keepE, , drop=FALSE] ## adjust indices in equal pairs matrix Esub[,2] <- Pmap[Esub[,2]] } ## call evaluator on reduced data if(all(c("X", "Q", "U") %in% names.precomputed)) { subcomputed <- resolve.defaults(list(E=Esub, U=Psub, Q=Q[Retain]), precomputed) } else subcomputed <- NULL if(clip.interaction) { ## normal V <- 
evalInteraction(X, Psub, Esub, interaction, correction, ..., splitInf=splitInf, precomputed=subcomputed, savecomputed=savecomputed) } else { ## ignore window when calculating interaction ## by setting 'W=NULL' (currently detected only by AreaInter) V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., W=NULL, splitInf=splitInf, precomputed=subcomputed, savecomputed=savecomputed) } if(savecomputed) { computed$Usub <- Psub computed$Esub <- Esub computed$Retain <- Retain } } if(!is.matrix(V)) stop("interaction evaluator did not return a matrix") ## extract information about offsets IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- FALSE if(splitInf) { ## extract information about hard core terms forbid <- attr(V, "-Inf") %orifnull% logical(nrow(V)) } if(skip.border) { ## fill in the values in the border region with zeroes. Vnew <- matrix(0, nrow=npoints(P), ncol=ncol(V)) colnames(Vnew) <- colnames(V) Vnew[Retain, ] <- V ## retain attributes attr(Vnew, "IsOffset") <- IsOffset attr(Vnew, "computed") <- attr(V, "computed") attr(Vnew, "POT") <- attr(V, "POT") V <- Vnew if(splitInf) { fnew <- logical(nrow(Vnew)) fnew[Retain] <- forbid forbid <- fnew } } ## extract intermediate computation results if(savecomputed) computed <- resolve.defaults(attr(V, "computed"), computed) ## Augment data frame by appending the regression variables ## for interactions. ## ## First determine the names of the variables ## Vnames <- dimnames(V)[[2]] if(is.null(Vnames)) { ## No names were provided for the columns of V. ## Give them default names. ## In ppm the names will be "Interaction" ## or "Interact.1", "Interact.2", ... ## In mppm an alternative tag will be specified by vnamebase. nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1] else paste0(vnamebase[2], 1:nc) dimnames(V) <- list(dimnames(V)[[1]], Vnames) } else if(!is.null(vnameprefix)) { ## Variable names were provided by the evaluator (e.g. MultiStrauss). 
## Prefix the variable names by a string ## (typically required by mppm) Vnames <- paste(vnameprefix, Vnames, sep="") dimnames(V) <- list(dimnames(V)[[1]], Vnames) } ## Check the names are valid as column names in a dataframe okVnames <- make.names(Vnames, unique=TRUE) if(any(Vnames != okVnames)) { warning(paste("Names of interaction terms", "contained illegal characters;", "names have been repaired.")) Vnames <- okVnames } ## Check for name clashes between the interaction variables ## and the formula cc <- check.clashes(Vnames, termsinformula(trend), "model formula") if(cc != "") stop(cc) ## and with the variables in 'covariates' if(!is.null(covariates)) { cc <- check.clashes(Vnames, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) } ## OK. append variables. glmdata <- data.frame(glmdata, V) ## check IsOffset matches Vnames if(length(IsOffset) != length(Vnames)) { if(length(IsOffset) == 1) IsOffset <- rep.int(IsOffset, length(Vnames)) else stop("Internal error: IsOffset has wrong length", call.=FALSE) } ## Keep only those quadrature points for which the ## conditional intensity is nonzero. ##KEEP <- apply(V != -Inf, 1, all) .mpl$KEEP <- matrowall(V != -Inf) .mpl$SUBSET <- .mpl$SUBSET & .mpl$KEEP ## Check that there are at least some data and dummy points remaining datremain <- .mpl$Z[.mpl$SUBSET] somedat <- any(datremain) somedum <- !all(datremain) if(warn.unidentifiable && !(somedat && somedum)) { ## Model would be unidentifiable if it were fitted. 
## Register problem is.identifiable <- FALSE if(ndata == 0) { complaint <- "model is unidentifiable: data pattern is empty" } else { offending <- !c(somedat, somedum) offending <- c("all data points", "all dummy points")[offending] offending <- paste(offending, collapse=" and ") complaint <- paste("model is unidentifiable:", offending, "have zero conditional intensity") } details <- list(data=!somedat, dummy=!somedum, print=complaint) problems <- append(problems, list(unidentifiable=details)) } ## check whether the model has zero likelihood: ## check whether ANY data points have zero conditional intensity if(any(.mpl$Z & !.mpl$KEEP)) { howmany <- sum(.mpl$Z & !.mpl$KEEP) complaint <- paste(howmany, "data point(s) are illegal", "(zero conditional intensity under the model)") details <- list(illegal=howmany, print=complaint) problems <- append(problems, list(zerolikelihood=details)) if(warn.illegal && is.identifiable) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. 
= FALSE) likelihood.is.zero <- TRUE } } ################## S u b s e t ################### if(correction == "border") .mpl$SUBSET <- .mpl$SUBSET & .mpl$DOMAIN if(!is.null(subsetexpr)) { ## user-defined subset expression USER.SUBSET <- eval(subsetexpr, glmdata, environment(trend)) if(is.owin(USER.SUBSET)) { USER.SUBSET <- inside.owin(P$x, P$y, USER.SUBSET) } else if(is.im(USER.SUBSET)) { USER.SUBSET <- as.logical(USER.SUBSET[P, drop=FALSE]) if(anyNA(USER.SUBSET)) USER.SUBSET[is.na(USER.SUBSET)] <- FALSE } if(!(is.logical(USER.SUBSET) || is.numeric(USER.SUBSET))) stop("Argument 'subset' should yield logical values", call.=FALSE) if(anyNA(USER.SUBSET)) { USER.SUBSET[is.na(USER.SUBSET)] <- FALSE warning("NA values in argument 'subset' were changed to FALSE", call.=FALSE) } .mpl$SUBSET <- .mpl$SUBSET & USER.SUBSET } glmdata <- cbind(glmdata, data.frame(.mpl.SUBSET=.mpl$SUBSET, stringsAsFactors=FALSE)) ################# F o r m u l a ################################## if(!want.trend) trend <- ~1 trendpart <- paste(as.character(trend), collapse=" ") if(!want.inter) rhs <- trendpart else { VN <- Vnames ## enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") rhs <- paste(c(trendpart, VN), collapse= "+") } fmla <- paste(".mpl.Y ", rhs) fmla <- as.formula(fmla) ## character string of trend formula (without Vnames) trendfmla <- paste(".mpl.Y ", trendpart) #### result <- list(fmla=fmla, trendfmla=trendfmla, covariates=if(updatecovariates) covariates else NULL, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, subsetexpr=subsetexpr, problems=problems, likelihood.is.zero=likelihood.is.zero, is.identifiable=is.identifiable, computed=computed, vnamebase=vnamebase, vnameprefix=vnameprefix, forbid=forbid) return(result) } check.clashes <- function(forbidden, offered, where) { name.match <- outer(forbidden, offered, "==") if(any(name.match)) { is.matched <- apply(name.match, 2, any) matched.names <- 
####################################################################
####################################################################

## mpl.usable:
##   Filter a list of candidate covariates, silently dropping entries
##   that are not in a recognised format.  Recognised formats are:
##   pixel image, function, window, tessellation, or a single numeric
##   value.  A data frame (or an empty object) is returned unchanged.

mpl.usable <- function(x) {
  ## silently remove covariates that don't have recognised format
  if(length(x) == 0 || is.data.frame(x)) return(x)
  isim   <- sapply(x, is.im)
  isfun  <- sapply(x, is.function)
  iswin  <- sapply(x, is.owin)
  istess <- sapply(x, is.tess)
  isnum  <- sapply(x, is.numeric) & (lengths(x) == 1)
  recognised <- isim | isfun | iswin | istess | isnum
  if(!all(recognised))
    x <- x[recognised]
  return(x)
}

## mpl.get.covariates:
##   Evaluate covariates at a set of locations, returning one data frame
##   row per location.
##   Arguments:
##     covariates - a data frame (returned as-is after a row-count check)
##                  or a named list of images/functions/windows/
##                  tessellations/single numbers.
##     locations  - anything with $x, $y or interpretable by xy.coords().
##     type       - noun used in error messages (e.g. "locations").
##     covfunargs - extra named arguments passed to covariate functions.
##     need.deriv - if TRUE, collect "gradient"/"hessian" attributes from
##                  function values into attr(result, "derivatives").
##   NOTE: uses substitute() on both arguments for error messages, so the
##   argument expressions themselves are captured (non-standard evaluation).

mpl.get.covariates <- local({

  mpl.get.covariates <- function(covariates, locations, type="locations",
                                 covfunargs=list(),
                                 need.deriv=FALSE) {
    covargname <- sQuote(short.deparse(substitute(covariates)))
    locargname <- sQuote(short.deparse(substitute(locations)))
    if(is.null(covfunargs)) covfunargs <- list()
    ##
    x <- locations$x
    y <- locations$y
    if(is.null(x) || is.null(y)) {
      ## fall back on the standard coordinate extractor
      xy <- xy.coords(locations)
      x <- xy$x
      y <- xy$y
    }
    if(is.null(x) || is.null(y))
      stop(paste("Can't interpret", locargname, "as x,y coordinates"))
    n <- length(x)
    if(is.data.frame(covariates)) {
      ## data frame: just validate the number of rows
      if(nrow(covariates) != n)
        stop(paste("Number of rows in", covargname,
                   "does not equal the number of", type))
      return(covariates)
    } else if(is.list(covariates)) {
      if(length(covariates) == 0)
        return(as.data.frame(matrix(, n, 0)))
      ## classify each entry of the list
      isim   <- unlist(lapply(covariates, is.im))
      isfun  <- unlist(lapply(covariates, is.function))
      iswin  <- unlist(lapply(covariates, is.owin))
      istess <- unlist(lapply(covariates, is.tess))
      isnum  <- unlist(lapply(covariates, is.number))
      if(!all(isim | isfun | isnum | iswin | istess))
        stop(paste("Each entry in the list", covargname,
                   "should be an image, a function,",
                   "a window, a tessellation or a single number"))
      if(sum(nzchar(names(covariates))) < length(covariates))
        stop(paste("Some entries in the list", covargname, "are un-named"))
      ## look up values of each covariate at the quadrature points
      values <- unclass(covariates)
      values[isim] <- lapply(covariates[isim], lookup.im, x=x, y=y,
                             naok=TRUE, strict=FALSE)
      values[isfun] <- vf <- lapply(covariates[isfun], evalfxy, x=x, y=y,
                                    extra=covfunargs)
      values[isnum] <- lapply(covariates[isnum], rep, length(x))
      ## windows become logical indicators; tessellations become tile labels
      values[iswin] <- lapply(covariates[iswin], insidexy, x=x, y=y)
      values[istess] <- lapply(covariates[istess], tileindex, x=x, y=y)
      result <- as.data.frame(values)
      if(need.deriv && any(isfun)) {
        ## check for gradient/hessian attributes of function values
        grad <- lapply(vf, attr, which="gradient")
        hess <- lapply(vf, attr, which="hessian")
        grad <- grad[!unlist(lapply(grad, is.null))]
        hess <- hess[!unlist(lapply(hess, is.null))]
        if(length(grad) > 0 || length(hess) > 0)
          attr(result, "derivatives") <- list(gradient=grad, hessian=hess)
      }
      return(result)
    }
    stop(paste(covargname, "must be either a data frame or a list"))
  }

  ## functions for 'apply'

  ## evaluate a covariate function f(x, y, ...) at the given coordinates,
  ## passing only those extra arguments that f actually declares
  evalfxy <- function(f, x, y, extra) {
    if(length(extra) == 0)
      return(f(x,y))
    ## extra arguments must be matched explicitly by name
    ok <- names(extra) %in% names(formals(f))
    z <- do.call(f, append(list(x,y), extra[ok]))
    return(z)
  }

  ## logical indicator of membership in a window
  insidexy <- function(w, x, y) { inside.owin(x, y, w) }

  ## a 'number' is a numeric of length exactly 1
  is.number <- function(x) { is.numeric(x) && (length(x) == 1) }

  mpl.get.covariates
})

## bt.frame:
##   Construct the Berman-Turner model frame for a quadrature scheme,
##   by running mpl.engine() with preponly=TRUE so that only the
##   preparation step (not the model fit) is performed.
##   Returns an object of class "bt.frame".

bt.frame <- function(Q, trend=~1, interaction=NULL,
                     ...,
                     covariates=NULL,
                     correction="border", rbord=0,
                     use.gam=FALSE, allcovar=FALSE) {
  prep <- mpl.engine(Q, trend=trend, interaction=interaction,
                     ..., covariates=covariates,
                     correction=correction, rbord=rbord,
                     use.gam=use.gam, allcovar=allcovar,
                     preponly=TRUE, forcefit=TRUE)
  class(prep) <- c("bt.frame", class(prep))
  return(prep)
}

## print.bt.frame:
##   Print method for "bt.frame" objects: displays the glm data frame
##   dimensions, model formulae, interaction, edge correction and any
##   recorded problems.  Invisibly returns NULL.

print.bt.frame <- function(x, ...) {
  cat("Model frame for Berman-Turner device\n")
  df <- x$glmdata
  cat(paste("$glmdata: Data frame with", nrow(df), "rows and",
            ncol(df), "columns\n"))
  cat(" Column names:\t")
  cat(paste(paste(names(df),collapse="\t"), "\n"))
  cat("Complete model formula ($fmla):\t")
  print(x$fmla)
  info <- x$info
  if(info$want.trend) {
    cat("Trend:\tyes\nTrend formula string ($trendfmla):\t")
    cat(paste(x$trendfmla, "\n"))
  } else cat("Trend:\tno\n")
  cat("Interaction ($info$interaction):\t")
  inte <- info$interaction
  if(is.null(inte))
    inte <- Poisson()
  print(inte, family=FALSE, brief=TRUE)
  if(!is.poisson.interact(inte)) {
    cat("Internal names of interaction variables ($Vnames):\t")
    cat(paste(x$Vnames, collapse="\t"))
    cat("\n")
  }
  edge <- info$correction
  cat(paste("Edge correction ($info$correction):\t",
            sQuote(edge), "\n"))
  if(edge == "border")
    cat(paste("\tBorder width ($info$rbord):\t",
              info$rbord, "\n"))
  if(length(x$problems) > 0) {
    cat("Problems:\n")
    print(x$problems)
  }
  if(length(x$computed) > 0)
    cat(paste("Frame contains saved computations for",
              commasep(dQuote(names(x$computed)))))
  return(invisible(NULL))
}

## partialModelMatrix:
##   Compute the design (model) matrix of a fitted ppm model for the
##   quadrature scheme formed from data points X and dummy points D.
##   Attaches the Berman-Turner SUBSET as attribute "mplsubset" and any
##   hard-core indicator as attribute "-Inf".
##   Warns (does not stop) if the matrix column names do not match the
##   names of the fitted coefficients.

partialModelMatrix <- function(X, D, model, callstring="", ...) {
  ## X = 'data'
  ## D = 'dummy'
  Q <- quad(X,D)
  P <- union.quad(Q)
  trend <- model$trend
  inter <- model$interaction
  covar <- model$covariates
  prep <- mpl.prepare(Q, X, P, trend, inter, covar,
                      correction=model$correction,
                      rbord=model$rbord,
                      Pname="data points",
                      callstring=callstring,
                      warn.unidentifiable=FALSE,
                      ...)
  fmla <- prep$fmla
  glmdata <- prep$glmdata
  mof <- model.frame(fmla, glmdata)
  mom <- model.matrix(fmla, mof)
  ## the intercept may be labelled "log(lambda)" in the fitted model
  modelnames <- names(coef(model))
  modelnames <- sub("log(lambda)", "(Intercept)", modelnames, fixed=TRUE)
  if(!isTRUE(all.equal(colnames(mom), modelnames)))
    warning(paste("Internal error: mismatch between",
                  "column names of model matrix",
                  "and names of coefficient vector in fitted model"))
  attr(mom, "mplsubset") <- glmdata$.mpl.SUBSET
  attr(mom, "-Inf") <- prep$forbid
  return(mom)
}

## oversize.quad:
##   Decide whether a quadrature scheme is too large to be processed in
##   a single pass (the nU x nX interaction matrix would exceed the
##   spatstat.options("maxmatrix") budget, divided by the dimension p
##   of the statistic).

oversize.quad <- function(Q, ..., nU, nX, p=1) {
  ## Determine whether the quadrature scheme is
  ## too large to handle in one piece (in mpl)
  ## for a generic interaction
  ##    nU = number of quadrature points
  ##    nX = number of data points
  ##    p = dimension of statistic
  if(missing(nU))
    nU <- n.quad(Q)
  if(missing(nX))
    nX <- npoints(Q$data)
  ## as.double avoids integer overflow for large schemes
  nmat <- as.double(nU) * nX
  nMAX <- spatstat.options("maxmatrix")/p
  needsplit <- (nmat > nMAX)
  return(needsplit)
}
points")) } message(msg) } else nlastblock <- nperblock return(list(nblocks=nblocks, nperblock=nperblock, nlastblock=nlastblock)) } ## function that should be called to evaluate interaction terms ## between quadrature points and data points evalInteraction <- function(X, P, E = equalpairs(P, X), interaction, correction, splitInf=FALSE, ..., precomputed=NULL, savecomputed=FALSE) { ## evaluate the interaction potential ## (does not assign/touch the variable names) verifyclass(interaction, "interact") ## handle Poisson case if(is.poisson(interaction)) { out <- matrix(numeric(0), nrow=npoints(P), ncol=0) attr(out, "IsOffset") <- logical(0) if(splitInf) attr(out, "-Inf") <- logical(nrow(out)) return(out) } ## determine whether to use fast evaluation in C dofast <- (spatstat.options("fasteval") %in% c("on", "test")) && !is.null(cando <- interaction$can.do.fast) && cando(X, correction, interaction$par) && !is.null(interaction$fasteval) ## determine whether to split quadscheme into blocks if(dofast) { dosplit <- FALSE } else { ## decide whether the quadrature scheme is too large to handle in one piece needsplit <- oversize.quad(nU=npoints(P), nX=npoints(X)) ## not implemented when savecomputed=TRUE dosplit <- needsplit && !savecomputed if(needsplit && savecomputed) warning(paste("Oversize quadscheme cannot be split into blocks", "because savecomputed=TRUE;", "memory allocation error may occur")) } if(!dosplit) { ## normal case V <- evalInterEngine(X=X, P=P, E=E, interaction=interaction, correction=correction, splitInf=splitInf, ..., precomputed=precomputed, savecomputed=savecomputed) } else { ## Too many quadrature points: split into blocks nX <- npoints(X) nP <- npoints(P) ## Determine which evaluation points are data points Pdata <- E[,2] ## hence which are dummy points Pall <- seq_len(nP) Pdummy <- if(length(Pdata) > 0) Pall[-Pdata] else Pall nD <- length(Pdummy) ## calculate block sizes bls <- quadBlockSizes(nX, nD, announce=TRUE) nblocks <- bls$nblocks nperblock <- 
bls$nperblock ## seqX <- seq_len(nX) EX <- cbind(seqX, seqX) ## for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) ## extract dummy points Di <- P[Pdummy[first:last]] Pi <- superimpose(X, Di, check=FALSE, W=X$window) ## evaluate potential Vi <- evalInterEngine(X=X, P=Pi, E=EX, interaction=interaction, correction=correction, splitInf=splitInf, ..., savecomputed=FALSE) Mi <- attr(Vi, "-Inf") if(iblock == 1) { V <- Vi M <- Mi } else { ## tack on the glm variables for the extra DUMMY points only V <- rbind(V, Vi[-seqX, , drop=FALSE]) if(splitInf && !is.null(M)) M <- c(M, Mi[-seqX]) } } ## The first 'nX' rows of V contain values for X. ## The remaining rows of V contain values for dummy points. if(length(Pdata) == 0) { ## simply discard rows corresponding to data V <- V[-seqX, , drop=FALSE] if(splitInf && !is.null(M)) M <- M[-seqX] } else { ## replace data in correct position ii <- integer(nP) ii[Pdata] <- seqX ii[Pdummy] <- (nX+1):nrow(V) V <- V[ii, , drop=FALSE] if(splitInf && !is.null(M)) M <- M[ii] } attr(V, "-Inf") <- M } return(V) } ## workhorse function that actually calls relevant code to evaluate interaction evalInterEngine <- function(X, P, E, interaction, correction, splitInf=FALSE, ..., Reach = NULL, precomputed=NULL, savecomputed=FALSE) { ## fast evaluator (C code) may exist fasteval <- interaction$fasteval cando <- interaction$can.do.fast par <- interaction$par feopt <- spatstat.options("fasteval") dofast <- !is.null(fasteval) && (is.null(cando) || cando(X, correction,par)) && (feopt %in% c("on", "test")) && (!splitInf || ("splitInf" %in% names(formals(fasteval)))) V <- NULL if(dofast) { if(feopt == "test") message("Calling fasteval") V <- fasteval(X, P, E, interaction$pot, interaction$par, correction, splitInf=splitInf, ...) 
} if(is.null(V)) { ## use generic evaluator for family evaluate <- interaction$family$eval evalargs <- names(formals(evaluate)) if(splitInf && !("splitInf" %in% evalargs)) stop("Sorry, the", interaction$family$name, "interaction family", "does not support calculation of the positive part", call.=FALSE) if(is.null(Reach)) Reach <- reach(interaction) if("precomputed" %in% evalargs) { ## Use precomputed data ## version 1.9-3 onward (pairwise and pairsat families) V <- evaluate(X, P, E, interaction$pot, interaction$par, correction=correction, splitInf=splitInf, ..., Reach=Reach, precomputed=precomputed, savecomputed=savecomputed) } else { ## Cannot use precomputed data ## Object created by earlier version of ppm ## or not pairwise/pairsat interaction V <- evaluate(X, P, E, interaction$pot, interaction$par, correction=correction, splitInf=splitInf, ..., Reach=Reach) } } return(V) } deltasuffstat <- local({ deltasuffstat <- function(model, ..., restrict=c("pairs", "first", "none"), dataonly=TRUE, sparseOK=FALSE, quadsub=NULL, force=FALSE, warn.forced=FALSE, verbose=warn.forced, use.special=TRUE) { stopifnot(is.ppm(model)) sparsegiven <- !missing(sparseOK) restrict <- match.arg(restrict) if(dataonly) { X <- data.ppm(model) nX <- npoints(X) } else { X <- quad.ppm(model) if(!is.null(quadsub)) { z <- is.data(X) z[quadsub] <- FALSE if(any(z)) stop("subset 'quadsub' must include all data points", call.=FALSE) X <- X[quadsub] } nX <- n.quad(X) } ncoef <- length(coef(model)) inte <- as.interact(model) if(!sparseOK && exceedsMaxArraySize(nX, nX, ncoef)) { if(sparsegiven) stop("Array dimensions too large", call.=FALSE) warning("Switching to sparse array code", call.=FALSE) sparseOK <- TRUE } zeroes <- if(!sparseOK) array(0, dim=c(nX, nX, ncoef)) else sparse3Darray(dims=c(nX, nX, ncoef)) if(is.poisson(inte)) return(zeroes) ## Get names of interaction terms in model (including offsets) f <- fitin(model) Inames <- f$Vnames IsOffset <- f$IsOffset hasInf <- !identical(inte$hasInf, 
FALSE) ## Offset terms do not contribute to sufficient statistic if(all(IsOffset) && !hasInf) return(zeroes) ## Nontrivial interaction terms must be computed. ## Look for member function $delta2 in the interaction v <- NULL v.is.full <- FALSE if(use.special) { ## Use specialised $delta2 for interaction,if available if(is.function(delta2 <- inte$delta2)) v <- delta2(X, inte, model$correction, sparseOK=sparseOK) ## Use generic $delta2 for the family, if available if(is.null(v) && is.function(delta2 <- inte$family$delta2)) v <- delta2(X, inte, model$correction, sparseOK=sparseOK) } ## no luck? if(is.null(v)) { if(!force) return(NULL) ## use brute force algorithm if(warn.forced) warning("Reverting to brute force to compute interaction terms", call.=FALSE) v <- if(dataonly) deltasufX(model, sparseOK, verbose=verbose) else deltasufQ(model, quadsub, sparseOK, verbose=verbose) v.is.full <- TRUE } ## extract hard core information deltaInf <- attr(v, "deltaInf") ## ensure 'v' is a 3D array if(length(dim(v)) != 3) { if(is.matrix(v)) { v <- array(v, dim=c(dim(v), 1)) } else if(inherits(v, "sparseMatrix")) { v <- as.sparse3Darray(v) } } if(!sparseOK) { if(inherits(v, "sparse3Darray")) v <- as.array(v) if(inherits(deltaInf, "sparseMatrix")) deltaInf <- as.matrix(deltaInf) } if(restrict != "none") { ## kill contributions from points outside the domain of pseudolikelihood ## (e.g. 
points in the border region) use <- if(dataonly) getppmdatasubset(model) else if(is.null(quadsub)) getglmsubset(model) else getglmsubset(model)[quadsub] if(any(kill <- !use)) { switch(restrict, pairs = { v[kill,kill,] <- 0 }, first = { v[kill,,] <- 0 }, none = {}) if(!is.null(deltaInf)) { switch(restrict, pairs = { deltaInf[kill,kill] <- FALSE }, first = { deltaInf[kill,] <- FALSE }, none = {}) } } } ## Make output array, with planes corresponding to model coefficients if(v.is.full) { ## Planes of 'v' already correspond to coefficients of model cnames <- names(coef(model)) ## Remove any offset interaction terms ## (e.g. Hardcore interaction): these do not contribute to suff stat if(any(IsOffset)) { retain <- is.na(match(cnames, Inames[IsOffset])) v <- v[ , , retain, drop=FALSE] Inames <- Inames[!IsOffset] } result <- v } else { ## Planes of 'v' correspond to interaction terms only. ## Fill out the first order terms with zeroes result <- zeroes if(length(Inames) != dim(v)[3]) stop(paste("Internal error: deltasuffstat:", "number of planes of v =", dim(v)[3], "!= number of interaction terms =", length(Inames)), call.=FALSE) ## Offset terms do not contribute to sufficient statistic if(any(IsOffset)) { v <- v[ , , !IsOffset, drop=FALSE] Inames <- Inames[!IsOffset] } ## Map planes of 'v' into coefficients Imap <- match(Inames, names(coef(model))) if(anyNA(Imap)) stop(paste("Internal error: deltasuffstat:", "cannot match interaction coefficients")) if(length(Imap) > 0) { ## insert 'v' into array result[ , , Imap] <- v } } ## pack up attr(result, "deltaInf") <- deltaInf return(result) } ## compute deltasuffstat using partialModelMatrix deltasufX <- function(model, sparseOK=FALSE, verbose=FALSE) { stopifnot(is.ppm(model)) X <- data.ppm(model) hasInf <- !identical(model$interaction$hasInf, FALSE) nX <- npoints(X) p <- length(coef(model)) m <- model.matrix(model, splitInf=hasInf) if(hasInf) { isInf <- attr(m, "-Inf") hasInf <- !is.null(isInf) } isdata <- 
is.data(quad.ppm(model)) m <- m[isdata, ,drop=FALSE] if(hasInf) isInf <- isInf[isdata] ok <- getppmdatasubset(model) ## canonical statistic before and after deleting X[j] ## mbefore[ , i, j] = h(X[i] | X) ## mafter[ , i, j] = h(X[i] | X[-j]) ## where h(u|x) is the canonical statistic of the *positive* cif dimwork <- c(p, nX, nX) if(!sparseOK) { mafter <- mbefore <- array(t(m), dim=dimwork) isInfafter <- isInfbefore <- if(!hasInf) NULL else matrix(isInf, dim=dimwork[-1]) } else { ## make empty arrays; fill in values later ## (but only where they might change) mafter <- mbefore <- sparse3Darray(dims=dimwork) isInfafter <- isInfbefore <- if(!hasInf) NULL else sparseMatrix(i=integer(0), j=integer(0), x=logical(0), dims=dimwork[-1]) } ## identify close pairs R <- reach(model) if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } ## DO NOT RESTRICT - THIS IS NOW DONE IN deltasuffstat ## filter: I and J must both belong to the nominated subset ## okIJ <- ok[I] & ok[J] ## I <- I[okIJ] ## J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs ........................ 
uniqueI <- unique(I) npairs <- length(uniqueI) pstate <- list() if(verbose) splat("Examining", npairs, "pairs of data points...") ## The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] ## ## Run through pairs for(iter in seq_len(npairs)) { i <- uniqueI[iter] ## all points within 2R J2i <- unique(J2[I2==i]) ## all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] ## neighbours of X[i] XJi <- X[Ji] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji, J2i) if(anyNA(J.i)) stop("Internal error: Ji not a subset of J2i") ## values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- snipModelMatrix(X.i, empty, model, J.i, hasInf) if(hasInf) zzj <- attr(pmj, "-Inf") ## sufficient statistic in reverse order ## h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)] ## for all j pmi <- matrix(, nJi, p) zzi <- logical(nJi) for(k in 1:nJi) { ## j <- Ji[k] ## X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] pmik <- snipModelMatrix(X.ij, Xi, model, nX.i, hasInf) pmi[k, ] <- pmik if(hasInf) zzi[k] <- attr(pmik, "-Inf") } ## if(!sparseOK) { mafter[ , Ji, i] <- t(pmj) mafter[ , i, Ji] <- t(pmi) if(hasInf) { isInfafter[Ji, i] <- zzj isInfafter[i, Ji] <- zzi } } else { mafter[ , Ji, i] <- array(t(pmj), dim=c(p, nJi, 1)) mafter[ , i, Ji] <- array(t(pmi), dim=c(p, 1, nJi)) mbefore[ , Ji, i] <- array(t(m[Ji,]), dim=c(p, nJi, 1)) mbefore[ , i, Ji] <- array(m[i,], dim=c(p, 1, nJi)) if(hasInf) { isInfafter[Ji, i] <- zzj isInfafter[i, Ji] <- zzi isInfbefore[Ji, i] <- isInf[Ji] isInfbefore[i, Ji] <- isInf[i] } } } if(verbose) pstate <- progressreport(iter, npairs, state=pstate) } } ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta <- mbefore - mafter ## delta[i, j, ] = h(X[i] | X) - h(X[i] | X[-j]) delta <- aperm(delta, c(2,3,1)) ## if(hasInf) { deltaInf <- isInfbefore - isInfafter attr(delta, "deltaInf") <- deltaInf } return(delta) } deltasufQ <- 
function(model, quadsub, sparseOK, verbose=FALSE) { stopifnot(is.ppm(model)) hasInf <- !identical(model$interaction$hasInf, FALSE) p <- length(coef(model)) Q <- quad.ppm(model) ok <- getglmsubset(model) m <- model.matrix(model, splitInf=hasInf) if(hasInf) { isInf <- attr(m, "-Inf") hasInf <- !is.null(isInf) } if(!is.null(quadsub)) { Q <- Q[quadsub] m <- m[quadsub, , drop=FALSE] ok <- ok[quadsub] if(hasInf) isInf <- isInf[quadsub] } X <- Q$data U <- union.quad(Q) nU <- npoints(U) nX <- npoints(X) isdata <- is.data(Q) isdummy <- !isdata ## canonical statistic before and after adding/deleting U[j] dimwork <- c(p, nU, nU) if(!sparseOK) { mafter <- mbefore <- array(t(m), dim=dimwork) delta <- array(0, dim=dimwork) isInfafter <- isInfbefore <- deltaInf <- if(!hasInf) NULL else matrix(isInf, dim=dimwork[-1]) } else { ## make empty arrays; fill in values later ## [but only where they might change] mafter <- mbefore <- delta <- sparse3Darray(dims=dimwork) isInfafter <- isInfbefore <- deltaInf <- if(!hasInf) NULL else sparseMatrix(i=integer(0), j=integer(0), x=logical(0), dims=dimwork[-1]) } ## mbefore[ , i, j] = h(U[i] | X) ## For data points X[j] ## mafter[ , i, j] = h(U[i] | X[-j]) ## delta[ , i, j] = h(U[i] | X) - h(U[i] | X[-j]) ## For dummy points X[j] ## mafter[ , i, j] = h(U[i] | X \cup U[j]) ## delta[ , i, j] = h(U[i] | X \cup U[j]) - h(U[i] | X) changesign <- ifelseAB(isdata, -1, 1) ## identify close pairs of quadrature points R <- reach(model) if(is.finite(R)) { cl <- closepairs(U, R, what="indices") I <- cl$i J <- cl$j cl2 <- closepairs(U, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { ## either infinite reach, or something wrong IJ <- expand.grid(I=1:nU, J=1:nX) IJ <- IJ[ with(IJ, I != J), ] I2 <- I <- IJ$I J2 <- J <- IJ$J } ## filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] ## if(length(I) > 0 && length(J) > 0) { ## .............. loop over pairs of quadrature points ............... 
## Run through pairs uI <- unique(I) zI <- isdata[uI] uIdata <- uI[zI] uIdummy <- uI[!zI] nuIdata <- length(uIdata) nuIdummy <- length(uIdummy) if(verbose) splat("Examining", nuIdata, "+", nuIdummy, "=", nuIdata + nuIdummy, "pairs of points") ## Run through pairs i, j where 'i' is a data point pstate <- list() for(iter in seq_len(nuIdata)) { i <- uIdata[iter] ## all DATA points within 2R of X[i] ## This represents X[-i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] ## all QUADRATURE points within R of X[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] ## data points which are neighbours of X[i] XJi <- X[Ji[isd]] ## dummy points which are neighbours of X[i] DJi <- U[Ji[!isd]] ## replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) ## index of XJi in X.i J.i <- match(Ji[isd], J2i) if(anyNA(J.i)) stop("Internal error: Ji[isd] not a subset of J2i") ## index of DJi in superimpose(X.i, DJi) JDi <- nX.i + seq_len(sum(!isd)) ## values of sufficient statistic ## h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] ## for all j pmj <- snipModelMatrix(X.i, DJi, model, c(J.i, JDi), hasInf) ## mafter[ , Ji, i] <- t(pmj) if(hasInf) isInfafter[Ji, i] <- attr(pmj, "-Inf") if(sparseOK) { mbefore[ , Ji, i] <- array(t(m[Ji,]), dim=c(p, nJi, 1)) if(hasInf) isInfbefore[Ji, i] <- isInf[Ji] } } if(verbose) pstate <- progressreport(iter, nuIdata, state=pstate) } ## Run through pairs i, j where 'i' is a dummy point pstate <- list() for(iter in seq_len(nuIdummy)) { i <- uIdummy[iter] ## all DATA points within 2R of U[i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] ## all QUADRATURE points within R of U[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] JiData <- Ji[isd] JiDummy <- Ji[!isd] ## data points which are neighbours of U[i] XJi <- X[JiData] ## dummy points which are neighbours of U[i] DJi <- U[JiDummy] ## replace X \cup U[i] by (X \cap b(0, 2R)) \cup U[i] J2Ui <- c(J2i, i) XUi <- U[J2Ui] nXUi <- length(J2Ui) ## index of 
XJi in X.i J.i <- match(JiData, J2Ui) if(anyNA(J.i)) stop("Internal error: Ji[isd] not a subset of J2i") ## index of DJi in superimpose(X.i, DJi) JDi <- nXUi + seq_len(length(JiDummy)) ## values of sufficient statistic ## h(X[j] | X \cup U[i]) ## for all j pmj <- snipModelMatrix(XUi, DJi, model, c(J.i, JDi), hasInf) ## JiSort <- c(JiData, JiDummy) if(!sparseOK) { mafter[ , JiSort, i] <- t(pmj) if(hasInf) isInfafter[JiSort, i] <- attr(pmj, "-Inf") } else { mafter[ , JiSort, i] <- array(t(pmj), dim=c(p, nJi, 1)) mbefore[ , JiSort, i] <- array(t(m[JiSort,]), dim=c(p, nJi, 1)) if(hasInf) { isInfafter[JiSort, i] <- attr(pmj, "-Inf") isInfbefore[JiSort, i] <- isInf[JiSort] } } } if(verbose) pstate <- progressreport(iter, nuIdummy, state=pstate) } } ## delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta[ , , isdata] <- mbefore[, , isdata] - mafter[ , , isdata] ## delta[ ,i,j] = h(X[i] | X \cup U[j]) - h(X[i] | X) delta[ , , isdummy] <- mafter[, , isdummy] - mbefore[ , , isdummy] ## rearrange: new delta[i,j,] = old delta[, i, j] delta <- aperm(delta, c(2,3,1)) ## if(hasInf) { deltaInf[ , isdata] <- isInfbefore[ , isdata] - isInfafter[ , isdata] deltaInf[ , isdummy] <- isInfafter[ , isdummy] - isInfbefore[ , isdummy] attr(delta, "deltaInf") <- deltaInf } return(delta) } snipModelMatrix <- function(X, D, model, retain, splitInf=FALSE) { M <- partialModelMatrix(X, D, model, splitInf=splitInf) if(splitInf) isInf <- attr(M, "-Inf") M <- M[retain, , drop=FALSE] if(splitInf) attr(M, "-Inf") <- isInf[retain] return(M) } deltasuffstat }) spatstat/R/hyperframe.R0000644000176200001440000004734313611503715014620 0ustar liggesusers# # hyperframe.R # # $Revision: 1.72 $ $Date: 2020/01/21 04:15:26 $ # hyperframe <- local({ hyperframe <- function(..., row.names=NULL, check.rows=FALSE, check.names=TRUE, stringsAsFactors=default.stringsAsFactors()) { aarg <- list(...) 
nama <- names(aarg) ## number of columns (= variables) nvars <- length(aarg) if(nvars == 0) { ## zero columns - return result <- list(nvars=0, ncases=0, vname=character(0), vtype=factor(, levels=c("dfcolumn","hypercolumn","hyperatom")), vclass=character(0), df=data.frame(), hyperatoms=list(), hypercolumns=list()) class(result) <- c("hyperframe", class(result)) return(result) } ## check column names if(is.null(nama)) nama <- paste("V", 1:nvars, sep="") else if(any(unnamed <- (nama == ""))) nama[unnamed] <- paste("V", seq_len(sum(unnamed)), sep="") nama <- make.names(nama, unique=TRUE) names(aarg) <- nama ## Each argument must be either ## - a vector suitable as a column in a data frame ## - a list of objects of the same class ## - a single object of some class dfcolumns <- sapply(aarg, is.dfcolumn) hypercolumns <- sapply(aarg, is.hypercolumn) hyperatoms <- !(dfcolumns | hypercolumns) ## Determine number of rows (= cases) columns <- dfcolumns | hypercolumns if(!any(columns)) { ncases <- 1 } else { heights <- rep.int(1, nvars) heights[columns] <- lengths(aarg[columns]) u <- unique(heights) if(length(u) > 1) { u <- u[u != 1] if(length(u) > 1) stop(paste("Column lengths are inconsistent:", paste(u, collapse=","))) } ncases <- u if(ncases > 1 && all(heights[dfcolumns] == 1)) { ## force the data frame to have 'ncases' rows aarg[dfcolumns] <- lapply(aarg[dfcolumns], rep, ncases) heights[dfcolumns] <- ncases } if(any(stubs <- hypercolumns & (heights != ncases))) { ## hypercolumns of height 1 should be hyperatoms aarg[stubs] <- lapply(aarg[stubs], "[[", i=1L) hypercolumns[stubs] <- FALSE hyperatoms[stubs] <- TRUE } } ## Collect the data frame columns into a data frame if(!any(dfcolumns)) df <- as.data.frame(matrix(, ncases, 0), row.names=row.names) else { df <- do.call(data.frame, append(aarg[dfcolumns], list(row.names=row.names, check.rows=check.rows, check.names=check.names, stringsAsFactors=stringsAsFactors))) names(df) <- nama[dfcolumns] } ## Storage type of each 
variable vtype <- character(nvars) vtype[dfcolumns] <- "dfcolumn" vtype[hypercolumns] <- "hypercolumn" vtype[hyperatoms] <- "hyperatom" vtype=factor(vtype, levels=c("dfcolumn","hypercolumn","hyperatom")) ## Class of each variable vclass <- character(nvars) if(any(dfcolumns)) vclass[dfcolumns] <- unlist(lapply(as.list(df), class1)) if(any(hyperatoms)) vclass[hyperatoms] <- unlist(lapply(aarg[hyperatoms], class1)) if(any(hypercolumns)) vclass[hypercolumns] <- unlist(lapply(aarg[hypercolumns], class1of1)) ## Put the result together result <- list(nvars=nvars, ncases=ncases, vname=nama, vtype=vtype, vclass=vclass, df=df, hyperatoms=aarg[hyperatoms], hypercolumns=aarg[hypercolumns]) class(result) <- c("hyperframe", class(result)) return(result) } dateclasses <- is.dfcolumn <- function(x) { is.atomic(x) && (is.vector(x) || is.factor(x) || inherits(x, c("POSIXlt", "POSIXct", "Date"))) } is.hypercolumn <- function(x) { if(!is.list(x)) return(FALSE) if(inherits(x, c("listof", "anylist"))) return(TRUE) if(length(x) <= 1) return(TRUE) cla <- lapply(x, class) return(length(unique(cla)) == 1) } class1 <- function(x) { class(x)[1L] } class1of1 <- function(x) { class(x[[1L]])[1L] } hyperframe }) is.hyperframe <- function(x) inherits(x, "hyperframe") print.hyperframe <- function(x, ...) { ux <- unclass(x) nvars <- ux$nvars ncases <- ux$ncases if(nvars * ncases == 0) { splat("NULL hyperframe with", ncases, ngettext(ncases, "row (=case)", "rows (=cases)"), "and", nvars, ngettext(nvars, "column (=variable)", "columns (=variables)")) } else { if(waxlyrical('gory')) cat("Hyperframe:\n") print(as.data.frame(x, discard=FALSE), ...) 
  }
  return(invisible(NULL))
}

## Dimensions of a hyperframe:
## c(number of rows (cases), number of columns (variables))
dim.hyperframe <- function(x) { with(unclass(x), c(ncases, nvars)) }

## Summary method for hyperframes.
## Collects dimensions, variable names, the class and storage type of
## each column, a summary of the ordinary data-frame columns, and
## (unless brief=TRUE) a printable table 'allcols' in which columns of
## objects are represented by NA placeholders.
summary.hyperframe <- function(object, ..., brief=FALSE) {
  x <- unclass(object)
  y <- list(
            nvars = x$nvars,
            ncases = x$ncases,
            dim = c(x$ncases, x$nvars),
            typeframe = data.frame(VariableName=x$vname, Class=x$vclass),
            storage = x$vtype,
            col.names = x$vname)
  ## named vector mapping variable name -> class of its entries
  classes <- x$vclass
  names(classes) <- x$vname
  y$classes <- classes
  # Ordinary data frame columns
  df <- x$df
  y$dfnames <- colnames(df)
  y$df <- if(length(df) > 0 && !brief) summary(df) else NULL
  y$row.names <- row.names(df)
  # insert into full array
  if(!brief && x$nvars > 0) {
    isobject <- (x$vtype != "dfcolumn")
    nobj <- sum(isobject)
    if(nobj == 0) {
      allcols <- y$df
    } else {
      ## columns of objects are shown as NA_character_ placeholders
      nas <- rep(list(NA_character_), nobj)
      names(nas) <- x$vname[isobject]
      allcols <- do.call(cbind, append(list(y$df), nas))
      ## restore the original left-to-right order of the variables
      acnames <- c(colnames(df), names(nas))
      allcols <- allcols[ , match(x$vname, acnames), drop=FALSE]
    }
    ## prepend a row stating the class of each column
    pclass <- padtowidth(paren(classes), colnames(allcols), justify="right")
    allcols <- as.table(rbind(class=pclass, as.table(allcols)))
    row.names(allcols) <- rep("", nrow(allcols))
    y$allcols <- allcols
  }
  class(y) <- c("summary.hyperframe", class(y))
  return(y)
}

print.summary.hyperframe <- function(x, ...)
{ nvars <- x$nvars ncases <- x$ncases splat(if(nvars * ncases == 0) "NULL hyperframe" else "hyperframe", "with", ncases, ngettext(ncases, "row", "rows"), "and", nvars, ngettext(nvars, "column", "columns")) if(nvars == 0) return(invisible(NULL)) print(if(any(x$storage == "dfcolumn")) x$allcols else noquote(x$classes)) return(invisible(NULL)) } names.hyperframe <- function(x) { unclass(x)$vname } "names<-.hyperframe" <- function(x, value) { x <- unclass(x) stopifnot(is.character(value)) value <- make.names(value) if(length(value) != x$nvars) stop("Incorrect length for vector of names") vtype <- x$vtype names(x$df) <- value[vtype == "dfcolumn"] names(x$hyperatoms) <- value[vtype == "hyperatom"] names(x$hypercolumns) <- value[vtype == "hypercolumn"] x$vname <- value class(x) <- c("hyperframe", class(x)) return(x) } row.names.hyperframe <- function(x) { return(row.names(unclass(x)$df)) } "row.names<-.hyperframe" <- function(x, value) { y <- unclass(x) row.names(y$df) <- value class(y) <- c("hyperframe", class(y)) return(y) } dimnames.hyperframe <- function(x) { ux <- unclass(x) return(list(row.names(ux$df), ux$vname)) } "dimnames<-.hyperframe" <- function(x, value) { if(!is.list(value) || length(value) != 2 || !all(sapply(value, is.character))) stop("Invalid 'dimnames' for a hyperframe", call.=FALSE) rn <- value[[1L]] cn <- value[[2L]] d <- dim(x) if(length(rn) != d[1L]) stop(paste("Row names have wrong length:", length(rn), "should be", d[1L]), call.=FALSE) if(length(cn) != d[2L]) stop(paste("Column names have wrong length:", length(cn), "should be", d[2L]), call.=FALSE) y <- unclass(x) row.names(y$df) <- value[[1L]] y$vname <- value[[2]] class(y) <- c("hyperframe", class(y)) return(y) } ## conversion to hyperframe as.hyperframe <- function(x, ...) { UseMethod("as.hyperframe") } as.hyperframe.hyperframe <- function(x, ...) 
{
  return(x)
}

## Convert a data frame to a hyperframe:
## each data-frame column becomes an ordinary (dfcolumn) variable.
as.hyperframe.data.frame <- function(x, ..., stringsAsFactors=FALSE) {
  xlist <- if(missing(x)) NULL else as.list(x)
  do.call(hyperframe,
          resolve.defaults(
                           xlist,
                           list(...),
                           list(row.names=rownames(x),
                                stringsAsFactors=stringsAsFactors),
                           .StripNull=TRUE))
}

## Convert a list of objects to a one-column hyperframe;
## the column is named after the calling expression where possible.
as.hyperframe.anylist <-
as.hyperframe.listof <- function(x, ...) {
  if(!missing(x)) {
    xname <- sensiblevarname(short.deparse(substitute(x)), "x")
    xlist <- list(x)
    names(xlist) <- xname
  } else xlist <- NULL
  do.call(hyperframe,
          resolve.defaults(
                           xlist,
                           list(...),
                           list(row.names=rownames(x)),
                           .StripNull=TRUE))
}

## Fallback: coerce via as.data.frame
as.hyperframe.default <- function(x, ...) {
  as.hyperframe(as.data.frame(x, ...))
}

#### conversion to other types

## Convert a hyperframe to a data frame.
## If discard=TRUE (default), columns of objects are dropped
## (with a warning when warn=TRUE); otherwise each such column is
## replaced by a character string "(class)" repeated in every row.
as.data.frame.hyperframe <- function(x, row.names = NULL,
                                     optional = FALSE, ...,
                                     discard=TRUE, warn=TRUE) {
  ux <- unclass(x)
  if(is.null(row.names))
    row.names <- row.names(ux$df)
  vtype <- ux$vtype
  vclass <- ux$vclass
  dfcol <- (vtype == "dfcolumn")
  if(discard) {
    nhyper <- sum(!dfcol)
    if(nhyper > 0 && warn)
      warning(paste(nhyper,
                    ngettext(nhyper, "variable", "variables"),
                    "discarded in conversion to data frame"))
    df <- as.data.frame(ux$df, row.names=row.names, optional=optional, ...)
  } else {
    lx <- as.list(x)
    nrows <- ux$ncases
    vclassstring <- paren(vclass)
    if(any(!dfcol))
      ## represent object columns by their class name in parentheses
      lx[!dfcol] <- lapply(as.list(vclassstring[!dfcol]),
                           rep.int, times=nrows)
    df <- do.call(data.frame, append(lx, list(row.names=row.names)))
    colnames(df) <- ux$vname
  }
  return(df)
}

as.list.hyperframe <- function(x, ...)
{
  ## Convert a hyperframe to a named list of columns.
  ## Data-frame columns become ordinary vectors; hypercolumns become
  ## lists of objects; each hyperatom is replicated in every row.
  ux <- unclass(x)
  out <- vector(mode="list", length=ux$nvars)
  vtype <- ux$vtype
  df <- ux$df
  if(any(dfcol <- (vtype == "dfcolumn")))
    out[dfcol] <- as.list(df)
  if(any(hypcol <- (vtype == "hypercolumn"))) {
    hc <- lapply(ux$hypercolumns, as.solist, demote=TRUE)
    out[hypcol] <- hc
  }
  if(any(hatom <- (vtype == "hyperatom"))) {
    ## a hyperatom is a single object shared by all rows: replicate it
    ha <- ux$hyperatoms
    names(ha) <- NULL
    hacol <- lapply(ha, list)
    hacol <- lapply(hacol, rep.int, times=ux$ncases)
    hacol <- lapply(hacol, as.solist, demote=TRUE)
    out[hatom] <- hacol
  }
  out <- lapply(out, "names<-", value=row.names(df))
  names(out) <- names(x)
  return(out)
}

# evaluation

# eval.hyper <- function(e, h, simplify=TRUE, ee=NULL) {
#  .Deprecated("with.hyperframe", package="spatstat")
#  if(is.null(ee))
#    ee <- as.expression(substitute(e))
#  with.hyperframe(h, simplify=simplify, ee=ee)
# }

## Evaluate an expression in each row of a hyperframe.
## 'expr' is evaluated with the entries of the row as variables,
## with enclosing environment 'enclos' (default: the caller's frame).
## If simplify=TRUE and every row yields an atomic vector of the same
## length, the results are collapsed to a vector or matrix; otherwise
## the result is returned as a column (list) of objects.
with.hyperframe <- function(data, expr, ..., simplify=TRUE,
                            ee=NULL, enclos=NULL) {
  if(!inherits(data, "hyperframe"))
    stop("data must be a hyperframe")
  if(is.null(ee))
    ee <- as.expression(substitute(expr))
  if(is.null(enclos))
    enclos <- parent.frame()
  n <- nrow(data)
  out <- vector(mode="list", length=n)
  datalist <- as.list(data)
  ## bug fix: use seq_len(n) rather than 1:n so that a hyperframe
  ## with zero rows is handled correctly (1:n would give c(1, 0))
  for(i in seq_len(n)) {
    rowi <- lapply(datalist, "[[", i=i)  # ensures the result is always a list
    outi <- eval(ee, rowi, enclos)
    if(!is.null(outi))
      out[[i]] <- outi
  }
  names(out) <- row.names(data)
  if(simplify && all(unlist(lapply(out, is.vector)))) {
    # if all results are atomic vectors of equal length,
    # return a matrix or vector.
    lenfs <- lengths(out)
    if(all(unlist(lapply(out, is.atomic))) && length(unique(lenfs)) == 1) {
      out <- t(as.matrix(as.data.frame(out)))
      row.names(out) <- row.names(data)
      out <- out[,,drop=TRUE]
      return(out)
    }
  }
  out <- hyperframe(result=out, row.names=row.names(data))$result
  return(out)
}

cbind.hyperframe <- function(...) {
  aarg <- list(...)
narg <- length(aarg) if(narg == 0) return(hyperframe()) namarg <- names(aarg) if(is.null(namarg)) namarg <- rep.int("", narg) ishyper <- unlist(lapply(aarg, inherits, what="hyperframe")) isdf <- unlist(lapply(aarg, inherits, what="data.frame")) columns <- list() for(i in 1:narg) { if(ishyper[i] || isdf[i]){ if(ncol(aarg[[i]]) > 0) { newcolumns <- as.list(aarg[[i]]) if(namarg[i] != "") names(newcolumns) <- paste(namarg[i], ".", names(newcolumns), sep="") columns <- append(columns, newcolumns) } } else { nextcolumn <- list(aarg[[i]]) names(nextcolumn) <- namarg[i] columns <- append(columns, nextcolumn) } } result <- do.call(hyperframe, columns) ## tack on row names rona <- lapply(aarg, row.names) good <- (lengths(rona) == nrow(result)) if(any(good)) { rona <- rona[[min(which(good))]] row.names(result) <- make.names(rona, unique=TRUE) } return(result) } rbind.hyperframe <- function(...) { argh <- list(...) if(length(argh) == 0) return(NULL) # convert them all to hyperframes argh <- lapply(argh, as.hyperframe) # nargh <- length(argh) if(nargh == 1) return(argh[[1L]]) # check for compatibility of dimensions & names dfs <- lapply(argh, as.data.frame, discard=FALSE) dfall <- do.call(rbind, dfs) # check that data frame columns also match dfs0 <- lapply(argh, as.data.frame, discard=TRUE, warn=FALSE) df0all <- do.call(rbind, dfs0) # assemble data rslt <- list() nam <- names(dfall) nam0 <- names(df0all) for(k in seq_along(nam)) { nama <- nam[k] if(nama %in% nam0) { # data frame column: already made rslt[[k]] <- dfall[,k] } else { # hypercolumns or hyperatoms: extract them hdata <- lapply(argh, "[", j=nama, drop=FALSE) hdata <- lapply(lapply(hdata, as.list), getElement, name=nama) # append them hh <- hdata[[1L]] for(j in 2:nargh) { hh <- append(hh, hdata[[j]]) } rslt[[k]] <- hh } } ## collect the row names rona <- sapply(dfs, row.names) rona <- make.names(rona, unique=TRUE) ## make hyperframe names(rslt) <- nam out <- do.call(hyperframe, append(rslt, 
list(stringsAsFactors=FALSE, row.names=rona))) return(out) } plot.hyperframe <- function(x, e, ..., main, arrange=TRUE, nrows=NULL, ncols=NULL, parargs=list(mar=mar * marsize), marsize=1, mar=c(1,1,3,1)) { xname <- short.deparse(substitute(x)) main <- if(!missing(main)) main else xname mar <- rep(mar, 4)[1:4] if(missing(e)) { # default: plot first column that contains objects ok <- (summary(x)$storage %in% c("hypercolumn", "hyperatom")) if(any(ok)) { j <- min(which(ok)) x <- x[,j, drop=TRUE, strip=FALSE] x <- as.solist(x, demote=TRUE) plot(x, ..., main=main, arrange=arrange, nrows=nrows, ncols=ncols) return(invisible(NULL)) } else { # hyperframe does not contain any objects # invoke plot.data.frame x <- as.data.frame(x) plot(x, ..., main=main) return(invisible(NULL)) } } if(!is.language(e)) stop(paste("Argument e should be a call or an expression;", "use quote(...) or expression(...)")) ee <- as.expression(e) if(!arrange) { # No arrangement specified: just evaluate the plot expression 'nr' times with(x, ee=ee) return(invisible(NULL)) } # Arrangement # Decide whether to plot a main header banner <- (sum(nchar(as.character(main))) > 0) if(length(main) > 1) main <- paste(main, collapse="\n") nlines <- if(!is.character(main)) 1 else length(unlist(strsplit(main, "\n"))) # determine arrangement of plots # arrange like mfrow(nrows, ncols) plus a banner at the top n <- summary(x)$ncases if(is.null(nrows) && is.null(ncols)) { nrows <- as.integer(floor(sqrt(n))) ncols <- as.integer(ceiling(n/nrows)) } else if(!is.null(nrows) && is.null(ncols)) ncols <- as.integer(ceiling(n/nrows)) else if(is.null(nrows) && !is.null(ncols)) nrows <- as.integer(ceiling(n/ncols)) else stopifnot(nrows * ncols >= length(x)) nblank <- ncols * nrows - n # declare layout mat <- matrix(c(seq_len(n), numeric(nblank)), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) if(banner) { # Increment existing panel numbers # New panel 1 is the banner panels <- (mat > 0) mat[panels] <- 
mat[panels] + 1L mat <- rbind(rep.int(1,ncols), mat) heights <- c(0.1 * (1 + nlines), heights) } # initialise plot layout(mat, heights=heights) # plot banner if(banner) { opa <- par(mar=rep.int(0,4), xpd=TRUE) plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0,main, cex=cex) } # plot panels npa <- do.call(par, parargs) if(!banner) opa <- npa with(x, ee=ee) # revert layout(1) par(opa) return(invisible(NULL)) } str.hyperframe <- function(object, ...) { d <- dim(object) x <- unclass(object) argh <- resolve.defaults(list(...), list(nest.lev=0, indent.str=" ..")) cat(paste("'hyperframe':\t", d[1L], ngettext(d[1L], "row", "rows"), "and", d[2L], ngettext(d[2L], "column", "columns"), "\n")) nr <- d[1L] nc <- d[2L] if(nc > 0) { vname <- x$vname vclass <- x$vclass vtype <- as.character(x$vtype) indentstring <- with(argh, paste(rep.int(indent.str, nest.lev), collapse="")) for(j in 1:nc) { tag <- paste("$", vname[j]) switch(vtype[j], dfcolumn={ desc <- vclass[j] if(nr > 0) { vals <- object[1:min(nr,3),j,drop=TRUE] vals <- paste(paste(format(vals), collapse=" "), "...") } else vals <- "" }, hypercolumn=, hyperatom={ desc <- "objects of class" vals <- vclass[j] }) cat(paste(paste(indentstring, tag, sep=""), ":", desc, vals, "\n")) } } return(invisible(NULL)) } subset.hyperframe <- function(x, subset, select, ...) { stopifnot(is.hyperframe(x)) r <- if(missing(subset)) { rep_len(TRUE, nrow(x)) } else { r <- eval(substitute( with(x, e, enclos=parent.frame()), list(e=substitute(subset)))) if (!is.logical(r)) stop("'subset' must be logical") r & !is.na(r) } vars <- if(missing(select)) { TRUE } else { nl <- as.list(seq_len(ncol(x))) names(nl) <- names(x) eval(substitute(select), nl, parent.frame()) } nama <- names(x) names(nama) <- nama vars <- nama[vars] z <- x[i=r, j=vars, ...] return(z) } head.hyperframe <- function (x, n = 6L, ...) 
{
  stopifnot(length(n) == 1L)
  ## negative n means 'all but the last |n| rows', as in head.data.frame
  n <- if(n < 0L) max(nrow(x) + n, 0L) else min(n, nrow(x))
  x[seq_len(n), , drop = FALSE]
}

## Last n rows of a hyperframe (negative n drops the first |n| rows),
## by analogy with tail.data.frame.
tail.hyperframe <- function(x, n = 6L, ...) {
  stopifnot(length(n) == 1L)
  nrx <- nrow(x)
  n <- if(n < 0L) max(nrx + n, 0L) else min(n, nrx)
  sel <- seq.int(to = nrx, length.out = n)
  x[sel, , drop = FALSE]
}

## Interactive editing of a hyperframe.
## Only the ordinary data-frame columns are editable; edited values
## are copied back, and columns deleted in the editor are dropped.
edit.hyperframe <- function(name, ...) {
  x <- name
  isdf <- unclass(x)$vtype == "dfcolumn"
  if(!any(isdf)) {
    warning("No columns of editable data", call.=FALSE)
    return(x)
  }
  y <- x[,isdf]
  ynew <- edit(as.data.frame(y), ...)
  xnew <- x
  for(na in names(ynew)) xnew[,na] <- ynew[,na]
  ## columns removed in the editor are removed from the result
  losenames <- setdiff(names(y), names(ynew))
  for(na in losenames) xnew[,na] <- NULL
  return(xnew)
}
spatstat/R/rknn.R0000644000176200001440000000203213333543255013414 0ustar liggesusers#
# rknn.R
#
# Distribution of distance to k-th nearest point in d dimensions
# (Poisson process of intensity lambda)
#
# $Revision: 1.2 $  $Date: 2009/12/31 01:33:44 $
#

## Density of the distance from a fixed location to the k-th nearest
## point of a homogeneous Poisson process of intensity lambda in R^d.
## The count of points within distance r is Poisson with mean
## lambda * alpha.d * r^d, so R^d ~ Gamma(shape=k, rate=lambda*alpha.d).
dknn <- function(x, k=1, d=2, lambda=1) {
  validposint(k, "dknn")
  validposint(d, "dknn")
  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))  # volume of the unit d-ball
  y <- dgamma(x^d, shape=k, rate=lambda * alpha.d)
  y <- y * d * x^(d-1)  # Jacobian of the substitution r -> r^d
  return(y)
}

## Cumulative distribution function of the same distance
pknn <- function(q, k=1, d=2, lambda=1) {
  validposint(k, "pknn")
  validposint(d, "pknn")
  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))
  p <- pgamma(q^d, shape=k, rate=lambda * alpha.d)
  return(p)
}

## Quantile function of the same distance
qknn <- function(p, k=1, d=2, lambda=1) {
  validposint(k, "qknn")
  validposint(d, "qknn")
  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))
  y <- qgamma(p, shape=k, rate=lambda * alpha.d)
  z <- y^(1/d)
  return(z)
}

## Random generation of k-th nearest neighbour distances
rknn <- function(n, k=1, d=2, lambda=1) {
  validposint(k, "rknn")
  validposint(d, "rknn")
  alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.))
  y <- rgamma(n, shape=k, rate=lambda * alpha.d)
  x <- y^(1/d)
  return(x)
}
spatstat/R/relrisk.R0000644000176200001440000004406713574306172014127 0ustar liggesusers#
#  relrisk.R
#
#  Estimation of relative risk
#
#  $Revision: 1.44 $  $Date: 2019/12/12 00:37:13 $
#

relrisk <- function(X, ...)
UseMethod("relrisk") relrisk.ppp <- local({ relrisk.ppp <- function(X, sigma=NULL, ..., varcov=NULL, at=c("pixels", "points"), relative=FALSE, adjust=1, edge=TRUE, diggle=FALSE, se=FALSE, casecontrol=TRUE, control=1, case) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) npts <- npoints(X) Y <- split(X) uX <- unmark(X) types <- names(Y) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") casecontrol <- casecontrol && (ntypes == 2) if((control.given || case.given) && !(casecontrol || relative)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE and", if(ntypes==2) "casecontrol=FALSE" else "there are more than 2 types of points")) } marx <- marks(X) imarks <- as.integer(marx) lev <- levels(marx) ## compute bandwidth (default bandwidth selector is bw.relrisk) ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, adjust=adjust, bwfun=bw.relrisk, x=X) sigma <- ker$sigma varcov <- ker$varcov ## determine smoothing parameters if(bandwidth.is.infinite(sigma)) edge <- FALSE SmoothPars <- resolve.defaults(list(sigma=sigma, varcov=varcov, at=at, edge=edge, diggle=diggle), list(...)) ## if(se) { ## determine other bandwidth for variance estimation VarPars <- SmoothPars if(bandwidth.is.infinite(sigma)) { varconst <- 1 } else if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(sigma)) VarPars$sigma <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) VarPars$varcov <- varcov/2 } if(edge) { ## evaluate edge correction weights edgeim <- do.call(second.moment.calc, append(list(x=uX, what="edge"), SmoothPars)) if(diggle || at == "points") { edgeX <- safelookup(edgeim, uX, warn=FALSE) diggleX <- 1/edgeX diggleX[!is.finite(diggleX)] <- 0 } edgeim <- edgeim[Window(X), drop=FALSE] 
} } ## ......................................... ## compute intensity estimates for each type ## ......................................... switch(at, pixels = { ## intensity estimates of each type Deach <- do.call(density.splitppp, append(list(x=Y), SmoothPars)) ## compute intensity estimate for unmarked pattern Dall <- im.apply(Deach, sum, check=FALSE) ## WAS: Dall <- Reduce("+", Deach) ## variance terms if(se) { if(!edge) { ## no edge correction Veach <- do.call(density.splitppp, append(list(x=Y), VarPars)) } else if(!diggle) { ## edge correction e(u) Veach <- do.call(density.splitppp, append(list(x=Y), VarPars)) Veach <- lapply(Veach, "/", e2=edgeim) } else { ## Diggle edge correction e(x_i) Veach <- mapply(density.ppp, x=Y, weights=split(diggleX, marx), MoreArgs=VarPars, SIMPLIFY=FALSE) } Veach <- lapply(Veach, "*", varconst) Vall <- im.apply(Veach, sum, check=FALSE) ## WAS: Vall <- Reduce("+", Veach) } }, points = { ## intensity estimates of each type **at each data point** ## dummy variable matrix dumm <- matrix(0, npts, ntypes) dumm[cbind(seq_len(npts), imarks)] <- 1 colnames(dumm) <- lev Deach <- do.call(density.ppp, append(list(x=uX, weights=dumm), SmoothPars)) ## compute intensity estimate for unmarked pattern Dall <- rowSums(Deach) ## variance terms if(se) { if(!edge) { ## no edge correction Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm), VarPars)) } else if(!diggle) { ## edge correction e(u) Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm), VarPars)) Veach <- Veach * diggleX } else { ## Diggle edge correction e(x_i) Veach <- do.call(density.ppp, append(list(x=uX, weights=dumm * diggleX), VarPars)) } Veach <- Veach * varconst Vall <- rowSums(Veach) } }) ## ......................................... ## compute probabilities/risks ## ......................................... 
if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, levels(marks(X))) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, levels(marks(X))) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } ## compute ...... switch(at, pixels = { ## compute probability of case pcase <- Deach[[icase]]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values nbg <- badvalues(pcase) if(any(nbg)) { ## apply l'Hopital's rule: ## p(case) = 1{nearest neighbour is case} distcase <- distmap(Y[[icase]], xy=pcase) distcontrol <- distmap(Y[[icontrol]], xy=pcase) closecase <- eval.im(as.integer(distcase < distcontrol)) pcase[nbg] <- closecase[nbg] } if(!relative) { if(!se) { result <- pcase } else { Vcase <- Veach[[icase]] NUM <- eval.im(Vcase * (1-2*pcase) + Vall * pcase^2) SE <- eval.im(sqrt(pmax(NUM, 0))/Dall) result <- solist(estimate=pcase, SE=SE) } } else { rcase <- eval.im(ifelse(pcase < 1, pcase/(1-pcase), NA)) if(!se) { result <- rcase } else { Vcase <- Veach[[icase]] Vctrl <- Veach[[icontrol]] Dctrl <- Deach[[icontrol]] NUM <- eval.im(Vcase + Vctrl * rcase^2) SE <- eval.im(sqrt(pmax(NUM, 0))/Dctrl) result <- solist(estimate=rcase, SE=SE) } } }, points={ ## compute probability of case pcase <- Deach[,icase]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values if(any(nbg <- badvalues(pcase))) { ## apply 
l'Hopital's rule nntype <- imarks[nnwhich(X)] pcase[nbg] <- as.integer(nntype[nbg] == icase) } if(!relative) { if(!se) { result <- pcase } else { NUM <- Veach[,icase] * (1-2*pcase) + Vall * pcase^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=pcase, SE=SE) } } else { rcase <- ifelse(pcase < 1, pcase/(1-pcase), NA) if(!se) { result <- rcase } else { NUM <- Veach[,icase] + Veach[,icontrol] * rcase^2 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=rcase, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else if(is.character(control)) { icontrol <- match(control, levels(marks(X))) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ probs <- as.solist(lapply(Deach, "/", e2=Dall)) ## correct small numerical errors probs <- as.solist(lapply(probs, clamp01)) ## trap NaN values nbg <- lapply(probs, badvalues) nbg <- Reduce("|", nbg) if(any(nbg)) { ## apply l'Hopital's rule distX <- distmap(X, xy=Dall) whichnn <- attr(distX, "index") typenn <- eval.im(imarks[whichnn]) typennsub <- as.matrix(typenn)[nbg] for(k in seq_along(result)) probs[[k]][nbg] <- (typennsub == k) } if(!relative) { if(!se) { result <- probs } else { SE <- list() for(i in 1:ntypes) { NUM <- (Veach[[i]] * (1 - 2 * probs[[i]]) + Vall * probs[[i]]^2) SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dall) } SE <- as.solist(SE) names(SE) <- types result <- list(estimate=probs, SE=SE) } } else { risks <- as.solist(lapply(probs, divideifpositive, d = probs[[icontrol]])) if(!se) { result <- risks } else { Vctrl <- Veach[[icontrol]] Dctrl <- Deach[[icontrol]] SE <- list() for(i in 1:ntypes) { NUM <- Veach[[i]] + Vctrl * risks[[i]]^2 SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dctrl) } SE <- as.solist(SE) names(SE) <- types result <- 
list(estimate=risks, SE=SE) } } }, points = { probs <- Deach/Dall ## correct small numerical errors probs <- clamp01(probs) ## trap NaN values bad <- badvalues(probs) badrow <- matrowany(bad) if(any(badrow)) { ## apply l'Hopital's rule typenn <- imarks[nnwhich(X)] probs[badrow, ] <- (typenn == col(result))[badrow, ] } if(!relative) { if(!se) { result <- probs } else { NUM <- Veach * (1-2*probs) + Vall * probs^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=probs, SE=SE) } } else { risks <- probs/probs[,icontrol] if(!se) { result <- risks } else { NUM <- Veach + Veach[,icontrol] * risks^2 NUM[,icontrol] <- 0 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=risks, SE=SE) } } }) } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } clamp01 <- function(x) { if(is.im(x)) return(eval.im(pmin(pmax(x, 0), 1))) return(pmin(pmax(x, 0), 1)) } badvalues <- function(x) { if(is.im(x)) x <- as.matrix(x) return(!(is.finite(x) | is.na(x))) } reciprocal <- function(x) 1/x divideifpositive <- function(z, d) { eval.im(ifelse(d > 0, z/d, NA)) } relrisk.ppp }) bw.stoyan <- function(X, co=0.15) { ## Stoyan's rule of thumb stopifnot(is.ppp(X)) n <- npoints(X) W <- Window(X) a <- area(W) stoyan <- co/sqrt(5 * max(1,n)/a) return(stoyan) } bw.relrisk <- function(X, method="likelihood", nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) ## rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] ## Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") n <- npoints(X) marx <- marks(X) method <- pickoption("method", method, c(likelihood="likelihood", leastsquares="leastsquares", ls="leastsquares", LS="leastsquares", weightedleastsquares="weightedleastsquares", wls="weightedleastsquares", WLS="weightedleastsquares")) ## if(method != "likelihood") { ## dummy variables for each type imarks <- as.integer(marx) 
if(ntypes == 2) { ## 1 = control, 2 = case indic <- (imarks == 2) y01 <- as.integer(indic) } else { indic <- matrix(FALSE, n, ntypes) indic[cbind(seq_len(n), imarks)] <- TRUE y01 <- indic * 1 } X01 <- X %mark% y01 } ## cross-validated bandwidth selection ## determine a range of bandwidth values if(is.null(hmin) || is.null(hmax)) { W <- Window(X) a <- area(W) d <- diameter(as.rectangle(W)) ## Stoyan's rule of thumb applied to the least and most common types mcount <- table(marx) nmin <- max(1, min(mcount)) nmax <- max(1, max(mcount)) stoyan.low <- 0.15/sqrt(nmax/a) stoyan.high <- 0.15/sqrt(nmin/a) if(is.null(hmin)) hmin <- max(minnndist(unique(X)), stoyan.low/5) if(is.null(hmax)) { hmax <- min(d/4, stoyan.high * 20) hmax <- max(hmax, hmin * 2) } } else stopifnot(hmin < hmax) ## h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) ## ## compute cross-validation criterion switch(method, likelihood={ methodname <- "Likelihood" ## for efficiency, only compute the estimate of p_j(x_i) ## when j = m_i = mark of x_i. 
Dthis <- numeric(n) for(i in seq_len(nh)) { Dall <- density.ppp(X, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) Deach <- density.splitppp(Y, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) split(Dthis, marx) <- Deach pthis <- Dthis/Dall cv[i] <- -mean(log(pthis)) } }, leastsquares={ methodname <- "Least Squares" for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) phat <- as.matrix(phat) cv[i] <- mean((y01 - phat)^2) } }, weightedleastsquares={ methodname <- "Weighted Least Squares" ## need initial value of h from least squares h0 <- bw.relrisk(X, "leastsquares", nh=ceiling(nh/4)) phat0 <- Smooth(X01, sigma=h0, at="points", leaveoneout=TRUE, sorted=TRUE) phat0 <- as.matrix(phat0) var0 <- phat0 * (1-phat0) var0 <- pmax.int(var0, 1e-6) for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) phat <- as.matrix(phat) cv[i] <- mean((y01 - phat)^2/var0) } }) ## optimize result <- bw.optim(cv, h, hname="sigma", creator="bw.relrisk", criterion=paste(methodname, "Cross-Validation"), warnextreme=warn, hargnames=c("hmin", "hmax"), unitname=unitname(X)) return(result) } which.max.im <- function(x) { .Deprecated("im.apply", "spatstat", "which.max.im(x) is deprecated: use im.apply(x, which.max)") ans <- im.apply(x, which.max) return(ans) } spatstat/R/rmhResolveTypes.R0000644000176200001440000000612413433151224015615 0ustar liggesusers# # # rmhResolveTypes.R # # $Revision: 1.10 $ $Date: 2019/02/20 03:34:50 $ # # rmhResolveTypes <- function(model, start, control) { # Decide whether a multitype point process is to be simulated. # If so, determine the vector of types. 
  verifyclass(model, "rmhmodel")
  verifyclass(start, "rmhstart")
  verifyclass(control, "rmhcontrol")

  # Different ways of specifying types directly:
  # either model$types, or the mark levels of the starting pattern
  types.model <- model$types
  types.start <- if(start$given=="x" && is.marked(x.start <- start$x.start))
                 levels(marks(x.start, dfok=FALSE)) else NULL

  # Check for inconsistencies
  if(!is.null(types.model) && !is.null(types.start))
    if(!isTRUE(all.equal(types.model, types.start)))
      stop("marks in start$x.start do not match model$types")

  ## model$types takes precedence over marks of the starting pattern
  types.given <- if(!is.null(types.model)) types.model else types.start
  types.given.source <-
    if(!is.null(types.model)) "model$types" else "marks of x.start"

  # Different ways of implying the number of types:
  # lengths of beta, ptypes, n.start, and of a list-of-images trend
  ntypes.beta <- length(model$par[["beta"]])
  ntypes.ptypes <- length(control$ptypes)
  ntypes.nstart <- if(start$given == "n") length(start$n.start) else 0
  mot <- model$trend
  ntypes.trend <- if(is.null(mot)) 0 else
                  if(is.im(mot)) 1 else
                  if(is.list(mot) && all(unlist(lapply(mot, is.im))))
                    length(mot) else 0

  # Check for inconsistencies in implied number of types
  # (only for numbers > 1; a length of 1 is uninformative)
  nty <- c(ntypes.beta, ntypes.ptypes, ntypes.nstart, ntypes.trend)
  nam <- c("model$par$beta", "control$ptypes", "start$n.start", "model$trend")
  implied <- (nty > 1)
  if(!any(implied))
    ntypes.implied <- 1
  else {
    if(length(unique(nty[implied])) > 1)
      stop(paste("Mismatch in numbers of types implied by",
                 commasep(sQuote(nam[implied]))))
    ntypes.implied <- unique(nty[implied])
    ntypes.implied.source <- (nam[implied])[1]
  }

  # Check consistency between types.given and ntypes.implied
  if(!is.null(types.given) && ntypes.implied > 1)
    if(length(types.given) != ntypes.implied)
      stop(paste("Mismatch between number of types in",
                 types.given.source,
                 "and length of",
                 ntypes.implied.source))

  # Finally determine the types
  if(model$multitype.interact) {
    # There MUST be a types vector
    types <- if(!is.null(types.given)) types.given
             else if(ntypes.implied > 1) 1:ntypes.implied
             else stop("Cannot determine types for multitype process")
  } else {
    types <-
if(!is.null(types.given)) types.given else if(ntypes.implied > 1) 1:ntypes.implied else 1 } ntypes <- length(types) # If we are conditioning on the number of points of each type, # make sure the starting state is appropriate if(control$fixing == "n.each.type") { if(start$given == "n" && ntypes.nstart != ntypes) stop("Length of start$n.start not equal to number of types.\n") else if(start$given == "x" && length(types.given) != ntypes) stop("Marks of start$x.start do not match number of types.\n") } return(types) } spatstat/R/colourtools.R0000644000176200001440000001273413451622206015035 0ustar liggesusers# # colourtools.R # # $Revision: 1.21 $ $Date: 2019/04/05 09:20:59 $ # rgb2hex <- function(v, maxColorValue=255) { stopifnot(is.numeric(v)) if(!is.matrix(v)) v <- matrix(v, nrow=1L) if(ncol(v) %in% c(3, 4)) { out <- rgb(v, maxColorValue=maxColorValue) return(out) } stop("v should be a vector of length 3 or 4, or a matrix with 3 or 4 columns") } rgb2hsva <- function(red, green=NULL, blue=NULL, alpha=NULL, maxColorValue=255) { if(is.null(green) && is.null(blue) && is.null(alpha)) { ## red should be a 3-row matrix of RGB values ## or a 4-row matrix of RGBA values if(!is.matrix(red)) red <- matrix(red, ncol=1L) ## check for an alpha channel if(nrow(red) == 4) { alpha <- red[4L,] red <- red[-4L, , drop=FALSE] } } y <- rgb2hsv(red, green, blue, maxColorValue=maxColorValue) if(!is.null(alpha)) y <- rbind(y, alpha=alpha/maxColorValue) return(y) } col2hex <- function(x) { # convert to RGBA y <- col2rgb(x, alpha=TRUE) # remove alpha channel if all colours are opaque if(all(y["alpha", ] == 255)) y <- y[1:3, , drop=FALSE] # convert to hex z <- rgb2hex(t(y)) return(z) } paletteindex <- function(x) { x <- col2hex(x) p <- col2hex(palette()) m <- match(x, p) return(m) } is.colour <- function(x) { if(length(x) == 0) return(FALSE) cx <- try(col2rgb(x), silent=TRUE) bad <- inherits(cx, "try-error") return(!bad) } samecolour <- function(x, y) { col2hex(x) == col2hex(y) } 
## Complementary colour: invert the R, G, B channels,
## preserving the alpha channel.
## Accepts colour values, NULL, or a colourmap object
## (whose output colours are then inverted).
complementarycolour <- function(x) {
  if(is.null(x)) return(NULL)
  if(inherits(x, "colourmap")) {
    colouroutputs(x) <- complementarycolour(colouroutputs(x))
    return(x)
  }
  # convert to RGBA
  y <- col2rgb(x, alpha=TRUE)
  # complement of R, G, B
  y[1:3, ] <- 255 - y[1:3, ]
  # convert to colours
  z <- rgb2hex(t(y))
  return(z)
}
## Is each colour a shade of grey?
## TRUE when saturation is zero and the colour is fully opaque.
## Returns NA for a colour-valued function (cannot be determined).
is.grey <- function(x) {
  if(inherits(x, "colourmap")) x <- colouroutputs(x)
  if(is.function(x)) return(NA)
  y <- rgb2hsva(col2rgb(x, alpha=TRUE))
  sat <- y["s", ]
  alp <- y["alpha", ]
  return(sat == 0 & alp == 1)
}
## Remove any transparency, yielding fully opaque colours.
## Palette colours are returned unchanged (they are already opaque names).
to.opaque <- function(x) {
  if(all(!is.na(paletteindex(x)))) return(x) # preserve palette colours
  rgb(t(col2rgb(x)), maxColorValue=255)
}
## Set the alpha channel of each colour to 'fraction' (in [0,1]).
## fraction == 1 is equivalent to to.opaque().
to.transparent <- function(x, fraction) {
  if(all(fraction == 1))
    return(to.opaque(x))
  rgb(t(col2rgb(x))/255,
      alpha=fraction,
      maxColorValue=1)
}
## Saturate each colour: set saturation to s (default fully saturated).
## Greys (other than white) are mapped to black; white is left alone.
to.saturated <- function(x, s=1) {
  y <- rgb2hsv(col2rgb(x))
  ## map grey to black, otherwise saturate the colour
  notwhite <- !(y["h",] == 0 & y["s",] == 0 & y["v", ] == 1)
  isgrey <- (y["s", ] == 0)
  y["v", isgrey & notwhite] <- 0
  y["s", !isgrey & notwhite] <- s
  ## convert back
  z <- hsv(y["h",], y["s",], y["v",])
  return(z)
}
## Convert colours to greyscale using the given channel weights
## (defaults are the usual luma coefficients).
## Handles NULL, colourmap objects and colour-valued functions recursively.
## If transparent=TRUE the alpha channel is carried through.
to.grey <- function(x, weights=c(0.299, 0.587, 0.114), transparent=FALSE) {
  if(is.null(x)) return(NULL)
  if(inherits(x, "colourmap")) {
    colouroutputs(x) <- to.grey(colouroutputs(x),
                                weights=weights, transparent=transparent)
    return(x)
  }
  if(is.function(x)) {
    f <- x
    g <- function(...) to.grey(f(...), weights=weights, transparent=transparent)
    return(g)
  }
  ## preserve palette indices, if only using black/grey
  if(all(!is.na(paletteindex(x))) && all(is.grey(x)))
    return(x)
  if(!transparent) {
    y <- col2rgb(x)
    g <- (weights %*% y)/(255 * sum(weights))
    z <- grey(g)
  } else {
    yy <- col2rgb(x, alpha=TRUE)
    y <- yy[1:3, , drop=FALSE]
    g <- (weights %*% y)/(255 * sum(weights))
    z <- grey(g, alpha=yy[4L,]/255.0)
  }
  return(z)
}
## Does the argument name denote a colour argument,
## i.e. is it "col" or does it start with "col." ?
is.col.argname <- function(x) {
  return(nzchar(x) & ((x == "col") | (substr(x, 1L, 4L) == "col.")))
}
## Convert all colour-valued arguments in the list x to greyscale;
## extra arguments are passed to to.grey().
col.args.to.grey <- function(x, ...)
{
  ## apply to.grey() only to entries whose name is a colour argument
  if(any(hit <- is.col.argname(names(x))))
    x[hit] <- lapply(x[hit], to.grey, ...)
  return(x)
}
# versions of rgb() and hsv() that work with NA values
## NA in any channel of a colour produces NA in the result,
## instead of the error that rgb() would raise.
rgbNA <- function(red, green, blue, alpha=NULL, maxColorValue=1) {
  ## assemble channels in a data frame so complete.cases() can be used
  df <- if(is.null(alpha)) data.frame(red=red, green=green, blue=blue)
        else data.frame(red=red, green=green, blue=blue, alpha=alpha)
  result <- rep(NA_character_, nrow(df))
  ok <- complete.cases(df)
  result[ok] <-
    if(is.null(alpha)) {
      with(df, rgb(red[ok], green[ok], blue[ok],
                   maxColorValue=maxColorValue))
    } else {
      with(df, rgb(red[ok], green[ok], blue[ok], alpha[ok],
                   maxColorValue=maxColorValue))
    }
  return(result)
}
## hsv() analogue of rgbNA(): NA channels give NA colours.
hsvNA <- function(h, s, v, alpha=NULL) {
  df <- if(is.null(alpha)) data.frame(h=h, s=s, v=v)
        else data.frame(h=h, s=s, v=v, alpha=alpha)
  result <- rep(NA_character_, nrow(df))
  ok <- complete.cases(df)
  result[ok] <-
    if(is.null(alpha)) {
      with(df, hsv(h[ok], s[ok], v[ok]))
    } else {
      with(df, hsv(h[ok], s[ok], v[ok], alpha[ok]))
    }
  return(result)
}
## This function traps the colour arguments
## and converts to greyscale if required.
## It then dispatches to do.call.matched(); conversion happens only
## when spatstat.options("monochrome") is set.
do.call.plotfun <- function(fun, arglist, ...) {
  if(spatstat.options("monochrome")) {
    keys <- names(arglist)
    if(!is.null(keys)) {
      ## colour arguments: 'border', 'col', 'fg', 'bg', or 'col.*'
      cols <- nzchar(keys) &
        ((keys %in% c("border", "col", "fg", "bg")) |
         (substr(keys, 1, 4) == "col."))
      if(any(cols))
        arglist[cols] <- lapply(arglist[cols], to.grey)
    }
  }
  do.call.matched(fun, arglist, ...)
} gammabreaks <- function(ra, n, gamma=1) { # make breaks for x which are evenly spaced on the scale y = x^gamma check.1.real(gamma) stopifnot(gamma > 0) y <- seq(from=0, to=1, length.out=n) breaks <- ra[1L] + diff(ra) * y^(1/gamma) breaks[1L] <- ra[1L] breaks[n] <- ra[2L] return(breaks) } spatstat/R/predict.ppm.R0000644000176200001440000006661313461016754014711 0ustar liggesusers# # predict.ppm.S # # $Revision: 1.111 $ $Date: 2019/04/27 07:22:19 $ # # predict.ppm() # From fitted model obtained by ppm(), # evaluate the fitted trend or conditional intensity # at a grid/list of other locations # # # ------------------------------------------------------------------- predict.ppm <- local({ ## ## extract undocumented/outdated arguments, and trap others ## xtract <- function(..., newdata=NULL, sumobj=NULL, E=NULL, total=NULL, getoutofjail=FALSE) { if(!is.null(newdata)) warning(paste("The use of the argument", sQuote("newdata"), "is out-of-date. See help(predict.ppm)")) if(!is.null(total)) message(paste("The use of the argument", sQuote("total"), "is out-of-date. 
See help(predict.ppm)")) trap.extra.arguments(..., .Context="In predict.ppm") return(list(sumobj=sumobj, E=E, total=total, getoutofjail=getoutofjail)) } ## ## confidence/prediction intervals for number of points predconfPois <- function(region, object, level, what=c("estimate", "se", "confidence", "prediction")) { what <- match.arg(what) stopifnot(0 < level && level < 1) lam <- predict(object, window=region) mu.hat <- integral.im(lam) if(what == "estimate") return(mu.hat) mo <- model.images(object, W=as.owin(lam)) ZL <- unlist(lapply(mo, function(z, w) integral.im(eval.im(z * w)), w = lam)) ZL <- matrix(ZL, nrow=1) var.muhat <- as.numeric(ZL %*% vcov(object) %*% t(ZL)) sd.muhat <- sqrt(var.muhat) if(what == "se") return(sd.muhat) alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) out <- switch(what, confidence = mu.hat + qnorm(pp) * sd.muhat, prediction = qmixpois(pp, mu.hat, sd.muhat, I)) names(out) <- paste0(signif(100 * pp, 3), "%") out } typepublic <- c("trend", "cif", "intensity", "count") typeaccept <- c(typepublic, "lambda", "se", "SE", "covariates") typeuse <- c(typepublic, "cif", "se", "se", "covariates") predict.ppm <- function(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type=c("trend", "cif", "intensity", "count"), se=FALSE, interval=c("none", "confidence", "prediction"), level = 0.95, X=data.ppm(object), correction, ignore.hardcore=FALSE, ..., dimyx=NULL, eps=NULL, new.coef=NULL, check=TRUE, repair=TRUE) { interval <- match.arg(interval) ## extract undocumented arguments xarg <- xtract(...) 
sumobj <- xarg$sumobj E <- xarg$E total <- xarg$total getoutofjail <- xarg$getoutofjail ## match 'type' argument including 'legacy' options seonly <- FALSE if(missing(type)) type <- type[1] else { if(length(type) > 1) stop("Argument 'type' should be a single value") mt <- pmatch(type, typeaccept) if(is.na(mt)) stop("Argument 'type' should be one of", commasep(sQuote(typepublic), " or ")) type <- typeuse[mt] if(type == "se") { if(!getoutofjail) message(paste("Outdated syntax:", "type='se' should be replaced by se=TRUE;", "then the standard error is predict(...)$se")) type <- "trend" se <- TRUE seonly <- TRUE } } if(!is.null(total)) { message("Outdated argument 'total': use 'window' and set type='count'") type <- "count" if(!is.logical(total)) window <- if(is.tess(total)) total else as.owin(total) } ## model <- object verifyclass(model, "ppm") ## if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(missing(correction) || is.null(correction)) correction <- object$correction fitcoef <- coef(object) if(!is.null(new.coef)) { ## validate coefs if(length(new.coef) != length(fitcoef)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef))) coeffs <- new.coef } else { coeffs <- fitcoef } ## find out what kind of model it is if(is.null(sumobj)) sumobj <- summary(model, quick="entries") # undocumented hack! 
# stationary <- sumobj$stationary poisson <- sumobj$poisson marked <- sumobj$marked multitype <- sumobj$multitype notrend <- sumobj$no.trend changedcoef <- sumobj$changedcoef || !is.null(new.coef) trivial <- poisson && notrend need.covariates <- sumobj$uses.covars covnames.needed <- sumobj$covars.used if(sumobj$antiquated) warning("The model was fitted by an out-of-date version of spatstat") ## determine mark space if(marked) { if(!multitype) stop("Prediction not yet implemented for general marked point processes") else types <- levels(marks(sumobj$entries$data)) } ## For Poisson models cif=intensity=trend if(poisson && type %in% c("cif", "intensity")) type <- "trend" ## ............. trap un-implemented cases ................... ## Standard errors not yet available for cif, intensity if(se && type %in% c("cif", "intensity")) stop(paste("Standard error for", type, "is not yet implemented"), call.=FALSE) ## Intervals are only available for unmarked Poisson models if(type == "count" && interval != "none" && (marked || !poisson)) { stop(paste0(interval, " intervals for counts are only implemented for", if(marked) " unmarked" else "", if(!poisson) " Poisson", " models"), call.=FALSE) } if(interval == "prediction" && type != "count") stop("Prediction intervals are only available for type='count'", call.=FALSE) if(interval == "confidence" && type %in% c("intensity", "cif")) stop(paste("Confidence intervals are not yet available for", type), call.=FALSE) estimatename <- if(interval == "none") "estimate" else interval ## ............. start computing ............................. 
## Total count in a region if(type == "count") { ## point or interval estimate, optionally with SE if(is.null(window)) { ## domain of the original data if(!seonly) est <- predconfPois(NULL, model, level, estimatename) if(se) sem <- predconfPois(NULL, model, level, "se") } else if(is.tess(window)) { ## quadrats tilz <- tiles(window) if(!seonly) { est <- lapply(tilz, predconfPois, object=model, level=level, what=estimatename) est <- switch(interval, none = unlist(est), confidence =, prediction = t(simplify2array(est))) } if(se) sem <- sapply(tilz, predconfPois, object=model, level=level, what="se") } else { ## window if(!seonly) est <- predconfPois(window, model, level, estimatename) if(se) sem <- predconfPois(window, model, level, "se") } if(!se) return(est) if(seonly) return(sem) result <- list(est, sem) names(result) <- c(estimatename, "se") return(result) } ## ..... Predict a spatial function ....... if(interval != "none") { ## Prepare for confidence interval alpha2 <- (1-level)/2 pp <- sort(c(alpha2, 1-alpha2)) ci.names <- paste0(signif(100 * pp, 3), "%") ci.q <- qnorm(pp) } ## determine what kind of output is required: ## (arguments present) (output) ## window, ngrid -> image ## locations (mask) -> image ## locations (image) -> image ## locations (rectangle) -> treat locations as 'window' ## locations (polygonal) -> treat locations as 'window' ## locations (other) -> data frame ## if(is.im(locations)) locations <- as.owin(locations) if(is.null(window) && is.owin(locations) && !is.mask(locations)) { window <- locations locations <- NULL } #' incompatible: if(!is.null(locations)) { #' other arguments are incompatible offending <- c(!is.null(ngrid), !is.null(dimyx), !is.null(eps)) if(any(offending)) { offenders <- c("grid", "dimyx", "eps")[offending] nbad <- sum(offending) stop(paste(ngettext(nbad, "The argument", "The arguments"), commasep(sQuote(offenders)), ngettext(nbad, "is", "are"), "incompatible with", sQuote("locations")), call.=FALSE) } } #' equivalent: 
if(!is.null(ngrid) && !is.null(dimyx))
      warning(paste("The arguments", sQuote("ngrid"), "and", sQuote("dimyx"),
                    "are equivalent: only one should be given"),
              call.=FALSE)
    ngrid <- ngrid %orifnull% dimyx
    if(is.null(ngrid) && is.null(locations))
      ## use regular grid
      ngrid <- rev(spatstat.options("npixel"))
    want.image <- is.null(locations) || is.mask(locations)
    make.grid <- !is.null(ngrid)
    ## ############## Determine prediction points #####################
    if(!want.image) {
      ## (A) list of (x,y) coordinates given by `locations'
      xpredict <- locations$x
      ypredict <- locations$y
      if(is.null(xpredict) || is.null(ypredict)) {
        ## fall back on the generic coordinate extractor
        xy <- xy.coords(locations)
        xpredict <- xy$x
        ## BUG FIX: this previously read 'xpredict <- xy$y', so the
        ## y coordinates were never captured and the following
        ## is.null(ypredict) test failed spuriously.
        ypredict <- xy$y
      }
      if(is.null(xpredict) || is.null(ypredict))
        stop(paste("Don't know how to extract x,y coordinates from",
                   sQuote("locations")))
      ## marks if required
      if(marked) {
        ## extract marks from data frame `locations'
        mpredict <- locations$marks
        if(is.null(mpredict))
          stop(paste("The argument", sQuote("locations"),
                     "does not contain a column of marks",
                     "(required since the fitted model",
                     "is a marked point process)"))
        if(is.factor(mpredict)) {
          ## verify mark levels match those in model
          if(!isTRUE(all.equal(levels(mpredict), types))) {
            if(all(levels(mpredict) %in% types))
              mpredict <- factor(mpredict, levels=types)
            else
              stop(paste("The marks in", sQuote("locations"),
                         "do not have the same levels as",
                         "the marks in the model"))
          }
        } else {
          ## coerce to factor if possible
          if(all(mpredict %in% types))
            mpredict <- factor(mpredict, levels=types)
          else
            stop(paste("The marks in", sQuote("locations"),
                       "do not have the same values as the marks in the model"))
        }
      }
    } else {
      ## (B) pixel grid of points
      if(!make.grid)
        ## (B)(i) The grid is given in `locations'
        masque <- locations
      else {
        ## (B)(ii) We have to make the grid ourselves
        ## Validate ngrid
        if(!is.null(ngrid)) {
          if(!is.numeric(ngrid))
            stop("ngrid should be a numeric vector")
          ngrid <- ensure2vector(ngrid)
        }
        if(is.null(window))
          window <- sumobj$entries$data$window
masque <- as.mask(window, dimyx=ngrid, eps=eps) } ## Hack ----------------------------------------------- ## gam with lo() will not allow extrapolation beyond the range of x,y ## values actually used for the fit. Check this: tums <- termsinformula(model$trend) if(any( tums == "lo(x)" | tums == "lo(y)" | tums == "lo(x,y)" | tums == "lo(y,x)") ) { ## determine range of x,y used for fit gg <- model$internal$glmdata gxr <- range(gg$x[gg$SUBSET]) gyr <- range(gg$y[gg$SUBSET]) ## trim window to this range masque <- intersect.owin(masque, owin(gxr, gyr)) } ## ------------------------------------ End Hack ## ## Finally, determine x and y vectors for grid rxy <- rasterxy.mask(masque, drop=TRUE) xpredict <- rxy$x ypredict <- rxy$y } ## ################ CREATE DATA FRAME ########################## ## ... to be passed to predict.glm() ## ## First the x, y coordinates if(!marked) newdata <- data.frame(x=xpredict, y=ypredict) else if(!want.image) newdata <- data.frame(x=xpredict, y=ypredict, marks=mpredict) else { ## replicate nt <- length(types) np <- length(xpredict) xpredict <- rep.int(xpredict,nt) ypredict <- rep.int(ypredict,nt) mpredict <- rep.int(types, rep.int(np, nt)) mpredict <- factor(mpredict, levels=types) newdata <- data.frame(x = xpredict, y = ypredict, marks=mpredict) } ## ## Next the external covariates, if any ## if(need.covariates) { if(is.null(covariates)) { ## Extract covariates from fitted model object ## They have to be images. 
oldcov <- model$covariates if(is.null(oldcov)) stop("External covariates are required, and are not available") if(is.data.frame(oldcov)) stop(paste("External covariates are required.", "Prediction is not possible at new locations")) covariates <- oldcov } ## restrict to covariates actually required for formula covariates <- if(is.data.frame(covariates)) { covariates[,covnames.needed, drop=FALSE] } else covariates[covnames.needed] covfunargs <- model$covfunargs covariates.df <- mpl.get.covariates(covariates, list(x=xpredict, y=ypredict), "prediction points", covfunargs) newdata <- cbind(newdata, covariates.df) } ## ###### Set up prediction variables ################################ ## ## Provide SUBSET variable ## if(is.null(newdata$SUBSET)) newdata$SUBSET <- rep.int(TRUE, nrow(newdata)) ## ## Dig out information used in Berman-Turner device ## Vnames: the names for the ``interaction variables'' ## glmdata: the data frame used for the glm fit ## glmfit: the fitted glm object ## if(!trivial) { Vnames <- model$internal$Vnames vnameprefix <- model$internal$vnameprefix glmdata <- getglmdata(model) glmfit <- getglmfit(model) if(object$method=="logi") newdata$.logi.B <- rep(glmdata$.logi.B[1], nrow(newdata)) } ## Undocumented secret exit if(type == "covariates") return(list(newdata=newdata, mask=if(want.image) masque else NULL)) ## ########## COMPUTE PREDICTION ############################## ## ## Compute the predicted value z[i] for each row of 'newdata' ## Store in a vector z and reshape it later ## ## ## ############################################################# needSE <- se || (interval != "none") attribeauts <- list() if(trivial) { ## ########### UNIFORM POISSON PROCESS ##################### lambda <- exp(coeffs[[1]]) if(needSE) { npts <- nobs(model) se.lambda <- lambda/sqrt(npts) } switch(interval, none = { z <- rep.int(lambda, nrow(newdata)) }, confidence = { z <- matrix(lambda + se.lambda * ci.q, byrow=TRUE, nrow=nrow(newdata), ncol=2, dimnames=list(NULL, 
ci.names)) }, stop("Internal error: unreached")) if(se) zse <- rep.int(se.lambda, nrow(newdata)) ## ############################################################## } else if((type %in% c("trend", "intensity")) || poisson) { ## ## ########## COMPUTE TREND ################################### ## ## set explanatory variables to zero ## zeroes <- numeric(nrow(newdata)) for(vn in Vnames) newdata[[vn]] <- zeroes ## ## predict trend ## z <- lambda <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## if(type == "intensity") z <- PoisSaddle(z, fitin(model)) ## if(needSE) { ## extract variance-covariance matrix of parameters vc <- vcov(model) ## compute model matrix fmla <- rhs.of.formula(formula(glmfit)) # mf <- model.frame(fmla, newdata, ..., na.action=na.pass) # mm <- model.matrix(fmla, mf, ..., na.action=na.pass) mf <- model.frame(fmla, newdata, na.action=na.pass) mm <- model.matrix(fmla, mf, na.action=na.pass) if(nrow(mm) != nrow(newdata)) stop("Internal error: row mismatch in SE calculation") ## compute relative variance = diagonal of quadratic form if(ncol(mm) != ncol(vc)) stop("Internal error: column mismatch in SE calculation") vv <- quadform(mm, vc) ## standard error SE <- lambda * sqrt(vv) if(se) zse <- SE if(interval == "confidence") { z <- lambda + outer(SE, ci.q, "*") colnames(z) <- ci.names } } ## ############################################################ } else if(type == "cif" || type =="lambda") { ## ####### COMPUTE FITTED CONDITIONAL INTENSITY ################ ## ## set up arguments inter <- model$interaction if(!missing(X)) stopifnot(is.ppp(X)) W <- as.owin(data.ppm(model)) U <- ppp(newdata$x, y=newdata$y, window=W, check=FALSE) if(marked) marks(U) <- newdata$marks ## determine which prediction points are data points if(is.null(E)) E <- equalpairs(U, X, marked) ## evaluate interaction Vnew <- evalInteraction(X, U, E, inter, correction=correction, splitInf=ignore.hardcore, check=check) if(!ignore.hardcore) { ## Negative infinite values of 
potential signify cif = zero cif.equals.zero <- matrowany(Vnew == -Inf) } else { ## returned as attribute, unless vacuous cif.equals.zero <- attr(Vnew, "-Inf") %orifnull% logical(nrow(Vnew)) } attribeauts <- c(attribeauts, list(isZero=cif.equals.zero)) ## Insert the potential into the relevant column(s) of `newdata' if(ncol(Vnew) == 1) { ## Potential is real valued (Vnew is a column vector) ## Assign values to a column of the same name in newdata newdata[[Vnames]] <- as.vector(Vnew) ## } else if(is.null(avail <- colnames(Vnew))) { ## Potential is vector-valued (Vnew is a matrix) ## with unnamed components. ## Assign the components, in order of their appearance, ## to the columns of newdata labelled Vnames[1], Vnames[2],... for(i in seq_along(Vnames)) newdata[[Vnames[i] ]] <- Vnew[,i] ## } else { ## Potential is vector-valued (Vnew is a matrix) ## with named components. ## Match variables by name if(all(Vnames %in% avail)) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , vn] } else if(all(Vnames %in% (Pavail <- paste0(vnameprefix, avail)))) { for(vn in Vnames) newdata[[ vn ]] <- Vnew[ , match(vn, Pavail)] } else stop(paste( "Internal error: unable to match names", "of available interaction terms", commasep(sQuote(avail)), "to required interaction terms", commasep(sQuote(Vnames)) ), call.=FALSE) } ## invoke predict.glm or compute prediction z <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) ## reset to zero if potential was zero if(!ignore.hardcore && any(cif.equals.zero)) z[cif.equals.zero] <- 0 ## ############################################################### } else stop(paste("Unrecognised type", sQuote(type))) ## ############################################################### ## ## reshape the result ## if(!want.image) { if(!se) { z <- as.vector(z) attributes(z) <- c(attributes(z), attribeauts) out <- z } else if(seonly) { out <- as.vector(zse) } else { z <- as.vector(z) attributes(z) <- c(attributes(z), attribeauts) out <- list(z, as.vector(zse)) 
names(out) <- c(estimatename, "se") } } else { ## make an image of the right shape and value imago <- as.im(masque, value=1.0) if(!marked && interval=="none") { ## single image if(!se) { out <- imago ## set entries out[] <- z } else if(seonly) { out <- imago out[] <- zse } else { est <- std <- imago est[] <- z std[] <- zse out <- list(est, std) names(out) <- c(estimatename, "se") } } else if(interval != "none") { ## list of 2 images for CI if(!seonly) { hi <- lo <- imago hi[] <- z[,1] lo[] <- z[,2] est <- solist(hi, lo) names(est) <- ci.names } if(se) { std <- imago std[] <- zse } if(!se) { out <- est } else if(seonly) { out <- std } else { out <- list(est, std) names(out) <- c(estimatename, "se") } } else { ## list of images, one for each level of marks out <- list() for(i in seq_along(types)) { outi <- imago ## set entries outi[] <- z[newdata$marks == types[i]] out[[i]] <- outi } out <- as.solist(out) names(out) <- as.character(types) } } ## ## FINISHED ## return(out) } predict.ppm }) #################################################################### # # compute pointwise uncertainty of fitted intensity # model.se.image <- function(fit, W=as.owin(fit), ..., what="sd") { if(!is.poisson.ppm(fit)) stop("Only implemented for Poisson point process models", call.=FALSE) what <- pickoption("option", what, c(sd="sd", var="var", cv="cv", CV="cv", ce="ce", CE="ce")) W <- as.mask(as.owin(W)) # variance-covariance matrix of coefficients vc <- vcov(fit) np <- dim(vc)[1] # extract sufficient statistic for each coefficient mm <- model.images(fit, W, ...) 
# compute fitted intensity lam <- predict(fit, locations=W) # initialise resulting image U <- as.im(W) U[] <- 0 # compute pointwise matrix product, assuming vc is symmetric for(i in 1:np) { Si <- mm[[i]] aii <- vc[i,i] U <- eval.im(U + aii * Si^2) if(i > 1) { for(j in 1:(i-1)) { Sj <- mm[[j]] aij <- vc[i,j] twoaij <- 2 * aij U <- eval.im(U + twoaij * Si * Sj) } } } # the matrix product is the relative variance (CV) if(what=="cv") return(U) # relative sd if(what=="ce") { U <- eval.im(sqrt(U)) return(U) } # multiply by squared intensity to obtain variance U <- eval.im(U * lam^2) # variance if(what=="var") return(U) # compute SD and return U <- eval.im(sqrt(U)) return(U) } GLMpredict <- function(fit, data, coefs, changecoef=TRUE, type=c("response", "link")) { ok <- is.finite(coefs) type <- match.arg(type) if(!changecoef && all(ok)) { answer <- predict(fit, newdata=data, type=type) } else { if(inherits(fit, "gam")) stop("This calculation is not supported for GAM fits", call.=FALSE) # do it by hand fmla <- formula(fit) data$.mpl.Y <- 1 fram <- model.frame(fmla, data=data, na.action=NULL) # linear predictor mm <- model.matrix(fmla, data=fram) # ensure all required coefficients are present coefs <- fill.coefs(coefs, colnames(mm)) ok <- is.finite(coefs) # if(all(ok)) { eta <- as.vector(mm %*% coefs) } else { #' ensure 0 * anything = 0 eta <- as.vector(mm[ , ok, drop=FALSE] %*% coefs[ok]) for(j in which(!ok)) { mmj <- mm[, j] nonzero <- is.na(mmj) | (mmj != 0) if(any(nonzero)) eta[nonzero] <- eta[nonzero] + mmj[nonzero] * coefs[j] } } # offset mo <- model.offset(fram) if(!is.null(mo)) { if(is.matrix(mo)) mo <- apply(mo, 1, sum) eta <- mo + eta } switch(type, link = { answer <- eta }, response = { linkinv <- family(fit)$linkinv answer <- linkinv(eta) }) } # Convert from fitted logistic prob. 
to lambda for logistic fit
  if(type == "response" && family(fit)$family=="binomial")
    answer <- fit$data$.logi.B[1] * answer/(1-answer)
  return(answer)
}
# An 'equalpairs' matrix E is needed in the ppm class
# to determine which quadrature points and data points are identical
# (not just which quadrature points are data points).
# It is a two-column matrix specifying all the identical pairs.
# The first column gives the index of a data point (in the data pattern X)
# and the second column gives the corresponding index in U.
# The following function determines the equal pair information
# from the coordinates (and marks) of U and X alone;
# it should be used only if we can't figure out this information otherwise.
equalpairs <- function(U, X, marked=FALSE) {
  ## nearest neighbour in X of each point of U, with distances
  nn <- nncross(U, X)
  ## a pair is identical iff the nearest-neighbour distance is exactly zero
  coincides <- (nn$dist == 0)
  Xind <- nn$which[coincides]
  Uind <- which(coincides)
  if(marked) {
    ## additionally require the marks to agree
    samemarks <- (marks(X)[Xind] == marks(U)[Uind])
    Xind <- Xind[samemarks]
    Uind <- Uind[samemarks]
  }
  return(cbind(Xind, Uind))
}
## Expand/reorder the coefficient vector 'coefs' so that it has one
## entry for each name in 'required' (missing entries become 0).
## An unnamed 'coefs' of the right length is assumed to match 1-1.
fill.coefs <- function(coefs, required) {
  # 'coefs' should contain all the 'required' values
  coefsname <- deparse(substitute(coefs))
  nama <- names(coefs)
  if(is.null(nama)) {
    #' names cannot be matched
    if(length(coefs) != length(required))
      stop(paste("The unnamed argument", sQuote(coefsname), "has",
                 length(coefs), "entries, but", length(required),
                 "are required"), call.=FALSE)
    # blithely assume they match 1-1
    names(coefs) <- required
    return(coefs)
  }
  stopifnot(is.character(required))
  if(identical(nama, required)) return(coefs)
  inject <- match(nama, required)
  if(any(notneeded <- is.na(inject))) {
    ## drop coefficients that are not in 'required', with a warning
    warning(paste("Internal glitch: some coefficients were not required:",
                  commasep(sQuote(nama[notneeded]))),
            call.=FALSE)
    coefs <- coefs[!notneeded]
    nama <- names(coefs)
    inject <- match(nama, required)
  }
  ## unmatched 'required' entries default to zero
  y <- numeric(length(required))
  names(y) <- required
  y[inject] <- coefs
  return(y)
}
spatstat/R/suffstat.R0000644000176200001440000000633113333543255014311 0ustar liggesusers#
# suffstat.R
#
#
calculate sufficient statistic
#
# $Revision: 1.17 $ $Date: 2013/04/25 06:37:43 $
#
#
## Compute the sufficient statistic of a fitted point process model,
## evaluated on the point pattern X (default: the data pattern).
## Dispatches to the cheapest applicable implementation:
## Poisson, interaction-specific, interaction-family-specific, or generic.
suffstat <- function(model, X=data.ppm(model)) {
  cl <- sys.call()
  callstring <- short.deparse(cl)
  verifyclass(model, "ppm")
  if(!missing(X)) verifyclass(X, "ppp") else X <- NULL
  inter <- model$interaction
  func <- if(is.null(inter) || is.poisson(inter))
    suffstat.poisson
  else if(!is.null(ssinter <- inter$suffstat))
    ssinter
  else if(!is.null(ssfamily <- inter$family$suffstat))
    ssfamily
  else
    suffstat.generic
  return(func(model, X, callstring))
}
## Generic (slow) computation of the sufficient statistic:
## adds the points of X one at a time and accumulates the
## corresponding rows of the partial model matrix.
suffstat.generic <- function(model, X=NULL, callstring="suffstat.generic") {
  # This should work for an arbitrary ppm
  # since it uses the fundamental relation between
  # conditional intensity and likelihood.
  # But it is computationally intensive.
  verifyclass(model, "ppm")
  coefnames <- names(coef(model))
  if(is.null(X)) {
    X <- data.ppm(model)
    modelX <- model
  } else {
    verifyclass(X, "ppp")
    # refit the model to determine which points are used in pseudolikelihood
    modelX <- update(model, X, method="mpl")
  }
  # find data points which do not contribute to pseudolikelihood
  mplsubset <- getglmdata(modelX)$.mpl.SUBSET
  mpldata <- is.data(quad.ppm(modelX))
  contribute <- mplsubset[mpldata]
  if(!any(contribute))
    # result is zero vector
    return(0 * coef(model))
  # Add points one-by-one
  # If there are points which don't contribute, condition on them
  use <- which(contribute)
  dontuse <- which(!contribute)
  for(i in seq_along(use)) {
    ## points conditioned on: all non-contributing points,
    ## plus the contributing points already added
    prior <- if(i == 1) c() else use[1:(i-1)]
    prior <- c(dontuse, prior)
    Xprior <- X[prior]
    Xcurrent <- X[use[i]]
    mom <- partialModelMatrix(Xprior, Xcurrent, model, "suffstat")
    ## the last row corresponds to the newly-added point
    lastrow <- length(prior) + 1
    momrow <- mom[lastrow, ]
    if(i == 1)
      result <- momrow
    else
      result <- momrow + result
  }
  names(result) <- coefnames
  attr(result, "mplsubset") <- NULL
  return(result)
}
## Strip the interaction terms from a fitted ppm,
## returning the corresponding Poisson model object.
killinteraction <- function(model) {
  verifyclass(model, "ppm")
  ispoisson <- summary(model, quick=TRUE)$poisson
  if(ispoisson)
    return(model)
  # surgery required
  newmodel <- model
newmodel$interaction <- NULL if(!is.null(Vnames <- model$internal$Vnames)) { matches <- names(model$coef) %in% Vnames newmodel$coef <- model$coef[!matches] newmodel$internal$Vnames <- NULL } # the other 'internal' stuff may still be wrong (or `preserved') return(newmodel) } suffstat.poisson <- function(model, X, callstring="suffstat.poisson") { verifyclass(model, "ppm") if(is.null(X)) X <- data.ppm(model) else verifyclass(X, "ppp") if(!is.poisson(model)) stop("Model is not a Poisson process") Empty <- X[numeric(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat") nmom <- ncol(mom) ncoef <- length(coef(model)) if(nmom != ncoef) stop("Internal error: number of columns of model matrix does not match number of coefficients in fitted model") if(nmom > 1 && any(colnames(mom) != names(coef(model)))) warning("Internal error: mismatch between column names of model matrix and names of coefficient vector in fitted model") o1sum <- apply(mom, 2, sum) return(o1sum) } spatstat/R/close3Dpairs.R0000644000176200001440000001725313616730331015007 0ustar liggesusers# # close3Dpairs.R # # $Revision: 1.14 $ $Date: 2020/02/06 05:53:02 $ # # extract the r-close pairs from a 3D dataset # # closepairs.pp3 <- local({ closepairs.pp3 <- function(X, rmax, twice=TRUE, what=c("all", "indices", "ijd"), distinct=TRUE, neat=TRUE, ...) 
{ verifyclass(X, "pp3") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L) stopifnot(is.finite(rmax)) stopifnot(rmax >= 0) ordered <- list(...)$ordered if(missing(twice) && !is.null(ordered)) { warning("Obsolete argument 'ordered' has been replaced by 'twice'") twice <- ordered } npts <- npoints(X) nama <- switch(what, all = c("i", "j", "xi", "yi", "zi", "xj", "yj", "zj", "dx", "dy", "dz", "d"), indices = c("i", "j"), ijd = c("i", "j", "d")) names(nama) <- nama if(npts == 0) { null.answer <- lapply(nama, nuttink) return(null.answer) } ## sort points by increasing x coordinate oo <- fave.order(coords(X)$x) Xsort <- X[oo] ## First make an OVERESTIMATE of the number of pairs nsize <- list(...)$nsize # secret option to test overflow code if(!is.null(nsize)) { splat("Using nsize =", nsize) } else { #' normal usage nsize <- ceiling(5 * pi * (npts^2) * (rmax^3)/volume(as.box3(X))) nsize <- max(1024, nsize) if(nsize > .Machine$integer.max) { warning( "Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } } ## Now extract pairs XsortC <- coords(Xsort) x <- XsortC$x y <- XsortC$y z <- XsortC$z r <- rmax ng <- nsize storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(z) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" ## go a <- switch(what, all = { .Call("close3pairs", xx=x, yy=y, zz=z, rr=r, nguess=ng, PACKAGE = "spatstat") }, indices = { .Call("close3IJpairs", xx=x, yy=y, zz=z, rr=r, nguess=ng, PACKAGE = "spatstat") }, ijd = { .Call("close3IJDpairs", xx=x, yy=y, zz=z, rr=r, nguess=ng, PACKAGE = "spatstat") }) names(a) <- nama ## convert i,j indices to original sequence a$i <- oo[a$i] a$j <- oo[a$j] ## handle options if(twice) { ## both (i, j) and (j, i) should be returned a <- as.data.frame(a) a <- as.list(rbind(a, swapdata(a, what))) } else if(neat) { ## enforce i < j swap <- with(a, (j < i)) if(any(swap)) { a <- as.data.frame(a) a[swap,] <- 
swapdata(a[swap, ,drop=FALSE], what) a <- as.list(a) } } ## add pairs of identical points? if(!distinct) { ii <- seq_len(npts) xtra <- switch(what, indices = { data.frame(i = ii, j=ii) }, ijd= { data.frame(i = ii, j=ii, d=0) }, all = { cooi <- cooj <- coords(X)[, c("x","y","z")] names(cooi) <- c("xi", "yi", "zi") names(cooj) <- c("xj", "yj", "zj") zero <- numeric(npts) cbind(data.frame(i=ii, j=ii), cooi, cooj, data.frame(dx=zero, dy=zero, dz=zero, d=zero)) }) a <- as.list(rbind(as.data.frame(a), xtra)) } ## done return(a) } swapdata <- function(a, what) { switch(what, all = { with(a, data.frame(i = j, j = i, xi = xj, yi = yj, zi = zj, xj = xi, yj = yi, zj = zi, dx = -dx, dy = -dy, dz = -dz, d = d)) }, indices = { with(a, data.frame(i=j, j=i)) }, ijd = { with(a, data.frame(i=j, j=i, d=d)) }) } nuttink <- function(x) numeric(0) closepairs.pp3 }) ####################### crosspairs.pp3 <- local({ crosspairs.pp3 <- function(X, Y, rmax, what=c("all", "indices", "ijd"), ...) { verifyclass(X, "pp3") verifyclass(Y, "pp3") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1L && rmax >= 0) nama <- switch(what, all = c("i", "j", "xi", "yi", "zi", "xj", "yj", "zj", "dx", "dy", "dz", "d"), indices = c("i", "j"), ijd = c("i", "j", "d")) names(nama) <- nama nX <- npoints(X) nY <- npoints(Y) if(nX == 0 || nY == 0) { null.answer <- lapply(nama, nuttink) return(null.answer) } ## order patterns by increasing x coordinate ooX <- fave.order(coords(X)$x) Xsort <- X[ooX] ooY <- fave.order(coords(Y)$x) Ysort <- Y[ooY] ## First (over)estimate the number of pairs nsize <- list(...)$nsize # secret option to test overflow code if(!is.null(nsize)) { splat("Using nsize =", nsize) } else { #' normal usage nsize <- ceiling(3 * pi * (rmax^3) * nX * nY/volume(as.box3(Y))) nsize <- max(1024, nsize) } if(nsize > .Machine$integer.max) { warning( "Estimated number of close pairs exceeds maximum possible integer", call.=FALSE) nsize <- .Machine$integer.max } ## .Call XsortC <- 
coords(Xsort) YsortC <- coords(Ysort) Xx <- XsortC$x Xy <- XsortC$y Xz <- XsortC$z Yx <- YsortC$x Yy <- YsortC$y Yz <- YsortC$z r <- rmax ng <- nsize storage.mode(Xx) <- storage.mode(Xy) <- storage.mode(Xz) <- "double" storage.mode(Yx) <- storage.mode(Yy) <- storage.mode(Yz) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" ## go a <- switch(what, all = { .Call("cross3pairs", xx1=Xx, yy1=Xy, zz1=Xz, xx2=Yx, yy2=Yy, zz2=Yz, rr=r, nguess=ng, PACKAGE = "spatstat") }, indices = { .Call("cross3IJpairs", xx1=Xx, yy1=Xy, zz1=Xz, xx2=Yx, yy2=Yy, zz2=Yz, rr=r, nguess=ng, PACKAGE = "spatstat") }, ijd = { .Call("cross3IJDpairs", xx1=Xx, yy1=Xy, zz1=Xz, xx2=Yx, yy2=Yy, zz2=Yz, rr=r, nguess=ng, PACKAGE = "spatstat") }) names(a) <- nama ## convert i,j indices to original sequence a$i <- ooX[a$i] a$j <- ooY[a$j] return(a) } nuttink <- function(x) numeric(0) crosspairs.pp3 }) spatstat/R/anova.mppm.R0000644000176200001440000002313513614463173014531 0ustar liggesusers# # anova.mppm.R # # $Revision: 1.16 $ $Date: 2020/01/30 05:06:54 $ # anova.mppm <- local({ do.gripe <- function(...) warning(paste(...), call.=FALSE) dont.gripe <- function(...) NULL tests.choices <- c("Chisq", "LRT", "Rao", "score", "F", "Cp") tests.avail <- c("Chisq", "LRT", "Rao", "score") tests.random <- c("Chisq", "LRT") tests.Gibbs <- c("Chisq", "LRT") totalnquad <- function(fit) sum(sapply(quad.mppm(fit), n.quad)) totalusedquad <- function(fit) with(fit$Fit$moadf, sum(.mpl.SUBSET)) fmlaString <- function(z) { paste(as.expression(formula(z))) } ## interString <- function(z) { as.interact(z)$creator } anova.mppm <- function(object, ..., test=NULL, adjust=TRUE, fine=FALSE, warn=TRUE) { gripe <- if(warn) do.gripe else dont.gripe argh <- list(...) 
## trap outmoded usage if("override" %in% names(argh)) { gripe("Argument 'override' is superseded and was ignored") argh <- argh[-which(names(argh) == "override")] } ## list of models objex <- append(list(object), argh) ## Check each model is an mppm object if(!all(sapply(objex, is.mppm))) stop(paste("Arguments must all be", sQuote("mppm"), "objects")) ## are all models Poisson? pois <- all(sapply(objex, is.poisson.mppm)) gibbs <- !pois ## handle anova for a single object expandedfrom1 <- FALSE if(length(objex) == 1 && gibbs) { ## we can't rely on anova.glm in this case ## so we have to re-fit explicitly Terms <- drop.scope(object) if((nT <- length(Terms)) > 0) { ## generate models by adding terms sequentially objex <- vector(mode="list", length=nT+1) envy <- environment(terms(object)) for(n in 1L:nT) { ## model containing terms 1, ..., n-1 fmla <- paste(". ~ . - ", paste(Terms[n:nT], collapse=" - ")) fmla <- as.formula(fmla) calln <- update(object, fmla, evaluate=FALSE) objex[[n]] <- eval(calln, envy) } ## full model objex[[nT+1L]] <- object expandedfrom1 <- TRUE } } ## All models fitted using same method? 
Fits <- lapply(objex, getElement, name="Fit") fitter <- unique(unlist(lapply(Fits, getElement, name="fitter"))) if(length(fitter) > 1) stop(paste("Models are incompatible;", "they were fitted by different methods (", paste(fitter, collapse=", "), ")" )) ## Choice of test if(fitter == "glmmPQL") { ## anova.lme requires different format of `test' argument ## and does not recognise 'dispersion' if(is.null(test)) test <- FALSE else { test <- match.arg(test, tests.choices) if(!(test %in% tests.random)) stop(paste("Test", dQuote(test), "is not implemented for random effects models")) test <- TRUE } } else if(!is.null(test)) { test <- match.arg(test, tests.choices) if(!(test %in% tests.avail)) stop(paste("test=", dQuote(test), "is not yet implemented"), call.=FALSE) if(!pois && !(test %in% tests.Gibbs)) stop(paste("test=", dQuote(test), "is only implemented for Poisson models"), call.=FALSE) } ## Extract glm fit objects fitz <- lapply(Fits, getElement, name="FIT") ## Ensure all models were fitted using GLM, or all were fitted using GAM isgam <- sapply(fitz, inherits, what="gam") isglm <- sapply(fitz, inherits, what="glm") usegam <- any(isgam) if(usegam && any(isglm)) { gripe("Models were re-fitted with use.gam=TRUE") objex <- lapply(objex, update, use.gam=TRUE) } ## Finally do the appropriate ANOVA opt <- list(test=test) if(fitter != "glmmPQL") opt <- append(opt, list(dispersion=1)) result <- try(do.call(anova, append(fitz, opt))) if(inherits(result, "try-error")) stop("anova failed") ## Remove approximation-dependent columns if present result[, "Resid. Dev"] <- NULL ## replace 'residual df' by number of parameters in model if("Resid. Df" %in% names(result)) { ## count number of quadrature points used in each model nq <- totalusedquad(objex[[1L]]) result[, "Resid. Df"] <- nq - result[, "Resid. Df"] names(result)[match("Resid. 
Df", names(result))] <- "Npar" } ## edit header if(!is.null(h <- attr(result, "heading"))) { ## remove .mpl.Y and .logi.Y from formulae if present h <- gsub(".mpl.Y", "", h) h <- gsub(".logi.Y", "", h) ## delete GLM information if present h <- gsub("Model: quasi, link: log", "", h) h <- gsub("Model: binomial, link: logit", "", h) h <- gsub("Response: ", "", h) ## remove blank lines (up to 4 consecutive blanks can occur) for(i in 1L:5L) h <- gsub("\n\n", "\n", h) if(length(objex) > 1 && length(h) > 1) { ## anova(mod1, mod2, ...) ## change names of models fmlae <- unlist(lapply(objex, fmlaString)) # intrx <- unlist(lapply(objex, interString)) h[2L] <- paste("Model", paste0(1L:length(objex), ":"), fmlae, # "\t", # intrx, collapse="\n") } ## Add explanation if we did the stepwise thing ourselves if(expandedfrom1) h <- c(h[1L], "Terms added sequentially (first to last)\n", h[-1]) ## Contract spaces in output if spatstat.options('terse') >= 2 if(!waxlyrical('space')) h <- gsub("\n$", "", h) ## Put back attr(result, "heading") <- h } if(adjust && !pois) { ## issue warning, if not already given if(warn) warn.once("anovaMppmAdjust", "anova.mppm now computes the *adjusted* deviances", "when the models are not Poisson processes.") ## Corrected pseudolikelihood ratio nmodels <- length(objex) if(nmodels > 1) { cfac <- rep(1, nmodels) for(i in 2:nmodels) { a <- objex[[i-1]] b <- objex[[i]] df <- length(coef(a)) - length(coef(b)) if(df > 0) { ibig <- i-1 ismal <- i } else { ibig <- i ismal <- i-1 df <- -df } bigger <- objex[[ibig]] smaller <- objex[[ismal]] if(df == 0) { gripe("Models", i-1, "and", i, "have the same dimension") } else { bignames <- names(coef(bigger)) smallnames <- names(coef(smaller)) injection <- match(smallnames, bignames) if(any(uhoh <- is.na(injection))) { gripe("Unable to match", ngettext(sum(uhoh), "coefficient", "coefficients"), commasep(sQuote(smallnames[uhoh])), "of model", ismal, "to coefficients in model", ibig) } else { thetaDot <- 0 * coef(bigger) 
thetaDot[injection] <- coef(smaller) JH <- vcov(bigger, what="all", new.coef=thetaDot, fine=fine) # J <- if(!logi) JH$Sigma else (JH$Sigma1log+JH$Sigma2log) # H <- if(!logi) JH$A1 else JH$Slog J <- JH$fisher H <- JH$internals$A1 G <- H%*%solve(J)%*%H if(df == 1) { cfac[i] <- H[-injection,-injection]/G[-injection,-injection] } else { Res <- lapply(subfits(bigger), residuals, type="score", drop=TRUE, new.coef=thetaDot, dropcoef=TRUE) U <- sumcompatible(lapply(Res, integral.msr), names(thetaDot)) Uo <- U[-injection] Uo <- matrix(Uo, ncol=1) Hinv <- solve(H) Ginv <- solve(G) Hoo <- Hinv[-injection,-injection, drop=FALSE] Goo <- Ginv[-injection,-injection, drop=FALSE] ## ScoreStat <- t(Uo) %*% Hoo %*% solve(Goo) %*% Hoo %*% Uo HooUo <- Hoo %*% Uo ScoreStat <- t(HooUo) %*% solve(Goo) %*% HooUo ## cfac[i] <- ScoreStat/(t(Uo) %*% Hoo %*% Uo) cfac[i] <- ScoreStat/(t(HooUo) %*% Uo) } } } } ## apply Pace et al (2011) adjustment to pseudo-deviances ## (save attributes of 'result' for later reinstatement) oldresult <- result result$Deviance <- AdjDev <- result$Deviance * cfac cn <- colnames(result) colnames(result)[cn == "Deviance"] <- "AdjDeviance" if("Pr(>Chi)" %in% colnames(result)) result[["Pr(>Chi)"]] <- c(NA, pchisq(abs(AdjDev[-1L]), df=abs(result$Df[-1L]), lower.tail=FALSE)) class(result) <- class(oldresult) attr(result, "heading") <- attr(oldresult, "heading") } } return(result) } sumcompatible <- function(xlist, required) { result <- numeric(length(required)) names(result) <- required for(x in xlist) { namx <- names(x) if(!all(ok <- (namx %in% required))) stop(paste("Internal error in sumcompatible:", "list entry", i, "contains unrecognised", ngettext(sum(!ok), "value", "values"), commasep(sQuote(namx[!ok]))), call.=FALSE) inject <- match(namx, required) result[inject] <- result[inject] + x } return(result) } anova.mppm }) spatstat/R/linnet.R0000644000176200001440000004517013555737605013761 0ustar liggesusers# # linnet.R # # Linear networks # # $Revision: 1.71 $ $Date: 
2019/10/29 04:24:45 $ # # An object of class 'linnet' defines a linear network. # It includes the following components # # vertices (ppp) vertices of network # # m (matrix) adjacency matrix # # lines (psp) edges of network # # dpath (matrix) matrix of shortest path distances # between each pair of vertices # # from, to (vectors) map from edges to vertices. # The endpoints of the i-th segment lines[i] # are vertices[from[i]] and vertices[to[i]] # # # FUNCTIONS PROVIDED: # linnet creates an object of class "linnet" from data # print.linnet print an object of class "linnet" # plot.linnet plot an object of class "linnet" # # Make an object of class "linnet" from the minimal data linnet <- function(vertices, m, edges, sparse=FALSE, warn=TRUE) { if(missing(m) && missing(edges)) stop("specify either m or edges") if(!missing(m) && !missing(edges)) stop("do not specify both m and edges") # validate inputs stopifnot(is.ppp(vertices)) nv <- npoints(vertices) if(nv <= 1) { m <- matrix(FALSE, nv, nv) from <- to <- integer(0) } else if(!missing(m)) { # check logical matrix or logical sparse matrix if(!is.matrix(m) && !inherits(m, c("lgCMatrix", "lgTMatrix"))) stop("m should be a matrix or sparse matrix") stopifnot(is.logical(m) && isSymmetric(m)) if(nrow(m) != vertices$n) stop("dimensions of matrix m do not match number of vertices") if(any(diag(m))) { warning("diagonal entries of the matrix m should not be TRUE; ignored") diag(m) <- FALSE } sparse <- !is.matrix(m) ## determine 'from' and 'to' vectors ij <- which(m, arr.ind=TRUE) ij <- ij[ ij[,1L] < ij[,2L], , drop=FALSE] from <- ij[,1L] to <- ij[,2L] } else { ## check (from, to) pairs stopifnot(is.matrix(edges) && ncol(edges) == 2) if(any((edges %% 1) != 0)) stop("Entries of edges list should be integers") if(any(self <- (edges[,1L] == edges[,2L]))) { warning("edge list should not join a vertex to itself; ignored") edges <- edges[!self, , drop=FALSE] } np <- npoints(vertices) if(any(edges > np)) stop("index out-of-bounds in 
edges list") from <- edges[,1L] to <- edges[,2L] ## avoid duplication in either sense up <- (from < to) ee <- cbind(ifelse(up, from , to), ifelse(up, to, from)) if(anyDuplicated(ee)) { warning("Duplicated segments were ignored", call.=FALSE) ok <- !duplicated(ee) from <- from[ok] to <- to[ok] } ## convert to adjacency matrix if(!sparse) { m <- matrix(FALSE, np, np) m[edges] <- TRUE } else m <- sparseMatrix(i=from, j=to, x=TRUE, dims=c(np, np)) m <- m | t(m) } # create line segments xx <- vertices$x yy <- vertices$y lines <- psp(xx[from], yy[from], xx[to], yy[to], window=vertices$window, check=FALSE) # tolerance toler <- default.linnet.tolerance(lines) ## pack up out <- list(vertices=vertices, m=m, lines=lines, from=from, to=to, sparse=sparse, window=vertices$window, toler=toler) class(out) <- c("linnet", class(out)) ## finish ? if(sparse) return(out) # compute matrix of distances between adjacent vertices n <- nrow(m) d <- matrix(Inf, n, n) diag(d) <- 0 d[m] <- pairdist(vertices)[m] ## now compute shortest-path distances between each pair of vertices out$dpath <- dpath <- dist2dpath(d) if(warn && any(is.infinite(dpath))) warning("Network is not connected", call.=FALSE) # pre-compute bounding radius out$boundingradius <- boundingradius(out) return(out) } print.linnet <- function(x, ...) { nv <- x$vertices$n nl <- x$lines$n splat("Linear network with", nv, ngettext(nv, "vertex", "vertices"), "and", nl, ngettext(nl, "line", "lines")) if(!is.null(br <- x$boundingradius) && is.infinite(br)) splat("[Network is not connected]") print(as.owin(x), prefix="Enclosing window: ") return(invisible(NULL)) } summary.linnet <- function(object, ...) 
{ deg <- vertexdegree(object) sparse <- object$sparse %orifnull% is.null(object$dpath) result <- list(nvert = object$vertices$n, nline = object$lines$n, nedge = sum(deg)/2, unitinfo = summary(unitname(object)), totlength = sum(lengths.psp(object$lines)), maxdegree = max(deg), ncomponents = length(levels(connected(object, what="labels"))), win = as.owin(object), sparse = sparse) if(!sparse) { result$diam <- diameter(object) result$boundrad <- boundingradius(object) } result$toler <- object$toler class(result) <- c("summary.linnet", class(result)) result } print.summary.linnet <- function(x, ...) { dig <- getOption('digits') with(x, { splat("Linear network with", nvert, ngettext(nvert, "vertex", "vertices"), "and", nline, ngettext(nline, "line", "lines")) splat("Total length", signif(totlength, dig), unitinfo$plural, unitinfo$explain) splat("Maximum vertex degree:", maxdegree) if(sparse) splat("[Sparse matrix representation]") else splat("[Non-sparse matrix representation]") if(ncomponents > 1) { splat("Network is disconnected: ", ncomponents, "connected components") } else { splat("Network is connected") if(!sparse) { splat("Diameter:", signif(diam, dig), unitinfo$plural) splat("Bounding radius:", signif(boundrad, dig), unitinfo$plural) } } if(!is.null(x$toler)) splat("Numerical tolerance:", signif(x$toler, dig), unitinfo$plural) print(win, prefix="Enclosing window: ") }) return(invisible(NULL)) } plot.linnet <- function(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE, do.plot=TRUE) { if(is.null(main)) main <- short.deparse(substitute(x)) stopifnot(inherits(x, "linnet")) bb <- Frame(x) if(!do.plot) return(invisible(bb)) lines <- as.psp(x) if(!add) { # initialise new plot w <- as.owin(lines) if(window) plot(w, ..., main=main) else plot(w, ..., main=main, type="n") } # plot segments and (optionally) vertices do.call(plot, resolve.defaults(list(x=lines, show.all=FALSE, add=TRUE, main=main), list(...))) if(vertices) plot(x$vertices, add=TRUE) 
return(invisible(bb)) } as.psp.linnet <- function(x, ..., fatal=TRUE) { verifyclass(x, "linnet", fatal=fatal) return(x$lines) } vertices.linnet <- function(w) { verifyclass(w, "linnet") return(w$vertices) } nvertices.linnet <- function(x, ...) { verifyclass(x, "linnet") return(x$vertices$n) } nsegments.linnet <- function(x) { return(x$lines$n) } Window.linnet <- function(X, ...) { return(X$window) } "Window<-.linnet" <- function(X, ..., check=TRUE, value) { if(check) { X <- X[value] } else { X$window <- value X$lines$window <- value X$vertices$window <- value } return(X) } as.owin.linnet <- function(W, ...) { return(Window(W)) } as.linnet <- function(X, ...) { UseMethod("as.linnet") } as.linnet.linnet <- function(X, ..., sparse) { if(missing(sparse)) return(X) if(is.null(X$sparse)) X$sparse <- is.null(X$dpath) if(sparse && !(X$sparse)) { # delete distance matrix X$dpath <- NULL # convert adjacency matrix to sparse matrix X$m <- as(X$m, "sparseMatrix") X$sparse <- TRUE } else if(!sparse && X$sparse) { # convert adjacency to matrix X$m <- m <- as.matrix(X$m) edges <- which(m, arr.ind=TRUE) from <- edges[,1L] to <- edges[,2L] # compute distances to one-step neighbours n <- nrow(m) d <- matrix(Inf, n, n) diag(d) <- 0 coo <- coords(vertices(X)) d[edges] <- sqrt(rowSums((coo[from, 1:2] - coo[to, 1:2])^2)) # compute shortest path distance matrix X$dpath <- dist2dpath(d) # compute bounding radius X$boundingradius <- boundingradius(X) X$sparse <- FALSE } else if(!sparse) { # possibly update internals X$boundingradius <- boundingradius(X) } # possibly update internals X$circumradius <- NULL X$toler <- default.linnet.tolerance(X) return(X) } as.linnet.psp <- function(X, ..., eps, sparse=FALSE) { X <- selfcut.psp(X) V <- unique(endpoints.psp(X)) if(missing(eps) || is.null(eps)) { eps <- sqrt(.Machine$double.eps) * diameter(Frame(X)) } else { check.1.real(eps) stopifnot(eps >= 0) } if(eps > 0 && minnndist(V) <= eps) { gV <- marks(connected(V, eps)) xx <- as.numeric(by(V$x, gV, 
mean)) yy <- as.numeric(by(V$y, gV, mean)) V <- ppp(xx, yy, window=Window(X)) } first <- endpoints.psp(X, "first") second <- endpoints.psp(X, "second") from <- nncross(first, V, what="which") to <- nncross(second, V, what="which") if(any(reverse <- (from > to))) { newfrom <- ifelse(reverse, to, from) newto <- ifelse(reverse, from, to) from <- newfrom to <- newto } fromto <- cbind(from, to) nontrivial <- (from != to) & !duplicated(fromto) join <- fromto[nontrivial, , drop=FALSE] result <- linnet(V, edges=join, sparse=sparse) if(is.marked(X)) marks(result$lines) <- marks(X[nontrivial]) return(result) } unitname.linnet <- function(x) { unitname(x$window) } "unitname<-.linnet" <- function(x, value) { w <- x$window v <- x$vertices l <- x$lines unitname(w) <- unitname(v) <- unitname(l) <- value x$window <- w x$vertices <- v x$lines <- l return(x) } diameter.linnet <- function(x) { stopifnot(inherits(x, "linnet")) dpath <- x$dpath if(is.null(dpath)) return(NULL) else return(max(0, dpath)) } volume.linnet <- function(x) { sum(lengths.psp(x$lines)) } vertexdegree <- function(x) { verifyclass(x, "linnet") return(rowSums(x$m)) } circumradius.linnet <- function(x, ...) { .Deprecated("boundingradius.linnet") boundingradius.linnet(x, ...) } boundingradius.linnet <- function(x, ...) 
{ stopifnot(inherits(x, "linnet")) cr <- x$boundingradius %orifnull% x$circumradius if(!is.null(cr)) return(cr) dpath <- x$dpath if(is.null(dpath)) return(NULL) if(any(is.infinite(dpath))) return(Inf) if(nrow(dpath) <= 1) return(max(0,dpath)) from <- x$from to <- x$to lines <- x$lines nseg <- lines$n leng <- lengths.psp(lines) if(spatstat.options("Clinearradius")) { fromC <- from - 1L toC <- to - 1L nv <- npoints(vertices(x)) huge <- sum(leng) z <- .C("linearradius", ns = as.integer(nseg), from = as.integer(fromC), to = as.integer(toC), lengths = as.double(leng), nv = as.integer(nv), dpath = as.double(dpath), huge = as.double(huge), result = as.double(numeric(1)), PACKAGE = "spatstat") return(z$result) } sA <- sB <- matrix(Inf, nseg, nseg) for(i in 1:nseg) { # endpoints of segment i A <- from[i] B <- to[i] AB <- leng[i] sA[i,i] <- sB[i,i] <- AB/2 for(j in (1:nseg)[-i]) { # endpoints of segment j C <- from[j] D <- to[j] CD <- leng[j] AC <- dpath[A,C] AD <- dpath[A,D] BC <- dpath[B,C] BD <- dpath[B,D] # max dist from A to any point in segment j sA[i,j] <- if(AD > AC + CD) AC + CD else if(AC > AD + CD) AD + CD else (AC + AD + CD)/2 # max dist from B to any point in segment j sB[i,j] <- if(BD > BC + CD) BC + CD else if(BC > BD + CD) BD + CD else (BC + BD + CD)/2 } } # max dist from each A to any point in another segment mA <- apply(sA, 1, max) # max dist from each B to any point in another segment mB <- apply(sB, 1, max) # min of these min(mA, mB) } #################################################### # affine transformations #################################################### scalardilate.linnet <- function(X, f, ...) 
{ trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- X Y$vertices <- scalardilate(X$vertices, f=f) Y$lines <- scalardilate(X$lines, f=f) Y$window <- scalardilate(X$window, f=f) if(!is.null(X$dpath)) { Y$dpath <- f * X$dpath Y$boundingradius <- f * (X$boundingradius %orifnull% X$circumradius) Y$circumradius <- NULL } if(!is.null(X$toler)) X$toler <- makeLinnetTolerance(f * X$toler) return(Y) } affine.linnet <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "linnet") if(length(unique(eigen(mat)$values)) == 1) { # transformation is an isometry scal <- sqrt(abs(det(mat))) Y <- X Y$vertices <- affine(X$vertices, mat=mat, vec=vec, ...) Y$lines <- affine(X$lines, mat=mat, vec=vec, ...) Y$window <- affine(X$window, mat=mat, vec=vec, ...) if(!is.null(X$dpath)) { Y$dpath <- scal * X$dpath Y$boundingradius <- scal * (X$boundingradius %orifnull% X$circumradius) X$circumradius <- NULL } if(!is.null(Y$toler)) Y$toler <- makeLinnetTolerance(scal * Y$toler) } else { # general case vertices <- affine(X$vertices, mat=mat, vec=vec, ...) Y <- linnet(vertices, edges=cbind(X$from, X$to)) } return(Y) } shift.linnet <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "linnet") Y <- X if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; argument origin has precedence") locn <- interpretAsOrigin(origin, Window(X)) vec <- -locn } Y$window <- W <- shift(X$window, vec=vec, ...) v <- getlastshift(W) Y$vertices <- shift(X$vertices, vec=v, ...) Y$lines <- shift(X$lines, vec=v, ...) # tack on shift vector attr(Y, "lastshift") <- v return(Y) } rotate.linnet <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "linnet") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$vertices <- rotate(X$vertices, angle=angle, ...) Y$lines <- rotate(X$lines, angle=angle, ...) 
Y$window <- rotate(X$window, angle=angle, ...) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rescale.linnet <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } "[.linnet" <- function(x, i, ..., snip=TRUE) { if(!is.owin(i)) stop("In [.linnet: the index i should be a window", call.=FALSE) x <- repairNetwork(x) w <- i wp <- as.polygonal(w) if(is.mask(w)) { ## protect against pixellation artefacts pixel <- owin(w$xstep * c(-1,1)/2, w$ystep * c(-1,1)/2) wp <- MinkowskiSum(wp, pixel) wp <- intersect.owin(wp, Frame(w)) } ## Find vertices that lie inside window vertinside <- inside.owin(x$vertices, w=wp) from <- x$from to <- x$to if(snip) { ## For efficiency, first restrict network to relevant segments. ## Find segments EITHER OF whose endpoints lie in 'w' ... okedge <- vertinside[from] | vertinside[to] ## ... or which cross the boundary of 'w' xlines <- as.psp(x) wlines <- edges(wp) if(any(miss <- !okedge)) { hits <- apply(test.crossing.psp(xlines[miss], wlines), 1, any) okedge[miss] <- hits } ## extract relevant subset of network graph x <- thinNetwork(x, retainedges=okedge) ## Now add vertices at crossing points with boundary of 'w' b <- crossing.psp(xlines, wlines) x <- insertVertices(x, unique(b)) boundarypoints <- attr(x, "id") ## update data from <- x$from to <- x$to vertinside <- inside.owin(x$vertices, w=wp) vertinside[boundarypoints] <- TRUE } ## find segments whose endpoints BOTH lie in 'w' edgeinside <- vertinside[from] & vertinside[to] ## extract relevant subset of network xnew <- thinNetwork(x, retainedges=edgeinside) ## adjust window efficiently Window(xnew, check=FALSE) <- w return(xnew) } # # interactive plot for linnet objects # iplot.linnet <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) if(!inherits(x, "linnet")) stop("x should be a 
linnet object") ## predigest v <- vertices(x) deg <- vertexdegree(x) dv <- textstring(v, txt=paste(deg)) y <- layered(lines=as.psp(x), vertices=v, degree=dv) iplot(y, ..., xname=xname, visible=c(TRUE, FALSE, FALSE)) } pixellate.linnet <- function(x, ...) { pixellate(as.psp(x), ...) } connected.linnet <- function(X, ..., what=c("labels", "components")) { verifyclass(X, "linnet") what <- match.arg(what) nv <- npoints(vertices(X)) ie <- X$from - 1L je <- X$to - 1L ne <- length(ie) zz <- .C("cocoGraph", nv = as.integer(nv), ne = as.integer(ne), ie = as.integer(ie), je = as.integer(je), label = as.integer(integer(nv)), status = as.integer(integer(1L)), PACKAGE = "spatstat") if (zz$status != 0) stop("Internal error: connected.linnet did not converge") lab <- zz$label + 1L lab <- as.integer(factor(lab)) lab <- factor(lab) if(what == "labels") return(lab) nets <- list() subsets <- split(seq_len(nv), lab) for(i in seq_along(subsets)) nets[[i]] <- thinNetwork(X, retainvertices=subsets[[i]]) return(nets) } is.connected.linnet <- function(X, ...) { if(!is.null(dpath <- X$dpath)) return(all(is.finite(dpath))) lab <- connected(X, what="labels") npieces <- length(levels(lab)) return(npieces == 1) } crossing.linnet <- function(X, Y) { X <- as.linnet(X) if(!inherits(Y, c("linnet", "infline", "psp"))) stop("L should be an object of class psp, linnet or infline", call.=FALSE) ## convert infinite lines to segments if(inherits(Y, "linnet")) Y <- as.psp(Y) if(inherits(Y, "infline")) { Y <- clip.infline(Y, Frame(X)) id <- marks(Y) lev <- levels(id) } else { id <- lev <- seq_len(nsegments(Y)) } ## extract segments of network S <- as.psp(X) ## find crossing points SY <- crossing.psp(S, Y, fatal=FALSE, details=TRUE) if(is.null(SY) || npoints(SY) == 0) return(lpp(L=X)) SY <- as.data.frame(SY) Z <- with(as.data.frame(SY), as.lpp(x=x, y=y, seg=iA, tp=tA, L=X, marks=factor(id[as.integer(jB)], levels=lev))) return(Z) } density.linnet <- function(x, ...) { density.psp(as.psp(x), ...) 
} spatstat/MD50000644000176200001440000025165613624534363012455 0ustar liggesusersb14727897b5ac969121dab2e5f49862b *DESCRIPTION fd539ec981b544f419c62c9d64dfb36a *NAMESPACE ebc7398de36f18464d305c2a835ceeb4 *NEWS 5e1711cba2ad40a53219b172b6fb99dc *R/FGmultiInhom.R f85ebd958263c02958f6bc1339b3509a *R/Fest.R 75d4be744afa082d58a8c9b6ecac469a *R/First.R f010f8e0f7b596cfaa0762a5e502a7fb *R/GJfox.R ec11370ccf4fe15a8379d672738b061d *R/Gcom.R 634f9eb8fec91a40ed0eb1389dbd7042 *R/Gest.R d3fa3abc1515437f625927587ac80278 *R/Gmulti.R 1101d753c8f35af184bfa8ff36a64486 *R/Gres.R 329b5c34b765bf1c99b3a9c9935d574d *R/Hest.R a50201e1887e8d1ee49bd00d99f7f870 *R/Iest.R 74c04764ad8be8db745ba9399c01b6b1 *R/Jest.R 9588ce22aa348a0faa893a0521ff7157 *R/Jinhom.R eb245a583d16d062f869a7e9eca0c105 *R/Jmulti.R 2b9b60b66e387026fcb0776b1a6a33b5 *R/Kcom.R fc4aeddad1d05e867cc4d8e21cd407ba *R/Kest.R c88d8faf0034709546f58ffe195b461c *R/Kinhom.R feba10e1458971e7628332a17e06dbc6 *R/Kmeasure.R a8742620874e75312563d5844b76ac3d *R/Kmodel.R be3da5fb9aca642fb95b6e5365186c8b *R/Kmulti.R 895a4a7c3912d0196cde3f48e3c08f76 *R/Kmulti.inhom.R abbf6db440f53371a4f757cb17274de6 *R/Kres.R e02e33a946af3db753bc986a091c6bb9 *R/Kscaled.R 94033aebde371087c4c246cb9fc7ef16 *R/Ksector.R 4e2cdb96fc1f583ebab0b39195656906 *R/Math.im.R 6ea4db23fe667009de99cf5bacfc5511 *R/Math.imlist.R 9606a78771526d236428069941076554 *R/Math.linim.R 15200464f18f33ee9e035c9121666bb4 *R/Tstat.R 69fd69139c158314a9cfc75cdf7c4762 *R/aaaa.R 6bfaccaa0b449d11076b688e550f23c7 *R/adaptive.density.R 286e37ecff8524697d89955543166505 *R/addvar.R b3cdd61aa043592e50b327fa3fa0b86b *R/affine.R 948e229cfe013701e32cbf3690856c2d *R/allstats.R 8c63a0e36908a251b7e0d27e5f37113b *R/alltypes.R 38a0a148b7b2dafa4292d724c096f958 *R/anova.mppm.R 1b6568b2e73859b71afc07d02d5e596a *R/anova.ppm.R 842580bf1e8ff51f0749f24582ffe646 *R/applynbd.R abb41e390520288c695cb1a8e1eeeb7e *R/areadiff.R 3d805f994d6c8b0a5c571e0d311ea820 *R/areainter.R 094341ee22b09e29378067a397d52d2d *R/as.im.R 
e933ffbde340ca92d90226034dc027eb *R/auc.R 7270a0b79a1555829f4a7b6417d31d33 *R/badgey.R 85b90545fe7081b0deef6d0b2ed2481a *R/bc.R 789f24a9b671b204cca2bea93c57a49e *R/beginner.R 9b93ad8646c55360e0246c8f23920326 *R/bermantest.R ce87ead8cf622a48e637625a0f28d2d4 *R/blur.R 3964c7a7a47fa8f73a9ed2842d34c674 *R/boundingbox.R cc3f8bffb96b961a96ba8ad83c30886c *R/boundingcircle.R da6a1d856e075b932237b70dad56dc79 *R/breakpts.R c64b05c0211d3055f579bd86e9da8a2c *R/bugtable.R 9e05d38aa26b0d4d58eab7df3abcf7b6 *R/bw.CvL.R 3ea771d435305f127425775d8719122b *R/bw.abram.R 84b8e95005baa31d3aba33be265b1458 *R/bw.diggle.R a2b2c4ea848a2903acbcfec3062be42d *R/bw.optim.R ee83e610fc062b34f9a0b171475e477d *R/bw.pcf.R a728e73d5385f31086647991e0453c7f *R/bw.ppl.R 7458e40ffda1e5eda31f1fcf78548f17 *R/bw.scott.R af810a4aec1706ad6f5b8ea1de775f0e *R/by.ppp.R 8be0fcaa6b54ab8452154d1b6edad38e *R/cdf.test.mppm.R 000b355784ae2aeaceed61d3eea8da3a *R/cdftest.R 5efd1803e37d7b6109ce6a9e60879748 *R/centroid.R eca522aca0b04c9773424ba04cd39767 *R/circarcs.R ab416fd2e977a6192d5764c5dce2eda9 *R/circdensity.R c17c44e7db04f6205bbcc061b9bdf7fe *R/clarkevans.R 55b8ccb851f8a2cf879ff18a57963304 *R/classes.R e317941f2ea501d3f8f31733bd17173b *R/clickjoin.R 46f71fabebc3f2cd3060bbc35dcab8d4 *R/clicklpp.R 661b2e31b502ac5b57ddd74f8c5b89c6 *R/clickpoly.R 741d77e0d5b228697f7510dbcf6be9a2 *R/clickppp.R 31ed6dd811249fe617fb3bc005f88525 *R/clip.psp.R d0cff4b72c07b7fab84a7990dc56ccb3 *R/close3Dpairs.R 53dab0d65d9adc89d559246c14bec11f *R/closepairs.R bfa18fdffe18d25aaf5566bd5faa59d0 *R/clusterfunctions.R 0110f3453b566e056c5616dbb79a9315 *R/clusterinfo.R b2a666ee329acd511ee4f5f10dad5c79 *R/clusterset.R d3faf35d8465b51a3bc99e24f83f95f5 *R/colourschemes.R 118084a2bf44e7583df5f1b7c2aa6fbf *R/colourtables.R ae6c977b7a2089699782eeb0591864b9 *R/colourtools.R 8b56cc31365f5a6fa4fa148894eb0a67 *R/compareFit.R 49649c7b2fa2c3789f7392dd31bb534f *R/compileK.R a00723d970d1312c47c47dd2ada3ba37 *R/concom.R 248ec0b863abef641443920d1d25996a 
*R/connected.R eecb29c4bf786f00b4eff413d5a04efd *R/convexify.R 507c554ad23eb96d9f5e7dde076fb80e *R/covariates.R b85a9e252d4d17ae574abeffc5e21f96 *R/covering.R da21dc26f7cdb0a08c645116a05481ce *R/crossdistlpp.R 96e52355fc9f3275a0d195245a047f77 *R/cut.ppp.R 93e6478f97c6e79a0d56f717d1cde65c *R/dclftest.R 53e0fb6d1af23e321722eea5e595653a *R/defaultwin.R 52f6a3733caa16ca6b4285d21bda6578 *R/deldir.R 539a7b69048787ca2824e60941858704 *R/deltametric.R 7674d088bae7fcd3516fff36aa328764 *R/density.lpp.R 1e9f6e46df8d306eb40ad93c98d8f14c *R/density.ppp.R ec71d63e228695df4ffa581b667b8148 *R/density.psp.R e4c62474d425fabda362a2a1ec1887d5 *R/densityAdaptiveKernel.R 8b2eabdfa0be0e04f5f621dacca45a16 *R/densityVoronoi.R 08f2d755822392ea11931fbbf23ff815 *R/densityfun.R b03f48aeb5aed9222be9b5f7f63cd3c8 *R/densitylppVoronoi.R 6706d26fc720c429660657026da0ccac *R/derivfv.R c027a0425cb16790aa2f605b68e48a75 *R/detPPF-class.R 2926aabd8533958849d4b1d4858823d0 *R/detpointprocfamilyfun.R 19f251b81e75d58d5d7445baca29bfb3 *R/dffit.R 8d410e4b0dbbc0a6c584c886b166769e *R/dg.R ce62afdcbd8a42100ca78f356a9f1a2d *R/dgs.R bed9220a5fa073e11ec9d3b4a2029d84 *R/diagnoseppm.R 46de7f85cef705b1f8c9fc7b4cee21c6 *R/diagram.R eef012a6deaa2493e4229326d3cced70 *R/digestCovariates.R 2382f86d9fe3dc1ad12175d6249706d0 *R/disc.R 049d7673b8ecf3a9ff4befd642f8f5a4 *R/discarea.R a8ec36b9d6c7dae5266f2c1961296c74 *R/dist2dpath.R eb8b9b627f5ffdb68e93a208ff165a77 *R/distan3D.R 13be51ada57e62514c369a9ee7d502ab *R/distances.R a90f2d580f886a276c423ff8cdc0221a *R/distances.psp.R 899d1b827b8af7fa2f4033c8142a8935 *R/distanxD.R 9675f83d15361064edbe88f751122f83 *R/distbdry.R aa66fd2c2cb128704755391efffe402f *R/distcdf.R a25105ed90e5f77b6e2145aa2996de5f *R/distfun.R f8d4fb83fb4129516c7d964047cb054c *R/distfunlpp.R 3d5b8e18bb3fd5ff0176a9c0e008bf27 *R/distmap.R 93350b238da5386adf9842f53263eeca *R/dppm.R 3e07b7fd60e16cd4fe992e5f9a13c54b *R/dppmclass.R ed7beb4930906f00a1d7b9f0fa47f18c *R/dummify.R 9c33a28d219e0a17c118a98574e4b8d0 *R/dummy.R 
4ce4e64b746a030e13cebcd3ce9e4172 *R/edgeRipley.R b769a2c2a0e7da802acb15c9b0eaec4e *R/edgeTrans.R a2f665458ac388d86aa0c2df9322a8ae *R/edges2triangles.R 1539189a0ae07f00bc8488fb54b8f071 *R/edit.R f41cfef5a017692ee84c46e395aa4c36 *R/eem.R a816a44b139137b878fb2bcc08c5e4b8 *R/effectfun.R 4c90a3ba4904f33b4cff945df38273db *R/envelope.R 87e73cf8b09b29048114ce7892d5f0c6 *R/envelope3.R 70b7255db85f551617e5ffac3c610299 *R/envelopeArray.R 9e8172e789391e6329e03e28139c65b3 *R/envelopelpp.R e330bb6fa7a99e028497cf61aa37905c *R/eval.fasp.R 24335e6a155752aa61ec305f504143f0 *R/eval.fv.R 25a4c91ebf44fb817ad3fbc2510d15dc *R/eval.im.R f0851b7d38ef6af2a93572acbb6da062 *R/evalcovar.R 029b73027d490aafd38cc0be58468673 *R/ewcdf.R cf259b89afe368a0b167a6c58c615648 *R/exactMPLEstrauss.R 93f4d4218957ab82492423dfb49f2522 *R/exactPdt.R ed28a5a602cccb2462693d267888c468 *R/exactdt.R 55128c282f0fbecda3fbccec5c55be21 *R/factors.R 6c3913843ec541a115f8b3c8a2cb1fb3 *R/fardist.R dd968a2f9b87caf7bbd83ecb20ca7f40 *R/fasp.R 097669d533be797913bc992694afefe6 *R/fgk3.R d26edade9f2a1188cfcf6a7ac08f13ad *R/fii.R 78db49cd87d8eb1d1c18e2a67ea3adf4 *R/fiksel.R 087b82c7dcc47cbd94b449b8a3c2e5e3 *R/fitted.mppm.R 291bb8b3370e56318147e57573f0ab6c *R/fitted.ppm.R 9b4accc71b99440893950bbb817ba851 *R/flipxy.R ca97c5fe546c7b2d1ea8b370ee2b1cf8 *R/fourierbasis.R 78fdc58026c4b2c6747b07289224f1b6 *R/fryplot.R d352a17dd8236f8c0ad6956af10662a8 *R/funxy.R 9be1a8820effad1eb8b414a280ded79f *R/fv.R e9325f4fe08275c2d63977742f9229e9 *R/geyer.R eb1f3c876a7c6627ca82ec3d8c193260 *R/hackglmm.R 9b3c9727fdceeab5be3ed00dc639ddce *R/hardcore.R f1e16ee9c975eda27002f44980d1ea57 *R/harmonic.R 27d1b0c9a1d43eab62f466db5fa90b85 *R/hasclose.R 7576337da12b9579db9130c9e609822f *R/hasenvelope.R 44ad75b9d79a08a4689e2a101d8c6bd5 *R/headtail.R 123085aba3859361ca3f643e7c956c6f *R/hermite.R 1e6f05124674b83e0d62227fa4588b2c *R/hexagons.R 0fb7e0bb6cdf5a5a43a68bf3f63b04c4 *R/hierarchy.R 145717c89dabd410a2a3debb5ae312a9 *R/hierhard.R 
f03acc7a15870d424447b02f891bc25a *R/hierpair.family.R 3c1a28af26ec7ce13f8b8176dc74b64f *R/hierstrauss.R 8403d327d788f178e589a59ff3e0a0a5 *R/hierstrhard.R cb789e61235a77960c6b51d880da4c8d *R/ho.R e3447ff115c6062ebb3c5d3f30b68f1b *R/hopskel.R bef6d9101f9409d566d76059ed9f3993 *R/hybrid.R 7c059999373d56581fa6e6a85695322d *R/hybrid.family.R a268153b47a381534ef3af394dc5036b *R/hyperframe.R 58dd57cbaeeb0afb5a1b252bf8442e17 *R/hypersub.R 912c6086a8c494075d1e10614723642c *R/idw.R 26b854c1c0226d2d9c36d6c916c3a78f *R/images.R 7f9b47edabadcb43779e573342359fe4 *R/indicator.R 6788b75aaa4eb8c1fb0f27ee17442084 *R/indices.R 97169059771b69f8f89ea8dc15d2867f *R/infline.R b5d86a0ef4b0631df4e32747acd13171 *R/inforder.family.R a694f632876837902ec31f8102f1fa9a *R/intensity.R 291d095691d0ab78cb60ed69b84c466e *R/interact.R 475c2212ff5bb6318748b0c8243c4caf *R/interactions.R a2ee158ac416139dd1d9f16db92e14b3 *R/interp.im.R db5574753bf54b88204aee53f13f86e3 *R/iplot.R e60ea20fa4389884ee22c550d12ef058 *R/iplotlayered.R 45761eca09fd4c1617b46fcbc2e4812e *R/ippm.R bd25142ea1cb427261ddbafd92dd1446 *R/is.cadlag.R c613631d3b4d651fc3f409a306378f8b *R/is.subset.owin.R e4b9b1f05b000b5a69c2e992ca7aaa09 *R/istat.R 849cacf0b093f2199ac0b79bc517f0e6 *R/kernel2d.R 3bcee3e4bca044d4e7fe19b5d25809fc *R/kernels.R 79abe3ad5c54621a3dc219c8051ad102 *R/kmrs.R 2a9eb72099be1caa4c73ae8c71eca66b *R/kppm.R c6a963c659a5874585a22522cc07d285 *R/laslett.R a44ea103d52b7cfc4445578bf5636ac6 *R/layered.R 74171b1aeb5177def1360974a23a385f *R/lennard.R 5affa028f6bbd36a38ebb1ecba7e7f5e *R/levelset.R bbed51b27cd441b2d697021626548ab2 *R/leverage.R c071ad8afe1e37b51108915e88673ac0 *R/linalg.R ec116303270d99c0c08092af0caf8db8 *R/lindirichlet.R 510dd2efb4f5c1c556a4af8a0a888685 *R/linearK.R ee8ad61f7ececcb8b114ca8039d49520 *R/linearKmulti.R d5488b64005e66bdfc3ba182d2a09ea6 *R/lineardisc.R f2f04bfaec86cf00552471bb235c43db *R/linearmrkcon.R 045390cc3874a70ba2d34b23d79e9252 *R/linearpcf.R 799aa539c47b94518c01d996d4c44ff7 *R/linearpcfmulti.R 
d4afccb12a075e58904517028bdcc4e6 *R/linequad.R 6076009f96cbc0a9e8bebab3bf448c34 *R/linfun.R 6a2d1410b09ee9d0b3e483b4b43b0683 *R/linim.R 3069ffaf8034ca504cae7131781808f7 *R/linnet.R b5ebae624f83a3fe59dad74e11f12b0f *R/linnetsurgery.R 3430d08adca3a9dfa41f88f43d67637b *R/lintess.R 2a55816423fdd689d04155e9eca14d10 *R/lintessmakers.R 52a89f9b4655094430fc919796ae31c7 *R/listof.R c7d80f51dbc5e189aae7717b2c0070c4 *R/lixellate.R 67c395118252398fc41b5e2c547ae19d *R/localK.R b76179a28c5d3948792b128278766653 *R/localKcross.R eda3f640f38a6c1bd2e59915c2126175 *R/localpcf.R f3b05988bcfd64ffd2b61fbbd1a44a13 *R/logistic.R e399296061428b6a64778d0c403d0be9 *R/lohboot.R 2d5729f6ceefb4a9ffd8e5956316230d *R/lpp.R 3048c8429dc6309959857587a13e8814 *R/lppm.R 9955c31d58cf809b4b03f348456ee1f5 *R/lurking.R e8945f517b956706299a0aa8d322a335 *R/lurkmppm.R a028c6e6fe54e716658c78e79c5831f7 *R/markcorr.R 95cda2e43a41b86f31c6b8d68d7a62b0 *R/markmark.R 1da7f9164a083d1a52ec557ebeea5e55 *R/marks.R 6a3d9ee234a765226a04a6c3c23663eb *R/marktable.R ea3c639566286389d8c3aa4549f1cb6e *R/matrixpower.R 8bbd475c96bf63b7059d044ad853721c *R/measures.R 1fcf39b3e5e2a4e136ddf4c122bbb875 *R/metricPdt.R c8199b7684b43a113950e75c46a6f4fa *R/mincontrast.R 6abbec9263d9ff7773029523af1c1aaa *R/minkowski.R 4cca10884c74f5bba1ea35c2fa3da41c *R/minnndist.R 317042937a89d31d8bfe99c91d880540 *R/model.depends.R 5a89d6b1f9f3df7749d8953db2583f14 *R/morisita.R 0f379e9b39fee5af9dceb8e5a87e619a *R/morphology.R 62e79026d013b959287010ad3345bb7f *R/mpl.R 22ddf4e3375539b406ed1982e49b1181 *R/mppm.R 8f19872c0e1cacf2149b2bf116b7ed76 *R/multihard.R 23ade743ff6f39f31ff1bf52ee73f088 *R/multipair.util.R 21dee90242243823e6504b70762e3135 *R/multistrauss.R 641a9b05a22e90e13c6e62a73f1bffa3 *R/multistrhard.R c9d8071400779a76bc4133d776b16dee *R/nearestsegment.R d369ba3ff42b8dded86eee5209751352 *R/newformula.R 9a2de53cccb0801664c58677433ffe7e *R/news.R ab0739722eeb45406471a20c68fbc49a *R/nnclean.R aba4829868abf5130b72f8f5eaee0da8 *R/nncorr.R 
0f7e18b7c80e2693d958ce25f9ab0f81 *R/nncross.R 09b95d6428ea9d64f0ab8caed513f688 *R/nncross3D.R eba47f8399e92f6bdd92ad6504973280 *R/nndensity.R 7599b97f7347749ec8801ae600d314c9 *R/nndist.R ffed0097fb0a7aa389476d35f4e8e6e3 *R/nndistlpp.R ee673cb9586c442138794718ee2bdb2d *R/nnfromvertex.R 6408fef0f626e3072dfdfb12238b8a0b *R/nnfun.R 7c991b3e96c24ed6e67e8e55d7d7e7df *R/nnfunlpp.R 0331c0e07719896ab44b9a5b10885c20 *R/nnmap.R ed5ceb9d18447ec47e0490a65e259b75 *R/nnmark.R 1ddc55fe21a5f54976da16fb1c691de4 *R/nnorient.R a2b94b1e048d56444616460d62da489d *R/objsurf.R 520e009dbc77e847c1590d7a84e2bf2a *R/options.R 0579ac687f57e918145495b397827a03 *R/ord.R 351116d5be6f0c962874e927ccf6c416 *R/ord.family.R f48d82982ecb34d893bfce2ef96a8a09 *R/ordthresh.R 55ff4871fea62bbc7887fec50aa73f09 *R/otherpackages.R 717ef43c7fa89d127ff9919f740f1b21 *R/pairdistlpp.R 718a11138e5e35a7916dd260f0d052e7 *R/pairorient.R a205ebe3d826cce02e8199aec096e857 *R/pairpiece.R ace43e8184e415c2af65233cf52aa638 *R/pairs.im.R 466b544caf507d4c55ab60882d0b7945 *R/pairsat.family.R b9643bbc4c4e7a336fcc535afc002c58 *R/pairwise.R 2cf2a16ee25f336d2b1c505f5e0571e4 *R/pairwise.family.R 7e219c0e44487ff76d3c06bb86a48984 *R/parameters.R d438b544bc4b604583e6fbd52b4f0821 *R/parres.R 2a9594b87309ae047f82393c5bd45d08 *R/pcf.R a38ebd9151fa6847dcaf862c4f452ed8 *R/pcfinhom.R c76e896661080247e9b5e95e3d1cab0b *R/pcfmulti.R 8dfd5c29c944568943c7144165e91bab *R/pcfmulti.inhom.R 7e1de57c9c386631f2ef7313259883d8 *R/penttinen.R dfb884749a05ca2792358f6d1ff35f0e *R/percy.R 4e2cfc0fbe156faabfc39466b943f522 *R/periodify.R 1d16cfd9960cff167575d94927c65244 *R/persp.im.R c73888548368e8f3a0b3c4a7abd1f51f *R/pickoption.R f3a95149f75149545f4e20ef530fcb1a *R/pixellate.R a878cd80edb2d826a6f12941f05f0e1c *R/plot.anylist.R 6cd130d91ac285c2220504f42fda6c46 *R/plot.fasp.R 0a90ef29006c2a09d94f459553c55242 *R/plot.fv.R 62b476eff2da7d16518994fe5bf846c6 *R/plot.im.R 2773c384cfbd702d010b28ec7852c1d6 *R/plot.mppm.R ae44f705e4f849122977504d71426ce7 *R/plot.owin.R 
d5a505a3595402321313152da0cf42a2 *R/plot.plotppm.R 125840a6a0920802ff82d6989a807046 *R/plot.ppm.R 18e00efbc2477ccf0879b564e6d6b915 *R/plot.ppp.R ec5ad7317b8538f5401793bf186913cf *R/plot3d.R 7a854e2816f88bb9a2ccf2b858ff7c87 *R/pointsonlines.R f89bf7b60015d0f0e3d3406cc47345c9 *R/pointweights.R 1a0c9f29d3c585dd7bbb9c9872188e05 *R/poisson.R 93f8d51fa25fcd69340d2a72f74c6111 *R/polartess.R 64b9f14b858dc9176ecc6a698f293648 *R/polygood.R eff91a3e3e11682e74644666801c4fc9 *R/polynom.R ced1f0fff95ddcc732a15c28d5374c5d *R/pool.R 6210e4a7b47d3745a27579275a187499 *R/pp3.R b8db4e3b8b463718a25ef53acfa4e6de *R/ppm.R 5f7e27796a5c264ab292e73983a5b05d *R/ppmclass.R 48a2a03ae76a21ca2f0159ec5882112a *R/ppp.R 3fef6f3ca2c342db7888a8b0783968be *R/pppmatch.R d06c9bdab82a9d64ccabe3154e91bcb9 *R/ppqq.R 015ef3c2c4b86119e20608d77b8351ef *R/ppx.R 8a0e6c4ea34206bd479f2fb8fc558a16 *R/predict.ppm.R 348a83165ff7660c9c490acdde81c99c *R/predictmppm.R 4a18e1a37550a6a75e67505f4b4e92f4 *R/profilepl.R f1a20af4200e23511ecbc06c75be2e33 *R/progress.R abae5cdd29c5edf7f104362f4f1d576e *R/psp.R c8fcab5e13197d3b352059a2869ab2b8 *R/psp2pix.R 81731d9ae4a27663a8789768a69910f1 *R/pspcross.R 4c2f7d4fd5d10218bd0fdd3f7395ba9f *R/psst.R 264fe31fba6ceb798e8782eb3b1108eb *R/psstA.R 858b91662669b087811e8ceb212c7ea8 *R/psstG.R 037230fd025f8fd9308b0b4b98760799 *R/qqplotppm.R f7faf40132ee0b04680a829b1996957e *R/quadclass.R 427ceadf3465047c40765f37a1b8d0e6 *R/quadratcount.R 99aa240d18a1c29ee5156bc18017f63f *R/quadratmtest.R cc7fc0270683fcf28f0f85df7986c0cf *R/quadratresample.R 5c2e764bb66a5789527483909f4637bb *R/quadrattest.R ba81abd6fe6794df409e9434dc1897f4 *R/quadscheme.R 540cc0ed9089cb766cbeab8d5afcdf46 *R/quantess.R 24f39fe4ae6c36437f7fb5caa8cab62c *R/quantiledensity.R 115a41aaf2d3c55b68f56802fcd56020 *R/quasirandom.R 13addb7549196eb16e258d5b5b0ab97c *R/quickndirty.R c29990b66d41c36425f52cd98b1cdb1f *R/rLGCP.R 826c16455c2ff593392615c64c4eada5 *R/rPerfect.R 6e0b2255bf8b42f5a236c391b10306de *R/rags.R 
33b24fb5e892ef516e472d1619c80186 *R/random.R 73b70afa74d324923fd971bc1a4f8bbc *R/randomImage.R 87590bac3b9be6fe86bbb56b72aa02d6 *R/randomNS.R 5445c6fcc2e84291d06c6627dea1c7c9 *R/randomlpp.R 6b06764b43e80727f9a488c406c12774 *R/randommk.R b912bc67cd2c72b8751f83c1ae5bdc29 *R/randomonlines.R 2e3a3bb0b90807144b8d36e69d4df3cb *R/randomseg.R 6639370480c1a574a374d926f2ac2fba *R/randomsets.R 24972883102c87da259be0da8e982db7 *R/randomtess.R 3b4b8699391ccf618efd82a6158cf345 *R/rasterfilter.R 2b7f1040c134ab9b450aed5817b21047 *R/rat.R 97b9fac780f72c847fb59e126e7ae4a8 *R/rcelllpp.R a8a6f895acc18aa94be66b546be6c17f *R/reach.R 511b88658d51796d9a0daf71b44b9cb4 *R/reduceformula.R cf3f348ff8e8d2bc4e318b42a2ff9c4d *R/relrisk.R c7084f7d598252e624a777b5c9e2dc1c *R/relrisk.ppm.R 99f3f901362509f3494658b3b853981a *R/replace.ppp.R e34c842a124f8b2a99d7349dfa0800d3 *R/rescale.R 1c30f8c9cf5b707bab251c5afe77241a *R/rescue.rectangle.R 4640e8c94c3b73ffccfd18821c5b46fa *R/resid4plot.R 7bed23bc9e49020838f350679afff371 *R/residppm.R 15f1ea6eff30e3b2b696ca6e9a3f5d4f *R/residuals.mppm.R bfa4d3edf8377369bbe6e64a71db30bd *R/rho2hat.R a89c7dc280d1d33580b4712681ff2e47 *R/rhohat.R 9f131d0cb36ed648e36e623b90b674a9 *R/ripras.R f47c0eccaa94d6655967d988635109f5 *R/rknn.R 57865f2141c787795e579b744203e885 *R/rlabel.R 35f1e78c4ec615ebdf02bf522d15dbe7 *R/rmh.R 427b8f7cc0945f7a48726a1fb147e867 *R/rmh.default.R a3ad14bdc2deb154aa214e6ccffd5994 *R/rmh.ppm.R 0605d8f7db7997e78130f55a254d025c *R/rmhResolveTypes.R f38b36afeb02d76d09e32dbe5f7d368e *R/rmhcontrol.R 15998f271ee8c97b82a16a551a524cf4 *R/rmhexpand.R 6627412d955a4672105d5a3d03c8d500 *R/rmhmodel.R e9b62883c43b618c2eed5ad3729c0a23 *R/rmhmodel.ppm.R 7a33011a8c7aeacbbb495bab033ab723 *R/rmhsnoop.R 112482932314aa9b1dba3ec93d6d0892 *R/rmhstart.R 6378d22817e69ed8dec33950baa86f63 *R/rmhtemper.R e9b546ea6437c8c859621b2d15e211ea *R/rose.R f55cbf2dddbd415b9d1be26b0f2e2af0 *R/rotate.R 4f785802d39bd78aa8c2f5df2f4c0b70 *R/rotmean.R 2ef7687b1739a162d049eef56dc702d4 *R/round.R 
4fcd2dee7f630b38180d057ea7802a20 *R/rppm.R d7bc421c52d1ca3b826b85adabcf9e05 *R/rshift.R cbcc8cf0330e9d44f0524af3e3eff341 *R/rshift.psp.R d6c9955f67b6ba632995629e59fcbea3 *R/satpiece.R 06ef1d6ad3f5b348e3c584f4e5411644 *R/saturated.R 42850d1f9770d402d5c115f8437ace70 *R/scanstat.R 38b728fe5e25298ae9270b71765f50e0 *R/scriptUtils.R f91342c5f456f5533b80836918f60120 *R/sdr.R 6e9989251f16919b9802f21093e8ac69 *R/segtest.R f8f9812990a5d541a23f11f0b7c429c1 *R/setcov.R 048ef0497b1b7b5b715a35c0d88bd4f9 *R/sharpen.R c0e7adf01137747788fad043b581c8e7 *R/sigtrace.R a6aa138f68bcd2d0ec27ad708148ddd1 *R/simplepanel.R a167865b3ac418ce26a642f280fd0ac6 *R/simulate.detPPF.R 2a90c951536a4919bd46c0294fa4664e *R/simulatelppm.R 107b78eaf28684581db067b1cccd1831 *R/slrm.R 527bacfb1911bdeabdc26a52afe9981a *R/smooth.ppp.R 93488a21301a5817abe392632bb72b12 *R/smoothfun.R ba0edc17de5025d4c348039706518702 *R/smoothfv.R 682554e524030a7b60c83f394bb11f01 *R/softcore.R 620b7344541c0d1a0a70327d194f12b0 *R/solist.R 851ee9aefccf0a840e8fe19466b9c066 *R/sparse3Darray.R 823a9ebe4eedf31abe35614ba1312ddf *R/sparsecommon.R b0514c2ec909c6a7506991fd12ee4ff3 *R/sparselinalg.R cfe29d37dc0dfe03b5f1c10b9ddb51ce *R/spatialcdf.R a48410ba7a0b5b5f3836f7558289ab54 *R/split.ppp.R 978c773e103e2c60aa1d2c1779b7befb *R/split.ppx.R 779116c375576d38dd12b3570e76e3b2 *R/ssf.R a9ff5b910f6ccb3eedd9d43861785eb0 *R/stienen.R f6c98b9d57ed1584ea2661c5d8ebb82c *R/strauss.R 35f9016037fe08d24060b7e6b0dbadaa *R/strausshard.R 7076c6ca78c74265417f2072818e4cb0 *R/studpermutest.R d6fd59acbc354523f98dd4eace068658 *R/subfits.R 94fa27af37045e9360113feb804615a5 *R/subset.R 5ce5c7351ab113de0450a01fa495759d *R/suffstat.R f00801e18398881039265c2373c2b3c1 *R/summary.dppm.R 0bcf7dd8d2ba1f985741420fc8c7c0f3 *R/summary.im.R 677d750a7936e218fd8b8312001c8225 *R/summary.kppm.R 398d2b3d789d26c1e7f285134c4f8fce *R/summary.mppm.R 820d0d994480844474ed2b4dd45f8bc1 *R/summary.ppm.R fd8885ecb274bfdae6fee1249fa65c2d *R/summary.quad.R d86a6008b2d5563f4dbd3ac945bdd321 
*R/superimpose.R 689db2899506c6b07932cbabc7c04386 *R/symbolmap.R 749f7b886b0d565e7e90d0d9b1352265 *R/sysdata.rda 52c7080a314ab087e20d9303f89d7d05 *R/terse.R b964ae380932475e2610cb1c41319276 *R/tess.R 911200b0d21f6772f56dce486f9d07d9 *R/texture.R c3032a7090caf3ec6606ec27f85a6772 *R/timed.R d6720d955272f69d5d4c93476136078a *R/transect.R 7143eaf3c90beea6b15d60fc9fb65c77 *R/transmat.R b3c3d90d017e1fb9830bb9e20b76b07e *R/treebranches.R 746a816204b6077878f2bb7e3bdb4fdb *R/triangulate.R 6852ab624a8f0e62a8e6a362efb8e055 *R/triplet.family.R 954f26a72a4d5e59c68dbe379bbffe26 *R/triplets.R 29609c1886e80eaa12a24182c7e4f580 *R/twostage.R d9d6998b49b2b4f5eb0ee0e3a26c710d *R/unique.ppp.R 41895f8238de103221436d3875a67854 *R/uniquemap.R 1f167f05d077436d58d2f8f0171fdf52 *R/units.R 0b29ba3c73fde1d1d67bea586bcf29eb *R/unnormdensity.R 5d610ac98ba388411d19bad58bb2cf69 *R/unstack.R 600d317510863cfba04bd16968bdaabb *R/update.ppm.R 3652d8abb9c047420c3321e192c94802 *R/util.R 7ed4538851138ac6212059a289531087 *R/varblock.R 902d97c8541b066b0b2d8f5bf86be07e *R/varcount.R 3d605462069dd037c0b07cae6b128bcf *R/vblogistic.R 13de3993f910683b44865cb8b40c3dc5 *R/vcov.kppm.R 8f9a3c340ed1f325accd1539619eb96a *R/vcov.mppm.R a05038039dd4843b20ce357a6456eb65 *R/vcov.ppm.R 1c7a2fa6f48506b0b8115d456df563e3 *R/versions.R 466c043d66aebf5056e3a8ea2970d74d *R/weightedStats.R b1333ff8439dfd41629fdf56be763bea *R/weights.R 346148b6bb6c1ea813abc92b3d73fdf6 *R/window.R ea844d587cbaaa609e05b802fa223566 *R/wingeom.R 707324447941c4759e64ece4db02f2f5 *R/zclustermodel.R 11165b132b89e1985ff431da41afd8ef *build/vignette.rds 864845ec756c9bdd720c4399e3aff644 *demo/00Index 68d9e760c09449956b5a218bf1c91fbd *demo/data.R 69fec98d1b46c8c3205c44b677e6da15 *demo/diagnose.R 2cf3a86de3d63cba7047c6ddf155c236 *demo/spatstat.R 0382b9ed5e27f7c537787d3d2c654ea2 *demo/sumfun.R 407a8cca8bfc8a3407cc02f319e92e98 *inst/CITATION 40b7c225a16a40129627f60b05dcbe32 *inst/doc/BEGINNER.txt c28410c061609c77dd473d1b60ea8ee8 *inst/doc/Nickname.txt 
0692d176362b7733c3eae77f82bb0611 *inst/doc/bugfixes.R 2f19eaeeb3e582db6477b0c7e042b210 *inst/doc/bugfixes.Rnw dbfffa40838d948c2bbb6eb612f340cd *inst/doc/bugfixes.pdf cfb03c6686a9e848c56e492caa9aee7a *inst/doc/datasets.R 8bb66a8aa0e8cee0b34f1ff8f81a4e32 *inst/doc/datasets.Rnw c60e9fb18ca9ef471ac0d36de87ba99d *inst/doc/datasets.pdf f10eb74b6eb784867cb8e1e28b1d31c7 *inst/doc/getstart.R 23c7cff7980144d2d528097174bf7114 *inst/doc/getstart.Rnw 0dde384f7d920a107c9f03ee4e53d3fe *inst/doc/getstart.pdf 5aac70e3d108387aeeac6b9888245162 *inst/doc/packagesizes.txt 52355a471d94901f23144944391cd5f4 *inst/doc/replicated.R 471ba4e0d3f21bfb11da447894ed47d4 *inst/doc/replicated.Rnw 7a953051e240e35dfb2495abcec6be85 *inst/doc/replicated.pdf 4fe0175127f4df355434ef95e954db03 *inst/doc/shapefiles.R 0bd1c5d601f8b5a1f966bc5d746dbdb7 *inst/doc/shapefiles.Rnw bf6137baad6806112d850858cbc51e7d *inst/doc/shapefiles.pdf 6eeb90478894607229ffb28dfda1d634 *inst/doc/spatstatlocalsize.txt 6bde3919979d391d1d9d3dc30ac43d55 *inst/doc/updates.R 2f575cc02bc530ed1409993cad0b98c7 *inst/doc/updates.Rnw f481ba758cadbb5bb70aad4fef2e27d8 *inst/doc/updates.pdf 12e68895fef0d3aa0bde45a0ddbadfa4 *inst/ratfor/Makefile 22e8a5189942ba190c13475b35459c7f *inst/ratfor/dppll.r 6d471ec061ea91398ba16323de56b9db *inst/ratfor/inxypOld.r 8fedcb4ea9b0d19e2da232b4b45c0626 *man/AreaInter.Rd 6a8d46b7ffd8e93938c8bb8f32264332 *man/BadGey.Rd 1d8d9afeacbb8e67e56abf455ebfe6d6 *man/CDF.Rd 2bc1cad769270696e144e751de4505c0 *man/Concom.Rd 282a83310afb9506c9a035c96e55bedd *man/DiggleGatesStibbard.Rd c59392fc5fa60782bf91c8c0675de144 *man/DiggleGratton.Rd 7035cca796f4e64b0b5089f25351b070 *man/Emark.Rd 951ee2df828ad3de8c17340e0c5255b9 *man/Extract.anylist.Rd d6cce504ef640b6cdd4e88a012fd52de *man/Extract.fasp.Rd e50f40eb875c124095941fc99a486f36 *man/Extract.fv.Rd 6baaa916bde99c7c78b8ee52dd949a1f *man/Extract.hyperframe.Rd 7e4de82b92ba6d2135c405cc5ec7564b *man/Extract.im.Rd 62c8609253cd6cca6b78e76ead3864f0 *man/Extract.influence.ppm.Rd 
9df24cebfa86a43b332426d55b0d65cf *man/Extract.layered.Rd c165f89bfa18ceb0aafb57ba7c3977f9 *man/Extract.leverage.ppm.Rd de0b5e2b05e4341b1f5de7261804a993 *man/Extract.linim.Rd dc6c0e3c01c25332b25485da48c1f918 *man/Extract.linnet.Rd bfa41eea5bb69facbbecd9de7dc33106 *man/Extract.listof.Rd 5538d78ad2f67efa3bbf9fea43483a5f *man/Extract.lpp.Rd 91e440f304e3e4a4e021236bcce45108 *man/Extract.msr.Rd 740e2f7917d0e8e8df64e3f223ea09d6 *man/Extract.owin.Rd 5c9311308cb414457f540e618d59078a *man/Extract.ppp.Rd 3dc6242c1c60d1c44b59899b58f07129 *man/Extract.ppx.Rd 8ff65133fc2b94fdba9cf10fed0a92b0 *man/Extract.psp.Rd 298518688a3bb5c06e7f4b332965f163 *man/Extract.quad.Rd 4555b288918db5e360c216aad5b314e9 *man/Extract.solist.Rd d19bef46a23ecb0ab93af0ed35850d76 *man/Extract.splitppp.Rd 279ecfbecb82ff618333037b97bb953b *man/Extract.ssf.Rd 7f25d30fc29a62aa0499b509dcc22d52 *man/Extract.tess.Rd 563f9349613f728ccdc5c7e2edd9db37 *man/F3est.Rd 2ebdff1ff498c5d21fe8920a6255a6a5 *man/Fest.Rd 21ddb0ef529a342599d30a4ed85da941 *man/Fiksel.Rd 8e401dd59d31e7ba45e6947cbcecc519 *man/Finhom.Rd 2cde77506f7625f97adcda2d2056a7d2 *man/FmultiInhom.Rd 051d69217e89ee7f12ef3b0fad2a779b *man/Frame.Rd 4d35bfd3f54d1f1733a73d4727ae6f94 *man/G3est.Rd 5915cbb8979fa986cf75db47798b1258 *man/Gcom.Rd 5a51479a48d4247feef59af98acc62cc *man/Gcross.Rd 0f7fb9b0fb9c1a658f34bf706cb1cc73 *man/Gdot.Rd d876a87fd0e28f9a30d2deb945f9ac7c *man/Gest.Rd 48f800b52cb48c1f124a2dfeba305f29 *man/Geyer.Rd a66d553f02f060579a076e349d1a824f *man/Gfox.Rd 83519c71ac4129ebae8b0d8d386e7bbc *man/Ginhom.Rd fc20837f195b91fff99f485ff9403fe2 *man/Gmulti.Rd e18298340f5bf46c56b6a0cc4aad6c1a *man/GmultiInhom.Rd a312c80e348d113a05c6abe337f977ea *man/Gres.Rd d3d2b569cf8a6c694141d49f8484a80c *man/Hardcore.Rd 4fb5a69f38eb1906bcc34f9450ea0fdd *man/Hest.Rd 77c49a32e912ecaced766cadad6476ee *man/HierHard.Rd ebcb391ba5dcf25006f76797e8140278 *man/HierStrauss.Rd 1234e600c429e1b4e513e6aafa007cec *man/HierStraussHard.Rd 79b18821fc9b24fc83fcdd977459da92 *man/Hybrid.Rd 
857f637abeb713f20381e6ad5277852c *man/Iest.Rd 6cad866725a9ff6d151e090ea371be95 *man/Jcross.Rd 965ba20346170bd6f15657c2f8699c0e *man/Jdot.Rd a4b9764604f960f6ba23305ba69332f5 *man/Jest.Rd 87995f2d988d8c0533550d40a4fc30c0 *man/Jinhom.Rd c4c794997556f644475e44bfeaaab636 *man/Jmulti.Rd 1e284fb57855d43a5579dd9fe62e8f2d *man/K3est.Rd e5a3d2d980667e1f6484f12e25bf0332 *man/Kcom.Rd 7911449fe5a316294577d1badf980820 *man/Kcross.Rd b6eeeaae285c6563fce6fe7d87d84e68 *man/Kcross.inhom.Rd fcdd7caced0711215c11a08cfd7d7c3c *man/Kdot.Rd a41bbc4a12a8e2616cdaa3cc61535114 *man/Kdot.inhom.Rd 5591528fc6e40dd45438938a335e4985 *man/Kest.Rd e8604ed0e68a6c592107645cb6555b62 *man/Kest.fft.Rd 8d45141cac727f8a0c598b32040c8259 *man/Kinhom.Rd 1a6b32af2b8f01a02c072267551d29be *man/Kmark.Rd 60072daf709b4e8a5e7617942bc06fad *man/Kmeasure.Rd c396e17ccc63be7995b7902317b7f3e6 *man/Kmodel.Rd 89f20169df3dfbd7ea1826d2e87003f4 *man/Kmodel.dppm.Rd 421fcb36cf31cd17e9514bea3346fed8 *man/Kmodel.kppm.Rd 9adad596d661756331cac79aa953ec94 *man/Kmodel.ppm.Rd 37b26fec6bb4442cb7f0cc82cd1bd64e *man/Kmulti.Rd 9260fa2d2918b899aaf83632bdf2ddcc *man/Kmulti.inhom.Rd 86bf243040ecb3e90837ae5d58737872 *man/Kres.Rd 2409f9d2191ef5ef6e05b5655c9d094e *man/Kscaled.Rd c5987d1db8f0582adf5d742e70cd7377 *man/Ksector.Rd 9115a22a373040ef2d7209718e4fbe29 *man/LambertW.Rd 340e1092c0ee8f6688db527737f55c64 *man/Lcross.Rd 714a137b2871c12ad00392f6b72a8ee2 *man/Lcross.inhom.Rd a123cc24d96d315daf0aed097a9d136b *man/Ldot.Rd d767a56998e421da4fd7fd0b1534189e *man/Ldot.inhom.Rd dd9b87f89f595e1396d0d6d43cfd39b1 *man/LennardJones.Rd 3520190a05bc88aa8e63f5c8ec6fc066 *man/Lest.Rd d7473e640476fb8b4a064acd9c62ee76 *man/Linhom.Rd 019a96958fd216110d6c221a32cc5605 *man/Math.im.Rd 7fd06b632c6c5cd04786c925bd2c3999 *man/Math.imlist.Rd cd470834ba92eb4d985d63e956fab73d *man/Math.linim.Rd fcd0b10c4c71c6d01500b70c894e578d *man/MinkowskiSum.Rd 26a9db71cd8fa55fdc1eb42afaa2907f *man/MultiHard.Rd 62f6b6f26e3d078704b4742b9e43bb13 *man/MultiStrauss.Rd 
bf2dcf70457431c00a3049bb814dbb33 *man/MultiStraussHard.Rd 176bbee178c7111abc5d6a0fe97ba0fd *man/Ops.msr.Rd e61d4cfd0d9bacea2346f5c064f28fe6 *man/Ord.Rd 37b2dff8a8916eea7e7927961b3c86bc *man/OrdThresh.Rd 3856350ef8ce867f1b9fa855082b74f4 *man/PPversion.Rd e5df8b20b03a422103c24fa834a8f32c *man/PairPiece.Rd 404b13dc8185a43d0206f2e54e3878a0 *man/Pairwise.Rd 084575ea7ae835815f09f0f3db1824f4 *man/Penttinen.Rd 04bb79d763acc68544149340fc7b7dd9 *man/Poisson.Rd 2e2c031bc8c0ffb8da2c3fd6b4371ffa *man/Replace.im.Rd b747613a11bc6f45761958859dec5b48 *man/Replace.linim.Rd d4fddff9acfab9982c86ae4f9b79343d *man/SatPiece.Rd 586b157510810340fd0b1f34adba6819 *man/Saturated.Rd 89216a124723c5fb1c2347c7446f8ce6 *man/Smooth.Rd 466dcdc6cc4b3995e072f9ff9c958ccf *man/Smooth.fv.Rd aa84079654b3b4ed64afb903ca1b71b6 *man/Smooth.msr.Rd cd4d56a4d3ac116acae31ee56df5652b *man/Smooth.ppp.Rd 8ad465a3b182d156f6e38d0c928542bc *man/Smooth.ssf.Rd 17dc82c299fef9191b2224d0f66cce9a *man/Smoothfun.ppp.Rd 07cfbc769f9ea63f4108bb3081942a03 *man/Softcore.Rd 8ba2cb10456824cd83c49a70fe1d41a8 *man/Strauss.Rd dfb8b417187d6dfae4408a4caa8fefa0 *man/StraussHard.Rd d5569b50779abf3f0033d9381c6fc13c *man/Triplets.Rd 207c090d4efc85174fc0d99a0464f89d *man/Tstat.Rd e900b1d972859fc84b3204e049bf9968 *man/Window.Rd f116c40218b20e1743e1febdb81a4287 *man/WindowOnly.Rd 4c4ad622e5ebf6861a77732435fd738a *man/adaptive.density.Rd da5e83b6e5a25aedb3393de676c087eb *man/add.texture.Rd 96d3635cd31ad5fa40a142d03ebf11a6 *man/addvar.Rd ad01dbc80f6193c73a59989b0e1d03c1 *man/affine.Rd f321206a180ad57fe5acecc685b7644d *man/affine.im.Rd ef8a00b237a0279520f8c333a533b44d *man/affine.linnet.Rd 66d775558a9ef05c271ab95c5f356a34 *man/affine.lpp.Rd 2bda2d4b230d431cf1158e897dac57f9 *man/affine.owin.Rd 21f132fd230e2ece3fdd1d9e9e372093 *man/affine.ppp.Rd 4ca5484af674c88bbcbfc0a54368835d *man/affine.psp.Rd 7f5e1d184203597ccd453526041f2773 *man/affine.tess.Rd f936f4c065ca59299257510de3a62fc9 *man/allstats.Rd 8948713319c86ac9c06a0172156c2fab *man/alltypes.Rd 
42896f53104157557c17a210fd230f87 *man/angles.psp.Rd 5fcd23067d4ca2c676f57bf3dc7c71d5 *man/anova.lppm.Rd 830097ed9a03b1e7221582ec2fc89cc3 *man/anova.mppm.Rd d30797ca2a0b728a7ece2cb01daf07b1 *man/anova.ppm.Rd d8d90d340989a5c40405dc5a97f5487d *man/anova.slrm.Rd 63c074e020a138be1c4661864b75e937 *man/anyNA.im.Rd 8d0d4ecb41d8971aa5e8c3bc46321ab8 *man/anylist.Rd 9b7fa081f204c10df20d91f990a3cf31 *man/append.psp.Rd ebafa74187c6ebc98eaec232d17e43af *man/applynbd.Rd 57adf249c91fde7afa6a3e4932f5ae54 *man/area.owin.Rd ab99a8d7442ea97017c69d1939aee2a5 *man/areaGain.Rd 34ea12a4ef430cccd02fb208a0980d11 *man/areaLoss.Rd ceacda934ca7dddd84f18eae8e28ae9c *man/as.box3.Rd 037c6fddb0fde0d07a30da270c7d804c *man/as.boxx.Rd 2dd8d36757c24a29aaed2c837229539f *man/as.data.frame.envelope.Rd 0ec47d1f67cdada328f5a9d4b9b71916 *man/as.data.frame.hyperframe.Rd 2b0d501dcd65f686140f155e2079cdab *man/as.data.frame.im.Rd 8adcb78436fb363368b0e07ca01cd986 *man/as.data.frame.lintess.Rd bfc1533729cb0b45727df7007ca0c0b4 *man/as.data.frame.owin.Rd 9b1b64af3fe5a91f74937c9622aee20b *man/as.data.frame.ppp.Rd 625df6c52e611eb00c67e1b12efd4efd *man/as.data.frame.psp.Rd d643ee803cbff8a55324e84208db6787 *man/as.data.frame.tess.Rd 028d508f316097c621dcbaef53a7d9b4 *man/as.function.fv.Rd 3097e1e816d23fd1e56fc1d63ebbad45 *man/as.function.im.Rd ac2273a069a3ce20d19cd8e5a1c4bcb6 *man/as.function.leverage.ppm.Rd 1c4027fba2a77ae3d98209c5e6b31b01 *man/as.function.owin.Rd 216c2723695f08917d5bc6dccb80c483 *man/as.function.tess.Rd e7f77be0adf5a08ab2c965f624a83f80 *man/as.fv.Rd 14eb58bdc3b207105128c6dc51fb86e5 *man/as.hyperframe.Rd 53a356f14c2d00ff5328f09fe2b6d210 *man/as.hyperframe.ppx.Rd fd5036e08cca61f5df515ac80440731e *man/as.im.Rd 5e20b5864259039bd8ca272aee68027f *man/as.interact.Rd 1e916c8013dbf03efc8967072d5a147b *man/as.layered.Rd c0482ab9bc885ff881c9da7c14b7e2d5 *man/as.linfun.Rd a485b734be9b2fc8236c648d0013aae2 *man/as.linim.Rd ffbac6474298e0323e33c45b918f4437 *man/as.linnet.linim.Rd d52772e67af0ba3019677890f016af27 
*man/as.linnet.psp.Rd a87cd4c1ecdfa262347b3a0d97b41192 *man/as.lpp.Rd 8574dfba2a3941a4217c9cf2ffd9a2a0 *man/as.mask.Rd 4092204230e2003cb69e4659f0772972 *man/as.mask.psp.Rd 36c4175e14e918f18f124fb401e25943 *man/as.matrix.im.Rd 4f0323fe12b26266d955603f279fe9fe *man/as.matrix.owin.Rd 0dbbfa1abf9194e4638a816203e05704 *man/as.owin.Rd 5c624477d81097c64ae4879ebf3f18e3 *man/as.polygonal.Rd dc165e3f06b729b80bd883fb226f700a *man/as.ppm.Rd 272b9e0258c8af6bcaea3f272a67cd84 *man/as.ppp.Rd 13cbca3dff795f0ce585afe910e25d10 *man/as.psp.Rd 43498a7fed17e562b24c5141290e177a *man/as.rectangle.Rd 6a714102b91252c9af411c4426119bc3 *man/as.solist.Rd df020957289f4dbcd83e1800b6efb124 *man/as.tess.Rd 6cbcf5e8449130c81017643f3724dbe9 *man/auc.Rd 3df7907e462038f9d2f410983c4be948 *man/bc.ppm.Rd 5d2ca9b9cb773088b584b20013096828 *man/bdist.pixels.Rd 6f91e4b42fe96806b3677d7bec18d9cd *man/bdist.points.Rd 2001c44828382ca663e828c03a993233 *man/bdist.tiles.Rd f1a900ff921acf095a49b5fa66c940b7 *man/beachcolours.Rd a843cd73ef1835fe4ea1a0ae21377f01 *man/beginner.Rd 34bfe7bb20f5983fe4266325b268d30b *man/begins.Rd ec3d1d0d1e0275d05f8c8620ae5ae3bc *man/berman.test.Rd 5da7cfc96a5c926f20691a304f45d901 *man/bind.fv.Rd 8b83d9212b3eec63678d967432473528 *man/bits.envelope.Rd 930cf60e872e994abf9f6ceae0301142 *man/bits.test.Rd 5fd192ba5b32e2cbb37234429c64d23e *man/blur.Rd e6ba5d3a73902ccab3c5756130de7e44 *man/border.Rd ffc9dcc131ee08a2de771dc48376ba9a *man/bounding.box.xy.Rd 2064969d0687cc616f53e4f3c5ae345c *man/boundingbox.Rd c3a04159e912cbcde9a325492fdde92c *man/boundingcircle.Rd 3d31bf6cfa4a213f6ef7524001440339 *man/box3.Rd 54834701b5ec9fb27880597f2e7593e3 *man/boxx.Rd eed7fe041f626623f67b718b48f0c387 *man/branchlabelfun.Rd 2b3980b25aaf443aff98116bebd51138 *man/bugfixes.Rd 8d1b685083f05c6909591c9c9e85135d *man/bw.CvL.Rd 71bf062a92e2af5e9023c61b4e5793d5 *man/bw.abram.Rd 10b76dbe5ec93d7a7cc9e7b3ca0ad159 *man/bw.diggle.Rd 44660ec80af7743495e4338c3e31bda5 *man/bw.frac.Rd ac9bebb02284e9625d89e95f797f07b7 
*man/bw.lppl.Rd f37ce608e92cb8fbe8392ef957e516e8 *man/bw.pcf.Rd c883cc9705b4c651e940a2d23a9e3417 *man/bw.ppl.Rd c1ca1a597b98297e0edadfcd3593a071 *man/bw.relrisk.Rd 80beff15cebcf9064a12c9f43309b7d8 *man/bw.scott.Rd 5f1ec5269f7d2bbed37da9fc82a564ba *man/bw.smoothppp.Rd c4c09b8ed66a469a7428c9df69157da5 *man/bw.stoyan.Rd 1f67e56f6705a466e2b8690c1c9e4ef1 *man/bw.voronoi.Rd 4b6e4a877960de8cb174776093ba332d *man/by.im.Rd c431579f078cbfa38cb40ff75266f494 *man/by.ppp.Rd 28994ed5410d8bd3381dd6720c33d140 *man/cauchy.estK.Rd 51eee97456b592fe07b4ce325c43abc4 *man/cauchy.estpcf.Rd 4e6af84798c586d0fb2639e82d615251 *man/cbind.hyperframe.Rd 0b81a15f16b210d9c4f35bc18117e6c1 *man/cdf.test.Rd 01791eee38ad1a4600abfe4f8b518b1d *man/cdf.test.mppm.Rd 721dafe0173a124d815016e25927e960 *man/centroid.owin.Rd b88629357239cce654b8c37c6826d84e *man/chop.linnet.Rd ac1f611664f0e91ed78191eabe1d6ecd *man/chop.tess.Rd 48ff174a1fddbefc4c47fbf7bb09f816 *man/circdensity.Rd 3b351b5f30e29753d670d961d8415c17 *man/clarkevans.Rd 82568157a5cda9a70d0995bed08a850c *man/clarkevans.test.Rd 7654a284e984253e10452f98b495237f *man/clickbox.Rd bc2149002879baf26a34499534af39e1 *man/clickdist.Rd ca00f4d880a5cd81ce7d9a4b125bf2e0 *man/clickjoin.Rd 6dd835e65a81b6072b8d24272c47b600 *man/clicklpp.Rd 3295d098a0ee9741a140b28cad7307c9 *man/clickpoly.Rd 0db2141942eebc8b8461975ca3ed3dc1 *man/clickppp.Rd bde6cf2f59210136c60d71b1b2923138 *man/clip.infline.Rd a6ced7aa6dfe378556db19f516903d00 *man/closepairs.Rd 4c62311993e40151a6cf99701a095f8f *man/closepairs.pp3.Rd 8e7548e11708ceb8cda1b98cca310fd3 *man/closetriples.Rd e728e649b9c5857377ca80056425dc3a *man/closing.Rd 0bf2b0d4c4b9a1a5db900de856efcb7e *man/clusterfield.Rd 1500752784536732ea9a8914ca831352 *man/clusterfit.Rd d4547fd8acb0c39986339c0e6aadca9d *man/clusterkernel.Rd b51c47fc2c637f95e7db966e3c34421d *man/clusterradius.Rd 9969967ef0c3cb82ce73a0be6f08fe39 *man/clusterset.Rd dd5b0370ff9308b4ff96985941b94cd7 *man/coef.mppm.Rd 0c3bbbf66c63e7ff5c00665c2d6692dc *man/coef.ppm.Rd 
8e1270ae95df370c3c9ef1ec6ec8d3bd *man/coef.slrm.Rd cb6e2c28b4393eaae15198da6c0a6028 *man/collapse.fv.Rd 031e2bd484ce32d814440776b45ec40f *man/colourmap.Rd b192205526803c311fbafb3271679ea6 *man/colouroutputs.Rd 6b48f30dc338828ecf3268b52ece04be *man/colourtools.Rd 7577667cef680abd3a2ec8d13fa413c0 *man/commonGrid.Rd 94b0f846eb376a7d73f4f08a8430d01e *man/compareFit.Rd c1b8e43213bcaf5a74c1a0238a0d6570 *man/compatible.Rd bbdd91aecc3f370e6d5349d7a72d56fa *man/compatible.fasp.Rd 0693f7679e63c7884c72e3c1d67c6272 *man/compatible.fv.Rd 0dbb6f2874f36a2409367b425a20970b *man/compatible.im.Rd 86f39d6bbc2448fa0a8ea7c8f5405c1b *man/compileK.Rd edc5b84dd7d0d6f60e5c27a2a7b8874f *man/complement.owin.Rd 9c37062f39c1f519e59ef50d6dabf3fe *man/concatxy.Rd b1f982113cf4b679ea625ee89abf357d *man/connected.Rd 66c243bbd2743d13c1b5d5745b1f1d08 *man/connected.linnet.Rd 88ca69371eddcee1a59fc9d67a70a664 *man/connected.lpp.Rd 4f1ab6267d02ee22580a21d0d2f5f9f2 *man/connected.ppp.Rd 92cea0b6516af10d1549c551ca1ca07a *man/connected.tess.Rd 7c22775202e81c10f23a210beba00e2c *man/contour.im.Rd 5fb384aadaccd28b925cc1ebc69f135a *man/contour.imlist.Rd 4cf1b288cffdead7f85cf0c0b86d42ea *man/convexhull.Rd 6ae9c7cf8d7a0140679b72f24916669f *man/convexhull.xy.Rd b323e7ff70db6054fe6b1412bd88e92f *man/convexify.Rd b79d752bb9228bc32fec25a2c488fb2f *man/convolve.im.Rd 8bfef4a4accabb9d55b51c356e436931 *man/coords.Rd 043d477a1eb8019a72195306231fa2be *man/corners.Rd 2c34a94c784871f85be912723b7bfb46 *man/covering.Rd 270668697df2da8368c716055fa16a39 *man/crossdist.Rd 0a3b28ff053854b6d9cb321304a3cfd0 *man/crossdist.default.Rd b0c7f58b1d9393deb439402c83ad0fbb *man/crossdist.lpp.Rd cbf5b84279b9901707b0e61ba9b80290 *man/crossdist.pp3.Rd 86bc3c6c78087ee16d8523de2ba09428 *man/crossdist.ppp.Rd b69412588854da90e9af9cc0c38a49c9 *man/crossdist.ppx.Rd ef11492e48f734cbdf46f6768ab05be5 *man/crossdist.psp.Rd 58cac3a3319f60ca98f5572ad296bd41 *man/crossing.linnet.Rd 661c50d342e9e32e7cc02d041c7ac0be *man/crossing.psp.Rd 
af5b5fcc49b56411f212cb487cb1b0ce *man/cut.im.Rd a6cd74dea247cd759420023c3a9fd0ea *man/cut.lpp.Rd 9bd7168a88c0befd87a787ea9c50fe8f *man/cut.ppp.Rd e1b7687066cc53ac3eb1a34be3f11073 *man/data.lppm.Rd 6e0c8912ceae9ee89bec670c9de6f135 *man/data.ppm.Rd 5a937a4d7cd2da5b2babdd2066816ae6 *man/dclf.progress.Rd cfe58cc740d905ec11772f662a1115a2 *man/dclf.sigtrace.Rd b32494aa80c8a1a1b28b967e4f3de1b3 *man/dclf.test.Rd c53a24542be4a9eb16621ec99a4bb45e *man/default.dummy.Rd 0ec93174356b4c09b9e90c5886dd50b8 *man/default.expand.Rd abb5e748d59b40a306e411526a5b2b17 *man/default.rmhcontrol.Rd aa8044cc7f49b4534077806138c7bbd6 *man/delaunay.Rd 0c1110c95832a3655e0db53d8d809ea7 *man/delaunayDistance.Rd 652c6ff5511e6a5ad1fd5c338590fef8 *man/delaunayNetwork.Rd c9aabaae8e19078decca8cb19c6b7ab5 *man/deletebranch.Rd b374a9ff6e2ac8635022a2d778d3e8a5 *man/deltametric.Rd 4fae2cd88b63999f37bb0a75b205edba *man/density.lpp.Rd 8a6fc933587fa9ad98b053b2f46a0560 *man/density.ppp.Rd d0b45be7b8cee60c7927bedd42ec0af3 *man/density.psp.Rd 2513790a42beb1924372b3d6a9bef779 *man/density.splitppp.Rd 434179e52df8df423f217569cc806b22 *man/densityAdaptiveKernel.Rd 5a6ff1211a1afbee38973efcad99f16d *man/densityQuick.lpp.Rd fb2637bccbc93dba50a96bdd1d666f17 *man/densityVoronoi.Rd c71ab76dcec73111e75f4c32b109fb2b *man/densityVoronoi.lpp.Rd 7e1b169837493285a01e53feae66f70e *man/densityfun.Rd 50fca06c24aac752c750d95c8f56f7f6 *man/deriv.fv.Rd cbdbe94416949b5299d9ae68b0875705 *man/detpointprocfamilyfun.Rd 0a0b26263084e4f120a8d91c4a53bd72 *man/dfbetas.ppm.Rd dc84b805cd54c511675e9427adf28391 *man/dffit.ppm.Rd ff2cfac07040c5637e8c86d3cbbab741 *man/dg.envelope.Rd d81c8d8e2470d6081243c61dd0829a14 *man/dg.progress.Rd 869ceab579b8674d6f7a686c01d3197b *man/dg.sigtrace.Rd 5edbb7cfbde253a31a72d93c3659126b *man/dg.test.Rd 0cbe18d4651cf05e1fba2b83d7aab0ec *man/diagnose.ppm.Rd feca6dece00297bcde1938f768c71985 *man/diameter.Rd 464efabd5c13eb3ea9c32c6f63c786f5 *man/diameter.box3.Rd 4c28781cc6cdbb4a0122e422396920f3 *man/diameter.boxx.Rd 
041bedc39fc1f905ec3c6964cbed8119 *man/diameter.linnet.Rd 03f9b542f39c6984b4c97467e4c8b482 *man/diameter.owin.Rd 4d0c9a276023e58fffc597f6d689e8a6 *man/dilated.areas.Rd 8d25bc1bb5047bb0f060fac2866ea6d2 *man/dilation.Rd 382a56f92a804d52582cf716cdf79b09 *man/dim.detpointprocfamily.Rd b72e48220d7edbac9fc1686c28abd50f *man/dimhat.Rd 6a1b619a11efac3fd663196971650713 *man/dirichlet.Rd 1ad1403e16fd89dac665e9a8aa252076 *man/dirichletAreas.Rd b8cb279192ea77edccb3110fc3319209 *man/dirichletVertices.Rd dd23abfd3e95775f341cac33045517e1 *man/dirichletWeights.Rd d76568876272da7abd5e822e75d8097a *man/disc.Rd b8173608c10e7504c3a424e9a45388e9 *man/discpartarea.Rd 972e9a47b875c69b39489b73231909c1 *man/discretise.Rd 78a3f03297cf56c59e91165460890f93 *man/discs.Rd ea33970727fe02497a888d725d9f24ea *man/distcdf.Rd b194f3bf5ac35f38806779514eb6280d *man/distfun.Rd 645074b7fae18a7483a1a15b0ba8a371 *man/distfun.lpp.Rd faf4889f5aa8f5a78ec409ceadff1866 *man/distmap.Rd e3c58049d4baecc3370bf36eae896977 *man/distmap.owin.Rd 39d6077ce1b03560432870885cc17985 *man/distmap.ppp.Rd d342c07a66859851fde8c661f720b0d2 *man/distmap.psp.Rd 35027fc5ae66fdf42ad3a35a6f6541ac *man/divide.linnet.Rd 80cc6cd76ccc874db32f2df71e16435b *man/dkernel.Rd 8203fba31ada9ac91ebc681f14b3ab27 *man/dmixpois.Rd a6aa371b1538557b295c207c2541d1dc *man/domain.Rd e93b2a5629b7d0568ee47583179a89eb *man/dppBessel.Rd 80e324406c3b304135aaacfbc5965428 *man/dppCauchy.Rd 417e2a934a63afe19d76cacdddc90775 *man/dppGauss.Rd f7a4425077102442130d630aff76f500 *man/dppMatern.Rd 4c762a8765980d7d7dacfee820d913ca *man/dppPowerExp.Rd 43fca14b1c64f1a91fbdd8ec2e56b40f *man/dppapproxkernel.Rd 71f1f80278289aab6dbf5823a5131626 *man/dppapproxpcf.Rd edb8c34118a8284a8946cddfda3392d6 *man/dppeigen.Rd 96abd9ffe6be8d538ddffd6bcab72001 *man/dppkernel.Rd 54613020d15576549a0b4878dde229f8 *man/dppm.Rd 435c26403f233030ea066d2135f121c8 *man/dppparbounds.Rd 976360c41648d086a5d572c80803ee46 *man/dppspecden.Rd 1f7ad57545508cbaf3ebdf359509c96f *man/dppspecdenrange.Rd 
cfe1652069012d2187f56f99553015aa *man/dummify.Rd c24c8a8fa5eb8b63614db81c18feb432 *man/dummy.ppm.Rd f6b361ee19802275e239b31f06d60879 *man/duplicated.ppp.Rd 623a912c3f9f26c4e122c3c3932a5b48 *man/edge.Ripley.Rd 57d514e98cfdcf5106306495284b167f *man/edge.Trans.Rd 2c8a2832c36a28895f184e502e3b6624 *man/edges.Rd 34b1bf16cb0a8c40bffabcd2d64655ed *man/edges2triangles.Rd b1150d865b52154abe4431d659e9142f *man/edges2vees.Rd 1e473e02de85c9fcf7691e98ff40d1f1 *man/edit.hyperframe.Rd b7283957228c1bd0b45da50abde3bc0b *man/edit.ppp.Rd 87e2d038d7d67ecc47943d0ab8648c67 *man/eem.Rd 92f555a540e6030d41a3ef67b6aa5519 *man/effectfun.Rd 73298504ab6eb8d5f0a67fd42576eb9d *man/ellipse.Rd 62f625d02e3671424d4280d6c7de0428 *man/emend.Rd c0fd21d2e0fa25cb2a538abed97caa31 *man/emend.ppm.Rd ee7df7d9f7945158c439422ef6f9a6df *man/endpoints.psp.Rd 2673f428c630f22060df474fb9aaccfb *man/envelope.Rd 0cc8bc4984ea85d93d847009e5777e48 *man/envelope.envelope.Rd 44298536cd06a45bd6016723c92328b5 *man/envelope.lpp.Rd 04863e11a50071081554980fa450d435 *man/envelope.pp3.Rd 417ab6fe29f16cd10ec714b550af8374 *man/envelopeArray.Rd 406ab1cde1b506f881777c6895445180 *man/eroded.areas.Rd 417361fe6d1aaa8a94539398ce43533e *man/erosion.Rd efb075b7df9320223a187d100cc85c27 *man/erosionAny.Rd e5b8141ede5431bec1877bd7cc706ae0 *man/eval.fasp.Rd fe65f0c5f1631b2f524b77123f982895 *man/eval.fv.Rd ff34d08fd14da05441a08235866e2a07 *man/eval.im.Rd d3112d72c72642d2909ddf2d278a2079 *man/eval.linim.Rd ad424c4a8fd6c41e9399776a3fbacf85 *man/ewcdf.Rd 71cab92a2003ffd03cc99de59670bbd0 *man/exactMPLEstrauss.Rd 6e1d4f4674976dcd96b752dcf9063a90 *man/expand.owin.Rd a97dadc3222aebf6df0e382234783adf *man/extrapolate.psp.Rd 0cf5188fd022cb4c001997fafddc5600 *man/fardist.Rd 60624a18119aca69226f751383d3d871 *man/fasp.object.Rd 0daf1da7e9646e66c05f1202aac28638 *man/fitin.Rd c0ea753a8c1dd4801157715779193173 *man/fitted.lppm.Rd 786b1dd29f8c2ef4ac4dcb377d273734 *man/fitted.mppm.Rd 259c85984dc95363e1ada09018756be6 *man/fitted.ppm.Rd 
9f6a06b4a7c477ca26c6bcc62a345213 *man/fitted.slrm.Rd ced7b79616a49227632d3d793f3fbeb1 *man/fixef.mppm.Rd 753f4eafa062d230841d8009565c22a1 *man/flipxy.Rd 782d7ab24890616e64a5bacb50c9fbc3 *man/foo.Rd 590d81abc370d01da0718fe8c8ed0c77 *man/formula.fv.Rd da28ccd90714621c7ba25c37b8297431 *man/formula.ppm.Rd 2b692f946fbfd4b7d3bb7545d4b4a137 *man/fourierbasis.Rd e411490a37b7ee3668c8ce893aa27a51 *man/fryplot.Rd 2200c0b48ef304c09d6bae6e192f567c *man/funxy.Rd cd5826c148618e9ff20da78773361299 *man/fv.Rd 81a53ee2dd2c8afdda608a90d37c9f04 *man/fv.object.Rd cdf23623d05c9cfabf2c13a29496b728 *man/fvnames.Rd 8786b2679753de57cff66911fd4822a9 *man/gauss.hermite.Rd 4014660ad0eea9fd14e7848f88612216 *man/gridcentres.Rd 7c4ae72e764a190364f40fc736c9907f *man/gridweights.Rd 8682c3236a127ecaa7ae430c3f9e72e3 *man/grow.boxx.Rd 30375bff5e98a7539edce32aa22edb24 *man/grow.rectangle.Rd c341ff01dd0540df584ea23c2e055443 *man/harmonic.Rd e5f06d04dbaca03d1cbbf798e5a313ae *man/harmonise.Rd 15ceb7a4e4d0ff84b3769143d69f82da *man/harmonise.fv.Rd 7657b089c0d5ba1e359906786c7d80f8 *man/harmonise.im.Rd b2477cde55321142f298c95657f38e34 *man/harmonise.msr.Rd 05eae00175dbeeaf8860df3f4b2559eb *man/harmonise.owin.Rd 53a50a8a89cc9655b91d6b10ba9394e4 *man/has.close.Rd aed9f3cceb2feee6b031b650b76de1e3 *man/headtail.Rd 316a20049d16deea0ef33177d5fde00d *man/hextess.Rd 059480c5911a91480fa9c38114732e51 *man/hierpair.family.Rd 724e1c23039b744db082d4f33aabb3e5 *man/hist.funxy.Rd b47d63a2c205b434fe8d474bf5743b7a *man/hist.im.Rd 4cbcb79d39be55de64a34143cdde90f8 *man/hopskel.Rd ca60f43560f10690985bc9e1144ff921 *man/hybrid.family.Rd adaeecad93471373f5014890c54b3084 *man/hyperframe.Rd 96f775face0dab2a7597a80a87dd5f99 *man/identify.ppp.Rd f9d14cd7306cd179193746eb85db3abe *man/identify.psp.Rd c06420b4d1ce545a3c93d96ef02223a0 *man/idw.Rd dcdf56d0f94e5f1848a701defb8615b6 *man/im.Rd 8044eb31faf9317ae125937ddf8d5e91 *man/im.apply.Rd 569c8cf7b3cec628e96e74ff545dfa8b *man/im.object.Rd 5cf099bfda09c699261c0f8f04e2d2d0 *man/imcov.Rd 
4247b7dde78daaaf7aed382865eb0302 *man/improve.kppm.Rd 36fb37cec394cd3a85fe7e53fa7f83e4 *man/incircle.Rd 15c74c0a3626c007f606b73c46c658a0 *man/increment.fv.Rd fbb05bfe93cb434d601445c3caecdcd1 *man/infline.Rd c01cc984d9dacc9347669d5b30556a2c *man/influence.ppm.Rd 75939f951b1f8f20b1a585050ee676c9 *man/inforder.family.Rd a6f043c5bcb03c177fd95f31ce6f0887 *man/insertVertices.Rd a1ebb4c40a9876e357c402dd4952d415 *man/inside.boxx.Rd 5f2fb734bab533e325c12831ddf3295d *man/inside.owin.Rd b02df1b8b7baaa48fe54339c406c4ba7 *man/integral.im.Rd 9f8f30c3d5e97433a3da6d09e9aba312 *man/integral.linim.Rd cf21d9560e5a2ebc614866c7d7088f0d *man/integral.msr.Rd 60293f2d3c2c7afa1f197d3b5ce74afb *man/intensity.Rd 753620f42fe3e253ec926fc3a250add3 *man/intensity.dppm.Rd e696007219b960dc30c74bf2cfdcd222 *man/intensity.lpp.Rd 8255a135069e2b020f2bb2a536805556 *man/intensity.ppm.Rd bb796724312d4a8c5861ea710aaeefea *man/intensity.ppp.Rd 5460b9db190b08032b212593e6575759 *man/intensity.ppx.Rd b489d9c0f4fa5d03b4505aea4bb3e9f0 *man/intensity.psp.Rd 09c1f979ba1643a0765b78dea079aabe *man/intensity.quadratcount.Rd 9603be44e328138b5dc4e9d71f939ed2 *man/interp.colourmap.Rd 78d4a3147e74cf0e89180d043ede81d5 *man/interp.im.Rd ca44c73cdbfc32eb5da72e358d82da2b *man/intersect.lintess.Rd 21fd63094b31ca0d94ff3c6f75e79aaa *man/intersect.owin.Rd 7742bba505107bfc2beaedb26781580f *man/intersect.tess.Rd bc0d899193807240689833f26a3b01b6 *man/invoke.symbolmap.Rd 61c0965c0544368c8f81d8f57495a0d9 *man/iplot.Rd d6a061df58310496ea224a9c31ce65de *man/ippm.Rd 8912e01a6e2ff5bddbe831d50d93ac6f *man/is.connected.Rd 4ca865e9e624a8130ee672fbd01dd78f *man/is.connected.ppp.Rd a3704f6c85f8c3d9fa4d111160467730 *man/is.convex.Rd 342ef62d8db2ccc867de36df82a4cec6 *man/is.dppm.Rd 034fc8f4553fa6fb630179b1a8f4af1a *man/is.empty.Rd 9760b1ba9982fd060eeb48c4fb3f5d4f *man/is.hybrid.Rd d6d24c19b232dd6551c046c8e5077e8b *man/is.im.Rd 6591c9c6bd96eaa2a953b42813c1884d *man/is.linim.Rd 35e01c299472deb5431bca514719f6df *man/is.lpp.Rd 
bd911908bcb00c0812bcf7e5b0a5d543 *man/is.marked.Rd 5ff0e261c80b99f8c541a0402eadcb08 *man/is.marked.ppm.Rd b2c5082d855b24db4d45b8baa3e0e27d *man/is.marked.ppp.Rd bc221a7db503a4d7017a66b3c83f27f4 *man/is.multitype.Rd 429f15ef85108a2834c9591eaeb3b8ac *man/is.multitype.ppm.Rd f425ee2ba8b468d239158951f776472a *man/is.multitype.ppp.Rd 0e40fc24543a9a2b342776b6ff5973ee *man/is.owin.Rd 68c1be4581db533f711fc7534b037741 *man/is.ppm.Rd 6b810b065b12c1b3272e6cfd52f317c2 *man/is.ppp.Rd 006f6e69e5c82636c59f1c8f31b42e99 *man/is.rectangle.Rd 4a19e24fdd91dcf51f330c5a0d7b452d *man/is.stationary.Rd 6402d47263125b9acf4330fa429db005 *man/is.subset.owin.Rd c3bb8b1ff39d47aaa9b7e7740758d2c5 *man/istat.Rd e12e99cb6c774416c83a5d21df328146 *man/joinVertices.Rd 55a3c5b55340b4242fe64a2e244235f9 *man/kaplan.meier.Rd 4baac51cd91d96502fc48a9953680066 *man/kernel.factor.Rd f7a962f81e7673b46c915b521042a39e *man/kernel.moment.Rd 549a8a0da9ff9822fed5d908f2c602bd *man/kernel.squint.Rd 1efcd196df498043d360a07bacdda65e *man/km.rs.Rd 260be89200c37c5276962a098d33acd9 *man/kppm.Rd 737f3d1ec96556536d31e334f3e2fbac *man/laslett.Rd 753dc51de5e560bfe8c4123cc9dd9072 *man/latest.news.Rd e3ddd04a6557fd348a3345aef1f75d6b *man/layered.Rd 15b9725c1210edccb56275d9aa304aa4 *man/layerplotargs.Rd 7fc06f7ce92236daa4481801dfa2cf11 *man/layout.boxes.Rd 08d0dbe87480eb6dd0e3c1c68de04556 *man/lengths.psp.Rd 210fc7aaf9caf507e3a1fe5364b1cab4 *man/levelset.Rd 287ca25a82ac57b14c11cafa3e6f4dd5 *man/leverage.ppm.Rd 7cd3b665080a04c1015fb9bbdde9d585 *man/lgcp.estK.Rd 2db86eb90b4c7dde2912ba74abf133d8 *man/lgcp.estpcf.Rd 5670b9dced04d800e69b6fe9a7b0166a *man/linearK.Rd ce43cb8660392964c2e766771b68fc9d *man/linearKcross.Rd 4c3a30c262ab1b232f7dd78d8f1319a3 *man/linearKcross.inhom.Rd b34168736debb9feec579758eec3cdce *man/linearKdot.Rd 433aafed4b91f57bd2654eb40ebedfe5 *man/linearKdot.inhom.Rd ec0561370f5cd03b012d18df42178259 *man/linearKinhom.Rd 61a9c0ee9a32fd5cb5cbf1f3c7cbe7a4 *man/lineardirichlet.Rd 878f90d72b1600799724e250c275a4eb 
*man/lineardisc.Rd 222ebbe07c3786f9a5e6b89738a89397 *man/linearmarkconnect.Rd 16ace3968caabdf3f139d164c724c7f9 *man/linearmarkequal.Rd 68490464e097e0b5444c9d6661239168 *man/linearpcf.Rd aeb70b3559ea3b541d02ee631babf963 *man/linearpcfcross.Rd decf33bea24df39b5dd5c4e63d0c6ffd *man/linearpcfcross.inhom.Rd 2c6a6641002bcd15a20b745e03a790c1 *man/linearpcfdot.Rd 121098a765019a9ef05df3ee44380fd6 *man/linearpcfdot.inhom.Rd 00213d6b96fdc117fcd312c2e252d0b3 *man/linearpcfinhom.Rd 018024056cf83c3027af86554319b9a8 *man/lineartileindex.Rd 0485f3313d6e643b6aecb49494c1726e *man/linequad.Rd 5b43edf08f04436cd9feae10cde06d7a *man/linfun.Rd 06adc9370b13c10de5c898592d0b4b26 *man/linim.Rd 4042a8a38fa273b0909c982481c2d349 *man/linnet.Rd 1a7c8b754e3b0ef30b9b4b91ea6775bc *man/lintess.Rd cc314c90ddb3ebc15144776d5d532b6e *man/lixellate.Rd 40cd5716edc2e6ba7e4a5a4d9fe0b362 *man/localK.Rd 0cf23cfc1764077bc3b058c0c6c570c7 *man/localKcross.Rd 1dde27dd7df9a22e845cdca5c63461f4 *man/localKcross.inhom.Rd 75063d93f0bc38698d5295141f4f37c6 *man/localKdot.Rd ea8feb05f131ef72f96c3b02ae9c4dac *man/localKinhom.Rd e1b2c5514a30d2c0901b5224aa1069be *man/localpcf.Rd 7caeac313765d2713f998e992babcd1b *man/logLik.dppm.Rd ec6b9c54f62b790559987bb0e9a84149 *man/logLik.kppm.Rd de9a9690e5ef55aaddd586b960f3a0a5 *man/logLik.mppm.Rd cf09a3bff6acfe00690b9682a6ba6ee9 *man/logLik.ppm.Rd ca7223a4495046290195eadc2f265c6f *man/logLik.slrm.Rd eeec119d489733971c6edca5f3f92514 *man/lohboot.Rd 5e041885fdbf52c04d05b05709f75699 *man/lpp.Rd bf49bae5aaa3f6fed410486f152c7317 *man/lppm.Rd e6f9da6f717d13561c0436d75bf8745f *man/lurking.Rd 275fc51666be46726ae9f7687bfa7017 *man/lurking.mppm.Rd 39f9d1446f83aa9f00e775ffc4df2b3c *man/lut.Rd 390c43841b92ba3277f25852a55d2cc9 *man/macros/defns.Rd 4348adcb8a990fa0fe18a7178c3683aa *man/markconnect.Rd d7e93e318e576a7037da86b5ad6a7806 *man/markcorr.Rd bb18727cba02e6e22e104c57e95382e3 *man/markcrosscorr.Rd 3fbdd940f172e21cf0d13f94328c538e *man/markmarkscatter.Rd d2a9ee8b6a6e538dbf76f281f811f7da 
*man/marks.Rd cbe8805c7a3c8b2b452921800ab86f4e *man/marks.psp.Rd 9350500d1136041eb9326a256a750ea1 *man/marks.tess.Rd ed22c51331fd52293a4bb2be76a9b3d6 *man/markstat.Rd 6001c3ed60cf32729c2272a2411ee82a *man/marktable.Rd 45c3349058d7d736cbacaa383d0bf325 *man/markvario.Rd 3084e6dd2e16150c8626c557ce013761 *man/matchingdist.Rd 03ba2efe317cef14e1f21e8ce796dbc1 *man/matclust.estK.Rd 8dc0a512d3e7e7118b4cd361dab4ab1b *man/matclust.estpcf.Rd 2bbafee72c33faca8f1edf1c0a747419 *man/matrixpower.Rd 7d01b28a4617ebc4636e1a54987c1d5f *man/maxnndist.Rd 85c96d136f6c8e9dc5c9c8aa8870c98e *man/mean.im.Rd 29c7e9c341f6f886f03a1879daf361b7 *man/mean.linim.Rd eb7dd4a1eedec9dc245c35a321495e7f *man/measureContinuous.Rd ead8b17a0c7f48cbac4bbcf65f0346c9 *man/measureVariation.Rd d7deffaef7f7438de2c1fb8b261838e7 *man/mergeLevels.Rd fa199b0a326a764f216644737864ad6e *man/methods.box3.Rd 7d4203dcec605900f9384915a75a1ab6 *man/methods.boxx.Rd 99a7dc9da5d2d6c05e2704af4420aadf *man/methods.distfun.Rd a9506c6df413353de7decacd955bc0b7 *man/methods.dppm.Rd bc0d58ebfe4623fbd2189083d39694de *man/methods.fii.Rd db210f5aed0d8b4cadcb87dab3d567f6 *man/methods.funxy.Rd 8a584ba69dc28ef1c9d3b8f4e60896aa *man/methods.kppm.Rd f9e72503a0844daef9fae2ee9e9d6fd2 *man/methods.layered.Rd c7092d0e6e5ec9cb37a4570dec353e2a *man/methods.linfun.Rd 10ab8ad9556b2f70246edf511bf869aa *man/methods.linim.Rd a57eca4fe46c3530a6f1ffa892830f1b *man/methods.linnet.Rd 227ad8e4000a278f5153e5384ce2b71e *man/methods.lpp.Rd d4b958e06f1771e797d014f3eba2c6b5 *man/methods.lppm.Rd 7134071842e63af453a7f892b219c80d *man/methods.objsurf.Rd 1def2b75a472a68c70d30228b758972b *man/methods.pp3.Rd 97fbcb516f976097d46319b4e6e2ce3a *man/methods.ppx.Rd 4af538fd19d3df6cbf3e5594fef186c0 *man/methods.rho2hat.Rd 07479c50b6c25452ceed595a27321a33 *man/methods.rhohat.Rd 149524760f50d0c8a3464db5658e5d0b *man/methods.slrm.Rd 0c360b4b846e145763a31f409e015440 *man/methods.ssf.Rd 7dd1d86d036b4c330d75aecf21e03ad0 *man/methods.unitname.Rd 1a20b184eb2ada390fc37b75dd319fbc 
*man/methods.zclustermodel.Rd cbb09c107d4d617d0e575dcedac698de *man/midpoints.psp.Rd 47ee423038621f64cd0b294fd3f64be5 *man/mincontrast.Rd aa070c264b489fd2cf5873bc3cd8a7b4 *man/miplot.Rd bc47733e01454abe17ed482062b87989 *man/model.depends.Rd 88e8bf9a4c5321313dc8ee242b8befc8 *man/model.frame.ppm.Rd e1070be931a9d90999dc20c01222b778 *man/model.images.Rd fc52408f38c7ba88f9a29fc21a865ebb *man/model.matrix.mppm.Rd f00ed50435704c29e0b0efb94c13eb0d *man/model.matrix.ppm.Rd 2a367d19222418868a904b9379310b6a *man/model.matrix.slrm.Rd 89d6ce449309fa62417942829f19212d *man/mppm.Rd cfdb64ecdb9135d5aa82dc7a8f7f5302 *man/msr.Rd fd7c2d5c8c968ca3d3e04a464f725b30 *man/multiplicity.ppp.Rd e05c4f7abf7dc0c6542a15c9f492932f *man/nearest.raster.point.Rd d81631d49a989c92af0dae7b58f0770d *man/nearestValue.Rd dccd900d22e8fe855ae3c65eaf8b7dc1 *man/nearestsegment.Rd 025ce529bec5a19eea34eb3b1ac3c25a *man/nestsplit.Rd 20d47e94f17803ad6af2adf5214766b6 *man/nnclean.Rd 26183ba751f095c25c68c741c7163093 *man/nncorr.Rd 9e8545168f071d57745cf82c82373ed9 *man/nncross.Rd af1283be6eb9ac2adee7c074df7f2db8 *man/nncross.lpp.Rd 73f03fa0d438544d0d3e869fadbc8cb4 *man/nncross.pp3.Rd 374022782952a7232d989e163fd0dc3b *man/nncross.ppx.Rd 50ece9f5db4cda508c95aa459fe1a38b *man/nndensity.Rd 7265302cc8a3a037270abff91061a453 *man/nndist.Rd e451198f4e4ed0016adf019c46ed98d7 *man/nndist.lpp.Rd 1f5b99cb663fe52063e037e0858b7909 *man/nndist.pp3.Rd b35b16b4268865e5de862c1eb1fd961b *man/nndist.ppx.Rd 65f0c650eb79dfc75348587c3519cf79 *man/nndist.psp.Rd 6f96676af04f6ce9098bbbce11655196 *man/nnfromvertex.Rd 6bc156723c8f3284f9b299e529b3d5ce *man/nnfun.Rd e6121e183d7e1174506cfe6fd392289a *man/nnfun.lpp.Rd d0696d6e56df786abdc2c9b6949e12d5 *man/nnmap.Rd 481424540c1db4f01a71e201d1162412 *man/nnmark.Rd 418a896aa7d1c53313e7022184ea350a *man/nnorient.Rd 092cf57239d359afc339f54592310756 *man/nnwhich.Rd 85383deb36661f8e585646c68b914b59 *man/nnwhich.lpp.Rd 0c2cde6a1be83d4ca0fee3da38549f49 *man/nnwhich.pp3.Rd c42d68ad1588309b050beb4a53d5ec6b 
*man/nnwhich.ppx.Rd d51f6141056b83fb5bb509d91335d2a7 *man/nobjects.Rd 0ac08ae5b07184e0f102d8be4902207d *man/npfun.Rd fbe2ea78291bdba8b54502e7dc2098f2 *man/npoints.Rd 20d138bd69544c6250d3dadb29305c6f *man/nsegments.Rd 4e831d956a71811de072131538ffa9f0 *man/nvertices.Rd 291f0f4c0c6d0f396f6c793b6b432f1b *man/objsurf.Rd 52f27b19004ee43069f7d9a187bf71c5 *man/opening.Rd fbb2162039c49aa32691d13932f84263 *man/ord.family.Rd 0b059320eb292ee2c82683b6630bac7e *man/overlap.owin.Rd ae656f0bd4d46dc1596e9ca11f74dbcb *man/owin.Rd 09a475e019a4a377357f721f11bb5ff9 *man/owin.object.Rd a334b67ef716e9124152624f15662c5f *man/padimage.Rd 8853d6a32603f3fa8a5d8314c23139d7 *man/pairdist.Rd e1bd2837f9dce49f1469cde3392c69fd *man/pairdist.default.Rd 4165070b062fb4d950205c19e2464b52 *man/pairdist.lpp.Rd 55dfd9519eb3f69a926a3ffdfcf345b0 *man/pairdist.pp3.Rd 6c69280a6330cdbf13fa31eb8c424641 *man/pairdist.ppp.Rd a9042dfb1c08d23195d8d3d85ff372e9 *man/pairdist.ppx.Rd 24967c12a5bfd7520004da9088fb1d55 *man/pairdist.psp.Rd ae6a17d8b47bc103cfc21d5ccb2f9fb2 *man/pairorient.Rd 06b0234b9456dd5b7b3a2f495db3d1fe *man/pairs.im.Rd 8b8744286691b257a63805405a626ed0 *man/pairs.linim.Rd e693f86f09458e493af2b7b6712fd770 *man/pairsat.family.Rd 175a81ea3c116a4f938a8cec99adb42c *man/pairwise.family.Rd d80f08886b5ba53b2460411d07c5ed22 *man/panel.contour.Rd ead3493c9b1933629908b34ec23749f4 *man/parameters.Rd 0e0f84e091064f49882023a5e5e85ea1 *man/parres.Rd 9b06494a831f88b18f8196c687770fa4 *man/pcf.Rd dfadd4ff1016669603491af6a4362311 *man/pcf.fasp.Rd aeb5cdc38dbcd2acefb53b9b192eb5a5 *man/pcf.fv.Rd 73533e4ad7f5e70e9bee39d8849a39e5 *man/pcf.ppp.Rd 1cab396336b8eab6fca38a9d12e8ec79 *man/pcf3est.Rd 35e9c91309a30d2ed93ea6ceecf41442 *man/pcfcross.Rd f9f8dbf05c3a13aa2d765b14a84f6865 *man/pcfcross.inhom.Rd c745bbb2ee3919ce065ea50628f8e657 *man/pcfdot.Rd b84a4cb9f3aa18d23a8c8e34a452a240 *man/pcfdot.inhom.Rd 49c7c4a8778492b88487f68904cf98c2 *man/pcfinhom.Rd 5bcf6eda621e887fdcb11d5b309a97ef *man/pcfmulti.Rd 
b55454aa2048e9b74f11307440aecfe1 *man/perimeter.Rd 330185579b97739a5fbbd58d4d11cf5c *man/periodify.Rd 9953f9d721b537108636a59c5205e8de *man/persp.im.Rd 699bce269544143e18548ec248b25212 *man/perspPoints.Rd 218a10f9f210cd6a8832231405a01fc5 *man/pixelcentres.Rd 7365e405b0ee8bff0d995d463d094ea4 *man/pixellate.Rd 0742b3b537455baa198536cd6155cbfe *man/pixellate.owin.Rd 92a37ce30aa7def389c2b625b9931a93 *man/pixellate.ppp.Rd f5620995ec550ba1e66098edc4351c60 *man/pixellate.psp.Rd a2aafee99f73fb9b36ce11734cf8fbd2 *man/pixelquad.Rd cfa1be8542cc71cdce8e9a87c5aae43b *man/plot.anylist.Rd 9ff682b1593457aa4003c2b041400b96 *man/plot.bermantest.Rd d308f7035d40be415c0a1d4959e2bd80 *man/plot.cdftest.Rd 4d89802315fddce7971339e719b58045 *man/plot.colourmap.Rd 40a182b39983f3f816409890bfffaf17 *man/plot.dppm.Rd d3467a14b7475b1bd3d345be2413305e *man/plot.envelope.Rd 00112835ac7e9ca52154b8d7e8b15bc4 *man/plot.fasp.Rd 2902f7b797e405cc56a82bfbde45558b *man/plot.fv.Rd bbceedd23382559bced05aeab5d33761 *man/plot.hyperframe.Rd 9dac98256d47ef1620a824ba2744b991 *man/plot.im.Rd 0115240b221ea63bb355f83505a4c08c *man/plot.imlist.Rd 7b833d3f4991ea3ac2b66dc6a2e2f105 *man/plot.influence.ppm.Rd 2b31e001e7b3cae5affd64006249ea77 *man/plot.kppm.Rd 3ef61cef6dcb869e4bdfa7de9b14ba78 *man/plot.laslett.Rd 9aa99e1d1b95df354762b42cdf4dc356 *man/plot.layered.Rd 7aa5e11efb040ffef8c2040a448e1b34 *man/plot.leverage.ppm.Rd b8ba4d99f9f9d158dad279a64c5f5dec *man/plot.linim.Rd c2a1e4dc6ad004174de60d028e7ee574 *man/plot.linnet.Rd 82dd0004ed38aeec5086d358b49d22d7 *man/plot.lintess.Rd b248fd815346568bf68bf79dfd258c35 *man/plot.listof.Rd aa59caa5ecc3fc10efa0b2ec3a5cfae1 *man/plot.lpp.Rd 8af4ffb510808a99e8df8abed117eedf *man/plot.lppm.Rd 7480127473564ad5c3a57efdf68d9d36 *man/plot.mppm.Rd 3bcbfcd3b88b35277c8d1d8531cc5dfb *man/plot.msr.Rd 06c0e3c0c9fe8910ccca44466705c518 *man/plot.onearrow.Rd d2efcf6a2633aa5cf572cc0cbbf57f89 *man/plot.owin.Rd b3f8636aee9f1ddea0a9baabcf2d9e37 *man/plot.plotppm.Rd 5b1b24a12e3b27bfef1a02dfcf1527f2 
*man/plot.pp3.Rd 423654fd5bb7167f57d9044cad307ca7 *man/plot.ppm.Rd babab0c051ae4b6a10fbfc7fc2505be3 *man/plot.ppp.Rd aaa3aa56e0d32acb2b05bfd3deaf678e *man/plot.pppmatching.Rd fe4c5736db68d4ba32a63fda55deb3fc *man/plot.profilepl.Rd 77181aa08d332b819beea5a4f2f28714 *man/plot.psp.Rd f2a2afff874266688981a56ba0f0887d *man/plot.quad.Rd 5d1d72327dba7d662ec2ab7b8ea72a28 *man/plot.quadratcount.Rd 4be5e426df9bf6b93ab71ac270e35417 *man/plot.quadrattest.Rd 29a48bdc9d2be508ee8f66afaf0f475d *man/plot.rppm.Rd 623d09d5790ab06711fbdbc9e72b145c *man/plot.scan.test.Rd 8c87c3c115a50447499916049d547a01 *man/plot.slrm.Rd bfe71ddc547fcb6a00cc8b541b229d76 *man/plot.solist.Rd 4ef4ce06a8d1027d9988db609fbb92b8 *man/plot.splitppp.Rd 1ba68d7431dc5fc9ee52e93e5e9bfe4a *man/plot.ssf.Rd 479505294dc35e7b2d4bfdaa8e55c9a0 *man/plot.studpermutest.Rd 7a7dd6a11007c6401fbd10b20b02c663 *man/plot.symbolmap.Rd a0c6b44323f2d7eb19220044a1b0e019 *man/plot.tess.Rd 803bba4ce84252fe7e91661b36ea472f *man/plot.textstring.Rd 00cd55cb42db85a314c8511ce34128cb *man/plot.texturemap.Rd 9eaa193b31b3538c61dfc1d41f0686d6 *man/plot.yardstick.Rd d8fc082a4e08900675049aa011262b07 *man/points.lpp.Rd 7420469e5183b8f16b1a78e4365e18ad *man/pointsOnLines.Rd 93aeadc1db5cf88fc229ad1065176ba5 *man/polartess.Rd daf959532330f2c700243ef0693ffa37 *man/polynom.Rd 1e4ffe51385b95fa44f17d5ebbc1e023 *man/pool.Rd 7b0c3d7a78fc8ff459956f54d439c066 *man/pool.anylist.Rd ab8ac452b1a923e9577b138e7d4be21b *man/pool.envelope.Rd 22838536f87dc87f6bb34bd5761f1966 *man/pool.fasp.Rd a0059ae9ec4770fc7f7760eb11e28344 *man/pool.fv.Rd 29a3c5eb8f035c5c972e7bc8f5b25ae4 *man/pool.quadrattest.Rd a0cf222f8b437868440d320d8febb4b4 *man/pool.rat.Rd e3ccaf8de1f8eab497c3819dddf55cb9 *man/pp3.Rd 3c83db93276f862c65e5af305ca30d34 *man/ppm.Rd 24d2e2e689c84b7d03a1f020d16e7c67 *man/ppm.object.Rd f872d02037ecfaa8afaf65dc88062807 *man/ppm.ppp.Rd 8cefc7e95a4f2333f1045bfed055e37c *man/ppmInfluence.Rd c691a22008d88d1874b5ff1ad005ea9d *man/ppp.Rd 485b77516b954a0461c22531e21952d2 
*man/ppp.object.Rd 5e5c8d7a49e135e171ce3a9cfdf0c725 *man/pppdist.Rd 5b02574996ada6c87062fbe0f6febee7 *man/pppmatching.Rd cd64b3959e89b40f5d82ee98d61fab91 *man/pppmatching.object.Rd 7c4452d5ed6b0d2fb04c2d829d62ebec *man/ppx.Rd 55186ded7f38c9d5289aeb25034517aa *man/predict.dppm.Rd 3136a25f701450a7b2ed5c0caf25b3f6 *man/predict.kppm.Rd 0a4a7f74691f676391a543f30d8c4a20 *man/predict.lppm.Rd 60e0b9c0c4f793dc28883f44719b614e *man/predict.mppm.Rd 2ac47a4d90c7248de8094628e85ce425 *man/predict.ppm.Rd baf7a18910afda5c22e97a8b394f35ec *man/predict.rppm.Rd cfb7e1f07e6b80ba77bdaa92e2fcc486 *man/predict.slrm.Rd 600900c6d48dfab3d5aef02377c99e6a *man/print.im.Rd d245091b62b8fe10f8913b76dad465fe *man/print.owin.Rd 9963e52e777b7d2d562f0ced86f1148d *man/print.ppm.Rd 9efd9c2dc831f6343afb506be3497144 *man/print.ppp.Rd 6e0624fc0182d41c6b557eb57c682a31 *man/print.psp.Rd bdfd0ef9b27f33d246fb7a02b2008eae *man/print.quad.Rd 0282ad9c58f1b1f34bc4d69270c6a8bc *man/profilepl.Rd 6e843ff4d0455ff782971b6c6946f81d *man/progressreport.Rd a0abb0988a6e917ae0eba09cffb0a7b3 *man/project2segment.Rd 9df38351cc29ede4dd7ffae6706f5d68 *man/project2set.Rd a8d0bb1cb4832789478c2fd9fdff494c *man/prune.rppm.Rd 1804afb463a1c035b5bdb6aa098c31ea *man/pseudoR2.Rd 25ee274082ef6fbd570de2f9cce4a690 *man/psib.Rd 64d7358dab074546b92f52a2ca8499f3 *man/psp.Rd e5eeca299bd8b3b0e45c1ed7d5f5c0e3 *man/psp.object.Rd 2a0f96f85d2f28f8c5e767d795170da2 *man/psst.Rd 683f4efe697380217797fe1f37c2a202 *man/psstA.Rd 34a04c1891fcea6592b1c35ea7ad0192 *man/psstG.Rd 8893288f3c6f1ea66929f38b46340d4c *man/qqplot.ppm.Rd 64ff11570ca6ac940fddc6c7bdb26b0b *man/quad.object.Rd 72261006dfc38b28a929ebbf29310c7a *man/quad.ppm.Rd 0540afc7427f901b3d724953d544f6c5 *man/quadrat.test.Rd e5e8567142ba69f23a29231576d8a6c0 *man/quadrat.test.mppm.Rd ea895b1d7a9679c3c48123012a7e01e0 *man/quadrat.test.splitppp.Rd 58514018045e526b5126305571b87c9e *man/quadratcount.Rd 399b5a89c244df698c3c85b7c620d388 *man/quadratresample.Rd 5352a621010c172fa1558b8085ee7013 
*man/quadrats.Rd 1753816156050fb9daf986acb7462896 *man/quadscheme.Rd 8837f075074d53be595ccd9f7184a746 *man/quadscheme.logi.Rd 50fe56a582cf822f0107becbdd92e49f *man/quantess.Rd f74a00203731aed9b80c02c66ff765a1 *man/quantile.density.Rd 7ebe2c0d4c1bbd72eb5dc80ce6c7bba2 *man/quantile.ewcdf.Rd dd719c0c8110bc2772f8d7912ecebcda *man/quantile.im.Rd 68961b333902f94238d28d7dff64bfdf *man/quasirandom.Rd e10e5d6e7d8fbd709e9562445bd65293 *man/rCauchy.Rd 7c870ec50946ddc32f8dce58805b3957 *man/rDGS.Rd 940c3a5f14c5ded933607832fcc642b2 *man/rDiggleGratton.Rd 08e89870e624574222db2a21bd3cb9b7 *man/rGaussPoisson.Rd 421bce7093f111898af8174284c107b6 *man/rHardcore.Rd 741c2851958d8a0fefb2a41075769797 *man/rLGCP.Rd ee27e5e6188742cd4dbcbed5e9f889f4 *man/rMatClust.Rd add9d75ec3e07cf63da3152bc7394790 *man/rMaternI.Rd 197cd3def617904dd5e1674df6277029 *man/rMaternII.Rd 40d40454aa82ff81249c9d31e8b930a6 *man/rMosaicField.Rd 168e3c311208ef80aebb1b8fa91a1010 *man/rMosaicSet.Rd bbbe71903aabcf8ceecfef2706a0f8c2 *man/rNeymanScott.Rd f002870b905894af77fa36f1bdc0eb4d *man/rPenttinen.Rd 958b981db668a82a6e9f119302584b10 *man/rPoissonCluster.Rd 7266a51131d3884bf96b03e561721671 *man/rQuasi.Rd 946044fbcef67d750f2a19149852d447 *man/rSSI.Rd d56491a88fcfa60315ed6d66201a9f34 *man/rStrauss.Rd 402edf66306db5e4703087dbcceddb27 *man/rStraussHard.Rd c556143ebe93c4f2cd2c28f0515f9916 *man/rSwitzerlpp.Rd 945e082e1dfc1677d07440e78737d41a *man/rThomas.Rd 1e3830535c87494f824cfc8afe448078 *man/rVarGamma.Rd 4aa8c9349b680a3eae25a4ef140a0338 *man/rags.Rd d799247ffbfec55868bfcb7ba832bcef *man/ragsAreaInter.Rd ba10e2653bf0888cecb2e0cc2d7005e1 *man/ragsMultiHard.Rd 94ccf22fc4d72433a17890fabb98cf4a *man/ranef.mppm.Rd f5859cdb173e22e9551ab30f0c78f1d0 *man/range.fv.Rd cb18fac0c337eab9dd887f2de53fdbe7 *man/raster.x.Rd 48db7d85e4d70016c314a208457d4c86 *man/rat.Rd 76dbaf15ad5654a5da034551b883f194 *man/rcell.Rd 2dd9734f2a1dae007f2b6038f2add499 *man/rcelllpp.Rd 55aeb0c742804dd2fd18971d10ebdce1 *man/rcellnumber.Rd 
4f00b83f740761ad77278241656c18ee *man/rdpp.Rd 8263beba126b1c37e440ce3cec96f64b *man/reach.Rd 759ba7977ab1b8e0a8dec1d606541e17 *man/reach.dppm.Rd 10f5a1a3d3c655d971399a31763aaf89 *man/reach.kppm.Rd f9d5416f5570ca8436655a84dcb46d93 *man/rectcontact.Rd f28c3268bd9b08e3070c881a4c787cf5 *man/rectdistmap.Rd 396ba365547cdcad60faa9d6210ece8c *man/reduced.sample.Rd a0c68ea64422a6edba5f9231338f0807 *man/reflect.Rd df95624d2972e9f5eb296c1ee9856092 *man/regularpolygon.Rd 20be7aeda8e4da71d02f871e2202115b *man/relevel.im.Rd 81f73ea10fad8f2c70150183eb7dbedf *man/reload.or.compute.Rd 8738ccac6e37447e056972c18eb48254 *man/relrisk.Rd f79b65f13e8b2eda544cd6f9a85be22c *man/relrisk.ppm.Rd e6d2f368c0275116caade7a495c455fd *man/relrisk.ppp.Rd 74052f9c371c289bd611fc87089c277a *man/repairNetwork.Rd ac347300d2bc3a23f9bfe4cb89aac5d9 *man/repul.Rd d3f95463a8e491f32f2b9c34506cdfc8 *man/requireversion.Rd ecc14be3229a1cce05c876f087cee388 *man/rescale.Rd 20004859dc29aa4363ad80a948fe23db *man/rescale.im.Rd 058b59b5213d55db80da61e4a0de97fc *man/rescale.owin.Rd d0dded0a368a3eaefcd26b1e5636d998 *man/rescale.ppp.Rd 13920d3f7b1391f8bd02f8e2b325a40d *man/rescale.psp.Rd 12334801657f6ed3d3b0e6b3c80eee35 *man/rescue.rectangle.Rd bcd155a7da4cc55760a6bded7ddc8a8e *man/residuals.dppm.Rd 0418405470085449656f5fc8d4d87999 *man/residuals.kppm.Rd f3eb92ee605e655747524d3ea982330c *man/residuals.mppm.Rd 05f5ee7d9e1c701a608524da5abca13f *man/residuals.ppm.Rd 627fcde54f29940a08a9c1def1673bfc *man/rex.Rd f4c5e58df396ecfc397cd6086194b41b *man/rgbim.Rd fa83ddd0842b49a342c54511d97b787c *man/rho2hat.Rd 5217c63897a5a33405a818f43ef184bf *man/rhohat.Rd ee9d83dbf3d66ff2f0ee41dd85c5d319 *man/ripras.Rd 3dd03a5b2c65b157024e2525b6502630 *man/rjitter.Rd 6dc4bbb5b1b2e45f381673a7488bbd44 *man/rknn.Rd 719a012b7c4d89626b977474782d89bb *man/rlabel.Rd 1303979d82a3cc654db7fbe57f3a8b90 *man/rlinegrid.Rd 4c0dc89855eeaef976d52181c2ec7184 *man/rlpp.Rd 3a88872dff11d1c5d3ce1e2414fce8ce *man/rmh.Rd fee57343a540ada6f67acb24e1b33b66 
*man/rmh.default.Rd a45639e352f724bc9b50a25f26eb24b8 *man/rmh.ppm.Rd fdaddf3b950e9b7e871b06f3f656d303 *man/rmhcontrol.Rd 7fb92fafe4152451c5f54116faae6d69 *man/rmhexpand.Rd 585ef0f1c7e45d290e1f7c0d693eed47 *man/rmhmodel.Rd 555a1b500fd0b98585642de442b8edbe *man/rmhmodel.default.Rd 754d31bbe18493634e1fd5021d3bc163 *man/rmhmodel.list.Rd b74fce12c103d1c45c14c78ebf3e4495 *man/rmhmodel.ppm.Rd c90b65188f256e0148e9b4152756a244 *man/rmhstart.Rd 6daa23722b901914bfec1925fe57ec22 *man/rmpoint.Rd c9efb98fb4600489851034e4914d0cbc *man/rmpoispp.Rd 00b9cb8b6413301c0182c77f3c7180d6 *man/rnoise.Rd 77f4c3c0403867805c30f21aeb6e49d7 *man/roc.Rd b062a825c0b76bc5292d540ff065b8bf *man/rose.Rd 46de1489970165e679298e0bfa806389 *man/rotate.Rd 4d6db4921d7dc47a815b93d02076a05c *man/rotate.im.Rd 1cca2bf91ce0897c70c83eebe2e0df46 *man/rotate.infline.Rd 420f42b78f4b75d037ce9e68ab287e90 *man/rotate.owin.Rd c8f5365f2f6e58785862f72a7d6e8244 *man/rotate.ppp.Rd 9f3fade667205c62415a1f97fd609bcb *man/rotate.psp.Rd 23e4e349594aaf9d57f6307a596e0feb *man/rotmean.Rd 51349aa10f2e3d2f2cae88a16c642a39 *man/round.ppp.Rd c9d186c7862c636325ad11cad7a62bfb *man/rounding.Rd e7439e3db078d957ad0bb78411706335 *man/rpoint.Rd b6a91ef76fbc45e3cb1bef941d8e4b83 *man/rpoisline.Rd c7a03bb1f0e2e57e0fe02e29d9e5c935 *man/rpoislinetess.Rd 431cc7fdc28659d5404cbacc19720b52 *man/rpoislpp.Rd 1267b0b52b75d4626575776dabc3e18c *man/rpoispp.Rd 5a98dd78a76b9d187fa5cc2fce68d8e5 *man/rpoispp3.Rd c0c57551015e5587fae41ec41d2b56bc *man/rpoisppOnLines.Rd a6b80bce2cc88f746bf34ad4e7048d6f *man/rpoisppx.Rd 2071b7797faa3874b4cafa424d149b3c *man/rppm.Rd df2d3a4e251d836e48a93416afc150ce *man/rshift.Rd 48db298e9fc094f8d5f422336d44cdb7 *man/rshift.ppp.Rd 7025e64603cca3771c59a17930a9d413 *man/rshift.psp.Rd 7e169778102b366e7422e82c1f8b424f *man/rshift.splitppp.Rd 1638325f01a8308a7eba1f4d3efc5b5b *man/rstrat.Rd 034d6d14ca1d6cf729f94f7e17531169 *man/rsyst.Rd e846ff04cbf9038ae51986a3e99a6c26 *man/rtemper.Rd 9c837ec5bf1e45a0c099923212adabf8 *man/rthin.Rd 
e5f0ef76ed15fe38919f8eaac90df411 *man/rthinclumps.Rd c5d3d8890255ea2ed99542aa58eb4e81 *man/run.simplepanel.Rd 0f58540ffbc0d6b01fc785934fde788c *man/runifdisc.Rd d5d02f9cd0793e69a1c46b8eadeca5a9 *man/runiflpp.Rd f00c10fda16472141dae745742629b39 *man/runifpoint.Rd 2de1693c1362e6e987c05312d0f8a150 *man/runifpoint3.Rd dd5048dab20cece81901d32fc828845b *man/runifpointOnLines.Rd a9273f2fccb179783c06c7ff39ec6492 *man/runifpointx.Rd 9a64571432387abd7755ea566c816b5c *man/scalardilate.Rd a847cfd828fed5a9b2405240961865c5 *man/scaletointerval.Rd be7df2e3d96dd962d36880cb3c21d655 *man/scan.test.Rd b9fab8b1b77760071c342225e9d34130 *man/scanLRTS.Rd 06ae579a38031e3e417a73daafd3789e *man/scanpp.Rd 7cfcd24d528d6a4427766e3a6a5c2ce0 *man/sdr.Rd 20d7ec0572c3f2faa5b0916ae5f5398b *man/sdrPredict.Rd f7720a4e8908af1298edd04ad488848b *man/segregation.test.Rd 844656835d998b29a13720cf3dc80200 *man/selfcrossing.psp.Rd a9766e9a8eeb883777800f10cde7461b *man/selfcut.psp.Rd 289b684b595c6df72fdbf05abf8ecbf4 *man/sessionLibs.Rd 903812cd2ac3c69daed146cbac19ec4d *man/setcov.Rd 1072ec85cf03b2046765d9b449371fb9 *man/sharpen.Rd c9d619e191ae1c73f0df5fe95c1185ef *man/shift.Rd 45dd8f9f531004bd591d226f1d9e2dd7 *man/shift.im.Rd a076cc56f16a7ff22c6d6ebdb0858824 *man/shift.owin.Rd 58ad180730e89c82f53cf173c701424f *man/shift.ppp.Rd 1538f5ebe5dcce4327c659f72def0e16 *man/shift.psp.Rd ecbaeaebcafe20952c1a38fb8410e0ce *man/sidelengths.owin.Rd 59e8f58e03cd57c4b15f6d888b15d9c0 *man/simplepanel.Rd 99ebdd81548bc884bd7dc69feed637a2 *man/simplify.owin.Rd 70198d3f61fc55f1a52b4b95266f7842 *man/simulate.dppm.Rd b1a4fc34f518127e1857e231dccce1c6 *man/simulate.kppm.Rd a77f193e9fc39cc11d662323d2194f43 *man/simulate.lppm.Rd 33b7b6d9e3c230b14c9197929c78676d *man/simulate.mppm.Rd a327ab464756e2b6c6a447e6b99967a7 *man/simulate.ppm.Rd 4e92e07224d7111e24cadf4b88a12b6b *man/simulate.slrm.Rd 62c9b31128433c452625e4ba5fb81d83 *man/slrm.Rd efad4027d784fc5e84d50f618186763a *man/solapply.Rd 9a1fca1adde96597bb0667a94e87b7b9 *man/solist.Rd 
364be560f6b2de1dcffa0e5fd6d923eb *man/solutionset.Rd 7b0fe01ae51aba82a512630a7c8445a1 *man/spatdim.Rd 3d7f68fccea4b7a01c87c7b1aaa7b287 *man/spatialcdf.Rd 4345bf4a40259f7c06e63750e636840b *man/spatstat-deprecated.Rd 41390f96ef621f7c948b1cd9c7741521 *man/spatstat-internal.Rd a3d370d729d6856a85a735cbefcf0c89 *man/spatstat-package.Rd f53c1df3734c3d34c08885d33ee29aa1 *man/spatstat.options.Rd c542af7c96e45067fd97f43574d48da6 *man/split.hyperframe.Rd fc5a9801d8479fa21b57db191fa29f47 *man/split.im.Rd a85f30741ee936b330a7aba10aa312d9 *man/split.msr.Rd d6b1c93942ba2a337ad7f6b4503b4e5a *man/split.ppp.Rd 3052ed93f1e7c891821febddeb557db5 *man/split.ppx.Rd f8ca3f4632db9ba53e39edb98c39e95c *man/spokes.Rd 4a8813dd800e5b74f847a19947e1d682 *man/square.Rd 079af91858f6ac8d4e2dde7034740579 *man/ssf.Rd 8d86f7821a1c4b1b4bbbb1b95279fa33 *man/stieltjes.Rd 4badd1f6e4c47d32dadaac43970b5422 *man/stienen.Rd a6106ff5f15272f6e0bcd6920caf56c5 *man/stratrand.Rd 68696bcaa5a5ec54331b008e84b4e1e0 *man/studpermu.test.Rd 68ff5b1d8fdf22dcc9b8e7b18411bef0 *man/subfits.Rd 623138d90e1dc24eba152d8c2b5928c2 *man/subset.hyperframe.Rd 0affd4c192dbe497ed6458020faff409 *man/subset.ppp.Rd db7828e15db8ca8ed814cc958ce66d67 *man/subset.psp.Rd f1c7accea61aea9da256092d74cce2aa *man/subspaceDistance.Rd a733bb706605ce612aca29b0401cd3fe *man/suffstat.Rd 6f40b323e1ce8a8774f8a5368bed3167 *man/summary.anylist.Rd e0a17af0e58b128f48778df9560e7897 *man/summary.distfun.Rd 533ab659806b6e668fa64e8e4a4ef5b0 *man/summary.dppm.Rd d0fe66866ca1d4901ad822a22de28094 *man/summary.im.Rd beed1c5e94dfbb7907d0803c88b879a0 *man/summary.kppm.Rd 48df7eebf9876aa61c2a0b5271fac5d9 *man/summary.listof.Rd 4892b549bbe36df282b1b29bb478858f *man/summary.owin.Rd db0406db36fe20690fbc5ac295ce28d1 *man/summary.ppm.Rd 3539853c347f338355fd6a84a8f493e2 *man/summary.ppp.Rd f3e0a6f7d1ecd0c771e02c3ecf8f2bf9 *man/summary.psp.Rd 7a1165051c8ab100aab5b7f05d4dd02e *man/summary.quad.Rd 935671509f14b24888c6baa8152b53b7 *man/summary.solist.Rd 
38e24aa9efb53685da86c344a2159827 *man/summary.splitppp.Rd 0c537cfad89f4f0caa8739f96dc44977 *man/sumouter.Rd 4e61d58afb5646fa59a4f0daf6cfadec *man/superimpose.Rd 9555c94642212f7cfbb22fe921eab696 *man/superimpose.lpp.Rd 09a3c8bb94a975eb23a398d8123bc4f0 *man/symbolmap.Rd 134900919bc03f94aa3418be9fe2fbb3 *man/tess.Rd 3db01ead88a37caf9a432ded8968dc43 *man/test.crossing.psp.Rd abfd2267fa838b94b0019ecd143f1aa5 *man/text.ppp.Rd 44e4516ec3b5e2d588d381b7ac48697e *man/texturemap.Rd 2c95ed0887fe249bd0c7f20649fc29d8 *man/textureplot.Rd 42178ef5976cd4236fd523b4e87008a9 *man/thinNetwork.Rd 85668ad3685e742f5f540a3269406d5d *man/thomas.estK.Rd 5083b5bec798fe2f17b0c8986d8ac24c *man/thomas.estpcf.Rd 2f381662f92656dc153405817e035cc8 *man/tile.areas.Rd b7e64c4e39d547e9bb2a2ad12ff3972a *man/tile.lengths.Rd e6f03eb4af1b8042bef6bfdb54159635 *man/tileindex.Rd 7b815aa5240d4e664ea6d16f6671d428 *man/tilenames.Rd 1e0468de33d16a5faf3848ec1f6a6693 *man/tiles.Rd fd49521e7c21490bf42ec11b02aca531 *man/tiles.empty.Rd ede1768dec67583e60706b8e226d5481 *man/timeTaken.Rd 992911899fba6ca5fc6439da3cf51805 *man/timed.Rd f05325c96c5a7f9e85e8e420af4d9726 *man/transect.im.Rd 33855ed0e811bb2984cdf15156ca0a21 *man/transmat.Rd ac1d70b547af8d8efc12a3d4c28ee0ed *man/treebranchlabels.Rd a76fcd8c05b143c22a30edb3248e45a9 *man/treeprune.Rd fc56759d36af58ff75ffddb35ed1fca5 *man/triangulate.owin.Rd df2c4468d4c21b91dca2b6b89cf20bd9 *man/trim.rectangle.Rd b64a871fdee9d5938466314f3b4e4a11 *man/triplet.family.Rd 936d4c74559f068b8b91dacf9c8d7117 *man/tweak.colourmap.Rd 9363f374d1d9638193af018b2b9b956b *man/union.quad.Rd 372e71d05d84621b1d50af8470af914f *man/unique.ppp.Rd a8f2d4c3df673c0eaf3e2adf79d284af *man/uniquemap.default.Rd 974fe345e6a0a08d62b3ec884ec74488 *man/uniquemap.ppp.Rd b1467b52c58a607c87b2f215697ede83 *man/unitname.Rd 61992a11d7919419002356813917f96b *man/unmark.Rd 898e839b9ce21c5f019b69814bd91220 *man/unnormdensity.Rd d11a2ad5dd1042b6caff2a7aac7aa935 *man/unstack.msr.Rd 7c4da83986827611144497e8ee64a594 
*man/unstack.ppp.Rd 3b7da61bc7a9b9e40cdd60916910b15e *man/unstack.solist.Rd d97b7f4286814cf89ce5f06a76409744 *man/update.detpointprocfamily.Rd 7e613050b5075767ca9d48d7070dd386 *man/update.interact.Rd 5f73a555da54aa666f7e9d8f39d3f412 *man/update.kppm.Rd 98ce0cb16c8a4fe6980b9265881b58ea *man/update.ppm.Rd 70f976c07e44c9fe6bf41b9d55d326cc *man/update.rmhcontrol.Rd 8ca5be2ace0c48113afbaf2822674a55 *man/update.symbolmap.Rd 47bd28833a40a74899d25734078640d6 *man/valid.Rd 9449cb5c1fec24621c998a506be0eac2 *man/valid.detpointprocfamily.Rd 1ed9f6e59dad62161fc6867d14156a24 *man/valid.ppm.Rd 9b9f643ceb5ba73627013d63dd7515d6 *man/varblock.Rd 75ce8887852822a4d08a9e44076c5976 *man/varcount.Rd dfa61aa27f9908e772b4dbfc8db2d708 *man/vargamma.estK.Rd 20bdec51627e17637f8e487de060250e *man/vargamma.estpcf.Rd b434f6798cc2ebe70ac4172191c3d800 *man/vcov.kppm.Rd f85824c3c9ec3a4c31f04be59552caf7 *man/vcov.mppm.Rd b0791f02e9d9ea052bc6f13af6b86100 *man/vcov.ppm.Rd eb7578d51b7ad9f21067d0bbba362167 *man/vcov.slrm.Rd 9e2134fa9fdccc85f2ce875e130173c3 *man/venn.tess.Rd bc6d0d00510542ec7b835bf5fc94fbd1 *man/vertices.Rd 1d850409bd4f915052fa4be2b1e62ae1 *man/volume.Rd b3fe75f22e0494e4484532f8b079a50d *man/weighted.median.Rd dd00a09e89c41f24564ceab5dba86724 *man/where.max.Rd 03f030dc0305af42897046f755430da8 *man/whichhalfplane.Rd cd1d44aff4d46566233ded55e833a25e *man/whist.Rd 513778fbca80df00f2ea2b710263fe3c *man/will.expand.Rd f42290be6d3a75590b290967ad76c483 *man/with.fv.Rd b135bf634a66784497a9cb068415a686 *man/with.hyperframe.Rd 967466b70f99daef536e805505a877d6 *man/with.msr.Rd d02099194c00a636349b66309fed886e *man/with.ssf.Rd 4e0573cb24d5d923db6ae673896ac1d6 *man/yardstick.Rd 33376b40fd1dfd0f3fad8c6ec146fcd4 *man/zapsmall.im.Rd 46bdd584bf584fb298bfe431934e36cd *man/zclustermodel.Rd b27b2efe2ed6f95f76f2b9b6ca32af87 *src/Ediggatsti.c dd56326afe9822ac5e34b988f3b33ac3 *src/Ediggra.c bf1a408ebcd8d51978d0dd0e844c8346 *src/Efiksel.c 99deda1b881b1185a27906dd710ffa5c *src/Egeyer.c 
cc93611b6bfcaec169861cbc088a970b *src/Estrauss.c 606313254e3a3982dae8272420c16383 *src/Kborder.c 597402bd9d35fbb6a02e8d6e994a5559 *src/Kborder.h c6533abef1f8acb43ed3bff9a044b931 *src/Knone.c c394c76e3bf450475cc8cc5789b8ebf5 *src/Knone.h c1937ccea286609c2306f1e373ba4ea8 *src/Krect.c 707949facbb1443a42c08d275604ce41 *src/KrectBody.h 5eef7701a7848be25a1907a03897613c *src/KrectFunDec.h f7ad99f0f44158cc2c9100e38cc1b185 *src/KrectIncrem.h 08a4f417aed6959d94da3abc8bc55d0f *src/KrectV1.h 92268e0af4222764daf4b106a12bbadc *src/KrectV2.h 263f2296e12aee28f73dff92cef5dd61 *src/KrectV3.h 4ab4852a66f7a56b52e999f326f179c2 *src/KrectV4.h c121e1b0d5abb6b24e65e05511632dca *src/Perfect.cc c0ac68682e8c7e26ce52b8c76fd3d4ab *src/PerfectDGS.h 3a040c136a2b157e7854524d2c5c9c11 *src/PerfectDiggleGratton.h 5bc6a1478093ba8401ef8ff47d939298 *src/PerfectHardcore.h af1babec03bedabd8cdd2ccb694352f4 *src/PerfectPenttinen.h 786067ae57897a4392667fa2acab7595 *src/PerfectStrauss.h ec004bfa0111b9b3f3c55e7ede7fb4a4 *src/PerfectStraussHard.h 69ec8d8871349333bf93fffa35f18bf8 *src/areadiff.c 49cc7fd81bbad7661295a40e35a52b54 *src/areaint.c e8ed8193bc33c29418fce2aa4a834d22 *src/areapair.c 15c96da5753675c30125760d4cd874a7 *src/auctionbf.c 89cad006e13a81a4b793d89b2b3bb7cf *src/badgey.c af433219da832d098301a5bb0321c3f9 *src/bdrymask.c 27c73a0901b9d346ba84974f7481ff3c *src/call3d.c 542e6e474340e0aac3ed97f6a92777b4 *src/chunkloop.h ff40cb0de6329e149159ffc5515e36d3 *src/close3pair.c 2ebf50de6feb7cc18dd72446fa94d033 *src/closefuns.h 2f2118840189e4d51f959acd69694096 *src/closepair.c f1d7c73d6385444182ec822d20cdfdbb *src/connect.c becea4a1ea42b8a39021c01abdc6459d *src/constants.h ce8bcc52c3045ae41c8a255fa1d96846 *src/corrections.c dcf500827ff4c8b060819be2b274a7d7 *src/crossloop.h 8a482da6b76db84c12b938e60be34646 *src/denspt.c b68672feaccb33dd5032692c79be28f6 *src/densptcross.c fd5c0ecd545b4d9e50d0d4e1e202beb3 *src/dgs.c 1780b63b61abab575b2d855bb40e8ca8 *src/digber.c 57fcffca69c7d7deb5937acbb6955dad *src/diggra.c 
9d438b3137f879ab21f85378bba410a6 *src/dinfty.c 0ae41a393cfc52268c51aff47bba89a8 *src/discarea.c df2799cde88dc90925574e8a0a3cbc3a *src/discs.c 057ad9a508567553f7537e203ae60a8d *src/dist2.c bcfef6be58ba5ee2fd30dd3dbd8c216f *src/dist2.h 734c79510bd1ba5ebaaa3e4c28f059bf *src/dist2dpath.c 485b7045dd1d3e5827e3873a2f9ad79a *src/dist2dpath.h 994fdba0a0747770ab7894dc5eef832f *src/distan3.c 6f37dafece55800d066d8a84b6e319eb *src/distances.c 1d194ad72367093bc9598390c2ae16d7 *src/distmapbin.c 7028f09de82307e1e46de69c104b7423 *src/dwpure.c fb4f072fa1b114c5c291ce62c44fd136 *src/exactPdist.c 7fd7547788f305f41ccde069eede6a27 *src/exactdist.c 79bdd83f960746df0bef5cea8bbaaead *src/f3.c 1df01cb8fdb284bf568e84b097777f9e *src/fardist.c 392c9edaa96b756912bf59450dd72ebd *src/fardist.h ab7588df53688ba2bb383afaaa58d0d7 *src/fexitc.c 9ad3159a4625df6b45245dedba8b816d *src/fiksel.c 0ba81075007b6ab741f3eda829da8e99 *src/functable.h ab8eb5c8d9e04c1a67137776a64fb47d *src/g3.c 3280a084e3cdcb931801c42fcd111d2e *src/geom3.h 5e13151e750d3fedb93005afc8c67954 *src/getcif.c c4d587523e2d2e58615eb0d2084a2167 *src/geyer.c 3228576b7ca41179fe5a99fd0a4d4001 *src/hardcore.c f64c32ad80b0c295009994ffb7299670 *src/hasclose.c 192aba4179d54ad18f0694eb7d1c1a3a *src/hasclose.h d89458d547473b5eba7a73d9f82f2c72 *src/idw.c e8756f5337733631b65efc286f5d7ffb *src/init.c 9c79e8972c24446466e9dcb30ad82217 *src/k3.c df77395d51129b25c2154b11043aba57 *src/knn3Ddist.h f503a500844077eec40c7c30eea69764 *src/knn3DdistX.h f129ad504bf8946cf6a755c7f04fe253 *src/knnXdist.h 07039b406a87967282c97eb2bfd60707 *src/knndist.h 7ecc9842efabd93bc452bbcf485dbbb8 *src/knndistance.c 55295547fd74e0cdd004ba3547e181e2 *src/knngrid.c d2225a652d72aa868364c0cbaedc4a68 *src/knngrid.h 5ca88ac5e99e094f0b91183500a4f433 *src/lennard.c ffb582f82a4711133646fed8bb9fdb4a *src/linSnncross.c a5c1cf5b36b43dd6e485c244a8211e7d *src/linSnncross.h bbcc27c1827cf4e7b3c52acc1620a958 *src/linalg.c da9323b044795fa30043cc7fa4cff039 *src/lincrossdist.c 
08c783db42f88ddf4aeb22d785eebd7b *src/lineardisc.c 4c2ed032c9ade8f7b0def614b467e21e *src/linearradius.c db2178ff853619414cc697d9036df0fe *src/linequad.c 12f26fa5b753013dfa6c1fe83fb869ec *src/linequad.h f7d4ee820e1941b157ea7671c6c4d424 *src/linknnd.c 6a1fc45c0e2514ff3fbc64d70c08f32b *src/linknnd.h 68ffec5ced261473748373d870bb38cb *src/linnncross.c f5e9929e504c04aa15bca3d400e8161b *src/linnncross.h 41d4b67757c18834fdec28c49685464e *src/linnndist.c 553cab9435f615564c8627e9ef8c2604 *src/linpairdist.c f470c12dfd1ea5e5fb3131459716e77e *src/lintileindex.c 1414378013832e4d35a7609f780eb437 *src/linvdist.c 5cb3a7386ab9a7899aaac191ee381d5f *src/linvdist.h e677d804970b9268c88a6de8abdd252b *src/linvknndist.c c8d86c46b7fb893c19f31e0a1d9a9cfc *src/lixel.c e3c36533e55e56f4d33e9d91111d0428 *src/localpcf.c cc8ec72a60677de2bda193aa72878bc6 *src/localpcf.h 767ddd93b5f66825c1ed17333a89be1d *src/loccum.c ec04ffaec574081d9d2d834905ea9355 *src/loccums.h aad8df586d677c22758067ff5ec63be2 *src/loccumx.h 0e7a4d103e1c623473fb0b8e0353d763 *src/lookup.c d4f690790bb0e2585fd2a2645e0556d2 *src/looptest.h 3979de8fb402dcfdf1f173a5478f2cd8 *src/maxnnd.h 9e0b28ecd67dd085ab596f3ae9fafa50 *src/methas.c 69d57274cda1c955d631a7c241cb9a00 *src/methas.h 73102b289a6fdd1aacc94fadd8c6698e *src/metricPdist.c 3686d1d68ce9d66cead48126fec506e6 *src/metricPdist.h d4184783a5e92f36a1174f3cdfbe5aed *src/mhloop.h 86d84eb3a26f011a800acc8a41a31b12 *src/mhsnoop.c 81c1a015083476952ee14be55991f2d9 *src/mhsnoop.h cfce4c5e0f35b12efa19c0e5144fa540 *src/mhsnoopdef.h af57f00feb578ceeb59fc71b5056e27f *src/mhv1.h 30677e286f648b6f5cc6a39706fe4130 *src/mhv2.h a1cfccc17f8ec667152b7f04b69cb8e6 *src/mhv3.h d2deceb7ed8f51910ab02b2e550b0779 *src/mhv4.h 8895a12dc3a8b12e2b2fb3842bb10270 *src/mhv5.h 539ea916b0de7d9ef17aaaf821747066 *src/minnnd.c 0cde8a3747fd9fa2610fbd811695d087 *src/minnnd.h c6a2cc088688b471abf7ea5a9bb243c0 *src/multihard.c 969862ca36a46ec344c0697c0ff0c439 *src/nearestpix.c 199b92d2ac5ffb786691613345e26216 *src/nn3Ddist.c 
26e46843fa80afc177b5306a47f25994 *src/nn3Ddist.h 45124a9522e4b02ea7205d71f617b867 *src/nn3DdistX.h 697ead926a991d932a2c57cc5ee26747 *src/nnMDdist.c 77417067aa7539794fef337365495dff *src/nndist.h af1ef3af29ac5dc30a190234e9b28e0b *src/nndistX.h c647ce5b415721df1bbdcc69568bdff0 *src/nndistance.c 93dff60f269800a42b3dc330294e8c97 *src/nngrid.c 74149ebdd825d1d392ce4636d9e5fc7e *src/nngrid.h acdf88b1cfedbbb87d81bb727761decd *src/pairloop.h 9340279b0709a21d478ae809bb67385e *src/pcf3.c 887daec80901782cc831ba2dbcd5b3be *src/penttinen.c e6a3adf242e87c2679cdcbd6f833be20 *src/periodic.c 18159504797733ea0f276e312dafe81d *src/poly2im.c 1daedce0737a7e59eb9add32093b5ec7 *src/proto.h dc7d8f0ee5ffe7f9397c25b648d93c1e *src/quasirandom.c 51966f18266cee6238c216c5d09cf01f *src/raster.h 0ad50bb7333ec517c93bd801d0d84338 *src/rasterfilter.c eedea99b2a4ffaa2d25db2bbb8ad4017 *src/ripleypoly.h 574358e78217dc076352a2d21c782344 *src/rthin.c 5d8d04c3408eec3f41311d54d6b78a99 *src/scan.c ea10e9a3d51ee60616d2a9a66fb5f2f0 *src/seg2pix.c eec4b90e9918490a4a00174898738f9e *src/seg2pix.h c85b38af977450a3adb73c3e9d168574 *src/segdens.c 3a5e04ac4ad9fc0efd10ef39dc55f041 *src/sftcr.c 3cec8ec5c0c2ce320258293161c467c2 *src/sparselinalg.c a9067e9d5e909452fc2a885473cb3039 *src/spasumsymout.h 91a81ef72eabefa7b055cb686f55ff21 *src/sphefrac.c 7877dac5d832e95257442e8b7fa8f02b *src/sphevol.c 18b99b034669b76b4b9ccaef945200f0 *src/straush.c e072e3a74914a74af746481c3a3b8b3b *src/straushm.c 28d7ac41aaef4367e9d57b020ed5fb3c *src/strauss.c 0cf60fa5405e4b7f31cde35a0d390351 *src/straussm.c 7065b3feb3489e8d8d628cb8e3c6ab53 *src/sumsymouter.h cb023257546212cff9cec656d328d6d2 *src/tabnum.c 815c8b85d5ba18e85a7305ee9fb7a09a *src/trigraf.c 03e65a27588194512db2649bec6e5277 *src/triplets.c 863c8d7bf79250652cab60e7b1a962aa *src/uniquemap.c 5e2ce9b3ee96ddf6d66f1baf2c41ff6f *src/uniquemap.h 247ad8564b98259227c47ae909d170f9 *src/veegraf.c d43888baaecd5e2a8d25bc8bae058fb0 *src/whist.c 324e7df153964a74722a8e6bb8dae15f *src/xyseg.c 
5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h 5662cc3feeb8b25a9e413547d82f4074 *tests/badwindow.txt c1624129ac630ef4653e3119d70ffa5b *tests/selfcross.txt 74f982388d326e39be5aa9ea1e058945 *tests/testsAtoC.R b3247796ed894532059fe54ee0f03d4f *tests/testsD.R 3c0d2fee6fcc0ea376bf24f61ad78a92 *tests/testsEtoF.R 74a975012a2f78b3a3416df1200cbee9 *tests/testsGtoJ.R 7f40fe300a5f4d69ed912aa649d020d1 *tests/testsK.R 5004ea6e59c9861f1d3614ea07d549c9 *tests/testsL.R 91a0dd2c78710f5637ea5d51b2011a64 *tests/testsM.R 440d8dbf9074b84c9041c1dd9b0cc426 *tests/testsNtoO.R 54c3556b3d778db635ef5f574428a1c5 *tests/testsP1.R 057f090f2c9d3f57d7504f2f8ecaaec3 *tests/testsP2.R 6c8b70d1d17f7a104334a97ddd90bfb6 *tests/testsQ.R 175a132934f99ad725719d4a8983e138 *tests/testsR1.R 1fea812514a5fe4cbdeb8e63233ad6b3 *tests/testsR2.R b9c864e07da632a946fa6d19f28238c4 *tests/testsS.R 2538a453c8abb39104900325dff734c0 *tests/testsT.R fd87eea9eb5c0e5dbfcb7ff099834dc2 *tests/testsUtoZ.R 2f19eaeeb3e582db6477b0c7e042b210 *vignettes/bugfixes.Rnw 8bb66a8aa0e8cee0b34f1ff8f81a4e32 *vignettes/datasets.Rnw 23c7cff7980144d2d528097174bf7114 *vignettes/getstart.Rnw 8cda84badf5153c61e2b6b2e7bf14322 *vignettes/hexagon.eps 28c409e6cfde065a32cdc922787086ec *vignettes/hexagon.pdf 5d818e3b6c4cc36b55b35289c3282394 *vignettes/irregpoly.eps 1dd34a3acaa93d24bf0388fa83caf892 *vignettes/irregpoly.pdf 471ba4e0d3f21bfb11da447894ed47d4 *vignettes/replicated.Rnw 0bd1c5d601f8b5a1f966bc5d746dbdb7 *vignettes/shapefiles.Rnw 2f575cc02bc530ed1409993cad0b98c7 *vignettes/updates.Rnw spatstat/inst/0000755000176200001440000000000013160706031013067 5ustar liggesusersspatstat/inst/doc/0000755000176200001440000000000013624161300013633 5ustar liggesusersspatstat/inst/doc/updates.Rnw0000644000176200001440000030022113623712063015775 0ustar liggesusers\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} % \VignetteIndexEntry{Summary of Recent Updates to Spatstat} 
\newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} <>= library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Summary of recent updates to \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \thispagestyle{empty} This is a summary of changes that have been made to the \spst\ package since the publication of the accompanying book \cite{baddrubaturn15}. <>= readSizeTable <- function(fname) { if(is.null(fname) || !file.exists(fname)) return(NULL) a <- read.table(fname, header=TRUE) a$date <- as.Date(a$date) return(a) } getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") { fname <- system.file("doc", tablename, package=packagename) readSizeTable(fname) } counts <- c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines") mergeSizeTables <- function(a, b) { if(is.null(b)) return(a) for(i in seq_len(nrow(a))) { j <- which(b$date <= a$date[i]) if(length(j) > 0) a[i,counts] <- a[i,counts] + b[max(j), counts] } return(a) } z <- getSizeTable() zutils <- getSizeTable("spatstat.utils") zdata <- getSizeTable("spatstat.data") zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt") z <- mergeSizeTables(z, zutils) z <- mergeSizeTables(z, zdata) z <- mergeSizeTables(z, zlocal) # currentcount <- z[nrow(z), counts] bookcount <- z[z$version == "1.42-0", counts] changes <- currentcount - bookcount newobj <- changes[["nobjects"]] newdat <- changes[["ndatasets"]] + 1 # counting rule doesn't detect redwood3 newcode <- changes[["Rlines"]] + changes[["srclines"]] bookcode <- bookcount[["Rlines"]] + bookcount[["srclines"]] growth <- signif((100 * newcode)/bookcode, digits=2) @ %$ The book \cite{baddrubaturn15}, published in December 
2015, covers everything in \spst\ up to version \texttt{1.42-0}, released in May 2015. The \spst\ package has grown by \Sexpr{growth}\% since the book was published. This document summarises the most important changes. The current version of \spst\ is \texttt{\Sexpr{sversion}}. It contains \Sexpr{newobj} new functions and \Sexpr{newdat} new datasets introduced after May 2015. <>= options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0)))) Plot <- function(fmla, ..., dat=z) { yvals <- eval(as.expression(fmla[[2]]), envir=dat) plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals))) } @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.4\textwidth} \centerline{ <>= Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", main="Spatstat growth") lines(srclines/1000 ~ date, data=z) text(as.Date("2015-01-01"), 9.5, "C code") text(as.Date("2015-01-01"), 60, "R code") @ } \tableofcontents \newpage \section{\pkg{spatstat} is splitting into parts} \pkg{spatstat} is being split into sub-packages, to satisfy the requirements of CRAN. This should not affect the user: existing code will continue to work in the same way. Typing \code{library(spatstat)} will load the familiar \pkg{spatstat} package which can be used as before. \subsection{The parts of \pkg{spatstat}} Currently there are three sub-packages, called \pkg{spatstat.utils}, \pkg{spatstat.data} and \pkg{spatstat}. \begin{itemize} \item The \code{spatstat} package contains the main code. \item The \pkg{spatstat.data} package now contains all the datasets for \pkg{spatstat}. \item The \pkg{spatstat.utils} package contains utility functions for \pkg{spatstat}. \end{itemize} Typing \code{library(spatstat)} will automatically load \pkg{spatstat.data} and silently ``import'' \pkg{spatstat.utils}. To access the functions in \pkg{spatstat.utils} directly, you would need to type \code{library(spatstat.utils)}. 
\subsection{Extension packages} There are also extension packages which provide additional capabilities and must be loaded explicitly when you need them. Currently there are three extension packages: \begin{itemize} \item \pkg{spatstat.local} for local model-fitting, \item \pkg{spatstat.sphere} for analysing point patterns on a sphere, \item \pkg{spatstat.Knet} for analysing point patterns on a network. \end{itemize} \pagebreak \section{Precis of all changes} Here is the text from the `overview' sections of the News and Release Notes for each update. \begin{itemize} \item \spst\ now Imports the package \pkg{spatstat.utils}. \item \spst\ now requires the package \pkg{spatstat.data} which contains the datasets. \item \spst\ now suggests the package \pkg{fftwtools}. \item Tessellations on a linear network can now have marks. \item More functions for manipulating tessellations on a linear network. \item New functions for simulating point processes on a linear network. \item Nearest Neighbour Index function can now return mark values. \item Index of repulsion strength for determinantal point process models. \item Nearest neighbours between two point patterns in any number of dimensions. \item More options for handling bad simulation outcomes in \texttt{envelope}. \item \texttt{mppm} accepts case weights. \item Bandwidth selectors warn about extreme values of bandwidth. \item Fast kernel estimation on a linear network using 2D kernels. \item Extension of Scott's rule for bandwidth selection. \item Cross-validated bandwidth selection on a linear network. \item Random thinning and random labelling of spatial patterns extended to different types of pattern. \item Confidence intervals for multitype $K$ function. \item Envelopes for balanced two-stage test \item Extensions to adaptive intensity estimators \item `Dartboard' tessellation using polar coordinates. \item Standard error calculation for inverse-distance weighting. 
\item Kernel estimate of intensity as a \texttt{function(x,y)}. \item Extract discrete and continuous components of a measure. \item Improvements and extensions to leverage and influence code. \item Plot a line segment pattern using line widths. \item Find connected components of each tile in a tessellation. \item Geometrical operations on \texttt{distfun} objects. \item Join vertices in a linear network. \item Distance map and contact distribution for rectangular structuring element. \item Lurking variable plot for models fitted to several point patterns. \item New dataset \code{cetaceans}. \item Gamma correction for colour maps and image plots. \item Class \code{units} has been renamed \code{unitname} to avoid package collision. \item More support for tessellations. \item Fixed longstanding bug in leverage and influence diagnostics. \item Improvements and bug fixes for leverage and influence diagnostics. \item Tighter bounding box for \code{psp}, \code{lpp}, \code{linnet} objects. \item Improved layout in \code{plot.solist} \item Tools to increase colour saturation. \item Connected components of a 3D point pattern. \item Accelerated computations on linear networks. \item Accelerated simulation of determinantal point processes. \item Improved printing of 3D point patterns. \item Minor corrections to handling of unitnames. \item Improvements to \texttt{ppm} and \texttt{update.ppm}. \item Correction to \texttt{lohboot} \item Numerous bug fixes for linear networks code. \item Now handles disconnected linear networks. \item Effect function is now available for all types of fitted model. \item Geometric-mean smoothing. \item A model can be fitted or re-fitted to a sub-region of data. \item New fast algorithm for kernel smoothing on a linear network. \item Leverage and influence diagnostics extended to Poisson/Gibbs models fitted by logistic composite likelihood. \item Two-stage Monte Carlo test. \item Dirichlet/Voronoi tessellation on a linear network. 
\item Thinning of point patterns on a linear network. \item More support for functions and tessellations on a linear network. \item Bandwidth selection for pair correlation function. \item Pooling operations improved. \item Operations on signed measures. \item Operations on lists of pixel images. \item Improved pixellation of point patterns. \item Stieltjes integral extended. \item Subset operators extended. \item Greatly accelerated \texttt{rmh} when using \texttt{nsave} \item Sufficient Dimension Reduction for point processes. \item Alternating Gibbs Sampler for point process simulation. \item New class of spatially sampled functions. \item ROC and AUC extended to other types of point patterns and models. \item More support for linear networks. \item More support for infinite straight lines. \item \spst\ now depends on the packages \pkg{nlme} and \pkg{rpart}. \item Important bug fix in \code{linearK}, \code{linearpcf} \item Changed internal format of \code{linnet} and \code{lpp} objects. \item Faster computation in linear networks. \item Bias correction techniques. \item Bounding circle of a spatial object. \item Option to plot marked points as arrows. \item Kernel smoothing accelerated. \item Workaround for bug in some graphics drivers affecting image orientation. \item Non-Gaussian smoothing kernels. \item Improvements to inhomogeneous multitype $K$ and $L$ functions. \item Variance approximation for pair correlation function. \item Leverage and influence for multitype point process models. \item Functions for extracting components of vector-valued objects. \item Recursive-partition point process models. \item Minkowski sum, morphological dilation and erosion with any shape. \item Minkowski sum also applicable to point patterns and line segment patterns. \item Important bug fix in Smooth.ppp \item Important bug fix in spatial CDF tests. \item More bug fixes for replicated patterns. \item Simulate a model fitted to replicated point patterns. 
\item Inhomogeneous multitype $F$ and $G$ functions. \item Summary functions recognise \texttt{correction="all"} \item Leverage and influence code handles bigger datasets. \item More support for pixel images. \item Improved progress reports. \item New dataset \texttt{redwood3} \item Fixed namespace problems arising when spatstat is not loaded. \item Important bug fix in leverage/influence diagnostics for Gibbs models. \item Surgery with linear networks. \item Tessellations on a linear network. \item Laslett's Transform. \item Colour maps for point patterns with continuous marks are easier to define. \item Pair correlation function estimates can be pooled. \item Stipulate a particular version of a package. \item More support for replicated point patterns. \item More support for tessellations. \item More support for multidimensional point patterns and point processes. \item More options for one-sided envelopes. \item More support for model comparison. \item Convexifying operation. \item Subdivide a linear network. \item Penttinen process can be simulated (by Metropolis-Hastings or CFTP). \item Calculate the predicted variance of number of points. \item Accelerated algorithms for linear networks. \item Quadrat counting accelerated, in some cases. \item Simulation algorithms have been accelerated; simulation outcomes are \emph{not} identical to those obtained from previous versions of \spst. \item Determinantal point process models. \item Random-effects and mixed-effects models for replicated patterns. \item Dao-Genton test, and corresponding simulation envelopes. \item Simulated annealing and simulated tempering. \item spatstat colour tools now handle transparent colours. \item Improvements to \verb![! and \texttt{subset} methods \item Extensions to kernel smoothing on a linear network. \item Support for one-dimensional smoothing kernels. \item Mark correlation function may include weights. \item Cross-correlation version of the mark correlation function. 
\item Penttinen pairwise interaction model. \item Improvements to simulation of Neyman-Scott processes. \item Improvements to fitting of Neyman-Scott models. \item Extended functionality for pixel images. \item Fitted intensity on linear network \item Triangulation of windows. \item Corrected an edge correction. \end{itemize} \section{New datasets} The following datasets have been added to the package. \begin{itemize} \item \texttt{austates}: The states and large mainland territories of Australia represented as polygonal regions forming a tessellation. \item \texttt{redwood3}: a more accurate version of the \texttt{redwood} data. \item \texttt{cetaceans}: point patterns of whale and dolphin sightings. \end{itemize} \section{New classes} \begin{itemize} \item \texttt{ssf}: Class of spatially sampled functions. \end{itemize} \section{New Functions} Following is a list of all the functions that have been added. \begin{itemize} \item \texttt{is.linim}: test whether an object is a pixel image on a linear network (class \verb!"linim"!). \item \texttt{rcelllpp}: Simulate the cell point process on a linear network. \item \texttt{rSwitzerlpp}: Simulate the Switzer-type point process on a linear network. \item \texttt{intersect.lintess}: Form the intersection of two tessellations on a linear network. \item \texttt{chop.linnet}: Divide a linear network into tiles using infinite lines. \item \texttt{repairNetwork}: Detect and repair inconsistencies in internal data in a \texttt{linnet} or \texttt{lpp} object. \item \verb!marks<-.lintess!, \texttt{unmark.lintess}: Assign marks to the tiles of a tessellation on a linear network. \item \texttt{marks.lintess}: Extract the marks of the tiles of a tessellation on a linear network. 
\item \texttt{tilenames.lintess}: Extract the names of the tiles in a tessellation on a linear network \item \verb!tilenames<-.lintess!: Change the names of the tiles in a tessellation on a linear network \item \texttt{nobjects.lintess}: Count the number of tiles in a tessellation on a linear network \item \texttt{as.data.frame.lintess}: Convert a tessellation on a linear network into a data frame. \item \texttt{repul}: Repulsiveness index for a determinantal point process model. \item \texttt{reach.kppm}: Reach (interaction distance) for a Cox or cluster point process model. \item \texttt{summary.dppm}, \texttt{print.summary.dppm}: Summary method for determinantal point process models. \item \texttt{nncross.ppx}: Nearest neighbours between two point patterns in any number of dimensions. \item \texttt{rthinclumps}: Divide a spatial region into clumps and randomly delete some of them. \item \texttt{densityQuick.lpp}: Fast kernel estimator of point process intensity on a network using 2D smoothing kernel. \item \texttt{data.lppm}: Extract the original point pattern dataset (on a linear network) to which the model was fitted. \item \texttt{bw.scott.iso}: Isotropic version of Scott's rule (for point patterns in any dimension). \item \texttt{bits.envelope}: Global simulation envelope corresponding to \texttt{bits.test}, the balanced independent two-stage Monte Carlo test. \item \texttt{extrapolate.psp}: Extrapolate line segments to obtain infinite lines. \item \texttt{uniquemap}: Map duplicate points to unique representatives. Generic with methods for \texttt{ppp}, \texttt{lpp}, \texttt{ppx} \item \texttt{uniquemap.data.frame}, \texttt{uniquemap.matrix}: Map duplicate rows to unique representatives \item \texttt{localKcross}, \texttt{localLcross}, \texttt{localKdot}, \texttt{localLdot}, \texttt{localKcross.inhom}, \texttt{localLcross.inhom}: Multitype local $K$ functions. \item \texttt{polartess}: tessellation using polar coordinates. 
\item \texttt{densityVoronoi}: adaptive estimate of point process intensity using tessellation methods. \item \texttt{densityAdaptiveKernel}: adaptive estimate of point process intensity using variable kernel methods. \item \texttt{bw.abram}: compute adaptive smoothing bandwidths using Abramson's rule. \item \texttt{coords.quad}: method for \texttt{coords}, to extract the coordinates of the points in a quadrature scheme. \item \texttt{lineartileindex}: low-level function to classify points on a linear network according to which tile of a tessellation they fall inside. \item \texttt{markmarkscatter}: Mark--mark scatterplot. \item \texttt{bw.CvL}: Cronie-van Lieshout bandwidth selection for density estimation. \item \texttt{subset.psp}: subset method for line segment patterns. \item \texttt{densityfun}, \texttt{densityfun.ppp}: Compute a kernel estimate of intensity of a point pattern and return it as a function of spatial location. \item \texttt{as.im.densityfun}: Convert \texttt{function(x,y)} to a pixel image. \item \texttt{measureDiscrete}, \texttt{measureContinuous}: Extract the discrete and continuous components of a measure. \item \texttt{connected.tess}: Find connected components of each tile in a tessellation and make a new tessellation composed of these pieces. \item \texttt{dffit.ppm}: Effect change diagnostic \texttt{DFFIT} for spatial point process models. \item \texttt{shift.distfun}, \texttt{rotate.distfun}, \texttt{reflect.distfun}, \texttt{flipxy.distfun}, \texttt{affine.distfun}, \texttt{scalardilate.distfun}: Methods for geometrical operations on \texttt{distfun} objects. \item \texttt{rescale.distfun}: Change the unit of length in a \texttt{distfun} object. \item \texttt{plot.indicfun}: Plot method for indicator functions created by \texttt{as.function.owin}. \item \texttt{Smooth.leverage.ppm}, \texttt{Smooth.influence.ppm}: Smooth a leverage function or an influence measure. 
\item \texttt{integral.leverage.ppm}, \texttt{integral.influence.ppm}: Compute the integral of a leverage function or an influence measure. \item \texttt{mean.leverage.ppm}: Compute the mean value of a leverage function. \item \texttt{rectdistmap}: Distance map using rectangular metric. \item \texttt{rectcontact}: Contact distribution function using rectangular structuring element. \item \texttt{joinVertices}: Join specified vertices in a linear network. \item \code{summary.ssf}: Summary method for a spatially sampled function (class \code{ssf}). \item \code{unstack.tess}: Given a tessellation with multiple columns of marks, take the columns one at a time, and return a list of tessellations, each carrying only one of the original columns of marks. \item \code{contour.leverage.ppm}: Method for \code{contour} for leverage functions of class \code{leverage.ppm} \item \code{lurking}: New generic function for lurking variable plots. \item \code{lurking.ppp}, \code{lurking.ppm}: These are equivalent to the original function \code{lurking}. They are now methods for the new generic \code{lurking}. \item \code{lurking.mppm}: New method for class \code{mppm}. Lurking variable plot for models fitted to several point patterns. \item \code{print.lurk}: Prints information about the object returned by the function \code{lurking} representing a lurking variable plot. \item \code{model.matrix.mppm}: Method for \code{model.matrix} for models of class \code{mppm}. \item \code{test.crossing.psp}, \code{test.selfcrossing.psp}: Previously undocumented functions for testing whether segments cross. \item \code{to.saturated}: Convert a colour value to the corresponding fully-saturated colour. \item \code{intensity.psp}: Compute the average total length of segments per unit area. \item \code{boundingbox.psp}: Bounding box for line segment patterns. This produces a tighter bounding box than the previous default behaviour. 
\item \code{boundingbox.lpp}: Bounding box for point patterns on a linear network. This produces a tighter bounding box than the previous default behaviour. \item \code{boundingbox.linnet}: Bounding box for a linear network. This produces a tighter bounding box than the previous default behaviour. \item \verb!"Frame<-.default"!: New default method for assigning bounding frame to a spatial object. \item \code{connected.pp3}: Connected components of a 3D point pattern. \item \code{colouroutputs}, \verb!"colouroutputs<-"!: Extract or assign colour values in a colour map. (Documented a previously-existing function) \item \texttt{fitin.profilepl}: Extract the fitted interaction from a model fitted by profile likelihood. \item \verb![<-.linim!: Subset assignment method for pixel images on a linear network. \item \texttt{nnfromvertex}: Given a point pattern on a linear network, find the nearest data point from each vertex of the network. \item \texttt{tile.lengths}: Calculate the length of each tile in a tessellation on a network. \item \texttt{text.ppp}, \texttt{text.lpp}, \texttt{text.psp}: Methods for \texttt{text} for spatial patterns. \item \texttt{as.data.frame.envelope}: Extract function data from an envelope object, including the functions for the simulated data ('simfuns') if they were saved. \item \texttt{is.connected}, \texttt{is.connected.default}, \texttt{is.connected.linnet}: Determines whether a spatial object consists of one topologically connected piece, or several pieces. \item \texttt{is.connected.ppp}: Determines whether a point pattern is connected after all pairs of points closer than distance R are joined. \item \texttt{hist.funxy}: Histogram of values of a spatial function. \item \texttt{model.matrix.ippm}: Method for \texttt{model.matrix} which allows computation of regular and irregular score components. \item \texttt{harmonise.msr}: Convert several measures (objects of class \texttt{msr}) to a common quadrature scheme. 
\item \texttt{bits.test}: Balanced Independent Two-Stage Monte Carlo test, an improvement on the Dao-Genton test. \item \texttt{lineardirichlet}: Computes the Dirichlet-Voronoi tessellation associated with a point pattern on a linear network. \item \texttt{domain.lintess}, \texttt{domain.linfun}: Extract the linear network from a \texttt{lintess} or \texttt{linfun} object. \item \texttt{summary.lintess}: Summary of a tessellation on a linear network. \item \texttt{clicklpp}: Interactively add points on a linear network. \item \texttt{envelopeArray}: Generate an array of envelopes using a function that returns \texttt{fasp} objects. \item \texttt{bw.pcf}: Bandwidth selection for pair correlation function. \item \texttt{grow.box3}: Expand a three-dimensional box. \item \texttt{hexagon}, \texttt{regularpolygon}: Create regular polygons. \item \texttt{Ops.msr}: Arithmetic operations for measures. \item \texttt{Math.imlist}, \texttt{Ops.imlist}, \texttt{Summary.imlist}, \texttt{Complex.imlist}: Arithmetic operations for lists of pixel images. \item \texttt{measurePositive}, \texttt{measureNegative}, \texttt{measureVariation}, \texttt{totalVariation}: Positive and negative parts of a measure, and variation of a measure. \item \texttt{as.function.owin}: Convert a spatial window to a \texttt{function(x,y)}, the indicator function. \item \texttt{as.function.ssf}: Convert an object of class \texttt{ssf} to a \texttt{function(x,y)} \item \texttt{as.function.leverage.ppm} Convert an object of class \texttt{leverage.ppm} to a \texttt{function(x,y)} \item \texttt{sdr}, \texttt{dimhat}: Sufficient Dimension Reduction for point processes. \item \texttt{simulate.rhohat}: Simulate a Poisson point process with the intensity estimated by \texttt{rhohat}. \item \texttt{rlpp}: Random points on a linear network with a specified probability density. \item \texttt{cut.lpp}: Method for \texttt{cut} for point patterns on a linear network. 
\item \texttt{has.close}: Faster way to check whether a point has a close neighbour. \item \texttt{psib}: Sibling probability (index of clustering strength in a cluster process). \item \texttt{rags}, \texttt{ragsAreaInter}, \texttt{ragsMultiHard}: Alternating Gibbs Sampler for point processes. \item \texttt{bugfixes}: List all bug fixes in recent versions of a package. \item \texttt{ssf}: Create a spatially sampled function \item \texttt{print.ssf}, \texttt{plot.ssf}, \texttt{contour.ssf}, \texttt{image.ssf}: Display a spatially sampled function \item \texttt{as.im.ssf}, \texttt{as.ppp.ssf}, \texttt{marks.ssf}, \verb!marks<-.ssf!, \texttt{unmark.ssf}, \verb![.ssf!, \texttt{with.ssf}: Manipulate data in a spatially sampled function \item \texttt{Smooth.ssf}: Smooth a spatially sampled function \item \texttt{integral.ssf}: Approximate integral of spatially sampled function \item \texttt{roc.kppm}, \texttt{roc.lppm}, \texttt{roc.lpp}: Methods for \texttt{roc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{auc.kppm}, \texttt{auc.lppm}, \texttt{auc.lpp}: Methods for \texttt{auc} for fitted models of class \texttt{"kppm"} and \texttt{"lppm"} and point patterns of class \texttt{"lpp"} \item \texttt{timeTaken}: Extract the timing data from a \texttt{"timed"} object or objects. \item \texttt{rotate.infline}, \texttt{shift.infline}, \texttt{reflect.infline}, \texttt{flipxy.infline}: Geometrical transformations for infinite straight lines. \item \texttt{whichhalfplane}: Determine which side of an infinite line a point lies on. \item \texttt{matrixpower}, \texttt{matrixsqrt}, \texttt{matrixinvsqrt}: Raise a matrix to any power. \item \texttt{points.lpp}: Method for \texttt{points} for point patterns on a linear network. \item \texttt{pairs.linim}: Pairs plot for images on a linear network. \item \texttt{closetriples}: Find close triples of points. 
\item \texttt{anyNA.im}: Method for \texttt{anyNA} for pixel images. \item \texttt{bc}: Bias correction (Newton-Raphson) for fitted model parameters. \item \texttt{rex}: Richardson extrapolation for numerical integrals and statistical model parameter estimates. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Find the smallest circle enclosing a window or point pattern. \item \verb![.linim! : Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{weighted.median}, \texttt{weighted.quantile}: Median or quantile of numerical data with associated weights. \item \verb!"[.linim"!: Subset operator for pixel images on a linear network. \item \texttt{mean.linim}, \texttt{median.linim}, \texttt{quantile.linim}: The mean, median, or quantiles of pixel values in a pixel image on a linear network. \item \texttt{boundingcircle}, \texttt{boundingcentre}: Smallest circle enclosing a spatial object. \item \texttt{split.msr}: Decompose a measure into parts. \item \texttt{unstack.msr}: Decompose a vector-valued measure into its component measures. \item \texttt{unstack.ppp}, \texttt{unstack.psp}, \texttt{unstack.lpp}: Given a spatial pattern with several columns of marks, separate the columns and return a list of spatial patterns, each having only one column of marks. \item \texttt{kernel.squint}: Integral of squared kernel, for the kernels used in density estimation. \item \texttt{as.im.data.frame}: Build a pixel image from a data frame of coordinates and pixel values. \item \texttt{covering}: Cover a window using discs of a given radius. \item \texttt{dilationAny}, \texttt{erosionAny}, \verb!%(-)%! : Morphological dilation and erosion by any shape. \item \texttt{FmultiInhom}, \texttt{GmultiInhom} Inhomogeneous multitype/marked versions of the summary functions \texttt{Fest}, \texttt{Gest}. 
\item \texttt{kernel.moment} Moment or incomplete moment of smoothing kernel. \item \texttt{MinkowskiSum}, \verb!%(+)%!: Minkowski sum of two windows: \verb!A %(+)% B!, or \texttt{MinkowskiSum(A,B)} \item \texttt{nobjects}: New generic function for counting the number of 'things' in a dataset. There are methods for \texttt{ppp}, \texttt{ppx}, \texttt{psp}, \texttt{tess}. \item \texttt{parameters.interact}, \texttt{parameters.fii}: Extract parameters from interpoint interactions. (These existing functions are now documented.) \item \texttt{ppmInfluence}: Calculate \texttt{leverage.ppm}, \texttt{influence.ppm} and \texttt{dfbetas.ppm} efficiently. \item \texttt{rppm}, \texttt{plot.rppm}, \texttt{predict.rppm}, \texttt{prune.rppm}: Recursive-partition point process models. \item \texttt{simulate.mppm} Simulate a point process model fitted to replicated point patterns. \item \texttt{update.interact}: Update the parameters of an interpoint interaction. [This existing function is now documented.] \item \texttt{where.max}, \texttt{where.min} Find the spatial location(s) where a pixel image achieves its maximum or minimum value. \item \texttt{compileK}, \texttt{compilepcf}: make a $K$ function or pair correlation function given the pairwise distances and their weights. [These existing internal functions are now documented.] \item \texttt{laslett}: Laslett's Transform. \item \texttt{lintess}: Tessellation on a linear network. \item \texttt{divide.linnet}: Divide a linear network into pieces demarcated by a point pattern. \item \texttt{insertVertices}: Insert new vertices in a linear network. \item \texttt{thinNetwork}: Remove vertices and/or segments from a linear network etc. \item \texttt{connected.linnet}: Find connected components of a linear network. \item \texttt{nvertices}, \texttt{nvertices.linnet}, \texttt{nvertices.owin}: Count the number of vertices in a linear network or vertices of the boundary of a window. 
\item \texttt{as.data.frame.linim}, \texttt{as.data.frame.linfun}: Extract a data frame of spatial locations and function values from an object of class \texttt{linim} or \texttt{linfun}. \item \texttt{as.linfun}, \texttt{as.linfun.linim}, \texttt{as.linfun.lintess}: Convert other kinds of data to a \texttt{linfun} object. \item \texttt{requireversion}: Require a particular version of a package (for use in stand-alone R scripts). \item \texttt{as.function.tess}: Convert a tessellation to a \texttt{function(x,y)}. The function value indicates which tile of the tessellation contains the point $(x,y)$. \item \texttt{tileindex}: Determine which tile of a tessellation contains a given point $(x,y)$. \item \texttt{persp.leverage.ppm}: Method for persp plots for objects of class \texttt{leverage.ppm} \item \texttt{AIC.mppm}, \texttt{extractAIC.mppm}: AIC for point process models fitted to replicated point patterns. \item \texttt{nobs.mppm}, \texttt{terms.mppm}, \texttt{getCall.mppm}: Methods for point process models fitted to replicated point patterns. \item \texttt{rPenttinen}: Simulate the Penttinen process using perfect simulation. \item \texttt{varcount}: Given a point process model, compute the predicted variance of the number of points falling in a window. \item \texttt{inside.boxx}: Test whether multidimensional points lie inside a specified multidimensional box. \item \texttt{lixellate}: Divide each segment of a linear network into smaller segments. \item \texttt{nsegments.linnet}, \texttt{nsegments.lpp}: Count the number of line segments in a linear network. \item \texttt{grow.boxx}: Expand a multidimensional box. \item \texttt{deviance.ppm}, \texttt{deviance.lppm}: Deviance for a fitted point process model. \item \texttt{pseudoR2}: Pseudo-R-squared for a fitted point process model. \item \texttt{tiles.empty} Checks whether each tile of a tessellation is empty or nonempty. \item \texttt{summary.linim}: Summary for a pixel image on a linear network. 
\item Determinantal Point Process models: \begin{itemize} \item \texttt{dppm}: Fit a determinantal point process model. \item \texttt{fitted.dppm}, \texttt{predict.dppm}, \texttt{intensity.dppm}: prediction for a fitted determinantal point process model. \item \texttt{Kmodel.dppm}, \texttt{pcfmodel.dppm}: Second moments of a determinantal point process model. \item \texttt{rdpp}, \texttt{simulate.dppm}: Simulation of a determinantal point process model. \item \texttt{logLik.dppm}, \texttt{AIC.dppm}, \texttt{extractAIC.dppm}, \texttt{nobs.dppm}: Likelihood and AIC for a fitted determinantal point process model. \item \texttt{print.dppm}, \texttt{reach.dppm}, \texttt{valid.dppm}: Basic information about a \texttt{dpp} model. \item \texttt{coef.dppm}, \texttt{formula.dppm}, \texttt{print.dppm}, \texttt{terms.dppm}, \texttt{labels.dppm}, \texttt{model.frame.dppm}, \texttt{model.matrix.dppm}, \texttt{model.images.dppm}, \texttt{is.stationary.dppm}, \texttt{reach.dppm}, \texttt{unitname.dppm}, \verb!unitname<-.dppm!, \texttt{Window.dppm}: Various methods for \texttt{dppm} objects. \item \texttt{parameters.dppm}: Extract meaningful list of model parameters. \item \texttt{objsurf.dppm}: Objective function surface of a \texttt{dppm} object. \item \texttt{residuals.dppm}: Residual measure for a \texttt{dppm} object. \end{itemize} \item Determinantal Point Process model families: \begin{itemize} \item \texttt{dppBessel}, \texttt{dppCauchy}, \texttt{dppGauss}, \texttt{dppMatern}, \texttt{dppPowerExp}: Determinantal Point Process family functions. \item \texttt{detpointprocfamilyfun}: Create a family function. \item \texttt{update.detpointprocfamily}: Set parameter values in a determinantal point process model family. \item \texttt{simulate.dppm}: Simulation. \item \texttt{is.stationary.detpointprocfamily}, \texttt{intensity.detpointprocfamily}, \texttt{Kmodel.detpointprocfamily}, \texttt{pcfmodel.detpointprocfamily}: Moments. 
\item \texttt{dim.detpointprocfamily}, \texttt{dppapproxkernel}, \texttt{dppapproxpcf}, \texttt{dppeigen}, \texttt{dppkernel}, \texttt{dppparbounds}, \texttt{dppspecdenrange}, \texttt{dppspecden}: Helper functions. \end{itemize} \item \texttt{dg.envelope}: Simulation envelopes corresponding to Dao-Genton test. \item \texttt{dg.progress}: Progress plot (envelope representation) for the Dao-Genton test. \item \texttt{dg.sigtrace}: significance trace for the Dao-Genton test. \item \texttt{markcrosscorr}: Mark cross-correlation function for point patterns with several columns of marks. \item \texttt{rtemper}: Simulated annealing or simulated tempering. \item \texttt{rgb2hsva}: Convert RGB to HSV data, like \texttt{rgb2hsv}, but preserving transparency. \item \texttt{superimpose.ppplist}, \texttt{superimpose.splitppp}: New methods for 'superimpose' for lists of point patterns. \item \texttt{dkernel}, \texttt{pkernel}, \texttt{qkernel}, \texttt{rkernel}: Probability density, cumulative probability, quantiles and random generation from distributions used in basic one-dimensional kernel smoothing. \item \texttt{kernel.factor}: Auxiliary calculations for one-dimensional kernel smoothing. \item \texttt{spatdim}: Spatial dimension of any object in the \spst\ package. \item \texttt{as.boxx}: Convert data to a multi-dimensional box. \item \texttt{intensity.ppx}: Method for \texttt{intensity} for multi-dimensional space-time point patterns. \item \texttt{fourierbasis}: Evaluate Fourier basis functions in any number of dimensions. \item \texttt{valid}: New generic function, with methods \texttt{valid.ppm}, \texttt{valid.lppm}, \texttt{valid.dppm}. \item \texttt{emend}, \texttt{emend.ppm}, \texttt{emend.lppm}: New generic function with methods for \texttt{ppm} and \texttt{lppm}. \texttt{emend.ppm} is equivalent to \texttt{project.ppm}. \item \texttt{Penttinen}: New pairwise interaction model. \item \texttt{quantile.density}: Calculates quantiles from kernel density estimates. 
\item \texttt{CDF.density}: Calculates cumulative distribution function from kernel density estimates. \item \texttt{triangulate.owin}: decompose a spatial window into triangles. \item \texttt{fitted.lppm}: fitted intensity values for a point process on a linear network. \item \texttt{parameters}: Extract all parameters from a fitted model. \end{itemize} \section{Alphabetical list of changes} Here is a list of all changes made to existing functions, listed alphabetically. \begin{itemize} %%A \item \texttt{adaptive.density}: This function can now perform adaptive estimation by two methods: either tessellation-based methods or variable-bandwidth kernel estimation. The calculations are performed by either \texttt{densityVoronoi} or \texttt{densityAdaptiveKernel}. \item \texttt{affine.owin}: Allows transformation matrix to be singular, if the window is polygonal. \item \texttt{alltypes}: If \texttt{envelope=TRUE} and the envelope computation reaches the maximum permitted number of errors (\texttt{maxnerr}) in evaluating the summary function for the simulated patterns, then instead of triggering a fatal error, the envelope limits will be set to \texttt{NA}. \item \texttt{anova.mppm}: Now handles Gibbs models, and performs the adjusted composite likelihood ratio test. New argument \texttt{fine}. \item \texttt{anyDuplicated.ppp}: Accelerated. \item \texttt{append.psp}: arguments may be \texttt{NULL}. \item \texttt{as.function.tess}: New argument \texttt{values} specifies the function values. \item \texttt{as.im.distfun}: New argument \texttt{approx} specifies the choice of algorithm. \item \texttt{as.im.function}: \begin{itemize} \item New argument \texttt{strict}. \item New argument \texttt{stringsAsFactors}. \end{itemize} \item \texttt{as.im.leverage.ppm}: New argument \texttt{what}. \item \texttt{as.im.nnfun}: New argument \texttt{approx} chooses between a fast, approximate algorithm and a slow, exact algorithm. 
\item \texttt{as.im.smoothfun}: New argument \texttt{approx} chooses between a fast, approximate algorithm and a slow, exact algorithm. \item \texttt{as.layered}: Default method now handles a (vanilla) list of spatial objects. \item \texttt{as.linfun.lintess}: \begin{itemize} \item New argument \texttt{values} specifies the function value for each tile. \item The default \texttt{values} are the marks, if present. \item New argument \texttt{navalue}. \item Computation accelerated. \end{itemize} \item \texttt{as.linim.default}: New argument \texttt{delta} controls spacing of sample points in internal data. \item \texttt{as.linnet.psp}: \begin{itemize} \item If the line segment pattern has marks, then the resulting linear network also carries these marks in the \verb!$lines! component. \item Computation accelerated. \end{itemize} \item \texttt{as.owin.default}: \begin{itemize} \item Now refuses to convert a \code{box3} to a two-dimensional window. \item Now accepts a structure with entries named \code{xmin},\code{xmax}, \code{ymin}, \code{ymax} in any order. This handles objects of class \code{bbox} in the \pkg{sf} package. \item Now detects objects of class \code{SpatialPolygons} and issues a more helpful error message. \end{itemize} \item \texttt{as.owin.data.frame}: New argument \texttt{step} \item \texttt{as.polygonal}: \begin{itemize} \item Can now repair errors in polygon data, if \texttt{repair=TRUE}. \item Accelerated when \texttt{w} is a pixel mask. \end{itemize} \item \texttt{as.psp}: now permits a data frame of marks to have only one column, instead of coercing it to a vector. \item \texttt{as.solist}: The argument \texttt{x} can now be a spatial object; \texttt{as.solist(cells)} is the same as \texttt{solist(cells)}. %%B \item \texttt{bdist.pixels}: Accelerated for polygonal windows. New argument \texttt{method}. \item \texttt{bdist.points}: Accelerated for polygonal windows. 
\item \texttt{beachcolours}: \begin{itemize} \item Improved positioning of the yellow colour band. \item If \texttt{sealevel} lies outside \texttt{srange}, then \texttt{srange} will be extended to include it (without a warning). \end{itemize} \item \texttt{beachcolourmap}: Improved positioning of the yellow colour band. \item \texttt{bind.fv}: New argument \texttt{clip}. \item \texttt{blur}: New argument \texttt{kernel}. \item \texttt{bw.abram}: \begin{itemize} \item New argument \texttt{smoother} determines how the pilot estimate is computed. \item Formal arguments rearranged. \end{itemize} \item \texttt{bw.diggle}, \texttt{bw.ppl}, \texttt{bw.relrisk}, \texttt{bw.smoothppp}: \begin{itemize} \item These functions now extract and store the name of the unit of length from the point pattern dataset. When the bandwidth selection criterion is plotted, the name of the unit of length is shown on the x-axis. \item A warning is issued if the optimal value of the cross-validation criterion occurs at an endpoint of the search interval. New argument \texttt{warn}. \end{itemize} \item \texttt{bw.ppl}: \begin{itemize} \item New arguments \texttt{weights} and \texttt{sigma}. \item New argument \texttt{shortcut} allows faster computation. \item Additional arguments \verb!...! are now passed to \texttt{density.ppp}. \end{itemize} \item \texttt{bw.scott}: \begin{itemize} \item the two bandwidth values in the result now have names \texttt{sigma.x} and \texttt{sigma.y}. \item Now handles point patterns of any dimension. \item New arguments \texttt{isotropic} and \texttt{d}. \end{itemize} \item \texttt{bw.stoyan}: The rule has been modified so that, if the pattern is empty, it is now treated as if it contained 1 point, so that a finite bandwidth value is returned. %%C \item \texttt{cbind.hyperframe}: The result now retains the \texttt{row.names} of the original arguments. \item \texttt{cdf.test}: \begin{itemize} \item Calculations are more robust against numerical rounding effects. 
\item The methods for classes \texttt{ppp}, \texttt{ppm}, \texttt{lpp}, \texttt{lppm}, \texttt{slrm} have a new argument \texttt{interpolate}. \item Monte Carlo test runs much faster. \item More jittering is applied when \texttt{jitter=TRUE}. Warnings about tied values should not occur any more. \end{itemize} \item \texttt{cdf.test.mppm}: \begin{itemize} \item Now handles Gibbs models. \item Now recognises \texttt{covariate="x"} or \texttt{"y"}. \end{itemize} \item \texttt{clarkevans}: The argument \texttt{correction="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{clickpoly}: The polygon is now drawn progressively as the user clicks new vertices. \item \texttt{closepairs.ppp}: New argument \code{periodic}. \item \texttt{closepairs.ppp}, \texttt{closepairs.pp3}: \begin{itemize} \item New arguments \texttt{distinct} and \texttt{neat} allow more options. \item Argument \texttt{ordered} has been replaced by \texttt{twice} (but \texttt{ordered} is still accepted, with a warning). \item Performance improved (computation time and memory requirements reduced.) This should improve the performance of many functions in \texttt{spatstat}. \end{itemize} \item \texttt{closepairs.pp3}: Argument \texttt{what} can take the value \texttt{"ijd"} \item \texttt{clusterset}: Improved behaviour. \item \texttt{clusterfit}: \begin{itemize} \item New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. Individually-named arguments \texttt{q,p,rmax,rmin} now take precedence over entries with the same names in the list \texttt{ctrl}. \item New argument \texttt{verbose}. \end{itemize} \item \texttt{colourmap}: argument \texttt{col} may have length 1, representing a trivial colour map in which all data values are mapped to the same colour. 
\item \texttt{collapse.fv}: This is now treated as a method for the \texttt{nlme} generic \texttt{collapse}. Its syntax has been adjusted slightly. \item \texttt{connected.im}: Now handles a logical-valued image properly. Arguments \texttt{...} now determine pixel resolution. \item \texttt{connected.owin}: Arguments \texttt{...} now determine pixel resolution. \item \texttt{contour.im}: New argument \texttt{col} specifies the colour of the contour lines. If \texttt{col} is a colour map, then the contours are drawn in different colours. \item \texttt{convolve.im}: the name of the unit of length is preserved. \item \texttt{crossing.psp}: New argument \texttt{details} gives more information about the intersections between the segments. \item \texttt{crosspairs.pp3}: Argument \texttt{what} can take the value \texttt{"ijd"} \item \texttt{cut.ppp}: Argument \texttt{z} can be \texttt{"x"} or \texttt{"y"} indicating one of the spatial coordinates. %%D \item \texttt{dclf.test, mad.test, dclf.progress, mad.progress,} \texttt{dclf.sigtrace, mad.sigtrace}, \texttt{dg.progress, dg.sigtrace}: \begin{itemize} \item New argument \texttt{clamp} determines the test statistic for one-sided tests. \item New argument \texttt{rmin} determines the left endpoint of the test interval. \item New argument \texttt{leaveout} specifies how to calculate discrepancy between observed and simulated function values. \item New argument \texttt{scale} allows summary function values to be rescaled before the comparison is performed. \item New argument \texttt{interpolate} supports interpolation of $p$-value. \item New argument \texttt{interpolate} supports interpolation of critical value of test. \item Function values which are infinite, \texttt{NaN} or \texttt{NA} are now ignored in the calculation (with a warning) instead of causing an error. Warning messages are more detailed. \end{itemize} \item \texttt{default.rmhcontrol, default.rmhexpand}: New argument \texttt{w}. 
\item \texttt{density.lpp}: \begin{itemize} \item New fast algorithm (up to 1000 times faster) for the default case where \texttt{kernel="gaussian"} and \texttt{continuous=TRUE}. Generously contributed by Greg McSwiggan. \item Fast algorithm has been further accelerated. \item New argument \texttt{kernel} specifies the smoothing kernel. Any of the standard one-dimensional smoothing kernels can be used. \item Now supports both the `equal-split continuous' and `equal-split discontinuous' smoothers. New argument \texttt{continuous} determines the choice of smoother. \item New arguments \texttt{weights} and \texttt{old}. \item New argument \texttt{distance} offers a choice of different kernel methods. \end{itemize} \item \texttt{density.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image. \item Infinite bandwidth \texttt{sigma=Inf} is supported. \item Accelerated by about 30\% when \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Accelerated in the cases where weights are given or \texttt{diggle=TRUE}. \item New argument \texttt{verbose}. \end{itemize} \item \texttt{density.psp}: \begin{itemize} \item New argument \texttt{method}. \item Accelerated by 1 to 2 orders of magnitude. \end{itemize} \item \texttt{dfbetas.ppm}: \begin{itemize} \item For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item Increased the default resolution of the pixel images. Spatial resolution can now be controlled by the arguments \code{dimyx}, \code{eps}. \end{itemize} \item \texttt{diagnose.ppm}: \begin{itemize} \item Infinite values of \texttt{rbord} are now ignored and treated as zero. This ensures that \texttt{diagnose.ppm} has a sensible default when the fitted model has infinite reach. 
\item Accelerated, when \texttt{type="inverse"}, for models without a hard core. \end{itemize} \item \texttt{diagnose.ppm, plot.diagppm}: \begin{itemize} \item New arguments \texttt{col.neg, col.smooth} control the colour maps. \item Accelerated, when \texttt{type="inverse"}, for models without a hard core. \end{itemize} \item \texttt{dilation.ppp}: Improved geometrical accuracy. Now accepts arguments to control resolution of polygonal approximation. \item \texttt{discs}: \begin{itemize} \item Now accepts a single numeric value for \texttt{radii}. \item New argument \texttt{npoly}. \item Accelerated in some cases. \end{itemize} \item \texttt{distfun}: When the user calls a distance function that was created by \texttt{distfun}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. \item \texttt{dppm}: Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. Individually-named arguments \texttt{q,p,rmax,rmin} now take precedence over entries with the same names in the list \texttt{ctrl}. \item \texttt{duplicated.ppp}: accelerated. %%E \item \texttt{edge.Trans}: New argument \texttt{gW} for efficiency. \item \texttt{effectfun}: \begin{itemize} \item Now works for \texttt{ppm}, \texttt{kppm}, \texttt{lppm}, \texttt{dppm}, \texttt{rppm} and \texttt{profilepl} objects. \item New argument \texttt{nvalues}. \end{itemize} \item \texttt{envelope}: \begin{itemize} \item New argument \texttt{clamp} gives greater control over one-sided envelopes. \item New argument \texttt{funargs} \item New argument \texttt{scale} allows global envelopes to have width proportional to a specified function of $r$, rather than constant width. \item New argument \texttt{funYargs} contains arguments to the summary function when applied to the data pattern only. \item The argument \texttt{simulate} can now be a function (such as \texttt{rlabel}). 
The function will be applied repeatedly to the original data pattern. \item \texttt{rejectNA} and \texttt{silent}. \end{itemize} \item \texttt{envelope.lpp}, \texttt{envelope.lppm}: \begin{itemize} \item New arguments \texttt{fix.n} and \texttt{fix.marks} allow envelopes to be computed using simulations conditional on the observed number of points. \item New arguments \texttt{maxnerr}, \texttt{rejectNA} and \texttt{silent}. \end{itemize} \item \texttt{eval.im}: New argument \texttt{warn}. \item \texttt{eval.linim}: New argument \texttt{warn}. \item \texttt{ewcdf}: \begin{itemize} \item Argument \texttt{weights} can now be \texttt{NULL}. \item New arguments \texttt{normalise} and \texttt{adjust}. \item Computation accelerated. \item The result does not inherit class \texttt{"ecdf"} if \texttt{normalise=FALSE}. \end{itemize} %%F \item \texttt{Fest}: Additional checks for errors in input data. \item \texttt{Finhom}: \begin{itemize} \item A warning is issued if bias is likely to occur because of undersmoothing. \item New arguments \texttt{warn.bias} and \texttt{savelambda}. \end{itemize} \item \texttt{fitted.lppm}: New argument \texttt{leaveoneout} allows leave-one-out computation of fitted value. \item \texttt{fitted.ppm}: \begin{itemize} \item New option, \texttt{type="link"}. \item New argument \code{ignore.hardcore}. \end{itemize} \item \texttt{funxy}: \begin{itemize} \item When the user calls a function that was created by \texttt{funxy}, the user may now give a \texttt{ppp} or \texttt{lpp} object for the argument \texttt{x}, instead of giving two coordinate vectors \texttt{x} and \texttt{y}. \item Functions of class \texttt{"funxy"} can now be applied to quadrature schemes. \end{itemize} %%G \item \texttt{Geyer}: The saturation parameter \texttt{sat} can now be less than 1. \item \texttt{Ginhom}: \begin{itemize} \item A warning is issued if bias is likely to occur because of undersmoothing. \item New arguments \texttt{warn.bias} and \texttt{savelambda}. 
\end{itemize} \item \texttt{grow.rectangle}: New argument \texttt{fraction}. %%H \item \texttt{Hest}: \begin{itemize} \item Argument \texttt{X} can now be a pixel image with logical values. \item New argument \texttt{W}. [Based on code by Kassel Hingee.] \item Additional checks for errors in input data. \end{itemize} \item \texttt{hist.im}: New argument \texttt{xname}. %%I \item \texttt{identify.psp}: Improved placement of labels. Arguments can be passed to \texttt{text.default} to control the plotting of labels. \item \texttt{idw}: Standard errors can now be calculated by setting \texttt{se=TRUE}. \item \texttt{imcov}: the name of the unit of length is preserved. \item \texttt{im.apply}: \begin{itemize} \item Computation accelerated \item New argument \texttt{fun.handles.na} \item New argument \texttt{check} \end{itemize} \item \texttt{influence.ppm}: For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item \texttt{integral.linfun}: \begin{itemize} \item New argument \texttt{delta} controls step length of approximation to integral. \item Argument \code{domain} can be a tessellation. \end{itemize} \item \texttt{integral.linim}: Argument \code{domain} can be a tessellation. \item \texttt{integral.ssf}: Argument \code{domain} can be a tessellation. \item \texttt{intensity.ppm}: Intensity approximation is now implemented for area-interaction model, and Geyer saturation model. \item \texttt{interp.im}: New argument \texttt{bilinear}. \item \texttt{ippm}: \begin{itemize} \item Accelerated. \item The internal format of the result has been extended slightly. \item Improved defaults for numerical algorithm parameters. \end{itemize} %%J \item \texttt{Jfox}: new argument \texttt{warn.trim}. \item \texttt{Jinhom}: \begin{itemize} \item A warning is issued if bias is likely to occur because of undersmoothing. \item New arguments \texttt{warn.bias} and \texttt{savelambda}. 
\end{itemize} %%K \item \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kmulti.inhom}: \begin{itemize} \item These functions now allow intensity values to be given by a fitted point process model. \item New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. \item Leave-one-out calculation is now implemented when \texttt{lambdaX} is a fitted model of class \texttt{"dppm"}. \end{itemize} \item \texttt{Kest} \begin{itemize} \item Accelerated computation (for translation and rigid corrections) when window is an irregular shape. \item Calculation of isotropic edge correction for polygonal windows has changed slightly. Results are believed to be more accurate. Computation has been accelerated by about 20 percent in typical cases. \end{itemize} \item \texttt{Kest.fft}: Now has \verb!...! arguments allowing control of spatial resolution. \item \texttt{Kinhom}: \begin{itemize} \item New argument \texttt{ratio}. \item Stops gracefully if \texttt{lambda} contains any zero values. \item Leave-one-out calculation is implemented when \texttt{lambda} is a fitted model of class \texttt{"dppm"}. \end{itemize} \item \texttt{kppm}: \begin{itemize} \item Fitting a model with \texttt{clusters="LGCP"} no longer requires the package \pkg{RandomFields} to be loaded explicitly. \item New argument \texttt{algorithm} specifies the choice of optimisation algorithm. \item Left hand side of formula can now involve entries in the list \texttt{data}. \item refuses to fit a log-Gaussian Cox model with anisotropic covariance. \item A warning about infinite values of the summary function no longer occurs when the default settings are used. Also affects \texttt{mincontrast}, \texttt{cauchy.estpcf}, \texttt{lgcp.estpcf}, \texttt{matclust.estpcf}, \texttt{thomas.estpcf}, \texttt{vargamma.estpcf}. \item Changed precedence rule for handling the algorithm parameters in the minimum contrast algorithm. 
Individually-named arguments \texttt{q,p,rmax,rmin} now take precedence over entries with the same names in the list \texttt{ctrl}. \item Improved printed output. \end{itemize} %%L \item \texttt{latest.news}: Now prints news documentation for the current major version, by default. New argument \texttt{major}. \item \texttt{Lcross.inhom}, \texttt{Ldot.inhom}: These functions now allow intensity values to be given by a fitted point process model. New arguments \texttt{update}, \texttt{leaveoneout}, \texttt{lambdaX}. \item \texttt{lengths.psp}: New argument \texttt{squared}. \item \texttt{Lest}, \texttt{Linhom}, \texttt{Ldot}, \texttt{Lcross}, \texttt{Ldot.inhom}, \texttt{Lcross.inhom}: These summary functions now have explicit argument \texttt{"correction"}. \item \texttt{leverage.ppm}: \begin{itemize} \item For Gibbs models, memory usage has been dramatically reduced, so the code can handle larger datasets and finer quadrature schemes. \item Increased the default resolution of the pixel images. Spatial resolution can now be controlled by the arguments \code{dimyx}, \code{eps}. \end{itemize} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: \begin{itemize} \item These methods now work for models that were fitted by logistic composite likelihood (\texttt{method='logi'}). \item Computation has been vastly accelerated for models with Geyer interaction fitted using isotropic or translation edge corrections. \item Faster computation in many cases. \item Virtually all models and edge corrections are now supported, using a ``brute force'' algorithm. This can be slow in some cases. \end{itemize} \item \texttt{lineardisc}: \begin{itemize} \item New argument \texttt{add}. \item Default plotting behaviour has changed. \end{itemize} \item \texttt{linearK}, \texttt{linearpcf} and relatives: \\ \begin{itemize} \item substantially accelerated. \item ratio calculations are now supported. \item new argument \texttt{ratio}. 
\end{itemize} \item \texttt{linearKinhom}: new argument \texttt{normpower}. \item \texttt{linearKinhom}, \texttt{linearpcfinhom}: \begin{itemize} \item Changed behaviour when \texttt{lambda} is a fitted model. \item New arguments \texttt{update} and \texttt{leaveoneout}. \end{itemize} \item \texttt{linearpcf}: new argument \texttt{normpower}. \item \texttt{linim}: \begin{itemize} \item The image \texttt{Z} is now automatically restricted to the network. \item New argument \texttt{restrict}. \end{itemize} \item \texttt{linnet}: \begin{itemize} \item The internal format of a \texttt{linnet} (linear network) object has been changed. Existing datasets of class \texttt{linnet} are still supported. However, computation will be faster if they are converted to the new format. To convert a linnet object \texttt{L} to the new format, use \verb!L <- as.linnet(L)!. \item If the argument \texttt{edges} is given, then this argument now determines the ordering of the sequence of line segments. For example, the \texttt{i}-th row of \texttt{edges} specifies the \texttt{i}-th line segment in \texttt{as.psp(L)}. \item New argument \texttt{warn}. \item When argument \texttt{edges} is specified, the code now checks whether any edges are duplicated. \end{itemize} \item \texttt{lintess}: \begin{itemize} \item Argument \texttt{df} can be missing or \texttt{NULL}, resulting in a tessellation with only one tile. \item Tessellations can now have marks. New argument \texttt{marks}. \end{itemize} \item \texttt{localpcfinhom}: \begin{itemize} \item New arguments \texttt{update} and \texttt{leaveoneout}. \end{itemize} \item \texttt{logLik.ppm}: \begin{itemize} \item New argument \texttt{absolute}. \item The warning about pseudolikelihood (`log likelihood not available') is given only once, and is not repeated in subsequent calls, within a spatstat session. \end{itemize} \item \texttt{logLik.mppm}: new argument \texttt{warn}. 
\item \texttt{lohboot}: \begin{itemize} \item Algorithm has been corrected and extended thanks to Christophe Biscio and Rasmus Waagepetersen. \item New arguments \texttt{block}, \texttt{basicboot}, \texttt{Vcorrection}. \item Accelerated when the window is a rectangle. \item Now works for multitype $K$ functions \texttt{Kcross}, \texttt{Kdot}, \texttt{Lcross}, \texttt{Ldot}, \texttt{Kcross.inhom}, \texttt{Lcross.inhom} \item Confidence bands for \texttt{Lest}, \texttt{Linhom}, \texttt{Lcross}, \texttt{Ldot}, \texttt{Lcross.inhom} are now computed differently. First a confidence band is computed for the corresponding $K$ function \texttt{Kest}, \texttt{Kinhom}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcross.inhom} respectively. Then this is transformed to a confidence band for the $L$ function by applying the square root transformation. \end{itemize} \item \texttt{lpp}: \begin{itemize} \item The internal format of an \texttt{lpp} object has been changed. Existing datasets of class \texttt{lpp} are still supported. However, computation will be faster if they are converted to the new format. To convert an \texttt{lpp} object \texttt{X} to the new format, use \verb!X <- as.lpp(X)!. \item \texttt{X} can be missing or \texttt{NULL}, resulting in an empty point pattern. \end{itemize} \item \texttt{lpp}, \texttt{as.lpp}: These functions now handle the case where coordinates \texttt{seg} and \texttt{tp} are given but \texttt{x} and \texttt{y} are missing. \item \texttt{lppm}: \begin{itemize} \item New argument \texttt{random} controls placement of dummy points. \item Computation accelerated. \end{itemize} \item \texttt{lurking.ppm}: accelerated. \item \texttt{lut}: argument \texttt{outputs} may have length 1, representing a lookup table in which all data values are mapped to the same output value. %%M \item \texttt{markconnect}: Accepts the argument \texttt{weights} which is passed to \texttt{markcorr}. 
\item \texttt{markcorr}: New argument \texttt{weights} allows computation of the weighted version of the mark correlation function. Weights can be an expression to be evaluated, or a function, or a pixel image, or a numeric vector. \item \texttt{markvario}: Accepts the argument \texttt{weights} which is passed to \texttt{markcorr}. \item \texttt{minnndist}, \texttt{maxnndist}: New argument \texttt{by} makes it possible to find the minimum or maximum nearest neighbour distance between each pair of possible types in a multitype pattern. \item \texttt{mppm}: \begin{itemize} \item Now handles models with a random effect component. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item New argument \texttt{random} is a formula specifying the random effect. (This is covered in \cite[Chap.\ 16]{baddrubaturn15}.) \item Performs more checks for consistency of the input data. \item New arguments \texttt{gcontrol} and \texttt{reltol.pql} control the fitting algorithm. \item New argument \texttt{weights} specifies case weights for each row of data. \end{itemize} \item \texttt{msr}: Infinite and \texttt{NA} values are now detected (if \texttt{check=TRUE}) and are reset to zero, with a warning. %%N \item \texttt{nbfires}: \begin{itemize} \item the unit of length for the coordinates is now specified in this dataset. \item This dataset now includes information about the different land and sea borders of New Brunswick. \end{itemize} \item \texttt{nncorr,nnmean,nnvario}: New argument \texttt{na.action}. \item \texttt{nndist.lpp, nnwhich.lpp, nncross.lpp, distfun.lpp}: New argument \texttt{k} allows computation of $k$-th nearest point. Computation accelerated. \texttt{nnfun.lpp}: \begin{itemize} \item New argument \texttt{k}. \item New argument \texttt{value} specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. 
\end{itemize} \texttt{nnfun.ppp}: \begin{itemize} \item New argument \texttt{value} specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. \end{itemize} \texttt{nnfun.psp}: \begin{itemize} \item New argument \texttt{value} specifies whether to return the index of the nearest neighbour or the mark value of the nearest neighbour. \end{itemize} %%O %%P \item \texttt{padimage}: New argument \texttt{W} allows an image to be padded out to fill any window. \item \texttt{pairorient}: Default edge corrections now include \texttt{"bord.modif"}. \item \texttt{parres}: the argument \texttt{covariate} is allowed to be missing if the model only depends on one covariate. \item \texttt{pcf.ppp}: \begin{itemize} \item New argument \code{close} for advanced use. \item New argument \texttt{ratio} allows several estimates of pcf to be pooled. \item Now calculates an analytic approximation to the variance of the estimate of the pair correlation function (when \texttt{var.approx=TRUE}). \item Now returns the smoothing bandwidth used, as an attribute of the result. \item New argument \texttt{close} for advanced use. \item Now accepts \texttt{correction="none"}. \end{itemize} \item \texttt{pcfinhom}: \begin{itemize} \item New argument \code{close} for advanced use. \item Default behaviour is changed when \texttt{lambda} is a fitted model. The default is now to re-fit the model to the data before computing pcf. New arguments \texttt{update} and \texttt{leaveoneout} control this. \item New argument \texttt{close} for advanced use. \item Now handles \texttt{correction="good"} \item Leave-one-out calculation is implemented when \texttt{lambda} is a fitted model of class \texttt{"dppm"}. \end{itemize} \item \code{persp.funxy}: Improved $z$-axis label. \item \texttt{pixellate.ppp}: \begin{itemize} \item If the pattern is empty, the result is an integer-valued image (by default) for consistency with the results for non-empty patterns. 
\item Accelerated in the case where weights are given. \item New arguments \texttt{fractional} and \texttt{preserve} for more accurate discretisation. \item New argument \texttt{savemap}. \end{itemize} \item \texttt{plot.anylist}: \begin{itemize} \item If a list entry \verb!x[[i]]! belongs to class \texttt{"anylist"}, it will be expanded so that each entry \verb!x[[i]][[j]]! will be plotted as a separate panel. \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.colourmap}: \begin{itemize} \item Now handles a colour map for a zero-length interval [a,a] \item New argument \texttt{increasing} specifies whether the colours are displayed in order left-to-right/bottom-to-top. \item Changed default behaviour for discrete colour maps when \texttt{vertical=FALSE}. \end{itemize} \item \texttt{plot.im}: \begin{itemize} \item Now handles complex-valued images. \item New argument \texttt{workaround} to avoid a bug in some MacOS device drivers that causes the image to be displayed in the wrong spatial orientation. \item The number of tick marks in the colour ribbon can now be controlled using the argument \texttt{nint} in \texttt{ribargs}. \item Improved behaviour when all pixel values are \texttt{NA}. \item Improved handling of tickmarks on colour ribbon. \item Improved behaviour when the image values are almost constant. \item New argument \texttt{riblab}. \item Axes are prevented from extending outside the image rectangle. \item New argument \texttt{zap}. \item Some warnings are suppressed when \texttt{do.plot=FALSE}. \end{itemize} \item \texttt{plot.imlist}: Result is now an (invisible) list containing the result from executing the plot of each panel. \item \texttt{plot.influence.ppm}: New argument \texttt{multiplot}. \item \texttt{plot.kppm}: \begin{itemize} \item New arguments \texttt{pause} and \texttt{xname}. 
\item The argument \texttt{what="all"} is now recognised: it selects all the available options. [This is also the default.] \end{itemize} \item \texttt{plot.leverage.ppm}: \begin{itemize} \item New arguments \texttt{multiplot} and \code{what}. \item A contour line showing the average value of leverage is now drawn on the colour ribbon, as well as on the main image. New argument \texttt{args.contour}. \end{itemize} \item \texttt{plot.linfun}: \begin{itemize} \item Now passes arguments to the function being plotted. \item A scale bar is now plotted when \texttt{style="width"}. \item New argument \texttt{legend}. \item The return value has a different format. \end{itemize} \item \texttt{plot.linim}: \begin{itemize} \item The return value has a different format. \item New argument \texttt{fatten} improves visual appearance when \texttt{style="colour"}. \item A scale bar is now plotted when \texttt{style="width"}. \item When \texttt{style="width"}, negative values are plotted in red (by default). New argument \texttt{negative.args} controls this. \item New argument \texttt{zlim} specifies the range of values to be mapped. \item New explicit argument \texttt{box} determines whether to plot a bounding box; default is \texttt{FALSE} in all cases. \end{itemize} \item \texttt{plot.lintess}: \begin{itemize} \item Improved plot method, with more options. \item Modified to display the marks attached to the tiles. \item Options: \verb!style=c("colour", "width", "image")!. \end{itemize} \item \texttt{plot.lpp}: \begin{itemize} \item New argument \texttt{show.network}. \item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \end{itemize} \item \texttt{plot.mppm}: New argument \texttt{se}. \item \texttt{plot.msr}: \begin{itemize} \item Now handles multitype measures. \item New argument \texttt{multiplot}. 
\item New argument \texttt{massthresh}. \item New arguments \texttt{equal.markscale} and \texttt{equal.ribbon}. \end{itemize} \item \texttt{plot.onearrow:} Graphical parameters, specified when the object was created, are now taken as the defaults for graphical parameters to the plot. \item \texttt{plot.owin:} New argument \texttt{use.polypath} controls how to plot a filled polygon when it has holes. \item \texttt{plot.profilepl}: This function has now been documented, and the graphics improved. \item \texttt{plot.psp}: \begin{itemize} \item Segments can be plotted with widths proportional to their mark values. \item New argument \texttt{style}. \item New argument \texttt{col} gives control over the colour map representing the values of marks attached to the segments. \end{itemize} \item \texttt{plot.pp3}: New arguments \texttt{box.front}, \texttt{box.back} control plotting of the box. \item \texttt{plot.ppp}: \begin{itemize} \item The default colour for the points is now a transparent grey, if this is supported by the plot device. \item For a point pattern with continuous marks (``real numbers'') the colour arguments \texttt{cols}, \texttt{fg}, \texttt{bg} can now be vectors of colour values, and will be used to determine the default colour map for the marks. \item Now recognises graphics parameters for text, such as \texttt{family} and \texttt{srt} \item When \texttt{clipwin} is given, any parts of the boundary of the window of \texttt{x} that lie inside \texttt{clipwin} will also be plotted. \item Improved placement of symbol map legend when argument \texttt{symap} is given. \end{itemize} \item \code{plot.tess}: \begin{itemize} \item This plot method can now fill each tile with a different colour. \item New arguments \code{do.col}, \code{values}, \code{col} and \code{ribargs}. Old argument \code{col} has been renamed \code{border} for consistency. \item Now generates a separate plot panel for each column of marks, if \texttt{do.col=TRUE}. 
\item New argument \texttt{multiplot}. \end{itemize} \item \texttt{plot.profilepl}, \texttt{plot.quadratcount}, \texttt{plot.quadrattest}, \texttt{plot.tess}: Now recognise graphics parameters for text, such as \texttt{family} and \texttt{srt} \item \texttt{plot.solist}: \begin{itemize} \item New arguments \texttt{panel.begin.args}, \texttt{panel.end.args} \item Result is now an (invisible) list containing the result from executing the plot of each panel. \end{itemize} \item \texttt{plot.studpermutest}: This existing function now has a help file. \item \texttt{plot.symbolmap}: New argument \texttt{nsymbols} controls the number of symbols plotted. \item \code{ponderosa}: In this installed dataset, the function \code{ponderosa.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). \item \texttt{polynom}: This function now has a help file. \item \texttt{pool.fv}: \begin{itemize} \item The default plot of the pooled function no longer includes the variance curves. \item New arguments \texttt{relabel} and \texttt{variance}. \end{itemize} \item \texttt{pool.rat}: New arguments \texttt{weights}, \texttt{relabel} and \texttt{variance}. \item \texttt{ppm}: \begin{itemize} \item Argument \code{interaction} can now be a function that makes an interaction, such as \code{Poisson}, \code{Hardcore}, \code{MultiHard}. \item Argument \texttt{subset} can now be a window (class \texttt{"owin"}) specifying the sub-region of data to which the model should be fitted. \end{itemize} \item \texttt{ppm.ppp, ppm.quad}: \begin{itemize} \item New argument \texttt{emend}, equivalent to \texttt{project}. \item New arguments \texttt{subset} and \texttt{clipwin}. \end{itemize} \item \code{ppmInfluence}: The result now belongs to class \code{ppmInfluence}, for which there are methods for \code{leverage}, \code{influence}, \code{dfbetas} which extract the desired component. \item \texttt{ppp}: \begin{itemize} \item New argument \texttt{checkdup}. 
\item If the coordinate vectors \code{x} and \code{y} contain \code{NA}, \code{NaN} or infinite values, these points are deleted with a warning, instead of causing a fatal error. \end{itemize} \item \texttt{pp3}: New argument \texttt{marks}. \item \texttt{predict.kppm, residuals.kppm} Now issues a warning when the calculation ignores the cluster/Cox component and treats the model as if it were Poisson. (This currently happens in predict.kppm when se=TRUE or interval != "none", and in residuals.kppm when type != "raw"). \item \texttt{predict.lppm}: Argument \texttt{locations} can now be an \texttt{lpp} object. \item \texttt{predict.mppm}: The argument \texttt{type="all"} is now recognised: it selects all the available options. [This is also the default.] \item \texttt{predict.ppm}: \begin{itemize} \item Now recognises the arguments \code{dimyx} and \code{eps} for specifying the resolution of the grid of prediction points. \item New argument \code{ignore.hardcore}. \item Accelerated for models fitted with \texttt{method="VBlogi"} \end{itemize} \item \texttt{predict.rhohat}: New argument \texttt{what} determines which value should be calculated: the function estimate, the upper/lower confidence limits, or the standard error. \item \texttt{print.linim}: More information is printed. \item \texttt{print.lintess}: Output includes information about marks. \item \texttt{print.quad}: More information is printed. \item \texttt{print.rmhmodel}: More information is printed. \item \texttt{progressreport} \begin{itemize} \item Behaviour improved. \item New arguments \texttt{state}, \texttt{tick}, \texttt{showtime}. \item New option: \verb!style="tk"! \end{itemize} \item \code{pseudoR2.ppm}, \code{pseudoR2.lppm}: \begin{itemize} \item The null model now includes any offset terms, by default. \item New argument \code{keepoffset}. \end{itemize} %%Q \item \texttt{quadratcount.ppp}: Computation accelerated in some cases. 
\item \texttt{quadrat.test.ppm}: Computation accelerated in some cases. \item \texttt{quantess}: \begin{itemize} \item The covariate \texttt{Z} can now be \texttt{"rad"} or \texttt{"ang"} representing polar coordinates. \item New argument \texttt{origin} specifies the origin of polar coordinates. \item New argument \texttt{eps} controls the accuracy of the calculation. \end{itemize} \item \texttt{quantile.ewcdf}: The function is now normalised to the range \verb![0,1]! before the quantiles are computed. This can be suppressed by setting \texttt{normalise=FALSE}. \item \texttt{qqplot.ppm} Argument \texttt{expr} can now be a list of point patterns, or an envelope object containing a list of point patterns. %%R \item \texttt{rbind.hyperframe}: The result now retains the \texttt{row.names} of the original arguments. \item \texttt{rcellnumber}: New argument \texttt{mu}. \item \texttt{rebound.owin}: Now preserves unitnames of the objects. \item \texttt{rescale.owin}, \texttt{rescale.ppp}, \texttt{rescale.psp}: The geometrical type of the window is now preserved in all cases. (Previously if the window was polygonal but was equivalent to a rectangle, the rescaled window was a rectangle.) \item \texttt{rgbim, hsvim}: New argument \texttt{A} controls the alpha (transparency) channel. \item \texttt{rgb2hex, col2hex, paletteindex, is.colour, samecolour,} \texttt{complementarycolour, is.grey, to.grey} These colour tools now handle transparent colours. \item \texttt{rgb2hex}: New argument \texttt{maxColorValue} \item \texttt{relrisk.ppp}: \begin{itemize} \item If \texttt{se=TRUE} and \texttt{at="pixels"}, the result belongs to class \texttt{solist}. \item The arguments \texttt{adjust}, \texttt{edge}, \texttt{diggle} are now explicit formal arguments. \end{itemize} \texttt{rhohat}: \begin{itemize} \item Nonparametric maximum likelihood estimation is now supported, assuming the intensity is a monotone function of the covariate. 
\item New options \texttt{smoother="increasing"} and \texttt{smoother="decreasing"}. \item New argument \texttt{subset} allows computation for a subset of the data. \item New argument \texttt{positiveCI} specifies whether confidence limits should always be positive. \end{itemize} \texttt{rhohat.lpp}: New argument \texttt{random} controls placement of dummy points. \item \texttt{rlabel}: \begin{itemize} \item New arguments \texttt{nsim} and \texttt{drop}. \item \texttt{X} can now be a point pattern of any type (\texttt{ppp}, \texttt{lpp}, \texttt{pp3}, \texttt{ppx}) or a line segment pattern (\texttt{psp}). \end{itemize} \item \texttt{rLGCP}: \begin{itemize} \item Accelerated. \item This function no longer requires the package \pkg{RandomFields} to be loaded explicitly. \end{itemize} \item \texttt{rMaternI, rMaternII}: These functions can now generate random patterns in three dimensions and higher dimensions, when the argument \texttt{win} is of class \texttt{box3} or \texttt{boxx}. \item \texttt{rmh}: \begin{itemize} \item Accelerated, in the case where multiple patterns are saved using \texttt{nsave}. \item The printed output of the debugger (invoked by \texttt{snoop=TRUE}) has been improved. \end{itemize} \item \texttt{rmh.ppm, rmhmodel.ppm, simulate.ppm}: A model fitted using the \texttt{Penttinen} interaction can now be simulated. \item \texttt{rmh.default, rmhmodel.default}: \begin{itemize} \item These functions now recognise \verb!cif='penttinen'! for the Penttinen interaction. \item New arguments \texttt{nsim}, \texttt{saveinfo}. \item The printed output of the debugger (invoked by \texttt{snoop=TRUE}) has been improved. \end{itemize} \item \texttt{rmhcontrol}: \begin{itemize} \item New parameter \texttt{pstage} determines when to generate random proposal points. \item The parameter \texttt{nsave} can now be a vector of integers. \end{itemize} \item \texttt{rose.default} New argument \texttt{weights}. 
\item \texttt{rose} New arguments \texttt{start} and \texttt{clockwise} specify the convention for measuring and plotting angles. \item \texttt{rotmean}: \begin{itemize} \item New argument \texttt{padzero}. \item Default behaviour has changed. \item Improved algorithm stability. \end{itemize} \item \texttt{rpoispp}: Accelerated, when \texttt{lambda} is a pixel image. \item \texttt{rpoisppx}: New argument \code{drop}. \item \texttt{rpoisline}: Also returns information about the original infinite random lines. \item \texttt{rpoislpp}: If \texttt{lambda} is a list of \texttt{"linim"} or \texttt{"linfun"} objects, then the argument \texttt{L} can be omitted. \item \texttt{rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS, rPenttinen:} New argument \texttt{drop}. \item \texttt{rtemper:} new argument \texttt{track}. \item \texttt{rthin} \begin{itemize} \item Accelerated, when \texttt{P} is a single number. \item \texttt{X} can now be a point pattern of any type (\texttt{ppp}, \texttt{lpp}, \texttt{pp3}, \texttt{ppx}) or a line segment pattern (\texttt{psp}). \end{itemize} \item \texttt{rThomas, rMatClust, rCauchy, rVarGamma}: \begin{itemize} \item When the model is approximately Poisson, it is simulated using rpoispp. This avoids computations which would require huge amounts of memory. New argument \texttt{poisthresh} controls this behaviour. \item New argument \texttt{saveparents}. \end{itemize} \item \texttt{runifpointOnLines}, \texttt{rpoisppOnLines}: New argument \code{drop}. \item \texttt{runifpointx}: New argument \code{drop}. %%S \item \texttt{selfcut.psp}: \begin{itemize} \item Computation accelerated. \item The result now has an attribute \texttt{"camefrom"} indicating the provenance of each segment in the result. \end{itemize} \item \texttt{setcov}: the name of the unit of length is preserved. 
\item \code{shapley}: In this installed dataset, the function \code{shapley.extra\$plotit} has changed slightly (to accommodate the dependence on the package \pkg{spatstat.utils}). \item \texttt{shift.im}, \texttt{shift.owin}, \texttt{shift.ppp}, \texttt{shift.psp}: More options for the argument \texttt{origin}. \item Simulation: Several basic simulation algorithms have been accelerated. Consequently, simulation outcomes are not identical to those obtained with previous versions of \spst, even when the same random seed is used. To ensure compatibility with previous versions of spatstat, revert to the slower code by setting \texttt{spatstat.options(fastthin=FALSE, fastpois=FALSE)}. \item \texttt{simulate.kppm}: \begin{itemize} \item Accelerated for LGCP models. \item Additional arguments \verb!...! are now passed to the function that performs the simulation. \end{itemize} \item \texttt{simulate.ppm}: New argument \texttt{w} controls the window of the simulated patterns. New argument \texttt{verbose}. \item \texttt{Smooth.ppp}: \begin{itemize} \item A non-Gaussian kernel can now be specified using the argument \texttt{kernel}. \item Argument \texttt{weights} can now be a pixel image, a function, a numeric vector or an expression to be evaluated. \item Infinite bandwidth \texttt{sigma=Inf} is supported. \item Accelerated by about 30\% in the case where \texttt{at="pixels"}. \item Accelerated by about 15\% in the case where \texttt{at="points"} and \texttt{kernel="gaussian"}. \item Now exits gracefully if any mark values are \texttt{NA}, \texttt{NaN} or \texttt{Inf}. \item New argument \texttt{geometric} supports geometric-mean smoothing. \item The arguments \texttt{adjust}, \texttt{edge}, \texttt{diggle} and \texttt{kernel} are now explicit formal arguments. \end{itemize} \item \texttt{solist}: New argument \verb!.NameBase! \item \texttt{spatialcdf}: \begin{itemize} \item Computation accelerated. 
\item The result does not inherit class \texttt{"ecdf"} if \texttt{normalise=FALSE}. \end{itemize} \item \texttt{spatstat.options} New options \texttt{fastthin} and \texttt{fastpois} enable fast simulation algorithms. Set these options to \texttt{FALSE} to reproduce results obtained with previous versions of \spst. \item \texttt{split.ppp}, \texttt{split.ppx}: The splitting variable \texttt{f} can now be a logical vector. \item \verb!split<-.ppp!: The default for argument \texttt{un} in \verb!split<-.ppp! now agrees with the default for the same argument in \texttt{split.ppp}. \item \texttt{square}: Handles a common error in the format of the arguments. \item \texttt{step}: now works for models of class \texttt{"mppm"}. \item \texttt{stieltjes}: Argument \texttt{M} can be a stepfun object (such as an empirical CDF). \item \texttt{subset.ppp}, \texttt{subset.lpp}, \texttt{subset.pp3}, \texttt{subset.ppx}: The argument \texttt{subset} can now be any argument acceptable to the \verb!"["! method. \item summary functions The argument \texttt{correction="all"} is now recognised: it selects all the available options. \begin{quote} This applies to \texttt{Fest}, \texttt{F3est}, \texttt{Gest}, \texttt{Gcross}, \texttt{Gdot}, \texttt{Gmulti}, \texttt{G3est}, \texttt{Gfox}, \texttt{Gcom}, \texttt{Gres}, \texttt{Hest}, \texttt{Jest}, \texttt{Jmulti}, \texttt{Jcross}, \texttt{Jdot}, \texttt{Jfox}, \texttt{Kest}, \texttt{Kinhom}, \texttt{Kmulti}, \texttt{Kcross}, \texttt{Kdot}, \texttt{Kcom}, \texttt{Kres}, \texttt{Kmulti.inhom}, \texttt{Kcross.inhom}, \texttt{Kdot.inhom}, \texttt{Kscaled}, \texttt{Ksector}, \texttt{Kmark}, \texttt{K3est}, \texttt{Lscaled}, \texttt{markcorr}, \texttt{markcrosscorr}, \texttt{nnorient}, \texttt{pairorient}, \texttt{pcfinhom}, \texttt{pcfcross.inhom}, \texttt{pcfcross}, \texttt{pcf}, \texttt{Tstat}. 
\end{quote} \item \texttt{Summary.linim} family supporting \texttt{range}, \texttt{max}, \texttt{min} etc: Recognises the argument \texttt{finite} so that \texttt{range(x, finite=TRUE)} works for a linim object \texttt{x}. \item \texttt{summary.distfun}, \texttt{summary.funxy}: \begin{itemize} \item More information is printed. \item Pixel resolution can now be controlled. \end{itemize} \item \texttt{summary.kppm}: prints more information about algorithm convergence. \item \texttt{summary.lintess}: prints information about marks. \item \texttt{summary.ppm}: New argument \texttt{fine} selects the algorithm for variance estimation. \item \texttt{summary.owin}, \texttt{summary.im}: The fraction of frame area that is occupied by the window/image is now reported. \item \texttt{sumouter}: New argument \texttt{y} allows computation of asymmetric outer products. \item \texttt{symbolmap}: \begin{itemize} \item Now accepts a vector of colour values for the arguments \texttt{col}, \texttt{cols}, \texttt{fg}, \texttt{bg} if the argument \texttt{range} is given. \item New option: \texttt{shape="arrows"}. \end{itemize} %%T \item \texttt{tess}: Argument \texttt{window} is ignored when xgrid, ygrid are given. \item \texttt{texturemap}: Argument \texttt{textures} can be missing or NULL. \item \texttt{textureplot}: Argument \texttt{x} can now be something acceptable to \texttt{as.im}. \item \texttt{tilenames}, \verb!tilenames<-!: These functions are now generic, with methods for \texttt{tess} and \texttt{lintess}. \item \texttt{to.grey} New argument \texttt{transparent}. %%U \item \texttt{union.owin}: Improved behaviour when there are more than 2 windows. \item \texttt{unstack.lintess}: now handles marks. \item \texttt{update}: now works for models of class \texttt{"mppm"}. \item \texttt{update.kppm}: \begin{itemize} \item New argument \texttt{evaluate}. \item Now handles additional arguments in any order, with or without names. \item Changed arguments. \item Improved behaviour. 
\end{itemize} \item \texttt{update.ppm}: For the case \texttt{update(model, X)} where \texttt{X} is a point pattern, if the window of \texttt{X} is different from the original window, then the model is re-fitted from scratch (i.e. \texttt{use.internal=FALSE}). %%V \item \texttt{valid.ppm} This is now a method for the generic function \texttt{valid}. \item \texttt{vcov.mppm}: \begin{itemize} \item Now handles models with Gibbs interactions. \item New argument \texttt{nacoef.action} specifies what to do if some of the fitted coefficients are \texttt{NA}, \texttt{NaN} or \texttt{Inf}. \end{itemize} \item \texttt{vcov.ppm}: \begin{itemize} \item Performance slightly improved, for Gibbs models. \item New argument \texttt{nacoef.action} specifies what to do if some of the fitted model coefficients are \texttt{NA}, \texttt{NaN} or infinite. \end{itemize} %%W %%X %%Y %%Z \item \verb![<-.im! \begin{itemize} \item Accepts an array for \texttt{value}. \item The subset index \texttt{i} can now be a linear network. Then the result of \verb!x[i, drop=FALSE]! is a pixel image of class \texttt{linim}. \item New argument \texttt{drop} controls behaviour when indices are missing as in \verb!x[] <- value! \end{itemize} \item \verb![.layered!: \begin{itemize} \item Subset index \texttt{i} can now be an \texttt{owin} object. \item Additional arguments \verb!...! are now passed to other methods. \end{itemize} \item \verb![.leverage.ppm!: New argument \texttt{update}. \item \verb![.linnet!: \begin{itemize} \item New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. \item More robust against artefacts when the subset index is a pixel mask. \end{itemize} \item \verb![.linim!: More robust against artefacts. \item \verb![.lpp!: New argument \texttt{snip} determines what to do with segments of the network that cross the boundary of the window. Default behaviour has changed. 
\item \verb![.ppx!: The subset index \texttt{i} may now be a spatial domain of class \texttt{boxx} or \texttt{box3}. \item \verb![.ppp! New argument \texttt{clip} determines whether the window is clipped. \item \verb![.ppp! The previously-unused argument \texttt{drop} now determines whether to remove unused levels of a factor. \item \verb![.pp3!, \verb![.lpp!, \verb![.ppx!, \texttt{subset.ppp, subset.pp3, subset.lpp, subset.ppx}: These methods now have an argument \texttt{drop} which determines whether to remove unused levels of a factor. \item \verb![.psp!: \begin{itemize} \item accelerated. \item New argument \texttt{fragments} specifies whether to keep fragments of line segments that are cut by the new window, or only to retain segments that lie entirely inside the window. \end{itemize} \item \verb![.solist!: Subset index \texttt{i} can now be an \texttt{owin} object. \end{itemize} \begin{thebibliography}{1} \bibitem{badd10wshop} A.~Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. 
\end{thebibliography} \end{document} spatstat/inst/doc/updates.pdf0000644000176200001440000064405713624161277016030 0ustar liggesusers%PDF-1.5 % 4 0 obj << /Length 1324 /Filter /FlateDecode >> stream xWKo6WR@\!J'@e|(4-͋ %apK) NRȕ I-Zm} e;iQ^=dC ɳ%6 ݖi\o5g}d] .BvexW*υnsbGs&*Ɖ4K$l72}(xYBhS$sDX[㲪1Lxj6U~#pڲFzk.孎o2)g9s5CAv Z2j+t9IUJ vCL a\@Z Q="ufpHcfs.h#S eIL_k&@kȵgʌPGʺ/綰 l#{Pj B8Ln5 ;SfFFD b_f iMqngB$ qw Ǒ1< !T:^]x[7i6i%7*5G4 ޘD^,>3kF86C0?t>9;xHr<26xiwIg-c@S3(]Zsـ˾U_HgWB\\Q;*U2|:.Թi\a JT )eh/?q}JmLءK,o' ͥjODzSW緟]I\h|A}w-5bw/qr&Ԙ;r{BUn󔚴 _䙽 endstream endobj 1 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/updates-004.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 14 0 R /BBox [0 0 432 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 15 0 R/F3 16 0 R>> /ExtGState << >>/ColorSpace << /sRGB 17 0 R >>>> /Length 2772 /Filter /FlateDecode >> stream xɎSRZR$% CAcK9írK/O,N|e:?-X~dy۶-ןϟֶ-ܾjٖooi%~Rummk?W#, ?3(mU}MxT",] گx}-7sǼ,T˹ Mu㲭 cI^d)kJa0R9v0ınM؅pS5gqQ=CӯyKj<-P+'#ZDkG'UKת[Ys@B4յ G*#7rT#fo~pı8ԱN<4fġX_L3&^ۺ@Bk7b2maA2;`"6A8튍9Ҭ1*zQ}cXMaA <텮ZL ;:֬ 6LQ-U9pemM.mdӨ0xHTGC+mQ8T >C2=bxNI9j2Mwk_WXvbVG_Ll[.nj0ϏU6ܙ{Ryu`L\Y)K.XRAn~7uHPrroCn.lXD#3h9%΅o9k~t"+3=ӆgV#TG</dsDk}qrzl3Pl>)xd3>`"]RaCRC0~W~IP2oB]_Aeq~YC!f z`4LZ;ZgWbQ"tRdjDu;HDN,Do>2!C[;|(eC~Gq eގ̒nmkkhṍ !9^$B{9nBǙ$&:ii}"L(f vQխ P#S10-˵p 9y{Le~UqgdYcfY'\J:y\y*fL\]`CC>FD"pnw4[ |mƉw28v*e4m=>USb?D*j_?zޯ63׷0_1^^S0,В=gB=&XeH 0zl[!a.=\rhc+ǯZ/ WK^*Na)_+g$zqWd|@ǸWqq7y'(WJ.\y/PL˻k-~cdQmEn!P=&\F5+g3aG.O}s͑ yi8([7R,IaOa߆ULmc6GaWM ˴! 
d62eشIOWi1 ˴.$æ MnaLi[d6&´l&AYMKäI|)?6mR S9LL 6d]´lrWiCԇi3٦M2۴iR+L6mȞM Wϧٴ <"LLML4H,Ԉ?Zis~:k|}=ӳc 5#[A : endstream endobj 19 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 22 0 obj << /Length 1480 /Filter /FlateDecode >> stream xXKs6WhzfLoѭh[c=\Q}|Hnzv[2we^Of7/$ Lbg~ &j+S*x챵_U=4J0&8`%.-, i10%E)K2.I*SDzO("8!?ICjpgRv1Z$ec =\#'USYu[ VV>nGq~5wwƹԃߺnGͷ7Ͽ llv=Y<=Ϯ2SyQcxjG#đu-FUeW@ij JJ@}+i`6%նZpz2UNnJjaW0BiUyL`wwp㚐Qs"x12DZt)H9JA^o!?VbSP\5}éBͅݖJBzd8J|Wl{y2w5?BPKLM?G'}"&.gMQH!u X;/zjLhĿNfd^Y׈'aDk G5 t1'F.Rc&y}IYp>,Hy˜P[k(M'vRFFT^kIa}73ag# 6K)E֑"*n_oλcc~?yEFA*tt-wpǯqɅ X]\]]M=EⓄj[+rB?Ņ.G@{M*;bRMJ~XQ08کEou5ӊ5jr]"v|q lsRs%#'.#W/x۔.;O/^:dtP!-:*}IkS<*.QFUZLyi] ]N ۺjvi?֑c[v7!!!}QQm#lX>[?wh4:jߢ95n,auݓ}TIϳy endstream endobj 26 0 obj << /Length 1794 /Filter /FlateDecode >> stream xYYF~ЛQUa&+.oq:1ÀֻV]>yyUA$fYQ*g]Gb# NoBƁӨ.C'SpϥfW^\j'MԇB}eBVj)B߫[벿.U4^f I E|f=R=nj;) TVd-^j Kt3sjM v󅈜{.)ȕiJ#k A6#4h|.(!U|ӷ-TR1ݥ-H[|svo # Pa[ 2OV4cT?3 tw&<.Y 1:CBAFվZڴҺ/[F:7b7 ^M,[j$HO n p=@Y7Yqwtl5:̛*HWvTsUB!St.w;<"rL\Gvr;lp˿$SB>*t,BdDtYY(%[v.ƃ*k!QB(~5Oh5A uG|T)v#OcQ |ӝzLq>6Ȟ@C՗6ܜ!Q~iBZܻB#EI9+2~C _H)*֖$"8hbkC=U# ) 22Ò2rckEIHj_<_ ŧ{Hlg0(H8徥 (@qL3Vtf v]yd|?)t^$dzPt|OEnoA.VeD2J#V \tie96Yܕ F{lb/}6X J5Y >:NSIjBH覉5{_âw~Qot˟-==@/*SiվS$oTVcrzFiLOlэa}K.^ow{ f}0)vͨYKɸ61kB%) Gty|/lk:Z{ln։yha=AwJ[>%.yj?6 w/ Qeaaȡ4I:nd;>uд 6=MTԷŢamp馉#Y>OtTg$ c& `ٛg endstream endobj 30 0 obj << /Length 1574 /Filter /FlateDecode >> stream xYrF+x$DRIEX9 ApXgg9t (Yz}1 
&ނe̛,7$`4$,Hr=k?z=Wba>ğG4]q(a -m3'"a LZXEє􁃲e!?\ eϭg3t bh1eᅠsOp/# jp(/,ܮ¡ 0Ʉ[ rwo0Eqx!a%xtH3j]NR"/s2p::X-ΨGl+_U\:G$W>r):{J 9)C+:^(`JE򃟲(́'_8#d'jDӢ|ь2 5@vwq[g"070'G n ?HJTj EҌQ  Ek6B0A=:tH@Ḕܱ6\f.G2sSrEZ1gXa in{RݒnE>f<'؟ÈRr$ݏ#}$˒QA<`:vj*6>Tɩ0].i8eB,pnWkV0?i(P;j14dMRAkӀt`G$Pc|qQ"El6́1R_\id-˯fo֠Q@HNs oƬJҗ].{ҟJC/l@H( Sェn kd<ԭ } M$Nq{L-yu]˩b\z?Z-2n MvkcrZ:n] Lr۴VaUGpe )Ju;.wMjnoMLݼAJMIdpT^a.B+ {WX㰌<ʞ.d N:NBv)|Zwr1QdSCUJXf,ƗgW};F%&uo_u!dF|fviI7[+!ǮT43 [K;-:2^rV:W3~zՄrvһTܵey,& fqJBtqy/9n endstream endobj 33 0 obj << /Length 1526 /Filter /FlateDecode >> stream xYMs6WHD ocIN궇ZdPIʒ}JPiD۷؟1Iݔw$ĎG|cOoY_8bmVn _zd}߳/cOaaȷ6Џ ?U#34,GjNH;T^b5܋]`lyA8DDщ 0pc/\O!e#l+PIUd^IzmlI= JifVߊ;f ,^262H/K =oú دQ@ڬCk/׹` pzhP \2XU쟁, 2#h4BF\Q#(LP {'LN=(&1^6 cT)b/heI@}vYX xʄ.$'! +XŊC`Ƣ$e1J_Z~H8URF!PX^t&*},\(F0QqDtPrJ A~cߑcqF1LPEǷzp(u{KVT3# Z[6eKISk?1V h?OqÊ8dHX^3;Ox;> oOZܫ 5jc'LȇdR K0xmQ%˜"'H!Æe=t1S'2^:,.?Tq%a'*oāZ2Dٮ71-q_[ 47L/ .Zh&m*- %xrd~p%0J ވ>;t%gsRHG{՞ ύ/{; xJǕM0 WhtPS[ݨ)7tQs>5 C-eq}T͖TfX YuFF[XMl(;nl(LSo6j dSwSYJ uݎDM:owV QFP$?(tעk2+'Z]t(8jmXn)#[ْ[ {\uK$vY=HAE(7ڦjVXN|m U9A0)^}*қ)6VBkRqXN[@^O(rX^b?!"haT~n>;}wKm1ӻ})M^")Ȥ!IZuԵCTGC--Nk9t~abұuIzNEa Tk=G08R̹pCJO1j3+(s?E(EYk_`d-xx#ձlVk26iԯ'0zNI=+-U(J$IaB_.: endstream endobj 36 0 obj << /Length 1353 /Filter /FlateDecode >> stream xXɒFW0j$v=c1%Z*߻řiJ^e|W׷Ө p4q~~4i/> _\wOސf}}0L?zO֍6A\W9] F 2~hЏSji+!V QҎfVvZV+"pPhWe+󭌌 7쌌pf3D9#c'H+S)K%?1aZ4:8:ѩC LVE)&Kۆdy|):5(kOk8QSX`ծlkvfTJeMJ nu'9TtjkdxȂ2 Mbwru+]{0VSWVy?jI[aʩ.d . 
lƱz` 0%lD eKو4U؃th,FhAb!ˢVRbvИ7!2vnj;c .K 6`okے9 XUW uGҷP$=Ur#kT{*[QBI Y,I`C<(^yx]i`&996AR轆5-.k4^TXiqj-mO#>f)89|{vFQyxW*1K7ϼE 2#}5.;&Y&əڥ򻫥0.d?kt'gCTo}~l8l"v~Uv׸h̶>J:J^WeCW'{o Ue2>x220s] LQ.X:Cc9~O 1}_Tܢ5g$E;gkɫ.t3?c3~_Ω endstream endobj 39 0 obj << /Length 1535 /Filter /FlateDecode >> stream xXKs6WH͔ IԦqi@1R_ $%ʱ]{zE o/wשZWb}HgI=&u| wA<^k4[2pv2̜ceYQ:yi;mm09[`W^ Gk3'[3titc?6=.C{uϛvKӱr*ʼ4S]L^ ਒5HRN0ҹPR`ʋް]CJέyC询葌b[t6S-)L|$X0CB" #T=7 c/P+h5lcPniwP "z6Gm{h:/>D-z T)hAomD YdXX ܚ(ƞeWo֠'(lb=< 6#3?{^o!<衼☠@ҍHc*q$ iY=NB 2S4y9rCCEכp+F xHXĜ) 5L*3ƖRK7A!FNg.arfGɜ[^a@)yGQPȍ-<ّ̲ۡ7l6[1Sh{}⨜3cnFUh-*tm#ҭ3²^XxËl~ ؑ\ ?E.i Ѵ}5^|Pnx X݊ [arC^}ڪ:؞rxY c;u?xWOrlYBk9r3A^slj'ɭ$.I*X],ZDsp/cr0J-YFm>([ k[;X\KCրfȵXFzF4c˾&WfǘiŇA6tրv6+3 5"X)/5FdHa_zy endstream endobj 44 0 obj << /Length 2328 /Filter /FlateDecode >> stream xڵZKsϯpT KT.*S2{%HlO~6 Hę9H$Aw?]}^DI]]^*sAZK%Zy_qp7J ^\(R«u3N~U$AݟU/zt w0kpm״9 [Vfp?p{;Mx_<Gj -JOXbۭ~cJLTa~d}qPy^HVZRk|&iW$ATg5$jTonZ”i4 ԯ61{ǫbNCwGH7w_^cQnZx~W4kAGe͋vNbryD /.agc$ƿɾHsU?HDjOCFXp&nn7uګ]SVgӦ",R>v?!ٞB1DQ̠Xefy57nPF*y,X"'kGlv"I\|=vA{AB 8p0krvhr"L%oH)5Qԟ+ֿgx^"|ѿ[o?Ym:z۟BԎ*3db3ˊVm,hBjC"#бa ToƬk}'#A٣7b5$v)\y&C4{=DY.ABΖ+X0~챈؝zFȋaq,1d!6xN6~%ظ{(h._⑨td0dcrt|OJX2+tDz\[Qm@NQG؟ L!NZ&p4)qZSc{W8 ;8uܯr߄~B6-6Wj)aY\0; ¨To:ě(P.#vq@LNxɉ;u,58o>4pja$ҺXdxj(n$b^&R$ǐ7Ibq$:19PhП0AK#NjO@%n\8p@&? +W Ȳ4:I }5Z437H~i_Wz(;㔅 )ȼbiU6mݱ$Vii[`:!Eɹ ?#P&N%Qwy;M n@fiG)W:֪vQiGA]&Ķ#M"ώ!y}X:-7`s- Eіxۑ" >9̈́g4'Rb ͎2V \fJ# ^V#V9E(D"%6nGs+9w&)׀ @v0"HްPoi>R)#HĴ|;YwV'N-,8 !_¿s j\5{pݹ6WBevyJ<bEs:K ]:aܴP~ jl(eUۊ?JJr [#\WW;fm!6Wy8aV`{y3y ḠMPēD=QnCgoD ͘/}:")IӓCrH_,JcyA9a($iGIF#op˽luCL7d2!3*Rueb)ڞ.7T `Cw>mNG d5Ɯng壩Q ~]`o'jixO3]g5;g|$Ϊfq#oHiD{<9gd&==}R?ug? 
"oic=,58Ѭ} '}/N3g:qX# Po%UkYT3o^*::dT߁V0g鎏5Ob:rCv47=XNmn;3`,3El7#h )3rw;՞ %gѶh{XTCS1EHleEe&23ԓ=Y94g: 'Vg1 n*{I|iLX'qu6.Xٗ%9~Gwl 9?˹09e"){p"mͻ_e[ endstream endobj 47 0 obj << /Length 2418 /Filter /FlateDecode >> stream xڽZKs8Wr C+oRLVk.9-k"JR?@?@GTHhOx&#QDͲXDyn.tvoIꏛ_zƚy"TS~L%T ꟡g#;[}hD dvꋄm$|5{zք(2!{7|/([;dF?I2.of4sT>f<\-T6q!7փHIE?2˦PwvaW_WqRB)H| EmH-m~.(juo+'7m{oQsגOGK&X;NaguKH#/փj:4A|CMrPe=FqRާqc&! HVtԼI{\t9틅~&O(!|m;5l#fHǞ4.k#]Fl ܶCS62ԗ$сRvw5 ¾ݦ "60"#Z n(kLkSKP$Y-6{2Vrk.yL7K?߂Mqb+/IEqE€2_6#"SL%Z/t "!a Z$BTՊy}V{qD!-Q1 7seѻOҽk@v'-D򁤪)jRdU:]e:_:b6mGYx͞a@&tyAI"QFK >XĩBagxYT Bǁr G0#DY`S,@+0g_ZQ7pOGsZPZR ?X.G9ܐ8rZ4W|<[{X ȗiao]ź96 ~*qMqy^Cݮi 2ShkPFf$Vz—$ɪ;|.53)/pB܀^OJt0̳"~u;/US($Ӟa/'zD:;`ejCZPo)K@{sS'WyJjqeDqpgGȧx p{ [zm"]Prs \Y$oz할/ ]0eu:;+h7CB$AQ .`?EI BGG-)Dѧ ν+ aNJ"M/ȍǒ6;AT: 'b^%)_{{x) (rΚg,Ysp2BU)9tq,HOO\gfdI1m{d#JkП,ʣ9Km'Ԁ{ma!ƻG׎\A#}x7F$|-AuP36E2 K۵̊˒ }UAYU*TJsJ(}f.Ng'4ouS ,E]DH,{/1eLX#G&ByD7Jd! pMnFsS]|ʙa?P#v]nN|{Jd]yEa*|o#)@u\=CrpUC:9m+/w|`ĭLuK ڋEcV?k\乺[/?P Cž5.? * +Wsz@}%H^CP\$]z|Qk2HLyNmUyrρ1u%s-l͘eb-R*X#*la# /U;j$4gD増=в8E6(gWV' $o)ee5 bMbQV5whv7n^bߨMM~Íǔ0k0ɠ/cfȵɱQR67A-̋Y/#WYIaJw{H q6oܿ N8S:r]&(D~'ʭXo`#*,YrPU{v쁈vM'(.$OгZlxСP2n =t urѰzoW;peDFH^ƝnպgZ۟+@Ac3m#r޼yhĻ endstream endobj 50 0 obj << /Length 2627 /Filter /FlateDecode >> stream xZIϯh%n'Q{ @&-/mɱ^}ZLmc-STXWE}y˷<  qqAX_yv8m]E˷̙o qVwMUkw}Z (2IL$'r@k~?c5A_~zݛX RL]gاvG51 63Z¼68{yp'" (qQbwG 'd*H*ɧ(6DlH)fϤ3V[Si9Qt^ jXBK)D ]MT@ԑ(' nj1gCG| 1= 酚=zƪ[BQfr[?TeO"0{5 U܆BɁ.@rfjfBr3=;, $~t/]=XOc[Q&AQd?Rl ؁|I͜]['FI||n^&cRpA&s , `o)iErKR({Ҋtc UeAeJ["kĬT,_ 7l!ƙ[eϠm|6FphX^p~T4N "RpǾan5+=( >!^؈'na }]Tmi==}A~mK)(͆imAtfċq^I~34\>wn/\֑:3 2peY`r$lжwX!o"Ql$kֱUW~A:(J2VK䩹ȓTʳ,&_Z%e -H G%6 c;6 z4d$Yk [Yd+ v4ؖiVGJ+UtQv{66Sh桴&<Ͱs Jש+"`д^jv5R7U4CДL&;g Wƚ4hNaL IgS}kf 5+e- Ci`5+$ -0O+gWg & `#Sc4hF)\?kdڒdp, #Oi>_\=,Z@RWn{V1ftn5 wi?^Af8<'`9mWjBF7x=9!Ŀr^P!Cl>Q)Iu fp wh ,Ϥ=B+bhTR >ɡt8"^ztM' |;ι LCnJ0/8. 
gu&iq_9l\ ; \#~}ݝjv𞺚&.M7/k~[^x:b?iLM6N6@pkn-ΞzqQD V-͉e!y;8e}Q"Kt 1(/Or@P ?=2&u9/ЩCsD0YsfJlӀLX~w-N>&G0Ov0\{ .X^]233ƁؐH>w>=p?`q^~؍\Zx&eo(bӘ vT!x~ޚir,Mc3qN}a.|7Vn6jAV:l} !ދՁ]9)]HG8 bէ L$M endstream endobj 53 0 obj << /Length 2510 /Filter /FlateDecode >> stream xZs8~_Gy֊Ͷ{{3i3rŖcom+'I%KwJl"HԻ7?}H,*~/e_ݬ} E?};ccW[ˇkW ב5yʝ.N}fCc?ox ^-e|^-Uiyb"/FO[(6nX^#c?4b^-Us;XZ߿]%TG\<,F[ mRhK2 }%~ {{ 7~1Q㢒hȲp-) jJ'c74!r0f8l;I2L`$CdVHAiI˫_8e=%زϘTN'UXnIo4UCA0>takΜm̊V8 A$Á]D=+QatX1>6.qt PT+B$t!@P>D-t`QAj {B1yUOd$+ tw‡:{1KAҮ QB˝*hXҺ[i51xM辦qC ,BuIn>7Ѯ)P6 (rq{@,LlEjB mH K4A=0L}td+ZBьBs kV'Ho34Ez ڢ.Ŵ$xldX1b(qT;lPG';e]XA-y/3]~@XhpvOLGfe[;ܳɝ5H<3 ,Wyɾ&Mj\pshC̎˖55GS'$&HG*TZ5Ɓ 7Q} 'HyS]Ҙ :'vyh#FIs}0%תrȒ#+eP8ﭒM$>fQ?9Кfү4\?A](}D^*HS}$ICCOhq~*=LJoȣ|PҶ{R[qK#&0Fdc`&AS͋98Ayg殾EGs?p)2b?rbJ"EO/`ܒw9_2y+“@  oӢ(73ua s^S=敞[?لvN]Ja)\QRv2u A'uӣǐ~`G|d&2G5Ҟ( 9blҵ0lcwoTVw X}+|ǃb3;[wL[r Fb겯+i"ZۊVcd7𼳵Pp! 4"VO~fMֹgA{@ 5Mn㨕#D 2)epW_"FCnmTn%j9;4YNdѺ(iwn{(dwGCZu\!l9Vc˒g2d%d8n}֯ WBmt6uzipҬx_rw(SÉd]o{1߆'kX~s{BunDR ]"%e\gC,]Y}֊pjI ӊ]8Nt0Vgc Mɼe0ᣋGS[u7eCQB,=q:nS_b!Eh?n21 e/lY\Qc:kwV?B*?FBP2Pd$=[BgNyLz7sw> stream xZKs6Wxzfb >:$md:oI4EيEQ8],ҠDN:msD LJo|vɋ4: ?bqF~eQ]yYɋ.4; d,Z,s)Θ%\uqvǑVvxt _W(pm#Z \л˼/Ǯ?{{JoR9tvd_-{T(9ӕmuFVmQÿͪkՅVؠe98m0~S~I[К>ӄ~s?žvXJE+8^_~al[:2j`'qnEfmTaH!)JfZ*A>EI-¥08)gtB:bpX*TTsyk'/+ˁ\cI>miOK\68NLJZW3V@ю (ip<{;FXOh:]c([O1xqm0]C>S*rQLӴ(K]B6 $/vK胮%'?{Zde>ލg蠉q M'#, _3{00C1응ѹD\wCqM(*qM(n Xz}e5h4gUST1+r3fOs?N\_JIʹ7Ao'5xR"=~.e{f@hcfB3M4O;% [+b1C,ZC{r-;& cQG^Q6 Qw|Xs{՘Ȥr#Ҝs'U_GIZ;(cZ>ҡlh?!8Ulh5/`|xwg0OxIqb@BlgUC姴kq ,֊֐2mz%U}$ qQѪK i Pi~g%JK UZkaˏ Q&v]Xbjp 7GwtCշ/W$^MYyJ䩹 -bHK*RR" `u.,Zq9 h^[\eU1EյU쬰,rOOn-*djUʩ4V8-2a5禓.kGR+VC]Ue&{[W4[Ź[!@=TWsy>%7m~2ƶRްJo£njv`33M>܎aëJ)I !.Sp? 
ɸXu:M th)+u|f@@/{u@Mi9tbLlaӇOX',.~ЋĮ^Q'O:\h a`X{;3FQ=xlJa M^Q{ ?;$\k ;CUp9˦x:Vϰlך4*hx-oע6r'aGhBn+@TW6N Hcp*(< [ xJĈڨ"EYT4͎+Y߭вڝh=m3Ȳږǐg~2{ic7QL*7- ~f:FpeL9S ia8qKB%+`QRL[1j!{ܪyω'a#z:26D@faiƧ)w@kI[*tp j^WN@z ce Wn n34O'*+z4.,RAG SF KfׂT}B0| 7TҺ+k2,mzHoRή 7>7&[cb(ռ_ \n,{>KjE1\e'5ș$bTCCBz,; oi?6mKo)*UKe,Ne).UEieɦa:1oV1  2HE~ L=z~oZȬg endstream endobj 59 0 obj << /Length 2665 /Filter /FlateDecode >> stream xڵrF%eƄ/ٓ$3;HF!DQy[/" +뷯8{S]WEp5"s8e^Wɷ0.,yu0qc\/~/k8j$6P}9$dZ iG?X^"k_O(dj` U4)bzēLo<ƭ+ie^R\MK ̝0|%<@Z⇃IqO#쮧ad*5.ۃf4=en~{u^&BET JQsHmeSK B""eӇ$9ͅAx=ĠݚglF}&%֝>Niչ%{ߵWrVSdeȢtYLĒYʂ' s!|uT=+q=.lZ~EA~=L6dHg(3bDŽ g}В6ܐ}- z'ؑ~9O'H [2xɐ%e ,mm:KF,(# %jq+bnFG=%QCzQ:*(~%ۃet/͞ݡ͢6%GK{jH q9+ؗ*< TR*kK W.E?̟cB~+<8{${v'>F|( IW=h1K[K]tu"%+:F߀gDVr[(cApV2"<_q2μOՊߵWőZmH}}dEz{xEC(hhxR0'~@UؔӞ ʽM8i] AkSC{@I&O+ӍF]2ҍ+b"fSs'U^zin-PMj02H=}jr/1.W+ _";yfC-1yp2+y-c eL;L&<ҭ;f"?: u4 AӍwZ{\v{F #蹴&J@]zymJ% _B& oxXUdU`)A:EYĪM ˇ7V`oq}oVR%ֵA)3mMӴ&楥]MTX jBJ9^ix@b S{` }-pҎ 'A 2ȓ1~r')j. 4^^}+瞩H4O s+Gy>Y*$"4pL7LJBSN~^Ti YVflTdsS:Ei֏mMXgNޘ**fYvSM NbEۉLUz+JX¼UHnuЅgZU.WV')>cSJ%OK\6\Lg r>)b 7h;EPd٨~fj&)'7'9DgjW`QMNjs2՝{kehԙ"Mݚ\1ف4 0 ;@[ *r:mzߘJv ^.*r ;t31ĺ>WY1N++23H3Nuic}TܟgG tzL ]o;\nl1Is1LռpNBaBy'^rtGFpu8|QwuS&UgxTL~t&K&G~-EH'p^RuemMӔ|v٣gXZXxHMj{k{ 6veإk$[(hP5p"YjQݝʹ^e5vV_it=;q$Ә.|̾Qx]wK=" irC W4٫HXsfUC{dn7Z!zMyѩ=(cUH&q,XR%_zG vǣWjd'/vۭTg~}6@w҇*x5ήjZV "׎=طp0*3@rUۇN*w9s5#bt7<Ƴ5S?%̭cbyh'ـ=ArdbSmzY+ӓ2Z+yRul毽<"[Y󨰊w i  a:;ƤarMj4&ssQ# Y(sg{((qD x*hEVs4vj]Ug7*=m|Rw3I:W?slIWx '^A mHDۂ߽yC endstream endobj 63 0 obj << /Length 2540 /Filter /FlateDecode >> stream xZK6WQSe!ă)[M\69"gF$*"ʟ(q#@_77?|HX*,2HL꿋XUdG \-\kVxSNsTx$./\ë2csW_E,~w/2x2a7sVr) ^mTvk( =7־W؏'Ҩ^{bV}.K!Xg R 4pn >< G@KX%=a +&EB t?nP :"oJ77`|w#iszW k[ @L>fCnxNjb{V"ph Vip{dv)ʽRJ&cb" ;HSJ_ 4!C^Qe,f/ޒ8gyxCpX)&phRݚ~:9%zEA>{)>*L9I6SK%tϭ!^Өşs wտpik? 6'gUS՛S}K.dġ;{GZ6; qƘea}7:(f:h&qzJ;C!y>Ȯ+' aC? 
_qwQ^Rp53b21Xڭ_Y*!YtNY:Y*_-e;{9!wjyIB#<\xuU#g]S!'n)E'$D3?>% &w(h{N ؙ5D;c9W yco&6έu\v]ۯ \J'Үĵ2 ?D2rj>N_8 ?9%;K Ld,{e2ܔ>>`!K[4M֌|]aÍES eOR(V-Ihٳ?g[ha>#޵*uU{B>}MgA}z>"%kByXgQCKSd"etu*!Vo44w9(SV"̶sUΣz`)}e h9 ]lhI`pUXzRiەߡpv\a\aSL)jqĖ6lgk^휃ޟ~#\sF#4fBghLam3"z jN-QEa9xdg]tgIMTw[úP>+r98!d}3ؕTz]\ru:.N>)R7zm@=S )產%Q~ 3*RJo||wo I*ݳrv|9.MK{.bG?$c!e2 "o#最Sg3'Uteq-gHgOacbG8lQMu Wފ!!uQ'HpPm& &27AsF㓑˒aa}9U/fR8UQb9]~'ӹ+#OF&iZ"pxDL벖xYLJBjbw Σ~ZFv:W伢=8ԉ~H AbѤ&?*kRn^MHQ/$RMU[PR\>z*k֞ /2\6rZ":#F:Py$:i#/clvP uA+빑^;5*_Ԩ'`ΩQ9I9DD*Z#.YxGݞfOlL /!h?vrY*pɞiڟrDcWK#9ZϡI͡zEz{&PL+X>x6#f> \d\Jf,7 endstream endobj 66 0 obj << /Length 2539 /Filter /FlateDecode >> stream x[Ks6ϯ-rU @\2LR5٩+{EѲbITDcm IE˓32@׍@޼€A^^1"JEI|u3w㈾, _[\sxCȐ k%'ٙ\ cdp-YBwD{iVDN:g3ϵFs\ cr٠S8<*Tl,a$8QI(Şq4}rb%O$,Yua!/jXy*Hsb$;TUϤ.ߘ*-QKB~4ŪzfP)dGY,Mk2j="qIfJy0.4*G#SLܖ$,^KSήRLzފُl:¤`q.G-YXqFC =@,.? ͔}B[Vny ޖR̭P"!1?cȟUQRq5Wu(rYF'D)TJ*Rn 1%bz@<+n JqOZl^Rg*$tWz}cyF-O)V|O% UhKfRKX"l-uӸÚɨjǥ vL㾑 5Μvq]@,~;5 SV"HuXvH- i Qa*mN|C n$1ǵc4#TaӉktcB.A*]?m0"c̾7r4TdilJ2 #6N4<8;.Zc|ٶ. 
Â_wؒqI$MYF-[lc+vb[A5Z%Ⱦ!`@!0O(`[\MtWf*GZȩeJco#ǀAO(4CTfK>޷6qYSG4ykol\ה|7^rMeAX7goO`p1cH 8[jnQmϡu# 7P!]T(AslK& k5%-MM%܀J!ܑt%%,YW;CEje&1KL]n Xn6d6l^Kȗ^SݎS(rgoC?Mn271_^j0[7X[ ө\nj-ĵkL.KZOfKEqJ\uo2EOz kd># pIYMiBtd'sfq@B{VI:Gró]])X@](F:V/ Iĝ<0߭뺺?/{^< $9ƅoIE4(3U5;{4{d˃k[yU)D:m{MR&1rgge?vxs=Hzv쉟ɽڎ-Ueo.%}/h~D"qgT.09W|˘7ə3 D9P0˲pۻ+.{7{W &'S&_gh\INb{}%6OU,hՃu@ܑq9t SIOhO,bH!K#eR?z" endstream endobj 69 0 obj << /Length 2308 /Filter /FlateDecode >> stream xYɒWȊhaD0Kp8ʾITI3(ޙR`-Xe&o>z&SQ]giUO)tY̮W&_L~7\jYI[}NK<\""JķW,ɗjN5Np=MhmpJ\ g JhJd{5p@i VQ:yO1c`Bw!;k/Y4N Ҝ&4D$p0Ɏ4[rO;]܊23&KLޫ7Xlۣw~ 79+~Fڣ m5>D!G4rngwR]Ӧ-{߰;~v(QZ!tl;ZT5;0l!ÊvogM7-6b~ҙfّڿ_S [4:ݱe 2:+YD\3_q5RewYz4`iX2~HT?Kڵ]^ &j[ҳ6v 6AlYlKD`DZ,VlcHҥі Ѯ m\đ[xbb֨x旅5&\Hp޾O"H7NmqZ';^nopdiJ'|rto8|8tN^O>x S8}bUh\jgu66溲tą S9]],spE|\b7HTEK9-i ԐMҡ5`~g[//.ܺ2L Ŀ[iQmtkPI5[B<9}MbbI4OKZ"%y \6*bٟ~QSUT2Y{c|xƵ̇\]h(1nI릯K;t Qnѕx1Sg(O䏤jd3({9{Y׺~5\p}_V,`9xPMf1.8ޱfHUrjq)5yyLsS {;@eVRCj"m`aslq:kG\ro=3cXv!e^ F&;4w'|&2.N*8 KW懲Ѳu f59j,ӗZ$kw}i$ (L=la냖up/,o44eQz,c|&KeK%{ƧV)thfuklX,! N;S/PT|7.a:tnـFžmfz(f3{KCDjoxaSrܖ[ވ\_8K gm-&rdpW+gr`=bL+ťJw M ͉}t"dk:$Lk NSRregq`T=>d鸨q?%Z+)~ZaG3O1u9BRz~q9/j׀oyvX -{Z> R=}4t[S0@Z(|DzJ(TdcM)lO|ϞT1(^.8=zeb̄݌)!d 9ǽw__LcV"*֢ iwtuiF5ZUgf IB P)wY;;ʗbL_ 6%2oJvݡccKCxZc[Jm̌4^ yk`Ο`4P>aKf8fwtF}Ǩgy(m%}-.\| :Ӹ .tZ zw.{p -TE_sBt!Rl Aa7 I)O[Lz6}3_ 3Wo?cww7)jDξ> stream xZMo6ȡX"EQRڢ lEѦn-'nlː.;RC9v= G%(?ܾ672eRʛMiM"?ӿnoza*'tdaOS𻧷M"Q{}'.(L{jEpacv|I;|?۩*]A 1I! a&sLv:~hmDVFT-kV"D$RWGPHbV͖֍֋^@YE73د>(Yget;/b`Y>&e)6މδNRr*6{GdnSUwARƻ]Ywngߟ;!iS͒Z=TȺf_z'aGXa>ZxUpJf"OkXn؎Ԗk [+FC:[f;o(zCYvKw޲Y,!#}s#My&*roMd7Pe_Sf)ME}ǷORax`zcGqx]NJ GՆ4dJ.{s~ JS񂼍M +&,D|ˁHt8@YnZf Lbk+tCa3[sv .q#0Fq΋q!=$1+e!xCL:43 ZOM ' ?oH`ɤt R迲gntKzE 5jHSm\%adܴLv875VtcĢf/eKScd8$ōL K}MmU,5P o l t 3sNJiͶ5iQ Abv} a)btxR_Aw? +hH%+^;8,pUFqUy&t&/G1Oˑ ,OS w;'&)vq4j% sk8hV)aAn)S* Y2@S(1 e8jc]$ WL)Dutf Jȵj. 
Y wC92HlFڢqM;bB̊uA9\RD\,T\8U=ዢH0[Jc׺#w>|V]gۨ~ 9d5sлY0 h7QM ]EI,- tZĜ/+"/JB%0arV(f:_,]&JN[CD{Huoz"U<ȰrEԳXJH=뱔7,2 y#LcuAA1*A.$BYl1> stream xڽZK6ϯh ^ 0$ ,‹29mu[Z4^(䶻9Ȗ(>/Mot7"U7Y8Kof$쨵tvqVϪn&Mg'ZgWS [[ W 5oZ|y]us{6wpdl>[=Po!ZhvOǿYxo`c%kec+?+׭l+SskW^8 ]tG\t%t2s_aa# [D8N]g\䵈蹌s =]b4/6dsUܷ<< }CbKPl&*TNn luzS2e#7F%ZvhͳMxJab#~Fu`q>AۑMdJ E8(Q`hm.Rׯ ߑd$7Y ;7*_kWYX!k_iGRs( ÄI[uʼnӻ PNG<ؘ'z׳#( $6?F:O'1-טp#u-o=d\[v+n!,0F@1z^Cp@c(~"V~iq JTwq,Xbk] ^Gނ)5܀" s)w9SEC2a8g˭č +(Q3)Vf ۤ_x%"p DHJ[=zW}ޗ֝\/>ʡ7F\Z1&#  I5`5:l&=/taͳV6؈++N!x1E 9ZUR4;@UsˎaSϠ!۶wR.L! 1ˋc()Yjв䋞nsӥ3Y$9o iE_nz HBL;'.fl|+7u0vqi Jd%܌ܫT]i=nzqRBY܄^@1FݹLHS'14'YgHuÐ~v;gfJXXUm/(TG8Ӫ{$O ^ǂcOC tEM$ EbSX,E0_,~"}$sǮckBopTt:z|V409al4s&-[нX$Yy&:}YwaN2P]kѿ_ ŠΗfh?A0$`ף)E")fl|TGcIJgT5pL|@f?Gp⣥EtV >SQ->4Th)j.r1vPcaE%e"DlHъvϹ̪X! X)?6Z̼ïAӣCҟH%2uvt8_j|XScl$e}hpKʼnG(>JL"](/`_d_}54HJL(kFXeʨ"ry$9BvyEt qavdKPYLAv%5:<[:vIz= |ģb0mj CGvu%% ٕ@.8 9RabiPMڈо "e ՑdrN eCLT28Aݙ6IKJh rpI^ƹ8x)<GRTsA{( Lj r)أ>_5IVS!쎬dI||2t9&=b`Ev]ȬYQ <# :U*MlV'畢8b,b?5ýUxx 73 є> stream xZo_|"%ꣷע l{ CŖeڒkɦ;PL%7 ЃI O~;"*.E/q=dtǿ9̅<";ku/{Z:*pp:{Ae#)I/~;`VmU8(P>}[I")z~ 7W9/b|E^zBTJõPKn@ $ : H߶FO#O@ꢲbq]{9nCIچvNIYI"T;"zVRβHXqI''ǃ5C .+tG}-I Ye[3wUYۍmU9_lD+IлGl(wy* ܉$ c;T7n_(id~[YiXM_X D2]|A;#oW@T:٧a"`*{YywVf2߆ɋ-FXC,jՕ} r']Kgq"5Kh=?Hx8XœG)/liWׇQӱ(70tH0=?x2b[~=n_I7`esnho69CdqyOʣ!6&a~);XACw2!E@&Pبh9gj(uy.#-k|´e5Ix>UF|%M1ڿm8 UfmJ]Ӣi -=sHb=yN5sטy+UhS/H5OСEchK{>`}ݾEG_(}#%ZQ/`җ7Y0WoyYY[C=ܮvS.R2Ej$rPXֺ/0 ?O4֡m qR. bW!j=hQ䐇& $U; 6gyy ba\\WmBi,LSEU[WLH8ARt:f1$KDS*1-L0fTKm 83<;CQs?*ϱօO7ote3a g)"]OuIx!Rz.5}X^9@<3\~6W~MrٔY8ma0 & 4To˙K A`uwUN;wG(.mד"7̓W}t.,VqVLT[Em(ZZvnzd-[iDIh6d Oyv,va~rl(z\(e9ᎄЗoo-HSZ9w^}/o0#)I?X$"WYj\@o.'w+ܗ4e}X98`w+2r:=ȈQST rkJi\{^?ÌTޑsIJWMg$p2r9!"S_l0Z'Y5TE/#̭3QiKa*J(;Pt`\>іg»wq% x=qUs&2J~s{"֞8ސ +f[ŝWOSb FF*.u؀/=޺D 8rjR [rV3|٧[qK͡ի(@'Xk`-5a4tVod$ԨXw,ۆĂg=Q-F=IAݤ^U$՛t/,77cدIU WK#3B"yEs+sk|${ᒃWxZrkQ%+m/h~nx; ħZG.X(. 
;vթ|7Z.I2`XcomÂ7y^_~ȸ~b=DW #אdg_GDq0^Od2,zGgUL;<&|h :ynR߂kQ|ԍ _k?e0X^ߌtmv>KEE6% RK1=JE4VD'a3{jy]YW^gR'fTotL|o!a(ƛ[J~S<\E=7+l}obݦ@.K8.{4*Oni˰h;SKB&4%81`d0c?` endstream endobj 82 0 obj << /Length 2263 /Filter /FlateDecode >> stream xZo6߿"(P"=p}hӧn*bk[>lz Rfۗ>((j8ߌWo%ٕEȄJ\B׿; ̯bzRݗ͵xY4%\[|TD4\{X2c_WU'.pWR}l:D<31"х;'Z'#\1R'\a*\ _kњ(gn"V%_xRR"s,LIl,+-\?\oCTIHk`ME;'6V9}NNO WzP Ӟcf!hAbQjO=W< 7٣]3G_6^)aqdLt8u|N/S#4BQ xMK[Gb{q jz=fqeS uF`iBEx`]s[JȒթ9洺'2)?ZSk,CCtl얇dr6$60la =㪋`vv[9h:+u_)䨔H%qH049m@q/ a R` 8EZ8֦C+eL 6ģln]Y11]z 2ӎS2+1H!%&<'B4i|yLo= pJPz0LةBU xZ$P.g`}Rc̽r!~Y8H ! W<m9 ھqJ[E!=Gň(R@N8ތ`bW W VeL[˯-vPa)eW$ ۳ؾx%Z3V~cMXJ+g?"]2sX,Rl"?'ڃ?dz|/ѿJ\ȟi ($k52wV;Ay5$CkÂTOqL¤E1瓰g:wl(E,)&C/jsL4yn}mnL&S$ u@X*!2${?#h]4g{+˘v^ [53x .7Nk/- Ѯ AɮSfSfHsl=nNvR,K`I6B-bkײ[c | ml+ y\U$Lh2zrN;Z{M%U'ASɩ7ݦMn~ع˪TяlV7և뫸Hm8;޳!UtTz0iqL C~iLdiVTm~0f-<:%B ?CTiE]ʦ$!mٟV% lHMa4i([!nc8`M/-,#X}ڼE5y[45?*ɽtVe0w+$cَwC(+ߟ?pmMXwݦPtRq*?#:|SvKlH}c!|TZ"bĆe'ᒆT{RYYUBug[|u3m\\Eqb)<ۯrڰCPt~g p-lp 34$q޺?=)QsJt4|6`)c9},wND5-2> stream xڵZK6ϯhad],Բm9=N#>U"%){P,Sd_}Uh;Xy|g3u.t=N7}QR\w>?h&vU *˙k/a%\/T!BW2AJ-2YM.Rm.CU5]oOûтvBmlq-ˑv(rC)JVg%*Dz$SlB!ړMD$W=O~e e^{‡d}N|\f܀g'T;4{ɓV){C^bЉ&nG_}K6uWeV6" tNf(7|S&&R-ٰ{on4xuz ca&_jg1l^H3l]Xf>-+e&d#&E "oź:qZ3/AԸlcٵhoFMlkb;$H72ՃLR=:h6V^E(;iNV'K M@[Qxyɦˁ̄`gl s-O>XRy0nw4wU {R\.&2A&jwP jTRU:a{{(b spB3/:͕_P͖NZuы.IitnCgMRC-AI[(㗖]v.ڒXX @rWl.#JXXdn[lȑXy5rXfs6z>bBF(¯QF3e@&X%Ȭ>Kr_QQ}X][R);-ŨɁƙm4yE\Jʸ YS1yd~-R~dLdiD_/JX.z+Sa9p U/X7GX+~vybl +Ƌ{Sry⋇+nhX |'s!HCz]:SnCX;]yXQ"\ \%V<^2а`n &DhRY>lT2/Q'==[YboBNTOyBF!<'e}~bq=C::h|%gRs)Vv.N'\i}"W:qt>9*l>!t6?vLOa״̘U3\\gq\U';Y!R{Eq݂#%DnӄSjJqh>ic,1V kl\3E::󪗚v&Rr8q=Mrp 6$  =sArzф X*e&%{s(<33'~6B]zlYM):>aȺc~t푐 mhUf\6"0K"vЋ͝kn5N;o ,noie:{:?y2k[.:LeYyYpu2`W)Pwת\{D> stream xYo6_jV{hamIqؖ+u;$*lˊ{-SyˋTȃ<<>K R!<0˟^^$pY`g]9\+nZ}M*k)*qLHV3-" S^TJ9ٝGD7$|#`ܶm42?t[y g<7PEm ޒV<|>1UҎ}95F爽7T(!^{m5#?x)%B]] I};M'OB~ud6'd6[A`< XTasDm+|MRn1f+szv n6[kc 10fvp-=}7ץJ@!1Ѝ6Sm,oI5OkfQ%mo~-uQy }u:6%~ /yɱE^+1E^|wf8lC&UeQtޑZQ# ?q!Tq6zNZ|ig4kǥM_MH 9 Hiz,H.{/?u.']C# 
gKpl;#HeW9$8z402bs7hVlF;}Vwda!ڥÔ]L#? sX:!805ۛU Nw~a5ST0 Q,lkiʮ%Nɏj8mib:'̂uQg>g-J2MTxM:r'ZA{k.o9G72uMq ]5M/yuX5[jnƹD9Bd Nv1X 5td: mzBǫ鳄t /zsF*nuctZv_.6e5i+hnM,,+oZ4"\@ :ז}8/z`CgKR cKuQXC 7Kr9( ]nIAboɤm8܎@Zk+Bb*yyjU8:WW`t(C& +޾[K.Ɠa2gvX-6nk;?CetdezP8ai]}!".V6g#.61 E6$x0M9e;uX9Zǂ? i+lZ 2Q Ѳ{ P;z˰dM[W3łY$3Y8YYQ> stream xYI6W=y@iIhPAe[lM%;i|FH3dYǷ}oϾ}3z5b, $z9s_o}#w*!/+=_[t9qΉo,apNڍd6ӊ|wjXn{@?] ?uKef? $}-׳#&7MR޵f(Z&|7*xSeZa`Zl?kJF(5\;Vcũ&$VI_B;]%D7rhn;04JkvxZ_^݁̽pӒ-I$ l-- Uǣwf-?ЛZv5K B*ed6#^%B $<> ,B]@EO ~;b`r߶B'jeM]}Zўzi09_[PvW#Hihx/\+]ڱ-JzW bѹÒi!@Jң0 -м( CuLgk(8DLc־k?v6zӬzrh j2'bبOKSM퍘;GrH*?A $K+;.rnZ2$d6yVݤkO278|:ňɍwГIʒLyzkXe›,{ $a>BSܚEzƖT;JLv_b}~SûHӽ}Ob 8s^5c}k#ES R+ϸPPc( ф_#YO ;YY-[1j~}tYcoQlW XQ4)8Vr&M,RAdGb0O1H;Et07<7=\t\~P?+TvZTᐊKp=&d9Iqa>m,ՠB㤵~6!,8iGk8.T jxjFsl`Af^.t1%Hya%A2,2TU]S4q0v.2Q`Rd-T g7/ƆdG[T\ݔ./S:8*}wx`@WJ%1Ҿ3Rg "Ǐ3%67GhH)G'T\jI_y6D~L!c ߊ0RWʥj\@O EĚHAT.Efx*!H˭ܽ ;;CeCZ]aް\d;"r$&^u˾6<We\b&QkK%z#ƍdX8tmᛜk3͐ti +a7aXtǦW mJ$hmFA[~ g gu+7L?ͪ1֪*\cԒ)mI5c.S{jacپaM5Ճh3>oUER.PGΑ**8.>5]77R[rec6a*'t`kyuI1W((")(L)3!aO ?e .!j]!2tHһP=xYvQRь$;/ (Ti0/ Q09?\Y~z9ZG; F*8 J&$y=.> stream xZ[ۺ~ϯX2u+ЇӴ yM#Y{A|97eݢH$Eg|3ɤW* PW׷WEjS\eqYzugٿ' 3]E4ku;S~6crvmߖ8onlvF, H/LS諨¾mmָ Z ^ww0{]yƑ-Q:sc:D4dy j0d݂P] O-y_#eXez5 'hQGTl$ ͳ5f߱jT0L~ fIй`q5.ϳQz] ڑmURwFE3 $dN:h=gWb݂g [ݳ ƚUP|6-xW,9hb0IH%h 7PjDZAULnDs?oZ9Z}0Վ۷:LD.3r gBf0=Z @Mϸ9=ߎ0Vzt2Ŧm!MY"620)ҳa8)kDުk>/԰J^\h< 3F$Ӭ>RÓ/)P1Om1W(s%L'6CDEkWw tsDʂ=T=YFw-L#D?@}F`9(Lîȹ9gKYNh9[e+GIy]7Ά rjv CH\-̄y[0Q,wQD8֋ C$qcg"1)6P˶rS4p9sĔ>?Oixbg#w&Zf$o0IC `dЕ7L_. OAґh|#ݠ&Xm=QFa0˓fSS:6"!rjX֋‘(ڰR1r0_Kba `Jj(}4stl_H 0[SCt]ѫO%~9QOF,Gg8t<*] ԆAČkjf!Gk A6& #?aį9G:9|? 
f&`B3\*w~gM 8Jͨ„E&d}K'^ʟpL"F%Uo ï vHmZLLf#0weԄZU0ꉏ"O,լc0;hY#^ «Y?'_ja/oPIX#+~dj1Xwe>rC.k_ \dm-su\3eK%~tf(Ku$u%8ae+^iOcs?J[HTPwX\R=bC|r% VZE}vG96#ЗK1^X-Qd_{PN-E8C~S+\x֜R 읟7P1EB=y 2%^U-f:;5b;qGA ⨺Ϣk}ut6`$`nHQcU)N*GPƌmCy`b7+a+hV^5yҢ /?(h#x&]䗉N7uWgw5N15r3"75˴D9 yeWR~Mo`g.RSֻs°ߝz|=JFyO.=%_^cۋv]u&BckRxy.`3]hU;c eZ.b}__f+[*lM9T(^6|ם$ E$ cڱ–n^q%!e}h,•]†Űl$jr’lߞf1+;>oq!-4h?9ױ[}<qHc691I@1fs໊ 9{:4ҿ߆# B_6oo7_^Fj K'=Syf|>f8o|--:RLDD -\ݫܾY+ @cvɡ6f8z/ } ?zy%/M~n5IȉgG1qZSU\u"4ibՓQ:ݢm|@u>;UҩV}L@YfSQ$ aT8qtx|ѧ-Q3S*`냊ˋIrnB.F&m_U endstream endobj 97 0 obj << /Length 2195 /Filter /FlateDecode >> stream xZo6~_@̊^ڢŶ}:`:nl)fRݤFHw>|UXq!.ߟed"ҳo;%?>}mgh W WW W k5\ \k4ri.L徝LNL$J\n[zh*+Dl2L[*[}Z!MZwK;dh QJ =K$"5@4 c?"ӁaVaBVRd3 \ Tpnj2UiTыʢgkq-B&WZL&(W$I7 m}ϻXjx嫚[M ѲadZHY0>6o;+l;EE` 2 lR˚WR+X;By~-Lz?H`Ap+7:PEq_[rQ6 v5p!]֚\ݎ߬-o{V!d|&-rו}L Zq  4{X`"٥usNmDǃ<4UcCpA} * 3%<=}ȓ&fdl`Օ٢`d {})3֘2NjCog"? |2^_ƹLD*1!*Iᳰ%M0m%7EV]l݉H_Z'B0&m:Umt֩{OK!&ACEGڮ~cilh{:^/) -Bl+XCzxڒ3Q0bBzv@ǝE}Z0_ /66(X!U| Se2)KY.|DɺqZBq$Y$5{A+ ݇j"Mhq pylBj}D%gHCߧ=39P%,Bl_C~˛~+XT,u;J G+F1Ix:ųp>eP5ޥqYY[gxlۨKi53;Ũ;G[2Ourآ/}A; gH&"Cu-ǻ[v: u1)*?Fci邏ci6HS-ɊꃈBɭ olvH("#vbqB%mQNvqtic|cդ. 
WϯQ#{49E5ܯ+l G6F%u!+:6(ѱTSdYeK+!n|d0DZ^> stream xZKБl>.0`,`8zEigW?(5g4c䐃 ٬j~T5i'׷ƨh&U^2?W^ ;j*]MRn/.ͬfuu nzg&L3 E!6,S1ۘHtSW,qʚygۋО~/lBs:yguj*LlGUV%|c6wG7tCs$Ii3dsUdwrU0ib5``%37loEh!FJ]pv,$4n:+2SQUk?Z&8Lx8S֒5U(fZd*g+zNtZSs~d^ uFF/he%KinJI|{z'ݒnĴ_GκE^u;cƧ sU]1zY-34]}TU뇕ACdrʶqOmDfe.۹iNRz,*Ԛ~܆&2)긼HEm|]{ ov6 ~0ZJoI-v=7 | .C/OȆ}x؝[ܖ%!Ʈ#Q$v~JfKdXfڀYf:̈ 310b :Ɲ-p'x˭}]gaY O _<2|csSzقJs$qWU6ә f|u>x^QW05ǭ~ߏIahM)qh f{Ͳ}Y1W0#{Nܐl߂]/;aX{d&{~mcw= _ke/>a&Q~/Ԑi#=t ..FFrL2Oo[RN, X1Z';o* #l-YÓ\Ӓuq/y=G0).k})r/n\z ¹&YjB Q2`CDܩWX>{'L B8!1I1(QOtwnVȨ>oZie:-2 n1j1G`]-G!%IyOȾn$>6DxV3kdUiñjf#ܵ 2Bv#WSxl08!Ka#ca&_'mK;v;ZEpMQfY8WO)Y~R(c$ͮ,^rg $2]lF `_*5kQee^s+J GQmT/ۢR>|IYJC l-c@۽MW+hMT8^IH޴EE I-hhahj"&h)^9 \W>܇ ѺB[ DypC.N=Ͱg^|$y_QJ2o]ZQoɞشĂuA4R v=]v|IydmɩѶLKУ}cFeIu`AUI"ۺ xU -;I+6\H2m,a '՞"Rǰ2$-;Be(WSTz0R[o[86Hzr">!ي([{2[W|ͯ@*RYW͛jUE1u,I9=uу?ibK|03U5ܸLn|רe$o $#K7lDԜKy#g䟼SVl%:u3NKI<~>Un#c5 '!8x_GGrS Y"J&#Yknɓ%ʯ ZB|&Lϡ}Pҍ;a8v}vUrڏ߳"itWQqF²zRLeMZ}ѨY3@l3Ù˗A@/CzB?'9kgE3B7!Gntf%Х[Il ~ô$f)|oFZRƱ뵩V}s#t8M˰2lvKGKN^Db6Yں9ڞxLWi@R䃎We!g, 9j)^+[!_ ƞ$Y?o%9z!{GjoLI(Yà${!** Fuͯey$Y@ endstream endobj 104 0 obj << /Length 1927 /Filter /FlateDecode >> stream xZKoFW>I@>% 6Z=.zhz)Z2Dv?ٙ+Yv)|`Lfa|#")zdHXm6T2yӻYo,6%4j=xUpmkW Wɿnsk2mf25F&StS|ɝK!sӕ+dpEH0&.hHߒSinHa3l9c9ɸk4U: kS*dJ.)Ca:2)<sl،N!:N[*4ۧ(<a),fͪg{`-ʩ[h4 FS ]kӘڈ\* ``A5^/8fOxsvGH65.+wӶnrjo)shGݓ='KzB̰#pP;z~RXcvӭU":AҲ8y), v k:XjDYU “}[~ZAd@k(uHSs {matء{DsA($@R3qIOV"-`$A >h Rtk" N ۓO⛮TZǤOTyj8yl_CWԽL5QUGc>nD=?\5<ßU۩ n %/lT%s):CnsvW݉%3 9z,X!M[f7Elխ-Ѱò-_a˺}X[(R/2PgNЋ(EZd 7r%_ Sg*nA!&ynMd Ȱ/t/i*6RG,[gq0+FY 7sB&rroئ8ӒmgЋ-}n8jVUm/VE`4K$VGI!_Nge!N[5e/R5U!D8 .yM^Ots^c=:#QM-%TZ*=z5w8k׭sO9׾Gr pkY8Mu m\1(.YEe 2D tGQaL,A;xvr y3*tM/-1=4m։0^34||)?I~jc-a8 P u>^\-\p opg|M <ha IK>R5nV}){ʑJ zm0ۼep5Z+]z薏) I=/?Ȫ}/ (s9,X9MXYv?@vB|u"dE29yaKmxn+gb S ]B6ǒk-Z*W0vN 6 F]`a2tserdOYus]+3=  a.UnMÉؠkVZ7~·0j9tXgVK}l>* Յ~yƶs_mĿl>k6 s"%Xh]m&:̈́N%um.w4:xR}2\u6qًgWz> stream xZKFW rQ$9x1Ů$s}PIkIDifԫMzy jf?8|x}btwOwU&)8SqffwMuvZe:OO8O'{=Y|- z?n]mvR~7}L9p±u ?M7 &ܛWkNNSc:Q{]EԏQA#M3 ~JYWc//TlPe6 
3ձJAYLnq]gA'^ n64)}kl}NnS4N.6 Z ^=՗bpDeuo_Pض/p'~ qg}7l*xSQ::;q0̅=BS3~/doN-+93=S SJd *OL?dHY̷9v^,=Ѣ;wVpSʳɯQ_43J=2mNǸ<0zxe+\?#VĪy({>ϪǁPY;i)Pnͩn0,I":ơD#r=sXYXgxi[fuͺm5s9ǩqeKG"lAAL43!u.4=_}$x7Swutl(8BC8p"ƳK17Y`Fd@FO ΰ?Xba(.*"_lY9EHGҜ 2@Ve:48B 5L\9u0jGY-2hmKq>gKxsG>T 8c(WK7 "}@-I/^fg+Z֜ SR3 ,?K@47y ޖ_R6*;?M|KM6 JV:kC7w&+63&k{ b1e$hmPb> stream xZKs6Wxzg* >!I:qgzhz`$YR#*iEq/BrZ$o]g_JG*r}us{'*UjRef~?o~|ZPPk*b_1񤨗k=.Oy;8ixĄJ nGp亢SQg(J)J牗; bl8VY⼸b:˔M{^l26-u.8^F[M''j4bME%/4/ wbŜ4ϼgp75>_| ,-sIVn׎%*ﶭ'e*|[8"gkEfE+J< w5*Gݩ0 Үίh)iֹ2cWp8 :⮍xԈƏUwM P(DBUMu׹1JX /5\2‚Z٣2e"<c +7?F{YWlx[66{F!VoPL& ۢu6?>r iρ 7D83Y8?&q*u''(yb!R 0>$,/<Toj#JۄE-o0i \D䞇8mV Yq\[*멎|>}-U{W2ޱ!A۶*(02!wr |ʜO^J(L<>at;N 8N9&&'EZ]Яj]&a5椣S&Jg&,#{c99BsJrۮ< }.8Ps2BoY~6BYxsOYgky 7=@?|DaBn[׀pاhl[kh±Q~3;%4 Qj1vjR0~}Z*>Jxvթ&L=} QP'~R$FGp#1PfӯhxnꀙFX7* h]d X,qc+> ρ+'~5s] 8Zy3z1hrN=}%݉gAp -`%f,(K \V#3^YPY3$E^laͤy?J%-(.4AJpt=y:Y=v+=kbu? #y&d!_Q9Uq]4:rZ#^†:|֞(Zcg'@=xP1ҍǎ9> stream xZKϯh{iHJ`/ tC&vc^ɞF|E)cl_}U~;2)]ER ߳J&<'3j E^%n.gkppỎ=6 [ae[$"Q~ؿT{?Z͞µz{*2d#n'xv Ǫm'Lqvϼ/q-vn _窘55<}^)4n ~#M3{RԌVHӜWFXR\nTc)8UkoI܍}o_V9xYdYtqTmծY*;TP}m3z o^"r[f-,_pA-K׹罽z׉̧ uZF[}kWo.u.2楕_uc S[.TN^c5W;sD7(ʼnɆЄr=*lM|!m7"W h-2yU?~j3q) /=ٸk}~vʪZdΡu^3*3?xq*0~G5 d$A47 >ps(S(q^'TMgyqDƆ/16Fd\¤N467x {e83ﴻK.r4J'0DMفS.3k!i9W|L2;!U&c eq{޾&߱dm0It4Ij0qz]){S8l|`ǚ.xl,(Sz[5eb$bsڌd߆|c嬅QgPF")+88E/,Mg!eZI_i~uw`Z޾͡x_<ţ,&9 [aGw(*n?tpl2pmMz y[[΍cGoQQ иNT4yƒ]QV6$$G 2)Ǿ6C2 )7Jl4$ʸS#ҩAf?$բ C}pLG-oQJȲ J/uT{Йd,"Z %#s'vLO,oE['2QXM z_]T:h"\/0nƌ -a얳3\ f\0cRYrE&N 'qxO ?VLYMiϐ\sjX.^e eء=I3PPwuANdQ 5"qƉ9N.~ plMs(9MN K=~+y[a`ΰy199v9>r#3Z{:/r*j h=Vι{Lؔ )4@AФ<| a endstream endobj 116 0 obj << /Length 2393 /Filter /FlateDecode >> stream xZKW(>Ifw ol;`zz%qBJϧAi<3rn޼)N'2ETrRB$әY:YL~1IPJl&\k5-q&T6=EgHu8@B3쳅_ [w?`;"]rC&Hm<45vԐDdZ4zJi^SJFCev;-70<]x xan{^h\du= kFU9v `goas~!mkDm;dO"ɕ' V9oΒtZnShvP0p[I5*Sz8&J%B9ˬU:zjDZ5bcpQM( gΒ;pUJ)@O Χ7@|Nh%s>pynҤU,8NJZ҃enw5ܚIE/xH(LPzV/%6߳`}qpȂ ܺefэj~@TAsĵ`CqXjvw>ωϙ=IPtşoCJř%/Gc̚N%DIg-{ ו9M.C%hI5a!XzXDnmȖZ:ڂ@RjT 
gnEN<͵p9@4o4yF6%Q۝3֌^rKJR%'Bn,P=\y(JIHn}Z4{|>ZygfW}7ǭ%@]8в%Fs{sagDy$OO6wžLt0큹T /ix!( ;і0d㏝#zj&)l9K\!=c_/+e$2oQL4KCgF!' ITgBk{$͚ɕ q_8ue58]rp=I06l:f8ԠBuYhڍ}rݾ$3a|ԏ79$qr!i0<9gPUZ4m!(bg*m|䊳p9.˱6O-%ӯAQmy$"`H Nm5+HdGM2<$i̝=)<8C_?I >_cᦢƕ-ZH;a`і0IHD,Ό.(E&VlBFkm!3Z b;[*XӞW 0`(l=mJks]T=I([mڝ8-2ysdYR~0e1P:?CcM훥8*s#`,/02@xI\*dBcϳPQ,_r-+5b?Q"U.$TBkZ7?޼/@m i endstream endobj 120 0 obj << /Length 2348 /Filter /FlateDecode >> stream xZK6ϯhƌH4@ $XM8eu3iOX( ն{zs؃zQ,V}Uy;-xŠW7wWZ$G3?Ow٨-:Jl5_p5p} WG{>Pal4`"WK!S;mCF 9\7knc9 YL% 䚥<[_3Xgڛ {DDZlCEIulN3 ulԌe*u-zRIׇ耂YvF5mI:8ULKϨ(X.8\\hMteZLזL;{ʲL)Me,dEj\zY|_\WL=5N &"uo(xq%8W{o+Z[vxd$\zμ}m_=p;(QwbE '$lA: A.}L҉'NgɌ?Ǽ?dimeyu:w[pf|e /*k.?X1jjvdyKaȚIKyNBNt#HdiE,@@$=A,`m{PE;nU8vH|VI&xN <.M@%an?9_-:Uu#C`ZT35fV:l[X7+lL=vf٢ D8'E6d!͝})qhߪ;N-"[5{ӷudGq]UuUG@pBmBB tDyQddV3G3~5w!۽G`ڴۍ+G[=CҖQ %C_pqՃ(~~=s%[C:0O9K`/}2(Kd}8b<7k-4}{3.[D\58ʺ6|/X`xx8T]@a##v 76 -vAm,ŭGmkv6TVvYD)Ҥ,4w`fi|᝭p*!qI}:Ӟ=]9'BE!N`dG`K;`f" |7"Kgi\%t;0jQ[[Ahk݄<\̻ !%wBPdJc.! *\P5Zjƀs9MJZL SP| v 1&{mO`ǁiO}M Pmp;:9+ʐ ]Tth#%7tDlv H@h+NxXl+BI|1fp ,u]> stream xZKo66护GnE[,(AXKr-I?pHQ IȒ8$7|{8QgW[Q-i)KzeA3 cրX^Gr`Y^,32@)&Q9И&}w&<,= y(>Y>JzlcorU"\oۇؽ:hn&K,6^|YawDž8z?naj=^o:tQ"XK`>;[HRNS+~ PAt, >?Pr?Cf T,_>Igq&½7GGD+nO}'2o8;S݋LZ:K8OXb?^u `C>_=At<_W3ƶ=T>0vXu3P'ٸs (fmO:[CN}-a5j=gŶs'mb=dY}H1?K-ZyK9pd{f+!,~wL#搙isy9?9{u˸ Tlv_Tbj:ٺȱرgc_;ę8)5l%Yq`>Ew` aQgBʗUD vX<5^ML'Я]z` endstream endobj 127 0 obj << /Length 2538 /Filter /FlateDecode >> stream xZKW 9h@dA -i4JV<ϧ^ )kdȁgUW_UO/S4QuRWwWQIU_LY\ί9UnMѫ UY]%\kNvppZway]ff]{=5Poczakʦ9XIzI\U|OVKK>)NMvõnqZ+ tZhet~5Zo ]if0xHmb >z `j&N]<'PֆѱJ'廁d"EWJg)0&LNLi;6D܏Fg/ M{ ++J6%TWE%.+Ŧ_и SW lKCԨ\LX% ڧ܅ :5oXL̚vw,:^ʵ]YiA2zPAz=NѪj+PFƺ6'?u2cTXncm*3#~6dLN*IeځThh>E%Pҧ%ҺNg3&YkH?ѭ2uhzZ:ݾ}A&21IClZo+FEoTs/;Sn[4)*7Yot+PP, ˆvT֮Į`+ⶴ-]% ']QSlҹ;Dw!''yjߥ sJ Q9;H2 hçgVM&1= yzxr@N^Kj6fƚKŵeLS8^$ť.ߞۈ ƼcOG'zp0|Ft9vEw/)_7x_cUhG[ŇMuJrŬB{:T kKS"6(o¯]b#lv$亾';Sj] \"fJ~u>YN)f,}DO+$|R;`ɵtLwя GÏ >ϴqi(E ?b@SY݋n_m endstream endobj 130 0 obj << /Length 2819 /Filter /FlateDecode >> stream 
xڽ[s6_3JHv^n;7啕FdՒ%ۙo,>He+yP"b~\^ЌTYE/$,. ^^ȋ뛋?'_"|RR"3|{9O6tnzv7.]rl'\2M{Ɋɣx9\tp%'LQ\~ci6KEsad[zjl;"ysߧ'+|V1-𜰪H_S.҂,S#ZOK.kUwQX*B)9'תi||42åzdO3@ ˢN5OJ \bQ,ew{g̒+5|~BcR.3ݥFAk*˷['"Є VڠkYO*- " ~~ɍkiN3Z݋mm \j6lXy۝]M !;oȹ{)ICBJͿň6D5\8#|HQoKz僖HM( dAcv,w{AFLۮѩSd9vt돋D 3Ֆ+>[TW಻q06Hx`HR'x$zi0ko M[)Q3Wrh5BY @WEAJTgW͢> >CkBaFiscK\ #P1rV$ᒾt`d b~lZÈu[s(UKߣHX6Dq5d,jlMćмqѫlpsv xcL*P)/NߢvP+ b|T!VK' 0(ρ'-Fhg =dI䝴HRHN/wuue-!"1V؄kdV(6VY6E*S}_Wlv ;YLˊTyaYXh5ƒp>1 F]: iĺ7^{|:I!|ۏk:̣׈~ߵv{$pp ehG=AA? 7携8`޸ P:L0ܹ3lR4؍ˤJsX!ܗ~8zض ]3M8e 8U몙B ɍB3(575)SJS"r@ )}M9F;6Ť^`P\V[Ϯ1B >jnYZ^'MUP+<R Q_;]h&+7p7kkLzvZxڊ.:[\;G pZȗ*B (x;V k:U!~>:T2R:@:Yƶ0a**F4-)[&UaE9<<_F¶sS H[xZ䓿 |D~Km]!ZoMB;EIHELW-cЅ;n4Z_@Q#"=i䧲&ט|ln.y&p=I ࠯ $r{yl}eׯd}U35ȭ "Nt?UCx[Y1#J4^=` &n{!*=dE{~,P [Y꺳txH6af9LLWXh vձV0x}:w/c]st GH?#zJ׈rv2t<:F$#;IԱ}?JJ1^-Kej]=TAg{mAm|33= CɡFәnf#N 3`T*jgLcM@\H :ޗN{ܕNSkһ:30">>l3Đ~u`Ro%MeQА4mx*CyCog1pu+ݎ`❑O VSȦv@uj@>Yق҉t :C8ʴ l< %;QvlW8} 3G{hu'41xU*x:oއ%x{US`IbN_cUDKƢ`[b٧`FfmL٫(c +d͙ ,O!V,<ؘTҐX@Ql(R *&sbdtFiB]T~l>0A1avm_zB hxs[Q;~2G Iy1tHYHWZwЁ}tavfe|KLd@.5N`A91̌?O$NLȩg~nΝܹ#]pB;8n/ŸHi龐0iֺmkMzs V>2: ':2#MvXszp ǧVEʲ4ry endstream endobj 133 0 obj << /Length 2263 /Filter /FlateDecode >> stream xڵZK6ϯh9CA&H0h,@=Ȳ@ۖxX,RMn9-[TW_/>}OYeK|Le_~Ye}ƒFc 1@< EUDI)7^(IJa^V;Ԯ}T'vqoWZJx*5B>< 2Z U9A) ob)8$Pz[Rhn w Ԙ5ZLQs ςpηLݚYժqNjWvpw+'fev>VԬP ·k= [dbdTuϩUmV`Qm\:'!"iksiGfI5F'ҫU˘]|>`Brّיy5M'D.`lv@ T[mD_ƃ 3NQ'Yb1a{arU]X;3?ُofa͂o|w rY8yf??>5)ewګ/vJcViO _k_IyQ_j4((cigHdv/Da=ڐOpNpbuj% yY&^kG$:BHp͂a$M x/R @ùn|?Q05qsgJcXK)2̴cz߿Yǹ{JXmyб%giob{2}s ?wPDnZYY>Oj[Ν۠U(=˗!g,/ğq9>sS_ܮL / c5O|~b.B{\ bcVE筘 Y2 @ q\2XV{]]CG}B+c/“a<]W2yqEٓ G[9fr@")n.&|["{G V1dYz {KWcrS&9w9Pw& WF[riQ _<^m\pݐl l`@g˔R{OKI5kKhf +sbx[Ri=Aafuعy}.f(# Xŵ >iBn]U"٘[xI6"~׀~ػcM*b|K1Q~0hWjLӲìf5ll+E*x&F%o87խq84%tbZ<\>؋G:>:\Z_[VfxK?xL5*{yw@ufl0< !q=Uߺq. 
߫FJVͮ^ ۅ$oqv|"=»8;`,ϏBC uD\^4KV~^XʄLx:P]!Nv>].W- U{l`v_`>s3b qo9Yf6d@hث$V ȒxT"覊6gOZbпZ'rJc wQJz3 l 5A7?_Ihlj#~a^q& J,߰h9OTo47?h3T|CZa8G?3 BX,Ԡ=pI,ʔXjs//5k endstream endobj 136 0 obj << /Length 2302 /Filter /FlateDecode >> stream xZKsϯ)%W`IpksNf*In<^f@KXT"5T|aȖ\@S_?&.>,MT0*qn2E~6}^_ wp/Gp)p0i9k4혪"-)8Ÿ|lUkQ]uwn.Tvߠc =.ۋv*sQ1ƩMUVP9]ɂV]Q;b\ b¸L,LV*ǔ5F8u1<6*Jh9va}۬9Lgl(~EiMLm /ޟ3mH;{:B0Op]3\ר*2f6JxvFec WY>!J sCU]ǁg˃p+lݷs=xznw|~pMl & @Cl繲efiUK{Zz26{I=0[&(m?%::wNuw(*b+9~a> -7^̩J wrp?DcJ}1mQso9ʭt A;$(= (%(CVyBd= ,PiCx][+Dș¼e١d=sy -Xj4WZ*ծz2.X-[Yi{Ԣ{- YsrcRwg<Txwx}&.`G"R<.>qͫy/VnI$ b aa|tlU4h8Cx)͐$"[) >=&bqm7$$Toه0}8 5vӌ>C8u: IoPܵlG֏%H3DԐOA[xtt˟H}e>1hN۔w3gzi'|Z},Di;Q+!;/<=)$c4YKBabx͑G/9CH W5@&Qދ#bR=WmeJCz\u D&2 u哾%5u9,zx^z_hV^g+ihm*wYEI߽F:tw˻PMiK5Yaj;Y)^o;Ե4Adxq`;yr6h1Sw'% endstream endobj 138 0 obj << /Length 97 /Filter /FlateDecode >> stream x31ӳP0P0T06P0P05WH1* ̡2ɹ\N\ \@a.}O_T.}gC.}hCX.OȠl\=% endstream endobj 144 0 obj << /Length 155 /Filter /FlateDecode >> stream x313R0P0U0S01CB.cI$r9yr\`W4K)YKE!P E ?0? J!DH" @ l%r38 H.WO@.E endstream endobj 155 0 obj << /Length1 1834 /Length2 13080 /Length3 0 /Length 14231 /Filter /FlateDecode >> stream xڍP A %kpwapww'Cpw$Ƚޫ٫{]CNL/d2l"L&&V&&xrr 'kj@G -"@Cw;Qd r0ٹ9,LL\!.&YO.sw03wz#ʘA9@`alh 5t2ڼG46(-NりɎՕƑ`OMpp2(.@% mOP1pK 2ur5t@[wg[=:@YR o,ws u,l4646ں[ؚL-yq'7':DCkGл; B ɑpf1[ D-}wgrlAA&alǨjka .Gft|fbb`@7cs?T2!~d0}/ma |t4tޞV/gfX;fxM`fz?ft'dk+fӒ*AnOzLffv6(Zǿl%mMA}Rv{^jO.@Ϡ0}f2~b<?HO=_ߌuvzY._:Y_.ٚYn@ 'c/fma T9Z虙}ޟS|_ )fk 2cXog':b#-^7Ǎ0 !q0AF"w4q_/|O.51t4 w˿9;b/`-ӷo{ B_c GWt;}1xo?x_F'sJ}O/p|O?sbZi݀+ c ˺!|WI9TjzN秏3GPĨW_=Z>%*?{+/Oc N Ыz{[AwK;s~TGtp([Y 8A|~2zr\3Ռ%Ud#%Y^4Mh;{z/Uibq&mw|_~0H@'mcN(y)8F7zjc2뛓&w?'䳑BqkQwvc =f4~W޴kbM$ Nj,a- GUi'ssY]ekhl@/s ivܠfUoRR6sItqja#(ʷɖE&f|%A)"ѵ(OZ]tuĕf 3B78s2D! 
M9>~'F\kr-AwڮxpZGŸ39]5&_R(ZhfUL #QWc ?}5cn1EsPiJ*EluvaW IӞ$]ћ*׼YO\ozk*M-ybLIi c=ݣڼHA@SV=ĶWǠNW12(Fk5-Wn jI'v"G) PHb";9ދ@ۯŻb$E^'ܪlDT,P,՗o,h VGQV^{Lv!W$ vF/~={uUfc5ΜD T%b;⑓%,ǷCf'% a 鳢505!sZPjS6J&'1 D L a$K1Gp6[NjBѬ-#ݡy&TmRȳDyB3!㨨xvT= [ "E{°Z9ŬL3[891dR&ح0|`4U븬I|1';p۠Kwj?KFe94#WY)WqJ"<$clFPU!8J>]>ͥzAǛH=h)},pCnf3Bْ#h$ŲеTO{zt>ظSV!])ۈouFi1M!mx.f;5z1g6o)6/ ]{сiBduUgGBLXS-T%7O ^cveP-,ڵI'C>oJq TCl raݪ}z N5Ue{UlMx 1k%6Km#atSgX[rk; i8a :1Nc<шJ6}U9Ub1:̈s3 it0jk.U3))JR'.rhN ^aKϴgHqZIêJ,U=x@N>JrD",<\ؓL[},^ҐÑg~.29%z)J 8:tdaE߯DkPal-{N:qg$G= R`8@8]:WK@ڕ ih$ᙣec/b!7F*ԗȹ!'.;^Ni-ævuࢹ> B[6 S-#ߞbVᚨ^( U<^焟5 מ:Kx~](xjoVA['Tׁry).; x081,:DB30C*z圽\})!]颞͚~G|o[!EUnwVHUZf Be%zu٠NYk,6O,#_|2g9GmQf\Y6Q{L.E}}0c+c=~ݐc #GN&\pJE<* W4UW`TgI#Yxh=Ov}/>سjM]a)3H;HEyu$.w=R,!>Wlj>\G!&-p 7-O7zt I9I߱0,(.T)&+ʘOS+'̺tZ#6fh9B1HFyC^/<Iwh 5$_͢Z!d?yWE1*M'0I6S{AVIȚ~yeV."A'-;>9E{ƶq VSX8KOx33G^+;\%UCL@!5Z/| }+%hqz}62e~↜ aU_E&:MٔXʮbf4Z[Ǒ`]Ɉ|R$Zk] Ң#ƽ?Ox5pp"~  .TxLH&#Um1V)'TQʦ&nz3Y#o! =PCa 񊻜i[-|F.kfݞEG2w;%ml4LА>\Xj~`QFgܑ}DI ۽;LC=EVm[tXYq)ݰ6cZF>f{hۦ_ˢu"Cm5 J+yхl]P Ԕ./lSbRj\6E<aq H< nzri'N2g`>$-֏ IPn[ckK eհ?TF}BR(=Cgs|6y/C|Aξh/s*nn&aոtDCQy7iĀ`nc.SpD)Ĩ£G1Wl~DږZfY\ ܂nmX:ƆUrTߓ%ݫJ5 Cj*;ozk l| Xwü"=S mM62~4?D' TʤDboz( *Խ,%, fE%MjF3M x TNbiKc``'Bwԫ5ZZ{};wyrU݌-',߫qdѵD藏?9t4TK7wH^I,f72H V"$['rPfۘ&"P4FvJ-fKQ lz7rS~9i<asKo-yl|tp~}X;gw劺nDo&X{Ў njmCMDXؿTmQJetOkKan^B.)/+ u '=]`6,ݎ#L"#TYo "esfh3ćąBm>"<iU\=) #$jYg %!uI$bR%MbY=&Z 9!;µKvr7wVNag|/6W5?iS;ʙfTOoSqN#u+}A;Aqb3e?;_1ZgX냂#z_G2Mj .R'.9u|1 *s`)Hx+X2Zgwj0x0h DVfOα|AwB[P`8.zl}*lnn7w5O90PfMG7F_+1KnH@~`Tl671ynՏyNhM+xy>X};Se@f1F$~Ƨ蟉;blLInPFi:AҖE^3i>!*$v{.-8esÞAPh^SQxqخ>a(/%WؤKSAEYs,!Dgcqj]vA% oNK\Dc_m۾>b_;Q 5{''zT:ju]>ݚp(> "dRd-pY=$.G_ ϼCpT}X)c?(-TO}bR<$Ԓ=49v&hpNKж`TZJ}d)F7NnOKYn 9W3Zf!k;tW?Hħ@8T@He#˱?_m;WjWjk銡NHYsԖx✭' +\oyf,L_Kweg?lfXa/M#‚ /b* ﯱ 堲pםqG B'w7-r h$6.I>Ւ5eJdC@$x6HN ^mu= "%@aw ?j)qn&Zw~ƜHKqQh!KN<7:Yԫf_il[ @e}`%>z'i0 р h~& nWL* &1ʺ^eQ2S>ӡ$)f*ށu.#s&,qYU:)aS'U~Lͦ-òf8K8m 㡱| _=@ImH椅VEa2 LR-]1H<2bBAi/vߛ k"L$j d*ub $/2(MSm'KGm4b7˼E)#88B,RDbo^kJܯCnH UgWVFyrNYOW.ٱ*v 
z1YOƋA>ɢ:μ?Ⱦ&5Bh~fJ˩K^/ȹ'\DM8D`WS`4t:YQO 7 ֶu 3w~>@N]Tbo_ WlΡ-\RDǿ3t_㷏 RHNa볹fM 1!?ݳE^wis~Y _ٚݞT^,)8N9~2P[h#Uqt%H< 5+=#npuq^xV9fMzVI |sv\,"Wt㘮 Α&OM/!:Lk_~@)$ZxM4y3#" qSg8km&Nҗ.g4xvץIarFnU^Ϡm(Ց<̆(qi &\~KHgY+RY=ߨ&} C4sR }!wb۬naVz':K+B3@Kk"X3_m Xf&Bi! &P/`!]=:?$2د ~Q[4]p t=fzzxll}r|J>=P؉U~&2=\U|{yR:g{L|kHulٍ9th(,ƣgҚS-m Ohd@Y-\oa u)5:>\Y *PoV?=ѵc.jrhl J,hmfM'TՒ\:|Z-ISB[Wa\/nl4 x"=rMJԉB!%&ȝF7[.iT%EVDq^Qc*9s.8mђM PBdDWJP,Hsjф%' Rm/XfU}ʋGlHPK` \f~c . :QidQF >CG>ľZMnvԦ5~u^ia;9 t&e9Dˀ5OAPMUǵ۞V}'2gYƜeR$vnǸROTVۿ#U}Ơvs!ǮZD4uF=G㻴4D:M0NۢEcDK ӣI3l az@p)O!Zh3v>+kr,A Jk&ES!Bzwa:G2$;kr:.%V|MA`f΀zn8hl*8xL@⥒ ]So@Z&Y;T߷ }v9oG?Šs߸kI5P M18(dҐv!r>{hx1҇%OÌX6Һ\ Vb:T3Me_4AMt7#Z 8jq;K<F Ŝ:[rT92[k C_sR)[ӭ%#&GvM4vlZ4yy9Ų> `` ;;EjbTj { qmWfC`$N 1ʨM.Hi1'i",u:pb9f q-VcX Lxs3 ѿeƎ$oNC ZzjT2׷W&b^Б)lqoNXצ^( ĸo*|1O 2bCua[ ̚VʼnuZ)2iAfgLwdI )!DHAc jpq&pai񄑥0O!)×'{/,j_pBB l4po c,MnH 'Żt_f.y0/ IP+)+K+3}VWf HL л׿PeEf?4^u5Sx\%x8ژsX {y>"WU'f`|!9WUc +.F4O:_DL{%|)ܟƽ, 68<%V|FY٤ձ<+%a~ē˘{hZQ0)6#{)jCNv˛^|WVcέqRGKڃL<TToQȁ# Gc 9e_ QFa-Kf+0@?9ՙ/>! 
ȟI~dh߭ .0` |R~ BSA_'č-E>.ϓ` S F-X!TsdfqPjJx_o IK}k B>\D=CGWBq#.Uw BȖue]hJMf(€gp~6c7!7db7"\-7ޣMR@xwϹa5wb{-")qPW =W۳R.Y$4?} vf#pLS,(!8R!L҇WL/@?#[9> +iyo@AQi.2bԍ⺲@<#b(]FEN3wL 1[/cL3p):Mw?u֮p%`L%Rh"󴙨3{2%$W]}o (L #Þyt"OMzOƿInuz<ܑ۰ ayhP}?& қ휹U6+r;q֥MB{?HN Նc5F_4~:[ k 1fx`͍dN$L Y/Et6j~;s+ORRQX"p>J; 3@ɋWY.&,080],8/헣=Ek~;B!AY]58Nnwu-$Bwt!nɟLj#!նgX4 *L,qu9Y7ڐhuLu7/x10}(GOֲY}Wʽy^_98AAn"u8;-"%gu^8=gB_/9RK9B8<'?Д:KqZC­G`ֵ Ιǵ;^6p*E(%K'!=|1P${ܵtbd[Ksa$+#"9:JYLY<y /;u(.ҎVp׾|HО~!fl=$i2a@2xd;;*%  RU&1  rCE}oH( :D )]H$&^V{ҒED}rr^,/K@(6eIzάp?}be3УdWgKfѕ_ʍTjN"ëׄ8-asAZ ˂C/%p9KHb\ RAJC3"t }F)29Oc:pTS $2Ǒ\q`~`@ش(+M /[j;ĆvY CfI 'Rxi}p9% ZuΉd)(I-$AАDze퐪Ϲ#\#MmC">TgX֡Cd@d9W\cXZᭆ~^IﲾT6OÈao⥏}MxZU-FsT!v^ܶ0(hK\!vQ1}rHNQ|xRzKWV!xO9"ɟj04l%+Fz2j)3O}x(boh"r;%<5NOw o^}'.9Y0bm2mv'EFmTLԧ܎玸WyǑ4j @}IsÉNfLznɼN$&u$c(,b>3P"&:hurnb_ MF!cKmaW/2Ň,5Txtp5iTl׬!=CXR eNdEw!BFlt8w}c5 H8>|kkvB?VGu[kk|H;ݣ7k ң_Mې N)ZcNRofΟfk+l]u(zi endstream endobj 157 0 obj << /Length1 1883 /Length2 12077 /Length3 0 /Length 13246 /Filter /FlateDecode >> stream xڍPڶBp'k 4@$,hp Np.-}UUckTbr9AX98*R\n6.TZZm?vTZ]W4t{^U Ew'7O_!@'@ WTZi'go(>,,$AP%Pق_:ZZN``ussbgd:9AmY`7[&Y t=*-@C^ `K%b^j _>'G!0d3  2 #/R$5  vvses;1#e^Yb%O Y7ߗkqY!Va̮dy1cx988 ҖΠ?_fuvrX[^=@7;ߎE+dS ?0x'㏟~2yQ?,#)%err89̓\@//kao-U ݘgOTe$ᯀ?wċr^@e 7TJ99X_e$!6=F dvK.uX40 irrpvYڿ</zYm) tc˸x@("%.^^/:ZT1 x`EFxBv ]. A\v]HR`_X;_ /!6/l_/B_?_!/O/Tq;k4.bI^/? n_BAӿ^q|!z  }y^\@^ K'K0갖JIRO֭I-TFVyh&R2cEf J2 {qCRbw))I,Qs|un}ɚ(d~.~p튴.깸7=^5?J~ ElilW)=LNX|&Btc%Gf9š}PLdF?5\势Y*r !6$"MQ$-*xS`Mn:] SE>/R286H,Xkc<⽥:To6"‰GNKY8UW_SsI"m. ɅFjNj'~[UHY}W(; kUmD-#s^R^G4y"lH6JUXT5K z(uwC+:T'ϊ ԇZu5=U6| cm֮טSgfخ*߯K⽭f-V_w ;R_KTu0*OLɌ4uOŘ}o@Bd-at'jm'aϖЫ~f ̙a^M%yDJHCpD Eͥ*Jdyx-ߚ5(Mz'4[o K Qï;0ܘiQ)؃`hAgIY` Iu~MaLhOn Vy@^X>( c_ئi]o)T&H6tN'1VQXn.|D{ n`M&@̿8s1yNd6BXƴѱSG3QWƐ"'@:dFm?6,A5t!D$&]9BmܕĽ?lw$! 
+WSB'9Fi4YVBc`K4G{M!^hs<͚}wemO8*(EѸւfb{r_SqSyZ|@tn_ (7#Vg9^0E>,yt*MKc:xv3' *⇘ᷣeauuCXDjGxzQ_i[Q[3} 7H $;V3"<%ͼO3+e(^km<7eGԔɚ74pF<=>z1YQf uII% 3#lg9trZJ~HxR1,M4~M.I(1jp7gUl؋fA>wnh62%:}aq{WύAjbYNqEI:`OQG(qgU2F'][>O_I,Kr!,u w~~2_qR&iAn 4{aB|B8 pLuT([na1|ﰄk4">;{.WݕZFD-̖QCExOд}\ kݕSTJڨ(M]4mF, xb|e{3b #ΙNVI&EIkpVL,v!l^|NƨBuJ-cRZK܉F\g|1/od2ƼQh-#ҦV)Ym1LDE.ӊR2Nh{`{Q!y)O/0LLX ho0y> ’Plޯ])SB]p 0r9PyNZ kSF*HZnP-fȐp2{c]j'ĸ0t&{>]Q 4LƬg`1>,ְC}sll}BO=֏5D GuuI56,}B{Huث3٬ AAr7m*j 2MwH(m/uU]W&ϰhBlc~ѐͲemG;V@hV6׈v0?bCq3yoY%R5C]]Vz-*QWy^в Ɵ,Ty=W)eN;AW!)dRdmg>I]xrY3 t'4+ B$ϗU:vHh^=/Dɯy|A7ZR9kǺ \=$bpmT_vѻ*~ormm&T`殝 <&3mOF~jl>6k8]ݓTNrF9lA$9NYQJ2'[Q{yZjɚ9VG2:qt:87Ndž)5i3hu#Zr{>aEn{Ҋ|{"5q4TqX<'WYd뚄qOZRE*9qXY$IyMJ?0t -"=-Y$ZOWe= _2iVV5]r[7t|H[{&M`C?;Kƾ<8 n%r*| n$5k;Gu3OT͘yO3r'JIC}w;tmgXR@= Ȧv 0܎/ Gm S{coEZ' 1ZH޴\; vwY`?NKaX!7< KzP;+}N 8 ֍0#Efq+:Xw&qA>)8ԈzFJQgu>;Ie$AL0|˵-J.ϮP͚ݓ#2C"CC6EYusrwCOʋQ^qM v_} SoV}WsКߔp<͏Y ͱyNU&MYR1 G hO<*FF|6cGR\"i 9aK#䣪Mfgc:؄do'Ɣ $9އ *0u/{t>S!"1F©NU窋i8g<9mש^ۮٺ<;=0b6|!ץqMx²bV@wD+Ҕ'Ё/5JQ ',{NԤeBU Ew;lW"}]ķh-r7h-5k# -Lg9xx(Uʹ?s֣S1!+]ކ1?pQ &Xj!}+T$;9{-"k܈*܋#ZڥoH\2[`/}!Jm #90bcWIo۾NOUǹ+Aalr#/A*{<'u;M!SWϲ~ wx4ax`.U1pAf-@A`ØroL-5M/N; $j ~xXI=9i 8jL5!,&PVs@h?v)m'ȞK>BdH*ʯW`=ݡCw*s;_E,a,EҜ O:xLxڈahk¬݋>u0\.ʖn'd*Qqf,O>:OFwxndc4,{qQ7 -򼽭̟(;^L{EWXt{aK|O])~2ϗ~lڦI-)fNf e#h)}υ&aeHj%7aAʬzqbd>}:(9)*XLX5{b㣷u3ռ=c߾T>+:F}YmNnF1.#"ة(,HM Tڃ!Һ*-RhLolYP(+7i:TQwdԬ酌 kbW>Oc]mn&U35?mվkBƒaVi S ae=-/-~bH?ǡ),V|NqSs$]0FֵAF1Hp%/ُh)k16Uj$ ]eub$&: i, h$}WB"^h|L |#Z831x0RΔ)ҹr^Ou,0vLWv1_FTUF>@8 ahfE}Fm| 6mAZ'hєDRݽԧc[#'ψhN}eh:O%AL:]& |*]Vd|մoWY7 Νod\ʫ9,! 
(WSpOB y #)8kleaB Ƅ<-[ B)º~]mt;&&0AQ>a6廞򣍲V+PaUY0;d>Ģ*ܺ}Q>ff]#v>w 9ZElX~ }+FJ o*<,Cp~Cn&A<b^?/}B~[ v*r4Zng?Lu%Kv[&&}x3 z6&t0A{yY:#om9&O40;)N )z\ h$U '|2 φLcCr(hiSTb#Ӊ>1h[7(پ 38<~+g=Ҭ:F x* t3̗݌8۹zވ7/{Sy_fwo=һF7gڎhTd$NG-'@1̨I G1:|oʱ=۔*FJr^{cDEHpNd7 YZ0 r}@-(3u3l: N{:6yqjpqE03^fH*k@~k(Ʉlz@JE8,SY2𶂈Ԫ:d%RIzCN,I;qmUtlާ#_C`gA (,״ffj+X+cHA"/";K;L]Â~^B3k"]}4\ʷ 3\*^ld8- R;p ϭ[~g8x<Ჽal,*K+n:-yBr -[n lThJdW !p1-68W+8&zq-׍{b-1EC{#Apn$h?v{C-&&s+>Rux&)K0^`ouJPsK "bωMT^ }3^BF|-mugVNS]DpC3p5W/ y (}5P!CY)ƱBOoōL'X;>1so]룘0;g)WN׊Jx$ORbkr~9zy٠3}hNr|i[S+,/sIkJWCfVWto<k‰B7 nr0}-m#KuN+*]4ꠒĻSCB_|RIr5I4HCz̈H+ fϕȝ(i낍 LNx>cGTQD '2@:W 㡑n۴Kle"M.CM<-}n[J;Џ_{.p:NJ4) :3 !dM >xL( LLJw5^aF'+n`߽c6y> a׷D,QU^Er6F?+\78CfQm(C45DgS\#LV*=Qm ϫN5D\ aqvY_!_H/:\xJ%zV݃w`57W:~?V[!>PxoWSAnfHқ5ߨniEV._u<2Y= ^He;8 mY?rUՆvh{5)4앍&nLrke9873[^0(`%/ͱS.Ĝb/:K>gzSNP.684Hk ͋h⻹Ch{}] C|WsR` k^rLYU̎F|L@]QJj: ^UwZV/v1Lj0Ei.Î*߽j&c5ؒ z{ۙ?{Uˋ%"P zr.\2 m̏(uz[ ݖ G@Aph=#T15VzdVBnѐNa6 *ʑ~nA 1B_U "Rfvrֱx.;`j^DJ_µ8lybD$7!zf{~];Oiqoxn ,{f#koY&[ck*JiDgdj*ؗOѵ˪z^f|` y75P/Xk_U/ȉ p(>=u:0fHoRl*lΒA3{`:(pNII {{&OJ0[; Jݺ(|U=W R.ŐyE2'*3q [d>|EXč,SWx&*r\"uDD QKLL>fxҝ?5$ͨGPegJXkf }q['[1I/]갢mهo6D÷" Q.cklf?dCF;.E )̼l s@Td&7n(i94;*o"BƉZXi*w'Q,~΅߆WxAAMZ4 uפ+:" 6S;$0"+Ivq~OԊ :a9sgE᷏\5eRe6>ߝޕ,q ."%ԧmT3yIY]KdD4^#ds L}njs.iVz OLaYL&}6U AP|ꚽE1VokN%q6o1 &˷ym%^d7o?)|v. 
o_ D}̐1&8I-7fətX!HΙ4g !-/ixbi A6R m/{+w[uLͣMNXm-}?9},"}փYoAXfئ=\$HM#\0zp.]ޙ ůܟ-$Ly͆H:_Gd'XO`)jiF-˯ؘ&vk4ˠ-o^W+{ /آW"|88()`]VqeXf iVqYo$=msAeC )o$NQ7>#,9|l[Q9?~?{Y<\yc]s ?eܮW{âIX%w ; G#Lz`GO4W3l2jۃ ږxឹta@(ECR9 z`uz]Eѥ 4n"f.}E'beN!xCwh#/=FǡWRV;ҚE%H#X?Zx5-V6)KX](UV7bf.K"ߘ- i%JOseKFrutT {ؠt\SPm٦&?K2d3(>LɚELXP snR[%$R+kDSԊ_`q X:`~iP•}ǽ\ m坿aTIM8HbdQSɣyew@p:RF";wшG`&hnQs!2d۸]gɅ3(AO > Ћ߲(.VD$Ia^ , {n}rR-UQ'1j@(3)IGtC@+k!W6XuIp.apvHE~ 6f܈fF`iFF`JOdWyu/2QoP-jZxPzR*Y!,Hx$z[yB~QzɅ2"~I~|DeQip&5dCn`N;<`ӂ`+w!P},Sn9>6Ǎ*sQ}?}ntRCO]+m魺oqmuяN[+[_YVw7A3k'!m =:ßGINьX/=f+r]nHw>=o"7iv?B B+K~npF ɟ%tBXj@™%&2# Kwt9J;1}x1odӷ;B(>")3AF3iS,=?"!{ HkuhI): 0@J*'e΃{x;dǘ5e$wh:Zp jp噅l6/{3{PTn@ ` endstream endobj 159 0 obj << /Length1 1543 /Length2 8522 /Length3 0 /Length 9554 /Filter /FlateDecode >> stream xڍTk6tH3( -Hw70C H ]ҝJw %oZ3ϵ{zY[G nQP< ^@NCC@ ?>+>[jqFB0Y9C;<ugT_9@HD O6;.P+/@ Ywg-.ߏvK?2g%l!w-=%rOmQ(+/ wBQ]n v/>+@SF!;C޹YAwz*-܀/߁?pG:/ ì~; 3t0@QFP ɋ:w1+ >y3n|= YCaVֿ۰z3A^AT#B@QQ! q@C -GڀxC!w?H r~>Z( wbnc@Lf9c*sp7'?_E"wG ~{7kvm࿃i t ?\h;[+?`G_w}[ 2j@W߭ < A^r(R҆,m͟r AH+ ݖY]#;n#VAƐ:`p/$ ;; ^u` w}"@oџS A> 'wG ~_P/x__Zrv,{qXL-jZΫd\yGb?{U7[HU+Q)kהu:\>\ez.xäid3u{zo2I;$YFB^4mp<)+^=V5_](ET[4:թXE5Z S?Zq_0 M|Pi~k%R7p?U7?YPd 8re%Pޱ-]H:<];1efO5O` C|Mq f¨?CvBWUg'ǖ~RKV1έ'qSkkuz$5.tmok>o쁓FƢ)-ߞ4># rT}ޏC&稭Mm֩2ITm޻}r˧^n˦|rGݥi)x=, )ajO czZfvէ>x K_[&>V}qv^tɦd-_UAG@T#'gWU@ kZ$<sQDo(u#Zd9&&w [$pyO9=J f2n3c)Koxf!. 
Z]FZp BǺo\>,y"}w.#y"qj݋ "bvE[cj ˼65Gs}Kc6]Ϛgh"6)4z9lX7eE۟C`@A3Ym ,oW}#}Y{$?>"W饡mnʵL vou|֨oSHD<}Cge(3}c'^t|dmJ}gDy:`> uPe)[dYDȶucӟvj&XNX͎YLISg?vY9GAK 8i:b]>с?,`އEI)?ٵ_{o' |xM TW g9ME|PK֗c UAz?MGKzٲDAm)9ɗ]VY,_\yC=iK%(FU4D؊c z>rI.* N_'(b?>!{Xfa,/}D'=EǡK<ߙG lTzpoo 1zϕ'fJۗj3^&_R|"[AUCap{2b1j=3*aBPࡽl\HߍÞk³+#0\[)}yXYP2ѵ@o5diҜ O` ^/~L3)c/.>WCd $-~-Ŧh+"@j:-l7"lZ ha}(p,6Ҟ/RxŇ/_y՟(!,xj؎JRaҾƷYozL[XӴUFå2l=bw (-= (4e4M]۠h׬`KMPb]LgE6_|jnNK x5oj9Y/,+=vU%赦`=j4|ZGx4@g66lRK!{g:9Z7"TBBщ%NsQkK~|)WqhV2Y:# mp LzŰᰆڭYxqV:I*,uMIؿRS9:mRSXOb`g = W'_ yy&CZmJTxvOl״}"`~}9j~x΃LM٪*rTO3olv# - Mej#EѤTXEo`)ܲtxiv׽ t> ~y7ð٩9ȒcHHK^* #b'ěE~IX$K,%|a1dNS lJt#W@S~qۦxj*B5C?+~`?/s-(1s=ӪMSZ5X\Qb4~RohVUTϢ8@ gOCfl|NڋhVKa:GuO׵#Xv}[ݨ@.h"Od2$ w'ryS]wIJYng1_~y< b=}WI׶ګ3ٲ*^H5Z7k}̋ \HE$NQ9cBVjQwdugK}lG}LnYѐDRۯiP<3Q]7[̷ UַjOh,$Н UP35f4PlJ\/Π,Yٜz6Le?-ZXXXtJCx>[C{CF+"ѺbD~N!?V "@mߧ*bXY‘VB2.SZ ~$%CN{ Oza}yWEE^߬w3O:Lwp*g7oOՄem&k~H#@Ig/#S)k2IJ)\lѥr:OGM!vV+sn (>!(7p"D 0aqXf\(J:oMjJEi#2 bmcT Ye*'[??&^xd|L2=-Xrj&uR taggIe_zp+`aSzs'H?J7KKlW4OVVeSEt=( WU}, sa>,tXň #4gEbzV=,.Fq 4Lׯ=DIElA5,'kWq?5|CP>x][T0ԃEǪɃ!^'۴9FIQxIZYn5MXD_ 6z%nI+ǵxS3!XvI-fYjݱNX 7NvvUT*P*UY^cZ:{{َ:n4![F;K#1 TϹT;Y#oFf)'!lsHX 2ˋrPW4'_JwYs^OME4 "*E[8y1]2COOjMuuj\Ғ^YxDM fһ>_A1],.$YEwM0SwyM&iGv̹[A/[p/'ފs=#; n'%zb|@]y5\ߗ}~ va1XIj$K,t =%&y\ 6,.ƞЅQƜwJPKӔ Z|=gㇲ uT߬;8˘ECX$ڟ▯ ka&)AiDh{2',8hqPm}Qwy95aq/j~r #Ţ Q1 BӒ[l `>v%Wm34Gb>;0?#F&7ِwG vPy#\?e{JOjH8ƙjڤ<뻬&:ⴢQ7M$33#ӈ:UrAOZ3`[99cftB׵ST\Gk[#NK&7N$ R'ڕq9oO\@5yW/@Aj˘d5Q_vTxq ʃɲQw+ǬOզecr{!:}p/Wpj֭aNSՆiGn'3Yz&既3v^O=%*z*4mWs6,I76lNhL]U퉆2+C:ģL#ϦLpcӡbp6eUĘ\xD=C@wr ]4$Dbw21cis*!0L=Me qZ}3J[6nM~ Bc3Ut:}\lv < {@jA%J'oVg ¬Aw}4*Bݹoם,|tiWnV~fh8HNwP&]uGV,CCEkx*tRz L9Q2HvG9'_FIX${^BYT $5 @n={DnxI7<𿐘<&VY5Ǝ5J~M$%uuD҅;A(IȭiΚ5,`ǖH} R8>@ʹXcvNx+ G9d_)ֈp|޾c#dǜWgYg5cy18hɾEy^_JE?(^Zqع!V)Tzk&0|HS]hokp0:7ǔ:b r?akY }'ݹ⮥a'OsMͱ!lС~2`ip ~8GaOl [=Wt! ܎'"\W7?"Ԓ?/ ݳ$޲v.9'd2\{ BLtHC8L9e3+㻋u/ hi+#+0Ƅ{N] /sؙ蓳&v=vGDُ>rknCF5)mj2aɤۮrL3<#tֽ=n6}lU5+䊳:BB:PIiM/H[cIE[z '1[V)1.d*3܄iűvBf킽(tfT^Na"MxABtZ^+UV^eLm7Q8C ; $0:5iHPGA[R[{3`Wx|AҼo} G?! 
<'$F~ wGOaDYB;2ta<cy if,zfeJ370sEc])V2 ![;aN '256\<-|QscQ,{3ަB\XTRs`.W!FWEsDG6NlyIҗ^/5ejUϞukߚP0ΓY6o$D{5w l|> stream xڌPX.C ep n ݂&h M{E0_קJRe5sSȕ `ffcdffETv#F:X;x0se&`;@`eef233/@u])-\i+-/w=P0qڃ3̬^ Aohbl)ajP݁_MWƈH Pv[`a vf@ dtdJ@9 #˿ d $]=]& _&v.`wk;S_M"*pblbmD_a,2s\]v݋ڂ<@> ků"4@Nn@L"2K+ t=ͬ~Wrd%W!n@??E,,sk3W);:X l c ߿ emWt4$_' `5d\/~ lb?|e@ق34,[c):=fQoE &$fg/Gmbomxh\ ^6 @sk7q5/cv+[Y=-5~m5bZ0[t2_.KoSJ+' d0wA`\p5䁚].JFP$,=$9?z\SV: !M7g0"ٟ5-f[oT WWg  ]r%F2́KTgg#pX~Ǚ,r!k]'ZQ;5wK(ONdlx7ЉY:rr߆vHJe#yРDQ*1hTr(SP=ݗ71$lu~ G#ɍ.PRZ&VyȎѻ9+zŧrҦ! Mt˗K6{ufKkP·w6sE}e B-VOCn'Ry,LC5W':w'w7}SX[[J'!osEO%pxYPJ8ow-$?ʃ8~xP0|ʖ W`:~fMJngW -n6gk]_E5Dp;OQk.~Քy)8;|_nF,vJe \^bMjLاb"7Yv|>_h[aApdQ>h}84FZP"|0gR6 ڑv_8rj oL\IX2eAH^$6y혽QPX}ZӃ/i񞈖| ݮGV)QK@m,̪Њ5}Yz*:vva{d tirak+Tذ55sgP+7۸扃{CC,? l InIު>uh~Z6Y#m:,Ktg®‚S=(v,l*-Ļڞ乘xu#Wg{|@;/uF,+ rWR⡐~JY$qzAɾ(T_w""# ֧ouꨗ!'xXL֯^f!ryH}bDbټ"VQQdQ=_m# ZRpǵ"]C'K#%-r/CٖB4\㐨[=LB'dڜ,Uu~@C,ڮcxۍҍtgRp%CkF's{!nжE&ccBtK}D0<ʳlRI,tPn۞B zGQN6v̳sImSA|켬uǫXx<2s0D"u!Z+z7>,FZKR'1^ZIt5"chWÉ*I=1ks|㠂P5J/-r\T 3t~c8j _F,)_!KBjH_>GuͽU#J٥-WUc8(u ޑIOzٍ4&( ZV> &Y d:bE"dʹmz?|UŠIF.dsc8j~{K*; g{S$̞ hڴV>q^Hbb$Q>8fZtψq@H!.ʩ SUT0Mo0^6gq3%7Yz{}v"2 z*>ۆBX6I>)7\=bJ)<"-מ+a/5OPiwvG HI#%߿^۸?-0 oq  ^뱎oExUMcqvKZlL%y<ČCQKUޅ 4KUPr{,Aʥ)Dǀ*ޫu=^Mϵ(>UQC"UQ_rtxk9`6OW0"1>V'xɦ d)o88aPv3FQWWt/AXeɨFWǠWEXydPrT:QȊ%f|6GcɓraU&DCNǸPw3oB 0Q6lɻ #[c# ӳK x ֊ԇD[ulTlX'[Kvc ?)ni0Ph7jқEL0_ן8 :V&<dGs1SO@VcwTzF]Y\7Nmbw; "t tRaVg'<9CBc ѥQ58L ʨJU{5g/T$1L LvT0G@ >mf RVoP㰸MX 3{d} ۔e>>*rطSƍBy;2fjT/m0@w7F`{B͚RK R/ދ:l3|s [L~l߿ I$UP[(Lϒhki<=^76".Pڌ JwE)I } 1k; .n[=6`b P GQ ܬW&u i""C h~|sh,&_1^جAUP<۬!ݫ{93r쓘'Qz&0hrhh]84cu*$7D ?3^:n/ҧ5{ۮc לּ?r:BVh*JGDNp؂n} R[}A!Һ5N 'w q7xQ(o75 Vjjǔfa !Y^ zmReɝ:`ОzLmGut+;y/G URnFRK,Eo\g7cF7#jX*@F~pt#ݰ{IPqr0>~K@e4(6sّx*?B՝^Q-UE~IRqq2-Ү^2Z5.e*C3d"%Cteͭ?s(8ku"Wp+S^NjO2 &ozIkJ``K=l258 _CZE)%./q~vDOt(,h_xW-\e!o y).;ҢoX9rg@}Hd *5?>Qُ6lMz|O$l$j6h'+MqL#J/DΚ=xpmo>ueAs8tNJ+R}ư.ԯQT!=;v5I%{"(ysݽ\_jSk.ݢYR ujS ! 'zp]& I1ȫ$UX`w9[۴>bâGI hl(j`)72H$iׂÚFPÂsE<2~3d>XglnУK!?7> ~Q0m%FW,YMsUp+xA6dfxD9h);d*aP{.!0" y,Jۊ{D>Ο! 
}1o\Lx_M\Ax7/W HʮgQYz9_W?B&4cc3S3TV5y a/]Snּ:Ql]U SM;ߗPmmдDnW~{+2:a'SԹWǻ0 9O\F)<ǛѧL9v|ڱ@95B=娱JR)>{/ӣg\8U':Z  q C۟(! .X)U wF/9zLAX[#q@qNyT3P69W;*G(9HWp=N~#LXᾠ)mݖHdݷx͍sLLX>(kLGDƷ?vcr?a7ՆwB =El']˿ V"-7{uh}-1"՛Z&ݨQc;7#~@y@*s]^$J(3+j6Y8DL97ħxb7Rp:'^k5Ak3n5y~e:8zo MZ&:LŤ Me4>'.a<윓}+m/ vMay]4]lc/ Oz_X?/ǫL E{ jA #Vԍc;^ ϩR?D6;F tW 4Nvi12^ E_ƆHkHm ]kЇNS`eU)`jyuFzۆNt =|7ST$k^ծX6I +<T.V|3._qL-C6_Qa^mߓ˰b_qe9}'ٵ~Uy1=.*$9ݣZ6ܛwObGLF.c4f{lA1̘r zKcMn"VEq\/R^Ʈq܊ /.MHP23W>tڢ;Ȫ.t_Nf6EG]:x֍C7Z(ŋT:#?<"yzs0e8l,PPow(( F^2<.6C*]6UW:QH6IRNǮI Q򔷟Y&y7ŰgN+|[Z×9_& ':'XħXpd$OuRھ \B/X3%`bSO,"KA3)ZϷgQ5hN{?4s}Cd)BϨ(z BٸNĸ q' ,EY2t^ gs|, @)]Q2b~])xz1T+kl(;EY>l}5 ݙ~ 59o{e'f?..PVƅ80r'@}h~]B 7o_- %w}]iw?}SY{Na|l{_j5K Uj? H203zGs-v| ((HٗQ!(f 9,ICc)u = ›;"(`y2]l=YSeS`v6'%Bַ.cC&]gKpB8bY@枆_m&Ch?@>ϯŇ8>0^b IMu֋0D{>nV-+҆~QjUlCGAZfpK&>D{4ؗDžh~8Q r}U?pN)my.H@u6)mVΦO׏[x0)T* }DnkȘGUf=S;χz$5Ag3ͫ`ez~ 5䉁H"s٪`͈|]q 0eFcJӎmTؕ$U.UmkJ {V _P U-Q@UEA {n\o !d}kƲ͠kGl$<%}Yu ٯO$V@jO}@AL{ BY7Kn,}Y3"ezg+}I{{cx_dd]R. rpM4oaDld $䶦TڍzO"ZO$ N-Mp\a c%Y,3K$2ToݰBT(9OUP`  ÃH-cKou0MLJ*Ŧof;h.hp+Kh0g "U"gIWl+!zd9'}_Za MDfk:;6a;`" km6Wŝ)\6R.P*j4Y{ >Jչ&OefRu Nxɜ|x:΂%C:TcU%"0O=$~XoP\_btTWcAhX -eUD ^*TG=ڏRl6Vno!pCqdV浫%Pן3,MWdq̉R-%>Q~s ȤcWBGW/:fU9T8%BU_C{pֱl+b;AʖfZ'$y4.øM\sHE 4H|\M_=yD/D?&s*xa_zW6#v‘6 +QQ1p˻;2m!5Ru/e=eBb^n{ukCL`qIE; |=lv-RMu tK9<:3whn%CGKwkn%f;&mg*?5$SCKx `k}dl8SF6$.c)q\yOC/S+y(8&XOߦB:ot;G;¥%1͒`ɺʬUh g$B55MDEaQ:Q~ɘ)0_ux/UIg}rh.;Ǭb$h8--T\N$ ի&08!W|Xi3&ZFz7oHq;#"q:'Ca;36Xo*/HG  #?\E,9_ S\e5:tnL-vx}kiMXƩΒHJMDiis\Ul_$DT&|˕\%aQƇ޺C p6&];Z2hWk#4990WrF0ez7e0kXIn.C"`MjC^V7Sqӫ*T#EVB>*Bɥ )bFEjYHw}k=yՖICAoaO`.É}WemC~J΁QcHlm7ɼn-<=%Fz(O~u_{Iז}fX4FPB/smhrCf[OBH(kӅјA!! 
Q^ ĘNyӢw}Q~yb_x mrt{R>a3YExj6C[ Cc<fM:EK*D@"6>@( ∌L)j <͝ih&4^h- {aiܗwAJq5?\2:/9*IpM 1/@ IFϓ=pC=F9WܦYBK)E:m1dr þh$BEu"{ɤ>V~?=aK8^xć$ªq6FX!8 (rSP&a/$AV55wTorT0Z{J .q..&Ѻ6c|ޙdq)ʪSӄ#LHWc LO, $NԗE=: 0k%!T征PCaod6C'2I#`6eH n:h&g)F$NdӖ($ތIuWgo,D7r!Pi4q%!Y ;eu"UW|?{SJDvՄEƚ.@ivT%#a-(zbX(V_}!<դ*lR!{gSH$&bbqҽ WI%'El Vt fl8SSP4Hx}ϑ#bľMrs駮2fѱi^FZ|F%j;f.]N I<4(VH+F_sXe!C4/(wyQeqv y3Kn"CK#u|wz& (c8ܒU60C邉[kˍ]sobE|~tݽ,E3;*#EkGKa/ [vQEicQ;Edq)"!ls8sv](!..Yc[|Kt3k%3坠QT!TpǑȽeS$o+ջɹ;rKUhi"BQ/LyPyKrmí /0ө/?c]"i'1@ fc:NuѶ^Km=}fr Cl"/5=oS9iSݙJ;X ˱%N g**ܸay>4ּ'{}ƼdmSsR|Mx#_ag;IoGcőKf8it|v 3fwL:O?߻ڮӖF8Ӧfk~$1r‘zC 4uAP!5teFU`QU)8U1o]5(W(rmx`:E=X3qj}lR`yIijwf`>*(3W^R).ad^>TQ9G7ySe#ܖǺuZ5 o 5 Aa)L U;$s`)ٲH@_t8!+20i[6py԰[%X]ڞֿ b˫QZ"Ao?Գfg1vLs`Yq;OǬQjώ ݞS`t0 ɐs}lgd^p$fCV-wkfk<$.g[?,~E4Q3b".\+8ƛY}Jw)Qh=6ӵ3 OS_`_ܻVu*=e5FkڈZcg] J/.{T}۾Mr`P(O7?IԬJz RUH{z-ڶdi}#-E:Jr8+ pDaSe +}#n~a F/*m$oËM|BrSbT CwJbb=?ҁ+IHPZd4NSVoJ\v:Pܵ!x=Ε<߲9+30EZr+ffRXZ5O/Q_ye;brpzʜG:StEkLFCiBCB5g5g>0wG7yixfN/E&:x+8^X:j9L܂ҩx*^I4ַ ~+3Vw^Mc MͥgN[ᵊh˚dW-gHPB:-cľR40n.pēL^q7[NI4KW{qXUI=N>,ǢE"׏fO=lj~}`]|(C.x{8c ƆlY8#%}g* Q=k"YM١l{})V}(5rC;J1xh p/z`?YBmऑ.j? aSNnk7/R)k)l038 8[|tPѢb:M"__}jI+ zbQ E8OzduRW\:)K1(U }{\c pTx3- ~˦Emf4GB MZL%7fT>N؝.#+EH?S(d=RZ15kCM5JY@׭y/1s-p\` CuWLG< *1K.;/CrixkZ3x=M +aWV _5^0%]j{7 D[GaRBu s+l82) SE'U,l%Վwp$\p#7` ۍHξAv5b.@8fA_ > Ds =qxjs{ʇ&nNl[9q* ab?%bǙ@^0M*@Av?AUp.ټijr-( TzZ*W#W8-*m)t* MoHcus+a$6ދBnP֙] /AGLE:lu>9F܈ it1@g5tW"l Ҝ7 sԊc1WKO:FlU%O/'5|=Z[a+ p6/ Fh?kSti'Y~%%P'3{?i02zx)g~5vszv À("&$.-C2J쮐aD@6-SoG/`OfiWP/]iwc:O^Cݡ-Xy;bnuAIYAT#Z芡64m 2/vQ)PQ67ȓ~@, 2gl9nee)y8v=F֎}=Gq'\6E6,4kTubwr4f4tM^Ƈ mόLg|qb" ˩ \GUoцnq5eV ݸebLImܕDژ\W! 
\G:OJ#g)qUˠۮ=UT|g۸lޠГ4EV7Op}<,,Iߡ펴lvUiNۧd7AGg4d[ᓐm;AA([K$p~VH/^h56s`EYI.*U6z#ێae†ϊa_Gity=yH?~tT8HW0!{|$npPթ2;֮w;N.}Oc=[*s+ +j (4Wsp*+Q #o.59S]){%*[3Ut2941P'y+ak,28KW+=\2qOJPBg xH8m%.~k\ 8 'pSRX$Κ8^pD(@RI&Eـ_9i@uC4əJ'lhԁ?Ϸ#=Xŝo(d&;ˠ~QnZ{m7W]{,L`'Bvp(`b{;d#>Y609'Dk(L\' %3lVȠ*Y| tǮ~Is> +;(D (8Yj3c]T؅ܬx}38c$$9pzkvs[찝PqpE4&VEgJ P=܆Ykfqh\;-D]ge^#:9$wm,3_ [B#mxiEt=8tJp:c;SD7cofYbC;.}x;۟qim4bB%M|C-9lP;(u.˾dY5gӑ67Ẇ煰xt8$/']_RC2P8(0,m(!+.y6@3+w ךlέ9c;=N NIŵ6LdyMn%r>$ֹ^;9ЯOn9"Afwb*)#!3(T`*.MW$ y8xʹ7inL~p*5A>Bf,/:?+T@W5n VkdqNe-1_2ѩWٯAywP: =q-W0#4hʰg=Ty#7Zk?ui`~u[a*o4#5`rY]VV. #248RqTnRCO:`S jd b kI0Wn@V^x36/l 9f廥8_=4\cGHN2\I[eVH": 0̨ҍ*4%&rfcGVT* $4m-^@qrD ?Xj)>or;t&dbFJJ&&f P81uw F:F!L V`}{ۻ*'^z+׹'r0 CZq=:lPiZPA MzH]r}\ǶQKf7W{rܗjb>ۄO2=?ɦWE2԰wܦ &ί 2Hu>ZAEGlx8m$A™Csg㯾o <~+"_,OP4ӝ!tL0ah319|I!Mv@yrbOgdXvbȩ0MW ܾ,"ZIx/Mpd9o71?ܛPv \MMC%*قgc|Zfǧ :N2*/tj_'G"b=FXM3TƢ}I]Ⱑfp33d+Tn0k*pĊqɼcٴJX~d 퉭B[rOrR _Ih&2pl/E\^s͉1mf>fϦC&$eIu#?m{cHf N9\JXƂHl~nEkǘrA?M 25V7צ/YS,OM4/-- 8NEn3S@+wF"Jz x!`JM=h+e;A//NRj~?LL(? */W?#$ޗeZھvYԷv$Ϥ{DM @0w1K 7nV0au.$&<ͱ`M.B9āw쒔q1SWdvثP@z0[[FkNJFw縸C{<5xIah6[JGo0iǯhMİ ,lHq>h[$ S~[P"7 KjBYD{Ms)oE>VIFE8u)[dR+>◨'P6>䷈[Hp`0XH2& ̀DՏi)cGe㷎գ`bCBٲY ?1!6Py"-hy4dhj ;[50av5;勾\yjݝJ^XnsDH#qhIzyDo^E^l=۠WrDy;'`\~/<wD8(I(CЇLU) )G?[0oݖ`e ]]e"L/I k3Rx%*rDXpS;IؠT\v= 1Rds2*7 p>,CI+ڤtJsS/{/gqxaj v Eߎ@PV,<5XD"BZ<p L|юn:~@T>$ei= jܤ}jJɁ+ xXq4]:_(Py>ޝkԮcfЉLes- sxe:ùP;II4P^*xӲm4{vbG457*Bi`T3ƈX~Ar kiD sg Ui2R`cPKGYSNZ`5Ф(7k鸿ϻ:.bnIsm6 ' 2kmG_JoCfbRuĻj̣"5\OW`as79)+k'A锜a7a鷇 SeEQ1V<{a@cO gWI,hywPΙݢ<MeLH{ (;)DIIHʆtmM*:j54@Xk-F*lѯ*.81!zNYzN B;_wϏ_ʢ% ;J.TXvFB4tmPԟE@]G9,tމ>e[NՒ:Hd{ѳ?֭,!x:pXᚓ}h~_u:w_e>7_6dJ::my?ej9BtF/{瞞&"p$*y0CVY /+k0ր$H͢:%|xTy@@wtE\,PC@WƆPX A f(Ck.|^* 3M]\ҏ@YP~~[j[ nL!\G /%4nD_*LpL푮)'*W.]<**Y;Ƈ8":ᣂ_.;w3Ȭ\Fс)H>Dr=ln%#.6P27'HrRT.:+xI~qN-tokX0*vGpu-x-_L`@Cf>y,%M/I@la&Ҡ)\V7bsf=5#wn*ɖX=7ϛy91Ý2 HD8)0'Jt2-e \WAAFN.~CCȞ&Կځ}xkŒG-Zx Dfue!DCD?AyNQ.I_ð O?u…sZIq'J|VMfs3?0-|y] RJ= ]:)&\ sfo*=_nhMFDh,}[D)zߜQ-/ߒ{6g5b4 " }wIW] tR{1y<:& mbxQlϴ9P^Īd}; UEՆ>8v)yDFv k/K ͦ] exY>]Aic$D65ϏN# &KlgZoQbz)`ago~ 
?H82)tA]ݨbL~1;Gb>8V `VIә1xmJ} d']pj$=M֡ $kb-$WF{)ծ7^3x]L^%_na٭Ec)~Mb&rFI;k+XiPyY  Ѳ ( BǶ'-aum'bj%Ȣ (Hf$87"< %Z%U,&h*YOĢA)ުuRl9/OA8W o>9M Pp,}sU-*YTD"L1Ƙn]Jvޓr{2*H(ma~zm\$kP͠fB)q68tkIZs(N1; A8m@a`SI*:AI=@ݛc7yЖUANd8oBn\#&hΛEOgAX?u$yQ?d l!2]MHDpQ뵸\Bt87ٺx=u(wgK\+'lU+Q}zX@M%06gL | Gn%]GŎN$< h*nmLC^摱PY͈EDN*禞S;AKн ՅEp6/y~EGofqZGuVsC a10M_l'U<ʈB|s?IA>JK I:}-GPV*B{hD=l{ bOϩUK˥8+#[:ֽy9_CjxeݻhZqI> stream x[[s~ׯPoTasO H"Mdɑdן{Vk'NJF===__fVB DB{a:QlNi#6 ELR"%'9MQ ,YhMIhFh!ECPpV@X9Y%@٪a.`=hH-(0V9᠇SVx𹘅$&o8`2 vAN*Y TS0*{K[AED_LiIE$) d0!gX¹YdE & ]894 =L:xN͎ 0sSX50)x@9kJ *40P$P(D`EBi@(@҈aӢ DI@/s(98=m1=-v |1["b"f8XcB"#'b:0Et"&t2#r`"<QRԳ ҚBI#l!)5ivkD6Q{ZȗA=鲞.!^ z1; qSN{~h 09=Lyo)sLF׼lZE(e(5co#yu|،⻓C/ɨӨӮ5#ҝžGl _|X%@h NG'clF@>ffz@Ťc'HJ3/^>ݥc"uVZm3?]4~ #iQ3=Y !'e=xPF̫F'ZhRe3gSa^SΕK,Ngq/X[I&LѤ:YlZ"G[Wzo%#t C{H̶K"SEU ھ>w}O8DuFG]}{g{.!e~Jl1c`&VnV\mt71êift(ڮ=շ6uQ[@=TWQ+SSVimlÌuU==pqq^q6T ?Y^Un+U8U?. nfllG pKsf7m{3̍=}Ouzӷc܍POR0OCH T 9jz'Vvg3˄IR+Ÿ| ^-v5!O#J2gY4]jTif1' βD %!}$#p,|A+GzFD{E6;;'(!)IADVj`X 5R[ Pd;ŀV`S BM5@bEdfS4֒|c5i"^S-#DK>NIs{KC"+3EyfV2Sni_Ē8(tO X Ȥ7J^N&-h:Vy[%>r5vFp`Q9V2kR:ϭ-X~i۪&}i[䴭O_QKTU૜Ub^rE"<t np>=R0$$~M"Farc>ZX[[ڐZkZF"=e@|Ɩ+-lc˱-Γ3b6+qgʼoyީK,QJ[[xvsoy|eޓ34&NK,eM.7};Q".Ezi W, v;IAycg9s[\3|_ʬ'pzׁ;c,w᰼X]ZtF_uL C''$e46|RPϮkltx(2|Sgƛ^Yv%ley^neTX_x>m >\ġ8`GD%N*c7ˇߌuK[JMſҲF'L8mfR|äq:n/.HvR783bQs{A͛l^$ʭλ?wiIu{qqPeՁ^ǧ;?ܭ!.n"hi鲚4ӃI- >~Mw$r_A,_|!_J~#9d6YOPc-q#lDNLΦlz%ty3ӳ ū?Hϟ^Iִ\ 쟁tM%(=7[9UT.8kd/`l-yk7q!_j}HxX>O39";׈-:tx0FK;UΙ6`ٴ'D֧IuDF)&;ȟ9QE^Y _>kk 3. 
-JK 8jjg#> stream xڍP\.%h[%h 4Ґ-w ' !ᑙ3sWWF]M,uad T49@ 7;ȅNG q%FÜ!PH '4N Ptpr88@(CG@`gt:)G'O)>LNAA~?`\lO-@-G B0ظ8 qpa֢Lw @ -vƎNж8)rrq'= u~pZa-ew@pYX8:85 b*x@P߆ {g'b22r@VBzj-`'gvg9~y:e>i ltN XAVtuЁB޸2y#x@ ^ _o'G'S`_ r\``_+sr,!.s5O'1O4|`|'^P{/˟txqظx9~^ UUjاSOn͟`w,U'҂p xf.-H5.OWq|Y% .=Z}gYRba'Y^2{ }8=m?T2P G` OtxyޜOh v˓ =_# DxE">?H!78dF\Co$#'˧k9,9DV!OR9A9 >U|/_)?)c =]O `¬pm}pm;ΗWSt;ziLl N{ldkkO[2W?[Q5&'tYqC᜿ܙU-f!s൏HTiC@ i/ر|&Y5XY!lj6lߴ~_pM310}5`ٞ"qkw |2]SSڶi w YׂdѴ3r[1Mw12kom?JT +DGkFs%^P WbʼnqPZKS>uZT^ /`c//6fd޽0Yֵ?t vMR=S51Dt@c ?|=~҂@,X7G[ ;(TܽZy8p܇XE L:C  z;~|1$­5IbU 7KLUyȖ;gs[lr}Z' 2giKTf ]nS}ƟVSVtyoHڵw|v%11!VzV,=լ N.j4JŸQ*jUnFWN%̞tTObię<}RE8h1m+v#Ldܱ|\1D~LbERrSt M62ˮ+-U/( |2{u -ƪK6d!q-5MQQ^wӹشPclv8mȢEB߫$TuN 6,OHG[ ̏kILU!C JJ%"x(|L#Ø@ ;`, S-d1}zwxrxҔB&br[yΜa`DXL;3-јj){Qlo&huQs}"U"akD"v5<QfwtXe)|Ƙ *#A;NؤAZ4Uq;/|fnD;j7\Cj Uͮ_p4(/席) w`X,ۏ8rLٸrL}GGo}Šy5Hf.qR+4QHWǨٍxTT0OKLc":v$ѡZy"4x?k F Uan>"|r .oCD7cHj/ݒGv}9+$j𞥤'dܯ_]0qt❝Tٌ+J g4t Ly22(8LU YҞ$)O4=r{iB,i:5123rQwsI- {_ WUa}U Ejͨ| E7C3уCUվ.zFԴ36(65詿YJc\5,>8n-6QZ sP: Wo=VNPi+#wZaHx^ c0PcYx~ʽypT(0>)(bޚ~x \ZbkI0ɬ&Ums|ޭKܔ nK'̌CJ\QEԜF],fr71}  ᜊLd5ISFb2N117xF5w>,P~U2ȫai5X}7y=:9S%PnC$y1zC;ũ{d~ H?_*\-@ADyvb=Nc?7\)iL9S}zK̄-į,;#pZxH@#.( VrZ#|Neڊq|Zz_g-N$$@1!E|x%/@ɗV bӶ. BQ ry9(p#M|HҰDX`׀_7մ vx*|A*˩v:ID8 [SE) EpU.V:SmMpz &mţv' }%Ş%ĔZJLg3\xTSⶆOCs_j>}>kZxa;vp1r!&qڑ4(ɑ {Kd !b*C%N𥥴׳w_{m!>j{2Zm^qC4M\֧|Kc4x=2Nl?kP3&vh|eD+AkWz&c}2~T=.$9 1#I i]E]4q/}?vw/hp 9hBDЊ?A~*̈́}HzD޺p(KX}%MzX;9 m(m[5% %Y&dԴb1-l('oSzڞ&Ox&F;P:gE5b0ȿ|CM>볋/H{ Ə_- saIAj详 .!yݑ(򂻯U\jDX=42;f ,!,vk'@SNG'AmDR)N ޭhpٳє;,j붏:nE?#sX^hy{Y*Lȧ)z=Ҹ<氺@Bwu۞и򕺔oò)+Bap\rBmT'Pê--l0֦WLplȟ[J6.]r Ļ%MQ)5{PKeIF~z"3GhᾔǀcWD5_I\U<-0ƪo{.kd?JGO99SXsq]P>_YN/*mj%оii}0@®cꮺNS.nǓ<* gC Zx?bE=C-1:H]+]:5w)P:pZ~Ls^TOh㖮VZA0'"QEcnl;*8w)Ř0@EIYmyF \rfc澅t2e>DkGPI}^Uhv4g!}f KFg".s  =k +YN8H$~_˘jʔ ӛ-:YJ ba j;5 @, ΅;5Wp}HPh/tj&c4y[4tq=(H"9z5g6/?StUa8o8!+'g_p:zee3. 
rFwPn\#v/Ij <Í Ir׺T"ȑrGP3$z&F*s K%[WxWo b7jvIpꭌb\b[wH:$P7EgsB6}?7o="]2!}͈Gr8kb^&tbFa,u0hl0܈4rTBlɛ38@tsЦ^ZmqR0 >a,~\h%Ŵ++'uŋl^\ oJP7 3Q7[iy_z^n|yfE^Y&̷rӟkZnXgر2`Mn/s3tiؠoO+JM-JeΥډ`ݮA4=Z%SeZFDy(2S8?ێ?h r'j>,\kMӽT"^%"^@WȂee~fݏ(͚k07گwSXFh@_ſȂv9%FCYH|(L-zuhа(z}rBzVv]5J|}O}'|KܨiA #ur/V17~^5n!w&Ņ AIeyOBT)p)Sn1F#r@~k0s[VJETC}!@DAA攷TbXZ,.˺6كًy@ڀ]{;.Lz ^"Rnm%V<%-EjLưN8c(1Tqh# }Bx2*Oiɽq/Ţdyb NM*'kU@irʭO{6DQ>J{_߱2*mM{Έ"gLH|74nHBd3{JH?)*a)UYijpUE3ұ_ct' g_7B9wl8P%fֺ?-apUB('w5Wf[l;8ȬG݂JOoj 򿜟d^he tFMwg䫮9-zb;cWS2јNR0 J# \WpYzMTR:.ȷ #_d4 jPJ'q޹zɒPA((1^zU1fq ݖw/"6=r+-ǙtG~T:"-/h'"=t/vp;KkƏ0@g28~~vfLu5ue+g?(}7l,QBDEsdVQW :x;`P9%'FdKǽ# OЙ// ph0B(3=NCrG4\P볆jq̐8e29 ]+}hIш'ꆝϜC^[YOC|T hMd/x5>l6 RkI.wNIg/4=;0^LscHm+%PDqX.ܢcĢYM,9eSw D&<]'KFmMޫ o|3o/{C=pERԌtLfza;3ʛwÇ˄:yĥl:Yr_*{O} ɪң 1Ӆ6[i Jrڣl6p}Y|LZo/ޥ7JbR7=Dcd$[E|J :#XE8On+wS0H< U }sg'd$qezspO, Ul,W!{LŨe^J8Wc rٛ XfAwFÔ8yXk8$Wlk7]"PS/lK,s{ ̾]#YĴXe(Jת}W'e[y6p*D(GƦYiS1]t8 o%SK2ba%X_saq2~byn'EMDSg䗲uTL__Ḙ'-Yg[d#qK?>xFRZBKԭׁSn74^@aK;={Qe3ȆϣSiQ[ >I;Xp AUmh:xjߟK$0̘HV ikRE7%']t(Zt3lന%75|'B˂ޥՖ\9 `?b<σw+œlBy*qׄ '6S|΀gS"pG->XJ_N!jՄ˿Zu.W,W82bZLhӥ,J tcO ִIIr C<rjƟmbL$SS$UfFIQT``c|5 hήe#jL̬lX00|>84FXsGVh$N_[մ̏a Fwlާ)֌O4aN!D~m $v8i-Jb[:dTlVolpJ}$<17Uxӫ>zW6Şؑ5ad%kB퉺,}nXnJ07jb#V*(Hr B$D|)K\<] Laɟ@V\jCAo)2Viw-(\'_΋{-ݳ#&$̲ dj_u4g58f{@cpQv4{]44 [Ν4K{tMGӧ6V+{X&4#9Y+=;z`Pg;.p/kC޽( 725 }#M_RQi~7T͋bP0>)d:}pO ̕ЕJL,|0; A1%V<īM;bzW&r'{en2pʍ~h#ѱz3hh̋ؠTi(LlbP;8M`l֊Kf?Wb "xHk%zYxx]:n3Z;Y,#gF\ɓZN"]!aqTɇBcKeRj$gؕ{|bʯ(y~Խ P}?8FfH3+|MϵuEK/'m[qtB1}>0# fu1E5n##7SڸphQt"ј0hBoɈk1/`pa~z{] 2i@ 2-L^d5a ;g`cF/euзS=臉q\k;M{Aet%m-GeWK2eT1`w+NGo{3zPE%/eEs*}Vfҳ!psJ=H7Sox_*_ JKPgXy'Ii\׉J,H'#_ *l9'd#rzEwh#=`ˀe;9 W4u7Cx&`+CsN7O"&=GTì {+yp Se8qc>r6[K/Dm?Ys`Y?3)`w.߬,Hl3v}{hh=H(FG>+}4[I ָN,iI \\g v|>d^}{R+A1oh1gHeujjQO\@J̐ṵ1iC{K<&א$5M0V&ChW?i*i~ue ȓ̩OoWۯ틆Dwp":y1M`δ_˜|,j6V?{= endstream endobj 166 0 obj << /Length1 1570 /Length2 8430 /Length3 0 /Length 9460 /Filter /FlateDecode >> stream xڍP-ː $;`5Kpw \[p . 
nᑜs={5U3_wջ:9$:1yll,lljj -o3Z A 2qzI8=) @9g[ ;' ?;?_0~ AAjq5zҙxЁ T4qAhfb TANNɎՕsgj G a Wg,j/:|2؂@P'3|z9P]VlWYoD` bguC-`[PYJ͉ h5hb{⛸mMLTnU<5w{f`;'GGYy$\NI@fOcwgdm0W 5݄&l ;fcc 73+5@OxyOM  h6s,P?AwٞdaP[9_Vy Q9ƿ:OL d2sp<\|@΢b Pe0 _>M_}t=s)D Gظ̞'Yo߂mm?n':;=_  s3N&O{ R`7 /ed`(H}Yf6O7"@O߯o;IH@OU40 szZOȪYMfFc5d9O\ ą>q@V7@V925&3g⏐f/V@f9@jQRWqizfEv[LD 5Kġ-I %O!-wb&wZ _' Eɘ5Dg S:ޙS%[]_oUo1K?hF+5͜%Dqb&Gc=qÚ͞xex|,[爺X)p"~MGDx;:I)$G8Y1e#A. pǷz\Ur= > CM-v[` D 0bwzPn(l|cSx d|^ViBAwljed_C/o..ޚʴpR^6WToBFh?Zq?ń \GIi(nMh+ lPmOI'FҨH򲴱˔%8ҹ_Ld]Wٖ&KjG'iLz:,#Q8T>*I0;I˃SGӇTEXTe-IG&wy彶]L4HĤۓn3-U3YɊvvDDpA ĕY۳w҃W7X.BCqqEQ#̰S HֈFѹ9 2ϋ.8CL.$-C"2g%V'= U[ dX1Ҙ>w3I _\Hyɺ /X}[H֮j78#y31j"AWOqx&E /oCޢ( je>&TGLÓ~L&b;ȧ$Ȱד7لzHhS9_mϡB6yQu raF+ǯPډ17}Gbq8>UhL?y$L2Eߦ[S$+ǂ$?Vi 8d_\]TJYL֠h-idiOm٦|wD쬴h&Vl~PkbkFjӫgƭQ&tܹ|8 qIWyݦC.S4C:{UUhtd5 %v#þL/"eɏzl>R#[)}>tzG;Ð&?"ZQ(elYc68ygw9._ b5,ue%7\QY`un\t*_.ה_M-`;d[-uyѽdZR0@ kȢ4QtlD+\":f?[WxkGwl({-rZ\=ڙ"3Įiunr\pFӷ(OXa0Mm櫁ovsн_y 20_|ƅW":BW,O:j"龎Z7$ƯX<Myh0v&z:XG9 E+i4˶-1 Jҗ2Ы$/[ 3̩ e٪jϸ33o~8־ f`*!+R"K\Z5>(h!oggnwkyOnV< Qآ*IYEd6*rN$SܘW;M[&['s(ǽ b(p i9ܳOi">C8vڬ}֟RՈRS器O[ۍ/SL~WRTNIÖml52U crVk9!&hoaE f$ҐJ1Qc_I%xT9̕sR#gCsCigdP;n@bŲq[4[T7Fڄ4>en[,=P9⠟_Y!3n7u duzC{ep҄pl_D$HuQOe3lE/R%<ԝY~e'a?KmǿVx(ौ9K$ =*3Wn3ҌR6JTSphH5$#`M?!=?˻,plDꔡ>W1X/JpB̐*+QD>&^yG*vմ\| \EFO V6h3 yn*OViLTIB,+P宱O"i|y(<;3? 
x 2$}!%^WvFYf.:LcasxL#Sυ*%\"~\71Y\ͽ} f`{t콋^2"JKؐ,,g:|6 R:saU2f_ڳgC Ε2'Qą>+y?$h0Wؽx߆(`˲px;aכV5n}XE4mK,Tow FTaZn,y5f ؃^ׇtj̇O!yhTvȥ^nѬۧ(u׮}ɒJu70mR7>PF|ԪП|{Q}S~8&T;Jgy:T" T68WrP7o:[(3 tkL򬔛`$ SOƹ M;E4>v,;&VPVuA#VŸL mG",Xw&DQzHOI/ a_?_W%*pKx Rl)vReqZkAgS(wLYoMi/"ky;#&[{T*ˋeXdN}mYGVVCc̤orĒ@ŏ?u$3 ^5q%Lflۡ}/ΉsӼ_W I۫Z%K~m) BGH+SG?eVKuX}- GV4d騢3p:O­%fIsO?ք1\cFs 2*4 WEH'ln)/)~BS2D&by!g4zŽ{G9T͒X(W cSjEdD&:?_vRt q~{#9|!;ran|/qx\Fp+H`Oyܹ¨)Ve/pi8܋5Vz}V&B&0f*H:|~.ٓXXO2Q؄c~JAX FJ4G?_%+ B-_ (4OޣYH;BV4}K' KVϐY }Sz7:Cק^wH-q5xI3d t!^ZO oZs޺lg x!J74d34qUQO9k d b5acB, ϼ{ %۵OlҙS?uHQh+5*cU~R#KѸJ76QX/0^OyƏn+&b1k'LwQR)]2V|ͰY+DE[t~1b{+-RIr;]9I_t(|=psQ)"$iV03SѦ RϳT^u1yIgL+|y)GT嫕%ؒh7^ (4d\ ew+t|xsEe!8%_%91SI;y65[xcf2x3*+yX^g5|L'sFGkY U8Aum A3Ջ5ΉР#LoW|E F5WG8.Ln%nGi.I:>RV3)Up^E (ړ,&܀K67=?q3T)&\1_,DP?7s6/~R5QvJslSRʖh+`% ֏ W׈x=M0)y /@A 9ZM85GZũI d΅Kv^E<\!b6Ӿk%y*tK=vM>MCJKn-9抣`$)Jz-y@_׃|Tn.u'b~>fɪC\RGVTvcJ8n#-]ӹ\{iv5̌lQ37'ÙkaSWS |wyfz^Le€#P̏lʎ~F1Bq]rlޅv~xE=ja'+:4:،\-W XH' 0܍s4TV֗=Rah7㍢~&P˻)Јڈ%M֢w`Wq2 PR$i (3 5i}QPJts:ןXiP5? UשOd s[OUe+le;nScAƄh>y!A'fc'Z1ܴc!w t-| uiDu82oװC(K5%fWvcF֥emY皳;!ðSP^S_gHoHJ’1'M -ŌYWeev5YQް$";NY@σ3 dͱҽ\τL}K. KX؎/98q{Dn3g9jP|2L@21tF?VzdJEEMYSBW} ho'C= b!M輽|tA ,EzN=9J2|Ak煡omݐo/D %wD*cV{pe-ڸo2ao$W5?yf;Œj&>.=+ϽlȔ4na =':R$FwCcE98"ez3Qy9hr-z~䩊cAL? eR2^xK٤N(2\ha"Y^nt`sع29Tq.kDž͸ Qϊ4vݩIiBz2: q.il9o w1ފ؃xyz]$vh@]x"*J7| <4r?,;I3 t=Қ70%@q$tpJ1cSe"8 `E#rS+˟s?I}`iPO97YBحQ6+n^Mj%9U!eꎗ/ b``<ŚӜOh(vN%9D'V+ iWt˱shlvoR(| 㷮Rǂ=hɠ[=o=FEn3$3Pկq=DDvDQ/(o yzd}FaƗT;S"6@[ʿz$a V>B:5mX o^[`1I13E1ZGhHo_V_֗A؛ūDPtZ!k%R%~aCx$@8l5L=EuJc|oӿ?ҷ~d9wCXΎ#а&SۑQx@W8? ?,2of?5郆 ⱓ7w.4{K?(ϘE&T[0Oc< NF6+ iIy~9 "67U 5~ m`{+ 5w^lS|(.aGO o6kǩ0FHD}&Q;\]f`20*D,{_ +Ӛ,CzDTU_~ ie< E*eN"Y5\Qa3d ؋`֥ok;I+R;?˺=spsT@ N>E.dĠLV!FpΎ)FQxnU/i}NCrDGH](ZK4r5,xq{>mS>l&A•"Dʚ7J#F8v=V$ c0}~٤$wTsk[?*;4lpv* ET"5ffa0ܱ΀U endstream endobj 168 0 obj << /Length1 1688 /Length2 11393 /Length3 0 /Length 12483 /Filter /FlateDecode >> stream xڍP[-wqn-h @.!;Ip 23w+om;{Td77C,Ie-yv;;+;;'*-j V #B ؤЗ@e{@x'+dgW@ 6(! 
gTZI{'}@N`3  Z^N44 `BXvάN"70 r9P@ScEhYrh[@݀N l8@ANJU`_g2C,`[@UFe!m_@-%ցqu͜PgVg(rsI{;;GR`'˽{=\_ 16;ń% agg w3+?p`BCrP'?P98`3(d d~E~?e0s{1̘o;|w5 >+=e׿50rA݀Y*WoG2. v`[#^}e]o.U]+l8E,ܬ2`wjfjko`H ?%3yyE_ C}4ey:9=P_fx^/[irS6V=%`a`0xylF|6 NM߈%`37{A/e'?  Rf0qƱPp|R B?ǟa67/!Af?kn߸lqrcE\ nJH ݞsF.wlGޮp=7N Q׆`a=+|w¹ބ bnIpܤ1v!6Hx5'+DTq]nuDn,* .]<נy51i2 fh'ɈB5^ Vk}ǒCĊ8'?#~7~4"Ogkgkc;>"n~e?qף;i,3Q DwUrk%X4@Zļ'/MQrq&ӏáABpaDտϩ'Id#h>َQFfbe ^a}F(hv֔Ժ^|Rќ֨~BI+z sVZ%hx7 f|#7s{5p)@_+,4zA͏3Top2K@dڷ'YKo_ r b%f@37JVsԁګ +'mA|^ ^/r\ZV¥uA?a=Z ꬒUyE< 4cY\K䷊_ymcbgW;BݝLS/_ZcPUhǣE`,^}VQ2L {*0qNd6 ,7h t Do.h3=rdi>dEOQOÕfX| /2e|"Ony:X#lI;2tvJϳ#HvN,htlNSRj$<+|MA˨A;dú8+R1~j#:$ƨsb̳za .A;sJV0ı6Yڵ Xn'e8[]b@eRKtB<ŀ̺Uwqޜ]cUhrmEAm th, \_]yM{hYjzd%Le}PzZ0+/}ڻb1&!DXaDՊ %ʻHyc6͓͙$6bBV{\68\iu;_5%Mjˌ4φ'f9E>SPe r=]=sjk#*)ޛe9#1,Us/>>Zõ_CpÞÓh)w-yKi@}ph`]=tdTlN [Uߟ0dEހ|gف1(Un |׷+dkk(TOEApQ~ 7֫5;pSrS*NAȴ$mѿMzr%"]^Rvr,\(j/y6eش]q!/NqK `]Nȍ]w»L%q靡[vhs :f7w٬3%;iq|l-5V$^ Q[⶿-0> M.,r:z}^:kiFC4>ڷIƽ PcLi#T#ϰI(\g9N&$Gc9K*̡):4w<{]#1tE%Cx8OOrm*};`9"=55%SdOMtYja*dּ=[D[r[]/p52̄/XGČut ;@˧0: zn4ŜK:I,f!jGu̞_o{=#': *i9(`jCt\_.NE6$CЌ1 kF!PcN,K{_R7#Q7N:Q>D|-ʼnrv%K)UC$`;!F:ll C-s"]똙Fxzhglg?<_РauǑS6fcWkG^y(ޅ-pJh}]%ŭvix=E8o0Jó5T]EE#djhձqu59 7]3ݠݘdVΓT ? SN]-u˴3G3 v){P02cGI\54-˻n9=Qlern͖t>E(zjf% "sbD=3,,Vt]>N=쌹5G"PYYlN:{"}W/Ǚ?3KE& d@PTEqihyy`K _~mSCYM̏j%M n៌}| Dc9u&eB%mhs!1c¡5 A嬟c<'8ezpSBBw5,89r|3qe [eZqdxC?c\t59LSZ=H#Mےu'ѪBjg*o0Φ`,oCNVk9:;f !pFQ|NJ_y4o=h5f3?:;TעoWCg2ےGA;Ee6V)SxQ5a|WAJx?Z$K0نXqs7rn~6&'Vj;鬱 prۨyl-g]鯚w28Do *7!orKw6/uaOk_C809[.sˑ;V7FXhك۷90ٿ&B>Y"uS(ht@A?̒Fd ;@F"Sq'KV4~LygVͮ#NRï{`ԹvH]%>p/PQI} uchxX$mίc-0{Z? `zi!PS1>CY(Sn_ɼ=$/V%M#':Ѱ[7xlLV<8k\r2n.U'wf`gL yt)Ui6XqLX/l&0Lv?77Ƴվ|DbtL_:oV(qA>bMx:5~ʏNda[]`B7\7FM2"b$o(犷\.M"p 1ѷeԐ+=^kgR]~dеֺ::Wt1WTjt:o$[ڄJctqԯİBYU~40%@O(u-UF.6O*-]{(3vsYKW L;x%|@ 芍$XA!IaVkI?MȗũsۣXWU(:λ}ډrJ#i~ %J~u|^"|T+!A`JUOpQl,e.\?ϔ/q'2U 3%h21?7%Afzv洅w9st9vIWQϣ! 
޼=d]2oTEjI@{[~t 4QL .6Υ0gFF#WWği qľ~Ҙئ] Knn[ozLA^Vca< ̩|䕒Ȳm:ToU4&Y~֤Na3RzZH$dPk9Ҭ"v1= T4 IKŽdeoEUTTVB*eex I V%2H*t;kæIcE8Ԩ;l6P,V~?[743Mx?NtJmԴHr%䒉깤Zrt?c3#Hy,V$2ޗ0YT—<9s<jH |'Voe2f6[I6]٠thm5IY"iDO%noL EMHSFewdgҽ6='ڄbfNڛ8|jXZǷVA41]w {~gxʥݥA^I: 4sЀˤ^w# phh~DPv>҃MgɅKBQu('AæyI5\bQQ4[QMe:o~ľ[L>ɳȈ)y㿩lK*gԝ~_ɗu\' # +~n3Nԃqkgݴ-B!Fi`2$bFW,i=R UY^8LYJʼC^ngK49T 0H!l+ct+-E*!3{P`NRoUDžeciR_;Թ<* ]L=P3:ˉp0/~Ͳ(!S%6ݟY檴 ̃`ֈ3`\A~jHY՞h{"3J.1hmey5GBiy $l4W!_6$~S=2롹v7LFG3Ţ?1Gv޳0~y[25o#@"/=~#(TXbтA4~/~ё]ɧ^;qKwi!]KI'uM3P)M&c r)j3?'Ss1f~(IeMh ȭ43(qy-llI bkxɏ z;'.aNv$,WR 17 5|B<To76)\Wb1%GR=ʤ+y'4MQ-; d84.'Ngz/'Թ׌5p,jPu@2Y:# 8!'nK }ayzlKX~Opٜ4lJBhi}y'S߽r1Ŕ{YxGgR))u]rѲIO9vXZc0A2J: jo &ѣ ;-a%0bTq8*U7ٻEdB$kԔk {^xӧo2Dq SAP p G[ -wjo%Ę~f[]}C]5'֚Գ_,Z&YAĻ-ME_e4\#So|83DB]/Z};\w|;59>R2=:<TZ+gPVŒз#V , ,Hd Ba+6v DTeR ~8'4|Y>KL(k-Tف9V>ݓu,("-EbL]ۂ4/[~)i/1;B_= ȓEuTL$C<UijR.hƫat7G%*F1 ʎP_Ź]T`d["KJ\w6:O;'₝{wUS+BVS pay1ޯ%Ojulňsھ "=cfzے:1̡Jq82nƁƱ^YKP#GZYmpn̤SvZ EU3Dg!|muzE_d2z9Qy/AL՜_~N&wnd׍uSw7bJisHGd" ft: {(?ϗ\ȽrF!!,mr(^}㆟H@?/at/WoE*]\W&7Nb/|z\U2&dƒG^ H.5̖6u}GZ7ͩGȃUwUmuI p^fiG' qvT]F S#6)z:R!RxnMHa4!o5Ӯ8Wdb-0Vu:b;mV2~{*E~s orcg8)}*]n닮``ٲc oq"ջr@݇3`yIHs ⡷  X?P8l XYb!/Ut|kþ6V]Y`CNLb}izwc!ʉ>G)j-CË߱Ω&s&}vtXK Ltl 2XĈf T2yތK[Ek*qhA5ȭ++ڋ3[*cح=CYPuȂәV>oX55krf,ߡ|kY" #<+1ޙƘr#"xCى}mh|0 w0"yȂlT?Px"0@"Gͻ+B-x;ԃ׈S=$B.bn{m?#mz$F Bcgw p[ R|$c}ڥ:zo$s]xbgp_Y X -s`U]~Y"MmrO9tOn&o0{Iڢ#q^>?,'~oKگb$?f~ZދȊ e&Ú",.D"\YҔc&A#@~iTҪ -J"üLiB @/nx;DGB ~g@aPe78PYr|=6Yq?\7>:7v;}{f]ͦm>Tӟjv ֍WMr Pu2oh8zoOLK12`.Zz(Y"b1แ´)#|يDGu)`ͨݙ7ݘSgo2j|{:$zml}vEho8eEU7*ւ~~fϡsfr?0CDI:ؗ{^,ƫipkJ ugSN h 2n\pˠ4D$hTj~, a^t˿u!e ]bNs\c<+r?~쾁9t(Kq?};łp^Ki8f`F4̞+{˛C]@TQ41*U !oIEGd !z} fӌ(lEueL&=0fׁ{)ODUy X^5=f;׆g0?c\G'y~b2Ff VjpbC61 U7i _K#i=FW1tC;g+'ix^m:Zh9(; H*nx-?Xb?8[DNǸk b&M,9!؞BE8"}"|uUE_rw8[I+øIX9~F䓾$73sȧqJ$v˨9-tX;1=\0Ax΄^Q=jMٰ7`˫r[ &4V!w"o2_l hWkWysOc_BDcfR+G"3 Ǖ zx~>}o (AyV(UDS^r섻Z-ݍzۆ(6mbD0<^͙ L.1YY sTuw?%< qD?<=#eI j =< > _YιR)mR,wti#'g?gk]u.B*c5vBţXr&wvbm[0HW3=Wv>?6'ۍ\Ϙ"eX`%{ <  ˋR!cQ a+XGpNv]Iw9)#?o6Y&ڤL)$+%փO&_<Q\2IfASގ[׃4{cS[Y* 1\H<9Bwk渉]^|p:7Y 
&K8͗ƴTtWf׌ UQ{|?vZOiw]c%[V,ϩ۝ᷩ"_ l;~$a~ÜOc 5M4aH%v:k~?48/ Ų%G͟~<Ry 8KXz &3ePm%uy4 ]/mO} ,Fq>!={8{?3Yk)GO4YwE+?voM1^ ,r](Ke|#oJ/]I E'5St ˇph CY}O6+v׍gtgz+>@ L#:H=7\кhvrJ@TEk"Y]A@j-o) X۵b6ߟxv\CI? |bj3 /~ɋ3i]Yrbz37FCE["H|=" O"_;YP˭NE;ɐ:HZl'e"Wfo`[2 T?!\?mKX#S8r%'o)SL`(ܖ_$[ɉ 4W~쐄ԓaJYr}4vü;>@K%0TbDN^wچeܪoKJģ/Sq9ky|*ywߞ/?EɅD x" "^S5` CM'ĺl,l2nòDS$em&;/ )0q~Xh$O*FjɚKMQ3[Cm;rdæub%fw.iu#.pH.E|ƏNQo T#z%ȰJ8(Lf5r &zHh5=cZF^XZ'3F:*7k(߫H"Ѱ;X6'3.ܚZU SFUa!+ Vۖզ=ܦut4~զ k/)&А?i\ endstream endobj 170 0 obj << /Length1 2526 /Length2 18207 /Length3 0 /Length 19662 /Filter /FlateDecode >> stream xڌP\k cݝ,@pwBp; n9p-Y~ Rea;#3 #3/೬ 3B9<Ζ 3P b `app23Xych 5t02lN=-̝Ay@mL` lt06:m@ v@g Aolhhhh&@Cpp6(@_% m.bn/#X[m@..&@G(;@YR o̿ nW ۿ l m=,l@ 3=/CCk';oqaE!dhadaWLY䳝 /~@cP=}Vvn^A&abϤjk H.3:8X@؜*,A5xLAe},L^N@ 럊E,, cg=:H 4;@4~,M4a&v1ݿKRD``ecpq8y8>E,)ikjYPC@o,9;cl `<e$bm_?zC k[vM-,j% A lkfFZ8[M,U3k [_7 @el=@# ڝ(fklgגrp  =Abm !019\|v)'I/ѿ'Iq>#n;0q1 ;b0I#6;b0I#w"@\d;q/qQxG .E(#w@\;qxG .E< .Zg@~F z:+*WJd_ flg Hؼ'kL @5Y[:~!Rh/d f @=57$uT;NP9ƻw t2ٽقPW|AF9 vK,@W*= )2 co &0n99MޫdSxPsN3:Y:=ffr6wt09t n j;9P$ORf4_Z3 j&pc؝4KB HSx+8܃-F}#Lu\4 k {`2XAEh[- S"Y!ޭ{m]ŽJNi(H9y\2hg"XZ wY7X:x(|/ e*N]xxZD7cӔ^"IR8^E00xvjH0[;.|#ie EU6?F^ĥ3us+TЧ7wHg" #ᵷuy|#}|Z39m(*) Mxg^L^k9yb?>P*v E1{SWjM=,r4gL+[͍:a@8T 95>++oaDo Vqِn'ft5ǟá2C  n0%84jrB G'($zn"wPpLC3(EG*{͔RkucgP?ݔli/V}MӚ1>TYR=N);{N^2GCM#vwfQe"*{yT~v/J~-zxVqݦi{O"sLdq,ԋY˛St?0O{rPIjmJz&VoR}'buЋ1F(.`y>u-F\W;8K[KxEI^9jLEF>{7b.$m?9[LjHl+ؙۣ%R/\O4{~fi;O $-4N o2|]" 77'(|CJ}YL[u|8C>u$0SYz6?:3Knc d-!4dSr{tYTdjvBioqI"OLMU=_3V L}&تe!//j=Ѓڝ|UpFS .5-k>t?DZ`#<7D8:3.$׀MO>Kj,VNI)sg(J§t"+4GBq5T{!`"_JTr5-PD2!)Q&J].[47 Ц gmRPf26ye•پ*! qdm2\akDQ>ft0OpIudhXR6$Wj?^hP^~&S )e6cF802V.i8̫@g1ovW Ica݃-Yj+iEk~כmz *"]GUҼw[9]Vt"A€+WŸ4OnD?>qntRsxZB-k*QU$[F||w&Vߗ0.č fOs&L0v}I-(#@Lzx;is/a)'m"AQYOTO!toorjP?d@E͓1@C<(l ,DwT-Աy\^2khwj4V#+l_NRU? 
4j9Uy:tjd-}teY0sg =DvJVaCX&إq2$nHM[}Nt )3=w)e .!90N׏+ '\'Ƈ~ 0`T!Mr(S,ik0EKoj+)r.t3gC&X"癗X:1GbL#!W/0vI586 );iFԴ&l 5$B9l,FX2SH2(A]F!eRG1nQp'⥠9 7!ۤxR gFYH fpvurNrFuu]-.j xU]-`Xfu"GEwӥQXh83c|gw5'숯HB+⠷_6c+w&>$HB' gndMN =/r{2cm\Ev`ژ5tҝDc3Twk9i#q E]YPr'P3- 80nN(zi 6D>%/F+\D?{otJǮ}B$dɓ#hhf}4$#e`g hzopy?cu&"+NKVX_[7e:/ dHȳw,+;t_ҋ7uL⴫h oZ+sWY2Hr ~ hj qDdPFNJ@Xl┋\Ӹ5)߯@kќI)!:x'Up]I;. D')bxP,p"Fl&IjGo3> ' }XR܋CJ7qtF \VߴB H )ٺCq;IFL^d[ ~ꐷLnǯX_ޣQO9Ϋ%GДܧ$ހ)TM;b~6(%Â-![7$A֟nT8؍Lxlj85X^ofuˡY)=)@yR^(4_c^i#fkj%bJpel/%'b Շ:q*aW衲S_^r /}+FӃqɔ }Kj}z>hqN9x* ب>Jw 1k7鳅֝eC+EaL]l"##K%WjVע~: 27l{WR ƕL 8pq˒GNRCG* N|Î V7}-7PIy)\07l@vpƈC>al-Y΄ʌP"Ae:V]-Mԇ:I?~_ܨ1z$p&t֥*d^UjsacLK̿1TM̪ ^}>T (h) #J1{'r0F`eqlJ> rFǛN‹Q}m]U2'2C0כּ VDS 5 z[%G0e"404t+},uB@9%Fxk|bUvlihqҮ -ɾ)ίh# bLvBH\Bu\|NOL~׵.[TprP[%4#ߏfk~4NM9nsRJL%|YqXoȕkuZJwCh=魵9< GodM 䘂\i>hq;vlT[t*߀\7Y<q #DLmXs0x9g4V&;7ӶcۚL*i)@6 Zqh٭be~1Y?#0x<> 5,ejXBQ2%뗁a=t!!+dѣ(5vж7u0]CGy &z``طa'aER"s܈|ÌW}2weF xɆ^Fv{ӪAvMnfDWlqQޱJDhT({ʏ v j:}0u-T̆"׵O, +~&pUQqȆRЉF‡A6͖mcMj"~7AF]9mX8kَDƨ~=`>(=4eտ=~̢ܒ5,a+4ee9Ouĕ{VXS\a,hkx-̞Kfn>,5 :\oFGEη1,G > ϕ^;'w+Mc !`ICS?Wxp2RGR=JrNI= 3s?W!(5i}Hqw\{?o8T1x;7НN$"ڋe9X8 ,m :6 f[t.6Ǻ] 1L.=z%Y!k섑kzs!%|LEW醶/ځN(ܛ(%qRߎ->Q&gxjȏur%]+<M"g!Z^*5f70Wk'PqHO`;M/a\܊,XQNFV;+)/RSt'KTb|9"](Hw wǍ=f|*uZ!1tԻ0Ӎ(t~lfcȼ1U͍"QN H±a%9A/ @(v8s0bXfJ]miu.dVȩު#W;LЌ1~B"{%)N4 @G`#VЧQO) Lku?\^iq`8ȼ })AϜ"Tm*rպb6O}MCWN6of[*;"zd,f rh1,.i~vvB9aHȞZDOq[,naxnxi7<{A7 ǂlWPa^mƚT_"{sUÐ7~-G0mn1ȏw5Qo肨y黗otb(gsr x ^s޹dQKy˰Aާ,;$|,\S.66\k˔jP܍,Q/u_+u|lR>4x7T!(?XER]J31c5{\K@xOmR%e@mڷ݇Daf,!y2 `YՏ7EF֜wv3OAV!ugAfZ78' >1 {r9b/be Pih(,M.`(q pܪk^~+4_撻МV^c qEn+k-VO ٩8H֠oUjsn1).rI<;Tg^f kA9̓@}r1b7eFu wG]R{4KcּWS$fFm t;xޜa3n4hӸD>QNj!m O H {]g2!(tebXAGqZo%5nduYRˆ$&S/qߵg<7!tL[>>!l9:j8c۫#ǪK}`ͷy[NPg].4hQ1SBI#rqJ|}P*TbBlYJ/`o>|͌10%i O|PzPoBCVdlPݏ"oVNseָ#w1F:kףt1!:,oh< ! 
iNKX8:B|>RCo"fD9gq6ݱIZV^A2Mz\\}煴 އɲYZlϣ 2*vϯ$;8(_t텁jC.# !V-&'܄m~h LGzu'!w.ّMnBUniwh[?I{@ Vآ( "5%$ۄP1~ɫ/1Dij)MC^hY-,CC\"m(sُhvUNxV!ߠӜCU wOxLqXQqo(ԕn LZb*Dd~}ڦ]̵قH5rj5N˚Lr k_7vg g8Q>>a*?}^o:_cڡ=ź(yT;4#(NcK`uoWHGdn ls3cZEϾ1nVU*PXoHi++8k; þ;DoBu-O-ݰԵ"תrā`PrhRМ;f XG+.OGn sO9 ց?1`odEqdIq.&@;xsԄ\ekZ$wlc0 ߣm~JpZjSMƺ~N[~Ӈd)"k}>!.6ۗp.:r#(t3傢bD~H-YRRs`i2i,ȏ C2Z1v0F "=BD&}s#=k"%nr&sו/dၾ [zW&gxm>C$~O4w2Oݥs!??LŮ7gG u (ٗ%,S)@̍Ё"B9  +lIIi&t幩| V\} %cx@zw\uM"ܜ{A"(Ev]&XŀU6ޟդ?UJ9±_Fi|x&dvqE.=[<+aSe!RdL!FAoo}v1-H'Rc8:mfroj./F%Q4lI>C")m%u  $;/Bv~/){ 1`dQWB7"|>,dP~'-*!85)V ԝ;"7(>~eZ$hyVZ;྇3)/Kq?V眒9X7SM'-|o=:+H%֭{j_ܖ֚R.r}ApqljTPC>LL1.:9&LlC\19S3urP3`k֝% <C Gv TCu+ϭ%wI?LgD\ P{ϝr:~K:C8tJ4!Ѡ/4R Ӹו_ u gA JiPg{RƠwozt?g[S.Gw(6 k25#렌fQ@ĂBfv0?X>݃::s)&z  dzuyJ |\̌3mGN8mvQXřB/};kqOE6\#E]㜶Bv6D3%5IrW+G-Make& YI\/Np-h]cWFlQ8 ,Mf87v'inʖ]OP]46$zwAp%_ LtEX+%!_ؙY_MLf <$tt`>j \)}) O sى2)a`UYclwa1Aztb;酰kA#iA$gdA']$/ニ3؀i$vz3W3':6p+gh7p?G_~˸4c7?_WRnuZq4t+|kSdfɊm dYYHV5#!nD 2܄$ _,Nn3+KPt.NQSQz媔 DzJOȦ_p XO.)̩x2,l}f־}N)ҶLhN|:U*MM a!ŗ)ՒAѦQ bۉX+Nj T ݸX95k D{Y9ba:I _2M>: ^ е,5fk W PfNA#dҥ #YmGfoMBe~Ss< FX3"DVr:š!Im+,fqk/h0W!vݰ++ <.B? Vϧ6^("}Ym7J*~Y'|yY4b[ob^sF]l嘷TuEP\ކǦxӌJP{bQ|N>Oljf}iLR)'mrީg㎮1Z8̏Ox ݾE ,S>!VLɷ~N*@fQ Da:)eZK#'0)'c>E5[#H,4%[uC%JT>K lgd $T$j?FVbt *R&%_M>~ ߻ءh~7>X?ݐ9͢s#| Dj!W5B^猠͙%eME5%q#\5Q)O얝cUU.[qda#Wv5 jUP7iM_]XoEٔJFxt"'u=z~L--qdOܳC%}: 3)3i~h!4ۂ>g#T!63GdMCOoO`92>%BgM,k4V>dTta?]șf3 Fn Đ@&=Et3؂~Kﯴ"=vlR~ 8{ZFI#"zقL[`oˉ8{cS];>4F!h[[SU߯/D3c1vg~PI D#̔Iu)aP8Kl;d8S>n# X7Ob-2yv5QyTM4ìܩ FZJEw}8b!;%5?C6 : Æ:'NYjҮ*VG@a;*b[lylg v4wưmqoVއct.9 \4G!Gf3$Uʗt[|= Us̑y0%gQ l-!"E0$jQ9H_/`~N ;Ehx hqv0K@H̩ho1b#(a6fO j jOO;:lүzT]6.M>䊉hFbrʄclpNVE$!Jq2kj"C).Q_8 j`^7`ȵȞ<6U)&}04 ç6+/}v=o/I )<>~$0>fq#F%veG c25|dT.§ܼpPC:Z} !,=K<ɯf횄 IqZr ljfm(1⏽o\:ʚy1§,$ӷk5x+CRf]%BOkf3c͗>QŌ7 K]* ϞA2D˫djJIZvyCņJ3'6f%Y=hun-cN/jF:]MQ&5ǾYBnn8ծC{*β ]=p5߼,e詼_j#Q4dЫ|r KinV.He趃m+-Š \rA܉ƲjJ+)9=ӣd7bϏr4z|#FEoS!4C]pPC>.)S+&OhV j_&f _ b=1?vdwlȼ1|Qy6]ݤjqC!8n7qٸPP#g  ߡ@١řݾ? 
=ĉPȂ07 SB.3C#B#j ڢ`_pnp仪c24KD)fsIF){- ц6^-\aӮ =JՕ-iTsk1ǯX`Ns7;Wv{lltF|X,A 酾Hi}`3|Rw$nbߊs~B4.Zp[5kVSl`o A@aE`u?9%4 ǔ2-*eY{϶C/yԄJ{9nf@-|q[8ɗ`DwQ]Ԛ=8] a-P'c~)*Cbt ?3٨>6/]~Ev)$e2ЊnGd[͓ۀ E7qШ3;z#Ԫr9x. +JH,:qJ"**jIH4,,b؍S?8ޝ#s :Cŕ kD'vUFeÚ\T"gkkI]udQ7mFWZCh_XsёHѿQf} ^ޫ%sdw}J[J;P|Yvb.x[|(yoohzű' kR_MJ06:PJ<UT3HxOK'tg;cR 33oם}WpFZ&]W*?%P&!E 7%XbCw62v(i 1W:kqs\>.u(3yZtw\d(5xzS3ꭲɤ%$98ߟ2Ѣ%;rقg5-j -:m/EXUG+症ӻ lU2ōV.nj +.Fym2Ke,bRqSY'W)˘bAdP+>{͟D{jURڬ9eV#IĺO.ٗӏ2Fs[*zȏX:SØBF~We$]]Gdf@h_vJ$匉{wɺ44D>*\K$(ޡMОB6$JPN{ʒK?sbg|z:w,Hycp[_}`O~pτVqU@$oaMJDM]fe8-UȰzkֲ,o(7&I{`CZy5]gpvz +? !Iݶzd [g0mmv7DK|g\:p J.`l蓢 $i^Ƒر4>~f[¡7&q-PjlӴ™V!ɰS|71]ÒT/hk; Ҍ^G ȷ?E¡GB?&ʠ&1WFhMu RÔGH4mM(FF62]",) y)E-jfC#,bO {ޡ^_ȁi%j#dP"rVb P.3(vCH0\\cSz!rqVȥH)\Jc)PtAl1ZHzrziBtI`1){x I|f#g/tb9cFCPñ[M endstream endobj 172 0 obj << /Length1 1517 /Length2 2610 /Length3 0 /Length 3577 /Filter /FlateDecode >> stream xڍT 8GAuyc͖%5E2f1 3XB$d)Y,9RҢDYC:_R]]s]3sޞ{TfɊ XL|$^=4:"C4yy RDڛG֫؅ Y>٢ ExE+A!.658 ?X&!X0.- " !Wu 3( `??FӨѿ+WqpZ]gҒ` ߫)02id:`}X~ <4kaނ1"}%r6J]#G QW0o#Xh߅ GYX xZ0ABL[( $A,"e:B4Ѝ΄_@+~=0%WB ;ЈtҲp D#00p@,V# Z!1F,8tbJ 4aٵbhJtO6:W{0zsWNa cauWE] ,:^y]@0 $"^<MRBR|n˲uƺqzt.Ϩt[$+Ӆ݊>|Ǔ7bp݄zoQ#h}$u)7޾3PF 4q͵C;I;SZz(!C륶lK/Zgو}r^ɱpDvrvNYm^hHvjJ8̑[4n{Xs7؜0E3/w %ɒ0ŸㆼbC&Qep݃*sLRXO꛻=~Ѝ&񔨐UXް.-sطw,̢|=XpYrǽҥAuA; 1ilWe蝚 O^]o*%e-^tYR5CIЕ5siz඿7X-tWi\ ۝?qi\ ѵ{-5\j[ŘWGkγ:eLв㵷vޮmG i0x-mzhbpqzSbwT?oQS>5I#JǠ,pMš JA c֠WqĺN1f {NL+419~buk ל9_"84K=PbsN^4:TGJ<<=5'}Q{nsV څ&Q.rݨRdBkz3piŸcG dw]+LV{m_x{UBVS7T <_:.mL*CDLK:N0m骯K|q2ZdRU|umr{>yJMW7cGE>Oǭ9Gn ^v^ܟ1(p|`mT:OSkDM}=HMheR=QA ;r1Gޟy論Zz}!Njm'ce%jQ .ztdlȜv䄣 $;Tm5:+55UY%Nz`ܣr{lQ7=v?}]oLqP?ؼ98j{x$f{ 3;6'I80M§$a?୧ K-̑Sk ۾8YD0)ڰfHuk6OW_nvי/a~UP̿Ōr-u^Z,Qv,\+c3s W k'OP, 킆_$mhFcda]TTՕAJ٣ %gfI.ِmL"5zC3bRgPVC~wSGܻ% f~x@fv5뇫T8g42ǫT Sr51Hmu INLL$7tXҒҌԯ7^b:b1Ý%ʁХmx){jj|]E )ii"X\\L%q.7QYՄSm;&_A= |sfҫZL̎|ӫp$fy[/P̝vqJފ*HO V_dL}iH;r_/̶o* yܬ2;" ##3^,}J=B$$"՗n1HiJlp>jhXkOUa\Cb3ݯS5NJo/"+ \.XJUB|{39Sy:M%חg>ڜ6U?.)ކG5 >s? 
n,_pm2EBYIʛkmݚw4 ]v-=8y?K NnmF*&9H]g04G-N@cK5B&Wy ϧ;>|׿<*WKilE| PEނP}Kx߮}v;6h2#sZ9V!ս]n{wg?H CÕSd=kꏶ:) U gWNRj  endstream endobj 178 0 obj << /Producer (pdfTeX-1.40.18) /Creator (TeX) /CreationDate (D:20200222161351+08'00') /ModDate (D:20200222161351+08'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) kpathsea version 6.2.3) >> endobj 165 0 obj << /Type /ObjStm /N 25 /First 200 /Length 1072 /Filter /FlateDecode >> stream xڥW]S:}{֗?f:!@@Rt`v ߕd$IV=:gW+|`A\dca,drINhR xHs#41P/ʣq?` }, !.\KZ/ jf4+$W!M; |=&A  2 #>}2׀lVbOe{<З^Ki#<v8I +I^o<~evnizBFY&9 IJYt-re>3?{|R]Ue3MZϓ\,m9sNEJ=K]4*XI1\k,2{@_'y$rMb&%q?"JZ$r*/⮴]3H8 `kp{p{H[KЍ yQ݁;ItZrR-m?v<_|9usOǫ~^-5xYKyݤjq)7;:ox6ͽ]n޼GF>dMx!7oV&xzUgA ~Eڟ'c]P14zM!4I;.3;4߶rn CIEe+Rw ؁[>DQ$q{`_YQq'=Զ`LU>*_W¨ć0>NQ`SUy7dõ"|{Xл!{n endstream endobj 179 0 obj << /Type /XRef /Index [0 180] /Size 180 /W [1 3 1] /Root 177 0 R /Info 178 0 R /ID [<8E3F1DF3BCBBBCF60F20D1E6D592529C> <8E3F1DF3BCBBBCF60F20D1E6D592529C>] /Length 494 /Filter /FlateDecode >> stream x%Kl qw֪mzU-ܢUz>@"RWH\C'N:r!$.Dw.3/ H+FQ EI[(D|t eDzP}":nNQasnn$EE"Mul "c-(Lynh%EQ~T,!*yǬXF\-9ˉk`XIL1b;r{H^ 9fb?瑃HS'aqY11LQɱqߣ> stream x[kS~~~k;fW{יNf$!䃃)?ϻd *Y^y+$L11"/a262=sL1;bEd`9K&]1j&,2L)ezS #Kc0fT)bJǤd&6T1+PjcMձ`hjF;ϬReS9KGKIc^]YoãQN_lv]$L~8}JOG0*ͳtYXn\ e,LJSG >슸ES| <֏v¢ \[QZ T}k~Np@Hͫ$ʠR qGG<))9w ;uEBxxI"]0f 3|K qeoI|kr9NА0R&f{V-=\edk2OxhN&`@ҕ3KꛀC6ӄ L;_ .E4!zsLt~ؙw<,Ix#) 7DF܋tH 64+z FA\6_o  +݆l L?X`1}+ Ut[ciXi,*BSXh,Js)O]<6bѶp˫%ܶ^=~O|z}=VP[1Tהς¿)' 4 Qi PقۭALTcM;[}96KSDK KϯZb T Pa's2FQhY ZPذ7꟏X^1e\t!\5|D@1.Q<Н.%XZUB&;|7K7AJN '6Kz> ;g?l\dEyFssx:W"vA)_ sc~wU/5p&L @Ni+/4KFǡAQۄy{\BLM&g>ί'h$%"bvy4h04T u)*n'[_SY8-`mqu[V>[RhJ6%}*#"km^V*I A;H|чB(un M+e:슭[B^% t)PLˀedB8j7nLm9xYAUQ |XRK; p9ծWk]՞߿{ƌDeRqbհ+vr $1?O,LGn4Fx8+mGpL$؍Fc珵mP؄֠i " (\`ߔLJ9uy' y#`#̽}-qIi%BL\[WUkYiG6:R$BcCƖ€hklmhD2*>К*s7ʵ 1[ه W>:w82n)GUvxwWouq}ˍokTK~Il`sC '~>+u"uBO~ζƭU42T+^BxLŏPꐏ<,0W"YLˢi-:+X:"v |&EV,}ՌIrYy -f ζA=us#_V fk4JRo=5R"Mĸ[+\Ǥ{ Eg Iǻ2dHrB H<"<a1 
]pHS}{^[3daCO6;q (׳bJ.4SMQɥ !`HRjțv=]endstream endobj 91 0 obj << /Subtype /XML /Type /Metadata /Length 1329 >> stream 2020-02-22T16:13:52+08:00 2020-02-22T16:13:52+08:00 TeX Untitled endstream endobj 92 0 obj << /Filter /FlateDecode /Length 1811 >> stream xXMs5oGqbZ:&TVq8L1ػyhk'!(,kZR-\jEK?gq]vrA:Z> -)*C-7 YHKCNa&d>\-~鞬ZYKV:gu;MvWa6dQkbwis;ec2Qwuz3w-ܜ©fL;{L߶¾(AɱnD*{o5Y\ܸ%\WȊOo娃e`)g)ͬ <\F;CeDͦMlhv{J(}>6'r9,f}eIPѾetյR6!Rڙ=mڜ{Frk1IZ%>Ո7󅂻)Hu.90' xTfoqYU?V&" 63 wɒioбkڬBL4E7z5w:*LE& -[;R)Ԭlߠ^M.TdtN:y + Alł]Qcsvї++q™J>pB=P%N%#eaF 6aмzޚ4(1CZ^yDcMUP ,0^@Ev;rTDИCq%ix`ێW,T-jL1kQu'YLӬ!@NeC<M'YBcww'ߦk7\UU$xЦ {ؤY|SKrN2p= ijm"ll"?x=ݼ?YA}2}no̦BG5VtվzQ [F]_)NJx#Z˕!oW^RmbϮ B%`C7h38c A83L̷bu>z&7~@'Fc\b~ڥXDН:OJɸRCNwPr/l$w :E#p8;=Jpª15SNY+h0Y%$@̲` @'!qgkol,2a,6L$bu7p+G& EobVhJC/z*y依ĭ8ѫ`jP{7-Ü4>Yc&G7ТV썕x7nCUb%Ǫ[Zܜfk$ȻN[vNǕ󣨏KO0jd"OڀjNk]BDf,'$`K_]Q7ܒl@\M|o*YxjMC~RA irCӾ?&( r# %ԏ!B2h˳ ࣹB`}!I]U@S@R{qg=}TUe7qlTjqVov-kM7hn5F!t$ڷP y0lџ<&Bu zi]۲nKҶKCBɿW > stream x}{LSW{Ŋsfro7tx F'Lt:"ԍ<-VZJQ (*>0DMnČۜls]ds_I7GIĤ]4O DSFˆqo}@3UyT> 'If5Zc*Ci͔ϒE̖͉-U+rTlYbAPClFRvAdd~~~DZX9HBC?Qhݻwboj[>7Xx9A ^ v:#[=,Eԯ'b0Ɇ \7Q~:/z\j-,ae̞nX$_6z8oO{kel(B_9/Ґn 5(#gu9iguMWWwv׳.em3~Y#|F<1#p&uu` ; t5dTLJʏO1'BN4N`4 kp9w\w5,zLy"Ԁ{www*i!}lBski_cX1e_\Q8CϷjl& F!_V oB{;hc<ƒUxPnzMv^DBLB%Қ={,~9U<pG(hWmX*3?SwvA 8^ma;]\Wt5fʕ: pehy WVB:@Rm,N&:$FBSLXlHdvC/ԁK+D ah0/.0pyv4d[H  fgjnvEO^c!hy[> stream xELgZ,T-2MP)8:"m ZB hO+?j(JQt KPͭsFM]pMgclyx'y|?X q<63+77)9L\0O"+t#4KX3OE񠔃2aMGw&q!1Vb^PNJOO[L'/[N-ӲBp2 '6P*+9θbRŒ)3%2lꅋi+s&-k3~OS+)3VpZbjYaS&N$1)Ya[\l;c l&6 I~̄o8bY,,xKdỵ[TT!nI6lXf(%x ˅ceL6U bZr]+}pr}͚7TEjl5T|sjD{~ HDַ]dwy"pjpK~rZ. 
ݫѢRKK$JU_L=pTzjlb"kͱ?{/ 3u(;g?*h!d Ʌdt>8G7xۂ Z>q\Cd0aK֦,U`Z\z@|QrV80Xv-{]}6F0?_DA F]Wԕ@dFh$9>N>C ˖O}HA)+*ہ:eݣ߱搹ʉzg睇kѼ) (I~-<~.=BFzt@%w":%Msuz[(<?LY%{.h}tn#zA/x&JEytʗ6.j;d$cƭYNP>D_ul+M oFhį 0p H^1QjZ"|v:98ߎ2XҠ\:% }p\Eo[+ġd'zJvOuScS#V_*> stream xVyTSW1TZ=-jU\jUhV4J$ a$/@!+[Y .ukkbu:eݗ19I;~~wQ~(H$ _>tPaH0L(,I}'–LL\3v| !գQShH$%)9d:!.^Ѕ 3{r:aTVɥA'A?JFh֬RyL:nqȌM|zYL& ^Th#rY`3JyrF^E=T|C,%5M[#[qo #LIM6PQMf* QoP˨ j.OR GDD6 O7ׯ[2_F)̦A?5g0!?Ч/^wg|kXcH:0cY nju/X,pԛz&{iY"E`V@ゐUrZ[@)h:Yg(hhA`foLI ^aS+h2 4jZ;`^Mg ~ Cہmcnͻ21,!K16?r旰4.X(bi(naSԇN"6'h{fB.׸?'hhJnbVFWFYh0SYCPnU?Z&{m!xdM[`} \TS4*$7PxZ^GbtCxUsKK*KXrc6ķ-NͮȯKgV٥>wR9fSh-Jf}旷_?ؒS5',U{L$*d̹p2ډ:I;?G!ѯ/Yh8yC ѰkU]Y&Y[n`Biė{=<@B{EvA#fwMe3&$AdBrȆ,. ~PY`WvKM!_D RcydY P Ef8>Gݗ9E>F~?,9ǝ;;[+wpM*C穭튃LsB=4m,f^;O9qW~K@Ot Kܵmw)Y!rg9QB'6mV${n[X]?>@,wXqZ["Y혷_ziJ'i+DB|뾆?`W]N{9> !+დf<{/hy;b*s%|bkJbq$|L@ 2~(< I(XI`2'ˢIʰ}izHs]=ihCKbtz'LEywF<1BаcJ~&=Ғ+SԛKM߭ Sh'C.Bkt}b-acލ(|.s/1(u[cx̝AhqD yD~&^`F tj#Z;hF $s&5܋]Kk0E^L;'Nt٪Ly圱@ FSS[]]{$λW"W϶P[.dq.\EW;zHh%{2ʀr'J@~&ѠlFWUm[v]c昽:N .b9|VV1VfSTlf}=F~ko@З85E5zv.CWM6 S5Zؽ/Pi4`Nyfhݕ%07?{3Ǥ7sdN~K,,B߰{]kOۏtx,_셛 /=£p$AyXF\fc2jyW"(C7S$(П=]Pם_w"o.S5lb^(gUIs8>oj4jN4(Lg> stream xYt׶!P XL)#SB :!PCcMr-HrU\+`St/@PB{ @$䭟ky;\ =(@ ZzƟC0־w$Z -?g@K#>=~b/!P M\;v8)gϚ`;uٶ =Bvخv w #;;,, :)0qlwyۮn$0 v霓LIcYC1cm3%E쨑ZjzZO1j,GmSw-Bj+HmS{j2B-SӨtj%5ZEfQk#ʒGP(+JL Xm*DYS6`j5hvRoQ9?WKЛZMRFԂB=}R!ӿ]Di539֠{Z#}w`&K2~~w/jC<1?p/`o?`zuuU668C54ka ΊnKHy\nPf= [SsjF|%,hKY$[*th IO>$Q߃E*^ơ376 %٧U{\gJuKYI_z)eq~Pyg_E2N `@[.A t1dRS!>ۺiˑwFxv&M4A۴^]DEw|Gsq9S$ݝu ᨲCgѭ"gDt.8K-.dQ(]{pQ`|: [ZQ$-58!0w?6vc*vgA֢xIز${x. 
.J.޿6{,~h;_x}xUTD5S/#OIi5q>xш!8]ZFX<@ih\o0 <@4}[7v!>k@ 4ڦGz!zZB𤰅x>0ئe駹3%zT"!MA"sI"47a "E:t _GoXԃcMu'>[Uij`>]enM"eL 4BL`yDWWW̉/e$$6:ds sCoʳ;F R8-L/'@~t<+]n }H<'3K!H}';RoG"fY^` 9R(HMWKK94%\ CCZvZaETww-'wON{[7ɗi/=owUS`Ytgӂv~rZDE^k8GP9 LSMMڣ9'z17ÒsXM*]$]ʔ8z,KSi ݦz/w(Cj"eN\j1u wcWl[K0|-9bO |RsȁVwkv`f/qX_UUSRVRC-n<[SS%x- NOCq%о6&ώ',͵ȉ;hvGc W >޳pep_Lڤ3\̊#N4:!.EzsX6PX]fZZ]2u Ԫ6g{o_V l*A]Ъ4+T˯XgIc>wFGxjɴ!RV˘ʔcyvۖb[Ro1f=VgT1a[6'aqs!:"P ܢ'OdUAh rծWT8ULJH ]ͭ]\6D${'$fw PWoy)&-݉'7M/ ./%}WO#Puu$8hTEQZ') .5GF\ 9AQOHh|:%.G'd%i%,1UEa8V#ZN$YKq6? [(En>j\rcKkʊw1oGx{9K%Ǽ7  ~9Xo˃HXIITʓ)[mwX'pH%ҪYUÜ^;͈;(0E:dz|Fc-5ܦuKgHV2)Q";zc4JΈ0c$KSY ̮Pz|݇'(}[vIu$t`EcB^|_5HW,ʣtM-L, 38]aUd{kT2 gM7$G/E1'ɻk<˃sHbo]4+`^Mܺ&UN(Hfrjvz@t[?x2iE&T\qLO#.\6z[1qʔ0 , |2Ե?"1gmhe@@hh@@ehmmee-< тfAe=@YzaǤl'E`)Q4 %9TAC8bGpn~Op|'P`TG±\BR l.Qp7ROzW-}} }iAQєX5? R8hJA|W9Y玶jʍ3@5f(U c㓒pli͇K%! , Dw`7=у(Y 袴c81eۅ6[\]~J7k^ 6]ULKB㕝=ƭS'XB2h&=(5 //gWo.u~s{،<3%CKNkJ?`e7SqUH"t8U2LvOC gy)(d|53S܀Rd+4EJ)/G3[N:U[JJ#%X/*uyX,DO:t5kȬbb P}B4#ܞ:a&DiA.*ZdǞ%hc1{%Dז/ l:qA&fbS SHTȲs rrٖ^ GeV_ꤨѱm٪jS)U*0J(Hg<e2SRdý*7TWO!uX{;B]aV-;]v0u!O2oWqwuoM{+;G~/; YKQkzbX/Dͱy Rʘ  fÁ2 *DmJЄqWN_ Cj٪,$܊}.3KOQ6-\G/_h:9=2Xu 4qgs$r [bM?Ƽ*$%\ǛOh?:gj ހ}dƹ"FE5h!*Rbi<2FU^?~jE=g>|< 1~]}Ag7GQk(qpB~0-'S낝$w*BTIPe2$U;eL(n7/. pr_|[Ҙb), 7iH,=#۫n,7xpu0N7z. 
Ǻno'gD'*lNdTB[Q*tȳS~#޳cjs6e:".=P'H'n _n#ӡwnb/S|`RƱՁˍob] %/M\Dyt}[kl<{Ƣ;Mw+QFW6sؾ/{y*7DgukG$$ :o+4 ~a¡;W/0%cQTnxAfUϾ9hw& Νc:=P tKq }ugarC9YCեM=W_aOthb{6#MaVk4(W͚+M:&]i!Yۈ*/Zh"{B $U+Or[px,$6tn.{[&#M)mkMnEZnP y0T91ƞ{* <-5#H|;z!!ElWֲƿtk m#A4zl;6ʌs?B[%F0ڈ﨔J{'޾v7%qJy D3>UqElh]0 ނ_b5rp-3nJedpZ]A"Sw&IoMr“B~={5y7دuHMӃ͘yU3^XOo~~&ԜR'H::̒`Ly{\eRwTȝb[jlOߥjw]ʶ3Ea ^5J> stream xW{TwԂSPqqRն[X;ByBIH2_$DVXʪ[}=m[78vqdN~o͟x[~z&<2ylG_hޛ˖G>,zd0 =DK1>'/o+ 9ظ57lxvmSm,2d \4C}ɏMgEظrJ',--ϐ s~fmlX$*,eǾ*)cwdHEsϝȥb06A-*abL(,Rfg%dv'K7bJ,ۉ’=^l{;c[WkX2|,Kͻe:bsǂ N_,-Z% <-R9Ř"gTFkəh _J7暵4]4nԸ8b BM!8qn,c=?EQ+Pn(-Ҥk5P!>SbZ:P%\A3>#MB-V$d&L)>@[{fxOHc/~ܚZ.VgQUbS i]9'?Ek :WH5/} AW;:> o%U1q(rTeU3^v-ca ӪLeF7Z0 VO$#׽]F|Ɓ;/=.e v%z_@+wIه?=DT c]i;ӋWI)ٞ w$]FulPkT'GQtC)|e"O5 5Q}h# -Fqhef"Sxon^NM* W hnI],{o.fuQmtռ0a"7`.hUjMū˂-3yHx?!vNlm_F 7H`?T C.p[Y磾?oN|zb֏9l:S5M8P&6@IdVUܛ~Cx}ƆNlԎ-y0"oP5Ƞ LlG"X\B|C*"NuEp' }|5ug>׌o^huvRq = cLg#v|7|>D'( -ˣEr̝#6CLGX)Q =FptkDiTnvJY"lpٌLd_ߍ%jR %r0`TBA3竳o|y'W$1|uR'3џQ+c6}wq.i86Q#nIvȹ^UZq(CޫVy aAqw>t Jm#|ag\X[[rJiqcz<΄yL9~gfΐn~w/%(Z\6~_{6߷fc࿗E0FDiPorUUtDV$I%KMA[-HۏCEXBνnƇˣt=N|%J:3SK G8wEkjwVY(sUyEF-g>A߫#[(RDQ*/l5vpp8<ޚFiE3/:p+im +R_dygAr`߄rqLL\V!_wڴ﯇67 V &"6q`7:kt{ *u5}EtSammW=*Ut0Y׶XLY@6Ks8#jKRɁ/|f٩5@Vl-ːknjzc X=mGN _5v=zzn[޺֔4)OD8'~aBp oEcyh˝G'6$OWR __Urq$_TfY6ý hz1hK, "endstream endobj 98 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6253 >> stream xXXT>٣bc=sI%h[E" *۲ahKcWX͍k,7&$X$?<0;3|}oDJ$I;:9͚w0F$ aa˖Y;rPd!FfMcm`pJ,&- y{zm'l;kΞ9s{{]N~wy{#l'-˃B2#Px4[vG,c:w[3 =d=dEg%A+WV'_jp_sm.N]O1s91o݉͝&8^S j<@m&RN$j35rP.j5ZNMR+JMj5zEfSʁzZCͥR(Gmj5ZL Q#(KJJ(EYQ֔ %FS8j 5ZD V0QfT(Jtzb f1]Tb+QI`0n5o>b1b׆ y2fı~:n8qu~rijPjAwWT/A'e(3PB xHZ(9-.990u{NWԠf5yt _*tyj*N)ᓳBQ4br!͠?:slM Z'c8鬤Ba$ۂJwq(٨pqso En{z%9-YW_7t]4lˑE;<9[PjEW1/X0y]N0_e߯U+0\]d`2UJj,p(tT9|a$l.}yJ\d-|]AN,x #f)KKEi12-$,h }!(.A Cv"f` L[YQMiE^X7p7AT$AB(g/]<{eoM]]'9<Ā aۇ~޼qcZK{:Љ/ ,嫺tv`la.5`KO' w` xVI[Uy{H.qgF3Ӱa vZc߭&x*v s Ղ[ìttۈ|e TG_8޳gMwd\JEja0bH),k<@R{vW'y3+f59LO9-%(:VѣTxf[9#f`"L;W*3#Uu&csXJw4մԝl_ pZ͇"\>C&,7ϿGgڂ x/=[wtU OzX$1^L L 
Z}x5䎵{˳\ۭG/C[x0j3d>h*la3|517+yWGKMjXEh_ ՒȧBnrCPA[)rcLjVbd|d< ֊j ĥ9n"mҊ~5 Hl*Q5*ܧ3Uv:kEeBXخ?AC"!=Zɷtj]CWzxRq{va.U.pڼݠ'(_] ʯ>(ɪb26<0;(6U]knG8 Z*9Y5~cx_USڐߖ ufIF1G?'ʽxSG𜓆LA%DtEqܹo`|:0 ?;)} z{JsDC`kIz_ 6#8\70OONUg2}LԝBtاy KkZ*ZҨk9i!BcLF\#M_dD:H&Rɝ[8,8$F |W15ƛ#Wt=;[0,sy/ྮ&zԒ=" FÓ4U| )p_ jổ߰K_^ΐnc9IĢ"TJJT߀,LJI77nOU&W/Ыr;bX:oGOi8/T\TBhU"ˏsE8fu0F-Ԗz.&׽jbV҉(%fFd[WF(;$ ;q鑣--j,DLnKwLiFx9rTR|W'GO*>ZQњWu}3MQȩbNL͋-DeD% /= )%F9ԁ>/8Y|JuB-ZP(C?oP@L@){Rz!E=rK,,㬾{_9X P󀡵$Z^Sh3nV 348oԢC̷KA^>"PK`U EW7DG̕h!-2|bڞ@tP65ں<"=Na4kh^3s܉$Ic¿FK if UU UU \6X>Dy;Գ _)[ˎnͮŹקNH/OVuOR*ikvb~w+鳅E~*.+0Y~XZHDng"WX⥋>nUiK2EʐP@5ZQ.; "D\Cn骄j9J -œlޖHIL:"ؚE߀ F:ǫ4[6H Y & iŲ]ሌ\Y%5(Uo|+Iވۛd ›|j:ba԰E 3: yy"#pmi n9PiS:t^Vx6@9`F <: tOnݼ>Oēw{[j\} OM<ąE@FVRZyHu(׽pHF}k$v64ğ^dSp<48)+< ?>rJK,&CE i+T7'rO4G̽ϷYuox*ʼ Zˣ] H cA َ.s̭eb X*ZZYIr,3}|픧WP(,KLۘP#E68 6kq`>''WLMZ&x3o>⤏g.7C O,/Vo{lYtYT\rJr$SSx %TD&4PJPu~TʪեU)0P-j&ĐVK'5s)G^m #Pww/)ϛ CPJzt3,9~.qria.!.g'M ܒv2W>I B,^OM+'y?]m7~4BVtTwSA`ȧˁ6˴{_e7oλ2jeZL-l')s?C)tu8ŝ ltt[~Y]`ͻc-x/9/KּtGs`X>|%; ;tڋWxaU]8!sޏQwE W٬y1(ec7謰Tr*2AW.Ht.]sG_o@;i*kό*gj"2Eo X[I uӰx|Kl {|)?vod ?uG2Χh i P JɎB 58}IayꚮoOnj%C##h mMX-qU>.CLCH"3uas-H`hougsvKRmOLE1 eϝnA\[*C(CĬYP Sȏn#t.]Ă#> stream xW PTg}mC"D}wq(N Ww .B˦, `}iAADVhHb%j2Dc4N~L׀L.s9nL&c=Njr4X& & 4!8=;:ꆼ5/~ 7a2YD3bG\:(8uֱS<f%Yìe1_f>|Lb0^;,b3Ә%tƛYdz3}~Lgz1#f/lnE$a=ю.9T({g:[z\ٳi߫ gY[޻R9۞ݭҒbk i8jv7#tt.N8f *nC#V6!"73Ƭf0͊+qRQmlZ_tKIyH#٣p9Uaeer8^?OyzU  9* /lP"MS9)vK2o8D^g+[ݔ/̛W#ʏ߇Fot%Xx"K#c{a6/S",]J޶Ȫ I.c'?#SȌ2D3=þ1<~#|TT# @͎ X~L g[s3[eMhےmRȌz>m3dSVS,W<Z03FmGXѷi#4ֳCWRJG#na3_v禗]'x6dvWG=1KLV[ u@'Ťp$㶁<:XAѠg4鈕Jl94j:FO 4[e])pV6\A]79!dq9DXuĪ=l診f۟z0 @tsA"{}~!?ljoeeiK%@qj#Grbߺ@)s/ :@4)te⶚H+0i sKQ(F}%Sɴ-D\ͯ7)Ia>%#.ˀ#PY ?qF\#S.޶&J|̵Ym =-(*(:RܴtTԆN|Bl/OP@HśaU61ocr,mUVbnԛ7:pՎZNA缪FHR :t'Ithc!D@8Ύ,zLK! 
yPYaQ3|L,(ѩR\=R0vBn!)Mڱh2p p‡2p ̾O3 z%&@$:_f=xkK q2)Ue ί/tvtJt2.p]18}!hEYD2ó PI3X,k)\ tp.Vں ʰ[N;Mjm5Hp޽ɾԵ٘-^aW2;rR'g뷵X"k«&R"}}NVjd*z8gB?Z+NJy*|q'_jJMawF;'@n.1UܹdƏV4XQg)>YbWC26&ttJ<GHE$+q \On@ ϙoV%ݟ)+6ZM5fޙ"ksƟbM/ۍ"̮ +B#\AoSAm;VR>}w"$0ryFaˡb3%쇍Oy|OLxH< 9BQa|5NY|[jq2x,\ 3dجAlƩOVc Y}j3ݗ\ŸM5IŊ#G 49]ǎ.2V)5qh@/EXނЃ j1~W\0Se6ThX\[pփ~"2O;ј2ITdⰧ7ϞצM5]l!~/=(8; Í0'z}p$Y4Vk ɟDgF"5#lcMo UV,Uy,[~w &jY$=-o?cS&˥G@CQWh@b:Cr Ҽnնs٢O}<&O_9XR.qFJO0Bf d22r 9>t?ў?ok u6,6uvo>Չr7_]!=yp8ڸ0d0{k5@v•S%1 %C*{]%]h4={=uCjv!=ЧUdUD_Ba\W0dh;ChVE~,]]Oi$4d|I5=U?-YE}ؔXpwUPKh]fZp:+"[J]"Ih6>ĵQ6ACg@ox]tA/«:'!yv6$P/(Fa/:8[:a5|!%>ۜe陹y+EFbSƹK3L:]=7. 0o3F8hɯJ>_|-}#$˅|z^!"q𙵡nEgKOlo f@}!֐OՁҹxw ·ZIe8=$$ }O) 35'Y珣,EK,3帵7DXMJRM=ٲc{@PYPcEa~V'y )@YOÌb'rPV+əa@yendstream endobj 100 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2189 >> stream xU PgqdUD)<ǀWh"t` #0\ 3oEc@È&ZQ,VRȮ&JYvJvvj;QQ2^5ęr ?*e&ꙻʕ*m3&ݰb f1oRr̐zxaOfr\ި=_| K?L%EkZ^5J/RtLF/&,2$_N3[t)4]:hTMЩ_'Ɛ'ըKVbtɉESQX}\H>%K)ʃDP[PjAP_Re(Jx*'[%{2n"B~D><<{o/h_&+_':Xtù«qAa9D-b/Toog2~$@#P1F+y&Je#Am*pB&-@ d*
]dF8U.a"w{^;y\|?x0o8  H=pf[t@Ǿj-B8"9A*$zlu{KTXo9tɊ\ W 1 -VGA7B?/z8}p ݩE3bLTcz}}yۍARnZ;w1[z!E'Iq`8Pek֣5˚g,+$2bc$E@TU{ V~}d7}y`ӴՋ!uV`g6w*&@dt(.6 է ?*`p}[yR4Ԏw*sTD8#iԻp\?ׄS z$s0DC[7/LqM]PbFc.N:0 Chö^ɿ}ON+fnjEH3yo t4~'| {2L^R˟idg[^:@@"@6!=Sp4dNg#.Dv ɒ8SZXB |}aU}RI+7aOeG|K]`Xa Qy(ْg9h1ƙ, K2I]Jjk"oة ~9\7OzKe9e)HD1휈IVNh/vuEEd2endstream endobj 101 0 obj << /Filter /FlateDecode /Length 2359 >> stream xZKo7+8 O/ߏC `fa<&-G,*$"YcQVWg!wgru8L/W-(h^gKr~ɬ ~:{#z߮7r)Ea1Fe\/Fq i°y G}J֨êuI&Ifo:`DtRfw1Xo 46ƅ*/YvIeRQH; \n&ջbWj4.h6zG h7u8FXY2*%28~ xlON4`8](g,˦]Zqæ^Fm?al3d#KPg9؜Ma=H 6MwU]w텇M7:3mhj qa-.sG8Mvrk_8fnA%2N#tbHKM* UzT"8 5Ԣ]5F)0?c~DYm1;Kc+!m C`9S2+#{/*Om eY-p.+X ч&%KtkuB=,,дKF@ȁ2LDt 1.%=D΋Z@"䀉ncw V\1; *5(C<$:WKW!`Sps$8>g#Α/ <Cl4YG?QP=dzo^_wRinMuSGw{tڊk҅M݈>h]KhAETה!Էi97sx(9~\zcdTGfBT-u8C "S6aL3'r֤A>Q{xsIܼ^/<<81AL wKaTj2Kr*X)7+4X7r`I@!;^&/AojxE/Xw|!5g4Q ufUHH0_̉BM]8XQ>afYimnV8nW_Rb#kg˔|}NPi'j].1jLE䭀rIIRg؝*~]EUy;uZ/!EŔpVF'g"bA *wO~AwP/m}K#0+v: gtфOuEnatL"j6),WjFW> ҅Q61[%~R"?|˖܀O<_!Ck^wF8y>zѝ 9| {!OL].%>fxӃ#F1dHV0liۼ>1.ʛY7mo_͙N)^nㄹi$Mcw,m"16⋽On=n{_ pendstream endobj 102 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 337 >> stream xcd`ab`dddw 641H3a!O/VY~',ann}=I19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8SB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp94{endstream endobj 103 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3032 >> stream xVyT2JQԱcAV]kTQ"f!=!a !l@0$@AjO[*Vp?O[[unkyoBTxy;9d~{wG@ A5V_黉^pp/ gqJ1d—v AB . A!hPB(f/ɳ)Iʰ ¢̙6=2rN"X/ [LK͞u2+ldR>wڴx"ierZ^"]\&U/=D+ )AaRb2-=>sGBxgR=S#"Ϙ9k &N"hmb@l$6b XF,'V+Ujb L !Bapb1xE `@b1@X˂Y( 6Lh"6qs T6 lʠC#?  
zn8E.[x*vK=Pɢ4M$meGPzmv2`2`*95~܌ϊizH hFi^gJ KrKt,EE׼;=3#\CpYL]_`4Kon|Sbzhô[PI:B4 +6ULCə&d"g4s=pۮsJt =^T j8(}r4\mv-PE:UUU@2?xXkd83܁~c1f@]/-ޅrHG#lŁՑx6]{ֿ_dy-M:ZpR~zWxRt0_R;-2nkmސQgc[ԛ9T&6i.8')#5wGA2Pۿ@PO\O8>_a#_~7QK3شD<mC CDO5ЗZ.Df/{~&^|[W҇)r8\pĸ3CA(A /EMLe?w$ȬV܅*=|PgnZQ7~/2̯Tu9R [{Π[5 F!uߑ ;B$}ӟ[7_j?g+XglPޑ@<\Y|qWk[RTR /w?|R͊t}` lJx,]hWp:DۖQxE!yxqQvcKf-ɹǤg3]`C93VҨe7D/姽!x%VɡgOwy?+v;oR3=9^ޅe'DG\wqrj~.ly$qnv{y}r?"Nxz_޳ h;I/꡾8G)z~J9R ɇޛb"@;0!}nQ6&H尯)^+}̿ 4s#hDhQ_^Dv;-ě(ŎQxr^󲿚\Z]F+|GzvVgTEKttH(mi,{(qszbv {ӈ7-|Rf]u :9j 8pV6jQ-*uhkdTCiʩLJ[ ٛdBۼ/Nކkc/Z0!{ck1Wڡd8Ҷ)v>@a<#٩̽ufylX9p_]}M L{[TQy4=N,ew%zu ^~^4?z;A~")%zRuNfcN7안 f[ M;m=~j=ydOJZ/0}~!O QiUgif:Qh9FVتl)t,tfA!o\9YMa?2|s 'L> γ DB祍fʭ%L~נ@`v:- R}S=TșЯ6=žgOq85 &)a~RkJέ(2xB %@djY"UٿWyV[Un3[}H) 4TjlehF1HFZlePI i x?~83iJ8TCim9j*D1ݭwwF{e@)tr=}A K5RNϫEMX3<ȁ`>n O$qV@̠,YP A=*endstream endobj 104 0 obj << /Filter /FlateDecode /Length 5785 >> stream x][o9vγw~]`f1` dޑZK 9V͒Ԓ1Cټ+A)/4]?QGgOD?\}w  |qux¿G> !٣юœ?zq_|QI)ٴzm5vj MmU zWޜA' zuƔ u/`}CRIj`2k=:ÓuQگv0N!9R!& (?pM%|1FVg$k=heCYkOZm8ܡ Q*ns^pۧ>AKGJhVD>=om`M8>_dbG* M 4ѩJkF[ǧI4ΠTfҫgr6lwviwvS&D$dr#O+t0>MևIY3meThn1аoCGGЌjnf|W8ʸZo l ǠSJ^"9 ]yg:] Gۊff89U{ߍ~mLBNS06`hN ]ⅺi!fhw(E r~&\zxʽ=H9 K ?kE8wHdqĘ# u qұQ/ʚ,a [&2>_P":M`),X(+ +dY7\͓|MMb\ܼ6ҼZh7\ҌA8g !M 1iUAٿA"p#Ȍx?c_@"i; $4A^`V0E%8sFC9.Kr-ŨPWg`4-:,@VV IrNUB( 0]S+iCHatO0r`~i=O+deJM0M u#`a&ejL(f28g Z=2C`t@_bf=zc$o-0h qM*+ ]ͭܺ[םoT"anٹUu3̺s#hy5 Ehe3 C>ܻW_ lXl&o] lkZ▮斤Ժ|{-}j>N6ʌZd7F|@>l#S0Z㿁69,c=|*~UY\f2z֘Lή+cWNx,X=̳P:TB}}sVpM3<s[gF) (9Cb A {&``?̪c48L0o`ރN}Sϻ0/:܉}ď \A 6a;p2lj36.o,C/wwE(aiYW%ۄo yyYC4G_bƘ8PM <~/xN8F Ws?dƸcYoGƥxPSB9Ǚ?Ӻ8́ۍBSoLg!ykA `y)J3l|5j%_/ŐH.DR'sCXH A,e&U d3PO4Nl ' =!ξ m}ܷVbÐ?P(sQV0 Ý@^{?QH'/x 9?\j5A<&T.Jsmޔ$@,ͧRAⷥy՝Ui^v&p^>/MoԌڌoKsi~_ 0$.VԵPb)G0gQxh؈T4[8E%9/vJ2365ޔY4sdtRVʺl%eGgߑQX ŭy48U+?T+[ӟH׆e6Aw~-]k4%ZESVR\7N0Vd>5.\ ' ^=Bax9  {(GPaޅúRmWۉ͚TY,s+ysR }ڲ{LTP{.%9B6I7.ȥ4PF !*{d zX+sVdU;;epΖeBB(2#Yk"Ӵ$hY5/቞, wi?4Iyy]L)_/Js[K& b7D3ӄڡ4Ueaemo9-Y?gj+nD$ˑe!(l+YH-=6ieh+{6kU\~]Mp&לY` ;"qmË{&?eOF ^0?mG tZB& 
vAe+'??@l-Q^!^OsƶW7$5Oi[CU)Jz0uuBCkr(kyiuq (wS˩\"aWUJohQ c^B\9ҴK5s݈Wx>PlPA& cg{4BpgNc9ū o=6aRsS5(Ԛ>XU,k%~ov&y[,Yʅs @S2Wv ga[N_hvb?N-j 9Ȼ\'<cŜ٫yo9ƍ);&&OIw *"T*1(3- yux'թw`Gh5+w@V% Cb0_,2U^f(Y[B)Uа.WmU[z||>h0 O f岏e" ͽod5xX`8djӘszӱZn]^ōCE"'rX9Sb}5c_ Jg):rFs{_CWW gd [[g~nhZ|9Y5YR#-j7kY֯+jξE!3xi^/wk.M ׁrԹCTK|GJI+4ZrUb._i{hy`ldb;տ2{ +6AuheJYjB\5ռ̛!{)<0lԇ L %Wػˑ+FUE<]6OKG:BfpؖB"ָCAn{+k6 F6*ϛrrJ}̔;-}o1r2K\auȾ | ؈+{ڻn\Z ܷD1SHcH:x2FTE>| /~MXWȑ.4il*Ax[+FHJ`' {vZg7$R,1%0Y{{9f.2MlS-^¥P)ԥNtAӲ30<|gPY0o#osЌUNϭ9x͵mQPq&x=ZY4TcFELӕL*Vk#۫`[C'sQFk~LjSKjݛua촏EZj[XęBs]# PZ[a\xlNO\3y.<|yI5OPMPTT\o ų =[\{7lK]P+/h,`xO+i=oYHl(p>5U {J0Ң{"?A{+pJ[q%U55,6G֯KSe u}7v/r_$gwxŭ~S)Ok^Tȗ!5{F=Y{ +쟏a:k,O&#>O)0d/zU:\eR8Im26=Q"[[Y KgB٠b?C!zǫh9KeFS 0` h7]߄F^ ֫gu7毨֓O,]bQ"?eVڌ<[ >ʤ]4^"vI`Er$!vߴIIys !a'7vmS#6G"M+n@;]WEYK@=6[ǥSi{i~_".Ws8fDqGJ`߶@YSx>Ԏ9c}+~y 1,[aMB#u*j*Sπi*xje$gGSmEة9':@€օ65"/Ǔ#Ld$}[PGUNtm/Sw,kwZ[JE8l?Ht+ZZOsYbB,4ei ;6 (ӽc_ZM9Y&sVYlÍՂCd) g*q:n+&jV"Z1o`xǦ@vVx8צ0d*Q^iGP SLgS֯ađGF뼶X? 󼜫xɕ,?y鵣FZC~ZT> ? !)G*py;:I涀 iQjEr%sqעTSy0&ޞZ?=L2TMޙe9"-=>l<1QmVN2"/-q&=]먆b1#$,Ϋ}p3ueBnfL\z 2q~"^ `el e@w>Ae\4߮CHǹ*oY c.U"'w }))#ʜ^~y(V4\( Rf *3o_\~>ibm|玿1]W_Y][ZJ`_MxM 'KX`5md0?~oBNendstream endobj 105 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϲ> stream x}l8/W2j-Ht% +iyQNۉs۝%B'NY(}a}4MюM4=:v ytz~oQaek0JUYW_w֕6aQ^S[gRUj*PY,ILRYuJ65mm۵kgn֭t VcSYWHLr8;di2(nK_l[M][,֖ݛku6#ժ;h2X{ z3{d=.tOMz`5cDwcV5`ǰ}ؓUʰk72=s2tj%ǒJrI9i]4=@)!UFҐKabbj팍isG腇pEF?83 'g`qwD($E繫sg=mZgf S Zo+U: m̀uދ )R#GoNK*o?Ѳ> cJehC8? 
ⱩK0C]Љ%Jα飿8}b'<[fNEYz RTџcDr`8 D d#MdPs)VF1q6^T]֍ v8OЫ¤P쬰F0r!%A'z*"R S0x="Ɇ8pY`ÐVʋp7PXRTrTޤ 8&=SB<A1 V Z@/捰 )qa|K7ėP3.BN>usz٩ԭKg9EY!dYWbv\<9 RPY qV(ell,* Cu/Ieә CS +>`NC2gq{ZJWz>{FE7щw!`i۬еJ4x0"wM`Ik])."Oug,sl%Wx|&=4 y|;ov|߶xgnT Rs[?i rdv}3E)*%8KU#vZkÃ1<; | =goqfuk{?J/^xU8 9rscGbB<VwN6qNkb1#cM;ISBaTBLa/tT[߱O=*9CK7) 5/Х>V^ц xNH^7J-XU2ٿ^*~rs.4_L_;` ڝɹLx,(~' XCL2-2QBՏŜꞌK;]#'BGjgLy&f"A/)Kв~0E4d2LRE1YYXe;sUOG0?np endstream endobj 108 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 418 >> stream xcd`ab`dddw 441H3a!G=,l(_7s7B߳O``fd/mq/,L(QHT04Q020TpM-LNSM,HM,rr3SK*4l2JJ s4u3K2RSRSJsS ԃP%E )Ey LL@b2X9wʂ(g!};{%Zz;5:;[$6M;}h L5mbSvwLm0rw&Κ1abOo$iS䀶xw߫D?JFs_[zfr\,bi> stream x]Ks#>u;e:z1C!ՔԌ3( Qjvf`AjTH$'D D߳ʟ\n_^H$sv}S`"<铀l&8%ez/dp_s0H I6&} £a5!^t(iomG7zUd7DJXV۠aҨ9=/)t-Ӕ/*;{1y (:_yBڋ$AyEUC Y<5,~ h R|=^Aq8 CiCs݅?8SX c]+F⻐4{߁cw]g6Ǯ@Y2a;WD^`fCx&' T#^Y"9\g۱m.'7xYPBVmp ã},T@@klphM:~A# >>.e׫@kc E9'xJn'¦4hZLPAw%@&5s  w2{=HO{5K{m[ %RGN0*Vh`"ۼK]WicfLq`4”y #6[| &$ tX%n7x_ԡ|%8o T14nVt ؎q(,{}l.#8SLOJS4:xrvZ@9_&F>;/o.R>a9Y%#sWUeQ crF'[4lFa|Kd.KBG|_Ơ1|W5D -J@fSDiw徭 v`䠬;ZM*Cm(d?`? 0i) 2@6Axpe#8W##+v0)F bI F^-)"0*Em^Ɂt97UQYn 2̢9W ~110N+ת7wm:n`ΉiۄAu!#M`1  7KRάj^ˆ%g kb=O_I :`=6:3ULMBg՘pU<ρ|>:\Dٮs6FkGb0nnU1!AkY'NJYO'fpcǹq!q.6;'~Mxeхӧt["/K86~JN2R2Y P#@,%AͫڼNv6@P7fUPDA:]mmd|`F_Z|q"A@^o~-8'R24:^KKX8"?:`>e&v(>,hlMQ-V 5i`|`$Ŷ_]>Yx 2ḃi*F SSh_qEVf4Llb^FޝJs>@cnޛFlI_J}p9l-C8u[P j ԐOf"HId:c w7XZD{ShL"fc0ْe fO˔VÁ'ۏ4 c~G?2I`JeAh\ |UHC CE* =^|/{$ALY3ث{kdDJsF i p͞ߤBaF~Aigz:td? 
Cm25Z֯j3V9~4&ArT?Ν9S0bp+50i h:h5 )s`~懙wCG ߇b@V BUCpaDK2,ܱxa."~ {5YQ-+hemm~<žG;c'h#lm*,)-&h!O\7/(= V%)4INoႚ $(mGIgH&䜞&,;vj6;k2)* =\S;&y4t І'A,  &TiyY׵ϯԚh)\(wH]9fK_&#c ENEtYRvN%as6Ň#hw}P291ܕ虳Eb;ܕI-Kd)0G [fc MVeqPSTZI-зpI ?<ff!\1: ^pT Aà{( -(<}|R*rmФ羂)z q\^'VY6 Y:'#75x*͉I9C"=XK,uNi0Xܴؒ`hLsW4VDkLt^9vR y; <5:+M_MXw"0މ;Q|y:S8bT kуQ$h pI<>1λ&g'ob<"qwDStဿL %UzコߑϒA;e3rH}hb Sg5)ǿhPewՃIY4'fǙyM(C&+$!ej;dkcqc:u6ͮ TĤq5 "9*\\6ewP<] `*҄b2V\jYݶp@}[.Ob{r;Ό'r.گ8 vxRƱۨ"L(CmNRҹxܚ嚹\ͳI|4ZVtkQf}?zx'xQ+`y!ZL9z?vFyzaVXNrF'3Øǎ©kEvӛXR^&dŘ,O%yԯW;x3GMUs%)oR v!N1࡙8(KP8Eoc:0揽, ]gs ^oI^cIYQڣg]#{\R66$r_FjeHhv+go7ZȹTDIFn:L*d׹0mh[wF,4ZO>j|{DgHX07Vb""j:fn"1rt%JF_gߞ9t.{N7<7{.߫*;5;4f:/ `^>5Y?笕W-]uΛא]G,gxb73hiBYm7~8J,L˕jH $1<{6-El,Ba[:R A &qW~f m<";)? B>ME(nX*}KU" q%#̊"\ޤ.5K;p9Z5Bx4x0㺽,r^خhU@Y<ߟMrendstream endobj 110 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 0 0~ 8@0%!tpw'0^Gk"GpXhu[@fcA剋7OH.O!.y%:EHAڙXWU}uȪ?&]mgM*)J747I3 DWSyendstream endobj 111 0 obj << /Filter /FlateDecode /Length 5664 >> stream x][s\7r~WG4LyNpdImUxYIy0"mCkD٪9h`pBbT~|4_0?A /{y䟾gq~I<\ޜ:O(+ֿͫ4_t`=u@M8 uo ̜z ^t!{ZZ|i<4mu$ȓr Vi-Y2AA~ZWђSRbVfe('/n 0/a),.c)rrj·%f:-7ooQ B@Ջ;M3:K8r]>vj0'qL#h929Sa*q`+p2soӧaϩ8U@މfKhօ\4#'|ӴK0.P[,G d75RRvΦtn*NB ]2=a(%Qsfc\ GC1IF0 10T{jZ=34ntE[ׂb2'͚`n̐ޗ{wi'A6JtdEoKZҼ/MV(j#])52ήt# 8IUt4F!:,.1.:* ܻ~FD&JJuޣ$,{E_u\ɾub)%. Dw2TM]) fLN5? ]?S./{>}kzSiGTa> )T_1ų'z̉Š! ._ ʄ13$9Q zBIع dI.ݒsV0R0BOV}Q:͖q;T_ѫ\vr)51I:3%֪0qsEeB0#ݙݳ?nǾrn"poՌ'bIT;A<Fs_'߂}14`WG~JNKnLl"t^JwjqIj/˶x^tKKhRCΤ*__qw}ׇwFC~}_/`M>TVktbY7@R_"]Ux^"pRY2S8Ɩ8FgӮ |uC^Dӕ`BoIU78㝙*م#U),,:eyBW|7=\}> $F}̗o3HQ :? ~ƆEa$,c(*j ƪŚei6/a K!xH2cSR\%R z8I;)X/*ފ;?3]pQ*Qhك*i.1{%̄r?4Wd2`wJX:u`Ȯ<~@Qs羆/ӿ&aVf@+ZԿaHJW?$Q ˙K+k`T$ .[c)ވ^(#{ӻH4a>.8;>p )(([<_]<!endstream endobj 112 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x> stream x][o9v~7 >wK0 XC[۞gCyXd{.kAt"}B313g_l}&~x&g.d"ʳқAwgw~= CճsUtŶZ|KE/D\\Pkojk߱φ9A=_zk!Z҄0 qoqR#<-F |>^;eLI!k%va`Y\a)Q1T_9:ߟ+7!b͝>=AQ~puStE9\I qc ZJ 2,N]t0)s1 a5;xqIR>]ۦnaQhtS!Ihc\Tq-*-&,̿fFAЂzܑ0c. 
C(a]:zR-+0IKUL4&E`U4*_αk?d@`d,F*2) l/"6})`2Q.MTiP4: 3B"/Lf g>/` ,;Dxsl㮺 T!-Pjo{B;:]}p)TS > 'Qו%Q >M$€e-2趮QMXxcpZ@O^ geskr#q"k\^F::wdR`A 0} õ҄iݔP{*> Cڸ /ϡpPIo %"`':8گ7@MgO9;<nΓ*an}3d'()Z5ܧWqEHtGMwPsYP8D :6WG+FW.=V+& T5zIJahI"`Bh7pD m B5: o"$j }P.4$ d}şSj10C/& ȺP*ԝj\ժws1jyC$1D >;iY_]I8 Yv)Y̜f8~}B 7JGZ M^OjoXPq;QL ,;OMpDibVĆ'o8ܜP:k:OBXֱjy'7TXb5' 4[^M j Ը_g>Iĸ kcm aDjVֵt/wzZz0#SU* fB |);-j Ig7[_PKl3v3B} h}+7⫺(`Fذ&~5zj©3?o% pFY^>.moΖK36|_kioOb=i}џqC70Bm҄Bћ1b,!nFzO%VH@uW"w7naĉ0bS߶y `#{S-YMnp#h?O?<JI'2"3sˌTEb`ko!6ΔL¤OnSc9,#<90iӠ|7X/j8":l'Ѹ,T:m*76!;-6ek pqSOa;uW6 o c> iUDNr6QT:Og(3}No~s˲%`z¬ b&e&fB |;ܥ^ r G>砸^Ly]s2ѓrjZ#%?D,X`7F"pE]ғio:W]+xfq[& \kפ|8Q d➚r+k_rQX#n9\&4J^MH"Gujh}*3ZdPr!]qkE ΀s54㾫'`'w0Y#z#YlU߿d-Y)1f恠 KJ[֜ԋe}U;cJvy*p.IzV2ӰVg)<61 :vy*19o13#XăbEc r:Pr}rvNru$X}'zOSE/wCGB)f@xe@EdYemx}LB{:ΣgTC|Gjj[v[,ϓŁovm__{W!t&\Y[2~-5V1s8t'-=06}tzcϬTlϦi><~pGb~E4svq:o;6vZQP`){p+99SFi~K'88K6Jͭ lx'm#At>Ǥ6 R8aYnyzX/3y,eW[f;Dz'uuSGr"c!knH2c-oGǠ;=f9IYifR :ry7G<csԓtLsX?0>tZ1ݾ#\ܾ|!gzR|yA vkЀG:CKCȳ9 Eݾlm eZ$Squa>m-k'[ncֲ$Np*jp{Ӆk&Zm-Mm9} r<ףH2|Y)N* L5S9F"AsD& }FN|%Φ`$A <'En-t"R'kԵ7cL 2 $P.J"EPPy>kX6:6!j*^,<4ţ48 r_Ggᘱ;xСrՊ"4hچG<8 jL>78臙6%zΞ()F.4A^|C%S9.-@E(ݹ*n?t8h2CRxco.@p=wiAGs'dbvE}[C2Z;Qj,4 f`$ZK^ ^鹐:$eXFA/mwGTg6?솅’n+ eu89=4vptAr_ NR3-˗9 iЙOQ(=r}2U:; ʼZwA9x6$WhUƧx#JFb6'4'QkaAws^%82<ci/q^>sMY{<*҅v3 Cp>jO<_V! 7SҰA{6$f]œ YA4[pP3EGN2N/(Q[pCu  c%jhQÛO{][xw@硵{]ꁁki$D%@WŞ/k٢}vZǦ8N ;#@F v4]w0~ƉkT?1 *s8a~Ʋ4{'p;S*/:r֥cN"8A$5TO8}aaD߲ ˾\cpN#4g5 Nchp tXo{N,rK>P|y58n6̢#rXϫfiԀ7W%'z~K?H˯ѣ`^=lfO?7Me4Y m1\Qyr}Զ*{W=Sn.`r9~8ejq3@ӲTdP^%^;jkDiV,fŹ@|W5$Zܾ@"}F`GeW8q/&g g&>ЍTs>AI߬C>4+zz T+.v S}\f5g&h쀟]Z? 
?vw@erߊ9)1gkʬYendstream endobj 114 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 *,tahU@p$ aKtpw'y?\gG:ikD&똨A[L+go*?`3]ğB\J!rdgdtFs8VM*9K7ɥҴ43 F1S|endstream endobj 115 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 409 >> stream xqCMR8/vz-B@jL  b_\mqCopyright (c) 1997, 2009 American Mathematical Society (), with Reserved Font Name CMR8.CMR8Computer Modern6v糥ޒԯȩʋh^imz^1!lH컿(C`]ZtVj|tϋؽqhf^XX_wgq]hy_vCoa  7 כ endstream endobj 116 0 obj << /Filter /FlateDecode /Length 5680 >> stream x][oqy >_샽v,''g)ڕ_9=BJZ ֨KDD_|O^߿'ۓ_AE'g/қQo?jNn_ zgxA1𵳗/4|w 1q6oOw8(5g4OWb1D;7tN- ú6ym}m͋}}_5%_fꜾcCCڼMm͋ڼN(+# L gj9=I[XW0J`hzF:ea+-fx:(aA[7l^Cs8X逛 Q k50 B ׅbV P焛k w1I-c<)aie7 zC,l=)=wZzq1r9BLS(,+hL/ȗ.M0Rg4v F΄u{!,F!U|Je%R'+ D1Q%y{,a /hih~<X}CLLΟ _5QDh wQh8<Ή~M fb4%j_/W`N.1fZLӣVKD%:]n3^G6Y.#P Df:tq~ٶ"QP_6}q{~<fR"7Q#1r: |i`O,I8F&AO[鯪pEOa?lP.o\ P|zellH.aVKi05qOK+X\ 6]hʷ؂T}ɔ_\SPoucm~O,(h5΁9taߕrqa&ˉjRï9YeҰWpBepXP(hYJI(IJg$wq%s!k @)BHفA ԵMjéptKk]ZoJ,JA<KK(x3+EmCB1d&Uf4ΆvAd@P_;:څwz\tZgSwZ+F%z:EP=du*SRC􉋀'A~Zi(%v"H35ms(LkmzRplc )Gǹw78y mj0pJ8>BTRnXAo-܀c i `yC{ *! ma iX!9k,J2Z xJIVAV&]FH ׮:[iD6o8^Us12:.*r1#4j&bk\ܢGՀs II8%v:o<Ϭbk2*X[0L3E lOlٹ^a탽``+08JYIuO+A~U3 [ٞ&F Jtq$ۈ J5],UnG@3a4=$koYo`ȶ#MiQyL4j٨ܿ{?-i6u}^]ӄjMzgO;#}DrPn P (=鄞xHqk`JN0qfg ޥ6ϕM鼐5CrAMGC5-/aB޴f͂惸"Qz\$)Z8y&j+njYmnj6yCm^C2gxk m N=B0רB4;P~RgN׏|):nd>t%_A#L&;:; ,} |Iس|иfGzd}< Wn'ר[[vNYآW̎΋m\Ղpcm_^ ӝz;_ x9*7e$iQ LVɮ1um^vKct.~@GQ"?sU']0bIJl=^mO$UB}(B`f[Ej,ŊN,W(+FUAP7WM,:el U(kjAaH-pvYU:Old^OйY2*ddT#^h``DaJ`52 Q ,79ggޤ0IܪX knjg,MUۦ~RBDPxw|>_U{.+yrwMlm*8+{醯*4`ސb pw_M~ >p = eI A E.;w~G2m3GjY']]PP7$bgGYek~ K?O%~qT4񤤃y.[a/ GPA/=x/V.e6|ǂ=K'Ӵ]*9JddR*J<{,IZ!5)sfF;|})0&IU) mP LZ]5 rD3]p P;GnU5ÿ &)b;'EWao%x*{JTWD%˪/)iJTMBX*ys(ͱiIR=?+N`RY,Է*XfJToW. 
ϊݲ\2vUmKec ?3[Biev,MuɓN'ޭDbQ0aٳiBBNa))aG @2/RW];wY**RXUm6Yw+!vp0 6}-Ť=y_Tt1`7o D> stream x]O10 0 0~ 8@0%!tpw'0^Gk"GpXhu[@fcA剋7OH.O!.y%:EHAڙXWU}uȪ?&]mgM*)J747I3 B}Svendstream endobj 118 0 obj << /Filter /FlateDecode /Length 151 >> stream x31ӳP0P0T06P0P05WH1230!U`aS027i`9'O.}O_T.p .}*.}gC.}h\nn@n.P9?47XΎEAmGTa?ߠ pS! B)endstream endobj 119 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O1 y@*uKVUc"D7ҡY:ߝ|x%I cikDfǢU`ķBW^@$:UZFg] 6R L:չPXqp8I.~r 6/=Seendstream endobj 120 0 obj << /Filter /FlateDecode /Length 5000 >> stream x]IsΙi&`_!v%W*8\ !e1 ?;h= IQbp[;f?f_{zuWx>bNjwG-O8:EGg\ɣz:cT錖~0y)Λs*ڹi(ޖR*y)^7'XDŽ3)%;79ruap I);'˩WaZ=SϠsO?[& , f‰r/:8=ÓUXyƔ/TN~ 9: 5nSylxEÒwZLW88ImI%oÒ0A '7y,|tV~8.l꡸*R|_ tg%߷+sȵ}0XyZg\vZy_q D,P&oK$w Z! =8qmƝ3&} g<7mH@wert\jѯz;l\M.b=Glv<\$riKluY"ppp$v<J*{ñNMs˰[-ɋyAU\PqΙzz9#m8\ԗ֍g{%z.ÅqWJ.`CxSqox?ŎjI `o/#8oJT=nJN.yM-Amɤ2WSq˫ |8>pAy dB5LXl%I(A6yX P9 Ih0! BEb?qHiӥ:y2ddL 0F:'K `6VQ2߻$>X]Gm5Vow# @~{Ppwy}q9S&s/0R[Wq\ Y,VM*c\@*/O H4q%r{m)A+,pJ')#W HZ5jƳlsT !t0"p T: P~i]a.VB%vq[7UC}^)ůj{}(5$P( y"qЂ3i4Q6OV&j}" C3!GBE=|vs$Ӄ?$LH:ӝKA'=ǁ0R҈ZxS y=^L~/MIsILv GreyD3wͧmYq7eWRؾQcs E1}k86Z} r3@# 蛫[/!ZU#,l凢XX um&_ B߲@*U~ i׉2FP1Vnl?붴nm^Qp5 x SCPz\7f7E <)ŦybH,W!W TU5?xb⻛Pˋ|nK+t}r S:I~5YY9zrdÚڹM;:{hWIwDu. 
M@T|FB2y gf0X'5X{* G=<ų,<_>} .yt*Éq:ݔU@ "lYZD.ᵪCANf@HaxKߔ" ;+oJ8-|mtLvZo7D w br?A hV7y m[D'mmaS`LNJyBOtGXנ vh;9]Dh`չ"{-(E=Ii;'EP)ᶊu`}%Tp̗Eăߵ\iTRyR$ Aqc/WTԍgh32z6©Xk 4d6:'7| W-ŶY87ڤjqAl_:9p i++e1?%h[Bf >: Oxrx'|aqT3'u ,ŷ/J8 m/6m*A~XMITK J.w(IFqN(+k;aw3 HLNVlQ ;С_#5.h&-S|hxoo;E~hmӠAjmgH5 KLDk7$[ n"DF+d2:xrXю^OsQ͐ȗ 㞁Cd0 m,uQK.[0WɄ0t#!&1sVBRMx1lO%yBաp)A3 sE{CHʇ)́Z'D?0p #yHIxkȑ O Z+%$0nU?L0XCf*@{%C#wvѨEr $k.دSlO iw9?RN퀕~Yy\-R.9bTIqe},R)&YG߃I24'OP+9 sHMV!}4Sj G}S >v($DжiA;s*GO]1"IF|%ⷛHoGv,խ4MEtJi/CiH]k}T= y*H}NbꖺgHAc4Mꫲ!ƥž1列R ѣҚτu٩}3x,ihI  huJr`;jkΚN|i惋Bk.|N̈́zR|~`Ȭ+%p /W">%lxN^@4vދY3F`(3,V4I4iI41Mrs봨 !htإ)MB>O(BRL]auLm ieM"~YęKR5ubۻ=N0*1lV𣂸Z<*=UR4:2fmK .T>z!駆l ޿x|[EzEC"$JshpzMD}ܫJ$NkD7Ei% 'r3ITfcB|ݸ(z+;4 u^u7}Z=+޶H*,;7!ԘQoc#ISH]iH东 Se4k'fo~yuYj>"I(!RmOM@cf#\ռ"{Sstt\4:<L);^EU>bF3Z1(<L5PZ_AuԠj|uW4JYS|had/#g#aF b=X<#ĂSBuǶw#ًy8Q+ 0oVh)G Wmo߲9HBgGBLsV\7mMw8<93>=Ll^M9} rnL JxM.Q6&t[*a70Hp.eRBq?Ađ" ">~+ FKQ) b^ PY ҁ nKvĸLn.gʈ! 98fT^P/pZB.a_7.i$35`T8$3?9_]Zendstream endobj 121 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 . 2D! }I.|D/J`H_#490Lu@vw=|*J!􆖠D[U]km'͟tF{8+/,S\ HJ$pLg9AAStendstream endobj 122 0 obj << /Filter /FlateDecode /Length 5587 >> stream x][o\Ǒ~#hf9K?8A6.aDRlCaTU>Ӈ3Ë8\:gǧ/XŸ3&.*".+y%ٻC%ߎGg\ltet\VC~q\A _EBC^A+-`D&Cp4e0 .b1h`2朱wv˛t_n+w8mvLgQV͎^"!:H!C8:evFu7Ҥlc G -$. 
'B OSR:j p؍[,8diߥLTNHNuRvjFȎԺP_v:A4Z# Y:R7g{2zz=JG7ZpoVڍ&vE{eiD?NLl/G'L}8>cWV=[z (&*Kwo.>CG> wd_ ZpEfʼnNj0xW-Ά8t:7նɂ97'@mQLOqXӤМdìidp' 4Y 75Kod.@8Gr1(%gSJo@6Dj <2Gp&kzM"E_Q~1f˅Ѩ|aljgɕͦ\xƔW0v\=[DZ ⷍw^ѱNBG36ؐDBh qвG1&[+ɢqlFWG}_i]w\WtSD`AJjcu RރY_1]|EO^f<<MO2`=sLN\}L=Dlp$&"D63#3.o*d-ewmqRFqˈZ6y#Gp0 Th6[IQdQK`} jm{i%o*ɐnE%_VJ.EΪ9g2y2r!5PKr0|l(-ple<]r"aeq4yvhuMm 6,ѩisQi ~,0sJJ\1}H\y*akۆȉ+%~aijƕ+uӿ:MfFJ<!QVRʀ/pYF)Pn"^~\ȃGf!C֜i؆e(mD]@W+$}E2uzf%b% keٌ`QBÇ ߰M1$VoSbj @VZd"9"-iZԟ\"6QcUsɊi3zk82V4_- z@ߤPBd(9̧Ze5Gg ;ٚ`rYͥ +8Jy4^c8N0j3$ȍ 7A^E%O.s>t?;6Ʈ1Eǜ3-'*[;1i7†!AXD]ZذYzjP&i2)%JWRL~] 1s.Sƒi&[kIt}0ج y9eNr@}*JOMr=Z=e%YH*6XW Ђ-lD$˹1-0<{g& p'")QSբD1ݥf`:}|x?Ba .93@ZyTp[v64!Qڜ hٱM-bcĂeq*Z&0L&tT~n+EX"Ae1nY_T<| (z6L6C~t9Q V:Ka8%㩐{c rYPC)Biˁ8/suڎF>D|9,k< +]ftR]E 2AMF9Ӵ&#s̰gVN8J $x"*a XU] wcso~5Y9tB2@&gcuwl?~h \S۶/59yw[9_A6Fbi|lD}(8_)1T4e˲YfRmZ$}9xp͜~p,m\~'l~?U a%G\r8B]'eK*TkՔ; p|tq`9Vbͤ)G5SQb'?iD6W܉GqGGE7|3RͰ^@8  -*N6N29nSd\M3 mep,{}%ˊP~q%_UJyQ osjnZ127&*^þުIV[i)g;vT>؎>eNswFwgn暧;?DycXr8EʲsV6w*s=;I%Y Y3?(B_J4#,}GL[L#vo3Bjz6#N<GLɅ93rPy9U$YJY Kw[Eб`طdxpb0V0t{7r#=ϻԈOlTYI/'SUwL{%{t8ZQh0ۦR.#J')dҼ:e}1睔:H6t~:3 S  )%9ZmHkÖx~Ӛ?OK^v 7<,p+ڳYaLJuZ~T[L=7(ҹ_cJ!Bv%ߤ=::MQ{*2;LZp, gzS]9bh5Ҩ0hk[I~ЎD4ְ a/xoH3)-eo'X=+a3"ʘ\”p59>;Tύ7=[ӂ*'Plm1 Fz|@3;ֹV͍r8|ERϬUc2A~>go,g{Ё8ҫM{Wad Dsh=WK۫gP'qxwOf9x::=ZoX+s+ʱi=9 Gtמr~@6?:_ÌLJ])i߿}'spZ6袢q28 *֋/ycɏb'hRyeTYt; ?7Ja/$>l}dڃu"˽.?DH;Kbus%!d5K`F4=~'RŒendstream endobj 123 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϲ:VBi )(IEѵtXIg`2i1/%ES&qMsT2Χ_B~Svendstream endobj 124 0 obj << /Filter /FlateDecode /Length 6041 >> stream x]Y7r~#zq]}hCz^9W6rHi8=bŝL$Ш>8CLՠP8_^蟞?c_ݓ>>a.?>K{3h<<='Mʑ9_;Jk{2x6:𵳋'ޯO(W*dl`Bn gQ3Wkk!0ƌ@7Xzпj :R}Oe,_;?2.vn^@n UwFα<竵f)atk0E'AK0]9hh^U cR 9>Wi„L &$8 kFa:҆0}ōО w! 
*~^i`]o(׶5J\$\a[՛a MF^ Oa k"0I/*@Gb*l|.a$ۑs2ָ::&tD7Ny :Z5]T^DwtM/iU ŮW^ȃL!S>'-a{0ܴh /B8D>8g2=N;^ g{kt boy禿 ; v$| ;y%Fșd/jFƯHL ,p93xC!ww8NɆ)jl&\2& S\ ,r"4QVx/Ah+y&/JMk]̇Ga}< %dST[{k'=eǩE魐dq;pC8$&1#`ϮfG`@-(kju[ZD~^2 5SlaQGq/A%<((kY}ڮ80뙚2ummp=gRbZguF{QqQ, &q岫JYq,} Lb౳Eک B_<1hk-4, ؎Y7-jEwaW$]8d4CM/pI 2=j$H0@ NvkCrǮ^@B-y!o NZ7|YR2Ei{cwS3ADY+^"RpRKв(+=-5%Dƞ=JbeR8U饛U&x@FU&B |5EoC!Dul"?ڲ:Bc` oPn഍=>SԢ-4 lGoMQPula^6#a1ĂeY ^%;{ X aw6}=NwtيY7JXl+} ѧ=:|k+1rg1&1nF]GjtѯΌ\jQWxүHMԶh*2OүvW  {[ٯ":ߧqF(YW^k`gF⚲\#YUelqq`3O+aK.H 4Xe@\3ҘQY~2+6JE4@Få LzXĞ$2lv.dŠi]+Nu%:fD 7ҪOp@D[߂! RrL$ۗsh`7X&_vTjtb`lB؀UwlNM2b$QqsC>ĵ?l^PpjH(ZќpdtQ]%{q@ݙρؤcM$m B~SȳB~W(o'* .TkZX4{/'!Fg `HGĈ*Y.Mp!.Y0 μofDz")-73.".X@!P:",cMt C0_X|֖k-e}׀!>7R#/]@00`%d'. 63&-n&J'A9uY.1\V g0IAUB9xO~ #!Ʒ"Y&L C/c.jD|&f1۬C=fQ;/d b{Z$Qm[T,{,:9{D#غ(>9%݆$#['d޳8l6rdV" rva |_iCT=&B84tsP=[\ikũ5AᎲWsg6vTƶ](X ׅ y@.g8;uPFiG4H,o'HN=}wH[Jvu6Fb|]isn!kUd E,5VWxUlx"

7CNQhf2%7H_mnB ^X]Nax-Q wr 釘2hD\ʩUTR*& lKQ=MfS@!*mKz;Wy|!EG@.ijۙvh0!mE|)cY/q:l",n8ֻB8[f[ .9e ~CixƸ%تŪԪrI$dQQ` \5ƆB<b/U%3n"ݶ̀{ mqܤjsH665$Š'-Z:<_2\lz!w,k՞5 VV5ڎ54|I߂` ')] ͤu=&bi "hl8*iX`hk*ATs5%mIFBj ߆rB:hߓ7g 7}*&DٌBo5 u&n." CzpQ( nU' f!V++`(oL7aacXj8/'?ݒ50#Ʌ/>PYt(~'sIC|G"}q, (?F-.XS*xԇ~LuG#IVF\GP޺ۂqwsᐷ]m82:0&*SeGq˜ {|ȇ#Oa\L?]IeOGWCAT!XF}JIN3M"h&L@*kVe*3C\ *R@R6:k o IqcM!*n$kkW$z<= rb]wΉnfQIՀ ^'+[a+dTZeXF#q\Z(-wJ2_Nl$%z(8ys1-}_}]FP7%O\((ۨM UW$QE|QvL^OPH\ R8Q$Mt~ C}B~>@r6yWl I@)!(D vQmni3eQ 42zQ⧾g\v+}N=$D :޺*Q%Od-4svu• m+5txBt-OVLVA 3hAbĶa؋`NV'FcKl焘"sIl0.4&|]`W4Xաdv) 4B5&ndz)1›Ulh\F9@I\IfNp'g}2  zC䃛Y/B=zjaer^AwݧA}0)`Jě1b qL"ztV!$ֆ^Ŷeرo;!m mT&_iuk?)ofp|T zP1'q9bQ'W2KeNQJܠ8A^)9) ƆY`*_{u# пSDJdgTWtI`qDaҥScB*ZGw(5'?֓Ld?K aM߉/0ʇĿ6Dq*l%2e/߅6G,;z'ڔ4;݈o} -*.0A2O,*\TS>Y~K_n|<,=mՈYTD7"QeEqj+;C rZ:貤.3j_){sqtu%]ow@jۻUw$$[ҿ$S!E!78cQ^G ~\ 246>xh`=r;M"Klħ>ms*~  \.zV.Y&uŷчlI^ۻb=^V΄ސ:-f׊ʠ(Z ? =Xh[T-:toI:"ydX\rb!Am a$UjcN^E@'p//Ķ-^f 6l ;M}zû3۩QqJS넲YMLŌK^GY>=hDž<oz,I4$ ڟ sV^ƝV"l1$?Nw+$MGpF:C!n7 ïD1أT.,Mɥ`U|ς)pa_xAheMk%I6 Xk풿;!6MxN7vpABcC.\\J|7UqEgw$ga@k\ 煼-$'bB5QGT EpW')a# T䫒`v='qD-~l"V2Q jt4E.Qܨ濪>urN3@#l}yqhLHu_0tI" "?wH2 m{$1f占ltT 9dDT _MnX/&&Q,SRF}_@__DQwc7aG.ʸʾ\0ꢷ ':$ ze4!wqD%5sй~{鮥oߪ>RA f>ﻱՓ6>/qgF3^Kc]BDW~(w2Hy»WT\eܘRR0]^(^ Y.d# =t5W_j?{Qǽæè%)d|xzu fJ> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x`ASuendstream endobj 126 0 obj << /Filter /FlateDecode /Length 4509 >> stream x\[o\~W#<uiIS@E$ɒHZ+?̐<qW+Kq@2$onܗbv>ywW;bx厤~/D O(?I_]|?ۙo>@ lpb1qv^e_腈]*^tų K4hubг|O F >we6h00;>W| Ξշ0X/XQT$"6Uo](M+7!lJsΎLLcYI jژ-PAaV %vD}g_M=:P3R4^(RNY`A^3| `0;>S!4ÄņeNyoЃA*O*^UqUM+h6/!E2y|KxEƔAʫkFiפ)!e8=h0I=XUb9BګNz-Yk;=vOzWNIUUU]-0 ]iprSݳGBH0})": -`gy1o}DP%AEwnj5"Юm3,5 \Jd:X#T .t7'F +ƽ ڎ]%`$et9Jγ2.U0 gr|88J{$5OLU@%)jf˟Cʫ܆3%43>wl8%9!6Y+풚a#D.jd8N U2bt >‡4q hNv `ni;nۑa< VxQ^Vlq@x띰*mZ0la-N_UnH`Uw +a5$JhB>F*8KC֖ a `7 Nܴax:5'y=w753`EA[7zN^y6_5MV3a8w&h?X#- nVA2lĻёU[<.5*\upgU"C0ޛ.w- C`SHr[O SVq ^ èbK!M\Zt[zIM=/pLձim1e"K k^S !Ӈy.ӛ Y܏Byf,}v5~ԕ)UT2*^xh J;|ws'AHM }R2jQxjDrn8`A~ahgUrm,2 ӄIIdOvLwr83SjM3sN.o3 
2"NMLv>_7kPWߒ=O;_c=6 937Dv2 j2 ̂,BҤ_{ɘX?m )aj"36'߻z2jLA)ʝZx6X]c-ךh)#T㌉,) l:͒1x|72$ŜlhTJijҰdsqTZM"P\ދ?`E h?\[G *wsFtcriè0? E'I+T H~0kD;xS;K:2&E!xɚ vCE{u_N7-L9rt Ɓե*.E?~VUz5qc-jx?x;__>`!A:UbWɜ9])~""`G>"Lx `Ǹpv  Mz8tu z%kEѕQ#X@Xq!m j''IySD󬂊]龞EjA 'E'6`"DTnV-0FJ?jX" t-1Sw([|P^! s:k/}pcY_xl~l!?k: wFCX2ERm@i_":4}Ԯi"Ny.?)-GB78uGx/0O7t_W`n {yK>%% 7=ZkƹHCc >ʹ'k+Ew4(O2staFodQ?>?%dC@H~IGO,NjQtؠs}PN[)ѽο'endstream endobj 127 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 @+XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|s^[+x$E b'Φ2?M%K17MR{;RB|DYSyendstream endobj 128 0 obj << /Type /XRef /Length 148 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 129 /ID [<97623115b8daea26baaf7fbbf8a22f1a><22ca8a86377bb20f9e786dcb1ae50e6e>] >> stream xcb&F~0 $8JP? 68b@$dUo@3 DJ;@$ 0"VH , D"E3AX\ 8B3؜*0%0YL!.^9ծ endstream endobj startxref 97335 %%EOF spatstat/inst/doc/bugfixes.Rnw0000644000176200001440000007354213623712063016161 0ustar liggesusers\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} % \VignetteIndexEntry{Bugs Fixed in Spatstat} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} <>= library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Bugs fixed in \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \thispagestyle{empty} This vignette lists all \emph{important} bugs detected and fixed in the \spst\ package since 2010. It also explains how to search the list of all recorded bugs in \spst. 
<>= nbugs <- nrow(news(grepl("^BUG", Category), package="spatstat")) nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), package="spatstat")) @ \tableofcontents \pagebreak \section{Bug history} Thousands of bugs have been detected and fixed in \spst\ during its 25-year history. We started recording the bug history in 2010. Bugs that may have affected the user are listed in the package \texttt{NEWS} file, and can be searched using the \R\ command \texttt{news} or the \spst\ command \texttt{bugfixes}. To see the bugs which have just been fixed in the latest version of \spst, type <>= bugfixes @ To see all bugs which were fixed after a particular version of \spst, for example, bugs that were fixed in version \texttt{1.50-0} or later, type <>= bugfixes(sinceversion="1.50-0") @ To see all bugs in \spst\ that were fixed after a particular date, for example 30 June 2017, type <>= bugfixes(sincedate="2017-06-30") @ To see all bugs fixed after the book \cite{baddrubaturn15} was written, type <>= bugfixes("book") @ To see all bugs in the entire recorded history of \spst, type <>= bugfixes("all") @ which currently produces a list of \Sexpr{nbugs} bugs, of which \Sexpr{nbugssince} were detected after publication of the book \cite{baddrubaturn15}. \pagebreak \section{Serious bugs} Following is a list of the {\bf most serious bugs}, in decreasing order of potential impact. \newcommand\bugger[4]{% \\ {} % {\small (Bug introduced in \texttt{spatstat {#1}}, {#2}; % fixed in \texttt{spatstat {#3}}, {#4})}% } %%% LEVEL 1 \subsection{Serious Bugs, Always Wrong, Broad Impact} \begin{itemize} \item \texttt{nncross.ppp}: Results were completely incorrect if $k > 1$. \bugger{1.31-2}{april 2013}{1.35-0}{december 2013} \item \texttt{nncross.pp3}: Results were completely incorrect in some cases. \bugger{1.32-0}{august 2013}{1.34-0}{october 2013} \item \texttt{cdf.test.ppm}: Calculation of $p$-values was incorrect for Gibbs models: $1-p$ was computed instead of $p$. 
\bugger{1.40-0}{december 2014}{1.45-2}{may 2016} \item \texttt{Smooth.ppp}: Results of \verb!Smooth(X, at="points", leaveoneout=FALSE)! were completely incorrect. \bugger{1.20-5}{august 2010}{1.46-0}{july 2016} \item \texttt{rmh}: \begin{itemize} \item Simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as \verb!ppm(betacells, ~marks, Strauss(60))! due to a coding error in the \texttt{C} interface. \bugger{1.22-3}{march 2010}{1.22-3}{june 2011} \item Simulation of the Area-Interaction model was completely incorrect. \bugger{1.23-6}{october 2011}{1.31-0}{january 2013} \item Simulation of the Geyer saturation process was completely incorrect. \bugger{1.31-0}{january 2013}{1.31-1}{march 2013} \item Simulation of the Strauss-Hard Core process was partially incorrect, giving point patterns with a slightly lower intensity. \bugger{1.31-0}{january 2013}{1.37-0}{may 2014} \item Simulation of the \emph{multitype} hard core model was completely incorrect (the interaction was effectively removed, changing the model into a Poisson process). \bugger{1.31-0}{january 2013}{1.63-0}{january 2020} \item The result of simulating a model with a hard core did not necessarily respect the hard core constraint, and simulation of a model with strong inhibition did not necessarily converge. This only happened if the first order trend was large, the starting state (\texttt{n.start} or \texttt{x.start}) was not given, and the number of iterations \texttt{nrep} was not very large. It occurred because of a poor choice for the default starting state. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.40-0}, december 2014)} \item Simulation was incorrect in the case of an inhomogeneous multitype model with \texttt{fixall=TRUE} (i.e.\ with a fixed number of points of each type) if the model was segregated (i.e.\ if different types of points had different first order trend). 
The effect of the error was that all types of points had the same first order trend. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.43-0}, september 2015)} \item Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of \texttt{nrep}) were incorrect, while long runs were correct. \bugger{1.17-0}{october 2009}{1.31-1}{march 2013} \end{itemize} \item \texttt{nnmark, as.im.ssf}: If \code{marks(X)} was a matrix rather than a data frame, the results were completely incorrect. \bugger{1.32-0}{august 2013}{1.55-1}{april 2018} \item \texttt{rVarGamma}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{nu.ker}. \bugger{1.25-0}{december 2011}{1.35-0}{december 2013} \item \texttt{rCauchy}: Simulations were incorrect; they were generated using the wrong value of the parameter \texttt{omega}. \bugger{1.25-0}{december 2011}{1.25-2}{january 2012} \item \texttt{lppm}: For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. \bugger{1.23-0}{july 2011}{1.30-0}{december 2012} \item \verb![.lpp!: The local coordinate \texttt{seg} was completely incorrect, when \texttt{i} was a window. \bugger{1.31-2}{april 2013}{1.45-0}{march 2016} \item \texttt{lohboot}: Implementation was completely incorrect. \bugger{1.26-1}{april 2012}{1.53-2}{october 2017} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were incorrect for non-Poisson processes due to a mathematical error. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} \end{itemize} %%% LEVEL 2 \subsection{Serious Bugs, Often Completely Wrong, Moderate Impact} \begin{itemize} \item \texttt{bw.pcf}: Results were totally incorrect due to a typo. 
\bugger{1.51-0}{may 2017}{1.52-0}{august 2017} \item \texttt{density.ppp}: The standard error (calculated when \texttt{se=TRUE}) was incorrect when \texttt{sigma} was a single numeric value. The output was equal to \texttt{sqrt(sigma)} times the correct answer. \bugger{1.41-1}{february 2015}{1.57-0}{october 2018} \item \texttt{rthin}: If \texttt{P} was close to 1, the result was sometimes an empty point pattern when it should have been identical to \texttt{X}. \bugger{1.43-0}{october 2015}{1.57-0}{october 2018} \item \texttt{predict.mppm}: If the model included random effects, and if the library \pkg{MASS} was not loaded, the predictions were on the log scale (i.e.\ they were logarithms of the correct values). \bugger{1.43-0}{october 2015}{1.55-1}{april 2018} \item \texttt{nnmap}, \texttt{nnmark}: Values were incorrect if the resulting pixel image had unequal numbers of rows and columns. \bugger{1.35-0}{december 2013}{1.55-0}{january 2018} \item \texttt{vcov.mppm}: Format was incorrect (rows/columns were omitted) in some cases. \bugger{1.45-1}{may 2016}{1.55-0}{january 2018} \item \texttt{model.matrix.ppm}, \texttt{model.frame.ppm}: Values were sometimes incorrect when applied to the result of \texttt{subfits}. To be precise, if \texttt{fit} was an \texttt{mppm} object fitted to a hyperframe that included ``design covariates'' (covariates that take a constant value in each row of the hyperframe), and if \verb!futs <- subfits(fit)!, then \verb!model.matrix(futs[[i]])! gave incorrect values in the columns corresponding to the design covariates. \bugger{1.45-1}{may 2016}{1.55-0}{january 2018} \item \texttt{predict.rhohat}, \texttt{simulate.rhohat}: Results were incorrect for a \texttt{rhohat} object computed from linear network data (class \texttt{"lpp"} or \texttt{"lppm"}). \bugger{1.31-0}{march 2013}{1.63-1}{february 2020} \item \texttt{predict.rho2hat}: Results were incorrect for a \texttt{rho2hat} object computed from a point pattern. 
\bugger{1.42-0}{may 2015}{1.52-0}{august 2017} \item \texttt{density.ppp}: Result was incorrect for non-Gaussian kernels when \texttt{at="points"} and \texttt{leaveoneout=FALSE}. \bugger{1.47-0}{october 2016}{1.57-0}{october 2018} \item \texttt{envelope.ppm}: If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). \bugger{1.23-5}{september 2011}{1.23-6}{october 2011} \item \texttt{linearK}, \texttt{linearpcf}, \texttt{linearKinhom}, \texttt{linearpcfinhom} and multitype versions: These functions were sometimes greatly underestimated when the network had segments shorter than 10 coordinate units. \bugger{1.44-0}{december 2015}{1.46-2}{july 2016} \item \texttt{nncross}, \texttt{distfun}, \texttt{AreaInter}: Results of \texttt{nncross} were possibly incorrect when \code{X} and \code{Y} did not have the same window. This bug affected values of \texttt{distfun} and may also have affected ppm objects with interaction \texttt{AreaInter}. \bugger{1.9-4}{june 2006}{1.25-2}{january 2012} \item \texttt{update.kppm}: \begin{itemize} \item Did not function correctly when several additional arguments were given. \bugger{1.42-2}{june 2015}{1.54-0}{november 2017} \item If the call to \texttt{update} did not include a formula argument or a point pattern argument, then all arguments were ignored. Example: \texttt{update(fit, improve.type="quasi")} was identical to \texttt{fit}. \bugger{1.42-2}{june 2015}{1.45-0}{march 2016} \end{itemize} \item \texttt{markcorrint}: Results were completely incorrect. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were slightly incorrect for models with a hard core, due to a mathematical error. 
\bugger{1.51-0}{may 2017}{1.55-1}{april 2018} \item \texttt{Ops.msr}: If the input data contained a pixel image of the smoothed density, this image was not updated; it was copied to the output unchanged. Plots of the resulting measure were incorrect. \bugger{1.52-0}{august 2017}{1.55-1}{april 2018} \item \verb![.linnet!: in calculating \verb!L[W]! where \texttt{W} is a window, the code ignored segments of \code{L} that crossed \code{W} without having a vertex in \code{W}. \bugger{1.53-0}{september 2017}{1.55-1}{april 2018} \item \verb!as.im.function!: if the function domain was not a rectangle and the function values were categorical (factor) values, the result was an empty image. \bugger{1.42-0}{may 2015}{1.57-0}{october 2018} \end{itemize} %%% LEVEL 3 \subsection{Bugs, Substantially Incorrect, Moderate Impact} \begin{itemize} \item \texttt{as.linnet.psp}: Sometimes produced a network with duplicated segments. [Such objects can be repaired using \texttt{repairNetwork}.] \bugger{1.41-1}{february 2015}{1.62-0}{december 2019} \item \texttt{rlpp}: The resulting pattern was unmarked even when it should have been multitype. \bugger{1.48-0}{december 2016}{1.63-0}{january 2020} \item \texttt{spatialcdf}: Argument \texttt{weights} was ignored, unless it was a fitted model. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{ppp}: Points inside the window were erroneously rejected as lying outside the window, if the window was a polygon equivalent to a rectangle with sides longer than $10^6$ units. {\small (Bug was present since the beginning. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{inside.owin}: All results were \texttt{FALSE} if the window was a polygon equivalent to a rectangle with sides longer than $10^6$ units. {\small (Bug was present since the beginning. 
Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{sumouter}: result was incorrect (all entries were zero) if \texttt{w} was missing and \texttt{y} was given. \bugger{1.47-0}{october 2016}{1.59-0}{march 2019} \item \texttt{simulate.dppm}, \texttt{simulate.detpointprocfamily}: In dimensions higher than 2, the result was shifted so that it was centred at the origin. \bugger{1.54-0}{december 2017}{1.55-0}{january 2018} \item \texttt{integral.msr}: If the result was a matrix, it was the transpose of the correct answer. \bugger{1.35-0}{december 2013}{1.55-1}{april 2018} \item \texttt{density.ppp}: Values of \verb!density(X, at="points")! and \verb!Smooth(X, at="points")! were sometimes incorrect, due to omission of the contribution from the data point with the smallest $x$ coordinate. \bugger{1.26-0}{april 2012}{1.46-1}{july 2016} \item \texttt{multiplicity.default}: The first occurrence of any value in the input was incorrectly assigned a multiplicity of 1. \bugger{1.32-0}{august 2013}{1.57-1}{november 2018} \item \texttt{update.ppm}: If the argument \texttt{Q} was given, the results were usually incorrect, or an error was generated. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{subfits}: The interaction coefficients of the submodels were incorrect for Gibbs models with a multitype interaction (\texttt{MultiStrauss}, etc). \bugger{1.35-0}{december 2013}{1.45-2}{may 2016} \item \texttt{F3est}: Estimates of $F(r)$ for the largest value of $r$ were wildly incorrect. {\small (Bug was present since about 2010. Fixed in \texttt{spatstat 1.48-0}, december 2016)} \item \texttt{kppm}, \texttt{matclust.estpcf}, \texttt{pcfmodel}: The pair correlation function of the M\'atern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in \texttt{matclust.estpcf()} or \texttt{kppm(clusters="MatClust")}. 
\bugger{1.20-2}{august 2010}{1.33-0}{september 2013} \item \texttt{ppm}: Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter \texttt{sat}. \bugger{1.20-0}{july 2010}{1.31-2}{april 2013} \item \texttt{clip.infline}: Results were incorrect unless the midpoint of the window was the coordinate origin. \bugger{1.15-1}{april 2009}{1.48-0}{december 2016} \item \texttt{intensity.ppm}: Result was incorrect for Gibbs models if the model was exactly equivalent to a Poisson process (i.e. if all interaction coefficients were exactly zero). \bugger{1.28-1}{june 2012}{1.47-0}{october 2016} \item \texttt{idw}: Results were incorrect if \texttt{se=TRUE} and \verb!at="pixels"! and \texttt{power} was not equal to 2. The pixel values of \verb!$estimate! were all equal to zero. \bugger{1.58-0}{january 2019}{1.63-0}{january 2020} \item \texttt{funxy}: Did not correctly handle one-line functions. The resulting objects evaluated the wrong function in some cases. \bugger{1.45-0}{march 2016}{1.46-0}{july 2016} \item \texttt{kernel.moment}: Result was incorrect for \texttt{kernel="cosine"} and \texttt{kernel="optcosine"}. \bugger{1.45-2}{may 2016}{1.56-0}{june 2018} \item \verb![.msr!: Format was mangled if the subset contained exactly one quadrature point. \bugger{1.21-3}{january 2011}{1.56-0}{june 2018} \item \texttt{hyperframe}: Did not correctly handle date-time values (columns of class \texttt{"Date"}, etc). \bugger{1.19-1}{may 2010}{1.63-0}{january 2020} \item \texttt{tess}: If a list of tiles was given, and the tiles were pixel images or masks, their pixel resolutions were ignored, and reset to the default $128 \times 128$. {\small (Bug fixed in \texttt{spatstat 1.56-0}, june 2018)} \item \texttt{nnorient}: crashed if the point pattern was empty. \bugger{1.40-0}{december 2014}{1.57-0}{october 2018} \item \verb!as.im.data.frame!: Results were incorrect for factor-valued data. 
\bugger{1.45-2}{may 2016}{1.63-0}{january 2020} \end{itemize} %% LEVEL 4: \subsection{Partially Incorrect} \begin{itemize} \item \texttt{kppm}, \texttt{AIC}: For kppm models fitted with \verb!method='clik2'!, the resulting value of \texttt{logLik()} was equal to $1/2$ of the correct value. This would have affected model comparison using AIC, and model selection using \texttt{step}. \bugger{1.42-0}{may 2015}{1.63-0}{january 2020}. \item \texttt{edge.Ripley}, \texttt{Kest}, \texttt{Kinhom}: Isotropic correction weights for polygonal windows were sometimes incorrect for small radius \texttt{r} if the polygon contained many small segments or if the polygon was very long and thin. \bugger{1.60-0}{june 2019}{1.62-0}{december 2019}. \item \texttt{beachcolours}, \texttt{beachcolourmap}: The number of colours was not always equal to \texttt{ncolours}. \bugger{1.32-0}{august 2013}{1.59-0}{march 2019} \item \texttt{extractbranch.lpp}: Point pattern coordinates were sometimes erroneously set to \texttt{NA}. \bugger{1.42-0}{may 2015}{1.59-0}{march 2019} \item \texttt{rotmean}: When \texttt{result="im"} the resulting image did not have the same dimensions as the input. \bugger{1.42-2}{june 2015}{1.58-0}{january 2019} \item \texttt{quadratcount.ppp}: Sometimes issued an incorrect warning that data points were outside the tessellation, when \texttt{tess} was a tessellation represented by a pixel image. {\small (Bug fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{quadrat.test}: the $p$-value was \texttt{NA} if one of the observed counts was zero, for the Cressie-Read tests with \texttt{CR} not equal to $1$ or $-1$. \bugger{1.38-0}{august 2014}{1.59-0}{march 2019} \item \texttt{quadrat.test}: argument \texttt{CR} was ignored if \texttt{method="MonteCarlo"}. \bugger{1.38-0}{august 2014}{1.61-0}{september 2019} \item \texttt{rotmean}: If argument \texttt{origin} was given, and if \texttt{result="im"} was specified, the resulting image was wrongly displaced. 
\bugger{1.42-2}{june 2015}{1.58-0}{january 2019} \item \texttt{runifpointx}: Result was mangled when \texttt{n=0} or \texttt{n=1}. \bugger{1.50-0}{march 2017}{1.58-0}{january 2019} \item \texttt{model.matrix.ppm}: The attribute \texttt{assign} was omitted in some cases. \bugger{1.45-1}{may 2016}{1.55-0}{january 2018} \item \texttt{model.matrix.mppm}: Sometimes returned a matrix with the wrong number of rows. \bugger{1.55-0}{january 2018}{1.63-0}{january 2020} \item \texttt{density.ppp}: If the smoothing bandwidth \texttt{sigma} was very small (e.g.\ less than the width of a pixel), results were inaccurate if the default resolution was used, and completely incorrect if a user-specified resolution was given. \bugger{1.26-0}{april 2012}{1.52-0}{august 2017} \item \texttt{selfcrossing.psp}: $y$ coordinate values were incorrect. \bugger{1.23-2}{august 2011}{1.25-3}{february 2012} \item \texttt{Geyer}: For point process models with the \texttt{Geyer} interaction, \texttt{vcov.ppm} and \texttt{suffstat} sometimes gave incorrect answers. \bugger{1.27-0}{may 2012}{1.30-0}{december 2012} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Calculations were incorrect for a Geyer model fitted using an edge correction other than \texttt{"border"} or \texttt{"none"}. \bugger{1.25-0}{december 2011}{1.51-0}{may 2017} \item \texttt{leverage.ppm}, \texttt{influence.ppm}, \texttt{dfbetas.ppm}: Results were slightly incorrect for models fitted using the border correction. \bugger{1.25-0}{december 2011}{1.54-0}{november 2017} \item \texttt{leverage.ppm}: The mean leverage value (shown as a contour level in \texttt{plot.leverage.ppm}) was slightly incorrect for Gibbs models. \bugger{1.25-0}{december 2011}{1.54-0}{november 2017} \item \texttt{vcov.ppm}, \texttt{suffstat}: These functions sometimes gave incorrect values for marked point process models. 
\bugger{1.27-0}{may 2012}{1.29-0}{october 2012} \item \texttt{diagnose.ppm}: When applied to a model obtained from \texttt{subfits()}, in the default case (\texttt{oldstyle=FALSE}) the variance calculations were incorrect. Consequently the dotted lines representing significance bands were incorrect. An error or warning about negative variances occurred sometimes. However, calculations with \texttt{oldstyle=TRUE} were correct. The default has now been changed to \texttt{oldstyle=TRUE} for such models. \bugger{1.35-0}{december 2013}{1.45-0}{march 2016} \item \texttt{Smooth.ppp}: Results for \verb!at="points"! were garbled, for some values of \texttt{sigma}, if \texttt{X} had more than one column of marks. \bugger{1.38-0}{october 2014}{1.46-0}{july 2016} \item \texttt{linearK}, \texttt{linearKinhom}: If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear $K$ function. \bugger{1.23-0}{july 2011}{1.27-0}{may 2012} \item \texttt{Kinhom}, \texttt{Linhom}: the results were not renormalised (even if \texttt{renormalise=TRUE}) in some cases. \bugger{1.21-0}{december 2010}{1.37-0}{may 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Ignored argument \texttt{reciplambda2} in some cases. \bugger{1.39-0}{october 2014}{1.40-0}{december 2014} \item \texttt{Kinhom}, \texttt{Linhom}: Calculations were incorrect if \texttt{lambda} was a fitted point process model. \bugger{1.38-0}{august 2014}{1.38-1}{august 2014} \item \texttt{integral.linim}, \texttt{integral.linfun}: \begin{itemize} \item results were inaccurate because of a bias in the distribution of sample points. \bugger{1.41-0}{february 2015}{1.47-0}{october 2016} \item results were inaccurate if many of the segment lengths were shorter than the width of a pixel. 
\bugger{1.41-0}{february 2015}{1.48-0}{december 2016} \item results were wildly inaccurate in some extreme cases where many segments were very short. \bugger{1.41-0}{february 2015}{1.54-0}{november 2017} \end{itemize} \item \texttt{predict.ppm}: Calculation of the conditional intensity omitted the edge correction if \texttt{correction='translate'} or \texttt{correction='periodic'}. \bugger{1.17-0}{october 2009}{1.31-3}{may 2013} \item \texttt{varblock}: Calculations were incorrect if more than one column of edge corrections was computed. \bugger{1.21-1}{november 2010}{1.39-0}{october 2014} \item \texttt{scan.test} Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). \bugger{1.24-1}{october 2011}{1.26-1}{april 2012} \item \texttt{relrisk}: When \verb!at="pixels"!, a small fraction of pixel values were sometimes wildly inaccurate, due to numerical errors. This affected the range of values in the result, and therefore the appearance of plots. {\small (Bug fixed in \texttt{spatstat 1.40-0}, december 2014)} \item \texttt{predict.slrm}: Results of \texttt{predict(object, newdata)} were incorrect if the spatial domain of \texttt{newdata} was larger than the original domain. \bugger{1.21-0}{november 2010}{1.25-3}{february 2012} \item \texttt{Lest}: The variance approximations (Lotwick-Silverman and Ripley) obtained with \texttt{var.approx=TRUE} were incorrect for \texttt{Lest} (although they were correct for \texttt{Kest}) due to a coding error. \bugger{1.24-1}{october 2011}{1.24-2}{november 2011} \item \texttt{bw.diggle}: Bandwidth was too large by a factor of 2. \bugger{1.23-4}{september 2011}{1.23-5}{september 2011} \item pair correlation functions (\texttt{pcf.ppp}, \texttt{pcfdot}, \texttt{pcfcross} etc:) The result had a negative bias at the maximum $r$ value, because contributions to the pcf estimate from interpoint distances greater than \texttt{max(r)} were mistakenly omitted. 
{\small (Bugs fixed in \texttt{spatstat 1.35-0}, december 2013)} \item \texttt{Kest}, \texttt{Lest}: Gave incorrect values in very large datasets, due to numerical overflow. `Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] \item \texttt{bw.relrisk}: Implementation of \texttt{method="weightedleastsquares"} was incorrect and was equivalent to \texttt{method="leastsquares"}. \bugger{1.21-0}{november 2010}{1.23-4}{september 2011} \item \texttt{triangulate.owin}: Results were incorrect in some special cases. \bugger{1.42-2}{june 2015}{1.44-0}{december 2015} \item \texttt{crosspairs}: If \texttt{X} and \texttt{Y} were identical point patterns, the result was not necessarily symmetric (on some machines) due to numerical artifacts. \bugger{1.35-0}{december 2013}{1.44-0}{december 2015} \item \texttt{bdist.tiles}: Values were incorrect in some cases due to numerical error. {\small (Bug fixed in \texttt{spatstat 1.29-0}, october 2012)} \item \texttt{Kest.fft}: Result was incorrectly normalised. \bugger{1.21-2}{january 2011}{1.44-0}{december 2015} \item \texttt{crossdist.ppp}: Ignored argument \texttt{squared} if \texttt{periodic=FALSE}. {\small (Bug fixed in \texttt{spatstat 1.38-0}, july 2014)} \item polygon geometry: The point-in-polygon test gave the wrong answer in some boundary cases. {\small (Bug fixed in \texttt{spatstat 1.23-2}, august 2011)} \item \texttt{MultiStraussHard}: If a fitted model with \texttt{MultiStraussHard} interaction was invalid, \texttt{project.ppm} sometimes yielded a model that was still invalid. {\small (Bug fixed in \texttt{spatstat 1.42-0}, may 2015)} \item \texttt{pool.envelope}: Did not always respect the value of \texttt{use.theory}. \bugger{1.23-5}{september 2011}{1.43-0}{september 2015} \item \texttt{nncross.lpp}, \texttt{nnwhich.lpp}, \texttt{distfun.lpp}: Sometimes caused a segmentation fault. 
\bugger{1.44-0}{december 2015}{1.44-1}{december 2015} \item \texttt{anova.ppm}: If a single \texttt{object} was given, and it was a Gibbs model, then \texttt{adjust} was effectively set to \texttt{FALSE}. \bugger{1.39-0}{october 2014}{1.44-1}{december 2015} \item \verb![.linim!: the result sometimes had the wrong class. \bugger{1.53-0}{september 2017}{1.55-1}{april 2018} \item \verb![.linim!: factor values were erroneously converted to integers, in some cases. \bugger{1.53-0}{september 2017}{1.61-0}{september 2019} \item \verb!is.subset.owin!: sometimes gave the wrong result for polygonal windows due to numerical rounding error. {\small (Bug was always present. Fixed in \texttt{spatstat 1.59-0}, march 2019)} \item \texttt{plot.tess}: the legend showed the tile names in lexicographical order, rather than their original order. \bugger{1.55-1}{april 2018}{1.59-0}{march 2019} \item \texttt{rThomas}, \texttt{rMatClust}, \texttt{rCauchy}, \texttt{rVarGamma}: If the simulation window was not a rectangle, the attribute \texttt{Lambda} was a numeric vector, rather than a pixel image as intended. \bugger{1.43-0}{october 2015}{1.59-0}{march 2019} \item \texttt{effectfun}: In a multitype point process model, \texttt{effectfun} ignored any user-specified value of \texttt{marks}. \bugger{1.52-0}{august 2017}{1.61-0}{september 2019} \item \verb!"[<-.hyperframe"!: Some classes of objects were not handled correctly. \bugger{1.37-0}{may 2014}{1.61-0}{september 2019} \item \texttt{relrisk.ppp}: Crashed if there were more than 2 types of points and \texttt{method = "leastsquares"} or \texttt{method = "weightedleastsquares"}. \bugger{1.23-4}{september 2011}{1.63-0}{january 2020} \item \texttt{nncross.ppp}: Format of output was incorrect if \texttt{X} was an empty pattern. \bugger{1.56-0}{june 2018}{1.63-0}{january 2020} \item \texttt{rmh}, \texttt{rmh.default}: For a marked point process, the debugger did not display the marks. 
(The \texttt{rmh} debugger is invoked by calling \texttt{rmh} with \texttt{snoop=TRUE}). \bugger{1.31-1}{march 2013}{1.63-0}{january 2020} \end{itemize} \begin{thebibliography}{1} \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \end{thebibliography} \end{document} spatstat/inst/doc/packagesizes.txt0000644000176200001440000002252313624161141017054 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2001-08-08" "1.0-1" 109 196 0 706 1370 "2002-05-17" "1.1-3" 116 220 0 1140 1370 "2002-08-06" "1.2-1" 129 237 0 1786 1474 "2003-03-12" "1.3-1" 134 242 0 1955 1474 "2003-05-05" "1.3-2" 148 257 0 2024 1474 "2003-07-28" "1.3-3" 148 266 0 2034 1474 "2003-11-12" "1.3-4" 148 261 0 2033 1474 "2004-01-27" "1.4-3" 166 296 0 3641 1437 "2004-02-11" "1.4-4" 166 296 0 3641 1437 "2004-03-25" "1.4-5" 166 296 0 3646 1439 "2004-05-23" "1.4-6" 166 296 0 3689 1514 "2004-06-17" "1.5-1" 166 300 0 4255 1514 "2004-09-01" "1.5-3" 171 311 0 4636 1514 "2004-09-24" "1.5-4" 174 315 0 4642 1514 "2004-10-21" "1.5-5" 180 319 0 4686 1514 "2004-11-15" "1.5-6" 180 319 0 4686 1512 "2004-11-27" "1.5-7" 180 319 0 4687 1512 "2005-01-25" "1.5-8" 182 320 0 4770 1512 "2005-01-27" "1.5-9" 182 321 0 4805 1512 "2005-02-16" "1.5-10" 182 321 0 4805 1512 "2005-03-14" "1.6-1" 188 345 0 5597 1517 "2005-03-30" "1.6-2" 188 345 0 5600 1450 "2005-04-08" "1.6-3" 189 352 0 5715 1474 "2005-04-14" "1.6-4" 194 358 0 6056 1544 "2005-04-21" "1.6-5" 194 358 0 6056 1544 "2005-05-09" "1.6-6" 195 373 0 6385 1592 "2005-05-25" "1.6-7" 201 392 0 7727 1644 "2005-06-07" "1.6-8" 206 400 0 8003 1644 "2005-07-01" "1.6-9" 207 402 0 8025 1644 "2005-07-26" "1.7-11" 212 406 0 8213 1643 "2005-08-10" "1.7-12" 213 407 0 8279 1643 "2005-10-27" "1.7-13" 215 410 0 8531 1643 "2005-11-24" "1.8-1" 215 418 0 8539 1643 "2005-12-05" "1.8-2" 229 440 0 9031 1643 "2005-12-21" "1.8-3" 237 446 0 9175 
1643 "2006-01-09" "1.8-4" 237 446 0 9207 1643 "2006-01-18" "1.8-5" 237 446 0 9225 1643 "2006-02-23" "1.8-6" 241 449 0 9315 1643 "2006-03-02" "1.8-7" 247 457 0 9627 1643 "2006-03-30" "1.8-8" 248 459 0 9662 1643 "2006-04-18" "1.8-9" 259 446 21 10144 1832 "2006-05-03" "1.9-0" 259 447 21 10396 1817 "2006-05-26" "1.9-1" 266 466 21 10861 3069 "2006-06-05" "1.9-2" 268 473 21 11409 3487 "2006-06-20" "1.9-3" 268 479 21 11941 4140 "2006-08-03" "1.9-4" 273 490 22 12435 5619 "2006-08-22" "1.9-5" 274 490 22 12493 5560 "2006-09-27" "1.9-6" 277 494 22 12573 5601 "2006-10-19" "1.10-1" 283 529 22 13124 5601 "2006-10-19" "1.10-1" 283 529 22 13124 5171 "2006-11-06" "1.10-2" 283 529 22 13194 5601 "2006-11-20" "1.10-3" 287 540 22 13425 5684 "2007-01-08" "1.10-4" 291 554 22 13591 5684 "2007-01-08" "1.10-4" 291 554 22 13591 5684 "2007-01-12" "1.11-0" 291 562 22 13728 5684 "2007-02-01" "1.11-1" 294 564 23 13614 5684 "2007-03-10" "1.11-2" 301 574 24 13860 5684 "2007-03-16" "1.11-3" 305 580 24 14106 5819 "2007-03-19" "1.11-4" 307 589 24 14316 5868 "2007-05-08" "1.11-5" 307 591 24 14373 5940 "2007-05-18" "1.11-6" 308 592 24 14390 5940 "2007-06-09" "1.11-7" 311 595 24 14506 5940 "2007-07-26" "1.11-8" 312 596 24 14552 6055 "2007-08-20" "1.12-0" 319 619 25 15246 6055 "2007-09-22" "1.12-1" 319 619 25 15250 6055 "2007-10-26" "1.12-2" 322 623 25 15684 6188 "2007-11-02" "1.12-3" 322 626 25 15767 6188 "2007-12-18" "1.12-4" 322 626 25 15814 6188 "2008-01-07" "1.12-5" 322 630 25 15891 6238 "2008-02-04" "1.12-6" 328 638 25 16334 6446 "2008-02-26" "1.12-8" 328 639 25 16405 6718 "2008-03-18" "1.12-9" 331 644 25 16606 6718 "2008-04-02" "1.12-10" 331 644 25 16649 6771 "2008-04-11" "1.13-0" 332 645 25 16753 6771 "2008-04-23" "1.13-1" 333 647 25 16812 6840 "2008-05-14" "1.13-2" 339 654 25 17057 6840 "2008-06-24" "1.13-3" 340 657 25 17182 6840 "2008-07-18" "1.13-4" 348 672 26 17527 6840 "2008-07-22" "1.14-0" 354 681 26 17923 7131 "2008-07-22" "1.14-1" 356 684 26 18052 7131 "2008-09-08" "1.14-2" 360 688 27 
18087 7185 "2008-09-26" "1.14-3" 362 693 27 18194 7185 "2008-10-16" "1.14-4" 366 707 27 18427 7185 "2008-10-23" "1.14-5" 368 715 27 18493 7185 "2008-11-07" "1.14-6" 372 726 27 18657 7185 "2008-11-17" "1.14-7" 374 730 27 18671 7185 "2008-12-10" "1.14-8" 377 734 27 18766 7185 "2008-12-16" "1.14-9" 377 734 27 18772 7185 "2009-01-30" "1.14-10" 381 741 27 18949 7186 "2009-03-02" "1.15-0" 384 750 27 19212 7362 "2009-03-31" "1.15-1" 386 752 28 19292 7439 "2009-04-14" "1.15-2" 396 772 28 19880 7436 "2009-05-13" "1.15-3" 398 777 29 20141 7524 "2009-06-11" "1.15-4" 399 776 29 20176 7524 "2009-07-01" "1.16-0" 405 787 29 20774 7524 "2009-07-27" "1.16-1" 411 814 29 21433 7524 "2009-08-22" "1.16-2" 417 821 29 21863 7937 "2009-08-28" "1.16-3" 419 831 29 22060 7941 "2009-10-22" "1.17-0" 420 833 30 21881 8705 "2009-11-04" "1.17-1" 437 875 30 22900 10614 "2009-11-10" "1.17-2" 439 880 30 22943 10606 "2009-12-15" "1.17-3" 442 885 30 23193 10606 "2009-12-15" "1.17-4" 445 890 30 23640 10606 "2010-01-06" "1.17-5" 451 906 30 24283 12003 "2010-02-08" "1.17-6" 456 921 30 24795 12003 "2010-03-10" "1.18-0" 459 931 30 25073 12333 "2010-03-19" "1.18-1" 462 945 30 25464 12439 "2010-04-09" "1.18-2" 463 950 30 25631 12475 "2010-04-19" "1.18-3" 464 953 30 25720 12475 "2010-05-02" "1.18-4" 475 980 30 26093 13417 "2010-05-07" "1.18-5" 475 981 30 26117 13417 "2010-05-14" "1.19-0" 476 982 30 26205 13417 "2010-05-22" "1.19-1" 479 984 31 26286 13556 "2010-06-09" "1.19-2" 481 996 31 26653 13667 "2010-06-16" "1.19-3" 483 1003 31 26733 13667 "2010-07-15" "1.20-0" 483 1017 31 26926 14009 "2010-07-26" "1.20-1" 484 1020 31 27107 14263 "2010-08-10" "1.20-2" 489 1028 31 27728 14466 "2010-08-23" "1.20-3" 489 1033 31 27869 14564 "2010-10-21" "1.20-4" 493 1040 31 28237 14805 "2010-10-25" "1.20-5" 494 1043 31 28377 15160 "2010-11-05" "1.21-0" 504 1067 31 41301 15160 "2010-11-11" "1.21-1" 507 1075 31 41714 15554 "2011-01-17" "1.21-3" 515 1103 31 42975 15747 "2011-01-20" "1.21-4" 515 1103 31 42985 15747 "2011-02-10" 
"1.21-5" 515 1103 31 43037 15747 "2011-04-25" "1.21-6" 517 1107 31 43211 15747 "2011-04-28" "1.22-0" 526 1148 32 44006 15831 "2011-05-19" "1.22-1" 528 1154 32 44235 15820 "2011-06-13" "1.22-2" 537 1188 32 45006 16282 "2011-06-17" "1.22-3" 539 1197 32 45153 16269 "2011-07-07" "1.22-4" 550 1218 33 46696 16269 "2011-07-24" "1.23-0" 562 1244 34 47694 16496 "2011-08-01" "1.23-1" 564 1252 34 48014 16658 "2011-08-11" "1.23-2" 566 1260 34 48313 17035 "2011-08-12" "1.23-3" 566 1260 34 48319 17035 "2011-09-09" "1.23-4" 571 1269 34 48747 17243 "2011-09-23" "1.23-5" 575 1274 34 49128 17141 "2011-10-11" "1.23-6" 579 1286 34 49508 17141 "2011-10-22" "1.24-1" 585 1308 34 50154 17141 "2011-11-11" "1.24-2" 588 1312 34 50604 17839 "2011-12-06" "1.25-0" 602 1334 34 52015 18351 "2011-12-21" "1.25-1" 609 1339 35 52235 19088 "2012-01-19" "1.25-2" 610 1338 35 52774 19120 "2012-02-05" "1.25-3" 613 1345 35 53004 19120 "2012-02-29" "1.25-4" 614 1347 35 53302 19423 "2012-03-14" "1.25-5" 616 1351 35 53720 19506 "2012-04-08" "1.26-0" 616 1356 35 53816 19169 "2012-04-19" "1.26-1" 617 1358 35 54498 19261 "2012-05-16" "1.27-0" 630 1393 35 55787 19363 "2012-06-11" "1.28-0" 632 1417 35 56384 19363 "2012-08-23" "1.28-2" 640 1438 36 58566 19372 "2012-10-14" "1.29-0" 651 1470 36 59711 19457 "2012-12-23" "1.30-0" 666 1499 41 61344 19806 "2013-01-17" "1.31-0" 668 1507 41 61446 20094 "2013-03-01" "1.31-1" 678 1562 41 63783 20536 "2013-04-25" "1.31-2" 682 1581 41 64501 21117 "2013-05-27" "1.31-3" 685 1600 41 65545 21773 "2013-08-13" "1.32-0" 695 1625 41 67120 22151 "2013-09-05" "1.33-0" 701 1630 43 67397 22218 "2013-10-24" "1.34-0" 720 1666 43 69219 22867 "2013-11-03" "1.34-1" 720 1666 43 69180 23340 "2013-12-12" "1.35-0" 745 1717 47 72110 23491 "2014-02-18" "1.36-0" 757 1753 47 73946 24042 "2014-05-09" "1.37-0" 781 1841 47 77585 24633 "2014-08-15" "1.38-0" 803 1963 48 80709 25191 "2014-08-27" "1.38-1" 803 1965 48 80833 25191 "2014-10-23" "1.39-0" 824 2015 49 82274 25554 "2014-10-24" "1.39-1" 824 2015 49 
81990 25554 "2014-12-31" "1.40-0" 839 2071 51 85832 25637 "2015-02-26" "1.41-0" 861 2135 53 88407 25650 "2015-02-27" "1.41-1" 861 2135 53 88407 25650 "2015-05-27" "1.42-0" 888 2222 53 91600 25650 "2015-06-05" "1.42-1" 888 2225 53 91658 25650 "2015-06-28" "1.42-2" 890 2232 53 91985 25650 "2015-10-07" "1.43-0" 939 2342 54 95950 25802 "2015-12-22" "1.44-0" 949 2378 54 97522 27569 "2015-12-29" "1.44-1" 951 2385 54 97745 27569 "2016-03-10" "1.45-0" 961 2456 54 100964 28122 "2016-05-08" "1.45-1" 977 2478 54 101981 28124 "2016-05-09" "1.45-2" 977 2478 54 101981 28124 "2016-07-06" "1.46-0" 981 2490 54 102484 28310 "2016-07-08" "1.46-1" 981 2491 54 102573 28310 "2016-10-12" "1.47-0" 988 2533 54 103848 28679 "2016-12-22" "1.48-0" 1017 2611 54 105733 29466 "2017-02-08" "1.49-0" 1024 2629 54 106522 31029 "2017-02-08" "1.49-0" 1024 2629 54 106522 31029 "2017-03-22" "1.50-0" 1025 2476 54 104021 29413 "2017-05-04" "1.51-0" 1029 2501 54 105229 29430 "2017-08-10" "1.52-0" 1035 2518 54 106162 29416 "2017-08-16" "1.52-1" 1035 2518 54 106170 29416 "2017-09-23" "1.53-0" 984 2525 0 106672 29418 "2017-09-28" "1.53-1" 984 2525 0 106675 29418 "2017-10-08" "1.53-2" 984 2526 0 106797 29418 "2017-11-21" "1.54-0" 986 2544 0 107420 29488 "2018-01-29" "1.55-0" 988 2536 0 108015 29488 "2018-04-05" "1.55-1" 990 2545 0 109017 29769 "2018-06-15" "1.56-0" 999 2574 0 109767 30024 "2018-07-27" "1.56-1" 999 2577 0 109857 30024 "2018-10-30" "1.57-0" 1001 2584 0 110444 29954 "2018-11-03" "1.57-1" 1001 2584 0 110459 29954 "2019-01-09" "1.58-0" 1002 2585 0 110702 30470 "2019-01-10" "1.58-1" 1002 2585 0 110702 30470 "2019-01-10" "1.58-2" 1003 2586 0 110732 30470 "2019-03-22" "1.59-0" 1010 2609 0 112044 30729 "2019-06-23" "1.60-0" 1017 2628 0 113056 31026 "2019-09-12" "1.61-0" 1022 2638 0 113652 31027 "2019-12-08" "1.62-0" 1032 2668 0 114943 31548 "2019-12-08" "1.62-1" 1032 2668 0 114943 31548 "2019-12-10" "1.62-2" 1032 2668 0 114956 31548 "2020-01-23" "1.63-0" 1033 2666 0 115171 31549 "2020-02-21" "1.63-1" 
1035 2670 0 115401 31553 "2020-02-22" "1.63-2" 1035 2670 0 115401 31553 spatstat/inst/doc/datasets.Rnw0000644000176200001440000006437413417031501016150 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Datasets Provided for the Spatstat Package} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\sdat}{\pkg{spatstat.data}} \newcommand{\Sdat}{\pkg{Spatstat.data}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") spatstat.options(transparent=FALSE) options(useFancyQuotes=FALSE) @ \title{Datasets provided for \spst} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle This document is an overview of the spatial datasets that are provided for the \spst\ package. To flick through a nice display of all the data sets that come with \spst\ type \texttt{demo(data)}. To see information about a given data set, type \texttt{help({\em name})} where \emph{name} is the name of the data set. To plot a given data set, type \texttt{plot({\em name})}. Datasets in \spst\ are ``lazy-loaded'', which means that they can be accessed simply by typing their name. Not all packages do this; in some packages you have to type \texttt{data({\em name})} in order to access a data set. To list all the datasets in \spst, you need to type \texttt{data(package="spatstat.data")}. This is because, for efficiency, the datasets are actually installed in a sub-package \sdat. 
This is the only time you should ever need to mention \sdat\ explicitly. When the \spst\ package is loaded by the command \texttt{library(spatstat)}, the sub-package \sdat\ is automatically loaded. \section{List of datasets} \subsection{Point patterns in 2D} Here is a list of the standard point pattern data sets that are supplied with the current installation of \sdat: \newcommand{\recto}{\framebox{\hphantom{re}\vphantom{re}}} \newcommand{\irregpoly}{\includegraphics*[width=6mm]{irregpoly}} \newcommand{\convpoly}{\includegraphics*[width=4mm]{hexagon}} \newcommand{\disc}{$\bigcirc$} \newcommand{\nomarks}{$\cdot$} \newcommand{\nocov}{$\cdot$} \begin{tabular}{l|l|ccc} {\sf name} & {\sf description} & {\sf marks} & {\sf covariates} & {\sf window} \\ \hline {\tt amacrine} & rabbit amacrine cells & cell type & \nocov & \recto \\ {\tt anemones} & sea anemones & diameter & \nocov & \recto \\ {\tt ants} & ant nests& species & zones & \convpoly \\ {\tt bdspots} & breakdown spots & \nomarks & \nocov & \disc \\ {\tt bei} & rainforest trees & \nomarks & topography & \recto \\ {\tt betacells} & cat retinal ganglia & cell type, area & \nocov & \recto \\ {\tt bramblecanes} & bramble canes & age & \nocov & \recto \\ {\tt bronzefilter} & bronze particles & diameter & \nocov & \recto \\ {\tt cells} & biological cells & \nomarks &\nocov & \recto \\ {\tt chorley} & cancers & case/control &\nocov & \irregpoly \\ {\tt clmfires} & forest fires & cause, size, date & \shortstack[c]{elevation, orientation,\\ slope, land use} & \irregpoly \\ {\tt copper} & copper deposits & \nomarks & fault lines & \recto \\ {\tt demopat} & artificial data & type & \nocov & \irregpoly \\ {\tt finpines} & trees & diam, height & \nocov & \recto \\ {\tt gordon} & people in a park & \nomarks & \nocov & \irregpoly \\ {\tt gorillas} & gorilla nest sites & group, season & \shortstack[c]{terrain, vegetation,\\ heat, water} & \irregpoly \\ {\tt hamster} & hamster tumour cells & cell type &\nocov & \recto \\ {\tt 
humberside} & child leukaemia & case/control & \nocov & \irregpoly\\ {\tt hyytiala} & mixed forest & species &\nocov & \recto \\ {\tt japanesepines} & Japanese pines & \nomarks &\nocov & \recto \\ {\tt lansing} & mixed forest & species & \nocov & \recto \\ {\tt longleaf} & trees & diameter & \nocov & \recto \\ {\tt mucosa} & gastric mucosa cells & cell type & \nocov & \recto \\ {\tt murchison} & gold deposits & \nomarks & faults, rock type & \irregpoly \\ {\tt nbfires} & wildfires & several & \nocov & \irregpoly \\ {\tt nztrees} & trees & \nomarks & \nocov & \recto \\ {\tt paracou} & trees & adult/juvenile & \nocov & \recto \\ {\tt ponderosa} & trees & \nomarks & \nocov & \recto \\ {\tt redwood} & saplings & \nomarks & \nocov & \recto \\ {\tt redwood3} & saplings & \nomarks & \nocov & \recto \\ {\tt redwoodfull} & saplings & \nomarks & zones & \recto \\ {\tt shapley} & galaxies & magnitude, recession, SE & \nocov & \convpoly \\ {\tt simdat} & simulated pattern & \nomarks & \nocov & \recto \\ {\tt sporophores} & fungi & species & \nocov & \disc \\ {\tt spruces} & trees & diameter & \nocov & \recto \\ {\tt swedishpines} & trees & \nomarks & \nocov & \recto \\ {\tt urkiola} & mixed forest & species & \nocov & \irregpoly \\ {\tt vesicles} & synaptic vesicles & \nomarks & zones & \irregpoly \\ {\tt waka} & trees & diameter & \nocov & \recto \\ \hline \end{tabular} \bigskip \noindent The shape of the window containing the point pattern is indicated by the symbols \recto\ (rectangle), \disc\ (disc), \convpoly\ (convex polygon) and \irregpoly\ (irregular polygon). Additional information about the data set \texttt{\em name} may be stored in a separate list \texttt{{\em name}.extra}. 
Currently these are the available options: \begin{tabular}[!h]{ll} {\sc Name} & {\sc Contents} \\ \hline {\tt ants.extra} & field and scrub subregions; \\ & additional map elements; plotting function \\ {\tt bei.extra} & covariate images \\ {\tt chorley.extra} & incinerator location; plotting function \\ {\tt gorillas.extra} & covariate images\\ {\tt nbfires.extra} & inscribed rectangle; border type labels \\ {\tt ponderosa.extra} & data points of interest; plotting function\\ {\tt redwoodfull.extra} & subregions; plotting function \\ {\tt shapley.extra} & individual survey fields; plotting function \\ {\tt vesicles.extra} & anatomical regions \\ \hline \end{tabular} For demonstration and instruction purposes, raw data files are available for the datasets \texttt{vesicles}, \texttt{gorillas} and \texttt{osteo}. \subsection{Other Data Types} There are also the following spatial data sets which are not 2D point patterns: \begin{tabular}[c]{l|l|l} {\sf name} & {\sf description} & {\sf format} \\ \hline {\tt austates} & Australian states & tessellation \\ {\tt cetaceans} & marine survey & replicated 2D point patterns \\ {\tt chicago} & crimes & point pattern on linear network \\ {\tt demohyper} & simulated data & replicated 2D point patterns with covariates\\ {\tt dendrite} & dendritic spines & point pattern on linear network \\ {\tt flu} & virus proteins & replicated 2D point patterns \\ {\tt heather} & heather mosaic & binary image (three versions) \\ {\tt osteo} & osteocyte lacunae & replicated 3D point patterns with covariates\\ {\tt pyramidal} & pyramidal neurons & replicated 2D point patterns in 3 groups\\ {\tt residualspaper} & data \& code from Baddeley et al (2005) & 2D point patterns, \R\ function \\ {\tt simba} & simulated data & replicated 2D point patterns in 2 groups\\ {\tt spiders} & spider webs & point pattern on linear network \\ {\tt waterstriders} & insects on water & replicated 2D point patterns\\ \hline \end{tabular} Additionally there is a dataset 
\texttt{Kovesi} containing several colour maps with perceptually uniform contrast. \section{Information on each dataset} Here we give basic information about each dataset. For further information, consult the help file for the particular dataset. <>= opa <- par() ## How to set all margins to zero and eliminate all outer spaces zeromargins <- function() { par( mar=rep(0,4), omd=c(0,1,0,1), xaxs="i", yaxs="i" ) invisible(NULL) } ## Set 'mar' setmargins <- function(...) { x <- c(...) x <- rep(x, 4)[1:4] par(mar=x) invisible(NULL) } @ \subsubsection*{\texttt{amacrine}: Amacrine cells} Locations of displaced amacrine cells in the retina of a rabbit. There are two types of points, ``on'' and ``off''. \SweaveOpts{width=5.5,height=3}\setkeys{Gin}{width=0.8\textwidth} <>= plot(amacrine) @ <>= setmargins(0,1,2,0) plot(amacrine) @ \subsubsection*{\texttt{anemones}: Sea Anemones} These data give the spatial locations and diameters of sea anemones on a boulder near sea level. \SweaveOpts{width=7,height=4.5}\setkeys{Gin}{width=0.8\textwidth} <>= plot(anemones, markscale=1) @ <>= setmargins(0,0,2,0) plot(anemones, markscale=1) @ \subsubsection*{\texttt{ants}: Ants' nests} Spatial locations of nests of two species of ants at a site in Greece. The full dataset (supplied here) has an irregular polygonal boundary, while most analyses have been confined to two rectangular subsets of the pattern (also supplied here). % Parameters for Ants data with key at right \SweaveOpts{width=6.3,height=4}\setkeys{Gin}{width=0.7\textwidth} <>= ants.extra$plotit() @ %$ <>= setmargins(0,0,1,0) ants.extra$plotit() @ %$ \subsubsection*{\texttt{austates}: Australian states} The states and large mainland territories of Australia are represented as polygonal regions forming a tessellation. <>= plot(austates) @ \subsubsection*{\texttt{bdspots}: Breakdown spots} A list of three point patterns, each giving the locations of electrical breakdown spots on a circular electrode in a microelectronic capacitor. 
\SweaveOpts{width=12,height=6}\setkeys{Gin}{width=\textwidth} <>= plot(bdspots, equal.scales=TRUE, pch="+", panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ <>= zeromargins() plot(bdspots, equal.scales=TRUE, pch="+", main="", mar.panel=0, hsep=1, panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) @ \subsubsection*{\texttt{bei}: Beilschmiedia data} Locations of 3605 trees in a tropical rain forest. Accompanied by covariate data giving the elevation (altitude) and slope of elevation in the study region. \SweaveOpts{width=12,height=6}\setkeys{Gin}{width=0.8\textwidth} <>= plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= setmargins(0,0,2,0) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) @ <>= M <- persp(bei.extra$elev, theta=-45, phi=18, expand=7, border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE, main="") perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3) @ \subsubsection*{\texttt{betacells}: Beta ganglion cells} Locations of beta ganglion cells in cat retina, each cell classified as `on' or `off' and also labelled with the cell profile area. <>= plot(betacells) @ \subsubsection*{\texttt{bramblecanes}: Bramble canes} <>= plot(bramblecanes, cols=1:3) @ <>= plot(split(bramblecanes)) @ \subsubsection*{\texttt{bronzefilter}: Bronze filter section profiles} Spatially inhomogeneous pattern of circular section profiles of particles, observed in a longitudinal plane section through a gradient sinter filter made from bronze powder. <>= plot(bronzefilter,markscale=2) @ \subsubsection*{\texttt{cells}: Biological cells} Locations of the centres of 42 biological cells observed under optical microscopy in a histological section. Often used as a demonstration example. <>= plot(cells) @ \subsubsection*{\texttt{cetaceans}: Survey of marine species} Recorded sightings of whales, dolphins and other marine species in a series of surveys. Replicated 2D marked point patterns. 
<>= plot(cetaceans.extra$patterns, main="Cetaceans data", cols=1:5, hsep=1) @ \subsubsection*{\texttt{chicago}: Chicago crimes} Locations (street addresses) of crimes reported in a two-week period in an area close to the University of Chicago. A multitype point pattern on a linear network. <>= plot(chicago, main="Chicago Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) @ \subsubsection*{\texttt{chorley}: Chorley-Ribble cancer data} Spatial locations of cases of cancer of the larynx and cancer of the lung, and the location of a disused industrial incinerator. A marked point pattern, with an irregular window and a simple covariate. <>= chorley.extra$plotit() @ %$ \subsubsection*{\texttt{clmfires}: Castilla-La Mancha Fires} Forest fires in the Castilla-La Mancha region of Spain between 1998 and 2007. A point pattern with 4 columns of marks: \begin{tabular}{ll} \texttt{cause} & cause of fire\\ \texttt{burnt.area} & total area burned, in hectares \\ \texttt{date} & date of fire \\ \texttt{julian.date} & date of fire in days since 1.1.1998 \end{tabular} <>= plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") @ The accompanying dataset \texttt{clmfires.extra} is a list of two items \texttt{clmcov100} and \texttt{clmcov200} containing covariate information for the entire Castilla-La Mancha region. Each of these two elements is a list of four pixel images named \texttt{elevation}, \texttt{orientation}, \texttt{slope} and \texttt{landuse}. <>= plot(clmfires.extra$clmcov200, main="Covariates for forest fires") @ %$ \subsubsection*{\texttt{copper}: Queensland copper data} These data come from an intensive geological survey in central Queensland, Australia. They consist of 67 points representing copper ore deposits, and 146 line segments representing geological `lineaments', mostly faults. 
<>= plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) @ \subsubsection*{\texttt{demohyper}} A synthetic example of a \texttt{hyperframe} for demonstration purposes. <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) @ \subsubsection*{\texttt{demopat}} A synthetic example of a point pattern for demonstration purposes. <>= plot(demopat) @ \subsubsection*{\texttt{dendrite}} Dendrites are branching filaments which extend from the main body of a neuron (nerve cell) to propagate electrochemical signals. Spines are small protrusions on the dendrites. This dataset gives the locations of 566 spines observed on one branch of the dendritic tree of a rat neuron. The spines are classified according to their shape into three types: mushroom, stubby or thin. <>= plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) @ \subsubsection*{\texttt{finpines}: Finnish pine saplings} Locations of 126 pine saplings in a Finnish forest, their heights and their diameters. <>= plot(finpines, main="Finnish pines") @ \subsubsection*{\texttt{flu}: Influenza virus proteins} The \texttt{flu} dataset contains replicated spatial point patterns giving the locations of two different virus proteins on the membranes of cells infected with influenza virus. It is a \texttt{hyperframe} containing point patterns and explanatory variables. <>= wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) @ \subsubsection*{\texttt{gordon}: People in Gordon Square} Locations of people sitting on a grass patch on a sunny afternoon. <>= plot(gordon, main="People in Gordon Square", pch=16) @ \subsubsection*{\texttt{gorillas}: Gorilla nesting sites} Locations of nesting sites of gorillas, and associated covariates, in a National Park in Cameroon. 
\texttt{gorillas} is a marked point pattern (object of class \texttt{"ppp"}) representing nest site locations. \texttt{gorillas.extra} is a named list of 7 pixel images (objects of class \texttt{"im"}) containing spatial covariates. It also belongs to the class \texttt{"listof"}. <>= plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") @ The \texttt{vegetation} covariate is also available as a raw ASCII format file, <>= system.file("rawdata/gorillas/vegetation.asc", package="spatstat") @ \subsubsection*{\texttt{hamster}: Hamster kidney cells} Cell nuclei in hamster kidney, each nucleus classified as either `dividing' or `pyknotic'. A multitype point pattern. <>= plot(hamster, cols=c(2,4)) @ \subsubsection*{\texttt{heather}: Heather mosaic} The spatial mosaic of vegetation of the heather plant, recorded in a 10 by 20 metre sampling plot in Sweden. A list with three entries, representing the same data at different spatial resolutions. <>= plot(heather) @ \subsubsection*{\texttt{humberside}: Childhood Leukemia and Lymphoma} Spatial locations of cases of childhood leukaemia and lymphoma, and randomly-selected controls, in North Humberside. A marked point pattern. <>= plot(humberside) @ The dataset \texttt{humberside.convex} is an object of the same format, representing the same point pattern data, but contained in a larger, 5-sided convex polygon. \subsubsection*{\texttt{hyytiala}: Mixed forest} Spatial locations and species classification for trees in a Finnish forest. <>= plot(hyytiala, cols=2:5) @ \subsubsection*{\texttt{japanesepines}: Japanese black pine saplings} Locations of Japanese black pine saplings in a square sampling region in a natural forest. Often used as a standard example. <>= plot(japanesepines) @ \subsubsection*{\texttt{lansing}: Lansing Woods} Locations and botanical classification of trees in a forest. A multitype point pattern with 6 different types of points. Includes duplicated points. 
<>= plot(lansing) @ <>= plot(split(lansing)) @ \subsubsection*{\texttt{longleaf}: Longleaf Pines} Locations and diameters of Longleaf pine trees. <>= plot(longleaf) @ \subsubsection*{\texttt{mucosa}: Gastric Mucosa Cells} A bivariate inhomogeneous point pattern, giving the locations of the centres of two types of cells in a cross-section of the gastric mucosa of a rat. <>= plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) @ \subsubsection*{\texttt{murchison}: Murchison Gold Deposits} Spatial locations of gold deposits and associated geological features in the Murchison area of Western Australia. A list of three elements: \begin{itemize} \item \texttt{gold}, the point pattern of gold deposits; \item \texttt{faults}, the line segment pattern of geological faults; \item \texttt{greenstone}, the subregion of greenstone outcrop. \end{itemize} <>= plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") @ \subsubsection*{\texttt{nbfires}: New Brunswick Fires} Fires in New Brunswick (Canada) with marks giving information about each fire. <>= plot(nbfires, use.marks=FALSE, pch=".") @ <>= plot(split(nbfires), use.marks=FALSE, chars=".") @ <>= par(mar=c(0,0,2,0)) plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") @ \subsubsection*{\texttt{nztrees}: New Zealand Trees} Locations of trees in a forest plot in New Zealand. Often used as a demonstration example. <>= plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) @ \subsubsection*{\texttt{osteo}: Osteocyte Lacunae} Replicated three-dimensional point patterns: the three-dimensional locations of osteocyte lacunae observed in rectangular volumes of solid bone using a confocal microscope. 
A \texttt{hyperframe} containing 3D point patterns and explanatory variables. <>= plot(osteo[1:10,], main.panel="", pch=21, bg='white') @ For demonstration and instruction purposes, the raw data from the 36th point pattern are available in a plain ascii file in the \texttt{spatstat} installation, <>= system.file("rawdata/osteo/osteo36.txt", package="spatstat") @ \subsubsection*{\texttt{paracou}: Kimboto trees} Point pattern of adult and juvenile Kimboto trees recorded at Paracou in French Guiana. A bivariate point pattern. <>= plot(paracou, cols=2:3, chars=c(16,3)) @ \subsubsection*{\texttt{ponderosa}: Ponderosa Pines} Locations of Ponderosa Pine trees in a forest. Several special points are identified. <>= ponderosa.extra$plotit() @ %$ \subsubsection*{\texttt{pyramidal}: Pyramidal Neurons in Brain} Locations of pyramidal neurons in sections of human brain. There is one point pattern from each of 31 human subjects. The subjects are divided into three groups: controls (12 subjects), schizoaffective (9 subjects) and schizophrenic (10 subjects). <>= pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") @ \subsubsection*{\texttt{redwood}, \texttt{redwood3}, \texttt{redwoodfull}: Redwood seedlings and saplings} California Redwood seedlings and saplings in a forest. There are two versions of this dataset: \texttt{redwood} and \texttt{redwoodfull}. The \texttt{redwoodfull} dataset is the full data. It is spatially inhomogeneous in density and spacing of points. The \texttt{redwood} dataset is a subset of the full data, selected because it is apparently homogeneous, and has often been used as a demonstration example. This comes in two versions commonly used in the literature: \texttt{redwood} (coordinates given to 2 decimal places) and \texttt{redwood3} (coordinates given to 3 decimal places). 
<>= plot(redwood) plot(redwood3, add=TRUE, pch=20) @ <>= redwoodfull.extra$plotit() @ %$ \subsubsection*{\texttt{residualspaper}: Data from residuals paper} Contains the point patterns used as examples in \begin{quote} A. Baddeley, R. Turner, J. M{\o}ller and M. Hazelton (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \textbf{67}, 617--666 \end{quote} along with {\sf R} code. <>= plot(as.solist(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), main="") @ \subsubsection*{\texttt{shapley}: Shapley Galaxy Concentration} Sky positions of 4215 galaxies in the Shapley Supercluster (mapped by radioastronomy). <>= shapley.extra$plotit(main="Shapley") @ %$ \subsubsection*{\texttt{simdat}: Simulated data} Another simulated dataset used for demonstration purposes. <>= plot(simdat) @ \subsubsection*{\texttt{spiders}: Spider webs} Spider webs across the mortar lines of a brick wall. A point pattern on a linear network. <>= plot(spiders, pch=16, show.window=FALSE) @ \subsubsection*{\texttt{sporophores}: Sporophores} Sporophores of three species of fungi around a tree. <>= plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) @ \subsubsection*{\texttt{spruces}: Spruces in Saxony} Locations of Norwegian spruce trees in a natural forest stand in Saxonia, Germany. Each tree is marked with its diameter at breast height. <>= plot(spruces, maxsize=min(nndist(spruces))) @ \subsubsection*{\texttt{swedishpines}: Swedish Pines} Locations of pine saplings in a Swedish forest. Often used as a demonstration example. <>= plot(swedishpines) @ \subsubsection*{\texttt{urkiola}: trees in a wood} Locations of birch and oak trees in a secondary wood in Urkiola Natural Park (Basque country, northern Spain). Irregular window, bivariate point pattern. 
<>= plot(urkiola, cex=0.5, cols=2:3) @ \subsubsection*{\texttt{waka}: trees in Waka National Park} Spatial coordinates of each tree, marked by the tree diameter at breast height. <>= par(mar=c(0,0,2,0)) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) @ \subsubsection*{\texttt{vesicles}: synaptic vesicles} Point pattern of synaptic vesicles observed in rat brain tissue. <>= v <- rotate(vesicles, pi/2) ve <- lapply(vesicles.extra, rotate, pi/2) plot(v, main="Vesicles") plot(ve$activezone, add=TRUE, lwd=3) @ The auxiliary dataset \texttt{vesicles.extra} is a list with entries\\ \begin{tabular}{ll} \texttt{presynapse} & outer polygonal boundary of presynapse \\ \texttt{mitochondria} & polygonal boundary of mitochondria \\ \texttt{mask} & binary mask representation of vesicles window \\ \texttt{activezone} & line segment pattern representing the active zone. \end{tabular} For demonstration and training purposes, the raw data files for this dataset are also provided in the \pkg{spatstat} package installation:\\ \begin{tabular}{ll} \texttt{vesicles.txt} & spatial locations of vesicles \\ \texttt{presynapse.txt} & vertices of \texttt{presynapse} \\ \texttt{mitochondria.txt} & vertices of \texttt{mitochondria} \\ \texttt{vesiclesimage.tif} & greyscale microscope image \\ \texttt{vesiclesmask.tif} & binary image of \texttt{mask} \\ \texttt{activezone.txt} & coordinates of \texttt{activezone} \end{tabular} The files are in the folder \texttt{rawdata/vesicles} in the \texttt{spatstat} installation directory. The precise location of the files can be obtained using \texttt{system.file}, for example <>= system.file("rawdata/vesicles/mitochondria.txt", package="spatstat") @ \subsubsection*{\texttt{waterstriders}: Insects on a pond} Three independent replications of a point pattern formed by insects on the surface of a pond. 
<>= plot(waterstriders) @ \end{document} spatstat/inst/doc/shapefiles.pdf0000644000176200001440000025277613624161311016476 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3718 /Filter /FlateDecode /N 81 /First 661 >> stream x[[s6~_m;Gvb>0mEWg~ %$ӗvvwd|BXIf,SL4:fJ,VX̬U,q:s?:ɈqP1njƍƭA21~5Q RJTP9jLQ*T71I@,bҠQLF D'TDgBsiNR3b>0(-31=1+yS!(Ԓ)p `J2`EYd ؃uaҨG,68Ơ:ˆf> 10cLeⶰeBRE"PDLw1!X>M5xP>,Rdž㧃ӽvظ(ظ(:ݨ46/G;+NgTopBC \ a# E7oUč]6HQ0b\W$#&Xp+ [irp]ꉻgXp$"#5Phkv оɂ|7';Fh9O,6UVpR[x.:=%L-!p̶fɪa>[PKT3\vU6{HuW= 5%շt\YRq$E}xcZnNƘaC~HZ%Up[?>͋e(f&#UsS`Ѵ:|u`; )0%e׉h/,7,Ӗ5 n[t^bX Nch놯Cy(sCӢ|o?~p 6Ll*oCN nkKoMI@hպD ]Q)m l_V\d-@f5 IwE@s}uH%[ݡɐɗK \^en9T~,Vܧv (cXPt/tTU)\P OQ.xCMN(W(4UƑ q {8P r.&wЫipy Kiصӱb't^4wd gm/Sס>jzgH0jè8ܓ^5幼R &ǎuݮnԭnkF[<<{{XӘcbZ-jj4ʖZǦ.r띤NB$2~'?a({m2ƭ~t诃h:MZ(ǜ>UbO uۈumv:,/'O~~k~[TF,QG fD_#fҥt[#jME|u?]>b$fha\b nlyiY=WqYW,7#l8z\Q62gތylT6,W 6^h6lJC>x㛳32OQH$n U_hi{.TFk5N {rdy =ny-|8:jo E} 4 4CU]H,6vlQ &M∾LϤ5C 7Û86/űu&kLb؅,>;z3QqcMj{}S:#ק>czО\̷h4WyUH_GlnRհ櫕2z[;.\Vzwnݷ3Lq+͡WM Qʡb`Z}vt ]:j~u~^;:!w鵚[:KB,*g锥Τb$O>'Eؒ9j;=EWhon?˼+{.a {SI!NWʭql ohRUY¨u*KS#OvBh4MoPQ{Cl5Wdia9vYqlh`yvdOIԕäJ33-5)V{?%w 󲈔C> stream GPL Ghostscript 9.26 2020-02-22T16:14:01+08:00 2020-02-22T16:14:01+08:00 LaTeX with hyperref package endstream endobj 84 0 obj << /Filter /FlateDecode /Length 5273 >> stream x\KsGr !8z _VB#<$DZ(Fꮬ:t`ˬj3wywtav?_Xuys5(;'gc]W;wɕwWfrV_K3ShZcR޼α֒=S+7vJ4`߿j+RkhGP0!M!=Bm<-# <5SvLM.̳c-z7HʛARO>'Hy?e~˽f}SSͶγ?hx.Ǹs 2:hS ~cu;kɮZJAfSJʤtHl`$[c,~AfIXX6h9 LpH}aM|.xΐt/lIuM{5-@r o{«q}YT00쏖L(83P-UQqMVwŷ`àj?@X1;guݨnx|E&mؚ K6ʖ|3yweC)}|Mj:6f뢝>kڤO0sy_.]JD=(5˚mrYNjiِ_@4Ƨj1USMm@5_Bu¨{<&t֡ rvXC"v#@X7l/]K~k3M}eT$eu?o ~2s@S?oRk:ظä6BTuR˙w}\5uZZsN1W@HEԚ|ͧͧC-;hXtИEPT]PۦQ_ -Iud[ x/=@UNiW˿iaO[(heJbVI@B3x8WlASm8<h(zw`@p`Fj*ڮ_LZ O.#8 ,Ux}SM& ڦ/ۮ)kۀyA¿l)4v46aAY:o #Ù҇YnK% r^ 4?YЦdW`Oj/2;K?րCu0pm|7 DW:nȦyb$GtetXta/\gO@ ) _ژ$\7So޼M` 00HetNo;4ύ9 Jnpܕ9i)\!O5ƃX@?*|;m85\eK33ZR&Л|ͧͧCMKDSZ#ڒڊ%X$ǀۂ\ vӕ3 l5qD~ $x|{IO25ֱ#.`w:& od;%j>Q0ъ xP Ł j2OZ{;[hN`Six,H/ԡI֏ "e^Yc(prW!APNM#HǪen 0+'IayB}7i 
RqRx6?.trQEYyWqj9؆m!yn60/:~M|OsEn .ɔR $W /yG]ng̣$ړ:(.3[ʿע2A8˩  g MgʘhŏNkT:c0݁_2 &)[VrD7<5]ky'.cIbV_!؈0Xf0RQgWRJǦ;ߧjm?ZBblbsj}.mk%e3ȫ́¹mr|GlVt̽q."+ZmXaV-+XHY *kYAj*=,F8&T D(knDz;eJ:4@$1i!-X1wb낚 ԱDЗ&Aԟ; ج X`ToW啚X=NbmNBaew4gB ZK,Z=6;M9矣0u^E{'wLmuo~Lv9MZE8( bYEk "6Cw4?LE!^T:_ 8Z֕ 0ziR'ؓb<?WuTjg Q4Lҩʘao줍4fʟhL3䨼>s5DяZ\(=}ëxqMt%.5⑃sbfa0 ӥb-y q6jzal,UګkS]9")AbN" jR"hmDHDV[l3์hx3˽ KI#aDp{])Yl4QN+YM,,oHi6= LI'@H*ꪘ)aYdvc5Uj@TN߱x6\m6kveL1^ug_An^iIA[ $%:6r~ywЩ:Y iܧ -=O%iBR GNKE#얍;oX?8d!示Ʌ Mdd>Cbىv<=[(Ojvw\^_|<3]|V\u;<^xZ+'mDRN+8>Ĭ&>4XAeݿtPR\]7KTL,Z8.QJj{M+[تeN+,n/{YOINdܚoBd8Flg#E>˥@ o`:uTCI>ds*r6|q1Z_}gZ:T Eó =x1YEZo5oŝ$:r ^Ua\.io+|8B(JNc'COy%]YWmՄcAʕW*J, : >#`*-c eMݩDwXK E2g뻝j .evBD3F S^_Cձ]>lY)p1k:5 YV$K9:sd3?qQ 8(glc/Y9rGKa>}']rKx&@q-?ލ "^eW1*ڄڧ>V߬xu`73fw<:LI d9;m]~\_< N&yr s8k@%ѭv_HbI.'l5x6`x\#\mfOUT=Wݧ S;ъvPBZ]pvhN,*ut >g#$Hi\. Lz~w5H>-9k*"nhG`59O= s^ƩLbYIkCo>qyכ |:ovV2 cKZâOݖ[0SFP0zX=vEiWp%$|NѢt ,S |-"$ 4P]ѳx˷< VkY(7Lcpŧfܔb$7ͻ<\]\h:rim-MJoqkG\G1J$ ɭK%#?:J8dayt#=jtV/%HQe]dg.Mu"_ XЊV |pW"%`Ys&ǎaՖ=.oodF#N?K|6d߈P<~~07+ܛ]on=KmW מw~j[+W*OBt@endstream endobj 85 0 obj << /Filter /FlateDecode /Length 5938 >> stream x\[\7r~W#&y@}"l?8A硥Ƒ-T"e/a<Յ w7gO$zzwCxDgUyT/ڻwg_[; v'js 2v\0#<*eXf)oYyΆ{(4~sx^6{vU4*J:nb %ˠ2&lp) w*[L 2"lk"_^EH6PӦo9Qkcr m0R8<1hh"gL_@2%Iԅ.N(x|sK(IfC4@{~Ð||V8=v fY'Ms f9K @bPFװJb031YB?Iu jht?TEp:v蜮D)BGxXV" CB2N!]ުbrn:1n:fItĈEU+T+B2=J<%03褈2/:qbeiȡc3(d,anEoOu+Y֟Y{VnHьֿg]B6`jVp{ll|KdDlQhׁcm,~΁cZ,4 ȗ$~} gllq' 07zȈBV^l_=g+rc+sS mѬ=uL*M#D KR ۑe7DXK`$#8t0~!)37<%X&,ٞ]W?[gAyM\mP`:o_H h9ߦ- E",mdm/z2Yhj0c gkV:g2t"sH*Gs;ENi:NýV=t%oĪqђV uyU9ժ!' $ѕn8_ c>LirG#D.h5I-(04U(jo`Ƣ@qjlo1"IjW[uhct_|\0$ )7iQ|Dq/eWuis(wqR8Dh=SSʹ~!c:YI< ﱆKMU-ҟb~'5PӦ$]!}PeŊ 9$VP^Pptxʍ2dcrML>ɹQp XBF`Hѐ zɀJ11r_'HBvҾy4!JaǬTϙRy(9}vA[Y {އLEĂ[\#{nW҄T^nnkqq$}WBFEdjhC>Y\cRm,>߇#g :P#Fz-W?%a݆0"iuեY mPFkMD s%\3s`B)5 Mm8$`"r"FI,YF)_j|[&N2)ZK@j[aUl&eN]) .Af[uN䰉H1m-fOQO䉰ãnVT$\$t(T*KmumOG@:m: UN`IlN`?#_bh)@O-t}_^g޷R!``TIr}'(&16Kv]L`љzC[W RKL,(݀L( ,#g=2QpW*.A-Yo7U!S% 6GERqyN! 
;Z/Ukm‹{Z IZ:ȔPB5 BCм'؆e72$Od6` iEiѓ`J b%Ja)&'EB x!3^2ؔǡzaL L9ӽϻ2a|i򱄓HW\Ni7N%miIsM(QDa¾sMCIBf8bR~ڴ\YȡOѥ) N]ԙ-J苻R.B:1ngEwsG2c")Uƪ8ra}i0FQg8-ʏͤ0hXb rLa'&K >NBXE8{!8yO 5\t @6qHry/ŝ^rxj xajr]~A,"}xRZj)۲U)qǠ-Sp9W9R>{j>q;E5 \ N D"S4Mc}zr#W] WPb U2"ZJeh٪ &eB  i !o?滻/V\w*.rk-BUV֧[XC\&)8 2@ G5v, (@pEo޻*nM[ӝNELnU+:d:[a8ZG HNM&4evm}tfעY~UE@Ͱ"=w"tYj;QsTٖYk t#08q(7Ƹ9 .'5+[{iՔ 5g Y%&=E,>>3&>3zY{6=q}0lMg4fˣ'$;S\V຿VMR>*$"]/sׁ:5tS* Vp2$FvV\99 2sUa`WE(½vx;bUX"_]9HǪKYesfف紈^26pfUB*%@BɅ!_o|tx5JO1QZ!m+gBW;.CdTCJUlcxak rYp0iLZ@C])!ęw2L,y}r8T`͝t e;Flky]\?jg_UYIQ-θ4ryVYtks]WG  EQtl+7Z {$.RGBWVR~<<=> |uT$%~ߢ*tQlRYe-A95|GUu;1k!jg cy!sUU#g>A_p_}MCRj [M*RD#C񢗓>ƳUo}j6'GAU3VHTh淳s2:ģ2FX-k='? HXM^M.Tp*Zq VG$WG*:0Ͱ/1D7 =S1SDj)!A:RwDU91K# վ6EyvWJޯS{qƒR[=SD}daj~׵~mL-0wQR[źкb׵~7CzP KϵK?ܩC]`Ҵmb>}U#P!ORhTn\]_EE _DTF:/7_ߣD Z}.(x-w1D a)ׇxl' ,lj0.^W$^v)È ׾F7"Iٜ[렭9Q"RdhF˖327zЌR_ŏclH_! TR-g}g?=T&<2,2:dK< `jiaœoPoWǕ/aVYi_(vǫX5?G9_3ݼU0aKWvTb|[~E)ewz>O>%"2,΂/Xðb$O*S> stream x\[o~ׯU dMHi )IX*lK=3 ;?!oՌk4|GR=4_4'\!ݮ+p^nG,$s|˦<ë0NtX|i׵M~2ӽYvSVy쌔IjZl{rd׳&K&"*nfzY4 O2y^q-!߶j29r}/b3(ѭVl qIFWk!yդ֌ 6ʋIFjk8on̖>ئ̧ _#3? H a:Xx%b! ;{ɽ`$ oWR¥N{8C=  ǫLK"2`lx| W 6 /Zx["҈tzz˞N lܢQjZ wdg&v{{)u+j.K%PC.%L󠘴83qmy"*[8֝YPƟ k26`ǥwR Dp\;u4ִ*=PM.Q됩 oO- oISdR54ppA}dp^9ࣗmaQ. ~ ^H;6$%6~DyQ D]@| _g 6- qQ|v3ǍP8Xփ|<,YJ:rG^ċ;|uoŀdW BXw>$ ` mXt^ؾN$'v޽ҀwbvK8@=w|&(/frK*Bh!A|/eax XyE@&4)wtzɗyjv=m8NT2tJv 6`A;' ILR;@b&.I)v" 02o“s٨ \$|y|qsJQc+w2z-/ cш/ m$+M.=z#8{45rǁ%yg h`.  /"9,Hy8L Ͷ!Cy]! \R0RnBg;sإF/fLh^Rj^*MRVMyB Bm)NC7iJƂ62 2>L Py1+%9Kojj8gj{ХwH؈z!јOӠX0&iJp̰3%0-KGeupE-DBj=RĀIq? ,}4)Ҟ)N,UbaIpB)l+Pp6ds!yt' 1d{ӈ#q*!m2`!,22yyY-b @6i3`s*7֪EapqоԋI(tJ ]x>n\ZPN8F4jz(Cz1Oh?\eX.#Yު*Y3ƅ4FrQJ H!cUs\F-ċr`==$ZJe7@|MY%+ ծ_T%iUK DcR{+f_aVB,S3N>`;@,*8+E,Ifl9iO#f)ÔbSȽ`&₿.:ݜPe$Fl3W 3p+Qy8KY:w/eR! LD"_8)2jm80>+Q(0a&o&#&b @MۦmtZ:/DqA2ÕC%XKS"~; PО3-ZEL(=|l1/0BIA4 6soҋҾM 3n*DldSI[ 6pgVE'qA T?zB21K*߶*3VV"6p t/,w.R<1ju6.b19\J17؋rgq1hW,wm8 &p~1fn{D,㫒~_I5C7>OXË0В"=+RFbSfPl%1)nCbon)L/rqoݚ0? 
7Tc{=С5(UrVv,]hA><+G}K>a::G[vgK<< ?knN<ZM;lYA׍29X6aY^xpz\(+cx+Ey/}-ƿjgۇ5 Evuyߡol-ݥ^qkQ m HIĜ˗ rO_V;˦g-OV|AǤݺ a4 \ 8LNkjهP YXX2V S4Ģڣ ~66Lk_!T.5Y3?7Ofnl9oܑ_ np!!PJʡsx# F,-Xs똺hëͶQiWA]е ?Z<^=qZb7ao*,Iڽ\frRe$q*d)jW wDlb}N[+!qֳK;`ǯ9$ö$q|hh6}lAwli:|Е|Ǧ0XsCbY¢IA25wIוط-yw 7j=~15.i g9[S{^dسzBxN9vCjo -A+D1MkS˥pukgn;kFTs/ "mЭcֺca؄zAawCy #+6eLQ0{j.y:Rc]`7 X!.p,N酬gqHfȝe}+8?]죌![a.0ʙmǎNÀbu~ ru+f`}>тl*&zE1 TM_6o45?!faZe>ae֚}]hp:m>VS @v A2~S! J1%H8Ѯ6bNMC{<#Rp;\ O<6] v4p >clut1-tTR7fۃwwmy&_T.endstream endobj 87 0 obj << /Filter /FlateDecode /Length 6249 >> stream x]o7v!35NE-(>y[,([#9fFE|rx.s!ٴQg'gO'??Q׳?lҔ'Su'sLؘ.~zʮfѫ|i]iOzR+>W 4>*Ms71{ SJ. li S\vJ*D ծ,%1ފ0I6cLRNjl\M 6q {~}a]mnk=5]ojs[ok/þyU:+Ww>/\MPN*[ 3P%I"P)mFz01^Ox)jIΑ) ΎxF5ge$:<,uYs_mohWz_J!si DsWaqs_xIEyM9LFcJ/;SxMve࿈CGA-рh Sqbrsg|8;WvW߳H8Ӓ|BZK¯$QH`**=Q~;6y» <4zv]K aÄ}r!kcЌVMQfSj2CӅNlѬ,+ VمUZ,lY0, D騾تjoPeliX1^2z@E2?T}cvUSdc:B⼸78Gm[18uϴϰ%^P.qn^n/(.:܃R!8<@@FQF`0 5/9;DWNOzG5i%UӾbrW%n~'q Xo'9>:4 |4[A88biz-UiV\((!=Z2Xo$RIe]py 1Z(J 5>I֮'?KxA*//1j'YL~v 33x ${5K7B>)%lPDJ^vYDF۪.ÊﮅEop*]'լ z.BADB.P!cR~]LO3ud-5;d@l m|襡B(g=|_սxϋ/fDA~)v v#|&xa%cR9 v9؆wWr_w{E)[l~a "u1^ D4mtw<`6^&YG(HJڀsJmɒ{QQZ;‰,E\W/-kfBmo$"Kt'a"ȔmNm5>◀m0ۆN/Y{N}(Nz_*EB|^58={{63 m3M=]9:EG.#; x2~j ԉ3]pb^5<JG\Xk S/O)K j!~cr,jEKmD2Jx$7}`5I'Eľ @g]+cmu-J 41nPa8&ץ퐝o"7f^3@5ɘlTNԒ&FH=Jp"|)LZKo!(Dyz{vȦ#@]m~[ok6ok6kaSlspFeRHiVJFo6Y)&HQ+y̭k=jWof' XJt簳e|l`!g(aS |IbJ, DU%|JE1_i;\6zS9Aw8uh!ڥ'x7R2tHbyQ)//l\N05Q˘vQlj- 3AAPI˒3}уNKQD?yR vWE?! G`nU"ekDwmm͛ڼͫYMa~6y|&&<]Q"^ <ԕ^vfXux_/)G_vy{Mܲ~wW;T&1"Ɲ (-T[?!čeMg]#-'>8:4K yr`*@%Tfb5Fl4UF虑] A-U$/y2Rۍ@&]($ #BZ&?UNFlL}S҅ǩ0,9M @/FtF8P\8Ii9؅ eRRkê 8ujkEW-!Q%SF2q& Aj-ҝZ(T+ꆝ3:ve睬jW 8tR#ijO/3~TWh;?ׯ s7l/H !9? 
j)SwmLwæ|> Za9mj8]t_+!\<|fo*5}?CHBE.ơ1IܕuKX[3Zp(\J Ռ', Oà&~=4.Ϊ_10!Dd*7àmqUSU~kTՓ_*szv֊{gCy_aS$nD}g&6㭠_*3?,VKGwu{$;աM[qS)!Ur%xn0|J&&N7B[%q@,%k$N#JزsGu>& w9C@cP&߬KͦS,#X013rHѢ]ng Ҝƍ{o v c_%.P[-kJl@{iy}|/ Ow1RŔ&ט3w4܈{$f}c)-m{Sۄ#>ɘ6Id?g`\Y Hຣon/\g+65v.>nrԠy#[TUa hݝ#c~mPѽ6=h +~FRp6Dh-Tׯxc6u&4v?]0iI|ӢIãoT.1)p \+CxMU"88J wEDo:Oye湐RX>9^ַbFSRT`0b`BwR7,Q}cSy LLWo#ky$ip}U3&QI{`Un*ڸ/U.辟 o~W1^& #ظljƞR-e(A:jc4p2;C=ь=T ?p8tT1>D.y𥲥#?/^T/FE\( ^V>5|xqESƾ!xY-DTf1_^5u/uףp=ݯ;h ~T(}ɝ/bDEDDTV\6Wz'O Ue{zLZH.@upNWRJ׮ y6(#9i8¥?u~'W @p< %8zU"P7ǻ*@mWx'%B,$]TLWK^ J]*xz)Nzcm5|W2? &D?VX›!tyͅ7a6+=Us155f8n\acpm̏†+H@PRvRSĿ.op!-;YUm|lWƺE m}Y2R! N#Ezw7L ̾ 9WZNF7`}_Rd#,+T&'ܵ". Q%!^i{0*ۃ٘Xssbe;])i7U?;,wR- Ck^ݔ0]Y!J@x?)W(?aQ_"JLGݹ[Ni҅|+(H;e :Rr 9vtM’P@*hڢEhQ&9X栯dW''!$favڠ^9R<* ‚ߙKjj|&oݤSU]$K< iL9X_/BYm&lngL//GnD8wYAWDiֱ95+̐cu:d2!(e\aSs^~=?E~tNDm Y͸ݑ Qܑڼvc`K'ٷ X=t71-ĵ@ڗw-<%(*:.n^jCG^8`ֶpNi _ W _0VͩQfO0жϯb"Y 5˩ۮ42_ia:R d["Rrb]T]UVGMKhjA!h|'Ԕyendstream endobj 88 0 obj << /Filter /FlateDecode /Length 4675 >> stream x\Is\3Kf3e씭TY,^R#.#Z -T{x I)CJA<,~hC1CǗ~@n~8aG0Y }*\ԇ^A{wxtybar"j+1!6 E;JH҄N)fjrjQ.h1ؖBx7ԎQ^tP0YMN#l.q^_:#!џUW~^NC +-Js]7ԴVcmEZ<4t d}HkNG8b|]m:gWRlj.yΆ 1SEo2 eAy`3 40-ɘp@`Urء\iQft6qeE1 IˀK.^E#5%]"_ݓ`>I53WdG9F|=E#&IdC*Әy^<-;0]Y&aمEmGJ i$&n ; F#|ːJ3 BVMO44U ^iy5y^bt{鸊4 =-)λ[E^i{AfvpZ)dzÍ>3q2.SBTwK"JI|]Kt@% |ꭕOMCqWkJRY<8=ONp@aR(C8Ґpj$Z) .5}];< ;2ت%. ΤbQslB+9Ç'u)> GљH},bގtM A/n} @dpyZz sm'fZ8_"~0D5#lfP#$] ~SI"(▊,5#ñoQ]qB9ۆH`b=q}vB|=kͼ}y8JR$BY!M\-܎Y Y>\8Ipu~a Z(&hM@T.r,hp{i3֐ [嫉% k"ԍ DFN4V֫԰H*̯BqA&M8T*y^56B"qx^[f Yc>kd >} A JtU.#-TF*IQ Anl[y^$tBT NÖ}sL 0P2 Jq0wD@Z.K]; Dp {-vCnGh6]= #'s$I'Im+mȟYA[`4e|I9u' x(`i~ப#^"qHJ5F;/Z-az"g01! 
BMѠcd_ y ʚYMƤ}%AF&"z1ʏ&% 9Zvk}nJoӀ:<dŀ #=LWK $s%`q P>l,G 5Xe59XU]}Rj!qL,)!YLpJWu7i@ }f(|Z$D_#I b) &gQAFb0<+C`EOV0SF0R)bpMl!y]랜$/|yRȌ '+z@ &EVFBqOs#)\Jb(ϥ (mR'#yZ?M( s]u @7/6RE8تX-46o泸YenWO旑ct LoS @N qёvY;RM?n6aj>ޏXnʕ۸kqËRx~$a0FL`_SF VaBKf绽1Ì?o: Oʾm܃xgj<]1ӧX4b^,0dgDq3pKOM6>C/߱mi di3v8#,MaPdc\~djIԠٓYas&'E(k/F']Qnr!=9+ٕElکa?۶w{gnEX1Fb ~\I%Vıo$|m2m]R66K22³䛮~^ڻc24,ӁI 4{-ۢ4W47rϮZM/*`3*ۿxR*! ܤ)".un@]&(gⵟ$q(١5^ lsyGsgG'n&v0f^KM<"UvQ>wRoBX:V'SU 5_%fӖNsEj )˗*xd V#eFwTjˢM 5Ds"D҇U "׽JkC`e<^^VED8\g̍xW tT 0C 0SEV2|{{ <՚,xo[Yx T^;3)̒S~l1VS):LKZΨ1oTNH9h9Ťdў X;hc~4毡YfUE5Ӣ\]Ɠx!.”]X".aMV9SiddGטϴ/ eN2 ƆglT蚊EJs]7ڙX\a**9wr*( b`p>)# eK*FMMY]HQ7x#P*r)6MexlZh4aQ_ui?Vv ц2 ȋX=8,-́ >+[xXɀsx)|ίRRSJ@4:y̽ux4$7ѪBC  %DHFMW>-CKid~!Bo }0 Wxy2gI*d>Z|-Mp5zƚzv12pt@۽-({d33{fVD20w%(m196Q^G{ RLs嗺jd܍WsV_[A ASAR+H8 oF+n4L09MY2O#NTI9.,{8!Tm-\ {D[mo@5riNgꀲ},Һ.}7X`Mw, fuvLx.:W8nr=I|~Q'6̨ˆ;F{(r -(fUKqmf+3̆ߥ%M-+`OHKz.!I~[Nj:4ɲN){J9vL^YeM%y@@\c^pH_r֥Rݯ=PXkKdh7,ebb92h tcNb4fXn=S K;`x|^T6NuQ_p+Җ8ЋZLy932c:&YwnO4qT. 0[qA”1V^ e eֹJڂ7)]=…]O rUp> stream x\[s$u~_G0zIi:;ة(e+rUϮ_酢]ܝ90z4&uvTqgv~=lw6!*aàGm6W8qm۝IzJm0]J.8¥&% >77j4>n~cuJ1z٤dۼ!fJۦd,?U@&m;~>z/4oCJ;|<774n~ԣx3htk_>&Uv306n&RVPYB4 1ɞviRؼmO ]WcN[ay0{ǀhʱ7Wy ;܈@G1v :0 :KkɎ66:;FOQ3S܎Jm08S\x5ev &+ٿql8OЪ'C=$2vpr-1p:Pu KyKNB{V2i6oj~giW{Yb}rIrN!xh` 85zOON$s>*_`fnݿɟQ-qPKwA9R 㙁@$TgʁBIt΁gO;|[OqfTQK BmU+h< ḜIՇc‹M | X{ X}xeP lG4羭 e^mX2d RYPv E}o  K(;ff 91sZW./ .Ctj8 .5ojU:{њJ ' ۊ%C6YNkZr < mo ĀWЬ-m NvE0}_`#ӥ@',u))%O[6^ԱB ao~]{wP{o'{66}QY6ڼ^{_nKIC?A6beͫE|ʹDbfg_k`ʧ6]O} EA̘~Q0oڷySbދmTqi5FF0 *E/dk&'Gnc_ TN['Dl+Mf<>ƅ_y^ >a[&9V'xAmB0fPBs%C%ҙ_{'̺/򒪙"`97}[D?73˙eBqLB,.43@z+ 3I𰾃辪=Rpd!Nj)yi A um&%COrA2Xs]Bdy -7ŦTwz>5o ɑ!|J mvw6&HDɬtB r˞x(G"倎qp_6ʙ5lJ%5aH %zV'TGC(e+ב>߬ ??sbK줌l(1Ę*8⍐\ & F >2ۨ.wS wN8MY)bM̘Ɍ>E +q-EE%aٻ|M\^nrc/OVTW`bNեESPBĿ؂5øfQ+iOа,m_ T.+apapǗfgeGY$)C-s7Qp| G,(:P(%Ddl7]gp\l*=k,oO]5#B#Rn :*ࠒ{ u:(iDh_ "|$9F/mB}Kĕu`j ~ C2tLSԚdg:) iM7r%)f3- ׽b(JѺ7bAH!7_8q0C;=1h_:kAB"lc",r ͘U)Lf^Ḟ(?10}C(Yg *a;X8|XgUsx@"af~_䞜\W} t$RNK8m$M">._iA娒%ކQ?SN lgn (hq1 
|qetQtT׼iRSOz*Ƞ(`L5);A\A.[f>#IΜdk/kvWr%BVMBG~n S2O/Pl3q<ۙuY?ɚWA +\GpCrup[2;"|Q"y 3l}F*u,srGN("U8-@ 4[&OYUHn鼕1a'yѓ{=^޺8>Bz"tk5T)!?օHS6̈@h%u^L*&%&q-Z[PM]"`9ℒ%`QtDX$bWn\ܲRZtœ)]l힕`_FTY1!OT魺cZ=rP׋c\jOWwȍ7#u6LH^NDD"Ulj.}9mZ€I9ox]0#U$3Kq I^p ̸l٢H@tx"&lN1]bdԢ\>OҊs/h;s= jW+sq\ud>-f}0hCE'zW7R΢k%8923%Qq@ࣈPse/> 8`*ݳ:.L&n CnґZNόGBQ qe#g*s)ʣrZؠ#8 V٘:i"h+z'|@2XJbU \^SwRp\m]\= H@f/wB:ufA^ĘXM7mH!GiCEQдZkBg? dQ+7B &_+ܖJ + =]-󑎵 kZD%2'aFjHz%'c-&V?TnjF`jˇD1eŠJOEFi-ߴFІ7 #&3ZkT @~W͕ i;'8tTֶ1W%ڜn+a#Qք<֥jiGQ"BX8WFqOIE\ժDxV@03B= 3txAB0Z@j>G{cK65&QYwk!_ 6g̣.UO o=:-[G,XFZ2CQ=9.}TE|zuϤjԾa6 3>`P%w@̥A &kPpXj#QT?9 A8pq3^vΪ8$+v@Rd,gd|Sjd04@Uv3^T^wkG_uVJ\{qQ"JM۔X}85u6)%\cO\5Mm~]_kS~Wk F`̰rh3z;GԦ kꍁ خN~rxM<]q*Ef~暬A-yŝvęsO\ cӜ\X(ֱrRnZ} ※ڼZ)$iD.G'!b)ʕRn\J#(49V`ws2s-pP5ֲ|(K\hY0Ͻ~]Von8qHԢtW擯֚Hgqkdvgкh%*2̇Ζf溳:Za W,1@@dgxs9Dˤg"x;SL 7%ٸl4]VN=_꫱M*NfZR5p1q1JoABՇ ZW7OrɓmϽ5Ku}VxA?Bۓ줉Gu%L t3|4,GJՌ ԛ"d#ïv+o'tzfsHiUd1]7C iMڼrt_3N9uAlA ُ"ESw 'FkHԓ:[Z~> ~ $kŽUo%'`*8.$͘=L+Y9X?n9@̗WOZxV R ^ka^|'u[=ۢr$ + +Qy25TGc[|e7<ɷ^W> stream x[Ys~gG4NpSR7塃\JOw3hb"1@OǢK8">9Cؿ-䢐\7GyBMSqؑ|u$U @Z E@ဨmpE$c9ecwa5) 4|UpƆ]m =|UYcAtB ٔwVE Ryb47Mer8[vsXt6hʗ;+bx32 =pu>\zѪ8(p0ߠdחŐ.R/؟ÑY}A g̞Ғ‡(}!jU[I/6'yK2ZgT\Pd$Sh[:V |܆2k+MJ%XH=LK`\K<;2-v[pF 7 .a@Q _U݂s%h%}5"$0\Np &Y"a/ d&Fi&];t‡ FZdNz-Wd3匹rI\n.d*9HɂA*[pIYSIj c*$,^/m `E*jj[>tƀW٧ŌӨ&A$p4*6K: SA,IgU1++xBcM0d?`CfgR_!DRP0CޥV`:f˝3gEA=L-`,ŽB/qﺐdMlɓaFFs ұBrQ{mk8Æu-OjJ^VJe BSxM~p~WK|X[ G'y} |Θd}^Feuwh|\ᇮ 7<+WEiV,Bi3JzYX?=}!b~]g¾~B6'Cii0`k ;±SQ5M'mr_Qy9zwkN`@@> ""t֕l+ YS0^.z}3B#Gc"9mY~QgY9ͬ'le؄Bl_V&0Cz;éG$ `u܌1᥁\QP g*Sx —iIe{î@ * Y8k{>8kfRz0`Su (հ0=ف}REA*K&S9[|D*.ou} ΌE7J IX)Fg)"Nre 5x僋s.KڝRr [,X7L#!F\Yd#>ҕ'Lp.0`Oև>֍©׊a y;kJa-Yz7bAIixޥ`Yl;wj[X(g!s_*d .f)!MCccp {{0򃾶V,%KƆ!_W{Fx-|ۻBS_ @S?ց$gMI&a ]nv374;IW)nXq"CJaJ+g mkwjPXFt )Ջ^{0Jt}=K HHЃ]P%fpʛXiz6𼌲]M|ds?Oj-C&4RDk2ZlѪY>?7wT69_b$,sy99]NM(mr3>alt ,1{XN)s(hK!q'bXG`*zGocjW@w[WK_n7(l1ki.σ@^h[Ռ2:sCrlW* [3Ì;HNKj_wϨURU_ [i˻)n%g .ٹAxe`|o(QnZ"WRYy€gym&Bo8Fag3[aGbz'r_}Fهg^Џԧ鉔%7˺B7}a͈~_,'J5{҈|YȻgLcvF_򐧁 `28[j:ƲM^> stream xT{PTW%fzʽVGU,%| bX 
K`vWv]`e)$q+(Q&>ltBvb^ɴ΄93goyH"8 IR=)9z+Z.) $3昹0bmmABVO#JT)V/خՙ szyr<:&fKQQ1W4"_ЫU^k9*Iu[## CBS-̎[Anѫɪ"Ua*Kw+4*:#ZX*'iTA,w)YluNn^(A!)bAJ5u"+DD0q ";O̒g$CVT|,HǨ4*/=Le$Ft9y/ܭ,e)^z"o<^JeJd N0ETfC)gc[pcE`|Џ~rߑ  8G.g*s-˾g^ ;'nh;{ߋR,MjyWÕ Z#lq]Љ tKxy{KYՒۗf? 4(틞-n,r_1dӊ&M v3f7px eZu64w:]m1~D]izuWpGgtuZndKW`(}p0neS7ʶOY\Ѥ'ѳhdB"idT-DK|عj4q؊GZ4fT @hy<pChw.#t=9te(rG L@bcjy+~諣雹tjmyuO#}#r7fJvea6ٔr(k٦sњsZ&h"]/yR| AQ jmj絸5V"t _}ϗYɆ Q8pwt|1ŷ⪽Aځ 42eQ;[^x~?4:iԐ UPR,T!zT$EAG~Vpgͫ-9VY)&i YmyHI!"%>u-.I=Q&UI["9+nG0LbI_5vZ,^;3V_.脷q½6NR2eC10 UC+jvOc??Gsw8m0ylPF}#.цv5y{hrM&kUzaoN4hG\`טEO 4Y? ,ZHdWJvKz*Ǟuuu_=2pxzq@4R+9-ʿ4]l҆.BCMOĿu7endstream endobj 92 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1416 >> stream xyLwgXXFEۨ,\jT걂.{Mrr,+*-qUVMVlh1Z4L2|~ `8G@f!,rajY57m18B`ņgάa l6'aX"t R3 #ghƑwBZdU+O|?1>cvI 58SV p"@v@@ȝr/dgk_Kw*k G?>͑I[^hl/JP?:C~i6dgs GȍL*"[ΖQ&sn*6c~&-У(4Y;wVj (l)N:i^ v ^IŕvwQ Hs۳x/m&&$LrjXYOG@L?y7DפHBc*=A짫B6FS6&}g 9l,a|endstream endobj 93 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2882 >> stream xV TWT$L Ĉk qAhnhMfih^4 { .E{njbqFd"sf>bg̙ST'lFQ"H2,)&&  N^r]1yYX{IvǣWPK(eJ,bRQP@'Gŋrt;wH",0@ F(Gfy`T8}iRdΜ1劐eNÔ^"NZ.S:zDJ9M*=AR+e1q!^auEQ6RSifʛBRnvCj5ZKͣܩʃ^&P72 xT8Po<3mKc1cnhle?+ " lf6W*fianylFFqM:|PrMt@)lH-dM NFW鯓%%TlrDPu0$Tlq^h0,BExQ,lB,܃zemgn颴x&G b+Oj/IZ}/ a3[ GxI|ܹҭNkuKft*BuW9/z:8K; b~xk<`t2޻_j|Ez>C%߷Yew.mYFaeK 6*o68X  B#z|oHaIaPMWǣ6VƬ'IFrK?,WX\%f[L$u̒pjKH3:u1zGw!;KܗQ*l.k_ e#_z[? 
P(v^SFс#(g`P%Ħs5᤮q!S 5oL?z*$&ޘT r_c޺rFe9L hywOSj82׵0w(wvӥ~``$i4lS5U+k_hZso?g!gFM{l K=Iյpymo8|⹤?;IV8C<*@ RDeVR/K:櫒ۖ\(HQC:lE/̪IllL).c-fI -90s x{;x{TM F[W%Nn4VrÞ 2k$"臃&LC/[:Hn .aIqm " 5gA Y0P9q!2IGbm4=gyJC<'_obD7H&3,UbtYХϿtKgxm4DB4l Td>&>>1Sx[!~Q,/@P }!R@51Q#Kk t:GFw0Oamxh FH:+*B%bT6ć*Y{ӼS UP[f!ǾkdcL [j˚LT-dgq/z^'VY}z8AD u3іL\\[,!6:PVgב3zPiGogfek!4+,[[$88cl7\`gwnE(Fendstream endobj 94 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6915 >> stream xYTTW80Ƃx%Dbo`IF:0e Hf(XF-&j؍ќ?ֿ[ł{e݃^y'g~?k_Slޡ|SΑ[wlOj"~*$2evhXlV?(ޣN2iqSfDl qZE~ rr 4rTT~=38rlhߧ8EowZgӼА(<},k>;48L4tODEQsg>7b^ۢ=cbmgr?[]V ^烱O8i)C>n#GӋR˨arcj.zrFPjjMQ.zj6C6Rsy8j>L-REDj15ZBMRQQ!-՗E(%%P,6MSTʑD j+Փ XP֔jU=Nnu/6~f!O0+So~6}<+1z=`K6Rv+sWaN}QRz Vh${Ah,I.㩸/D2 9p- ԞRȑ#_^Kw/l^$gkШ0%T,FiRƄyǖ fx8eb*TiP o"bKgƷ뷟Dbktt0Z\ĢH\:5>s 't$?]glP%]iC#rBxk*+τs'te~Vxԁ|!Zg6*&;sq 3F:'03t4~bH.FX&+s/Wܔ!5XZɤǙP]ՍД #wlwg+0vpWDȤr4/yn uqR ~Z\p0 ǷҢG)*ܙD&V& }Ď޺yWá/ZnYI xFl]+=E+U$]k4I .W$=)]t_RJ!ruxnt]uS:|{m\\ hƤ6B (+;'DVE?w#r)) afaZ>ѓ٬Lz5jj :oKH .t u:h:gGU+#:UԲn0)4EN%Jv%xrYmA>a3UF6=R˜ԗ%wSbxMXpH:hRɺ]LO~y ;cqy{#G;ғdIUC:2rPc%I[sx%rR5cDY辅BX4Vˑ3*K! $&4ڨ+fF8 mq$`jtMkN^3߉+Hh;u.#:cv!QsFY4]z#泟GoͿ T¢NWr_& n@<9WZyN +JC {RfYE ) = L&Gz 1[XȀ]sE0ϟQ!n٘Yj6UIRHf ʲ+JD˼<9zO}1l'%?#j ,.OW![TH4u`6 >r`W>K]d VY?Jy`U߱ S}Ys(D*d3OF{+^ (k45wE~G%O*kOXx췻hi?|hغˏ8BV ̵[Y$4X6?66@I7bn_UFcž>/{^?c9reT>$;M b/..<4H7UDjU3Eߜ]Cr ;xiiіpS'z,N-Y~A,"MJNWxO `ʄ!q&ΗNLjWզUY?{JȌ̃kGgUTPt17Vk9PkGL['s?b%xI 2#-Y~b9s1}yc\p7LZ\QnT1X]hhltcǯ>5!!!!555ĺO-G (>1ǶOe;0KȔځ&գ2OY.)D1[!q($ܣǂ oz(ic;J3 ˀ(`dqX<'x(w@-VVI/iqXǘ8y yZHUMs #X;4YDe\}ܑcL uRu#}‹쟣+AlD7R5;ߘu iztF~& \έj.E!6'DF]U>*=KWfؽRVak@sP3 @=9mkA֓E<^)`oU@vk±ߛlF>Ƥrt\a#0{8'+W7nUDRU4HU`;#J'4پf)2TJPS6D/oؔVUL3!5^ұ#[򋛎ބ60lʉ-l"=#o"l呸/P_b3$mt~4~G̯?!YmmKz[lf/}eOF / k"jDV@<0mdrB)̿ɡ5o+,V~h{Qr@7JH~+[d[\7lL6t#<"62\,zu ~7N4',Dw]FW%|RV 7VAXjJ}aa}_MDX@\uG>;H9Bl By! 
^Dhsdg]BROo4~8AX n>pqiBEьw̗7ݗkͼM}XixxXlܱQN-> stream xX TS־!psBKkE[gV,X "2 !@AaA0XgP]mTWm_+ڪ}ÿge7dsoekCD"ɂMlsHi#&ޅՂky4LY49mHW, r G %dʴjyHP}l[Sfzwɓg{FC#W+#!}, $Pv;'X1{$J5?"jL4E@yt6ŲH*@wF'Z~-EP*+eE ۱p|q2Zu: v۪ [yS37?y7YGjj4AP먱zʛ6P󩏩j#ZDMSK)RjEMS+Jj5K )'ʙR(ʕSQ5LN͡P)[JM!f)⻶Ѷn]'=>#Y!b3qtI{g{}C!C'q˱('5N*N;ӤW>L=b÷j 5wEP1l!Mv8Dq(%ѹjg%'&Up:ؤBA|v0eBuFW4ƽ{\%G ^ (l6aWڼ(taEͨ,OITʟ~'x.%q(Eē[Μ+}a6y>Ӽ O7IZ|Cbbڂ :$8;PeD(:^"kQ-{Z( $C +SqQdz0QH޿bo;C& AtF,8Mbۉc<lGa<\y׮ݸykĕ= #}$BKdE@&tzs>|p)ݬ"O+Ϗ[R>c~4bg.ۄJQR#viҲ8?!}O1+h5a{ #oim9Qx'bLߖ!ycQyvk t.KO nj7xS;fua&&'4B֙8f Q:n䍘c`׿i\ >' w7=;_߱_!]4D A cӥD75hSGv64 c+x]SdV4 nh'+d `IwueKVh‰zNWt^ U3c.ɿn"#-؎gavЧTD%cDǾ/$lOWi錪]ZDHsyY7~ eg%#ekQvnWWwĪw hr\enD>%gҸM Wu>;JS:]Q|;4?KLڍpĬ|/X]uv.`*Q ҡfI7@_*m&u AUHjT\v@\##ŗڗ?OAHs=BKg*-m|˫X4nEyc]/\GgQM/=DJzb(0jh} *R][{oikx;=&^ 0@ADEh3z,bG AZEЂѱ[#@/\W_1u."gh`!޿Ϟ?{?%:hG̏]7Sy]I]i{SQ[6i}Fmf>8j3b&,7cKEhY /TR\2b .%z%(| a;Ѫ>aP_31TRsQ:J,M*VS{9;fޱE|}yBL7ء~%E*>/5y{VD4 @ûO!b§)`-YÝxȀD& ZTȲ+kd2R&Q55zlנZ5SvӿQiϖ6 :Vh+zv5{"p!*ZW&Q~%#}:ҿ$i?uM/r}2b#R-K|wϛ3ceD}(dq\P<lJ? 
@X3xG`8陑(1'>\*dh26_)8\^6Gص 0๥zޣ8_"7r5o5|8m!m#$4>!%ݗx{'?5JHƎOzq@!RpZxvxD> H^aum lV fY]>q$([MD"m>v4֋ILneA::S#&lqe C(Ru@lrXЊ..uUVF{MQ[R@)PP21𦌕P|mUy}`c1OzTg7{ut+"MsOunyH;Ley*7B2^[L{ix1CCqX6~(<~2!_aXulI ji F^M?>䤏эM5^[;aACv$+[ t>:ʓ3+JUC}uEc{IF!ؗ>`;W-8N7ᤂʀ;>zm*CV AH+YDE;sK=/[\2: :nH*fa+Bm m(C+LՕ991j}&)cx^Ye_ mQ W,u_pm ٝ@uI~{kv|nգ < ~|$ıŅ{w*6$.%S=bP=it-MՒWVO y|ٙsu!aXX Oؒwۉ%JNxwli+ 51O:1"f^DxgQYZfpC`FP^\G<ꝑx vE䔖so%!z`=,O{wB<]_L,*ӑɩ(I-P- E[4?tf3stlϾ(x^C邗%0o/Prl&JNBH ހ>M"yU-`7_ GO_={-U_ agrIxAAȠulQf7뢡;霉y,jbKK.rS\LuR1e΃ag~9ФbaA|PLcu0h UC(nendstream endobj 96 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3198 >> stream xVyPwq`ED*Io@5fcD`f9s8DAE" YDͮqMecM&qkQ9U~=c3d2yֿ 2Qd9.C>˖L~`/{ż #b1SCwP/\`rBҨ]h:!,8J@/"1ƒR_ KHkrr\uTܘgV&')7%)bQʑ;;1!8N02:sw\|ªĤdfWPpHhئp߈Ȩ9sg 32- f+3Of9bV2|f c#3njgcx&q16L sQZvnԄQ'G=/̲fֶK0(c}\GM}9F?fL}C]I8d ,C9pԜ#<Ö  rlUbɇ^*xsbp۽O BXG?Mz,kA{,G{Ԅ1<S2q`8FL} M,YO4/?q+>dq_R՝#}{;ha>mrsZcHk^꽴ry嗝E5'0&ڦ fIyT~\Kbs.it1!EE@&$MyЙju7H_Wy?Fȭ5l h+PY+96T|ƚEMkxіҵe,ws0hEQ(8`EՔGʾ`;t1TrqLE_0=`t'upɾBڏ]>,XmTq|(~h"?hQDJ &zΓ*==4C茧69[wX .tpPZv7^+x=U7܄O,B \Zdka!J6eLoI9#_\%,l^ z4lS],&V E~ ;vOƕKj9:a#$]kh}C{$C9ȠėAb;Eއ&m8-y+cvani<^55JCD ؞+!:ZL;ѕh'p4_ y6e*W3S8'  tǩ[M!lⵂj(K^_Fk"+O-;Ě^4K%tLCc)7%Ȟ3^gǿOռ]l/.8jI*mCf|}?ExT{+=>.dQ~熳p֩tce䊱kV''6 :Lm5ǎYm#~E>ܪQVPXȘޯ4 "!=+; ]R׵U ^F\nǿ$u 3Ze-w0~'llTX{~{2KʆUF].2C)15S4II!ğ#l~}Ed;\LKI Ӧ1NMϕU_ll+|W*l>PPQv1E\;}}xGfm2)%\ `ˣ^~ ng,xŒmqTc;:O.|tx OlG$Wl|4,IrOE'ˬ=MO;:-w*0#Š0y"b`-h?ƧjoL'>.l  % NFCr͖ MdxppKxWK j:?skDK?YVϴ =9Kup1߿v2$&CjZ!+[Pg11S٪>JxXK:*5b3GqZ` gĀ JJɰn'n5R72%V1hqܷe%!bFTA׋d f+_S F\ϳсM*Ρ`ಒab8H}{ !E/Dv$G>zJNj~ȇ>:Olm_Gʊ0 y}r҉EC! 
\> +^9^ueuRũrL>L7#5sd0К -'kem`!,]KtU8uQv gX2٪Yvxz,>tB|N.1W+tIZ"HBh?_*P=vʏl\;{%t;_<ݻ%zjT""YٴéNi+֔(w6_qz]m`Jp&LRRGf::Zp ŅṸ/k?_qӐl@ \ZԎI}@ZCتx+Нo)-U4ǥ!=S lU 2~P+*DfeC,j>R~aǻgaS1` a"[cfS;kK$ءl`g~tk= ?&endstream endobj 97 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 332 >> stream xcd`ab`dddu 1H3a!#., Conn߷ }O=F19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU9RD8)槤1000201012{͏kg^:{ 䒒Êu9ؗu>Tz,-}w-{ydtL WS'/`8} un9.<'\rendstream endobj 98 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 337 >> stream xcd`ab`dddw 641H3a!O/VY~'Y|<<,k!={3#c~is~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWqr-(-I-ROI-c``` b`0f`bddS{='8[w%iKw8f|[Yw_/~7b"??Dlwo.y҅?|_8{!'nrq> stream xcd`ab`dddu 21H3a!., COnn߷ }O=F19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU9RD8)槤1000201012{ӏg!?EWwyzvws/>}|9ǟ^:C7C{y7%tX#WS'/`8}=n9.<'ܛ> stream xW TSg1*B&;*V*ں*(VD G 'y oimujgtAik;nm.zٳINNr}xm'v09usV}|xkf^卢^#x|F+1YdʃH(\-ǁX_92Zg>EMw|ϴnn]YG۫ĶAv# v) )̅?pFWInCg_$wQ(n敖Mx9?hjyGJ2!&+~7Q7շj77~y#Jȃѓ:}fPɖfK=jHMŸx|Lr5A8 -٨ ڪS8B]sA>zCENr./H$5b6'*SNڦ|k kK9Ro* ,2MCC=Sj)+Z17=7͂ 50Bh嫔FƐ5@Bys.j=En[Ks#,Hjrf-r>x!'Ќj h5(dTC@m e_Sx{ 6;yl]{4*2-AL[ѣN@ivDc:F|L-\ќhz{,-$Z3>~x/6(iAk'3WgJS}܂=ߜ˽ 䅼6rzA*g€ tS-͢䴁Vrq|44ӑ;@oOe-ƽHI)ҭiqwwp;ܟ~Q,J.ڎ^܎ 켶2`ښ8;kgOC5i Ҕ5UZ1IQ8 tK o;*luq̘X8@,s-Lh;M"LivU]^Fnn[XR>n-}yWZuLNhM7m_>/uP6AD9/tE^7DۈXzme7){bP 0R Cu9SSqscyȑs)CQU!flg0H{z')*ȟj.{_yί.XdH' k16I w UߙJpW 2P&]ʬ.]+-3;/ i:^|U:~8xy2e  օ+^z%} TGBD{r>9@#C"1@DQ%-*(d-)/6B>߷[1(CuHg tz( Mx}H+sP 9%nihݻ 3LpÔ~Y'd);Z5Iv ,hXq뫣lAE"SAy՝14wS%i ɝV o{^8v̈`1-; g[i+~~}pu>Dၮ2+)dWjt%U> stream x{LSWKɒ]L(=kFSaNhKh}/!2^Jy -Q)&;ǔL4n.9! 
A$m[/KDq1O/!(DF' nR"ԙ6Gc/S#bbG FJ'*u,zVm./Rc6bWZ+zcPUkȷMjc:K^3˓jd+&}bV,QG( ԕz8u'k:2o#/exCeuaKy J3ge&mxETv}^-,BPWlPv:d!Z oId3{ݖG {Ž£PtTVbDc&=:<{~o"K%ei5# Q\-%\> stream x%KSqq3O6f~ƌjTz1I#KI밝97_ߦ.ݜzrT"BRn8vzC[<1"A\pimIoD&EU"`hPӊﺬ@})!"ԕ A,"۔IRjx|zrL~@>IJj&`|g]m(6e.CX3Ԯ1\ce-5Lv7<Ա6 qE0C.9|!m:8#f'J b'2T,g))ubVq2Q(W endstream endobj 103 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2625 >> stream xWkXS>1J>9Z uZUZu;e@ b! G !$@nnx"xAѺmLegCv;Yɇ|8  1.}ץyӂO2)-ӟ^Ɠ. 0&tELTbfk9BUS{Cs+RԬ R%ЗOmEmZЩP\Ih%DFgbm&=ÇP6bNPݠ# @*2Tkl`vTŪ{0x JRIA ʍ稭ŞZw-C< ZP}kŗzZ\u4YP6/3P b9Nvt|-ucZRy{JZ4yr0]ԣlNgJA3WԒ^{`>Ca-pkd;XlږF--bY( E+a0!)-K "KGR?Ael뾳WOg{++f֓Ҵ'jƭU&+Q{ūNs &C>3 <%j}.T:E+p:{JB=s9tԶ[M}В C9Wu'ќS̍i` +Y ?|TI5a0nC.lz=׍?z͕^GD|%dY{o^kDnV\m)jnjO? M~5~G8>D8N Vi9"F"QUEi4cX꠱R %{3=IPK[KX m4{QtM*3Tv@i * e*B,Rֻwt kn7>H[MEOnj![#]yI k%l_L ~g]?B: h/'J6)~+|6|Sy 9~=:7Cl{R|>?aNPa$"z(&5ZD-\wz㨖L%js_ d鎡 u&C,ora$(08$2ɉQՕWkQODiKj#(8j4 t^F"ikҁY~x(гw=sN8[[f09=uƒ9< .}s>:O΍ MT]m"VhĽJ{a)|ӶC_~~oAbyPGG|&e$lj6K?|?}o* CNZiWnmuu&N zߝf][i4j/UE72zeuUt3)n8xOSRqK~^֩;mm%->vzjwKOQ1C *h>:KGqEXM6p9PDjCyez $ |Iߝ-95mZR 4N,k՝xMzejg<ЦJ Sz]_BwdO}p s#nO;b'#9A;AbXЄ;ܻ w2 oܾ+n1jk&_*ӊ*[%yT5Zvd mGP}=~5Su bb@#xo{-גߒI3>u#Xu4٫XmT{2xs7; zS9=y܈ 8G$y>G[R"A3 Frs=#ڵPڙ9i; K.P'tҢ8W >1rnE[`Tmx?l/D2rN<^~pn$zv mH~ <9Lc퓩s0;tJ]B߻ :5LJr|8G~ij8Y(}mLRJ(\<}bu$J8]Y`> stream xW TS׺>1xPО ՞CU:TuT֡ua @ B<(`[v:ֱն]Z{kktmZo'QC_}"9m3DW _.tC0Z$ <+¿~_.yhᳳ|Znj!Pn5DٺҔlY|l\zЄB̙59his^MGE&LIL'Jcҳ&̋KOO;ujVVVHdRZTAYqAkbbd1AIӃވL zjȣKR2cdA+1dB_M(MI]*{--=#3+R#<;*':f盱qk6lP}y^XydKPoR58j-5ZGE6PE&j15zZBmRQ˨P*zZNHS+7j(5BmYxʗD=Gͣ!t5Ce$bѹ$VFgǾk} 5ztC Q >0am?ψ#jGh HsSa vw?pOfeL:7F.״ž4H3@k?Xhȃlc~5V@ڽ) !ł!!,E*(hzVX ׷^a{t'&SX"ޞV4tN`˓ y EPX#l'yӚǭG$--$7)BpzKZI{eãhx$U0{.Fv3zr}J NHgGb1Oq(Jqn(淒7s,_'2+~u?W80mؔ*9X25_x"6+5oGa0b{ϣ=?! 
t?jxTiE{< ϔlV 1pR ]$q:U@׵<>f>78ᠨNi"mgZ~).aqQ\DFn+z`)ؠQ7$īqsEdV,ȑm Fc"23wsK?⬝LwU/c'\%ĩ(ZYY-Ls(8|NB&ѠVo".J;M+8|20iP "²0ȁ/}Q HC'+V ٱ Yq0t]{yCAjD*7iێ^CjLmqRf^9g2,'CO9E7/.CZvh%8"!K$7[s \'mtU`|w N"t#/]0V4n({Uș=*_RLe(vyExp6;TO~IEi46چwAcd AFi[j8~}Փ}"۩%9Bb M*z&3oWu槛i4Fs"sZq8jjפG'mS ]ěVcJ1q,$OsuMx.P-3$J 5{-\]S% mRK2Y/lp=9m)g3٘?{oBZb*ط:LAJND:E* p 46-{UrA6CV\]nNQ)T4{SQ\WiO t&&sG{17/td* .tÁJ}'$DN>IWj2X!ǖ]} +}m,¡x*~oB#+Bq$9N0Dy9:Pqٹ)끙5A/Q( ?_AFϳ /z:v6o~¢iA|at1cDZNq A>Z )!AiX8 S`.38@$ 4Nc5ր>0ǖtT~chYb!~}Wo[DjlBlkfW fnJX\^Ri]69`nOϗ{cendstream endobj 105 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3499 >> stream xW TgNCWJkEqqG]@@mfٛMQ6"=PƸE=1:c4Ϝ$/3sީ]]~d;L&cvb& I#ޑF=ߏYd.i{VV #1s b2Yd܁QI1!Z ͝;gtKƒbB45ڐpMr0?DpԄ))1 'NvLՆ8z:GFhhƒ6:42<*N:20(&aȨXm\|?) 0h^;çL>cs3Zfd 72Rfqcf91YbV3k=deT{3DfÌ1VLsK% |Jn#2Y;X[Kx* kZlG6迼hնa׀gvz$]=+l zlC^H*r]͞2~U pEpvA,w^5/ɉjr}a$oh^y,NVʶk(Iw%YVb>+0Bæ$s.cP28?yyrR% Jlfh]b /%O!":%YW@F.|QHFcIIc2n^ZC쉓ϊ=jEH-#r ZBZ5+ps2f$%n8hABIbE3oVJ,^pmYP7<>*;`H MBUU†*^8;icY^c_߬{9EE rמA^1٘arObK )S|tqH^pd Qw !pʧS [<✢3J2`pMఉ7Bpj8%.䔠X3KEeh>lD+IENOnk|eF'xq"j0~ӄ :.iM7_,GwT Xt˳APIi+ؓPWq翹u|7^X wL4HUJRd+ɿ# 1[4GjJf03 #DE4" )bz M]>/&yw[sŋVn(Pgy)4t0$* >pЀ&kwִ`L|5dY  [B2D]9*`h}!~R9\ĵl_`i;^ ~㇠D k4OA2glKxTşʫ1:"!Ut$?;lJW ĦfnNSMf'MM kE{nFq}/՜;*loE󒵲3`熧qu~N dW.':h@!/5 2YD!dž>6< Ymא`K '8i7m[ 1- D qgbC##}EOrIN:Hr` +c4I[}Ld#CO)84r%{`_AGڜ Ƿ_|UuGubWQjqkIq4Eŕo+Jzkl蓷c87jq"C(WV$MIZD+f.CcXY*ª3a_H{!i(+w7fCa߽pz˛!c>cw#lp2scLec*~E!ܾOmV4nݍ w8Đ$" d; @G2p85|ZvÍl7+P,9 a9eN׸oqu~OuQfC)c?ijs]ֿBV$^ν\ɩimK<]cjjn[00bM9qq_nQd_ cx=T3f[Khh@c+(t=%ƜI>">b*23!9TYyĮ:d.d+tHcZB m}LdHa1,E8߮ŨFWnNQ-yj-Lh z89rST5@#?%BGD.GFZ_vxwy.Ѕ |Z[E\-VWjcgV3szsﲫYB馪5V-$d۶fsm+OnKzf.cY5*5{ా,R qb7&/];-+'/-5a.dpPLTǧ8RYyt *MȚE<0Mwk1KOҭEEZ>1y@ՊٔWAO{)GX HO)2rvPZSR(jY-ڟ'=~31j>mj|y,aJʷiFh> [Vqt{;evM7=PO{z\{薫ʠ0:U =@

LYJM^FzQvǠ;rcTj.a=RnZQb3/8dGΫH.i,;{^s$9*/2}bnG4){iO$-LmAgn#;Й Ǒ젤4|U^Eר VVUVQO6eSs)0:] }> ʞԤB8Ʒ*P}0vtO™ hªjB|}Uw_1tE<&c^Q&Jn⻴IE4WqRo0${>Vc+5[5%[#<^xg"@ֽq_8>;pU@ Ȅo]!쩌"}-qߠuEhJGw6Xޥ Fp{ u@$H)+97_,Oa~0p_B;w܁*-iq v\/T,Ԓ\hJ%VwB\ȆkNd;a~&wqѽ[4/p:Kʞ8"5򁨆#k.pE,?P=$` %m3UadlShݓc(rjEɧrRx8'(!n(VL"VKp ]#bv:;ł+uMNnGk*:c? |@cs Z.Y5ug b#^h {w͕%Ͽ_"^J+?^ΉK-Sa峢 *6Vsk̷E)F[;oFendstream endobj 106 0 obj << /Filter /FlateDecode /Length 4950 >> stream x[K]qϯսGvL+R,&\CYugQHI,A Nя`{0~~7X${o`Kr_sb+d 9ŅT-D_9zxo.w'Z=~ܟn~?䴔təń͵.l7V,~k<|~sC1-xNkY=a uK<+gڲș+Wj3g:,Q,v(..q&ayy Af_ H1!S@ƻC2Q1S#,C).A(gٴX9DAc>$~ :P)DKe/9D y+ u70c2& 9exSJKI$=5ٕT])$Q *&,MI.!,Fg +1s(Blw2f*bU{QYNG_TƔtM2}(jNb'WMK"m)['gս8M;;@LY%Z6ddRnhusHr&v%Fm=;'ZPDA㴗CA9 R,q.v9SI{8c}vQdKˡ9p$4,LxB,f̘Jed̤(4 #!HAQOj$׿r'tk 34H:wYR T$ A!Uj .$eCI}J@3K̅*Yb߰:N8Mjdk8u)u$R.)AΓ=GM lO@t{!@Q&1ftCԠ3+U At !KR?` QpV{5Ȑ~ 7` TڤC 1-OkNljXf \` *Z`eIhrD|b̀X*gwL3hg@s/&d,' g&xXQDt@ݥ Nk҈RIJf &o=ķ*gS 31o݉6S0S2wUjfV?zYfb8WE ș;~c@S \ T5q $s1ۼ(:.ʦ!D,YnL4TR0GE]tw2r71ytNXa$_^}A 8cC9ޝϕi()gw=| , j> &h ? h3ߞ ZR_eV&L:fҹ?9T @S9>`gdZ+LJOWC[~9$^C#6 o"a~Y%@pXHV+./Õ6|zlq˷'p;Bӗr<2D!l/(|>m.E#Ȏƥ=ST[ %b/IOy TKR*YĴW'\Ol:IP.U666v߾'P>ߟ, N%4 +U<Š^^`kAvOLGr/A;~g̋q18#K>v_DdǦQyYKJ:"uK*L a߸Nz0*V.2CÎ#D#"hAȸn0$6, r(u^ﻻmC"(*h;-&J+ZKR M'Y$Nz%lu^Uxi?%  Mc~B6w(}' xӠ[1Jdu t@|qcDCk(,<[ &i(R>b% agI-C#;5fvٜ6NOubBV@^K0L#a3l(e<f6 otMl{>̰&{٭u{QUpɗ7Z(:ʯ:W7_ۯNSdw".j\%@KGJ +ׯRvWj1lΒބmڄ*}"]QRǡ*>?HcN:KD^l"7(w}YЬr$-5=P'3Mc>\^r>o~nxp`PhYvzAtECB~k>H%xuϹuXW6-fl)`S@P:toI{f': Vy-m3"TKɌB^65s1lդ 3.gCZalK ܍-|E'(xP44RjrjGڂ5/bJJ %s8OF39]Chov{ݘv/}khP/:}Ӈ>x)(ru>~aH@/_<7P %x[bmv7=K{ 0;'#=Ŝ%?JDn {_U{" WCjz.;BZivuz1)BD'|?s]c'TYQ>KlOnhywnHh7_}?| XzSeS&!OZEVw}yy-hc;MÛl4 Z4dzD~&]&)ވ J Yc_a[SY|4?{pGȼv?wa$wH_xv JD9f\P.9QAy̌1^"xwǫ >+ni!lr&̢o/t> P{`Ӱ! 
Rߌ\"pr˽ZmoϿɢdr4T㳍0{Uou2u厏94TOxAu52͆&ι,랟|aci]v0r-ZdŚ^%rcp6TvI 4.BE09ٟ76.&@(\;{ۯtm'>ŗF>+1ET .Vlt|^_oP/4KBerk8Z/׻!oT7)aƛ]':LWT53jyzUA|Con‡ֻnvzoT.zeNK8/OsG r= jEjpҒ#c?>|"9KaKy[x/MIY~&lpyO+sbN&'蹮=!P?S>䋵<= d΍>Vwd׆ɺo?݅|c%`CaZsqM9@}3b,\^ˋ?+qk1t ^\RV>6֪A[5L UlmE]ɍ"!TfS`HY &{Kwio|m_Bl >%mv Lsc?m^+/acP[0q]~i]IP 6r |ɛ,i6w:]L#ؕリ2ioK-WiSjqoKh?ۼ40yQ=FkVBW6jپk:'x(aq.W l^}|Y3k8MCJn7>[DjN]tiwcO ! klQ |!åm;eq1f?q~5q2֍-a`H0vyWn#6Su4 F~Fl~$F$h)ȚǨK.`mq ; nZň6qgl;un+>#mU#`Ik۹ ?clxtT >>{Ggmp_N(lS1㨋|{UF*wzwzdendstream endobj 107 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 992 >> stream xRmPTU>w?ugCZ{%@WÆ6>V@(esw/+8|L qlrJf5PK$44MSdPYt{s}^)da0VkvY+ʷE[e iLc"JsR5kXe$x#> H0Ody$lbxZvX-U.IA#VCA&VW~S/w8X]S6[6\= dBYhbn)P&:~e L0*I ዜ!xRkW4]Ps S^;b6OԠɚ&襜 KZM!\۠d9IhɎ8tq HK$UJڅqv޽;:;@NX?JfBկ"ʆʠL%q -#zpb:k.-ZQ;w1;Kg(|40KRs{o 8|XuSk3~ g}6#%AE^"$PFOB &uwiM+jJH=$sY̎-#G4`Q߰^pÛ׳n_t x,TI vmJEiZo\o_j{j|;R!op#(VbH$TP2̯27YǪ-]C<;QgWOu pj!&-3R7j[8౦g-I,y@d7yF gWՕU |G}(vSx -g/TS%)hOM\v dᷟ' "35g#-il|4ĔS6t~nR fD0X=۪]AAթ@H3FW{CkAw4{5endstream endobj 108 0 obj << /Type /XRef /Length 123 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 109 /ID [<2a983d91ef041a6548891257b5d2edfa>] >> stream xcb&F~0 $8J[? 
@6{:(EHj)|DJnB=`g "ٝA$X=) u DKA$*0 b3l "/H.^`@bn endstream endobj startxref 87153 %%EOF spatstat/inst/doc/spatstatlocalsize.txt0000644000176200001440000000024713624161141020153 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2017-03-30" "3.5-6" 21 85 0 4677 0 "2018-01-30" "3.5-7" 21 85 0 4677 0 "2019-04-13" "3.6-0" 21 84 0 4663 0 spatstat/inst/doc/replicated.R0000644000176200001440000004062613624161275016115 0ustar liggesusers### R code from vignette source 'replicated.Rnw' ################################################### ### code chunk number 1: replicated.Rnw:29-30 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: replicated.Rnw:35-42 ################################################### library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: replicated.Rnw:180-181 ################################################### waterstriders ################################################### ### code chunk number 4: replicated.Rnw:199-200 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders, main="") ################################################### ### code chunk number 5: replicated.Rnw:207-208 ################################################### summary(waterstriders) ################################################### ### code chunk number 6: replicated.Rnw:216-217 ################################################### X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) ################################################### ### 
code chunk number 7: replicated.Rnw:222-224 ################################################### getOption("SweaveHooks")[["fig"]]() plot(X) X ################################################### ### code chunk number 8: replicated.Rnw:253-254 (eval = FALSE) ################################################### ## hyperframe(...) ################################################### ### code chunk number 9: replicated.Rnw:279-281 ################################################### H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H ################################################### ### code chunk number 10: replicated.Rnw:289-294 ################################################### G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G ################################################### ### code chunk number 11: replicated.Rnw:324-325 ################################################### simba ################################################### ### code chunk number 12: replicated.Rnw:338-339 ################################################### pyramidal ################################################### ### code chunk number 13: replicated.Rnw:345-346 ################################################### ws <- hyperframe(Striders=waterstriders) ################################################### ### code chunk number 14: replicated.Rnw:353-355 ################################################### H$X H$Y ################################################### ### code chunk number 15: replicated.Rnw:365-367 ################################################### H$U <- letters[1:3] H ################################################### ### code chunk number 16: replicated.Rnw:372-376 ################################################### G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G ################################################### ### code chunk number 17: replicated.Rnw:384-388 
################################################### H[,1] H[2,] H[2:3, ] H[1,1] ################################################### ### code chunk number 18: replicated.Rnw:394-397 ################################################### H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] ################################################### ### code chunk number 19: replicated.Rnw:410-411 (eval = FALSE) ################################################### ## plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) ################################################### ### code chunk number 20: replicated.Rnw:426-427 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders, pch=16, nrows=1) ################################################### ### code chunk number 21: replicated.Rnw:442-443 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simba) ################################################### ### code chunk number 22: replicated.Rnw:455-457 ################################################### getOption("SweaveHooks")[["fig"]]() H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) ################################################### ### code chunk number 23: replicated.Rnw:469-470 (eval = FALSE) ################################################### ## plot(h, e) ################################################### ### code chunk number 24: replicated.Rnw:479-480 ################################################### getOption("SweaveHooks")[["fig"]]() plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) ################################################### ### code chunk number 25: replicated.Rnw:492-494 ################################################### getOption("SweaveHooks")[["fig"]]() H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) ################################################### ### code chunk number 26: 
replicated.Rnw:507-509 ################################################### df <- data.frame(A=1:10, B=10:1) with(df, A-B) ################################################### ### code chunk number 27: replicated.Rnw:522-523 (eval = FALSE) ################################################### ## with(h,e) ################################################### ### code chunk number 28: replicated.Rnw:533-536 ################################################### H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) ################################################### ### code chunk number 29: replicated.Rnw:559-560 ################################################### with(simba, npoints(Points)) ################################################### ### code chunk number 30: replicated.Rnw:567-569 ################################################### H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) ################################################### ### code chunk number 31: replicated.Rnw:577-578 ################################################### getOption("SweaveHooks")[["fig"]]() plot(K) ################################################### ### code chunk number 32: replicated.Rnw:583-585 ################################################### H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) ################################################### ### code chunk number 33: replicated.Rnw:591-592 ################################################### with(H, min(nndist(Bugs))) ################################################### ### code chunk number 34: replicated.Rnw:604-605 ################################################### simba$Dist <- with(simba, distmap(Points)) ################################################### ### code chunk number 35: replicated.Rnw:618-622 ################################################### getOption("SweaveHooks")[["fig"]]() lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, 
rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) ################################################### ### code chunk number 36: replicated.Rnw:628-629 ################################################### H$X <- with(H, rpoispp(50)) ################################################### ### code chunk number 37: replicated.Rnw:658-659 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simba, quote(plot(density(Points), main="")), nrows=2) ################################################### ### code chunk number 38: replicated.Rnw:678-680 ################################################### getOption("SweaveHooks")[["fig"]]() rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) ################################################### ### code chunk number 39: replicated.Rnw:698-699 (eval = FALSE) ################################################### ## mppm(formula, data, interaction, ...) ################################################### ### code chunk number 40: replicated.Rnw:709-710 (eval = FALSE) ################################################### ## mppm(Points ~ group, simba, Poisson()) ################################################### ### code chunk number 41: replicated.Rnw:743-744 ################################################### mppm(Points ~ 1, simba) ################################################### ### code chunk number 42: replicated.Rnw:751-752 ################################################### mppm(Points ~ group, simba) ################################################### ### code chunk number 43: replicated.Rnw:758-759 ################################################### mppm(Points ~ id, simba) ################################################### ### code chunk number 44: replicated.Rnw:769-770 ################################################### mppm(Points ~ Image, data=demohyper) ################################################### ### code chunk number 45: 
replicated.Rnw:788-789 (eval = FALSE) ################################################### ## mppm(Points ~ offset(log(Image)), data=demohyper) ################################################### ### code chunk number 46: replicated.Rnw:801-802 (eval = FALSE) ################################################### ## mppm(Points ~ log(Image), data=demop) ################################################### ### code chunk number 47: replicated.Rnw:819-820 (eval = FALSE) ################################################### ## mppm(formula, data, interaction, ..., iformula=NULL) ################################################### ### code chunk number 48: replicated.Rnw:870-871 ################################################### radii <- with(simba, mean(nndist(Points))) ################################################### ### code chunk number 49: replicated.Rnw:878-880 ################################################### Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) ################################################### ### code chunk number 50: replicated.Rnw:885-887 ################################################### Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) ################################################### ### code chunk number 51: replicated.Rnw:914-917 ################################################### h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) ################################################### ### code chunk number 52: replicated.Rnw:928-929 ################################################### fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) ################################################### ### code chunk number 53: replicated.Rnw:947-948 ################################################### fit ################################################### ### code chunk number 54: replicated.Rnw:951-953 
################################################### co <- coef(fit) si <- function(x) { signif(x, 4) } ################################################### ### code chunk number 55: replicated.Rnw:964-965 ################################################### coef(fit) ################################################### ### code chunk number 56: replicated.Rnw:1022-1023 (eval = FALSE) ################################################### ## interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) ################################################### ### code chunk number 57: replicated.Rnw:1028-1029 (eval = FALSE) ################################################### ## iformula=~ifelse(group=="control", po, str) ################################################### ### code chunk number 58: replicated.Rnw:1039-1040 (eval = FALSE) ################################################### ## iformula=~I((group=="control")*po) + I((group=="treatment") * str) ################################################### ### code chunk number 59: replicated.Rnw:1050-1055 ################################################### g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 ################################################### ### code chunk number 60: replicated.Rnw:1178-1180 ################################################### H <- hyperframe(P=waterstriders) mppm(P ~ 1, H, random=~1|id) ################################################### ### code chunk number 61: replicated.Rnw:1187-1188 (eval = FALSE) ################################################### ## mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber) ################################################### ### code chunk number 62: replicated.Rnw:1211-1214 ################################################### H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) ################################################### ### code chunk 
number 63: replicated.Rnw:1235-1236 (eval = FALSE) ################################################### ## subfits <- subfits.new ################################################### ### code chunk number 64: replicated.Rnw:1248-1250 ################################################### H <- hyperframe(W=waterstriders) with(H, ppm(W)) ################################################### ### code chunk number 65: replicated.Rnw:1273-1275 ################################################### fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) ################################################### ### code chunk number 66: replicated.Rnw:1285-1286 ################################################### getOption("SweaveHooks")[["fig"]]() plot(res) ################################################### ### code chunk number 67: replicated.Rnw:1291-1293 ################################################### getOption("SweaveHooks")[["fig"]]() smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) ################################################### ### code chunk number 68: replicated.Rnw:1305-1308 ################################################### fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) ################################################### ### code chunk number 69: replicated.Rnw:1314-1321 ################################################### getOption("SweaveHooks")[["fig"]]() fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) ################################################### ### code chunk number 70: replicated.Rnw:1342-1345 ################################################### getOption("SweaveHooks")[["fig"]]() fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- 
hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) ################################################### ### code chunk number 71: replicated.Rnw:1358-1366 ################################################### H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) ################################################### ### code chunk number 72: replicated.Rnw:1383-1392 ################################################### H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) ################################################### ### code chunk number 73: replicated.Rnw:1421-1422 (eval = FALSE) ################################################### ## kstest.mppm(model, covariate) spatstat/inst/doc/updates.R0000644000176200001440000000503013624161277015436 0ustar liggesusers### R code from vignette source 'updates.Rnw' ################################################### ### code chunk number 1: updates.Rnw:20-24 ################################################### library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 2: updates.Rnw:38-74 ################################################### readSizeTable <- function(fname) { if(is.null(fname) || !file.exists(fname)) return(NULL) a <- read.table(fname, header=TRUE) a$date <- as.Date(a$date) return(a) } getSizeTable <- function(packagename="spatstat", tablename="packagesizes.txt") { fname <- system.file("doc", tablename, package=packagename) readSizeTable(fname) } counts <- 
c("nhelpfiles", "nobjects", "ndatasets", "Rlines", "srclines") mergeSizeTables <- function(a, b) { if(is.null(b)) return(a) for(i in seq_len(nrow(a))) { j <- which(b$date <= a$date[i]) if(length(j) > 0) a[i,counts] <- a[i,counts] + b[max(j), counts] } return(a) } z <- getSizeTable() zutils <- getSizeTable("spatstat.utils") zdata <- getSizeTable("spatstat.data") zlocal <- getSizeTable("spatstat", "spatstatlocalsize.txt") z <- mergeSizeTables(z, zutils) z <- mergeSizeTables(z, zdata) z <- mergeSizeTables(z, zlocal) # currentcount <- z[nrow(z), counts] bookcount <- z[z$version == "1.42-0", counts] changes <- currentcount - bookcount newobj <- changes[["nobjects"]] newdat <- changes[["ndatasets"]] + 1 # counting rule doesn't detect redwood3 newcode <- changes[["Rlines"]] + changes[["srclines"]] bookcode <- bookcount[["Rlines"]] + bookcount[["srclines"]] growth <- signif((100 * newcode)/bookcode, digits=2) ################################################### ### code chunk number 3: updates.Rnw:91-96 ################################################### options(SweaveHooks=list(fig=function() par(mar=0.2+c(2,4,2,0)))) Plot <- function(fmla, ..., dat=z) { yvals <- eval(as.expression(fmla[[2]]), envir=dat) plot(fmla, ..., data=dat, type="l", xlab="", lwd=2, ylim=c(0, max(yvals))) } ################################################### ### code chunk number 4: updates.Rnw:102-107 ################################################### getOption("SweaveHooks")[["fig"]]() Plot((Rlines + srclines)/1000 ~ date, ylab="Lines of code (x 1000)", main="Spatstat growth") lines(srclines/1000 ~ date, data=z) text(as.Date("2015-01-01"), 9.5, "C code") text(as.Date("2015-01-01"), 60, "R code") spatstat/inst/doc/getstart.pdf0000644000176200001440000035502513624161307016204 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3365 /Filter /FlateDecode /N 77 /First 631 >> stream x[mS۸~촒%KtJ[@ N?`!9v6 -H:/9:R$L1`H1BiX(b1B  ,XD᪘"fAHfJ*Ôъ %=0OXL #PD0!If&1(Jc13W0%IDT"# GJ( -\5t!Ɗc* Yb<bl 
%BUcЊ%Z$XbB ֈLĜ294XTBbI0S0Qbz QEU`UFx`J`ЅeO@9HbphL9,e Eea, 2"L"[锴̎v;s2$9e~|w^iA5i~Q\"i, ux)НZQP~ҽ5}r6$9$!}W-&Cr__k1/?MbLQz5#hTa+Ml,ٯU>n!wMX>|9eh Xq|hIÞlY9גOoN~ tlvu [׎uti | ZC Il'lO.=T\OLj$\TKGꦮII 6U)_W,z a^^}|cZmMB0OdVLJh}tB_['CC3p(ǤaeZĮ/הO(6/K>;?'Ps>8xo>|M,G*,O5.^TtۙT-u`AqaK¾ik!` BP{[+<]齾_II)D@ x!2: 1 ̾CFYф7UUA%Z?TW ҽtcbUO)VIhR=AЕDWZaaokŷy >?{~?ha6'~gP:+2gu>Mg(,zCh2,Ύg7gQz+$ڐt9 Lt_LטFD΃BJ۳-~K%WKv>˾89Gs;M~ PZH!lBtB;98O'zߘ(K]U^]xvmCТ.IH_$ ub2ӹ]>ȮaBMi8X-Vвn7i7 u3'e{>&˶I5׆ԦECEZ-9d0G. !6M &k-b#H6k|yV(<quc( m N(Q,-+\䩔(%BGyV!jq-ku 5K T 3zd3J; E#V(hQШ Z`kc%邓/wGoz’t|]tKz2) ݚ.Bi"8ʤSW' J% suFAwRS*;\C K,10q"Jk=Sh)uXU_V>.T[7\5?iSWO/Գ0#g}ۡ0s}-X\)is)Ob)kOnv.Qߎ~,sWM0v(a 9>*>=?dAMN~{Vô-Gah}+C"njY@甫ߗ/c1"RIa i}ykҥ_UN۩e ^^cw(JĶUePO*(h;g]87@ %JP2*l/0\٧9{;.Ia<D+^7tkpI/VWs,W'%]RPuw mV:Hing\n^J5Ej.\Bx!u!>/|A+{ ,*:{)Amҹxـ%?C3>ŒtHqԓSݵkOsάn^+:AM˺Q6=:x`~S2,Ip&aK{7JqGJgLJGcǽE|l0]:Cakt⺘PƖDjt nP޿6x]a#wABY,.6Km:ݶ6dU,$AT!ڡ,}#JvaLe 0&m8+xg,B,2:i( N?bܲr&[rKWY⍻v@ l|::x ݺUO2o$vW(ܜOF+ap'U|3vă!!2ʊN/64@batEF`Q_#3$H> stream 2020-02-22T16:13:59+08:00 2020-02-22T16:13:59+08:00 TeX Untitled endstream endobj 80 0 obj << /Filter /FlateDecode /Length 4196 >> stream x[I$u|_Q7WT}0$h  xꩡ4=5!{/2EV96ddGE;3؝~Uޝ.wfwPv6ư{x{'Ζ2w!o?Z}?ݛ!VZ]6?_?:>3$|^hc8J;rVc% }[cR@2.۸?ʅO(&FN?1! ;p/||`E/5A;jlC.yo,~s|uf"fM) w'c|Ie82{B݆#t&8mW)5R'!@DZ }?k+Kqp_ g }(BlAEHzdP@/džZ߫'qT]Uc# 0!;0p,#|?(zHA)CZ|MIwE;)^|L ^mҺ{!b=>Ym[^Cij!ECNM3A^UwEO[UWVĂ/p_*x0h͊ *,?3%Vq Nt_lq|^6W[8 G iz[fӧZrv e(3܍3u4= ;)X50Rpv%\K(Ʈd|kq|Tgx1~;`w,wXx{v3}aCD Libe6?GV]1N3 bْx-0*"C Ci`#|o XB֌g&Bm~Xv1qY?4Ftb$:@5!{@gVp푁ԶC xBL..h$7,>=mv'H8o08)Mш9R4gglgJ)irN"H<ݲo I1L)Gc)65]27-/BͽL^?y\2fcSIJgKmŧM!%OcΦJa ,T f\3+ G(B303-h`n͏6טK $5! 
|Q ͼJrğilv{@N:hpu\D=JzʤС{ă)2%f?̴AP8RǕmWnRm@j\=y4ZamA;7j&RA9#4H|jǙSn3ce\s*+O"I0I@A%V!|nrH\*P->h~ ȁlF;ԱGG2Oj’g%LBNh[և%\(K],%]TK¢^jǔ[yqm $Ȣےb+"R<žU n~oZ|Ѣ|a[ynU{A%6>X_\O/cSOJ=)ӻ)K"F jhi=!P:z]r5NdJBf~XScs]>lH-Ew$8bA#USY2(,: S"Q do=Aŗ%Χ48S* YI g1n*Rq1'X04Mn;k"/ OzVVgJbzpz+QXWQ33Y%2߁E!-:DOs$hW)Hm(Y n0G נ V'I.].A R0Hu=rEIvBJ*;uQv'0yqĢf2NݹX_ցtev)w(@'$JxtNQ=$knZ#RӢ{mR48! MbcQ-cAˍtU$\ &&((G?d^oqaNB (J/ÓsY(&]- )PoJv_0̽D-Cc\P*OëЗqj\^zD|P[2l6o6D.UrvJ8 #OLoQኪ[ 11]3tCr/ :LptfxKQj% _<2ݯC멖s\(~)>"$o:+0Ź9k}iRȫv6o3Weks6O+NB=-@<$PVaYގi*%hZل|Y6ɎA qeqJg;u7 ]r]R6# ;Ur}y}pd~?῿ݙ*6q̻;CW+!:r!cgȾ|WƇ!9T~:M`P6?z GTtb^|]Gx JCT٫f$K?6WJA2EbXFNaQ8h \] Poth5uqj[|O_T4J6oI%i㭂CBWoWb ֤==.@W}9(n@z;_fi;E1FnT l#eث~7tU7v&>銉y|QV RJ5OԢuz^OHϽc6u}ܨQ8ǥ ?ZJtYA?njU? yn<97z;4pilx \<'5SߋI(6vvYG_q2o)/d~5-ͳfrATXEk.VڠFrMlwfճUб:$Rt[:ufD1^cj)+Uo{;O*.5$ֺ&TVԵu4ЯP=v)3 |0и|zCo?/xϼZF1 /2^?u:춳ns4`U/y4=MjƋ|{.lLVdꗛNߕfW]N/ LzuCV *-ũ?Y4yU꧟>!sz 餏 /.)uyDdK,$ޛ./rpo[[.K93y;[`Ge_e?/s^ur3vY* Q`v 'ʞ^zGw=ު}K-endstream endobj 81 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1625 >> stream xTmL[䖨ɽ.Ф!4(QҔ|IE-q`|ccc`0HHШ̅FQN6TݲSFɶ.eُjگHG+=眣$ I¢S$ɯO7-.L@dCCR $ Y<r Q@J$bgėds4(U4(؜8'yw~xYGq]δ|v*(} NjN4P]xR3j3a~}E%Ws@q76}J|oQ7EY,˥;.6;=[>񲺅u!RZδ@pf`d" . _!/ "F\Svq[[ʉ'"YjϜb?jX Trӑnܶ2pۀ\onWήޡ`T0я4vq_(\o;7Uzwtt)K'99~ K[`8<zKn[d~i` (oڈ_㿢[gW1ggh00:\wr}9l)uȹV}Mm=*zם)UN=YdEQbI_(,-9V~qz,v22=J |.箣]^-}ݧgҷ4ğ3rRhm v^aRG?֪7[4ZM{Ob鋆A6T  " #Q&+t9H?8G,y*G8?~w7H<̀8\&;SD0ز!T {WG{vn~747!DL"o8wrś _pI8;:v)8^jC= C JΠc#Ja0 _,:WlajP@;J8F=$(gh=N]fۙ*_(.ѨnLX,BrM30;з>tЕ}Ds'B?uQ@"p| lG&,Y5uVsMn0d#3sy0 Cp W?<*>v#]',e]_$Gx(y-A jendstream endobj 82 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1224 >> stream xELgZ,'T.nQl8N0lSЂZWڣ-R(<v Ee%,AE<7-]6u-[\7]S}|σcQ ̬3_ %")EX|E1ó@.y(~.Ax6&q1W4%,PNJOO[F'\No(W4E*bK*V,LFV kJXVz ٜ*7&2uKf [B窍jIX=U&FL\_ɪ t_ma7IT ێa; ,b󰗰XŒߋ&,JBRV{9t僡 S"̫?.{""jt:瞏_=k+SS\F͛7?0=ArV78*u8xm`BH\)2HPӽOCq@SMujrz @hbV];W_ew?fT(p=VP^+"8q42~&ReRzaT l].KZssO~hKDfdX WsO9|*,A)ΒބMZOGqNA>pڏa3 0iޜez[1l&6~`ZTve 7BXavt)~C BKIoghVe>! 
#H,캬JD3'@3pfH)2J r_p 8IcHR}]k[M-PATw7x>;8B=ߠ%UzQZ)k wQғ.-#u)iӶ^F.怘aڜ{^! ^ kP&#C3)Vkk@ܼSB[g)(t9]y(t5}F,% šӿ.07cԗc/"q{}P'?vjx; (ޣzQq-MPG!KG$En=m&%,.Ty0_ §$]v8fqnwפlΤ|fz):`,UgrryP> e_endstream endobj 83 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2705 >> stream xV TSW~1T8pUqZк;hIV%$?@!+[Y .ukkbu:e\ΙuIN9}w(aH$Y:k_H7L/,E [<>cD!@ ~@{C)HHWtxmCCϟ7-x̙eRE6^&jKRzVxV`ƌRyt&nQȴm|:YL* ^Th#rY`J*E+Q4 ^SY*ݥƭKذ'iEZKMޠ&S FjFS[ש2j95ZIͥVSkQT̢Qѝau??#+idf៏=rا^sV2s`3òA]J|FAZ 4I3LRM j+?! e7S^5>PJЁN"@|8QbMdbh6+ڷ&$[y\CGgJz+[ xAr #8?n 72}+W\O^t󎸫UV?9`KKogJ̥|`|Pm^a(IkG rzG[%3$F%>!AO p .E1;&FׅY 9)ڤ=Qq+6dA|[<߬oMUU8߶F>(>m z,u37¯wr DRsi;NY$yM1{2_N\>tEZbK bVTlb~=F~m6mA4d; Յ%v6u@gu=5:صWŧ+T`1bucbęcRěϺ2J_CG-oW/U>'q^pJ/tąQ1bv8U<$#.ʌ#ڳ|^ kk=9rjMY e,J x]Pli0ѓپ ??1 ٺTRפ49<p\"$)M}%:LfhVq+EfW%LN}4["~=8 >jUv@6(U9 iFV%F~;XvZfV԰CP_ 4]>'B*ùH>'+ׯ0WZ:1,vVZ"sB 1G2ufL$Gbҡpm2Rc)j3V8`!+荫kwD|3h+\`[D[oy U(gzendstream endobj 84 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8097 >> stream xzwXTWő7TrKDc]lmPff ׁ8 {5KFs.wf%<<0{{ZbBA8u᧩~h~kLQ}>,?̠3t s~9&&! #wzzZug5uM2j{NWgΡ^~Ρ_M;C# d䰰I~!=?7*lgF]nVC9[9}q_$=jm{?EQ - ؾ8pIe!CWHVZ:eMHu=7xmܹ{O4ej3fΚ=|OF;ζ5R#'(jMmP 5BRm"j"ZL}LP([j)5ZFMSS4j%5ZE͠VS35,j-5ZG͡>̨~Tj!52Hԇ ʂAYR!T&CC}@Q~T_^zR;'2lW/;fOu-yzz>I}l6֬~_3`mω|6~ÓF*gqwK"2vȲ!qCbhКaÆ,VU1?a6 #G>稚Q|'m+KMj kuMQISl ;0*?sgRkW $yߠRR&Ŧx..+I٠42]NO{B3ث*/C0y-V+~a[\*S(v,ZѨ{|:c ayQ4ڡCt v Es`eW۔>Pqpw<7% ;js!3:avJ3~H7y M)z4m4сIuH] pE/T4$ҴPRɕaqEku5N"\D*jE,0W Q4'iv& 2 hݔ%!f)8Sjx5-:{bQB*%O1|jmKӰ3vF3tQ,U9CzBCcJQ)5 LPŢz< *YKa$p =kV}"w*x3Md xRuʇzGoA#οb97 K;|^ƨ6KUQ7dPoO UZI+iw"rJQ9zzibax̻vN9ơ]ib<SDN~Wלg]6I krns摸`^WdmMWdr<.ȰdMpkAnI4A jxS,@әTG>SԚ޴}f_Nƻ1[6յ0eOo7ˤ)8dGaִVFVVV&٫UsZ9|m؃;MT[y<l.9$9z;\ꏜh榓V, (~6Vǎ1h 'lݚ5= *>>1 _m!ӈ!}%.KOȊ8K?73r ]┶]~ #0&JQ{CX)A[Ա(Sdo,[;b/ n Y;+Q TE *!ݰ^ +zU%@SH4IyPښs_;"SEhP׻U'Z"P0Fr!Eb+,ڥLGTUЂ T^`!adA$U!\h(UNap*+]4<|ݽm<+B@19]Ky2aҏb^Bpש3 =SȒnDIJBr% m+/U;6;>zea$1[JErhMV.l@$|/4"96[+l5$K^OB!S^&shdtzMܡoءk w,;1Wi͉Ml]OƦqu{yq. 
({at3y &xb AIv:9A!KTp^BjnKXG T>@dŽ^v53‡'̴Y}Ox+:OO$fiy2X?HQڔE2(Yy 3s>G%0/\pm jꚎk7[Y4`&7J_UÉN,ȟlȫdy4r xXfv'"ygN.@=eGHw~C%ZcZԦ7GCD ^T9H7,ʥrlu L- eW:>zCnahZG!;z/UyBw!'V I$uK"a$yEuKmo^$zXle,a`mwjUGӴ2%I,%JAj]Y-m}jA[HBWӖW>9J._3IK6ոL#S0{9 ɈU%N( &,پx[  Dwv;kPs28Pi4u;/z>"~F=Ģr٥$%4)1fio.aDC^nfė0sur(qPTFDm=pO hԳ&?$߿"jehaIe>B:Alc hfAيB(C0bFp+8H^Cccũ9%Ѡ;4vtbjͻF#[Y٬Q^O޺ ",#Suժ32"nk5y܏aZnT'zlv慣2hBk1D-X#`86Cbu*vAiFxO'^:Kd D?~EA:8`ĞJ[N[{&Ihi*}]DK1td7*'RVnej.N ^Iq,DL!ë;vYMZo5@g<,^FB~i(EFx<@O7xqŚ1/qxrλ#;K63oCoڗd0~ _%㰺;_)K!dD.xLU4T܈'K= r=K-c!K"4:-4 < HCs4 IGz燖HkNl6Ahz{>sxW4Lh"&9Xj5{o p{cm>3?){$?HjcH,iLJ|Xo KiVBN~jv#ܛ-k1(&23,&^& R Sk3p^k80ɩ9 p/ }U]HZ{(wBY.;o[ ְ(eϲ)|:eOmk݂ Y7XIz27tDH_( ˥'ns0 KMP7t} [Pn<*RA}Oc3|%|FhD)B( mX&_G~CO:5&5%NaӅm:ܖʈJD(\Od's?>oZ1ތb)hq@~II+ WT_#pT9yy+9׃DBoݪWIN?9a}Ds×i9!()V&*CILRB<#J3/۸|3^d2 !>1SD&ɓWy4]skdYg`=L~&I;0萅.i#Zu͜.yKޖ$=*An8*g;vF:=B$?05zhѠ{G^Q#ydϢs_b)˓C[RY]R->K%) O%[`aWQL3%ƇHPkSw tj7Έqq' X "6t:$WƣbyG{L=]xY;lMWZ.|+:~1CO R,y?O4',Dc[-z;++8A1sƒV{6"la{=F_49߰Y-cp"/ zc"4Yr3oAIP5H!X 9<4М7A.hއ́\88"Z*p(pޠQ֤ hA}Y`[=xM4F)#wviA]xp|rR.I׬ƒ3Ii)i9EH5-t' Y sǀEP?4 kFhdScznD\}O`ӳUj2-ڭH-)Z~3F"#ڡ$BSx:`w <vnU2ᎢHߗ:yۉϛ?CÛc<AQX1.Z}KQGendstream endobj 85 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4375 >> stream xX XS׶>!prdP!:ԞCcZ*(Zg0@@d632&A8mViֱ)ݾ{o|ٜ׿S {xL7|kz+7wa.N(C%Y6P8 ||OPd}&NwtvOb-e v'O8O$Ν6M.OOHOD~K$!2b?ރNP"Im%~ HBᲥreQ>~;ENj?}Y|'m#g*b A%& aGl$Ebb )p%fn,bp'V0š!&"FCCsbaEL%LS8cLMUf͚ɱ|-\ |yE$ +3"Cg6cXwZ[[k+|O(v_X_5T_Х<ցn&ՏPBHQGqq3ɒZ(E1,&G]?pTd%徂yuySS7o.EQQwnjygtP!O)6:[c#;0G l_.]rTw'g[K0M DžpB9} Ϻ̚m'<޴pba،'nT%~oZL `ɽ£ ~+&3\^*UY* =X}Ɣ 6!R$Fem58԰IROcsU `Hs8,G- jP"f2Ռ*V'(C5tK텢?9O{ >lO>nݘ;)%eR+Ҋ%BrCA 6e35\3~8 :(/qhW A'ip Kks a;d% 0,8D *e]o]}EL2DG'k^p#Ӂ#EӘj^J#0ΒB$ptO; 7j}(_E0h!ƯiH70niޘ1m 8@ht%ڋ>7 g2}_հ56#"[/dɍYVw,UqaW+3ir*\'Ɋjns5qw,[y> +53KL2w} T]Q .˩ͫkimNu;WMz&*xDi#J{gnv +kC.buLu%A=Qߞ ^ ]xc\p{#[PW~[#G$C hO(rz;P`f Xv& +͠JP_ 5\^FGȁ90|Cg?\B,yu>) $L4;DjP<~s>z x"g CzIٟ`X7f&>rЂـ)9E[!_yE6CFۃd*7#OE@07^V5JߧHQ$ S+ɕS!yTC# W9;Q:ҥqte-@{E44ZϧQU k:wքd&%.j3,q mAf*cU.>N+ؙq`'l?տAs?v(s,rSG 
x)l^m̛lx/j'NSOA"OV^xE2ۻ[ZD18V_U7ޘ TqFOR( $ 0$A}.X.ӯuvvx 6D/:|#TͨYY|!( X` XV" hk:P8Pt(Ұb&tx$w12}c S O&x.OD7lI}9T.[Q)ߜ Q1HBI+RK+[a^ϵ$ǘH>&NހztbRJ* 扁@ z8|B籋{ ,C<9;4 =L鱶ӈssśVz=8@y`s{e7|z{=s 5`śĪ ޕ46^1o:=JQh&MO_D(nHs|zbk/j css󐊪RIeʠWq6cte9׆>%$!*^ev.:1s3r*0\LF܁e}/ϓ  R3 5Esf'λha6tE4G"䕼=Ր,0VYJɚ*rrw0uMg9vy程NjV~$YVpZ…ZυZ/+T}.S~3?K/-]ȕw۽^}%r+5v/?8:[aOFS{r,ٍM~=Zw+|< Ľ2 |?#?ˮ>y=R[#3\qXqm;8a*63SE{NUJ(B L)Sn("D}ͨ Ar\y`Sc(״Γǿuf^.wuGΜ[[~4oc c;E.i̇g5<9g537: <8|5=rqxqGw"nE/mq/$!k̎Ԡ[YnH?7.h2[ q e;O&NF?3,'77S UT-XęQa:H|wrIޟ\D'ˏRz߻yF&/ck7p+=#ԜHy]`@A}N{.xasO/8R5zÞheYbhxmR]RryVJ`v~c)c`h剟e.yQ;:b6LJGqk-7=bLKOFOM-@(<0/lz:~PnWx:~XKE1~w+F`RVJTUXކGBlqlzFˍ5=6WU)K8G#c5pDISIʢ],8`^) ;0߶Wr %'m\ crG[ʊ6*kc 8o>Ogvq:ʱچD Cs6ts"4eityӋ/D{G&~ M(/,E$V : T!YZ?endstream endobj 86 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 337 >> stream xcd`ab`dddw 641H3a!O/VY~'|<<,k!={3#c~is~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWqr-(-I-ROI-c``` b`0f`bddS{='8[w%iKw8f|[Yw_/~7b"??Dlwo.y҅?|_8{!'nrq> stream xV PS1!H[}QzwVq}U+KUDHY"$,DIA6e _D R\mmN(cZ=޼vZ;In?w_H0"](x "aamOHEoLjxb5[^P"#4ً5;SԑQ:IsΞ?W0VW)(URGb4j.E1(nӦ%''OUjj"OHVUZUB*B)(cUL-LԩUB0~qnydeJxjGdzutL b60M$o+L0yY,eerf3Yɬf5F2);0EcD2HHx( -4O?@^.!B1p}v X7)#l98 | G53c\fu1vO9NaCԈXb52|yd:=k J{/טm0jvX#%:_Ll4 }ВՒJn?$.sy%Q'', Km(WG /ߩxGr<ںhUb8] (.My(5ra%u*CZAmbp JybĉFۤեƉ7sظa X .lwaKcoG3LƙI,nRAnaE͛BzO'p-7,%x=~H0`ۚyo, xV=~]|R\hpp\.Hh^bO8Ł44tMȾtQ{HBY}iC9]D= <pnz 7 Ov` O =aK5Nld)҂LlqYKtLh|4@x|%[ʾj+u. 
nRV~|\9 YQJ`uCޢdelJk7WfWZV\y8Kfۉkn;Q%?R{T۳`|uA GDeN~+in?mmp%vWxAf=$)R_ORI9fx/q2@A|_{2v2E,\ȀL%9MR}B7NT$TÜf&K,D$>{Ӏ#w0f4f,7)78EX& KF|3 'Fl|B;pSG9VF,zmlbz35pq\ŀgX"+S S /ZubZDc;jQ7~̺'1^VcC 7+6XGwmL?d]^9if>GrERgƗ/%ּvge߫] w=؅Zrf6.2GʧJ$.)pDkd"V0y;I6{7PT؅nkJ٩P >#ΙQ$/Wn1:|`6IćFS8Kc])QCkvӴ9:X]];o~>]3!!/ԖL_WZ|ÁV,p/ `V%HBd?oZG|o+CsĺwR7cϭ G 9Lx6aC҅o:&O<`zۭTnCN@i 4eT7!֡>ea݃{zܰ%&I ý!9oAJF|@C; i ys3r}@(c{V~{'}}|"#^Y"4>e`2lL JtǪt@3Bbβ t7 *,;nbNMxh%6)7KN]O 1߸ѦxI0dd/Ch{$ڒ0`9kj{ёe"A鋋{ִ+a>[%#c0zw.ש!` tf@{\p(Yda ꪏg;|K*÷lЧʧOqY˞b(p_d%)<<::<)q||:zG l咱X@b'$71{Zvi:4_x"%Sd?J(9T'D*UE%RƺP4̮ Lendstream endobj 88 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4302 >> stream xX teN ?ZF+2 (ꨠ RVҕ6M}I&i钤{Ӗ@ًR)88.\7{̀s:srz4ߟ{yL&sҪ->z40x`BAV.tSL {q+, i,6Y#3^:b2UaI./=#?jA¨˖=('X2;-ďNHNo)/d ?xQQђ%?,\Uψڜ[1);-jF?d ri|1w%_ \&7omEI%)ѩi{6glm^Ĉe1XXx2cc'c5c c ZR:zS gƈfld30ucLdq̶ '\fL\<+,/+GFɓR?5liiw=|~{_팓7"Gfhƒ":s(x mAaėZNW :Bpjl?kM.sAtfrj"Cl<(O(B Fz=4f'Ժ ڮ6i [EZY4SO_ !/D(zJ*g|ij  wS)BbwKѯ:B] hԤ\(\ D]ho6P>4Ov.{=XB*!E)M%`r>Y{&DbZAh:h2kYc;p9/f|tCϥ;)8h.zʹM~>ڶc-7%ˌ[v$_GƮɑg5:WYaRYfܢhNʸErli|KicI"&s,fgb:OIws%j(c<{M 3T47q b˯[zJ2OYhŻ9O)_ Hqaj aXx=;CcBJ5U+l, z&>#ͫb; Ѯ7_\|Ə:: ݬKH6)h0+X@?~K%sE$T| Z%h| C)4Jxuqr I.FBDGTUPY5hI MgCAW@.G;Gyl72](Xx7#g`3dLP[qSj H@j߈6KՀH{*Z2rui֮wgGD9a+)|~^A鄈8+Qhȡ(,=C% .Uh0< LG/p]! Ys{SM?!-u&]#~r@<+)]Я#i{OpwF%0@¬ rXS2&lL~xQq:^0콘vBa;ǣE zixv!cgwΆjp JGF,άΜC)"z3׼OE~LjSE3o.Pq?\{j>|ۑIZJM2s&Ytp)g|6Q)4T'A1WfEF#`iʓ!쁵h2z=iL ȶ)KܞޞΞ_:=p/0#E:}zv lVuqIPMxEi2n{ɿHـĽLG"H4rcC=W UuV`mNAK_ϱW Mr\ӕɕT5pǦTׅZw2~s8߾挟*#=]_/.0i!= =s)K&hr¬Pmihp˙ar*RjAn0bBV#n>={x{C)(ubv&csݰj+pW/IxwРX/usM حn|~ ?#u·bZ学 }yu%a!9qhݕvΥOYzo~d[)s=Do RhN] ⏼>f5t6*. r=*OK]+ C-L= N 7D̫t:lR/aջfI}YS,jjJbGOȓ+VtPoj6:H0 r PJnӴ )Im v+p6][ϗGv*/5.j)6eTO>[֮)Nv;w?@w3nV!XVC3C8Z ( x\JB4kdZđ 䨝]d>d2'++h$ t=E :V(YQQx> stream x%KKQU˲P$] aD/MآV-#ATz|QDEj뾿&#; X0ӐƼX);Llozv'嬔}i&N1> stream x%RLunuΤmrwf1? 
fua6-@{X)(׾w{"fi4Gbܔ_=vxy'Oñ ]֖#M;erWPj5~Z_:j@[9Գ݇l{LL_8uzHshSS3}nam&n5rVȕA/>cY8/]wqΗxHfkB7k?26әXb37D?RIh4OMF3kN/K$_l65/>w 03aM 䫅&lg8uqMճYX&Fn̖r-pwtJSVEل<Eb/7<w"\4=f"`raZrn)êQ^"+/8~̭ke>>1Hot0 ް7 :C 隰қ-@|_=jx{7t/v,dd{<_vL/x#[Kb`te8Η84 L䳙Wg|h_*:4A'E֠~]2w{X92/3YR DNZgODh7C.'!jZiشvlRŰ2endstream endobj 91 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10  ]Z2D! }I[.%Mů F UqV&:uxf I>J!􆖠DSUmcm+͟tF{8yp8I.~r 6/i Sendstream endobj 92 0 obj << /Filter /FlateDecode /Length 11707 >> stream x}[G;?\m k~ ۰zه)F?{DVUDdl,If˹_~g?k{酹{/Wx<9WSݫo^_ڻΩt>xN=}{Ҝ1&Wx%S)J+>/\Wl<=~'S{r<0lwz]|g,U_ >SN8֚+本G>!Knj<}ڞd<|\adӛm/x A5\uxͣ&%Y|7puog׾kq ^߻)otqnEQ^f^"%-{zsW\A~n`s^ql_@xlW_Zv (HlYO @/v $/ $^pn!xO >[2Aij oے+z Lm%GK# c7%<a60n)f,oq[\)3yܖ\G׍L=W_/eȢQStrxB|!ewxR ~Ow^PO /,8;\~!} \ PW'_pS"˕|.1cٝx]( SI$S-<n^^GA{霡 Tfsxd%@Vm-N, gcQ~M):1Yd5P\ϲe4I} ڳM%le^90%,Up zȁ\ i`=Cr@ `Y]mQ<q I mi.Yn,5~nW},1 {VnrsVCMly6 Q0S@TkoPx BZ 0# Wxq-؂͸v 5Ls+`a%OS]<4"!/G)a 1-sK_u383N|Arn҉Du弱iD7;ɸ`ˆ N+NTi{yq6VXDQ!gd%G?` яԥMA/쀄!N Ƃ82Ic-F ls3cQ[%<+ P񒓑ˈ&BR_;B/b,ܤ191?tP;)ݱ ƨXPrU-Nx]ݬMO[ȴz?&V1JVk dډN`&ҶVd5m'X>tEKһ 6S8C(K^$,eGw@K paYF#V% ;jB  0pWijNj(!ېϑVO>&'!IУZsr ṯkWq0N\"cJ],œ `%M:Ac Vϫ'\1="Nn @zX] ;)뢃J-L; 'V !gN4(QII Q "q8iRpծVt%^Lm 9w"2o=#N]V\ RMU_ ;p:+=E8u$GH⳥ Y-mGϞ^ҎY" +lZʨx5[gX)L`&B(z tA~alN`uT7+dmNu֑SHo9~. 
-`1Ɍ^z8pA{%; #seϜ =ne%&Y l'Bq$0a 5yGUuV Cm#iZa2@FMy2 X @^hYSJ91j?)]>\?bT4l&u6dQKb,V&GlI^*n2Msӎc%)[朥TGwGJ]!?jAŜ+t}'sID2dR2&9%ɕy&hdL4JM SŜa.+jr4SǵyՂn/, !ߦ%Lggtkjۻ4| j>f&^IB"zig}R-@mS1},gKȮ&]y gVwSL1oN%Dֲj !GmwC Ѳ K@r%13K0v]/ɎzjJ;f<ՎcNK*o0ab6iYUW ~2]i(QNRت;`#Rr#Q-Bъvŀ q:fq,+58&9j *qeb3/ҁTM78$VeTCȤDZvJ dL9_YYTG PN#EP3 B.jn RgEʁMV SnH7[)UVN-=l*@/df.~LQm-!ozц)SΖ2کJ]L <Ԓ~)giջ@LQ q,Q_r8{ gϮ>qJkK Q$V|bG Ǥ`D$}u%@`  [Jj/ ѧ8rDX1rՂϤ*/{jęJ1MT`m=εҔd T^B˳Ffij{sJu:LCϕ) :A8HgZ~z67ryO㮬g][ug)cGRV9+6$J]f37E2={ վkþNhԒ07Ӊnl鑫4WaC6>:oN|KLRXT[}*^m4-6ݰv7?uVت-8A,XO^]=|Rei)\g}%DBG…%Y1&fXiW]^H` :C6N6k A Cst{V;IKuI_r׼@ o?Fg͔ f%/9|~ '۳:v=.lċ&hJ<9骑2n}_e ebiدMtDF`AY-,ydI\FdV`ͮȓzy=e<RrR30!!NԱU%oPJ z Tf'<\"M_$- %xPI@ dTc6U#},UHIG6)lPVH`Su.L5&{D`{egZ} 3Fx^bxj/-td}._Fjm~8rj2$j\ڭ7lfIcaez11'Y.*"K/!@靛Ys7Da[DGM"M<5k5Vg\̠h4Hz1l,-E_ g:SG@VB83Jn+x^0 mұ5EZeUc_|$vV݀R`E2@A&FbyYzzu^B`r)3AtG' Uއ .b}t-r0*o=[(}D,Ыے/Ql˖"x>DF]HӍzǿ{UgNYrW}j)YH:ċlQĽrİ]uea"`7 +w.[0I>ikIUu!X:2$$ 0KFbW .YgjnD?R}wV\T,Zjln6!g}Jc_}i oCbQ35FYł#GL KREwE\aC ,g߉Afch|]1aNM3tje6M.^Sm^}vkfUHxыNē:<[ί$g<ǼmCL P<*ƖL1=pj -Ptʲ[.QZ_p؅i&=an4I99BTo9[s,a~}1{MNVtCN=K{1w5ɴ <9%L¿zE7Q5̄ga0lJ'3 ,cdX*Rei OO/JL?+fY NV㮿WYuxp[~aE;9)qZY4&<].삗A)ZwOSsItlKd\_Mb:cъLG$cY*ؖ+LSc?>f=gcZGbAZyE;l#Sb~KEI]flE"\:+S0BS%CʐD{`i\MY-\ȕHF|plPS3⤧>k2?Dv;ij]JId.g$QVwdhnZS>MձU,9djGwU`̫I87^@B9ur#IƆ97J;RSp;s4%V4w]oUW `vX؎~jƅoJr#ޕ±b lOA[ q|ܲS]$PQ[' ,Mw K1VQg\,OҲtStXW{@+ L@eιAm5~zkNg\5@W񛡉f|&y0#ήEC"c|bwB5ͱAT>X!ƨ ǔMpڔ1zp=_gA">'M$Fsj"^g} Phd t^2Ly* LIܖ>I3wp5~ޏ Nd3t4wfT@ eqN<^}OW3qBhF4en2V}Ѯ^p[ڰ.1I7{(~X.:0K$uap)Dq+qzq-NDBu7Z'W&D{A=2iG:o͏PV>Ƭ-U\2]oe*qޯmd*9 wXdshapjKT+\@unt}+ QjTײR:CgūY zG_wXN GPD߬s;m L6Du [ON$˶jn9#<ϩ͵w`8k]FxLˡO\rVf2~QF$+rnK:.B\t*ņWa/I:;eLE um?gIAt%^Q۸iéU-{b^G#by%UM@'?tVu5brrRWRjtYm-%1k#ʸBlHMsr$Hslkcl?[Ψ'Q0Q%6ͩ9gs} r1W͈%D)di]z"=Qފ})ş¢C}V&4l*=idwqޗK2Ba>e2OCȼk뷲zTv3d -tr<+ͷ-g-/[rv 2 d*"2 M^3l:` jK"[f>bڅVA>4g%CRuHq9X!oLkT<ң"]ѴLKIcL:uzcBGd0Uplb.}a5+ L1xP`4%Y=E? 
L'uE6[@.Zѥ%Z٣3d>99r1ua1Ite[f2˧%}8BS[H;[uF6c E/n.KAVbg}eݍՎاܮoVX.KlT,*URyWR$EO|9㱂t)M\*F_1M?k5"m%?0Z?e$0Xo%gZQ2Kl* F͔pef`fj:uW2P 1 zI*Zt,Kb<9V;؀P z{ѱ4Ȓg_0k..ǫ[G/z1#,ぁFPlD 疯Qp`%2t1z"K1z4(y=~FJ2o]9T_8X p.sv*zY)Gfiq"dA3 D\h>~}/'?}HӶ132Xoё34„?ag K\J SX%[?}/Ν茰={`$cl}=y5|zx{gcؠZNeޘpu%ӏ`ÞnoO&k34=)| *7 !\߆ʞXV =|o>wCs;0UbqGX{Kn*8LY6%CMr I*4 y6\Ôq6L{a \r>^Um5Odr.}"pe"0rƵk^f_w{#+`w(}9#+~[ʩ;?\tgc2 }۞.IcUBPF¹I8M&YD%U]U{B ,Æv@,a<*i2RJGS8fMc{¬%r>2-݇qw߇"ά]'ߐk`'o`N1*W9t]/;.a_Nq#s ṡVZ/mAoR("ePtܴcw'mĸJ;hZ6WZLp. U=s7;1XmXpycPf*7jqUEYLg}鍛O jn ar]JSSi +]{QJ*UaGͮmL6nӚXv (Y?q-/;CƬjݬ8)4ehgu;MۿI)XD2u5HL?Nt-' du_r'YoblCWG` ;a;G XTxJYL_" {k^5q3:*-XZ_'E^o,R=YDul{$~(SFeUM׏%Q:6F'YjOW_9ߨ e4iW\`y[ ozF5{YvaYh XmSl;No'MhruiFՁ=.//y j#h1@ˆjtW.ۆn.G/,0;6l})m{(eac4Sw4*n"1t:^A^0:M<~)y[_ KX#a0HmЛ@ em,irN B$B}|xp} {̐5x|ӻ!36EM1ߗU: !V^r5S,^$&DI(7<w#n8.1\oZH9ק7Z:ǥP[A|nͩ.a5؟zUz0(&͒S3<#ӿqbKvF!LD_Ft.>`ƄݩOE̳Oy^Rf`yoZcc\O=Ny޼oa\X4iqKܴ-F; 7_#(َ!Fg|;h˧"PoBMz1$1(a?\YO7WSa-N~fM%i;rendstream endobj 93 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1926 >> stream xTiTW4JlPH4lBh  aYBl-U\pT& ("#2CEڸ I4EYee'#gs9x_s}$anF$9=H,RR'+gn.ٛqU&7ϒ@di^gO;@,($?\YuUn +#S-yxKS4ҷ¬ t!)R(sr*uXVNBit( fr Eft{G''"H"!bD<&0šE"]!fbb1j?)%ylqo|Z sbs4JCRׅj56~p`H,;L6_޶P '亅_I|zs [ e)^?0:8>z |ؕ-cl<3-jD 60NszI bQUbj!@/3̲"?N[C\(QleXgw–< >b`gfO~Xt[ze`sERzD(*97d'Ң<)n(189g5;<jyҶ47}W\W?[-l;Vڋ:gE<pKKXTN{UL Ψr}ȐR-_4JKwߌ茝,7ddY-So/ڃhՖ ,vb/lcZ0ogξu0ubnQ?C-Oߪٛ-,_Үv].2A}Gx!2(wu2eK<3]-cUꃍU4V=4[Cr<`\-<)8]SrAauIZt5}U{cmPl}c,m  Y>Wx.`9ICJ:1#EI7y9aD\^w!x M99C'ذkk^SQ)CHRKYherW?^qp\%:Kl&uxI;y;.Ŷ XN/PZQrh֞l;\_yUmhJ\= d@?th/AXtinD:^@s)XMP(})$͖ɒ'|{) ("^%"Z$ @B{Bގix%W+jf1h@) F9C.ϡq f% $Wh_S-$@BSoABQFSFPo:de71wLLq#tP|!APؑ&˦BHB3XXO8`ñcĢ;a>]NȜfMPTg|o'F8sI3+=u90sOx.v/*\lF#S2m3[3{o~ ypO V1xV?˒ O nBroA31fډou~z]>A E<ȊܓQ9ȃvx A ǥnIa!z?S8Ýe61|Q"Y a1V~BGR s Ue%. 
8aF]d˖x-d$jȻ߾8@6bxX|0~q&ķAg(?1C/Gax6uw(DUfLGrrA endstream endobj 94 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 536 >> stream xmkRa3gINMGC]f&M֚-,2=;'.E`n.R9bu|^x.pH߁8/vv)Rf#)#Jo[ fЍt'o씒J$UA;z<>|8-)X4Q5)j;\!9)Jj](\t%+CRx\Iʤ9cѴg/yURpPKJ!*Bw Q}ltڴf!$,`h<Xm6^*yw2Rإ@a<9,V&=×Ew@<᭍bZg%*dlb^߳me?p-0;jcV ,>|^//:_y,IrkaYE k[ynVN{fۯH?r1' Dx}Pqv: l׌`(yaC$`fBFendstream endobj 95 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 . 2D! }I.|D/J`H_#490Lu@vw=|*Boh )jHUյvIG`ir?%Λkĩ4-Mr{&SA|ASuendstream endobj 97 0 obj << /Filter /FlateDecode /Length 4170 >> stream x[[od ~7#@S/m zEڬH0k{z=Di4x7":Ep,fy,~/G28s'@:}) 7ӳ,Θ8B,k~?q7qA 'gKX.fQMZx5Ӽ0Nц8]H8Ho78FE9]#m#& gztmn=,tl2ӫzӦA|1X52:Xy_6qqrV=`"ަ/2d.Sh,B3? |/-tUwƣh-"\WYtnӖIEypв--L("Y@[7m_z/n^yaS&mHkΘ*y6&D\!b0~P%.%=ye eVރ@ZٚH7U!͹QW|IJ,xSY6ĠuY5hR6G?}ըIUQih$u(NNp@ s"u'Pj MK/SMPBNx.a).T Ha#]pΜ&l YU vܣ\,A]u$WyRN^$JCnOVd|1VRe.BĂ%Gukq]cl V#"ʨtN=#jy!j2lVIjFFϕ 8ea V(OG@l@B8бa:ءP2b%%>pM:SCҗ"GRɎ^Z5K[]etG(i(2W4}ǧˇt_x5 `rOXʑ _ˈ #.FG4|5\tŠ U" SXc<ܰ M @0]ZCq=8рnW1e@sb EPljVX[2jgm(PhrۊzuxSl:< !ky1[q6 *hNBQjW(C{G2Re`p5:cd@ih X l\X[Z}ø>Qo]lBXҸ9hh29w1:@sjJ*p T,sS@"{HZZx|%ad9+?Wڬ(0_E:wR5:l8btZ3`9 { tXk0)t:>`'vY?:p8:|5\pQCvWp߮#- >Z zu51]vιlry~f 5. +t\D{Ȋ.4j׆ КIG ܘsPlpl-taZ- `;]|lɣߕh|P>1 68/&!0X K e=gj=lqn7}p\p ifYSm6x#0Jyrΐa #8]j(sχ*~{c'@8+l,ˮ\Ѕ|buoߡkdy9tYWûo+p͓[|RW/pE)N{!uu #x\p%L4yM`R+ZR 9*R"j}iM̥ fmRER/1/`@ȥk$ӍX?#)$&ڢA8RXP5.+$.H4|MT~*1p[HAlO<tp SC"sC>!ٮ{2Ƽ^jJH(Ӗa#&ǚ7,n  ,:L_AAZ8iA_)moTj˚X U.óܩkW0A3EH|#76yl>eU+>tkZDG}WZR#x;kD^ U/4r]ϑUjL AE[8, !F-5gc %MZv7;.p?JE,h9#jmu$^VbVmEYA@*VX5n67-M1,C>ht(6|7}RIR@yC۱ :[,9M׋&is՚9.6 :|) m $Tp걎A.;7dr8wc@L/z%7982i7OUxP~*_iØr)C5qO>îu.YqRDצ*,gc:Hz.`"KSC7uxgGۑd!v0`O<>4݌,aoڳ5[ݕj!+tU3z4BW;4d)x k ^sDz^[( tYk%&LS[S"';DOGT:ElϩJn2/sZ"Lه7jkKzxFGM 0˘mA{vymJTߡSqub_)4Sz>u`[pATaz+V͢mn¦TjﰣM!!}Rҁu #~41h]OҋXFS]<{Q! br \eˇ%Uqɟ-|,j$@ÈW@Mʝd4ؓD+(a\•yWl?[k3Wre\rvJ4umJ]ӏLtCp01aA %FTg|x:e<&. 
zAovr7+X뚾=E]%,?mҌ -G/z0|D[ıWI<\>g e7QR3վ"j_n{ːDIE7> stream xW TS>1xPsPVPWss) d 0 & 0$@ ũJZqhk_m:V{NuWJVaaC0l[̈~y;3 9yJyFό*HNⅯKrD̗TQA"`~tt~~~T7'/L[4%2W+J㧤 yMQn^qArJ޴͙Y܄Q13fΞ;aނEGbl#ۂmŶa۱X$}-V`+Ujl[aX6  {blMlFc0l6`FkV<Ԑ!qހ^,q|4^? xia]/47H;5" N DMyH(ߛXp1SNQ,nAW@fR5frEU͉<U@ da//~~+Qtre1 ւJ/dAѱ3ۘh3HE$mgj@V뫪ep]kL>{vNzVLl^p'j\e;p4qשZN '匰4a@8Tg^uXea-mf<]ԫ?=b"-aRQTJRswb~|_sp@UCk5Z (ޟ"'L:Ű/7XG4qoƸs"P7Kʕ#jM+0ɂ4z'T3K@29E~~t}PL?4>`BrP?TcC,ŠLGsA{]NO#- Gi:uZ^f^V/$JL@=kft-K"> * [ r77'0M5$P<8>vl}a=Cȩz6X{.\`ڷW v9~6,uœ?NNF_GQcѤjTL'!3uk]3_:*`e=RJv(V D RzLfࢽ;z;g gᤶRkd` &D*뉰 ' nݝ&t  G^8 \&;DmTCp#ꕢ^2u k m4ymBI(8wSȊn߾3{M|4<޶ee)JJ"LXL!W:{zs8 9/Ģ6]h IHGw1?a X/l*tٛ[kfj>| œPBF/ hD^QaAlTj{\S<>.7][?|p`S%&> g57(t0oOz~N(Mۺ|GzgymsKc h]WV@Sy^Wިjn@tlf[: &;.2"=ՅlJBb6z:qW:n6}iLR혔1n6w杇L0:l2,> LF3#&~n|y$8΀Vfs!Eg!jrVn8?WB4JY5 TY݁UݳiԀQPW(SS9JAA]Q OPkI-cDCH#3\*dcmU- =:SC%5rfEb\VUA>֖(% ,ڤ7V{gÐc6[I`)޾!C~A.Si0U a.&:.:d=Kmpz "Q~np!TIRɢF4,8CP)Qfw0 /G( 61Y2Ux(Td!kBUELJ_[06)> stream x\n}W[f!ȃ M V$E2Is{ff9˕r`VwWW:UӽtjНϋ/~9qq}yɐUME5(Mml 6[t7& I?n` 7[l6[5؜]?E>3J!bKH~9Gr5_]kV>}k߷U}NaTRδ*'Q~Zk|YP.鹅<Ng=&Úf.4-4wКO}ז`w}l'L]8|v$ok\r0֘kXm}(_4eeXe40zu*Z]c7/R]d"2Bm0yAɚu|Kq'+rS4In nGÁ;4)_`ix(C6.0s….wVMVCm&O#r-o娡ߔʡo}1bXm=!M\6Wq"ڥ4}u:"6d]$Nߕ '[S2߀rݓS c"s^RXvG<IV^ܜdɉ@/iENy8 52~ً}.:Q{rƊ/]ix|Gߓ2fMSd.Zݏ:j>""`&nͫlDZޔ=ͫ_ Е; ~H<R<鷮b+Pc 41:)٤e\a2MfA @ܒju'^1W<+1AA2=x"½Qcn*5:?AB5BnUרٔl57g˘Z)Yl)!t`Ȟ>OR/)˩]爣 ɁPuRc *;Bk7͚y=Qٺ)մp: kM? %&_v+Tꈄ͹3$li"5KX#O|yY[<8nD?oGMvLXj5D?QMk>)ɞ~ߚ/ZӶ揼8 O |ݚj0 eMs{ڏC}aŒvOw$N#Z+>729>L5D[x=qEVh [۽.!ĜoL&(vXi8;cqzkZ5M{Ɋ1t1dICk2Kݯ6VMkޞr3:Ul!N{VTdNWLizS_Kso {)<ՙA|/| ֕E zgidNXADgpΡGb4ϴ}͠Ӗ }pHp_jg `J iPvS]\ַ&u,%o޴u74gI`wCpvU_ AT%U`3nMtψS-hH lf6ږ,]F2]~YcC//j.1D֩ݙe&=\OZD'G<_L#wCP4}ib#?nf{[g,ә[iڅHtC)sF:v>fWlaIl1_H 2f&;bVxKLϊfGbe6ZWYiay),\Y/Gl22o'|q=r^B3$!# ( > stream xU}TT?wf (uwD D+-V60惜F|r XBGcI,Ar}d4-e[{;o 1 3)\Q(-ȟcM85Q#5jªr0Z;'011+g>d_le΍%󞚯_~_Rh/-颢l+Z饖v}fծ'!$Yme펊4l#^FLDYȀZdDI(-EhJG BPBt(~d♣-MIՆi˵WuY!ݿؗ v;{:BUn5&ZრWA?U<=zq5S ڐ&t<z`Xzz,.`L'uO%\4a$%sbHRKXzoҌ,Vs5! 
y ki4Dk f؆SPXxZq"+N9x&ؤ+ɏd' ߽=|OT!թ.^|'AMf[k6W 8%y^XAIuF3#dHԂ$-n0PRfx>kZх˅ MEreömԛ{L&Q|p},7Ըe'nvCVQ4޶,_h2V%CWEۇ#X?,vVUVV)ol8ɕeޙ qR WWcێ-M H"XEKr>iuo{vy߯k9=W>;iѯ`+e!#3p ,1r8U,'d7~ƣD5% _`3K \Uung{kZ1U[Z.F8-9nu|wx4XNBȕObdcYXsya~A4 jH@x$И=rnȗ?c0LcԽP!6p=/bύ': !:"sLWZ˦Co޷O%uy >?6:A)`T1R}fe!/0hk_WًL+3m8^ I8wpF6떮1p 6c9Yra5v0}S5If2<)4*HLr6WyQKr#0WSa>n:ה2pP|H]yB?%|(b{<v.Vi Jʱg˯lOt0,r3?Lڡ5n &iU<1dHL0C"o- oP#Đan6Dj 1Lfv_ NhDm ɏ5w{4D6ճ R_ L!8~>{endstream endobj 101 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 ЅUqP(/ Ct;̇:Z?Eƪ@L4D `,,O\g|Ix@w q+q)ZD ĺ;{FVI%0l>nSIT @6榹I*`,Χ`_CSxendstream endobj 102 0 obj << /Filter /FlateDecode /Length 26864 >> stream xϮ&9v乏I^BZjRRժ,55C#C͢;N?<_rl/!r^=%yv!pՇt]/gq|?+[H[3oU.xmٶjoUIG;~ٶs\[/0/强{ou.v7^9yr]/|WS9-Z5yB͇v:z*R1~e\VW.ol[)تWqJ}z-m+G[XӮ8'\T8-_E+]}4q]ciz5>C򚏟_e\爴K? OkMW~o5):v_Zr<_|L=~wǓ2O?}8YG|5m\~x7-x[Wp-O?~ηh7׸Fil_r>k}m孜uj';s6o|qsr/Og[ޟ9ߎ $s [%F[nH"7w"%* ʙ=Wq2"0hoZ j#r.t32:Z=3ftǿ҈qgdy?#'i/3rW8>#]g8ѵWx/xck*Pjǁ7#2H=2#i*tv[<Am:oGE$fdȸfюv?2n\xGd\ȍ^|Dlq[n`=ҸZf#}4N at(ťт/Elr[>fK;Mѵ&SxFBsK5qѵ}nȅ;"ѐH 9+To3rΆ<5{\:sLUpAow.e6=VdLgs"]{uS͆E*ۊ{^ͭ6ِ畿_7"[w~F:xP+.f;Sm>Z-׮2[qniz􌜳J{cCKlh[sGNjvGs/YHE:\]+G,&'F5# !3Rq|O2Wǀ3OL4u<.Y0[Exx;!o93<Ɵ>+6 Nm-ǣ)2߼#pϦq>M- R}ezts6=.ilhxPKɖ6_o4mwфh"-mw/q|i4:m62Z EC+Քk2إ=[pɧo2Qd< 5n =2:f9_ -b*ϏHU3lje$؀G5e6-2Z].Zx9ffYF#f4| *3I>h~F/}6:/2,[[6ck4:qá 6u\M'p3f0_hmPe| q ҙ_l#;~KL _B`zpU$͸[|sD4|hhjmܮیM!Aa:Bb0lCS#3l%90lZ/ZZ'>qa***Q4BKk1mGKk1ժmm/>hhh /s8ƌ~/۸jg~"}v_p)2KT_x+#lj7*7#;Nc1?f/QD+("0hESO3Eh>_l'{AfLmhkl~m\4EhjZ/qOُ:;Mc~y_1"|,F6}FvC Q3Bk#0}gn0Q{3L7I0{P 7S33LT 83S>377(@0#C:w"0{Qq:Kڄ&&f~ fbx_/W2vwEf7u?˨V~_ݍeGy'םRw^G>zL/GgnYq}&[fd&#r뎡Pt3竏w43z Y!݉Qq!"eƈT}t OA/;Kiޏ~䲢ϱ8jE':./_lNPd&5≢1Ic$?ˊv<^9~q`㞍ȭ1n*cչ6j=^1Rf:bQ·82#ִ F䲦<d |;4JFoecpe IzM=VQԻC#u_Bu.Kyu Y/Ge(e7F_uMhrViN *E >#gS)嚏ݏJ+rye$_u>##bˆtf^Yy*G7w|ϼbHyF#DCÐ2[̼Fת#EbWY}j6` ०HGXAEKC-&1pΆVΦB- ?^")7m-M/Qa6 "44 wμڠڃ';KG dzFf^YT~S=QgVϬ,@6aVZ>1kH*GTqH竿?AZY/eIIe6mUɣE;m373rFU"|HQƎ`yv#r^_7v|eM-*+2x0jcD=u]cPE;Qrhg`C5(٢Ue&c6LȎf6h}ȥf6s;@+؊=^f:Y?vdm߮R fAv1Pctb5Q|im1YU*6&3ۋ 
(vQv̪\z.ԈЀoӉvYfFY¨*#213mpϽ"1mB- YW\38F/(j2r%6ΪiL*j/I%'!4qf&ijT"y|IeB)FZC_"֎qgfCz%ar|^ߊ'aJomȔŮfY]a`cWؕ+`Wx+l# ],tE ]qd+]]A_YN(q+ n%1߸ŭt/neḘD$1mJ+I \H+*nʊ\H++bpeE L}6petbD$΄pcWVJ7~JfN++b|E% _Y+1EBr_]1rqغ+7 _Y+ |E5 _Y+%@b|e_Y+[Dx4,|\΁ &TdW*Et_Y+K,e`QhX -eE4VcɇҲXg0^9la,{K3c%/%PXV/ք5ĂSibA{%Jbeb*.eElq,[ KU[^ K qGd, R % Y6mKfZ UxdayYnիdi/cZfLܞ*% YA5 Y0|,]BYPxFYQE( 9m0Pp8y(L?/%Uƃdɧ4rPbY,SفLM,bYnU-˲"dY6N,KbYI`Y&IJbYFkӱeݚe 1˲"x3XZ4,Kmc b,-h!,e`YX˂1bYnKe, _,]fYQ'˂wBY(KW ʂ1gT`2@YnM. K%WS3fY'5eA0˂bn#edIXV,(M3b*y c'β"YF#C8Kf4̳Ӏ<^4hghf,N,C,[@8β"YFa#ς g*Y9Yp<@,dB(,x̳\1 m 'Qgqx)Y2 %khx\X e\%6YC1@,%BTM&r&s)Y# e<" :ɈfK,+bs8܍i-2?fs >,->2#-YV4 a,`r)32޽슃f6YFS[4`-B0K:fɀXbY& e)&DYLV, ˲IJ\Un,+bji̲TUTi˂W,Kkze"dYXc+Ybz,I;,=T,Uɂv$6,9-% %ɂiP`5dbȢYʩ9@BYNM) ^GA-BeX._X"^z b9,EU bz-eE \UX@BY1R4>ˋafcXTvkbьD>XPry7XX"bz8tP,qBX4Q,J*G)N}A,Fr) Yˊcx*8bnq,Fc gq,oR.,bKKJ1biNbO!NbLb钓-%.߂X.eT%X:bbb)Š!+cK=  M@,S-etb t=( b,.zeDt8ʥ߀X!LĂvNbIB.} @L%\r>0+c,ơcKT9WXp{8Ec5x.Q+X0M$&0X. cMS coK3\bek3]Ḵ"'IczDzXN)8ű4S'J˩C J bGbǒǒ>R޸ύbѯŢaF(b}BŢRXPbSK(߳E)bI)3 cGC,F@,!rq\X1?""X8 y'XĒT bb*R2,fE˩DfQ,o~P, pXL,%nASCvs{X0#AeޢXN~M`nmILl\)łobg*X eKKw#I!]X" Q,Ѓb ‚D7CLq!,兰T%a.aAR;?K4ֵ ’RfX ˊa)&Y K-4 %ba1,1j1&#R1aHKz9^wi>/2'l"tfp{"=2f;'8&a`k3Ƒ83}לMn͊m~*= ĆTN6ʘ6fE ب6 fE جGixJ$QCהp}1_"lNa#ب³6' QglVĀMDY6]&=،"הlNC/D$ 6z!_x|5+b%'6&"k4ʋ"kư_{f5+"*j\ 56"k. 7FY5U3ɋWN2k1_R-bV1_kV|Ȁz/>_ӳ 5~tU?`^)l{,|ŀ =RzP lf 1j6+b&"l Y6[DMfl`lf¹#6+ #'aS4F[M"lן>1EuEl6[DM:aS lNI aj6+asY؄z3 rbs[ <#6&bUX &hxBle 6܆ JI@@规"6Ĉ #"BlAɗ%#6 @lʥʝ؜^ʚ }7Blbn8@O/4Dجzj#v`@4> l0; 6[D͊-T[،ǯFML!=l1`=`lE&lNpXis)C6QlhI6'tAS``'6g>_6qр &'^10pb(ftGד9fytM=Ckg1]'1]7kKeZ+<¼+kVt͢Lf]s5#]3>66k3~Ex JaY M(r!^5[Dx *jbUWx Xk! bz:p2^*x ,Rft )B\3z49O8k%\b1[S=%lM|5 NkBapr?1\&(kn\tUY50Z`5[d~4`h8SSִ i5+bw3L`b.55 >5W5YÑ֔7ؚr 6ؚS [cp5~[)?ckl;[cf5`k F-ƎY5+`%|b >.&!^c[k$ oV9(QL \`׬<5(`M_Sg0kjWMo5G [5BY0D)kfY =B&Px \5G6=cftK21a>G.0"lCas $ l&&yj<-"&*x "kFGw 6]cHؠaTid"¦Ńj(KsCfdK#VxM[8ExMkDxMM.5p2^E0kmB.7^Әi[sTٚ1[sX}p\,"ka{;Mks?LDDL]^IV'k0 5[Dt&[k01]sV,kkPa1eʃ"k˜9I vؗ`3bA . 
LfYvlO( _Ta !}k< MatmGBM$k&=5#Y_5k&iļ'9Y60l9`szĠ`s,6yU j3LT7Alt?`*ↀ Ş9< dӂpn2C 9rY.vR\A逓l`9U6Pn&T 6`c9l;lNpҀ$xlROA6Eؤ=)&vAo b v"6;  l۲@l$"6mjLXP E@TĈMn+BlPP@3烰#$l V`N; hlྫྷ)d{)Y䗄Kfy(l<ʉ)YnA`}^̈́HlP+fR6P0e}IeSĔ TvEDـ"af q?mXȰE" 1)UXnp6(82gS0b~{|eڢ|jD٠p>bJ?|lP (چ jH6(= l"mPbG6)2f#n`YYP93_}gdBalxٜjͲ22gs^=r6'fJDސ9Ku ~ٜJ )a6iQ,lP=BhLٜUl4F ~JF:lPZ0!,lSrf3u٬1T 8dS`lf]asHL$mPr!ise"AAT@hz0@Y;6E!hs]i, ڠ `+Iڠ Ր8y6[D )&mITtyExA'=Ps Q`ބ s&cdo^ҿPX#Ғsix9M76 F1؜K@bs.M.6G\^'1SS~R9JSeڌЩ6; @> ЉVAes%AˎT3qN>sj`GxN;cMXxQ$ǵ|qg9n%,ˀN Żi>#(YNIsٷF|z c>gy'9l|asˆpIVDs]/B#5":ś&EFjDt)AEBtqp핂AeGs#b4tw9ǥhΡy8ќCԅ]%LϘ r|N_s9Fȃ1/m¸?eT=q ߛ_ k)'8ɪ.ߛ,|Sp-TD囊|k"S|7MŏdrX`]r 2ҒSsIr'I.'.g:=Թ⤱Lp9IM&;ryk4bec9[`'Zg*gz*gkU@rR EPN,HP^`⟡TEoQjdGP\L ΄rJ &NQHNB!ª/ht`@I#9 8pNt Ɂ˫Br(\NJaI <#9ٖo,ّ; #&)*%Lk&)r};l8o2myH6utɓI̎i:!9D8o$'$$q2BrraQb9Dh5nȁVؓ&HȁKleNf9&RMv D* l&rN4f$gtFrr<-Frag*&QZ3'9 C9Qdmob3TnyzXJAr_|@9q&P*LwSbcrJxmA=DNj7LFRFH#%!9 #9#@rJAPNLN=)r3K(䘠)rPmk(g7M3k*iΠrfĊh.ʁ$(猥&~FTerP #, ;֤jAP$a93r $g&_"Jbre4L&gS S㦣s5Ɂ39W,0g&yq'fr19F#1"&'9lH!ZDN ; rRHryr-XM@N>I92 r |E"b]SV4%%"rz_&rmC"8CEnAD/M"g-d"'p嶻FP PNT@9gzOrN~Pe/C9eXPNZoA9Uv˩Vldd#.gyiy@s9q83*Q `)Us9GxxrZ0$ƹYsC.w-$.%Ӛ˙Ĝwp9-\dsc.'U5r{hkhq9aZ\NiƑEG-. 
-\9rPCYALr`?TC.'ɾ6r6!r9=@p9iAVrp \ѽВD먅d3c99c9  %rN#J`1r.M0nT5a9Uer[/* 8)d8ir4 nW\Ng[\Ɋ˹N˩a,,ڹXN\rƵҗܧYc9b,<׏su"3 _eD`{?o8DXP`L.TE\䀹3|`)FH\ƢZc\ΝsTr吸 mySh8$q9GyѦCa9iI@$9_H8$sSʇ s.-0$,Csd,4r0" ,Gs tr$G`Nr0_ ҃sϋ˩ˁ|F,1+,"C8r.Q}\~F,d}`?|'ßrxG=S}w:􂴦D=a7}xoะ0jm~xsO0I;Gv1W?G1xOk _G|_F3LTˋm~&ۮɌ}h_N>4q3a yWn CW>u<e8x,eH}2`10yo|;quEZ{+}?w~}48nr;ϟ֟joן߯?|}ǿo|ҿ~Y櫍6ן߯?ݮl/B/&RV A/AGkxC$lxuL:d[H>8`fuo*"kkٶLb <~ٶs\[} ЁrCx1uk\&mpڽ#eE89cmhtS_mh H`=zhַfFw/~ztK_~槟?{Gr[`;Nq_d:ۖX/y32ߛʆ^3u1׳;5y&ZP;83RekەRlĐ%ztXONIȜ'9XG(uxDf}hPPXJ=0-zd:1hf+2g us,u[D:GdBhȜ&")txDaDI}Ҝ'O#2!yieKLnӅ{bGdrh E{;G!5aFRGdJx%l3BF\R4y0:J:<"spP\sHZ-"#ؒWb6.vH`%VFbـw#i ISfP#l2/aF$&sDuCu#:$T@!UNa-|%wH=.^ln!L܆rGdNȵې>"'[\Oj;lMVr=ЙGtެvHs/ۊP#i;>{bGdtS;<"SLaGuPPO*;v6Lm>J)v8 YMo]0RsiCzXwvtMچ ="S"V;<"sX0ICBA  B.Lr %Cek)wX=rO=BCŠj$wHXQT#(wHX@CCCMloWf-)xi"(xH`ilnW,LLÈt/L|"n6et%xŋ9Ot6d[K𐚻V ֲPu HQ F!5wy<$ 'M0h.|q/<$L=r=7aP򐰖ImE(yxD.KҜoQ0tn8<;@y/C ?y/:cH<<<"LK`;!a:=<"S0_"L=<"KS@K@A0ݼ{i&aysshPUw"C9ٚEaDr nާkiښh<ɬ1l3R<$YIp|] I񐰄_1)3"i)FS*l$ !zxȥlj݌$y3<0ې;S0ռmW^2՜ZE38%qdxlh&C!SϋD3H!n(zH=D3OC0'=+KG(zxDlaDjT=̄԰T3!<]0" \XD[snCK {xdv3ґX&È!+I0|^{P‡ N 0)|>>>l F~> fy >dЉ!6)I2!OlunF:/;",IA]r‡>Oaf:e>d[ XDCtutO2X=B=dsxf0i"kXS8KC?.#0s̜‚[‡ m>4蒬aiﴯ)!ϕ)-!CMʇ }S|51ʇ)?!0!'#CAWR}@CN.Ck=2e)˚tf3!,q.}PM_JAC2fIА!ȼw36T>d&%w|!uD~ʇ .O ?#{XFʇ‡=BCmn39*y~%yXeJÈT.(|CCI\‡ !C*Itk@4)!f~9>-=$G" *uTy !#2R$RR1ϡY(Θ%_ݩu{Z'Q'!2wCP2˞/jKC.}曺i)vȘ!a,|SMQGg(Ŗ!InVb<%wuȘyh2^:J! nRD끏^TR !a\ J2`Z}wJ2j^bPQђT:+Dy(T:̷b(Iu)uȷS:̗u:W%L'OI0_r 9T:dJ'd@:^Yv!,"٢?KmiK谔Rt:H'/Cٶf*ŏ%_7--J6s!t; e:dhtNu8_ ZB9,1=PJї[1#C?疖BQoS0vga|H0:l#kOCNu2]6umJÈxSbZ< F HLCAe"2|r^w wm -GK0UR0U|JPf{ء7%4OCZ2֡f.Pj *l:N:gp Mj t25 u/K`l8:Sʒ9cZbsǩuZGd8ʔ:IA{!pu֦:^`"|֡޵ٌ˒hiMiH1|%P pK_謴_3miX:j`p>kR\mB~I{&ex޻s2qLm_eP;;MqGSE{[rw{<{d>6h1k=Yx,ʫ?FU99xNeesR Plj ̻.tnտ#V߯>p (4pttzWɘDmqglI{6o1_KdwyU =^W3ݸ7E@=_z3QObܹwiUwmq2bc ʒ\E?" 
&[afwޒղQQr?MYR8+Ùos{Rj2Q*p,"JA?΄k=Neiޔ ɒ2d5'q/3G,[y)Wb9U>CΩp2J|1}sUvfʟanrrX]d~X|Gi%&JT @-ΑEO^C~Z@1mY$g s9k V=BhcjbȆk_Z"cRT;E/4<vh m5F wHjvNk\14Zy 1@9n^*(FkQw\bFOfn"Q]{SbՐ`ɋ5d>|rU=C sv=DWP3}%4X>^NJ<#/kZr&ewm.U`knXSsRҬfd>jc6;Wwݞ19qɜҴ92/TZ}缮~I95A <Ϗc[@M ;w+9͟&t/g.abt][|Cv z5"B"4B1fn/0i## &LX+ H{h*ҹbJMAJW?vNAKLL _V4'N΄>D "p' =i<TV R%(is*ɔTX b)-C]QM),|e4j--z ΃eVM; š&5id Y=PHiL,7dDLlBGug2zK~mJ|ĸe&HeQy΄@nOi*b{$lo}EHoFDB;Dp"ZlaFy yʼnr$8a5'|D2y<iIaN؟7_MX .7SP`HGڛ],@Q|ŋ.վr+ w+ :aFHX #F!)x 9  =4D{FA4@h@|^5WB<ư{VU-P-B'fFp ;ov[/ uN9>tޡoB E,o򳮿@53P/V~4Zi] [D UaWÊaa3]U6:IE.qY+bKnbElvEdq ]eEdq#J{)BrA\>""JV"$ Ӻ.MD ˒¢weW hBf:JT=R;T`!O`XO ~=s*֢zq`nE=dP(SpGuEF*oGf`ue'Jѧ"T;RM,͉_ѡpY?x!T]Uy{GUUUPFÖ3!eqХ=$sE\dx29d_R2+q=f΢$ς_xG?b-B0;zMWnƔm@ƀ&FH~NqqV,#bS0$F8/2S=sFN;ʐpjUSF{J#bM<.l&6yǤ[٭9Ȧѽ͓jlNhjfmBYgb[[d pnΒrRBszsU.3~doq0C阘c̓< ^9SY2\Slٓ0&N{rD>O>$WXeSqKHLw11sUx+MN@B<<'~+9 ޘ{< h*X{BOy=tЮ>" !aʕ8ps2P9&Y IAAwP e궿yAlL.~u.8:9cD̀;YɈT#f+MS͕lu` ~WMƌ衋I'%G_9)la ^v8f h ^Dg35Y`%G?!U,%݇ `P١ɼmئ}4uvK>B{&0MLLBEw/V*S*i֩x3g8,thI Nmi_XL)1{a #\0"2U| xN$1AZszaЭ6*mQ©QJ$EnHP݉F]ԋN+_AacB&블UGNj/о"ã,xC?w}6q1oˢtNhy D5[D6 ȁ5ph3tuJ7&xi5E͗%$R\rXAM!vXqf"8=e04"WQI:rHQF"VObhAa_ YL},AU5k1%ŤAƷ< u2~K/L+ѝ [CKeGx4ipXT9+MM@!3X'iZ~ˆrYnȺ's]_VLN%V."3NUҴ,+ X6;M6 S4Yi+(C/P^0xvR ^RuZBr@}eC`0*tS)){{R O"G(2RA;2~UK%DyZ[~7VdbLftf0}N )54l'$d:%nB|.f- No ?I u{\R+RVK(xk(xŴRsxW ߡV/[uO뇭LH!1{+1$ַFG(1 -?[mlV0e x0Vv_.7UEƥ"_J-Jo C~H>ڞ5P߻, |(0$PDGKHǟQ+K72@ \6,) s&$ rmrA.~ Ny..3ev $xOs.Y&_aQ=D)MD$(\gEm-7;;=Yl([YYܲ(/+yovrյHH؅hY@lGhhL2+<yɊ%Y>)! 
/}yRY,zz\g]k:pï^Ty^~H/߅W*bA5 Fvstu6G4<dXN)tS& vi-cd缍956{f%E`ԖSU1s\a:.X((2N&nYpDyuk "Po68N.@WYY,Z,$mMaXIvBMd,$ѫ3\g-$gmq&i 7 R <8XN5N< qCp`, |$׶>Su3On[(Z3haMԅʶ$qX)ۖm7n;k5F!֘l3ַtS9|A|?gq-τjX숖g x-t3kܖgRd-Ekm&f2K3ӋGyi& ^W<^)(Xw49䔵y%Lr [ླྀ T8 pP:9I^, s/' 'A ' g-ud̎$ҁdU#2O9fq<߿X,KHbqMZ,]SηT!s!y)X̱t<\ U3"o$ӱdIy'`![o ̗-Mxu$;(Bt53*/]yy&Sky k)CzK/6L?KYYHBDC!|:)D".Gb+ĪLŪLXt]t.֫2]2U+^{U&2kY&<9= ;ֱ,S3.293X2݇fcYx.bY&̽΀Ve=32u/2ݻeBL=nɧimW%Do?Iȭ[Rҡ%9};dU|n/*.u}E/^o'T?IɛLxj#.˄%282zJF2aoyYj/egP6Z6MŲL@e2ejՈereUlee e\,S?e!)QDk N|l-f [_$B#i3? {\Xpd/6>Nl.b0Aeya[3ψGQ}xQ&/Vk2-|k2{k2լN5d V,dJcMfX]vLgtJxM&̉d)/ģ]¬'TqhF싓4s]l{j/d)L3*9m4j \KC Y^pe>),:K -AW y,ԼdMsŊLX{+2ua(3(;NFˤ)*0 Ю4Ů*z^azf \Z4I|1r8"Sg8pAwW̡0՘Z<^dX՘j@6^a9Y.#]]E/𝆹𝅥𝅮ES/7V s/IIh*7fE|E"ߠTEfԵSQvGʟx8m~k"E!br$<:{ eKַ@1%Bνg9;$r>{X1:x ryҏRk عAg^ ]-76Iy^8^ 8rLi=c塴\w)2bpʐ6A/ĕiL9ERp)R3 N:Q8B(,|uD{ #GA Elp"0/c A~Iδ UdB8h A8(i8/4!k6(rGN ZC7]A8i0V~P8xb(<9:OR8JV2B-ɹ|A:*IY\) DWHpf%I" -5$ g:π -I5Ng6A 3ʼndT_U!!9ʘB8sS (Sj$w!SnhS30o}A#P)%L7t#o} lG:ߎ/L+"a<\vOz5~gۉ"#{kl֗ܿutƣn9yÇy8{o>/ku4.7ϪxL|l4>7 JޘM~xsO𘝛!Cz?|o;/_cP 5~ׯC!# 98ll6vș?~m'&5ROsI{/({ 'ǖ3^J}c#acy{a͟X5%"{?_m?Ap|χ =^9 >N~N~q4/Ɵן__jo?|u۶v?a\4(>#eq|qwA'jaz+R|?܏->G'; a\s8w{p bQ+{tsWo۝7<a:;zNl9FJ.YMxϭ%_[OZwp1jC ~Hp},y3Mendstream endobj 103 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1490 >> stream xTkPWMZ3I3Lqj}?*VDyKXh`Cp# $< y >B[)ˠtЂNg?N9w|(j*S$4a˼ yL}k/؀OޙTb.p9gj̙ lڻ.d%KEbꕫ֊IY'P1"Z4^r4>NiD>Vi<drEx#>/FRdqAwqDlM"AZEfucA_l9QmHGm䱹F hT96wLz^ؙ">L:oSq@zG7uD_˜ Ԃ>cls^8:xD,Gy'ѭ[#!%n D\#\Ἧe뢙`3?0ͧQU_a0TzNw\!C ɲQl4vS0vw: ӎҮdC3 %mm:넬,a3/>bv:-0Rr|cs'8U+z[bgRQxF˪ `̂#e~zޡ 2Y vEs@WHƏirRA 7΄XΨ5Dg1Ae0Η* smLA`ZڍyΦXL[jB?ᛦ촜# MNcܓŢlgJruty0VYЮ18M ҳ@ bR .|] {\>4;Q4T s7 7qKsx<)]xdQ!=?5,mB ; 3%뉣3fU5<;#hkbAӍڳUeis+i]c1\ۍ(L'p9MS\ pZJm`+Fs)O2^Nvj7R^qq}P6XCx0oVsC5h !&q]$ݗ%iSQr (h|㛼SLJwHpGɽfövsl17]wkg 76j:gc5L_l$)N)`f:o_r=Gej~~~e+y4t+-[p oR@ޝ4iƔr`5D{<% BZ,&~|0-9) ĴRIlAH2A֣߀N @J nKp.x0ֺo'Ph`\mO+\aU+~!~νQrv2H@kl~mwl0*/g r3endstream endobj 104 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 BVUAD! 
}I.GOJ`H_#L4;0L| p oMTSVj75$;k{Al#0ٶ}A4Xyp8I.~r 6/DSzendstream endobj 105 0 obj << /Filter /FlateDecode /Length 1771 >> stream xX;SG&I*R)#<#B+k(wYk/&?! 痤H&="뙻{w M@ȇ3g;g0 ϽjԒuAK|1z9W$Ro Ri W[2fx˘\frueru&{+>>/󍙑drK2"/auw\="ZJ1G&yNNV3,̂#̈2 By#Vpj}HBM`JOdAnŷ~ I%e4r\y1YI.0Ϩ(g; ˌçj)2WHQJ5RH#(7TH%S lU#]{'40tЊ 0Q SSckIȒ/Z5PT ddT0j05%wȨ@b |jkJTqPp&*AZﬨAI:p`s#*,K57[( D{T@V+@\$7ĨNj"M`t-4Ey/F7iי<b0t

ruJh! /t6Ơ/X9>Wt$Іgc 8 J&\5vMd (Tɮ&).yv~wު;  mK`(,".ЭjE|]|zah .<]'BJ=b, E,VEd1(!6PPaX(W8(K׵(j0O,&p<*1ӡ5p}0!(i)M\90]pTKLSr<-<H(E7 \p]$fBA^eH?BNXM]D[)yY" OO5kKG2 bRgR:TPXFIi\M Vnr.gh3:+KDT4q4 Z^l8@};:vV;NTC)%cc*>t "㦹dkC2*Sw' UȺ~9JC_l81a<>R33CW<}^~d.ozY:L~3{ܻ<X#>Vgӥ/[YDG˸2ûTقsU !;Rv/ynvw0ʍ.Ιyr"e!x>x8[>觳"_pdEvݷGgl5.ɻoY-z[~mTߺ?Jui ;x= +5JnW0endstream endobj 106 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5293 >> stream xuO7܍+AE%(.5j\!QUt}79[=мr>f];:gXћљQl6c>nlmmE;ݰ/w_OG矃XPv [s^+ '4}jQY jXl5+zԌB)cT_0$ZB &4_=oU/] Οd,":82.t!:)6__50̝*` W-FD {FB/Reu ʂ۩&,pƪb5?uZǩ duj?;0k$zH}'7)hO?-``)4k© Y Ѵ|d *^20`+U @ h9q%9/!4 CĀv`j~&EXT:G\Q{@;8r @]VZ ֠bX&& BIIz}9wDmI;1yUz`4D_o@4^7zA dꀎyVP5>"~pfFS5/Z L)&bMB0\PWKgOBS}DNbt7#z2ER+P>sQk. bP D}@WիW49;.=@P')Ͱ 42UVrTߑEb&GG P?*4'E|MEIbAD`c# ?conQ.]B@ "(Z[nS 7 w}F@ŋ]g3 >0T~ mpH( *; D *(Z fO?|DXGӸ[ ]"GeEP2% USR>cEKi.Ǫo1w'@B4 ;}FPǵ_\WX&ByS I\6>݂qy :"Gy"%pGsYxMSF P~p)QsRbSO]xwARiz-&*@D3p!$htB:sCgjYۿB@ox4CnȀJ}K|ЍÔj6Ό(Vdžӟ+w#:6( >%Q`kalO't 0J3U}`kIڊN"\!~;R?ՀMCO6@K$+oVaMhƍ6OP!$&Tx_/~P /P& IJ(5K{nV0/O*mcd"}\ia !ˀ MwqV[uR||`7c`7 nDc`Dc`S ko$׭P f"Ł{jc7B_Oُ_KGPPP}hD {l|oY7^{ ρMТgԍN 4Kb!v#^y\o5wI =fgI V1{Ic9uT 'P> 6zv.aFcZh}"e_ Pbk8 t:"TH.pjx(/Rm=Àzra^}BqyT28T~I1QQNO1D+9Fg[Uǵ` nD0@Qz ~RzqE߃?xǪJ!CG 'y߽3:;z.ⲿYG0PQ}T\U5.D?ꞿl&tDa ʽ+H!@a#tL6wє^nI NV)'_؄4G"!D$6~i-?] 
zy7xPq 2hz?+k!K1\Y`8 (ztU2]mAQe`4E0h@#-kFW^` +z4 ZUmZD9Jo* Q`,dY *HtEց Yi2GnKPPF(ëj&: ##z?wn D+v VWNP'I_'xHGӑ_9[#v'xvm'}<m.mwCQC?4>u#P-fST8z= &,|S(_ ؕk]wE`c#kFjxwV;zFg@3R>LL?~G*l ufD>Vׯ EDp1wT}aH}:/,#>`轇@TW=pgO 0l`9,xPwb6|k׬O?m(J ?~@F}0gY;{ @珵 ?<a|0qH}͟ᣯ{KڿK^$Sܑw,1|v.>۬&ny=o1h#AS}t}g\.?;~`%O<ңVwɣ?Xw._u{\u?~< ;qХۯ?RǦ}0F x7K{k9G[qɣ3G{oRU@s } ,N↏P ԩSBzǠi)  k;ac̩VW_r'?@KhGo_6=`_ӵf7RW>Z' >:d9}4+7>ń ܏qתY ދW鑁qͅ_K4|O8ۿN\^ڿGc^LR%˙3g"#ݩt@U\ % i÷jS3;N}lWԓDDG 9{Ӕ]L@b^6ܢ^G@3V}=5s(k®K܁7/W\siM3G"7p  4{oW[ Ο?b[Tz `!nB+~mHF|ť"6P.&u`6%6~k}.\б{v8P(l]uvn͟_ŏKDӋ/D׋YtAˉ55U_7OmV m;l|D\K.aF ̀:fnrWtZUvG'ΑM/_=d* ]i\FDԁ) 0EPlx$DWwLu5Tz*Ea~E^DDנ@{>nM\ > stream xZKovKpNrfðY~=ÊHK.E.r) 9_C.n8_uLwΒR" `lY]]UWU|ް7σC_͞Ϝ1wZ6iIE7G5ǐa&}5w阑6'F0ynclDŽifGgI{6cr9/nϟ5lݔ`^FH.;Zibe^q+:xcs[4og ;g^5tyvDZB"(BNastdaNf65x)P+-b6^1>/fpk@u Sўqqw>};P/>ۻs>|5ۻ{_l֗ X^]_%i}yȏ?ݻ_~;?~OٟW}vy0JN w9 3XUhk"yT4e F԰Պi]]8cBH+Y?TR~T)4C!T?SI (!+4SI)`1_HJJWJ @6JA 03Bbp^wW?b L*ϫR^T ,x}\ sS\ 4g9Y \prtz| #"7hRcJDkA=y5ڞyT0(BQDv))X0L)]g*)YA*TRIB !l$;P& .8CyQI?n:;Qy^" ҎYTR|7s':hDf G4<|y$v}lbP2'd-CTLr(^OuA49x/M;S:D篰'"yëv |ʞ Q?4#弰a%ׅaRا7C>g[W਌$,%k0d?P"<ƺÄ́/ y[iB%~İ[XYX6zdpqj6@PM(D80Zt3uT+rabSx2Dn ¦^2D'k*%->kV_v-q=϶*Wq⃼n]±f4"'ì T*T "dRPd(F^Facaw_fhl uyx;*)]UI=OW<>w ud@ 9uG#Eq-YWw ^d$67GJ:kTVud]ֹH\ ^Da seAڃ šO/ۛ5A !jzİ;F#B^\zbukVJ.fKԾZF&DjѰ,%kCdY(KBJ9\k~q[ɗ&GcQPQ||I{م\S~yWGUi%EZV>9!Cthh([:I˶ڻ4-դN&),a8bwK$<NGz1qSRytWTsDm 5E7ePT QFWqE.Olx ;]+/≔ A$ʢs;ٷ(˂yy QĎP 3ʎ Q钪IhcWWo7QC;Șh>z`y|Q;.rRDɎ7ԏ*=}l5yӻYF[AyxZL5/#(,{07sS`𹺡uMxqU$ػMqśϨy՜WR[_>Bds;#{o҇+m$pȨFTU3C4L?2GJ93YM2vmk ]khbr2p4Sq^'y +/G-T[ wJ *>aU'$#ꊔG-) ;&q3R`oZendstream endobj 108 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 P ]ZUm?eB:gׁm_Xց$i, x`CSxendstream endobj 109 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4764 >> stream xEŻ71泈""QxW  jB4Wfͬ9uzvcXzggί.l믥Zk1M:O<ߚqی_ ~z1-O30`}2 *ATHyJPmҭ?4e$q mKPhX7~8W'h]^e@ 72o*c@+<0tLy :h'VIV@8=3(=1 ` uvh7a 
4?]8Ge"ңdbPS2 `74.W4RjO :x8&L rNm( A7L úJP|֭[4D9-X++6@[ "e oܸ{[' P8]ö6s 7B ^$A ! ic `6@y8HD^~] ͨu`Nh*S )p5gDV2(H}S_m%(J%2Zwo hs[" Nn.XW6tQ>@2%`_D#d[W (qr\;y+u?b?Av^(€*bɡO+DyȀv(֜ h .{UdP=YFE'~L@c`s.[qTU_ ￟` k;ؚeK 0;w! (?+AMu 2{"k *+o6^"a M̥I n2IPE]O2_  tl%N;eE>/Bd0 [ևIBB *S ]/_[һǠ,}H4H<4||V҃=- CEi ;wţ@6W%hk|!:&#wޡPtt vS3O"O^0~.#&cPXr!l%^݊Q2"׷z{" b{UYbq~A Cpo+iy@&D* U~V?o+8 M]{ak+GUb0Aa$@J&0 I1^u\7" [v#Gf]Qĸq\_{5QX %(AM8Hpr$0 ` ? :#e8 AbTzneST2wZm1 CS&YfE tc W=lji7+yѺ@PFzeufԏ׼>5 }B0dp jGGI*vP1%0QNaɫ?wm/Z#CSG$(3MB/#Yh λyzE\2Ec+0%AڍZDB뿫 VH2*b?fڙ)8'W/A@@lGQ#1/HfDg˗ōs;Bid+togn?$=*%ɿ Ωm?C.EfP0S Ou?ruB#eЭBcj~;9" ttwB Y6n TT?B$ߺj" *ZidM̀Jn:`/6 &>(6-Lk>^v\ L -ꨟu yU{Z8%hEmXԫ?=7"ѓ0{ G= ˕ DNGa՗ΤbGY;O 𧩰:#[[SJA[I2G bH>PeیΒI+rkY7I 0c'] Oe\,~rChm Ż=vnjw`uS%d݅t·%jI Plq(wxA<*jH6 A3~q֍wnV}!=bꎧEDN1,.o?΍"$R_3#0i028((9vİڣ?АBD>~:tUo=B4&R_`(A@"۰o)R_dyJGp6*{f(ȃI?`ƚuG䭊¿;hξQh$GdU$d`Juʠ 8ap# |%Όa:#6TYz + k~a ܰȀ.TJ0  1F\%aXdچmJ&tcyԽ7QIDY> j*H^U!)ja#tCw1|6 h,oyLLрvҫ?mь.p੧aP)[). 2 q{cߪ?n'6;3u'.ip p q 5͑v47>g_3΂'x@(k7ۤ8!ϴ$A^}ZQ6 'vG+H}#Ǣr $S|h 1tir1}4"n/Tبo[T8d< ?1g4t!v.pg6G5 CB{hvA*z;cxgX X(+S܎D]:I&%oSO,>6݃}Ey>endstream endobj 110 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5049 >> stream xŜ=E_1`Yʲ, aHH,@l{=uw`T3S]u{zjZ)ezmȴ>x`zǴYkNO#lθpQ{tm:x8}ܭ+Z]}ڎmfO:M.}wvP&1ocZSj$]R9&k[>G&c_mn I٦>eژyȠS9EƁ 6LSfk݋b՟:S""g@4i +2f!"[~ cN?2XaS k P!]SPu?u- ̋3e`9yI5TSKH"+)f(t>KwAu8:ր*SPԺ2+Ӯ}O&Ym ՘ +R՗3AW`\t3H?vրn ]1(2?2Ov(эYȮpw@?Qqw%WʼXT._5ߕ_w[FuAtk{*=Htg6@^}`MU`jHA]p@~Wt8up&pN.h4bs+aէG@@@N"] r(f#$`@ayEt#PM+E۱w8JA7$dM+2 a&+vD8g*@c2?GD*XQ<a"8AmV_&.Z^&p Z w/F/tQڕ0f]|%k@Ġ᩵ܕ) 1 D#o.Gv(ԅ@ D ~DO_(xxBNz k`|8@ߧGZFKA (>Q ?(.ǜ!X44{ˆ QݑA$kCtw,L'-Y3e@a-DJ(yg[l" -lD *`spETjoF}|J/sݻM;"|)h{(#g6?QopY( E) BQ=JT?o25 "8$0}B]ݻGgݺZ_@p'+eLj@A4Z_$jْFqrh(w8Qr;,4|GO^RQ_Zu[i:Bȥ ] fUPmE_8/U(DN}Jh&~4:pnyϭQZ jX7p-VJ"*4EgOAV>3zPᒘ./YՀ L9)zIA~hZX)D[U1ި؏[]'#q&)Tw'|&|"y*f)*G;2>q޺&Z6u @J܏R Gd>hQ.Ha@͕:1A?D G@#'z 
ʲMG`ԠS+`Rԩ|AE[?b5eR >e۠ӉH0@`ލ`ؕ7]82Ha5dPܹcuG+ (rvWhZ{dg!aw;9&"!,MDIoOGV[otN>9)#aZ @1XWXA7o:SD ֥#:OA&ȸ!c޸q6 f!P0ĕ  .+u~}5cEUaj3Cb畠^v _E JG.!d`Y*huYP2>W^3ZP<@]%`Q,ˑQ @.,%$D@P." I{W\[A3ґv@Uc]y+\|9:B2X/X"4!ҥK(coOnr&hqsmu&8#IkeE%A2mL _}PM)^,v+he} 2M1)2d$Š@%zER7tDd2gV3Cp 2&W \?zhvȤ'z饗]cnw. fm40ui t(%mz[҈AT4|`n_! gݠM086B~e^q / `VR]D[ ZT8?'CAWW<*JO? RR=z1Q'`[Dw1>q>;> i} @Eq!OP)+.&J"];gs %NGJ 0~RLPRQkU~( kS* ];yRuHu_ `f맞zj%)}긜.0X7'Yv z @O[NRZ \c)$,H_ f'x'ŌmgVe@wڀvUF&(8Ae8@7jdEwpɻ%`t+"2 l ՉȽk JgNXH">e^tG1! HY+wyeb r'b|:\xj!!py{&HzHnԁqHVXJ9… G+UE ٓ"L4|f=/:T-_EE}: ;IA=ꫯ wٳLU $Utӕ["4].$1hQwc.;;!:6D@ilE/p^0Bwa !Z=)ep|| y`dĩ_HEHP ]yQCZ\3h5H4nX$ m Dwe]=R k7,.N@_ݙRГ| ~:^P!S;6T? J*N!! HqF>+H1 p(h]v_GqU4m?vZza @~{|;OA7o\tI鑁IDRAs|M;]ݜpT?€Mc>{i@էx?@& _tq\nBEwPq"D K"1 }v$=u,VnS)`e* xκ؏t8|w$#(dfD3xwo2t 0T~& B}T)_5y~-pCDwG}jJS͒(sDB5g?u-LwQ#自 5lR}f{G:x_,AYu^2&IણG  0J^S pѢGAu ˸ 6ׁ !eM;]ϲ/P0z2J|@e`X`WdgNc[Vmǧb`*A$]87AOcELu&1=Yȭ_,ALT}4;UlP;"M"nIn1(ึ QF.31Ӆ~.̕ KAv4 ZB"n> Uyh4*w´u1OAb$?ԏ>;,7b@S9 [92@vJ2OfJuJ6D5`1C5ȭ'ƎP? ;P}s`rU̝KDt͇N}P>eP/)mS3:$WC4"]CI9իW;i gqu~;h`'#DgnE 6hI]>pş8>>dk} %qZ#: #: (7߈" +S9R@yZ*>5I]*/_tדg-)"и Gs/žS`cNP. P+Uր >wͼ@沐=2/!0߂,6B~v1Q'x1)h_~QD rոۙDy9N1 &[?fMendstream endobj 111 0 obj << /Filter /FlateDecode /Length 5217 >> stream x\Ks$qӪ÷%%Q2M"<XX ,QݕU= Z5/É={_œ/4Iw'B͘U'o^/5c$8NN߽vz sT*qWŷM?wr|gqz#mj9;q^0\+ha! 74L?axVgGsQ$찻sd}α0|s!c ԟV*4yh>\Ow*h_$=nX%-QdM-1d ^1+O:i;iI;&K~ <nfxYegmtw w2gor)iYeGg2%:|T0bYK^|lr:Ζ9npnd]6<]mFS9bxDC?m`>YidN0@s\g`O~aChoR`7;`XNd@y؝ 9y΂jMZ)K[۰)l)Y' Ps&+3H6àBΰ9lð6#&d%=:ʹ;/Fz)eyWQk>YzbH u56yvk'04h_SLsD]Ob o5|;46H`VD.g2!%m#y]1(fb5C˨p64ta=.K\j3 {/hmv̜UvRf0T;ұ~h` ߳]h|JՅ ٖ MW!<ݙc?bs4O \9,ȥ݈jS{6ql?/ w-`e|$}d&B/ O!W%gb9k0 gb 2lFR̿OtB;4Rƺ]4+vpN;if^bfHc<<_VV. 
޶XQgP3Xu]0D;M:)LavKX#lE\\'߾#O64oShb R'M'-jLy+X,ػbvNfXRYqWY4OXB|aJ [$J^ P7[Ϳ?F2PrƁcħe!/{cG:w tHul9JZ3O7 jփPBw*4$wR IUi_lX BE\!52 \k2&H0ǘ z;Io3 }1MtBI>NI'Kxk G_JIŮ0'eM!1׶@;L' ~Xdފ:)NdSV'K&:h!7Uȗst}F!EZBSl}zn;POfT7o"v߫٘ܓ4t sS>7#2U1d{Ͽ4`"]Y\HVmW=n9$t=+hV;G +URȡ pG𾘊Dd+H5{3)] 'Uw /LZ(~D>φ/L,XA >c+BRl+5Q4cu-' ,C80w#%ՌN_q&4%|\i 򆧒Q!nգ9r|p9B_J:1RR хtŁ#ģT?zi iҬaƋ Nl;FG?DZ{\Ԧ-޵qHY0oDd?,t*J ;:gy Jr_2a6/l AvP.})Ijp76/E4u 9 _snL,2)QՑjp[]Zh4%-Cm/XF~HPVK8'"RױU30qPE7:ee6v:go(=L08Rlkn `@t7NƄARB'`>k!va_թfSn!P[Dd@/Oİ3 "@)Dוpn`&]. 0P"L'L*oC"AgV X-)FJr%zDZlb&"C178c~v!k `/`.B&|ğZ 91%)M(}[xSγ( xV|uՁş)|̃dΩ،FgeJ])߂)%3N9#|P%.ӚV/9mOn$.UVz,n Pn@D^?۠ZZΡ1 G8Gl@asd (+ѽA`whfz=ݒ.eǘf6ei=zAF].%RI$`-A[ <)R.`IBLV? JN`~śK5F14h]OZ=abKikb¯7947vڮU@Qr|RC \Ry-gW{DSmj ڊK(~Kiǩ}dD89#1#VqJ-~ir:2c-@VzQy/A*rsu brKWp-*rksC%Ѻ=+fa=ïß{*1_vfRZrFN+Kz/"#ƤRZ%|Xhx5 S,!]4\)@; -RqzLX|GT"y \7Mn%Uir` }qu,cϿ7+,IocjN~n߱Iw=d.wi- Sf2U6tL"yUs  =<=_ʭGUDԔkԊ">yEA'-{0qj=1t/Ocg4#/%GXZvؠh<C;,?}_(~IӬC:ڛ_yk }f tB':%"ѹ^皮rh)e@e@Ƽ&a (FeKוrP#Gq)>7I ONTXEk?t ~V`H hF]HJu$c>+dv0)r&|L5UR$WYõiinp1]]Zʄ>Y';uxbJXڪ\ÝDDȂ`P0SD ˺ 6> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϲh> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 114 /ID [] >> stream xcb&F~0 $8Jy?-@6[3(NւH U Dr A`!dZ? 
\," c`]K o"ɶ H22ʴAm "EA$XRLeWC- endstream endobj startxref 120942 %%EOF spatstat/inst/doc/getstart.Rnw0000644000176200001440000003126413265066746016211 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Getting Started with Spatstat} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Getting started with \texttt{spatstat}} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle Welcome to \spst, a package in the \R\ language for analysing spatial point patterns. This document will help you to get started with \spst. It gives you a quick overview of \spst, and some cookbook recipes for doing basic calculations. \section*{What kind of data does \spst\ handle?} \Spst\ is mainly designed for analysing \emph{spatial point patterns}. For example, suppose you are an ecologist studying plant seedlings. You have pegged out a $10 \times 10$ metre rectangle for your survey. Inside the rectangle you identify all the seedlings of the species you want, and record their $(x,y)$ locations. You can plot the $(x,y)$ locations: <>= data(redwood) plot(redwood, pch=16, main="") @ This is a \emph{spatial point pattern} dataset. 
Methods for analysing this kind of data are summarised in the highly recommended book by Diggle \cite{digg03}, or our own book \cite{baddrubaturn15}, or other references in the bibliography below. \nocite{handbook10,bivapebegome08} Alternatively the points could be locations in one dimension (such as road accidents recorded on a road network) or in three dimensions (such as cells observed in 3D microscopy). You might also have recorded additional information about each seedling, such as its height, or the number of fronds. Such information, attached to each point in the point pattern, is called a \emph{mark} variable. For example, here is a stand of pine trees, with each tree marked by its diameter at breast height (dbh). The circle radii represent the dbh values (not to scale). <>= data(longleaf) plot(longleaf, main="") @ You might also have recorded supplementary data, such as the terrain elevation, which might serve as explanatory variables. These data can be in any format. \Spst\ does not usually provide capabilities for analysing such data in their own right, but \spst\ does allow such explanatory data to be taken into account in the analysis of a spatial point pattern. \Spst\ is \underline{\bf not} designed to handle point data where the $(x,y)$ locations are fixed (e.g.\ temperature records from the state capital cities in Australia) or where the different $(x,y)$ points represent the same object at different times (e.g.\ hourly locations of a tiger shark with a GPS tag). These are different statistical problems, for which you need different methodology. 
\section*{What can \spst\ do?} \Spst\ supports a very wide range of popular techniques for statistical analysis for spatial point patterns, for example \begin{itemize} \item kernel estimation of density/intensity \item quadrat counting and clustering indices \item detection of clustering using Ripley's $K$-function \item spatial logistic regression \item model-fitting \item Monte Carlo tests \end{itemize} as well as some advanced statistical techniques. \Spst\ is one of the largest packages available for \R, containing over 1000 commands. It is the product of 25 years of software development by leading researchers in spatial statistics. \section*{How do I start using \spst?} \begin{enumerate} \item Install \R\ on your computer \begin{quote} Go to \texttt{r-project.org} and follow the installation instructions. \end{quote} \item Install the \spst\ package in your \R\ system \begin{quote} Start \R\ and type \verb!install.packages("spatstat")!. If that doesn't work, go to \texttt{r-project.org} to learn how to install Contributed Packages. \end{quote} \item Start \R\ \item Type \texttt{library(spatstat)} to load the package. \item Type \texttt{help(spatstat)} for information. \end{enumerate} \section*{How do I get my data into \spst?} <>= data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) @ Here is a cookbook example. Suppose you've recorded the $(x,y)$ locations of seedlings, in an Excel spreadsheet. You should also have recorded the dimensions of the survey area in which the seedlings were mapped. \begin{enumerate} \item In Excel, save the spreadsheet into a comma-separated values (CSV) file. \item Start \R\ \item Read your data into \R\ using \texttt{read.csv}. \begin{quote} If your CSV file is called \texttt{myfile.csv} then you could type something like <>= mydata <- read.csv("myfile.csv") @ to read the data from the file and save them in an object called \texttt{mydata} (or whatever you want to call it). 
You may need to set various options inside the \texttt{read.csv()} command to get this to work for your file format: type \texttt{help(read.csv)} for information. \end{quote} \item Check that \texttt{mydata} contains the data you expect. \begin{quote} For example, to see the first few rows of data from the spreadsheet, type <<>>= head(mydata) @ To select a particular column of data, you can type \texttt{mydata[,3]} to extract the third column, or \verb!mydata$x! to extract the column labelled \texttt{x}. \end{quote} \item Type \texttt{library(spatstat)} to load the \spst\ package \item Now convert the data to a point pattern object using the \spst\ command \texttt{ppp}. \begin{quote} Suppose that the \texttt{x} and \texttt{y} coordinates were stored in columns 3 and 7 of the spreadsheet. Suppose that the sampling plot was a rectangle, with the $x$ coordinates ranging from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then you would type <>= mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) @ The general form is <>= ppp(x.coordinates, y.coordinates, x.range, y.range) @ Note that this only stores the seedling locations. If you have additional columns of data (such as seedling height, seedling sex, etc) these can be added as \emph{marks}, later. \end{quote} \item Check that the point pattern looks right by plotting it: <>= plot(mypattern) @ \item Now you are ready to do some statistical analysis. 
Try the following: \begin{itemize} \item Basic summary of data: type <>= summary(mypattern) @ \item Ripley's $K$-function: <>= options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) @ <>= plot(Kest(mypattern)) @ For more information, type \texttt{help(Kest)} \item Envelopes of $K$-function: <>= plot(envelope(mypattern,Kest)) @ <>= env <- envelope(mypattern,Kest, nsim=39) @ <>= plot(env, main="envelope(mypattern, Kest)") @ <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ For more information, type \texttt{help(envelope)} \item kernel smoother of point density: <>= plot(density(mypattern)) @ For more information, type \texttt{help(density.ppp)} \end{itemize} \item Next if you have additional columns of data recording (for example) the seedling height and seedling sex, you can add these data as \emph{marks}. Suppose that columns 5 and 9 of the spreadsheet contained such values. Then do something like <>= marks(mypattern) <- mydata[, c(5,9)] @ <>= mypattern <-finpines @ Now you can try things like the kernel smoother of mark values: <>= plot(Smooth(mypattern)) @ \setkeys{Gin}{width=0.8\textwidth} <>= plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") @ \setkeys{Gin}{width=0.4\textwidth} \item You are airborne! Now look at the book \cite{baddrubaturn15} for more hints. \end{enumerate} \section*{How do I find out which command to use?} Information sources for \spst\ include: \begin{itemize} \item the Quick Reference guide: a list of the most useful commands. \begin{quote} To view the quick reference guide, start \R, then type \texttt{library(spatstat)} and then \texttt{help(spatstat)}. Alternatively you can download a pdf of the Quick Reference guide from the website \texttt{www.spatstat.org} \end{quote} \item online help: \begin{quote} The online help files are useful --- they give detailed information and advice about each command. They are available when you are running \spst. 
To get help about a particular command \texttt{blah}, type \texttt{help(blah)}. There is a graphical help interface, which you can start by typing \texttt{help.start()}. Alternatively you can download a pdf of the entire manual (1000 pages!) from the website \texttt{www.spatstat.org}. \end{quote} \item vignettes: \begin{quote} \Spst\ comes installed with several `vignettes' (introductory documents with examples) which can be accessed using the graphical help interface. They include a document about \texttt{Handling shapefiles}. \end{quote} \item book: \begin{quote} Our book \cite{baddrubaturn15} contains a complete course on \texttt{spatstat}. \end{quote} \item website: \begin{quote} Visit the \spst\ package website \texttt{www.spatstat.org} \end{quote} \item forums: \begin{quote} Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}. Then email your questions to the forum. Alternatively you can ask the authors of the \spst\ package (their email addresses are given in the package documentation). \end{quote} \end{itemize} \begin{thebibliography}{10} % \bibitem{badd10wshop} % A. Baddeley. % \newblock Analysing spatial point patterns in {{R}}. % \newblock Technical report, CSIRO, 2010. % \newblock Version 4. % \newblock URL \texttt{https://research.csiro.au/software/r-workshop-notes/} % \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with {{R}}}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{bivapebegome08} R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio. \newblock {\em Applied spatial data analysis with {R}}. \newblock Springer, 2008. \bibitem{cres93} N.A.C. Cressie. \newblock {\em Statistics for Spatial Data}. \newblock {John Wiley and Sons}, {New York}, second edition, 1993. \bibitem{digg03} P.J. Diggle. \newblock {\em Statistical Analysis of Spatial Point Patterns}. \newblock Hodder Arnold, London, second edition, 2003. \bibitem{fortdale05} M.J. 
Fortin and M.R.T. Dale. \newblock {\em Spatial analysis: a guide for ecologists}. \newblock Cambridge University Press, Cambridge, UK, 2005. \bibitem{fothroge09handbook} A.S. Fotheringham and P.A. Rogerson, editors. \newblock {\em The {SAGE} {H}andbook of {S}patial {A}nalysis}. \newblock SAGE Publications, London, 2009. \bibitem{gaetguyo09} C. Gaetan and X. Guyon. \newblock {\em Spatial statistics and modeling}. \newblock Springer, 2009. \newblock Translated by Kevin Bleakley. \bibitem{handbook10} A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors. \newblock {\em Handbook of Spatial Statistics}. \newblock CRC Press, 2010. \bibitem{illietal08} J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan. \newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}. \newblock John Wiley and Sons, Chichester, 2008. \bibitem{mollwaag04} J. M{\o}ller and R.P. Waagepetersen. \newblock {\em Statistical Inference and Simulation for Spatial Point Processes}. \newblock Chapman and Hall/CRC, Boca Raton, 2004. \bibitem{pfeietal08} D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and A. Clements. \newblock {\em Spatial analysis in epidemiology}. \newblock Oxford University Press, Oxford, UK, 2008. \bibitem{wallgotw04} L.A. Waller and C.A. Gotway. \newblock {\em Applied spatial statistics for public health data}. \newblock Wiley, 2004. 
\end{thebibliography} \end{document} spatstat/inst/doc/getstart.R0000644000176200001440000001154513624161265015633 0ustar liggesusers### R code from vignette source 'getstart.Rnw' ################################################### ### code chunk number 1: getstart.Rnw:5-6 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: getstart.Rnw:25-32 ################################################### library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: getstart.Rnw:56-58 ################################################### getOption("SweaveHooks")[["fig"]]() data(redwood) plot(redwood, pch=16, main="") ################################################### ### code chunk number 4: getstart.Rnw:80-82 ################################################### getOption("SweaveHooks")[["fig"]]() data(longleaf) plot(longleaf, main="") ################################################### ### code chunk number 5: getstart.Rnw:139-142 ################################################### data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) ################################################### ### code chunk number 6: getstart.Rnw:157-158 (eval = FALSE) ################################################### ## mydata <- read.csv("myfile.csv") ################################################### ### code chunk number 7: getstart.Rnw:169-170 ################################################### head(mydata) ################################################### ### code chunk number 8: getstart.Rnw:185-186 
(eval = FALSE) ################################################### ## mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) ################################################### ### code chunk number 9: getstart.Rnw:189-190 (eval = FALSE) ################################################### ## ppp(x.coordinates, y.coordinates, x.range, y.range) ################################################### ### code chunk number 10: getstart.Rnw:199-200 ################################################### getOption("SweaveHooks")[["fig"]]() plot(mypattern) ################################################### ### code chunk number 11: getstart.Rnw:207-208 (eval = FALSE) ################################################### ## summary(mypattern) ################################################### ### code chunk number 12: getstart.Rnw:212-213 ################################################### options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) ################################################### ### code chunk number 13: getstart.Rnw:215-216 ################################################### getOption("SweaveHooks")[["fig"]]() plot(Kest(mypattern)) ################################################### ### code chunk number 14: getstart.Rnw:222-223 (eval = FALSE) ################################################### ## plot(envelope(mypattern,Kest)) ################################################### ### code chunk number 15: getstart.Rnw:225-226 ################################################### env <- envelope(mypattern,Kest, nsim=39) ################################################### ### code chunk number 16: getstart.Rnw:228-229 ################################################### getOption("SweaveHooks")[["fig"]]() plot(env, main="envelope(mypattern, Kest)") ################################################### ### code chunk number 17: getstart.Rnw:231-232 ################################################### options(SweaveHooks=list(fig=function() 
par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 18: getstart.Rnw:238-239 ################################################### getOption("SweaveHooks")[["fig"]]() plot(density(mypattern)) ################################################### ### code chunk number 19: getstart.Rnw:249-250 (eval = FALSE) ################################################### ## marks(mypattern) <- mydata[, c(5,9)] ################################################### ### code chunk number 20: getstart.Rnw:252-253 ################################################### mypattern <-finpines ################################################### ### code chunk number 21: getstart.Rnw:256-257 (eval = FALSE) ################################################### ## plot(Smooth(mypattern)) ################################################### ### code chunk number 22: getstart.Rnw:260-261 ################################################### getOption("SweaveHooks")[["fig"]]() plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") spatstat/inst/doc/Nickname.txt0000644000176200001440000000005613624152174016133 0ustar liggesusers"I'm sorry Dave, I'm afraid I can't do that" spatstat/inst/doc/BEGINNER.txt0000644000176200001440000000202313115225157015570 0ustar liggesusers -== Welcome to the 'spatstat' package! ==- For a friendly introduction to spatstat, type the command vignette('getstart') which displays the document "Getting Started with Spatstat". 
For an overview of all capabilities, type help(spatstat) View the documentation for any command/function 'foo' by typing help(foo) Activate the graphical help interface by typing help.start() To handle spatial data in the 'shapefile' format, see the document "Handling shapefiles in the spatstat package", by typing vignette('shapefiles') For a complete course on spatstat, see the book "Spatial Point Patterns: Methodology and Applications with R" by Baddeley, Rubak and Turner, Chapman and Hall/CRC Press, December 2015. For a summary of changes to spatstat since the book was finished, type vignette('updates') Visit the website www.spatstat.org for updates and free chapters. For news about the very latest version of spatstat, type latest.news [[[Press 'Q' to exit, on some computers]]] spatstat/inst/doc/replicated.Rnw0000644000176200001440000014174113265066746016472 0ustar liggesusers\documentclass[11pt]{article} % \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}} \newcommand{\dee}[1]{\, {\rm d}{#1}} \newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}} \newcommand{\xx}{\boldmaths{x}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ 
\title{Analysing replicated point patterns in \texttt{spatstat}} \author{Adrian Baddeley} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \begin{abstract} This document describes \spst's capabilities for fitting models to replicated point patterns. More generally it applies to data from a designed experiment in which the response from each unit is a spatial point pattern. \end{abstract} \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} `Replicated point patterns' are datasets consisting of several point patterns which can be regarded as independent repetitions of the same experiment. For example, three point patterns taken from micrographs of three pipette samples of the same jug of milk, could be assumed to be replicated observations. More generally we could have several experimental groups, with replicated point pattern data in each group. For example there may be two jugs of milk that were treated differently, and we take three pipette samples from each jug. Even more generally our point patterns could be the result of a designed experiment involving control and treatment groups, covariates such as temperature, and even spatial covariates (such as image data). This document describes some capabilities available in the \spst\ package for analysing such data. \textbf{For further detail, see Chapter 16 of the spatstat book \cite{TheBook}.} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Overview of software} The main components needed are: \begin{itemize} \item the model-fitting function \texttt{mppm}, an extension of the \texttt{spatstat} function \texttt{ppm}, that will fit Gibbs point process models to multiple point pattern datasets; \item support for the class \texttt{"mppm"} of point process models fitted by \texttt{mppm} (e.g. 
functions to print and plot the fitted model, analysis of deviance for Poisson models) \item some tools for exploratory data analysis; \item basic support for the data from such experiments by storing the data in a \emph{``hyperframe''}. A hyperframe is like a data frame, except that each entry in a column can be a point pattern or a pixel image, as well as a single number or categorical value. \item four example datasets. \end{itemize} \section{Formulating the problem} We view the experiment as involving a series of {\em `units'\/}. Each unit is subjected to a known set of experimental conditions (described by the values of the {\em covariates\/}), and each unit yields a {\em response\/} which is a spatial point pattern. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a pixel image. Three important cases are: \begin{description} \item[independent replicates:] We observe $n$ different point patterns that can be regarded as independent replicates, i.e.\ independent realisations of the same point process. The `responses' are the point patterns; there are no covariates. \item[replication in groups:] there are $K$ different experimental groups (e.g. control, aspirin, nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$ point patterns which can be regarded as independent replicates within this group. We regard this as an experiment with $n = \sum_k n_k$ units. The responses are the point patterns; there is one covariate which is a factor (categorical variable) identifying which group each point pattern belongs to. \item[general case:] there are covariates other than factors that influence the response. The point patterns are assumed to be independent, but no two patterns have the same distribution. \end{description} Examples of these three cases are given in the datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper} respectively, which are installed in \spst. 
\section{Installed datasets} The following datasets are currently installed in \spst. \begin{itemize} \item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data recording the locations of insect larvae on a pond in 3 independent experiments. \item \texttt{pyramidal}: data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain, 31 human subjects grouped into 3 groups (controls, schizoaffective and schizophrenic). \item \texttt{flu}: data from Chen et al \cite{chenetal08} giving the locations of two different virus proteins on the membranes of cells infected with influenza virus; 41 multitype point patterns divided into two virus types (wild and mutant) and two stain types. \item \texttt{simba}: simulated data from an experiment with two groups and 5 replicate point patterns per group. \item \texttt{demohyper}: simulated data from an experiment with two groups in which each experimental unit has a point pattern response and a pixel image covariate. \end{itemize} \section{Lists of point patterns} First we need a convenient way to store the \emph{responses} from all the units in an experiment. An individual point pattern is stored as an object of class \verb!"ppp"!. The easiest way to store all the responses is to form a list of \verb!"ppp"! objects. \subsection{Waterstriders data} The \texttt{waterstriders} data are an example of this type. The data consist of 3 independent point patterns representing the locations of insect larvae on a pond. See \texttt{help(waterstriders)}. <<>>= waterstriders @ The \texttt{waterstriders} dataset is a list of point patterns. It is a list, each of whose entries is a point pattern (object of class \verb!"ppp"!). Note that the observation windows of the three point patterns are {\tt not\/} identical. \subsection{The class \texttt{listof}} For convenience, the \texttt{waterstriders} dataset also belongs to the class \verb!"listof"!. 
This is a simple mechanism to allow us to handle the list neatly --- for example, we can provide special methods for printing, plotting and summarising the list. \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(waterstriders, main="") @ Notice that the plot method displays each entry of the list in a separate panel. There's also the summary method: <<>>= summary(waterstriders) @ \subsection{Creating a \texttt{listof} object} For example, here is a simulated dataset containing three independent realisations of the Poisson process with intensity 100. <<>>= X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) @ Then it can be printed and plotted. <>= plot(X) X @ To convert an existing list to the class \code{listof}, use \code{as.listof}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Hyperframes} A \emph{hyperframe} is like a data frame, except that its entries can be objects of any kind. A hyperframe is effectively a two-dimensional array in which each column consists of values of one type (as in a data frame) or consists of objects of one class. The entries in a hyperframe can be point patterns, pixel images, windows, or any other objects. To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). \subsection{Creating hyperframes} The function \texttt{hyperframe} will create a hyperframe. <>= hyperframe(...) @ The arguments \verb!...! are any number of arguments of the form \texttt{tag=value}. Each \texttt{value} will become a column of the array. The \texttt{tag} determines the name of the column. Each \texttt{value} can be either \begin{itemize} \item an atomic vector or factor (i.e. 
numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of identical objects. \end{itemize} All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. For example, here is a hyperframe containing a column of numbers and a column of \emph{functions}: <<>>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H @ Note that a column of character strings will be converted to a factor, unless you set \texttt{stringsAsFactors=FALSE} in the call to \code{hyperframe}. This is the same behaviour as for the function \code{data.frame}. <<>>= G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G @ This hyperframe has 3 rows and 6 columns. The columns named \texttt{U} and \texttt{V} are constant (all entries in a column are the same). The column named \texttt{Y} is a character vector while \texttt{Z} is a factor. \subsection{Hyperframes of data} To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). Several examples of hyperframes are provided with the package, including \texttt{demohyper}, \texttt{flu}, \texttt{simba} and \texttt{pyramidal}, described above. The \texttt{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the control group are independent Poisson point patterns with intensity 80. 
The responses in the treatment group are independent realisations of a Strauss process (see \texttt{help(simba)} for details). The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns: \texttt{Points} (the point patterns) and \texttt{group} (a factor with levels \texttt{control} and \texttt{treatment}). <<>>= simba @ The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain. One point pattern was observed in each of 31 human subjects. The subjects were classified into 3 groups (controls, schizoaffective and schizophrenic). The \texttt{pyramidal} dataset is a hyperframe with 31 rows and 2 columns: \code{Neurons} (the point patterns) and \code{group} (a factor with levels \texttt{control}, \texttt{schizoaffective} and \texttt{schizophrenic}). <<>>= pyramidal @ The \texttt{waterstriders} dataset is not a hyperframe; it's just a list of point patterns. It can easily be converted into a hyperframe: <<>>= ws <- hyperframe(Striders=waterstriders) @ \subsection{Columns of a hyperframe} Individual columns of a hyperframe can be extracted using \verb!$!: <<>>= H$X H$Y @ The result of \verb!$! is a vector or factor if the column contains atomic values; otherwise it is a list of objects (with class \texttt{"listof"} to make it easier to print and plot). Individual columns can also be assigned (overwritten or created) using \verb!$<-!: <<>>= H$U <- letters[1:3] H @ This can be used to build up a hyperframe column-by-column: <<>>= G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G @ \subsection{Subsets of a hyperframe} Other subsets of a hyperframe can be extracted with \verb![!: <<>>= H[,1] H[2,] H[2:3, ] H[1,1] @ The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE! and the subset consists of only one element or one column: <<>>= H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] @ There is also a method for \verb![<-! 
that allows you to assign values to a subset of a hyperframe. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Plotting} \subsection{Plotting a \code{listof} object} The plot method for \code{listof} objects has formal arguments <>= plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) @ where \code{main} is a title for the entire page. If \code{arrange=TRUE} then the entries of the list are displayed in separate panels on the same page (with \code{nrows} rows and \code{ncols} columns of panels), while if \code{arrange=FALSE} then the entries are just plotted as a series of plot frames. The extra arguments \verb!...! control the individual plot panels. These arguments will be passed to the plot method that displays each entry of the list. Suitable arguments depend on the type of entries. <>= plot(waterstriders, pch=16, nrows=1) @ \subsection{Plotting a hyperframe} \subsubsection{Plotting one column} If \code{h} is a hyperframe, then the default action of \code{plot(h)} is to extract the first column of \code{h} and plot each of the entries in a separate panel on one page (actually using the plot method for class \verb!"listof"!). \SweaveOpts{width=7,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(simba) @ This only works if the entries in the first column are objects for which a plot method is defined (for example, point patterns, images, windows). To select a different column, use \verb!$! or \verb![!: \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) @ The plot can be controlled using the arguments for \code{plot.listof} (and, in this case, \code{plot.function}, since \verb!H$Y! consists of functions). 
\subsubsection{Complex plots} More generally, we can display any kind of higher-order plot involving one or more columns of a hyperframe: <>= plot(h, e) @ where \code{h} is a hyperframe and \code{e} is an \R\ language call or expression that must be evaluated in each row to generate each plot panel. \SweaveOpts{width=9,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) @ Note the use of \code{quote}, which prevents the code inside the braces from being evaluated immediately. To plot the $K$-functions of each of the patterns in the \code{waterstriders} dataset, \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Data analysis} \subsection{Computing with hyperframes} Often we want to perform some computation on each row of a hyperframe. In a data frame, this can be done using the command \code{with}: <<>>= df <- data.frame(A=1:10, B=10:1) with(df, A-B) @ In this example, the expression \code{A-B} is evaluated in each row of the data frame, and the result is a vector containing the computed values for each row. The function \code{with} is generic, and has a method for data frames, \code{with.data.frame}. The computation above was executed by \code{with.data.frame}. The same syntax is available for hyperframes using the method \code{with.hyperframe}: <>= with(h,e) @ Here \code{h} is a hyperframe, and \code{e} is an {\sf R} language construct involving the names of columns in \code{h}. For each row of \code{h}, the expression \code{e} will be evaluated in such a way that each entry in the row is identified by its column name. <<>>= H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) @ The result of \code{with.hyperframe} is a list of objects (of class \verb!"listof"!), or a vector or factor if appropriate. 
Notice that (unlike the situation for data frames) the operations in the expression \code{e} do not have to be vectorised. For example, \code{distmap} expects a single point pattern, and is not vectorised to deal with a list of point patterns. Instead, the expression \code{distmap(Bugs)} is evaluated separately in each row of the hyperframe. \subsection{Summary statistics} One application of \code{with.hyperframe} is to calculate summary statistics for each row of a hyperframe. For example, the number of points in a point pattern \code{X} is returned by \code{npoints(X)}. To calculate this for each of the responses in the \code{simba} dataset, <<>>= with(simba, npoints(Points)) @ The summary statistic can be any kind of object. For example, to compute the empirical $K$-functions for each of the patterns in the \code{waterstriders} dataset, <<>>= H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) @ To plot these $K$-functions you can then just type \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(K) @ The summary statistic for each row could be a numeric vector: <<>>= H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) @ The result is a list, each entry being a vector of nearest neighbour distances. To find the minimum interpoint distance in each pattern: <<>>= with(H, min(nndist(Bugs))) @ \subsection{Generating new columns} New columns of a hyperframe can be created by computation from the existing columns. For example, I can add a new column to the \code{simba} dataset that contains pixel images of the distance maps for each of the point pattern responses. <>= simba$Dist <- with(simba, distmap(Points)) @ \subsection{Simulation} This can be useful for simulation. 
For example, to generate Poisson point patterns with different intensities, where the intensities are given by a numeric vector \code{lambda}: \SweaveOpts{width=6,height=6} \setkeys{Gin}{width=0.7\textwidth} <>= lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) @ It's even simpler to generate 10 independent Poisson point patterns with the \emph{same} intensity 50, say: <>= H$X <- with(H, rpoispp(50)) @ \noindent The expression \code{rpoispp(50)} is evaluated once in each row, yielding a different point pattern in each row because of the randomness. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Exploratory data analysis} Before fitting models to the data, it is prudent to explore the data to detect unusual features and to suggest appropriate models. \subsection{Exploring spatial trend and covariate effects} Points may be distributed non-uniformly either because they are intrinsically non-uniform (``spatial trend'') or because their abundance depends on a spatial covariate (``covariate effects''). Non-uniformity of a point pattern can be investigated using the kernel smoothed intensity. This is the convolution of the point pattern with a smooth density called the kernel. Effectively each point in the pattern is replaced by a copy of the kernel, and the sum of all copies of the kernel is the kernel-smoothed intensity function. It is computed by \texttt{density.ppp} separately for each point pattern. <>= plot(simba, quote(plot(density(Points), main="")), nrows=2) @ Covariate effects due to a real-valued spatial covariate (a real-valued pixel image) can be investigated using the command \code{rhohat}. 
This uses a kernel smoothing technique to fit a model of the form \[ \lambda(u) = \rho(Z(u)) \] where $\lambda(u)$ is the point process intensity at a location $u$, and $Z(u)$ is the value of the spatial covariate at that location. Here $\rho$ is an unknown, smooth function which is to be estimated. The function $\rho$ expresses the effect of the spatial covariate on the point process intensity. If $\rho$ turns out to be constant, then the covariate has no effect on point process intensity (and the constant value of $\rho$ is the constant intensity of the point process). <>= rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) @ \SweaveOpts{width=6,height=4} \setkeys{Gin}{width=0.9\textwidth} \subsection{Exploring interpoint interaction} Still to be written. See Chapter 16 of \cite{baddrubaturn15}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fitting models of spatial trend} The command \code{mppm} fits models to multiple point patterns. Its syntax is very similar to that of \code{lm} and \code{glm}: <>= mppm(formula, data, interaction, ...) @ where \code{formula} is a formula describing the systematic trend part of the model, \code{data} is a hyperframe containing all the data (responses and covariates), and \code{interaction} determines the stochastic interpoint interaction part of the model. For example: <>= mppm(Points ~ group, simba, Poisson()) @ Note that the formula has a left hand side, which identifies the response. This should be the name of a column of \code{data}. \subsection{Trend formula} The right side of \code{formula} is an expression for the linear predictor (effectively the {\bf logarithm} of the spatial trend). 
The variables appearing in the right hand side of \code{formula} should be either \begin{itemize} \item names of columns in \code{data} \item objects in the {\sf R} global environment (such as \code{pi} and \code{log}) \item the reserved names \code{x}, \code{y} (representing Cartesian coordinates), \code{marks} (representing mark values attached to points) or \code{id} (a factor representing the row number in the hyperframe). \end{itemize} \subsubsection{Design covariates} The variables in the trend could be `design covariates'. For example, to fit a model to the \code{simba} dataset in which all patterns are independent replicates of the same uniform Poisson process, with the same constant intensity: <<>>= mppm(Points ~ 1, simba) @ To fit a model in which the two groups of patterns (control and treatment groups) each consist of independent replicates of a uniform Poisson process, but with possibly different intensity in each group: <<>>= mppm(Points ~ group, simba) @ To fit a uniform Poisson process to each pattern, with different intensity for each pattern: <<>>= mppm(Points ~ id, simba) @ \subsubsection{Spatial covariates} The variables in the trend could be `spatial covariates'. For example, the \code{demohyper} dataset has a column \code{Image} containing pixel images. <<>>= mppm(Points ~ Image, data=demohyper) @ This model postulates that each pattern is a Poisson process with intensity of the form \[ \lambda(u) = \exp(\beta_0 + \beta_1 Z(u)) \] at location $u$, where $\beta_0, \beta_1$ are coefficients to be estimated, and $Z(u)$ is the value of the pixel image \code{Image} at location $u$. It may or may not be appropriate to assume that the intensity of the points is an exponential function of the image pixel value $Z$. 
If instead we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$, the appropriate model is <>= mppm(Points ~ offset(log(Image)), data=demohyper) @ which corresponds to an intensity proportional to \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u). \] The \code{offset} indicates that there is no coefficient in front of $\log Z(u)$. Alternatively we could allow a coefficient: <>= mppm(Points ~ log(Image), data=demohyper) @ which corresponds to a gamma transformation of \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) = e^{\beta_0} \; Z(u)^{\beta_1}. \] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Interpoint interaction} The stochastic interpoint interaction in a point process model is specified by the arguments \code{interaction} and (optionally) \code{iformula} in <>= mppm(formula, data, interaction, ..., iformula=NULL) @ \subsection{Same interaction for all patterns} In the simplest case, the argument \texttt{interaction} is one of the familiar objects that describe the point process interaction structure. 
It is an object of class \texttt{"interact"} created by calling one of the functions \begin{center} \begin{tabular}{rl} \texttt{Poisson()} & the Poisson point process\\ \texttt{Hardcore()} & the hard core process \\ \texttt{Strauss()} & the Strauss process \\ \texttt{StraussHard()} & the Strauss/hard core point process\\ \texttt{Softcore()} & pairwise interaction, soft core potential\\ \texttt{PairPiece()} & pairwise interaction, piecewise constant \\ \texttt{DiggleGatesStibbard() } & Diggle-Gates-Stibbard pair potential \\ \texttt{DiggleGratton() } & Diggle-Gratton pair potential \\ \texttt{Fiksel() } & Fiksel pair potential \\ \texttt{LennardJones() } & Lennard-Jones pair potential \\ \texttt{Pairwise()} & pairwise interaction, user-supplied potential\\ \texttt{AreaInter()} & area-interaction potential\\ \texttt{Geyer()} & Geyer's saturation process\\ \texttt{BadGey()} & multiscale Geyer saturation process\\ \texttt{Saturated()} & Saturated pair model, user-supplied potential\\ \texttt{OrdThresh()} & Ord process, threshold potential\\ \texttt{Ord()} & Ord model, user-supplied potential \\ \texttt{MultiStrauss()} & multitype Strauss process \\ \texttt{MultiStraussHard()} & multitype Strauss/hard core process \\ \texttt{Concom()} & connected component interaction \\ \texttt{Hybrid()} & hybrid of several interactions \\ \end{tabular} \end{center} In this `simple' usage of \texttt{mppm}, the point process model assumes that all point patterns have exactly the same interpoint interaction, (with the same interaction parameters), and only differ in their spatial trend. \subsection{Hyperframe of interactions} More generally the argument \code{interaction} can be a hyperframe containing objects of class \texttt{"interact"}. For example, we might want to fit a Strauss process to each point pattern, but with a different Strauss interaction radius for each pattern. 
<>= radii <- with(simba, mean(nndist(Points))) @ Then \code{radii} is a vector of numbers which we could use as the values of the interaction radius for each case. First we need to make the interaction objects: <<>>= Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) @ Then we put them into a hyperframe and fit the model: <<>>= Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) @ An important constraint is that all of the interaction objects in one column must be \emph{instances of the same process} (e.g. Strauss) albeit possibly having different parameter values. For example, you cannot put Poisson and Strauss processes in the same column. \subsection{Interaction formula} If \code{interaction} is a hyperframe, then the additional argument \code{iformula} may be used to fully specify the interaction. (An \code{iformula} is also required if \code{interaction} has more than one column.) The \code{iformula} should be a formula without a left hand side. Variables on the right hand side are typically the names of columns in \code{interaction}. \subsubsection{Selecting one column} If the right hand side of \code{iformula} is a single name, then this identifies the column in \code{interaction} to be used as the interpoint interaction structure. <<>>= h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) @ \subsubsection{Interaction depending on design} The \code{iformula} can also involve columns of \code{data}, but only those columns that are vectors or factors. This allows us to specify an interaction that depends on the experimental design. [This feature is {\bf experimental}.] For example <<>>= fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) @ Since \code{Strauss(0.07)} is not a hyperframe, it is first converted to a hyperframe with a single column named \code{Interaction}. 
The \code{iformula = ~Interaction*group} specifies (since \code{group} is a factor) that the interpoint interaction shall have a different coefficient in each experimental group. That is, we fit a model which has two different values for the Strauss interaction parameter $\gamma$, one for the control group and one for the treatment group. When you print the result of such a fit, the package tries to do `automatic interpretation' of the fitted model (translating the fitted interaction coefficients into meaningful numbers like $\gamma$). This will be successful in \emph{most} cases: <<>>= fit @ <>= co <- coef(fit) si <- function(x) { signif(x, 4) } @ Thus we see that the estimate of the Strauss parameter $\gamma$ for the control group is \Sexpr{si(exp(co[2]))}, and for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))} (the correct values in this simulated dataset were $1$ and $0.5$). The fitted model can also be interpreted directly from the fitted canonical coefficients: <<>>= coef(fit) @ The last output shows all the coefficients $\beta_j$ in the linear predictor for the (log) conditional intensity. The interpretation of the model coefficients, for any fitted model in \R, depends on the \emph{contrasts} which were applicable when the model was fitted. This is part of the core {\sf R} system: see \code{help(contrasts)} or \code{options(contrasts)}. If you did not specify otherwise, the default is to use \emph{treatment contrasts}. This means that, for an explanatory variable which is a \texttt{factor} with $N$ levels, the first level of the factor is used as a baseline, and the fitted model coefficients represent the factor levels $2, 3, \ldots, N$ relative to this baseline. In the output above, there is a coefficient for \code{(Intercept)} and one for \code{grouptreatment}. These are coefficients related to the \code{group} factor. 
According to the ``treatment contrasts'' rule, the \code{(Intercept)} coefficient is the estimated effect for the control group, and the \code{grouptreatment} coefficient is the estimated difference between the treatment and control groups. Thus the fitted first order trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ for the control group and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group. The correct values in this simulated dataset were $80$ and $100$. The remaining coefficients in the output are \code{Interaction} and \code{Interaction:grouptreatment}. Recall that the Strauss process interaction term is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$ at a spatial location $u$, for a point pattern $\xx$. Since we're using treatment contrasts, the coefficient \code{Interaction} is the estimate of $\log\gamma$ for the control group. The coefficient \code{Interaction:grouptreatment} is the estimate of the difference in $\log\gamma$ between the treatment and control groups. Thus the estimated Strauss interaction parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ for the control group and $\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ for the treatment group. The correct values were $1$ and $0.5$. \subsubsection{Completely different interactions for different cases} In the previous example, when we fitted a Strauss model to all point patterns in the \code{simba} dataset, the fitted model for the patterns in the control group was close to Poisson ($\gamma \approx 1$). Suppose we now want to fit a model which {\it is} Poisson in the control group, and Strauss in the treatment group. The Poisson and Strauss interactions must be given as separate columns in a hyperframe of interactions: <>= interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) @ What do we write for the \code{iformula}? 
The following \emph{will not} work: <>= iformula=~ifelse(group=="control", po, str) @ This does not work because the Poisson and Strauss models are `incompatible' inside such expressions. The canonical sufficient statistics for the Poisson and Strauss processes do not have the same dimension. Internally in \code{mppm} we translate the symbols \code{po} and \code{str} into matrices; the dimensions of these matrices are different, so the \code{ifelse} expression cannot be evaluated. Instead we need something like the following: <>= iformula=~I((group=="control")*po) + I((group=="treatment") * str) @ The letter \code{I} here is a standard R function that prevents its argument from being interpreted as a formula (thus the \code{*} is interpreted as multiplication instead of a model interaction). The expression \code{(group=="control")} is logical, and when multiplied by the matrix \code{po}, yields a matrix. So the following does work: <<>>= g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!ifdef RANDOMEFFECTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Random effects} \subsection{Mixed effects models} It is also possible to fit models that include `random effects'. Effectively, some of the coefficients in the model are assumed to be Normally-distributed random variables instead of constants. \subsubsection{Mixed Poisson model} Consider the simplest model of a uniform Poisson process which we fitted to the 3 point patterns of waterstriders. It might be sensible to assume that each pattern is a realisation of a Poisson process, but with {\em random intensity\/}. In each realisation the intensity $\lambda$ is constant across different locations, but it is a different, random value in different realisations. 
This example is called a `mixed Poisson process' and belongs to the class of `Cox processes' (Poisson processes with random intensity functions). Let's assume further that the log-intensity is a Normal random variable. Then the model is a (very degenerate) special case of a `log-Gaussian Cox process'. To fit such a model we use the standard techniques of mixed effects models \cite{lairware82,davigilt95,pinhbate00}. The mixed Poisson process which we discussed above would be written in standard form \begin{equation} \label{mixPois} \lambda_i(u) = \exp(\mu + Z_i) \end{equation} for the $i$th point pattern, where $\mu$ is a parameter to be estimated (the `fixed effect') and $Z_i \sim N(0, \sigma^2)$ is a zero-mean Normal random variable (the `random effect' for point pattern $i$). In the simplest case we would assume that $Z_1, \ldots, Z_n$ are independent. The variance $\sigma^2$ of the random effects would be estimated. One can also estimate the individual realised values $z_i$ of the random effects for each point pattern, although these are usually not of such great interest. Since the model includes both fixed and random effects, it is called a ``mixed-effects'' model. \subsubsection{Dependence structure} When we formulate a random-effects or mixed-effects model, we must specify the dependence structure of the random effects. In the model above we assumed that the $Z_i$ are independent for all point patterns $i$. If the experiment consists of two groups, we could alternatively assume that $Z_i = Z_j$ whenever $i$ and $j$ belong to the same group. In other words all the patterns in one group have the same value of the random effect. So the random effect is associated with the group rather than with individual patterns. This could be appropriate if, for example, the groups represent different batches of a chemical. 
Each batch is prepared under slightly different conditions so we believe that there are random variations between batches, but within a batch we believe that the chemical is well-mixed. \subsubsection{Random effects are coefficients} In the mixed Poisson model (\ref{mixPois}), the random effect is an additive constant (with a random value) in the log-intensity. In general, a random effect is a \emph{coefficient} of one of the covariates. For example if $v$ is a real-valued design covariate (e.g. `temperature'), with value $v_i$ for the $i$th point pattern, then we could assume \begin{equation} \label{ranef2} \lambda_i(u) = \exp(\mu + Z_i v_i) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This model has a random effect in the dependence on $v$. We could also have a random effect for a spatial covariate $V$. Suppose $V_i$ is a real-valued image for the $i$th pattern (so that $V_i(u)$ is the value of some covariate at the location $u$ for the $i$th case). Then we could assume \begin{equation} \label{ranef3} \lambda_i(u) = \exp(\mu + Z_i V_i(u)) \end{equation} where $Z_i \sim N(0, \sigma^2)$ are independent for different $i$. This kind of random effect would be appropriate if, for example, the images $V_i$ are not `normalised' or `standardised' relative to each other (e.g.\ they are images taken under different illumination). Then the coefficients $Z_i$ effectively include the rescaling necessary to standardise the images. \subsection{Fitting a mixed-effects model} The call to \texttt{mppm} can also include the argument \texttt{random}. This should be a formula (with no left-hand side) describing the structure of random effects. The formula for random effects must be recognisable to \texttt{lme}. It is typically of the form \begin{verbatim} ~x1 + ... + xn | g \end{verbatim} or \begin{verbatim} ~x1 + ... + xn | g1/.../gm \end{verbatim} where \verb!x1 + ... + xn! 
specifies the covariates for the random effects and \texttt{g} or \verb!g1/.../gm! determines the grouping (dependence) structure. Here \code{g} or \code{g1, \ldots, gm} should be factors. To fit the mixed Poisson model (\ref{mixPois}) to the waterstriders, we want to have a random intercept coefficient (so \texttt{x} is \texttt{1}) that varies for different point patterns (so \texttt{g} is \texttt{id}). The reserved name \code{id} is a factor referring to the individual point pattern. Thus <<>>= H <- hyperframe(P=waterstriders) mppm(P ~ 1, H, random=~1|id) @ To fit the mixed effects model (\ref{ranef2}) to the coculture data with the \code{AstroIm} covariate, with a random effect associated with each well, <>= mppm(Neurons ~ AstroIm, random=~AstroIm|WellNumber) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %#%^!endif %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Studying the fitted model} Fitted models produced by \code{mppm} can be examined and validated in many ways. \subsection{Fits for each pattern} \subsubsection{Subfits} The command \code{subfits} takes an \code{mppm} object and extracts, for each individual point pattern, the fitted point process model for that pattern \emph{that is implied by the overall fit}. It returns a list of objects of class \code{ppm}. <<>>= H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) @ In this example the result is a list of three \code{ppm} objects representing the implied fits for each of the three point patterns in the \code{waterstriders} dataset. Notice that {\bf the fitted coefficients are the same} in all three models. Note that there are some unresolved difficulties with the implementation of \code{subfits}. Two completely different implementations are supplied in the package; they are called \code{subfits.old} %(used in versions 0.1--1 and earlier) and \code{subfits.new}.% (introduced in 0.1--2). The old version would occasionally crash. 
Unfortunately the newer version \code{subfits.new} is quite memory-hungry and sometimes causes R to hang. We're still working on this problem. So for the time being, \code{subfits} is the same as \code{subfits.old}. You can change this simply by reassigning, e.g. <>= subfits <- subfits.new @ \subsubsection{Fitting separately to each pattern} For comparison, we could fit a point process model separately to each point pattern dataset using \code{ppm}. The easy way to do this is with \code{with.hyperframe}. To fit a \emph{separate} uniform Poisson point process to each of the three waterstriders patterns, <<>>= H <- hyperframe(W=waterstriders) with(H, ppm(W)) @ The result is again a list of three fitted point process models (objects of class \code{ppm}), but now the fitted coefficients are different. \subsection{Residuals} One standard way to check a fitted model is to examine the residuals. \subsubsection{Point process residuals} Some recent papers \cite{baddetal05,baddmollpake08} have defined residuals for a fitted point process model (fitted to a \emph{single} point pattern). These residuals are implemented in \code{spatstat} as \code{residuals.ppm} and apply to an object of class \code{ppm}, that is, a model fitted to a \emph{single} point pattern. The command \code{residuals.mppm} computes the point process residuals for an \code{mppm} object. <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) @ The result is a list, with one entry for each of the point pattern datasets. Each list entry contains the point process residuals for the corresponding point pattern dataset. Each entry in the list is a signed measure (object of class \code{"msr"}) as explained in the help for \code{residuals.ppm}). 
It can be plotted: <>= plot(res) @ You probably want the smoothed residual field: <>= smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) @ \subsubsection{Sums of residuals} It would be useful to have a residual that is a single value for each point pattern (representing how much that point pattern departs from the model fitted to all the point patterns). That can be computed by \emph{integrating} the residual measures using the function \code{integral.msr}: <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) @ In designed experiments we can plot these total residuals against the design covariates: <>= fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) @ \subsubsection{Four-panel diagnostic plots} Sometimes a more useful tool is the function \code{diagnose.ppm} which produces a four-panel diagnostic plot based on the point process residuals. However, it is only available for \code{ppm} objects. To obtain a four-panel diagnostic plot for each of the point patterns, do the following: \begin{enumerate} \item fit a model to multiple point patterns using \code{mppm}. \item extract the individual fits using \code{subfits}. \item plot the residuals of the individual fits. \end{enumerate} For example: <>= fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) @ (One could also do this for models fitted separately to the individual point patterns.) \subsubsection{Residuals of the parameter estimates} We can also compare the parameter estimates obtained by fitting the model simultaneously to all patterns (using \code{mppm}) with those obtained by fitting the model separately to each pattern (using \code{ppm}). 
<<>>= H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) @ One could also try deletion residuals, etc. \subsection{Goodness-of-fit tests} \subsubsection{Quadrat count test} The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented for objects of class \code{ppm} (in \code{quadrat.test.ppm}) and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}). This is a goodness-of-fit test for a fitted {\bf Poisson} point process model only. The model could be uniform or non-uniform and the intensity might depend on covariates. <<>>= H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) @ See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm} for further details. \subsubsection{Kolmogorov-Smirnov test} The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model compares the observed and predicted distributions of the values of a spatial covariate. We want to test the null hypothesis $H_0$ that the observed point pattern ${\mathbf x}$ is a realisation from the Poisson process with intensity function $\lambda(u)$ (for locations $u$ in the window $W$). Let $Z(u)$ be a given, real-valued covariate defined at each spatial location $u$. Under $H_0$, the \emph{observed} values of $Z$ at the data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent random variables with common probability distribution function \[ F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u} {\int_W \lambda(u) \dee u}. \] We can therefore apply the Kolmogorov-Smirnov test of goodness-of-fit. 
This compares the empirical cumulative distribution of the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$. The test is implemented as \code{kstest.mppm}. The syntax is <>= kstest.mppm(model, covariate) @ where \code{model} is a fitted model (of class \texttt{"mppm"}) and \code{covariate} is either \begin{itemize} \item a \code{function(x,y)} making it possible to compute the value of the covariate at any location \code{(x,y)} \item a pixel image containing the covariate values \item a list of functions, one for each row of the hyperframe of original data \item a list of pixel images, one for each row of the hyperframe of original data \item a hyperframe with one column containing either functions or pixel images. \end{itemize} See Chapter 16 of \cite{baddrubaturn15} for further information. \newpage \addcontentsline{toc}{section}{Bibliography} %\bibliography{% %extra,% %extra2,% %biblio/badd,% %biblio/bioscience,% %biblio/censoring,% %biblio/mcmc,% %biblio/spatstat,% %biblio/stat,% %biblio/stochgeom% %} \begin{thebibliography}{1} \bibitem{baddmollpake08} A. Baddeley, J. M{\o}ller, and A.G. Pakes. \newblock Properties of residuals for spatial point processes. \newblock {\em Annals of the Institute of Statistical Mathematics}, 60:627--649, 2008. \bibitem{baddrubaturn15} A. Baddeley, E. Rubak, and R. Turner. \newblock {\em Spatial Point Patterns: Methodology and Applications with R}. \newblock Chapman \& Hall/CRC Press, 2015. \bibitem{statpaper} A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty. \newblock Analysing designed experiments where the response is a spatial point pattern. \newblock In preparation. \bibitem{baddetal05} A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton. \newblock Residual analysis for spatial point processes (with discussion). \newblock {\em Journal of the Royal Statistical Society, series B}, 67(5):617--666, 2005. \bibitem{chenetal08} B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb. 
\newblock The influenza virus {M2} protein cytoplasmic tail interacts with the {M1} protein and influences virus assembly at the site of virus budding. \newblock {\em Journal of Virology}, 82:10059--10070, 2008. %#%^!ifdef RANDOMEFFECTS \bibitem{davigilt95} M. Davidian and D.M. Giltinan. \newblock {\em Nonlinear Mixed Effects Models for Repeated Measurement Data}. \newblock Chapman and Hall, 1995. %#%^!endif \bibitem{digglangbene91} P.J. Diggle, N. Lange, and F. M. Benes. \newblock Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation}, 86:618--625, 1991. %#%^!ifdef RANDOMEFFECTS \bibitem{lairware82} N.M. Laird and J.H. Ware. \newblock Random-effects models for longitudinal data. \newblock {\em Biometrics}, 38:963--974, 1982. %#%^!endif \bibitem{pent84} A. Penttinen. \newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method}. \newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics and Statistics. University of {Jyv\"askyl\"a}, 1984. %#%^!ifdef RANDOMEFFECTS \bibitem{pinhbate00} J.C. Pinheiro and D.M. Bates. \newblock {\em Mixed-Effects Models in {S} and {S-PLUS}}. \newblock Springer, 2000. 
%#%^!endif \end{thebibliography} %\addcontentsline{toc}{section}{Index} %\printindex \end{document} spatstat/inst/doc/bugfixes.R0000644000176200001440000000315613624161254015607 0ustar liggesusers### R code from vignette source 'bugfixes.Rnw' ################################################### ### code chunk number 1: bugfixes.Rnw:20-24 ################################################### library(spatstat) sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 2: bugfixes.Rnw:38-42 ################################################### nbugs <- nrow(news(grepl("^BUG", Category), package="spatstat")) nbugssince <- nrow(news(Version > "1.42-0" & grepl("^BUG", Category), package="spatstat")) ################################################### ### code chunk number 3: bugfixes.Rnw:58-59 (eval = FALSE) ################################################### ## bugfixes ################################################### ### code chunk number 4: bugfixes.Rnw:63-64 (eval = FALSE) ################################################### ## bugfixes(sinceversion="1.50-0") ################################################### ### code chunk number 5: bugfixes.Rnw:68-69 (eval = FALSE) ################################################### ## bugfixes(sincedate="2017-06-30") ################################################### ### code chunk number 6: bugfixes.Rnw:72-73 (eval = FALSE) ################################################### ## bugfixes("book") ################################################### ### code chunk number 7: bugfixes.Rnw:76-77 (eval = FALSE) ################################################### ## bugfixes("all") spatstat/inst/doc/shapefiles.Rnw0000755000176200001440000005064713512337523016475 0ustar liggesusers\documentclass[twoside,11pt]{article} % \VignetteIndexEntry{Handling shapefiles in the spatstat package} 
\SweaveOpts{eps=TRUE} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{color} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \begin{document} %\bibliographystyle{plain} \thispagestyle{empty} <>= library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") @ \title{Handling shapefiles in the \texttt{spatstat} package} \author{Adrian Baddeley, Rolf Turner and Ege Rubak} \date{ \Sexpr{sdate} \\ \pkg{spatstat} version \texttt{\Sexpr{sversion}} } \maketitle This vignette explains how to read data into the \pkg{spatstat} package from files in the popular `shapefile' format. This vignette is part of the documentation included in \pkg{spatstat} version \texttt{\Sexpr{sversion}}. The information applies to \pkg{spatstat} versions \texttt{1.36-0} and above. \section{Shapefiles} A shapefile represents a list of spatial objects --- a list of points, a list of lines, or a list of polygonal regions --- and each object in the list may have additional variables attached to it. A dataset stored in shapefile format is actually stored in a collection of text files, for example \begin{verbatim} mydata.shp mydata.prj mydata.sbn mydata.dbf \end{verbatim} which all have the same base name \texttt{mydata} but different file extensions. To refer to this collection you will always use the filename with the extension \texttt{shp}, for example \texttt{mydata.shp}. \section{Helper packages} \label{S:helpers} We'll use two other packages to handle shapefile data. The \pkg{maptools} package is designed specifically for handling file formats for spatial data. 
It contains facilities for reading and writing files in shapefile format.% \footnote{ Some code in \pkg{maptools} is no longer maintained, and may give you a message recommending that you use the packages \pkg{rgdal} and \pkg{sf}. However these packages are more difficult to install than \pkg{maptools} because of their software requirements. So we recommend that you try \pkg{maptools} first. } The \pkg{sp} package supports a standard set of spatial data types in \R. These standard data types can be handled by many other packages, so it is useful to convert your spatial data into one of the data types supported by \pkg{sp}. \section{Caveat about longitude-latitude coordinates} The shapefile format supports geographical coordinates, usually longitude-latitude coordinates, which specify locations on the curved surface of the Earth. However, \texttt{spatstat} deals only with spatial data on a flat two-dimensional plane. When shapefile data are converted into \texttt{spatstat} objects, longitude and latitude coordinates are (currently) treated as $x$ and $y$ coordinates, so that the Earth's surface is effectively mapped to a rectangle. This mapping distorts distances and areas. If your study region is a \emph{small} region of the Earth's surface (about 3 degrees, 180 nautical miles, 200 statute miles, 320 km across) then a reasonable approach is to use the latitude and longitude as $x$ and $y$ coordinates, after multiplying the longitude coordinates by the cosine of the latitude of the centre of the region. This will approximately preserve areas and distances. This calculation is a simple example of a \emph{geographical projection} and there are some much better projections available. It may be wise to use another package to perform the appropriate projection for you, and then to convert the projected data into \texttt{spatstat} objects. 
If your study region is a large part of the sphere, then your data may not be amenable to the techniques provided by \texttt{spatstat} because the geometry is fundamentally different. Please consider the extension package \texttt{spatstat.sphere}. \section{How to read shapefiles into \pkg{spatstat}} To read shapefile data into \pkg{spatstat}, you follow two steps: \begin{enumerate} \item using the facilities of \pkg{maptools}, read the shapefiles and store the data in one of the standard formats supported by \pkg{sp}. \item convert the \pkg{sp} data type into one of the data types supported by \pkg{spatstat}. \end{enumerate} \subsection{Read shapefiles using \pkg{maptools}} Here's how to read shapefile data. \begin{enumerate} \item ensure that the package \pkg{maptools} is installed. You will need version \texttt{0.7-16} or later. \item start R and load the package: <>= library(maptools) @ \item read the shapefile into an object in the \pkg{sp} package using \texttt{readShapeSpatial}, for example <>= x <- readShapeSpatial("mydata.shp") @ In recent versions of \pkg{maptools} you may get a warning, saying that this code is no longer supported, and recommending the packages \pkg{rgdal} and \pkg{sf}. As far as we know, this warning is premature, as the code still works fine! \item To find out what kind of spatial objects are represented by the dataset, inspect its class: <>= class(x) @ The class may be either \texttt{SpatialPoints} indicating a point pattern, \texttt{SpatialLines} indicating a list of polygonal lines, or \texttt{SpatialPolygons} indicating a list of polygons. It may also be \texttt{SpatialPointsDataFrame}, \texttt{SpatialLinesDataFrame} or \texttt{SpatialPolygonsDataFrame} indicating that, in addition to the spatial objects, there is a data frame of additional variables. The classes \texttt{SpatialPixelsDataFrame} and \texttt{SpatialGridDataFrame} represent pixel image data. 
\end{enumerate} Here are some examples, using the example shapefiles supplied in the \pkg{maptools} package itself. % fake data because we don't want spatstat to depend on maptools <>= baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" @ <>= setwd(system.file("shapes", package="maptools")) baltim <- readShapeSpatial("baltim.shp") columbus <- readShapeSpatial("columbus.shp") fylk <- readShapeSpatial("fylk-val.shp") @ <<>>= class(baltim) class(columbus) class(fylk) @ \subsection{Convert data to \pkg{spatstat} format} To convert the dataset to an object in the \pkg{spatstat} package, the procedure depends on the type of data, as explained below. Both packages \pkg{maptools} and \pkg{spatstat} must be \textbf{loaded} in order to convert the data. \subsubsection{Objects of class \texttt{SpatialPoints}} An object \texttt{x} of class \texttt{SpatialPoints} represents a spatial point pattern. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a spatial point pattern in \pkg{spatstat}. (The conversion is performed by \texttt{as.ppp.SpatialPoints}, a function in \pkg{maptools}.) The window for the point pattern will be taken from the bounding box of the points. You will probably wish to change this window, usually by taking another dataset to provide the window information. Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern object of class \verb!"ppp"! and \texttt{W} is a window object of class \verb!"owin"!, type <>= X <- X[W] @ \subsubsection{Objects of class \texttt{SpatialPointsDataFrame }} An object \texttt{x} of class \texttt{SpatialPointsDataFrame} represents a pattern of points with additional variables (`marks') attached to each point. It includes an object of class \texttt{SpatialPoints} giving the point locations, and a data frame containing the additional variables attached to the points. Use \verb!as(x, "ppp")! 
or \texttt{as.ppp(x)} to convert an object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a spatial point pattern in \pkg{spatstat}. In this conversion, the data frame of additional variables in \texttt{x} will become the \texttt{marks} of the point pattern \texttt{y}. <>= y <- as(x, "ppp") @ (The conversion is performed by \texttt{as.ppp.SpatialPointsDataFrame}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) Before the conversion you can extract the data frame of auxiliary data by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. After the conversion you can extract these data by \verb!df <- marks(y)!. For example: <>= balt <- as(baltim, "ppp") bdata <- slot(baltim, "data") @ \subsubsection{Objects of class \texttt{SpatialLines}} \label{spatiallines.2.psp} A ``line segment'' is the straight line between two points in the plane. In the \pkg{spatstat} package, an object of class \texttt{psp} (``planar segment pattern'') represents a pattern of line segments, which may or may not be connected to each other (like matches which have fallen at random on the ground). In the \pkg{sp} package, an object of class \texttt{SpatialLines} represents a \textbf{list of lists} of \textbf{connected curves}, each curve consisting of a sequence of straight line segments that are joined together (like several pieces of a broken bicycle chain.) So these two data types do not correspond exactly. The list-of-lists hierarchy in a \texttt{SpatialLines} object is useful when representing internal divisions in a country. For example, if \texttt{USA} is an object of class \texttt{SpatialLines} representing the borders of the United States of America, then \verb!USA@lines! might be a list of length 52, with \verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. The borders of each State consist of several different curved lines. Thus \verb!USA@lines[[i]]@Lines[[j]]!
would represent the \texttt{j}th piece of the boundary of the \texttt{i}-th State. If \texttt{x} is an object of class \texttt{SpatialLines}, there are several things that you might want to do: \begin{enumerate} \item collect together all the line segments (all the segments that make up all the connected curves) and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a spatial line segment pattern. \end{quote} \item convert each connected curve to an object of class \texttt{psp}, keeping different connected curves separate. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) @ (The conversion is performed by \texttt{as.psp.SpatialLines}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) The result will be a \textbf{list of lists} of objects of class \texttt{psp}. Each one of these objects represents a connected curve, although the \pkg{spatstat} package does not know that. The list structure will reflect the list structure of the original \texttt{SpatialLines} object \texttt{x}. If that's not what you want, then use \verb!curvelist <- do.call("c", out)! or <>= curvegroup <- lapply(out, function(z) { do.call("superimpose", z)}) @ to collapse the list-of-lists-of-\texttt{psp}'s into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]} is a \texttt{psp} object representing the \texttt{i}-th connected curve. In the second case, \texttt{curvegroup[[i]]} is a \texttt{psp} object containing all the line segments in the \texttt{i}-th group of connected curves (for example the \texttt{i}-th State in the \texttt{USA} example). \end{enumerate} The window for the spatial line segment pattern can be specified as an argument \texttt{window} to the function \texttt{as.psp}. 
(The conversion is performed by \texttt{as.psp.SpatialLines} or \texttt{as.psp.Lines}, which are functions in \pkg{maptools}.) \subsubsection{Objects of class \texttt{SpatialLinesDataFrame}} An object \texttt{x} of class \texttt{SpatialLinesDataFrame} is a \texttt{SpatialLines} object with additional data. The additional data is stored as a data frame \verb!x@data! with one row for each entry in \verb!x@lines!, that is, one row for each group of connected curves. In the \pkg{spatstat} package, an object of class \texttt{psp} (representing a collection of line segments) may have a data frame of marks. Note that each \emph{line segment} in a \texttt{psp} object may have different mark values. If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame}, there are two things that you might want to do: \begin{enumerate} \item collect together all the line segments that make up all the connected lines, and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a marked spatial line segment pattern. \end{quote} \item keep each connected curve separate, and convert each connected curve to an object of class \texttt{psp}. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) dat <- x@data for(i in seq(nrow(dat))) out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) @ The result is a list-of-lists-of-\texttt{psp}'s. See the previous subsection for explanation on how to change this using \texttt{c()} or \texttt{superimpose}. \end{enumerate} (The conversion is performed by \texttt{as.psp.SpatialLines}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.)
In either case, the mark variables attached to a particular \emph{group of connected lines} in the \texttt{SpatialLinesDataFrame} object, will be duplicated and attached to each \emph{line segment} in the resulting \texttt{psp} object. \subsubsection{Objects of class \texttt{SpatialPolygons}} First, so that we don't go completely crazy, let's introduce some terminology. A \emph{polygon} is a closed curve that is composed of straight line segments. You can draw a polygon without lifting your pen from the paper. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") @ \end{center} A \emph{polygonal region} is a region in space whose boundary is composed of straight line segments. A polygonal region may consist of several unconnected pieces, and each piece may have holes. The boundary of a polygonal region consists of one or more polygons. To draw the boundary of a polygonal region, you may need to lift and drop the pen several times. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") @ \end{center} An object of class \texttt{owin} in \pkg{spatstat} represents a polygonal region. It is a region of space that is delimited by boundaries made of lines. An object \texttt{x} of class \texttt{SpatialPolygons} represents a \textbf{list of polygonal regions}. For example, a single object of class \texttt{SpatialPolygons} could store information about every State in the United States of America (or the United States of Malaysia). Each State would be a separate polygonal region (and it might contain holes such as lakes). There are two things that you might want to do with an object of class \texttt{SpatialPolygons}: \begin{enumerate} \item combine all the polygonal regions together into a single polygonal region, and convert this to a single object of class \texttt{owin}. 
\begin{quote} For example, you could combine all the States of the USA together and obtain a single object that represents the territory of the USA. To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}. The result is a single window (object of class \texttt{"owin"}) in the \pkg{spatstat} package. \end{quote} \item keep the different polygonal regions separate; convert each one of the polygonal regions to an object of class \texttt{owin}. \begin{quote} For example, you could keep the States of the USA separate, and convert each State to an object of class \texttt{owin}. \end{quote} To do this, type the following: <>= regions <- slot(x, "polygons") regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) windows <- lapply(regions, as.owin) @ The result is a list of objects of class \texttt{owin}. Often it would make sense to convert this to a tessellation object, by typing <>= te <- tess(tiles=windows) @ \end{enumerate} (The conversion is performed by \texttt{as.owin.SpatialPolygons}, a function in \pkg{maptools}. So the \pkg{maptools} and \pkg{spatstat} packages must be loaded in order for this to work.) {\bf The following is different from what happened in previous versions of \pkg{spatstat}} (prior to version \texttt{1.36-0}.) During the conversion process, the geometry of the polygons will be automatically ``repaired'' if needed. Polygon data from shapefiles often contain geometrical inconsistencies such as self-intersecting boundaries and overlapping pieces. For example, these can arise from small errors in curve-tracing. Geometrical inconsistencies are tolerated in an object of class \texttt{SpatialPolygons} which is a list of lists of polygonal curves. However, they are not tolerated in an object of class \texttt{owin}, because an \texttt{owin} must specify a well-defined region of space. These data inconsistencies must be repaired to prevent technical problems. 
\pkg{Spatstat} uses polygon-clipping code to automatically convert polygonal lines into valid polygon boundaries. The repair process changes the number of vertices in each polygon, and the number of polygons (if you chose option 1). To disable the repair process, set \texttt{spatstat.options(fixpolygons=FALSE)}. \subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}} What a mouthful! An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame} represents a list of polygonal regions, with additional variables attached to each region. It includes an object of class \texttt{SpatialPolygons} giving the spatial regions, and a data frame containing the additional variables attached to the regions. The regions are extracted by <>= y <- as(x, "SpatialPolygons") @ and you then proceed as above to convert the polygonal regions to \pkg{spatstat} format. The data frame of auxiliary data is extracted by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. For example: <>= cp <- as(columbus, "SpatialPolygons") cregions <- slot(cp, "polygons") cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) cwindows <- lapply(cregions, as.owin) @ There is currently no facility in \pkg{spatstat} for attaching marks to an \texttt{owin} object directly. However, \pkg{spatstat} supports objects called \textbf{hyperframes}, which are like data frames except that the entries can be any type of object. Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as follows: <>= ch <- hyperframe(window=cwindows) ch <- cbind.hyperframe(ch, columbus@data) @ Then \texttt{ch} is a hyperframe containing a column of \texttt{owin} objects followed by the columns of auxiliary data. \subsubsection{Objects of class \texttt{SpatialGridDataFrame} and \texttt{SpatialPixelsDataFrame}} An object \texttt{x} of class \texttt{SpatialGridDataFrame} represents a pixel image on a rectangular grid.
It includes a \texttt{SpatialGrid} object \texttt{slot(x, "grid")} defining the full rectangular grid of pixels, and a data frame \texttt{slot(x, "data")} containing the pixel values (which may include \texttt{NA} values). The command \texttt{as(x, "im")} converts \texttt{x} to a pixel image of class \texttt{"im"}, taking the pixel values from the \emph{first column} of the data frame. If the data frame has multiple columns, these have to be converted to separate pixel images in \pkg{spatstat}. For example <>= y <- as(x, "im") ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y) @ An object \texttt{x} of class \texttt{SpatialPixelsDataFrame} represents a \emph{subset} of a pixel image. To convert this to a \pkg{spatstat} object, it should first be converted to a \texttt{SpatialGridDataFrame} by \texttt{as(x, "SpatialGridDataFrame")}, then handled as described above. \end{document} spatstat/inst/doc/replicated.pdf0000644000176200001440000145564113624161311016464 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 4183 /Filter /FlateDecode /N 95 /First 798 >> stream x\ms6~߮7/3qۉصM>mՋ#i_CKu ,KX*f TfI5̊9eeL*mL2)Q$xȔL&Li)琦,Q K\i12dRShe)0RHA_%$`Aip"P.EEsˌ玙 Ģ'YHNj & HsLY&%)ˤs 2 /6ոXfbZ"c ]doʀZRXdRdZq4PCu.aQd=R4*l N OdU ʉ)(' iHA9! ('%E&Ch´PgRʠe`hru^G'SP4<`ìXtrqaS,(0V2 FtD`SA_0h$Fu&Ō7i . M;S(goV`VU>@9#LSA6 i?AoуD@W?s苿3G;OY[ "g[ %R ]}/?翊r2tg౟[vx?/aגdAR͏9/ZA=$|QIxhBLOB=hJ!Ti-Fe\U I/Ң\E?B" ʂUHDHmQmAЧY]dOsU) {{/K"fi#d#UB)|8 Q{eqx-:@.lڿOe?Vrwl1~zv$ 1/KWUz7.~9fd> gތs9o{EJ~x%4{5,I m~:ֵɐϔ|)@)ҧKJi,p[tf;xxO9_M"QnN RtKL%˃(ewqޜan ?,?~| 4OM.:D5^vݖB#MN Djq?scZ-D /ej!ZiSݔޗ#uI'A4wWx*g+PHab$fPBNcwEe )e'F  AC%}~&h4>Y!(.5` WR `ȵBo,% d~-SL.l.W(-냃`;eaYoe$$>H_%am|ȠO TXT) V0DK9_x 6`YiXc=0t\,&/>>1O)??s~7^|1oe~F* -rsoaawZ4H1lF4G( gu -$OBCmSJUuҺӋw*+]&(i)CQR6]#xb, +!O޹ QJe^| C3DB PW0ohPϟaD|w!Pԫ +[KVC=UZ/D羛t4~8^ Iψ J1#\o)t]=__ǽ1kޛ уa>R7$${>Z|Dty~|.pSq?Uq7#_ 5]<< 02U^+݁ׄwFY? 
Fh% 2mKtE%sho_գ.mߖXupq @1h#K47{LG#_G |>_atA z 6= ( }Cͳ7pGVzzRh`ҫ6y/X/`Sօ_*&7HaUw4JP[U6`0;YRu+i:oP [$A=xrY-`7OHխQ]ZA okMߞ07ȅ\p‹oL2ߥF)QZKjHuוӮc|9MPBP2X^YօdmUwݎLL=+=mCIM4*= J$ﱏPEcلMz)'=p? bMUcIH:|9}?9dV>5 _Ň _#4Ցpm2)9n"L-v=Ge x~(clXI\ǐ/)hϱc,;i#Uyy;E24/5RQ.*ڦIgFJǶf) M>#doZbkelI6q<] jx+ 7pҗG?_|@'u}TK_TZ%22N!#R !IԦM*lLBءs!2t¸Uf__C> C4GuۆL] DM (y '/=޿B z|I/); "Y`Ҹ,V ]E˘u)jdˉl r]Q ƅ»᯹65fKn-`#&"t>v!#ߊ5F_^<ӏQ2OGʼwEюckkb/hbתB.5l b/V1hnD*p:6id x 5,oC4cA+xh@@[B{]*~-OEvozP/%9 vcw@ =9}(mk"%ݠJMKVr|yk51:׶F:HS@J6տ@"Bqz~lCT3UP4u94nIßtk6y{9Oe>m{s lgκ]vKB!3hl2ut[Z;~nla Kt&fs3wk?FSmi}͔4r`D4Fn0[rybG}W5ɪm&;N}amj8g7 Pu7p15/¯Oo~ J6f.~xcmڦk9zoWj_̜G䬁X)OTRc:3ugՋ< ?'&oU^06WQKӼnʷv Ef]kpQ}͉/hG'U~:{.MVx_7QZ`_o%|jiї]8SY U]G75H[K:Pv? _zװB}YvK06D=]hv)"#:P7cr޳r=Fyᷣ02⛖w6&TMoM"X8CBq#/t8t^`X؉MwC>!]n=9PǬendstream endobj 97 0 obj << /Subtype /XML /Type /Metadata /Length 1329 >> stream 2020-02-22T16:14:00+08:00 2020-02-22T16:14:00+08:00 TeX Untitled endstream endobj 98 0 obj << /Type /ObjStm /Length 1995 /Filter /FlateDecode /N 94 /First 829 >> stream xZmo6_U; 7ْ5]lm\WM58vf)X_KrʎcR&玼G{bc >8s )IiE,ӞYMuϬA?3Gy#PɡV+rWc*4Dm5L/@`"V/0UNsPW4LxGO,qFfK*x&T3)CO&%% 4::H+ < }ed0 阊- \̔!0]e % 0TޣRLKBPa# 6Ǝ'!Gp6 dIjŌ"u`G0Qwˌ|v(hjq):Č@zɬ?n Jr b| i@IkbE,-0`dsͅ[q(X8(vNƓafNl=`Γ^iOpP!fp(zbWEScǼ&tZ07 L 7O0b ]NGUt+˲"i,y3x4/ڜ'e>Yc *%&v5K2FҎ/i`Cj9LrȎ^>;ao](sVoS]R+l~i8DpWUiރnj=@ki返;!);jTD8o"{5Do~yg2Ch+f9 *D %X~!M]C5BrY|P!"*QkR׷02Xi-F>b߬8doVʌo#=|C!{ x1[~HW!c,^ M@/+OUt|57Jh|ڔty>ӣIJ|,_=l%C)pzCbqĤuVa=fIً'aP8\dڲ2 ,f&_.׶3 WKTvٌ$y2)I>UUH-ItC(ΓyQZQ({߱M~d>xD׸^24F'<^LtW4ˀaP2yr I #FrIw5p]cp1jtl6:"d `[v}:-:Ε)N3z"W4u&ǥ>al*%ufvdM% >^6eisQ 8{5Zo v[`FJ-mFΩUu@xvиOr#Two( +PjPWKNN$P :vIPiL~㣟'ǧ`<~b䵿yF^"#CAu1ЋM_=?|]Bu=>UnmÉrkݒJ_Ƙ"{7>Y-nj:KQ5D$ˊf6:OW=} ; w8@/d,[ɅkV^T5 Fal?Vjգi5.֭`E]]D66|BHn}Pc5͛#\}3n"V ͫuj>nrR{`p]]65tm܎[^0Ə ak٘  B `+BIX^gC.i݅l+zǍ^=NMqwۊD<%glq׬m] MCciJICQ‚1ˍo]+f9pz޵ޗ=tb>s{PN&0 [}ahZdO>y+-?&&mX v_>%dbV; B ׬e$zkSayeendstream endobj 193 0 obj << /Filter /FlateDecode /Length 3057 >> stream 
x[Ms#av7I9'aU(R"h̠g+JdQrJfnuuʌ2}vsȬ.?uvh)őL-`0zL.W'&/ QĖW>1FJD)Z>C;>U ޷mR6+=B-Zl 1b{Uoizj>7"^c`9[V"Rm}teQ-8;rz~kmȯXBBMY= 6P Od&k>Լl{)4XTGR 5JD ~VPf{QmCŨi0dP1>7uS6} 1 xG>*C..Zg3 ';x# *` enrN [y\|3KVfAKq===Owv;=nyMy=v?=\hP?}Gʋ? Xș1L " q2$us9RMq}L.r>I ˔Di_Vmg*@ӰUÕn,,|m>[$ɚϦMRJn{ܫ`yJPs"[oF"sP'0ۆ4[JIۙgf U+cK9(Zs+Q+m]B xc˅Ogp 83B5~J`J'kEW-ׄG͕E 5hNhsC[Pt8?5oiU[<.*{!y/i&۔ۺc! BUa*Meho oFj Z +N[` _qqz'R XTx]f8gyOR&Y?^/9~A,, k/g }g4 b6?aV  @M]Nضk9xb D> /lxm-9d"ik s0Cݷ ry:`BU(PCxuH~|r@< -\]#gJx}[kkTAFԐ-,(>LQ1.FxC%맳/hdsA tdRc˂.7D.#H#m9u.)~KHn tL@zYFrq&,C8Wܶm돛x+d%ۛ2#wFkԄiu~K8E< R[.,2^Ė;.ődod+mSfj3'Tяřfubޡ}o:|]@6KI݉[) =(*%FZ_+^@0 SwD EfkG|p*B UY2ѿT|vq-1mN[-< ^M 죣< ٵ-8xkϯPStx\RZYƔL9f0ѯB;L8t[} rYA±K>{fga&qLhf8rCeuA}EKqWʴ q!=|G?^Гt7@'.iqoQ8bSc!a_cSc2y0n[SZ>8Vk=ߴm2.e' FVLL킢;=51 26ľyAL?PEÕA=2$[ˁ۠XtXy|xs[AVL:6sx,zF-S =O۷(͹L>ZR:{P@ k] 8:.?/@@"{\XS9 %pϰ{8w&G1O/9f^D|qƷ?@=z$tyOpOhI.٠t?k'<߿H d?:Cc,| ¦Mڒq潌@A\1[=!invo/%$Vm&BC%ȽmXaj#H"İ& V8tlsv̢`n;tTSQE]d8I̔ CZ{[&>/~;bXܭ}Mث2ob9j? ݁Lf3_) Ѻ^mx9khrx]? Jendstream endobj 194 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1856 >> stream x}U{PT7J8z鈴E>ƒ*c >xh/e",`awY}}.lXXu0+(XL4Lhx.=̴W;M?ߜ3s|}C1I̬UOVI~a H8o*ujW"bΆ_.yA|B/ $KHHJm*tDY$u͚U$WʵE2i$SSȕR)ddErQNө._듥ʲd0-iD_SHerm@*IJr`q+?ʠUxDx|EQ]`j>dzoUb<:NCM6^Jnl;\#rFaoq(rⷿѸylWɮ+犈OGbטd"wg)"y8 nݐv6#\d4?7&ྦྷPPm=}y97+~S(j"Bj f&mT}uB Ѽo{L}B<x7-plh~Y?0xK‚)n L!{n8lk5e5!Ow_q赃5bLϲ21)ezr $>)BLv/)vNK&k{,5A~Z'2Hi+Rn Px`̦2_'r7%3 'Cy^v/ixpظcUj}9[¸tX:-X&7a̢ͦ|87`Vf]0x/z=5^et4l:BZ* ]B4{6 PcXh p.]l*P 3w΃+FIӢ,M[P :c @Vժ/4N^<9q6(sWczTP>OO݉Χ$LׂvN_g@iTE"җ[Q6#5aW603<[ƧP+ xGV߻;1Q:UV|SN.ȃHLſkpz ~҆nCdp)L!zi?4 k?\S0 `:X0mj:km+2Ab]bA2Wgzk8|b)_;<-ĉ-ie% -^/\Ym 6@ !9'ca\D># ZO3!c`ԕ P{ m)~8Onwx1sjSN-~'Li~ohF5QYX*97>> F0endstream endobj 195 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1442 >> stream xMyPgwY\V AQ7V,\} Tp A‘%$`P A3UGvxՑo;݈g}w{~߻88p>3q~ ?k%&rZ,ߤT6:F/JB,+T0VW&.İ F, B0,S`RĦaӱsbx(2'R\繞J"5:>h30?/a{t[tKWVDPnp*s(N`ikttd@,-A^?AӐǢsBb۹KfDf=YMXeoƫ|>}~pq  [4ϼơzIY$Ă(aQbuB;[֕Q|=w !JR'F([b*O p]53}3N0qk,>NR]eX(;pnpp@b4~" 
959̒nw 6hJI,7rN=P+R5:P;8}ka Xko[N(7-uP vPDa&7b;Qk~|uƖ_TKaRQ D ɹ1`jC|Wn]tG᎗wΌdt'o_.*~UjY|5pg|{QzhsjȲ6ck,pE ɷNq=/4Lvըne˖RȠrkv4 :iǼY6X::p]|DHG[^DPG\je"QSš&k{Þ2z(oVl :aL&ӃA/ɦ A=6vd Pt(NR4FslUj1!`,L @R9pZ`KOp/ĊSZwp] fHo@]^YRi^i K 4AfZgo&d2Oנ|T$o6C>m574M1@!^R!N(Bڑёa$AzSvl< Zfr'q(nwܗ˃#C:>#J7nc235G+I$; endstream endobj 196 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1738 >> stream xuT}Pevx&QFRAmIj"!-jW8>Nxo!x`h4d1M㏮`of><I$IS=!0q8;ʌXA,Nxc#vlC$Ǣҥx1R.!(J5Z /_X۸1~b݋/nTlVt9JAJt(RlŪz+kזQKhtybW+ tUJWڧHzERR͹fQkK*"UOo42a ~BM$~Kl%';bk"H!Rp"H3y/lK$<#ˆhN^ҁe_I0,j~qF/l0>~TJ?<6{i< ep X~]qc)3_0`K#pL./FIDLiJcog8 Pl.g.ҙU"]o;+]ᇳMAQ͵/w?NA #%KD\s1?tu2:+Mέ7%r)C}y+Sv>fno ~I:lb{1ůw,Km˩Mm׹48CYElטcRß!Zh) q& ]¤vCQ<5 a0/@.Cjqf 0rq!CςNx%WQW( ۽>yR ꁩZiԶڭVO7goۑь 7pl>/u$ p xNHǚ"g57a`aI.>yib"(Jc(1] UPm=nhYv %wֺ͗";nնC8lM#Q~Wߟ/SiO!7+>ucz y$[ZR.HLa{P_M,[vTRI|h(BaQY1`3WkUU8ފ2z?Wu%լu 9> stream xY XWPiK@M5ƘQPU@wDdO7ƮֲRn\IDĘSxy6I&7F=s"cz`d2`;ӏS2iX59ƎDט^㤂QW}{6 B%s#ÓEzٌ:f̙3&Li3+w{20/06zEیGFFNv=gDH0gP/O0^6 ?(<+fYWH0\!14lqK<.Zm*g~;7Ml7eӦ9㭙owbs`F2+Q*f43Y2k8ƅϬc\{f"YLb60 Ɍ1)bf*.81˘Ɯ d,%3%cX3C 3gz0h L,l|=.yn=1`V0WT4ث׽{_n};?bmѢʕʽ jÃ\n9ֲUcH!$yG8Fɚ:ɥi||n@ؓMJ$.B7TA}Gvrz gŊdD0;hF gɷ ReFF T Vēˎjڹ=3}XkR[1W $ymg`Ay K2SY2@MqhhgpaR)aԇ}r`֦9W"GʹtQL}g¨%O}OF䨦Xu;|yZOee 9JN19U:g/E2֧on%-9omYK@.?b T9k-Dy >['4[bT>)vqƔOV:ټ[H-L]BJ(H \zX *r)Mgg]NdC͆i`&IIT%V۵St::x6<]eqAg([La}JyG#Z(HϾ`ׅB VruMNArkΉ ?:?agת lzЂV"kF`iC;`B;+&=!=7k3̷zF4R"8kiEHo;`OqEQ>AjzWʡ{d31ƖCUǎDÙ|>seB J$L ʈAl*9~";׭u-isUf تqTݕaRy=9Nl7K$oc=HYF hR"8h@+%` hw!{J(+.o.(EGNJ؝B'3c8dEEI~>ۻ`ю8Kb vS)}{ڢFGedAz2AyKVmMת_,fUgs]cۯsSHdϙ)ۓv͵Iߒ*e=2YCu+g{<6xIKEڽ-&Pc J7Nb`l1OtTDmޣE, Ӻ2 u{t:ae(6o[wV^Zu\Mho`@q{'zJ~q; |Nm$r/~pȢ,G#eh6.UvJG ж`k4*矽R3Z$Y/p}BLḨp.PRW;O186T$Qby<.9X-+}WlƴDo_Lfd0" f!CnЈ 4&3L] VzWm["Xe{uۇ1# É7q,%8^VUᕞ$XCI91w8hPUs<4К*sϩI%J;KrPU'Tی.Ӧ"ɐtl(HѦLQElt[BW סIUMOCXqMEcc~%`/0KuES ;hJCH4Qdي6d$Fb 8݅Ξ{|r Waӥk((s`8!|-eSW >c/i%?O)yV/=EV$㳜 f IQŹEU.n::{a܉%ԇM誩Cx i:(hsS=2iy2q aRl2o1ɢ[BW,c6:oVWxfX'm'W:{ROud_!OM&B ?bҘMj!:߽x>Mި5<<)`}RG ~qp\C5:ܾpHҕŵ+`ӡߒ_ 7}of6w}YQfs-Lnhkx #"qߺڌc) 
7FVlI>Q8L̊f=:a6%T)^t1y#5<A6%q{{[ ܎ސB>$?55*w*Ru:WrJx u]5L~[O!"VKDo>a܉F;U؛W[d 'D9>|=!=BW3RQiH afmeA?-n~=j)dt֜յpɺ>K 顠}pix1!^ؼeUے:F,TjPPN+ʛJ"ƖSsG%p\;/uĒ.c0de mmlx82=Q)o߾cw7ugeTɩLM'hE!pD%[YYTXX .15{%!FjMkcX7Jmd]31 M XbХNNt3)c 7zRTvݥ{KexrmIÒ|k(u7 _OiωSBcvyK2/ѹ#'3#L*rCΗ@np9Σ Hub_8umI{Fw;<{mXNT3e줽V!6jnAݫ"|!;JMZ6eزƒŦ eyF-al"qL7Yf k qGIJW~Ɵo{ܭ>VTZ^}y{B*H[=NG,GAS@kh{0DɁB;8^8bw wDdI|u % b%ڄG8ӴGYLGvC26EЊ=Cx7hVۭ1^s`pN"vANi2%4ɥ>JlIPPYF}?74v]7o/84Wu SZ\mp; (n[&1^úVSoi`CCYEe+02s:aV;MBSWT͘JN!>\APCҾpp8x;g;骧b)ʮ1Uz恁*b+="] e&*r?+(H7eށlŚ%Hd( sYbε uM\O3]3mB9 E}-!ucU hJD0k–N3mq?bqКv+ qa]EAgw܌I;9}ts]Q8݈L fCksf<χ`27m#D(?h!ra==Ct4o0N94I1m2i@̪?>1|˅RtUvXSBwCWNnGzIl_^{q@*܍vc&K(5 > stream xytä́b22B-$B ƽބm"[ґދ$[.6.@h/!!;:yl}y[[兤3>{PݻQ@4IXJޖh53 ?z W݃l}k_J(% n?}3g5B-S[ Bj"D-&SK)Rj*F-S+J]*>T_j.Տzb?eKQ *)b=ՃE'Nߺo tVVwDz-e0o|FEמTϽ"z zozO}׳~Ciy(.}s؛Y˷6yَm0b 8f= mp_: i{,0뷽5԰j8\?wsvx_ D0&~NP6WȿݶMWfFC4()ؽ[ۨM1*Ɵ.Sԃ Z5ý͡ :G@qTRA0Ok/ T-J(zfjlsXd[aȚoo"5ؠnzb=$Fu#J 2/A.?{&a-c!T{`-խy_CdVxŬ-M$+ d$wɧ("]<ȡu7,2 %٧-F-i/?{+epa {og^< |DZ @Z/^q05A뎐C,zsKf@9YpB=4±}`VTop:K7c|$@w _lçj1JwU0Mdϟn.R<qrh NoLϙho<Wئv՘!KVx#^{.A\pEN9m^RS8_]q-t< D3G@f *. ěv#m)*=3}/ϓxdf\8䀺{[$ N&CzGKۥ(!>S&$*< Ts:JD&Je,ɏQjJԡP?ϰ&\`!PэiRW;LotjZd->cqbR!/&#-8 vx%'cW슦)hDsnV>EzVfD#t(Ȅ ~凱h ECER׸}_t;&Zk1<*:g&dA!?g5׸rG1'|>:VGUS2vU'BԊ *B24iNѰ\+QR!xv [͛zC_wr%x4=&88쫪Fv۠B$5ǯͽU#EYU7)do7=г=S=6x95g@U86Ύy֠ HHTʆ,[ p5vPBZ `pWX&}hF|ȇ$0 Qw4}F6 _'ּ]A X繗V=x~\::*1laS~9,) K उ^e"LmG`i1q2Gѽ4J׃?-%9^*O# -Ѹh+03:. *UnSK*rd-{t#ng潧g8sp eGcowVw;hfiŒAgh1u-QxZn)ݤDNHHJp-_h4b]qof kuS7#-`ydGIǫ ų9T^ao. 
wk]ilgV'a{B'8lМޯ7=F%䔻3nP?Jb&y*u$yբBЩ Ue "ҿD1VOmժL` T+ P/O_c}݁=g!n6\tQ:F5E!;XT#2hAS}̄a`nϣaޏ9ID?ާ-B.>5*BB]yW톏oaz | J"xfg~|QNifYq|G͕suJpOI\L/06 =Mv;3# Uk`%KHV0ia#GiاCaÂͣ~*^\0>hs1hDgSj/ɠ|dS4hΚ I^ &HR&#R7%(63ɕ4Gѯћ0;'4jNlla'"#9+KT%%$rpag1 :Du; Ϋvz2 ʺ-\j _l1U5dJMmlWEn ,~Ӵx}~)SA6LV9n͓Θ2ݣKrS_ɈT&J҇jel?#pN ܚczUN(\ŵg5ڣgQ72<[r{$&Ea?ny9s1yk˵l-K 0MWLE.((*"dW?;ݫê˫"YGsFemfHdih X( p`b;ľ͹-p߆N0VPK]J } 4iG*F!-/_7J-!jM&>ɵ!- .rgj$J}J'ZUqH(E*MJiNd9ҢћEs"=XxADl<C-HӿCiJ3o~#aDk9_4LnO tAڪ>UAli6a9!m)G]LNQd7U;׷ !RmMf *ϫ;O^IqJBt^Be;l:^#/X`/$m~n(mfd"A^@|'`98|-bss^Nu¡ҿH`ɫ0!$%'4@_E8,J*/D\D8OTi w KYqXsmKiL%M/ i)y^ȱVMR̴|4OEgt|im "K7ዧm9oJ|+TT_sm,X7TE|=F#o}WdWBb!S,7S<6$@=hJ["?mhw4XdU'D*$.YC$5|OEY=UJ;K!ZgJ#IN'8K|B &71[~G(Yi'N-HrLIJP)Ϗ5fy̾-N3OX<$@֏=ȣCYGL+Ik$h&;,r;cȣ89&;gVRDtnNI/)DHae u-7wlՄ";m,9Dxsh#&- EzJ ID█2IZ%4I|L6#-j8v Z-+K+2Vyxl#SZBYDw}c}M0uAYx~jҦa]|CBu9Dqij50MK|$>P$ ІeoyDy<^٢[l9Mdk#-.c EO;.~$?D"~uY Y'rE)b+zr8-/:7^aXaJ`[aNUۈH= @_,ᤀN h?:VVpog8İ}HgmRwC1-.8U>!P+cO>1q{sGiZP#/N[7rFV^߳G,kbFȃngNٝ5g>ؑwa/}<a+DX? ݶffw0/V(HX|k9#O^%Ʌss wQ;'Shn);Ѱ D5a0A}C=uDn/vK[ۘwY  ?O|O2]zyͽ?jv!Kes~7SЅt \:WipE?pN']R62o f = ]9ffoZ|SS QC={~gXpZ4+j=\\OUE4 |aUm[<\ڞTn91M h hDmQoV1k8#~{m:F9}R:D/:>` MHIV)$G;-rWH`gD;}+w~kNBn?|}jomwaթ+8sBsiDlMI01wjNPp.€+J76x-["9wßXD5MmmMtH'R( lA/1b:~)0TvxkW:to B<I?85+ŸCk4o~7I&-&- {N>$/oхG 6_.5>CF4GKu:}=0'ݯQZh2˙,l!'E$KK'JƉ +y^>dp_?]qhX Ҳp]zGVPi'#OMW%~ <'9^xm:*&OKyxOqGP<$=xZiObhwh:VMB~i>"-BA(XZ: c|BN{&x3!rΖGTIAAi.]E9ު"Bt! *KEz_|Zp!@f&1ZeJ؁?klW}BsSʃ +-++uj}&,]ғzٰf*4+Z,hD.'J4Sލ&:ܓB~?m"fD>>O}}v[왚6Kf,f6b9/ωI_Ȋ~zCUTNeBLTA"3;nk;=[gPlIMvԿ[)kc:Wf`7H!4x^o> stream xYXT׶>bc<jj,5Ʈ (VcAQކ lz3+V,cI덉(FKb޷f>0+7%%`ܙ}=t z>47/V;߲斻=d=A}hgu@k}_,9놼7ACЇs9_gN mPW(Gߴ'<Š)XWܧ]cDr"jT½K+Q}tbyTR%]R s2 PbSXgEQkT:+0K6:\F&F|i Ѕd bZz}'Obc\~xqfs/?A!!LڂR+'80|ł9"wP?! T/#Hsuj^Wըa\bDȧ Dr1{י˃|_Aމ@? 
B T s8VPv%-}omwu*p> ] ;VN ZR+:*Xpog M`Og w䇫Woܼb?qü|7B&XI:N^9hY0#FcSYnPl^Ch|/I?ƙdTSV֊E"eQOhbL|"#eI=^ / GQt& ܆~1ރ.I0d #x};fN5]knn;V40N:F9To:p''#Ye23v7v+MURf|z8Ӫ!ݥ!g#AEWu>ݑ[oHIfݍTH>" IC͵'G>cծĔ>q~IVT)$.&4"|y#A{ٔ ې.F,!KvZـqc'g&mDվ\zFz*f*C2yPײ7=ow}2%i`ZQ-6뷬!P¯th`([OIsTy4@]y(XtЮKR0* N|t]-]Gi9ڪ)iL Ԍ{凕|ԌҊ˙O>ZmmBFjbj;%&#-oɍw@ |4E9)NIQ"^z /ޅ}:v/9ip3u&Xna(1|}3HXO1  k߳SG}a^&p_\~zѸ?f)4?yC6ܯːwkl ̻?Rk홴PWq 1q4b#rff$ƒ Tk&gnLvN`ѥv$u )%h=ZѡGi,;BmTtbY&l[WNh_ǠdAɲ2_GJFLUh?,4eG:/붘tF Z_"C6G8^  )eJTjJn Gx 4Вu.KbgЋ0ًeAV?A ص.N;|ԙ֭޲ʼnΦ% mua&2 %ӿ">1%u! k7<6ye3~Ѽ[K*e/f6c6'[߀^E3uWbmh|mMkIb͞QoIsz*/t<xXu@J0b2M#0}<k71`(*5"HR*bj%0߹0b/X\O_ߊAu`.o&t[.+lڕN&mF ٛPfVfH̛L3Dx92,hovǤp6 W2NMw;:KքM񏴭:SpcLQhϟ?x lx3V;t>}aFJj:aXV9B亐[ή)$y`c{I:dSPNsw9j-OBoK$`(1^ƯKN:yh@2Dv؁0{+%!]sADbn-rǯ1{^Gt`tT+Tbg0#૷IGFu\.=@BNG FHݳ۷GG7}/"H$8\鼾 IJ\;E$#+M,*;0>k(#pL뤎^aQ;$k$ 7;ˍ;PqKHDJFic՝:ďñ?9r+k,F}q3﬍.T'fqEG5E/7OYa#oJ?ܶ u !?m[{an/KK+ZfsY;jtaK?OэM'DU;N'[Q+IDG(4A( / /k Q.QႨHuTj+JEu娴0(S-V>FNQx7p)K1@5>}@T0v4/% ~ n[lXw%~[!Ur3>֋eP$o,Mj'<# ?\gY:yqpO.39/Bl!͗`&!ǯ@5h*H$? 
REuR1qI_0[ D| @Oi=8œ1`hGBLdR#򓒃 ᪬"ڗ.ӟ̟fzaǶ4BVtXwjBx>^(gSII^0h/KSSQ!G?P@A!K`u!Hf:G)A;H_=iÖW5GZ97`G?*:/xy<çw/pq#;îU=-_bfF݃cS$7uߔpM;Botږ:%"-UҔ}з̼$r:6w?=h[JpqzFF&i+C2EW"}XH ux Kd {z *9r靈}q֛jb3=OU(%XqH܂ߙ_0{;_DiyҶn/b%KG<3Z;oZ,pGH3GLFLm`"-isvkC=*Iou a: ;$N(%FHMe~7q\îz#Y8*="EQ`&`7slu$:~|\}Z5AV l^VA6*c*Bcwb&DơcW4a2i>W߿.'8x&)yb;D& 8ԋt'yL`u ,*O 6}Q[5Nj;ΚJTgM&K\`ĺ[Z#KPEgaix-`2Uu[>Џ4B2|CmyMƊϭ#j=vԄlĨ JJrSxYǻ `UEMGu9aju税8ݞ37.5eeN2G6ЈxD+12*>2)l~LP!_X{7ڝS<3:o>u Rwf8q>s$F-0Nwk:I- iS'|0KRM# @S)<*)fWh"ѡƗgvg[}\d3RRyG,7,ϢBgч.endstream endobj 200 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5066 >> stream xXXTg־+"\w 65AcŊbE2C&E"z&"el챬u71Fn4Րsc d}|;}32Ƥ#X;M~.{#ɶ&A#~ ļ`.sÇf M`a?te2= v<Bb`Sӣ&v*{p?v3}ldyiwdߋ-WN`6 `@T;@^;|QL0hfeer0^u9Cz9xKAƛ+$7Ul6k>1iG/m׎Hma-KuY c.Jqؔd9 &CMih%`N'uʲpvx8sp,:ڢTK(bj#c&_i{vN/=,I״n/28\^߱WwVY\6wa8s/cu̩ʥx HH'ϭۖEGɈ4W+BȸsBTlx2OلjX q'ay&ƚv@Zawt͛ }1 ƹ+* E-N?W+ȬN|@5pW+ϠW6w>>]QC6xgXb9y掅,1AXU8R3ba>K#W'DjYg\+!#2QqBrJLBRNpdMge\NDф@T٬3EOHUG$96{luMIIIЦ#k%t78o~9 #,= ,s(2KvRGղ 8 јh@Ji%r$KW:bd */N8#B1p5Vd:ȧ"_45(w{EQ eqO2 #$0-.E[Zç?=2/Mq)JIIԦH8Oj A44ю=8ƢC}~Iі!۽C2DC^Z !'(kkwKhS(7Q6d EO@lV$$ݽt 9k'.qIj5ZeJJT$r#yV/.%ÿcqZ rZ{4F:$A<GAB9ԋfhaPmp*E.ek fkSϥX}lX>{Vw*XQ{Gzl&h7/6 .5L|Cmg免ѿ7f"#_~N+XD:UD]ZY%}94Bsam:\WwgrqzNRݴ.o*>MSqN8^T ڔDee!th-;25/+=ZfES7Ewh-(ǿICIAVwe!Nr_+k/8I;]=lKc\VUm?&ׁM̬^UsK]4A5y"_IF͟ 9:F/DƅCZvѣiT"w'0z.y&>ۺPsrǏ8o^#3W0|W|dؒ)J6jh.m8ٽZ:"yAr@sW*<^KIJI(.$Cťj*[&ZaNj}]{<{5U0a$2)-gqGτO?m~^5^ 5YD.+'9TPA Hm]yw1C\۔܆|pŋ[ɐR!-=Y'hcՖSn9o-ۚOt8M=*ĩB>"&3 /vũYBNNV^Emi*h%F4{BcΡ17x׺z{z746diWEP2cڋ6WJ J (k}-4eߴ I:f%M,e>Q} R׽)w{ 7Y!6F D⋖FyVns5w<8JLl]!D& $a 1Rl!(/4"0nµ3ʠDRzN`$&roh %Z4csb*Ǘ,_oa "U+B!,#JۤNM!}hв(;EzS+0");OQ9(xΡ-ʥWLh,+=S.4lK/Y|\TJKBc gIGF7G}4ɾ^ S>> tք YvYi*}WSOy/LdJMWKU$&?|dtk>4ǿ^G/s S%(|dk Ms٬kΉ+"m8f /)уS!x_ d%Y"TKK]+|s^ӭƜy$P˩|>͛>)U+F9> stream x\]s}= |\v[|۴Lb뭓Feu$щ{pXR*%e!Z_误H,Ώ~9sr 6(ora]^x[?M\v)Z\ۅy":UsN(!R➈k6>լ?F=JFSWhdmY6F#)YhR7 1:v5Fè?ȸjgǧxlf'%5%[+[kʷ6%:-+)#b}B%M0HtESRh -5=6ץ9]=@0]Hk,WsQV,-5ц皳]ZmH@(b/|#-f&'BZ_fPQK_ۣܝu"h ʨ21?ɴ"5fncl~㋻&{HQk =z 8Mod.7-/uŒh`yLguú}\ H 
cbCAߛH#H!q |0J68 BL>4 0$sۀZ/A>Sj-6[Y,,j2A1&"C6Qj^7Fk v̆[ aމ % ZB"iP+W$`6{/ȌRf(QPHr1Ϥ<K)Mv mg2&AaBi%-fj![h(Xt]T)(C 8P*0"A LpuMg%:vȳlY1HBrM5pĂӦPٞhts~N5q[X.8~]"=ɓ(-/*K)Rhom,rPwZäm: o`Dj|ۻ *kgi褏 qy{thַ1ؘSd"MdvN y+mCtp=eD{@*mوZԾ6dSM|O!ɧG&Tu!e:u|1tAߘo:.gd X^luj.œA%pԋ_3C H˭L)K[Bo<#!t 49.jCvfOȋJ-&(Sxrך}yt@R*%JڙJ0Lq֙*iwU9uyʙ "( {9ܧځPӱ g?` vc9d8Py>1l|C$rRښdw.,aC CcAv!_M9#@2[33cR5kS&mtN (} UK7Wlsq:ߜDf4嗁 SрjFܥX$ղSe37J=Һ|VY<j ܎ f#f裴}6Ŭ-*x[KCp\zCsz6o|y;u$yYZ,]3cv߭03Rt@>JFi}$٤ VSIKRȠFaȲCuZ͓=Oa{E.<}#CrymL a}Vu2GGs^!\dQ}dN^a}cij5ěe QL."p0x4З0Hmp{Z6p݊i}8!w:r˂TЄ)/8J\9ʵvSMr'X2TW# V88*8+·7}1I80" mS0"uG^n.xopg]N>4bf;aNف'٭g7p whł㷉簥`(x +| #/*i<֮G?V=ϯC!fG *S~2O2r?%pmp2q96C(%\TEqxW%qãp\Œ^ĪSG|Nq/:Ec7( 7S0w]H; /gl`«|t]l݌, qk9%Ӆ3- KY> stream xur0 _yrq/ xQ~s/W0p>ڋKs!w\:/{#cL%ٹy_&APb]]5lrco.ƟA8M"$$IxJQgAK*/ bUт 4٧]_þY|v)iWԹo|w/I| nSpJX&$-[`׃k+R-r`k#-yq%ŀPpM̮dWCDvK?s0 Xa1B$MVd*poƪ&!wE'6C~oy?` X"-9 oܠ$s&Przw7Iފ6B%9 a@0cbX HΟ`=11XP!W6t6IpDQ-9X_r*ͅp*|'THPl@!AV  V Ʀjk2@ĐI1E(mZHuPJx *3 Ԏ7ZAqʂe10R ? SR\(JEjx ږUzaaÚ-~H ,K,M;.[\0 C(Z3#(&끡 ^ ie) 7au)T`7= ]wfviTz(r'RHo_ #x:9Kd hJ iߦyHcQ f o>I;7.(BYkUļ%<o{2~3e^M+du5{/@$=:KȫMEَќs[̈Ms@':[|/*+V5͓ OUw+t .}QM 8*]UgqpƲT '@r*ngWsڄwwJJ` HT*eJ!e2'e_p NiZz-Г`BR# "}4J^%4`50,Ȇg)(HޔfƷtFAvW5b'_"b>)=$=WgW'c; :vג"AQ}Hم3-,HͨbU~dO@'4"Qą-gbؚJEJIs*ULE2g~B2+x'N]\ʠ݄U¡ eCղ Sbc1`б ^6D3`[*[OMQB>V]+r(>1>p׾{[b]ϑ~50TLuvnȓC2fr)z< Rȫ= &SnymG̢ "h&<:ú'Cw"E%65ס$B䷼IMYZ,w"aA"4fHOJ( +a]rxf 1LډjJ1Ghy-LtҮF`K~VUv&YKPad""*DVAB]"O^>E%&deY(1fKIKv~?~*\:d*H3̓:t5iTH r> xvMVs71-3MF>L̴'؆_kHu$v@x٥,;nDÆ #sUCnSm+tRĢb=ۥƨ('(zQ{zt]?oU;jMc$M H%Z uȟzb6|VTE*2}(R=l9c5O,֋ų2ɕ#Қm;W,~ejf S]IJ:@膏J.*#%E94$^%jxGi |~<s, .R+d-Y7ԃkl6[lTm'`G*yVo*Eum%sxM8i}>av P䣥Q~.R9,djjC@F<c5N' fpS+oK)xꝓ2xX(qR@♌X6F:V%.\_6"@3W047ǾG%>{8EwFY kYKJ'۲jVbu1"SLqZSl\Iu4 Rwul1 1Z%0a4#m2AtM6~THG~դd [1J\ anjX*6?>%bD$Myr*&oQ`1W.p=f&^kg "7;zorאT aGlEVM]Rgrq!Q?P_MiWVC֫ü6tv`_>2N&[є#H =(\+b &إC෸1xhլ蔦QIFp>[^utPε2+æ9І%JEp? 
6obnjtB]0vgE6& nUYmw&+P 7ֻ 9ʊW$ 8"Y;Kt>kl3q#e#sKϹ,5k¼͸>r*@tژpy[}I̽v gIJf€ k1t6vɢC=)!xR)2Z׎UڡVS OU GD3%FEsEh\mg{u3tDy`Շ]|͍`"@og([,Mad-=jW[8h~[]^|Fs|~l4Hx(䬦'lh+YjL 7 䰑@sx(D1cm DE8-ޛq9%Ց%w .R {)}=`C$#.GUKh` rvDtFNI]k_y-lVעvYh4!o"@>:'WG/Ǘi@Nׂ nqdIɈ{ԋ^.XGYDc$< ЕVVC?gՂ`xwmmchU!,j"n3X3x`BQMW {wKr`jSQcy1Tݪ/RewD#\ .Af|Ǹt 9y=ԙy#gpqdOeWƫA;d ؿKfk gnb3|7?"J~7*㗇/c˕z|xRū8%Y\fKM #}BxW~NMjk2VoiѫA淢a;n^h%5{gp3/~M8ČJY]up mwڜ}_.ʡD9/ ؁J 'bl~+,bcC5 \.`Ik gVٸyκi`)iqׄ ! D쐿HPm7 .Q׳^.áęVᗹ,%wG,eF9wɣs8*Ca`׻GyQ13x#E+&Tm@ȹ5Op$ͪ).v*a`G[2bx˶6m"ES,.h+('nGKcuc(͚ŒlSOs9F)]&B&A)GqTĦ ɢ]9DHb|dendstream endobj 203 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x> stream xX XSWھ1K#v..kcG֭;J-!, |IξK U[-vj短. huZۙy{{!Ǝ!8θUv_bfyd +^9+E2%@ąIcBoBQdfjW% !sb,^y!O/Z4dEJ$1&J)*#!.%*=5&1.#;d e JQ) R%/Ν"MH'Ɋ y9U9*%.dt FJMgfIB6IDA,\!Z*^F>k4JJv̦q'lK.ܙ+e~f4ob A03̞iN?17u՜h GK00zS&|Тo@[]`7bg;ht$eHUBCA}*K4 ;:rg-rA-P,_^`xLpP4E9hX=%TTZ@1GdmNTD}>dنZ]*4bܭ@fJ]^G]ۅBOWCeJJ{"9A!O(TZb2yTժ> R2^ *;8Man>/>3wC+hoе6yɋO lx|gECyrUуAÝAMhi1=ơq|*'͆d^2X6d7iO>;ds_263d>z¦dM6I/⶜7D- Ciю@⧲|hȓзSd>HchvSZMaPٽEA4˞S}&9h Oy}ŒQkS kgqmFىt$rn o:,&A7q}Ek1@<mEQ"D-˧K)εiv.n\`%m{8 q6s^By|=|j+K%l` $x+1Se\5jD4 '\`s[̕EY$ ̂<)AU@V27V&!"SV J}:m,4↢RSQEk,p<: t^5L2̃tZfA.r.҃Z T:eJ_I/ɔSتM3Sy^\>M94}Qa')?xV)9u `&%Y9FOvPK;?\WszC:M u1I-[|h¨K2Ľ%U՘繨yWD9)*3X59VDe)u^0VFN ֘=u[_Y H1ԗۡވvC/@7LVnM4E+lOa6Y(eGZZ?L;jPELE.H`n.-tij"N[gDgzۚʝ j|}БbD ӠV:3h9YQhHK_"Gք[͠4i.MRf![e7NtAw&M`+*76DJϡ'?eH22vMKեYxneq;U'[Z>mx~&:K\tQԏ?ˋ)\qn}e7=@aN8 ,s^ײϿyXРWn4x#/4,9l5;0D . v ;kq5rwv^lij|?1fZ,WQ 2_ax,D=h2z?0X X#>f21]UoL<L^+:\ahT|N7Cד'b)f6x VTLto`ksh@PV͌ٙ%,t;#Ÿ?  
#qF˜U Q#5':韯Ue:zK͛OTT%:v62C>Ps_~DZM5N$츄hI7itҧ )|֠a oΆ2F6~C?T~4F08 7(vWKg#-kP l4M֪t{s+4Ho,ǫ%G(,vK?W7>760ęRHQ?婺|hj< z;fl>Un^{ʲvwAtDRH)Y.>p86z1I1 7x+mRGd 4ʰxP̢*kA5v%iBo^tiq[ -)h8l??7&tbz1\TJ@?]=Q;7nGc#1L08mGZ ȋ;w)=8P/ ^>D?ۖCPy"xݧ|vZWb@xa;`S*yz*g{0}CRgҖZs ݻ+qASsF}ĠAD0J[ZN_D/pժgOdޝuV#0#]%*K~#cOjEOQA/d2R"K,MTkJe -jW/}C^L6("FhTScZmb{qa]}bڦN(u+ w&ē^6XMfL9w>' c Zf7]]mq8{H\0ҤJk%<˼F5YhN,i.ViF.2*-"jT'E WorN3u k/[^Tc#*7(RQl7LP4-i?t$&]e2Fi @3b=춈]Q1ԶryBt:8(wvװ'\\tpGawX+2ؒ\vgF~{ٷ/\ POJ|pP9'EP~`_lr(Vë%P ᘾ^/bo}vHXrE,;SLepj :\.Ц)wŃ?\)+3hW9Rai^E/ٸ7&nA*у4U)HuȊ? zr2ocpns5yANPQ(dGyW_FeEk㱡&Yv;-'qrhW\HYPi9V}D8w>wTVOPStEX?FQ6SeV٨J7)HrKkUUP?N)'ꏗzq xT޸Mj z-'L¿0Eh,8ySwR+z}Di6Jj$TR0;kO(ieiZe&xw<*;#'TQ8U`8Wyex1ٍηc^>;g@#fII [nqU/qR1EM620ìrtsF&\HMlI\+iendstream endobj 206 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3780 >> stream xW{tSuO ֢;*(0"JG@yMi$ͣ-m޹{wڦMo[Rr By∠=F㺻Ro234MNRTz[>8=32o&ϺW2wS<ȕA{w蝻P\S2[*:=G ]YI!?҅xOkԺBUUzTQ-2RzcLE*~VWb|S4ZEUTuth5յ.H*,-ۯ6JTR_ҕje%D"Wkamys ;I*% '%JIHKvHJvJvK<'y^NT+\I\rrR/O2SrLtƝ32zfΚ1޶OuY:[o?[;tǺֹusgWV! Eߞo$䜜npMX-P2H\x -|Owh|.QvV⥡__"vm@7IӚ-sFRpgXOfY}zPG{\dԗ.²BL<1 D2Xn;0O:Y4s<LԲ=0˼U\'x l0q!4g2Z Z3uYx̬ j?{v;1:ɽ= @+s&}x:6(Eg_rBSC3=LG'_G_8)9ofck@,ι!?JAsS+ ࡫)o#x&>g8K$z(sL60Y>r,0S]"c9(`nw|=0p2w~8!,E^0@-*8./o!w >T^zCg~.2'djI0OV'"u)EYf 27 I}S xBl;>g0pV7ov}F[| xWq#/Aa-5F ! 
Z"-J>9v ay^i;)|r;lGG|-*7C;qbdb4۸VL|hVBor5P.b[ӗ 2/cUR<ٗLii5+j%;L(!T/q3> :Q_oGR== ٪jH4}(Z~}8NM~}>i&yHqkh"X5KحiopEȶSpUTiec<Е ZIz>jF76m*nh"uEY벓0G ޸}ȾnmOկ/U c< p 5ht5{N._ Xpy 9,X^KWQT?#:ѮPL)E'z}^<5o>6[@zR2˱7p';砙SZ<,[Mc9KO@ǀaYW‹F9uc1Q%D>n9zCW~ 7h Ss}|Tn^{{fI^2֞ũU |`,"]x6@[=ub*~U9 NgcX֐`ĒrsH$>0u Gp% |(ZHgfݭ#aZexq/pŸR!ѾTSǎ`cb^p:`Ἳi{4TpaeǓdP݀1uc>L"sY_p1MK~YUt5`:*n5no P x~A&5+kNm>0\Xz|SL~:V8oKGod.coa6\~v=hif-nN:!hv8 Eq$]XeGP mny,2sT`Z!Xe_#ޛC)ʲӶzk9b-#t<TLqs@@( 7ݐ=Ϊ*Sك k2%`&|=^eC)kBzp<~yD;ׂFsoxIдӪmJƽ{2 vNbmunHѳ euM:muf #cYe<]XߡgaNgUfgGojlvH3B'Қ=M5;0`?sT~zwebo~:bQ:p|'ENJ:(a@Ltyoxz]^5];LOAh/H1fAy֋6ikJɭ[6|7WxxWpXԣh0V| CGNًp׊nW\d@wMrxF]:b48!na)F;kha{cJJvIhڒ@TOGW ʉ XšŊ̅R}Z bݿ I(~z$*,]4Q45w!PL}9\Z0~0n/'MXX`/>潢 _[zF\Xi C}crQRz*cItT'G[ B|^ۢgRܗ0Nl6ĴB\.hT574pͬ*O7|Fdaq|HC n<{v?(i(t?S=瓾8Ȱ0zI[ęջu+0CKLKf<> F%sG!?!޾3Ds݁%D%j 0tz+? =[鉲jB ##͹z;9gdt 7W"Oendstream endobj 207 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1663 >> stream xU{L[ǯ17 sUdS4Q^ӚLSyC̫6ߏ}qcrLxe ZB)]6mզ6]-zL.QvC6MO::~Шð* 1bwwط^l/Ǯ >kp-:-9hȒɑAB$CG4\8 3N\4Lx4hc 1y=6fx8fFIt:[ θ +E.C.Jl,&Pd*Ey&C]$&&̠]zf{/{VNeo:cݡ6QRzoXwz];󪟜ӜLQ:Nr3жv$;uWw,ܘF2t +5_vi 1414lB#>  ,L:+l6{T/&3}SQ24 }V-B VǛHW'z8Oʎ #Y{yC(inIƏe^l$3ΌG'X7|~+rNj̃dz'5ZiǹL/8;$ I]ݮcu#ٱ)*6>uz&8#xMӢլK0KG?dWe;Kl^^^<3|K >AZ߄ha}Ã>\cbWauǩ']\19ؿs+OtPNpz,nN|dɗ晣0OowXp?`\x6=5|0{sG,v0dzSڙʚr\cELubRH)^77Ib>4aGFtC 8 * KqL /MfЀqٍ"Y{@KdU#d4x06]=s6O |8\|%Hz-sC8.?<mtgu>H"A+}~ @(bpMH(@!*epX^Rr{<߶#ho5M]6uR=;&|(ӛ?Πf dOᓼ'X G9^e.OMpo#SÿEƻ#}~tK3_u *\X{t4$˸q]T&3MMNgU;*W&. 
dV˾Up뤪lI[\i/~AyurL4R\-mNl?2T5(俁`M[endstream endobj 208 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 515 >> stream xcd`ab`dddwu041H3a!nVY~'Y34yyX(&{+#c~i˄E% ɚ : F Eɉy %%@NBp~rfjIMFII~yy^bn^~QByfIBPjqjQYj[~^_bnġz9?$H7?%(/3$5(1$4dPRGߏ.E !~,u-ズs蟼{&NJu{3B>;3]{j΅y%ݵy _8cu:)ZUr!5_͡_XY%ƮvɆ)s~^d{,ͭz7[8N/:A?ؾuj۟c :ֿ rϾkKe>I<<@A iendstream endobj 209 0 obj << /Filter /FlateDecode /Length 4543 >> stream x[Yo~wV r !<0ȲҌ"ɻTdöx Lb??!N._L?oN~}'sQ{Aof~ޝݼx=]n,Dj؜YhCO %czsuP9OsLpl|yG$q <U{|=(f'nLESu:Ft(7zcvkW8%E64~:鐦P@'l':mЙBCI!|vO3xκ= g`#.( D ~!h8Rn ݩa04}^/|Ǜ+HLXhN ¦[ ) !J Npuޜ}A +!aWeKk9[,Kz ]4p>xfSV7AID:AGUs]ڐy-,2Fg Uo{\hF:ofSr0Lߠ!=щLr[E㢤2S+_JdTb`T %U+GIR {6""\x:g9@lEh.C 6fV6@G`r D! po^E<t0ʚF=!JTp6r(]pped&Zr вޑUREy%h˕JԀ[Te`~jjFI٘=mhDޮVy:"D2"g{FuPuxWùlU_l?h< /^&ߦŃT~XwuÛ:pR P(&{&TЍ&Z[mz1$PGp7$hhr_H bA?6Y ˞xMl2$`@r{MI]W8dICdt&W㢠ȏd\obp#@vQaDIVj)k|Վ.\:&T+_1E\3wt^8wj 5N%9GoON>˄ٜJp?hrdǭGiIp&H ;xE+u⣷ĠW>ȯLE"{:#X@7JOξxq *Re$]z 7 uR"" ZvU 3LT: wR 6HJ\P5#=6>RmbM , 5h=[q , deMolrPyrVNjcAuH.pix_ WuõVsE pӟ(HΧ _d$} >c$y% I TR>_ % %Ƨ`زI&M01mHX0|Z`gLzXSK 'pf.Zڇ\mޛy&-+ ;.{2*IqZbT7 [yr18k47 z TLi eۆ>&'] Z,?=!DzD[v·7Er$|vhB{=#v[L N?V3zc]`ܠr.@֣nh mzp|߀K %!E{6BN&Lc | "؏>/BS N*\l IHz-mFє I:TqSR,Bil(8W5:ea Cxz*J{#c(8CQJ`3J\#S69Ysi9Ց}ƶÙ$`$ %N۹4)6 F יpQ/ԔՊыOk* _-4i;<@CթۤENۛ.OrHQGU8Y7)t)f@9|!h 2۔r~U>Vn/ۧm* og(wZJ$Vj҄1s]=.{y5(+Jˮ u,C+H2Z̸pN#Y/*l%%Ũ )`ۢ-6|L8GY]дNC4,2fKgP#W)`lÞ>HWpU7uvav-hD_U`ؽSS`1yIfВ rP`r LH̸(@c׿}V 6#5L$ Q(m@Q AAGX@c" Jw$>L<:-Y}\-Ozoy^Z_Fv9T8okUfרoR{L HvIj9^U撕ꖰL,D@MIwCU=TU}_rLNQtpUzMl|jPذIUlfJqq6oO[ab*sQqܷI|lc0@$0)Fnw!/ `o7-&}[K[(V&f PI@,,> M[Ⰶzd!2) 3&W+LR9s`2ɶSM7MJ9zU8Q"P ;]smpa2q{ JR6% Zyp!q%oKmLARDZiX؁vqz EydG5 ReB_vU"/pZ)6ii2xux>yܵ/1Eurjq:t[2>Qox#-ev[|2rA _U\j槜Gk<~$7i6b7|'?ތZqM1*("s']c6u7qڎϸ wl"Eں2Ė&MFr&mN& {{f:'Ft?G`7 i{c cV!OX2lVYlDrq, z%+|uP A@f`]\`l` I8M3b"wL6`+w邁Ln5+" ~0ǺͰz0xSnEN,c%?XCkeCSr8f>4o>@]Z0#R--%̈́gdaшK0H%g2钱sĘV[GDy Z<(~h]S:@h|G=󩴵TF~3˚ ar[q20v gpUk^4c\Fſĺk̾ y]$+bA"X`b}W4aH*k;߾?*Rg gDAI  p3꺺> nI'z~evaS!v;c|Ȅux+'ED ]a\\^üqDL}-ӛ""HߤHÞ~.o1֟~b=@n(L=g#:W> stream x]O10 
@UXҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|s^[+x$E b'Φ2?M%K17MR{;RB|DZSyendstream endobj 211 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6734 >> stream xY Xڞ3j]; ΠUU۪uW 5!!!$/ *Vݔjm]nڽ3}$Aw!(əxD Yr݊L2 1O4ŕAߞ ß=[? <^X@&%%$f>e֬çN<+8QRLtjĸLKr:aLR\8|̴ٓ&LNɘ(%3><')31|m\F(;.6|053|UtJ\P'>aJZVf(|06NJʗS7nIۺ0ѢřKfD/ݳBR*.~uBڤumHޘiDŽ&MNMn##f?œFmxIDb51XC@1B, &[Db 1XD,&K4b1XA V3U<џ@ $`b'$!P"=aN/ _b޽{+"!^mBHރz c;oJߞXovc#hৃ~\d蓭p:R84zhw~z'l^XNOK]OObTl[>1<'~#ZG8rH Wӏj76hqR 462Y7g!{N'lR6r|4ed=4R}$b )BQ{(q/z RpǏH26<wZ-*73`3彃}?r3 4!GcݫoxInj._b}p2Kwn/ ƣȍ0RP_&l'~H9B Q -]E@ph ]]/DrɱR#w.twUCξ/ ڕ>KO˜O.ר?A*$oآrl_>KnCGC0[&oXlw*삭hs h  Q-` 9A/7pj "@:"]q+ӣ st֭}MuuoOFd>~*yo![r/NH䁓ى*]#jM=( C+HMgAyv atȭRTHkA#0b-ox펷u2uFr'ikIǠ[$JP,HhbgYed vyr&(2jyͶ3r%PB嚥)|QՈ5j$M)#-;I8+߷B dUw$lESh44 fǓ8?2sCi >lW#w%[($B07^J6%6J$\̵{jK{KT26mKg|^A!X#Ŕȭ.k2a*~5q00j_VAC6K5&(jM݈@}%9r\)ZQ"@Ut6CayrGV׏FGϾ1j; %+d2Pn}RGW̓)Hu&Jm˅Է}oxb #^4 оɷ ~L sKt\F I=wy?Nj+)-xERQʍ|"PDIJ@5s*7*P 6рU9O@Lɽh%x[//62e=AgF |ږ-ʓ&l]eZw䔩57Mˡ8'=HH6v|1w@00P"t8vAH3UnxlC , ZD "_ yt\V)̸Y;5{[祮p d-@cg:V( HƏq؛wL@89m(=9~@P^d7-p&=u,ґaڪ<@E=`` ~ zҘl_Q%ɔnknޠ8[8v%۽HB*YTnF,XZ %m )0=+ù6M%'r{[\A.mn ՒDǷ44=*9 /"PϏz+inyDfW!ۿqjq8p+.ӗGF8gHGE4t{ S.#b,? ¬$FwkN%؊8||Bzc7_ρ8o|K =rrޡزN@ϐ@aƃcP!MnTmio ۀ P^9b [iE$*"w9XXr\[]d.?bH8M,7~9?;cVD͋dGtA _~l'+?wT]ǩBeU#RHoSQVMc,r:vW4VfFlgI}z\5)ۋ d83Y+חçpӞ_a4ִI)'.ܿw5QT#=6`X=ruxeų1>ŞP3 |FT眷t+r#YcmYuWAKkS=A^oEBV(A>⩬6QH*^ܙ (ean6ճ:s#(KA` 腾/_mްvɘԾ>]Q;t]n09S(Jwg7473h}q&@Q{(F Å+3}~uphIo߈kZpOz[,zn:au6۔֠$ݼGZ0-?8 f`eSR Z)BSMU̱LoMl/csMdʐȨR@v[^cb߰"ԗuA `n^6DQh`ؗs'BI'w iyx6;hS! 
h2eJ`{W}7!+aThjfx=ȫ F^:ް݁(X4HێBðͯE_ wûOݯ4WfVo-wŇJ-xf!knRPޱv@u J7h<+eWǀ(ܺ^Z'FdG$eg6쀢|] O ,&G1(BLf`N?5kN]i\/.TFqҗXo>5}OL%z_Y*q8].jw|/v[5p1f۷PZzҋIg\O0?AT]KMhxw{G8Xg$ZDHA,Zd'`]wn2Ku'zIy"HLlv<7Ĥmm-t~Gf5o\-{j;U-1HQ0SR]U[yCE>=ICz9=ZB3|.9/~+~__>4} ;d}XyyPxIj'!kJރR6 d+0H,9jd5*QȨNiPbR8: sؓIcZ;% Vj7ޫL+ْ"JHE;^^겵 RX$ :*kZEgU10 B p5Sަ7uhD9_j<%ss[Z4&t:\go/^9*Q][oW9f4 ̐`uDB ycxRg[u$˲v|O|cYsBʖa: tEwGv jQԂ,R6]+ȴRh*R-E]p} 89˃}>]"%#ٗ ZhϫT% ^$/ܗ)bJV&[,FS^z .ʬ6ѺUWR݀ҭτ  Ab>yh@„d(21:y-/<bDYŁ%֑M\'נHo{x 0s nR:UnTOd.fcNz M?9z넌[.*ei76߆C_w]Hۥ}\{#X ~Gq;nS*VL*)ni(;ٹeB,ZјkKȏATԆ8+tT[vFHc09';.څSQMif9h1xͪZLu.#܅8npk_?#\bӈ<|z=Ty 8m^wC P.{ 6`*Rv:&mZZotL ̄]^.GV.r|31ejx#l`1X5RZʒx . tN2E 6O im 2.6T.,?ӿ$M.ō&䔝 i{0=>7{~]Ւe) e6jxkH;cn6Vϱi$jFע^9 ^s@ԑȍE Io0L8 Y`t[o(b!jJPT6mzTma_g,uff~Rʅ`%XvB>p/<>t^0=^(.MV@2}"%.F렼R-7 FG-v'52P[Ϡ(:WQ˔f5Zgx 8bYA9(teŠ2":mv~s> P6m/ ^(P"UNDPΫB=BmAhJuHFsA' yú枪fy' aS-+)5ZA}~aeQqb1#Fƿo+SDX$G,?F}USskVL@( nqYZkm23zf{?8}EuKiQQ44,{dDd+֩6LR.OgH><$*.Wr(yiɯ';w'Khn-1( ^ޗ0{}Ceendstream endobj 212 0 obj << /Filter /FlateDecode /Length 3805 >> stream x\[o\~_qhhM"NPltsI+ke' s8gŕbi(R4ϐsf;Arwn%˕L9?N?`tr8X/Tӏڻz|}rt,F%~H:BoN>#TjԠyr ?1qIC/D\y᮹-Ϟ&VN`10 zC[Ujfm4q:4J.`/j U]\KԂnb\o/2 '׷GazKp#Lt=R:S6.Oc)a:}ڏģaBV^%xtl}TҊBd>+ {l 2 PN1}f mb9x|:!D%x&=i y`|KapEqNIdUDWM64Kq)=x{:ܐ}8'IʛXтLy o>5@:Z\'"Ne&n}gь$D1` m>TZ`!% IU}jQӯUE~}Tm &4,6۞!51H IHiE 9: ,`0`햊eεɴ7BnCmc ~ݮu˸dl&w݄3N ܹgK# )3xuKbP\6&.Z#[fx, `\qeX#Cg 3&&v<%C[c.S(o9FkUbX<|K Ni70 ::H/]R`˧VFۘé'ǁ|qG팜Bttl]8eev2g/lK.>૨GASpX ~9}!߂q GH g\r?"y0lCc`t,> pVk)UB^)jJ됭=RQT=[d$W79.#+m%2f%yi HZI/B֗I]-[ӇPN4qg \k8Hz80HpWuË:lB`b=n$?iMh vԃYb2,I:M * Ys2'QS'2 |)}IF'mx2 T% [$pc޸Y n ]bKfn+tC8@wyL2F+"8K!jF>g)r y 0DMn ayWDǣ9p }T}$ACe^HʦY']Nm$ԘL?k/Xv֒-+Uj2=/C&>;x`L۴9فf.C4-V8Qa.E\!sQr KҎV BnXnv,d}St![Yr [OK< CGX\Jq^1,oK, W'}=2Ԕt80ve3$p2?8"=NԘI~#yd Qɕ =p"0$zT^:d.WǽFb;@ُ`Do i_|SO:+t brqGa^)vC#r g"8!<|{!UCyBu:)vh@H'#PTI@l):Q)eS|EN歷pqo*Y|xٓ=Vޏ=ѐzE>MT.[L=ӋFj#ժ[$y o]4Zu'uzC5m}Qbi ͯŜ0-j~~A_&1a4cpk>&?Zͩ6X/}y71ԏ~GvZS1-n la~pE{dA0ol` qHL#γMck~mȻYMf7uXO?x;}ohu%됽v6h}֖Bff5O|vYg{7~jh 
cy6԰CR~_eK,?T1n)Kv!9xg vMITE2 quhP&X>,wt`Wg_:i^ɆLgu#^ZwUUl_;/ls?co묪i?T=m*)諦kO1g[Ԭ &ӱlc^5Ξ}TM!sqYLү2o*-f1/ꬨcYS^qWnu_dMbf`nsT'uo~M~\0}-heϙY+*4MS,,,F9v&b:|OgS"LNU L8u)Y1,9[U!TלFNi=weH0~H%CKL?d[²Ќ),*>9`/CBa?" iendstream endobj 213 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2629 >> stream xUiXWb:%dSQApqivA#hC3AZP!l"@HPQ7I,DM-5I9?z޻sνO1/6iClzBtxԤݔh#mđ L:) 1cFegyf>їHKrC:;n^ kU&NQEeQyǦ%ħdžؤuɱ)A Qi)i* 0*+u޺Ȁd&O:M0 wE fBe301fƜ`; g, cŌ`%c2(>f%2IIoFFtQ s.l.'"B>krGT](Ek *S\qJ}mӉJ[]y|/(Zp+7#` Ro8|> GN]"fMBx5%RS'. h΃{25$8>G@7A*oU]K@5Kq LRAQJ mb#(A^=w3{y˗z>,\xGRҁY5Igv=* A@{ʐ?Ìڳ;]/sK8 So 7GN _#0~Ef  +)#:S/lJ& L//X~!ޯFMV$,w0qN*wXA÷􃞤 D=~/{p E4r/J 6flVYY$KɡLּ/ُ p'#5e5eϓVr}M1`)Ni>O[4_ǒo/=a pK~Nk|TV@|e..;Is/:j=o&zXE<+""}&]U/4U?n5_Q*ȇj) 8EC,\Q>hrV9iG{Jasۋ>!\[M; ZJL9/h9O!w8Vq=!rPU`U'3vG+MCޅ]+^q2C }X[2ο' kʼn#S$c KT_ϭۭ-B\DM'p/C<488zKUtds>-:waU82TP,xͦ4hIh( OP|D質M=ו: ^`Z" |N>%mo6ct&C}s`uJv&frMxz{/pW\&WH&wGS)C%t #,-!wigd*,-.&di'f?|IghDNJ[ӱ'`.h$ZSOou7:S(}#mTZT_&A@bTT\KC:K;j>$0-?$|[N5WG|`q"3v{flLN&q$`sӻnȃܯJו!TV@lq?l> stream x]O10 Ԫ ]2D! 
}Il[6|/`,@[ 4ZebY8)/d+x@fw5|VK^[+x$QESb'36 7h*q\B in X3 sTendstream endobj 215 0 obj << /Filter /FlateDecode /Length 4982 >> stream x]koq_q?Rr]O# I l ZVOwP,5]SxoO]gvClve嫳xe)]U뢵.Ͼl~ݻ .M?%.Okԏ!mi"}flmxw"Y>]=)*}FvҙOnct rlyK %]ݽ6UKĊˮ-.Շ]zK#پt;S|}ϫR9Q.MwOI'|] eNWhs*V՟1&zK}vm ܵ_l716F?o/wv4÷yG\yLߙ+'V|Vvia03OMma^÷5 ΏCbH8.=g4D+AԋhqĸXt%{ۮ@9Ɯ `S9 cȱb~z|مּd AsVc/_VX (9'WoOW_mWH6Zq,rg%+\6: 6|~-ZҺ2Qے.nkgo "A4ވh3F\]gMs״uNJJZ*,Aٮ G2ZնTM?qg A 0_M+:s"y >iZ5t<6m <%/mFEr_onUZ󫻕ɂ͕2؋_0`0 02Uӫ7NAf{ʴܘ`Bom$Jp^LE5/Agy\ֆG@ԤrQpG VW'n\u:H*[ƞ2r;n<(sn_[3&ۍx-v}q)ml)8"ԸF+Gԝ3S{}]7U3;k7smg:8+AgYD~8EMCAI2^A~ is1.fG|[OWw.1'|.gȻ{ẠDZj^S,a7>TrR(6Ӻ@n܏ҦWSW\M.eGvڬ{I_ ]@ubnLo'6϶sX[:V^<$'䜴sM oÕ>,-e '56g2)M\Lro{rȥ;!XW5$:Rxl4zc]F"I$6O2xb[Ǝ (%F`W2KY 0fa_*cD!a^*qu8>B`0%s]m=ZVxPAߝYò[ZO"68QX/Zw2B JOBb(*z7Fb_CJH"0(EV2%APNn@i #汉 .%:`^4U7l cmpN ̏W_1Pkw@_6*0EhI=9z^DEle(0L̕$|X4(ytHjxsɥ0qbQgDbV*h2%.;#)!\\8yxW0-P} 4!)m]}AiDFÂv:2 t4Vf*Ţ_ KPV73`'cM <@i ͫlz y_{C,i)jHQm3y.մ[$=H 3-nj%JK&qᓟDL15hSkUv2̞(9>[P?bڈ\|) /QrsQ3\)TJ4 6 j ZO\\8b+8LlRE]\FIxPѹ(3JQJ1,L~e4LZ=1YSJeL`އ7`8 `qaLԽ̖P s+J0Y,FVfi[71vsz`DZJ 8e$Pj'ڼFeJpFFj^`TToS>Ѩ5*,ޝ jHw*g:T'֩B)[ǁmH8F)/އE:*ƶvT!7ad=3S7 )iO-mqGC N Χ߼uB4 L +D.W(y(܄Vgu0İ{=gpp(e\5s tW )/+*mrys‹= P !!ӡWϔFkD-'`0EQGrUM~\)1U4FR=*L#ȂsGW z(g0 58ဦYaB32n) a֎K(9 KuW)%q"!1--'$Vu3MBd2 X@ѹ 8ob]uT^Z0J)5"q(2r'~GӪ<*ҰzB8UdBQJk90Ҟ|?M9;mY$d|iH*idf%lxdj ƻ*8^&I2 5Q&VdBt'3*B\d4D2 P@@^Ty_ &9C ɶ*^E'}L)k)mDcctѩLyAQc,Q/Ex'#Fdž p"6XY')$ĉDj ʄ|4 jE3GQ,'Cѵ ѯ ^$yў`wppЫY7$RR?{ ەPC2ޕG5"Ђ湑kZM0ofd)a$(iu|y(2J(+N]Q̌.skE U)-Ԙpր Q9A? ]~8Ɇ'#ޜ2/ .xEQ0>qeO`TQN0EIh]-C%gaz2kZrS%sv2i>Ni3"r6UZ|fkLJ7Rgon_ܔwߓ}Ӵ{y3:^}>}IWm~y_g8yoNZ}rt>/N_nj<YbzG?rM -ېS9}dN/x0VV&M?KRC2endstream endobj 216 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3009 >> stream xWiTfDT! 
, AVD@#%nj [dq\"qcEEE@~3sTTWn7MikQ4MEƥFĮ \ 0AK B8a`@X8BEhv=HEhzVS?eSN5w^nOS%296:AaBqk#RƇKV, KHύ_(&,_.u}آ i+#"cb-[Θeg4{)Eͦ9ZJQS(jr̩ʙ \(%rS )wʓZLFSCXJJє5Q)15*t(sB&Mn͠j5MkVZ y+y;-;ՌQ2Lua#|.k0lްau tv-zB5R cTM͎C*9$NJUJCP&2 qzXKPhP!@E X 8G#2oe)͂y3] "8.`e1LaxYZG<qm Hn6)P$x B ۮaNMX͞-Y ɶFn'pW܈84؃ rZY##[shxo7۞rjJ> .ܾ,ЯvJobW9p֮!*vn.]#z;-~H/HEQW9F:*8RΆهG4Dߠ]痌폺~p t^I=2=8P0k:Dp$ן Ư$ؑ=_˚/wm܈-٥rAG!Է5'A-pZ-~AhЯ}K%ԱBd%WK ߕ ڔ_LҌ**sN2il,i 6U;`@b,%jIx2\A wjK ( :D™ ~h8|B>J;.^-KN{d \X _fFen:vX6.r ^6THAGPΞ6Wٶ1)%6.@ĹGQR{H<7o_!/.dk EՂ TBgjXk/r@!._C>+6ggcOf`KeJlG"|T,>_d 'p˝'zG$ &8/ c9%jsdtI'#+C`tc-ŤZ8˥aHkZ^4]wʖ=PEBFRÅLLb%9+aT_K6-xB{s.֗K>6~B6hXʹ 'V6;!p~EwxHcn+y.?z`f><~t mz7TBO ]+܇aJ7Cm.2 u4 ȼե1Qꁉ/c*gKV><Ɔ3{kr@0P 6KsNqw0鯰Dt< \k~K+QrjV^ ֝nM$ΆTT3o{=7 ˟'[Iuٚ}9gU?|H Xsvጯ-9uuy `[Msmpңl??M1kSǺ6K=;b)w@M89D @ūBG p~*1[wX{ 2=ظEa@`I!0yӧ۲w#()cendstream endobj 217 0 obj << /Filter /FlateDecode /Length 4463 >> stream x[[sd7~_ߘϠ%EZ )*E` xx~6}ёZgtx *jK֑ZoOV(W?yƓ˻/4w~s3``m R ٞD6_zq[tQ۠Uv~Vzc6W8] z>YV; U+)Dl_p9׳_:IYe|[&{rًɿk d8k8WVJDp*Z>9މJ9+8 r}<,|+D*m&>`ʆpN55e_&DkFs4f~T^sh1Gh2r[3)rqB0>#4#e!&Wt{QNT*k%@'=ogr1pC: 'D[d\$| ?)ۀu=hFߑiPIW.&&o}Th8RK^ d p۪oU~Y l&{`ULNg;,nl&N6nKyKYv2t|1.Z# 2X;ik6hHސDPܭA$`ugs[ݍ5=jq7]v@o?,10XUN#neP.6#L,EqTÐ;GYϠe5|@B:C;ž,]tB } t6$q`ntʱWn4/XCy;^jȪ)L0f=R"/`AYa'WiŊnMpݭRRS&n١ );\-ӑPс ۱C3@ ZԖ.,rDfSIhN ^kW]E Kq!_mۦ(_ e}i%;ɧg9D{R;<}/ix),΍Pn, /hwm a3H*V (LMG/X|m9q$#x2I+mk]ݥ l[5˘SviM ^!&*t*E!l:4Ox%BIE  $lӒb;ЖHZL,A?E:4)mM`W-]1 oT/u'c;xh$}rbS,|-NvQ>coԲKap[By!?2GA '~qe˰vë3;,"e).;0Le Cyn{[=ݕ|Bƥ}5&Bb_Q,Q`68_ 'G VxODbJl+=3h(>+␊}`ya~/,Fr:%N[JVRDeN3!` cD IP}ii+* H'T󪻺PLJIt"(vW2o",(kRDBfdav8820SdX PRߨ&xg􌖁׻d%s}IŒ"Ԓm?eo:1#.6)?泦sħcm<a+uV+y.1_,mʥ`ʠ~Xu<Hr-9?M!F?HR(88`ej 9Bˡz+dqw ~ v.¦(ޤ 2R%u`XO'Y"="Hr1@ݍd*x85KjFmE#of8k\a$jvM ⅗:2,Tjy? 
v|4_3C/`\w 5Kg,rA邈|ȋF6yTIBBo"bp.1Бl~2fKJ] Bs2#ݏ:`­T<>-הD*OPxkyIQ}qPa %!8R" G}B,XNX]Nd|VfB2{3ҤD^6FνnI` ߅,"IZԳ218c&bj]~F>Dwse(ҿYOzEzswYkH88@GiJiѝ>n^J^_\L&'-ชIu 朣ٺq~ Іj5{5Ǥ,w Q/)sj(ˇ,FzY+sJ^[ C?0/z* ORFX%&)M*f7F><}Pe]ՃcfY<|F-꺨LѶKp!i֖[w%w!whVؐMΆbh=OW |YWu*EJcd d.pIJ~ C(-}9T:F핡,^4ՐɬUFԕ`4ȴ:̌fm_3'Y8=&Px6z4ϏrRSȾ ݛTzɓƋ'Us :YN e8=YvFNDk j}cJ^;.=zaWwRirK|Qbq*dv|M-v;kiz_, ? d]^o5'A=V&-=^c+yyb(~CF๒jU QCvұ|_.~*6#U[fpU0mlOS{}p2kM<Qդ[ s~ k;^)5cź'0:N7TK>y8)}/q~*\jR">R*+Ć-Cl&,5@cKimr6țIbY $"ǻ-+gp7^5{\]=벳:q"[Mg,zcy'\A+lbFw%7 QBWc<wCc16ǹwyD_xw ԄrZDӼ bvl>/M<:zrB, *t >wU5v}B[ʓ@ oe“ `?qBىGJ\^s05y5?x H{q?+R y!*3xGr=KՄO6ņ\M]/q5MUb1 a7osq-3J #_-k3j侑+n֥; Ga jd0dR& |d~;<-?,{RϏכbǡ*"kHb !JqܽRBjH|T9H )uXOjnА55Hf a.y"+޿fņ͈Bx(Vx9_uƟ5w| bo ߽;M|u73VȞendstream endobj 218 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 0 0~ 8ʀ0%!tpw'0^GGpȍ%pu[ΖXUsm!',31ܔ<݀wx^̫Ӹz:)Θ!?&SmgM*)J79l! 47I,|J ADStendstream endobj 219 0 obj << /Filter /FlateDecode /Length 4351 >> stream x[Koy7!#Hqb"%ťIQSٞ)K ؘ鮮W5?A++O_}U<>=RG?i;}y L Ye}|> !h`c8>yyd5zSuoy閆QzY'9zp֚㍶w%^oRqM~ޘ,zҫOkZrӮBb[U:6n=V>6C#ƌOjQvz?n+ibx>AeB2aW687j0JW/` bOsRiuX5L0AbkIm5Ht6g=UFGeS!STڢ>t"aAAaFڦ!Wvk7V?SK@&m{σ2_g8h Z )NrwDE09*{paq I{'Ň`$0åDD[Țzu%g<4&j30 `|N~ v'L߬s&31j+:x)K"T\1CIbnd4[:fW +>IN)˼MPx(|f>q|8ͽXŸF!ڞ +g, e AG_rFK7 hɈX9 avQmeW<{\mҷK\JޛKl泃nNC6qF=؅h%&@@76!EWg򁜉IWAEY7|?V }aR&iW?|/S?g{ )hCR ~eW{OYm 3¥H>k|80B hҶ!3cwl\>/8QktQ/y r93bweNbMu 7H@is`ș\H%Ou,fybf5ޙ"F2\d uʽʦM^CxҞG F֐ х.EJ<\=~sH{[ӋXkNKi3 gN?oKBh !E8| rlá):Qi֑8F۴^FAJZy%(i/(M)<.]4L,<^ju~wC7d#UQ)` l] ¢b~ d$RƁ7hO@G';-$&0R(ϴF`eh֘a+Dgh뛋 zދ)Ey sܨ(#|qFu|Þ7gIӾ[ /6>zu!֏j5ƖjckmOf4I;^{O*$w*}Jra~`F4R%O!9W2V Qp2aœjR AovܰLv1)&u Pf2\nbmm%]kwDKF%KSrChJ>jc.aȜ+{éu|KA#:org<2ˆcS;! 
q ZFo\TҠ'&*> NT.g;w-yhKm:OTk'.PcC&C5$\ujh;25W|Ey,ئ;Jީd7pA`T`RZXo&"3AH0IW_@ՉFo7N+;lѳK&ÌZ5!1,ilM`'pMo&׸v̠Ԁh]wuIܶL*:t U4֯-=u][uwK˹h`!̱T (`tPROUl&Jz8 9/DO:r|?ޛk…{JR<ëjh`{}6!K5 !PVk4[+Gѵ-9yʥq~"1\s[g"<7Ur"X`j4 ݧ6ѝCͿm+RwuxSO z9G#Àsb63/sщD%#J$qSu3։_9X.Gֺ.p5hP fS{>0YcևtثRZKCvl6D+ݚ쉣jJT=/0w(\Q\-$;wZ[Κ lymMWuOGnlB)o/`| "0} GYHiQ_SA} wKyFN@;@Xq*+5jcd!`E6<:9;SVRIendstream endobj 220 0 obj << /Filter /FlateDecode /Length 1435 >> stream xZMo7W,- qhQ-zhSZ89:Ē3\ȕh\r9fvgxu3{;a9NG^Tլ[)*mPY e M+׌eocr>0_6M2mI>ŹwͫO3L+/ϳ .L ݘN r如)iW޴B[.1\P)̢># j sSpd&"E2WEžŻ|Y!lQGٜ)2g;O?DY&BcڒO<HyS<3cڰid*9#rcɓOMܘviz14YEfL/4=D1]ɍoG a }J48v5M˙\zt+9k^J&=T>P֜틈3";+q9J^`]-qY/v+W R0/d#p/(a+ %scy*-Ny$;[iN9&.TgQZǹBk6r=\%u$s]y{iDd9>p9m3k/UgDN ֦A%$64yke+u2I%ÅyU{oLerDBǗi0Z䞳Ύ12xUJ3jՄ/S F# Z1eC/} αCkG*لێf`#iUZxhC50ޤ#/ΙA]]ܐb)7ͥgFJm(eeYG3[= '̯>dy̲}qm23,EJ^ heH9GCKIwBQ@mwA0oCf5#I&$@Vl4SmDPHCT{̬\t8jEP]GPL5dsT$wCa  EVWt"| 62uKg:ewۻ€y+@(yvubի=*GRAh ŎsEBX~*/>(-i'+eC`UiPB/^`JE2e2)!=R2?lCE5VC27\&sdneoȔ^4ǔ]-5糢.grIf67 7 J܏wmOUT}F1$g?endstream endobj 221 0 obj << /Filter /FlateDecode /Length 1843 >> stream xYKo7 9ݩ$=( k4~v^qveo] -EQ$}}#:g隓hNfg2?%*H+]A6,픍 8WY[^Zvo~u&3fbf]?񢈫"񬈗|BM\7B :!ۮ,^"z[]Xcv2sI(E("E)~; ,0v_SBS^ѣvZxSN!L{VZc,f :TR8mx<_뢒}P <Щ|vYP^&H%XD_M^b &^Ss1Bq9QEIk~EF9Fh4ԙKdxRjiUdmW=ձγyLR[*^:켠Bp,{rtLhLLKq%I/*~A Ȼ4Oq !)wߣ ';E}ɣwZzM6xU BJc`T<̴V/ j$U߰z):0q#GIwU`"ejP5[DRu(W*VLbmJ1) $m|5j seRy^2St oZ(Y>%':fB~t*!&n!@BR~!\:X) бY'0ɨ1 XC"ǃ2ϐP NWZ1 L8MZ Mib2xU3->1mQ7<' +}(%dxw)x\Mȅe!Bɤ)Vh.d 5+LdgE䇣 tJgxiaXIv24 OcRY0m޺W>iAH)s674Ҧε?i_geuQzBj1Lu (WD(ܦxw >"^E\WWjc{̯l^ECSQRī]@>{W9.Tu!s[U`h ]W{Kf0.:W]b\I?VcY=%` 4 ^[ v]e4sp1_oh#MуP4&'u~8{Amp!lM}@x;H]|v̱P5;$~ B F{棙9*͘":xC<_b:N}Ʀ=>aMYa$C~C1ɐ3[_UfDrme+FDTƴ0|Ӑ544Ն Rz;$~TZU5>?p1}Ƣ1{Eă꼣 S4){V ??A|xendstream endobj 222 0 obj << /Filter /FlateDecode /Length 3908 >> stream x[ko_^AS-udג,+gH.9kv`KqIr_~;G28Nߝ` r:y~uzڻOX1q o _. 
qsՆm'9r6Zi+lMT$*7JB͓|ۯ!W:χmBi&i޷6iCwMgFE9:ӍnO$M JN&sm(prLs[WZ'nl M\=ni4'c'[1+!TB h4d cTIJ'TqsGFM|^b.i(9W#%0tɧt=q6y>唻"htl>b8V~6d0`[ڠm ΖA&$(Q`Z8lzcRo|%QU1h 0t(n!r7yI14'GڔJEYgɵlM޶Wmx҆I~=2 *hAńLP "TMy)zAO[qR,ɓdTu'x"㳉4NNlLA͙R""UXKF†sUGH Ko5 p:\{  E\&UhGq)/ɗ)jbv1 ٳ jÓ6|܆O6c>W{79QϦj>>SK+ i޷6UJT(X䧞5Ia ߆`j8;k?I -Gܵ6nC7hKEŜµ2:C!+eų6<R2$Pm" anD:,08{[3Ӧ_v;<%]) Y \On`-I "=Ӟ^rU=͎ǔrFv9@&e{e3Y"݆A/ryH|B=K+%Ê݃|^n{ja8b;yi8EfJb "f{Tf<8fjYU#xN4/l̒ ݋6߄s6 άƲXr7!k?>CˣFowZ]vCۨv]![vhWmy$PVjt_}3>Ci o"&-"OC|;[(ۏ{ppũV!B80nHe3f(r#_fnт+ic8\$cLjO2?wEֆͪ~ $bQk z=\ۇ/ڐ$WՍ,,y@k52iW[Xa\nȞ2d X7b,0󼒈 eyo6HE:z*fPR hIʷ,aXBcb}p{9;Ž^A*/X+0t~o[eܮK^#qj{ωo9Z6I({g0⓷rRօQiqu +ԥsHڲ,CH'.5/)ۃYT I3=# ɩz.ǔ=.௨wHUrk1Ʒ$>FW̚ RfYu}Ү F.esg9T8C>z 4-:Ï^k+r"d^6?&A=P#|.E$AdaQoUqɾez!=HIܔoW{V4cM<̥09&:fpץ"Jk[NKaW ;wYxpӻdH!p¼'3wE+2t;i^rx'e$/7Ƥ;7ݳ=X;niNXY9ܸERoxoˣ"X'ZT'p"^{-+7;^HC72a{ ?ǿbԞU۬JlV㗍g7hC-e6vۈ#J+듣Ӈ2aR42)WU{?ztza)}:Ƈciscz M0.Mbp@5 ^ 4 >(\4D,U$6ӔUCi)$$DTB$˦e=IL`kgA5"ecƵ)9 @oV:HI*Tڀ_" RQ4L*z WD?mzrr~}k@^Ix]jtEE_i)&<)(FPBi&%{ъr,dBO@)IMRGRH$Є΢Pâ<ԝ3iU"U퟿rID () 8f:%9z {Ġp&8NVŴuBP1߇8b<q Ni]r"Xՙ hO8u> <\MjM%:O9oQQ#^(1r u%Y^w>'H߷cio˦@V mmUT= mPI(}qBr`1?Â1VŮ4G[ "3 Ѿ"A8ҵPr@W^bR1ccb 輧t\(2l YqO.~>@i?J0Ùg~擡IkKMefY+E&)StslʭnswrR7} 5_ 䦇0'Bσ1HK b@ J?cq!б ה;@Q%ЁtDJ#ABu`q: V 5"4&K򏡿*Kt(OOtbt YYslA#:Iݶ.HO (LG1~KX05Zn;%Ip Y8vqq8-%2m I vHOs/SJm$endstream endobj 223 0 obj << /Filter /FlateDecode /Length 12331 >> stream x}]%ǑX? ̌\`VЃK} ?i.VqNTݪiRC -sfGdĉȓ~.ze}Eſǻ?O%Z+w~"~ZewF͋.>]z5sKDiKJ˹Wzc(/\ӘO?s>|]\+QRJZWIA^Iך{vAyH$HMr#DRqw/7FJo06s摟f9]ۘ~ ˾f擗F;{YڋKVŴַ(Ҝ.L7>*|cgKX ]Nms<~y) }}*7{=r<}*N7d}[>ǜ?Zrt`tXܦq{Cfcj߷˯NŨ^w> n>1yN.m5֗j\HmX1[km2| mPS>E8[H3mfoCkD_48_ZX{~V o~XNK2ΊiHV*k^G> ۣ=/ MPEϣ=ː9u@Qf*FcQ~q,O;|t7jU_m宐bmr5zο}}tphYv)էW5rR_sM026._081>vsklx?Zw\3;nx}<~s<;?/ǿ{^p$$ hҌ'-}C+``l׺k>>stʗں7Ǘ/gN]}.m]vyH/۟DrsضV\EUw^޼_B7~7Z|Vsu9㮗y~5Q#n[ߟ{aOpGuo޼pn%o%~ޟи\ժ|MG^3kvw Auj]`/[5wr  *e1k2E}%w90Erۄ;n|j ]#LA׽}(dj kWеCfW֊Be?~RL =?b^X2IĊ!tM7ű(4CCB[d7|>(߂vG^\._>+. 
i>)r]+Ɛ1W V_eWIi  Jq|.'ɤ8^1h|5e1e9r28Rt7Ӛ$; 0KAM)qwQN;a8٠/Id_dJ|ktݥT τq`%x@%LRlugEAi9)$FœB{<+pkύ|,х+̊.M^>vmuEҬNҫDgP`2ffFY̺ !'9*ĻFcy*Xc/8 \N1 ;̷-?%=|-Z,"0!k:z=>4V$*tZgZt©M1մ Fpn+401K pFfr|(t^Y P)!:INIJZefm$lf.,ŝ$KԭQc5PbNn\z.:*]oC-Uf/â7M-Uar(XEz/e6D^[.1P 9YXH arF"P΢0@Ql(V!掭E@LJuc\ϥX'=WKHcSHM D풕]"T$rmB1EbHڰ(BG pbRc,KiXvڄjt# DDm]GUP6k)t-KMrL@*4a x4kXE!4.)S%:@:IiݱӋD_gfy0QőE %myOhBcBQvo,PKN2qxP)S6]{hJC`YaT O9-O_=c${ȿ1)A)X ]hrT)N`N"@Gj3ް#VIL9cp*4)B$b$Ѯ~rP,dW5 荤B5 XUW3KWfg`W ֎Zd,.#iɐ>Vd(P@a{-HLe]hOF(/, MS8#8#H`Z>0)1DֵWRtd4&-G" [.7-C#1P#Q]-_lO@PXbJ،rLsXpvKB`xg^d`6W]Fe &24z!Ūv awhP,cCWGP$$r0?PP̐!,耋:wLpڋyӪl{>Ԕ8%y ÑYnXI.EaobuitCkSB!D{tVЯn JGwH F݆3$mHKϫhH/w\oYOןoZaq o<{p^3&I<2S|F"؇=u%p;֐l@E3Iâ7ʅ@WP$heV%HGQk k_Sc>2,(ۊM<I+^V2x8@SH"4 6_{`"Q\ʘtK!6HGP%/a9,9hMYAd࿘A QۭJ`XiӐ….UɳcWqIZ8H'9Ť%A}K$Cg iNqvP$oR"5r(krmD=TW ?v0 E$m)vf*oxs&䐙bg& u-.CVFN-dbVW*5,e2S,/L b(玄<Xd'hᢑGAF2dcGdM8$#H Ya`MZBx"K ]2f)@d!bb@rnUXm;5fmm./&T^H 7^KBcnJ6f+@jA*$6mI%8:bR&EFfhވ4~G;yrv!&'pGCfTō-IFb:Ac R4`xa".؈gxjH[>uV# &ohX:p"ZP[$+L0( DAr͋IIrK*9̉/?̐qt5 \i8[o-B"U*0$w,xU[TMaX jNs/]]fk)C&D")CAdO_oofI.wz#7C"5}8"!5Bأ f]KRFo9}v?(9~K8?q% ;Y3 q.krTV?HH5vL!A mFqĸqV+ + q; (ėb3"ledL&HGS&𕆼 w@_%^aNjP Kh)Q 8Je@ؔT1 /"|H|+$D4Gf\ȉB N̻Q$D!5 JwTX])i sY9YV_YB['їvcRЈⓎ$~ FBK9! 
_24y sfHH:[9i(!xDe#e B_[ ڍ*@xkFtl2;hu#* b %& @6YbiJw^aĩK-.G(Cf ;vݵPJhD|%a$Q8U69b%dT Dr2qB,:,UDj`H F:qXmg0Cpn_\r5FFwv[8EA$VʌU%!HtY-PIalciqB5r ;h;WH".cGVm B>RJě"%-Lk +T9p"5y qÀH,<i Er,NMPmoKfy'*E(~"tp3r$z-a Ჸ8]Bm?$pvΈzv tL.*َS,ܹT@|@J`N8u)[IsJ~P\Ӏ 3(عWo5]ĜHy%kā>%9$9]xif{ZEP=ȴDhǐ*^"3+|):bTU̞d»$&91,<$t8Eʝ7iB' gxe]ãW"qp?RWabk® }!?u p"T>$<At$ wN p(A# x} WKFلc<}C Nd5l{klmiardmK&{ܸa?5Gmdx<€rvǢV҆ |S xH9Ǖ0]>+qJe=JL{y|Zco/xSm?߻[0SH_=͒#9Y/g|_^l[T0._l?Kεhezk?Jv-xmF rJ;[&]~ņN{͚N:?8凣7fK6C?|ӓtO>ҋs<$Ų>uhcnP:ѫ˼:K' xL9_}ONY۸费[W+#ml~?=}~|ԶS>?FX{#bF{1:` }p w\>l㈜C.ޤ` 44~8>+ ~I7*>P|f:6ʚEHx^^r(38/]_/)5Gj 5q?Dڽorݲ^<_[܄=8;|:s<1x<~=Kۜ{ ||}<~s<;?woOW5|^>[/u /w/>'1qs௏ߟ&+muȤ0]D B;>3ÁicE%o\~?sۧ3vka$ܤ\A}{ +oR2oո{b Mw_ \Q2vf}aO?+,2_Mn3gzl%ର%8?'r5\~rE5ێIHDBI sQDkwjax;k% D>͵TYS#w+ov^~c ~$81ot FBJď{b䥸Fw/siYx $ ssC`1X@6ttiD*C Yf8Yn |BXA*x%GL>nb?2TĿ*f%LEs\K虫Ch- '3@?GyIA"*KԃuyIK|}hpDI%asx4>N6H,$oϓ 2,YAt{Bm?,o,Y }^m^Rm!\? !K:a !qxpħ|[c~WX]^7{ GFF`CI]8j3Q.;lS%pB]qC m(B9ZعZ÷d 1bdv]% * 9 ȁ% & a%sJ* 5 (!QwGX֌oQ nie70H} G(]E }yu;׉M}ͻ;%#k-/8QMC) k9f 2-*.Щ خQɖ%ZbaҰJ|w;pk޶E7c2o;pBIx ;!s1-GZB[IQo[魛;W^r~k[{zt魿j&:܁j:8V\g*Q%Cƍ?|Lcnq?Yd!K |[=yu|Vɵqzk+yB:zun :#abyqmoT1b _s} O`+(MuxlWR{لsb1R%JGp3[Puo捗8%d/&3,JJ*¯݀%$m%%&oPŅ-aa1v,:5d #(nw %)Lz)o PLF ء({IDYi: H,1 D8hMYaIP@lX2:Yn N,|/q+H43|S}dĵ EBIE[yIBIO Td\X|X s27MgC=-`ipjK[1VTӘ%) PipӈTeI.7}=ݧ1ݴ#%(JtAK/`#Z#` |3~7 Q9Eo7%9lpm@ @O\_ZHiCY7|%>Pn!8#oPFw>(/ x BsآIn`4ފp)Jtq( KK6LjZF7Nek?EF  YHps%韤]Lef̗} s40r3 S4_+ᶖvJpn"_mwďtY]0d +vg n/,%.]9lpữ8ts>}>KP7 l !c&9.d XN9ށ0iZA4f EZ` v%Fhؠ**,tEeaVLjEMe,ŐK%+6|<:#=Ne@G[N 魽֦No%%{޺ {=:l`#({Pʞgm'H v3A [z歄RPEm|OԂPږ tr~!ľǒ S Pcl/wO3AƧ} k>^0uMn272VܚqE*8 @[57rF"8p\"k&)\0he {kۭD) 橷O VhhrŊTYn We~\?g~(ƸrIquW ~PLbvi ojvdůvnm=L29XZG Ƿ)#؇a3>[Vxk/9uSt[Z=KovzڡX[r {DOJ#Ϳ>V<`tq:(y($Z@a]d>cȿ,k4Q&|?\x*<Ȥ[ÞM}|}dEB ;'}xXnӼ[WOZ{:mҧr"N6Uzo_ٳϦSyaܬg3[ש/&=8nMSXɗo9g1~ot.b6'S͑{Wľ+zWo4_%ʭS[NZxi92rRVTw@ϋՑ}$ON#ׇ1$=Oa)s:ӆqsc?~e~endstream endobj 224 0 obj << /Filter /FlateDecode /Length 32466 >> stream x̽ˮ%IvBO4F'NwDrP$Yl 
YU̪,#Xs=hyek]OtPbȽۗmotߟ}ꃟOzu<}7N'g=:,<1ϧ?%y>h3?>1sz~~sYwt4}|G??~GZW9/:_P-?7>~qK6Ltg󧟄?,r/,Lw9g#o^PґS?uj.y矽>\G~~\e3 Fk6G}uo̙u X?N:BӺ?yik?!-x>cџϱ~z< 4rқ5rmf!:3~BO͙9_ouٿGk9ú \w)p./ں];G֞?G_c^',޼xU+ߥįl%oƇ%8Yw4>{̏Zfi*sm\9>RN9s=QY%i Zj`[ z5_)j,œ:???NQ풳5~p@ڂsՌ\,wPGϜǺ}nM-|>jJYuzqk>J{9s|oeh-abrh@(Cpm=}rP>Q!\]Et oȫx%O)- qnL[zdpX%}o3yG" nP!Rs^-YfعRwC pCvTlb.w'v[z)O׸AM*շR W#;W#׵r˨7xޏxr2M5_DkΫ;B5uGRy=>_x_{'g_Y}}d}˯?_Oſӿ?ɿ?OWO\GNg {uwW/_`!I嘏b]jy~iu 5̛HU'WI#7't\UW|}*) йĆ*]nWpj׸m:_Y#^g70's5(\aZYYMKp<μp SM7pz'WXVӺp ?^W](uՍ9o8|[C9VczR) WELt\5rpnqQS=hͺ8 Bcu W;I[eUS[?zJ!/ ۼ+U+5#5=VU^#ʔoh:_+j$n8nuC^CN5*Bw\!zBY=xׅ\=Z5d24:Wzѹ:Q-jg\&zєByǰQzʃ](:zJke jʺn pҴ ㆶ*j }جB#uk=)7}` DHF]'w;(WzU ;FiuyøedJE1Sj`Wqqxӕ>=Gw%Q|)U(4wq`\mj%i,wR9Wonxjz]h2h2Y/.QKU_-M]EBeLB$t MC]3zL͔< '\F#t>jn¹zwD-5tmGRXWXE[X1;j4YVXVk^%wëCSwԼJjtnj͙igPG_Ԧz 't|˨,(M>2#-3yՓ:Nx Šd5%]z];F T(2;oZuʤl*Q&Z{ 4 .l%JօޡtM[# pM^;$)WB23F5%~2&lppKU~5jɐƒ~ǔHkgrysPw ָk}rdP ;IDvG!kdl^nivL3^()qebZS' D, gQS6orlL+o[Ǡxy]hRYTƴڟj|.ݫ$rjKif.jr}G9َ"?qƣ@h ("vT#a; d;a.( w⇻z"+`Q̯#G)$.Un%.uU~35dw-ߩi>h"䆱V*G4N\z~XRm4p 3Q.7+deJ^ڠ;F[>G^iB·(4wU#+ÿ́U Hp>*7)+ O LOR9!=׫w)՗2hdJcΟNxg sz -<w ;n`b&;^ HHwӘ4(vGW-*JeZ/%7a[(@z19 6ru[:5;P7R%s {+!'H1|t~j=|tEwS-L ='[fŔ '7tMq.5jx餥;na ypst 3zWyKM뵉<]4:#ztN-1>ق}vSeBoikelc=~wz79f>raw4}L#[^5/^MZYȒ@[#;%hfp>nAWdj;ͬq<︅ݤɍ͸$$400 5ZmYp$qG|KLa;䥻[N8WGQ|[& C`f( +c-$ZJ{svA).z!#y)i] ۺ㕛s~;Igyt^Xmw, DS?.㖦a鎅Et'i;Jܣw3{ƪ׷( s:m ӽޓ3 ד^-nlg^wxEnxAwyO[xDʢ4%52כNHKh\sGwr fIp ffi\\q9I(b ]{[ZZeVaT}vay|c,(W'u}1'XgewHE:OIr1H& ,w,Ȕl1=QgG?w3HiLxhwѿGVb1D/6;]! 
{kDbܓF\!v;k2+򰕬++\!'{j(_-sh4mA35k(ngH?iÒ wS[8kP?9[!oijF䔗!YpNz<ִ; yB{q4S-簅ՓW4@*Go'E+ƒLzPv}M|y:r*7  MUX;A}gqh$[`[N8i 9*,7\w8%ֲ̹1Q=RkvmOǪV!RS-F%\uu#Z{v-m[wU {ʩ8ƶ)?sno(znJ][]lϧ݇{߰A,ptH\v:Nm-FtXǖJ>[ m=u_l;m=ae[?z|K#2o~^Y m޶ erZhV{aѰoZPOg5g";[(S}W+u7{w`p=y??rCݟ~{a?~??]q\*̃wP~3mwGo;@~A-߰I|18ՓZK'}7%MFW?-[ՅV?t'-آ]h]^8>:-Z]:%+5-Y“u!5rm+5),53ѯi-;DCb!pH tuVdӇo:ßvTL 3\w/53fGQ1$W JZy7S&C?n'#Ry^}6?Ū*92(I&[/=y?AnS(՛M<-!͢H\q?_O׳]*M!K*}Ywm>iB-I֕ۘ4(Il7"'v729#IZz"4pYvr&%~}ĤE~L1Ϲ1)X) jȸkD.Az "ov;_.R$55z .1nk(Z{ڷJK341EC&(vx^'NMIY׿ݹX㫝_e*._&0hAi@ǥr Nhfno98o`n#mw0s@Z %Y^N\kwR?vP\(f=TN(N5)AД~͍7m;(OGk\{}\mh|b( V7*%IwRAE*¹AA^] NBjVwp[ HF*RRH;J!! `N(N4UkcrS5yu($@P4ikD,A"@{&[J@!1DGR QOhB&希$C$x$F 7Td ")HD(7Q֪B "U!8@27Tp0D ( '$$D SpBiNP4.2$'%H6*AB{.BngU%&J!pcI65  AIRKN΃H7Vno#dJLhW,!r|M*Zvݴ!CˁjʅkZI"Bh'_7Sd )H؟IB$q-ps $MoeKw@$ne\r\xjq4nceglʭ"<$s?I\ي@ 7O4Ѳ5;tJfLt  te AjPAڈ ҁ@bh  D@ H)FWA:H@RAAڦM+@ H;QA&sEVA:A  MH*HA: MWlA)A:Ҥ=:Q1cp BtёёHD$E*DG"%B4 /bS!OED@ DG!ET!:VMvB'h'*D!@šth'C'СځЁ@D͔iѡ "ѡ#AA"h С#@tHP4m'JtHD&@FI":t$( 'С#:D CUHD_#IF:v:t С#ɗD`\ck С#ڋRI:ѡ#Ђ]ӡ#‰@,!(wbD1OC^"t$(@DЁ@Ha DIA; T^W4ρ@՟?R p8y#IBF]UH џx?o$!A8p|1ӈFL}V곑| "F%Wl.Ѡ>1YFL}6b%ado>R0bSg#FT}V3>7Slg%>1وSDg'خg'>;Qى%IoW4@>;Q9QNT}g =\}F>;QIg'>;QىJNI }JT}vҮAlg'>;QىI|SDg'>;QوNT}vES\0ىJ\}v곓z g#>;QC>;N5Sk>;QI`QE4_UwW ދ]t6bC*:ى΁@t-+@ :;Q9΁@ts O.:;QىNTtV⢳HBD)Zs}!:+1YDdQUtvs :Dg'*:9΁@tDDg&:;QIa|WUUfjZ3ЌPM%f|U}Y,YوNTV־ee']ف *Ț LTv(TPv=d ~ݦ%;ȗV%d#;^[Ud"]vrm5 9hNTCr Аa _sHW ߉( hȑ(Ʉ ؇*NDU5dRDP 9ѐ# 9/ !(l+D5@!3hNTC4`& W1B7oKXoq n@8h7eq x2nxMvl@cPz/C9DHp8Q#(WUin]\76U hl@5c&be綄9U1Ze PVD@2c \ RhL*6J Nl`^.[Ub!wU˅>`\.\aP\﷪Uv+[E:@u8NTp P@@y4u8NTDDIu&.:+Q: :@1TVy8Á@tp (Tp i$)@viaP$ .@daUDnp  ˞@߉@?B p k* Ү(DE@%4a* ; P5THЂ@4%ؿ˕_\U`*"pMx78NT:p rL"/NrNTDx'ЁQ9ވ' !NPN G"bp$(@D bp$"GR%CFHD X\&2-dY%ò?@ V,*?Y0i#v$wlA\ 5g :k#z94j$($!ʦ;1B&AzAD VA@HCAu.p jE$*{HȑB ƛ޺Kü b@ ȧL-7Q~4*T' oSD_հe;7c!wc(h39v?c,8(L1Q+w SH(aN!ӿ_ 7xg$ 1#=YXALI4/X&E# gOõM0޳=<0U 0I~J7,"ǘFҮUOC]z nY][QE;uL;q Y hhC14tnY ћOS B97* 021 2h]RиfubPLR/_Djj7 0= 0-D 0ԁ01&v ͟ Q )!|-X|uz.hf~$RD 
M>3gޗqּ/g&(Ap2Ų̪eT121ŤS tX}U}YޗNĽ/S56H"[/{_, y_h eP"lޗԽ/:x_h4[\7KBfq} JYRՆB3ǖw.Pn9/4oPC8_y 7 /Y/.uм28_ٍ4,F|A9Η&J|A3"AsB|!ɚOsЬujXgZZ@4_j Bs˺P9{$oYB)^mB {4W Epn3,SR IRSKAQ5TMicjZ-jwq`jj SYh>!i:.-:n}Bj+Z.uL/4nkڢ bh\߹Ұo˹KR)I4%DeJ=ar.xufѾ/%՝)?@/Bx5P (" !_[iy`/lGy5_0IwBVBJn4&_H0*[ i>Zq-}=0|]0$y-Bw- Ŵ3UgLQ.8cHA 8cH97gL;':`uƐRr3Y[ gLKEWpƐtR7k I'xii SJsL[ kA9䬦 cH]i[RW-BWwsȰ"f&{dZIШIjOuɴy&C+NO %ЌpPxyp*Ӫ),5w_VTHo+-?r-мs.~#u닁oXRauAXeyuy%td o{Mn=qaqa44qcb lh2.̌qajhR O CT[!y>-]y˸@ FJ^ҋeV E'R'HS{6ʐԣW$$fW$k&qWROM?WL2$+a>24*@2]a[ja @340Xh9iI"s`sΐ$}:gUwuΐtsW+`{T%nF.дS],кM@YԤ T6 UpYi҅ JtwtfK҅U{w 9U}Ϲ0:Z{3PB`㰺jUWe`f3Ь{ 4s♅&ueѕ.M44+  M?CcP$?C2fXϭp0а8hnBC2N! KƍjVECӻ3p쮤U H%\]ibICyP%\"oLC"7ͰSNC3ʠZ:}j5vefP͔tuQeEmdoA2h :}u]_W% ) 5-QDOuY@5Pk 5ꩱfҀ 4<W JR4H9V ZUC_=@ihVW MJG W =WWʹʠߕqj&rTC)f.XE{~f!1TC+mQS,[M54+˼a\R/PM54+i.oB=kz5?Y5h}gM K!yXI]-ȸb?ƺfv_Oc?}&Ѵp\HA sMi[y\5^Ӟ[tZ$]I4A+M>5&he55&h呀f&rqY$iz $ \5fc_5dp${=bEokͽ 6M-6Ghq>L6Z&?B "yϷ0ڤriL6Dt ,2M9T <6Tnxl<6:͇f1fMAMMd|S=Bbή4?dJ[1| $@I@~HOH#> Aӧ~<8g=~ *?GRz rg^A&{߷zr*9 X 5zyzcB~o7Bz/^ۿ3|@^\VF[?} Mm{ZO}lrO_Dohx5Oo襆 t55@~ȿ]c]ߛAoǶϟǯCNz)!y J7d b`SoHw莬ƳX| fhW m ??m$uVY`nËhu-kPf]`krϿ֐*h> }; 9~j7赕 hfzfo*/?y_4?VkmMtAc^Q4^ݾҬlFxJc+=DCHd_!4Fѯn_!dOŸ?B'H=`dwoHv! GlGuQ]p( QFZ< $u= .:o!1ق3u=>߬:E#jFV{. ~vJ58>5]SK"a޶P+:!fqŷJsF%oHT̫Hz??}o?jUR/(^su߳3f(AL4@ dycZD&#HW3e̟y"]"vه#!s Hdj:|"'y~ QXH!΍t4ȼQyj:r kU! D{Q^h/Jt =Ip@$U8-mdFh bd I Yd5ND#pBN?Kdڃ (;ɗ ɔN%r)Aܯ0k# $+T rB9)zBLT'=aDĂ@*$iFɰ5 M&#]Z%"D2A$U Do 6cBo O$Yt%rm"Q$@Pt "F K3 %BD2DI"A$PuffD Piiz:AQ82]|55&<,(I`£ 6rLJ 9v29 `&h x)BG6$2e䴑&A$RԘH}u'd(iD Rp@\r G'rTND7)HD8Lu'R L&KL Dz'R첾nPըhiXNYU$A|6rpdYh$*)WP2 "LGKQj#(')N1 5u)HPN,̍FPG2$Y'$ۧ$r-lۧ$r-YH 9T~, `A)*ѓ>9NȎ<*3D Yɔ7B h;2d ^NPݦ'ZdNdn IFBq;E `7=cKYd'>7YH )@ (@x!x> Il5_!1V^k.4* 4Elce( Y!YKz/+u'('!H幑@vob',mdJN& "^η&n@PFhy!Pz1B `HNND I%H'g#Spdt$FP Ɉs&xNJp`#`#RžH$F$"i'UlM'#8*%) qK@Aݶ K$ Dim#yf4 u&+vX IT.7%@&12y-Ik%*|p^2ȼ@ ȐwHorE'9rTr)y-f.;)FE"% {#. } G7u@gr?0N q$AN Sp"6"GQNa2F!c? 
O)!('ZYAD822H0c?S()&!lH́rN ՞ܭ!UHH Pe"kTbYpSE08p){Bl<(ADiD!$inm*b4#+6mʾ "Uq?S|<$in@J"indJNV]hD R46'G[$}'k8yQ}fD R<QrdIbFHZIGAѧI^ eC`-:޳vl!k:EAIH%'Fh[Ifj2Tq$- MR/ ȐhdOvF2UNT#!)PeUmufu^pذ6" 'edAGREP q#C"0fo#I"W$k2. "pp"AbP=\S?Xo/F%7̺FzWިU.7*qQJLoTz#HkAoT2AQIF%7* \oTPAQ AoT$ިEI*sĦ7/7pzc AAщꍁ@ zc hDTr.AD@% Lotzc 5|-  Totzc גp&NTo4z- '7:QшNTotzk7:) Totzcי7v5ޘU5шNTotzިF7"DhF'7:Q7:QшNTotzDF'Mo zc$Q)zPLo z#^ިiO\otzc iD~\@7ˉꍁ@o :f3c , pT.6:@)8($.#5"К Z*6RمFKB 61)%ȏ(@t*6Q4k4tN$ @ DFjh#QK"&6mw%ؘL@Drc PH%P䨫]r %]N 911@IˉJ$i6\r jAIQsHcP%\OU@:щ@u .]c :vc0Ց@98QՑˡCBu D.$q1Uj]N::H CBu c PDU@:1SՑnc:1Q#DTHDu c$:FҰH_TGZ)@u c PT@:V$tщ@u DJ"NTuڼɎ@w cJD;T@ <pq`} @x c (˻7¡c$;RR1@ut[o@5VTSͱ#סkاJz5ɅiWUHhQPhyb3 h *TtUGjBuDT@:RSD~DUHDuE~U#Ց1Au c$:FPI~ȉRn2i{[_CF0$ d^'iX(d`yȄt  @59\X$h rc;.:QЉʱNT4brcrc$_09ɸart X'*: TuPADX#&:yQ&:yQ& ?_ȱy[˱y_˱J\Urlޗ"cؼ- X%/a]D:Nt[D:Oӈ-t?kӈ-tAO%3_*J|__,T?O%Sɼ*) |hc5˱s.:îP1X(Pb UM,ȰNT5b2a@M?\ 2a d@PNT 2l I[iĖ}:eNTE :eMReN _ҿ/4b>$Aa٧]D}eNt٧_)>+oӄڠŠTXWaUXUaRPa UX'Pa¦*l*Qv>^EBUV7q6a4DX'*1։69aŧ]DW|W|:NT5X*ZD%X'*Q ګ] l "9P 6H۵M5bl%&:Q ։JNTUHTu`l=>D%@P+ ]*w诺I믁@ _J!_ \$.o_j W'_s_ ꯁ@ k _JpIt_ ҥ译@ At"DHVՉꯁ@ k _D@5~VM>$Au_jW':q5_ UJLUk诲I_NՉʯYCjɯNT~u_}fDW'*:QՈɯf%0Չʯ.[]~ D b/ :L@ :Q5ȯDW#*:@W'* k-?NTu_DCO_@5k믁W']꯾X@tWLZ.NT D:鯁@ k :Еݳ[4 +AJP'4诔^UW'N:P诶@ 6Uac_@5X*kUU` U`Ҧ:Qֈ)NT=(N:Qv6A *NDeME݈+J\/X%Bg ` TX%.*qV AURbt[@)^cuc16EDX@ IJx[z1ol$]LTRA6#"Pʛ߷E@ d @ :QAqh"6XLu9ր~c@5ֈ@C\ j&75ց@ lQfisQ]U=Ɏ؅Ȣ6;ICcu2e&_IdAc-BwT4V#k+׋rrcl]?G% 3BXL%Ѡc3Dj("1`kEc;@$bJbrMI%A`hA4Lz${A4e<7Dӛ VL~d1lJDtJ*-A`!"(QU#<> "ZYhչeQNMKBW5&HX?@C%KRZK։Q":&AVNtb섾JbyeNN>`Dn8A} ZtU(m:A 8J EIsJs;uH"!b5D,E<Dѿ:3@fE)8 A {эI_EԽUNX+YX r MkR&f&r9&m8˾*+傒/KL! 
B)fEP;"o+ "i@r!dY3娛<9<,9Uv.vGۜBđS 9#FsP>͑#Jȩ@9u1iG #6:N~ȡ}@iɎe`tG}9$~-#ǖ#u%iP9;24:rDm6Gh8ȡub9rz&d )L͑c9rUG=d#G4wGM :vbsGN2vGN8]s7Aw38:qhבSJ!+VV1T1+Yqtb l͊3`v+hgt}Պ3_5+P{Zqd]LVYq&r+L+[qv7fřڹGRv+άL̊3S0+>@8k/MYq&E#έ8,ZnІН˃Q8tkaV')*R;7!qh[ytԈC!q}(͈Su_y3A8;q͈CdƼ z3 "F*;7T5fš#m^ڍWZ5T)6UHz~jkՐSiFDFǡN8uwơơmt]sT{kQ7Np@^85w#(OV"nc{RZRxZR۹Ē"bn-ݍ#FwSu87NE4w@84/%"Ty6+NiV[kVujYrl^@ZVKzqlTafCF fi]3+ -*3L-5О P.Axk`ȡ@:s!Zv5`w`">l>!&?ΐmݎcqFF~TtniP;ۥq8'^ ݎc;- vr /G~ˉ1'BDZ ǩ=%D]{u2G=܏S͘8 x,Ϻq+q{0?NjVR?N*G*v'eb~jRAf0/]8ߖAggnnˉqP%DМCBn8vkՏCcݐ#绘@ NM8` ,K<pHPduG>5bơwbu{ fSw3h v}23NG8.$wF8ɄG8]]^"@`ƑdJb5D+M@t a%B]A4¡7!hS%F_Yuk<Nt!xK σ9- &qΡ ͆C3Z+ Ff!m 6w "zo,zc 5f`'3#g`-d`?5KoNoH 9W7%UoIhYĊr}$H@#QdUtO~+fwn]'d}ϿnT,zS;S @Ӭإzޛ` Y֞=CEӤ[P-{)262={ZO^1 W!&`_l FuA ިÆ5SN~_˓KO_~B[v3VsĊᆯp%'Og'"=!mG]iv^K6uz&~k0ĺՅǺ \uwj7HXIzVY'OխV3ŭp+xo)ޣ~tB]c>|!:ۅ4yW_GU -Dx'Y.W2}JhO+$eGLH@Ϻ8^6̋G"FƣnfϝTF*\Nأ Wyx$2'IA{"q,O| F%z`mtRA$d^גi%Aoy'g# ̎R5$*Ynd\h H%&DyHQmvvD "jn M%AS3\#@#Hd~</u'A8H0ry$ٮyA$ y$(@ D$u'ȘvOxds9*H\FDI%z^ cڮAT%d`ɐإ\I?KpɼC ,_e mCN;df6 BmvbD2y<vF%P /`u2Zl2?U Ml,1 %,;$E@V'YDe#)@ 23̏Gd(@d~<x$(@dj6!Dfk\9̐Gq3@F"un" '<(tE 2KIFe<KQ|bF5)Z$I,Hd 5mS̔GDh؉LF"3Ly2SHH UM F))@2OmD[”"lۈLR("XڈD$SpR$"l_hm]h"%HH$"H@J"γAIh"&#%HK "ZDC$H+ "II 8-Lx|#CprFJ"lF#2VD (@X$D ,GS(IJ$vl$=Դ&F9D,(Ԋ N)N@Xq؍`pC۪*_[pT~†EF)ڊ4pH!2d+)H8FdJ!Du;B%nd^ɱ=B2Qb*ATx7 :F6ɶ.R"(@XHPľJ!1a,J8NxF$:,ARg6v2%' )D(@ذ وN2HJI;īH 80~!;ƻt^-)HX'dHIgBY<T㰣ک) V L)+ g*a!R%!)'(HADRyn2a~IH EbqkF& iD CrF9!~挂rsI \)D) S=8KJ@*e ; ͝R2D6U1 bmxcR45we2;Hi`#v#3K v'Rt !))B"d;) 1!i T "wʁrGE @,=;h\r2 g\ʕCl}}'S.I9%rB'@{ ؁x/Hqrk vS?.dɪeA8A!TJHOS DJ@dCHrs+'S$Ebp8bPR! d^/S F:+Vt! 
f#E84ۑDB6br%;[ȼΝT "!A8$IvH*c2%3 @Nq>$d$@  tIȔk=B?X/8%5X\sjՀ@3`jՀD8')q5Ήj@N%W㌘$Ad\p5ΉqN5SkZ9qFTsp- Wk15ɋ05ɸaj| B815ΉqNTbȠ9Q5ΈqNTsjWUSjW㔸ոBSjW㔸^qJ\S5b8#9Q5ΉqN\Ɓ5NqJ\Sjq \SjW㔸8%)i LL3jq05ΈqX8'9Q5F\[ά\ *9'*QA΁ rH9'*9 @M.rE@ w.QΉrE@ Br=YE@ Q.AQBr@J&@U  rq 9g@8P˥g@e8r*9KW n`AKp .HpFL\ @s\ Hp@s\ * qλ$8'*|\8*)07 7PָCgVVҕ7'-=7'ې)YoeʛU@8Q-i(򆍱\xsMn-QSDU@RDT9unH)D TH ZE"THXuO ђxP-n@uKt7'*'HoI(o &"]x [ ('*r[ ޜoȼa[ ޺& Iປ@d@Dv#yDdn(*@vbO "-"؅ˢd"؅dHXv@!=N E'"[$n+]v d r(]c*!n@v_!5JvŭgZTq'[ (#2[ Pܜ@ voȒHXq [$E"[$P{@qD =tlAj(nKE՞SVʍaUf IIq$$JOt팍$>cE[R|=m_HkVmOIBh{ v+3n w+ޭDyB LBVg&azB LBfb6X<[!np&Ȼ™B0 |+$oPBw\m+. \!0p@DC!4p%+d{A8Q \!4pIh +q=tr4pI`ee3WL%~}~mpت{q~YE +J$pPB W%\!pI(ᴓ=%\!pPJB( ;X . . W 4\%".V"q@UW D\%qPęv_p*עLJƻJă"Ru&meO$o_%wQv$ K߅t$ DžAt${}ѥLĠK߃{S.ĔK2߃40&4M"U4T]I$I$MER&K2^Hijbi=4DJ$I ,MAL44i I$Ӄ454M3!"ix4I{B4iI$hQ|tl_KȕH#/@s)$=iyR{$IC'M"OZI 'B4II BOZ=iz1Q)=i![/EI 9DzB8ȓv?$2IJYRrԤ[vU)+̦*ZJBAR .4TiR@*4Ti$J'U_%S&*eJY$RIJP])J KBUZg"Ti!B$PBUJ TiK+.-J`L+2dpJJ`M+\TBcⴒi%{QN 9Z'G@ oZ@h L KПNaZ^SP@Yٖ $q.-di!8J³_i&(E݃ESȄ1ʴ(i[ 90$Pat Arb r_]/eS˗ǫ$胜I&<\ (P=\sLɈB#Z.xI ǗtU؅Vr 0k읽M.tɅV9.JB+ sP \h!pbB+ sp/z`g]h%pVZ `3tЅZ]]h!tЅBZ]hP_/M^^n 0TD!A6܋+At&AL6o,hlY>.s-$s'V\DbܥgAvAL4jĮau]w`9^ oj Dg(bķ$stn~Ht: 7uTݱ 6\fo:oA [Ē~c0aǢ ':Mz1pm[5wuDmq"1 ǸVw /%wA縰L&+Vmsdl|oqU_M:GVM!@ ;7A2bYcC3~qGM0kƵN_;ү'ͦdx AЛ=Mw&o$ޔ[4 ZhAE8(xalb5Q7)fgfJ:[4j~ ѯ R,Jc$ޠ˟=gcAL;j`ca#6c3+' l Ǯf. @kv3bw:H8Cݭ=$ ٠ G 9dsC6nBٜuG]!S lƶIٜMhjl)2˵C6e!$ لxlu^! 
'3dX!~# GflK"#6)a!!{- ˄MVfĦ/͌MsMFיAM_>Ě.T t[v&ᒣM, kC3ΰM_PWڦo 66sMWKfmbqmz"<4C l >Q=fml,RmB0hMaA-j*Aj$S Je2h:hӸ&6ylNs6z2gsͺ-s6DAlB*gY'JͶ蓫NlT*ju0emԺ:6˃64M[jPƗ`mzyƇ 4>39q^qgnˡЍw;tҙS5 3t+C7]/nb~ynsͽ?JfJh*&` ܨunNzfM?2p㲻]njFYȂSpz7+)ff*#7Ύ37Cgǡ.MN\ץnTW's7;oQIƮR/Un߹՛ 6;t3XCęFͺz]]2s'ǒQQܜ8skIfnt ,?27+s37REn(rӕhYz}Fn;/V"Qƌ|"ȍ'\uY\Ȁ*wFGYYSP 7;q>Ʃ +q&7Jdf67ǧtl*li_(Ae ~}]\LXu8]@__WDZ@ɵ,S2UWmCimk06.ͪۄLƠ3DV)F{W)JmY#}mVvF[HAHԨTԠJU`,R.Rp1)"oY`WE)x8q~rc+ W{Ə* TpK(JsQdU*Js\_+W*'R6XZOJvgE-T<GJcPiS,TnBǫPBh|qY$tpWW&'lֺNyPm2:۩:%V\a (hw/(F ܬ_QY@FF Q0(iPI.QB2K#V1QSsSg(HY`r/`(zBpkt,9[0(=,Pp*g j>+YeShq= ;L~YXiHSYըiUYO.5)s}Xip(8`q6L㴅q㰑|fq|wg_ 3?px,88gxfqvQ9ӇXY~|{Fq*83kQ#\G'a'>*N0iB8A~dY`j= 98&ٵ  Xl<ǮMp] <,Zu'xM8N|מqUKF8ΦgnqbSXqRg;xi\8Nb8_i}oqvٻOibai_NĊ,Oi>U^Ai?_QNf׵%&{d}뽴$u6׽}o#SU&`MH~9n7uy7$~үnXi'm?b;?:JُM?}#4g/#sQ}ێ럲g?Գ>F7|\_> stream xcd`ab`dddw 641H3a!O/VY~'Y3yyX~#=C{*$fFʢdMCKKs#KԢ<Ē 'G!8?93RA&J_\/1X/(NSG<$C!(8,5E-?D/17UL=土[PZZZ`d{N|q00~Ksp跲ֿ^_n=E'l~];\| pBIOpb1y4^V|cendstream endobj 226 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1761 >> stream xTkPYN覝I.qө-ZG " K%B0@ yhx7DA^!`haf 3AE`kdܦιu}EX"ibT%O(KJ.EPj #Њ9F 8fGxw0}%b⣥:~Æeq*-[w%C,U1Ti)\P䒔da`"Y(VE%ʢT+ tJ-9)%#G@(|@6"A!b A|Wˈ!!Hfb|?n] X+fE6:P@/PLhb-6wÃ̭q\(8AVj*@%yp.W28DRُ1%UVjc3lT oONf[]ݯZ| oLW\)B o^HgP' _pǨrʁwVS *aqzԓAL*wUMUJ \In {}0/(sʢʸB˳[>HaMFuJeÔs٦]Ͳ,.(1 vOGWb8{FTש*x"AlI- jendstream endobj 227 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 39 /Predictor 15 >> /Filter /FlateDecode /Height 53 /Subtype /Image /Width 39 /Length 1223 >> stream xV0@7ԥ|+5ҖߓA0>G!y{{{yyL&F5Hԓ- J[ >==e[k/x ]ZWpwwwyy^ROOOM%0.\]]M:Vm0K;!@L)G^\\HEx*W; SCY[[LNX%h fʃl%8Ys DP>!؃ A6'S.Xq p񯠢pN]7aR9wJhVPZs}`Lձd]x^*O oREҵ`Pbw'E,=Sc0Ү`BE *O)gjPP/::0UgSAPL!_THWk S;onDlT07`*A}M %3V 9:0ʬc3X=bP=d=T {jih jTo±)[a,F&w\ Uܤ7<wA,7lUgLGK4zuS(K SU›0;o+y^47nd޸=;ʕZMV6/:n?FD< [ ->;U70Jk|bGRNTk:DlSdg0_B{%YhM$T6u[^O<TmeWy'aendstream endobj 228 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 53 
/Predictor 15 >> /Filter /FlateDecode /Height 39 /Subtype /Image /Width 53 /Length 1350 >> stream xŘV#AE%<۔ #$"&!က/--TAWuUooov.?<<ӾNLLЎ 5 k4vh? {+ 8C{{{K A`vjb-L AZP+,M0;>>M%!S>"eKo,L/F 3|lc5DUD [XX ) L?\ZVT%*|www?䛚N("d|t S?J0)XA_da8/Y~d28 bkCv_+%cnnV,n~ /kd@iQ>S1eusYS?ĺ|Z0\oA.+A<<<kqqZ֍ .QAࣃ7!@r|Vamav{{{u@nB6;&*/e $Ƌ];'qсST/M:~p۬!@D<8I_&5|H+/nGo;WWWGkgc~r (*CM^vFߊGVu`> /Filter /FlateDecode /Height 39 /Subtype /Image /Width 53 /Length 1332 >> stream x͘9R+IEU3,ER܇:Y% ;T)S Rv;U?booo|sssKKK?S'ߣ+d%\m?nggg|+++?d=klxbemEfZ\__nыeBmbbSSS̽2_!>v:==mG,466l ӈwqq!@+NNN%>AFL@DoC>~uUnϏ0G\^^Ex=ܯbMtq'%oDsgaL;[7i]@M}jёB<2V,f;y!y"< |]v>j^!@H3e)6 fdђ=ٛ(,#MdCN$Ѷ-C/hyrD3ry`YA-SctA[_" q4_j"eNJP52>f,gv_y722Lmk1t e#6 2V:q+p.#OP77ޞ /#ycWRjS?qAB9a~øJŵEL^&ɷA{:BƷ-|lLpޔee>Ҿ t꣙&j Ԯd^@r}{?HHr!vehq̛N|o˨4-N~qCO>Vg{4pNSB6`B6'O2LRLR[[`2l?? c㡡~gJV^>,^Uy4=*䣥dʇ9 >kSy&؃ogg̑ژ? v0hA4"ʞOq{.[僦~V2CT? ~ VG5?sag_CиD >,xm(?I>666ϴ|nu10ʙa_٪/C67B_ɔ֪I_?>/Gg-TQV_2F2g>ƴeBe Yc9Ҷ330k.?2G~~endstream endobj 230 0 obj << /Filter /FlateDecode /Length 3736 >> stream xnG,4"` bMÈH:.%%S3U=ӻ̼DЃ}TU5D/Wn_]#~\nV/OakQ^)W.׾ޭNo^ua},2>1 kыq+VƏ7}Lq mFk6i[ iuCPJdwf1~>GA]ڄ:`ӫN؊Dz\h`.g6M k@~RX*\ۙl*M.q,@-*VB~mi!d&1{.i'O1370f!fҁWL45ƑfPC"`&67Z_h<c4h0`PDDkmh-!κEwF7Jpw@˖ 8G=Q;ў5:#[%'QA{I@I 9uLRFͨwE={&I w qLKF%zDdȁY^ɒ48OQQ  Z=(xMݶXQqtnO N=#Q, T.QJ4A fm|DS'&\$m2Fl,X=pK%)^H}GRL˶em=Īe)s!CޭC"Xt`t^*L4|sY=Ue_ᨂ()z)k'1y& sT!7٧zU\,xU5DjĉһT o tue:d˅kj萇 C atkm #u*u>>чI\ ^Lۏ@M)Ȳ2d Gviن4"oȘ"[\#t}ҝai!1h.89N]DCb|0dI7cНɀP碱o e"{nw0+ZxǪ/} \/8 œM-KpkY`!\m*=*Mφ;$AC|4\N$aH @P_^͚>e毚e3`|83 o LXgD?yG:HN`j^ۂ{GLmr=I&W4R-uJ&H#L^xN\64%9 ~Y)8+ BE_~:d(bΎS-͞V=?Ʀɣ-,gNܹ(Q0(^I8-&"*u{F "\:ocSgw4ʪ>6 5C4hK7p ##Yu1Loq;?! 
^xFԚGs^1g85A6ቖqK eB C`<xd7i&W '8kjм_k:z\g@a?% |EXgqF7)~UݯUuzFKuGUT[' ~UuMb\u?刅R {kFaJL架eOj=d}(BW(1J?U̞2?<{Y2BsD؍YD/_SϻjNfD$~ePr|wVu:;ެh ծ d9v~fK ,~ӤJH7'_O7.@bgGC9e/f*ח$æ %yވ܈cC#ʪ#m@(Qc2 ;x+Wݞ?7sSF8<_2{j)Y 30Ly),ʺ5@DU/%w‘B zϝ0hg-36gkv6>m8yJS;&ANۄg^3Po5x(l^&DcFR[{O\h‚RU`ރ&{0pGփnGnغQo=ؕ&Vx*{]æqWYqUOiTh,m677I絬W,c}s}߶}ѡU0jF#aW UܯWZqO^Mx/ u)*=U1%@EXڬW:Wn+CM9*pneOaͶSy>wn{NFO{gL4DpMkr[ޗ@9!%oV]ԑօewON='9}R6`F55~ JE.g@!Or͘5oO9qj}rG02{|;{%Bz,7/~2yIn,|Eg`_$5D wƯ:!IռAW9Rֶ׋^6l};h/Y@/n4ɼ͢&B@{]Wp):i0^ѿ\endstream endobj 231 0 obj << /Filter /FlateDecode /Length 29406 >> stream xm-qAUUt03 kt(ɖ撔H X+b>G$e /Yl߯z?l/_?_}7_'o>_8{y(~zG^?}?ꣽO_oV痆>~^>zUv~oϸzqO;seZZ߲lo28{Wߎocu3>?VǾ~ʞQƛ_Kb}[-o?o%io}[7W'* ?>ޥ>uwM)wG㧗m0~2?mO|e|c;97t?ǎ{v~}:~ߝ7,)?K9wc9yc| ni_v.?b_?5>~3m~Tvֻư ^k?oNrE#Yǯ:xm|el>wc[^}GU)#;_MYƞ~uc`{mǧ,˧6;_(ws;񍿱mWҰ{/Ǧ9;#_y|sk!m&{W_|㴍hLJ׵O~{]WG'՞?tKz&-Ws{'m<ƻɷ}>Fwb^ n<.1N,~QS׾\P~g]Χ"7A.}zO>ǍЗ8nJyk(vj`9[}*}On}U>GQvKV-R>~?ϏuvKA;~RK޿tϭrԑz]n;mƋY[G{/6_d_qS1~ohwiy"~㿗m>Oqr)}Ys {{(~|cSޏc|ٵ:MM8_ɥ󋎭%GF柿y%ツ^pM߽PO?v?NT_g? 
%CZׇ?{w k \c[nZr>K ?㾫~ 0%'sŸ׹PyZ{y1c7ǭ- _~CB^qཥ.O Zgt|Q+=4ju} oO;ۇ?|hr/u\?FrV^Y^ͣ^8=m\&gwƥk{#x^y/d[|)q/_>󇟿,vY{,}ָ`y>D5ey>-"g~>DeKȳ~^?_켎?{n~|Uߴo2;KsU:!5bvq jv}߫q0=y>n-Z>`yҸyC< 2$maۺl3.PtrM|hr<eӶyo#n?.?x޸;wx&zмsd܏m|Ԅ֬mN:bR6ҧqܟ(6 xw|pOY7a߿~(m66*oگ[ά^αTg׵^ k ek|K|Cqcvji߷ qMa`Lgx+qT+u%㓶܏z?J\+Y8\0}w_Wb?JmKu@cXHeh^g$Oxsܶm')~F-Z܏ǹ?}%=؏D^V2.rX+,d'2ʹK!μJv[(}%N:v>+NUclOˡ/hBF+Ar{AbĊwJ8yAb%JZ(ժ>T+!VIXZ ꉴGY7ϨVAjĪAR r%zsT{^VBFz"VBW4!ZkTk:Z+ZU'2պX^фZ [yZl(VB4!(VBPXY&mq>y徐M+!(VIXX /WPreAIX\ 76OdՕ~%JH6:u^XTQmf;^*Iݼ OJH&J\H$VBhBP:Aj%~!݋yTn݉W+%^j`V+^ڮNTW+%^xc/VJZ ARr̫S%e^vJWΛb%ϫ^u^ :%]JVV5J@eL@WIY\KW^PO䰣z!^Yzmo+R,JD ARr̫.%^É(%c{",JD ARr̫.%^xkkNF)}!vEDfR5J(%^\K(!ﲻ1]NyQk6Q]d5ꉌu-dԨ9&ﲕF i^DŽx/e'gK`5j_ jVR5또k#*A'jKDPF iT^LJPYFY9>.ಋQ]p޼HԬO2  v2BfZiW[u;,u;̻k%^Fm?d֩ZȬS 9-dl7Ur̻Qs$N-Z-[,_ȬS ))9/d]/dm_Te ivY2xZȱ>xH-ҿ6 A3,R EJ"99YH-d)%V,R:vWs$PT0oX۽7j!]i-dVqqA;AB]i) êB j!sp!]bIo%6DDfRP):d.+nS#f:G{!sp!^v:IۉW*%^xbSkpw'sp!^lޝxRFku2+TBdY/dV9PmxbxR22r5B.Z*os{/dZiBY̔tRBd^_/ 0/TJ0`Qk?m:ȲP)B+/UJV)1gfWJ+%^xRJT2սn+%^Li/VJPa@%}J^Єx;X A rmsp!(VB.'B,4 Phk߼Ν ),2orB%b&dV*S۬*TJR)iv79TBPxRJW*uwJ\HWBPx:loTT zR*훸4%VJW*%SX,+oW# cJy{]mG.TJR)b!^/^[Qe{BnUO =]0z: t$IPO =z: t$IPO'NB=z: t$ABO'NB=z: t$ABO'N=z:t PO'NB=z: t PO_?NB=z: tPO'NB=z: 4I$IPO'NB=z:H$IwlBOCINB=z: t$INB=z: t$APO'NB=z: t PO'NB=z: t PO'N=z:tIPO'NB=z: tIPO'NB=z: t @PO'NB=j H =-zZi!(VBPX%ab%zZi^\ }u˕+!B@O A=md᧋@;r:!tAM'N@7n: t$tAM'NB7ͽ?t$tI覃TtJPxJJUi%}T A758pB}M 3i!p{E-$O C-ZuB`@S ;GS O}g `@R'NQ GZ8j겒Z8j!pI訅Q GmlA騅Q'Z8j!pBਅQ'Q G.~u:QkNo8j!pI訅Q G-9?N@G-Z8j!pBਅVJG-Jj8j!Q"q؎9$BPV Z%J4@ z!sPuc덵sًꅠV +7otBઓU ypBઅU Z\ucpW-Z\$tBઅUЭ0 SNp,\Rc8P˪@Q'E-Z(-5($TBk+⨅@R ⚺mִ Q'NCmp0B`P C-Z >o=j!0JJXSP[ '!(OI <{%(Ov9zIS>N< G>J wqSz| A-: j!B @P A_!@P qA-!j!I(@P >ӟj!IAZj!B N@A-Zj!B @P'A-Zj!B PP Z jB A-zu*$TABQ'NBE: u*$TABQ'NE:u* CRQ'NBE: u*$TABQ'NBE: u*$PBQ'gUIPQ'&IE: u*$TIPQ E: u9.@E-GU:!p>B@Q aJ#*H!PBX%*jVI}@Q j*HT$VI}j*V$>BP$U(j!VI}@Q EZ(j!ڮy}AT$@QY>E-J @Q E-JPBP@Q qE-L$nW\ E-Zʕ*"5GM{N: ᨓQ'NBG: u: ᨓQ'AZ%J8$Q'NG:upI訓Q'NBGcK:$tAQ: 8j!t;pI訃Q'N@G: u:jtI訓Q'NBG: uJj!BS 
qO-Z<')-RZHi!I(@J }JPQG[%Qo7F8Q GCX!pBਓQ cjsG-zgl"u: ᨓQ'NBG: upI訓Q'NBG: upI訓Q' G-Q'G--I$v|HG-Z8j!pԱeUᨅQ G-Z;hZ8j!pBਅQ'G-ZQꨅQ Nj%JW+OQQ NBGvLP@P'@j7&?~Zi%TBO+A"YtL~/z<\6_[uc &p(`Bjo@j 36J`>flDSl .~m}C՞:>N~ ZUG'm>j* [I4˾粦N`m=iwoUN=~Mok;ư {m#x'Zc&m#-2s\5&oڃ q]-2-2 WkrZm4mDվ@jwzS,9- ;z0l!Bj| 3nAX([HxoTg-yr.x=v$AvBXW`86$c"Smyr)O ;_PYCXL\} :Lܘ 23Ϛ(;8ubs$pAZ̩>@r5+;:Ȯ+x$pz+86" Z5 iW9Dvs88@8mmoluW Ȍ-nbz"ss񲈙W!GA{ mdnn#ssݗ#s3%+zl!#sc>]9 }fn:BmYRw=@9GGiՂq-D/8DfۘAf>5$271g;27Bi 7 tg&n:m 7}CwBێ}m̌ NسhG8 l^W`į( &6}T i>0winc ^t vzþV`^ôMY阶>amcR"6}?جZL5*3mclM?L أ>´M_6@_=V }} 6=+1;Ez|ci as%DKimz\&3nceGq>>: ͟eKܦ-m.-!L;KK݀T\AmA|brr5>+[`IױXAqC-BaVqsain"l3Ѿ6hq2l3.M!m5@Q͸(K欸g`flŶ6aMq aSa^mA WgQ|IY$mBDf}mqa"ic o$ml 67oG#i3~ML7}EB*fD&NIꑴg$m:Hڌ_[IqRAPl)+5i/<6*{0jxPi㌆$+mֹRmޕ6;C^ʛ Rל͜ ;{ͶuA`W'̮HB=<,T![ h9ބmllX6slѿ6ꔍ-riK9xAS6'm3 k QC|m);?;2eC9ڮg@fƆ #ksZ(SGQllQ6@Aqe׬a`H?#N GƆ mKƆ#jc~XQ|c{A>?G"js=)+zsHoY఑YBuf=fs؍1K渣}^j؋?߰jio?~pth?__|{|i|֣ϱ]5L>mI9ٙԊw_adTOvx9~?W 2>'뎸߿Al~q=(\?=c?=+l_nF[[am?i;Zl7_֢buO8˿+ʷ]Uqҗ_m?|?46|/{F_o>]qR dy}y O?2@z\Ŷcl6sgk*IYΏL`#9| !cufbF1C{۸zrý͊ZػѮXǣyns'(=/us(;W_z?f4EϼNySFYChAEQr`r< =)@cT4q!<ȅ4ci 浒I$ĒTIrEO:Q'rNv'JP,ɉ;InI~'ȸ*0oD'Ӆ4O ~'ɍ=A|dr!)~+_s!ׅ\hxaAl\XmX!&Ɂb iX!&ɉUd{&w'v?zX!&I*2I\k]c}]JvIR<(N-z\hEOySIR<(N-zS+hCb#B9 y_wra9$7 2Jv,yx,]H&J$,VBv,y`ɫ$ ˁ%bՐ7?&=.*!3&J#JӅ*!YUm%UB׳9V =J'1/V,gPV ^Ob1ãBPV]Xz*!UBG\ X=rRAP\1md/U NRJspW` Ary5Jo^̈́>ͦ.$)^Cz4>x&U'=FRrxlHHFkG^4?\oJȾ zّoD`;I*$9g>=r`Nȍ;AX=/VJ n #l"'$Р'I_0ިVJ$xRҐp:kcUD "HJ)ߎZ%{=K2j qLRqLZ%D+Jת<<<)x1B%J $P̈́a 1%yQxܞpLj!'#>9.o+8{sP-@1I񈣐9s!r{QHGđ#Jչ$z48 <)'GjZAc;<ѽT-B+$zxZȁ'|JAb 0ԚO㾵mg?d/WTن,>3l^r{1ɨS yy"X:|"㟜VllOD1 䩐 $^rxq 3V, X-, ̓JݰN箤ySIl!'tOb͇:%uJHb Ms%2!SIXx74u(%!"ҭ J:IP Aus6Z(T;CP4eCpϘ>=(RK5b\XFuQPI? 
+P;#!eF H1A G@+g/Qs(Vȉ5bܞp=LWQޣb9ҰLk&xx圓S,ƻ?/SJLY٫DB.8 2%<(˔µLY p 5hF!ЌBk1$^ H(Q$V!@2 dLB(Q$2 fdԌBP3 f(Q4׌ @3 ) 1&b @xhF!0BP86 F!(TA0 A( }<8ƎG8F![C$ȽM)Ƚ'ȽGo/z!<}ƅ90`R7=T#B#{uDF!0B`0 } a( 7`V[za(Q F!0B`H~1b p- [ABFųtB-&[hM-&[Ln1թz2- [n}lE!pIٳ0ݢŎVdB A Js0ܢwJN,I0po-`^E%xR{' +ܳAIE%W,BJ*[g-I[nQܢ$t,nq|rE!p\B`@/&_(Q8N?1 q(Q8F훐Qc:Ʊ 1^Փ1 c8611 q(-8F!^߈K(F̐/U x:dJEV!E%`TJT}QVa`~TUJJJ)qīBxRR@% b/&_lޡ&bmS/ _(B*6 E I~.~q8 / lSF7?׆#-k$I0Ǔ'x&$㙄u\I(_P Q΅|MBž&~98k$ԯIBx_7x_7x_7$ԯAB&~MB5 kԯ _PI6 l* t\"8X!t l:$t$`&M6 ,`&EsIha;絅MB $,lZ$IharmZ$CZ$Iha&MB  +V,X$B`a q +-ZXtV Y&MB 6 -lZ$iai ®<]x&ȃ-0 `a[ {3 6 -lZ aa6|రha {6  륅ۤ $,!鴰Iha O!yZz Z&h~Omħ7آz} h $7Ё3aMrў{>1Q_g9N~'Wߢ_4ݕQ6"?.|FyͻF[TO9@O_c}An=1Ė YھZ1ius7ڍzPN4Sߏzi7hwB}Y^-W}}I:<HwYcՈ-5^@_3~Ш7r(t]^y2GuKH9.y4:u2RSRl$*(7V*[`}`Mck*.ߛ2as$VİA*7G5A.*[tS:&aG&B)"kS\fm*1ʬMŚc{+*[~z#kSR69%R6+R6\;S6!#e OLXHk,f)qL4,:)[ TõelF) P@]U eӐm̔Mt)鑲sѕ%S6玂))=)@9Fumw' Pgx:k<^"es6HٌO^ה1l n#es޾nclbbul΃)fx(L ejv,Sl )pR2S6,!ͅk،욝%Ec$l.^EeY$l-șxn&l.^D&%FpEްG& Lp1L r a3~w?>"as΄]2u[RFlngֈ) pPFl5#6wǎˈM-"683bÉIxuhEM4 eĦG5.2\sfcp8qzLsGfؾ#E8p]ᚣ08pQ1>5b`M{֜ѡɚ%ZStԨMVDk@5kJ@h fbh zfBK\vhM>h_g@)hMkph }Ž"ڧ8!ft\iffÄykh#WÖa\MyjZjN;•^6OU[vO{:@j69#Wsaaik~pՐ{:\gfCl1s5;A)$|A#`g2E&VT}7j.dkj L?٠rujZj`svO>YvOA{vO=kvO/A=='Gty=љqKd~[Aq$jH75T@Sm$jv@:k3} kPX]*AC̾@ v횎|JtM ciʆ%41b]˛c5qE~DtMrvM<6q$ SSwT&#Scg vMJvM?1~]ӱk:Tzfjlʵ6M|M1^,etNoɣ!_X OKtt;;$q:K678q5(O6` owv}DhV7|fhޟZX-YFE?@6mzPi~, 6-?Hm6݊7T߹=m:jMlm^M?Jڦ~%mL8B5ML,0HM"R$Djl5R!&M7nh~p2R!Fj, Hء*8WDs[߀7=G^镻hN?wdjW5%>jF}jkPW:]u͍{3GƆ}0Pc l^xDG9FZm j4f1әJJƮl~q~446 olwNfttAtQt-'hnDqا~0fïqze :7,ss`*wza3kfuwfQvN; ;}gT#zcXWzWn蝎~H#Ef ijX"T#m7VN'h1#ӱ-uz$0PM,<}t잾sas[X<t78 mPb'ao {DʼnV>GF2InCn=}Cwz=!;nNy;twzYK;=Nniuz+ZƍQMu-Ix Zp:dt)Zu~l>*#V9@:gi~A.ZK`K11^&rآszџ'm(~J?v<4}~/2/Bck?˂^_fGmeil,qeKc\bh=϶EƯ~c_Vds!7 p9~Q{~5l|_}|\:}T>QӦm7i:*m6ʏlqK v;.o_0_g9Wf+8Fw3c#e/w]?4>u7/Y[mwT|yښqx1W:kLwXϻvb\ 2~U\ktx>0Y$z@cdzHYs1Ez޲%Y?T>NE.f=MIcƫ_<^_ZJu?6(ײ#w#Ʌi>Ō½<DIR6O I $|Hy|쟶qm~-\Ϝ$umulq<qaknB{t$~CWxzYj 
<$y,v4J1ڛƍMՆ2n}_ogV6DN{VGQ^TebP;JMmz"KHؐkS$9~<户Cqa>w.='ԋTHS/BnO$c_X߻ rz>F!1Allu%c7 hD$g_Noh|F! 8Oϸ-+IG85K=Vr[V',!-\'7R6č')薕zEȉfbI.4K7b̴ tJNr[Va$=ea9nY$Gt"-y{!Wt"=<-dnY$Bjt"9-z`!#|IF>!3!(TB3!GP%j{"^b ~i(js@c'J`YB *T*&.hDNO8 ]96 #|? 6s!ʔ `J2%m (SBP\>D!G Ks{"^,P}۔ !sхxl[HfB !NYXG:6c|JL)=$#u%'xR<$s^(SJL AR0 K 2d)L):%<$#+21G!^xRJjUkzFHJxRr#E$J)!GxJR|BT))sl}S2cKbJGƞ祳V))<'EG[$J*|L5H4نˉ*%^͇i $Jj*!UZVnZ%x=*!UBPV%ZU$_5>!*!Uyj}IX^τ 3z&ċ>2,gj!x < #-QW=-K.,y!VʹnܞI3w>wbY!e\st~Jf M` gax,!ͳ0BN= #{^&ݮRQi!'f܏pXifB7Q=0 0B.r{/תR< 3?NT{BxZ>!>h`_G!t%bc{BGI =ċBv *3PT 9!\눎W_j%JV<V0Ќ>Uÿ=o><4ŠZ)irM׬īyhFȜ [Cc {;S3BX%i(VJO)+3J!^X)bdΠ]H،/V6x~͋+V^타˗iұhJkâ)I,AHs $e) e)GFR Y4 ei$I(KP&,M@Y4 ei$$)KP&,MBY4 ei !KP&,8Dž,MY4 eiI(KP&,MBY4H$I(KP&,MBY4e)HY4 ei$I KZ Aj%Jd$!K7.4 ei$ K LR Y4 eiR,4Ti IN’&$M"4ר| A~4(MB=JztУIm7zѣf*j8G@&ևԣ;.S =zȈMB=z4IFUZc w4B ؆>( Bт  !B$DJR!BIC3'OZ+PxR!BV+ <ܓ 'MBO*T<xR!BI&'OZO*T<x$Bܓ pO*T<xR!A“ 'O*mx:zR!IICR&%MI`}W-aH@&M=edRPKQ!PIވ"MBG4 -ijҘOn6> |yn><}VbvoՍcM-}qLҼO(MB%*JT(Q!PB\ p%zx4 D&MZз?m$B`C {\,u$켛BT QB D@6o> th0 jC>:TtP!СBCP *:Ttס СBCوsgi?_sO% B`*$nHh*U VAW'!J8' y=I:z`^ISO!B`@P^X+(P!PB@@ q Q R T($TB@@ * 4 (P!PB\PP@ *J(PT.@X qjw/Veۡ@xR 2SPՇS*bċ/VJX)q* TRl(P%^x*E,B*q* T+%PiP@@ =w*P!PB@@ J*ИD_wu\s;kNmA9DʇŁsKI_M>pVs9<eiN+9|҃ԇw@!wwr'<e!/'֥@Xps1Ăٔ7U._p˹#^/tW W 2'4/~߼PWު˛6[sGqpy'[l7oWow dwycB o68C5WUx_Ɩ>570u WVier7kPXt6(;찑0WX"y.Z@.1-?a胟0t?ċf_Թ V{64{vo6rz/Oi+Hf&| i[E[LL#Usr<%nixlsyψؒ'x:eu3g156cufDH \{`A "j@'J%e}e@ hfb$qX 4&EbmT\e8蹣@ vhlv.vXi3L12aha90McЀa a;li=ivj-p#^M@֔Onܡeg4; DgY rtNYbHG: ԧ=0^>v[w8EM)_l*5 ߖdg ;hvؽfc68 Dh,pW}48|#Oo(n/i}K lkc32l,=@Dž|6عu@(BvH /LD. DxkhgfaHDmXMUj Fcؙ6Xk= o`@ZXbui,ыGc_k] `u+%}W}N9WbWTG8غ`+„|{F_;@7 \KfuKZϦ|aS gS(8T7C48pMJfS#( \7#V3v cf=Zza𽿢٢h zʡGߊp$vQ'c7/)5]cFk5=>RDU>~#|ٳhg D8jJ;*fe媤֩Jͦj@ RRO'R&\%hPI5(S3 FJYzkTUJ PJ &K ? r orRc (5!$(5t+Sj`s'QS,4PNJy"i NN8ʉ*]N)d):M%-U:WBd0S4NiO(ʉ| ൻ_S^ /]N z }N!٦0^͝$ hG'sɿW6 DҕMT.(dU&2 .]4*֒B;O㼭P40hCd2wDYlF,dd+|Bц 2J![\U^4 TVfҬ. 
~$.)KCDIBWaï Kq#Ϟ"&`.9pR\M)9笃$mϔPͥVgcVow%6ftwOz3853I6}odz 1^R1vxI5ϗTvĮ?ҽ`о%H2$zmWq@Ԇр1LV2b : IWLGFB 2ZH6p\ `AU?X`rg?v w)1ev!PAt;5~@._fX}nvӛn7$&%,ib(3=be+WpT&.b_#Ma^NI c6׎hG^5CtZ>/Ms1 !m/hq5݈YxR(,#; jAf^m]$&`mO~K=P{X UAn\emp.a[cWVG~ ) ΎTu&p̘I2d)?mO^i4қT axbeD `TUM/M_=$wz]b&^?AX7LٹMib$m]9e-euo{x@7\hAX ,3oR}lƱ2`V=GJۺ9N,m,m+714ъtYLӫOMY*Mt7cj,VV;TfDюF7IT, A BΧ2tNHX/MH嶼7!|gG}~s~+_F&(Rf3 7*#eP`s&!Hg`buU /*cL ?<U`vLaU=Xc VaaMi'S.MD QT eۛ okԼ"Pzqt[fc̱[.8b) WiY}yQ"YؑˈgK4+VedxȬV\0tY^|-" c]86،uV?sYdG{O*[Ãhip`Н_5/̡JI$tInQD=~WzO XG3(镑D rw4ˋFg#'[Dщfzx9 =H^X^92wH 2;?إzqXkA7O x5[OvbLNc[ B@o_e? pu)yxv+Vx}-cHoa[zܿ7)=)>^e> stream xZKs3TeqRJÒ APy`g䒲]>k83 q!zD,'Ods[| 2HEIR.] t_w';yo?K'b%uoMi˕BvGlz!bYSiѸ\YؤGIhz4;ҹC!fSGS ӔLF$Kv4_Ug$6F+/A*fB72tVS{4dB`G&/:V^+J FyT)H̲H␢ %7(:1#ј_lJtc^ֹ^5 Rṛ^J9J%0zր&!Z܉)Z_-koX)dBKt˩YUagJ&yPa)^ c})svT2_uI@, 3"X!0jM~94댗,drnG %QMj+Lj824xU| ^J&T˛ _ھ%#Uz_%fHVOKMѩzet~@@S@L6+d5,mJ>j+e{+,]/WWBKpН/W*OiTxic;FmwN:k]-,Uw 2D 'tW0.wyTG uB_տ_%g>XQFE{,WZއg8[*vd F̸I.,ExjxF;})ٝ7nChC&8)ĈdJ7Έ]b:?/]0e!Z0<^D;f_y%$dz{s# ^> ' mʹ][6Ơ$r0B</WF> >cjha3 N9)6gi򸏡U*_ՋԺ<%Uo =,sыO'?d z=hܡ;Bb:L {h>e]v8k hg&t,NldLJK'Qv7 ;8^l*,ǚ[*vT}VeDF8DtR& P{ md\rsN 60# tҶ! )+Ҹ}O^HP=^gB+=e=LCZN?(xy^E4)uxvfڤa@@Vf kr~%svdbˤ)u $yPE z`]=4%MBPAO*H3͚NMaS:i ėvI)ӽ&Iٶ]˽!plJdET]$eB#k0Tc iW v7YwFzg{(m[3fh /-Dz  }5+TR--B̀gp\5&!,@󩧘HuRx8n<*-c!qjuqٴÊA#4 hQwؚܡy^hi>PC}3D:# &ڠwjh UO=KG-QܳkܰkYhKf-X|o'R&Srܰd}=DFp|4yaLduz„hp 3t-7dmd9>E^d3}k $i.qװQyT^m-6G& 0Bw ȍ2% FzI:zP\~Cc?"КRAU5dcAfnV L2&N$m[ '2i˽ƒ+|Z_G.q~P{U.@b>(ߔ9yOY{9L fn"ꩳI>$oTDm"'ҨAK?\xB[O5MJ}iEkB Iʌe"4m<̾f/7t. 3 H!N.}DD5Axsg'=G3 KoXo+co$?}&UW@}r;s?aDH}a$[$\$끦z;6ܷSg_U.lTNn6ߡGgK [)`uԈv?]c8oY_%LɼfM? 
x^Ϲ߽{&+>.A>k_oY$N%hK,.t5.ֿ_"7hhx~͵˕}ʦ=W'rߓOɘendstream endobj 233 0 obj << /Filter /FlateDecode /Length 6014 >> stream x]َdq}o# W`< ~虞pIs"Y]3tAkĉKf33ow/)Min}O;oҔB}zٽǷVzs+4κ)T_w]93Pvٕ)ڲ{uw_/~g9цuaB6z%=ǸS1%[kvSA::H>e|b{}&֑ũ'4G^'.jww6;]ߤ\*,zf\)K.ON4<ƕ0Q& UݽysE3o11zv{ֲ3UhθɅK>b[QV)wv{؄7@Y&ËT.2{v{5L&8KO)?> ʟ|f34G_/3P"|iu@ln)@8(=kΧf ؿ.,h^rv8YZ4eF d ňl@+>vs%pb!A1re tʞ;苫Smf3y˯0ȄƜgV -4YIdy0q~{. i. 3談QE4éPF Gf(%u| T]>$:*d|D9k$ MTu aO ~cѥ@p@+JXŪסY%ܒhJ5TҒ3-Yh4EC;Q:R4/%(4gÖgQ- ' l#5 ǮH~RAUX[4 4G-fW9Fk[5KR$4-єTr1`R|b{sP1<+0="EN%)F@4uPY; T CYATR!bب8lUDO/Y1]5RkD;Aaȁ|9ѡ ~A? C% NQ-: D!7|Xɵ$4(i蟺2B<4)3no<0>WXJ|2" c$+.:F-"4(sѿ7fR ]&hp"(Ո]#jH$Hsq 6 q|L)oPhZ?+pHmfc4hi Kir*L# Nj|"T= *m4W$evd_2iXnn7N#p>D;H2V~3XU`w9bcNTÏ3UhZ([(hU@]Zr/J$P*<+SW ng%8C8RRAIIګn0^|(䖚^33 iqt-NL0/ďʵ6N JUxbNjy%ȁ `@fBCUxij UErBi,Oa6i/M5X9U,ˮɛ5/+~S 7PBt:OuӰ*D<(ĹE p+ytJҼe:#TB8%^/ё*QoTD[0.n2fyV.kUxsՈ1Σ6D5 g*H"Kkk1Dp܎(zo4/k%oh0(pְ%$V"S_g vjvS~e4<@@Fs_@j`ny-2S3pJ :M `P؍udA,eԊ?PZ!īK <}!W1D2ܧ%<(1;R=?̭,hdBVGR)5̔F9v435|hvȤbyF8UHAhFbHE1ܗ8\#RuAR%R`4/MUʐF)Z9-&Fԓ`q|1)Hi8l1EO)O+p( O52K+V%< HY/aS8I -1V5|xxN$ ψ0qh|LkDtR5 U$Ywbx=OUqǍ•dyKpl["U50n4Ӏr-`Egt]`!Y;YB>BkҸCN&^;vR<;Jsx9 *Fzeak)?N2#뵴$^7}xbʹ@n?Upmm篷ǯ>mo?nGxB‡N[ש?/<˖xSN۪c2F._߮C~'禵JI.t,Uݨ&]ZaK'{Qj/`?7 sK^>ĊȂq^&r+ `)>l(bvpԐ_,6Ky_O?`bt5o fI<b3\,@ZzC`_4f,[ﱙN dAeQ~ %ChPӕj)ڙghMX,RX[65@y2D7&!~x}԰z!~Y=wAjcAﶝiMF_{ߚieɟQaXtr|@dWۥ/XAz7MB:uBilbPvUosco/tp|cel^a7@ې@_9-K`%#mm` !50ڷ&D @o7μ޴yGde@, 8dmƩP;,lr2`m>ʴ|1=ṭq p` _=  d~L)}R|N9Z  -ڻpfQZ4S3xJ߶6B]p>g=RPL +vӾ6vhҷ~@'}4- Ca@l̂nQ?㜏7O`56QmZ{hn HIi;ߢ%k/{^E}Z\&]3!յ%#=o2L;_Р>tWW™9Y;tz7;}!8Q m9w<0Qޞo"5WY4cC?P2qJ'}ol0IS{s', JY]Тc@:Yա|&[/I=p=l2[(a N3M$?JH}V]9PQ^PCw |'O\44q667wm0615E2t:򾵑l#r~yŏ6[3] 14!7mtS0x#=^HLϱFDvwFmѢ[)/u3  ӯv 1B;'Yl$ u t65Jrh@XU#ɧY9~!ᆬS5r\ eY^jŋlzecOOVTw^691zv_Eڐ6N/8%oPq}<3na@ͭ⨣c[Pѱ|IzvTOtJ2ef<w.T%cp/to^фu B ̒DąXG0}L>ܴ@ A$eYi~{|?lO=~> stream x[;sdG6(nx"X۪r6jeKݕJ r9K\9Z31p>X|ӫ󅱖58,srP)져eVp)`E\d__ a!!* Oi1H np 9^,ƫϏ_,9\|jU ifA a|euq23`t*6 o@/ -U礵L*`ǰVeLqiPB;kg^~.,>H8x݃wo?zG/N__`˫<>_\ 7_O___g/;ß~~?,y8|B:fB=Ţ(狉mΦNތ3./|3Vzϔ rxBPyIb S$ )Y%UJ 
(K: $mB@J?wImEC[ p8a) Q QpbQX!ԂR(e&iw9d?z)q!=L:E۬M]dě"AN'3(Uҡ$|ORPtgVʒG%M)龏ca܇+}E= CUxM{<৾vtέEY#h,UJm֦in(RlA#CI0Tt(e!H+ʒ ( -c "IÃEGqo!Y3ɜ[q_ 'XJHpjXY<ڔ!&an:@avAI"K$A $  5!*$!I iJqf@0sTJ]1s;2x WD iR%ʬM]GĘ&!#'gP^1뫤CIS8ʒ'@Qauk ʒt_t|"'ǥ!5Pǐ-]ʃhQ;@`P䥠pSYjC^h6%mҝh|*h @f#ŤFH(AMesBPH((k@@Drm5ӒpF^ {@g ':{Sr/X DTR[$ʉзEңNZC%I/PTt+A_@?V`)7CiVOEޤ]Jw_ܥ]S[)KwX aRPu1AƸH:QPt/ <%*@PDhĵ[vזF|?z|uF|/V04ЍEέޔ%&-(nA~̀-;" ;W i-/\v`76jUE!OȮoUGUn=wzYBgmJE9.Bt T:SJit('uqAEeI3O"QAtPIҡE"mriUk;aA&?{1mrr2qp1 Uvm!qE@׊8M8iw1A)[\:>J1!r.r(| 傑N! y.*)lM}Y-WN*p 5N@2g1uJDžPwBgsebTَ/b|\hA b.n @;[H1!|MD' 4%Ϻ_y>E@}Ѡ|Pv9<*O2y2/:>iK[8xY6 )T45x]Y+ȴ֯zz;(~B1}n 1jkKmk/3=_~ncvš3(:gRb) J͢Çr!ӉZQ 67`VF3  -HtMR]%)a]y$dj Rg zy%1AZL4ugmsДC˼X-XkQ8<.ܵb#۳V\pR=~pX) K-Mk-e:X!G蒌ޏ䯡~d^2@ʹ".ږ8O%zeAW?WU>*wqwU[Pڗͮ"ɛ%"U MuVGhKZ\6^o;lʣ66J?Y,cƿ~Re|S]KHN gT]W+@INMp;"T7k&PЕ[b(YjjnIX>\ 1!=Zula__GUZ(L iF6]<5ִLS 0Ǐf8k%ڰc~ҋY%NxݸgBg Ax~z3WrR#0O)endstream endobj 235 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3361 >> stream x\ y'((D"hTJ4*DP)h$QH("H4*?w;ws{ߕSwߝ{s>s_~]__h۵,//徭N=$۞d4 êqLmij[nݲe֩q'YZۦ멵Im59akg۶mK-:h5OHd?$(1DtLc?E0CQ&L8ӱB;ܶYhig1x18]ӦPbG=/N|"Ca/'O_|am E𫝁vO ┰hy|<h9scΙd6Ȟ#$9ہaOB&1C&O%#|;J=38aqʘ]'! r?yg'é1C gtBn'Ʌ] $!>|`]5?<} C2rwޅ ȞlX$<% vW޾}Um$QᔙBNMrɧJ?@o޼)0/;L;C9ͺ既ygpA E'ׯ9Qx xysv(Jt3ՔĿ@a;tA$ X긶s 7z * ߿?-ask^L;T7"19&rwMX4be?Jn{޿se\qw|-G$-`z3>L{#v-p/s)7˴R#_{S͛7A ZwƂw0L\[E1]~6k8us=G]HV <9|k׮]*`|-n4K'Y?vn漆V5< xGիiܩ-xIgWZ8LSw9g p\-$ 'W85{; .]4Lvl||Ӟ?UGJ9J/\0^(Q >w>08v~sU0XPAC9X^˜'.px9Uny&gV) qOxS?\ Z\Iɟ6~49- ճ%9z/jMr|'OÀ߽ .oWk`׹b!kC/H+W8qB3cQe0Y3?i? ۳*+9 S@ye&=z͓ U0r%y4|/{3?gH;tÇ{)K:rMg pAe ǟfV|$?6[DUWkρ |ݭU[mM ,<#/@ڱ՛,e ݻWԒ/. 
n,9l"0WY 8 ~:ݻwvLżh~$Wx\';y9;w,/.6o ^gڛL'#1 &Gر9aTg=U4&3|m߾r.Z,΁,5C1s\5V}/f92*.2r௭-D gx\3c|<=V=|d[IsI_AΡ^YUSTs s9]v9r^p(payޢI^]W=a;g\_E+d5ϴ\sBGL֭h'D!~)iBs({ Wҟ\⇅%3_G)w,y-;9伕;lmes.x}A:F5)_}dU3h\g9^sBS#Gƶ'C y^o,G;Ns\;ve:<'j)ZsI>sɺ|gڅŬ.{r {Ҽ[y_:uI]'~g>0 ?v0#|g^a4/^t]}-3S}Aacezw䛂yrF=}Utav]|:̏9/L1l \Acޜ;sF4bީ^<Օ76}˹fQq=ۅjp{&{o߾n;?{[y>-E6W۔+Ed=\ʳtnCUd{Eӷ\s]cÇ=)kP-CB~Y]"̻GdaϘ)E >W s^nmf ˙3/^X?QϏʳԫc3bHo h26T~h57f;ڄB͐6dpΓy^ 7 0_^[;-es7;ss.hݢg؛SF0im&' o߾qKˠv/|`=ss~&߿KK+'َ& E{jӟcp[F㟃Z%)Ԗ BgjE-Άyރ3Ä7ϟ?=yk?+g{01/ơ|M_y!Wb~ 0s [y1׿$yO%Yeø= "k*sFayo]Z9\C$D BC-gl7ߎU.'|N嶢>>!vyendstream endobj 236 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3742 >> stream x=ݽKkkV,DH1U* #*v$UHcB,4B`Bݏ잝3?0w|[;<<<:::wN:^p?0Zk.띬Vry ''' HaTN\?ƚZJb`Nblv_r%a]E*TA`WqWݲ_dM $ ́%3e>-d-73B;^1/m/01 =7x#LVDs}J[|3 y_*Bh57|S8QR` yb7x#Ykhcy뭷ȳb]HJJu?24b[؋'DG_~mR'` $N,6,B xFG7\^ d;CAAՑ0k)8OuNY >e> F~yP s>oF{,ٴQE`n汐uEHeUիl8#;ɽ 6^#4M1~G3*qʽIR '/ȋ|"q.'ׯ_9@2܌NEi'IƘ'OTm=t3B<4ܳ=^NaJ67O>EKy"4xz{ S* /' C;s~pϬP~F5mM䢽?ZJd^-ɭ!*Sώ5 V4W,X$69U@oWLwkj |嗹DW3OTNanSru#zܗGI[W_}͘gdD8gݫې3ETPkuvuHrۭ#f u D;ʊ@';UЃa^!ϴ5\ ;9o0)f5@b,g+nơE?tX1[/ *lG-C 'ڕĶj WޔR_4go<\ΒNy.b+ga: oݺ% eF-%xҒ[[fNDNYܼyŸ|{xi>OaLoqM֭\_Mth >ZXZ7F {FAA|TFSܸqc UkgakڐD~КSe?Spyd~:Gb1p {$xyY.%sN]qܶklrסORg3\-o7lHbI,2bC3ize >'^Otp2뾬)Cr||Ϡ=K3&~ֹ?k0a_V2&xa>މU{LL*KSL,.3OW7^`t;j~_84x:<2Ϝ˺} |,f߷"d:/h'a>ތK]YG}DÖ+bmg~&4a~sҾƘgyLu^nQOM,)ݔ~quj!N˄0rř꛽^ey\۷oi1u![/^b9(u^(Y v0.{Zg?jIV9EgSOh4Ø].GVBYܽ{WUa@^3L~ƿlyeoWb e ,twRU+S 72cTk"b-?uC%&KȳQOvj6'),R~G6^a0agԬ_>vumb]ѭDvW~7Y\&ȟO{Ɯ ݊jGIy6@&D1O}ci/ M-ѣG bA|AU*sN#&j'P7-XNPY`cvcgs5QK[f%c x!lꂝ/6+f 6H|sXfN)\i H09{ϓ'OYN0kv~5Թ͘L {:\9jo[BsW] Md[ʊuV?~8)MRK iټg|\d-|4umZi"к z(Λߥʷ  YVh&<2"9[^5S c!٠lcM6z{ճٯQYX-`}b ޼>-hZ93΅͏m sl+YSa>3-pfKm}TAU_J{Y8G34ŷE ɖiZN5*M97[zӧObxHҚz_꼬u-1΍5[ r|.FF0WVmsB2cvnVy"Y/?gW9Cxnm<|P+86|4ur]ʌ|JOs3Yޥ[~װr3{|nL+vN3 g&~>>3e71531ߡ d~s~"6n&& Kr3 ?= } 2uc!dF 
;"h1Ug"7|c#Y003F8w{rkдym2!6o'֭߫[uW8"inn8n(orf^w<'8711/{ΰ__d`3~dn&xc |ɚ%.1 NA 8O7oZ0)/:gE&0k=$!/꧿#ca Ȑ7Ti<gڵk:/l!v"ϻOo_F`b>GYĊl,=̰W;dK\^2SmMA2vB%r??YGMq~= ~4~'ώ9Y^ʕ+Co `? ɗGE$ĩ8\:_xfJM9{ŋ7L%<{L޼hƌBHz7ߪwo D&Gxu m[A>#i3MTDI}-endstream endobj 237 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3643 >> stream x\=]ޫ+ "hh(hbddfbbb` Q~swʚ>'ٙS]}X?~؈l6W{oE7eys'8ƭF2Z$~Gy4sԍ~ ks~xG>aLj1Ř_vr+[ׯL{{c fy{}F qd+@ϓ `>wy*E|'fcf.|/.FG1/#Z 0ķcBGhgEFCrG]lu?2l a1!Dr;@u #Bܞe->|`org;A&y*@␃| 6A cʼ\o3s. 5 Ϯt: ¾I ~972@ pI# pႂE7o߾<<'>[ 1 [`xe!ŀۙv:`3>i_߾}{n/)5acRGvEp;KHqln/ [hd<:J7Q-` 6T€\vmn!Ƽs>Ch7{簏:N)\ti﷉B$/EdxbFDn;vK3+k.ˊȅv5GGŸSwW. OO3v5_rހ ׯ/V1V9yŜy;a˭[*Kj֐ picł$RzԹRוOܹs$ 86^ e^>k^ܖ9}{,we"OPԯoWu'b3˹-|R{Yr?K8y0#rKhfyN-bf!%=ȾHV<#?Ӕ1/ky+oMK@y?{ԓG>PnDc}`E^4?/^|^.㇛Ǐ$ ,fms,iΟ/οI۝ڋI ͡>6OX#P+8+b'Gf=/-0^Ei#˞›ybMȚiۜm'q_ǜ2.{ ?=jA˩쵩mǶacH= 9Y}<3Ϛdw;%3๼Z>icٛwp#}r&_~w}Jta^y5vᐄ3nb16U%A>'KXsۏ=cU,-j6CŹ sSqEzNj[)o9{T˹Ӿ59_is*˪o?Zh 'y|s|>|)3_@XGK}/>wcMvȑ#Xs!)ЎGᄌu.ޖ׶\L +ѣ9a?IX?wo}^g<)x?: D9vX_Ҟ0T@)%QquNڴ;i66cZգM5V*cL./'N}o@~n? /oUc^ ;++;ydܗDEacvlVˑ e7O8@R˩S?+P/I^~ȅ [Ϝ9[M9Q4pf$Ϟ=;u$ko۳CoSKz]Xvp,ܹsxs!~Þ?!™ϧ+N^ܑ.s d+ϟ׿Oe"BXS$> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3473 >> stream x\IoK眸pĉ ']@Xل؎Ǟzr:U3V{hn40oM䝦|żKzlĖw25gS}r71߿ |p7ڹ!Odv؊U?<]pAh춌y#proA>ϖaS-eȡȽ yx!gNm᪹;;Xּ/y_g>߫ Z#k.]2VL<l/!KK̯F˗ $Mvv70߽67gB\lvcΘ7Yy{c]qPrGwիW '@-[#法=8Bja(vqrhuH`x웡l9ro.?{.x~ Tǚ7X`پ^Gv}肒_D#v[v`~֛;z*L͛7&ol6̩^{lz\۷o1h 0#g)x|=W{̒#C/I߿g xC&]`f>ka*%%""wvvd;QӧG^vh9GowЛol>O}%i`>|A  ; 꼀gpWknbK ",y-`?}bk]MJ=?s^nEJ^|Lm+7 AkHx0&-P8H߾}!#Y94%Csh/I})u/?~4}/8ɛ82pە+s߁gzf(L;H7 MA^;ɹ g!xyGԎa7]^3KFjPg!~}V GAd"GsVs߇`!{>RuGKȧ4Wy⎊"B ,.ȹ C#l_GK^ SM߿gH%jv:v2QwR]"||*hȹCU 30@>ZhSI?}NNhǀ$wiB/owЕ ,0ӧOa˹]`sGyjJSt  4xx ~ww͜]Zm@^JhyG&!&:۸!ʸ x] G - ~VX3;a$."Rm#*ǫJ|𶶶ÀKKHmYl.HX/ׯQPx _*& gfd?=\ &1?'ۢOΕV&Zf4l<6 g({ɓ'Ͱȯvdxh 9FI3O>h|{VT*<̇3J͒y‡=>f=o]j'#laòj6DW M rЎ^缪sJ̗4/?Nu떏y2':**!~/˪RDP766%dN ? 
:7eg4sxfuEaStT]#cE 8zUۚ4KoEcP,֭ϯD73oD!z%KvKg{0A'iߏs&'!p9竄ݖ9#qCp Gr8Z)Eh2μk^<3/#7)38"GK.! (~]%n2~Ff旂~6<H۬>g4.6Μ9-QmcI9<9a~F-=nT1$p\r!togϞ=~8y^iyãCC"ӎml"J.?3s;/uL`/g ~3g 9̘3έċ3YFbIضc>5Qc8 xĐ/endstream endobj 239 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3344 >> stream x\G3e " BaBXFB6)cbĤdDD$Hgoꪫ^fٙzU^j8ǽq0wwwׯ8ڗ/_主+6ul6K)qX,bcs,GyA7gG\+-/ǣwX#M32r,,_ 6 |>֠U_,"6L)|FW6%Mk+dd\g5glLk6g8#aq8Nxipz<~qgy|N8xJd1 vp. x:uL>[/Hd<<4i|I?#ϴC2Nwl| Ѿy:1iXiyIX= G D#c;<"ʙ9|^ܒ Vzᨄ{W%D!x!?cSڍc{g ūr]fY]yC"C"5ˈע }I\JݲRckecA_a;C|y_mZ%5{ch^!%ud9aaJa̗ Jg^"|e~YXQ8{al6E8KCsɜs׼#߷wyXEuiݞ=jkq[Z|* ~񬀏U[nEN~O~r:ċ'T>KOl|`7oxK8"\y|ƿ@PoLגr /Q ўtg9;l S]W^~@_EG>Y pAa™yS2;yfΆ3lwp/_.UtwX\C_pSs:~I}'>]oG^%]u@.uXM??x;tA |.הTك$ȷj}ϟ;xoMAy|Yҟ)e+;:|.c>~XC%mrK'? :2fw{幃[(kzofcpj̷wvVҭ[z0Aδfd~Wt )|<$ľ~uȺ4WoHW^K@R@Ø>͖:/6 bHCd5~9m3&{h &?tÖ_~z1@ ϫrPٮh:Ki i#2e?wQ(_Z#ϭ}Ű2h/bS3{..\M[Z 1xg~(x1va_lXjDmxau.m|gco!7% Q?=/ys78͛7Lþ]qUȼT{a/^ 5ж.,vk3_v݊2 / ̎Gto bٳgRyy?3:w$"Z{&3 ^hg0XhXjHɩv_~^0a/ǭ-yǥΕ/Bv~Lx!rʽPnҮɉm k9O01xGR䱘hH̏a4B$5k9O>,uНݡ0EX㗈[B5C=f5h} Oq(YxB r>NLN(^b  gZQXpMoBq^6޽{ 䂟C>:~N~X'jgvK?+Q6Iwa!xwveCV9gCN_͛!Զ f∙9zh *U<ۀ]0+7tVAx?TUGuyn\|8$XOJc_򝎚Sj8 x~5UIfwu)Ο??ށgQ 馺 >DՕ/k̵vĹs:ˢ!BʷE|5 gKbnpTSٳgC3Dm8+S].ѵJt̙pJ$4͜c%brAChdd|6lIl%_sW%i"5Hvkۦμ"w򻲖!6>wlyA2vcƷ¼Dؕ)q]DB|j*{ԩSXĹO[@Y o3_~-}wa3B(H{C?> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3360 >> stream x[t""` (_ox=fZio9ׯJ)]FfϟXFÇ6qCnű6?C1;wƟtoi埍 ϟ]q086;a@!{hӡqǤK+ /<# %6A/c瀇 B$Y+//7$y( :!;~>3’ݯ R}KgKz`Nݲ # x'rXyYJl6V~X|_|-_5:T YY}˗T*K5; 2A gT;#,>Wi|0|+c,47K2w;SϋaQwfM4j>ה|r#kjYse_s :[rP:>K.m|7yv$测΂DryE?J׍˯hކ9t}nϻFO?z4xr*ϨtfFŒvQwj{K*Y&:'[[m]&QmSmS)=+f&<5A)"wn> 7NTTqW&oGdNm 76s d_ƣVN1W.3`ݧbEeI &?ʇFE0əة;)]w\6;KÄ,n-[< 5;.1ݑ\3K5YZP}e\/=H%Fqt:JxΥᵁ5zSf]|ZT>ܸGX2 TNG_a6[g˄i,R)>|(y^!DBNX@, Ʌ Ci$Skqx46߯sFpw۳tC^n˳tٟn Lox$wügڭF?^j۷ʽtL#jkca`&o<#0ƍ x'dw~yCـ?`aq3իWkC%g;ؼKp>r ]< kV/z.,q.]9Uq5zu;b 2Hzw!O9jL-G?^sçf~^O^|Foۋ2HÛ/Ksu{͆lOmfGn>soE T/XΞ=~p 
M =U~mVR@V6Ϝ9̋aT\8Ǟ/tJ.𬹼$^/,ӵ sx9{lr12hrq [3^yWʩS:k<1RdnÚ`x.j3ԂeɓL9ɹOx:_*? yg'N&Ǻ6v4?WSrq$yzIYg1}ǎ geTAg/Nk׮4>5<om}wׇWI9z(ʹꔮy3R᥼ |}rH`޽"; VOGuni^y#Z9tT{3ZOu Wʹ||hQy> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3247 >> stream xKū{-  >wuϝ{[5wuνRZuòaeXUkV]b1,ڰόf-V`{vk?ahG5?߿هmk{V_5;ǏWj!<T{O9 ~hj{j!bWG߿9N*?,ЇWÊQ|#q mS޾hP훱pf䮼 ?Y4+S4xc3yV*#1Sg\9m#?v2{Oaį>3s/_B qI|]Mm|M{asZܚu~޵`asS6=G=O㬏ӧOj\Pw~+mxl=ٞL?R|so<|sr vįi1a;C> MC);)捜 >[}M~.hyw{M]ԼSy%Oh˰=yLvYj~)[mRR Tv*̓T yS=6gV˨1z)OcN5<ؖC5"/~ `b[W|bC]@qw5.RWhV{LK+9~q}10ٯst:nj:w0GS=/).5į%ۇSS[o(GvET|`2}u}ޣh$Wve=W\@;vv8Òॱm%M) 8 <.t[ʵ>:ѣrJSW^=_R{8 | J5x_=y7ooʆ;_5AHH޻lӧzO_H03eWR`#IC= ~8 M!'9oS'V~j%F͓ϟsaKܮahHKj4Ύe_D;$1nnw;c#0/_g3=Ti6 /%gOr}Duxld4L^zSᲉ*p)%xRI=붝gQ Z ~NGۗTTH6#:BBK'ځȝ_O]JS><[wiV HB|I${13yU"ܖyVxDhBC )7^peY*LFXC7ӌ UPU$9\?0CyC/5û8K˜|ۗz hZ)gbB􆓓8K;BkOy~Iz$;.Y.¨KcP %}>!fk4VE ;6ʎ-T^3:mҦ5N]^7|+jKrǫ1d{R'0H˱ݹsGe~"7cmvy_%P0۷oC*J~<\!l<%< ߺu[t<hWr0LK7oެx]Gٓ,k>3|/ylyWnܸQ1<2Ga+uAxu6?{&讋WTm63zvi>Vڵkz }B F KOQUj<1I^3^\QORSf6uʕ+=Qz;Tz Sr_\|ݮ'V&a ZIJV1XTʥKt5[8SQ턽>ڋ/hJ7IS }TfƼpLQmDL*|Kjo::u*uhC&BrWN$'O>]1pu p{xቼ"FǏBLI.M iy9|&aOlNcǎcPy]oGJNOEO%m5{G1=9r|eS:10{@|m$>|5N`q7'cṢcPȏ>E +kCAJ6ImO͟o}|_u$zN9㗩 `X1gHs K![Vwg_UsUmlcc@S>&މ>+\)$1ź&6֠ԅUIxIW~/[Qyo+1OzsQ=='+/@Tn7>C( 9%WJ4(8;ֆ}qE yd{7{8? MW(tx''Iһ^5ciBEb# "w.ᔧrb~<44Q7X+׹^;{grhDJBœ `7H|Ч&@ dT9{Zih/DŽg{h+ =1`꡶4;… )4?I@7_b1C~QՄGaq0endstream endobj 242 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3240 >> stream xIUM/A  $H,X `bf3pm_|8y"2XXYDDӮN߿OLOZ۶vٱ`q'7 ~p!擭۷gN?%!h۷oCH;5S c` h]C#gGxFZ &7/r_'&g+oG6(o'<ʛHJ.H_~ļ/1 3/x~/gAyB(9fPՀ C{?&tA勑sjFłK3 rns8p#W(ϟy\g|mۓ)QgNgSv]|B2B4Fz-R2s>;\zq 1*9޼yDsRJuM-Hׯ_YYZ|boy ۬^x R/MPfϟ j/.aO5'UC ~Źtޟ6-H[[[ /3-J{Wy%71vȬqNXzI+_ ,ϸ |Sv ˤ?~JϻT\^ Bx>#_4ɘqE)EEeyZU-s;<V_} $Ƿgnc! ի`x2vGw,h( soˢ]Е x)Ъ yaT<]xz>_r%C-&6, nO# ǡ^zDK/<s(I%x3_ގFo0*$O >|WsI>1> #84ZgLy$7ۇ.X%+oY5ϾcE|_:w7ؤG){,8"056yE~]gr ׀:EvWcXY~\" ! 
Up-O 7nats' $-΅:^G|clgHÞ9'NN8ϗg 0U><_i!W]5E[nl Q, Gp3U{ gv.u\2\G-7qzKhF^HNI(jG*-{Or`ϟ>}*cLDR*l =?J?*N9LPKy#V%&2P߻`ͥgi2f{F*?{2CK݌.Eä7I %K=z6ySo/}P?>_fE|IP|Mȥ=Yzԡ$ W<׿ɫiXPSڿJ^Uhd5QYЄҸMaYhfk;? LWgWf($&K+9 <ʯ G{^'OT7+JKV\+VR#Rt,NWh_ȥ%j)~[GgϞ5y d؅"|xa[,EJZ5[d<@^ҘSxEp"[?Bޗ뜅\эG{)usQE Wu^yn*z]uro9!C/p x9nՍVDGE^)K__I aGl)޲$.-fm G_Wo߮)rXzK|mf8n׀7{~ԺǤ¢6޴`l&Q_+v<)`xw~wi=y𬹿m.l#__._D˗/d˒FvJ[9-M.]Ol^",i<^p`Tw%wɒŋKKҔ*VOuN @.\RDs.EeqV"]vk6d Žs|o}~ Ν;Ts.9dDv%$KkGg}N[4䡅1/坋9P}n3g=>e|\]Piϼ6gө.'0 Hx<:g'3 GjŬyFO:UՄr ܎Y~ ; y:w'O5Rpd 7$kQoEJV ȧ'N݌ص}xs2?kxiGñKrN/oaǏq ACvjc_LT.Ztر|fl<:$*GaG#,~OBwK?=z49ln}9#4dp*+ff6xȑ#Xq&;kpDsǏm ;ig7 |?υ#>|د܀x!#mIkgY?%s{x&Q;e o0eendstream endobj 243 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 84 /Predictor 15 >> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3486 >> stream x9oM5ؼ;xٱdM,XEDDDDHDFDFDFFD>|g6o}L?uqGu)aA߲bϟo߿+?hdfvزel>/--lyyy޽+++]޲.m| vd[,_]Zw;N ޾of&9Uf[ff4f!ln2' 9>}fCskpЂ+>_d|&D x>6Lz {W(3YY.GɞUD|Ӗo ~ɽ m/_H!QW HL1LRGsļDg{ϡO~vF| Wg}sv>`"1/"qZ̈'V 2߻ELyzqC ds'GVq.168x/! xgO䐝5(}Έsc/_<.`rPHف=yZE,'G;N֕ۇ ߗgQ/7|g{_"+SyϨ8k:rwdK.ۙobO#_2_5rBm(ukLQS4 qˍ KP'wQCqarBr <{ײ>l9UT1D<| &]YxpR@.s2N·rʛi : |Nl Үf}FC [**^r+,x1~湐߿?r`qKET0$EΓvʟ>}: •٧FDFe QP#k,xކ?w' q^s j7x^8r- s{m5-·װ|~̲&鍀lg%`gZ'~b>< Ѣ_;rB>A>_t흫:~q[>yޗi. u)\saʕ|W/8xV/Ca^8Cc}E>OxVg7ol7Jn"4:>ɞI.Ft`VNoBQYy~#;w<9|F!{mC7}_ ^{%Ù?{$!<:O8eV|f3g2ƜL˛ m^ ʋq!LEyRzuð3?=>oڡTy9ۑxa?ncmll⹝yoц]>T'y8Y/`1kC.6Bօs؛CA}$s%K0Kk1~iz W)>yU+K3-I{}ʻ/3t2,~ПC f\~Vy%Ɗ!%;ŋ%Ȣ+ga{ym#ډ~zMU\?C};EJv>1Lʇ qx0faH?YKqg>5Z]*gx<<랍7!/졵2ͽ桹Վj>.bz+i/c|翄9M@ 1)x? 
13y_xt˾E ~Ag>`x\VzZʘ'%P|gx~oW'x_*C]EyK|ɏ.C8K~^Brfiy8I\sGǡvH\#=/ʝX<|?MI.8bWƿ7v/Pak.(ܱ]-5s+= )/ƭk]7|E[Iv^P A!={6$n²μ/B(#|Qm،ouuUf ^v_= OXۅ{]a| Kl"wd1 彧tAOk8d; en/#> /Filter /FlateDecode /Height 84 /Subtype /Image /Width 84 /Length 3183 >> stream xoKħ׆YyX2!@!DDBBBDBDDDDBFDBDBDBd.oƽ[:ݻ|WsNﻮkVl~i446fٰyv~}~cv?yygoog1~;l0xkСCixizxf_29@XS4+EthO1b˗/yKϚ56?wU@,Xv<>ʏ?Ǚ/j6Q{g#ӧOK AI68.Ked ~TǏKəg xbyZGEk!$Ç+‹IRLC\ ([߿gR;h~o{  J>T޽{N{[?5O)CS6 ?O -y̡ע&rt0}zPU9\]Â*ύ/9{ٗjgt<Yo뤐>v!Wė_j/sSJ ?;Evg ɓ'}Խ~-ySyVXj/#T~Ǐ{:EWBIsn<4Ks~<=FoN.NGs4ԗlL;Nzagå^Ņ(yƦL}^|WdiH~|{@(iSy<3$\U9P-Pٗ+LC~W&.%v>\+h_ɠ~8{k^B}ܹswҩP*L+ 9]Z},b0~a%Ç,ڏY6yu2sKCW`!7!L3-]۝zu1K5V6?Y=捴8M y*AGQ1쌮ϕyfX*CÇn5inuqEG oMѕY'nܸѕBEp_s طE]D(pŰsׯKhyȿTyn"%SCƷnSȹvZIj!~*|Iylx|">PWʕ`\ <旪=>`%x/a%tRX<Æc!vIv)%$N|Qq 򱺧 ޙq?lr6H)e{hx򕜇~\ k)템766:Zɯ/!HVK<%xo9|Iq~'Od3|VxZ‹杭ֶaspN'NceJ[QvC[]k'vBǏ+^|[> αc03=\Nae<%.$o+WQ~ѣG_|  %.:PkES`R'oJStȑg{S8iɖsI{ ~w{owxPjFZg$[: _[=džuwTK6p[GgYei?,]o1v<{qPӡ)y|bE'B<)ftpl >cB͙os-7T\_+<ReũG,!y8X.qg{EyȄew*ӄ&9N~8ROatQUHyB{Z19n!?wx\~ ]MT|EcxAҞgf/ ;·R.^\w t;+ AOr~kkr޷\Md'| =7zl+\ytV/@4*ӧBJDy]5t4t-8MuTə3g[L/TOx^F(ϾSgP\;wN$ }QeY8ã yJy/Qm#䬹 !w󒻙 }E9O,+F˿uu~^L|735'aLϟgal?{k8GfQxCn_&|br~EUO_&Cm#9.wȆ ?l.ó쮼 2n\|_Ç'.rγ\K˦+O/nz*cuC1ca];/uޏPx%*endstream endobj 245 0 obj << /Filter /FlateDecode /Length 31829 >> stream xK.9r%"r[BCIu-$5 HU"%z1xN̮ C01ߌo1[9}V(?r.o߾۷K77~~^R8Zo?~MOHo9>H#/?Ƿ^^S:>t=<釷_zm1Vkno|~/u.A9Ϗ>m `WJe|)`Wʹ~)`W*c1W+y~CwߗP} Oח:W%߶s |_wCPk)Y+\RJmu<7% J3Gݔ,ؕzn#[)%u6$1?J9X9G5 5%[;qK\Fƿcs{m}YyO+#xMՕ *}51B_Y)e&ҟdf q>LZ#s1LIG;;V:{ z/t>?}_-?]:x=K7V_e.\jeŋۛe>lDZZ9<>=Ze{:aVyMeK>?da5G_z11=dm iu)ߋ\C&$KLJ^Ο1C࿗5#S *JrB:]V$sI~em%}1(Yvd}Sc;y4 `ũsLRT8 $ʼ:e=QX#}NU9|X\B; XGw_$6d:UL=ꬾ;tɅɹ9aGTMhLۯ۹R1!Y7ɞaNYSI<$`X12k8ۚ.c kC>1OV0|m.&Z:5&%_&%y#%x`͎J֛LJV{U?V;eϭGYCof)c܌2mY,픔ƥV$sHlIx(%kV1='~,/I]3|(F} %Wç˧`_VY=ŗtd.IP βY75'u֐T9--fx6ԩ$sx<5qd-^^,QgMɾ(ɰqTbvV6~thhkZeSքs1e ZWiqXR`xZQT%9cl-%kmxץb$N9XΚsE,v=kfn9Tpw;VWgՀ!iu e;(@gͬ7\\$vKר= bgKR%T]Kp;acbO|L8Krh&jx~uiIS[Y"_/X zW]oXl 5C?ZSEPYjYkb_#>k> >)4t˒x,IoeuJI=s^,zJp"/IX}8Y 
nEԗ5֭RZi*gX7H-j5۹TifȆLlˮ}WXݿ\a6VF¤&"/{3,)X˸d"B[--dN^@7$2&v:I}30xzzsgp>f][g7?rkьXpʄOT3|)XÃK/<`Sg%(;%g iu(a^,Ʉ-Dj\p^]WKZGPūkq^-Qg$M#ڲlg s5lֈp yh_cfc4;H}^@/}ER3g\/m)Ѳ3sM ƌ'%y\%󀭵KRf[M"AǙo՛zGw(ttbi݃Q9i.kaheu=4Mʌu"K"߁$nJ=}@,,5v$\ufF"#jS5 O}ŚםsW%j۷^QOoMB`p m }UP[1H_bȱ˔D1ZeK۰ շMchjU$:`P;}Q>@ CM uI7|G/ ;{7xd8@ey4lfxP+ˀNh,ټYs"_S Ae Z6~SIK%e?%?A'܊!4 SslVK.v{AxN7G/ֲ=>SoX9M=)<H"A [8{TƵv"PvlHM8YFF&6D$2+ܳN3w"YFUc-5X2*d`;FbF 5f[}?j6E+Tvɑir<_=,錠R K=bx@*In*ϣ4TC}:B4,f&֐}CK.~e\ߗ*$^xAwIގUFx@]G$:Z?G'&>7;gVM,t|xbXEv y:} ۪/Bv$Us;,$b}3m\3&|8TӽY"'K _?*:&6 smt5}n9g{97n%X8S%%t?s앖M`|6qƃmqY@$"=3}ї Kt:Ny#|fqʆQ:#74|xY>'uu}P0-AV+-^$`vĀD[s: '8K&țy,TD~V^ i{lGxJ?Qhkԁ$#*$k9hOq` ;6}%pˆFǑfá< dYm/!9aGHsdrױv(A!qqRHG,uy޽p֕nS1]Qy}uűat#2qZL0bL+N84r ;TtcΊf#غ#ީ]GőΎ3pD#T -)b HG+r:a_Iݎ^''p;N`M(AryTz!*n! ̦.?H:$Qb8ЊW@Da;PLX`_DsдR;BPb:!_ $>aE̠L7(Q߱ HynCBa"N[ٿSԖ/DKu5PMn'?;3Ѵ4þ %V*j33V$팦 r(A73,vj%UGW3*u;f=T̪)cujlIU+o0 uq6'8w%AWA.$Ϝ9 Ϋ`${Gؔ-3ᣤ\WP5ˮ3+-vg^o{s ꟑ^x=3s8Izlrq ze^"zF뙏 ֋_e^5iXQ8y_Ћ_G3*k>5# 8 /jGEJ2_Ujo2A8W*^PQAk j޻W@ * 5#BmP[_+ UUUUUUUU۫6ë6÷WmWmWmo j3j3j3j3j3j3|~{fxfUUAj3j3j3lUa7 ۖ ɫ6ë6ë6ë6$ j3j3j3j3HUUU۫6ë6$ o ߨ/~0h0C)Ж%~HcM[+Τm[m+  {UpޘT}z>@ ϣMjxF2*i6} UWGp!F϶^Tb@NSs[L HVb@Q'(Ā3!z(ĀTebW!d܌ qml`ұ(M׹Ca2s7: 1Lћ1܌M*1\r~)o&Y TTb8W%WQ4B HiLjB dF!d4Q(yn'i(ĀsbW%d㴚CQIPG)vׁ4 y;Q̀P6ReJrH*À200_ YZpQ&\e⏢ PU5\9&oTa@Nɾ0iU!l^U)UAep R4ἚЉ\Ua@%9 )LLnU'oe+'* p* ^9BUyUu oQ30 5`0 0 T!3N.€;EynGZ+"j0X`xQ5*b14& @QzD}6sD " D#\D:~^g`ܯpaQݼҙM." ȕ8IЮ00jI.À4eV|r$5U| !Q1(€iv9a`:ҿRC]`AEKa@JQO)|S4TG B~gWatQ1°_0 JZU%ut0 px]v35~FDXጄ`$L CJW Γt;qH^JHWQa .Q] ջj'>v\U 9].Ulޭg $s€t0U { ŽGCU0; ITjp=E2:$;% 3K)_zO5?s籧 ,`E9ÝH ;Y  q^5T?S(V1ģWi@jH LX!dN3I7 $M0##Z!+tb y3'В k1 fqDl-l8Ռ1`&2u9wl6æUB,țk\ᇑĎhR4ҘS)ҁӡD=y*vu/];y,R^##)^i`5qd\vK'o'UVr;G+NwQ!k@D:|0}pX( wSwHމMS@ؐDK!hD`$kITbą z e' 2NZ7Ps#AMC AG%tk![rǁ?@c k"I*NQ&. g@er= `jm`N;9;<c0%.%wfb1$ aaKx moO%y3%$O$ K%Bum=谬Fml]H4H ;sդ;hCz8 z|`@xnr XQ~e­P?a |!qs1U̘6),׌) eo:u/5AgMb*k͈}~QApHL*A+EGϛ>U1w1~AK^Фxz, <*AaJH6lvL+pWVl.+PZTWVN]5jH3]]wUBF%T-. 
+.s~h W`[ [fZ]JL ^mp8θ[}\7\S}ɲt__k+W5ǶQp0 S?9i'^na{ŧm՟?|xz?5ݩR9YDM}۷Xs:p%\JRys)݂IdΦ]IM)R#wS :RJJlJXyK+€6<y(Y+Hx\v%7m`WnJ!ؔXoJă߮dCIMɂMo:Bbױ`W+Y+Ԟ%x(s v%} KJ_ߩ;u/v% J`ݮc4iBPbBvWԑ9ؕ,4DZd<Ɨ EkW*ڷ_}i%x(1+YPOcq v%6o+zҔ;a#נ7L~$zӃU+oɮzySࡃ-ҽ,yhVVw-Kv~:[+$-&wC_imF;*A[[f8?}״g+ s{׿~߁=5o}__{s3f*_%D ,u> A} ,nm ]!x"V?ο!̮׾=?M;swpH9+ KV3O}Fr ^g_PdY0]kvuf#kF80ަ3 Dw F !h]ݩ, 3 _UA%t3OsG6%a? 7 wztv C~XœlɏR>n'5^{֑i4VLng]hA'Uu=OO9䓬g)ӁЫze]}҉;鬦%!˱W4ZgGd:xv{ bd;@A`*wYY 'K0tp Q>;tO}KЇrrK2K#9Jw̡YrwɪPj !AtJdx~%8k̸;IuAh4]}&1[p:9Kgw*G 3?e^>fNލ ^D)qӔi'tnw^p2I*UVbk Pu3%sJÿ= T3T̸A@Z7,f2F .>ٙȹwv" ^$],5c38MT !"*Bf/ '_|Qװґi2mNը[!|*xZdFN&폮@5_q*Y.tPAn*e\x >0T*f|LADW1g 5)X &Z'izV(#bw̺kATE靓`4`5|[+ߙT],RLyK}D\TTa`bcx)?]S@$>0bǴ>&SP\º\ 3TfPgALPF5iNaI-NdX14eY5ĬbLТ`x`:D~ddԚ18I_x(mF7$Ax7@F?42 D.C [4OcGt3MYe877]QWg^m ٷTe?]}b)UM+iot4v5 9|ZF]㦎, ?j3$rT2cDj$~=$uA3.$Eg(-A[գJLٻI3NO9ЖìhӐ3E ⚑c]rKC{cf<8cvW ?Ll͘r .HQbFT%(7xfݿt`誛zj(`vOK9^d=@j8,*C9#l5&Xs0 gWa"W*zWrNY^)`53f2 º7g:e4]W¢sFZѫn5RGWa(Yy-V:mq8a=M4ej2}c[Ճ7Q(7l== ,e!miN/()_ ǜ5R8X8o l{&›%5"eT9,+7˺&JBq p=X-52<q q[{.ƫfw"s.b0?$NȍfD-^~rylz/>WjnpJxVK2r% te izV+&}7wlebK-O zw+uLSdm{7tgO (puqa/JmV-?t_2ot횋:&Y900:Ky{2EtqnjK_Gآ[NIgM'J3U3~1y0MA"{2d@=G1/[ fq5婒 G{-zYpr5Uְ;pk`Œ#6[AV/U}$qa4-e5ɿ>ާC?IӼΪhWM捍wlI]`iW3Ps%hVN)InWC K87h)ܰ/e2$Z@ҧ7?* n8TW8ǐmFNk9U-}:T!]cv_F.yd4{Z_/XM8۟= HR#"^6kMUuE1R3&+}$[JU=zSt!8ytq!dCK'I`>$UN 6ebᘪ.yȫ荳ag-nlmX2S%̧<# dd8f5$.kκY<}ֵffImIG si!YR](N3E3}\j&#Q\>?2vś։pб$Y.E,):8rCZZQ gIu%K2|o'YS}:vŇ$kwy/GdzkR-A}*%nGiGߥpZqťs+$OV|ɨJ ׮;z!Vf\(7|pwyWxumrX!~t\ Cڕp#q̏oh[Kgݗ˵+|) A]tmW֣tu;ʑjL\BoՌRKK2|GL휹8:q9*۩e~5GcuW uʴ_Će5Md7SxlVH;^y=6xu ʩlRWVQ8#&DM]1JTFMp4PO%{;N2rXH<J ]o|JRdKICuH}}K®E(%{K)QycYKy+6T{'|JEI"殪Kbvߒ(ՃzR`q\INuM˼$J@J{VnR(m ]v%%j%7W3RҊQX_=$٩K_wP r7Gi$n%i< (IJw&r[\?ےkpN.1m @S{9;ɽ$\"=U祢g;fs1.A^#hM/Rl2 K%|,Ia&fIb -Mw醥Zr= l 5$.,+d{o6KYK%,gVS)&"qmGf~I+ $ %iaﴥrV% lv}-I$W>MpIIV{#eyAl+ǯ̏k]qFic_H=xDnK%b97?4._tW(CMwTl:QWx[ FU@`׮"[%[pT5]FH^-tmMVn w&9{WQz[4osls`O{U_]s)3%^:GpyQ8P}_GtYkɇ`#Jᇮ/9ͰY\_%CsZ<Ա(SC 1(.z0j:f4|l9flGa ~Z${ k#ű|%60@p _P_F6[%2[=AkJ 8( %ir4:⨠?BJ 
I$JrP2[砝,;#޵|ĸ8.i| oD퐝dTsz[_qU$QH"8ˀ$?f89e@&; >%`t9GP=)J"JWvۑѥߠ/'_YaJVXkݿ𳦳C Qs{ {_(s NpJ8^NdS`KD)0O!T)L8)`,$L kX88A3(Q.tk@u:A3jqIZ>M?AiJQ*5vFy$P;FY٣% K CReΰT=tz|OWVlCePvmRe{n0%NUox%)q*HICJc^c$>~v.(՗.u{!\@I}+Ѭ~pNkAcZtn=9́sVi}J(5Γ{O Jʰx$n;/,.TKr#!xXH#EY'G4cb KR#-) *Ul6uKٺv^( $M6xFr4:7ƁB!&n`&K"p(P4qIΑ߈0a!i#մ{3w^ ]nGL(s7;)'͚ax\,#0d(͠{Ss\/'i=@ҾJl{! `KItzE %Y\lrS1FSp-fH08)1j]Ǜsb|ü"9;(- {f;yu~Co(;6 BS/ ~@uJ޻r< =%mosÅ)p~JR ⟧c[Jn.Pq7YFn_gY.Y[̋Ufpl*< H-GQIafX'jffpKI~dCťa(1rhJZrzhdMQ󓋓:#ޛ?p1hD,`=@ovR`S4+&GySFbn7/N ڛ)Zܴ:⿈OW+W3Y +fRYNk MN~IzDR'&Q5zq`D_`͡N_厒d:(}k:qa(P4MPގ[vVO慚H7K(1Ot^B~ 7,PfPQ4A$%ܲoG s2ggZ萱3ԟQT)D73tt#_dh7ٽ!ojАTofI޴!ahv!wdwrA) 9au\CnêmVC,#ԐY .IEnq{&kZ U09Wcg7$}^6tlަnCFӻ!3z% WICf*֖pOLm5NtoHDچDTnH;=~C)Z Iѽ!iHk.S$b 17֪5$n,cC fz6Wv֬Kiw7#ސ(o(u!#v$ {$vH pO{{*7`C@ml亐YG/XmWng;8`H;6!O,w9'5˽([z;-dh4= ;B9vLsҼ],g {]EEsg>O^4|; 3wx8=Om;vMtǹֿ2ѝGTKDti c=#q&ItOÔsOc\tty8NM4`玣Q0sA( ;O93K{ ;œzvRs2tax ; 8;44w Y1^19~ЂqjyY4wL^`158;Gpܱ{i{XUs=s$9$;YrNጼlŏDjpSp<2`]%*65l$;ӧ#خy=<%YGA|s N!3mfdO(< ;MD"l'F%~Ą(8!5UGTW̐b՝$E# PE"xj/)GB'uj)} Hm]"=9E>gd쎥ĿqwGub9 Ǖ68w )r&)AxdX%qYC/Pm|wHWS>l;)aO;$tEwhd;̰c]0OAwxaݏ#Aw q v`݁`I\k CIx ɱݹcL$8D IxM?IPم%aaNw} 6yF p IûIA_Xp^ψm=/X 4va'dl>XXfe>XY^y ;懋.TScD{|`uޏ#]l:!ӈ􎳤}g>ylqqF5ѦCbrihJ;$fz~mJt#+IEYE)S`twL3}S1}kFtAfob&MuGesߝk;%t$?+ヱ~2>:dNv%|05ݰff9~\U'4Txܯ-5xX$o"04:]6. un`?YTrCq8y0E#(+3*fSu*,Kج8thQXzNt_5qkq6V[+zf vy0w3d`Tf4,ó?8ųF:x@ K|g/r7#4,Qo%hpjf9z̛Y\?vTx'*<3$.M?8Bt=T~3371(\-<7J7ct\&G-Mw- d ĭl"d[|7Ƿ?f}KyC7:&rI?W`\>ZC|[X֠s]E(_2[~'k&>?_<i)~3xx;s-X(o _wmuЇ$T5{s~B߶__qv T+|Gpv}c1'X.X/_7lt80) %~o >XieB*̍Ֆ7BJ!ؔXեmJ`WʊJ!ؕT|S C}UjT:nJ!ؕ&cWR6|Us+]/Jgt֯J$JJI[)-eSR+_ׯmJ!ؕ lJ!ؕ*7lJ!x( ѷ)Y+5y;RJ$JJ cS 4-B)!K+֦]y+`W_4SJyy JLfJ̀J%l6<Ӟ\/JE:=wJ,ٵ+YP{T[)e?T%ؕr}{.C]ɂ[ygn6'NwϿK+!]οSB+uQoJo>nScq6~f#4{OI6}Ìc8uFcKO5'3Kv-u C ;r7-KZ2iYkucyk/ڴo˭2NS|^o?}gz|Y,G=K\{nN9~ePy2}4Zd[9T3Tp3j?6 [b,Dsu\: 򫁹2{x ЍH_tL/t685gDYQ?H/-޲g_FDz .F/m.ZW,]:у#:@GEC]F1{I*3CPJ`RY6kQpHt]@- s/Eto`.i;`̦7VӃU/neNi #SY 0&e£ <-IB'5wVT,I;ٿ#ítĽH:H -p!wvך*YXDn$ǎhգd~ Cg}͂̎\4;ghkx; E_ɈDBn)]=UnMHD(-gP3q/ʒ0?ӓ ֽk) 9. 
,I"u / 3zqKBY% *WȊgyI&/?!F?q;-LAbFAM@NzPoBQRx0$S9M)zp']7YKpb 91փ0Fѣb3%̿^(}\"0=HxvŞr{Az[-D=kR@HwQi˝gTj3y'K>*@ν0ɟXX=,1`]9شH;`^Y )ڳ#+ 35X=@FwNu{uX|WO2#/OȺ}tXć&nM Cƒ}xx;.lƋ[Id^8.EVNޫ50BR}y6O ^/uZA-+gV%4k.4%6LtNcCl];L6uw#h'"7^/It9H vx%,,^?]scَ"^˘1q4+rFw8 `2i):c('gtP@`\Y{]-,Eu[IsnV C 9y/P955J]T]ՊK<7 /hr"]xÕ9}6A!^9:[:y]R: k>ggS* )H| Ћ:-!QWA<8AI6LU( Q# 3i5^HB$peu*cdnw%p\zK2aAPUy:XnytD$WR`(}zcĻ $V<;Ýr asٝ9yd,݀8T`e_@Mcyٯt&ZͤIɗVn0~uگqosmh`KמrD9Yc DwT7:vZZ;cK\]$8|eAӑVAqN ^Y|R`yg23hj"D(sSuѼ^QtNCJكF)K!e┳kc[R1H#ZC+[1HSaU@xxS~ Ra0p&/\ydJs B_Άiv:*h{@@Ja<6_Xo09ex%J1:l>Z3`kVx*.0.muL5`#6&tTDU'Hd-z%C ۑ)ԦgiP\uXqg 8z#F:YǏw-\nA53VYr/訓c)N8$3h'X[d6ӒèuYpI+kA\0qϚ?b8vDg$۞u*VG]fu{aL?u\];gƒI;W:HBrڻ.MHJt\{ uZTgE}2tǒY7 ׫Xn%aE\ eɂhY 5HȰi57#1k 0D XYzHBoAw28$dAt#%!52e<[27S-SHK)qʙ4)hFQ FH(`GLҪd]yD7Y4% ]Kޖ@PCs$ & pC%nw)dk, }BCJ[?!%h })sԙu{bPX,Fdͳe% mO7ٚ" 7zوCZspឍD]蘱 \pYpVO=l42^= k\EظPQqWW;",HVXMH`47ê:B`97#ƺlT܌!K-ʟl0}ހeT67Ư}5L xvd46+N-o'ؐsrqnRaC[`q*$& d&oTmVˢxniygopڶFC46Z3|6_dIPa,65o>M4C]FDuXl^0>>Bq Cs \>~ֱ_Ӯ=.~ AN'kǰXn??/4 Wia?Hwn_|htvR7#$e-JaxE  瞺KFхUb}}t 擕.ć/QK>jŲ!. (.OuG0 ?@GJ|e{jJB m>KT+n~GxTﶄF':]~:otHgiAax\CF<* Np'sqP)8%J#)lt29# _#MkQf:b;nRH,^QyŬN ~B_ ]Xc f ChYU l ]GXkxu,J,=Vm@ 3vSņ !ƺtܭ+V!XgC/p %)+Nʚӆ 4K6+ܠp:S߂-4ҬHkKr@$lQx1ZxS2kS}߀ J>-dרJvs &adK\R Fc](LI* j=:E-KP5o2+FgsTD*J3VH_' Oį6rD#J H,}e{7Qbtjj?( dVDxWlIcz/~iˏ#aסӑU6ȌShzVEgckxc-a j#jB5?/p] :x*lIQE3I5,Pi՗.$2Օ:6&(A0Pņ =;86\RE5PyERd-w@erF=a!6e=e%[=H Rջ1ZQ%`,*1F Z>65ry_LXfHo`yI }˪,f/ 40oj;bx.z^M_, BxB^1pl〶sk۶6JpКÜtJX^kiE fuoXooC{X'SL9蜠3KP `:҃s_³ݜg&w͓i\aV @B9^ksV]Z -˲ Gi~Y,QZ5±Ymcedl0;QRCe~[Of7͊7ϺYr_ ¿ҦҐ0NC fbJzj#_66xCl/%yluߔLjclc8{`H >LTcFJƹܝca8,qN͙HϙљfL5f,0F%W vA}Wz u|ó 7{vtbp,'$D:g!Hqq YVS}Glq38vYg OzАXT$5MI/CO0/sgԌ >Y#iOa Y>#sO8}ZtS{(sc鸄swo Hq vKWp dU3qG՟y<ڛU `0Ƅsa#_iY!#Xv"l۴aSOް9\HC׳A>mw bY`aTNum&(戜mIyxv;yg#쫎'E5h¥xkO%02"Hko./$TE!ѻ8 s2a/jf<U+.8;&9Rg8Gގ)$Ey'8$PΠ+ S|މ~蜛EHn]; XȔS)bFe SX/q"5lq\Ceƌ"=,gNlVO nJz=б'd/l2ħ H`ψVo,Iqlp-C-l ـ^! 
9J6@LH)J6IP0SHb 4JedlP14m g[/MWuU%6;c HqB5?-EQ@5W=TpY@5YA5ȫ]jb{B5nQҀP H(U)E)EbY!HX0ޘ n~L%p M]PLbn+N31h= DlhV͍3APB3Hf]u91AF7_AO33{=$ j=K*nm7%'H܌ m3/%A2=Dt%S'AK54)m6m\{"voM)-e˃4r[ʮTlZG1fYN;^k#siy)M5؉ttͮަbV,}PgD61H- #@J@oͱqh!_ygǗ B)[`X"4X9ՃEpK!>~uuӵO[VqHmΕJWZۦGM?]9:;9T"i~)>0!ۿ[*v7߶=daØ 8ՃT@4y!+c9t @tE JZ.9 X@ A*Mbd݃TA;>&8<{xPD 21w1*%nPݎ%. z }_ ?@& \s@Y-@q+ !J(`y ~-/` UR`_1(_c%]pMGHĎRA(b a*Z9 h>,]zYj2AqD܃T@ \_j@*p dPuAxuA[\O]p0;@ZP,80&5.v5EPFv,i;)D HIm~y@#]GvjWX髫k<4^mKS}s)-Ӈ[7zgCwS޿F^ǖvW”6O%m׾= tWP[!F{J6ṱ7t;6| ܶPw\yQ2K?eyxooPLZ4%DҦ[ qSY8w7DJ6}uּcȮPfCZdHuqi&RÎp:>3v5_F4iWWT^6(uC)irN2ݽ獆[i<ڦcRgR& 4h4g3 lE{ WZ Wfqx!BP,coxA᧕BHƜ2ɺ[Cqj❱3O>ئjnG)v;IQWR\!եL.*YwD"WY˜' úAe }8jFk@^N 4êӍꟴж^׍ƅW_//6()5R/g;Q./ֽu> k)P1>uܠlIb % ur0\nnP ybMˊ~[ipd^ m)u}i5&tR4B_;͌LpBlS6<Ƣ`u/: -%\~VCye $OOUH iM!Wډ2^iiD7WԒW,~zދ~fLs2W;uͤxϗ5uy͆.Ok-syvZkSiH׶*8 ~ ],5huksNi<n<~*?0oe<~i)4gOO퐾?K~jFe<ތOxcoN.=|JXd~zTwE/{nŋނ֗1yg=YD,bR/dWܯ=V3)v?2Ha\T=zmtñBSD"[6@e5Wp,qsW^ &F)*1o0 vtFi᠓EPJ(MNIB~E  eéJmI w|60=~br*ǗKO|!-[eB}\`;d)5RjH-h5W'Sԉhsl׽/I2Rx'G7.rVZ:˼;>ph܎} 9}/in/!ʋ47vCAK9]B1wKx<錄2lX4w3W"ݖB$]/\~Bi2 C'42S>^ھoA)> f7H}ms-~ou?dfj$/]g-h?pr]Ozd㍿_.7NOW, eRNK-X-8+p]_rCndN ew=??n\>t% {d1hk]~7&֓f0i7_i\g[On(OUny_ֹ?}> stream x[K#x%N~C*)dTeDRks.W.9g!f)JJ<}я ?[l/~Ek}% ^ko6ra5ۋo;dFH۱ e8zxJˆBuK/zü53&^vr%5 \)Rfv)9w;2b@34x쫋Ͽ=h{K:]I#_qE,׾}}WE`q`4hKd Z1Nyګ{u&=hrFX<$E-cwE-({ [p&5x (.ͼt ܺʍ>)͘j(U 0L0N;5T8ܧ G\1]L/{[Ʃ8Mk270Dﺲeg `\: z{ÄtmtHTڒSvqc{Wn&k{XKLCqiǴ12D{I[Yɍkc=8+giY[)`[rqT 01 8AeǠX/(7 ihvTa w˰_!8B^ӸIg='w`uARJ*|d#1 )ouh87;2da0|5< hJ „>DxHT >DĆmd^! Sf\GC@ X(R5o}tX쫝ox["8}29D́+j'dA'& $F 5PcaզWv`B {y@tnc-M“Ϥ 䂪.>PutcTM^cNG9`ﬧ2}_ f&_8诗ÙɂdfP<{ BbgVnəFyXߩ 5:K/1 ӦLKc{/S"ӣ"yGoO̖)CnKjrScRhe/1)k&|4+wWH1iQ < !G l;e]Rwܧ.I]ڥ8OS!>| I*aL@_1@D•4"yA'2&L$WS՘ܳb ^)Vxb`T-+"uU?"Uat}q:FSLxЬ* '3cԬIiK7np)SPL@ ")LY8iuڻ9.J|MPkǮQ]V( CݘJ|HYCU7hB{66k׾))Gp~Ä&]:M-&pYIdV҇whϔ#NTpBRg|BTV蕀Jm|˕2\^F"Pt_N..4ƕM"2YyМ\,"ilmxPě"x,"oj`]+zn3l3kM7ҀԴOyy;:U E<6_耖xU漑-E1P]S|X%9|f?YL:rx5uaGsg_66N7{49y\uS$F[7h4u&WTBl?N,)uCI""2v΢_F4N\O74zlZ@q;*D[v~K}YQۋs)<}njfn$vPBoM UN5"F5I>ǤVxL. 
; 7wٶcthqdD/bZ])f27~N.l^/= EYg7dQ=~q[762nGo)S9ܨ);} 5 X+.el;pZLϹpN:flƟIK/u,0^foE@Yϴb;nNt4 HT"MSOia32pS܈ͯl"x`M1t5*B2_ݑbX!x5K>!<.T Mm1O;#佹ONJFhU fi9z ArI"O jziWC-i`WБN+)t҈v 'pN`G SiSLiOӈSxO=US,pmKƒ*-y&lD K\*OY"&.I#DdEzBŧFd7HM~("G?݇suEL4P5ҧFkxsƺ9c?[b+֝cn^~y)D?|SIyV`IȒ&}CiJ'z'7QW[|r|ݣe_=V_f{ Y'nF9W|ks/1?x;endstream endobj 247 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 @ ]ZUm?eB:gׁm_Xց$i, x> stream x\K/r[fOHP`J`YY%=U$,Tʒ*b"{[A.W={yD,N;E0@x2D$) .~-N_|r3&._4%/yZ[!|˻F^45Le8[~Q*[Mus:yJn v`{jzrx ^} f,AHZ{Rn*Вd,:.u_ѸPT#" Uи]nﲐXL^S&U; 2$-x\j痟{U6eO0j,:H&%7m#oVO $]Hl5ϛɻb2LZƙاAsRp>?mAe#700, .7+]4,H[*JBq$&[~p?i\s"'*Q\Bޯ< k%!}-K߲$@3XIvú5MOY' 5 k|sj0qDoy@R9iδ>*$m$itEX3Vp;V!|c5[@uj{*TE_q}mCp$[3vTI{͒O4EK'r/Mv"uiphs+AԟPeէS7S'jj<}I9& ҳ$H3% Mrn ! !r!Ђ&P++M0u: '(c)#J0Tʷi0DR䒫wNѶLB`]5nFx?Dg}S Q=RڜO7^q\gI2Q^m-)b[&v,6u TH])?q<;7!N\VIn~Dg|*8l"(!x&P5{=]+`.oXfLAw9"BLJ7 iUA)+ D5E̠ 8b-ɚr޼kvEַo> ENGo$,综)<ӘǷ%՘3LJM{b TqCLӄ2"x5dRt *hEE"=-DMUʵ N | mrCSvu ђPszmkȮK#| WwkcV׍6GoIl 5{0jB }Dشrܵػ|4p0A\ԛVB VI7x%Y53yջ n{@1OdY8҆sE5 -Vosē0CMѿ dwB d¦^-=jLo=FمtgdۯO$ sP!>@3eɥ0*U$!αxDluarC%w9EO1 :oYZRւurZýOYo%"%at<{حLC@NVaoTIzϗKpt|"GE0#S~9>}H`$'Hw)1QA؀xZ=ߣt!ez A ooO*KQx>豩`H{bUj3G+u{UNzT}UJiĚB !вp&T$ڠ82YT!x`B5sECki|F3ElVV*1E  SIq%8P1:P˾_ قӑYqo0\(JZ)}O "+GiCG㇑ԨJ M|@Rfp&NRE468 RG/a xH?KUv2/(EݘrkB9)-Ĥ"E:k-NƯ^&:ݥݛ!ݢYɔV$W@ % /)o?}}).#w''%x&'X2%a`W~oW,9ĕ`<xJN}ӓ>&Q]endstream endobj 249 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 754 >> stream xE_HSaƿf]=$J69d8 {<> lih"zmaE7aha's@G!(PV'q^wKXj ^x z]h yf~D`5¬5.z X 7Qn5Dc$0ۍ]ݷF{9y!_3 r\Mj`Mղ>[Z!s7Z^+YQy{SM҄N<x]--J^|\!,O0@y7cj,ˋ?k}t^$ Q’fh> stream xcd`ab`dddu 21H3a!]cO]VY~'Y3,yyX*={3#cnas~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWrp-(-I-ROI-c```20d`d9%}ӏVdZx~YJ;wv^{\ Mv_|ho<wv-]h?+ǩۮۻ[*8Ĺ.hE7m}ClΕVNHa_}r?Yu*r )lov›݇(oJ`W鞱FlYN^;q&{r\,y8WO7endstream endobj 251 0 obj << /Filter /FlateDecode /Length 4304 >> stream x\[s[~W#4y 1O@L6q&3=hɉ$*D/p#1d2y| ⲗo?,q^}ӫOO/sJ7GXN;ql'WGؼ\h.,c> )3c\u^ w:egWl9_#;-~V}/8]E9׋ٝHo߉gNng+ GfL fwd4ڪ&T]KkaI|I2}og7t[w]\…5b?'_mtCsiQ 
ϏN+`&Zq@GK"{X"e\pPճ^k{u;'Ap6]>M: 4ϰ;m*ށR/{T3h^tK?JkA9LdtK30\.qwr32NRH(J}!`kܕy/ 4yӣѬ3]AaZ*`=,x=_(vgbOfiзJG%Kj):܅83opk>F?p\niR$4 ,"gL"Lqڷ.W-ӒSg/*0FQF虈П Y!k'f {$˦gAnK0Z~Kq aDLmgMf) Uhyyigw;l&ٶHgDEЃ&- @=DIh|}~m ;(x,0nG@poJ l86(i1Q0yiϸHԅPIG#]r!nN8Жm'%)|^+&k$r?"QϽ 2y'ex /Pt>FMn67y_oseSQ;į<*y)lj2MĔH(vO(G7  f܏:X$[mٙq1}^|ŎR4l-UiI~vnc2{+Iᓝt{їy[sxl:T!\yOmʜqS0oq1` )*lh@e{°&>g[zCN[ >i#qrت]4'BK6E \a%*FǥH[(;)'fxT"(LaSS)u:WRxGs$mlX;3Sq1_LSHMd$xN4 4O2Э ;6C)'..VbZ.Y!CGӰ8\4q1ϩNҶ)ǀU[c *|y'I'nR!QbU1δ}M]:VQvݟ:8VVMu2"|øwwyB¡*?q畆pOտqF2Zd]t(,Ǎ+߂Nc/.h Y\ 5Q}:e }uUx9r=.uBEnߑ~.X)V&T<+Q/i~`"O)ì߿N=T޴x %cX;ESUmsmzj|Cgx! "LE&OS,- зLk=o f;jzZwW uBS w"x3!D㚸(W\Y aATڻL(⌵ʩ{[Ԑ\P4)9ؚz" Y:-gZ\e !:.+d_לbn"bi1?VU/s_,J}]+Y ,/OH r a|`Ho?Yڗ~s0';eCPL{7>hOٿڸ>ex7ᣎJopfmк12Z2Ֆ{>Fs J)<,.f|Fe@r14~1`MQoNIp~gnj5A᫙)Џ'6-)x{qT%  $pN³ ZEk.Ѝypb˫4O{)ܭ{uPv:⋌yw\s -7rtb.TRY?H.<ф~[J>C K!BqXHqYȬ4U>31%dXh>=8Z{)  1{uG9c'VMS̀VBiCr^V{*Ҩ?fQaBJ^6}Qz0QE\4c*K 13hG3e3PA{ԉTP<^LӣFeg1a=VRx:6 Qe&O܅FaI;\piX{6|K-hEիnź`Gw<@{J@(V Mp bfhI|‚wz#Hejue]Ʊ(3nD4x V/?`.WRP6=8SFT*_K_Y7MLYBg[qۥgY\6?5 P B}&$7wuPgm4,5^Qm-G ~LUvVÎ;gO?ѿ)tendstream endobj 252 0 obj << /Filter /FlateDecode /Length 3140 >> stream x[Ko#h|?(Ā {sj̴;kg/"dkɂvtP"z|UE_/Iov'_|g7'lqq\6Oaw0{$ mzJKk0|wUC|[ȳJ 3f˘ Be۔ѡI^rmoL)N9ʋ'+Tr]tB]SQsbqnqzQQ_HYH[HQxBB*$l<wȓ>4-3Pd?8oN.M_sM2΍ 侐/+=/Cu!o yC$KN9  34c0}Ѭi0QuM}=̽d ƤWdeB_H2j )/ ߋ}wMrB dB†G0ea$c4?~8MF0Z=W[&mOD$ aGy&M?&M3iNx#{QCﳵ~ܝTDO>ƿZ^jX k&},$ |-W@{e\ narJjû~ ^s~n@_Siz0Ӂ)-1 F:r(w^#3Oȋ3fAK܍ I"T656 2 IL,` ~hD8ƍ Ia5DUMsҹþ0_P0gԈpPڌ 45è kA(0 0E0# qA^Ҙh`яv㼣n*Al&8NcE=N$+;2FYjfDNWߨ8˂ey܅cKoKBڑ 8X0/W +qcNz!e@Rlԩc>X{F+4zo#7^jtm p~TiJGc.zHgWJ+n?+{KeC(#tZImH8up[z i!B Afwp~ f/ak >NCu_B"0GA6,(DP5+>n^.:Mr+ik* 4X,A@N-I7W1 >пmQ΂^`0q. DLv~Iœb_v@K`T Q4{4udĀw/8fpG*fv p|5*ˣ1'e m!&R=f1MF .8G\SXbdWwT}l-8Mx%2$區04 \qz,F!LCA^0le|OPmP07>CRXHePmŘ< DY(ry_Ry2GI^ir`H;aUV"%GDd<Sh =2Ww`ybTn^gb7&x67RGlY@1RE4;R$E YFgK+%ͽM sf3:xIϒ*# qiV9Ti/CgLsdrWBn 9oH9*(^<`rVƲ`Ժǫ** (YP@c!!J䠉^"QTưƁDױcJrC*u. 
5Ŀ w$HTmo KW]DӌIAvww\gjNT*PPZUEVlT㓬qXI76w[X!-ߥ/Nk4ݥIX5blO/ >84M6uvooI>h0uu;>@S0$xBaY:=ǎN9(E#%7KVom}MYuCo_)^VYے]؂@BH7x3}4Qxú"vZ,מV#*xqk:D8I+$"N&$ecrO^ڱߗ?û*D'V& ]|j1$.榯]g8d0"9 [a3ݵ_.lPDy N2 ɺD6PvRCeDPȷ.亐2w[\ _=曹!BL+nKP`$xh`6-|3:V\.7/i;oWSWЅ@H G5#0&JO=ZQowDkWtBB|_;o[]Cz2|U_܎~a2ݙ<˷NB ƿl(*14u9jթ¬hiƒFRo>Ob?.1cNH S}4iI@[2x6ⷻ$@CW7/CLF[^6lwĮ$'խk\MP?͉hXw(CM +l9Ɣl]Yö;G41;PQm!1\ZnZ^IHFG>hn$_LA Q'OyB^>\墡>Kط6ca?)Zm9_#,yy%ߜ L~GyR/V~R\' Rendstream endobj 253 0 obj << /Filter /FlateDecode /Length 3565 >> stream x[Iod+F~nn /Y$9$H>8n-XR#Ml_S,Olɚa b-_-|۫-o4:1NЀ#SRIЇ)${ml WV>y)8VL S&OIX'D L^3ٖՖGwegCpt|`ɗG_@ɻd?;8~j劯)׏ :fΗG =9kᚸ8<>Ay#Ĥg,I3 L:s5qR)  7TN*p0+&2yFvtҫi̚'7L: fsOKCicr1A=y\m) d\6{7$O2fs=QH 02x[>>:=dSQ)$QэO2f;(D34YPۡ#_ݢwL: ᾊ'LL& YNm p0ES>xnFPZMN)ebm ¥ OTOζcUp5Jy%6Uq 4ckc(w**f5l 1O$#]B78Zd8&oƋweN'VV6 ʄiW:c؁GOz|0X4:84B-Q%guQF-\8Y'y|c@?!yD (᯳[`|ؾ+<ǠQ58%&H~C՛^v;)*5_jmbJaUNOޅ"4ʑl]m\Yen֣\ز Oە-Г<)ӷJ<g惶:t@IAu˃$Y.GƜvg(K˦f̡L 7 ە9):EÝ}&LrR4ꌸ:l:y,k}wuzDM2EpP#犽vi*&S*P]PH,~SN.:Oi`xUwtpk:;fESJ}4ز]i.6Pglb s hQm-I&LC3D]u.+Y(N5RBaV2 /d:hS|~^aS5wv*ҍ$ `l B'Wxog'XUtKj.śRЇ1Fb@ >s(0亠Q/~~F[]s#!)&<χŸz<0zEU ].C&tnՐǪ Q5**m^LarvL2y$:a*zWU {!\"Vc)78S/-]яy퐔nG1N 02إû<)M&#~ W"qڽ m%mL5Z䚢|>m܇G ɱ-CR΁+)FRqR6:2r)UMSI `J*gI$UjLg8ga#Y CiY7:Ķ= {hn"f?/#[Zy'l3zhK?nuFک rOޱXfu[0ư H]eEmLۛ2'oE[`.+={O*JR[HT|(x-!i>Z /ap0bS&2 YaM ]e6u RU8qZˎx- 󍋆qrA)74ș0Xvke"0k ;jy*eJX2;B_m0"-i$U'2Yjr2׆rֺ:fVšTVl0B=ʃɯTp& mjY0č"%,2A!{INW+ۥ YE1z0{[bp`7ds&Pkt"*"Dl{Ҁ̝^:-ռa )"$} a2Yi\,v[|JEƐI%J&޳ʽ V4v eٵ^ KTr۲˘.a//ÈTc ˒9:ZEz f?tߏ.rl^#_[|r>+۷ؓE{nϗ)d3Xdp"AKxX`mBBs϶dK]c_<ܕYO$< IXKa? DC"Gsỏ:$W%^IL7ƒ0슛(=qs>S|dQ5Y9?SrTcea'D kT DjpY2~#vN2QY[tц.Cd3U쨢#}[-Yih!j\~BAt/4r{rZɄWa'o~(}w]ۉ7<*o(Q=M^2/_Zݐ} -Qͣj)vnh!B_p޶?"癗C%G;d~xɧ= 3*ߐ P{=VnE~>O? _?ܟQ?n~q~x%8:_>/^ vaQ`.vGYӗc[&481OYų%C{i슛ܫҳZ} ?YGBGn8 CRWeS;q0S Nm)|P{(~g?Zs>^n?(6&̐󙱱~Ijҕqw:>+7hendstream endobj 254 0 obj << /Filter /FlateDecode /Length 151 >> stream x31ӳP0P0T06P0P05WH1230!U`aS027i`9'O.}O_T.p .}*.}gC.}h\nn@n.P9?47XΎEAmGTa?ߠ pS! 
B)endstream endobj 255 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10 @BVUAp$6|/`,@L4[ub,,O\r)x@w|^6#NRP<說J`2ٴ}A*)J7cnw>`?qShendstream endobj 256 0 obj << /Filter /FlateDecode /Length 6785 >> stream x]IsGvo_m˹/;lr8c> }h$1 ){%evV7@,/ ,dU.oޒPP_ͳ6^\?SWL/.o.t LYe}3~S_0l/S2ͳoB˛|76yٿʛwyJMOowNg>_z_rSa淵j6X_׮S1as]m4?^3'ŋy~FR_H'"O -OӨ/TU U S>_` xNMF)-pM6S{ lvHY0kCK2 Q?.nzKSu>m3" M 9heCmeeP4*loh"t:eS=|f̻tooMAM4,D M4sQ%9g<;`- v}ٝlΥXᖸ(4M.n,6kzj;hMS<6(Ay+7C{қ-$k67xY)G˒}  nDYv󊣍Vl-KHJ -!)T6ؖ:[T~&ߠ(XtM΀;HA> ;O6NSކf+tBkG s(k Ky[A|hC`uDBA mYX-O`g"*tu}~d ^}! lvZ k0Jk!LDHש^leaK!f[l0@#.#qƧy8Ni$`` PgPڼrƂi{L>΂ HWU^X6#;0}#"h΅-/^HKx_X)% 7HhW4FIҡaJMӨF|"iH΋ƚ&f 3мy m)[5{R' vSqf9bj(Z/ӗ $ [wKj9,a@v&cZI}$QT4 `T̫E C6 ({,oÿ́"?ϧezG_~ӣkj^;ܭml}6G6jD#|9|]m):iUjS8•瞊v)>?Lik36Րp5AͰo_Kym^  n8|þ7þ o9p fC~!Wq yUpJ_n4 CGuyuaf]Df'ͫ}~y/>g?.jU\]]z1ڛ^S(s-Z'AfUݤ;] :IG,NXX `nDg;&=!+tɩfd˭7ynY4dP$8UkԆK(R0M*(ø 朞i,܍ yG@G>CXC\7yr[x":lk)%a-nvZ[E#\4ǝH:@O2t*| BNd ˀ4&dix6.2kk Pfm^|ZyDBQ"q$%kPb m&ce!gk#@pd7"|5^(Іq[vMͻڔq@b 8JR~W<]?r.H*m̒[].'(—{cO8zþ2ٸSa^!H!h)o nYПV Y{=0w[EC@ @O^'Wp0yFO6q`!`F,ZSo8Rx!|ږ0)}(]/}rS Q u4ò  d9 0pJ9Dcyҹ@Df}ܜDIf-MZb6k9&CHLG ?UtBdNعޅṽH~.&&:}`hPC&`! w.{Hk? 
-'14xtڼ͇<.ir)!aYp@(1.[iCE&- Qw :N~m%ߙIDPJ:deJ$)yL:0%S9lTU?d ~0靏O& Y4:!9*kK1*;8P=MơjIF P_{2|Ad>.(HټW%,[EO?Ŧ'"ˆZ^\*&hXnfD=a^t_`vgV\:hjjJp3x ˽ջ'"p<ʕW#6LfDߛa[w3IaQ9؞ ugwMfISv_ ->=^ \e<2)]PpN FBaZQx<s~Ji S.(TQYNGULj>zODQ)81ۡˍj+pz+a%R/bX V rj`ma"IXMK3rfx !嵺sψa)*_ ӚOB^ƚH"0o3Vc1owیȓf :&"MzQZOX>H*a!:"`fN`17< .U 4JqHv%C]avuJ/Xbǘ%q[9oaMQBFۭ026?H6HoGQ䭧Tˮu̪ꕅ}\6%m t>b&GC|͛"R0~&lf8iE0dt5UtN*x":'RCmČV\@KO|46 e0$U[auĩ/13S͆-tY:۵;w_y&*e쌩Tc*@b7U=e G_s3ёaN֚' T\In2>͢)˪ {WCޙΊ^e_$;:ݑ.D;yP i1J@;kȷJScpC]0[CݔXU ay2-|+%ڪԖcG?*=5?v'RdH}/, [ c;匛>"= qMRz[ATWm z$f.AO:D@Etn+1cleҢZx6Pɝ0eHy{|Y&ZϹͪ8riDU3isb&j#gSjS"6WD00M١m oE@8\RE zv]fKGVb=8C3T .1&8͋BНx:G3j{T\>H PliOΣľ&&]N-%8[0n,iOD(^It|3/vLV\|3DQhR~tcնU#,5 xKf[ >f}]8o+!iVxdp n_a679E%ʇWcLO9T8MQl1q.Y+Ca9w-V::?/*fT% "o]z: bVP}D-aҵVar:vjsܼ9>X+APIGV3VIeQ4+ qvyKWˤ2*hm-_Zڼ otk}oPc/LS vx;SN'˂p/{Z]f!R;>SZeg "WW՝9Na%=WT WcٿM8ⲥˁЯik\o+/hJ~sXwp5 [-8 T:>p:?9Ut?\VtbxU +5}CpB ת8J9[}l*~ 562YLQ|QYTqG!|z1k`Zɫ Adldl6EMEIst{s#+˱q8Qidz갞z #!U@Cxѯ{e;s GRuQ(l9OgcfH{tNFPryQ95dN a Nv ¯vt@&>)(hS|H"Ϭܚ$_Asxa9V 6! rQj:EVR)Xp6!R!9߬dz[ilFw=ά)=LyO1x5SbAA*Hc) Tkc:N@W{5fװ:8PC0`v.LOq`+^}p8OO3aཕK鰟_6t0%Jێ }|͍ 8^dv&&׌r9,ZKՌ#0Kɮuoݝ"Cu+}Bt;r :[=)KF&AuKOS tciC9 '0t~k%,JQ__o HT^/_DD[GΜ|| .v-6);yaw&Eh])2q-ƾ4$x,fB_ 8qjQ8yUx[Co \?xTC@fS.O91yX'\]:GW5K&˦\d,5POי/1ԤoßMѣVh1 G;,~5_|PӘw"(PSnG?b~|j,J= dVPASgӉ1#9sJO jkGΠ3 ŞYpEQRu`+UE/ŇGqBu}+iYQ DsŵJ9r nB@]>B.&J cv}ƒ{}^<2&7 BmKwLW%y6 XqV;sC2kx}[> stream xcd`ab`dddwt1H3a!SG$,,Ǐ|<<, }=I19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8SB9)槤1000V00D2012ӯiB/ ʸ%!/\W]vɦͫ\䗕^Z.!fk7o<> stream xcd`ab`ddpt24H3a!ÏVY~'Y3yyXV-={3#cAys~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWuv-(-I-ROI-c`````d`bddz7~~;5?TE+38JYaŞa*䊶 -?vV铺g˯xvyns++jC3fvKutZ?u% geE8qdö3$8]w53k[ s9R+^0 .z9aVн{~ _՜Wg7}3wVn9.|<<=<| \,endstream endobj 259 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 961 >> stream xmlSeӖs۽|✓9Cn_+5zv]$`fVEm,d)hOD$ϭL~yNs~T !ij9unS/@NT/J _,U JPmI<n4|rxf 1elX'b-˅=2Z9sjk~\{~+oa_㼜d]N=npplUWNM.sy. 
v98'okuXEQ{jENA]GQ((ZRQ#it*+ZiR d&YsۿT}ѕX"IZ Cp1ZC5 6['7Gr2{œYla4iӀls܌^ph1bF1mna 46i`6> stream xZY~K -^\n>^K-f`ȣ*"g`5 Y)TVEZ[M&vլIuRFHNIUh*lgi+{p$1[@x'al*x0 N CaSR(DQt|$wY{2{ hDYm7WjԈV߷;UW {kD^z*!tv!6 7EȪL('\_eI| O:{l~O,}Lr>OUl80z-'['Rp\ l%䍼Ba"LImؖށrj/U gW!;kԍd2H;Ԥ,<6\W6% b) 1xJ;\V"+Ta񪬦dW󜂬$-KȢXvgFW9T=D2#bE8)h8`UABQٓ !;:^&5{c|p۫O\x.t)nȈԖ=ikj͹B b ܙا(tSZF)IǨʓ ^wā<~:KPf2TM2*Ib^ -Mz ,> "@pR9?Hxtf/L]pM( U.-s!(ì7)SJe,ud<:yZ_c)NbPfe#2K<($yRU)äu9& y] RIO?UQ<+@AFu.ap UxU9X-&}AJ:8lI5ҝUd׎O4#eaJIL>*?SxP8"5JZ&&VI:ҍd Q6VՀv.6Θ6[gYΊNVa8j8I*X7pzcجk!iH1A(GV &ͬ/[0r:-x'8!`W0ߚ4^#S.-W hV p]W7ԯuilLVtLbJ&E-)qM$J X~΅q]m6d0Map(#;) rOT.:j!qBr2![ ݟh-&M10RA|;)$S*t*.^ٶ,&M0)'fU}-hb*x^FϘA#9$c 6UnYY%w$DSvV]N7!i;-tnۻBt,IeHQXvw} dBAWHdŎ>3k[K=7{$i_!tuФ|ᵶ}"~χhֻz )߱ )ruyxJ.(M Rpr1$̯X@TUI3-?"qW$~uuߣ!VHnXڏ ਏa\%:cmItNn#YfC/>F=n Gܸ &~d;ݞNwHn|$&5 Iʸ?م\H}:M?m[QW%~[νbq2}T\r%IM=eHk'9m<ےDH'ɔ}Ɖ|GŁD jGR#%){]UHt$ ç& ȟ`(Ƹh>d vkؽ|C̻s;~#6TFObkkO3 lWOsX};T1H&|C]21—6kadMZ[$8X {6.+ sVX`uÁA78©PO}\S+q&}ݿ{I]^߻W{UJĠW3\N/؆.q'lY !!IQSݔdS_@uE)~ ѿThendstream endobj 261 0 obj << /Filter /FlateDecode /Length 6826 >> stream x=M7vwy2݉]&n qd@Z32Y#R~}|]c9BY~b?W>&]<{4]<{#C?^Կ^] oe*#\]$٦W-+o?Yyov`i~ySJ?!y7y8(ަYY+yxx¾?cn|olO%_\?(˟oƿ+X~|(<׾Tnwذfv7-nK 'IJk<2s)H>qO`D*OQz䬃#᠒] } @~\2h]B dc-a2;wa& $OL.oӊm2!Ÿ-x@`wSbh0}dpa6^kl/ kq ⭛(LȻ13Xe`c< &)x"Cʞ*l^ow4I*<N%?.gmEH]NHm/78 qEYGxM4t,a:x~ʴ"sZ"{nh=8+:y) @ O勛` z\ܾHfJ>Mgz-!bO{ S/ f?YD sFrB$.{'q N[_Abܙ|RĽ>#d!S3HbDL1J~-8314NJ8yؕ7v&#~r7gi;!(!=t;apK6`by.|b= M-(51oE(+?ȗ ^l%6fJQ\dmDxlGx1l2 ɊZ(&piv6Evy؞ 6 HcKJ~E,ϴ~xʑc@]J?HQe; dpzh^?X(rbdL֢Vs26qf, q2Y΢*i9b|ٕ<$A "9,_\_u .Vڊ"v3?0R#y6pSy*gi7r v-TPXZ⹂v2F|Yx_7Q\Œ6E®:WчUIwFU?}7cc@36T$,xџ!m?>em(B)o0 |+9 iՐ;:R:ٷOՔ YPn sdl*hIz>ϏxDLGDJk ;b(J[r VMwVd-E AtR!i(.En0ɬY$qN̑rv 5&"|eU&QԚ.6@_lTq]9#a0=H* W JAO%0ĝobԻҪ>;)uL:^>9|_=?m8b->qճ(d gOf(ilh O( wY$?bsqs) c^ŔJj mlmѴ(GuQy%Q朌Ɉ0oۺ͢?pu>"-r*uaVF ?W?AZc_Q(˳-dgw7@-kj%l>܁Xe0=i';SGA[Q7}Rs\IOz]QJ&<>̼#? 
$ؠ`1m< <R(C鐍]7³-B@A]IڶXD_ݖ4(Zr~ f4kGɌ.՘`{=Ɣp(P7ƀzfS  #В/E 9h0'b8)DY3K_:Am[sE W"W(NVf}s:+P3`aJ.y_?QW4kȐ0±s_})ka<tֳ&U~)|j#Nt9t9/D)VyZ_2|_m>[wfDÀQNpZW1M82Ѳ2a%cD\OddZ?_NYL}ѧĜsw <ՅFrJ чh)es7ؾ̘ǭ(I.w=N*I}l"{.MB`YeQeou/mZ'2WȂ[$KXDy*Ҽ'U޸v&6DeS) ᥨ'(h.v.\HNQl5>FsĜ/02ex%x~kQC:L_0M:-KtS^VTQsX f@~ߞ+ccn u'yk2HlzAbwl0׫f7h':> %XANzAt6+,Af/2E$W` W$3PaQDS`kUVeݵWN3p]>e%`bJVQ T7(91iԏk,EQZIU6aQz>1Y4ʐ+{ FG1RTZGnW vX@BPt( M3*qq ~NbOdW%[Jt'j6 VƲe͒K}#d,`4 ݜ6R }H:)cѸ<$V aZx{U3]I^eF}1!:KJ ǘY$|eY &oX}D$ ΎR T7dlIjཫ[N&f^ 'C6i5UV"H4Z'O;Ta~əg65LF5:=EHm iPS8,yɻC (G@$s*EB狚y/ x {\|x|J:ŗ4mqG+ꮮ*>vΰ_}+D `|uJf/>!Nn| jU 9;T6wHM`֍B(۵NTv+1W"X-\B Zoh,3e"-Yj9g%8V5\FqⵠQj_oŌcz3/mVS,N%2nt ~+{xh$9#t`HGqT&7E9ԆtSE,U ڱK Ŕ8NtjI՜C2Q9MԪ:k4ݩNJjtAvj܃[oFZ$LwFM}6wd?5ְڶrfE?^z}C&rܺuMVP\8' &&c c6 7CtkJgu᪮\,%i?VAgL =l'S=72j䎭h Dw u\JRB̔7.rX}1*jdg U@=y7n5Rg5lg|O5b ms/MkZ욡|ەJdn..ǖ6:1:^ҾbN1YbqJsDT gv\c4T ahj!Bߒe&nqqO bnA~}^}|\=ƺ@ n$S@IP} ze3W,P RwJ IƚYN&>Ro%T4KKZRa_u>yNJ;U/!5(O?|.V솕t2V"y_v4 XEl1_99PGn2$@J!rFQyH8lL(@㖮J›8m醇r΅E=H`\oQ݀gMض/]i؎AIo4`=H\,=)I<{JnUV 63N+@@id]a,YǺ6+B+Nlt UkW)?ʬ.H>m 7<%t:k]Lɮ0е hٳ>sBǐ,9۩N~ݼW yYj fU҅$UKrLrXsJ/a$YA;]7vk~ɬkf'WAg!OpjoZDu'/_dzd(S%Lo t诟]b _ڲYt8iv0i뎴I~a6 8Yj&=y,P yĎ "b3ѝGq%טS5}͎5K]zދ .Xy2UeΌ+g\N<ΗS*sS}j /sز8Qq,^Rܨ(ʫt]̆ r۰(i#`鎸;$=*|ѡ_o-[PQO/( GqA„oۭQ uG\h nx=废, > stream x\[dG~#@+KQź~y`2+ @V5{doDf̈E]O$GGt(ԜQ5q  IHe$N{߇A&ܦ"cF/8"NB^TVBPx`4۲VFAj􏩤 I@ '+MMvB=bsQ~(w땟g'JB0pR@*Tv!yfŎMJS#FP\KGNx 9Eh@⑃=rHG8aCRB@ (*BQR"BI*郦d¡asݻ`*"*0<$%r|j#y8@ ,|`CH73b}PA+&m;U]ȐYl0)L굠Q HP6PyBlO*TW5S>L]=-W ;I@ NɁæJA=cVLc %s'VاIf!FCcP%>gQ\yk64&դ)~?IOUSDzOoЍw ERdàžcHBIz2`Q)<÷Ή"7kʛ.9ToA\jU,h Y1k9UECr2>K, i qS>QxJE%_f4+_4` GBTI@RtI;25\6m$bs.|V ][$ ldV)@0.($P;j4!:"FҬKS%Mi)!NWǾACV>-iYJ2WA6=-g(գ>%vŠCtL.Jm:uA'm'*TN Z@R QM!+@c本dh& ,KbSغ3<8aV)Z%o:*_$-2jnη]3!ΫgguYQ|+$C$>kOaY Yt"jb5*݁^V]IbϲOe3<0Sve+3.X@'.Fپݠ߱rvL ) d]wШ3 /!4*Vsޜ؇<~٨XV U :4 S%Y&Syv=S MHJsҩ#BSW8rt,lr!Pw@[4Ϋ`gZ@|G! 
";G!J̴pq2療>A,ŪbՔު|}v3oFuGR*r%2 a{hEdݚݬS jjf=$Z M[-yP*_T"y P2F 9jJcSxKiwF~Mf~bĬZ.q =2cN x{*6o6e$sLT`8> fJnV*!DF0Pxf\d:y?^?U< >p8NZ:<ˌK*]1_7X}N6c?֝ fBX _Ύa܄|Ee\y D| fNhdmnfez ͻl` Z rswU~Rld٩,2m- iM32q!Μ\Mq"A:7s#lؕEP`BT_6oC!hho_^3!VW.YWse{璸e$_dBeǹ >bP}b $^yך<*œEA- ZL97))[%|m86ޤu2g0v!AK/. 8A\$ʒJ 7T51=Go91=]HcCEnw1jb1䡅8j]|!UvB!C^3!(!#ۺnMnHdw*GLw?Pzfɡ@^m쒄q>/ԂXV$$7I,̪7-Y~n!zoIz"ଲsP@ɮoF6 .Q&g" ӯq!gS>nf_yC]kmzY'iwEm~@A޴r%9Uz^<ވLoѨc`8Zی=3lOLF MTl &9:鞧 ɝ§9obh,Y~ӚCAJrz n#y&['+y5ѡ6 cfV Yzi5{Rr_l-5?jT%m %Z<ڕoI胯qY^\TjfDsM`q񆯿:/&^d|M6eyw>B]$tչ86yDи_Q§߉ _KʯBŕ|m}.IdZ0Cƞ 3kaɯsj__EqR̲'WO=6 ЂPwJ6=bA+6qK2֥&n8<龾ĕr1g +3óh;;(S<'A|rk՘FRص+LY 5Ԃ֚tLl@e)H I>_E!ZчUm|P# ŨE2g!R ) AJ^h{KWXL>e+X*mTnc醓G/aR+ٝ[:n0F㔓8Awbt1/O~ M.GOLj.? EԜ?I~v?X[GFodW-F5ݵQ1GDJN'a=$SEjvX5њjE=f3g3N҆.ڞ:S:Yw#]^uqz~+nNPO#e/,`ΣX|S_Ͱ{ wu¬OOZp?=_?kUO[󿶵k_lk=$WV/[5_ {y;\!y5?iͯ=硹$Gh,Y \PkϦ`ma (OZI$[1ZКǡ`ۿmM|s=hfnn~l͗yߚX‘mwӯtG|ӓjYɺppӦz=9ZzIlzukFB&i{:`YV뿇p3l~.oWп K_Ik~Mk>iȿ_q;bNU2-w>\'g>/nHrI 1cbMƙ?N~@9 V=۵LkN)[Siy՚6dRyG%0R^.0\d΄.>WRO{RR;N,*0`)CDm4QX5qCBk@ȵtLnlSCzMOcZ˴v&5鲹WGILaZ< ߏw4.)߈">f-~qAendstream endobj 263 0 obj << /Filter /FlateDecode /Length 2751 >> stream xZ[o\ ~W#q7b4A\A MѤk],E+5䷗ssc-D1g8ȏ?,Lg~u^,sp>(#]5.LH .ϝiB]^x9,!1SV|gLZģGdW^0,w1T?}d,^x, W ߭x_,N0K5$!vCy!b;Gt,E#/L;ZՍu<˝xu(TU`@|㹷W~ץ:&~ܪ+y$k':퐧Q Y@ՋGk 9bpX##֒_F!Hlfm[×<1GR}:,W|LQ 44T1+F>u` ?° Rd8<5P (:JyBIRЋQL*h+Js"D5,P s?e'?*&~dTKou_A"oqП·(eX~a=WYZ:v=SIIoK2v2&Lf'i6gUFK PʰI'2Re@d!Ak{\SP]mKYC\Q:75OJa6B9&,Jx !sdx4vYa`@YH+b51b0m'ElQSp^>i Dd)%CzP]BumclzT691`QR;`q 5pJ5T:A%CdBsab6H kaя[AXvzv SfD?NKm+}Are+U'WE_RVXv,y䳾,O3Mk׬t*b,v#^l:˚$U7Ir$a͑Lza dBɐ_^Po_^91j˨9Ͷ@a]QN$fK s?ӎvY HmN;{ Psy;D?JO糕> , R]a*.&=pUO%pomYY!-uN'c#r=&YıO0 6KiIXN0V@P\I}ъŲ+A0}F8hy%Ј!\ @oի$W@~n ǖ<*ߪX 7,~uw~J{g-^Ec7bFxD}*<`5e *&7Ko'[ޥJΩ= BvG0ܱaJ2X>'Ѓ<[3 KF:U ; ' $ :*X̪𪉢n&&G £QA}\cY Auk&&9> stream x\[sd7~7T~8C~T(BQS) 'b!p{5005Ig ǐ!JC4y9O "#ljm2-j y! 
f)Jdaϋ?NSJjņB0v?uojr|M#P+^4B;\DaD.,ڭlTˋyWEwU&-z8oZ 6!eZFdRLTiYl%$D#̰66/p5yb2u[++G/vܚhV"jm$hBhC߁^ a-6=d%ƛ$U(A\IX@ 0lv9^&:q40G7ijJWwis,V-s9ɝw&'Ais7|1iwvꯪqHj7MRo *iA mK f}q#i#DX^kzVZljq`]\L:٬Vvg sH!&]XlzYF4I Z-F'hV`Ks䎵TI gO!rέ1TZ*ژG H"jgn Ϧ 8t FuQ>{xM*Jmk?l5Fp5I6n}x-$ReL@|aC;0&p0EPz-ziف41*:1()QoiŨNJk@WUȨ&ƣh:`&t "+וdW8!7TO2met"U#mp7B-[a l|vT#5EFGǺ ?i{gN!#\b:Τ޻)ZVd:ײ274O^"jsS^;ܒ\k8;ω. L"ZBf꒹_‡FB5˦,KѪx6/)rrEӤd4ף=ZuCc!J+ySmvYِƨթ@~ŬWd6Bx:c&ЙJ*9VrWJ{);z-uRE)CHv1@V8C3õ NMnKx`nOE& Z -`ҀmمӢ%m9|A<|=Q. ݿZch7KfOj뺶~,X]%/+YCs>{xv0y.9=x5)Z%87f {9ͺVE`-T4X4uRr+CNuz7gtšn/E%Jthi_@ʊG=2ͦny29j+Li7~DlS,Rl?ʵ6Js 8a"B"F1K_('8M[ ᬽwJBtG8`{n N,UQfD+V"bX6Y 3CvUtL c )>/k8;9R]Fc'S*fRŋ女U6G~L}t!4{k&(mhSJDV1f0x, FA|;]r7tՋ]544 3}uF6ǭ͙z1aBȡx:} XJҀn+1zZ!T7ZP/$x&ג[,cUa}n4n⬆ @J(D`]Ed~x a%F}ZH :I'B{v>~KSS.$w9sP~XUt0i 7ϲ-V쀹)cGh3[ub6f`!8wGm0]>:N@Kk10Ol>g' I$AW򓊰׵UEo+ ﺰn*y_UWbݟ.(?Vi##TJ`y}μnI49=hMR2_J6V}%w.ں$]u%xJeD>.Е=C!Q!j+IYZN*(gӮ\{ts^ZB8e|[zyokg]f< 6' I_TO%dJn*yCLkeW}:3$;v|HQuW^2ed**+y~I)+iꮭykoM8.t C.36_9Symm*WQjþvzt|&$'wRݍ,,& @ {,ϼWx , 0|SWO}f3O`Z YImuC'nLC7qT}qinpai~~irF'D *9}u?`XĜGkĨtuj)8HWx:mcd_*Mt.@9,,׭ y(9vxZl)3M wN&qtfdT%g4P;WoV*3yh<LWm"Ǭg=Uw%Tk}{ y<ރPl׃q2ժ i6xķz4+z\*kMhzt;MGQ\=XzRؽiXfoޘ^ ͣ2 ^~(?kU.^~*o9A'drVHm4U );l1MwT s(0ݐL舶-J&V ]|"i{2m៑I02_Z[GmZ;ccpf;]+{V0wKo'^3M="Vʂf澵hUÕnhw}޲ֿ/r,5l|xgΞT22 qXd^G:T=ZO9U9_hf$7i,H^콄 ofJP6+TE>mXz }`̃LZg^:>ס c-"^"w9Go~E7yuUS `yEi1.p*}m `UUl@csX1R!7Dbq G)_wXB{„Qjǎ^uKff>n<ힰ7;2T#q^e A0ڂ84?Q^2Iy)/kG: ƗbQƈ5;:Sps8-I S'XuԆ? 
=Lm1=<-M9|@թ.d,l\k[AaVqݑ~yJӷbX aG~Kr9+S^[;/*U=qˬy-/߽w7\oc|v3.[AY4Gv~{CӖJAgMO5'ۻ*=xJ:u:?Z$2.X zE#>6Z.9/Ajendstream endobj 265 0 obj << /Filter /FlateDecode /Length 3421 >> stream x\;svMr'2ze&w Uȯ(B9)h!ym^)ON_"M;v{)Lb Z_uםχ{[꫕ϻGX`#fN9D]S;gW*`K֦itZ4f<a4?fj|zix.T)fxOiXi|Ehɻŵ͂'}z0+MtG[r&Ipw/O4|6 "V;-~,.t>~OR1wRwt-?:ب(eRȺD dCⷘ#cR1[TorfR(ychE>}0CFʬUdcHv|x?m¿`\YrO*[o/x)$H4]__[f#s270/jQL඗e>@OZx *0z%`7EּN.&Q'~T<*|°BцU.K@[qf#r}NL'kZz?!qv×0*hhHmV6T% )>MC>.L63[F^&1r;\aZ(mQ"d[eQ6Ͳ…Y 3C0nfe]kڵ4d:YEA֋Et˹L8jj~iO/狂 i7X*\dH$L *0sIl XOu$g:_O; )Jl+w{i9%gp; NÚ/y͗?Z}yΓ;GzY.z<>.^Z`zt/4^ aWdkY$l-hzoos6̋ޚi]'Cqb,_:$}60R6*e1{on3,ݘS1N!k>@5vuN?8[w_& 09ۚ? %m;{/XHa@W%FBʖB$ŮĹCqՈTC)D\BMda裄%Wx|QIwUx Bo|_O#} NR#8J=4 $d ) PI(C2$pm1YrѮ|n[CM!G2|/G7h)c/X́`U]H@7t(){ / iA:~H 6MmW'oreB0bfǙv ]TgE!ZĦvQiU E Nߣ !['CWk-NKVcjg"3Sn2!~ )*Ar9EM(&6N ,-`2n:x*%OFїw"Q9ILPo[8z1 ܝ!!Y oA}?6@Q˹ qijw r1 s$.>=JgdDR_ ۗ@"$G&0_zp HybZhP}8[\r]B!dw%*bHWO*1Wn "7 \)JxUL+u79O2wE0.Ŵjtm Z"ᄐ;9o,{|\I#KR*/Ecg6{EwEi#ʏSa}ӣ4$*~Ώҁ_ǞRhxzN} ,w7{O8S >?Sj_:1B.ll^ 2xcilpfRXte٫<™tnzWsr|~bӼ~/_쿖of?tpw^am ?/>.?yU0nN^~xE팺@u}ƓpadIWvQm׷X?QOl&*xendstream endobj 266 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 2068 >> stream xz,;L/]*p8ML&(t;z~-JkyPr$ƮkKTUmT^}}}4 *$ P14W([y:oxa  (5Bඒ6*_ ?:Jb?voC@`6Duʼt# +ILK IU{`7i ѷӣNT.&S1!)> ČnuH:j =  i Q(qR]}9?qXP ȿ`B9}ZSI3NrԿgܿ"ք\,M}{NAG1Ҵ::pp|2&Yd\ 2r|o.02"@m`Eid?@Tuq60(qKRo\_yM^qȶ^/<`ԹHPkQCf5ksLHsWɜ%s22[ F?0z "MRHv0HT:mI '/ [4?_^@ ؒBlrƥ< kGET]@4*.# j HlW뼋k$HуrWZs"R ML(@4#$/<Dj޴i` l%}ŵ>U9iǰ;)c2nX2 "۝\@2qG~+ڪC@ mr(]!]eSpY2ANnsOPɼ MU=F!;6TUl 2l *&9֬6ք`"Kn@N|)h`\X-Pq3sG6怚WDxi#eE `Ǣ) -aefcᕏ `:k-4Xb0Y`Ƀ<x1%;qdid0#܏07w /Ky 4e,.4w =6a>`2H{ɓ/;OPzY@d" پ"<`[AB@4rTy ѲTzZx81`yc`vH`KӭO5&w TNmE_&Е0Zi>dž!$l_ ke@q›wX[TC1Imo Udn#C܍| Ȝஞ@[`ؔFT Ћf@Û MFT;r(rXSѬh  YH;r@y7W:DNv逴 WV?iؐEB:G` U5p@i^ bI꫍@6>t4#STgU$j5_] u X0$3Lm E&ap$js0u}Y⅀tJ{ ojj$rE "P0 eVkmNdO*$7;~HJv&iE_4yǷ*ÛSe9Mcy?V0't6w[7V8X0[|f`eR350IZ = " 
؜y"|-RE~]YmۏQZe7Q`(k!]%mK+7^|pUHc;:ʭl+&ttҥ #k?r|endstream endobj 267 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 1863 >> stream x흋r EKOÝN :L=z^ fT̮KAd1:GWt`v.p> NpkRI T[gKZ -KѼI<9,g=6'^obO!kbc 5@ĠUx3ure0|4}u@ O#a,AVb F9f6 t i%|0@9 m\-,VoR]:6< הTl0 ;1ٵXbZw 4!TkW6w.OH;?t kp !h9`5`ic  {{;PgDFNg\>q: :3;Ix(rumjtp8_V6SeE{M5 jx$d@+Vײw O=p߯R㑁K5 >CYcƶ$7$b*x`V5VA}cx0p#Ywq@AD(p|H *H1c  ^ap2˖&^S[ψ{brXbn>@jPvU{P}7@o@Ojꅬ.k5%ƥ <ʼOdȑG lQ 12Ӝpo'> @ `UVVCvw<26q='&w% mY@1mD~tUS{ @KPH`4ScJ¨@6w!xnqo^*:@̖կZqv֬rwAZ+j.R/SD X^5.땠)Sޟ0HyƓJf7 ;L-[PC1 mGX>+l Nrm !oW@ݴxGeC9:bT) @Vv`Np UqRvԧ?O-@MJ+HY9}@Mb D5V(D/G* )t@]P%_ *?8L13 R?iUs:`X3ÙUG7HrTjl;iFeX,ԡyK|#82IJt.6kau!:V˝MYýhn[-FRp(7 Y6nzjD^;Ҷ_ IQ`(5T\s_?d0+/a@ q'1= Eഔq\6"^^jתH`T7A m|nuH @qJ=غp ˼6hCݞ-@n%(͌p# ZC@ zґfrAS+ ;w_]3jdejXr/Gmb% Z-B]{X|W yZkufxb j-A܇`dJTTw`hգ*DO3Ƚ5@; }.ʆ,ե7[@i02V| ߊp:V+wcR J{9// +A1 2|jendstream endobj 268 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 1671 >> stream xz ;󶛭(oϏ>u^H;ٮw 4.G:* \JtƃXqAj60}E {>LyCV 7ը%9p E@?ԮuJЏ>\>!@@:)HaIse $sk?2uFIiD5@5@eQ%xR7<` TcE9 o:٨U@NrߛK.^s,H /< N.ldK`-joeJb٤*$c?}2u5TEI|znT2׀3ePl,sFp0-lIlmYZpn<%8jj >SvV ].j%})0tB q b~2\Udx p=Qn~8ع/rKM uXX~…}]rDEYsxjpI817&&v% e eUn tXrU0 v*-}@euo|i kD`%X @M6 4@l6ȸ=aHFc*PojO)AǯYJc/±@ǫHNVsM`ɾdCMbF En` "[f +J#%@^o>Wy =*\|dendstream endobj 269 0 obj << /Filter /FlateDecode /Length 2644 >> stream xZKovK2HNr9~?YYB#Z"ÆKd]E?! 
9_C.n8U=5^$]ϯf>ϗ䱭Ei5UVRV+]o.U9pG>Η3ȚYn**вUug˪.כٷgg՗L+pumk Ir.xDmk%܃5]땖VPWͪFWQ6\іVXWkuM ~3;]߀/`*x?7PqeFJֺ }NɧO&=&_BU?Լ<8vj޽b|\ázڦ#z_⣏~q????N/Ho˲ :s=''ɧ2P, aΆ2EL3V ir;@p eBg eO"8)TP) JuTH@*9\(A*Qki qh5&} wr‚qsj1 G@,#eM ?ob)bɭGte%p8ݩۜ;q6-1蠤$eE+MHu3JjH%@Y}H%@{ġRgRg &R$u)=4 P-g@]iS>~tk{+AE+ 8Gp.5ݪ}7|;\ Ӈse0=<' ^#C;ss "]Jց*@uġBJud!j{M9 Ef8Wghe𝠰{rMyNѤa1 I먚`1/cRDP f:Ɯ+7O gW&;gh,,5^%2!688To.^;P!jn."YgZ+0bC^;?Vg}ڜ4g |jv`5P :d5 P* aO*!rtBZؽ *Eyk8^i&D% >$ ,<fjC2`0FZ Vj30 =„8P2+FdXc(…3iB+& b.TCBCa܀EɄ%4xG!ݏuI^ΧsvB)Jup.΁mXRR+L`+uVNanzY213aEQ)0`NS!Y ǻay!^ @XXp-P}9tZ2*QBBuӎAtR/9t亠% UHjf:$pK:,4c,6.kxCDh>'͂dz4*A@I 7ףP '\s[[x[/ֳXLҭ]9:sILWoX]5M(V4F(e:AKWCnhûpb $aP8 06:<'$k4;()AzTw7R.zjpxR.?,יfrQVe~9˶i M> on8X'ޯX/2Q-ސb߀8N^H; }0C})%a1hbC㝲_!{Ew T茖t_9 s.ܖ6/n/~On26yHX =cW O+,.F'ځ2 Vyi=@|9oa6% _Sx!t(g>ړb][*i@3yRTG|ѷ0GGYyZ. H:2*Wfhya|h#h#)u&Obɔ!9\儗Gw"鱰O 2psjc=+ƚtu|N'\'&NWȴ;xR;/|+7AKHsOdwE.9F(KT{TݏQO3I tGcDuQbAz7;Ѧw5vRLrqw][d> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 6306 >> stream x]$[?(((>uED\POEe5 @܇;]ޚ9f~7hjz{^߭GWavիۍ-knܸmnqaofl8G݈+NnlO>==N-^08>p8lۙ8l_@fj[X&|O r4FዧAah =RCۧ6} ÆLšDL@=O:}]+WpD@P(FE٢C}pMχɐy^ɟEw_t\< ?!CS(W蕃ZpID@`/QA_hۧA ' l C,? QVYk*B& 'SuPāE?>ʁ% !|@ q^"p`^@)}DaL'dЗGP 8^ N\o p{PiAV]bO3ͪ:AѬ "pEJ\a'jҮgo eh=YO~nBb)&} $3Xbeh?BQ<(5YS`%zOZxEA@~HAS Φȸ]Ed@gcZh""c:@C6Ъ.)iOK &܈xJ][i;Aш1&@ZKTv`w}>fQsHg>;0B)\p x(: ed#"N44x#p؋? ~<Ts`qй_~Q 2y)1)kwڜйE"kqb\ 8Rn'7M$7ld( 9XfݱA+Mо+:I P&2b(sp,,t RL1A" Ϝ9C6!<+%Am " ztGP#;@B'Zl5A>k=]|>& ާzA@+:-Y+\ N E1"+-JVA|#|dOy,t6| @Pr;K??q`in@3m@B36|/h% / p)'Me%Ѡ[/"TߤX(nCBS|UX_+0GBG}`9*uX/K)Y'tBL&VkQB='>"W_]/%!lC=DOPҴ7xctѷL,%V wf$W(A{뭷 O-"R,FeÄTбaXA{-8w}WOSt%^(W3-BZ/?}֐z.?Y!d:n?>z`bn C_ k);>#M^Q;T9Pе+- ;VED9;>{^!PP:v4YgM`ߗǗzY&t/h%Ѕ (JALp#=У,a?kċ? ׷>,@+%N:8UGgwcrע_P@@_|_lIr E+fHBz]ГveD@`/,|,?ݦ_&UY 3JngLLm5cI p #a d'kk#3W 6=.v1P: ~9y`LV; KH~!1sJ')LSMdkq^4Y/_ZA0EM3W7* ǯ(ۯodw? 3&ZdnKE#BnoFyX,뺚 "b? ?$)T@?`5F$$:U6p$ p].]H+w_Faٟo~+ ] ݱn_~& hŏ-@Ŏ9O?E]2,Z(.ucr""}4éu` , 6{gM0px%98Le@MP)TlXƤ/ixǢ OmJ[?1aM@)dKbKOM@q{wY \,F{ |[n! 
D+b!4+վ2ov 0pD4J0}v!EB86o~fPn^ v1Qؗ@ր!d$"HHc#Nn/^/V6wD,dKҠaFhH(qg`AB$c$VDK/4 zF@՝D jɑ ay#NRMPFАE(p?/e_AL{#?#ΰůS$'-F1NAM N&-,i@3'og |!m P jGZΞl>v CSkԂ=&cDH@;<,lհXjULJ_g|쾦ڹ% @`gb7j(c$Ad4菒 ܹs1r#`-DNP#JWW#⊾rBet3g$q!9-^h Nd1 2n$ G~}. A0:P??7yj~\@pTeW>?;u"X~#*jCa6(lf/K@GSrg* z82'qjBD5A3^s.& bMY!!#hߢ z?N=ZwmY*.8Pz"Izm~(Bjb:7wuW=%`@im*[NtϏ?gfn^sj.yf;k! ,G?@)QrDlbymDAy*$:싺S$ j1o]'bQdXi!:vk]C%F?mF&\aE'fk::LkAwKd4L.h@* UBӉThG-^p-}뭷 f]YYѧvS=%[Ѭ1 _;pͧs& FCX_$gHkzUzޕIi(x"?/|GWAأ/EHt0:UPqM9of-vٞv̦}lqf`Ş0.d9g?LH (uF"%aG@`OxY-EP'ANGpM@5^^a#XMi Pu(IVdeW寢[$ #&%s=kZbo>$w&=8| @?RF璡Og}gśA祸~E6E܋`k/qv@<@S&*J2sS{M.=CQeE\ qpa'u:BO"m%b`wYoPpu[ɿh>Ҁ-ր3'mPG61,>^o޶]h]&`z 9SPǥeB(ƝVВ%LC$`."w0  rR~0:`MSkяУ&endstream endobj 271 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 1754 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?SJ@<ѵT6dwIceq[M8P!]]W`H4łk?j1Y~j[ +Wba]N6:&RWvjheJ7P>> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?WpKW%~rMojW۶MgV>aTeN?P[mOi+W[bQ@sRUr m'Xځ4jqrk]vsXPx Q5fO'<1b⣖.*^* d̘r Ѹj9bk:E,kVhMV0sҀ*EMkX8A=+Z1@Zt{q[EdڨP*h Z͹}٢Yޡ@.bߚ˸'tk9̎hALMwm`ƛn_ "$sQ2֋[J>7TmW\Ք"*hdGQDԪ jP[ȭ1YEZ6.(hik):MqQ<)͸ҾXwsP|~8w {PA%;WW.:Uy~o&q\״nB7UU;WVl1@ 0 izT ֜G,LOMy@]6sY2jwT-Z@H{X-[v8 K#E-#^RMjZbԎn(h\`u;E2I z=&[;v9@=hs޹ =j@K?iL٪qEK,5YK&&f}uyn9@$pj ҙޠLκk5օg5!&<ܱn,i&G/QO7h]{kTnhD7-Y!&[PpVl- un_-endstream endobj 273 0 obj << /Filter /FlateDecode /Length 2134 >> stream xXMo#7 }l-"f lvlLF&9ȶ왁dyFr&+٬ڱ'|pT,^=BuW?J 'mgJtR Bkbݧ/d"h;BB;Zbxxu&o~7 Neo-THQ8h4y_hW0YUMٱ젬6S`{d6WUU@˪DtPhU/ϨLA^bSit "8AX3Ox8[2xx;a]R"6ѝT(>]ErE-U׬FV8/f56]4VD"&!9np+)1V^ 
aVU3`V,.!*3J7w\UMJ T>v[XnE㌈R]|ںn_W#\(1Q:| z|/r\y_kvN\SpI8ǪV5(微Z3* H2):cT9DM4R4P-N(%j:kcJƕ +NT2Y0UGVyR"ɊZ)#U7$Kod"S{;d6}owdBbp_hC,Z Gu), 8KM mC.v̤#Q LG=OƗѥnoo9 9~)uCO-# jl-BMDޕ7ED%^ǀ(cθeI% 9-+$}W2R*+`"@M9z2gb;~9_Q)qwlQ:%N^X+:G,!⦉wMo⡉&n(0+sC'UF~Hh.CFz?VIS'dRM2(#͐_s t rsflEϊ矛LGbTmcĵGME狦f%Sx'okī&Ύ'>="|S5"VƉNْH0E-] JS-h@D4v9͸n~L^Hr5%^~`i7\xC- 曽 eL=+E?t\ f0`[[1S3{Ӯtt.2&\D lڷM&~*eVFM:DM?z;zc;gmpr"S[Uتī%v3&x3nofSwϮY9ZwM<a,Ix5>fmY2Olx?|3~coݓ8dh.ATYߗ߭_-T3endstream endobj 274 0 obj << /Filter /FlateDecode /Length 44855 >> stream xˎeq%8ϯa>mTͪ/IJ)Xe#"E3=qb?͖۲ޮηç}c6~o:[[~>+?|ͨoxFo?O[ۼ{?}~ٿ|~~[ﳾvo^o~s5:WYo_?ez;őq~o}ow!\r/odO3,mߞ{^>[{}_yuc g wyo|kX_灞%+.soc~{-_vӸ2Kpuu] k_8zWx}˺ 6}a ,VQh/X66Xx_ 1 b/XX~j_-_x)jeu +X`})}b|aZy7\:W/k9`Y/0_1 /Ys-彬zny+b:K疯;>Fx}+v]6ʋ}/v>>$/x#Kk;ʛyQA?NtŔ,cQ:L*K;%K%4YBcxqB/_!HVd9[o"nE"gOO$H!2` Ss9_ȡy@6^\!߇#)8j 5ο\![stP`(8dB'ݎչr:!%|89\"s}Ct"r28 Ր$rS;ƿ?| S^|wγߔl\ p) MN;GF߂*P7X\ H=A:~O,37v~kE#-Eo=/,~OMa/<{A  ^Woky;\X.s)WǴx|AAE=V`ہlr-g:YUaVkԝqfnrv{mTlTZq#>̆Ȝf 8_< XC`ugw\l;^e}@zcꓨJz\'gC_r.>l?o\W SX6y'4c@.A&~[*뤜m HOv pjsmd*gശxw&HWWz*ɇZڂ,.zYV6. -G-K`9mM`vSx,,NA΋Qd+Jg~߁ԕ@&Fcl [\gK _v.B< @};qvGVT/ <s aӺ)ȴ1WIYI8);0~o. \%(  %[;A^ͺ -o/0 kFƣo{00~!Qf3 xh:@t]d$!̧" HOs=ϦITƹ殡~@}@lJv^ԠCilY5ȴN5-?D2? ]A%N&}`={+2@ R`7=*@1ܺn,TH!u[ |%o >tlrmS(bSk4O !.M BLnb @=3?}s_T? ڑOp_o$B8%NŢA^~C)eEɢC(EP2)=K'fq-8Ad0GN=@hhGpx߬u%.;77kfC.9tMt]R_,rm"x;nKrkRMoQ/ zyqjS1hS=Gk"@$dqJQƃ$aD~0C~9U5i?"LMhݺ>' R\;V"Y4Ε#y|?UH$#f+[ V0=y+o5S|%0VdkF$n@D"ȋm56G|,+fݾF}h"697i)=7_#|I,Aj<Gd&:gY#},#2i/%|;IbG2Kc ef |(w#hh%- ?wc`1@+Jα=KMY8ki5Fl A Yѫqd=[<˂Ĉ^-#KϣW7?w X8C*#vHrHC6I՗#ɣ&g1 _ Ş-7h-4-<0oi XR5bL}yD &gD ,ͺZY]yeWՑ" DDAo LiDz0$p%(c(9x5<5ǣ^rIЏ;(c6ET䬬? ?hHtOUI2QSk|Iv7d7څ}[epƻ\#=5ܽ,DrdY^t|Ǜ$ {9pf=H|>aS گMtIB"60\>IUrA?fIeO7Mw,>xHU]p+p O}S~8scbI6˝P%d_ld&rr p &n'pxOœ{$]ׇwJ^wamah}vfhpQqK˄-NlY  ԙ>#eX9\oz MʱwV!M{zg0BPIfcԭ[,یm[ϥsz۝nX86maڻeҧc5m8ui , ?Lr|\'m9xroqXieg@`oIOjD(zxw=4yL]Y40 ~*Iѿ HsNyK(6߻iBr\ϼi˥ nT`auZAKNtRְBR C^gЭia( iq{,t!J<,CǛoۯ_'כ$lмa:\6"_ĢIrH.vCx0qC=1Gtd 3 =WsSv @m'fh9. 
XR\cFY/Hj9n`aAx$7<9IL53R:THtYiK5GdYrHؽj ִٍ|PIJ@@@Qwрqm`=5l 2wV04APa|$=2fDQ M@v0xDxwd<")pAx K YZ똑NiyMNHg6s xX9|Qz奧@J#jBtMְt=5iځV4xyddSp# ӫ@F \a>rz/JdڛSE #r C}̆L?a.'R)"u(7 zirB aʓK4DOϵ OC?fLbhVdIZЈ մN4ub;hK -Y9:mrL[ ͬiK -(n%!-@$qh Ԧ2mi4&Z09K,$%,(]ʒR@$Ũvb%!, !e,3?@₠Nĥw@$Eb#jUuN\B ;q %$.ڈ#N\2RY@m vF\S܁hf>g#.Q(qAȍa%.I\2%#J\$>2qɈ(qIȰɣ%F\B%.q5$%#lҐ!.*dDKFbEm@!.ۙ$%#J\2QF lqɈqI%L\2%!$.Q"t%.PHqTiKBH\2#UqI HBT\qɈ(qIK%w m؏,PBw,!eI)KFdD)KٖLʒ,a˧@HYBY6ʒERe!*,@FnUχD˚{[i?<؇NC7D>@4C{734-{ܻA?s?`ܮgW\ =?_ohߴX0);e" ; J䞕?D- yRvPR2ʜǧjST>t?tigg9ް6XlKB"yM!n_=ޟ?~}?h/~(?ꏿ0)j-ϔLC h/ŃCv6."{xxa-dKU>2 5~`.kXZ;U`_n[kZWp pZwk[("c|üBUZF;`![ۛQ[mz+;RXkF(,$.qNCSWdݷ Bּ--z3YMح "+.wJL{^UD+wKݬOwS3fekXk,Z4 - i1 fR46er#V*/Έ€@tYsċ] I=IH:V@P=Gxyq4MY7D'l : Ĉ#Ntd :-C3 Ĉ4ʑ On$bekDGLt>':AJAyCBCHN bDg%kҘ :qĈN@!ܘ9`,@$GK&9`$'9ҨeceArqn@5#GG#9R12ɑX+i2 H!ArPe'NHNfq#;fArqHq't  HN Frf32 ̙#Nr!A; 9 H#Nrr$SAsqYɝ_8D\bDU*ydI<~0n-ќ@84'9HG4'9DXGb4Ǒ *ќ@{9Lx98 hN ntxLt1Ht_bTDNf?: Ik7Z7؍FnqI:' HG&ds@۰4q >ߨxIIP#"1;BzLZO->gԾ$L>E-IS K񩳬|2>eStqj"BmeYu.E;8JO~?Jl1敓2:%ESD򧚖O?!Om)ɟ"?#҇|ҧJ]}.׵ 3YE5c%5[)ZD"F+k}W/-l_ d / -Y*Cbo}?lz}R} o"Ti(O*J|߱"'"H9H؊||"v8 V@a/ZpQM 1e_#`1H]a!y%p /"c`DK^ʡ(ebKV 1do ":+J틾p75  !H@B+ڣBe; S%=_)/¡ /sX]\g>WX/~l :H \^JQ ,T &ey>AJae\?g t,PyGɗ[CX &Y@Whoz!8*Z[S\4N^2֪@i߰*ȀjDjQ VI;kCu9]fs6Hl@=5: Ok$9V_J *[bņ W!ȝ,hg5O8 PAt޳FZ ڿ_]<GLMoPCP5Z$2fa))H2}t)L;_˭oRYZS˽uכ<D%ٴw6N5;K~`^]LJamRsh݈"mV΅lxyKn .ރ9A|y"V~ Dq%҈КdT%!\J{Vo¢ue8lxҵA5sĠ)¶~n<f⤖ȆECwuV# ĠI]پU6M&ӺI"'Lm\3$s_)b3tIgk Й `maŘ)Rihzof&_\%6o|HuUذMb4` gv=7lkMa?k,OCY ~TSԜ!]{3=n~v1,r-]_ʗ}bAy23Ṿ l =@2aO*pemѕ"H-)F+:49.34+H"Ҧ\І?pca"6mlj:Ē^ x }/ 26GhԂ' sba޺}&OpdVU)*AT3mJ5m;Qڱp8Yv}ȹ3!Ag/[hkNPmTa =3Z87L@ڜ hyy/ԛM #w/[nsU{qxl6"4<-an(0!ٸ^7[ˉfGk^gntzұU߭j:!b? 
_=\tl mfkMmKA$E'.EL9kiĿfm6 :ۢv^i #ݴT-pݢG5/ʂK+^ZKYgEz;`ϝX_P(iCBAJf\ efau }o0V-ger2ѭQ3rvmKd BF]s#nY-1Lt ynA8 TV~om94#HHAvL G+AwB~P"oIWXM;*W!m ۺ(ūsƀK[$urZ3D`NcnԉZ9U40-OU]<ФD \݋#s 1ӭgDm/xf|p|f8 dq`@,bPA982v F,'2g*??`cC`@d3PΒ ""gI[=,TK!$@Bپ,ߘj|wEVG%s/&3S}/xq˞NMkލ&VԽfSUgo?&Ksm~^t'M^gdRf xPmaE;ѯE+j:D 6ǩ Uϯk;ӘrD6O4;E'cH3ȱ#&έ˜L5c"C80}B{%#Ô~f ݅G†Ci9O&׾8W=`Qy2z(:V}^/oUO~]2\O_'`enm#.^`ձN7 (\3TO!=+K}E+GF#̒MgF(LąU&Fu&ab4LQaThbK5GAC5Yʰ?J@,{a`ӢiF5wa@>di&IQ`-\1F$Bޣ) G@bzd\S;\MuAGV4|³dtGxH94.,uRx#[xp,uƾx[þƲDGmTEUXiui8kvl$c 9^uBID\H`1Ax,n$GBBӌ3)h H+*2z0鄅p%#b7]MR@ϒ )AFkBmv[ӳP5͝Ja=kf4zf4c3~[jmlA:E]eg.MjcDZ5ЃWe.贻d~v!7r@LRpB=KDƣ| dڃ &L$04x$g$Io(@$T E""3EjOp,M= p#r "}>.,)(r㺲u-J^#%Fްq hb=߃X#Ep9LvENFvӧZb7Ֆc8ipº D^VoR:O.}ӇM7tp!^7QHMB .Nō|ς ve3GU::gб{rP58ݴ]Uq=7:r ܕo^M2\^PʷjwΓs֞Zf~|3~|O~HM{r?RsfSxi1ޏF@] r="ڒ6x ц#D4E64?TdP sYmt5!Gmnfp&Aʇj=nM ÞYNIVq-HIVf^.*m &emब[kmnК+kSG%emswLƎBYCcGi@~(k9IY;{KIFI4Y!9.-CD3&Y!qhVՠ(jTD4t"*FqFQ\E3~/~ƅ5HQK~RW &,YZ ȒQzi/Ţ52f2hO2u^B [4BFXc ,QOuiVf[.eI"n- RE"IdDB"$K$,Q՞KdYO*%2m%KSBc`R.-N}.$0]!TkHEbTm'M!Q nǐ$!%ǗD,0 mK<$R$su0-"߻eIxߚ+\!l^Au`X0mkR \+Y,5.J#+9ښ+Fq3wE]CS%# }5,_+/ʤl].r4]E1;j2GX.Jh$foqqjf( "@Jeo! 
0 Bsͱwcp?cj9Qa9\@VMq(H*(b&gr  t)PBi6sF^Zan:FbZ؛QQ-D' "T {u/Zok"']ϻϑo~qZ@l=E׭o 5 mQ"[sG{eɤK"N&= d{0-Kp1MwW0ŝ1G&A%o*ck ML@OyQI #UR#L(#@Xe>% ]9-]PBlZMćfV7鈫9)2B˴?Z]Nhvɮe6=&-ӞEhm I-i|5=eu i(Y2MZ&W{2; zC>b^RI˔[2ICcИ$m#,evMMRIzg-jʕb4cB4˨] q?I~5pY3MF1<L~NEdC4/yR#&u/gbkތuuyug$xBi*xNZgBXDa I٦LafU3̧ۋ@ uL`i!%ʊp$d#8f]K9BR>H{Ц!LB/V*eu&yRkTcţ e vtI(ZD O(D`hZ Q0燞 b.XI|ӵmxģC2bRP'-IJ.J[#Z-fr M==Yѕ˳}hE h&J*CdPRI rxO5tMZCɬ#i-D-#0JA; zg<~oEcPOHI3/MzPT{8nxƒVF]@#e REqz6&r,Z556.FCнj>66iF䳝<^yY}Ԅɍ(>9x\"j+@Cb eoUR9 W DŽ<@iZރǞHFHė_oT IӺԮuYV'R%~Ek#;lcyfp/YKb= ֢ϳR$X},[ H"'{%isRx4_>VliKB.!R§9Y1$,[0˵gF+G 'zt0 { b%6XD4+Ir aiZwyH;}" -iVG{@>S63 L^ozӔk|.h\%E?˯t"c/{.X [/K5^յ•.@NRNоQjy&F]ږg50i&TB)ݐ=XN@$)n"}R# "C[Ȫܜqd);$a`s-9yF)9zPTefHf]c[ 1ܺqPʜe^JʮE[NU:?u=CW?™l^o훉s|%j3B d:F598kWMRc4NI%P Ҕ-,2/f905oP(:%i8P$;i_$Q )LffҮ:9q*UҜ6]9/B^NWb%C\ڻ/K\혦/Dޖ&ե<S_ 7 L ԮIsщ̒Ĥc4Cbl@HLwCOAոfhZ^^hOf"l C3&+TK9ht' Ռ%RMշ<%V11>t6ވZbD۔]36x> EDfS:$!(5 骉4@$ (u@R#3f(5Hĩќᙿ ĪYi|}-I&f]|2Ԗ^@KG9;K>Gy* Wӎf!;?)Y3m*B4hD.ӑD4 c5ʰvL73 AܪheAt/11LF,sx2[ڰ~}{۳Pwg}E&2ѶL {*KA~Y "2T"pn v 9=4_YnLqW>]*5 bnw[K=w%)%w1A'"LbfU^lQ%ID\!)^k]ᴗh/Eer # A:G]H ;Ӄx/dWBdZI<.ćc̤.r @Fg¥vs@/LatrL232! %"$OX&FrLz= ?EdA|4-IlzYBkWq!$*i)Oa(di>r'Qr%”,X,ɔ?XS  >eL4)e`.{V? 
+!2.»xxx=4CZ@b ҟ.My< n+4h=}k efZ>Z0!^᭝ȝ_Jf-l6^pn߳hp c U=B))k{\\P.mOJ(XI: 6"U[kT_t=Յ'ZjsVĥb(wϸa'tat#5-IN;ӌ)qUňI=GcFy3Dͭ?fx֮;尭xvox؆FsǛ{ݏ73M~ g {qGy!n::p q!NQ)$>8O>+ .UZvJJrdZKƕؤSVдrRyX`./ȼ0F$E0|i:/6|1aX7O6LxǕ S45Yqd,-sRP|:,H&ُ\_P;Nxsd05Q$}c:"Ik :z:8{x6Z-0$O?_A0hdZFcۊo_o"ߛ&i15Rv&1R0HXwor@BtTx,RmF$/#UmhlFh&nS)2!怚@=g Jfb̮4Of4KBC jAq~.O`1ÑukGB&׷Á]3 S'`i%r t Xwm92DfR4K #+XH-L C19=5" vts+2r`0k ҉>&NEk\3R+!TW17~\ "s.Ἄ@H2R5l_dMz4l@ IP;$F)Bbeט*Ya MeQ+A\iu 擶 hdq҂ @_],=QOƜn!H%@Gaq`Hj5?!K-Q: JFJH |0D>H ?fzZHV:ё^K ƨ[*̤HC[ǎbEC=E@4M&03*`HV4𞑡'#ڢy:2끌Gֽej=]ٗ֔$Q\ mM=FA6 `@j#$L2N[j%S3ڒ-\F[Bڒ-M?-8I[2ҴhJ[Bڂʴ%J[ mIݸ$%#J[29q$mi4?- !m9TiK$%#J[+ӖH[нPceڂ); msna-mɈҖdD~blhKBH[2n ߲0 $-8vI%-- P"k'5ӖÌdN CXt>M5ڒ- !mHز?Ж@H[2%#PF[2”(mIi .i-ڒҖP+%dqmɈ ͉"dфp L\hq=KBH\2B}R[dsM Ӑb#.Q⒑p!$.*qɈdD |F$)Tqң onf3'GH\& ,FwLۖd%!$/QL^bGH^2UsfTE(y #/7tuGH^@FʃaWw0p\eCy ~ixۿ~BvO8_>]o=<߉4gn Y ؇HBþ+CWCXhr }y/C?Gu(ɺ#ox c𳯟~ǻJEs~ǟUhgػ2>[UY|YEA9C~o__o&/_0VSetky~j ?) x}*+S]gOy| 6*SDC"ˍѾg}"?\~Xc{tM7rM|_~o_? vo׷>r}s ͯ/+Kl *A! i :&ʇx@K:5i|/*ElMmtmJ4ٽ^jIfxc6-|OP M?W*]@pdjO>XraJ4EڅS\74[!caiP1Q|5÷awdq}Rh,Oأo3[o aE쥃?hag4ߊU_TQG%d{$# g€y]oPr(?aDG2⋦!7ikK^2(+yyϭh~gy%in;75ü)' {\2i0wV@Iun8 Omw}苦 J6=l&{\ꐼI\,*imI t|Ϲ.vģ~%bPiӁvɷ~d~Ծ})ָH%uo`T5fsאŸ7ai'[QGo~absdxfj!3n/Uq{jy%aC[052`@g8R9}!a:vHZ# zˍ%ݜ2m?ܗ1k|jlBuy Cl*0ZҼߖlXyai6QK8e y 8CVjFN-ip1b5s< w NDIk*"6٫ʙbVlwp<{ QTCly&3mgYK:hk[vJKHLVM5Ƹ6?/ kAęeXJvjě?4mAN9qdDe0R#:O2-~U"n4T,oQuvˀobȾlL#V0,҂u`@6fdi,hո,82َ!2+刅a2ć?RC $@3+刏I ћ@,mȑ0 [#,Fq`X%L y\!̂!Rw'&>DMKƴf1ߑ0 LZhX2$N~ 6|]HMww1#e@7eH5XK =ȐʪxQtii6&HF,LS9M1d8F`0#$g0k &!d0̖TL fR92К`wb0 ^O Faxݷ3w#`Xx'04oڅr9 .ݙ$% Z%3T i֠,KBM08I]zhI LB&so4M Fzx֞LB|!`Ҭg9 `*3$$JBp0$ddw"n1,NLBuAE]S"0$"@f/VhHe qˀg^B"H-$ m4%/ !I[[ILB#d02 ^1XϢ[˩$"8 Fb82dk;BV"O-K"Ԗ)Oɟw(}*_!Ȭ"<(z*vתbUl8Cc-ƼrSr9kI!3 2g22ӧ ɟ0k!ǧwC_!oo!*Oe ˜L*r䊂ӎw~2VCWƐ<(9\g8nwY[HSol~wǹ4?˯czoрx= G^q':0Fm x>q›._ +hgݖ~!8ϗ|q}hr䕜 MU_8(%~!4_A. 
* %;EFNyKny(\_}h8^ Fai~e@e<`DKb0b^Wb~!^ܯxZK[|OR'LЅ#_p6lRֆVcjk'xhpoj6S[M6ܦx0*iFkÿtb/AzZ%M:'4& 4;Jj-y8 M'Ae@Mâ.}P1F@5BJ=M}jx"[yH́jh 6JeS*ԧ/E瀰0|a9P*t_rb;bT{N#udto<mI5 _q\yCCȃJ42ڰ\󼈆m#%p.C&*jb-M "ZD[߼X/d([Z$H}Wib59aဪiyN_y}âIj~}Wdi)2u@ClaM- dML-\%q8oaȫ k> .[i:HyOӤxJ]pIFY9UlIl5k9*)>zŽ83V|?`"-¡ ܾqmt5epv2e VweK0,UF%x=lVR1 GV# 8 0THưPM0Tn Y(tM:0TS=n)D t.?Zln҉pKoGbpp R,0eC'5f2\CG*3Ôu ɔ l(A4?p(ePmv[_Tnkb]¶5=Q[FSW~.t"U-پբŒ\.V*+\Cs?iRg֔CFց .u E:DGb8(^DפFND (h_#vh}(%W@UJ=毡}cU`K3JR5barQTVm|\Nlmt4m uFQgΛK>L3.RtS9PYhuzXkqbO部aK}gچ@6+XEBCc"ݧAxҵ4%C<a cF}qz`}ل ﷘yɀrdNsT:3S'Nh;vkni;m6 h x>tt} oOhheL@t@4|Ms :`%@+(8`@}q9:"qqv,ʹ߀ C].2 jyLza)?B)[Tg 2}i/jOoí3R/Nd^H$dIums"Y(&=YRdIOv$7KN$]5踱oٵ:ᭆou܏ = MDRQŒIѲ&#bطYdu*O|G/ATv)j2Y,hB{p͐JU.%s (QEǶQevT95i>lj#IpR[Gs^,ş<攌+-t2`ni~nBnqXؓtL}zyK4#jՏcg=s}۱k@>}Q[/yIBup t wB\i\\aplVNɛR&Eg.Cޤd:E_XPI͓3!_H =V /G[7'-R-Iɛ]!X2I Pt! Jbnnkv\ k1mB}mx0洘ʵnRZ5i`{!7{i W$- fEK_Z Fj7DQZnC+ie)5N^lQ!E' |Jf?.Yw :uHл->{X-rܨL<dMb(>}2>VuL vG OUmݒaƕ y+:U [ mMLו$i76sXӠXZfN2wZ3W♒Ɓ˳\-4TFy٤V}![;8*7 ,piLܷ-Gɻ T8ݵ*36qs C o{;hBSaBc%RNoyq0rR:-EZhvC:szrҔg.'#$jPeQvDD@=*e4{oLCdH$ĢUZ 9Fچ #h7)@ a݌\S]6`aۘ p\'ƺ@$( DB8rNG iWcDĸH Blύ饂Zu/ 8XAH6$5*j0Y7FH6 W.zaHK)RFN+͙xIC5 +iʘǺL ,GM۹5 P97##YO1_r32vp`kd$kЍN52v[ceck +d&"ٚ<7|Fl55ͤLTs˖37}7+@TfRSk";DuZ6$OT}ݹwKȅVIeźW%ͨ0k{4_4=j(c>`xi=e숍ufD:sqDC#%6**悴Wt V93~K5m:׺lWdV_U>]P*vy^5]B_o&~*IJB.gm皠ʇXXZzWzߎh1cm8P0ĺǠÃ"Ȅ}f1蕨v=d$)mI.z9Z`K/;}jY;/#؂|!Úۤ?K]q Cr{!er_pY춵(A۬d `\@k)xMmdE=NW&4,jVF b%K9w^r'=i4I DԠQ,^֭zy$(4+[<ǘYF:P%m>J$Bv9i 1f'w~{j>Bʀ\JPj|zXM~Zt x Gv3ףovـp1h |AN_aN`_޹p|j ӗ;spkgIħV%mUO.F+}6$oJސ W:9!_}&c|^8%>}F7L]oOÓ p낆~WςF1譱8FV$[ Fl]HV"bèA-!V9ڄ"!<.@S"v,洓D]ΫV? ä WH|ju1q uݵC  +\OQCu"(l ՉR`tlvפLCq:TJgwrC7uɅC-XѤPgх:CxpX&ѥA BCk"4T ͥi9$X fE喆W3H,g%&q}.T8C\2úRۋdr^a=CnE21'zNBU3$P$YްCyM˩bX@4w"-BhMnK!PC !P( Db$P(,Ir:Q&-7DH\9S D=Cmƒ":Dذ?"22t iLʄtIqbMW[Eأ G5e(9P\޲(HA+0%Nxa8 2̏! 
Ԗqj FDmvOPw̻=>lΡ]9K3 B7bz'a8% c.a߁0Klvv\n 82Z XMjf7+> *,JU) tta,HQ{3ώTժg$/bp7rmIt7j:ഗcv+dG[P-ht5V!WTSHpS_kG[AqDHOc!5Y~^Qe!DnJ>z!ɿ }s+o%{Uu}.mΜT'8;k._5,< A|RDkn xڙp;k Y$F'%6*&6)}UtH}W7r $(qW%H"v$7mmN 0s.X-w).@! K *lhsB A(dʖi!:"BLVX؄_D u6[v D [!xbC5mCmUg{DZfؾ,usMiٲ9,W?.9=wGC(W<K 7fJ8 6 Lfju%-v%ck0iiKUby`ѪׄK1>N"=uZ5\@k&HiժuQ m:#Ŭ4tꚺjݛg\Z2pWeAUp=RC9P=tM |t=-V%KAӂ/e}("x7&PϷ6cv=^ '+ n|x d-9PBFrQ*#Qfv{s[zJ]ʔ>F E٫woV\V)G}oNOplrb0iJswD=͋sRcHe-]$85҂s8«@CH3BԊ1^)  3 7m ~vJ0]57W@ IuXUrdxĊ I\[ߤ⽼qU\C^LMROkZc*LUIYB,EEaL 2\5?&H]W=R6kaʩO 'W @YFux;?u`pPF?:=M uFxL*!.뵋e6 d*A궦t~Vr4_]:wWGXpPJ)HIt^ !.k `T[@ܓ\^%G~:pw$:w;@"& FjܚRҚK?8f ӊWNYm {؃*T{~3}2+hvih "ga.zH [k#ڔQ>G)Z]-&Z"U1"*RDk%[3/4z^N4%qϋKe=Ü' 4*~zf :,]s'4ꤲ5! zos\%Ԋ:DݻP|.`NޅjuɻPܾsĽ D»w?־_Bm/m iFuw@ܗP S0Qwj̫z;[*+Js_rΕEx¥i;N f<qv?B3S7ꋢD%Iy0NDž^GňڶQhGꚭ+39`\u2\Ůӡ#J+Z+׮P*}RNylNTФKv1ۼ[,7|tI2/d17W&1lf6FYO1\#J`w 7nO8RNY Ljo'8o-E6j.ƙK{sUI#J">pˊWΗ D#.}=qb`SNVrsY\_iߢUGMxZh4:9R0EʵFwZ6rH#Sˎbոr' M1HN Uxrh`p[H.usW(]nPcDuWًH&"B@Drie|}iMB.ނe9]!0 L^[ E5z̑aZ$n5fPrD`'vD K+b&+>,d1<9l)кm aAy66m~x"Iv,D%'kBsAܗU }Aџ*LJV=y%n릙m%\#C 7EHk+$ \PWg1J*8](a [c a!;{`#MaYc-s]| QGMwr6"!ȣ=6R-M-=n"wO "H)lX$+fFKfd=oAՈTA/v8.[B$=GJ3PQJXDڗkHG!#?-7uE+R4A[m] 7D@ӗӉ.B#{dx`Iv~#&Shtޅۼ6XA ai.ОNUY|*K?˽di *-3dOһ$I[kS('IOze[)(8i=ܙ"m8*vJzpxEm\mW\Ǎq[۬J+*rϵގUJݏ2ogC#zg!P";~./.cruDL\.SV%& I۠N:OdmI& گ=DSH$ZD hIynHKnHTqQ'0 ͊;Ob?謨"^^vT8)Ҿ5s"R)HgD%DFX9QHtQv5bK֯ s$`';ZAgSY9M $38u L^ ^ ^qRC uem'*9{Dhg ZAg|Z %xj:u"PI1 oٻx ҷ( S2 :GPzQEPOQ!…OXX,I3DeݤyoH8DuTL9uj}^*}Ɇ{.{sR̡Yt+{gAe+1.V߉&z*VP.VzE s9]P3SE%Y7#ȐAeD}~)f^0#/ty5)enyLQMka?w y!3+f+oa`Q4?`G#-,6\Kb^p)aƥXs:q\UK+蛸d'.%`wd֕b-{DYíAIb `^Y:@3tRLaC:ɣe$%Us%ы6I"W,0Ð 3H v\VstfBqm0WBcdxtAFuǓݞERO4dJVmHe> \uaW<@#"3PMVB%QL/ܢ5"(o KʦF8Ƅ}Ewv\nQz!?[w#U^0`WE @d qEޛe Uu-\/jkAtѿf b=m} AGiQ? 
*H BrV=.vOyD.GiXӽy@Z@9I'4 ȷ<1&ƙ&UAX9`ƀm[KC`ǜ -21WiT(es"30WO{+:sAh^l%3撙Hf*쒙d쒙d.^,k-_)RDk"ZSD "Z|"ZY$5!E4nI.ȄO"CrM3B]Vcfufռw@EE$Ph c D*^T!Pzs_D][>L"Ÿ~ImW [ u*DEUn"g2lP?M Y*o?bC)j~!>g$X.jL-(ZԚpHb  gR3Om ҉ I=3?[;z.kH8g )y& HUK߆*+p@n˲Hbm>GYxˮpPb>]g/B m v!Ȭ݂we9 N)mf15໘~!Nr/`"bpYm]m8,z=+Y}.f6|lxݺヒ+<4JvK*\רyHU]N ~l ZةS|nU Bn\Y~ MBu\rAQedᛇgˋƒ+kGjʡVr=x+!!*ݪ}~xeQsl^j ]]3Lُ:SVWc(h|+ᡸ̊ΊQV.*oc?0Z(6Hx7$m( .7׬NX`B{Ao_^/ mXyOc][p b2[F )|s! bFۺ!,,:x-07-/:titەt/d29,,qkc)|i7+6+Hӳ䜒gQ#1R!YC :Y!ݸ8#yvQZDŽyBO­PC?i9S^t_"2'wC { 8Žn|D:{Hحn";pݬXDbcs"L d jXN,Ht#Fx_ӹEBGְ&}'d[`"."=9G.rz0䂉,Ō-"SFp y)+ OiOil U79XHDq zlUA^鶝STj?!="H{Cٽ(Rٖ(R.ĢHD">_DDBa H>JWQDnm;( H&FlE KXSekխ~Y\dz,ءfLhC'YE<:$%"QDQYtԮU (Y땏"zqp&V };e.75䓄j_9b%?& Yz c'z )ciܛ^οދaO߼~Χ/_?K)z_ \ץ]pI Zw1Mtקs|GC֑Kxk"=y8ku/+XΤ xJе[}>9ZJLzܒǬ}صbruH@.v'cn¾ FQ_kt.yz4x{;"zCy G&UysCE'.#^.x/ZYk*y Ӈ¿y-=o֮.V]cЏvV ٮ #Zlϝmy-/5!ZRm.:,}S)7J}:ؿɆ%gy'=t V7N s~(6k VRb_W pȻ~s\uKZmgj57[e}tsgپuy6ꛞ4~exa &9SϟݭW"/u, ֝o ^ 毥n[\x4c3*@-JM6N~D&F';^®cF.qȊ[6ͭt1>䯽k5,e/NqlBU7opְօAJɡ6RGƐ GLˢq-ZXXk`S~Y :ڗe# ϒ[ǿ먧@CmΠ1:>~l}N~.>_/'Oޅ*9ПD~X‿ )>_\Wˇo ]_<t_?=~&x~Là?=|"y8}~<>xoۇgx|x?a"Ck{8{ y}bcQoii&>{x7O_>9}^ޟk8X|=o.i~W<٨ e?aO/gg_6l?29D8oXk=k+g|#m~GқS{ g|ևt5`̆ɂeAލDJbE1<`X:v58c1"*}:_{d?iGΫщװ˞O@=.Ip h!eEcϰR7ÇorG~@4D,@^&z%y ӬPloܑa<^IMNx1g[[ &dT?KсҏR[HCOkQ'-ǜQ*w9G;CԻ!q C9nQB&U.tN[=M+*?rP?%͝!]Q'y610ӎOk:Qt tM1񃞅 ~¡R2& ^1I~^/4f-zx3~WAqoOB.jyүAʓ-Yr}^gنrM%OB,dxu%OSSo/ 0}_ܺH8s'M6&?[^0r|vLf_m]{f5U"W|[7rrOnkwŊ[ ݻozݎca?v=gWyE-mob$n&ԮYj<>a`12YT/HRE2>%!> 4:P5f~Im̂J ^C~pX $l2,2b`ˑmj%p߽7?KsV+?:lhi}̍I!8c&I&W8,~,aoܼ<Ֆ/> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 282 >> stream x10ѭ ŹYendstream endobj 276 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 2984 >> stream xݝ-Ho*(P8[áp8-C֭[íZC;ɛd8ϩyyut:/GǷOcv1]D=ʫK|]yo B<~-+׃g}rPIZslhg)<4X)1O: Vq)3\T^H?[ue縷7?~R-&Aҧ̰_Y^SVcZ?ſL@݌ƃ>Ԁ}<Հ|O_V1J J:,H l.A?:9`g};[\f6cW7%O)M^qS,2GЏ~kFMIH 
\_Bd}i-]O~d`ܧ$G}o%X|7o==k0ҋH4zbRJǪR7[E`b( [GɺYM4I 0Z`};| aߗ[(*iI:RS"a֟|͢$YA\'O5g;"$jlk}.h+3|J4Ur\f?1kd}_dd=D~J7:LsOgcZ_{Zdtr[ۂ0Ttdt tpjH@B! jC߀-Q' `ź >myYF3D'W/klvgvOc@Bگ+w#.QX Ob h%aX$U /:Bܳ <9D%P7rɇ :&2Q$>x=B533}܃jng4ٝ BT9^)&;2!u&h<<`@r%wg.;aN4 ["]E3U{P> wg)YF} @-iڪ`f,8KBk>1x7`ǀp cnƼxώc:qU'$sr &G<4O郌S}XwpjI<ס:c\`pu~|"F7e 6` <τi/Fw(v.N!i=d)\^a8EuȪI%wMx[nkLVu0-x0Ѝ2 k}kƧJqj"Q Ǡocp\(t*4vo m JZ6h_zmH(dn:ǁ\i~'o Os Z-#n`hy+Z(#77ņ<a\~0XuDD*XiUw\-x j1 ~C pMkӼ^^}BAhr"$@ ;8 ໞ0p j.D,4!g] ~]> O: ]]p?&b>Jner{."#_ $ =i^2VqG\s0d(ol.;ln!!5ieZ_(G@Ui3Mş:ⱚ `:қTpVz]Ɲ0U(l7O=g*b@1(mbw|I*-N aźSPM!T!)C{0M'1P辮O]C޸.gpC@.QNJe@~  Usߦ钏`P5yu;wo[1vGi=F MclBFzy>$Kgwp̤̈В(A@~NJnS%𺏴ܪ0SW7OAyf!j .势6l:k= o5 o 2bv@u$CxLsk‘J_Wn' NK @0]j qi]XoU_l3"g@ttW`& D\1%\_Cmz,U k%+n7h.WXMV2@D*gݡ=b@tZ{ӻ2c}?f܏00u }@d~s#ׇww?z*y_0/qOPe+Dg] ,f01/endstream endobj 277 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 2497 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?ZsUZn+ < -GҵbU-c[.Pkn]51oSm?}j~[f1*.n;#|Y^OO$G*+Zș1ւ\Uu8HfxoR7xrq_ pXjf43 zq= b<J $ՙNѰUY56w@s*IRKW/mO#'sL(WZVȪP-i/J꬏ΫHѴNBTSE]b+3S%|8T~7j:^O5|-y;z8h~Jl)h8l܊eں7 ZVӪM_ӯy {~Z3 G5zdvNkIXЍZTZRb4t(qWJɄr8"2:_q.L|z8 7k\i&$I~P;&!Ba\l ;W4r:zVµl:WuYjZZ,% ר_?_ MR?_6ufݞMQ'5zrk=_Oc[V`E& ^泫 VrI1<;V{EJԂަ"N]?{9VARh>#êJ/ Q;TX J-~q\mz͇Woߒ3qɦ3w[8V*YI⫥)h#0xikWyhꤲW\Ņ*]kF_oZ ٟ+~յQl}S".=.R'ڲ8&c:L>I֧|V{> *Kt]ѷǽ\zZ +3lG.jḾBMqTV<ꑱ&SW7Lò+2Vxs`i~yU[+tЯ+U9ZLJS_Usӈb!IcZm0Zċk,xHs`$4'.>wo5|`݌L9ΑYx^Zc~+fX5i7JMnHK|pdZj??ʼO6dı'Rn~Tf5 *4!K?#wI7ݕe5NSV&j+WvF-!V'?BFYQl0 s^ncGST5SL|p+^Mt׀:5ޯ\tp?bSjCW.G&l}=Q<-h?JɍjQR:U6\<TlepMgNqԿ+c'_sӓJѿ̧3U)Zq;DH k @VBX2Msשps\ɧ6f0'V zݻ(P~ -$K 5z5#s{tZCWYўj-Uhmą\d 8Vn#*Aըv1Vr2nYW/֯]?Zȹ~,TWCJ)ݕ'z3Z+׭N'J|c˭N6$W8M]U_xb`(2%iip>,͗Tu[jھc<^Vû6QSڪխ*^~N{'ZxXha)Oɪ/ɪ^8AN6DrkVrEf[[v++#oN$qW)Ʊ&M 
KP{z֙h7_ۊ1|U࿤fIֲ.d۹:Mĝk0>KTezgr5zPӉendstream endobj 278 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 2464 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?u/a?P=jLy5WNLp|a.l]G:ȭK${Gaaju|WU1^z-SWFzƷNsXwkNb}ӲlYl"*+::\cv 5c61[qǯOroUl?Or֫"T%1V"g~Sg喍%VkJTRwG}9\ (;Ss֪9ÁE9=~3 0ʋi%[dijʈ-U{jrs^Z>B4 dc]d\Ml ^QgaBi0j0W]]6gus\S+[q֣s:Tc\TG"Y]r:ᾞdy%'2)yoxgNuZ˸J2|ʘU9Eh̵JEB )3`S[;kmH5F~j·OC.zdֶ'^pkХ#ԣ#erV5Sw=jN5jlY, AeѼ:[ Ka(u܂0k~Ett+S>I41)+>QoEw5<]k,*=~?PxLLtOON9"aA_A Ꞑ60ai+6|sz֦Cu¿ ``:  }EE"=Ү]7exꏌͲm(Iu3=-83O%kF_+/<Od;jtd}k3W-(QW' (_O Oњr":j~mX2WFtU9 KQ,9Udڷ^Uw:^/Ҵͷ9mGXЭ 8.1JcJ9KF;0ToY3&Jg>皬ǚacZ*eڗս:IIjnĻN} Xl$ qLg|NnQW=9&Tlo ғvoV1U.r9__[ $,C{V*V?zbb5͇7~G]m^6_Yk|v5j \Fhھ^JN}M \+N)KҭyZ6}j9͹lGWT S)TyFJ-DCUc*ݲ=*%*gB t&f7X8WT5j8]Ryw',_B79H ~kD %X92օU rVDmSW4qGniVzv6d=4{uY^aiJ|(^M?S8Dҳ %zTcy/U{雫ÊuwSȰsO2?ȳlk[h+^2ќW浏1-$ّq|Ʈ7ZϜnA_r?#mNV5 5EqZ#U+UڦHľR&zq9er2P ݚC|Hlkд>fS\e7R B&I]endstream endobj 279 0 obj << /Filter /FlateDecode /Length 1815 >> stream xZKoG znQmZVI%,ɑ촹䷗3ZR+*c!y3~.?g d8] tpDp<oʡ׍S!§owb#(!Ty-LTi "TzjtT쩮&M :[#etU|#I{U- , M4N:k],#e"Hycdtn Hшme5'ŀŬHl̢@N_ 1:AM6ƿxxD@ȧZ'!Iw-!N@#ȻD#Zm;7Zĭ+ Z"!ZF`uB:;Z1`&ٳf7".AoJ#Tg/SJ]AC6*_5& 6%HtQ8 xf30`Vg Mlnm*dR@|un,T[ MJD[ >pL,c,jLI^\i@+N`kȱ" @4N7܆@r.5=*Mk"}0=Ҩ3ŔPYEf'JΚ<纐%ׁ93SjcmIsIp;G9fU6[QMŴ[*ERR&zWPJb HaLL ==ϧ`S; k$+es$#7+=LBny3F)3{Y \!0 BML\n99if]RN.l;c6,k*BGtEc1LNXmhB> stream x]O1 y@*U"tЪjbL@ }!:g=(NGZ`: SeeM'l2;S\Ne!ridgdTNqPr48n1Kii XGg9BSmendstream endobj 281 0 obj << /Filter /FlateDecode /Length 4019 >> stream x[Io>&O@l׾H$$>2Q|E^Uuիnrhe*4k}q#F䋧~suw"6W'?M QD9qWʍuzwۓg<QAxz&F#Ц| PBLJF LSua 1z1Թ4A9\SutF|rA- 1:3. 
X_;h~&u2FpBxv'6?u Vmο:9ų<]JpMt0QuVh;o"{u7{[9ܛM&s3ٴn%O P>zh-ryJh}S&_䱍vڟ+&!8SpHȏ h[ǠKw(9ui/>cӵ,ʵ<]~ 醿 #/+WN:*v63qG3c_ӾP+SC@}2 IJF(t.okhʐNFZM^AZvܞoB,Y,TQT̒_t: 0YwУʫΤ->Kȸ-%^\ /p>Q]oce8U֕: 2T_jHFڌVB:g+1&А!ǀw@õj}Հ oVr' R$CUFIuB: uci&7BJlcg2iP^6#69L{ M;)' Y,ħҌZI= z]+ټӠ!ZiV{IHNoOOzlR0D:khX@xB;izcz>6pg )swM0,6Nw6N8:&dƏivуӰy d4Kf4rt]_-ԍf "{%!S*$YOT{.nN&R2ȠV BbU,$fl&F|h_RzYJo(P`LF:tf/E 2(ǧ-%Vp#t<8eh/V#Nu6'%h'ߌ@jn'Fv! 'ZI뗉3 OlBloe ƕ7VB:\N,»&Ni5g`qpAAœl8N*onFe@iD[dL6 UDBx\sܐF=.b8EԂ!@$ЊPJla࢏ @3x|S(|Z? 7d;GJ i.pX_:nI4 F׏IVHG7,g, 包 t#VB2״xBw3\`[۠Zp}ʌ\(<.16R$o֐Ln4VrQ;k59}ҕ^(;Z$]ړsDZ=d6O0VW+!Jk_S\2I$f`x=0W8CߴEI?9&-t! եnM4~TCL^fm~A#ɧ3" "##2;<pdf '(GA/' ]35+hY:as[m 'PQs9+-Mq9|͓㐙'@Kd 0z]>qSgtb#(.eR"@67ᐤh̳j+`IH~Zق#oZ'%!R)=]}(퉻Zk(H mx6$=ؘ%->ǒ%,QN Y-;7W@~FlHҥc:f?-rkP` N- `AVQx<*Ye, ;B ChӜ@H?dB-_g2um ͘ lc'ՒP_]H ~~5>'bo` h_nNY*Tʷ*eSpraU"Ȫ$$S:Q?0Fgr8 `'_<&g! L;()hEncw w'";*ZZk#TGX/J R8u| %9߸}W*odT~ϚcѼ9Pח+"5UkwsϸkEqBնX@#d71ߔݓWsGmٮ[2n}&xRe[ka{c2UU%i.v;c'FcNmW:0ufF L '' [b_AL) \hN6gBk _-لT<&/ oIvû6ܶΗm}Hb)fXl 9SX,UͬaiI*;(T x`z"/yѐmӎ]2`rQ2b80G6y@|sMlHF>ҰWP }9} Z o\ieގ vKuMr?{C *MӄG}i*a͉Aiu|I1a*ֳck +%sa]]vQ CO:E:I~l*sD;F)Y-:R/YQqMƚ{NPo@uE}pevצ=O",'$ϦxTo}*DH!"CRၢb[sI4qw 3gT᝜]Et?`Ҹٲ2Jh,]w}2hxVyTXrh,/MNi91+LǬWPq%WamIViE=Ta,E%K*~q3hGj_endstream endobj 282 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 @XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|s^[+x$E b'Φ2?M%K17MR{;RB|FS}endstream endobj 283 0 obj << /Filter /FlateDecode /Length 4340 >> stream x[Ms!R)q=|%KV9d$E1(ڦu_h+U)<ŠׯχZ_xx~N.??7vmׇǯO06_R g4Zѣm~{!A? 
u4&Ehhf9<~zpK48A:)H'yy68dSz{WF4h hR wh iãՑ5V4 c~` XWßh>\jij fT{sŪ1?*^''nMfv*ObLhiC2>3g,(x?l1m;Y-S-nt .RJiXPLL&>K&a걏7yZ<ܮݾ-.0:EoWUXñOZ)2 p@EL|QE_60>\ Sa譋Öm9*D,:H-Fm='Qas+kXã%>^!raTaB.ؾMe2S.#k9Nv >[1anDz㪯K"l SL$s.iH^9r^:9p61XE[V Z&d_W^s2y%P@ĹhhRv7"Gwn.:G;oZqKf3 6hTRz5NVS#wfb e8oe|b0f?u&^@=ܕP, 7 V1I8/"HjjGc?0%LѦn|CuofÎR 'r*Ej$DcTu@\pR SΘ:M;}_C}P<ʱWMqc8{wϟh [N$<5$PInN#WLެn[J_V<22JD KyLnń"[mKi/oE9 X$F5<\8~uohdz5Q{.hjw%PB?-qzCSS2 DT9m3UMLde&]H25I 갫fZXjj|{vD6坴6-GF-ì]|©N"Q$%?z ̟dSKrAL^Vq7-vpH[w( ;)7m[OAseF/`K ǦwyyS_֗eG7;n}3)LM x*Kn U.Cla?JGYfp#0r?=SκS6Eo ŠÅΛYU+(d7Y25`W 9'nP!p(liϜӤb!D>/?['u|~('-gی1ʒvl %A92X+ʣ`jg&-AGGX"{ę"C+3s]>"<[0 |(@tc&REliRb }b$lfr}G۳Z+$:"dŴ->8Z;">,z3pqgjx:7υnZ xTugmg{jDƵ┤=u$\_{䟯W.ENiwoDl*)N&8sTh &ʶl} Q~E72)P5'N"vG!b,%q:wJN=g j|.yq$pvc)W`܎gt`e_xP.!tWCOQH~,0J,UYZbFddC v_0ŠzLEլ˦Kb.^r6눺&BX_˪yMhQ54L\Of/2q1Y5M3QXs9[+eY{fK&o[8c> Ei{?jMFbT1^izbjE#dLrby;{UW!\崷dbxv ٌUBjML3 PAq $j_:KlC*x(rPğD1nn0nM(EzגU)~HW@b  tnS옍$@#,{BiIwABp׫v~SpAɫG`Tѝ.ۉrG ѱ1.:R³:SXVgM`u]HwEWS)<(L'S]7+w4L̃UvLjE^B?rU K.MeswGtJݼ闻}{U1yUw8odlC-zzF N?iR_qo.銅TxPÛ>Ӝƹ@/fU{lc4%-2ɍ տSbEý)19s=Z+'Kv `kχPN9t%# 'Zendstream endobj 284 0 obj << /Type /XRef /Length 334 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 285 /ID [<95aa5559a6ef0f11e10bb0ded44862c1>] >> stream x+aݝYvYQqO R"g)@[Fܕ$/pB)rݘs׽>=~mlǶq෱%X!C6_?]MQ (dcS9שMNHX-SbaElEOu+mM^i <bπrľS&^GMꅄU*oudwT`6HNy|Q SpYL?Q6gz\uOK;'6J3C8=)e3ErЫ1?IafεN8S9IM1ɩ0rc{; endstream endobj startxref 416064 %%EOF spatstat/inst/doc/shapefiles.R0000644000176200001440000001306513624161276016122 0ustar liggesusers### R code from vignette source 'shapefiles.Rnw' ################################################### ### code chunk number 1: shapefiles.Rnw:7-8 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: shapefiles.Rnw:25-31 ################################################### 
library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") ################################################### ### code chunk number 3: shapefiles.Rnw:140-141 (eval = FALSE) ################################################### ## library(maptools) ################################################### ### code chunk number 4: shapefiles.Rnw:145-146 (eval = FALSE) ################################################### ## x <- readShapeSpatial("mydata.shp") ################################################### ### code chunk number 5: shapefiles.Rnw:156-157 (eval = FALSE) ################################################### ## class(x) ################################################### ### code chunk number 6: shapefiles.Rnw:174-178 ################################################### baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" ################################################### ### code chunk number 7: shapefiles.Rnw:180-184 (eval = FALSE) ################################################### ## setwd(system.file("shapes", package="maptools")) ## baltim <- readShapeSpatial("baltim.shp") ## columbus <- readShapeSpatial("columbus.shp") ## fylk <- readShapeSpatial("fylk-val.shp") ################################################### ### code chunk number 8: shapefiles.Rnw:186-189 ################################################### class(baltim) class(columbus) class(fylk) ################################################### ### code chunk number 9: shapefiles.Rnw:217-218 (eval = FALSE) ################################################### ## X <- X[W] ################################################### ### code chunk number 10: shapefiles.Rnw:235-236 (eval = FALSE) 
################################################### ## y <- as(x, "ppp") ################################################### ### code chunk number 11: shapefiles.Rnw:251-253 (eval = FALSE) ################################################### ## balt <- as(baltim, "ppp") ## bdata <- slot(baltim, "data") ################################################### ### code chunk number 12: shapefiles.Rnw:301-302 (eval = FALSE) ################################################### ## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) ################################################### ### code chunk number 13: shapefiles.Rnw:315-316 (eval = FALSE) ################################################### ## curvegroup <- lapply(out, function(z) { do.call("superimpose", z)}) ################################################### ### code chunk number 14: shapefiles.Rnw:359-363 (eval = FALSE) ################################################### ## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) ## dat <- x@data ## for(i in seq(nrow(dat))) ## out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) ################################################### ### code chunk number 15: shapefiles.Rnw:388-390 ################################################### getOption("SweaveHooks")[["fig"]]() data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") ################################################### ### code chunk number 16: shapefiles.Rnw:403-405 ################################################### getOption("SweaveHooks")[["fig"]]() data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") ################################################### ### code chunk number 17: shapefiles.Rnw:441-444 (eval = FALSE) ################################################### ## regions <- slot(x, "polygons") ## regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) ## windows <- lapply(regions, as.owin) 
################################################### ### code chunk number 18: shapefiles.Rnw:449-450 (eval = FALSE) ################################################### ## te <- tess(tiles=windows) ################################################### ### code chunk number 19: shapefiles.Rnw:490-491 (eval = FALSE) ################################################### ## y <- as(x, "SpatialPolygons") ################################################### ### code chunk number 20: shapefiles.Rnw:501-505 (eval = FALSE) ################################################### ## cp <- as(columbus, "SpatialPolygons") ## cregions <- slot(cp, "polygons") ## cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) ## cwindows <- lapply(cregions, as.owin) ################################################### ### code chunk number 21: shapefiles.Rnw:515-517 (eval = FALSE) ################################################### ## ch <- hyperframe(window=cwindows) ## ch <- cbind.hyperframe(ch, columbus@data) ################################################### ### code chunk number 22: shapefiles.Rnw:537-539 (eval = FALSE) ################################################### ## y <- as(x, "im") ## ylist <- lapply(slot(x, "data"), function(z, y) { y[,] <- z; y }, y=y) spatstat/inst/doc/datasets.pdf0000644000176200001440001650240013624161264016157 0ustar liggesusers%PDF-1.5 % 17 0 obj << /Length 1687 /Filter /FlateDecode >> stream xڽXIoFW95CE ("P@%D" vm(AfϫՋz0& /B%ST]>)]:m[h=v5|65N-N~+)S ͧ`m‡m(32ۖ&+Tipr_j} nqH&}+*-Lr'G5 +}sC'Ҟ xz峂)|מa10e*}P:H0g'7swR2d 6C/ި"Cj`}Rt` #wAz:cD4&|/m>5# .y.iM7CT(0 F=LN%Ef?/F;H͇4 IuQCKSƩ,KR aX#xh&sDh p» $ !Ie3}LdqwirXs5@gbΊ0y#ARgZG5YLp0. Qd,`r/MǑD͠ ]]+$Nx͎Mlt|&L `TwJۑ0V荣XnRK3?n3'S-Rv+M5zz-U 27# pۆ=_SKld=s^fd- Ա@ "ʴ቉@ ;<%na0ًw[AM> AcB )6{YEJ.c0 9^~{d~i'_/"$*wL25\0q;[!f?j9Jb1䅡H +)u5F,x|ġGs#[|S{QhF$@e!i$c^m܌`"J:G[46\ԑxBڵ>ep_yV.?(<3 :t G lNiH3k{@TTÖ]&)*@{n! 
g$TM/^?C \ .mz{d=?sbawFixCM+ H8zǙ=0 uKGgm|%ի=gZx9δCjA?v >#Qs'8*b*yߣ.Kb\#m.ѻ$p/ nb-E=Va6<u$rr6/4p6ȑ{LL(rAhfe_,Zfn*;]2 `b$C#K)(bQxޱE{%? nsBz4ext>ՉaeFWpmR՝lki=r.l9"-%\ CZ!e^_K@ga L957xa4.ߥrpzN.!cdx 508T㻡dJ~i^(9y#h ct3 TH'tX O]׼0"DYM(>+}4Q1L!\C9^Y endstream endobj 32 0 obj << /Length 3885 /Filter /FlateDecode >> stream xڵ]Kw"7Wz?f7IwIe18sz' T'>}E`Cf>ϔAbiW?{2o^O;2a6)-g?³%u3zbΐf~O)V"o}i>,=Ÿ3mPE`F9d,^{/p1k{W.S-ܭ9nŐZ b(Op ~i ö v(-YiG".j h#@f#lS^*# is]7z@@i9AHh҉cr]0 0JBYOepD8Gޝdľ :/ &12s3y|> 0+Da'Gf`!@cI,qJh/`$QX5bRDF RVa4h"+,=ea`kQП,U0!E+k%Zgj7AQГQ"o$oݭp//d`e>$BSS*]hpZ*'SuS2Xzag:nlok dx K%4%k|`Y!p?zr<Pɥ4>ۙއg 1Hކg|StQ7v($ *쓥ɷ~.DpuiS9E2b_@u )b<&.sAlVr82P&s|nM D WC]. #NsQd2E )sQdľ B*(}(!3xl0Ef#-hZ>+0q}vLVg9f}9 E&ӲЁL#%e;Kdľ QˁQ|}L&h^?;/Rg]`}׫S_Q.6#1_h1ˠՏ[fxe[QDF 2Sp$ F gD S)9wkG8tZZuMt(!@BȈ}\*tƀ)ݞ'[?ff88r,UU2@6Sy 竏 _`'k4\t(aބ)d$VJ$!$p%笛Mp֢܏;h FbQELy| a>ى!ԀDz2F͛mڶbó7es ]1BĿ^*>H_7ў;SD.Rl$/biP{æ cqHy= ;$h]Ȃ_B(i {}UC[Bm/ `_`p5o'!,B HF PN!DФHT^C`{,8ƆJV2AO.$2* Ml*G [Q=KFl V|@[uBY { &Q4YkN<`\} E/ A(Qp0,,F\dľ *ꤐ0WN\e"<\J(L3χ/]0~ކdžB3u~t?@ߙ:$ n`u sڵP6ݪ8ORJU@2 9邴gw̓_t#2=HR ?jUjiiP$̟\صnǦ˝]\KPNcP<3i9QxTeu9C!{m^vo"/䢪 k2?|6l!Wܘ"[X$#dɗr`cWY5-CI˘[)KOxsPyz:[aLv\+s^Sz:@A;2߱-]GsO ǭdݖz5Y5UU .l=t`W/,;2U"#dPnh-;Z"a&1gSZp]|9Cg׀8rZ"#dT2T 2MBCB@P+Q.s~(ϏЏJI!%)E,Z""DAP넒]Qq5~Zb0DФ[RK181njHA%cR/ C:4(DXuvDDUb_cI:h᭰[+v2UD"2#e-be`ApVĀqczO37nD'.]@V;cz#nd2Ri~VI\_ץ8¸>*JڂKYHT*$G}ǟd`֍+zH @aaxid›se,Md;|b ą?$rJ"uar|=@3Mo:Xt1]6p|܍zTUoeR",/ *-tdľ _C 쓝vɑ_[OLByBZs=bnݢ`!큃yǮ2}-D#]p]skbzesm?ry'ۘ ^fyv&9?oCcipϛmS+>9l> {' d[B*t]]+Ajp6Ӻne۞m6;;v}8ȁ$aEDRܗm7k.L,5ERb7<ĠEvEτ\n= v/Lq5N^{k:AYJ\n;" endstream endobj 2 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 98 98] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 35 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJG endstream endobj 1 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/hexagon.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 36 0 R /BBox [0 0 98 98] /Resources << /ProcSet [ /PDF ] /ExtGState << 
/R7 37 0 R >>>> /Length 118 /Filter /FlateDecode >> stream x-1As^1/@`X^` 2E0T YA=qNOc?4M 9,T;SvzI!ŕehV)OkbJ#S|8xh+] b endstream endobj 4 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 38 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 3 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/irregpoly.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 39 0 R /BBox [0 0 226 144] /Resources << /ProcSet [ /PDF ] /ExtGState << /R7 40 0 R >>>> /Length 311 /Filter /FlateDecode >> stream xm=n1 =O0?}n #EJH6@ob%^EsMo? 'P*fvjqp}]",D <qe]X/pRiuS\ *qX2B*XkӜ\lDU1J卼PX]rrZ0CP$!7D,(ƃ[Q5C2)RHQ5A70=x"EuZ1m=@4)YJ9VL,6}9f,q^ѕ:'1V8'3&R[UXߑX | endstream endobj 5 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 41 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 6 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 42 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 7 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 43 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 8 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 44 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 9 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 45 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 10 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 46 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 11 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 
0 0 1 0 0] /Resources 47 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 12 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 98 98] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 48 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJG endstream endobj 13 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 49 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 14 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 50 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 28 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 98 98] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 51 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJG endstream endobj 29 0 obj << /Type /XObject /Subtype /Form /BBox [0 0 225.999 144] /FormType 1 /Matrix [1 0 0 1 0 0] /Resources 52 0 R /Length 29 /Filter /FlateDecode >> stream x+2T0B˥kJN endstream endobj 56 0 obj << /Length 2422 /Filter /FlateDecode >> stream xZn#}Wy$`m;f$ AC[jBtqq*VrK= vH)֍x}ݧLtʤԓ$/Upd9iV[Sp;|՟{' hh2S¿T",W?LIlYL$vɿyt|Y&#pd=o Yw;I09)6lnV;6vƿı7<@}{}AsmKKFĮ֌\m|_؈}[fSz5cY~ܗ\vã ~f闙˦U}U'mM$=qNx$b 7^$&"a{?2uya64i-Ms^2x#R;X֤ CvL~!ЀP6'N-hb̕BZk~*Q9ޯy4 ~EMjyȳJMFQZA?|b)ik fS"keTh _G`߂=~AZGwCE)as=L :.xo ϱ Fh2}7̼BC}/9͖M+. _gkw&,_3e-]VSrԺ42+^F*ڵhAo;~kS@j^ˉ֪t$JPTO*i\f6ڤx٣3E 4%p@D%c?^D=`d$[/,^^5x_]D+a(7Y0@E`ɫvޡw]NA~]qG72<ω8J#;CjwMqJ-8T?%@9qI$=*g+_m4Ϟ!&LڍCL━̃hᩊu5ׅSc` LYE7\Wh~ 5ٝU4$"W#91[je@a<m\T. 
=t*@YOncW椌j .6 B[?bRԵXB˹)eAshĉ;(#0hJ<Ë)}"n"Ub095\,G2,{f3p3wHa==!3V3 |/+{}gx2)CUjrx=@`鷑|<-(j^I*MF*jZ4+NvE8 Mbu>1o ֲq&I_]čԲB~>'~͕(A.<[3pYl[]BIBTmXH5u(ŻYB]AЌ6Ia6_yRx-Wt&jn ϲ-3a"hKexo\ NqE[Yk{pY¸KZ3Ow`Tp`uWզ["onO>([I]5N4wZwJ ;#Ch:!Hn݆w*>lMf[W,/䀈XoeՂ@]ݞ6hso[ߣ:+;[@eweQooopLЎ)-y5E!#Fۈ*f@-To84Va{:> RG[Zk77Q)ǹ*j :YA`ip%7gڦ$}rfB+zY0} W8Q> stream xڅUێ0}WD"؎sA mEXihҦ=ss 2sf<>s>:pJ8[$̓[|_2.#ɔ|Q,rqs@'LJ,Ir| H; F?YO7N(v: ?GŶЍX($bkmi5|jB<' &k~G7# s A82L&GKPvE(xeE|DAr a×ĩGȧKjND^p/ax%T=_w'䨖Yc^ˑ.]2,- 6Yp 3-Հ >}8R6ziK Wܭ職6{ bE"bӦr#յU.-a2Ci|~kj0j Bpa'k*1I=Pw>"Y =iG“GSТ,>b1iCk>)=ߠƒj2e8=PS YZ|E^lj|`!1kGҙR]Y_~AtPvn^._he z = endstream endobj 53 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-005.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 65 0 R /BBox [0 0 396 216] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 66 0 R/F2 67 0 R/F3 68 0 R>> /ExtGState << >>/ColorSpace << /sRGB 69 0 R >>>> /Length 4574 /Filter /FlateDecode >> stream x\K.7ϯ%l: $$B2A$ >uαC8ro*s.?>ϿǷ뼮o.7GppQҎOǯǧo~ܡ3WW_a6OY3^8r|?w/b_O[9׿p-y|QϑTYufCNo})ۓcW2֛p83#szxXC9$8C0TφŇΞtOZ҃yØZ 8szH&wA øSk e55TG:0rqp?R9k6`\[ӹ҃)33ūv'=>LDiytg2y\(f!t` @8cD)CH2xH2#E(^fzLB#er"q !=9XoE25+Q+n҃m"0KR5Y섗JkZ;L/q7$(dMzڷI>"#ʝH־"D}kQ wĎҁe"ڨBvZMAc]h.ss'C{ZC攔/XlC{}ZROKbf$Y ^ͬ!i?mDwly#}(&J-5*x=`X2E+ /p Y/ļ=^OHn )%!Яx>> ʌ]t֛_u rxh5wHFb3 ~  ./=^O\PbAXV}/xIӚ64#kj==^OоQN(@5f *C{tQ (<}.7kf&VE88 mvK-RlV4ʯSHv7g4o҃͵: rm08Іt`rg(Zb={nNnvVrRnSt`rz :¶C I u:QR_s -= Z3r ;J͑Ln4,b&X7D7">]Vd7(V,̻z72EhFO{ZjB1Ĉq%G񄄶{5BiaC-K'n$r*,#'k '4u㉨" )ӀF i Ii8:5EP:d/Ivfш7JeYM{ׇ<.>|oNXe-t^umn'X8Fk`TVQzdUuܾ6%9\?ah:9bV+[njY%/9tF knt=^g!_6%lu ^u 4dRnEkFg9my on=Sd!dO{! 
=^ÑL:"_ъPT%f7x5,ƨ:Fkc)˲t`o෮?7+2BT*}Ep, 7_e`eoDP#<B;j5 to`ӺI<X@UM:z7stnLyTe7uP> -_Q-CW=҇W[g0I72y+չ&\a!X"ʜ@yJt&=xq[@,mIMz⶗Iܶ'_ks&t`ps-&^"`v; ?pO?f;smWvmpCu dFqnd`d ܉iKzO0fi nh mHҽ^еsgڲپٶq%ϙ nI2/6V^~IZErwtb<~f0sb@Ήa )Gl-]/g=e~AcvשY|\cm)pa\:6K+k!w5<8 3me1%'3SזƫsS3[5Dg!툮zw΋mivKw[f:1S+KՍs ԒqH9wWrpzoRgvz a9m`>;ZgO;q6TCqg&;UYv49;{m(vM&=cÕoa*}oTL714`.iBFy|c[ޑ'tp-q S1́#+[^ua[F2Gb†#~}13X ){LF q}#U%qIFM\5a*(K^{LmI \['"{d;ގc,Nw;#;oGqx'k@/oj5ζnx+l)U7pJWy2Ͷ$o81f6=xc+߱i: -{[9tvV7ͼ;_e)ui6|Ԗwfۨ3pm/g_t G_!~;N#*Ɏ׷$nnY9I+y .wR֤ĭeMmC,6oR`Xj3ܰ_İᎶ\puH?<ҮwC|ܵ V¥y_[F lVSI;,wǡc)a;dKP}Cmiv kN֖W\P?ncqĐpovz1> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 60 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-007.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 72 0 R /BBox [0 0 504 324] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 73 0 R/F2 74 0 R/F3 75 0 R>> /ExtGState << >>/ColorSpace << /sRGB 76 0 R >>>> /Length 2429 /Filter /FlateDecode >> stream x[M\ ϯxn699r,u=>hwF;=EE>9~8p#}r_ӗ[N9_~t!5p8kI(6/gTף_>XWZ>4=>7'xG>yp?ݦ&VgKVHߝ~χHZ +X۟MְtMSŬh^V5=(+-o텶uQVRGϩ֎5vm/JǬ=UשosT9i/.f9`)uj=Ʊ/6~a,[7F|u`u4x-RҪ'>ӹ p95> N~z̈ ˃ 5}pk Z5KjZr3l`No3AWn*.;Z `v"L0y$ \m#UwE] qu+qم[ ̏rmb4Hc#$`+eVM n C|N+5Mg(\ z#=jx ,^xbTDIPC79\6ŗŏF/e-MVm}#NpDҏ1CP$qZc(x9Ǹ!0oO.5ø!r~Q)?)i\ܤscaru1}G2N4?{?}Bmj,7]B]`x- gvgy::VHiEWsvd#~v'ULŁV 4T3ݮ[אP ,BN_5W{>6K(/ZlvM99SX4v#3p0=.P6/ +N|kGnMz;FI)w]SE-lNJe4&3;+ϝH;VV*Hk8 ҃DOeus5&Qщ^(gR މLCIį}J 4.;c w̨ mc {SqTH4)ɮՆ} X='\7w6ةj:m/ѐ8 
rb%qEyB+ٞu)nGQ϶8=:D̝-VK@~0Vi΄x'Hzb̜\l0F"A`"y-x]A$$ T"W԰hz6o]w*pzDhXU /fXD`'E٩oM3pZ9k`>۱H>;|rsgZk<1k۩ni$e#xP3L(E|MNP,)0KlQJ#/d Ǡ:8G3t|!2*n)|sՓdWao_쁊T2kUD:`p5Vg3I(V+#G!.[Ox?jsoh齪ٲ`R4_ai4]< %ԍq|Mw`Fv7?z;|MXC :eMRz0| J T"͊dȲ`ڝ}#4Z@<}Wܬ3& /M(E`Ngc buf[M_o^SBfTBWUW G~E(Vvuf>O!&QMg+} —fMrrKŲ|ӖK\hڬՊ vgS၊g#~>"xbP<+(fD/w)7$5O~+%y^żmvʢZV1@#d$,턞Fva+g62gcј;L)^mg=)܄y7ԠP ΃Tgh㻴j$@y}>n/?p:9աluzy,X]G~uuk|Jo== endstream endobj 78 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 83 0 obj << /Length 725 /Filter /FlateDecode >> stream xڕT[o0~﯈xJ4qH mb 4hmҵϹƊ@kܾciYau|ǽl#`5+IAa^͡hN3NPa;l퀀ɮh<՗9TӬΡ% EJ3A|7XWVL֙o_+$or6򢶣j1=X;qv$Ux3m5zZDg?Gk7ٟrpw#nQx^lf+ľ endstream endobj 61 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-009.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 84 0 R /BBox [0 0 453 288] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 85 0 R/F2 86 0 R/F3 87 0 R>> /ExtGState << >>/ColorSpace << /sRGB 88 0 R >>>> /Length 2293 /Filter /FlateDecode >> stream xYˎ%G߯%l~l=$KF2 bdc=%sΉUQ2@>yՖ?oO/^/o_o17O/~]j`_L9~$cuʡ^V}= t+j+cзm>ƿ{#ƿ[v_?q۟ni ?ޒ}}+q6BY˭ ĸB!z\Z n+{)_Y b0YҴl|'|3C'RLF5tNvZ){eVSB(UZDO6fRR;e,B(7 LcoQT11ЛVݳ sjxjEո\T쬸IO2t =)2f!_-id{dǁ'd=xSxYS0B J愂d$iG@D\&Y=q" dWDQED=`qSt`˪̽ Cqs' cA,7)"uKsJk o_ۮ"39e6w=G\q Gvg{UH+s+x x ly@j,"G㑠0'=v睉Mܷ/˴]-3wrNbE/P偳"ϭ=DM7:YstV><0SтWu~y|H+x(U3Z)TWrέVՃU`ʹǢ, z 4 YP([){hTDžs?>VZ4lw|\'Ԭ1w}:w`6d9۳>쩳h3bgZԿ"O9 8u2g8>EKoO 
OlR/ݦF:*h?q{-f3}1]AǍ˹7Y8 endstream endobj 90 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 79 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-010.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 91 0 R /BBox [0 0 453 288] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 92 0 R>> /ExtGState << >>/ColorSpace << /sRGB 93 0 R >>>> /Length 40817 /Filter /FlateDecode >> stream xe_OQ͸S=m@$kii'v`{{Oc ƪU_SCwQEGO??ÏgJGo>?r{lG?o_ȶ?[i?D?$=:H??OdMG-g?_3pLj"t+t+ ~y.Ľ~E ەᚗMI3 Ӥ"2%“.Ij(.xPG`ic2N`W(kp۪(梺S:JM›ȃOYǃr<.a'( lwFzrPG n>sel бCT~uD,uV[NɇJ9 Q,'VNm̰ب"ǃ_~#9Nт;Տ6/Du> ,3ʼ pr"x{c> Atj*>d8171A'6b߮CNW[B'c^DIą b&tQĊv떎1=9w>w "UodETŃ|.]PLO>ALO "ČČ'Yqu>+ 1y= W[x+*`g,:hwT^m_ns&ua6)w{p#>4Bez'+b_20>X҅Bq.Pxq3q,_D-A.{sw ;g} 1mۛZ#gjSoF?I0{ghwh{OF[?MN;쿟鎟 ]%nj ?Wy_BWcd} {|YUZcݼJ7;wJ]<-Цd,}eWdƌ=ݾ27c7?_NF˖2f#5Ο?OD˜P~9f o)w4l^eIy~e Cl=֛T`7D?h7DAƖ]䇍gCf}aWmVo.#yPc=[yȓ /j88oq};=kDlQΉ둱eftf}nQ0?9Sm P;$kL# _&hRY,|6eq0tTa~8K| 뇥?>qxiC;c!PCЄ1Lmh4ި;=,tXGΪa 'i&,pf#Ce.sEVכirTon֟bOȭZ@o)LJV9^&/Ikk)gX'OƟ6mpƿgHG9Y7+ra|\b~q>9Al~^6l~86Ϳ2~)l%?m}i#ΖPx|-JXxZO&%CIML^F{9H,oc2P/0*c(WE觢>L,;L^G Yrӯ1'?b}|dn^$u+_g7o5`{XOSOɋyE[zayzVnUvOy:r/^>,. 
F^?S 9YɉMnKfn])̽k̼XWQYWvl+X ܼmZKlgY/K,9ʗ3Jp{6"ɕ (SYܚvyOn[a;7OeY͝#)}1^<{Up򰳣g+Y|f"wxߛg8Lݺ*6=.nPYh=TE>\,'r}`)\\s0Tc}Ų_ycd3j0MiD«E,i@nf u }|^fxx^c3377eV~0z5] }.7AnaoofV/٭(9w,e]ΕYu9I.dEeտ^$'~2ʹ~;v)HfW\qx] ,aZ>ns/ۅl,A`ѯw 0p?w2>w#wZ>͹X}~p)_.V2~oX+G.W}Y{p^:^nqxq^<͸/l&vzS0F7/S`aiʏ%k/.=O^U+CŢ=A#rj#ja?}|싵nwXQuC߂0jdF)a 'a۸: C=қd~Q_a诇1_WJߛr -q9Cx~s_y"5)xs$,z$̫Qз~+>^F^2Pӕ)}ꭁ7{7&2*v3a{3,-^y?F,tپ2YV.g>E:P~Ïb#qx }}j7{^ͳg/ƅ7j?gw397/+Wo&2o{W3ԟ쏬|k|/o[Us㕱K7;u1^%._~ -,_ŁU0_=1>g }?yFyo`c><{η /޾>>56}~fU}/pͷ<{[ӓ=X#a=~_R/6k=5;S?$f'ޤ=PgY3ȃO}RXbvC z5yBXM?v.SvwOiW\zm}e9lͭO?m}P'VlOWڣ_8_(_xGn>l?9W+ρ&j-\l*pUrZ]^f9:g޿/qspїKn?b`a?x\+YYyxx~kq?UXۏoR eRaoK~pp1n|;0]qSu.mD'8O_3 ?oyAOgf{O8/swRIXڻWdiߑe}AXa5FO5$>n.ߩ᚜~3}O0~C|O!>U/Ns"U?ٿQgQ.o9Gd6~c֯B0P2p|o?yhbo=OUM8sV*˜*WxwGU1;q&ԟXL5TZd k}بozn+7ɋagC 1|<԰c|j|hr1d,ߕg|5x.S}r^0?Y/"C?s78?EysG|Qo? 9㼵`XxJ}vds HG8_|U ~ +΋=\.|Ow:g3C=sz/|/jz__ k؊>:΋C[Ϧe2CrMl?m<(v, G=3}7=H?끦 KCSEWE_T]8>iQ0ƿ-pq=pzCzϜ^gMM;YYlO-_[R2z۳>>sa? <=L#p2~п[+8S}hE?3nyg ֿ:A2,gɶ~>wҿr=}-;45y_ Cvyݟg~j738R 6^v3=|q~xca|M XoF[ {c.ù4}|?'r~׮SRKؿϵl?w/0M·|s/83o7yϝ9l~T/pac/p˙S㧼sV?>y_OnyW 'Ɩ>_dŽ͇ۺkKE'|~|oxͽ|ɟg,/3NJp ?LW,?s G@ |_co8E=7#c~101<7#B?=ޜ-?>oX^?KїC< rd9_ʔ/KWE'^|~X8[>U=gVb$ثnا)m>z.̧oF߫h褯N+/YFVqCU_뼹\w?^Tݸ Ս΋{o<׿^+7k/>(p~V9._P__>)=L߿7{ 7Z/yY}aG17|_)7+ Ͱo Ov}#}D:n8ec\?k̟ɿoyxY|ݬ_ԧ-=foT6;[+c- ۄ[O* dcg {"ϯ6nfO1i`?Q~逋یsf:yd7؛zyso}=Y7cEcovnjyY|ϊ}3*ؿ)({sLE_+_=/c>}3'.߯3_g*~|ы!?d/POc|p' /caE/ǢqD{I C8oسZye5Xg/F{ekY7N.<ߌͺqs?/ljHuY 1?F7D_tS#䭿~7y9쿯~^ ?B~Cȯ:xח?8X~Gopqn1lO.V /} ꌟ̟;,GZzl0 C|¹[LO/~oOXY~߲`Oy-?|x7{~4m*?/Sy+ di/XzodߎˏܮS>{swӊ/7>3ܗ} %p"F?O5&ov!zQȲg~Ù^?>켜\]> ,?WHWY˭ў0=C}b5:{8SMdOohOs55RKAl' ]儳=9=:3i>79ݾW[:f_y(OǾ'+}=Ց߇QxgPz`7-.O'ulo./ ,E?6x߼,a׋i(r,O7Gu~BJn-O#c/ѿ:rO|VK'371 \wL7aeCTGo;z3.-!,͹,gu,}HuC8W@ |_'㾮}D}m< <=pG;^^sL/ϯܬ|Ri䗜`(<3_C$7/( ҇.)'qC+V/ ܢߔ B9]OqzdzC>҇ W0^L: D^f gƕe]oo3ϜVrgwtxc7 |Jau o!DŽ}B>ZGW~_n-w|dW:t1O=/]S~|+Swӟ_7> ۋ\\g3{bl?dc^>csK󧎼/pʳ0~/_ut+7;W=?r!,G9ʘ̌3 ~2q-r;f~~ۊ |rȳg27>c ~s͡= />3ǾtN͡|{]?Zb}X~ gp*ӗw8FYlgS-;cK]~}͵{V޶59sY}_QQ̟s<7)sΖcy=5^/apOП?S)ʛmǐax_ex@Q>'~2!zaH*(ֈA"[M#0au 7A2"CF8?:eRG}= 
.B7B資=<qQb}_/t0XXF}/&P0nyginb<㧼fz3B>lT;{*ޣ 4kl_7=6^o{o/9~,_ʸ2 _Xg}_3=YS.߉9`#Y?^ͩ^Տ\[g'W;ѿpOvNuڣBq.) g|_)/o2g?ۻw+/rUO|i+A9yѧ?kd'ߟ.B}ӕL}ߠ m؈26/1.7E -س |_07EKlt#te:wΧrP>8:c~Ffl,>iFa=_0%q{^){X3jf5׌kmh5*E\kx8c \2;H˷՟G8lﳴ/?Y8qg:Gg8B3>:c`iuGuGxog8 \Q_|[x~0'v{}uZn>{zRMxpb}_ .=ƫޝ'p|g򖟤 Ԍ~ zsYc|:0+g8{޽0߯沣7?WWx73z,z*Z~"K`;??x(Z& 9}xr[~ckcr;w7^uT L$3Ϋ 2JRz[BP u578(9\%&CĄ|XNn} p^v}.qG7aUFVtQE]KQJw8vs0TJ7K'ynV:Y7O@Ů^ rjN#V9X),Jo )LwT+ lBlA WӚbțX &h~\\.eo8PByKCREˇ u?k˱dr![Tp>Xr-4您Aق)4 2-3b\˴4yV4)z[9rp\fq i9naV?UT"lfWF[}E|: :.E,=Tm1'wpΓD CJphaHC'vx*m! w,fD%gPd=VwHQ8Cyצ'VdѶ!OMӆՋNc.'}*5:l5$gXr]XLUy:&gI-6U6Q6_¦*`_do<վyuq׈=ǚߙգMJN%z:Z\aSKM̾dc%] T5 C:sA}M[=iEiuOD|*mPRgkoQRRkoƹ#vDžMM8/<@eTx+{ӈKźy񙕏XC8hpħa,ӈNTC[ĚCTV=Fa ο_~Kz6pY}F,D O#ʪy#bl!*uq O2Yݏ_i᎞Z߶|c`ՂMD||Wא gġ1Buz>'GX5O F0Δ`i$#z ?y 7}X8Ql#fሃZ9$>qn3[),خ(*ދ>:όq\ x"D|<,">cVlo`TkMa wfe'*Ǭ9V}ȅ>rQcE\*,Bv ml45r2c_r%3"Azux>F>n{-4}3&?sayþ. ?A]{c;lODO$\ б_lP꫾'*5:=6'*= nR|'t_ZjOcݴSMU>RU֝ ``&wCnXrH}hZs=/bPYGXvW3$.  MEUoT#Cҁ֝}g~f]CzdpFVGL'ky Da G@lGHM@GJIY ;#vwE*6C ۑe4âpvM8 8#8<~d : ŎuImAZ>إ-:>}8:cdD~/2Y lGJx@oXND^:~CS%ug FmEѶEotEk{xzPrkE-Wcß[3^"5f],{d?zC]abraa]yE( ~6)=o$*g>?#z=m}g~~qɫXrq<b0-??g>`xH,Ga1]Sӱf<Ԁվ1lߘ;|t)˅pDlaolcFq,t٫gmkzhO{q iȩBh;ꨥp\v*( aoA)H==̶8kXSJn|dSm9i,6G5QӁ'vA 1v\->6opd/S_ݖd;UZ+ZWT!&g^>MT_}#36|^ׅv 12@K7T]^PF_m>}:}cڶed>}e|g~ x.U6o :,*Yţ*}`-^v"\wsa6W6x[|3νŷS^z:s{LwAwtR8",;etC7C46]M6ކބf>_9T86bu-+QMsS !ٱƘ? 
tLWj>zP2kYPXd;6"sX) >yWWB[1Bi!OAMxXYU:ǼfGdУBske5][.Z@Px-q,\A8c c+ztK~Û(oPFV&6pF,MNQ8EᡙzZC2plTʣ<DvrQ YF-tzN)fHF9EP(6*|7j-+Lw)<*5xaVY ^|1zUoזcZS1dM >ܠ3ac8">=փ[jZ-R%_ ]13UAXгdLk*yk,+G]}.&JǥM\Q{Ʋԓ찎谸h,^k,T:4[~&<#>S^.\*{#[u@q>P^PG e _uq V]eTGXG׃zUĬޑEW'[}]{D}^ޞ߭o!QQ8MlQ5dd\%M\d͵xM#ƩVջ<]2JЀgjH#py ?SaMFMs/OG+7ujG"r%⺅n y݄>Jˠ_s#B44!QP)Ʋ@HNO5{b6; ʘpi ֬q̴_8n >Q(ΰu'*gXBhV+˯#.rՠ#7*ɠ'ZvB@93d$bਛ՚o˼Q1p FXP" g:dT<YtcۅboEY ڟhCP]tk(,*Ota*`:ro@/u.L@YMGIffsYV?(jVZqC9pa8 p( Vy?K+țZAIDhm_Cc8;dXm~A\P-vf.E:}fUl0N>ـ}mQ>F Gɪkx)K^c 8ThQ9\z@r/v@]At/pW37'N#IO6p@"99!ؒ1j[Fuٖ爩za,o;Zdx-y aA+gzwxQn&xH+eƕcXvrdrg'L0khWDzsjwlR@K@*+wdyXdPӳ_($yտܠkZ}Rkv^itX.:.Kq .Q2 y9~ҨV 8prk1wPbXA]bQ9ʇ5T$lH Pt:᜗]\1C%UmOY] J̀G$zAus]}KF, 34g[>pv#gewz~"V: wߘ-UψW69bӨpS8XoP`u|Tz˾CFl9dJJi% b'/4SI w!ԅ煩^)̤1NՅ֐0F|qX> ^tg߈b<1SG}^ ?{G8150Ӆ2y~d5<9ᅣ7Qy!j%6AGV^y(Oݚ}(`I!*1Pث۰Zw5݉Y .M83L9typP>PJ-U/?Zo\Q!wL3u @.C^XwUUYԎ{Vϒ꿫vkBgUuuPhnmڤ!ۤE k ]RB"=i~&_ClVuUOeŮNS2ЅeZJw!k4C#C/=༅W,nl&LXV<KǼW-} {i%*Hq4RodOѢkٺa^X%!t _H&\8%|!@[^X%rs߭6hg݄҅ךM`/32fmCO^bsC3uS#>4N1'>2Y4iH~u~gU>Ue;buX٬2n2-bAX^Xu7oDš=bAX "]vbuV7ɦN?p[+<,I~J˚]#u [~bVo&҅OBHO;"彑݈ !=Ή%j@=:zqK/µ^)IFr#yjGoɃ'GL/`c8C.N8m{9kD-}K`>ps@.v?˅= P0X.^&VDla"!iV>?,fu^0kQ^*g&`C{zaXS'jhC{PJzu3hR[uˆE;4,/e\G)m׀!k 1Ԃ# p`EDZuX~( }|!M,[sٌ7!lJc-`Mt>:Jf-bHԑZJ%]4\FjhJ>L| 6h4c` qdSV1GolCeSqp bʱ@Ra|WmD[KD[cͪLǫ XKQw!>]Gаc؅H#~` .>4u s,暮P`OWqqy.? Ǫ1ǂZ^F}! Z ضB^ʶWlAXW 9omqF!/}fd v//C. 
'ÖBhf[ 47VY# E,\:a5pXLw(g8׏eG,ztN}C7pۄ/3\cp;;J~BĶ@  V|6{LB\8/T<H B±BTfA4C~7V%LILm SA]ia]uIqF;w~㴵sY[]wG=j ]GG=*i}y>vApPJr`{#*ٞ ,@4}Az:[ dNďM.qKZʜd`dg_EV[OSuWmߞ!¼/'Cx gb^a{'_b,ih|'~?'~?ɟOhQ~?9¿O~?ɟ~Glkgں)6N` Hy}_,G^OC bc4!\OC7FٺJKs,=x[~=}1~pFоGޜ_GU?6!% }cX!s1`\|'Xlp~/n wğɣeWnV>_Nś'z~Ȟ~>o^V׉mc8v&@aC]VM&ꢊԖL^RԬGxXM=,.W}%%tg/򷰞e ge>}/`a]3]<Mqf8]O8 \rWyt$͛ %5)ЬrRWwOWtNo` fHLqw|;<+@c=ڟۏڧgqN7|뾹 _7ǿ{ULݬX:OQMO Bz%|U?4/_=_[3CW҈Wu3l?1~߂+\mxd3џ_{< Uݡ~TW}Kow#+g;X屏|8P/,w}>q[/)03GVg\o}0Ϗ>r GOzӟcC_[ }C~BdoU16546=ԯ=\O2#7v11^td<,>m=q~}8[zm}u8]>;ܭ+,E"~{j?dlO }}0xW?c~+O X~jxӗ?N8_.f߳9˟_[Vv8]7Ԥ\kLgmc~3W1޿a^_|ŵdߧ9[zRIgYȇu~UUcx>fRs&MEgs爷'Ox*95RdsϺSh?)ò,EKxyˣ~GyGJa >e;t|+|RXƛPחXJ᳎Ke`Wߎ)7rZpٿ8zyYj`}GI諑e~9U< 6{GY$e0ʓ=~kӃp.}~3#z8ju#Oiپdž+_C)_r~}ߙ_?_R=ipv3p/ᙦY/qD;9`1v/rsj1>)lil/ޘ0/Аy~̗WiRGdݿ8<-\SY$$~uW~m-5t.f+=~={_1.Qk􏦯 ' 1M;mha}P&F(Oy<'cO&o鋊П»w1{yRߚWyGKC![5}O b&Ydf; +OK ~:8ȋ>=Y#<ͼP|\'y ӕ3lAfybpƼp1{8"~LaτnlNf<,Cc,Y+̸>K]wΧU_-N.\S %5ӌǒTfF06iu%ez(!N% .Le<'Oif,l1pAGMKQòehދKQ#;ߊXK85ߊ,7i:pVD:~co;%=5]?C~埿Whp/ XU '3=XVͻ3BUnRwHx"QȟZ͇Ky] pE}5{q|;9ߔˣUc,6l>a oOW캃4+~ GO'-?f~{m|U %).5ƧS@;{'pB Ajo.;{xCp} of}XI_EUOeݨEPpky[[xŴxr4ݙ6/>ÈG+;GT[Y>+-Y˯!o@]PFWgqaP Sub$..?kĄީ^ZTL~A+f8ɭ ;;'δȈ Ӵ Wg.퇿 6 _gcf0c2zwœ—#bxCM-tzG&'&5D5qa{grD2d^Y-8igEifVb +;ik!O!S5W!M]mcA'f >] Z8a~v'-j5L_|/_=Y>>?c>vVvb~^jQ`~;Iryp1^[xhxn/c,z`Y~/O>!bx^H_OEyCX,V(y`}ulU,͔G}ZQbC-Y/V7V^yQY~cO=[{:LyUCC{y9<>py9쿯~^ɞ]92cy݋ubٞ\/8'׹(w,?Y'y~y{|#糌¾M2=G ";w9.b~xnc_|U+o=;8/p`ίv9[{=s};g b}V\)[@50J h! [i"w􁃍N-c!}~<#@Hr52?ܰ? p\Q}91`#NN!h~;ou<,1>̀ | ߧq)gX'zG`y^֞>ϗ'8W@yapYl<{ya)F(GLSY y"FI/\ݣw@NbE-9#[@9h1AB`C X?֛ @#Hs(7.3y !% yV|lN)KP./R _//bXmmk?a? 
Pʿr-MహvcΕ‭հe ܚقa E) h .5A&6dKcΗ }^-ό3A Y}E0jBfraYv$$3/|!xO/>X֧<)/?_3\TȿZ^ɐgk]t~"W7rQ~o,׆p|\~Rc7{vR-\op?eOf~?_X'|kRo|Η?&65{3O;[yF{c>aF u}1}AK~IQoiw2C-AK3E~sY}B xdt}6Ϧd{U?9u6}8/pA2l?͓7OSu++쟾v?/ߢ~jЗ;y/d+,/}zNnO";з;x WvQ_YV}3kz߾_2]"'jK~+;<0G{K<_T"?|ʿj}Ɠ/^)fj)!)xDZ8?@ۿۿ\q.h4'/>GV k+q+~[^Z?"78<};yARe|T$jd9y_6zkG"9~{THP1;귥#8DfW_Qn}R@ @,+`zب}>zp=Z'ky=~2~_$>}o$p>Y3>6wG}_n?߿x}cCրV_r,cFv??&TͰs^-\ \[ K)ۗdNZG?_ jZÏ7pzY?Mh^,P&tZJY:!>?T_ȯ%&t4A^Aظ9`?~u<)"?z<b WZ3ȲJe%KU`m&U._=N[G&-;QAT%c[e=6\:t̴\7dVIx6+gI,~~} b)e󑤜 t܂`k4@? ?nL9Zv [2xʙ*^e|߸$E}ןWooEm~o.UΠu%nDtcҒL,A]O9ӡM;>#m'灘R8YxbeKN J2fVeS\:Q=1jlblLb6B4̭jl~nq'dlUFMuJ;eƈ({cYJ}sZ:J 3|&qu.lo}aobTk{(41p٦MȮ\zd +1 rAv#ٱ֧zLY)k5yɷh}:VYrIngue.vznK}x,=p>eI^W%8έ`؁|p?Kc; F֋(6Qjk|b8Cvk*N]5ߊ庱[9Z5mVaIᷜ Kr ">[ba愹x|"SМbz#bӶ㷉R1ՈX0ؗ/M#tTl$c@ʝc6f!nКb3pO[hK :KRcxY|8|YQ3a2p{Nlc!ĄDn] ^ƥVl vݡ[>b)CwV*Ky+C^*$ZUy7 Kgp $O_+N/^df*ghݡ2?1s'•2D72:i-)ھ.zp{%/je1;RJؤ.6rVNۜUe!C5n},:#x sKw %Tl/^ є ߎw)IHݙ}p~*WIZ2NCS71ue |FC猡ŮuhyGCr 5eHڡ46-W$~c1x<48H#ouoh@p/-)VÁ;~Now#t#Сb6p_W~~5p߷CHCCVװa7ЧqDUCo/C7$5,׻NnCR}!H]#f!y"ӮѶ@L#t#jwR#to\{*oG7_^W%V|QT~W;p3NGļmmv^HYгS Y7{{!` _pKğw:~o &cn]yx2ҽqyRqb0'FeC㨯 u/W D  :y} .O3?|o/)5#/Kfǹ*oBeeIker5=eFr7Z)ؼXLɱ9?Y닠uȱ)n5٬)?0WS|dE\I-%I`Y6)O.1[V_^2_Ə|g\>N,ﻱkʙp}ܟWSYEYH%٠ <߯*v=S>m=G`y^=|3DH٧PdKC @M)XX$ae:CFOU< y++ўr?kyS!v?>"׫~j'"bGbaDo&tܼ-#kX8,@"qr9'rb{7cUV2"hDI޿Gĉ ե^GQ;DRnn-Y,-eg`|Y,7+K N=0V}Wu *ݟqKux{0y=y I!o&x'= I|RiyͥxXc=mؼ7 ܎O;Yzw Or|>S`oHzt 5֋}<90~. LORj|ϔ%ܦ{o.~s'k9g}^ZyCnVfV־Z@o{0  9'ˁ%e0B%1|?EON%^_=ߴPC=Ha|ʍbKB}a/qo{kT!…O"X|wY_#fϑwD2TflWu o67DG9k9yok/vH#ĕ?g/_,u+A=Z}><\h]B1c S+F]Qm袇OUS2=Y΅_1B*8Lj?38wKTF< cƏ5~88댸|2#213GqȌ(えp "l;K%( Km#;xIN* ;}#zW=vEq mN뫷'3b'E_kBD~gZ"z0 Ov{|_;3Ō ~uN$lĽr]/͓W'ԯl#ؿ9!ڛ`#@̉ȜD zo|r=@ z;cf}dF0`}FxpnoܾqY="bp}qI3ẍ F1N12o#sj3 nާf1|KD/)E\_tWp#p:z[# >O >#){&#u _>YD$}APD+F &w>#NPbTy1.a"~, eF4c}1S}`x΁R(g=D$A8\# tr#YD2#lÜ"0n|r3fSϻl<~+rN&:V/65NmbX!3=w*_WoǹF:\6./ԟ.W#7! 20D@r? 
/ 3 pK'8u#=Ot-{ՀEyb(Nyq=5+3bϋV9i#v%z,x܃-Ci;ۜmKEp=k9|,b,jd˘/^Zxo'oy+gu8Z8L/eJ0rCKat4sd0Le y<;Ozew "sيur\z6I<{94?ܜSVFsr=9}aݩ-΋G;\OU !sC=dqP%fTδ_\޶[TayG*ɰ^ԑsykE>/WC>>-ƍoz+^O#trj~/y~H*'"1x[f059'+򶷴!BWþ!"Wb}DԊ\D?"܎_D̲!Vm[TAC,_vjtǰ!o `̇m{ǥ9KUywOxQT`5t6`Ep/Ey?7'ϧ'uc]p%_%vSǮW0^)֯x?~9cOgO9ȹs^f=,s`?w{.ahc<[z|'q=/ m1z^T'aLJ֏~27拗ߑ1?N>sFx2e2~;tsOd1 1JeO/!y]Oߪ>DjF={~z~y絨=͐H[a<Kwqb7#!gjK'=Q_ GC}Oa(on~`/^x?טzR{i{s=}:_zb/Ӭד^Ծ"}sTQx?u}jw{U\|ԓ'8޺{ܼJ[1ѻ3< f?{I'O_rVy&mw}-G-ENW}Q|i(ּ\X^_WOC䟾(o#bn_DZ?K~~/kyOݯw"O1俆#?p~mϑ%~Ak7 -&\)dzYOr!R"7>OgZ}15rW{T^7ˇ^Z-?1_4Dh˕=DxCc< c{/Oo㏔|r=-~+x(!%VZ҂CbR. >UFJ%ұzQ_`471v II|EdbV/U` pI,Tq6Uzc8nG%Dj SHG3R }kА*r4lwgShuh@:B~1Q,/l<)  k0 q7“(88pAaQ-2qRav h Yni6E5RZwgz&FYDL]hٹk<Ǿ\ܹkvreO9&bvr"s%?͙Z ٟunlTQ9VݸPk_6ֿy'}ieZMKl'/($OWལ`z8RT)<7`Ljuj oO^djK-lKkk1թ1B2OZK*slWjSxjR_RyP3*sy&O}g>~/ Ty -4QKRO=3c嫜笠ogb3yqSռxSLm܂3m\V/JV! V6VdZ 9~q+#go[=x=ZAieW.%;Uv^Ekv k?;6-w|NS忭n+ӊ2xn+ӊrx +ۊⱲ0n+׊x@?V"%lV&mr[V.i%s[ѼV6is[V>i%t[VFmt[1VNm%ZQVVmuZqV^i%v[VfmZVni%w[ѽVvmw[V~m%xXVmZ1Vm%y[QVmy[qVi%z[Viz[Vm%{[ўVk{[Vk%|[VƷk|[1Vηk%}[QVַk}[qVއm%XVm~ZVm%ZVm[V%p{^p{1^%q{Q^q{q^%r{^&r{^.%s{^6s{^>%t{^Ft{1)[J)-q |%*F/R;倞EцkIqp M}<^1#I8BGay"bK8 Gw$9"CGD)"X)D˻8?ZLH2tqJx$,є"rT^8/ m ?Y1Dl!zdi#KYb1Og VP 2S"րV~z{LOlXyߍp-M4/6{6Gz0TW-KdglGPDцNbE!ŹJm%|ڋpF𵈳B'8"EDnK}FS [ //XyQVL_x|k_G&f@XU+0b4w`qa*׳ÅhaXsLcE "sL:V^_`aBOFRZG1Q6# 4UBqvbmFP?L9&7|Vl>HӬ}Xs[2N>nI45jꋍ1Ws=p'¤B"+s=:Bu5T_W9 5+V {u󹯫ᒦ,,l k03X'SNjSx{#E*ffyϳ/xOfQ2 [ͧTNf4V9ɱ!\͖2uC$YKb$۰'9Q#Z95,|7p})Zeq !ze9BX@5E+9*[vȤz`ً#ooB"k2__k%\%M%\Gs.-ѣC4LN]Gu.إ34jQvi^]ݡɻ|^Cxi䥹҉^Kzh[/]죩_C|i󥙾֗Vy_C_~i]?_'uzp-'׹sjqi\'yqZr<'-9uJs\'?'uz_']e5p\=e2\e%qP\en\arٌ<%er٪\,e\4}a}s<;]es]C=emt"]Jюrzm. ~겮zlˬnzl.^&{l.KNb{l. 
>;l.˻.ڻlk{c I](}U!lߌ= ݋Xpw-={R~V:(+!U`'ʚF:B(-G.)Gvޯ}`mwMWQy c>cf9yՕ_L_['#ؔL=k%ү,@=31e=1t=iZ(?,a ^\C ( N9Ibr#fĭP"uf Et' my.7 T#dTĭ*5%LHS_.)@d`GY|%ޠn"Fc6V{'n[tSiDqWCLU_Yo)ʎn1bci._JFHwkJSiG%T^Ygefef+0ʽ$@I(;z{ʍ)/(иC'{w$qL"N)&.%ɴr#qމ8~-i'Ib.T 'd,V5ACU+4'h U,ݨ)n pOȺ8ne\D E*kc߯ fqj>}J18q++pl3!Mb"bd [vmgҖ%ﯭ;IH<dpFo NM *bFg"S8/ ;̈́tiL "b\T!J}jĦadEmYl!iS^-$> QZє4a04XZeGjb:q?ax8a4̊~T4r)$l|aGˆ;;eWkw˝+-Z[f,1Qa6uu DtA}35 1+ihBwN'o!\w?00jey{˲DlHImj(D:*pgYۓ 勣 wrߥ4bb~VԜ}X6|}#X8ѫZ>_q) lĚs$mv0@eyA,]#CUFi~0 w@HX+#5\࠲ 3LɟaGv^*舿[߆%qo߁{-. m#6/6h?)Jp'.Ml:_h }/V^n,A6Ѩ[ Ǵrz鼣쭣p3'{`/ 2hJHJǺ7[pDZ#CSp>xamvFxWR)u#r\G򡳩{cQI*.pF@ګ<*_ⅬTgTF9R,`yTpn\Ȅ:Vx]f3Bۖly ~*{ [J;V֯w$1ѩe:_Q\A[R_ ֍Ͽ fQ6]AEkUˢ=];g\h83P9NJ gFz:*_Qy.j[ }>~TI_GL뾨_ ]M4i3zEѫwT>r qExګ<*󷭇agGnB o\_\$sPH߽ԪCW72u$"yeY&nenm-jAYp_txrGЍ@ Xe(R _oE(V%>Os>2ڶq`-c(n{U~K EWq!b}V]/f?}r~)S̘ %#Rm/g2*<*[~^L!a4wvb؜scXw~4_ (?^YN#tTʥ5IuF,͟NM#~ß c_T9oވ=L8R~qPˌ{lݧkGa Yc 93[p^GX`V/1Hd3- jA4APpe6}L".}XʁU SxUWaN틟Y,J\ -O(-вa svխ`W*A$*L194V/po_o/xٜ$Z9? Ҧ,Kïg4sxFP4,L{U>B>l wh;vPw1U}s+wbCVu2h.bGe5U/CU-ą30[ aT!_p5-hN`( i] /4D Sl9rR`>upyp08|.,|\X[>nPfu>m$h3Tn0뱬appD6>&>#h/!>sNxͨQ^ +yȮ6Tke"p03+Rt憝 BB$\ _T+iV>greN+?  
iW /.?oAl0#``0D'7~34EcEu`U}H;MP0t01K:N+˶[-mdĐ #[wGCdySoG Q,8auB#6F"#X?L`$吂F2bv!&D 1n-`h~J-ͥڪ={0CG;Za?(ۏ[ endstream endobj 95 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 100 0 obj << /Length 718 /Filter /FlateDecode >> stream xڭVn0 }WdlR-˲a` v^vyp41YߏZ#G$VF |ds*҉ȣz53ZCZ082v> |H&LJo|B4&d3&=x>2* FȬu̵L#:V-@Y59E*P j_9TCTN] ݑg]l9ၖy&jTQ-9HXP~ߍ3{:BY54tńf1 ?:2\Yy@ѹuhx#yO'Lp{?(Ask "+\5r[ endstream endobj 80 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-012.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 101 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 102 0 R/F3 103 0 R>> /ExtGState << >>/ColorSpace << /sRGB 104 0 R >>>> /Length 13976 /Filter /FlateDecode >> stream x]K%q.m JXJ 1Y;Ή̺=Z'e ^xڥY8_yן_/~/^#|^_?_X>_>g׿^~5}.W7?/gk?_ߟ?"_=ȯ^?Ky_}|ζcyY:gͯQ?k<>Gzg xXbεP!b/J so9asaJX;vi?s )9}g? 
x6fgGya{ <ۆFۋ5Io/>32׷!ٞmBڮ$cۢƿ x6hm~iTe;>' ^Nf>vGOT<?_^ ?jӶ_׭yU1߹tlE'5n,2'~j;'aw.A%/1[}yYLR>So3Iou>d]]xJ$,w]ľJvsug _\Ww(4v_s;1Avm#OSc߻`C,qS[K١qm]שɵt7ygu \P/6I`Ëf0cy1_܌Y}i3tfWٗĸ~\ȶ^g/oxa_k:.Xc =KO]2i–Xk@[۷-oiNe }dK\:l' C.s-o͘Kl]g3Nٗai뒸h}~3Wx]V)WmoR6Ә"eLQ,$`b `ldy7E[1+Aha( wcO#4y!Έj/MLTeB@kߘq*%ur +k/K$QO \NNj*e!2ݑb_4L^Og[|[4C;S }-Sk{"~)T'd )Trb…HM}0`@o _~vKwT(LhƇU~C١ŠS=Y$YȋI mW_ڠol apF2&_U榙Nj\H|O0DQu)Ċq)^p;cnVECN4Ӎ̷IIB)k{Ğaaa*/$Ou̅3c%0i72:HD *_a_d9Z5 Nݗ(Y# .l5 E<@!T3@$+QۘA^mȮjS=!tBȰ{bU0x5_dc*epZ/uc3!d-"_uW320U;Aw?wrkynAf`^&:+d=fW $ri0ʔ9!mW1MAWu3.iM5gF^$M>rD Rpyˉ4bT=NkrF\Ok@YНawu*3;Ž$iin}痝mƗW4mx(I2s3恪JILvSFFHLYɁE3ceWf=8H褔YAJ] or3Aӓ-H^nzuliRk .=1ۙƲ.;|HtW~䃑]=&7h&9ieDctz94fˁƀ,R ^D(kRSSF +TldhGuBC4Dߩ"Oi Dɍ*5#"M9*|f(PrB$nꫜcj=PcAp3UJ'(uݪBdz<%3]LC@^BsQ½3bpKgi|vb. fjfRB->!\(U|o^W"KwʅjîWs55c#ד y*v9_:PdT99H hUܩ^e#ԒhLT]Gߙ֭|S=HJO=Ɉ<|or'1w$JDFE@fCV\(UY;,rF )의EεV;DT 7>Rs;]JeVZgZY>LlUӈ 0?eLѸwn')zAw"2t"9Yl7BIt?Qe&j? G'ԏUiZ,N2.!3Tަuȇe=ިJNhеxβ]f2.7Z` %Yƭ@ZU]gqeU?=J5VQ%Ֆ"lh_Wccbс`:K_2OEd!NԮgrtdzE‰4bkEW5 7Y[&Rr+.!wWոr^CI8)@j){C2%k&X`ꔍq']̗HCUQQEj]=8X1ّN sjY%%nFW"[Y)f닪1zYL٭Tɥ0qSrPKOIQ3:nO#UZMGd9YQrR#ȹj[ 24ē9phUocx 7;tz2Ye6jg7Ʀ%zp1B9QbxlW9"U*F1"|9C'ƣCG9|H{ 8 3/|F**1S]ųb&[sC0v2ml@-;V2Qib# }L:go,Zg`}gٯGt,|Ȭ n?i<bs.<$o@L!pc37d7 x䕭@]4Y|68PnD^Ie/W}F\OyAe; /e( [.,l?My{?=GFX} ^~~4tB|_|/37Վ˵$1u> ^}~ 3: lxG#'%+]˛%?StͿ)f^#uR uR`}uRV%~}~{oCr@>[R~{?ϽyȈ"u}}5m5ϵG~:bmsL_jC-~cbq)/ne}3s뭜$ =h ;_n ϯI!tްn7˅MRc)k=8]\\j}Òeޚ%'m1uKٵ[#Y2z8߇f6,hidKl\i3R]}f̽fXjs_UZwߺGp" f Bٮ4s i.9H 7V5Ue@G3!v'p6ȍTgpT^:Z[>Wt*thWø M$уŠANCM[AoH] O"i(oPMj!2ʠp z8ieΈ\cۤ)MJSh֐TN}Uiz,W]N$dF!$^8[؉ӽ5Ԝ̃}9w/[Ngf83.junR5S*N E轩@zxW]PSɑA@< <4.`0|_@s_3xw]%q74~"U9I*MٯzyrIŚRD-gћ*,[kX4JD\%1Hj VoBFFLG,W"R@J2ժg D ,;}62(ɑu+;JɈGh4XĔF?+a8TUFp(')w MWAY*G E@ 1`0:RdCF*ը1Gq+@e`*qĥQwn~ 6d+.<1KԍLu>:Sح[AU&|ףcb r"Sej/̜u,㠱mf==Ra9^YтB5|k2&)3mYaT{qmC0-۾Uf~]tf&ּU*b(SNzފ|9x&kEn iF-rWz|%ۿca 4Zj&˗.کC@MwB 6bw熾U^kΎk.n8+(VT\XQ@7'W zr:ӱ- Y@RyEFT#wAF?pJ9\ś#:]#Nfb`; tUۈMFiˉEGBMq3kY8 VR`Z`t;frda-7ɽux)͒ol|kv#ngަ=_Xb$iz=YcfS&%ԛ%t+<3T6z[~UYN|sf޶#܊hH 
:ZL%dV2Z4,o>k7Gw}ΑW֮+IVsv6:l*7ڕ0~d{u7 q&|*#z`RksiG~V9;w ENg47ʦHORvBg"x=S32R٧Cp.e\&}$j\'`u^ݓLE AsW5ŗ39HA&(\5[>= ur0H¹ʁOB"b n؈}9x&h uo(cf3 _U7G>WLQ8r YB=2wB6jp~PSVAʉeY6 0wnF5OQ QJ.f]rbq,J.o+= ۤe~adғ+NM@מ{}(@2IEC aE%snҕwYj` T9LZQj暋#c"%vHñU5Wj@sno^eBcM's"`sBe  |eTF ^TX"e.eno-Nv";<|:}&\@^:?E:a Us>7S\dW4;ҺQ6|3DukUUԭl].o)͑d& ?20C2 C^0˭x-[s0r!c]&o(۔rѝI~ڬ =I|y׭ܳRXOJr;@&>Q霙l\Q4ox5`=xyzrX|#ڃPU@Hw5 'ҙ jLƐUuRϿ!3\"u;ܪQ4#|Pl~u_=;. e9셱U.CaNa]_yܫZ(yXNTF.* o:I31UݼlzDӳgEvW?.>15*]m׃%e'=?Cvd閩},GuϏr\2u=;h<,\v*5=* ^EFB3hra0/ۙo6w>4WI6CD dṭYBäDp`A8B3q<~9܏U`)MKE9X<@yѼrШ 9рڔ ֐rI#t=hy`DÑt9Su>PͩAɀ1yձ\%& `0Xu&1S &5ć_gҹ rpar0%0 `H z="B٧Uk*ӖM0Uw+>1BV-uPG>Oa[`T쿖P58ZIۘj GZ d$GzЋA$BEs4䨴͗]=խ@sr(4jO(íQa1> C6ax8G&-<[U.HXK9~nld6n5{)Hr-W3ldՌᏅF2u.q-&eҷ$#r:pF R88(ǭEUmw:UM:'eZ;Pnm]L͓}/\fG,%8;í#Z6jz j- -[-]>{DP8ټ,l}"ʄ͙:h M: d]prT)rēb&ـ4IN,5E>6d wT==M0(>[5]ÑҌwJ.+|&PPOL^1Yrz'1Cz k %da#1pRR%o2GrfGpG='2@ ]/,C9V@>inEYI<+>vʣ<86<ƬOŋ8f9F[G5#RB9$؜$Na=9ټADc cWQ`BuX~YtDH plsz1bH)q07*qD "Q7G@v+ sI .x[N8c,tsE'otGu] kh=,}hLu6eԨȐLrc$s*9IC H0>N?rI2*{^yoaq,wQD- Q4u}WHމ)zqZPN2ZGB9 zBj*7PjFG5و3iV,PV}P6ȰQVCco6LC=AH~A*3)~$ÁǷ62 h:IFϝD2pY?Ac3^MYCЋd#wr0ԁ"bUɀ(oCO1|B<(fVylF&u`T_! 
}MGBguay0[ZӝkC1%iƯrJc)䡃3Xy0]9iӈJ'4* eLy!k@z  `4r(a6=Xb~zh?3 GfM" ߋg( |^=3 ʠ|ϡ> XA,q,?`y@Π+"q4zy&R#  Pփ>95Mov0">o ySYkJ9ځ@5GĝR/M6ܬ).SJ{[գG3$/Os>DB˟<)*X4=ђAS~?+'#8{D=UtUdP篜ÂSQјtNQ&HMlЫSJ[3D9x1iLÐ7zS^ǓZD̾1 [)O3$k)PКntJd ^;ʢ"5͚;ùF);f/@ o'e]:xJu ,J`83ǐr$Y?o/#bN;ʾ~Pi45R7ey;95bא#nd H2'vd}Dd_TK2-zkf/ztv\S֟%`Q|[aV '#+a14+n<(Ϫz:nʷ& S=;(e;~8^-^冋4c牪F]э|o#9է RH&5ud%{뵆8r0u1~x}m052%5.5OBΖcOpcY =&ٳ0 RJYy{J & X}R:meVM˻*X'Ğz.dO^OopTD9~^0U* ҹ,vO\g;iH,I7⠇L!)e4ɔeu乊vaԥ1moQhH 9{&10 'OMe)J:L) %~J&en2=Bl_رtVmQBSDC>W~Vs%]gȓ&Tj7v&SL f&!xY8r=4IxKҀ2uY|mv>Zv2ZvʎN ]Da6SC4ÞID'l:<``u]S|?#h*G;8hr  5;IQ]֑g%b  C1WLv}Q_yk䘋vbyV'Z͸%96*Ƴ> 9Rt;T0C,(a*jGTVL&MYΛ^/3h0/AW@;m*:WNq8 L&=邮|bMɚabX?%&[RQHmMOs]k.kӐPy-X#d*϶E)'BȳD2^aW, z&/3O]0&2utљD>$z%tf FNuo;VXb*a+ϯIx[p@[SBC Y6@堮(֔k2;t8rPgY1L>|Fގ t֛h=>2ԣ"IM>l͈tC۔@d:ٚ f{Wz8h/&]&ViO"8`"+jBfIXlzf6ne s:T `FyVnwg{OR,+>ec]4}lz4Ӂ591^?0&4m*§L+aYβԮ,J'yC1h@?'Zi S!^ *CxߎfnjY|Gr'}J?J 6xk(-_}#(ԛxVoUy7O6^"boXN_W'GT_[{OQUs~o ~u⏊?5S5\UE0Ht_M_7_]O_#u?jЙT>3zoF}>W9.8 #dؿk{޻]gX^v-89OnFspsM!᛭XK満.Ly~l坥r>t/km*R\`ݒnI[zKwYriZ#P۟dހ%,7o Ե[ޮ_x˒Ж0Gd> 6;,Me)Kٌfå..]Z.._ZKK^di--{ݲ-Zzҟk?I4~nS%w1|Y%-oޯwx.ݒFfqhOn-i#-?%u G<>9T#C?Uܓ&_u> WuPx>O7 ￵9hsp99脍Etψӹd玴6R{*ϸ%kcq-i,{-iamf>PP(귮6a*oSsM|_a"c| 0~ u4\xF%͜8{/qI78ҽl<eoolAOj8Q}(+yz`p[ cAOҟNN?F}ЃTM8bҍ9\ CDe5]xFi'uk䅃Nxns}kpcmswPڢ⠔t N p0eit"ImZdIZL~oM^sٲ߮.;f8K +{#0?)>ݟ+q!1WL%3;6We n꡻ ID`U)|dOq:}ґT7?"uلUOH Ap1M #gUMI݂ tDW'3Ui'1\IF䡪4d摒уx .Rě) B>pzÉQA=}Hu)q2# )rRf_c!#ˆ^Dl=l7'AE? 
#(rBBz,%E5Rځ6Vx=b+ʈ mNNz͋z#j- {KYc[p#_UEb·X;}KxH&9Y~i%m9  0[}UAE>Mc-zO,1nIysT9\1^<@KxI*vb֣\0(~c3Ūh2~Uj,&L>\O@& OMJxnEYxNޢIkn\M1] 7%5ȍA {]Dҩz8my%>!0?_CJNO}c.qbPrM=;!7U#=U!Ejh' >USa] >!=ޓHDθ F0Oh.&EO0J"vtE&:uE[QeSGxI=p 's ʣu^dHS$9ľoy/9z?!D~®MX`D"]7G9#:mxK/nR#3z ٤Z0瑞ѷ) Ȝ7gsD]!Qb03=)p\}74ܓVfuU;|+4uΛ<),$&fˡ![kzc rZ#-Ǩ?ԍ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 96 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-014.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 107 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text /ImageC ] /Font << /F1 108 0 R/F2 109 0 R/F3 110 0 R>> /XObject << /Im0 111 0 R /Im1 112 0 R >>/ExtGState << >>/ColorSpace << /sRGB 113 0 R >>>> /Length 25412 /Filter /FlateDecode >> stream xKgRS! h2-a,0}gDc_lkZUY_U~?/__/G)_?AO>Qר_|}oL#ߣ_W-__~/~gOOPV|?Rc<_=lcmc?//VOX=//c_#YR=ӿ׏zSůy?|Gô̯^6[ҽģjk лpbƙ 'fo# ч;?&z0{#+7ĺa-lX㐥1{ec82{R~X{~Dv]/=$V&?~H sqw/G*SzӮ? pG[_-|%[Y~V -?l75-F*V.>16zp ]xl\q^y9SZ8rm];mduCs υfjB^޽Ļ^aWܾ%z5sԩ%×,YS0ua5'u; ሇO"bliþݐO:R(S֡]ӆKB]!s /Y{ Ips}V ~q1[ǑłvbE8&"&'|gZw\#BG$}]q3v[kj\¸+$Zk; [wPzצMppR:~kũjQ0*`+GzŴҝ*S|iW9r`궥aس5|6H0Uw{ې=_ãc4|Vo$V=|W R&6\`\VUK6gdҥ#aL5֯ϡw>1r/={RN mqΕh#Ɉ+_3Xf9a ;d+|  3!k3'n|m]ҢV_&daZRՅ_9\J]qV/kzm)H_KiNe +;D>a ƥʝ5BH˅#QPT5ZىtJuY# 9J^t5xvr+Lz@zD_ۅqIz8r"{]K{DH= 1,0a"+Eq4) ii H46^!`" :K0 .M *>]uLbU5tԌLDDLGK+P{8fS TWiٯ]~pg7珊7eq!T Ka;D]Wm|@+QHi%nBʫْc8L+xdFSlS@~8rl^7Fc qE ?K$Q_#U"*&o+.UE3a2PJd '1=Id:e+IG 'F _ 5ط Nt]h. 
l2՜ gK[=ݯWf mVʕxyVh 6BD") K'a&gщ]tFF"fA zx#@5dNK;f?,A:Z8md.[5dەzH#=>{Y0P$_3çyڴ=!dTƻ, xnN̆N2>25ÅMK䖵D 5t⚉ t`UIhk VKi**uC&myRcZ>Kbμ`xGǪ=D) 7fi8(/Y7;aRH}o@ Z:'9`3MX]\]z$"my;7NWO\lwtwdn&mP, ts$:!x+iVfۘ>nVwGn'Ms6Bi.`XP Ws0޽'u.Ć"f uB!(Uvw]2Ez;XH բJZRqo Zq_/+ IV/c'?no%п(d7/Bac.{P"YJ Ne>|1`l@E+/p|8kSʇKZE~E= o̻ق$ܨ*FĨđAIj <حVVz'CR0[MAfקMdmD";rUA_1zduȧ^\:3zxsDzwuEz?'a)?P2;k: 2$rf|d+9%*]mG>r<òXeehC:#ɨ_,֤-wci՚3}s~ĩ5$;yWT9SaW?,@m<\8:"b?7=DTE~5 #s%uV X1K'NE ߢB^|wq5mMtT!<:6ƁU=&:}{hwWݎ'hxID*ervG:~6.JiޕZm`4r ͮl ]y9\cuJ H.&!04F*rv2kFnMnG??v*Aknl)L/VҗEWVT}3ȔhH[tozX:JI%aQĻǚ T`lsMR+ (OA=XOkP,d !g4N%^[ RGQD Q'-$G]j,Yo % at?@ жOtCF wRY?LEhuMg@Pڴa}2%Ap >Ǣe;V9_RQS9M'&ŕE85{Y1Z 9pʮRIBWgf\^o&Ƹ40mY|uΜI:맺\Cb6]zH-z3~9MN(MGW|~9TV A[Zh4?Gׅތ|<-o×xPbvfkL:x'л BDW)p"CAsC-U9mɃT!Y]FG4l~Ddm| ן#bXVAfž9D|3LzYi?x]CX hò^Ge ˳_֊4ͬ`;^e*&#ţf|,d4',Ɨ0MU'Ūw-]-l2_ T(&oZq tjwvN 4lZ:]AU ˨Gtk=( m(jpEOSo͉-CwsӢhY m=QI ڡ[ybRv%4z*5dPeEw;W.ɁVFr+Έ̧k;nhfe g a Tf3'u#EOzg͸{3cRߜ9LhoV[6 ӓ1i?%ϫ[T~%9fj4*j&0^u2`{h˧c ›k R;p\[͑gÓN6#]UNŽᰃ]5БPF¾n:;Ɂ,ji{مaf!F J_WQ, ( o J ّq['9Y@={Mb\>,Yܥ7 8{<+[wtyDbqYAEzqB͢qb'OjNrEafq ö(toCtp/.hۗbZ7;C [xx1zOn Q[lR.;fg.`!j U4feeW2oĚՌ*trI$@/S lLl㢸#Q h#˸e4z4vu#ndnMJ㖧.Np7R)Bv^CʳhTY6ݩ3HYuV$k|R!q]lPprw1C%1aLH}BFd8$!(`ꣳ:4੹G#2^n+dߚ|sW>WknPbfڦ ]ih5[0^S_ eڦy&kk'w,{υ,QUIQ\#ukt"/!=suc0fwu E&iӽ50ޅglWA5Gʕ/2 s䤳->4KyŶU z07 >}KG߄bnpq}E1\%0oDüޕLYs0 |6=VҦ!>gZZEؾs8/^=W`_^ՍxfC -\&p'af~^!Ea&Y 0KaN'0.4ܣr, vI֯ >F %ookIUX8MRkY5G߯oxts6\8A5\_ U:LԚ) BUS`MuMJ]ߝZuwI6i5!bzMͱ 8D(z8*,GOd֫Q\OГD}.O@?uרּ8ߊ \+N`D'}gy1QOTO9bXb'N5Q'c2>tLQJJ0-.٧0l}oI?2LZ_({_ARM5wX?nj@-b$>$KD2=S)W&8FD.U7_#5&Յ<ྍϐNzKDoBhPllA ZZذ>MWV_ ڭwu/(_6_HY/w+5#\kb۴%11) f'q2uQ0RkSAvVv5Og'n2@Pꙻ9`K33J[Cin0ꩯ9C/j'zP${[z ;N l|{Gz`:@mԏ>k wU ?t/:VκHM;NHؓ-3BQb%={AȈK1h |=re}:GWF-P7@*31F7=(Fễ0=F6sffytؒpcCbaj[6 7 賥'\uU7 S÷:z6(L 'pؔvBBԢ4MOXy4J|OWܿz;$b\y "}hНj[ !1y}Lk"mjrPě]afxo KkEJ=l5(^+߇s]WOSPV^zzpO]&;QɗGzx72u!cJ+-Uu=f43Kסh;+Ow5;JȦ^V=s!WV?N Juy8Fb\CKJ$9`(ܡ\uniVF?Af~$r"#W\5 vPnI@'|wuo.SA=rJUj=J ~t3%ئmBu-A(nu.[JrڳL5No7^Z*pA[Lu\Em&80m9uL̡r} &%Zt$d1= xuA9(%Ɩ*mC$P1BA -JL,wWztɅړim(C 
)78+>a+(v%-['BJސq3Ng-,T\j]8 dLgb \8Jiq w2<MOr2*e$7WJN' 6jm{ߑD=ޝ U)U_Y[ |1pv,_\hA\0=JVv 1'e:H;O"bܲ-[GbKV$H\b>RgVAL[x?P,jdTQܹ>."?ԯDthtSCc~$WɎj~ŁE}`LKPēqmx,`NCʚvrAGV+NK.7M̌(,3ˍ =Z) Ti5R-V#,iU`T=|+wi;/֑Mv7mST R^IÑ:M(:@S* W^. ;-6nSnX_rPA6n,d%uCC`׎>` 1g\6mOglM:ȇ٧Qחf"2,Ѣd\zDB-TTG>|a4j?:S![éN(b&X^eꀗʂz0F.QX!f5|3#3- x6ROx2ZOXbDB;3(.#{3a,[&zAL!kVBmK:mbAWS RҦK3E~2xE u#QP1;r,R,tue@rw6ȰH G /`WZ: `X}3w=Ic8Tw]s39ą|45;j8B>Ozwj*Y'$oyFL[B=ӳ7Q2ijC̜J'] ֨i"'"5mTu%hy<*|_7dl#Z%^] "Dؕ?4*$IrM P_8 2sN:wX1qH}w+27+ZmcxEѱM WwVw7 [j w ᨦѝ0LU~'Dr8!#h;礖K"dgdAҚP}?pp L\*2j!OK9'],t`M')T ]eJ5]EjI~~$5hղ3dBcԙ@iSݥr {(ȪXe̍fp5z.]#\L - %\cVfZZ!f6ܪ& JbqBC ^`80ٛj5DwgjbDOLx~r86+b,dJ5lȷ!ISS5zdO5\kk"n$#lPJ|m={8kkNrbF\=QW;I71f. [֔ըp7,nJ7T.N;bRKQjHK!+ g~+O7%4]阙*719fD|m#$e6NZN8hi9 pd6ʉopB F,ʹ`:˿>\:'8vfZ$ JUNdz?3\r wimFc{8?=MZ䰶 y?&i5) BLsg=C^YpJ2u]I6=oɈ?[*=~wU^f-]#Dh:}h8`OY W|K{e+I`iCv3Ɂ_k֛TGRo*d]4~δ5\'N"pC!מ6T;ԙwI=FQH\]E.[|zfGj:x%Qt %Q"$EAhI^oV;PoY:bv(&`d>~!==RܟgRf62)ajJNVr0ჵXd5[\ORQji#иz`cưz}H)5uvHIAbVq\NEQF'5zYAa*Ju\M"?|x3]Xn~]֟P)hZXl}[/.BRbychI0iPxq/ru'weGU\ WrFwy4*۲>ӧij>f lyu`m;dȣ挾Y @*N.aT~5\1e]20GF!CZ*Mٻ#P}u 33kZn.:6b{F 51ԌEW\K52^V>~X;ς.pWYi 3LaD>p"QaRmFOvI{e$q%m=p݃brto;C|7m-p<|&NbQ_]!dI}Lŏk& vRÇO DHĻZ+ a;Ywcܱ>atѿn Q]?3ruwS9gz w\qtK׫k"2'@]_g=!4 hL'NA`*uX(λ-m ]Q]xdfhn&A=R pbL75Si x}m2=%+|x5N=nlhg$4D ?'nkԧu&&Ka\ϛV$JQ9[Wxk4b\j K5QےL?WdzvJ.I)E:Qu-J.pLD#TN㰚muWKb4\_2h͇ÔNjjaG_B& j<"m?K>U@RMmG.bz#tzԮĘH߽vQ-@b h嫛l4GWC5#PN .4j?q+JkT:PᲚ\hy7],NłҸޝEB†[XQwݛX-4jZƣCK9VF$ahI J8R DPjA1e"2E( l5%nlBnz(Dn" Qz3M*\=Ñk4M&%LhЌo)I[Kp`v]:Jݢ=wPbY4^)}tdOSgJ!!8JnZ`|1bzUXZ+ғ?\;5{WGcb a}]NXGk, b^3{-+OTzдhng([[On`i Y-0*#gIb:/Bi\$6). zb.>4&2=ړtG `IQQf^EZ`` ?,N#*\}EXD1M#F8T1WHɎ'Yxu0nVFjg0e!Vo(9/ b$n61MM=,<X ]uz=#-ʕbJ9q;V(GWLHgP42悼ܨ*l",EKIbb aۉ5y xSpT* WIOS ,Ul#@ ѮE#=o~g 1* -7^Sk4LaSj Gnp5S #^m-6oQ+vg;KY5[Jy~,GuS 0Q!)%䧦ĶK.XL\uǹD'{T ܶE0b٩C( +cԟk.:ÚsZK'*䶻_{;[ҭ-qp%P=;j1¾m$byv\oFഒJ|aBr%{(b^wjNrGn#ĝ—uG ];5;wC[K :$|<2I@VWa&N֠! 
#puiPayJUu"F+;u&$cZp|̶_IŅԞ';T`4r&J gL$۠e;Qۯ5oG!c)қ양%h̟P8I~?ϳ'+vz$bE!ꤟN@(Ofĉ}TQ~1W IĪI,A99%-Ȼ~rD^N=î)"yajG^NyU 'i;6Ъ(P!c[:<# ¬̄"z1Dbg)^oV0l0vfجRÎnVX e}úʔJD5mgȧЬ` $\[%ΤViEKofK \fb mq+M67{ut{[xo@d_40wq֐{>4%[A@vjx, lId[Ji ˯#~†|.MlKީ`4^QҲŠ?HԽlXĮ5gDIvd:][Q UĮkMZӢ߽m2&FisU= ._ҎՐ:7r]ʆBxa1ðo݂dFJ00Jp5s|K{~pIQ&e787KdmȔws@rKd͓ #CsJ2JovCoWLq\~y{&&M?h-l ,$FFE'&Ph_:(k1 .\ :rcoNڱONn6n}U?6 m iŎL:wa? Ћ&naXb&$a1]WYL~YJT?j=Uیi"RW)Tea#?8g )L7(Ny,oba6:u*AtL_4> /,pc&UܨB&2iUۓvؤ{ KZk'|b@P;fMrs6Si`_݌#e҄y䋹LI,οw)73q  gfS'V&< $|8V͐dxRK7FCnU8w양kNb~`gZ 5_s٩ߣI&rѩޅ rmQ5yِ;"[oNjLGO~]nL5N*,HFn$?OU^KO; 'K7`&fͨB?_P,X54&YIN]ޠwu;#2_X/I9a *%=e~*,$CZU`MZ#'Ԧ IpcKx˺ {[c0ctr&y-Qe 8Est fa; |8oO2V`&,UYc:Abc̈́wvߊ)jh(k*ֈ%!W3L:K6,kNG"d`&7FZv"%EӳSFQ~aUg i(.$7oEr"c5Gr ah^'Pc'Ğ`>Qa^NT#["wvP^e&Vbg1CVGZ22TIKqX3Q/O'A.R$lfdPȞ#LC32\(a oY^pC.d4[t(κ_[=P=:m%Hh6 `7u2 ju籠)l2s3CVĤ.H(1V W$L<4F׬!>sʨ/1Zٶ1Ҡn3(M0ˊEs/ \#u3knrX +̱u38fJmhȄr$)8m!ӱ[HH% wZjݎ':bI\ڐxʧD/gߛ@Ӱ*q81J"S+[;I:K'Vo&,,:oH(%:/>76>(%~jTv.$90qZ{ȟpȓ:Kb0JG &B;Y)ᅄڬFQ)JP~_ʋnuX~iv$O(Tc$I\-:P " T[m0u#җ$8e&|^ %i QO>7>ae'dBk~+ݶQ'JVE^geE絻 [ $>v&d=}w:1ƱzVXARQ)$ PN쨬%W?:2;  >]^b)6J`f2mz_%I5%sʗ$tt Is z\g˨S#TFڭ&zov[FIDBQew5 a+j"wZ2)|#@Uh (:ܭʔTfcvt.ӝWH١N1CVJ 鶓M#f[#};t6d- _jޫ()Ѡâ N"oDtqÐڅh {TFl)4R- K9vmZ$ axM~F=g^#*Q) Љīg7-/pEjםޔ,9z8ɰjՕL݉G>lB6vV싋yTԫ,Wl ^̺td\y".0P0pU~xӢ#S^ C&mu-y+HMZP7KW)hrh'܌*X+G"5hk"6WwzRXt[/+Xkg{8hyN!0Z40{¯ ?a-}83C۞#p9ă󈩲pi I*zx {LpksTAIeBH[4zna &ٳOvIzJ[G~U[5{Z3_@{8~@-KU bQ/$t^b"Ǩt3nZ؉2ߝdA/A}~{~!nұW.>}wUw;MD 0ðaǰL{w3 oAz[T̡+ײO;:2bO|vѦMLԱn"4&t'ehO44O' oN%/KK((n95R4IbH뿇S$OF5L'-(ŒMmFg'P=|+e3L!W߽g5-V / ϻd ӝ>ɕGzd~]s!Y)WՓ'wbT2 AcZR4yweC{ΕNўD̎m`AHPЎNTᕜz]Dj-ZaKab++8%^ܫJu37#ݺӾȒ^6S b7;EI1qLAPtUƉ"^E¬F(2ٷ 6CimfQcF@2Pk$JvC\e}~֒Ay+vIk^XqЃɍ\bB^k>a4n\تRvmXͨw |H<8m4:|;:!M%2%?9'`LĤk[zA`:.5> /bQȢ4pVߦZGtF$4 ҵ4`#2@Br,]5$~'t2fU u'h_Qioc~SIryLw׈}1o*żI A[5LX7+TfLw[CU[;v!DrH=xY'-cb\2W6/R4RqI O9?Hl^N~eNa$4Y"+G$c7ćDg4ٶxƷ Aq)>cq,Ա1 1bc%05:%ki.Bd+&ahV"ֻOa/>JQAzJ-Fs>1ȏ[2Km8U! 
2yD4TNr\gN$5Յ.q.|`Y*hO$_&@DP(b7.O'FцyU7 fq}0'6% zfJBj;˕YQ$IcmRavYA%5Βd!ќbov3n}7WE-Ww u5k;|èg5['hWdѤf78D]r'^hʴ7%j Z*[ed}U g3@m$^pc7[2|Ԉ kӀfUeRbEJo*YLt5H<$1 YTzN4؎|ij!MQ\%c >% =YvVnd_eS if,D~[S#bv)S?+ŕ'4Vl ""&ә\IZw0דSaz}w8 MُE#+3hq~bxms\ތ~:䗡,[3{9ݘfXYeNH- 2‡׉$U.fgEI?q^H]ϸ&]Ă?҃w[x&]LL#_JǬdٯ1rL;wEyN:/bL2s^ ۺ"HaV?Hdnih=[3,g%ָ&fSɓ}E|Db3dR0" ͪnIo1%3d& ҫZCbImX҄ dXV8,s_-uV2JbTwXW&"`9MIThې zgXM`] 3i*YbLK0/5/:B|Kl]eȊti^<&;M_7%&ljYd?^VՇ0E'iYrV˴O*Ú!{[D nP/9l;B (zzҒTUR.8ˡmTp{a3A/<·$Euʩ R G44ImΤf*QLΌOl5Bua"א/i!] S y%Zj͈cXDXkS rfKbY:N(o;q1bZ)ҋ #Ǥ$Vi["6%F:*38aYU"soFtm%vl1INՅ? ړRL+P\`+ϼ䧢ƮD=nR#4_fnM1|D@B-R4!L$1.ʺ/~Iy.#2ND XWu@^zf* &5|N.c[liP".8 k2$S_< Ht \XP+()3UH<:brypvv$apa JU@ƴdlLw.uvPb[In2 aI5#xY+W~T.$"iwEEٶ1^x7_&$Zfxϓvr+3[BSE֌CjdF$R֨rJ XF*$v$pu]Jf^01"߻щnɋh鬴HӥRno8EפI+'n>’VJc$ny/fjL ![aq2Ky$r3T'E9KAX.ɛ_]l}3*,pď&gIeɻIq.SÑGI2xw$^R.>&^#_P+)(ۡ֨8wMI@Z0 ~Gr?GZ@jWIp#1ḟn-Bl) InKi'7Fk-[B:K3f֜`A#~vy+F}թi=; V!bi6Th.̄%٠bZm0z*Ou+UG.\!t?~=?T Tҹ9VO+bg~cU\N"(|j%y2SMf.Fmw}c5f۴OJ<]Lr:kN5!Ere1I#L%3;lت{#+- SPkfaſ'Ng[]=HFJM"{)h*rpG@y9&`V,.ꮫXcմ4h}mi(kK)b0$azlzXnsݲrdd[İ>Z5RZiDA1~ɌT];L9MDIRK,.`=kpLWusѺhA,Z/Evr-OUd2,F`:ŒK>1Xfo01~  دPG="Ju'lDFD[RP A$oL#BЏ_cTْ?XGw7bIt uXgf*Yq* z7HAJJy^F*!V 9[Pn` &1XDYv, d M^E354#Vѓ'44 ,D} K1Oetf6e5Ŵ^cH^ʎ7z3A2,{Gl  :&@ǼDL\tTVn 'rh݆SwU@7yO:F+6)Ҹ0vH.MF;9$38MRy:SN,L4on6g[M>DVo;yQD]ɯ%D֧RHRK:KOƜ[{\&wr,AB:ANddCc AaH!dMċExp3*g8"D钖0Cq-BI=ы_HoRe|f fWnKb%frw݅MYsȟN k^29_#]LE%{Ѭc ̻4DS4WZHuKa )fzrZPD3z8X$þDPJ aT ,7ITvY_=<$"RI&CrRV/JAGtETtE}P&";Ia aܽR-N'CNP %{~Aj;軛쑗 Z:%ƉDDD"{ҷ;%Oocʤґ,MɕGlq'E-h0_Y"?bo„sb4ΌѭN`kChho;atTD4Etex8Rw(R7??1{:]6֝3sq:$"!%3 J BzSxG o=Z,ٓly\r7 `!1r?MAp&?ܭ:0#.&'ch-*,I:p.ƞ#iũ%5(}ix#1N{$zt,%bbV79DW>8uؐ[MۿjҨ9Q*Vi+R-.&.I4fQgC{ӱߦlLhkUzwk5 ܳR 5gW궥at.d/ ;E yv"9Cs&rY |;q)W:]éÆDauXчJEé1S-H*_.-&tKAM؊H ٫ȓXc&H'9C k D_y-ݰd:*f3eJkte+1gDl*jVʮuo$ԕ{JRq%]HlB+ڎ-<vp -~p|J0$2*3k)V}[j uhQ VnP:Xv}-ܢ/X&Sb|nƄ(2UAG JXEg uy`$EO$kҒ0ϵelbPb$+~mY(Cq}[}HeġߦvvgBjׅf蝪h RESFs/ /5zqmQ(&bn "oVN)^Sè(Nn%;HpK(mPϒ=eg&_#f?Uht-_ 43uk&@U^ ;K{"C#ȭLLrcE'@oc롋F#')¶,Ǵĸ.TWJkV5iPwOy,y֬WJ%u SYE>r)Qdrd 
֓e#pza甃C$]RD{7{Ku0hĿ*%iAe57+ϏsH{JDKId!Zp&!HNȔ(Q/gM*8GO2Yqq#D-a;$I^TN+􍆋.R 1ݶ"W2-ud֡:I^[XxDP-hJvt׮ț&@"h8bhFbaN4b=I Qn,lMRc@\AQ#H MZ6&%j,2qΌqlLf`#yɱ̱ͬPFg$p\tD`#)LHEV&«N0Lv/ !bd"[N#:]:K*(p).j+@}BO~ٚDžغ=b`jq\ɥBXu &U5"e=p%1[u1rMx2RQaȹJ tfm#mV5^ɏ{/VC.WnY2yorLCqnǢboƵ^RO6S~]SVaݨVHFgƦyV ^.h>kpfxQA"='޶j&{2OaȦnDL1h9Aӷ1h1>=nd/6`{2!a 9t֙. Կv.SeɗܧFC6{1MD9;]{aԡ.Og@%?Fʼn¢*UD7K-jL#(zcy\*`$rAm38$LtӭF ES|0U) {-֯6ەQtWGmWKۭa=|6wp*iWd%IKęE\DmXvV hf%o}U4X &fJj:eGbQknbb^geGNHhu uҩtd4`& ڴp?V,%)k+SZYl^MٹT&:I#C5\!p;OJ endstream endobj 111 0 obj << /Type /XObject /Subtype /Image /Width 201 /Height 101 /ColorSpace 113 0 R /BitsPerComponent 8 /Length 16064 /Filter /FlateDecode >> stream x]UoiE?b;3&'azǑ@:`k72(1B1c!H ' /όRjTvߚ}g}NVzussZ{~zpOg~[_^p˚1wpޒ{]Ë|,/x2ߌ/.wX ovk S3sxr=k .6nUKOs'g{^~zpuM ,}b>z;7n:'ݼeMx'w nMVa-9C8κc?_ݿO\-t>?n nPhVp M]5' tq8c|' Gv@Ex\D>zwÉ̫͝[ht=kZh=k+K [ ]s/_~H3/ `\s$q53Xkܓ p}Lm3)f+_ ׌'}҇8`|qdA5)\9W$:HW {%\n%\U8ې^U)\?Iu88pW<V * d vrT"W%ZGgN:!NseW7w]sЮc>L\ ~ h8ނErG{z0ȟhlbtES\*W $CE?Gh&JtUG+PJ\`ƹQ Z EۓT%zhVWlnВu iT"Z%WB-wKKkK o $^]w^ t"$h?ߵ&ui唏bn94>!j|-$>vsE0|`%PsqR.CȘWҫM!߹"ZJSSd+qEڗj7X?HO2B]h~`<_h57½Na.^k<"9p`a/\2]ʍ% 1pAwGL&OhI|+:ϔ>G*\UXK;\*<RIBqLq5d=RCəS6X&:H?.q\Ēj0GgOM]ZЂMpׄ0 t2r8z"Hu`%<.kN[{kOd1ᘍ)'SHEV]UbEpݪM3SE8]\cXqx1~הM~4RJ 5O <&0$\'JG#+U<8Z8ZjԸ4@3Dr;1~-E8]p@dBk{E[Z.0tz` ;62sWu\wK-qEW\*< 6Q¥"n1>-x#¡jGCk/hQ^GC}r^\‖ ׁ$\p,~'%="E:ad!>v<[R"UZFR. D/xZxweEZ/bv qpxn Hr Y Э \ %W8M  —"Zt##jN،]Utq btsu, <ړf_q-D|Y^\\ L$k~] DA@z"cR,bU!Y?3  &F Bo!Ѐ|u> AN9>o,:`1>%WaJH_< >vs( ݨ"`=t-b?h+X0ʥ雙G/ݟ΍ B[ԸrUI $ \'I>xr<I"vp5R*`}Gk, % *!_8ѭr8;_3/) J h'hkuIR lz-#g^>>SS*2phChX? 
7י=@ .'nx(xWBZ40v/@ibfh]ur <+xy95R#E⊳3.Ē.\*cDa 5LH8>Bpdp(\󰔣"%zpv 0K.}]Eo:~R%hcti W hCCW"ox` .Iv 'ݼ[<}ނ7|EIAdF79Z,P8d X F>ApGLCԝ+$ ?P-W-E(ͺvp2~𾜫cmwf ɣ3[Ez88g  NuƓ+@WnqBk \wprMs+~;]mk2.A7stL\8 @ ~JtZIpre q7&KC9=ewn唏_sf4g20PפnZ 2s4MܣH78S(1 :<ou e~SNb|Ct9Z; $\{g}\zʂ]lO~T]Wd Ejh5>nc,x~h(É]B-DN1UgpúW_6Pa5]rb徴|fD~Oq+<X1O=cﴆDxl"gKtM u( ţ3+ؔ.UUD~ G04\\4#.)08 ѧi2""]@ 6 3Zpq5 O9tL9hh_<2RXy6+7O!~GZRN˅-SpiXNlX9+vqEZTc+E<]qᗅ v`ړWÓoC+C)!SVh(\b`ծT ZI}Z֐hiV%9]tIʩct=IU.|0IJѢxĄvKytS8]T h]D E` սi= JPF\y(D}xD,qϓ."kQB%JDZ^R|n5j깈- ckQ#tF7 +_)XՓCe|Yqw"f-D5XuMէaB(TC0=%og`}IdLKEn+KRIS蒅ESr.]t=Z.E!ܙT2PNՓyJEŸ-G. тAT~% ^LG11\ͫ^ @xҪJjRBT,_.1FHJϣ03n&+,hKҏK.8a| @KyGmV]+ܰmhZFkQ8مC]hICH<CB:W!bYɅ`J6ѢAOLE>Mplb@+wHݪU99!,t"W 0E_cr1Qhׂ5<"R?IbYMph)\e)wO׾RN S(zWŔxp1-@Ejx=ˍ/㥡tO-h&J.j+^zʇLV.HQ=˙SŇ96Zy)c\UJ0~r PO|p&V{P༯*zk;>_ sM'=ECPv)Y;qᇆKb䊱,fp"7'k &DmbȢQVH -JE`9RQ?lE̽2 N UPXS6W5&w..^t1QJ h+b j,]kpR_ES#\.<2nD cc4.hɍ/=NiJ. 񎖧=(& < sERYe[ɘ{e4fi72GʜW9T+0NZ5~U g3hmhuWũahHdncDD k=S-Bڰgujj qy]hx) 0.G.aZc|11L&$.W?|ϊTR\VH]\zvx{FZAklF+-e ˨qڋo![!hzs%ڄ5mEJ CDRյ~(Y2T,3GN\ 9&$ՠ}T?QGtɥ]!z&}UBk=7|,[Ç->l1Z!1h\w&}](lK ;oڐ:B?fTk%3$UTNRSƤ`dqh#3%Ex' ==..ÖCDW>TLRy,E)4bbHkd?BZ!Y@1)];(3'-D},e-_!i)![և bad=}s,.zT;u=֪X*A X} ֩ghZjjuSfڜ]V@\͙Ŧ4jl Z)̙Yo;N ]O2&-lnrTS<ݳFi ZZ;`*WU_+4^A\ZyK杖\BlrB%/1+;ITPx(;-Lh1~gFUEkM;~zZQ ܧj!O>i*:nh+PӁ߾fkj96_ɍ+-{w6xo 9.\cZ8=x8s;dG֐e;tC(YU3ֵax `{ A;oj!/;ɜp'GB"ZUwgZ ;(Z&R:lO&&Ɂ徍֎vjݮ~==hybR4-OWޛђA\WBM \ W1_2jhs$A=U֔̔] u!>&]CxPǭeeWi!hiihb"M@Ku]຋K_^U:H# ^]m܁wҨ2_:f=h%D.*8%ZetGv[u#E'9pIy'zZxijڕ"Dt5^]yp}[2_Y1?x#pōRV "TXPR'jk4SBj-pyõx2=.ZTA4тfݞX]sZ@ \A ^ũdK,r8˪&Zzp nW@TTgnCR\J:涜${|ٰOinakP/q\jQ jֻ+xD s%ܸ/ٷW.Ւ0#߻+5%E~;ڻR^PbySg.zNr|[ȟ*Z*\e!lصӅfdR#wBRYZ!)\h+BxY9HօgoI&(S N H ؆'}E+7Ӯ@DzRV~ UnG>&hu WuZL,Ws7i(j߅uiwEn&? 
Ҵv nrjѶ9yﰉh]t֖,[[-KMK΍%t@mvsXW1֗*Wl !(F Չ h|yQm!˷ w(ѪVh=;ڝ[8q"^i__qŭ1IjfGsMZ2]wZp(sEQ;u$Vj~+D:W m"Vi=F_FXEW(jh+'v{pqᴡZ*dNhaxO|ؖcr4Cd>~q_uICkM^_{%|k P9`0E}q~{jn[ĕV*Yr^.>W+\pzY,]=^ZEPUCKCCY&m+( !ůgڢFpG8bf?%Όթ$K[^ӿ48gVXE.h/AXnpГ3AI[C|4N '[=pyz+}8(Y;7lOhIOA(5D/‗< lq>@ cg4^n Gesju8H2WK??!Wl'N#[- ˝ JNQ ׉dGƳA'\$ Ka9cx:s.;ڒ\Xh}58g+H1獻%T-i` N ܺuUE%ՒO }bӥ"VK *O|Xx8Á8,a)`|6ޝ:+pm/$Kh $&9R҂.#Ӭ F2C{VGKsvU-[TXa/qڶa(U֎p9]2U7d0>֖/ b8>g]5|{hdxܛ` YaA~Z9wLO'pp202sxv&'btuVmJdMTɕŵMYj_V U?#b\+jo1rW 2~)ݝ~2F-%Y( ;wO^u57 |B p<6* Ϩy_xz|$fn|-#G2=WJϪgF-T7X-&8J׫}|{XiZpqF^ܰT;gS‚C-Y韇0`"F<>iK9 q -U.|NwLa]H/l}[-(sΞHtzߞUimtǐIh}yq8O9Ę)4ގԎfk›Z4P9W ^5q[@DBDוC ׼q:AꂪG:h4OY}ʐSc V>+}&vC~paSv9CdgPp5=Kg߆w ۓp'pMłW-t2c! "ݭp)rD{bhdIvg,VCB/zO\vW>-&q ` YoX(KxZt~>rgr`jZDXR˯EVe8@kI/rЂWƒ*ߏ'u"\U{'Lx`D{_a?I2v.17D`6E~[7[!sw#)]p4=\1:ƹᄩ5DqbqVYFѕ0 uf#ӷ҆O<4)*cq5>NMkL &nZ7dất&rC{Lrם\->s E&b^nړӮD ݒ^/9ZBh1{Y8YsQ-** hɀV- wNcajK1& UL{(U|'yzH2[qi yz8R) ٧tgNQa \K5bh:QwjSj&WɹYyʢŦ PH;Ȋ{$ h !@#ގCᘀ.` *#h/Ƈ*^?e4(z:.彷CYZ7!4qԅxNJU `(L'X!F"Qq,ߠMe9뒶w=S͙EFk˖t5dѰl[2;`UC]x3` P C;>ʶ$8<#%blF c : ٤G(o1m;t-- 9D bAޅmDp_>i?aˆSޚ,K,b_ hY>ZxUǡMܖ|%6\r!>/BB1^IR:|6`t}|x|хx5:T#̔ZЎ.(|d%ivy>,Xl1m{o퓋V~/d gM )srg6tx4ޏ\u'Z2@KkEy=\|2١b?+:NO^4H"|#fZʙ’@EFܧ&0(,,ȕݒG7ECx|ok.¶2!\qyHfx%m"]_V!8v&'gAy!شݢM,c%]-xAET>Չnll4%lx#7Eя2Sh_8 q/n˔á&7Ls Б@ߚDcEwUCnrբ^q';Z&Fé߆:rM#'0ӻ,!VF*1 b h[JvҕZZHnٗIt+UpNҕΟY1ɯ҄Zt'Nw]YyVPl9]X5h8 Y 0(xY[lZx1x*ȁR+H .@&>ؗj.d-|%eV,v =rsB`ed5%z 4sE#f~&Jq0>Wͬz>c >ahOr 069;߲m &CC,q <`hpZtUnCv}R* d8>Ӈi8cl jn-<4M9^B?gҡ$F?XcmdM]d{S|ߎ?.⽪Zo&/:SuTKf]@6W|=9k0" DzV$bY,q\js5 hQ@);R@ .aNGx GYգZTᒕo68NREOljneNf3Waa,x )U{pk5aTDҴ)lQAʝA*sE T>hMԩIwhhRCki*W=ݞ $gx@?r -\p犒Q-55->NJ¶PrhSX}lh`s&)q4ٖ`S [pBIb|OF^կ:YA 5{T[U+9ٹ3Jn1"`tN;+VLA5#$볃B&`Yz/մGD2(-:Lnљ']kGIc܅/q6^l|' T;5PBh#ܐ1۔.P .[f8=,tGr>moL 9f=2\L%~kU*02,\N@<3q,p58̦-iӸ8Z]۳uZlQP~Z{}I-qX lj8.+_Q ޔezo%*kVa'ea5vfgLro^t}9 ᯉ`n_\UxBtaɤW[o\K_FƼA7  k$jS/L>>xwg}y9[!_8>/lVEdRl]JAػR$~8mœN[s9_cڮjGˁ |{el볹ܖ YDLz x}ah,;ۦUfA㽼nJ;Gi\) 5=v9jŠKjeH\ilᘰ2 [Ӳ_+q%kTߝ̔qo xtU܉}g;mqbU&W.X~2 opvy␰Nwd?A&AjR6Qk,P]Ԡ?m~;js6zE.| u9WtD&S3>qu 
^IzCB$W&F=wk1P5BK}$ ȫ!3i2v]rr Hc /Vi!NΪ&]2]$A?(i,ƛ1@T#עj:OZ]^9imen^T<˵a9Xy&adj4n\8xɲ. fXRXX|*UNLmTH7^rno)h{s܉݆}*Ra#Dҍ|A@! {c{h]uZ@-#ߑ89"Zùj<􀒮 /+,K '>{DoVbuKP/^6MKUR& hn|PmGk2z?W#\ӴN'ySdV^]ڿ6 endstream endobj 112 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 113 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 120 0 obj << /Length 499 /Filter /FlateDecode >> stream xڕSKk0W"eCh |K Qgڭw( i0cf4߼5SijY9 wBVUmPPɈo}[JX!^vUL+٪Vŭo*=;|:#Ѿ]o}!FN+~-|&Ef| 4E0[C FI\ yb6c<d"6DP v#|tm6P9$tll)J,c焂(ܮy>"CUmgRF k-Մ/` ;ÂK x00{3̄IH#5p ڽ9 qT06\ۋc,r)O.QH'h Үqza^--5?Oh=!.}@@vյF:#ʤ}n_# endstream endobj 97 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-015.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 122 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 123 0 R>> /ExtGState << >>/ColorSpace << /sRGB 124 0 R >>>> /Length 379353 /Filter /FlateDecode >> stream x}KLݼV+H IӞ̞x FoݵK|^D?ꓮ 9d0֟:~k?~g,;/?,׺??qw?z><׆/ǪϓcWz;|s^ɾzqIv䯏,|+=y>z;q,q+>y~5vtUWzw'=ޙ~V&ZY78Vmmڛ^Oz'DT+V濱lqA0[ٱ]^i4&>=hlJ[6+/y ;yyW ԫxȆYa2gޕs7%/)μ_“V+VG_ٯD+leoLx^%Zm<3O38c֫xGu ag4ϘRem珽ϿG3;5iGKWA?&s ZoDe=XˎOoetg塬/dJ:p3Wƞ𬄃x_ ۄa{G}6~U}JafG'_Yh8Ux ,}Za2oKdJqPOX9yeyiW!=Jl2޻Rm*w8V~^%ZU=Jly쁼KU{k>9ʾpXQC8VwQ+cEqcC=$lg]y쁎 
5Xn5Ubq%eWN^?Wlq蕬htGr~$h<θ*X4ׂEg?GUU4mcT`ZvV}a6+ƕ2kίޞ 0 g~ m;=2GW2\lᝪWֶ+Y Vf /6u5Z-?*HUbmΏd?mϨUV`Z Zxª1:le~*FB ^E3P*c~y2„>U㿩ᰏde/Wߗ}[\IS3W'1}adDZ*}f#^iu_cz*G/`l}Sz'nM5m}g>zqv:yƼohStFoo^{OA2*ߧ7&XS՞Wsfu?s}dU8V-HB<1cw>C 2wZ OʫպGWnHꡲ ZW;ocr EMUV a ʈ;+gW^7'c#vĴ>+9>G?]Bxρ9MqXnH+*Ag~X,?*= '{Wfۺy^NH78c4y &Y.ZFA_?#w1<*y8d.ђ߫g8a em 2#k\݈ޫ`𛾋s2'0w"6׵]x)9@V/X&;#eNϷTK=p1c?:o"4;|%PIq7Vҝd 봊S ~ 1OtA'nGic7y5KnὠVx| O@8_|&?^ gѐz|iÿV̅q3jgO\a23Ggt WZ;=3DY&3,yoNt2oP?D&M֫ 9)kQ9p,2`G$q;o9)K&B̠篳?Q}O.#y{{&| O~ddɾٞגwq1=g~78fp:coK1P~3ո^FO|͘_=T@$a?|%?_m~zրH=}f BB5 *uN_L^j\q/~SSnRs(v;?D\]>qwXەc.K [x<c~*gѾUp=rHPq77id%dhJ6 >xɈؾ/={%/`݉dF6 *^nSf+1}gz'o~lu*h R>s^dϔ wqcIfg{$ 4i$Ȟv >SuZVs%bvbb`Q#O_nAU/ƢUS<"9MkH묶wH(Ŀ.rs OZN٫ZnEASE){UuەE(!$#mީB "FwU3˫pgq Ux7ߕuѰz+dG|ROqh] UqIB6v6ALzh$Ȟ>N3_З.\kB6c50Sd8VqQaau{d_ޝU*.<0v|'fCet,7aoи'Ô;~F}]ƺ s+]ݨUx+U$.j;9kb{>"+v>ш&wʑFvw TH[LKV~4^V~8ɊhИ&џ O f}x$*|"VF\\q/a푟˪k ߃3b.z,X [_z"g硲g ~{0y E/)"nM5#{ӂ6޺cr (c6.|m <4SܸپS&uͶ+eg)3 x_7y߄9V_5Xwm(WH>"n3ulu(!75#VT 3t0k՞W`-{!L־՚5FǷ g{gqAK}t\AxP*"\¡<>Q_*v;wrt+;[}yF팷(w7JpSkܤ:KWNS)ǾMUX+E({6q6Sd`˞4;Y!$R*{@l2rs e{rV5VyUmFlT4}m"\|ܤ'ClxnqƳ`;nE/;~e=HSj3G} +?ɛpwy9hqюN:xk*Π܅=m}qP6z a2FH RgtwVKqY/ í,{t%H୲Alz+}ꜭs@mƯq;2pv:dgڥx';NQ4dO}R.{WqQfS|l)]xONz@~}Ɗ~%"{ƑT%kA3uUPqʹqAfY9X]U#nѯ4z\K\~Fb3eLGKFR _n 4h^*e]GTv9Y*;CdQdrosj9_˿YP=J:_7x'cAx2TXF[q5TީBc֘.aZa2Fo/ {սquz0wH## LƵJH]Tf6th?y*jH<ءЌ2}Zۢ`@ 3㔍cgh gΞDŽ>߰#^O0<[{bE똲a+ c=HgCq' +fomcLN:ٞ:GvWz'X'ʪ2k}<+n~bxmlͱ3$1*^>䦺T#&cojQ7;+eBd+3uY{VΞ^+e kJBO e 4oS/(^bUc5܃|os|2q8)kڏ*K}kh8^}r랿o@?{JȢ nᏌ/${WBU6jP_]cx4Ȝ{ox  UU ߮~u[* ɿK&\MAfv:P(+l(jݒoiGV?{h.%1Ţ/KƵÆރ4XGRq?@5q=#Oxs+mR` wVqS}ِjab'ߦEyЮaӌ= u{>uĊ3l_ uwz3i("n7qpۅVU8eyMΌ -|G SAш40CP|h˓g{g՟xFJM'ꑪ^+a)]8N~7ahV=C:PEڃ^ gϘ;+̤'h\_D#Aqڋt|<:ޒyIF}0E8d1SD+{hGct8?G%Jjr&J}@&w#!G}"\+s)^F㓱h624 Έ1PCX4o<f9XԭUm {+d߼Oq̥7 F*Λs\"h/wT]Rr4cψG:1>pRbF;_\[%@@0?i+i5ƕ2T9AmAigæ{Z."JVo8dRՑr/x-i r2#D)z61>w"{\wBg{Pb0Qh@ Wёn671{wRvC$ʣ"n*E+Ny xh` qŁZ{{d_G%oX%g$1^EbKO 9zj2.[/2&EEgM:RXc?Cw􎛴"B]쟫* ;[VzF1铋}9@{Y{BYS:wОHWqQfˀ.39by1\9Ĺڕ$ۋ(Injl_z 
:fr/[$rXDzυ8|Y~7u;nUUZE:쑪~XBubecNSnKZ,Z(geg$=XPq|6gV]ͤH=rVj[7,fʘ(pM\믳?Lv7ZgŹ_X+Pl RWQ\q/!Up2x[fh*!5V8eyuN-H' G=^#d ws_1"$_ة']J;PmSN&Sr3uC.Og<6z[sڞQ] B0{ލaj{^#i;enyEAtVVBzNkl6,63~ɵsO\vV!fj8=ʥɽVZeÎ8==i!VlF>* J[(* -9NїeWsu- X29iB-jU羼 bL[ĸ|w;3o<=o߹:䘴Ҽ,Σ}=;_ۿs9:Iǿ{9y$gP,XnsGI[gppӾ`MAByڣk29ݴzy{n ,eG"=*V{.Nw{0>IzY;3&eiM]ɬo1$ΰSUK,!kɷ3rjqv+G' b՘Jn+6vKǒ+~]1sNr$)*9|"A2ȧrHke.v[4 fl ;+pW/z_d"|k"? +R%Ts.qsD_|]S- BRN]Z*ߣ:?T1kޫ4'u?sj8O8yܨ] [X T&k01c"?;3ȧusH\ڭVB+o /iw Oތ5}"ƿX˵ ~앬nS>*xɊY"XG yɓJ R-~#Ĺ [ix ~k*j^k1SY{;s΍vqq`πvsݗ{W}"s*U/a =RHG [πnpH!F9v ' \mczPa+,E2&!'Ĩ#6Gµ=*,ƇXrBڊ(GhT XF[>=I r# lMx ΫfWcZ͓35k3J(B\=gXpH=#q];<^OXp-x2X$筪ӊۚla';晙ǁJp58f}PwwCFTf(8dΊX{g+>_v c|[ddRB0q='oXŞzDrFC6x^NY+y=+YaSŮ]Eل gI.Ǘمe}h;ԛQ'm=z0h:1p"{Unyw3G\oJ ?dQq97ʹD6.Q1WQ,;q\?^sҹ0gfu۪=緑 {EG*vT,7x#n UqV`WoJj U.=xmo+pb1TBu`g澊7?U˹b>Y1-9T'.dNJ(r|b/lj!f\P!Ȍe=@c!rfÞ}iߎeW/}pgi3y#p'iC T%2N&PV%<~~hԽUh;29g#ފhoԟb5'KWW3u~kyiޝ؜U oWɧ\&2x{N*z![vكmڊ/Xv9Ǯ^V]o ɤ[Q-Δ'UE^A:Aa.zњ2 rU25Xp/|m-״u\ГCv*xٳMd@b$\*3"˘Ћlpk[UJ|fJ(YŞ꺅`|xdݮAFuVh$2^W8N4 %;8㐏Wws;c(xq5[N\iֲ I|b\J޽:O漻9{ٳTC wt{vpC*̨޶զ'~[2xb|{ҙՔp8,3+ΧP뢊B>^z:tяm%׿%Gb8wz J7cdK{l"ʃciZh#bWv$fbf:ۘF;Ƹ%FE_ij3){4;ӤѰX1c:W8+eL&Q9r3$&u 3죋),aF=N<Gt:=Vu~I}aڽ kT9Zt,N3~EjOOwi[j< .F5%#V!1^NYeq 9U"Ȍen ^%o ~=!U\KXE~~pZ+{E삓vxpMhS:{ZDh}P/T*p̋+4Gk8!v0Tup[V%<|c 펝Zy\@}xk樚dOA*Oˈ|,R8Τk+~hWc-+qTj=I_[v}Nn G$39צ c|oKdiM5Mdb!09s`UhTY"|qOTYx.Wź aN5Kfr70P $R"ڤ8e)L`rntZ013mAAV}]hgNrDa2@oXGX>;6tWgf(h؍ R~g~~{F>1~%=u2d4)kS}d_>GJO'6Aݟ`UR`+VȠ\+s)ϘoT'yE=3^{g{;i hS%7얙2RH/=wB*deb4.\)[WN^>|8CJPfMr:c>рIꔊ2ڊܭ{'6&'6~m߹]C/?N;ڀǮʵ⦊ZPߢ<+Wgc3w/FXnifd|GK` P[9_g*kaV+hlAFɱq*)Y)^j \ Ӄ{xyT5lk<vKOs_i}dOsUf\(w_9gYJGzjϫzWё9YEfǝ(7 3ώ ?O97־SBHf#_JWa{g?(k+vu<0C^d|yMSv@ГW(shm8y Q}iwqJ<Vxm\͠,J(nќe@e{O\nA]uǞnc-!{NANbV?@.oεwǞbIZKycw> SIq2o~廯+n5uJYcy[uPdvGv(3WǔA!/\`R`UUQ, 1ũY]xl3 *v}:z%3סmx.spxf4V|pUZFky/+=1玕eHSwLǷcdF~gE=N5Ϊ缭hg+z>^+=:+Zwt,w>5{A5lSέ/]~JgTEqqł3>}"Owd=sUl^X8|Y.v;qNⰞ]dWWe&J&ff2Q+^;3qq/؀5sN{YjOΗl\bDo\|暪 Wf- nS>8X^%toU[J`z]'rXAߩrmWE_fY^[/ US.c1SDaN;N..yzϭp ꄿA8Ӓb=.Yf9o럟G,/v9Fg}.>]d_ɡd|{M]0#q2RVF&5"ڬVk͵j~3/*Uu{kkN3K$+Z JIQg@x%2 ώe30v 
6Ah4~*͎Tu=+P>dZSF1ҕldj\ oWdF^iEkPz8U"^[GcX\fiv*8x_ JٲӋtt%+]kac&֝sk+ޢVd'38R.lXc7ڭbsfZkIu TU"1wU(+"2762HsQuB̨+ F_HPт'KNx5떞W|g~P [-3^ރ'82 PlҲ͎RhVHs=rt:Q!""g:633T )NS R/>&3[ :/Q3炾AfXr~rcv]p2ܻeb"&^+#E?yh<ыlr.YWP߹yH'f+VG纎Nkp[&3[\yAϐ9_ t N8m)I6[o&fR__;UQTl_ŋ5PJGT*/xJSͮoPם%BWRSw`eD4x6}!D8x -c3!;Ǥ]+wr 83s:Ƌ`'EJtaHu ^k-S{ŽދO6sV];.ؔs2#Dw~};r}U%)pJu놪dm ~SSG'(>6k시8+}gc.RHܫ}U }X2 @_0e6Rv ߤXyir$nlhEb)ŔA'fg]iyxٞv^tБ ؽ@j1īC;?k..PlgL Oe[ao7Wy&k0qwkW4M-\cdoFc+:@o\UcWo[ե~uFT!uEZ3W.q*zz +iYhMU +}{ʌ-O#,Ovv; ]~g`<.ίފٕ1 \ d igSrE`T]"hQ8(wԪ?Y݉NdguP.w@q#L-Di9i U*]I}JA.V;ڀGŚ]*,1)N{f3uCj>iVZjenJ';\UcoUh}yD?isԎ^gg]BZA͕ޡzkϸ7RRUgcȹNe*g {Xq;W{$`b3V':g}H \9nU;ʐ~wT*)c5*ą ɕvjGVs_el4]tS“V2o;i*o ĊWJR?|Ccmy⺉!V)͜52 +#]y55Xi= 㓞^s2c;8qZy; Ǵ{Ky%%+%^7?Zq_xH qJY7Шc\88qWZ8IQݤ[yy=D߹O3#S?D`|ṣg{DZR gX@̄55HnpLhu +WϿث1YI,f 6cjc|ՅϪgHcOP5g{n&-w cvᖧZIUnt(3y;Ԡ;qY/wVGY  x:e1@M6#9YZ{gЫSp>yeoፋ9E=VZ W,(<3S+:IhгFq`zQX^URzg)-g6GAw* koH7!gVq-1NR. /!j%Bok5Kpŕ2Dq VUqZ ɨUr'5zȉdm^c\dɄ5oReQڱ=(' WYhl*Y:pnnc4]ybL Vzƞ%\eJJVJӽ'k^ʊTfߵ/d<յp[VR124n>pwYewA>w( ~R<\tׅT*͜gWѨv&.)Ys')>Uj\[33 + Z? W³n9?Վc]VJcDR% s,wo ;1kcm>MU]VJdpד B^jO;OZTY7:'ĹJ۵м%K<2b0#7݌̓߈6lJSZ:_~1˲'Oڰ^g12*5 齴])ΪN_J842󗀛8E'/8ܕqg7 oQS9y{u\پsf$z,?JV7usmF-Uv (B'(Y+IEe;y*Ⱥ PPtSZꥪXHg*5ڎ(Fy_;8VoB^A Q8X?n\-eN<2se56p\>sPojtrA-3={5kڟ[EtJf-k=\=~\=Qsw}ШiiYx4X\>Ȯ_UGe݈Yp?GGLx~`u:8_gC14lqRltxk;li8\ouX']+Y`3nUsv%d&߄dOm8UƜy~ǍuXVOYXtRnpfPXt&\tS<Qw`UUwEV3ge%ǭ,s w*oc..8!(|:y笄Z N wVʳfc (p[ZiQL, n0.X'CBJxUvpQJ]VO.NS (/Rsj^΅y_~NL/+!+u>w{EVg;ya⳸Hsh,-t XŘk&YW_w?q̥hu|zn03A}S9v6lL:vP[״6;W t#8Fcw-~(2nX)\iƞ״Xf.괃 xEep|̂(fTb"&DyS7GTϦqnϣGY 8A xݩダA% ׎ٕPikJyUw7[rz=yC{<yQŕy5&\ZUߩWhJV%\Q¼W9Np(!8Ӥ^afiix*Le!lF9+#BPPk|fe*+Y. 
't7M+bDʘqSo ZXֺ<*nN!:Wye;Tp_ e]"?8σ'c?g{XmUuuRXѪWլjA}UV9!Ht R-Vm(+zG3"_RjTR/3q+A<9ZKEs9yLo+۸os_W8)9 rySWUyБu:XBNe*$;>lN}~L;+lZs3^3,:z[N7u#Ϧ88iČ]o1k}I#d| U\Ը# {,fxAfGQ'PNYW~ED>_ Yq*em)&ѐ\s_5Oa<$>f5QNɽizޒyC:v^(|>8X.Zf* آM v8Q$:3GЬnby)u3eLQC;\jN;@-a>iNEA:OJ>7;r曵J;Pm.a<9=fUev.Ef1Hn8t(Y<Ҡ\褔2hCU{cWò^v+N}ryJockm=w `~gT XvPDڨj;ߧ _UcZ&k0ޥ hR nWo|ƕN*ewd=4JD trdu'emˏVҩ"+r9ΜÌu^a>AȪCkR1IV˝MDx?C2h705fv1̝'h4Y ]yo;: k ^R/A~),Xx׎J4GF;O$wlU:x)ƐfUɘ+MOÆ;⿩Ӿ.t-orT6ҺfM` Rޑ >zލgZ_zRٓJ(΋B,ƾ;UXcםZU#̳1z@pZhB A*|\vB:,ț;/:ٰW:6z#Χ">A;[=J],ϱ{Nc;*@tHǬ ԭc=ĕ$|WB_"(TȪ班Q..+m\qǠɜ;p/<۫a2!kRTjxg:N9GUgT}Y/ͨc*{Mɉ9l${hSz;+mW߿xAaP6)oɼ$Bzd㇙y T@uOg|Snbް*iYP;tT6uA`ᇍ8÷SuNs̚WZ7!=JԍƁ/:ܱ%.xb65MN8WeWNŽ8R72+e($Ϲ?"xN엻ٲX]$n߇ |wu4‹kr#}+_XvFV.|5}VXF[׀<"&,+muNeu*b>/>wkY]QnnРCDU\^~kIʝ=.'3؎f13id@ѼoT@IuhV<z^`W%0ToZ)$SWIJZ;VN1j7k2NI ,Wq Q;p*1j,l]h㵲V%BYm=;JKW39/aX\fi+O٠@ϙ디x c@yw5(1SDM9s8id\gMF.$<|NUyT~NGBjF!g " nbF't{c %҉9uB Ȼ x~0(;\ua2E&=Y))w9̓8xOT9!;ÒAT=*PfZlU6Z}b~>:w8qKc|9˕k,9N6W,p>ٳ̆}tkkwe㑏=qo[0?V0?B為DwV*Z8) >wvx:8rx:dt7,}-OZXֺ&>z%$C=:5NʊG>hT: TV;+gG;~ƞE:jYص_;pF++UN0bV(Yt'}94GɎ]dU2||0?.N9#u]ReߩR3Ux\"(uvjR2%(fLR=x9C=.eQD"Rrf^q.:?>TgG^wj1H%ݺ^ 8t8ʨVͱU6@Pw4^zsNƒ<9us%޳I3-g6d [>ke.iRl&gf ] AAŤZ6+qo{A+x>ھs-] IvJx&@WX^d qZ/^!uEPU;N^s4Y^23|o;o'Od#Ź˓/UveRO)sXcUӮZ7=9ƌ_am =B!W(8U1bu~rU)l+>OB٨g{6sa"՝E-*6$VVՠnj\:r[UJ¯Sj/M-03Z70WRn(pm|Y ZٟU7*PT)b:ǥ3#<𽬡{Щj5z9kbѝ_42aknjt{Xk^Ţ'wV9N;Ẕ&hlծWڔ L߹$:Hn}kPx:e\{%VFeuN:.LɈ;uvTMJ͉"Ȍ=GЊ|jܱ8"ee}.OJ  b8ZO?\-xp`e=NWʺzM9j4YzHdn:0;ز}Ctr(3Z^P~ͨ6Cl'}/Dug6vv.YN ZgLMfpvw8>&@h-4s+^2eA\Dm<ὒ L$B84ѯzPڃt1t 6xÃQEZVgZXֺbI: !sڪFU'о~*Uҩ Tzy}~ƣ5i~O ŽPP;kAAz%N{{KrսҎƁwI wVHCȟx5 f)";[[Dg032AV:wʆU>:᱒3W3J\NNs6WܯBۮZg ȍw Z&]}(0wFV.bg\ou42Qv.##eDNծRbVf3=8f=lqIhV`ϸ3:jTtj 3T^6] ٟ݉ &0QCF"^)+ޗe}p#M*4ws^?uFx8r#~X/B&9QC5ox]w]dse}YIժ{Ds>7 YiY\fiʕw@0l6gXjl b"hۮWV<Q-Vs䮡yJU+'jT;XW}Ag%͍8V-:Wڟ;uxRTk7۹`*U=8crb|\QJ:FC* .EU+soJKWO5&ӮT*M[5>J(_Sʲ,\)5NU^INՎ ꖀA*U(u/r^v*rWG;wX*X{FAg0Ĺ*y7v#ԫTұpYwV \P>w;$'S 8P|xɸ^rϞU6µ+\8j 3݀ࢮ*Nk1n;(GނvlRlYE=~m^mxK 'g!\4+Xp~O9_t^K|Vub VgUS]b&r $픆$xFabP;@9xSGь,Wm@3wF^glI`8# fJLaӔY:i']Rc6x\9:b 
$[NF_WNO[pރSf,lGY{bwMOW(kA^7\졆/S:.Rd df񛾏3nr XtĬzDkQ+$zkSGKVW41v9c`z*ng(~:lrYVƼ֯?p;?7G} eϪY'."yfjvH:6b?[\\$s nwj$s۔uyNj>”%qRf0{ J7޿VɊW"T(hɧ_ec4[վ.ʕp9ψ:"` 3T)bKυT'oQe}g';O` DөN'7Sm eێ013^:9AYf^lNϺ:J.pc|C@*Ub(r?'҂~E|)جoyx&z+UUU?nJ_=s:.b63ޭRó\oG*gbmGİ6"߲G\Wnwh= jVMN"JGTEE j_V-΍nϐ AFn!'Jt*Ol!+:7*->"[6ċ|A*1*H쓵jVM#k]+Jz&3wr('ksv3ᇝV-2V u(?F'aj6}2eb̨j(>2eVE蔚y[f]NZ푻-x}])*D|R ~.ws Y&b{6&k3γ8՗bӌ=WNlQ.z3>&ݱ u,uz5'^EU,tʜczn,0KURxU>lPnJwU޾.)',\bm>cp.O%lRcTi2yN&j53%^oy<ރI~v)nuIcdnys؟͐l*YHp/Ssgo]dsZ/bqUj=r3s8@ "g%f9NUC!j {<.^Ϫ2k㔄*̉TqLJwX_sɬOPfp\[hV<, ppǩ^YՊ_Hy0^dv;UE^b;$އAn8 X>2a; ^aYԆVw Ue# IݷJUD_ؠ}nwڲ+!sˍ b5tǢ6+^n5(!;l^)<}c ;ǔ𵕂dmG2' WNZ%U:a1VOZhu[5&-q7ZP8/&RE4+^՗|`,ԘD^.v길zTVӮMǒw~*>K,rnfMJGWmg;懮M7֋zNDE:\w`o!5UB' gC^h3.^ܩ9y~wɜD_Vi&2tBQ~geYgc)'뼞X%'ow*}h*xpu2Ъ}Nu>?CUUsm%;?AJ[\cu7cb+8BʳUE=S g$!d0^NB4r)Aqˋ3B=xꭘ2ž^gYU e(Ze' RjDɋt,V6njJ籫GZ j6}ځ\љ䑪g\'3`Y3# (}glJ sEO=dNk9o[ (r$*Vc1U/؜3>5hJgHO@31#GR0yҜ;u{s_l׵^RP!s6u~(!WVUUnX^WԑvvI*,v-:J$`K߸}l )@uRٲs(Hz_hfdfJl/$%kuĩTtWYUS6 f+ѱ6r7X&~_lFʲLxeox8;'}\<_Tڮ_͙rh.*#Dʁ&"Rj̃{+-WuC+oI+c2RiD"sTh*#Yx:kZjo~,8m*QLFNd|.sz,3' +6>H߃qCzcW8t߁؉iUDih4}`}{IKE#qaWI>,ҝBtΗvΈT+лcqg9eVSjV4 'n>Qwj u:fʖ)ZI z0KX?+ Y1q6j똅e;RQҦd7Te, zZ_ʬQ[o+ٙtz)ʦ4cTzkoY[c^梛`7͹'ks}&TTmGQ%|C3}wSg+:71?z7gܰ9g9ei2‧)eńWK.ûϊ/IjsZմ񹘉QHL4\?`U1d"eC<1` ܭCkV`-qdh>pFrܻ,̜9!}%'= 6;Eo E,DJs}F&+~t 8R5fO|EoХp?DJ_ˤjŭNU Ng*NSK{(Ч+Ɋ(r1_ᤙV[׀.c.fLeg)U7%Ta6e5):pYrخZ}jxܮ{긴'̨Q浹 (+L?;b4> n^)m$CPFa޲ݺfٻ:(!u5ooXU{E/翇:3UJT*gBQ !U/vr3m"J/r:ly[mf7~2:搾H1'p[z&|zsZ4\ հ@$ +׼}^gn-.<ڕZ>zYNF ߎVѵ͓Lx扻6+\Y29l7@cYojd{L2bOtΗ*\|j=-Xɮ85k|Ý":z*q,hn&eэt9_溦()([hLZ&7$'Yj̧s93wfu# ;ig۞݇v3J5+g37tɍ{xYWXw; fΪ^V RMg޲w H4pXXf]\|cױR+]pzE!+su]E ":@SU/W9 r'̈́yH^z6c{*fGUA e's6L3kq6LF]/PiIɤyHDѻ:!dHl?Ϳ#NVxw gގj"暥S?ASqV6ǍyXwPR*WI~kS0Oj\CEª%;cKNvY9;jwGUZ;Jy|V?>65;š,#)Ϯ_M1XsrNR2%nΝZCyYN\r|в*[H6mv)W͘_*Rwg@\up=!*h,B ⤏\̤8+v+_ؽ٥ 9wAs&7SbS]^tZkI;o ۮ˦Ej?2s?hb}gĬ*-ljetq\Þbe=PjX2HǻwL ϔ5t+q?p5 {E>swҖİ`+熯9Y1qUaV߳sŷ iTϫR=5hυh9pZ*T ?IJmmD4&Ӗ/7W!&ēԖSȉdϝCɼ>vt\-q]c'9[*:ϸUWT|%9}䌐LFc>y3$>wu]:o)Eϔ kwɩf,C8 չ+j+UϟcwSt%nv.'B{®^6YXf]\gl!˳̺iHe=mW"q~I԰T暕9jxP`-.azO*tY >R=?#|: 
g=g:qZ4$kŞCV@Yoj*:T@YI,,lT)B{t3^hϙcUX 2ŰɒߺAaM_亄yH9r3C_?vDƨ7g*'rKdrgL 13E&Zݪaӵ1-ߊw1slw\ΧsV:* E;S}+qZ0b>~G$xdRpS/zS)ƙ 37:Xd%2+̝>?%2+ e[qiu F J)lPKQ?}]:f ,dǖ tTU\zunUZ+:d8 w_e3ojEQJ;3̉k|q]_ۺϏ+=y?~kR!>S}뿘"?1XV+Sch2Oc]V,W©_IZf5'(Nu1~e=_룹#28 >g۾[r)WzG_䰥R㖒 x:sMEI}T;Ynk+g5Tǜ-NXw^L3zY9]8n `xezTz7 Tg2fLQ<& EP+jʑÈUσ Ώ>sf`>և2EsOtaRMB$++ɈaӘ?SE3i~ZɌ5YgZ`g_\Q`9ӿAPqRޛ ho'ujSVb/h1}KZPM"k\_")cTbvuϙΤ_,yNNȱ.[OR+=AfqU_zl \zU0HZ1fdR1`&HBԪkl홓眛Uᵞ̳5|ۻƣ,n8yTꢏ]d&rWHj=͒,޻e5/~0<>$V8'adFw[UuEmU<ȿ˵b`BeĪ+֯\}t=qs?F{ݼ]'oz$D)LНKr3:l&N܊ԷεゞrQ^8>UjJeIVfdQe,ӡY1gswW5{yC}v@~*3FRv^`3cyZ~n;6i]ik\'8C7 q^SvTD?-{U/[37oAM5{ Eϩ&~#9Ywf1m !bWĿᡉ7Pz?og;o[8D*4U?H- )YDo<su)d CCU1b=]F8"4k n7T[k7:Us.7S:f;YM)Wv}~C*%sPvDfqe+/L}s7d֩eW Pcλr^fS4Gͻ7\3 N!n0sN!@#҅50k?~"3ׄmksBLRac=ž۸&FG"QSoJHgwB}F,2FHo0|r59bq,\xuBL=g{"HjHhb7skIqtibڪ?/jLY-jzoݹl=9}Dgk~:C0Tmދ߱a sQZ-l#(Yq_mz];"6hnd1JfW׸n_Kϱd.W}ި1S?{&S5h6oH ZQIz.'qJ +rR콆MQU ={g_zK{2R ~UvNF83S >;{D&$qg'ȶq?؟sggfS }ׁTJ6Jj#ҝIvtɶN _Zƭ_eSx]db?;f~{+̯ $ GqIɤIxOwh7v sA*Y~MɮPwSE+,7)Y-VCOޜrlb#XQÙ+\x!N̼lsAv4D}gZKv-VqUU˃}?Gp=]stձ@\7'fVILF褬if~[㩭ĉ!ce*ƕ&> 7P8T$[IIϔLS:.@ }I'Gm)QrZJ, /8&3t(Z瞸{3 rS&s[ N9(׏NYDpwBqqg =H nmHza"U5?sz>ru !]6;uޞ 7זsJUY\A2늙N8tB`}}e;R;=(cŒ]u}0>%ʖ):mtþ 9F&̅> ~kYSu~:Y6Oz;ȟPM=x-zkns2Mq8'Vx%_(䏕Oʯ*UH!b}lM@Nwї8ۥcR׳Z+`9MЌjfQ.!G#(./qS3TOsc.jty3&;5py4S*v֕wәOqt$FbhG1D'_:ʵED Έ&Xo2ρc|vU':WhOB\)]MEU~L 9o8>+:.֛ |J;qə(A/qr}g;3bҐI3No~r_?]2Wa=׳roX'Sj>w˹G6Nt/CyeOA1sB_gS@p)D\-*בv3W=v;Ǥz}1k-6}-k sBn2~& sΌ)*gVAg^f)bI y[5xxV*gJ,}@{{w_ֳuSreYfWl?8^qc41)@: J2 ?r~=aEd`_cIqK{<~U4ʚ &91\GW:~C3uV|x|/3k>:hKg3!wZ~:gf\>sqC2qd3v~sO>Mgz68z2X/s}5Iށ `QG~ V6rp}Կ/4/tv' bo(&ߨ8CnWXpY٬N!N:7ƌ0xU6*~.2:tRX}qCs*sϏMf01Ή|IX,6+LMTJ.波?Qd"uC&q#ZAmY%*+?⹢'@o^(M8D?ou8}JUe||NƉ'ZŇ}-Ʋ ݏzezϊ۬APT_ uRQcAȄ!TOC(EHj_V/OVԕzfT[~ew5;#O><)pv rØWWYC/@j!Y"ϙ,3 O˼Uoc$Q_pl $K`_s{J2H/k{ݼ{kUu@PgUON7)8NgC&. 0J;P,L_&-S^S3_h}$lt2O2cۮyb>2^+&&ݑ76s"~\p\ygEA~sC8+s+9'6t$8bz H_锾ɵ _ J2\Pft_ܸϾip+GfQMde[;DN V;4\ɵ k-8M#M;k̮:2 UWU)6xC&bK^4w@'ɴu? 
E8&GJDFq#Yg9(\jknS&/w>EuQRS@jZ#zX넣߫FBĩn מN79LuzG3Vҋ+JIr1xa} y&Dݏ |J]^&t q5sa3{oD"yh^9Vjϯ Zz8v^R׻&suLjľϵ:SYBid32ܞU,Cbg)Yh ŭaď~{0iL-iu 踎kYg@Y\paŸ|f !um761+B[V0ݬ}>TjһKx WiޭmnBU1TO%B`؞ eBĕbYvһ b,.͆Hje\劉ЖǺ wv*lǃyoX'*K]<̚^q ܑxVV:Q{ܡQqYg#{Uc-1-4ę{3m+߬`f==> |e ii% Uag])V|CcsZ0+Vo=k,hWS7=̚AGt݃Vs$s'raf40miDEVڠ&WyzD|pc r|%;\9nߟ6BuKK̝Ru77)Lxg:<1aXwjCsf}P4q'W眪[y") ҿJcS/~Cˆr`2(d !zĶ3+Y?a]_E>p:{"k=쫸{A$2Ÿt33 RY[hϨɵ W7V/jekl( ާMyvRNg9d;Νp'{~7nQF2B ԟu<.^(噆&=gaOV UljIΟ8EM-$;n:<*XlݒɴĈZm iȻHjggu[7ؐTn$Nm+{kf'U?25$q~7Ukv)=Y1In,r~N<D(ʎSU9U9^pk+YYfIo>ֹ/5#WݞX˜}}oq/>n|糢J߀nܩ֩vOCzȄ{S&k"AUgUUxxw{(?Nws!W00CĻ>w[ud)UgIYA,Xwy<2u:}ź#ez|w:`E #ڗTzo*[{qLU=u螒^1?GɊOR+SAث6Z'Q%ʙ}3[1{!qi׆Ӻ:r`|Qmjӈtn:Ӹj>'cwCĩX\ɢ4ƅe3W &ԌƜs\<'nxEznLg:t8?d}kOoI5j~G3檫[urKDmnSLML٦=v;|J:P}1r73Ua eaArbE%ޕ=㟆5 fIY;HHiJ_vWje<6 }./.V:J* s̡BS^s@#NuzL~rv8q/\۞K-F}u075Yj^|Ɨto<ʖ;s\9U|Y=%wbA| m{ |>SE 'Qlt*=é(vvAW-1(X!tm.UU3^xci5ìax=Ess}-k {sȬ5r62E:wDƒY2sc%S\p9qUjC$RPU?9ͦϓ5 ī`cU))?wJ/MX`*&7]HZ1/~U-Ւ=?S]&)wMWb3YϪ;qVN0_ Mm^2ᏝgԳ%ճpAYyǡuz~1$2Yth$wffc"יN\q(s3ɑoiu "qĩ$ ׉|oP޾l60CXmMJݪT"A.f֋ GWtudU)\U/C.}D's&։| ˲C呋 d+NJj/W {\o:8^^랳ΜEιk=mxQ?Xu^Z}\ukSQ.Љ2YӬ9\:)]N~7D+o?pUj[rUUi&yM,qD=rŦ IA 5* DYG O*#V% ﱬxUd;:Q[y|h~Ptwϔ5~ddܧ qZ=Yia*ݗO?Uu7k&Ykd*<ˁJEԭN7vmKOS wG/NYA?33)Y`7Fq7q_+ti|8_fV56Af\}u8XQof+LmΏ7}tD*~N/YL3u63woSp#hy~Yu|̢C˒Ӳ^UO5@_{ 5Tyi̟d. 
(Y`En?B K8SafӐr ZR%p-j$Vٲ& >m3`Μ]ohŪs kB*TkZ*6rô)G,e 9 `4ũ)*6)\H 3'}7+ep*J@90qVC8#!̛`L05u_+J wг-wzeR{+f+;:.nx/Yq7YXf]$gU<=g㝐 ߫6dR+Ɉ_色eƚѬsGTp+xSk:k$yp=7T1GY6$wfd%+l_K>?JO{xZ)YfvuM^=r\+iQD_G YiBS3jݥ&;=]N_1u J|ig>Sūhjn<2fw~r=6ffq/Dj?m=<#P>Fпǐ]e m NqPw4ʺW[6a|#챒X˹c\n*]ǕNŃƳ>wBVU m6:~M昙b=E&@C\x!-Fu?|ۊNۻx2?%LbpC[zdhx0r0qL1 df螱wy 'M'ҝIv]|c~\X>'EsϏ1o_2k@e}Ã8U?Q[NfO53wI:e\YmjT.~,of%n':@Xio>WmtyWO(1'ـN7OBec3o#~BurbhHŭ{ӆe&iΩdN]"6 dRTۃvm^hu =ImjR|ãj+}_ O 3CrG|Iv3Q%E.PWzYq s U<w#=>f,͊(WpS1BsĴaI(mf(.usϢv6>gnw#aE&kO]/aT;D*̬V2?{"O&оj[БH Z+ksXUWaz~Xw [Z#m6nϨ|%+-5Aqg+Oޮowd.s$#<)2/c1A{c9(o` p,;kߙTz915=,>|{g1%k!As#u)IWʺQ\;x}XvVUE}mi2vF+Ϝ)EZO-*+?bu=\2NAnS_nݑ/+b^6 xuҬZ#MȚח{}vab8pDž(n쌞ϊo|z a&p\^cD@_P;Upz#od;wPXfk] Fzfv' F35ޣ,j;Ty|NE ;gV8vNg.y[v> H 4<ǬXqLݵ,KR_<3*bj\pǾ+`ʑjaxt1Yn=?q2 jҳQt)pKy׏]%9CpC#NOM͟`8Sz-kLMgFU:}fvghxCg=gFp tW/eߝG҉V{U(IJDhfv[͘ 32uw[ۭ﬏LŲ*Mu,d:'8ƅOFy%URpCI]O 95Εv n W7+'t*u{PyZov..*?k mʎ i1#t ,[,pL'-]v`v噆3 lsU8'97Sηz>Án^ !z)}$fF282zHw Ւ ~TUO= {Z\~ܿ `/h4YaFFb+gT41=CcU%We1mt7Te3K@筙AR'*9AU]]efI2vKz5֡c:amCO3OkKtYF@=O˼i{> $7L 'sfWJR\Mj{/2Ò={| w>ں,k_MC#K':$oV}_<?_sf SZ[xT[S{jԝCaŘ'n6=&Fzx.$U*U0 lɂ$ޱ)L{l'iYcIɍqE({ӹj˪:Ns#V[ge3(inיNs4`3#ǐ0 :/T̡|k֩uCXz|$u_uL QvcYGko':"8uxn5.{07 .>?#I9`HlfC+JҠ!խUҘE=u&k}rje<}NźS}dve]ﺎh|փ*q5YwN$'gq!+u^ɛUJ?>uΐg"?+gm3_gER I{ڿS AmΓuHuߝmҍ@<81gՋwlhԘĪuqgsѢL$T/ic n#'n8U@> %V8YAԩ/e"k{={5V(V*k^|$J{S )D=똡]D eȞcv #{Ȯ*=ͳ׆B m6ԟVcf.V 7%>n)KYk8]rS *qAvust7U\ҘLgP5d2 ZO+W7IJSiRbVô&Nl!tUE4+6>o*{K.si3k>03z^3+Niu.fHIJI\;u-Ʉ=Xv*` TaUI#G*:ιuҬ :hP500ksQ;-=yt|R~UTp\X 7Ww1o=s22\cI%w:beCo 7EXoUOkvEnn$PXWr췧ěs LR;[-W^Չjxm5;<5 C2|o>wUTnouY۶WLe^鱙k Y`z $"k}-p~0#NUOқDu)_Õg,i@ 'r9޽YQzvܢoV:uLi^J#׏vȯQ4J{&6ˏ89W97g;a֟},٧췍HhtYpW쑼uM؄sޘ9i栬֚U+s8zm\}0d-kkʘ nTz_nA"74Jj!ߩyQS©B'nV}nұ)Pѯ8T`z3"NjnvێCu&)Ϋ Y_{{T ~9!-9kőw Ge;s;Khoե;-B>rY+bj-GZV9s榍Ky㙑(t\UZ]mw24BzwI\lq.C%~k ^=Ɂ5' HaFv /3X%R}WnVI~;Р5|V/wI`_9f4u׮ UV:j`;1e.>ۍSo&bXm}BQ[^RUϹձ#vz>s1zUaL3j]u+nj}j'x'5gʺhDZVTcy׳\뤵1e} 6z\en[&̖LŋT*vGRb ļ4͏SH.󹘫g!!R힗M'II4W.L2E9{&"6C"Ed L t\F@*UyU/v'vJ]|ч[15:7 ,ح6L<3x#Lb>44~jqT*2IKY:m'{y俗E}{ij~vܐgdOr\u1h"U5(Y1$ޫ 
YXe;ge9sC#;m1$#hǁe^  Or?+y"yV6y=M^(쎕dDN͑0[Fϔk̙:#l[*;o7t~!.T_{R̨<(>W}W=S#5n\3^Olyݭ]n\`*d 4ը6uw[_޿ew @x@_5i!s?kwƏbny9ff|J7K9J9T1ZYEc[)>|{u") Y1|,EH)f==*)zx2J/YB|/ӞsLhz=|I]U_(n|0+7+e,W])M*׿ r^aUT۸ocT\_deyoBu)wc$| i+DKQ ^ē+VfrZ|.VfJLˆdƺ1+Y㌯W}g&.s*ĥKvwj9sΙ1󉎻ʠ_=sTZR7bJY7b)'43zәϱ&B`b־sYψS|}Z5iq^6F[fDuJj'mY0ffV9 z.[C{G)yWΫ鬳?W@OVw̵Qi8ˍKh!d͑w7{Jǡ/TcA[S.CHigfT˻ycaos[J/f.ݬ%A4OnM`kfHq$Lƨ͢AVڨE\>Qɮz2~q>$_nXM(Ȏ/=B./M"'4#DeG}nn\gwxpדsçQv蹴Ѳө>mY嬡&ȁK7cc]=xɼZڐUn@_/vuob/Y:S|ps+O3.>_ޖtRQV$~qTnj_iyᬇY]Vu&cv(7n)ў{+M5!8 zf1W}Z){J`t3R3ّ@х;sZfV垚NG^V dy7v$ ;R=U-Zm]*f7,BrʡYJY7g{x[W*-NZ4~3n<;Iosnps;s)T+HLZ":6?{DMWAgT 敺WI;!o8ds~)eʗmrxY8T.ׅĬ+e݈+.B6כ)3OǴUbڙY538=ć3e}o_"';Iv 4˘|'9<G|iU2|\z7mJ|856:w9,w_))~U6Ug%K uK3Yl? mT1u~TMkt3oUa"!dʻJ,jx~+5sgPϦ2{bHruSbDv9ĵ~h킻h'o}\/|r@Se+8jfnJ9fM36ص!'ydߞ^c=2TRZd2^Ϻ>SSk?f #>=43哦̍Q !"q)U :b{`V0r1&mD2/\81a=nHzu>y݂. 7+1G[ Y쾁&=VQIz0?+Y)' ˒*\QnjrpJu~"^MUZ,W\o) lVX.yƜ?SL}\Xvÿr3^ΰ۔pnJqc!d;I {ɴKK~ӆ̡d.p|#8~Z0 ƅϻr|;:ć3eݩ/VSēiMH T:k_<̬ef)Om8/k}~7t泓Uo\L9gƬޗѱt3ج +TjW:?R)Z/MȔIʊ+=?;xjiLǍ|':C!@mErh2Tlƺn&xUvխj Hv+YaeR?q (ALyȿ|XGS s'E[n2/rB56Yu`FMC;trqtuã螘= ً4?i7*s.z8kbS8ZIw$ȷse9}G%>_cq9+!:UTC_[r]ϜƷ>7qJ]ϸL[S]7{IYoj"~S S:gWb/,ްʆ 4&Q{NS>@yS[|L<k՞K5QVxm}Mkx4yW>ǝ.*z1ɴ%~oF-&-bU~dI94pK^t ˤ.8pXiyO&{9m|UyHwN̍%9{F~?oh9=d b$V+bŤ';2`հwg,ȟ R%ji \~q̍Μukp!ƅh<]{elW[:i `gOryKSif\C*[Oge0+_nbtoȮJ˛`|7;S{iV˞l 車.)C"{t S[u}L uHV`BFNHUb[싍`$ɏqC9R zz[ ]t }+Ie UZϔcJX7_{ Ldߟ0qyCn%CVI8Bf N`:2g {P.;kx݂{ fR KWuLzY^?i-Z[9SP*˺mg5}i t;Uq\z^̄ϔ@) j~;"+XM0:@:'YAuh{ixMC,˪g joƙ5X/̼w/ BŞS;3fD}ODT̡abJW޴rtrv `:Ub+ץC;.|NB(7zEl,g[gE8?*sj. 
SΔs"V*3Eׂ)?7+f=XwlݡB!)hV'skU1 YլJc)I~Yϩs8n4A R|S}kX9i *+ ^qhsEa=z7+\1]c]Q(g9VI7']x^xQgUo_CVXnzbj'n>QeHT劇5֏(.Pa>NgX I?s}&ݙr} hnEI~ JJt&pʋD^O3}gEshn\])7͹g91Rr'IJ6/wiUZ<:EȢLt&"*׼+!4g1JV˝cy#a?Gk/`xN^_I\Mmh#B)ݼ{+~7^7N /i"OX8(Dы&*Z>.F5@hYaȬ kC7R9;fGwI{~3+O [uqc\=G;*V&!f:qcy]T*볆۳@A=3d/+i⻵L;(;IuE/R5sDVpnSLJ+8ÓJ{BSz}u6Icf׃;9Q ʺ\Tzt x&>9[?vo]Yrf0Z[ezTZ^'w w[YNC{_IɑU/ Bd0X4ĥ/]r`h椝&V h`.#ǯΥRrMZ/c 7}1qwG{k)t#Aګ񜬸~ dA'c]?61E+$s~"5=DT"S;/3iYSd%ןៗ_(ΌW='ݔntVXWrndg5J|`vi/{{͟݋a1wo&^j{:i, Pϰ7Xm(h7fa)Ycm='ȅX'ӝ54y7Y-s3w]T4C4?$ɯL+& 굉=w}'4ԃ9fM7]> )N~7v`,}|,:)w^'9kJ9Ta_5ʹ1=wSh <=QnjQ*UGRҫJ:qpgu['2KӷVv6`Yσgj*Mb~AO;<՞yiU^VzUmidbS) 4DuQsm='Zg*,qBƒ)D3fǺ )!~!?Y|~xWnccSBT|F٭:(Ϗ񍰪*; mzŬ. e?PߙI{v:wviENT9= jtr4\8%@yyԯRV)uґ);w{+8Le1Ӑn5gޕht "pfT*TIfT6H;%DJYݹ=}cBL-9crV ScKI9|nmt{]vcz{ϟ wjé_|jACLJILyGt Z o㪥(7:sȩTNb=S0sr0fb̂={f8t7&/71WsdҒ˪X͟ˋ}>-:fuNVa]cYv0kj2/Yﻂ Z7SL'|=|N\uZ4ʹ7εϠ\;UJ[uXudeXtyn;9}򛏖a} (Y-s6Q+0լr|]udI;9p'eOB q5o<o=W7$`iAKZ1+s٬wmR9dmF>؍9Iѓ X.LJv] 󙾳)r^Ggy>9gJqSv|b_myScvx l&8˝g'*ma3O~o0|uvRDO:Xͺ+{Tg.nuH3]|yf͞O !!ӾaA+#z_tV6^QlE~Rb{%؆!e]붽*EBmE:W Z@΃A&3VW,W!VvcxHHB3Dv 7X򇝰3UdpmȾ3%Wb,|WHə!6=VI֕>"&+3Fuxntq=WD,ev@\gn#G 9Pɟ[x#=sͪ}}4Tg;O?3t;V>_w1c6o|\ȪWE7ӢEbir|Pxv zyDv9ZQOzZRep=ulc>/{u+Kcfm%)]H1mw 7H[9EtF|^TQW%YfV//2zKw_VO~8E]D1SktrLMhN+pWgV9?A.Ióʻr"==-n&2ke]D^hY|+!fmwXun9՝߆bYtC>#G-Kc1[)PGK,ܱvx]O&NNQI=o3 \ojʫyV7Kiޱ@8+sOt Nlo[JSwr$|hh=#]31~N^㖒w1U[_V ݼ~;?= });N4F^ַ7fÒ4o|U=B(Y97ґ]+ uҬ2(yiҸ5k y.0FO2݉jM4\qWO9% }\e^׬& =UR/鉽nR翿3gUNA'vLF:waf nϐ35Z^[]gҁSL؛[e 8mRڨ>(^ UWe7˧Y8[I6l(nOPf r֭yvXV9E%T2w>F';,C]ļ*,CW3aUU1$ [̬ZTV !zq?.!.digWw }?Aߑ:R4?P W Sƿvw"&Eci:z^q?S?[.oyMcwkv򽇞wu]1ircL#%2+35ugX.WcEsrǼ냪Yjw&ˤf=0V1K_EYx\H$ DJQz0XdNCO봞!o+>/wo_;z=̢r?ܸ\ߌQUx5#~? 
۽"*X#`?Ĉ5U'7?w&eCGvL q?2Uۊvs&Rwu=ǰyUϯ3J?53pw=UOb=lwx+ok[P Bl8;Cvh" t:+1w3{,RM=NT֥3lA+5[4kRzkσoyp>Ҹ Wnd75Nf6ЍyR8aX\R/:_5cU&]XS]C*/VA?$ЩOe&+6l>oσydqX1l"\p )UfAbVsiW,󟧦5kpYM1ܘǣɿfnKc1bpg-]Vc5BaXb̙!sg[.\}&3,ŎNyH|˟[_qG$c9Ƶo,b{&q'+*)۠밓";%X}l =j `ȷOͅXzMU!.D0 ۓL{ܩ7lIzX-j83陫dÝ$~1Wmw?Iqnp{PB1׺3ne#C30g3߅ D;z,8L&ls=j=BR6D< ~E@ѼJȮ5[@}9rͬz-)"Qs7y4#Yh׆1_tY=d1~4]Nn=JUlmP8웳9oI}]`VKؿ*N&6̠iU ld;zZ} `tNe;ߪY.Y=yߨs' ԯL> J5WQ&!v>d_]jsS{1Y&ceVy,xBPZ;ܣyl;kϰ[ldHCo,tS:EY~fO}Sp!}:i&]|$(a2}C5;EJ;XKJ7#+g3c~鵧ۓ}^8Pi&dht}֚Ըv=ifݿq*>}haꝮߩ 8b=k(ʁ^.ρmeB>;(InD2 *WQz& }zZ"mg'XaŎ:V&"=(qѻDf56{sCe9r><ֵ2gzoAmNX/Wmӳ??v=o6j"DC7=vr~w^kxiQ>X6З< U P};|X1dK+͙MWm~tNbC{/jq <﻾>q7ޯVCpZ]4eQw׸◿,ݒ]DŽ+Ze/UcVVgї+g_كN R `W*(&/+qr$S z-hwkNuix[f.Jke5fgΨ b$d> ЬzeU>RRSd3Y?O݉VWz\ xg5]:~h6}=gIS촉%p$Y&c{ʬ3ͪ&ۅ JGXWI7!L2qx~$;DpoVZb{u;E{<, z+#Dq̽R^[EUQ 1ixެ> Lb,꤮g§.N5s%OY,9\]!d=m\Mꁰ{=-ehuwvztu Ygbt(Ѭx(hؗ3&N?+16 9g.&km輻.V?,L-JkOMp+VX0{ bC3غVU59V[ԕ- )'9wNu[Oɧ~E'osy~SU|k." >©_Ig' p bY(0eXO$yNs(sq[{*sv]IhWvܨ2$sGgyے :(ˊJQ=DL:Ht:(ƾe%cm )=>oOe%_z[Yz75:鹭 aIoy3szVOVvp^o':f6}sx_7i&y]~_+M{jsAd^WBWR|ЉqX L)s}qIaɌ)+D\MD)B9d׭@F#.sh:9fOuV:f3_wJ]vEWDŽ"72:}E}q s]f. ŝ5IP]'w#M;k_n\j(* &~z Xz^uaMo4TA,1'rSzKYƩ!ap>uXW]?Ł~ g&iGgo] 'id"b3B[~~wٽTet2T\;coDf0FF&4ȱ>i^\0gOA싑!˼{ku2zyWۊ=BL!;Xtd|0Z-$5&- 7Yh|iI܍ou*٭+h t@I[(I q4ȟ{s= 5cE*ϞQ,:T<ߍڞ`e?-eN%_-D7gW]XeNlu3*g͞-J)9z}x5ὢ;A^sHtZ.{uJQ̾W,+*UΕ}Nڭ5".1H1VͻcF[Jwj5N^&';Q{=IΧד gfG7E|";V[ۖvS}x{䅑WK4j7pdiA:o}3DCZ}CNMZ\ ԼqZ $*;R7UTztJ|E'X[hnq:w[OǿSg_SF1#ZOA!`z6E2; ㆪ:`ENqf2e}Alä*&†Xݨ%3ىWPoI7d2g5^ÙM)Teo=|)؄*U,C]/#NwZO.'q[ϑq! 
es¯WwUfg;oIr0r9;N&&43Qꨇ[}S S8dܫ魗{ؙ3CheMnx$WEI: ڿD)^rl>Q[Foe}D gݟgP2'ܯv}ow`<.oJX2Lw!PyX|Jc@="NńLf.U;zcĩ\coԶȞ]c* .'stFktuNn6s;M=k{.)qfrÓ]6$ w}#$,c]e2Y.gH|q'щ9I+Mߩg|'YP8d{= * ^c6)6˳*ydށ#u߃\+gCa 2]*TS 8e!8%iS^KY} OfBYIiեϼ=q̊Z|z4~̘^JYQo^!U<;s%¹XyxO2) |o[6rs3k(zz&ʓʙULv]g] ˉqώ:+mې1z@ǜ=dw:z=F50v|:FG{\Ű#m`2&;|`w|'lj _W=^gwֻ?MX1ۃ]2ݽuia;oY]csFb+41-@Ž3M̻}cw~OljӍN0?᎑,o|LEH8k YvB9ąʓ]Uͮ)s -#qUVm/Kwdwr=9oLҨN"zE#g_1@>~}8#&S|՞ BOkyH6@_x+SZLk񩛅jcTZ{KWXwoeǾnPpNtx19s5xcf\[iShߏw;_Wn`]㾽NE'+6ٵ1Cy(yݸWˤbzYN+Nc#Z'T+b[VghYBD1Edɦl $&M)x%.w.ZX+ZWL+ŝy=칱n>5bvx?vj\C5[3 6uV Yט&d}(ɠQ3eޞ kШNozV{bDu*p:'H2!sYkW}=9R*kbvYڙLDe) 򝖏醤}fmurw> Ρ:Nc! Lkﳀ2iXĴef-wӍ7ɱf/6˵7! YF_J@&E3藰aTJf~.z!>|~ߛW^&zw1L$O+Y |E >]-5f+ ?TPd*nlM~^rJ+qWS7Lu̓ݫiiG;#I1/J̾Cs8dGkLwUd":у7PIf8_|KeU (׶qYeJLQ\W*'aUTN_OoU6}KV\4՟w.w&qUvnwIwQeooB\ y9]rĬa^-s 8+/w `vfr>@G>XXU$ZzPSnz/z {wFu|ޘLSEME%CܠU̱ק QR+ Q[SюU!T;.jgN*sPL-tNjYgלgҝ:v~Ճ\UԘLRn^ֽ5NoU V1x(5aŕ)ӔZlTUqyJ23twV|^1.MSEwDCߪ=q7 =W!/˩jaz|?G"f|\c_ᵱB_jZ]~5^VfϬr-Sޫ7*-}oP7ÑEtΣ Utq/JjϫM}U]n'΅l]yoUe<1tq4jXv'@_]ꁕY[D2iBh i=9o~W8x W/v\MzGb\ԣ'8¬3xH|7Y"ǁ^y9㚷 f93d.}Vtx{g0yoCHk3EWoE>M2ͥ.e콧sdts3a*ugQ oEuez.sw N |2}܍p*{:=~(n(L90o1Y<Փ&;^TI7/e)VM0/u#C^Ma' "SӱķT+ka:5,|WywVKV24Cwk!u&"Z̳@[ G3=r7+? :U,B qණ*@|vc~n J]VX#)+چrwvluɲ쥐Uf&3# V~|UfܫmvYN2167MיfK37Xzq++!FLߚw|x@!M%ˬ{/`w3! /7+e1y"站wgf=a<鱷 KҙŬ*뷫&iP&E92=Ap:[mZ؍*Ѧ;WpiQ9fOv3ndM}(!Sn&˝ 񨛃;eWYul5>3uXz|,qt2FE r%NH*d}}\몳:X=3^{&trM`v̫6:Ũdց+Sx:Ŕ1 }S~CBN&CM9laΔL m(9S:;=4A21#޻=du@?8)t{ı4Z5^e8O{S7!gGh[A/ŧ]\~YPȜ#/;ZxANuk]'L>kĿUs .s.A8 Z@Ӳ^ՌEGkQ/J\IF'_XR;c%ֹv 7o@bvyGg-<+/;T*z`.;qy;̞ؔ}YRv@{&| -k7彫6?ql["V tVe;y3= 1< QR(rnf BY W3W+rY8IY9[7GXgcEMp~r|%'Yu/+]|='N#(kAp(Rg=fzB%ʹķkqAݗWNtXVĺ*}*w9rD3zdG\n-2s6۾wos3t2ѷ™p8wL"/{' pU~][ёHqFy 2y_ǀVǻbYńNf{뤫AN0F1d+]IR|Sy(8aeժ%+Eq!s,w Yje^bM"3Aj W{Vi*)gL{Vw %ثY-fwvdxpBev_tcx+_|-91/ gw5*j;ߘDf"i]93kQgW3 vFzLL uA{T7~ eiC!羙~JPZ>n"ĺ'L]w+>8I#2;_nj{1')ytFce'3(wDQO9ƅ IKoPIW;єL{ONc`_/vw|7~L Q+O;*#J0Sq'OԎ|\gC8I[q]>f{­s*ݱ!'=&s X ]xuRP2ooP|hɮwSluj&T B6:θ,r <^*L u[)xΠL.&sgwz[oߑkuB31ܻdS5ű\#Z! 
o $ 3W}{iE^z86_;dNU?>\d}3cqq,qsϡcw>㼨Ef,M` 3\O=^@e|3Ƿys0\r+EW'ʼq25K~RòŮQ[ex~@ͱFfZYa|)}eƊxޔtAx6c)Yi#I҈,H|r^+ǃ˸:@sOJiMt/wuk=T1 =P l" f8TT_ }`5  ŌEVOk>-S}u|^M=]nT1)=;y=͚ 2jKuRZG^;bVg~ӯ}&'+916u&^xmg~__DQPv>6S.췥:ngt'qlN)ŜeAcz9{g_ݗ}XuVZeXPz1\ :Ċ]dٻgaN|;e S*.*#Vl?ڊ>@S./߸q8&8i Tf8WԞ$ 7u:Ԯv&C@(6\g! kLo7ПYv7y5w:>V8%eo>N;,uEw'Зr#mvٯ~synm`q`zWeO 9qjSt#J=9oEȤ6 1/ P`˟_jul[\=rzygO>+)0Qxniʗm#&BLwLdLV/HNG,|}t$^¡ua|_+uޯ#vZw*6GƕiRѯ6%DDS~ruw_69̰V)'&[=0;e,jDڌ''2;1;k~!աi/ꢨpg9˜&EbiOg]?fGN t3qFU;h*oMOeoc dY1.߭ߎx|P-T߁;%歍Y.~ׯt7;ǽ&HB=?ǿHw_6gy!g=y_UX)'/eVF/sSXLhz:k cx9Y~^G>iA]bv> ~g-NDOMW{DN&nةk:PY,oݒ_']yyEVcrg0\;U<~:*NQGK2,WDdJqrYpS~ӨW+!-! Tk@ӆb.wI T{EuYBfU3M=$+Tq%uQ&(YO*nTeP/Zk^n޺BvuGBv VPr7{s33]rfq0cY,뾑wWY{ԬZ9Eݦ&?FQYZaÒbkPy?G~M5W܌jZ&p +.;a?1Ɏi]⡼/z7}gUm5W2Uݦ\4_ L&`:ovKʸO`;7^1̳ E:fGx \WD:hG.L"sG[ς*g#F>E,槾2 Dd&.Q}"]Ǽ[kLr7|O$kZSMx{ܛ. SRJQ[9XSuA_IQl~gxYG .LO;~x ʊ;b7T/&q͌ǕA:WlnNSBS&suMzG|vYq-j7惑Z/lJİG;F]yUYE~tY^5B2M$k"\fed{=rA>/w9yn=~hXvWD:}{7e|ҊNA~J_ݭ3WJqC9 ͫ2M)/Q@VbxͩӬN6s.%ȉ2yb _bWdt3a{M;k*w̆X' IN 3PAΧsusϭzw;{!USo57sP~rj~PSNOd=N*=Ay ޻%?gO|~B#P+#l{we.\aN@zYi6׬]M`1aJ7bw󟅥nsA`&5\yR_!mն.JڊX|- l5"F5ӧ{ɏLx ;oNmǔIdj'MVR蕺q5ݒ+wO3Wqϱ=EIyEAy[qc~+hy]3=Y5~H&Ԝn%=5|]hV*S\!5gvb[),j=%PeVcdA ]fbʼ0BX)_]-uŮ 4dJP]۴;"]Bm+'I?.%;Z%Ezo3}=# yo_Trioԕq t1P3"T7uGd&c=SQQjtǦR=cYfɹN\X"3iSnC{abw[)2ژ":e(uJq~;e7ՙut+:?Џī7wpx :/Us4>SC/3 dYa}qr$Y8Bb:B,?'&=XuY{|Uo<Ϩ6V+g:1nAk=5oݥ~< Uܟ_btCWǬʿt+gg3`9, yE w.* 5lP8+٫.4m1%36ٝ}KIJEgx1! 6XIQ|/{D{wDG"NX)w: _&1%hAI4vm~N_[1ξBjb 7>9ɰ8k;O~)?M&E7İpLL|[pv _0%H1viWʾ;N))T亄ݡV1tz ?:1zw1b E`Jcdz;H:|74yx7ݫ)gƳ9=)Ap^! 
fZ-yvn_;=6*?᮶RaYaʵm ;zG#q,Sah*'uf'NfV!\O.NfW b̫ ;>Kݡj6-.+yoSoTm\暕;Ws{m'rQ3&Z-b= nfv+y ubfd'[.5RǍw4Q*TwXyEk:5:޽+(Gjks㝝nq9.d]N[''{Dϧs} Ƥ&xT暕}W8쇝lvXu$ͼA9 6g>Üj|q& :YwΗԈ"NIADE Vek }Ei=L]"o&d*Y5yԉh+e݈ #iw N9OBѼ6ĠfƔ >߰O;DEOMWޓ[U))Si镓 ~nDAdʁq׼NOf=:11tgRRVTg kn>yVn9% k(W$hVGe-+A:ݔTgf:VMv鹼:Zw|zu[^7j);7Fz֛ؔIVe{ytz+N8\=QZD*˜iɴMS".sy[[gىV”ORN]͚yy4q*nw}o;PQﭻ͂'0~*;KPBl'E2 \ꢃbwxNc,껲[Rz I&p*WWc Jz"^ :\) Bc91E3²L޸oEZuvH\xK4,bbT>bdԕ}<܄e׎6 Owq}U(3a%+hRF#߰W$*[e{F=0%eŁONS{=$;l~}P'y!gH&~w;f ynR͞Px+gg}>{9+IN'̞žѻ}W샗V8"ɎU2Ǚ]l>84q:ЍshW;r~Ԩ}%&ol 6r"FU[m~(~w'43șJNH)ۖ#7\6uos9IR:ԱPd5ijNJ}dל0ٛW) =[/~FՑaNaZuhw WЄ/~ 5l]PTIe*^ݠPsv&3 +*GZsC&ng ~R{:}>U|P//(%V +*ۼ*_v}bcYuQӑNtv iYSdiڙ^A! Ί{iGA)3g9v[I|ױ]֝jGDF_?\]>f;3c,9_I|wF4ӢNf;F̏rm4]Be0dNU$v9a;(( ILf gq+t$2 m> Vx}g5VoS !'v9Jѧy\wD>hxAg0Y3`do0U٘W|gm82/`WO7MvfX9l/YW Lزjã0=a>꽊{H?jL7ʐ'OFNЎtϪCɧ4# y4B z=&O7y^W=sXwY88nLkb }r1R؆ͺ%Uq%6"1i q_Q[ŷHivGYu_V<=N f?qüW8Z[ykf2gdg9f'zNq7}5 ) ox"S75|ݘu~'ȑη}d +&{K]hV{ǙBnGDҌB1LOfy>[*YMY7gUV tO>'Yטܱa²VGgi)/3L.sq@FX<57=B>6|g 2G4W?p^״f|Ցnsl۝iS؍.g_6_Du^9}Ǟ4++Wԃqψ. MI<ICP/gXɚw+z9y1-Эnqt1@qr|#EOa}OD ,XWʺﻡ#eTme}U]^GDVhqa•>NatҬf_ӑcĩX/JX4u Ks{dڵN7[pˤ^\9ϜWd_le! 
75XLPy@4L y[Fy8$ }a%{͟9>dx>)G\*}y'x3{q9$^j^t$eour}ax{toL.;A&2.v|HsKIzF\IrkӸAH&Ab,fJ6&̻W2%+"6ˌ#NE0L}ՌCY k, px?ph&Im`!/3y%TtWY1!>f,d}}ɼ}sNetyй)S؈NqC?s^6ٵ̦¬+zҭ^Mop0JĈ 9N`V՞ 6Vy:Bq> lVXc%Ppj;qxJIb/oU!$v2Nqp +F{׋t!O`CUXm<ƻe/9{1Pۃʴ[2+(q7&3SLu%G _ ;;3)/^TDzqf3 ̒m8kcy7ɐe_Se8j2aϨ8];35+`=Qv)z*lh n;S_[l:W>h+%٥.R2E2;5Q~s;z幝n{7ĺog90ωڊ"?kMc?U5Ϡd$j6\Nz A&2 [Y"xdU~Hc6{Eu/W}A.Mpp~ _>5L&a$%ƛ>rͷS?}֦#+;hNwj[W)z"8X4wlknO"e(qW Vksuʥv**Lݻ` u<< kܿna -(Y~Vrߡe%bm+3\+2V> ?Er2.Z/Kb[=uX)YW$=,x %Av,zӷBMl$ m/+B{+੹rL*# ;y#GɊa@DNS9yץcĤ 2Ho܈HrÜKb8Ʋ1b7aueL,9F$72.\V@lvXeZ$jD{PfNrMcC ˾hydg });H/QQUr3[ɾUkWj4[U1!OHo>Nj9WìI D *nMWX[w9Js~ 2 r9 >s%ˬsΛZ.z+t}RiTc4ڽ8sdOΞdaZ4Y77 t~VQRs樦FRw)k#@V˽~rʚf2wk߯Kw=~W+p{yA~+FXHN-jja5U\wIqDu/oU?n5i{/w5⾙a"j=]nBC/Zu:PmcZ>k.>sc01]0Ds˪XWgS;p^R7uZ5veT$Me퉕ݨݡPbuXzOOE:y |ǿS daۙR:Fzi XVi~2 `Ǯ<3A&7,3aw>1~:Q0o%VDĜ'}]MYG~meɓJ viԚo̾f]c>k$R&ұcvje!V߷NG~s䟤}>I0Y.3WgfL"|wlNٿ޻W<[X;;ri!ILF?;S1qBU}gVA{Pho>2qY)uO+.'nBXc/Ff1V+zqkd}\*K`V+&Yƻ1! &'s+j+E[11em)?Ş&.'7>>(gٙ35b9p^C͓XV!$?:P:%Ug{ʱ+,8PN_ c3<evtcu K6Y, =ϙyͨ}(7xu|}`sf8&/e-;C[zKQUuf1G=qG̿0M]ͯϾ"df^DdLJ!N*sP;iw G=`\l2Eõ~G Mb {T|]ZHKnn c~`S\NwVE罂:ѵ ͓ndcjb0g9NJvJXC#uifATjlVVĕPn饷,ff2Fe5V_nGH+2?WC=ܨsK +M5Hb6;ff =LÎXжJz!ƞ*{/P$cNRQc*YL#FvOC.A&[f&3󞜟vWI3c͘.U|!T.tZJl_4?=>Ly>NF*pz}.fjc՟ xG\unḍi]g^ Xuz6Lv\ fqeF.\B"-l({E6K1'l'Lgn?P-8'FMpY|3;*'~zvbJ SQA1W\fjyM_nI&Dh})G'?`06T/N+~׫RC)\.X@ff0c=yp漻wʓ]?Pĉ\V^em<{ YoE;9/ПwC,GDez(&Sp&/j$G ӆ</'Wb\mʰzs gK]lB 3+}*R撘hW:j51`w5S 1sȌg8{jKU;a/Fo}V t^>n^o:wS~^}LNtQb\Woߪ˟^/Z0  D`YȄ'{tFˊqŞrA'S52x7pz#>sKk2!|3O6dr??:T;T URE{mq2eZTX]w҅*}M ":q!>XQ/~|Cc{E0iDk,S/3>}F DZŷެӰw: b8RH[0MzZ˘0yߍlc:5S.8tSH/}H| ݼբ;r̍g2[']ZvZꆙ@>V*Ty[tư[pfe: [6kx+y:S- oLw~gyD1V3/=(͖0II f8&+Xϸ(z-cƅ2gnYrIN !m{Ps˜S }r e6zعHp1iu7=9LR} eN3KJlgfYt䄚Y@x&S4`#NNx໳3W,"8J  ?kY`MϾDŴrcntُj,MR֍Xc'aVJ|_+#ZqeSÝdP&b7ND|OQeҢϘ@3miW:Gx+n{,;CM6/73RX$>\I`Ss٘r*<2;eڄs_VUT[Y؄N}~abﳯgܷϘ3}쬩uYIJ Cm!(G՛_O>9?.}y cqȏ~s|7 [!.T$۪m;GWKb'?b8'/9P& =q8ߺ3ܹ} ǵ iPȔC(gQu=?ݦ&:> YjUa_:nE.s5XU$¦7!;B>uͭ;fa"{WOU-,PzIBv XQtl mWNґ8W,dyXMfkbMōyX)!.TėtPo^nuJ1KсIbOžq) 
.Ӵ~KS;4MPRV>^jލ(졏1(}h_YQG>kt+srw8wLe4UN ⊽b}n+t(}_>1';Na>myhRp@Xl" P`g{}2d~ĺYcoY焛ع=/+a}> /qvI?bUsxn 3{ wN,UN.2U*3~eC3U{}2&ٵ̦1serΔde]nu J9@1Qqz6=CiY=}p1żԨ[G|RطNn]i'ȑ>>&odWCJOXWw$/̪F'"S8zݢ5_>Ǵ@CF0E)!ΊwD{Ȼw3zZ/~|v~so툉s[oU\wC{qC+z4ӴW:}n4&9/CŠo_.3UAyk#J՗3Q΅(ߒ\3VqJˊiOxmC LtŜ,5cQ򬆳s*`j`}پb9ܛPZ=;ݛ27gEuK]i+sS'˔u՚q"ZE[ڛ,UuAf#=;~g f6ڙfevr#'.+sW1h"]ġ){qB9P@BؗS"Ov[eImΐ>9v&#>~@9 :Oqjgfx2QQL&XYcԘhVYyEP8OS;C31qwDŽɁgΛ3?_Q>^>؛wvģ˟%T΅ﰃTOvojsρg;o˾c^Ny 2c!aJsô+iJ>jj9caP=QGmmo]| >_k̕hlFY"E'IM}QNqٓlL78T(z W&׌C$"7ϞnaizۜHqcF԰R@cwBuL䎈?w %}d6ڪmuJb &/Xq%0lg+V N>JȴQ( -WM-!7Ag4kPb~2zQ$R6W)ZVO6 51^m#0{Z2tQMƝk;}i=I3}GH}ّi5PjIQ(e~{.|re"qW[vAY1{^jr~y.?])O(B`5ut Cy*\TZq׊bQۑr\zRhv^0 r6w޸rΩn;&m;w&Qa*fE/p697ٙao^^6$$:Q<-Oc_/$J|E'v+6Ih>w˝s\>k=tC㗞t3P3)^mTgOAdʁk}2Г0|Sq|UIZXֺoj,i#.+sWL޵di}SQ"XwL0ƺB9EFإ/POQ/dp<a+ &jxnhn"<̷Ż95Nd&;3 "{ńLfĬ \ ]֕^M0h3(N&-mо{*wx^AY\PO~ZIǿRzߍEU$(W^wwSH/\t\k#_6qghgن0! 4{B8/%pƈTZd ;h9sd5e9Ўn0ϠfƔf'Q@r]Gr#)άO'D27'0ߤЅ[%yN\pGˬy}oVEMe²JK"%v9yV2Mٵ̦һPs Ǚ=LvW|C \MSyqCmýrWMC'BEO՟QƿxYf@U!4~&<־B@^n&'2;qoc󳇾ߪ3gM#3\sѐŖjvvRnV2 ߸f^Y߈FC0fڊ^K9 ܗ2|E7؍#0i#IJsxޣ'Cp//yQ7v'z"{9dɼwXϺ~Nj'dv+sûY<β]03U)shs{jOжJ)+q`o>J_=wT_?h*sR,>m?cy}~LyYI+&>tC9:*(I9'fBr%oǻ,iK{sq",΃9+^*r3{ɋurob PvUgˮ{+\5N\V殘$9*gl[xw Y~GvP25^zN<*n֯юXԹ۳L0JcD$N8!{bGi |OP)*+w#gɧoj9csDwbKO~*իҷBNV~D0fBs$F69ƈըˊν^@EOy['ݷ; ZNܰr\+32W]Kރsuf!C˼fվ W y|V~WdsZgJYL5Uܬ+퓫٬+z)wShO)*{'N.VښˎTN6;=n6ExNEΤ:l{`%gmkna3/y fsz>u=l BOՏ-u^~C0>Y-{e-]m1n'>(6ߐg3&&agNնmqϗFIL: |v߹"+ +\sf_UmjwP91E$/ԏ[m2i&Ydz320| '0sw:QC{,̴_ֿWϘ(%59]|^llԃ!V$nhG{;dT@L0fBiԸ1.QęflDq:OSʊ{8]b711xk2f20o\c *#ݸWDSqYjk*FQdt{ﶱò~^ݽb'/OK3]M9}|dT@WA2|e.=0w8G<~r K6Y -wS-3R"$qghvK`@J$? 
㭫νsBA.`e!w5EvIza9?31eSP|PIf7WRG=lk7Ȕ{,&\H[3~uX)DA&bF9+L= *r.o=(_zɹ"!U|[q!يKz[ZȚR78yjRi:q"41f*حz70gRfFg).nfmc_ C6nTYx 79_tuQ~K1SǕ}hՃ!JVǧ {YL"5ӫ8tʉ:/SvݪV]FqU.֝;),S k˜XEl>}2W =a/;>ݨ' oEaq#>e:Mݽڀ{{rVT7B-A,E)a&f|^+"H9j s^w+^C\IyikV@vߩ"E&IgyW8ˈ@N+d c+:tUKu}}͎n.Fݭ{EVIYî_5/<ϵS'>7X.vw5pҫPZ'Bu`ȄB.S԰ߎ Sę枘dr2s`ZnQ*g*ጟXd6³m1rN(SBB\'㩿ǿ6>{cY-ig LZ;3**y@4DyuuQxTS^_d+9m3C X2 "8mmï6veqa 5gB$GCxM(zx5ָǎ'Ni~ɨ~M/Q?E*AVGm{!Mfk ッ[{abWϤ^/8hGz+ơ{DZ Odb ޭR}#n%NW '9 x+{:KƷ&vjƤCuu'TV(o3ߺbĥf@.[viO0J_wwñC󛺘sY̺$5)ȳ J,fo+NlzO ~RlY9U8Unt;{slXœNߪۃsb2+ƺNqo˄gLU{ߒ( RÓ`}7t+P=wPqX_vwˊ\)E} `W>8wٟc,:,u@sJppޛnb1945a)ʗ|#q5?RffDe'|yy!ZWn7:MD;Ln`/Luy~gQFAuɎ +]e$U񸪗rߚ4{hL,ysW0ibPfn?Zbjzk\k'ů}YaBOŁcr(̨Ս=c9@՛= qY[%K& Ie7{FZǙM6WoO \V*cr^v{@SmKәt_ yskcox@\O)sw}{e2WL_*֜noD﨩)W~`rWvUnTg M 0hc3>}ru '<1 O.;'pAyK=zk ;1Գ|Ջ`|U/L;3fAI9mfLedb,k/pzWc$"T>ӱ\Mz\Ṳ^mSp;doS]&^S6e>_v'R׹t%*8q0E0apcS5>f ROۇ`8옶)jn`3Ӗ̮vxזSv X?ùb9{s5]2/R)Aށ(*{vy(a->xʔV܁:,n dwnT5[vL~_.,"gʩ+48}u xe͍Va>>C%0{||>%eӘf9}K"}g 5{_Z W:5WgݐgpO~Y9c`?tf_'DO-趌:&Kz6U;vN G-5slxHOTC'.>s̝Jƣ͏# ] JGdH'+VWJ4ME"v.앙|O[Q3Z(Y1my'Om`&y٫g,)+,g^B[ٿbǧ}covϻ5eGع$:@t+/, oIwr[B@x6OV~+~xmǷe[;eAꉏgJ[Zi8k1 z{9OρNEWd4nJ3LueQC\MpvN8lZ2`i+[j{>@ hVUjzn\H7'4 7irٵVTVΥpW>:tâ{4*R)Ql}+,q<ჼ![jUy\z8Ɂ̸8BNJ_HIU*#1.HJ)2JtouGjf#vd hO&2#Wd F#`d v4s?rL{U'[@hs@cb}|cEoT-皪n&^FksՂwr&;~N:VI脚Cy|+y=s|qŬPPi&~'9L-oΕG:Cx1K$̴ymf!7w!V&8G8JE"b%)l#qYopOV﬚7&fw2LRwv?,>&?eV:Id!d|-gјy˔wǨÞ9c-:%FDV=O d P8wd+ﻪz2:rnf/&a(Cq76<Or9Ǫ*3g71f=VZ1: |<~Yƹ2u鞭ڇպcZy=UW,!tg?bO3ֳw>)7+}L%xj]߯Yv-8X2q1%+YL^wJKpC^*QxcaϘf1qĝΝ밅gЬ}wOsE\؍F\{-,k]w1fo_<3*̿AN %˜k…urM=iucHJ=-+W9l>ٿ9Č2轆'*A^Rc~7y&6o5nҙT<͚m72'9hfeܡ_LgL{Mfl֣7XɪdV_+p,dSv|E'G[>wʍƔѻaΐ;GncwU~nզg:ldg2c!{EEhq~֒.)x(Ax!Sk8WEϨYl _A󮺂.3*]iNz]H-Ή̺R֍E9l*{r3u^;RY/L :ԞV晝!ޑͭ!}APT|ӊL 0y|<7B^bB|3}?,{Q,=2Dg;+Ǫꅼа6yWb止M`D5kp*zk㐘1=C$ |.+7Yr{rfl%Gm~㼇Axy^(@9{3zƸva R+-$.`1VVV9"ܳ%BKb#&;>ЈNKoe#:so"E&I2 F4oҿф> X tr%^(@~7t\n(Fq4OѹȜ6Ǭin<,sͬY6on̪^ViVc7[Vҭή/9a'hFLv4Eލme*I}3q_'H&]  YVj}}kΓucd2':˅Y}dTy/xU'7LVو>0Ws5[3Ծŧ$6= ~J[N]4iuudlDfױ>g )GpLXtv!>QvdlBJQCPA2%ջS f xiUu0iWS9hWjгgSswI~ws\dG~4֩JJeJf av=1yQs1L%J2ҪW 4S.\c 
!Mo{uMӺ)Sgٞz_R4lGHM1 A=هQ^ϢQg7X&s;LiLc%ZMÈTi~4;RB?zͬg&TU9Vo撷s{-XfŁMN{^Dƙ371gEGSU 'Oc\ֆ~XL)k]GإLjSV8_=-xu^x;:p$W(O&g:rf$3UlW~)N_q{*튝E-]E;z%Oke+F"U~3x$'e(z:cwW=a=v_G4@Vhg& -9kŬݏ8*:Ǹ#X'mXYb¡=Ĺ1TckhV4wa;K(l%+g=@+: ̼rDmu<155JkdpDgM> ]lujOSEH&'2TefYM|ῃOE{n89y}>6$mo2 E֫{b*(}wZpos2gPu;W:< G_s&BOW DRבyqr82 +{bˎqc﹀7R گ]'Opy`nJ昕=p kպ332;xރHn*g>z6Zvd "S2t}B>P#kSɻM |Vߙʜѭ{L8 Ow޾u|Oc[hvCS D@IOAd!['z^1;a:5Ǭ)232Yjϥ^,\ο*ܟ94j0)g3L2S2s7 Y79)L9p&%/׹E/6dössMޜvY33;7zg37suvA1/y f:@Y$3}QoA'SV?g1Q͠Py3=^χ)3uǧ7s-vT0ǒ9I\Y{ը,[΢1ѭ:V] O Fٿ⧡}3qLwuT?'g{D\ٻesYS̾,o/z؉cw9[8=Q"ݺ[iܽϜi@!xuUZS~Tr풱{)3L ov&yԠ\5my[d'2˾o_T̑%9zurXXtPIVBIIXG~7-!%tjL ŸE|TWA<\mVm6tP3qd8qb ' }  P恘QqNpy8a9o_s΋!IQ>͠~FRV<={ynңBǿUWڍWWdzwӌc`{FS[V>Ka󊜌:ͱr.=ߙm5rmg5l ۸va621m2۫ =F_3l GmkUfI}ս5ZB&K>9g}ފ9N+و>D+iG`VF[VqChmw!˻n YxK YpUܬg1Y?hOdX'La?YϚ|<[,ЩJce=+/7qMo75"$iˢ܃r5 _t 7BnD¢FdI~kж CwBfخ>BT?W!!N;x&'E19CIW]dŠYۢޘKkEkD"^UF5mEYILD򛲐ȒPN03u\O{TL;s",L[,WL䊈WP^lz$Ϙum7gH3'lhZ*9-W=ꠉ UB3 +'z66gs~jzAY)RyU6 qVRL0dBY3Wcf5:7] BV Xٙ:iWjUGˤ^Y-Qv)}<*f= ޗ{U/vc7Kx\egWy8qsQ~4o7~k'l _`JbF}iSyfWά٤KG~+/mDgUyy҃NE:_/SM߻dHMc&!u!#?T)VSњtI5=sG> VM2MyGxzb˘`̄"atPVpe&Ù?)lfs\'KUh[MF߬q%.+sW788 TdW2S:CtOt^&*+REOg>MDNLrRꦒi$wn;= h&ڜ}#S/AaBjcQ~<%g;>c:ZDK}90"6FU=ǬH Q k ŬƳ}uǼNk~d4cW؁02ҽf*yʝEZ kYKYOy/oD[ViSS sDOg:: a,"`03:3ړ6VsDO ߺU-,ǜE> LX|İmd*U.ˍ(BfI]atO8%+jguvS]N{ ҇~Z3)ѿ/م6+< ;6޲W5>x7.1_9+جn3™Qelo8A1jث ePHLJ?e”+2g~\2MMTMև˚wkxƬĿCZ1<`WldZMf6-D-L;_'m9B51!Beb3O߹\>T V>B>sU8zM l&*'&GN+ʞw*zzx.W8m;uqe“]@(itY`q*&†ٔFx=eb-]&t~ؽ1鐙˭zQ)ς^< O; 9ǔFk3syD\`!V9~7Lz 6;ip†coƼgy'zKy{1gZZRslYB}6599>n>A?`ڙBA9HBV 8;4m]cY2BlW~\Wnq:%H299t}LeGHZaE$ʧ^3c7+U7Ko19  A:iAYW[ 2Ŝ^O!.1-ʳ dEN/@wjB`flu~0(ݸ}e)JulΦ{MdVBD{1OQDQeF ZAd[fkXZP.%r)}!;~ e2ȭU~d6PUq;ڒu;;"1%94~> [ Ojl5%}r Z.]b)S%wjywONQ+:I#&;ߧ:nhBR> 1|X^o_g3SDXOK] tT<'̓+;2=zۨ<|γ3267==C='h]T:tk'2n}W<C+M畎\?#UL=Wq-V=!?Lty6OAW&XAɳk~B=rř{eށp:/4~lqTIcmh>_ ČeݩjU6>sWOM:ЋWd?>߫N&,>d{儫ֆ-ӭ^e='N# 9휳ffuh=[Vd5 tjDdb3QUAp,_t?zW L+Z \u{3ٜ-kNnYZS;=L]f2zgMFA~Y)Bb%>(wSzsGi(C7xꞒV ފAO8m0)IPf\U h)2 nguF ̬VFSסe+YzYg\kEMEO~Pyzt֛i֜1Y"˕V}{ e(w[+Cb4[ e!3Lᙳ]>2vJUU=ϞwQe835 r iTQ=~7q 
f?o*x-kwᓕ9.b`1͇oExFًoՌaۅ̮s;r,U͡jjBIQ>Ԍ{r#[i:R*+95Q!;ro*}Ld\7Etva%R4L Jw3ItuJLO.` \H q6̈oXQq7s,Sw@biӫt=O1_7TB^Sd`6:όĒvi%p1w=qIZ/Nߞ@6yc=A [RFvSx^{-}aJ".+qLL+PVV}S36㪴l`쪫h#! 0k|yO͢PՉq'dNSH7;5ʔg|ZEC{Ӗ5Vk7/0#Q:{mW]]mt̔Y !#4ϱ")&i1϶`c<iY/cʫҿB<!39m|d%|W)YU9NHoxWvuXO^9%. iw.~ {U='Vb5梏x4{߱Gԓ9 $+YDƟĉd&i^Y#< ̽wopCӔ8Ŭkkn:pID;!t@*9oiCY ,+cLJu*rg%LNY%3ru3smxoeYc2_]uZi&lq|C),R/L }&vpV39&'<(jNC;2G'i|x ̏ iGI#9he[`U =+زJߚWߨ>g'+q]#],U:qZLlYfS<+v*ެP‰UD5܉Jt@(8#fIV=TPH2%)6jȽ"i>梲M?~[yZhD`.Qe·ͭYFY_'8sFQZ:ȗ~ a&58F}f[UA*VDEFNrMH,L,"*T\4-sBojzTK<=GR rd;ݼX2j`k-VtVg2+/wl_SHjX(ΓLpU9?vZA'25~)(j΄u\=#Q?Xlꠉ^v?W 6*rLˉ .;Sl<\ ]*{ тTʣލʬ@\tUWujQ.0o|v~qf(3>1BjtguB_2_<5k)d%טUUu齆{Pr*Dy̛30 թ+NG;**1Ym>ЫUز(6:ԩ>՜g iϴVH=kYriYweugJ Y O}^Ci&Aͺ+k$9 b"7YuCWv̍{,X^jݦfS_Tß\j)ce/I=hMMU*ԣ]=oy>y)L 1Ԯ 7^)v76\Y5_Q~FOllo O qc)9Vg} DA&8=#j9r'qx﹚PFl#^(@Ϲ3 fQ}$io/1*Vtߍ+,D؞ȔC9+Q8Ǐf9sʠW[D4ҘUJ;PsM:bH-gzZ*!HשħmEU 3Ъ7:%uoB{.mAgA9c;$l ӎԐ99,'g|Vӑt U këlYEkgen.njWetKR苨>83Nab=Ǽ8'WUŒLjĔ4X^iCbxiS`4=zt|7N'|WLwo{!K˞@ [ `q7w#1Iځם@~J# kѮt֎1?ڲeyn{QmlYMUIL.m`ψ8H~vUV=CDA&Xiy~“AitITtL>Uq'W1&2i=jT<^c4WmNTDllb^t\f@{1W My\[ĹM(C^OU{^n2KoaEgK(TD|e+z: IZO|8!3 T'Jmh jfLc !TΆHS=U?n}- ~[&E~7^.z9`%+kaS+j7_Z@h@vih28Sl+2?maG#wv#xY7ɻI BHfqJ<2¿w2AexNwOj4¯Yw|ІC*U\o/҉{VM&zaw jqF Sej|v# ǪBW'R3Zo ԔɦtɃIHcɜ 3#heVc?+q6<؀13XpyX~^M\P9꾈wY;+Шi9wY["l|R;z9{7wY.@cXP(لxG&ٮn3:+|]N#!^1"@mw?j<'9(f]M16qPr|˟-+7WRtdoc#5Nةvh6y`AnߙfMW x& YvžYɳy[O.G\V_nbGˏ5fDw9kpsP`VH2?Ω^lN&ڲŻS)۩j^e*Z<'ud.DaEG3O"e(FVe_ؐ`"~wʭ{.ɜ2oY3gr [8U5U{SGn5X̱zu|a^qϨL9&93Uo7KCR5":e)W~3+Pf!x -9?+2?^`̓[μf[}MU1yxA6dch[Q#,tѐJU*&!*KڎLldfcLUps8˺Fl?Ii<8ː)ulmw )ZE ,QęgoK*=@X!PU>|^a}CHc"c씹Y^;44zW*!ldfY|-yEzi0_h:2}ucY\@ޘ%i lq*rc`N1UE|yojF3Hw3יn7ܱuX$62Z~(9(λ Vu`>%3gϻd^EΔ$R4ȃ UU}6LӲw ݊LƵcq O!FHQt78l1Na^56Dʤ3#ڨ^otc12W2)ˤ٘}8v.'s20FaRwCDc,-lͱͻboб7Ҙ Ȕn=?AXpfT̞_R#RAWX2BzHn@A3kY+B? 
yk Gaq`Ȅ3PlWa^es9u9!ϱR"RgNv̵ jLJΔڞC >#bVxDl۴ɒAqo;aә̘0N" ?.AYn\eN4~/L h7[24ݽWALEV{ Uld'{ލ9Gw06e5Yr0R˨lDfNY̪ra㕛>,N$|y8Zͪ^V[kuk$XO"~Cy}9Bh3e7ճEѵlY%OǤæ&paαw~dvx[7:@?Qegp*Z+^M Z86dZ+X R(d%׻|D޺'LMXFYΘvB7u7YŪUqeV$[D(C}li+%e*-.y uGy˜dUw#̯6ʵ\lOKP~9߃9mt 5ӫ#lI>ʱY.(L1=YH>CXSoIg#0sz-O b|E'𛧦DX1wBG(9K?7<4(a뜘LCeKX˭gs8ךs1N2;:[61ڒnun~> ʊ˟O1YX}te$iaofʗM{foYmU!.()'pCYXղD5d3k-}s!s޼};`+zGC-u=ǥFڦx$k"\C}ީf^{IjyV5܆b|ݔux#S^[o{2HY ~VܧJ0Uvmܚҩ[iLF?h$Ghªܟ\8̩ wSB&޳O'fZabʽXɚo.zKfL[JV]z[gOOLA[:e:vXJrſ$n7:s2'qcDEm5qU%(x +/4\1봷{'u+W-lV}ZŦE;n;vW)L0Al+b56z2%W|Wm裸ſY$x*ՙs~8YMor%XD s']F_T-}`TM9 6ko=YyԨM({2mՕU>"GLwYU2&-hLMK>z9  ZXֺXxpqj!j䆾ʄ-#:mm_w`Z+r#K~=Xv׶_&_cV{gVdBO1 =HLz2J+r2Ϩ7ZF(s8= #gX%*™f,ٔ8S6&g[ZxZyn]*rꚭ`2g {#7Sy3>! טWZ=C)YlscŽr*Jv!NJoT>݋zDIZӢ0eNVޞgr̭jDb$"]hj'̅*Slcgl-~mfS<:2mlZ-Hj ]+ qoGo`KZQ&n9sɜ lYi "X*NY@צc7jar g"Z_"*;2'}D[sr>godÔLrƝa7r/1j9̇&:DIDhIӵy_#sdŕLZ>_ʶ/TV7iH,f6׹Ct[7|Z۝3(1V)+tr&7t<#BXA{r: v7 '3kHkjsblQ1>hQ'AޖeՋߙyAþYe{jQ.J牏fd>ĉsSH G;mZQ-NU 3keZ g:~'zY: [c=5w6Q뙂TuRrpڿ}Q9x)~CUA*f]r,)X9KZU\`2t {`~G76j[dNLf99ׁX gse^4^Hlx_J_NCo ?S?)Djykyՠ&>bσ]jfW{vK68OT5f&9Qu0]h#eLgFEM[;h*!_U1`ܥ|0 :UWV B[U"\B4oia'Tt;OFQmWd%f,GTl;g/cbB'3ڞ:N3U*4f[n6OIT栢#w1П)1At3NfxſV(zBi٪+fKftj̙?cg2XSDnj7X˪]NYǩLF Y=ڼAɢAYDpY7 7=MȍLfUwKm&챊{䟷{V[}ݜ(z=XGh2ɨ.棫NFf\k㣻;smNz،b׼]ܬpeEpGYCXiz#LezM.y {O0dB9 ~]{9\c. 
6fƜjtL jfLYkxMpe΋Y^UuN fEŰ 5" 3rcY3͜_\ONQv^aV]N \cYyJMpI>i^r㕝Nz~mt\~L(Ryh>-3ũIe{PW"/i B4A~iH39j(ms9Ϊ8c'YlP9b; ~[U##ѡVsz4Wubf  bJM<3*6םvC s\=wrjtCٜJ/z+:@u 3pȼحg.@xV,ff-xJb?ά<9goeXuw(3!,JEDWY)S v}:Zk3d ґRxW|U; z FUQj^} ,ge9=٫/N*rcUv V+8hy*|q"29۔RW7T|`oDW&qb'*bw̙D@Dݨ.DLo0SE{KmkC'+/>zXɚukCtIϺ~T)z('L"e(qVee*0Ұ{+&d2#J&aUy?7Ot|P+R>+3)ʞGB,1}}z^(@ wכ]0Ϲ|vA9 JfJUdYe^1U؃r=ig/sUz^_\6=׊'C6VdmvtŒd5jH5Ϊ0ke,EܧPJh %{GOfo2[dYOH' "rۭ/Dٟ'=UVL '9(f ZO(Ykՙw<k=*.~'*C>_Λ{giX??ISu5#hK$E&%o2?fq~NZXbVWoxΊ^[ʼnvJԙi䄑k3v6Di,>SO1i~s1yY4i%NvDAv֬YMGre-jjf[1 Sϸ_u;VjcªhI,|mReF>y9.SH:{>*n*y^$sYfʴ灺A كѥџ|tYK/{aVLdF_Q#yERߵ:@뜝fWVd2l6 +uYcjW64i,NEMESɞS)^6Qqtfœif'+Vq|P部̣nΔ 8"7-lDYBL2 >#13Dɬz65=r(όvyD)>_VZ8EsE Nt\'2aFїjY wU'yp$y?t c͜jC7[>8NJ`7U#FwYc=xuxW+*?xE>l+¡B]^Y5NXQ^a7خL Oд+6"f35ݬr +' fSqU:"wZcI +;#:1id*&d2#iT檆OV:Zꪞ-zs9g9w hS2wc<>YzӨj^TmQJY7L^=sFWU<՞xsE>̌>))7sǑ$=Lnf2ɻ^U*ڬ{_bJ⍢IH)yw" 21998׶ǔpO%q}oYgV9uG||0ڞ`*&d2#=wcnb}9Ȕ3}f"}k2==zr܁l|X?R7Ue u^:{<߳gr MH=ȷq}BQt1VU=,!a Cf<[,^<%^YṼZ[&)2)T4v}η僮e S2s"nyK9Cf]uR'{v~Pz{1b[W]4|UaUU{CB?$xt4Vt>bX "~8li^ͧjZ#82V?*re6أ#ߠ{EFDu*P $+yc9x!; 6׉(l\&G)F>?.T?͆WxFD0miRSSV\ޢ_21';\{ݱ$:Yg.?Yiwʅi#9zr止 wȀb._`2#2de%++Xhd@7ˁȟ1N\Emj7»IvvqZ">Y^~'u,3 f/zML͚%>/7>E79U%G4O 1d$g̙W]QcZ'oNeW0؅ڟ M4&yx2S j~Mb-ڛyE]O{`97=#j̼Ch}EC}#b֬7qC??**aR kix_*2Gu9L|Rs2963g=II_7 ?&9WS~zG  utKUńNfͭJ%+V1'0|rNQb8?oj@ @|0gliUu47s9SQ1$ ={b՜9ֳMlZ1y=D,g^',kh[<5tZkcU/i v#Wo O|Z44W؍v,R ևA]L3ojgVU7d\[i7u3%z}ЧA:iAYlvb'; jφ)E Ur3c^q`ÿrr#ӞyƙB1ASEѷsyv}aٍzacV iz*fg| s.g[+K=H˪xܗ2ŜX kYg7d@3҉|r*bvcTt}9ݛ*\=(c !txC<ιo7ȞƍNk3y=r$q3a=W>I|ϙL)¼Ldj&O ,բ/41ՙc:#P[@GbJ-Sh93PSˣrԗh2wT*|`*ί.˅ls8f2_aS=яW'"r^\IOk{{<]:\g%F *JnOE{XUs{cZ4ȊfKCFGZU5PsBҧu Uu1%z-~>G}W*̌q|y'/%1Wy,3Ϣ2~Wz4+&%8+n:K6Ҙ_տljxyG= Od'>dĜ zieJ MjdW @[*BGxC99v=/{c2!1[{Wܳ`=UĝπfQf"k緇#,v2ݬ^̯0Kg_Z )YYbjFR$d ~zُ 9V HfAXjׂLMb#oƭ)!eGLxJe2yӝXTAѴusQ?n.2Wyt2]XD}0i3 Q`z'e^]I:4=AיvU=Ԕ袊Ʌ^a k28ӄm\M}F}0ۅp"P}su&y!3 qwlFS%ysտQ'ƪS"[\weG5<ھdsosm0cK%.kJ.X:s9?z>o)De:k={zF2 !ߊlYi2bnϹűno{_ˤh g5$X&8STet}Vn"}XJ}lu#]9ʱ{ŭB-u'rCeݎssm\笢F >PEGD )1k9/vEg8\u90^|8{-)OzˤW#^T՜v`~ٲW2'+ e?wO 
Cf]2w,>jfmA>slo6gyU*b*X4#2Y^36ĪKaȌ]$YZf#0+mƲ^tdө{f,{ ;'M?`vߦ`/\I+%R/QƾSUCz?+kJ\hjUyz"߬gwd#NH5z^ya8|VpAQYky[؜V@K^3Cˢ<&x..so 4v7ۅr,=e S*f19+Pud kYg^I~zq%WR=bX}!^6̫ Y픂41eM5bZESfX}l^SNQ[Q~j--N8$Sܞnu/4 Kb3{LNՉAQn7{,+`TvPS&E2e.TMiJ 4 6e_4'v kk.fS]Gcdfx6sFN3uzU,SX̒oVBVOzUgx.Bi4\9Dc$&.\ў)evȨ1%t//*A\ᘩ MMɪR:.ƨ1y(jIFޘL[fb,+=U(3e}FI ̨5w9 cgsE]L*}F ]fb޴HV $,؍3AG2dh9,%ƻ7s|w{Љ=N&Vy݄3i7!X2o;o/#pe'51h\26j[gFX_ڽE4)6'fs#\F8GesrjgA17OL#GmZz's5"&n? UTWr:SV3'}%'sѱMf6"L 7:^'b#&;1%eo+|~MIU#%ҿ\[w`B_dc`C-jVn  r]|~ͪ.uWI'y>qcU0#{KFh2rǜ@l kMcFH{!d(zi=yX9#ij_jԾM;Cq/U5h;BJj2ޛoQW 98*򳉸6}:ccl]^= P-RwْfnOXR1SѽS쫧^2¬lEyjC,SS7uC`Ma3k3giQ-%ݜ sp1sjYd:UnHVZlJfOhm=']<ެ~A B[։F%ad^)[GqCȱUd+M^(賘IVUzh1zo$(kA\):h^q8C6%%S-iE KU)XuLc{P{W:] rcWh8Q7Xgs3V+^b6F+s&2F6 6TFmS?8Q TO!Z3S߬ǎѷOz' /GFuïLg=,oL7bf9oPsEl&JK9Lq(ueͤ;gm*8Sn[ɹ8f}H4m4ԜZA*Q;aۼ ^DNLyE6qWwA݋*iA"&zW})9?6׆Lg ~[N >)1  Y(gOLx]ui(~fl|,8{z׍b5׭ݸ4n ?ip߼/8T.!fbw1lƸ?g;Ύ_2^/q߱U6p5 =Jf]o e^g{*7r=y˭SгJ^ ebXQS@x %S&l_[!9et V3꩓ W "aJUmoc/CrepDX5;APsF=W5F"09\xCe$%\ \bquO_ց#ڊ^t[7=#9ͤE4p Y_`e7L;GY{PVݎ=mc6jWȼB[;dO>v#b2վ=XOz+-?+L|?̑UZz[ #e65^?EZ{~'9qw9{g^s:AT bՁ*h*t{.T9m*)dVm2ONw !uvN:RrxEX$w29w+%G4v o&fe?YqR2 (*wzÍ<‘Y8VܰWTX ^YW)[^I*W4sI7Wut(} icY*mԃTϺdamM~Tfw񫣟ow `/t7>vlʇP`%_*b癩4>Ѓ^gLt.`xD06q]IWVs[6NBZFgFIiOWై$z忡`bQc>y6^Uey7aJl:}CNu3sbo ;{?KYqʺ{>WL&M;kѿyz'6O\ }y09(H5 p;2M@.VBbf{h,&M++~v* n)sIطk3m3ᓙX P>YџwfH>M[ƈ :~*Nګ37tV{Um-Hgqߘh6<1mat>3|d2^1@ާ:=V̱#O+ }9/ G7`AK̼!8P }Y/w2tHݟ'JmYu`q=' ne%myIͻE"T +LvinSHܒ皦bT.t%?;O\5o49Ơ$8'T7zΝsݱ{wͮ೽^X'St!4!'#:)Z>):XEx[{GTUoVn^aeeT[H0xd_lRt{pvٛiw%72o91+b8ܭʅ9[Zwɞy@ٷ]69 ىӦRܧ"ߓ $mVdZZs!ls2X $+-|7ob.Rܳi" 67PV(g}y-ҏNJzBe-({-O(ODt&]iޠL+Wβ2JD/=4ϨS 'Zf& wyspVkY*cy<-Lc! 
k#φɅ{;R);Y]bVˤbz~=17|ҷeŽn f6>:',=Ŋ`j^&[hi+&g2;=,{{ַ ]/5@*;cn)sILL9n˪Iy2]*ą8ϕdnI6O9qʟ')2)q0hL[&K'>[^2}N~WEU2v[Q{|07}W.*i*L^x#W\=EZZѤA@5kf{p׿}wCa 1{Yݹvxo9*{Kb)uciDo'?N<^Ab[DĬz3uȺj m ad$ݽWhC52k~ȃ[f]56!gESYʙe:'vyqVZOX%!̳$E&%iv{˾_U15̳>xZLajpO{&bD1s˞k*󻉠 6MTԳykIžOC'+NGUǨ%sӰ{^{V[SOE[$K"H2!š4Sm:Ff2K-uml f4#SVoh# bڼ;VO+TfUUYes,цMhyE^m"\IQ"iQWEw]cIRGzmcY=1{yx;C+2Y6׽_COfѫxyYa=8HϪ<+}<0>zyUeJN]T8ޮub'.dCC+&S%<=_SU\{dzQY b6һI0V#q6xd?rcab$ĝcH}gB)HL>ƝAEtLm/˒WToj"_uOW,[B>YJUˊ*~ -p> w䮖M u;}yr Ld fM p\ajf86zN][g|L瘡ezm~c~V8++|:=a<>.2W5iҸ27YpggsẸm9ڈ.&ѝVN۞P^{Dzc\q_ 2 ~Dia%)F1Ϸ7<t8@5v[: SnZچ0Gr@`.>s̒t0;g2|ʍΗ;eπ&*jP*ίi l6/Oɳk鸡_]6n2ūĻUM H[O>4rGބ>QT=~{ГXxޘj7&57ք[6D` $NDd2nx6UVm|쫰ߞRz6vfʕbg}sl!ev Mn1حU#exs Zf].%,#XwX"N}Nm)0>K eV =y׆NGpQqn rU:sqѡhJ_2&Fg^M!Mw9+L}DZqϊqV>?slTwm:5{vG/<ẹ߃2}9R@ܣwrna9k_]bmIsDe:}Nz}uPz.p~Ba(}Qٜ4ﷵcv w@_[>i_lа^=+) N/歧DܖƔ`]gqSdq23ɫ#;=l.8TMn~9nObS2{„Tm og)+D_n2F)/s~A+ؒ.~NA}w泹&~g\an_Ӳ_$wOzJ.'p#-:m3%@:؞T`>,_1450%Fu9TS+tIWU-U/J{;V[OmsF_'q]Vx2ćggOu:%mU3qwKnfv+Ӣd@o|gV˳N4xn7*y*S\O(zn6jfV+Y9o:qb9xU˅ * eo{-GpF7rcک;a/E䉕L?gOu:SH y'21#1_g3b|QIF%>!l:)f T{aJ<g=>Ef= 0U-$&Pa5;_nNf R'^#Bl Y2woG_\!'>O %wL-Ć5SdDa)YJ]؞,ndҾ^33' c]*oN\L#"DlM M.yW7V`<ѪhF׈~w"I~cno1ﱪo׭M*P4IG;kaq>N*8!sD'Zc5CS5Lwܱ?ik#feӎ"f M ߈M4 ]=60D+p^1|}¥7ʯ Ⱦy7t *js ~ΚiOYZjlc}I}R 59`YzW~ˆk֟KnL JE``P{k.7U?sgOCgOt&si֑ޗo Ȥ{m$J&R;iƽB}^M'+ZAj=C+J]d}=svuʄ=r/eɬ m-pjV;d!>1:qwBU;nb Dd=?lKܯi{ϻ+uךT&&PmvB|y*!SCY#DUi}HVg<{ͺʮV+WX/%_mDS~ٮnܻt=]Z3oxΌ̄r宰kHLJY~g}ɮqyƄ7dO;*[VјXy~fs?gqk83*~:.Ή S}.Ճ7Nخ~ږ &COVwXsBCVUWX,\ۘWU&V<);j'QT^NuP{k 3˹=FϮ*ȟab5sp 4S|24v=v##-.g{ gFEM}2͔iY(7f;rO͑Bĩ@' m~Vzԫ9=b.衊{b4&V 氎ҕnĞuBDdAwpe[ujZi}1h/܉ޞ%3Cǫr ]O8,{PGx3Um)n ٭dM)Z-c ^vj2qf:?lYENleU(;"dXV2/ϧtź]U$$-.?#i%9@G#^ϑVmVuU\N7Q}o5WὌ}厅ގ1YG=ڝYP T)ⴄfnpQfw]և49ʗ(z7̈́L`eŠ &QrxU꺬IA|cOCuqi؝Fd!>-,Qz&ZEe$ќu{p׶{,75O`@գ'0̹Ip dAd8UA5V| z2üG98ee*f ׶!J;{Z:uܰ=hӦ&eU\StHؽ|Uq,пNPU\L)~r9dEΧ;i!I7КdOcrߧ2<.wh!,M&K]fw p!m0%/Dm.zݬhx?ٛY=S^;fږw?oN9xCng}P ȔCTBk)fF(;F)xV˳#~z ˬgҡ`b2#w\ȳb]PQc*kU@-:s9T_ t=XiDe'6[+މ17rCJeZ]Q1R9+˱2q?p{4B\ 
w&GRtMU]UT¤ZBňWq\<@uv*j̯Nw_PݦuvvFv`Ls[a`g43ɞ]V]7v'7=Nr_oY&˘{aw:}>f#դo?2+{*y-s)ƲgkDb4fn4ƕqX"_`˳oXkC|cf'+cr mnQ/sBW^N DOЊftm\ b2Lez(L0N/Ϝ1¬z㪊s6)? ֆBCLW IxM3cTzus8?v ;Q :` Yp Wܒzw)R*h t"txDe.+WXOg?9ūݘdf"Yk'^|.ЫL݋zUM֯7U6GPf&|pѶTa7A G9ì:H,|:# ME { r+f63gܡmcsR#tx7e׋3m/Sl"5¢aLʽt]ώ0.xqVfE z-{xIg>ZWʤTZ `8dϳ~=)+cRBTqq꘲ +J|>~O߿3ͼ֝ tE%)MQ%긡CeQb߰N<$ݸ*^MU74M\zgWw=kt'~pOfϻS!&CoZ,֋NCvj+!6LYЈDL&cxhM[2”JW g}2Ð>!;G9![ >e)>f -v3!1`յͶ*9nʬ@rU?4^-p陑 L,-f}t.~Sn4/]WnSk;nxnJ 1&&U0Nw*:Wqu1n!Hs*hw)Yeܵ=_4؍>Oxvj*޳"aOjYՈ:g9m*~B!\1B*gV1%ioL(0 **bFͻdWwKide]wg"2.T6088*~9?,4$;mݰ9a2Gۺbazm=kqZO*^b29Ǹ PxCG<9^^q[iLsQCNȮ(SU/N+xe/>nfs:^%=#4>=Vјl,ډI\QUzsA~s6Cذzz@L`dv\ܐՊ?|bώO?ph1sgwC2| Ô\͟?|'Zd~ֺW'9;9ǒNBI]u*qv6eJ,>:USf]<1k ?yNj{KĊhT*OWM"ԆBw7QdH4ODE:6Sah@9QxJju>mùfdڲ{/?{LoP[LXHĺӦiBLBEҨ ]D͑-ϬK6{M, {ҩ㨀0/Y QדLݟĪֳs3u54>}"\Čt PN2w;3@e Ufh7pv w}7̻r˧)D3) /?NKnZ_wYvXk75:ҡЉIJm}fŦ3g}Խ8a?tG<7kdSPYs/!Ppͮ9goiq?jVLD LCz/wnd9?EW2NZj ݡ* "7Y1jrBE\yȼ+f2]P~o=.St<l60@6V%~&ۃZ]dg%y̛Ew {6fƿ+|n2mCБ»[F=%~4ciL+1r'yhN/F&3lYw־'!3$NdG^7%w2NP)6*n;ȝ;;Zj{#e5kv{qLa2Lg S Ms O:}c4ĞcelH( ^]aϿPXu4>SkHVy78nE\V殘*ksθ4p{ʿ^6NۃXǾkJ1Zj\CQ>81*\1OԳ ׸y3Y~m|ø*fϯ:=tVZb[񶢫_YgQ~O)xs όWx㹞ei6 %3;))S2G)]"ޏ~s\ݙngFEALJ}Ϋ^J6n}SfصhwA@e LI;e:.DN;5yZ0{ ˩/jϬ.f^8nv[YYp4ߣjMح!es.97{oP%0>5?Cvr ݛtߐG1ghXTySy3DnǪ忟Ϭ {mvZ24'գ:w`lΥsS14Zyu_sLx$s#Q7v(OLӣ!iGoM?YXYNd':D~qS q%Qe%Jl7TxjeBVEzw]zJO)tVA~CWe~F摅sZhGם}` o8 #^;M։W6C\yޏN=F;gǬdYP}'J;{~v,Ɵ|w1ATBIB{T ؝9f;-h؎:ʉ9L=zÂ<fcTS|ZƬZL&´ӛkV8?vL`}iFx$Bz-l]L&*]<췵v˳Zl1#Äųt742O8"|쳳םl69 7{5{{qz"j,Sa .F=ׇ{n,7[9u{wo𣺺40F7彫0v̅Z0 ^h aqwqK G{vR"6d. XbYW15dYoE4Ƒd_&%wff0m#Joyٰ+jX?;\- ʟ-7e,V˨3GJ.{tOY]cn_v|,\+2V m\UD?tn˸jё~dϡ,s;6f3Qӥ\nt@3דv\Rjzc3q4C;^?Uϻ?>G7yX5;pT+vcm̕bL|铴E9Ϥw4uZU{6Dg;V!CU~BQ̺Py:2ӏ tUP'9L}Demn]E78Ϩ<__ĥ(e7pƈSca`'#8c\12i5 N-j88 rXE M]rZzyJg|dD=:"٭d',rKE$Tj|UU7<8[ugU>f\1q /^8dLPjl{ZrUk^Rҋmv[n#sl#'=E$!n#|"J~om<ُoc_ mv_ݭJ3˶js؈#b=,*; 6n#f=_)ig͚ j:?S A۪nuxĻp &&a dP&kςb6L 0ڹ=/q׉OrD]5הYp%39Оwq U]Cʷ8IJEȷ  ϙ 0nBX/vLӅJR `vjo:1}샲s8VUڶ(R/UeuH V*׋M|6qr}m`ogugNGKA~+zjU,;'v#1uN! 
Tx\@qDZm侑j֯CBf[Yͤg5.~هbNЊSf+z\ys~ӷXV79VtGsu*ll jNf<u:؆3V@Z #Au+ZoS8[iP+}(6(2G^sk3YyUaDlvSP*׫ %*fmHjyVٓl]54enHnyg>f{(YϔKl {8b$Dž N7YÁH3FRNxˎ2 fJ@Bc'+ß#a: r$E9ƍlWȁޯ[_,Ⱦz7 ͉diS$w>)Qw̢[Ÿ|p;dM%oRlOGy~34*=dOė6+1>h U7 Ŧy4Ζ:tUgOa=64)wbw.Ff+z4z?}]pLa/,{]ۺNJ۸vL&{Ԩס.TI+zPE/t${m"(jsgr`3jduC|CZAh|?Ac2' 21ݶ*p!'tWɳDb[)w9RUbXعF=H]˹+9BMYr(jAqwEvv*gV+9?qN) !U=xlP*ѦaγԖPW>u&%CrLp,gĪӄM8+!̌GiMi~pZȞ Y~A6Ubn7J& 7ٗHE@UkfjӛI%*{ewVykE_ 2VϺfl_ Z0My딗e(ıdNu735DYOzUMgXGf܊k'g`WH1.TNt{'YtXIT֞9&q'@M/7wYMyY̊1V?rIGgNmEus^pWaYt糚XKLk\KGnΉY<1ep `1=IQONǵFp̒eV N9 :^A8&_nd9>{8m&5ʁm˻DRC%ҍj^_kZ*$8vf1SNw;\O9MB"&jf$z/é_yӓhׯ8)Jdׁ:>t[ޤ+e݈Gm}$1{IxYv{_²&r bOВ}IoVӔLZq+ww0+s#LPOerI>n,dL9PaNx;U>_;y ??ӨWGmoq _ʼ+iJ\dՃ!Utw:޲ƺSǭ#3w;FBv 7>YἯq :Vg_?}? 8?}_z531? fq*zjU]z=;%Ԩ:ז BsYu }7zJ-Sh ͦHL%D{57/}W&;(cA~CSOojQ/#g*:3ʎGVzٵJaSJ꽊`-/7OV.tIZj|eԻ-"oΧYlE\u^UoPHQG-ꪘpC;cI~V7?jȋ;iTړ~A!>?QZw_C9 ^gԭ yɏ*UNª8!ҹ' 腹 dg&ꦣ uicX':QYǼ'3똡jU~]U^{fݨYM`6*ëwk?mNʳ~13DqU&dup5{?<"Gzq 2Ne˪Lu1ϴVy]U4yt];ZGzmb'2Yow?vuw_kiukyA H>_m4{{fKy*>S1s봍9Fxhf-O)fu뽊RSwPw1wHLĝd빜LtkrxeoLx9o`e.ZwűԜUxcpFbyѱ870qEвo*}ieν=t͸(]l[#`nOdR~`N7s1iXK8nxU욏X}}WxUh gsfojC7,>L} Df1YS}Ϙ*}Ӭu7߯&>BKw9:~xV@> t3\ L(}g^]6Lb z6nxK  :؈>)x*zحt^o=OfSFf~&,Lnnon\2g*c={a6Y43ɐEc=3Wq7u'T݃DeR/ʑ?J~©g_ 2GN#I.taW:jatfovtYGhag2N;'Pks_9}pE /={I1%GɊ|dT 6~v=Vy [57@?+E\QD~w+ 2>S(TpHʊg]RY6 2G_ȡ| <&vW{?X</zJH;Ld:LZrL*'J]O:Dn&AO dN=r bPiuɶMVIue ¦Vְ ais}os(9(|*)jdgvxmA lVXhBCQ6:]l 9 1`G=aU8aW /`N Wľ[O In#4r"S9\ywqc7(~_-i d$Fƛu3i|7b#&;~g.1E)*-nR7?qZ54FghP۔$V:~ jR` %QE[otO%O%'<=v8֛;{G!;1 nkl$ꐟJ7סձc=Ǹ90ޣ׶Ze}] WBZa4g٥.vVhjWUočCTSktXmЊ~؈>0*T8AJ]r5+jƒMQ[1 ߨ(n U{Yr_1ʾm,WG(gv8ӝοiDWyBa_%$;cRr/9Q3'p}$}vLL%{kyfO#ߒҙRL yV>qt< =zĘOYn+ivG%~˫35ƅL){ͽo G&)YkcMEY=mVHjn-֕w̟\4F\͌u콟MObYUz;T17fپsi=هUŊ{=N("_ q`a$.~{⍜8OY ,j3?E7&*M"2Aj3?x[6fQz{?,4(u`ȄB ;^lYLۘժmuI>79fy>tQ2s3\&Qcj{գ>{ػzPkd_+p҉]i L, s*}wm9V}n> v-Æ{?^(mf_+WύfE7Un覌Ep&0LWh|F5 {VqT*^u~!cUVfhAwb!exEuW/vYnuߔFsSz3m=(wRZW򿳿$< z/7!k='`nN:9aݐ#y#Luձ0$ Ԟ{ob&s*T(bG?OŎp0g0}mɭ":2_[q`"T.ɰ5VZkH`,-^aZW/8IΙrNJx}pb?Uz` yGǥdW~m2iv XHs._*iOi 
]{uJqbގݴr`CkF]TԼ'Rڠa_ƚv֬{FluZAΚ5-tY#DXU?No};!`z2N]6cq;Z& Ar8j)*v7Ye}ݢQ3y2M+̮DvX{U']8}J=3?vU$ YetFiUn&F&3Xo[9u `m<{&Z7^ -d`!̕^;v r[ySdl6'2e3ȔC]+}mgw*ҟM)r 70zW=#-.w;wܚ~K)FrgHy?Qr:)}j rwvjn^V;za~VB?=mtwT*pJy_?nFtSo"P[jAeX{_'d_!s7 ]w"o<:Qb]] eujf*Bgv݌܉)v;=a'dfHo穫 $]O5X=XMhs}]l:m-o? )TJ~зnw Ei&B~xef}~X?U/WsI:*~; %UVbo<@ ]LvyTWpUbTJ0z\7">DE2Ntr;Jc='`$>76<4oiK8++f2-9! ľ8HmOC Kw&PzN8jeU0Z1OwaU{*#<@NwhI{/g~ܕϭ.Z@b c5ZrN g n :d5{sV@D&P+Vѻ.1O7UZ|&Գ\svZuky(;'CbYE2sTUV|] 녗 \_X~B ޘfܿzp?|CbwQ|V3wҹ׶goΘ|p5DUZILJ{ec +H[(aQ֛ZNz¬ `Z\d KDIDڿrQvev+n=Pw"g0;q?ߨqb̨M wWu>{jNYtYRffK.':-^[w3[ZOlP>8S=OڼvADekiTf_r*V5Z6D:p8g9Q\ ؜ǺTC=h.2%s+<7u6s.7+OSvvlv+$-.-ML ْnE3`h:Vޠbό {ޅyc" vٔy"d2.܌:[D=RN*EB;nȅXWʺqQtӉhL_R&<՛7SO71a$͹uzw kMMώ2! ]H)ʕE`R3W1b-sbYLrL"{R7t.hݍ8ҟV81ɆӨ lqꬾ;"q ]nDgJY1c3 Ǧ54nj$VO@VCNJw)ѹUX+Mߩ'nĒP*Kex#׳DupO)'^77us{>TiS#4]SOT @n.w/uAŕLu/)awRL"gҘiݹ!w(s*—XƔlU]> i4^*aҼ`f-hѳGM㿤T=UjjPt pODžLru vwqi(ޛ78 _ԚJ{t|r3ٝro_C oa7 nZ,_ fjuSPRni h>݉{)6=\W~IuwIQ?c%mҨs=W "HLgrv|kqݿ v QeRe/YkV޳V~}5&TU*P[} U&@249jv!N>NvOX_[^C e]!U,_d^',zgszPaԤ ﱊRo{)5+zB[uVށ#JGKV͸#{EOyekCJ&'4L;-qI'j7,O/,{]tkCbܟVT *'gUyٿB \օ:OfڏYwk~BcܑFĐ5/7}o=yḒgdELS|abg2+a.;.P6e7)]meo͚cgG1n42]Vˉ OJ%-,k]16TڅZrx}De ݨKNE>%n9PU/0kf3m>是)s͊֞4wU5ǜ>OlNPv,׊\m˜J |3=׃!5uTk҇~2 04 &$31'g%*R˳sg &J*f4n flGޕˤ[&٘j=F9ϱTizBfo=;Yj*)3ά .wYrYd+&1O/`,UyCEMJOgkv3vف*iJnL={ňS5euSƍ7m 8o>@CV3~uO"_]oB]^NXw܉ʊwl&9Xd8M ?q{IvALAxJb 8]d=&"[\pZ8=9ׅ*'ٲ/qQ=(sOYiTGw.qfn̼fRY+h9gW8f:nnzf3 i?Qf|&!o:-imR}8r_#yM.za7KPFQ2gևm"2i~uKH:uȔCTg2n8aoj'hE>{؟@Cm^uMvFnd{q*˿H&(;[ۘC5?kS{#p^ ~]݋;p.~|C05rS6B gsf~bO=rw3R[ _68Lg0mO;%l3-s*t|"s,siU5Hz:.Eg9Mq3bQc%Y8 e!CFX9~wV~"߰{jUᘏtթSO$M(NLxꧧtZ|gL4;!SYZ.b0~voP\;=vpXqf3f pp♑ OetYn=S5Ϊ033+(rNn=.!*HWQ14S%4Af>+YɎoM<&aW'Ϝ&ZucX߹>JujB y`p0w.)!AU# w>d4wYO(PBs.Shc3N+bP3cj#W|`k.uv8 BVro&tb*3S1~vW~PM|}q9C:-wWbL3͑QȎkmDrfYloݚrO"jQ-1XVQ>긼Reج:sg`|'\sL[i];vo:?Lû~!MV .*8!.~'vY)o꽊zzWܠA|5s}} Mu`\Cvj\*M"ˤs5F~ȱʎTzwW3<M֏1M8g9u0yB@-ILS|TCD*WݠJSw3|XQjb]ƀˠVb-/>kNTO&iF®qve蹕 ޖe%s*{l k"7&荒;ITDty~-j?(H %=Uo,dԆpVf*3=68W7ǔf" 21Tf :fq#jځw)7[$0o&&Q7Oa@U'Uw:neÍ7rR/fиz3ZYm>*?ZuI[~V+ 
VwlWaݦYۼ[ʮz|>!~b.Y.q#ubܺgb\Ȅ'hzrٍ'+er. .)Wz`^(@1kYh1;UlW~BG}b ) ,έˠUZ^_Wc'bv0 &;w&h:K }7xNB&0L*u:hO€H> -ʻ}gsNNY q$2iHa׵ b/`wwQF$rG4n̪dp3|SrcWND"RN0kY).{C|E'z Hq̭Y^+c@}}fFZU~[a~W {ʚv֬YyLg-m̌wI]N'r#ujT\ JQf%ˬ;-n*o74v3[5Xχ{f.+qq&CŃy6G i?pz'踁(F>Zs)-{(ϱgf.1рcR+Iq=r-Ŋ\y*P7~r1ee)|Odv?udm=M@{ۈcY*La)9R\29) J*߱c+3ge*ЩzX߿1̬VƸ[etj#c~Zs^]0Dd$f*VT+MMDwd2vp<uP1cfB<3颢!'ڞ95a5󉒂p\ǹB_pK>B~d2:U\dۜcYOs{8vECUV$`=`{ |v{hN4SEl,d*PμKJŕ/t^f3˼gK,;hg 7]m|N8=a~TP^=fBpO=gO;D>lLO(e2 &f uoB93-^tsrqNvdY!*9hC!>8}/'F{W]:RsXJoNN 9&9;$xcgWUo#PFaNЊkRS0ҷu7کb1Nꨡ 9Ǖ(mvvahCHn Wm>ʎ↟3Š䟤Ey'bi֨= d.RrֳsnTkt`)`ejEȽ#nlU՟0F-ȼs5o(zJ[ѡ;ɳϞ}s7=Aq'TNt*zD[A2=v' iaYoIf/Xs  ѡZʂXVv/*ۗiSvZExt#ZV|wO;l[羌ͻ<]7z9?sd^#˾ԏ#n\YlݒWTbLkRdBT9 qӄu6:IJqzw6u/eWU^ttňSq>wwO*zϔ=;7!*BrvU_+'`3ƴd X0Cx*ˍ(?81-BE>M2j8ʄ T Ru]y~b#2B^{DLf"}7k3?4q6#mf8;# 3ͅy[TM#DOw7w$l{3QŮMedpZfm;RF]8֘YDRl~"nhsD,!TOCw^m=g=7aXedBZy>kvs99L@D{&ce_z_w)>80^u:i/ P`m s^cYN߶*@*lrb3󷤒5XH*qY%iJ {dwuuŽqL\9f c9# x'L4etʶ;8EKg]?ohx?w9ޕ9Z1D]wOf3W^]蓿}ŝP@ɎR>FT=6>b>amU vd.n<*"D~ A%`%'Ln`ŜvO;nTK, &s`{2Nqs#NL jE3&i,Zf@9'+szN;V9 Ǚ=L`*bWB% NUf7VC|52R-cYe􍲯mG Mu*E^nжVW,t=l|YW~nceݦP˜W\  1L&=j+2· "S'3ƞ6?d8jKWv]SbT1U)ώc{rGoILEU iToU5鵱K]XPǼbf֒p ~)L (ۼA !oXLOs_F(êh- V-w)})ʴ\E\6c/OfoGn ,Q[Ѵ>vk+|ĵT@'[M"}D$ͯowoNpM6xfTTܤ:J ev[->7QSIuf:ENR#+v׸ͪ;q 4uHEkU)YXe:LdjY ֲO141ꔸ\'fvR (BbNH;.t Mfk{Nx??uofE<1ce?Tc]cyab6'bL00sR9@q< ~: Ao]u U_-e=;knkRw EٕȮ(=";M r`6_w@`=-ewWnfv+GmW؆Ѻ~F+]DM ,bi5ѩbdCZ2uJ&zlzVt>^՚ۮdz E`H灸pۼV ~" 21]7D1%*s&E f/ƕu[LTz+N#VhM#W8A c&c>0-`̄"3P}xJAŌ03W:ZS)D]?V2=(2r~K e(;-ӯ˝=ojaJ+P]JSNYԯ8p3%q&.{Uw:{>26;zV+ٯ:4N2*Dȁ*iJ3͝ס 7ranJ7b sEBOfrjcfh9s{+ 1o\Nxgܨ4=ɌX!5 ]ϡF/_*tރ\07a͑Coދ{riZF Up"ôiZBaPBnޫzlԼK'f+.Tqd녳|+¦Г3 >9W=.ߌ<|9U7} r,&LVwp*ҫL&nX C9 jN"i[پzT.6>칃Ha>avL뎔 ! tՄ 5,vvY&zNFv[\L~+ q'rŮ ^O<_հ><(>\ 3"J~‡DCULl]1:[WavˑMBvP5zĽ*UU'5VFQist{®X.JR֍A9yaV>zP7cjE^E"Xiicݥ!_{u۔$f}BY?]+s\2oWZ,r6e7.]^,9rQR*̵X[̌ w9ߞzd'\ b L13'zRʺZT23*өɳ`hEuو>p5i_Ξ]j|YWSE%_su7|%ʤ_~V\ ;^g/YA('>=q#*nD^rYii5FkiYlvAKU'ߋIzs\+}vWDxfT? 
q?5ӾAgXV\}W缨&:R5fn1PճZK)F䌲qjV/mݐSvg&p4?L ɞFu0~<fI."h өA1jf|34bV }[oaY3в f^3X:i{g.s4;7DJɝf]mu#q$ |_Pi^EYz3+mDZDMݦ&(;"Ruk>rO|Rh_(dǀ5AMSϱ*lgjς`SG5's{l3z?ꠛv{=x;87WI5I[Iq;]g}6"m7&X,XW֤fD*f:^ṙ#oI῔~hNKdcҨ:35>n0>lP*>iK]_uBuL$3E):0vZE:$q~=bNfJ3dE@sQ [9 ATYZr^d0NcEGp\ vPpZ=YYmt7ŪC~K[}jSuwFfIK+d++]FN'ޕB{PFKpK RXH5*ĦǻJ/wб;GVؽCc7;Lbw*Lsvyȥ]1Jfo:p[Fmg ' {`XUD7xdžw*LWǙJ/(!kfgs]u{wq{9s49UwW :*MsoTg~B(?L0z±{W|p Y& 5J#;V;D!/ՄOE4[kQW xt9 > ͝ֈ2q#@fo8RgN кV=Ζ_*IyTE{GvЬ}~mQ1WY+ :޼ bO܋jr o p>Fu59 .f~o-ɼTݝL C}ץhK)1 oA|T5? NU;]pfՔy穕&'+7f5>m6{X3ݚ4?`ڬDz=_UXO8QlRUja3㻯M Uø2+k_gӂNf2j` CUլdRse>!w Ɯ5dv:LXCni$V1_y2I6&:eB ;3IpMU)7:3I.yrߨj?c0{|/F$Z=کoXg>L~JdȟFf08#{E=pVҙ/|0q%a]'ӓ8H9XImن2L"/T {=i-^kRYo7MB 7gYA1TFNug{o@oI-^ؘUWLBo`[ xf{=3J9̩+m&m |t{.NLW^gu[DZɞmwB>QeZgxmktC&h[wh]Xl+N9R9GOtߩ&Gƺ[} Әyı,2anŸW1GC3n+f! 16}`n!JVfaԣ~ d^7ΡVy4ϫM5Ӵ4%+J}R+p@ҙ%.@#~׃"m]Dfj.ZaN*O{z-u9[.g^* &Fwy/*J 8I4}W1jUBG'ރnAJ+֟*}GxVV]=]_4Oh8= ;faN6CS9Ih*T˂\BItkC^Y׾3 f;S4dy#c]iS6uf6 ɄHzN!svs?g׌jp 3'2Zq4r31C Gtf-td:UVӺ͙!R'8Lҍq^qQ@U esҦINS< 0N7 ן6npVVLwg~,X.Ȇ: q`{8c o(39i)21ķd~%jο}CnvoE NBi6j?qTJbۧk.=:'OV+371ŠZip%}H'z(RfLj8KEnlq!sqkƆmr<&Z/L1'UWכ{Q>wvqDZrH3ȸ#l\A |w>T;Bd6Ҷ5ZAňuDe:-*<]; 4˪c6ȡZ= *?FYNcY }̡]2vS83*JпEnw \ < [^]Ÿ+ M™Gu~7iƣ*'W1LFB}7/şT ΂&_&Ph9(A)RYeg}Whid٨;AIKv;gc n>Yg xI338# N(O>[ % E )4ʜ@ZDN}/xgU#nU' S?z4F-U6m>9"H2!h|pww2Ȝ!vٵ[TTv6sdZ HVۛ©_Qj٪om3̕ dn=lᜫ(ItQT6m1]b*uYc's79;&yOyE m.NS2::+NFI9I.g۪L&S g7Ǹcv+3h"rc´F%\M0b'7qP2B,KQ^OT[iպ<}DX ^;wUݨSrǕp<r\b}3j6MӺCUB>)8ɁFu;7Z1=3fXVun=~ uAxSt$;S ;g_cĩO7پ.V]53'sd@OpW~C&Tu\]ό[\SONsj I}&C j|ĩɝ|aFXX7XD y4ك'+7~P&ym3BD0a3=O~B|ۨ*Wu.-OߡG֔ fÍڝ]rORmC?; cуGPZ#+SZL c^%!&q \1QLw2Փ޵#z/;$ŏTj̿O=SJ,~#wxYaZSի8{gv_ʭ).>jrW/ny9 G Sa~?#2(?1R7QcLyTE |Dbҡu1]j `Ң݃evw<+N?#]1UXJ=w波$N#! LµJLA/W;xb|kܐ*;FɊe[Sʵ;e+2ȟ9g}~Rlj&!]2;|SCĩ߹(Qc=ߟVZ?0s#{L]ɐ{Lm>nd 2S1iTɱwz8)A+rcbHq*rMɷo|,|?D/[1E%y`?r3֪\=K(9))aoϨ>?hŹ圽eOP̞c9w=k&YaJgp+6'Y"HY t{c;?Áls9l YY= հO M;k\)n^Z+RavΉIxD{mm[ܰ,sۥE,]|jiO8'Is2L'%;%LO\l[y>bu؎:6H1k6⦀S"<'}շ },O%ZMکOJq 8=\[T]F/d}. 
J(3 `j1Q@;3 \`Eu\7էeMJd1Spb\<:ANs A #&IJZ- X.53wOXFx3I&: 'IA~bYE0a:]b^}T9Yxg>3'tvl,:1;&ꂙ aU7*ڒuHGm?V6y׆\PiHTv+y]+2摒øv׳8]ϸ5[5x35T1vxwH…;Eae7SX6l~ߙ%1{7|"f~)ܡf?1'v:r━.2rx#kԖ*mYsS~cݵōc7'y3qO_OzV{7L='Lؐ3DYpW}Ybq;NŜp6Wd8;}Nъ]`۪^ !7X;1eE|sؽI1IɾVb"qtYgi9;U,?7 tEaEDMb.4P%UO<5XHbʔfSݞ;,Pyɲ.{}Ыzh&Ylo0W{٫;9AKWuRQuR)!B HI7)^^5hrEG!'F3>u&C xal!0LMDl b^ÞzΓuJWmJS[6"8֍f&wx>1n9fwacHylo8ŞDz0WS$븱# 1.TD$z 4-e.)VBo;?.psLrg5k1t:3nŽܧ3ʑoh=fV|Ϧhܽ;JV+~rKʲ3yvWg)dz7 ZtyU'$|0s3T 3+sXjr;U]Kݱz6Q=awl7iE|vߙ2sQk}Vru,{.sdO#^{Ap^dߝ`?pU\ÃfڰY?yuB́Jic5jА\֏|.|Hᜫs'5v<`A,9y" lV;a!gWS4ȍ?(]+:i]nKHߐ01- f.^+iJ]un8OY\llOحDfTQ|42b.`"[{vx2)(ؙf's8*i$u~$O.\SɆL.3ݭjA΢= g9 ;axVUEN:UC.O#G>a|p!c;q= c-R+Rty?9LRVQ/~4q{JY|,,Ǖ7Xoz#_2_mzGӥί苐V X":bnuMnfߟƎZEa"QYcYJg+WAwY§ ܣKn_vbչ+ ;فdwH+Doo _=Uw?dyuyGW(ĐU {h{16 YyCWS]顸*^WSψ}L zBɦ3=ESwpQ"Ov];2GYVѸX̺2c?5#׹91Vܿς=Lb_pܳUͪOsJfao3ҿXϺw+ͪiȇݘ춰?hU+N U;ewQ_p<V]j^z?Sĝ9r wB^]maȞ >sޫcېٔr2g3&c.dgAʄ7j .T$)V$~Y jwX KWUqg]a;]› 뵡ޛ[k#skc gG=ƕrajA'uҕ ^m6gwTPw2G(-Yϗ1׬(.7XZyhզn`vgG'ޗAɼ#句UvRdϽd;QiQ)yFvh7aVr͊XP}^|ǸҸۊ p,*}S/z@Nd8Lӵ նSw<S^=tH<=!.Tt_X|MȞ,U /wUm@#9&_[R6? u/AP{-ۺaU8ɬKdZ>n~pX?q74[p2{Bx#!6==⻨X+-Ra~"L᧻l(tU0I:#a3stiMoNUT *n{VZ,9KI*"GqU7}Mi{h]5"_+Zovل<8+HaV)W@ɰw<{̌Ff0XS=j/dXQ8ȤsTFPɡ^F>7g7wAN t-3!XCK .{G}"ndw~OzG TBXn'.@b1{=:u!IΚ5Sɝ@ǵa^'+mj=+M `ws(LѰ %û}gK,G?Fĕd{ٙrvw]b?S@<;h<3*r]!ah;k\*=gʿq/#^XX1՗ˎsZFZ, }(#$7jl"< *d!F&}bw+!ggsϻ"NţK6/if d Ȧ 1=eS;IOd&3/: r=Ddjx3^RkXb3+3FMhTtwevK6`#[z.+sW1~:7}}oV '4Naô,BAd]&b\yjىy ]:#vܤy`=ǝP_ޠ(l9{Sdezƍ2O!Op9%vSMuWd~5V97eY:1~Q)BlPF0i;m >7(670W(1K;* Ώ+Οvߘ1h77ӭeϔ^;bW'3/5SE?dUOWxcn}ǸxG3f{.sI; 8UIi@!Gwſs#tПDʤQ|Y@[ OG'RkW1C{5Nl]aL ?Yuy 7} O>iA9=q!'+k\$vO0,d9NO%r"𭼏}e߽c[j DOe:*ƕX4qsoT԰ߨcwk& r\M>\9(ܫxbڐf35FZuzf扺K QTE_[A}4zJ1bJٲLAd!4Zt/`8pq%j''N^9Xj|,(}ń1  b~we2:rnG܀LyY}rOdUixk~grֳs ;W=&E_҉r;&[nY̊1I)esɊ9TzK[f<=/))q eֳ[o<׈ G}XqZ["_{a:S6D.d.&}l7V]c5A sȾn3`ҩ)"R>ܩM)#n׃pg5侅:jy3Eı~s5}voʩ:ؽ-}$U JfL]{%Ԑ2%ČTroΠe}N < An?{]? 
ik3uᩌê7y$bwuBԧ}bPQis䥑?{:՞YoW;E.FC5:~|O=eٶ.GRQzqVOVx> {+~F qZ:w=:U|(>xce*MAU;=T4mpYqG\#;'ލ_'N/iCO-vcY I-\Ui5ِT$' Dʤ{/h_2_Gj:v%mB4= ވ'nfv+Yoy p߿v 齰ueyareu0iY/cۃ炛 3{Sd6a(SHykHm1( |ی+A4#3+ &5uOx7_kNH,}N_N%bֆ L0y_Z,=)-3;c-we3=ͱ/`g*>SL)cêG|eHeI|,>cW ?sߠ`F݀*̘UR7q#tUO zF!uΓx7nFr#{=lE{;fsVYvʹVS2f|:(^Oz+Xgzc{hS'<-,򬆳+sF6Gıb:Fq7+F+ 䐖ll)ή*`k#DLdb wNn^nX/4y늮;9,G,sc@vL\`^!3!`VeR Ub2^k lzؽ""w/4u/P{73jV5τSb8vJ=MIENއF|vwԚ*61wBz>63TBZ˔lHiԯz?.lU!fri|Ua'dg;=;aOVLq;n}]NF4&oocw]Ui3*s>uMK=!dl|&w2&hڼN2@VR9ӡbPTc̝)v:eWEɰ-FtՅs.ɜ+'rOeS©_^|{d[43ǒ9Qt0ٸ.h.%/:g[Xy&`g2YG=pTᯓ b\: 1sEcYGkܑ1wYgOħo w *<NJN]gLሓ7BMsT+^"2D=tj M4pUim>lVo0 &Zbq#(xOe -9IB3<1ީa!D5儙LjEw`1TK&ĝ90JpV=i59!\4ꞙ37hP\>UGmU( gƍ\"ʔ]&/Qhd]&js~iƺ5շ ߩ짧JLr|ϜwA_\ 3{wqcSjrXv3FQw6I53'"7u! Yn {"i[IAZ *{PBX>hd ]l[pVֻ*kUE$)>XuƍV;FZ=W)l΄9?ǬWrc~\> *Du;ŊvTNvO;컎$ZNJZXֺoiwX7֞*iQ%̓ ;{~ 4zIu:R9Juwȁ*gﴡ<H+P QgџXuKAxKmu<ֻLzvcyqs~.vvȲN1'n~>3WKKsሓIOߜKUy^+"릝t:nBVuy!nPp[:k{'lF <3O ;e~W5~Ddىew#׵TG}dLd[a81)K!?Ν|G$E:-򳗜GՖgOwEubdxuG:F^wBB}47;U}n^gjodPi˴V `{b}]vۃ*n^M7HEAɌI̡w+ - Utwʷm@V+a"O#UMf,K;\-~lO\nd޻_ʏs&c9V܌ςzPTQoFLvsKRD^r_˜W<͆r@ ;]  4}cӴS󜿧r9V|?s{>fQF4%n&s11 nNxJ qE}gLK; :Qi(۹F3qQ0ފo!=AɌ o܋}'zn&s1YB'\jsG lDfԞG)i H&ApUS"2NK=ߚǵ# rcxd e',kMoYu;s,90;>Ǹr|U Cr.H]̠dƄ;>q®vY;Nxtr_9/fz-w ~kvv\sxsjnKTTLBjfV+9[k/i*R҉u1 ΚOfCo[o# MDzLiTz3dsCV1ytڠ6ȧL6e27U=ƍd.K,p_ g=;\1ub[+S Kԯ(OWݾh: 9ebYS)V@ g11;*ڊx˻MjF8'} RƒX jfLkm_ x$5ˍwk\?4^IXDx˕;?ԕWnv.<̄dւW "Δpԍmw٪*cx䊥ɳ~9x̣6_6lºาA]+UU@[uLx=m7zȍIvΉd"Lnd2#.﵎:{r/unfS]ov3{ٻeL|(IukC3g~OVTϤ~Xݔfw瞘*>iAԗ XaUU}g^>RYȱfx!*;䧼kDQ~YlڜmL&`Eo1aߙ#{ݜ).L\;o4hMU'WG\#+N&r3qw@8Qq#Oxǀ}bB1m2aFSn6 )׊<~;K-C4օT1{ky]/89#X_6 !LY5Vd$f/ڙq/ϥ"zGEaLXqd:cӻ'DJV,]kA 613+Kcٗǘ9rnz Ty,+cvYjl?g`eUp4U)LѰGغR!׻:X93(V]l";uىcՆpM T< +v|߸Ozsʬ@2ҫ; $P|Q5rLhu!:Q1'9({z=-ƻ]ZTz?xý6\yhCE̩-t~V;d _Dogt:Ld"]yzdb8x憗dxpQ*NjrTӼ TZZ>c"jU}™;cdO;{uxx7zwrIv4e.nTB8Ǎq'drϜm;`޶Sʮiue=YrFAj_0~U6jC_8A+=mm> Vw{"'= NdA~ZbНLsC_.ٷ쬫ͣ;3;{n#A!g&SL|L ޝPɿεaWvص]ox.+YYOk)? 
mѸ!3BxW"$u*׆qۚTaZ&P̘g}i#V1O=XO'E |grOg2;قk-"d{Zc?ǽD9fO=N+ؾ &N:-.ɚNMQus$#O1wnTWjN65vLW=H͊g|Z G"| ϙ1ѩ\&^kn6YgNS-a3n.3¦W6}qO{aUU:"VQ²DK︂q/rq~+#̽k;L+gU1ާ (!Uԇr'Y~'r$&Zo+-^a˟7Yꔃfzv93NL:c&89w^LF7Kuj}ƎUvovۅ\چ/]]|;wv;Lt1m;+3 VOK*E[OUWd~"&B|NeR"1aKw D> YfPx%s*/ƣKmuN}ޭx\U`nO$¹Oe.F?>]׆xX)q#z8'>7ih{/jrBdveQ,P&gUiǨ{LGyTp%_pn9C4`R}11bugs[#1G)9(#X}\yHo8\70"6eU4 ݬ|TH^)TDnok@rW901Dc2mi_&$J|#{Keϻe_ ʅ;eb-X `"|*rmEIr˧Sqm3é9VϹvZ&1Sx2of=vF?*B7"k Ak,z~& jqI!Yl1OL5E+͐VW s$-+ >mϜF#!&HM")2)*Hm'ԗ "ۂRyj*ܣDlcY?>c=fP2GzP$!Uoh--LOg::~BXkA-wQ>P4"FjϘB̞ؔj\#wDTz,\xuq4M9}pw7}XNI&DX)nuż1=ԞKM8%$O[|CG\IFZN9[\!\t F>;*?2 G `n}`*!Sq4oPFv*r} <fdօYNwp[=r{We|OXEMng,~ؘ؁4jʢoê,Igق6Ȫgkg(FՕYA n%FɊ9LRO!ejSeRf!LzⰋO̵~ZXm>qJUI]mGpfTlᄏ&xnڷwo ǞF|C= kc[Ȥ3I]&~k"rQDnf2 n{lu^1oPZ;#RշI%O%dꀕ.+/r9ew4։ ggkgZ>^ЖF%g0;̝,/~.ȴŽ-nê; wvw_hly9a 3~uK=nPiq_cr\;) rs mלDόfomokUxUeNE-7msNhGTsfg&yVw?]f5 HɌC3 7rdnju/n`zDKǟ.ij vκf;.pyV;eN U+`?l"]qRkkwyzs)ޫ(IwD;f+\gF+^ OT=(>^Au*9yxc J0Myn+lSjtP=S⻯^ o;&$pv)OI֦4›]1ѻ864 }8/Pٝ{+ZP |cd "So߬w{#;g^j߷xOLyz,yc+~Z˘] k!W,)MSΝ.²9/*?61yYs/ \ 3wc7@[=o0qV!lh9y!)%=NMw5x˩-3tc7Hg0ebќ`0ѝxBlvhT ?O-tfkqß׳ڝ2bYK <)[4٥e#{' ".s ˿ߜ +ﺻpNƶ"N*sPQl}2r dn3#;fU5(Usà~gBwxyުV732TT*&$om91AQ8n} d qa2۹9?DR{rï:7;*qELnr7/u~p?'Yٝ;S8?Rs=; crx.57;>Cx2tGr`@̔c|#3/nu9IamT:_G%>Mal_:+ğfsQ^1ﱊU=W]tSN.D5EdP"2i#V1j?a8چo+w崙O7ޠU=U#GRW6sCvtR8$ZS;=E&h:gxgmUznLer#aΧQMH8ןvQr`r5j9/2{Zn1rN6D=;ZmdcJ&@AOVwb* WNPQjbk\ɭ}#$;KN {S=m3`,ׇ/+ nx<Փu$=pB8?= |0oocgBUi UtIϿߝ1gwSY1=ĺxnSvLv~cY9l}A'V=暉 1\a*ފȻyu%+"} A ֵpAc^E"]=w {,l^\USrL3S;I'ʊl 1_be U8pc/}YGwuc ;dSb ?Y݉aϺ*~6]EyNtO'7 " 0AuǵBz)L)Bf.:~3Pkg %a4TO0}_&] ٜY_wnaZ2K"E3 ,9+33Nabڴ]s݊[mϾ> iTשj~;6+n;ー{E/j3_I#y*+Rwٗ37L ''V'o6J3N):Isko,攜;ۻ#"-`N_[qY>t|NXգ'^(@!N5ʸqcY9~3z48vcՉn ;]V$f$E$i[ic=['_HLӃz{n#CN;4Yu8s$vϙ0-0+|;.4z,;`lxJ تc#Mmuᶑqy<罟B. 
wiL }\yW;?Fڨn;Z`eD<c,͓g'2;J3x,NDx]UNZ]@טe+Îʜ0טŨud(14f*a> އ};?2iWrʟc.$we|q76=M~x&ic}=:2UQ˓jt}_Bip؞j)"U*ǟ ݸv7T>8gh_ş^Q >:UMuj9WNn`-,ϜCwɓ"a7jt]dq*u~ w}֛:w_o{0>6UMrN$H:zV?ԘçQi9n=l(O_;9 eӶ!1V@s{+ǕNxl*7Ydba:|ƭv!XIP@;<|dT d\U|7x6hYzDfDN&n}SoT\4൅Xyoӛ=MV[V@[lozw$`~mɝ.̉ǔ<`YeȬ<^aA5AD∓wo_MBxJ^+{DV3|PI+/7fhCpOc?Pݥ[{k"m(h{ &@K+.7KDD5q_r\W7~j)ۇ}6>ߊ\ꄶڡiLc1m;]l33´{HLsUSt$fg:w°ќlqv g13kɄC1nTOVςx63$D=sw:7kNdQn׸Y 6U dW1ى#NUn Q]9/JSPh]EC\ɗڡ7_cYݗ}{B8$?1 ~׾i}]-cҳe㊝VO>6s7?:Җ54"*י0c5<.Pv/S$sՇ%"SQ{ "9gϜN5'"?q ̲AE*z&?0sEwv dh vƺʞ1WQ9[y*^; ~IfPX&.2Ea3rzLlqN11-*}t}Cgޙ AÚSY'7az܅cfozxTaԯXL-WJc!CTZ.;:们m?WF3Z)񜫈I_Ս̼-QJ xiwgRnQԳQw<}n}3섊7Z%x}txwi6 Yn=M,L9S@ &IE`n!'S;w'vzg=I@5zcW˳tU|0!AšIDX\a=@xèBv#uLtŧnd`QǕܟ=ZO8Tψ-6ǻ٭dO|;fO;wVL!G"'+b}9$,gtXc /ޠ|=Gy9[s'tsSWcՉDʤm i^I(sD3Yݍ|0'{bY =N}7ɜzK!!ƲxM}]%>32Yֻ2'y V9/BQ IJ]6$[oo!.dܫ>5ilB|CU%3ܠЯv f珩֡z1W:aQmIz5)$'5 )aVxuM6Ʋҕ{iJ>٭d~4xaݍlh:{P+]֮ziY͊'sJ^Ó *;۬xCW6ryWEm"53|݄2 Of/f#V3<>9FL{ʸ)&<۠Z$N (#N&7o4]YYqRj)1^w2pZZf/VrX27W}ԋ"?-eu*c,TEew1*}ZO`ȱ+Uu-rOJ;3nx#zU51Yi~"Blǘ - !h$P䟟ДPDNc|e2s䙤1qB,$uh gM}bm6$o/;U>ϖ`%!=fho9}֋:Y'sв}IÍ̈́,{PzT;`Ƹzרgr?b~`Y1JVT¤~Ϲ-F"\Wf~27sNdB@Gpμ}"nN/ Q]mnDO҇wQv(?XU WXCUwנnxQAQTH#hJ>6UG׸)^ъy_z!wLH&:t$ֺ!ąӺnzkO~D.XE4:/mKX8h*5~3dűjr$޽d=bdy*^ﱻ ;Tй ^8C1U6k?;psL'hyw檑1#VUww7,zd;khEM:m@fdρY8XWv}0ht!#̴Q;'P>R㺺&8N'9Ա©.7dO~WvVkDMܢ"{i=;! 7r!V;_N==fm|N=#fESgqvwsXUY̲JߩbJ#Í`~#+XƜHfkw^>7]CLnd2 9A {Y;))aGg@.@φ. 
v+7;a2YqKj_=aqxr#_t.$ _y!Ʌ&M߸;fBrx|iV~G38[븕 ޹Kxfw:LPHĄdɎ_(oF?pN B%_VfY;:ԟ&1kIj6rK܀KJOXꞆ(Ջ6HJ6{s=&.fFf%i'T*Tze^O6,C^ 6na½q*?wX=^:wa[H= T劇3̩oY2rQdff~J83*ep*O!Uڅ3wrPUqʛlou&ъWJ-Sh1yrES\1S#R+qj>к2 {f=޻jkYS$M}Df=sHcjw |U$5T3W=sJ{4(Y_z]1n qCO3ٶ6Pc&;314>0T3E)!vf-x)gO>1Jx ?y?1;cEop`>{dht|@hL]Zh2pG L{j7dQPJTGub ҞF zfV۫>J_{5O˺HcR >z_uR_KNø~v FQW̾mQs3J99%[紿v[{CFXѴm/:c>Qч^Kht &\*HaJp*  pG`'-^aZ`ݹtDFo083קk=w']8+zYszlqCW/ɥ*1D{~\HYͩxwPmC[unp>4>hCi3-+wB U8?NGߴ1́Nz]q9Ty  U'7WU=4ɼatkbz:!f xϳH.2ӻ!玉f1JV|cs$ɗ%<-^|4ڑ#쀱ت'j%-^a߶gN݈ɠdƄߞ9Y,j~oP#t@_p]nSv!hLdD#n&s1*O^8( 5{o:2w=3KY$w⃛{Z|Z8k{QȎAo| ʫrSڦ[wBN?YɺLα>:gFj"9y&uޕfwd?+Yag^@>0râ{o]TOv8gHgI!v>3V̊iˎ}n=LOe:*[ mL RV؅QMȳVUWvWq;jҖ"E,>lvXYaلuZ˖]4fZ*hU፡q:3o;mTJ{rSFY~1Ϥh&Awj*`p^y2s3gȏGŏ_2PrACKZ9i͑;|˲.2a(sD0fBv<ۘXwHŜ$sY|+Vap e5iѥ?bSęf"E_GBǏ2~H0r@ !eOw~SrѬ1ShIs[6Kg)D [{b qٙ+k1II ֒~G zim} P vOkTh=?[]p۪2oƜku `j:LAfʑ]^qg8 YsZr='E׆tm³]݊'bWk‰;EScSQ&#ΖFv;؅mᬬz7eaEv}fa<+Sͧ))i)_^5vhrYV|&);DLIvzvZ>3׆=/3~ܯh4n(qf!۩H77qtSe^y#ֳ:>qZL~g=q]jeLHE*W rCǿU|#smPV,k=?4Po}_ ڊ';9!&ݦl|-4|W|Of&3bJ4XfJ[%Iˮp#}_7J~A#]p&lw宷C\0x̛1w~AZP?Z#ud?VYy8q˶zCeS%{o6'b W^Ngg; yy\+W^{E[;U%*1SU}wkfs>;>]8[笎Dni\Y1ؔɮܰO\U ö>CUZ~?]jbBXT;yۆr9fh9lh[l2!3`ӳlͭKjҬS)'7z:̽p0BmT?|B440S13 ̿O: c&me*h2B-^?|L |&=sfgC, *Rǯ(zRL7\6nU nm+]f/dGjHc㻯*;2U}#&<}jU3D~UG?ۻ EL]QXtyΡLo3X-~*3圈ӹyp~td^ݚi_FjLR撘ifXskN2X .>NJT =qwg2z[{ZyŊhV<|?av>eOu@?]DT&٪L[z_%Qd'u2ÌP}}1\1ԩCWk8uݛ6~9uY?ȳ0uq $>[QjwEӛo{Ko0Am"SXU%ZcGܮûw&ӫtIbVh׳\|`.j'3鐞P!{Byk5 w;H!p[zuD/S&)2)z[[a˱dORfR风m&K|T u^?w[r(T Z_̏ _6;eK%%kxc"1c#$uq2 ĴiI١GmϝveҎP/$vavie⫍ߐ,_[Uq#5Yƽ=n%=G '2ΟߧN 3^o,%Ǫ~v uC;bLm碴t$+t=bm1ک"7'J KAoBPKG\Iej>C=z" a={N*sPqw/'Zpc2oƴSɜ]B&d9ӱ\LRM+q<*g:.qyj1vvYpmYie:9שS7{|TSeIn:@{?:1{o|]"NO.k~YgQrSĘX&>{do5[_Jwk8q+]\y҄5Xy#"sDVsYH‚֑ZnPs*tҨN$*|oV2d*LHQ߼XV7>y)1r"d!LFGƔgF[1 +bf؛ĝe_zkdGmUok+*) 9PIU[ 'hEQ<+q/qc኿{EGwBEO7^mPD{י~tsi6YErOV'[O91‹8̩޺':{Q{bQ%亍9%"D̪i7{0! 
z?ε`1+?a߶-e0\uI+i8gɻQ3>>-F^Ϻ>Sly Ǚ=Lv{OҪMaUx\M',;`e\Muږze&/f=xx=T e(a]p7lDs[}@X bxV\+(b_Z lG|3nLQ2{ޛyy@QYzs'߫cѲ683H Z{]mu iU^W[ ڃȊUVة}+DH}dbϿ9hLc1s_U[Ȯ*MVpYUf{0~Q9u,3Qm aY P`Vz $>{L7og?n URfɸzώ&ĸO+ǟ 6N"E3 tUr]Mx{1{7#[Ȕ{~z"'8wGcPggWچz@qW'r3q+w-Jnx묞h&Aޠ՚YO`*{)\8|$-CWSp:8O70& T1 gCADA&ek#W$>i O}P&fWpXe*а6ǰ!gO}vw]܇>7ܱ*ʕ>M81LAd!֜ͻf7/EL{x6g݊Q;_=57@pR8d_9M!G#Rv*JmRlR'u@/y+[L;LOd39K/D1lePYF^򘈕Lenr!+Zof=DgrV bx_ʣ*b]/V Miz&SBU1nϠq [+]XzWr"TUלSNfqՒ YV,GWgy(3ELWN]5l4Qt vc1L>x?&V2\ܯfx+XV`WNd%fn``yoR Fs֝"N`^:4k;۶bA'p*_o~]v"ٵ,oY]Eɐp5pp@xCOR]GxUo$o={_TE+34 ysEO[ǃ,oLSuc';?`pLf==vv@ſܿ<3[a~A{>S'wUiYcDFA{&1cz=9熩E53N6E+YqYf'atB3_Gs"ި@qg{P]*~+zNdc]s W,nf01t=Ef1*v~ V$ LQdjY.!H٪ӕv汹7vC: BSzŬ2i W#brwg .fl*rƟfԞ>&5(;X]nƆ\lJ ~CCN1!J wR112UgGUaPqT ~'5;Enoܽѽ\J7` { N2oNC#1S}<4t7Nz~3#&=}gayjbs/{rMmL%\w$)E2#&3!,ރ*L=XW=4{O 7TZ>b])mɖ\peXݒ+:#%B)裔Lc?[ذ^%*j[ޫvZ\Z}SzZXxΦBx:w$ ׇZwrg_N1ݿU1&;QgwNɑyduQe08nM`ؑU[w?㪪(I@Ǝ{vjWUŷLsE=@a&05e/UD׶w%:rU}B_Fr8g5<ƕu:ɄA^k{6ȆL.cuW7T;~V?UFNHJwVY9_Q}/d.%NqVcn,KIxrs^B=IIJͻ 8a(3>́3?q@N<3yw\ {3@{SpDzW*#VC!|W[ DrA!) Ƕ Sr)ʪDV(Ŭ%K磶X"8 3YNBo %"{*O?Ww=\Lqyʫ{Y}gi}~\a>hQw7JLg; :~wd=ÖF y_6JVEI~-pbOk4Ya;{n"d&A{,Jm+d(3k+z{$xU@{fo'N9z% !7*dU}mV;ݬݲSiVs*]iNmĸި2 "}~^1_HzW-օl`\|j^G4C=%*5Ǭ)H5[vϬX)1inJ RÉSiScֆL,PC0օ-pÞYiWhu;`߈~DN}uQJ I:mEr|n9_"9nf^ ĄkAOVDŽԑra3w,}ٟoeR *SU`U9ܥ\pNq'd_]:ʵ[1i:@+b^q` !VܨȀ%Ϭ8#aE9֬_Nu?wac"sʝ+ǜG̀HCy|m3穹9f!DdEo(~NM`8h~Qa&.ʎ{OXf1\阓]J6KObP&E_*#6b9Ws8.**>FYe*/]! 
F2 _ZM)Lq2ޝjPYZ~ 0_k|m]T _P\lw(\mύﷷ|ܦF)*p/S0}f@14fv 4Ӫ)$5mt{ mnSm%4ֳ={&5`ѵz}6ʞ=/Ac/q7hBNdv# vwcU%&1Gxg>c6 YY=7{,s`!£v2(Mj':tĊKU&1#ª++ *X to Z@EHOF${ًw&@ 9jE|!DSgϓ~-,]εLVɨynww2?*;/[ \q㝿s1@1^hLcjEɋ!+f5DeDkQГԞ]J;͙Q._iֽ9~xtM%{r!RTgxmWN{"^l>$ODTj: URD.*sWkTHQNmKf8ޖ(;V{]9[QbXϚp k5|Hw;,g#2A PA FEU'V< :QU|PgEwʐWWnb~Sȥ.tU_j^s!9DNB]5Ƙd;5D*w HTg~?FtADK߶b}z넋y?/Sf|1#@E4)6^&\ч^)8z&R$ qBwk&yZ63P 퍊I# aE>?6 .=<[f I5^$+kC۝Ldz%>VFa1-"TUVȦ'O"z1WZq4@vhgf')'(t{d˻p7[ )2I-drvg&/9{Nw:^Uٹs}t{ٓg{ \1g-454ďf>NyL|PqM\Be@>ٴB}!P^4 ԭ]+bOTdۺrm==9ώλ/5gI'kcϘg·~&x;zޞVvBj$ g=y@2V6'I $Y`oSo|SDFOY&Ș$ru9ƝpO9챃f@*]iN]}ʞC̘?> N{ƸC34@vYc= c\sd^=>OYF.;<|N04[ԁ;%׊䛤_CUI!T*ʶu Vg̯7ƁTRܹҨV}Ng>zuo1:{x*~^FwXwwc1ى36tXLUf7x,MJ% s8Yh~a2R, !Gޙse(TW)?*裚yXUZ;9E(]2❑Yy^ۭ{|vX%Sv;5$čfy"SLTs龌Naױ'=5Fq%+>6h3ճ'8;X k6_W XVc{ ]G;ygE JfLk`7i`6y7r Qku/,PO)[~?R5#,}')Ȏ%~xcTL2̉8\! b;E|E,'/FLXSVĖу9 JfLlMKh%;9>gN+&Z2y=ĩ/ L}߃B6-ǿ;+4ɨyn]_v{ksb8Ouyʱ'$ݿe΋Y.=n(=3Ul'q\ޛ`й!ꞳR{ۃ~HDf0ZPBaPP*hRFxkQrH x,r!h3ǕYuko lӽafB<3Ҳ @ EŮ{o(V^C(Nb] #9 VuNvM;7Dg⪪w 9}ixdwӻ"Dx⢾,ߕb%@KN[=)B?<{NܽB fLxBqW0K\9+c.Ԝě>dݴ^IL]6~^sm+^hD* tԎt} ϾwjH/'$$1M_>Y>i֍Ԉ*VUq 8@M@|B&1R; ߊa(sU?j`Jbju;Diq.=pwZp&hfFkU-1np˾9}r) x';͝;= {1OƫYeբ i{ ɏ >fTŔDKK]΅SjBcꧥxe3d<" _nC3A1>Ghud^~Ǭi{ͻ%.Dw@\>ꬷL6LջYsJ2ŜK@,>Ͼz٦+ޙ)L9e2b]uDfڏ38nevw߱ D2Q?DS?]$)DdIS3o3z0Nޗ)BeǹsS*Rƽ!*$̴඲^V^fTPJw=S=aeĘXDg"'ؑB l"FPiZwK$ݝɜg|9cIuDN"z`";YT >V1֬<|NE}o{KNwvYӼ4l@>g9M-aݘ{ MSH|`{t6;I"lN @S82SNYYĿL 4],?uc&+&mnYo!-kCN99^pb T&~6MlLnd2g.} wuA5S~3*9eg<C _|Т:72xwwۯjiL]fi>ZkD\XM- 7,!_Cx ?ߊݷ/m6JUK빪qFUi5w1pw("rjO5YbZVTs*M>"6qEԜrâsme^sw !1j2eVtйvÉS> nSqp/TA#c*7Lᥐ|;DswD>!t gYfP<)aLUZzޞ?Vokf 7T|jKj3sCYѯׇx. ٦M.:b/dbP2c#Jygmy9+즊]W mu\X6)>GWVL3 -uhA?SInSSR6? 
u}|壼+v(q2W]jcqs/ˊkam|}v Osß]А/;͙+2YObȇ>;;L1:Md(3EQ :.ݖ}v{_:qdu.>3vӻ=(т(>;L A3:a2Q :`&iE%S(1N³INƊm\0U)8AUլm 1»ba\spv=q}f*N e)*n炮 2oCVc>i$7ϲ]zZt^j2wa3ƙ[6/QbmN9̩_K{0jU=_c_u2;&;;^G= 1*QJ ǎ<}T* =>:;#</qk#{]cϾ rF$T>z Z+5)0رVP, Nc1 ]F&3hwhxv/i;ނ@Q4 $'+;UVmҺDOj|ņC֒|j;"I`@ҳGILѺ8~ݍWձz?!kG:[9;L]:K\Wes\Ùn_v486pz-5#ÕwҝbފogGpV#;!wUn68֙[K]U:fHN*)|=7tǦd<>G5 q4͎'zYw^69&ʷi:\#Tu^Ӱ\Uc&<{*+Tz{[ߡ]gT+mY)ZЬ&>mCw씨IT?G̒{/ _x }ׅUI+{;C˔wLlHn3 @Nǻ!3w[ug_U%̪·N[l|pq怐.xMUלPiS~CP3AJZopnfR7}2XDJB=M42n.Fp.T:kLېYVRHP+wRU#SGUtI %~/ȀY:L%pſr-_w  2RY1>jxėܞo-҇ B%efa<+3U32vaٿ\(x!wY?1PUu12&USo1b4;j!Npg.u'U-DWYPW.4 WkIGǨ+UR{ÌH H̙Ը%nD+W(&C=+սy3 `_`մuBN '\ȼ!{P[nNtrAK) uHLhf>S1GY.W|d*F^qB9jYgC%a<uJ=F%3!RhTsѝy33\[ՋƝAٜ߰vԻܩb)>MoyH㼼asd}.:O.*D.Hv9kNQa{*1Xrv4 pI8H };})?4oJiz^gQylz?u(H##)FpvXa9[FqfNb9ʁ 2)W%Nz0zyѯ3ݙf,҉SLvx{] 1eEȶ'yo\^3ʬL7nV!ZZf؅. +&G5bAb8&Qy^n=p⼌VE=NjVꆛ1*VrE !CDC՝%c޻ |6#&Dy%cFRA̟9r8/ ߳< ܜdg6vDiU!Fȴո:GKT'yJ&Au|6Nz!J[^LR(f6-J*UFXUܤqdctYyg;_+Ԑ*dyn}_nBoa3e)v{v YfH>#/f/oODj`Oe2EG^PfNYEk#~x[2 @j~L^kR5~os%a6?:ĿhI;]7txֽ;i?y#56~zyWse2:l&2WϪȰpTD$q%Q.zyo*{O0ip+N]vX4K 6#&(ϡ!$^R|e'뢯?9/0`Gը7_ǧ9edSZzcoNÙwg7N9UғuV:H/i+Yzx7;20+4Y>8f&x;3C2}ût`,> 2Z[-`]'Z9x6+e݈3VUQώ^\aO6Qn/Ś6娡s1MB{̵|e'h"2-Cv*=g>Lqt֑!u#r3Lhj&Hk6݃duM~>sc͜*_=c٭D2I֙qӦ. 
;0قV}.;]Uyٓ{ٴ;vsܡ#ZiZ.iY#gwi*7_}Ky2yg÷[fR*Ga:=`eUp9fOy3nt ӌb}dǁ#$7S1r1 eٯ6LUd :T4r/5gY9PO!ۉS],*$FQ.۪ߓ)))Vwm< =.`GAԜYwl6t"[ϼ^u!ZWd7zjx&.1:R&ѕ=( Uܻ_Ü2m_țjժωmQTgbtRwDe.>K8.lH )VzMd21/oXZhYMl"& ם7\ZoR?BT~G UOv^:J>ޥ.t(qi!֬/k2|M d|q0'?#T*e3FZW(Td.D[*S}]1ӻK+\ =9ב`{ߊD^\s"3㰹DOg::)Ţ{j\pwn#̿e6:t06YS'Ǻl.}B3 k5, :+>L>y2wnk<_?۹aҘTES&i~uRU-{=ސ Z.81U;*lonhC,nS m,=Eե;lJfP*k \dVjN02ަ|zZ>~U6Ă'MYѱ̞ e'q_!8ɉQv)EW@dj&O+{ QALևoSTeճdX54\<)JnƉr!"uK1RI多C~52\&Yv޻{ZtمfZ,N 64I73qd3LssN)W8'Z~ŃeIN s.-}Vr)&R{oQf5k5߮6 z~ m+i2P=kce6眨L0Ӟ2iʅSgY>iAY@?-Y.vgs!yKDWLS0amcg0;94㭠jKruA';'7$E73Tf)9pawS2:)촴l\]YGofyhn缗U<y/V̄xf=> z3{cͻD;2Q;g[h)b2+Myg.Ttu zBţct, &byעr;'ݺN8i5i#ރU$yzx]gKLYWY8%/NJwz"βyQbHN&qؙhKlݺ ;#]J;0#zQ7i&-?kywY NQTyCV%0u;a_˥Y,.%d>iI/m2_ڸA/I3eZ)_C"k@"~AS '&j'Tӧ2^F|4~̕xd* xɂ'PlxUv5uTb$>Pw*>:٥e u 4 2LRJmuXÜy$׊p /* .'jpd\ÙӚxUd$^YYљȱjp,l6 g2=[^;(*#Fq>@vZ6a'TǤC{Klnߪ /V80aXFϕ!x2i wEWUO%5W4l=3Of "`)E? gW; +)Y)P\Gc_\GBv XLF=\8\IFtkuc"@ȫ+h2Iz"nŢʹQ'Q_^ò5rzj4VǤC:5^4}Vh^C9;Z&l+'q$S-C I6XTT9T*s#wVg6EfFɝVNTUky5D7.t 7zk@9qLγKE_=A4~&o7{Ct'=>/x1g=w 6 |V+'Hz>˲}y"􋰬 ζ2ϕw}'"Q޿1򮸿Y40ݪ *3ά6غڸx1jPNz_Ո>A'~{ ل9Ec&-dngcbȻ"7B~^D/FPxG!;1+ (RG߈/G5+RGM鳘?>o?pЕ7/,Ý+|_ÞozҲ/}WvW;Y\ZTV.t[i۰SlNPg{֝dfޣziKNnΨz?xo#c"}>Ԟ9ms3c3c}2^Omp=1/,Pz,fBLUfS~sچ>SEkٸ8=%왛(S6Veq'T|uC:;*%֎|9)/p,SapFR\a`'"*T4(x3R<쑳s8钢7Φ*OGdIލF/t $w>t',u O(k5jlcFxFb¯CQM2M Ԥ%Q?Uq'R]d!*og;uىc^? %#=𖙻/dvXt<;3,?KGhͨ W u_ fEMsC]E(1jx?9֞U\ߣ =87JIDX)kC9i:FCFU5w윶^>NwzD a;xD*Mq[ȻѼ:gu3vo;Lհ_~^sUdP[&ػjd =QsѻTxR 8*=>VLx  ?|Aᄀ7|VA@*J+Lwbr÷͜>NWU&);gCJհ٣ +y^ +YgW{X=<+Qmuߛ{.G9gU~=kѱ\|;X|Qf3 Ufƿ|0 e.\O=bϽD̔#{]+3:.:joH.X4OV6%\vw2=^v>c2b= |X_28rY+? kdYw{\-ǁ)8Ęgi}6bw_<f:߅|.~>جbwk2=4].ؤ*cÙ۠<٩N/=IubKk>|c#aȌB<fA"tBg;b2 4;WxQ'ވ:yvl.t\"#BҽSqq~e7挺> wz ۪9t=`M;kudz:j5|T{cGf`C<2቏6Z/V~{fgbJsJ3JϪD=z¹Y>uƵ-#^Eem"zؽs1s ݝILz9o5z5TE\:رv*Y%H̃g`?V}_|;p8jGwuA}a:LӼ*)8,(S-|o{٫U2Y}^nt0 7S&jhc0 IoVRw螸FT;R)3ɞb"yKѷj}Y͋Yϻ"/gnʜ@zN食iDl5uqa)VyӋmE9T@;}Vܔ{/׊o댳T#UKB  ̈́(S׿w. 
V{\ `H_*˪_ULn5Ÿ9Tz:ʞe:N*2^37.nf+ZLttp_TNz5{~Zi2'h#?~]鷵7/6JBw"] ȵ̮ŧnr${7W *58qn(cȢYV9Haj}ŽvZw3㾎P]0z2TպxVnlj291zr!ET+\aNg2LRM+; r`􉞦|Vˤb~Rq\74 1UqX й+ifӸ}v2).OM"6Y_ 9-UXEļ^]Fo"GUd;jk*磝\31MV 3bð:OX.]4?"(':[}hJmQn\]ı;[%U-a\U1ml(foGv()[Xvg*ŦPU%;`/]5I<`2EB0H3b3uFgdF $#YT{0ʪiY8RմEYG+=vJTyu5o|IU>у _Q+bg$ry'9yW2dP7E/ev%ev{F6ExgD>FUt2)t>ZM<؃ȢxUeb]͜ZSs :lG:*u@ 9F&g6T~-/Ɏi>8W0[f_Iߴ|Og=V>n|RmwGU h,g׵EU"3MYvbE2 ¾y{JwSiJBWy<~ʓ]wKqeV:eAǓ eN?FUke}߽|cBd*LL0u _\2nޝ=}GBLV4>TX{1E %C3+fD`9+w;e@?o*/S%UZ9yWxZYx#DWI1IV#vxބ>wCf,Zt:@s\VAL3yکSe'twhd#}y\V?Sţ?r,G=.T2@OxzB#IN pH^iij%aawm纠#ъ;@5ٯ.ݿ=jX uR7 VUTߝtˬK2w>/1/E1{aOO_ۨ1JEŦzՠ_pCiޛ>޷ iL[0#Q[]]3hL[ ܀Muro|]c|VLjw rXuQ(.g./SS}] [O^5-\ӧdIcف0ܻ7NN&q} q$3+H;Vy3d[+_no|yf"S|T_k^*Gݹw9 ;y'uC ;%fv[cwHf /vi~βLr7e{p='}֟k48oH Ye=wa}7ex3ݺ߸In#!œY>VesJyrNI7MJͥ&Qmչn; ZEWKs8Ld]מYbQhdϖqw- nx g")x] %L}J5:dO23 W>1^-9|/[{!671Zy#6v4N"Bg_qx>H >~LsL32޺x[ik .PXF#+>5zbE&KK| 2OO5!O=F<oWDuvLpV~k^H>3ڎ"u}4>v[*j9?!8Ox8jݑ~pzۊݪ;mYDcdڂ)k,{ tn]Ls66K|F>~{Cfu?Oɪ;{Cf0̨&2;L/7Xm'm=kBkhV=nF7~5wn{e~ґ;uŝԜ˅Ruv~gyrșdUW :NY(-wBEqsKn,\Hj2m<3 d>ӳHzOy@{Ps\G<+li-1g;wwOԼN`Ծv]/ 4F {,[ *TʌOG:y>}ۗ<Λ;rRtŠx㉊lO)yυ;c~ǡOvc4.oO3Lwƛ}IxnD'ZPB{U;8N#?/+cr ݝ#t#! 64mY7pC@Y||lVGI&Dv(D;N$xfnw_+2cW ]Nl`sK1U\"ƅFOdB$ƪ HU0yxvցzƻ&ճ(PvG-W+6e+z,7hxdH $s#>sCB63UǤCDz4h_Utƚl|FmMǻݕcnN4rYo;%FW>5&_1 +vxC X';wC!Oet#A+*aR z~߷GA+ = ~IVU=>?Eeۭwٟ{=旉D6β S:x>{0n٣kj OB>j2M*gFk(1T&`VkǬBfl5Zahf::w= e" }R2?֫;VݵZ_W´@KY< 阺T$ߟV9}{0|J'{$#ʞkA`Vj]-GulO g#q>u{-AH-Ǯ'Xt \?z)#O0 EZhUnp9fMs>kCV2޻(*?:'i2nǀ4fUe8 P<:Su̾^?q]XU?.Jᔬ؈NL(#{:?)YcϨˎjo0 9&Dh,G[\s Z9صu=D}&s¹VΥmT}g^ouVk߿TϱVˉl3QȎ珽&bg}F5x~UWL&*U*;%P 4B9<ѻ n q<u[]wC[Iu=[?k4k5auf =k#8xw'Nw=1{aj"FrcpN4UX9üt J8L)lnc+/ə?ka7؃x֘&m*ǻƹnU Ud~:M\tgv=1xW]OVws,T WʾC* 9"~O8zgt烌8(L9{oTGmak?8E;~nTlTPU ѵC[F߄Ig=Fߐ 5dMzń^OgJI.?ٴU3N 0f?̚uڪP;~ۊT)^}|f4kJƱQktUҹ :f,ᅥ`m`6^+hyDE=^t/Lr⎤^/%YTycb]*L1 fb*l^0^ }_J܍B+3W q1N*5|J9+\[3ŝ.3U7T! 
finrpeٓ-"n7X$g7<  ZLm{k#Fh΀Tf.xB1+JZC<%I+j}r-O6qݱ,׉'NUMOOUZzϥ]PULk /χ=V>vl}>nѷ Gh/ȕhL[UҨKXUnxlYH6xBhq>3 ϵ%v,ŒFfS2{c a=N,Ujܢb&#ޥm}Y9fxR4;]9_ޘƟ)3@*ǍWڬvBXrϚp݉f,0G ;K\/z?{]z.1e]{\+ք,؃F3%)v;2GVE}=.WOx!!>nd}wY)S!oコDQ'+7ttc \u' Y/t9)/ya񪪴91ܺg2kY*v|ZBL{j=-i8f [ohѯރ 7>ܐuSFVIOs֐ٟUyt׊g2dZ +gYߩ9>/gĜF>+dz*=y`t856R0CWGj&Rz鶴S=Ys(Liz  b4knLZ+Uկe* rkEЬqn+@zmccӹ\+ЅzeW=װ;67YRqF{8荊Zc2+ב7_ɜXn >NtU@5 g9{`$WZ怰JUv]xdah8ҋ4s;A!/hK'(e%.F 3Ratڎ\v꼓׻,bL_Mo6k{@ʞS#챪nVu*9\ţO^UNUsjO~ϐG'*u.ǹN1n.4QamsNkox'V+yhzP_ {!Gy_:ɵ5녚yrģe,Fg̪S2MxI*Gs|u'LAzJE1/>`/s6֞ȀV+⺨{*3ܪ|cqZU]Ϊ6zYxUĤ$X̲ {o/ x8nT^[2"{wS,U5咾H93\WΧT}]m*rsmv*Kȕo;E)(K*Da&c ks8;/؉ +9!E PI8x ̀`cP$_11^Lpd$}^]OqT_C|ůj{64|ۊP'9ny!u;:~!Xpj^֪:߼amTpVk _6(Sb._~PKZ;))a݃sAtNKTX1Y96f O}P&SSUO9M+,~ ʄ2힐RP6XȞ u$aV$g.~o8Ǡ5 5Y>,zm,;whp eG; }'g_>}?>ARS]Ǭbfhis/K:!Ϊ=ϛ'"ZZ̤1rm;\(J27LϊZoe= ʮVk;$I'ma,I'/"]<<)Ǹ*wm6̌rǾ#7Xt[lOD%ыNkVL~U6~pivYɶ_7ALeMV0_UCGfS>*~/fVد^|E* >9zs;8{MI:nGȨk&ޥZd5R&*Tg|1Mi1%6T3irw; A'dҝ#'* = ^8bU>b z?!DgֳǪX:4ZMNpvzΞ\K<$$gYÖS.:Xzl;!&23,ʮ0Fg߱rG{鳢k?xֽsQtA7h2}| ̤dܣͯ=a0ʅ)Liŝ)qzGdC&=iUy~xwhp^bȵe;웷Ύ)c7,_;iUsLEŠ7β,!%OTbh4̊e7$5M1N2!h#܊w"~QUf2ϕΟ=MB{8mm;"|$(>,3VU~'u/27294f5͂qgr_8𻝣K?JǁtJ./[2vwQD7([61҉/7! 19L sߕJԊYtܣe\iFoex/ ?wO6*%cu1͔+-*7U2}-c LѝMx{Ϡ98#JW Tpϰ(s[7ױptx߯} ou")|N gw2Û^w5b~~w9G_ntwQmv(1^TY˾>ȃdݛ:7^‚^"F#3L:t (ysdLW?Mr'1F ^m.t83{#a_~&û?aע;7`[1G~B=? ]f=~Jb;|3*or-jI"lNwGb'V3U7\:JzCf=OLPr{ߪ:/YeҢd 4{SOz砽C phuzWD |؄1w2îEO/_DZGWIAf1C#8Ľ9pj$] kL 2N9sm\HwuHI+ Uvzn)fʴ{K ]=S_VtZSUB-IlsjxϻwґtgdΓwxg!!seo{EANJrD 'Txx ?`MwcЯF3AEye'6;R{ ]+>*Gbo2[>jS,8ET!vev+ƅ`$p8hqoxV@^x?Ni+''Tq,Wt)!߫K4dyMU!Lg ^N^OT$?'#| Nt&lDfk uY9"3Sr/EXGeœ#NJd=&=:VQ}dg&y.vrYYUs\ձ/]ug6" Vv2 /߹炖hFJOOI1U_`23Ů_ 25.\} F>&GOrLgY9_r;[~/8wR~s%ˎ<-@osѓXHZfӘ89.HLރ.JY&ȸ._*ĩ#Qvd"S; U| =(8ѫ9o3{ǸS0=/eXѪ^6h{y6?pnSܯ8߳HakbMW8F zFm+bpeSŮ۬wUN5,ia3!(G:)\Ow|$"2GCF/W.vLs5UVCYkX=}ߩˀ쭱:4#($|dS| WBS L(_] h sB~UEJ ١ƴV빨Fvn'e=3%bP2cšdO zo;)L ֓{}tXz3~-x7*_`ȄBNїʃȭ⹐SvhU\z'H"S̽`fą~P~#K=].<1^nu3Ƣ|E][L=.Ob3U? 
߉2Mbu S2p*[VјʘࣙL:eo-MT0@UTcwLBC/@e^)2DW4ɨoBhR/+mzs=nA|/p|=x#j"iEҿgى}i2˦&0i,&[>Č mɎ/mEzt֛vLUv2r2߼gTdj&O)e[b 2G\ `pļC0s{3P΅Y]lCA[_kVne].q'D^=i2ψN39.5yt1Uk8 kPuDzԧ=`Ȅ\sh+V݃lx:]eoΛЇf.*@w5wSٵ̦s=.h斲 ]vI395sԊyLl/+Ag#2ӻ+-mZ& X8?pW$fQiW':x*3Sfxw}ٍ񞋞|tE[ d-wYǻ9/d5JaY Q7̺&'֝HBI]Y՜e1\ipY9Qm]NK'T`Q&d Rdo)|;T**?%q[%ogQw((7ɨ;ױeU49",Qyݐ)0BPfmֵ43Lw|)$h\Z3ns}ev/L)\4q vc/f}:cl*,h&/qr7es(v}?fNJ9U `H*9HRm1m׼rRZ9_X`g\+ɚFTOn۳J~euy q]P3Uؙ.4O -՗:vn+h4 y<|Qw=jF'}{yL;g 5ZU0n8B*1;lh(.BSF^}v&b?ct> ZA=^z,+ť6\-„:F(@,є7cⰪ=8ombNg4`^t2`%X^J=)(S JXS5;H+,lwta|͟<M,㉳`]T}RuZvїKl/kcnxzoz֤kȈnJ^'|y,*9:`JTndNN#5]'N.#W]&k|csL+00#W>V8Ktg3Sܻ"_ pbFYb:0tC˻Rcv] svL(#k&{N<{_ӥWʎ64jau2JάWaz﹡1k lKi1m{^ ƱFN: LT{Ц ljwVUg]zr=Pʜbi$+Un=Y1UxtI`TfUΙtE{}=lgSΜkO3u}ƕ,VuO4IAE@PYŪt45L8ʕrzwzA6tu X5Fy'(8߰T s9F{k؟<u/OuU9V (:q_1`Ŵ&-HWQ[Y8ZcshOHc]ӡU6t~ƊWF>w%cF&d 2|װ ]D͔Ɗ%A'[o+yDk*i!nS v},Fa]PťV~LN\ rv6L\j])XʖRݮa3Dj~G=hAѳ/ήL3eIly-Wq!GO69G *qh|x}?MUU4A6Bga]uI*#]uc&)_qoFƶkfcFˏгş7L/J;ST v6$;Jued$MbF܍V{K,$SGkT}O0UI3Ƭ3hښf|t?cL\Nv>)k<< nE,@2(9rOv6FBy,)MNXn"@b>$Ӿ9)S3ig5Z NԾy_`z,jVf+o妷i},HRs3OnT/8HSRNb;9gꌱU;1?N;ŽUCRz:}_2&5΅N;ڌ#uHɰ苜)NI[eTF, UB{ *4"xCf03YV_V݆럠\ӪRjXM9_7aLvv6 J.0EP:GTMb2 qrq&GJTL);X EN\Cռ\&hg0;9霆ﰩ;EUsНg81cNSDoU^8F;K4IN'*%VՌM}93a'''Qr'5R酪0pWrv Uֻoh4avn2{#;h]`؛QSXz҅:83t|EMy4(2+f^&^zLT970xpD(Nq7V'(D]TWK0G!-)H 2>nn?~1Aṱ\eH~*\;#onzGJY*&yJns ֝1zqKooX?T:ve7\ύRJ:Xps9"ʂ ^&b%5}Vۊ` ;61:QU'fwTc<~/;BA|'氦g?ӘXXqJngnI$VJ=2Bԃ1'߄Ykteڸ;F~/y )8RU^gi*0`>5nmg>bz\DҤ1.Bucuöf@Iv*-6m+2g 5?K}}ॏ޺z笺vXg׶#|М̈1( CuY*Y7Yٶ@Gkfk6ΣՌ̸qGTvWppy8jUn;l1MtFv´$cRk)yu7xfE2*R5q*1@:VdCL*B?Z:Aȋ _I g&ģTIQ'dE{ ?K9 O)\fKY痤:c yه:UUGE{IzَJS޾@YD(Iw} _2*)cR]|K'~f6ӽ2xr)cR ^.xg2c&:SQ ) B猟|U}=dK,FLTy`؞ڑP&mJ'WbMPyԠ _c<˔X"8qT㷿KohKBn, '<\Y℀&+2R{p"G-nQA|ȇd`Ffd{9cRN}x=R1c&U T|+ӝΠiVLʿKXcw,'Ȑe%sT,>~ Vcd7`v?9X%7LݮVu[hB'h˦]("v:Ez8*3ƿT*>UjffCEntƌĔk؇fwM'y&)Io<׽EFtͥh=`U\vO 1k9(U s#f~*bOu$k 8ןQcT2x"c*0.& i9Rr$0K]큂}w،Dŗةqӽ؃U56U ='f^w& #4G~=H+q{]cjYET֝>h ]g2wlFG;poX>uu3F F}lq&C8S*GT [ ۂ&(fN,PT.(ꗬ|:Fťbbdr{tg1JwsXQCZEl8h[[sޝQ!z@vKm z(la§3?/&}Nb)`)w;yEu +[FuW[fzToiy L+I9愣䚔K<+;ª;:88W\ :S(/8 
}Y,D;Ih._rUo0wvڝ]uo+GxC̊2)V~5{Ng Vih&b gFX9*gL7MV{n w+3`gtsۼ#&RޥysS~]TdS}jn-K 2V̴aDMϔVT1Xn uS ma˘sљU݌2Hgpb+0 U>kj73eVU-f~"-_AB> !*(f|9)12cTۻeHU6ʊ\],\Nu1svd^E1;\CD\A\r;\ݖyMQRQ# q7*?Ŵao?VePAF\m|y81o6&t$*|KffvTcծU9@6:κҶ۹7#{'8VLgƊȻFb* SWi  '1N\9cEGM9#3> FvmWcgKJUE9cNIf4X'W~0,a8c%R?AsoNY'fbwtdrWHOͦO))#c,,g;w֬ ~N}߻gsu.WMx?Q<,zo93Fb|>P8̱+g8۱"gjkc~^ }zp{σ҂P<=S.koƂj"F;pz7Gf<W>&;s;%ΑHr%\mƷ3}aIDW ٪i7ӝ{>R UYP溧?tMS rŪۗ X6i¶)[O鿉#N`"6hۦO7Vm̾Fx@/*OWVj fiswF]=rvKnugr3խdWd& KU='B0zꑳZ&Pa.*w1S1MĄsW\TNEggn|/&5U^5~ r3խdf@Y| S+" ]n8#?6ƪ 3qPjx{A} i;yWڃ^@1+Ul뤪Uf26gQ żvޫtO@Ȓ)+ƎCEĘkw,73W~`g9^ v|Yn’k ? d;"ZUꡏ{UO5G7uyޑRvr}}灚r, 3F35_FΒEspP=,0NwNFTiR$|_r<%G_{&m +~u#=޷ WR(߭#YڼdĬyvQ,YeXc 6C84E׹qZ| 8jxΫ缣rNS%!rN&ro+ܫ5qy "b=EL! &TOVLk_g{/1A0]S#]U c P=",@;7)IVg d7xT/9d_#ֹB?})ϘzYΜzTو91+o923YX+ͧ=gi`F/LVȁVP|/7VoVhfZXkWRpFg$E'-JzZ]vgکҔg[fqE']-p&[ϝlpFyX4c* +s51*3Gt }}g}";b{tx9,3UH(ݑ{=M@~Fj,>CO3qK%1ףl ~FX_+X$dkq]wt%|K%* Ƙ e"Zn4i{w1 2>D~y)L/} qRWk}Mf76#ťVӄ8{zp8zڣe&p!U6-3P⫗B$?M2v*XWq{7J3+[*{ݵG.&&bԑ>{&rk 9q~rs},cn|xme)k^sF2)x*kʱr,UyuLrLZ N}κM}R/JN:%ޗJᯀͤ{g&Z陋Ȓ2EYu+{JYˑ+sLmߟmR7Y>BG:5s3Y:\(/ZBÞC gsN?~Q>VYnRq5e+wJA<}T+ddd]=O;sf|_vwrw>;zsQ">T{).-^6&t۫]jUJR Fʟ: ЌlAj<+#8벫1%7UYˆjA;XzE>1kJ߷ݿk܂X bUM7ktNvU`eɹz?]9RVae+A&o:kiqX[q7}j`%SV;IjjouMɻ%"GXIs'dџmxyןycFYiAwzR4wD˶O͖tYO{=2YD9;z\;U]= *Ìn{5VSzS2FR'nZ!8[¿vFtg]:}Fktu5 O|U;+t[r[u-0#Ykߔkg3TJ]{Ƭ3utr{qRG~^+X.{k Ue2F ثP/n\oRۜ>bCYNpG"Gv*WEz\G[/{ a (9 0P,r`ՉAQƄyʩ9VRIsTM;o^ n?\ec欝=>S#MPZkjx[c¡׿ r4z/Ssޱ[b 䑟Ա.p.̱X5{V߄g9BMW9CsS2U#sɩ -қ0cP:Rf_ˆ3"޳=s3F3fbZoo״c,1gѯ8>E7B?;q^5bDj֬uk'v yN5h)wpVb+]6ur.kA{>QK ɞ5 J6]'[LO@Iё$:#QJ9 dVGڳΔ*1+}_V8/An82ʘ +ڔyvEGx;A` o7JFXG<Dz>~'6,i'ee Ȕ/0F*A7&SJ.0ks *DyT~&ٯ"ZF|;&+%Y圕>]w?/$1V b$ֈs71r_c,ox5kF2Ly7kٽw]> lqkUYTT$w0M9.(ſP |}ZyWT˫ݓI߇FE*gh'I7|P2ܙO}2S+h8~1k(ر ~ceB QkFaf'RwvqAT.M&a֬gm=`YwX5 6ƟDePNnԃvߜEA$I&\~q3wLhGkݛ7 fh'Iȋ=.!rCɌ| S\ۜȍNvnGv7cfꤌNKܷ3vPyghl V#'3wˈ6agK\z1AvV70C!3Ӡ@xq_c9ωْLA(s,ל;+ƻ&2݃d B)&n/

s\QsAk^<^L;|m%&[i(%0R5q;x488[#V9λgEzVn/ډkÔʿ}A/xg?όw;Dھ.{wwrج+$:P1RvsS,Q3 MC4XpI^MϡUWhZρ]9iE1Q+e8Bʙ*cٛkɄ㳟e>$G0ׅ+Q9Seցmi_s۔E-~<8곳W9FX %g֘Ui{ rgW#jF3*30mzFmµ}E}_nRUQg7|ٕRc#UVs~Pqfߨ T>%o賣;y>&^[Lr#*A ;zWP5Q.跶mG(9SI i'\ȶp3XJXݬ墓BVm^MX6,l.*La7A}fs,Nαlhg sLQt*ouiZSN\~v <"1j⸹E4rcmaڻ6 } l宙"~@o0Jwvu1r[d[3n܁+gEt̕8Vh<(t׾RW =>_lqZVf_O;\;Yެ4H;U-dwtsv(UNI %%_y;(g2H$k?șAY.WəejT81.r$/{F9n:"I~0#Ό6w ZZ' M)aTt<ʚe߰I@~}fT?d _5R_8;+g3Nĵ ܥMP*VMz!Vw|9boʃ|FGiS%}~gp0c_x{xʑT~2,WڍB<0թ(6jk_˾pv,9;f;hA@fL쭲yUj|츯Iʌ [9-g;8TnXx|=R3Ƿ;e;fkRa/P{;5 :dWhYPqY(??6i83ex>&̸19ArY{]Q\X>δJ' b}e*a{>BYc/7=7wtqg籊*j6' ?Husƣ83:7Wi0C1:d\T9^Z3󪾳VVU37g{sQFyo/dn]_9Ys,OPC!%L)%>Go|^U*\I}P'qA5>8;'Aw̼*lyV;F^Hy|#5*w{ٜj6#512f1˰?ɱgF';*'И ̩{όj_"g_|C*m4 OhAH")iĚw@Wbb_JtF[ΠDR҈Z'0EYȘ,t ݍL6;+=XuG9kC_U)'8۩䅩ו{ctTDژosB?8bTf#}dR҃uzfÌoXuN\3U^5 )F= r`Tn(QRY2M9&#;J3S>~ Kgzmskwz,\^ŋ ͟53۵zah륮V]*Wl.歧A]E蚅il.0#GUUz3ӄ<9-՗d/gg[',9'y[f {/TȳRjS>qm(' P5>Vjc2!QJ92KvLH`MI3Ʀ{ݲ9i?{^)Ub؂ !{e@?*gLvTNo#Apt1݈W1/џJwUdeb'5܌{]"ʇq䬜_/u*Z)K\4{(c^KeUrS8GVcó)s9g${LaW=]f]Ie .%o]s&j" 㯶Mפrb"w;6;^ r1Yјi6nrRs?qNgPi#bO*LeXcxmvރί\Ezƪ"='D{r*"ijHFk=쭫w*mاn13hf yWkn1+:Y#UmjG)Pf GwҨݲ*H&SiWhC$=Nӌas;0 yVvWo#,Gy޶XWye: *ҳ1L̩),.Tij߭B58SṦlctMKgmdFߌoR/6,+s.齣FAg$U$07cR*F۸ q~/f0[Ϯ\O#Sَ*gYLT%EIsJf^3CJ\xBV/;?|N@w,Ab4S#;尟9 c4In(MboίU)CGIV;O87ȕёV9߭|F/H1QQzy]D)d@\=j`7AYg[NGmNUXWα{#n7A6/}v\a" $oR S&US+`H!sFߌ=d&ӾC/d_}nOV[WdāH{5lrDGz|qn2sD սD۳;5tMr;O߾wcBE5R1m׵E굪լR{]o_К> d3f<4Y'r'+,hcM=L#,#~;C $H ^e:n7Lz쩔q̂L*X#[}\4ήPU0e)w.mQ1B/7WZ 3Z=AIߘƴHzBSni 9's%zCT60ʊΥe鉤PRɴ}qj;Sb J,JٌNM/ycfVi&߅a)?&n)6g 2rnŪ,\~_gϫv_f`GNJ9 o@W[/x@V"T yJ1RgxN|5gV ?% J W5˅l;]n1GUQ5G;nԘUC4b4`ݿ5Q3#e[vNkThJq0߼)W;pAjnMY\X@j䦒9}u }F;߱{ܚo8<5*'5rToNxN'RUcTOό#<!rrP'DKY}pN'$=5˵c΍fvk5|nZY`^u)͸m>岀Ĺ)*:rlVHd]/Mo ogwt8GF"Tڰ͒%#ֻqޛzkL3ʞ`{sTOˍ9fn~+NɜZN oqͳy$OtN 3B}6E 2>LLUy_p2M5ns|zP1 ',#`.e4-"SfT"]~d\(y񻨰VadFl/L9z[%>wff*hNI2NVn;h8꾳_]uL$ r}QgWO )_џceyP u0Gv%dNMew^| :vzָ^\Tgt?a|Ft?ͽwvoPwB0ɣOunįipagfּ/t..5 g'nnXBMdp6az+vlgo yWv3t}>P ܓ{&h^߇'ُӳ':es4z śY*2cG7("`?QI3Aole.qʁf͘Q+F?^ps9njC#>,vջeq*-TqkEdGY=;X-U5tꨵUY1~C:U8u6aҏ-Mo)n4gpi) j!&QLn6le8{t<뎈f 
v#0x}1|Cݡre}p0"DiՎ3]RyGXfřw:տ14ho^w˾S ϔDEWf{LuNQRH^;m=9FVX>Ug&D>?7@m1EEr\Kĺ9SJ90ipzO)*(O[~q- Kz0 w'3"gK;;-ة=?ᆎ4Jq] /F:bf;>2Zy_^ ,Oe}G߼o3{Ld$VQYD'/0 PDvV`6uoǎ!̻ÌϜ LvDd'ǭz .OԔieN܍Ls?G<}gv{@mdJ*mANN3GX_3bO1"*AZ995|7ի[f;^Vb|e۫2 {7~o84;TRCTy֎wLYEu;lv cl[Lz}#V&ޡŒ]Q Tr7kPh:>L-jz6 Ag?;R{)?9a7[o7XJXRzk݀vm:֜-M8ف̶1_U:ozݳk(o{/EMѼS. ޕQ;۫{6s%gNww͖")1ɝ=@اҤS?L(Pa!z16~S\86)ogwyΒT9ų:O+BV~kHP٨75"c|}hĉF3.f+)d6+}OӁ#n0eXQ=Ӻǚ?+Z$$ן19Ydʷ[:E1"Uk-'-oUrR){SPvNݗi7&ad%YmZzEF]'v3p|ǷwOhl 3V=ᆳ1+(O== 3r,wGщE޳zˬ?٤bkqէe>Ȩ'-Ȩ^MjrIy0#C"//;K\ǦJbUXV՚}ZO<H~?Vy˶)gskGxݔu8|!䚝ml1욉J]=Y7SծU9+5ͅ]wb5ߍ1f5߾p`H"<ޜ(: QBjYeb ̴b§0'REꈪj;3XX[A;z)Y|$U)6l3f|^j*#'5W)1OG9(ba3򯓁z6.WUFe%:HQņRGXE~~\sn$X2d_qgu0)oQjխfLLorT'g7U}Ty8ɮ&T=WU.׺ l ,QJ=uCC uGhC MX2-8s359Eq|;}p>*4Q]I ~Nd&XUa˸q#?>hѨ0:2{wsϹbWɊMAՖ3)0 v_~1{߽,P}oݣ1Šzp͊ HA(a}-Y~3Jf0%Yp.Y {rB 4g9OYxGP/mQeC#DV_qz~IOπ\mv)㶪LH~78ϗЌ[VmMkȜ֫R=#SS<Rڍ^4k#JffS.fLF}=?yQ8q{{-esƱww'hfBd#jX&P`}ԟOa_mR7i:Gx 2 TexPVjL(q$Iwx͎ouA5ZbŮDQP >eZgsTݾY`8ΖLOHi o'A\}Q=&JE}tyOдӟS'rt*k˃= gUND!ٱ|WO=G3`zwcAmL.drBÉq*|- qO&Sՙ҇?85]zZ&$+1Sw(ThVqd1(Q)}3z T] t+tLkRFpd+Ǐᄃ|W9`¡Xߺ zM5+{|N7Ⱦm׭.,Q'U]~7t"7~.Inq(~bUA$W ji8/i誑T-\+*kBnVU@OۍVLHkZYb*+ y熳r4cwu:4J zS\i]t34C;PirO>&?8qhwsa T4 2%ΔRy7+KUtQW]?k^7/!-) Ihɹ tMU#V^Y3T θ8)e*3ns5Y&x*%F͕s/m \=ur,,@tZsbU05r wc]z+%>{n13W83Zk+QwO?AYXHԅRW;q\NgYΘU?_ 8E׫2UyC=q&mz{3mo4_f/70WMPftvwd(QJUd=3?)wi-3 9RfW ߃S~KǜDh1sY'Lsѻ`<\PxQяv m(5A\wyOD U2+}ʁq*c~1zE^yrRT eG՜Sՙs/p\1)o ArLVT'*zD>)DY_Zp aޛzk쮖s &~;#SXkcv>Kp?=ǸjjO酷K;C\y]˻zv}\5vXߧ'xѵ"?ɅQC>]PpwPeGrK1DIwfQ;wLd^x6'hڛ\{X͸oΣl=Cm{J֗gӣGIصvڠeOzƒW찏6+NK Nn|)䞪;s+02؏hst`+q+P3wM)vN}:%mJQ XI~KE,`b~} ?¡댉NZd2{?x}AM5M6H|$B ڮ2v!ζޱ2VM n-J0Q:LleX _)\7ræ!}>M ̫JY+ÿ)E}=z⨅֕U[|YMykʦ5 y }dY-ƅU}p.dzǔnP'gƹ/õfڶa޴gȧNFᛷ<⻝yF\wtt8QUggf7U-Ĭp3ܝ\|7j_2 K팍NctyA۝Wo&Q#ڸiR#1U5Iw55&딜c]1ܡUArV7e=\ءj"!L >(rf,.h_Ձ7AOidz3򪚑HT*I:X*~u_:{2L(\mI٘^F~mJ>dt|Seq T?o>U'?ZffsOŴH3U 5Bu>ТE{Z}e=XfJM)Oڪ9Pig֧3wNvn0/e’%yV#|Ѿ*6GbW]Zf8z͂ڙ1YoU]BOٔ }g5"63 r\OiSV1}?Y{l>V}3pZ\GhC N^ȫJ]Eo۬;+mqVjߕ 
ݥ"qCf,yg<[,qiw'~±{L#OcÖaG}vʵaɤg??jg_;SY/ٚN)Τl:Z]'W&t摜3Z]#Z|JeD٫jժNr1NU:9ݏvR5l]}1}i'sę>Z5eώ}Y:S`2V8}e!m}O՗|6j{nh'wkoH:vBZ@zюn*YT|0<% Ϙ1kn#V`{{AsBa/Ų)>"S~n0LΛ+N_{w%^g}'eZ8cG} VL/8Vd6QB0ŸPJA; sz޾0 nL`)a=Wto!_dﺎ.ߋ]1jOiŒ9TE=nhJn/_0F'5 4@8rC[8-o$e>Ӹ#jQ/wﱴޜ:pNJ[+#ygJ1UVVQ%`;/˿Ք 67&4Ux4;*g2#ƿ\dNȑv;c|郊ٳNlV,}+D+nBc@YEs8={ÿ.DvS @kkiquwXSXuA1K1ISY8T8͇QqѪ Lێqh +++ə߹9>g\H{k5g_ѨuwNwS©"c'IUk:qfegzw@ҍds9jw23&tqcM{{.P85YOI s %3iפg}aZ3&d*yD,9갲Yn>WLeʿ74~74ר?V:f ?>2ZHѭڈ͈7glUm үo*؄S 낝>Q]OdIN>q*_ɪ1{cM70Dgר}UF\쐏)֋~CGIoYx<P~"ǕwBkr#W:#Skh_mH칋3?MXJrlSyg=+4f[ثsz^Em埿?.K\'8x/H8y!E=VI&}Fult݅'u9{}fd9}iӣRȝqIe ,3 ?\T^yk#sFO<&y4qOK:/kD~:/7댿ovs6鉩 x̣+oX?ݟ?푣#Aeͧ<U}]mވ{NulSvūqftx*O`G]\uO6lvR'v_if?M2T8c*TUUMYֳiqVcٌlAd!׵Hw.^T`%sFYϗReQ|\{vfTBJogWJ'?crڊ\4m2vǡVg_$RHfe^DZl1e4FYλ۝~vniuʤ仼?**qRAzu13;f/W=@~<#:.w߯لsz.BA=v)3bݫ_W*}]}b=^:Wyor!k{‹"?M2vjfF$ߐ1J:~﹨U6 Eilx[/LH])~'5~FٌY{WDN!TR]U+젓=+ 1:}uO#ތ caaOG"\pv>*tGkJ e0- uQ8*k쳂U;i/2Tev]L\V+|}"+T̄*|O~ǘ6ʷUfgW 3w4Ą*k:8RR*Eu|a3Kկ2Tw$P~{gx bO RׅvU0]VO0&zFgP?Y'rJ`ʁ**;">:d济;Cfe;ݬ?3o\~Bڍ9R7st;UǫgTH-ԔBqeK-5Nq/4vbP̴)ƬlMՀTޏ=xͲbgs4f>]8#3#sLmA'gHIzԏ{#Q@~G+-摤gTHA-/? 
5F@~ok\m; ;X;&R9bU^ e)L(Mߤ9B>S=J䝡:Sub=Y)#g!F|y+3R?5{`JM]qb{ZU3r[E|^;#QMn%S?>8u|K1Ձ:Us==̭=CdT}7{?͞y`UҌݖ{t}"zs28sPƙwDkViճTW$\Wl޿Kq70^'yV@VNqfgYi~oƁ9f<&gvꃙL  %(g`iUkd+HeopNH }b9*$gkUh`:-Z+HZlèVzۉS2Op2ahpGZzDb41N\bcgx{6e~mDOT~/gBF \=F׭@>7XXGp;Q{#Bz)d L}NP}0OPg˃)쨉̑ۢ{}RA]wOpN>72v}g&nq2J[XsľZ|ЎXRvUw+t9r'\@P+/*NEx} !ʦ|Sѡ/_e}}gNݙ?;#vtu rZϫ;-c=Zf*W:ǧtFuv,eWJ@ÆOPIeG߶:2%bٌwizkޡgLUgj^a;l=w?8W},4G˟DIGʻڳ &AH'3*AL0(JQ[!ԟz/Tw9O|glDg'jVe϶s\P"f˺lR:*;c1=ܞ5;_LUS#]Uc֑iܧ3&dê<˃=gVsT󰊵ٱ>Qp]_t 6!~93|MݦrgOKiߡ~3#eT[ *XeU.eӸiəGM./1S&sߙ+@X(8PRtj"BZY+4+ ?Ύ$W{88s̘߮7LPI/閪K2:^Og ;갲X-M.ߐo#׿{>A3Q' `&vҧgbK'dk'~:[^%7F$5l؍sgUs,K<=Lu+Yo+qFqߪn0aE|ƝUY2n~z,FDIfL*ujH(̅7; ?AJ8J[7潥e!SyM_|︋jdHQoP𗶻IWJi޻ĨyB߶؃F*^UW[9#Qy6ut|9Ʊ:{>5> ewEhUƨڒpqXݻaRd :WaP3%'UFu$?!z[IĬKsToO;Oȑ>~a fa9ԻlŪh1ר3n jׯ?\IgNU"mW+L\4P|c5:Z7Оs NwwlG7n74}:D/+a|#!3fo\DyA|9cPu͘ėi{+0'O#%Xkn>qvOP%XY)\/;59 O&3 *u+ٍQxwN=- #ww ᛅ%6Q$׋}q$:0{l %ks|浓s+䲩Ʈrr7.#d{̂~sF\9Ǝ>Z~voo[WN:9jUοs"@?gw?j"psy͈qUJvRyp{Xuq;:WՌ'~CBl,+ ucg)4bn# tBO[,=9%~+#]&w@P|lPr!M럨7Wkٽ5&\Q*_ǟꑉU%RH`sctw/ .t1!qt*]fJNiGm@ opʍ泯";1 IOOm}ޠ='ق'w?EgBJe\uWfou-=";D]rK\=5 7=FF)Cn`PQs03w̸=kȠoic6?l <6JfEjwMqF3}=n!ڤ<}vetw'8=rTMy!T ƈ%R؟(5sbc,ptY欱vǡG'@]!1bqj9HGuivrk~؁[#g;@9@얫44Ⱦ#ٕThV˾&!)2H9b9'#_|ʳ"^G;ZsRVA՘|c?4ru,, Wku%Yvtmƅ߽cP/X~48~{{_CeMyGF~`ZjC}r JPꀝ10 7jr=߰s6D'yErh)*"'G)ȑ,) q~vSyWLhuX $FC v|_8R>$HZjl3cD`)a~[, 6ٗJRE?CV߃ޅ qn6̕rgբ$N2S91s5AxN|>(gyN?A/<weIi7>/S̹̐|WLgSx"3Fc|E[6 I V7YY[NL?!ZY Ⱦ`yJ߱[ϼ;ķ_VK 8/]jʦ"Уg=+uC?bdK9kxϟζWwo<gpީY o:+M**MpD?ߕogjfmq%G=Z)oSrcači/>0q[Lgƌ||?St~zBA|&K-v+zX 1"LR\mjv:'(Ax8JRE'q#W7nT9)'5rTeο=%渥Jru'b\]?7u^e+׏N?1厴ҤYu+t.kgގ뽈wdF۔ٯ;17=Э6EQ؃!&#[eks>N32t\nwa#VX*2 dكs8VTjX3T?LJ+]9HVy'Ubq ߙ#X7U :"N謫sIs,AЦo' 98{ W0Q-z1\RVgbf,`] MoӌJtQ/'9'Q/c-bgTXLJӼs{=٤C{.A`r?@-Ye-=RDN4˕a 6ydɊ?"t7NQdF;5gwֵf7VT-{oPwyX]UZkC>U(WlAE}Ql'^UUQ7[nxߟ[xwRMYTl1{NlzƦg@őqF/JOö+\%]i(ef1~3bJOc6>n9(U?Nv.1k,uO 43[еIVU7Ҫ)]).+p~&gu.i63ҫm-˕se,:vb4`֢&I ZWQ) 7FEM3bO:bUZgZG`o9ϴqr$]cKvM!؟}x/w uוIϯݏ o5Gܘra•菵}cF誔B\r^ V|G,|m=t2ꚌJC\[QrvB+v )=0w̙5޽WJnfy7S$˦dC!վuSZI0*F}6ԚnC#_5rzl[/ͫJA)P[mY3Wg)`j9=2O8+OGdԮ+: .; | UIu[G7R 
g_$GVqjbG{L*̵9+9qsѽ:]N+WDJ1pf;vv-"lDu ,v聫ǝƹĈE󸁖+|^Ԧ%;-o$һzgnQfe 3(eHoj9䮦5Ez5S>r^/ E?;bJ1p|h =32nE_jd]dK":=kϥONn<kN{6.U7r6ׁ=Q<6\ZREHU'Od)S5tZOQqکy@~v=!R,SFwŌzгk FH}S\@u}$٢gkĹLz׼uXՒ"_An^gKqA锑^ֆ\׿H;D \Lf1zGݝgb%ezefI_E݉^y^g+ޱ$J4\~A$MȒtt_οw?p;Xc@ [ {tCȹ&XOZI<%Nٽn]1f/;|yQOd.&u/3O ugYiZ [sK{u 9Rfx2 #(R*t l;Ii,T:ZXfb*z%ewbTxs9Ɯ|>֋jN3"m|ın>AsϦ>{ho|bx%ioN#9?7{K?9Sܻ"_3*8QOeR.x#q5+zƂ$EGLlǻwDdg^L $uiՄ:'osvJ/W GѴ^g>O,\{1z:u$Ok'ggy?kelV<;y[NN{!r78?/;+HqWELݘgǴfVIaީ\u* e3A(ܥy&},ѹ>̺)˖#V泐?gHJii -wzS35wGg>_Bl?l\q\P֍;dyݟ\hy`Wfj>U6U|uZy][n8+%_{ eN8Vhk" aϹ F.T TFoIϭ 7V1~1#T>Q{'ڌ#H~y6L(taծiaFyjdt~3;5o]Z)K|BK,[/\S׭ XVjW^7lȮ:Ƅ|ku((sN2#<4ľwȵҰ|"ũ|D˴^gOwSȣQ]9;?E1Y܌Wd(9SƊ]}/8KW#V7YWyȞN,ٶ>Ua*@%K0eH y.`e23Bmmy}TTyX͓m3'@tnS<Ǡ7LxvŴtNN5eʄg4 =Dg 9/G=-G[G`Z'hS]LJ ol =9R0tqAqc:$`*.{ՙr~4I?GݧȾlr:KR 0nzՖZߚ##M^UWE>x)8XO~޾'7tp7+]~dN8TہtL(13 t>y .4fta=rz%X*T7+~31!|d-|FqfGſKpE_G"EsQa%ndSou ֦:D+"|":|bFȌ;[2Ձ;Gy_~sND̚UQ Q}i}JƜ~Ft3rQIQZ%zfBF V˝OXUSS[YCݨwRow~k|u%y3Ʃȴ]cBF \gsXNdʊTGH|-$F\nAe&5:4irIW]?ͷQmUzC3V 8ۄBNwGO %ᾞGfmW[|q~qyVV;5 r97~?=@ӊu eAmաBX83R((jѵxnD(MºS @>#;sUydG=cJ)IAS=m72RS zdv٭SwjAH+jMs[ewe ";:` lS'@w9y_AJ{ N[빨'G)$D!Ew7'm=74U^ћikcos9 :8Z2s=cPۣ\zfr-~9z7- 6³Ox@W|f+SDJ gTH-љ "z(JU3fb g3j\M+udB0;#Vt.g{f?qe_]͸1~;Z}|wMzctv,WtU Ƙ\gh)oS{Key7dSO>' nd0fUY,8KNݝ9qO}y{֕v\I_uyQSiUG:+8|m,3e='gwFu݇6$y2ڕZܘOQrwO&8\)sBI+g 6eʼnߨYCWsֻ^?  lVB5 ufGYK@נ-M:c#ƝTH~]qO="YlsZHyhV77 uwbE`.F/ٝ,jJYCN;/c5N,o?hVgDe)wiExP)8O9W9g)nz{?n|g9Ɍwyvm\J -Hu`V:+O:fp-J[Tsep9*ZYjA])+ Nn\ O;z+\? 
_0Xo{l2s7FC/W7,볦RrXyNV*_FK&!uiUn< v}g}U_ijhi3TF^Hy vJqaָƝsTO2gbu\nIe+Fۺ; ݝ>cmTƍ_ cmtQkU-5G| Ɂ9J*eD=DQ+`UR-Hcgerf]H97Q֊:zGopNҪZ>|sq(Jz0=o(?qn8AWO/ [XZ:䎩{ YrBlik=記6d73g!s.˝=5˅I1clayڿtP%RTnp%7m'g ݸq5 m}p*"V؜-Z+8N$nr^aFД٭ouZKe{QO`y*r7<;ye|6J2S:ӞN8F#,b~ED)\v>ؾmzƜozӊvq'yq:eЪ:wGG̡Tp$;9y r…27Is' OG^erw \L3ڽ:Ӽ#!ZŜOĊS^j&\ m}`T rYTR {kּ/G{E L-N5>9Ά:]fB*jI0_UV#|5՘ZC]WkiV:FK4a|xS`GޔzOTQ0#?;貞z뚵]gI5줨SnMv7 چjmXo:ftWg'`&Ċ) WkһWWbt-1CH:tqVT&Ii゜uJrẑ;,.L-t}zz"Qrwt?Ϭ(7bFK-Ju.GlYL1=6I_x_DVϹ6½^lpո:[>?w^S74S9Io_>lܢAJP:{zD~ͬklOHww:UYƲ£¯ >ՙb9?g~v@^uC|Dn*Ɋƻ9Uq&h9f-ނNgR\w΂S]/xArN`΁fbWKjuʆ|6Ѯ4ټ~qZϨ.Y/_d_9.(0 [X̡H?9cL]t|wU6)PzvY-#;ɫ4e/w35T3i=쵪vg&uí `y7Êe;0 w<3~i*3%K-5C_Tss4G7ľFu+f$Z>i*87hw;2:wY`+;YWVT)G[c< ar#iC'\3w$k1)5Sw;ζn<9sߌ8PRIgWˣzUfm}Y؅ ME-3;|pA~?U=n$}l!3`$ ZNY|p*޸܅r965*PoIAͅ.i7HN"ԾxxtO=;# 4ҞV)oRΓpEKn{yFGN1b#~VqJw143y#ߣ+!0 iU|14ErďH;|A>kʘM=QҧeDo%rN٤:^ʦeճ{>X>/>Jw%x>qmnH>go-|&]>3f4XAºyO])]H#ONqI}rTgYoQ:00PKi*1=|iDp ㉫'j~Zgqo38gkbg>x^RYAFs 9%T3`7 kL!yO6?9s(YEwQ_ 6իg߼I_߻ |gN 7ܲ\ɮ5.v-RFVOJGrUCO3P)I%w8SLk?Rɸk| Au?+BTkAM^2@+,T+,Կbeuii1SL< q_/&ZOz@\UBEeAz־ ݃&~:^]vQgަ֢<nh-z/Hslve'Y *|'S>Ͼ, vW ^`S. 
酪&91%=>3lH'pQi|dzd#%@G*w=Koٕ}X1zCSFn-4+D jj+H`VUg5\u1BcL?%g # { 99%r}4),12%Vy6:Df8s6wg۠)wә1SYg<9z=!'jHVרѨkVwOd,:lzNY:cZ)'uǭnvg3_D7 ,99"OY:W+ &|#ߑqN ,}ZaqJ:#:T-1o>4P/B` rT6UXx}^ݫqC;θœXImNہzG7 k'}m5EtHbmKoQJ%Ф/`kzۄsqyorй&xw\Olv#F=ȷz59&)Tyuš#pCA=уxt1`E;Cs8b佩\@1#yqo^"̧HqvKm\]~:2MZ~"ގ0X G?y/]>&Ī_<"g`_2z_:}].DDkݍΣ^qE5aLv) TnBG5F)56˅w|a~߆g-#VxP%vJڽSi'3nmw*uF0t9S9dz?DǎS,YqyS> :Q \M:6I'UrխAQPǀ gPrߊuu%)5s+%W*\Qr5xN(qboYz37.3!|_]/PYxLn(jM&D.ԙUPuͺŽ>sŭu7[7f3Y7*mm<'瀒n/Xs,60Efwk71Ի_3wq|vMdK٭|l{2ȣ)oƮX݈tn^)>: yoTa!*ݳgػ>ɾ5f5K7OF'RTcb^~\lFuz)@xw{0R #P곫+f9FMN5764鵱bKƬr~B3 4lbT{PܙqZe' j#C)H> (*/z!qƤg/Ӽb؃$WwtyKfl8Q*a*e #€o *AfkeaCzŜfRTc%yOӂy1 61࿙:e䘮l"_WëϞɂ5-BnY 4EzNqA):ذ"_3βXH2ά~uCiZYwG7"ֺz(on }|uG_~09N(.1)Q_]zc = NqI(k#s8+yƹc=?7}C9ëhOvJopBZ+*^XG^U :Kuc9Sg,G4p׿t_He]ھᠷtЛH/W/&7Țj=woOP.~L]Vȴ)TLvOǓS-{@^'=~t9iQy%fsƤ4&E&v$8kH+Yx!0d]1$WZ3o} Eh:ޫGWFL/_tTPm&dmޑ$cڧō\]ki NO䑩A[ JGp\)|{]2sfs[Dzڢ{gxz!gUVL 9i7x'![禄yO 2YU߃{:êz+ FvҘ#Y?{lꔻw+V7r6"bfWF_'fdu rk;~?@d / MV}9\36''vtfyeT."ʶv#~[S+[뿘5CR\3 cЭ/UZ2PǎUDQWݛ^M!QGT~kvZf>顲e £;LbdۊH|+yUzP;Qb#hƨ~[rer^ȫJ(T=邿}P=\qژM;׮W0]%P]>n 5:UrQ3V+Nʰϣ>N+eu,i -Xɫ tQ6bF;c0k&cahO2*!Xjr~wӤۣ'P=>j&c ކ7B$h=OO<=`S_?=6sǧUD%ΐF!pFuv'gØU2fgc4r%3+Sk_&(۬{_u5rT7:e a,;xmO~7MG{jJR_LEp+sN%6s~gowyڜ+c5g@̂%ػ*#ąsD538#d@\rﹶ'RDYmO[5Fڙwн*74D'Uv3|+G z&v@ـ|W\zC΄J䜦8vt__qM^z]>>y2ĊvjgSlQjw/)Ϫ,_g7RHγ:Yk+]F\٩yX#s\y>QGm}ȕ݌JkAON4e49hz|X9+oĎV~;fߡ~;Sï~֪cs_4'y͜>Ocŏ)KxtfY֦p\(> 㻠ZsNޣ節;1#͜V{9w*XqZ8ĉYW*>k`h{fQPdh?s]Pϑh[ϳN,[\!{au=Ƃ{w;*Pg2-2O=;`sTOݡx|"#$JztZ}}Jê(0O5:mFux:\ =w5rݽ󉵱aF hz3oTߝmsP=#F7r`aZu |5n%6 P0MbOcÝyVf۟74Mv(sOk:ޡz{gl5odW݊.?|1x*4JTvC"ꃲ{D$iJΠ:\?vMFgxb԰+N;yęf|׎8K":C[rKoִUfe뾸)QJc4-kAFZj]̹k֋?慔bt{~mJDIGj{ :$L2iU8nCW3ZR^LG=tYW2۬c\1!/ gI&֫k'púfe?ZfvKyu'pbEطP=6sAKA?EOGQ %`r(TS~ya+]M9 _+xvIz:߈ 4rQWe{Jwƿu>)|c@L ^ͩASd}PLE)qcv`܉٬|6>ju|`V+"xr.{CrFr1d} "7qfYw7xsę e=| ; 0PSwS1%יٝ^M[|B˽9>]p_ו}G5Œ3ʊzXFk1Ӊ='*M'l-߀ߝ2mdV+ݺzTčIGGcՌcaQM؄qQzjJT~_Jt7.%N9v+߸s=Jc3W[jF 3ϾOֱƍW('b݁fͱLq\}VH/I'U49-_[cd!Lsm }8qjU[TfF5=>Go5XSz1vr{^?sMuFׯaG5aiΈ1. 
$O|fCy|wMzxk+ԧq=w9!_V}Wʺ{ڰw"#IңE`-fDO'ħYŪWጛ\Mi܂95soǎ阪}xϾI\<"#'RFк<\)1U5w1s3NnOxO{UK?9+3)69uQk9vO$>gɿ9ciҤ09sGo]QĞ|2Ʀxo@Ϭ썲5ܪט`9c٫q~]z3^hjwDbIjCb:ng@xv7 ]yF֙R-6Qĝ)_Qx*?ovEcvV{Fzhz0N8t\T}xԏ=h=3?)ዒn${N14F ofߩGx؛*P l>{QHɫ] ywfX]aVʊd\17nuYM/3e?sֆ|G<튙ΔiÔDP=߽z'Y)G[a/L.T|z 媆sV(yTU:ܝtӔFNnw{_ĻqN6GIuGWα>dmj 0s\:Ķ>17י;JɞݒtͲyaGu~+B)g̯gLY1v-*Nv_v_13HTnwF"7SVgw&"Uv~SV7}CoifE`ŮO43U5bt9)v +HU.ZėD+sY]P9KHJWV)ު)̠*cʚwabG3Jm|Zѹ6^gc7߸3rr^XV::y:<1%#m9'e8=HVuUw`7QVu:WY7&_rrAV̭qߝ5fՐs-T9Y)H;Żn^ u߯ϑVZ(} a~3?m5c"-#e6鴹Iv:x![nvy+1wnVAzz̑⮊:;c2OUX#UA( 07Rka:>߅YEesn;eBҟ'jZjCs&]=o¯ ~s}Fj&`P=\._:3fe|}<ȕVz`J)vsQ}z+%n;UofW~ J+N=`K?nvֺ}r ] ʜ7qJݨDΏ<XkU-kg76BIQ2Ot@s|C|/1+LQkFX-cbM5뽪S1WKI+"~Zc=HP=sg9?CoDŽʦ*k z w]M o,fgQ~E5 1…`_I}#&;*RXև 3FE9fj~@g]=rvZJmUٓ%e`Vņ$({g6w9RSN&!^lOTRӗl]J[}]֘@ 2rQ|ÆY+ 3S'UvP\ .1,E%Vx7捿N r䲩uvpOn=A)HJ;{ w}*S JWV+sHHS%5ª4OAuXuN}1n]i ,%r,,r/{ :PUo7hƭTvthkć*qcJ8?hl.29)!iL}Y`=_BGI"(;Vfec=Ϻl'Ą*Iw)N昆lLIÙ5RoZ7I+R-yfw`:XWOԕ*m(W wrb J.S{+:M\J^iL{=UFա)Q&3Ƭ3h5opcQO'Tľ`$9Sg9N_jvvþ};,HQ!NFZfҘg'67A${%o*;nTߡNEbNgtAPn%Эr 0Ų<quzW'pZhf&Qǿ;CS,)4Ck¯fs)js6:kDti䟜y><0r|Gi6OUXD|_W8aZ#3qK%KU+ /iy<AYVJ;gxp;YSF(>OՃ ^z埜` ԓ;tV sw2y5K7 W=]vn?cO wM̎.[?-^lGYBoƐ}ZSl#\7ȱEI#;q"}:2cG%R٦ Y9cSԪ }~0a鑣r싼,*ifiΜ1{~U+?Wt=6#ƝY6*aDbJq[c'Fn9ZXjO;j¥y+o_NԣҠJVc9sr͹Ӫ4:ks[!/ح؃ ~AYL#4SKںjq]zڒ7ߩ@ԿeC[w] R+J/? 
;f*TWJ?ʮK{L9NE\^EzבQ2&,8bZ}h&*FIVYoH}^IզFΦC%Guk]ڧ`)sdz jw_P?ǕwҲ5VZqVFfĐꥪV[姽5R)S%'5YeGL ($BC.ĺqTu}(9sX,=p\-r\4 \'iڪYSčɴE}8.(G4rFlz67z[M]ôѿD)jzX^xW^'39>yXp3b}JVU):g` S6̓I0%X{+j"m)lëcHpvLU7rqWG}i?8=0#?:q|lƽ#VkVVjoʼn`EJ5yW{{A.T,u4==g[>hb7SJbkW H;9 T8rVIZZd-L.f*Xw݊L 2>9l̯PD+Y+j6qf޸,D׎?I9Vﺨi]Iװ)̴1o#G%s=ɒDcSSoVܜ6erBBEr1`_>??[R?IT2M%uv3fӮzΡb= |X?sٺ ם i;lq`I9ks^CL>9~7rv_Īw8]*+|Ym@)Y;6%ըr^};nJx5A9O̜5lVGwJ(gz\ ElFW_??D'+*0)PzkK|"cĽ%~s_I+~d*tgK̗Beo-"w9PTm_4tӛl9<oJq<Gg]=_ىdž|mj(efE}Yq)=}$ݾ1-eXuk{pv4z-?= yQȑ”ƭ:PsZݤdg=w͝qG/1r9'p~.r%wX}WߎivZ4ę=֋-N:nOLKŌ3;nJ)Z*3gzk~#4pLB$_u}L;T_M|gtA`/^bRLR;"=Cʠ*7?k' HD$:9k{pG稞"{~67rR)a&& OW&BѧX*5q _UggT2ƉYM3(TĄ(Coq?gZ)s֙3Op&Rf끪ZU[X׬gGʻW~Z9W!S)չK#i;]?Ӯ4F^l{ws\q,sY|;sjq8тc^HyqV?1)%`) N:T faa:a{LR()Pxg?<S)%c."{sq;95iU(UX~Vdžc!XGT0~>q{a֧Mdw!)uD#g`-&6t ֖ ,%X*l Pk^i$kD3)y|Z8Cߟ;:c+4n_r `ͮ~V&}VMj3s.{ f/MotVqUNo}_^.9.̈NzֆJ+<9 ;(ϐV~8rx4߾Y2c)l(1E^+ΑPRyh}r1:uUj'q?3-k-K?n@Tמ]E$]94%@M%wt].`YO8'~b92YSEFy`誒\=2`o}عZSV hL+.I||Njˎ)'=b,vRj*i+ UkuZx]5MkAD8a X6# :PWsު9f?j_tbF#";kZdZ8}qqęc\20mm?*{o>*1M1bWT|<=vr&He,OeΒeS_.|i b{?s!ܠ> s3s%aP&Pf KI} 2+k {R%Tu`?ʑ^W{pM Z-j6ZkC ιo%Av]CِmKgvO;q G=>Q-qTϥUY^nwнzN|b-}NfM3-k]՜̜ZOӸrt܇k!@IAZGW/$H<ٽ MۧOc8:,uQ(eɆ)wp0OZ{Cb"&W^5gn^V)wc޽gJvP.7G'`ށzI>qc5t ؕPׁUjՙym*m։'4wΉ4'ru$~'XQQz[1߹|“R,SVad*uܨ5\b> [P;]Pm9P{r*!3fT5;?9T_izN:FD_VFf\Z11/n/3ctw'?ɝ3,xHj y{omjtԑb~;=Fkf"b ox:f w*oTaDA(w 9چFIV"p]DG RTO}zN] ڌ69Úz1<}A0?v{iIOd;I[G̪r"=9;*z63369b \M,QϹ1Fw{?+‡ߩ>DŽÓ;C?DBwLU+%Nޖڗ/4Ž:ɗz/̖W2XV([D"F54~gfusi]c/3~v?Jʨ{03/kޣ9OL꼪ZԙO3\yLi]L{luli:,@gaBZJc1׳&'=ǩf>'+5"ÎJ>3f|W|cNwﱎB.ٞqڪ Um hҋu#PC{cX6Օݩf{@m1U(:VUuT$UVn緬6:dꀱ՟Ƥ/|be1j4,0sA,0kee ke3 }qAi#zU|+)'CɜS{5VV Wt {B[#>"=nXyÌQP AqZ} =w uϨx;[GUWotj-= fðG߳\#L|@3nw;:oJ.J%>#]-۬*\yŽ TtK+=jJ_v3=k wɗUߕymcO31UGUYu_;*8C&:tbn= {ݩܘpbpvF ;㉳On4S]7o]n ځj\1{&\HQ ?qv{CW!P,?C]mX/A97gTg'4x gtDEۿ+|9jX|L]}R婕eM+nXWd*->(9&Y7sFTuPVZjJhՁ0{L閑~))8$c٫'3.xgS٫P/"Ǖ(j1LaMU7pqON}9FޣLymD)%ĎC?k;C/"JWe!U?t#wʗP97ӕYW.w݌`PB] O#QY=ڪUk9fn7P*5B[H4Qqz컯BUɊ<"''RFVyYŬAʕh'}LA)i*eQڗ =ꯌK<a ~R=tQw' z-7X5IU{uN< t y:7oݘ35 U3hIMqL1X@ǚ/㫎n>'+lkB/X։0 
0'*M943kjwhTU}U{.{C_̓uf{,w :8a?g`z&XUYn[L fL0v;w\]ZΫwdGzoYA:UYnu=.We{ܕmOjs?SGխrK?p^PNUkMNr\.C_W?ɱ]vc=|Ґ5)ܩaM5v*kpdws2 sݿ% ר3zt}0 .EI G?IcAvAcP0u[ʱlog㺎fg75E:0> -~׃Ԥ GSߕ[z#`b[=o8+f$,g:|nӪU̯OiF0Gi/2uuJ9+{b*@Xw"dgP?nH;=u۸P?Jױb<êrQQrɋ+kuv`'ndk?V+pg|'X(럍<2V}٬vs받neU=ѓ+121hFrɫ)oyљ`TDUd:XdUi:fi}ս1 =s8n|"SfGuދqE;3 + ;֘_"JT\R>:z [*F:X-ґP~ߒ<%% bC>qv/mBLWTbF.8٧95aG%?9k"nT1) uG'tmv Zl6jD9~Wg+Fk,"5^⩀R9c=2kY_G$FЖ}B8cVڽt|)MߵwMAևUQr} uGBF ~YhD|e-^4+:w[oٕ:sX&q-Ų6%ij-{3ow+m6g+uRE$lM8rSƏoCf*z8|BB#'j8kHˁ.ģ]X|V\sʱ ghNcu'[s6s"ǿ}w-^ MŒ ͓FvzFǔ:7wt@mEj0\>9^F]/;s0+Z%g2<z3+?jh՛OĢ1~"gS=LvjjQ}ѷ= }ڷK֊+V,{i @Vhka(Ttp;g7<Dž5Ɋⱎ;w\Ukݽٽq8{GMR!,״t_ՅVv7~6vwʛu_u}֝(i&[ecw{-?{5o)g<Ud^nkdٴ\S7QP GB%X =rUxTkaiJk8͆`X`=TZE慔XV3o+H1mCKoҔ"V(cSx+;TDj嚚bDj-쇔߾aJRL=ݥ RGzeoJdeǖc>x-3+OTw,6tl74v3R?a4o_uı(cf=oߋ#+oW{*AL:}Ns^" #Ό_:X(^;b}nK_2PlF8).;uE=d(W3rJt|֨U~Zqŭ5;*}T/rčI/xsw[bvc=yrAFhoꢪ9~ g&s&Μ6!z0yTW˄cg壚o}'3LHa7]'Eao;oO2 dKUWn;ᷧ0.6Ec9^Yok|mXW긻neZܷf 1U=UJry29{eW V]c3}%tݨeVtʧFh7cì9.dYT+g[\V0{s7[1\V*j3?u3Q|V=M8|y_Ѓӻya+{N+#9y-Q u|f9)xHi9j517w/-¤ՏJk=m?bXϖ*~%U89 0PmSuC\)Tj֛7o8 YC&ӑqɔdѨN% at|ZeΊk*ij%Wz!_h2\kzHupD9+1e.ȱEM58ӝɽN1}%G;yy V`Uaeu)֕r>|%ډk%tPkTۧ7sjvӌNH,O\Kzv_{)]蔭7fW09 uߩ?ǎV&,G ])՞8' =8{UcFtN>:|p㗙#eXb m ňtqX#qLE%c\Tj">.Ùx׉HcdY\mo8u *ZJxYEesVZm'1rV-c 6.q؁U5lY i嬴?3'rCɌGߠ9{3%eh3{ ng7fN lΞY gh3q+f:y?5禀<}F'pEK`ʄgd]'Nѻ=~ڂ H9L{UЃ^z]bi:9h/rO[I:䌞Ce 7ι}X43I\CQV͚>3iۇ6@8qeXqX :x9oy #ѵR~gMotApr9oSo{S&S!NDRfnfw;{9#9I+?K$'ZeGݝcg ~LCVKedRcSx^%!GO"ثcŻE1nۇCU6M\y>^iz,_G;ߍ|9V]z9UYd_+srw}8|~[91 ێ4++)]>O* Ja{h)RU gߩ);g~MIUFEdсW~9duZ5{}Tkξݲϙa蜬vo4e; i nIfB nKHJ>jMnN^n~Zӎ5oWfw~3g_d_Ic{)lFv5?G=ӭd>ٹU TՃmy4`~,3,Dg!tѻ>ПO \Lu+YrQs}'Sݢu ʵUگUBx${wdӦSL2orQO2m9枬•i5<{jzT'N^*C7r*WKǗv.cH"{n8}:jydS]#I@ΖW%\>v% *}v@!߸7KjLK7^՝:: $k̯oG.:YZ}.WF&'HL6>J!K,97x! 
̷ur=WaSN|9im,v ŚC*Hy5IŧMG~6Wr(;,]݅~g('dwhЂ =SXb|.+KFIVw)3\y pomt *1_tV7D/8ɸbčʕB$+Ǩyf6V#Ik qoeJaut2ot{)G#{fq9vtXsQ _e;h;D,'XO<6y_W1QfR/j7ؽ$IG5Nхu]4-f)_P3о7wBO7E D>^z?Z9miO?7$YS}wH7Nw_/ꄬun0uҬƽӹxցuԛȮaEamK6u+zKBO~0uܻzk}-[W%/C;h]WUxJ`8 jwɮO8b/)U'g}vγwg*L/CpF ؼe+XO3lH3PRsl;]S7CTIY~-nhϋЬ6a++3jp`e~SEuuM￝Ԗd=L1\t`.ڼܰ_hZ5tzX%+?Gu83ݱu͌'*]98g.iW6aU+KuJdՁ7nNz>Úq$ Oz wYe*3ZԵ{ddءǿv,Ĺ]2w]}cވ?f}29ƪŅ۱M#1s7鮺d7 c fYg-A2b2T ͪdIʬyLB)*2X5Pv+F{=:p>Qnbdo w+ g0F5=+6*J0?](z[?6x̃(ۨ;v?wevŜGT檖Q>Y\!Txmw͍DQ?!P1h GóF5^smVwZ]ck/%7PN v?cz]Z٣|6{gg'Ƃ{4 =lufײ&ebi8]Hlɜ|mcYq.fœ]uK-^lzyemu3$B "E,ј񧦝5kͻ!Ngx͂bvyQ T/i_i33ukN٩EbX][i3_ɆTvA3L_[{\ nL^/蕹 hM:VzJ^^/{j͋3Vz53/ޔ\}Kٗ:u>뀛_MMuu";jvƺQM9B~ϪW:vOT ړVq+XM&RGIC/WѰLg:6C!dMk޽E\\T9w XRmukZUwoFcc$7US ef #/FrdK7{aWf~zv;ldSjPeo~TKD̝vG/DtsOxmX~Yg*ǣxd}njV{q$lYEa/WE""y_3\(ƞU~W5x'es'<;'!d5סsඊX/\vyt e @aūh3gt| jnFfL% dCDXa (a=x2KYv-%U)>/oЎ'isV %@7̜7b}Xɺv@ W%v$oSQ);)e:;\,Z/[ ׏﬩\/nۗټ~ח_{3m'!+$q{89_{.:7@=K'w_tˊYJD9yur*?۾Ή6xm63=߆RƸ_[i6}wm'VV5(w|x~-d#{1g՚V}Y$ j~ZfBt53xCVqJɹ3p DyK.߭Co8_ve{ZĻ⮖|ػ\Wd؏zͮg𿞝oU m|bX|"h+B)[+Y鍱8'*WwSݕ^6}}'s_FsX3 6b0I>4~ 1T~-sIr^MYO\Wۛbƨ6V;S|5efJ= yus09s+\.֛\K_C[pOW(GJIEp2ug.;5o@1DJvdٲ?OZlב’vZC *e=,5s qwooY]cEuBbb]ңB-YSRRL)K -{ EmsxeUeeڭ-f3+)>ӷq#fᬎxG52z&.S@6nڇBMݯYb r\t~>7aRԎNW&<3ܱZO7<ݫꨊ.jxŧU{$\*ynz1EZq>ufG*ɝ;dVǮc7û7DR>ʗ,1ϧe,F){`UzC5%gm:Ȇ9{#Nn31#'Yk{~7܎LM_]U~ml)?&_}\Iw9@\xlׅY\(Gѡ#Uլݽgu2'jvF,dZp5!Gw7T&^߹짨wDߠww/]Wܪ,kqw<С[(fDCڃBru[z]`0,ef9s8)B|Z1ɩ'CC__̾.ϧӿ^]{.s̱whB4a9uy+v ycvgЬ{&47 2O֝<`/,ѹVld;[W7m0{f`=C3T*y)+z{{-hhMCk-\GIJe-V:c`'a zb3qysT' j78 }ߎ ԩ:Gg9f΁s@PygecEU\6$d߬նJG?1 &׊bQJ=we%+-ר>_߽ e3B<1uΣ}2aݩ2@V^77V1Sαc!ۈ1Ǡ=>QI\6u>{ܠUᘦ<JF 3Z\ǔuP%sS/o˘[ Ţk0^FM`oJ{̵۬f.5oTl *"Z|ݨ+~n"p'H"NF)vJ$ P_ν:EWdB,+뮷.{?Y1|X2q73sΦ΢e1Flodj*d/+}_;fX%|3NB|]/掹Hz_Vd:.jL%eyr<: f/w`o0.θ@0Atlׅ.q/7׬;XęC<b54n9Ug<׿oc䔐*oI&mo{?UY7[VßIoMۅNB<} PkAw#r|eGWi)q`~q2ϕN1n8Ys̚bc!0x<9VxϢg;` WJJVJb6o=qcE}8淛UǝJ?Ow,w' %1gr!FޯYI_e(0󑾈aq\OOMKGTYKT7ttݹ(̎lU[^U*"\;wֻd vQ^Gw {oj*".́*}xm5}aǪ8fm~كj\ r3sD)Md+uOԆ;ԕKj\ /澺F@mU3:_[Vto7OJ1+U&哓OAg i^J+ZӯSM֥*ךT:D"}#GNgʪ[޻$eFR aK{66n@,nfi ecn2j⚽,dj޻=c(X:Y0~k0 
>NVz>6ۋYok}Y>c\5;JTEV]䇔6~]TV:ynGg9nQ^nXA٬s=\݇M * KϢKw@*u޿n7Dv\J8UVCM<{vy>JWgŨ'jяhUd"98m^t\6\0[Fl+mp=ʜR81ϙYJש{DߙSM~=~[pe+`3︪^3NU"=F_qD%#JZXdסsY^ǻZS/?赳$Yψ`֍,yBĞV <H̓gTJnG-ZG63q4|_g35Xu Ձ T_ʷ ,kF8_hqY>;n2TyV~nʸ_띡ϲ_ A6+zot٬2kj*0Ub*Y9g瑌ob3ͼSnxoXZ=O&1VpZP:XX_t$&s0hSt\T;eV³mx41>>/YfWFnu@ߩRл,tex{Iu]̐BV(^ k|Pj.aҖ;A +h-'#_eh#ɄFSe8± u]$ʌεm^9C*P~nͪn,ZFevUv]o >}<(c{E=m~^^{Z~n"+ ;_eV#ouI8zɉ|Cy+e/dխ-Wxsu{ 9gdϹvq؟Jhw!z_ƨe=3BHSם`J|t曙/(nsh_VEĪa0rUZk4Y{߼cCPV|J#W̷6Cf9}A]1=3H!bv #1^x9ޣھd$[4z)كu/Ǣ6")qgDf~w;l6#|XyE`wsF5xyh@ЎҘg\ve`o6;B-duA`< ~JcV"hANoDzj|_oEwAnSn.xb3ϼftovό՟ei﨟4<Ԅ3v]Agy̳sVsկxQr)*_l+zm7{8¹Bnw_/+{:MPf~mB? Ȟ ~3F="X ]]'3{17gJw;*bg7_$s"i v݆3ɜb{:~n6)-lzao- =A+5s3l3Xe#]C qDZ,;78p[g#NzVe;t1Y?k: iɿ);hQ$cj4""Q[o¹B3ghwRv8Kv< 4>>/dofNsoTTM`8V)VL˻'z~3nvX78Je&ﮈ};ܽD]j6ٞbߵ)+:~7~3W'j9X=<<`57%!Z&r5}{P8S7B{PTXeE*F{C$d%˿2r]Q)k(P;kdH3sC[jRtL\.?!WVJtJKM ]16 &yk3COG#_j{ 0eؙsQ +}knb~_-.^EXȪcVx#VVuJ3ggb "9Tl s*w ȯ8^fyjA)"!v1O\'x Mw%#Ơ@qvzNJqPy Qr5dc2ºdzy>M^ Z& וnN_gzނp.ޜ?h1fƐtOW'Gfw3L9}]|ݕ Wm,KeQ}C$w \i+71a.EP{F [uj^nj+> V} SŻL^cؕ~^"^<16F2-}qb&K59媃~ 3ǜ9dּ;~VP_cGLje'Ey,Y!ִfot0Q1%[bTy_w)/c Jr9d.sCu=1nD iuV޳3+Q d3NŽ\TLJ(JhYf,ɢp|Wtс<$;/2VhGMfnЮ(R k(ziqlCs*gtu\ǡ WV(:;ū1Yma=)?[*þKB6ʌg†hdɋ1ffDYqo3yKx|@`S)Y{b+z;Nd S6 :+^Ω0g^ٹgnkL%0ś`1sq$}+(;˼]&V#jw<^? 1s~? ^1V4'Vlj;"_^U ip&' c^d WHԈӒ0!J𻲩l9Fn Q$+ |4Tc>"?3:xjr}Yrio}>c3T̀3'KyL)YkyE7!c#rxjd nxy7厥W3.2*f wb93X\w0(socngx5*wʌ!:2V3z,[dx&㕸GmѿT679bKXj+]~ g//lSR=;Ntvl}S=V%{\+h8zN$v<3M49r@VXyh2aIVڠ?I;q ,#ފu\~g=8;ӛG-w@b&Q 3"U]Y&6,]õWt`Ģg<ۗA<\}6Q<1D5|Q3lR3:aGV#n .{LGsdbF,Gus}ݜFebuSXMhglm' j{ȃxu2+lY|t?%j*nYu|hVEf1V?n抾txf4yk~[䫗G dv+a/q٬'3OE}m` 0k|]qwMBNU~md#Fw?t_`\"s-C~PmYff2# @7^ dC%{nF?L&bV{̓u:YZԖNuиj: [2.c7V}QS:Ӽ\!gDw!Me.jU9qx^ы[2.}<8.tJO*+:RbNOQ,+Y?aޭx0Bg,OtYA' osQkEX ge+=>[5  ħe,VFCP9dnk?!V{t>4/þ֩!:>P?)VYyq=8tߙ ً,TN|zS 7`qdeNE_ZJIJKr=襽G2SNof9,nIwF5DC#4ցε ~gOڨĭ@Be)据 " g2*o4?z/XO 0+|3 - {16 oQ)kkEetXz5Du\7lӦe<1{Ss[wTTCCi8 i3PdweLN[kvR2hti]P@? 
c#hbb1'y7 =l>ڷA2uV;/G|S³6#nc,17cx0>컗>f)w#{vo2nHW73٫Q4?Eڭ̻҇};osC'W:NxzQǜ9dm'V~2SE+6f0{P 都AjLV[vxG|mcYvD 3Q皪װ/Ur4YccCڲf5F0?XKVԮzB=vP%*ywowsV毘ϝlcD`2/}G=x=-eqW'"sc:̈́xXi4ĔzFoJ&e߽o3k\s3ΘP<֡$p]Υs["W I6YstD{63O031vZ5N넷27^}mWveE'+6\uyIf:goyZ#'b*Z&/p O*TM Ӌ70TkY);:gU}d&4sA0>Ê{ |p4!3rprNq=s1p,JuX^Xdb)O 52zKxúGes2C~:!]DRWQZҌE.?̓$;Db6 YznHNmaCL]fb=(:Oe%M#<=g9bl`Yl` l:ԟX:5o[7?Cb*e-VJAeq236F/ʗ+W&6紩Ρ>N;Mof6Jt$҇e@5w#x઴)**T,WD}qmz2'5sq4{~:$ĵϕO32 A-Vbq'۲ރՈyX:p4x?c*N;OPs=Blpx٬p]b3zȻzFsx2eֲ g3{=cGnYycύHnam<|S16EF7 kVA8 %;Uف &9+kn_s%κ@EoWb=sWw{Cr;dJ_K[1~vĤd$fQ,e/ ^*k^l;6+D̅yվ_zt֛H3υ\љߐ&@yzd3f~kb`xp[*Š5rWǢ݊9L>yoT cvy{VٲLZ2?F/n!,:ZTN ]a1Vf <ܲNQJ9b}3[1 w/+n{&sXNCִe<̓1je'wZ^QekF>h'YD8ns=N,z<_g>3gʿN㰽ޑ 5;||$aAV^rv"4 +_ƟZiSlf_*ħe鮂noZ #NVx|^i4Fo`E6Lt㩊>uqV.5GʻfL ٨'Gf}GQ);NpAV%j1hu#)x],FUEt>ݼ٨R=+tt?^U ? XrJ@ى5ϊ[> .쪊qqZQԛ1g .\NdBaq F?f**gA>q$d5#V=aGe_ɜ` 97<̼͞$m!o=ۘ?̆36Un@TOz[SnF'9 ~iV29>=߰kXDC d9 +ZOLR1B^?9e-L4ܟ~pQ {6Q)8Vx_V\bӍ B fY.խd/3g9Yݔe4qO/:4ז/ZՆ25g=^x,q&d)H O'07:jRO:Zd೪~޳{X|*,?=~ܵY ? !+H^ajkA3c.D}VzTf|`{x_qk*'7bL{Q'Vd ˍ:~HOEuWIO3f/jV}L߯h8^sS{b?s>z䓐wwhUMgz&W( tF/S]u֘]2=:ϧ/r2 U8ߕ(DUR ȶ.Ɯ'Z V؝XF:n0gݳ~|<|Yaj ںsFWrĈfƓqsoT*򬆳V"zYyʘ~(o1cEu^ĝu+ nY/6tswHĹd o*$PXJ9g-}_ Ugwu/ʟ^{2Yw#ݛ/ܬ r)my(+Yc.<3rs֕x~ǺCrJ@*R: \GrЩ=܇e3{ȪEWm"f3ϬX[olFzsszQ) vcn=,0.mYլ$MW7 s *nPNbn KwVX \@xw'cPn'swT? vƩK[WŘ搅+ׯCzF-{,zǂqwj\3†y=:!ĐA^̋=W뤪0W}w8SsWL_zw>QP?vS3T;2ٓopcnݲ[2/y! B=bؙy^SF-#J cQWvY̚v2" ;&=Y'6Y!hj~oe56B|Cy̫0/i,bN&3ZdT?z鸅1<\-ʵr\oGNoV*c mcbT~£fnUݯCxL i*Bڼ0)#NfxWv:eޯY?؄4挊Ux7~#01`lxun@z5:_"NF窟07{oR3q7m1GT֞"n[tz5+us~mK`Ƞc)sqϏOBVm fpQɃ7:~ WѧRbof2 x`b )0/^.022h[Լođȟuܵӛ+;2ƘWE|pRtc}\*Ъv$]84h."숝2UC'LLpfz0wzs̜"޸=)Tdî2E1WZ+]W~ sN>l#1k13 ^t[[萛3j1$Q_f#@mZB N]I/y4Y-D̡~3c1U~ndʌT; Mx!)Ud-" z|>6aKw"Nƴ|F;.D7q>DzUj ,q1Aj YCR6hQ|Uܓ u[QZ&b;+̇\5gm$K xoS)GQ~>/mc5N-V=qߩv'۟ԅ^$dnss}Lߩ n/vRmu]NuryZMaD#"oޫ8G_QcANY$9eFuϠm}>k͊^Iz]pPT?~WUnS!˭\:Qnr ;H㲠in+{oY鵉D]61ٟp٢sZ=>q3TۨG2S4 gF̴ ego, 2NV. 
b6 .НA=llSϲek;pߦT>Z4N=+ Y1`N˃27[1V[W7HZ3Z=W)ɢ p%s@UJPuO:qsWD#k qSdױ2=G6\/ǽT2ECyWe!kxC\AjXqce d33[UăL뻁x6|t:t&s2!U|Cz [$d^N}en7lt_`2ƘŃHЎa.uטsesz[l޸ u0T6wSYu`aWR fFVnkg3+YeuϷ9%+nʎ v=\O]1`NnR40V懱DUlVy*E#N (;@>_2n@rد%32};@cky4gFry]y>92h&/'h͉#{uhMC8V֬ԦFMIa"cq_#NƴY=m^Pf3è*T;XVbJj:)&5v iӛ^;"*XN2N jk^*3R4nWeFJ{Vd&&ʟ n33oC7f}kY+mz %+ʮ]b\Zjͨab->cZ 3wtGߚ~$fF٠7TxPL;;p3کzŰd#VM-zx"!7 idŜ3[lT$5"XV2.4Ǯ7$n|>Hv}\i:IT2!%Z׎\Vߍ 04#-w9lԉľ6͵'c[?:Nc)";jEvɼe%1}]5k:wB9;2cھQtO72|wAa[YʹuU7}{-gF=?|u3f)iok\ +o ,:s*3T]iV("&֩4/O >0<uSV?M?uP6wUM&=ہb++agLw N3XQڮ]:MN|ۗ:?Ye6#_(^5qyrǾhHq*6f5I9>G1eeUd : ȨʬV+3TTeY¯X\_Q6{w+mnVb5yR;'"9T/+_N| x/3 Fq矈ϋۈ7|3ͣ rϦ.'=/1E,_eȮfF.7*˺Ŏ>;ONw/Nup K},Ckך_M/9/Q?ݗlTiϧl{?+N7eީuɱ=@o6b3裫9wwDY2/g}m_֥Ⱥx63̱SdPL '!X^jtߓOk0㿸&+pwJiFSy6@myGqӳ:c1ѳdVeN`Cg}eՎ9ɖ 8uĺb|,lsYP]eΑW]qdtP ة鷊%8zAmyL^㓵/|Gs1?3CE݄ϊW:(3{Je'-V}ȞUj#~OU^pJn~ BdT1>O6,ķ;,Cf=+f-2q |!*#G9*yfY]p"u}dlc? avnOBV=7wonc0'a,uqcY W<9Inw*uYW4 n>ᓍةbߔt=3\tڄoZE"b`LON<>ӿW\LiдM`Vc>1zIM2bMa}sB*_+5ТXT;G<]bdj *F 5^ޣw=`RocśYDM YVx`GEw'*bt{ɮTz GkCz&w9_YT& \:@ج2d2{LU'y3UVIMj]ZiK7 .sP\Vu}.͌gı7r`e71gW 0?e?']U<30Ձ>BVF2JEܽ־$ŒDf'qsYORSl沶wr1x̋14>P5`>O%g s/d<|ϿF??H;edw pH,.ff=;6^i33pPg7Uã/!PqgFN5.U:| W|3-|ab5q3e6u~Ƶ9E%#/7oVܹk_iSCƎ׊Q){L'1(^63nwX2ë U,dף.v^#\ֱ\o}:'*q<ٯ^չpt6J*Pu.\ʧ=) k5N,bf xsՃV,5 =3r9K~xe2HGjßקbTje\mLx7.W)8C5qqgƕq7dsm;NuxQy,/#!&db"*eWPC8it f2Fa$b& {̦NXK;jNq^b5ƫ:gUi΁ ld2[m R|լyB<]z`qwCrfסjeT]=YDx8BRq?837}?*FדCLIL+D;7C\ #b"$+ZzaT_?g C}]G.~t5&y-PB<>gZ_?OT,8aƀ2MHvrdRdoHynR\V+^jV*'䮸P;ϔ.[ a1ϤN%Lǹ"{W~:g 2RѱϱCh~-=z8m~;/@O |9dUcs^cuv ԰+ԠPDF qz?fⅵتs>C[Fk1;1TwF,+gb2™}[A_+`2 թ"Ʒ_ns(zW@e+WU[f\fպˏ}骺UiwUW9H4(ʯGo1yls'h:VP Id:2W7 w~>=_qmK:"W$vwvÄH} tH;#Y i9\~u\v/y"i63btb 0ٝByh~_fY̊d|[5HnX{8q_'WNWL Q&5O[O@ӀcEkōLͤ*?yp80㼘i;E@G 7jPs\Naf'={n~oy,%Y[1zb% /32 ItYc='$+ ֞p|Τ;h-|*Ny]z+ĕjWS:^Kj4^spֻɎ;M?:/uKYvoWŵQ.F ̈$~91I8wx*kVQ#Z *ig}~Ǯc&+v~[7]'Y_Isoa?94ؼE %2k>qRU;ǝRdy^oL's8̵M^L[5h'cReF^ m(k4ڊLM^`Ɇf3ꑐ®rF~)<K2#3ƺ+gr@;|G$l7t>_w3nΨ\mmGE#\3+$s_CSy  |0u#̘ qs+:8>`=vؔY$2W)J;nXby2üG卿NoF-{cq5|ox>KZcW|1JD݈ ]I'3|{ІwwCĩPD5rћ,ez}U&ž6RϕxIC5)m夠wd_5sߴi=fVs 
\MkUA(\~X'߻NP2.M#F;3֌zptݬ;" w#$0Ovo]? }NDŽ3r+=jWz*fqJ%]?i#13ۊw6e:t>&sE;uQT a=ko+q<\Gpi)b,8c_ RxW׿ܯ83C/jeWvz#ɑ,+?Yىhq0_9|of?+:ˇ/'Y=g88Q5'giR΋dYQ]cdU&3תg=qd)8t *f9"3#PMe̪Rd"e@z Tژ_y[8q~c[+ԟNvo{fޖ9{O:"MP9<̨6޸Je7,05-=~i@Ng7VZ@9=~>mf$^ɟ M3;nx?$T̼ Vw9;ƕ*pFoDQ3Wwx' /6؏Y^J7!bk.irӏ㞷QO=ܙŎu`6/'Մ/' ȌC9 ~C b}\i+~=SfHc u[&uκ<]: E?Bְm v[rݲk߬`] ,c包cαȯQn>_%<2V-NR~\̕j5|=0;~8p%4ްNjMADf8Z=Xb& Ҙ>Y+F7>t\\nOV7pM_1)~7qϤd#Fَl$/OgJ(Ԗŕ~2s(9k 7~gWQ\Nxm. {25;o5j8;D))L^>v[S=6>cuW głJJo6[*\i1vl9YcL[;׆̈́:3dW7$ßQ<ȁ+Qۿ'DrbKso>F>m:ԁ~gr o\_eӜsM,\|_\Q~Dxs?6VD;/+x2. ۀ֡OZ<->߸~1.udYۉK1,2r`6/򻭃YدrxC|f޹n'Kl5:Q`Fy3WxƘ}<3#/Ub-Ao'/y'z/Κw29ie]cʏWފOlܯU:gV㖕Pf wr+@h_9o&VXnm6CNV3aw/"\gb3xǫ"~:z&Θ76X8e2﹚An<+OBf5xL-.uւP{ lnox;1fkMVPN9x^綫ASVX,武Z޻1Wt=)O+T1>p2*7sq3gP{߽ hYNwXwⓤEju!g{zc}ggw{..Eep}5FBp\?ñ2y̳\i!%_7YCKǏrxT _'W~ }89cd k,Kj vH3`r/"eQ7ujbY mZ BmUqt~F޾b@&2EDTխEc.nq $ߦRZ8x`^O2/QlT^97<8hLVcy{?Yk1:j+@"#zRIVeު亭ou YLgގ/s%+VV0/p mg4ٸռVCׂ,]f [Sn>nt*\i]٩Z]cp^OMWL֋!U߾ aho=nɇP1~W6gF|32#ȍEOo|WE?L/\ڴs VJMq.xk6A%Ō<j\c!v&/*psx+!q{|FĜ=^=/yX5 L'RgTtbRw<řf׻6?759E/0_f]<.J\VQK~y Ľ ҁ"vWih\0>ef$ZV(Y{o2Ǔ9:LhIDZfXu]@8KlŎ@f\YJ_EvL| ;HVgрYʺ0GU28 @aJeWUYܱ6Tː2cɕ}r:^_8w*K>aN:l=dYL;ұl/6M.DxoͪJ!)ˌQ߂y^f'kŽeoQL1c[%/'>{ITEk/~32`1 L0`m:Ua`uW9Tg:ccQEƋVX٥pƞ1p|_nl#׌L{^L_: O?duۘB+TqL2jF? 
ͪWZ *ḿ +_UrwݚYV2L(&8=б3:?x.sp1v 3̇0fYY+Tm55;.5~#}3/6|t8 5i(YPS29*k%å}a+2z'M9sq;Љb `Vgz!Xh+va6e~1>[TZ g3ΚT6R6<iV:QC7ir5<;UJT߸QMփ;Ds5a+xBJվsuWo?%cb;2Wx^EC2$2;-vSk>rXDR̠9z5bpj\Y1.d厫Nޣ^%lҪs 9S31ޣۣr=.HZ'2C&Ze}1; frpT6}o/y'ӽi4Qvؔ@o^R̕ mf̚3'N=QЁ㓌?1zd}F#u"t K'yFb2ob/zn[tgB07#0\ d$Fy?BV|[gG-:D?+dm 0ԩ𝪺y}D@&3qHE[nOԅ-XKYgG `\nT\[eVӊXvDTwVv:P~}*~2wAU}0oT0ܻޒ(^̕6ۋ{=VxbIqYÞM32.zF5q0ꋿҪ|`'C9(' (/֓γ]h# }~:Q^;'o4 uJVH VyI\WPw7̗x>u%5q,'퇎IOBjLczamziBP f̆_~7 ҩG"A)+Fu_~3Ϥe,H'75&|8;/._U-/]SzCxEAw!иjwgs {(L5V *=Wጤ{41&{t;#YR _ޙt iknNp˿Ve69'NlJNr(ݹ_\6 {tG 7}Z_G2(q֜nѩg윓Z6ޢo~9&G,]f}sA}+d gEuš]$i#Q!mVZuVeэW]bcyj6eF>$}6nǖ=oq3b>z23Sf+fJO|=$TdN~HZ糋r9 j#aWe֙Oo}K jX09o %8q;7Ne$UUÖWg]tK ӬX{L_bi*0DyVr]ca8ެo֏+Tq<D8sxm~X9d$~/ݿϨgC:C(8Ɩ̄3j5odP 8̵ lB["A}6OwTݙʬfڈx䚑)vg$M G*[>4Z/L{~mO.\Vu[v93B^42+Y>gL<2U;e&+.}mv팦9~u\PB!qWĆ[ɘc:0 F.3\xÿ}QDxi7\_f(qG^YYY +zJ{u Ԯ'XD'}pW*\a2,Fq{ +poLk^V>ឌkc߆] Ǔ9,/U][\k\|ٙʌVskqm=ź)#FC!0Od2s[zcٿ1[7=VLMMifhϺ Ŋ>kU g?3o$b*:c-wEKt*%$zܕ#"U{vUc+G8n slhS)[#sǃ?G4^]*؊K;VLI%cFx5O>?H>9e3|Lms]GH؜0S3Ybs#նv,ֹG2c)W/wzpJg;3tb){0_X>#p߫Yr<\rL.':uHCd]N̬fjpRTПg;[ 1KŻ XեיUa<[?HR =?_݄ uy+Ss cb57#}sԬjRTU)ZuyTe޾M>ϋy/}zzV}dݷ-EZ|O8u}l{=;X A4=5߫ Rtތ(qryom=+tZzܤVi3&yu~U'&jGM0z lQ߯K31BkA!{>icyl8]:<2wXuŲOY}Q-ϲ]P-{i~;?,QpMGY'QDMкFh:x KZJeXg+gslc;Ӷ %dhqt|HLTJm}JIt̗O+g_"CN~~'e-i||n}NG^3 tS-TwCbiY+EƋ9G媴bO[ist:WrzZgУrʏ֌8Q]qWA^1~1bN,Ghqȡ ZaN2-؝‘d=4>Und2H#nyzڕ7Jx@Rb:9G]áVH#-˖.b'YAz͞V`o"e [e6$5%]]g|J jJFm⻵>ϴJ8Q6c V:=V]l?"=f6~l;ujH8[^v>jSStǾ kY1jz^wg-iJW6w5`1:Cܑ Ksuisƒ+Vc}UEc7*Ks NC`ulM!jcyur5~gszLs~,&5Ynڞ]+خ<Ǭjv =~rLj=GIgfϾ˾2nB-bȢe|zic=gҲ+YP>;iI۰Gz&lz>aMJob>$:wgh5,XE_yt>1lj{Z>/J;Q VhcwI*ɧq:]hh'ywizZlF+6t>b>°w W[A&lyhkUU 2[~&ٵ}򂫩Y=hZMQ? 
vLGpcł[ӣM+W8mk)nY0rͻ}w;[V>*Ȏi:(;<{gjXcTi/_F3O#q zWZ`;WHiү/\xh~ v>PFѽws`Ǫm[~;3>SAXx XPÁI[CZ?-o𗺟(¶QU`S-*ծ534 󬁵+z5n|zڕ'<6B%t|44FK|йV1l=w"~^{=ٻKuxǫ3ܝn{uGN=/y kBnl4䣝ٛ{2f`Tgi?rI ;k3]ņ_64 ѐv,zܓӷnc PpIN+9'7Ί埾#׿ \|.Px<ՔA}>ڶ #kC՛*3 coTgOOKwWmJeў8Ww|ZBӾ*;eoVSr W)|-%_hiZ1^#q ҬKyzPCfy 晲;NNcWE`YW{WL[vQ_u&QkT^t 7w%gPb jbf+"gqSj?o%7D>R(wGc#vǸ{닛`ƤvQ'$ Ҧwq/{8+JY+I6zW4NCjJmñ^⭵c[#0:ZoU>wQȍ˚ֳb3_9,"S r|<7{t* Nhs[3𷤎;28vud#WxY`=NǜO؊%朠g\g']w -ߔf̥E<1Ie>AUS9,{7X7.U7"] ʕ),gmԽϫ't敞io|/LjQd =sjiKF5ȩHV|h?:κS_։^q #:92:˦.9F>>kyVG Lњ,b]u{^cG6zٞ;]gqrДJZ}U|9~3-3ʠ9N,G8 Dh٦/ba9YiU;{Įq-phǝ7KÅDZ㼚\}d:;圀}jѦzi-AN~u?Fjc,;(k~q\u*G6=>u9 0e8~T%Hgg=XUVU*ytnN|1 j˞/4ã c|`9whRtg0l.x`dv)l^}n}'gj٦/rŨo=Ե|Ae(,Qi>h>\qlQq~{=2\9r$:&ALX婣U'gwQ>G]m0xܗߞV=Qų 4f3[k*zV|x6oIN}ъQWUf]ݟ*rsme',FKv=5hQglVC:۾_:iU~<*,oJ'&|wU:|3U<>ӎujVhcwxM=2xsmu=HfхEۯ]> ϭGmPjV zA<ֲ畮Sl&fb-x)s{EZ>һW /ʪ46X=i>E04˪4>FA ye=='W/7hzDI9(ΠEy'wTEGg<,汖x,s{N_(#ϲbqM ~Z#1~{:{yd}M\o{~uʥ=Ѯ5ZblA-Σ/WD5ѐv?ǂwzD;?S~:<<W䤘hG;6vX3~β v"jov)i鮱[DvOPrq5ѐvlLZC'Z]6St#j{|o=G̢Z6`1l5+G3=¿g`W>ki|[6UdM_YnuS۟#=VʮQŴ~!xvoK,UQk'nBVV 9߯vt:W,r~` YG,f&?nI=>!ud,_f5h(z"߹tI^UӮ"qWn&Gv)($]wEwS0&|ת# HB* H4;+Bv&ŭ*$:VmZkn3_> lm8o᫬6\!m5TLU4dP FcS͜wTmjbEY>Hm.s!fMcJ!޷PuB7Q5系P>efy\߫V7k_{Tw ǾTgb_rґbs?4ཽ1,ycm'*OgG$yWrrϴr(w,)6xz ՜}U?z'*/ʁ^yWx6V>ex)sP+S$44kOGvW+%u^l ʮ6erUg8d[{:sӷ@%pPI`PGUZVbW=- gwĒeg΢U[q7gi1$&[q󈫷|E'>іT߬_pB>´> P`$L%~o0}hl0}i}.QA^rq2 1Tb>d&{rSnE 6rŧ(z3rVv]1#6=s( XqO4ܼ[W%w'PHshnIJ\ ?z|hm`Y<"Q;IJ#-xJ{ S׳~cKki<·XmbIKV\nz6}&e)d ӟ M{lq]~5(Hx9BIS0-b3ݾ+ޯ1V=W<#x?f~Ut2 {iy Xy,@Kn-L,zt٧/彐EN}➵o:+.z1jb@&fޱij{\%YKAo{ K:ޓǶ#ޤUeגmcb&;zYR Zdр9u}=F[y:^S Xl힣t/ꕊ)]Z-9߯"wOKLm?/BAlU=.V qMcTzZk 7?EDn)z$h={N< W9E'{ưo rldoVr},!Jrˍ+=ҷgfkSq,x ƈ+uޝg>Ż5LMuE{40Gwձﯾ[N{Zaer$ԗ1}[ x,Oėc c|{wbÝ9#Xb/sh >˶.HBfHln[~;g5cZ~N_"ТzeU3#8ѡZ}GђGCk֪Ĝ>1.V* $ԊwxwI<wpLM~!wY:?*ҧ{hEߡ熅};?/BWQG-U @RR=k`> QD(\E,~Sժ% x"m^K7Klu\9S{Z/|}r޸('wu}AA~o}wNVS2JPZV*o퍏[5Mͩՠ軼f?ꑵؠD-OM[z0lЍؠb^6q_m5KR]=zlVCBߞ?S=wdOvrKUڍ ؿ|OrVEֺ)N+}tiiqN|zW75ɰ]=q(Pi.xǺM 7|AU3_1]]']w~-][oD ]F Uckcb_o ZjG\qXMƪ:b1k|zϣE}=j 
ߨ!QTš@넧]j_,UI5){Cl||dcc}U'T,Y{UʌW߆V>dzZSٓaUɥ[~{<q_C+4ޣZƨo\Ӹž].t8F{r+NiŏZ!$z [cK\.-=n_m1yD42E^_ۇzZǽ{cmB\?yfQ>'Yy5C3Ѷ(3ZT5= -,@6nz`oe e.Mo?XoY Q3j89Ģ/'Se#^{vY,v"dqG:--GO*X.]-֢.&ќ}XYR85hܣ~leDnLuvVZ]#=  BLǭ-1_~zwoj[ح'zhwWݐT5[cŶ5V=ݪ 0f-^Y:\:9o5|{*|4CEXk`m:Zko߾_L~m?컵2>%g+{-:>ZW=_4U/xv#^f-O9\jEM8GTW'<0qk=œFy4^a~Zy(~}O>a>"}MfW}7G]<P݆xiS{>G+FRWn*/e0 Hk;8 +WT0W.x>jV(}=WxR9<>s{/ւDgm?S%)3?,R{j lߪp1biex|IlӽG,SߖXU_ }{T)-4ѣCQbv$]K$9Wmh9iMmslxO]QӮ1=TT5nӮx /.-cbpǝ~DjXxb}W$5hogμ/Go(p1X$ei>UJ&jYPE ><0ܾncצ;Z+l>nR[OgA*aZؘ{0b-[k)vrD>¶(ԡ{JVx~Y_jrZC"3mmrmV}KuZaZaO+[GhIQM+=zOWcBݐk#PR[mN/G A=@W>]S[P:gtK.B}ynֆyʍϴ+G"іrYl࿇gppm"g>  ~{ϗ6DҠvZg}I6 [Y=8bKZ 4+ӛ\q%߯׀o~>I%8iK]~!`FOiKchC?뀶vQ,e6X} ā21U<$ЦZ2D ľ@*(h+}H= ٷy%{J 'xO[[I\@qH~-|/)Nܸ[W e߸A8m+mb74Ëv!Ha<\2F4pe@LhX&{ %kU~t\G߱Axowo) {S z5M}P]7}"8(.y8dϾ[x:T4cU&!Яg 7G؁G&'WeG&7>wzOI׼i(|/46M_y>Ͱ^JG=2_)oĶs냜 |0ߞ:kl~cIsyW|}\1M@Mlಫ}܃LCq;M@`>*&sTpA7d =ܲ?pj;"|~>GaM'wEg>kcߏd֣o3iwQY5k}S>{O{v~Զ9cg<`Kƕ+ƿEdV0QhvZ]!v= @3Ma4 @XDN?[Ә1[Gq| ~u[h+5Nf+C=57J[0:,mJUQ(TkKZ`-*][Tixq@k1P[٨iS_)I Qxbq=JJLa#yoR ̍Ga6nuu:b+$>ZM$YE>b}ǣO{HrC+_ +WhV69[Up:e=> Fw 0hyklFUwbBI>:+T䱐_.%`svx`aþg;w"۠/yfVSIjt}O6W,P.hޛZ Q ~Ղ_m_heQ?TZ[Vzp! CE{lrIf=hɡio(-Ҡg<>wXY 㶼[_Onn hnw趏NG+w> :}Gx9& RC1 |{inͼ(`vגŁ: yf)"<*ƨcD +=XrG='QFeY9<̚/1D#6[of`gנ+iob%Z^m=ᖔPwxvDj>KXSg. 
rx!WO3-q6-kh46 o/뽨5;G & gj3Xڣ7AF {}ݱ+13FCl" Lgm6"h-B{}=Fԙl}c;j iz5Zs7:Ztz4c־.1Zh0`[&mu7_xoѮ0TJT{Sl.VQdxh=G݇5_^RGl Byxh1_c|;MRCSF#PrPh"0 ; ɘVq5VsOזC׵nyZBJ壭z78;mzaNingUST q:LgލJrL?1=Ç{CmܖwKvn5Z0\9|BD+kf)}(xz5_` ֯2?G{C\l5{UmR&ϝ؂yE41i|@ PxGpYT곲?76[ xYF6qOo{nf|wv oUEM+3 v)Һ]ıtiPXQ k (,l[rzMGcJgkf`SIvGvYʋGn[w1_ NߴV@:[;To~,ycyWzMͦ\K<xdg2zrUSfKZVmw࢈-<܅L&R6>6nr+HA-R.WG* 6˾M ICEsξ?erjJSڝ儥DE|lntSg&|[+/!m1V[Emhw5vG-l%Iӧ#olegS ֣`+0V:2]+v{84¥=)AOiApe_k=I6:d9޹[ (Kc+VlBBZ3tt_2qFCǕ+?aO&i7}>K2 Ϲ4븶~ݮ7lohe;>36+Sm{iqmMymK-{C=T(V gX>?$hGh%w}۪+llq՜2iջiG7V}&MvKok#v¸2 [ Ӈ_a8jeg;y6ņ9?Wo gaccHvQ5{I|f癤6㨼'f-ZQRhO+W7Tif9wn?u9h/tRͮ\}QV{U>N23 U'?Wj7g nrKCػl4w{##RgW ϰ;"HgwcL3)+ڜ[h7{V_ue[mAК!ee9/+?\M}D|_=o*s?yZ,ď}C-w{dBSkWg9:)B!C>LrC񆘗4> t!'Sg_a񉚱\i'YYW+옱{s&+ ܝ`cScjb\wv3<2d*ٿnmF|hx {z-uQ_y6g_9E\nj\kw,ő}1`~;n6?94߳/PŠΞ?[7)+3(Zss7 sQ2VnEF짫Vml*gBiH(oəjOti(>] 4Qꣿjs.LI|g.i {NAp+VxBkW'snWV6Hp;+5|Ҏ#N- ?f`7IAg  Ͳ1wKk3c#6Qz`|y13ZzuO}ymZF~n?(,+m1kp# G눱1o~C|;qez-Y5U*whM;WTc+ 1ldSz^ǜVgwPl3ߙ]yzRiz 3f"_MfQv7+l~qix%P[Tše'vђ&l yMosu#SDŽ+9[.-*^>Df/X`*>$;C.0㬋{Te]jer^l/zt;7Ól5k}֣4힕 t~{G`v\}{ڷ)̙9+g@06>npnK@~;L/W/3+f+YH͉m췩JH걊o4f\Moh"zS?zј9s5-*F\}`~I}+l}'m|NK&L](7B358>|mg(3 }1/Wip GgD8j#u/ހ'o3G/g̙3X۠ "zHFrfRv|g7ۦ}^xܑ2&K.3SiӺ->_ഘ` Z?kz`D:/0/i5eTMO 2)r4OW@dߞ^igl; ~;N̒V`?]U秡Ow:5,6P}!K6mPꡁu 0 |=T tcvL]\w͹:CRw}߈{fcv&Fb{v8=ڛ3Gglه W4ڰnasa|Nj,~}wc.8=m혳TفtQY?ݸSavH$YJngg ;<D p}sd%"'TsǼC@oL>9{!ܻ}ds P'a_{f\P?nqԆ'ټCr7`ci?:glSe[3%hŝ&_SievG.°{] ykؾnE 2 U'G&`4ƍ涒92۪(J}w9`,8^ omawy߿:n4Bh~"0aټ$֭kƙ-~I.2޺9PCx_۠5w;+#b߲A9u2v@ +^p0޺9 >6HJӽAkF{wb"Q2wX$#nm-QKzٽH`_xW6hzúŖx7iO,}~a)gkv/rz?$q!Z}!̓mc(G1}隝ːqCo?[Wu|!k= wV0&t3ԁoCսUν#lkWwuws?~o@?/o>)~q__~??&#=?[5o׏r׷YˁQ9_M0e w xyzbGɕ|_ ~<DϗWJWփM,?!zb#dICd[El3Kgx,W!yPyMEEyȓYrNZ.F倣a,m{ heOwԛp|?6= J%#5s`oML C9eM:jc* '2AOćiPAy)D؀(0ԛG-BAI1`3wH?_CLd |$"s/ڍH"7b9.PTtqI?_^-YgvC]ijCR↥M< ^H u}u)Q=FlQz̈]WRU~q XFqu/2ԇ(X ]%&3˵$^ yylVK hgMD:ՙW:e( %#ϯeF]'EL#[|"|Lc ")r)"fyu6 k̈́8jս R7.R?#XFnЙ(TY*N<C["c@bc},<{A#'U0yXJ*J˩WLərQ23a- oͣH-I}QGFy,g[Ma_ywJyPj|an@a "ÆQ$N f3nreNLghZ41'[J̨DM;1̀3,1>\R0'.KqP&w*c^,VM$J*K,I 
ΘlSJԇKfӑ$)W7Rhsyt!S.D:( qPgvX)a;S""&#&H*Y9%{ GuS01&sxyvW 'Ja@Q.8(z0Kz`'`"gB-Je jnI1Ae,gr\]' ԛ"> I%E,MtIJ|Y-bḋ#HB&85*Ԧ=$|p h]px>75 (:̛?/en*3%H0N?S{ <P:j:MԣV<堓 $2J(=y2)L`ݗ>KN[v]]NԞpLadсX!+\|L{gwO"ZΨ\"̜2-2GGb5H'!t\:Wp!/|HJQO-TMGԮaw(bXPfGEO1'п}ldm.aTD"^%l E>O*9kP-{R)8*=\SX yT#3uP"+"ԛpVORP=#g50'T21 2L\stJ}I9.gehH 9. +yEy,{6ȣQ6PSJm#h6u9SH K{s\%]d^@^(|>R[I[U0'UKrX7Yr]˜Ay@&]S.R2lIVVx:e>t\ \@&|(ktҙ)7!q.(dN${)vX#-gzbN<ԇX.(#|l2U9zW뇭qYb!Yr*ZCh2ML ̞xf8.HZKZo9I(oȠRPZ %{FKH IaS˥  oXN E! xAd;K.=/o*AbIDcm,(Ub:1lBEq[gL5q^\ DjJ"PS}S1r2K떡́:%EQTyva#թz5(iffzBH" #Wtd^So,O*KVA!D"KEb!!؃\2?)e2't0J}PVyHrec̊ $@usUxZD)TnhAڵ!qRkpJV.PHfkD _:\'9%m] @ٺ\@Y5t5N:ȄM!( QEuC0.S%%QjL8hS%X<t\ѥ\eT5/(nغȍc&Y!n*1")4ˀcPf(~ &pNgZláE6IRdcnt7i*22p-ҾɇNN9Z 1F2psnc]ƉcP!-'ѹݼ&N ;ߢ[/2ԁ=޿ɩ#pT笺W>J<\InLLN Xx$̲ʘ'Wseji 7}^S*^rLBy?uAFbZJ)e?ԛXT}K0P(=QD!T:H rZ$A#a\-ŝt98q>b4?ExHYDd5<7]WY#0YN:cd&UZ%WЁ^pÐ$qsܙ| }SꞃKm$ "$i@IPQc35|sN/W4*WBW:Ġt1I@&.bnQgAxK=]g>uEF77+F0HQP)Ѝ6XѺ>8 ˩8gj Q !2Y:EFY*B[(EXN6RWg澦L'pLicyHt"3.Cکt^T3VKi h9M'q=: "ܤM&yQZh2E0h5zn=0(K*u$ti& g7M$^Sy[&gN 54u'p}I1|'he Ql ?.1(bԴ-|pVk|.^lPCy.j4TMs!(⥈L&^4^6/İPK>I#ΥP< }"TB]=W!*8Qqt*õQL NK J(*H}]L'`(#fzVoC66>҉L2{j.C2Y'q al>m3 ̦HIF!PTLνzAԨ)-MQ(sZNI]"H,jRbcv4Nd8E҂fRxGYd9yuNT{hBS6K&<tPE#rzd:3rCBQɈqXu` (QrA2bEI] mru`,Mn\,:>uEUM R:> (Co[DMmzݸ\^]Pڴr&&7Wg>^ /5q10Lb&X:٥#U|o%J*O垨ZK D%@2q1As7f 7?.x=Tb_-Uئ4:zgR/ny*.{j";&6f%ke8c:^AJ.vL6-~nPz?eZ GMX#ÝV٤SUs"W]Ga=O]Gvu`FR5Y!F*DIR`MV6)GGUҹp#'+Z'}c9G6&i)}!ETuQWt**O,QBduE$ɈP.QqGNlS6*1 _UGȕ8TՔ9}GT.Nl]yoNP VZr-M䶍$D-kp;DAn.:t9U")hT݄T]FԩF'H33 Bi5MrPN\$;е-FN@VqR&DXΔ v) @GTn2zDM#/AXEb2+,rE`5/Q3GRsxj\;֜ԈǑ, HxxK!>hy\^exțF8%CdpJMK}R E%N \ʴ**ЊTw`_jB+N>mVXH!S2Y٘E7+tJ.Nra/+w?dR4R{wEJE#L( VJmۈ?rRT#p(a2"|IVu( GSm<&:Wx@]Yػ.SJmO䤨zK翇4BN馚}j vmxQ'T8LFZ'rE&Ry8)P'ኁeR"@'Y!XfSRYepL=I^ jǁJ43'{t$@EzErBSiܤ PB(BH)fD+FbSLKr(Qa957Kr$Ga 1QZ8# odA`,p2ǞMx-|R5)&g-f.m)%bChDL%UA,9Q筯Fyu`JQMQ^YE[JC$Z@Vb9''}2v%2h|ujBiH|pq!0:YVuREb2 4/\UpI ^/[b=sdNI}K&V98:/ кĹ,3PH95fi 4^s`4bC+hʥ89jBq3ˣ4@4m@6isO!2)> \@dz9p&LH)y\na9y ,W'a,0T7n(@Dr8蓋eMɉic9:·6u\ZG5r!p`udi UR˫CA) jD!<Ԇ2B -j~0 
pCF'+|"WqjVZH .:AKӸK.hN i\\2.1{nݥB 9h*]NaSQE *x(=Nbs\|EuuA Mu:(Dj*vCu_z8PU+$Ήi!~h />cOqw<*0\B&2n:7I"V'ðR\v YL:^c`"R"..1\U:roq JI#5hKJF@,>Ry8Id._ 6HPWOAC<)_NN"e$z:^p#'i&\*QiAc9,mFжK-ׯ!2~\K<23)%R꒸>@ȳPdBEɌGiX[mO{V2o^"̵xj&!'WpuBIբQnQXspPEBf2 I*}(Ń8#z_b$tz2UIlCćS!&r4DV$JcnJPE)mJ 6j釓I(B(O[r8J2E.¨) =$DI9˿"ozpY,4n6 0zSH \2VIRp4L_!W%CiC&=""`e)nRFCpQMًmFUcVWrQMZ+dNPC*ܜ<_e{`a;BPϞ4J}%e{(M:d9sK&*$A Ic9u1j:puH/r:/ aqxuT6D't0m$.] THNL>CqPC >$tԞ,U8dLɣ.Tp)FhLȍ;rvBCـ)HhVuJ?)Y{JO bP 1%qTPbcGЀgnn1yTo].Sn檛H"i(ߝ&<'aBpʚFz Hl@D6"J{ v GA)AVM&jeYcd"F=n3hAA ɮĦ.BDD!S`uA;(rA q(|OIUȴZĆٙ(2' '72'H3ooTA.*}f4Om[дuur%CO/̱톍 {5ex1O]E tFw #)Q/GLVM,/ۍ b@\gq2=Кj\)T-|\8$&ʨ3tFf3Z_O\`WW=y1`c"5IW\@)JND\,FDŽy0Z!ց%wul& }Z@ >L&䀮2"qP:mqn@.hvHUmQH J -ڙ8\-|A#e-ek`Za\ UNZz(g82K:jω ?1yӑ9I~{Vy$-!uoӐI͙+9gXZ%fsX'Jo-TmP.H\;N 5@ )},YNؼw'`5\r;<6h5Jf. :CeE\íi ݤp/i6#e6Hh\sAyo *fMv0代N-(=X\Yj˨)H66d}C;>:ag !Fn@eEK,AcvLF[?`ˬj pbjƊJ@u@ρ pɚ_etVɪN A$ta_UJ#Hfm^9a5tɢmO-9fQ/{`Ah27"N;Z-KѸd8Xxю#M%%4$ֱۊtt_;6]WYҖyKqmhX-ygnǒ5#xɈ#:woj.i]<)>,U\ZPn_!FH0lRՆ=!oS]8|#;\޿WheЎߏ'VДo/ꉴ@lN|ʼ֧ zx1nOGΖ٨~:r3YEꄕa*6 }к PJ$=uƩ+$rR1 GOu3)E/Rg*@?v1[v6² w\ˢM(֕ RC?&+%)*qsiLbyH]t.N I#U)(;oâPca-5Nqy@WdѢW-0k@,˘"B2!q+μ<_6*c"jUdXw 'MUhE*qI7pEiæX .4}cr$ u:.h>Uw0tjRݥzIjޖx%Y"uh6-r)̼ .Jr _'jP:򪈀]4+ jzȀD{0v^s#Nvfn`e)Y;Ji;W J_+tZW)-bz PL"jTp\"L$.09ym " (*Kg0J|8ln@ &d  wL=}YJqesQvі dhSFdJ0#0Z=}H!vYﰾ39w/zD9?%n{`A%0pĘi-ܑ[6:tV#2>v3+h) RX7 0~f8\'XPEs;eSӄHkTn˛BEZi= XH٨jY4a{Iva<]^;h]%52Ul\he=D2)U:4)J}ٌzУO.};ldQ#HfbGíF dNO&2D> }mtAn 'k^̅dPy(PɶD*Twp~QRCpCr?^Q"n إt4RH=TT1;/ HZvhieB{\N>y[续&siVdԩA/uMȚ&P_(R) Fh ?*uj ZAnG@EOѬ ]>tHڔr T EZmVDYF훷yQ4-]:Ej6HR"s)8:ή)B#Pr[d5%6h*;a7l(TgS Kr&I͔C *@~Ц _f3Q,D+!eA԰+Qϛ8ߙ:H5T5<@Ɯ,`@ NJ$Xu!3ߋqV7Z6? 
+éeMJ=39xYG``BUz׈ ۰;Ҿվ//O1(dRKo!i;fb{cLc>/ rL WȥFFUt:[PCWG QN Sɋ$'RDd淕&5YMm uqneckV4FtĤ7̨pqtb=) Jh:!1Lp}_7+_3V4q%V mF q]G򺌼(u@]!I)T6 ;.pU|c%iaH@y@ `.'|B SF}N$6:e =VE2aZ0H rZr چHc‹Vu0e`a;\X#5ڸҟq@);X*](![$nⱃ'Mt̐vNz({%m9T)-n }0|+!w+į"C^Ïc$nX#RtZF֘--/$0X'y[h/5"vwN 5ӒB.J.58M%pUdNhI}^t|)N4d*jV[Tia EZ5j||5inKkк᚟n㪝}K,L]Gߪk bd"Z(֕aHA.X7Ȣߧ݇UNdR Lvz-TETGe~nKAj6g66Wfg #B {|Ɛ.hd!db|/(1d.Q([v#6q-Bb5J!e&3=mYI>~ )F ;fuVD^lթCDR廳֋g0҆:Q !}ȝG4QU1@ʔ" >$ ]ľ~:3`eS#iaƚ(!n^3̋ \6E@f1̶):h2évac&XTBSw\gu6TVgw(Fh,˴x s&]fU\j4gVX}J"2/^#0UJr"v xu1{Tx`XeEv4W.Hgct6˘:o wiu7 Ҵ2Pc;NRƱ|wQFթmzi6E6iR2%f$SfM 6L\G@8sdzTrP-}r>Vؓy"IY[fEtKv2aQ}6p; Nm3Kٰ;yg |q *r FF>XͅVrQjeT?2׮6pdÎѬShq}}}D/.p#Vs'٬g_cn5&qac_v!D!QHLtBR_yEl΂$5\6R&8x& |}ĆG:k6ȼbBYua$ԇ T0bȼ8qV|74 i15V$1ˌF%_&f endstream endobj 125 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 116 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-016.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 126 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 127 0 R/F2 128 0 R/F3 129 0 R>> /ExtGState << >>/ColorSpace << /sRGB 130 0 R >>>> /Length 5089 /Filter /FlateDecode >> stream x\K%q_qck `  E2HN$A}a_q05'AwXWެ>_˛s/zGçk? 
'.LJۏ}~xABy!S|[ZƔsD4aMGXwxEeICCLmhnؠ7| &S" ɚivf%PnIVxBBf=AQYXm mw|=AG.q uu*x7| %|EL'68a*/x qࢫo&9`_=IlD͋,4Bs@T,O͇-6 M(T:q/ $&%Ć$&_KD݂oAC96j?!(7|D1 TW-TTI %Dtɒ :=Lx&Ӥb1@K@4T(_$`KŸE HUBqxp_ zx%=E_FT7}rMrADiv\ 1Y ArYOo9Lw:4j(»r  {x+HK ݢ#Oi+G9: -:CVl.EԽVAP'),KD=)-Y4W#Ca }v=ߋAA=;!D4D,/rlqquv,jt-(I JԻDWϦ4eV.M 1 (&O[1W,[U0-hcYC(NԉMX2Ł$ <*".?l[aoY(P\[9hu>ٶ c * l:i: p ~=qqx# k ʚBMȫr:>,əZEH8EtlOQod2Vc~T @hq/IM!WξORgD2Vc~khnCR[YT9M:-r2^MY9}/YA9hxx`hImCX/il?x޴BVV5U3`U T0Y QQ=;*DL^ڲ^n S̢^\YyŽ]Hp= +p05? x~:6ǯ8AZű|,jh6j1O:8mDOQS>5jw@:bd[#9BSa~ksT\:C['婯h<'蛌21o<ϊ(iB;*5;!$WTG_5B}ҹٸlr2V}vEX˝U 8xѣh`, +ժۤ[hԮ =N_,Ĉ Q]BVFEc%Du5Vh%kRM*k9hFϣk]+pU welPK R*#U+SUk4lӀuVgAUQC_#e-pLA'GH();ʉjs@8ʣt; {_ 4G_Է5ssvoCZYJL| 6iqT͠6iv lNό ؂b@ENĭ/f P5N[h,oR ] .m' *i5 ,RUr*f0pGm_b6 Y4we-ԕne Jʉ7f Yi%l(X}ɉqrbZ)Z~&ACi+aX_6-& +ŕVW+CeJ=ig~\A |Jߟr%:>Ҿu}5h$oSs>tu4ef ؊=}3!݋έOJϏg;~|>4BbtS{ϲw@clO(2/O:{ q>Y+eߤ7;[$8V^b`We0w}jf?ّ?^]BuWfqڢ/o  endstream endobj 132 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 137 0 obj << /Length 541 /Filter /FlateDecode >> stream xڕTK0 WJlMHpXsc9t;i33 ر3t+d۟Ntө+e9']Uݘ|OdQ& N|pa:}OE`D$*J6fwkTUnM.)ăAS, Q5x>eMQ$++ÇiEC|IJ@."b?ɖCl)Bk-80JQo+3Gǚ&ՊC3eN~Gn` 3Y,a bP~-Ve $vn]eL~--.Ǒlr8V0-Mֆ;0 9n`ٶe > /ExtGState << >>/ColorSpace << /sRGB 142 0 R >>>> /Length 12761 /Filter /FlateDecode >> stream x}ˮ-9r~J#ɜ` ` @@jN͵Ҡnuw#Ͽ}~ӏw#|>_~_}bZ?W??}Oy1//?_秿EshWsBz?:}k>G~DH~IWΟ۩LG͞f!Ӹ]8Z MĹ{c)6^y^U[ ӂk1aqޑ$=S.e!V!MvV<}:Z<;nW͞-{sP 
*N/͛);3*dcX;;ύ^usT+_|8{psSJɫsؠmye2۵Z25&wVY'uLS陋8тpRX#v(NsTb;<|r NգNgDoy+k\u˿Ia ynN` ^-}r)yսɖwׅ]HywkUX^]>Oy 7ӦSB*gs!5^hM UQ#ygrOX+'O͉bL_qhtyngFUk0|1sBMcbXˌGi޿g1 CSKf~c sbU3}ۦ ǔ(RXj{e`qmq*e^U6,͐gKn~+&tF璀;S|F9=}R>p Ȳn>zIJ,y 1ɞw2^ڻmJX4w] o|Spӹ=J0;ZէQ=Cof.z;6@.Kd}:W-7}D y.lS5Y̷ `Ɲg-Ha uG'8g[a(AJѵOsg4D`wE#u6}m=6Oc,Sup'i>,[t0=29I~R2]wzm\tEe5ݕb1q=0§r)dd21eҹ])O wr)o29/ ~ dZy1]L벖gI5ѱ@pr .bZx.ʼ㘷⋵,UN0O]sdtlZ8'~ӝvɡ p9t? +oi>Go[eJgpKΏ/%XW݌0X"As y.+N% xWL+D{/1}L=nrywܔF!O9_"v7/0y#"M۽.T&C|nLy`SwЬ7'D=:CĨt2-4 ,S. G1]j̭)]J'4 ͧ@uyr`϶rsvZ+}^ Z,Ѕڄzt_r e84=Ff9r F|ai=|-QOŗj#>ܰ. 2r2a4ߕ@pAV LpɎpQqC0sqsr:oέ+5?Z+Mܰn0:9-f+4C0J2@xB,'=1̑{ ȃArQ בGenTۆbrSR~\F>}q(T˭&/ϞGwzwyzɧw\]Y‰יne$gT:u+•-\b!8 s v7M 9 ~ϙ*Ȍ汻+d^'2. +OR/vPpr.+&Ċ>߻ٗ`DK7Uw7p C;LKn\ƅDvFvά.NGGBy)%yRv\Iy@[,L>Vt6w>}@h h#ӗΰ?)5ULfwWBܰ.FN$8OqһiGM5亝6*W%k ǘR_F&T7}9_Jް%dh4sꅀ)lQuVgɬ}h:-Ŗ=naQh?jϫb?bEwyŒ`Mb#h[<6Kwb۾'kT?@ -1ݙ 5Ԙ=^/l=YsyN MK1urZp=%(rJbFzKǡKߘAE@Kg:o>gKz8T,sFfgFL26W55F+Y,`E` 4jLAl])Oz#.h:230j6_Ŗ=4b :Ra]j&gu32 GAb"plza]3#煆425e"<ȃD=Z@=Njh:0hd^Hh7ZF76b!*7,R15+?8n<>pq{BCl8yoqbFNޅ-xzZC^Y>8|BK5]Q;xi7]?熖 Մ} 7s21I3u]trWVbw00|ۜQUb FFCx֜G@Kl[1zhlX4M죳kxoty{h՚`7V;jx.+psyAk~wCn ZFD 1[cB+22*3h՚βlhM5 y35;{: &;|,-Vݹ[.4Ē|Ѿ VS:Fib9kF҅Xi;zrֺ D~~!Zպ%Z4{ExhnC$5T 4+U u N9,I !҅/-MFC%PlQ%,V-=z@!'9$@/VUŖ^=P9*Rt?Lo^=h}Q?k?"^#쇋-!F |Ecl[WК%hz8u&/-8K31ҫ4|ͣZK?H2! 
| bKO"ţTvY>BJ25bŖ^gPbj5Vz]KLd0Bbҫ}^.P\H:/mMa}XOj<[zIHPM<7"Hg3ҫLY{N>;kq´z8[meCصZ("Ŗ^=账!;%5XgXlLq& 8`"z,bK> \ɴZUbK`Jf -zyP2iPg_:E  <S*lC9-֢\|,ڰk *45$a`&~TȶMc(m-z'a/4j\gl\MXa%TG]l="BM .]}^=#1 FbsғW?TQ7W?T1-` z3ʨ;ǀۿ^R`K/mE'40~ѫHop^1rf2_{&=q" %\lHC,O[)R7sb$mV1*yBK-%7m %P ԑJ.NU ?ch˓%⌎f!Vkf=eH h?IJQ1R/J(=]qqO6W-=t$ ΒK4]7%d)}SV푮QŖ^oAA"+34^.g9Ŗ^'f;c@pg ĕ*SC w`Kha>s4Dg۷~ o#mWV E_nUlaK/qD7cſ#8Ŗ^=0|5>cw듶aƃ_vW`wuڵ{W2!X4j)AŖ=W;g1Qj4Rjҫi[GEu-쨭 MbKƮͣ8$r^3ivLbS3z`P*Z_ZY-h0k-haa.P3/- ̳92.^'ϩiD}i3 a=p%Kz)Al饣ҕ$2ECu ܨW[z)Ix Id>-'/f&CɧVgbK/1 L4xC<'~oK"rsBkyD+=M eMz&7_ly&d/+lyXle#>(T0m.zfXyBCV1mVŖ^oAQ=K[Wz'`ڮbKo{6.[ЖXRaŌ} ~ѫ/G <~۝#ti(/[zr@B*AE`KIV33U3Xbn|\>yW*kCo`KdE.Ǥ؃-$龨zgn~{off9ਬx@/~KIxWEQMKW/z#?Bz҄$?lՃNr .++Cҫ*XVp-KM.Ŗ*{d{ $_WX5.F=Zl/ᔷS:0--bxŖ^:.Y=b rI*iZ#BZvŖ^oBRlÞG/z4-QTЕXl%ӬwWsٮ^`Kod{gWG^^ e *^$7О|^=(hMeg˲rG~[ 0l+-ELluPCDjIqkc&7~oI5,=0 Dz`ɂfUֳu.rlR n޲,vXlijAzZrBeW̐@ZqZtYj`Ko4w%---`K/; IoZjp^x.ړQJ͑YCiAklm-Vgԫ-an,' rSE 'e*"_l=yU0=aaKowԳ {G_;bKsН,9 "$b W-zjz-z@E}gyj\lՃ g#<}˲>Ŗ^="`#IU5DT(!v߶ G/\,jlO+\~0bHORU~1^ xϦhc ,W=(<=l --Z[zݛ4,c[$z \<8[zceb 2$ E}?.SEjf~]S^Y3S/~sO*CJ2l| -o3%WlBorZ꼶Ko4v1\l鵣FWJ\WT_d @zzЪl O_ބf|W–=(.SB%2 3ҫkK2 J}" XluUZK4H:#l~k-P-9O :X[f`P8i0ŖގA[3t=-z`nlv_Q[My*lAfܵvW?;sgl]}~kصO_N 8ܳM_[xc].r2lS$+11rW4aPO[zM}zc 4ƕC R fgY^ׂv-YaC'0Ɗ@(}ELʋ_!x? 
?9-tg+YlՃKOByaC~Ζ<1$;ɳ.~1l1\lC=yXYWay \gʞ,YX?`KX֊_$a,ka, 0V2ҳrEsH̅EjHGc#JE@] wQ%lţ.5& 4Ŗ=Zp.Vh[5.~-oc"GF v>M^ܓvL/Y& "6g20V3|3rt3x)5x/!Fݚq-P j Q\/iYU"&ҫD*}n^8!Un`KP?(*x.,Zaea h]lCxɴ2-if5Vhk5~ӛ=gТ$t'=y^"Qkj6ժi/z08H#:/zЇSY"_ fMaeplfbNi&k9<{B8fu~ӫ$IZDyx7C`zoh7Vot?!0 ~ѳȰ]eu.*Ŭ_ƮczXCuM.ւ=8,ӞP!;$hW'eG`qou-VQQ?sanlu)v}z(hȲK3a:E0wd1 &kEf2yl5Ta[Vܣ;I!7ofTެfVҳxTSi[)Hn4-8@POƺbc[z-zP|˶CXzXl鵣VBᮞ?P-zPy838-U‘3(`bKz P_=Zzӽsvp 'hE;f3+>ݰ?)BHΰd^=Ȧ*q?A7k%ߵ@dj- {?j$ʨ+y^39X^{dY/ ;\n_AwAwAE`1+77Ni[z#_9rD VfS=,l5\C|\Lg0uem7z.[z$CDq|jC[z^F/б-~~UثT~}:qg*S|i?&{*ze7b[P~< Z,Ϲps[d!wl_{ Ȯ=6(`Kϵ6PUVV UJ-V+k(R?^u}9; A%2VT4T0EB-N.ll^[zͤ}Vc6Ŗ^t'Ua}R@[z`٬EZ{2,_{RކY\vRRR%r5U{4ң1;ߜ`]GQ~R Q.\;S^;߽3v&ߕPmwNةNw:#;kt wfhuz`|7>4 wF6h|5G#pmA⻑gCWW?4 0`.{dQ&CyAN@*'ULiucQ0_ G,eEpFA!-odj&rlU#JyU6(TN+ aTQ J" `1PlEA'3*"spXX2<). 4?7&Rq10?GyW!ƁH̏t86!z[!&aR XOA+byRtU1Cџzv mQ^6TtA7SVXaUE7zAn=up7#I8p~jy I8V%E ^Q`CUPޒ{}WKoU}8 z[] j^d؅¢`9Xql|5bQ =PlSTI3QgAEشPggX[}`-Ujr¼(G,}^XJ: Sj~N.%*ͺIfu,fKtU VsB>>8Ng3i}\kO¢` x"x傺AEJnX{X֕nܹga+&M*,CC1ulL *E(c:gp]LOTYV Z~. w$Mu~XhRD1aRD- ) {]XOjg %VFONZeU#( |<@>9aq]xg> 6bZs";;6Qiӕ( =p/\꾑GĒaOp~aV%+Qwzd"J$0z'NEvi^֊^9HQEѕfQ |ݺ9 ,›K19'<O,>Ə"&^~*?4׬OY6&Y)#,6#}t"(켤@g$abU|j;Cd:<N!2p;6Vǜy-t|ljYD$(eThV%ƌE4+ vz%F6J}S G E0|jV2?H Sl!] Gܜ CU݅fVI⸪{om7~t]Qq$Eo =q,j>,ӕO 7E}s{ϥD7&EVsjW UvG|3]*<QNB A,O=SX{+W:}1~*&b*+!]+=mrS"1kCQ %҃ ;)/A|li&uu2/Ak,8il |Eɵ>%7g^B(K&In8#O,MwōhJ_?F k@V_cg^ͤ .@Q{f=JD'qh_19^~Jox? 
*VςIl8Puk<˜jP/}㊫Fir8;M<5ƚXPk3<<3~Y<_BIL"hDeYcTJ]P1:WTplUX !y.8%g+BAi!x` ߌ3PEL$D&lʵq+X*mZS0kD!K W(hBWO v?Q!mL :)?Gӟ]t"z{VdU3Eoͪ~f Q;ԧ◿}~˟x/|BZO`Mp q~ꄇ4fuG};: o&4?Ƈ endstream endobj 144 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 133 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-018.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 145 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 146 0 R/F3 147 0 R>> /ExtGState << >>/ColorSpace << /sRGB 148 0 R >>>> /Length 5915 /Filter /FlateDecode >> stream xݝKmqWٞ$B$)Uk]Dd|m!֝k5g=G/?~ۯ~ʳ?~ۯj?>yٟwQoO>_CbkYGgHڟ\dzH׏}|1>zY} /7~oo[Vm=[}|l]+[?.^om4b.~tJx{}V ފ=խq[˳kݏ}>:<|-OVAqӾ{w/={cÏu89z-2:^GůcmnGJQ۳Z+R<7:W=jkx]C=NHϻodN5uw/2uw_5P^5$_SN!3*P\7N43A_`؍UOG+(6bsO 6Y]E7]8,B3@JQ)ul?;&JE\fjƷoxԓZVQs9 P]՚:ڞ`h FegwGU ?7$۔o!^Y`C ؇./\]7tꢚ2Ařs@6 TWA&)!,_N6rh;Xu ?܄Dh:S0YmDg^Ḛ2f,:R"x$.f %?nƙ'=|>@I#tePXd|iH$ KN%p=P7R$s!2[w1 =_DR FP$N+7F5/:3Aŏe%;@4/M<ݳ(veAqegEםC ȮKS*=xMUe`sˊ@Z+Ƹ tw2ĵus3Zݕ 5Fv%iŷJuhډҨ~.S_`f'#rӨ_ͣx\~gd) МzN;0}g'젲iA lFBlW ͓m@Ev›"Y=ہ "ѐ6%i^HtݢLՃ7oA\< XJm'!;}GCebgLJ0Z:S`n5Y1yyя@mVMa_|EI|.LgjBldv;lO#8+wPi Fؓh:g:,^$QGflv!`xBƲ13*)HMLGݷ9/ظٙ&A$aR"Ҁk+‡+) #a]׷c}{wA,BЇ7} :"l~Zm!]Y@$^]t9M\- D0aR: TIP_y9\3]cFwz"`RHߺQT['b]4_i9Æ[ٞ0Y+7:,d4}Wy K0>{d-' T*Wd\j t]1fxSd1K6r>P!zYϼV,%6t]8B:F; Ȳ%rF݁@[0^[QQ6Иkup!U!utl ъB8BjHë2AttlHKpT9 ,JDk@emaX]9] Av!+-.jBT?JRDbx``u 3E2ityrl>u$-MZx$ ud@i@TCCH'fA}'.A*MH< 
jvaGX@*yY8{YDMGj%h8萇xyZ$EZ}p ӏQQJ`\|=-iPio܄+u30Z eВb'  &u~)UqMV 62.I<.,(َAGH>mlϸqV`A($UIewpْ0JMʬ˾p@|{"Du[2}a[ǸU. Q)kdVw*%1CJ,L>9 4NQ/3AI![`x ِK,t_Wu@ÊE$"F/v'Ydꬬ5dHAw6:)(q˼ kdaqCGZ]\9WJ20[xQ$Tp@?/j UXWBtER3uY\"˨ {p'餪nvF[x8ݧNd\DHnQD5mR~)O@VD61Ϋ;Rjc%tl PJT`YELŢw~ȈʐR6NwI[a⢀`!Yؕ3 }2I}fo+lL C"th_5%§=obNqRkZZXhUo-LJi+9DD w2/u;*{tQqU'I+­k)=6#>"Od^q(;9VW .x|/hTndŁoOX^0v|7|}^ P( 5{IٮqgBѓ&aȩ~df+454Y78GT=$"y^@FMIzw}]8O]l]AbI=(5=WgoDNb3wD{m`;DC ^7]G`4P~4u[̮!Afmqh5ur炒tF"gʂSw>csVi*Z1Y {uJxxYHl>*Y$A*`$PۚNBҺO[5(5(W9>9yVu( #6QIb'mCF͞0z%^T LWA>{ L endstream endobj 150 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 155 0 obj << /Length 708 /Filter /FlateDecode >> stream xڭTێ0}WDGڤM<,J $ xIT4vaM\3SH.Enkml,(ُY$ M](bMLp>EM oM%\*Xy"N$XVgQJ԰.5@6Ú(:7=*m̈P/bHK]F:=C%i.7T/٨.`Ӯ;ѰgR|a`$։>e}jh{Sp*Ls=AEvn,q\Ȧs{| eVCN`09yLl *&Pp\1L%2FוQLuk0/hRMAB'T/!ڈsY_9J!jy,?,L&eV26Ix.*(2}$O}`n܇. FӔVN;Hbr`.X?-*Nw&ėULd7k2Zx'b)qVG@+!9I;NO%r_!Dy\8]R3>CN{/ӷO-0wcո{W<˙ m,2. 
SGYy0xgMb\ ֫jPF.'/kDVaٓ[5cjkZeXZYƯ?G endstream endobj 134 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-019.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 156 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 157 0 R/F2 158 0 R/F3 159 0 R>> /ExtGState << >>/ColorSpace << /sRGB 160 0 R >>>> /Length 9900 /Filter /FlateDecode >> stream x}M,I~[ ؀F x15]m֫$Gq)4$"$w?}_~o㺮o_a7-!|0}񛏏~o'2HQ^7~7)՟O[i7eTB??Mj[_?-|^oǨ#@v1b,$b ~(^k` .oiE b1@ϋ(W0IȧO>kGցr[y 2)D^Fe+ }`!" "2U@$?(Fs7:*]QOIB!)$W1gJ Hϋ(Wց\/kF+Lh$'I&*Dd FAR`}"2\ f *IF]jG@}d׫(ˈ,I&u(|B$ \LU2/2²$3]וHHCNߪj=C#}"v&`怈WI?&z^@ 0%F֕D\p@ )r"p٫,+a HA@.ܹp\`ˑ -q!Εp]z2u][VM`Ò,+o!Y OZгΥt2JZ5h/0Xԙ,J^@J ('9B>9T U,6(͢ erz4tf"16XI4Iej b`$ E{q\NMH U )z`WP_ ʀaɀo!](5R'#Q0R|Bla,0h@Ǟ OqBp: y<8* cY8m"= W}\HtE^h] *:+'vP][he8e|,Ex~w*)￙"B`SQTJ* |&@Wa)VeqMuO"#ќ{;eDȓ1 ;~>M0;-eK@SIۂ; !3uD\fhLp3\zD2 ϽBQ:hſ X X޿ -0kcuUkR&Rp4]O`Y+8F!0yNMZ H>䃃WSwgt :MbgQ|E9 t ưQ^}lDI|ީHkgO{LM6"NRILPMȁ6"&I˔BB|յ&oOgFVhR`"ck|x<>9 6]dp!Eʙ(;ؾ ot3s4 d4l( /.'>X=MIXTDB'.?0pZWt}U%ę@+9A"~&V0R|_`ޙNOxMrg{YRS疪,P8{Q:d,_Q,rTu-7c'יMAq3W$279_H$sh tbn  -mGurePrgSޣd΄5U>L;_p~E()#S72籤nFS,%z awE2aWWYZ\$L4mvfλ1Y$j97KjJd0æfnLlMkVj.27!B5YóaUه/LySKGEgFiJLu98D1]xnW;\{!#4UH3rHuQZP/;qx_\EwU k;1ʻ5Dx=c,ur5 kb9z{ZBD L2F蹏+*[fA ݹs.0VnмV:L5ʙsT= Hwc44"}$=,&ڒ/@iнqm&271jv 9Q'RDOCߥ5bΜpȜ񰕱@DO?S|wMlK^a&B(ܜ~\dpa-1mG4ϗlR|LRӔ~+}ǯٞbD!)_۳lu(6v$ˬtA $J 6u@Ѽ42Vs6R-gkͳ;8UFUl4j𙠊@H!)'uF!é/U N[o&"{&ѭZa^~+P sAo^m2WʲIk%_[ V Ԧ&o-lo2[ul6LZÈ!f冉USfpw4D`IOZYZ϶.ޥsqx}Y鄂mE46eptd.I,=eX{j_Şm]F{ko%uЙf [  ꯈpdw7E>_k(PhCE?LINv[5-'m%|o2[vǺVmn"kw`b$͒x "[Is?_j?+Tj7Pk5x',)SL`m%]Dy_evRb{^[eQ/CW21bT[bUb,^fu|Su x%EmXZbBqdGiguk OzҖ2yki*q<[#cq@D}Vd0۟ɬȬly2D/ Bm*ZS3V?,DT #Ck:igIyB`5LkTDgKI{oiU@ ;?_j`F cʌ#ϒjUVR޺Z ]@OZR϶J{kФ14lkx˻t1 }1 $VRdfOL֒-lR{u؟ͬCcb ȪJ]q,贵_kM\9%ux}km^ɟ@[MnҊ@/exV%1(dd_kX%!= 97[GvKe=ՂeC$wze[W3J!x=s,N51e4B 0Q rel^̊Q>z[s?w6`!vR_yCv_ F_^):TQMEKG̙ߞڼ{ζSnO)T/.:ZE\`<*l hAj !\>UfF (z 8ӢTH!D9Ex֕ gB#`qm$P @9<" 7+P&նsZ,Y<(7 Ǥܿu%9 P 
Piu'2z`Wشby<1/p񼱐,ܼJ[N WQa$eS~p)#Uw`ata" wr\퐚wjT:Ev{͐vq)3pK TN}t8WJ3 *~]dE>;ȟhd6-owXXSrzs  >ˇ6y dž]t0QyNVWC@> $vi*{uyBlN1~1C]otMN @=1 YMGVCjEㄾ"3 BQAnJNzS ¶!^Ndw4083e 3+Zs`DڎP&<\n+Ø@ؿS]W@ S;O 0π9mĦz;a02g3u`A;tNΦ 3 ƛ/^p}EaHD>ˋIu?<C\ko&U`r/%6L\>`7kiφ+"g}fsKbU,XLЊHY@ yAG ~hZlE!>W\o&)F= p}w̾%Gօ0:k;[mefpC?fF@` \rIl'ހQ/!Cq~ r (BRC}rDPEʝ@ਜ਼L8:k<.3tN/Ft- |dY`t/lօAzf)ՑCxc@[^>nt6Ȓ̔M>m$&n3m4?*_lkS NϵzwlCI%)1-gC X T5{㺃P^p/6Q+Al,L b!03w'3fzܕԋ }9?ۉGa'!6v uY YV`D!믠S7XΓT)sɳ&ÿRNG:B08g$2 rJ*DKX8x *} y0!vZ k K16ovvaAtVIa$b>xU=0v;|})!W` KߊJ ۱y< ?b #=p3py,XUKEє:?9v,Jtk $LܙCpvCjuӹиWsla8 *Og聁si_W>=ӣTf@nU][Gr+xݟn6c:Up^wNwf_A}_蝛^ߣϻ^žnzF 8~+R< fxOvڏI#@fF3#x^]-G'tV~j5pA3Zx \5H)΀s,p^68ݰpLxKF%=X>-660y[IyTio\AٕQMyE#HI!Ĭ7G%{޷v)"㚴?_j#e${ ɷaP$}ylOZ+'NiVriOZRP}ݷvZ(a oxb.ZH72j C'"k"2iq,D~֒J6}ݟ-%>D8YFpWUlXʼn*"h-7ɮ6!ϗFvt8?\yfm_kM!*ʤ-ܕ;y'X|ݷv>|c5Pc[~ ]J>mRΒ {yeOl?[J{'lB]fFpVN'-J+W"OZ+ˬ 5f䭥yONJ{kmA?>_j2 pVN{u۫jQ7} iMq1i#Ǚ4i?[p?vDnRyu$ꧮk5O3i,S%5RhMR~ZIy믭H5ZEGl?[vV OEbLO5zJOq?S\")NEΒV^ktͤ Ԯ&o-M1g}kC g}DWzШn zvF8򞂛5㚴[K*vTiobh&1qXK?)kUYRޯxkU}k=EEE%k͠!n|zpbLiI%TI;Kyș4vev]_~o/*}n?ZCNli2Eϑc,6WfP/S>O>zi endstream endobj 162 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 151 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-020.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 163 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 164 0 R/F3 165 0 R>> /ExtGState << 
>>/ColorSpace << /sRGB 166 0 R >>>> /Length 507 /Filter /FlateDecode >> stream xVn0[& />ڈHR] ]bB.wF$<{pux9>[O \F"PZb)>;‡?hQ%mư-TcP(v~4p- ~oG XS^2՘˜nTmW=ߗF43 %j8~H #l=]:ZY#UHKdv+61BbA/0Jƚ`1E+ǢWT~XlTbb JSgrEO{V9qEW-+=ڣ|צ5ZRT}Z՜yFU|Z_}VK<+òGjnVK҈nuu]Lѧ]fSbiz6nH}J uNӨO2s߳mN]y0;Ÿv 6i|v1NȪw] VK+ns~EYt endstream endobj 168 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 26 0 obj << /Type /ObjStm /N 100 /First 833 /Length 1534 /Filter /FlateDecode >> stream xZ]OF}oITϝP&iڪ;5]{=`,BlT>ܱ={όeKPr,0CFcd%-9Oʓ47>ӒB) &ij0%4w417007ڌ$֘+\>8X2a*`-YCRDAu$}( $ aIirLCz;)8f3_؅Zަ|7ӭ$Ñp%Ka K !W2Eu2Fw2e\?OigΓ* einڦTg--a^$mUw6@ J8sw?{Wzci(NquaS9k4GjK~іxd/ndGџ2o`~  n>rA5ēz۠b͊o>UX)J->bP&}˻[ '#lÝ͵u"tb;Uh'_OҩzvI:\OXXߍ]wfSqw?ߜnf '9ESY)@f)p,8k(%Iyޤ8;ʆO۳i) 'ME_QUY&?j|rT}?yl7@}PQ-z8XSbfx߅y:8n8")p*GxPӺxܢt>Iq󢜶1/I9EVAGE9{cZR% $T0TM5A<'(᰸x:hXLۼƁ$Eלe)&܎䊱ߦO^-5i"Ds& b_*^JÇ![! 
H?fB+uYOoK = Yx2oAD>tDJD+o-1-_HZ_Wda~.lK"PpCתG_y՚I4\o[ݷ H5x:g0mvMߦxίG!j%~5/*Ԇ_WWÊ]zXrïO_[Z+:ruW 6{?cD endstream endobj 173 0 obj << /Length 901 /Filter /FlateDecode >> stream xڥVKo@WXG`k{ Z Nvǿg^lJT(Z{3sg~YMOk+փ0Mf ~b@ ίt(Dn{qƩ7+o~1TV 99fcjiF*$zhSP,$㘰Z]l* B=_[$G:i"AyYt=܊[~ )Yyk̽c~&6igml2߈5{s;I{.ז#J9BZ#ZbPl|8Cr:pH$ A»x!S(p-M9V\-☢2NPّ[87+D<^Œk9&ukzRĦ6XE,$BԹ-OX=dbWH9_KY FF8ʏ#}䜧ι}CFPLa(+q8)|RߝP`{JpW.`]\/~5F |:sPRtZ\ $Q(~G2{w8BK)s6 CYa:G$m)ږHqn˺}KA- <%r?c`W' *j!BM; ^;9teozc+ jT@[CcKӗiz/hpĉ!wI Ud/^` endstream endobj 152 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-021.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 175 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 176 0 R/F2 177 0 R/F3 178 0 R>> /ExtGState << >>/ColorSpace << /sRGB 179 0 R >>>> /Length 9123 /Filter /FlateDecode >> stream x]M%qϯKi~7{k# `  a%18$H~}Ꜫ&gޓߛA-Qj6,)onGWwwRz[?r\g~kWoݷ?}{{ZWKkKSVK^D%UK~dy~U[ۯUNo߾_7,<|qףowKo?ĿWSzGAv?OwYC>[}6wǻoy1菱>i4T-1'{ǜYѰ佾!_65|p22U.Z-ז_lty*lQihև[dZUu*t ,Z[/cЂ1,d/ZMF :ryvk}z0mu*kI%a0R{REODE^I_ <}mpU^i^YB5O^:1'^Z^4=9}h8i>-YONsaU^뻅>+-QiAWK^뻅_09u@$!_諼o+&EG!݂>CIPVY>, s%jZA\-tU^BUTV}`U^c<.f@]_EÂ}X BGuZZ>,kYbM+a/tTFw }ja]?r"Ii*.MM]5e6F˼NA/df X^m=E7$ʊQY?#A#b:hfdey m:-^uw,5~ȲXrH|{LƘR1EcoH (oq^p$[iE^lDc+[敀2<_$)Wy +jhv޺B&j) G|ka{M"Q HU֎Uk/K x&~` 4 yCFd^Lj Xsѷֻ/Fr dݗR:䏤 ~n I"A&rG[ t9oM-\FuK<%0TrmT__oOo/dٵ^h^%fdɷTV N9^h} Bk^h3~m/fQ&c^hb^hj5 *qm/fq$b{ F絽m ^PYAo/F R|{Aۨ зX *],ju\ :$` M ®'n/4}ym/طBUKSr{a7r}b U-MB ɷv}qr m`÷ GԂ^FhФHSdMdl{@o/h#޾dt g8Ln/=ɷTN^'o/T}s`['[|{@S MaAs^b&ZjMB=R^PҢ^Ǝ[B摮]UǷB+V M",龽ŏ{3sZ"F䘲P>zAd=P,I$c6"),v31Sm.UV0.yB 4uk}! 
cb"h/N!цL/17 yk}p ՍH RFr QǡmjRV?f[_p?8->zw!o?~kHʓ?tel &ԨЕHLd5$U 0$b8Du[QR4> ._L^a`;6`;!J(û>h&U6L0 l`x { ʚtp(i{){(, 5ûYT.gz\` 8n= >ztt0l߲_``})û>,#` P`+/]`Xewnm[_T/Mu'?k2#kv4:oUُW}맃` 3Ag]>}F l'X݉Dz&ùA {mԂ&*BaXYm4CHn:Oac`^jtp+\F.彤˶Q`54^HdH5p0\:5 Nm2j4VOB^oc6DǶm/w@tb@ Dx@x @x/mvK9Py=Z.ZEeDDA}+V:/:ڿ`:㖸}:ED:ڈ „e- Y:}a4+q6R2$FJHɐ8)]!FJHɐ8)g#%/Hɐ8)g#%Cldv8)g#%CldHŮg#%CldH ldH !q6R2B [q6R2$FJf_!q6R23Lɐ8)]!FJrQm S2$FJf_!q6R2$FJH q6R2$FJHɐ8)]!FJHɐ8)g#%/Hɐ8)g#%Cldv8)g#%CldḪg#%CldH ldH !q6R2S2#7#c771stL]Zh># fD"β˵@IlX6*- 3R\}lu-n/ֺT~(wb15/6qܴp`AQ^xQ >N54y#/;v [+/mmo\^t/OW}k#(x;)G9]Ej襤.;kD/lAlBW@t}qۧvOIKu09@'J˹P= )HoOm198yh?86Ҳ$Fr`=0D k}'7 Z4}rTX@uD;r9Ȟ\ƋHAEΙ\_\X\.lKֿYtq0ZPnA츂BG (]y$hِeeB#Xd}^8 U7/D/^ϣgEpV]8.~ (NEd =2C3mݑ&~~c"ʍԈ֝~~uR @+(+"Z mQ~jS5ц8~lGxI9? >ݨAv /;9 C}:t_9 ^N />܁[zՁŗs o?v+H{ u@tW(xޯt endstream endobj 181 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 169 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-022.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 182 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 183 0 R/F2 184 0 R/F3 185 0 R>> /ExtGState << >>/ColorSpace << /sRGB 186 0 R >>>> /Length 6552 /Filter /FlateDecode >> stream x\ˎ%qWrf+[ ll@V^ZF $[w3j]]ydi_]__lx~g`<_^~wy1Dj巿{Ś1)_Ƈ5}J˞{JupT>|`02ddb(aXS^`XQ 38%<`bj?6 cK>]ajǖ" z5^2v{ 1 qcl0bxIpƖBd3s{ 1'Mq2R1G~G<\0hHslJ)Yei9"^ʔQ8u/eh8 
+3LdCm}(dTƖBX05R#+%.+XSA]<5?j`l00 *GwiHJG|c9#Dm6l2*cKDH4ќ>{3QOzl)sH[--3#{O1f 0QΠE'3tЭ#T(/fl2*cĨ#G"hƈ[JYef5)20]Fcf 7J26|e{)SFes.ACh]Fe(ͨMFefeB]Fel)'jbڬ\S0+II102*i%f:O1o3˨-"OJëjzŒVqkukhKɨ c18G!H'L^ʔlzʞi2e4mV& JYeTƩc@b)2׫k- 4E[1~V޾-(c? q"U$ʘ[40m)2R2Uܓ=*ʼŘ)zz=3H X,T9K3\5AD2Tq)py,\V.1n'z)3}]Fc"~ZA"z==M@y~?=撋~=!?ɳ6NZ6zTۏ )ǽC|M[4|n%HZEC- &"?p"a 2Z-<=ExYe#rׄW`CGi6?#gB6=?/ Δ;i}lo q;*hi)m'Oq þS]uw~Cs//i3~{=r>^\׷ׇ?yHE|ӟ-7?ӟ^B_Q z!JӕՉ!p7c-^ax=70J^<4Ijq1wZ<KftWnXbwl;ZAHzΖ8ya}}e9EExϻbt _/-O|%iYERbYT xr;Z>-مj:B;*UA A[=cVyׄ uBE6pf^) oȽ[|+VدR|oƭx#Dqʩ>I_7Ɨؚ@ x͑noW-]N8d)rOM2Z"שE]񶼽!M{3n-Fa擽H9z}xaF}G'b-C E;#Z'y6>6#/Xf ^ÚCp\8`,#G=Q]!E1d2رT=,S,]#O^!C'ԉYt3<0 z> 8 .#y38,\A)eS0sac~|pc.&MdRU> S<>+?9M,g\m8ˡفY' 2Ge7kpxV9>F`xaσ-]gk ,cy$/-o $Kvbnk^ .w'V7 x"=Ȫ _sq 'edyx9f1ˠ), IՔq|.e`)c܎n950A`vI`+ӗ"_7Hݪ{O9 ]'2"@R8-X"t7ښJ3S8>}^ܧX:f8MH :3M|IǐI?~`*DuVK2|*{`~+輑Ys{! 3n4#"a"߬71YG-LW2Ͻ4eq鉇W>+Itx'D/24"(\c%x;?f~N\h: p@NfY9p-դ}dzVfω7Q="߻w1'@7ș\[vnTmy%fRu`gd*awXsωkd_Fvfѵ~I ߱1V&< Hk8sw<cؽwƉʖω}N~2>xOڲ"14Y-Py3| E*vF|G\[X<D 1²o%54˴ً0s2e` $FƫbX>5 Xgyt681yp#ȱ@9q/q7e/tDg=Xlkw&6KTρiTw,yYEnm.>,ur>KӗtiQAnzvi۫`DxցW>k^ޥX1=f ^lZ>5m*UEuӂ7'hV1*p/+1皛} u9ޫ*ɥ6 2]䈂ئ.qz>]ʗZ iuWXk˟+ȗn;q쎍cIM(~??66g˔?3٭O]O^B77v$GCF^_? bNDUuՉ}^^F7ސc7>h_? 
> endstream endobj 188 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 193 0 obj << /Length 1118 /Filter /FlateDecode >> stream xڭWK6WA-n@PԷ,HkwJh`Pg3y}|ddS:rQխ^qcRӛgHD۟;E?x iLf*\n(dގvUʳDMbZX-V !s%L;~]X)=H'x S֖ɯ':{ãd,I;D n >Ĺa4Be #kyݣ-,>VَHޱ'bGJ߁v(qRVns2JMLY9 l] 81f? J2֣ZP42x6q2q I'BZw%xO?V+q8/x! (S䃻\P#UkXk ROeQ#(99*ֻG٢O屦G Amx9zc^u?$eC &>OT5/)Wvʓgڔ%>S$,p͂S';;U N( CWᣴw^XKMLjhˎm;oʊig;X\D",QlIGdp#?Z'h6;͊!X[:@i[n.͔ ucO#b8\bP]KRi%W!y> [BͅHR/J2b7K* |Ł&mjHwF&;nCcM]ˆS7lk쭓ʸI3 B׊m{EP턎]TO\Lˮ^, ka$¨f ;Vd  ׇWKTU[&Hftjŕ4av67׮aFp!E\Յi?a.9ɾػ#> /ExtGState << >>/ColorSpace << /sRGB 197 0 R >>>> /Length 16858 /Filter /FlateDecode >> stream x}ˎ-Yn~ΎcmI  [aMym̀Uږx<~|k?>%u9Lgq0)s=?;sڮ} >qoǴ|l\?1xut,s\=g1oy>9>ş3~q^|_2q~=Z=rDZuWc=}O}}y5k3`-x:k~~e%?o\>?|xGCk|m<b<=xI_匯??zW7_y.A~{ǴXoV8yX;uk׏L\{؏-z/\lbin灍cb>sxR;[ka18ƅ~x_SgӞb6 m‹3s|7eѮ%~x۸eza}cW~޶=7:Gx~⃖nc?q[z9ko)_|<~mM31.\kp˵2_-O{S8ecKl7;׏,G#=ײl&;2vϏFOG1kySUc07ۂ=[Pw(qŜhY|qřQ8F3}W8cū('N}Mݎ?ٸb[V-cuL#W۹=u e7mڸbQ:Vz-%Y- w~d[rO(>g~EWR)شwuN47]#0_e%'<Бcʘ683 Y2 B{u+1 ݞzpZk8#eicNF!제زgk'F9Ԯ2vssb#y\w've_N v9uF\9N%Oh-ΧXul_YX\s>1c<~ŽsZ93l>W\g`υ=p`-O6>c(5rlgfݏ頔+w+%M_)b+3WNt0_cӏ.ۿOzi[gFUb51n"ˉ$p-9]csvrܭ>H|?H0NH`L #*3=6.g^<8C.ٸ"W)v9m\q.,1ݴ+"˷g?{fTO@+#)ѮWf9q bFDkVD@\9qF(Fu-퀒f׌m{e rG=^uMOx/'m3\,D v0"|/-{pp-8+p/aA"/m3m<.~ Xmu 76BC֍P>v E*x,HOE1tJu/ߏǒ9uX {#Xn<1ف*0=wԙ6m\q!dO(5>xk;N lۙY ՟qFT>#c.wqڃ' g<sc]f(İwn!J$mw%H>^5^ۆf};=ֽև>,4^i:^XҴ=xSOjʾJcc\D1Wl& юrQWꌂϨqeFV1xPx#a6QϙHkyV>u`{ 
`>Շcgji32Qky.m{VōS~y:Ʋُ4YbISWKhWҔECYJƿ&=5?c=v טJ+Wڮ1=@ cc`Ov{g[|pwl\q2.h=;F6HA[_*o+h[~̵Em:c?}nun9Ź_w<6rءy,byϜ܂{5*ckԃOtlOGgmm#8i8>cA)8H2O|׎_ tO+!V/8xv \[5mEv-[ 3ni`>Q4Wf![ "Ng,/xVPϲL>[!{X#%2Y[SeFn{Ve{`=+wgWoavYv(Wb 0G̣q8.<6np.-nԻU(H=8-+T/Aki+9o> G- ~_1"5y(FLJb`kjRvNS8me82Y]deSl'y[t{.}aA*>lc>Îa>c `XEZ°aes #>h`zOvz5m\qi;6eە<##ǹ mj;^TK5q¥۵ Aq7{AΙ .uhp΀A 4@ΎZgG%pm|K'!JiQSZNF8 6 Da*z+%lJh[ ϔ=S?Slrzγ< ?lj] 0ti C&[w97J̋l\eĤӜr`^ WڭuI'KO{.a85# ;7)·\cۓ}_kpaB&%0&`5qmsgO9$F~])GcBTJqE(feH`F'!+@Vxq^+Y/@?%o*HZDн^{1bE טقkd{Pn5 M(WV!{Bnq[B޷Op1*փb̞Sgdǔ[h)(6Ɓ")Ahn DW {q=uRөX};-:/0dpw>mmv6]l5/o2Z42а6Ҋ xnm2h>ҹ>.9\,mu87f_oך o-~_ynu>ТR -TAT>sTASM LmWPos@"ƲPIp-<+W=:>yW?1@8o# չY#,F@8U4/5 e мPŝ!p2] J Lu 8QD dQPYZ5C3kܫ/Lg@[%,8g]E%4zg튡}$u㊄}n5 { N^Nvw33ӏS;Me|L<0W/._ oQ'G_*ojMl`W~GqW/yW)T&l+gcnxAw 8ޡcSqwvE\Ljjz2p#@E py2Yz0!P>O֡nx`bv LOџG9tW3\]1쑙yGXV+TWv+T[ݾ.P̭u!_Y,D<γ`י o `ؽ7@Z+[۔Xȭ ŏ@JOuYA|/?~ ̍R:pEv<\߰].pێ ~) pN.S$ڝx.2^:_|X}쿥vZ[#,Vg5|B{Df:{c _Zǜd}x"S@(B"S0'/=ZФ=.T4<=#[O3G4ۮNmG}> s`6#<#EHhG~B-Kkᵃb /r6qYMY1q/Zu@(խՁ'%H.Hu2Ckoۛ Pi!wyAkᑠTPuDB:2b`2RJuZ(ja[([mo$xm M㬤)ZUGgȮ,cbelqAQly<髚&ᬪ)H$b~T?1,u Z)L(E/O~Rr#)o%7H ̆< :Hgm]ZKvz+[yWNݳl்]].<6-oI?@k+ɊoпmڢmM?Cd#(o}=VӼj'=b'Q{#RQWPokϫZ{PyC}F݋~Gb8Z M돚Aqu)pAs0Kv^Fa):L>@t%-gߒ[^/%OP0bhzf:zf<[ϬR~y{y4H.-J;qmzddj/xg>r6QL*4Yۅ͘XA*ܦcՊ"ܯV$ՊbuDP-/U]nڇ8ܢ~ߒ<[Vﯙ6OrMOr铲ܬ6>@ bk]=P~"z[}7M'W`H'7UY}{~lR\vjˍM~e~O퓥e?m%M`W^+GuiW\k!. =  Sa$"*DlUZ玲G I+xWNjP-ԮZ7b-RjٵFH-Fl#x벾l-֗tE@h85_QW9OQԧv(ɾ܈qo`S<0ԣi bm!<nVMu 7qWvℨ*NHU* *_O#U@^Br)=IO8u%B2պbZWhE[cXZ"TQǝ~"D9{}X@#l'j髅;}p'VX?3MיB:Xgm$Vk$~5|nk r ahm0A ͹2v̲NF 2}ŀy|fWZKmF;3@n|򆯪Zwm"g[s+zCE-}yɾlٗj>4zW;S]B"2/ʄ #}pwkD?1qgx.įhRM1k2-cM1I0. 
$pQLLv|u*6T% UԆL5/z}.^YiB+7Ucd6Awy~OrV+B+MG/l$xBoAy"Vx`edJ22]1oǮW&yuIk]|$8ʒheJ[Lib?18>$%qO3P3lyIVsE޲lh/ږ-xB I3h+zĐd%1$Xq<àv8N/̡vzq(Lt!y Baqz,z;#Ϣx皙`|3> OpQ^]ԝj}QPW~qv%ƜMWŒL̄ < 4[xUεs}0Z{>{96Fyis5}T\N^4;;h+2ש+HqhpGG#[zm!rƜ6~wTNZBkjJ\εlZ:ZJKG\w\$6ĦkWS3^i1Բ x@DdnDd#KDv/`ƕ2WRvq%-Ha:}'umD'@2Az`ڰ^ya+p=0WsxNibcvu-~.Cܒ!e?,X޽䰍>zk_ N`X:aN gsa -rv-Ηtͽw57=5QӞUlmϜ|)0Ki/]<[dz,q$p|IB7ʅ'bGO5أK31cDz=s^GYz]k1=ϊvĜ9qak[ze7= ݇ty*l+Iu?+)h9%6 1dٮ9O7Ψ"QNJ`&mHM>Wg]=kUED5I(F#>"Z늱ky+.Q _!\3 ]`eD[ɣZ@Cкh69NVl(ps4:n~7)R$ D((V:" î '|a l k`dX l>BG7vCn4@#`3 ,P0,1 m~3(RD %;@u`@{O T?&dZJEv/\ηCl"c"&a))w ZjEI-c"Iɘa0CrO``d`T2ܙ WU,GZp#5CG:eXfI[J.JDE֝pg->y 1%k5?0 % 6~Dm3JSv{B` /,68IZjꢖ`tj+~1jBm@|>AQ4Zꠑ;h86S9δ!zo Q/YAʣ_~YϷ& V3E)J]Ot:@+0}udMWg;Yl+B!]0$f[}(a'Y6:\d _~.T2E"qbu11Fv*RpEF#ki9㰹˱+FU4*)‘`IH>fk{}82~jdh)Pj3T}`XT W:* Y <nvƮ#bnvƮ:HM^;vu]`ج!OybR*ZT+ik$^cR/)? [@An nm;ihxebc;i`Cؑrhv~35B3ʙ>r&WХݑ][R.jD?98v9*2''\1o1d*6b')GVـMU6hafpXyg\bnj\K!!JBwZmؽu5nkz.`-nxDIcu_8V?hhN/9/]^\Ű΋qw8RG~j> &d[MLzi^ACL9M;X3e|Wi#O |{b讇+ˡRAPOf}3IP'?c.>uB,%dak< Y|/R{i BPiߧs0OaV#as"))+̹Ƭ`5fZ|<){Q[վa,M 9'l9aq:d9yb9ؙ` 3vd,n:7eWq󉳁Թi`u+ Jɕw+C4qFwuk||rGq.xxM>'x R :걝S6Hܡ"}O`fqG~c171?2E ٞ/ iGOcYJ; ml"+pB[]Yqo]0cv:ZAXcKܮFN:k'iKܘRV ]4tRXT:zI:pmususB~0Z5hK1MS#zwGq8[g3Gou,R yEZbk!,P+)Cvݘh ߼0M_4n@gbggɵ|vI}VRP+ywCɓh#SUk/R qxNpa'\A[u3:A@, Ďrڀ[څ/p|Kŷ &֪@JZ5*VeJ}uXK^ֲ*V#C⁑Zxd-@4!R⇔&Rwq6k<^3pu5G 15$R@"~[O `y.ToWܘ$I=T"dTzt%{':ndF@MRB`MRB&)X1&)VdAr+F"=08$<%>2s S0ĸ 7!ݸ B}h7@*O9K2yj:zdWG/}Dr"DSDnhZkS4WUKќ^2gFQVKX{a߫&^gf|a扰{D-oa[7ڗ,-{fuUU=\祾uI'yWõu:ֹpGdjlOgߟ}zڽ}{ S)Hwl]Ar$avIއ xuN yWK]o_$9uIr$m_1&ލp7vn j}gG=t{JF\[WJݺ 08'{ New"m.Ud  1,pkhjh~n솤jAak$Q*iY/`hy%vZu^ЪY@+>1R7NQrR7^M ևUϬU9dx#>7Ӄ*87΂t)qhnmx 6E29v/Kl `mXq6̋0v^FyhNGI_,@i"Tjp>z$~`r47A ݛ$]bj%JkJM=nכ 4B GTtGTH:#*֞kLFD}<Πb=YjҞ,nYS]GjLv 2M(܀sr8`9) J{VTY h *Ԍ+"U58f\ڭuϾ6(W.Dƕ`hQǕ9oҶW#'$$f t51n4Pߍ_o_W3$vN93I,i ^ֺzU,YǕ$};)/U;%*@oYmwוۚF\TqC&M2،#RT@D SD@ovH(OHR,U#KR8JgCYPK^M3]W$h7rŲ|VnY),YG Z$I+BJq/9H9H{ 9ڲȡ0L52 v{o.fxv5_r:I)F jΆ6՜ ja4,9$5|5[@'HuEmQ8ʤےz^ߺNK]uZŷuU+ҺvEEZ׮*D(}[r[&8Y\;"k]n8~떺*E0H5UyYEQ7*Pxc7 e^P 
PA,ϩ!}N k<?3@ *[ ְ{ Bl86F4‚R HR rU\TC.}[q>?u} e-gmVDӬ6_kE8' iD:,rJvSah `(]eh{qBKA׻VnfPv#L}B2U'܀AIOtxq͟8_#V*蘟Y!6`&$cO3]юeLdø*2UOqшqg~LףHk8Ah㳡 |> }SPt>,&)#r7yoWnO+aOӿ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 189 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-024.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 200 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 201 0 R/F2 202 0 R/F3 203 0 R>> /ExtGState << >>/ColorSpace << /sRGB 204 0 R >>>> /Length 163199 /Filter /FlateDecode >> stream xˮ-ˎֿ_U oѕ ,v] A%#UAF0r5>kn}X9HƏ?m_/__KBW./G,?!.iwWkQ5p]ՕqIA٫| ՌqundE`mph\Eq5}Tkܻ~wmpvzcSr5V 6ݪ5eF'Ƽ@ص;pk;^ꍵ LJM>Kq&/4l&e/<ݿ~?(XsaϐU\o2/#T-,s)?TNB%xuR(^{tA,+|_5fmb=aX_dn73^GG!O2~zV)CL5c@̽Dg|jp#k=Yz`}>w tX O;-w꼂Ak+3dmx(6چ+t?[+#畠X]Tٸ^o6^ I\hG6CJ423@= zwUUbl#m?V|YS0/zPDl?_K_gް'뉘S} LI^j-4jqj8x\ߚX=쯩E4})X 0'㷛}da5؞iNj8tp ViD`uh9^3hIx CzeV[5k-I~o;g{2~W{C=x)hc{ /uYE߶e~t~OWg/ <$[N_ =i26᠖bބ]ϟti9tHX'):z6^_r]1g%9(BZs'9$OT'6,UpCu^.z[~~4L`\{DNp nv]g_fKhw^ U'V9]_l|־u}O[췂~a_x=|qWy_dwi<)okQ?Pړ;y'F޴SQI2WzX9ׇW`2 5r7 =!s°sW&pie?`۾#Ł8pPSa>g؆gP+n#2*wGd?j<&W5u_ϗcZ*<7 c]7 .\"'z/BOVgJ+aχ?P0ǃ8vW,S4`\${^>/b?ưp| fF# k_xy9p>=(0laπ 0Aϡv! 
,ҿnLFPuwñE̷aD ߈s@:aыbGO$ /ÈugcZԫoWeN2^'7BF na꬜:^QfaqS}:%a]0B~|^!6g60k{fY,f_Y?]q|60Ū{`~7{Qp!h/!UhѓeE_O sgegG=J3]C'^:k\_/X櫑jJ㦽O0wXRłGQ,FW\m~8} &~P?csGkQx?o˜jϭ 6/X?'8CЗcO?WWk039 z|*\i`]?a5Yo;멍ڸ~fJ}sV{iQwlH{Q8(~bѲ+~7/!XD^Q&V4/ORDRVx)9^CW7K?I*bFyQ9ҿ ˉ_ks=NRQMqn-/|/Gq~'8c\رy2͗YD|7JcU2AeVeO.Kx?wy~oe=OT=x8Cp,U>Uʼn7c~r=($>R_GkEz;cvŐSٟ>or?A.[{bUiQ~ 8s?xw0,>Ps d#qܯDQx{#T(~CG' )Xg1_:Orm ϣc=[Li'w^/O;LAn _g74aU_S3 Dػo4'ǒ=_}@6#SS{Wh+!|Wk-7NOjki7Y<}`Cwb__[2ix|/:_/EW V7Wmv]?%njG_~_χЏ0c)x6bo2D"=P1y?6Y{iZy>|"mx!N&/K :7ioc߯|{of?0y9Do~Ϡ{r#0Eza[9B%cr?,|94D#:j%BE|:^ѧiG2$C~ !^y}g{kf8~̴?&)ʟFWsLI _o(Kp"_*/|ya@w*oun n<$ ڴ^]G|lx^)//Z^o >G 0S8+"gg jOu~þ~-A>"כ_3/ڔ.XWx&'|wZd~IjG:Ҿ@yd;ƫ', l_~>~^LCzxN|N i?Z?dU姊~>Á}`u=$RJmo#?K1~~.Oo/I?Vk|@!|X֛8Nѐ#L'omKwt;ixh2_^#!yO&?jfuJKKqP3=_Вsp}h?u=82ߧƿ uR}ygK| WJ|<SqPx_+>߫=ioHxz*Rd|`C ӢJadzv"0WO&^D2#g_}y>OW |/q<äy>.pL ~(FeRٞR̿P@9߿`"[zݬAD} l~*Xߢ^!T'W+|X\.hOU?JW )kUϛd=}K0K5wKTOq=pt-sV|i|oo?6]5]"&MËAUoYgpm'xLQ_l4Sqc3q4wo.'4+G? SCҟc+Â*} پWHyt{uXso-_f%fyц| /q}h_7O0i[2y 0\{0⹢'.a~]7|h󓎀40_PgɌKϓ?(;ηY0>/Ogؘḓ.|?YZa`___#mc{(K^a4]#Q<OE1,Y>Jԯ~ۍt&7}2δGr~D{~BCw2:K~=$ģE1Ư6迤8s>a?`_y^VT,@]{\oA ':羮K Ƨ6Õq~UW p~Q\9_*8+F|3dzg|o :xqo s܏<Dڒ`]G|s|w8Ғ4T}b9?K'F7QCr2 {?Tbl?~F9ggc}@J0 |^=gNCMQ? 
F{5_u {ΏV8/r>n=j:ԟ D߳[gk/|< ?Oa W>s}aRs72c3Ng~Tݞ4>t~{TU{l335Eq~6Řٞx|`)}°m|fWE0I,#(竦X7 Ѩ2GG5v֭=P*;ܿ fqҔ뚏#?jOoC?ag0^>/ġFyÂj=!I73MWf iۓ'Wno=ϻ\]0LXTy~N\F>6sԇ@ܩG?qsOx!ThkjoA :%C3?^8=Wyi \OR7+[x$Ϭ}Q1|~x)8y|@U7_5]#U,e*|Ҥ̿_W j~}k3Rg]Ay=(F-0PQf2?O`s|WEio=%ڳ'$Oi_4|دϯ= 1ؘK'e?*oSίC1D0>'I>1񧂋oQji|\ |ofEqc`#pE=|٦YҰ#CR>` 2SԵ?+[@o~)`)1m|C NfWu DsPTRx*)+*_ >fsav^H`jϖ\Ϥ%]dGl]`k~U7&w%CׂY/P`~Dφ|pE;j>L/yxO~~=Ozho!#k" 7 ?%,)t{gKI\·~泤>*Q",/a?jS_gb=`/@UA5/ZL?||Se/͆%"W bOCu/ܘ_u̯J}D_CaEߓ|C~]>Z?%_tJ~[_17L"~x f߃5AW/[\Llؿ9oDVd9^j|/9ğs}> Ni;tW< 0dk~O7y߷E8^y0_;Ԛ Qe|p~̗=,wɷ<5^A]}4{C`~i?.X~9jgO<H k>5ߵrC>qK 7S?婯Z>t;5A>߫!qXD!ϓԳ_jOIX,UoLh!"XcS{*U!Z<(YIإ g7;~UmdSƗ'i$/-ڴ^B:"-{QGz(t濖,m|C[2~[/5fAͷ3Z(G8X?UM}*TdbxBQz|gfϮHd&27@pg*:}Jb(jW/u Ϭ,6O?$z1{eŇs-037bXkig?W/i=ݏm?b=/M:Hoxk~Aqgߓ(W;%Ak_c}]'>w{YIda>WͰ_(gdך^$E}heho1(XTsoywmi=Cg{&Xwyi^O=S8{h9Y_J~/?7L-wo KLO1?ԓz\ OA"ԟPcZ7bi}Vs&v]/ (R/y7/f/J}a@.3 )6k=8(XDv@|%<{t籌P0I"ƆWWN'y vBQE7&;o3Z_ # [=ltgxb`ttn_>kB3PX}GSO㕱~bG~NSa6խSdmiRd,'EVэp}*0f=jzYm{V[$5g5Z;mKo0OT,AO&!ynXfjO&4xV ti5"d)hi,@ >I}d9|l\p𘏡 |eLJ_C^"E>ۋ0 EY $q(1Y:=MCyH ?->yZP5Gx4r&pLF8'Mdߧ?]p>m0Lyp~ <5&K aĥL1 o7@Ov,d$LvR7@n.i͂6qkCfBތoD]騠7܌6^to؄+53nfĞU]x Oc]L3ÎfTVvM泟v'<(1&uw^*_hZR4RvΒshYlԛ5C0f / .pbVe)&55p ewh(uu0 ,QZlvh4~ ?LOFD kuiBa凒XҺĠ?,7Xݻ%q]CRWfFtGP,D+ߝ~WךgcaM* ØDTF`Ldj4HШކrةnV eqoFAipafton}hqo#әz+gj6cjӈ8\0<A<î^.p]]9V50WH@,0^[{@Q hkS̫ݸ-U5Ѝ$DGiVa?-a ҍYzI817L: B=a+%Gj`}v݀k_  Z^K 2:G ^۲JX&UpbFQ$aOk9!OgK߂'ˆ5(&N3sL莳CUt]?UkgMJ-IJ'8XF#D8H"FHذ}ё0[2¬ZV cuˤ# 1yLN5@Kd)Y]74f<+s&yB`'eea ;đnD>EgHZ<\n5A6eaRn 5*p@ف0Z#a}wjO:cl@RFiEpǮ "u$ˇhhxt؏n8%$Wsgbs1@u%slmwbAۜ0GFs]WoӞ Hv =+>,'>en4HxsSRen `hiJWJÒtfd8@\4_Y82Xw#$anE]rqn%AdW2Ye%5B6]l/pņj ٱTO$/pa0%ykyU؂lU@%md?EG Y? -/C)J>ߵ 笌I_7$p50r vILH6&nPl+ dZ+vZ$VMkU>Z*2X,sQ:xrW[tonC:V RZ?KW䱔%-},j(1Gٟ D{WSD"(Lmmf,KL_`0cYjZ߾g^hg_8ׁyT#IU膷dō לlZa7ZYҸi0¬'k99iƜ3(0Y G jh~(m.h-nJ2j#k.?5׬.%¿}kuBzS_]'?z- GZh wuw7:.\_NG! !Y *& !J8C $z Iq>XtxU3^&:EKox,SQiϺYl I-K82"Ų%`1/yiqet;+~尗zJ 8lzϧi_?&Hh `2&SAg!tiZX5:-% yߥȧf&-(Hw$IS`/H)MP; GW:S}=Jy1 u2MSDXw䥵(j-~= Z[x!mH{%8 =# +%:y n+1H$DpUvN%zgC@dEY2HH. 
H?.}JVu2$eEB1ɁWRc bBMsLjc}z-˄qb^#nD&$B*O[<5:aܭ*XCv#Y+.b/5-$%Sb|9Z#V"~V86ȠWMW6,f@6h5P^}뾜YPyI$ `@c㠗t0213 u8@=gΨW*^]"/i!{@ HL3bLA V< ca//-*WZŠfv8(y{E3p҇ڒy8b[y!Sʼ2}ou w8Bn` :$S iGǑ˷S]0RăLZ<""U^'"tc똬*j8j c'k^_l,"a 2"u[VN֭ז9۪mY`/3҄e2{oS6~e?S߫paVu| "^_euUчcr7(= ou:?PCοp|,3l \fX6p`t—l(QҒaSBTL7BEHL#+,,M[sXn ܱ;g@ej$ jUkNHu"aHEk:oE[m"Ia&LK!(-&%-;^̏VȡKb_Էa/V0dE g:\/T G? \uV0\g尗85m=`3/a;vmxc9 6fzK{56D _<xL6ǣpu#@' =2nhB?z,\^~[~3^QB?ׂe:#:D͝5{D^3,;b!8у4ƺT{1{ƁNw Nr2\N{yZ{7!i^f*R'v 3fV4wˣXbc/b'%@a)[qf =4rW+J]iTg5鸛H:^~;\ϛ޹-w_,6p _p~o~iQmcim,U;'}jI_}7椯}hvD'o䰗3\6Cͥj3`/ZUK"j9_KL|Yup[85E'stlNJf9;h5CG7K.ޮͶ0A^ũ9tA?P3bK/N ,SLl6xTk#9;~ZjqrcSdc"3ȓu8 $}n`>YSzn^Q"d`;\4-VonQzm _*LǮ*'ύg*Rqo*xUrũ܎xs;$2Io9\rQ}%h-8 ܾ6W94?j!ca/| z @4||W$  I/NE_1ΎRpc&.[ytr[Ygl^#,!h;4a=oh[筤y+Tc1 3ܘ;Nly֛;:tc/TXޛ V&N*Klu'׏Jf4w3Իo7QJ?7BbeVՙ~ah0XNf9aHꕓcLF25"f5CK 3\Ag"l x|_7p8Z諒Hh[B-FT6J# 8:JQۑCWJǹB_:;NN46RT ;97EE[d JL>fL9 vڼ ;߆#}}{4t=}ncx{^~˃]^ovp/vy58w]nz]O{c+ #/Nww϶_mѯ69 ~‘.wGt^\,6S(q[,PyGݦPnӧRD/|_hRIL/a]K%vMZhwVV+q0n̈w˿ؑ +5z+I$X쌴I.zڤ=S_Xwg3S-9EI1 W6$ =<$$}D1/,d'IPMg)Ag7tOym *%ޖ[ xf33g4D%L)Ҳ{}GCkHncfѷi/ev/z'hw'8'4'G{Qg^S-<}Ow6/kBAJ̅W#w#gF>g쬏byKs<+|㱒k():G9m~ű9`z6җ ^¦ < 1Ǝ1C{e' q,q΢~1^c~1ŏ (ܡmCڦO׋D_LeFD8B';÷GÕn'{ڋP=YFĶ*>6—m[W}غ.] -̉#i{( xQD0 [A"T"Y/©_5Cͻn{5fٺ~g$Q'(9Xʀ8rp.oj]fft3ڕA6(-<7<8OPBᑥ #D?fWkig/֘wWgUK^b%߿XOb I.],7XO";V`%&Ґ霌I$ޅJkO@N` 8˿dBV3Qi De꧹= (lkSy˿Н*=ۆ`6|6|x1|b}j峭 GuG-GoؠBi395crjʤ4NqNI~͇l\~oh~>UDݒ& KE:5c,Pc5h 5G޺[VOᶐ8_iL;BNRbtb_#gW;<Ƿ$5 w){Ò5pݬgZtm&6Mlv~Gq\dPiF>6+XiygL}rh .:h(ShJf|뷋,؇b?{`~mㄔ};KFoLi(b_ZVL&%j4gCmfuߌ%oZ7Wd$WaSf⨽q3o*j:]^D:dGQa/Ɗ׊ BFTo? t_o{k[y؞XNڞi_51N/DUu+v{6l>tE&ۇ{yGtHG7:/i{Z_L|<샽.&`bBzzx|bq{d|x!֮+p4O&aVVx˔V +sہyaX%[cΒmVΒmN>Hჽc4TۄVRɁN53\,Z-mVn|'${JeCX~%"*Sf8V0XǷ/~j_8GZӠ] ᫤i1s[bjܝIlJO^J&>J*eaJ9 77E7͠;f|piӶ>n0*w5dтs}iLrm85 &m/u~˭wޞ %'@VcڈתmO'`;c'YI'M$%$%I _dhA9SwL.y$pox~ݴ=vӟ#%,ĉ)%EJ`wi-(r-:Q;Y. 
wR-{w|Itn'|Fz3<-m_i7_v'O4WO0tjN'4 tĻjaf5jYM ߰4p=\$q^vo{pn{[O>y j\oXv{ֵ} Ga(=sQ\ e> A[B3m7,חXsjN4㺝dZi'rO&eioͬfŃ,tr>d;fJ:jB%j8jĝӏsy 6m> x9&Ӏ_Tӧ ,[toLcg]x SZjc8˿Cq08U2r8"ywe 4`/B:MԇNS/F*'*>('79#.y>!eTnʦ!x;)Wrv nwXhŗ`?ߔy>`>/WS=q5Ϛ$J=kc5Ҟ}iԓ\"J#@=vEO9}:}{?Îwޑ{yrZu}.^uA;]l&Y鲂e/ygJβUesPK S>JY4gzg q!|Z9>z6vͧ|cOL  Cy+~rKzS W"%ێVҒm;:IK߰]iHʕĮQg0Jg]߷njZQKt˿8.ӧ]?vYCOYliC%WHWrU܅t >OgmeK"#ו ָ;ו1Skr3DGx.xpۃ?;4V,Ҳۢa;JU%EJ㿎k`W6|MWlo} j.ygoH˷tWd'pՔ:ved\ed! TFVcI@I@^rtWk˳n|ڎmלTo @'l{D%ZKȡħ8;!f t39~ߋSpeCFav; 1͇y|2͇y71~';cNnoǶu->Upy9( Oٮ2"{JjHҶvzyP.kD}ZkeܽV~|l$۠+^'(A ,J`x]y52%e>e>=8R8uLuq2e۞BKhiT)ƝTQ-y屛U&!q fwLH&T7ĩgX?3z}=CpiU4*bʩsrݫFo"*2רnnרwt+0g.L+0w\[bcA% x|*⢮u=6~W"[ ]x\Cc/mQo=G KaZ>1Lkz?\)*T'PxX§g3g3 ?;W-2޹j#WmMU5WDƫ %NJ Osy"<"H/6o?ț/l۴^ i)<|Nî]WiKb{/;wr\ۣ^o=:NJʊoXQx!KAR~yon<\--eӒ'Wܹ4^5ڈ+FVڈFV #+$bRd:oN$,ӹI+GC\O!b--ZYh#Y=m<KTW;@>VGV>v?UPhӰ8ָʒYd>x|*_h5/V t  ~MP;kzwzo|ot=e;jG[nC<]lj\=38!>8; YEi폃yZJ=-yܞnOKb)e-\kAUzfFߎ.;aM#zK /QX㰗ʞU8GGOȽ'Ǡ.N=So%0cgPQ$8Zb (^~ sźl^(^oqsx㧗V Psw%3H|aufn:P;2F`N o7][p"`=Jڵ,=^2ȧRvWeRuOE$%hIqq4Lbɨh|QڋާgujZ?uuh>CЏuhz:78-iZu@'?ՅIXu!x3wJᙲҺ^ud@>V77߯Ľw3~~T3SM¨&ȍF(zr2BT d6^^xc*WxGKOCt9W|[l.l>]->")pnҎ׺I._$X/_ R".wّSwz9rj9GѷHkluww %q|7'g 'w¸+@]ig;_^*ŀoJO.ަ: / zL|E; Q]\ xR*9#t#o}> Eezڶ T_G`/Z~f0+],/(,\3ޮȏO' sǗ[C0+\G'u 'U >|efz].0Uv|$3bWٯ_#\eKd&9ט N5:a |L x^,0KU?t[{4^3qWۘi ii' $Ȗ$AyKKiӮGۣݷV&Aq5pe{yrqnwz>oǺ ELP.R{%M+o˙F7{Ã6dy-֫Uyk^!wY/V>vՙUj)$тR;U\̍|/k! 
j?t ݠi|U[ .w[/(YXwẔ4n6~~fjFj~W3rKh-fx1Pc710xzy_nyA?8vq `rH//& Eli=>l Ltq곛e75`MucpQއ4P6d߽iGmc1:gbbo0WphИwƑag.1=gK(gKl.+I@@m"L~!i\U `Nڍںqάm m9QQ߲҅l~J_ϺƇMe@ %i|NPܽ$(Yd,HIzag [La䱗kn;ƈWdS8Ի0\{ynIg<:#:*O{Y&<0wA&9 STgvojQEFyjQE)Yj6ZAфV06uRneԙ6olł}iB҄&._.;kaIZx_($%w5dQaڱZ`- ݇u:Zm.ip۴6m2zG2£'g' 5^UTb'VQωU>$(~(( s$03Fk ^ő)Hv< qL[B㙶ěxY>e|b|2zwxF_AG__hm4Hm$Hֺw vm:a2ky̺fm/FhIu$oI{ߒDm7ߒ[Ts*T1vÂ͖3?㋣IR`ꓢ\.yg<: y,NeYߧa IFKm:FWmFW9؉B_ b};Қ^IL’(>8ϑ fd&aG28G2;mË$b /W!~##!{C?Wq*&a7>7ˌe⍏ -K΅WyM &(s;˓T^o^M>:Ջ$TŔӜ/ֿ#rY'[O8_i/?G [-p[ մ| KjnLwڲ5x.zwYd[+A]Ib qCi_fo8:!8Jc/[aR"/Q4צhޱQ09({ю2eN<5qMM֖?˿V:8ѫ>zuKhxʺ[:[:{y}H>5ɧ#|곧rEy M-_D/於Q#2_F|E17#ka쬖Wwyjyɿhﳔ9[ԥ1 KcRXkkyUGn2L-QtO11x4sӂT}d^83WNVnHcz~%`S|d٧`wیaq6l31^NdaXa/b bƜb9%9%6s4񷎅O_d!qЀ6zܷ 7J튥}[r8t9xoNYt9Oo"j8 b|J:Kq;;P<-MȖ&qȖk-)~6%_3_)\B׮{ _wtC$oj9Nas^ُFN~yֈT|T3!ce dFqeV!R)'>,SO$ė5~Sʁo@R0OTs-O(Vv˿5z_I}'A…'>(%]n?^ޅA^50ȫՑ7IFntVrJ^޽]G|-BZ,A[BmwMű6yyp wTBI+qaUV55ei5ȃ LqqXb;i-= IrNRqTw#B`31d}ClIU7Y^RV Fv-K7asF{gsW+ܕmJvo(1?(g% ZeZ3D /-l\bֺX}n(;blbKl'w͑-#8riO{dovpEFKduc, )Q`/R,hRA >Aٕ=PnK8$:U%_m_([r9*#uD@ +Y>7 8C2׌M)ɞG$yq` À!l NDwCXvb2,$Ib%G[h۔f]6wuǟ#;xH)-l|ň˄&K-y F է|7  a r斶yƼyKZFVEP6z7sj"k8ql?o>-@W`3KMcc 1(lwSPt9>KD#ѤhR8e@&5 c^r5׭\5S/H+-Z9͹mνns s j-e`\G +CTySwӆxw=u6( &(? 
!q- di\-eiZbF ZlG mU|%kX-qX0,&E<;REؤ!VM ߶訸}4J`XM>9XyM/Bb;='ywn,WeDf:YD|1VqQO@:x.U)(i Wk| 7Ԇ"ZߠRԍ‡,UJҼ|:okJoAV䵠!s8 _k%hc(tԳx(MjZv6Yfjd+še+_VJe+|sGV.VGAtZƖ؎-q' |. D%;c$6.fds77 bF3 ļp&ՏvvޅjlW}s:8j1QZ8j7)QJKM٨&Dݦn%KX~fm?zC ҩZO"V׎PkGZ)lMwike2X aVCQ̤Lϩk[ \紃HU7_瑃M|o#_(J_δ9,t,G9>0`e>6+3@ /6՛(MǾzw ʻd|p>T9 ?Z·?NVuZ[=Bgr ݼ}lJoX3_WF^R!u~g7geLba4{e1g*篓oJ fsj!|悩5vxUxUx,b8jG333>CG&kAo~5JU,OG\֥]c.aO4 x5M6Zl&TlL[1oILW*P~hض#aeˮe{/m~5rX0j]i*"Еf.l[]iWWÝD[55@ce#ʴQ bnj1vbqg]l""[%7AAsv"=`MOX4.9u"ٕ4=_:&~M@K veU; ZoT7ǣFV0\溆*nދJA{qąs5ڡ#E˦>U79iFa-a-ZZðJ˞"2͞@>V3>S9:!#-͍- ЖmOkYcc1ߍCČ|LyrW5%ZGiey?*>P5>h:+x#GgEk`y7`yri`}ZuĪϊ)}'6}mKv,2NdOށĶ'TʦOH0Ʌl8 /ޭ:Kӽ%ΣuI{ NW2tClqlt$wK#fcb[χ,[4+Jaňv#scQSRwt\xhဖ(Va@pȒmp1 C0 /2ZA웬iGF>X샴 M?j):B L#=1xaY+^؈ۀ!5aW/x,eb@J'Di&$}9Ox7k\C5demoi3$xϟ茜'GOgC4Q۱S<Ä:7hTHu$ //7܌mrsrã6hFшшE5AK8Ar&kP/7VÃ1~4ƴQ%n$:? s] QlegRZ w5v"ᮈgJnܾ ëpKAϐ=efƢ]i,C,f X wn_;תDQ_ݳhV{ )R~IBʵyj:~xOSZgz񲻠k}+p u|/P:XeV<ꆸ\Ygwϱ]cנ2^cgGI8_dD|CЯ:xo:x+Y\Y G4f]DWWC؆fl'u& a8l`iG{S vO b˚ϽS>PV+ S,%xŇF| ӿ3i1_ nB'ixQ.I3+8LB~ *'d-R^zs߅ :G'A. d!x#D "ǒ mEAk:[Qpň:Tn ,jo6I*{` 71̓^ZֶꄶOm{ޕa#Iƈy16yoy%G<.9pVA꙰VU8?(!6qQ*r%h&5FMD)bw)2t=#?g%D3CO;æYTv3|;(0)0_emq`|b*؀2xV^hA'6yɲ-Bx৞H[d[Tm#Z)G!F|nQY|nQYHBNr(ۉ⋲rdWXZ˔eo{Lc?zl7˯#Z)zFfK-[c 5})̓NrTCUmozHhHڀf$ `nDk ƲN?%zϋnJz.*.}##JuC =L鵘s-2#:7 +xB(abpAx$5 g|lgSj{қbZ!#5 =7DbC)TFZX^r.y>) ayC<癆a ul: ㆁ?R;NV 3rcZF ĈY=y-=yE! 
&M]1VOA/xZ69Qw[x뺺u]O+FD9*DNUBtTCԍ""QO|Y^κs;Az_bHsx鋼2!x&3يJlUR<[-]3K77;y")=j۽mBZԑ&;"/ T7nyYGuͬc^:M c1Xܴ!cƘ3bq%Zux&I>Lqz".޴V[KhZ;B֎q_wKHEoؐlZ8Y1 -dnC Y7B xp=[8_ݖ``V 0w#ޓ7(9(3j0 {pCLJ2'TJ_I ~5Dq̷7@dblrԁץ ?E8OO?/qUWxW;Lk‷gb3ow26}8Z(b+¹XElfnCO+?]6s = Ue B#Zg[˚uG)֔X>"4"/+b *hTiPZ#.7\khX>QVz#V3iU3 `Zb-8‹9?*4 M34#y ExM,APC '06giCy^Q^=׿běMh$.EkQd@Ks; 5 nkqVUĵ|-GZK\'ҷx\B KM{=gzލy{7ʳ\{GJUdFjϔ**ю2 O&@xxx"aPAyw=g؀65 ԀbBh4wYS#mBf ƈVt9?V{ԇpsE@P,;#C;CkG{.e>wp`7Lw9/9Cx'b<*z.|!qT2<;75q06lޒՆFC&NW!#^JT(|lw :Kug8ϰ盖껼m]Ҷf"V@X;P8150}ശԈpz{醴u{RwQjJl+OE]4#l̾k̾wňj5c_u>X6Ձ qޏ0x^߳S B`klOʒ7(kڿfwUQoP7Uoem^{Py4 zA7ϵ s={| kkVә@YDǍ !^xqc<-=x/cN_SIBx0$ :3@5MAj/N`Mz"H-lzll <~bx{ ߂`Ʒ`oj:{:@dT<>2tUtxv<*tȮ)Z--%K%sY"Cٯ2CӈPul+aFӡ!]TXQ3YeFig$lt!F8'j[^j̋^b^܂Dflr!ޢXH(K M)<8kQi9u{~3P2t:Pܔ;gJ JS=cѨybY[׻502ukE@FU}eQ_XY a+!rLuΐ+~E<_e@E[#ޟc{S|ܞ_i8kM;*]R0qxZqcMfXǸcױ{_rڂ[Q?<$j42 ~"UIiuIUf4F=6 zK7ކ0!Ty8y{Xb߬3ޙ-GХ7y,.e4&L0.!_W@ jwn4Nmaw _olB#r+r WKWd+v <#gaX1f;[jEXcpte1+M%2iRm[5bENr[EŊ@?UO'LNmvBd7vB;!ʩ8>|q9e놤r"l)×'wQC05]D*4W LXasV鴔7Jmd62 wܐuKv([􅩮3-uY+ubO>j|M?~w7% %Q8[xc_]ܮ1/ר5oU\UhWqV\2"5+b !CiO`9 5-\,[J7L ܧ`% S HX \,%ROGKVJ1)98) דS LCֆ"jwH^Y[է~!}6ybD7Ck*u 9Ŋ37]i- b1O;}ܘ8n\O%/ mIe|"+u^K)VRRR+u80' n#~xbpb%O+qHu[E prJb=v}ENĞaJfb۰MsŽ#PB5o::ߒ÷թ"m< c+7+ +Fٰ;il؝\05Sll2ؘ`cϓ=:=Mav0-^ܮж+^=*){ X ^Є^ 4ubbL|m]PEzVVo[Y?ӫatyzgCC [Rd5 a0ĺ!چk®au fz@47;@D*dbDt; v_]f=^s/nW(d26! alu>=c{.fQr8p-w9. k_4$[]JnWqR[_QU{E3e ݜp7'm7?ݜ];̑5|F߮ѿh҂<=\>h.ψsyo @IZdW̬ybMؐ'z'ƣ_^ϴ) 뽸_Oqnv X,-y*{i뇳bl2b7UHMe̔ڄ0EoB3P), ¶A:*i*⢵{ȱ#\\TO|]NC%:G}mhUudЪ#BY0PQx  +rT6GG5_?k+Y' U+ ^}G^.TQK~ܮč|ĶgvRn5}~/rv aZ!\ rvX9 F %( {X#_DV!9*ʍgZlm³&z}FI5'e֛%J xo9G{ o'^TCx[mRVWj>uDO=YOdP/ Hu-,} &CM3ʯ#ʯv;$ų ,g%y|Jsz>uO?a!SƇOM릆!V\^ǥ^Yb2=R!K)ѐGQ+ƈzz~=~m5/#{tV"L f>$l#&Qg 3Yߒ:oiloɘvn8-վ&T5j"08Xy-YaY\LH c?a_|6ϹQFa3 ?KӺEgfGc133efx:7 $J$R1D)+3_7JaW!GuJ7Y`.V7R+BPxqYZbRQ5 m.eO'9BMq~c*S(XOaxh:sƫEfpYh[6Vd>M#[h{^ί6tv׃݊porx 6i~q.}J~DO詠Ba+F'e0ial+mof9^x3+,MI>;#1fg qatz6o*J[R1e:z߷$>eOZA@?IgzKRilc4b8B(p&o i7`>?}U9PhXk1gˆtXFi;IdIacU99$C!9i F&:AÈ^8 x™^Gix+^[u/OJ. 
F FQ9iT?<x|~>l$֢[:."l- ygQwbGܬ!^&/MW3WG읍r1;w6:2I z#Xn9\Zxksq>u͑g%|1 xcX`GsQy³CL6t$bz R\ ʒ3gdNKOkR6b?i[W *i:((a9{g[geÐh}^韲?듟,݃@;SϚd@=3Q swth lεބ18pnWh@/^wf[@BۿOBlKtIME+F:eoϐ\f*W~Μ_/s#xeFWrDW:+*Sd}fK̦#^3 _9"MDm(тp89q QtdMN dQ*Ooepe+FSUa E:"6Qˤ8GQ 8/ԗen^>>G r-$Idk#ێ߰rZn#n֞,况KbێZ+c#۬A^8s22RG40FBLYMj& ClPco8B' s ,vvw6Oc$)o 4B aCx#'EM_^O|jQjVBkAaAxk57i)6HНFM] a1@pv>笉"63ũlh-h8A<)z RģW2dQkQ#^t>}6 f,jl VTt xk)[-兇uxըd^5)YC{9eMe+F<e%6҉1D2[722[laen#JqϷfwseax)r!BY9bt ۼ@z9|]`ui/;7~YebZR_"Ե1=B*Q }Svx#Y!ZZՒKV!6jC{ii+[#[HgMN*PC7|~at3ti 1&CL] 1l0ssEߜ.tDP23Bc^ť=mizӗbNgrN,P1FT^6)/8U߈$ шN)1)N 82}H׵$+U"1 KK-!Z II9CjɛC쐪ba0F%eaj钚,+tMB,MGe=:qGq$cxD&C+d!TcЩ1+Fym5wTc;Ɗ++TiPo :mlB.6<#TЄrbACJE<~E5>5NXȹnT^yRDp;_u9^fF]ή 5ǩܧnkSG]qWZ(?Φdcc1MR7rfM¥ ndew#cs#+ BY 9y׉6FZ XsFܙ#k +wvP [4oz?>:ŏ5S=Qф5 FEfp]b̻bPy u&Ί)Ghi^툤t_1EH8R^I5Gy+lXCk瀪_I#~V.xO2Ekk4&1^$'c]cz7@)YӔ଴iJp+F+ݫEa835#Cfx}'K5 9CdA!9ᤧUewAdo+0/fcT]0tO͌_ҡK47F&5[3Z賅\$Pc4*؆,ҌӘ؀w.3wp_/m~8 {(D3&_8N= x^ߙw+FTyTRHGxXHG~GaMaEWkSVb=: 3: ^/}L3 Z9 r3gi7%u([SauuǁYJs?Z#q5zW soiSaJZc J;:ǣm;Yjm=ǭ6~;]-Cϝ`=vDiᮓ#dM%mWv|zpxi")P)WK-*UV=!DIX)$6Ѻy* Q>`#xTa1M ;LNw!406:N21Pj2􁚓1^c-Fʬb/p!5>{%>|X1Н1mP1ǖlyqxL2h1 Brx!FCKcK>P8!B/(,G0܉r1cdzjRcK^α9ԃF72z`Z~X@xe%t xgдԵƤtNijh@#wuIp_H w^y7Raw:AC}7ĺC49P!l(>ac,Rdϴ:(76ļEDJR!J*0VYCOiOXҚbk"<vE5rm1mqrm y7%kvZnƹK ژTode.lJOHT,CdTb#{3}.<V&T@T36zX0c)Wz ](޾T TSS!PSS&Rr$ār9#^ ¶o`,s`rxgŒYbɌsϩ.͕'$7ߍ+mߍ҉-My-bݻb^l^mh6rZ -3eh^EZECXw:NtX,ٞ LhvEpV7]l8ņxk,f3~kkXd'ˣddy<`SxP}JG\)3בPXEEǬ}Ov9K` HqWIgUf}\ [dIpŒ"yʴol)Syl碐MQƈ`e 3-#t# !ްAm9DJ?P&+eiExYZkmQ> Sz_k*mɒs xl;sgzijrIgesy+s ϵ"s۽2_3S?3( Ymn0`nr/FXd}.8¸p.4n2~( (X4iBvetHo.P;8kN{L'ѡt/) 'fo>؏8R\yFJuG9ĀRFz_Т=ėᾗn L1R)-2}29iv`pǕ9lw#xXxwvV X2> i:E<7WO̬Q:ҡ-JvSϚ*BBZ#ވ#"Y`aMkcG^Ž"Lj+PY\@Tқl3R -N, r\)#80P!lN^WK #Vן{Mw&Ea;3Q#h7!j+u(ۑA݈U(v][,C~Nξ1Iq]6AMNL߳Uϴ.g>NX̫,mpU;*1g 62KM(@oZ:eLQP?ϛK3WBMi[b͈<&zq^6T8(:Bˆx*W./rPr")jňkYcXb)#Xڟۂ&9`z隴-ĈnqBMboф)'-e _8yPySASpjfel6MmQK%{%;aT +$iB!Yy^H :5چp !kXn#:@9 Î I/'~'5)~b}aoa@>&9S@'Ptt aCL[㈥Dfn̍#t>sq> g>E%bߓzqZ$θ^};c=6RfKCK\< |Ӱ*մƊz 9,d 2OFFhH`B}J's͓y2|Šr+[E9Ŋߔ(|89E~~QӓBh Em|L 
U;}Br~n{<@A5Gʿ8\"D7_p/Fx :mlkܜ?cq:ƠACryy_zR/#M!8(45:WAo\cI7WZeQU!ŋXbtnU[w喣Vܥj[!l+6ȷ݋,UT{&>(>OV4N.88厜66$3hCZ `Vkѷk5v_ ^kxz\QkZ^Zרka9,E!l'$SA71cNRl?R/Ț6ׂ~TΟr`XoyOWdo(i p6Zz9ԫNUMj/)VX{>O>A;ʙI*ʦ!*Reo,! NΎ10,oOTۊLBh NEԹ? 64ي?K>gte[ÿ;lX- #np?eS($k^*r1*D b!o,B S.+Ⱦb<ԓTOc?tRBqJBҗWbJ7ֹ6չ6e\{PT~hxQUT2AkO4Nm\)n;GGVtRUbHIxO󽐼R%k8۰4m4os"pbW+v1w ]l"рU' +*ø1UJqߤu6=υ6#`3e'$zГi)K:U֩j[Ϋ=C~pk% FCW""ִ{Zwn}^{|0[]#,CtXC8ﲕ a7C<{ԑF=7.os);ExGQ>Џ 9Hgh֢@}RpE@,y>˲ Z XhyƉ;0ǣ[qolu5y(ahL"qbhhB=3#'\*6ci "+&ls9H\Qñ4Y՟kE8Zhױ27yE-~pOݗ0i4mr Qab e*hhP@d˜x`рkMS55o7pm .r>36o]aY( rd߬F))]QJcIgB,]LB<;6aVLmfj33)3"r빮՛%&ކ@Ʌs>77 1I^bԹJI#GM =ʆ&n+ƈ\lβ9m, 0UO cJ=Y!͌u͌uF\D(pt.~$(#H 3:mzqژi(hXXOۆӶ!N'w\̵n n n }| awC< #pÙ5k,zHYPLj7fS ͮ#gٳ< sˢkV 4!x=~D!V''>O@-}񉧈O.aטm+Ĉn\|afD C_|aZFIDj. B}ie[AƳWedK[69c6 N; Ni;~v !zȀiOP8ECmOa!IuVf ʓVx sJF|%# ./17/+u[1Vixj2Ew!P;PନœX,Xi7+:0GCny֭EvΓқ5ʙrj>8[LeTIT!l*5F9n&},#~#v#Jt#FFf667Ek[xeŮ"km70eZg ̹Q˲#77oulUfujmsS_\$ᆰ'YXy,Bs=r-™h\>J n墫|l%?Am:mnO*gsE-"J{ogݐl8w?팒/)l!UBmnYmXbK* M_@LU b;_Av F/v BZ: #yݑ87L@$b#bMU":zxcN\1-ޘs\@L_8lD9;n[w&ؓ5F 6`do=V4@EClm4J P]t >h% ;bKyp!xlf 5qsV8VZVXxTU,UjkUJv>x.4ye{Frul|BV9!JebbC$2sh9Vvxiv/+cָ^57Fؽl 7$z`ԫ--8bB ye42WvT߸zI+/3f9 _8Lm_G8L-d=5.Σnט5*EJAV^U_Х7R~[ׄŏDu0;]ɻ|2OiSA6T8r7+yۅO#9t y;ʐe Y!+(~EnftQE-w0/&$DHU#ߌS#hQ$:("%:[ 6DڔmڦlTe6(SW1,fDb2ysȡVuU1 $$J>%Sq{tfn$,CAhE7vnEϹ+\;o|v vR6\k{ ׮\<+?lxUdt5]pzBG7LQD\?2Gh\XR=h,4 0ms$HP=і!Fcb\C//f!y9QN+/2Z3_Vs[N\osS&e¼)2aٔ ELV[ i%hX8 z"+FCΒL gIGYgIa% L;^ڋ#( FZ^e=NR#-|ѴesOͶ>){,S*˔˔|d9clS&4+^(.TUg oSVlN?l+S϶j;l[r&c[HN YE%Ь e^hدyN z@P9#~Gܸq 7m7nFݮkhOٯ]{kw.Tj#Щ"Tq0.־k}S k JZ%YWL F=,YoBZ3KE|3FՁƾA`Q;քq GXjn_Y Gd> VL`QsKb! 
c}dr냷#~%SFYz`J=ݤ|LH7)2 "R=R=t*'Hs==^w _8ņ^k>t^xQϝHL_1z7yY2MWLEK~fu\Q}G}chN5f7CZdxۀm@z̸?H<6f|}\ZA MгAг/R!9 B;sxs׃[#.N}3;B(ބ9[4cM*#pqPIMwx"H-9bMi" P6i "ԿNю0FHLzEBU`g79l!|U{UmW*2>`S' V`;}PdB݄#$XtD$G|]+Ϗ3 '(&F 4b@Zb磧X?TW~q:$<[GçjgH?6HY#X#a΃ꗏ+ eX) avp;j#|m8W3p?\"|Fl-R\\iZE=G9s_7w҆qǨ-=eN4=Â(‹vVE;if#$畷6>JzAcYkr?*Vn2f ]b{b%MI1M!q4PƛY;Bx"733Eqe6bXFM2 H9rC!01GExyD0n~,\9oϿv۟ceYYnbߔ̨fn2bd4d+NqN!FYL` 4>*f/O*:v3GT' iSk0Tew=r"Sh2DYD˵o%o󒃩bb_;= f z,|(%U  MeExSYO*inhc##&[27%%#ԒZVb#tN aC,R+)2j%0CdoItKw bKFwPcW&uOKe US0CK额!.:eٵsgߪ2B=TFXTF,~R1??hToňw7]tptm.'~דeQ=t1#n @B۝;ODmouhXJQi"ْ骝Y_sgs爹4Vレ}nF}3s_̒+/'Qނ3L%_v3L%1{Qy7)TK|MvEv ""1@uHG6O8}C_:67 c\Jb\9BWp+ +C-&{ <^0*@Иv6]4<99f<3i93wl$l@gW b?XtXT V?Cg;$|&r, .ZXI⧒YQjDWEH2UKg߉繝o|3MŧW*RSdUGd!kb˹!_5`lbU=uK?}]+F|FŮ[偖M}F7AmM |}ۤ0UF7Hbj}bIZldxR"JE~$Kx\^g/ux\z\~ׄ8!)&DǡvFqzܩ5Pō otEZتǬV5Yoi8f%nFrȂ`b*ĦfK @ăq.HG%_)a]خ]x^ugq= / zb1SEȾ!>!ַܹVTD 0vwL,G&al*Mj=H>+o36-[ޛeXW0F\c 0F\C ӶO?y|v7nթ_Hd_2-ʽ8"QVR=TE%Sx9J%>GKV*FXƦ:̮B:N(„6 >]#rG;\iiO挮97+[H_w&mlS2mrĦi(tC[qLjT(KPC71ew{߭#l2d$ٸ+$-#3;AZKa䩅:.Evb>*O٧'4˲>oˆ} eDW!aq/|pxMĉ1:}_)g 3n=̴%!Œ99cl;7e!Raڟ˶}'X z;@+ {Hu+Zӽ*, :s{$Pm#rX =BRle)#pG2dhn WHiŒWXHy~E5m*qJ +S{KQ^}#Yڢ&|ڢж#-35ѓ%i|>ޘKǑB@EN%uMRMd1~R0q¥Up+R2O7ڎ!c0P:i#<.(jӢM6Emdu.&vӵd*ey| J`văA>ƋRH!)EqrO;;ߌ!E8R 뇷' }\@H.?&sw\s´rg 7^M8Vy,XsV1p9rܟʹOGS9OxrF gKxF-$Y1BߜɄCEճ̪)+Bx Q $qjP@'ZhO$qa Gs^<&$zNOZv8MIB~/.f81óJ1]c-" 1$o<6o}mQ/:_d-eT dň6 &}O OX ^_|x!/"7ese1-ˍڲ?)R|zhcs"^f,⃽tY{b<9Nn'"ih6*f14<' U-{N~ZNBњ>XiM`,1m, Gv#\ݎp#X'J߮$QE%1#`VWS N)-T>7e*_2,J]?c+tml*Zѥ Z o]Q⪢!(q4|'-ɇqqaʇJTYSڪS)!D S JE!jz{놦ӎк1n к\'肠8H;Ǧ&<ӳzxzswѓ ;KccEh76F P[T?1On-On-O%'s[˙ D |%<^aE z蹰w-ɧ5SEOܟ3&OsXcFNV(So lpP6}P^ƈU(DM FP+Y"xE{ .<ˏXy(@'{Z"F6ӁzU]Vx#KJ\ ,=omԶ8QۂWuPj*!v bpnS/Y/0͔PcijlxJ-~T?K/cnB#[\u%o9?i;n䭼w3c u BsRc?ѩJ윉cċY ʆXdeCYzZtĒJY:P"ҥoKE}ZdتW 1gN\t%#FFtYh`1ILw#!5t;@cKe~-NpU_WeIŠ|+EWDq;1FA-7': 77ors+ϟAp06XŤ:Ep4C^ %pBɔBI|/i*ݢlR? 
q( x ux)zKI fm4 Bc,U0e~LjW=bݙMg.f aoWX"  -2BO[#^eϲ84JeD1SC,1SC܈99[떛X1*{{&m4{S>S4`NҠxEoy7W|Slu2!DL&,M&,Ľ _3<.?yyo?}##y9s_w/rmoJe@inJ2Ju#^VKůؕg=&/vDwC\1FsqyK %BS[,Z۞tyeНY_W>Xu܇X]FG۵丌m&l:ohWK{nr:Z{w^;oOR-^AUNQy᭓QNFN?j l}djb_lZ֒ jIZ3sfVs\y]<&Gǟ|ճ C4ib=vGf"udAJux 9֭:6#`Q7&U4*T|=Gߔ󇊱0d#Sl2[Wv`]_JHo}/j%5kbL( 4Nw[O)pw`WSr o㨜&1"5K8Feb!+>4uqnN'ܧ8j?Q?^ؽrH".!,^eya*0dktjUCRvU૆ǪRORr;Ĉ׺c֦笍9kݳ]POHFNCit6x^zd-B2ݪӃ_u_upo-v[tny< -_6bzR:!F"TU6 ǻ]-s(^.#&#cB>w dOƊϦo30+: vx|˧24fY۴c4+*Ycp{-W;4UNy 1=4D!CPA :6AaLj݀{=GQuGqȨ]80cZֲ{U7j|ҋpGI$T cQZB:o@k SCm˴B+XV¢|7jU_i0X{'YvbSAڿENF*)e 1o}ɣw".Sč%{E6HUW{׫zwϪS{k=!惷Vr^iF>+>[d~ɜ~3]:R"%_ _`0jӿA+oyʽK/V)<5EmFca!\돎@ #eJ}ff V* q[/Rےt<+bNjkxֆߙ;S~gkNv.V^F ZV!0tQƾ'p5db?D!QƎO;$Ot"tEo-5*4³J7\RYeJVf^"P&Gf=?# 3^QSf{? ̐Gf|f d6ɗ(Hd FųӃJ,O-CJ _z,z,DE=c)g)"n8pT**ӰRT(nJNnJo)Zmցgfl3&l3tz,3Qhw= OwGq1ABIQDWbkṃ6|mN_,}}MTQwOPE[E{TNOmt_p -p0ˬ]3C4ĺM͘Yv)YEBAr!@'OW?U16A`V35A7 Z"!rls3;lko϶|ήsmzisۼBԟ_H5mrF~Fp:?UV1'`Br儌+NZwœH=!SEx)BxYy utz]:Tk\1ekP;4B0gNˤjLj絻*}gj1BSu*#vSϪI:SV]Oҙ1u2Qөew^[OfgRBxc9>f;7~(uJق1a 2 ǡ`8evUv_]g~ǴNbI==tkl_5/ꅮtㅮB# ]ɂ].UUΌ-Mq' TkњW;;(;JΒgoI3Q᯵k)\-6vˈ2C!{G ?|Tg&Ѧ]fd5z1|Œ'Vym̝-br,El:1Jۚxx>& M-2wyC{.5\?nUl]$8&Q1!CبFİGa#B%CZ|4i_\{wgv)=Txͷ2q/U{J2*́p}JK)b!WaaWCxN $ % DIg.[#X5VDPU0E14O|/ ǩ֠5Jk86_ۑSބ2M_H(;ܽc]m> LK /i;%Y~%:,ukcjX9fkSCwrj`_@=G.d_]Ⱦn-௫W+@2T)CW0h$D,Ub)bLjiߋS_\MzAޏΊPSgE)!o-AӮ.#|X>t N%c ,ud(BY>cAkԼ5|)~㯪>AoЕoʧ-tGkb{?;1>w!;-'5h :^Xҡ|hG^9}Sjpe%`UVMν*SJt1|Z\򡧯1ÿKߖ6:mkkk^;ip MT+qCL+LFqV阷ǼU1aU׀BXl!vް0Z^t)B.E=]WՏIMV=:el)&*KPCV6I/So)ԑZT+&n&;u&Q3NNI::< 4*ンxQ\wS_/O7lwg*ʱwnT 95{̫ϦqΔ]Ӫa*a)O#^]kq,"g`xj]X;b_jJ&QeUb 'cu0/YG|dšx3 3Y[{sktkonz?ZA&_c.6[woOۿsc.oPwh;;#ggY5+˾ΙLi3̔L o<#Ӳ嘖Ϳth!J.[gÑVV?ܩ-xR tk{vڞ۫ct0"d@^f d Uu e& PD>Js?u;"lb!n! 
̴LoHWbLj~78GR(+ʇ|1ٝʙ=xv{#We|r5v(D$U_iΥ5JsѷMJj?UR TRM4;eͰw,O}j"^{u+mpQ`!u%(ɯ9FƑ;LJ2nO<_U4*B WWUD2C w+o+Xп˿DżIQU1oF1o5Ui9#;Ex{sB{l-h'Zak}V{;FE|eJ&JRE*)ӫGO۬}2)}^"Ĉxq[j"|rh3I͓O;O>JR'qg]xiC|I)xe 98ƃї>ڥ1L]:?'d=gٟx=YW/Ax܎cr?8&y0+&\:]pEɽ$ĈΩ9U{#su>37vL0ih' fY|#zFY[p:=b 7d-ݑiȴ GZ pj:@56P_X/)k<x_0J~M,^Y)InM.0ݑK4%rg1icJn_NLrOϧ5jِ5Zc?._"+ZEb5 }xGNf rͅBRSmyFYwN$6Uc3NSBbFz ,iS\bD:{wUEҜJd5'tFlj;l]clX3ĪwYOXLWؚ12?l@3&Y'j:QЉrgKtd U,oiyj *byusO |<=AX%%.Eɑ;ҮfqhegIT8X} A ;:!!8ZZv֫1;uLR;ɟ[}?O܎Oܾ8ֲܗr6⍿X&Z,_7$*B#;=R!AVEߛ;FS'O'7"8 ?r;n\&0ckF(D91lYq,lٲ?[nk'R eB1ĵ4ÖZ]Eh7[wuG1Ew=  z6)0 )#ǫuj]uzLj ]P_%Vq_%;F5-.xY q' י(d};T'',_='_L#^GRxkr멥jĂ$[P@ r0 "PE)ѷo!kFS$=Le"$*޼_Y;L '5& # '69r`ܳjG=z= 鍡-V:mZX#V:)-Cr(e^%,JE(%A8eBC}Q}3̾{!E N; ki'ƈpT Y%=ʇ F^Zo{@2=9]w*X wT5!GX]LlO- g4!}6y% m*$yhk_nIڇ7FiZp켅XXcJ!,J162\;_qnJ7D-,J}ʀ!ʀ!LeY]zTN&7~ߑ3ƐB/g<-)p^;/<3tDc=uf \qE)(7D[p(\QF4~d~d~/ TZKQ*G>RULהGSM)_7"2`ֹYD *7(Y4ѷ )uIL(yJ6uZ*/XDkP5R5čZ/hpF5EYM3i:2g?Ȝ sG\VNXV>X#ȕGF:fc?].M\Hc.jE9> O2hls"8'kos";KWҩܶUn[*/J7)>l>x<9^ ;Ү3ax-o[댵[ mbX7I4F*<E=P`ԏrx=[ K;5jDd[vGٰm(yiT& Ti'~Ms({ouvٵAJ-B[wAj;u!.[ȃيOcڼg,hx,hlN6 z11 1^mظLC̎]nl-W UX*CX88J{u{UxW!P_Cټ:BPd6]drxmx2T:hoT@Ay2j?B$صI3 Fx#} #evAF< $sFC(b+oy^ܨ'jw3=ƈ]E6X29{rZasQy!4$1FM$-A;T6)ĈXT6| umԄhÍ-7Xb8* U@[BcEdRyK:3J `I@dhH @Q,8N N#Q.k!^QM9]lw)x.~qKɇ{&lz~qs:p";kY׉J:ngW<+%3@V Y\JT n^,9򨧗GGE/X&,[rek!Pߖ"fOK%mG -v@;Q.EyX E7^xϬnLJ5j#)Pc/vUw ;xzx{֎!> ĈPl֦OT &IC3|zn\P+MӏwDcE۬mV?^U^f˾yyˉ%D-Y%<ye߈uvmU\&&.fO_Y<󧗏?g^Vrm3?5gkkoLjd) ;RǿNѥ~bܥl;}r:P=/ bO?|럋\.3N0;|<{Qf䮛Y8y^΂U'L?|w'=Z쁓̋E=^̇88K#~UToj,L)y-|z,_?\|0W~ʧ?l_]>< b7||E.1q`n7NhF>CF3HJBS^$E!]UDCŧʴO7܇: /U@]_C,]_CoCվVZmU6č5o(D|xͨ+FFn^Qd?J:`QXGa-{ YP;]=_|O_BOu&?Yxo^Ї|˭Gr?d~N?Dꁝb@$+NT^Ky- a,"s~3E?;ni8Y=g[f/1::"ktDL!bڿ1]4z޶E񧕈70/4]z҆&҆Uo+!ջpH4CLnBX~=4jDlv#v 5Wv n9b=hp5)bzoϥ5Ʉɹco1l# yzbw]ƟӖߞ]J室7wOFUsgs3W̻l1Gw;-?msj8k?2l E&hա!7^hiS L-@p%Pn2(Bx}d1QYvb/J(ܥL^dS&L2o8LyJ/EH,:lCpgp\7.Q<|\Uö5 1KKxf0`sl^6ج?D1*锌Vƻ2ɗ& %W}:-/¢b_yeemw]Kgj`laakvؚasbVϘG=3 mշlq<S̔Δ`)7VuB>t}͈%!XOUST08b>}f",e, j&ryPb[q̯|nȶ^J2OjIhC+.2 ^&LH^&$9[zpQc1ɼΖ Kg" ƈܠr8WUٶU㛼OJP.y.i޳TE_T/j(Eeapndapy 
""\<נX(5/׽^{]h>[`Ch>(P l\{с˅;gVMraXEwHqHxHĖWĖk-k|',jp^wͩ.]kK+g«uLxu. H P>xN8oQ?xEuV zqճ4s8Y&#X$ڀmr{m#^5尉5v,_o15e>epK 1! IaF cZc[íւ&SQ=Fb8k$'k1=!ָAJϰJd8f0~G;G.i|]JmuJ#G؏lE:4Ѷ0#*b)/mp_6+SU53;lx(h[sxpm0͘`Ӌ9F %& Lu(D-iaISK'7E41k_l]55޻?]$II_KZ뫄cīR젰E**CQ9.f?uꮛK7?'&N95ݑ*̦"(#Qy ƈ71q3G;sx1ۘ;ם?d1 ~J.|=vfvtu(Jr,oV~}v:O[޳68 ;bG1 k:1:ԙ_hlZE"chEi,7d1/γoV`ʈ^2kUQ-K(GP}%,ZDӭeQu U+g/q_;˙i4-f藗֔mEnQRܔ ZEi|kE: 4"Lc_dsY, ֲ2*k-+Fk.,~bLsӧxH 1MsĖns`hW&>u'#qגB$g &R -':vY1T^7w5gM2oX|6Nm qG`v U1|f"Xbʓ.!5Њӵ'AƓΨEDLj>UXZh*d H8_[K ^mFʩ mA$ϳbh$mhD3SOZ#L <[cwoҏvHzXYN+ql9Ez$ TcQXw"Q{sMA6((PP|?M1dJwh&(ѲlcFQU4:@ik α3D; w*Qu3Ry<*c}$ (?U˒Q)cCDT6!U63Wv%7ecxjSᩉS~ʆt/\IY g{iO#FpvEFpvEEFp Fyuˁ M6q'oP+>ə cq3ϭ!u259`XwY"b3xrY²N8 WוݓRQڅXbj5b&Md3)&c4oNM+ga瓅< ;~a;lK#4(WjќN@8:w<:Q˨cfc$"o2j%.nP`y NNtf_<ڏTϽ&עzsOs뾔8+rX/N14>"$Nď)"Ak>Qƕ2`X;!vu5\=Db]Ii5j~o +{ID\c]Ʈd721u6BH3\`b==69;ۈWmDbȭ2( CΧ;F sh|b:'S712 dO\4G̎&fQͩP#UQr*x ™p!Lp&Axd̸{<27#ʏ{?{zŹpgYϳi l:?)tޟs>פjoUV͙g's=*"[$Ĉ;$F@cAȎq:<2(s–FOH1X}g6>6{tYھ#$a;b!XxReΓ]E,*/UN|Dq>|矕hš!Mg%5_h?YxrgLjvTCY j:*Փ")swk7fkE1u2v=HuTqTE_gv¬X²}e粭}4l_Tkתʋ#qژƻ6Yz.'˘ \N.'i|hQx3j WfnN]qgGzpsCԗ;[u}oD)}\7xS'OV^U;ٍ[t%r-㸋L5aSٮEdΰ8փV3sp57R2WqR6;0_\ @I~@^ǛW%#eF.X`9X$lXdތ\F;FC?aЎw/X .>?PdeyMQ6_]v,j|' iFz2i[!L-1  yp/hG^Whגztڟstm<lkϹ-#^kF܎q;j~BiѫAy[>וEƬ!dYc徑} Ǯ1`+YTg: []?>R) O, 73Bٹf qӽghۑ:"cg6AxV ~{+^kWZ*YFk%|me`|ppO'_9@` "ƈ.:?do^㻌o_ZnOP=oy9T1 y˶odƥ&JB.ݗK(Uxv{ޣNy={,Yi{q:Jn$hTdf%cQ"(CR1`8A DjUlq'e%s^l+σ ,Su[AEۢ7ݖNU,D*t N7^D"tJiM:7xV?I9R]bwRs-ÔcWt~#ļH\߿:/e~^xxD}U,Flj/ݚubfP}Ẁ9sW<Wp^qהowO kEzWHrMx )kiz$Ĉ.'4' EB90F-hE$/SMM2en̊ #n F'r='ôW9jB '5a!v&t$X!pcbltDa]K-#ڒYf'{Ž>ެޛs8^Kaߵh Uh\*^_UDjFT c_Ԧk-Qk2uiIe 5] U}VO[bsc_"B"1rךXdu |Zي۪m9Obu~2wW53*\vm--O!u^F3H\ y(v6;7J8Y:ӑ:j/f;Ĉ_Pz=sXsXኳH^T7JI^,v5`x{z1[~Ng6\'4kfxjRIDLNV0H[bS0yV)C~^0}`dfndN5{|qf5aJ>DA(?.ekh'Qr|l+ĭ=/T B4c{f-Q.EAl\ :M%DognڿR4Xq:_?eI6WjꐴB>+i%Qv|E?ZԶ #OmY-+iDQ-+L4& kgdILo$Uj?cqudh0!:)_7TZlOMH_Ny5/ask>Ȕ+Ĉ뇥_p f>AKaNAs!Ȝ6|͎G7;,6+roQ/OxN|?>AxJg]a`9dd=̌1/Xb-)#GVKpG\v7nZih{߃==|{p|dubgӜӜXӜx||6T #j.D^5㯵":U"-C+Ql<$EzP*1sRvp 
KfLjrۺ<%=ڽ@?5f:krtڎ/+1D~,"qdef'/9'v:[y{+LPB-J!Z?@C b|<;Ih;bej^ab#L~z> aŚyV6^P72A~EqKjʒMGJ4Q]x]h / .J ,;F5{'Ha;D񔙢#>C9>C3;~9PگϞ0]M1"%P|p2tX&*5;fv[H #Qk"J,(J\6^O)69:۔sƋxȎYLޜ/W͑{|:8|:<|O{ aSX#^I۷Eb[CAA {@xXty{9[3d{"c1pkAk~-/ɼ e6BX|!v_áJbf**%d~v].Op$ ]<)WnmTpbውם `zߔQXLxI8lCD!K[x][Kkk:ںZnjyWDZ}UM*llZyuZ4iiLxz!QD(B4j XWNNvҪKԤUU[MZ.Vb:,(3ٔp/$'d㛊D>Ru[8Õp?m$y@5^Gi B;_t;#:#\Vk1ÅrKm">MDdvx5 o:|RE+]];Fgqi3|B|%, Ix/-QoEq PV޵]8+@gs`|38`Q#uj?+DuXbLbm$=[ygJL!#b)d#NkKmG5®a]]ږ{eZnǪ>󝜙ӑ+/Xq흷g0y*"JkE6Rh5htȌaU᨝ )Qpn!Fk0l*"M7e-!X!$(IyK?gЁ5K$/hͶZ#֬PHoTofTC?sff~A,#O=kVU?a^N:4d 4gp^Ԝ^ÙXZ N )A 6 -kr\-{G2wzW~qOL>q zsG8ܸI;,:mvOiUKc뀻 ŏGMUC*V5UCo1ooQߢ4LB]h7՛l`tͣtr cP!@/x}OjcB_K|6lR/Ce ~UH`J)iLW%%zf_ C ߁j$α=79rx\h.Zւ[V/ռ2"$[IwJ \O}JD00$I9,<^b5B  UYƪ *޼V}ڄV}j\̀$*9nre@ J"S> m8m",[c +DWչ{pun_q^lnjn,`5Fm, wWYqC|'h{!K:r % GB)͑P ,k-&˃CdO&CK1֦.bcKZі%oBk-ڲlPt :Ux YoP 1cO.t"CxkfaC~eYqixAΊH ~$W%fo"$G,9ȊK,'='d1[9O\> YL|抷|: j!9 ~!~}G_B_BW. N/NN6w،f|=X[7,-.) T&Y 'kIA`y:gJaO'=w#HW.AЎGPPPuP P }sN272,KFX75`c EfJR{jg6zO艹]Ra+~{;Pߑm㗳N"C&  "&z+%aժO_zns҇svZͰXF Uz)b׳ R{?*<gD oDNn7Lnu1ɭ-1ޙ3$a"ά%ʶWlOAٽ} FpgC,gCl(yA s^Њ|,x0ޠ,#Ly(z;FsxCvlJˊ5Ɯk5+2OuphcDwxoTk2p6JMR&vLOM;֤!_֣[YG¯6XF~nDqIcQ$k%"NYC'EW Y$0m={= CFByi3 +B 劸#')wC*aa=Dk+L㸞zzdZ2$v$ _hPOZ8")DcSB*&Kry[DZ$;j6VXĔ(+5Y bc*Ugb!FА:-u=- *}[/ɼK>æ!l2 c+}Tm m Ub.PJ ְ@^^bF/|% &m4[N.`B {3߫߫ݎnw}S&JZbN^c/|LB-#srէ:O ĈIF><@:=t|.g2<L6DBy&;^;}":XU~KX"0Vܴh.$4bw@[Vx@VoyAm͡.78 ix @D%vXD"8@5oX,,Bꋐ%!InLӥɃ! ^GqƫʄQT EGQ'kdD9p-u F>CO}jNey/^KW[xVܭzxؽu$r͙>cp>ޛ>a p]ZݝzS͓pt:Qeu#^\sRIZSQv%OmtQ:@'nw#⡜2ooCP.m ĂN":tĥ 5x݈e3ۍaMZsQozMO7zZx͆Zx͆:CM ;4InĈ?ZтF#^^ᴗ;uʦL>HcR|M:ĈϋKGϘbB+w%rbNvE/V;w8pq8f)?Jl9_|`LPhyaQ @+.oQu1jux1!Fdu.sufXBM ^xO4lҍ ;+aᚇ 8l oE/|Q=ڽvxMH! 
H&XxLヘV7=1dGiN8c#d;ۗ=Ĭ <00<6ƃc;=v/>1&4Mi!ӴwS!l;),[Gͫ|Sy՝޺_랇W 5#mZw[~M5i˷EHL-\#]ZN[ĈײaN 1SL<>"OĝT1]+{1ݑtmLΎ{-O4Ń莚 o Sa'Q3WxO#]ţ2a%q>ʫ䨟AKϟa4RycMn ?}ez[|gyP\bL6wJdf&XW&uhav;-taui^+BLY,Y;%)2n=hzPBS99G3}tә8HE깛lܭs7=''sq~ CȿlB׬RGC,GC|aA{H$.=*iԺӡu3M/_hs;ЩB6zg} =*N\GŠU Q1(3UL?S%7{fj;锎W6"S!4`mc0-3%PԢui+m=k;FZ$i2:+jѿ543s9'= Ĉnf* D:F<}Q7FeX;T>ĈWڀ9ACцGm/ )KeMJV1tÊohmڟԈ6΃9k67(cێ/,uQ{ UbxYLr4Ѽ\q\y^uO_4s<ǃ:=uo1 7rL91 9ߙe,R'eErͿ=>>>%F?hz]vU{/%Vpd͉d̉춹omm+m6M"QVL:xK̤:j\uR* Ss][I;M^)_%k^#u.=1y79̟[l(_(WlxEx"nGS-t7-vx3]m7G7Ef.*xS՞5>K\~pXa<_?cx218IeZ*8f=Wc52~Pu(i#^9 kj#K,;DØ<$_LuʼЎr$CLIo+|ބy Fv΄v%tarJc/Q}A':IRsKJr =B 5"5R}kdMg4QpLÛ$Z*f\E`kLjZwy" C> Uو.sh M00FD'CQbvu+Nod5F1`ZU**.G-TĀf+y@ҊawXZxVOWBFu Ně) w"e=]{ `[n-Z;F'~D_ܗ~B{jߧeUqyuLg)_\n~:>̭-/d 䅒d]3ܢoTCP>A_l coYzWg'JY 7oP͂K]!Z{ha=9KFRgeVE:*XǡDDݸ)7v[l d;2o%)sf˚pqaЎ}m#ҎKLuI͊7*kD˖5eK1D1&D1&ܘ( "TGpWa1g=MN1 ꉾIbR%\ir?>tφҭwreI4XR<]6  q(2vga|B 'HlmkvHxcJںRn]=L&#ShqdXYl} +?[W>]xf٠ /B\>yM:m6~<lo6~vzB:=:;*v65U>iQAx" ZeZ7jq8fP1Ӆ3u{F0%_W_>Oօk s"8 3˚ -kE+. [ޞ:'nhW"%&.V^Ad:5Cfԣb7V9&-Ջ\K|7G]Q}^o>og7}ՊH ԊZMZ ''T>=aS*|GAtc[}D \BO$E'"aIlp_;392^۹ o3Ce38fɖEtΚYKkmNY5nt2Ih_H 7ZyċFqqq{V]:m?-uZ'K,%²I,~ v;b}G2mdSHW[vd9A4r23o3u].{T#jי:lYoYd=H7EߚeǷ, w0"_O 1v:|$%I,<5"WbD+o }f2S+(8DϹKiE #_?IiEbaH?T;,]K_06q^cW#^faBs0JXs!̾w1%L ߳ѫ{@ 9!~Q(0|i.orع:27M/]d[s9=qfXM2W/G}>eNOc?f3NOk硪sc_$Wٿz q|cJ2Kn-Se[Dce' p-L㛵=B)C$~'!Е!U&ؔulb(f)-xyJbKT<;6yȞM6O5yySco'O] 6-ĝMr[qNdooۄɬмA KZ)V9SZi /."[X}&.%٭w*EDsXeȄ7U혮tU~GyXm|J8_1y/]D3%dTe*2IG9.} +||燂8PP㡨~(K#-Ƿovv۾nX~rkЧ~q|y~Eom<(JӿXQDx>xXBKߒhm^QH^)Ifkgr`T$ke );q<51exSe-i4$Vb(M6cvKL j3 oiT.95-hQ1=^' Rx ;刔:L`Ra>:_F-kTZORa\V&p÷9`n^?b[]?V|;>\aFᮅ7b4cDE|BajOmaOJ1:UvrQݖ2Ln,] XFRgY 〥ɹsC=|*&37?u==涫^ʩ`W-&ۮz`W-f :;ƺƸk%6FS2K{I-Wu"Nxu"/v.JhW@٭RIQ/Zrk<" W؀J)T8*CjCe+~ g*'GS5d%s bGSԽ3.6H:3Z|ddݤ_l4W[#j\wlF!bXJu\,hWkן.s@D"SeKu }:8kwm ЙջB%RA"A s{q Fqv>aj~%_5*OY#_v7w"X_j\K {د7 V4oI¤Bu>0::=#xψNdlψ_qJ {AD~~r[4m涌R,y.Áu91t`{ 3cfA]#. 
/2]v W"]@yޛO 8g}1՜QsHjZ[{8=MCOFx?ywe칊[>*OV_!M e9oHM^&ѩ+*|p>%L*tzNNkvԦMCƻ୯[<շߣns lۍ?ofOthGaZ qXS\Z& e'P+!pG=rM5t-#͖eidj:c${s} #H~M0ȝ/;rgER 앿:fl$h-7s?v!82Y8F|?ɉLI4(bPԱ*VOھɢ zkUwj %L^ä7`ȉc__1(<WU]|DUk9ߪ=~Xcmtˤ 78Hms.'UUc| u!VÕwܗK-ɨ`Ju ,>^Յ+oDٲmqorJKVKd &8,!ϯoK1Rz~99/&{Za70C 2wnhpC mW1j@иr L<%T|}:*=Q QVp_\ 7X\ ʏ*hx)z$1_ަ aTX!xV5=QQs!ht&ZThN`bS/C۲`o^Pv|w}JJ&;B8Ў+twok7+̏T O7wA| ν`h7xt\#]c"X kl+k3cִN-t vFt13&1_}LPiƜSEsO瞘'MQ>^q2+_Ŝ}s [Mi3O7jw'']+<~w G:*̉Rr᱅Ͽ?ww].,:"E^Ϙu! -{snEx]4Swǜ*t~U]TSH%?g*(ΓXIKҪ%d^PfJ,&PzEq퐒\A-eT=S.#Dri\~==Lw(Ȱ< xsታSt-ʘz(+c'j.=ܿvۨ38`IZ|ufbYP'rP vmvmZIs0TO̬=y2ZgqOǼzr0=[gGI$X(b"<,Crc`v M-DhH' *SXhp"?! [p 4uשrEW**WO̟|0F#݃rcN?YQ\YHRk/ks8m+[`q㲉%)E;s[n2[ D;]e~w'vBl6ˆw]2 lO^+، h3(i @if-z]mEW,6*2d YƂqYa ':‏aοC0K+@+;`hvzSO<',AGPTZ(^ӎ}]ŕ ж*A7}kUHSSצ&'rҹ0n/1c,XlD")dŌ9!!zDη!y҃x+οF!Œ L*LB2?[BF1-Fq )_tsQˉBrs4NMްˉb)eٚ2*x,w)-i(U=8IP+^(Uk|I=_;eqs/=Vxq7-d~cɛRŢ%J9㧻NUÝt]AuV!T,7qdT:E x7Ȋˊ=,[Y eצ\KVh&ŜWLS_[8˿~jehMh.]F?h}G>㘲IY=^\N Y xLʠ0M>|c^aoU}ǜwm^tGb#zݑlBAf`?U=jw ӨYkY;c0>ѹߞgףnl_N UWEe0%`[ʠqU\ʟɯNs?4*̟\;8r/ӞUn`%B{\ ʗF1l(7&X1?#6yp#t=Dž.L4-ز`,CCv9; 3@S:o>}q.Ⱦ8Ao})BטLu 5Ų?Z1oO4ayS~f>yS375X m5^6ѶB[-`d]OwWlEPFJ } v_&H[~.gS@^7L|s|ڞKH+v^KW[mQh]Uϵl"SI#zFVgsqS"LI;wUEUPUc}U[ s6`I-9;I׍( gwϋBvsrX f\=ڶ.`_ۻ{{i3Rǡ.T˭NNtM}I3Q|0q;J~}81݊b[W`kS``b{6P/kbϳ\$-^eŀuoq׫eM c! 
EO)8s9ko}) yTAz?\#tKO/Uu)N~XeO7CD&4v"x9I: 2cϠ'\ZMSZ_1`ы3p~/00~W<|\kY VD:kU _1Yje2ْlsnvYo}2> fejQf֖-i `xYҘҶ6 ))IaǜfsaQr>t g;_ 49Oxi+V2(o'Lq7-&OP:ٙV3T8h3(P6Xޕh)a3o%]gy?IO F$Aj(!1IOs>~ |Tk-MCQHxtv*} t!TaFW}2)azӉZ?Z 1i֦.ǻjsRhv24rWH_\ ; @ޕw]'Rݻ?H-b9**- d QJt'q%5d?0?xȬ*os nNZP(ԙAkZk4rsap!z 󭝐 e쾝 eHt8lRU}<@:!C>F9 l{vWm'XlcSk߸@LLA2gnZ_hy*;N)1/)ZLfYT<Ŝ$=)np\M%!u2F/)<*mdD I69YQfCak;,_yg]_6TVV_} ٯ:*xd-.]h͔KVE$ kF5nb3֟{0_{ ;SSP/8uNǗEP#KL0/85L.v=ZTKhuiSq$pVNږV`.)Es1vc0$/7(f ;ⰼ^Qc;Nx\,ԛyek\K%r)_.˥Csm)9P[ ,5Y}}]ڇidև؎_2/1K#y2ƫNvRK nWuPbƻ]A瞼OsyzK`XZ( OyWT_)ٞS[6':P6gnE;;FVzÇ2*2Trq@oGA9 2K^W֌;ѽRm9TJ t8|<#vG؉?`2 +tnl3۝$,sw/^ǽ>94'0'O<O`#wDT#,^wD~k5VW&SK4YSKOuQ<+dws3QaF!%[Cw)%kLC[vyP?oX] _rV1MR:AƎ9p(AR15A*/|ʶ]*FAVs r7"% w6"xQ+f|T@01ழccCqfWP bB1cͯeQ;bz v, cW|{Š&}s>&Y4 e;K91a"@q6u]lŜO8A,LZ듸wyWS ϡ)nάЩBٟ"kQ@;mJWt1>2HOoT+`2l;/}>nO_Z:2\n{)Z/W^ -X?oc1_lrQ<2crvh }y" k$[9%x˷'WNKoc=XBJ]J!g AFrY.ۖOX~`3K?;=,ecη+'|WWN^tt v﮵0ŏ -rE盺T27+, 24ORziȌJP ,1jǺhYǺv; .Brz8[)V.ecÆ1UlŜ_\e4Zۘ[f|4ga˳0w=Z NQèUZ0헢|z Yį"i0&X i+i} qn#2;~(v<^8 KK7Ii#XUPrr (A8|\Ȼq]ǜf5HeAвhۨS1k7g^;}UPu%eh2gf3U5c7w` ug|K'I+5i X3[ϟ~0H{3ǜIUlQAjȲk7_Vz$谬Gѣ\]<]N1cnКY̌UhFH_#L~ݓG=oâ=.άHVoYVo񣱑AɆfdx=Q,vZ`uMג!BdD3K6T6cη?]%(% r\Up8wld y}J{*k#Lnfv~C3B7.uk ekݭ˒ K%v͊cJF9`- ˹2|n2J &M`7 *NӡU} ?Wf?q3c}q3SF^|dža>0$>f>^g} <+ Ɂoq ~_rh!wrhJο9D԰vISL:Xxi*U\U?Th^F@SH:2ɝ+ɝO'ɝxɝgRP{(2'fTڶ ٖ:CQY *yf@ 0"ːF7ib[u8[OrirdyO2g s GELi9䨍ӓFгCQ@6ҵs13R8D3vHLh*1⤪bkX 51it?t%O[!eP7;\5%*rq]6m?wĈ`@t5O^ 9/ծaLҗ\ݻ|$#$ V@=)?__TNXޑ+DL'H1K׺CuZM+~W`䣧JHp FQts]WbM`t Ӳ:>8,t~J'8|@4J{}jłIrM|j2b%֌99W{MIO\S[丌a!9Bqs=VR !M)kI3-9X:R2Y M۱3FFYDeR>vH<[cɟ\M=p5Քq5%}Wᠬ\(AYÕ\pry-M)6vHɏ|Ţ| bNbѮxhfS<<˗xEאJ~qkd"ͫ2iwvړsN 󾙘}#v7x<t8WQS^GM|~Vq Z[zzo0Nl(Xy: pV7%s(;QaFTs҈cch7F ǜO: ?:_(ad^ZUD{s;ȩu(vS'3&hŧ٧1)hsH9|,j׏4Z/ e;x ە Tv˕kfr\MdrI9vUҍ(Si3e5B]?* zI&SMW_eo3Һ*%ϫv5JGۄ{>$:rbf(jeN1XQG3ѵ5)9fџ3˺] ^g x%u02 tY[Zm3aӤH-l0la .)C8)>7On!PnyBa Gs>ܹKd(Wwj'w w/dpl^![0nC8Όp2y(峗b|}=y*@uT=mCRK0L-gh&ghKP 2AU pdY:3NUi*Bhs (6 ɚ0_zz[&8!sF;kHLPD3 ec(#1)A*(|#>if6ilS 9#*Ѫ C>7ݤ3Jae7,~!с)}A;d\\A$=|;yJ7O 5/iÎߪzTgWx*eQRgYJ]} 
Ԭ3i3gƱpqjqߍ0XJ5mzVeV3k~+XrwXH1t7p=nr ,bj %mb&MAG;*ʍ=`]~=,_X #1+EDcSqR 3/}:][Ҭoŋ-́mتn%bώ9y/FmΨc_cYn&t,[k&oskl~Ykk;`X[C`m3~UͰ'EZog*HM3͂_3z@'wUޒ@E]oRP~=M?[QC_a+k5~l#)c =)Ypɐ5K|?=;I]&¨gFne\wuu]SBSW^}xW(MX)os!:k&P)Zc ˰RwIWU,Ќ HO٢ɗB(]9;$-R*|hsŏ` Lgsn^u [Uj2s飷I"*[|VpwD?3߯ڱls /[!5B|F;>_u( | agl^t&dCřDldIE uZJΪ!}U[hOH[E`.sk@QNgG+Xz:X?麍–ѦkR>`:bzrn܎9~qv^*wUf{uOf.y~ʺ*gy(Яg hɟY(D} x ‰w#ԌI8Ϻ -HƤ3Lp=k 1TE( %ŀѦsyG;@g:=]'tzfsaWp:1|/rS8h7U▣݊]1vҘ 4&hLk3&{ `;ָ45ncw`SFUۺ:X7KyA:dF1I~nn1g:Iƌ!\6#~y"w#0\gQ01.U+OWA϶ f1> ᐑ*ᐒ3ǜ?WɭUr?*jD؜߆m5YE)~VђUT>8˿- ©~G? be;,R;N2ه}7:m?贝!%Fjr@4&GP&nj\ʅ-Ӱ&X3cM:㳟u&%6?ޱ>ZL |k~\aWh<k1r(IXގ9GE-86.zuPt 1-`8% s1E(Y*)B)|J;~V:xUBM$ _ 5?UGMI* 6sԳ+8u]s!1cp&Z&^0C~W-;hZU r9|ËbJ"|bqKm|fww1pl˜6CMN2i-~0ຨd?NAЩ鍼cο[`v,NvNRũ 7ˇQ\(.UL Ԥ,s#Mes>굂R^X=ǜ+(^-gX?0 ?U!ǜd{]q88XEnp,o;.?uSFB XHSm!Aن8 e_X;s{E,%òȰ`Л_(F>AG&Pؽz;* Uk8 ^b}1E^lOZ'B*sz,YdzXYҡa rh~ ^⾣Ϸ6J*,MN>8Kc[%c_nܻxcbwk/8\%ْ2D %\VIc{J%X7{a gl·E[Rm#Es>24)"!EZE9WPGz1k7 PSnodxNQraAlP T,͢&;vv23)[Hi^v]tl}R--CV>W&^;L!e9?G*z]eJϥdx@c[ھӲ!ξ+cN;H=d=$\ҏ=_<.Tۿ-M+#3Lycη%u1dd] Y2YK!0@*ܧkM1篓~J[z[YKOc;$ 2 ߜvwh%9,%[H.á1wCJcŁs2 8*vT(2"i\rp!Jܰ R 9w~2 ԣ2d,?w=r j'JvߍIl7wH7Eԫ&'Z2' jL1.j &]{?E s}C^-jW˧vɈO;eI"%9'DetBBtu;#+ˌs&|Sp8N8S{֣ljz{T[=$/[+"ivtѱG1*a!`DoVx9hOۮ":wltwQ9^OF^wǜUx+J RbdO}75UU9c}@5ѻHvkR؏I!a.߮PBN 5+׻RUL&/%x7o:OLj%9:#t0s;s90 ԼL(B04(K~Z'7 :dj*zFMeI;fYm|9P^QxJ$HyDQO]qq^$,ſfء 0{j%_ oU؟=oX##+ZL;={{ڏN]WdrC~w "7ҨXM=oXpU .-625XQ N 2L 2`euJtTCݔ.]퍧^ggڕ7RZ?~o[Ʀi|t+3z+$Qo2 'דY82u PThߊ9@( nPP/CM\R:&O^sםoիCmv:Q۲]jwV0 ^p1?q8Z _H7Lڿf;PI!şNM  u6-bߴ7-KzkZiޛo0ze֠ek26kPǞ,55?3fKMFw#jw#jB3~(_`c6U@+׿4rSî||o5a7uBuB ouBuB uN*r<I}>sbcYƚ(ȍCt:{CݻMݻ9)~N9s@kuΧe?i]l(_!fsn~j2 ԥ%a SQ?]r>3 j$N8+vʋVZTCA[[0+ǜHc>hķv@:$D[:)}0Ȱu`˃E/WB QX}^Dtxuؿ@OK?.z4{r/g_i' w,5iIC:IKsaL>d(O?:fa[ 7j=ȫ' 4W%אCc/eN>}%Voh s`eT g'Oc'Ab*J8| O:m⺖U}Y[خ؆V@ M(]H"B0f貕р?[0|}e G~>zO0gO8OYs>@ Lp.1![=oǜߙ4rZߑx)rm<ѤkTa˿+!y.LB*w }_^6 y|>JܦŠe|@h bVo z!%CԷ1D=:>F͌vz I Ŝowjl~} ݯO)X.)i-:5v|,uU'Zɿߥμӄ`@4Zix|ւg*ET J1*ŲLdoAWث'6󉽚bs'VָqDW=^oC , Y̽YAq^P\ri\I(*s`2 :F^Af9(~lD >i\:#ي 6t|qF':0%tcP"yZc| 
4=˜ƝeJV[dVZ`>CJ^hϦqS4u`L1ߗ1>>fd`1NٵCJ~MնYwU9p%+/pjb.x8 )cmA)oLEK%$q,ʴ2(xeƣ|P[oK0<1ux.M}? bW*FYn.\zG QB/W;_J6L&N!:`מjϲjC+vvTJGQ0~+ RZ)A͟QWK KK%w~bR+{JV/Ŝv24Y&g"49 }>QR|q$;x^:((/-\Pӝ2Ep~;|;pɝfWř[A ͮvfsߕ7mkږⱶ;6ehQ?ETֈD4~ *`ZQjW&BSPډcJwP7|)8z$ɡ40OxkqF&'({I}M_rTKK^O|UuT'{-ǯwX xP(5{RXU'aBX:ǶedSKF:%#?לA3zTz eic׵#dė.]|Ȏ9}q wh3B>@1,5XW_jyF1cz]L}SC"gOܓ" ks5H" +u>?U.f{Gudݳuih v4k9ѺFMo55qv^Ek5q .nz"DiջypzzxaȚٶg0/#;!Ŝv~ ۱v~J te;ǜvlt#|Σ˹NIଘ>C/yHKOCC{/_99w7m*z8퀺ډl"U#b Q&%KMIǜkf}ܜuk'#}\Y&)W+y߆R%$tW$tW)|+xZ]_: ??/8z|<ʍ480-dݫZl!75dsBw,ODʞ[sPrk?7GcgLW;$7z ڱ?Vq:u `9_ֿVO){?|: {h25Ra@6͈񊛮pl]]qG9PRVRnͫf| I|qٻq.G:|:Q~DsU=QAzɝzRg}c vI,ǜS6\rh^X#Ћ1jBGI_Otq9sx #a9G-k1߁" GQ'd1.t/σٌ CHʌ{0 ׿ݽ-pݠT^[c"ֽ2uo̪cǜ -*ETBAp@"*7ѿ}Z*÷TuTRgK=%VkDs#Ngu),8/&tyqܩ?)Or|8F E"[OXv9ߠlp tp ֻ^GmA ŀʫ  ɂz %Ɵ `Um'(|} @5Fo58j@,*T1짻ُlhj#VyiB{)襸Y /߮O|>9J>Yl"2M䓥pǜrӎpxA.f?Y93ajI?}rOcο;PH!5LL??tm1`ʘ4hX Y[ȐueLC֕ Y>\جi?D1Y:yB1.9*G]~u>սW =yD{\$/ڄ~O&$'|&٣ k*x(xn zǜo+J8w! @)U Z!Fm]8d]lmoȺ(ю9f=:/AAt^^!%1K$y-$)AAwƏWk\ȊȒ.,;3Oc>cοYv:- l#hD7ڐgZ_O>PF:%5jLBXJCJ;8e b~ s>vU% y2w5\ܡ&0+1[]IsG5FHsG’9=vtkѭ[[NuI,\b_C=>v||ZuÉ C]>N;4v e?ic:Pu9#Qs8vwԝ3j=< X+~spϋ!a%9QKA'̎9fy{/I==*@ZUBȌ>j]} ǜ'V33`u?[ Xt87]=#gk|m՟JV~D.(B-9 B  ޥbjӕ*<]):[Y֤7j85j_;:M? }gQ? LUy :y57f-ؤ=ŜӌK^Fdzo2۳fSxpMbǺZyVޥ*9W 0R1 X" 3cxη8T0SĮ ET~q&^k$-\4I:5p8ZUU0]9|/xk4wpPdv2|9(G﨨[<„eIGUKZWk S`-~ҏA?^e:Mj󭦙8Z7IO?9(5r@nQ#Z{Ɯoǂ lQ;>7GcjSZ,YF'@z݉9A-4ѝ|8?/DFľ߼0>*8D,T(Eu(|G'+=!%Q chη¹VzFcηTզ|9 /dWڜ%u?9BOSkOm6Ӭ4cj?S,dÑ.T3mBBMͳo1>߃ehD 9FC4] "cT7pEp%s7[e@`^~3匧ϊw_`_f/,PN}НV? dE۳mO#i+i+~HfjX,2[sϐ`XbH'߫-Vژ4A$SV`8FBÏR5D_YϿC.$mo\WlG1݈3R do`$~rhLAk'khh ){!`@꘣_0{`<Î93-}g\Z*[PCɜM vx1@R54rRjGkXմvqhT>AVE9bo;][~uLkak`.T1 F:8|8+r7zbŽK gF]hkKPV{ ׀;R{uzmfs>hFk5J1 ';xWޱ(|@?D t 9agOW5=3L2ӫq13;pznNavǪUkG+c=JVG~.Wwk8&$xRUHϊ/ЅKgfj$Gв-36@jtU2se< [Pb 0*?*\lRK'C~WAWhqC;wz:FW7; `kYooO`>UL {vpбB/K\}? 
5@lB!+cCV0U8(3{mS):C;|I&OT O|^Y c\h} 0OXX+5FڨX{*"w쮯",c 6su&P R1.Բ}\>^CWP7IdڋWtv89Aou'1غBaο'2XBx*t 鑁߸3uXW6U{O'a[BE۝R!%[ r3Y7D^81q]hkhǜo*~]Y !s~ʎ9{;‚AXҫ Íd-c'YMyM)D0,]Ь@cOetEHzP7Uv5*G] Θy o ,mxkJ^ɰFն`N9Sl+`܃x$9~@L2ά oD6*6Wt`ϋz檘MsYHE^a`Ta`,H5* Uȑc=BxR x ėw:d-O0jH8 =}h>@@4bο)Ib~j.KS<+vvhϑC;coWӧx^U= f+['1rur ȳÞIE 1.,҂Ӵ7K hC1 V/⮽8~m7Qk'y|khv]CRְ ͡zr+4 Awt[ͼX=Sc12 6;?8+H4K塊hiexXxVo< Z ׵ c=XqŜ*9AMC1W3ڭXV:vh tdz e`4` :LVoLȌ;w IhBym7aYB*s>U巹C=> )Y=Z%F'btR/9`l6o^m0ߴ!%ghλF%Jg9foLYE~!ֵ!,(-~f& >R}ZVLjI2Yvw֞A1~s8W-I;|\AP:OcX:]~qʎ9~qvӑ^{<m FՇ!ɥ6]"L h;ifp>Pzyޚ6&<_p1u'}J0C_kDOvusJ|6kžW| {~n/?p8}5 ܮԯXA!%!t ݟZ'2vXSevjKcql:+s=J8U|8))G7i8 F >x]"-܆siN+pW׭+ms5:(8~zҽtwNgbj: ۍ%JPH4╵G>!ޮxB4NEד!Ġ];9@;ݴRGQ1x!T B ?T|C:PáZX@֙T b"܎9aw•V&k15(?Չުv+|}fzN5` ;ݐfCIb20b~k*:D[洱ceU+ǜs[@خdl35}[g[,o%>?djbpK1%L no-poIh ޡ^i޷ay4# m@.+a./},2t6ٖzyo\c_ϰ'z{Bϋc7d0 ԫt|L.7qr$5 rw˄#*jA, ~Du]S^TZObk2C9qc(Sc(WP' eѥ(^e4 bDZE *K(g0s>K{b99t*sM).*ŕaeR^RUph2?;qh(^h4oR5FRs&Ka79ONEF4H*co!H= A:|=xp20 $'LƑ;(@B !`B3Xa~-'BYj_[:οyUNm\h(2s]\9;!Yw U X3~]0zT*-`B|6H%k'UAs-@!RAsu&\X^/hkui:W`PL36LfLާm+6MP$|𬴋wƮ0JD\DvkxF?qFu?c5<UrNƥ;WܵSg8v\N9EKg﹆Y?*N?53bd k}`X:77QE,:^1[3[`$Ehfk[ujLŠ9XϘ1y-Uo%Ϛq1262iT;^`Ө4>p I?G)AjIZKU)vC9\6\t_BMψ0byb<#Nhߍ9+Fєկd&>;wߌe]> U >RmI~<_#ke(K+*:zwBJY+zk>ʨhsOb& =; ް/:DZ3V؇dTb7X |f"(K5 D|$*up p;Dg2N8e u[@a. w#[aw-􂇦{]>w`vJ=J^E0廌qtf1ߏjlFYnK7x,0e_G-)&8Ahڣ:M=F]c"M&@Mʑ 6ݨCӆRfh DC~Krp 4Ud=&H[U%sckrҷnv26—u2;H;wchiyʾ#8i? Wοp gG_,W|trv;]KH-R 9f}Up*!=>ÖO.yBN{PehpZ6KTi L8FTl2L;0~|;5,x=%?ҜyT<2<2ͣNk&5U\eWEHgV$I>" R]YbUX3ŽF)Ӗ'`SBG&iH9^x.sbi+ ] <~AKS_qgj @~q12փU3P2r_9üTaƖ!g2`nhL>H]86#FF5=Uuk!BdW&cǜoTU4uKi {1ۙ)B|fU,y|'%߆U&߳0?lPR[ cI n;vA;\mbIi[lxƼ ǭN=jkFƞSXepv|op0]Ԅ8^Лev~nR6bT_ b.b]_Fl꾌&}ǜSf;3ʼTal.vI]Ϩa aϨavڗ) ~9VEx\OO ftWܱtWܡ뮟T@r(∎zBIic}MBFͤb.zF#GE5'V~CAa9B=IJYѪҌ@i6pd9wI$\[r+fqpNc0weLx3jv?;Œaz}iasb[t2F%5EGfe1~tm"_Ve$ bJƌcm $e ~=66ϷHW#߬J1烷U GRٚ1cOZ)>+!'XVcHEQ• y.,?3|Lڦ\>U_#%;E'V&x:kaUxD7Z3ڮlGYBB3i qE%UA!%c8F;K IȎCBvKȾ}bࠁ䡁 &\mDi:r?֮$qWS8w *rC-R d5 R@ U۫wc;H7e?wϿcvs;"1f M$8. 
z68PTQR%ƶފAv.`ѨыǪ2; VֽaLr+>[--)/~iZD@n+w]un/׹^s׹LG&Ov*ؖ$Ze#q>nG:ab\J|Y<ܵYMěp:8[đQnH2kHl!FuF ҮF5F\aq`=V V04юRXKw-.\4TuT>TcɎALAƯEy08lrf uul.-.r]g 9yd??`Ҿ..g Y"dڗZM%n&'ol>8s%РH+ڂRKfg|޹Pw6Qc%h$ B Jk4Te5Q^w{sLۧϿ.ko{\jq mZTU_z( !2fq:4|>46y;xgJǛd\t5JOVjܑWmԀq3#OȪ)O5G|vaE5^[>Vuf=lt|Q{{1"|jTS8N3x>*׊OdG t[kc5X*nbQ'c>iU['IIVu"Ew𤮢+v*W UmFAa.&;w|zN B'& 2?v'ӡ-PvK#0PojU6lʆ ]h+?K8lx%@3M7I@wӅ0fJq@3%, h9_`~o\`~.HuYPX_bUЕ^\Y]|}> mլNS j= afO>عqϧ&|sjA#_6͈>旨0Y0w_%c'TC#RN$yݠNcZ͔,^=O}$'[P^9*,&1GSʨSvwӌ=`mM-'@@ !,A?th@#8cXI7Ml.i{l WZq(QĩH{<.k(yY;etu=h [ʧiӗ)ZOq?}>LY|fGge"itxlο,]>OsDo?83.}yK}>0O&9#dM[ 'Lf!L:hjf)pX*܂_UL~I8k3*^ xMλ][yjZ)`ݾI `-O@^eJsk(Q>&P}6⧃Ў|wdd.w3Fa^tGN/޾g-/rxC|CLܿCCChE{Qu"Vv-s ~cS̀X\f[<'T7A -< zJ!CSS*SY!; }\W< NTA+e@CR=pҹ 큣 ]gs:Ud)!&%HMJX"%H=2xdѧ@ENJ8^ׅ߾ۅN2#l̨ԧJ8ߟ- ݍF;t2=8!'#Tu( g|{N2qUժͣvR6p(_#pE` WuZyu*Q]] `Bq9ih;Nh4,$B ɬDY4l!@]IjRkqZ C-BTIcf4TсhW9'ȉ`U@N\;(]\h%SP"EZ#f:IG^ōgr~[]EA=B ϫ3&VcoYPRj֟ jv߭SI!?9D*%1ky{q=vc~v"R3 )dfb.6BL`¶Ġ⊏紏 Hf(mMmaȎ۱U+ D324y. w>lCoiOD'(7!x*!(<$qYTRot`_EƬzB wQu;/;uKG dCC>I+-:bHMju'i:mbY1'.7O@h*.R|% W+(UC|w~πjkkHinl܇PtCgF3b- sv$D\sI_qg>WQ(ՃT\*Zn.7~tmS,CvE~MC}ad>)ִE3Dsymi-3z9wNٳ&Cp7ʆ|5- wsp@'Gt?!*N,>m}g#Ee۱7Ӡ 2U>֍.~}Y-]_Jח|]j/* QQS}{G\ vNg/UM`Sp^۾m_}(gq k+3*WǾTzm>֭X#\;"m9ٴw>VKFgait5ov95RN gĩCp: WbMځoЀnrXTLڧq(m۹ Π%ηտso\G8W~t!qt*X,& Ңdh 7E:@ zdڱϿG޳/5~a]fb-X DOBBIeG:룝~^-l&AspfClGg=tO:ݍv T 8J+:|>]bEz#=fGf%S!:R킐j?p4osic^' ObvvX^KƙϿz!!om ي|1Dl@gdxYAl /5zfZi׉$ PV|qrz"*(Qu>f`TNyS]ꤣ62VYCE#BR`Hv#a'S:<# AMʆMB);tgZ>k50]ݤ4G kaR*]'o!,bu5y6 7o>sفPӰx4jHBcfx. Ƃ9qǔ~ ?We`69]e泽b%#p(w=ԛxV~Mrs'9S }YzgS͉@ 9,f944ZVL\fTܫ3Yͺ?JTVn ls?㝏U;@&mISIygbK]WϺZi+~1A+!2ę`d ڱIz8&ɫ5>nF!-s,1sGjBޱT.kPkP`4{,#&$ SC;tN=+;Ɋ{*P\1"@Qc5q,91?(Ae~qxFS&Ns8\o ,\LiRc`gpiRR!]/+}>.-QOC|Run<&hP19tF> .p~ CJ_Vp_SPU sIԂOÕ̉0g?gH$w)Xa?>s-iڥ̯wonHSQ? oo =zߺ׷^֨Oߦ2]ty3/!UǜK? TL ѣaG/_g-LIȗ,"+7 7]"5 ?TObd̐75c8r>,'A-˴3zBKجoi֨Yj!830._wD^K f$Ϸn{Og>mƿ`S]:|w*j7.E1-ߜ^g /(x~boW4Cbʺa,/'ʹ&{g- TϿģzEj|-%eç^L8 ! 
8 Hp.t(Ƣr?<*}occ0{P9!C0x~UcQ.߻9tٙ*ry}H^ԕŒB[/##a;vจg}I>װa'axiLةLP }8̈́K8G9ݨCm)0@7%X<>`Rp7NC9ϳfʳ#|CFjjvOְ;lAw.(קvD,#˄]'1Vė)߷.ߊ;yguG]/@-'C%|CKf {6"V V Tņ\-bA*MqWmN`UJYG9x܂QKL]er`҅ .̻ߙy+GD|-Ge7T7Gtbw V%5/'~9yk kҨ ;7^-K %[ݽXk}YF"/ WGG*b&{i-s(,u%'= JWj]4@ߍ)L(v^Ɵ: 3CmbCt+"@~$sHn\_إ&|wG=bׁLnj9ƬF2{.õ/ӿ (D1LB >…'pPq PϟN<~οCu_M*4&lCl57;AnƝJuc&|)pך1_k+y*q̧q?~K.${]cKtp [+B;աwkj K5Pj`ߝ>[g -v;S|1/|Q~BS!~=29WĽVz_2#S{~R*ۣxK'ciD~ste.71w?>SӮ:ɝ.. TS8>"x yNmi9a⾼T'{I[uӷZU)|VWUR6[EDb.~ \Gc |dq(? &|:wst~p}\/߹] w?R,D?az0%RU.`5|NǤܛ 7~Ʊk_B2W(جY5黣zz|*V~>~|k?~O(q1NUHÑ}:=綤s] ݃d%qI.]rΗst '{lZ'3,3ƘQVjurQת? O19”)8SWi%w(FS@Laڬ>fgvēCɡ=.׶z/1&(Lj. ;Wt}We ȩhOU:|R0~4?_ UK >ԟ|xU]ҞEE*M_&Y]woew]%RIz,|2sND7OϷ)d 4dRp&n({?R T.=@~2fo )Q˩4)4&3 r)Z8ϪlUYg:\9d yޑO9x?7͇ h=̔r& #t ,2dꡈ ԓ-A3U$asR ~E v dk I"/"MVw\z: Z0I|%Dipm I+>oYF%giFٚFoM#H U_׈R[@LgBwJ8 Rm%qb|(JP-89䌲Fu>D,E.ލɈ"߱nP92`90ڦxKXOUt-WE տ4OL?)}A4UBc F5bM&Uߎ}>7ykЧepsٱ'RHҪ=Gyj]=0]#.;K DVhtA0>FxK;%]Sߖv=!Q Ȼ n AuAz;>I밢z䬕i/" \E[obtͯH_@6\\BO\1[t0+^uVtN%1F&e(" D*ҤuF EX 1xZ?J9} Ѫ]9ߵjK\7W 믩kqM3g(pH׃;OE>N;;;oPh:j$cVW yYItAI(z;P4UP.EC=/ECwkzULnC] 4 %Ÿ/ )<;\uPޡol~ãw1El祡m0S3Rb~p+9r#01[z4[U .C>*Z:+_sU:0ѢJ}b_?ꁉF.H[˷Ο= TC`T->8T!le qs]H9WP|RXR^H۩@Zw ;k#۩#BKGhNYkk?7=<=7Or,slXlBXz{4ggwxM>y}֦(;[@%,1tAq5_76a:W0,2+i>QԞaK*ls磰ږ*kƲU .$;.%jJ ޙ|&'7\Oߴ-ߴXmiAΈ͑kV ">cO^?O,/.v*HaK.;}>d8mA:)VɹG0\T|-*|ũtVF|T1 tVWѮn}1٤ Umw+&Y[K 'lL3L{S;7O޼p̿QTC=z  фh 023{r읗co{5,PҘ{jLk95`^y-ݰ|+|>i mU0jTB(hjJ3v_qSaz5ʳ8Ы1yfE7̼Ri=dM2:YƸ=d2LaSlÒqr3C?*xӥRJ9)h7|$)w]ޠߍɮR"\hHkuh->Oke}n& _WEc,,f(aעy4`--iф4RG!>1r_3,Y?weJaI*$Q2.֑ )&F:Hbf~}I5R&SH=V|?6tD9C q9jSe;Hi/َ}fC@Dtkhu"Nci eA_2FA']N`P,h^ Vo~•:1bs4S=&=, wUwpyx5ݎJgthpCi&|;ij{qxij>/ѝ(r 0-,f14ج-+JҸ;FӞgRAz{y8wtk8ڄʓ}xgmǕX;;.?b;MOу gB>%L V*(uMTgy>⅛.tƃZ]Qȿ PХP`ZO E1]sz(u Cx'Z-2\jx!QCJGP`=c>G×GNkTL`X>愔1pDsZ?.#\߻I[7pR:פunќSRuUԻ}ox/o/&GlWJqo4y&W&X ^87Bh}?y(+ɮATF7Nn"L0xp}>Ax|=/^~@Gp't> KB9Ѝ5 ~S:)!袢$dTl(rWr4jS.zL"$oL|kEM|{:D_Ӻ*U|Cf'R5*Ś[]S.\/ S/QTF-Pg.H 9Ͱ(9iZFTeSB}Tr7cH7!<c*fl~|bxʐ x( fw&8 yy|Z|Z銗멜JBj +S6I4 GuKfC7E^xw)j@# +ȿCZ>Ȟq;0bc^ V%'S[{f:U/'FĘaߡ '5Acu'+A? 
dQ-XƲuPh%s ] Ytv쵛kPK}t;2݁R ZiT8OgK*w*˵<^hH 7U] .wwHT9[$AaޔcEk]4w3ߨ]5nq}<ܼ`vKȞs!.#=w={(DƧ?.Jռhco_x)Vuw>Ae _t>f5(.txz竧{uHN `i0YϿ@"(t6AF4S hf.GZyPMF|M)lBy68P[4xْ9@Ϧ)Qy!_w^yzyQ;ݽr[.wo=^ڄviKA1("t@W3|\2d!%V~JL "+wWt}RrE&{uqYfe!þL sk%TC c%ްTK(i.yjJx#S٩F֐\X})(0 'g4৻|HۡK9@8^=WhaX*I&w>"(T.D}#*6;<@iѡs<cCc2^i# ܎aƤS6s-m[5aV.i13Pˣr͵+tѰU:JzR͗8xhATլG$(@X. H& xkCe(M\s;mRAjH6TImJ;E˾Q[Ek>V~YeU\*>;L<42'(U A+Mk]V?s%J9@@tX9~z9oD)>9?pCd"Oi.wT?Mie镢={T y> =\<9>xJ&'G:AR2Ywؚ4H'<7h UR~!#+4iBBg# =XFS e{ 򺨨 gh/"*z%4^ȉȃdwU_dK]Imyk#\Nq/P<4;@ep̿bɭ4/0.='5Avot':0Rnt5^ ^-Ag+ڜ1ӷh_sm5 } {N;cx1w:Oܡ[rc$!; k]A׺D ^x<'Rdyk?p,\09Y h#_B65#2+<1z%cZ٢wYoj&Fh2cƽ3}DGQy4|9䟜Spb 8[ r]5`1ԇ-م.)(Hѕ')tЎw[:9 up}XejUK׵j!jIsUF#biIw== h\q\v"ޘ"8P"XQgeşDR){Ovp;ܑCU*D^u }!(9h.w6XsGݣ}>1D< `GRcG eqСށ g2ք}>[h:,la ฿G>[ Gdv3IlA.q.w@aa㱼p5Yv|Wk<Bd3 kEχ0` _Nu Qhe5M;@\sn2?U)E>9uNB#;0f4Lv qdp7((O4I<^I;ߦ^A{Juv蒟CkB .v"PeÉ+ccD?,K#fҶb[SEP;tP>ugh΀yHiYqRH}U\hnclŐ:cxCyPe/DNQV<Wԡ]|C) HbRHsPMzg.c~x+>񝥮Ԣ,΄*V!(QyIDzy;j<½t𝦜qNgi hw)d{GϿul.@ڱW%|G}YKf.:'NM+Wէ9|;yHŻC_Ta<Td#_LhGOB;p@obQzWtTRF^n&PY|NcT6BP[˜Ne6ܬ[A%J-clSC { ,^w1rmo݈%xƁeͻۼK οDzο([Rwlޫ(<?F\'uov`LL?d=E;A?Ő#`'yzȧ3I/<,%(r޵:d4f&oCrA_wrhMEsx"4҇#Үxiߔ.BTN()/y$EP\!$bM@ $XS- ꫀ/U 3gV(dzӒKpwȾ+S, Fo%{5v}1˓?h%ˠ%.bnPbaMcT1ֵ2\^qSV 9 hJmR} Uc{f@k?L37cr4dhe~tWq72a!33i%Co < I?Mx<\넽_1͘4sQ/ӌy!˖kk;E<;>zIđ͖J-u. 
"9R@OSb/6/،2IHY*v:`"{g&H7Gr~ Gl*(hPM-Mv|Q,+V-dGWHudTe@f:ޱϿg-%| Zwk](yM0J@N8sp' t"(l4Cl;я0ewÛ|1f]V5\ώ]Zjpe!%:M;tҍرu/"@K:_ cYrdP\;IW;w} b{"x!L{Pbu:dZPtzTɨ:a1kcr_az mW^p  fO#i#U#\‰xOzP2җJ)^1Q ϵ^'}u3%u_`;|dȻ(bΩ]Ǝ}>?&N}9cR9b>cq5b*g,/T+׌%,x,GWHgYm; "T.UWK??J]ևMJjB`1%AЅ:H0u'9')oNox%oRqJd7{ԽGڹC㏮׉ \S,ȦUjV:\|%.-~i!K s Nĭ:G[ ]z!OR^;-滳z"SE=GL=ziԦ!=7 9Y7%km"kk]co_یJW|u>jv MIsmw|S!aId]QT4\=o~O4f40~9)Q Y{0'5L| P( [I~ul ]2* Z׻\-W@ծ%ă~L%4윣x">\ɼZzM4l &s 4|}`o7Sr e.gVƧ\xyKV1*>Zj^-|~6 :P+6JK] tg"vC@2qߎ}>?VWL͂X޹b?Q-tMx N4^Y¼ aScf`*CN[$?`\{nrSnJ|| q?|j }MBg|7=Pȩx>nդ߱7šVڄ wO=/%̩{ts(cՇ>^{i/XD9I̤ANj#ugwv{-+2xNw8;рrmлg;7Ϯ$%+LjQF˜I{*R"Mrh?e|H+S_n Ýp <_η\Pӂ2,iمo;U!{*dzUȝϺ/7|TkT)1FCUVf1Oa\3`Ux +0]؀Ur3 I-+֡euiZ@ DHSX}>aK MɁx_ܡKY b2fPę,68Hv+NRHups/xݬ;twz7@HʦDL8f7YA)G(0Q6 |Zp cDcc3 (hˀ8V45k[  wnS`G9s81w(|2j΍ ቌWNj+R(c+ ب(ne9FGoɂG_п`W]Y9#`v7 <\oԊmAp$Vl N#L ͵ZׅEs-3|Qp \OĶmrC&?ʪe[Z(R ZhR74 Cї|ѫїqiv5tn]gvs++FIX,>LsO"U} d>Ν;ypWS! JW(/Ry;yޑ8 ٸ/b$^!̿q ֘똱s(#G2=Ab ͺHN&<+Zjh";tF JCSW>kMI' ;t 4= 'i\Hlw?2PR; ;AhVb:AhYvU1h%f`Jk|k Dί;FT|WxA]r4^0@w3i.cpeudE+#i2=%Dvr**н0}F8w*WD0 x/hI]M=58 ]2KI+գQ{K}5LjJQjo>TB?Г]q,h$BK捐A@ފ}TG-~a楆?'&=}!Cm[֘O,/!)1 ZN(D^|y?5ϻb싀q)*T rMf([E flR :|"8S3J$ D>x0>Ɔ SZ؀9@rU|*|p]ңƵ+XEf H_͑\OI{oq-3E@OEOnD__X蒄׃ڳj(;tw!vR-јnzUyn( Bć*l7^{| ޣ38w~!>u3N\ME^EЇ<^v>/az~hu]U磘[y0Z [+vs!5S-}QO +N!{Zh 08G*y>ZPJ휮v'(%TQWa/C3w ]J?fӸpLv|+/zmmx{YORv9%vR8!%פӦ(Q-H|C&~Ju3Gn27"6|W}9|m!>%BRUCR'(>PI *ad}͝3G?Ο 8T=MFD-{PN6;"VR|>Z&|D\m)ǡ,stю}݄WEX!4AaWdl l >4./#c@KPO]K' %43lK C| XS ǽJPO&F+! 
kۡKfhX#d@%vrȧN%ץܡK&r]Z Ki`#\w7Y3`ZbJ'԰p\~}*n|RH%RV [/A/%tU](MKB_Z ԓ3 H< BmdɭƐ֧]M i߽C1ڕ6  ܔ2_l(ڍ̑6+zUf;t6lv_6&ݒsb48kG=Lðf} ]Fe r A*qј(uҊnWMr?<6ȿٟ2&uDYh>xi\tsE9,F~Њ JF7<\ˣjRUb\;sݞxeǧ2G3ew]Jw%mheg$sJefU-SiWܘOxK^2GwznH1O]:Y YZXhv,ZNN8@\ H2i1/ H-3h,sB}[g>7)Oq@'/NM|Ƞɣ!4's"WѴB|yVoFx q^V"X]xZ.IKˇIˈIJ&J 2V{IcrK}Dԏ)U# T%˗4MjGrڴY?;*|X) 4ITxfx$ Hw!7-#=6.v=V=&"7},'4ͽ1(!|#p26}1F$= ]3I8-W$x!ر5H6 -}p[]2!QEbT.ס] hm+Aj p>8a<w(+(|3@}OUσm?}^-m)hKJ Q\DJH/y;afdYt骬|BPoE)Kdg=mOAx ڱAyT͑n'B|Θ!&ejm:;+n;ՠV۫j{> I穂4.B|B"!vr(e4w'F]5uQw+uHVNoqMUox{@Q0,Xzr9^{rE0nnl!3cCp-s+e20a)ӜnpqϿ l-+^:s T/ptB(+SX`s2]jT9]o |sW]:# tiDl^oQ6T) a:a>\[l\wJ95B-+.'Ko6]֡- ?`)ڴ HC͘vj_qn d9kˉHN+Z $.khLte+~ЕݮT^Ŭ?yz(GPEZ1@7(Ԡ ڇ~vl=tM'*"z*a1w80GKVl}x*.J2 ];vp)=^y);ߎy4kg54dZUDbVE$ ʏ+ /wOYl'N~.[U#Tl4Ֆ:&Q7 V>D,n5"o>w.xWx iO0cāOLy݂d >ΙjZy|<|H"J 0A' 7nwx>ATpOT+!yf5]^`~)ds}؝0죜pU}>بS t}=W+m^mdo?HQZIh0E ]w]oBkZg v^b'$~\li+( ?G >G|==?@OnZ|tyG[GDž>:hGE+&A}>1x>wf?v⡼9%y8߀}w߽woo LJ&BdԀ#օ˰.^N;<87=@K>ؐ ,Cxnmy[ Ҷ $tuٺ CMi5bO%npZ nG绠fzyH  կc;1U`!WDI1)Xy=K̿/*lGXgF6gc<ӽVjUtη~l|?-VO7Yӝ8?Uc|gd7 ZRVpTNd܃#ؤ:'p}'uK tb?nx3$B+!Pd!HLh>P0-҆ihpϷ_/yK?=ɦTO InI<{꠵Α" "C'F2:FW G!(q "`>=f#Rk,2Nu4fmlgLnJv) 4r?ȑ cB31Icbޗ`H@C%Kwjʉ;h|465{4W1>6jͫjZ++ٔژU>VpP>kZGyi':!^aöIh9>6},Ɯ9;'1\OyhM6 +VGFb/ ls8^ ~xdPHxv!gR;G쓯'Q5ӣM+w: Z>lDžA5AyN=PԾ&G܁JZ`ʘPH9g.R>m |9^xS1n3YeGj0=E0=}=zuQ8]Ҍxk5\Vq}ݵū{r4c俣?-BBe37уp4m0l`%֦\j1,gO,lTO)U< Gdt~S`^#t߿m<88Ósj &dx<4N %DAwh 1u*ؕ, d4RMqӛCI!c S"8'(3Pi\l|dݓ$?\"k0\T +{Փ.DUNǹU}9<%(Bypuc*߿4s)A/*'}`xL?onRe\&-I9)Rc&m4eJÊG`ίS_4 +7#!˪ZH We/qH_>XoYHNJyՖ)4C0g3ϋd=3@8΄|q&.dǟw{yEmm63Di&iz Ip E,@AJ Y@z@؎:av0d/vvxˣE>`YyT1 \a@&0o3NFo]1+~VGt"tg8; ūކzh t*%paw蒗ߘ3Q".zT;4j؋8--lVG{AئRPՄLUí@=pQc\`b`5]jjͺ5]4ޚ.o82rkO+2d; ߷O'yZ=O7 i, -[;PuDz8.|fm2([3ެ˯׉DWweC_AS(@ iډwZIe%d\Vu ݯcmf㾚v?S0] rK0@0I6uL\=;¹zc2V[V[?3g(PPPOݯMk^tcN5\toYO@\nD].(eB:Ubn"Z9s烁'(zj.$e/K >;@-BGRk|v 4Cd"ϊBxp"R-9|Ԗ*8{CIQ{]My Ab!5` u.qϿAA r @[tXhnc]뻜hPܬȶ΍R zy]ȶq{ &nLArJ*4~s}z%Z 0ڡK]\0FI"'x|2{4"L{6CcL Yu\a7qzh cCA'hqs{^ 8\ (\ +>֣mӗq8vG>)T6!_I;^8{rE 2!cqn77+7$k\ٳ7=ڿO@p&JTm4'(}@^}>۝.S ŝsr9w@us5vڥЬ!ߨ 
[nfud%SNČm͔? Ess{rDTPl*Ř*_sb/Αb6G>[0˱,Ln+{cTSdK3(v BLq |h\`āJrP Abc/1/* ¸ Ca7V p O^K1]+8kMm"db>*.l좶pi3G< ҡ6!}{18C,Nz`\&KW~lS~;@\̀]ic9 *AƋu؞/Q7m[&hK]Zy42G"K₠w/*CKĘ7+b?藧J4#˽lH{d8ˉ:#+Њ}> t{:=#N͔RܐW-퀴.hΰ""Q MJquiϿүˁ+Jc2vҥkVerݴutFa9@aq}#'̭VϿYɺCXiF2fCg׊zo oK+E'(.7A nF5 ;#]1F=EI0\W| qwP6hixV q Fehh+&BU2v_4HJ|@җ4C$Wy=tC05(T ׈nWTbL;ϡcꄱ>r7EzDrN4d%sO؞ݗ'诂go hRgi3¿= uD҅m6jܚ:ݘGϿ;.A)*rGEGqX{v`i Z7e =}G|;(@ pGk>~Em~\Iy NJxz]nalSK+盂Mxߑ #bP ) *lVd005{ rFZiׁjxY B@R;tϗW*_g'.'xR}B{FC|=um˨k~BO8vz+@3-ăk>!(WW3IW|&)fE i'5?]v'YKD߿M1E=D",}մ㝏whPy d(u#j?~}>,'-kJH;@ TvYTCPi&^/뒩ߋ\f(;&fX}aaaڬta1ъl؛Px$^1xDC&)|7y񉚧OTFo瘗1ї d ;$E,}͛}Y9oje=ؗ`Ӄjb0F$\|O^Rp2iu-!0xjp1fA"V{ؖ;hLH<)ORH&BhXJ@<͡n)lMbrJ)l|RPB ^'O,-x(m8Ż&"|b6%C}͊DVԙ2Ί2̊2‡鱜ODOV*U35i\Qc:@CM5nĒ=hPl[`K/DŜOb<+9/2]xZPh I3}9duOJ:H8zΑog7apLHBZWb'igK DKP"X`c}ǏگjZ"Vb4kŵcO3ua5W#ߨT'RXώ]- vږ`vyŮE%8T;>ɇ.ߞҏ75`=GHdT"}ZgaWr+aG>gywvYzB?G֧F訔)Ĭ16FdN4ϗ +#ή-Ts `!)ErAȿ;A&( Ygn0W2x}4\ڡa+O*zݧ++8\ `r gDٰpW~5 Gjoj9f\C%-G5ȴZϷ5b:R\(<6 AjVtʂ}Жgr;)+>*s&G\󟟤^{ LF|4,ǰ3D *e}E~>Q҅J/Tx%O{Xb}K%eӣ\iAP')㑘"H !dt_2WȺʑC>+dZW M;s-K$9r{?L Vt PUYKKk%22" (TEG=B qm>)gSC0L)ʊ8s1Kx0y" 0c "gNeL5^f8?2'(HFb6d$ g$BPMyD=(&TS2/zq`vEñp_4moH,'Tj31yp=Ɏɱ}B0nRPOtI,rWyK3`޼-Mэ |ZES`H޴^(0n=B*\隆Y<~W <jF@:#MQ_3%ǦQTjn`n5ӆ䯓B㺿PfG8=#Z}JI"O/z"?#h a^YWR4E@;g*o4l{[2iKkTw o73Gp@AMGZǛϪ$yt6zC)Y+y L50}+ۄ<-SYlXb1PTәҡrq%G)FJe排#l38{$ABiݨ*#jq&AJC_ 'v2Gb2I&AqiQ> #:ͼn̹5ʊ*$Sh), t nzx_omr)wK"OI2DUǴ9K8P6W-Tx^Dϑ7<(LWO,>ЀF$cjE~vww:gx'/> O%"@Q,_w^_pX)!)m/L&W1^I1&f?9Z)iꋡ&~ʶ@@kξ=xx|!&Nho3ыM )o?UYA+1h6#L.nJ[9Uɼߐpa ׋\0k$YX5 8*  k#̼i%k`+&*WoPNPrfr;De%{pW+kU&=* qӲ nH?m<K 8뺩$[7PxpA DHk}I@@R#0:ʦ SOj7P%u?NLXIڈκ m!-)k%$VDVgاhl-j1[3l@^4GѰOWD6nqw-rީz>J1iLdeX)QfyQeFr$N&\yDBOXY&§zoƧ\.wb"yU+{%Y4D k ),s" NDzY.v*f我ar&&7O^鮚Š;VjV4UULE* Xn^] oTBy 叠bkhcjH4,rCvwϪkMKqQL+ I$'2IyiNQɱgdzHrj83[-T>(>ʺ} k?0σw%Ր! 
6?8o}$䔤x}'%Z.G8o.ˇPOhN5ߏ T~5x4q.Pq"٦>84EL ZG " ǟ;.c-ŔLU|/UaPi>[c:/fhImnk[ ML d|NR[M2sjR_BrDxТ"³(Nz,:`8Ff]l}p9O1R@ۅcs q9+4Ht; {& V)UmoGkgV)Wt\:cE~-Jpa_ p[COFmrP ]/ h-˭-cSq%A"uaՒiNՒ W/϶N2 OXgQHf1!WѨp:nhN}p˔\49B*#"&^IPlGEjqilHTK3Հc<3 %1[@oHMyp+t{t$|*qYetKH'tCT(JKr\͟e ,cvLfm%de]'n麊B:عjUǝ1v,63'UZ +v4<&;&?eqGڣԞ8c1:ז=EPa,zs f&1C4@Yן71okH*&. Rݫf1r*w߃cΨUB >h&ψc<_aOt'Wn{ 8{Hpxl$1K%I C j&0 SJ7Krծr.[UfJw0Oϼee}|G8}lV?r^RX-[<_݆QU/@]8k {w 8p}y6e`֮Eq,ݲ皞8c<3bffgHT90?A:^a'ILw^c+Kw{Һl,;A@4y^-aE·g%NxęOJ.%/wN:Qc˃Oz1^z 8JO_ H')/kVuj n+ZS#D)}WSq흃coDA}'Z{` tk}Һ˵3@\MOb7;)pU9>Q9j5#Մ#OOSꌳC4\AOһZ>kAQyKeg| x|DFe3lz㥱cem ^[:Ѿ aMM.;֪iB6x85yP9o57 &[eX;> P׬ѧ!k҄3k@dw勰82Qor -\*j${6gu"&NDurt(6sozQcG.㕭^|1smR-O"M=α~\dj2|t)alu1v?3n;rkM ז^[SshxMu v1)2r?O厯oUISܰ_%]4#U+`ET;3DIpj)l>ӈUpwԑqR{h#s# >|x;r⿙lr>4ɦU=MyeZ GRoQy𫚇D~ՔtRS?&cX/]ӔCJsFrEx~a*j(X 8=HH=,7r# u/-bK#g\8kLIj jFpՅo^nU\*c?u$(fiLҔ1&OSccRi kϔV;6* 0TQu,CEU9[p׹L5Ob5 x}kUͽ܈rwX1ޟqJmrK%Vfd╭kLAzt..|/-(q _݂f^-,Q·Z:u3N.1$/6d苌3;ΪeJ ߉; gR#OVgǒ]Д.'84wUuvnE~Q-2CQws x}r*[nUYKnU_ђ2lj2;ΐ981bEļeZ?n/~>l9x5ki ]R4U9 2?/~W)sRA<+^Wl_27#G+^cprq9 T˦a1oxs{fo6_98?\*HcIYwFpPA-xqW;wų1~t׾=,51;"N+"Dwe4.z8!pWE6Ȁ\$Y% xT14Rv"I1^;U%3+FS3;#eOE fDӔT,+p|5gӅp;O|x|Lcp;: wjc *q|bMo<Ӌ.g0c`p_aI&hv x :W -.HvF'qB$c7ln~2]΁ES[5CY'dA8N&aYIȚ?uIJm%V& Nͧr*Cz(V]#h_@ 'nr+7 Pv~*U]nG6 ?0X`šOˌl$S #ىq9']+Zsf㛎vXbw Ta\1/?~Vcp0 pWLࢵmBqDjHqWYK ugq18o|Ȓ}p<׬} v l&Ku#FF39'tui]˂5A׬k1^}1}atQ2Uvli$U=`tpFe-l-~$ERgvuzCk- s#s܎]ۆ$=t>d//U:uX UxΨƘ*UnH3% z6-1!<[IAspm>[Ef{ Qf`)'rP&{䠮x#UGTRfH%r"Q(Fe|7"tK45& V+Xצ8U#c{b<j$D_p"1Nä4.3xQ1]yLCŅcxJC*ˤ 4 V"\Exe,4ϭ#z1zd[AAs|둧S%)*`U@)@kYB`bH˭;]|>$b +P ί*uXqw6pv5hyXFjw*$T4_}48>C4` ]:^H:3p|.yWi"aJ* a?,sˎ8+6 nb]݄ ct H]]_cɐt򡕞Xo 7C;x>t>bSCTJjI1O#5-ܭ\hp#Sun1H ]J~_D a3.LJi5Aր,SόS)ƝCVݖ6~?5q kӃ7Cg܋#ܯKu%ٸ~*)q1q[xHD{ GVj8D#-Yi>9IG$h~EL5fs.S]c'l6q սlk<ж "NJ:  8=HGcf|]wwV98&Dݒ2A( g3|_[ƓwV[BF8̳Cxb*`EQT[O{/K8jy7qNݒvRu3QUY,W%1f&a9E!9J1^;Ry)ӄ=:ku>`~IQE6n|b;fqi\vj"=[ӿR腭 wr|xM " I:5s^ppT-֝v}E 5]rn3Rr7Yv>x]t^w:p.FE^#vF9ʶKs vmc48evL ߄a.|| 3tW-a])CN9d^axm';q)ZĬR6kq Mm^! 
]p(t0\{<ָ0Jk!7d:c)'_!Q/nq61EQ\XKu!R\?&kʉ YA n-Q![+ƋYOݼR&ZJ0u[^0kHak;兵d)w ,֡νMPnq 8ƿ_^Y(^*^(O@#F?x= WpnJ]oQ(ڣݹDkv'-j^16}z ۍEr%͐4Aan]$CMhh:T>#_)ڍ4Bpp׆ -QΐoYp+ *"†m܅֍ x[58™q$?U͹&M3뎯y?IjI)VX R9gjӛԸ)v~(45~fL7cR7 bn] %= 8c!܈Qɱ8;y#E-p=81LQB YaL]8Ƌƾ\Ŧ,.E!ja6I}@@N;5󂯭$gK;.ϼ&-\C.X(l3cNMZIf8cַ4Ԋ8;ItuRv$[=ie:L-}amKO w+uϲ,Jgumgco$'2*Rq+YDMa5-b:/"/˟)C;'fXIuT`~shYK/%ɳcfA:eξ|Ū5_K}>aE +;ycoi PuGdysr1^ӴF)~6_8?Sګ$;k:}iP?691$'WYmrbZ͠ pivXirpPv[eac(賒r4OYSxm:?Âk8pQ$xҺUҗXk"; J2?i1ha[1^XR}kL}W17\^$ohѿFz%v~re|b5HeY_@O|eJ_7m6uDimJ$zv2l+\9" dbOr70[tzfHP}ӴGJ `Q^d+yr%<~j[TbPݔ,j_d-R$ƅ*v3 *k$bv|)Զ/~+ԺG-)ؖCbޞ-hñ']ӆn(Sph䴰":;3-\r2!,%+h-7꼲. lܾ5VXBX Zt$'8SZOGCć5ߤxYI,/AjЉWǽm8#bxkOR`VE1EfqY:b(I·xMӒ.p<.Ji>ޔ x=G@E@:S.|ڛ:cu*7/1~1QppWU! dV/g^;1=MQme|?u܋׷r9>Myu`{#lA28ʃD/͢nZ3 =]?p?րı́;M9.%=ˉxjDފrq^#5ΕkS&ux4/|==ʒR-%:Y=[u[7ּ)<{`aCOp@B-aǺ+aK,g5(5ɶeMd+zLbh΍<%/zPnYчG,%ҁncf˷`sx}ҫЍ:#ۤb_^gݫؕ dkmH1^O-ZR8ͧ4E://T^m6Fsw0s 3 \eȜ.:scrG/uO!DxJowz; KadA;э7<;N[ԇ>G1đnXzVib٬a+o62á^v!#.IU˒qFzXb1%43c݁fY'bc>uJ:*}"3-6d ߦ QjB R&QM w8$B[J':< +CKu8۵e3L(M/ʑ[<]GaPg4,QcCv*g:dsui ~4g NI[b^\"Ls(dI<=w/Uېxjr7QWfVC4qo~q~OPO6ϝ]R, <ȱ4*cS^󋣙UWyT3EtܿMlh!My-rj8{mJoXI(~13i Ej',UѾ&tJD{i/1Rs-,j4KFQ#[W%4kj +7|?kkl- l-az{U`  SFSGᗬ!c;(&0q g3O<|wʚy?UwS{8R~2C?>3k"M"M {ShHa؆ 79X ExDEc-:Qd(.ZSh.&[SWܦϪI<#p=ƻ5s[3n͜n,cđ)7 4"0Kf}iŒ/MAo]Li+<^M6kL4#&aRF[8P1d"$4 ej&3c>jvM"WfS1acqs䟊gK$$I͚7qJ[4 0'+m@2=E$?* uZÞN FXg!Xr(p1x-e.&uKNg+8zW5pxQKG $:Q x%紣-(-)mJss~tKs6wJKA'6mO2̾kː"|)e_Qs:+_TpY` #/W~irrSՇuOx_p%Y+3u\8_8ƿ 2̏ъ[Z5j: G4xדS!RٝMD}Plh\Y><'U?>j}JX߷;_X.>8?V{:>-drB^|to2o p$J&77c<TX2l:ljK-$̯{4 9d@ 5˱~՛oR[YƟ|j ӎƅ3uNk:OrD ^8ƿ/a_)V1h^4O1NDC_ ;ɾ+zeW,e< q D"G1㤆_舯_?|}-?^_`:FW4o!]FU_Ͽ?叿2kC׿ \"ob7 endstream endobj 206 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 
Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 211 0 obj << /Length 793 /Filter /FlateDecode >> stream xڝVmo0_!$Rx; hH6i ODz] 1!;wsj %=:HXa<1SVɴ]DQXM);Y}7:OeEo'SS$JZ*\ ڊ*,N*@,d3>pxZs^]W@GnƙQ2=r+ۃ,|t2u<,,dZ SYO6Dt%B7hYsAsgkD`'uKb,ᴬzbSKFz3j Vk y1 mʁ*ܐ0Ф& \%̸iQU[3w*{b^{,w.Ϝ/.xw!>b:*YFݼ%yý LQs4 N9QF$/ {v(K~kjfn"x>ʸsGI70 :_7i$ީSsO D#g |Yվn Ϳ n Id2,0L:WV7ne.>)3R׿Awe<1Nڡr-Cw&-݀!JaK԰mAnqh=5a5t+:lh]w.2U墰%yJ=^ۮ( -9]aTAGFIB{K(%Duzfߍ̏M> /XObject << /Im0 215 0 R /Im1 216 0 R /Im2 217 0 R /Im3 218 0 R /Im4 219 0 R /Im5 220 0 R /Im6 221 0 R /Im7 222 0 R >>/ExtGState << >>/ColorSpace << /sRGB 223 0 R >>>> /Length 996 /Filter /FlateDecode >> stream xXMO1ﯘ#9`^*R+Dq ڿߙ턴͒E(y=voW9(+<L`FMh)|Eu:N>ͺBJ u}N>hq|*ԩ`e.Ζ#P~@m1i GU6]? o+Wei Zzdobr+j㼐?ryӇiepK$E{ivE"mLd𘙑ss&>,ik.yCX%uL`]DvSyê}+EԔtQdqQ-8.Y=BaPN:x%H&sҖz&9âaNaю) â=mngX=g$B%Cź1T;C+j %h4#.rWr c<=ΞWדE_iIonY|K2]vtͯ#?;欔 endstream endobj 215 0 obj << /Type /XObject /Subtype /Image /Width 200 /Height 200 /ColorSpace 223 0 R /BitsPerComponent 8 /Length 45240 /Filter /FlateDecode >> stream x p^u$$NljmFUeCsaF[˔"J8^\4 3Zar9\. 
\ C5vNb{̝w.^~<9[^y˗FՑۿ>r럍O;8c_yGm޴mGF'ÿo~w|?`oµ/^WF!ZF^:md?_l'|}_G>G?ӿ=ٮ}{۶}_??\k}ד[߹g!W<5q텑trΰ3De~8OWxL 3t#ó3H2Jw\3 6okxFnj6`ox/~W`ibUg33G| sdnf*|_z?082zz% rɇS|Wr?A;6?# 9-C7@ չ v#],nAO~x>o.p]*&g?aßG>?Y.#/-kN?`tH>ytpG?0`ϻwxK $̎=@+r*}?*\s-v@fx@GG2쳱ÔM1laq<[d0TH'qȶّ>߷ ?M_mbMXx0(6v \Qnu9J-f{PdqbC#T aQ΀O9s:1`ɗ E~~f#5?xq6wx{ <3^ o56<2h!ؔ!L+b U 2$8C=t ;xKyXr'6>;KE%89F )UGW䓱b^&AŴ*Oz7&5؀ D66yBl!ʎ4, j RtMX T8!3oOQT"%?|/ظ=O&r$S1 <|ppu99^z~GS{R\bp@1K#3r8 o\`|ɑ$fJI(`D=XO?W?)Q &}6ZV ¿8aV; n0l< '3g[r'r ^c&C2 Ȝ_<;cr-4rf?@ !Qy}L%?AqB >q/#V<O(L绢DL}ⷚ{G6l@4q`f[3pf_R Jhq6>D~C`qkSYwj^ ptXFORh15L";Zwڮ1hxIM͕q冊T-*$Èes4qiԐWlL¿ ?gT@aJ33,+E 0 7ɫqf|p'GjK y <nw'Jr*ŭq+0PdLYGL=b6}Opi_/}{ %SCiV!pBP8l@bbQ/7*5c}D2bkqR(ĵP<dJc)68c$ 2Z EA fܾTlP LCU+h(qBpc^-)N+6-?x< xa*] m9^4Mh$ ZD8p& ǷOʅxGKdH9; աa ;r!m)aW%6R H0XhH]?BZϤ3s>ۑ`A(K h! / ݷ4="34^1" p:Q'K-yfCBXi#ǴrkΣL#+Ɩ_*u8,")@hqL.# /:&TC h? k%pu%t800,h]F#zNy9?IxnGCqc|<Kh / T5l0a37a~#@՗wigEVW`T`F +@h-oT\|3C.Jr-lɗmmz㌿1T]22@P_%8lۭӷܶցa!K FRjM5俼((- ;<,<ܪu#S"`OƁ} q9;*8_~SMX  x%8g3@(W~aK]6'af#hI~H Ra%0P Pp@ӡ*Զa8?ɻ]|>x9d!}Abl7}|CG0S*A$fš2T cS wEp"!ek`j %ç0Oa*T!h8i0 "ƀ*2UzlB J(\ÊP\:2|k|P1L0\TgT⪙fKMVPB,DrymN"ph=CL=N}ǘxerf#!7~F C{ < +Ѐ#8/y'CyOIm%-d.궙pj|"&޽>_?oD x5 xxlyR 06A!!޽y4(jN{ ၖ.Jm9fb:edh@UF]U?p]P5?D`rΙ U;J;FJPPisvԙx.DP+? 
ǣMԞT4.jw0\͟{cInl>fLvhu P|r$֓'†7ì5-DYf_B !^8^d6 0 O=،w!9T3S_[;!ys'ui/=~80'z}BZWoXR/8p~NGrA5 @ ysj#:Z:[b35þ#sZƃ0RzmpR&2r&w)b i92lBdCvS!,ئ-O P#0ګlm@^ G&2eBGi.ֿkMacƌGn[L( 1 P/F}E§l?Cpͪppm/o:'C*]6Z C}( xhYgajJO#ۂP%Zk&2/E 5<`}͠-hbBEh҆`جո&~$@51tq~4ʎ9kn8X'҉[ H?Tn-eS~.\o_d7"UDkv6J8; 3C,4¡-יRzym0;q N+ӫ&»Sa Y'0(XD3&/G6:>8OcϏF3,=H#^"Ɉ'.eIg=-7CJA7Mi0@b1h^:f[4 _9!zc\@I/5Mф,*%T< `n sO3N@r:"}!\3բɈ3r Zž/ B5LnJA a5r&B3F܂7fW^%fh  Þ\,†"ONJ/7N~Nr7ڟX2}  1@ $h *8;u8 vus+O y3Ŏ"Ncp٦2RP)>SX)Hא Q+ p "5{h^lKx=Zں:8yjrF'Tyeh]Vc7bVwtb `T~* S(&M?iѤpU]Jfפp(bd ͈!>U) p RmɐA4U hIsFLxb:Iga;4fz<жi4@Yd|sާqޢ^\O- -6ɩ$U~;hER{č- ^+t}=Os-xvK B_^[O#̇ZyJj;ү] ɜ {NdfSA1ŰTss!T'N\]d1 #p5REd^xR{n#QeFS^UlYw:;|5#8y)5þQ4 xFw/Z0a#v8!Oxd|FQClTaMأflRHL 7>&-E!9VdyX#Zz ځ dH9g(T mQб 3/9y:&R G-b߄\7Vu=lFKfeT1D;9bLAh"Khf:+6S%3dH -d ׺Č+ajޠv2C )ufO~e3l]^ǐYd `b0Tf=?A] #*]3HVRn29ŬƆ@z!} 4005hmswErD] ; "\$NҘ)Ch1AeBGmݨI9-jbV6VK pVjhV&;1nV\F] b)L351ĎF8y2&S4FeR|] GlHfS/аK!|KNase 0;|oGHC=PFkcHӗ EqnUOLH󧉱nHjRB pmmOLb,+}_㵔 e)59JףATJ+qRIދaxv`YIi#? -P4NGQF 0LDSR[>Yq&es*aiE(*zk񍰦@qQfTlV,_ aks!*rYՕ|dLW̲YH= L5'G:L3knJzh'ך *;݇ 6d LBG}+BY[WҝwHy·@S$ k8q~("c~lT^T[&THUM/DCI{㜺fHcPY6_RISEYɮ8 gZՌ] rʫDC_##ϧ.[E`BHiA}!i@Qj5+[[="BsIPUhWe\ȷlt֠g)&2y%Y.R;Lx!gA9f$LZ0e hjLWNNak K9<6}@ g-<ZXc/O+XCTF'UT]iz}"Hfyp3FiRk}#ylde-Yw#JTpb'=66:9 3p-fIF^|bW8xdW˜&Pw`PqToޙ ON>1!beFJy$C|<^U6JTD݆xoFn *ϳA@Җ[>W2)iѤ/,SoA%uYe^ bCSs;h7mn({~#~ίZ5LJ!5SQ#X%U'gE?# GTRڙ72bԏKu"J>TUx"NZ#K3㪕&-"6 qA !$܈hQJݏ3jt/ڡKӨ6%r+ȩzʃ@8E&adfOխ pWxsZ?#b>?[(cnj_b(Ȏ8&EF8U]P.5]N򌫙1e,dU:•d8 !Q1 ^\U^D]g:X ϮBU1b)CZ3cvf" I52kԾ|٦U7vCicv!ʹzJ0VϕDHjQJJ9J'Ew5nz/Vt䚿VFl `Z-A4[k(Pu?)^K3⿦5\Dד4ͪ@TW~ӱ~0QP/_Is`OqEc& 2T^7 &Z=M: う:I5c3k;6 9 $6T%ŁAECsW wE#Q46r2d԰i"qEp]2i%A6$UJ)MԈxj@l "tTEQcjƋla^ITnҮ.p3v&vElHj怰5Anl{'T "(Y{n⪤ٵbh $/ŸR\2 fŵ80 <1 'đ;J'ذҌ!>㐀=Z7O4`yi_y){ָ2\)mPd).6}uGZV Ng󊩬 *hlT I3Гʴ#XsD=4bTfj1laEO}$ 'Ukד,BjoP(J ) B!g\to倓E#kCEʂq'A;+4Mi`|yWВXQX3= a tRIGWəP$5ˆ1\nVOcI.7ä''p-Nn`kN3Hծ!hh蓩CV]bV "  ,}\lf+X&f$"}I\Y2+΍}i[ kdNz.R0{#%mC[Oh\ Z&Zo!:f;6p(UQ+\\ n93{\ڍJN.:pDqG4R5w|UE)#lNMyq^r tӚtھOg|{n C{ {>=% ssȅ)%%k [ ߹G3gFatZ/[ 
b-2-mz1`TT;"<Lj0Xɟ 2sեi8VoqqK8V~+w;9Flq%Cb>!J rUջSr՛؅VI8g7d2t%e8֊26L_7T`1`@oNhȁ"{. _VxSjkk KhܹY'OͨAC`}u 4<eX*>`Rɕ~择/U\viۣ#saއ/m}Ư83<3^HĒFMc:XTJC NMgICXX-TIX3nq ph15 B<|1|#S̪) 7)G*ɦF"V@Z %tϗw >g(ew>~نT{s !0y*PX*H]b\۠Zm3֕9"z=l-F1M /=>[Jfo3< c &ـv5Ye w/g2t#5Zm"_֔1k"*leh*k"3[==2sLۯu6;h=k[#"5ԃ DKD! 0@߳qOP ZEޮk;|d`NQm-SV}V#pRF򯬔*eCIK68*. h,*0WKXaB?M=X1}CIneb6TYJQh]l4H"u_S*"Z5$JB`\4bTH0ev- %&Q9`mr{wvdB!@O(;>Q2PK;P(Ad?QC37PlWX0udQӧ*i {18mJpRY|1,רP)X%_k60wKlYQQrmm #UВS5բhYyfFJי2**[9Yjl o0w+Q,mJ?#L,+.#*`ѨgJtf)3t'Ӵ>F OT|0hJ3Nda칌W37KEWJ'7 SšVXڠl[K 'G( 6q5t_ B"JF[ S(LZNԘ"iT]<,#`1¾gb h)([6ciF Wf(g07n01ݬ~1;"g34䇪-f9Y5K.Z%-KMp"b@iqU%?NXe-s_R]RE43\ P9 e& c3YkPB(q]}ڕN1F@ah[mk#vǢf59AbWݶҫf@nu1F|l#s2C|J{YjVթ΅\*"-r+*ɄF_AQ&pL)\tʢs1/vv&hY!b]^$65MQYpp~,vv5?nEفM7&poڣƤ|څW'AcYMjlkb &P83\;%XJFʑDb)9#L)9֔zsrfEZoVw ]+KbK^U g7 e)AWS>uRKbAVg15}2Bu8.8"X# VY?6W6D!jck7Dَ='pcMFD*0{ sQ[0`17@tLT2#D3&ŔW7v3' 0b^.u^.39Lhy23Ѹ^L&ݩ-֭W[p5..ĪHŬFw5|h{[i"4rC~wmʇw*;!\Rhnӎ=-oo0xe\#%VAA W{=+fR5+Zƹؙ*_Jb@`Cdew?X5٪KVl .\kv6Wʐ!p5YQ!N2RA5fTgobljC'"-nWT1kO.=* !j谯f\nuד uO<6pDCakICKb[K܀u"l5u{J0%nr+eЗewX˹!`]kԤ}.W@Yrư1b )^nܹY$>_jQ:܅y^HwƂvCN܄Ҁ:m᧓0Q ^TRb2`*m* (S7rָS6ٳa#(oJ'R(ЁnrO<&@;kv_̃sV`dnVe.$T_2{"rwJ։b֔­-j\[ nS{  "-j ҘrN}I Ӯ ,~y9RFs[J 6 yFbɘ%=TuIQK$վCgMuOȯk(T +rV:x Px(_ }aوjkD)fp-ڦ !6znI |V:B.OVm+ W}&8չj)hJ+ic՘V<5Zjg%kig^Oڳ=㣿Ц>ȋW2%R i_I;NC8FΩ,V{*6B6j/lBmY @SYga:|)Qؚ g{<åkvVi߭ =lD*|Φ:6.e.┖%'kQ |lJp].4|5$/fC9)&ׄo 7PIXGZ†\`#fXj ס?7S_v̫-2KCt#:PƧ1=[ "e^O>*~K-5UȪlZX,CAU#J+x}mN lgn5L 1k,C5 C_hoSDW/eU1f4ouT-VN$DN|Ҿ Ukq)(Mk(ci[l(՘ i0uksiC&tֺv<Ԭݢç%zu c/EU=mK.هhɲ >Ԓ;ŽXָPe`pu#y=_͎&ӵptP R,9sKۍ* p6K_+-P'UkQU"PpQ쑿" ~ZH/ZŮ'ٟ*foq,Er8Ӻ*3(QuGt5$FF 8ԙNjǕVdTUKj.>\ZMK{uu/ԚYЬ֧u͉H^*:mNZϕ s\AeQ#F61lU O-)pywkF(RfxGUhIte.n9}O< T @}AR:sVm?Nr:~c=؇*3(,/ꙛdZ^LE | WC+XBETM^Ȼař k]e3V.:Wcn7dМ63پO 5x^ԍ s+ Wְ*)ޥ23W)J5)aLs+0XY[cAb YHs[7\nJzv4^P:R1<#T'7|,T.Ϩ[]ތz *aQr\P@^hqHsG*P{z<+.eeLO3i c]_?ؠ-RCK>[y/E&>ć^Ӻ?:<)v[˷Pޥf9˨֭5&L'ԌղOt>r%[N$]͋菬TnSP]96VI"kjhkt)Vx \mU<Ҫdx-(բ -gRJ҄.y&dME$"yU?n2 "f5 ]>CMeY2$ȡuG\.6]߯G9ZFi)˫4CaAuA! 
XDXwj4PM{=ORVj)٩[M`BӇBkSt6t Uի֪J.QyVTTKu[M[Po4Zh(uG*RLX3x7͉T\a_US8/%ZM%k@hrjfc?L襝̕YfZd]ऩ `l<".jS 8.aa7G)M*+3V덼zQsi6^4UvqS\Me:F+P=+iuJRX0TfXx?%>=WQĭܮj6-}-SZkcVLh %(j`*֭v$.V?TNҙ~v/ Sh4/ø6䑏ޏ n@xXRg6Z@gpKV/ k8.~ݿ~GJ GYZSM0"¡2"z2׭)~|媽;{),T֙ր,$Ab0EM6~bhKh*c C`eіJv {,_"0eL|Iĕ%1zd36Ǜmvpl(O?c#bW_En\~K;Uu`|7%k~W3t]J[!g(StZ[C^F1p͆P|f.FqQEnf;K'ѝKaˉ0;BED NU,3(z+&(ca6ʯ5G41e9C;9`˜o34n &9_ 6,Y\j}~k(Um1?kowi a-d&\m%r~nsEWK V E*{D*f"tYuAO r4l6D]DAݤ'Es7=Y1j&FTkOWr cʪcTV%8?34cc(A 's:}I߲%+p+޾iL.U?鱪knӁk٬ԖiHb+tj#9(p͵{M;*MU*լWO?]š)%ȝ͜u9ݭsd^[D[L,tx =IS2-Ch$O!)M8ʫ+ȆΆ QH&,0YeyWll5-1ʞTjC~%w)U/eUQZd)N^+LjMy28l}<9+ ,]rJd~Jz(<88,M=:k );Siuӑ՝ֳ+'J=s*vQAaaε\U f! @J m~9=r +׬qs6L:3 v- w) "ʑt_O;p5qiG9og(*܃/lQn=[̢䫾gM[fYSs6%%|2[p۪,R* /g]R^5gBٸlDM6lMl@:юB\5eN-ūx)dU}b+'n].1,k׼C@ F{ĦI7lwt_ =?KW kR/ł)V=4 UդHo [/td EO͚ɅNzhiX;kDזmWv8kiiT`OJB}};jj^F鵩V9]PyfOGzÞB7rAYJ[+QZd ;GOR_vݨF{.;S*bURCW8+Uf'QY|WJ{> %.2v˫VFnZJ`kC>R:KKj)olX9+'RD՞6^,*x~[bS7jiJYZI/wn?+-z2[-KB] f;\_7la^z K2`}_xko[_Y񊎵,!ĕV:pU G!y(G>O?,7̥{e6.]txXFgђt#' 0A /,Nł+srn KdA\SqU}ިy\OWUZ,Iu~S\d)4qy(ow~D4X~Smi7̯x•lQe/g^ԃvl7pÉ&N0_Ӏ<|=aFS,G?\'3. x$0pmW`4<ړO[*6v{em̂1ӥ\M\)l4mUGf UUumͨ}ؤ=TUc5^%hSbz}SfS&gy2J4Z׻V8?viIqK4CD\3W<3yp)'Zo+. >8J,3BGL$+bL:=r'˹R.0e*͋pR [_,bKxi?|8?7!7B^TvG^ͲLHU~1kveIM۴Ėr_5,5".8kpsOg [&r cuw`Z}@ivN%w=iK qliZjcWVĈ-'я5ݽO G'F.$,sTϾiȮ`¿ W7.H^rWҦй\I+#oMثy",llUTs( z4|7m_) tmֺ,fWǖ:(t?Um U!VfԎe/`-Z(T,b>4m<bcxBpi7U3tE(rhWC2Wz{aUŊVBiz :mlۄ J2r ]G,3uc3U ML;90W|9 ߪlxΧNf:-Y GG2]z4Lbb#kAˬZZb*1./߷SXI*` U_h2 'HeL޶cau}.2pE=Ts|-(@U֚Asz{c\mExiLCRnKҊfDv 3avjꐺw2>JK٪M/4jPWaNTKI6Ui5f̽":B8:t]<NL]dB+KƒCH'llt2[WiӹmB@7}[qCǾCL׏T%;`^ yAHGRgjf!e\SUyIDWP`TEadZH.*Gc1l`_-칖Y~ߞ%V :  6 ;ݷ,(X+0fKI`P njiP\z1z:ɍ O^ժVLr U[%n|5 ]B]irfɸ%H*Ԁ:[cAs./{1 s[*Op,e͞xGK5d/'\d~M`J7.Tѭۦ3Eo۵ՋSuEIƩx*6q Ѡ~"WĴ*9Y$VWo! 
]rH*ȦW7e/u'{lC飿3@'izɺ7ɣ Jh!~Wx4hj:2vPwc㡪^%UTv gDtb,Ϫx o-׏t+\ɸ(SVztUπ KqHҀtF*d!*b]Үjvgdk[uU-X-lP԰a=w5N Qc|6N|ϗ>{0;Sb&/Qz dI-]_erێ;^y7`IQxǕ߸FK ==ÙF9=zFtaluJB6kk!-R-nYKM#SZ3Y'R>w>ʺ5$ؿv"k+Y`>٘eJ W.آAQApT9;m߹dx@@oF>b/ ZMrhq.uɗ"ZYPK$Iq(fW&; '{M蚽SNx2̟0EYp$eysܝMm63W}bdе{ Y?ŒL%DL;l}bSQ+޵o| z D2!rNvT&+h{4.v.#z{B W|2v{fSzV,?3H1x(JKi=;+|eXl2/FATqWK!+#Mŗ3'BS; łtv5(+` 0~iRklu^z=Q Mkܟy@)1! Wg ,tV &yMRYl|W]W \WpUlm6lHsn C"#EE a١8!|r J4Š"Rih-ec)qMt }/@[Hӷb)Vzܳz!.$0T`y)1w+pQ=L'uRcE+Rj+,`;-⯣KuM\yG8sPz/޾+(@ DAFLUF? .B+U/ЧHn Mi<\Y'5'5\58*Zxzè,q&ljD3V3zT?6B*) *i\pqEec(6`<2ZH{cM&cOk+e+X:mB3 D6p5Nk O 8՝)=Š11Ca]51 ^,N^VTs]Ot؏d$ +"w W`cSG6/5,F&>|."B9}Y%?yOUJ'V!zM7_I%9LK Z(iRUGhBDZ1Vc? ѵY+`TȨZ<&giH$$2?AQɥK7HV\urAȏ\|T`蛄.Unb[;c1;'v kGn ce_AϠ ŗi qUY~jU?ҭ| w:K@FRŔ1P ;?fcp>UkٖWWW n/,óЀ(G` 4yp=@*J;kԕʄldBYiUz{-z%K6λ)foS0{&:sJ7Tr|Y8L=C|!IGdžI+WQŚ@eD9 &-SBFW~T1+=AV"+p *ˌS2oz{ـdx9FclXY3[!;4Q +s8 u{oǕRxVcZSyi0EeUƻEfUC:D^$6SB|)~_^UX[yjg\ʆ}7]Y5ҁ--/~'l'fU[BS!D~O0vdids#햔LڰU9U5\*=*Wb\_{!Z&D> +D*-zm"=tVN6gRJn 'MZ+;ewo8+]y>+2~!x2Z,VFkGdj4ш;ςDw 32;IgKWZSAhCQD2wLTZUUО*\h+07:*/J:L0P K2 T Hx I:aPR*UNlcE2 'N\ qlk,!{Y (\:.ڋRMwM,c e0Q_.ݜ,ϋR{2-vf3la>+7c[pIZh]SQ&vxh!9rUZ^V G" i+ݷzG W +$V#~J(.!C3XZ_T6 э&gWvwUndg,/'S#]H%fnBpQs-(~'{lw8ǻ^ռPsOR\%vxW֟Auo_4%,jITh'^16ꌮKOc_"ٲ]T/+MUbp5dog\GT~E({9Ӆ\HOoOu/ARuRa9D`@BQg4|yjmhݪ̘$&A:d]#ITDSɫ[)6&~-ζwMRpmIdOky6 ĔKZY#T|JuEjLOޟ2&4#( (GMA}WMoS5Y6مbj%1dC/2)=]`XYZVϙS/g[9}2PVpW]Le}ѨnWhq_Zi h]CRmijDhʂRMl>wqU~߶DGpVB~θm~# YӢ3twJOes#:IInQ%4b[Wc.-3[JP[m ^1}TIfFT5seO&KfȽN6;؞tXClm̪eC  r-g!P[:hqs#~ Q "NF׫wdcab [)fA }ZV6-r}+<'X!эURۚVH*bW1n69r6)YgᤍM^0]T_p٭%j*wUR۾4iǞ?h@ZHO:otW~1tZ-$q=y z@բyJ .+`onnDtAxG^w%ofQ1aCᑍ٦RQ\TءJ"xǺLx$vT}P XTg+Ъ TN>TO=^J&bij~bҖSZUYҶ2?졠^/RV1.ť-[B9JJQyOK?hl x{ZaAF~_doT Ri/2(W!Z΀ۆMaR4ȮY=`=*[3^:zɌ0TޤMЪW樲PQOd6ոj>Q/q[uufj{H=32)Grȷr4"I$Z\gM㍘7-dޏb?-_n[IKv9 NT'vpxiC.oB'6%ePaVl`Xgg{(!5Ek]x.ROgg-V}ڢSQj,kgBj]k6\71el2>4+;ޟ>A)TenA^:'W%yVjQ8+>#e<ʬ3-q+ 6A5bЊ[P\5yնL4+i BT'#~(*z:d+\*[D!c4@jmW5׵ jJl-:)~Waw} (h+vjfM}yFfn}7j[Sդ7`cr }-ק0=rrqL+gTh +X]BlpW˩+h2Lw 3OXe|M 2-F^Xw^ž+Wtٗn  
ݓ-{,per4垔0{i<[3ѸM=Ѷ;d?9i"B^Ehe>ײ5rj5z,{-B.̓+u MT9]ۃP>#)qHXf30t2?nZ ^ 'oAalYӳ6ɮAE( {_#6ZمLѵ0V\w2*Y}{1)*NbХ=2]Ղs\⋑i\Љ{>H'5[e_VPK]q"ܼPN=8 vdztzd3nsp#(wqA)4ŭF1õ [i+\TqߒIzH} вVuS2c,FX+uLsk9ke4_4[JUhR$BǸZF[Ԛˮ2?n^7I;_lA*Q|+~ĸCER 2\K\{XKu!v֙ږŊI  TԸG$ƶX!E;1YaOXiBUUA!ֻ,eY(K{ϣ3we{b° D?KO^++Ŋ]STz> {vخTP4 >G~vuP; `bR̔&Y |D" @V_鉕 d<7Ikt+6=zUNN&'բ7QyQ|}VŎ]gQ~v(NVݨD%h$`+Ճ2" !A]w*uu"-k0V/&¼G@ o +e=bkʣH˂P"SzPTub"r/'|Q#9&g'SHI▗[{OP"r:D5ueB9ʢD.ӧuv>2JX"v_g>3]냜\Zʫ~)P_d@8x,[=މ::bx\);0-|q84lj}1 Ή̌Dk6յE~ `l5О#Bq{r5B-PC, #8HL1psa`X.p#׊'S醥,E13Vމ6C|4B?tPNQay8\6Ձ6>yG}HArrJΕ\}&ۖ.vc.:Ր)ɕcߕ}VfR J'0"Œ[-?jBd(ء'79Enk#oȼBS9cH$bJ:Lݿ~M؞M<: twۮHɁ7jqI{ؙl3S7. eWβ!bf"UbPݼQyL%v9su=BRt`(.b؈2{mWWŪ)OCLw#BI'J2mw^ ky4O[7>b_D|~+Y%6~˘F݆8 (AXĻʨ/G;r`f6`E0ۂ}< (f&2sޖj\A=9,CFؿ $` {SO`XY|Q* u`ӮPB8vPx" 2;$i Ru l~&sr] j3Qd(jʻw#u c0|3|Gv P쬕2ΤRYݼ:JU@eʹz[`K̾<Ns`7#5Hlo2ǝɬG>>`AT(xِkՊkhRk᥶5&0_˟#o\ Q8>GKOF~@Z‰rXA?+fɢ뎥LIpئ97wsPfnn9RPP!z"ҹe8v(`qE1-&JEH Gn19]  sh?t"!/CR~~?ULܨ0FQa WUe#c z#&+f"+=hWU;eibHP$Le|c>jzOՠ8d@r~os91NY@%٘)"ly}񥚓5_*ҹRZ*޼\h4wt!.׮COZ Q_ze41zh_9j4b ׿Od`JSM( ?Rq7jv0{'o\" /̝iTxJkIgu^D ([.M?.eTätDf`r\w,jL SE%RX<̅Ҙ/P xZq0x\;SyN@;KF5|}ih}@W P(dA@>~"0Nz(!򺘖m]JKv~+[jlY9֠v%N[~$s+ !=7:cehT>QX7; krraQhEMxM P(:%>=R;# CeMgաHB$j>K&Vx!މI1 pxt`JE{J韊/~1ß"oy]?*>ja/C87LR3-y-áޱx@XI׫10.5tk@i&c"Nrw*KVzJm]U^d\ 滃@0F5UEng"U'ryH> Y4Y }5ň&Ͷ.gV7,uϹe>b4uT!+])*9i_=Jb ,eGt5 &Desr4ېPi0{7C[x5ҀdQ%1iqChf3Qu@#.jN/3Z5] Ӎ$/}^EQb&w(ox.kF\]B?w =E@A dk@=[a&<ɹN&1#jX !o˧sf`X{0PQJ˟G_* єHQLYVmT7*:_b "&4 "\ ):* @pȲFtS )CҮݦqLf5Mb +{#me<'\3jQ3)@m ulw̨QMN'D O$ÇNScXlHUx<ІU3v7iXhؑOM=ڲH/83nWઘx23e[lju mXZz }/kM|/6RJ$H-/ Y u&&0ΞN"KDz(Iɝx0n 5?n7O< $w]6J U kePYqSpP`GA,{v%[(XrVyENr[Ah ZKs=QŚ_F>`}R3Ž>R)+Tt3f5و1Չ~GRw鰳nf \%Sqν{/j/2pPu'MÁ'1yJl\/ Z]SQ򓱬v;3~>N>,n%' Gw9\VH:BX1魈_3#{B!7"'%+p{,}:fRs1Pbb@¢شuAC29/B=#WB k?WQGٮЪ,h$Wi]I?HPg@_Me* aGQQ{5cJ 3vj|2c dz%Y\"la~ {ς1vjNbh0V~7Ue`-Eɴġ-uaWؒe3S1q:GK *{rdc3Q &go@ V~o+bǜt켧+ﻁOp:U7'nr4յ! 
CNdZdij\:MHTΦKiyθ4͝#ycVf2 nc4q M7%ݠ A˧@A;u8t+ē8+G=nT`_L2@DK=102ܼz A[Ģi4L˜=f:pr[9i~m0E?>9 'x{wҮCYmZZ*m炢kgu-ل?C[1 MK˃8QTã0W6~޶LӹUx'Ȝd)ڞ8h.P\(Xޓ_4 :SR 1$;c+G1(M§c/@WjVjq{'jW&:V_Pxg`92W[xSWtb؎.bupk+?S- 8JOWQ5`(HzApO7_b)S3r΅ˍp2ΰbt>cpi:ȦX: 0rBN@&/5'(OL0oJaxrqe+riXuCnwXce2i6 IC~KPh+GwdDZhITn-Pk3 metN U@=DDMA0 &+2v,zB::ȅe;BJwԹ7 d A!s¨XPӱedHdgDI9'8 @U#k:yZ=oi}Y-<+ 43cF jz,9Zٺ`T*ٻY(1~σ(7Sg0yHV`<n9:5gʬR=x"!r6DB8ɒ:uu]H*Ϟ&ʅǷ]QYYF(dIcnYgfB_HLE.srڱ(Rtw <+#IQb6;8|޻Y*/m$ IbbF( R_8xzIבzvgw:.wvi86Ƅ|$yb%ڠ0WEExgSrg 3}`Pa5!C<|#&2L[ҽq0Yx+}P1SDslIF%W??qU'_ʽZױqZopX19EWwv dx+Y gt+PQw3$IuIN.Yՙ FFb(sU6Zldf 2 ׬gK4N1,Є.R- 嬸SLZ @O-zHy^WMD-/7_΅1O9ӥ( {_1Їu.13#'xrXWZkYٮtӨ"rGOM$Π]+'Uc(+2pt]`-|7UvjLO2sR ע@tn7:\אlLUx )ZDg!?OЭJat$3E !cO- !F)It5`V|MΒ~s1sXvR`\c[Okr|B蘕 ϫ.xQCQY BXWiWnsf S*pc]N/܀<z 0,O1 u:G׃GR7xO^C :\ 3,8 C'+l.+RfnBg8WB,b4Cր{Do>Ys!W6<Gg~OC5gԅnWƩ>ꉤ\ obW ;V@][trҮBȈN1\L!0[3mӣQ.qk 3tf :h O &PYa"ݧSZD֍ qfOCeyQ7L w]֡n:yx(M~Vr0j\$Hk.c[cpc0FkP{m BoE~%وBU< LkPS Fn-|A Y+uڱi?1Y ,&Z0@`@@d(@~ 挑Rd( ut( $mT:*E97]2Vc =t<+H~#;iw!|g4V\'TN"cJLVJʤ1; z"6'`< q)t,av% wEz637I\kc~5=X47gKg^L[cu]>ŴhZ񹞔_ʷFHX'4O@Gԁcܧ!Dys~YZ_ _x41_m{c7]vjF{ʰ+N ;c0:.h鍓Шuˌ| |5Q g˲YQpE!)TmDO&m* "(R &X~.uxB+B0SGf+P :]_ (ey*_Rͯ+KrcBsOw$Wz1"+ye+{k?Z:m S _`WDg[^#ܘ&yJ-fjw<ʬQN!̻N)e& *?%+Pk>\do|Pwyvy˭sq\vatv ñ䈞bi8gzha TH5Y+1T'4}2dz)ĚrTzSqeq>`dl nXP]/'tfkddV bQfg!Z $FG7LI"I.Z8ٰ+YmᾨFY)b|^ a c&׃]n{u_*EЅO"1bquq1*PnaL%i-*^*a?b\Pvs5?^T2.ؽ>sdQWԉ (Xmvu&WݺƲ1A60pjx z J$qZI3W~Æct¥' +ItѴNl@ ֍#=D#O;k/ {=ukvKQ0H_(^;\Qw*d?LHvueozPƆi (T%8r}> dݴ=̼\AdÖGZN~3U",fӲE$Z{P8T/ַ#h± kFZu\ؒa͹GDߢͫաq,2i[2v1?op8&6oܵy.#RJ侔b|\P@!<|q }Qmj ]@,yﭪDC4~ΰ9 8lɰI)0#+mZANsfx+ ` ZFΊe$N]Jۉ 2 3c$$t:` ?`Z5cҴlYT{ĊvzqoyjX+#[* jʺ.}4"J{ɨ6eZD{sГ8cxFB~̹"s ~#V^C9CvLLl"lr!W!'atq\q-xfSgt/v&}d= 4(\m %㉰riIϜ5+M6pkOUNXSb+]a9WvG6i)YɜN[?=bn >˳eGv%sglKFR _tv[(~g`f0UF[qyo NZطf6nc˛xs۞~r7ELEet9\'қBH+@jӰiz&ze&,Fwy0lƞ2nT]r֦ת׿<zv펤]%\APQiOF(KZlBv`J{Z6^%ovsDI/mMj{Niv&c`{7u:ts,4G$f:ɐGjA@9z<F kU̻XU#_+ޚߨߓQ|daҝKK\O |sǭFu63ו z+ܑGβGGZP)օm.>PVӓ0C,Z$+=]*u@Q|0 9YSIwb2f"T1d.ӕ{O3ގ!R#pM{0n}f]( !B@Ҥ$22:"N!P =>1+,OQFwaTIB4D# 
Z.ƸT1m׃}9z, Fr[Zknye:^.kSdfA!( [Mg:h*[zE`dM9 ǀ?F%HE.ht6ٙ;] dDik${Z{~_huE,]5+8 _HT ~Ird?Q lvXf(Mfk:|n`4lK8S뒑oGQFV=+FUTc OCbu>l k{QL0YEO9J.K~2+d!üU:cR s0[GMOFe e,"sx!{r̦t`Ú ?]nĉFhR =<*Uy8.Haw0/88d赺 J}eWrrUL.XDKϪCj (P/^<rrx(KIdZ"PSJ$glZeG[Cy!ءqfXZL:,Z?g LT[f*-e͓zkt"ج_fm+CKK6>.RX:]qR*f6D(^uƺSj 󄺞 H>Qggk;Y`.30=%͕~gN8:{q O&G\u2()XUyWpS9/<%RPĨn%Q_;V P\R<?3gi=sFWj#c, +gu#0< R>,@ȁ P حbm%Yth t12y @rNQGF^W%wP*݄BY'PQN F%f”,n<9[都QdQjn*Bu&HvrO% 'ama9w \5cvYÉFt<M\J>-℔.`\z`38l P.."uHb9ƘZ>ʹ.i ;鶝Q2(/\ ׎jZ'8RDp:i&s儂N|If#z̭7 AIH> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x< Uttw!ĉ<$H R@ RL4A&ymms-\.33I.XPBAPH 6:#ʊyܙ#2wZ_k_]U+?zx{Yg/G+U׮]>~BwzW4^Z;v5z~[O^ox?_;㇁JOJ{[g,UKxwsewW#7_uwɫ/7?}Pmz>Y5hGgeYsna MbMU.ohT__}7~Y9}6o-lv oUۗU׬-#;ceWKSՋg]Z5]t7x~ov>9wK|/1|f/Y}L쿽wt禪ky?_ӻ|JA;i͆sz_{t-̛F;<\oU miO|5~WX֏FU^ߢ6ey˟Q oѪeUT ȋzT w^U*nU՝K?ZQg)/MK\؟ xū(WVg@w:5WoIJ9UR*c_cQ뿨Zý-A ~\]uuR:zzw0g')P2{$ )mC`urOU+3` Q4@mr3~Pô6ڭʳxRPF{jWX!ò?t+Zk5}>_O^BtD6_E>F=1:{]`X-Ȼ1"X5 -ZmiE6BZvѢ16-y5퀷ɳhaHlh_U' w ~̟UIՀ2<Z<Z&셞#+Z=Sw a[ɋأycrՀ7mu'FG1@~9ߥWt;:jt˽v _ JIVG&C: M]\-s$1 m Xn]Kë5PpiWkAk4 TWJXm`摘$cw݀h':xVUUo}Ǡ6t[Xm01>vxzVm9]7w䕪ꭌBCoXXy.7A tY%sK/o{"w=&?mmdYw~dL;#E_؝0pv `8t>7ͮ*CS@WEQnLkWEh\xl~eXy dI; 0ϭ=\AOɽ/L|'V/X(c{1g`vB ֍+r0d;Nde t32XMB;ųt*qmT_!7PX|l{ 0j?} "IIҐߞ$C<_;vbk?b wJab_Kַܪx2vD|'2ا%9:+-kUz, g@Γ8w"BOFxvPY=ts폂pÈ}h =~Zl_x^BS ׆{=z5w\ aE#bAޮ5AoFr+t^|rMuKtvpHaBwp;M~xd a=u㉳ЈsDW h|L^͞4N5Փ% /nĿJ:]B`+58j|zctg0FGVIi{0@gu0A2sjn'тȨkx Iii6 4iWeTXu_:(TXUߗ.3ĭ=`Fy;w"M?JStO^:[҂CM+ceFN_bu!ʈ&~ BDPQu|iίCH0 rxC·p 8 Rv$5 -}TCLHa猎qa# jMWKEoP \ _-E]ʭA;] gM [!}qj ݀"D 3981{fBѢ#vEAgpgdNW˟#: J!#/(+cLoCˀkt :~]"k'_MU 2y;8d;A4CrPNE/|`DhptRY#^V5pn n7 YF?-~7. ?idR-4) 15P49YKBb%160>'C 5tM%'WMue0Cu|&@N&xB T)qmh! 
& GߔXp0 #FW= 64`uZ+OyL&AEAt@ybBJ@ |Ѥ"Zxy: %s=+eBh܄ho^a:y j.DyPvӺe!BWݣ\2#^?|퐰#v~)!-( 繒j ]usVX@I4 -@ބt+|; M!Um =7gt>gBNx ?=^1vT"8Qh8b5mEx{Bs-h}+^0C';c7z=g,D@2A)<@I \ _?_`9r>&& JkNa4yOtc.H9' ĻuC0Dq3j'"޽vz5f>ʼnv^)t^~-  ucGR2ÁWoη`,FJ*p]wD͎C65j D:oo %00i7Xxꃃ߽x;i@??Gaa靰X`ҷ|rN%,_w>QO ,@ P}dђFJ1BI8 &X)~~\b#|q]b4"RxC~@Z4 ԙ |I1V?a f PI]9 >K<ׁ:ɯSоpzD&l3%_ Mm)*Wr<e~b/? (QwSBػ%Yk7osڧApܸ-Q_VgSljY0ULP̿7#I*!=Z㸪&PD5%SP uCЫ:i0xTmӦ륇Бhg0Fa Hu1ZʹF|hE-DGM!,Ye%zv# 5'L$qP&>bKlQ %x ?.t!8*`W-&I3(uՐ{{=VE8U8A7| 'UYͤn2&^\rg 04߬jC:,z䏄 x 55FFVx7&.O#I-Hd@=Vƕ}V+|l4@,"@=xu1וX0da E0^\6__-`5:\^4m,49K` bi]]OÇN0g-"tx2tH>ہy@} Gx}|\_UxKHA#`EFy@Jx?-OFgKB'qzA\7}ș0,EV ixg $V{LJbP`ueN%_z@BX}v )M5(DWt^0\|saK+rpdlg1` Ҡ r&E5R %6[t[>_~pqLJ13%v0FdN?D-{т3$ʘ uM9иXhqѓmb*̈́g1+6H\{3{^6ߔ d%\>4wY|/lS J~!vyhGDɐ'Kw,GMj3k<-dTzTnq\5v^ Xvsk;d\& B%9#sF`L1n Bt'gLf>i_ ܨa22RFa%Ik/bE#Gr[))Bni5\i^w1=voH/րO̓' dkxu Xoڻ ohd'i7jQ FYM c4U- gݮl] 0F-ϛ'v4"o񵴺`TNԛq mnC,{/2^'gsՠnSD.Gh߾z7fbXu )aL#<úw='y!_l آb-\1:BzĄ0i`chKMZ_ )A/s<ˢ])fY&_IZ0uX3hrd( iOF<˨4uDor;ċ޽~W,>?@"vody:R3o9 _z#TDZP$XD`jX&$=_% {rtqy$q QksARNƄ$Udv2^ HEIm) uB\]5{nqF/x"]HI-nm܆h7ӝZ8_ {#0MZ5{mrIuwdLBI`饇fm( <?MzS'T&qaD"vӿo=!!>P솈T!sh9/1p3o̅׹]EmH =d06%!9y6>?#"@H72Dk/[ϸ3GbD\_W'|(2Nx)+#I7ܮ[Z~՚ x3N IaHGv0. _Wގ`8/1ź?!mm B>uΞ.x?ZBT7,$_0yl'9 8?\!0`GvqOu*tO/lyo$Ǵٷ'AmLGl_ v!?1t׳8WoZg>TYa,tހ`i~hȌkrDy1$72uy3vޙpk[j@ 0\< a-eEx"! 9sw"60? ƨ="gAN\SEeh ](8'fr͒<T_u)hqKX6BmF(݀I]cE e}:j$m|܁::1*q'Xf4}׈@=bB aWRc.Vqm tᭀyug tQgE099Ir`آIb)Bf@x2B?ԣ NO޻cK!W:t5'm{o\|rbtp8^T b?"=Tck-ظavfM.ṀZ.Ŝ|3TucBiwڽ^c?Į;K9AzXSjx:LWM> !ޥiaPOeX4Ie#WHcd B>NSEt]MxN>!mpKkP 4Wuڤk81erxHE~ƷB)_!ظ+cS菈ܳ'&`[Z]4θQYA_:{߯+s'AKlT^?Z|ҥHOc{ƈr=W;:h:zB{`*NsCAs#ʽPcdy'-¸i2p$;yCzy/1@t6C#Cq:]$s#;O@syCuު!p=J~qqHjps!oz> ;Va';ouH;oB\erm."@ c`1h1(:0E L@5~V|6ךᚻs`!9gqjV.Ѡ@cnUbnKP!&pt.T{`S4e8ː@ Nwk+ hZwhǦ5N'tPg`nB?.gΤ3ž#30/ Wp{s3 moBw'I6K|ip·5܅N?$P\| > Bn="f==1[*K'S\kv,k0m!ғL.ոf']7q%uC njf΄ 2Q+D,!G Iw-{{) utJ(s%D5=]12\bHZpjlW#о`F uK <}+K9tyoPREŘ{v*@Fi/Bh22m|]6^܄$6[!.>-&m{lBO? 
׿ePD>I:jCq&!UDG0-_@t8Js5sPGc.,|$&Β|2 s-G8 uPSW2` %K'<ǞI& ϭ}Zy?hE͞Nnv%@8@>~3iQ@hl:/yZg7-¨26RƩ>`E4s&2шP4|{i( 9O̥)WZ(\i;3X:8\KrnǟjsY (ɱVNhtAS@ysƮ'f&"Z@Zw5W-_ӀD"aȇDZ+:,:GFE6a8\!^`c \VsO1ijҊEVwjw f ˥BS_U@ܻcm\Wg@9p>9O'N1gIV0!4D6Wu }U_0v>f&iԱ=!pSCw=-g~9j{#zHjv;LAhűEP4Bk;fK9{: y1$idy+c?3 ?h$(Q8G/y0~ g|9}~a8mOlmt52nnw g+z~,Yȁg DL?cxF!F̨,柯A&I 0v2 9@aq0RčN2d>tv+I5M#)8ƨ9Z7e40F@QD'@P0(G@Y걮C_zbK,C&!:[ДXY;TZHW826mC]'9gK+^>V9{*w^nK G}h okIBF,9ϗNXXMUV݃73an,\"}ZB5Ѳfӷ0}7vSAeGRXCb:I,x U ͥ، Vs16 DwBie71Zkt z56*SnNsy@<>Pu}B .%ExأUd0yxH`C4gjzô <<~W#" 7Zt]@GH]$,Et3tb`ԑ9 AWyv*8&諮0bm!|vc'i˓o3@gFTCCAgNǡ0=qDG֦ ց8BHo|Xce!>% :p)Ac\,THlK ޠK{P@7"6T\A{y"Opbmq:a!=2"Pc_/NHd7#[62귱A_=7p34M%B~##Yy5xig̱VGoUPLv< Vy\swݘ$ VD7~Mu9_h]dIf>]_ ۉF#hO y'TLr3v꾭Εr)5*tڕBM/t Dz%-pB,A7ЦyG5jKomCi'`G=؉&;q$#9{eVL$a-w}` )Xz'ty#|یI3 1_] 8z۽K'@4ȿ9!F8P2'tD㫥|BA'w艰ݮ.̚&TC쁬t@@P+s̙s̈́8,䆍 0 z\B-yfc!<ƬKH,O1rчpH>cu @DJLӎɸt5~F:/ٛmp9tCK!9 dբ H,vUX9L^2b4n4Pl|E 3gD98Ѻpf N`8| v$8 s9Cb8+(65;D.y#U}\ʱ#dxIJZ lA|sfjsHQ=f$è!vOA5qhL 8uDZ؀u~q<"y-IWV4CU~־(vL@g؉:(UL؈%&P'OCx\s *V=bv1y&:/NTYbϷ2F0]74pkW6aA.Cg:cw h0ʵ]OF<ӞFP K\{xގEzFSwk7 F丹u8Ǩijhϐ5p݀ly]O~,9}DKܸhetRnp1Q'9Vf9$Jcif= yT+#27?V.*`#5"aꓴ#]6-|7Nq=ʼnŚY4)t&ڑ V2zy>N,;5juO \̙x|dhJɨ-c+mP+B#~<:1pMsHƸri'Рު6QH-^8t}hnƋq~Lr^A:'2W46 KUD+ƞrJGb8s[ P}t}9 ꉳb!%bXTc3m1ymZDˀ@c=PE ITA?=iIF,N"Y,) &ܕ^'Y7p:`*@kWsmqBUC]b'՟Ԙq nozM`W*Og ytIƇe͘P%#r@ze' N\f߻F 7~P&ZR>ƴc E}³r3xoALU:T__l-n NTr2\(#(Eea@]7cGGGX\XP $pOG-cAu7NoM.A'a* Jپce~+` X7}"j=CrpZ*6آI6ug}N|7GG.lퟍvx;R0K'/LAFB5NecՇ- Cn(+#Vrs$pZ$W3Fr%nޏ9InG7)D5*@YCe1* FD>q59Ic? ؟e= tޚ+rK10NIhѐpl7ę6A3'Qܖ37vOge{ZVݓVD;: !vWKKQÿlkhOofmy^^SQ#DKY$_& ,ì[C@1S%"IvkyK'ml9P4huyNIoB x Hf(N8Q#ʓHUz,K'8;p3z}E!%jM aɳuQ hf ?vnJɳar$J!d  ehᏮcKG'5E(uN x F280^mmV]X5'B/C! 
1DiXE5zK+NzFv >tk48=<7rYEDt%Osmbŋ:c#mѷ鏣/n|GhEa~oAȪ{ЈbYwGp `bxggwҚ;F4ꉳv6zO9)fޕRإKEEn@YG{ ƣ1P#C| uhNQ {@bAG-{lL!΢z  #|^:nnQidǦ(}` MnƛYܤqa]>@@~j{J2P~G)6h# t&V9\\!a_U0^]wڗe$k'bJy?K`0FV$ʲPA` =cbGy9`czag^O)dlhcj$$_4TpRy#^靘<xetU<)nbQ;G >.f`D|9\\愧*bеcQ L{`[rӥ8_Nϵւɳ&A nXE/F?A s>8O"HloK(ZݮIQ =*`2g:mrlGeL%Fz>lL2W(/W-zN[˸8,vuҼK@THx1qDn,9@G0jșb:?͜ݑ/Ė Ēø~"?!Þ{n(!X)yèFb)*qM»uPc8zpK1ja]O""c%ù :*k,AA pQl4 T`wp@8Xn~4MTbc" B8RRMN2xŸ7`{=Bk$Cی}u⚨Ÿ]:)dH`̖_b Dq*+'qGQ*v[Z :Phh#eGhQz+"Q?uobiSRKcih_ i7kw.q7*F|m3WG*(θ5{3qJ[t4 ^-AŖksg [rFtI8fo,9nsCD9Ѕ`GF|pxݖ81u"W`kX@=UʪNg='rAbTe@8.>( ryi%|Cp2Gcs>a- 2L Ubo΋4 nѽ_GL<7}qbEH` T=x%6hGή E{Ʃl$+n ߲wl6 !W4ZZyF[?&rzz!Y Уbk ;#buLMvӠ.@]87Xy΂>g I6]uQu k'#+ ; `UVХOJ1`kiXf4a8z$8S4x ϻ%rF2b1o4];4ƹ s/lC󱛎Rrxj, =̼%T19둄<GHxx*h!ͱr(gQDcR&$a?Ўy/& εg-yP21&^"~/IkP@]K)*V>s3 ۴Rr8OD{ :.P.|5gzHȿʴ%ua+Na'MIV@J$0&B^KA5> zVb4<H1]!Q+,?4"nY7#T;!GrfCƎ)PonB:@8WG&|.\舘|#5anް<-r$4ZƁ}W_STLf;r 5!=9)-xSy Q:qN~{w#F0ÑyaM&|hL0f_lNkg޴Xҁokussn\GLe*/>óy#&GvA{ \XŲ`}_FDo;oRBw-t(:[EEɬ(<ƕ[xADD쯏3Nw$*8M\T~q*>$<(<;5DlTac(7]*ڡqv_wޘ7eh[eU?ܵZ w[g菢r{ROb9{=rޒSnݞOxJbM< ؜ͅC SEQ,QlrιNwϞNiofbԔ:UL}8 p~Q-sO#:P% srp0^$D pyIiKLxL$eg&qsq#$ x stZgW9#ڏ.BԄ_ q)(':J J9iݞu9qEÉqR^eN:`ZBq4!]9P]ܐw LpDT#HzÓN6飑!CP(TtY5}-{>\ ߯ v+H,wf)$ĕX! TKqO\G<_KtL{:Q`x|nǖ=@,t|^q5<-8 }~"K0u* 0/ yjqRcoHأjSXcsog5:@ޯey?K.3I@~se!I R(q1|y`.mq%zޯ*]ϱok1)?nkKؕRU{i,O 7Cd7ݼ,2w2{XrE^Q>o舂PNy9]*cx};}e츝^X}B lMZȗjn*R0y3^KqQ.~Ƀr⋮D-q>u6pzo \_+$o㯌jTx_U44<9;sV?=XΑOkP-C;G!f R`HAzLIfOCj"'=R VE9s_*a-w{?G2jyRumAi @_sKf4gD'y4 xgFtǍ۳*[Iy2*e4)6U+?^P,yq. vJ- a[9XX#Qg+ 5y_KLq xt1_eIi[^v 0 yӼF Zg -+#>88ՂyA6snc?7dΛ&'LS̙y<(onq&œʼnudGXPYw9'X]RүGx~BMcy1/.#W{=T:5FCT^:S%p{*{f -/ۊ*D&^B/j=rK7|zwRlz?~p%u|= NU9Stk^hK̓h5raEHeU7#IZXFvXEg_Ghe^>o9}_fDbGphn*ߺΛehЉAwr=Ob~ؒW͒`29GTC.П-'̢~=g$^Jw`ђ^obnIk:$+MG )KA CYxqzv&a&fb#$@szH(3hGӓ~gXχfeAt+kw= ZMF-@e$? 
cU2}~ˆ8iKr&F^v׬H =oJń]Å}ʐq`m#v"U\-ب0iBZwԾ_<#1j}QCkaY ]WZv4o2ZSLy1XyoP ݈BmnDBm.3`aYH%nwLpXbyv[uBs$QQ^P]VqEx5sYK?H7+x2idԺT,{bGnpqZTZ.8iRF7H\J:(O2bw[< ,Oq3ojqԜbd^ʍ^UgJ0<}6P98X M\]4iulvOmeI-|XZq /Z[B=7;_LA$yɵ+{nrtx ,HsE$w#d>a`xIhhVz DـQoDD;+7gX͇^uj:k`# EQ;M,,h Ìr5ZkTǣV{V`xtqKzl$ _Я9fQ>T)v @$GIu 6E7ˆs {e)EuwwP~s8QP,߯CsSBbTuF3gF9͂Z0dPOy#>Z#T~=.jFk,|qTTdܥe̸nX/E\,bzZ@d)O|_5t7b-5Ѕ 8Ą=gayhs(v|G\ ڋG!#^uՊlvˢtƽ~Vl)kc/-=n-涎p&Ǣ.v4e'SHmSbeͺӵ}JsfX`9=@ۜFyI~ AZg`h Uz*7N{Qu8,S,ҍ#9 ?w1= _Qpސ w⮥,ܝ\* NGKqIB1ƨ!GOS>Ei8+ǻpj!~ܧZ-leZ_ǤbgQD[l{D3lm+-%0XX,A/'u-˹%nCBӯ*&p**`L&%5M<; o i>UuzBGkjUp l-^1:Hu9_tb9Q.ίΉ2vW[q 2Ɯ4Q Zb_! YwC,?%DQnm8; ۙOyùMa'H'(w_lN%w3VXH-; (gRY{ldH~o9Wx+ }X=Z|Nq4Fq:>wac7DA'CrxoɃySe8(7Nq==l ֢*9&RtPx,# 8Y@z;&A fd\*IHlz9+H: {k !F3S ͸Z>o?vas&a81ͼaBl}[KQQҬRZ{gW` =Xt w=mƢ̙Qu'R6o .Ll#wM z[ZC8BBTLG8 ?p;1ҙFb[)a^tij;\tR4>6D: .Llڈ7#b.4*EnjFx>! 5c*y!)Nj@X:B55y!<6&F&/Sv͋Mq@^崙H 1^ s&ʨx7}%FGWKY۰ XdK ~FmO)׻N:YrGKprF &<]d&;8J޾Gt15܍7e^$/9JE,guǕ]Y^O)-{݄NTEp4~vyttv.-ldHcΪA;\IeAnw]iPM!JG`SY0Q ~;n>ZIWy 7 F`Οbb-%-@n7W/Od EK*khs3 %K'A Ǔ̃H\Xg ޲oo&mQL$Lة:qŒR9 PCҭk qxt0Vs#xt9;]OT֡X\U;DWaI/FYE#AS95+2DBixZߜl+T݄Y8@F_:1?A$%\UUA{}ӄ}q{Z+;Dm\)Mnf6o |[[#84ξt鲿BHz+9oaj6n -xI]`X7P{M )#7R:ZwFd8 ym8ʞ|>>a a 59>R#lA u.eu9c MMGfm` eL,w5ep+ Vg 5J;԰?Yidƈ]6"GvO]MđTŸֿ ]#uI0l!㿑z$9bCT dEvem{j_EnKh=yit+Z/}퍪Q6MU-;/^}ܚ9΅C+ 9Kr?@\huDv6457ʬ#«{Vݫw#NVfI^l!w?+~c{‘_Ul2y%mG:x ߴN?c4EѪ+ 6_! 
i@ E?'yjA5j"|mArM2jV()Gj/QB?ުz횗+-) $z>j.x0nSx<ʤ Yq䓈kW<Hz|-^E14;GpG@`Ϫ9&ijNh`Rl)Ԯl7m.(61+֡d؍ x?4X0\`H3ujO>\8˹#hr5]o3WC@yƘ"в-"$!YI6]\.g UMCUk8 }܁DZ^s՜fNb>P.n:mEqp t>LBũt/ݫy59HW.S?T$yx:8JՠUnkԀj ȴ_u ul_=w}c."8ͷu_ѿfT5Y4XaIJ}W~VM@Mj5T+i`LUX@z߭^`Pc/Z6J /x/}s|~19TB^jٮ @ax1پJ} +fkn<zDafe}ީ!MCsAS泩j%TڦPnp2R-)GY5L1= !>יє*:Rz;JIBVCis4'붢Mk2KBx>;aUm9ҷ-jLH(xsrsìmA+o'So ],QH G5|P-0Zu{i,ϗ~G4DX7NApU容׵ke텎`NGk`z3ر.OWx^a3:\_]N#6d!a | u$讼wVLjX j%rMB3 =Vro̍ %?)Oo QPr@@Ucע 0K=c.7a`)AYkPuAq<*1g 7ѠlnN.|ཊRtԲ-!}\ /]M\N5n^(Em\ڨj߿ԐǺOo٘g" }#\ƣN@-M^ܧ*Y劳dYh݆WFU6&*sC?̢:!}u[2s]a38]zb Z9w3ԯJxu<ĩ]3mOIy*_X9M[v<ۆt!X"໰T=-+ 1#$/sI-(ρzw+Uk [g]I5zN\%Ym.ʁv/,N4aKz.x6kһ뎯Dx@T "ðwPhuz{VmiRTdb; -cZaAPe{0 8JgGn1.*R 7%;U\"&b0UDk#Neq@U W ;6s|~CF0ZwZY/%3|ZC% >5_G ( {bX% em_G͑Pϛ O劜+6 eGEd+8VizxKFW3TƸ.hӘ?YKΜ4|^r7nkzۂչ~lyu:#:t\#Yu՜T[&X/ *6p ^ٛ-]XU,#l7 `Ѡ&-A@6B ̵ZS+)/W;8[&+tj_OX>k9ߐڴFqÈIzFdaPHk['TT-,lUuΟ̻3+8S8t6s]Ωh2:d>cIQg#Z il7:r_WR4Sjn Ϛp_k50篰e9(LET S{uH&'^1M9p-c6=iwBm(6^!\6HNxqMִ?##W$)t!) W _<QXu6CձcHdɵ3#s(u>oUr86DzDT뿝6\*W礟sZ eN?0G^=2=6U·TޜMfZ+/8~SVhs*qw:~`8K̋e5!f|߁۝r`H8 PˡB(jsNkeX^0A!T {7GʵQf? pkotgfW IUmĉ&]7@"/sc&WlVh5V{;aEV-5m,(g@6ToFIsbAE o9g3rGEQ*΅˺CwjzB".[<, w@að*"pCR p4 'at`zq0$E+$e t*c@/>^W/ 2qu fє:mȒ!XI&(*7͓ lT2u^0Ab &n痌$IM)@ɱbZ.G% p hlR,(4ͽ/B}=[^\ _j9/8mAnⲳ6#.XsM}dpuUw P+iv+f/[z0uaʣuK(8pPFloE\:etòXrua<+ CqW͚}ڹ(hC`@T5/UglY5)2pMsUg*/:VsA @M8ڪ66HΎ!,C5(Esf>VX?If:3NDvLYʢ{ LO>l~lnFuV ~iGZ22[99Q3tjj+!ޡZMFngVnLC 40PH+6 ci8y8 `[  ;zpį Z_SjX[,δkw4r(֣nk yޫ;и.^}8s)G`3m)PaO[Q˓3r%ju;p᛿&.$U?oɣO(Q6:Nd>twktB)KF7SΏ %aדcWnW03j^[{#z3(j|RnsoX6{mo"'{j4t>gmjMJeeB3JAaSoX 9Ѫ6G?fO6 S>⃐_ Ϗ2N?P{*=@2yF3@FH;/R)a=yq [tW59`6%;:J hkGLgLwO ΔdP~qK7<&/AcX g*3H9Q|Uv0[}ͧXs(Q7wsSc &ˈ{hR\aqI5|wT!{2~U'tEHA=t_\UbԬU2pР4j &b h)ja}IJŴDC Ҿf<V3M釴 rG>{pBo$/fz}v]=͞Njs9^P_Mhr jQR u+5?xdʄL ZΤ!w*=H:,G}a`eO;Oj) !D8ȃ9ݦzfҞ}՗5eVDv. 
OzlwMUSkHqe}v4埽FV[ ?=yQ\PޣkQI;dߥ^mɉ!@xZT7 c K䖫.nV]NOe]8ր@~[=f b{sR"mق*e:~_&[|4H_w_7O:7rq6ݡn‘AJ} =7Q,]{,0Ag5c_GRuLQJ(<3#!)$qj^+qh^L[KF{1:Z@u С".[{Rm85v$-@.׀z&T LKwh]oܿ#` 2ZS\C5ϥ9ôRѿU0[g(/3-s3==GT8QwՠUyjkbl*_pW 8TMr*j%iɒb}c)zYɒ.Ja<933akIS&WvcTEq¬ <>ҽb(>r5c3:CnL.6˦$M.J)yXᗶ~ުve ;\yqG"l|[ÇU>08a CUJq2TE/jU6zP7cǠեG k/}d:k1Rsq ca^lgDy9.Ly86h.yd5zXuUTvoGӳQJd9ઋG1#yh/zh1E*-@{ v9l">.9ߜ$%4 Q-AJ:8r {mbpMVX7')pQRHkV/ ?27#ί|mUx<6}Aŀi˲dq;eV51F Z3Td WrA| ^鼕6_ib ἺRS bgET8DXpb&)XRf`Xyމ|#H8:@J%!cG_c;J"|2oO쿱cfJ@46NQsNwjl51 nڠ}|3%Ub藿6]1HLiK>m֡Uo̸RDi/,hWݎZf /5Hhdl#ެ{5:UwaՊ~ɩo@ݙs4& _-l\jæEҚ_t$!le~z=ܑ4EtwOr7]NjpQ:(tR* #dpVEsSjUHCZmSOGh_] ޿ JBY 4TEa*4T)f8vmّ.%; qĒL'#}dBwڳTI4E"q dV]j6 S^r!+ ="sWGu#P'K./5n^]&Wz/-؎=ʻnwO5bJUBu 4KdbW{P,m72ʇżq'|QIy!77x-(V>bY!^dfF c1cc/o=ru +[`)I,>3%Ap|9,؂/l84UDXڂ$[ 4ߖ^͋j)ٴxojĵ7"TҲ=Ik54hJ6$ȕCı&!)ɴG/ {TX!V^0Y<p>09jh-SK! َ!xR w6m*duLUr{$kj8PՐp| DI}0̸;vJL6ț'a;s^C.tN L%Y%Mb_G菄T/S)x3sگt 5\9":aӔ{!~|F$0HŹD{û̉@wUyKc2OXeDw$R!QU&6E&̂"n[gSӽFx_I*(2r<=xOpܹ5_{wG[5ӃQ$/`RazQXɨU7G SB`{wfiv{/YfڎE;7^#)㚹yLx]k.&~0$měR{< Kۑ8Zc̀I>mA0/LM>u~RN*a@:?ڨ^Ξ*S>c+յ*K8_Y=>h$k&\ jԤBn ϟ~ʔ7C}h2 Ƚj,P4Mq-Կ0Qォ[7׌uÕ[U5&zhrvd^V+oCf/*̩FlIx AJ=VMg~΢]7 54s1MjϜ&Sώ@qݫu!^%Hii=:~ʵ&MI@o[v:cT6l 6ϕ^_CP{w#WDQ[]7H$\q[%OUF#xzW2Vm蜠6<ޚr[>3^Pu陣CxWR}4Mvp,ݔ' ]?άXd#N*7;7BM ǽ5k&X-`, wFyyT ,Jp/R g,/Q`@CۓmLk:*[3Н̜9`wgPciuvW3JKIבAE ZFd܍gM9]8n+M$8VT5U"@ o'S+x ne1Α(ՅwlZAewb8*(k$Cx䓃ST"+=!u5V otZp Xd48,T[]l1]yvm/0'īc:6]qXX#u`5yxJ+k. A,P59L/5^tFO[K q@{s*Lwg4m O?'}6WڎmmޘݕWW>` #3Q{}dG=c;g*тjӅVν|` s3&pѨMª^wLP@y &xVGF,Lj9쪑ai| I nZslӠZOd리ҷjbtIArfI]7Ȭ޸KgFJ*?oSip5 f^'uX ={>t߁$NZlpO $Vv2 c模i"ӣ.n-ґVΝlpA- =COw݁6񳔾mlZB!8BAΚ*]atnMC~65ӣ^Y]y+ aG GpzѦ :]51N$Z@Rl9"^oU!ˁq Xu X2GXQM5oHZBЧN_4^"-O·ˈVw$6J}ǍUx?myDz:x[CQ}rÑɆc2WLi6 4|HG̊Dz; c_GHi3{p"4/Ug۪Hwю GDC0Rz \ 4"i"d cxz? 
XɩikLPV "4\j!M9ܔz{MpRقsȢMO}t̷>~2#7 HQ=a( %1rv| /򉧡FkP3`;:˰ݠ̈ᓕޙs :K%}.jO߯#7O `]7=굢\&~byQCJ3uzhgU~5mФtvJGG-Z}- PT&v2o <XiuNJmuIࡹLtZt oPJ򙿦ZtPAZ2S[:=K{yë]-G]˫q_bڠ uMaD'ɿZ`("cN |0Av'V+l[*x[>Ue5h:@o/O&zP:^!J ?װl+;/ tZ_V GZXI?r}^&Gx [֎^<_u0\TFvzCs_)ѬjM t̴Pײ; a mB}*ZMjf8*sosV]1 eGMK?p%ZN!yXp^.xgPeL"f~kBm4t +jM5JK`E,2f:ۑ6 1K0˨`| jhK{LnQlTju] 9d$xx +NҕuǾ'LN. ,2SuGa|j)h1s0_xL`_1+fӻql!O SM %|ob IuIS̲Jh 0T=lhmUK[eVspSjYs8֔|l>c*dbQUIO.D.ګӟ4HW1`` !i*5mӛ[1vʣU) eß% /x7wTD\a3ӦsRҊ 9)If:CʱP'~s [-F9í'I B" cٔ#}2U V33!ouݜa <@㻨6 f|pɬeg& W|[29jx4hև2Ȥ5ErnsMx/wy8l1}x @^c8Jd,vd\w2 DHdZ?\5EHN9?WzlJX\@d2 1#Gt.N'xBw4rp jud:*d>sT ,ٵNtgf񖤜NS>vdqΛ:Q5--Mbp+Mɷը 睊UImIeKWaig^Υy,J\{qk^fV~l҃9!M DH+h<eN̺s.;j =uLTy uP=`>*F(;dwewy'rL@(@&_⟻֢q6`(Fn8$ͬe*QPv)g YF K1ۃ׵.kX7EdJnQL%jMNT&[jt1!.7 ֣cUE|]+Py8`@)qp/`P h8@Ntm5|.#^?\tr@Jk=[ Ct՗kP2O5S{= B`3 Q_k sdtqeZ8p/R~ tW-GObwz9}7ۦ:iVNWi^9Z'0/Q<^!|zھ%_trc@ͬvg&3d_鮡}  #0TS:jZQ^NyL7z]oŨ{һY%sN l>ڄiһn =&jFRׯĭ&[ ҺWM 2éf|žR7a.~he 4L0fϊ[^Ћ#TjhrV2,810?oF*k@rXi6[5G)W9 0U46)~fV=S.~=xAWI϶@ r[^%]zQF/2X4KF#~yW|0}!}87ps/+ i <#GH,SUg&[a)t,Q '¦a/),@t}>frc׃,/<{!Uˠ$VsD]73Izx}^ܨ+:CMٶ6mS Z=m8:i Åk8DH4>@Tj4G=kBHVY:L d0Xq[M'n@ |1nr9u0i)2:y\`;Ϩ}N=gj\SzjRl;m{ϬׄΎ%0Lu@ WB+6< Je =e<[> S}R ڎ7 aӜg&%)q+1Wwjk~\[g]Y&ѭ8Fzڅ(7OϟŠkZM?5@fgupAj a{N:/d/U .:Yϣ8l)>=(F=,?$iNۙKp~2"WNiq&T'U׾|j\8p-CR,OdCnX~%aašXf tMzPjWTvCSt2;Ou Ys+M67qv@u' <h3^l}#3\'krʚ$xfz5*UKba)W#U77êr#:S{zVlϐ⿦^ꀭ5t12[gmBAxՑ0lh}v !]F|.ԵKs xT-d'ߴW ́U*1 (g;48\TڠC<_Ws"I5"TGtuLwR~i x}Tjg]A&@gt|ѭJ@:2?_QGt_G'?_) !< )LhڋtwXTW zLvZ*p eЫ>#3x;끒%gwTͺ_)9M_֜LhFpKf:U #ª,x~ZNWM˕It]MR77 s1I`Rz;whYO+c-?cJ^|m~;ѽ8> G*7g!-5TXqd,]CTـqQty 2ٕ3~-~齮I8|=SS}`tXz(M\ƿH<)鈬1QW𼽔QD іDa l7fUۤNkUɀB̮eI3dm~z,kJsցĆ#jJ~FbjG?}zkoEڡJz{ދV TLͧg0ɐ9׉Dux=֨4-aX\M-AVyqu!lB2|q/{m'XY0Nۚ^Z3knؼ{+m6w kP T:g5Knon+,t3z!oJ)OJB6^"-N5%b&}B%"O@+ =gۆ2ڑ-zuRy%qnJcfxi}2y͸#=7DTULXJ,klB;:ru~%2D<"zr$tʲ2dJV(th Jx[v}0V9p{Ch1^]Vit<4'̒|<4vӦ<*|F+ ^{tr3sFLH6l)yFMGPCV1U Ji${.7^/ncayi'5Qϣ%f?U ͐/[ \#(-]J]43,s*5K+[i/z?lzhѠ#_e4< O`Ja%@!qveMNk=0]j\ j֏w3<|wMBwiŖ2`LX}Yβ F 4`,qU&  HN)tԤ+y\y웮J( ՇymT%{a*tcdGs+e>b "='=LW]g 
E鹂YX/uO{>U4qf9YG<YvUB;F\%ԡDu8V'Q鎛-P{✇W{ 0ǑI;r݃UG/1G[Mj!{ҽUTXDuy2~A4qJ܃Ok}}J=xi}Q`F/ơ? j4xrmW_5gwk~'}q%r+j:G{ŀ*w}=CşT)Ey$2F$\^G;]k7'K`͝"D,nV#78S{|&{h|KC`8n~*UP }6geW˜`]7`b_@p413vM(iVݘc߂ < ]R!u \U=3un\&UFMs$W-ZL]yt"Q;Y<&_9Յ;ʸdTKZ=` a2@)A@}‹ VTt$`1bQHcWt_x|~,<*)&m4W$U3hU8Sw5 n'iS?Rd+jF[:j}4Mq|xB)f'.#H#f%̑O"&lL͜VsXC[V{2a^ XYݙay5,Pq'&@lNGL<V{2lxiò <x&rVrHwy8 &"H5C*Pe(}y 8}։PAy529q#kbX^zXW=-#Z9^)ba>lq`9J{1f#F5/Wv cߦ@~UYPOzhsK_~l\Kf2eKvqWu8c!,Kb珫v>I!J>4pDL?q*gϔG cݧښUKF4`jbd]P:n)G UA[2 |Ȁ:S9 ~j%joa1*s%`pxUU"xE\Uúj" DBmG34vD^-jDZ #lDo%1Tnh&^y. d?v=HC5Oi~xE 5ؑ}$^jwN+_L6*֔Nߛ?yGDJA"AUCKMgڹC zEZG}A|>]wQu !8fbbu>dƱi c`i^ l(a1I;TF`{5՗hhi 1HMo+ ݩAnPjG>ga+ӌ%]P^ L/hYߪYMۀ)&ݶTA5P$W'2͹Т{XVxJ6eH-=r¦H;Uc_æ$+baރR16x(:A9z`|]JxAsuՔY gt,szQ.,mn"61[GLJ\Fe~颪^df?di{JVzGd9y/0:w%rʵ򤹂7~)P.һ~µ#1T7Wx)Yt2L7I\wƐ%NNld,Kv3m:'Sˠ"yj K 79|Sm Quj6rUGq:W5^⣛Ⰿ* k /h6Vݫ rԿڰmHlbSڣY) *cgtԅ8|=%ӘhIݷyw9fٻcȶץ|5TTtJ 1i_F $DfL[uQdV@Ǔe.<^exa!X*45 !HkȘb#:uBȇWekPMkMKO*')?|?gS3tpɚ!w=߬!G-UCtP̦b++sGiseK "w)6'U(0ӼM)d<{s;Ɇ_y5< {˅l 2fMs_3U9|6q n,@|~ -b{~FvcZ=7~] 33]6R2aߑ$ ? @fj~ׯ@wHH?jjV!ϲ;j WRMAfzҋT7tV5vvf=Xbw#=āgC^fhG%:TIӊj-yM1FG?x)ϹE禭ZKjOyfnҡF/jP39.@DBګHY8;Ey *&!*KVNP5IC7= *[/+ubż6f 6_j6 *v۲]NaQոٯWTsJt@.=aArm:#E_}sĹJ'3Pl6 !D7du[U5}2mew)u4j]Ν""6)xx#iaQ[0 {WíٞR8M(RVCaَ!|jEdfHWO"U{e+0z5O$2~e~R SYғjwj.W5bk 5ھRvA<]%)3GxߕH/t5F|@%]l! x\.8Q/$;܎!EXoJdŗnFpτNލF!xw[ڹl˲Vզ^U:=䌍ukgN\hJ%ǥT»C)x45؂fw3&w@:+< {j%qp)`;*ibU-kKxgV֤+0h(]M3Cv(`PyҚ^>1xRӝU1['t\C8S xЦaQA\aK"ɒ mscM?$w> uf댝ük+3]HlFu@pXUN)J{ Lyݰ:cTN05$Tb{Ƭy*εl^ ]h4 zfV&zA3mnT\y,&3X!nmz) Xތ&6 BXws2T^N%I7rˊqf3ffuA .IN뷠Ƃ PKsR ,D1&O{=?ƕfn X%z% 2 y?i RA^|!hR3! ןR?] 
;3yӼծAmXƿ7*tD\3l?5pGcrW (8g '/ %@լ$ʺe&oDhȵ*M> s1>9V}Hʚ[7!o*i LKLA=o!Glh+j-y.{_+$ifyc io~MtVֻJiJd\ #T72V*5|<"z>^<#L_{y|gSM >\4mdt3V{ jhL}MIۢ&-Ze7p'{ k^ ݕ#LA._q97$0|F*C2?>"D/=wuѻ1{hR4x鹭|,kRN[%i.5_e6Nʉr?G!şSDZ zSp.AvKBNz5F0RqInh_]keD8wU{捻4/zxmX.FXbϼwkpXW*3 @"!,cp^!d(_j+8~Yd&ʩC<9,kF8Bg鮓i∨ח(oT#L b3`|]L*hUZ1~ z߲_w >㻾TgyĉRr韸׉uY5Z27)+ou=0nx8TùQgU!⣌ ϮP@^{ 0ܡm[S&*cWuhtgnm* ]0A^ϝR͗= =}ecy@n̑cf:Oߎin,Zov8ݧ߿rpk_13j5:U Y)U|l.C 0qScYuU"BW ;qkf_2v?@mǣ; WW$p 39&hPCT`l惀,Zj߻ϏWC\>8x.}5 `Xaj^U@!H -F`AȕZ? ^%X'#o֐ʚR'4%5/< =Np6u;f΁!Ygꩲ*F.}Xd ;I!;Qp*G7tX]s/bE8:Ԏ̘q7Nfvؕ4p$D k5clol+ŒiV*Q $ߴWݶmMH RΙ\ަ*^r~' +]D!=-߮pcRfvriiKd{ӕYf_Tk~ܴ&%3K=A B9ŒUڲFR )32sst2C'#[M=C0 pZ+nY| ;ZTsT1 ikT~e8GĦﭽݝ^.d6GZu%!fe]D4qaę"ޗA-fx6):?rN5m=x_/EEf.;@(!ZKow⤭l<6c=9fDVHåpǐj/Ψe8w>Tc횂DkqЕx&ZzJ.OOFA)) ,fhM1pÊe];r*xΞrHt.L1sdձqKۦH,d,[yY]0%$U5<.[,^|:ڙ(^QCk /Z!?xQ']$bxg{kjS]@\P[cۯ>c ؗr(&3}AbUU[V/Ur/z!E31T왌[cݱ#ٗ MVyA0fLvzbE ryPӫM*eu}ޔ%n?@5<<liOff7V+JhZ/KF+ޠ%4'E"^X1pmJY KY[|Ysu ːZ@u '[O_lvzjεЗVt0;]Gӏ7)>)Α~i Y $n̍iskkKMnX7yÈnš#RT؝'08vN`*q&骱?_ ]@U:FS_zDzxHkEɥQI})?I֯Ql')EI_#UL-$- ]eyX+v&Uζ}d*$[!Ͳ*Dm]m!!ٗ';.٨~ uɎ!*VGY`rU$|$d>}7wjkoQ]dcfh_GU337'{wT{+,Yt1¨'dtp$4dBf=Ξa1ote[952k洺D)±eդE}Q49o Ƽ'43>J&z~fe4kM@jv5JO:ŸQ8ou&XaxLZtDᖦdw4#16!dlꭝ&E adQR&<'= T5T[Hn?L@s6𘘔$-B: Df͠:+zzGU1ۗۗ16Ef7 =#\]d >vrZLK_}72uzs!j>mIX$»@@xwN LF,tF]p'ί|(!/:N09X8dYV sqKxMgkR)^!A֘E/i[kBJոKA*y^qgV-,^ձɆ15NHd11x\Çu*Ǽyt8OZcq Ir6c@Xۿo u2I:Yipd!ܖmAJzsD {jh-נ:5ѩh~xi%ide)sJI<   *d"EzM_qzFˎ2| ͬ6JQ8kI<{^p{ZTMAÍqhߦyfwDʚU5ZsL|mi$Cz[^Yg4O(H6בփ95!fhr-徶\eTȝFıae'ȶ~PZ@˂q uɪtp1dˑTpaP,.cy0P3 e6,i;ynP:qF*A.--tpE"?:UƯSCJ;}du`&/+4ILoס>x "m@x7`fV~R<*z+,qK&!ƞA݄7$2_yfsoH+x]&XEDԉƤ*v {D^H)2>ftOJہN]VA"m=ȄnWSu4"?,Tn /]`Nsdt"o{nh: lkaa<-S[d*S6DOn|}.Qtufd0 }1Wަ0/HŴ5\cr=l'j! L_} xG R*X}^lP޸-(dU*滶]  *K1m` h歆5 H: {CWy.E⽝k7 *-|ip7Pl70+!Jtݖt.L Wؘt7Jyr=igzGARk`/LreXa ?-dP`Zggi'2q؂cSY]Fk| HOy9z[jo}@L .높\<K^Nmi aƿ` dn,^D<_f5`ղ=y/2fz IrxelPCYѢb0KÈIыR2̀FreC^}̴֮ȝ+G*"\SXͅDaCV '. 
ᗂqXԀ%Ji5~ A٥}ݖO,w|y翓O!ƨĬ뽝)j LH1F=1k&9t^Umш.(}$H2eeVm]E>P^-"q %.M5 LOei )j^H)=XkX`}lX^'8?2>ĵ%D4)0] kMwxH(Nxեy~|{'JGTɩnV_[KW_ym#+CABԡX7^j"9׍bHUm5+]+Ls1zNԠ|^+A1mП9έytjCF%9_L1⼁=Z9<_3[vԅco9e$O.g3=kv6K#|OU#׸؀!˜ž$0spob%LAAlCT ;6{{GOuQ{] b#A=]0ȃ}\7 8:#8)U'Us::T.z&ujO S6]srݪ*n0CwAuWR?g$:r<-ռ D~E ƨ1q0Ȩ|͇ k gf|Bx{@1[pDtTLɕuD#sQ''KQA:ohҊx24Jl@<%f+D`PUۺw`=(CιA(,br< scp P',2- ʦlP="*{ Yn7a~PSs&2Yۑ ,-[-!9Υ1P\ltm;\LLSV`=z? 'ݏ<$>q;#wkz^(iNf RTx:ӻzG2PޭAtRp &-X{K?k~00\nAp5A19J5pzsB/$DRY9QӞ9#uV~,ːGB::}k"?jN1BOsyMď vv~ ~5cL=҂_!v zq2qC Aĕ5.Sۃ^뻡3f!P\{XԒh>&Mg}fbS 쯲胍Xw:x}9)]3aI໙qPӅ.WTX+sݣ"斐h̯{m!2bcS]{ U>pۂ2Hv.A{>oǾw<T.(f0G),ILYmlc#0UPe}C'vf_~"${v+۰i]=ܪql!{:>}IU.z?^)u@k#j [ cX*] Wz@),E*TodjvOo}}m <7 Sy͡!FHѾ*,4f#`DPlo;=L7.~#̣*5ÎwZ jY7{(#3VlZ}j@`:Fo& 1RfcJ''/{0jS]04G|"/ 蘗18/neGՍ(ÖBm I&54BB9d+ 䝾`t3 +̟ o;?} TZ!%I 83dpCR<م㿴}_*T_h@~ VՋI~V}ߴ6u23kVC Xq)KȭTa\aLݳW2Ef1奄?{+2Df{}9zD~oJ aBd78ArU:nD@A*cK>q"Ii./.}W o WN2i%2S(@v7Wғ\z߾k˶v_T;L) 4Dņޮ!HG=i}0{֫4_(,o5ba戀{S?B9@qڟ7͈jzU-3h0SbDw}^CeQDPR}b?xo/߄Za B#A`{N7D`BXlG\UVarVљygE.<ɼY;H0"<\o3 -Q(dGVƯ3cBA)5 ooF*(w_wB/-jQz?"~FTb@ו7nLˌ{z|rP4>K?{3'ZU fqO߂ +z3zpJ^4XA}YNW5ȳfL+ ]eU:wyϬ:#'B" aC77C-_,vq&8Hx=O!*9+n2\qST crj+Bdlo4JՠCluFy+.as|j1BS ҡvhCR_Uc1"=˹7q[ k)Fݰz$,iޯ/a$,chK<AN=U mW"EM3[' Ұk\ 1ء^]Vc)?]hRoSܟ3v\U1b G:5/ø=Pw DM |Wx嘴014hq +$\݄"G#ЎK_w\\ JY./DbC9yx~F{MKEFUk2/dgI92P[?(L\4]!8Ʊ$Ҝ!$6WD-,uEfMqޯ]է"`4EۗCa=P\_ɲYR緮{H2׀竎ۮdav+m8P xڬYれ#fj,2)V˙5gf+efCr{-Ra_{-Z Q1{ha{x@d"ȝ胴s3)Fu;ם]Tq"Mh[6- cw"td\|D +׭` 0 ӵJTb.0Wu(Zqbgk>-fױ i"f[9Â8.+ bY<4-.| ~.C7)Ofjz?Om, h12gk hIf1Eÿ=C%-81'$VGTܣ%JGkO\8~TvS)2?`YôA}]]% :a`$&}ΪK| UϷ7('tk .v X61B UU7(Z'dD(ɋhaë <jJdD'YD:LN$n\0lPλ4Myy ͽǥn+I$_)Zڐ="Wx3ׁnՠaM{ `/2勎o.|\$bE!HיL N uWikf,Ev_*)~r%JZsi% ߾?y?];ҿ+I=<ÞFXoa]q)>^6Gdw3/F7ّnp8b^ڑ$t&ϬH4*cّFLJ7`ea¯ XsL+4US"XtL#.e@EK}9ULE8k"G63&ؿip:e?82e.fM;^a4{r4dRuc![Noʂw%_V'\+ert"|SPu_%$ @"/R{Noe"᪾7c7Z_M_Ƥ;5aDuWSk߭ ~\]:gh\}GV-n_>x_8uO_&zx~ pqX_Ɠng/⳻'Xi7 \c.#\}=?cagBUa@!*J8Ҟ5 {GIi5Tid[C^4 NhBS]br,2h5[ZQ/< IB/\RI8#U7ZViyKj{)|OT` U {暜nc|rMI Fc[ ʻ߫I=>*?ˇkT|JM9 q e, 7Gp|gH?]XŊSoLAυx|?YDRX"[/^Y:dҐ#EDM^I ں#k VnB63DhG 
M!C!cD>ojOpu]>Q]ND Kؤ˺s~@-sGvjSWX=鹫TkeaRaݦ9م 'Hk9./njżwj2بFTCs\[8vWS'rAr ȝAIZR{$b1RSv[[tT=K?@E^n`Vg4PIH!iyЗ]j6Bx>$ NN(<#<KҷC--ౙ^mD!Mj>xC5e+cƇ[uUU*A0`vrhnuӚK_{퐴{zTT6iV(&xY*,">\a.,@x#M ang:zG-&!:tHV 8D ',. IQ]uU /PYa0qOvjv>!Yug/F]!/H^ʱq:e|"Z.us.9Agi8 | QaLP'$H{Z,\ <\wn}cװm2=kȻ'RB%ڗt]30EŔ*,gg^b! .נ7y,q6-"jzsxv}>4mrs3fj7#6Px] \X6s\x'|Bw C!Bz<.1k|ؼ)55vT~yմ<@ ; j/>RH/9Gk}K-ZC{FCwk[׍Fl XL̨S4ӯ_Aɟ͈w3vL͚U}Adg%g/: VB8^$ # HU~7aiFj5kՌuh>4ǡKNᢧ~tguxJ aJY{ 6h㮠ҋ&rJf)?-XC%35K܁4eI(wUӮzƌJ͡ i,A<c1ճow?x5gxQe6DAl+G+߆gMr#jf|] ɪ.ɓQC)^E3FFTS1 oU@NQ™bxRzA_&h둴zb{o,gS(?n 7!KFGZ5z ϐyȾZhN7x <H4DxZ Xv@{$<\X!>ix>/&޾ endstream endobj 218 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 223 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream xt lUyn>=. e(ĤD#i10=40=N $@$BBHRo2um]EP !!@  ~cͻ;u3#Y%9^]kkh>>M.Fha?Z:MOHh6?ۨ/_ūgF?8Z-?u/]zZoMnM~Zݯouq]j}?{enl;fvg-7ͺ-KOFKۓhro9<ԕp_]?=Z{۫#n[э^S߻~Xin[;xxj|7蚺B3gy֯uW%=VFeD_1P[Vq;gC?iucYd݀hVIetoZ mwo?|t3?7Zu7[km_}PQZ~R ߺAtdrVw{eIOt/Ț|Wˋuo~.ڗ'I/Ѳ\K/z}Z/im쾾NW"hI}PFjӵ>znR/ YӃK<>eSkU+:b%F+~xOﭭgםm%?,ֿtŶ4C}=ݏ|hqߡ;vߪsqV?WnL grQBc?>zO Y=GnaN:Z&gYoDm~̅]}\j|a{C>=ّzߖ* A\v~o]:0mh).Ot1P³Wb\Q׹*@S,hGRA=ܼ\Cmh-O^^ק&64zRɧ6Ef?풽6@}[b dUW6'wl=EOjJK Ji=%;yњJHћ\f8o/%ȰUbq]V"]~їfJ&ZҢl%udh}.ѫT_OWoCq{%a-]@=ҦGgRk.E- ٮVO[seo޶Y~|~Ìwkez_ϫdÊV,Y: e k}܆ DEJl#m/YiCť:)k ž."IֿRVڝzvJɰSĸIw?wu ɀ`X: FߤdT )ґԸNn@-Hw+YXTXW/\ {Zz m Q:?#v~/|>H0[xw.[Y$T籔Ճ⾀׾Wrߦ0X\m%*%O\uf&qJKlJK?վԡ6N6ޞ\h}T+(D{󘋏0(nZm_Oћank?W}G+Bws=Po*Jʰlr&YR\_W.t5zj+Kҭ bׂ6$)=91?UlVY$h9RIViu5IɹqeӕӀrǟWK+N<3}RҋPͬP\|WI5ײhy oGI.>޲2k}|+ s9DstҍȪ¥]?:˺`Gʳkr)7P2P@򕎿nFx|>zFo# T@o߮畐Pia_ $uD}ZoӋSmAo4eu=)u-JG !ѥt;Ba#h.w-ϥvSo(}hn!F',?FW4}5a i`3WoL4RgJu Wv3s,!P+2+vp/Y;>2yQVD= ƲujOqb5z܄}7x z[yX NNP-]/j?%U'}V8@Am[I"8IRS|[54/YL=ˡNYM\>k8WA"cKB 6Q=fL),շKf7IXk/ j(kaGZK88 A:rXԵ<0O@ Qe%pk;p uqU@i9':30[ka̒|u ~ꮻ=[hc_*b.dO͐gBa|xw9@45؛'兩v ,_IS 
-nԲB;k7Gvꐐc}Pҋ:kVBSzEޱdXجNh@BۿwK*`{eIraZIu+OK1<9=˾\&PiI .# ?|tlI8'hGc/AZصQf]ցD,nl ZΑEZ6FV#c%S)ÝL-rKw΢-qb@;nufkre~v,-Htd'B-`\*{25yZQVY[6lo^'q]f;PZ"-DTޮBWо_;k-,91sIo5:\6eZֶMϋO*:97牁'ⴍ>UDY:8/)2LC$\vaatq$a-Ot =L-Z],6pBa{mߏ,Vaq97-i!jbk;U^u݃g|{wiPSKGGN^hIm(џǥ,KA ۇa JXW9MJ$u{wIZ-;UK Z=iNrɵh˲GBŠ !jM6at.y$)Zo-N]7 B/[{mԻ..ܥ_:>lE\|>kux4wrZy#K!.vaO^L#4&őԥYUPd얕ξԗ&1q&xN-u@4H੏`vt=s1֭>=PȺǪ0OJzϣȞ^-H}X Bs;;ucpHrۤlk[tI{?t8E7N58ϒTEQJ;N-`p^;>1,>gj:`T⾼K\r:RG|Ǻ"LNo9+-Q)eAMGfr+6yi$SJaKur-:vDR󾻰9\[n2,N7LrtSrm fR J G^+4+X%\-kRu^٩c2k '唀vVꋛ%C,#K.?FGOK!0IVpl-$yFj9997;`[׾QwhmCD7S6}6K ywJi$KƺeX 99g@n$}?f,Ztu * pKS~y P .Hg&VBZ_%׿|} J~iDPXgׂke霸7@n鱌Bb7'~"v4,'2SRw?  nED+sLY؃rٜ;VO@DR!a੕\z\j߉ݘc-k{""rRBD)?:*oo /tX _/CWVT_}i~L:?TvIG*jYND+|ȉGY{ <_Gƈ P hI-g#ҋR8\'/#XyWLa5@t,ClNNͼKV {+(iX ' BOb}^&+cr+.Badg*VPX߆LzmZVA)9rEV_W_ b/Kz?Ե K.,3{0Zϗxyj{MgKuX\H.-E3_xA=ݶL '8jrbMGj7r3-Vr+َ b+_)?4Yx1H&#Ts"|:VY7R.bK2yIq>u0jUmlM G;HcIe\6IRM߲\E8`Uf '(g!p~pxMU JpW%M\*Hs׉(ILN03ޓZIwɭpZm)$3B=SZNSIpz9HL`zGS/6B9"+wL|Cj);29v$GQ` 48{jaϣ%r`;|#V'8%rD>JBj%HC$~/;f mD5#ւ5ɋ NjG8ұ9N;:u}$jx;3B[9yI|U: (%兒*{y$\.|y2zZ@|e^!(%z90F'P9 hIKHf!^RW;Jr)^'S n$;`б Y,bJ,:+mTž3WfmGklhx P aZ4Z bPFRl@D+X:.Rctou{BW/gZyJ=:)*GX#veoҢIl$Hz Pe@DuPp;J3}<ԍ OV>J&YfbYARgDh.u huis̡IE~N A_D⨖HÉ?~"^YxaWto2[$'Ry+&o?D=_df[«ÞbgnSz^vzh{Yc8-::ŃܘdR7;'7t' y9Vz[o9qHqY^B~ ۼڤzF:)̫K!'t[79%:1@[;⁅|bKG䅵PkYİrF$4DS!UJLzy/EJT>`#"Qg fƼ)(݆F+b%dOGRuR܁`u?BrfL4Rߨc4BR@ҝ>o@,QA H\vV< r!7忝vEREB')OݕnIU'{RQǫlrK"W-+ʗi$R*t?(Ũ捑ǚ׉{4D$~!Ǟr|asnɑ|/Ђ,fy5G0UtԸr HiS-(Yvo3i)_ׅj{vnc)Ejy3EeO97C[[ü@j51 -Z-D#++P} _ՄVΥlxSh,? 
%ڜ}0''>Uq,-WbM$p#)]¢ :J* o?RDRpx:S0RAx#\wsr-?r6L4DyS\b#it{upHqtLwQb;i'Scvqv➘ &ɺ4p38 ϓM`xtK م)ʫ]ؓs~Xy}E(H9taGS5!ɀ|r77c @-s)T}T  Uǟ7CY7ڱI0̙O12PYחZ5n_<~I8{Ԩe=??ZA0sMtT~'8Pz_:WL` qP«S|- W͎.y.0\Z)Yask VBVO'%"ͪİNNR.GR nÐۈKvEJ`@r1f6*W)u6/听"P;gX /{3 8:pV VP!(e9,?I15[&?R'""PWVK&7F~l~Ytڂ5%'&|}\H +'ɗHxZ7aN +>IK , (IVc鞳~Ќ DaVoW8MLzXhDStR_/KMKӁ$tQOHvP1!ǪelsM( G=n`@ːqv{zCR5Hwz@!RQrS9PXLJXvq*Ԝlu}GaW܊CSR+a{ W$gFB7/ADZLy G=#:n}QײoՏE?)&\j}`JVuK'8Wsl6l(ÿtpQt+n5!t1%ݺ?6$,veA*[@-O7RJcPq9>Pq:ѩ3H^ܔQ(pD`>gZ1C+1 ,]%.$et헔$H̬`Lu=J:w/jZnÑG@QBNWG}5 ,ߋ.S( 7:R?Q2fDbҐ(1p "5@=ݭ\@ݝtא۾d۸,o$-nu!1ۛxSJj, ۯM/,2zohH|&ڊËsw*y^}K;N̰tҏޠdGx=={$V?E2׺no#$FXU1=lb)zӔfzB;jxNk%劭9&Irn+DirOI0pH% ~OR4Fl;P(N,caožAgbH=-ii2dih /hS,cIݜ`0FT0emR$PvR;leb@[Q6̹ >v}Von^׿8!Qz~׽AhN  tqd p4W( ;.z HK"KAZtR jfj.Yj ZDi rʹ(3hme~iܐ!E {r-i(lӣFw1l^ (r^ A1`̂Lhy\z,U&JI8Rh^vItޖ$骓u4tL8Q\z%_T.8HKiu z"Vi9]}[oJk;1S'RtDCZM"~#U+m^RYTTɡ9dC3(&X.HϽGv6@z4AE ?O#T,,tړڃӯP'Y*C_ms~E,)vzY4ᢛ׾F}Q+A F\/YD%EΦ^1ʊ0,oPpyd|^2tRqy N+<[j38`W@sT;U_wPmg}+jU)@ZT;bJ~i\JȠlX*I[.t:J%vI;KdLEMa"D\gg=MGrU/^r#uxJ;$Br@/mSjqHǁ94TݼXh4kE!w9Cj30qh z 3PL~4Gi[D@{'(7&Z[w8!j)!@ E֬ZRh0,O> մ6SNou dih\p-iPيUq9\"hmUu]͋KvHn|HpK;~EkG}Ouk\tO+ xSΥ>6zRIb4;=,\+$ivʁNގ4V,[On[LkZdI:n..HkwT@66.Ux9SOe1etf#Ci.R(^cuϦTUxo! r~;EȽN+.l_歒L]}KB-](d uoj6 6IsU ĺ7PT-$z`H ոx.-I%`0`OlqĀV~ʺiz&2U VjTe58< UhTGjzx&0@h%k1K|3uyD[ b/iLF!^?f`V+7L%xLfDW2zQgzΡkY.$*w!_)awHlYqzxD㶛 (yNˎ{8k%]]\eP!:u!ݩ: 6#%̿7. ے}95kSa|Qh[j7;?&rb|[ZG]:}Qy5 u?Ew*WoLr`EL;mlh$ M[ )V8X)聬)$z}K) ;+{ga?St yhp''HLؾhqpb|ŷɅ/d6j1ٹ wr54)r%)=p MٯP) 3v>i۰\'Ar3/~c3es(O7^>W<˿sz;9ї>PF6\{]MR Hb2tgeech , CNY}{"Fx F n Sӿn{)}o^Ï/)Hl,ˢeԋcD6?(̢Û]MMQ"ٹ`Zwչ8@ۙvK^8@ k`5^ع,h~tzg$!JN\Z4lG_]/sF΂cR*+G7:jQU |xbkS &e2%`+Sn 3z$ qH$P6PIߡO߉{e:| R}1oQ* ~C!BKաʑ䤮9Mr} $u$y$EHa8e3gxcӚL(hSKC{^8ۥ2 ?S(kQIE,ޞd;H/CF\^wi('Ʀ;/= JC82EFwOҼ&Ɵ9)M:=>4̓ LE;j{ \AG%Johk2DY[L23pЀ5A[<'*fPB +Y/HmFA-!Q#g"hy/v@#?u2ro)uӭW&G̎eev+vaT$.zx@8!<3$mlk;@VObqVT.qODi0L6<}KF˴#h0Q>9piz!0Dhj/ִ[9a | zj݀^G4b r6S``r. 
7BjMPE̻ޖ7Lfa6xI5;L=u[n}>T(@#4Šl}EꪢǏS~A"$ 1JBYEo-?k<{{rpTD3,~~uvu#/rQz6e0|2)M(̽!?sm`Ly3*X^tyPͼN0|0&3|UPnx\-D 4$!ݸTjpEF替%KG7eU{v`~'xCr3- ϬDk0P9GPw+:y)#JM郹6W=|8q'0ZiatcLj3J$zOܠF B0Q3mHuɆ6{kٙ=e(.8ԣ.3w\kG9߆t{iFqnvIČ! m A98t68ģk%XPfrʚ[F`Xg:}P;R43Ҩϐ"*i*')K MeH{d4U߯S0I@J'2-b 8;SWǸ2=ֳEtʨ1l3%' W[lrm緲Jhcg=LW9s^Ng\Ե›7@w{7Ic3e@_'4hz@f8[W&1^q|FK+Ы:O^&{C/<=U=qA(t}"''r⿽ @ũ'fB0PNB\i$ozuYs㯦!Ē hJZ"G h{ؼXPVJVá/#DCb[zw n`.0$bMl,m.rCfx}x2 ϲO;$\7+懾QsAtʹB0EIS8X"ҩo݌.'q*!ERBtD\.Hi!x`r,ITǏqkAt,n+O3K-vue:~<>}Dg֕22D}*} Um]M e^(y֏?r2ThZl=K"vh/1I֟xj@Ҹ03 E_.K*ga/yCx/x3/,jwϝ ΠD scq.hܦmTyw®eD>{":\,!kHT߮{YdCWQ”}`(젌DKkuUos3@l:;:A?pWFxC76j۩N ǂqk@|`\5ozi x-HF \nw0qhgah`v"X_Oo))ψ1Sڰ6TBT.E&2LJ LR&6Ylg7]i |EBr@K';FXifIBʯ pT2&Iks*CI< Jd"'svsFHITlћ%f6pɐ!+ :Zm΃g|!n෵ 4IւɉDW \Wr݌vOFؖN8$m^brCކQ!WLb|\85aڸ B>2\SܶNg_;Jʄ/P8D|h ]3tI [JX$*Ȱz]pNgfn_UJ䄂o߬`ȯ{|TRIxY@sw?et.߄bOwF jQO'mi\Ę6NSiLk#}&dJ2{`!x}*E@2$_ -=vc̽n1kŢ*)&]hˋk:^G(ItbUN\[@y#m)Li0ftznvSq AQ ׾097v2rI: %ci sa8[ESaS1D,N ML bs6}I}^Zg- ,h{C&ϸJFD2!GIQg`Af܆j՛NS;9wh%6 S~-a `܆ȸ2܏*8!?*Ps%G/AvHaBM_-,+/'{G|F+&KjM&+nH^: Ηl#Dk%'G͖qÍ'E΢^{g*)XPSٝӦQZqG= $A2{N-BLO&diR'L8qAETZMn)p2%}!{ 8wGfrJ'E>J- mʜΙx>'RSm$] DB6KtەFJuA=JyoҩW(u "-et}*h6' qEd67 hP5U'hM0X7ͰIC.x`ctC:VFR֓L:mR:wEAk b.ex>-S, )L5jĕM?:Eoʺu(@34\&Iey { #] OҕhhsƙRؾ?w _A^l$ƭ$͋5`=9h HS3ZM~- pM=ERIU}@KȜ<%Tzgl|e). ncl0x0>턩+,YY## >qcB񺸾K0eF\?4Zѽ 3G:YWJzuQu<ڣd3qqήmhsk!>vz\@x5< l ABŧY񉑶n |)0ratʹxg2ɉo嘻*LƩJu0ԣ#R?]y+j0nKW\&ejqKH(:U j-]HjW@i֓tB1 񘿖:=Mbk6{1 6vu6n lj%c/VVS. ;62H7Fz`.i8OAb|\M7YiK(LoyPWZKdT-#thI?wɢݼy'W)ChҹSe:vY! 
BD Q`Wݛn }a 3{Ͷ^1)r4+:^ise}u"ZQs v(4gWm91D nY9N?±mQ1YBs*4j֠%d¬]_S!S~wxqVτtu#b"A ;$@P2pvF8§ Ii0 5k*^5uҌt1EI"w3X쉄P3Y='D gq͎2MMyM1g  L<ӵ`-bJ.H#ui^ k6)#; %W4ɤ]N7:!A23mV-Q$ ETS/^sxXwq=(%G5Zg4:Q0v?3AIm;R)tJKoWsij7@/JV%KzAD-AC[]5lH@NWxz0|'+npr!]M;[zbwNZvI $к#$wFhg)<>I@7K{O!ʂO?6zG׺UTH8}ew*%ҳ{ŭjk D"L^ pq̽ PcĬTJV7_m$8y $M9>} # a}f1zg'S)Jack&8NԸ \lHz(e;<݃gj:0}V'[f1MeM֐b&CVFձTWV:}ܴjJ;F.Eyj&H@P/N[$is:/wSØ% ;eHk{/4_O?{oIA#/1PQ%iѷH$ru]Ƨ`U~aWNj9q wZF/\tގx 4c&HSz)4Bl^GS| 72CUi8 < 7YAP859{)\^f0qu\$$᙭=P,=Y9gb,kx zuu:&w )>v=NH$*7\JVWZ^B Ϭ'!hEҥ7KHyN(Jmg8>]¹a\k[AfDo KxI{ÂWsc2YI=TL~t67HzG=LC5 0/ 1z RM7-=IF+Bt}zOߛyg3YUI]1FHdߺ`Z6&Ұ woWφ@_gl <#[3jAB|)uaÎq6dfM,>f\ۈi/m n?|0i-V Z ͸5GҲ>6~*Wݢf~+ahݚVU&0zmVu[ñu*DyE$7_&*I͗o:#%f{ß|;$izͿ<ٳ {CƲvA;Jr):YVXE֌xtFًr@u̶֭ޠĦ T1rz`M7Ok݇3gIh RqP6gBtZo,®ЯwsxO;sM3NItMyΏ{צ$3+>`|dwaEu69RkI7åc)D.<N")U7O7D,~,T2ѻݫ덉ts%IP4DGH'`+^^qZ{NkۓCl;D&U&Lo]@;A8¡^==vx Vh HU.vG\R;hkW$4,}zvZ)weWۄ-v6ODӼŌ_L[--J$o{#A/9z,SJ _WZ%VOEwqmi \Vv#pO+f{HT(`=Q$!jwr8J> )韎b:L&U<O֔StYKzgQrêx#X@`;,dd7F9L[A#u0ݻkZN|mp5BrxM:- 初X.VKyd%K 3^dy6|`t$gizvuv2D{Y3iSgˉcV'Iyy ng 3/*OM]s;gX-M0rA]{׬{~;mԵxOOln#X@K5Rjc~ {vpŠyx?4H h+a7Δ] qf/~ExIw>7o׾qYE UE_|T+007&f0Ǵ0=!ۖmfVl: ֚L5`/ n!๏Ɵ9yu*TOA$Mm IA[WV~NAx&$EjXj5Q|O1#~F(5hkU[J.+m"ڄv7;&zs4ɘ!c !1< $ qjt4.f ~꠯Z=>O*oǭgB!n}Cl,?vVSe?L3HXK[JT$Q@(@.-C3i('dV6)DA=^;A?J8tI ~qo,<'Gr.PP6ˋ1SDc$CҔը55tAQeK;]5P޳f[[uuH҉ne:>n7#EC5ܬ&&G& = ikݲ >51FiF; dkK~!lj36Qss2SQXq5"mzp5O,B X܏39_G Pw_EV7=2csҾ[pFo'G3YLI?mTBrQ .ʍ6R'F.Mڔj!!5%o?hQg q㋊0͵X~2 {0L jo9ٖ=Nw\g'b/ \62a܎~>Ō֪lg$ Ծ@9w =UwTs9_}^g0LFPΣC)OP 9j'duIRPd%ӞB2$hR!lDf Eݍ}~tG՗b]L(3)>xĵƍ5^B{PO .'n_@Tb&hcTw*{+V!ۦL.B)mSC*ӡ\7Jo `!K5+J%WUWҦ ?!~TJ)4p1b*of#itl:$Wߪ3ZW6T+OfY_rG/dSme?L.T Ddou^6xađx-3 Ʈk\jS}:>1.snm8 ;x9p ;1]ju흫fNQBCe"]ʽ']*&B׻fV=ni{*^wDW;:2EqЏWj$M_d{8#HTG) b֋#uC}w>gwJ3jZe3SS<ӒaݽuӫδηFP8Γ {xWg䈖/z?WjSQCE9nB[B;$xz%.[pH Z37_V<\IrB1]'(H;K}Cֻ%K$+]K1r6=@3Upt G~:L4 q8n r4eep:KrVf;26%:KTеgg[a !ThYE'>ǔ:S\ ^ j 8W= Ji[KfVmEklYxr"h~ő{\FjYO|h^ 3m5V2[ѩ)ۯR$>{ޯ.u* MuK^SnE;n|:Cj T4^v=ѠC{AKoC[{؇>$簞{?A"YrI#ZO 2g} e 
;OY-ݥUy7qkS?vئ-{){"H8n5CBw]y0{قkc4M'0աh<ƭzn[7;¤ P_7*wq)7 Xh 1݆h{C~U+W`vGW bI^Ò4jB355٦Ѣ̇T䪕V)3yP8f\t3O;sW݅O1B]Hqa2niN.Mt+ 4{ĜHQ^lk*_K؍z TS6|uqr>͙6aO9|&~| G+O`nà<oF Ϸ("+Oმ=;N^/x+U/#qn.W6ӯ`Z?.=pp 73Q<x&G>&`k%EW391Ā~$[sĹSS;+{%`:ViS\3;#wX P(XrЄf[ ;oj%a} B9m4Q-:_ Zh" ̰Tʊ0 6,]w걪e(Nô zVc$Saᾗe6BIHL 89sGYbhɮ_ bm븾|E~ 9mSqƻslIpWY%>H28o%ɷ82^Gqn]Qh!0v i&2T61(Vr6>}Ѝ|g~+q%athn᪸s'P'K?e)2<@d)y@yWѢ- Cl,f &˭.ID[Rmi_}i*"};IFP_d(j߿Fi3 `ksfWR&PVB7)%檄Ho@>N$Ы1kJnk[rW'{sV֛VA0@{bM=;s`Ow^!vSGCBBwa`oHM8 =ڥ}rq1Njݧ05 ˽ZZ6b蕾ŽF-%}|@zH}_ / l'sm.U YӝfԼ;dNZ+R.[!?wD|tMͬ$3=BPa%H)*sq q&lArA%d :ʚml.H=Y|O5 ӒV@э |iU)Hg?&2n K9e*"ele lraX7J;Kh=}L]OY2 XGV R-P=n=La\G>zG\a[ѽgI/IHe%HI"YֳOVe' gkA:g:ЭaNnq:JƫV%MyMEV5O>)cʎ29&:a ,CE3+g >D?*o=,B)Rq"*ak7pϽGN"墶_'B>,22kI=զf쉓s* pHtO/҂,19՝GàvcvC:m58wț~zpOfj侻Ϡ սr59e]eZs|L@BA6>41/{Eϴsɹ H# u>zoR92Z)<#֤~N"WOݍ*:#wp]Ր԰m߇$`=Q:qAY 9dwGG'O UEGETCCQ# ,09akC SUK;d>5eb;+z&S\t LfAa+q]q;C2< vVشlfnݚR;̚H_?%Ju5^WxǛ"f#ћ鲡~֓` ґJSu"8 ٍ`$ⷃ;kTrS<`SKEjAJ]Bfj4vh+;8hCpfg']c gQӌ\62_}菖Lmz#>,AzȽϷK=vD  5}Ƹ5mCSB2InQiZrW$h7UW.-P|k,փ[Vu/`>xmUh> ^լ+W`L|ސFmnݤ敠"򤩼o$ަׯGɦO &cJL-Ԫ9,+KFL#GKBdy[F;yD>B|MdwRo1^ZY(Gc(iYI3!5cŖXoB#P1/S(EtjIܸz#\j$PAveAaކ4CmhG!z3ŴRr1h'u8>A;4 >Oฤ2B~2*D/8B\G 1Fơ;#11ْ7߯fu݅KN B/:RKktt\Իٞ.LW=luf!Y+ ;[mV 83YQa_ ƥn_ {2熘"?!|JAbSUҳ]!oyE<Ш/{?] #rt*{dš,Q19#/|œznz(?ҧɛ470F:6 G7Lx!ǹ8ǩ)'%q@<żJB?)YZt+q!#0,AC)d7L[$xP^d9N҉xcobdp;'b"$*8ik6 '^ E_Ze^ $E3QPሓ'D7 =3,X(m@cذJ] =}.c$a2˳ Ӂua?s}wB ʶxa`۠W{nA>kkwNE۸gHhmxeWf D#j/It&5E%ヸY,dgkj$=7,W i\RENqqgL 9xէzX{5%]3mhi6㥊- 5ڤ& 7М)Mgl.Qu\[.ؕ;h?ȬmiupoZr,DKft[J_t=0F^fPADq2G JBeG}g=Z!oz ?wPgy 1/% vԹ; Qk%hp;vzu9J7C?5< K8.2A\If5|^#eVO7Dsy^y?Sb^!y(YrJW l 8T)>(Bdb wPφ;A/ D׳m>LnE!LB;>/'d>F#M>V{FkqOwt%Pg/T0'*3. 
|Xw* İHJ̤'^eu;J'.zK\jh4c^sՐ ֜M0(w:)q^zcS!EEAi=/3 Qeč:M {](c݆,׸ )O뭅+ЩݍsZ,$r8 07B-;IA$oz>&z+7TAkA!ja'霬_P`ȍRX9f&L(mAc3c_}+`׀G OFWK[g Shj^P6OIͯY rEfUtQYWon'>ia;/VnxzilLP^ERhbUK#~uBtl g-@^_Azk;A7R4cMl^OJV8]H8jE/'d^o%vւ}R0MC3ByR hJ0XQӟ{tLsֹ LZ296ڽhPyc7C-jG}mݟsk39m#b&T\57@t& KTj Z~+;iFCJ񽽹 8-z{q]6aWO|x>7˕-*CgL쩔VC5 RjM2x-+mɶG ~Rj|w^jޓ%!݉ы2$i8 Z<3w=Lvw$3En\!8NޣnGy2tr؛y:9>qMwt~>1IGa1#֞dgk4٬v[1y6%5{A35d$n ؕEԼ0%d J.˃h;+"z)QU| z)GDZ{oԖV+T4\L:|1q@Yu>Ab:e18بc \߫0{e!IF YԭS_yF՞p0ʁfG48eJ&`½"~yKw?Y7bɣWFo[.zBv0cU*bSඤycӀO*3CK8O{wbbpw :i ڟ=xh5KΉn\ʁh?ٽt\f.O;:^oz,ȳG)8U -P5L•@O{ 6]G8MZxAiTFrLCwʳ%tO*yҵt:$mFX8ͬ1|7A#^,,V~Xߋ ;[f$"+/t6 v5ǀ}2(3;s&%w WB{Eh,NȽSNЧ_AH2:gy[1g¹IWfa/M3_yRl0:?ClOP7<Ff*=^=}mO%q%p~:S Ԍ;ta$Wnt SN )F!N,cӦֺ($!}ZLj%)-.vE2Y-}{6vr|6nil-2StZ +Ap"U1[>aYOˋn0 |tSR^Z1~S12O&SKbRie{k67 uNk,KFƦ+`} {{n1o{WlåsfkfTTN ?'Gt!/-6po)u ۿk~@3ҸC 劓@x9$lsycS(DMҁr: {~22$FX;(݈V9F:V`ZbOj.=YiFieC8<S5bwK5x?!5nw%$>uj= IɈͮ9 _=ׂ2YFIH3?л>UyXڄgXC {<; eЕ00AWiUVKqrT¢e'ح4=CDQ,`I% cqIw˙:3TXEE֏4[{DX.y\Tq%33 pԡX1\ZQe`ⴔJ3jyuoq9K(Z ④GۑLH:Yd Yit/<}~^CAT3+$ê\nY^j>r.C::DK\zӎ&=~)COKa12Njz =".]xkqwHypU6")#/xj̉VKyRK C0Lټ'1 c҄GT~:Ƒ\Z-KTf`B{O[Bд~'ۢcT#ٿ˯wLOwSV;4z9SA%4 9uETvb;8-ȬQZ=ms: E0 @jfwFx9,d{C/`dK5876gbOL8A0W.k3_3PڴCs3JN[|ڴĚRz #悥Mj.̔H14!kWCgK:ue|tnh/\½ ϭtohwnCZcS2E1Io"vםc1Zjሰ偛?b c țY[rgRڱ *0 aC$GL{O]zU'( =2ȂgXزr^!K ԒyTQGavU S] E/6ۏo%{o9be<q\`@?ѮG[>|۱Sm&_cBhYT 8 ڮKܖAi/58Nt aDΗJ0-ͽ9A*N>JB"3t"G:Fafgg_T&s;RU+ZP!h0Ow>Y,ov%/e9jY` zĹmn"5.qKra&l˹h)0%%0h.IKC_@rӚ_"GYT kW!fgd^ݻW-i F_*# ЋešGipL@ZfwYPH&(wF};FAOI^C;0v_Y곚CKEp;moy7&*Ph ?q$Dew~7dP&۞A56bVOS=V̽ltVvF'Zxp6e0dva{q0sK1!KO[HmZ\eYQO 8 :\[)2SUweQș]gգӷdߏb?Jt99ʼnr ojS2m$&ڭ8ۈeu(b}L"yO֭1`/E9#?Y$Zޱ-zdqY:Us׈0vr'5w8NwsEէag>)3Jt2:p'San;aGa@{דreRhtaKsȈC>Ƭ'; R((H +ń {:|AZ"Ǟ 8i|.?qSQBfj~p+jM-Խ[)~3huViʢ/{d5ۛtUqXߋ&մT:thoEm%c0h~~!;'Y}JuTw R}6d咫GTdz4t8bz>M%,PI B*2kqdfi]s闭Ergz,/ i3=H??~0zڔu ;{chL-،#ma%зx s7%,f\XRH 5Mʦ_Df .vxၬEQc&76>E48_z/@pk:PXΈyM u"oRS̨2 {ݗGXl>;f+S2/%LΠ |=^لT'UMUL>G復kU Ap;$='٨?W!=M2"@ljk%R`0gy?y+t㍁zQ9䏸ͬՑokM$ӄڙq 6D&?ೞkperW,Y>mΎkt)> t{n+kջTz6 ԉ]Q&$&LA ttI83mSGCsV=fP򇾻bPX@G2 
\Wp~{={pF9x#a#=^ii̻hcvH xq YƸ avnG׬dߩÿv;kMmjD,8lU[(C⺈7_1˶4>jum<+b#7YٴCfbB5$Z[FS}TpY:dF(y ?ןv(VK;uuwX >NkAr\a#3ih04B->җRe\;3z_U;S)^~bivzwS'en0T2Jk;Lq+.\VAGUZg<6A0 H)R3&I3#쬳tW/<>)){HVqG0'һm؇}e3xч#{FP +=li=(jrr {I + baH"?F?Wx1r,B_m9j[?uu}.J/He@sw?<6U + 2~GhŐʈt[Ͱv'zb9kڥyzvZ!oRn4nɝ6dꎬZġJ?;@z>PŪ1 ]]kFi>*IX6nB7Ok.elW E$%gcz\TfA ]tv>ֲKiB`{h 7MQ Z\ݯ^[p=] ] Gy0b"\jn]8K5NJx#\~;ma%@<Ɩ6Z~[wz%g}jvzmRO9z[S2B V#R(2^N)ŔhC`3Lu(T BtS|c+e+u:zVKo^ DBR*Q57",قFz1Ktd <9|GǨY>01Ȉ䤆OFF#\_Z} Z탰XBPW>qǜMysxX2X|y7qaNƶK=iح!1C;4ͽs vͤkW_6յύ6uw2VOk52%yɵvs*)vZG>A4;m!A ;ħq}nkt n^W-D'^LZP$_S"ֺjṚ#MiU7伭)p:2-@(%>fG0JMR&ў4 M{A]%1`iGּi d];c&a7"e`]h$C?ƟcH]pR7˩7xls{R1I>mĻDR' U3qЦB-iҦX=Zޖ*. hIG߫/~Ἧᙜ|g@yCB4>_-=w/3Jcߥ8O /|tVhVò4dٽ\u8A h^V]m cl"\ZmOAޥYn6,Ӫ7ӐG="*heyѐ]PҔYwOx+䕭}=x eמ *@Q}<Sjy}eI^P"k/ Bפθ^ynrS5K")@7YJ4+k)FsmcnzJF^\?׽Åb*)C ; >/0 \;謦Aý_pam|scМw=qIjOCyOTahjLroROUx - j2,`bilNc[;-` g5I+/0Y.'T4anP/E f*#:B .WƇ| ,D$$;ɘDXdAܡ'n̰=^*f<}T~˴ĩ|#ƖPmZ:<+DA~gF):Jfy@Js+xӦfiw!>n@'[:ׇonZɋ~S\iaGʐ^%X~B04>G-E.mc]+%;n=6o_o]sE=Y},#IVBu,mY}$ޚ:|ݗn'H!cǨMe}K%A6o㚭AռO z s#Y'L o̡vRS 9'hdSqVĎ&Wt:z-t %Tb$hs[\}Sziеz݆ܞۓHO -cFyf@5ZW4۠B+Y=:?"m" RzQyR5W#uZ{K)ց;`jtۍFabP3[p&M=Fxa/O*M!DYwRCQ  GѬL7=xׅҶ~0{]+AT ?WD8X kNXkW{a,rT {tA@e0a3)M \_"?4Ӵ|NtfkyÎr7bMcy:`ޅ7-g % ݙ*+)Y,>5/t(Unj }OB]|7қf5;/\ec<6%>iYu)TlVDE(!_T]tѴ.]~/Kފ6߼f~ڥWVSI(C1)Ӯv9{[4-f>Rt✣zlNw4,oWuS?foxlk6U_[]WۤKiJ5&& C lE"L'6mbFvý[mWٛ::ȕJcs~)X A\V|!L]2tMS<4߽H,M d2j< \+=˚Xuh"SlizBmL\v8)(-ۥZmBQ%|zqZfgA{XƗ9 >\vԏmXGxLgr4B^b A‘+{2P_6b|K{v״}wV-)pn_Fw++׽*̠GX7D>:$+T5rސZi'vG[(p>)i(!VzO[kWn{l8MH13sHںڊfş)hELEd .ȧL@"| w(hS|jvl/K}ʖzHDNp _L i&+Xդ0IϒCYj\"pùr"0xa%&@X1X߽.to`)OJcE*hWDG,)^Yq™~!M "[ѾE ́Q?WQMS~}@GYS8;(/Xm#eFV%6' ݘ_#sԆ^ڣ FfL~i_;@ah2& bCGhٝ0">)Ōz[+F1RɈN觬[BH-GS0EK6|7Au&X>b6Ml)Veb)eF{ ZW3  ѤxS."a>)xӛ/tKDA8et%]|fs7y69g K3Mb2T{  [iXMT]9K5Ʊ丹NVq~u#_JGOx3 >&X"1ZiN s`gGo><`{oMiGߊ IbC_ ]f m saڦz3$Tp ?@O!jîGn+4tf/#*;8a%Ȱ29nu9o0v^Ӈf>*Ƹ7k_P2Уӡ#?܎>t_R%CRP!baiFx(u!*!UfhK魚6Ԡ%܋ W2:mAxZ@)0g06:bfmN8 6kz٧GZ>a.J bA>F yl@?Aa-XAB yr?hCFi{7Q$Ȍ '1rٞVg0Hv&; O402X v,uϙώ<dm)ãht< 
GYńMok@d]ZrҊnAaasB526+.jӬD9V<::Z$Ȝx~Cӟˣ}r/߂9fO]zOBv\P~  ;}/o N'f F|ӛ.+ÒCM0oٙ3ֵXo㝑\egy<Ҵan{O[Juuw-ˆ6)Pqc%ߩ"6xa7"5A[{ު+4J|v$==?]y/2b02@Y؈TC 6ʓ>9,KkAz٦M@K*ܨ_hJs; y( U:[muk']f0`kH!=YFz?j{aW7'UyYRv'ݤ͟- Fڳw[&a2DkM'b js`3blO7#O5%)>w8intX~c%pdOC䫺lO= K7*#C/pRRϏN8WXaKEM\qTyhqL;,նzI ?-jWAe%+Fᕻil{!ea[*tas 'RQE53uXYet^%e^_"COI4a1xiEMwYM[il_}++` I$=_W' kgNj'TZ^rpk0SX1{[n 馄ռnAۍ75x&VJQnfP`f8^#&`ިE _v%]5DiW|^Xm*v(r*׸T'}|{VLEzSc:^uNY>*>Iwg56+56wLw_O~a+bmr{AR![H'9C e>cҽ5'wKozAyi=YR1y&+-'1tͻd(+'cibDSbeƵoȊЊ"+S;ODGC `:V"㸔*yfN1Dd<Э+7_svB#v7hMM6y-\bcӮv1aӫ{K?{⛎ObdhcYa n2|^jL[>r6qa@u;$^ yGK! mƛ\J!/7YgI3ӎi,5WSvR ^fɩ]#e? ~otU: i)ǡQJYug(Qu=w/W6Բ@OM5IőUzZ`?-ukR$eV֕xF3Bʘ,+>nO&5  -nBZ o;j2l\IX{g;)vgiemG0Ҹz;bb֮tz Kt8v<:jIO+O*O)OGh݌ZMD~Pxv{.K_CM:S/3rbmCg J<RKNg8 U->  *!&]?zm|?z:, wkhac`'E؍]* zq:c5+o\X%K'MI 3 az4t1țDh~x.8-.\i([d3jљYݢ-:@w8mޞc~a*eŏKFmRS!Id+2 nջ:†gc%:7p'(z'|1:>.BǫkAs/F]wqp xxFWO_]Pvu$nVaVr峽̡XK#gcs^0I!)`)znEȫ>ݖuf" Ns w`_aד>Mql@oP7U&+*:ܕ&Ra?r8?}{u5d-Њ߲U$Da# 4f&eFj0젞E{C@ݸ~iװ v^vIgq zdИGзK:0/y=+oxzϥPEF>SgƤPmJQq- bĸFb '%'00BZKG,츺ux{@NPP#sZHs-gCƧ5L,o~<(F0o( CrqCكoo 螌̂M"^5^k {pC*qvH{ @*F>F%]/[U}6AUNB[ɛ l (8}yA53>&2><^*!q >~S=%3fS)6Pf@t]VP\oRh[~?85%J2\<$$/f#o6`$4z yQ~ir#?7[6V^U.IgF >*R#!тykuiW;l5.;q|篌IҢWD0/Ҋ=,5==*uW 'AbܑN{V#X[tLRs?Ι%*^~vJD[a)Ta+_t0">ڲZ_}+H20J~ǂrM#'\Hߨ0F{Ơ uө5=+ BO>,[I_}OkCJ35wbU( /J} Y"=7EA-j~n] :j,v1;+t,BJ(R-W]6[9v{Tk'GT^ .F_^U ~q k3>> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x͝K+;e.s#1$C(]^O8CL#m.R,PHgs6><=?煟 ~ww}<_W_b.x'*gc9A:8 l>Y5Rr3(%4hpNmժѺUݪgMD/HxFT+~S8z;G:ZEV=C+20X + 0:-ԅ,˛'( )Ze#t}ѹ8krhU.nN8=NQ7TP0 zS9V%pt 2pVLtHn=S>|~V:I\R3bHS,&ɗѲG)la8!SĕT>˦i(eyУug~_Sbo"GΓUVEaw4CElv=wX=?NYi`CJ4/6d[hIM%T2V: izL# !`T9QA嘓㳲S/A0s(c%q\Uw֒,S*Y`jW/ 'gtI>^}F>D tZU9 $ ef'lKEG6Y/PG, ϭAE;y`J#Ueܚw'uI 7lPٙrnݨ'+$@,9ΟYH  ux ש807|'lpQW?w))1 *Ω:]'ܭd-" %; 6E4>DT t;+z 64tUTѵg-'T!|YqpA_gT+MU&:f.#G_OQ(t%NVJti @iﰺn+oسdZ?D']6& ЛNnkz3~\ |pĖɴW *K*k KBr.'x9ѳma Tra\W(fI\Z)s&]4pzȿe:ۉY8H7gǼU,`ރkQ 
dhLf,@UwRry&MT fu`#GP{+ʮ `IYW@% ъurxp{RiQ;1%Tzl'brM8%b)DqNPp56c&,}hKuܴUnn5M*l c!-,W2Fl6͕)0'ľ$ NZQАмm#,6vΜ>mWK]:<Ԅ2 PKt1kX" D\ l7`dc0_>v0;UJb9*apHWaNNs )W [EPE1aKX"䮏f < ha?`x%pheN99U=C?N;KWMBnΰju_U 2VC~iA[3!g/W@_DVIAWڄ O2vhɎvI@t(3u)Xhϡe#O*9_fqm7%Y cN0'Uk]lȟ4x>=;`zѼ?e/&}UPְj>A0e$,4D9 M]m$Z:~>& u 5:µF J{|<^<if>q"y04Ǭ16S?%H+,5u7ge \ԌZ5O2 *6lRKxHs䖣xڏQc2.!]+^t˜*bu' WBfʱsq5Lnkh<  y ,~*6ڈ|Tـ`v e^qT7J =8w3]y/*G@F6y!ϰ v**w_*>3Jś|=L BVܵ0L|#؂_ܵUd%xiAs.6k$$j%hs)‰rh++W 3דiwJOkS-O\,fi zLm%JxOlÃVxmi*hop0h*MZZtվ KJe&P(<&J[|lgûX.0v;TVuٲ䱖:5 zxϨTcef̨%h}h&f2E q<{ÈAW?+ vAf&Մz<*^_]j  P/z\̙y4a7LtNS?gRRԋE'Wzf߶ٌRrHN<}>{Q+7b WD{z*xh|`YC[LXhv 6mh[XMY5qp0$1n}P ["z7&=뇚B:̈́H:e "9@'TlCvOIW2D+QV>.4dgPBhj*NӸFyܔ:D,6baeĕ>:~VG18-7i8Q~I2- ^mUYH2<=h>(]v+uރ_[&s2hdJ)ky9,!4r7^0y9WR(~D!0H9T0\vb o z` 5n W9-),f,[E4O b^@蠘on4Xbr0z 3L۪Rm[A) juBpHh0ψ2VyW9*jf;* :;# H`kLx.>lښ +-ɲ~o.8G&`&nxv0>#4C_fZ& %Mhъ03NN1xifB.?b:_PAЈ_Do *ޟVV#6N u A4J#oir)9db<,!1H|;<~ʮE/Kc |ӯ.ޖtُP08o~71k6Fmͭ>RMyیxɘ ,IcE9dTo9VTg~5r%~edtl4" n/Q 篸pP|nӧC 9ʪI_ J$3 6p]#9qjNWcK:UrKno3,5j g {PύLͺk| HLdq97.RkhP)['w5tY ܥjQK}Ph@V3 >%1ncDW}WAHtJ>%rFu}[J T>9 FA0 s/ _MԵG#P>[o=W1> ;z5xyzϞ~Z_C}eblKƃ&~~n|ݶF?T<[U!8WצZD~ WyI@4 ffP7 <*?T=–A Wlٳ̵Ǖ _rxxEXȉ]`N]G$&E-SzGد3~/)Y+N۹@h2A_>՛G!rmUJS~ҭ ȉ6(䣝<88"mN5(rU(RO_m^⪐dǣ(H¤Q~~qJURɇk.( { Q.)վ {C6jROEu?A!LJy]/ɶR:bS{,BXE\eXV .Vpө~jF˽i"$٣@H GJYW$. e=I!z7 rF2x 8*L O,)ݓg6=ls-J-/4'K!C,Y%P@QWgls*x6%r~͓MnbAD)%!0SOFcUPX5jqQSOt>RhQ Zs9!N^{I%fSBnSS#]p}kcR2jvNrvZiKSR'z$_BQPݖJ3s,07/YmZ@g M0S1۰ZY~)j/yCCJdW`ihKyt`H324EhE#[[*!MV*n[tlKQDw43R:q~ڧFvcGi YMe wn y׶尊?\!GDia=N)u$ #"DH":6W6c잩J#: 2JNs=q4w4N''d&eq c>\2hxc '>&-pV!f|gqP}?ι Y=˦B?PBQz32v;~+|ebe?N"͏@V]s(!7-'S[4Ah/?z*'_.m_w"}=xT`!` GQ>;ǖq56r72f!KhQFҔ#V{SսX+/AEԨcB=YEgꬌȃJujG d V `iG׫}MYhg+NA ")pZqsKg #TK訅.aVڍO9WWS6>ꪗN{ ҆!?# _~3OaY Β4S32s!lܽS ق*F;qK%ǞEPC{nPrcDwV3C d_IQ O7A+SLo+\Se'B"[HA'0ߓ4a AcĘnVbƤtԨ>X{Qt.L/4ϯU+*fv_IyY;PY(b +ZT l4Lr=0{YWg^oH!2 (3kv]W!VGWE7FlF|u.6LQ xqlR]Ih 6k@eHtW(ٟfY+>7 5hҺ. 
&Td!յQlk o?628=h~ƕ*ŋ#WATj}IUؕEOjFz9L:^i6%KRY-WV)ZjU,J K"ID`>ΰW:͸z%TA*Õ;6K.$m ,+j 蛫J/aWH[AUyh&t 0aIBw@K.|&J2GZ#lDM:Yg⫽a*`W’ Xn[7`&r{UYe0KHFz~Xp/2RខVUvO {cj|-[C0ӿzb=q,wpdVE0!VjiB(zeH_ulZsikYt2W*BBvzSP25~kwN{3`o6ɦa8#+Qxם8=I WY ޶z98@ uޱӚzm4ōշ>¡e>^6"9 59Û5!C"uWDo q>ޖCuzڝZG! ĵu_WYWb_~V9)|m* _A0$}~4>xtN,VQ;)̲'V:ehiiy>D{əQYUvMN؟ɤd9;_X]I_<8 SVcXHuWrZ{%.VfpcQʎ.'fhs{kY)؆>l2Uf4agTM%v&,7x9#ꤻ )xA􃾿(uV|qžVa|1Y{#+9p ɒ(c=#2~YǿxWb7$BPe񉊿ھ$rό'eema&8N~PȆ8H=W; 1^;diMX)0v̥)MQǓ+ 9_m79fFͩm siNk!a_H*cRьW]h75O8{m2GӍʸt/;)Bz4ZXqPa>DRZg&@ M2[-r(E`}MASYvԹgx >D_lk ތ,YG_isMƴ(]@mqVX[KDV:l= ~ g褿P VW {˔W焲Ogs"ذo8XB/ Y,AvcuٿN ܚo؍rnvL竭@*1#]w{~f ʤv9DV?KTT[y+dUb|Cw06| E*0SM u6 GcNqRt."-Iut»+P}'VP0)} h&1Քö#!3۽x׾ig.9ld#VuCؔ#kHT6 bfH!'I|EO{a; LӨ)8&"M ؼ8j0׼9)*( v7F6Fང+5e_9gphP Ft #+$ [C۬kQ ]B膭y9 r ;g4]0&\vH!V)WjS\*&rʭR\Tݡ5%v+WSG6Tz+1eZ@Owj h^Fvelh0ѣҖyl@5>r F@) `Y+Mzzڜ)\^tb4ѲA geO3Ge= A"vkjA_x(܍MOt1Nw߰-6\}f]zHtL! j1,Di+)a߯ %3-Yص Y,B%6t(w{qRx~Wft|"Re@Zo_Je18n|Ip}y _BQrs>`0y*b"_?rFш6B~{_;0QSi(fE\WљKznU*PAJ&NޟX ggZ`A:J3sqTBcf*WC2 0APmfĎ$|6)Et)rcγIqs_PymSc|zPhR99TU\j 6b?xGJe5LGX#ΫόfG`;1+2)oLcR>o e)1sSњh~As8%r{W4|!TcTZ y>rR`B=r)6ټĊ5}Y _W7KSa\DZU'#̅Ue„H~G@>Lb,ABw#ǼʛqW=M >6b!btqUڻ~` |Op3mgq`n?9TU$%FR(ʇcCAjul ah!=ɰi:b&ܤZYEWW5x;I)NiZxV po.Q@{ ,\m #tDGj>۹f/杲}Pm6"'J>݀dOb%\$H6c3Up)4<:D]c13.!W蘁j4UBB5`umBf dOJ\ߔ< XcӜN?WJJ; rt TJɶIY`E*sBk& '>dz|(aq<)X,}@Ls]A=#2,>MaPf)KP1!*P\Rt3Rݪ)8g>vEP+HcPc10P *@w?*qWK=A:|[ b/X> sZ4 O4pS`xVY|>۩/<jJbg7%``9y`4S aTnm }O4+'C.FllkX+\"Aku00{q3],VNU%Wa,,Ȋ#[y?d6&_tAlv-Q W SH ݢ|h-jYk yg9#Y1p!r R+J7pQK.3@BaO4!dG,*4/Vt09b||>li8w) h j:̍Catkݜ=r]JּVs9d*"SJF?cs ` 13 Z5Z i<܆pZ@+@q<|\ \lRwQ {\9QMЯ%qWi"w!fU峃c XG4**fO_Y]ڇW#KFFf[0†lk 2Ngcʺ@ ]Qe?R:+9qr4kCi~<1;wxc7Ӷ؍0kBR,5F42B*c$Td8ll25xChUvu֪}_*䃐—nɽAfu h`|&* o{WW9A A:A4!zg"ӺI8(2-u DoTbksdpkhaa`\l+_-2V fLj[9tp!EK,Ȅܽv-iv*Po)Wnx|ۇ[Mr;o )C=jE{@;s p*h:phdH?EC8Tv5Χ :L0I_]01ńo] JPpkCV/|96YQI0y1a/Pp!֔Fk%Ւm6#؈!\0Mb⊺^Z&?TX )WI\W5Cպ`8L$P3fi,߇ -47B[ZUm+55)T'BD}eHjۈ+L=qf]7lrŌ.K2xZN6H0VK An($i)ތ:vFzn9X $l xÕ~{wSVW6ߕ`>e(IpZUH;t͛p*ӛ-b>l_SF!p.ͦ+ Ј!$)= hQ?w۸¾sFmh'ij"&)clo72~w 
Uo `o-VG@>HhX%wӾ M5d#<~n!]if XǣADсҰcϪQVV<`H#uQá^ w[8 e 0nZ WT P$Q7 KJKnjF/ڰ!<}PHpzߚ4w#eWk Srf ļ<|%> DY1VlQ*C^W#WYGidiʀR\Dc&5, Z7 F}w[hSu _.7bf]U4xZ62{:7R+}JJUMDSrc1Y-Jq%jRYA7ޕy%J5RDWJ"T3YըJqSh;jͣ) $1D&wy=yh.eVvCyaSW{]ݱdkua`.`7Ly0OCxz'H 5tN󶌭G<ԦF~t3tͶyFOq'`|ihcb69T8+`tph6ٓ |ռMy46Zb"!kʽ估0Q"p J`#AڶNcĘe0fA6Qvc8‰2R;J`{hFifxK3z^V?\Ќˡhx2ב+&XyӦ*gv[ nө{V8X8)!"!L[,k NZV3.J֚2 f\+5Lf7u"qd3G6s7Ԓ« 20kS>̔c{Pusy%SV~Ý&Q?&hIa* /I8sɤ-K=UDf`f[Xd ϧz_Or 0(|a%TNǠ|0})NX+*45&y'~- !|ˊˁϦo9ɸOzdܑ_c *QȨ U!"NrE$OisjÜ|IА{C:YB٬f1-{2jiM\iټ~r}!ddDV3ODl ;NoqlRan"h˱zZ Ecy4)7}2o7Wx`VTMDT8GȔ26Cp뇛p Աœ؁"$c]M[ӑZzwr Cfؔ JdC|oq O:"Sq<`ASW%Ԓe4Fvl<ǎKqzX_Z@/[ƕ|:wQýQZ4VSad]j )wI\ b|~xTqz>Xa;5¦p&L%0C;(:&n@mHU`r 7MG"+Q f.[^@ke#\q+2jP6O2!h%K?!!Pe>䤿W Z޽ F0c`6 ?xɃ'+xa(6R 0 Z}Ӓuݒ( rZ)@qVsCa#ظd-kRkHsʆ݀wj pBj)#֚͏̢ endstream endobj 222 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 10 /ColorSpace 223 0 R /BitsPerComponent 8 /Length 39 /Filter /FlateDecode >> stream xw_^-e3ιPcJIK+ endstream endobj 225 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 207 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-026.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 226 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 227 0 R/F3 228 0 R>> /ExtGState << >>/ColorSpace << /sRGB 229 0 R >>>> /Length 2775 /Filter /FlateDecode >> stream xZM$ ϯcriՋ؀p999aC~RL`$XjQ")XTv|8~_ݾo>~| 
gx_n_}0_sc?͎llx:?onyG:gwϟ P3s?{*gXO?;~{F[9}?pfwf[LߊisgmxyM&4xt3wl* Mޏ4>]NGv#ό*_.>ҙ`{)g79A.3^\7vv/Y:چxgGJ8!^9^*S(~m\΍,eGlg5 q3.3D\g3d\M:^ bg Ńđ- ;x #NiCppq0#-,W刭6!GqۂJS{gO;Cp剑ISerN Z<=ֹHd8p"v ClH3`8H%$ůGчv|2_Z3xp=."kG펒/|31Ukf"W-wv8 gR`ha<"zM^a!i]D S4pWPrmRLЊhnˏyŦ].zyc=O(.G{6hD3aE Z&dxItXf8{A~T ԯA `tq#F|S I`*N6.QTPl1[vP?@˴q,\{ *hB-f:^F|FQEJL{<\RШm,7ϙp?EbxTa\3n|D[K<<"^䃷hYu :#B/ ` , #Ͳ([P'AyPZ̀kF`PIS5o0'R$t}B [2xƆ Esp-삏 1KlQH*/[phpzX* 4tw%5gnDK\#5nߥ1q MYrpSWNgj$s^NyYrcOFYO,(86&6QcBzjX9LfeGFpMJҞ#QFĞxSӗoƺg?p1j#нL0˜ ?`M֤:mQ!600*[\_ӃcbFmArN]G| Q|ya,! O3AU LQoIØ!f:HSb%&a@FEo}M!BYTbRtH:g]u.~LbيB:^c!/4x8L@sd}snRa>-qF{34EbR΁3-d$-Lʹ`(N.%r.F5xlC_ِ-sw8Έv)?imF:kH_i$lTr$ks",v/>L0$EE~Z ˌ+q@截ң u^FiQ< af<}1e[.t=:+OjCOT+=APt:w("_X}5beVBr+90DZܧ1<ӗw牖&?XM!gx#y endstream endobj 231 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 236 0 obj << /Length 828 /Filter /FlateDecode >> stream xڕU[o0~ϯ@{2R&mҦ6-o(! 
RЭmS9sQt?,Ί$0&_U4ӨwPĂ˯V]}w?8by6id\B݄8_f܈mDVuV|Vf@Yg)$w^;'%5z+T֢~->":iKz=΅mH=#G}Gړ#wԠ܃CJ%Jn02:>yHxyh`e3͉s,}_|}:MbWw\F-^W`79OCE&ʞ& ;ؗ`Z*="C(g8oξ(C Ո[ےmId27+d~&1GM=glřw@R O9X$N%I jȕ}9G=49 y\y$bPھC :vi"~|kj%3# -)g>Ph[?G+PY\$7~UI6j8r- ww2BH޽,dw,2@k'-;1=0Y2x-tdK J|^{j.zy3/19fkLþƒ6]4[P*qnO.姿,TFhtͦ)94֐{EApm/ܳH;\Nup*wy%yu ,?#[q 08%nfTcHߧNL))$a% endstream endobj 208 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-027.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 238 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text /ImageC ] /Font << /F1 239 0 R/F2 240 0 R>> /XObject << /Im0 241 0 R /Im1 242 0 R /Im2 243 0 R /Im3 244 0 R /Im4 245 0 R /Im5 246 0 R >>/ExtGState << >>/ColorSpace << /sRGB 247 0 R >>>> /Length 3365 /Filter /FlateDecode >> stream x\ˮ$WɈ|odKxbX fXX{Yu00gB%άxY}-)mş tl?^>{l ;oYۏ/j;>}w?OO_Oćqc9?_l^V^ŞaMmfjy_-9u v (k3{3K}kh(7˔,n >Q|*[>;#<,]9Obϱp5O43=1Pܘ~ EN[$sO PRFA K"+I g%GJMS4i 1 n(?%ܓ<&mKF&Y[ * +L,ӑ=Ò$9A(09R@%N 5 y,+4:g)bbcIùx(d.u=FP5sĂKybxp ϟa+J@+-X4&^8#L*$u//[RYcIcj5"VB Z҇y!.~*3'0{YDcљ3㡸H ė$\XƉ1_qEO"#q#{)ː4#-dJwR9UT&X)/[=+lA&Y)RWX:\ 1;PeODeA#ۤ Bp)**֣iEaj0V 8i+pDql=2p6S2QbJ [d9DRKL;fh*EyJF>pkDREQQ0Qv茭E1UEaE8D J)ha*u'%^"Y;ґ"l6)5NK,RoQ/#>-^8`NqRdM 3GٔBNgRjw24VTJLf{/X^HH4U&·g8<4;")>[ۓ,d1AJRG E5ꇰR"BC2Za6qKںHNS楃JN [;aG`/wYGaZ{RGU>,󹄓C>p]c2=Nqe'|ge>{\Yse'|a'|ϟ,9\d |Xc2e~}qt&#UEyޯ{xgl(+wyq^yUi 耝 o1^*i^PΛO ʥ;z=9FЫ6NUj03-ϝGh9;9O,hL/@l`kOaGar h`T:|-[4NgmEĖSR[$hچC@Le%9GEqOX|%uElI+>BR3gey)C^XUR5ާ1M+奨8J^HdrT~݆]#q"+U.J\' mH Z%[WHv-o0VȪhyZU1*X EK q`) v>WКbYbmȏ |u endstream endobj 241 0 obj << /Type /XObject /Subtype /Image /Width 39 /Height 53 /ColorSpace 247 0 R /BitsPerComponent 8 /Length 2605 /Filter /FlateDecode >> stream x{eE4j%cf!cD4Hd*r(%T<"""!"hJS)ҬҦlN7w9p g}yZzz'L+6S ?vCh^_~e :ۺ۪"CǕ!;²_>|?fl-W@1ow\y)-]>}kW //b9̉pwW?(nxgΫ;u_t%p렲eK~ԥov_8㝧t]=\uRcşO>e?,nxZ:˽>{*ΏpSc/~l,f!՜uZY-JrSC\r}-VTdqC|G<7S[܄/͏s1C}yCǔk/W(-{byzuNyƮ1\Ql2\TLs'R6dsa NObCwg4R?+Eceuڠ-l2[3>[.p{kظh 8<;jNy'Sߍ<41I0b=p)  
t]Q(n85@>(!㝓iyYvH;:9?Gʰ,;R־Mz}yQkntZL`ۋ[w~H{ V512_?$&IӞA 8p R#(hGE% >*S+Ph`dyGp҈я A/ 65suM]PCUW*h5Q5×) 6ɐ鄒GXJWV|S{>AM1 YjͿO89Whd ׋.݁2~*c90ҋKJdVhXݐ[@k IQ#LXYTQڀ$P^V%c,##U" b (> )n)_j1ʟ 1OPHQWM?2i6~C 5ACQh'!Ҩ_x0RC6CL&>Y8 㦙a),H,Fћ[(1ϣγ5"鲥5ʩ4f=$a#.H&ap*FH&I-+OV:2aQ) )\0ZOtՆI.z8ZjytIS0gM7Elʍx+Q?%x{rº˟$F0fMg}&#ʌlrn Xȭ NXjէm;&ԆL0žÆ<ݫf`NƾMSͫ>eW|~Ūƅ=*k RE mbmً-5ujlp(m-q6݁N;F)e<Ѷ|;]g %gL"]AWT38TFqFd+3c@u#ËIQ,z #^jJtqi]Vt_StYRtU[dL턩*DDt{ Nu~pG5t!_=5tQqu{q,'XkL΍ې ;pطIMNݭ :{Ioas&RXp[T2D`J2q%Ƀ&B&#/ݻRxF:h2 `24n˹I=ln7Ie3{W|RfIjni&<&~űaAͫ.#Lttܒ7DhXN镼,\竢 TݻL7@@0[5&Myz4eb˶aCmϠS̱USbDp\+˫/B'SBtK+P˕du9I\"j٠+ܰA..n$Q'I{1:Iw77 'kr(ʮhP Լ@8lL Mޕ=?_$$~ Tw6eЩxX?ȸBr)HDU?JՀƓF( u/acZLD'݁êN4N3eP΃ÚN56L< SŰoYۨDZ[PSum)~Htrm4w ɪN-YÄb-7mXBaa/vZ endstream endobj 242 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 247 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x}VeÇh" RHeCD6JD*(" " (-Y1F&1PB556dSY3쾻ܳs=u_u?}=wO}~e3~ݿ;{llVִYYgvWt\}Meݶde.+Ns::/ͺ بNo=g팳Vobuk[/Yz<\K(VyHhв|+c,Qesum6X"TՆ5nw?}9llΎlenc;>ry[YYudU)XKE n{6lx6{PZ'f.[bpN0+?tk.wM~#lzТaBuלx%墲 *9 |%y,Ol I+=ڗTRe[0~xx=p\7=c׻.7RD'77lE°Ұ0wPduИ 5RL2ԏ|j@., BD%OUKQOfHiޕA WqxcU\luȤLnQ)'WT E6 قx:~vv6UD5(.h"+pEԚXVoW~v;mz(i_E;08AfrSyˤZmFiK&:FA) 0[iC+L>GpjFw||U8zjd&řGS|o %vxT, 3Qr->iiCSA-9tcivB)̶xQj P0$>7|Ac4N)™%sdBI%kb0icy*82yIt&OD#}qhiyDFZƜBr2QtyJ3LlTcqtdYi}:ljO8vsb͓6+!ʆDA@ #w =WD\¸ Oc?>W06'J}x2|h-)",|#Ÿo Ogon4ݑ4ᆹ: Y3xL-SOg)ҥba0Sܷ(!-cd{>^]>jCQs`Abp }4CoJ/xuy8 owKEtWxFăF`>sy@>B&xx. 
E3;%䔹Fu[q9Y}eʢA"oix@ݥ8ة)p)U*OǜKkSwKT7 ˆֵ;}K})\ A0w91 !fR!Ge $3 4*.DZ;>&f1.ZDLa'Iv-aT*hW4LV,"2W0[7E BϿ;ދR)/5-^qtiWcp~!v6Ʈ.Mxۤy=i+GϜ#8TP>S'$*~瞪h tǜQU[x=M'k endstream endobj 244 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 247 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream x}eD@ٳ(%CD!CH4jD$& 򵠢"|D@ceQQ:H@@ TSSiSM6}N;9{+f9{u_u`LG;m]v{w|ֵgÛ|[9xkȟJ-?mWMEr!?h 2;^hh˧f0T ΚE{|셚|O!?X#WjDžp||wl+{sWޞ׿dGon^]-.h7׹Ѿ勯.Wm&s`ΟPDE%[Wn')w_Odɦ,ڼ+-;z !4]1ZJnGv xלˇ&k=[㲜Ào|?jl>_*|ɒD wnjdyWv쟗nA,?;(,ӳbh:*W`ZHe [)/ZWW;Oϳ>g+?ݢyI(I6?(=ق|WxwǂP<5oFWgQv0_WSFxg ٗO̜0PV*J%*dɖ4g+ٳe<-ksI5d}n]^ÖIbԸn˽/eswf mzUlYe; AUA 7ZpBɕ'o+W=g'wXn[ExFw2Y~6$xUn? a|mgW~ue!Z$Z/_v傳׈o'.OvyUkcټފ+egS2KvLc%pXƿUZ6/0ϴYs 2d)q~S1@ҿz/mIސ&/ qrx!؝#8M8{ E!Jgy@cWӊ2.(+({.X v4*d֙M\8D{ Cއ,vsnj I%!DŽ\Tw ]GvRHdʂI O21y@+}p4!l5 b/iZs87RX r0/PT&= C'p$NcQi6[ogZ`|q{8Ob4d'H rARϡ 2rKIO| +h<0v "a!Hup㫜SN2U!m!ďF&@r&o{ M n%B TG)0P҇ԚT8o@f!14\54 iJ-=G6iCr!#e/V;B +F u/P<^*}%"dd*TCs۝}rgB 6³ !&dXI0!4x3׆ `B(F"c+[1ukVi%-7P4iit#qnibi΃\A7UDiTlDB]KW^mL@.j@ehdRD*d\.j#Gv`lI߶@ҭAiC4_8gwڅ<UCnm^UoƤ-nܚL:QGxYՖ[b0ںeW^l0%=~._#uyڬƬ7W'&$NT /^Cfksuq_q{ l /bhyUqAvY-I<ȤIIIOKzK endstream endobj 246 0 obj << /Type /XObject /Subtype /Image /Width 1 /Height 1024 /ColorSpace 247 0 R /BitsPerComponent 8 /Length 723 /Filter /FlateDecode >> stream xugP$ioQ.K23/!g$!BIBhHC$DB2BeD"#U3|=zˏ[iK/lM7F1W=9Oi-}B>=z'V&75SJ+i^Es@³xSD,닥8=KXlEVX8`P9֘e#0}$a(Lc1i<&c'cQ1 1l: fal<y猾+z.DE|1S77\];:/ùq t3> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 
{{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 232 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-028.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 250 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 251 0 R/F2 252 0 R/F3 253 0 R>> /ExtGState << >>/ColorSpace << /sRGB 254 0 R >>>> /Length 2642 /Filter /FlateDecode >> stream xZMHׯx3"2Ȭ@ivè{$>Ei0w+lg|xnY~Xdra-)e/oc"yݾCV%K_k]^ޖ_-7˧/q\tm6_o_uH՜f*3O/}ۚa}fYS[w-1 avX2΋hEQ|y*  *ZS+K߾|feᠲÑƹCNt2ǴYãb^pz#Q8Y + 6L$4<bJ NHVV>P` n1@aFwqRՎX rHHI0<_p@` n=qW.qԻ//ON߫ 3(i-6YX_*y`g#8H1t h2U9v y! (Y)<ɓ8k_> Fz_BC/,fLX@:pKxK^٢ ܂la Y?ɳ~@ҊH^ ew 'yD pR `NABE4 " lAZp_h@7`3#0 =ꃽ8)G$Xkl|&eNx\ B8)6u@ XuİDk+h~QEXprR8Wp>˳~09)܃dVx6g+ =>zKx,v*YĎ$ˆ3Z9>ɳ~ȻaF"5TxL |+=:x8 vI9Wڀ?fFO8qI/QߘcD3M>)Z (Jt,6NZY~&S1Ɠh1Ԃ3:taAHKQgxbP"O,n͢F%RX6^ۅI/u?U[=Gƈ̔b>~$aU"w s-o8"I9 h= P1=$ RqN7o0L'; 꺀q iPAE grNmDAk KY 6 _ G N+3"'A8:IM0 }ܬ}k8HZ'qRЁvk0>\n]R9ZY,?*c$Vv$i\a[p]-c -]>I67 ,G| ~-{,XT]a!1<{P>ֺ+c0 F瞵a#e`\>(Ы(;K}: a{@itg#n25hYP1ckbm8a}lw?(qO!LF!&#]>LXʘHㅒ)@c[œ|?̼r֎n5^Hjoc&,WO:ĥXc9$Re%7>n@b]_0I#`xj oq0lH8)դ ؆P QCj{ZxB#gW|wN%w1x-f@@\5ipG<7YyjH< XH\^FZ[d(e_0j:?^Q̜&qRld&U(Pbyh xPWvV [smfyֿpn&T9p\+ݞw:>?}dpbn n,ߤ endstream endobj 256 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 261 0 obj << 
/Length 944 /Filter /FlateDecode >> stream xڕUKo0 W9 6dI0:l ٩MY>H`E"?>R2J'#c#L>&ޯ2хd%>8^+}zFLȏٕ"Q:xVUV,Ox66ؗgWA2#rI|WaU/t"Mh%a3JZgK5HTOVOq5ï`ۄB-+|IsaSjGA޹S.J9@ I,u%wmbxx]PUp!0E)$r!?@%}z+MM ?bA5/`aRKLF_#q.5W*Wɟ$y*Xssus ^M!-#~A u9pf|-$\c"oȕνHGuS,px]-{wND5]䵊j;X9-uh@$%!}@U Y"2{uw.Vڛ,9HC W"0Pʖ|NM͌]wJ^qJSKpyf?!S "'<QzB=K0MPԈ#mq]Rʹ,8y~lh߂ڦME>o>9 ۆ?hs5RU\>y)Tgw006.vw+WwD#xLm}dWX]mҖ, ăx.h&#K Eol|_3_ ;v:`;+KX?I<-Uf֤6[y7jtA{4AITqa G:%'VL2"U=qv=2|r-T&2Oow endstream endobj 233 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-029.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 262 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 263 0 R/F2 264 0 R>> /ExtGState << >>/ColorSpace << /sRGB 265 0 R >>>> /Length 16727 /Filter /FlateDecode >> stream x}M%Rݼ>&Ϊ)FB%J &`[w22t}_,Ԯo_߼[,%\Sx+!?_o_/[x!_?-߾[|9V~o9߾7 kt|828[,p{wqL#+%ǻhM~k8dp=A8!H良>9vdpĊ7^8ǯޏɱ#c\c5Aux{p^߃ǎ ;l}8=j=1qS##8xDr8w>>Fp|XZBjűnG c=p a4k)?4V!h\-bcG16p$B1x6{5)i酈8jqq\88.DwZx;f~zڅ7:[ol!k[_~xK:ecp$A ]b~9crvC</Y# xD0@%y͡wq8cg9c,z7܎5b޿c8µvD'#酀CSS8%Cĥű#Z_+!ki>C1M; k<{p1@&OmWzV;{4m#c:dt[R#v2Vp$hžGāsolDž#+55vdzUnq:MF N氍}۹Mlʡ} iw,mB)"í^҅`gzrveҼCtȓv;t{ @[HpKBZT;v{/|hgC6ziJ;4%ӵ"M)C=!LSaBLvܑ(Opȓ{Dԛnnu"/ 5mcʵd#clHA#c)pdqؙu>CpހN/IM}dYMc3siwDZosz<7v`@ϵpp9Bל6GJ]s <weeGN^89]c!S~?3; 2dR4 #}3!S_f$̅L}/%a.DI6]p&Fv,'l~:rYIC hzCL:d2:tW_Gt@N 9/Yiep\By&<\3\c:5w=q\cG>2m#}ǎ D+fGt5( k%w9Zթ:dpZ+=1|GçN 4Tp8Dhwi@-2̕ :~4dKs}Ek/D+;+䟖\yDk}#Z"O;pzu/G$6CtV YZ?z%,-oyjq8؃f8ۓ|;,T pXŪ4Ck!=6u8*uf8 %hyu4zuix(vyk:q<4ͳy!jȃ5tLA}vCāsln8DCvG t8p :B1fn< r[V`7$XQռ;B)pte.CqJlhŽ6YkA"JHt!9ViG\0'ı#8 .iJ!:<n4yGp7w NAhM>q4Y ~82T7 s8<ǎ =ၶCA G?r<..sL8v,r#8LUE`UbGQ1pt ~SH"p f,t82e#,]E5ީCgeG.= ,7_'+=mu.}죸X٤Ҩ^1 hBKOX1h ؅H/#iXsyDe!8OjpP,tp":` \6Źuc;2O0vGdD"0Bm⩖!+D۬ _"cG=ȗ!4Io!߃;"+:^lu#DJ_WVl#ਊVWt+p(g\Q1q0!chAGBc>CA,3w#Zq8Bii[G_p,>Ghf(c);YƌC$+!|;DĽC!ouzxDqPJr!a"f|P*8d}n8D~ZH'+ͯŗ٤Coglz;} ݻ(GAjS̆ 8vD"zюjz\;BQ:z̋_hGd yyDxBp5#ʃgxY=k="k /Ӆ5<[,@Zqiꐙm9vi~\,gk[B ܼ;Nʃur~IhAf:Dq±ls ft-&vDЊ 
ttr-\95HWƱ#p"}; C~#~d//OY_X]"V 333t?6 GI)(Km|kneAj++!zlGȃYzC(ti;pϡ90C_2\y("OXpl y&`"G >cG̔T=9DBjg}ssB0Q ݲ/D6y:DЋp0y0 zHkV'ĊEF"Z;EA<;m!jQ(bE(C?qpXC9âVv4a4nqȃK(Uf]5.+`efh,:DevGIq{̾?WvD$G3;#G/Q|T #[ڎ(.vvvf_Y&ٗ#d~BE,#zZF("xnPfDž( ;iU>:DMfȎ7#3sK}q\.4[!`VҪȌh&-]_B͝1do}G}jIZoh~4/Gd!j{q ~="nUOGtcGɲqOU7I1v8qq!∪۲GRU }^U5Ue%>8H>.dr4Jv~DVey^qUepȼFQy]c!F9őQav=<"W׷ݑɁ(Gˎ(@{wܯ#cmBx+C#?A+CB:2Dvd$F9rX׸q Dž.ܯq!x1c(1kԔ_;u|y3=ȴiR #ybGΨɯW~ЅH]Y鐕/UAʄY'tla /N!ӎB-~Z9GɲvD!(G~*p"g ayB`fQl-y3`[^C=ee9A\Qs(:~:Ӄj#~uUЬQoiU%;"1=V#3&I[!{~yap\~2<(1=wb;䡟8;D#xȃ ʼcD² <"rxM>"؞TؗsraEUa0C\}y{{p+!A$!% KSTi#!3Ţq2¼b"}k +'JŲpvA7~]|t*3'MuH3׌F7 .{׵*/d~iF9{cb/9]q{Cؕ~ϡ>(Ec-,ZiJFKgO:AA4-`Ԡ:2ƺf/S$~YnNMuTU|}z[Dѩ`ݣmWnk(M3oɐ 6HȤeҲ,$&W齅٘3vaJ]+6- S+irYlvc%3~qt}{gpTs`!{Y%1ۍmީ|#*/z7nz nuRC[?Ϭ%lBL㏤O *WdPGyvN7˳\OWv {M^}t0:/>S4w TP={ >EZ_m\␰8_$1r1t%o ƨ`fA]AC=re#rǨ~^G#DE17gd@cLeKl͔' @*]U(wmiNNeڇ> Tؗiն^xW[,X}rړOgD8 ^MT'~3N> ( 8ԧY٪CU2 wT<~]^w 2P#αϱɍWG} Vxs*wUeoj4*2 6Y`g4|Ҳ"-{>9?7$}/-SJ͘q ;:}L,OZ4䟍I+zb-Aï|~SwrD$:{sVorLϼv뤥pw3\I%+ QEYϜYf(^і22Ѣ 4L@W{{o7vme __K~F|V]@o9T RTrqw~'}bZA۫~L`̛4? &1MᖙB*!"ào>Q?aePwr!"X=J;#֟Z08L,vjUV\]??C .z90*/=.};Z=/#)!ӟkAMo:b5|+k6> w?(;RPVi&M5 zcןFeGM#I17~L^t68ۙҜ ܐ@Au̼J̮gGB줕T7+Hn+hY+rT>wDEf̗{]@ |۔Y4eau,nΟ#F4\]h:,2)eZ}XȭO#5yC+tV@/_LMr#yw-WmkOvMA(|k[=CӾݡ:oH? :GՋ1Y9 '!JI" H]8?59*:VP*էYgEois>75 _%yXxCw< ,4{5C:zD 㞬>Yt`;cѝ=%Qh݇ D2=#z?pf5$[EóƔn.ݘ3 ۍeZ`WDf7# <'UԂ:9\$I6|Z4 ~C#WP3RfJ//NAڋ_^My8hҪ.nniI7=Umu|&pY9f~-/2% k3zѺmM d)$ F*I.F#nuwE:IіՐ5!~}ztwJF~ş]h!Lj2?8ҊUSI_#fwe3kj+vMWc>40^B(ik8Lxg6Fa"f"s¤T' 11LVewvih8ǔ9~>9(N %^N0Wۃg.w9scԫwr.zokը?Ìy297~!pΡь d]/z{S^-SL,R#kю 7aG}> J9miAY<Ɛ^, 1:NC &҆>GᚁY5C&/PwΚت҅+&4**NH_K4+.G?vң ;lWS]X+(9BhOu= \'OS;} 6zM*T/23Wksq6x'ғ8̓#Cnl֓/4!h25w"qE;fSmr@5@shhmSE yG0Z9MpI0OrSIv4x> ^ʨVkrỦVMڥ yOtDb(gg˺F.5ʮRǽ蝟\Cb3$e!xB|MF/ߥdv+2  7]MМeH;iУ[Q5ƾ 5 !.o/]nuLvts/ME 6 .F[tӲmo>{h+O"c`h Pmsբ=?> ڮ9'57B!5gaM(>-j2Gw~CT7! 
0yw~Cjj$rSr(4$kV)zS0I3v҉x}!ί{: *&  EF;/$4 twlN iidb/NvJ|mMd[I&C+N:95i$DwzUKitCBh] 0SѶw)MM-t8OAR3/!R; jy;g8#'<,|XYcUi* fJp͢Iv>M"2QS@*XYh 4r9:t6MLS;,SR8ha5%E5zZy_N_:$BP{z :2x걥E;~7 MdRA 2Kj6z ԦP haw -u^d*zsg@NMᚁQٓVP*e)p'wuT{(LGL5ӧQɨ?9N]E_P:,opϐ8|X#r;N'jggPdW u}kwQj9HM5B ntF,87MSΜ; e3..,Ikw]aΡC^q}ؔvwGa3MGe~YF8ŦiRwmMNd,su9wz翯H#N($Io56HXyJ'OvFk3y1Y1B7vz秚:Tܬ-4Mr"11JM6dFv'}3i-/dgК'CǃDR1ZB]j1c}a) +DD¯Vi!8"BLl"AJ2-є 9+k=ZT8}܂k T_fgksјkX( \lhiDRfTch,-F]b(3ѳAf0jHGah/q`Lu.4^| F;/ewS_~Mшp*+h5yTD9&}gg+ Va53\|ў Vt>vV>SB ٕsđǢ%n4zu/eWnRBhqtFAJ8"kOy. 6f#O>GgQ GkȪsz=;gn%确P+V?f$\DlFwzw}|Y+NŇ?50$nNwM);w7r`γġmOe+ex:MEyHC4I5O?"쨢8NsAcɢ+WwTp*S'ml6f=gWMK6[`6 B;?5L yӉU 6I{K+)">,ZQƂ:8(У9퟼=]:U,(?\l{kotZggD%e~I}!aFpc0ʕ;Xcۥ)Mu(_IQViD-A . ]Bf AiEE6{8f^@yN>[9+qQ!Quhy@_VbShjOCQ>:ogӆ6/ŧ^ŧ72齒ةM:$6HDI.+MS .{]VXM؊l>W:]5}ю_dv;ӫ.Y VF;~\EUJjn ],L*/NثђI'x[ zKǢx@DC`nԗE!{Blz_D"Zb;ۘWT#&}s u&t0>iKP贮CjU0ɰM1̢k=u =z;GS: GGC{U*r+^YI|_n/A{,hD^!tL}R[K,SƜB@vڥ*83\a1\fow~>E.2NR+QS}"GM}D;/w EUhމe+3܄9bS=NzgF6:Gch7tW^_ٖ634ڂ`TTJ&~VXjYh5+ܽ o/gX;<ҲEik_q:ٶ8@ES_Gfi!(1UHϺюy^OS'cUoN23R.e0i8czSA lv,ZЙ<૳BGhzRn)lȤ,sQ֨EHnڅ4) S.K3}n&rNSɽY 6zbh^QRRanS.+K`̠]R~GEFoB %yrJwzUwC|$RMZɵ.DYX+T5.Pf}v_hrEݫә֗? 
I?j*h9i*ƛ)|evsN:t|IjW_Oh*ӣWwtlWYl1NξyS ~2GcF)+ˉʋiܜhǯS7_B>PM%t;}ܥ*8FG=yuw~=T䰆JeF;~ a׺17_]eƛdHw7R䘞_D3yz֡SPpu_JU:SYH zqg>VA]٨'խm&j>gmOKkE͵2_:rb9m5L3"zj4e}VrଜFd ueb<>N+ F^ ?UGכJQR/z\g+ kʂ0}; 5iO:ՙތ-?DEd~g3]7eJю_}pLO8"MoG6z痊T ?ԻlQyvC_ [B.U1>h(/ @A+\>Ghrke*,(q:֪&OT1PLlYULS]ѣ=[vL>#<ۢי.psƮ ^k‚E/8)4;Ek)Dw\*L87"ڠIt*e B>fZB 4A>֝ȧQԮIxE լ`юW*/ǼBcڲ<'i\kf&A7!Ba2َ{X+:Q&Heoʘ' [7zۋ`їj蝨,_P$ylp]$l*Ȱ;+#g ݛ%gM]_4TvBZ{|$ȝ_̲ҏ /P"}`w~IgvU׳S+ ¨;s~LZ:*j:N;_Y2O)F=UBS3Nv2g5v2C@y2s w~J3WyGSYO;+&C0 ooX, fTY<_6 Rem"X]ί{,Gӈ'Œ]y*l@3kݣ<3wk6zשճN9)&+1o:~橉jW2~Hͳ2-lBC0[\/Mw;B-r5vvRYEԒ$QCcTgHӢW#)wW{:wTV[r۝ZoطE$z,fS]ԱˋRQ|gjBKg䲹h/y3ʡͪvU.mw~EO߾jb#E (Yq0ImJWV̅чl, YE{~I/fd5x0)2G/KsҩCNЩ& ߙa ў_URe6;Uy1CvEQx5m%\> ,adV|*+L_!۪lE "ͻZ*z=]ot)b~uBAGU6 8'"UMWw zai}W5>nM'aMVМ`ů}.f,].|+pAdF;~BݩɳKs_~b)~8j~oX5qU2Ox`ȧE;~>MY3tShft;=~~>Z( <3K)*,G'1y #ίo'2*t&a1O3nKe&;X8oV]iSdUV΢;UssVf*YurFkؑ,0,\30uюd"4arƪpZD}6zjNlMv5]{jfu; 8oyVycP? ij69f} bG=?ӯ;Z$N?rb+ThWUW}''="#J)O)N׍jq6I6pq/(`p|8"Y":JJ<8 E¡v'nJXb_nF)'ю'fOR3MSw."̤sQ$XFjDӻjxF`ioZSsɆ_RKoMkP|K]/zՊ#0Cgc (ue(_W'V6e:kFEQkт.%;?7KDw[Rm*Zf: Ff1X3Vɘa1hǯoqs"1aߢםEڨ/rjYi>_P5 A4tt<,zJE}1xc/Ծ[i,'LZnqh:hz_uddNc(d>DEfh?}JU\mga,_闹7bJs;HS7Ch^ 񬚰7 [_HhȟO{cg*4ۇI23x':+W&RA}Kbҹ"5Go/OW_ǫHUG4;Jau6ǟe?g_RLmXOr r?O?o?X endstream endobj 267 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 257 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-030.pdf) /PTEX.PageNumber 1 
/PTEX.InfoDict 268 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 269 0 R/F2 270 0 R/F3 271 0 R>> /ExtGState << >>/ColorSpace << /sRGB 272 0 R >>>> /Length 5566 /Filter /FlateDecode >> stream x\K,mޟ_d[8pYYד |ɌH dqiJE_"+pnG/o_񷷯?{_|[8S%[$(!O_*){ɷ}rۿ| o~ox-rz5c9ri9{Gx>NjW}VA$ dϟ"姹&}Y iXӽu:u=Fj_ г?Wl:ix/C9b42䀿bޅcā`5gKxY Yeڵ[h`V}=k6q RȌ>q{6i+ħml7 w׳|Ml!ٚOoI +BCV{]ڐ/;zPB"jiCNԨMܷ%X{9x !3˽-#I![0"  ;=de7ف,nǁTQ0&KFҖf0=D.?Tsʽ_{[$|}d2?98 Qdhi)m}Z[DΊrܪ[h+[Ƕ~pw|Wa'_ӍH!ܓTF UUHb@ۆ"4cNK!0a)l;3:$hB3qlkwbĭs(d|mKqXej.Q8)3; \3䔙KmW.GlKԐM6ʄ\b{MJ Uq R4\٨X\`ۣ9U+[A8HSy W yEŽ#)X_B%.3Q<x3PioKaHn DYBlŒ?+JF#r%%Liԯ=T=*k 2~B5G&w+ (֡(0ζM =R:&$"}J! 1 Bikޒ0\h1i+ħmlwNw49|yqh[)hLa#|?./9>{jdIi}]J_|>,`d6S3mNM9qUHNIr71@J;:O-;W\_͊N]kVZti W?VH]OS}n^cc5Fy@o#] ~-EaP+]nF=ҩ6Z[& IW%B1>i{-(.~6ȣo|50 tJUwii" }Ǡ>̵7FpM@B.:/9 sI4ZZK^w8-/TU@ Mf6u.1q R|[qI[!hSQ$&L{ט$j@ ɀ- U{7?xTms ͒ofz"3\ҩӱ2ߪqRx/%ӱq>u7>s/w\wڏr/} .,5$ie)KpbxLք.:7KNр>@o i-F!{ڡ;/_ˮ _'mCG^ʚ_I˙ +RM| W-]yѳ;d5*rrqPc'#&1XKxQ==f~qx':0eG xXvJ!ȫY([)G *ʼ%5Hiiov861oB*!ulwNwTeɒK;OS! oߟZd(Lg*˪JSG)m}ZOō N:mE9U:c%8R[XSTV*oҽ[t)Iycȿ ǭjmTo|8 Vg柬e࿈Jэ{z-p3 [ aYK-KX7^F1,X4HOn坷=2gK™s\I[!]<S}Gۭ/ ݗ 5; fv5bҦ58^u񐈺V\jd~ *s;RéX;"XmL=gKi6\;J[i*%sTI񌨳-j!;wʷxhV^41DϚ9b KMa36ɿ~PUŌ$6AX>49B H WF%%ofNĪ)gRvFRA!fr dBTbQFT&A|₉6;<SҸә3^8&Ի3,s zFޟ p|~5k:D|ZPP'n6_ v.0`=dX ۚt-J'Wnuim &V9mtse;5~)x B+Xʠme8?i~ukУN[n|ÕKeVHEǶ~`P=}ŃsKbCwnH6m~<S3HL[cJ!&!6u!ޮڏ6q R|[1Ǎv&vmΩ^T@.jڗD^4@;jܭ} ,EO-t!W4iʕ ͭf?Ҵ5HifK`N[!MSPU* St` ޙ GwH_zY[5\9[Os9UK^P՘W^5YԧGpܩWԨiu=fX6\@^l_!8O|ᕀ x>F>mmWj][]r{;]?'N Tr#ZhA8{=2QfwB["- )r \|)s͂DNa2!Vs:>Қ?G ayk,Q1;[=P]%I&LDoqe8ޒ ,r i/mag:Jɂ^Y \R3IQ:mK2˿mA$Uhfȳ6O*l?E?˽7 Ơւd[:ea͡?Sʡȶ~0N{ ;d ˏ֏VcPe^VN8 ,.5x̖ ^gi7uhx Be6i+[Ƕ~pw|rtU}qOe]?o]_0 Vf([~b,3ʲ_EI;-Q\ qDZ3p⺥1r %*OQ ۷h3EV8m* _MX-s5oЏsWL ?p9z 5[0ՙ_RfV*NK$Ғte@4QoBX;,u S[u5~HCX\ fAYޟ)m}-pP\T @B/񽔳# ph [6~+Xk`lp0I!2'}s̖{]mfNMmc[s;?L9ըX.8+!x1?;"P 5?fKǵb8jX,/,2_]סY-*Ew\d ;_{E j>/*.]WPZSvLNVF!!ޟ[XDP*&>>`o#S,?2q"m}Z[2K7S9m-ΗV FiE[:C5Z_O! 
c?Gf)m>-Lw\|Jl{S"[[&7+)m+0bCFɠOT8rVd9#3R endstream endobj 274 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 279 0 obj << /Length 938 /Filter /FlateDecode >> stream xڕVYo0~_Rp9*=Tݷ d׿g̱%i\a^?D$1¤Wn7"eӝH6⍓[e,dĞD^qU",'Hume4sc gΚ= B##"ʯVSδ_xU=9wXžḥ.T0YBAuAR,0g =8 S{xhc z~rYheNI,@Neo]qjo%?k7A\Ġ*pDkIn\ ~օ-;=[unE XbӉKXWJ޼َe6T O.| TW\ך!6"YMAdoD.ѱWޟ}5I>:uc6ឰn-9~pYXZv?@;;YI 6D/ГurÖܞ[MDW&"׉J-L7K\h[O`k';J7mP WNCi6TJGne2HZے6c[Ȫ)%HD*7ElKW\L=+ f/l3-T`1DKz5ψ^S9d>>[쪞vP&*b ~QD:sD]u4D٫SGJwdu!Ƅ1 q"c893h5t:bKvד0 Ӏ~6Ҥ"ymNg gu:e > /ExtGState << >>/ColorSpace << /sRGB 284 0 R >>>> /Length 48483 /Filter /FlateDecode >> stream x}KnIRݼ~d.~LA%dӒ! 
C7FޱV}jK]̝;?~>jR-_ǿ_O>?yx?+Or)GRG[mտ|_%^Q?|ԝM?r~G_?_~5y=@?ۚ~Vok~@?>/]txoe{ay:ǿ>g+J:p[O&`P'h?o{>JGۜO5^寬o qw?mlV~xo*}S-KWx67vͷ~Q|L!J}VDzid=囷-'W&ۊ.u7)6?߽mO7}_K{fMKǒyFp^M3t7oo6Ov3Oj~IW#!j|(Eg]+>n~랹.^3f,~`mrVlfб[fq7}ڬ١G쫡Vy[}<<ͤզY%mO}cΏz;+nV3'Oy3tWFu'JV\k՜.mf:K<H9)~%=_C~)=rY[yJO#[fpw7+Τ\Xrx+.jLst7mfm݂zfUgә7mzǐ`L,젽jСq[+xl] : W}ů>χ[լ{Fk3Wo&y[+))U>ů>NuwVBOq1k8vW{ݶ^H+~1[bj6woH+~}H[٬:+~u>}v+~y,[ߟwxc+>+~u3b_OeϋK]kT2L}v+^JWY2.C(>JՉRqKǒ-Ğ3˷/κ ]{Fj蠤Lg6#;֜7Φ}ΘϏ ;ǟ`)$M%Vg&8^PG[?m:łm?-fj$bRleuZŤ{Op; LcYlϳ@qa;RGD_uгVv"Ǥhxc-fnš_)W\ Iham Jf=}=(.zcpZ&ҙ]1{x &gӫgw(U "~Aϣvվp(;l90 {~:3Jt!m:),YV :w:\Ry aSxC;rʮYUpq=퐥huքFSTE<~JsQRgn?{dgd 2ĒW-kI)SBPk&'~"}o/ 4l"8 wN""溥ɬA;Wj^}\ňli5VjM#BixW 6%홏1p%Vp 9&D# @2E,SӪ]XZeK1-t/>H.jV.¡GTQjbR@5qNhČ\LZ<<_0*讌&} k?ƓGeO1ImTWFPimaI&u +x:ΨCQg ekƓZ alZo&=OZZoZ[RXAҟΝO9 O2ӚbRtκ岭b ߃Č"n2B4v'76M{2xzAwLAj&tk<0)2QBp.}5;+[FX/{cƖ5aV.QOueqvQ$c )WAݧp$ Q3޼ &EƩ:~~^׺cνB(QGwYx޾n3=rJ:̄WTMa;reaO+FLxioښ2^W6Bg }䭕պ?2MxZrnJHƤscwDw™}N ]+dNALfxV:Wz˻v+ 9,Q*Lp~QqM߄L1ariP@۹8ǶiԟdǤvq>3n}sLMaMCl'<&E[.a2x<፞M,L z?_N=ۓ[25JҞtӥ!'g- o:8Z XcoGP,jfwa*&cRXxkwJZ=9&"E8tDFv Y+b^T9N8aץFY6ÿ ݈LVv<z8s3zݳ&)CaqL ޜ=u8SGlj'v+:&o^u ꕢǧ9k[x!R]ݹd9 " . 
θ\\O G𕺷aÿJE>A"&Tgp ^nxe7r4[-Q/,\Ak\-9hr$ZYԗ4՗#$ƖpO7YY(h]  mJUGr갍m>uYWs7n=JNxS`UGe|Wf_u:25\(IAws"e.x:!u5BO%1)2tzq*Q%*p|Hԫ["|9-'Ϊe&dR 1[ZF: Vo+G1u#dkcb k D6Pb~Ff_l;NKw i]^ ȾaGs!&w@QvL j4vrp^tw.$^G5)ggy:ېW v9Ʃ"[ =1)x+"g:B2Ǥ(܎ rr@-K'3.=>}'TdGYaB)jNܒ9K&!BgOȔѱvAo)f6{:;`'QoLx{r2&d 8*3KTd e[JORlx1bz4jSWc[^ pL^fVh0˻f812p\Q^ ECQNj"|4'g>׸> @šoe1k(P:#CuF¤\Q)AAX.a^ªcF=(j y*vKȨd??d4 ^/wBN!1(a/m~c"p|KQΛr]ƴ&Kl)mGP _c܏ZвsE+wim׏n|^4Vc_ K\;p);Zxe0)0Udߗ>BEn5X,AA7q/_ 7u3ޱXM U$R:tkB(,ӵ.??>,?&.b# arή.)vxMY﷥1Q?cuGE,[17EMr rlSHuj~HuqIA"5ƾX"Q0pi-qiX-ml[KPK\UiK>ׁ6eۊ,mb&UPB.F18KZ>vYkVj>&F[HPۧ96ɱނ)W[0lG[p܎93w( &Xgښ`DŽu8dp{o?Z )9v*w!s- ~*.u۪klgt:L_CԑpCLO,koIhQDi% +l5'뽵cjm_) u _WrCT6}Xَ ;KJlxc'$qMp6^v9Imq'+) ĤR-]58n zt#ckM-]o}׭(FԿ5ףjH)GR׷oMZ|auL)a|<뱛ǴA0OIjQJCUVekeF%2Y ی|krq;*ͱځ'4>yT $y7>^GAQʮU*2bqܮ]gPTozE~p#3=7dS;kC,0;s2E\V]Χ9nxnA&~3l~q;W\9 +}9bd˸:nqk\ MI2vdFMIOtSg<!aOK-/h#ZrǴtqL D:Ȩ Pm[=|LlDXSXc֫x>iFt#jFFߴ}1)&4ߥljMں^x%Ǥ|Q=E1>qW;thMPf1)`:(; hO\Q HǪOt%'ByV'Ca qU-W>tAsl;UwLfe|*Fc+,&Al&WL<EZ'{wŠWI䒘$Y7 /ZqLkqv 'Ɛ/: ;bt^{6H@^IpUU8; 9.[`6H?Q!w7G07Kfљi"ۛܝf՛$HUYU;Ǘ!; LNhGqI+/xcr˻,ĕQoS8n͊kzset]޲noz f3n x&qwekѻx1׻m SrI}xUeDpv pU|3 ަ{r2).{oME]߼*k~ԴS}#bxN+A޻. DwnSDo7|]<ֿ{AlL4be;Ǧ}OVnH3Y<Ln'Ameih&\Ec*PH]z˞'apōXٰ0)Gʷ ǡDnQ MzGᖤwoQ٘[<٘WT6&ᓍD2mmQZc0' maOr<_(xe A\:HGF)&L FUE8x:<B8qgy$qV[DGWyCZ1 ޖC Ym< D]-uq)Fȸa]Nƍ:'sDƍ:xƍq&V5y6tA1[`H2Y/ZbpgxĽPv,vT0|3c6l9uؽ~UG3kv:5pajl[oh0)`[mZrx,2nEPU֓qCdh8EN]+{;Y;nʅd:*V{H$pl)Ά%vwRRX".=ju)W, Ǒrߖ1x6LX9oˬ<.!(!aR, [J;gsry:&) [LO~2z?{`ׄU=cP،d<96bkߢ^3a? 
<n gsNؼ4 oxB;oZ&3Zu@!<"V!xwczKGVo=C>imj'xDBķPc<,$O^¤^4ˡSǧ(c 51 sVD]j)=Ku02xudRDE^a}1]]cQyvhaFءgn&<3Pkx.E!H̕S}TM^ɅOlrN͝3b&1)ӱ׳kHWMl|  +-x ZE-U[;n?u{\]r{|$ΒdS[҅- ?!K 3>U&M[X61S8 k#=MEXlS8!ߖL:G^9&Eus8ۦϽS?^{(# mq6)?w bb#k:rVG,{٣q{@ˆ#TTxN2+׏8)T0:BDU!K:c;'ɩqc&gais 3r2e0Tw'"p0nI)?h^Աz|&wr-Tw1cA<.:ü rR GC( Ҳߙm}osOrpd^<<,(“_G43q Fp mF h{EXg{v˝߾DsQ:-s Q`Q&Vz;Ej_wu(tR4D)jt \p,’2M4(7*d*)GJ*y( oR$`< |a|ދ_Uׅ#a]]<ԾiV&-G~^~ޮ ?mrS ;{ qlp|n󢇱$#|$#8>Ɂ':$Q1):4Ɏ<ҭe kYr-M-+bkVoΉ WT֬ G!Go-g -V q;<{K*5,IHZ[8ň sO0uI0ĤiU\xas>H[`5cu||]UTE)b7ΊbБwr֚v \˖eYn<4)qЯwvd)^j`yw"\bXhTa/~.9.&d$F87mgxo.c:yR`;L.΍+[ [muVXXS8t02Բ4Fp iGm]l#vP?&AWPݦ8" [۱ȒoM!v"9+f{=̤ !a yo]W2M-Fۨ;BxIհ:\ #մa&::S^|]lVxy3Cp_OT;l2LWrU݋LD溜 h/jiN.sEqET.r w:0sebR4#lz(k^ZR#1HL)EB^+F쑘4F^Y+] xR躮ͱh1pW~}Lɞ@nY,V+Rw.b7xj[z¤8k^*[~)L 6Y6 +(:ݣgGg.Vê6/k.HfN:/±BXyDZZ2:gR cR :o tVFtI=EOfz<ۙ8FRem? 2]ج3™RL["72 ;8YT"YLce^0)4:ȇмU91)hX Cn1)٭ .ѻ%aRtt" a(f=zj sp ǑO G}ș" ܵ;t'0 "IޖVtG{8ڛ*h8m :Wф֋;e%ў;n +0mz;֞l}ѓ}IMX}Z$wQSR;&ŊLO$8kqORCv-mQ{}}Zͮ^emaԣrNsPIg+m:cGF=Iqwqpoԅ{pp$ W՗9K>\وkq˙Aaƚm-"ppd“,1}v"cR]v:&FynKS})orSպҩוuXV:u6`egax IX Exd9vh|StM8x: ճ3;""bt{4\ehO*-L8\(' XҘ>f؅ImtS%cSk٤+B{ ϳVISXsw˳5l- w`Sp=k1):8gxj0Vi1)*[']-핲&o9K! 
K;g#{“ytjy]0tCs Cs{ f; p8lMX;t(Wskceuk|3mfu uk!@s\@XO0bfI-y:<,݅\*giR4p¡=Ux Ly.pUNe9Gvlwpꄱ“BDH9fgR5d&iԑ.{ơpD8vYǘm2}cxrJ=(/Jp@tLoR8MzO,~h־U8=%Omyh|q] X˷-V0}Y-:w5::FS8g5c}}w L&6Ƒ4)2gAy E~ oK?Ǥ2AsH<qrFy5[]&ܶgu+x̶i9v"p f|Ǎ5?|N8ObLwq]Bhnlbp(poĘ+98Uvs\DňNw JoG{sGhoGZ'z<6f.#f_vkqN5'PFw҄E< cU OEX|rg[# 0`f;vp9iAxW:Fswϻʁp>0 󤰒z:tLӖ0K!3p8dvJҲWٱN-> b,"$횅M8f2AyF 7q}paTm35mnƒB+Q!Z)P㎏2CegJF3%n4q Δ_k_"\4 2Z|L:bL~*Mf]j":lؙsT ;˹3#Kq#FϽuXo)t ӷOS$I.\&PQI^^GªV0¯w${AW aSڞ,t-6B30{AbՑ9C[ܓMY=yGQGڗchp mwu *8os y\A?3p1t00CWVSCGLs[ GO?:ƢA BMl1 z9<|&.o yHS_'e\^~0iR]i,~`o)eڕlIF7M0qtevww꾱l7.ܕa]L՜yqs=}M]26}U(5-eKk<ҹ o|WlLF7.LtWr\}2hM*SN;Wqoa`.rUdeK/"%̥WsdWow=PM\^4.u#NXoJ,ny{n.;P݅iJ=tJ{`o_Q<1eXh{,Իs^`\4M}nF7b?z&w ^%{4*y7J'vһ⑉z-8b Uj^i 0?ł]k@b+sw_OU6&2!)oCis*uӓ+É9 NsoozL}Q|8ziW jsq|Y}%;oS NZ;:l]%[G+uRfvf?(WwR]^wZ W6XIg]5&ںm(¾x9M7e)Bq90):Uxx-(3*E2/)2LI8Rdi2GLUn-3xjE]W?i6i%" ި4#͆)#[ ,p¤`$p l_0:-C@z1LMS8 \b-㗳MS-Eث#]2\b8ab4:G_F3Qߢr8x* ZXk> k<< b]w Kb2v¤kc㥰$6ihjQop{[)zSdց03y0Vkf[20~Y8:З^Jn=A KL BnY95כpueueeId}zaӃAFvX(.L z(pܒ(튴+t(4,;"EFH $" CڷZ yܕ,ĊAo+bR0r+ exg:wxnv`J=pӎkOj=)eI)39R2vxœ}֨{ҝ|n 6=Fk=1)h[_¤ 8Nܵ2r1)b $ s6sb/¤Vx-y(^Upi7p[IgXq0ٯcRsl "U9C|H^u~1:KI1S.ɘuco[Xo\ћBa!pn<OvI8W2]{Ьڃ4I'i&x>o8G#wL&Tc}f&VK)W GcjշSXv3S8ޱnMOGBʦ¤L\6uBhvf$,|r(b 5SIm"E!얦ƠV6a atI8{P$d1AMJgܐ=0g4O?3O~c<>ƣ9R(F{4 .VG~Ϡ0 5 im&. /bMHo52 ;։\hFlA O'B LN2k\cM{Bn|e%&GBOuues܃bN~u-2+&&łMbjL[xf'yN9d+r(WC%"~*)a4ϴ<fXB^%n52Sv)g uϺXI.E"܅펉 tϏ5?RvNcY:$;qd# ]"cRTiL>-pG[bbf'%[&IT>@JmwK -\/JC-k,Q(La-+wUI,켘EA^uculDx抗^2cR0QM%:''myn:-KZ$.) rv"܉c9~)Xm~7,ZRvX'rk׾p=ky tAz邂b2PS.暰2vd$9X+0x-xK,eڄůW].Su;C!Y05G%CzV_m keWr#4Dez3<҈ufo.+xńb jzL~S%8y:N񋧫Eqlmnq唸GoYo=_ {\Z}< 8~I׺ohʹohit |TPFagIN* ʴ VCLM $گGTk^=tNd:GǒpS-#uP;)C(.)CF6&%t6K[\0TPx#72NxF.Uc(w8#IH N᐀=Vv)ړX` ϹsǤ|sq;geZ55VacpkG_P(K}3b4 'M,U + l$ %d .{85->'SB$wgc!|DFOK`*IN6\_ P:&TH]q;:iXٓ.[8Ēl^3HXL$~͏Ѯc-?X%qf[)V 㵉[;=_A&Oak°7q>nY.Uyt K,܂֙ՒۡtR~;NJO;{GM(Ҳ8>= Wq l+hPtDk'{Eqw7iTq{@pT|Uor8yi.fp*Q%Ge&2{SIX_%4 n=q 3] +30yiQb_b%q׫(텉 ;2::ޯRfj-rX-T+;lvve6gLw+̘nۻHU)J͖38ҰwG2seϻWϺs7. 
k_yo5W~ՙ#]p.WA=U.ws[^ch;Gp UP#\~x 4b?HSZ&%N67 vxwm6L+71v*WM_ׇ[W.tws;Uq# FƞV&|θF26&PuqMlmlg}OX ]G.W 6&`Hq`f‹XWLoY )Ox? s:OY;fxr3^v܉?';b g. býdM(~]d;,2sK_ mV\5 QffzK;̃%GqDò7[eȬd~ގs.n;#p\eeg7aj2+|ws7U}>W^>fVv#sf#M wX~ ⑋8 =A`i2`o·CNUφ7̭}ϗ3G./tحm<_fXhÒ쨞="n]iX6S0f†,!TY0 7by*X7N,*,'M&kejBϪa[cllzSF~rxfүίDg5W_tUshn>n'1j!!UL ,:1-~k_Q2{ad?ezg+| wˆl}o\ Tu/oK lo4g}0&cߨ'ɊzΪ {&;t{~0F̌(=UXD'BB8 ;zkPq~uqs9?Q 7قtQ~rcGqwlswc | ƙ3b{[pw"]ϧ]s;s_wR șx+WD?#W@IwP|`n_J!G_vuDul0~ŧ.:V) h<daيX%#P-TJH-(ƧCϫ[ܚ޲*g=!G B؃ggZN(B[{lUF<"̙Gߑadiw8f.,1aX]~󃪼S @rʌ-O߫5)D^<.kN?[0~t _wryu~]K|%(vZ"=nG ag2aD?PT:`V/LAil1~| Qdo3nwxo|~!3^>|9_fQ>ɴx]@ԕ-9/lovi`xU wN(d+7rF,1B}U7$`CwAcCǀ *l 2L'?M m(/61Uvg0fY`n7cgE3ǟpR' ;mw"' `M(9eg/Qm|Cp!ܒKb~bs~9a<}9p kQԺ]vZSXbK= 0rL 9u'gO#>50yK-nG=Xpj[*xtU>5up@pf_x_ۻ$ݎ#Mbs"@&dtfuuS5v Dof$Lx/KWw+Ώ30A%fcS~$4ٶg8>[]!1zg^Quh~FĩX&x^lK3?G /_q6VZ#E7%F/+_vm0eÿت6ful;>A&"߉p( {>9T(Za $19w.A4¼b+=x| x@ *l#'Xp~\&sJJK"Ic *'h"|BBcɹBhѯS<GmY"}i{wh_erQVx)*RbfIjR|pE_"_0UT~͜| j)[(_{.]7w1EWɻPJ c!2 :Xa>D?}$$0k}cm!W'eQQHNhaUw("1ƻ~麎#U:3 1,ZupC؅3< /ʸ-ִG(,Qsp{t)=O .|Bq7Ӄ.-襻ZM֡W^aukmwՠV0;~ )s;XM,z3c !'N{CAAWA`{?9b90%qs(P%^i}b9[2nw pK v7Y(Q`]zOysۘm<6) sz˷Ϥh8 {Sh Z_(e4 z6+NB1d:Տ=${Oi 8 ch~$)Zx: )T?ǟ?6V+hMuⷽޗq+OŞ&BW䈫r$*I;HTPܷ-E&ߖt8nY28RN>ȊlmAuo@2bx|EIOhƋ0O_%'Л35qƹ`< AJs܍{rg8"bCr_g5OGg{HC 99^&Fg_(W zgó_b?~02ø^ (8'QnR }&qV K<0ƼaxSN*<8>WyE_۵ 6*_mTį6t8hT)i!ֻոߐ)8EzW#+h@Ζ8g\]g#9*>nļ'wdr xFȰL8SPܾۏDv Υ8ԍ:4e_k=d7'&q@f-*yGyM6_W,uopHE/\T g<>tiCa D=E8uaקFs̛^x]˳r0HǹJgBwem~MΔAm1Low}}uڝ/qY,2q#_'f˔bc`/^6- :Ca^|) "K,4a*>1^<7북 o66~ 軍R8p݆0hm ca:rw~EO~twcӈzSQluq3jq/ӇO!v[8c* ߤ&Izw]83cq7؏s8b-EX)ւxNshj3v8M%NyJctG\@+x75IM 6}R 6}PLCŞsE Ur z=+I R\T=I H17g3Ŕ?Ha s$ (w qn.Hc#(fmbQ)Y[tVriqiR }[T뷨 uW<%6w6Oڲ lcC16,npKQ)QE6ip#g?I?("IQ/d#?Hʠa_}S$ QE9R$:$Ypo1 (Yĸ`w'v`*Im!) 
r毃1>ɩo1gc*ϵ E5i\H?{} Qګ||j@0l?Վ瓊K W" _F|d;?}%Jq_Y W Fl|6ZQ0'_]>q6 ) qaDԆ߯]13߯ݐI$5Oo2ІGwqe/EuO5vt_p?>HHʼ@~ޙ"||n+ļ& 9X>U2EZ(3ۓǬ/KL`]zI {gY@@R~7˯_|\XSؕF0M\ُFL;vY+ɔLNhIo"nL ZTH"ڇx*V$߫1ͮ ]RQ䫤TsLC Et~3Y8U*~2aEhTT{e~ ?UG(7|eOxM<0G 4M},>T8I֡3r>\"sԃ&nJyB2%V z|o,Qg~9)" #Q0>>g !m3/X?ojA[pFBymAb+֣-p}"3XQ*([}f k70؍=, Uo0Q P@tO&ʑb{ ӪdpcV5!ˑ\3Q aCrjh9@ɟl5ff}W}uEęH?,G`Cs9-d !'~>}>OsmX(}3. Af_.'<̋C]e3沝m ?QI\Lx *ܟ#tg9e/>u/ʧ/>x?2/pS:+|0dP8Gqx7rAaXcdr&Gs+<*W(n<#$j|A& fr#Uؘmm$ᯍܲVhddV0LưNB` ?콮=?LM$ݭ3/!ˑRX >'ϝc3NuVRHR' 9>h~i%L7 x~jȐ؇yA~ ߛ[+禤Cx ?StA3')b@>ad,<ʍ  ,*)]D}/ɒJ4|~8>EC&I}?C /tI g6GlBɿV>ID,$KR: ݟtpFoJQ42v?\NIgT(C $a~a߂=?)!/x!,LF*O&W}g}~'(&~oOW`ٝ.B !ˑn -U37tn}[RxOXnln?&!lϧIM0)33FpYNLi?14b@I$pU p"%W*DH )Qa=?CPfB&0#BM c`/awr~t/B/:y$bC#I|X+:,cxMp\@GUO4|@\E0DLN`0cJIƭ$`4T*h ¼4Gqr; EhdEƠ؟ldG& Sȏ 2zk5p+6y,}%xb~J$ w uXg$>x},j>ؙt\NՑekZ'.9"_[_ԨW%NHզ]KRZ/S%\iu*,_ѭx COk*4ɮ gg I!FcpIF1Ȯil)>"U)((z/}FGI(mMg$KkE ᖛ0KB͟c^QR}> v؂ {=׈/iiZܨ{ ˘W ^S*xgA`~k 7e]MiP 2p0n+ Uh^)CNӭQ0f2|ʍ2Sͧ%Ys=Wx{)V8z?[3Zw̎p$Ad8š;*mk$LF@ft1OdAىrƳ'I=xwcFD>d\5 #E!_enON=}ɕBCmLBuW4A#码$@ k2ax{ mo516d<Kqƞ~8F3;W¿x5 tc=Q}/wVĀ? +O6|M|ؗ/|cۏXPM1 9KNbsvg]Ĺ) l Fʧ'#aQ.il,J!fX[rF֛C:,PU<2Y)z5u<]ASEbm*E8}'35J`b^A[YxٶG8,%b^AWQ0[wWeчK]w+5mĶ-m 򽐠w2NSO; S[@SR&P-8 x&LcϧG>:bv ֛1L?ם_yŶ?x܀燊~ÎuI~lsO|Ɨ^΍:ucwc£5grSLne[Lo!/wοX8 /-lqris<8Ye.|+cI+&}9Ȃ医Ùb tcדwj ⸻mkE'$[*%-W>Eo'xMo9DW)tߤ3CpAc_`}}}ʱL{Bh6O=rpu)9|D^\; rdW6z3<5D{c>woD P͓O+%5okk~ڌ_F) KYjCOd3Q&_h;FEBB!8c8뤦14j0j#5h?U:XǣR}vkcz^} /O~ uG_nC_C Yċ }u}"6QFϭ>T |*ӣB+|}|_G8VFN |;D Ɠ/6C2޾5d>˰y&]Sƃ89?Nmzs/S(a:9a?M1#`ƄДc9cGy;,Gx3S?Ι!SG m8fǁrfhT$rվ9~Y}paLasNȠA;O^HDpjy#B#6CƂ͜##{ڋsdlx9b#c#.H>ʱu]9OxNw/ʃ~gvb,$t4<@EP{ߐY<ꡜ~plGr<fNXQ,GX.' 
XPz7^7벾B Q{,G(Bh _O~_Bh4ߍA8`T$q}^X~|4?ps}Asko||}z}}9CgrLx)T6HP[$Yx><L#V°rHvt$-6ʼn7kGLXQ\N7`Pm9' sb28 >L䬗"#WKp&}d}H#m4ـi/Άal ei~ ~ARx}٨ !=/ 9}ũDŽ!8}J·ΜON>I `8| \CTx!e3δoÔAYTUAo~¼'v~ obj~tx=m·>_oB0/"xk~?Eor`R,`!eTakq4uNk?s挈 ׾wѳ뮟 ]^?ɜ%ΪΜ˕ $K09FAt?C3i؂on$u4qGu< ~~*P`$Sbr{B#j?.P(D!<+'I;|!d9 L?= 1YpdyÆTSef RF9k#,gBY|IpĄґsOcdzȹ/SSGcrD)x>H941k0;fE)ļRg2(\s@"HohŌ89w:!];]/3e/j]՘\/k=->L+SɸYh-D 'fܝT2~{b=d^G!1ܘоĒ֓g_݆KH#F&͘pDe+`I@ Sq&>qS瑳܏z5_7o<tqz}$t^?/x|QBl N'b^ͩոܹ G%c@7Q6x \3>H~dEF7 ` [\A\! BIxoT;< EC䔌9!7Pa*~Os.0ljtl&n2Sh D|F|s]m@oJ:*nch0)~j.8J&8G/)I+{1>y/E`{nLv4fswƤ1:hQ !]{H_@p}}Ɣ՚RS+}eSM4Y𕽨]¶R$NjNv=Q_jKvTA_(UTI|㸞IG3̳IUH~6nYg9XwcJ,)T֔g%sxWA<@o!J%7 ˥TI*m]zY6dyE%u+-AļbC؉ݽȧWgS2Y=Tc2vo hG(ڷ%sE3I {e? {W?tv? 6:wm+܆mCsaZ4޴FQDdm<¼DbJ|)v|xE*ga^M_i+8`6o0^,YO }yE5LPx|Jmݒۏλk$a\4 ՘m<&pYI1|S(|ƥI99$yE2K:0l}`+Ue!q[ΣgC6sm>| >v˦Gﶢ:n+QHh'v=3ݿt؇2/cۇ2Dd.^A)d=jqnzXǍ($(\ ~)=F=sa 3:$Z5RʹKC~H4cD>7*}#=;zA(Ԇu,%jSy|Hޟ'>Ho^ }d!AuLqd}$l="{#|zjpŌ){6T>2O끥;O |"n~Bj=8'oK"j#jbg1}"-L ujCHbOdH3»S?S?j#Ph!}SӼq.lr֗YK} .40?+rA>̩q!|É> Qܐ/ unCh FtR<ߨtP>٬Aa~<ȲǾVgMyEꓚ`W@/L'`}zir_CRhSwl>"gv I l/NߪuVT/ }5'OJއ__ DV$.y>ȼEo^ViCV>W- 8˘c?_*X!=·(Q?QRۤ~d]7{0"&܏a/i\3'svHJ*S}M/f!Mq~|Hd*e0;?Uo? 9~T2ǯ& 疃և*>eo}[uð !rշ͠C&2aZ&d9Rf` H0p@Z<=_%e\HgjJp8ڡN\iY>ۤy.DjOrLzgVV'|կpgG ˷5,r24ݎ"WZ;.GneHZLAz|BKaz^ f?qx2,o |nXY7'€b|"5QA@a!U36L!9>}Tp}?I+~O0f FLa1B"(޿`sLiD{BC6ǂ_}*L/l._ $]h߮'?j G3L~p ^b8CB~FAmT@jT!d9[}S>g*)ߝ by?\̏k0[cf~'i'f曟7ؼ㽾.j}C\6׏2~.W屻Twr,oTE1?m,TE)('w}gx/>~~P 2LQZe-?/+˙dA_ |Yp.^G9RCƞ 7_!)K1==>p5Aa]/Do(u͏z~mx _?y#O0'sB&E{FE(" Npd<\?Ja}xLrP0yGA#]kK9RsabKY=&nH倣83&{U''>I~b'[6JY?^ ~G+?V/&~tgW}@f7Fa 䊭e7  QIYz}}l+C=^`gl Qe?O0̲rEe+ǡy+/R5_0eDx ~J1t&d$4,*\_ևgqI8<t$,n 5dX`Ts8>< ;C L`j(!d9]3 ~ 6ic|")k+s~M7z2,G.'\c;N2'T<%c0 3UU[[w4^1>jO~k䷎ ٲk?󃟶 .!Š~Sp};}oA/@/S?b18y,/S[s~< {sw(}WڷG:"~7H_ jq!mlHȿ }j/Gs~bFҒ_>r~AP~uGc'ak_sd}8dfO~{_MYs;Y_0{5UWwzWL_d}W}x–Nϒ0K1!,~ +;!a ߰y79u ^|ĻրR}¬ NhVT."d9ík -^mv >46swԜo DZ}6Y45ן?5%Hk`<<넶mGxٿ}?8 G*_W}>sSޯ7SGdItaF`|&nx N&~v۷8ɪ jŀ$8>&ɯ?R)St//CaO?G&3 GӿVm|opήBwQgA_oN¡J;/Ox9xLNH_v _NSKuSXfSnL}3 S- }vAϏap)ѡB'gw (. 
_K8n}?^)(>awB 9~(3e HvJ.)z}wAs6778_x||Ѿ4L_.T͡>_rdƆ_.=ۗsV_7nڷF,<wY)~zÓۀnrN >=x?gsp[/憎m<}].'<] "4 z|E&zC9b=:#VgA{S}Pq%;n*'i3l疠O_p_@-{wj|?,89'dZ>^>?'T^үD8Qd9l8u^A;~ců[^?!˫ۯG[/op)(y8,~>ڱIEVrB_ Ʃp+ A5:, ,'b;3j?W_0Ycq+IXt|C A~WsF?;׷G'χ (qoA=/}0hN}37>Wsq->gWӁO3q1q7o z@o?wp&=Are?|~ _u<ر[-#>6k;'Xnz{{3V6Bc)f+,-y%pO 66|kEzBX3>k5Ế?  AסG_."oG8odd5wr8fn> cgk}(? }M}<Ǜӂ|cw|>CAwʖ)>rvڿbW—"sЩW03zP!778:_"Y_IYN' ˫8g#gYsVTA/S}P7 ׏Y=3v|=/ƧLDmǷ</(gb`>*N~_W`>֫~sf}ì?jeC oP>|!˧En~j+/!/D_ ^仲^<xF}q8ž/>MA?qK1HJ0oP-ÝZK_1ҵa+Sl>ߏE0?R}fr_sߎ<}>x B?> )Y *+/!tul؏}=[GOd|KAGPSru /n|)N}{pGB}/ k4&n~ia|E]銞O~gO|x!x'4Nw_~rL8Pa?ɝC*'FQ_p@Aa}? 30 >(KtMu'냞'Δp?G;•<='I}rkòg5rBFV%Q~ƚ(0̉Y#}sÌOYU1 |g ʱ? NO A\gk1=KO #ɺlEq@ԫ ojkUAW̿ٶ (xJL/g/>U}RJ7R `{4qˎ 5(A0w{2 pnSX[0%P&˫ dh5ֆZ\QW_i!Y+ʈ(8ӘWl P[6yTvwۻh w vefM:Ue{e$)W_ƶ ;1}ُu .>k,Ws-| {2 G=Ķ(T=R @CxLIym9i,}Lǭ$3zx0mH@NE7H@WP0y%X3.R^7I#Q9J)ƶ-mxDRY{K{Q+S=͖vs\n[3Ynn;zH|x(%_ܧxmzj $Q8)qnxB~Iib^AKng 'NM'dXgm8Wrł},clfc^JV. sbvp{%@<ѩGjn=ym~&lOLc9LeiI/N|a'{8(&sm9QWm bމor{H aCۆ( BN`qK_bSl)FlJl`?n=}7iZLcOyxeH+>#guCg*2;#orWKl\)68V).mt%XGy1*(mژǑWB8]W( /ONޣvc=*{V}U)r퓢ݚ|lR;ҸkPx^ GڕNjyPCG r WlT'}B/rncpjcz~'ȀTc^lhcG^r>=..B1a+yלRP H֌yVFr*pl[,iV0X% |Q6׽q= C$7hyR>Y%ޝVCh^ƗOٶe^X-HDe'4Bx8W%yx9lž?1໵-Ŭ.HO}e.Kd `Q;/k%3!@Pd;&mffm8۰nh^%ޮݑ{{CFoK= ?ǥrXhl^%= =Ǵ|`>|SSq]bFV7\ɦH~SW>k"1؜̅b<>\+Ǹ+{wY)=7DŽ3m0am5JE;SPo`da:,y,˴^|8׉[nA7^-R{.af x%ġ's^cnS,P! eBPCы;3ʿ؋o.aIw'YU:Y[Uyщ =mI<mpjpטmڨ<m>aMoHhdOzČb[ғy;n?nл0 1Coƾ)udޙ͘_ԃbcDшkܨm5o4ԮKzυM_Oٞ'\xxWy C1 pOR=41QY,+N£nE_Ry_B/gE0ޖo%~"eU_b?? 
endstream endobj 286 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 275 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-032.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 287 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 288 0 R/F3 289 0 R>> /ExtGState << >>/ColorSpace << /sRGB 290 0 R >>>> /Length 1856 /Filter /FlateDecode >> stream xYˎ\E W2]6D"LK,P($ɄL}q 1yrq]nI/w~OO?}YzTr)%]?'|<ؿl:H2Yyt~tN_}/6s|U%=M.pp?t~IY9~2^R&_}ϿŧO?N%=Izw'C~sjrL!auZ Ⱥ\%+I/y >Px^HpfϢIfBE!UaQ\yn|'=W[!Yʠ=U47- ._yx\.=aIפ|Qn>k?j1rlxny̤mwotb[?#^AuۧlVc82|8 S*ȨIϙx嚫 uǛ2'p oiL;\psJs T-Wlq>#8#Q;q󰦺ѳ8?셤#u;bT%GdMDk隷Eypc%ăǃF?<2S"@{=A1u*?6|'=˕1( !d؏cJe]tDXy>[#.-6+n"x<⪍\RB̝ Pfh@*l+-ZG68ؐ&fØ.g qhH<$ϡj H@]}vIIox{FoH)p|mJv+"vÊ0~vWؚw<Ю[dxM6 COais~ e'}SP@90˃iݠId⥈r1 $:cRe |)˦vO 5 1]w\>9Qۨn8t"}`mSn;d=~1oTmOwWсџx!(|+ԅ\|Onٞ{/, FQ1|pu]l࣊`$3`sH T]'@D κ_>`%+8+Hbho:ee,tt43y;z0:dxKX Kh$m}DҦyU #ABH@- ;EM\!XnSH;(8#pxCr-EP+hr\7X,vllEymW4<|$iY&l#%m XhZV<emz$Zaw+7Ca츍kyrt*xq$Sr] +{eҲ87G4m>L| V j_Dw-򆀡B3sN"d%UQ{V1$ғ{-P]ΉUTȵ{RME7Y94:#V|<뚏ppdωo(pvΏzSV%CL-p]osQjpcYgM!(nmTKlT}(0xE.Rÿu?B X7Q7P~Ľ;< rݞnˢ9ǯB9{TY>m5\nnNC53wF}X"T9qQ{#!64'yuVawk@wpVn Ҍda-(Wʫ, *:X endstream endobj 292 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l 
*?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 297 0 obj << /Length 910 /Filter /FlateDecode >> stream xڭUێ0}WDJIW Z"mJ=ss".BQdܤaiwSmk%-ZZ--{9uZhU.j0_u'@Ygw+?G3TKlZG#^:L%^^3] v0>j(iZ|dPo <6MzOsEBདྷ4^NxzRP\ϥحEF{PJqBWpOEW834Ԭ8Fv?S[K-"9vp~UNckL7ܣRNi xfg/1A g1@ -ɞ?)ݘEf_r@Ĩiz܌ezwOTYPcͻv[qg񽽍krډCGkBs4İ̈́]J { ɕR-ՈIz90Ѵ;鶃@~ͫ+jD婰^,iE29={ǭL[ `~|WGDާ]$Jfű¤ 7J0l&:4mWo6rWĨ#Qj(WKz"?8^{ތJBvބxyFF{@X5;v2Qh}A2%0&)ֻHG1):3\4o((әs95\Rv2ϼ +4PNkoП1Eik Ӭxgչ70 $ L+ uJAg<Ӌ6 &+KUYb[9 endstream endobj 276 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-033.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 298 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 299 0 R/F2 300 0 R/F3 301 0 R>> /ExtGState << >>/ColorSpace << /sRGB 302 0 R >>>> /Length 11005 /Filter /FlateDecode >> stream x]ˮ,qܟ` ` xax!2 x]s(ւ ޞꪬȬx-޾_/o}y 㿿|ۯ5[,w KqV~O?߾_s:?~?ۏۗ?O_S%ؿGn d?=oy0o_px;?[.꽕[ >퇷V}7h6n){L 0s}[lxi|eኇp K̈-ׄ?pFXfoqጟ-O~}^(^9 m̀%>E O{Z3/kP{nPsҼg|oӷm~ܾe7_㽞̅tLIfXFl||{X}2~='|4WVĸͺc_;0Ը8\7xc!NO[֒Zs V/7~7M37>{Fl;_?3_γcaٺ&S7n~MLXyd:N[&wL_>` ׵pMk u_k=9u0 \\2ve /s^wZ|u0E{[Z/|mb}]+|5rچy*맯_apGEF-w/O:[)Mx+)Λh1omk\e^!ܹBWx2_Uzku,;S 6ӍYްn~YXq[[ug[F܆nS-a+% . _t#_ k_we,F1wܟ iwlZ:c#w"㣾^!>ZŖϾltK֒e1Vy̺4ĜO#n<pG7o;J%<4/0"m)ݦtڡ0+ږڎ^ZŢ[WvO1 &)~;۸"*Z rewvVxb?c!y[ f'ŴY[>vmaYnmiͳ!`&n'qUxv|.cg֘g:E$Un쌉fP.0!4IJ߰C뮡qÄHk`}|B9гٚ6n7w]^A:Xpi-QҶ2{\qy'ދ2ye'2@@}=oCxUR$;K s\;v8`'rI1mEBw!YVخ1H,I.|3=-k+]ֆϞ·92ky+OeMj]&3АtLko0k΃a˻(FEؾפ/qNVԸ;i4!o$kvB" -]ˆ{^fS!"tF,%F)$Gs̨u{m? 
b|Y)\koϙl|s+ >+znAw'R˜ǃw(4yEaw/Bx$JCT kP:#?ˆ'F>E)BĄyKĺ 0% XFm5Gn(".E~ 1x&ߊ9i$QXxg34"4*1^3rN+R`6yX7nfӭ_I a^b$Gq&v9r#xDf7G oxM֚.L#p܍-G/ӷƛ!JTdb#ޞ;E%26x\esE:ȱ[~,1Fǚ -.jAԀ ~ٙo+1ڣ9v~bk^z * rNښڱ:vbMAbz\w2OM]!nah;!O74'a]Qhc}Kzբ[vS ?/^<gJKvm2_[c&9+!مhHkP 33k~0XF^ax.{6s0k{JzcU7A=MT:^ &`õ{KtID&|]a8ӥ_Q’A{:tمhwJܩů2fQ{[;HɎ6JQC;?3u&V*6+&R:R#qEzZ=,Nd1=۔Q%95lI8&m2u r?ָiܯᇮHeʐĞ`,!tNkjb 1Y~iϖ>[:L3)50U9-iDZn2Ω5;sLXWLS9 O*V.pjYl:bֆ\c =[xrZxLp2* gbFXF$0XAl!2|n^N:^2'bˬ6ǣ?DžJ#®`E,"tqlͅ/p,&~MXXrƽll\ b'\_tW>FdH❠gdTTv@Xl3(øM85*w["CHb*qO ttQZ||=`AYg-# w *vԞP:Z\kP,+b>1@rؠ|ñPo e VY£LrPAj}dDwuHu 늮Ok~;D|=ϗPܙa"vwӱ͕ĹVUF(Z"DSk2o oxmFk#Վ & M| U)tVD[./%q,IK+J&k '"aJ.l2ʧXz9-wagӃϒ:#P# k.UF`mC%3-NV˂Qi[!;@I E?Yg`weWaYD| 6Te܂5 Jd 6å\Q֭M_ˑDyEȏ`C)tp%qgTދQ0gҠb9V<2x 4 #dHsu FQi$ŖrvkuWAb$p.<$rByP:nav̭:; MDxT na~G E<^6#Kvec"[ L!."Lв[u_[dxx'EndsEfRީV5nGj&N7uEA[j}I5S(p.CIѿj!D&"J\ϸUeM@&Dm(p4,X [zG:Sabpriܦ_H毳W])gs9F~%ꉶI؃Y"B%bV+pMl$ɱfR5[H-[H 4UMbO|T:FzecCZ@Ӷ+)ӠtaqZ1 3 窑ҭضtc$6 K4Upӣ}At$k4Pݦ\616K 0fluwME:!AQ4*.о`B-7+.t-ՑfYQs&/Zy^e®թ9:VB#mfxT6f{)?om`V8R@f5<fEQY*A06jv2i};F92fRDrִ:!āDEBaqDMtIFҠyZuiQbp%aY)+ S?aMdiTRhUԔ}l'-7 d bTU#%N5b}UraXgL쯗`h*;05+f yr1G)m!g/ <;G TuMFppTQKeu(MFu+N}-e) sQQa.W㡜.νv!.uT l έh'?X=V_#MИNBWQNlW+$vQ0aO# vRۖx>L{lEULX}^XəU=JuEh /☶N>Vur@?ZJuYҲ?=fa1毒Sp~V>( pPLXOR>tӧ@p_vL #L_e[CykaϰRI\ ù\Rʱc-:SmĚ&y#ݟu [2yAY$;LQ_SG4=QX&c804@e,]\v pK%wɡabEf.)LC5;$1 Ftq'4#7PgRq 2W:~(ͩAScOߝAi\^؅+[<$IsI[GQ4 ?@Ne4/xǺB&GUxz8f-+ձ9r0K (uڒЧUH6yB_T1,߁m`fM/ LDI pժ+9fntTxEݰ~bywaPXRbj 8=8֮ζlFaPl l̪Ҥuae g J<*?*iD]ḻ;5t\ͬWc'lhSf?EWgjڝfrؾ#="$Ԯt \ٷ>YZ~-x͝F7!Y5fOx+E|a=r҉kVm33QvrEbVΡX8c8kdBTMon[G..~7Ȫ) j4RTEp,ݘ Uc{==U@?.u6%*K;NᚺF2lr[/v0W!ZΚhyL5"h-#H\+[-$Z&0HY-GC@^ens2x^;/.\O3&"K0֧33AZD/hHIVsgp%X H$aHzddKa]Q!*zqP|7V`Hձ %|ht7=5V/&$tmvI[9Sl+a۪uR]ںcQ$-CǨmVNMBGa]"S[ZIk}*BD:b8r87Ǻ #vQ}4{ܔgtԶ.rR )Q)u0oIp1( a%-7( GLH耯d3z4 Cx0!#HBaG< /$>ӚE+{U܌xl);Iǫ>P:b G*nvTN5p]©.fnykiksK5GkaXLB0,㒒-!vRJj3;>F ˚gtk+1uc'2mW 30ق@0z`j0m;GnK8U4l(+Z.aNfCa3pCj/kuEWÉ&qc8 WW`;,2ْxdtp5ܬhѱ`K,}(K:[̛◠SjM񳊁77\<{zJ 5 =Z3RaT]{gYD =qU 5B@%WK|qw-f=x|{VFvLG5?]:X&V0~}̵v-8w<љ <z-S 
JޤY0ي|;Ę+tnXP M2*jGquƋQ,V,@B_ѷRtqt|h_sw͹5B(79z{iѽL8+&C0[:^;|˪EƏ- jR{ S dO߽F,k2ǕeM`r#18!ˏ:QARw'}"Qԍ=S&8TxIcXfVcOTkQ8l"$['T7 Wc4EOpJ(̱N o0'I7Qr= N5yq 1)@zy5D }tfDfrݴ c"7 e6 Hz7Y)b60ֳhT5賢=VNHF9U v-xwNmy9{B-Qa]u|Pw=@vnKda';q4>4 T*(+؛@4RZm!=ZHr51Kӟm0T@OאgWҢ6/NyWOǙɻ͐yn.lm%OE!b%԰qjޠ: ԸRt?9&B~J0onҥcl vu'赪1mI'Qe~ZgrkPIQ*smCvzTP uzNS8٩Uٱgwt<.9w[-63(ߧ9n io_\?OuSҪ,KPG疬C=*³_;iztᜮuBw~kVt<) h*nx q~ }*~t2 ^Z(GA0'Ta^tP`qaWR7u[jgb<4g>Q'{D\Vh+᷿}-ݏSxpLs~t_~#/H//ܨ endstream endobj 304 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 293 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-035.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 305 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 306 0 R/F2 307 0 R/F3 308 0 R>> /ExtGState << >>/ColorSpace << /sRGB 309 0 R >>>> /Length 3750 /Filter /FlateDecode >> stream x\Md7ds򷷉)H'@I @{9vuGbWSÎ~>_}~|±~_&|?̏)O#8[;^^O/^:d6Z~#UsX=3]?ޭ|GFr7G8%p3u׷?_oY"kTuvA3ak&G;sGw%aph_0Yx/39:ǣsOrx~hptiZVk;SƏ3{fm3:Ɲdlnd3zmv4;:N,Ǚ=nL=ck=p[ZgG{N0PkX fl65>Ll4<͛?Tߪz׳&nF'x;.fnHӒ{98u鬎\w,L]E` SމiΧp 3p4_ꕋ^­qy2La8v\: : .bYTऊhbط `HuϠ)Aɺ+QBZ"g\$\GN]V^haFA a#^XSPh V9BgU9dݞ'L5KmJ9 8Dq ;Nn10ԇ.g*.ۃM4y6jq.-;PW_$:B͗|synrZ j82O~}U$cDCd?29S3˛/uQHYQ.nR1yo=Gm5<"AE+ʜarwԻk[>y\\͋/k \,O--ԝFS̱{XypM6%EP9xdO23doKt͗g]i]…{nKWOj\ e齧y'Xqܽb|axr7YFʒ:-o~<߼%0|2C#Wy7e\ @S n-ZTl7eN*'+ٺNEꝁCܕmN3lBuWn7e&>ȋs}}{c.7e6%s^H.14"˛)u!FJuKܕz2rl 4vGj.qSf 
03AVWBKeaCܕZi ;h|7db[/cwi-*i;v=kL:b=]yV>w^==$DJ}fW;ଵ< sMYƾ?+Vx߮ђ꛸)uChgWklMMYNYخa@CqPMY+ƒ yHS'T+,ӎz'~w}ɄyUm&Lo  AOnY`x|,a\'>,9%@c3sYVy^/ ׈٫qvi m54&? 0((2^cEqㄢib]_PtE1 ײZs}SdyZb5anM;.I_OH"reQIK-/q0L ](dg(A̘sO{h4p5@ȓOĨM7i0īBlӵӾ˻²Z?QNpQM'u^?1*̙ $YR7|1~G]_O3E`Q%^lFR]_O(1FeICH&%?o-50K%w>Pl ~d=vy!)v?6{]_Oh|MoL쒟! V*Ċ H0uy]AߏԷF߬ * 篗 ["~oƮ/31fv%?黿mѰ'o6M,2+O'[_y|7˗|?+j-X .,}o{O췺|7k endstream endobj 311 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 316 0 obj << /Length 814 /Filter /FlateDecode >> stream xڕTn0 }WQjU,_ ln6tm݃8WbhI_5 ,GyўeZ+mzvw!(q}f'8 3cϰ0b0|\Jf*r=2I#o~- ۃu` ~/',:6p<œJ|~`WNp߄tW9- 8nvN6)5*y q&j)LD1MG횾Eޕ$jk>0M9Ԝ"h9oVS,xC'nC:e6r`Or(h?Lqj\J0ƥh#mQ{Ncg" I12BA6o=(; 4fn%]ɼ^TVҷ@ jøTVq ^0nŧ(Dh3QբQG8D-512!Qt]n0 2 ƌ"C=ǡL!m5Jv֢$sJ,e^$\HƢ~HqEsk"JwvڝG1Nȳ;ݞʑȢkr,eyS--]c7$_n18 +AqPM9}IQרj5se;$at~$rX}'T&٬??yO7/.n9Ie?zZ=zj[/,RSAGVṔbNRȑnDk!;V^ D_R!I ͡s 쬉=Bhd@s1u2G endstream endobj 294 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-036.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 317 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 318 0 R/F3 319 0 R>> /ExtGState << >>/ColorSpace << /sRGB 320 0 R >>>> /Length 1970038 /Filter /FlateDecode >> stream xO˖ k][d $pI4 ddJ%H|}2"Fe4y#3|ǯ?~?z_o??z_w߿ۿoߞſ_o_o~W?q>O1~1}#?߿__oo>~__{;oo|_ϟ}|7~?~/sc@__?~zş[?>>>̾~}s3{= 
||{VǷٟ/7,?s>Y}s?K,5YjWR_J5]jwR}K)5SjZ)5Sjk/؇jiW5d߮'c?ٷj~T_柌5d/`2כkXOƚw柌5d?'{S >kXOƚ2O}{|OGܧjɾ'/_osTO~㯇jTO^>Q>U>Z\/d9gϿ>YKHD*5Uj5Uj5|YT1=~?iI*?UQ ~O_. { ~O~O2~]YO{$c ߁Oh>y[}qX~ 'pA 'tqx<|\qxB ǣ y|5>5KGK[~Z~Z~3Ywq:O8>鯚??n /'KV~ZV~^ 'YyBg $^]G:ONl/8I /^qxB 't%?%O8<dA K[[}e`{{GÇMt~Z/ }$Y>I~=Eo }ݣOvݤ(G2O6*k HU_emA}}B*Sl U_em2ʶ`Cf[ǽ2*l 8|8KPۂ%emAW`ۂ%emr^*m 8Ky嫌=ʌۂ%emAIʜۂ%eUF -XP,q(oߥ]Ftۂw{[q.o :emA߿ۂw[qxBп܂w[qS$e nA -8ruA.șGi=O} rfiA7}%5'߼彠㰼FS/Ks?%O8,KOO88>r4[[[Kz\---1lG.͏Ҙ[#ggWG>O8|8|ܒ4[[[[Kz)\?-?-?-?O$Qˈ\퓎Últcn s4ɟ|+qXfI79'5O)%Hri)qXIaI\`O%--1SyM0K׼K4枰4ּK4}5>YSS#tG.? l9l-?l^L*aaaas n1xsK{k^*^ayxsK{܆>5/-/?ܠj 'pn_21 _877}XPuY8 K|y 7p@[X%) t( t( t(PK8%%;жa|m7/(Mn;K(Mn`Cm7ġٶXQe8a8v}.m7ġLٶ# m%e,ζ@IhݰٶX n8 9vݦm7ġٶ mmm>Ӷam7qm7qmw25gnٴ::::`]FeΞ؞@Nӻ<^?8~phWr&ž@ș{z77r&ž@XPF ,q(#BnXF ,q(#B8!{znOo@Oo@OeDȞ@ǁ@ӻa7q7:Ğ@ǁ@ǁ ˈ=%eDȞ@ӻa7ï8!{znOo`CwCyzKʈ=%8!{zK 䫺=%eDȞ@\ ,q(#B.HOo@Oo@Oo O:::nXF ՞@ǁ@ǁ@ǁ@ǁ@6rPyzȱ7A7r ,q(Wn(Oo 98+{zȱ7ġ_ȁ {zK=7A = {{{zTP@BOo S7c,rŞ T=c7c,.HOo 4 tc@iMOo.HOo6=S%X{z77}jzz?'9dOorܞ@ǁ@ǁ@ǁ@ tP@ ,q7A ,q7Ayz77A@ ,q77Ayz77AyzݧwCyzK tyz7}jzzzzzzzzzzݘwCyzzzݘ7qwCyzݘ7q7qwCyzK ,q7}jzzK ,q(}jzzK ,q7Ayz77AyzݘwCyzK tcyzK tyzm7 gж{a>l~·͏y1-6?a#>l~‡.m[毺o Ŵfp-w%w|p|p w sk0׼v s˶{Uqk^ke=llXL+Ppk^Lv sk0׼skh=lk;5OayvӿcӶ{HayvcӶ{||kX6_3ƚm05Oay}oray&ayZ}[b6_66_6wC |ؼ~;ln6_ 66ߖ/&+asFBzᷘe>5/+as0׼e>lnz暗ɖۗQ+^I+P@s'=q28p~OΝ u8w,+*=+a eep Vέ%Z@ǁ@ǁ@ǁ 8P8P3j(q28q28q2xCLmep`yư2xC8h( +KX}@ ,q>ІRmep`KKX}m}8h(q28&+**7>P@ep@ep [OVo}@ǁ@ t P@6 t Pk)HȞh-%<lTXR2xC/)/nep"_%P8@ep o PyE$p^P%\28b+7rIM dYo*828I@6 ^@@v w628q28}d+*728d+**728q28q28q2xC)K ,q28A@^X >R~ɧ28g^*:Iee@IQ|*7,tVu@y t @y @ ް YXPf ,q(uVtEep`C28 *Kʠ>XPa28A@ ,q(3wV@ep`C2xA***}Φ2x2gep@ep@ee@ t t t t  T%x]&lw6o(qCylq 'l"P&@-X?8y~MjbݲMā8 '9jaq 6r&2ydq`C<8 7,G68#KMā\` 8H6X 8H6X p[є o(p`dÁnES6X Mp@p@p@p@p{o o(p@p{o : o(p{o : : o(p`dÁ% Mp`dÁ% 8|8H6X p`dÁ% o(p`dÁ% F 8H6eÁ% 8H6V4eÁ% ?eMbY8l~$O6?DZa8l~OA5 毤oˀ滀Ŵpmtet|p| pMtk^ 浖pk^K %=ln>ayi0׼6sk0׼ay ay<ſ^)=5OabZ;85OaaeaycS{|{kX6ƚ0<ſ͗i0׼ sk0{DWI4|?l666_6hr"w ?lZLasJy92|1߇ͷA͗A?[L\ 暗h}k^\ 67sK4d<w8pnM 
85[&JS 'e!8:[K[b g q =o q@ q@ q@ q@  th!th!@ǁ@ǁ@ǁ 8B8B8Bvy8A@7,}v @Lp@p@p@p@pOt oX,qK82gp ),|ip -$EJ4f/?2\dp _,EÁ?DÁyo]p / @^[yo]p g\ ް|Yǂ.Jkp g?,t@~X42h8Эhh8eBp[ r֢ %t+@.[4EJ48P48P48P48P4V4EÁEJ4V4EÁ% 8H4DÁ% 8H4X p`DÁnES4X p&p`DÁ% o(p`DÁ% M 8H4V4EÁ% /Hp[ : : : : F : Fp@ Fp@p@ 8H4X p[ 8H4X p`DJ4X p`DÁ% o(p`DÁ% F 8H4EÁ% 8H4V4EÁ%hxB4|ؼb#y鉼@^lz/6=UƋMŦGbӓxAübӅMI/6}%beMw /6}eb=M/6}k~Zx1byc\hŦesϢ͋ /暟^5?/)k~^Q8lPx1by;\nf tby_5M7^; /ƚhŦ.^5!~X kn|_5!~y\݃sϢf!!~YBbSŦ~MG/61tbSYBbыM׋/6.|KbӝaŦZ/65_l.zŦf Mn^l(z1,!>lk~k~1,!~1,!~bYB$( S©8 ]8/d$!pj\8/Bl$!p7^8/d$!𡺔©x!' B%|,wR _>/ 7TeN* >B%|1P _'T >B%Xġ I%|aCɓJBġ I%|P _XP˄T8* ݎJ2' @%|aC֓J@/tqJB,c{R _8@%|p`ޓJBqJB* /t'(T8* } JBay e(2_ЯO y,Cs8p6_Ȼ9j 'ۂL9d ]&0_V4 rVP|[0_ȍs/t P|C :0_8P|[0_8P8/t+ KfC%8p6_X0/,q 8̆P|{o0_X0gCl(a6_X0gC%P|[0Ά KfCnEP|al(> ݊B /t`(qB`( :0_ :0Ά {B gC%P|[0_X0/,q 8̆P|al(a6_X0gC%P|{o0Ά KfCP|al(a6_V4 8Coޞ/%ua:l~Vi}I6?taCZL{ͷ_XW_W7iak0gyskK0׼Ň7 qk^KŴqk^qk^ki+0׼v"sk#0׼!;Xƚ:l1Aƚ.&0<Ň/>l>5O}aycS_|kŴpk^pk^I_|kO@K|gu|ȇauaaass~1)o3M͍a%bR"6K{ŤD>ln暗0׼ȋI|k^J\R"6sK|k^Jay)J@D;:sGgC-&@fJ878~R"d%r Lȁեȁs3ѳ9+g`19q9q9q9qyC-&:T":T"`%r@%r@%r@%ZL tDtDtDPIl(XXŤ@DPI%ZL )J-&8|e%r`KDdJ-&8h1iC-&8h1)AI%ZLPI%ZL ,qbR@%r V":T":T"oŤ@ǁJ@ǁJ@ݬDPIȁlY8PȁY8PV`69=O+/^+0 X~Z ,xi%Z ,CZ dJ@9DJyW%N*ya%R"λ*ʆZd ȁlZY@%r ;V"@'Pȁ%+R"l%T":+T"l%r o?DtVDd7J@ǁJ DdKJ@ǁJ@ǁJ DtDtDtDPJ)KD,q9W#V":Iq(6k3Ɓ>ERl7}8o5IqOoX,6)b@7U(6ab2gq`Cֳx2gq`Cس8Ab ԞƁ%epb@SlXP,6tSb2gq89>8q8q8g^7,}::oX,6b@ǁb@ǁb@ǁb@ǁb@Sla8Ab@k)6,q(~ =~T %پ@aؗS/9%rʾ Kų}ɁK/97m%oXƒI<#x^?8~pþ@K,q(cI%oXƒK,q(cI%d_e,ɾ2d_r`CK/9g$7,cI%:%о X}Ɂ}Ɂ<#ٗ8ЗȗB7,cI%8$yF/y2d_r`CK/9ġ%ٗ3}Ɂ%e,ɾ K,q(cI%8ȗXPƒK,q/9g$KX}Ɂ<#ٗXPƒK^@ǁ@ǁ@ KtKtKtKް%ٗȖ}Ɂ}Ɂ}Ɂ}Ɂ}Ɂlؗ|Ɂ%%f_r`|Ɂ%%f_r`C/94Kʰ}ɁٗXPKd;2,f_r CK,q/yA*9V"H%rCT"rVJ@XY+9:c%eJ?bk%r GJDPJ7DHȁnQȑ+]&T"L%T"LDtJ@.Zȁ6S8PJ8P8P8P8Pn3ȁJn3ȁ%R"8Hȁ%R"8HX %R"8Hn3ȁ%R">\`R"8HX %R"8HX %rT"o(%r`ȁ6SX %T"L%r@%r@%r@%r@%rkT"o(%r@%rkT":T"o(%rkT":T":T"o(%r`ȁ%R"L%r`ȁ%R"8Hȁ%R"8HX %R"8HX %rkT"o(%r`ȁnQX %r`ȁ6SX 
%T"?T"/ua:l~ViI6?taCZL_XW_W͗wik0׼v,sk0׼+sK|?5Պ\R"6Zay-UJay-T,}\ڦ85]\ڤx2*cS|kJŴAqkJ滜ˮXT"ƚ0<ȇD>l=5O%ay*/{ӶayJצayI,5\R"pS|?l:l]L*asc2.Tʇ7\ fas󰹝|Tʇas'^w1R>5/bR)暗J0׼Tʇͭ\R)暗Jkk޺@UK=sgC%w̓uɁs'pn~wJȓuɁs3p dK|KuɁl$X87?KnuɁuɁuɁuɁuj/)q.9q.97^R@]r@]r@]K d%e]K ,q^R O`%8h/)0KX ]r %8h/)A{Ij/)A{I%K ,q^҆K ,q^R`ٔ.9q.9q.yC%:%:%'g]K tKdcκ@ǁ Kdwκ@ǁ tɁZX~ڀ ,xml L`k&Q7L`̂4"B#r(4"Ϋ*"#r {6"o(#r༪8l(q Ϋ*jU%ɦ8=ZKg;TMq 8Hzf Rz8PzȎǁ8q8}[KJ78[KJJ78q8q8qxCIK$=,q8A@^bXz8P]7) KuqOtTM@.Ł>Q]A:}8 }8':}8ġLY]Ł%e@S]XP.8;K.t2jguqT8y;8q8q8ЇS7,wV:T:ToX.@ǁ@ǁ@ǁ@ǁ@S]aƳ8A@N.,q(3yVpÜ8]@^[zǁ=OKy}lq ,=P@^[z"Ky_feh4)NSXzӕǁ%e pǁ%e@,=ް YzXP,=,q(Eteq@q@e@ǁ@NZza.8q8+KJ:iq@q@e2\dq OWoX,=,q(E8"Ky8ġ Yzǁ%eIKpǁ%teq`C.8+KKp Rz8Pz8Pzfǁǁǁ"Kl8q8q8q8q8K78A@,=,q8A@,=,q(#_fq`C87KKȗǁYzXPF,=tI87VhuqT@X]9cueli95 5ԈƁ!zP8S#Fq F5t k2 k!*{P8o(q:F::::Lq@Lq`Ɓ%o(q`Ɓ%8kƁ%8kpkX q`kX q`Ɓn(kƁ%Lq` k2ƁƁƁƁƁkƁƁk8kƁk8k8kƁ%8k2Ɓ%8kX 8kX q`kX q`ƁkƁ%Fq`Ɓ%Lq` 6sCCbZ8l~Q6?V6aSl/\Z85e\ZXL浇qk^[浃d6_{ƚ0<LJ7^i0<LJͷ@͗])=>5OaayJcSz||{kX6_.=\ڲ85\ڰXL ^qk^X?\66'6_'6_6.&aUaMaaaa-b)6_6k ۴$R>lZ>ln6_6_6_+6*/&aaysKDʇ%R>5/a]aysK||Mpk~IS܊ Җ86"T|3Ȱٲ8Vl|3,ˁ<ٲ>9L2v,>T,έ@fϖ@v.lY,q"T Or,8hjAZZZZ7"T@r@r ,:,:,:,oE@ǁ@ǁ@ǁ -ˁ%Z iϖ XE@lY,q"T`e9APʲȎ-ˁ%Z ,q"ԆZ ,q"T`KKXE@ǁ@vAlYthYthYPP-ˁ-ˁlڲZ e9qeyCYe9qeyCl(r ,_Vn/^+7j&rX~Z d/֖ rXr -ˁв? -ˁڲ޲-ˁJe9pު j6e9[[筚 UdӲ-ˁ*ɦe9]\[KdYd'זiYthYd;ז@^bز8вȦ-ˁ-ʲή-ˁ-ˁ-ʲ8в8в8в,ˁ%,8ȲX r o8lY,q(qH9o)Fr_(R"@aPCEʁ>QaγH9Ї6àH9g}hH9gKʤEʁ>;PXP,R 7Eʁ%ef"@pSa۳H9A"@0(R,q({)(Mr`CHyA}Hy2gr@r@e"@?)Rt(Rt(Rt(Rt(R 7E?K$R"2grϟem]qLų́}9Ceb?s 0g|Lgų́yCyl?s ǒgMsRfKZ?JX́<XPƒgް%XPƒg~ X́%e,~2d?s m3:3:3oXƒgtg[$yh9q9o3:3:3oXƒg,q(cI3f?e,~2d?s`CK96KXXPƒg,q9ġ%X ?s m38$yh9ġ%ϼ ́́́a888ϼaK9= ðyCKgt~KgdK~2,f?s [z38a1ҳ9ġ Ȗ́%eX~@g,qyA9+`?s [3I?s gǵ933oX&>>=\ɜT9+6;rRf@W΁nEI{9*6;hvt+fiv΁nEUt79Эh{yC݊9qyC݊9Af79AfKdvPf?\2;8pX s`X s`΁nE켡΁%2;Ms` V4΁΁΁΁[*]T0IFs `t @-́[*79ԭ́n`tϘ RR0k @`P )K`,qyC)K`,q9M2*79A @7ɨ`,q9A @`,qyA*_T0/y,aln/U\Z855\ZXL;qk^浽dT06lƚ0<̇͗Zik0<̇=Y`>5Oaay*cS||{k XT06. 
\85\XL浘qk^ \yh/og>:rfb*`1}:a%ab366w<ͷawÇWËI|?5/=ay0׼̇g>ln暗0׼̇ͽ'9pnֿ,ds{PNB es{ [†R0fs܊ jd`|`[̗́\XX]@`,qЮӂT0:T0:T0:T0o]@ǁ @ǁ @v3`t`t`t`PŃ́j) +KӞj)AN>g> Á>>aK}8ڇ}|}8ڇ}^}8/ܴ ˔Á~}8ġ>aV}8ġ̫>WXڇKКÁ>>XP&l2f::DGe@ǁ@ǁ 4Á>>8>8>8>8>WXڇ7,m8>Á%e@h~%8Ђ8r&@[{*0{y[koq -P@[Y{y=eoegSGCY?J-Ł%e ,Ł%e@-ް[XPfy-,q(<goq@oq@oe@ǁ@[a屷8q8={z~koq@oq@oe2coq {oXfy-,q(<8Y{yܳ8š[XPfy-,q8ġ[X oq {8Y{yܳ8ġ[ ŁŁŁ~[8[8[8[a屷8{zzzzxCyK-tyK-d3y78@{KlŁl[XPf-d32eoq 8[ Ł8MO{礷8r@[1{݂XxCI9RbqԔrR %5tR@XjƁ)8ХIqԔ/Hq ',5tR@ǁR %5tR@ǁR@7(5PR@ǁR@ǁR@ǁR@ǁR@)5t(5PR@)5,q8AR %5,q8ARI78AR@)5,q88ARI78ARIݧxCIK$5tRI8}jJJ77ЍV~<) tg@?O~<)P<% ,S@7Z) ez7ЍV~77_o8HX 8HX o;Kn(o`d,QX o`dnRX ΍eI{0a-_gieC-_Q˯O(ZQmBpP@vHd@=j#pP6@6@=m ONm}8KQm.j|7|]Դjo@jo@o@:::n(o`l%8{|Kd]f@Ӭ|u7,C`6%f@yi K>ͺ~ɧY7o4-f Hͺ~KY7/4f2fn_ iݰ̇٬X nϼ4811u}hY7ġ̊٬ ͺͺͺ>#ѬaY7qY7qYw2:fnH4:4:4:4:4uf ͺ%2tEn`C'Y7Ч+u嬀 7vrN@^OPiQ;y9c'rrN@ ;y7,3g Z?J>[茝 7ġɻa7ġȃ;yK茝%etN@ t tݰ87c;y7,3v:th'o@'o ߌ t tݰXPFg AN 茝%etN2:c'o v8;y77ġX 'o`C7AN@ ,q(3vh'o`CwA:y:y:y77q7q7qw2:c'o &v:t:t:t:tob'r8Ⱦ%r86%rn('o ۈv8Q(;yF7ġB6%eN@ t( d_@Qkrs95byn -]CrY~ 䐆ӏ! 
q77=cq9a?n7+~@i؏7=cq73KgL?n@?gL?n@?ndn(?n@?n@?n@?n@?n{:n(?n{8ȏX ?8ȏX ?n`ʏX ?n{8ȏpɏX ?n`ʏX ?n`ӏ%gL?n` ҏ18 88X~C2 @i  q6mDg7q6_4Hl0hPgKd t߄ e ,qq6}gKd ,qq6mDgKd]`=l6ξ0_lz/6=M8bc|)_lz/6=aͫ/6}]bS ŦaMc/6}}bw؋s/暟W ^5?g_ljk~*x1l}y\>msϻͫ/暟 ^5?k~^" .;^5싱a}yyX0ξtbNja}1@zaCZV^q{!d䍽W 9,#o셼7B{CE-o셼7B8{c/rF y#ol`Ya^;K̂%7B%ef(q(0^XP`䍽ġ X`䍽ġ {aC7B1卽q7B2#o셎Z2#o셎)o셎Z xc/t ,0^XP`䍽GLyc8yc/,q(0^#8yc|Ĕ72#o$XPFl a&2,ooeF@^^+Kyh{i gSl/ 䍢`4 ^EK9b{醲F@Λ^+K7,&=2ob{_~_?x~8yġ̛^XPMl/ ,q(&nXMl/ ,q(&8yKy4q4qt2ob{i@{i m/ݰ̛^8^c|4q4qt2ob{i`C74ZK7,&8yKKʼ<^XPMl/ZKKʼ<^a74A@km/ ,q(&Xk{i%r#R^''bj%I`8&H{i@{i@{i _km/ th/ th/ th/74-lyl/PK^X {i`쥁,=m/ ,qBKYz^XPN^r@8G4q|@lf4' >kF/ ֌Kf4' kFݥR3ȓ ֌Lvafؾo0ܰktؾqrzؾ/.ye'<Nz<N:loye'ƙ4p:4p/.ԍd0l K3{ 86;4p_2"6hl ;@@(68FH *m ,qЍi th th th P7BHH٬@8@8@8@zn:4:4:4^!lZl ,qЍ@y@zn8FH _?m ,qЍ dbi`n\(i HKt#$A7B.ԍ ,qЍP7BKt#$A7BH@8@8@zn:4:4B th d+ii@24qBdjh`OilZPG4g dwhh [\{DGByDG4=z'sKh :{DwN@tz dUgh j{D*G4=P@v::^(h@h@h@8#X h`Lag3O0/3 tIg =Xyt.= >Pc}؃u%}Hg@gKO>>/3Х'uunyP@ǁ@ǁ@ǁ@ǁy4 tP@}8HX }8HX g`tJX gKO>K IX g`tJX g`tnaP>R8Pyna) t( tDig@ig@igk'J;/3q3еJډ@ǁ@gSy%v=I;K$ tDig`Rډ?bo?a5lao?aG5l԰sLI9l3Iak kt~?ఽ?ϩt|~g^yut~g^a{83̼a:1?3<:-?30ϼ0ϼ27 0<8ot8~ga{=l_ S93O}43O0<0<8o> 0ϼ0ϼ<:>3LbareؾjoٰIUsؾdfyؾR晗090ϼĜ<s&10ϼĜ<s[ay9囁{gwƁ{7B];@囁{IwƁ B7if+@Qq `f 7Kt"oo8Ło:o:o:o^k囁囁ld, t( t( t(߼P:777/Խ@64o8^G X-߼P:Kt#o8^G`u|3A:.|3囁%Z$ ducf`u8^G`u\{%X{囁lr, t( t(߼P:77YXyu:oʹ|3q|B7YiZ8PXcOձ@vv@•@v@2pN9 N@64v@2i d;ir@2888y%r@8X d KK; Kt7(q1ir t&@Ar&@1ir &@1oir [&@,mr ,q(Slr &rD&@ǁ&@ǁ&@ǁ&@mrV1q1qrd&@.mr thr thr thr thr 7)/,Xlr ,q1/498s,69ł&r&@XXPN"x; 2쀼P@ Y; B9 y`@ _!Sv@_'yt ,q( dod`C9`d W ,q(쀼P@GC9p~8 v@8 v@8 v@^X/XP/XP/w; : : /, t耼lmrlmr ;M!%&2Ceoc`²1P &@?Rir ;M%eoc`CX c`CX c ymr ,q(kc`Cx MMM|1q1q1q²1MMMMMlrP&YX c`Llr ,qBوX c`L#%698#%698xL%29O讆& er yx&@W49^(c /&@^B]] 691lr ,q1A&ir thr tuIc@291q1] M8888&@ǁ& er tuIc`L%29^(c`L%298xL%291A&L%Zdr tDc`L%29BKdr t Kc@c>@-x|j=1Lc`|%1<1A>c ty|qSDYrQ&Yq)?agxdOp'Y㰽7I6l [a{7l b0ϼt0ϼdog^y%kއP0ϼ@0ϼ<: ?3<:(k85S8layaggayKayq)kwq)kƙqؾ9Lg݇yu}g^܇yu0rayp晗q~}ؾayqgg"a0wWoY7Dq)+aeؾn 
ɇI8l߭ 7E㰽:a9wl!7*<9&㰽>晗qg^"$r晗qg^"a89&/1p8socޮ]{e1po8pߠ\(c _M-r }X]ld,r d3cc`BM"݅8"@ǁ"@ǁ"@ǁ" u"q1q1mEEEEZG@c@c@ㅺ"]۬EZG`u"],qе@998ZDž9ѱ1Oß]P:Kt#A:KtB],qе]t(r dcc@c@ㅺ8P8P" u#q1MEEJȺ"@ǁ"@ǁ]FYab@qv0,c0 dEaa 9; /0ݜl0 ;,0 t0 t0 t0P9 K0 ,q0}%X[1Rp?tCGaQpxa9aaQp?trgaQp}{Ga |8rga`C9aa@a@a@a W^XxXp8Pp8Ppxa9aa ::::kC8Hp K Aa`C9ba:(8 ,q(@dlՈ\XՈlF j@nF j F 1O}\XՈlF ,q(F dm5b`C9`5b tF ,q(FPj@6GC9pġ1ġ1 ՈV#8V#8V#j@ǁj@ǁj q/,p ka'@~ɷ0LBل[pX&l-8l-8 ,P60F ka^(a`Cل[pX|ɧ² 0A@ZpXP6ׂ @   ka@a@a@ena +     YYpx%0A YYpX 0A /0A?nD-8 ,q0qy@ym? @{0l? t;Iᅲ؃퇁n'i? /0$퇁<`aKd? ,q0Ai? th? t;Ia@ᅲ0q0u ~8~8~8~8~v@ǁ e? t;Ia`쇁%^(a`쇁%8~x쇁%0A쇁%8~x쇁%8~芖 e? ,q0-퇁퇁j*Mᅲzi? P~f e? P~f@5~f@5퇁~BKd? 4퇁%8~f0퇁{~|b8lLa?a1l8ưb8l$0a{wN۟HҰ7{ѰatERog^Gye1y<:>3̼<:>3S<:~0ϼ/pdb83O᰽>L܇qi1sayZ qi1orqi1ƙpؾƙpga0hqayfQt}g^؇yu}g^aarg^ׇye1mǶcGA }>mǶcvǶbp~ccc[)V~3g~ ~lk?3?u3'?3g~?*1~Pce ~l?ƙocy?7mm1<|7m1<|7m1<|7m3l?a1~ct5h}rAΰTn ڃ[UT*Bm Y*4_R>r"|ŌTn%}HE჎T:P>8@EwxqA*YHE჎T:P>t"|qA*ǃ,o"|a _f" /}X_xa ,q/}*YHE`~?nu"|a`~G~ǃ%d#჎T:P>t"|qA=R>t"|T:P*vJE჎]_BpI( %|-PAK( . w$|nyI( [/ ;P](`.|aK`.|8wI,q PAPAPAP0PAPAPAn$|ߤ |ߤ |ߤ |ߤ |{3 7) 7),q(%$: >ݱĄ܀}AO}z{ ,{L®{{LB9G E߃ E߃~0B+f(Kʙ)7 )d +E߃I wRA.{K )wE߃6K R=}x4wE߃\JH`î }8}A;H`CYYK+d {`CYYXPVGϏ>?J(q(+k),q(+k),q(+k)䫴}:P=8@XVR=8@Xv= ރ,;W,gٹJXv=X>ϲshAV#=D{hA? 
E{hsh-e*ރ|!hshAK`CٹJ _%{qhAB,ރD{:=8@Xv=~Ht {qhAI,qE{h]`.{D{8좽] R,qE{8좽]`.{a=X"矲9AnNe -|rs* ߃ܜXw ߃m ,q]dbC]`o\(C] Kt}#A7.-2.A7Kt}#A7P8P8Pwo:4:4B]th d?cC]@C݅2.q.qg.g.u=s,z dc܅g.}=s&=s,} ܅6} dc\ N{Kt"A@6@8E`YH\aL܅0=s~31=s\3& =snB\3ȥ=snB܅倀=snB\`C9%`܁::rqj܅em!\rm!\`ݖpI[X~e-m!\NRG pJ诓QC!\5]XvQsol' / Url' *@;P*@Vri\ z.J.OY[%ݕUr?MK ,qJBK ,qJ.UrUUr|J.ġrP*@V8UUrUQšrϏ=?J**ʵJ.ġr d;i\@\@܅ek\@܅ei\?OFg\`<.*y]Ur]Ur,HYpX%Ur,8 * da\`CEZ%XPvV8H%ȗpK.*@[%XPvVH\@\@\ _­ t t t" d'e\@\@\@\@\ ;).J.A*@vRV8H%X \ ;)KP*@wRT8H%X ܅R8H%8S,Gr r@.G- r % r@.G- t?D ݅%t<+` ]RJ..g, ,q.A IK$PIK$ ,q@JJ]RB8PBw$tK) t( t?D ݅::::..qBI]RBX ]`$tJBX ]`$t%]( ]`$tK) ,q.& ]`$t%]( ]`IK$ ,q.A@͔:ݞ % hRBѤ.U%tJBѤ.U%tMJ..U%tMJnO ݅zB ]) ,q.A@W]8HBw %t( t( ܫ'?er;la _7:lS2$?[폖a{k8l/ ϕac0?T}2l (I&7lo·yul|g^2a{i~-3/ܰ/atV|g^'ŇyuN|g^er|g2ay$ƙLnؾ*oq)ƙLnؾ$ƙLng2a~hg2aytxg^Gyupg^өay׉ayayItPg^2ay~erxؾ3LLn~9d^adؾ;hm؞a{ԇ߰|t|za{=l=&ڰ{a{yud0yayYtTzg^yuLzg^EkF{gayֆ$Zƙhmؾuq)Zƙhmؾƙhmga2ggayֆ{t$~g^yu~g^Yay9ayֆkayt}g^ay~Ekbvؾo)hm~b+abؾ;La{;locan~p&ڰ}-1l_ wti7I6l_ ayֆy%Z;La{:3/0μg-> ܛ u>p 1Z|77{oJ|ȗ:=pBEf^d5z _,>P"3pgY`HY@Y@Y@م:g8P|8P|Ȟ@ǁ@ǁ@ǁ %> t(> t(> t(>P{X|X|B~,q@ Z|X%~P@v!8C`n?\%X׭g%X%8P|~@ǁ@ǁ u!q,q,UgC@Y .,Egg%p eU/ dg}Y ٮX_ve,X/ db}Ynu db}Y {wK YX_Ȋ@.֗8H_Ȣ ,qЭ u+@0,c.,y0,c0,M6 w݄n@7,K݄n n@7,ġl6;n@ǁn@ǁn o[vrj YK Y\QBݫ%d/%dF YK YK مe k Y kCY q׆@6rUdCY }6](CY q,ݸ ePȽ em( Ȇ e%28Pv e%28P2І Ԇ@~ᶡ,ġlNm(P@68ͩ eQP6G9=?JԆ9,ġlNm( dhCY@CY@CمesjCمe g Y _f,! ,۲,n @?j(! ˌ%d5%d|,QC Y -! @XBvaYYBX Y -! ,q(k8Kl Y`CYYBv %d%d%d|[,q,q,q²,o˔::::3C<2 Ow<:>30ϼN6ldh7}ð}plbruޭ7 a {a{:l_2 Wn ~J0ĆamؾlG2uؾZ8L6am &63/a +aȅ:ȏʰgP/a{|{}a|2,p/;p//_^;c^zʰ u=L%ʰ|?ʰ@ǁʰ@ǁʰ@ǁʰ u=q2,q2,EaaaaJ8P8P8Pv*2,Aw"he؅X⠻ |2,AwKtW!=a%pa,C ,q]UPwKtW!k+KtW!AwKtW!q2,aaaB@eX@eX +.]@ǁʰ@&V:T](eX B+*KtW@*YX ʰ@(VB2B)٢Xʰ@yV]3,S ,S dbeX {D+$ZX eX D+Kt?Ag/ Øʰ@? l Øʰ@? 
Øʰ@72,M w*݄ʰ@nq w*.,{+݄ʰ2@**Y+)+W n@n@͢+)+)²/+);.Vz+?- dgW >r1gׅ+9p[u_\YȆ@.|, hJX W`_JX W`_[:La{5;laX ۧj>TV0O԰}4l/d4ltvdw*v۟/jؾƙjga>0q"ayqo!qƙjؾƙjga0yu{g^织yu0ay晗j~{ؾzבay>L繇yu{g^aayUⰽXɍ5l{0lo%ⰽR/k^ a{>l- o ۻҰ}4lI᰽C?LnaBi^ ˍ53/7arc&Ņ?m ۮ x ?FYHk u"=p/9!6r5F^2r=FS;SP'<:z{7N.ԉ2Hk\%:~ \\\\Dz@#W@#W .+q+q+qB]"dc#W`.F u A~j#W`.8A KtBYXK%Dp.8A`.8mq%]",q%@ǁF@V66r:4r:4r]K\\lolPY8u\.m th ,q% d5hV {YثuZl` dcV [B{.A@1j@v1jW+Zl! diV H{Kt=A/A S@?5 S@?5 S ~@:+.,[~%@:+_" :+q:+q:+q+ R  @I- tV( !   ., S #_A۱Yَ`_A۱YَX؎uXlm ~v@Ͷc](;V~?XlmǺPvX%c8ȎuX%c8Ȏzv .v@kc8]Xʎzv²ڱ=}c^Bٱ=}c^+G;օc^+G;V5ڱ.+ڱ]юX ;V`Xh ,q@ڱ]ю8Ўzv - eǶʏm*ďm 8? Ƕc[w-ے~Mjm-Ƕl Ƕ c[>lW6}l Ƕ|lk?ۺacy(>C1!3A@g~?1lvcy(>[ޏme]Ƕjc[ض+>ZJmǶǶǶBkخl~lu|l[u|l{yVe}l+vʦm[mʦyweӰ]tQ6q`^jA4Qn/nσ[ 'GJ[ Go~&xp+:>V<n[g'q'j ߵ5{w? ߠeza?=اt`zq}*p?ԃSnYnzq}AاwԃS:O=8>S8'[S KU[K8'd$ԃ% > &٧,qO?XⰟO?XⰟa?`% K:O=Iاt` O?8>ԃ,d O?8> (٧t` S}AKaz-SG}A6IOY&> $٧d)T~AvJO=Gd$ԃ,dzuSP}AVO=RUK8ُ>BgR0A?!zcHR0*pZ=&Z=&Z=&Z=&Z=V V&Z=&Z=XPDHj5RHt z+dItV zW zdItV z_ zYAgRRAfERIj KlIWIdk'Ճ@Ij 7PZ Vr#Ճl$z%Vndk'Ճ,% ܥVx4wՃ%]jK,qإV8RIjXVZ=NRKʊSR]j [;I,q(+NIˊ<ġ8dkw`ÿ-ey~8QPVՃl$zqAR┚*,zbKjz 5U`YlIM PS=ȷebKj,5Ճ|[A?zoRS=gT @jؒ]M ߖzġ,zoRS=XP[RS Aj T:PS=8@M5U`YlIM_zqAjtzT]M !,qT8jIM`î T)?9d'%ԃ\g! WrH=:CAz?9O]! WrH=՟RCAzu Re'ԃ%CApH=X;,qR8KvTzawH=X;tԃ%C!`ԃRCA!I8tzu RCA8tzqCApH=8!;t; ԃ%C!;,qR8KvTzawH=Kvԃ?!`zawHKvԃ%CApH;,qR8pH= =}pH=CA`pH=}pH= =}pH݃!݃!;݃!`ÿ-CA`pH=X;tz=R:pH= ! _N԰:l7z^C;l/ j\ۋa{^q0٠Q'u^Xa{]}lP3:lM0La{XYwjy,&հ=+aea{Bj~pgay:>晗jg^ay=Ljyuvg^'jyYay=Liyuvg^a aO5lﱆ=L~aa{5lo}}qTj^^ ۫abcؾWO5loq;aJc>laS ۷ayy-3(?հ4evWr _(p/9vW{iFG u?p/9vW{iw,:dX; YNz"p'8Bqt:^z"p'.![u:p'.б*q*q*qBB.*q*q*qBdf=U`B,qY@S8,~`*Ag/*T%:X⠳,~`8,~`_%:X⠳Tlʬ t tPgYYOu:S9*qBY[O8POX⠳,SO*ŖTe de=㎲FnIdHߘ ^OX|QGvzD^Dr`*6իpUeT b*\AիY e*8zzAUBWǁի8Nؽ6/Ϳk1v;k <摽6Խ6yZͫkڼx~Lݫk^<}Lݫk#xͳ{u6H^wlum^8__ͧ/}µyvm5?׵ڼJk^ ]A5 k~ yKpo~+W8/yף\D8yt+ A6ރzX<\ [pC@W8oRy;$8uP\AԣգգСw^ֻB/Y d]oY d+^? 
AǴW:B1uPi i igN{ǴAB:.vI+%Vq`I+%ABX =,i^puP%Vq`I+8zX cI+8uP%>eT qPIJZ2*i8.㠒AeT fI+\A%/0eT qPIJZ2*i8z͒AeT KS%p|z bf+uV떙,f*ef+6-3[ bf+uVm3[ }Vm3[2l8(zV2[2zVq`f+rlfB3[ǁKfAݟeh{6Vk m]ڼ6/Sh6yJSh|rl6?4{׼k^>eWh뚯yO]52t'#\cp]px}2um]1u?]޵y}m>yLkڼ6/G;k1{kεPڼ6/+6\7|ڵyvx׵ycwB D8y2u+WR Zkp^MR y:Ϋp^J79 Ϋs=gz<\~N3\~N=Hr:?$=H.cy+z<\^ &W0ἑ =s{B\ǁ}sB\ἑ<>Wq`+8z:>Wq`+8z:y#.㠗e2B5\2z!}p.㠗B. q s+\A/#8ez!\A/#8eppP/#8epz t+8z:B\ǁ}KAe>W͠\ǁ}s\z q2ƒs\kr>AB r+~}z?}kBv:'Cnw ?s+}4eߐߐc+|2zW\ }oW sc+\? A>W:{\{\(}:>W`eay} =a:;a\9rOV2B. YV2.Sa,s+ v+sX!a`*ŠūpeT e*\AūY e*8xzABWǁūABVz8|Nycl^]W7zO6[Լ6yb}kԼ6yV ^OZ̓zmԼ6?xm>d6\G %h o#[ǁAEB#[ǁp^Td+80z =lTd+80z =l+ q+2z!^ёprV^qq+!7l8l\3:.WeA.WeC^q8WeC^q=l\=:z =l+ǁV-#[Cq`d+*ґAEB =l8l=TVݕSY r*+˩ki꽀[,B u* u*+̩K\B/ɘ u*+\ۏgG3(z;ГV}#[}lTd+=#[}llB SП,pd+ \U _ Y _ Y y0W/,pT+ \2n6ˣH.pְBٲOB+^\9xrgUB. U.H^\q*;cB#9xrgU#?4 qP*\A ^8(x.U%W qP*pDK2eE.'^8,'^\9xup9Q|~Ya9Q|~eDU%Wǁ˙UZt ٪˙U٪+V\t ylUgB'U!W =VNlL٪peBޯ8[.㰜8[٪pqPAfBUǁ٪VfBUǁ٪˙U!oߝ =VfBU2!r7TȅR!_[pB*B q'*!rTȣ"'B3!up9*rB*QR!NH<*rB*?R2QR cB*\a9*rB* p%e qPBR2JH8(!.㠄A%e qPB*?R2qRo =LH&B =LHTB* T?^`LHTB*80!z =LH&B =LHTB* p%e:TR2JH8(!uP p%B qPB* L p%e:TR2JH^ 3!uP p%Boa qPB'iv|zVX bTgeг2UTU =+,SILzVX:2UTge;)L.2U-T2*S8LzE2Uÿ8LzE2Uq`*e*SBTǁe*SBVzqxB!4Rxi|jum)?g6Pg2ju?VʵikicZ]<)1!6cZ]6\`}mks1UWꚯyUW1UWꚯyUW|淃fڼ~LUkAx]jummU 5|,W]rյy[y?U\umT^|{m^R>rյycm>6_]2yYU8,W=)jum^.^󵫪յyxm^,V!^WyY΋p^,] eI8/y:v*%6B8/C^̮Z$!UU8CNUs1 W]pz<\~N=f~P7釼pXćC^a}8,o>rև.CNrX.?L/9s|&xg?\~|&aI3.c?>ᰤpXR}8l?9=saIaa}qsXzև9ᰤ1aa}qsXz/9=sCÜ0~q|#ag2.0ᰳqk>\a|#e72>9q1!a}FƇ8odFƇ8od|FƇ8odFƇ8od|FƇcCnw|#C_0>89D72>89DCÜ1!Wa}qsX.0~qa[>}@}пWr r/eCCP v~q|C_ sC_&sCP}eCCQ+!W({b96~q|1_ p9&؇^]Mpn}豟`zu97>rn; WsCŀp} s0COa{ `؇0~q }s0Cmak _`؇=a.0>\e`؇g9CMC~&!}~q}e jbs5/.Gb}>25؇<CMCQp&_cMeƚ؇85_kb.0>\a}e jb8>\a}e jb.㰜&!}rr/.'}rr؇\&8,'r2yB0>\o.S}oRr<`؇9!o, x yc`؇@0C 7}a_\ p1!o, px y`؇8,Ss0C 0>}6az`؇9ar<`؇F0,_rOׇ"!~}ȗ![d>~[d> _>{~rlׇi~}rlׇ^;ٯq_.0f>\a~p12ce/ٯq_.㰜!_.㰜!2_zׇ^;ٯ=sׇ^;ٯ=sC/.{~}q_z4g~q~}q_z4g>8ٯ__z4g>\a~}Ø12ceׇ8ٯ__.0f>v~}Ø/12ce/ٯq_.0f>v~p17seW8g>Ft~}qxb q4x%< !l힆_Xlvs5l}մi.ec5C 
6W>nXMմi_c5ejڇ8մĞi8V>\a}%\Mp2c5C/jڇ8մp}%\M0W>{XM0W>8մ=s5jڇa}qiLg}qizjg`~:M6^_6=nڵOzm~8O1uӮSݴkk?-YS7l|4vmGoʵ!iCk'|ͫv׼iv׼i|ͫv׼iks7횯yuӮW71uӮW7횯yuӮͯu]5nڵ4|6xm~yvӮ!vm>$kݴkȏvm>ƟݴkM6oS71uӮkB|sm>6_ُvm>ٹ6s^9͋k|i8=VSڼ{LMkk5ծk6T yeΫp^Nr:Z8vy׸j9Hy9BwS-W; 9n7yr\AT6-Y=.?E<+z?\&[my8opl=[pR6d3zM -Uf6-80zM =̦꠲iǁٴlZq`6iǁٴlZq`6iἴ q{)2z/%^JK eq6-\A籠8轔Ege^AeB.M q{)2z/K q{)2z/%\A{)2z/%\A籠ʦ\<:z꽔W{)ǁٴlZi^Jq`6-"ٴlAeBnM =̦8(vPٴOߌYBd]c u-kl!7> B\ wp-WkB.\;7wB_ 9rZ}k!wnˏAu.?޺EMЫK6dS-dzuɦZ%jT `S-MГͦAuzu_u kl~kl~kl~vP5Yc m5Yc m58klUc Pkl28 kl?ڃ qp-:Tp-:qpr^ZB\ ZBn\ qPp-KSpe\;Zk2 \#:vPpB\ qX\\ \\\ qX\\ qX\\ qX\\;:.㰜:.㰜>x_xi]\_<lrvZCn\ήT =el=pSrvZ7BzT y{Z)cS-큛j+7eT y{ZrvZȻe7e+7dS-8zT yZq`S- Ѓ:arYuf!ץN|/ R'B>A%BK0 yY;W&.!N<q, p R'e'B/r0 qP,+f0 qP,\A p%*a.㠄Yf2JT,\A p0 af2af2afǁ Ћ&Bf0 a,80azÄ0 YEf0 =L^0az0;YEf2J8(avP p%e0 qPf2Jk2a.㠄Yf2J8(avP p%e0 df2J^@2a.㠄كLyi2az0 =L8ype g.\Ay g.80xyÅBpǁyÅBpǁySÅBp q6?r#ǔ6\Bͯ\6m<ܵWqm~qJ=ܵkk?Hou=ܵoѵk ]|+-r|+-w׼r|+-wm>½6WZySZyWZ5^L]Ogw׮gZ|@rgZ|6um>1fͧs3;w?;s擈kٙ{Lٹk)ĵ|$wm>6?qm>xLٹkiܵl|4}m^^q);wδ5_J=ܵ]kkǜ y:-kpޒAKip^ 4 B^N˅Z^ Cp>43p^ 4 8-r -iipZ^9w ;sp9A p{:w eD8+Ǟݹc\8B=sǁݹ\q`w._՝ =΅vBs՝ =΅vBs՝ mp qл; >wweNs2zw'\A\>;.ww;r\ qл;N qл;2zw qл;2zw'8Uw.VݹWzw =΅vB.(ݝ;\q`w.ݹAuBݝ =΅8;wPݹ7T.rQ\GB.t;(]Ƚt!҅m;rk\=sٝ \-sUlvBݝ luB/ٝ ~q^G/ل^ ?d.z&\T eAB] e.Fź*օ^Xz#b]X;lBuz޻?nBz"\E?!fB{c/7fB{c/7fB{c}of }3l_fB0Y gB0wPپ\8.l_}2=l_O^gBna lAeBna l_O^g.g\q/پ+\q/䡄}2Y}28(r l_}2:wPپpeBna qX u/\a9vrdl_rdl_-}#kge#kge#/>x.Grl_C\ =e=prl_gB8zʘ y{l_)c/큳}?ge y{l_rl_Ȼege?d.ZQkaGB u. ҅܈:J)gG*Jr#(]SGBߗ1Jwp9%q.)t!}҅8,$҅܈:J.㰜8JzM(]t;(]t2҅8(JwPQpEe qPt2yt2\ qX qX =҅^S0Jz;(]5tǁQk F.e҅FB);(]q`.Q(AEB) qP.\AQ҅8,҅'0J.:Jt2T. 
LQpEe;(]t2҅1JwPQpEB qPAFB =҅FBtǁQ5F*Jz ^c.80JwPQ5FBtǁQ҅8(J.(]t2҅8(J.(AEe qP.\AQ҅8(J.(]m3t qP.QpEe }(]t2JQ(]F*Jz =҅F*Jz =҅FB0Jz =T.\AQpEeq6,˓.'w0wm~L1W޵»6sm~B|=޵6_rcܵz6_n)wm~.|]/kvmЮo=޵okk|+{|+w׼{|+wm>6W|yS|yW|ߏ1wm>q6?__һ뚁kac ]ĸ뚁k9۵ػ64^5 ]OUgg`1'*kɵ01'sk1yq|m>F6*>5;{|*{Ckvػ6|s`/^8y !/ayp> y;+z%@$yߐ^Qp`/7!z9߭9߭98w?s.ëwy z \W"r἟=^fy!zx =fB3yἐ;L^q`&/80z;L^q`&/80z;L^8/eLޕ s&/\Aʄ\8.weeL53y2zW2y!w8]p+sPʄ8]p+.we]p+.weB_eB =/qLAeB3yǁDg*z Ot&/80wPtgB3y2T&}z!O! qC/ ;[7ru^ŭz!ח_:rL^OfB}2rLA =gB}2zLA.? S%z'\~Nr^%Kxr^%KxW,T /%ЫKB=KxfOq` /8z_z7{꥖pRK\ z%\BX;ZJەZB_ ~ː` ~ː` ~ːAC1$v!з{ cH0s# cHB!qH0\A!pdH [!WnnVAC ĭHn\U V`U[!? V`O$C~7q+0KSpe %a}ǁݾn_q`}ǁݾn_q`/䓁vB} qP/\Aݾpu.n_CCvee.OoyPeM{xmUk1R6?zm2~L=kk| _k|m~1WkW=k;x|ͫx׼z|ͫx׼z#ksyW1Wyͯ+{擼k)k6=k6]O=k 5^vS|Rsm>6[^͏\iS|bym>6_kac^㼳x׮zxm>\kW=k*ڼI1C^,7|v0WgI烕ױ{:;W`%V1WgI9yr<Wp/j.#y?뺃z; \FPo~"uἽ=AVy]zY =.zX;_q`/8zX;_q`/8zX;_8eK^y uA.W^B<\ q+/2z%UprPU?WeK^y9W^eK^y q+/K~2z~~8ƒ 8,'^rz e\/ qXOx~8U080PzV( كig(<:P{!;PzV( =ށг@a\Nk( qP0x C3!3 wf0nܙ7gC y`U3!vf2!W 6Oĵy SgyuWgyug5_ ^5c ^55_ ^_k1vGykcx]3xm>~Lkx]3xm>6g?t|8tm>*6_w6 =|m>ۿ6/GB  N|Vڼ~L}ka5_^W!l yΛp> yyOΫplAOk}p^ԫ)|=!}pl C9 ԫ)2z9 \~Cz9^2z9 )[<e>r@8/SyΫxad0wxa8C'ㅡǁƒ Cㅡǁƒ Cㅡǁƒn1\AÄ8up-02z&p.aBn/ q0/ Ft0\AÄ8uz&\AÄ8upsPp e/ =/ Zt08U08<c080^z/ etⅡǁF C/ cw080^.xA NIr &39r`AC.L zv01ă &ܳ:r`b]!OW= a-.[?xfC/ YjruxAjzxaㅡw>xa%ㅡw12^xP; C/ =eԫFǁxa?^ C0/ =ԫFw8Up Upj.xaQⅡWz8UY6 %e_Y6 %e_Y6 %eÃe.e0> leÐոleÃ* b0\Aep d]6 Ya2 C~lxPeÐe.,s0gˆc C~lra2 C~leÐe.|il.㠲aʆU6 qP0\AeÐx l.㠲a}ˆ2*T0>ep ./l.ܽ9^.xA C߾3^.xa *^.xa2T0\Aƒ\ +a9s0\9Ws\ }%0xP9w9w\ #s\<9z =8,gt8,gt8(xp9s0\a9s0\a9s0\A9Ћe3:C/r qP0+s28(zAe a0\A9 a080z =C/r<`q`0"9AC/r =*.`r8(.`r qP0\A9p*.`rT0\A9k~e qP09pd09`5?s =Cs =Cs!Ps080z<`r28(xP9pCz1.<`5 qXt0ke5ύ B69k6׵9$kt4\6=kh4Y6 ֵ9$ks6MկMCkL4R6vm 4O6ӯMkMk(]Ck~ 9$kMѿ65_sH5_sH|!_^<|MGn6@8ƯGdצksdצ'1~<"6;t|m CdצS_ "?Dm:w5 2xm tkӑӯM6\ʯMMMg6tkjצ_]͑_ 92k|]ϑ_6ˏ)2"lE_/Nyy+2 |qڗ8k\a8%t|t|"@}E_((2/Ӡ`8%2["(~8?_/.#8?ⴅyqZJ/.#8?ⴅyqZJ8_#GAt_#ⴔ|#Atù#=q@G0;/z|『tù#ⴔ|q͔qLyqZJ)/.0"(fʋ8o:/.0ETGe7S^\a~3%Lyq͔qLyq#q;/.0w_8;/rsGEs 
߹#|;Fu_8#iTGE:|+vu_8#2sG0;Ǜ}@"*u /r \I@REU |iù@"* /rb݋^8Xź=(ֽgT{b݋¹X2seb݋8źp.ֽ\{fX2ˣ*ֽGU{qQE^䣊*ֽ8,*>Uvwm~6fS<6?3ymkT^1͏K^''6y S<#kUk^kkUk^Tk^kUhv3kkY6>ݵهkY6^zS͢ݵ\X?R¢c*]ϤGRkIkqc*]fSk|um>}LEkEkU6oS|t׵vyv.]8/yK!/oyp>^ yh ăz"x)h]Qp.!Ez"\A/PԳ_.gyΫǃz<\IϚ%WἉ=H҅$vyzإ =H҅1 Kzإ;.]q`.8Kzإ;.]q`.8Kzإ;.]8e^FA.2B.<ܥ q{2z/#]pqP]>we^F82e^Ft2T.\A]puBt!w҅vBtx.]q.]mtǁ].]ȝtǁ]҅\Kzإ qPtɎօ Sv.VѺ{&G9Zrh]eu!Wώօ\>;ZwPѺ{VGBn ku.銣u2Zrh]ȕu l`.^Ѻл7Fd.rѺл7FB;+fGB v.ʀѺh]q`.Ѻh]uǁѺz'ʀѺp.hA.e Ac^ qP.Ѻpz Ѻ_ Ѻ_ Ѻz,'u? }h]OFB;0ZѺօw`.\AѺpEdh]v.u!?;@GB~9ZѺqGB~9ZѺt\\ w.u3WGe qP.nѺpEe wu2օ܍;Z.hAEB qP.\AѺpAGe ; \3p!s߃8῟o]c]T$<晽6c]O6y\zmk[M)wmk^|.{mk>HܵyDG|+w׼"qk[L|+w׼"q)w׼"q|+c=Hܵ|zm~kFǫ)wm~kF'1ggv'S3BpSkԵ||m~| Bpk+kQԵ`1S|z׮bo)vm>HkWkڼ51B^0|v-ׄ5Cױco&<)BNco59ykr{;eAMA=L.WG8oy1g-*[8oC_񬲅Ye M`+UWɶdAzd[^ qP^ qK:2J^1vP/8(zd[^ qK::{n:{n:{nHW;WB߹Ε=w칅`-+{ns }c[zn2m!'ɶtNsm!?l Ad[:'.'Ns-ɶtNs-m!?l \NNl qP-ɶp%el 8wm2J\;.dA%B.Νl qP-\Aɶp%el 8w򔁓m2SN8,O8vp9u-W.Ye Uᨫl!S;l/jVB~v-E*[Ԯ\G^ vx-ᵐ{0dx-ᵐ{0B^ vx-ZᵐATx-ZᵐA=`xkǁз *pe^ }Zko^;ZrZn3Bn^ }3Zexkof^ = axrZex- k3B/kǁpLpLp.g:8,g:8,g:8(ޞpL_ 8(6pe^ }oAe^ `x-\A^ `x-80z^ = Bo^;Zq`x-VAB>#Zq`x-80vPpe^ $cx-\Ape^;Zk2 8(vPpe^ 3fxk2 3.Zkw 8( kw B^;Zq`x-80z^;Zq`x-80z^ XkǁAe^ qPx-\Aᵃ 8($e^ k2Sz|Jp)=B>)=eŷc'pmxL9km?5̹]!xL9kӉ\ 6pm~1ܮmk_O{ͳmǔs6yܮW|{m~隯yܮW1ܮW횯y~9ǔs6^n͏1\u͜۵1ܮO0\u͜۵|\rn?Ɯ5̹]Μ۵vS|um>6^Ol\S|{m~^Ƶym>6>cJ]dcJ]Tڼp6ɶWm` }{8?Ll c8)d[8/Ńp>~ p> c8Cm!Gɶp޷'Ńpxz lo2η AEB Y f8C Y f8/5C Y9yz -N8/5p=[p=.hV5Y2z.<\Aυs2f8(.hAEe qP4+80rhVq`4+80uPѬe/p4+80z p4+80uPѬ;>GBY2fT4w4A\.Kx wpA_A_jŅe,+c!-ԅp+S!-Ņp.ܫrBZݪ i!.T!-\xeTH ht!-\AYH r!->Յ*Cg!-RՅ +d!-fՅ +B/XH;BZ iV,\ܺvPoӄB iV,4ǁz&򈅴pM.BAM.㠷ieTH e!ަ qP!-pM.㠷iBioژO }|Z6Bߴ1z0+; Bu0Fw̧Qd>i:O qP>-\At>-;rO )|AB~ :SΧ\N1O )|Z#B~ :SΧt>-Ks9t>-\AG(Χ8(.|Z#*.|Z#eO;|Z#eO qP>-\ApB8vPpB8.|ڃ*З&#h!;vp9v-7cGB~3v-ZoƎ4A 9 v,۝,wd,۝,wB.w9 v,rםg:T,rםg:AuB;gdzsz9;Y윅8s.YZve9 = r Y)c,dA =`,1HAdAз \P$ =e e#e#e$;8H.8H.8H. Yee#B-3H. Y[e$ qP,2d$ qP,Apd,:dǁA Yq`,80HA Bߡ3Hz$; Yo$ = *H. YdV 8(H. Yd$ qP,\AAp*H. 
Yd T,\AA[e$ qP,Apd,A Y-d$ = Bd$ = Bd!q,80Hz$; Yd2 8(HvPApB> Yܛd!{s,\ayAϽ9H.< Y$;< Y${p;'ʮ4kc]l4?k5VjeD\6qmc]q6pm|f{mc]U6^_9k^T+6^6k^kU+{LkU+kET$6^X\"ٵx1ɮ\"ٵd|dx>߱VjeZٵ0\X+{LkAص||m>k6?yqm>{LkkC}|jeVvm~.ڼLk5_]eV u,wBl*eZY8oy$d[j2~dc0n#=:*:ŸA:wl*ZY8o=p> 糩p> c8/Cke!Gp^Ƈ=pzX uK8ozX uK8oyzX uK8oyzX uALrX8[yxP/S?+e2ua!W .8epL}PpeT qP9a2*8zX fp9,8zX;rXqrXq^`U =,B\ =,T9,rXaU;_'>~ͱp$ +^A6WCB$ }V!wt%݇WrO!urS.W<{nǁAB{ =\O9vPpB{ qPbo!֎8(.Ae{ qP-ֱ[}{n!7T-=kgBs=gBs cznWz칅^rA})8zs c-8zǞ[q`zǞ[.㠞A})\A_F qP-=2Rzn7칅8H22Rq`-{ns `[GiB?JvP﵅,zĞ[ B?g=T-{n2깅'BNm!'ɶl;d[_tNEd[_tN\rl ɶgWNEd[_tNEd[_tN|i.GN8(ɶp%el yvdA%el yvd[ml yvd[m2J8(.d[ȳ+'*.d[ȳ+'elɶпl;ed[Kɶ~l;ed[Nd[Kɶ~l }i2ïm2rd[Um!VN}d[Um!VN:rd[m!Ofl;d[m!Ofl 9ɶJ&Bg-80vPɶПl qP-\AɶЏL8(s y+̓ @8xW8J37!5[8oCNknp} \A_ٵO;ή1W[fyλp=n] eAλ-kl A$] pv-W] Z^q ] qPv-\Aٵpe*.ZkǁٵgBkǁٵʮʮgBkǁٵgBk] sv-80.76+fE+eVl YQ-p>d-&г`[8Bl =+ \9r`ABl = Bnl = @9vPpBl qPm!8(.`Ael qP-*`[ȕm!T-eBoleBol cmv 1r{`A})80zl c-80zk`[q`z`[.`A})\A_b qP-Sm 8KL2S!kn:Xs Κ[B? 5zY.C8knV~`-C8kn:Xs ΚAB?u.㠚:rl 9E`ABs-/:BsrZ`[_tS횯y%~ٵϔ]6^O_"\ڵ3e׮ͯ!\ڵ|kx}2c̮}50vm>~f>Sv|vm>y6C_/e\O>Sv|}m~!>Ƶ|vm>}Lٵϔ]62qm>Lٵk5_ʮ]9 uv-gIg4,eZ8;yΧa!UgyyP/iX8p8;yr] 9 ή?48vP/8Epzd &[8o-yf-4ἵ e苚Mp^ ἵ }QkpZ OM 7OMp.&A5ed qP-\AMj8.&[q`-BM&[q`lᯚlᯚl!wnT-8r)&[q`l!7cn6ed;T-\I el AR-p$>d-Ѓ`[8B.l =H \9zl;`[ȍmǁ`[}mǁ+*.`[m2 T-Fpel;`[m2 \:!l!yn8vPM; 6Bd `C6Bd c-JMj^z&[նl&[q`-JM&[lǁMSe l2Slp})\AMn6O2jp.㠯?8O2Ӈl~`-:l,d &['t65l^XMl~`-s6ld `&[qpv-:rx] ٵ˜] ٵ˜];:wk!] 
ٵ˜] ٵ˜] \Nk] qXNk];Zk2ʮ>uio ?}"epZ<?۟䓌RflC>+eJ=\a~2s,SfqSfqSfqSf2{Ü2{Ü2{q@!2{q@)pN=8Sf=C>+e]2{ՖRf=CRfR)8k uryeC./9{L9{L9{5:g);g9{]:g=e=NS졧 \sY8wrcC:g==>OtrwY8w.0wreY8wreeY8w.0w.0wrEtrCn9 C09{ȵ:g@":g{VWz=Jp=J^smu;N==8s+=tz9{:g=蜅wzÍe8=\aw.02s7蜅w.0wzÍe8=\ae8]D,9{wt~F9 w9fs=s6:g`C?gs=:g9J=|*eCR!])e2{_WJJ)uCl)e2{_WJ=+וRf\d2{r Y8.0.0`K)pN=\aN=RfqSf2{ȃ-.0.0.0.0`K)pN=\aN=jeqRpyW@P+{ȏ˻=Ije} V'U+{\C˻]D!=V\VvߪV+ r#ZCgVQ8CW@=FT>̮r(O\_6}ٵĵ͉kõ4|6JfVvm>Lk5_]O?ZYȫе ]+ 󰐗ke!?Va!Gyy/<,x87|r\+ 9 @ip^q2z9>\Az9Cyok5SfἨ e8-C_L^Oe苚)p^Ԅ^Ol2N|w,䣿Sf!2 q2JT,\A)pe2;YRf2JB>;ez2 =LT,80ez2 8U,n)W)Y Sfǁ)pB-2 =s,\L) eʔ2 )S,)p2>d,2)SƔY8Bn2 =eL9ez2;wBSfǁ)J\9ez2 rRf2J`9e.㠔AB2 qP,\A)J8(e.㠔Y-Sf2erYȕSf2 `,) >d,)>B2;Y}Sf}L{;evc,80ezǔA};*80ezǔYq`젾oG8Q2JԷepB2;oG8(ez͔Y.oG8QǁO윅~g,S;g9 lA!Ы.vB?us!O윅~g^ Yqp,|:er>2;Y_WNuY_WN\Nk2 )[NuY_WNuY_WN|i.N8,N\Nk2 qP,Sf2J8(e`)p*e`)pe2 qP,\A)[NT,\A)Y+ еLX+ еPZYLX+ е矮P?ʇ\ ȵ\ rJ!WV Pv[-Bm;)>d[-Bm;)BhV yZj!WV;;)BhV =HlIzV !mABg[-\Amp>d!-BZ,XH;BZ,XH HB_`,B [Blf!-RYH qX\H BZrBZe3B?.㰜.㰜vPpeTH Ta! i2*^.Bڇ,~Zf!-8zXH =,B?-vPBZeB iUH iǁBAeTH qP!-򈅴peTH qP! 
i2*8.BAeTH qP!-.*8zBZ i2*tSARFB_񌠅^2vPW<#hxFB_\񌠅|W<#hA J#hOA =T-\ApEeA;Z"h_)a-\ay'8,oX9 +Ge7A #h7A qXްr-\ayoXqy8b!ü6ڮͿkkXO6_׶qm2ƵLkUqm(`=|6~zڵxm9^#k|cLkm|{mvϵxm%~zڵ =kUOk^TOkd=|zm~/AӮͧvm~%AӮTOkk?Ʋg*]jCk7cY3ծzk7SYĵ,3ծ'M\-ͧx3TVLekg*]ҮTY|rpm>81B^.,Z8]V ep>G y=Λ҃zW?-yS!eZ8]p.weTV;wCi[ypwnO A--i }A-$dO p<-V32T<-\ApeO;xZi2B>;zO =T<-80zO xABxZqxZq`<-xZÿпk!ή\9.Zȵk2ʮ|*rZ̧k2r Zdv-PrZq`v-ٵ80z] =̮Tv-&ٵZ͖k] qPv-zٵpe*rZk2ʮTv-\AٵpeBx]V sY-e*^prZjrZ}j},TY-e>B]V;wzXV cYUzXV cY-8vPߪ cY-\Aߪ qPYU.oU8zͲA}*\Aeoep}*\Aߪ =,~`Y-?j,XV ͲZ꽿,ރe,~`Y-s6j_ ~#xZtZȏv\NG }%hB~s-G;B_ 죅h>Z#NB~Bf\ Oo.|qv-Zٵ)k!r-&[U-Mr&[ml!n<|q-rM/nT-rM/n~v`lǁMl6فMp5Bj?SeЗ j?S2aY-gj.g.LXV ej,LͲZ j?S.㰜ep e,8,g.~`Y-\a9pY- l9pY-\a9pYj2*8z²AeTV qaY-\AeYV ͲZq`Y-8zXV =,~fYjǁe,UbBjǁe*8.㠲Zj2*8.㠲AeTV qPY-\Ae*8.㠲Z-,jUV qPYCvB< d7, a솅ynAuB< Ug7,ݰ/LznXa qP7,\Aݰpu.nX;a2C^.㰼?nX qXr7,C\r7,\ayݰp!wB@.?̀QkkO|߻61FŮ͟?cT|\o͵6_53EŮ̵͗6_0x0*vm>LQk|68gb{8ݕk |LQkI[\|+*v׼b)*c }pص|\wm>kQg ]ϭ:c8|Jwm>Lk6W`8ص\]LqkԵH]6_>S|6{m>6_W惨kg}8صy`k6/ͻs,U8XτQPKq  磠ף`;w((d4}9yr;we.zEqPE=n -N8/5ye,8p8/5C_'pg|"q,䳽`! ,8XO;p*.8X`2T,\AqpB`! =*z =|Tw`qWq8XE`ǁq_!ܐ.#W_2rWW_rIWd+rWq`+AB =B. =\^uPpBn qP_!W8.Ae qP+׉;;'Sk &Bn `C&B c+֎J|1zkW_[;&B_vL|&Bo =L|{Bo q{e:8=2J|^b3uP_ qP+p}'\A_ q{e]Ɓ~g+_ b WU_, W B?guP|p+:r:WHN|WHN|\U ɉSN|WHN|WHN||i.ǪN|8,ǪN|\U qP+_2J|8(xʉp%*xʉp%e qP+\ASN|TCB~s+ϓ ̡?OB~s+ϓ u PWOPׇ,!rWhL||^q+׉A_!7N|.&W]}Wm_!W]_BuP_BB?Xz:W eП W+П }%035_3'B_ L|L 7F&Bf+ƅЏL|8,gN|~d+\a9p+#&_2_I qX [ qX:W_2J|޸0uPp%Bo\ qPC&B?3z =L|&B_Й:Wq`+/0&B_ B_ǁA%e qP+Šp%e qP_2J|8(.A%e qP+ѬBfS4LѬk!׵|{Yͺ6^uhֵh|Yx1um>19Ӝk3X擜kA5ƺ6o_q>S|ym>ھ6l_kag c}0ֵyoXCkƺ6s+U0V}cJKa ףX";7%P<1Wd19cycr:7ewqPq2uPapԄ2[ySsP2u0V8ojyqyP?ɧB>; ac|w+O:B>t;.0VX2 cT+\AapBX! = c*z = c|wXX!Pc cT+2a0BTqV*rVZ2Jjm8.ëV Z!7iNj*!Z!WNj^&$ǁIVZǁIJj\v9z =Lj\9z rR렒Z2Jj}9.㠤A%B. 
qPR+\AIJj8(.㠤VZ!}NjZLj;;zևLj\>;zǤV}Z cR+I{o'4}Lj&B =Lj1z:ӄ1.㠯ӄ8(uP_ qie fR렾N.㠤V7Z2:MN.㠯ӄ8(!Z, `R+Z $cR ͤV%Z2]Z~ЩSS!/MBީ yvj*کASS!NMaj*\ALM]?\޵Ώ15umsm!_oy?5dj|\kERSK|\/k^6}Եyms^okm|Lk=|vm"ĵym_~Եk敚k^cN3央QӨk5^KI]O>SN|,{sRCkgI]ĜԵ8ǘ6\]ΜgI]\G.wkA3央gCky|m>h6;~gI]7)'um>kP9krڼ1B^IlT8u 9p>y=:'ˬzK;:#OyλݐTQpN*w!9zK;\Aoi8-p-pz mL8/'y9eN*1nn8/'C_I6&nng|pN*3sR!ҝ LT'u?ke qPN*\A9I8('.㠜Tq`N*ssRǁ9ABsRǁ9ITN*8UN*9W9I\89'zX:bT.A*F\uPŨpOB/\ T1*\SŨ 1e>UŨ`B'QbTq`1*ŨbAB\ =,FBn\ =,FluPŨpB\ qP1ꠊQ!w.F8.bAeT qP1CBv l`)i>d)"iBޘv:SNwoL;A;tP_B {c)"iл7BNwoL;K(woL;8K(2J;ԗPe%pBv:/8(zʹS./8K(2J;}zSp֛C>MyYo b^ zSȝcI!؎%T,)ϱ>ǒBK:9XR ǒBK ys,)ϱ>ǒ?4c8ǒec8ǒ.p%8,p%T,bI2% H7.x7B. ` t)GMWQ)\*7|ITכB\o .u)dr]zSȣכr]zSȣכB?#tPzSg$֛BMUo zSq`)Mh f)M 4~`w)O 4\v41 !B?3.h @@Sòw)1M2n܁Џ^ 48,q?ݸM2n܁ 48(.@S/M*.@SM2 4}@SMMǁ@Sq`)80z@ABM7 4*uWBMǁ 48(.@SM2 48(.@Aeh qP)\A 48(zY .3KkY ZeBoՙY:Ref)VRcrgBoՙY =,Tf)VR2KY qPf)*pgBY qXqf)*pgB AeeY }2R2KY }2RÒYpl0]?\/kM`66_Ᏹt7`6_SK|E}ӵz6_N|1]gj0]zm>6Sͷ`6Pa\gj0]®g|ͫt<;Ktm>86]OQzag|f:Kk^Y6^O?Sgv?ҵ||rwv>Sg|trm>9濻:K#kg,]k|^rm>=LRL-k5_gj)]5쏹JsK)g4T"Rjn)Dk-p;K|*Χ|Hkp^ÆB[Jἆ 9 n)K2z;\A/q8tP/q8%pCKἏ d8'C_ ->^ d苚pDŽz^ l-|wh)䣸CK!Z 8R8-|zvh)\ApeZ:RBK2 -B>L;zZ = -Th)80zZ \AB_B.Z =?RȭCK7Ua: Sת.0ktPp^UB.7\a շ>ex+1WexUa`WB/+L Sq`) AUBn\a =0VB\a =0{tPpUB.\a qP*L!7ޮ08. AUeTa qPCVBn]a `)k V>d)k VBoXa: S+Lv0׊BoXa c) [;VB+Lv0V&v08&207MeMpUB/Ya:o8z Si.o8&20} Sg^VC>+LyYa bާ :ABZ y{sh)͡CK!oo-dK)eBóײ[J!--пR yABR yRURRšݡ;FBZ cth)ѡ+wBZ:Z cth)ѡZ ct)\0S S+L!ץ0{VBK]a y AUBK]a y S(VzXa Sq`*LXa-R #cKZJ?䳥GƖRl)\6n)ˏlpC>[J!Wzn)ϖRg^.ne [J2j)\6n)8,ne [J2j)~eK)\ap-peAeR qPK) -j)8zĖRZJzRq`K)8zR =l)DtP-RM[Jǁ-j)|-Rq`KZJ2j)8z1ǖRZJ2j)8tP-peR qPKZJ2j)}Q2FB=:Q)c(ѣSQSiGB= =eT(ѣSQ0G= qP(Zѣp% GB= qX^p(Zѣp% FB0ztPѣpEB0z.AEB0z.Q8ѯM>~m ~m)G6} ~kӇc"Dzrj6~m~m~m~m~H үM_n6qm:~kDfkԯMotF{m"t*kYHkAksצ3_p~m:5^/"t|sm"tk^D צsksx k1~m: wGDצ_}"tzkǵ9kM'6ڴצ#ksD6~m:^5_gsצeMc "=䕦 C~~St𐗛H!NAC^s "=Ghtp:i{8RNԇrDz8RrD 痭.0lpe8Ap~2qH]ENˇ/jNp~sᴺ|AӮᴺ 7w$Dzȇt1]A|pV!Dz\s)HqHqHqHDzDzDzq@!Dzq@Ap"=8 C>W+As!J "=8|M9[%k Cn4DzA^r R8.;r exog<\wvCDz D CDzE팇zDz%H="s!] 
"=8 CDzq@!^ "s2s!_ "=\a"s!7 "=\a"=\a"s2s墇'\\zrErrCP.zE\.zE^CU^\5r׬rCP.zq@k(=8\xEqp\x8墇6\x\.zm3Eqp{q8C?"Nq8C?"N#ĉ~DD#S(DyS!`=Lqp9/S!`=тDyS!`=Lq)N/Lq8,eyDqX'zȣʼn9Np9NG =\as!'z'z'z'zqq*N'zxĉײDkYqG-#Ng:s!Š=䙎DC~\@rrC.U.z#墋(=2P墇܍\@e7rC.U.zeEQ]墇\~eF9kpGF"FC.5zWb5z{FKU!5 CR5ze YR5zq@!G=\a=ErCTP. rC?\? pp\+= 墇I\(=~墇~ F2ˆ[墇~ F2ˆ[墇~.C2ˆ[墇~.C2ˆ[墇|-n.lU. rererCP. rerCP.z\.rC)P.zq@墇zP.z5E\.zq@(=8\E.E墇ڡ\p\p\[;.0.0.0¹\p\p\p\墇ĉ҃8CJ+NЃ8CDF q]ͷk]3OkU.1։>S|sm>6K^5:ѵ3ՉGxMNtm>6G~:5^]"kY5X Lk9Ƶ~*]ZGk)|zm^^/Gy@ D@tm>&LkRڼ1B^i.Q8\ ap>"y͹@Kz: Oy';ѐQQp(w!z:\AoT8ppQ.Q8DC D἟ }Q@ z=7/jy!Ƀz=7I>@a.|v(# D/j@2*|v(\ApeT =,|(v(8@zX :Qq`(8@r'AB D!C.@r;P)2tPpRd(|22r=AEe> pd(\S_)Bn TdCFBm = )B#Cǁ:GB#C sd(802z sd(802rAEe rd(\A \\;2.P"C qPdCvBo uG(V uG(VK2v#zIƎP%;BݗK2vB/ 1vG(P%;BǁE%;B2E:B-p}"\A;cv[2#.oY8[2Eq` j@?A5ЋրE> y7q'Mw7}.'PnrYOȻ>!&nnOGOȻ>!&n\N qXN w砚>2j\ֻ.㠦A5}B. qP'\AMp5}>d'ٞ˹=m ٞ˹=!: ٞзf{B.Z ylOE??˞9.{jB. s'b9B sಧv'b9B~w'b ??Owk O-?!@u'=KOB.9Oȥ?!~.c?^z:z q'\A Ox6} Ml~`mv'zMOl\nM!6}B?.l @̦Oòmv'C>2˶Mml8,f7}?m>2˶Mj8.㠦O> qP'~Mp5}>d'MOq`'8z `砚>ǁM; 6}B> S'8z9O>2j^.㠦O>2jT'\AMp5}e9χf{B =T'0zlOYa'=f{*zlOYa'ٞ8(zlO6=8,oS8zlO6=39lO=3 qP砲=3 qP'\AٞS\?_\kǘ66_sŏ1s{\/kxm?S|!^kexm 5^\ov|m>H6ߌϔ6߉gHckm|LٞkѵDye{~ٞϔ6\SͧxM0sm>KLٞk5^\O͇)s=cL|4ϵ8|qyG擈ϔ6^Cͧ |"<)sm_ϷZR~ڼ1wB^M <yB36\Π͵3mgsg|pxm>7LAk3hsm>31mg \OwP|ym>lLAk|ym^^͇n)h5ck^6o!њW5!?39ZK5!?89ZK׎5v9wy)Kp> -d8o!y)P0Gky r9wesaqPaq;2ք|paqm݄ny'sPo20w;p^QOӾs7! 
M\nB>u;wzV9M8(yc.Mr7 qP&\AMg_nBs7ǁ݄nBs7!W?T&80wrMq`r7U MmhrMd&EBrM.#.Ch.#͇,ڄ\hzY .mB6!7r.ڄmhr-Mq`&8hr9Mq`&rE*ڄ8hrM6U v&\AEpmhz ;u&?52ZrhMFkB9hMFkB ރ1ZzhMȥ5`քރ1ZzhMq`栾zhM.hA}u!\A_]qP&ZњB5Œք8 2Brk yQv%r׽{-!/jZB^\:k u%E^ý{-!/jZBok qX:k u%\a9p%ֽpZek u%\ApZ>d%zIˁ,d vr$K,!7Na0r$K ,!k_\r^e^K-{-!<r^Kȥ{-!<\r^Kq`%ǽK#(rˇG`%&1/4:KŜc.!7T%b1_\B?1rP1_\B?1zsO쵄ez-k o^K'Z.KOZBnyk ^e^K[ {-k t%1{-2ӽ쵄8,KOZB?.,=k ^Kòt% lYz.-=k9^Kz-2굄rPpZKSpZ>d%^Kq`%8zk `z-ǁZB{-k ~{-ǁ^AZek qP%2pZek qPz-2굄8zV }2zͰAUB_ ^b3eX%aKlUB_ TX%a-*ץ TX%maRUeV9Ju)*2 ^2rPapUeV qPX%\AaЧ 8(rPaЧ 8(.㠰ϧ$UͿȵymEc \_/k;+VU:6_)rmH6_ OU7k|6\og \kֵ}mCʵ$|{<*)rm>6_\ΰʵ3UxsgX|wm>Lak3rm>1Ug \'wPX|ym>Lak|xmހ^ͧc)*k^V6 aW*!?39󐗍*!?89׎*.L ua8 y{~P/Uy]rV9eq^6qPX^6q2 |pX^6qPX%2zCVWB>rP/2  p_KOQՕ㮮|wu%C+!]] =HTu%=H8rPՕpUWeT] =|0vu%8zX]9Jq`u%8r/AUWB+!CVWJorPՕ[)WWB +!7ek WWepA}!ՕpAUW>du%vՕJ8!8zX] su%8rPՕ;;WWB+ǁՕ;WWB+!7_Tu%\AՕ/WWeT]9Jݵ+28rPՕG <ʇ̣\:y,Q*y,Q[Ỵf1r.L7;] v]2!˄p.u˄p.uL Ȅc@2?3 9 s@&|d.kadBM ˀLȵ2y qXȄ~e@&\aY ; qpe-L=de2_`ZpdB?1 .㠀L2W@ T@&\A+ dedB2ǁLq`@& ȄdBo? = T@&䫊ȄdB2 qP@&\A@de qP@&\A Ȅ8( .㠀Lq`@&uˀL%62 }2 z̀LQdB/ }2 sPKldB_ Ȅވ2 sPзqdBoD qP@27 Ȅ8( z#ʀAde qP@&\ApdB0 .㠀AdB0 .㠀L2?66_#;1 sm5m<7kkc@[6_|~̵"6_K|^/k>6߬k|lsmQ_ӟ) smK_OlͧV[2Úk1"1ck)ĵL\>S$|w?[Fbgq棸Ɵ-#1cc;6kT|Xxm̵|Pxm>'6o2{k)g |̵\|,tm]~?L+!} -x! 
-xk!p^{-x8 /~?CC0 9 {2z/8\A8(sP8p`B>8sP8(pzX )ߕz6pʄ޴ l] JLgnWbB>ەSJAUbyI;SJL*1U qP%&\AJLȧ_WbB+1ǁĄVbB+1!?T%&8rJLq`%mAJȄ2!7NȄK9!z> pB栾e.o\b8!.o|[!WNȄ|*!!2!NȄ&d[ǁ Le2ǁ JȄ9!z =LȄ9!z 3sB22JȄ\9!.㠄A%dBn qPB&\A Y lJ̇ĄܑٞЫ.VbٞЫ.VbY b%&וЫ.VbBs VI1.׭V9zo?\[.׭r.} zo?\[\Bhs VuChs]>}c&o$`BFb&4`B@c&qC0??o?GiBM~p#χܴ yct&M7F7mBݴ yctrvMȭ6!on\ήܴ yct&M[u7mB~Np&\AM[u7m.gWnڄܪi.㰜]isPM[u7meԴٚIgk.iք{2[ٚa5!ք<=p&3[r;lMgkBn' woMȏv\VބhMȏv\VބhMݛ7!woބܽ9xrMݛ7U7?2xz = ބ|q&,Gжcbb˶}ۋYaE<.ho几Mk C`&N=oB 7!T&SoB9M)7Ot |M_ٴ b栚6i'ĦM/l\nڄ\iK>6%6olڄ>ir7M3/62nMg^6meݸ6ςlڄ8,q7mBٴ qXvnڄ|-q7meԴ }d&\AMp5mBش9M6W@lڄ8iE6mBFش =lڄ6mB6ǁMЫ6mizش a&8isPM/Vizش =lT&\AMp5mBo ٴ qP&\AMp5mi.㠦M6_d&k.ӄX }Lzu2M諏eЫKiBY }LsPe{jiB_},ӄ^]LsPe7ciB.Y qP4W,ӄ8Lzu2AieT qP&\AepiBoY qP47,ӄ8L.2gHX6b_kq1i|6R_~%[5j͵"6_Kk\/k{m|Wk՚k|6Ư^7k=k\.GO5UkO\a?jT6?K6?J6?X-5j_SL\]}M՚kݲZ1i߅ekڼ6?׻6?ֻ6?ջ6迦2͵޵޵޵yym_U}MeLsm~sm~sm]ws&2M>.ӄ.;e2M/@.ӄ.;2M8/pe.;.w>{C4Ἴ 9 .۽2z7\Ao8LsPo8piB \9{eT mb qeX ˫ep^R2iy+Kʃz6N2M4!.ӄ<ͺL2M)c4ἤ 'FepiL.2M4ǁe'\iB4ǁe*ӄiB4!<.T&8Lr2Mq`|&\7 M.  ^r|&2>r A.#7C" 2z?pAgL53ǁp~?80>z s|&80>sP񙐋9gB3ǁ9gB3!bT|&\A񙐻1ge9L32τ8(>E&dȄLe2LȄI1!sP Їp&dB落 \'! ;)&dB.h b%\x.W,y\ z<\EYBce9JP%vQpy$,rvgYl }g%mŖзq[Ba%[B>s%[B }+,c&`BC0!~8JL;+1!ĄS.9+1!ԮĄSNJL;+1]ԮĄS9+1HĄJL +12s$WbBn] qX#sPnWbBC0!7\]9LeC0gW:rYLf&!z`Bn Q~"k:!2aB&wM'd. e'dB~tB&wM'd. 
e'dB~tB& 4'dBnМ PvB& B _a =LȄ&dB =LȄ0!spYb;!ruL12 tB& b&d*!@̄LòwB& peLS$22& ЧH&deM2_`& p%dB" qPB&\A !&d*!.㠄L22J| K&dB2ǁ Lq`B&R JȄ&dB/U =LTB&NȄ&dB2 qPB&\A ;F&de qPB&\A JȄ8(!zX ~+1/0VbB_`ĄNc%&vKlVbB_`T%&pT d%*1﷬ĄN.JAUbBo'Y qP%&vĄ8.JL*12Ą^ֳ.JAUbB/Y qP%&\aĜlPtlclD>JϦMϦϦ?~ɦMw@Agty_ 2?.MϦKgӕkd~6666=ztt6d~66=ugӭgϦ.P6Wb~6=.gğJϦgJϦf?M~6=26Wb~\9̵iiϦs?lZ_K0?lz0ϦϦ%ϦGN̵!Ϧg4?p?vpTy+F%~i%J0%G%J!`N;p~ᴒ~8NOhN;ᴒ7xrTy8rT 7x.0p އ8%p~2qK0_%p~2s 1|eicLL8jJЛ%\rͪK8pp~]+_~Co?^ ׵.9@WzC/^J8U*}C[}hs[塿 _VygjJJs!J\9[\+$[rkdC.ΕlyF!J<\ɖpN p{.{j%[dClyGH#b$[9b$[.<PH<\aydC"ly @ɖ>E"p-[(p9ЧH$[.0'[.0'[z?dK8'[.0'[zdedE$[zdC-=H<8 dC/Ul dC-TAɖpN<ۈJ<8 dKx0.0'[.0'[zLjdedededK8'[.0'[zPey=5,\eykU1,cDԨ<5*K8WYzO*,cD%,}KEw<\asw<\a<Up<\a<\a<\a<\a<U8Up<U8U8rmܽ6_#;Cc\?r͗ǵfym1Wʵ:OW6__Sy|_k{mz5^,\o6~m~rm__Sy|6?6?#6߾Wyc,|Mkڼ6?+X_Sy ,\`]_}Mk1Wʵym~mޚ_]]]7_Syڼ6ʯOTW6?N6ˮ벏p]%Uy{p]%Uy{p]%em8oy{Sy]p˶0%e. 9 ˶2z6\A/ۆ8rP/ۆ8epUy{.㠗meTW q^ qP]%72/2rP̆bz%vWyyP̆ܝ^ yvz%镐gVWB^ =LTz%Wt'|2AWe^ qPz%80Jq`z%80rPJq`z%镃JWBr^9J?J)c]ޱ'r]%rAc.SwC.\W );2ez>up2UȺJ}*ǁup~>8zXW xs]%8rPu7UB*ǁu;8UB*!bT]%\Au1UeTW9J-*2|ETBP ^t@%i#TG }Zf@%TTEpEpEpEp)fzIc&wow8T$'/c&o5eb&?y3 ˘IgQe$'/c&?y39geJ'?tX: IN>IlNB5]: JU3(!?A d%䧃3('3Π:˓gPB>q%gPBn~A d5A ud_gPe'3ΠT%KJ'mtrOI.m\: vƥ4NB]: =,ܠtrOgºtrpºtrI/.Ik_~]: vr$_NB~u$֥/trI-K'!ς.\. Iq`$8t,Iq`$YХ J3(_d%k'gPBA c%䫝ΠEt.< p$ LpNeT:9IJ'2*^trPpNB/UX: qP,yitzX: =,NBK'*,T$8tzIq`J'!_s$8tzX:9IJ'2*1t.IJ'2*T$\A/2fzȘALB_&^#2f2a$1ЫhLB_&T$*1wMLB39I&c&׈8(frP1kDLe3 Fdb&28(f.㠘Ib&ꌙ8(frP1[uLe3 qP1kG61͟czm<7k7̏1f5L5{2frmb& |^/k{mxe|#6ǯͷkӌkM|b&;AƵ1е}׼b&~5ϓ1)frm_yc,k\D]1kckS)frm~um~1KkTPLڼ%ZYY%ߵy}m~5K`ɵȵyumz}W%!8XK%!8XKׇ%:by Kp~&[pz^ g" Wip^ qk2z-6\Az-6\Aņ8(XKp.`I8/eZl% qP$8frPņe$vLyyPņ_]3 yv$5VLB]3 =T$w'fALeT3 qP$8f Iq`$8frP5Iq`$5LB.s, ҃`IM%!wA$KB,9We|ȥ%2HzU>\Iʇ\:X.`, u$80Xr`A*z, \s$80XrP6KB%ǁ{6KB%!bT$\A1Ke,9`IM%2 |Mb6IB$9&I1$Cl@&I$!nԋCЋCЋCPY$VeK2ExHg<$ !ǞПT<$ !? Пd<$ ! 
)!5II򉕛$?$ &$ &I$!?tȧΎtv$_ّΎ\8;[gGB~t8;rpyH+Ύ|H#!8;뤳#!WΎ\8;rEH|ّʎ|m*;#!Ύ\(;;rHȅ#ّ egGB.9H=#!Ύ|-vr$nKB~v$`I/\`I/\`Iȵ%!:X %ǁOK.[XKBe$80Xz, yt$80X`Iq`$ M?!6IB$ b$OMooI@&A5IB$ y v$Mj>3;rgH[fGBgv$ّ_9쌝 .uv$ّʎ|ّg^fGBy9H3/#2ʎ8(;rPّpeGe qX8;.H#2gGB qPv$)ّpeGe bv䠲#2ʎ^0;. Tav$80;z =̎fGB/U9Hq`v$RّAeGBHq`v$80;rPّpAّ;FfG*;.H#2ʎTv$\Aّ/2;zAeGB_&̎^2;2av$2ّЫhfGB_&̎Tv$*ّwMfGB/9H&#̎8(;rPّ@fGe dv䠲#2ʎ8(;.H#w̎8(;rPّлqfGe qPv1ȵ|\Տ1;rmT6_>_y5J5+6_k|Mkm͗6_x3Wrm6ͷkk|r%;5_ʕ\5\ǘ+s%a޵Y5+6?r%Tckʕ\O]gIkbPLHڼ$YYYy~m~85%I$ɵɵy%vmވ}IW$!8I ␗$!8I ׇ$;WfyA p~^pވ^ % Xip^ q+2ze6\AIze6\Ă8(I p2.$I8/el$ qP$\q`$-$!;I{ʃze6z$IC$!N<:I$Ie䠒$ἧ ;IgT'I*I.$I$ǁIU'IB$ǁIJ&IB$!w5NT$80IrPՑWGB #{ȕ#gՑkWG}pr7H̊wY!ˬ:EVGB6] =\:rPﻇVGB\ =Tu$ՑHq`u$ՑH՗#U qPu$ՑpUG:raHq`;$ɕ퐐>CB\ a;$#'WCB/r t;ފ"/M1BߩY}iN2A1BߩY}f#c 1BߩY}f#e*cS*;5WQS㠚!.55Bߌ!?ty3vS#M<nqp#ߓR7.Otp#犃n|zFȧn\@:遃!9r遃!n8,O_55B US#7nj܈rFȍMQ75BnD8F-!7nj|/Guk!Kk[8B~wಝt#|8B~wಝt#|8Bc\5Gq`#5v5ߌY=q8By]=q<z" n8F!Mtp#i n<-;FӲ}Zfp# nF3/3qpY:rF3/Ɯϼ n>2qPg^7eqPp㠂2 n8(.A7eKsyA7?qPp#\A n8(.FqPp#ƅp7F ǁFq`p#80zA7B7. n7*47Bǁ n8(.F$2 n8(.A7e"} nTp#eF}/7Bfp#eA7B}dp# nk2zFcp#\A>7*.F2 n8(zFfp#\Ap7>$k5rmc^?W?ƵS|y\oocp< ~ n\/kmƵ¿6_|_/kܸ6\iϵmMk㚯y7Wpc n\kr n\S}MkCk1qm~Bum~@57͏O?Ƶy1~m~um~um~um^MkkkӬkڼ6?_Spڼ6o>FȫSqK_Vqpވ+ q8?/ X8oyA|P̆pވF,48qP̆8p2.A2.Wfeq^ qPp#2ze6\Ap8(."moߜ>mg'n7By0up#A7yOOwB/!ϨnTp#\Ap7B!n7B= n7Bj8Fq`pw8Bk{}k5;8}p$rqG wA!Wq E8Bkǁ538}Gkǁ5qܢzX=qܥzXsj2q\.A8Bn]"O n\9FEx>2zFȭ_dS#ƦF-lj߲qPM[65BoM[65Bo}eS㠚lj~?4NuOHJu|XTG}gNuDr#mܩq:*E7B2r]遃!nqpO H7B>=pp#w07B. 8<=pp#po~_55BjpFȥ!w5nj\zztS#Mjjոr?+c} ~r#58B\ye G-kĬqT#tq>@A8BYqP#\A5q8.GjUqP#~5p8?TqP#\A5q8.GkUqP#5p8G] kǁ5Gq`#8zWA8Bkw5q858Bkǁ5q8.p0zuGj2q8qP5p8G}kU}zG˄5>8BoY}qP5n8B5Ycjq.A8BXqP#5q8.Gj2q^.A8BYqP#\A5]5kG61͟czm<7k7̏}smk<:*湹Ɵ +桹6̵ydǵy`rmk\gkǵsqm~Jtmи6f|MkUk^kU+c۵9ߵ15X6?*Wckx\\}ōkkkk*n\K]J]J]|T"׵y17H8? 
]ōp^4\ōp^>\A7y Kp~&[pzޙ g" Wipqޙ q;2zg6\Aōzg6\Ă8Kp3.F8/el qPq#\Aō*nxi2G8ofOUpUܐK8B8Gs!ql8*΋p~zytr2q8(zyt#80z8Gq`#80rA8B!2.n7B]qq#|A.wC]Y;2+z=zōp7Fq`q#8rAzXrq#8qPō/7Bǁō{/7B!b.nTq#\Aō17eT8Fͯj9pʨF] _c>2zWèFť_dT#5ĨF[*oj2qPQзTF5BR QзTF5BR}KeT㠢oj2qPQbֲ.nFqP7#\A݌pu3.nF-2f|.nFqP7#\A݌[v3.nFu 2f|݌v3Bǁ݌nFq`7#:݌fv3Bc=fT7#dfv3BqP7#\A݌__f8.nFqP7Lc}%0z4F+iл71B繁}%0qPi{j1B{coLc޽1.4A1BޘqP#iJc8(.4F2Jc^E3.4A1BqP#\aNc\1~6}zlF~616}tCgϦg 4[i~6 ϦϦ4UlϦfflMiٌMi~6ϦI4H?!CggϦMOs~6}93_s6gl1d3~?c~6m~6-)pzpzEx8yׯ4C~!Q}k@ib+=ipzpZl=[=o8pzpZl=[9 Jc˭q_n}re4F8pև8iӞ2˭q=e4e4ip9p9i|iD7ᴶy8m1mۇ{Ku3LnF8w3nCxG7#-C=yT7#qq=fb"p.rE.Cx{}C"}C}\<_!p>.އE8.އCxqenC>Q7򃟻x2f<\~s7?8nC>oT7!?xOu3¹_䧃.U#\(kD1Lipyz4CKxȧJc<Qi|z4C>=P!׈Jc1r4C9q"?OeYN*S4Cn'xxȍTtoo_7E5rF8G5ۛQT㡧 Qpk*E50Cfx)CT!3j<"GQ<(peC~VTQD50F5xԈj<8 〨Cxq@T!j<8 CPxWUCPxsyVq#yVq!*n<9[Q|HQ>#'D5C客}|GTĈjsT!Yj<Q>#Q>#p9p9g9p9'WD5.0G5.0G5z?e?eeF8G5.0G5.0G5z?F8G5.0G5zeED5zC=j<8 〨C/rCATQpj<[j<8 〨F8G5.0G5.0G5zeeeF8G5.0G5."W}% л7D5J@Twoj<QCox#{CT#_UTwoj<*Q8Q޽!p9л7D59p9p9p9p9л7D5.0G59ЫhD5.0G5.㠨fTymFwkGfTym<7˻]b扼6y~mk4~M1k,^Ϛ1k ^<_S<6yxǵYtm(6?k^1)q׼b|+q׼b_xm~w[:/]]]k;s_S` u\j]j}Mk#Ƶy'57ϝ͏Ok6lk n\\^vp##Sp_!8;׀:F8yΏDyKp+H$^ 9 n+2ze6\Ă8(q?F^ q+2ze6\Ap8(.F8oeqPp㠂_ n65B8tCUyHtS#Mjj<)\FfS㠚 ;'MF75zqPS#8Fq`S#8qPMFq`S#Mfܟ8rlFq`6^/Fq6#80lA^.Cq2z<&ٌpe3lFq`6#80rlA^z{r6#80qPٌ (g3Bǁٌk(g3B![fT6#\Aٌ;.g3e8e=s6#Aٌ{f3?}Pd6#ބٌKDg3lF_6}f1lAe3B}c6#ٌ>f3B}c6㠲}f1zFd6#sp+.?xe3B>Vq6#\~ft`6;音!?lAe3B~:8MQ75.uS#nj܈!׈nj|@Fnj\#qpy@F5_Nz<TS#]?MI75Bn'8Fu!nj|{?F}n\9qPrp#2qpو:rFc!AnA7Brp#1pe#Fon7B!An\6n6Fq`p#80r+Fq`p#VFqe#Fȷ8F'!njoqP}k'-BNٷ}8e[8o.㠾l㠾EO[8o.㠾E] 2[|o.㠾EշqP"\A}л-o.㠾E 2[|}K-Bǁ}Eq`"R}[-B/Uط=[T"}Eq`2[8ozȾE2[8oqP}p-ȾE+}WW][طb"}Wշe"}Ы.-oȾEU2[T"}p-Bط8E2[8o.㠾EU2[T"}p-eԷ|poqm6_#;c[\?8͗ǵf~ݿ_wg}{cn"퍹з7&*7Ds!wM17Ds}{cn"퍹ϟa|q}bzɊE84T"\~X|E>UÊ]:Xq*!?t\ECU\:T#ҡ*EPEuC!׈U|zPEȧU\#:TqpyzPE57]C,U8Tq?N~vﲬn'=e UN:Tr;PA*Bjt__']yZv"_']K Ok5rmc^?:?µ|y\owwg|67k=k*4\y{mkMk_k_<_S<晿6yB5_*4\5B5_*4|MkUhk^kYh6?N 7͏OBõpm~t?s] }Mk㣏pm~pm&]Is!5󣆐"n55T!Wyλp~ˤ"n5w!IL 
AΏyˤ*A.e^c^cq{2z1\A58λpUeTEZ2"T!KSUpU*BCe'!OJ"<pPU%WB]E="T!7H%س*AUB=2"VB\E="VzXE="T *აB_ ˺!7>yt^ Z/= BnW>8uC!W,*|rAq` 80|rAq` Kp~w2 IrY(}.c pECX(` ΅/P>BA C,PpP! B߇X(u }! B߇X(8BA C, ]nR/[ЛB 9_.l 嗭|A8 嗭|A/d||AO$ B~"9_pPHDr 8mppyAȥ!?\68<p mKO B.6 m6 t m/<:|rA!w5T |AEA>q ა~pB=S=p{pP=_~SrO!سppYѺr'Bȳ{ !ς){BSqXV)8,+ZeS"{ ǁ=Bȳ{ { )B{ !W#)BS=)B[c O-~B+c !qqlKul!ɕ=*rBc {-Tl!qpe[8BqPl!ыpe[`l!\A/0e[qPlb 2-8(zAe[`l!\A/2zBq`l!80z[=-D0pPBMc ǁ-^1z[=-Tl!\ApB/[qPl!\Ap*.[}%0J`l!Wc w5-[al!꒱7FBj[8B#c w5-8(pPлe[alb 2-8(.Bb w5-8(pPЫKe[qPl[flymFwkG[6p^/kk8Ƶk}|]\/kUqm(ͷȏ1BqmA^xm9^_S|g6;km|W"{5^P\55^P\ox3Bq<#xd⚯yE(W⚯yE(5_kkk3k1Bqm~?P\_"'ocV޵6?s#0-۵y1G(L8? p9#PrE8yΏ`yvPp99-ls>PL8/yr8Pej^@ qP^@ q 2P9\A/8(B;pE(eMc"2PT"KSpE(B]yrވ C`"1ʅp!؃*T1BzMEȝ#e,TP>BE C,TT"}!*B.]}b"}!*P>BE ǁ*T|(-Tx?g"\!/Bo'E\C _5E8/R"\!/?!!/B~ :|Sዃ _t"88|r.u"DzW\:_r]|EM!8_!7W\|#d"I+Bn'q!?mzض=l[<m-BNٶu"mθmqpYѺmpʶE նq"YmgA-m,ȶE2j[T"\Amp-eԶ8EW#l[8mE2j[T"\Amp-B)ض8El[8mE-B)ض=l[-Bǁmk -mzض`"8mqPm[;-Bǁmj[8m.㠶E2j[8?8mqPmp-g"eE˄Ћ/B_& _^0|zE˄ዃ _k2|EE}d""p/*|zE9 _T"\Ap/eqP""pnE&2Kpk~k _\?Zȵym\OkqmY~Msmk?7O4ٸ]qmkX|MSqm6ĵy$qmk68\? Okõsk~\?gp<x3qm4kяk6\55^~\g|+q׼|+5E?W㚯yE?W/^\^\^Ak{懖ge6?- ͏* ȵA6?6&͛ɏ9ϭU>[T$FAyQ΋p~nΛɃ >*7Ἑ EAAU8o&y3r9WzeJo^ qP^ q+2 >\A8(΋pAe l 2 T$KSApAG8o5O6?B^#>Gȣ!n~{6?rGg#͏j~{6?B=l~<^z=l~T#8z p#.Y-wȅ[f=B87՛!Ozu7՛!wbz܊9Ef=BǁY g=G-ǁYz\8z=z\8z7?8G!.w>@zrG87ˡP,w'rrGZ}b#}Y!f=B߇8GCz\:>ĬGCzT#}Y!f=BhGrzG87?S#\.05?¹.rYy͏n~T#秛!͏ D !ץwARABK Ft$AB>=p$AӃoփBB rP{MBB5] r-j!!?Y r-$\aY۸k!!!۸k!!dT-$qBB.\ }O`-3v-$̵SBBN] }O`-$\arp.] qP-䋬BBk!!\vƮ>;zX =ֲr-ZHc-k!!߫q-v-$EB.kaBBny\ }Pd-$kf>(.ZHj!U qP-$\ApB.ZH k!2|.ZHj!U qP-$\Ak B.ZH5k!2|k BBk!ǁZHq`-$?^`T-$8zMZHq`-j!vBBk!U qP-$\A[;BeT qP-$\A8z/vFB9H + vFB_`쌄^3z#H ꌄވ3.:#W@Tg$+ vFe9H;#2ꌄ^3rp %3.t qXR 83.9 qX 83ldg|mFwk'+;#kqmY~|}1tmk]l\'kk]Sk]gI8?) 
'e!gOΏIAIB~t$ p~RλЃ꓄p~4Oyλp~4pP}p~RλpޅI pa.7e'97eq$h \Ao8OΛpIe' m$2T$KS}pIB 9wCU yr$\*z7$!ODN8Az 9Ie$' #'H*Az yBr$80AIq`$80ArP AN~P z'HB }2ArP<;Ae$ z<\[NI\z<\[NrI$_d$80Az q^'=L\8Az 9Iȍ$ǁ I8Nz =L]qP p%HHc+#%H+#!Wd>2EVFBjX }ae$зVF2VH[ +#!հ2VH[ +#U }ae$WFB+#U [I $!NT$ pNէI\}Js$\>%HS$[:As$䇫$!?\ 9IW'HB$! /2Ar#IȯN\8Ar#Iȍ$ῼ4  Ftǧ/r>I[IB' t$}#IOrW}K$!G\zOzx' qXOɕB쓄>Iȕ$쓄>I $' =}4IB'9,' As$}ZIe$2B}ZIe$!IB$!\>XOz' =쓄(2A.A%He qP$\A J8(AzoI$_`J8(A.A%He qP$& J8(AzI$_d$& Iq`$80Az `$ǁ Л&HB$ c$80Az 9I$2J^1A.I$2A p%HBÿK4/ b$xIK!KB_1^z]xIK񒃊^2^.%CT$\A!Ke/9xI%21^rp(8^.㰄/ qXZ 8(^./9$/ qX 8,ׯK|lgv&?>6];?y4?>ܮEMi6M{@g_g̿i6 Ϧ6ϦMiv6Ϧ9v EMb?>~kEM`?>EGgÍM]?5"xͣcgQDyQ~6}b<(?5"Ϧksg"|E("|E(?5yR~6=PgUџMt6=逸lzK\K(gџMq͵clzioim{LCCZ)^8R j)zm8RNO1NO1NNkۇ#k)zimӠZJ8 pe8/C?\aq_~\Ky8=x?8̵8̵8̵8̵Ӫ2s-2s-%k)Ҝk)qk)yV-%gUKyj)Q-!OK<@lR¹RzQKyȃj)\KyF-!OO<8'(RzPKyq@-%k)=?uw~!OP-pZPg!<䣅z'EY~Bh]ԡCvy{.yW%\j0|K8wO:̷>3r |K-_wy*L0oR„I &*AUaBߤX }b&zU7)VaBߤX }b株0oR„|:*Lq`株0!Q 򁂫0[VaBX VUз7Va¹ .׭0rݪ UpnU VUpO>*LU„Wa U*Lqp&8|dL1!8udL1!X8spybdL'NƄ\]:ſrO&=ɄWQO&=p'sP=_dBn~ݓ ^sOz2!7Ʉ\'rOrcAe!Ʉ8,B.Uܓ C`O&y=Ʉ!'L{2y=;FdBݓ y^qO&=dBݓ9ݓ y|wO&\aY'LòrwO&1=Lq`O&== dB{2ǁ=[dB{2AɄdB{2!Ʉ<'sP=+wdBٓ r{2{2Ʉ>'spY'ΞL+{2!ߵsO&ɕ='Wdeԓ9Lz22Ʉ8'smԓ }bO&\A=Ʉ|'.㠞Lz2Փ qPO&\A=Лd'.㠞LM{22|=ЛdB{2ǁ=Lq`O&&=ɄdB/ؓ =TO&b=Lq`Oz22Ʉ8'z1ǞLz22Ʉ8'sP=pdeYƁ-c3G̈́n }26zyL떱#fBR }26sPOel&iлTfe w4&hLȳ1{\z=јp4{;51!WTƄ8(EFcB1ǁј; Gc hLŅ1ǁјƄ^8z =ƄǁјhL8"03„>۳ x|Q8`)Ut?';˿¬Uw*C<0.Lg{vaBnt a]„~W/g9H7KjBC"}{{YM>22άN8:/ꄾ3sPYЗqfue )_DYЗqfuB!fuB_ƙ9NfuB8z9NØ= OCyBxyB0.+^AП+ s'\V< Oxye+\r'88sPAYyBO(w~Bn sPPv'5);?! Ow~eSw~eSw~B'.㰜ʝOq`'~;?Sw~B?]z =lp'8銝Oq`'f;?!9O#w~B?۳}0w~.G(~g'3/;?#w~B?۳Gϼ8sPpu~e qP8ыpu~S'\Apu~.O:?w"9O:?w" qPCv~BDz =v~B;?w"9Oq`';?ǁޘc'8z9O:?2ޘc'\Apu~e9Oa\A n yOuOuO#v~B[v~Boz/떝Ke'e';? qP';?2T';?2KÝЛG8,w~Bosp.OÒqq'\a選.χ[y^6%\W׵Z{mEk]o ymԼ6_%k5A ĵpm<\ߏk3]Wk)tm,\ g&\?!Wk|=6_k0tkkS3~kkqʵ<A׸6^g <Ak\ ]g A׼5@5y>S ׼A׼5@_e<||m>b5xеktAw?S<||k@еyWڼ #p> y@P8qABޤ:g|G~Ἡ{Pp>3p> Mp 3 _8onip q[2z;\Az;\Ao}8(g2 8A2 8(ׁpe yi*.@P_>d(? 5ntP O6nqMf@! 
=l|q@' O:nr(ӎ@ǁ AzOkg~B>`;zO^}|r^`էC>Z9.O/3rəpe~>d'80z Az Oq`2?! =f~Oq`'!K>:X ΒO8Y #O|B?%5,(c=(dzY AzPf=(ݟP ПXCYVwB AR:B1! !c)\A!pև !c)80! !B)9zB:Rȃ%*c! !^ !Xf)\VBH?B Kxe+!pY !ˊW),B B yRqpBH!RB 9!BCH!C+I!\I yJRK+ޕ$Uw%r^JR2WB?_v|)ߓ+/v|K!W_ /d|)K!OI_ yJR}jǗ*Ԏ/8,ߏeCKs94~,:.:Rȇ6ǗBa8Rȇ6ǗBa8tPm/p|)5ٕK!7_ R= Ǘe+Ǘe+ǗBa8.㰜]9Rq`|)K+ǗB?1z_ =/p|)80Rq`|)J W yR-BN`}[+~Zf_rR-B?-`}O+~Zf_)\A}+8.㠾RJW qoyb_)\A}+W qP_)\A}+8.㠾R.㠾ReW}Л +BJǁ}RzW ƾRq`_JW =+.㠾RJW qP_)\A}p.㠾RJ{g ϯe+٦lSm'fBMǁ٦N6fBo;1z٦lAeB2zm lAeem lSMm lSÒdq)M2K٦N6\,68(.Zm:$Zm qX*-68(!M<?rm6|\W׵Z{mk6]; 3]D>Szm\/ ǵ3%k|ƿ-Ngl\U|ɸ6_1t6z^ŵZqmT\gL:]gymLD5ϤӵaykIk\L:]?'q3tkIkG5y&q3tmtLIk^J:]W׼N)tk^Ik^J:]W׸^{6j_q0tm~G|}m>Ͼӵϔ{6e_zaڼ|mGkN:)c82wu)UN!o`t cp>V SpG>S8J82>r8#A%1y948tP8p.㠤A.Wet }p%et }p%et SN2J:4t qP)N2¤Sȇ'BSt:S''B>;zt:Sȇz'BO6N!rt:Sf)䓎N!t Sq`)8zjS'"WB>Cz7Ү6^`6|8r|7C>Qզ[VB%.jӇ6VBM!7 \m:7CM!w \m =6T)vMǁզjS8zXm =l/~<`{)2KЏl/~Zf{)֑K^ RB?-tP~ZW~Z8up9q * cP/A\b }` *1ПAd *Lƥ,ET)*+KQ?\Y YQ)JQ?\Y YQ)*V!KQ?\Y =+,E[U =+,E1r+EUMl*@Ug$Bnb;@uPHPd**T曏k6_kwuk}ǵya}gS]?QWk|6~:յqmv\/e|LkUS]/+ƵOk|6_+͗k\S]uuk\S]?2͟׸Y6\?,qͳNukuk5y֩qͳNum5:5y֩͟Nuk^uk^S]W3թyͫNuk^uk^S]%}Vͧk\gZ]_6_qjum~[3UG׸X6152O C׺\G.|yP媐7.WK8 d81T*\%O y9U|{r\:e{^|qP^|qЋ2*W4U:rUU.rUU!] qP*\A𗗦U2*W|6vCƩB9NAqpU*䃍T! =ST*3Tq8ST*2NAq[ZS|q*80NuPTb*S! Lzȧa'B/&B>81uP2! `T&S!7 %ԇLL&BS!؝:CS!۝ =LLTb*䳻SǁTȝ"'BS21>ЏLL7C31p~<}<SyZ&B31z)FB_ }V #SZ:VB'0>p_wl`g+\Il`g+\Ip܇l`g+ _ \N P2wB;[ yJAuBf̝V,ve }V8e qPg+;[2l86˝VȧwB;[ Vqpg+8r 9l|Op<|q+C#\/S W_#\HGB>98m|We|Wm~B/B>9w|.s+䩗]L:|WS/Bz98w8(rpeC2!/qP+\a9v]Cw|t+F]Cw|t+F] |WȍB_0::1w8,w܎q+\a9t+v]2]!? =wBn8up9t+']ǁ|WB]? =wBn8zHw`+w\U |W't.Ǫw`+:]78 |W'te::.|WG/e qP+N]2w qP+\Aw8(.|WKSw8(z'p>d+N]ǁ|Wq`+80z'wBo1z:|W9B]ǁw8(.|W9e qP+\Aw8(.|Wq`]ǁ|Wm'*z |Wq`+]wa =wT+.,]ǁNwT+\ANw8,B.oI޸zKƵpY}e[7e\7e[8eS-CֲBoz28oLi]oK|ݻ65U3͟ndJvmdL)k|a6_Wo˔ֵ)K5mҺ6_O͗k3]L͟dKɵJrm\#׸Һ6_Dq3uk)kgj5y&Ok\Li]gJ!zk)k\Li]??q3uk)kGgJi]WJ׼RZ׼L)k^Ji]WJ׼RZk_ \?q2um~|}m>Ƶ<׵MkA.\旬?S|}kyk_s+O1佲\|' y\Ać8pp.Y |Wȝ0B>#z:Vȇ GB>";z%|?K!  
Ad.+A/|`vd+##[/l*rd+f#[2z =l|vd^2=l|vd+80uPOlFB#[!Le~d*+ЏLe;ݡ繁:wC?b2ЏLe;ݡ1 TVp.6zCBNWB_5Y }d+M \o"X }ABD&7_'Mƅ7.lwuP7.lwߎ7.lwABup9:s+cE!O =lwT+]ǁ}lwT+8pBαpe vW]2jws+8vAB>r 9nwB]!:y~կϟ~|t\ yWeWȣWB/VB> կ~|t+I_!O\:Wȓ6WB|կ~8rkկpUe2cկpC` c,c`!w c,c`!w;$XZ'B,9 j$ISNq,\a9u,.`2i`!wy qXNk X$Xq`,80 rIi`k =L&B8 z X$Xq`,80 !^ yWBz9up9v+^:;za {`+C8^ apap*.㠰We y).㠰W^ qP+\AaЛ {T+\AaЛ {8(!^7 = {B^ǁaЛ {T+80zaAB1z = {T+\AapB1.㠰W^2 {T+\Aape[Ɓկ~VB _wX:WYa+_gկ;K~he+uPկ~VB,uPկpUB,.%xV=;ZTG+\KKp{.wևh)DGAGǦޏMW>tcGM\FcǦ>G͍>E~lt1smt1֏Mך.5?6]i>7~lt"c^sc[?6]`~kGcǦSk~kN ~l1y4~k>`kGcǦGcǸcE[?5?7~k~nl][?5?7~k~nlǦGǦk~^hǦFǦl:UPǸEksǦFǦco=NytVtp:x8>¹p:x8|<N[އG8NއVCN[re.0p/?\a~92sppp}zz'p.0.0Ҝ[q[,C1{o=䮐[CV8r@OIpo=|No=.[V8Ή|Pb!z?W_C>&+时Bp~iǞ_~e%Y!OJl=^[zq@b|Vb+_~q@b!zq@b+[C[=Hl=ү緫9зHl=S$[?m3[ꇾmFb+߮~H$~Db+߮~H$)TAb+߮~veC?E;]}rl/(w=(]}1FMPz1]}1FMPz{p.w= ]}orCߛ.Z.i}{=$;Z9¹pwε˿s=\~s-P {\ {8}q"ja}ZC=jay̧ZXp#=Yja=s-!T {q@-oaQ ZCja} ZeZC¢p#=\a=\a=jaqkaqkay ZCjay ZCja\ {':rT {qP-!ANp=8|q_|BW!!{'tEWPg*=Y e\({ȳH,RQZAS, e(BC,PW[AS!wT({rحBC,Pp[YR2a e(BC e=(=Β erحBC?JPBC eBC e(BC e=(]D, eyhBC︠POW( eBC?P.g*= eBC︠PO(=: eBYPp ]8,g*=\a.=f eq\({.0 8̅8̅l@, eq eـBeBEzzP({q@l@, e=(=f e=(sP({q@p.=\a.=\a.= eq eq eq e\({\({\({\(  e/{AB vB,e=Hȗ=e=Hȗ=e |Ce/{]Xz/{m'9_p9_Nȗ=\KKHs )_p{si )_.U!.%,|6."_ۈ[wmvmľ6keq66X_ ٿ+^揨D6@]co|6]?Wk hg]a׼^D6_ׯk3]Fk׵uk^k\l]Z׸D5&ڵ3} ̃ꬅwg-o&3y:k|~7|a7}8gu8oXwp.eY;e}:ka.Z:k.Z:k!Y qPg-\A𗗦:k2}ZZ(Bnv2e|r--BOkiUK N)c--kiUK =e|r--ki!D; Z~B>; A#Ax{З7B>,;qAЗ7B>:rAD = B>;vPB>;zD; ZȧaBhǁA<;Y[X6B?ѱyߐ} Y':6}зlڡ< } Y':6}Otl~c,gvqP,g/N8Sj/LKeJ-e)w5L32ΔZB0)JaJ-] SjjR;9.㠔Z軚?ߐ"l!>a ߐ"lR-\~CCc-\.w?d-乡#l/<tr [8gvB:za;[@GB#la;[q`-=5#l2f-S28(թpEea yO[qp-=#l!|t-88ra 9GB>b:vP[G|[|[2Bn68o η^ ̷\o |[ȃsyZ['tB#9z1Ы᷐O;r7W.s-no2ˡo!w~ qX~ [r[wBoǁ᷐Y\~ [q`-80r7[wBoǁmj<u-Fnv;+[B?vpyWm9l~|g-Fnv ζ[}M. 
.㰼+[n2jO`-\Amn2j8vPmpev Aev [nz?m[q`-8zv ABnv =lT- Lm[q`n2j8z{mpev qPn2j8.㠶A>dnmЃĶ[%zv Ķ[Ab-n7Zv =HlT-F+nm;KlT-\a.%QZnY )rgR*rg-;krYrE}Z];vB_j7*۵ڼh׃k_czm^Kg]ϒa6\?c)"lϐkGȵ #lKg]/p׼^a6_ݮk3]ڮ͟ ۵vk^Wk\]/i׸a5۵ 3}!<#lk5yFخq3vmH55yFخ͟׸a5۵3}׼zrk^_<5g]W׼"l׼a6]ۮy+vk?<{w>ѷkߩ{\l2vm~|8~m>d^\|,~۵}65ݮͻap> ̐M8eT-䃄n|x·7|sv Ûp> pޭp>97,3wy:48vPo8p.㠰A.7ev pev =pev S[n2 4v qPCBn6r{_nrp- vPn$B>vPЃv[ȇ1BnvPw.|6v-_zY; l*e7.!]` [geB_X` ['f>d-8zX` @AzX` T[q` l!M]` =,}ȎAuB1p{;FvB?@FcdGލŎZ;FvB?@vPF~bG-;jQ;went:jQ;wC C0!l̳a-5yil˘g }gr&<[[B0yз<̳a--l2eg }g9&pVaVap*.[wABN2.2aЗB9vp9s-=!v = T-䉟nn }vABxBdz.+^ls*\Vmn XUxeū>+n>v[qp-8rn XvABm!kn 9zX} X} y[ȭW_ݲrkշkշٲo!F\} y$[ȣ\WB>շЫշ'P^}|w-䁬oU} [YWBȺvPշ8rշ8rշpUeT} qXOp-\Aշp8,'|?<LT.IN…ct.IN…ctp! $\K p $\}0'e $\򖁓p! qX2p.䮀pǁI$\}0'.o8 zWI$\q`.&pǁIлL…&Bo!r]} [(V.o6z շЏמּ\lp-oY} [wVB?z շ˛ 8,o6.㰼[oX} qP-{O`-\AշP8.[V.[VeT}շ VBoǁշ[VzX} [q`oX} =V.[oX} qP-\AշpU.[o2TCVzX} =Hw^.n6@^,LV5v۵|Npkk\l]?qͳvkkG5yۮqͳvmLL5\׷Ry;)vk^k^j]W|0vm>5v5y>k^}/l]k{a+/l]_6_ͯZVܵ=kl]_LkQ_c+ڼ܊ p> V\8 AB>G>|'ἩwPp> #p> =p#z?O@y;CN[q>q2j2z?\ApqP+.\A_/MeԊ;V\ȝ eԊ qP+.奩V\ZqrsíanŅ|s+.[qjŅp+. ϭjŅ$B>!o!]| D[-o.vjBnѻ!o]|;wC>]ŷO.԰iŷ.}[q`-8ŷz;8ŷAB>`!mm }n[v m 0n[8ݶz9 mm 0nAaݶ3춅~gnq2궅~anqoa܇}BI1$!Cr?: }'Ő\.GsɅ~hcH.Cr:BI1$N!wR ɅBCr;9$.㠐\;)eS}'g9pe%Vm%({.+AٻޅJP.\Vw;]ee%({oٻǕޅ02{ٻˡw\= yn]q`w!/;] f*{z3{1ٻpYޅ~ b.;_╽ ]8we+{.+^ٻAޅ\ޅ| r.88{wPٻAޅgBw!ٻ]]g*{r +]qpqw>_˹;|5;|!* _lvBnuzQwp9w/V;|!`?习 ~!] 
_sAB[B_u˂_BwP;..<&w/1 ~U qP/\A.T/\A.8.㠂_ֈ ~2*8,o|?qX~py3ry;3ryp+wPys/uy+%u/~~2+%s/\ayy}8,8ry_q`/~~WJ _q`/80r3y_B~2 V~g/=#}7 lH_ +FB?3ٞk,~g/#}!w/\ayőH8,8pE*S/\ApE*.H_"} ;H_"}^8(!#}  =FB#}ǁ[T/80zˀHAEB1z =T/\ApEB1.H_"}2T/\ApEe;H_q^yABo1wPy#ȼ_#B ~7 ʼ_d~R =8(wPyKʑ`?eiU9z`d+!#} =ٌ?kyr̓{mk,]?8+L_6u}:ykF׼TDk,"^:c/[\:cY\:c,"^bqm~5y߮k,^ˆ|l!\6 sp>6>a: ,8wBld|Ntld p>6烁p>9 .7$e p}C"\AeÃD!.㠲a8 8l.㠲a8o҇8l.㠲a eT6 qP0奩aq`0~!V GaA%Cn8Az3A ƒJ^L|zuCVCޘ2`țSWC>2WC>2rf |:se0ՄOh peCVC+ǁj +ǁk V2oyv`HBo `8'B,1$ف!p~;CvgC,1$ف!z;C qPHq2 ~v`HqPHY\a0 ?W }L\a0m?W <-:At зYL\N }a,&Cǁ a,&.N8(A6 p OL8,g,~$pY}ˡ!c= էa>ee-'S0\Vra>e?d0䙤/L l`uC?3zĐ;.㰼`bg$euL `bL y).{C?#1.`b L<`b L qP0CCo0zL = &C L<`bq`01ǁă &Ac0180zL<`b2 &Ac01\ApeL<`b2 &8(xP-k7XS C'k7XSs·}|}Pǐ>a_8w8oT1/Ͼt#O7g|Χ!ǃG.㠯y8xP_q4U} =>wt1䓳!\} Y}T1gCC^vc[LC>Xr;Ɛ7n7 !n vcg,C_n vc?d18zn vAzn vcq-w53*y73C0&yp~;] w|C0&yƃz;M>og qPq23gqP1MC^?o;r eHf(CD2C#wP~ eHf(C12C#ʃ3!^ } wPf(C3ǁ e;Ff(.P8(CpLPP8,_iTeqXg<к.㰜Ѻ.Ӻ..frV.f2ˁ2b~.fq`38]̃٭L y|.fq`󠺘!p<.fXw1C/jv1C?.Z]bs0\Შ `,ju1eQ]̐]̐bw1]̐.fqp38r ?.Au1C! 9bT32b8C|\,jESƢfm'5CºcQ3.E̓˫.j܅uQ3^s! „s! y|fEfȯ8zQ3r̹͐oS8yP͐`mPvn3䆲s qPn3\Ar qPn3䶰s2m8(.frߏe8,/}?8r-28Cոr-28CոOA8Cոr-W08.Br'-ΐ[nq8,Br-pU(8Cn].㰼 g]8C[ǁ-ΐ[nq\^r3.[ǁ-gMO8C[wy =,j2`Q3=Y Ƣfg{5CٞE,j\ްrQ3=wX 5 5C.㠢f򆕋2*j`Q3\AE2*j8^X qPQ3\AEл,jTQ3\AEл,j8!wX =,j5CǁEл,jTQ38zWEA5CozX =,jTQ3\AEp5Co.㠢f2*jTQ3\AEp5eTӷy[׼ͭk^*j^Og׼k^g5y}]׼um>5oj]WQ|{k^_Һ5\׼uk^EͿƢ5y5qͳym 5y5qͳym5y5qͳymʵk\,j^gQ󯱨ym>k.jw8}|tQ3!<] þp> p=f8a_8}F8naA}#Ͼt#O7BN5p}#\A_qPQ.㠯y8Χ2*j82*j8rEp5eT yi!ӗ!q eA/C8}zi2}i˃J_~ e f(Cޗ9CiʐP;s޽ eg(C>8Cz> p eGrg(?d23ǁʐO)PԻסǁʐ*Pf(C3CyPзP3f2WVwPKҡo;rQ;̐hAE3C>;r 9fG3C!w[! #g>[!wD qXs3䎨[2`nq.o;.%>Co;1z =L|vb3d3zۉлL|l&>*zp%>e<N|\ g,&>eʖ/C8!w =eL|be'>Ce? >lǦOcyc4MǦcµcMǦecgMMKǦcq-cgc'y-.?6]lJ_/?6 t1c^wՎcǸ1y?l._/?5qͣc\tc\p:8?sx87>RχQ(t2p (tp:8}<>NGtp: |8_y8̍pe每<\an|>>.07>.07>NqqCee_^s"JRC/0:ù0*u>C!K|k| 6_͇|Uo! @͇| Po! 
rG͇|Po!ۼC>C>W?8_rJC,wmso?|T6N_~e6.͇f8wm>]*z}f8wm>\a~2so3_g~:eCߥί3?\am>][΢?2|Wd<χE3_z[Xd<RCk_ lVчAt_p}DG.Wы>] }C#:.>䑮}h*:7>8 :C=>M>rȫeC?":pWч~Et2ˁyeS_EG Lч8,G>Aqre3`EG.0GG.+:p9:zчDGz}aEGLXч@tчDGlXpsW-Me]чަ@WR|۹+pYsWR|۹+pYsW!)}uCnS+vꊆsW!)}qPW!A]чuEr}];uEù+㠮Cک+㠮h8wErN]ч8]ч8]p>\a>\e~-(W[ʕ&ʕ>ru\io\C+W;ʕ>^r\C_+}ȽTL/e{j>^Z=hs!7Q-ӇVZ=h>䖞Zyi8LrKO-Ӈ\an>\an>Z2}2}nL.0L.0L.0L.0LùepeqX^[z[^b sd>䶓B C:}m'NOi8NrIӇ܅U`ˋx >䡄B el㰼e: :}"BqC:}q@Ӈ܅U4\^Sw:}q@ӇUӇ߿ۿem9pwΉԇަ˿7?#{>Ca >jCa >Co!pmDU.0UzqՇ8qՇeej8U.0U.0Uzqp>\a>qW}= UzW}q@\qՇUzqp>8 {>8 qՇC\qՇU9p9p9[z>\a>\a>\as\2s\2s\2s\5qW} ՅCSW}F\W CO6ՅCO6ՅC"Гj8UzqՇ8qՇ8qpW7H}}0$R.|uCԋH>>=+H> %RzԿ&R6ߵs1zmT6޵y>S"ڼXk|YLkEڼN|E6_?S"ڼD+|1ƵDJ5%%Rk5| Lk̵}k^k^f_c"|q5D5y&RtY5D5^H5D5y&R͇t׸H5D<׸H6ߒ|o]wys׼k^5y}e׼3k5y}Y׼*)wk^ߓ5o]ϣyYּ!wk^ߏ5o]W"|:{k^_5D_c"<׸H6<׸H6<׸H65y&Rq3טHkNv8o|t"5|!.H p> p޲=Dj8^8o F8`A}e%Ϸ#O0@/\A_Y qWVep%R++2J|.Dj.Dj?^`J8(.DjKS: L:=wC8АwN|t:4]ӡАwN?v:XthțdCC>:!ӡ! |thȻeCh@C߃1zP: c4;C` +?t=WnC߃1` + q+2 +2z6\A` +2 cC@C9!?" yFh[;@şзv ~`4#O'COthCOm:p-N~thCC3&ЃAӡ!: }t7LCCӡǁth|C.N8(p ϼL8,N<r:4\a9 u:4ِӡ2ˑӡ! 
qXE yJthr8tACeRCethq`:4IӡRCC0z =L<3u:ҡ n th[CCop0έpYJ˺U:4[n uth CC[CCnp8rЃJp:488r 9NCC9zPАth>CCӡ thҡ2JT:4\ApCCnvsi2:п>FGC ]h_/N!O }5at!O AEG?dt4!o =Tt4g_z AEGC!7 yAEGCn::8AEGe qPt4޸ qPt4޸28(:.h qPt4\ayqpŭKIH"i,ICnH"i,IC=Y$="i,ICnH">ICuHrsEp>ICnH.㰼"i]Ie\$ I"iq`48HrsE҃}.$c48HzX$ g"iq`ۿSEЛ,˿SEpwHz3Ec=`t40 #h= FGCa0:zpy{et4wD h= FG.o@::pEG*:St4\ApEG*:.h0=h0 qPtCFG4 =FGCǁ{Tt480:zAEGCa0:z =Tt4\ApEGCo1:.h2Tt4\ApEGe=hǁENICouH"i;/&HzY$ =,b4سHzE;,{IHzEpIeT$="{ˊ_슆bW4gW4.u+z]ѿ^?=StmkG_cWymk|y^Ky^|m,y^W|k]k3uEWk^K^/+B|wm+6_y}k^_k^/׸5Ϯ.׸k2׸5Ϯ<׸6]gW<[պk^b5y}׼TSwk^ߨ5]WW|tm>h5o]>yKt׼k5y}׼k^5yuEg׼k^5vEqͳ+zk]k󷎮qͳ+zk]kqͳ+zk]kwoQ\gW<ۡp>cώn\8ST;4ۡ|.r|L; sp> cp>烈p>;o1u?/0}$e͓p}$\A< qP;y.o82j82j8rpCe qiM͐AnjtS޹ 0f[C75CrM͐7nj|,rSC65C%M͐wnj~[MpRq1M.猎|b3\r燌|~`32a3S#SGG>C_5 ёO|F>C#ǁgF>.'|8(!p@ґ|8,|eJG>egq`3䱥#KG>Co0z =|C0UpY|˺U3$#n _t3u3䎋#!7 C#!ϐgqp3# 9|܀t388yPϐ|8(.AE>e qP3~#2m6y?aѐ1,Oưh=cEC G˟lyaуâoGhMlWGCnb:zVX=hMlWGC !hYau4!t\=hIWGC:zX=h2ܬwu2ܬwu4\ApUGeT qPu28:rwуVG:h5WGC6:h5WGC=Y=h5WGC6: HWGC:rp HWGC6:.㰼hfWGe7 ] khq`u48:rуVGC:zX=hMlWGC~ n%I嗠$A%I嗠$i$ u$ikj+ 5^i,JCoWz7[#앆av{F+ ^iJCoWzл\.u4{2T4^iz2T4\ApJC~WzPpJC~W.^앆`48Wz+ =앆JC~WzP^iJC{+ ^iq`48WzPpJe+ ^iz2ꕆ8WzPpJe+ qPz2ꕆJC;+ I^i'{\^ig4|Ar`5Q]RN&vm:}ϗ>nȤyWzI^ig4;cJC={+ 3f4\ApJWzpy,VGCoX }nY au򢉫9Y6 6µy?kk\31I|͏T$6?|g|Ok1I|zkk>zϼ拦k=5ywg^/=w덿k>z||g^/]o9zϼ3"ϼ^3"鏱HzgEk<,^_3"5yIoG]g<_.3"5yIEpt4o"!q"i8߼[8_Dn|7o|W |7oJ8_DUC8_5[^Y q++2ze%\AE҃ze%\A8HW 2*8H+pIeT$ r4\AEpI?<슆׌!ι+zPߪ OwECh_ ywEChܸ+ !sW4p}Sp~U9_Ud:4/LT:4SпD0ί*~j2C˝Lԗ;Cth_" KӡпD0./w8(zP_ qЗ;e KӡpCCt\1+]ѐ׌ɮh+ wECˮA}$=eW4^]уI_~ IkЧHՎ-Ӄդ[r4[~c4vl\n(2 [r4kJLd4-G-ar_i'5[ǁ-Ӄ˭[Öiq`48ez2 =llezpt4\A-g4\at4?2 qX32 yiri+LeMLCe.\oe-p-Ӄj8,n8e [ǁ-i OL.Wn,ez2 =ltZ2 }n2 b4\έZW@lrn2 si([rn2 b2 yn2 r4F-Ӄj\er2 9nLC[!7nT48er#iqpZ!7n8e.㠖ALe2 qP4[2j8ez,_)q5+YA j+YA _R jlVPCn]A y jg+!ꮠ+!W\zVXA= jȕ+gԐ+wWPC +!r*!r58zPpUPeTA w*2룩 j*28. AUPeTA qP5^ԃ8H Djȭ!Nf"5ΉLT"5ΉԐKl'RC?H=Dj%}L\b;zp2!N8,_t"5ۉp˜N\:zH =L\b;zPDj;'RCH w"*T5\T5\{VPC/=YA =?;WPCXA a5ЫVPCFXA=|QЫVPCYA `5ZЛBVPCo?XA=|Qc5\Aԃ|TA qP5\Aԃ8. 
j+UA qP5pUPd5 jq`58zXA `*ǁVPC+UA `58zXA= j*2^. j*2T5\ApUPeTA= j*ǁNVPCXA d5\[({VPC=+rXA =ރz- jg*¬8. ꃌ>&ޛ0:z?hch8M~:9wm>y7?͵]ݵݵqwm~=Ywm>#|m~]s):zm>{m~]9ct{Lo]o|6?ۮ͏k1wm1m^_||2ߏ1:zm~p_gtzKϼ{k5ywg^o=ռk>z1|{g^o]Wt|yg^/]Wt|yg^]Wt35yFGx35k<^gtӵ.{! g_ +#!g_ G>d3V|#wy o8h_X=hg `4_ c4,C?X=/6Uп zX=/6Up}1\AЃbc.h_X=/68WF>C y烌|g+G>CA}"佋#Ke3ڑσzER v3пN.7FC.7nˉW;4olˉ_. #ۡrFCd;4"'b;rvhq`;48zp|t;4? =lCCۡǁА jC.Wn8zvhr#vh}ۡ2˵ۡ!/i qX& ySvhrAvhCe[JCCٸ.\UzPppCd;48z yov;)CCۡǁА7.>},^.O+ @p9}mOЃ*ވ.O+ @АА+ @CnD]=h !Аhqp48r#A@C !7.@r#h 2*T4\Ap@C^.h 2ß+%n>&l\zpJۡ! yv+%n\ֻrYvh{$CC?5 Raѐ_FqX4aѐk~ECâ wX4 1,rhAbX4aѐ^TX4aѐ^E*,.㠰h¢!=h¢! qPX4\AapEe=h¢2 \z:,zPapEC.=]=hh}!߮> :$:zPѐ>WG$:AUGC] `du4у˷K] vu4\av!߮8,.uu4hq`u4уVGC:zX=h}âGQX4\( TX4\( lECR = ^1,jAO6wo^:zh *\h WѬ^:zh#W#\h :.AUG?>`8:.AUGeT qPu4jу8:z5h:z5hq`u48:zX =^:zPhǁу^:zX =Tu4\ApUGCo Y qPu4\ApUG:.h2Tu4\AΌ?&IC/ɘ$ !0I.Ό L~&0IzI$igdL3I$A%IC$ qP4\AI a4_9Ỹ^6\\0 pm>cʃ^=Gϵεsm~\:gcʃ^^7xΘ6?l{G53AϙkcyLo]^|5yc̃^7x3<x5x3<]g<{N5ywg^]xek>zϼ^Ļ3zϼ޽6_^׋w|6_N^;w|6K^v|1A3zgykm5yA3zm~nf蟟fp73vfwB^HW;v3C.h<Վ̐[Zw3wBn_Yaq3,n\.\ YQq3,nˬ,ww.n]pY uq3r&fq`qrfzzX =,n7C!w.n\n\ qPq3p͐W.n8,7}.npq3\asq3凋2˝!o@\ qX.\ y f8,.n8,7.n8 ǁfȫ@7..nfzX =,ntT3? 7_)z?Ĩfu)r y\#:$0yP!ϐkD<$(ȐA.g +2|>g +ǁgq`38zoAU>C+&|V>zogq`38yPpU>eT T3\ApU>eTC+?V>C XC?X L`3V>CoX Pf33σ|^(.g\ukצצצG1t3mz6w3m_צY6w3m_צ9imks7צim_9C7צ6kirm_ܢk_޿3?wk>[wikkt~g~~|7~m5_󙟻6˙_6w3m5_󙟻5yt3g_~g_G7צט~g_G7צ{_gc_8m_.U^ù~"5ӥʋӥʋ,߀xqTyqTyqc|q"8m_.U ;-RR%߀xqrk /_\a~e7 ^\a~ee88q/r8=08}E[D/"؋k/"0sE[D_c(_EE?a|qzE[DS/1~#pԋ~#27^\a07^\aԋ8G192sE?a|=E_N[Ƌ0"/r p}mE_o0m"|E@Ea|;0"/z˕ڎ/.7[%_Em\v| _\F6sEnav|qImNjh;ڎ/zv|;HIm=h;q@1\v|H_8m=h;q@E%v (_\an;$ڎ/.\D"j;rͫ2˕ڎ/rA8,Rj;-ڎ/.\N"Wj;rCc8_\aReE_8my]c\Xdh;q@Eڎo|F34_f hE YB 34_fIy|,)"7sEno||_8("AhU1/ro|V_8(hUe|8p7Üo|q9"o|q92se|c8_\_;ϟk ?s>? 
k ?{^_}_䥄/""/%~|e ?8/RBpڃ/.7*ET!_E/"DU=BsE^ GUy*AT!_VUy/*d8W!_VUy/*T!ù 2se*䋼P2/.0W!_䥄/.0W!_\aB\|q Uq/.0W!_VUpB\|[y)dd8'#_1A2En'|z%#_1A2En'|pNF/ HFs2E.때|OM$#_^pNFÜ|q92q/.0'#_\aNFE/z|d\+=HFȻ+%#_8 pB2/.ι 眫\|+BGE/Q|c*ޠ "WT *ޠ!׈?u /zc|}YAEo~|ѫ_vDEF~ /+-N"qÏ/.092se^ a?~|q9AE/z~|_jp?q@EF~|c8_j=?q@1Ï/.0_\a?M!/.0_\a?~ 8qÏ/.092se]U B+GBOU$CE?0P| T!_ U@E/P|;cT!_UpB1/.0W!_\AUȟUk^?ksf6y1yү̓~mT6Oyȯ3~mk?by}mk<,F^G1wmk>*F^<8,F^]|65y?ymw_g1zS1wg^]Kz|U6_w\o;k>z91wg^o]{yk>z)ϼ۾k>zϼ拾k>zϼ?3xY3bzB*d8*dBV!kU .㠯U8 yP_ qתeT UȃZU r*dȽ r*d嵫7ahCn] !7zBmMX s2!BuE{Pȃ˝_hVr22pnȐ'#e> &'#Ca \n|Ȑ H'#C'!NF\ =LF&#.NNF\G:z =LF&#C!NF\ qP22Jp&Ȑ{y'#e('#C.睌 qX w22\ar22pvȐz'#e+*'#h252JF>ddq`2280ȃˍ7hLF&#C~ $(zyc]*Ïr~LFx ydddNF>LFp22\a6Ń8(}eQT|%ʐ/.Q|q2dDyP%ʐ/.Ql(C^DzY u2]KU u28DzX<eJ2*QpJ2*Qp2\A%p(eT qPJ2*Q8DrA(eT u280SyPgʐQg*C^8S 1Sr#Le3LAe*CnD } 3LAe*C^8SLe g**S.Le22q g*e qPAf*C3ǁʐTT280S̙LAe**SzLe9 ?2 y/Leg2cLe}3!oT^<8T^]|5y?cT^x晩3Ly3Ly{myLIk5O&%|2)ymɤ5O&%72}k>zϧ7Ӌ||>5Owd&%W曉k>z}ϼ^{L]כ{|޵ϼ^ڻ3fϼ׻3w9zYϼ^ջ31)ygIk|}gaȐ r2o*"/yuP2ME8T]8y57*C8_܅j6W|S7*C8c\8 7|q*C^eqP2W2z!\Aap^<8( .0dȕēw_W 篯W* 3 __ 篯> C>0d8}% } aȃ C> C0d83 Ðǁa? 
= C0dq`Ð2"SEp})\Aa?z-ظ6ﻯ̳qygk1xgk|^sVcjU^WUy猭kczϙ^sWE53wL]㳕k-µ|xg^/]/ӫ}|b5ywmI3w̫UymD3̫Uym?37̫UclU^g<[[k'@f{$2R A2C#9Ɂ 2*Ɂ 2Cޯ8@f!<@fȅǁ@A2e qP 3 qP 32 d8(.@f qP 3\A̐ e2*.@fȅ2 d!Wdp 3c 3ց̃ d\:11yP̐7d~2̃ d8(.@f!o, qP 3\A = d2CX8yP@f <2CT3& ps.N50 cfM>Ǟ ЫK60C^lyP ЫK60C^l9szy\^0sze3G\ޥ2sz5\ 3w\T2e]*32\T2\Ape.CFf.͏k{m\c66?Sz1ϼ^K1f.[k<\^g|^G3cz%6pA]߃^{L]߃ނ6/ӯWhaڼH6ѯͷgaڼCL]/ή |tg^]W|tg^]W|tg^o]Wǘ3 5yf Gx晁3 <3x晁6ߞ5pU:ny˹pU&sἜ ]u8k>I:λp ̃|:W7|u.e]|='2pACOˊV8!pPeSƲb8!zXV|epC8!zXV =,+zXV =,+òbq`Y1Cˊ?tXV qgeTV<τ832*+eŃLʊ{i<&.Mb!.\o:7)f/p ^sv1Ő}gC.ٜ] wv1bqxO.#.rAcȕk-$9۸xp)q1\&9޸.O5ǐ+iܚ k!9C.0]s<\zXs =9T18zXs =9T18zXs tr}cj!98,X9.\ercrck2˥k!W98,7[9KR2k298,W\98 kǁ5cȻ..]9zXs = 6T16]Lƃ 6^2.e 6f16p16Ɛ]*r`c它!Ɛ`cqp1rƃ 6C.wl 96T1rpel<`c2 6p1\Apel<`c2 68kײxpk5ǐ_cq1zXs yc)c1s˚cECOkzXs yc5C^<˗qP2\>[u C^l.:bП-;5@T2Ŗ;5@q2Ȑhw C^lyPȐhw C;ǁȃ@8.dțw .dțw eԁ qP2\Apu .d:!W@T2\AȐhw eԁ<dU#O#!7Dq$2c$2בЧȃD:11yPp9}D~3"đȃD8(.Hd"!/B qP$2\A =DF"C^8yPHd{AG"9 ?"o ?"ǞȃDt$23cHdu)#!/{? ;?>v Cȃ+d@؁ b2ӄOv Co؁ 4a26]v C?M؁<d:2@8g61v ͏kgzmHk1u ͓~mk?yʯC~mk_'1u }mkt_d<ڏk`_T<桾ƙfڼ3Wcڼ35yv W4Ek Đ7i#r[FbP yAC'!WP = (xP ybd@!P qP@1͌P qP@1͌2=q+S1\e y.c !/e W.c2v2]O0vC .cǁ]ƃ2\ze =2T1\A]puCxP]puC..c228xP]puee v񠺌22\..AuC^ }^ PvzҋӋ!N/>`L/Tz1BŃJ/Sz1Ő+N/Tz1\Ape^ ybҋ2J/>bq`z180~ŃJ/C^7>DX8.N*Ƃps*z^zϼh1F+k<$^g$|d|L!k1v͟kEo]w+m,`ڼi6/]㳀!kڼd6_-]㳀!k1vmUgCϼ^_3ϼ\3~ϼ^Z31xg!k< !^oU3xg!k5y3cOp~g"/BD83r|!8xPp ǁp w&*|Ηc Q8B ˱pe<@`2 Xq` 080Ηcǁ@`8_C|9vP@`q` 0/*z r 080xP{(eՉp2D|9.㠯N8(Ηc21a 0ˁ/d 0ˁk/+ˁ/'ˁ 0C} q 0ׁu 080z< Nplź.uCn\ GͲ`w=0py,[vC\ GQ] as=08rzez`q`=08xPz`q`=08xPz`q`=029.\rz`rz`2˥!WЮ8,7/C.\ί8,W08.ø.wY#zX =ުxpq=0zAC/ e 0\>x*z@`| ^X1p 0ʁ[*ra@`!CC!T 088r@`qp !8(.@Ae qP 0u2 8(.@Ae qP 0\A:mY ߖ^C/!/_\(a `T0QQQƒS0Q6T0\AQpE e% ym(a2>(aq`080JQƒF Cb>CP0\> CP0 !XbA C^Xba+ C?X, 3f0䍢U, 3f0\A(a.wf1Jz(ao;Jm^2JxPQWz^2JxPQ+=F e% `𠢄2a0J.(჌a0Jz% =F CwT080Jz(aq`𠢄wF C% qP0\AQ+=F e% qP0\AQƒ8(J.(a% qP08< b0ba菏 + C|,Ƿba &,^XXiba,~Xxpyp C/b;S;G _ů kG6G _¯ kç kL6d6_a_a_1à 0 |ml|3a_Yà of+ ?^ |.g~||mX3?_㙟à &k3?k3ekqnHkr5 |mX6u_z^Α׆n6pv6s_ 
wk>c$^5|HkÕk>i#ϑxHkEk.>.>BAǂهه<\(~8d}8TcAaupч;Fſ8?n>\a,~Xű 2cAeƂMև a.~8d}q zMև a.~8dX0?8_ z a.XK#?\aÇ8_2>\a,~8d}=q  O )D?v p~"rEHÕӇR!!T~8ܙ|U"rYH\"!=~5-"~q#zH/w& pl8", pl=8"rHˣfY#!W ~uN}||cCoE>Rr߇\"cCnV>9H}q@C.Z1!hECR8>qS.0~qL}Øp1!p12ce/qS.0>\aLW 9s$H˟s~豟#~_#zHÛB_# ͑y݈H9c$eH8ymH/q#q#m̓8ÿ8ypqK.0?\a,%~XJ(%~XJ R⇼D)C\J0q,%~qKzR⇼iC)C\JW (%~qK8?JzRc)eR8yӆR/qK qK.0?\a,%~XJű2c)eR⇼Z@)R8yR8_KjA޴!(%~Ȼ+?KrYR>`s)R\֣\Jl.%XJl.%~\JwW(%XJ۹2c)eR⇼B)eR8p.%~qKzR⇼E)RlXJ7P(%~|c)R⇾K>LK8?}6J0? \JτWs)C^Ȣc)CR8qoY1czk7?{!~5C%?1_c1czk7?{!C[9s C)/1qczM1?\a!s C)9a!~qcz^S1_cz^S1=s ^S1=s CC12c e1qc.0?\a!Cp12c e/1qczI119ˋ}!~o!~CO9&s C~1czE;?d!bb.0?wR?;6?mϵӹ6?jk\?kc1cwm;6ܵywmk=yڮvmkK^㌱wxm66o;k>z<{e5y̳w޷z{]kz1hwɦy {mdڼ6o`͗28l^ݺkU5&yzmxL/]W|qg^]W|pg^]W43Ϧ5y6 Wx43Ϧea݃Y.\@x.e'a-Y.>aǁÐ;8xzX< s08xzX< =,xzX< =,˹xxpq0\AÐieC]< qXnc\< v0\aq0p^Ð[je ppe[eT<|aq`0Ã˅7,T0Qp5 OHQлTF R0Qt0Ղ!Gܥ:jxPQÐ#G CR5 p088jr5 Ku𠢆!QÐTG C5 Ku0\AQpE *j.㠨a!5 qP0\AQpE *j.㠨a2T080jɨa?'AE ϩag05 =?!o5CT0ggƒ>C~[ƒ~[.na!o- qP0\A- =v C^Ⱥ[xPUl )j. )jQÐW֎~`0jxPQÐW֎~`0jQ F C5 y!AE C5 qP0\AQ& {c0*i .w0& {c҄e3Mz4aU4ӄ& {c0*i *M.4aeӄ2J>4aeӄǁi4aq`080Mz4A Cӄ L *Mz4aq`080MxPip e& {c0\Aip e&<4a҄2J8(MxPip CӄQ& 1ML\s04aӄ&L~0Mz4a ӄNL3Mi4 < e&>xm~^?kpm~gvS}z<5y̳"xT6o?keczygk|Or3Ɗyym^y^HqX6;j5+Myym^?ծ̫"xm^_;j|U6ok>+xY3ϊygk<^xY3t |tPщp~#G8N|H:1xPpv k҉p~"*1ηgkp~)\AJ S2J Tb0\Ap%)801z k`q`b0Bǁpv:`q`b0801N =L \ 91z<`ݐ2J 8(1xP_JqЗe kp})!\A $01r`%21r`M|?r`u|r#`ȝ!7N 91r`7;1z =L e`|˦ۉˮۉplu!N ˣfy;1r;``q`b0801B։J &*1.! OHJ bb0䕵L Tb0䕵L bb0x&C:1xPx&e qPbACo O`(|C5*_Ck 63xP5Co O`(B '0.P '0z = CC T(080zP`q`(B CC qP(0\A5e qP(0\A 8(.P`B qP(AC l\^s 0\>U9C<*`[l\^s 0~- C qP g6?kL1Ok1wmǔ6cJ]<ٸ6Ƶy2S<汸6O5&#^p65%kφkfõy xmޓ^*\;?ƤߵyEzgIk<]Nvm^(^/  a8G]w滇k#f{k|pslߵy6ӯq6ok*1uslߵy~g^}]WڼA3W+c]g<}5yf3wm^_g_w/Uk_ DV !gp~K$oV H8%ηR|+r\ <a8JTs0ߞ[*%m8/>+\Ap>+\Ap@.a |zX =,}Vq`08@Yǁa8gT08@zX *Cn\ =,T0peT <a.a8g82*>a'3C\ |п@'iu03Z%+4C.] 
r0+C ǁƒV9l] vY̻@xpYͻ@.w0Y GͲw0py,{zCn[\ GͲwAC] =,5@xPa ǁaq`08@xPaq`0ƒ*8,wM.ܼ@.\8@z ara 2Փ !.8,O.ƻ@.\B@ίs8,Q.8@.F@.,C ,\.\ )dG0\(EKOvC#zÎ#[wC.=<`-;!lwC.= 9\z#xP`ȥ;!\z#.㠎`: qPG0\Aw8#.㠎`: qPG0\Apu#.2xaOП ACB,~`@ƒ*~`@o4t  !/ ] ` 2*8@ƒ*8@p@peT qP0\A/.T0\AMeT |w.C U =,Cޗ@zX yAaq` !/\ =,@.a !\ <a !\ qP0\ApeT <a 2* p 2* p0\Aƒ* p0\2,r0c0ƒ*\@@@xP7P.T07P.8,Ÿ@.a !m\ =,C^Ⱥ@xPƒ[.T0\>!C^l@. U }a+kC?0X <a+kC?0X }a 7,8@xPЛ|eT qPA.ߏw 0?fX b j~|^uzACX e 0Y<`/km`UkCkǁ5`۳xP5`kǁ5=kǁ5AeT qP 05peT qP j28.AeT|5лCB)k\ Ohym5л.zW`k\ a 05лeT9^?xm~&k'5X|Lſk <ߵypm>m>wm>m>x&X6?_$I,HWOu+a'p ;O'|gwV!T0*}pnE8YT0pJ![.>a|.>a' qP0\A}pa = -8Oz' >aq`0oOz' = A C!' =T028OxP}p e' p.>L =Ld01?r C'C;A NT0A!5 9N ǧ>a' y>a'<>a[/ CgOi>A CgOi>a[/ C?M' }>a' }>a2~` +OX<`?VCf2p >be0.+ Te0.+ >beCVCoY = VC+ǁwV2zX }ge082xPwVC+ǁ 82.`;+2 82.AUeT qPe0\A C0$`-&C/9$`C? L`H0 C >qaH0 7C \|߂e+p6=k 6?׮}m 6/ 6/k6/k 6/ 6/k6/k\l^Wg5kyM[G wm~_5y} 6_g+<[ԗ͇iMk#hCk#gh#ky5[kY5[擳kٵ3}+Hk5y}!׼Zk^.5yk\l^g+|f|kk\l^qͳg͐%PC(C)C~"qI0rI0%͐ pI8KT0 pI8KA5'p {7AR|/Aa8"A״|_qP0\A p qP0\A ƒj8A.a8߿6C|z =l[q`08Ao =l6ǁ grn6A` p5e <a2j[ǁ Yb/80z = J3wPp_4}ǁᾃ J3z = T/\ApBo qP/\Ap*.p_}2 T/86_'|8 -Bpz =l>`/سml{[ˎk#G6Iϧ?c~t6gJ]WgJ]y^Wy^gJ]Wy^5wgbڼ@?WOk޵6??w͏kѵ5o{k\L]gF|tm>f6Y ]0wm>a6W_0wm>]6.]OqVѻ6,}o]OYqVѻ6)]+A<+]Wy@׼ѻ6^y+g]gF<3zk\]gF|zk?Jy\hn\jn|rr#{!{!n}_] \yr}pW~MpB:_G~MpB +7E"|WH!zT/ )]pD8_!T/oT>".㠮_~=pue;_~2\q`/8sǁ]_8_υvB~|=wP]_q`/z y_q`~!O qP/\A]8.㠮_8_υ8z y_3GwB6C>~!O ۰ y_8Ϯ_3HwB<]z =\n\ g/\~5|7\"\ g|73ap/Ɵupy&,}:_VB?X }:AB|X =B|ǁuB|X;:_|!\ qXn\ }0:_r:ABD.\:upuw8,WqWe8eT qX\ qPCB|ǁuԬ\ M50gl>`c y6^7BwPn< tc/0Ѝgn< tc{!n70Ѝp5e;^{2j<wc/\Ap5e;^{2j8wPp5>dc/{Bl wP?!6B?0 y{A5B?0 /Lsc{? {!/ `c{2j8:Íj8:Íp5:Íp5e qPc/\AnTc/\A{n8!{!o =l6z =l셼rc/8$ߍA5B仱z =lTc/\Ap5B^lwPp5B^l.^{2j8wPp5e yA5e y^{ y^{ǁj^`l<wc/cc{! 
;^^^ȫ"7._qc/U{!;|ƍp 5n8!{ǁ^țS7*bOH[OH腾bF/ݲ3zTF/ݲ3z腾bF/'8(wP'8(zػ e.){w?dzwЇֲ݅wwPЇֲ݅wٻ e.){w=ٻ =݅Bokٻ;]q`.ABokٻ =݅w.]zw݅8w.]zwջ qP.\Apwzػ d.ߓ.dr.سwc]g.a{wǞЇ ݅>e.سwwp&YYlzDlllz>Cgܴٴ"MMgrٴ6-ƟMkܴٴ6-ğMgoϦExm"Ϧ'ϦϦϦMOMk7~6=6l:ݱchl:!u0VdU]P8[]P8Nx8}tp zzx{8=i; oust5px8}g2s2s8=8=p=\a=\a=z{q@t}Czx뻇z{8]߅s=Nw{q@!{q@/{xy"ee^8.0.0Nwq{x==Izxy^CF{R=mC{ȣKNHw@!/{S{z+Cܫ[iz{q@/\nT{UUTUpP2sOP{Lr'C jpbBU=9v=٨=!v\{q@P{q@UTzP Cv==!v\{\{5T.שjk=\aS.vye;U"CU8,w=mvqX.T{8}2EvqvqXnT{\Cv==i3veP{oUp=v}CajwPUDa(= sQ{-& s}C@0& s}Co1Q C sDap.=zP eeCo1Q{\{\{\ eee\8.""DC{FD'#"._QD=d#"._QD=d#"pvk6?ekLkҹ6k¹6kLk6kF]za6?;͏k!ǵym~n~V]kеyk^ߪ3ޮG׸{L 6j\OA?Gqͳvm>и6^g|qm>˸6}^g|1rm>Ի5۵|qm>L_5۵,׼%rk^k15y}A׼zo5yޮqͳvm>55yޮv׸{/!WKq!ׯKq!WKq!.Ņ\.Ņ\.ŅR\ȵR\'KqrA1R\ERAB81߱XA5B>ݘ XL8Tc.c߱盙p pwcs|373!n̅7;՘ po81.\8 \Ap51.\s|zؘ =l̅^q`c.81xǁ\8_Tc.81zؘ j̅6B$1zؘ;\8X^s2jTc.\Ap5/\AY yR\ȣN>d).ށ.ŅWKqr* 竢{B|ߥ[iBz+R\q`).8wpkp-I}n2J޾3.#\;8zΤ[rۇL޾3zt }ŤA%B nOt =L&BnǁIJ&BnOt;[nOt qXt }Ť[r/A%B^S8.9IpIN8,tNk2]n2J8,vN8(!nǁI[]&.ZUնgƬ>`]m yfj[SWBvPնd<$s-䔹նgƮ<$sm!նdWնpUeTm;j[m2<3v-\AնpUeTm;j[m28vPնpU>d-䩺mB vPն?!VB?0Xm yjAUB?0Xm ńdrm? m!/\m `m28ն8նpUնpUeTm qP-\AնT-\Aնe8!m4Ym =VzXm =/s-8նjAUBֻzXm =T-\AնpUB]vPնpUB].j[m28vPնpUeTm yXjAUeTm yXj[mUm yXj[m2}j[j[oWB/0V6ն ն ն^`^` rWB6ն7_\m qXj[mzXm =um!/\m;j[Tm yj[Tm;j[-VB^jAUB^j[-VB?0Xm }j[mUm }j[m2hy'̫ 2vPy'̫ 2hyW d^-jO W q1y{A*zW d^-80vPy{ABjǁyʫ8(.㠼Z j2ʫ8(.㠼AeW qP^-\Ayʫ}ȼZ j*̫ބ3zW =̫ބ3vpΌj7̫l.ߙq^-&yГͼeԮϒkvjgvvmoxmOxm/x?Oήg(GgٵQvm~]ϱkc3}6 \NOk6\ qͳvvm/S\îqͳvvm_®`[׼U;6](yͫvvm>5N\WX;55yή͇^׸Y;5ٵޝB))xBx*zZG;ipVC8߅T'-/w[ |w!I o5.$BBZp':i|w!!7W|8.NZ8.NZ:iI qP'-\Ap~g?8zI 相NZq`'-oB;iǁp;NZq`'-87qI =줅<.t'-8vPpz相pueI;NZ:i2꤅M\:iԝ}NZ;i!OJI  vBw>7줅<3u'-M;i줅-p'->NZq`'rkfYel8,ngY. Y!el6B;YvPͲfYc'6B;Yz, =l6Yz, =l>vb젚e2j>Yb,\aXs,e2횛e, yfYrfY7e67B^;Y.ܶY/8,7nn8Y.\Y.fهl6BeZ% ҃,Y3)fB2Kz,:KYpgBI9KvPYgRΒd,!dB̒ 1KvPY?!fB?0% y,AeB?0% 篓q젲d? 
d!t% `젲d2ʒ8(Kd,80Kz% yXȻ+<.?B]<.?<屐.~&ޏ?,Y d% }R,Y dF%Y{AfB0KYdO % d젲dO % d,80KvPY{AfBdǁYʒ8(K.,Y d2ʒ8(K.,Aee% qP,\AYY ey,lB/cl.Bqy,>[(.g< d\D'b).vmumoumOum/umbLJHT\|&tm~\]Vյym~T]TAԵym>6?Ϩk\]q3.qm_?W q3.vm_q3.vm_擿k\][k\]泮kQg5yŮ\׼Mk^]OyK׼3Ůq3.vkqk5yŮq3.vm>5?9{KutB.o^'Sukuug\ ] |w]CB.wB>] w]bῪ|q`z,\ːJUX8s!/C2cͅpB8_eH_NTz,/C2$oJ#4*=w|8(=.X8.Xc qPz,\Ap~/?80=z Ccǁpm =LcǁX8߶Tz,80=0Az!oe qPzc2J8(=ηmǁug<%u]CBo,X yTX8BX.up `],䡩b! \ f],ub٬Bbc|ei.'.}.Ks9wCByY 4Y }AB/MBOjBzX:Wq`+8*}8 p˥9,}8,W].}T+ɿK_2˅K_!] qXn\ }WrW8.\.WrWJ_zX =,}T+'c^ }ØWCOƼBokCǼBz:r[WSǼ*1d+mc^ǁ1AżBc^ǁ1AyƼB;z:W#wǼBc^ǁ1y8(.㠘W(Ǽ*.㠘W(Ǽe qP+\A1pż*.㠘Wb^!:Wb^y).㠘AżB;.㠘Wb^ڽ6u^Cuq+b+bB!B^۸.kh^{]!m ym^{u8,p+\A =uBb:.O%$W('$A%B@1Ic$A%B:z }$Wg+;\2JrT+;\Lr$&B3zX:V#=VBoX }ևn޴ խGznX }VM[U }VM:VM[ǁխAUeT qPu+խpUeT qPu렪[2n8z vVlg\cvVd;+#vVd;+#vV=/Y|g3?k6?ϔ6Ԯ?k)eum~h] >S|bvm~x\kL_6?6[kqm~d\g|Fvk)&3뚹k>|u뚹kڼ6|]fڼL_ςq]3Wum޿^ρ@k^U]'@׼gU]OyMk^UsU׸晫5\յd<+9WY!8uP_ųBq<+8_#ABVsY+Fw|qPepA8 4N#7w|3Z|37U ep~I>\Aep%peT:VZ2*kKǁeV8$zX =,kZq`Y+87jU =,kFZǁepBA !oeT qPYZ2*k87jǁDzge҇gX0ppL `<+ Y|zcxVcZdzB^N8zxV dzB =gBo1٪ ׻U![UlU^ lUbUuP+'.lU>qa*􃑭'.lU>qaZUǁVUU.VUe+.BU.sUuPnU8,]nUa*I!sTQsT!O t*шsT y4UȣBsT!F yRUȣ*Gr y4UqprUȣe y4UrT2Q<)t*\A9pe:UrT2Q8(GuP9p>d*YsTBQ 1GuP9?!B?0 yAB?0 o'ZprT? sT! `rT2Q8(G@9Q8(G@9p*G@9pe qP*\A9QT*\A9+Q8(G!sT!W =Q*Gz =Q_q*80GT9AB;Gz =QT*\A9pB^29GuP9pB^29G.UrT2Q8(GuP9pe yAe yUrT yUrT2Q}/ܪ yNVUkB/0B^۸UzUzUup[U[U!mܪ |­6nUq5 eQU.VՇlUB[U rTWQ* sT!/d:U(B^:Gz:U YBO6sTo =Q>rg*\A9Q>r^S,NBdz8zVO2*Q[/FB1*zŨTFB1*2aT*R^JTT*R^J>`TꠢR^JFBR qPT*\AQ[/Fe qPT*\AQJ8(*.㠨ԇLC?LC\A4TAb*iЃ4TAb*iЃ4T-&P?7֟>S6t?kLk.?]/˵yO3kx6︯k3՟Okڼ׾6?Ok\?]Oqͳލ6*}7q]tmv^ϓq]tmr^w棤k\׬?]wek5k֟{k˵3Zk˵׼Lka5y~k^?cck*?tk*䑞[S!7mnMdk*85zؚ:TUԚ ZS!oܚ y!AB@5B֭ГAB^Ⱥ5zٚ }Tfk*;[S2jMTk*V4$[T2VBRzX }NU}CTO /c*1CTO q%0DԎ!2*DԎ!2BS0DuP!2BCTǁ! Q8(D.U}CT2 Q8(D.Ae =lMBo1ٚ =+lMBo1ٚ:|[LB [S5g26%k_7Lɨk)um>6?ρk3%GIgyʯk_G3%~mh^6y_6]g23~m>L/_f2ڼ{6 ]f2ڼs6o'B׸6>;k\LF]7vӓkgz׵Q惓k5yQ3k^z׼Qd5y&q3um>)5d5y&ͧ׸晌6e|Wtso^O ?ӻEOBt^B>f !%oXB>o q*GB>u ! 
q*80DWCT =U*]BO2DUGCT!;DKKp$9U8/!Η$|IWCT یp!:UQUBT e qPBT2 Q8(Do݇BCT}q`*80Do݇BCT|wP!Uq`**Dz p = QT*/,.\A!p*D.UBT5٭p{B>5lMd)֋i?>Bov:SNO"v }S NO"v }ABD0$i'L;T)\AiЧL;8,_N;>`)\arN!/v qXv y#SrSke0pFipek1evi@S.*zo@S.BofhCBu9r3@SȽMh w)ށځ{{Bu9ro@ABM!4.4;.\;tpv)\Ap*.@SM2 4T)\Apeh:@Sq`)yLl0!tP p!S8SgL~o7㔱 6.SL!K`:SL!K` qPL!K` qP)\A p5e` y{A5e` y{SL Sq`Lǁ S7BL!` =l0T)Lǁ A5e` qP)eL` qP)mL2j08.SL` qP)\A n0T)\A n08tP n08.Ӈl0B^8ŁЫ/4^} 4^} 4\$@S@Se-_p)M!/_h:|IpK48(!Mǁ@A寢@SUh y@SkU*ת4l*ZՁГ@S(BO6Mh qPm eMOh =e 4>fMЇ,>cf)֋',>cf),>cf)֋,>cf)֋',Tf)֋',f*.R2K^,8(.R2KY qPfCBC C C c{Hwt!^!?&F?????slwlvl:4w?gӖ\5ti~6ٴ4?\54?vg??fgӈlcl:qϦSMol:3.ϦWa6]_1RC>V))/AѥRCRV)!_*%%CV)!a*%=SLLRC>TJz*%]D)!g*%=8UJ RCRC.W)e2(%=SQp.%=N_x8V\a+S2s2˅zGq{G;zq@; C;z+zKAJܒw;t¹w;trC.jrCXw;t¹wC;zqP(\w;zzGqX.y; k^.0.0¹wpwpwpwν8̽8̽8̽p=8wv=!wνa=3ӋL@衟 =^Cl¹w UGZ"iSQ8'I2$IpN=!i= a>2@O&zU&z4z0Q8 a#!LGsH=D}0Q8.0.0z0e0e0e0Q8."/л+zi"/Ky]!/gY = y|5Fk< ALkNڼ6]g<Mg]̮'f_۵yޮ"Aa6w;k]uHеyށ6oq]3tm_\7=gz׵"Ay~k^=Hеy~k^<_W$ 5H5yF;k\]g$ڼ5?zm_B-y^>6߯ͯ]wO^>[̮q(cU\%:{JWB~ p(U1UMWB>6]% !UNWB~`pCVBDǁU\%:*Qq`(WBD, u蠒ER(ۈp8dQ8|m!?8YtPɢ7, ɢ9,:dQȏNdQE, ɢp%e, qP(\Aɢp~=80Yz, CEǁɢp~=80Yz, ǟŝ, =L\8_T(80Yη|9z,:dQ8_΅\E2JT(\Aɢ*QrW>d(UiD!]% {VBJUлeVBJz{*ч0z|A寢|P?A7lBo g>A73z|Al23.ܖ9p8,fEp>(\a(UA2 Az^r|PA2UA2}P# F*zP# FBg yP:tP{^GBy 4 u(#@!T(88rPqprP=#@2e#@XGe qP"@28(.AEe qP(\A}Oȣ.w~B= d:?ǞcO8z ={!O~9OOȓ_w~B=;? qP'\AgT'\Ag8sPg8.O:?2򄼋t砂PfV'YVeV':$TV':$ޡ3sPY[fuBЙ }`ŬAeuBЙ qPV'Ypeue =L܄^&L܄^&L܄X0qspy؉c4=ǵyqm^?b4Ƶyqm>ܹ6k¹64>S|sm>ֹ6?B͛kڼL1kX\?_晸6ĵy"1kF3^{1kk5c45kh?M5kh͟k3J{Z1kڼu5h?Sڼm5hy+Fg\g] pv&oB>]9K\ v&s] v&'o>d&8~zX A߄oB~Zq&8~sPY\ v7R&p>?M8g|Fg!?M~sPT\ QV\9M+߄M7U poeT qP&\Ap~;8~zX 痾C7ǁp~;8~zX 痾~zX =߄AoB7|iwhǁ߄Z8ߡ8~.AoB6!ݰ ٰ yM8OφMv7l)|6lBar {^6lB^0aC>35Ԅޒ0SsP;tfjBЙ Cg&2SzLM:35 Cg&;tfj*SzLM:3525357Ԅ8,wiT&Ղ352ˍ35! 
qX՜ yLMrLM8.ܯ9S.LMrLMq`&q K4U d&q K4?pD.фf2&0ɘJƄ&cB2|B >b%ᗃ >b%b_*zK 1/χ~9K 1/χ~ `%\ApO,\?^?^g*\?^?^gcg*\gy%6p6V6ܮk%]5yIV5]I͟{k\\?]?][׸vY'6L/R^w=׸vY'66x5]I͛k^׼ޝLuk>׼ޛ5:ɟNrkuk\\70׸Y'5:ɵyg,|w;kF,zoڼ6o>Ks-Եݙkܵ}kIgz[̵]krC?s$ u$su*\'9gIB~v$u:uOiIB>]' ujIB~BqCIB$ǁuT\'9:Iq`$IB$U' C$!ஓN):I8T$_xp>\'9:I1:I2T$gIB~q$\AuH:I$28N.:I8zX' =ǁu:I8zX' =U' =I$ǁu'ۮIN=|.:I$U'p  y H8_Hȳp7F*,Gu6FB1ȍ8l rc$GuBBoX 9RH2K!w,-zY [f)$n*-zRH2K!U [f)$npJ˥ЛSBe{-Bߥpv˥g.8,W\.r)$\ar)$U qX\ qP)$\ar)$8-EfDB0#rPEfDB0#13"HsgDBn QtF2"!7Έ(:#EgDB3"!7Έ\B 9Έ(:#.܈:#pZŨ3"2ݨ3"!M qPF$\ApeD*#.㠌H2"2ʈTFCfDB9#ɌH'3" =̈{fDEcόHgF$_y\AeD$fDB:#z9H2"2ʈ< tF2"2ʈ< tF$\Aʈ< tF$\ApeDe qPF$ٸ3" qPF$ٸ3"2ʈ|ȌHȳqgDB3"ǁʈfDB3"!Ɲ =̈dF$80#kwΈ^C̈^C̈\wF$bF$]3"ᲆƝ yHȻgD.;#pqgD>dF$80#rPpתƥ7.T)$ K!!o] =e,T)$䍢K!6,|Gh>B^TqPG}^}>ezW<{h}FloT{# ?6zA7Blf{#.Ї*loT{#.Ї*loFo'qP{ƵSߵCߵpmƵߵ޵y}m@_zqm<_gc޵S޵y66w۝kn<Ƶyskkn3xkkgkF.]ʵyrmޣ\e{3xmG\e{ڼ6o!>;׼v޸6oͻk^z37;k^z׼5y7qͳqmL\g{<}ğO]߁͎ڵy7tm ]A/]wBgzCڼ 6rm]N6vmWӮͯ\_L6vmٿ6'>B΄k!skujׁk!?Wkujk!/7k!?˻k2}|Gq`#8kU=}|Gq`j!?$!k!}G8T#Kp> ǵ~\ǵ\ȵ}GA}8qP\qP#\Ap>eTuCkǁp~_78zXuCkǁp~_jǁG8{PGȳt>BkU童pZ qP#\AY'r-Yr<&w#oBf#Y!od#-:Usey#Ε;W7BOw,o޹qP;W7B\Yseyw,o޹.\hzFrA7B.mp卐W\.o8,\.o[C2M2*o8,].o||Jey#KЛ67BaS*F3 7Bn\isy!7m.oܴ 卐67B!7m.o\.4]9.oܴ.k pr千2 !0\qPy#\Ap7.F2*o|ȸF5BUWa\'qГ͸F8z=ٌk!O'8F¸FI5BO6qP\#\AqGzkT\#\AqGzk8(qPqGzk8(.㠸F2k|qk8(qp5>d\# ǁqA5Bǁq'܎k5BE;z8Fȣh5Bǁqk8(.㠸F 5*.㠸F 5eqP\#\Aqp5*.㠸F!8F!qP\!qP\#\Aq=kp\#b\#u״yF5m5B^g8vqkڎks\#\ayMq=kT"#!8DF&2B^9zV8DFȫ?'2B WgLdQt"ό}DA%2Bx&2B2t#,C}E菰 ]1tqPac"Cc"C?2tɿsQQƵorPOP=yKzmkϔ6oGͻk$_?8]?7]w)WqmkǢkڼ6gU\7׸晫6L/]ey.sO׸v6L]?_eyq3wkWkGk'k^z3*͟yk^Us׸晫5\ŵ5y*q3WqmLgU\߁v׵ygrmޘ\Aov]w%Mgzڼ%6qmސ\_6um/}\6umz_o)n9/NY-E8NYi_NYpE_NYiOINYENYENY8(e!Sǁ)EȏMNYT"80eSǁ)JYEq`R@LYE8T"_ Cp> )))JYEHNYA,B~Pr"\A)p,eqP"ߎ =LY,Eq`"ߎ =LY,؃JY,BSLY,p< =LYT"/(gs"_6'Bs9V?9͉p ͉nNqs"^͉dV"ff%*+zɬDJE2+zdV".YJE2+zɬD]$dV".YpYЛ6f%e'g%*+0YpY'J8,QJrV"\arV"ߴ qXnqPV"\ar9"G,GT9"G,G#.G<2p9"刐{$#rrD=! \Gr9"8rrn刐rD 2! 
\qX]84.\6p#eTqP92*G8Xr\[uH=DcW H@J,rPrWNJhı;+迓*V"вgDeX}6вgDeX}6'8P_c%yRX@˞*V"AE䩝c%TD`b%yjX"JX@9V"AE,rPD`b%9X+XX@b;V"AdD +h90V"r`ā+h90V"_.MJZQc%-JX@E;V"r`DX+XX"JX+XX@t+XX"J9(V"A*V"AEQc%TD`b%yX"JX@E;V"AEx c%-Cz%0"M Cz%0"+gCz%0"M CPȦ!٣s8āe9"=:CKp@ˁ8!=:?@wI-?@h90Lq-SsÁJq-SnsM8@[8\ރ1!:sc.BK"Bg.EtI\@WE`\ 7}E=} 0l/Ѱ0ta{U4l/z/a6aJ7Kqo;axخ?tawg}s0͟ fM>lf}S;l۷ø|0lLsLyaӰ}i^ۛ E8Hu:k!{-g-RZ{Y q9k@9䬅@nԑc eZ[Y |O9k!UtX䠬2k!r`B@nԜp-f-r欅@ˁY *k![6g-ZLL |981@%&91!"'&rcĄ@n͑=rbJLɉ #91!$'&Ą@nXĄ"%&9(1!A gĄ@ˁ gĄ@ˁ gĄ@ˁ JLh901!е-&&gĄ@Fzh@q; p9zh@t; p9zh@v; =Gb^( TzA2z#@eL/V^躌˘^p ]1 u ]1@.czA29 ]@1 ȡt^p y"GrzA O՝^XPIN/d;E}$ȡt^X@޽1 ]3@EaAk{FatA k{GqA Gh@36d1h@6mH98 Ō ,M@GR6dmh" tA k{G9 ,-AG9 Y; AE6,rP6,rPA` -ݟ?S^O@+V6 @+V6 @9@~Q+L/䩝 l"9( sN/8PEJ/ ^p y0"9( AEJ/ TzA` yN"<lyٶmmq]"mmm2kX_ s=/۶ /㺆EeۋeFe>c>2"mm2}gnٶxnn? q"2yX_m^5˸aٶ9 mmoE|>mm2d˶}˶m߰},eۦe[e[eH˶mŰ}e[e4˶am /:dIdʕ}Bd}Bd13 }Bnd//3 g}Bd0/dE"EυE}| Z_h9>~!7q -/NN -w>~-x/;E V ;pU_lz V}U9B}?[/I!$s>^9:p7G_eBs7~/_8߼#ewdl{};oޑ=Loޑ};_?wyG7~پ/_߼#ewdF7~< ߼#ewdoޑ;;_QyG˸i4;_uM7~>;iwdZ,߼#e^c/ۇXyG˼eه;_5O7~پd/㚧wdl}owdl}oT7~پ/7v߼#e{wdl4 ;_OB|s߼#{ Tܽ:H:ӁԽӁ|:}%<[u:=[1J w{Ngt ~N9%A(EN?@ˁ@ˁ@nl>P@ˁ@mh>P@nmh~ Ё|} Ёt] wS6@r?et wT6@rOet wU6@r_e2@rket 7W6@r{e2@ret` ЁE2@9X t>h9h9Z4@Z4@#i Ё ЁHځ2@Z4@Z4@:r:pI G-ژ)9Gܶ1c6snۘ1@o icaḿM7t*(r:{S9 ʁ^t*S9 ʩbN@3t*S@9]ЩbN@3t*X9v*S9ȡtsTٶʁEcr T,r(};ٱS9ȡ4wTdN"S@9H3r<4#91ʌ f@4#ڌȂf@V6#@Y، f@6#R6#R6#r9r9ȁEf3r bJfKf"҃9ȁE2#9ȌX 32#9Ȍh9oߖ~Qߖ~@~QvƁ/Ɓ/Ɓ,88pw4~7Ҕ8Y9o|ƁE9oȣ.8A~@uoX ~"7,r8A~@5o|ƁE~"Hq O~777>P~@ˁ~@ˁ~@oh9oe-8ZZ(q`ƁEb9oa17,r8A~"8A~"yl9oe7>P~@(oX q`@k8_]Á>5诌@,_Ywk8-1k2jp [bv %fp v ?V@ j_V@j.>Y@Ym.ɧ\') uŸr\') uŸr\') uŸr\') uŸr\') uŸr\') uŸr\') uŸr\'9W/OjS?OjS?OjS?OjS?OjS?OjS?OjS?OjS?OjS?OjS?OjS?OjSN\C)5ğr \C)5ğr \C)5ğr \C)5ğr \C)5ğr \C)5ğr \C)ʗ-ڢȇ-ڢ-|Mڢ͡-"J[tCE7K[tԙE in _9"AgE>@ˁ@ˁ@m=P@ˁ@nm h=P@ m h} -|آ{,i٢Ƚ-m٢{,p٢--t٢{,w٢ȝ-{٢{,~٢X n`,E9ȢOqZZS\--W@n@n>u,--Hl hz IMGY48ڦ}#,^y&mi]=P@oi] @oi] &@oi] &e] &@oi] &e] &@oi] &Kg@oi] ,r([WyXlj`Cغc[WJ@Jl] ,r([Ww``CغX 4zAk4Ĥ@^5%&MSmz dik 6rOm2rOmk 6R6R6R6Ĵ5r5%MEek KL^J˦K˦"5%ME29X 
29h9ߖߖ@tL/M/M)55p5|Ȧez Ҕ5pwZ4>ez ,r5A@uzLE2˦"^5p59X 29X k Ǐmz=P"^ykk`L5'6Z4Z4(k@k@k mz hz ph9zL<5r5r@^dz ,r5^5A@_X k`LE29zLE29ȣh^5A@EX 2(ڦ"^dz} MMhz WFeԦ@e4{`k`MlfMR^L54^ٔ5'S@uF}><[z-w^]ro[z-w^]ro[z-w^]ro[z-w^]>o#[-w;brGo#[-w;brGo#[-w;brGo#[-w;brGo#[-w;brGo#[-w;bV~fteݲnro[-[-weݲnro[-[-weݲnro[-[-weݲøv-@~3p?[\o!f~ȕƁjȇƁj|Ɓ|j}Ɓj|N8[z[@r_oq w99(A'EE:) ,rZZroZroq@roq@i5>Ve5VV@nm5Ve5.V@m5fVe5fV@h5Ve5NV"Yd5,r8AV}v.r8r8p h5h5taAq@q@ZZL8r@~yoo>-my@wmy7wmy7fwmy7fwy7fwmy7fwy7fwmy7fw,!wmy7ȡtl yͻEMdn m ,r("w-y7ȡ4l MEidn@n7Kn7.?@s?7uވҟ d]fn 7r#jr#jn 7RRR.?ҫ?7r?7uEegn 2sJK"Ҽ?7uE9ȟX 9ȟ@Zp т/$ nI nn:4dm >i dC@@@6dm >i Ҕ7pZe ,r7AdX n`,/:@/:}/?Y}/}сX䠃"9/Ah9h9JȾd9d_t@_E&/:r/:r/:p8>P@d`_t k/:e}с,d`_t}Eb/@/:e}сEE9X _t>h9h92@ˁtD_t@_t@_EZEZE %@Yc%8ЯHZ%8;FZc%8;FZ%8;FZc%8;FZ%8;FZc%8;FZ,#[c%8ȡtl Y-EdKp m ,r(}$[I%8ȡ4l }EdKp?u8л7{8л7]L|ā޽Lbf@h&687{@{8r8r8r8r8Ōāā,fl&,r(@YLXP6X69ā,fl&,r(}AwwV`LX 3i&/D3qn ,PLh}L|`Lh}LȞā'ā'āLh}L)3q >i&1LX 3q`ā],Af"wX`āE2v"d&>Pf"d&LX 3q Ƿm&,r4rf@ˁf@ˁfe&h&h&f@ˁf@ELh9L|ā<8r8r@d&,r8<6(3q`ā屙8Af"d&,r@d&,r8G6(3q`ā<8Afe&QāE29L@,368=Lf@l&>LȖādKf@l&dKf@? dk@ւ]SΧO@?oi i %M@'~.?Lc)a W}"eؾ}QY+27ۇQyke׼a^27󚗹y<øino.q0lya(ð}a>0lo =Lc ü5ބ ܛ0{O2kͶ sZ{O2 ζ9po=բ䲳-:po=@.=ۢ7adޓ [|~O-M&@m&e&@npmd9h:p .&@v6qrk:p dgw 6q95AE:p ,r4qZ4qZ4qⳉ@-Yh9}L܁lh>P>@ydcv O;}ځyF>dcv7iO@O;}ځEi9ȧX v7i?>@ˁ>@eiZi(v2--O;rO;rOb(+v}bEG+v_tbzG+v}bzG+bzG+v}bzG+bzG+v}bzG+e+v}b9^؁<;ȡ4l؁Ee+v ^b9֗؁k5ȡl~ ց~m=ցރm=ց.I>Pn@tIBu`t[AYm=ց܃m}ց܃mH9mH9mH9mH9mȒnKKn@n@$v[9Τց,I,r(I, JJn@,r(J*:An@ˁ@4T[kCu%HCu%HC[kCu%HCu 6TZ4TZ4T[kCu%HCufPh P{,Zd=Be.A"w\` ՁE2T"d>P"d$ PX Cu m,r4Tr܆@ˁ@ˁehh†@ˁ@(Ph9P} Ձ^ Y_ͼ<Bn܏/,r؏/^    v@x/rBp -x/du+<܁Bp_h9}wo{x/d&=xBop_/<܁Bpp_/<;pp_/<"}aᾰap_/<rBׂp_h9{/t-<Zp -x/`mzk~}ei_lzk۴/6 MBo`mzk}vilzkEk'aȦ}aCiɦ}!i_XPwi_l9&lnv J#O6S6h+ FBo`4/  ]/  ]}7h0x(Wd4/M}!7h2x_H9}! 
R2x_zEzBA Y}aça7x_zE JV҈"ҋBdȡ4de𾐽+/,r WM/ }% ށ}, }, Z0x_ Z0x_Y2x_XP2x 9 7҅"}anp]X/,r n vE;p7x_X/,r r^}an#2x_X}!erB} V" (Z -wtKi^l8/ x0 |q?w2m {7{&poer}nTg?poerVy:\6ncx {+3A"u ,m &e j?@nul dmcx ;6rocx Kiau w61<}au`1l[h2`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?rO`Sn0)7 ?r{] 7e) .Tޅ ܛT{@(M@=pBM5Hwl b{ b{`f?P3 EQhdns| N6(s| Kv9>e]ldns29>e,"l?PE:d%as|`NtX s|`9>r9>r9>Ŵh9Ȋ@ˁedYms|@s|@s|49>/tY@oi?P@V6zKs| +@9>[Yd,r9>[Hs|@s|4Z4(s|=--9>r9D{7hD{HzF{7hzFzF{7hzFA=4A=ȡ"=J@@,r(?=ȡ=E/:9>B9>>9>>9>e}.h?hs| 9>r9@9>r9>r9>r9>r9>e^񁔃,l,r(-aYXP6X:69zXP:6es@@6?do@@@l{`C-~`-XPz"E9;,r@,r==A@Z,r`{@{@ZZc{@{ ?P@bh9h9~E&,r=A@X {`E9~E9Cl=A@bX "-y6nz [E6Ze4Udz [E6?i7~&Шgʨg~&Шgʨg~&Шgʨg//KEUmz`C1V?A??aa?aG>lć3lO{ag-3l ;}?s폜ag?omͰa3k6afy_}ؾS/0jaNf,Fm>lxϻ ]Q[Ө˰}eI;?lrka\Rq>kaHOч>l>}a?kað}a?k; aia>3l[9að}arw_# a^oxyk7&Tޓ (lܛP{*pRM5d)S dԈB Eb{ބ {EQ,r@(9hD!3'?ުU]@y`W} lvR@q>]塽/ɈX o|7?@ˁ@o~?P@ׂZZ(o|@omRN>R}-Хt)]JA׍~t)]JA{b+zo|@oՙ 7aBWV ު3_@+z|@oՙ:TBWV E|@oՙXPzW8P l9_!ȡtJ l<;_!ȡJ BptIOd2CYLf8P ]u0!U]u0!U]3@%3`2Cp&3`2t2C d@VNfpYu8!rp2C d@ Y;2d@ ,XP" Eed '39'3!d"2d@6dXBb _d+ -$/X&h!1|!1_h!1|!1_Y&XP&p`lpB`ClpB  pI/9(|@/t E _,rPB`I/"/"/< t8|!r`Bh90|!r`B h90|!'_py@ˁ TB`,/"/r6 E _,rPB`pX@p;|@/9(|!'_,rP _ h9<6dǹs-܁@ˁs-܁@ˁs-܁@ˁst܁"bw@`Cq;w  d ۿaa3orEۿaSm->L_;ڰ OqSY6l ۟dٰ96l1 bg0y `44k2l㳀 ya8˰i>' $˰}eOЇC,ü52k^ Ya^O5a0y' W|c|0y' g|°}xd<q3>a>70lOxa{7vތ& gi.a>0lJZ3 N}"aH ۻ&}a>0ly&a°} a׼a^@5?܅vl ܅vl %܅tvl u܅=p?5Hu9w!p?@*̹ EN,r49h:!A EN,rPBz CT(C 2 ġ :! 
eNС pBt(C 2Hġ *!CY;!ՐCT(C`BY9!A E e,rP(C`BP@ˁ Cy*Ph90!GepBy>P@ˁ CYe:![2t(C kM2zgPȂӡ 3!ECT(Cw edP@8!;c29(!;c2< C3f(CPB C-2P@@$:3!p t8@sG]3!:TCWIWst8}$+t](Ah90|"{0O$aD` .I>蒄.I>q']0|"% ']0|ҞwDKO$aD`C;|"% 'Jl\:|"ȡ>'Jl;|"ȡt>;J߲}zSIVelE7t]؊@[躌[躌>`lŁt]؊@S0"uc+c+)V.slE ؊[Ⱥ̱c+)VR[c VXC[H98",r(S"1c+ʨc+,",r("d?۱Eehıd2E NVLR )&SXf?Lh0"'KNVLh0"'KNVLYf?LXPf?Lq`p2E`Cp2E ZLq)LȃV'S9(@%SEJ8Pe*B"'8Pe*B"',VdD DeD-#$TD-#$n!#$gD ϩ!h90B@EH#$-FH"EH9|!X"EHrE,rPD`"$!q"$!X@S;B@EH9(B"Ԏ,rPā9#$!h90B"mGHZ!q"$-FHZ!q"$-FHZ!q"$-FHZdGH9GH9GH9(B"ȡDCK1lo?LC.a|0b#Ϸ t0yR ۇEy+b׼r)s"üK1k^øK1ka\̥5\a`c.Ű}aʥ[a{u>{05s)a}`?k  i`>3l[ia}a5wO9a^2x yk`׼cά̊o@*͙{+pox@͙{@-@jΙ  jn!sfE T3+4Xir49(@-9hn!A{̬8PYsgVr̊@8"DgV6rfŁʬVљ,YcgV̊@VIάـ3+Y)9@eV9("3+YX̊"eV9(̬Yh90"'ά8P3+yl̊@ˁ*"gάYh90"3+`fE PgVufEw̬8P,FY13+Y:@eVzg̊@VάY3+3ffE`2+3ffYh90";cfVZ̬8P.Yh90"r`fŁʬ N~2 .: bNƁ͜}/Ч͜}/Чs2TNFWI 0'#)s2X䠜@XM49&;epG2wz#@oD躌躌*#u;]1#u;, t]@e ,r(c t]"2@vvXP;T;9"2XPWOC'O`XH bbXH+@?0肘a!.肘a!.!@ ,rPXH bAR dA찐@a!*,$B)R H98,$C 98,$rpXH χXPFw!9X&xXPxAZ_@9H<@ˁy <2pH<@ˁy Z @-Z @-~yi9|r`qH`CqH OМr@e9(@@y *'#|,ߧr2}2'#'ɜ@8P9>s2 pNFo䱰s2 pNƁ䱰s2-dZ8P9E,rPNF לqr25d9('#A9E,rPNƁ,rPNF`r2SNƁ,rPNF X䠜cad9('OAk!l8D#ZaFh0D#ZaFh0D#ZaFh0D#ZaF ;8+8D#ȡ+8D#ȡD,8D#A!E%gh|aư} ۟Sư} } Wy8lۿapۿa+ac?apcL?Oa\L?Oih<6s043=a43ߟa,4 3ߟa0J G`a|f1c~_/bǰ=l5a^JOt W0y%z ۇ9y+c׼==q3c<=#ø1k?DaÔ1loGa0M ۇYk2` MY,rB`@^S#@a ёCD)uH "ȍCDY9D@rw@d ICDTH 38!","rBD" !"E  ,rPH`BD"@Z  "C @ˁ!"sr@h8a$; &*vH b'z̈́0 #p3a$FTH7L d섑n0 7FY+;a$n&< #FfH0 #F-&@J]LMrI]1$˄$˘MrILMZ$^&& g-}Ll@S0@e~1I]2$A$E& ЄIMrI99@ˁ%22^@0$Л_@ׂ . t-@ׂ .9P%\Z%\r`ppIkAdpI`CppIkA9 cpI`CppI . ,r(. 
KK@=-QBL5 tT@|L59P&.™jX>>`ɁJ5 ӄ&.™j"&.™j"&>bɁJ5 ,rPI`RM]3:$rpI pRN59P&,jH98$rpI T@&F2}e^/^ y3/z٧g^睼ly692>#w=lyߍ;y>0me^˼m/N^5睼lxw2=0䝼ky'/G˶᎗q#e\;y6qN^ N^uD_ lx6:2~; lx652˶6˶a˶m-ЗmЗm/f^M '^5?_ lx>$0lxw2=e[e\BP.ܚQʅOٳP.ܚn ԰P.,Å[B YY(n [6perVʅ[mBJZY(9 9 9|~E,ÅE{ʅ[BY(r, Y) Bc) BnXr!ze\MP.d,= B\r!5e\S eY(hSʅU0 ׄ0y t4\4k^Eü5X4k^A:AaP0y @4O4k^Dü5K4l=5Aa^ O W0y Ny+Hg׼ttq3Hg);ޜs:r7딝@[8e';Zs΁J );,朲# @VtN 9SvY9e@9(e'SvX䠔"9(eL h90e'N9P);Svyᔝ@ˁ);*e'N ʿɔ@V?N d@8e'5SvY?,_"?<'yn@oȳG<:G@W ,BRO`"x]u0'I<:@FZ t@ˁ<*'E8#x-FZ9P<e>OWyT>Ok^z1'5/yT>OWy@1'Ы<`p# 9('G ,rP>O`y9|"|@N8'"`xρe=>bxOXFi"=."=|0'E8{]32X@ t"2_@ ,r(C6 tA"2i@ ,r(6 {̍{t@῟R=Mz~e 2a&P=fz0(3}L@/fL@~0(,ec&Ё S@>`&P`2}L"e9(@e9((A@>`&t&P L@~8(rp&Ё a3)gR  H98(1:L9(rp&P  XP @g9:gX XPF /jPű?ϰ} ۗϰ} ϰQ:l_:/a?EfؾhkaZ2} zQ>ϰ9lv1 ۟9k<4Z4kϰ}c׼yy+1g1k<ø3lcOQ0l(Ϛ<4}`>K0̟& =a4F0l[a}`>O0 ;a0yO)|('pSH ;'pSH!;'po}p@ NR ;D %|" N9hp"A<jp"AE,r,?r@v !{uxO 5{Y9@r@Vz {TxO ==,s{Ⱥ=E ,rPxO`{@Z ȣ@ˁ=,F 4QC*j(AQC>`P`5Y%G 9(j@E r`QCE -SD) tuŐ$C}Ɛ@WW ):9(Ї ) aC}@60(Ї ) aC,E) aC]3(ȡ 9(E8CʤC]3(ȡ9(E8C̑C9u䐢"2x䐢=n#rxL ~e7:PF^`7 tx@0@dQcoxox@ן7:PF~2(17 ,rPQcoXx"x"9((177 oR7:PF1k^BüR4l5a^ )p WH0y1 gH0y 7q3h)0 fa{u>$0lO G2jՋl=lՋǁj`(q`(q`Gee0Ed)HA[.*ʀa-HA F0Ra: HA[7h #8(tQa`mF z0RH2 #>a`.*#)eF qR& d5){&`5){&`5)IAo?XM zjEU~Io?XM zjRVnjRCyjRYM q(/\MjRYM q(\M =I2IA>r5)Xơr5)Uy>}L*IJ*}T k-JAoTR_kT zäRЧIJ*aR)c&ZˤE%eT zäRJAoyT qPR)XAIJ*8(,㠤R[&~I I L*qR)sˤE%8eR)sˤlI-JAnyT 2eR)ȥJ2٠J2٠J2J*],T q(T rlI N*d(EG} > z> G nyEߺ}佃GA G> qP(XA s}tQ`p(XA`e> qPG2j8}j8}QG>:ȼQЇyy)QЇy5F7 P3of(CͼE卂>}7 P3otQy /%7 q(獂s(Xơ4h7 yCQF2%FŒq(Xơi7ehl=~5dퟳc:c5PcO~<ퟮciӘϒFc1o4ퟪc rQv(on]T(ȿ}@\~}tQ } }tQ`RQG2j8},A܋}tQǁ #n=l]T( GAG7jR٪?W-IA~t5)͇IA\M 򛠫IA?\MjR+W\a`?0R_F0RS0R_F zF0R_F )S)XLa1,S0R_F:0RS0R_F zF0R_F zF zF0R+;w-񘰷ǁ-`o)q`o){K[ z[ ^鱷8z8,Ee[ q]9-8,R[ qPozKAro)XA`.-Sc 'cLAcc 'cLAcc zSƘ^0tQ111a)u cL,ǘ^0ʀ1`F1W18Z12`)Xơr)cL2ɖcLA c q(c zv$K83NA[fΌEe>8%W]8]T)/8b)sˌEeI183NA g2N28b)XAwR88(,㠌Eeeq qP)3N?SS;)gg.*N     wR8],OAq rq rESCyS+ZgeʳPg.A.q:ȌS G8}nqS#g>8}nqXG:e)3NA[fg)q`)Xơ,5}nYj ܲtQ-KMA[>,5]T) eJIǥ /\j 
q(9sbI,RSCtu\j q(q~急JMc~5Ύ꘎~C5ĎGjl?Qc:4il?Lc>K*5?R:c<,5ퟨ@hgZyl0gigstg1y1yxYj۟?c1y=C1y=:W@c>z43@c>*5(Wc>z33?gg^~|gl;13?c>*5&|UjWil113RӘϼJM?RDo0 ̱}lvSil_̫4[Sig^51y՘s)1 ú䈺Wjp_ rN]c  ߯91˝~ܟ\s G5~ܟ918A8tQ e Xw=7qs`8}\Gܷ.)?_;ȰTawX*?!bX*ȿ;,,B KWwERA~kuX*T_] raT__TRA~uX*XAa`e qPX RARARAqTT.*,BaǁaOo(I_'姢$U߁:$U?&"$U'\9Ia'\9IuQI @NR rꢒTVI2IuQII2I1IuQI2I,S$ULTAse*XLI2IuII2I80IuQI2I80I80IuQIǁI9LRa*DŽI9LR=LR]T*ETATA/r$U$Up^IǁIh=XAI`%.*I,$UTA/ɘ$UTALs*XAIJR,]ݞ7h,]]T*,]b*CE^utgC}4 h2uQy&PAM桂>C]T*;硂f*.P2%zF3?81yc>C+5?3Wjg^ycjgy1ycCW?V6T|Nթ :uQթ :aW\:uQթ :թ :uQթ`UjTS2N8:,AVVV:uQթǁթ WDN=N]Tu*=SARTߜ ?xAgX*ȯK8rX*5RA~vX*R rT;$K>RAn z ;/RAOR ;/R2e K˔),w^e ;/R z ;/RAR ;/RARAR zo#&^1IuQI/0&^1I 1IuQIGLR=HLR}ld}ȺXºc#+ȅYAo z z q(oa q(oa qP#by FVCy FV[7.ndd*KWAKWU rUU.]=,]u* .t֥ 3\ zXU.]8tsW.t,U7.]8.]ycUCy`U]T*^U?o٫^UwUy^UwU],I!eJUȽ`r*XA`Rrb W,PCWup{ۃUmp}϶Avsv>Ngggۙl;Rm'jl?Pmm4}|`5>>W?>V?>U?>T?>S?>R77#Xvi_֌k>ߟ|3?g~V}=!l{A313%g>;϶wh>߃UmO>߃U϶|`5>G3y>|3`gqZ8}zq@_롿oK8ZB_롿;)z'CzZ;)z'CC_+XzZ8gk=}e[@z2AZ@_a*P}^pCyCzq8,]=evlCO]=ev=ٮ2dzlC ٮa +=dzʐ ٮ^!Þz]=Þz]8ٮ^!ÞzXavl2{a=+=d.*lCzqP+grlC]9v=8(lC. 7v=8(neq(OzXzm[o=B|!Yo=B|+ǷrC!ķ !,z[ŷrC.wzq@|eʃ`ŷq(zXao˃`ŷq(z[ X}ߪ'}zZ}z ģ+Z:C}zZyCx{_!3zXak=+Z{_a7k=,P쫯7k=,P쫯Z8k=,qZ8}AT|U^zcJCT+Y}LPzM*YJV?FnUzOMTrJVtTzXơTzXơԌTzXad=,PFdKHeJH8fV5pgkl?Zc~\jl?Ucg~#5c:Pc~41%U~sulX?Uձ3ulH=3cd7c>d̫5_Ώ̫53J֏53J<+Yc<+YcC]Td,ȭ#cA!FƂ>C], ɑ v===8Ԏ8Ԏ8(2vvd,Xơvd,%#c{j‚>L]T*,mSaA[‚|q*,sTX_‚|q*RaAn~ ũ-Sa ũ /_ ASa q(TXC‚eESa2 TXC‚e qP*,XA]T ,;> }XX'5Ok`A.I` ,;`d j`h#k`A]XzM8dk`2Xj`2%or ,Xơ\ s`YO~3tl?o|dfg^f̘ϼ^ˌ~g>32c>*~c>z#33~S>3טϼ_?<_c<,~c<,~̳5߳׬T/;ֱula'_t:߮헫cSZllSkle0߈bc_c]~3<3~vLů1y|U1W7-Is+_k~丹ܯ.:B9sz~o/:s+[ U`B8:XAUq-t^6/XA2zۼtܯ݂>$q좮݂?/t,_]T,u{`A.  
2‚0O 8O 8z.*w‚fXX,.*{‚b,,ȯ]T,,XA ;,XXba28(vǁǁ W]]T,,q`,,}caAca rXXΌtf,X~ʌpf,X~ʌpf 3cA r?X[9gƂs:3™ wsΌ]Tf,3cA3cA3cdA) Y#Yk 6ȂA6.A `A5Ȃe zMYdA) ;Y#Yk 6Ȃ6.A ǁ ǁ j=ld, dAo zSY&dAo z YЛB6ȂA6Ȃ޸AvQ G 7l=l]T,XA `5Ȃ^uA,YdA/ YdA/ qPdA>Ut,XA `5.A,A6A> klO`by+Yf6Ȃ^Avt,dA zY6Ȃ^AWL6.n8Aƅ ` 7.l8nqa,Xơ]T,k˂q,ʮ],FzYC5 ˂eʯQ^e2(\/zYC5 ˂eT/ qP d  lddA< Y?O6.AdAAd  ? rORr,XơD q()7Ȃe q(1)7.d2(Yp3~4c93ퟣcۏ~jЌcO~`|^ ?;αsl?7czW33~7b̳A63a93<dc<dcz0sLe|Zfg^oe1y=Wll1y5[1y5| 16x gllglg sl<~5wםcm< ;αslۯjl?~?_]7Wcu~?_Sllۯp| /A63٘ϼd?YF7Ȃ߹ߏ9in~?ǂ77ȂE] g ~ uE]9yn;~]A,`]8AvQ2.qP,8A,Yp;Yp< ܲA < j$4Az ~yvQ %n$t,sKg Y?6ȂAW7ȂA7ȂRE5ȂY rE5ȂzYRl j8A|7Ȃe qP,XA `5A8A8A j=lr,q`dA. z r!YkM7Ȃ凭Yk 7Ȃ凭Y 7A ns,=dAn) r7E5Ȃ\ϹA8A8AvQ ǁw ]T,X)P瓁 `,XS`Oʂa0P,@Y; 2P|2Pǁ `,q`,q`eAzX/ ?eAjX/ zzY;F˂xnY/ zzYzE˂1^|^=cd,q`e28^`˂eT/ zuzE˂eT/ zuzYeU/ e28^vQ`˂2miOjiLc,5fAo6 zkǴY[;͂1mvQi9Lc,oL],26 qP,Ef2ufA/r6 q(O46 zôYCyYЋ͂ecM͂^0m,P^l:m^i`liJ8^?sjACl]Tk-ե[kA!ւHukby`,v1-ȫ".*Ŵ \L vQŴ \L Ŵ Sv҇i2>\L q(bZ73.8_p1i2>\L qP1 g= ݳ*Sa좺gAT= YT=YRyAYR]T, wςb,ȅgKݳ`r,Xơ= qP,Xơ=XV8g2~Eۏ~5vl?Yts5Cvl?Tcۏ1ul?OcqO~cml`?WձSulP?SEzl336fg^l|fl1yE 1yWml{Wmg^QcmgQ1yF1yFxEo.cWNc~i9YϓQc+ʱ~lhcc~4_wc1Eۥug^Qf阢hc>󊢍Ks-h/4GтEVp YAh"un3(Zpg^u&lpk-e[kAOZ rZ+?.ϭǁǁj=,]T1-XϪŴŴ*`1-XLŴ`2ӂ^p,SbZ Ŵ},]T1-iAiAiUL zXLHdk-*lak-%[kA.Z gk-%[kAO6[kZ zuZГZ[.d88vQ`ւeZ zZZk2jdkZk2jdk-XAjvҭ`ւeZZZk2L׈Ld- nAp zo„` LFdby6[kD&܂^#2 CLFd-/L],Hp qP-fyI CL8Nb-X6YnR'܂eR'܂eR'܂ep q(oLp qP-Xơ<4=Xơ<5=EAAz_yqz_ys^Ћk88wQǁǁ`eT z^ xU z^ x2*]T/XA`^`eT z^ xAY qP/XA*8,^k~~  .9.]T/5 xA xA xA xA xA]X~}Ut!ܠwQ`9C*As/>C,Pv/3E\( xA~w xA xA.] 
zX zX zX q()^CM eTX~S`.yA.䍅 xA0s/^|s xAn]  xU  /\ bx\ q(^C%r/Xơ xU q(^ xY^+.Sa/ xU ^?姢Eb/b xAޯ xA.]Xe.8J x2%T^ x2V+s/Xơ$\ qPر7tkl?[c'~`jl?Vc~35c:Qc~40?:c:Ӄ1y tg1yO1yxY?DxYgo,Ҙϼ.1Zכ1yXWol|-g^O˷cz43gJc>z4BҘϼ xcϼ'̫7=]Ҙϼ x?< xc<,7c<,+wc~9yLZil/&{1rwLc~C?_ЏIc]~;Ln~lGۯKձ^~l?~4ߧ̫r7Sng^1qqcot.XeU]p U*qs._V]mp r\ w 6^m]pZ wAN+w2 qm*wuw,㠻`UZ*w2d.+w,C][W.,]AWEUܗrЕ5+wzkE%1 8 8W~'.*~'^П`L]T//N ^𯏦x qP/-x2J8(,^x z z rE%&܆980wQ WbN]T*wrT r]U.rÕ`9 p +wAV\{rϕ Wp.ȕ+wU r]]EUV.r,gU z]SEU^pr,S]L*wA/8X )S.+wY zX z]>V.r􂃕ǁǁ=]T.%+wAWa.-+wA.Y zu]O<+wA.Y zY]ЫKVlVrwQ'ǁǁ8r,]KOVeT qP.'+wU qP.'+w2]T.8r,EUeT qPL ׈Ld/ xA 0wu/:xAXލ: ׈LFd/xAxA[fbyE^xA/ q(I zy^CyQ^#&eʳR'^1,Pޖ: ` wL8WN]T/Xơ4u/XA L=L=LKebyt^[&&&^z28080wQ ǁ ǁ `%e z^x z^x2J]T/XA `ғ `%e  `%`/XA `%.*,^xA0N9Ny^E%p/qp/qp/qp/qp/Źxor,D 7h]T.X*wAn\ ~ŕ +wA.] rwQ ʮy]],)]] eWVVVeoJr,P~S•`U.ߔp.Xơ+wAޯrw [uAݪV]DŽ Wny VE\U5[uA.U5[uA^ùU [u˯U,P~ǭ`8nyV]CU.U,P~ǭ3S8Gz743WChhg^O|`hlk1y1y=Wnl1y~91yx晣ox晣1±rtc~K8_wc3c.|l,o}Krtc=~3c~E~L9glWnl9ݘϼrtc&]T. =ȝAS^ǜX ӄ Gdz^?OyyAyAޯwQ?X ~?Xϻz^+z^ e.y2pz^C,z^C)w\ q(Q\ npym'}϶avxvt>Ngg۹l;6mfl?4mmG||^Kllllll,}3@gg'g<}3@gۇg<}3@gg<}3@g>3ug>ۤ|Icä|gI>ymwΟmWΟϑ>w_"}3Dg~v3=3g>{ z3g>{ y#<ymʟ#w ϶ ϶DZ=vsvvvymmmwmEmEmc{ m v>>.>ۮL?ym=g^ ۆ{]+=pVz]+=ܮrj{]+[և-Cx-5߲>p>{ݲ>c2-2{/߲>,߲>,7ng8 eܐ(p>{q@ !$ 볇?ɯ =PYp=D\*@^p=P?2y#C=!U گzC~W=?P yP=!W=,r z2{=a^{Xa zCy S=/zP{uy=zCT Di{Xɞ{5z1z=c? z}zH=1AZ!"J=*Pi\*Ыz i *〴Cz=zCC=!W=,zz2+{=aYy@=ozzC/Q ~zCyzCzǷzyC!#zCR{SCR{O{ZwH= zC=[QQ/Gr(C((_|==[QDz{q@a"=,P~]DQe(_|8_Q!t{6zyC{ȭzy^pO=V]iSZaai{ <OidRZ/X~7Hie)C IiS!WS!Wu_An"GYe‡\̩L_~wy|^#\W?A^}|FVCݟp-"C ȇB/!ԋ|F/7ȇS/2"ы|ȫ?"ы ȇS/7ȇP/2Cދ|Xơ4ԋ|XaE>,PE;)"q(ԋ|`rl?$cۏ9c:cOY9h'19TbgBO~Mȱqlx?DZqlh<'scz73gpcvag^o|Մ c>z63&ؾS˷1y5!M1y6!xلkc<>틖}rLDZrrl_[űv{.>Sql6Ʊ}8_I7c񘺏cql/#}8o8Cc>>ۺc>̫83cp_p r`} 72\95>A2+_r} +r} r} qE~u,㠋`un2>d1q`1-n7q`1UuI~5t1%袺A.E} rcEuǠd>Ǡd> ׻xQ ڻ{wcw1ȯ>% ,c_} qP1XA`ue}roG ro(d! rG(d!E!/=VG! 
=VG!2 b2ȍA  r(d{F!F!/* QȠǁQȠǁQȋB=B]Q Bˬ( jQ`E!eV z5(d̊AF<(dг(dЫF!2 yQQȠW#B=B=B^T2q`2q`2M!gbdk'#e12AoaY ϖ/ AoaY ȋzxȠ?jX zbE####/,bdAoaY qP12XAȠ,F^T12XAȠ,F8yQȠ,F8,bE#eT qP12qS^ډ!ʠW Qvb2-,C ze[X(^;1Dyu2K (2Dyu2-,CAoa z z z ze!`(ޠ1D,P:D!`!ʠ7h Q8QAc2Xơu2 C2CA/ q(/qeCyeB z z zE((^(3D80DyQ!ʠw Q= Q= Q^T2q`2q`2XA!`(3D,E(3D,eB qP2XA!ʠw Q^O(3D,dз$lMˉWk2XNZr՚ dз$lM[Ax&%qk2E&%qk2d'ޭ ɋ| e0(܍;D,L! Q^T2XBA. !ʠCA !ʋ Qw2ȫ"((/_q2q`2ݸCACACAC2W q(eB8D,P~!  e!Qf2ȭCA :DyQ! Qy/eeQy/eLQ^,eCe%(e/+9D? \p ˠ~˾e_ܷ A~s2{j-Ǿe>-Ǿe -Ǿeee𻼧>q]S8q]S8.o+=A& yР??efGs0Cql?t#0pl?L?plя??査Jcg8cwL$x/O1yfGx?xgvtlgvtgѱ1=S1y=3cz937c>z93wl_ϼ^>1y={ףDZ}33c>ʎ1y=vWvtl_ϼ?JSZtg^is>4079·`p 'E|hp /z~8W}%88,2ʇ^`=qP>4ăe z~>_gm Wngm n^u}W'm 7}n룋jp[4}ۢA ۢ z |h_R|h?C|hϝ|h_ѝ KAH1zQ ;A~[w>2ʇ`Ce qP>4XAЃ̇=̇=̇r>AA z|h+C/[+C/[!=w r|h!C/[C [ C2b>4ȭA r|h@CC/*RРǁРǁЋʇ=̇a>4ݡ2+ʇa>4XfE`C^0,|hЫC20jР7̇^T>4AA z z zS8-,jvb[4?xE^;-mѠ ~+zE˶h+ZE^;-mѠ? zĶEEEEE/-,㠶hڢAoa qP[4XAmѠl^T[4XAmѠl8-zQmѠl8-,㠶EEe qP[4q7AoX% z*iЛVIfJrU҃b4.AoX%XݺJ^JrUҠ==^T4q`4q`4MC\WIeT% z1*iCy*iЋ9VIeʣ\WI^̱J,P^JbU`z93ؾQc>z93sg^;|sl*eϼcBqg^:|U_wc>z93ꏱ:3პV\gܯVizQ/T?x.sa5_K]Ta5 A\X zVXXj2̯#c~5o)ί^T~5A~Uq~5/ί^T~5ί-ՠ?_j_՝_  ;zQ`W}4_ qP~5XA`We_=jjjk+W/*80ՠǁՋʯr~Tq~T҃ $ίR~T)q~5XΐT-q~ A!W\8 Wί,t~5q`~An _ z_ z_A#AQ jFT+,'^ՠˉWD5#Q 3ՠ^TD5#A#A#Q zQ zQ zȈjjTTX zj?V^;􊖅ՠ*,ea5- AXX zEjR,vba^C=,=,=,^Ta5XA`V²,j AoaYXj AoaYX qPa AoaYX qPa5XAՋ*8,j<=c6kЫ.Y^um֠W]le ۬Af z6kЫ.Y/'n}6kЛ_Y/ne5q`5q`5q`5q`55۬3]Yef zk6kCy6k[;Yeʓ]Yڱ,P֎m`xm֠vl8nf5Xơ̠tfS> " әAȘ .̋Jg#c:3ȻH3tE3t:3ȗ*Ng̋JgR㎒,9r[niX]$ 7;a_P|=Vifu&p%tfrtfrtfE:.í+ϯk3?kumQ]Rg9]??kF6`+揫kյYum6R}kԵcYbyV.gt<+̳rymhgk|Y6_H|k~uNZ.r|wu2rug>!Gp^˅Z.ur\ -u8oC2: qЙOu.3pU'yK. 
K:ԡǁaɐ%6ǁaɐk%*,$_ @sX2isPaɐ %C = KTX2\Aaَ Sa;2#CT؎ vd^#ȐnGb;ڑ!_ݎ !vdȗ`#CȐonG8.vdڑ2jG~vdq`;28rvA#Cۑ!7BnG#r-v/G\s8,yPǑ82apy<Ȑ %qdȥÒ2,cX2ap;ȐG%C = KTX2adq`X#CÒ!|KdÒ+,.O’ KdÒ2,gX2a>%*,zǰdq`X280,yPadq`X2a.;)V'GddNI:zEd|DhY eu2N+ZV'C:z'AV'CǁɃN8:.d-2N8:zEAU'eT eu2\AɃN^Ѳ:.dU qPu2\ArF ^c2^e{kU~Wz^em3{Wz^em3{kU\kݫ *ÿhWyp9u2^eq`28Wzث `rp^ezU8,wU^W.ߺWz^er^ee {2{U8,UލW.WyPp(׽p*?d28Wzث gzǁлq*C{ի g28Wzث<^eq`28W.^ezU8WyPЋs*CyIʐ.')C.띤 w2Iʐz')C;IyPIʐz')C;Ir$ee!W]NR:IyPIʐ.')CǁIʃ˭ǁIʐ.')CǁI$er$er$e[u')e[u#C r7vd!#Cj܎ w;2nȃjG܍r7vdեۑk!Kۑ!KۑՎ %Ȑ_ݎ %Ȑ_ݎ %Ȑ_ݎ\a9mp΃v~e33hg_:yP!!hgw^F;*vv^?gyF;M3y< g!g^~y]~k~u:xϼWڼ#6o5?:LZy ^PyE;+k~u#xϼ5?:g^Ο)3y<1ym]Wrܵe}hywm^]wqUܵywm^}hy+}m6勞+kFڼLk:ڼ6/ͻk3E;+r"\ %b8oCND %b8 =yxPWR|%r>\ %b8/y~PWR!Ep^"N=䤸.+p]I8yPWR2 qP3w2*z~GB!}=0rg8/C s!}T3I9rg8r!W}\9zVCT gf>*ٹϐo}br!_ !gg>*Cϐ}8(.gr2}~gq`380rA>Cs!wI}>*rA~ұk[u.O]CP O-Ѓ:v Gq 4\>ܥ![>@C.T 뾃:v t 4*-A@C =l@58ryh˥2U˥U. qX.]. qPCKC˥ǁЛ|K\zX. g48\zPЛ|KC˥ǁ҃*KC˥2*8\zriʥU. g48Ͽwp4!woΚ5 {s45 lp!ie!WΚ!woΚ\E;kzPYӐ7gMCǁYӃǁYӐ7gMCǁYiriri}gMe}'FC21rhŹ!N\ pb4܉m܉ѐƝ m܉ѐƝ m܉уJ6hoN6hoN6hoN\. m܉p҉уե2ե!nhȣ'FCǁhq`bǁh/L\. =L~db4\A_~ qPb4\AL8(1/ qPb4\Ap%Fsr,65?wg~2iqk~_3?7OmY_տg~>,5?sg_3MKchڴiwmnڴikӒצ߯M+_6|vM_N=~mZvڴikӦ.ikӚצ-MK_vv/~8N{ć~ȱPG|8N9>| pzQG|8Nkpz1QG|8rT/}| p2s4o.0B=\a>qIMri!ЃCnT6}8-zP6}5ʦ\6}g|R!l*>~?eӇ\8lk6MzP6 a.>8 ^_/ xC#^>䛯/}ȗ_KW҇B4xC!^s?/}ȗaK.0K.0K.0K.0K/"^xCR4=>RH҇K9^!K3ڇ\(^=}s!W(>2R҇7>"E҇7>2Eҋ>ӇxCnT/}8 Cn'/}]=s!>8 ^xi8цs[$H9A $H. }\9A $H/"A-@}Hs}H> ҇$H9ACC ҇}w~4xi8K./}mF"Bˆ>cx?a#^Џ xi8_[?Z҇DYC=>8 ^q.0KzxexexC/w/ xexC/w/}/ xC/w/}/}/ xexexſ p~5"ʦģlkDMzC/rP6}5"ʦr9CQ6}5"ʦģlkDMzCQ6 ;bMzP6}q@4\U6}&ʦ } ҇~n } r幝D.J> ҇s+A H$H.rC/ }rC/ }rC }r$i8'H. 
+Ap9Az ҇$Hz }}< }q@H>8 A ҇=H>8 A ҇$Hz }Ü }Ü }5?q }5CnД ?hN>M ҇?^(A e%HJ>M ҇2P!J> } =Hs!7hJ>8 Ai+ACnД }q@ ҇$H.㰜+Ap^ ҇8 p9WC~S!)v|}]>>.9v|}]>>.{bOˇާe)vA]>\aS2\|rCw(vyˇ.Cˇ.z|q@p]>8 vCB2\|q@:||Y]>\a]>\a]>[bqc5 p]>\a]>\a]>\a]s%̇~w@ "Jw0%̇~w@ Q|w0E%pR P|c0U%̇LCQpd>LCQ̇Q&33Fe2jḊ~F&3\|ȫe2׍dlQ)nT&22vB&2:wc&Z?6?kG׵GumI]PtmԺ6?kcd^?͟VkgյIۮ͟S揩k6!u<33ym|gk|ɼ64]3L5>d^By]3k~uNa5?:g^kڼg^q3]]3[k~u wm^y_33k~ɼ6o5?d^횟ye2k|ɼgcڼLIk2ڼ˻6͛k"ڼLIk|:pm^i_7ymy}me_WgJZ-w8/y![ce8/y!η5!gIp^mM8ք'-y! pޏmM1q29*NZ8&\A52JZmMnke xq`򠪕!] yYa2 <=+V|w򠪕O-ʐ.W+wIU e2гjqP280LÔm aʐST2Ô!_8 Q0A)Cu:LaʐoSS|t2\Aaʃ S8(L.0e”2Lz = S8LyPa0e5Ôǁaʃ SŸ=c{2Ƀlړ!wnO=.ZCn<ܞ LW!nO~dɐ'ZCܞ 3dq`{2dq`{Z*/.?ۣdȥh*/yPy%T^2y h2/zd {c^2yA%Cǁyл7%CwoK'=.ϐړwRlO߲=!ۓwRlO߲=b{2ß'ۓ!'C?ClO}s-ۓ?Rٞ b{CۓǁA789\A+Z'eԞ qP{2ɃjO8=zEdړ՞ e{2\Ap'ړ2jO:=+Vu#g;FV+?۟Zzje#W@V1Zzj凬V1Zzje#^W+Cǁʃˁ;v C/ف 'asu22pycWw C/ف 'axu22@`drdrdrd:2;2@8,@8!;ǁd8;Ձ =@ލz؁<d_z؁ =@T28z؁ qP2\Axv eԁ<:raACǐ +Cǃ ݩǐ[Cǐ +CǐoN=:wN=\X9zzӏӺ6]յ'umA}ӵSS6gk֏ym6Z]?͟U揪k'g:ƻ6N]?{k5>ll^3y66gct<̳ym^k~u{xϼ.g:;g^Gy56͋ky^3Stixϼ וyY~?3 k~u_xϼ Wcڼ"g^̫cll^35>ll[u޵ywm^]wyU޵yZ=޵ywm]wyZ}y}mf_?e8y_ p^Ѕ:ռ ]8/(3e8/(B>n^.tἯ>#ἠ }u8(\AGF2:2 qP򠎌etd.e8Cռ e8/C !_ݼ evYa2[ռ lo%n^?yο9eq7/Cnܼ =+l^T2\mYƁY|Ze;g-*k YːxZ4a򠲖!_ i¬eȗ=g-=g-C9kY˃Z8(k.㠬e2kz =Z\z8kyPYe͇ǁY˃Z\8kc2oBӜ % Ts2\0\N8k.NPC.(Y!ZAܲ9k7wǁYe%ǁYAT2\GSYː3g-T򠲖 Zˣe}2kd2>Y5f-*kzƬeǁY˃Zf-CkZf-Cט =Yռ L/l^߲y!wRl^߲yc2606/C?`l^Խq-?oټ bCǁA88\A+Z6/eto.e-ռ qP2p5/yzEe2j^g5/eԼo]GN2zCl~dg3䡏;7w6Cbǝv6Cǝ͝p͐͝:l8,woz `3sg_Y;yP-#!egȻG8*W֎pq3=#33G8C/ $crfgrfgrfgU#2:33ٯkGе/~mLOkuwm14;Okε3smL|ϛk͵yxmg1k|Ӽ6\3Ϙ5>i^?bgLy4͋t-yϼn%ץ5?LgHyH^3yUx/>g^-gg^w*ڼ.g^'+ymޔ_3kk~Ӽ6/ɯ!5?ic3y<×۵yvm^]n)|ym޺]n۵yvm^@_ϟ)|ym>_ϟ r8oy [p  X8!| A!RȇqpފV,uv2b$qRet.㠸A!8)\Aqp^/OzX e8C˗!ߖ] rAb2+˗U l._.οm9eȗg/C\ qPʗ2l,_c2R/C|yPːc._|!s2 ˗U VeO/C|yPП&,_|=s2 ˗2*_8|.,_/C˗!7._T28|rMeq`ʗ!w._~X //{ /Cn\ L!._ѐ /?d2e8ȅ\8hȕ˗|"zX =,_\q`28|rUA-*nze<}[ӧe})nz凌[~ O`2zq˃[^1nzƸec-*nz =[^1nz ^c280ny8|yPpyT e.˗|z'e.˗>/?y|c2A]^|Ø;)/88|zX =,_p.eT e2\A2*_^Ѳ|yPp/ChY qPʗW,_8|.A/eAɿuzffߒzff&G{M63C5 G{M63.ǵnfkzffd338z d3rjffq`3rffe SY@ 
d33\~*1lfb3󠚙lfb33P73CHzffr ff82˕ qX.E qP3C63Cǁлq63z g338yPп*b338z}x~!f3͒#>GC0X P.|T3Cg_ ȅσ*| !\ 2`rg Wz,|\\ q82Q 8>etw`sm6\q6Aqm ^?$͟?qm|6<\?Bpmd6͟ 揅g1kG5>q^3y8gڼJLgy`^3tyϼ/5?q^5?󺺼g^7y\^3skyZ^3yY^3yM~ϼ.,Wg1k|Lqkڼ6oKk3]7C?e8_yί k8_*dΛp P8_yΛ ˡC P8/Jr(Λp ,r(\AC2 qP.et9.㠐lU5U=ҭpޢ*CLU[гVe7J*U$aU pB8RZ!-ݪ Qq2\AʃjU8Uzت Ve/mnU|UqZ!Vܪ VeO*UŭП&lU|qqZ?Mت Ve*eԪ qP2\A٪ =lU*CUyPVeȷx*C[ժ *Vep! r2\0TL[2pytRraVlU~ت p! : 1r2oB[ǁp! =lU*Cn\ dށǃ <8rcZ*rc-!<;rcq`!<<C3mC!<<CǁpLہpLہp.g<~_AC;!?x r1nC~ȅӐ!,9 4dw!* iȐNC|wpҐ! 4dfҐ! =LC|wp2\act2Ӑ2Ӑ!_ qX<1: .1: i =LCL<^Wy]Z^3;k~ue,׉5?:g^kڼg^3U^3k~uSym^|_3k~x6O-5?J<^y%k|x1&Ks1g 緺p^r1@ %G8/9&g %AքmM8yKp^rnkyKp^r'&\A52 qPnke@ qPC&y!zV@ b8/C !_@ `Ya1@ l@@ J8y?~P Đ^N 88.A%e@ =L c1y'C~w!;?M@<b/N 4a1{'*ӄ Đ@ p%e@ qPC&Cǁ Đ/N T180- A%C:c1oBN@ L|rb<`: p L|!2c1oV*ot0F p =L &V*80z@ [J ~g1\gH ăJ ~g1\!%C3!!&C3zA%C@ c1G*@ =L &C@ =L ރ1z@<b=z>AU1K8#~>b#~nG I`1s>b}ă ܲc};)f38zG =#fn6eG e1\A72 e2#^Ѳ.>AChG qP1\A}ă#8!w5#^]zG< ЫKCOW#^]z>b'd1f}ЫKCoG}>bq`1}ă1<'A 5&as1z П L T1z П L L`rb&Cט@ qX.@ mf1\a9s22J ~bq`180zA%CL &*/L &C@ =L &e@ qP1+r!#NnyG t1}Đ[C.G<>b-!#|u!#r>bq`1}>ACnyG =#.7#CnyG =#C2 2 2#\nGn!]V =eŐ.C~qqY1kˊ!W].+b'_?˿w 6j`c!C8xPƐo:6|q1䛎l `c7C`0xPƐo:6`1\ApƐ/68,W6|w1\atru`cru`c!?d180D`cq`180zl<`cq`180ƃ 6Cu0zl<`cel qP1K82 6~`񠂍2 68(.`Ael qP1\Aƃ,/9 z+cCoXs `15[CBxp t15ǐP9^5cq`rcrcț%e@C,}q`1s˚c7e1!k!\sCL=~z 3xPxC?L=380zz =L=Ag:ez e1\A2: eR2J=2.ACoz qP1\AǃJ=u';<@^zzف[^ k_צǵkP5_צy>Mk04˿6Mk _S6MSkRצiͩ_צi| 5?aB|zm 5?EAzikϗg~6'6m|k~MK_3?~9kӾWg~N=C#kצ-ǯMKcJ=>%x8-!NKpN=>%ᴄ|8-!NKpz8]=%ipZr\aN=^D|YA!z|8-zVz|1N mLpN=>I~;UXy8-NpN=>{R=eC)p9〚C _Us|!˼ju^5LJ@1k^5LJ@!׫5LJ@!ܫ_Us|\s|\s|\sCj=9>j\s|q@!_rUs|q@1kC?`9>nruON!?Q;|8 5LJzPs ͇zPs|q@1o6zPs|q@5p||p+Z.0l>\a|-j\s|\s|-jqk\s|-jqkqk\s|q{>\}:_CЁ|u):=@CKс|Eץ@>ȇfRt Rt .㰜}yȇt zЁ|u)jhO5LJ޽ËCPs|uËCPs|E5p9S5LJ޽Ыh.0.\ppO5LJ85Nj9>8〚CQs CjF5p9>ofPs|q@5p9>8〚eeCr?CKՁ|ȇȇ:R!{d8w r?C@ȇ|!V2;u r?C:=@>~Hȇt ù!u zЁ|q@2\Ձ|q@!C@>8C:qXՁ|red\+oJ=>|raC~Q!yz|ȅR%_LJ|U1S%K=GA!N _'U|x$Jp.H>\C] oW*HsA!߮T|ȷ+$sp.H>ە q q qX@U|e qX@U|e qX@U Aɇ8,g*H>䵓 Q|q@A!T|q@Aɇ$zP C 
=(H> \|q@AWP|q@A2 eeC p +(HsA2sA2sA2sA2 q q q \|q@^ K>yɇyɇP#/%6Kyp9UT^woK>m{C^!o|q@^yp9UT^2˩yeSE%Iyɇ'F{W@hOs{j'AɇYR{2ۓyCoDў|Mړr§CoDў|ړr§e>'.㰜=Ћ9'.e S^g;^'76 kÔQ6OyЯs~mk0yƯ#~m5^<ߟ)Lymkp_gy)g<3Ly<ÔUgrg^7yݷ~k~uzϼN[WڼL6֯Q5?:iLy]^y)ͻk~uzϼ”55?:`g^acҵysm^}^7ɵyucV3Ϋp{󠢕: I8-3:9p>[ g8=yuΫp$RQp2W'!ʃ:[ qRΖe Vq2\0]+=J݊{pG pyt(|!0F+C.) 󬃺V `t2ϳBǁp> =Vv)C.ܥ ]^٥ V2K8R~ g2N]ʃRIKz'.e>v)Kzإ =RIKzإ b28KyP];)v)eԥ qPˊ]p{^;Kzإ vb2].eǁ].A{v)CNRԱgq`28Kzإ Acz2o, =LOޠ1=z> =LO\ =LOޠ1=zdғ2͞ӓ2JO8,{NO8(=!ӓǁd2ӓ =LO^(3=zw)CoD٥ cr.er.er.e? <?Kym[^xmk濟.cR^GR^Wy^3k~un_5?{g^]kڼwg^3^3ck~uzm޶_3;k~ե6/گ׉5?R^wyu)kڼ6>kzܞ g8/>}0# A'y=p[ g8/>y=rPwK|΋p^|z$#-cȷeCz zcȷe*ÿ'ف Sa2Tw C~Ku2OȐ/m@T2OȐ/m@|is:!_܁ d)v ȃ˩䳏.Or>b/#˓$,#bFzG =#>AC䳏zG<>b/#8.>bw^eG %}ă#8.>bG qP1\A}p.\E!C=O nxbw{C,9ݞ'8'~g<1͒>CO yxbU!O ='.'|'8,'|'r<1\a9s<1㉡ǁ5㉡*'T<1CxbC8xP;FCO x;FCoO<9.㰜9.㰜9zxbN~g{\6%k@q6Oyͳ|mk$@yc|mI_<ß)xmk_<3ѽgk|H6?S yݔ^3k~uOI15?:%g^k+k~uDzϼNH?5?g^ףu5?:g^ky]^3@y~ϼFW k3 ]D-ѵygzmޝ\W'?b8oLyc: i8oL*Ϋp^P8oLycΫ +p$4W': 9+yurV<+p] 8J(\AaŃ +8(zV 5c8C Ê!V 5cYaX1 Êf =+ +|sXŠO[Êἑ 5c8*}apeV L׊! +~ȰbaŐ<렮CV bq`X1ϳBÊuB.?2GbbGbb?dX1a/ +1xPa!CV ưACV = +1zV bX180xPa!eV qPX-RV.#VC/ZY]#VC/ZY] =H.hzX] =H.fAbu1 zyAlVCǁŃ: qfw.8d3\A'w.Tu1\AлTVeT]&#9 py0$ɇ~9 0p0/ 1"Бp =F+8c.?u 姢a7Wv SQ0+;*~se0"Ã^czÎa)v czÎaq`0"aE;ǁÃ^c.㠎a:1d/э~" ɾ_¾_HBݸwpAs/R}G7^R}_q`r_r_Ke4B^.ܠE}~ nϾ_{zž_U~W]\ί b/ބ}~!ϯ qXί?_wlɍZv*=8uOď/L/~ѻ;z"yg^$k>zyg^o#ͻk>zyg^}kϼD^Wڼ3xѣkڼb6/WӃk~ڼ^6o{kZ对Ϋp^=2*j UAykp~Ϋp^Zު[p2yr 9 nZ%4wPoUeV%\AoUe;_ǁp^A 2^_8 C !/wn 2A?_/^y[+p^AT/]pe;_wP.v/wP_.AB~}wP#Ȃ_OpB^<] =,|߃,B ~y4Y;_q`/UABG] }X 7V!.S/r.LOQI8 vY }X yw/XԓÐ>Uq`/8oN٪wPзHvVзHvBٲ_ ;| Ta/R?j;_ ;|ǁKvB;|*vz_:|2T/\Az zH_%#}2zH_%#}-#}SHA= =e2FBc;ǁH_q`H8d"}8dHN2wP$e d/\AN2.H߃C^zAuB/r d/wPAC5 =kz_;|\Pz_?8PzyĚ^C͚^kz>Ԭ^PPwpy-^C͚^]*kzw8.<sM/\A5pkz2,>Bkzǁ5лTzX BkzU Bkzǁ5Bkz28`r_;;|!\r_E;|!_;| {wB hu:|!W@8,o qX5._;|z r/8wP+ wB;|ǁfwB;|!W@vB;|ǁpm;|2fwdj/ ݩk'B^I vrj/7S{!oN\;9J^S{ y%qj/ A v/OvB^1;_bwpy]ȋw!/ޅ(:{] w5w5w+ޅ>T.}ٻЇٻhT.80{ٻAeB1{.]w+ޅ8({Aee qP.\Aٻޅ8({.]w qP.\ayE|c0/o }c0AB>q0/u=?2B_ {0Bo? 
B>)q0/80z;s0/\ay`^'%8, p=yot 慾3P3`^ȇ!T0/Š +B/;r0/Š . Np坔ywR8,{Lɽ6^FLyhx摽6Oy`zmk>(޵yVͣzm^E^<)wmk^g< 3(޵y<3wgQkŻ3|ϼ=>7|ϼ;^Wڼ6oޯ5ys|L5y=q_A=p|L+wm^_k>]|%˵y=umN]SczYrmL]SԵyrm^5yIKpGZ%ἤ=`^8UyΏJyIKp^ԣ3`^)p0/*!`^8UBNy$\AJe$\A 8(z d8 C y!| 20`^8 C y!oT0/v0/5ἂ AB qP0/\A 8( w! yt.䗵w! =HޅvwٻޅZ:{z yt.ٻ =ޅfB3;]q`.eٻAeB88B7 ӧ&^88 p/\N#p~.OC^{MЧM7{7AG s/`6Bx^˟x }Qd/\l }QdA6Bٲ&^ x 7a/ބM?j;&^ xǁM{6Bx&l6zo&^x2jT/\AMjzz&^$xz&^$xM-xMS&A =el⅞26Bo\;7ǁM&^q`@8 dxl8 d@^@wPo e d/\AMj^@z^1+ BoD;`^y7 慞l!h(y'QBoD b0/\ay[`^uu<ׅ>`ׅ0^zx]x]-uuuW]ׅ^3^zx]quׅ8,/;x]puǁx]'u =ׅuǁ񺃊ׅuǁxABuǁpBA.r\ .u.eRBK] q.e=p."e*ۅv!9.ۅ\lwPepv2*ۅ8,] qP."eY =,ۅ\lzX;]Evǁevǁe{MBvǁe]epvl]v!+.ۅlewB\ y_q.e*ۅl݃,ۅɲ]nBpl'v!.T.OB?];s.]}wABݧ }`.d.d.}G}ӅuOhO3}Ӆ\O3}>ABfO.>]toӅ8O>Aeԧ qP.\A}Ӆ8O.>]tէ qP.\ay|c.-uotׅ1^uotׅ1^2^Fx]ȗ*\n9^zx]ׅ^p0^yuǁxu2-B>p.\ayx]ׅ8,8lFDz]:vODzk^v #0 0 0X{m׆Q}mXE6 kÜfcaJ_aF_FaB_㙟v \{g~.۽X{g~|&5Ed6>|g~|5lڰ|mؼ3?>|g~|o __@3?|mx5lڰg~|X{m3?>y2%{mXۼ6lm^b ׆uk&_6^Va ׆ekîxkaWpX p,}8\N?v5Q>k](rPS"އîCxrPp|i@cq|e""އ8Ep}8.?Vz݇"z᰺0G>ѻ9z!/p}g+݇"au᰺WDD>\a}0>*JqͥRR܇>}s)C~!j(}7>u yaC).KqΆR܇!m(p,}qKqR܇ñ!op(}q3rhwRr܇3rC>F9f>|8p9yFF.|?laȇ;?Z wRz܇9#]͇XX7>j.Ņs)هi2>>a.}ȍJqz0>>a.}&s)c)CR܇ s)C\a.p,}}\p2c)c)e72Hcpz܇D1cpz܇x4܇9ip|Si}Acpz1~8)0>81=s q~Ћ92q~e܇^1qcp?cpz17¹9sw;>"g}{{ЋsC{ڇ~yjWۏ9s^Cxj?\!s^C/ڇ^yqj.㰼2B^eƼڇ8,OWp1y=s^CÜW91aΫ}W0~8>n|Ϋ}qjzǼڇ9aΫ}ØW?^:IDC?t{M$>^IAC5tk~$>:IIGCctp,n.㰼EeƤۇ8,htp1!1HsCÜt$>8IIACÜt0'~ECÜtI$>8I=sCÜtp-n.㰼}"i#!!WH}n򎄤ۇ\!!? t+$> IyGBC ttw$$>Ip}escCնl}k-m?m_\m yQD{C^^E>`n}6>kz0>k?kz0>=4~8>=4>8=4>\al}^׃2c{C_ڇ8qk.0~8>\al}^pñ2c{e7zQT>m}'%}K\m jۇ|Rjۇնoe}ߏK\m;!wsC>)AC>)A +T># T>8նHնqX^Xos +T>\ayajۇ8,/Pm0W>n}K\m'~}K\mϒPmXm;wRsCjgI}\m YmYm.0V>\a)2K ննzmko>Um6_c<扽6y^zmTm6yTͫk^1UۮSzmk^G]75y3i5y=3ggd|ϼm_ \sk>]'pg=M6ڮͫTt6tkڼe6/ٮۜ ڼa6/خk"ڼk.9|q =N8/y|PEp'ߢ<.ޢBNn 9.yr\t;(2z.㠢[nUt qPAyk[ȫn =,/k[KnUt l%?]t %O85yyPEGeTt qPnǁjFZki!/FnFZˑi!Gn=FZi!Hn%vP%7B^H yYr#i!Kn6BM6=Sȫir4H !nT#-p{p9znFڃ)`y_%i)8zH;'.''.o̠˟2ho̠= Z8 ЗpfBA } g-%?0A; ZK83h`0 Zq`-% AeB_™A qP-\Aʠ8^ ^;3hwfBoA =H̠x4A =H̠$f^Ab- 1 A= =̠fB3hH/\AeA b-\AeH/6z. 
Zm3h2ʠT{e-л7fBjA f2hxA f-2vP C.A =̠=X?>VBX X}cW]:zXc:vpyX/ck^:.XJձpUe28: cǁձXcU =6:zX;Xm3cǁձAUBcǁձpU1ywA'B.U$;$YȻd!wN1:IvPI;F'B$ q,I+w'BF$;$Yȕd2˻O'e$ qX:I.$YIp%BF${I$YՈdǁIJ\8Iz$ =L\:Iz$ )t,80Iz$ =L8,@$ =#:rXȏBW:rXȏBWB \ Xձw$WB \;X;c!W\@:I'd$ yu,O&By ׃|9 ܌|WAEBp3rMW/ F*rMW/ FB#_ W"_2|f+\A܌|T+\ApEe:W"_2|8(uPpEe|\^@>qvvXa!avv؃l|vXae#vvXea!azvXȗ n.ϒ vXȗ n8,ϒ }f;,pYa2˳$Ba^lJij*vX4a œvXa7Kl\zvXMa Yb;,&p%vXa2KyX?ȵyk?53(,6y|mk,_G1Ů̓|mkBlvPW;p^Z3ך yCt;,\Apz y q:^l yq+m y!q+w^Wț;^!&xT+WɎW8?Y ySv+\:^F#up9xp~.GS/B^z,>J 'K/p~z =Lug+\ T׃Lu? }e+bǩZB_k 3Ju2gS]LuB_k =LuT+pe:TWą_ ą-3z }[f+~ЃTWGЃTWAbą$BS]oLuԛTWq`+80uPoe&.\A#e&.\AoB/:7q2Ju^1.TAd+-S]1LuЫ.Bc eR]1Lu2zx:}2VX.cz2ABY b+upy2V]*XV,c^XzxY qXy.2Vepd+8zX bXǁe[BXU e+8zX:2Vq`+8.2_|*U~ԵyXͳzm^^'<'umk^<6OcI]<{Rxٓ6?5yL=k>zxg^oczxg^5yk5y=:|Lo5y763dž|Փ6?3w|Փ6"Ϳj6o˿k1kڼ6[k1=6kڼ6/y[S w8jyp~Tk*o yݚ 9nMK3T)pk*I:'uP9W39p27 yAtN*ך2I%:' M!?]o zS֛B~zS+MUo y p)=7כ2zS+M&MB(p97󓈐SכhO"PM= y9rc~!h>̅ z0rCzS8 =7T^we)\Q` }Ӄl0cГSL!n0SfL{l0l6B_` =l0z`:SL2j08tP p= YsзHfB 3Ko,^0zVY 1zVY =+,sгRYaf)-zzY =,f9X8(zR89X 3K,\AKfBcIo2zXAO d,){Mƒ*zXR&cIGɣy< qaɣZ&B:p蠒GL0yz<:R{H!5C:RW]!t)&=OC yr)&=!zrReG!/3N8yɣЗ&B< QȻG< QȻGA%B< H&BG< H&e< qP(#G2Jbd蠒G2J8(y.A%e< qP(\AɣJ8(y.Q/F&eGqz~?~nQw_]/z"+D| Eѓ̈́U VLX|U V2aVǁ Uq`*80aV7KNX&B>p*80aVǁ ͒V2˛%'B>p*\ayU7KNX8,ozV.\ }g*şЇ{0VBX c*U`T>ԬTރRzJU=+U2T\OT8,'WyWͣ{m[6^Fm5V3{mk^T]7\Kk>z'^#k>z"zm~wg^C̫Ruy6ϼ^>JյwG6zͫk>ڼ6ykT6͛kڼ6/x[>ڼݽ6/wͻkڼk[f77| O8CU o8B׭BV4& nr \ OAp*(nA= qP*\AupխnzX fCͺUKV }Y yt*5V![T*v*? AխB^,] V!o[=FU7B QߍnTQuPw7Bܨ ypU!nT?V-V+p9 XV*`\W[Bz{>ǐ A= q*{*`uPouB2`.( XeAB3 }c* X1`FǀUfVot XlB = X1`z:UV2 X8(`uPp }d*􂃍٨ }d*􂃍FUFUq`{6BUlT{FUq`*8QuPe+\AЫ6e+\ABFب:^2jT^Q߇>] V aꠞ^]2`z5€U%V a*٨ }se*ƅ{7B\٨ qaUolT^Qzب:<>p*Ћ6B/rب =elT8,ܨ qP*\Ap5Bo?ب =lT6B/rب:FUq`*^FA5B5٨ =lT6Qzب =lT4ŭV!eXT*2B):U{V!vX9`uP[;Bn p*pe:7B^ܨ pU!^nTDQr ^nT=FUˌU? ٨ }a*ލlTۻQuPlTۻQlTT*ލd*jT U2jT8Qp5BQuPp5eԨ qPU2jT8Q.FA5eԨ qP*G!U2˻=/dwB-ff4+-Y YfQhAEB? 
}d4+G*hVW#f8, jѬhVq`4+80z jѬ#*GBY!i8z HѬhVq`4ѬpY!Q9.zzg^O\ k>zzg^[̫um_tg^ZUczzg^OZA5yf3ֵ5y=d3_G{{ڼ6/͋kڼ6/SmڼC6kڼ6ck.ڼ6o͛k诹;pQM9G!Ok[U r8/B׶Bk[' r \ QApm+(΋AնeT qPm+\AmB[B! gsˠVȋZ }n ytP렂Z׵Z g8/<*^V8y.㠠AdP+wZ VȯB~;uPAWB^ y pP렂Z-A`B[qZZrn 8!j˹UP+_p9 j81?N }n!9j㴃zrwV8 =bd+\ ,փb;8,VX }b+ՋY,AeB_ =bz1z }b+80uPYW/fe qP렲X2zF,VMX2F,VMXǁYXǁY,A =bfB:]ǁY,Vq`^t8EWXb8EW^t^p0uP/e `*Rpd*;FB0X cdWw,_1| W,_JFX }d*j嫐\ }d*ƅS呀W7,_޸|zU)c*\ay/UW2*_8|zUq`*8|zABW,_|zUq`*8|uPUq`/>*)YYk gBn y/s+Yb9rk,VȭX s+Yk ge qP+\ay:,VX29ge qX9.,VYpeB)zY,V5XǁYb\S8z =b\9z s+80z =b=?Yb9r,VQg*򅕳X!?D p+2Y2gB):,V{X!b8(u?8 Y!Anfbd3+Vͬ[7B1r+fA5B1r+fVDuPͬ[7B'ͬ(6?lf8.fV/F6e fA5e qP3+\Aͬjf8.fVY qP3+\Aͬ(6eGqnf@=_G>S\? )LqWWߏ;/S\L)w+)w^BTqR\Lq|Wp)/U =LqBS\ǁ)/U:r+\ay׃Lq~0upy WS\Lq>Lq^2zW_fR\Lq>Lq^2zWe S\2Jq\VNq8, +e{6qm6C\G1umk<_׵yx3ug)k[5y375y=3|6;k>zh{g^l+k>zc{g^/l/ y5y͏ e5|6oͫڼ(6ɯKkڼ$6Sڼ_6kzڼ6kڼ6/Ư[kR9kp^-:w+w+oT y 9tԻ3LW)p+J!LW8/BN3] qP+\Ape*z }n yt+-3]!/t24e+Mәt_t( =LW;3]p@2]dO+{Z!? AB~ǻK=iTO+ÅúV;{Zp-=p9iKwOܪ/uZ[BEz>;ΐA qO+U=z:=pRO+G!{ZrAV;{ZHiTO+=w$B'{Z }GbO+|ĞVq`O+=ABߑ qPO+\A=i8YV{ZWi=ȞV{ZWi?<`iB{Z,+8z }bO렞eB{Zǁ=z.gY2iD.gY2zzA= qPOA&B/8 `2+ɬz'zdV5Y}LfT2+ɬ>&d2+-ɬ&.w2+-ɬ&*dV Yɬ/ d2+RɬK&BOY2 qP2+\Aɬp%B/U =Lf&B:dVq`2+ɬdA%B =Lf&*z 9b\6ܺr*VݛX!oWb\6uPU+WB\ {s렪X!wobܽr*VX2;5Wejb\6.㰼Ws렪X2˛5WeT qX.*VeXzX lp+8uPUWBXǁU5WBX!wobVBXǁ᫐; Bޮ p*W fП _\68|rUەW! _T*᫐e:Uq`AVBt\ *VȋX!/bbd+Ubbd+Ud렪X!/bO$X/PbT+"Y qP+\AU_b8Ub8.*VXU qP+\AUpU.*VX/Pb8,O\ }b+G>X?Y UbW*Vߏ;/XLU`w+Uw^VB7q렪XLb|o*VUM\ =bVBXǁUM\:r+\ay~*փb^upy*VXW#b>Ԭb^zS*VM!XU )d+f+UЛBVBo Y qPҒr+\aI.T<4!i6ClggӼC2gӴlMi6MϦA6'~6Ϧ)뀟M3iįɬMi6Ϧ4?fڜ4?G2g۟ϯn3?6?g>s۟ɬMcۯm3??6g> ۟M/ 3??g>c̏~66]6~6ѯ~6m6-6m~6-~6m6-ЯɬM˧MM؟MMM{kˣMK؟MkM[M+McJf8_W2E|%^W2qz_ɬ9Jf9Jf)P2i"Aɬ9 Jf8m^4(ɬqY/.0'^\aNfs2"X/h"b8-C_DE#UzqZ裉*֋LUl%?Uzq8-C_\zJU^~7qXQz%X/]U*֋NW+/uU^䷺X/k]UyGU=܋"/bkX/N^RUs;W^^륪X/.vb8yqzrn*֋nuo|w/Ρ*֋õp~"w,b۶W/A^W/Ջ |o:_sEtz7^"|}AE W/Ջ^M=_sE_zq9|2s*W/.0zѷ+^᫋_W/Ջ^Ջ^8 |/^8 |}B*_^q@EW/z W/.0zq9| _eW/zـU8zq9|u? 
=HfY/zd֋a Ώ^2ɬ@2E/ d֋a Hf]D2E_1zѫ$Jf+&Y/z5dV8'^ɬqA2EOY|%^ɬqA2Eo\zSd֋8,W2ed֋8ɬqY/zd֋$^8 Hfs2EY/zd֋$9Hfq@2EYzd'B{Y/rMd֋b*V HfVd֋#rdV8'^ɬ}!F2ETQ2+Y/.R2e[JfwRb˃&U^RUTAE[T^RUcDEQ *֋1-X/zLj*֋1wb#XᒋRebX/.0W.umk?xm'6_czK{g^/iCk>zF{g^h͏5yUGyxm[_7M czjtm޿_%ӵytm޽_Wum/]Key~m^_WKѵyzm^_ҵy\ y] [tȃ*Vȣ*V8ߢyr\:GG!gUS*V8/BX!GUp^*W.*VX2bTA*|U84C>B^ fUW l% P84yyP᫐7Gyp?BUeB~1PمBUfB~9PBU/T*х*TPB\ sBU8 ys*\έ U&˹U*䧿 UPܲP߅p~~vP1C;\ yu*\3SU/)FBު }`*łPX0BbU)c"T/P2FB_, =PX0Bz:U{6#T2P8(BuPztBZԃEBZTsZT'疵疵ztܲܲJZA=: c7t֢BkQ(\AeT }Cg-*\Ae( ztz A[Bo 5T%PW B^1uPA+B/| }ic)ͧo| }ic)ͧj>zSAbj7B| `)ͧЃS[k7e| qP)\Aͧ; 6BOǁͧ+=6z| c)8tPͧ+=6BOǁͧj>gB^u s)Y:܃9SȋNu yr)Y{0g*rS=N! :8,Ϭu Op)\aykS}3@ yt(3@ yt( fB`"?0 qq(o.EץH~Bޥ*|jOȏ_~B] Tq'ǯK?!.T'ǯK?!.8sPp, ЃJe(3@!/m9S6g*S6gB9?t2f2@ qP(\A}8(8(. P2@ qP(\Ape*. P2@/8, } b(8zFPA A!!FPcFP }[f#(1a#Aol-}A qP#(7e:n|FPq`#(8z =l|+F幎Aǁo =l|+FPq`#(8tpyFP\Ǎu qXA2s7B>q#ō6Bc }n a#(vI6z;FPsFP$AlNz;F%FPq#(\AFЃ86D6 \kƀ53 tmApm9pm1pm)BO\\1mk_G<Lk_g@<BLk>zyg^4czyg^4y5yͿ׸6:ϼf^׳*ϼd^׋k k>zyg^kk yt[(ᡐWC[ۡCr >!ˡVx(_p9 wxABjB~;<g=C6(\Jޙ ĶЃl 8-PB }w`[(݁mЃĶAB =Hl ;-z }w`[(8-tPmWie qP[BX)m }%a[(mmOB-B-BX)e[(e[(mz8Bol R+8-ζP+8R:BX)8-&Bl -Won ԻK2B  $c[BWl ^-^ƶPeB_\-^ƶPeB }/c[(mЃĶn ޽-zMP5Bmpn 8-.㠶PBl BBvl T[(8-zkǶPq`[Bvl BB|4 yw[(m(-rP B!/n T[(m{0B:P=B!`n '-.㰼r[(>mp9k=osP6zB k=G3yd'5qzs'5qr8! 8qqBn 8Nˌ8 8Nˌ82T'\Aqr΃,焼?t,c9'c9'圃*c9'a99!$.c9'w9U rN92*c9'\A`,T9'\ApseT9rN92*8sPpseT } b9'\ayrN+ 9| v7wB06wB͝l6wB06wB͝͝en>`lTs'͝w^6wB͝j8n8,X {{7wB;ǁ͝Nq`s'9},հzWrN%9U d9'c9'ЫKsB.Y d9Rq9'\a Ǹ.r΃,8c66\p6?kƪ5sm!qmqmqm :_l\8\6<ε'õõµµ5yVu?̳sgUkOTչ3|>ϼ^'>lj|4ϼ&^WU;kI5y=H|L55yE63|Uչ6?3\czErmޞ^*m^l]ȵ_"\wZֵ7_ <ϵyumf]w\upmd=ϵy{m5yx̻ԻΗ5G5p+w'sPϘB΀k&lꄼHsPMWMp^63W ytS'7߼߄u7!?| M 7 n!/}߄9~sP}p~Bܷ ySs&ᄼo.V}p~ܪoc}ٷ }nٷ EM8?K;W!7#M'lBa0a 6!oN؄f&76 ̈́Mn&lB 6 ̈́MYa&76? 
̈́Mq`6L؄8(a.㠄A= }dAVjBR.JMJMY }4Y }4Y9=&+5&+5'j?Y }e&8RsP{e'\Aw^Vje'\A{ByY9=K{F?˄M!&lB_™ } g&﹜9M' N؄3az'ńA%lB_ b&튕[VjB߮X w+`*5oWԄ^6RzVX9VؕK2VjB/X l`&R.ؕpUjeT qP&JMq`&ԄVjBX =T&JMqpkZ3!on̈́\X5AfB.ܚ y rk& ݭj̈́5raLȅ[3՚ rk&ʭ[fewBn̈́ 5zy_G_?ȏ ?\G?.#9TN&-s2!$Ʉ s2 krdB~f9'Ls2!? ypNr2!? =Ʉ8('sP9pde,fdk&­ck&w[3!/n̈́[3!/nTk&w[3!/n̈́ckZ3!/n̈́ck&w[3՚ LZ32j̈́ck&\A}lTk&\ApfeԚ9LZ32j̈́85sPpfeԚ } bk&\ayLk[3ǁj̈́g55׍nTk&ck&ݚ }ؚ _6[3!LAfBݭk'fBݭj̈́85n̈́8,Qܚ wfB[3ǁLq`k&/ݚ9zNxg^k㩿\׫yzm?&yzmޫ^׵y:40\pmw]].\:4U׵yum^pmµy:4|ա35ywhBzwhBCCpo<ЄwhCrܡ 9Є~+ Crܡ Vip:42Є8C.AuhB#5# } yt&w#5!oT&"w&WX# AEjBE i8AFjB~,;RsّԄbv&7#5!oT&Ʌ-:RHAEjO8g ys&ϑp~HM[EjO8g sHM/mGjd&ew\uCM/ vhd&mЄf:4?١ Mq`:4?١ =Єf&7;4?١ =T&7;42Є8uOS325S=fB>f?LLA } } =L̈́aL+S3ǁz.=2J̈́25.=2zAyCL̈́bj&UWifB˩J̈́|L;S37KLTj&ЛqԃЄ@CM ;4/ЄCM};4oݡ c&>vhB;42Є8CsPpuheԡ O`&8Czء ^c:4ǁ5vhB;4ա ^c&8&sP5ldB\ yqMj2!NɄ&AdBt\ vrM&5Ʉ\;&rLȋk22kdB^]y_˯k2U LdB~ǻ&sP5ɄЄd:49١ M,whC7Єr&wwhB~fCz١9M,whB=;42T&\ApuhBÿ_q` 6!$.؄+ 65Y y%q&\Y y%q 65Y y%q&& 6U y%q&& 6ǁ*؄d&\AplBk`.㠂MoMl`.㠂M 62*T&\ApleT9M 62*؄z`.l߄7վ }4پ OoBMo?<&l߄7&7վ K2oB>p72j߄|M:훐ܾ =l߄oB7ǁ훐}w:M7ǁ[;oB7ǁ훃oe1n߄|M:훐Qܾ qX^Ǹ}17``zÂM- 6<,T&ғDŽKOlB/=Y d&ғKKOlB/=Y d&8lq{>yL]\?kG_c'zm?Om?\?[S˔kkOǔ6L6H6D6@6<3ϼ͵5ym3osm96|敷3Nj|t1\3w|jϼ6_\wxg^czxg^Oa5yR3͵M_cڼ{6^͛ku1=y6][k|6ﵮǔ6ۇkJڼѺ6wͿyxLyk6ڼ̺6vͿt6ڼzLyk>\Wϼ6!O6|gy y ;{8BmB6 9ۄmyr 9 ۄ +48osPypme qP6ǁ_ӎT&O#5!T&v&T eAEjB( eL̈́95թ_N̈́~uj&ͩJ̈́{77fBݜ9`L+1!/MƄ{&c)eX>cB~: 1O1!?` cĄ ?l<&L+0Rل &A5aB +M'Mj„Je&g&W*0Rل &Lq`栚0Rل qЫpz }GbA6aB &L&L>ل }؄ }؄9W/O0O0%ʀM?ل W/ǁMpz qЫp5aB_ل qЫpz }QdAf_Bd%\o*rK)/!Ae_BٗWif_Bj}9K4/w5̾kk۟MM;۟M+۟M۟Mkk󫟟MڟMڟMڟMMMksgӯ-~6g*g&g^go,囟M[MKM+MM ksg>sg>sg<*߼S͋]E|o^Wz_9*߄sE΀7/r TyqZ}APE7/N9 *߄se͋8q7\y͋ W&7/NK}PyOo¹|7/N%47/&͋"7/͋UE~|ы|"{*߼qo^OySE^ԯyqz-"oM׼_Z؋5/ T׼3~͋ UX8?kyqz ETh^uIň ͋bD&+4/ň ͋>ЄsE1B-*4/ň ͋bDE*4\y_м,ߟ3̋DKE:Вyg- Z2/ %Z2ῖoMd^BK/NO^%-#-'/ꅖ̋8O^\a~uYrHEΈ & _1WF=A:L~Co<Z2qO.<Z2lC "4A{hywAfF`<䁼B3Co 4졙~sEh桷FDhWyw^f}~EhyyȣNfS$fˁB3yC#4Я< !4pyC{h2=4pywfzyq@haUȇfzy,fz Zvxr1|>l%jrH/#)QJ<S!߳ym'%jIvR&5DCn;)QNJk*J<\aDCn(Qp床55DC;(QJ<仃5ȣDC;(Q쉚|Q!yw{%jyW|>W铨 
囇|囇|?2o)U`/<\~d|O*|П`({!RUyC/<\ơo|p^yC/<\ơoq9yΫfC(.hX0Echqͳ3?ƸYgmgSmgk^1y]FUc:5{c^:5X?J뇙c^:5ۧttk^WOǼut_׭1yvڹ|L?lO~V3֯qy=s* qΓy=Ϲsc 3vr3y]\?ktm?֙fX?ρu3tL3ME:4X뛣c`Sio}ѱ~J0֏ƼUg::Ә׼Lc^3qͻw)ط\3]g ` rtvCיי}74Ap)Qp)wCי.\Au23qPLAKX ߱\JRRg|r))4}:/.KIC %1 _IC 1 _C e1:R/ !JQt սL; ]ҽL; νL; νu{FA0i;FA?f1icF4 1I4 zi2itŤQNŤE%~b(,&&.*icFetU'ejQOo4jQ˄բY- zZ2a袮LX- zZWu~9e(OMVN/qU2\Aբ_X- .㠫:etU' VͤQ/mL_ˤE%~ic(/=LyQ4 I O4QLMAV~9erVjQorjEU?\CyhjQгjբ ]- %բ_Y- zVX- %բ2]T(EeT- lբǁբǁբ Ox]-jQj_t(ȗSw7q/u˩;@A  j  jPoWP{5]\.D\aP/qX.Ftrr.P.E|TwA(|TwA BA>~ 䣺 BAg t \] qi=lf{(6CA>k=tQV{(gM[? Y?PϚnqP{(C .Pp8=tQ2ujQjQЏYjQ/jQ/]uZ[EAZ[EA^^p(VV.Z[EeT- .jQoqP(pV.Z\Aբ2qP袪EeT- .jQpU.Z\Aբ2jQpU8Z\a?jNEAxV1y]<NǼutk\j繶֎w` ix~mvsX>X?ܲ418,!-~3֏?8灚k{Y_{c\2%ıA>2+>U kY zX񹨊O_ˬ\픠WC=~C=Az.*-C=Az^ \픠WC=A02A0#C=~;%z8vJpN .PON)et;%nPAz~`'ZzB=AX0wi^zD8s;A>p;s ? z ??8ϚL z02a'ePOϚ\T',z|t',zd'gMzdB=Az9\A2 \T'B=e .PEz8(\6qw9u'Y|t'B~Gb'O~(d'O.;?Av'w$.~Gb'q`?A#\A2Op2`?e .Op.\A2qP?e .O[qP'?eG=p9(5.AA AAtQ堠g O] zVX xAA AU .rPprE8e8,w\ 2AAAAAAAAAAotqrPrPл,=,rPrPrnAeY.rPpnAA6\A2*qP9(q`?AoX? ϓe,\푠"X e`?.e?X #A?S\AG8Hp}E .#et{$I}111?AU .eO>Ey|w'= yprl>Op} y . }d>OO>Ey|2v'/ty|2vs} _ z }ǁ=A!{.*b'5xO\T'!{|(t'!{R'w${He=A#9\A2\T'=e .xE{8(\A2q@OZe(26|t(Q6~/c([6~/c(n]T(26S&Pel]T(n^&P&E5~/c(@e M2jM&E58 \AM2j]T(@e .&E58 \AM) .&Pp58,7 zo`P=H y`P} =H y`E$c^*y1]p1y]nWgC1y]kץc:5c^:֯y2sk}ro1XρEu?Ɵ@c94wX?g@c}@c0w=,~ruLE1~45"И׼@T;c^*yX?k, m~4Ƶ˒1];cik%xLa!.KBc}'pc* q$4ַ.KBTc^* yͫ$47$45И׼JBc\. ] = KBA.}}/ط\. 9. ]TI(pI()pI(ط} ;䓱?A>;䓱?A~g;sQɟ ;ɟ ONdOONd'q`'q`'GX'q8u{n z23ef2CA[f^ Ee˺Uf(gMf˺Uf([e|rf(qf.*3䫗3Ce .Ee8(3\A2 ]Tf(2Ce .P;qEAT( _( ?( @Q yՁ @QW\( yہ?( z(@Q qP(EA2P\Ac(@Qp8(P\A qP(Ee(@Qp8(Pv Ee( .@QpBEAEA^Ap(}M$.*P&EAEA^ApEAEA^Ap(Ab(@QW( z(@Qp8(P.*P\A o8P\a@Q7r( z( z( z( z( zw5ެg(q`(MO..( .\Ps(z8,( z2 qP(Eeo6.A.DAfA.DDAoA.DAoA.DAoA.DDAo#QsXjKd@ 8Kc!11-*EcEcq:DZ8?Ǹ(럋c\ q3P4?)P45@ј׼Ec^r{L7nǼuvk^mǼ(G)c|kk^lǼu.َy׼.؎c^];5dc2z4ַAX?"ϖѣ1l=c2z4Ɵ-Gc$`S1~1z4)z4yј׵G=Whk^ѣ1yE)z4wǼ=Wh \祆XbcX'bc1]NacX߿`R<04ַ!c )]1yƼRRWHik^!1y\)d ! 
CJ7[A..* 88[ACJACJU48tQ!2 )qPH)BJR .㠐A搂!O[EA[[EA[tQݢ _- r3ݢ C-:nQЋݢnQПnEug(xv|q袺EAƳ[EAƳ[g:08008.Ϳq8===]T(q`(q`(GXw.[\Aݢa- .nQpu|s(E- ݢ2qP袺Ee- .nQpu.[\Aݢ2qP( w82,d(b|Cw(7~!f('GAxoܰr(bQ/,]T(ȋ.BQE~!f(GeT< ţ2*?E8x\Aţ2*]T(GeT< .E8x\AţX< .Qp8,W\< zX< „GAox xtQţ7ZY< zX< „GU< zX< „GAG/00QЃE8x\Aţ xtQţ2*yţ2*d(C.=,=,=,=,]:GAGAx8xv)GAGAGt.qXӹxGet.}zQp8x\Aţ2*d袊GAo x>GAxtqٸx0GAo x0GAo x0GAo xtqI۸x6"G~\<:E2ֿ+d/>_ch/6_c}auqLb~t65X;.\a9r\(rzP~8->S<CCA>D<3CA>S<3CA~-?k 㳶.ڡg\h}֎zYt}8~>?kqͣYY\k͢ϸ,}$fg^Y|o}5/yyyfgv||<֯5/5߯n55>k[3=g,v zFqgY;E=ϸ^3 |֎>k˟3=X}Ε?=ϼ{h>=ϼ{33=ag^g|}֎ͮgkq:뭣g^u|o}z3:k>W!WZG6C.|rum>l:zP([G9j=u< uvjӠQ.[Gq譣8Q.[Gh{!!:zȷrD`o=䫁ZGBCnu4C!yBHE?R: Cuh=䣅ZG:zTBݡEw(ػCz[tzV K yC C)C?~;гvi?zYAwaЏ=\ơ_ZK q./- 3 Ew(WAw!&zUzw'ݡS;=3CͫPwZݡUw! C~;=\ơwzq@wݡ|Uwݡtz CC==8;ݡtzzQu;pz2u.лCqݡ|/Sw2;ݡ|/Sw2;p ezw2;p ezw2;pzm u.лCqݡAtz/z")NwBz5!Ar{M!~ BwBzuC WWB C WBz],!=8 л!=\ơzW!8ezwBzC!=&BHBzC!=\ơ.CHBzC!=\ơ.CHBzC!=\ơz !8ez2=)z"~:7'{B!(H(=" ާFBG oR'zPz&J(=" 'J=H({B2=pPzWJ({B2=ׯPzCO( "ׯPzq@B $zPz e$mD%zPz$zPz e$zPzq@B)FTB2mD%| 8,Pz$.Jq 8ezB)JqoėFD|w_zmDėzUG|iUG|w_z]uėKeGޝd|OcZ$c}P+d/>W_c|i/6c}auqLb~85X?g9Xx럎cq61yƗc^<5kc׼a8NA>p;NA>p;ײNt A t A :S:qP NANANA>:808080tQAǁAǁAǁA := := :E8(KNet .㠠S/m:qP颂NA9\AA2 :]T)Net .㠠E8(\AA2 :Sp8(tAǁAǁA 0z r)W;QՉ .NT܉ .NAvNTNTNTNTNTD]\G8 ;QA;QAS88uqNTpz;QA۰\aNT6DqP'*:Qeԉ .NEu880$SPA3q` * Hޏg SPA3~~8[#Z 2hqPD #ZA#ZA#ZA>;808080uQǁǁǁh=h=hyEE8(˩#Ze .㠈V/hqPD"ZA:\A2h]TD+"Ze .㠈EE8(\A2hWVpE8(uǁǁ jѺ8{ypW[@w=e wpW;wpW[@ w=e wpWWpW[@ wyᮠpWpW[@ w= w]T+- 8(\Aᮠ .pW; w]T+]e .pE8(\A2 w]T+]e zG2 wqP+rᮠǁᮋ w ypW' w= w]T+ OypEOypWpW L o9\Aᮋ wqP+]A0uQ2 w}pWp2E<gr/ᮠǁᮠ z zoᮠǁᮠǁᮋ˽T8,R 2˽T>*b+]e .pWp.*\A2-W;z}@W;z#ʪWzd+QV> `+V..&W޺dUcZ$c}P+d/>W_ck/6c}auqLb~:5X?2:X??_cq61yV<^c\zczyͫ55ט׼^t}k^Ǽu}k^U~|3HǼu}k^wُ*׼.yX?1yU_8g loNS l\qͳ6)1ƸYfWqͳ65X?g lk5~*3(ǼU'2c^󪁍yͫ6cƼU;ؘ׼j`c^󪁍yͫvL5~35ؘ׼j`c,65X?ǐc\'cOӯ_<Oc~3OX8,c}~:ƙf)loU~yL14KacfٌyͫvL~\35Rؘ׼JacJac^*yͫ65RXޥ`ߔrw),7&RXRE‚‚‚&Ap),Qp),h.\A2*qP)JaeT .eA;#3bA;#vQ _b  7;ȌAfĂ|pF,ƌXП̈]TF,[fĂeF,gvQ*,RXo,]ԍ?X zGA۳c),؏g),>A۳=Kau##nd=y\EFb ,3CrIГX咠'1 O 1 OS 
1Xp8(vQ12qP ,48^Ɓ1D ESw ,S{..'yX뎁^XcXe''c܆v ,ȝܖs ,&܉p , i|Tw b`A(:䣺c`A(:vq9q ,E8,2]\e .㰜8䛫c`eS⋢KaA>N:#䋢3b q _E _qׯ?N:]Ce. lNPtYHNPtYЋ .tYpYJtYHN]T,ȇ%˂8(]\A鲃L=L=LtYtYtYtE˂˂˂.*]80]80]䙎e. .tYNqP,eAJ;]\A鲋J*tYp˂8(]vQ2JqP,e. .tYp˂8(]Βee. .tA˂˂˂YrrtY;KEEςlYpR,w{F2zίgA3zVgA'gAou1zjG.*zVgmYYY[]=]T,.Fς8(z\Aѳ= .Y;]T,ge= .EEς8(z\Aѳ2]T,ge= zѳ2qP,rcѳǁѳ Eςg,FςOF.*z gA'gA3zvQѳѳ /Q9z|2zS,KTqP좢ge= .YGE]T,gA1z\Aѳyѳǁѳǁѳǁѳǁѳw=\u,q`,C&FςFςUg,q`,q`rѳ2[Gς>db,rѳ= .YpEς8(z\AѳqP,gA犯&siA2siA2.,siA1vwaK P%KtK zӓsiǴH%2WX_ c}}k̥1X_c{f/.iYU1nǸ&KTǔKgce8Ʊ85\X\g.mkxL1yƼKW.ric^?5[c^ʥ~.;5c^LǼuk^i׼ny+6=ǸYgcʬ1 3kc}'| SfmXGBctgi 26Yaf2kchy+6πƼYWfmy+vL1yeƼYWf2kcgk^1ye_cfmk~2>ǸYO3濟~g?JϘ~ʬC~;ƙffmo143kc}s|cʬqYDchk^cʬ1yeƼYgCǔYWfmk^1ygւ\ά6x ߙ 3k BA.gւg.* 88BA3kA3k~*48vQ2ʬqPf-2kY .Zpe2vQ|wf-gւ|kwf2kA8䫻3kAnt9v K z0sciAj2&ciF߼LE.A9d-@.W{hA N{Ђ<t-=y<Zy\Z]T-')Ђ8\A=qP-zhA?I\A= C =Zp{.Z>=y= < rwɳJ+Y[dNyYN9YN]T,='ς|Tw,='..g#NYpɳgeS'ς|su,rTr c|t-ȷHЂ|t-ȷHЂ|t-ȷHЂ|tzhAE[-䃨lA>$D` A ` A  `[p$5؂|@s-27.le` .A6؂6؂6؂|_q-q`-q`-q`lAlAlAl` z` z` jqP-w7؂8\A _` .E5؂|}w-le`[p5؂8\A jqP-le` r7 2jqP lAlAlAfvq[лYl=.NmG#vQݷ o\} zݷy'ݷ78} zݷ?} zݷ xvQݷ} 򞷻oAvQݷǁݷ} z}[}qP-oA\Aݷ}[pu߂8\AݷqP-oe}[pu߂87oe} .[pu8vQݷ[g}<[Eu߂>b-d-3v.|M2w߂Ov߂?^`yݷ2]T-oe} HݷqP-#1v߂8vݷ} z} z} z} z} z'ݷ=cw߂v߂>b-q`-|v߂v߂v..} .3v--v߂8,} `ݷ2qP-oe}[pu߂8N>oo{3f1.#3f1.=cg1 qA qAwqIv)qqǴHp%2WX_ c}}k,ƍ1X_cj/.iYU1֏Ǹ&XjT'cki8Ʊ85bX\g1nkŸxLŸ1yƼUW1qc^͈1y^Ę׼qch"y7"Ƽc^e1y*X?6Ayͫ7OLǼ/k_8 +tcoa*tcv XߗTTB7ַX?B7cnMqXB7OƼUWnk^1yUaԘ׼*tTWnk^1yU莩B7OƼUWn5VƸY.m\FPm ܶ ]mjǶ]p8mwmǁmǁm _ܶ zض zض zض]]]E<*rveԶ rWm2jqP.]8mwQm wܶ .㠶]p.m\Am2jqPveԶ .㠶]p$s.veԶ;ȶ]]]7&vny_mǁmǁ/ET]pR.2|U qa. H`. HUEހd.ȫEހd.q`wAo@2|\A2 ]pޏgwe .]p.*|\A2 qPwe .] 
qP.we;]EwA-0|ywA'w ỠỠ]]G =  ޛwe]p8(|Iw .]'m qP wAz1|80|80|80|80|6?w[= }]] = = ]\nE;|\a]Ч^ qXnE;|we .]p8(|wQ2 qP.m~.*|\6m~&ފf2/&ފf2/h&1wɼ Gd^[L]\hNdT&iKd@טc4wX_c}]Ӳb^qM07)7Xp럍cqkɼ85dE\\Eݧہ`?@ہX 䠿X ہO\A)Vxce.ȃ7W.2+sA2JL`e.#8WRIП y\Ϯy.\q]Te.o{V82\AqPe.*sA۳2\A \ z ɐ\;dH.=Hǻ2wQ 8] 9ޕ wy\l2.+se~W..{qX] rǕ2^+sA>2˩+sA>2˩+sY Aԕ _N] ԕ^\/ /$_Nw/&^pR/ wQM/&^n&E5|Tw/ 6|uxCM a Mj&^p58wMǁMǁM z z zĻ&^&^&^&E5664dj/dj/.*|2 S{A'S{/0^p.*\A2J}E8(q#S{e;^ЇiL=L=L=L=L}zjS{AS{A180S{AS{AS{NqX;aS{eN}^p8(\A2J]Tj/R{e 􀩽JqPj/ӃsK27H_GE7H_{f #}A3黸 zBo/gm|Vgm|g"}Y[Y-ں⳶*>kqM Y;<둾IgYl}4~5Hgs3yD>G8#}yHg^=|Hg^=| ϼ{vLY;-kng^7c/|5-3K1ϼo|5#}Sϼ{3^߿g3 ])X}>kGqg,@3~ Y;}5yg^=Y;k>ag^=7yg^=|g3k>kgk9g3yo~{w?߽/~֎>kqe_yGvY;qgw>kyϼ{o>kGyyg^=Y;ϼ{3k^ῇ\ =l{ȥv`=W!@`= (Sv~CCN{C=\ơ.{C=\ơr Dῇ|Q!_ C( =*C =+>lg~gUTzVP Ձ*v_>WYCu _Տ8=H>#y d`'< T2aH>䁠嘇@2!O |ȳp%D<T20ؓdez22=8dez2 |CO>i=H>~B2H>\ydC=y$d`p9 U2!_ |ȍ!%=/J>ydC+J>C |Q%!_R20 (CJ>\a9P20(p@R22) dC*J>䛫H>䃨dC*|/S2!\ |Q%lo z'67Dp%‡|PK^"|CD%‡|P?P"|'cDp%‡|2V2D%‡|!V2Dp^"D%‡EQ%‡zP"|q@0K=(>8DazP"|q@!ϑT" C/>\ơrD%`/>\ơrD%‡8arD%‡8ez0Kq%‡8ez0Kq%‡8ez!U"|C/>\ơQ"|q@%‡FT02PQ"|q@%‡eLCUS`>Xe .?)|.d )| 2ՅLC"SR>/2y^`>/2=>/2)|q@w~)|q@06=S;>\ơg =S;>\ơg  `>\ơg .3q`>\ơg .3q`>\ơg .3}Lez2=Sp)D%‡f/> J}C J^"|HzQ"|cD#C۠D#/^"| *>\ơDp^"|C/>)&J^"|C/>)&Jq%A0 %‡zP"|q@%‡>=@0\GW%‡>LC%‡>=@%‡.i(>\aCDp:J}ez2Dp^"|C/{2Dp^"|Dp^"|.g?8aлh>!C画a A4 zW Ç^&h>:%VC߲a_7 iD  d)0c4X_c}]ӲbďqMa8֏a8Xp럍cqk ñ85φ<c#ؿ˚Wpk^ ñi8515 Ǽ0ׯyͫa8ϰ9׼~Ygk^sL35os1yΘ׼cwk^ 1y5 ?95LA{~'O6KAxQ "] % ] l/?|=~J穨dAJ穨dpy**\~J^TT2 G%xcT2G%/**\> Q2J^TT2,;r$ٶSyC"#w@-<*BaGx*`d`%JHd@d@d -[*8P*8P*8P*yJ*8P*8P*}nq d eJ8H*X d , ,qTBI%yxdd`%J^(d`%J8H*y%J8H*X d R- ,qT2AR) t( t( YJГR@ǁR@ǁR@ަ8q#=*yڮ˖B*eeUX~rU@ ]>ᦫ2>*}GWe W(vU^(We@We@WeO骼P@ǁ@pU8Uy儛*K 7]%rUzCWrU8UX We`\UX We`\%rU^(We`\%rUzCWe`\%rU8Uy ]]S@/ Ε@G Ε@G@/誼P@G@/ t 27= ,qB*K ,q2;W*/2A@\ ,q@*2q2q2q2q2K */,U8U]]^JU8U8Uyay]UXP2;:*K ,q2A*/2A*B*K R*K+K!h-ƭ  @Ĩ  @ Qy P3;*0/,C+0},Lgaaj>" Ȱ}>F}81lasi>8Lc1la#ig a0)07Oaql?q?qƙsHLό0~cm5?37;?3i<K0>'/sؾomwsxyeg j;>/3߷ )<3ߺQ5} u}'q3q3qB] t +5@g~_! 
_jy@ "֎ G:>K ,qB9>K ,q3 ,q]@f ,q(p j<?@O²i3Ck<ylӁyfg O迲l:'jɣbKGyd`h ,[:3KGyhl腒@Nd腒r- w$KGyfeqbh O, ,q(KG/,KGK @Y:XP(믿@H6^(#i l$ |Iy`#4CP'Gh>AiϽa ۧg><?~Ͱ}lOa g0̰}ba:d{_R?9assg?3qƙCv{@w=Ct@/% #@ҽ8н{ܻݻݻ^Jн{ܻݻ^Jн8н{ܻ^JнX n`ܻ^JнX nwtt^(n`ܻ%r8Ƚ{ܻ%r8ȽX r8ȽX nwtt8ȽX n`ܻҽ{QM8NJM8NQ{NQQ&n8H{%8HJX n{8H{ Q8P8P8P8PuM t vz@ǁz@3 t t׽hbn`uݣ^7ġhbn{8HX n`%^(n`%zA8Hu%8H%8#b@o,( Ƃb Wb@o,( b@o,( Ƃb@O<żXP{aZfy7P;l4@'}|3la ?ba f>6}h3sFf>1G0 żua> ۟93O1a۟8S;l\&10ϼļ<K{ļ<K;3/1}7l03Wy"azqg^bayyayyy%3(10ϼļI;lCK;3/1wgbayyayyq)oچq)ƙwؾdq)ƙ0yq)ƙwؾh>Lbay=lߪ w@ǁ@ǁ@I"8"8"8"|,---"8"8"m-"X p 8"X p hm,qEBYyYKdPYKd,qEBYKd,qE8A@"X p`,"8"8"c~[/,wyl9---cpb8 Á>b8{$+ˀI1|ÁP1XL@ES1| Á0*}Mp73T yKptP@ǁ@ǁ@ofP@ǁ@oftP@of,qb8A@of,qb8Ћ*/b8A)KP)K,qbB)K,qb8ЋJ*K,qb8APV@u*2*/b8tʨꏊ tʨꏊ@_0) b8A ,qb8A@P)b8Aꏊ@ǁ@ǁ@ǁ@ǁ@:cp@pWT :T zAp@p@+Kk1V zGp`Cy-Ɗ@,qb8A)KP)K ,qb8л*K,qb8Ы?*K>@]q`?[^8+jQ8+jB8)@jB8)8+j/,Zk}M'0 а)=la ۇg>;x>8ͰLʅayjIazuzqwyJk|aB}!y߁g]}3?lr xؾ晗xg^Za>~g^Zayiä5晗xg^Zayk9[@Ɓ}kc྄[kXk|Ɓ̀ƁLƁ>A8Q8p_2 _(AJ`)%[$H ,q8AZi%|`Ɓ%KJ(Dȁg!r/yg!r ? [S yg!r ZygqDqD y y y|.wYw@]B] yf oxnI@[r ۶%WF[r }%WF[r%rm[r`ɖ|lɁ\wۖ @mKMے-9A eKڒKdK,q-BْKdK,q-9ПhK,qŹ@N-9ġ\{_YCT)L*@[|aYȓ-y>nr OR䙮Uʁz@r ϸvep B7'd}s O?oO .@Zo/97a}s O?oP@>0o6/,; ya}s`/, K6@ZXPVB7z/?'ЁZV@ǞV ˡ<BiK5Ձ%T8HSX Mu`4JSX Mu`4Ձ<Qafg>7=Ͱ}f 'f`a0yas?7q>La3sgayz晗7{g^ayy7{g^ayyy/7@y晗70yy晗7{ؾ\晗7{g^a^}g^ayy+ayyyafӛ=3Oo}>3Oo0<0<8f0<8f&o0<8fwI03/Y°}A8l_Kp< <$ ˑ03/oa$ K03/o}?>3/30ϼ0ϼهɛ=3/o0ϼLayzIoar& SƄayʗ0?)Y°^~0>C/A&a~f!f1ɛ=zy晗7{~`g^ayy晗7{g^ay{9f@ف}{Mgۛ؛}ف̀فLف?A7;Q7;p_2 f_(L`2%R8nqB&A y`ف%fyJygv ZK/T;-䑈ځ<T;_-aځhv#Hv#HҸҸҸ}wo@EqB]E "Ѹ߽ Hv 6n_(ve4nrShve4nIv w6n_ʸEہu@?h@q;CKdܾP@q;A/q;A] Kt."h,q(lܾl%ynv m䑘ہɇA9'Ac c[p?Ƕ6<fc۟`P68c|lmh>̰}d>MǶ8/0l{x ۞۞à>l7l{f~380l{\ yw<ywl73? ۶ywǶEm,??]f~^|3[/>ߝac]w1nPضg~7]|3?? 
v]U qaPضzvKcyԇqaP0}sQ|?E1<uoǶ ?3m+ac]ǶmǶ n 3m> m7> mywp?8H $q >,$N?XP6pd85dV2K.N2 L[y Sp d'2?2?ny&S|`9F[5ʮVv已k䩀4ʮ<FAI#RyH&~HA# OqFA H#|஑?8@#|஑a?Xk!4Zv%F]#k,q58Kv|஑a?Xk!4F]# όayf,%F]#?htg?8@#4FAht5:?8@# aH#k,q5rA %F]# W(?Xkw|EKv%F>p?Xk,q58]#`îa?XkLKv%F~ 4:?8@# e[z?- 4:?8@# e?8@#khFA.殑5ze4wX@#?=?4z~RC#4FAh]#^A#|஑K&h,q58F]#LKv%F]#k,q58Kv|஑a?XkKv%F]#?݃)A\aq)>p7?0?8{0vMz SL)`n=wS%)Ao`a7)Ao`q)At` wSLzSLm8v:Kp2?uLm8,q(o`CyNKvS%),qM8΀)>p7?XⰛ:KvS%)Ao`a7)Ao`q)A3`M:pn-^{t?1AoIyN^2&EK.zk?z~jk}`kW ﯽ :L4l g>=ΰ~>8Ͱo}h3sFf>10 a'9lnkg0<8k?3/0ϼ<̼0ϼ<W;2<k<k?l~03/0ϼ<k?lt03/0ϼ?^ayzq_5ƙ~g^aZug^ayzaayzq?L^ayzq*8LayC+aayyC6dg^Ґay)Cy?LayBy 晗(dg^^aayy~g^^ayyy?L^aayQyFߝak?l(0l633OaR ]??33g&y0̤柙&y~b~htۗȇɘ1l`4 amaݢ{pr]o/>>)>p8 ](]_d쮿Pf}Kd ,qB}Kd ,q>p8X O`8A<8>_)mAzض4@!zo@~<_1txĿP^*^*^*/%@J@-?pt.j˨ܯ[O6PDŽ2@--ӿP2@ e]Z1L?cL?cBruo~~9.OML?V,>2/L?ПM(,qL?A/%] ,q%@6L?Aل2r2@YaYYOor=`~?Qȣ?$L?1/,1*L?2@L?G8Mف>,q(۲Kʾ8}F3s@((8Q nd(paYQ qp@ F (8Q qp@ (p7 0Ѝ.,+Q7 0Ѝ%e/F fԍ%e9F@Q ġlH(p௲8<"o] <"o]B  w뻻pAR"䡧.,bQA` TTX~e**,2EWYTE~BE~ '..*tXT:,*PEq[[r(P7Dz†@G  "ndCD``y9 ^7!"Du#"K !"F6D:ltqa!"6D6D '0 а>la ۇg>;cCİ}ps3laCt>49L#3las aj?:OasƙgCaj?3q13φa05D ̫!bg^ GT@>BG#y@0>"_=]CFG#.C7D\ /"^6D:lP,^6D:lܯ\ ڿ26DYe u2Xb}D wPװ=Cb!Ga@]b}ą^Vb P@.;]X U&,q P7KtC0A7ل%!&t?7D\Xan46Dr=@@cCD %"sCąe9憈@ C@7D †@!"ġl@6D8u"., 37D8"}Ȇ膈@ < tCD "7D\@ "y膈 8!"ndqaY!"n,q({Q7D\X6n,q(Q7Dl %eCꆈS7D†@VS7D†?_!Oyȍ" ;?a?Ͱ}la g0̰}bao }c?9assg}cƙg0<7Ծ13ayo ̫}0o Crg^9LayɆaayghg^ay9yվqC< ̫}c~dg^fayo K*tB<2 '43/a Y $43/аc0<77QqپqA8l柙T33hff L13l3l6 o o)&̰0>'ؾ1l0l3ly79aMaEayoa%ayo ̫}c~?0o ̫}cg^8nԻ}#pwF GUfN  7.TF 3@͜@79L7.*A.KԾq\V%rY8}#pX U`7%j,qPF~!A9yj@~u5G `\q9ڿᣫ9.K^j@@#M]q9y$s !]q.R@?XgK=.@?XgK=Ij/'-zR~[B]^ tyu9.:@X]ț $VׁzX? 
ׁzXr@yu ]h Pu % u %Pu <:]Y ,qЕ ue1AWKte1ПXX+<:@ῲsWH 'raY޹+$ПeB]!w2 drBw2 ѥB.TWH ஐ@]+$qpWȅe뮐@]+$ġlqra+$OC+²uWH`CYR>͌[Fy䖑@>2'[Fy䖑@>2+䱓[FqH e$Nn -#̸e²qH sKm32C*/ ,3@BRyI \^XfH%$G./ $ЏT\@$ЏT%Kyx@ǁ%$G./ tX^8B8$A%zǖ@/2cHxlP-#>zǖ@O[F|^e$LZF`Hl ,qPHxlP-#%j -#%j9-#^e$q`H@ǁ-#[F`ȅj t2[F\X^tHxl t2[FKʋn Ƃ-#52XP^tH`CyQ-#%EM8e$A-#%j ,qPH7lP-#%j Ƃ-#%j ,qPHxl ,qPȁl -#[F`ȅj t28e$q`ȅEjzǖ@ǁ-#28e$q`Hxˋ8,q]xˋ8,qPJxˋ8Ox~ ,q(/R% '3la,Gq`}ۧpLd̰|dq'3l۟89L}20ay ̫Ofg^}2Ofg^}2<W̰İ 0ϼdy'sdy'33>ašay ̫Of~ghg^}2<_W0ϼd~dq'33>a%ay ̳Of~!bg}28߅ ƙg0$晗jg^}2?äƙg0̤#柙dD3hf9Laaa|NOf~0W9>aUaŬa%'3 d ۯd ̫O0 ۯc ̫Ofg^}2XOfg^}2<ƙwL }2@d9 /w9 d'sd2 `2 d' d'se%8ZX> e- ,q,A}2YK'_: ,qPL`dK'% ,qB dM +Dȯn ፛h.TM ?׻&'8n 乥hM4KetM @&qpͅe肛@7 n.TM @7n ,sƝ2j iw.TN`[54ˍ;enոs wyƝ@-w=lܹP;laIh>I?L \ 1lOǰ=l R0 ۃ1lŰ=?P 3q"1lOİ0恽A?7hޠaa߅ayޠa߄ay ̳7h0 ̫7hg^A< :LA< Sf^A7h~c~ig^A< :LA< Woа0ϼzy4l43ޠay ۯ. ̫7hg^A?A8 ƙgoа0<{q4l13ޠay e o) ̳7hgA7hgA8 M:Layy1גy:Lay yWoar{ 53ޠa=ayIy4l43/ay K53/0ϼD^5l73/0ϼ^x?A8 _ƙgoa2w ̳0"0\dq柋i~%l~q9LaUa|h~e~l~)0u 㳀@;`+`,`7a_W70ϼ7S70ϼy 43n@NK.|wr _r |r B9w2 |2 d  d tn%r8Xn t ,q-A@9K _r ,q-A@%KP_MdP 2*/n ю[.TP ?U(;n 䩦[}׭B5G~j(OMɚ uqCc۟؏mmOmmϓm]/CǶǶgǘ#4N}l{|l{ #c81>C8갽qc>1cy4N >gṢqc۟a{<{<{<{԰qcqcqcqc픏mw>>SSSnh}37N}37N}l1818^<{<{ehgṢqcmq81<><gṢqcۥmW>ƙG8h7N}3Ʃq8]1.Nvc۝ywiڰݙ1nLg~}37N gi1Jg~ovcݒ18~< mGg~}3>hv1Ƕ;Vyw'Ƕ녗qcy4N}l3f8h>ƙG԰]1\v1sٵXeo;>]vc|qc۝a7>ݳhv]c幏m7SS}l61> 85lovecqcqcma{<{<{8jzSƩAr8 G_Sn=Wԃ wރ̀d 8vOAAS2 jzp' ӠƩ݃`{a=X7NKvރ%{ԃ=Kiq=x8Kvރ%{ԃŠKvރ%/p=ˣ{,q=xQG :UG AvA~%PՃ=_wK8iq XOxLxLܝ۝۝ۦowDڃ硎yIk:X{=ԱߧDڃ򡎵At=X{Ա#rqPk:X{ġTZ`?K8,q;w$t=XߧaOߧaO`~AGBڃ%}Q{~Q 7!Q ,wT?FAGڃ4Qڃ4i5jOQ{ПQ #Q{;Aը=Xר=j,q(T$Q{ġPZ`-Kʅը=- j.TڃjD5j2Q ,wT ը5j2Q{qPڃܒF-pQ{qPڃܒFAA5j守jD5j8 #Q ,WFT`C5%Qڃ%j*=C(` ,g=BۃLRan<5Oa X cS6l-&0׼n\ sKvn\ sK65)5/۰}kk^RayImk^RayI݆S_\ sK6l暗mk^Ra0׼n\1J݆)uƚm>55O0u.r5nɕa0׼p;L7 say65/aLo sayI݆[\mk^Ra0׼k;L s뮶ay65{5may65چ#RayJ݆shiayJlXEt .I ]Ѽa0> (u!Tް}vs Gy)$ugnYa(0> (u;LRa0׼n\ 'mk^RayI݆-u d[-u d[w,KK.z̀nLn] `[ `[>{4Xv ,qz%[/AR u^` ,q-p ,q8nݭX⠻K$u dGRݭX⠻\[/Aw7z ?zEr\H.nj\ !r"@nZ$ Q$ȭVHBd\ Wr" % "K$EEr%O.[P-8貅]X.e 
%l!A-8lw.[,qe %l!c^>%Dޅ:%D^ Gl i?%9Gf^?h mz&M/k.W۹P@?k/O<G1`(P<G1`&rbb ,ŀȧ0r^X&R- ŀxK`ŀ%e6b@NY xaO0A<Kt#s%:X 1`?) ,q9b8b@/f( d7b <ŀ^P  ŀ^P Ⱦŀ^P  ŀ060Ab@, ,q(&F1``C72qb1``C:0Н6ŀŀY 8X xa?0q0qBiBi0q2od1` ;m8#/,cG8#i0ġY x ŀ!/, ( ŀ!]Crb@ŀ}܀0Ze`1` 7 - #b@Y xa^0[8/0;h8/, 8f1exbK$ t( ŀŀŀJ 8P 8P 4f1`@1rb@ǁb@ǁb % ,q0Ab@n([ xĀ%rCbOçAbK$PbK$ db % ,q0K$Pb@W, ,q0Ab) t( t( db@ǁb@ǁb@ǁb % t( t( t(Pb@ǁb@ǁb@Z xĀ% g1``Ā%rb/ g1``Ā%rbK$ ,qBK$ ,q0AbĀ%8H x ŀŀŀJ[ xĀŀJ[ 8P xĀJ[ 8P 6ŀ~J; vlvxad50=ZK Av[K x N8888xnTxnT8xnTX k``nvX k`^iq50A@i ,q50AYKd PYKd tY0=:ݲ0Е@1`{t^)@1`eM1~GG1`[)A1`[zb@(Pbݣ0Ab) tb@ǁb@ǁb@ǁb@ǁb@7%(Pb@ǁb@7%( t(Pb@7%( t( t(00ġ00=:Kcݣ0ġ00=:Kc/0AbݔBK$ tSbK$ tbr`b@( t( tSb ˁqݣ0q0q0q0q0=:/,- ,q(- ,q(- ,q0=:Kʁqݣ0ġ0=:Kʁqݣr`bKmb obܲ0Ab) t( t( t(ط, t3b v;?zӰU}~asiXퟬ=l$ ۟Hku8?ӻyjG0]`濋8QD8l ۟Ø?a0= 1s"aGac(" ۿ)"3lƚpk"$"ƚpk"a|a8nk^ gÆa\2075↹u[0׼D骸aOy75[↹uG0׼D1ay]wnp\nk^k\nk^7 ۧ EXކX)O2 kP05lvc)/;cØi KAaa c) G1 &}|ok^Bay {I(85/0׼X - ܇Y - ܇@ N@fB@B}f/AP0QP0p d,Pw8NX /Aw8H(8NO ,qН|% i`` ,qН|?.MX;PBX;=]Kt'߁?/qѝ|Ï;@n#[ +lb \V.rk@n([ȭ+lb \P@Y=+fR.ranb`%R.^%R.8覊 uSE`n,qM%BTX⠛*KtSE`ꦊTX⠛*nr8ꦊ@W7UHYc߼5C@RQ9gYcOIR9<!JYc?D)k (eB9w?D?:KXAg.Y ,qY ,q ,qY@ǁ@Y8P8PyR:o:orJ l囁^Q8Pyaж|3ro:o8Am7Kʬ囁|2mf`R8,U`çAgKt*A@/( ,qY 囁T_3e5/,cRk$;kzJeZ~@\ ʕ~@ ʕ~@\ ʕ~ :ۯXP t~2Eef`C_3ġRٯ3~@~@6 d׼Uٯ8د8دyl<ۯyl<ۯ8دyٯXPF ~%e~2gf`çšٯ_2cfk~@nٯ_35Df 7 t ѯWM259`f w 䞱~үM25/,l?lak?Loa?Ұ5ld _?Oa{%;lT ۟T=lm?bcs.zhEasɠh?,a 9yØ?4I0414h? 
X4h&;akay4ɠ95/0׼ \2h&0׼ \2hsˠ9l_֠ \2hۇ`Ask^ay4`\2hsˠ9l暗Ask^a 0׼ \2h14iƚAs>55O0< )ay4i ǽiƚA04iƚAs>up~YKnk^;ᆹu#0׼暗A0]75)5u0׼n暗As>55 o\mk^w st۰}qk^ sηa(AskaLܰ}qkt0c9٤ s>4lO&0f&aް}LoIa2aGz\2as˄yL\2as˄95of &})of K&})p dۄ؄yn dl d l 00@& u{^`n ,qy%2a^Kt{^`L\^`n ,q8ݞX f 6a8ݞy%=/}L0/y%=/L0Kt{ށ4a:4a:4a:qy훁܂}3or@.l 䶜훁܌}3orC@nUؾy웁ܡ}3or e 䦅훁%o8Ⱦy웁%]UX⠫*.U%"AWU8誊 uUE`,qU%o^*KtUE`^Ҿ8о Kv*?Ag0h 3#6~734~@pn?/3c{6~8\X`΅]sa%:Xsa%:X g l ,qйi (\X@g@g l̒%&r?9gg@g`C,3ġ̖I??/,6~8\X`΅8?Kt.,A.Թ ,qй@y ^P-K= 3 bJ= 3sK= 3 bJ=/,s\zzALg {zzALg`^SXP, tR2eeR2eg{zAK=K=C3q2eg `g `煒z2z2zn煒z2zng `煒zihg`C&3[zz8BK=KT%eR̒.0?/,3K6~h &.0?]`4~r@}$3C6~rW@X g 7l,]?K̒M2?K̒%?K̒$2dg`CY3ġ,M2???/,3K6~:4~r e th &=c???/3A?gl2~8=c?Kd ,q3A?/3A?BB?Kd d ?/3-?Kd ,q@???B3q3q3qB????/3q3q3C6~^(g`X g`Z3A e Ъ%2~8y%2~8X 2~8X g`lYX g`88Ȗ8ȖȖ:~I K=2J=]处%e处nQXRVNJXya9)ag`IY9)ag[zHgSFg[z:z:z:z:z^(g@g@gz^(g@gz:z^(gz8HX g;z8HF%JXiq3}{J=K$ ,q3ARI=/3ARI=ݨ3q3нeJ=+34J=/3п2J=L3п2J= L3нeJ==3нeJ==jAgiz^(g`nQX 灔zFg@g@g@g@gz^(g@gz:z^(gz:z:z^Xo[XPo[f%𶥞nQXPo[f%𶥞JXPo[X g`C9mg`%𶥞%zFg`C9mgiz:z{@𶥞nQ8P8P8P8Pfۖz8ۖz8ۖz8Hf%𶥞nQXPo[f%𶥞nQya9mg`%NgOgg`CQYX 灔z:z:z:z^XluzE8G߰C`0l*O 0lް~an?l ۟rw x?~k^W ۟nQ<O=0=lJ( ?a&E{ØMBKQ#0B1?@X&QckayBI:5/Q0׼D\&Q0׼D\sK:lϚ sK:5/QasK:5/Q}lk^ayBe\sK:l+暗(tk^?FQ0935OQ0ay&w.Hs&ay#7lccS:lOFcSzƚs>7lƬP9lC:L7l (1+wGy$ƬP9l cV({ X(X2 w^KKt ^`. ,qB]XKK$ gKt ^`ĝl2ZXK.3F;Kt ^`. qi]Ȟŝ]ƣŝ%@;;;;/%xÏ. 
tݴ'|,4Жr²@nE[eܚ,4Җr²@nJ[ B/,4tr²@Yzdܸ,4AB/,4+0Ktƅ#AW`8 ]q,q%#A uF`,qܟ,@BBIaYh@Yh+0=w+04_RK@IQP/) J%EAiOI/,(( ,^XPP^XPPJPqA JKt-Ag.q ,qq ,q4Ӄ8ہrЂ@ǁ@ǁ u-q4q4ӄ^X-( @ǁ ˔+4q4ġ [PXP-( 䄡w JKػ%:v塠4AgKtBq ,qq .RPU^uPPza4Ћp Jꠠ4Ћp J9`AizNAe~̂@/)( 䌀^SPX Ai8a2 J4ġLYPza)4ġYPY J4q2afAi `Ai `A22rV %( d,( 䬀JP%8HPza4ġNZPY JKRP4d(( tQPM2 J/,Yr̂@[PzaͲ4drł@ XP=c JK$( &, J+`Ai`CͲB JIfAi`CͲ2eAi`CͲ4d^Xf,( ,q(Y8, JIfAi@Ai@Ae6˂@ǁ@nYPz$4qB JglAi@Ai@A8HPX Ai -(P JglAi`%8HPX A8HPX Ai [(^(Ai`lXPX AbAi`%HAi@Ai@Ai [(:::^(Ai@Ai@Ai@A::rPւ %( ,q4=: JK$( ,q48HPz4 Ai e-( ,q4A J/4A JK$( dڂ JK$(=@ǁ@ǁ@-(P@ǁ@-( t((P@-( t(( t8[NgXPQP^[Pza9aAi[^XNgXPXRVNgXPRPQPJP8PP8PPF%JP8PPF%JPF%%8HPN8%QIA8HPX)}{ JK$( ,q4A J/4A Jݨ@DoKh^4^(hmi t׋&@$$&0M~$zLn$DIBD=Ah^48$z Mz$8$8$8$8$m~D/I4qI44:4^(hi th thI4ġI4]/DK)kDI4ġI4]/DK)kD/I4ġI4A&r&DK)kDKd t׋&r&@wh th 6?MS6:4Eh@h@h@h@h^4:4:4^XNY$XPNY$XPNY$X h^48S6Eh`C9emh^48S6E唵M%28}MM%agh`L$8$8$8$za$M>6?ay;LOaW65/0׼L&ayDi} c$:5O}ek&ayD-YayDi=L&ayDiOx{ᆹu+ܰ}k؇5+ᆹu!0׼ep\2u0Լ.p\2uamk^W say]vn}3sʷay]6lcc4c$:l0c$zyƚ0pdPr}~ p ܧ@-PPetY r}&p.PPÖ.0p. ܧ.T\w>Ne%^ KtU`䀁8M`. ,q0}KtՅy`9``. ,qT?.M]@X FB]@X K0APH9`@9`@9`@9*q0q0]@8 䆕\9M+;iG` vr@.j \9x\9Ս\9x\9X ``9X ``çA/ԩj,qЩjPKt=AKPKt=As@::e`@`@` 7 t~u)q0_o^2z0_oBz0#Qz1C!a3z1C!a3^(!a ,$ bBPGeKtT&AGeKtT&AB@!YHX⠣2RHI$   /Q@ǁB@ǁB@%YHxa0k^  /,òrB@B %$ dCB K$$PB K$$ |%:?eX̶@WmA0G[a@Vxa0;hrCٶ ˰mA01r@n(VX [a wl+ V- KʰmV4 Kʰma1 KʰmA2,f[a`C0ġ V4   /,b:rͶ ˰mmA0q0ʶ::^([a`l%rCٶ e+ ,q0ʶ8VX [a`l%^([a`l%b[ᅲ8Vm%^([a +8VX [ၴ::b[a@[a@[a@[ᅲ:::^([a@[a@[a Glm+P 0A 9bk[a`lV[ K>-rֶ Kd+ ,qB Kd+ ,q0A@m+ ,q0Ai+ th+ th+ d?۶ e+ th+ d?۶@ǁ e+ d?۶@ǁ@7~ˡGo~J00] ,,>2 tӐ*r* ˡ Kˡ 4@ A 40q0q0q0qB   ŤB  Ť0qB Ť0A* C0A*@w12P* K>Oa`T%R8HeX R8HeX atwEr@ ,r0+tK` t^(`[tz@P@O69%rH`{Wt:t:t:t:tz  t f=9z:::/, ,q( tr@ ,q( tOC9Hm``C9Hm``% %r8v89% ]989z:/, t t@ǁ@ǁ@ǁ@ǁ@ t ts0ġs0ġs0A@ ,q( tO]9XPR9xWʊ YʊR}E.e_`>)/G_@WϔnP?|7 5l__a{} NU=lSۿ[1l. 
҇%=^GҰ4l?ۿVyra8l20׼n7?#|zSa{=ٔQ 8lp۟ØMa30=1TjaaRc6 ۿ[V2l暗pk^jä暗pk^jah0׼Ԁ\Rۧ暗p>P55/50׼ԀjayO5H5aRcS 8lƚpkjaȰ}vjkjaypkjayS醶ay6lK snt50׼.f׵l\R;نu#0׼Ԁay]65/5}0jk^k؆u 0׼`lap0׼._k?F50<ՀaH0<ՀƵ?f_>t]* uR>t]*}$pP+3ܞ/p ܇.J\r>eOCW b>|nW ܧLKtR`nW ,qJ%Rr*OT|ܬ/A+8vnW ,qJܶBݮXەwo_`nW:*@ǁ*@ǁ*@ǁ* uR@_@_ WV:T:Tr`_ V](_ Vrl_ "*@nYȅU|JU|\MX5U|J%U|%R8HwT|%:XiqЩ u*;AKt*;A/ԩ,qЩ.ԩ,qЩ@;*@ǁ*@n/Y8P8PM&./;M:ӎ@O;ziGO_OI${ҡ/+zҡ/+z./#@zCO_`\%%:^X%%:^X O_ ' ,q X=}=}=}aU{z9 dO_@O_@O_ DŽ t 􂘞@ǁ@ǁ =}%e՞@N wac/ġX%!{Kt$S8xI`<}@z^/z.,S zNO_+zBO_+zNO_]XfB 򝞾@ 򝞾y|/ġ ;=}%eJĞ ˜=}%eTĞ@/ d =}=}{{{./q/q/ ](O_ `O_  dP@vk ,q/A ,q/A@/X O_@O_@W=}I2{]}r͞@W=}>zfO_B{e`O_ 7 ,q/;h]X& 䖁=}%e̞ =}%e̞ $=}%e̞@wa$/ġLXP& =}=}=}I2{zfO߅e̞@ǁ@8 e{zz./AylO߅8 e{K ,q/Ay./Ay_ByK dŞy./{K ,q@zzz_/q/q/qByzzz./q/q/](O_`<}lX O_`<}/A =}%8|<kO_`<}%8w<}%8X O_ 8X O߁::mO߅:mO_@O߅mO_@O߅D7ˉ$D=} =}=}!iHO_``9bO߅D=}%D=}nw =}=}n8888w<}=}=}bw<}=}b8w<}bX O_`<}X O_](O_`<}%8|Z ,q/Ay./AyŤ/н+z./wEO_`G/Y/Э\zLByʥ/Г wEO߅z@ ,q@zݻ/q/q/q/q/Л](O_@O_7 tP@o88wa9HmO_`C9HmO_{W8wEO_`C9HmO_{W8](O_`C9HmO_`<}% =}%88=}% =}]8zz., t t@ǁ@ǁ@ǁ@ǁ@ t t黰/ġ/ġ/A@ ,q( trڞ@ ,qm/PQ5ٶwa5ٶXjl ,5$ށh t Ѷm¢nm/a{˶̗mo/^_}~V_=b//޷_}}~V_=]l{__S~n/޳_fl{~9l~ e ˶/_=//sw|T?/eD.˜ݨwz_=l{6ޗm/۞10}0};l7}ޗm'_}|ްݨewɗa2xtXf{/ރ$AWx{d=Bnɋ n{==cy,qؽxrL^2&/ރ2&/^{d=XPf ,3h=XPf{d4y,q(3h=XPf{d=8/^`AA^I&/^`AA^I&/ރxrX^xt ܽx8^Kv/ރ3/p=X{䞱x8^Kv/ރ%݋-y,qؽx8^Kv/ރ%AP{a^B݋`/ރx:=x:=8/^{qA^xw/ރx:=[yw/ރ%A6{a=X{x8^݋ o{a=Xx8^Kv/ރ%/p=X{,qؽx8^ϖ݋`/ރx:=~xA^ϖA^݋ =8XNXDȁ!d9rpw/ރn!r"D^4r"D^r"D^Kˉyt^=8=8/ރx:^xt{]Lxw/ރx /ރxAw1{a=X{t^Kv/ރb‹{,qؽx8^K>-Zv/ރnË`{a=X{,qؽx! vo ݃n2A`{=âA\!{.p=+vzd.p=t sn v݃v:=8@`݃U.p=8@`w!{q.p=]ut {q.jrYdrYdrYdrY݃%PvzWrYKv݃%Pv8drYdA]`9,݃v݃v:=8@`݃n2A`݃vPv8C=XP5K``.{M&,q(%{M&,q(%{M&w܃]$F2=X~c$܃嗽40=_6Ls \`Q44i;L/aazߊ? 
ӛqb&Kc;qO*/ap:]8l߄aqd O]zA@nOAꮞ}5pG!ɠ}P.prPwdP>(ٶ/.A@nX⠻z./.[8讞]=%']zKtWO w ,q]=888w t t dǾ@ǁ@ǁ@GwH\ ?- 2 % wer\Z&=o\ u, erJ&?'c\ - j2@~[&X \`drJ&X⠣Á%rk\`8p`_Á%::X⠣Á%]Á%::X⠣Á\[&w ererL.qL.qL.A:](\ , t~ut@I\5i &Ms֤i.5i &Ms֤i.K/i.K/.W^4zE\^4rǦ,qС,qi.C?68PƁ4rǦ@ǁ@ǁ ̩MsMsi.qi.qi.@6:4z)M\@\@܅eզ2j\ l4XP&Rm ,qС e 䈐Ms%:XCPF`Ls%2zg@/fh b ,Ms^4 Ms^4 Ms^4=Msi.ni.A@i ,q(c6zmO\`Ci2a\`Ci.#6A90a\ `܅eæ@@ e dl dl Ms484ȁ .i.bKd ,qiBKd ,qi.684X5t.Mj]]( ]K@&5tA2f ] wЬ 䆲5t6kf ]`)2f ݅e2f ] wЬYC-kKh5tJC4kKh5t6kKh5tA2f ]`Cm.ġYC4kjj.,m:r h5t5tA.q.::]( ]`4t%rC ,q.8HCX ]`4t%]kK ,q.Ai_.A diK;@ǁ@ǁ@W t t tP@ǁ@ǁ@ǁ t t 5tJCX ] {8HCX ] 'w ,qBiǥ) ]`4t%]( ]`4t%8HCw4t%8HCX ] 8HCX ݁::m ݅:m ]@ ݅m ]@ ݅,5t) ];,-gIn$6%,5t(RCX"XΒXCwa9Kb ]``9Kb ];H ]@ ];::::]( ]@ ]@ ]]( ]@ ]:]( ]8HCX ]8HC.&5tJCX ]`4t%]_4t%8HCw4t%:4](\B4D\۪4ni t[@瓦 e t[@O4>Ms4)Gi.mUi.}$qN܅2:4z㜦@ǁ e 9MsMsMsC68C6D\`C9l\H48C6D\`C9l܅28C684XP54X \`C9l\`Ls#4XP54>MsMs8ir٦@ǁ@h th th th th t@ǁ@ǁ ˡfKʡfKʡfKd tr٦@h ,q(m t2,"ﳨ, ,OL.пOL.пO.,"ݔLkaz3?l_a{} ~^~w߰Kp^{7^wް7l ۿn/a߰8l20׼?Ӄx{z k!<0Q7l۟Ø1 a0=z1c s¸aGa c(& ۿ'1c&aܰck¸ay I75/a0׼q\&a0׼q\ sK7l~ sK75/aa sK75/aܰ}hk^¸ay ?\ sK7l暗0nk^¸?Fa005Oa03lbvay-65ņ%;L׊ sRay 3<\Olk^¸a0׼;L7 say"65;bay65Æk¸ay kay n]-pŹPib-p Gq.7h>{8dZ>9z gQ}2-pL gl[>Orjsnp-A8 ,q7zBXol[`9j@ǁj@ǁj@ǁj uM@[@[ {0V:T:T:T]o.ԥR+ӥ<;WFK] r ɖ@..l ږ@n$R%-uL. [..k[ְ.+l[.. [Kd ,qBYKt8A@m ,q K] ݶ8dq`N8dq`,udq`N8dq HK]@K] 7l th th -u-uR}([Z_,>~@po7M] i?nM]? ~@? ~ Pv^oX⠣ (H`8(H`8o%Kt@9d]@]@݅e ~@ǁ~@,o8o8o=vv^o8o8owa.ġLo9&.,8!YKtB9d]`8(ȅ8oX ] 2~@/跻o] .  .  
2b]w dc~@ oX ]w ,q(S)zW~2b݅e8~2b] 'T2rj~@~ ԈvvvvvB95b] `݅Mn]`v%](]`v%rj~K;~@&v.M..ХI]K~@owa.;hrC~ |vA.f2g]`C.;h]X 䖁v%e~ v%e~ |v%e~@owa.ġoXP vvv>f݅e~@ǁ~@o8o e..A~l݅8o eK ,q.A~.._v%8oX ] +8Qb_.ġovvvo8o8o8owvvvvo8o8oș_..A~@v ,q.A~@oX ݅r~K䷻P~K ,qBK ,q.A~@6 ,q.A~ t t d~ t d~@ǁ~ d~@ǁ~ _Q.rD|?O[?}?|#*_X~X)݅刊w%刊wn7R~w wwn7R~8P~8P~8P~8P~wwwwnqR~wwwnqR~8P~wwnqR~X ]`wS~X ][](]`w%8H~ww%JǟK$ ,q.q.{TsJ݅R t @ t @PaVvJaA**.НS*.Э"*****7N݅R:Tzo @ǁ 8vvvsV8sVUD]`C9l][ET8sVUD]`C9l݅R8kq(疭 8vsV8HaXP-[aX ][ET8sVUD]@]Ʃ[.q.Э"******.q.qrn rn rn )*.ġ[.Э"*Kʹe+.?.V"[@[.п[bC. ?7ٰ=ذ56lǰ=6l ۿ1lu^^~U1ls _\ְ+0/a;k _X'az] s뒍aS0=Ü1Et;n?{ӣw3F0>w)&0f"aѰp3Fa ۿ=)ƚnqDt\ sKD75/a sKD75/0׼DtAaX0׼Dt\&0R 8\ sKD7l暗nk^"a0׼Dt\1膱)ƚn>x35O0csn]`ax0׼;L׀ say]65/akk^ sKD7l_\ Giuakk^~ s뺯ay]u s뢯ay]5l$c cSD7l" cSDǬ b.u3X 7ʬ b uL>sXL>%b^f9},&p ܧ9@['OSb1N.A\(\ ,qu3%n&ǥfKtL 7P8躙@"['XfN.qN.qN.qNB]78P'8P'Nurururur꺙@GWKrӖ uD`]-ȝO[] 'NЁ<{6rc3^ l jf@m Ζx\،-Bܷ/B/Af.Ա;,q/68kq/68q`8q`xq`8q 7l;f@ǁf@ٌ8Ќ8Ќ=1./c6:4:4]cҌwNf@h #x?'v/kv/k/Ki./SR6z)M3^4]&%:aX&%:aX 3^P48Ɂ4rrf@ǁf@ǁf \xxO/q/q/ 4:4zN3^@3^@3ޅeVf2k3^ lƻڌXPvm ,q e 䈕x%:aX 3ޅ28ȌX 3ށzmO]3z1C]3z1C݅e~@/f b~@/f ڞ~@ ڞ~ ˘v1^oXP] ڞ~2b݅e~2b] _2r~@~ vvvvvcB9b] `݅oo]`v%](]`v%r~K;~@&v.M..ХI]K~@owa8.`r[~ v.f82ph]`C8.`]X ~2ph݅r~2ph݅e~2ph] 췻 oXP ,q(r~@ǁ~@ǁ~ vv2ph]@] t 䶰vvvoX ]`vBK 䶰v%8oX ]`voXA~K ,q.]KvoXP d~@ǁ~@ǁ~@No8o8o8owvvvvo8o8o]..A~@v ,q.A~@NoX ݅rr~K䷻P~K ,qBK ,q.A~@6 ,q.A~ t t d~ t d~@ǁ~ d~@ǁ ,gI t Y#[Β?B-gIl?JY+Q.D%r r @w; @ǁ @w t t t tP @ǁ @ǁ @w1%.q. v*K t )K t )ŤB)K ,q.A ,qWT8HaX ]`vRnZBYX`IK]DK݅IK]h tÇ e 8-unR'-uR-u-u-u-u-uᦥBYZMK]@K݅z@ǁ@ǁ b[Kb[.ġ!. 
ZKb[.ġ!BYKb[Kd ,q(gm ,q.ġ!.A@7|h ,q(gm tÇ@ǁ@pRwa9ClK]@K]>::::CK]@K]@K݅ -u% -u% -u%CK]`C9ClK]>83vF\`-#Lk.tߖ@7蚻 N>]sk0/aٰ>;L/a{hU6l/a{ S~nag}b3l <ϰ3luKgk^V0ԣv?h9G1a3vsDܰ;l0>a|'7l;L>a}ra0>_ c'75Oܰ0䆹暗Onk^>暗Onk^>aySay䆹;L>a%ٱHjկO슄S2u70|r\ 7c暗Onؾ35/0׼|r}ay䆹ƚOnk>a0<}rX j>ayOn> c'75Oa c'75Oܰ}0}kk^/ w]ޭڭaytkk^暗O0okk^o暗Onؾ25m s'7l_;+ay^kk^/:Lv sZ\zְ}Onk>aJװ}kk>V}++--[5} B%p_o =p_C ܷ }[YVV>wd_V}++p doV} %pʺPV@6xm ,qKZK^X⠗k+܅zIK`^~p%zIˁ:::]::ra+\@+\@+\@+܅zIKDokd_ʸ R&z[C VL dʸ@t'ȓ}ri'ӎ}r<'Ζ}r<'}r<'w|r<'Ï}r<'w|r<'X \`|rp`8'}r%X]Kt8AwKt8A> u8AwKt8MO.qO.0O.qOBO.qO.qOBt(PE]@I( wMڟ@פl.G}$l.'t}BlBBe\^N܅(X⠋"%(X⠋"%E\`.H\ ת, t( t(,Z68P6NesesesDP68P6:esesese]Kʾesܺl²k\`.8H`dsܿl.AEK$PK$;@) I@t( I@t(lX6es>P6es>P6?es>S6waفl.pK8M}l.ġX6waYl.ġX6ȭAc\ `܅r;Ʋ@@@@@nX6wdses܎l.qlB`\`ds%](\`ds%r;ƲK$;@&es.M.l.ХI\K@,1Z6>esl [6wacl.}0Ki=F.,{8=Fl²h\ 8=F.l²6h[`6h[ Nv]Xv ,Wv ,Wv dn@Wn>.,kvaL[ Nv]Xv tv dn@ǁn@vav tv tvPnKv dn v ,q-]XKv ,q-An.-An-r8X [ v8ma9-ġl ȡnnnܢ-q-q-qB.-q-q-[v]([`nX [`nܢ-An v n%r8vn%r8n%r8X [`n-X [`n88ٲn8ٲnnٲnnga['t]XurpQ{G`a[xt{v]XuX"Xun8nnnnn88!n{v:tzM[`nX [`ngX [t]([`n%r8vn%r8mq-An-3t.-eBC[4:4](C[4zNC[64](C[g4zlCC[4HC[64:4:4:4:4OMCۅ2:4OMC[@Cۅ2OMC[@C[@Cۅ m% mXP. m% mXP.v m% m%2868XP.X C[6486zlCC[@C[4]X.8 m m m m m88vaxkC[`CxkC[`CxkC[` mXP. 
m% mX~EdC[Gb4wKC[4z$FCۅE!dC[46ϣa{FaWa{} ۣw>Coent>?WŰg3l;L=a'ϰg5l3l9L:\zazd۟0Aar<д6lP۟19yim4 ۿ&0恦a̰{0恦dZOƚimkaWdZ暗imk^ayimk^ayֆeZ+\2 s˴vLk\2 s߯id0׼Lk\2 WTeZ暗imؾ25/0׼LkMkX4 cӴ6lIƚimka}kayֆa"0#WF }B$p+#UAكM-pߠ 7هM-p_ 7.M-XKdS ,qkQ\z-J`^ȶmj(%z-J {8(Ҧ8Ц8Ц8Цv^8Ц8Цmjmjmjmj(Ï^rެX LoVd fR`zB [a d+@6a tQC%mlYȃ%mlXȦ%m<.YƘ%m<2Yv$mXs%mO]`I[ O8HX IۅX˼%w`I[`.8u] ,qe] ,qB] ,qe] t?)i t(i d#͒@ǁ@ǁ@,i t(iP@,i t(i t(iPy?{ n"Ic nJ[O>o4D[A4+@[4]([ ׶lp @whpPIKt$AIKt$A@hp ,qEip 亖 n n n]à-q-q-= }|-q-q²k[`C-\6]Xvzmp ,qE]$ ,q-{]68"I` nX [` n n>c n>c nm} - } - -].,56/q tWac[485.,668]mc?h[ `[ Wnlp dlpP@n8888ȍ.-q-9626]([ Wlp ,q-A ep ,q-A@nX [` nҤ-ХIۅ24ip ti> nI-ma.,6f[`)Ͳ;iۅew;i[ `6]Xv'mp  h߳,ىvaY-{?;˿gY-;I'Zot]XD N@6rD,ى8ЉFhhkډ8Љ8Љvh%r8ȉȾhʉX 'Z v8ȉX 'Z`h%r]('Z`h%[D ,q-ġlډ6h%esN@D ,q(vH'Z@'Z@'Z VD tD tD tDPN@ǁN@ǁN@ǁN D tD tD ҪhʉX 'Z Sv8ȉX 'Z VD ,qB9j'Z`h%r]('Z`h%j'Z`h%r8ȉX 'Z v8ȉX 'ځt:t:t4DPN@ǁN@NkD tDPN@NkDܔ,uʨ= O)q~8eMm?!QRVnJX{g%e妄g֞֞zEفԞ:ԞzEY@Y@Y@Y@مҞ:Ԟ:ԞzFم妄ggS{X Y'mԞ8H{X YԞ8H{IgJ{X Y`g%Ҟ](Y`g%[8H{vgf-lvf?=,Oم2zIYg4zBم2zLY/4zIف4zBY@Y@Y@Y@Y4](Y@Y4:4](Y4:4:4]X.lXP.l f%BflXP.l f%BflXP.lXti6\H,Afr!f=|,ġ\H,mr!f@ǁf@_h6 th6 th6 th6 th6 f@ǁf@ǁf ˅TKʅTKʅTKd6 fr!f@_h6 ,q(R' ,۱,),п!="ޱ,ЍsR';L* ?S)/X0} ?M߭1lΰ0} )naȰCd2lFu>A Ϗaǰ԰cq>:&?1a΃ddY9yl?'cr@?0>#':L~ad]aadO6l_cƚlk~aW'暗lk^~aylk^~ayɆ'Ӈ%\ sOvd\ sO65(?a sO6l%暗lk^~a0׼d\c cO65O?ٰ}ydk~ayɆayɆ'ȇ#X cOvdX cO6l9L/Vk;"ayR0Qik^S۔';LR׋' sJ\ ۷ۓay9ik^M:LMD sI\zcҰ}lk~a&԰}jkޖ}^oM^>ՁF]7e}}#pf ܷyu ;L6S 6ӅjX`,c%4h$U[.ԋFK@vXm ,qЋFe,qe,qe,qeBh$qe,qe,S[ZZZ.ԋFZ.[9,rȮd-4@>' tiOVdlOrp d Ovd<(Od ?Y K1f?Y L](?Y ;d?Y ](?Y Oe`?Y`dm`n8Oށd%uX[%[t6AnKt6A~ u6AnKt6,q,4N,qBS,q,qBݺ t' t(/PwE@(/ WXڟ@,Ч+},lB},':lB ,q] ,q]},AwE,\::]Xz-/ t(/ t@ǁ@ǁ@78(/ t(/ ٞ@ǁ@ǁ ˦e%e@zY^vaY,AwEKtW$A@.}Y^X⠻"%](yY`e%HyY[DyYHDyYH]XVm,/ @(/ @(/ @ (/,X^(-KenP^XPvp,/lX^XPq,/ *eeeDZ,qB722](yY `yY `yY },/P@@.X^8X^vel,A.,AcyY`ed>]}](?Y~@WdluOvd>겟,G?Y.ff?Y k?Y`dluOvaٺ²gWX`ݖ>ﶬX~eϮ@M dѮ@?j G ]avaY+,Џ7+,q+,mD.+,AF+BK dѮK ,q+,A ,q+,A@v lX WX v8-ZU+,ġlv ]a]a]a\ +,q+,q+,q+B.+,q+,q+, v](WX`\aX WX`\a\ +,A ]a%r8v\a%r8XAK 
,q+,QK ,q+@9+B9+,q+B9+rk)+,C?)w[ gd`WXLt[ v]Xn-XRVn-!]a8!]a]a]a]a]a88]a[ v:tzJWX`\azX WX`\aX WX^t](WX`\a%r8v\a%r8XA@ǁ:@r ?6u`R袦B=n,У\=_B=ʥ,=n@=_,FPBP,qBP,q,qr:r:@W ,q(W< |:r:@W ,q(W<P:r:KOK ,q(W< ,q,KO=_,q, e.,W< t |:@ǁ:@ǁ:@ǁ:@ǁ:@W t t\,ġ\,ġ\,A:@W ,q(W< |:r:*vǿ-u` Qu`@QvaX8t`kǶ|lKǶaGǶOmߟ>lǶOa~ʟ?}R|l9[ӰScۇǶψm۾0}l0lxk~}l{}l{}5k.cCc3cyh>=>=폻uz}lj4lz}5Ƕmߊ>ƚkخضq1<^cC~خk~z}5k>ߵ^v\c]16ض1k~z ۵^sZwǶݏw\c\c]mc]1 ZkZm{cC1<^fcC1<^ۖ;>ƚcyhZkZm+W}5pc:Ƕm 51 }5k/k~\c\ۅ>ߵ^>_,4ض1N7 _(m{cmBsضt^cCmicXs=-Sxp[{p?Mܖ)vD]azqAdzqA1A f2w=8sW~A?؁zw>[h ZZڟ ZZ}.Aˠz2htZ]{z=^MW~ǃ%K;8Z}܃@h.^:z=8@X6wzqAw?zqAZAZ}AZh6^8^i䪗^eWZK;8w>,qص^rKZK;8Z]`îzaz Az(h Z}A ,6z=^׃>@APz=^W`Y%B׃%-AzġHXpzġ,H WqPZi>^2zZ=8H WzqAAZ"$WzqAI Wz;z=Xk,qص^]`îz+Bz=Xktz׃>hw׃>htA []zZ]}z=V^zWV^rAMZyAv~zaz=V^e_SZ'9׃7TV$z \!zɹd3Pr+Is=\eOrAv?$zqA6%zqAr]`.zas=f\] s=X˹,q\8rKv9W.zas=X˹do\r(+9׃K`CYA%zġ,J {s=8@9׃Arȹt \:s=8@9W.zqAr#*9W.zas=ș\8rKv9׃]˹䎨\8rKv9W.zas=X˹,q˹,q\8rKv9׃qJ`.zas ArȹSr]9׃qJ9W.z#NAA ,wt=8IݿA݃=8?< AGz=AGzqAOzqAt [:o=8Soo=8o=Xx Kvփ%AGzao=[ݿ`zaoKvփ%-A]=zcU(tB[$Pl=![nzCC(X=%bA փ@Pl=8@փ[- VzqbAzqb+pWl=0[:Pl=8@X[J`Cn)փ@`Cn)փ@`Cn)փ@`Cn)Vzġ\b]`Cn)փ%brRKvփ@`Cn)փ@փn CX[Jփ@փ[:Pl=8@$Pl=8@V`n)փ%庥[8Rl=X+KuK)KuK),bbAϑzп!(o=Gb+8gzo*NJa{Raa{ Cr>2Oa{>bOsbA1lk:Lagİ#bi1l|8L\z sJQ5lx ۟]XTe \azl cR5,_q*kkaȰ0.UYI5ll?d&U0׼TY\Re sKuTY\Re sK55/Uְ}D=l_暗*kk^ä暗*kk^a0׼TY\Re 74暗*kؾ15/U0׼TYUYXTe cS5l_ƚ*kkaykaya0 \c \l6X ׅX[%2p`W`n8l`n^[%X ׅX[%N \8 \ \ \l8u \88un:4p:4p:s]"fL=Wςs1\?O,H=WS \> R \>(Ru\>(R"\nPu8H`8H`\>(RX"R"\\\_7+q+q+}+q+q² l=W`CY+es]X ,q} ,q+fs8H`\JX =W`\Rf\>]Q\>]Q\}+Ч+}+Ч+}+Ч+.,>s tzc=W s8.,?s8c=?h=W `=W w dPzC\\J8X8Xȵ$.+q+kIs2s](=W 0 ,q+Az ,q+Az@%YX =W@=Wz@W\J꣞+G=W []s](=Wz@ tQ+dz@.Yȳ\ZX =W []s]XV d'ª-+|VeaUV sVe:Te]XV tx d'ª UY/UYDXRƜUYUYUYJX UV`TYlYuTY%Re1gUV`TY%Re8HX UօRe8HX UV Ve]X] ,q*+}jKʺUYS[XP]:@ǁ@ǁ@.sZ8P8P8PuTYUYUYUYJ8P8PeN.*+A@W ,q*+A@.sZX UօRerӪKʺPK ,q*BK ,q*+A@ ,q*+A t t Ѫ t Ѫ@ǁ ʺe2CUV Ve:HTez2CUVDUV[TerPiUօ,*ϏrId?YT. 
X UYR8P UYUYUYUYUYJ8P8PQUYKVe:TezPIUV`TYQX UV`TYTRX UVGETe](UV`TY%Re8HuTY%Re8HX UօRe:Te](UV= 􈓪@-UY) UYR躥*+CC=*+S=4*@=%*+CC*B*+q*B*+q*+q*rѪrѪ@OI ,q(W rѪ@OI ,q(WʺPrѪKGK ,q(W ,q*+SKG=%*+q*+ma.,W t @ǁ@ǁ@ǁ@ǁ@OI t tʺ\}*+ġ\}*+ġ\}*+A@OI ,q(W ٰ/dl 6@h /6@húhel t6mX0 ? I0}* ?_Oaa<S/?l0 ? _s`10l' ?'?Fհy1Bհa1lVG0 Tj0^hV ۿ4 c@u TaGrA9Hjk^aye:Laye暗j>A s@55/a2P s@55/հ}ak^ay\2P s@5ly暗jk^ayio: c@55Oհ};5O0< TZjkayjkayK M5\zOͰ}aؾ05wW s5\z=0׼ T4\z30׼ Tayfk^a*0׼Gs^G35-ayfk^/9L s%4\zͰ}ujkaΰ}kgk}+p#}c p_ .;i1/ 44p^(T@/gT1/\(T HM@sAd*qd* Kd*qdBݺd@ǁ@6l @6l @l dK@lP@l @lP@l d.]. ,q*]Kt4AKKtB]. ,qu]. ,q]. ,q@w"h:@ǁ@l th th d#@ǁ e d7@ǁ@ǁ u4q*q*qB]f}*_i iҧH@"i )@"i ) e )@"i tS u #A0Kt #A0Kd )]8@"i th thl@8@ T T Tn@8@ T T T[K T\/²kU`a8F` T\4*A.*A@ǁ@( @( @(lX2%S>@Q2%S>@Q2%S>@Q2uad*p;O L8KOd*ġX2uaYd*ġX2 KAKKcT `ԅLo d, d,P@@@,Y2u$S%SYd*qdBIaT`$S%L](T`$S%LrgɒIz]`HaeT @=RlX#u d@vЬ t>lY|RȖUQ-=O2****4.**AA*BK dͪK ,q**A ,q**A@6,ZX UT VE8UP**ġ,Zu UQUQUQܺ**q**q**q*B.**q**q**[VE](UT`TQXX UT`TQܺ**A ֥UQ%RE8HuTQ%RE8HX UԅRE8HX UT`TQZX UT`TQR8P8PȡUQJ8PȡUQUQJuTQm~$P*²oUTDUT+TE måYϏ~xBUTǍTE m~=_**q**q**q**q*B= *²oUT@UTǍTE8HiUQ%RE8Hq#UQ%REzDUԅRE8HX UT`TQJX UT`TQ%RE](UT`omPA%mP mPTAmP=mP mPAmP =mPmPmPulPmP8ulP88uahT`ChT!A8kAzBT`ChT!A8kA](T`ChT`lP%mP%A8kA8AmP%mP8/mPkA:AzBT@T@T@T@T!A:A:A]X!XP!XP!X T!A8kAzBT`OS§@|(| 'O) ., ݧ,n?ć_i=La{mf8lx۟݇=_p?Oaw?Oa{e?Hi؞aHiGҰC0ߏ"av(R:L"av 4lgEJI4l 3"ayHik^"ay%RE[\) sKtDJ\) sK4l 暗Hik^"aZ0׼DJ\) 7%R暗HHik"ay{X) cS4lycS45OҰ}9T) cS4lt&0# gJo$Y C dS:}do$Jmq)A:@׬C ,qЋc)A/Z8:@ǁ:@ǁ:@ǁ: @ǁ:@ǁ:@C tC tC tCP/ZtC tCPwt(RPwb` o i҅X;l}Z QVEJl[ QtoR@R l,eR ,wKR Oa,fR Ob,](R ,8fR ,](R d,I"lY ,qн ,qe)- [Kt/3A2Kt/B ,qus– ,qн ,qн@)hY:@ǁ@lY thY thY dΖ@ǁ eY dΖ@ǁ@ǁ u/3qe)qe)q{Rt> 7L>R/T0?+LMSOT0JSpS'LJ'L>RFLC`>8C`>8H'L%p L>R8P8Pt>:T0:T0CӅeu @c` t`,Zv LL%e JkS Ӭ`ZX⠻%X S WԬ` ,q)A ` ,q)qe)(Z.,{=,@RP,ERP,ERP,]X|lY @hY @hY t?cR ,8[.,?,8[c?hR `R wlY dlYP6-K-Kʲ8ز8زU([.e)qe)P,2,](R w?lY ,qe)A eY ,qe)A@BٲX ҁ)(R d"@EJ!vHB]C)dRk"_!N)r"@-R d".,x6aeс4nQ V6:H4]Xl< th< ƣ "GG暗hk^vd'暗hk^vak^vayى5(;0׼DN4lsN45/;?F;0 yNo{}Nm 
e' dv}o{NҔ(Av@l' ,q(0ۉKRi' th' th' th'P/Eth' th' dOv@ǁv@ǁv@ǁv R@ǁv@ǁv@ǁV@6C. DŽ%e-eZ]șE%e\ȾENEY]ȉENEry V:.P@ά. dK@n. @6. 1 . dw@լ. d . @). ds u2A,K. d] ,q]P,Kt2A,Kt2A,Kt2= (q(;³(qBdz(q(qB] t. t. t.P,Bpqz}(5 ig^zm^@y5 ^@(ЇSz.(ЇSz}8(- z. p,q p,q(ЇSzKt@z}8(q(qBpt5 t5 t^ ˂Fk8ktaY(нzzKʶF%ea^@ktn88kȽ6{KtByfQ`F%](Q`FF>]kta(z}(Ч+z}(Ч+z}²dQOWtEQOW@Q 8e!{l(ġl kta(ġ k!{A{{=dQ `х d5 d5P^@^@^@.QktFF\(qBy5bQ`F%](Q`F%r^y(5DQ N5 t kȶFk(m'{]C~ v(xmQ 8]; (Gi Y(Oхe@W<Dχ<_ph?aהX?a3y eD?1:I3t ao093a'ѰhƼs?=yg?gN0>暗g>SO؇暗09暗g>W暗gk^ΟaH}k^Οay9ay9ƚgkΟa }kΟay:ay: ay:9LΟay:>~a|>)׻=ӫ=bayck^ΟNayck^ΟaA0ck^ΟaT|k^8LK<K \zǰ}3dk^/׫;K!?Xt B X6kzυ!p&I}v=D&Ȏ=\8p_P@l D&q=?.M{p`=0X sO L68m888s&88N====m==Ïss?lOK> dGΟ@@ ,}@E t> dΟ@NC t>P::'/k}u'п>j}y '6k}yBi}ـ'1k}لBi}y&'k}Bi ,qНi}ْ'AwKt1Aw/ԝi ,qmqmqBi ,qНi uN:?gO@O@O t:\(O [u::\;>>>Nct Gi UB@) Wu ڟQB@U(Gi }(_) ɕB % ɕB@\) ,qЍ u A7Kt A7K$ ɕB88B@\) t( t(P7  ȡ²j!P9:]XvZ- t#B@ǁBj!P`Cn(n]%rB8PB@nYX !P`@JX !P@!Pe]Xօ, tB@( B@( B@(,Ys@>Qs@>Q6@%eB@)( ,q(D]X, ,q(;ErBB@B@Y8Xt@f  .(q(q(Y](!P `!P w, d,PB@.XX !P`@JX !P`@Ͳ(AB ts?.:]&tdυrL dgΟ@ ?Bv ?<[saYij'EMO  dZ@ tPZi}K dZ ,q'!k}K ,q'AZi}.'ġZX O`CY'AZ@K ,q(k]jO`CY@j}j}j}hO@O@O@υ:::\(O@O@O PZi}9 'AZi}hO`>J=Fk}K ,qBi}K ,q'AZ ,q'AZi}9/'AZi}'q'q'2k}.'q'2k}j}.²oOBOۥ\XV tV Z~xz%+-+\XV ,Y)+4 >>P8P8P8P8Ps>>>PsaY'q'4j}K >QX υzFO`>Ps>%8HX υ8HX O`>JX O`o>Q躥Bi}=ݣ';zBυzMO!z.HρzBOzMOKZ@ǁZ@w~PZ@ǁZ@w~ tPZ@w~ t t\'ġ\'Ѓj}K%;k}='ġ\'Ѓj}K%;k}.'ġ\'AZrZi}K~[%;k}'ġ\@j}='q'Н_j}., t Z@ǁZ@ǁZ@ǁZ@ǁZ@B t t\'ġ\'ġ\'AZ@B ,q( Z e ,[{'[{^{=B{^{~m 0=NOawa?ǰQ:l鰟aSt?C(t??aa-g ܟɖ3lu9٤-0rarI[ΰA9lN&[0fa|FҖ3lzsl9Qݰ}p=5/[0׼l9ɖ35/[0׼l9q0׼l9\ '\e9Lazk^ayr1riƚ-g>ƚ-gkankayraދaybk^暗-0bk^暗-g/kP8Lo暗-g>lK.;.aybkԼ^n1l_כ-^a?F[0uM:}QrOr'-' rOr.5@ǁ@ǁi ˶@vl @l dͶ@l˹P@6 l q̶@6!l˹P@l d'¶@l˹PWKtU0A@$l ,qU] ,qU uU0AWKtU0AWKtU0AWݦ-@rrٟ-'q-'q-']:rr.-':rrr.U@ǁ@ǁ@ǁ uU0q9* TMNST{gSs6U:ONT\(NTXKN`/Ԗ`KX-t}J'A[Rc-U:U:U:j?qJ'qJ']t.,kVCN@΅e9*@wy t ,q(;V85Ut g΅,qJ'qV8hBtgN`T:%R\(N`T:U:>Qsa%J'= t}hJ'Ї6t}hJ'Ї6t}hJ²YdNmTFNmTFN{T85#tàJ'ġYsa6J'ġ,Yȕ#tAttwdN `΅Rw d 
dҹP*@*@*@.nYsT:U:\ܲJ'qJBtbN`T:%R\(N`T:%Rrq*@ǁ@Wm9ʖJ-'Е@[N G\([N+@6l t%Ж+dȶ@ٖӲm9Zké6Xk²1gMZ@NlYk&Skm]}WcM {5uiͅ8HkX M {5\(M`6XkX M`6%8Hks6%e_ZimKʾ6%uiM`C״&KkmKʾ6Rk8Pk8PkȅCkmjmjmjm.&q&q&qBimjmjmphͅ8Hkf6%8HkȅCkmKPZ@.ZkX M`6JkX M`6%\(M`6%8Hkٕ6%8Hks 666]Yks66]Yk8\saYG&q&Ik.,6:4YOsM`CYG??ܬSήΏ~:5%e@7i9@ǁ@7i th th th thP@ǁ@ǁ@O,h\8\5%e@Ϯh ,qBk=&A@O,hPkKd ,qBkKd ,q&A e ,q&A@8ā@Oh tVh˹P@Oh m9Жsl9vӖ m97Җs m9Жq#m9 m9?,MrrP-BrrP-'q-BrP-'q-'q-rζrζ@Wh ,q(l |rζ@Wh ,q(l˹PrζrK=:rKd ,q(l ,q-'rK=:r=_-'q-' er.,l th |@ǁ@ǁ@ǁ@ǁ@Wh th th˹ܣ-'ġܣ-'ġܣ-'A@Wh ,q(l | e ,[r-'[r^r= -Br^riNs֛accScwm֏mďmOac?mяmOÏmÏmm Ommmm_ .ccۓcۃcsun6PLj>=>=>P4l7|9c|.P+ŰPm$m@1nk~7 5swCǶ\cPm"1nk~7|lF5j> 5P1< 5cPm1< 5cPm1< 5cPmm1< 5cP3l7|5CX0|l6*1Nmm揹I _'12WI|5jk~\cdcsmC叹wG _1∏F|5[j~gǶŊF|5.cNe0|5CǶ}mc3}p< *ns̃d&p<ñ۠mn{fdEAmn{fQypT>dE3`y=yf,q=3yqgAxf[:<83 <83̃<3xftyqv`m` .yMOIh@KxK) ̓/$4$ ̓$HB M~AHhd'TyAvdyп[j)LoAd 5!C̓

X_ |a`~%A)`C̓ 5?'C̓ 5:0<. 5:0٪Aj+:0<8PCM~%A}B&p߬_ay'tj|ji'tj|j }BAɇAya 5 C̓>P`þYo?Xo?Xo?Xo?XjK0<3/ 5:0<8Po?8PC̓nPXVHey- jt` ,2< 5:0PXvdy= j }hA`yЇ6j Y$C̓>Pm0IhX04[?-&sK`AA m2<2[dCÐAdn -!s˃l {2Kvs˃a`nnya7?Jʚ&-s˃- s˃-:0<8sKnnyqA懹%is˃`CYӖAt`nya7=Ӂ0Kvs˃%[,q-8KvsKnnya7&暗Iek^&axk^&aIek^&ayT1TiRƚIe>ƚIek&akk&ayT퓯axk&ayTIek&ayTay`>~ރڃay`k^<暗I0`k^o;暗Ie>׋eRyay`k^7 ӻ \?_dGrvJbs}"~lWPy(A2xyg*%GrJpvM <"6mpZ9(C ZC ZC ZC9PAˁy(Aˁy(AvrnуPP_AG*I%vTO:2>PGy:\d$ NR $"{tJвgJ 'wJвgʁ:2ZLR ZLR @ˁ1+A>dK1+A1+A631+*f%cV|9f%cVTJ6Ǭ٦pJ9Ǭ"9(f%~cVt\""9~/A1+~/A{E 2f%h90f%cVcVcVl9f%h90f@ŬsJr`Jr`ʁ ZY ZY ZY9P{E{ڑa Lo9P;A?,t?-A?,IÂ-At?-A?,;-*%w6[#\#\"9hG>A-A\2%w6[[[Ԏ|r`zKr`zKm':%[[\Itۉ-Aˁ-EN[#\"9(%-<9hG@"9(@9( h ǀ%`@K=c@K=c@K=,,9%Z~1%Z~1%Z%`@Kpò䀖%&9,{Lh r-ZZ\gr@Krp@ˁ h v-A-A-*%H98%H98%8gZZT@KK7h .rP@KpZT@KpZZZl9%-A h 䀖̀ [@h f@KGZ\8t@Ko^ rʁP1%ȮPwhAsJ]pJ]pJM2pʁC I< C .rPJpP\䰬*:%aYUtJpPUE9(%&PUE$sJpò<2%h90%h90%ȍ9---<<<<ƜPTJpP;%Ay(EC rcy(EC9Py(An9%Ay(EC9Py(EC .rPJpPTJpP\< G(C .rPJpP<<< G(C9Py(Aˁy(APr"O\Vyt3$>#O\Vyt3$0#OK/+ӎ< -ȓRʴ#O\VϗO.L?__V/#O#On 3$h90$h90$h90$h90@E-F-F8gɁʴ#O#O0$aYvISF9(@E=%aIp"On3@E9($A'E<9P'E< .rPIp"OTIp"Oy'E< .rPIp90,%R 1,@ﰔ K zlða)Ama)dXJc=dXJdXJKa)Aˁa)ArRRS3,%h90,@ݧfXJr`XJr`Xʁ!RC0=aXJpr气6 K .rX؆a)E!RTXJpr气"9,`K .rPXJpr气"-}j9,`Ky R0,%h90,%>5R\؆a)Aˁa)Aˁa)Aˁa)Aˁa)Amrr气"a)E!R؆a)E!R0,%AI*dJ!&bJcU&$$L %eBey%\6\J(5ey%\WBe@2׼J.s+TBey%\ƚgBey&\ƚgBe2<J.c3yZuk %PrʜBd+)$A6BrNG8$A)$A$B\䠓"9@9.A'uEJ!9P'uE: .rI]9<)$Aˁ)$A6BBBd)$Aˁ)$*$^SHSHSHI]r` Ir` Ir` Ɂ: .rIB)$ABrσ~0$~SH~0$g'0$SH~-3$~SH~0$2SHT IЯe9h.rypRH~-3$Ad IЯe--GL!9PAˁ)$A7BBt)$Aˁ)$Aˁ)$.N! .rypσ|\ 7B\B亝SHB\B\2$SH\֐Bt)$A?Bs)$A?Bs)$A?Brಔ{L! {L! L! .rX6Bt)$E˚SH\B\*9$m%N! RN! re)$A)$*$8;))  lXTT Irp I;hN! RN!9P)$A.8$A)$EJ!9P)$EJ! .rP I;hN! 
Z #cHDl8K@e1K$FDȘ%, %@a8uFSa٫qƁ q0 {5q0daA>NaAW<0|: #^0TFpò0"a9,{o.rPFa9,{oW0"eadFr`Fr`F[Z Z Z 8PaAˁaAˁaAˁa* #h90 #h90 #-a0"aٺtFp00q0\rFp0q0\0"a0"a9( #AaAv\0"a_ܬϲ|Y*~};esaAˁaAw'q000n3 esaAˁaA7\l: #f=0q0n3 #AaAw~q0\0"a0"a9( #Aa* #AaE z0"a9( #Aa,r - (AO7*#-wpo81@p=eGУ"p=eAGp4to*#h90#888TGнqp-p-p@9#arGУ"p9,7PzT"A\@9@p9,7P.rPGpr"p9,7P.rPGУ"p9,7PzT{ 8prGr`GУ"p-p-p-p-p=*bGr`Gr`ǁ 8(p9,7P.rPGУ"p9,7PzT"p<A_Arq8'8`:HaYrٟ$?G.\600b\6v/z,f^6YS e0.uq,fU\6YS e3b\F=0_)yud'YA>YAd̂, ;΂8PYAv"d'YAv"q,, ;΂,"eYYAv"\,9 ̂ẐẐrYAˁYAˁYAˁY* "h90 "h90 "h90 @eA-fA-fA,\, {o΂.rPDp vrDp TDN΂.rPDp TDp \,\,"eA9( "V \,2 "h90 "h90 "V TDr`DhgA,uWgA]̂,uWgA]̂,R˺ n1 "T,u狻Y]ϗO.>_,fA-fAai2 "h90 "h90 "h90 "h90 @eA-fA-fAmfā˺  63 "aYwuDffA9( @eAmfDp 0 @eA9( "AYEʂ8PYEʂ.rPDp TDp t'YEʂ.rPDp ,,gg ;Ġ䂷"wHAAn:(@E=cPDs$E=cPAA E4tA*("h90("9""\AAˁAA7\䰜8("9"\\䰜8("a9qPDpr㠈"A*("a9qPDp"E9(("a9qPDp"#1("a9qPDs$E-E8gPā""#1("h90("h90("h90("h90("9"""\\䰜8("a9qPDp"#1("a9qPDs$E9(( /ĠB zˠ>^Kx.5{\{\0%\6esm\6ese\6es]LƗui|Pa Aˁa Aˁa Aˁa *,!h90,!h90,!h90,e|nns?|* .s?|* .:!|* .?tsd7 6m-U4C Apnn:@-F;pCxF;sCM>G;sÁvh ߀vhCA?d-dEu .rЭ[pnԭ[pn݂t\hu\["ݺtdSAˁAˁAp:!h90!h90@ݺ-F;-F;-F;["ݺ9j+@mݎa(D/Bݎa(ā P1 POP_: P1 P 8PAt \䠭"m"m9h+C!VxpB!\7t(DpT(D;.rP(DpB!T(Dr`(DЏSB8C!\ tA?N 㔡A?N 㔡A?N qಶP qP .rP(DЏSB9,;\WP"eˡ.\.rX >C!BB)BP P XC!C!T(Drp(Drp(Dp F,*"H98">B)BP ;h.rP(DpB!T(DpB! 
~C!3!^TCxF;٫qCxF;}8LR$ +NR$CLR$ NR8PI A>ߝI A>ߝp$ NR$ NR$"mrX@&)-&)-&)\$$$$.')\$$$$$ pdI EJR.rPBENR.rPJRrI EJR.rPJR.rPBpp\$"%)ٿuBp@&)-&)-&)ٿuJRZLR밄R%] Ke°R%] KİRn;1,!@간狻YH/KQ/ ,:,!h90,!TXBr`XBr`XB]X%,:,!h90,!.,R%݅eXBpTXB]X%9(,!TXBp\䠰\䠰"%9(,@%9(,!Aa A\䠰"%9(, KZ KzFǰC K7ΰ \C Kra *,!)&tXB4%d{q A>p8.!ף8.@%t\B%.t\v {⸄"%KSnEv .rе[pԵ[p݂t\䠸u\k"]It\d;q Aˁq Aˁq A6p,:.!h90.!h90.@]-%-%-%k"]9ji<11K!g1K@- bB!f) bB'bB f)`B!f) bBpTB f)9hiS\6ǜt ) Ay) Ay) AypG߼La"e) Ay\9!8[Zb CpòY209,eNar) SS2s Crp ÁJa/A) A) *!H98!H98!8gKIrp ÁJa"09(@09(!A) A*:!h90K!^TB>f)٫qB>f)PeRx C|X8 ȇCЃ =Ѓz;ۡAz;ۡ.Y=Ѓ =Ѓ =.rXvz;ۡAˁAˁA9 h90 h90 h90e7ˡA9 h90 h90 h90 h90 !Ѓ"rApBkp\qAp)MT .R .@\JS4@\JSA.@\V' [ 8PAˁA.@pಒA?1@pಒAw\@\zYtA^V2@pಒ|q?Je)e%|qi.+ Z @@@t.+ Z "e%A7 @\@tE @\"9(@9( AE 8PE .rPA e9( AE x O1 h9 1 &6 \%v:Ap) It{L'rwAN@;tSL' XL'ZL'tNNtAˁ.&N'zttL'.rXMNt.&N'.rXMN\8 a94q:AprhtN\8 AEˡ N\8 AAtN\EL'zttL'8p94q:Ar`:As$----=Gb:Ar`:Ar`:ˡ C9,&N'.rP:As$9,&N'zt7@\7gAУ\-A?r{ xP^/eCql #@C~l_6|/> ol xlp_v x56l߼lflпlˆMl xk~ xk~ x0|k~ xk~ x0|k~ xk~ x0|k~ xk9e9e9eøe9e9ehe9e9eTeÌe9e9 65?5?3җ#җ c Cɗx?2x=2x;2@G_F_^6L!_/F_^6 _Ec\\\hl<}0u|k~}k~}0pl xk~ x0{ !?8 |p}p}phW}p~pf|p2]{aaac(2aa]{cav9!9d2Aa0 2Aa0 |p2Aa0 |rC~p 0 |rC>h9!?8 |p~p2EqC&l=pf|pxE5a9׼\0&-|pf|pxEca׼\0^~pxE5׼\0^~pxAaNZ pHZ0'-|r~p0'-|r>&4~pkAA*s tF $E |F 1 d1 1 d1 ?80|i0|#0|MS0xAEc GB 97\0}px97\0}p97\0}T0Zs TE -9ზAVAaa1"ზAaaAaa00xE97یax,یxA7o3~t?یaxw s37>v霹AWߜAWߜ)O!>TߘA?̍.7B|p~ϙ?8B|9s~ϙ\0BA?̍.rO!>a<⃋S.rO!>a<"1s͙\0B̍?4gn|r37>h9̙?8B|r37>h9̙tt⃖ÜAK̍Zs.37>h9̙̍O!>a<"1snΙ?8B|pØA."sニS37>]dn|pØEc9VMHj|=9V~ͱ?C"Vnͱtc{sØc5>A0X\#A0X1j|pò$X1j|pò)XF-tKo"e_?lL"VニIj|kոAN"Vピb5~p[Ac)j|r@E?8j|r@^C)jAEc9?8j|pEcE-9p0c|Mc|߄99YYYYYY?l!|"|"|"|"|"|""8gQ|r(>h9Y(>h9Y,Zs.kȢ EAa΢0gQ|r(>芟&>T7Ǹoԃ.߄1܎AИz;4|p\~&dg R>LSs,ԃgœzAvzc-9,! 
H|E=|/9 H|E=|9KQ/H=s^6 zϲ cB\6 /?rgـ|X,H=0|rS>h9̩?8|rS>h9̩thN=eԃ?4|pòԃ?4|pØzcS>aL=ۥs\0|pØzEc\0|pØzEc\0|pØzAw ԃ.rS>aL="1 8|rS>zAaN=?FD$#>^Anz"vD Wc$=#>6D"|DΑj">A#>9G"|r#>^c$-9ΑH\AHZs$K#>a@$h#~p@$9,wD"\ ჋;D"E">aD"\0F"|prH.r#>E">IAaDs$.wD0G"|Г9ზAaD0G"|r#>!AaD0G"rH.rXE">aDLs$9,wDLs$9,wKBs^>!c^9/p\^u\\̇)l/?OCl LO̟2.>LÝQeeÔp<5Le2׼2.s+yyk^敉p<ּ5L\D˘pk晉p<̼5LXDl\ƚg&eyf"\6l.'晉pk%eyf"\ƚg&ea5 !e2׼\:5ÿ\Dx.s2׼2.guwk^\{.s2׼.salI^yey]6#2f"\ƚg&ed/sAp]p:&lpI݁J6]܄ 3< #lrApIҁJ <x S.M ]L42pR.M ]L 2ti2e d@u˔2e!22pRsC4A)EJ 2\䠔 WN.rНfp4| AwEJ"i9(e 8ԝfp4t\;u\;"i- r@r`@r` ZLZL딁u2@ |q@П S)A2)*e >S|:e ^ST@OS ِq@mH  2N.rP@ 9.A7vE;P7vE .rЍ]pRԍ]pn삋tcd)d@r`@mJ - - ٬t@r`Jc锁uc222pn삋tc\ubtX bAM2&<A?ltAolC1 & Q]ubt_X狣ZZZZZZZZZ8P'E:.rPB]F-9"A'AwpN,tbtWQ E:8PQ Aw\"X"X9"A'EZIƨ"X]|g٘{X,secήAˁAˁAˁʕ>h9Е>h9Е>4]\6JJSEƜ]~Jӕ>Aʕ>4]郋Jtgʕ>AEr.r+r.r+}p\郋J\郋J\ W1t.r+}p\郋J@-h+}r+r?PAoJt{A.ٕ>|oJ]\VJgWtr ȮAw~J4JJt{.+vZtLW@W{otZtZt?pY+}pò2mWt.rXVJtEʴ]6ӕ>aY+r.rXVJ\ W"eeڮEr.rXVJ\ W/t.rXVJAˁA7J2mW@W'3tZtZtZtZtzDW@W@Wi9,+v.rXVJ\ WHt.rXVJEʴ] ѕ>]郞ҕ@OyǮu\]u<|\/?cl/?arl/?ɕJٟeyBe\arl_暗+eHyyk^J<ۼ5/W\rlk^暗+eyet5OWXtlf^ƚ+ey_6n.cӕ2<]/g6Xt5OWh2<]/cӕyn0K]ױeydyk^Rӝey]I]׍ey?LRuuk^3\󺌺5/WeyE=L7Quuk^Pu 0C]6$/s2׼.Ǒ]/cӕy2Q<8vsotΓ ͬ Σ<;P{y'uŃ?9:#87s3+8vxpufVpnf-:9Q<87(\ G:PEr`G"99Q<8vtv\ G .rUpŃso@]9*AgWE::PgWE: .rUrx= ;-:-:@G@G [rv?PAˁ/x[/x$/%l_ 5l_ A> d#Ⱦ)w}<ȷ}t\+"]9+"]99)A)AWNEr Gd_Aj=>AEZ.rz|pŃnQAErgLG\ G w9(\(BŃ\ G(-{s?pٜx!ڍ ƃnn<ƃn<ƃn<\l7tvAh7\,n<ƃ:ۍgۏ\l7\,nflc6A>fl6AmΒmtp"7 Pv.WnA :he trnA+nA>}ܠV6ݠ\Vlti9h| eKAFR* 퍆4U(>B9 e{~YV/?*rgYsrsrsr2|Z4|Z4|1CU(>->spò e34|.r2|1C">2|.rsp d| d\ ">(">99= d\ ">?AˁAhh| nu :o<ݠY :ȅ&Aoj7 \˺ݠ\ht=FAa7 !A4tn.vZtI7@7蠛tZtZt>pYwtpòj7蠛t.rX]tӓnE˺ݠ :aYwr.rX]\ 7"enEr.rX]\ 7gt.rX]HnAˁnA7}j7@7GbtZtZtZtZtzD7@7@7uWA9,v.rX]\ 7Ht.rX]nE˺ݠ :ݠ\rZtôk7llQ>LnЗex1^6eg0A_6e}arl\6A/'?X/^6}&7eyA_6{.}k^nЗ̓\r5/7eyA_暗_F7Xt5O7IeyA_ƚeT2<ݠ/c ys<޼5O7Xt~nZ.c 2<ݠ/5eyݲ\6Op.癗u0\ey\暗trk^+}<5Õ\rl]^trk^+urk^*ӭe2׼U.sLyV 2<ݠ2:ix'{RE'ywLsO*8h"8sO@:lrXEpITpn(S<=ܓ 
-ڠ@Se{RE2u.rspI(S":;spLd[E2u.rspn9'AE{xt\#"#"9'h99ւMMMOrsrs5:(S@S9-MަAolo6u>PA>hm6uQkSe6uekS 96u>PLA>omdSǦA>qm\K"]29@]29)ALE2u>PLEd .r%S:?AˁA{lhhKAˁ9֏MMM%Srsrsrd .r%Sp.V |o$:>;ht|D;ht|~ ~#9 r|Asp^!A tAˁAˁj>h99h99h9@9h>AA \"2Z.r |-:>9h@9>2spVV\"99@:>AsrsrZZt|Zt|LtCAˁA7|V\ t|>P+Er|r_ӎEZ?PA.m9A+Er|~8;>ZsЯe:>Ks#:><˪n9n9n9\tEGнT:>9,;tv|>p٢spòHg W9H998);>);>g ` `d͎9H99ޛs7;>99A9AEr|m"9>-K]l7m6]l|zt rn ^v B( ^v=pmХI۠%H]t _$\v?bHt .~e.4/]lϲql"~<_,---(۠@۠@۠A]l]l.E]l.Er=P.AF"^큲~ .5$RC~ .5$e\jH֯d/~ .5$RC~ .5$5~ Kנ@Y})_ؠ?9ز/lk .ʲg_  \>e)ξA=6N}a6ȵC?,M-9/R}a}a6h969}a}a}a\ \,69}a8`_"e)ξA=6aY/ .rX \ _"e)ξE .rX \ _ \,6- -݊/R}a}a/lr/lr/lr/lr/lKq Z Z=pY/lpòg_"e)ξE z2C_"e)ξAOf \,6}a 6u:(_ؠI_<}a/벹.y0^6͟ex)^6͟ { ^6/eOׇyrL;~XKw5mi2׼V/sk2׼|a-\q5/_ey_暗/e`2׼6\Zk5\Zi6/~k^̓̿ 68hH_< =ܓ -< {RE'y{R6g"8O,sO*8s@EpITpn-(_ܓ .r/lp|asO@96ށ}a \ _ܢ .r/lp|as6AE - \ _"9@96AAˁA   {|a}a}a@-^ilx+Mcl46_m{Lc|46mצ46Mc|46fMcPo_il_9p(ACE::PCE: .rPpLcPpt8dȦilril --ilri2 /dؠ@ؠ@u8hhh{t8\á<~]Q@-{lЯ+:{(늎A(lGe|p% @G٠{ot Zt Zt=PAˁAˁAˁjy>AEr @G"-9hy>fe|pn6Q6AQ6fe<\<\"-9hy>AA(\Q6eee|rlrlЭh:@G٠[t Zt MG٠@G٠@G<\"9݊Z.rlЭh:9hy@9iG"-9Qt JG:;YlOi:lQ6F+e(tgAw(tgAw({kgG٠;Kt DG"eΎAw(\Q68ۉhlpò}gG;;9,+xv r ώeeNrlrlpv Rv Rv=PAAA({eeQ6H9Q@9{lpe({e(\ G [v .rlpÿnE46}{Mc46וMcH 6 ue i2 ue$il*iMc.MMcKi.6 \v?l{aRMcGrgxX,eæAˁAˁAˁ46h946h946h{aؠ@ؠilpòaؠilpMO(cSƦM]ƦAIc"{ililӋƦA˞Ʀ@y9`wixz( . .eޥA]ޥAN]ޥwiE{9`wi?]ҤwirwiE{_ٻ4h9л4ȉKK(ڻ4h9л4h9лeޥEK(ڻ4aYwiv{9,W. rn"eޥʻ4aYwipK+{9Ȼ4aYwipK7wipò~e M]]tGޥ.W. Z. rޥAˁޥAˁޥAˁޥAˁޥA_ٻ4h9л4h9лeޥEK+{9Ȼ4K+{= wipò~eҠB. 
/D ]zK'Kڻ.벹.Owey9^6͟ex>Lޥ͟ex??y^6t}K/[e柫ɻyzk^ޥ͍eyy^暗weD2׼K/s˻yuk^ޥ]ѻ26]Ov\OnS4AkEZ?PkEZ.r}pzt׎EZ ]O4h94h9@--ĦZZt= MӠ@Ӡt= Zt= Zt==PkEZ.riMl"Ħipiv= .r}r߲gӠ[t==pȳi=)^]On4]OI4]OI4]O\ztOAz\,4]O=g[t= .rXzzಹg"eyϮA/irip Rv= Rv= ;AA4H94H94ޛ]Oiri7)( {ov= .rip\Oip\Ozd+ڮEr= .rir%jk,D B[9%jO/[>Am˖ARhKeˮAf $v= ]}t= Zet= w0l4tz\oֈ]OAz`X,[#ek|ail4h94h94h9@--;]O\Fzz`t= .rXFz` r|D>AF"$}DHѠDѠ3 ZH=PVANmV.;K .egVAlzಳdР r*mР r g Ҷ G BX Z r g%[-Z9UhrUhc8[-Z-Z,*4aYUhc8[9,;K r*m"egVANm\,*@Y9,;K .rUhpòd"Y9,;K .rUhK .rXvl䒆BB*egVAˁVA.i*4h9*4h9*4h9*4h9*4ȝ%[-Z-Z,*4aYUhpòd"Y8UhpòdР .rXvl_VAh䶓BUhUhp-B/벹.y(0Y^6͟ex)^6͟dz ^6UeOׇ*~<lI^6`lzB/ezl]暗UeyY^6.s*2׼B/'@ezk^VB/c*2LKȗ浂|k^V\>5/ey-?L{Ǘu|k^;Ǘq0-_6$/sk2׼v/ǑB/c*/Uhpn>Vy{R'[ybx*48s68O,/8Uh'EpXTpI큲 ܓ =ܢ Z =PV'\ "Yԁ .rUh[9*4AVE\ "YmpBd[*4AVE .rU聲 .rUhpBBl-*4h9*4h9*@Y-Z-ZYU聲 Z fР W~[ܱUh*@YڵUh*4BUh[Uh[ ߾ c ߿ .rGp>tq>t\䠣"Y"}9#>BHР@ A Z Z %dР@ed_VAˁVAˁV#h9*4h9*4h9*@}9#h9 4N@f}pެE7YtcnA?tcnhO/ݘhpެ?Pnst Λ$N@@@@f}rhrhrڬ.rf}p@D 4AEڬA7Y\;t .rfr A7"m9h@m9h>AEڬ.rhЍ99ht 1G7Р@7Р@7Ytnj>h9 4>5@@S 4h9 4h9 @m9h>AnAz6냋tnEڬ?PnA 4A߲og7РNt M7Рt K7РNt D7РNt D7 <vhm'vhm'9 4@\tۉnER@cH]X9,yv=pYγhpòg7 7 4H9 48[))g `7 `7dn 4H9 4ޛ@@h79 4An 4AnEr m7"9 4E rVAh ]f*4ȹB|*4mzB|*4_l{VADРVAKVAf[/BK-[# =pUhп*4续Bekb9l<_,[-Z-Z-Z(Р@Р@РAUֈBB2 a2fп4 Lh4ix!2|!A2|!2|"ᅋ擆.rOI 9' /\04plO^a>ix"lBar ) -ar / -ag#i$#Z0|#|BF/`BFf#ZwdBR(#lB>dB&#I"#a#Ze0|!fp 0_T߲5"#5"#w0a/taп9lBPdB`BeqGIȵD#Jo1/|ʼn/ɭfܪʋ?E "\-&D>E "|"DQ !|G(D>E "|"t |q AH\M$|q AHAa7 A(JeDy(AK} " %˾.q.qXM$|p,|pòo"AKfA<` %˾]"tAd"t |D>8@A䃎:D>} "t |q 2HeD8,&D>aD>8,&D>8,&D^ {08 eB9 t>|pv@^6et]6es19 /esl~O!^6?Ïy/e~~r@^6~Lѿl>Nl`l\l/?&eIey9 /2ϼy值l/yg^2ϼyyg8t@^6wq逼3Oes]g8t@^6\]6[]ƙ2<Rey: /y|Z1^2e9eAey-~L{yEzg^;yH/k}2ϼy9zg^3v3M<l>yg5ί np.p?H dp> ΍Upns3ށ@*8<#8@I ?9gX*8J3sct( :@( dpnK$ .q287VJ\ d͂%%@8H I K$ np$%@@I K$ .q2A% .q2AȠ@ dŃ%AǁȠ@ \Yt( 28Pt( |2@B` dՏ%AXy$A [䛁%A[y$APK%AVA@u ߌ- 2ȷcK K\uu8h?A\uYAJ %AVE@J %AF@J 2GȠ@ dq@I %AǁȠ@ 䁒@8Ht~pp60h yc?6"~Е AtGCi Ƞ+="p-7ICdЕ Aǁth :4D"~q!28th\l>[3O9ey?8#^ƙDcڇ3mӔÔ<ڄ!/k 2ϼ 
/K1m@^ey//k2ϼ䈗爗y1-=^ey-<^׺Ǵx_A:^צefe1r8p> npnw?Hsbp> uVps;߁2':+8#8:@?9vÎ\g:+8ʜ;sthN :4'(sbpKdN .q918Yʜ\ sb͉%2'8ȜKdN np̉%2'v@KdN .q91AeN .q91AĠ@sb͉AǁĠ@s2'͉Ar6'(sbq91fĠӜۂ͉AB6'`s2'lsb 6'ls2'@sbo6'ٜxу|m91Ȳ _mN .q>zpуK~уK\}>zpуKddsiN :4'#ٜthN :4'&ٜthN

N3/9ey/<#^晗D3/9eye#^ƙ2<列ey/Sx|rgr8#^6\6\ƙ2<ey/Sx|h1<^eeyey-;~Lyxg^{y%G/k2ϼ䈗煗yxg^r.3m)Q/dsO; |psҎs\ӎ|w np>·:PvOΧ#8Y #AǁveG }Vp%#>@KdG w1Avs\ ;bp .q1Av\(;bp%#8Ȏx%#8ȎthG 18ЎthGj?AAw#8h@0hG .q>p>pK\}hG .q>iG ң18ЎthGpA7ܴ#8h@@1Ag*K2znU W1Θ^B{.U $We^Š;cz.U $W1蒌^۟tIFb%%ҡAd*8,*]ѫ\ⰬګYAw*8,K*!ګ\l"ګ.B{Y$dU 2*g\qW18ثxA^ `b}ʫdU W18ثxA}*8ȫ\ *8ȫ\ b%*8ȫ\ *8ȫ\[oA'X[t1K|t1ߚ.(AXt>)]3<*^6^bǴxg^[yxg^Beeym3^.ea^ٟ_d88%8& έqpn?H#cp>S 5YpɂskQ22,8%8)@?9C\,8thd :4218d%228ked .q1FKdd έqp%228AFsk|%228\ #228\ #cq1BFƠ@#cq@AǁF <18dg#cid EF +%|ٰ@|Ƕ17|϶@|ն1ȗmld]At^Vg8/3?۟tr hI]A-xಇac_t9Lc'mv9+. r .т]AT]Aǁ. r3/eY2ϼ\yrl./xg^.2ϼ\yrxg.8t9^6×qr3Oe9ey/x|Dr|Brg.8t9~L+qr3OeǴ,yg^g"G"y&1mI^׎eymH^晗cZ3D ν@pɂsk[sMkC|x\& έqp>D g&;P.O·(%8d& :t9]Aǁ.r 5Yp\%r9@Kr 1A.sk\ cp\5.q1A.(cp\%r98x\%r98tr а18tr.r :t9Y}q1Bco!=7Y䛈EJ EAXKEJ{EA[ddVuܢ K(|%1A%V.qжV.qжzpՃK$zyh dpòh d<-Aଽ 2@[ .8dl[ Kd .q@Y Kd .q2Y Kd .q@Y Kd .q2Ae .q*"\"2l+"܉"2ȗS+" kERD,X;AǞ+"|G"2oA*"ˀ-K7VD-HEdpeƊ+"QHEdA("28P Aetq]i 1-A-yತa d_˴@L dp@L d@y g i  l :@Z -A;yತa dq2s[ -A;th :@,i\,i1-%˒-A\,i1-%˒-\,i\ dpòa dp,%˒-%@y*m dpòa dҶ@Z ~eIȠ@ dҶ@Z -AǁȠ@ dK@Z -.K@8,K@8,K@8tF dpòa dK@8f-2--A*@,A1?@^6e]6Oes1Y /esl~O!^6?Ïy/e~~@^6~LB첹l>~lԽlнl?& eeyY /[R2ϼ,yel/yg^*2ϼ,yeyg8@^6\ƙ2<-G!qi3O e)e!eyZ /1L^ƙ2<-GӲeyJ^6{\6^ךǴ%yg^;y!yg^ȏi=2ϼ#/y|wg^{yel>3ȏi#2ϼ!/k2ϼv!?UC<ڃ3-6=(cpnAIp~UXpns3Aap‚s|P 18Wa%08Wa$8*,8WaAǁǠ@cq18xL .q1A\(cpLA6=8\ cpnKdz .q187%2=8ez .q1A1AMA6=Mthz :4=xLAǁ ;?m dЏ _C p2Wk 27pk |2ȷpk 2qk |25%@Nn dp6҃K\⠍\⠍m8Hy6҃K\⠍ )k ?H dq2ȂȠ@ dq2ȚȠ@ @UYt :@( dq28Ptk 28Xdg dq@i .q2A .q2A ok K .q2A .q2Ai 2A.q#27W;"?HGd vDjGdo8 .#t #t|U:G>8dpG>aG^EՄKjƒn?|pt5AA&<2E.E.|phB@("t ("tE䃎:PD>8@EdpjƒKfEn?|p|5%Մ~@&n|5AݍCEơ|q"A|5%Մ8̊ݍC&n8W"AJy?]4#te䃮 |Ѕ2eRݠ4#NP?A7hG> ݠe=RݠeGRݠeQRݠe[R'C؃.|pò3)dpٚ?%ruR|q? 
䃌N¼#d 2P#d|ݸ2G>2P#?AG>aG>aGgKfKf|p|p|p 888|p|p|e8%pV>Wi9+/A.iY _| $g|iAY\|/mrV>e8+\lA-Y2`e H+aYYgZA;GAK@K O|_Ri): Zy'-BAgHKСZy!-<ݓAZh)t|RZ-僎8CZh)q:R>8@K\v?|pò!-<ݓ%tOZ8,R>=i)\~HK.qXv?|pì|pò!-Kf-Ki)\0k)a.qXv?|R>8@KС .R>8@K |qAZh)t|R>8@K-ep%.qXv?|pì|Ѕ.qXv?|R>aR>ȝi)/Am|.YK -sXKy3<ڭ}ɹp/R ہR98J!87lp΅ Ǒa [p.Kp>ʠJ+( O2>ʠfJ+( ^2zʠ5J+\'- .qXv'- .qeeʠ5J+\(- ^2a٥28kɂn) .qX6*-//k2ϼ=er.s_s ΅sp> ΕBpn؂s R#s\8|ya ΅sp> Ǒa;PO:Tl-8PyAǁ͠@fqb@)6s\ fpa;P)6l; .qb3A\88H\ fp.K .qb38J\ fp%Rl(fp%Rl*6,C :Tl*6b38Pt b@)6AVl~? .73w7l  _  @Yؿ\ fE%Z.qxpVxpVǃK:\ Z.qxpVǃ, Aǁ [17Aǁ 17ʿdAffq38пyAǁ͠@fq@7K Bp7y..]п/. A ҿy..ҿ A7o~͠ )38_\8P&A7o7Ņ@fq38пy..7Aǁuq!AK 3AKtq!juq!AF .qŅ 3AKtq@]\.qŅ]\.qŅ7n .qŅҿtSHfq38пy..7Ao Aǁ͠s7Ao7Bp..8ȿtqN恺\ f9%pAڿυ&Π;n"h 3&Π;i6;/k2ϼv:?//^6^6^62ϼy%lnѽ3%?jfp Lp.sv$r'0T[psAJ6dp؂s+|$d38wlr'0@28wlJt( :lJ6d38Pt( :l(fp؂K$ .qd38wlJ\ f}%%l8H+I6K$ Εsp$%l@I6K$ .qd3A% .qd3A͠@fu%Aǁ͠@恒lJ6%Al(fqd3JВR[%A_l(fFlYY%Jd+`fHl Xy$AlYXd)fZ.q xp$Al8hH恺tAfpo'G恺tHfЯ{h]ңAz4~ݣG32|;@y48h]ңtHfe =Aǁ͠@恺t :hݍӣyn'z4 :hݍӣt :h %\ f8=vBpeyB/׍.kcD3=fIi͛%AYRz$AVYFXRdaI遒HXRd'aIiM%j{j\naq .[ v5נ߳q ԸMk-,5AJzn=))=P~ؔ)) aSRzn=[aKRtBIipzr4Ru!>JJ.U() .qЭ%) T4AKt@z.qЭz.qЭIJ)) .qЭRRtHIiq48PRzn=JJ%Aw[AǁҠ+wJJ%AץJJ%Cpn=8HRtNI遺\ IiЕ;%%p$A.~R`t@i A 4]6`z A 4]6`tMZ Awo4ݽ`zjiU4 %ˆ .;68,k68`z*jipò-jieaLi0 .qXFm0 4aYe}LKd0 26L+8`dl0 Ύ `iq@L A qL48`dliq@L, m0 .q4Ae0 .q4A fLKd0 .q4Ae0 .q4AL4AL, m0=PL, m0 :4Lm0 :4(i} AǁӠ@2L A_8|w'{$AٚzಽfiݧA }zಽfiݧA(4w$O4w$O >=p^48` ?@i4@2@i4y3/KǴzzg^ye)쿜A-~L;ye)l^)3uӏi2ϼvM/k2ϼL?58ce) S+c"@YJsYss+^S\"-8%8:(Kip.҂sY-Rth) :ZJ48Rth) :(Kip.҂Kd) .q48iR\ Kim-%8R{YJKd) νrp,%^@YJKd) .q4Ae) .q4AҠ@Kie-AǁҠ@K遲ZJ-Av(Kiq4ЖҠ?~ K M4w8Ml.7 z7 M~ ;;M|@M,17 ư4"~7 ˰46~MԆxp6ăK7 ְ4A%.qІ.qІxp6ăK7=P%8odg7 :ot7 :ot7=P~ >MAǁ~7 :Mo\mn |/̨̭eneF >H3jsK3jpop}AKS5ΘNS7U.T .;uS%VNՠ;c:U?H)hAm) .[]$à4膛RР; JA?醛RР; JA( :aP tx)=PRР; JAKt A E]$.qE]$.q4JR]$ AWzJA"Aq48P tG)聺Ht( 48P tH)hq48P z.8"ApA7ܔ%pS \⠋J ^AS zwi)ho:N)hЅAS :AlaZ taE)hЅ.R \,dZ zಒi)hpòi)hp.8,˙]XQ zಠi)hp\R \ⰬiZ tF)hpòi)˶%8H d, 2gUq48X =WAR `)聒KAA–()hq4ȺR `)聒Y@Z \ )hpJ \ )hpAn[ 
\ )hp%()hp%8H z%8H di)聒8H di)hq48P do)hq@IA- :JA48P t( :()hq48P \?K 5 [{?GgAk.[]~ _2T{=T{=T{'՞ASg<՞Jg<՞AS!՞. V{OgПT{OgLV{=՞AǁjϠ@gLV{(gq3C&=՞J!՞AǁjϠ@F՞%F՞AZ\lX䙫՞%F՞AZ\lXyԞ%F՞%R{8,9V{8H\lX\ g+V{8,9V{`gq3=\6r :T{`gq38Pt :T{cgq38Py಑cgpòcgpòcgpԞAnX\lXF՞%R{5bgN=뿓7 :T{N@c4@?6ϏMKGjǦcc)lzjMfMjcGMUeǦmjZJjj~3?=3?=lj3?=3?=8P{gjCcy=l3qiyǦݡCcy=/J3qy<2MF?6g~^$l#1Ecy8@}gpw>8@}烎YT=aw>aw>8eYY ;\0;\0;88%%;888|pì|pì|qA"w>8@}gpw>8@} ;d)(}烎MnZݞ%NnY]Gn*',|sr{gl1|=ܞɐ38=d!KfKfgppü { =\0o?apü\0o?ap n8n8ny!ܞ:p{>ȒOn=t|Uܞ:p{g|qAn|qAn=%&Vl}p2eg+ a}+N +t_ps7XA\wΗ\.gAwol>*]6hx6t?*]67x6t .|qg38{6tK8[y%VK8[.q={g%Vl> x6t [:l>87x6V<gA惎%ϖeuR CgpYAu߳a|е = Cnaa =_a|е %8,2t>p:\lW_a|pòb)C烮`|pòg)C烓tAwR0t>aٶAwR0t>aY3,] C烌 AAl|p<8 Cgp6t>8 C烬he Ά:du)C烌 K:\0:\0:%%A. .q .q .q  K:%AV2t>8Cle|q38:d/C烎 :0tgC烎 :0t>8:d/C烎 ,qA@ =/F=d A@\|п;4;C'|}A@ }<3l6H|qoIvdRM` P"Ɍ=ða_w>/;ݕ=;zw>|9 ﮔ |C·|w|Cpw>+;zw> >w>\\Q!_*prG·||eE;U9,} |eE;.s9,}|aw>\\Q297|a|ClP|ClP3\.(s@!o6(s@|C·yG·=;墏9,}|a|es!/(prG·|es!("p_&T'|8ym~Hȵǔ6? ?kյOums?k5)ym!1%:ǂSkkkkǔ6_6hg^kݢk~g^kk~g^k5?Jt^3D1y<3ym>gk|6_3D5>Lt^o]/]3D5>Lt~gy&:/>6ϵ5?I1]$g^H%k~n^3+ymtϼ^3Df5?51]g^WFׅk~u]>58߉;p·N$?Au88߉p> Cp> ;0O+H8"<g8z!p9Χ^ =v8Cρs`:g9zpT3Oep Sp ygAp:|.sP3\g8 9· 9.sP3\Au8ep =v8Czzp; =v8Cρΐ~pT3 yg9ߟe3G8G:C9c#!/ 2Ü#! Α΃tf361C ?0?Mlb&fs061? 
9A]9|AWe*.sP392] =61CρM̃*zlb<&fca61CρMklb&A]9|AMlbUp 9A51?ef/Crer%g2'Wf/C0{+?2{\˃Eg/C0{)k^^ˊ^peae^k^peaef/eFg/C0{.sXn1:{.sP2\eAg5^!eȳg/C˃^ӣe98{r^!eȣKg/*{r^ p> /O*sF8;|e8^|A+Cρs`2<|e90_zW ӫp2+2+C]8_.sP2\|e8 9(_·2+eWaA+eW 9(_yPp2+CρʐGW|A+Cρs`2ɞ =+C9_z9w{<" =Cm>2 >R}.^3Ϟ5>^3Ϟе5>^3Ϟtuy\g:c4zϼ^_\\3mk~uWyg^=׏5?$zϼz;@~5?^\3k~u/y 5\P6Q;깆·"?v)A\P6Qp> Cp> {.O*eE8;k8B{A\Cρ=s`5s=k9z칆{s Spz2\z2\C].sP5\栞k8ʆs 9·2\e깆A\e깆s 9zP=pz2\Cρ=אG{A\Cρ=s`5ɞ{s =\Czeg|~sE~ <>ȃ>ieJӆf6+Ni{'2}$\pҴ 9{'d6Ӵ4m90M{P; =iCρiЇLNBρiЇLӆӴO혦 =iCρiڃIA; 9(MiڃIAiЇLӆJ~垱Oح xvkCf6?J[4խ Q٭ }n򱻵Oح ڃdwke-dwkeEdwkCe6\Fv6Zvke˕dwkCe6\Kv6)2rOyح 9l7٭ 9[.sP6\nmSvkڐsp6)!nAukCz[rֆ!nm9[ڃֆ!=ݭ 9wk[p2uk[.sP6\nmȯn[.sP6\nmAڃֆԭ 9[.sP2ukeֆt_c|%c^kյ'um69}Lkҵػ6zkݵB޵G'c^]]]Z\/]35?^]35?^]35?{̳w{<{k|ٻgkk|ٻgkբk͢k|ٻgۏ5>l~LCԵkk k~>u%czϧ^۠|M1]SAT|皟OSmkk~uytNcktm[q[t:r p~_UC77t O$D,lp> p> %|"zl膞 =6tCρ s`Cn9zlTC7Oej j ynA p|@.sPC7\栆n8 9 9.sPC7\栆A5tej =6tCzl膞 =6tCρ ݐnTC7 ydn9{P ݐo9z,U 9R8\)_` >>}*sOCc'sp[89>9ps90 ss_O19Os }pS>d8yspn<pnCρ9GzsA}'\栯}A}'\pne:A?p>vb8l.xCf8s?g3s6sOa>v8s?~4K{\.;.sX;.sPr9pr 9k>\w8Ze}pC0.sP8\Ae}/:rpSC9sp89Cz:|P9sp8䡧s!ACz:.sP8\Ae-e˗t-e9(.sPr2ep90zs=Cρ9pr?9(|P9p_Zp迥2 9пs`7X =Cρs`7X =z,!z{PпX pd7[/Cρs`7X =Croȷ^.U s`7X=oa"oȗ.\.sX.sX.sX.sX{PprEp2劐2e!e*oa"o;.!D{p"o9No9z,!z,+B.+B.+B.T yEprEߐ_u7 7U }AG}CQp?^vmyTcg|m#6 _S {mý?[ƀ?"?|xwmwmxmxmm1ŀ7??-׎͗>5?_o]35?_/]35?c3|w"!\ _EUߐ$\ QU߃!p8 |·|AV}a8zW8;p~ExPUs`7X So9{PUs`7X =V}Χ^2U}eAU}e9.sP7eT CT 9.sP2U}e!=\ =V}CρU߃oc?W}zp . 
K¡ACρ%s`I8X>p|(a.*j˿!_jA.V8\qWn˲ z AzlW+uf826Cρ s`ġq9ߗ^5šZY/l_ñ^X/~zAՋC^šJ^|P_M ށp.sWeeo׋e}o׋?zqcD֋Cρs`zšOY/>&šOY/=֋C#^zդp.sP8:դpšOY/9IU/ š?۳^ПY/>\w8'֋Cg8Qš?۳^=šO~Y/>zq>e8׏2ՋC f8\\w8b֋eˍ|׋C f8\\w8b֋e|׋C f8\\w82ՋeT8\zqAC!zq!׋C⃪<r8\/9׋Csp8qU/9׋C^rT8q2ՋeT8\|'p(׋e7\/m׋eף\/m׋eT/9^.sP8书U/9^\s`8X/yzq9^|PšzAՋCρs`8X/>zq9^zT/T8\zqA⃪T/9sOO/c,8Xp觏Oc>ƂC?}~ 1|POc>ƂCdr,b3%c2ł?XpȗLcXp90z|XAłCρ/ =Ƃ*%cXp90|Ppr#DZ\ >q,8\q,8\q,8\q,8\q,b2Fc2łeˍǂe9 ypr#DZW =ƂC^Ap,r#DZs`,8ǂCρs`,8 =ƂCq,8 =Ƃ.7r 9,7r 9,7r 9(Fc2Fc!Xp90$/8+ct1F!n_i_~SC,k_۾6k,6:8צk3D}mz6=O8צk5>Km|mm`~צv_~ma2ŵ9Nצ_~mGVצ;v8qMg~'tk~8qc<_38צD_38#N#N5>m7еed5w~]~e]5,@k呯MW Ɵ~m7?7_)<ϪWc6}tkC,kQRצK_|mk;Mͷa6]7|kۦMo6݃9 Qtp:~8yȧFUTrC~Q8FprDt"}ʇt"pzApz_⇞=NGp\.~9\s@rq8z(?P.~9\Gp#restrCz\p\.~a.?N.s9Ӊes2\p:res2\p\.res2\s@!ET.~9\s@8=z(?䙠\.~9\*?P.rC\s@2lsgÏ ?mmsk_>,29mdLLes:39̙LC+SΙLC+SpÜ~aT?\0g.s3~9 Ss@!LC<1VLu8gXꇞ2=dE2=de269SpÜ~W'T?\0g29̙Lu8g.s39̙|Les29SpÜLes29SpÜLes29SoLC|LCꇞ2Mꇞ2ᜩ~7pT?~9 SΙꇞ2=dzTs29Sp2_eT?\ULCNW9SpY~ȗT?\=gź2~e#Soו~Ts_"Ss@Lu8gzT?~(Q?I!Jl_D'(Q?{$*J}pCGB:K}pCGBoD{{H(Q?-.spM.sXLE%ꇞJ={o=z(Q?9JJ}pC%>)DC%g98G2{o9{{aesQ~a|П~T?Q2p"2 lLCu!SП푩~T?/2ᜩ~T?/29ls39̙LeG~a 29̙p29,$Q?#S.&Q?#Spe|Les:39̙LC!S}Pꇜ2y>LCApT?29erT?~9(SǥTs!LC*SsP:0gTLC*SpmK׸[&wJWJ~sV~>εֵ57ccsm~umyp͘ž6Lp͗Wp7pXkk9,ֵsdv8q_8O7w2;','C~r2!?f9򃖓!?j9A&hȏ[Nfx8 ף=&Cρp>;dv90zLf=&Cρs`29(.sP2;O*.sP2;䉈2%eJfxAp|.sP2;\dv8T2;\dvAp9(.sP2;y`dv90zLfT2;=&C:}Ps`2;䡡١dA%C:zLf>Ap!>dvANf>dvD'eJf>dvAp!ӝ>dvA'Nf9(.sP2;\d=&Cρ쐇NfT2;yBdv90}PNf١dvȗLNf!_49}g2%Cq2;\dv5Nf9(}Pp2%9(.sP2;\dA%eJf9(}Pp2%Cs2Lf!_9zLf!_9zLfT2;9'Cρs`2١dv90zLfdOCwO;\=>vi٫٫{dO;Z=/>v7i{ڡACρ=s`O;Eo>CLf~dvs&kv_2>dvs&CGb2;-&kv#1١Ι9,jp2;\|2s`2;>|s`2;I!١dv90s`2;I!١dv90}P_ 9kv2%C3}P_ 9(pfA&C%П}dvLf\ddv3=١П}dA%Cg2;/2m?pp'Ce2;\|qПy9,^q2;g^&eJfT2;Lp2%*.sP2;\dv# &sp2;!dA%C9rNf!dv98DJf!OD9'._s2;䉨2[zNf\dva!OD9,s2;W9,_s2;W9,s22%eJfLw903ݡLAeCs;=f 32eC!vrșprșprșprșprștCt9,9(.sX9.sP; 
ge!gC^p;ya™!gCρ&=fCρs`;yșs`;>r;\r;\r;\LwCtCtKܡw&.qT;X&%k6/X6ڼkͿj6c=smյ̵̵\M]'k{_gqm^ŵyM|Luk"̓=}m1սwWͿͿL>Wµ7µɵյT62g^uk~Ug^uk5k~Ug^u_cyֽg|yֽg|yֽg|c}xmmu{`5|-5;b}m~smpXLc{͗Wc7c͇ӽknsmsm>׾66_.\\o]_\\__3ίpΧ|Xg*%uG.\u..\T:C~q:G?u觏됟c\W)f סu9p·eU=Cρs` סu9pz,\T:ze*\`Ae*\<p:\uAp>;9p.sP:ώe*\T*\T9p.sP 2e*\ ! \=Cρ*\ סu33pz,\<8s:X>u3Cρp 9p.sP:9 U9p0ͅp UyuAp U9p.sP:䁲 U9pTمp 2e*\T סu9pم*\ !O]=p סu9pE סuȗ-.\T:\A됯\\9p 2e*\T:\uAp&Ae*\T9p}Pp 2e*\T:\uAo\ סuu.\ סuȗv.\ U΅s`:X>u9pz,\ 9p6R;{}忐_Hmojc Cw;\~`=d; Cw;~+ء6v9}Pms`;}msEρm/d}6A}- YC }6v7Plc6A}-(CO`;;\ﶅAC1zlczlc>cd;=Czlc>cd;=m2mn[Am 9m2C.sw>6v䳍TmЇdlc>`rEmg;ءmЧlc>3fءO}f6vAmp6ejcmp2.sX6vϼlc93/2Ce;\6vAmjc9m_t;y6v9}PmG@nc!6v9rnc<u!6vQCmy"6vao.sX6vQe˷,eW,e>6vAmp!=>6vAm쐇ncء6vwncms`;>6v9zlc=Cρmp??9mp2Cpءpv0z+ g ١pv0z+ g ١pv0}P[a8;V*.sP8;2e g=Cρs`8; g!ߗ9z gT8;2Cρs`82e g|}pva\pvgEg|}pva\pAe"e gEg9,9(62r!oS8z gMpr١pvg١pv90z g\pv90z g\.9.sX.9.sX.9.sP8;"e"?6v'mM>6v'mpod]WtmѵWʯ}mյy@\\num__/P]35>__N]35>_~rm~1]>6_ź6_ߗ\ߴ_\oa}LNͯJ/ٯWN7N}kksekɵt||nj䚟y]1"zɯU|*kEȧԭݪI­ꐟ%ܪ>VuȏnU@VuȏnUO[!?WUo/eACρs`:ϧUzlU[աVACρs`:ت>VunUԪAۭjUԪëp> 9U.sP:kejUԪpZ2Z2ejUԪ>VuApZաVunU[աVACρs`:[ժ=CrUzlUT:I[աVuAjU~9U˭jUԪyVuAjUvR;}凔!$ACb;Cρ}c~>v9zcT;=C_{`;}큕"ZW\>g:+VrX}"ZwW\uzV?? 
\>g:X}:u9rz\\=u9rts`:X}Xu9rts`:X>/Ep*סY>/T}XuA_D|'C= Ї L`>{c;a[N`{<ء}v菵L`>l`;Q4}v90.sP;\eJ` p2%*.sP;\v菵L`T;\v菵L` p2%*.sP;\vS& sp;)!A%C8rN`!v98 J`!=9'.Bt;䡧2嫈N`\va>!=9,_Jt;䡧2囉N`2N`T;\|E p2%eJ`<u;=&Cp;>vN`ءA%Cρ s`;>v90zL`A p9(.sP; '*.s'gᅱ?Y=ACOedY;Y=ACOedY;Y=Az,k ڡ'ȲAe*k|GvAeڡv9z,k!ѹ}Pes`Y;;:Cρe*k|Gv9z,kTY;\Ae퐯]>vAep2B2B2BU9,\9.sX.4.sPY;\\hrY;\栲v.k M.kv9 M.k!opz,kڡv9Bڡv9}pvg2B2C^hrY;\\hrY,k~Yye*k~Yk,qYk#6J56ڼkok߭~\smErm=rm[յy6|LCͿB5?6?̳}m1׶ͷw?ޯ?ݯWT>'vʵֵT־6%?״??WY|C뚟y޽g^e_cYygY|/ygY|%y\E?\i^L]ͷͧw)._Ekkk{k-k%ktJ|'ݖkՖkEk{k~u嚟yUͯL׍k||nAs;_ќ3p~%v;OŹu\wO)u\;ÊsJ$=Cρp>:\w90zus=Cρs`r!?;.s\wȏup>9(.sP;πeu3pr2 r2eu>\wAprݡ\wSCρs`rݡ\w90̹us!OΜ=*̹s`;\\Ae?y\Aeu\wApr-u9(.sPr2eu>\wApr!_ :ACρ=Cρo =*Usݡ\w90}Ps`;=*.sre?$N˓Dx< J|{Dx< JT"< 'IP"<\%C^p"<\~0*d"<F&Cޢp"<?DxwLDA%Cρs`"`"u'C=L>c"`"<9(.sP"O.sP?<\~xA~x9z쇇!_~Ps`?< _\\\ntm~Ϳ >_]tm-pm%15ïcƭ~m 1ï7?ܯ?ۯͷW>zfʵ׵RT6.Ϳ'7Ϳ%Ϳ$Ϳ#>z5?{ϼz5>_3z5>_3z5>_3.\\NVǯͯI͗>+4 e3kk}kk]̵˵̵̵c5sm%pm3sm2sm>d6r6_\3m5?)sϼ-5>Tu<_ ]p~vu<\񃪎LxOE\Y:Ñ"%X=VCρ񐟒\=VCρs`ux9:zTu9>xAp2U:.sPu<\x9:s`u<X?x9:zTaC=Ї*,>cdTa<X:9t.sPs/l6_f#=H?Fzezl^6顗FzezlT#=Hl6C/jHe.sP#l|Fz9zlFzWnT#=Hύs`#!_zlH9.sp7.sP#=\FzAjH9,H9.sXI.sP#=\ܓr#=\Fza'FzACH9,Hy1čs`#=7.H=6C^ q#=H=6Cρs`#==)7Cρs`#rOʍprOʍprOʍp!Izl~nHyōj~nH;.?7үU6o66P6׹6^n\_\/'\\o\O?{:{ [:K:k 5?j_\39̫~m~Asϼ\3ywit8 軻zȏXAuC~rw='-wC~rw쮇!?p/gBρs`w=]s`w=]=vz쮇AuC>.sPw= ]9p2u4:\zAp>9.sPw=O.sPw=\z90Vc塟!C?CT<3Xyg'[T<3Xy-Rb!ϸ+#?R+?XyApÏO+?XyAG^+?Xys/e+?XyApb!}+?XyAg+9(V.sP<\X+=CρGT<+ıs`b!_8Vzc!_8Vz|%XAe|Xygb!_,8V.sP<\XAe+T<\XyApb+9(V.sP<\XAe+ށ2Vz|Xy90Vz|Xy90V~P+=Cρ򃊕cXy90V~Ppb2mT<;hemsk_CD.sP==heTONpUOyp2eTOy-zzz9~PoYO=Cρ􃪧zzKJzT==[×4~ρ/YOoXO}V`Ϭzz$0XO/YO},zz藆/z>f==XO=C%a==XO},zz9zԷeV`AoIXO?oTOs`=='zzSCzc-/F񬧇>a=='?ֲXO?zzSCρp2Ce==\AkYO9.sP=2eXzAeXzzAkYO9.sP=2e>a=]O9CrT==)!zz9r!Oy\O?|sp==䡧2勘m5><:_nk}<:_3||kӍMo?4п6gt|kEM_^|mgצ;h;7_ޮ|mbצ6_ͷm6.tkUM_ީ||k~צ)_3?߰Mϗkg~nmzrL}p?_AG8)?ް<,GGK}>z83)L}>Er&?1NoXz?G9G9s@>z8z?G9}estpGa?ùpG9Uh?'Z*?OCrQ9ErПP9!T@CroP9C*9̕C F2mms+9̕C 
F2rp\9Ĩs2rQ9a?bT.s+9̕p?\0W.s+󃪜?T9!Urs!χT99rsP!CA<R<\cCA<R2嫖?q*9,߷T<\qe˗.U9RU|Q\r%.=^)wWYCb"3%KYq,.qX_r Ke9pSl9pS8̖8̖8̖y"*,:?Yt`9Ζy{ ,:g,:?8r-:?8rr/rrra?a?QY\0[\0[,:@Aw_z7&ee>z&ehee 2y:/㚧2y:/.㚧2y:/g.㚧2y:?yGeue(4s5>P~?Hy(yOyGL(yydyEJdaEACQd~DAC8Hd\wD%8Hd\ yP8Hd\ 8Hd\ y(ypDAZd\ ypD%8HdÃEAǁ" d-2?P"@y< ̃EAǁ" %,2:y7a8HdE%(y8Hd\ 8Hd\ yp~l~D%8Hd\ 8Hd\ ypDJd\ ypDA[dÃEAnYdt(2:yeyq@̃Ҥ<8Pdt(2?P"@yq<8Pd~D%8Hd\wD%8Hd䭯E%8HdݯEJd\ y8Hd~DAN>Xd\ ypD%8Hd E<8Pd EAǁ"%2*"@yq@̃EAǁ"'(2:}Kyq@̃ʥK<_˴<|<}xDygkh9je9:Z΃Kd9.q<_˴8r~-.qD?zACя~%8ȏ>D?8ȏ>D?zp=[LуAǁ~@?zq=[L=8Џ-Aǁ~G~@?zq@уKG.q=K}уKG.q=A~G.q=A~=a=A~e~уK/уKGr~e~ g\G:9b?ȗAǁ~ g\G:уAǁ~ GG:\FG.qXFG.qXFG:T9czok@zP`L.eʻlއ_J.Welނ/\6vټ^6o_E.cx/o!qlt/܏i˽$]6﷗qS1/..&/y(cң_6L^6.g/a..g%?&=ee`eeegǤGl lԣ_5O=e\ԣ_5O=e$e\ԣ_5O=ee\ԣj2S1 \#?曔iyyyycl?ljlll>,|..{.{.ω/oM.de^R_6_\5˼Nl+k^-yl`/oͻe1[e8]6𧻓Ǵ^gss.˸iCatLn qy'l͓͚A汲!ˏɍ~+l6q>Ӽ/iohg"h/iȝ/diG%6=A|G4 A@thȝ@|q@= .qӼ@Kd h@#  A( .0l7 A2 A  A߯ Aǁ@|p %2}d@|p  %8o$Kd.q>#>A h.q>H48\ 28\ |G4@ Adl?P l26 A Ol?py}i|q>Ksyi#Layi[LK6yf|p& m.qXf䁲 %L>ayi|p %3MKd@th~@2yb|q>8 Aǁ@|q@ Aǁ4e.q>Ae.q>ODm.q>A /l.qX˕Aotz Jw˕Aotz Jw|[*AotJw|p\ w|pAot(w|pAot8AR:tノAǁ \?P@w|wv;>;WノAǁ.q;>A G.q;>A;>AK?PK5K5K.qX.q;>ȉK5ボȱ;>8D.jvP;>8t:tP;>8t?pP;>aP;>8PЍA>j>Ozky\6o?zoyu\6.8|Le>yټM^6.-c!/7 ql/7Ǐio$]6oqS׼6Fueeexټ^6|Lf?ee|eeǤl\6'lTlll3jo?˸橎kɴ˸橎k桴˸橎aT_6Ol\6/]ƿ3?aѥwFue5e4tXZ+MkMqV>8P+tWAǁZ@|qV@i僎AǁZ@yb|pAX+%yc|p%yc|pJ+OK.qV>8_|r'%:Hyc9|A@<ձ> Qt(?PrD9|A>AX\ 9yc9|p%(9|p%yi98Hi%8H\ 9|p>8Pt(r%:yo9|q@<>8Pt(r@9|ږ(9|pAj[\ 9ym9|p-8H\ 9|py8H\ 9|pJ\ 9|p%(9|p%ym9):}M9|q>8PŏAǁr%r@9|q@჎Aǁr@98H\ 9|pAނZ\w-(9|p%KSr%.q>[QK$?Pr F-.q>ArK$r0rRt(r:r@9}I9|q>8PAǁr@9|Wჾ>8P^}I9|p^8H^8}p-zi\⠗%}5O9|p^8}q@ *r(rOro(*r)*rr)?pyRb9|Г }N9|q>8P Aǁr):\X\⠷w%z{\ 9|pA@QA(So$>A>e>2噅AF>4>P>#>A>>8Pt(.q>Ar (.q@>2>D9|p%}d@98H%rK$?PrK$?r `9|G>#჌Ar `9|q>4\WZd,r噥AY\ⰼ嵥%˃Kayui9|ʖ8,O/-@rJ\ⰼ>Ar!%yl9):yl9|q@ჼ_>8Pt(?Pr@9|q>8PAǁr@9|p?^8H\ 98H\'8H\ 9|8,-roV>GeA>jތzQ+V>͘ZW.z3VZ7cjKz3V>AZiތ?PZiތ.qVS+t:j僎AZ+AǁZ ok:(|j僎J+\ |pA/X+\ |p%(|p%8H+%8H+,.m8H+\ⰌY+\ |<8,m9c|qV>Yk\F۬:9f|qV>8P+t:9f|qV>8P+2f|p2fir" 
_<?y"o y\6?/+y\6o_.|L{eVxټ^6o_ .]c/oql/i$]6}q׼m*!˼5u׼~MTWzwpVe">ȟ6ަe7MA>ߵ6 #kS}?nmw@Aǁ@S}q>_6ꃎMAǁe:4ꃎMTIM%2ycS28T䑏M%28TM%2(S}?68Tt(DJFtV(2Byd}Y>PFdAg2B}B<>< >A2>A2<@K$2K$.q>A2RFt(:yfj胼,>8PFdAXFt(:yo`}q>l>A2 -.q@<ٶ>A2[$.q>A2@K$.q>A2%.q>A2>A2<<>8PFt(2@yc}q>8PFdAǁ2@}q@K$.q>A2 /A-.q>oB-.q>A2 C-?P2胼>A2%f2K$.q>A2 , eAǁ2 ,:(}׍胎eJFt(:}N}q>F胎e}~׍8}~pd}~p8HF?4>?A2o).q.q@z=/5ASc5?۟_<5A]Qc=/5ASc=/5ARc@}q>+j샎5ASct:y>AoK.q>ATyMA6T(>^>f>OMA>5>m>>^>>8Tth.q>Ah.q@>O>A(Kd.q>>Ah.q>P48T\ S28T\ S} 4Tdl< `S2y`S}q>8Tdl26y`SKꃌMAT\<>5Kw6Ĵ>ayiS}k68,/2mxͦYMA6T\ⰼʹ@K68T\<Ҵ>A Om MAǁ Om:4(S}/6ꃎMTth:4>8Tth.q>KSKd?PKde.q>;K6y`S}q^{\[?=5A/j )?p`?=5A/j )zQSO~{<%A8H\ ~{<J\ ~{<%x Aǁ@~q?+ ?8P=Aǁ%@~q@ K$.q?ȱ K$.q?A%.q?A ?A e( K8 K$r e( ,:9dPAǁ ,: Aǁ ,: \,.qX, AqAqfyl"˦_6Ci; _6}e?/ٴM/v_6mvl~˦Me>˦mMl~ٴil~ٟMIe˸e^˦q˦1_6 i;eӗ_6 ]64O&/6le/FI/l/~˦_6}\642˦_=08Qz~W<~W<z~ Od+y2$<>olplpl碲?a?a?Q%A~~~~~!:?8~ OLeU:g/A䭁:?k%A[~~Z880\0\0Ս.q.q.q~~~~.q.qh[ aqAo~fGA^~~:gKfKfKf%%})KfKf>8\0䵧.q~ >eplplplpl7__A_~~!:8̏\08̏\0$Ks~~Ώ\0?p8%8q?Z߃?x}XA~\^~`Ї?\t`wW?8~aqAF8\0\0\0/j} 5u5>{AEC.:пgoP?ha j}35~pV?8@5Kf5Kf5>l%?8a.q.q%%A6@.qj8j j8j8jpìpìЇ P(5AjA6H 52R?8H .O/qAIMj8,0.1pSjy&5KR?ȳ7\ⰼʔAEKiYy.q.qXhJY ƿjQ:Pg5AjAjtjtpìpìpìj8j8jyA 5~Y ODpìpìR?ayyyXA˸ׇayXI^T&ees?K>A>AK&>Ao}p^8}p^8^?-sO_"/(B"O)B"/d)?pyb }Oآ?8PI>EAǁ"@˃K7/}E?>?_Wt}FWGt}@W˻ uEWgot}MWЇ t}FW73t(Wq?8\ Wp\A6\ Wr}@Wp\%roÿ-r8a]\ WЇ t8+K.q@K.q?]A 2v(W v]A `W vuٟIlޤ.ge^ڠ.˼5X;pYllj.'y/+¡A!ޏv5t}J[K1桺Jiyy2yxcl,l>glulll)f./\.G.e4=w׼f.e5e^k^7,ykb2yU\6_\5a˼UIp|1 ]51˼5$wO3r ȟ$ΧA@Uh ȟϮ$DŽ+ T%A]I/ W;ڕd%Aqv%A?]I/̴+ + + UJJJJUItXItXItXIp* `@,>c@,$./3\,O/ }0bϩY,I>c@6,8PAǁAǁ%*.qP@',.qP*$%*.qP*.qAAb@pKT,IKT,O/ 8X AX A%*$?AADX 8X@ yb b b b b O"\,p AA̹X ay bxi."],\bG..qXEX ȃ9 8ⰼt@p@%*.qX^IX A%SI 8X ay/b yNbX 8X sj  b ϩ],tX,tX,pT@q`@q`@p4U,\b b 8X ; bp@pKT,Kw.g$A^ 謰 WyAJ /\Ip+w쇛\f?twj6k /l:zdAл&7%MAl:zdA_"tɦeMA`Ap2ᦃ78 AM%j:.qPA_"t\⠦ /l::l::l::l::l:{TAq`Atttp$pAq`Aq`j:.qPAp̦58 AM M%j:.qPApKt䘙TAp2u馃eM%j:W$KK7+MAǁMAdԥtAq`Aq`Aq`Aq`ASn::l::l:8ptAq`A#_n:8PMAݲ 
8|feee?etpI~Ouټq]6[͟Ӧuټg]6oY;ee~uټ]}Lefu}ټU]6TǴO]gss.7˸tp׼62ÿ́_5fceGehԂpწO2OSw}Bq`}Bq`}B?]tXtXtXpT}BUO.qP}BGVO8P %O %[ .qbWO.qO %[ KT B6.Az!eU2a B˄%Aݸ!e o\䩥KT B8.A%A,A8P%%*A.qP Bg.A8P%%*A %%*A.qP BpJKTA   yUtX䁾KKT B.A:,A:,Al%Aǁ%A@ 8!ȃ` 8@ y 8@ 8!A%A^!A%%*A.qP *A.qP BpJKTpJKT\ ]A  yb J\tXpJ0q Bq` Bq` *A:,A:,A:,A8P%%*A.qP BpJBa BpJKTpJKmqA%A!@ 8!KS%%*A8P%A&!A%%*A.qP BpJKw YtX䅺KKT B7m,A:,A:,A8P%Aǁ%Aǁ%A_<!8!6  Aߴ!AKTp8!|pJxf z\/!|p8!|p8@=.qC +VGu?۟_< bAW2UA_q VOqA72UA_ V}w**YetXetXepfU%˳W}w*UeA V2 ĶOV2 >2`[ mA e}dOV T[Aq`[Aq`[Ap KV KVp >2`[Ap KVp KmqAmA@8 #8 Hl+.qP[Ap T[Ap KV ~ 4VdVp <2p[Aqp[Aqp[Aqp[Aqp[AGn+8py趂 ං OV\ⰼLt[A'hn+.qX'偢 K7n+m%CEy涂嵢 ?˟tټ]5*˼]6oAyk&2yV\6{_.?/>*feyyyc2lv]6t_6^^6O"_6\6|LUc:3U?UǗǗqͳy2yV\5*9˸YeXep|Is<1uf!z\Bzp^8zp^8@X.qЋ/Vpl*l+bmAb |}ɶ/VpmA_곭 |}϶/V  >g[Aq`[Aq`[˳8,/SV T[Vp >O`[AЧl+ >^c[AЧl+^c[AЗ/l+8PmAǁmAǁm%j+.qP[A l+.qP[j+ȫtAW 9j=AG=A^- ȫysM0} Ax' >!O Ab}BO Ab}0>?XKTKa'} %0\Q\\ >!CO8P %OKTA'} Aǁ Aǁ Aǁ Aǁ A7a}O:O T}BMXtXtXpKT\>!A %O.qP}O.qP}Bpÿ-O.qP}BpKTPT}BpeF Are}Bp2#>!_O:O+\fD]tX䌨u}Bq`}Bq`}ˌ>Ȇ G2ݐ .?///Ɔyl}.?/??ylx.O]y-yùlo.i$]6o5qͳ!2ym3ͻe^Ck^S? ͒˶?9=yFOslklll.EM͟ܗͳ͓їJ͟IS{eeeȚ ό]stXspj<3vAq`Aq`9:9:9:98P5Aǁ5Aǁ5%9TApjKTspjKTs\⠚ O]spjKTsIkKTspjwm>-zm>;ӵ|P|ltmNL kk5~pmgl \K_J_5µd5^l \5µP5^l o">µ͵׵ g#\\\O}]@^w>kkk>µ͵g:xm>|m;}m~Vsm>u򿛏#4|5_:;^5|5_:x׼2|5_#\\5|ͫpm~5^5S|L5^#G񄐿O[!O8c=s*.B'MxAB pBț#Gy#}#Gy7}p2e#G}s`!Gys}#!ʻz#T!-ZBρ}s`! zBρ}7kG8>BA}wlG9pP}mG9.sP2e#|j>BA}p2.sP!\>BA}#G9z#>B9z#|>B9pP}O6G=Bρ}#>B9pP}p2e#~>BA}pG9.sP!>.sP!?e#T!c?e#G9.sP!Co>d!G}s`lz#G=Bρ}yG=B?b!G8C?b!\e#2.sP!s^}pG難zA難zA}z_=\ez_=s^ŸB?b 3; l1z'xB90z'\XqAB`!\>BA}#G9G92B b!\>BA}#G9}sp!]B}#+>B9r#!>BȻ#\^t!GyW}p!o.sX^t2}AB$s!\氼4>Bțd#7Gy}pG9,.sP!\氼G>BA}G}s`!=cBρ}#g>B9z#T!G=Bρ}#>BA}/Me#G8>BA}p!oֻpP}p!oֻ.sm#Y>Bay}ˋz'|zx=B>=p\~6}lhEgʵcgӧϦ؟M)?>R~6}\?P~gMKi3^(+IgC?5?Q?Ϧ϶ksYgMM6tgөksYggӗϦ#?\l:o;\Vtg}Ϧc(+l:fxͣLxͣ3^(+l:N3^(+xMϦe?W^ ?lzr,Ϧ?5µXϦcd?U ?l:Avm>OtϦ?l:<( Ϧ4?5? 
|(k~>>|g'k~.lz,3_ɟ ?|bgϦ1Ӓ?5?xͫW Wa%C(?Vx!aIB8N HaE@a%YC~(s@X瀰CPX瀰Ca Vx9 s@X瀰B8 9aGB8.s yLaÙ9̇3v 9̇39Vx9 OGx?[yS29pÜGxa#<\0Gx9 𐏨Gx9 y~<6Ut W;Tz.U ]CՅ.Cu᡿P]xڡ.cT]xڡe.<\氜Qu᡿P]xa9Cpr:FՅ@u2t 9,cT]esujesu" !Tz.<P]x9s@u>B8Wz.<߇P]x9Յ =Tz.su2p\]x.<\0W.s 9Յp.<\0W.s 9Յp.<\0W.s yOՅp.<\0W2 9,RU]x_˨.<\氜KUu" kՅ kՅp9CՅCՅ =Tz.}m\ov_]^z"õ?um6j666_}"õ@Xd6]3'g|yg|yg͏}ۮͧ=?S||>)kkk^Xk6?6hL;͇7͏xgٮ';?ӹkӝkuyL':ykuL皯yk^kkuyOrkUk6?L7kupyB^5!/|Br!Suv)NOH)r8C N9C[ N9|ȔCO Bgp!pp!r=Bpp!r=Bρ)J9SC90pP)7ݜr9(ƛSr98hȻoN9t4\!9.sqЃJ9C90OAB 2 C?!BHq!C*n !!:A}DBIt!DBMt!\ C;2d9(.sP!\ Ç 2 C2T!dygAs`᠂ !o:z 2 !o;z 2q Ae 2{ CAA 2 CAAp d9(.sP!e 2d9(pPAp 2e 2T!\ CAAwXdAs`!mVBρAs`!BρA 2| C90z 2T!d=BρA 2d9(.sP!se 2d8 CAAp !9pPAp !9.sP᠂ T!\ CAAp 2B>vCBρAOd=*! C90pP Cg 2 21z 2+21.s+2p^99(+ yAA\d9p^99p yAyg 2| AW C^ 2~. C 2~. Ce 2\^Mq!crBg!crB?&g!S/BρAзd=BρA*27V\8Bl.-B_:l.Bl.7Bas!}06Bfs!6Bcs!S6zl. 25ej.BAͅj.BAͅp \9.sPs!6w9? 25Bas!\BAͅj.\9? ͅsps!7Bͅ xn. !A5Bͅsps!7.o㹹rn.Bay)ͅ\9,o湹ppy7ͅpz !.sXss!}07eˋzn.Bay[ͅj.7\9.sX^ss!\Bn.|B9򶰛 A5Bvs!\=6zl. A5Bρͅs`s!\BǗ 25ej.Ts!\BAͅ\8BAͅ\9pBayoͅp޲ ͅO\8dBȧn.|zBe遛 !7a !ߙqs!ô/ٳpp9:ZCz5\θ7k YkBB [)drtƵ)d! k 2k :Xk9,Gg\kZCa9:ZCee.sP!_ Yk9!k &ZC9z5k ZCIpPs`!ߤXk=oR5k ZAe5TkÂpj 2e5T!\ZCApj Uk9.sP! .sP!leˡUBf!\ZuCBf!Xk;ˡUBρпYk=Bρs`!Xkyhյs`!Xk8ZuCBs28pmj6]$AkS!rm 6^P?ȵqm6^?;qm6l\?53ڟ7/q< |Yq׼_5 õ[1pm\{ 2| õ(sm~m>wm>zm> pԵ|||6g 2\O^Ak7?c||yg\gy[gyzad||3Ouuޮͧ@za36y6Akkit|||||36?k^'>ytyk^G=͏m9kd6?k^GBsj !=k !?!k !?B\kyµp~sĵs`!Xky7µs`!Xk=z5k ZABޑs!\ZCȻr5T!\栃!o͹.sApAZCAI.k BN6B'qs!?' ! \yͅ \={6^ gBn.Ts!\yͅp 25ej.\ͅs`s!\yͅj. !ozl.Ts!Q7Bρͅs`s!}t7BρͅF\8BAͅ7H\9pPͅwI\9.sPs᠚ 25ej.|HBAͅp 25.sPs!\BAͅj.\9& zl.B9zl.|B9pPͅ\=6Bρͅj. B9pPͅp 25ej.~BAͅp \9.sPs!57.sPs!67ej.Ts!S67ej.\9.sPs!66>ds!\ͅs`s᠚ zl. x9zl.~rB9Q A8Q 28.sPs^9p pP/\ͅp^9p^9pP/xA/~r‡*'ĬB[*~Ǭ‡*ϬBx*¬gB?fV!|fB?fV!lfB?bV!lfBgV!U=f./8z,'T9!wBe9!Bb9!]B|g9 n o; fY UN=Bρp 2B|g9!\rAB|g9!\rBA儃*'TN9; UN9wrBA儃*'rBAp UN9.sP9!w !rBȟ.' 
7\N9B儃*' !rBȟ.'\^s9!\Np !f.sX^s9 2=:Br9!\氼LrBȻY.'7\Ny7pZ UN9,ֹ.sP9!\氼_rBA儐7w]Ns`9!]Bρ儃*'rB9z,'T9!XN=Bρ儃*' rBA/Me*'TN8rBAp !請pPp !請.sP9 !請.sX!v9 BNfB>=pV6 !8遳 gϬBȧ*|BbU8Bȇi*|-Y>ô/k2ӄA_ 2 COB]A_ 2rerA_ 2& \d Ck2.sX8.sP!+ 2\ \d9,jd8q!\氜q!5d8 CAAd!\ Ç 2; C90z 2  ABρAc!d8 C迃1z 2 d9(.sP!Oe 2d9(pPAp 2e 2T!\ CAA?Id8 CAAпd9,]d AprAd As`!/t.]d=BBg!d=BρAs`!qWBρAs`rAd C8d6^t õ[)rm6\Q_qm6z\K͟揎qm6^?6͟q򿛗tmkAkaqmk^Akd6k3F& )pm>E~m\_ ^^µum6666H 惲/k>bt]666>q< x3p< 擱x3pg:fz ';3u)pm~sm~sm>Nwm>\z d$ݵ\5^K 2\\}#8gӤ|,5_:Hz׼~S| 5_:Azm~ls׼^5 õ5_:9z׼ 5|5_ 2\||5_:,z׼ !zBnv!!/}~?=0U(:L.| 2 AB{p!/1BpCB~8& +爃 !D8z 2 C90z 2 d=BρAs`᠂ !9.sP!]9*.sԐd9jAGPCޠs!\#dyAYNyb9! Bs9 !z^ rABs9!>YNy儃*'' !.sP9!\rBAp z,' !opPs`9!pBρ儃*'urB9z,'rB9& UN9N 2v 2e*'T9!\rBA儐w]N9.sP9!\rAe*'TN9pPp 2Bt9CBρ?XN=Bρ儐7]N= rB9pPs`9!XN=.sP9!\rBAXN9.sP9 2e*'|rAe*'|rBA儃*'|rBAp 2e*'~$r‡,' !z,'T9!3Bρs`9=BρЏ8YN=B?a9!XN87C?a9!\7e*'2.sP9!#N p q.s2.s2 p9 Џ8YN87?d9!B?ic9!6Bg9!gB[a9& <N> <3 l3 䳜z,' C\Nr儃*'rB蛻,'rBY,'rB,'\ f. n rABρs`9!\rBA?YN9pP?YN9.sP9 2e*'rAe]~.sP9 .sP9!\rAe*'TNrB9 !r6B儐sp9 !rB9 \N9B|w9!\氼rBȻY.'w\N8rBay儐w\N9,on 27Br9!\氼rAe;p.'TN9,/¹.sP9!]>d9!XNyss`9 !oz,' UN=Bρs`9 rB9.sP9!KSp 2.sP9!\rBȻ.'T9!\rBȻ.'TN8rBȻ.'}]N8rB9ۗ8 \ͅ on.|Bn.`s!s7B>ss! 5n.Ts!c87BPBpn.e 2br\Ae! :2E ClBA 2f!_ad8q!_ad Ca9 Co$eqB#1.sX8Apr\Apr\Aqe 2d=BAs`!d=BρAAc᠂ C迠1z 2T!_d=BρA 2d9( 2e 2d8 CAAp 2*.sP!\ C+ 2T!\ C 2d9(; 2 3z 2  C 2 C90z 2͟kgǵ3}r\?8_揍kƵC3}f\%]?0gᚯy}X\?+WᚯyߚAk g 2\_;<泇擸s)pmR]͇pGү'үg| õ d63ѯЯoe\5 5^ 2\5 õ5^ 2\5 g:z G 2\~ õεε޵5^K 2|zx-1pm~sm>N^m_[_\i^^5#8||5_:^5|`5_:z׼ '6|@5_ 2\\5|+pm~NΡ^5S| 5^2dAW w!䟞 uF+V)G dyAdyAd Cț2#2 C90n C90z 2T!d=BρA 2# CAAwd82O ykApΧt>5 :e:zPAwdA7;d!Bp!m>*򮇃 !9z 2T! ?BA7d8 C}2Ο 2e 2d9(! 
C90F d=Bw!d8 C[2 Cț2 !ob:pPAp !d:.sP᠂ !og:.sP!\ Ae 2dy7Ap 2e 2T!\ CAAp d9(.sP!N>d!dyAs`!dyAs`᠂ !;z 2 d=BρAs`᠂ 2e 2d Ap 2*.sP!\ Cȇ`2T!\ C'a2d8 Ca2d9(.sP!\ CGb 2| C90򉰃 AB?a!d= s`!dAs`!3BρAz;3ez;\ A.s2B?d9(' 2.s2.sP9 pAzCB[a!6B?ic!|B?xf!.o83 33 <3 <3N> C90ppy;Ad}sA?d}sAпWd}7A7wdAB^a!,Be!wBb!d8 C90z 2d9(; 2*; 2e 2T!\ CAA?d8 CAA?d9( 2e 2T!\ CAA?d2 !;r2\^is!d9*r2 !;ppyAsp!wekn2 CayAn2dy7Ap֛ !f9.sX^}s!,e氼 CayAp 2E8e 2 Ç 2 !o:z 2T!]BρAs`᠂ C90z 2T!d=e 2|i*.sP!\ Ae 2dyWA 2dyWAp dyWAp9,/:.sX^u› dA o2| C2`!sB>s! 52T!c8BP Cp2T]YBП`5\k Yk[B erǵe!:k 2,k Xk9,gy\kuZCa9ZC+eYeY.gy\k9.sP!Xk7Bρs`!Xk=B퍵5k ZC9pP{c!Xk=.sP!\ZC35Tk9.sPj 2e5Tk8ZCApj ̰pPpj ۳.sP!\ZC5#5mZC9=k #5k ۳z5k ZC9k ZC9pp9bZÇl.esym6\? kgIpm^6-~kCpmF6\?>6^myikͳkͅk5kWsᚯy5}ͅkgj.\ϟ_S;ö矩pmoN]Elͧί͇ίͯ`|µ \6w36gͯGͯ/^\55^l.\5µL5^l.\5g:z 3泩pm~6sm~4sm>wm>z L ޵D5^Kl.\\}èg1's|15_:z׼~|5_:{zm~r׼^5µ5_]y9Lͅk#kuy5O[>Qkuy3kͅW !7ws᠚ !<;A:ouF+\ld A 2˟ !9愃 2Aw(dgX!?ddyAs`! BρAs`!d8 C90z 2 dyAp !o9pPSe:򾝃 2O 9|jȻw2t> !o9! 2򞆳 !9pPY76Uy;Y+cVᠲ !9ze*AeBY7Uy{Yp 2ee*|ȬB90z*AeBρY7U=f* B90~ B*TV!\栬Bț*U8B;*U9(pPYp 2eBްwV!\栬BAYp U9(.sPV!\栬Aee*UyCYU=fBtV!U=fBwV!U8Bț* AeBρYs`V!U8BAYp 2eBgV!\栬BAY*U9(9 U9(a 2e*򉗳 2ee*U9( 2z*|B90pPYOfU=fBρYz;U=f¿4U=fB?aV!U8WC?aV!\We*ԫ2.sPV!JfUp T2.sЫ2.sЫ2eUp^9U*U8WCρͅ\0ͅ\}ͅϖ\ͅ˛ n.~Bl.~Bgl.~Bgl.B9zl.\q9 -  ߲: C.'IrBV,'rB_,'arB,'T9!XN=e*'TN p UN p 2.sP9!\rB_,'T9!\rB_,'TNp 2.sP9!\rB_,'CB儐]N9./r.' UN9B儐]N8rB9 2e5Bްr9!\氼r5ekk.'}i.o.sX^^s9!\氼rBAp 2e˛l.'TN9,.sP9![>d9!XNys`9 !ߺz,' UN=Bρs`9 rB9.sP9!KSp 2.sP9!\rB{.'T9!\rB{.'TN8rB{.'r]N8rB9ppy3B,'|zr4B>=p9!B`a !a !ߙq9!4gBYo*gV!oAfB1/*gV!= <*gV!_~UmϬBo{fe*<*UmϬBa9Bo{feYg.gyU9,gyU_f.gyU9(.sPV!UW;fBρYs`V!U=fBՎY* jǬB90pPYcV!U=f*.sPV!\栬B_:*U9(.sPVᠲ 2ee*U8BAYp 0pPYp 3.sPV!\栬B*S*ϬB90? S* 3z* B90 B90pp9E‡*῿o.d~6}Al ?6}l(I{gcgӧϦM?6}l6g6}lϦly?6gkY'i?5?g~k~*l;YksVgMM76=tgӹksVgMߢ?l:DϦ3YMo6}!G*l<Ϧ?Ρlz+gUyd~kYMnkYlϦ擭?uϦg7?l:zmN.lznϦ|?γ ì?l:3^gH.lzX޵Ϧ3?LlzN|Ʌk՟Mh~k~>3_k՟|hgӃO\Lgê?5w瓪?5?S9I̵|'Tk~>3^J.wkxa>p|$! *p|$6c ycPH.<%. \xțJ.<>%z+H.sr!)[Ar!)Ʌ yOɅCoɅesr" =$zH.<}@%9s@r!o+s@r! 
ywRɅ =$^ =$> \xaN./S8!|Q8!e '< LᄇpCpCᄇxC8᡿Nx߂'< _ᄇG8!\N('<_~Nxo{ᄇpeˉ.s mpeˉᄇ9 '"pr"Gᄇ/ r"Gᄇpes8pCᄇ =z'<Nx!ᄇ W;z's8jpCᄇ Nxa'<\0 9ᄇpes8! 9ᄇpes8! 9ᄇpCA8! 9ᄇpes829?Nxa9(pCG8pCG8!\*s@8#s@8pCᄇ yPVᄇ =忢pymkwxm^?cw|l#\~m6}_~my/kȯL6/}wmyuq_Ӿ6/yתͣk^mkF63&pm>~mo\_^^6µ||| gj#\^_ϛ_߾6=3gͯGͯ/^\565^l#\56µL5^l#\56µg:z3zspm~4sm~2sm>wm>z|޵5^gl#\\}cG1|ͫN^\5|pg:z׼N^5sg/|P5_j#\\5|]yFk^GQW3Ck^PkFyջ7!|!/y_ Ǣ !o.>UUUpBȟ*Y‡*LsU!WY6WB6qU!XUyUs`U!XU=Vz* AUB>tU!\株B[*ypˆBAee:/n 2=B[ABp <B_T -=B_A[{AV >e|A90zABρ?=*A90vA[T \A?8Aț?9(pPp2Bޓw \AApnsP Je?9(.sP =K>d ?ys` ?ys`!;z?=Bρs`2e?S \AA?9(N>?9(y2AAp?9(.sP ->d ?`s`0zu90z~.A90AAlA2l.sP^9ep 2pP/[?\p^9ep^9(pP/[uA/[~.l}P/[ @p  oֳp, Y88A,YA,~ AgW,~ A, AE BZf TF B<` #F BKe eF .171}/п1}Џ18A90bz19(b2#2E *b2#2E eT \栈AAп18AAп19wyAAp"19(b.sP eF #!A_ˎ#19G B#!A_ˎ\^ s 1kpn#!o9b.sX^s#2-1G Bp #!9b#!_q_?s #!9b.sP +/#?1mψA߂xc _~1mψAo{F .j1mψA2b۞19,j19(b۞prs5\8b.sX8b/\8b.sP \栈A90bv#A90bz#1=F BՎs`"jLjA90bzT \栈AAпt19(b.sP \栈AE e19(bpPp"2E Bam19(bpPp"3b.sX:b?#A\:bzψA90bz#Aȳ#?wA8;ymg6/1bpm>F c6]>үC6K{m>Y~m>X~m~3 h͇ʯgʯoX\C+gy g|ny g|B3P댡k#kik*G*6Ļ6K댡t&||"3 \>akk kkk*LPOZku3?k^Okkuy ͏Vיk*6?Uk^MW@35k^M1k*y;TwC!| *TdzC!/ BCv kW X|}pAߌ˿lU Bnt W >d +!obB~b+A{'+A9bpPs` X1=V b+2U BCt̆t`6DW e:0.sЁِ]19AU BStZ!o]E[/ME[/MB޹s_߻s ;B޿sZriE&[!o]E![A9En["=Bv "8A{n[A;n[!6EpP-pZ!o9E.sPZ!;E.sP \Aej"y-pZ2ejT \7#"9E.sP \AA-%"-s` IBρ-s` ]yBρ-j5A9EzlT "=Bρ-j"9E.sP zej"8AA-pZoֳEpP-pZ!YE.sPmj"9EpP-pZ2B?bCBρ-Ϯ"=Eq[A9EpPL[A{l[gEzl;ӡgE.s;2pޙ9E[tA-O"9pޙ9pZt63.s;ӡEzl~AHlYAǪlgV[Ef=[E*[]E*[oֳEzl["k-R"-7"}/-п"8w Bs #1BAB>s +n|$ABAE-l-A7B-lgr-l/["mAA-pr-pZ"9,j"mAa9W\[2\[\[2ej[jA9Ezl[AڱEpP-s` _"=Evl[Aej"K-pZ2ejT \AA-pZ"9E.sP /E.sP ej"ß-pr-?"=Bgr-s` Bρ-s` "=Bu "=.reuym6\?Ck?53Tpm^6-~kCpmF6\?>6^myikͳkk5k_P1Tpm͇ ?S|6(6/66=Lk=k7*668686~qm~g \_Ϛ_߼kk*kkk*kktz3 gGW?Sl \Ϭ^uPg:zm>wm>zC2yY̵5_ |#0||5_:^5|`5_:z׼B.|@5_ \\5|+Tpm~Ρ^5S| 5^*١WC*١C!PAA=f!/W Y}pT1pT1R\1A8?q Aț*+!obz+AU Bρs` X18AțT1y:j.sQېw]19mAGmCkt \栣U1y-'"gX/0EƜ[/0BޜsZ/0Bޡs =:S }:BޝvCBρ-s` ;EzlSA9EpP-"=Bρ-7"=BPtZ2BUt \ABZt \AA-j"9Eκ[2ej"8A7E.sP 
\AA-pZ!dZ-E^*[E[沈E2[SnyAGlA_lyAG lT "=ej"/MejT eej"8AA-pZ-EpP-pZ-E.sP ej"8AA-pZ-E"9BZv "8A9ErnT "9BZvj[!A_n7"k-p["9,zE[2/Br +nT S/BAȧ^nT +n/[?"mA߂lxc _~"mAo{.j"mAE۞-l"9,j"9E۞-pr-ls5n\ոE.sa9WAa9WAA-pZAڱEzl[A9EvlT "W;Bρ-j[A9EpP-pZ2Ba \AA-pZ"9E.sP \Aej"K-jnsP \AejAa9Al[Epp9A9E?[A9Ezl[!Ezl[0[2-kOpm8gl\{>͟kg pmL6exmy_ͻ6y͋k~;[/72L-kk||||3ȯߔ惶cSw0>S|||ŵEpm>l~m>k~m~ygy͇jgyW?k"6?6ջ6\L-kkkogV:c3X6л6W-kkټtR|n|,dy>kcku>yNLSku0嚯yJk^-ksku y͏\Yk"6?mLP)kuyB^nA+-jA-pࠎ܀[r%Y:n.WJP JJP JBJqCB_ lA8?q Aț*ns`ZA9EzlT "=Bρ-jAA-7"82 y-pچt6Fe:j{P-7]'Q/(/BbsQࠊ/BgsQ 62QQ 6BcvQCBrsQ XyE* !7(z, TQ @BρEs`Q gBρE]8@AE]9(pPE7]9(.sPQࠊ2e* ?@AEp2(.sPQ \w2e* TyEY=BDtQ X=B>wQ X8@{. ABρEs`Q X8@AEp2BUgQ \栢@AE* T9(:U9(ْ2(2e* T9(!(z, ~Ģ@9(pPEЏX=BρEz9X=B?icQ XhEs`Q|hEp|9(pPo>sAEOX87e* ~Ƣ@Ao>sAo>T87ez9\w|Ep|E|XEwYEVX8[@, Ϣ@, ~ɢ@[, ~ɢ@;,  рзK  р }р? 8_w4 /FBb4 RFB f4 !FB?=`4ࠢh@90.sP4 \h@Č 8h@Č 9(pPрp2EB f4ࠢ2EB f4 \h@_nsP4 \h@Aрp3 9GB v4  8vh@98rT4  9GB v4!h@ĎW рp>7 9,/e92,Gh,GBr4 \氼hAEe{Z 9,/k9.sP4 RG>d4  yрs`4ࠢ!o:z7 =FBh@90z2E?4 9(.sP4ࠢ2EeOhAEeOh@AрOh@ayр[7 'h@;\dr4 |GB>r4 !l9!_Qq4ࠢ!l9GB>r4ࠢ!_Qq4 _~ h@o{FBd4 h@o{FB3pp9:h@o{FBрg4 \h@a9:h@Aр3g4 \q4rtрprtрe4rtрp2EBρрc4  =FBρрs`4 _ 8h@90v W;FBρрs`4ࠢ2EeuGɖʑE[9% ~t,iV*OD ɏ8%h@`>p>X⠏8%hh@`>X⠏zÏ8%h@`>??XP&Zр@oр@ǁ Ɵ 8L???8?8?ȉV4 qG~4bGKDw/ ۟g ׊_/ _f ۗaCs|ֈa1l_! İq9l_ô8 ׆arؾ2 a0- ~ʟa05/ sk=/\Z E_/ kn@|(0lNm]a}w>0}Q`@?FSaHEa}}>>l1lx2~Q`>>lD_ƚEay~Q`>q;5/ c?/s}a{;aaΰLg>7lvIad߰}uk_ ۇam=l?isklfk^\k}Z5:5a0׼Ya0׼&YaY0׼Xa14:5ay ckzm {jm  rc @_8Pm@ͩ՜Z`?x  Jfȯ @Z.K6Ȇ6r]~m q~m@}m q~m qxK@65X@v 8hF7}Hm A3kg-g#gOߓd̟d̟dsٟd̟d؟x ? Ⱦ? n? ޙ? p> 8l? p> Ȯ? 8? ? 8%,@ x,@`> p> N? X8%,@`> F? X8%,,@`> XA,qgKY} >,@:,@:,,@ ,@:,,@:,@8%,@`> 7? X8%,@`> 7? p> Xȟ,qggy8iqggKY} .M},@4tYY@? 8ns:,@Y@ǁ? 
8nsYm,qgm,qg}$́%,@Ym,qm,qgm,q@Gwm~ >|4YO"iO>?z}I"i>?z}I2zK;4%@htG@yi?|?Q?- }@@2:4:48X 48X `X `%2zK`~ɧ?Oß?A8=+P62626( ` `U`6XC8=%ZXPF`e`C e [@68[R6d`C*e [@68R6(`C3e`%%2#jithdG@ǁedG@ǁ@ǁ)yI@ǁ@ǁ@ǁ\28X 28X  6(`SX 2Om`%F[E6?|,ld'@l@lȃ-y eb l?P@B?+~KW0,0zGr@?)r@R`?﷔N`C?Џ+K~\QXP`,r2c`ß~KerK$t(~{@F@r@ǁr@ǁr%,q?Ar@of(,q?Ar?ArK$?PrK$fr%,q?{{K$,q?{{KЪS8P=U?q?q?q?q?C::Xdޖ8w?phvnؾ2Oa6lՆkȰ:l>L Ȱ}/c,aZ9 ǰ1:l_6ư}x֌a?+ư}ƚk^Ű}J15O0!?LvaP3l ?lo۟Ia}|~ aۇl#aa?l6ۯ^ c?5O0rs@@htht'v@ǁvu9Н|Kt9Avu9AKdrs`>آ?AKt9AKd?PKt9A}.78ri ?z}Hx,l"nE}HϮh"n@ ?Z ?Cz[Ke@S.nzRm-P3 ?q?q?A ?A% K$?P K$%,q?Z K${<%8iq?A Z8Xm[N22( ` ` Xv= K ?;7R_XoW_ȝH]!9R_^ȵX]:P /tq?pW_Ȇ8 Ԕ?paM/,qg/,qg/dS K]? B!d B!e_v,lI!B$b /dL]![_Ȧ: ٵB /dX /dN]a.M< /,qBv$a_X w%]!_X /,q8]a.a_X{|_X /,q-':_>: B w:_8@ /,q8 KvnCa.a Kv%B!8 ] /I8 K /,q8 /t ЧA_8@ /t? /t o>_8@>B ݏBCa~]|a|a.S7/,qx  K87/,qo>_X 87/,qo>_787/tgp_~< X2x8/ }<G>BG;W8/t_ Gnp_+eX\ Gnp_+Gn;/tqa;W8/,qB\述aw_X;wG%wp KvG޹aЯp_X;/,qpa述aw_+! Gܹ! G`$G2r 9/d运;W9˭&9/d运;W9/,q(运;W9/,q(7,w述ġ\sB6r述ġuB6r述ġ\xB6r述ġz?pw_XPn>a述ġ\!rB הB!r_8G`%Gr^ 8/tqKsw_X;/,qa运h9wG%B述aw يaC(G垎B6,t运z9/=9/GlϰY}0I]aSv>j;l<ϝoar>l1l2Jia݋ayJ)ƚ>V;5OI0<%/>0ͽc}R?l? 
G^&ӡ?q@ir?q?q@]ntt'55nS8P.7N `.78H.78rs`4>@]n,q?'P8rs`.78rs`4rs`.78rsO?PKt8@ܩ5m?\H?Ч{N O5>?Ч{N2zsJ~4m@jht뒦@oNi?L|۴qݫi?ЭK9i?нݬi@Kd,qi?ЛSKd?P\28ȴX 28ȴX ~74(~`LޜҴX ~i,qi?A)M%28ȴ8ش8شͩMMG62626(~ `~ `~ 76Xٴ8شͩM%MܜڴXP"ٴ`d~`Cd~ {568KI6Wc~`Cd~ {568I6(~`Cd~`L%垒M%2uiithdҦ@ǁedҦ@ǁ@ǁe%9R>l(߯ƚMk6ayXcӦ0MfڇE a/>0ch^ ~p5l?'o1G?Lc)nڥ~Z5l0|mviT ۇ emgT\׌4b;5ayO5[;5/Pjk^c\ϣ5Q;5/(a,0׼&imd=|[?y[Y2A@f@*A?Q'@'KP'I']}$@d?ɟtl*h b@} q'~ q''~ q'~@} U Ad՟8Ṕ%`d՟,qs`&k'K4@ٔ@#ΜȮȮ?M:ٚ?1?i١?q@^?q?q?jh@Kd?-7K?P X~!=B/${`zdgҮ@/tSc{ {vz==J`~\lT88~\]]]X {`\%ruI{`\%r({`\%ruIr8%]%r({[t8X {`Kztztzt'@ǁzt/@ǁ@ǁu6q=q=[t:tJ{@{^*]%{X {X⠻%rhu6A@-X⠻%{X⠻%r%{X⠻>Z@ݽ ,q[@{tEK{mzoKe$ޮ@-]>=6t؆@@=Oݿ=/ԹC{ԹzIe:@pSv uNR'unP =q=q=A:=A:s:2n{_s,q(ֹ:Ks:M{`t%ҹ({`Ksdsds:@:U=q=q=O,Jsds:r:@?s,q(wLs?2=ġ\4=Kmَ=ġ\9=]։u%u%u%ҹ8 (ֹ8Hu=q=I=I,Pstsuuuu%ҹ4s,q=A:s,q=A:@6s?P:P=A:sdC:Ks,q(Ĭs@{ ֹn{y:@2Y~tlRuk?Ek4l_+ְ=l_ôZ a{ؾT WaB0S~ʟWa"55O0׼a45O}>L:a0=Թۧ3 I>l ۟qap}|~aҹ'isa%aQ>l)OW cS>5O0Lw05D?w ak~~˘%N9a\  ۇ&#~~27l?Kۧq-aJc@ ۏS0JaI0׼ƅ5,}+YV(%} Vd?P̀L)[ `%} @&1@,,u+} hf)tRfne[>-5]f}@}@}@:::(} [˖8Hfej~;Aہ1[fXol}`H%RB%} Vh%} ;Vh%} ;V?J@6۬d{J@6ܬ?PJ@حdJ@٭?PJ@׬ttdJ@ǁJ@6ڬ?PJ)m>AJ?P@-\,qp=}7  هp=g  فp=qp@ نp=qp=qp@    p=A K$\tc K$\?P K$\tc%\,qp=Ѝ9 K$\?P@7(\,qp=A-[$\tR8P/K@ǁ%\t@ǁ@ǁuu6qp=qp=6:Gz@:~%:X :X⠫%M:X z8l`8l`l`8l%:@ Kp= H }@z[ ,Ch@7 )\>Zp=G hҩ-&n@ҩwj:qS= H:ŤS2kzߩTtDžN@7 TN@w\Tt?NTtTtT,qS=AN@o1T,qS2 nz`CS=AND%e(N@o1T?PN9ŤS=AN@SөX z`ʩXAN9;;ŴS=qSrN@N@NTdTdTN;;ŴS=ġ܏S=[L;K% ;,$T,q(7%TdNr]N@v\T,q(w&TdNrqNT,q('T,qS=ġܠS=AN@6 T ꁎl@ک8Щ~l@ک8Щ8Щ~`Faz@z oTtTtTtT,qS=ǥ)z`%r(z`%rglr8ȩȞ%r(z {v8ȩX z`CdmzԦ7nme@Ϊ[ȱ{k{jyTdmz M䬺Jȣ"k9nmz M?P?<--#T?-ne@6腑 EL|h{ ~m2b|_i{ %e@??i,q(c,6 %e %e@6`c>A:I|@|@|@|@|_'i?P@ǁ@N8 ~>q>q@Kd,q>л+Kd,q>Ae,q>A>A@KdtKdt2j| 4:4@e.@ǁ@7hththththx,eKdxKd,q(?a˨?a6l۟ӊ6lglp/faZʆ+ٰ>l_džذ}{a?Kذ}ƚ~k^װ}eTaäOa\{ۻ~ޛ?a{c~ޗi&}w>?l ?z a:0<XTcS?lƚ~k at)a0̿n H0]EWq-~>˨>/0KBuXo>:l~#aް},t>$=kӄ}"tBub~7l}F1+Tۏ5=5a0׼50MCskzk^3ayAsK?l?0׼ay ?sK?l?{:~~>75ou| @(d[lTX:>:@<2V2Vs 񁌂\ `u|`[i:@KJlY@]Tfu|d@-lY@u| nV:T:T:T:T(u| V8HֳJX@,q {`fم:>A3:@W:>]Iٗ:>]uٛ:>yHu| 
qV-ou| qV@歎dG@筎?P@׬ttd@ǁ@6ڬ?Pm:>qR=eBz Vkz˄J@vTtP~lOY2R=Jά^5TdJT,qR=AJ)ݰR=AJ)R=AJ)ݰR@)KTtÊJ)R= +*KT,qR=AJ-R:T:TKz@RFz@z@8P8P **R=qR@E tJE ,qR@E ,q])R@E ,qR=ma*Kt5AwQKt5AJu5AwQKt5ma*]E} 1RnSߖJˠnSnS;nS;EҚ~m5=MZ{5=лHZ,@7UhMt+@"iMtS@iM?P@ǁ@ǁYKdM.2 mkzw8)i[Lkz`C5=Яʹ8ȚX kzw8ȚfZKdM,q5@YKdM,q kz `kz `kz w2Xؚ8ؚ8ؚ~遌遌Eښ~``kz `kz w8rikz`C`k冀%咀lؚXPn ؚȦ%庀lؚXP ؚ~%ހ%88Ț5=q5==F[Z5==F[ZZ,7lMthM詭過過過%4eM,q5=AeM,q5=A@mM?PY5=AeMd[YKdM,q(t8PJ@ƭT?[n+9nz*yTdz T Jȣ"+9mz T?PJ[&Z״2qnS{94oS{ 'L?m~ C|7z>; SW ,C7@(+~>; Ce@?),q(C7Ѝ%eЍ%e@(? XX |`~>q>q>q>q>/(|@|_~)t(?P@R8P8P%8H %8HX 8HX |`JX |` R%5B|`%5B|`C>Э  2Ek|@|[#::::z%z%8Hc^]?lK/~0lGWaa?}u'}/t:7l_a"7l_Kô )n_߆0<\Zچ+/~ؾ: '<&;}ؾ2 aH`~"0'4`~0l? ?l2l.2Aaayᇱiƚ~><5O;06;YU~~D0M'f1+ۏC퇃aهit|B\pk^S\ sk`|k^Ӵ0׼fŇ5)>l?ט0׼3ayMs*?l?p0׼i0|k^c\q75o| V@6ld*lp*U>U@̈́22g~ 򁌂_ MU>- [K*֜l*@Z]Cg|VedV@-l*$;Զ_g|@|@|@|@]m|`lY*%dV8h?k[K4@ْ>MI>=uٙ>xH7| iv)o7| [jvI@vd_n@v?Pn@6ttdn@ǁn@?Pn@ǁ@LGgz .v3@9|3=-G;Й~;ڙՄ@aL?P@&tcgz`nЙX gz`ʙX gz`uiʙ~%rCgz`ʙv %r8șX gz`;IgLtLtw@ǁLt@ǁ@ǁu}3q3=q3=I::3=q3@] t] ,q3@] ,q93@] ,q3=MO:Kt}3A7Kt}3Au}3A7Kt}3MO:]| љn(әߖn(әn(ә2n(ә2Eҙ~m3=M:ݓ3=лH:,v@7ULt@"LtS@hL?P@ǁ@ǁ9KL.2kgzwt8`;Lgz`C3=Яt8șX gzwt8șf:KL,q3@9KL,q"Lv2vrigz `ge@@LdLdL.Ļ遌EڙXPL.2nge2ogz *v89x;T3=ġ ۙȦ%e"L,q(Sv8șXPFL,q3==F;Hgz@gz {v:t(gz {v:t:tXLtL`過過過%r4L,q3=AL,q3=A@L?P93=ALd[9KL,q3=q=ok=Oj9Om-z OE?PZ@Y k9l-Ң4Z@/[ ko~ vBgz vX&Lt#@L$kڙ6~P=лe P= P=CX&r,TP! 
>Dz}BD~OP=ġLX+98 ,98 =P2cz`%:z{@z@z@z@z@z_)T?PB@ǁB@6S8P~~mP=qP=qP@ K$T,qP=лH K$T,qP=AB%T,qP=AB P=AB P@ K$TtSB K$TtSB2bkz*:BeB@ǁB@7U(Tt(Tt(Tt(Tt(Tp   ,a K$Tp K$T,qP=Ѝ&T`Z>lȰD /}0l/a{u aðaZ(1l_&a^XTa°}yaZ?qk*ay-a5l}ta5l_ ӄaaä[% ۟a9°a~0ևg#a °̰˨[_ߘƚn}kayևXԭcS>l=L1]*0ֺ1̿a o-tck*a}Մ}qQ0MzFtr>;lla}w3FPahcƨp j i}3F0׼؇pF؇퇐\_4>5ay ۏ5>5/qk^\R5>5/aSה0_3c[Ȫ==+YV({ #Vpdp?P#̀L)c `{ `{~4X~FK4vX*^@6E,,/{ h)tSȞx=A]9h{O[J%In8B ;喿8B`.+_n{`.+<@6H-d%O6&=RR<"=M<ԕ@XN<+@^=q=q=:f)b_"@vO,b_"%bd "@6-bt)b?P"@v-bt)bd7"%bt)bdK"@6-b."K$b?P"K$btS"%b,q=MK$b?P"@7U(b,q=A"K$bt"R8P#E쁎EJ E쁎E쁎Eg@{@{{:B{@ E%X Xk%IX {8g`}8g`Dg`}8gk%@͢=#ܥ=п-Egܥ=#ܥ=]ܥtzHz[@t͘@MZt @oZ?Lڵ7c7k=QQk=}k@KZ,qk=EK|](ҵXP|Z1]%e׮@ӵX z`\(ҵX zߌZ,qk=AZ,qk=A@oZk=qk=E,v2v2v(z `z `z 7vXFZdZFѮ2nz 7v81u,v8Yu7k=ġ ۵Ⱦ]%ejݮ@MZ,q(v(z`C_k=A2nz`\l#ڵ@Fk=qk@Fk=qk=qk2nz@z ZtZtZtZ,q(v8ȵX z ;v8ȵX z ;v(z`\ڵX rkz`\%r8ȵXP.}XE쁜m=?E쁜m=A({ O,biE%@yd{ G-biE߿n[=Q[,#ӶDK{ liȴ-"@o)bӁ"@(bӁ"@,Q~`ȱ=/Kz@{_(bE;P%,9="2c{ LXXP&r,b?LXXP&r,bށ"DE%8H8PE쁎E쁎E쁎E쁎E~m@L{@"@ǁ"@ǁ"%b,q=A"@")b,q=A"=A"K$b?P"K$b."%b,q=MK$b,q=MKʈEnP8P E[T=q=q=q=q=-bt(bt(b?8-b,q=-b,q=A"@(b,q("_=4︅*/[^>.[벥.[a㲥.[elyly[˖lY`/[˖˖rlumlYZ/[^4.[˖ueY [W~ʟ[˖%2p5.ve˪wr*/[.[V˖!lU_ƵnW_2\2\1\1 ˖˖˖˖Ӆ˖]~reEX2p5+/[f/c Xeh2e0ehe[F.zg27F.ߪ[\e[--a=a2e2uH22e"e>?lU_^^˘]_!_˖˖ϭ122:k~UƇ-ǥ1\~:k~5_^_/sͯ˖\eUr4zk~5*/[NEq\eu2<dCA` +eYP2(?@< drA,qX'?XⰎ _?X?$|do>'Aa J?8>>K 'Av*O ەq}tv'Aaɿpz?X?Ȏ=|/\},qX\|m{?X޹`zl'E Z_?~=??Ȯ-|\hwOK,qX tKo7pAvCK,qX~a>K,qX~->K?vnh2 C. t;f7AKwC. 큻(?7A7UvCc Fq7 Ao?nh7Ǹ_?8:%%e8I[_Xfmah`C%e8[?nha~3 ,qX /\ nh`jh`jhjh`jh`jh7} d`h 70qe20q d`h 70LArCK@8 F8p_Xah`C 2C7%eHoCKʤ8 /\ ,q(0a5ġAah tvCFnhjh ۈ0q tvC 8 tvC9CAa7q ,q(s0ġ̍Av~ahjh`jh ;0a5p5_?X?X?X?XPQAah ahAN AAAAAAAeB]A@AlB2 }2d??Mv+un?׬2?]v_v?]v_v ,v`C?.2K,/,0zw*2la{=L } Wa;l_s%wOs;l_o)~ƚ}ؾ ۏ#&0bؾ WaIİ b~0݇0uۏ 'I>l?~> g/}~ma~ka~gkray݇)wgcS>5O}2{~65Gvtcgk cmaqtWgk^7uIayy. s0׼. sK0ݜay \35OY}(v;)K nK 1?LaLzIY0fİ}$|IYm~=lF c6)׽ayKay݈}:0׼.C s*İyk^ %8s 0׼da 0׼.? 
stak^uak޲@Ve[Yʷ,@ټ,?oY~ `Y XX022 ́LeC`.=8C`Lly٤X"~ [6kitVhd&@g&ed&@g&@~miYI?_I?68Ȥ#I?Awcyb~`8nL OCl,qݘRVel[xhY~ ',?]AycY~ ;(<,?A,@{,?q,?q,(| {e|d@WlX?>~>_ ~>0~@K,q~>]~>A@w,q~@~>AK,q~>]H|@|d:(|?,0~>q~>q~>M2~>q~@]Y Ɵ]Y ,q~@]Y ,qЕ~@]Y ,q~>}0Kte5AWVKte5Aue5AWVKte5}0ԕ]Y} nP?1R?ߖ˄1R?1R?#10 D0_9g|s4z/HeBՆ@0 n0 n0 yUa2j|`C\a>ġ̮0 %eՆ@0XPXm/ %eՆ@0X 2嗆Kd?PKd^_h| `| 626X&mdldl?P@@@m?O080Ƚ %eچ@m,q(6Xm,q(65b|`Ca>Kd l0XPƫm?P2bm|` %eچ)a4:4Sh|@2Sh|@|@eچ@ǁ@N0808080XPm,q(680 %280 0X | 680 l0X |` %280ȳK9(k|rP,L@X"ȡUKD>g:rh@<*x6?o~>;tС~>[С~>`Xfy;ޒP?w0F|$,~~>ġX?+<8Y,<8Y%~2c|`%:zKB|@|@|@|@|_թ?P@ǁ@S?8P?~U~>q~>q~@K,q~>;WK,q~>A,q~>A~>As~@K4%8H?F%e8@7rttpnP?8P?8P?8P?8P?::(|`%8H?%kG|`J?]?pΉnak˰.e9l_Űe^aô8ۋqؾ4Wa{ʰu0Ey25H7}A~a?}1ƚ??l_~~T1l_Kްb~L1l?xnؾ O(aN &Ga??l0l0l?ƚ~kayXc??l c??5OtgkayqMayO紇ua35 <\35/twgk^7wfs0o| ` ??022 '́LJD`D8JD`ÿ-X⠹@v,7dZ@ l[kRk[kRkȮ?O:ul}@}@}@}@Ziyzb8Hk#kKt['Auyb}`np?I l6Zkc%kp@ ْ:Zk^JkȖb>|+yb>{}+b>Е@| wV(|+@nd׊@tdˊ@y?P)KƟ,qb>*K?P@o,qb>A)KƟR18P1>J1M8J18P18P1>ބS18P1zN|`8H18Zj`IQ18H1%Xk%X Xk%Zj`>@78t@oKePڊ@t@t@^@w/ųb>M2*b2j|_~tk@7ɨ^@FtH R1`Lb>ġ̦Z1XPS^2j|_~,q(V嗊2j|_~,qb@)K|`%R(|`%Rz/H/b>qb>{A++,Ce^X쁬!{=P.=({K c{,CrI=KQc{Kʴ%e^yKԱ%8c{Ke,q(8=q=5{z=5{zz,C:r^@ǁ^@ǁ^@ǁ^2l/{`CB=A^%#j/{`%#j/{`Ȏ%8X /{`< =I/{ '=e,2i/{ BeA쁜@yyb/{ .eA#PgwNȉVَ=zVݎ=z9j{ 6Yo{ 6:Di{+Di{_(m?LX4J_=/h@W(m KFi{`C=Vc,m,q(1Xc,m,q(1zBie:IK$mt(m~@ǁ@ǁ@ǁ@ǁ@S~큎~=q@IOi{@i{@i8HX i{8HX i{`JX i{`%(i{`%z[Ki8H>KK$m,q=]JKʸP8PNqWKJ塴=q=q=q=q=wk-mt(mt(m?PIK$m,q@IK$mtcI=Ѝ9JKq>lf+/?W u^{*3lyVa{a=lW鰽H%{ؾb_:IôX)n_+0<Fa8l?xa8l_ca)dt/p~B1l?O ۏ'&pb~61l?L _~ca!ݰnkFay݇itJc>5O}Lz~,75O0<ay݇it=Lgumf>:lו0׼ s0׼Uay]暗}>=5;2\u?I}Hw~4?LG}Z}3MduO14cil Gԇ14O0f&ay暗 a1l? 
ayxg sr0׼f ϲu/ck^&a10׼d s?l?mayMay-ayY6e| +&edc&@M MF%m$nJ*IXiboUF<@ ) p.dl/dl/ /KKFQA& FqdY 9@콐#d)콐sd 2/콐d 2 9PNe*[^Ȼ   /콐*.q콐+_({ yb{TAŲ%z@ 9TB KBs/h>@ 9jϽJrhz7Krh{!'_({!s:H+{!Ƕm+{!Mo[/'<^a>u^ᓭ~셜@^AVBiePV%ۥ)+{Y/ЇpZ 8^AV%.qЇpZq3)Z Z/ЧeZ Z Z/qq3)Z Z }ZqBwZ2KiY/{KiY ='lnsc=l.6?'l.᰹3JT;ly6T;lc*lI7eXj)dXj)c{0LHDk^Ry95#\j;ankn&vnKa;ۇI6_wa;a;ۇI6ov1nw]aoؼ}^qs0׼^osK}m{\z!5[ ay a-ok^3%/x\z!5/v|K ay kCk^/BkBVۅMY]ʷpB 9pov!`zwp)pp-d,.d,.  /Ի KBPA BP[.\.٪B>B瓪BN.t>PB.t>.H٪?_:Tu.ŪBަX]8P]8P]8P}T݅VpT݅ZB 8H] 8赚%z,Vu:qroz/pf/t6Bm.3J۸ yew!甶q_B y%ew!qrDoqrjgtjrxlv!vjrdv: ySB9 Щ]Sڅک]څ/٩]ȩʩ]!څK.9N .\ vtj.qSB9 }ΦSpڅK.\ v9 }ΦS@: : }ΦSqSB9 }SqSqSB9 : : =vSqSZ: :/K>ҩ]A/].qSBtYA/].qSS:/KK.N%zp^,\⠗. 8ȩ}^,\⠗. 8BOyԾP/].qKҩ} څ>ө]!څԾpY=SC2: }SC2: =$SC2: }ΦSc':S;qvjzDvOttj_lک]NB8.؉NB.NBԾp SqS'::/\6D.\,ک]aS':: 8,ˢvjSpò1jv[X: 8,kvjSpʩ]N%rj.qSB9 8ȩ]ANBA;  5]ȳ]Ӆ ].vM2HvM2HvM_le5] 5]ȳ]Ӆ< 5] 5]ȳ]Ӆ ]5]aҴkp\ӅKUM 85]ш]5]85]ш]Ӆ]5]ш]Ӆ]Ӆ].vM:tMrƮBǁBǁBǁ%˾]ӅK}M 85}Ჯite_ӮB욾P%rMret/k. 85]A%rM.qk`vMHޛu҅{N{oI.N 9Nn fIrmt!׬.B 9Ͳk] 5} ]Ӆ5]oBN욾p)k, 9ܵk;vMret!vMret@t@t.tFfu҅nШ.y:B7hIAN ݠQ'}`t4 ݠQ'}`t4 8, I_l6X']alN/\6.\ t  }^NqNqNqNqN}:Bǁ: P)qpwJy|Dž>Py\Sy\ Jy\^BnXy\;DžK<.%˂Dž>Sy\8Py\". 
jV:TNq@q@q@q@q!_>qqB) 8Hy\A%R_(q) =7pJy\ DžK<.&㰹 +:l.ch8lubk9l~}Nz\aC=l~N\asy8z Ig96?y(|aH8l~y6iaRϴ6OApgT&q<ga8lͿ?Fq< a7lIXT<wXT<0rimqϼ_(mqi }p!~ p!~ }P@~ ]CtHok -B]~{\[G*][8[s]]ꍸB-\7 8{ވ+\7 8[_ވPo.qc ~ 8荸%z#pވ+\ z#pވ+\7 =BWAoHtpJoО[8О[8О[8О[8О[V e-th-tNl@셲UUqU^(lYe alO.qU'WZe 8,PJl@lA^B*[8*[+|OVBǁVBǁV e-\ lYe 8*{Kd-TV%^(l.qUSK R96wCas6?m͍P\as͏ {k_ 7}0>_ :ä 7}|6l \ц楣 sk=5/au/\Ztsk0m׆{|%z{k^:ڰ6050׼taE`k^KayhB{k^ay-正-d[G[yu|h/C/h Y2^]Bf:B:2AQp /dPK^AKPK^A:B΁PK$-@""Bm!:^(m!:rmm/ll ycm!v,-t)-t(-t(P"B^Xd[A"B^Xd{DK$-mEKFQ8.q RG{ {BNC-ur&jm!o-\: _+0h 9WVrb5lk-j -jB>-jB7Trb5l!V^(5l!/Vrh5lϟT^(jHZ|+Dꁔ[l V Pzd>;PZe~HQzd PZe PZ8PZeeB([-\ 8Hz^*\ 8HZ4e%(Mj@UA/P.q TK$[P/P.q TKUҔ^ 8)[=B([-$B|^lZZIe>zQZIeDPz }\OkHj!S->BSpO]*}܇OT }OGiT }Oe>Bǁ>BSp Opòhje3>BS-\ⰬڧZ.>%ˎ}RS-\, ڧZ.>%^(jTT 8ȧZA> S-\ jT yheB֐ŧ!O/\V,>-d Y|Zȓŧ-IB->- =1O 8H|ZȃŧR|Z8P|ZȃŧŧJ|Zȃŧŧŧ.b:BǁBǁBǁ%˲ŧKe1O 8H|z,fieYBr,>pYO 8H|ZABr,>-\ iO 8H|ZABX|zħKC[Q ъZȥ ъzڊZɯ\BYQ 9K7R]&?șMzᲛek!gR6r7&Bmb-&B MIzᲛek!gR6rDkk@k!gR6:4r&ek@kp^ XZ_AV }FlM!en )[-B7^ XZ覐B7^ XZ覐B7lpò+`jeW ]V 8HZABǁB([-t([-t([-t([-t([- %!-tgL i˄BDXBZ$|;Ba iR&H iR&ˊ%>(RBZ"%>PBz"d i@ i::::@L i@ i@ 酒.qp$K$!P%@L iIH/bJH 8HBZ1$N# Ǧ.cSlMC?6*cSlzl?6a3cSlzlzlj>6Ǧ:M?6=,7=/cӸ/l~l}lz}lz}l}l}lҏMMϴM3M#My ǦǦǦǦǦ !444icyHH?ƚcӎX~5 ǦmˏM폱!!kҰycyHH?ƚc8;l^8k~^7شicy8l4k~3k~2k~+s sҏMsϻsҏMKs !?kHH?6"|l;?kHHf YCB1~֐~HH?6m~lڽ/~HHf Ǧۏ Ǧ Mgsҏg \cYB6l4k~^:k~^97?暟?暟?6]}5?5?KH?6ݒ}5?o5?KH?6]}5?/5?KH?6ݍsϫsϋcKB ^9AV$A$!}/ $^̀$2>8]? HB %L$^K8{.q%^K89p+p?[YBO%> $ N6$<ۖA'lHH g t!!}7;>ȻIHt!!};>8@BZ8KH%$.q%GYBYB o{$!}p>ăK!\0 it >;IH!!}3QIH$r.* iHB $!}QIH!!}IH-$) i,!}3FIHS9gA7ܐ>ȹ$r( i,!}IHQY B}$4B A$>:A>:!h,}'tAB}BA?0 }}<:/<>B8/X !hσK||'8? Y,wp~>>ܽAn\:A|Э:˺Ap~>+ >gY,>Ye O8,xr~>s.qX|Ѝ(.qXV|Ѝ(.qX|Ѝ(.qA7p~>av~>av~88 %|_˖pYA~ْs>S!9%|9!\rY Os>C.9g?%9Kf98@9K%*9\,QIYY,QIeJr9qpYAN\$|p,|p,|9\09\09\09\09䪅䜅%A \,8,TJAH%OD g(K%ODTJUruF*9P>QNMP9AA.nP 'h2>ȁ `( Ae(}C郜PZNj[2> :0>8P 'h2>8P 'h2>8PC郜IB!!}gAHH=$A!!}/$~$A7. 
>BB%HH\'HBe?Ae?A88HHYHHt !}qpz>Z ?k=z>|pg>AmzBmz>CmֳpAڠ|Ї6h=t|qAZh=)Zh=t,.q.q.q%A"|pì, Kf>ERk аj?Fg܁͕6zϰLY6w>asi͕6?c>zä ']a#*l~Bcy63l~8Ϧy6ϷSKsͳy6OVcsSy6ϴyƚ35O=gؼƚ35O=gؼ6OuXsZy )\ kemV.q[!KVHRA VHRAoFkz+p 9Bh,BBV^.ZYc->ZYc-=Y' BftOrB",tK.ݓn',nݓ>E=Y;n' }{{evO^l=Y8=Y ݓ.evO.qXV,\l=YݓKU3' N=Ya7{$ݓK3' N=Ya<{$ݓK,\ rO.q{qA_ < YȯB~e6H` < Y | Y < Y .k<6H.qAb$AqAb$ $/Ab$ $ $/\vyl,th,-    K]$ 8,<6H.qAe%. ~ ycd!6H.qAp ԷAp Kd,\ d$ yo2H.qA@$ 8,+_6HrxײuײekekB.ZYBY+ 9P5Dke!WJlP¥d,J(ȁ_g5r Wl,Ϧ e,tӔYyM|ڦBl,ۦBW8nB -Y8Hd!gvKVnBK햼Pkх!CS" y!B" 9A!g+" 9E!$ *YY yz }By }':z Az X8x(X"=RX ~ eT9*Bڨr,tJNUr,䮃U>QXȻ_ }hʱ]V9^lxYX8PXCU.{^V9.qXVr,\l{YXUK/ 1RXaʱ#UK/ 1RXaʱ#UKr,\ R9.qmk,b[c!x XoŶB5^X>޶B5!e[c!x yZ/\6Xlk,\ [c!5H[c@[c!5:5^([c!5:5:5^X8X=    8,k,5.qXXlk,\ [mK5 9ðeŶB0lk,\Ⱜxl춭plKdk,\ [c ym[ㅲ5.q#= 8,P5rg[c@[cYXʱV9rP*BΌr,tQX /ʱp)0 ac!gV9^(c!78y,r=x<=Xy,0ОBy,tyҹX ; ݍӹXn ; ݍӹXnB7Kt.sЇS: ݍӹxᲂ`b9 8ȹX8йX)EXek ]tx@atHatx@a!/\VJ",.B",.BW<]>EX8EX]]]EXA.%r.qB 8EX]K"P.B",\ a|;Pw}0(+\ q_.|m.q 7}jkp}n)+\⠭%ښ/\⠭%]%ښ/\⠭B7]%ښ?nB:+nysenB:+t;I7߁2B=Yw {~O~eB}nBnXWȻB b^!w߻pن~q~ .\v+\ⰬEYWaٌ~ 8,Q~pò#e^> 8,R~pò-e^> 8,+S.qX+d+b^!q XWoBv]8XWnBv!e^!q y8~S.\6+\ ^!H^@^!:](^!::]=XW8PW    8,k.qX+\ ޅڃ{K 9~eB)+\Ⱜ=Xw{~p{K+\ ^ y9jޅ.q~S; 8Hw {YW8PW8Pw:c7_nB^W꣛B 93G7_l(n¥+ |w||K@&|$z|@ 0Vri߅R:eTri_!CH+ 9)@* 9)H+.qH+ *.H+ * 9qqH+..\vl+tM^n.\vl+tM^n аW膛B?i+tMޅˮ {)+M\[ R.w ro)+tK\y)pى\-ro)+tK\ \ؖ\q\q\B 8H.WAr%](\ ݈R.WAr %+t#J\ F=P.66tx1_򰹘Zax#~as6+0=yɆ6o(1 ka5l>$ a"l>'0>'(;L6Vj|T /6SSjS6Q#aZ cS 6) cS 6os0<ray dk0<rat0-K*6/!͛aayI-0׼v$\ڐ sK w#\Z sK 6oF暗@.l^ sk%0mD6dk^i2l޹ sK 暗@.l^1 XȅͻEa]k$ cS ƚ@.5O\ؼ_ƚ@.5O\ؼZwray Xȅ);L0׼ray \&\|ךuk^Kևi:5 0׼ 0׼\ȅwayU% -\ZsK 6_u0׼\Zc[ WȪ@> Y](\!~-+d,Pԅ̀rLr\!`\!`\|]4X w6 8hp6 8H w6 8hpr[ w6 8H WArKI}rWZ: _KG\_+G\KG\!v#WvʎBB^_Wwqò#pqDz#B9 8W,; 8hp ̈́GPzBN?G+d_l=Z!' ֣'B 9䬐;K NY!OxBgمEMY[J ]Ԕ'B](Y")9+\ YI.p$gK$9+tHمZ\.\ Y;FJ 8hqp 8hqp$gjqp 8hq#%gjqp$gcVΘB;1pYǬН1=fFVțU Ccفz6B.FV66 e ݠFVȋV ^`Y!/l#+䊁md\e6Bǁ6B:h#pYpòccYe6BWh#+\FV6%mdnh#+\lFV6%mdnh#+\FVaYqߊmdlm#+bY! 
6FvranY!f 6FVȇmdlm#+)6B"m#p>pld-}PҧB,}*d[TXtV K K 畤BҧBFҧyCiB-.qj-.qB-.qjI 9˶B-.qmK 8haBI 9>qשA:?^B^Bu*IS!u*-N'NiשBy ydSIS!uP^%:^%-\⠭)`*djS!f0](S)`*BQ*O$5J<3YTHj 1RTȓ5JJT莑B&5JQ*tiRT_$j 1Rt4JEFp4JKQP%(.qF 5JJTAB7h(.qFp4JKQ*\ ҅(.qFp4JnШQP%(HRk)CpY^ @* yohQ!7D4:JB^ZiT5+ ybх˪Fn4*5F<Ҩ*VJBXit8bQ@Qx*.\G4*\lXiTaY"Ҩg* 8,$VYҨpòNbQ%* 8,;%VYҨpòXbQ%* 8,%V.qXL4*d4*bQ![X+ XiToJBV]\[iTJBV!eQ![X+ yҨ':+.\.4*\ Q!OtVHQ@Q!OtV:T](Q!OtV:T:T][iT8PiTȑFFFFKb+ 8,7V.qJ% FXitrClQ!o4*\[itFҨpFK4*\ Q) y!kхR.qҨV.q@* 9AҨqҨqe?JBǁJB^ZiT]+ ]T/QrWJB&FZiTF|JB XitF|JB XiT]+偎:?8Gv,cП-KڱtKg+R!Gv,V; 9c7v,QCҁt,ŽB?jX*֎ X*tX*֎Bǁ X*֎BǁBhX*tX*tX*֎ X*thJ*tWCSҁ4%)] MI~ДtremSR ДT讆B74%Bw54%Bw54%:4%](SR 8ȔTA eJ*\ SRyiJ*\ S҅2%%)3y)ccS~lُMO`J40TccӰcӬ$l~rlԏMMMcMUHGǦ'Ǧ Ǧ6M8l?6a cSt?6%cSc0lfǦncSW*l 6M}Ǧ6cScSEW>65cÆ1ƚ )l^jk6aCt: >暟>6|lk~^d >暟>暟w>暟mHa\\ c\\ cӂ\Zؼ1116/}lڊk~!}5?ې>6-D]X!}lش 1ƚ cyؐ>6m@}5X!}lZ~ mHcÆ1暟mHs6gRlCش116o~5?~5?o~lZqk~^k~!}lnk~|k~!}lZlk~^zk~!}li ^?暟]?暟]?ƚ AVlHr.'҃|ِ g҃Ɇ _6ِ ddCz) iAA6ِddC*w]\0>au}plC*w]\0>a!=ylHKf҃Zˆy׵p!=ɵlH:=M\7A 7#у*A^Ix#у0AG£Yx o$ȋgstžbmlypٞ _̓=A^<˞S8sy<ȋgsų9<8, ;wA+8lnv\Ϳ=a.Pkn0<:aP| cӭƚ[0Å cӭ6&\k^{paIؼaZ sk.50׼:i-5ͷ0׼:a[k^Koayuݪ0׼Ӻ[k^nayv慪0׼:ayu]['5ONؼ86Q9Ln0<:ayuXtSayuXt{Sɭƚ['5ONkn s˭暗['5/ar{RayІA{h\Z sky6lގ sks65/Nؼlk^ny'*5}0׼:a:aڕ skS65=0ּ:zu 9{[o΅rrgN!nBn "T!3`N!S`NU حS(حS8/B2 v\5%Z-\5%r\5%Z-\ N!gv\5%rr0mN^(N!v:4 ѐs_l,DCN!/ul)䵎 9 9ڱ!A!Br ycCNDCN!xlȹPBB^ؐSABǁB?i)toBC΅2aLCN{r ٣ېs_ØB6JC΅27!ЏTr ݛАs 9~ҐSA%2\(CNr 8ȐSބ e)\ CN; r 8ȐSA%2.q!Br 8ȐSABw4\(CNr<7>Er/)䕑9\>#ts.\)tB}N!/)䕩9[SkSs ,`}΅˺99[s4`}Neo%9[Sa>Ѝ 9K%s ݸPSa$>Ѝ 9Kus ݸPSa)>pòV`}ڐSކBv6! 9mȹp! 9m)sȆBv6eCN!P6\ِSABl9BǁBl)thȹPBl)th)thȹp!q!#Zr r r r 8,W6.qXl)\ C΅՟ 9K?r 9O!φBXؐSa!Br ycaCNr 8ȐSA%2φ e)\ CN!/l)\ C΁4φBǁBǁ e)th)t~xίίVB59+>EM}N .E-}N!߽>\(}N!_>V)t)t)xvr,lN2u ѭSȱ:ʭS|err,lNɭSȱ:%[O0u[$v FN!v\(N@N!v:t\v:t:t\(N@N@N!v\(Nu!@r ѐS~B?Mhȹp0ؐS~BC4!M> 9q! 9q! 
9hȹP%2.q!p 9ʐSABҐSA e)+ 9%8a%l.Q6Z .l~cyrasO߰6XZ K0=xn<[ ?FM܌ͽaلa cV( а $ [а а 0~2 {ϰ +3l;ӮZu?afNk20 )\ ẃtrNmwL!o$)BSȻxc 9;>B7.tBPSٵ1;wv\S8S)1.wv.qX)\Sy1Kzc ݸSa;Ѝ 1K{c ݸSa;Ѝ 1K+|c 8,v1)dhwL!?xc SȎ k5c 1SȎB>)dhwL!Pvew̅%1K))t)Bǁ )BǁBǁ 6c c 9h;q;q;q;prfwL%r\ܴSai;c.\n)佃1K6c.;v.qXn쎹P%r.q;pv\(wLc y gwLc;pv:t:t\(wL@wL@w̅r//nB51ˡ[;EMwLrr.E-wLrhrh!펹PBfwL!)ݲ1S8S8SWe 93X'2>))b %)\Oe 93Xp>%)bB^X,S2R,S+e xX3ce.XqX3ce e.X3ce e 93XqXqX3ce.Xp2Bu@Zg I:SN֙B?jhp:SN֙BwRuZg KL;)Zg KL;)Zg I:s3Kd)\ LYg.up3up3:S8˄Ø ?FL< +l)6`azĆ%6?`k<} )ll a%l~_8lnXK܄=aZ ;W)lX܄) `k0<%7i.5OMkw\Z Vm0׼V\k^{saymͅ%9L+say-̅% \ڕ sKr6k&w\ڑ skC.5ô6/i% sKr6g%7ayJney5+5Oa܄) cSrƚ&l^ cSrƚ&l:L0<%7ayJnX&Mk^0׼$7ayIn&l޽ sk/75ôJnk^ aUk^۸ayIne0׼q\܄{Vay% Wmk^۷ayކ-)d[rS/Փ%7|Kn.䦐C=Kn Y2\BfByA䦐Qp^*d,PKz[AK$PKz[ABι-PK$)ے%ZPBN-)\))3J8Spv g ]}\(L!/Z,)tQ8S g.S@*\ l*\ YRdSR:*\ Yr.¥].¥?tT BQr.~PRA %R\(K)\ 8HR:*\.¥p.¥p.Kp)\ K)\.¥p.Kp)//.JRA p) B޷YRI.¥7Vrk˅m.OYRț+\ yfK!'V B<[rrlK@K!gV\@[Ra¥prmK!V.qX.p)tBKF B7.T.qXp)tBKn B7.T.qX.p)\QR֖B6򳵥--l mipM--l mi)ƖB6dKK!H\RABli9BǁBli)thiPBli)thi)thip*qI[Z ZZ ZZ ZZ 8,WE.qXli)\ K˅U--K"[Z 92ȖBmi)\\r,-Ra*BYZ 8RAB]r,-Kdi)ݕ--Kdi9B]R8R8r,-----kJR8iY8iYZ+\ ]T(+\ ]ToZoZ^(KRRoZoZ p)\V8+\ @*\ *\ *\ b.,e.a 9eB3L˄fBmPf¥Ld)(f¥Ld)(fB S&0 Sța a 9Ba a 9qBa 9qha a a 9Ba 8 Sț8k0nh)tF3L4a ݠ S# 0^`3Lyi)tF3̅2fB7h4ABa 8 SAf%2\(3La K3La.pl wsu6fy6V\WaS6lc K*l~װy6S\M6?Zaaqga2ʄO0׋2ac(l~ \/4&M4ф=x܂x܀&M~w|ͽwk&c D6܇i70l_cD6/A06:4rmC˅2:4rmCK@C˅2rmCK@CK!'6:4:4rmC˅2.q>6.qoq/|th)'0n'i)t;I+LIZa }п\ l)=0n'iPVB SvVB\(+LYa 8 SAV e)\ +Li)\ +̅ ?FK< k'l.dqdNR ebe 9+W= ??l>H懨0asՄgh|OPasńsϰ<N0 /aqs/aaz%l>i&l>g%?2R$ c]=a„~>) {;0'0as&?I)L ~kR0<0ayJa^ckR0<0a 0m46[ay2U0׼\Zc sK sv\` sK 6/浼暗&l^ sko0-浴bk^ i_1lޚ sK 暗&l^e„) ]0<0I ƚ&5O)LkRyC.5O)LkRy90IaX„) cS s0ayIa\„%9LRy.5E0׼֤Ӗtk^;aymHa j?0G% \ڌsK 6/暗&ly;L ayC ƚUo)L!-)d[ s0|`o)L!`)̅څ.d,)d ,) Ka Ka mBR ]AЅK ]AR ]AЅK$)䛻0jp0|xpv/.q.t@)LlJa RBٔ=0J S賏RB6._0>()G)L>Ja.g0K$)\ )̅.qp0>(PR%p0K$)\ )LIa.p0K$)t+@)̅.q?Z(@Nڭ)<:ySpT*7 9ϟ?8o*HM!=)x B>|ZaS~+l 9ص¦ϟV\ w)s6|¦.+l jM!G^Vrkͅ˨ Bǁ 
B>Zas2¦p2¦p2¦V.qXfV)¦p2¦M!6K)6n )\Ⰼ)tSHMelM!p1ƭ)oM!?x+l.\3V¦ݸ6ƭ)9dM!q+l y9B)l ٍ[aS˩6Zas2¦p6Zas 66ZaS8Pas6ZaS8PaS8Pas2¦q¦V:T:T:T.qX6V.qX6V.qelcMelcM!jpXaSȟKXaSaXas6XaSaXas6K)\ M!bͅR.q¦¦p6RaSXaS8PaS8Pas666|״¦OV7OV⩰))tSaS>Yas6KKaS8Ȳ/V./M!_)K6KKas 66666//wQ6ni)ù8[q ]4f e)\VfB>یSԭ8|8s$q }ӌs 888|8Bq q pn3N@3΅2fBǁfB>یS8ЌS8ЌSȇsq.p8#ٌSAf e)f)tIM{Mm koS6Mͅ~Bͦߦн&6oSפߦн&6oS^~%.qBm 8oS.qXo6?W6O|seۄ/Uasͥ6a+UHuNѰj34l>Bb9Laڜ O ϸ O0'}:av|:aO'l>-l>O')N|2:s?a lׁ6p9a] l 7"3'l͗3'lʰc ҙ6as6wə6asƚ3'5OgaZ cәƚ3'l0׼(潥y_,5ô;dk^{ay9sdk^+ay9s}0׼%\r{ay-J=0׼$\ڑ skC09s50׼9ay9s _FgNkΜy)l^ cәs9ay:sXt愱 WXt愱 jΜ0<9ay:sXt&gNk^Μ0׼9ay9srvؼ}fvk^{هi-;50׼V}\rvayb 7\s˙6/ 0׼\ھcۙSȪ3v̹PΜB>ZڙS33B-_2v2vu 99z]!`g΅Z.\%Z.\ g΅Z.\%rΜ |]AΜB>ۙSAʙS8C 8hp3_69ogNl:s }љSΜ )GgN!;s /[ΜB}t3g9ʙS賏Μ%r.q3B9s 8șSAΜB}t\(gN9s }љSAΜ%r.q3p9ʙSAΜ%r3B9s 8șsλ [s:ZSȵ u 4`΅ڀ:\8V-*BNͬ){U<"XSyU<|UeflO!_)<*BNѬ)U[S袦ħEMO!-P¥%)䓻%>KQKS'wK| 9ħp)jI|ħqħqħO\(O@O!-)t(PB>[S8PS'wK| J| J| nυ.qħ(K| 8Hs$>@YS8S.Bw4KЗ6~  E ?NOT~.i)tJOT~.]* ?Kd)\ υ2.qi)y5c[Ǧ2TncS|l*MǦ'M/XaYt~l:?6=^};<=aFǦcA >NMǦ3c]8|>6`ac1tv}l:>6\acth]X|l*|l)|l(~MׄMMMw>tAt?t=~MwyWccO}>6] >6 >6] fǦ+ǦX|5Oؼ1<>cZ6e~5?oe~lZش16/d~5?c~5?/c~5?}M̏=̏gǦ%̏̏gǦ5˰ycycycy2l|lk~|5?}>6]X|lqش1<>acy}>ƚcy}>6}5X|A}>ƚcy}>ƚ'l|5?}>暟>s~ycJ\\wؼ11icycicyci}cycis/l^k~^k~^k^~Yyߧp~A@~}dyp{iߧp~püy%ߧp~pü )7\0}@~8݅iy%Fwσdk σσl$)?`Av )RSA>) 'Pr =T SAN8䓻BSAN.ЃBSA>)Ѓ|rSAN8;BS%SANzp*Br%ЃKYmЃ mЃR zЗ6hlh gmЃAfCThlht mЃR *AK6Aw=aڠ8ڠ?aTee|e͏Qa6a;l~ ߡӉ6ay6gaa  9L c}R65I64aO ~S&l>d30 ~\|'lc( 5;$ -)c R6wasC6aAJ|f0-Gͭxk ?L[0<%>ayJ|:dk0<%>a=0-B6&[_ay@ 0׼\~ sKsV\Z| sK6o=暗'l^ sk0m;צck^$ 7\% ~%>ayJ|捣y.5Oa) cSƚ'l޺ cSƚ'l^;o<%>ayJ|]0<%>I暗'5/Ok^ô6/؅|aڼsk:5y.50׼$>aF]k^ayI|e0׼\{ti:50׼X-){%>|K|.ħ?**K| Y2\BfByAħQħpޠ+d,PՅK_]AՅK$PՅK_]AB[s 8HS|K| 8hBI| 9%ħp 8h@J| ] 5ħЕ@OFJ| Xs$>>))d`O+B`ħ#%>JS胑%.qBI| 8HSAB\(OI| }0RSA%.qħp$>JSA%OBI| 8Hs %>ħXK| %)W4 `O!W,) B#XSȅK| 9᧐6rgO!xm)XÆBm)3 ?.Cl~ eO!6rgO!|m)hφBm%4:4׆ ?Kٶ ?K ?|p2- ?KA ?oi)\Lm)tKOemO[~ 8,so~~_LnB:*b7P!ov[ߊ@u t2 y@u TK@ t2p@ t @@ T8 t@ai T8 T8 t2qv:t:t:t.qXfWv.qXfWv.qeve7Peve7P!ߤp] Tȗ% 8,+.+ 
8,+.\fWv.qp@K*\ 7P!b7Pv:t:t](7P@7P@7P!Z%:+Bg %*O,*tV(*,P%+r(aqP!](qP!r(aqP8q8q8C   .kl"jQ!fQDQ!:H:Hr@`х.A GT}DGTI>#*t#*t#*> #*t#*>Bǁ> #*>Bǁ>B#*t#*t#*> #*\ Q!e.qB 9/p|DK&Bɨm3MFbdT4](Q4=MFni2*_{ 6dT趙& e2*tLQf 8dtLFs(l~  >,F6?~o_\ @Q|(^i0-gN|'N|aͧM|؄gM9L s]c61a60c準(lKk?ay:'lon09^?lnN?lnXt]~=~듎?Las{(l>l(l>5OQk75OQkn{VA\Z W潷0׼@hk^ay8:L˟ay~8 7 \ sq6/y=\ sk35/aray9\r˄0<GaTؼGƚ09Xt8 cq6o8 cq6/&Qk0<Gay:(5/Qk^0׼Gi8*d`Q+Bt`#GqT胑%r.qB9 8qTABt](Q9 }0qTA%r.qpGqTA%rOB9 8qt\BxmG*vBymGpYn #rvBmG*䒃H\sY6eTnR!U*^B>*۫TQJ۫Tȧe{.\*^BUU*(^B>2۫TqJ۫t2FWqW*] U*\U*\ⰌU*óJKJU*\ U*tOMRenR{jz 8,cv{ SӫT8؎TH؎TvBWlG*gk;R.-ۑ y_GH؎THʎTHێTH./ۑ 8ȎTHҎT8ЎTHHʎTHHH.S/ۑ ڑ o;R@;R@;R@;Ree;Ree;Rّ.\^#.qX^#5v H|Yp2Bّ 9p2ee;Rّ 8ȎTAv%#g4#.q@ڑ j;R@;R@;҅#:#:#v R':HT'B N*ѬN*tN*/ҬNP%HR'rbauR!'V'](uR!'V'rbauR:q:q:   .:qKJT遥K K KJTRTRT遥KJTDPҥBN,]*\"(R!.rfRItqtqtK.tqtK J.tK J 9=tqtqt4K.tpKYTAҥ %]*0ҥ%..qt8-.qн uMRT> k*B 5U}uMnթk*tN]Ӆ5UЭ:uMkpٵ5raW6#aS/)l~ _׸10Kak:L{ars%l>VS%l>T30)%l>P$K%S|EŰ( sJ6#a16"Iڕ)liO\R2?0)'l &%S͝O׀0'La l70'Las6K?I%S͍&%SMd ôKƚ)5O%S|=L[ay퐆{]aik^i}450׼VG\R20׼F\R2kay-d 7\=Lay-d sKtLack^J0׼La/)5O%Sؼ6//d:LJ0d*H%ӅR2`pLKdPJ%R2.q#LJTAJBT2.qpLKd*\ %ӅR2.qpLdPJ%R2G)(u9SBrl T!@rl ԅB5P\K@rlm T!+*j5P^[uѽQUn; 9˴#vDrcGT!vDێ IQ|#S; 9۴#vDriGT@Gԅ\ߎBǁBu2ݷ#p2#p2#/vD.qXvDɧ#p2#M>QKQn*\*ddN@֎B^*ʎB~vDeGT.6; y#(; yU Q|O#B9 y#p2~#p2#pQ|O#@: : `GT@GԅrD=BǁBǁ 1QQ;U8U8U8UaUaUA 1QK1Q|^#e gGT!*\Ⰼ숺PB*\Ⰼ숺pUA%rD.q#pQ|#pQUWX; : :.#q#q#vD](GԅrD:HtD֎B **t*8Î *\$GT!vDraGԅrDraGT!vD.q#@: : : 9ΰ#q#q#q#B9 __G]p.UȹR/R/R.U.U.UȹR.UWvBl*\+T!Kr gT٥]q]q]sۥ.]q]sۥ ڥ.]sۥ ڥ 9w]q]q]c8ۥ.]pR.UAv e*v%K.q]B٥ 8.UeT 8gTgKU^ *=^BwRz@/U; z }=z.z }=T.6?l͏v-/lL4l~gd /[i2l> 0-CͧC|8gC|4'aKB|,ͧBkvH8Laڕ]*l> gRa]٥Fk^v0ln0>ln>lnX4HM0< Rayv0혆a6o}aym0׼VK\Z, s uJ\) s 6/=N暗A*lw sk0-e s 暗A0%0׼ Ray_FTky+l^m c u RayX4Hi X4Hi wA*5OTk0< R 暗A*50׼ Ri=l] sk=5ôJ{k^ abk^ay~Xdsk0=0׼v\2H+i=50׼vX6Hm*[ R|.A6Hm*dlP̀ RL Rrb!`T!`TἜX4 uV 8hupV 8 uV 8hup R|AB.qAp!.qz 9%ApV 8hu@ ] 4H5AЕ@TF u R>i*d`T+B4H`A# R U胑%2H.qAB 8 UAB4H](T }0 UA%2H.qAp R UA%2HOAB 8 urlT!vOrlԅrOrlT!vOrlԅ˺S\{+vOrmT!.*S{=u 
S*۸S=Uq 9{ JnԅS|5{! 9{vOr2jT@ԅFSS|9{e/%jSK nTeEB_*\ =USKe }={pò1`T!vO{W/ y{Sz=U|ˠB^*B5vOeT!_"잺PB^*\ *\*\ T!_":BǁBD=U8=uS|{q{q{egT@T!'vO:tO:tO:tO.qXxvO.qXxvO.q{egTegT!잺p=Ug' 8,<.{< 8,<.\xvO.q{pSK*\ T!_a*\ ԁtOBǁBǁ *t*t*+S=uSS|{A{B 9ΰ{A{ .{p Sg=UqS=UqSg=UA*t*t*8BǁBǁBǁ *t*traU!HiuVgXiU|RZLB) }&PiU3JB3PJLҪ + 3AJB3*\ U)ҪqҪqҪ +.ҪqҪ + *.Ҫ + * 9ΰҪqҪqҪ=+.ҪpVYiUAJ *tJ%RZ.qB) 8HiUAJ%RZ.q2 ʰ ݛPudXSUބ2BW(*toBV+aBօ6eXPUØ2?*l~D ? >~}Waa͟Z6Owu64v-l> !l>rf|8gC|4'C|0&V|,ͧB|( Oô6ak g]XɅ6?\MYk^.0׼\X.vƚC+l`&VOTMw0.Zas'67Ra"KV|8La"KV|oae09D|/a="5Oaژ cӡƚC+l?]0׼6eCؼ'זaZ skE650׼Zi;650׼Zafk^kay90׼6bBlk^0׼Zay9C+l^ sˡ暗C+leth 0<ZɡƚC+5OVky135OVky'09Xth cӡuZay9\sˡuV0׼\?Lay懹浘6o^浕暗C+l^ sk!?5/Vؼo.~k^y0ᇹ浅~kBVZ|CoօrhBZ ZjC)Cp^,d*d*, ;. ~.q ~9. ~.qCKS+jpZpV 8ȡUZK_A+ҡUJCZ: }0ҡU *HV!; ] th`C#Z>кPBth.qCpZʡUA%rh`CB9 8ȡU胑%rh.qCpZKкP%rh.qC}ZʡUAB ֆ[o۾U [q۾u[s۾UA[u۾uaV!ݶorBm*䢇[\}s۷}[|U}S۷ nV!IorsB۾u=`V!m*dBSm*K[ھU8оuK`V@V!mߺp(}pòT`VeB۾UaY.} ڷ 8,obAVeB_,h*\٥ھUB^l*B~ofV!/mo}6۷.\|ofV!/mo}p2}p[K[Kd*[ҾU8оU7 ۷ ڷ.}oo:o:o]l*th*BǁBǁBǁ%%%o]l*\Ⰼl*\Fo.q}Ozo.qXForgVegօ%o.q}p[Kd*+[Kd:BھU8оU8оu[[[|}{Sޮ *t*˯] ]UA] ]u]K* ޮBBPޮBB*\ oWyqq{ z z z.qql?g+X!+ց.t`JVu`g+X.G u`>j+|: +\ 9_p9j+|:%ҁ.q@   9_B  9_qB 9_q   9nB 8HVqu`KP:B+\ X.pt`K+\ X =8PVބ: +Ku`M+5:B&ԁDXAԁ]GZVku`am .'lt'_FWf62a6Ywsa2~а# πOô6?l؜9as+lN~9akа9ak˜wa2~Oas暗+5//+lkV- ) aOZ(lAVjprAVV6_ ;az7B6@|/i75OWkVpvp\ a 5ô|mk^ayYmk^;ayY0׼m\zaym&Wk^V0׼^ayY+l^ s暗+lezi 0<^ƚ+5OWkVy3uMsWd c]ƺ+uMsa2wֆZô6ofaZs]k?5yk350׼]afk^ay]0׼\2wki?5 0׼X6wm*;]|.6wm*dlP̀]L]f!`sW!`sWἠY4u 8h}p 8u 8h}p]|B.qpW.\%2wrJ`sW.q4wh*dk`sW+B4wAB }0U.BW]>i*HsWF.#]Kd*\ sׅ2w.qp]>iP%2w`p]Kd*\ sW.p]Kd*t@sׅ2w.q}r5ZB*ZBPZBβ*4ZBγp֫3mk aWS{aW!9*Z : U7w; 9]v~rdW! *˼_.v~ίBΝ*ίBUȉ__.v~:t~ί }; 8,+v~.qX*份_K; }%pò`W$t~.qX*ί_hsW!T UK]U|Bx ]|ð6 yi]U7 .p2𳹫p]K]Kd*]U8U7 .o6w:4w:4w]l*th*BǁBǁBǁ%%%2w]l*\Ⰼl*䓞].? 
dsWegsׅ2wrgsWegsׅ%2w.qp]Kd*C]Kd:B>U8U8u]]]|hB 2w](sW#HsW!wm*tiPB>UUY]UDPB:l* e*%2w.q@   9밹qqqB   _8́)[X}VɌma)[؅r2c[X}V|]([X7 }VɌmaVor2c[X 9-plaKd ;BǁBǁBNfl PBǁBNfl +th PBNfl +th +ҶBǁBǁB*m P%rPi[X.-J 8VA e +\ [X 8VABh +\g8wBm(+t?DCم2ŅBC4GCY! }ݣ=.\6=m(+{?MϬ#?67~lz M/6͋.n>6r)r6o~lJǦlǦlJpؔߏMؔޏݰy)cSp?溞=cڶMMW31k~}5?{.gccyx>ƚcSj}lx| Ǧ6cS1.d:yOcuc]8>65>6]5>64fǦ{ǦkǦ[ǦKǦ;Ǧ+Fؼ1<dcAr65?olX|lW|5?osϫsϋsy cycAicycAicA6;>暟dsgY tk~v}5?;>6m^Xp}lZش1<dacy8>ƚcgn^n^fǦ=Ϗ^|2|leHM ǦMM{s;snM+ssnMۜsϛsnMa\\Xr{=ȪA- +_n z/׃ ^̀^2r{=8p> ׃ L^?[f׃|i%K8 .q^]ny%i%Kf׃|%K@t% [t%Fd Wz#^KAW^`Ap{=nŸOAp{=av{=av{=av{=av{=nF\0\0\0\0 g׃Kf׃Kf׃*^.q^Ő$r-e؃ܲ2AΣ {i)LZʰeCʰ!e؃LK gR=鴔ar>-e؃ʐ2pː2%jar-e؃Ùa؃{=I|brn$؃' w{ =|br$؃' =|b:.[=8' 'lqX{c\l' g=aY#OA_{pòK ؃'e@> ex ]{|bOA^{E仉|b(؃('  EQ>e(؃(' /=a=a2''e(؃Kf؃|7O,>仉|b:>n"؃|b:.Ft{&=8'؃|b.q0\Ⰼ{p+\ƍ=a7' +\ƍ=g'\Ⰼ+}br(؃Kq|b˸Q>8>8>8>8> +؃KfX |bV>t+}b:=8' _a+gkgك˿v6&^䛱Ld:0&f,ك/Ldr"Yl"{p l"{#E&D G(2=a6=a6DكLdr"كLd:0=8DV8t`"{qA&¿ùg.54\jh=9g.54 gك#~RCfYl?{p~TI~r889G%%,t`?{s$ gكgr$كgAϑ`?{qAUe?{qA9Vgl?{pl?{pl?{pl?{cU\0\0 gكKfكKfكKfكKfك~~V88wgA~!~=+&g?i7lҰia6|6`jx6wVa=#lfͩ:LasH͉ 604a ,lav oaO&*lR" sE暗,5//,lH" ck i8ln|k@k. {ڥ,lwӎqv" [" {cEvv?5OYͭa?Kk85eⰹsk0-Fqk^.ôApk^.y5650׼\daVlk^." sE暗,5/arͷ0׼\day-_FYk.y/l^ cEv\dayXt{S,;ba;g]{S0 l _S0a< ΰy0 :\Zs)kPay-6\sk?5oX!NB)VʷSB9 gX!NBN _ )V)V8q2v2 vkLblqS/v.qВ-.qВ9 %Z/\ X\AKK+ bK_AK)VJSb: }0)VN +HX!; ] t`S#b>g+HX9 8)VAN%r`SB9 8)V胑N%r.qSpbKPN%r.qS}b)VANB t:rlX!W",+±B-+± u paX!rmX!GrmX!W/,pYppòaX!:FrLjم6BJm#+6BΎl#+䖀mdFv)`Y!Um#+6BNm#+D6Bǁ6Bǁ6   9[e{6%mdK 9epòH`Y/P.qX l#+6BN 9b+b()Vȋb|7SE; yQSSE;.\fvhX!/v.qSp2dSpbKIbK+仉b)V8)Vw; :.S&v:t:t]+t+仉bbbbKqbKqbKp7)Va7)VgD;.\ƍveN%˸N +N%˸N qbK+\ X9 8)VȇV; 8)v b|hSqSqSB9 : : j؅r.Z9 b)V)Vga; ^:.Sv:trJb؅r.SS; 9%SB9 9%SpbK;NBǁNBǁNBNI+t+t+tPNBǁNBǁNBǁN +t~ _8Bg o=Z 8HVhKGPzBG+\ =Z.phKG+\ =څң.qphKG+`z%ң.q@   9زB  9زqB 9qs^   9B 8HV9hKGPzByG+\ =Z.phKG+\ =Z =phJV'hKG+\eI=Z! 
}=B QV8_Ueaq|a/,l~,a0-ڰ6P|a0-Ԇ͵6~\asu4fasՇEƚ,Kaڡ oa]if'ln\Ҙ1 sKc暗Qc67ayjXԘͽaq21PBԘ.qƬp4fJcVA%Ҙ`Bi 8HcV胑%Ҙ.qƬp4fK1P%Ҙ.qƬ}5fJcVAB Ԙ:QȡhU[VE 9k 9eQBjXVh][Vh_[Vȥ .\6,@+\lnXV9hhYVϖB[VihYVh$ .\v,@+䋬h#YVhEXV8PV8Pv9`Z@Z!]XVaY!pòE`Z!.qXV ,@+Ջ%>hzov] 8+dlYL8p5qV[gLg.G; bY!oviY!mY!ov.qXƏv.qX&v.qp2pg|T@: : bY@مrQŎBǁBǁ Ygg|Tqqqp2p2pg.H; 8,H; hم,ҎB>qVaEqvgEqVaEqv2pgK8+\ Y9 jY9㬐ov:t:t](Y@Y@Y!R8P_+YB9 ^: kYKمr׎BBB8P%rrbY!!v](Y!!v.qpgqV8qV8qVAggggqV8qV8qV8qvggg|B9 8qVz; 8qVABή8+\ مrrveY9 8qvgK8+\ Y9.pgK8+\ Y!gWv.qpgqV8qV8qVٕgqV8qVٕggqVQggqV8qV8qVQgqVggK8+\ Y!Gv.qpgqVA%r.qpgqVA 8+%r.q-p?Y 6?7Ӱb6?2ôU6wˇi4l:)ask67as62i6as6_$ 㰹;aOʿ0폆İ$l~3 0׼_I暗+5/Wk^_FW ) cS67Iƚey X|?as{ƺ+lc~v6uMWw)*d`W+2Bq`#e\JU胑2%q.qBɸ 8HUA2Bq](Wɸ }0RUA2%q.qpd\JUA2%qOBɸ 8HU>2%[@SW!'l6urlSW!7l*l٦BNm*|٦ WaSW!6urlSW!'6urlSW!7,lpٱpòfaSW!6u:4urcSW?[ 9cO 9c 9𷩫 .\6u֦Bl*ԦB*l*th*thpYq# .\l*\UaY 8,;6u^FSWe1(6[Ue\l-*-2B>Xu2kH˸ ^˸.\&qQ2B"-*-2B2B"-*\Ⰼ-*\L -*\ We iWɸ bׁq:qQ2Bǁ2 %*䣊e\e\e\.H˸ ʸ bW@W@W@WeiWeiWɸ.\fq.qXfq2 Ye\|p2Bɸ 9p2eiWɸ 8HUA2%q2%qHW!K-*t(*t(P2Bǁ2Bǁ2B>ZU8}l_ˇ W!u_ˇ W!ߌ*@ׅr|؎B t|rbׅr|.g_U_U_K*\ ׁt|:t|:t|rbW@W@W@ׅr|:t|:t|:t|](W@W@W!P%r|ߎ%r|.q㫐S/; 8u_zUA%r|](W9 8UA *\ W9 8Uȩ_K*\ ׁt|:t|:t|reׅr|:t|rlW@ׅr|rlW@W!v|:t|:t|rlׅr|.q㫐C`; 8u_UA%r|](W9 8UA%r|zJW9.cU: 8UA *\??Oa5lo8ô6?Ä͍Ef5ְ6=Ea v|O~ؼ6' ,l^?;Lajrͳy3lJ>L;a)+d`9X+rB`#`JV胑r%?[$+\ 9X 8HV胑r %+\ 9XF 8HVAr%.qB 8HVArB ](9X 'PVArob_˓cl: 9sj; laX!GvrhmX!v],k9Vu ; 9sk; 9s#l; 89vᲺaXe{αBβ+tΘ 9TYHVaZHvdZHVaZHv2pdK$$+\ !Y j!Y o::](!Y@!Y@!Y!R-$+t>?E[evTf'Y!2+\>> O*B>E[eVӄ* 2+SUf>M2+d* 2+\N 9ʬpTfJeVɌUfK2+\ فT:T:Tr2cY@Y@Y@مR:T:T:T](Y@Y@Y!V](Y 9 ʬpTfK2+*%R](Y!eV.qʬpTfJeVA*%R.qB 8HeVA*%Rr^fY 8Hev UfUfUfYevTfUf[eV8PevTf[eV8PeVUfUfUf[evTfK2+*%R](Y!)2+\ Y.ʬpTfK2+\ Y =ʬpTfJeV,UfK2+\ مR.qXTf:gx/K6wן_gGô;6Hﰹ/ii6l~ [ﰹaa]wtW0'Egas}dg暗,5/a%: sKt暗Qt67ayXͽa): >L_FYD=DQ): ô67ay!l>0!l!lnXm䰹}05i,+\AƲ%2.qpe>5i,PƲ%2ԤpeKd,+\ cY.peKd,+tAcم2.qMeKd,;Ʋi,PƲBl,+ƲBm,PƲBΞm,+ƲBΟm,pٺ{6r mcY!6rmcY!g6.qeƲ%eJXVa8j1etrdY!v](Y!gKvrjY!KvrdY!w :+ɮ } }묐& 9a묐3  .\ :+t:+Ů  8,kv.qX6 :+Ʈ%˺]gEu :+doY!v]K:+doY!vɷ묐w^ 
dY!vkY!kY!v]P:+׮% ծ :+\ S:+\ YejY dفt:t ȮBǁ :+]g]g]g.cU  dY@Y@Y@YejYejY?;uVauVg' 8,cU dYejمrrjYejمXծ%r.qp\gK:++]gK:;BuV8uV8uv\g]g]g|qgl/](ZJVn V"峕/B mZ 9B F"B|,B+\ څrz`Z 8Hv EhEhEhXV8PV8PV8PvDhEhEhEhJV8PV8PVEhJVA"BN,B+\ Z 9LpDhJVaEhK$B+\ څ.qpDhK$BP"%.qpDhYVA"%HZ@Z@Z!i](Z@Z!g:](Z!g:rlZ@Z@Z!g](Z 9[pDhJVU 8HVA" %B+\ Z 8HVA"BOk)B+\ څzZKZ 8HvDhK$B+t7_928;g =V=;up-[u p~w?ۅͳ#l~ej ;l~  /\i:l~}2lkW0]͏a5+leoa aޅ76?7v) /Wi:5E0׼wayI.5/]k^һ0׼wһ2ƚ.5O]|:Lһ0g.K`F$Fb wAwAz|_mgg)εU϶Ö϶϶϶g϶g1gۡgcgۦmOv2}vٶlN~|51϶gN\3=ٶ_8k~3=czy1ϼg^{Lbzmϸk1϶ k1϶϶g\5gSgCg3B{mckm8}=+=*mgsgcgSgCg?m8}W?_l{l&~5oYyo5C{yϼg3=l}5g^{h3=C{mϼg^{hm#w ϸl;l;ۿf~ퟲ|Cg1iqgigi>ێjW>j>L#vJvHvF3g1϶ϼ/V>czm/$5U>?Sk~Zqw n/>俿Zq'VӇۻ˟iCξZqyZVCοZqӄۻCڇ!V]ۻӄIVeOq[qy(V2 ==J@!!(sp{={0"{XVž{=F#rp=?{Xa=,o=Þ{Xa=F{a=?{Xa=,qsp89{Xa=,99{Xa=srp89A9ܢsiwj=䍍ZqygVCT[qyYVC^Wj5ZqyQVC^[j=եZqyyV2{+.X.Պ{Xơ۫j=,P.O7o =䙬BryC}"$ =Bry#CI($ =佄BrrC\{ NN!Z!,_!`VHar12nZ!Պ{ȭZqrVCnՊ{Xơܼ[q87q[q8+8q[qyV Zq=h=ɒZq=hVC,VCZqVCZq=h˅ZqyVCZqr!V2BVeV\\ȪCU+!ZՊ YIeʅZqj=,P.dՊ eV2{+!OaՊ eVCªފD+!OaՊ{q@+ފ{q@+8?XWW\WW2핹^{XC2?P <8We2H2{e!T{XaC;2^{Xa 2C*syC*s==82+s==82\p=82C;2+s8앹wPea2^{k8Uq+s2p=,Wq+s2^{Xa=,W{ea2^{Xa=5*s8앹e *s==82ҪC*sy+C*s2Ҫ=82Ҫ=82CJ2+s8앹Vea2a=,Wq+s*s8앹e2{ea2З=,W{e/Q{Xa=,W{ea2^{w":s"^з҈=|"^p=K=u}+xC'u} x u==5u}+x]G_?zw^c ~3wcvޙvα}lɎٱX^ۏMؾOtcJۍױ}:Gcؾu=Ǽv׼vc^JSnk^i1yƼ敶59/aE15w/NWÕle~rL*c?3Hel?Q9^8L͍)ca珩cJ͍)c^ek^Ucekމ`'; 'Ɖ gƉ tcD[D[p:E9w-l݂a-q?8]oAu-]oAn]~X\~ r[p߂u& Ϥ\~ r[p߂^߂eo2*8{0߂eT~ q(a.8,Pn\~ qP-3)8䙔oAoU~ L巠ǁ巠ǁ巋*=,=,=,],.y&[r[Cu-XA巋`r[G.],.y`[Cuo?^`r[o2*y~E߂eT~ `oAoAoU~ zX~ zX~ 巠=_Ir \']V߶ $A`Y JPP.ȓ|ȠE=ؾ<0pwLL}_1T7G-c#)T7yӜ1y߂= סoAD߂{A.F߂\ο_Kkp> r]:~ og X`}8M~S;ܪAnr~ zOp76)^.n]T/7|q/Ya/_l uwQ u,^x qP/XA uwQ `5^x2j8,E5e qP/')6.,^ORl8w z ρ I ȻX. Á / ρ / ρ /ȻX ρ`r@r@^Cv /y2ہ @^y@E\ǔc6}l?ۏpܱ}~L4y냆1yƼ;ߘ׼r~c^y+k[1yƸwǔgolz1yƸX8+L;cdl߶qV7)cel߱Sol6'cdlߝqV;?ؾO07oҏ鳏}>5o>׼>8=Ƽǘ׼Rc^JSok^1y׼Rc^J1y)75ߘ׼Rc^JSol{y+75ؾ5Ƹ;ؾW۷cNuœ1}_4ڌg6cchl?07ՌG5cI1}R4Yaol?i8+Sol?w\/U^quKq z~q\/ߡO \%_/c<<$r\/?zq*ȝAn;zq*}AB; 7+?A0d g8"F`? 
y `M`п" W$#AdD0_^TD0_8(",㠈EEe qPD0_^TD0XAE2",㠈`"28("xQ`Ee 8("##2dD0'FFs?C!A?2?C!A?2?դ˅A?2?䥼A^Q:?%A^S:?EA q(w^,8z#1?,PnO~Gy_pE ^& yipaWu^T0ț s0 p04ÅAa8\xV;\> ǁ ǁ o%. z. z.X. z. ~‹`rpaCv0c' r*Gr0W #j䑁U5 raGyraG9ٮ^,wyaG8+HW <2p0XơCjxQU`ra28jaaa'h=^T04W V V /j8j8j8jxQUàǁUàǁU` 2W eT5X^Ap0Xơaʮ^, j92W /j䁲2W /j,㠪aA jxQU`U <v0XAUÃy,aaEU V V <v0q`0c<>BKA[pI1X֭JnUR :%ŋ*)˺__g,V%`Y*)˺UI1,)^TI1XA%`sIJ2*)ybJUR %`eTR<Ȓbbb{.)=,)=,)=,)^TI1q`I1q`I1q`IJAKAKA^۸xbJAbxb6.)8]Rb.)8,㠒EeTR qPI1XA%ŋ*)8,㠒bJAb,㠒bJYR zXR zXR R%ŋ*)=,)ybEwI1q`I1K}wIJ2*)K2*)^TI1w\\R qPI1XA%ŋ*)8,㠒bJ2*)}ΒbJUR &%`eTRbJ2*)bKQfbf1?!fef1?!f/*(3A:bзA,3A`f1Qf2 fef1q`f2A_280mP,)b?@~6'ӷ"cq~x6'chl?)8Dcylc~bvL_)c~<0Goۏll?5!AȘ׼>Wqk^c7y+85x׼⍿x~ 05xO 8+7}[4Sql۷DchlqVo<`h`x~.pLc? y룗cek^_y+85x1ǼoWqly+85xؾWc^7y+85x1׼c^7_cqkc7;}<8+7gTc28Yaql?:C1 c ~4ӗScgbK)bpu;ȿ*rL1 sL1_ݾ/a]?S ú~XwQ1 mS m r'~<~:ȽAp Njz_+-AD9 78v19yQA>9㚣A? 0A? 0 Q`E#ehd2F(hEE#e Q`E#e qP42XAȋF8(,hdOƌF^T42XAȠ qP4 A?3808(S*U )EɠRY Eɋ-%~JeQ27\ )E oN] Eɠ7m,J^, (E`​˫.J8\ zƢdhd$F^T42hd%FyEhEE#1q42{BG#5q42{G#޾3݉-A z z 6ȠǁȠǁȋȠǁ EXn q(F8sG#>vb4 _ HFhd{$G#<2p4򢢑A OXn; 'ȋ O `rhdGF8OG#/*,hdA8yQ`E#<2p42q`42q`424G#F#/* AA z z zhdhdhdCy`򪅣2F^,Z8,P^p42eG#/W- `򪅣 @`򪅣 qP42XA O~hdA:,hAF#ۏ.8+c}6ZSsl+{}68+cӇ>ca~V1Yasl?8O|cblgc^瘾ח=c^cy+yLq1y1Ǽc^cy+9}c^cy+958׼c^cy+9oq1y1)9ؾs0yL}9ryQˠj2r#A>q9ryQWA>t9rc#A>x9rC#A?D0rC# !`E.ee"2\EE.e !`E.e qP2XAˋ\8(r,eԌ\^T2XAˠ qP #A?S3r80r80r#,# ˠa ӑˋߑˠa ߑˠa ԑˠa zGre;:F.e \^,8r,Pp2#A#Aw8ryQˠW#A^z8rU# Ñ  Ñ ? 
zoew \^,ݎ\f2q`2q`2[ G.F.F./oG.F.pbw2Xơ\;r,P ؉_<5Jy  rwG7?N@>@>ێ?ߣmc6yϼg^{tsln~5G7?ߣykn~v|5g\n~sϸl}q|5g\n~n|5g\r~m?c'Jm%ql~ ?I gmWٶ)l~9Bsl(m?v:A϶s϶϶s϶g۩g^HcwHy_!}59?߃c{3=lp59?߃mϼ k~r~59?߃c{mk~r~!俈2ncMׇۋ|v>p;{p{v>p; ˇ2-Ȥ@fpd>[2İ|rR 3G<Ó@C>@)P d>@fpd>8̇e@fpd>,2q@fpd>,2nߝ=,2{ a=|Xad@2{ a=Od@2{ |Xad"Od>8 p^?zC?dzC?g˵dzC^ݫOƨg>UdzCoQ K|3zz2&_`W=arzCoQꜗ i͇PZ35z Jk>䥝Қ=wJk>͝ҚyCd( Hk>mҚroCБ|q@Zi͇PZi͇5[i͇5Ci`VZar2F[i͇>BZҚyLC!(25Ai͇ܗ)JkC˔|c 5Ni`TZ!)|ȟ Jk/SZar2Si͇e2{Za=Ӛ8i͇eCS(Ӛ8i͇!Қ65[i͇e+%Jk>!Қ8i͇e2{Za=Jk2{Z!|XaOk"Jk>8 〴fpOk>8 〴C(+〴C(+〴fpOk>Қ=Hk>8 Ӛ8i͇eCޯ(Ӛ8i͇_QZa6{Z!W|XaOk>,㰧5qӚ8i͇nTZ35qӚy2{Z35Ii͇e2{Zsi͇5z|F5z|q@Zi|q@Zi͇5{Zi͇5Ii`yM[i͇eC^*,i+HJk>,P^VZ3{$5q(i+Þ i͇e2{Za=Ӛ8i͇e2{Z!U|XaOk>,㰧5|q@Zi͇|@i|q@Z!2PZi|ȷ |q@Z!2PZi͇5-5{Za=/(Þ i͇|Fi͇e2{Z35qӚ8i͇e2{Z/ؑ|XaOkC_#Þ|XaOk2{Za= 9Cߟr>'}C(gpr>'}C(C}"ܣ}C(C?(C_!/L ?|bD9|q@o|q@3G9Q·D9J Q`VQ·D9Gg;DZ0>[DZql?wc91}3:Gcrl?bOXcglOWÕql?x?b~2c^*vg*}Ϙ׼c^*vyͫyL1y;ǼUW?9qͳ95b~|rL1y;~r25b GAp:~|_! oT_Qp?> G ip?> Gլ r~ܣY. inV^T2ȭAnܬ ʋz*ASY mnVss2Ƚ{s2XA/C8e =O?ƙ<f!Nac) E¼f?ƙ  G0/ ȧ0|q 3'0|q 3'S S2JaTfR2Ja^T 3XA)`0~a O0e qP R2Ja8(,E0e qP 3|0/*,fLa8(y)̠ z z åNfOd^T'3qv2~g'3kCw2/rw2~g'3 sw2~g'3);A?8;攝̠7d8sw2/tw2e%;AoNcelߎѱlcel߉ѱ}:9b~>cͱ8>c}2Č1c^jjk^Ly+9517ǼW~slߪy+95ؾ-W~c^oy+9517׼c^o_c~skcll{1Af4ʋz>pOWJp?] oX/~OW ~OWѻBP(ܧ8@Nʋ PYq2Anwܳ8@N -Psq2ȽA^e^P8]`+tQ d2SkuʠƳNj)/Nx) zp?A)uʋ:A!u \ ' A>N/)/N#2S8NyQu`)eT uʋ?mT qP2XAuʋS8N,:eU qP2XAuʠwS^T2XAuʠwS8Nyu \ zX zX åy.4 9ʋJWteLWytrteLWyteLWe2x+~gbvv2m-ӕ2ʋ`rtes?x+^&LWytE+^&LWyteLNW^T2Sw+kr2ȓw+<{w2+<wbiu2=ӕAӕAӕA;]80]80]yܻ:]80]ӕӕ2`rteG]LWӕ r/te|NWtҕA9]^ Fte{A+<q2ȓB+/D+<q2ȟ&NW^T2X᧍ҕA8],teҕ2JW8(]yQ`+e hʋJW8(]шӕAӕAӕA:]80]yQ O z ztetetetE+++e 㥩teҕ2JW^,8],P^q2ȃs+/Wg `ӕ `+e qP2XA teҕAE;],tA+}3 Cc^kk^py+9514ǼʚWJsk^)1y4Ǽ3c^JiSJsk^)1y4Ǽ<ؾWJsk^)ͱ};kLiq3kP~4z>hdpWÌ;A>»3C;A>ƻ3yQ ݙ Yޝ wL^ԋ=A>л36֝ ݙ ޝ ݙ qЋ=ՙ ĝ`,{.Ş8' D As2_,TЅʋ*T ~O|sNvp? 
Ѕ~}Q'AhP Lʠǁʋ*T:󭣏}  r} rkE䓮A} i w} `sQ ;,A`s_ ƹ~| 8O rp\dp?A'9 \E ~sN rO]E4 qP.2/Te qP.r2E8(/Te qi\dr qP.2XA`"/*,\drA\drӥRr ŹȠǁȠǁ?\E=b$[Ao,ؒ zcd $9rKb/uK2[Aޙ%Ƃ-ɠlI`K2[[Ao,ؒ q(WnI^,nI8{S9-CAoN z 14YC z 14䁳CA^84yQ O š O ١ɠj ١ɋ M= MydE&OSrl-}3>8Gl[7fc>|l߆qضۏ벱@jl?۷Gca~5Egec^j[yͫm951-Ǽն)k^m1y-ǼնwBc^j[Srk^m1y-Ǽն<~05׼ږcAm95϶寝$SAw^ ;_ q rg|Ed䃮An a w `dsQ [;_,A/+`d8m/-A-eep?zGAqږ9=~䳖ۖu܏s\n[:zGA>umob-/mܿ 712j[8myQm`-eԶ 712j[8mܿ qPږ2j[8m,㠶E-eԶ qP2-?eږkTږٶ mˠǁmˠǁm?\l[=l[d2]ۖA:ض zeJn[^,wn[`2T-um&mˠwl[`bYu2]ۖ2zmˋmˠǁ ՜ zs|e˄ |e˄ ϔ &ʋWy|e)Wy|eW}V|eGW^T2q`2sf++/*_aAAբAA^8_y\0:_,P q(׌WLry[L-wږAn1ݶ m n[^T2H-bm䉋ۖն re'.n[yE-eԶ qP2-ۖ2j[8xeږ2j[8m,㠶E-eԶ qP2-/m,㠶e'.n[=l[=l[yeE-/f2eЫˠeЫˠ z1|yQˠ էex _z×2 _8 z >ˠǁˋ _}eex _^,_;|80|}6×_=-|ypc~;sӗ\c9~;Wc}l?:Oܱvcl߯Wc~{Ln|c~n5wTϬ#1yU1ǼU<*׼c^byͫyLU1yU1ǼUWXۏƸYgsl?:*>y ϱk~ox~57N>>q?zlm?v 6vvٶl;fl;l;ۿssg^{397#gp!{|ȧ>W|_>Xχ)yȧ`>rC> +|ȧa>q χU2[?8o~qyXa!dU|_>Q|a>ng{L^|?ng|S3?ng,,?n~=,?mïe2{3?q8χۇ_ ;+d;Ȑr{vF+*\?>9@fKfï_.q :>8X>8ͯYP>/AԠKj94 w}_:P>ԠKj5ҁA@ \FR>/A\{ ._A/A- [>e?t ߲g烮!?d\ gp>6r#l >8 g烎+.烎 [>8gp \ơ>ae(*灲>/W? \e|>ȣ >/W?d#Gl|_>F)gp>a>a>/W?\0?\0?\0?8ȑ%%%%A6rd 8ȑA?d#G?A5e|qAl|qA?A?\0?Ҝ.q.q gKfl/KfKfKfKfp.q-Kf煰>:>8:>8 ;ܲ>8 >>8r#烎:gKfKf.qr#KfKfKfK矲>a>a>a>a>ٕ.q gW>a>a^烎:>ȋϲ>8烎r+烎:>8烜]\ Gw>ٕ.qXr+Kw>Q.qX G>a>a>a>a>Q.q.q烎f烼 烎 o6烎f /.q.q[fKf烾oKf.q.q 烾og!P>躅AO¡}u 5胮[A$jߥn}ГpAA\vV>I8Ԡ.u;Aj}1j8j8,ϡ}qAO¡}q48A$jAjZjA$jZjAt6589.kͭNeczvvWl2_67/G.M첹|ey9E/s)z>5/ey9E?&ey9E/s)zk^NFes2<l}LNXt^6.d5Oes2<ln]ƚS2<_iͅescr^6]6. 
]˘#:E?te͵eskvEvܟln]6w>s1Gt^暗S2׼S2׼lnH\暗S2׼_5/ey9E?&es2׼l=0:E/c)>'y4p4ÁA, PlhG G<[8zA- tlh߄FA- 8H8Q]4:P ;ohp.8с}µp4A/A?_EJs32fUAѪܛν hUR|p|py2@so>8<ZU{7{Aǁܛ:T(Ui LJK*=PJK* ΣJK* ίԂK*=[* .q4A* .q4AJ-8?PKd@ .q@PKd@ .q5 ʀ\ joPKd@ Am@ :4P58Ѐth@ mjq5i Aǁe@ rdjq58Ѐz %28Ȁ4e@ .q5i %28Ȁ\ jp pPKd@ .q5A ^68Ȁz ANl@ .q5AҀth@ :4yԠ@jq58Ѐz0 6P AǁԠ@jS/P\&؀\ jC`P\&؀a ʀa Am@ .qX&؀PKd@ .q5A 68Ȁ\ i@ :4Pa2Pajq@Pajq5; AǁԠ@jw"l@=PP"djp ʀ!PKd@ .q@PKd@ .q5APӀ\ 2=`5AP5APoBi@ :4}߄e@ ni@ zG2]4]4= @P[PӀtҀz AOi@ .u+jУ?PBjp %+kP AOi@ :4(jГpP Ah@=pyemjq5I8 .m@ :4PKԩGl]6/ЗCes}܁l\6w.mKe(油pjl]6?'j3.es|< \ң^暗1Q/sKzk^z\ң~Lz\ң^暗2׼?z^eyQ/cSzFGl\6w.cSzkzeyQ/cSz75O=ey*P/ ͅVǤ@lBln,\6. 1+T~L.[ ˘*P/kesk첹AqWln}L.cV@5/ey)P?&ey)P/sKzܑ5wy)P/sKzk^ ԏIzk^ \R^暗cR^6w .sKzk^ 2<? ojg^+P<Xzny5cA~@=P ϿVy5>+PM VgjGa+PK@ 8ljpn(j5A7Kt>Z\⠛FA Ԡu/A7~ܪ Aw~ܹΝ i?}p}pyl@us>8w<;ڏ;stG Νܹ:tG ?%8ȏ\ ?jpy\ ?jp [pʏ\o؂KG .q@QKG .q58a O:5A~-8a .q9/58ЏtG aiҏtG :(?jq7m?Iyꁒ]Fyje9es)1|lX^6.;pdl]暗2׼͍zk^N\r^暗cr^暗2׼dat^67.czkNd5O'esCᲹvkNXt^6w.czkNey:Y/czܖlJ\6>&_eesGⲹ!q܏9czw܋lnE\z܇]67.{r͟k͋es?czwsD_eyZ/s1Z/szk^neyZ/szevk^֏zk^\^暗c^6w'.szk^2<}?rS' ^es)<\-\ 2׼\ 2׼\½5/½5/eyp/s˅½l6^ƚ 2<]͍ƏɅ{k.es2<]½ln/^ƚ 2<]͝Xt^ƚ х{ܛln\67*?&e'es[油+sܔ9 czxܐl\х{܌^/^6w&/߬esSczxsDeyp/s˅1p/s˅{k^.eyp/s]j^.\r~L.\r^暗 2׼\ Esk^.\r^6wg~]avy 73]A \aq. 
vy @pS"AM=p["AS"AM Cݠ?=p["h jnZt.ZtE7~-A -ANl=Pݠ˄ GOE@Yt?آ gZt-AN"l=Pݠ@n[t-ʢLݠ@nqEemnqE7ȉ-.sh[tKQ-%4Ytvb@)vζb7ȶAVmnA~XX{Ava Š NV(nVvb7A .qb7A .qb7A)vb7A[ .qb7A)vl;Y{%Rvb78Pt dnqb@)vl;Yt :T(nqb78Pt=Pݠ@nqb7A%R8H{%R8Hd֊ .qb7ȡ%R(nC +vK .qb7A)v[{%Rb7ARdo܊ݠ@nqb@)vAǁ Zt rTdnqb@)vYt :T(np%R9*b@)vK rTdnp%R8H\ R8H\w%R8HH̊)vb7ȑ%R8HA*vAǁ 'V*vAǁV9!b78Pt :T*vY{Ê)v[{Ê GbV8,0=P 'V8,0 rBlnpÊ 'V8,0 rBlnpJ؊)vK Aǁݠ@n&=Pݠ@n& :T(n& :T=`b78Pt „J\ n .qb@)v?dnp%R(np%R8H\ nv*vK=PݠT8H\ R8H\ nM AǁݠPt' zhH]]=C@wxwӿtӿ{A ./nCCwRBnp%˳owA :(n3twA =pymnq7:.Ͼ :wKgKC{i8?t_67/G$͕w\xC~et䲹.ku ^X^6es]6.MRV^6es{< l ~Lney/sm|Ɨ65/ey?&ey/sm|k^nFes2<Ɨ6l~LnXt_6.65Oes_2<Ɨ6ln^ƚ2Pn Ovy8ȯIպ vIiqp8m!n];Pn Ov8j]pu%Z\gKt.AW<%qf,* cALÂR<; γ V&(erpYpYgg+,8΂,yvvfgAǁ<; γ@e򁚝QUpU8H\ eR&8H\ erpU8H\ erp~\ eR&8H\w-R&(erp%R&L>P)68OnKL AԬL:T&*XT&*"Cq298Pt O98}Aҧt>1}AҧBr)ݫO9S8Oeor)k'ܣ@lt& &ebAsFs399AMAw?hb>P& %618t&@sq9ȹMthbrxbsq@thb:41mb:419?ejnsp289amb9wfis61{Mds& Wlbe21ns,Kdb>P&Kdb&Kdb.q@Kdb.q9A&f|L%21Ͳ98thbesq@fthb:41(sq98thb>P&@sq9A&L%218|L%218Ҕ@KdbraspL&[db>P&Kdbn218dW&?Hs]uMAǁ&eb:41isq9!MAǁ&ebrdsq98|L%218&eb.q9!M%218\ spL\ sp-=@Kdbr$fspLH&Kdb MAǁ&@sbMAǁ&@sthb:41MAlb>py.bspLANmb>py.bs#1K"61(sbK"619!9ay.bsbK"619!9A&ebrBlspL%21&@sq9 61(sq9 619 61thb:41ya&eb.q9C618|LA9A&9A&Kdb.q9;M%21(svKdb.q@Kdb.q9&41&@sMhb:41ˀ@MsEj98PtizNM4jSti>P48HD婹5AOi:4jSti>P4j(R|ܚ@Msvj\[ti:48,Oͭi:w AO(x_<n Pt砇<l 9&x |O]`YA(lo_6O{.?צͭAes_6esﲹ1=3lln]67/ٗ1/l.^es+/ cJ_6/sK*}kRJLR\J_暗T2׼T2׼җ%5/Tc}kRXJ_67?&eyJ/;s}XJ_ƚTE}kRXJ_6w/cS*}kRpwI8}ln]6w.~1cNLh/~˘1 /}esqlz_67?71cN_暗p2׼p2׼ӗ%ln]暗p2׼ӗM\NL\N_暗p2׼pwk^\N_6~ӗ)l7̓MASlbSusP 6UyX:OT O6U8ap8Tݦ]=

PWKt0M%zA:18OZP9K.qA9K4.q:8:Aq|ώ@uq:Iuq:8q}r:Aǁu`}ȧ;o^ nQ}r%젿y)';o^ nQ7/AR}rARc 5Jcd[#X]&X(uЭj.jXc}4LrbuЭj:5%X:8PctrcXjXct>P 2Xj5.Sxk5AN%>p[c\Ⰼ㭱.qX&X@k=  VX{Zc5AXIbXٿ:OklXYc}4A~[cdi:AiB:AiK>PiK.qAiKaeX8Hcd@uq:Ȇ5Aǁaeuq:8Pc}4Aǁ@uq@i5Aǁi\X8Hc\ X8Hc\ u[k:A X8Hc}4A3.qA.q:A {X(up4Aƭ.qX:8Pct>P@uq:XjYct>P GEXj5Jc\ up4A>Piiup4%X8Hc\ X8Hc\ u#1k:A GbX8Hc}4AĬ.q:ARct:X9!:8Pct:X%: 5Aǁ@uq:8PcH$X8Hc$X9:ayKbX9!:ayKbubkK$X9!:ayKbubkK$X9!:Ai?Huq:8Pc k:8Pc k5Jc k5Aة:Xj0aX8Hc!kK>P Yc\ up4Jc\ up4%X8Hc[zNup4%X(up4%X}߄Rct 5Aǁ 5AǁXj5Aة>P@uvj5Jci(Rc\ⰼ&:"5Aǁ@uvj5Jc@uEj\^[ctzNkrk5Aǁ55%kr;:QA{qto't\:QAON7NuГS:H@uУq??9]$8gaeSMC_6M1l~˦/Gl/˦/oPٔ_6M_6&eS0.)ljC˦L)v_6uk~c2leScَ\g;/svf;/sv_暟ؿ5?۱M_ƚaeSَXc˦/cÎXcXc2<ؿljz2<ؿ5;/݇˦/:v_6O~MeS3M_ƌ}.M}_63;/z/rPeS?M _6LeS/)/c`eَ\َ\g;/5?۱k~c7\َ\g;/svf;/5?۱k~c5xؿ5;/ڂ}k`~vuy:Aݥ~'uy:Ag~:A~'y jI$i Oj?8MB$A^ ӄA^< }p>8M\0{\0{W%W%WiB٫٫4!|p^8^8^8o?aj?aj?8 ٫٫40p?aj_ ʫai«ȃ:j?8<8w?r! =!~Пn?krAs Zt;{ltП0r?OLAe~t #!GFl~GFq;8\0\0tkFt`~ AF9AFl~ t`~q;Le~qANBe.\\%-IO&|tAv$~?]I W$ܕtA~H Yng^tAv$~p,88.KfKfKfvpn?an?an? 88͒t;8K\0Kd7KHt ~,It ͒tAHtAHt Ht ~p,~Ksn?an?angKfKflJ.qr!Kfvpn?Y.q.qPn?an?an?an?an?޸.qB {n?8@vpn?8@X)rT$Y GEn?8@vpn?an?an?QYY /VJYYYY.q.qr%vYY 'n?an?ȩ.q.qBr,:n?8@\^H n?8@:n"c?.u<Ad~p幈َ c?\Dv9n"c?i./Ed~Zٱ\<ANke~pl~plǾvرt`~dvرٱt`vyyAvر$vرt`~dv8vyGv8vl~wyd~pl~plv8v8v8v8v= %;ol~C`~pl~Гpر\0۱\0۱ر/Av}1vرt~зF~Y:Aɡ~Y:Ag=&:;8tV~crtV=&:%+:A~p`\=:A:Aɡ~q:A.ƥ~q:Aɡ.ƥ~q:A8,ƥ~p`E=VAH0r??>tF=V;Aϑ`~cUtF=VA2Aϑ`~pbty_67/ߜvesy,ul]673/[\]6겹yտln_6Gczzes?9Ma\ޗͽ\ry_暗K}k^.ey?&ey/s}k^.Fes2<]ޗlL.Xty_6'/5Oes2<]ޗln_ƚ2<]ޗFesysln}^6w>/c^^6w=/1cty_6wP. 
vy8rfp.g8.]P3Kt93ȳ]%\˙AlS OGy><8O>H xg$[+8O<'~,a a ?l?PC< C lC5 :1LpJZP78O%T2ACY&ey/ӃX_ƚ\yppkrX_6 .cS.~krq[(l^67k//O͍>es2esC{Ges{cz!|ܜlU\6.s1$x2׼㗹%㗹%5/es2׼㗹%l,]暗xc_ey/sK<1/%5/escQ<~kesOYJiR >y<3A,%?P6<[J1R B()yKɃ<^K<KɃKt4ALK$%sR2=PR $8ipn[%e\[A-%?PLKt4s%e\[%eha̮5S dvy$<8O>Hy2΃/8O}Ieeeyr&1eKK~ӽ5/1$f5/1ey/sK(flk\ƚ2<#I~kbe42<엱)fld\ƚ2<3X_ƚy|(flz]6w/Ǘ esm|G14e|1]1=Jln_6N.;\ec_Q~k^b\Lb\_暗{k^b\_67.sK1/sC2׼엹%f \_暗(f5O1es{k ,m\di{'#Kۃ=ӑiyD=CJ䷁A~X7J'%\%`i{pn(i{KۃKt#6A7bԍ݈ .qЍ 떶%݈ .qЍ݈ .q%Խyij@M2cuA{RaϺ< |ֽ({p}>ރ>Q~yzAǁ< >P@{p}Qpq@M}Y\}ptJ\ {pty\ {pty\ ҽ8H\ {ptJ\ot%ҽJ\ {]`ҽ8HAރZt{:Խ4{:ԽE@{q=8P~. ރuAǁuQ(AKA]V?p.TݫJ?%U.wwWC~J?SPo~?=ʳƞg? =Aw??P]`9g@ynҳӞyg?Aynuѳt:9Դg@y=AN6:(~M{=Aǁ,zҳrŞV=%ŖO^%A٫?֥%A٫? ?N-?P ;qj KK?ȏSKl]Z\ 8H\ KK$.q?A%.q?AI?[$e` 8Hd@ q?֥%Aǁ%ui q?8P$Aǁ@ q@I%AǁI\8H\ 8H\ |K?A4%.q@I[\ p$%8H\ p$%U?ARdW@ q@I%Aǁ [J2Yt(?P LJ%J\ p$A^@IK$B%%8H\ p$J\ p$A,?P[y$f p$JHIK$ %Aǁ@ bK%Aǁ@ )K[t(:(~Q\Y4*R !J?UCV9J?/FZp* V8,r8j~pp*TUAJ@UAJ?8PTAJ?8Pl*@~qJ?ȻV(~pTA^J?A*U%R8HT%R8H\ ~pTAϖ.qJ@-S\ ~pTJ\ ~pTA_JTjA~qJ?8PT*D~c8J? Qtz*MR$JSt?P*T -?pyo~p*1UAǁ*TJ?4UAǁ*pT<ƷJ?8PT*VU%c|KV2QM lJSlJ2QJS̕8,ok-zD 8H\ p$J\_ ~ꃆve_cz{?#y~A#?Pn| 8_',iIu| 8_':t Nt#^\~}9EA?!2/rrEAEAEA?}99"}99"}9="EAǁEjCq" xCq"!^t!19.r:\u/r8PK!1^\EjCc!AK@-r.q"-r.q"-rz|E%Zp9=>"[!198hCp9}>EAǁMEAǁ4-AO!qCc8ny8P[N<2nyz*-jC)㖇tʸ@myz*-%e[\ \h!a2xCqt@myz*-Aǁ[qÁ[<=文-<ny:\h!a2x!!<csÁ?>ny文p[!!<}[sC3Wny>ny8pye-%+koyz-%+koyz-%+koyzW+ .ї3??? 
eD2esy1l/ey sǴe>ӗ\Zqk^+ ./ \Zqk̗cZqk^+ .skeya\qy=D(8P!nz "e8P!|z] .q3!Kt}@%\np8n|8PwKt}7ȳC8np8npC|!ݠ]oAVu"8sTq։Aǁ['}݇ .q։9:\P%t_['}݇ ɭ%:qN։m.q։u"A['Ku"8[MAW<7]\s󦋠+.xnWMAW<7]ݫO?"f 7]MA@"f 7] 䦋 tt3..0n8P.nrEp)0m7]MAƹ"[@m.qЦmMAǁ.7]y7.ԦMA^𦋠MjE"8pEqঋ;att"7] r7]8,ü"ȟ,lzEq/'2 ^fOeAv?@-rTeAv?"޸YeAv?"޸Y8hŁZf\e%Zfd3,K"A,K@-.q2-.q2"A,Kmq8pY8hE/:\ft"fY.8P,lzEq2ejEq2eAǁ,2eAǁ,K"ǥe%Zf\ejEpY8hEq/8P,K"޸Y8hŁZfdo,K"A,K"-.q2-qe%ZfA.qeAǁ,YeAǁ,Y9*2eA^2ejEs$/:\ft@-.q2- Ye%Zfx/.q2-.q2-8P,K"A,zyŁZf\eAN"A,2 ^^f\e%ZfA.:\ft"!Y.:\ft彠Y92ej_ESL8py}Am"˃78P*bz_Ep=oW***bz_Ł˃7r}%8,޼"ay}%WA:Wt"ȡU}Aǁ*{_Eqྊ"ȡUzB}Aǁ*U9"A*}%WqUy*K"A*Ծ.qо.qо'W\}j_Eb.q8P*K"A*0}侊}A_ྊ}Aǁ*ԾWtʸ"IU}A*NU=x澊"q_EЃg:eWqU=x澊E}.B.qXW\"8p_EЃg:WqU=x澊},08p:x_EqྊWq"t𾊠}Aǁ*KU8,BxJ2L\ft%pŁZf?[.":Y<2+,2qEW":Y=s2b2^f\<2L\f\<2L\fuGIrHE4nI$3jceVInCyy1Fdblߥ6-߱fl7|1;C_c~H3ь_)f153c0-<~435Ř׼bccYy+f15Ř׼b)f15Ř׼bc^Ycc)Gf,˽U$,et7XA{ro.8roY`] !5`] q`] qPA,,8˽A?࿺܏y v^#?AFp?~~y v^#?AA5m~#_ĝWq56FFFp??A1=km~eqP^2k8(eqP^#8(qPy`5eqP^2k8(ܯ+)qP^#_I WRetCA5fA#q`A#q`A#s{4,㠂FX\qPA A^cqA#XA`4,㠂F 2*h8!0 2*hTA#C`4eTqSYqPA#XAD A A߉`A#q`A#q`A A-a UzXzXl*h=e,h}̂FSƂA4>[fA#X .h,g 2lpA#Xơ .h=,h}̂FA4>[fA#q`A#44ǁϖY8X444eFC;\xϑXXW _ٲJ`A#s4]z%9 A2`A#s4>VeA#/F4]q(g9 2l4>GbA#XơFH6(9'ڶv>c}mP7_y^~m?_}m;_}mjymfym~m;ym;ymVm)m;m:m_(m'c{zL5=xm;y׼k^ʚy׼k^{zm7q#<m'7c{z5y7^۶_m^Gz5y7^k^Gz5y7^ێj!ڶgڶ1ڶ/vJ3'4^۶^_۶_v_GBclm7m35y$4^v׋__Ne^^v_ێd^Ndk^{B5=y ׼kk^{B5=ڶy ׼k^ʚmy ׼kN1$4^GBmm5y$45KpLyHy|P^#N_׸5.#5ԋ|*P^" ׸'5{^"׸5.Xy׸Xak\,Xa{Þ׸g5.qoE2ߋe2ߋe젼Fp{~" ׸Xa{~b2{^cy5.z׸ 5ߋ祑OAGprq;'%=.Y@Iy <?!>.z.E#>.N.E㢷Kg>.z^}Eoqۥ}\v)zqXa}\,_IqXa} qEzy%Cq@"eqGp}\ >.zq@#XqEJ,XơSbr-OȝB>.r\ȝB>.#S"F{"Fȍs>.q{qXa}\Nz8콏e2{#>.q{8콏eGp}\,>.q{5GoqȝB>q@"w q@#>.rP>.z>.zq@#>.zq@bqXa}\,>{bqȍs>{bq}\,>{"7Xa}\,>.q{8콏8W#>.q{2{crW>{>.I$=.HI^Hz\!Qs$U;.EW;.EW;.򪿪q,qWUX^X^XV^XV^ՎeՎG"ϮTՎejE]qß68ՎEb=}Z02{b?ae02{// 10E}ya9.zq_hOz\1˿О2$={IO$=.ID㢏D#'=.Ie>٠G Jz\,P%=.q(g=Hz\1=HzE#q〤E!,g=Hz\1w6(q〤E8AIe^ Hz\!}rE$=.I^ Hz\:~C&$=."E"q〤G2l%=. 
IeJz\!8C&$=.q?K>'{c|l?ۏc^~r^ۿw}|l?4όۿphlۿlScl?*Wck^1>ƼWck^Wck^1y>~Thk1y>gclςƸgcl?gckUqV m1uB=}|l!78+<}o|Nؾ^K۷C}ml߂O W'dk^1yuBS'dk^1yuB=1yuBƼ Ƽ yL1yuBƼ W'1uB=1yuBƼ ۷1yvB}gyk7qͳc۔;!A>CrPaʝ $ *wB>lOc,㠋 'wBet9XA"r."8"rE`]D`]DqE`]DqP'AvBvBvB|p'."=PNH."Ot9qGd$X5 U!ܯIO.+jӋ&Lp!&U5 7d c\5 AU!8j8jE ǁU~E= ~E8j,㠪AUMeT5 qP$ߋqP$XAU~/"XAU8j,㠪I&U5 qP$XAU EjrPU`UM5~M(XAd$.0VMVMVM4Y5 zX5 zX59[sA&A&A&uk.q`$q`$q`n8\&Aアj,[s25V4&uk.XAފf$XAUͪI&2T$XAU`UMeT5 qP$j,kA.kAqPwyXz7=Yٵ7=Yzӓ}GЛ}A>eTqP#XA}Gj2}Pv#ȏ̵ w\#s#->kAn䆲kUr 7]qPj2}8k2}8,A>eTqP#XA}8,G[}o>2p#q`#q`#4>>kAkAkUzXzXzX8GGGj_^}8,A>eTqP# e>,Gʮ}8qP4UqP#XA`>eTrCٵ}8k2}c"XAu`!,:DAv AAv888qPuY!!>_aA'A'A7qPuuYz>Y8:DǍC|w]8X}w"Xơ:DCyuǁuYzX8:DǍC=C}:AA7q:D:D:DCyu`GY&%X#鈠ᘎz0<A/#Ug:`y 鈠 A214A3+AKNG8``A21,P^u:"C&#eKؕ?}}l???ԕbl(ObleϜSWblRۿ il?mۿSWbl6Ohl&zL]ik^]1yu%ԕWWbk^]1yu%SWbk^]1yu%ƼՕ1v%1yv%ƸٕۏSWbk]}|l?EgWbk]ik]1y!1y!}|l?6zL}}nl$-1y!cؾ;>5>ؾ^d7c}ml߄ψ#ԇWbk^}1y!Sbk^}1y!]1y!ƼՇwƼՇxL}1y!ƼՇW1!]1y!ƼՇ7}1y!}oyk}<c\=Dj9:u6߷)щ rtnDNAE'|pt"gG'|vt⠢2N8~s78(:2|Pщ #st"XAet78~s7@Aoq Jq`oq`E'dt"q`t"q`t"ȇG'~sDO*N=NTt"_zo׿&t}u"#_`WX&2Pp䣛A>9Ea`yǁY8X^ow#q`#q`#Xơ *GY UV.>db"#1.^ ]gE+w8X^eu"/F.>db"b.Ug"b..WYq(:t!C2UV.>db"XơEЇL ]8WYk?g 1`cf~&8Zc5V1J9ؾ?H= 5acY~5Y= U5CPc5*c Ԙ׼*c^`O15 Ƙ׼*c^`< Ƙ׼*c^`yͫc`Mc\`qͳ13= <+c~45 <+?Goq{c^cz ul?XgclFw3Tw}}l?g1w:[c\`;яӱ}|l?C۷M1yk*c^`yͫ15 c`yͫ15 ؾ<5 Ƙ׼*cۘ׼*15 Ƙ׼*c^`< ؾ<5yU0}Xgcl\ǸYxL/qͳ15 ׼ ABF.d A.BFO .d BA] 1ą D\Qą*d4BF#.dBA2eTq`]qP!#ȇ2et1 ABF.F8bA]q`]aŅ,㠋A>,㠋2,㠋2*d#3e[bgy|FCyK_zz,-13#q`>#q`>#ȳH33>b>#q`>#q`>#ȳH3*,|FGg8(qP qP>#XAg8(,|F2g}|F``3e8|F2g}|ƃg=g}|F|F|F|A3*1o|FgT>#e>#e>#c83*21A/`{g8Нq(;801Aǁ8XCw>#q`>#c83Нzzq(;qsQVByO^ gT>#?[3^ gq|ΩA129A29A280qs|FCyϑq(:92S3>Gb>#Xơs|xpoD5Oslߩrl$15#S?ı3Ӫ秶ؾ5?TTc7cjkcc~D5a1Oyͫ15~2c^jkyͫ15cjkyͫ15Ƙ׼?ƶ~55϶<c!cjkqͳ1️Oc\lk؟ؾ>=qͳ1o?7BS1y5=}}l?pzLm}nl^w1y5;cؾ>5϶ؾM׻cؾC=5>S[ck^m1y5ƼxLm1y5Ƽ۷ǼW[cl~W[15ƼW[ck^m۷Ǽ5ؾ<>c\lk;c<1y5ƸzsnA>oinlFO'n|A] ݍ R1ݍnF*nAu7eqЍ`ݨqPw#w7etA&FnT8FAݨqЍ`ݨAݍQ,A>͸,2Q,2n<'_FunT?A> w#_8;VRIn}pGpT [A>X;VA>qP~*߱ I׭ u8;VA[A[fAݱ zW ͚ǁcܯ52j}8qP`>e7keqP#߬ qPZ2j}8,A>eqP#߬ 
7k,Gph/8l}yAíǁǁ/.M>>>eGGAݻ zzz8{2w,G;l}8e]}@Aݻ qнˠqн˃j}}@GZ2j}T#XA`>eqP#?8mǃl}#ǃϭw2rP!7 z3!79HЛ HЛ HЛ 7A@ d$q@e9HB 2 8(rP!`@eqPA@@@g`Hۥ = = y-!ǁ!ǁ!C AC A0r\u$Xơu$Xơ\u$ȏ!  r! ?2@s$ȏ! 09H>H;T$= @v$XA! 8(,H[z8(,HB  qP$XA!`@*,HB Aa8rP!`@p$q`$q`$-=@@*䖞C AC AC  z z z9HHHB _^ 8(,A@e qP$n@*,H;8(rP! w qP$XA!`@e r! 8(䁏C 2 <H>= = T$q`#cG9^Ԍryl(GЋQryl(GЋQ 78(G,jE9ܬw#:8(Gr^2r<(G(G(GgWr=r=r=r,o9qF8i` 8i0 iE05`Z# 5 FNky"2E05exiǁiǁi O8X^sZ#=555*bZ#XAi`5*,㠴FA2 AA28080808qPݍOu7gw㠺AO6AO6AzqPݍ'ݍOz8FЧ^ndw8X ww#XơFCy'ݍǁݍOz8FЧ^n=nNA\n,88,P +Li8fZ#?>5ziO5cZ# 5>dZ#o5>dZ#q`Z`yi`A1,P^tZ# 5e+Nk}ĴFCyiJk8ϟ%Qc9)?~5c'<c~c;Vݍkl?ۿSwclnl{Lݍjk^ݍ1yu7cWwck^ݍ1yu7Swck^ݍ1yu7Ƽ1v7c1yv7ƸOSwckݍ};~l?g[clmϞƸwKc5c&ؾ>8=ؾ 7﯏c5ʱ}g}lXf[clߊ~L/Tyuʱ}ml߅׫15Ƙ׼c^jk<Ƙ׼c^jkc^jkyͫ1oyͫc^jkyͫ15cjkc^jkyؾclkqͳ1.c\lkA>¸qPM b9MOQM`59eqU`59|q#XAWonr8*vbU`]qU 82S2,㠫2,&ǃlr=lr=lrP&A]zMǁMjrAAA>$8䃒u;XAW|ZzKqUՅ S 7`|Rq$$gGBBX+HHpo`$$XA7H*#!28(rP`EBe qP$$XAO,HH2EHHH{5,uɂH[,uɂAD޺dA$KD޺dA$KD޺dA$|D % "A ,㠂ADeT qPA$XA*8 ,߲ʂHHH;,,[] z/ǁǁ /? 8 8 r\uuA$q`A$D .8;.8k. ~ "An GH}.  
"U 4 78\ r*H.8 rP`DeT r`DeT qPA "2*8 ,㠂ADeT qPA$ D ,㠂H.=,=,HADsA$q`A$q`A "A "A "A "U zX zX qPA$KS`DeT9H "2*ADeT r`D  "2*8 ,㠂H "An rP`DvA$XAY 4ǁǁyX|5kU|5kAnqP5`Y`yqGoqP5 7]<?ɍs<<<DOf#?x;zuK;c#;>Qd#;;;-q([:Y^t#Xơn`GCy`򺥃2uK;*,㰾nzv\[>k6d9_[>k˧[זkG|m-er[ז?5qm9źb][bk%wm9_][-_p5qm95;yͯk˱Ul v\_׼`55[׼`55qk~ vk15=qk~v\[Nbk츶l_[b=^[-;זsk\{ڲ [wI]cז k4*<-[tזkeo8^[vݯ-׸ǵe:xk~}ڲ'wm١5[c׼ט55qk~y֘55qk~y\[yͯ1k^kڲvk~y֘55qk~y\_c5qmق5>o}}|g>>o~}|>>B׋LJ|A5!~?}|ȧ~>^\CqXCqXCqXpXa }|ȇ!>>,^~q }|z2+qXpXa!xqa |BB28W?,^={CL}^㰇>>SBzNjkCLJ=a}|(>>8졏Nj8W?BqXp^Sy2=w>%/͒K@ ͒Hfɋ띸Tfɇ~.ۛ%.>\.fɇܐ@N܇˝)fɋkN܇˝3fɇYz'C,7K>\Iމ7K>\|\7K^\%.B>\I}Xam|Xam6K>,6K>,6K>\I}Xam|Xam|\,yqm|Xam|Xam|Xam6K>,6K>,6K>\I}\zqm|Xam|p5ampo| 5h|q؛%zfɇ4fɇYao^7K>8͒={Yao|q؛%/h?,^,л{2%8h?,yqDa>|ٛ%qX/Ѿ6K>,,,yqm|Xam|Xam|Xam|Xam|×͒8-6^%zShɇޓڃ&zv|==hɋkCoAEM>4[{CoA}bM^\&zv|=ha5h4444yq |Xa |Xa |=ha|q؃&bX."h7Zɇ=ha|=ha|q؃&/|q؃&aX."harA8hX.Y!, %/͒߇fɇT,h6K>i%r͒fɋkCnpY!,Ah6K>,6K>,6K>~%qX%qX%qX%/͒8͒8͒8͒fɇefɇefɇ@ŵYaY!78,7K>8͒߇fɇY,}h|q؛%zfɋkC,7K>8͒fɇYao|Xam|Ksm|Xam|Xam6K>,6K>,6K>7%/͒8͒fɇefɋkCnYaYaYaYaY!,yqm|Xam|o4K>,6K{CYao|q؛%/yJȇ^&{yŵ7#G^\#r #^㰗G>8yȇ,c_LC\C#.!yȇeʛi|Xaȇ=.a|ȃ-E^,o!.!E>8qyȇ=.Pȇ=.a|ȃ-E^,o!.af"q(o!.!yCy3 q}E>,E>,E>,E>,E>yq|#=.a5.a5.'{\$E>8q}E>8q={\CE^\"4h|XSָȇ"/q={\COЧA{\5.'{|Ӡ=.'{E>iLvyqˋԈ|XơHȇeʋԈ|q"4h|q"/q}E>8qE^,/R#.a|Ӡ=.byq={\CCy/qˇ]ދ|᣿ؿ{|?=.?=.7ȋHE>wg:{\Cq}E>8q={\^$"q(E".tȇe{|3=.a^$"Lg|Xơȋk\2q,`lOE0} <~5c?c~#c'?c~5p߁<2[c7=~55Ș׼#cc*yͫ<25Ș׼#<25Ș׼#c^*j-Vydk#c\UۏSydk呱}185ؾ19oӏVc\,[ıjk呱}w~lߜۏSydl߿7}}[~k*ؾ%?ȏqͳ<2a?Ǽ ؾa7o_yTWydk^1yGSydk^1yG1yGƼUƼUyL1yGƼUWy1G1yGƼU۷7#c\,c<#1yGƸYgydlߋgydkU #t$ WIU \% iU \%9*IDP*IET$'#WI|6r$WIJ,*I$2,*IJ8AUIU`q,㠻2䃓$u;XAw|zr$XAwet;XAweT%yUǁUǁU \%9A$A>QJ8JrPU t$q`$q`$ȇ+WIVI|r~8wOY8AQU`qoqYB %*KA> #K(u/ȧDP|Nt %X 7d % P~/MP/ |A8rPPP8/;]AK(UB X`PeTB9JJ(2*;]2*8t8rP%`PeTB qP J(2*8t;]UB qP %;].8,y%ǁ%ǁ%/.MPPPoJJA zXB zXB zXB92,J;,8on}A qЍߠ|XB qЍ߃*}JJ(2*T %XA%`PeTB qP %PeTByoP?<=(~lƆA5TPfC%46TAcC*A2*Ao(2*Ao( *P zC `5TeP9J*2j8rP `5Te e7TdC%q`C%q`C%6TnEˆJJJ+l=l=l,P zP Ă *2*2*5C7T)tC%O jSJ*AnrP ϑP rk 7P9J[#nqJHnTC%XA 
`5T)tC%XA `5TeP9J*2j8rP `5TeP rk j8ֈ*A*A*A8rP w P zP zP9JJJA5T6T6TeP TC%XA `5T,㠆J*AnrP `5T8wC%XA jqJ*2j8,㠆JnTC%XA 7P qPCA6Td %Xơ J!,T %XA%BXB qP J(A,JJ(UB qP %XA%YBy%ǁ%YB zXB zXB zXB zXB9Jg:,J(AobrP%'%'%tXB9JГJg:,=,T %3Peˋ.,/R,P^v %XơHJJg:,=,T %3PP>a `y%ǁ%tXB9X^v %q` %q` `yѱ`<˫˫>]^}|Iy2v>5c'˫I6}FI6==,>:v,P^}t$cNeʫ}lICyѱm; q(>:vrP`c'?O1P52o{cؾ<Sel?ؾ41o\)2}Q{c ~5?>8ng[cW׼B(c֘׼B(c^ GZ)25ʘ׼B(c^ <ʘ׼B(c^ y+cׯoPƸ+25[!1PƸByLWc\ c.~f55ؾC.ۏƸB71P}_~lߖwǸByL;cTV *UP qPA%XA`qPA%',Tetg *AJ8AqН`3,;A>v,;23,;2*<ȂJJJOa.ԝǁ \P zXP9J88S *A *AʂAqН \P qН33 *23,㠂AqН`q`{%QWHLJOnԕ L^ ~G*_| d^ W e^9Jp2ܯ 9JA] z^ z^ W`JpSܯ=lT{%ߩ Ŵe^ qP{+2j8ܯ8,Jpb,AWe^ qP{%XA핃j8,JpbܯT{%XA , "WWW4^ z^ z^9 A+A+A+u9q`{%q`{%q`{. 8r+A, 2Q+u9XA>*b{%XAQ+2j8rP`We^ qP{%XA할^ qP{j7[할ǁ할^ z할^ z할^9J{olJ[lJ[l}2AWފf{%XA`W,J+2jT{%XA`r)^ z^ z^ d핃v+AoѲ88 +A+A+]iWW>`{`1JC4JC7r w ^ Sq{+AJ[#nT{%s$Wq{%ȍsWֈ+An9+^ qP{%XA w ^ qP{%XA`W,J+2jT{%XA`Wq{+2j5JJJ;n=lT{%ȝBWWW888rP할ǁ할ǁ`W4^ qP{%XA핃j8,JnT{%XA 7^ qP{+An,J+2j8ƹ+^ qP{%ȍsWe^y ϑ^ zI Sa$")Ag8HJʎT$%X>ERPv$%7_I9HJʎ#)A9 #)AHJHJ+===L˿R'An;u^Nv$e$e`ymͩ : -9uS'A9urICymͩ`Nd$q`$q`$ N֜: 4ȩǁǁ O: z: ܁ǁǁ O:9X^[s$XơICymͩ G: q(9u1S'25N>w`R'2J}IR': ܁`Ne:9IR'2J} ɃL=L} IIIIGCP$")A0,?ERmf$")A'0w#)A0rPI I9HJЇ/wByّgGReώ87I zI ǁ}HJHJЇ/,o<;80 #)ώ==+(:a#)A?]Q|a?EGR0HJԌ,(:#)A0a#)A08080rHJCyGёmI q((: #)2EGR>a$%XơHAERe;|VWC1*cؾ?=ؾ7c^R<*c\ ۏu1y 8;c^z1TƼPW@ek^PW@ek^}+{k^1yTm1yTS@ek^1yTƼPyL}zk^1yT]c@ek}zl߰g@1f<5π<*c\ c\ q32*AJ]A]"q W q JOaAU|s\%ȇ1U܂p\*28(,[2JnT\%_GW q-`R?[2,[A>9rPԃetK=G2UetK=XAԃetK=XAqW zW zW q80c*A*W VUUU|bs\%q`\%`W9[2䣛*2~Pԃ|~s\%XAԃeW9[2,㠸Jr-e )Yʲ[YA>:䳮,uI1]gY|u%_R Y wze % w{e9,Kp/,K,A]R ze ze b,K_/0]R ze9,Kp/8(,,AeYee qP%_j qP%XAY~-XAYʲ8(,,K,e qP%XAY~-_j;,K<yY`]y~Y o198080ť,K,K,A]yze ze ze9+A,A,A,u9XAWee zWY`]yqЕe9+2!,2|PYe qP%XAYʲ8(,,K,2ʲ},K,V%]?<8q`%n6[c%n6[c%-=6[-Ao7-Ap7-ArP͖wl qP%XA͖j8,fK-l q(l qPA6[6[6[>a`fKl=l=l}fKfKfr=͖ǁ͖3l9X.i,Pi,Pj;N,An;Y w e ,K[#βT%s$gYq%ȍsgY*ֈ,An;9,e qP%XAY w e qP%XAY`eY*,,K,2ʲT%XAY`eYq堲,2ʲ5,K,K,K_.MfYfY*N,A,A,e ze ze ze9,K,K,K,_^ʲ8(,,AeYee qP%ȝBgY*,,Kβ8(rPY 7Νe 
qP%XAY`eYee rYʲ8(ƹ,2ʲ<,KHβ=T\%C U2ί*W *A:/T\%ȝ_U|q Wyq |q\%3:U<q\%q`\%q`\`y3 O !')O r+4SOS܊v?%X泼~J[y2~f)24SeOyǁǁ OfO9XLs?%ȓSSS `?%XA`S,~J)A )A)A8888rP`!) )CP?%maSw)A') )O ;OIO ;}J~J|'WO9X^Mv?%Xơ~JCy5ǁOIO zO9~JЧ$=}J~j)A)Ar~J~J~JY^&|LxOwyß2)_TO d?%hS O OfO ԋOfO zO zO9X^&t?%XơL~J'38 O d`2)A̰,P^&t?)2eBS~>SS?el_#c~v7/}}1SS}mKcl_c~@)c~0Gccc꧌ߎc~*65yͫ2yͫ25~~)c^꧌yͫ25~c꧌yͫ25~ʘ׼)?[1꧌qV?el?zLV<-ϱ`l?<2v{cI}17qO۷1S}l8<ƸOg?el~Lyż}/olKy25~ʘ׼)c^<~ʘ׼)c^꧌[c^꧌yͫ2oۍyͫ)c^꧌yͫ25~c꧌[c^꧌yͫ2Z)c\짌;c<)}1ySƸOg?elߦg?ekO LS|r?%'/Sp_~J`T?%g0S| s?%0S䣘)A>)O qP?%XA`RqP?%ȧ/SetK)A:~Jn8ARq-`Ri,[A>,[2,[2<~J~J~JOh-ǁ O zO9~J88)A)A~ARq- O q-)2,~ARq-`S|s?%XAWw|vv%ȧggYe e YS,A>G;>(,KHβcHβT%} wfYfYc,K,Kp+wPwfY~W.q`堲,rPp+,,K,e qP%XAY~W.XAY`eY]`eY*,,K,2ʲT%XAY`eY]~W,2I]9gYetAfY,K,K,K&,A,A,u:q`%q`%q`nR=̲=̲=̲M`ݤqP%zfYet:XA7>bnR8&ugW̲8&AeY>b%XAY`eY*,,K,2ʲ8(,2ʲ<[Y7+8V%qOFd%]u6[Fd%]u6[Fd%mD6[6"-Ao#:-Ao#:-A2rP͖wl qP%XA͖j8,fK-l q(7l qPA6[6[6[>db`fKЛl=l=l}fKfKfr%͖ǁ͖:l9X.,P,P6-AZٶmg|a4m^/S5l Ζ@~vrobg˅rdgK &vrng˅rrobgK vdg˅r8X gK ׈v8X gK`-X gK`-%r\(gK`-%rrobg˅r8Ƚ---\#8r-\#88r----88X gKGSΖ9[KlPΖ9[Kl Ζ l ,q%[u;[KlPΖ@nl ,q%AΖ9[Kl VΖ l ,q%[u;[Kl9Ζ@l tlPΖ@^,l gKgK 7v\(gK`ll Ζ@mlPΖ@n~l dΖ@l9Ζ@ml 'UCg˅igK@g˅fJ 6:e4\(J 6:e4:e4\X:m6:e4rlJ`IYټȅ+ؼraټXP:m68ȼr +++ؼraټ+++ؼ8м5?+++ؼraټXP:m68N+<7ڼXP:m6hJ`Cټ5?+N+%2EJ`+^ӼX J`+ʼX J`+zѼr ++zѼ8м8м8м8мr?E-K`e !Z I/,J/ jYA-K!Բ\(-K0e !Z@aPr,>PX0J5Z K5ZRM%ġTe te !Z@ǁZ e !Z@ǁZ@BeTe te !Z K5Z@ǁZ K_@߮h^ e/x~vu~?_o ڼ@h^ B K_@Ѽ +]Ѽ ++++%/hJ+48+,}AW.,}AW}_y%ġm^P8:e9LZa /ܰ}z3läeІ3la aҲ Gf>1Sp6l ۿ<e9l OfayiYayiYye_eg^ZayiYye9LZayiYye晗叝|,J23˟J2l?~aҲ }:0l ceB퇁al@-˰} zy 0Za=`~eؾ헀a!`@-a S23O-˰}}w< wy0ϼzwI23/-0ϼ,<Ҳ&-0ϼ,<Ҳ W<Ҳ K2l_ Kr,<Ҳ K23/-aҲ W<Ҳ K2lZ1jYqeo ayjYSwgZayjYqeqeƙe~9L-aT0<eU/cVx 31O?{ 䃠1|"&V\(EL  1|&"B)bXhEL  :Ċ ,q"&A)bK 䓠1%zB)bV8uN^,q%z>OV\K:}  ,q%z>A8Hs 111|Z"BN8PGF+b*b."& mEL@EL@EL  t M[s^,q|"&A_,iEL`^,q"BNXK c1%z>A8u Z@n _v Z@P~r`-M ~r`-M ~~rf-ͅzA-M@-M@-ͅz3q&q&pB8PK9H8PKs4S%8HKs4%8HKHX -M`4%\(-M`4%8HKs4%8HKHHx4%zPKXKyGS_Kye85 Z򚹵4QKX 
-M`4%8HK{4%+-M::zO-MW\(-M7zuI-MW\(-MWzuI-M7zuI-MWE-ͅzO-M`4%\(-M`4%8ޭ ,q(o[KX -́::zO-ͅ%xkiܥ&q&q&Ї-jijiji.,[K8PK[4㭥 ,q([KXP^&kDkiU&4\#ZKZ@Mj-M Z@nխ Z@nխ Z ,q&AZ@ ,q&AZii.&AZiiK[8HKX -M &\(-M`4ܛXK8PK8PK544JK5444JK8PK8PK8PKs444%x4 ,q&AZ ,q&AZ@PZiiU&AZ VZiiK ,q&AZ_4JKX -M 8HKs 4<2YKs1<-@v ,1\*11K1*1ȋ11N1\Z Qs1\Z Q QsaY Q1%HfEL VaE̅fEL`CYX ÉT:T:TaE̅fEL oV:T:TaEL@ELWT:T:TaE̅fEL`CYXP:mVgEL`CYӟ1%fELWT\X:mV8H 1%RzNEL`1%R\(EL`1%REÉT:TEEL@EL@EL@EL@E̅R:O@_, ,1^s1?&&bA̅ۄ@_, 11ۤt폹t ,q(bcKJ@ǁ@_, t菹P@_, t ł K@ǁ@_,菹t tR?&G&c˯~K'+?&+@#1>2 1>2sa) 11XP}B̅gL`C)XP}8b1%g̅8b1%:(9Lrat oݰ}5l$߆S5laL / a i>N#ð6̳$a ۿW׶am:3/̰63/0ϼ2a K.33/0ϼ2I.33/0ϼ2<֚_eq%ƙ\0u?xfؾfoY7anB̰}:l?7 ۏoØgԹ݆1+ ۯ #ðv30l/ S<3l_0<38 ⇩_73vݰ} 8l_̫Yw3< K<33/a K<33/̰}>3/0ϼ3}0ϼ3I<33/0ϼ3<&̰}=3/0ϼ3uxfga{ؾƙx0zq)ƙxfga~{gayg헞ƙO-a~˟Sq~ƬPs9+ð0YQYڷt'Ɩҝ .w - #;|HtBIwlN - 䳲;JeKwlN D\(N`;%8HX N - ,qPABIwX`N` %*(\B` %*(ҝ UP,qPA!Ɩ8X⠂B` %HN@N@N -ݹP@ǁҝ@>.[8Ps;|~t'qt'qt'OΖ:Iҝ UP,qPA!8p |t'AIw.TA!AIwHmN` %*(8p Wmi]O W.\(]Ona .rb]υz6z}'p6 z'zNBzQ'q'qBj8P8Py^ t _T tPEzKPzK ,q'Az.'AzKPzK ۼPX⠷'Y8P8PѤ'q'qB8P8P8Pst t tPo8z}Z'Ao8@_ڨPo8@_ڨ ,q[JKu=%z?A ,q'AzK z'Ч_z_zz}'KOz.'7z'KOz.'KOz'7z'KOz}Bz}'Az.'AR'A'A t t Ku@/w t t I@ǁ@ǁ K@ǁ@:빰 ,q(zKJ@ Vݺ@~rh]O ?xz7u='kDzU'{zU'G&z.'AzF'AzKPzK8HXP^c'AzK ĺ@ǁ@ǁ@ tP@ t tP@ǁ@ǁ@ǁ t t ,q'ǣ)]O`t=%\(]O`t=%rh]υ8Hȭu=%\(]O 8HX ]O`t=%rn]υ8Hȭu=%H]O L\(]O O C'zˇ ]O Wc]υrEk]O K<f]ρc]O /mba]O@#O V6a#OB#O 6:+4:+4\X o6:+4rk#O`J)<[zBzg+]Oj ł W u=XPz.' 
zKJغ Kغ!'ġt t ł@ǁ ł@ǁ@_,빰t t ł Kغ@ǁ++>z˯~KIzB 7u=wu=>2Q u=>2Qsa)Y u=u=PXP}B]υg]O`C)YXP}8bu=%g]υ8bu=%%Ƕ vtv6>ۦc ە<ۮo&c|lmmac|lmWm7y^v%ϰ]c۹c۵cǶacۥc]1vd+y>ߕ<Jyw%ϰ]1g~W|3+y.;Yݕ<Jq׽f J?Jm{֏mk֏mGm'1+P|l۰~l7|l}Ycruޫvx%Ƕ3Ƕ+Ƕ۰]m1<<V~&8P|3%ǶC1ޠضضg~o ە<Jyw%?ߕ<Jm;yw%ϰ]1g~W|3+yJm{yw%ƙc:c6cy(yݏqgJqضgJqvwu?ƙc8%ʏsW}l8}{cvv7'?lJ <z='u.=Ѓ|ZA>H إz=ȧvs@A>K ޥz++w=Ѓ%]`îza='r,qA>H`ޖxaoKmKă%{[A>K%,q1]zKă%{[-`î=Ѓ@:=ȧvă@]zw=Ѓ|AzS@:=ȧă%{[A>H`ޖ^zKă%(poKMAr#wЃ 0rx#wЃA:p=DwЃAn/>8܃K:p^"}aw=XⰻwwЃ%;%;%;(pw=Xⰻ,qA8`zaw=Diza@,XJ =XPz =XP=Ź@An z@r5"=P`yXzm"=ЃJ =Ո@rq.=Ѓ#I\zKv=ЃJ`îza=X˛=M@8ۥ @8Wܥza=X,q@r5"=Ѓ@:=M@:J=Ѓ@Azt @:=8@`îzǣ,q@8z]`îza=M@] =Xw=Ѓ\K`îza=X,q@rq.=Pza=Ź@8z=;@AU y7I~,] ץ<*$?AK%yUI~IL<*$?$&σHXi:.Ax|K3Mx|TKJ3M.A^ ,4y|,q(4y|,q=>yqAy='4yy|tyqA^yqAyqAy='4yġ4yġ4y4y|,q(4y|1MKJ3M'4ya8f<>σ%Aya:?8@`wO.ywH~,.yЋVH~wσ%?{}w'p8<>8ρ{>qJ~5lH sְ}g~հ}C5lC ۯrI3la< Oܰ}y$oqSܰ[uz$?30ϼ$?< /pI33/0ϼ$?<&0ϼ$?< K(_܆q)ƙ_Bܰ}I;l?Y xI4l_kŰv79hؾ*ga@a?Q /vØ# ayb~;LaNp~ƙhgxƙhga^0u݆yt/+ayhg^ay y%:Lay y%y%晗hؾ晗0 y%晗hg^$'y%晗hؾc S4l_u78㙧hؾƙhga~{gay 헞ƙhga=L5ԽpB K$ ,qp(A¡ p(A % #C%u8qj%u8ȍCXZG 8XZG`j%HP@P@P ]S@P@P@P@P , t( t( ¡@ǁ¡@>EY8tj%uQ¡:.T#; KT,qpB:KT,qp( KT,qP#A U,qP#q(ϴV֪ * _ &˪@*Po/reUQ ZVo/rcUQ 7;VrcUQ ;V]UEUEUEPU8PU8PU'{^t* _ ߓ t*PTE%R8HUtTE%R8HU'X UQ`TE{%R](UQ`TE%R8HUtTE%R8HU''{TE%jQ8Eq UE|Ѫ@ǁ@ǁ&UEUEUEE8PU8PU8PUtZUEUEUEEXE`TEXPUXE`Z>QUtZ%jQGUQ`ZJUUE%jQ8UE%R8HUX UQ`TE>QUX UсTbAUQ@UQ@UсQN=F>Jc-,=Fc=Fc-,=Fc-,=Fc=Fc-,=FctJctaqc0=F=F=Fc8c8ctaiuc8c+ =Fn=F%aQ`Cixcȭ=F1 @~|robхi{mbQ ׈rnQ &rnQ L](Q 8c5=F%8cXPޥ(AB=F%81 ,q(Ay7(q(q(kD{z.@ǁ@ǁ@1 t1 t1 t1P@ǁ@ǁy<8cX х8cX Q ׈](Q`FXPzt8dQ`CFtlD%GgQ`lD{>Tڈ5lH ۯlְ}h~հ}C5lC ۯrF4la< Oܰ}yd#oqSܰ[uzlD30ϼlD< /pF43/0ϼlD<&0ϼlD< Fh#_܆qi#ƙhؾ/i'a0و a~v~1G WCŰl79hؾ=L-ansDѰ>1l?O ۏuF4l_ O8 Ftzk8 F4l_}0ϼj=߰}=3a F43/0ϼlDF43/0ϼlD50ϼlD< Wz<&0ϼlD< FtlD0ϼlD< 76ayڈa2{g6~< 8 F4l F43OѰs:8 F43Oa*̫J{jy9̟ G0c)Ѱ43̟"Ѱ*sj@>Y~GR~tG\T~UG\V~tGW~ȍGY~tG\[~ŅG\~tG%8~X Q`G8~XɅHKT#P5HKT#  U# ,qP$kۏKT# ,qP$A5ُ(q(q([ ۏ.)q(q(q(q(j::rKaQ@Q l?P5HfQ`j$F7ۏKT# ,qBHKT# ,q( ۏKT# ,qP$A5 U# 
,qP$A5@ǁޤ@>)ۛge{.7)pw4;7{u7BJśI\ٛeIқ}IٛȝIʛȵIIIU@ǁޤ@ǁޤ *u@oR cFo@o҅&0o8țX o҅&8țX oRFo`I%&o8țtI%&8țX o҅&8țX oRFoF&8X⠾ǁ&=L{zz8&:&:&]G@oR@oR@o҅{:&:&:&]G`%&AoR`%{Go҅{8I%{\(oRO&8XP&8țX oR`I%&GoR`Iқ[IIIқ-I>gЛ-I>gЛ-IқtIқ-I>gЛ-IқIʛX oR`I%&](oR`I%aboR`I%fboR`Iқ8Л8ЛsIƉI^ ӛ8Л8ЛIIIIIЛtai؛XP(&8.Iܪۛȏޤ@M goR &&]X7)&&rhoR &roboR &do҅&rnoR`I\#ڛX oR`I% }{KM ,q(ۛX oR`CyWޤyK &8ț5Iқ8Л5IIʛ5IIIʛ8Л8Л8ЛtIII%&x4M ,q7)Aޤ M ,q7)Aޤ@MPޤyU7)Aޤ M VޤyKM ,q7)Aޤ@nMPޤyU7)AޤM ޤ e? /$Q cхriQ >eсcх|fQ vz40P@. 0 haѤR>(УIQ v,3;(x;.,3;KJ9(q(q(x;.,3;(q(q(x;:P(q(q(x;.,3;KJR>();KJ@0 ,q(3;PR>(A@W0 ,q(3;}_(ġ0P9K0 }0 t0 }@ǁ@ǁ@ǁ@ǁ 0 t0 t@Q`IptGPpX~ezIх_GW&Qo](Q(8 @QptGPpXPZ]XZ8VG%kQ@Qo:](Qo:@хkQ@Qo]XZ,8 ,|)Yp G>PpWǗRx;?=+Q/BQ/BхfQo:AQ`C)Yp[G›G%fQ`C)YpXP o8›GJpXP o8HpX߇JѰ}am>xa ۏzܰ}a=0ُ6lab~Oڰ}h;l ۏxa7a x< ~4l&0ϼG< ~tG< ~43/h~ƙhgWյaSqmؾBgdF}[a d~ƌь4l_c0ffa0ԆwaH51l?&3Ұ}_8l?x ӌ43O3aj ӌ43O3Ұ}g~:i&ȝmR&ulR\&o|.M*ۤM* ۤMB٤M* ۤMB٤Kd ,qM*A6٤M*A e cmR%*8s =%*8ȭmRXBO W7I8XBO` =%IHT@T@T 79I]BO@T 9I:I](T om th th f6@ǁ6@>&u =%*q6z.T'ϼI8X ԅ*8X T W>I8XBO` =XBO` =%kqP@zmU P@?C=Txz~* ({@z*{({By*q*qB8C8C]=T=T\CX Uiu`zOS`}J$OJX~>ߧO)Ӆ>ڧ@} OJ}:>zOS@Ӆ>zOS@S>]XJ>:>zOӅBfSo4;϶Tlv f@:hv g+SW[j~ev f@:hv FSo4;]X*d6;bAS@S/4;8 NXtaXP*d6;8 N%BfS`CtN%BfS`N%}n4; go~7la&Ӱ7la 'n~i>ma/a 0ߺ+ް7l&Ӱ73/0ϼNa2; 43/0ϼN43/0ϼN<2;1ayqY{aZx~$ iؾoapGӰ} tO<> 4l_43/Ӱ}83/a> 43/0ϼO4l_43/Ӱ}h}ƙiؾ/qi}:L֧ayZqi}ƙi~ ƙig֧a0Xqi}ƙig֧\Woug^ayYSeu~W_ug^mTVWUug^Easg^-ayGayTyeyM晗j~;LaydlU wS6Pr;eՅ2PrAeU WT6PrIeՅ2PrOeU 7U6PrWeՅ2PrEdU V6PrMkՅ2P8@X U` T%2PrcdU`G@GhKT< ,qPBKT< ,qP($.T(Aţ@l ,qP(AţKd:@ǁ@ǁ@nlPţ@ǁ@l thP@>k@8@8@- T T|궁BKT< 䣷 T%*]Q m ,qP(A U< ,qP(A@l ,qP(Aţ.T(AţKdPţ@ǁz@>[Os.*OSr/e=U@=U wSSr;e=U SSH=U WTSrIe=U TS](=U 7US:S:S]7*q*p]6ZK _ ,q*Az ,q*AzK ,qBK ,q*Az ,q*Az ,qP(p{-#XP\H]X\H8#X T@T@TO H:H:H:H:H](T@T@Twqd*%P@/) ,lI)dBI/[˖d*%P@Q29%SdBI8d*ġTY-TY- ,q(UVKKJՒ@ǁ@/) t(P@/) t( ✒ KՒ@ǁ@/ ,_z#X>G*wz}wG*&H~|;eTH@TH]X^H@T@TH8=R;#uaz#XP^H8=R%eT`Cz#uma/a 0yߺ+ް7l&԰73/0ϼ{[2XVR`ZIXVR`ZI\=Y2uZI%j%rdT`ZI%j%8X ԁL:L:Lreԅj%:Lr'eT@ԅL!ݒ@ǁ@ǁ@, t( %SXVR - 
,qP+BnT`ZI%L]VR`ZI%LroeT`ZI%j%8tZI%j%8X ԅj%8t T|䷁*6P](U m @g@nl @nl:@.l @.lP@l th thP:4P:4PrkU@Um w6P8@hwX U` T@X U` T{%2P8@wX Յ2P8@X U` T@X U` T0.*A*/@8@8@Ѥ*q*qB.T+)q*q*qBKJ ,q*KJ ,qP+)F.T+)A@i ,qP+B}n*A.,$Kd  e ,q*FKd:@Wh th thPi B@Wh @Wh B@/iP@/i B@Wh B@/i  e ,q*A.,='P*ġlPRx*Ai th th } K@i th th }@ǁ@ǁ K@ǁ@Phl ,q((KJ+?hU ?!F* Tܛ@ua)@/ F*[u7*[uydR/*[uKJR3*kDKJ@m ,q(.*ġl ,q*ġ4l ,q*kD*q*kD.*kD.*q*q*qBKd h@X U` T@X U` TM.*A@nm ,qBU*AKd ,q*[u.*A@nm ,q@ydG*U{.G*@{YűG*"۠ *۠ *_۠˦ һ *пlڠ *_۠ٻ *.۠y *.۠KJ6@l th th 6 K6@n~m th th 6@ǁ6@5i th th 6 K6һ *ġl !6һ *۠KJ6@5il ,q *k~ڠKJ6@i ,q(+۠.,+۠Kd ,q *q *q *k~ڠڠڠڠڠ. *q *q *{Mڠ.l , UԅREzMUT`ң*B$HX&A2 RE](UT 􆛪@1RuTQpSXP:VE]X:VE8ΩUQ%sjUT@UT7TE:TE](UT7TE:TEzMUԅsjUT@UԅweToAOl 6@h ƯlP?o] ޕmPmPO@ToA:A:A]XzWA8ޕmPXPzWAhbT`C]ȢmP%weԅA8ޕmP%A8w j>{[ư4la 0٠a Gn>q ưwlPi۰x1l? 'm>h5l_GaW0٠ay٠ye jg^6ay٠ye:L6ay٠ye晗 5l uU3ajW ,R}e|g^}T晗Ejg^ayYEjg^ayY ayYye/ye:LayYye晗E0Y ayYye-R8H 50<-R"53O0<-R8H o=8H "5l?0<-R8H "uڞݰm>Ƕvv {>MǶavv6ime+c71eg~2}l ۽L^yw/߽L1]?lm1lo}3w>m??{D߽L^m yw/߽L^acc1eض ^qeضضg^acyx>ƙcyx>z>ƙcyx>yMɏqeg^qeW$? ȏyw/Ӱ1^g~E[ȏyFǶ<{c1ބg~2}l}3 ?߽LNu<{cx|Nsz9=E|NrU&Ssz2L>1)0L>6A.s }Nr'%Ӄܞ`s }N8>KvӃ% WT9=XwӃ|f?`ޟ S8Kԃ\Y,qSro%Ӄ%{?`ޟza9 A>K>?ӃeSsz9=8Ӄ\kӃ|̗)pO=X䳾|N8? 
szaO=Xԃ%{ ]9=X,qS8?`ޟzaO=Xԃ%{?5dA${ eOr Ӄ5HSӃܱI l===dO!{z6ɞMn)p==ȍdO:==8@wt {zqA.%{zqu]J8첧KvS.{za==X˞ސa==X˞ސa=KvӃ%]˞,qeO8첧7ސeO87ސaO A,ӃdO:==фAȞ)qAdO:==8@ӃdO{?`.{zЗȞ,qS8})pO=XKT.{zЇJȞ,qS8ҟ]=KvӃ>TB`.{Ӄ@ӃdOAAm {zЋ9ȞA!{ eOzO ӃSC6===5dOzO ӃbB˞,qeO8첧KJJKvӃ%z%Ӄ%ҿ]4Ȟt {zgȞKKPAȞȞt { ,,ɞt {zȞK=KKJCKKJIKy_A%{zdOro"S`0H ${zkDɞV]7An%{zG&ɞKAUA~'HXj ==XP ==XP ==X˞,q( ɞ,qeO8dO8첧KJAKvӃ\#J4ȞQȞwӃ\#JӃdO8@Ӄ\#JӃdO:==X˞hKvӃ%)p==X˞,qeOr(S.{za==ȭdO8첧] ==X˞,qeO8첧KvӃܪK˞,qeOr.Ӄ%i dOr.ӃkJ o==EdOr)Ӄ,+IXJ==5dOr#ӃkJ J==EdOr/Ӄ\H`C)+I 9==8@ӃkJXJ==ȽdO:==8@ ==8@ ==8@ӃkJXJ==XPJ==XPJ==5dO8dOr/Ӄ%$Ӄ^AXJ==X˞KJYIRV)${zġ${znȞBӃpCӃdO:==8@OpCӃdO:==8rKenms *]0WL=XvL=S`2`2`2`*pL=X涔C%zЗ,qS8rS8삩R`R`R`A`A]0WL=8@0WLrS:pH=X>ҁC!Rzԃ!z)CAzx8)ҁ:?;>tzx8tzqC*tzġtz9,q((9dCKJJYCҁC*pwH=XP:PrH=X;,qP@԰}Ǖa{h>xarH ۛFܰ}'a096la5e~[Oڰ}Cj;l?+ۯa7arH //%R\/uRܤ/]Rܦ/u?ާ/ȧvS_*[5_*5K䗺P~K ,q_*A~@ ,qPeBuS%O8H>ZX T`S%O](T`S%O8H>uS%O8H>ZZS%*^8xu S|@ǁ@ǁ&SSSx8P>8P>8P>uWSSSxXU`S>P>XU`WbR>uW%*^IT`WJ>GSūKT, ,q|*WLʧ.|*A@_1) ,q|@ʧ}|*q|*q|BɧʧĦ|*7ʧ|*7ʧ|*Klʧ.|*KlʧĦ|*7ʧĦ|*Klʧ}|BɧK$ ,q|*ġԵ, ,q|*ġt, ,q|*ġ, ,q|@ʧʧʧ}ӡ||* eʧʧʧ}ӡ|*q|*q||*q|*ʧ.,.˧KJ|*Jf@m 'd3U *6S]X6SOHf@m f@.Ul f@^l$l f@~aLua)KLXP6S8ʄT%2S8ބT%2S8T%2S8T%2Srh3Ձ4S:4Srh3U@3Յ2Srh3U@3U@3Յ8L8LTTTT%2S?-ɮ-ɭhzwL\8hi"#">MKd ,qjT%2S8LȎf2S8LfLfKd ,q*Af@V6S *Af@V6S8L TӤ|*S% dcT {M˧d|j`Y2Y>Ț@9O״|*K&˧YX>Ț@9O״|*A) t( t( did|*SSS5- t( diT@T@T {M˧%S%edT`CY2Y>Țd|*5S%edT[;ʧ%S%OĦ|*ġ,, tMT`CY2Y>5,, ,q(K&˧]bS> SS.) t( t( t( t(_Ħ|*q|*q|*q|*q|*Э4āNrZ*AN9]biX U[;:iX U`V%rZ *ġlN ,q(S;iXP6vZ8i5lN ,q(S;KN@ǁN@tZ:tZ *%6VV.X6vZ:tZ/BViXeZeU|:i tZa tOU|:iM2z[U7˴NiVVܒiXPUvZrKb2*ġL ,q(*;KʴN2jV%eZeU`V%Vci쾽eͲ{Ŵ>e-19e-On}qecrZ-m}lGeҖ݇Vec[;crZ-*Z曗jFV|rZ-i7/crZ-i7/2߼VV7ey:Jj\6Rjݍ/eCѲ19ݵ_v-_L1tZ- eвtIղ L{e;2fNew 7OcrZ-;ew7O22߼F|h./\曗jr7i2߼Vey2Zv?=E2߼|Zc.?c>HՃ^^a*W% dfU 6 YYx5P«@m^o*W% deU k7 K$(U`W%^8HxX U - K4(U oU`fb% L,A3uYx5P3YxYxX⠙X`fb%8Hx WWW,X@U K0  JxV«@ǁ«@ǁ«@a^:^jfb%$*A3)*A3 j&X⠙X`W,, ,qL,A3j&X⠙X`fb%^ L,A3Y$<6@aD@ٰYn؆z6@6@zaس +՞mX҆v6@{a klX,l th theB@V@V>ڰڰO! 
+A6SȁKd ,q klX%a8Ȇx(?A6ٰ8Ȇ5P6ٰKd ,q klX%a8Ȇx(?P@ٰK4 ?x(?Ai 䧥m th th Ӥ +q +q k&[mXmXmX5 th th thV`&[%aM6MK4 'mX5 ,qd+Oa8h5P6@?q҆XV`&[d6ٰI@ٰKd 'mX%a=HVth th th(V@Ut( tkG@ CU[; ]bSx5P«@^Ħ*o:^Ħ*%6W~jW.) ,q*ġl, ,q*ġ , ,q*ġ, ,qA   C2*Н1WWW~ӡ*q*qj`{Yx8Px' їW%eeU`C~Yx' K«@~, dob*_! Y#ZxV«@&^U*L^ , ٪[xo^ ,K Kʘ«*A«2*A«*A«2*A«@ֈ^=HU@U kD  Jx«@ǁ«@ǁ«^:^:^:^ *q*q*A«?>M K$ ,qjW%^8Hx«^8HxV« JxV« K$ ,q*A«@^ *A«@^8Hx Wl- di@ YXx^«@.,XL^*EW5- «@9^*EW5- ,qA   kZx5,, diU@U@U {M  kZx8Px8Px^«edU`CY2YxXPL^*ġ,, doU`CY2Yx֎«edU`W.) ,q(K& ]bSxXPL^ ,K& Kʒ«@^=HU@UKl     Jx8Px8Px֎«^:^*qjWn( ,q*A«@^8Hx֎«^8HxX U`W% ,q(R Kʬ«^8YW%^ ,R Kʬ«2+*q*%6WW% tMU@UKl YWz«E() , tOU| =«?LWn) tOz&oYO_-) Sx8Px8Px5, ,q() 9*ġ, \«*s Kz«^8W%^qNey-e[em-Ok}Yi!-j}UZv?,&j}P{Zv,_%Ǵ̷$ղArVey9Zv?C>&2߼V|rZ-iV|rZ-i7/?FղqoN4Zv=29Zv{~}LNew.@Ge&V{~]li쮻i٤jt/iVZo]r/i7Oc9-i7Oղ~Ley=޲^װ1횖jZצio^NZ曗jo^New7/2߼V{o^NZ曗jo^Ney9j][/i7/ղZƛjV/ey:joNey:Zv,i7Oղ13ZƛjoN4\(so^ey9s8712߼i72߼VeyM0Zv?N.k}7/ղIno^ey9ݯqit72߼xvZ-z«@«@o^jW, d gU {8 Jx*«@q^jWl, d'gU`W% ,q*A« K$ deU`fb% d`U`fb% L,A3YYx5P3نYxX⠙X`fb%8Hx WWW,,X@U 2  Jx«@ǁ«@ǁ«@e^:^0MK4z«@~«@ǁ«@ǁ«?6@؆Ȗ6@ard2s +oa +mX9نȖ6@؆Ȗ6@a8Ȇ mXmXmXlylXfNa +q +q +mXmX,=m th th di2s +ġ̜l ,q(3'۰`V`C9ن7۰K6@sa ,3'۰Kd tOMV`C9ن螚62s k`9نXPfNa AڰڰSӆ8І8І8І8І5P6@ǁ6@ǁ6@sa +q +mXmXe t1GV`lX%a +A6@sa +A6ٰKd(V`CٜچXP6a +ġlNm ,q k`ٜچXP6a8ͩmXmXi th(V{jڰڰSӆ5lN- ,ϲ*}«@^:^a( tOUx }߲z[Sz«@^:^:^ ,) Kz«@E, ,q() 9*ġ, \«jW%e=eU`WE6e-eCe-n}vɆ(-on}r[v?,& k}mc[v,_%ݗ>dZv]v?H.#rq k7/2߼lXgdZ曗 ko^6eyٰ ko^6eyٰehZv?;.ӆVRǴZ?lX{~/߂/Ɇn2l kIֲ^v-C1a-ǴZv?.c6iZv7xa=&ֲ[vxa-ӆPxa-ӆ쮶io^˧ewnzzLeyM5xZ曗 1ٰeZ曗 k]c/ˆ7/ֲ^曗 1ٰeZ曗 ko^6dZv|a-ˆn1ڰiZvzo6dZƛ ko6eyڰ/6xa-ӆ~yLeyڰiZƛ 1-/浻\ro^64\vU.ko7c[.kj7ro^+eyٰݏ|X.ˆ~[׶ro^6eScU.kU7M2޼mXz۰ + +mX,l(V ;8۰ن6a +emXl(V .۰نX @ٰKd ,q +A6ٰY|نX⠙@ٰ؆X⠙X`fb5 ,qL,A3@aa L,A3@aa8h&X⠙X`fb%a=HV@V@V 1۰j&8ІȆ6@ǁ6a2 +q +q +emXmX,lX`fbll ,qLlfbl ,qL,A68h&X V [4۰K4 ,qL,A38h&X⠙X`lX5 ,qL,A3@ a=?id[bV {_E@v&h5G+ɦG+н =Z dchփh4G+=Z,(V C{zzj8У8У@ǁ́ ,qG+psYyK(V`MYKd ,q%j,Q%D8vҖD8ȲޖYȲޖYKd ,q%*A@D %*A@D8 -Q|)-]O,=z ǮecS z dcS KO9)-]O|)-]O,=z ]O88ȖǮecS 
[Yz8Ӯ@ǁ@ǁ@v= ,#KʈǮ2)]O%ecS z ,q(#]4xz ,q)m3]O%ecSfKʈǮecS`C趙z tz tLS@S@S@S@@]4P@ǁ@kt=:t= )]O%r=8趙]4PKz ,qi\O%eiS`CE4P)AeiS`CEXPvv=:t=m)qi\Onz tz tL2)Us T*9=HSs]SdSY)9uN.ΩsX6P[6P98)q)qi`@YXP6P9raS`C@YчuN%eeS G98 uNs ,q((Ksz}_*s_jefٽZv'aew_j=}Zv`ٝew@cK-}eK?_jc`~]v XvxL~ewZ曗_jR|K-/7/cK-/7/2߼RReylk/_fkrO-eOeeZv7se-S~"~]=>k4{joey5㲻t_ƛ{joǴZƛ{joewX|Zb-{ew˾7ca-k72߼Si}72߼SZ}o^eyݍ2߼S=7/2߼S|rO=&ԲF_曗{jo^ew=7OԲ=_vxtO=&2f@7.4+ Y)}g6)mN%e`@ٜK46ٜK>6ٜو mNmNlDms ths(S Qۜڜڜ8884P6@ǁ6@ǁ6ٜ4es ,q)A698X S QۜX S ls ,qilN| )A6ٜKds ,q)9 )A6@>X Ӄ9Aǁ¦@v6;YY.¦@._,l dyda|)呅MR-lX/6<)O 6<)]MM,,lX/6<)q)q)]MMR-l t(l t(l dja|)ġ,_,l ,q( ٥ZXP/6K)ġ,_,l tFa|)A¦@68勅M.)l ,q( 勅M%ebaS e )q)Ѕ2MMMMM%l t(l t(l tFa@  ݠQ8P4P¦@7h68HX aS e K$l tFa@ K$l ,q)A¦681M%eLha@ Kʘ¦ 1M%eLhaS`CZ8PB¦@ǁ¦6P)q)Ѕ2M¦W)8MPn¦@w6:6aV(l t7NaSq JnoV_܍-ӪeZeaSq   iM%eZeaS $68iMܒXXPU6rKbaS`CVY4P¦2)A¦)l tFa@ BaS]4M0,4Yղ; ,sMEղ; ,#~Zvi}_v,_BݧAMGegGc6-#7/a2߼M$lZ曗io^¦ey io^¦ey %lJaӲso¦eyʜ}1Ė'Ӳ_vd]v`]v\}L2ewK?S)sZLSݯUe4eNj1M–϶˘iʜ)sZƛi]A. eyʜ)szL#eyʜ)sZvi7ײs\v7|~=2߼V_||-K_|{-KܗZ曗iݶ/KdN|9-K7/c9-+eyɜ%sZveNx9-ew7Oc9-S洌7O2@5 Hni@ ~@74 r`R@?ЀXZ32 tKd@(R` H%2 8ȀX R|h@ ,qApӀ8Ѐ8Ѐ4Pi@ tMRk ʀ@4 i Hnh@ tkGRKlр֎@Ѐ4P@v4 8] H%ebR[;Kʾ2 8 H%2 =HR@R@RKl Hnh@ th@ th@ tKR@R@2~)q). H)ġ al@ tKR`CY؀62)m H%e`fLcd,V d?dR b@C+.Xi`$JK-V Yfb@֥+,V d]jR Rb|K$V ,q(зX)Ab1}K$V ,q(շX)Ab@+=HR@R VJȢb@ǁb@ǁb+:+:+:+ X)qX)qX)Ab?>MK$V ,qXiJ%+8HȢb+8HwK$V(R ,V ,qX)AbK$V 仃J%V ,qX)+8H J|X)qu)Kغ4LTl] sK,l] dkR '*.u).u)K|ΰuAZں8к֥ebR +[ZZYXٺ8к֥@ǁ֥@ǁ֥@. ,[KD֥2Qu)K%ebR [[KD֥@k. ,[Kd] sK%ebRfZKD֥ebR`Cغ趙֥i] th] tLR@R@R@R@@YZZ]Ѻ4P֥@ǁ֥@k.:. u)K%.8Ⱥ趙֥Y]Ѻ4P֥YKd] ,qui`ٺ趙֥2uiK%egR`K˰֥2u)ġ l] th] tLR@@Y6Ӻ8к趙bX)e JOnb@w+X)X)@RqݍS4Pb@w%V t7Wb@wNݍS8P8P4,,V ,q(-9QX)ġ,,V DbزX)Kbb+8ŖJA?кz֥.֥^ K?v%Ǯ?͏]SbcWJ~ ɲۺc׊ǮcЏ]euǮl~~z !Ǯnҏ]?#~]1ď]?֥"cۺcۺcۺcۺcۺcۺ.o.o.o. 
uǮúcerǮ.ǮelwL?v?v1ڲcWc[~yǮǮuyǮ~=QIǘw~7#ӏad1NP.)v9}Š\Nd5(Y8#lr`r ]N,q]N,q]N,q]N,q]Nde&KnSr ;>Xp>XpKKYx>Xp>M.8C8C8C8.p9}q郎\Nd&S=٭A.A r9}q郎\Nd&>A.{({(Avr9}C%P%)}}v9}\N,qr,qr,qrP%P%P%)}}}\N rYA0r9}=oYA1r9}б郮cr kH>"R.p9}]\Nd)Gx>JR.:p9}q)v9}q郎\Nt; >x}jr`r^#8.8.kDkDx>Xp>Xp>XpoKnKnB>:I AAP'}O!>Brc u}CP'}+P'}|}^C^C8[[[[[A@[P:郎ItNZ;] ÎAvێA°#}`G H?aG{0ؑ>Zv`G_I`G Ht;KDv8Ht;KDvێ7%i!Ht`G;] ÎX'#}Ht`G;]ŽAv:#wHt`GRؑEv8EH,q(ّ>v8eHt?;KȻt,r$] Vҥהt5%] Vҥ{I>Ȼt郼[I'%]`{I>^Sҥ|I>^Sҥ8OKxK>XP>/KnK%]`-] +ZIBAҥt郎Kt郬h%]HoH>8@Aҥ.qt郬h%]R-]-]`-]`-] K,qK,qKdE+R-]`-] _,$]`-] K䋅K,qK,qK,qK,qK䋅Kt%t|t%ti!KKtA^r."R`ȫA>ȫAV]*}ͯJ\D^W|W郬U _y>ȇy«A6*}!*}UJe."YuɫA^:*}UJtU .y>8A^W)EU`Cȫ2Wl~U`CȫA6*}ġEU4x\D^8^!D^8Jt K\D^2W%e."](ë^:*}Ѕ2JtUx>8x{>8A^AW)*}qWnURU4x>Xp{>Xp{>B^8^AW)*}*}*}*^8J,q(#;yoKN^8^2W%ed'KN^:*}Ѕ2JtU Jt x>B^?U>x>XUox>^:*}]ëA*^W郮Uoxo]Jt^:oR`sɫ2Wܯȫ2Wܯȫ2Wܯȫ2W)*}ġ̹UZuuݠAx>>x7hIuҲ; ?Ͳ{ʵN²;<&uҲ{ŵ;,xǤNZv;e[cR'- ,sgGcR'-#7/u2߼IǤNZ曗:io^ey:io^eyNǨNZvf.S7OuҲc-ecڻ-EZew_-Vi]//bj{J~~]*-[Ǵm[v.jUZƛViN.eyjUzQ/S7OҲL;eyؖu䲻_ׂ1 ؖ5_[xmo^ZǴ\[nmo^Zew72߼J"~o^k4V[Tmo^ZeyiViݾ/K7/ҲxǨUZƛVi]/;eyjVioZeyjUZv?<-S7OҲ1 >UZƛVioZǴ\so^+eyis7}2߼֝iܹ7i2߼ey:UZv.kй7/Ҳro^[eyioiǹ72߼6x*U e|[.l-O@( da T kMKYlZ5P@v@ݴ*%P% di T KNKYsZ5][J@ǁ@z@8HX T`$P%@*A@*AjXU_`V},,U_`V}l, ,qЪ/AK$z@ǁ@ǁ@@ Ԫ/q*M%P%P% d5b T@ T@ T KAKJYX5PٔXXU@YXXU_`$P ,qЪ/A@@8hXU_`V} ,qЪ/AIjXU_`V}lS,z@ǁ@VF@4*=% dqd T #K{J]Q@֦@=H T SKٝZ@@*q*qj$P%P%PR) t( tJ T@ @IދIK$( T`$P%@ދIK$ %@ *AIK$( T`$P%@=HSkKS/*@T9iӗ@Skay)З@S_Ir ?gTN~%)S@ciTN%R98HX S`TN~%)A*r t-LS@S@Ӄ5)B)-mM:mM.hk([SN[S +ښŠ@V595)ġLWlk ,q(ۚ]XXP&,5 )ġXlk ,qAښښښ4,Zlk tC[S@[S@[S{Mښښښ}mMmM5ikXV.58mM%eb[S{MښKŶ@958ՋmM.rhk ,q(ۚ]XP/5ȡ)qi`dUNY8P8E=O"Ğ@>Ȓ̞@v<rbS K2{bS K2{'{)=O|B)%=O"Ğ@d<:<:<$)q)%=O=O=OyX!<8E=O%ebS ;c{K"Ğ@v<8E=OyX!<8'{K"Ğ@<8E=O"Ğ)m3=O8趙@ǁ@ǁ@ǁ@ǁ<:<:<^igX T`P%r@8 ;X T 3 ,qA:gT@T@2)$69)MNQ&@&@69&ir d-lS _Ilr df2)=MNMNMNlr thr dfS@S@S kaMN%eFaS`CQZ&2)MN%eFaSK2MN%29)ġ(lr tgLS`CQ4(lr ,q(3  MNMNir thr thr thr thr(S@S@SK28蒌&@ǁ&29$)A&X SK2X S`LN%29 )A&L)A&L)A&efS`CYXPi69:493)qiLNir thr(YS`I֎@v5=HYSKlʚ]bS|ROYSKlʚ]bS4P@ؔ5Ħ)%6eM%k 
tW@dM_ɚJX YS`dMXXP[5rbYS`CYlYȉeM%eeYӃ2,k CeM(kX55 Rִ>efٽZvdMղݗ>ec̲{idMowevkv[v;Le7ew1\v'_v#Lew%kZ曗i dM|5-Kִ7/Yc5-Kִ7/Y2߼dMeM>x5-Sִ~AYӲzLe2~ioeyʚ㲻p_ƛioǴW[ƛioewj|ک-;ewþ7c-k7y2߼dMi7e2߼dMJ}o^eyɚm2߼hi712߼h|5=&YӲB_曗io^ew{Qִ7OYӲ9_vx5=&Y2ȺAXBG=H{T K"ۣ=*_!ڣN@ʶGW=jQm dl{T eۣȶ@ǁ٣X {T`Q%G8٣6٣K4{c`f%=L=jf%=Q=*AK4{ ,q=AڣڣڣY5P@ǁ@G:G =*QQQ,\m th de{@K4{ d e{T`f5{ de{T`f%G 1A٣Xc`f%=1AKdc`f%==AڣڣjW=*Qll ti tF{T ;gۣݽ Q,m dl{T gۣ@ǁ@ǁG:G:G=*q=*U4QQe 7$&A٣X {T`Q$&A٣IL`Qe ,q=*A٣X {T@TU @5P@ P(^ *Яt@{`&t@ *ύt@ P~=*A@X ԃt@.*q*q)%5O)5O.y ?Ny tWC@iSjP꒚@w5<)M>5Oy tWCS`CYXXPF1<)ġ,cy(S`CYXX Ӄ<:<:<i`X肃@ǁ@ǁ@W<:<:< ,kjQ4gy ,q(kKʄƚ@}<85O.8y ,q(ck]pPXP5<)ġjy tAS`C<8Xȋ)5OyXF<ҳ)oS +=kYYȋ)5Oxky<ҳi`XJϚ6i4O%eq`S`4O%ev`S`4Olyz@ǁ@V<:< )5O5O5Oy ty ty ty(S@S@S`4O|<8HX @iKy ,q)5Oy ,q)/<8H4P@XXX S`4O%<8H kJX S _,y ,qAjgS@SIS _Iy dfS kakw)=5OˤB@) dbP P(BS(B@ P(Ѝ B'- B, - doP H K'- diP`CdBc(X>YoP`B, - ,q(P($B% ,P(ġ|B@ =HP@PI     J(8P(8P(:B : P(qPhBc( ,qP(AB@ 8H(:B 8H(X P`B% ,qP(ġ, ,qP(ġ, ,qPh`+Y(XPJ 8BBn') t((PI  J(X@ P(BR(躔B@ץ : S(躔B@ץ P(u)BK) t]J@ ]P(q+PWB 8H(X P  8B;X(XP? r`P@-P[jN Z@8(.]3@\vi3eU.r}I ^,r}s\vOǤZvg^vc^vkM1iߒ9\vpx1i\曗ho^Ze1iZ曗ho^ZǤZ曗ho^Zeyi1j?|Zƛhs1Yƛ2hݸ/ߟ:jR-eOezjA}C|SC Zv7e2~2h]?%ԲuP7Oe2-S7OeвHLeymݭ᲻C_1͝5vZio^ʠǴsZio^ʠewi72߼A|o^ۦ4mZװio^ey)2h]/K7/eвǨ Zƛ2hݍ/ey*2hoʠey* Zv-S7Oeвi1 Zƛ2hoʠǴ[׶oo^˾ey)Ӭo27M2߼}iз792߼|ey- Zv?.kķ7/eвqo^ey)i72߼v{xV VyV?崙*DȖ&@v6iUMD,hm" ğ?T6P哦AjYLYS/5EQS4P@v嶦(=5E) dmMQ nkYw[S4P@6:8HS4PiK) ,q(A@8HS4P@6X8HSX M@iK4 ,ql/5E5 ,ql/-5E%8HSX MQ`4ERS8PS8PSRךS:ٵ(qh4E) t) t) dkMQ@MQ K/kjLX1e /kK41e /kK4 ,qhƔ%S8HSך)K4 ,qИrƔ%S8hLX M@)K4 ,qИ25ERS8PS4Pl(eC, t>) tkGP jˆQ6 eC- dimP kkˆJ6ڲ@vז : l(ql(ql(%6eCeC.) t((PKlʆK$ ,qlhdC% 8H6xeK$ ,ql(8H64PɆK$ ,qlhdC% :T@eP|*j3/An 'tnAn 't. 
(eP *S)K t tuIeP@eP@+P-H+@Y  jV߂(M!@Fh t5B+PsZ jV@8ً@%8틭@%8@%=H+P@+P@+PBZ-@n"h th th tSH+P@+P@+(q(@>V2(ġdl t1G+P`C &V(M@%e4c+PZKrV@78@@x[ٽ ȋ(ݛ@l 4l ȋh` ȋ(ݛ@l ,q(ġ l ,q(ġ l ,q(ġ, l ,q(U@ 8 V@ǁV(q(qh@@@@e th th ,q(OSVYKd(+P`@%h@%QVY G [Kd ,q(AVY(a+@YKd 䣄@%=H+P l th th(+P@L ,+fJ1[16ˊ@݊@YVܵb&|bf`T3|b&m3,w deL ,+fKX>mL`CT3%SV8H16ˊh+f]uQ1XP>mh+fKʧ tẼT:Tb&qb&qb&qb&qbf333=3 t t(LmOL`3%Rb&A@۞R8H1X L`3 ,qb&A2Qb&ġLT ,qbf`X1XP&*V833 t(L.*fZdMYd'">i t@L5ZdAZdi tFL5Zd"z@k^Ef,2"8E&q+@YdKd ,qE&E&ġ,Bl Gm ,q([dx[dm@~8׊@G^ taEb&Bb5S1ef=Xvb3ƲZǺeǤYv^v^vT~=Yv߱bfzٝew ߬߫bfey)f楘Yv/Ebfo^ey)f楘yLey)f楘Y曗bbfgoey*f?wӏeUg/?2Yv1g2|~fP?nߗoQeB̲xLe2~~foeyg㲻n_ƛ~fo4Yƛ~foew5|,ew7ac,kU7M2߼3iP792߼3B}o^Keyg]2߼F4iC72߼3|Z<&̲@_曗~fo^ewwQ?7O̲7_v"-S?3x,S?7O̲hoeygFI?7O2<3x<2߼Fd|-K?cWeyǖ5{L˱eyƖ[v.k27/̲to^keygݯ|-K?~|L#eyMĖ5[ƛ~&WoL&÷~&oL&x?Q?Xy3Pʛ@V5&wJyNVgV0&$V K2pV> dSfNkK@pYۇJ>@ '}8l d7o@pYۇ8ЇX @pK ,q'A>p ۇX @pYهX N`|8 ,q'A>@6 'A>@8ȇX N`|8%@ǁ>{'q'q'}8}8lg` ,qs ,q3A>Z~8hX N hpK ,q3AρZ~8hXg`|8 ,q3A@u@VD''5"}8 tH΃C'5"}8(N tpYۇ8Ї3P>@ǁ>@ǁ>@:U'qg|8n ,q'A>8ȇX N= ,q'A>{X @pK ,q'A>8ȇ8j>V0BMxZmI_=(MxZmI_= ti(Mh ti  6ti ,q&q&$6666e tGMEGM Zmjj肃V@}&6i(M Zm]jXP48jXP&58jXPv58j 666hX&6O&q&q&}666V@ǁV@k ,KV2&6%ecMZmK V@ 8-6h ,q([m'jXPV98a77׬ d dfM 5+o+oɂ777pʛ@ʛ@kV^&Aʛa&Aʛ2d&Aʛf&Aʛ@VM)oK ,qf7%R8HyȊʛR8Hy +oK(M _, ,q&Aʛ)oK 䋅7 ,q&/V8Hy 7|&q&qf7[2wMNn@Vv ,P&4|&&4 d'e7 eiI(IdG:_%j Gë́zЁP^i&;)iK7 K84o(MX 7M wRv8o(M4%v\Xl7M`C4IMs 44IM8M8M8M8Ms444~ BiiN7M@7ͅr%niK NniN7ͅr8MX 7M`4MX 7M`4%.b7M`CMX 7ͅ.b7M`CMXP"v:tz'E7M@7ͅrz'E7MpkCkoH@o( V) V@/( t>) S\%5^Q\s5^Q\%5^Q\s5^+qM5ÿ\(qM`5%є&AR&ġ>, ܏~4MȯMpn@/覹H :4g&_V6yc 3ۺۆc#cc7?.vǶǶǶ/lx|lǶ?-kۢ;l|l<c?1l|3g>3ac?1g~\Ƕqgm-q?W6v60l/|?[mIoh ۽5>?ޮK|llS}l~%xk>-mۏo8|3oǶǶU8|3oͰF1<5[m?l/|3g>m??߫3<{oc51Wf> 3ޚmy2a{=crc[1ضW ޚqضSvagޚacyxk>ƙcyxk>ݕ>ƙcyxk>ޚqgޚq[f~|l|3>߽5Ƕ<{c5l}3>߫_ッywoǶ<{c[X1g~|lSۛ^=y8<ȩ$ _ޚ9,;8KvNa}a>X[v\K%{wX,q;8KvNa}a>XwXrQ<8 7RAP<ʉ:P }A' +l(x,q<:P<$<:P eA[vF΃^p[v,;z΃<,;z Nny XvKJGKv΃%$΃%eeݲ3Xvt`y>XvK!H2eAXvXvt` , Yvt`y4Xv[JKHAeReAo`yġdy+Xv,q(!YvKJsH2eReA `yġtdyġԈdyYAפyYAפy5)xdV , 
)xdVyY't0yYAפy5)x,q<8:<8 KJ'C Kvσ%!σ%Anσ<r,σyġ>΃I b+΃Wdy;)Xv ANaqޡװ}u/ua{}bޞ?ea7ۇa Kǰ=oga ۟ ۣ6l۟ɲ3l0ϼ,;< ;ɲ33/0ϼ,;<&0ϼ,;< ˲eg33O0<-;ϖ23O0~в3l_?ۗ0d柑*6ɲ3l_?gv0>h/߇aavK T3lsayZvqiayZvqi9L5ayZvqi-30ϼ30ϼ39330ϼZ3<0ϼ 3< < ˲3l_̫&sZ2< ̫!33~a < ˲3l_iƙegؾ6qi9LayZvqiƙeg~:ƙegga0Yvqiƙegg~˲3l? ̫53/ay ۏ<*y ̫u^aOWzEKυzEKOW:\(O@O@O/:bAO@υ-bAO`?%\(O`?%bAυ8HצKPڟiK ,qBiK t(P=@( V"=ުS}Q'[u{}d''[u{گt>) V@ǁ@) t(9@zQsoDP=~'Л{EyO7z1GyO7zAyOԔ\(yO7zOMyO`CYX yO`CYX yO`C)YX yρ::z1GyυgdyO_) t( t( b@ǁ@ǁ K@ǁ@(ﹰt, ,q(#{KJ@( ,q(5${OyO`C"Y{KJ!@SXPZIşRM'ġ,?yRذ'{0yy'{0yy'q'q'{0y.,%y'ġ49P~氟'A~R鰟'A~@9~@ǁ~@ tP~@na t t繰: t ~@ǁ~@ǁ~@ǁ~yK ~yK Q~ ,q'G yKP~@% ,q'A~yK Q~ ,q'G yK9~@ t tP~@ǁ~@ǁ~@, - @3, =Zȅ=\XYsa=\XY=\XYȅ=%8[X yO V8[=%{Ɩ\XglyO`C=^XQs ==^XQ8P8P8P8Ps===~CB{{NyO@yυ-zaEyO`=J=% % ,q'A{.'A{KJR'A KR'ġ>, t( Š@ǁ % Š@ǁ % ,!{2'+{'+{2''OyO7hzFyυzFyO7hzFyυzFyO@yO@yυe=%z=JX yO`=%ayρzD?O Ek?O yv"Q'G?ϰ8lI g>>9L~a{Cb^?Haׇa ۟G{ǰ=oa  ۣ6lr3l0ϼ<< k33/?0ϼ<<&?0ϼ<< 3lƙgg~agahƙg?Woa_SfF W H-oه3RfK kӰ:%yԛ]D?0<<8 W0<<80<<8 臩#33̰}g8lߠ̫s1< ̫33/?aj ̫33/?ϰ}e>3:0ϼ3/?0ϼ<_F?0< <@B9y(;yB9y(;B9y(q(AN [~[ ,q(AN9y(AN  N9KPN9K N  ,q(g;K ,q(AN9(q(q(W;.(q(;:.(T;::yE(q([U;.TW7A]@V ,qPWBuu_(A]9.TW7A]9y^(A]uuKսP]uuK ,qBuuK ,qPW7W;(q(;Xk'7yB5'k@jy'Л_jBi'Л_jAj.'q'q'Л_jj}'qBi}'Aڟi.'Aڟi}BiK !ڟiK ,q'Aڟ ,q'} ']}}'лq}co4z7NOOE4:4z7NOO >ލ>^8 y$'ЯAz+@?υ5~@o ~@o V~@P~@o Θ~R)'A~+'A~R.'A~ t t ~ K~@88%<<<֑<<^XsaXPG8<^XXPjH ~E'o8B<~C'ġ :<%d?O`Ci'8Xw{Ͳ氼',{{Ͳ',{{Ͳ'Y'Y',{., {Ͳ'ġ<,PR'A'A@.w-9@ǁ@.w- t(P@.Z- t( t(ﹰ>, t( @ǁ@ǁ@ǁ{<8HX yυ8HX yO \(yO`=;XX yυ`yO`=%8HX yO \(yO`=;XX yρ gyO@yO@yυ:: ⯤_y' yn?O rae?O V\Xk?O Vrk?O Vrae?O 8X ?O V8<%;\Xk?O`Cί?Oa {ðA:lۿ>lNg ?B[S' 3lx۳6lڰ9lp&?ϰ=f< 3lm&?0ϼ<< s<< 33/?_F?ϰfg~ayy-eg~a\gؾaۇ 8L]a3l_0?#hgؾeHa|.3l߯ۯM0>Sof~v<8 3l_!8 s28 3l_0ϼ2}>3va* ̫33b0ϼ<33N0ϼ<0ϼ0< ۷ e:KJN@- XPJPvmN҄(ot8:@%e'P`C)EYl v0R/0(k' $ v0(k' $ v0(A0(A0(k' ., v0(ġN, P =0(A R@0(A @na- : @ǁ @na- t( P @na- t( t( P, t( . @ǁ @ǁ @ǁ B0(šP, .  
K$ i  % ,q0( K$ P @, ,q0(A  K$ i  % ,q0(6 K$ : @^, t( t( P @ǁ @ǁ @^, t N@3 @ 7t:[;([;( +;( +;Ko N[;v(ġ|N o ,q(߿(k':(q(k':::::.(q(q(t]('P@'P߳ tPN@g X 'P`@^; X 'P߳PN9K ,qB9K ,q(AN  ,q(M ;K4% ,q(M ;KJSN@ǁN@ tPN@ tPN(/tşN ?@~((O'P`tzF'ЅrzF'P`tzF'ЅrzF'P@'P@'Ѕr88 =ؿr]('P`@%r8 t ?,Q/Zڟ@o 4? ~}D2lä}ar?}ϋ&ϰ19lJۿ>lEy?!g|gmaqll柑:2\gؾBa|.Q3lߞj̰:%jqƙgؾ%̇qƙ0aqƙgؾ)?L5ay`ka|g^Weg^ݗayiSeg^ayi[ay5^y/ćyUv9L]ay5]ysW0\[ay5\y/2jq/ayjggڟayjqqƙg~9Lڟayjqƙ0yW'kg^ڟT>yW0UyUW k~Wkg^ڟasg^ayiWay5ySjg^ay5q [N9r O)1oO `υ*\22@ڟ@Fڟ4Xs W%*\8pXՁ?xڄ!M(wۄ%M(ۄگ6?WBbЅ H_yJ5X~R TCyR5ȭUCyR5'UCJ5'UC<{Y5×UCJ5ۗUCX5UCJ5UCUC%R ](P`ooTC%R 8H5kUC%R ](P V 8H5X ЅR 8H5X P /V ](P`TCC7=D{+4=z( z$z(J?Mz}ݣ@ڄ.M(;)ڄAP_iP6@u&pڄM(/ %6@hiP6@&-mB~ MRM(/ 8mB&XPzR 8&t mBmBmBI&taL&w^ڄڄڄM(qM(qMRM(qM(+ ڄ.,5*ۄKJ6RM(+ ڄKJ6@&XPjU 6ҭM(T 8mB~M(ġl ,q(E+ۄKJ6@-&mB֊mB\& &mB\& &mB mB mB\&tai&mB%bЅ 8JmB% 8^mB% rjЁ : rjP@Ѕ rjP@P@ЅbP@P 7 : : : 8rmB%bP`lBrmB% @`P`lB% @`P`lB&mB% 8&X P`lB<&tlB% ^fP`lB&{mBmBmB&8&8&{mBWR5mUCuX5wܵj(ЯT reЅV reP V reP YV rkP YV 8Z5mUC%V z'EP`CUCZ5XPkPwRT HP@PwRT :T :T :T :T ](P@P@P_©P@ǁ@S58P5tTC~ j(Aj(A@S5tTC%R 8H5X ЅR 8H5X P`TCJ5XPV 8H5ta)CX5XPV 82UCUCIQ58P5tTCIQ58P5tTC7$PT z+@ЁT z+@PT :T p @/ɨP@/ɨ @/ɨP@/ɨ t tPK K ,qj@Yj(ߢj(UC,Q5taAX5Wi c0la&а:q8l9L:ap$7} 35l &а8lӰ=NwtB( K'43/аqtB< K'43/a K'43/0ϼtBuBayꄆq~ayS/fؾ Ƈ燩3?)[a'0Ԇ9LʠaB|!>T 㳇ʠa*|~ICeа} ~/ 0>{ ƙ2hgʠaoؾƙ2hgʠxƙ2hgʠa0u]ytay\Seg^ay\y :Layu[y oyUk晗2hؾW0ZyUgWeg^UdoyUc晗2hؾ˨ ƙ2hؾtq :Lʠay*q ƙ2h~ƙ2hgʠa 0)q ƙ2hgʠTWjg^ūay)Sj~Wjg^ԷWjg^]arg^Eay)7ayuy _yU晗2h~j.('kjK)PiK~}ܱBiK) ǚi.(+okK) ,qBiK) ,q(k.(A@}) ,q(AiK):@ǁ@ǁ@^)P@ǁ@) t)P@ƭ) t) t) UȚ@ǁ@nɭ)P5դ*(A5 UܚդK)P5դK) ȚդKT,qPMBդKT,qPM:A U,qPM:A5@ӭ):@ǁ@ԭ)PV@@o') ʲ@o'):@^, @/)P@/) @ % @ǁ@/) t( @ǁ % ɆK$PɆK$ 튲 % ,ql(з+ʆK$ ,ql(AɆ.l(AɆv KP FGPBЅ lCPBP _'=5}B t@^  w^Z.(ɧ(TzD+Pyi ;/@ވ t@~(QZK+Ѕe+Pyi ,q(5([.(ġTl ,q@ZZZҊ(/::zD+P@+P@+Ѕ#e+P@+Pw5]XR8@%/e+Pw58Ҕ@~9(ġ4l )@%>e+P_Ni ,q(*[rJ+P`C)R XPT8:ՁA|2(+ +.,+2()2(+ +2()2(+ +2+2+2d2(+ +KJʠ ,q(+K ,q(+K Fʠ t Fʠ@ǁʠ Fʠ@ǁʠ@ǁʠ Kʠ@ǁʠ@.= t t t ,q(+KJʠ).,+K wfeP`A%R`eP`AJA%R8HX eP`Az4 Pʠ)Yݰ2(Aʠ Iʠ@ǁʠ@ǁʠ 
t t Iʠ@ǁʠ@OiŁJ@^IOi@~oR@!P퇥J܍[퇥J\ZtaJ\ZݸJ\ZeJ܍[eJ܍[5J%–*zMAR`CJ^SPXP-lR`CJR8PJJJJJJ8P8P5JJ8P5JJJ5J%-*](Rv*8H5JJX R`J%*](R`J%*8HtJ%aR`J҇J%aR`C)}X8PJJJJʛtI)oRW#&z5Boҁ&z5BoRW#&:&pM ޤ9&](oR7&zCoR7&](oR1&:&:&](oR`I%eSIʛX oR`I%&_qaXJJWJ,U fR R@JSgEqҰ}c0l$No aq<o$N? abza{n$N?Gf?8ia{Zy%N晗80y%N晗8ig^$N晗8ig^<'e' ?(q)Nƙ8iq>"q)N:Lաaoؾd0$NosPa0Ka0~NS4ag~ gIMa ۯ8igaya~{gayS)hgay[TWhؾ/yU:L=ayy晗80yU晗8iؾWgg^azg^Wgg^}ay}SgؾWgg^a/8igaf~ƙ80q)Nƙ8igafgayw$Nƙ8igaySmg^ayφy%N:Lݳarg^ųaySlg^ay5Άay͆y%N%y4晗8i~Wlg^ae0̆yU/Wlg@NI$oqR G&9'2']nY 3`qR S`qR~ d,N d,N oLI[XnY`e%']nY`eJ0ZyU)[}U)7[گ V@^lU ϿRV@_lU (x8̐dMJXfH2C5z(k ܲ@eM!ʚ.)3DYSg@,kP@,k ɲ@ǁ %k ɲ@ǁɚ.)A_,kPɚyU)A %k ݲɚK$kPɚK$k ɲ %k ,q)&˚K$k ,q)Aɚ)q)q)'˚.)q)'˚ʚ.)x˚ʚʚy)q)y˚.T:A@-k ,qPBկ)Aɚ.T:Aɚy)AկKTPկKT,qBկKT,qP:+|˚)q){|˚.To/Uo/))Qʚy)Qʚ)1˚)ʚ.)ʚ} ))Bɚ} )q)ʚʚ} )qBɚ} )Aɚ.)Aɚ}/BɚK$k ɚK$k ,q)A %k ,q)AR1}L^E}L}L }L}L^E~@SW1HR9T.T.͕ʥ \ _թ\ +K^P7W*JRT.](R\\ ^ʥ@RtaNY7W*KJʥ \ ,q(%*+K\:ʥ@ǁʥ@ǁʥ@/r\\ +&KKK^P8P8PtaiWY8P KK%feR`CiZY K%neR_1\ ,q(++IR`C)^YWL*KJʥ@bRXP*XV.8K%eR 7.1>dc >@nyc dctbc dc dc4cc dc >@nyc dc ,q(%Kc ,q(MKc ,q(uKc >c tc >@ǁ> c >@ǁ>@ǁ> Kg>@ǁ>@.=c tc tc tc ,q(KJg>Ό}L%3cS o1](S`|LX Ӆ1`S`|L%18X S o1](S`|LlgX Ӂ1$fS@S@Ӆ1:1:1$fS@S Ob$J鈓skY)ksk=<2cS zw9 >RZ*[yZ*[ZfZ*7Z*KOyZ*KOyZ*Kw ƅjfqZ*ġ|jfZ*qZ* H.Z*qZ*qZ*.Z*qZ*.Z*K ,qZ* HK ƅj ,qZ*Aj.Z*AjKPjRQZ*Aj KEjRQZ*ġTT t j@ǁj j  ,MZ*[Z*[Z*Z*CMTTKzIԅRKzITW@TKzIԅRKzDT@T@ԅRK8H-X TTK8H-X T`RWL"NzGԁNzGT {vw"w*N>ީmCmF>Ȱ;memciam]m[>lN}l{P~l{N~lKǶ|lǶg; -3"8lN}lǶ|3{>߽Sv߽SީA1?ix>gSv.Cީmka{Ocۉc|;1 ۋOﵧy<{ixضg~;}3>m/wcyx>u>t>ƙwjgީqgީmqgީmGawcyx>ƙwcyxUy<{c;5l}la~3>uv<{cYr1^g~N}l;Z~37>߽SneywǶ᰽H1^g~/}3/ԃzy.$rz/ԃ` _ީy݃̀S2N= dzQw` Tޡ{a=Xw,qؽS%{?%R=郔A%zIjZRy+A^e$zHIeFRy9oo<1V>s 4*y`zak~Kvփ%_A~y%_A~>ۃZ:k=cZ{A~y)_A~ݯ ek=8` Z8~Kvփ%Zvփ%_ݯ`z,qZ8~ݯ`zak=kZ_ݯ Ojk=X,qZ8~Kv@tzq_A^ Z:k=3Z:k~yw_A~M~B~`Ag,q;{A"za?X%{ݯ Oqk=Xw,q;8`Aa?X%{ Ok _A~y_+p >d=3,Yz K@X-O%A3` -YKփ>gKVnz XtV`z Xt`z Xt` -YKփ%%ݒ\`zad=X[,q-Y8얬}݃%ݒ`nzad=X[wKփ%%ݒ+d w*pN=Szz+Zxi@أQأ{Q=AE^{ԃ^S`zeG=Q=AvG=2أKJ^{ԃ%%{TnzġTdzaG 
=AأҊ=Az{ԃQzM{ԃQ:GQ:G=MQ)%{ԃ%,%{ԃ%/%{ԃD`C)M_`zġ4dzЯ^G=XPSG=W/أ,q(*٣Q8"Q8.Q8:Qr!{ԃ|ݓ=dez٣CA٣KEA٣de ,٣dez٣CA٣,q(U٣,qQ8Q8KJEKv{ԃ\5أ2Pأw{ԃ\{ԃQ"{ԃQ{ {ԃQ:G=8`Ci`Ci`ïG=XP/G=Q8Kv{ԃ`n Qr/{ԃ%=`nzaG=Q= G=Xۣ G=8{Tnzq=Ayؒ=Ayؒ=j?sT`; N=țSN=; o:N=bSwAt@z!y@ީwAK ,wAKzyTީ.wA^zRy@ީw_yީKw~zлx,q(w_y;ԃ^];ԃS:N=8@˚ A5=)p5=dMeMS5=Xfh5=Xfh5=}dM YӃ~B}5KvYӃ%]˚,qeM8dM)!YӃ%)!YӃ%)!YӃdMzYӃdMA k ,_Ε)p79=X~CAo\`rz7.09= LN:09=ÉA/ arzKwӃ^@*09=8R&t`r MN8&KvӃ^@ǿr79=XⰛ4$Aj kzK2Ț )8$kz/5gYӰ}c0l(&YӰ}"q)doKaa# 󟵄L}yUD:LBaxa<2 Bax~2׏B!Ӱ\rG0>_(dƙigBaJoؾƙigBT6ƙigBaZ0ՌyU2aySf^Ba2{g^ݢay Shg^ay +ay5y%doyU&:L]ay5y#W0+ay5yo2 q)dnway igBay q)d_kq)dƙi~9LBay q)dƙ0׆y]Wsmg^B2lS̫63a* ̫63ڰ:930ϼL0ϼJj<2 O< K4ln0ϼi< [ȩ)p?tr-d [_ B@B UK d,d d ,d ~ LL/iBKTK ,qP--AB XZZ`L@ښ=5r/m[S 15r7m[S /25_mM<cmM?ٚy)lM mM%58tlM%588mMmMmM88kmM83mMmM[mMmM%5]([S`lM%58iq [S Ok58tlM5X [S`lMX [S`lMe?aJwؾ=LFap('ga{nd?Gf?Qha{Zye晗Q0ye晗Qhg^Fd晗Qhg^<2 e4 ?(qiƙQhq>"qU%i ۷F柃lCz!&а}:liچCа}:lA Gmh~8Lͣa1ngayچqioWayچqi:L]ayچqioShg^a oؾW0ՋyU.Whg^+Whg^a{g^ayنayuShg^EayՈyU":Lazg^ayՇ꿌ayچwafgdƙmhgayچǚayچqiimhgayچqi:LayӆyM晗m?8 ϔPEtTD>PECUD UDJECUD UD>PE8PECUDUDJECUD%R8HEtTD%Z" ,q(A*}(A*K" ,qBK" ,q(.T@oDc ƅ> c F>@o\c tcP>@o\c tc tcP>Kc F_K81HS7h1z@S7h1zFS 1]Xd1>:?a09훵a_?ϼayؾc>LΥanca{6폺\?b?sia{"+ay9\晗sig^Υay9sig^Υay} ˹ѹ4l0ƙsigΥaa8LaWiؾ0Y˫4l_j?uZU3yK0>_Uo2{aj/* w)5l?p ӫ43O0Ы艧W)Їzz}W)qWBy}W)A^y.W)OJ%*8ȫX R*8ȫX R`J%*](R`J%*JB҅j8~ K~Ϧt)t)/.oR.łҥ@gS JbARN.](R_,(] ډҥ@XPta Y JKJwҥ %] ,q(!KK$]:ҥ@ǁҥ@ǁҥ@gStaYJJJMR@R@҅XdR@R߳)]ԋ,] ,q( #KKJҥ@SXPF.ҥR7t)o.8ΑK~t)ġ,] [K%}dR`C) YXP:H.%ҥҥ@ؑuM|)wZtaXȷ+{l){l))).,}l)ġRkPRL)AN)A@ެk:@ǁ@ެk tkP@ެk tk tkTTk tk k tk tk tk ,q(KJEź.,K8HUuMuM¾uM%58H/[X ]S`tM%58H/[ttM%5z4k ,q@}.)q)q)_ط)q)_ط)q8_寤)ߧ)п[yز)ߧ))-.,myز)ߧ))-Y/)|.,mKr a. K.@nr ,q(mc)+w.,_|)+wy)+wr)sK]S W5e]Swo5rn]Ӆ5zM]S`CuM/>[XPl]SW5H]S@]SW zAR`o: ](R} : zA҅ : : zG҅ : zGR#H҅ 8HX R 8bH^pP8P8P}HHJtaH^P#H^P HR H^PRá);F B c)FR@҅ zMAR@R@҅ 8HX RwJX R`H;JǍ(%476IƒOXɺn#8PH>SHn;Q;HH>PiO=l&Ұ5lozgް5lo&Ұy7l 1lǰ=Ga t\ c1l rI4lİ=>0׼H&a sK45/0׼HI45/0׼^\ 1 /ay )@0 aVSa{+pfS{JIr4l_Nh! 
n}3Jô4l_LƚhkayJݷa{{kayJ.0<%GX ;Їi ik^;Hv۰<5ô~45ay sKrtu45/Ѱ<5ayIayUay- skhk^KFih]ׂ0׼֋?F0<%G˰}25Oa cSr45O0<%G0<%GX g*Ir45O0<%GXEay sklk^ôa6l)sklk^eilk^e\+T65/Ѱ}85}ayIay sKr4l5ay- sklkޒ@V%G"oQ Kߒ}~Y2] @f@}~ Xr(XrK.Y`6KAX х ,qY`- ,qفm@ ف Hwۀ) HyۀI Hڟ@{l@ @|l@ @ǁ e@ ,q)A.)A9)q)qBd:4 :4 r j҅ th@ @ǁ e@ @ǁd]d8h'+A;Y%2 rgR` Hʀ H%2 8Ȁt H%2 8Ȁ HʀX R g|6 8ȀX R` H%2 HR@R@R G~6 ](R@R ~6 :4 ](R !6 :4 :4 rhR@R G"6 ](R`9)Aj8Kkq"p`KX R g6 8h8A%ZP%Z,q"p` Hj8A%Zi@ th@  h0 Hڀ0 HҀ Hn Ӏ) Hʀ) HЀ芧B=%)OR$4 :4 zJBR@҅2 zJBR` H%2 ](R` H%8ȀX R` HҀX R` H%2 8Ȁt H%2 8h%gӀta3)Ч}Φ)q)q²udR@R4 ]Xvl@ ,q(G6 8 $})d҅edR4 ]Xl@ dRO4 8$Kv H%eA@i@6 2G8z@#e=R`֊H>0׼HQ0׼VɆGO0׼ȆB65o=R z}`·)o=R> w,.Y 3`=R S`=R> dG dG wLHj,Ad% ,qBm8h,0,qف#rc=R@=R {#rd=R #r d=R'9) 9) 9)qBKG ,q)Az G ,q)AUH[X8P8Pt6Y#]ͪ@ǁz@G tGPz@G tG ,qfՅڬ ,qfU`6KYX =R y#8hB9߰)Az.)Az9޳BKG zKG ,q)AzG tG tG z G tG z@ǁz G z@ǁz@ǁz@G tG Hz G ,q)sK{v}9)8h7A%#r6h=R`v}KX] X]8Htv}KX]@NG:z@ǁz@PGP[n SHn St HZ0HPtHP) Hx.)S]#zJB=R@=R$#:#](=R$#8HX =҅#8HX_H%#8H!H%#8HX =R`HJX =R`V=%B8h%PXP,V ybdR`CkX)ġ6Y;Jb@@֥@B.DgR ![.,0.DgR .aR .aR .aR .a҅eA֥@68l] ,q([2.](R`Cٔu)A֥.cR`Kٺt KKٺ8кtKٺ8к8кtaٙu)qu)>[ZZZKΌK%eg֥Y.,;3.8[u²3cR l] ,qu)A֥@. غX R`K%.8Ⱥ%[.u)A֥@. غX ҁ.rI֥@ǁ֥@ǁ֥ e] th] th] 䒀KK\u)qu)=e]:֥@l] ߓ֥@l] o[{Z$Z.,A.rfR oۺӺaK\u)[.,A.8ȺaK [9=u)J+;IR gW+YoR +(V db@ή,V tb@6-V\X)Эh٬Xrf@wai t% @}ΦB] 4h tۉf e f@h Pf e tۉf}Φ(ġɷ(lv(qr@)''g{'g{}''@( % t@y) t(P@y) t( t(PK$ t WK$ t( t@") t@0( ) % twgp;La{b?؆ϵab޺8La3mH'`a{gI3l k^'ٰAv?Wq7l sKs?\ sK35/a sK350׼?akayb0 {OӰzVjgބ=2a 07)g?밽:l3a{0 cS3lo cS35Oϰ6l< cS35OaZƚgka{0 skQfR7$svd!35ay ~r0׼Vc%w35/ϰ<5ô35may sk0- ;\ڂ̰y(ƚg>GQ)9Lay ~)ƚg><ƚgka06 cS35O0<?iik^M\g暗0-3 dž\daZc0׼Vay/ sK3l skuik^ank^[K\ 'uicik^J\Vƚ'UoO>d[ҷ'p3@? 
?jY)')'p22 3@ Xe-+8HsKX O>,qвR@O ?:+nO G@!nO @k 9 'P9'!.'AKdPK8ș???j'q'q'#R.ԆO@O z:\(O {:8hBm8h'A>% ,q'>Kkq'Kd ,qBKd ,q'?.'A@m ,q'AKd9@ǁ@ǁ@mP@ǁ@m thP@NIl th th |@ǁ@KlP94'A j!5KX_RKX O 8h!5A %ZHP %ZH ,qBj`?j[5A9X@9]B rhO;tHO vKO!t\(O;tzBO @->A>8s>X O`>X O`> s>%rz.HO`>%r8X υr8X@:P݁%mv?%ej@KbBT('JiGPO: OPO: CP` ](PO: FPO: ]X6, IReЅ 8U(KK$:R@ǁR@ǁR@)lEY*BBB>S*8P*8P*taّT(qT(J.,R 8e)KKʾB'P*XP, R9ePP 8)K}T(ġPY*B%eRKeP`CYT(JBB'ZGéuDm#XGéuD|XGéuDXGGuDXGGuD|XG^uDu٫(ġXGttD%e:KruD%uiс:uiQ@хuiQ@Q@хe:@ǁ:@.# t# t# t# ,q(>8 K#lXGXP6|# d': ˆuDZ(A:`Q`tD%8HGX Q #P:`Q`tDRGȭ.(q(q([ :r gQ@Q`{JGt uD7ZG'uDYG:r gхeY:@# ut{9(r`хeY:9²dQ 8e%.@JݝT(+K٬T(zK]C YoP gW FP ]Xp[*2B@j OOt\(Ox:|/O{5t\(OCtWCO;t\(O{5tOOt8;vpJO@O@O{5t\Xkc-M<6:H4XKO<4XKO4:H4ci.M<>sL<M<M<M<M<M<X O`L<~X O`L<R:e;hNONNOW\(NOnBmϯalcc[c۳cۣcc(v>=>=>%c[>Ƕ'ְ]y?ǶǶgհ]?UǶǶ\lg.k~|5v>e;v\lc1.۹ m!ka{BǶ^Ue;ۚޞ/ e;^{ǶǶǶ lc[3rؾV1ƚlcyv>ƚlgؾ111.|l}5|5o YG{s+:s :ۦzs9sﲝmcŜwǶ\N\lc0nؾ111ּd;%yp;<—lAd;nc1܃,vd$ Wqd$y)lm ΃d;ncvUKUKUKvNྊ`þ`.ypJ?XⰯ ٻbA FXⰯx>Xkz,q5=]`îy#izBMσH>ѡ4=g M@hzP3AP 5=g Mσ@]G(<躅APy#hzhzwMσ@`îyaKvMσ%AO 5=8욞=Q]`îyaz^=A7A zЍ9x ޠ%oЃ>z^=X{,q([]=X{oЃAoP`Y7A zq7Aޠ݉7Aޠx˺A:=NAeKޠKޗA8/ytޠKADoЃ%e Lޠ}7 &oЃ>`CY7A zġ`CY 7&oЃnptA2=ǁÑ -P`Y\qA8 2=XUC-Ѓ_,18X֘dz)CJơdz)dIơKNC$Ѓ%eIơ8`CYnq8`Cpq8 2 qAơمqAơ8 2=88P`YsqAơمqAơt`zġ98`CYsq8X֜dzġ98 285'PBơKvPnzC ,q؍C8ơKvЃ%qA.`8,q؍CrCơKv@C:0=88t`zqqA.`8Ѓ8Xdz=w@SơqAdz#Ntaz<ޕCr'ЃY/Ѓ=Cr'ЃCrb!P`ٻq8 x2+BơKޕC?Xȧ <ș|:?΃lb˧3}AU`yЇjJ AayЭ.jt?}Aw~a 5lC̓P30PX1J_`?355:e<35e}̓>#A_H;@_kt}vA]_<8@_}Mயya mςpn},Z:ja{wc?gaQ09jϦai^Bt5Ұ%OC9Laykƚf>ƚfka0 c_35O0<5ick^\暗0-~ ۧ\a? 
skck^0׼v=冹15/Ͱ}$75 aykӸô15ݎaymv c_Ȫ&p< d_9\ @  __kk9\ `ͅZ,qbG`;K䯹P%Z,q&pK888_s5˺&pv:enM KnM Kk%k9a&pq52_95N5_X M`5%\(M`5%ڣt 4@ǁ@ǁ G8_8_5j"q&Akk.&cAkkKGq(KGX=Q8_)5%ڣckK ,qBkK ,q&SCk.&A@ ,q&AkK9@ǁ@ǁ@NP@ǁ@ t诹P@W t t d@ǁ@NZ쯹Pk9n&A kK䯹PkK T8h2A+_X M`5%\(M`5%r$ć:r.cͅK ́r h M;zB ͅK M!hP@Bh tBA-4BA-4-4BA-4%8Bs,4%8B -4BX M炴8BX M`,4%\( M`K-4j{;AہXBXi Ă@GہϿ߲sv;r\X tΜ@f̹,ٙa Ѷߖ@Ӷ"m;mi Aʶ6"m;3v7m'Ev}Pm'mDv}Pm'ġ,ٶXPvl ,qm@vvv7m²fNO::oBN@N@΅e9Ͷ@ǁ@Mh۹ٶXPl ,q(jCN`CVm''WvKʚm;>ҶXPl ɕfNO8 6vKm;%eͶ@7rh2cN p SVd#H|XC dn@֐@~ ²f7P v* bv]XV #n@>R, G@l XP d3n4f7Ѕr81K ,q(cv8 f@ 8 ޸@@ ޸@@@2(q(q(q(ġ XPV ,q²Bf7P`CY!(".,+dvrnK nK ,q(An@. t@%rrn(Yt:t:t](7P@7P@7P Gv:tri7Ѕen@9ʵ(O9n(\{9n²f7P Ǎvrkn@Ǟn@ t |n N@%rrh7Ѕen@W ,q(;mv](7ЁXK-N<;Z tZ@6έ Z tZ@k tӓZ Z@7= w<8%8HXP[8Psa;iM4@ͅ77>|17>|77X4@LMiP@ǁ@|8|s72&q&qBoKd ,q&EoKd ,q@o}ҡ&'o&'o}ҡ&'o.&'o_0iVgaj|Td?폦a{% y^a2 J4l &Ͱ|;l?۟E7~z05/0׼7|350׼\21oayopf~n~t_hƚfka{Clkayoaayoi9LX4 c|3lH} T0lo~ [\Z(8L\&.0׼7i`k^k\2 {\ 暗fa0׼5paym sk_`>c4 c|3lx X45ayoiƚf>ƚfka0-9 c|35O0<7iak^ \l暗05 |\iFaZh)5ma8b>&0׼70׼e䆹浿05/Ͱ}wv浹05ayoY6@7,}o9\> d||soo9\ `M `M> dlPk %Z[,qB`7jm!Ak %2c-88Ps8l[Ajq'S'kqك''kq?ᓵ8?Y PZ@ Z@GZ ,q'AZiq.'AZ:rTb-N@-N@-΅Z!t t TZ B8Pq88Jș88%Z!P+%Z!,q A`VK Z\(-N g*8HX -΅8HX -N G\(-N`8+ZX -N`8%8Hs 8883Zs885Z8Ps8X8P8Pȱ88Xs8%rc-N`8Jȁ8%8Hs8%8Hy8%8HX -΅8HX -N`8JX -N`8Xs 88Xs8n Ss 8Z08Ps8n S) 8>jq.'Sjq]}zJB-N$zJB-N@-΅zJB-N`8%\(-N`8%zhH-΅8H!8%8HX -N`8JXvzbA-΅Z,qjw'8h@jq='q'q'KGU6 u} P'M2 u3P'] u$P'Ǡ߲fNxvMNϟm'OvӶ$m;=7vݎm'Ov}m'Iv}m'ġ,ٶXPl ,qm@vvvݎm²gNĴ::CN@N@΅e!ζ@ǁ@ch۹ٶXP6l ,q(q?DN`Cِm'bvK~Zʞm;Ѷsaٕm'bvKœm;>ӶXPl ,q(s89vmڶ8ضm;m;5:v@@@@, d, d@ jAYXP,lYXP, djgqP`Cٶ8(ArgqP`A%eKA%]jqЁ:rz`qP@qЅrz`qP@qP@qЅe@ǁ@N, t( t( t( ,q(x8 <K$lYXP6, 0 A8(A9L8(AK$ ,q8(?](qP`A\8(A) AAAJ8P8PQAAZXJs ?<[?Z?/?Zsaݳ'OaOKO rY@|YX O \Xv gρBOO?P@7U dݞ@"P@ )@P@" t@6 ==%8 \XdڶS$m;lmr%Ӷ@ǁ@6ӶS$m;lm'Пʹ:cѶS$m;mBvv!JN@΅C@ǁ@ǁ e ,qm'A@h ,qm'qP'g u}vP@ u}vP'g u}vPB u}vP'p?;ŋBa4_(ƚPgkBa{jkBay uMa5ay u)9LX cS3lz670lR {\8L+\Z׺0׼:iW`kԼ&05%ay uayay- sk5`k^i/`>R0׼V?F0<:İ}215OaƚPgkBay uay u)"ay u)ƚP0םay暗P0]O↹uk^7EaR?l8 
o\?5/ΰ}65\ ۧn\?5/ΰ}vsF0׼c[ȪP'p-- d[Q[ B@B u?P')P'p22 @B u?AKt?AB u?AK$ 'ρ%XS<:kP*@6 I*@v *@v *OrleO WVF.'+x9'A* ,q'A*x.'A*]t *@ǁ*@ǁ* u?q'q'\x.u@ǁ*@ tP*@N t ,qu u?AKt?AK *]P*@Nc ,q'A* ,q'A*@#P*x9'A*xK ,q@xxx9Bxx9'qBx9'q'q'Kxx9BxK *x.'xK ,qBxK ,q'LxK ,q'A* ,q'A*x.'A*x9鱊@xx9B-gLN 燶gLNG(\(N{ƴzBN e @m;ж m;ж8жsl;жX N`l;ʶX N`l;(Ҷsl;%zHN`l;%8ȶX ΅8h;A݁JжsVK L݁%Z>@%h th th Ҥm'qjwO;}'Ѝ9ݧ'Нݘ'G/B~OO4'Z~' ?;~'Z~}'MO~}']~.'ġ,X ρ4:4:4Cυe'φ@i th th t@ǁ@ǁ ˆ ? ?saӳ'ġXPl tOgO48=~}'ġ,C8 ?%eφ@i ,q(k|68M>~K2 ?Im dl dOʆ@ ^ ? ? ?88C ?%?~~ٓ'ġȞ ?%eφ ʟ ?%eφ@l ,q(6'eO`C'-Z~K ?XPm ,q'ġlX O [6HO@O [6:4\(O '6:4:4\XVm th †@ǁ@ǁ@ǁhO`CY 'A J ?%e%І@lȭ"~Kd ,q'<~Kd ,q'A~Udυ28ȭ"~Kd9@n88s ? ? ?8i ? ?ٲ ?- ?- ?@~9'K6:4riO Zȁ ?@~Kd ӆ J ?XPVm9@s<=nƹ==> s<=n ==s<=> N!=='Эzz)'qm'Пi ',m;>ѶOXv K΁@Ҷ袦m' @h G!m;ʶ8жBvv.m'::\(N`l;%DN`l;m;m'HNi wHc4 cӨ3l" gX4X4 cӨ35Oΰ}r05O0<:al?5O0<:X4[\c?5\20׼sfaX?5k\?* 'e\Q?5/ΰ}65\2 'k\G?5/ΰ}vs0׼cۨȪQ'pm dۨqZ F@F u}>Q')Q'p262 6@F u}>AKt}>AF u}>AKd ܧˁ%>X ΁4-:ʨȦ:,qQ'Fuu.Q'FuuKt}B],q],qu9|Q'A/Q'uK~Zd HFuKd 0F e ,qQ'IuKd ,qQ'AFuQ'qQ'qQ'Ju.Q'qQ'SJuu.Q'cuuu9Q'qQ'u.Q'AF@Nql ,qQBu9ʱQ'AFu.Q'AFu9ɴQ'AFuKdԹPFuKd ,qQBuKd ,qQ'uQ'qQ'u.4@Js9"4'maJs=%4BIs4'SJs]C\(iN$( Ҝ@OI( Ҝ@OI(͹PҜ@OI( ,q4'AҜ % ,q4'AҜ@ )͹PҜIs=44'AҜIsK$ ,q4BIsKX @(͹Pځ%܁ҜmhHiN::ciR8P8Ps6߲gO;ܿeg. ֞]hυr8BxK ,q({v8ί]<8ί]<]<ί]<]<]<eBxx9'q'q'q'ġ,XP ,q²LhO`CY&'sAx.&4 lK I`XjHRC&K<\(M`! lcM`! 
l&K<rlM\(M@M@M G:\X Ď@\ Վ@ Վ@瓎@,Qȹ5?OrȎ \Ў@^A&;j.,Kvv8Qȹ5%;;j9&ġ,YC>jh.&]jh3& jh.&=cjh} &M2jh.& jh$&П+@ Md/8%Gkh!J M@RC=54&H M?D (54.jjh~&2jhjhjhG M@ ͅ@ǁ@ǁ ,q&A@ ,q@f5NLi9@4qf5N̅2kkMf?-_3a ۟Ű"9laC0d폈a{-Kq|?Mf^* a{r~5/ Ͱ:5"0׼s0׼a0׼s}hƚf>Ci9Lw·iƚfka`kayZh#t|kayZhi9Lˇu|k^wˇe9Lˇ#ay*םt|k^ʇ0Tύu|k^atmk^ȇeֆu|k^aL0ayƚ&Uo M>M dBҷ&pӴ@-4 -4x 3` M S` M>M dl dl ܧiL-4x`.8x`,4x`.8BKd ,q '_VBEM 'Vs@'@ ̊@ ̊OrfEM aVF.&1+jK ,qB)jK ,q&A ,q&A*j9&q&qBݞt t Ҋ u{>q&SH+j*j.&H+j*jKt{Bݞ,qݞ,q)j9&A Ɗ)j.&A)j9B)jK Ҋ)jK ,q&A t t Ҋ t ӊ@ǁ LNJ@ǁ@ǁ@N3 t tNJ ,q&#+jKP@y ,q&A ,q&A@9 ,q&A)j.&A)jKP)jK Ȋ t ,Ȋ 9@ tϘ@PP@ @5JQ 5!*j=B&#*j=B&#*j.&#*jK ,qB)jK ,q&E*j.&A@O ,q&A)jKPہ%,qvT\moz@EM`&s*j*j*j4 t tPہÏ߲?hM{?ڟqr@7) tor@w?( t3r@( t3,|7>g|@i OMxo.,k6DM4MM4MMN4\(M`CY&Ai th th tۉ 7n|8|8|777}Boov²UhM`CY,&ġ|7%e@ h ,q([6e@M`CY5&-oKʾ7n|XPm ,q({68CoA|8|77-Dooo.&q&q&-o.,+626fM`CL&4oKz7EoKʎ7|XPm dhM ;h68Eo.&ġ-|X M`C]&A@6m9@ǁ@6m thP@6m th th,0|8|7777%ehM`7FoK ,*rd˿\2_E.@n%s\2_E.@n%!]2%md9K&)d.K&C`dd9QK&qK²f]́ri]L rf]L :rf]̅eͺ@Ӭ Ju1 u1Yȡu1XsaYx.&A@Ӭ,Yu1RojjT}ĤF% HjTMMJ;.Ԩ\(JQ tDži-{$П<h- '%>ZOZKCkɁ֒@Z$WOZK}$q$q$П:\(kI??h- th- th-P֒YKKd- 8%#{$C #{$Пvt\(H? ? a{8}a{E r^&Ȱ=Z=&Ȱ U8l?_ Oݰpq. 
ۏ\r ۏ\*|#\r s!<5/KȰ<5O0ѯnKKoDȰ̰ذ5yn#b79l?&Ȱذ8FGDȰ䰽1lox c=25O0<#0<#Xt [2+aXt c=r c=25OȰ"a%<5;[/;`\|sr0׼s=rsV0׼#0׼.s=2l>s.a <5\<5K0׼.sﰽƚ{dx݇9L~ƚ{dkaayG:L}ƚ{dktwk^{uwk^tw>g׽ay=Lzuwk^z7uwk^alk^ydžuwk^ad0ayƚ{$UoH> d=ҷ{$p3@# #n 3`H S`H> d d gbL#n`8n`#n`8=K ,q{$p<=$33[K#Z%G$s[K.$3[K9L$#4[K?)%Z_P֒@l- ,q$A֒ e- ,q$A֒YK.$A֒YKZK9$q$qB]th- th- ֒ u;q$K[KZK.$CL[KZKKtB],q%],q%YK9Ӵ$A֒ e- ֒YKK~<鱵$A֒YK9㴵BYKKd- ֒YKKd- ,q$A֒i- th- th- ֒ e- th- ֒@ǁ֒ e- 4֒@ǁ֒@ǁ֒@Am- th- \֒ e- ,q$![KKd-P֒@Nl- ,q$A֒ e- ,q$A֒@Hm- ,q$A֒YK.$A֒YKKd-P֒YKKd- ֒i- th- ֒)& pb@7)& |b %& tCb@W(& tPLr$PL2$I=_$I=_BI=_$AbI.$AbI=nBIK$& bIK$& ,q$Ab X-hz@1ɅZ,qЂvG 8hA@I=Z$q$q$Kb@ǁb@ǁb 8PL8hA߲yhkI{e֒ %Z%%D[K$ZK -VAIT_J@©4 &W²}hI{RTNITEKIT'EɅR8mD+MK49J@ǁJ@ǁJ@4,&Zif&&&IQi8Pi8PiraYS$q$=)*M.,ˊV8}E+MKʢ&QiXP4 tJhI T8 F+Ml$ġ1Zif&%eJhI`Ch$н7*M&&Yi8XiraYn$q$qB)M+M+Ml²hI `I {oV8G+M{$ġl=Zira{$ġ>Ziޛ&%eJ@ެ4 ,q(KVfI`CلB)MK6&%R8H+MK4 d+J4 t4 d+J@ǁJ 4 d+J@ǁJ@ǁJ ^&&Xi8Pi8Pi8PiXP"4 ,q({V8Hiraً$$=% ,OIBbIȅKKB9$@JBbIH :\(IH :rgIH@H [r.hH reH :ereȅe@N 䒀= N= @1= lraY!$A@NȖ= qadoD߂Fވ@/~   ~ _z#7"FuEoąZ(7"q7"Я+z#z#z#.7"Aވy#FC_?rnC_?NC_?\(C_?n/SY0<|lKǶoɏmvǶd~l ǶVtjc[(톇mVoc[?qnxVj*cۗǶsǶǶaʏmێPs+?߯UoU~5>s`xk ێ~;c}ȁmw>[!>]5vc+t~)c|l{{~l{y~l{wۮ|l{q~M!>}53X0C|53X0C|l_1<c jǶ5c 1<+c 1< /[~5_vycUk[sw,?oX~5!+?/W~5!>ݝk~W1nvmcJF\}ʏ۔sw)W)?ݕk~G1~c5`kfmw>ݐkfaɏakfav/cy!>ƚcەa}ɏakfa_k~&1~Ic 1l! \ȏˑs7#?E~l1~)c m 1~c m1~c m5l1~ c X2C<Ȫm _fY2CCVFڈKʪ8ڈF<8@mănXAXxm h#txq6A7xq6Aڈ(măamD`d6(mă%eQڈݵ6(mănS@`Cl6A)xš7JvF Ƅ1a ucƒ~]`LxKƄcƒ~]XȘcƒz1AƄ1AƄwcƒ%1AރZA=xЯexkރZ p<2`7l;Lna{Wְu5lar 3la{I ?Ǟ6Ӱxϰ 2l;L7wΰ1l? sa75/0׼\r1 ay \~8l_2lm55A'°}~./Z cM}>lU2l\r" / ߬z$0SD Xt" cӉ0lA25O'0<#Xt" cӉ0l72l625O'0<>0<Xt" /\G8l saB85 \>85/'a;85\r" / sםay}t]pk^uUpk^=aEay]a?F'0<0<f0<Xt" cӉ0l15O'0<20<Xt" cӉpns0׼nsˉp w s0׼n\75Ka0׼n sˉ0ls s0׼0׼ sˉ0ln;~\75}Xv"DZ,|;Yv"s}!B] dDd DZ \+iB] ,q9KDPN9KDǼ%r"8ȉy! 
;#ЉpY؉X~99B9I?DN D$vv"rng'B~B99!AN9.!AN9KDPN9KDtDN@ǁN@ǁN uq7q!q!K;.@ǁN@/DtDPN@1DtD,q uq7AwKtq7AwKDLN9.!C;KD,q_7D,q!;KD,q!N;KD,q!AN9!q!q!sO;(IG&ңq_%2W|: ̌./ԝDdw":B݉nDtx'B;Dd_w"\;Kt'B C!Aw"\;!%!Aw"\;Kt'B`Ddw"8N݉X;.ԝ%!Aw"8N u'B`D,qНl Ny'B;EN@WD1Ddw"̘w";] B݉ Dt%N@Px'B[(!-މ DPw";Kt'B`DPw"8N݉"DPw"8N@wy'B`D,qН%ڣ,qНj:A{ԁ%ڣtw"\=Q{;KG} D;މ8N?,Mމ8N@ǁw"\=@ǁw":!qW{„@„@„@P)0!Ч?eW&܄&&"/L1"/LlM„@„@| ~WxaB„@+0! /L-„E_0² &;/LC&;/L,q(0@^8„@ǁ&4&\XV}aB0xaB ^,^8„@ǁ&\X}aB } .,k0!ġl:„ } Kƣ/L/L,q(k0!g0!ġ>„@a„ } K/L,q(0!ġB„@„_ _#=_8„ Z/Ld|aB .ԅ /Ld|aB 0|a…eG&20!Gz0!ġJ„@„/ .,0!ġ,M„@„9 y K/L䑞/L,q(;0B]XP(}aB`.L,q(˔0!A&& ^n_8„ uaB O}aB ^pa٨ /Ld&:0!q /L,q(0!ġlT uyA`)j]^ȥ_^ȶ//8r× ]ԼB]^OK5//d{ʗ ف@~@:|?A ;P  5'O^' 't~@:|?A  ²dPaF?ЏqJ݄R@?)wJ?ЏqJ?ЏqJ?M(Ҥ?M(3R 0VC@::_(`,?JKYF?(gJ:JF￞RaWa7$gha{e k^W&=<԰Ѱai^M Oa46l0l0l>|aOp0׼\2JayJ4]7ىa$0/7Pݰ}hkGu}bb|醹t}Xb15475ay s2475ay]0lz@0OyM sk0 sknk^cp\;L3p!ay skm>x0?@+}K!TB)?'nPFnPFJ%R8H%R8H JX T8HX `KP[ԁ%ڢ,qu{T_-mQ@`?=***4ttP[ԁjYTOYe>U); & Lj ?y@yeҗ܄;/]e~Wxeoїw.2@|eE_ ?y@R ?y@`Cde2@ǁ: ЧY ²}^82@f2@ǁ: ²^,^paYse%eї8eG_ȕ_XP6}@O?x@`CY{e>e%eї8H_^XP }@`CYe%eҗ0B_82@2@\X"}@ _p.d|@ y.,; qe< e%eUҗ0З8}I_pa٘e%eiҗ0З8I_@_XP'}@ }@`C١e2GKt@`CYe% g @^82@2@ǁ\y6^82 F/tx@ [E qe/tx@`C٨eJ.Fa~ ;L׿Pz@vtPuU<ɷ>]}T$ߪ U<ɷ>cV?~@\al>W~@?~~+qE?|#L?t!Ey +vhy g"0d 0L{H3pxaa{e k^WϾ4i3l kj^R)aari^M/ܰa?_5Vsj5S350׼濌atka aAχa{x>815H/vf3X? 
Ć aa|?ȇ}>lo skdhk^_F0lc}>5O5Oa ƚ|ka{0  sk$h|5t5 45Iay0׼>x535Sj^?\9L?\0׼f~ϰ;5yayM ۹ayχa{'wk435O0loayχ>ƚ0 skgk^<\rIa{vk^c<\9L3<\ΰ45ayχay s}>lo skdgk^a{0 skXgk^:Xv>{=,|Yv<BM2v2v@@F4}~&uK>,q,q!L<< l}%vOhy>q}X y`}X y`%r_(y`%r:twcy@y@ t>t>d 568}灎}&灎%r_(y`%r8}X y {v8}~l}X y`}X y {v_(y`lt}X y`%r8}~ 灎灎{}~灎l~}8}~}8}8}>灎 }~%r9dy`}%r8}~%r8}%r8}X r8}X y`}X y`l }~ 灎"}~ ><@}Ky!tB<'n}Fn}F}%r8}~%r8} }X yt8}X y`6K>P%,qІp{t_ m@y`6<=4>t>t>P灎灎}8PCGWOڴ=G_WOYݴ=gW$>c²i1zU~~>w} ޢ~>w@?>w}Er;>͢<ЧYt:t:t4 J>͢t>i5GKʦ%e@}XP6>%e@~}XPv>%e@~}XP >,q(v8]H}H/d>d>P@@@~}~aّt>t>t>,q(Vrk@6BRgw d@XClk/,+~@N([}࿥~IMow~IMw~IMw ~@~@w~IMw&߁߁~0R}ai[8P} ܁~} ܁lȖ܁l~=ۇɳ=l d^ipcư4CŰ*qaceu0j ۿl ۿk ۟(oZ/Z\׈0׼\l2zayzK{=ticѥ=l=[F0]>aIb7ieL.a{q;l&85ay]Xticӥ=lo.cӥ=5O85O0<]ⰽ85O0<]ڇidk.ay}4d25a{dBxa.l0׼&K>Lc%\*暗K{3<0׼\v0׼FI$0׼H5E254B2lsk~dk^#/K{k.a{/dƚK0 cӥ=5O0<]~0<]Xti[iNdk.ay>L"\p0׼\ڇi2dX0׼BL0׼&B52l skdk^.a{CekԼ@K{K0׼\6a0׼F?dۥ7PYvi({%ov `dd  ]ځ]ځ{?1iKBM~8ȥX v`\ʥX v`\ځ{{=A.K;M@ti_(v vi3~&Хf]ځlڥȆ]ځlڥ%;vihv>cp\ځڥX v`\ʥX v`\ځ%ri_(v`\ځ%ri:tiwcv@v@E ttd. 
58Х]ځ]ʥ&]ځ]ځ%ri_(v`\ځ%ri8ȥX v {vi8ȥ}\ځlإX v`\ʥX v`\ځqڥ}\ځ%riiv`\ځ%ri8ȥX ti:ti:tiiri:tiiv@ridv@v@v vi:ti/dri8ȥ]ځ%ri_(v ;Dvi8ȥX ri8ȥX v vi8ȥX v`\ʥX v`\ځ%ri_(v`\ځ%ridti:tideA]ep@:[F]eA]v2.;н.;н/.;eA]v`tJX ]v`tفnQ}tف%eG]v`tف%ڬ ,qfm`tj6A%ڬ 1?uj6A>.;ARc~4tt˾PuفuفuJ8Ј_F ~؁>8)+6bhF@҈߂4bF ˲؁>@ӈ4b;/<Cӈ4bG8؁~ш4bLF ep}&E#vϤhththD}&E#v@#v@#vϤhththľ)ڈ8Ј3)/,ˊ6b8}EKʢ؁;ġ-ڈK؁>;ġl0ڈK؁>;ġ2ڈXPm,q(6bF_h#v `#v ldlľ,7ڈ8؈8؈}؁؁؁<²h#v `#v l,q( 6bFh#eFh#v l,q(6bFi#v l,qiq(6b8mHKd,q(+6b8Ȉo;q;6b:4b_(#v mththľEڈ8Јn؁؁؁؁%e/J@[5 +y keJ@Z|࿥|B?jtJ@vڬtJ@?jtdJ KJ@?jtJ@?jtdJ@?jtJ@ǁJ@ǁJ@Y|ai<[8E[9m[>9Զ6_(ks Ƕ6kka6?T훪a{ cؾwgȰ61>WpWŰ}`>R0lr ڇi`}jujmjej]jk^\#Cqk^_Fk85Okau}2'm̢y듶a{_mV7>i{ӼŰ0G _\dİh{ƚy~?lz c|"i{ƚyka{kkayڞm415O0 skak^40lok skak^iak^\u7\505/25ayٞmay7 s9': ΁>)²isF(t>B@:G8΁~)t OeB@:G8΁NsC:G8΁>B #B@IQ3)   }&EeB@IQ8P8P3)   /,:::LB >΁%e%Bhs g-t,q(:BhsO?(t,q(K:Bhs)t,q(:_X6-t,q(K:0B_hs `s -td,t/Z8X8X|΁΁΁<²hs `s -t,q(;:0BheBhs -t,q(+:0Bhs -t,q(ˎ:_(s`CYx9ABhs`΁<@  y6ns@:lB@ǁB@ǁB ꣅ΁΁lY8P8P8PXPVllցl-Y6ÿ?Ҳ>/ems`mρ~mρ~mρ|aiYmρ~mρN6mρmρmρmρmρ\>Ҳ9q9[(=Hs {:=@b|lρ-b= Ƕ式mmmE> ۍ۞moǶmvǶc;mcLǶǶ}c<Ƕqm[zZF ssswe0:l{}5ϭac]؞?glc1.l am珱vImmm)vwǶ&ǶOmsmًwe=5X=l}5X=lk}5X=l}lk}5X=g,>ƚcy؞?5Іsۺzgssw}c}cY1>H1n{'kRc[c}|c}xb>;91>81>6/vlc %1 >[j~~k~}k~=l >ߧ>g>ƚAVlnY=?җ[y/ lρÃ̀l2=?dd{~Q s>`n{~a=?X۞w%ya=?8 '=?m?'lρAOd{~p0xбAPd{~CٞdE=?/لAe{~p0m(%a=?X۞dCQKv%llρA؞dwRlQsn{~}Jٞt`{~a=Kv%`n{~mKٞ,qmρAqd{~a=?X۞w% ۘ=Kve`n{~a=?X۞,qma{~qAڔ9p=?8 =?8۞dG؞t`{~Nٞt`{~ٞw%Ad{~a=`n{m8Kv`n{~a=?X۞8`n{~a=?X۞dH灰=?8 E=Ad{~G=?l]9p=?l]Aw,`{~ ؞tݱ9p=?|؞tKvsn{~a=?X۞t};x=?XⰯ>XⰯ>XⰯ>X۞|؞,qWg,qWgtc K}%@؞I>l:=?8&l:=?8>8lρA TT*ClJOڦTnJ(M1"T"T,q_[؞k s`Y0A=?=?=?G lG8lL Ϥ`{~gR=?8>9Ϥ`{~qA}&l:=MDٞt`{~gR=}Dٞ,q(+=?XPe{~=?XPVe{~Ч=?XPe{~Ч=?XPe{~Ч=?XP6e{~Ч=?XPe{~ġl,`CYZA|l2=?@ٞdd{,=?8 sn{~qAAy!s`YfAAy(%eQy(%eQ(%eQy(%eQy(%eQy(%eQ> (<9l=`n{~g=Ay6.lρAlρeQ؞dH؞t`{~q?eK@>=#t aAv,d,[b2L?ȎŁXȎŁ\[Ft-#L-#:Ht? 
{L`~П+w?ػػ sKŗٻ,q(#w?XPFd~OMػt`~qAػS2 {@'!~m8 ' Aw.~Пt?~Q=l7\o}M:La{܇-CA:La;lw_ߍ!rIa ǰ}Yx>j0l4qayMp s65/ /{`ƚ5ln&A05̢{뚂a{t& c]S=l a05vа9:l>L3\tOay&A%:5qay XtcS=locS=5OA:5OA0<簽95OA0<݇i,fkay =435qa{g(a0׼`%>L#0\暗{0׼0׼^[joyͼ skek^.ie׬0׼&]_FA0<a{sk425OA0<Xt{XtcS=lo[ay )ƚ0 skek^,\t)a{wek^#,\`9L+\^ʰ25ay ayͬ s-5ay sK=loQay skLekނ@V݁{$oAw K߂}wYt2t_)@f@ X(Xw /ԔJ`݁%t8H}݁%t8H7K$t(dSł}q;N /; !@Ǟ@W,dWт@X,dgт?/ل;E ! %dт K$o݁%thAw`݁%t8H8PFAw@Aw@At:t:t;iAt:tEiAw@AtOiAw@Aw`JX Aw`݁%t8Hȶ݁%t_(Aw 8t8HX At8HX Aw ۘt_A@u,,qB K$,q;A)t(t(dkӂ %t(dӂ@ǁ %dǂ@ǁ@ǁ@:-t(dǂ %,q; K$P@6,,q;A %,q;A@@-,q;A / Aw ;Ct8HX Aw`݁%tGdAt:tQdAw@At@w,(BAtcAAw ݱ; ݱ; /;'tcAAwxt_(Aw`cK$t X Awxt8h;6A۱%ڎ ,qBm8h;6A۱>ɧBm8h;6't8h;@ }OAw@Aw@Aw&݁݁j;6q;q;qB  _8Cڻ}N{wE3Fڻ}N{wU~ʧ݁>r;Ї*?e ݁>T;gw:݁~mikK{e@w:݁N{w5w:݁>Bٻ @X_w:w:w b݁>;q;q;Vw:w:w_XmtheEۻKʾ݁%ee@XPm݁%ey@XP6m݁%e@XPvm,q(댶w8Fۻ}RH{/;q;'w2w_XmdldlP@@@}at;q;'w8GۻyRh{w`Cz²h{w`CY};'w8GۻyRh{w`CY;'w8MHۻ/;ġlCX {w`CY;A@} ݁݁<8;qBٻypn{w@{w@{e/@ǁ@888XP"}92mej@|T{aej@3d;j@-!o8ǁ~H[f?C ~Hьь~Hь~HььlKdF~،XPf?lFdf %ef2a3z?thFthFthFthFthFYP}޺wsØ Շm44lƬP>l -Y(0gEL\c暗P}ޥ0׼꿌Bay Շ)T)TƚP}ޖƚP}kBa{GzސƚP}kB445O0<6aQׄҰ6l@sk:0 ' sk4ik^I\ay$ sK>lo9skik^Ba{yk^Hiik^cH\Ba@5~45a{wQ>5OzXƚP0 cS>5O0^dәȶlFڙlHڙ?.0ʹ+igz>^pLڙX gz`ʙX gz`%r_(gz`%r:tcgz@gz@gr:t:tigr:t igz@grigz@gz`ʙX gz`%r8șΤ%r_(gz [5v8șX gr8șX gz ;v_olWڙX gz`%r8ș~ 過過^ڙ~過laڙ8Й~ؙ8Й8Йn過ؙ~%rcgz`ʙ>%r8ș~%r8ș6%r8șX gr8șX gz`ʙX gz`lٙ~ 過ٙ8P;PZ Et߁Z@Ԣ@-zԢ@-zԢ_@SnQ~VcKX -zpԢ_i݆=A%Z ,qjl`j56A%Z a=j56A>=ARzjjj4EtEtEP聎聎J8P8P~ >7=ЧԢ":H-eZ@Hsj-e'Z@Pcğjgzә@t_X L Ogzә_t9Dgzә+:/3=пLNgz@gz@gzLڙ+:::}`Egz@gz@ge@ǁ@Xљ~aG3=ġ$ڙXPL쾝%e5@ЙXPL %eI@ЙXP6L %e]hgz`CYZ3=LjtB;;yhgz `ge@@ LdLdL义eF;;yhgz`Ci3=Ljv8F;/,v8F;yhgz`CYq3=Ljv8=G;yhgz`CYv3B9K£%r8G;KL䩺ҙ8ЙSu;:/3=v:t:t_XVLtL䩺過過過%eٱP=Я-\/P= < P= AP= P=#T2bO{HO{HOeŞ@?itxi􃑞@?i􃑞@?itxi,q((:8a{K0=a{K0=%eŞ@88882z/,(HT9j{PnUJ&Tݠ߯3T%_%R}a{5l"_"R}6 ߥaaa{c4ifq>C3lr 83i|fؾ=l0׼꿌JajkJay*ՇOaCmL;LJa 1YR}ii"=5)ay)ՇMay 8 sK˨TƚR}kJa{zkJay*Շ]ay*ՇT7ay*ՇT?LLXTcS>lRay 0 [r0׼0׼&545/aZ0׼0׼T75t攆545ayM(a{zk^I\M7XT}ay*H0l6cS>5OrTƚR}kJ4~45ay sK~掆ay  sk0M 
skhk^F0׼FT7L5e45/W25ay)Շm4\45Ѣay  c[ȪR=poTd[7H~a J@J 5W XX ++~a `+ ,qR=AJ)/R=AJ)y`R^~)T_(z ;2VN6lXflXȆ_bz VJΤ%R8H~%R8HX R8HX z@z ;0V:T:T_(z@z@z V_(z@z V:T_(z [V:T8H~%R8HX z`LZX RUcz`%R_(z`%RSiR8uJ)/R=AJ)KT?J@ǁJ@ǁJ@v/TPJ@ǁJ@0TtTPJ@tTtTtTd7J@ǁJ@vwTPJ)R=AJ TdJ)KTPJ)KTdJ)KT,qRB)KT,qR=AJ T,qR=AJ@T?J@ǁJ@TtTt߁J@T OR@z_x*wR=}*wR=}*/kTt߁J@TP[%b ,qR=m8*/k`nQX-m8h5AJ X-mJ X-@SX-Ta=ꁎꁎXT:T:T_-@ǁJ@ǁJ@ǁJ TtTtTt[ }>ro=п"[#}>ro=Ї*?eSԾ@SE[#}Iz[;~hMGke@|G8Nkz&G8>5BY#@IњᴦL@ǁ@Iњ~aY/5=gR::L@ǁ@ǁ ˲過>5²rhkz`C:5=ġ,ښ|[K>5=ġ ښZK>5=ġ,#ښZKF%e)hkzO iM2@ ˊ遌遌ʚ8ؚ8ؚȣ[/,2hkz O mM,q(_XmM,q( hkz O mM,q(hk8F[KdM,q(8Țȃs[5=q5=綦:_(kz mMthMthMl7ښ8Кȃs[ZZZKv1@طv}Fsh34%aȰ}d>?sg\暗oѷ>l@ cӷ>5?}K҇%Ø1zڇ}4c5l)ƌ>loijkoyMV skjk^a{|k^#U\2zڇiƚ},ƚ}ka{|kayzڇ-a{|kayz0<=Xihjk^#S&ఽ%>5q4-55YayMJ s~Ƥ5$55/O>5ayyڇayFɨayE sk*jk^3Qi$j<0׼_FO0<=a{{k455OO0<=XXc>layzڇiƚ0 < skik^N\Ia{ek^cN\r:L3N\p|Ӱ25ayyڇ퍖ay5 s>l s-5ay3qay 3 skikޞ@V={c%oO{ KߞY2_I@f@ ({/$S`<%8~<%8٦=p-Uj-!@*@vlbd*@vQbd*?/J=/ٱBٴ=A*/=A*KbP*Kbtbd*@ǁ*@ǁ* btbtbd* btbd*@ǁ* bd*@ǁ*/=A*Kb,q=GKbP*@vcb,q=A* b,q=A*@6#bP*nXX R8HX {`TR8P8PUJ8P.U쁎UJȶU쁎U쁎UlXZ8PUJX { 8V8H~TlXX {`TJX {`TdZX {`T%R_({`T%R8H~T%R8HNUR8PvU쁎nP藏B %TtB@|A@zA@z_5@SQ~T/P=AB@(TPk%_Fz`TKX5 /Ԛj`TK#w /Ԛj`T}Nz`TP=G::aiR8P8P~T   /P=qP=qP=q8 ~%>=Ї_XO-qa=%Ie@TK}:I{RIJ}XO{_>JQ~aY0=/%N{S]J@fQ~$N{O(qwJ}E{@{O(qZ,JJJ}E{@{@e@ǁ@fQ~aG=ġ$ZXP-q%%e5@PXP-qq %%eI@PXP6-qq %%e]h{`CYZ=ЧBKKy:i{ `e@@ %qd,qd,qq%eFKKy:i{`Ci=8FK/,8FKy:i{`CYq=8=GKy:i{`CYvBIK£%%8GKK$qa%R8PzKJ/=::_XV-qt(qٸ%%%%%e@v,,qd@Z~ai= %lX~ai=KK<_({ !rT~R[nn@?n@?n Ѝ~R g~R '5~R '5~& XPn3n2tc7|`C2tc7|`C>ġ HI7en?n@vkWٞBn=F>ly _aK>l9ly{jۿ_ۿ^ۣ3l9LC6oaȰ}@d>^sk?!ِad0׼ᅩafkay:އڰ6l&0fa|>}jwڇs'Ø1:އm45l9ƌ>l(4{55/>5ay9އ]ayM\ sxƚ}ka{/}kay:އmay:އxwЇ ay:އx?LUXtc>loayMT {0׼00׼F5H55/a 0׼0׼ƧxwLJ59u5655ayLa{K|k^R\wXtay:ӌ0<Xtc>lo c>5OrƢxƚ}k455iayB s~ayMA sk0@ skjk^O0׼fx7L545/W25Լ5t545Iay;Yv @,};I/ d~;;~a `{ `{/ dxPN%r8X r8X {>txdƎ])Z:/<\ 5v@@vlxdҎ@vQxdҎ?/J= K;frii{`%r_({`%r8~%r88&888Fn%r_({`%r8X { v8~X {`X {`lF~%r#i{`%r8X t:t:tAir:tKi{@rmc{@{@{ v:tcr8.%r_({ [9v8X r8X { ;v8X {`X 
{`%r_({`%rct:tc{@{tB9 =/nnj4GtA@{et_u@x,q=2:/Ժi`Xu8h4A nXuȝ nXu@Xux;Xt:t:t_u@ǁ@ǁ@ǁ xtxtxtS~?EvO>Sdt^2+T@u|RCO:>Ч:>?k'P藏@|T_XGu|O>3:>ЧT >$:>ЧT@ǁ@QaYi:>ЇdT:T:T@ǁ@ǁ ˂>$:²hu|`Ct:>ġ,;ZȕKƣ>:>ġ=ZSK>:>ġ,@ZSK%e iu|=V2V@ Z񁌃񁌃J8X8XS/,;V2V*iu| =,q(V_X6&,q(KV9iu| =,q(VCiuR8=JK,q(˔V8HR8PJJypnu|@u|@u|@u|`C٨矲dw ό-d#@6B,䙱%;K~R:%;KBt>-Kt>-Kt>_y(3@?)3@?)3 ~S:g~S:g<~S:g<~&P:XP,32d|`Ie 2d|`Ct>::::# +VVC| {W_(|+W~e0y9|=r50y/c>s&p_aȰwaO Gc7\2zayz凱S[9Oao;Las~GG?loV1tivk>2s~ފ7ihkk^a{O~k^Z\r\暗say:燱7ᇱƚs~ƚs~ka{}yƚs~k455O0<~a(ְ8losk 0Ma skkk^X\ray _ s9?lskjk^a{[}k^#Wijk^V\׬a҇5g55)a{9?5O0׼0׼&575lo skhjk^a{dk^R\r[%\[j^sRiLjk^CR\ƚs>Uo| d9ҷs>po@ jB*s>)s>po2v2 v@ 5!X |`%r_(|`%rdZbXOj٫V>po:ac| ۔Qb| [inWZ+/V>=KkK,qVBiK,qV>AZ ,qV>AZ@ǁZ@QttPZ@ǁZ@ǁZ@PZ@ǁZ@tPZ@6t,qVBiK,qV>AZi_V>AZ dZiKPZiKdZ ,qV>MGkK,qV>AZiV>qV>qV>=Hk/V>qV>Hkj/V>kjjٓV>qV>=k/V>AZ@6j,qVBi٭V>AZi/V>AZi٬V>AZiKPZiK,qVBiK,qV>kV>qV>kjV>/J+~tZ@tZ@P >rV>=jVB-%F|`nQ+JKtKZ-8h4A %_-8h4G_-ȝZ-H|ܩttҤV>qV>qVB-:::_(|@|@|E_(!}AB"HH<t߁*@>T@}_OY>}}SiZe%}JT>/UuHQe}A}ϩIU>B}A}樲IU>>q>sT_X6%UUU>>q>q²7i}@}樲lOZeXP(,q(;Vr*Hi}O,q(۔Vd*Ri}O,q({Vd*\i}`Cٯ>ġXZeVUUq²mi} `} `R2V2Vd* UUġl`ZeȃVKUELK.Uġ,dZeȃVKVUġfZeT%e=*>c²i}`T;Xe UU;Xe8PeT;Xe8Pe8PeaYԴ>q>GV:T:T:T8EMyfl} [(Vb} ό,Ye*@P,YeUlXeȶUlXe UC}TO}TO}T_X&3*@?3*@?㩲3*@?㩲3*2yd} T8#KU#KU%e*@DRe8Pe8Pe8Pe8PeH/,G[k[}VmV/.n#[Ƕv[Ƕ(|l{?۾*~l8ll{?۾%~l-hcÏm&D> |l4lCcpǶِm5s`koMn;wǶǶgd1f&9ǘM??u?ͫ|لc[;r>mTc&L:s3[s&m-ߧ&mIwe05X0l5X0lk5X0llk5X0'>ƚcy?ۇXsXz:sSX!w}c}cd1>v1nUk~\}5[}5[}5Z z1>f1>d~Lcd8c[cyUcd1@{1>=1>;5lk~k~3k~k~7lk|5K}5?uJ>'>[j~OI}5H}5OH}5/z#e/[[ _&d dp> d [AA&}@d`na7&Kv%y>&`r>a}ܺ:0?fL)ld ۘ2?%%2?^Lnd 2?XⰛ,qMd`na7&Kv%A&GA&wL:0?6LA&KA&d 2?8d`nM8&Kv%d 2?XⰛwld`na7&Kv%AeM8&t8&ydL:0?LA&وA&d ;32?8dIdd,qMQ#%>p7?nL8&Kv}na7?XⰛdR&Kv%d,qM8&Kv}Ǒd`nLt`t .1?d AV!?c~1?d>UACV^+Y%Aw WF,qeY%%%%>p_}a_}a_}Чa_}a_}M&t Ч?8@VYX?8@VY}2Yd:t qr {?O5oݔ˷kwik,O)A/΀ ?lJ?_] t~vA>) 3~vA>)[?vAa_!8+>À?Sʁ0gp? 
}>ÀA>҃?XʁqA}:pK9tGzpK9,q(r?XP/; r?XPV0Vp?XP0Vp?XP1Vp?XP62Vp?XP2Kr qAʁ ~`ӔAA9w2r?+9Ҧ2r?[9,q(r?[9,q( rN9,q([r?[9,q(r?[9,q(r?[9,q(KrKb8eSKvlgȁ?8d;C8wlgȁeS8y:p?8%e?,2@Yٙ=AvfdʲeBt% +eBdE_=AyedRjM jZAt@?:P9)GZAt@?:P?89)jKʜ89)P椤ПP?XP椤ПP ;$WTJXuk+)q>qitpZ@?~~`ٓK&]@t=9\}тa{a?lqO aaaanɰ}dU/|SQ ۷Jk7\x=fky=0]?;u=p0]lz0a&ۇ}eAaZ/ c6y=>O55a0׼u=>׎0׼3^ƚay^Oy=kay^ƚa~>ƚay^?L\X?5Aaװ}*9lsk0o skykk^[\浵550׼u=>׮aZע0׼ִ浤uz!0׼浞5l3^ƚa{x>zƚô55X?50<cz}`ry=ky=0m` skjk^W\Vay] sk0-] skjk^ W0׼u=>ע0׼ۇ!\ڱa䰯RZadkYO@Y} YzZ d|=?)}  z~ @0i ZX%Xz~`8z<!;mL>@j| ?p:<yrJ@~ '>ID| ?Jg@~/vKt ?A/ԁ,qЁȿA9%?9*@ǁȿPtx ?BȿPȿP93@ǁKt B,qЁ,qЁKt BLKt ?A/ԁ,qЁ@} B,qЁ@} ?AKt ?A@ǁ/ԁ@ǁ9k@ǁ/ԁ@_| ?q@ǁ9v@ǁ9 u ?A9ȿP9,qЁ u ?AKt ?H,qЁ,qЁ u ?AKt ?A/Kt ?A9@ǁ9@ǁ7C u?z7ݬ @7y?zt7/@7y?z7/@Ϯx?г+7/7K3Ao;K3A7/ ,q@7yB ,q@7y?Ao; @7y?q @ǁ74y?q @ǁ7/@ǁ7txBtx?q @șOLxZ?3Lǧ9|i@_|Z?Iy~ >˥Y!<Y<WxZ?~]i@w/,W>WxZ?п0xZ?~]i@w KOO8+,q(W>aYi~%eʧ'O:<8~OOֿ_~ @~ ^>!O OSO*0׼N'\*5ay- s7iayƚiahkyZ>ƚiayayƚiô55X0׼voX5ƚiayLӖ0*0׼N'"\ia0dk^ X\:?lUj^W\:?l c~ ާ H ߧY>` ߧֿP{ẀO2>d|Z?p2 >KtZ?AKtZB,qi?ai~AƧ|#~ >YOrRڟ@ħ9i}&Nrd%:X~`N8~`N_~`N~O_O:<IO_Or\O_~%:N8~`N8~`Nr%:Nr%:X~`N8~ G>N8~ >X%_.~`N:<8~3:8~ g>8:O:<8~ ǎ>8~ 0>N8~ g1>X~ 2>X%:N8~`Nr%:X%:N8~`N8:X%:!O:<ȡO:<O_n~<f=OYn~<vqn~gW<vzv]~gW<v8~gW<X⠷%zX⠷%:v8mg`vqmg`WvzOqO:<O:<v:<8~~O:~Stw?uw?uw? ,?x,?xݿPw!,tw?GGşGQ@OIx?}0t[GşGQ _tOy~aye!~7λ~~W~~  ȻnȻ:f :89:f _Xe~`Cy%i8>8G>8>8皾>87XPm~`Cyn 7@w@w/,8}w?q@w/@wdw/,/:}w?q@}w?ġ<@}w?ġ N,q(O<}w?ma,q(<}w?ma,q(=}w?ma,q(/>}wB,q(>}w?AwKO,q@NI|w@txw?StxwBdwtxwwdwtxw?qwGmQ@||?dG/,}?5ģ(r( >ዏ:Wxw?e7w +Wxw?~] nwݿf~],,q(Y"yw²%e7wKnHtxw?q@ǁw? 
O Or~?~~>gia{A߰a:?lqzz2o%aǼaaanɰ}d0m ۿ J[%͚ayf=Lba4OgMaym s$} `k^ b\:?lsk7lk^'Oc$0u־55ay?L^\Ia0k^^\:?lsk0x skkk^]\Z:L]0׼5l3ƚIa{3y>ƚIô55ϓX,ׂ0׼N$\Ia0e sIatdk'Y>GY>I}V Y>$Z d|?)I}V $ 0iI X⠕,qI u?A'Kt@p'}\$ G<>A'9I@B|?N$">1O u?N,qIP'Kt?A'KtB,q'Kt@'txBtx?qI@N }Btx?BtxB',qI u?A'Kt?A'Kt?#D,qI u?3,qIP'Kt?A'9RI u?A'9WI,qp'Kt?qI@ǁ''/I@ǁ'9kI@ǁ'/I@_|?qI@ǁ'9vI@ǁ'9I u?A'9IP'9I,qI u?A'Kt?H,qI,qI u?A'Kt?A'/I,qI@)}?qI@m|?qI@xBt'CI@7y?zt'ݬI 3Ѝst'=I 3г+'=I 3A'=I= ,qг= ,qI 3A>K3ЍsP>K3Ѝs,qгy?Ѝstx?qI_,Mtx?qI u?qI@ǁ'P'tx?q8'Ay꟭.O]?[],.zKSߧ._Kכ._oA/KK'ͼ /K6 n3/_X"ksd_C M_T@7Ux?-w^tSc u?M^tT@y?пx?=F^@y?q@ǁc@ǁ^@ǁc [O_,q(=}?ġ@ӗ^ӗ^ԗ^ ԗ^ԗKkP_,q(B}?f^8 ;8m/288 Kl塨/2n/8n/8Gay6K%/K%/K% /K%!/_K%1/8`CyQK%/K/rK/_KlK/_XKlK/:XP`CyVz} rd@ rd@ Hm幥.M 9ӱ@@t,L@U-,q(--87o.T:@CG@|T\XvЬ?tT @CG@z Q=&T84ۄf@`CAz²f@`CAz ġY=\tttt?TH@ zY=I<[=.TgaR z3ȇz`e\R|3hƚ_2IK0&-0KK0 IK0y`RK0y`ؾ0l_` cީ%Okf۰}gN-}P;l_[J0׼aym sKK0l_V"0׼ߌZayj %( cSK05O-};akZayj ayj %8L kX cSK0lF8Lj\ZT^\ZR;L;j\P~0׼i9mk^i\ 7浕65/-}`k^ iimk^h\E&aZDo sk mk^;h^7`kZa{yؾ_05O-aZ=ƚ`kZayj [X cSK0lmayj %ƚ0 sklk^+f\a\ek^e\Z-;Le\+Vٰ}25ayi 탔aym sKK0l sklk^Za0- s`>9ƚ Uo-A>8 d[Kҷ p#s@ j, ) p#22 s@Z ?Xi K%PZi 9\ p:H rc@>t($r@/-Dr@0-$ ri@>Pr@3-,q\ Ar %,q\ Ar.\ ˳K$8r@<,t(t(Pr@ǁr@ǁr@,Pr@ǁr@,t(Pr@N,t(,q\BK$,q\ Ar9\ Ar %drK$PrK$`r %,q\ AK$,q\ Ar@ǁr@ǁr@ǁr@ -Pr@ǁr@N -t(Pr@P,t(t(r@ǁr@S,Pr9Q\ Ar %XrK$PrK$TrK$,q\BK$,q\ Ar %,q\ Ar@-t(r@ǁ@wtW@w::pnN@zDz!2!f`2X⠷%zX⠷%\%zX⠷pn8f{87N@@@@@/&888p[z߰wߥLgA`!9 K Y ,P΂@W,,5$gA`!9 . 
ԐWYp IgAtMgA#HgAvtMgAtMg),L΂@,촳 ?: ݫ н: ɧ н: ݺB9 ݫ ЭK: ݫ ЭK: ;΂@.,< ЭK: : : ݺ q q΂@ǁ΂@.,< ġ ġ< &,,q(oB,tK΂0΂@,,q(C,tK΂D΂@,,q(D,,q(OE,,q(E,t΂ohgA `gA ;v2v\XY8Y8YplYpayEjgA `gA ;v8ǤvmgA`CyQjgM%YpYXPޖY%偩pYXP^Yp%奩%r8v8YȁY8YfYf7v:tYogA@gA@gA@gA`CysjgA`Cysz@A ;rdA ;rdA Lm剧[ 9B@|,4B@\-4,q(O<-48/ o}_!+p$ w6X _ΆUʇ@;WJW_O@2f"ПMgj&ل@Q3qaYUf"eLw<5lBD?P3&L 5OD`CYUf"п㩙,q(L8UEk&.,L8UEk&Kʪ5CD@D@D@D@D?$ȇ}6IriD l'F$?6I ?&İf?a|}0$?aO{||^eô5ld>l`>l\>Ұ}i0-a ۿ۷Gay$M_4X4I c$q?21?Z&eb.L Ea2q,aK0.ebؾs2l_9/` Z&ߤ.}j21lM浄85/İ}dk^\L Kz85/7ebkayZ&ayZ&iwiƚeb4o c215OaZ1ƚebkaaZ.j᰽i:lsk0mskpk^\Luay-s21l0׼,5ay aysk{pk^iup>#07ebka{yؾ25OaZƚebkayZ&{ XL c21layZ&iƚe0-sk-pk^K\LaXek^\ZF d22q6[&[&1b `D `D>F dlP% ,q_`,2X D@D g;L@@xl-2٦-h2-ڟXÖ@9mS.e"N[&Kd,qeBY&Kd,qe"A e,qe"AiÖ@ǁ@ǁ eththҤeBY&Z&9e"qeBY&9e"qe"A e,qe"AY&Kd ЖY&.e"[&Kd,qeBY&Kd,qe"A[&.e"A@Nm,qe"AY&e"qe"qe"B[&.e"qe"C[&Z&.e"#[&Z&Z&9lۿ9 ۿ8&ɰ=liۿ4 ۿ3 ۋqؾ\wvߗ_ߖ[c흃ôT7l Ɔbay}3jLbXԘ cSc2ls?yR2?OOFI2?OO)0:5ayO0׼F?l2:5/ɰ}#hk^ۢiYtk^\Zךa,":5>}Zͨ?ƚdނ*?9L{Xԟ cS25Oɰ}gkayOô :5O0<'Xԟaymsksk^ô9l sksk^{isk^K\ZY95/ɰ}25UayOӕaymysK2l aywsa25oI }·$oI>a ',OO.rg 3`I S`I>a d? d? 
'L'j3A˝%Z ,qBOK?9@}l8 ܧ 'pON9$cON9$ON?Ʌ 'p/\(I g68pX Ʌ28pX I` 'pX I`W 'pș ' ' 'p8p8pȑ 'p8pȹ ' 'p ' '%2\(I` '%28pX I g}68pr '~pX I` 'pX I` 'pr '%2rhI` '%28pXU@I@I@I 6\(I@I 6:4\(I $6:4:4r>hI@I %6\(I` 'pX Ʌ2rrbI` '%2\(I` '%2rphI` '%28pr '%28pX Ʌ28pX I +6HI@I +6:\(I{㔘?@)1 to@)1 to 63нqJL=*$У"JL.@(1 @(1Po3K$1  ,q ,qB ,q t 63Ao3$ā@)1 t(1 t(1 Ҥ$q$qBILJLJLJL.$q$q$qBILK$1 ,q>'\r8ȍ7(%r8ȍi(%r\(7JAt8ȍX 7ʅr8ȍX 7J`(%_%-)nPf=)/)NRf=)nPf=)7;<8%гOҍ(!Ѝ(Iэrayj7J{Rt'E7J'Ѝ(nҍ(nҍ tEK7ʅ(nҍ8Ѝ8Ѝ-(((׬v:tEK7ʅM(%Y(%e(|a7J`Cyj7Jt87vI7J`Cyj7Jt8׮vI7J`Cyj7J`Cyj7J`Cyj7Jt|CQQ%qn@n@n F dF dF d_n ˃XQQ%ġ%m~QKXQ.,cF ,q(/dF dnLn@F ,q(oeF dn`n F ,q(fF ,q%ġ%An@F9n@ǁn@F tFPn@F tF tF<%q%m~QQQQKYQKYQKxJZU8U%S/[UX&reJ ^qnʅ公**#٪ray!kJ HrdJ`*%公*.-},GK -},~r|,|KK oϷg8Oz|;7?gԥoÿ-8.C=ۥO8nq'Пo ,q'Aڞi{.'Aڞi{KPڞ%#?.0 Ÿ@'FυeŸ@?HO@Q'_( ( R?%eŸ R?%eŸ@²7nO#X*T<>DO+N@W<>x:}]tHmO(Rm k{.'_ \)Q?|5~>~Vχa|Vχaê0u?=Ö&ه- s~캟[ca&-_ר?li}ز&au\Ö sͯ_u?,}k~]0~>5~>5~>li k~|k~|00aK'XX'l]00Fk~]aK\Rtغau#\>WOغ au\Ö^-sͯ[_sͯ_w?5n>:>5n=k~ye1l|k~|Ҁea]:k~|k~|k~|200en7k~|k~|k~k_?54k~[>5.3k~]e[7?51k~beau\Öˇsͯ[/_?5~>lK_W?5.,kYA|vi\W>XJ{g.nҞKǗ]An]_ݮv<:?p<:nw΃ݮW΃ݮv<:?pnya50Aa<>50:z٧Aa<8zX[Badz]atv=̃%%08밽:5ô;5aysKru%q暗eW暗ek^a{Kuk^kʇiKyk^;\P~aZO浛<5aDQ25O˰e;l& cSr)qƚekaDmkayJ\ô<5O0<%.Xayskxk^ôuVm% Vmڶ@h[ t@' LVIJ9CJCJ\(J9IJ9IJcOJ\Xnڶ&m+m+m+kҶ8ж8жrayiJ@J\XsڶXP^tڶXPuڶȧ8HQv)%'T^ZQrayhEI` gV]JE7$0( dԊ@B($R+J Ê ( d( d( dˊ @+J+J.$ġ$R+JKSA+J.,( ,q(( d׊hЊ@v~( ,q(/( d׊|Њ ( ,q(O( ,q$ġ#$A@v~(9@ǁ@v~( t(P@v~( t( t(<&$q$_+J*J*J*JKcB+JKcB+JK(<&?K?_cqu1l8l~xaİ}!0 ۿۿ{$]a*ڰArцazְ};kk^;hX~ )ƚc0ƚcFwK!0R2l3:LBadn%[ Adn%[ A ϰ}hؾe7(7w0׼Ml:l_צ0׼ ay-sK2lmsktk^BoW˥XZ:5WBa{GskBay Aݛay A)7nmay A)9L{X cS2lo^ ay0׼vG0׼G%暗0 A%暗dި暗dk^Ba{rk^B$F0׼ |}aymskxޢf cS2lۻXay A)ƚd>ƚdkBa0mcS25O!0< iwk^\Z暗0[\Zzan0׼6{aysK2ly skwk^Bakk^˼\ ]iwk^k\Zƚ$Uo!H> d[ҷ$p@ j7$)$p22 S@B X8Hrvx#pߧ i]t~r`dGO袦#S#;?9+#;?9/#W1ȡÅr~rpjG`%r~\(G`%r~8q%r~88ო88ٛ8)%r~\(G`%r~8X G rv~8qRX G`X G`q%r~rRgG`%r~8X ǁt~:t~:t~rpgDžr~:t~rzgG@Džr~raG@G@G yv~:t~raDžr~8醝%r~\(G Gv~8X Džr~8X G '|v~8X G`X G`%r~\(G`%r~rbǁt~:t~r 
bG@Gz-SS>5SS>5^6zzlCG6z\zP Pq8H %zX⠇z%zX DžzX G`pSq%zHF>'XPO*6IF>ظP@}RFƅRlzbAFQ-%Rl8HJX F'Tl8HqXPX F`JX F`%Rl8Hş@wҿfnEӿӿnEӿfnEӿqaylF?nEӿӿqayiF;tS@7Ut˝@7Ut tS@tS@tt KI7c#q#q#=F777.,&tt I7KJ7KJ7.<~ ..@va2vahF va\Xم] ]ʅ]]IمqaygF `F {va8rvahF`Cy%gƅ坜]%婜]lۅXPم]%ќ]lڅXP^مq\%]%ra8'tva8ȅ]҅8Ѕ]]ʅ]]]wtva:taEkF@F@F@F`CyGgF`CyGgF`\wtva8.qE#R[49%E#S[4.,l@G@Km@Nl@G@Nl et-k),YOTl),YOTlWcƅRl8XTl\(F{5TlJTl8HX F?SX F?~Pq%Rl8HX ƅRl8HX F`JX F`4~Xon߸ڿonH7-@-Iƅe@#"߸ڿoo8^'9z"VOs</'߭_zbjؾ5l:La_Űobj}jujؾuVߥ흅a{caؾ5lwְ0֒~55Oİ}'ikay'ӯay'w775Ae0XTV WsbkBʊa )+&8Lʊa )+&߃TV ;&Uab0b,9L;\~}mk^˙\RV Wц浗95/eŰW85ay)+_cc͟ϱJY1lcSY15OeŰ25Oe0<FȰ-85Oe0<irkʊay*+ôt95a{#p׺aڶ暗bk^ʊay)+bk^ʊay)+퍿ay)+楬楬8Lʊay)+浶;5ô;l{skawk^7bkʊa{pƚ0mcSY15Oe0<0<XTV ۻi1wkʊay*+橬8L\60׼iw%0׼p0׼Vp浀;lskvk^ʊahk^\RV D\ڹ暗b>:L\ڶ׮0ּz++P ʊ@4pdP̀L4A"Q"p2 VV\U8h6Aʊt-ȉin@0n@@n_Or`D vK[-ɨ-X ąrK8-X D`-X D`-8-8-q-qYtK:tK\(D evK:tK8-q%rK8-X D`-X ąrKr`D`%rK\(D`%rKrfąrK8-ȑ%rK8-X D`-8-8- -8-1-ȹ-8- -X D vK8-qE-X D`-X D`-X D`%rK\(D`%rK8-q%rK8-Q-8-yR1D}D{GgL}D{GgL}ązg P YG(GzB}DG(G\wf%GzB}D`ޙ8Y`ޙ8Hq%G8H&6JX }āG@}R>"G 'JP_oG\(}DGRX }D`JPq%Gz(A}D`J%G8Hq%G8HX }D`Jǥ,M%݊["?%.,On@7薸.jZ).,¶R:.jZ)͕V@s"EM+EJqa"~i7WZ).,¶R+%e[@G-\~x"ПR)#܇K,/edIJ1lF6l_;LRa_ӰoiMlEl=lؾuv_†-a{Gbؾ5l_#ְ1l_i0׼ֶeayJ))n:L)KR c웱(/ cQt1uFŰ}O0.$:bL$:bLa3.a^а}KnRt1loayskYtؾ75May.{q\Z暗bޛ~0׼D쯖Cj5tkD0s0׼DqaZ0׼V-d[tY]-箁5oE `Ņd,d ,箁 EE5iBm 8hc8AÁ].0,9H"pj tei]eN]eI0.}B,9P"#U,K䲸P.,K,qB,K,q"q",,,."q"q"s0,."q"0,,."1,,K䲸P.,K,q˅,9 "A. .,K䲸P.,K. ,q"S3,K,q"A.,"q"q"C4,."q"4,,.",,,9T"q"C,."A.@N,qB,9n"A.,."A.,9m"A.,K䲸P.,K,qB,K,q","q"# ,"ma,?.@t[.@t[. :-S,=%"S,.@OI.@OI貸PK.N ,qN ,qB,K,q"Ml,.@z%W"z%W"?[z%WBy%^@z%.W"z%^@wD+(%J8+%J\(DJ8+X ąJ8+X D`%Jz@@āJ-LDJ\X^#+OąM@D`J-LD+0+q gJ^@78t^@t^@蕸P^@78t^@t^@Ǟ^@蕸 W">z%z%z%W"qW"qW^@ǁ^@蕸*W"ġ<,Wʎ C+;K[+;^8lpayveC kv<2v<\(C kv<2v<dÅ lXPbn%=Yv<8GYv<lC`CyeC ;v<8Yv<#jC`CyeÅr<8wZv<8XPkX C ;v<HC@C ;v<:t<\(C ;v<:t<:t<\X^l8Ȏ%Ŗ%Ŗ%r<\(C`%r<r@`ą@iD g@ .,O,@'@5-@-d@'@- %8:@v\d@t>Ϧ':@M$#]Z0$#_htW@6l Z0$BY05@6l>Z0.#M2Z0@ǁ e>Z0Kd,q#_h,q#ПhPY0Kd,qBY0Kd,q#A e,q#A@ #ā *EBE떢@}"_)tRt]\XV-|. 
*ENŁUrj]Ņ&u8I]@*؇_! bqؾ%6l8l8lxtaҰkڰk[;aZ C7Pa׳aVذyrv†a0׼ ͨH cSW15O]Űwa45O]0^bk\ߌ5XlI cS15Ouư}jkay3PI15/u0׼\Rg&u0׼\Rg sK1lo q\Rg sk)0sk#vk^ay-sK1ljskvk^a{Csk^+\Rg|ƚaQ95Ou0<0<XTg ۻ50<XTgUay3w,Ӓ0׼V\a{rk^ä暗:ck^ay3:ck^ay3ay37&8Lay3Ja7燹=5ea{_Q15Ouư9locSqvƚ:cka{#~kay3ô=5Ou0<XTg}aymkskW{k^ô=loskK{k^;ڇiE{k^ \Za浛=5/uư}55ay3SaymdsK1l`maybsk{k@V*ouF K}OrYVg2Vg\E@f@} X(XOr3."v`K} >q/ʎ@^dKގ@_$v\rkEྸ{Hq9{ Rv\8qX E`qX E`q8q8qqXqqZq8qq]q8qX Ņr\8E`^Q8E`dqX Ņr\oE`%r\\(E`%r\reŅr\8q%r\8qX E`q8q8qiq8qȑq~q8qiqX E Gv\8qq qX E`qX E`qX E`%r\\(E`%r\8qq%r\89o v\HE@E gv\:t\raEt\\(Et\KEt\\igqqgqqzAEt\zAŅzX Et\8aZ`8qX Ņr\8qX Et\HE|Xz@E|XGE|X\(EPc5mBŅXz@EPc'5ڟoj,"Ai,="A  i,KPi,K,q"A@tt[@踸j, j, j,.\^YcQ8PcQ辏 wzX.qXX)QJB,(d-lą+% YY)Q1RB)% YY)QWVJ7R嵏iD!#+% 8,~(djD叕.o(\[QOJ7 =ԔnҍBRQ衦tu .-(c(ݸplƁTg Vgθ, (|lKcZM}lH9lj|l'/cǦMa%MzM5Ǧ/MmMw>晟o}3?k ZM>ƙVcyh5>6 m}3; ZqgZM58j|3Ǧ]C1<.uZyg晟ZygǦVcS1g~jͷ{?晟~3?tcZ6UC1<aݏqgZMe|]cǦvcSW1Zyg晟ɏyg%xp $xp,xp,xp,xqAc<8@9F,xqAry>$9F,xqAI9F,x'Ec<8@DN%;8$\0xp,xGcȖ^r@1t xU:c<8=? 9AHȜ[X3 gsƃnaax-,Pك>9AJ%`(=C 3PكKfsƃ>9%888ٜٜŃQ8/_x5?A'] Ex5?:_<^lɆA_1 Òc<rݰ Ac<1t- 9ƃ>%#jhBc<'pyE'9ƃ.8 x:c<rA 1t9ƃ=pyY'9ƃ xqArA9ƃ;;1t x}k;*.AIT *dE+QEIT *?DAv5U*t xqpym#QŃDA*t xpF8,m$xp,(E.qE.qEpU g^Z0g^78jr(o6r([Q8gk5 k5.EBfZBZAVQVp>-dոP 8_\KiE|ǽ5_{!l(\ E/ 8Q8Q&BǁBǁ (t(t(AQ8Q/ /./KBv(\Kڡp^;.q'<_.qk (dmE/ 8qK(\ E!O|쿸P%_%_.qpK8BǁBǁBq%_:_:_:_qqB_:_%B/ 8QȪ%_\(E!r/ 8QA (\ E/ y\dE/ 8QA (\ E/ 8qKʵp^N@/ / ٩Q8Q8Q$1 y`9FT1.]*>ZG c\g`>ZG chr G chrB-Pq.qG c.q3%c.qpJQArBǁrBU(ǸPrBU((\*c r %(t)(N6JQ31 l1 ]zRQgN6.=)(\ 9F1 }@9F1.gc.qpJQAr%c.qp>q%QQVrBwƔcG9FK21 SQVrBwƔc\<%)(o1.\YQ'n?((tA9FcO9F1 ]Rqn?((tH9F1 ~PQSQ2r qc qqe .,(t((tH9ƅ˛9+. ⢐[qQŊBV\oE!+. ZqQȚŠ ,V\, (diE!k +. 8,Z(dMaEmK-V\.qX^XqQΊ%3+. YqQaybŅR\.qX޻XqQA%ˣ+. 8HqQΊ(t(dkgE@ŅR\qql(t(t(t(\ⰼ|pK(dmE). 8HqQۊ (\ E!Kl+. 
8Hqq,\-%jPVBi5 وZQk5 =j\(F!W(hRQFZ xjz4(./(Kxj,ܸpybFBtnKsR(AF!T 9 ]QQvBBdrЭ.((dYo!G[ 9 Y[qn((dYo!G?(PBBǁBBQQ8Pq pK$(t+@!G 9 D!Džr.qpK$PB%r.qpJQAB%r3B%r.q@ 9 mO!G)(PSqrBB5rz)(=jj5 aAƅ~zk5 }jjWnEagt}a7t|.l> ?ä 0ltt.l( KC °{0ݨ 룰Yg^q-O\ 9j2j\̀Lk!`F!`F|Z4Xq:X8 u'YbQȃ@[, ;bQȃ[, yhE|#XLJy!Hl(-<Ţp,-,m(th(thPBǁBǁBbq,-<<ŢqBY, ybE@EP 8@(\Kd(䁊-K~BY, 0bQA%X\(EY, 8bQ[,.Ţp,(tYOąM:MnnB& ]S7Q|R7Qv?۟t>(t;ID& 8H7Q貞%M\(Dz& 8H7QA (\ D& 8H7QABM.qw]-]EQo{( EQ-](tKE(tKŅY( ]uEQ.B]{EQ.BwtQ:tQàЕ]EQ.BWztQà]=]袸pyTfE+=( ( ( ]EQ8EQ8Eq.Bǁ.BVz;bC!k  PdPȚvB;ILllw(dkgC![ PAv ;.qXXPv%C  YPaymaC!K2 8,O.lwPv%˳  8Pay{aC Yp ,lw(thwPvBd;:;:;\<ݡqݡ%K;.qݡp,lw(\ C pKdw(dglC.ݡKdw(\%?[~8;]lP.B6vQ.B. (]]lDP.B.BEQ( $( 8,8?[( ]] EQ.BEQ_tQKG(vQ\(E!ԿY>^*bQ^BX Ţ-n?h(tHE!k~[, ~bQȚ e(tAE!k~[, GŅX:XsBǁ e(-Kd(\ EZ, 8bQ=Z,.Ţp,Kd(\ ŅX.qŢp,KdP%X.qŢП{X.qŢp,bQBwXzipmEBzbQB5-0h(PbQOZ,.\bQ ?oO|)lp"lj?I/Ұ4lb?L>lE[=l=LWа [4l @0ϼnyu/3{N[g0<a.3OE~ƙ"3OE\qi~6w.ay/8_W"3/Eg^0ϼ~晗"3/Eg^ 0ϼay/0ϼn0yu9l0ϼas׽0ϼasו0ϼ0<ay/33a/&3;ay/&l..8_qi8LW8_qid~6w3asS晗0/<_ye~qay/<_dg^0ϼas+晗0/<_ye~qas3晗"3/E\2/8_eLJqi8L68_qi~6ay/8_QFg0<ay/0ϼ^hy>#3/az6Way]azף0ϼd͇ay~6Uy#3/E|J+0ϼaaz0ϼ^_qm(~Q8_-~Qѷp>-Oj 9_2_\̀LIm!`E!`E|R[4~q_HB|#UU <`Jp]Ⱦ*B2YPȳU dSPU BZP& yj…R)*BZP* R(tR(doB@…R)JqJp_Ktp.q%R)Jp_(B!J 8HPA* R(\ B [pT KR(deoB 8HPA*%R)HB@B@B!|.JqJ5U U JP^*Bǁ*Bǁ*B6V):T)JB 8HPȚ*%R)\(B!N 8HPA* R(\ B Y[PA*%R).qJB 8HPA*%R)\K>p' J@  هZP8PP8PPȓz Cg== Y(t-LBkaz.B,gе0= Okaz ] ӳPZ е0= Y(\ By 8ȳPA Y8*BI…R):T):T)JB A ]SPRpT .R(tR(t;IBO:T)JpT KR(tYOB.Je=U KR(\ …R).qJpT KR(\ Bz 8Hp,g?xz OBkaz g== Y(?= YpyiB{0z ] ӳPC,:,IB z ]pгP=Y(tAB>z.g= Y(tAB z {z ѳp|ɞB},:,:,gqgqg1= = ZBd2H֠MB宼 ,ɬ5(dIfA!Э5(dIfA!sk .\n[kPa3 ay9LOA:L>-dR(dR(c %<4h (I<͵?PCJ Y_?P8?p(t(\;x/\;Ktgp(\;?PȎ%.qB 8?PABV\(@ s?PA%.qp?P8?P8?P (t(dm@@ qq5P%p?PȂ%.qB 8?PAB.qpKP%.qp^+\w{8BǁB::>@ CR(tJ@T.ÖBwK]*aKT ݥ?P. ]*R(\ @ 8?PA 8BI::;? 
}A@#H@z A.e= n'(IGB.qp.(\ pKP%.qpK(tYO@-q%)'( .-'( n('pyh9AIQNP蒌rBwƔН1>B@  n?('(\e JNPrB=.)'pyd9A@   ]RNP8PNP8PNp =Jg^Z *3(ayi 3 %3G(ay=A [kPȩ֠pc\֠oA|[8r5(d5P/P k k BZBFZ0iEEg)Eg"$ e(-<EE-"P3A[ "P3[.E--KtB.\ۅKtpo.qE -KtBY Y"PA%\(@Y 8"PєEBY 8"PZ%.qEp,Kd8BǁBǁB̶\(@@@!f[ Z.Eݛ---lm(th(d g.qEU-KdPBq.qEp,"PA%Ep,Kd(\ .qEp,KdP 8\=+d]g:EqEqEm-"P-.i(tuI@KZ ]]"pޑE%-.iPH ]]"PBW\w$.i(tuI@Y 8"PA%HQ@(B 2 2 > ()(p^Q 2 SPQP?۟t( (tIQ@ 8HP>%\(Q@x 8HPA % (\ Q@ 8HPAB.q(@.\ ZPBPP ] SP ݃QPZBSPZ w(е0E%EQ@@Q@(ESBlұ(E( (tAQ@  { QpƢB}::(q(q(E ޼ѿrC 9_ Y;y!?ѿ7F _ YE{˭io/\\F%io/\⠍K _A 8,ѿp6.qXR{m/d?r쇼ѿqF ѿ7:_8p}jo/tѿ7:_8pKѿGS 8hmP 8hm/d]jm/d]KBm/d]Kѿp6.qF%_Ⱥ/F%_Ӄ(?x وZP6as[6ͷ ln M°'8l} lybO㰹- ˲0] ,lO0ϼ [ay `g^as-ƙ} 3O@{0 >ߑqi>6@ay8Pg0<aed>晗} 3/aye>晗} l{33/@g^dtg^733ay33ay33Kay~ay8fgGW8?UNg|,3O@͝fg0<2tg0<asyay&l.1<&@g^0ϼay} 3/@g^ >晗} l.,<&@g^0ϼay} l.<ye_F@g 0<>ƙ} 3O@g>ƙ} l.ۚ0<ay8G5ay= AMg^&l. Lg^*3g3ayd&33ay= >Pȩ}p]}o@|N[8rm(dlPe   sBBF@./䑛BAx!޼B-/9 zZ_#8//d9e< 쿐 \ 8]A 8v].\e,lp.h_elp.qв p.qвB^.qвB^_A 8h-/\ere qB6^_8pZ_Ȫ ./t쿐%:\_/Բ%Z_ 8hZ_ 8h-P 8h-/deKp.qв p.qk%Z^.qk%zVvBǁ Yye?./tSen M!) M!)B7\^t)B7\_覐/ԋB7\_覐 8h-/\erބq:H_ q Е;:H_8o(t}:H_莑 lA>Bw_A 8h+w/\}j+w/\}KB/\}KϿp.q>BW_A> Wx_誋 }<>BpI>B7_o{/t}>>W•) WB\_dsek /tkǕ)5W:\_֎+/\ޥxe[;/tqBv\_8pe.Tze! W+/\{e!G+ Yye!#/7W6ڂMnl}l >65aŏM=Ǧ/M?6uaCc|lMڲMeǦǦMEǦc'ߑp|38p|l>ƙCcy8>6U@!1<nyg晟ygǦcS1g~vyKW?6U?晟ΏyME6U9״?ƙCcSgg1|=;l1<! MǦc!6;>晟yg@g~v|3?;>6ygǦc!6;>晟yg@T3?;>晟gMǦcy8f8p|38p|l*?ƙCcy8>6uacqgq_|3?g~~1|l?晟|3? |3??g~~@cEƙCANN! 
G_9r<!P8?yCA@9dIJ_Y?2-/?-dZ T-/?ȳ2--?ȳ-/?*U?ȣb-/oR?aI&K.q?fU\0ߤ.?ȪI\0/püp^yKeiղyKenղ8\0/pü%@,q,tAVZe-q,qylcAd;e8dEe.qdOe.q?a^_8/pü%YjKe.q?a^_8/pü%8 gg.q=a~v k,-IJ,]?8`?WAAWzX+=,Е?J ò]ap~]+=,Е?J ò$.q\0/püA\䴲p^e:HXലAwXeN++t7e:HX h?I tLje.q?a^+w,püp^+w,pü%yKe.q?a^yܱ%@,#L A0<2 'u2<2?L:o0tax03/oAg^ނeq-ɰ[payz 8q-;0<ayz n0x [ƙ 3OoazC0ϼ9y-8Lo.>3Nay=o:L:L-d-(d,(do@!,PB6 <˂B6 \(@!,(di@!K~ .`l-(䙮>t݇.\ЅKtp@-(\J0PNȂ% .q`B  8H0PABV \(@  ًZ0PA% .q`pR0P8P0P8P0PȚԂ %(t((dWj@@-q`q`q`B  YZ0P8P0Pɂ %(\ @!$  8H0p,,(\ @ .`pK$(dj@  8H0PA %(\'`Kp X=+\'`,8BǁBN : : n`@  C`((tF@7 .3Bwo {`D7  ݽQ0P % \(@  8H0p  !P!P!P8(tOM@B@>=5 |b@B@kD: lYC5"K(\ @[u: 8!pn(\ @9.CpK(\ @9 ݪ!PA(tH@.: !P2:.\!P2: mO#7; BW]tCtC!tH{; WC5.:(tMA@k : l: ]S!P֎BtCɦCЭ./=(tkG@@@@@[;: : :~5l (ǩ5.f (XP"ǚB9ԚB9ܵ& 5,r (dkMEZk 8,wi (\\&p4K;.q&pr֚%.qXnZPABv5HM@@M@!k j.&]555.Wl (t (dWcM@@M@@M@@M@i x4 (\ M@i.&p4K (duiM.q&ե5K PBV.q&p4K (\ M@!Kk.&p4l (\ Mɷ&q&q&Bi j j Y [P8PPZؚBǁ (d-lM@@M@@M.q&p4 P%Ě%.q&@j ydM@3&p{.b (dglM˳k [P k {j [P#yd@!h; YE!prBVvCU43Oa q/ƙ_ l8 q{T*3O@g~0<Tg^/ Tg^~t0 Sg^8 r(r(/P8Ǵ .Bf~B~A_@ ydOB yd@!{I [pD,'-(d%o@!_-P7 [PA7 8s UZPA7/EKtpDJPA"%B 8HP"%.qpDK$8"Bǁ"Bǁ"B\(@@@!KM .-EEE7-(t((dc.qEK$P"B6?.qpDJPA"%pDK$(\ .q;%zUA" pq.q;BCH@@@!!   
ًZP8PPȎ" %(tIF@K2 ]Qp2$%E.(Po ]QP蒌"Bd\ -(w@.qB 8HP8pB/s' 5οuB\_:B:BO< :?۟s@/\uKο8.q: ο8.q:%Z.q:%Z_A 8hM/\ur@n/t'ōWBsc+Zn/ 7_ey8-7# ]rcO,@npy!pcnpy'}7:_> ]qcn/tN67O _z n/t7:_8x~!?Z徨Wӫ Yx~!/䇢WjB֥^r{ԫ Yx~!RֿpCl ZMW.qXz~!{/\'j Zprԫ 8h~bW.qjB&^ W:\_ī ֿP ٛx~W_.jBǁ ٛx~W:\_A x4ZpV.qj ZpV.qjBֈ^V.qjBֈ^_A/jBֈ^_A 8h~/\ZB/\lսZpVlսZqjBǁ/jBǁ /dWjBǁ/jB^_8p~j~/\oZB/\3Vay:L-{a!IU!3LΧryZ_: y~_:_i}ׁ}yqu`u`2EEF/z/z/z/.>~ǣ>~qY:Q}uPK/.>~B:Q}"/uPe/.>~B/_\A}"uP E/z/zǿ>~>~>~'Z^<Ѻ_:Q}"O_Te/D>E:_/uPu`_/ Eo/ˉg_/z/~>~?Q}"s/>~e<(qy%}"yPvW2/>@_+E //_:_:_Ņ}}qau`E_\/.>~qY>@_\A}/._Te/.>E:_\A}_Te/>~qY2_Ņ ׁ /.L}du`EE?b2_ xE?zq 8_ڱ>~l`ǜsL}|Njpb"6.Q?6[l~Џ'cQ͓c.6b1\c.׋1ϼ^<*cySʘg^S̳̳zL{qǏ88V3>~3>~le88̫̫̫L}g^}g^}g^}|&Wܘg^}g^}c~l>~3>~3>~3>1cycyc3W?W?6:cy񏩏̫̫̫L}|yǏyǏw_c?ƙg?65c?ƙg1<1<1<̳̳oV3>~3>~3>1}y뻫g^_]<'W{g^_[V1ϼ>yUl!+WV1ϼ7g^X<柯by}[̫:by}U뛪g}"}n>~>~q 8[_QTnyvȫ[Eާݪ/Wꋼ׹U_ڭjysS[E(VE|\ڭ"t[eԪ/V}qY||Q"nueԪV}qY:U_MϭjuPÞ[eԪ/.V}qY:Uׁׁ"|n_Tu`c[E[ժ/V}V}V}w?^ꋼUQjy qZժ/"V}qY:UQjuPȃ[eԪ/.V}qY/è0>*.VE}U\AFuЇQELܪ?V}V}W^^ꋼU_:U_ȭj}U_Gl_Eت/xV}q~+[EQݪ/Ņj_TZ/݄==nBQjs78RP3GQ9"?@pf{sş\Z9}uPrE/.E>3G_\A9_Tre/.}qY>3G_\A9}csE}}eU/}q_ef9N}el苾2G_d}`sE//sE_/HE苾 0G_9}`es//H}}}G2^\|t6sE}]2d_:0dqy!ׁ!N/tɐ["v Dǐ}Ot 'ߧ3|_#.b1EcM$6DbCUl~͏TrL| ͳG16b!"y cBl> `l)++r 1ϼ^88)r+r+r+rLg^g^g^|B͇g^g^c?w<+7\ W3by}yV3}l}+g[1ϼ"g^_l<_kg^j}O:ӧRSqY}T\A>yp SESE>/z/z/T}T} /*U_ O@L}b/T}' >1UQo}bS "oNy u8:(UQJd87.*U_P3U_P3U_ E5/z/MJCTE}Lrjꋾ1U_\AJ}fR/T}qY:(UQJuPRe/T}qY2U_NL SE?g3U_WawON7)>;1U_/ T}?JF_ ~gO~g;kU/5`}~苾F_:F_:F_5ׁ5_tSKE|mnypSE^"ܔ/|mnypSÜE^"ܔ/|I7:,%)_\AM˻nuPS򂢛eԔ/.밼|qY5勼a)MׁM"onl_TSEEWݔ/zؔ/||||qY5?M5:)_\AMjuPSEܔ|qY5<)_\AMjysSeԔ/.렦|qY5<)QMjyvSٔ/||E5^6^6勼)_:)_-MׁMjyKuSu`Su`SeԔ/.렦|T7/)_\AM"ZpSٔ/7O˦|V7!M"n\pSCE|˦|V7MM"n}aSȃEݔX|e7>()_䋕nE.ٔ/7}MOll_TSu`Su`S[ Ք/zؔ/Væ|E5勾հ)_\AMj}dSEjؔ|qY5:)_\AMjuPSeԔ|qY5:)_Ms7勾հ)_:)_MOll_\8wSu`SS6>])Mؔ/MKMMٙŦgf{MϦsggggө$6?%Ms4MwϦ#Il6gӄ~6>΅MϦ{Cl~3g<<떟y-?Ͻk6>̣7gϦ/-ƙGo3=8ƙGog86>̣7gϦcso3ܛ3??Ͻܛ3??Ͻ6?{qνϦil晟{MWϦg7{yg7g~g7g~6]I?Ͻp 86C?~3|lƙGo3ϟv}6g~3ܛl3Ig7gkЇTj|q~9?9S;yT5!o?z uŹ0)P=?" 
p #<a[Naӷ'=0C0C-}CWN89 pY9 n.0s/?\a?\a0e0e0e0e0C_" '=0C0C?J# ЇV0%0CZGiЊ0C0CBg{z?g{ԅ0|q?=}Bg{z?g{˻ ? a^zS_T j?yOP! j?syOP!oj~{ycTC~uXT!j?\ayQP~uuXTs!j~u@!j?:^yP^ߋkj?:g z~u@:?͹pYpY^uuyS8.0je{qn?N::::ک^uyVs=C~u@~u@KLl>G$wLA|7b3\lSP>6__b|nj|y9|l<ٱof35cyMczQ3k1ϼ^Ҍy5c /ƙgP>ƙgP>6v3(qqGgAgA|qqͯ=SP>WP>WP>WP1ϼ1ϼ1ϼ/1ϼ1ϼy{3wc5׋1ϼg^A|sy{31(3(3(/1<1<3(3(J3(3(\Vc 8Gc < jl< SP>WP>WP>WP1ϼ1ϼ+(+(o1ϼyyy?|l WP>WP>6tk 8 ;il ƙgP1<1<1<gAgA3< 8 _byg^0^Qq*_ԫEw/{q~վEw/A_Tܽu`ܽww/znw/znw/僢=AqDeE{{s˸{'=݋?_9@8EE݋:(^uPE_w/.렸{qY/*^\AquPܽE_w/.렸A݋?_:^qWaE¸{яҌ_\>rܽ{ї%݋>2^4EZw?~{dwۋ~|g-cwۋf~Qo/~{я~{+~{я_\^mwkEEE_o/zoE/.ݹ^E o/d~{^y2p3o/d~{gD/.^"/huX^sKeo~{qY<ۋ:^\ayAyp EE^"o/zo~{ۋ^ۋ^/^:^:^:~eo/~{~Eۋ?Mۋ:^\AuPEo~{qYۋ<̹^\Ayseo/.~{qYۋ<̹~Qyvo/N~{~{~Eۋ^ۋ^ۋ^:^Eׁ,~{Qۋ^ۋ^\A&o/.~{?^\AEe[E~{Tۋ9"oso/~{?-^|F{h݋s~L8O11Q.6?S=6ab &6?懸 /oח<߱yc1#6_^G;6Ovlcdl>A#1̘g^c㱘g^cr,6볱g^7c1ϼyXlm,b1ϼgg^߉<_by}"̫8by}Ӱg"p~{~{qU8*\^~"spk+E`]amsț+E {w)W؋ƺ^nyr!\a/B {wYW/^#+epqY>|QuPȋ+Ua/. {g+W؋:^\AuP +E+E+E^\a { {,W؋^V/^䳽+E+E+E^\a/zXa/)uPG}W؋:~Q"]a/. {qYU/^\Ayr*ePqY},tQ uBePqYU/cX>* AV؋^V؋ ^:^:^5ׁ"/#_T+EFXa/4 E0_iO#}a^/< {BW؋ E]a/J {q"uP*Rzǖzǖzq}cKcK8WEO[R/=xԋ>ԋ>̱^9QߌR/x/^\A-"_wKZR/N͖zqYԋ:~Q-juPKZeR/N͖zqY^a-˗5_\q?>Fϋ~re򅍣O9}d'WFϋk2z~]ϲk^1׻5/]5/z5/]C@̮ye@̮ow͋k^:k^:k^}]ׁ]_t>Ek^CE>k^cE>k^a]iy9w/.塀k^M]"oRuX^Us׼ȫe5yqY5w͋:k^\ayq]yp EE^5/z5yWw͋^v͋^v/k^:k^:k^:k~e5/*yEu͋?Mu͋:k^\A]uP׼E5yqYu͋{Ql~y36ߊ/Ey/bZKlS=6Dlވ|sy`czC41ϼyn1Иg^08l8lb3{3{l>+S=W=W=W1ϼ1ϼ1ϼ1ϼ1ϼ`y~3cI7K1ϼg^ |ȍyj~3̳̳Ϸ1<1<T̳̳Tf̳̳~L g_Ǚ{lS=W=6_ic6W1ϼ1ϼ1ϼ`y`y`W٘g^ g^ |y`?{3{3{315c1ϼ1ϼ_c=ƙg=6_cO1<`q`q`q`?@8l8l雷g g g c-קn1ϼ>ty`?bOo1ϼ>qy1}붘g^߶by}̫kyM[3{l-l1ϼkc-ׇl1ϼ>cq`/r`/?59n9n矚OC/z!Ћ~Q="CyuȟC׎^o`y)rC/h"o_TgЋ:zzeC/.z#/^\A="OHuPzeC/.AЋ^Ћ^Ћ(~Q=ׁ="J_TlЋ^Ћ^Ћ0^:^{C/.zuPzE>{^\A=_TzeC/zqY}*T\A uЧBPqY}*T\A uP>*.O:S"C?zzyruXz' /uo}`,c|7 ҋ<9^ϱz];cE^K/ίys,scŹP\A􋊥d,bEOcEOc9mP?ָXzXzqN5>/*^ 뎥=}}c,s3^뎥_T,bEXzqY/*^uP,beK/.XzqYҋ:(^d,cO`K|Xz|XXSҋbE_yK/Xzя}d~ fbЋ>^1{E߇Cz}bbЋ~ fEbb/._^}=ׁ=ׁ=CN_\^r?['ϋ|vGi'ϋ|qGi'ϋ9y~qy";y~ׁ"j^/3_Tռu`ռu`ռU5/zX5/>ĪyEU͋j^\AU}.eռE߇X5yqYU͋:j^\AU󋪚uPռeT5yqYU͋:j~Ukڮ}bռu`ռEKY5/zX5yy 
^E?E2\^S$bEd PlS<6?]OlOl~.g|26?R1c'6El^|[ͷc 效*1ϼrLokDl3py3p1c3W|~c < Cll< SS;by} Z3byci1ϼLy.Ϳ<(-W<6spL<-h1μEN""G򃌈yqDۤ#/TxJGċ)z˷+#ExWGċ;"^"_TDGYGċ:-z"e/.레xG/*"^\A"9uPD"e/.레AFċ^Fċ^Fċ8"~Qׁ"O<_TDg^Gċ^Fċ^Fċ8"^:0"^ӯ#/.레xuPD"E>;"^\A_TD"e/ xqY}S\A_uЗ9eNqY}S\A_uPD)./s:˜"?Ȉxxʎy%rDu`D?!Fċ'0"^= }O`DN/oӋ<9"^Az;ț#E^/"kyZsD8 :("~Q񃌈_TDcDcD8/z/αmDLjEEċ|y񢧏vrng7cFċ|񋊈uPDw/.레EEċ|uPD"e/.레xqYEċ:("^dD#J/)N|\xqCP /w)OLpʠwѧK~8e E?2])Ej.Ao5 zpʠw z zpʠd0]:0]:0]A"AkQzgwzXwzXww0/./I9]cE*wW8]\aycA"..밼6E:,N9]\AASzuPлl2]:0]sE.9AׁAׁA z z z z_Tлu`лu`лe..렠E:(]\AA"Nz_TлE..렠E<;9]\AA zuPлEwqY:]\AA zyuлu`лu`EEE^a.z.wE<@:]:0]:0}QA zuPлWwqY|A zdл@ zyu򪿃E'E^DwQn% pE^D.O2]ĠE^1]:0}QAC zuPлEK..렠w!/*]\AA zuPe..렠wqY/*]\AA}>}qyڵwG}+}}}y[3boq1ϼyyU ?<-W%<6Kg^!1}g^_89_|W]ŭ"/n}_Tȃ[E.%/"on}[E^.wJ.ԭz^wqY:]=ƭj}uPȣ[e..wqY:}ׁׁ"o4n}_Tu`C[E[.խׁׁ"o6n}l} E:]䃬[ewOn}uP뻸Z..wqY<]\AuG;eE}S\AuG;ev:裝hȇ]]:][E[E[Ez.z.Ol}}`[E_wӋ"bn}ysM/2w1sؠ[E.aj}drEsEs9P?~wƜwqN0/*][y=`y}cλs92]y_TλrEwqY/*]yuPλr..렜wqY弋:(]\A9yuP sE.z.twBl}}a뻸 -]ψn^w~PdwEv]"EM.A.z4.Aˋn}7awu`wu`w ۿzvN]ǻ]䃢\.A"].].A"&gzvw׳,]˛DgXzvqYu"׳/]\ayguX+r=E>ֺ}ׁ"k].zXϾzvggg_T=u`=u`=u`=EEeT.x4U..zvqYճ/]\Agyq=eT.zvqYճ/]guP=eT.zEճ:]gd=ȣEEU.zX.zX.zvzv'=׳^ֳ/]Iׁׁ싪guP=EzEճ:]˹guP= Eb=Ȼ].z?Y.zy׳k]"gyw=d=-=_i"NkymvZE^.-=63]/NkEvѷTiׁiׁiׁiJkLkLk}vݎO QuLoQ/QA76?Sw;6_b*6?GPݎؼOyb'6W)׋1ϼ۱sL/Uv3v3v3vlq3v31ucy-|'y~3vl>8n8nSw;ƙv3|=v3vl>o31ucyucyucyu̫̫O1ϼ1ϼ۱̫}Lg^g^g^cn_:byucyucv3vl>7byv̳̳̳qݎqݎͿYӇt1<1<1<]3by}=̫}Lby}7뫹ch.'s1ϼ>? (򷧃[ۃ?( Eb.(EE8] Q"mr+.(w?99]aQ"Oryqȓ.iQz+(wqYE:(]QruPȋe..(wqYE:(}QׁQׁQ"8r_Tu`+E.QׁQׁQ":rr(EE:(]Se徨(wruP..(wqYE8]\Au>eE}S\Au>e徨}:sܧ'aG2]:0]㰣EEE>vn}`w"anysw8^_\/(vg1w/"/cnysw8'<]D(.A!!1}Qigi(?=CLk_TZ.z.ƴvg+FKeZW־vqY>(3]\AiJkre..렴E:(]\AiJkuPZk3e>ȴv5^f~0s]\.d8V]y XuPUXuƪ~b{cE?@1V]cE.XXǪ'0V]:0V]:0V]=_t.ߌ.ʱ˫6UtXuOWUoƎUtXuǪ|rccE^\.XǪ|Ptc..밼XuqYŪ:,/8V]\A">XuXuUU_TEǪ^ƪ^ƪ/*V]:0V]:0V]:0V}QׁׁUw:V}"_qOƪ~dRu{C~u6p3g#lO0tlc}6bs9tl`}6=6=gxٴMl:[}6g~6]>NV9rٴyg9rtͯT~晟_3?G?ϑ3?G?ϑ}6g~3lm3g9rgۺ٫^CzyR;^C!^=^C~ z>Ǡg>^wqu?OSs;^Cz=Cu?I~BOzu? ]{Nz=Cu?y ?_9Eou?u^e^C_~.ν|O]:̽:̽~~~~~k3zu{A^C}BW..*+c? 
+]\^Q!˔~9pY .0.밼ƣ)Dy|.Sy~2z~u@8z~u@y~u@y:ac?\ac?\ac?suuyP8.0LC (`?:] (`?:З%s쇾,vq.`?e ::>pYЗ%ssss8.0.0.0ghU~ -ZzQ~ =(`?쇾vqyV쇞x(`?*`?{5*`?#& vqI ر 6ϻVOURuucnl>S+Ul>R{L|pc.1ce*6al^|Wc*`yUyUc*`<*`<*`<*`S;W;W;WXqYqY98,`cycycA3ƙg;ƙg;62cycycrS;W;W;W 1ϼ 1ϼ 1ϼ رB1ϼ 1ϼ Ty 3c81ϼ ر@g^|y~3̳̳/1< 1< ر̳̳_ ̳̳}Lgg|=gycyc7W 1ϼ 1ϼ 1ϼ TyUyU͇ޘg^g^|yU>v3v3v31c1ϼ 1ϼ ر'_c;ƙg;6{c1< TqYqYqY?x8,`8,`:gggc̼>ya]31}UyI]3{g^_<.6Cg^^3obycz1ϼ>yU_Gӧs1ϼ>qOɚt{7/[sqqsA6+7Ao ruq Eh־v+:]/rNk_ԛEk.g9<^9]NkyrZȳ.7:]\AiJkuPZE־vqYj9]\AiJkuPZ.z.z.E^t9]:0}Qi".z.z.vvG/϶Jky pZ.vqY:(}QiJkuPZkeATqY}U\AD]QeATqY}U\Ai >*.8}iׁiOLkLkLky,s=gyt=ȓ9gyt=CE].:zE_!"Og}a=E ].zvzv3zv3zv@ֳ/] ]1g싪g} d=b= ş#E-og_T=EY..zEճ|guP=eT..zvqYճ:]Agd= EEX.gHzvя$gzvя$gzvя$gHzvOgz\׳~Bg=u`=u`='t|"]Ӂ"WȾ@v+d@v@v+d@v+d@vg ||w ^E>z9]\ayǁ duX^q e|..@v^dd u` G/^/*]䣗EE.z.z.zȾ@v@v@vqY?:(]\A d@vqY:(]% duP KeȾ@v:(]\A duX^Zu ҪeV..밼@v9^^/./:]:0]a΁ d dys u` E.z.zȾoz:]\A duP E@vqY:7=z.'E^D־vq充vQ/./x;]Ei"_sZgZE@vk`53w"nyvwE.53wM"qw.7w߲]:]:]:]:}QׁׁVn_Twu`wE.`vqYu:]n}bwe..vqYu/]\And=e=h].znY.zvszvї%ֳ>]ܲ}qyA 7Evo主]'E?}q ]vg#ibpl> Xl>Sw;6?Wl>~Wl~͏ݎw|16bl>ySw;6b5W|:vl^g^g^|9v3v3v31ucyucy(Wwݎqݎqݎ98nvcyvcyvc3ƙgw;ƙgw;6:cyvcyvcrSw;Ww;Ww;Ww1ϼ1ϼ1ϼ۱B1ϼ1ϼݎy3Wc8{1ϼ۱@g^|y3̳̳/1<1<۱̳̳_ ̳̳}Lgg|=v3gyuc7Ww1ϼ1ϼ1ϼݎyݎyݎͿ{v3v3v3vl$ƙgw;ƙgw;6JrL8n8n8n7{16{g^cX/6HKg^gz1ϼ>ҋy^li0y1ϼ۱Wg^*.렏:c"/ nvdu`EEEN\C`лKE.>ĠwF/SC zyttлȳ)B!=:]}A zuPлEEEEw3Ġw7F!/*]AgAk z'w>1]ɝA z}rgл.y:(]\AA zuPлe..렠wf:(}Ak zu`SkE?̰}qy׵fX.aY.aY.afX.ٞY w}}}lwO]俈]Ìk7\.aƵ"f\._D].aƵ"].aƵ"].w}_\&r2׾:,}QkE}uPnkeT.̵}}\wEվ|.su`u`jEkEkEkU.zX.zX..wǣg[վ:]skeT..wg ׾/]\A"}uPjE)\..wqYվ:,/ʺ]\ayQֵˋ}uX^ukE^\.zX.zX(wwW;׾/]:]εׁ}ysu`u`jelwqYվ:]\A"ujeT...NxByKu'R;E ^-՝KyKu' /oyKu'ȟ /o/6xqGċ1"^m{o}fD۸#E/6Έx?9"^Y񃌈S#E/z/z/z/zxxxw0F/*"^:0"^ׁ񋊈}cD"e/,̈xqYEċ1"~QuPD"/.레y3Ĉxsˈx+F/.o:"^2"^V}eDeD#en7wsru׃򺫫E?j^摫E?-j^2E$[ 0rL5-b#l5[pL58oI?6?S =6cY06oql^ؼñ$xL5|7W jyucycycj1ϼj1ϼj1ϼjTCyUCyg^5_c =6C+ƙg =ƙg =6q̳?XCqYCqYC7g5g5|΍qYCqYCvL5g^5g^5g^5c<<|<<S =1ϼ>g^E;3zg5cxYC̳TCYμjyW j1yc>|UC?zg^5ϼjNg^5ϼjyW j1yc>|UC?zlaW 
=3zlWkxYC|231cp'3c>ˌngg^o2|1K1yq|bg^5|{33Wc>;1y?`./7w;LN>kwˋ#Aˋ<NGЋ#EQ'ˋy":荌ǁ]"Nk^]"bA/rey75/ry7+5/ry[w/k^uſ۟m w͋w׼e5/rEu͋8k^n]2qP׼e5?Ȯyyy.w/k^8k^]ǁ]p׼q`׼q`׼՗EE5yq?8>]2_T׼ȥe5/.㠮Eu͋8k^\A]"wbq]e~WquQwq]e~Wqu/2z*rOAv͋v͋\k^8k^8k^4ƹKtmE_J3z~QNRtE=W',1z^n"_ԫEo=/ryћ%F/*z^\A7K=}=/ 1z^v󋊞}=/z;yg󋊞d3yѻ7Fϋ?۟wogE/=/.EEϋ|F2qPe=/.yqEϋ8(z^*2dE=/?6E_0z~qyt=/J/=/J/=/Jt=/Ÿ/=:Eɋ8N^䓞qPeT'/.6IOˋ1]^F|Lu'./ӄ"7N./r#ty'EX'<:ydv%EX'/r:yKl/.ON^=ɋ:yON^=ɋN^8N^8N^8N^8N~QuǁuǁuW]_Tq`UEU'/z:yqɋ8N^u2beT'/.:yq/N~˃n}/zAY7Ƌ>}lvbcxk'6Ƌ^1^ccEw`cC}<S n҇L'31E/Nt>}~{чG.OwK׵X/|˵`mum9X͋cؼ;|U?|g^_]1y}q|U5cVg3|l}-3|g1c>|U̫6Lϼj1yc>mlWm>31c>z+%3wRb틘ϼ^H̫6\|.Jg^|"3Pb>k1yc<[1yc31c6ߑ7$b>Sm>3|g^ϼjT̫6Wm>6ߐ̫6Wm>6 ̫6Lϼj1yc>Sm>6߀̫6Wm>6{5c<;.tgcxY̳6gm>6̳6gm>6;|gz<Әϼ:wc>z43|lۦ1yc=s|q[o|/η~"N[A&vR87P(ί'!.5"o+9)_(OBykIGdRE./r%|?!&/Հ"NyI"TNyGI"wUNuRE^;)_I"oN8)QI2JqPRݕ/.㠤| ,'8()_\AI2JqPR EEE||K-'&/*)_UEEE/z/zIJqPRȋ~'8()QI"/.㠤|q%/*)_\AI2JrR^+.㠗8襼z)^+.㠗8()Q/qKyeR^0'2)_80)_FIǁIǁIt5E.]/r5j|#V\Q/`zqӋ8(^\A2 _T0ÿ80^"7NgjMSE>1z?x֋:^N_TjMSE.ΝZ?z+ /.Ou9^?SE.wZ/rz+ ֋\:^rשS]Nyé"u8^S]Nyé7L=L=L=L=L_Tjq`jq`jSZ/zZ/zzE֋^1^\A2JejREZzq֋8(^80~qyFEL<#`zg׈ }L/z`zkDӋR0򌨃EoLOoB?O- +F2^:#E/i/zi/h2^ 1^:#ei/zi/zaH{уA/w/*y"w/y݋|y*̻Tau޽K/wkcʻ摏ؼʻ|w|$3{g^ycʻ|Hg^[&1yW=6-뽐ϼFIg^|w5c<̻xwͷGb<̻xwͫ<1ycM|O$3ϼ{gycʻxw3S=3{l o|w?{g^yϼ1yݏ)W=3{l W=3{lwW1yc>ʻ|w?{lW=3{lk̻xwib<ǔw3g=3ϼ{lWg=3ϼ{lMyLy<1yc<̻+1y?˙Wwc>z43@-Иϼ |o>3?c>ʻ1yk?P_ɡۿES9^ !ڋ|Coq{Ej/OC!N7j/OC= _;ECE+Uq/r{WW܋~Qo~uŽmrŽrŽE+Uq/"yqPŽ*E\q{qU܋S^\A2qPŽ*Yq/zXq/zXq/ǣɊEU܋V܋]^8~Q"/]q/zXq/zXq/r{{_TŽ*E^ۻ^\Ay{qU܋8~Q2qPŽ+e_qW\A]~e_qW\Az߯+.y~ǁ"_==dŽݟ+EXq/zĊ{@W܋fŽ-+Nћ%V܋\^2zC+En]q/zĊEU܋8^fǁ+E!V܋N~Q/}Xq/z;Ɋ{gdŽ3Ċ{5V܋?۟vkgŽ*E/Yq/.㠊EU܋|2qP*eTq/.㠊{qU܋8^2dŽ+E+E+EXq/J˃}Ê{W:}ϊ{W:}ϊ{W:}Ê{}ϊ]W܋gŽq`ŽqpZڋ$qKڋuҡ"/Ij/rMPڋ$q5CGtj/ ݡ"j/ ݡ2 qXqBej/.P{N:~Q2 CECECE^]9^80~Q"j/zj/zjP{P{P{PEڋڋڋ8(^Tßmj/ʡ2 qPȫ+/*^\A" qXtg'>/ry?!ϋ 1}~qyv?0>/rIϋ`uE`0}^:ǁǁJqPßm?/*}^\A"se>/.yq/*}^\A2n;q`nl֋\9^?6sE[/6֋fnȕsEc3~Q"WzέvQ] '\T/_E".huQ] E.Z]T/IjXT/ +ՋzѫK^}Ջ^]^8^8^8^8~QEǁEǁEw5,_TQq`Q] 
EUT/zWâzqՋ8^E2*aQeTT/zY/E.}XT/zǢyMՋ>C,cQ3Ģz>Ջ^G*5]T/z͢m7T[eb?{+؋ޠ^^fd[]b/zb/ze{#{]zXq/"_CtŽMXq/+W܋հ^K/Xq7RyxLAyC$6S=6b16~描ؼ+c*揎'<6xlؼw?{l ͳW=3{lϼ 1yc 1yc>*|Ux?{g^_k1y}|Ux5cZg< 옾b<,={g< nCg< w< 1ycqTx̫W=31c>*|Ux̫ױfNg^ϼ Tx푘ϼͷpb>zq$3{l{;#1yc󍛘ϼ^̫k,xYx̳oxYx̳W1yc<,4Mg< Tx̳g=6ߙ9{g^|W&6ߔYμ 1yc>*|Ux?{g^ϼ dg^ϼ ^Rg^c*|Ux̫W &dg^ϼ vg=67wQc<,S=3{g< ig< 1c<,xYx̳~L|&ig^|,g^/b>z43Gјϼ^ wk"N4N绵E N/Nnmq[[|8߭=tzq~x{.N ᅋz;/N=LyǁGdE]G/r:zKыOH/ qȥEs\G/r:EыuME]G:zqы8^zu􋪣qPeTG/.:zqы8~uǁuǁu"W>_Tq`ȽEUG/Buǁuǁu"W@=yI:Eы8^uel:zǮqPUG/.:zqы ^\Aqл{eEW\Aqл{eTGw8ݽ2zw}YG/zXG/ri:z:zWo\6E\G/:z#ыܞ^2mk ,AsRy4ʋ|̥OKEn\*/$T~Q"7h.Ryk~ʋT^8T^8T^8T^8T~QǁǁW#,_Tq`KEKU*/z5Ryqʋ8T^2*aJE{E_7~qyҽO{E/<x'd}/zIxK2Ƌ`do{Exqx[Ƌ7~$codoCx#x[/.7^7^VGd=.K`Ƽ+Wf̋|_x41/Q(g̋^0c^"K󢯑1/!gӏiٴӌ͉ϦϮϦgiM ϦMgGg'g 6'?>>>>i?fiMڟMs8g~N6g>s39qٴ͉|g>s3[/6}3?}3?}3?'ί!qe<#q=>G3~!q<#qt3y$?Gx8gϦ_bs39q8grg>s39q8l{6g~N3?'cs3-|w<>|3?8lng>sg>kωkH3gs>5$?GVgӍx8͉x8gx8l]<#qt66'?G3y$?G<6g>Y6gӃ /s^;!oޜPtwtw8Dpptwtw!׶?>8=l7,8]}8=lpzؠ8-q@!o]>C=?jD\+r)xC%WqŹ,Oa,p,p,^qqqqFY2sYe>e>e>xq?\a?\a?t>CRCrե>xqyX}\uO'}\u)?x$rǨCcT<C@!YR!WJ?J rWWzH?8 C=HC=H? q@wH?8 ^@2s2sWzH?\aN~ b下3q? rYBU#U#Uz1CcP..SU2 +߱GRy3yLyC-6S;6b>26揳ؼ͟Te,6Ol3wg^<1yUc>|1Uc>||U[b>ʋ /3b<|xYs5g;8Vc<|xY7b<|xYͿxY̳;wg^ϼ*1yU]μ*߱ϼ*1yUcj;6̫W*1yT+Ng^S|Uwvb>z"3wl[1yU<+1yVc<+1yVc]<+1yVc |&3wgc|xY̳S;3wlco|U>wg^ϼ*1yUW;3wlaW;3wl+W*1yUc>||U>wl_W;3wlUk|xYwb<+T̳g;3wl g;3wl|L<+1yVc<|SNvJ|8uG?V|ȵS."wN]A"N]cqQ/=L]=L]wq`q`q%.r)u?>/"7cN]cMSE.r[u;2/*u]SE.ʜ.r=Ee.Bʩ2J]qPSe..uq8(u}ǁǁ"7 N]_Tq`ȵSES.:˩ǁǁ"7 N]=L]yE8(u]eSewuSe..E8赱2J]zp꺸^+.Ɗ8赱zm^+.Ɗ8(u}QqkceX+82u]80u]Ωǁ냬Y_ԛ'͓"׍Y}̚EyR5뢯Y.oxtͺk͓"YtͺkE].zyĚEլ8f]5ǁ5z;ƚu5뢷Y_E0֬Nf]cjEo'Y.f]z5'dͺkeT..㠚uz֬8f}Q5"6vͺjeTuqլ8f]\A52YmfͺjY.zƚuuEլf]5뢯X<}uHY}Ěu+֬FbͺkE_#f]ʀ5WY2`DkE X.zXE׬rͺȋ׬qͺ5].b5"wY_׬qͺkU.5"(rͺk{׬/f]\A52YqPjeT..㠚ukf}Q52YkEkEkE^zf]8f}Q5"/\.zX.zXuuuEլ֬֬8f]TͺjeTuqլ8f]䥗k٤.r&u76/. 
I]8I]&McnR=lRpq`MEn"ܤ.zؤ&uq58I]sonR_mԤ.778I]\AM2jRqPeԤ..&uϽI}QM2-_gfvȝg~.r'tOu9;]?6,9;]"N tveE.ttѻ7fffff/*;]80;]80;]N=Nl`vq`vE/..tqe,r~?[ƣre<El.zx>ǣa?&u?7ܤ.Mw5lRa8 ~lRAl^M15celEUl^JQyLMؼ1?%cG@l8&ulycُԤͿ)湏̫IW:6|g^MϼԱy3{LMϼ1y5c>;/ϼb>‹̫IklR/<1y6c1}x٤;M<1y6c<1y6cy1y6cjR;>1y Wؤ̳Ig:6̳Ig:6̳Ig:6݉7wb&ugM<Ա15c>jR滛fg^McjR|դ̫IW1y5c>jR滙1y5c>jRm1y5IW:3&ug^McjR1y5c>jR&ugM|c-6f̳I}LM<1y6ccqX2ё|8gȂttt.H_TAq`A% E U.ǁǁ".H=,Hy}E8 ]b2*H_TA eT..㠂E8 2*H(pA+.78 z+.78 }Qoq`eX 3 ]8 ]dA^?)ίtAKi/"7.H})͂tq~5 EoX.ίO\ }QYbA- EoXtq, ]8 }QH}X. ]zEƂtѫK>`,H_+E.Y. ]z'dA eT..㠂tћ|8 }Q" vA eTtq8 ]\A2*HmfA Y.zƂttEFbAk$ޠ ]5 E XtH,H2`Ak$^ ]ʀW,H_\tA E ҿHtN:]#.IG^q$wG/*]#E^;]#E^;]#>DyHEE|Ǒ2DqP$"e..HtN:}Q2D#E#E#E^]9]80}Q".z.zHtHtHtHEEFF8(]T$"eHtqE8(}"z.resчN=}z.re?>L=2pCs+\8\J=qP깸RE>E8A2J=_T깸Rez..E8(\\A"PsRez.rs9rEn@.R 0N rmEn@.R "YN bE.w@.rr8\80\f ǁ ǁ ǁ ǁ J =L =L })E%&fq`E_J3\\A 2J d-E_1|qy L ` E0\ 䢷vL `5E0|qy 23hr='xrW'W< E_2\"ǞǞ䢯\O<xrcxrW'='}xrxrx8,O2_qE>&s+6ޠ|M"q E>&%V&sml2^|0c'6czzͱ#:6db**6ocslt>^sl^Qblḏ!6:S96(?bBl<>^sg^ϼz1ycAg^ϼzͱyzLϼz1y}'|xL_1y}|eWk_1yc<5c ̳{1yc<5gQc<5xkͿxk̳;^sg^ϼz1y_b>5|kK|*3^sg^c5|k5|*3wb>5{T1y~W96ߞ̓ϼzͿ^sg<{ͱTg<{ͱ~Tg<{ͱVTlg93^1c<5xkW96ߊwbc>5S93^sg^ϼz,wbwW1Y󿻚1Y|L|=w5cwW86x56cc86 c-?,<1y6c<ſp)m|۬86pGg-|۬86+rGEm|۬E[Yqmv-|[YqmV"_E[E[E.k.z.z.z2[Ϲ"/-r o?>~,tsE^:[BĹߋsEnE-rE~\8[\A"/oq~^tre-.oq~2[80[80[s-z-dq`rE^P8[80[80[䥴sEsE^N;{Q2yMoq~/*[䅵se-.E~8m2yoqT\Ao;qNSqT\Ao;qPv*.㠷8m"wMdq`ȅsY-E6E3} fަ(ro,)6E!}[-".^EX-ro!}/[\AEߢC,qw~/OsE>~N2{Q}-z;oѧߋz$sE>~^1[^cre-zYoq~/*[䣥qPr-.oq~8([\AߢqP sEט-z-zop^d (b [E_@\>-j}p'\>- lO` l[E.r-I~qrE:o3ymE~qk{~qk{~|:ƹ"o9[\A2^Tre-I~/*[\A"tq`q`ȫ+~~/*[ՕsEsEs-z-z-zoooq~?>qPr-.o.o}彸<.oѧ]"彸<.oѧ]"-[䵽E^ۻ[䵽-]2$-.w~]2qPe彨.oquy8[d^TEI2gp\8[%&3C-r ngp\8[%&3E.r-r gpk:[^" qq` 3E3E3E3E3-z-z-܋=} n Eepd2-'ĘmїA^\r̶cE_3f{qy1ۢ?0-"1ۢwR}Θm+l8퇠mq!t[\~*JE_ t[\>T=mׂ,an?2nL`kAn/.Źt[8t[ KEKE_ t[8t[8t{qy(Υ2Cq} ť n/8[:3E> n8[Š"Fqt6fpS|+?b>|o}S73 ng^߉1y}#bg^_1y}|5fpcag<3;c< ng<31<31yfpc1yfpc<_)W73 
ng^c|+W76/c-ϼ21yep)W73'c󍪘ϼ+3Gc>z2Tg^|5fpc3jls|טQqVQ`b|6YaF5Y`[v\Tȝy[v(-T9PZvEn(@iqQvBp8(w;2PZo@iqJ8(PzQ2 qPe(-z@iqJ2PZzҢǁҢǁҋ = =}uziWW{c+K^6^zQҢX/-zziK^6^ZҢ ^\reEEziA^T5]/-2"^T K ^ZeE^^Z\pŹQ䥗eT/-.ziqK8^Z^TEzizizi^=^TK/KKK/^Z8^Z8^Z8^zQҢǁҢǁ2|4U/-.ziqK/^Z\AҢǁ"t\/-^zqyJҢOE^^zqyJҢOE^^Zc Kw K/^Z䅿eT/-13K/^Z\A"3seT/-.ziqK/^Z\A2U/-.zi.KM8mZ'ӦGJ6-ri[M8mZ'ӦEny6-rM\z:mZg^0mziӢǁiӢ1L=L=L=L=L^Tڴq`ڴq`ڴOM/*mZ80mZ'ӦEӦ6-iӃ,ᬗC` K/.n^Z\~˃[}zi;)KgpK/.n^ZE8EX/-z'ziq-K/6-.?!MPdҦ'iL M2mZ"ӦEM6-iGiӢ?06-BiӋtN=L}ȴiiL=L=L^\sڴ8AM=-ڞӋK"ߙqk{vO|"cqUE>%iWtg_ǟ{l>]l>yGxLMjy?׃C?6S56cf06?9?c'}lԈ3#6S53&jg^MԘϼ"3&jg^M15Qc>j|}׷1}Y|Ueg^M_c56IxD̳13&jߍlxD̳̳g56~g53&jl𘚨1y5Qc>j|D=&jg^MԘϼ1y5Qcv?6̫W1y5Qc>z?6ϼVg^|Db>z?3&꯱g53&jlg53&jlg53&jloaxD̳zLM<1y6Qc}cj|Dpc-ܘϼD̫k'9/$B8/*YŪcy!Yy?Q1|q|qyE8~8狼vu8~ cvU[888888Y80Y80Y80yQ=,u&p_ ],ɥ΢,ukKE. \,WI:rJE~YUKE\꼨RgR.uqP_-]꼨Rgq:ҥ2*uqPJeTxd;.h2Y^Ϣ&3q{Mf<>x{cƳI5,zƌgqe<8(Y2x^TƳԜ,.㠌gqe.c'`26>^SYϢl|`5G,zMggAxygk:Ye3En񼨌gAxpƳ g< rƳg,r+Eej=hן a@>1 Qhhhן = ^\sO@@^0Z\ayТ1.Z>uЋKu"q}E>:hZzu^A/.3Adu< yb.6OBlcoS;ؼ ꠱y 6揎qLu=bFlЈ͟+1yAc>|U1yAc>[W43˘ϼ-23ʘϼ(c>렱K23:hguxL_1yAcnd43:hguXhgu<렱<1yAcT̫W43:1Ac>|U̫(k1yAc>S43:hg^oj1yW46PMϼ꠱^Zg^/|U5AcʃqP-z,0YE#E^;Y1aȋpG>>&|yg|^\sȋpG>wȋpG>w䳸"E>EE>8(Y`|qP䳸"e,.EE>8(Y\A"s"e,rgqq`ȍ E_Ezqy0"7..qqȍ E_EZƅ".^\ q E. ]-@@^Z8Z8Z8Z8zQТǁТǁТ"YhhW,=,^T ;E_-YߓϢ/<~g#vj<Ȏgїxqadzv<>7.xQÎgggїx=x^\sdzKLv<vhѻ7@|ЋKТ"QCi>G4CM# M'M?ϦgӾ$6@?vMϦϦoϦϦO\llllll|g>s3Q\g~.~6 ||oeUE\gӗg<(~3gclzg݈g<(~3g/̣< M~3g<(~6j |g>s46@? |gϦk|hl.~3?@?󙟟lPg>sg>Ϧ{iϏ3?@< ̣t3y@?Gg<(~3g}Ϧf̣< < ̣t86@? 
M?n3?@csqNӦ|8m:N}U%@N·/ Ї>8-NpU8'@N!^}8-N `Q Ї$@z}q@!78J>8 C$@|>\| 'U>gC"C T|_&U|K(U>s!T!T|*ŹS|> AχR\|\|0U|\|\|\|\ C*ygq|>8WZ|>8Y+yC*=|>E*=|>䅗*ŹpU>.0W>s!|>\a|>\6xe9.0Q_x2e9.0Q_x2e9.0W>q_x2 Z·N?+Za·ŋ}VNzVeVC/r,||,έ·8̭?3},,?GO Їa ; $@zhqyH Їa zQ ЇR!/}_D}K/%@rhqN>䥗P!/}K/%@#%@L }8*L Ї8 Ї8 Ї8 Ї8 ЇET }Ü}_D}q@ Ї.S }2%@z}q@8'@z}q@ }q@ Ї8 Ї?>s2s2s8'@z}cC^(J>1A!/ߕ}cC^+J}w%@] Ї|W!/ߕ}Ü}w%@s2s!S2s2s2s2s8'@.0'@.0'@a1%@s2s!7KJ>\aN}ȍ}hqyjD Їܸ(%@rC_E"$@rhqyjD Ї\@* H%@.%@z} =H>8 C9C}hqN>8 WH>8 D/Q| *}gqypKχĨ|T|F/Q|*}ACP|F峸D?}KL$@tJ>8 ЗH>8 ЗH>8 hqyN Ї8,)$@z[Rh>K<>ЇxC)}5ACCzkxC>xhq)ןxhl<G/61CcZl^uMgl_B[bOl͟='6jvyCcEg^Иϼ⡱"3xhg^1Cc>ƌ23oc23/ʘϼ⡿xhl3g<46?cg3g<46g<43xhl03xhg1Cc>|+zLИϼ1yCc>{\g^Иϼ+~\g^|ͷb>z 3xhl K1yC<1yCc<1yCcm<1yCc|-3xhgcx3:S<43xhlo8|5B8A8@h=hq-Po \T #ηm|[ȋSB/*Zo E^:Zo -wɊ8Z80Z80Z80Z80ZƁТǁТǁТǁЋz.q` q`=<U-ТE \-ICr=EJzhR>p=E^MZ"t=eT-WLC8Z\A2qP= EEE^ezQТǁ"/\-zXzh[==yzhzhW^^T=E\-.zEC;Z\A2^C8=2z).@8=2z).@8=2^{ eHqR|?Wbw8?^\|Ahq~|8?^_/ыz|8?^_/uq~|_/uq~8?^_L׋82bw?8E-.hquGFIFI/)ǁQҢǁQҢץ^SEEK%-z%^2JZ80JZ֎Q'=ce%-z](iqEI/*JZCnqP%-.(iqEI8(JZ\AQҢ،qP Eo%-z%-z%(i(i(iKlFI/.O;JZ %-ZQҢ%-z5(iѫFI^0JZjQҢW#a'%-r5h^yhk;Z䥗En"h^pwK/wGrw-ыDF^qPwe-.hk;zQ2EEE^z;Z8;zQ"/-z-zhhhEuGvGvG8;ZTwehh'iы#|N}-"iы#|N}-"iѢOӢE^;-ZEӢE^;-zQi"/-"i"/hqE|i2JqqE8(-zQi2JqPZ'ɜhqE|i2J^2TqwHvG/.ϛ;ZR"*TqwHvG|"w^\7qwE-/wGvG^;Z8;Z8;Z8;Z8;zQѢǁѢǁѢ"hhW=-OFIZf'E_#1JzqyQҢG}(.GI{FIZfE_-3JZQҢǞQҋS]qw+JZo㌒(i^T%-Q9(AFI8e] E1JZd!FI8e8EE_2JZ80JZ)EE%-.|U3#1yLc>7qf׷ig^ߥ1y}ig^_1y}|U35Lchg5a40JzV% }(Aݢ=+>d40JzPCz2JzV% }0(igozV% }0(i2>d4\AQ҃(i2T4\AQpEIe% qP4952~(i9FICǁQ҃FICϩ%=\bi [H. 
XhG#,>a4FX, }4biKCXzpbiȣKCn\, is4䋨!7m.6?ϔ:6^:ksm~|Ixm~\6׼:Lk^9sk^k^J^1׼:5Ox?S׼>iys׼>e?Ӈ5y}^5yN5N׸:5s3}^g?S:55yN/׸:5c #;-g;*G|rlg8vf;Cǁ΃z =vdAf;Cz2z<{ơ= =v>{c37q=gov8(.lgCOf;e{c380zFd3 M˜U qP3}09eT<2,sн6?#͏kgjz^rOkȹ6?q>S9ym6?mk^jz~5y5=yͫyk^Mk#׼׼6 gjz^'5y}^קg5k^5Ʀ<׸6~k\lz^g*׸5Ϧ<׸6W^g 烽p>q+Z|!1ne9w8sVfq`+38z<+ǁVfȭ[ǁVfq`+2C[ǁ9̐oa2m9̐{G0C1:rA0C6:rfms t3^9̐oaT3\A9̐a8(.fr2a~fq`380r'A0Cs!Sa0*rOfq`380rgfq`39̃a8(X9p0*pY*^ˊW2\Vv!\V_ۊ..ˊ.ˊ.ˊ..ˊ.ˊ.ˊW?zVf8_ lepןCo1<|9?bmA]d+3/چEp.2.Vf2.Vf2e qP+3\AeA?܄BqO9C{d}c>gg3u8س}c>A](}>gg3A+2P. šZ<>gZ qP!P.>g qP3\A}p9e }6>g0}>gq`ǁ}>gq9. }>gq{Eg 1˞U }"ʲg!=CyPeC,{>b3˞χX }>IJg!=. 9u!R 9u3Y 9u3Y!7wܜ:!7mwT3#!7mw8(.xg2w|Ku2w|Ku380z is380yPΐ6;C㝡ǁ΃w;C㝡ǁ΃w;C2w4 qP3\A΃w~xg_6we3rKxf㝡ٌwܡ;xg!wwܡ;yPΐ;t;C Cw2wܡ;.xg2m6wT3\Ap;C^s2wxggxg;Co<\q3!8 yxg"!/8΃ws2;C9܄ =w>7a380z =w;*z =w(2yPxg"ײz[>g].z[>gm-}Z9Cz[>gm- js>a3)Oy qXI.垔{RsoE}p.9巢>g'RC9C? }>g.M9Cs\ӹz d38z>gq`38ypN>gr}1s8, }>gq`37w>Tas3@9Pΐ` ygwx2xg 1xY^]$ɵG~m~\")ym~\k6?>>S|{m~\?IGεkgism~\W3;y+yke+G5y;y+ym5Ok^5O!{k^׼{k^k5y;q3ym\L׸T;q3ykk5y;q3ym~5x5y;ͯ)yk^k^w^W3;y+yk^k^w^TLkWG溜kcgkF>kiF8]*6qz3kkDLq6a"׸55kk׸qw/1x8_8_8T3p> cp>?|g#~|!0wT3=w;Cǁ΃z =w;z =w;nq`380z |g-!3 {t3仦!wwT3 !wH:yPΐ{I;C& |A;e |g2w8(.|w;C!`wT380r#|gq`!wcw;C!dw;C˘<|g!$|-o|T0k3|3o|uЯLp7>C63IxP7>g8 $a8- u3k3|08(IxP7>e$ qP0.IƒJ8(Iz$a2j.㠫2JUp]/]b0ӄgR,8jff>bJ2*>b0\AÃ*paJ2*T0\ApeT: qP01"K2*~asCKǁÃ*CKY:<ܶu0 KǁÃ˝[gC,1xP'K >Yb13A } b%fC~ b%fC,1d1ÐZ C:fr[aS *fr[aS C3 u0m C3+ p0l C+<\r0>۽l C+ v0>۽l W.^a}{28W.^a_8W.^az!WxPp C^q0\A+ y^aȓ CDW$½{A CDWxP{A Cqq0{+ 7a0Ε;W .+ }^a CW.\r0\~+%* .+ ߊz[Q0\~+oEO !^W8) ?\ C?+<\s08Wz^aq`0^aq`r)νpR{c+ qX.ŹW8pRc!w0 cΒc!"1'.ak,>d05 .% C3<燈lz\Wlzlzllzlz\/Vlzlil:tpl6 6lzl)smlia39fxmk~k~k~lzk~k~9f3g^ϼOkϼ؟y?c33y 5Ϧkϸ3?S3Ng3p >C!! 
s\tfp:~ȍ3z|q@\qpQ〸CV\qz = >8 .〸C*.?[=Sq). >Nqp > S\!7v s\!v >Nq|T\0qeeeeEz|q@\!` s\q܈).〸`8r7C= >Lqz_xEtqt2+~ CߢpGpGpG﷨=ʇ=ʇ=ʇ~Etct2Q>-*{qQ>\aGpp=Tùp=T.6s{>\ape q/^kGпOq .пCV|  G\0Q>C q>B\R =ʇ>B\R >qQ>\aGgR =ʇ8q>B\2s\0{9.p9.p9.@|||||cD.0/".& >8 .〸`8z|q@\ ۯ > = }n\|c`8 >vBy𡏝P C;E >vBy!|ݲ"C|#9.eHqܗ).e/S\0ۇG >Lq-+.p9.p9.p9./ s\2s\!_D|q@\qܗ).〸`8r_C= s\qz C= >\a >9.0.09.xp~C|ȽrI[iW~Cn|ȭV?0V?!>VZp>\a>VZ88882s?2s?!o8~CydDC(GJ>䑁C"  DC|?Q"!OД DCyDCp x$$z|q@"pN>8 DC DCBE"лHz_\PSU{]$BrAMAzC Ї*= BqCrAM-~ Cp/W{?.ϡ{A.ϡw- G#h=s-4{q@h4z{ Z~=h=^-z ;hj=\aCpZ~}4e;hz{ȯ{¥ACB}4CQ!o('=¥HCywWmƅpm\PԵtm~<]ӵ.@]LĵX6?6^Hkqtm~}|m~]D׼Lk^^W˚W׼"׼6g^ק5y}_'g5k^5<#׸6~k\^-#׸5<#׸6s^g<#55yEy+"׼5׸6rm<|S_|(5yF_\I6xkQk(_)^g}g]M5(ߵk7)wk^Qk/q;wPQp>|p>m u/oi[8>AE?q>m Ӷp>|(AE-C;·ϡǁQ(_q`/80wPTC|ǁQ]GB|ǁQ(A]S =FBÿq`w/ݽ@uݽ(wݽ;)wB;, rw/ۥ{!TUݽpue qPw/\Aݽ =vBn;^q`w/^ݽAuBn =vBn =}UpN׉W8_k<^WXvZc8_k š_a k|15~ew5pݽp]ZcWXvet1\Aݽ.^{wtTw/\Aݽ;:vemt4\AMe;˦2l.˦wteӃlzeЫ}   ^c_0Ø}.㠻2 >^c_0\Aw;et3u3\A}k 8/xm qP_0\A}L qP_0\A}pe }\ʾ`/|}`q`_ǁ}`R.k }>ľ`q`_r}ǥ/7l }`7C1>>{c|0!/<8>>{c|0'*!] 'C9!xP 'C9!r`ȍ!8!r}`ȭ!7N8(!.㠄`!5<`!5 =L&Cn =LTB0 `q`Bǁ `q`Bǁ `/M%e qPB2!xp`_!N\f9!WƄ`ݲeL-;!r`ݲ [vB0n e'*!.㠄`ݲ2J8(!.㠄`"'e qPB0\A p%C^.rB08C /T} &O }*`ȿr_0v}`C2/O yH`?d_08/}`q`_08/z<`q`_08/zǾAC/zɾ`-& \./ą}[L. = b/ą}'. >qa_0\A}E}0已s|rp, d|rpyH)>z`メw>Ta|0C`q`|0.5CメwC" =. qX.9>Pp傚メU qX.9>PeߡLąeƒKe2a C^)q0 C@LJ˄L˄w,s;6?>k6:Ok3]6?ͿkIum~d]XLxm>6?k4Lekc$6?yͫLʄ׼U&525y ͏k^*^WLxk^׼|k^׼>~y׼ʄ˄k\,^g>rqͳLx,^g<˄k\,^gykek\,^_7?S׼ʄ׼U&52g*^go8_p|5%_n\۸6wkſko_]Zb g*]߻6oXߵTgyͫwmZ׼U p> #;D8 _8qo8T/#p> ߐ.T/7 |zX =,Bu48zX t/8zX =,ԭ_q`/8!|,ܖrc_IBnN\;_wJBP\ Gq~t/>ſo.ܫr_wLeT qP/\Aſp>d/8zX rǁſ{-BU r/8zXQ|E1(MpAuB`8 "n_8_ |]0(}u]0 u/Ŵp.xPC.2upue /c}2ޗ.n_˾ݾp q=p q=2vfi>i8 >`H080$z pi2$z = =`q`H080$xP4CCǁ!A q=pC1$.{2Ԏ!.{O qPHB!8$.㠐`B qPH0\A!pe } ː`B2$؉!`q`HBǁ!`SX.` }Đ`q`Hr!Ч +N> d0a  }A%C2A0 70 y g |tH0IC!<`ȓC9$r`ȓC^qH0! + ;2`ȭ+!Ү J2xP[iWCn] ve*2 J2.`*2 82xPpqPe0\ApUeT yɕAUCn] we0ߕWCY  Te0^y`s0W2Ϗ\ y`V?de082h`q`e082zX<`q`e082z/-7 b2$zɐ`-&C >qaH0!@COC >qaH0!'. 
8($xP{AAs+rͭp/w d+rͭpy-w rͭ{AC9V`q`+08z/V[ǁ{AC[lC[;hn8,w }4V`rͭG#l8,w }4r!߄3$P!K!%`CC^ qH0C12$bCC7 k6?>kSл6͏k3]6?ͿkAawm~]u)2xm>6?͏kQum~R}ym~J]R׼Lk^ ^Wd׼"G5yEy+2xm5Ok^|5O|k^׼|k^k5yFq32xm=Z`W_+|j8r!W8| v-j|g!߄] qP-CCkǁZA =C] =Cku;48zX =~_]! n)rA5B6r_- kp/ͿonT/Ϳn8._2j}_q`/8r'A5B!Sn6rO_q`2&BpΗfL8V { Ȭ_8_ {|E6#~|y//opΗB12wety/#~2.㠬Aee qP/Y8(zŬ_~2R.+2ԕp] qЕ(fJeT>O`/\AW*?d/y~ǁY_KY_q`TfB~ǁYRz =fJeT8(xYp] qЕkԕp] }Ƭ_~ yYpee;_~28(.㠬_Ȼ8(!~O =f*z =>herY'Kf.O }ʬ_q`/\a `BC2A' T0I!O  yAw0 !R y `#=* AGzܴ9 AWn is𠂀!O is0䑞 qP0\AAoT0\AAoC!7m*r `q`080xPA `q`080xPA `q`0\AA2 8(xPA_ȍelXW_ȍ!7n\nur_}!ngr_} qP/>Ϳp5e qP/\AͿj88B qP/&7>d~!ܾ;r_~wt3 g*zGǬ_ȿr/i~ GD`/Cr_q`/徃53B}wt,BX =,.\ qXp嚙}8X qXY r_.q B}FX y徐78\ }Rr_.\RA.b6ϽtԻ6?Ϳk#L7ϻko|{m~]uGg2um>ɽ6?͏kQum~R}rߵym~J]R׼ULk^*]W׼}G55r5y>S׼>y׼>?Ӈ5y}4_5y5׸Y5rߵ3}_g?Y5r5y/׸Y5rߵ}<}׸Y6~r5yyͫwk^Tٻ+{ \#+{/Ȯߏ]+{>Seصyp%Vߊ}޵;k7oĮ_]\ l8φܕR|>{PT8φAU?q> p> AU@*gC.އVB+{ǁzX =ܹzX =V>fq`e/8zXo|we/[+{!];^FWBܻޕ|we/;+{! ];^ͭ+{!_#] qPe/\ApUeT^q`e/NʕVBn\ =Te/ʕ7>dH/o-pwXfQ8_;^B]8_ K_  v|./څ~)dH.څEph!A] RȐ^.څ8(wP!pe GbHBz2 #1.2.6.2.H  pp }d^n8~Ȑ^#BCzǁ!&Czǁ!Ap = BCzu180z =  pp qPH/ Cz2.O;2!p*.Cz2o㠐^ 8(.㠐^Bz2 酼^Bz2!^q`HBzǁ!^T.A }xĐ^q`Hr!g e[. u/1"| }_cD&* ЧL_ |KuH/Cz!;^3 B9r^3 BުqH/! ν6? ks3ݧ6^Vgxm~T]Twm~6 \R5yU>Se׼*{׼Uٻ5޵tk^k^] 3Uy׼>y3}8_G5y}0_We_ce|kk\]??5yV񳘕k\]geRzkk\]Gqͳwkkg]We׼*{׼UX˻Ɵ5kyﳮ_g]ϚkwY毲d]ϚkXwm_`}Z޵k/]]6s}62{|zΧ!eqU8Tf/{3{|\Χ|z{PG*OoC:wPp> ې8(!3{ǁ^q`f.dfB3{!nfB3{ǁz =fB,|owI/䛻Kz!];^WJB%*|wI/{Kz!.];^ Kz!_1] uI/\A%peT%^q`I/.%*Bn\ =,TI#rC^8%pLΗKmKz|JzX t|.~cI//ӅepL%LΗ2]>BKzu.{Kz2L.㠒AeT qPI/6%*8zĒ^88c88wPWet1\AWCoX;+2T%p]q%Ч,BKz.MBKzǁ%zX =,cq`I/8zX;+2.㠒^C2et1\AWCwPWet1!Kz2*TI/}eA%\ qPI/\A%peT y%p>dI/Kzǁ%ABKzǁ%g,\.{%ʧKzRY =,8,7?] 
qX.~qG l>d/$| ;_ n>g/7EBn\ y^ȷTBaH%6Ba%6%6B鹤wP%peT AeT ^q`I/8r^q`IJz!7m.BKzU =,BKzU =,eT xi.㠒^JzU qPICB:\ dI/%пOB:\ pIrq%B:\ wI/&%pBn];^Jz!7.8.㠒^Jz2*TI/\A%pq8a\A%pB^hzx܄o.\ wI/&%pBKz, z/Ȓ^?aIqI/%yյzm~^gxum~^_Azm~~+W7xm>6?BOk3ݵ6^dym~]bwm~5޵vk^Tٻ55yUyͫwm~l]We˚We3Uy׼>y3}p_5y}h_We_ce}kk\]??G5yVsk\]gezkk\]Uqͳwkkkg]We׼*{׸YһƟ'Kzﳮ_g]ϓ%kwY毲d]ϓ%kXwm_`}޵k/]]6s?uI/7|uP%p> ݐ[/,+v]%p> p>=^#gY|G!J];^8enWSeT%^q`I/8wP2CKzǁ%3BKzǁ%A] =,BKzu!CB%.TI/+Kz!_] AB]%o.TI/&%.ܨ.㠒^Jz2*}Ȓ^q`I/8rABKz!Z.!Kz!7$.Y t|.oΗ%pLΗni>2]8_ tX;t|./Ӆ~cI/8wPB.t2*TI/\A%pBoX;^JzA,8c?8c8c?8cmKzu1\AC .T%^q`I/åɒ^q`I/8wPCKzǁ%A =,BKzu1\AeT }Hƒ^?8cC2c?>$cI/\A%*酼^Jz2bKz2*8.㠒^Jz!8.㠒އ,>>d,/ B1KDZЛSby???~?U???~??[U??~?l6=B6=A6=@ש~6lzzlzxlzvlzlz]cy?^#~5?~6=~5?ͱyϱyϱyϱMyϱyϱM}mk~k~}m3Cg^s,by?>5XϸY}m8 Cn{q@ACx==8 W'/"k=䛴v᜵{ȗieuZY|V.vNCU+kڅs!=KvT!7=\a=\a]DYdr]8gz{͔v=C8{8Ey8󹈬...Cof{8]{ C!kpppoo====v|Y~{CYpoo=\a=\aڅs2s2s73څs2s73=\ape曊q{ vqo*>\aЛd.0T|=vqo*^D=8 k〬?\=8 k〬]8T|q@YdCv==8 k7.0T|Ü{.d.0T||S񡏺 益qo*>Qvqo*s!o(kp9kp}AY8Y8Y8Y8YeEdYdz Cv==Y*vrRY>@ -zЂ{mZprJ-zCZp B -冕ZpqXnXЧh=\aaC p冕Zp}*eVj/̡?|LϒQ(!@J)=>TA(.\3 =.>ӵQ|m~_1n2]gW|m~_ӵ{m^dkLͧ{m~^q'g]WDt6?ܮy+""r׼55yE͏k^]WDϟyEy+"wm\5O~k^׼~k^k5yFq3"wmL׸pFq3"wkk5yFq3"wm~5ψ5yFͯ)"wk^k^k ]6 t ]6tm6Bq~/8 ]Lk>@\6zm3q޿|(·mAP8C\ Sp>>\ȗQ,P?~#ua.O,C0wPp> C.̅80! sǁ\q`anNB s!g.̅B sǁ9zX =,̅,̅|va.k sU \wmBm0wP/.̅|va.䋧 sU ta.ۧ s!.̅q0.\ s0zX =,̅܀0wP\] s~7 [. 
'\8_- q~.բ 竜|3Npi7*?Վpi7*gW;]8ߴ v_혟;vǁЯvυ]W;e qP~s2υ8(?zAe a~.\A#et92\A#e;ˑ2.ˑw:p] g~.\A#?d~.Ɵ\q`~.å\q`~.80?wP#CsǁA] =υBsu92\A#e }\.G8rds0rd.G>c~.\A#*?ނs2υ8(?wPpe qP~.\A qP~CB,1?z =T~.80?z }ҦsO =\n:?\rpsZ;\sO'Q~.a?T~.EB;?mܦ gnӅbMO3ܦ;/ŹMBt!(m[LBat2jӅ>qaCBm73lz3f[ m73l\nzof[ mdn>b-fͶ{{6Bofl;Yr-\~˝%Bt rgA˝%Yr-ށA˝%9Yr-\C˝%Bt = BnǁA{.wt = ;0zt w`-80zt;Yr-\a[4n2˝%Bot qX,9z+͠[rgA }n!Y/A[.>d-#9A' \J0>`_$nGI|m~_gtm~__ |m~[A{m>Ǽ6?{͏k3]6a^{m~]p5yݮOk׼n)vk^Ak^ ]WH55yݮln׼t5k^TL׼~k^׼tt6_gC{/,*eAp> Cp>>[GKAp> CᐯT-OP8仩n2 }Ƞ[q`-80zt;놡ǁA[nǁA[q`BnǁAn!m!_l *f[e7BNͶj|v-[m!Il;f[m!_Ll t-Ͷp5elͶf[q`-Ͷj6Bnо,w7nT-բ[7K8_-Ͷprf[ m|ml v|.8~{c-oڅMpiͶizl f[q`nڅ~{c-\AͶp5.f[m73lT-\AͶЛ6et1\Aet1\AͶ.6.73l8c=m2!ml6Bm.M6BmǁͶzl =l6cq`-8zl;2.f[.6et1\ACuvPet1Qm2xPͶWl qP-\AͶj8.f[m2jf[mdͶf[q`mǁͶf[V6.2l }f[q`r;Ͷl6eKn8,4l }f宦m2uMB,9WvP[iB,9Wr\Yȭse!Z+ wpre!Z+ wp,ɒse+ wp,ɒse!_+;\Yre!_+ =̕B+ =̕T,\Yq`reǁ\Yq`reǁ\Yre/Me+ qPre2ʕ8(WrYwjB/BSeze-n\.e-˄-n|v,-[Le.㠖Y-[f2j8e.㠖YZf2 qP,\A-[Lez__Yȫ3IABCg!o9tCg!ЛSBCg: 9e,;8t&Cg: qP,\A %at,ѱ[FBoI %atrѱ;tFBoI;\rt,c$ޡ3:zK}ҍc_.8:%ѱp/n;\qt,^ѱpy-n;\qt,\C˥G9\qt,K>cǁѱXq`t,80:%ѱ˥GBc_ =~gt,80:z;\qt,\atX=/c2˥GBy qX.8:zXrѱ}b!f]_+؉] 73>vbW,Ɵ]K]v]+vm~^g|m~~+1{m_ggrm~^3kt6?\?ӕkqzm~^Gܵ \Mkp5g]WW׼b׼6?Үyͫ+vk^]k~55صs׼>?Ӈ5y}_5yu5vŮ׸5Ϯصs3}_gW?5Ϯ5yvŮ~k]k\]cqͳ+vk]k+g]WW_cvmKk5`vm}>ص6qvmٵ[kf>S}ke/5to8> p> rX,Ob!F 瓮p> ߃ IW8toR*,']2 ܑ9,z = Bbu/80,z AsX,80,z = ݿXq`X,80,!`!_;ٸ6\m\A_i .}K6m\APqqbWpy,۸d6@Wq+8th LׁK6@Wfڸm\A_i :qڸ\:.q8,۸"i .qX:.q=E\t\l z+aul\AxX{zX+Vuo7Zժ _+W zCUWs6U].;el~L_6?/җg0L.ЗO0l~4_6l^]6?/˗O凩Zrټٻl~$_6?/ȗ͏)g^'e2yi&e>x]3/e>x]6?.2yi.?\}4^Kuϼ~//ϼ~/׏e>xex]6_3Oe¹>ANEvu(WWp.O]]|y ]]dWW ]]v\]A_ ν{ E}A+8t z^+8@Wׁru}A+A+AW:P=W .qP1A=K:P=K~oqP1y .qPtu=t :tuht :tucq+8t:P=Ơ@WWq+8uz%186Kc zEWׁ186+]]%ru8u\]%ru8\ WWp\]A .qtuv+8t:p)Ot zDWWq+8@WW[X]]AǁB]]%KҮtu* .qXvu8,_.J[fu,_A|Ͳ+yŖ w[\bׁ|ܵ+yŖ Y|(W-_Anl fׁ|8͖@Wq+yŖ@ׁ|9th :|(Wq+8th:P@Wq+A/MYKd .q@YK-_A^l :T}L o؁Kg `A P= V}L;P V9Z=  .q,ȱ )K .q,A  .q,A Z+,8n=- Â~ <,RaJgY<,ΒaA~ayXc-aA~ay؁=RWvu}@=tut z+{<]].%='WW[_q+-]]ouq+14Y:.vut\ .ObWWpy,t :tu]]Ao:pt 
~KWWq+-]]Aǁ@WׁKŮbWW]]%KŮ=K]]A{tu8,+8^1 븂|sjWHW߯Xƅ:'W\\q=R4S\^6?/җq~j&eѸl~8_6?eT\6.˗O,l]6?/ȗGea\3BeSxϼ$S2y.󙗪2y.KuϼT]>L|g7gg^2pϼ"pϼT]U]͗x꺌g2y.o?U]Su4ˬ KcÕ5^y)AXx-Xp^ (W7Nk,8/R@it [iׁx-Xp4^AN]xj5^Aǁ@ׁxj5^Aaxj5^AǁU :xj5^+K5^J5^A| kׁx9YgW\k+ x9YgW5^%x=@Wq+ȫ5^J$eWpPӔ5^J;@HeWp.O[HWp9XYBWp(W4js%.8WA_Ш :xjW :x}A+8Pu4^A_Ш .q+A .q+AjT1AjKTq .qP1AiT1AUW:PUߩ .qPx=St :xhRt :xcq+8Pt:PǠ@Wq+8Pu*%88H6UKTq zEׁ886U+Ȋ5^%x8Hu4^%x8H\ Wp4^A6 .qxv+8Pt:p)VZt zDWq+8P@W[Xj5^Aǁ5^%Kxt. .qXjx8,Kk~)R@i߭ reW5^Axܵ+y rkW5^An:P kfY .q+ 5^Aǁ@W5^Aǁ r^+8Pt:P@Wq+8Pu4^Aǁ@Wp4^_>x8H\ ׁx8,UEkY\T %ߎB_A!:+K_A^ u_A^ r+K_\ W3_%r|8\ Wp_\ Wp_Aμv|(?`A,  f t(XXd N`A `J@LWxj+{<5^A OׁKgZjSuY+-e\/A2/qbWQʸf( 2ʸf( "JׁKS2LX*qgT+(e\Aǁ2@Wq+8PE2e\Aǁ2/qʸ( :qʸ\*q8,M˸( .qX*q=Q\4U, z.+aiXud\%KSÃaSWQޛ@|9jSW{1i:pQuIS߿TBel>Oel~|?LMeh\6?/S}|*.iGea*\6.חOe0F.WgxPg^}.rϼ"uϼL]͏|eg^V&Se>2u]3/Se> P xYp^'uxZgy .q+۩5^%x=@Wq+8Pu4^Aǁ@W5^Aǁ@Wq@ނ5^Aǁ@SW^+M]A|m kSׁ2u9gSWc+Ϧ 6u9gSWM]%2u=@SWq++M]|*T+87\s'8|l:P\ 5 G1s%8@sw-ȁ̦Gs@[oM]Ah :4uIh :4u}+8uL]Ah .q+Ae .q+AGT1AGK .qP1AT1AIh:Pi .qP4u=th :4uhth :4ucq+8th:PǠ@SWq+8u%?8ŠK zaESׁ?8Š+ȊM]%2u8uL]%2u8\ SWpL]A6l .q4uv+8th:p)Vth zDSWq+8@SW[XM]AǁM]%KҦ4ut.m .qXj6u8,K~)ASWpR|Rve re`;X4l re`;X el;Pv ʶ9#ve rF, `A^ m;Pvth :9#th;Pv g$`Aǁve :`th :8);Xp`%(;XpҀ,K`%K `A^, P`X`.U( , P`X`A9;Xv`A9;X\ Xp`%8Hv`%8H(m؁=<>/ȶbA~`XMyłF^ W,b+1YW,nbA~$cXУ4bA~(ci O;؁},{<`Ai;pIXK;Xx\zRgi2pڭpZ5pZ5pZ&Sa/V/V/d&s Usp6WU^8^8_89셿te{[{j~!/2gs 8*s 8 a{s ^8a/t`{s 9B:0q9,8^8B:^^ȫ`/Wr+9Xp`/'9 ys,8^Or,9 9IB`/\0.셎`/t {!oӏkpR%a///*@/*@/&sXp6pӇ^/'s /[.9Ӈ^/ 's =©a/ N:0w0^8Bz$9셎a/ :0gs }9Kfs 8l{%9I` .qa/Hs 8.q/\0w#_a6n 8.q/HsXpFp܍|'t^aF^s =B:0&a/t`{sXpFq9셎a/t` :0q9셎a%s7Kfs 9Kn 8zasXpFp܍|V0p܍ ~ s 8.qa%9Kfs 8.qa/dA.qaB`{s ÂKR:0;)^8BzE s ^-a/t`{sXp)b%KSzE sXpc%K%S.qXZ2o,`{)s ߥ%K?K]XT4e+{!Klʂ\SVB.e+{!2ʂ\bVBe[;ʂ셜d+{!vQʂKf[ 9Vv!le/t`+{!2^8Vme/\&[ ^8Vme/t`+{[ ʂ셎le/t`+{V_>Kf[ 8̶l+{u)[ yK^ le~%[ }L`+{!/hr^셜Wd+{ leVB+le/"[Yple/"[ 8̶.qme/\0^ag[ 8̶.qme/.[مtl;=s,A{!x ? 
x d~@~9@9J?|~,=@~C@~DX+,ɕ%+KfW 5\y/\0^avgW 8̮.q]y/\0^avЋ^av]]p6޽З_^Yƻz.d{W#0޽г?K끾x/aiuxB_`{򗰴d{?'^ƻj =xB0޽Гw% gKVw/ ^8xBƻ:0޽Гw% ^Iƻ:0޽Ѓ?w/t`{]piux%KKƻzKVw/ 8,.^ƻ.qXZ]2g 8,.^8^$7~/ ?X$xblƻz ` .^qƻ5u|/Tںl>g_6suZgel~_6?e4]6oy/͏'Tкl^^6?/̫uټ}Y̫u4l~^3NTɺg^|UǺg^Ge>jb]3/Ses a2]3/Se>2]3/Stg^W|ug^e<4]3OSea6\3OSe3Дwq68^zt~/糧i5d9!|mc5RC26VC@!|mc5d!S[ yԐAn" rOm5dՐbj!DX @!ՐAn" :TC(5d!ՐAǁj :TC!ՐJ t :TC8H )5dpԐ%RC(5dpR%2jJl5dA^^Za^Z[RR*oe`dCA0( zkGdpJ^6'9(S9&)lesF.esBfeeellx:?ͧy|+q#q0/?Ϳ̫xaj1^3eazϼS}2y/Wu2yI%/̫xϼ͕Ňxϼ̫xϼ%2y]!.2y2J%//S*yn8H\ K+mf{K7\Zіn9(>{:uyui]gЧ 2:>}uy35uA>:35uA>:ˬ

M[_߈yټll}g^WS!2y1/|U|̫yϼZyg^|el=L|Ug^|U|. qϼ.̫oy|ygFxil72k^3/e>2k^3/es2}m > Mr @uς6=8oӃm M?PF 6rapަm2rap^mzd939m :4r9AǁFe :4r9m :4r9th :4r> /Җny5t3 GHK7t3) HK7 ,

\X?>\XyDA~eg>lZ}3ȯ-Ѣ>3- .q3A% .q3A>K$| AO4rT3)FJifc-Aj=ҫ#@fc-A_ zW3豖^͠gz5\*j|Yfz5KJAjz5Aǁ^͠@fг.L{5Ajz5 :jz5\*j8,L{5 .qX*j=Jӫ\T2 zW3adګy%K%^y501R8l& [^ҫV^͠dj=ѫyRW31^ͿQ9clNeses.ul]6d=L柙_xs0:/`.Oeas^6/i/W2yU9/_Wjo^3/e2y7/󙗏󲹦06/We2y6/W]a|\3e>x\3_Fe2y8/㙧0]7.㙧25㼌g>xl~p<}ѫy|Ӽ|KeNeaj^6/ͭfeNWyκlv\6ll~tyz._3]3/e2yy5/W}ϼ>q=Kp^;-|p9yey Aqcp(gwc818o@8?|18>[}\ gWe8K| }Aǁ>Π@gq@8}Aǁ> E8}Aǁ>Π@w8}3ȋ}A^ r3> r39> ' 83a> 8( r3ȡ>8Hgqr387xs(J<; VnyQr38?[ r@r387xwZr38jfг Dfqr38PyvAǁ͠@fгAǁ͠/KTn*7r3%Rn8Hy%Rn8H@w8H@fp%w8\ w8\~gУ4\~gУ4%w>͠Gi*7Aǁ/M*7Aǁ :Tn*7t :Tn*7T3A)7,Q\~gpAo͠7KTn*7.uL+7A/Z :Tn*7^Rt zJfqr38PyRѴr3aiiZ%KYif .qXJVnށif{ߏ柏yټg^ |l^?L|}l~ _6?/Wa=^3e><^3/Oe2y/󙗧05/W2y/Wa8^3e>x\3r_FOe2yz*/㙧0]7.㙧25詼gx驼l^_3OOe8/ნyy   .q2k%rX>ˠ@eq28ayAǁˠ@eAǁˠ@eq@5Aǁ Ty۲2њ 'Hk*2!Қ Hk*6

[;,N Pam w5vX4= r82CMeq;,a2 .Z S;,K .q2A9,2Aˠߥ#@N_yAN_9~J偲_ȱ2/l M.rl s5/\~d ?Z28~ǚ_(epA~iep~g_8~\ 偲_8~\ epg)V~ ˠʹ_=E~IeS$_AO_=E~yR2)ˠ*_=E~Ien/KbA_i :_/Aǁˠ/ܴ_+m :_=~th z28~th*d^6:e>*c^6?/—̫00/W 2yu0/{ϼꗗKy\6{̫xyϼj̫t0u./2y]<.W/q<՘Sy|xSyTc^3O5eЉ༯  RmofpW}p^(of矏pW}ep^y7@y3K mofpß%fz3Aǁ :fz33 :fz3Mt|Wl{3, gofåAf(ofAΘf9؛yAf9؛iof㦽Af8Y@o >y@1sG'8j Vc[j̠/Tcn O5fp@1Pt :Tc(5fq38P%j̠@5fз1՘Jj1KP_AP_~pXpX%ԗ?pl}pXpX%ԗ6?pT_~n/?8l2?8,?8,ਾ? P_Q}%C}%8:ì0/ਾ0/?8rì0/?88%?8<<՗ ՗r=> O/?8/?!rāD GQ/?a,D~pß%ÇMv8-?8t>8|Aa o[~p郼nA^aw~l }򃾸v:ll0-?{l0-?l0-h/.KF8v8-?ao~ph.q=~[~pX%c}KF\07?ao~clc}K=v.qnAl0-?Gs[~qtftfnAa[Aa[~qtf\07?a[~[n%c}K?po~pXKdv.q\07Xn%KF8v8vlW@l0-ҷl7l0-?l0-?l0-?8vt0a ?pbn%Kv.qX [~/f8,L-?Kj& nz&\0-?K&n%KUvl0-?8v}lKgv:lKsv}l0-?8v:l [9P_Q}A.= _A}Any|%|՗ԗX ՗;/?ȱ:ì r}g9B}G}g9B}%KF8.q՗?pT_~q^3/Qe#2yA/󙗨y05A/W2y@/Wa^3e>\3_FQee2y:/㙧0]S.㙧2Q(꼌gx)(ܼl^ƿk 7&efBx)ܼl~0 7/_q\6la^6I._o\6x6.$o6._l\6׸l&eKwKy٬"g^|%ܼl\3/$ܼg^|%ܼg^͇A/@u҂- Λr|@u҂f?8o!- Ϋ?P2 /ՖqUfpqhJWy2% .q3;e%q>2Π@gq38PydAǁ2Π@gseAǁ2Π@gq@7eAq9XZ2 K8/-͠'W6}Aǁ/M6}AǁU :m6}t :m6TE3A6^з\⠊fp*A/r}%m>͠q8H\ K2lg+ 8\ϖq9Z\-?lw52o2SJPWoSq95lNes.w=L}]69u͡leses.v|/|.{e/el^P^6\3OͽlR_3be>^6yJ̫{ټl^_3:ƽg^]|Ľg^Ge>*^3/eFa*^3e>*^3Լg^|պg^ۿKe<Ԝ^3Oea\3OePsz<5Sez|cT>L*ee2}RezaR^6ĸl~qbl\6l~0L/^\6^.]\6l~sq<*=L*o-.rϼTKez,xg^*ӇIezϼTKezϼT/@52 λ༻rD@5>82 ༻?P* /VeepRhJeyw*2 .q4{U%R>*Ӡ@iq48PezTAǁ*Ӡ@iUAǁ*Ӡ@iq@_H[iӡmA·y4Ѷ DJ4AѶ GEJnl+=P EJpl+ rf4ȩѶ5HhWlkEs%85Z /֊‚k[+mZѠԊ$3 LhpL@iE'P+`3ɠ@ҊjEA_ :Ԋ}OV48P+zA_ .qV4AZ .qP!2AZѠԊBdpA_ .qP!2A"K=P"KT zV@"KT zV4AR+IhqV48P+ţIhqV48P+z AǁZѠ@hqV@"AǁZѠ@*D8\ hKjEKT .qP!2 \Bd;FjEKT t(h㞝A:G~9zA:G~9Ѡ4ՈA~e!i I AF,$=PBҠB!i3ZHtV($=PB h4PHdBҠ@!iq4o-$=PB IB I4/{-$ .q4AB%$ .q489ʀѠg$:G\*v})s4AHt2s4K!A  ze@h3.[;GK%KѠg$:GAǁѠ@hqs4AǁѠWt:G^9t :tTl .qX*v2s4a9ʀbkh4:GK9\Tl .qs4a9t}?|4Uۿ]9GpY*vs48_S:G/9z]69{ѻlNTles.Sw]6Gaj^69oGa^6\6eeLe>j^3e{ϼ[e>>LU|UԽg^5|l~_3e>r^6ove>^3fe>>L|Uʽg^|Ul\3OeL^e2+z6a^6l~q\cl\6l~ 0yE/_B\6 _._A\6l~q<=L^/.WYrϼ+zlxg^^ч+zϼ+zϼW9z~Av}p9"9z~ywwAΉveep(hg;G28@9Gtl ༻ mr89=9GHhqs489t=PѠ@hqs4YѠ@hqs489gC;G r>s4țAΈv9%9zAv9*9cr9/9ch3AnJ 
:nJp+ȻAޖ- _wya4+Ao) L9h[A]c% r\ ;95ZzAǁrР@9h[AASt(=PrР﷔8H\ 9聒8\ 9h[ATs1ArР﷔8\bp%bp%j.=Qz%j.=Q\) zܣ48Pt( Ѥ48Pt(=PŠ@9hq48PzAǁrР@9hq@5K\ .q4%j.8r\ .qPs1e %j.(9ho- .qPs1A\ .qPs1ArAK$ E%KrР&A.}BAA) :A^#Rt( zH9hq48Pz14aZVr%Krph9h[uAK֡Ao) .qXU~h9hpAd4ai!ZK&AK*48Pt( %.DAAǁrfAd48Pt( :AlYz%Y94uIFDQIfc?Hp{pXhs]jsh!l-~ e-c͡ZC/9!C z`sh!/6^(sh!_l-BKm-th-~Bǁ e-~BǁBǁ e-th-th-thPBǁBǁ%2x4e-\ shC/\6.qX6r?dsKB^m-\TmPBl-\Ttp)VZ+ZP+Z۲ X+Z呵 J+Z呵 -[+z -[+ZAZ%Ҋ.qVp/֊hh'ZB^-$P+ZȽJ+ZIV?V{kE 7sp7qs;Gs;G $:G qsB9G $:G qsAsB9G ێv:Htmh@h@h! -\ h!%r^(h!%r.qsB9G 89Z89Ze]9zܵs'F:G }s¥kh/Ptz@hOtz@h tEKs%Ks%KsB_-t-t-t-t- 殝'9Z89Z}.];G 8,];G Ospܵs:G 8,];G ^spܵsB9G 8,];G 89Zai9Z8gKûq9ZBj-ԿwEcQg:0# S96BaRذƿ*GLjä "旈6dg0)G7Y#6@aCpaC晗r4lDŽy K96{c<*ye W73Bog^u ̫W73/13l>8qi Ә6ӑ)3OcfK4f?Ι/8lgY~~3l>Ga2{͋Ɵ͞aa2{/&aI\ .akIXr̞aSIl9 J84{qi H'0ϼ̞a&3/gg^fϰYR晗0=<2{ye y̞as<33og\PMBm,4 7BށmPMMp~(E UI,zp~|Pzϐ\XYüJY̭<[Y̭RY蹥sK=gP8](=g@=g@=g!os:s:s:kV[ZYțһX8лX8лXѤwqwqwB z z z/T)qwqwqwB 8VAB],\RX }/wB 8V5KT PB>ڻXA%*]RX 8ȻXA%K%K],BǁBǁ Ε^pлX8лX8лX^pлX8лX8лxòwpRŲw>z/\X..qXY..qXJY.zGbeb}..qXY.zGKE%.zMbeb..qXZ.Hb@b@b.^],t],t]PBǁBǁBǁ ],t],t],d ],\ b!@.B{ z`b!C.r]jb!1.r?db!_]PB],끽ۻx|=wb{ .wqw!{ z/w!{ z z/wqwqwqwBy z z 8ȻXєwpK]pi#ڻXai#ڻX.mD{ y!wpK],~%K],ӻX;xz yȷw+ { =.^(b!W@.z], ],ӻX;\ٻXA%.Hb!otV+FgbT+FgbT+raR+Mb!V+rab?Vp ڻXrobb!&.Hb!&.:e.rob.:e.robbSF.w],t],wqwqwUe..qwZeby/wShby 8Ȼx лXk.[{ }(w z/\*.Db.Db.z+@bH.^Tl],\Tl],\Tl],BǁBǁBǁBǁB ]pڻX8лXEһX8лX8лxRwpRw+z 8,[{ 2wpRw4z 8,[{/wpRwpKK],t.}?8`Պ^PX8]=jŏMΏMˠV̏MˏMʏM k)))ascS?6%cS>6cӷR)IC1\شg~~3?y?6]ޏy&ǦǦTN$N$-c\,{N/N/Tf9\H #rIL.9$r@mTrp#>;:#>{:#>8@ჼJ /> %ჼZ /yA,?| gჼ^ />ȵ>CTX8 ~A$*|߂>D[}w~հp>oA ZTNj!*,[TN-_5|߂>anQ=anQ=anQ=a-8-8-}(pnQ=anQ=C!D.q[T>C!D:>8@TG t *,[T:>8@TQaܢzqA ՃKՃKfQჾ#ATEEW-8-2%sp>ȷ< \0\0 ՃKՃKՃKfQK֒D.qXKBT7>8@TQaϲAA & & t *,\ZM>a)6ITs.& \$*|pRqA/ *|psA/ *|pRvA/ *,\ O>a>=5D.qXzO>=5D.qXOBTQ჎DzO Qa҂A gQ჎D:>8@TX8 t *|qAv`$*,E.qE#Q჌,iA۲,r*჎,ry$჎,?-b჎,ry$჎,:>8bX8[ t`1|q%l1|pl1|pl1,\x>a)b G.ECDr$Qj gQ\ITy畨p>衆Ay%*|+ g{t^&A7 e >t^`|pͺnA)|П` .-=\cnAn? 
nAn?|)nAn?,u:e >t:e κ@ >A7 Ht|$`|pì|$Kf`|'KfKf` QAa|Ч7F}CQA_I`|W(o0 >:KVF}CQA/`|7t\tae|p҅Q0F}QAFt`|qQAa,\2 >8(0 >8(/0 >8(`҅Q%KVF}Q%KVFQ%KVFQ%KVF( +KfK..q:0 >/.w)xw6 >8ok4 6g3l/Q0lf؜˰9as*P͙< 62lcǰ9asS7lb؜İ9NasoT $ͫ03ajۆyյ ̫i晗Q0l̫d晗Q0lmg^0ϼʵayUkS63^mg^0ϼ:FyƙQ03O`|f:LG0B\,a> ,\~d Dn`#O@ BE,>_H,>R A[ y!WB[ yW[ y1BY yC[ y޷BY yE~[ @Z ^Z/p]Bl,w>~ X2Z }g0Z/eMW ]F+jBMW ]F+`5 8 UAM%^&T5 8 U UAMBh,\&ԁhG+`@+`@+`GVBǁVBǁV Մ*th,th,thPMBǁVBǁVBǁV Մ*\&TY }ӡpPKԄ*şV Մ*\&T/.qPBY g+`5 8 uPKԄ*\&TY 8,#[ 8,#[ЛZ Z Z/\ZH:zA+`@+`@+`7:zA+`'e={.{ [Z?.+e={^uѳW RgЫ.z.\j@.zK^ d^7.qX*AH^@^@^7]āBǁBǁ +t+t+tٻPBǁBǁB?ٻP%b^!`^!VrbޅRa^!7.VroޅRrb^!ViޅRro^!Vri^!Vrb^@ޅ?bZW8PW8PWȍUzUzUzUzJW8PW8PWA*TzKUzܸXWa)YW UzK+\ ^!7.V.qJB qJpTzRW˩xM y =](!^!w5z4)+B %+hRW˩xXWOj ( y?2 y?2 }B x'XW}x¥f!^5x\SXw x\SXWQW5x,)b!^!:eU,+t(+䯬XW8PW8PW_\B 8HW^pxJW_۲pxK$ĻPB)+=BB(+łBB߳)+łBB߳)+QW{6xgSwRl vl 8,5P 8,5P З     }¥j!^@!^:H!^@!^@!ޅK B%K BB)+\@-+B%K BB(+\@-ĻPB%K B%.qXj.qq֮7K7a^ 36G,lNX_F]9]asl u asXͩ C6g*laj́ 6)lNSr:a7K؜yƙ.3/]ؼ{ ̫W5l~89L0ϼan0l^yn=L0ϼay[<څ_ayuZ<څg ̫W53&a*yUc ̫WQk68څq橵 Lƙ.s5&^ؼN,Q6UrؼI>L:0ϒtxa9l^"q y|Tya;L|h R6&U^6?]ٰY61+T1+T/a Uya UyaKƬP6~%H?Z!z4G+~BG+hҏvh\uُVѤO.УI?Z!rc?ځ2cYn)2P"B^f,2+-EfPdvDf[dVȻEf[dV y嬐k[ Nl9+dږBl9+ti9+> rV1嬐[ AZ : l9+th9+th9+rv,gKd9+/rVA e9+orVA%](فLY`z@Yo˴z@Yo˴ph9+mBߖi9p*rV2-g^rV2-gK-gK-grV+ -g-g-g-g-grvRUq7tZ Z }Cqq¥hYhYo贜.qXzaEYhYV.qX](YhYY 8,UE[ 8rVa*ZVߥMq_) ﵊ 9aseT 6'/l^؜9v6.l\9qas*d؜9las樅I;L%Ȱk'lNRؼ S晗-lZy@ K6&Zg^=հyq6<jg^0ϼay)每0ϼay)]ajyI ̫W0Q<*yUE ̫˨@ ay*8Tgtd Svp,Q6o-l5q@ a9l^3&ZgI y6o UI߫Г@W!(*#^>UF  .Tiq߫q߫qBf 84SA~BUA%*Hׅ*.qPiE 84s^|ױ߫pJ3KTP%*.qPip^K^K^U 1^^^.m  }!߫q߫q¥`cV]6f.ڥ`cVKcօK+ƬBƬB_i̺p&ؘUWƬBoh*\»Tl*6Ƭ%KOƬi*th*th*6Ƭ ‚YYYʘU8ИU8ИU8ИuYYY|"1B 8ȘUwb 8?:B^߭*^:B>ZUuZkZutZ[UȽuZXutZkZUuZfYU5uZ[U8PutZ[U8PU8PutZuZuZuZJU8PU8PUA:tZKbuZ[Ua)XUuZK*\ V!i.qNB y}N@ =&T[dlU!f^U8U^U)U^UЗSګ }9B٫ }9ЗSګ }렽¥g{Ug{Ug{UoW:W:W:W:WN{ՅKBǁB_i*th*%BǁBǁ "UK"U^Ua)^UUK"UI^Ua)^uUK"UKd*\l*\ {Uwߥp_cU9Eas 2: 6'lNO 6''lN؜96asjS]/lL91as`&U6')l1q A6S33gg^0ϼTaGg^]0ϼTaR0<HA }GBA 8R;LKPbB>vXTA}%\>HA 8RAb%Kb%Kb)f*-bBǁbBǁb 
LLERT8PtnR߹[T+J JRT+J J|u^pnR9+.\^߭W*"z%JRT8PT8PTEJ.o+:+:+](R@R@R@҅+:+:+z W*\ R!2W*\ /^p ^z+nR!}+](R!/+rgR!o+](R!}+aR!wR+rmR!/+:+](R!/+:+:+](R@R@R@҅+:+:+.q^ǣ)R`R!/+.qX +nR 8HTKJKWPzBA*B.UA*ϓ A*RB<]y: 7sW23eg^mT ̫jW23/(l^q: Su6oӑ)3OaR0UGa0Us猪y6-aRyΤ: 7a9s&aR2GaRo;aE wä: _u˜1٥(((l~ cƨ: cƨ: qg0fYu晗(3/Qج: KutTGay[s %?%?%?aPs mO@O@υ:::\(O@O@O!,P%ϒ%BK~ y!䧐{0K~ eO!/ĖrfυBlO!``υοV8K~ y䧐!K~ 䧐bK~ J~.䧐bK~ J~ J~.qqqBI~ J~ J~ 8HSєpnO!/Ė.qX^-)ؒ%.q䧐bK~ 8Hs$?SS=>aS=>S=>SK=>Sȕ=>s U=<[Sz yT TnUυRjUO!oVjUO?PB6)تB^9B^)tʨ)ت )tʨ)تB )dժBB[)t)t)GS )\ UO!V.qBz YwpT=K9Ba)􍎪Bap)XSU=QsT=?>z }7:z.7:z }z.\`V.qX`V.qX`Vz@UO@UO@UO@UO@UO!_pYS8PS2U=U=-SS8PS8Psp/V.qX`Vߪ%K̪B/)\P%K̪%R.qX`V.qpq+O|i6(lNQ9C69Aas 60 6&lM؜d 36G&lNL9/6%l^2;0LƝ0ϒ;a8lyd9LƝ? m<?lp~w*IXȽ=>ST݈m<QsJ %?%?%?QA%’%*u.qP\RG: } pJJS K~ 8QA U(\RG: 8QafXSagXs %?ERS8PS8PsӰq٪B)]B5U=.Vz)Ыz ^ޣ)jeGiz Ыz z z z }CڪBǁBǁ )t)t)tPBǁBǁB/P%RzaEUOz~m6EcS>62Xj>6cS>6cS>6EcSǦ\}lǦT}l ǦL}lTlMyMiR)Ji1<,5ϖMŏygK6?65A>晟[asIc"1\g~|lڬ3?w?晟-5as/c1 g~nͅy:6?YcB9l|lZ'KǦ]ǦUǦMrlgi|l"lZ"̳4[jfKǦ`,Ԅ͖2[j>{,5ufKǦW1|l2v|lzKǘ?Xj>6=|c,5q>6=|ccR1ldg~Ԅ͖ygK6uz?晟-5ϖMu`g^i (T8-vr/"K̓ӣԃӣԃ܍c̃ӣԃ+{K\yw[ Ryp<ȯ1[ARy-p<Ӥ-n"{˃A<gyH9h g̓.AAD )E4 "]̓<{HDS8wD4N}%zpzQ.TNAMypz~pf< A'G:$dt)g9B'̃|;NA yANA yqNpfI/</A_,}y x_ޗ8}</bK}y x_ޗ}pGg^]T ̫W/3/)Lؼv̫晗&l޸v_g^ݾ0ϼ}ayS/3R_g^0ϼ }RyOƙ&3O)Lؼ"?LE0<0a9D)Lؼm>LRyY&l^4{y|0a%Iasؼa,I s0ac/ƄIaaƄ)aLƄBI6?1Ƅ)(٤0&٤0&l~ c6) c6) 0ϼ1a0&3/aLg^˜Y晗00 c<Ƅy% Ks1as173/aLg^˜( [Sm1p~*[&Wȝ1kVUȽ,Sk,R>R###>rR)}pR)}pR)}G 8,G` F }l`f F.`f F }l`f F 8`LH\(HLH\(H F }J`pR`O::::ӂ ##ER0R8P0RK###. 
F 8, F jHcH!U-)\`,)䳪#K#J0Ra)X0RA%KƂ%.qX 0.q` -?;Gt6G>63le˰9as(LN؜Ȱ9as8i x#as$A |>r#as  >晗}$l^/ye >6}$3/Hؼ8 ayS-3[g^ 0ϼ#af=3r[g^y~may<*yUi;L0ϼlay<2G=yg0<#a0u8砯$a2:sF3Iؼ>Lfy9$l^B;y}$a3Isؼ}Lr$aC/$NIfKa2)3I\6[Ǣd& c6i& - a!l~& c6i& c6i& _˜MI˜MIǡm(3/3Il& L晗$l6ye&9Lf0ϼ$ayI<2&3IW L晗$l2I8G2MBY-) 7¹iye\X[R8od\X[r*:OX[Rȓ%kK yBiK y곶 kK yBiKWB ' cBI!?-<)“ N)1'ӈ=*ʣRHzT IJ"QPBEңRHzT yޱGB I\#P})<)J“p{zn)<)$PxR蹥sKɅ{zn)<)RxR'Q蹥j}%{.qPߣp'|epK(['QA}B>8XxRA} %<)䫃'K(\⠾Dž{.qPߣpK(\+,<)\T,,<9“Bߖ)<)t(<)t(RR衦d/::H`ʅK%K%KB ,Y)\ ,Y9BPRc%+>fQr$+>fQRc%+>fQRd,JV }̢dBIV }̢d,JV }̢dBIV 8HRȷ+KV 8,KV veJ@J@J@J@J!߮,Ypi6XR8PR[%+%+|ʵdqdqd¥`J`J!,Y)\4,Y)3%+Kf%+|dpldBIV 8,KV 8HRai6XRA%K%zAJ`J!t,͆? @Eq Y8!,)R`S蹥4 l =zn)pCX`Sл7 l.\4zg1EaqE˲Eu;a)!ly_FN|@+l0l>gä G*l ?_cAajz>U~oL ?R8yq6yY'iAz>G<aghg^W733ffg^ W)33/N|8L0ϼay15XsjgKT;+{:KT;PBB>:XSA%]Y 8vVA%KB%KB))BBǁBBǁΜB_)EΜB 9.vzL)Μ 'Z;s 13зe:s 73зe:s 73{:s3q3q3зe:s.\m)t)t̹PΜBǁΜBǁΜBǁΜ )t)t)Μ )\ gNGt.q3B9s <3;s ^cgN!/vr`g΅rhgN۫3'c;s.3pٙSȓ9ܛؙSݛ9(ڙS8Йs9(ڙS8ЙS8Йs9999ʙS8ЙS8ЙSay3pkgN9s.\y)\ⰼڙSȋ9ʙs u16[6GyQ;Lan| ?uݰ30},6ކq橨 6l 63ٰS0}Ȇy楨 ?a0ϼ>^S23|RQW0)jSFg^0ϼ5a0YQS<+j *j gEͅ{5Kl+j 8,V=ۊ%{5RQSs5>'PQSs5JQSs5>'PQSs5?0*j }N*j.*j }N*j.p5||p5RQS+j *j *j *j *j bEͅӼ55>6SQS8PQSȷH+j *j *j.\歨)\<[QSG&+j 8,OVɊ%Ӽ5|dp4oEͅR.qX歨)\ EMiފ%R.qX歨)\ EM!)\<[QSG&+j *j àE`EM@EM!)t)t)t)t)#555.+j 8,+j 8,+j 8HQS5K5^m85a%l˰y,_FM|b G2lȰ9l>͇6c62田y9}cg^0ϼay5Sq/Y丒D;;La}@7uFͰ>9La3kܰ>:0k >e @Ͱ8LagúcX&0a1v>[_3fXcn_3Oԇ0׼5úfk^ayku0׼5_35/0׼5\&Ͱ<5/0׼5B/fk ؗu>cM`{JcM`:[|&-4lP@ &[h.&}-4%svK&Е@L v\ݻ@W]2`%6. 
{&]2 dl̅raK&m]2l \ Ld8oÿwhqYQc@TX&25JQc@T@ZhM ͅ@cBlZh.Ԫ[i ?6-4hPn}3[h93&o~^U@l \ M .qЪ[r` ͅZu \U@l \U e \%Zu \U AnKAnK [h8,KH M k:\]m -@Ρl tBs2&eB M\Lam \D@_i -4JBGZhB ́::*M ͅh@ǁ@ǁ e th th thP@ǁ@ǁ@,Bs,4Kd -4KdP@,B -P@d, EѢ@ X4sD3(Z4c6f[4sD37fHfL /HL , EѢ@X4sD3(Z48P48P4sD3E3E3E3J48P48P4aZ4aZ4A QE3KQE3|7h@fكY4̢) dfL {0fكY4sD3, dfL {0f.h&=E3X4E3Y48P4E3.nf8,nf8,nf9h&p2h@f'P4>@ \(Lf'P4>E3( t@̅Oh&}E3(P%rbLfh&fffff9_henL@Lfff9nh&qh&qhenLenL H.qXrdLenL H.qX\(LenLf8,wf8H4a[4A@Α, \L- ɢ@ǁ@aP4s2h&qh&s$fffff9Gh&qh&qho`LEq`Lr`LfGLeƢ@?Q4ġ/}~ ch?jc ~,4kZ~cXd/Z~}?>kMZ~Xb/k1',4c?˟kꏵ/X|5߿k鏹'cX>珹滅cikeȏWc ͰnXO>;sw Zk2?~5߷?ﻏsa}c1|_zk<^ Z+1<,4cB6XW?ƚcm6 ݰnX}u cmr?sB1Y| >fvsu ͰnX_M?ֆחP1|0|:uCP1|P6 5caXu|Pc4 5caXc4 5kI\Pfkj>ߗkjuC\P1|7|5 5úcmcnkj>6/cci6<>VÎ5mw5].Ã|9oH| {0od&@od&̓$y=7]| {0od&̓$ كI|{0oO9A9&pK|e.̓KI7r.̓KI7!yЭ7Av̓n yЭ77Av̓n yЭ7&o\7r"̓Kf 7r"̓7:<8@|̓H|L%yqAw<8@| Ǎ<8@|M2}%]9G%]9G%]9G%].yp2}%]|e.̓K%]8t̓#I|e.̓#I|̓~À&p'H|̓#I|̓7:<8@| H<8@|MT%UA8,bo\7I8,K7<'=o\,0Va+zX/ah{aZֿz39>Qֿz3^{>·fXaxOaY\ayZq ֿ\~s=ӷ{k^Va=>L\ds=5/+ΰ޼ skArk^Vao9Lˑ\Zb0׼"V0׼v"95}_F+ΰާ cӊ35O+ΰ>u;LVa}7uF+ΰ>zT0Yqd'z@oLVdgh{Xl2ZqgX7&+0| '+ΰ>?LVa48ú!dXma}b?g3M+ΰ>Lӊ3g_AYqe֭8\ sˊ3J0׼8Ɋ35/+0׼8\&+ΰ=5/+0׼8&/gko+}B-D@XǾ} "_@Xط D 9@\(M O o Z|[5}i/Gk)&M5Z_sI}M B-H}M k[_s5 \ }M k8H_A%\(}Mׅ@lm iO+NO{Zq}ӊӞV e iO+NO{ZqZ|s7?9) 7WB_@P| z7Q9&8o& o}%& o8h.pV9B.q] .q݅rZaM.q݅Z \ջ%Z \ջ%^7K o&S;oo]&r`M [L(pZ|2&Wo.\f.e"MoŅ@_I( 7KA777S|s2&q&qBoooo.{S|8P|8P|+o8H|+o8H|s7~&pÿ8ʊ18bڊȋ8ʊ+8w'=8ʊw0[qSۊ[qf+N V e V@ǁV@ǁV e th th thŹPV@ǁV@ǁV%V%V%\ m \ m9@`@oكY|@`\(M {0oكY| % dfM {0o94&CCoo94enMenMenM_.enMenMO{o}S|Ӟ % iOMO{o}S|&Ч=7>)P@&o.&p7X|A) |@ǁ@ǁ@ǁ@ǁ@W,p[|8P|@ǁ@- t( t(p[|a[|97K7#Y|a[|97K7J|a[|A%%.qX.q&s$o8,wo9G&q&o\, t( @ǁ@ǁ@ǁ@ǁ@Α, t( t(p&X|a'X|aq'X|A@?Q|aY&Oz.qXn,7z3W^Q|3:z1<ه>gXosg}X/a>ӇgX/a|XͰ^z%?X s>{Xxo0׼7{Xns}6%{k^ayׂ0׼7zr#95ayEayDsk#rk^aOƚfka}x )gxXK $wXK 볻a}t7OfkIra}j7톹$9Lra}~=/f>mua G0| $'Øim֧Øim1Ӕ a46Øimfk^raכ%暗fXaymfk^raym%9Lra}yk^raym_F0ּ6}Gj1er> }B5}r7ć BmyjXn^r@,, yZnO6l- r OmM&6j1/FmXn&r % dO`MmXnAr%.qBm8md`]@6v\(gN`_ 
+Μ@7t3'M9n"̹PΜ@7t3'7nh -7E|[owKM4o.]`_E P@l @&l }@'l \ݻ%ڽ {AwT|Aw|q7K{Awj.pv8h.pv8,6.qXv3l9@l th t|ȁ7o|2ekM˄@Whp|7|o}_&14.qX&6:4:4Nͅ@ǁ@ǁ e th th thP@ǁ@ǁ@f|s7Kd k7KdP@f|A@Z Z@? Z Z@>uYȆZ S8l S8|'Okqc-΅i-N@-N@-΅:::\(-N@-N@-Ne|l-Ne|l-Niq.\.qX6&=7|@`6Boك|@`6\(M {0oك|ȉ7(|8|ȉ7.vo8,vo8,vo9`&p2`@o}|@4\(Mto}|7>i Nͅ2@&:7|s7Kd |%2HM +6:4:4:4:4rbͅ@ǁ@m th @ǁ@ǁ 7K7#|a|97K7#|a|s7K7Kd \Lm \ MenMo9G&p2}&s$ooAͅ"@ǁ@Αl th th th th @ǁ@ǁ m7Ks7Ky7Kd 7KoGMef5't?v Ұa= ֻa=z 3aX0z`X? hX/aI0z[4^!p΀a?˟'~ cә35חk^a\r a?5aڱ~k^ay9s;Og0׼6+Wy*T95ay-T}aymSskrk^Μaƚ3gkΜa9LXt ӿa%:sar 냿a%:s߰>g~ə3̵$gΰ>}\Kr&gΰ>љ3?9ú?09s3g>9sar 3a49a49Øi:sQ0fΜa49˟s˙3z0׼9\r <5/gar s˙35/g0׼9ə3<s˙35/gΰƚ?^~- >1P}A8>q`_ j#2?@ u[KrBؗyݲ&[h&B-4jI.-4Bȳ $O -4<`m d[i ͅ.q&p,4@>[4AnK}f8hBfoL.qЪۅZu \U%Zu \U%6E3Kfh&.f-9G&FEM 4Ɋ@5.ST+j]`T2CEͅ,Պ ݞ@NT e%XՊ@ǁ@ǁ@ǁ@pZQ8PQ8PQs5555JQ8PQ8PQ.*j.&p5 \ EͅR%R2r$c M /r,c ͅrj M l d7n ͅ@v@>XB˩-4|BYhy9&q&qBYhZhZhZh.&q&q&p2s&p2s&p,4.3W[hfIY4Nʢ) d'eL ;)fIY4sD3, d'eL ;)f.h&E3, Т@ - t( Т E3KE3KE3[4a[4s E3>) MLlf.h&g6E3>) ML @̦hBf}fS4@W,P%rbLfh&fffff9_henL@L Ǎ:rhL@L@̅ݢ%ݢ@N, \L- 4Ȣ%ݢ@N, \L-P%ݢ%.qX.qh&p2}h&pD3Y4a[4iE3E3~àh+`L@L A::::rdL@L@̅k%n%q%I%E3~ңh&pò:cL6́K:XQ37Yz G/fX﯆ Oa[t aֻa=6zj2!zC5'fX̰~|czX1+T cSQsaי15cX?0y15/EͰ~X g0׼Nôc95cbk^0׼5 15T sk0Uskrk^+\Z)P@K&#o.&p7X|A) @ǁ@ǁ@ǁ@ǁ@P,p[|8P|ȉ77(Z|8P|8P|s2`&p2`& o8,vo9 &p2`& o8,vo.&p2`&p7K7K$ \ - \ M A.qXrdM@M0(pX|8P|i77777Y|8P|8P|sb0&p"1&p1&p7~ң&pò:cM( \ⰬX|'=o8,388z6l ϛ_F+ΰ| 55z5]s52!3\zHfX?_nkXӰal9LG˰$ Aƚgk^gʰ~ s@q2&0暗gX?Idk^aZ25dk^Va15/+ΰHaym\skrk^ۖirk^\Zך/gXoiƚgXz0<8Ɋ3h֧Ɋ3hqX3&+0י8q㿻8\g a}>8ъ3Bg>ZqgX_$8LVa}`3M+ΰ[ 0fVa480fVa48705/+ΰX&+0׼8.sˊs8\ sˊ35/+a \ sˊ3I2Zqm V> g׎^d+N``v|,jd+N`_ |9raeN HV@^ gʜ@6Vde΅ leN OVT2Bm#EeN f+sِZs9K \ eN h+s8HAʜ%R\(eN)s8H-|d>fl%*]5=Jބ@7'н 5=M toBMυ7'н e;DXZvZlB:rgN` tx) d²@_,( t²ObAN G.q.`rPaN^(N .q.`^]% \]% \eXaYͰl@v9le>|@ˬ tQQ5=Y꣦eXlMO@߮p[T4=~l'ckz}'p2='q'q'q'Џ \̑ t tP@ǁ@ǁ@ǁ t t c5=JA@QA 5=K \ M/''kzy''kz.'Zkz&eMO xkz.'oR'oRMʚ@^k +5=Jk5=5=5=J8P8P8Ps4=5=5=K5=K5=Kpf&@6K6HO %x,f& e ddO %x,sLyi KOO^xX`4䥉''/M<F&@74rbυ2.q'x8s 
M<8888M<.wxx9n'q'xxx.\6.qX6rcOenO >6.qX6rcOenυ2.qX6.q'p2}'pLjzuea GM0| [I3 czX/LS3gX_-LS3gX*֗ kPä暗gXayizgk^ayiz9La}Wzk^ayiz5_FM0Qw`_ > wdO`}U|0dO`_ |V@:|'g>o> w @~ mυ @v[B9|8A@v.q'p>KP%r.q'p>o9~!{{.7(]G+mA~ޠ@7(A t?DoЅ7@ ~?KO, ~fQsV 2 ~9³':eraOoraO'}' ~9ҳ'pV 9'pV /'s ~8h0pV /Ԋa.qЊa.qX6:, \,uXs ?Ys2Y' ~9ielO ( Ջ@, tQ ~[%@- Ջ%ق@ǁ@ǁ@ǁ@DPs2'q'qB ~ ~ ~ ~.'q'q'/\(O ~0GO ~.'s.q'p?|'-y'/K\(O Ve@6\(O _l doO _l ˒?ȗ%.'w^.'q'q'qelO V:\ m \ m \ ρY'͒?Rfɂ@6KYB ~,Yfɂ@6K\(O % ~,Y)?n( t( ӂ ?K?K?bZa[s ?>\) JOW ~.'Ї+?>\) JO @pB ~kR^@W,P%rbO ~' ~ ~ ~ ~ ~9_enO@O Ǎ:rcO@O@υ݂%݂@|, \L- ǂ%݂@|, \L-P%݂%.qX.q'p2}'p?Xa[ȁ??~àŭ`O@O >::::rcO@O@υo%r%u%I%v?~ң'pòcO( \lX'v?cc-k'e|mkX!ֲ}ul ȏcxXk>ahX;> ~cT,ec-c1|?>Q1|?>Ρakh~AkG\ַ3?\k>1||g?sȕ2\}sXka~5߷0?;r}5X|u[X| Ou3cA>fúcm1 @kˏrX7}5@kˏ\ 4>Fka>> 1|0}?_7}' f儏10}~bǘi>L 1ff10}#|m#|5@k1|7}5@k1|7 fcnku3\ 1|7}P_3X?׷7mAť zMl`[f~P=&2s`=ȷ(il &%m@hLڠyK =A56A~Ԥ ,6A~٤ z繴A}Aޠ zA6(k\еA.qڠyK zpC=ġk6%] zpCvmЃK6%8/Uуl* 쪢Vm`y(tUу HU{0tUу*z=TE]U{0BUg*z)AvPe2Q۲NTEr#Uу@U 0R=It*zQ8e9%}Y2PF8e8e,,,,eD8,(R A'* \GY$Gn!9zDItBr 9=ȩ$Gn!9 \=躅A_!9 \f=m=wH\Ⰼ%9z&=aKr4Ht 9z&.vIt 9zq(Kt 9zqA.9zqAnQ?[%]rM 9zpC=gDH\%G.q蒣߲r ҃Jˀ d@zV2 =ȫ HJyw)䃕 H ҃|A>Xɀ 2 =+A^e@zqAn@zqA H*-҃ H ]8,3tt 9zЧ$GYh $GYA6K=fI.9z͒$GYA6Kvуl$9z͒$Gr*уl$9zqA\%9 \V$9zpò уKIU8,+ A=}~Br%GA=X`=}~Br%GA=Ɍ$G]r.9zI\%G!9zIt 9zqAHdFen/у$GrP)у$Gr#у$G:く$G.qX=ȁ$G.qX=ȁ$G.qX=ȁ$G.qXvуK$G.q蒣8,s{I\%G.qX=ġKG8,s{IGH$GBBHGHt 9zqA9AH$G.qX=aَ%]r 9zpò#у~҃%v$GI8,1='Mrڎ?nXOփ;caC;gvX?~nXo5uX?kXI=Lְ~l Mܰa=uNa?˟95O0׼a955SjX?55aϧayNiusk^G0׼aySik^g0׼ Htrр45/Ұޚ skO0iskIsk^+\Ze4 0< HX4 aZƚiXa}95HҰ>Gd0e@֧PskPd@xkah@m0ɀ4? 
Hɀ4.&Ұ0iXUa}ka3M0fa}aa3M0fa}WaX_U暗iXaye@yk^d@暗ik^ayiXߣ暗ik^a}р45o҅ 'ۀ}~72s _l@ }ؗ/)oQ6 }([JH<ݭG z@~G dn=R x#]@#g=R {.nb}#ķ)}HJAz%#)pHKG \ =҅#.q)pHKGPz%#.q)-쁏䈍ف̬d )H%S 3+.)`-*T2](%SB*y;)M!Ln d tSH%SB*.)M!IIn NP;ITڝw09;)w$9;)g#ѝI\ڝA;؝A;ʝqIKA;j3pv08h3pv08,+v'.qXvWN:@5Gp[Ҥ)F]#rc=R G#4Gp[Ҥ)E.\#KM=RMG \ݭG HKHR8P8Pw.\#:#:#](=R@=R@=R@=҅#:#:#݄z nB=R.)/HKG \ =RG \Ⱜ8X{J|vX)N+mR ,V b %V 䳓JXXg'dR +b %V =b@ǁb@ǁb %V t(V t(V t(Vp[{JJ.y8,yyXsb)V deR ,fYtJl,V deR ,.X)mJl,V b@Y+:+rZk҅JK8, +rZkReyb)V IRO}~RtJ>?)V IROX`+X)'JJvb@+rlc҅+.qX)c8Ht JX8P8P8P8PȱJJ8P)JJ.C}9*X)qX)o+.qX+mb JKJKJKJKJJa[Ab%Pb%+.qX+.qX)"8,C}9*X)qX)o+]),V t(V b@ǁb@ǁb@ǁb@ǁb@,V t(V t(VpUXaVXa٫X)pJ~ңX)pòWcR(V \X'=8,{5+Ib {֥aa=h]a=zmXo^0mzmX?؆6oXt gڰ Gɬ4u&Ұ>֧\g2+&ҰX/2+ cѬ4?Jo&0|4+ 'ҰpJ°0[f214+ ciV714+ ciV֗]ay_`暗Yik^faweV:LfayeV暗Y0EayeVwJXǬK}4'j3/n'S`IZ {Ё|)OJ`߃PN@TWJ _d:N d N@v2Vi'S v2㷓)J;.Zc [;)mLj1_M:+N d \ 'S94AN%r2.qB98AN%r2]('S98N%W','/KT H{}^ +}e4oP_ t%uLڰ~ ްa=yva?˟;5O0׼αa- R\Z֏a;L0׼vFkX?utuay[\:e֏ayX\2H a::5ayay-sk-tk^KaYƚAjkaO;LˠX4H côh6uF԰>dWͅaݦ3N0Aj3M԰0Aj3M԰0* s 50׼ R\2H 5(0׼ R\2H s 55/a2H \2H s 5I2hM-^2[3O/Ծh`_ V> /קe+U`e>}T|*/e@Ju TJȯT5l MV@~{m -V@~mPl;l GV@R]U@~m da+U n[.*pTKd d#b+UY8JAV e \ +UY8JuTKd \ +U r[*q?  +Ч=MXm iOV {b. 
+-,MX}<`& e tKV&@74a +/MXMXRYFʪ Ҥ*X+}/Br@eeUeTVrHeeU'}/**+9*2*9*pP/*C++8h 5pP/j.qj.qXs \lXYu U.Rrj+UV@l V 5[9 *uK+UoRrj+UV@?Jȥ[}s*pòy`+UoR.qXl th th th CT.R:R:R](+U@+U@+U@+ՅR:R:R!V e \Z8JuT~ȡ*pTKd &TKm![I+Ueeʪ@>;YYg'+.*w{+deU /3VV](eU eʪ@>;YYg'+y*NVV](eU VV:TV:TV](eU@eU@eU@eՅ U[Y8PYuᲂ`eUeV@:Rб@ZyJV@`R](+U {0[كJV e df+U {0[9[*=TT-Juja+U<[*pòja+U g˶R.qXV-l:V@R*Ч TJSV@R*Ч T>i tSH+ՅR)*M!TJuTKd V%RH+U >R:R:R:R:Rrc+ՅR:Rri+U@+Յ:T#J8Jȇs[8,RV u[8,R.qXl \ⰬJaYBY8,R.q*pò`+UY8,R.q*s$[8,Rrd+U@+U0hp1wJ8J9TTTTT#J8J8Jub*p"*pòtc+UYG+UeV@?JaY*OzR.qXnl T.K7eeհ?aeTV 0|TV g߰ a:ް~ =a=a={X?Y7gs=z橬!7zk^'0׼ηax;L0׼JmX?نu]ayj\:҆楬ϳayf\RV Ga:Ɇsʪaa0׼vF1˨;ay*橬֛ô):5Oeհ>=L[hXgTV ô6?Ψ'sX{&e0יUsXysIYuUXcQY5?U/ &e0|TV 'eհpU^ð0+N0fʪa4U:0fʪa4U&ð05/eհ;\RV sKY5_֠U\RV sKY55/e0׼UIY50׼U\RV kҿʪô=Y&հp0 7F0fH?jV` WzLaV`a; j5ynWzrknt٭ȷ:.[+ǃZl n@vkcV  .g #yZحn 6n@6Jvkz`օrk.q[+pZ \ V8ȭuZK \ V.[+pZK tWCցtk:tkp :,q8?)W+>@wy`W; u|^ tMW+> tMW+>@7y+ 7}^[j65DV oFօڴ @_( OFV morlV/morfV/mo](V Goo.qЦmm^M%ڴ \M%ڴ \,Ya3x-i V  []Դrrl+WV@l `V@5\. ri V [8' Z8,r V%ˊ\88' Z.\v-l th thPV@ǁV@ǁV@ǁV e th th \AV@?AV e \Kd \ +Wh \,DQZ8,[Q.qX 䳓}^|vBd`W > 䳓}^#g'dW 䳓}^'.+q+q+qe>@>8u`We᯿ύlD@>6ȓ܈Fhx,[c޲@HoY e{y,[.޲ - djoY T{٥[veR- djoY ::rnooمeK5{8,k,rnooYe޲- iOoYO{z}[ve>- iOoYO{zX`,/e[޲@7r^foم.q,2{8[v e[8[8[8[8[޲ - t- ޲@ǁ޲ m {9i,q,{8,r`oم6eKm {8,.qX)- \lS[veKm {8[a٦,peKm {8[z8,޲@ǁ޲@a[vo,q,6{zzzz9i,q,qEboYe-޲%ZeK- eK${GoYe-޲@?[aYK,Oz]%[AkIRa_~:2J͆VqXO~0m%SqX g~$ ?8Oa7?gX? A8gs=z쇱)5 8'~k^0׼a;LG0׼qsoX?uUayx\:%5ayu\ a:憹u skvk^{ivk^K\Z/lXo)5ƚlXӢ01Ix65(ٰ>,g\&ٰ60o s q R6?;ehow&0씡 Nڰ.qdhİ+102a;eh02a;ehzİ15/ڰ \ sK6n0׼dhI65/0׼dh\&ڰ7暗 mk^2a}QvG0wRv$jð0 kØ?ZeDŽbZ5&F`ĸPkȁ}+?/Mط/}-2M@>ȷ]񿐷ʣchW܅r+.=]q|+.g]q)sԮ dѮ@v+B-+.}]qK䊻P%r.q+. 
]qK \ W\.+.p\qK \ W܅r.q+.p\ql;@ǁ@ޮ t tᄚ~!X:q.p.=tO~@ލ짻P~@^ ~@~ ~@~@_I ~@_I th tN\[uZ] ri\/ri\'}9e.P[9e.ЗSZ9e.ЗSZ.e.7WZ8h/:p/^tE.q^tE.qXl \,~2w -s.jZ9YeeŖ@5-s2袦e.3V[9e.EM܅ˆ-s.jZ}[eeŖ%:-sKm[}[e.pòb\@\@\@\hpY~e.qe.qeBYZZZ.e.qe.qe.ЏG](\YxD\Y.e.ЏG.qe.p,s~plwvl~]@>@M/pM/mzl?l da^ .M/ݸmzm d7nޅ͢M/ݸmzh th th (6 E8,>.qX}l fmzKE<#M/mzn?h tA^~Ц6mzi t7NޅM/8mz ڦwlzKd \6%H^ 炶::::mMB6Ӧ8Цwlz(ڦ8Ц8Цw5b^ek6@Nm \lئ[$mzK}M/pò5bޅ.qXFl \ ^ek6%.qXFl \ ^Uh \lئGAޅ76@ǁ6@Nm th th th th #mzmzmz..!8,W.qX֯l \ ^h \Ⱜ_٦'=8,WI6%mz~ңMe6%Ձ-E>֚ɏyT{k}wcX!?ZaXD|kZyX: gQ8~}>> k~J~gsMk9ksc`ŏRڡv&~5Oa}c~~5Ïjc$ksw!8scjc{1|v'>V'>諾Uk>諾_Sku\]1|W}5U{újcM1|W}5U{kA7X۝X[hu Bǘ[hmˎ.MO(߃%{M)߃mOAH`[ y-5o-<ȷKl!=o-Ll!2=WL|A>H -wsჼKHE dO-|ɑAR>țTϐ0o(?Z*lHE [l첥"|}T.q*"|pCW>ġd-K%]E" *8tK%]EU.q*8tl`"჎T)჎T:P>;T:'Zp~q \~?|]*GAw>;]/m>K:vჾA ჾA/m>Ka8|з ჾupo0>ȑ l7tYm!ჾp 2>: p/0>ġ?ġ]8]8]8]8,v2>aٷp!7⃎܈r.7b˛܈:p#>ȥ tF|Cqj܈:p#.+Ir#>8p#.Ir#>ġ/>,\'ɍ_F|pò$7@tF|qAfl+ɍ7⃎܈ݍ7⃎܈:p#v7⃎܈:p#>,%ݍ_F|pCw#v7~͂%ݍF|ЯYp#>aYAfewPn 7K?-G/%?˚݁~,Igٵ;/m?˺݁̊tQ:(e`Y>g',t-e#|cA>;Ig9:(|vA>KguI:,t \|qA>;Ig僎t:Y>aٓr_|9w{.u /3r]>Ȼ\{A^f|O]r]>@^f$|D.] {M0d)5%|D] {M0d;)l'% "NJ I0d;)惎D:a>E0530\ⰬIeL"W#K530Bg,te`Y>:~@g,a@gq,t7e`Y>n:ݍCg Yvơ|C`,\u|C`,t|qA:,tg e`Y>8@g;c,t :9A:,!,\ⰬIg/Y>aYA_|pò"$僾(BgeEH:|pò"$K%ˊt.q:8,+BY>ġ, t.qXV|&Y>8@g_" \\Y>8@g Y>8@g僎t:Y>gD,t|q2p٘%Ɯt.qX6椳|pCY>9,\lIg栳|pò1'~%Ɯta:ecN:8,sY:azŰ~t2,>rXֿ9z ya1y9waX:rX?*qX2 a5Uay-2,pk:ay,0-cSg9d0-c Rg9FR>rXCa}&zQ8tkP:ä7ŀaA,uSg9-a٩Ɵ:a}#0:İ"U.0:a;u0:a;uİ15/暗rk^:a];05/aYsKg95/0׼tIg97楳暗rX߈e矁:aW'g0,a}kbX!1Yma}}hXe`o@X'3A` z`_( %j;C < %}*ÅPo5|}G?2u޼P@ԭ 䛬՛[y~z3;r3)=`k|̲3m|L3j:7 =o[w=yӰ3pV8AF%2zag=8AF e \ g=8yKd \ g {x=3q3p===y 3qB=y3q O2kh'h|ZDGh|ZD`腲KhTkB腲Kh&KhTKgo:}_@t Ku[IDPȹuF3/}Og/}^(g/}.q._]%\]%\,#ZaG3(6zi 6.{X6zi ^x=`gh tyᲕeg+F@_ipͲ3pEgeEF@faӲ@===E–888yyKd kKd-2z5F%2z^(gKSF%植~͢3pò>ig_h \P58,?~4Џؔ礜4p9II{I{IM9icgY<6]@k\wMKisi l.pu46i. 仦ͥ|A|A$l-=ȪIG'+/m. d e. d@ǁ@ǁ hͥͥ\8\8\8\XPFl.=@0l.=@nl. 
d@\Mͥl\ȷTK/qZ@֚iMmi _5 |BiMmi _D5 䋨JkQkM"ji _D5 t5 t5 4Y;kMKʬ%eZ@ˬ5 ,q(v6@~\z̥~4Я4+@\qK/4o4m@~϶BK6Nsi g\X s4{ͥͥͥͥͥ~3BKKfLsi@s2cͥͥͥi'KKʴͥ(\XPl. F2dsi748i'K/4ġL;\X si`Cv4A2dsi`̥\XPk6oBsi@si 4^X4l6:4cͥͥͥͥͥn#\8\8\za 4ġL\XP&m. ,q4н7KKdͥ\XP&m. t2hsi{o4^X&m. ,q(\z>֚4ݰI2l_WZaKg?DKs~yacؾa1lttؾ*폍aScؾ%tا1l` cSk:5s95esؾjEsk^sؾ`sk0l ˨5vjMi>w۩5vjMi2wا}b>;lw# g!1Ԛcީ5A cީ5ƼSk:l@ sKk:l! sKk:5/݃05/aҚsKk:5/0׼Ik:lw sKk:5/}\Tư}D0P:a@ư}c۩C=L:a,ư}c>4l?%0y0$  cx}A 5ȟێ@v۱_ݎ[`5~slڱcr kj @DGX~Aq ߩ#>@SG||ābGwjNm>@SGwjNmeF>2#jq`C8[LKʌ}ā~塏8Я<_(q_y#+}ā~塏8J8;B8;9az@q ;#,q@NMq@q@q@q@qߩ#P>@ǁ>@SG8G||ā~8q8q2gq`C8[LK~[}ā%e>2gq`CBK}ā%8?K#,q(8G }ā%śhq;.:@şhq@q ;#t#t#t#t#t>@ǁ>@ǁ> ,}ā%e>2 kq`oi6GXPfa#P>@#,q(Fq`C8н7/,8YX8on$_F%s "%u9laa:25Opk>aA,0 }oi}kP>aaaA&}2`>05/x#Qaox#OD!a<İ}b>a1l暗xخ暗xk^>ack^>#暗xk^>ayxn暗xk^>aGL>aox>1lvx>1l& O& c#>L3IC Øw_N$'@~ 5?ȁ,ك@րȁك@j?Tȁۏgʏȶȁ|ڏְ~~wNr v2磓9;|́ĺl|́7́9i-ۜ!Bٜ~ns w9/ds msܿes _ms6@MmjR?TۜS9A6 es,q9A6@nYms,q9A6ٜ/9A6ٜKdsP6ٜKdsV6isths~6@ǁ6@ǁ6ci8|l΁lg88|l΁9]@t`=Ё=ЁD}<ЁDN=Ё~}<ЁD'=ЁDŃṔ/79 9 9 /9 9mD /ԙ@o)l,qЙ,q(s6:1; /,v6:6:6r@ǁ@~X>; 9bas@as6Fas`v9ġLÝ%e@(l,q(x6Has@as@asN6_X,lt(lt(lP@ǁ@ǁ@ǁ %lt(lt(l> %l,q9l K$lP@(l,q9A@(l,q(6Das`C9m' Kʬ́n;QXPf-lP24kas`R8P8P.,J8P.,́J.,́́)j  Q ,́%e2Q[F-ld+ %ld+@-ld7 d́lE[殅RV́lZ/36_(as 6ˌ́Q  ܵ9q9q9q9q)kMqlYΟ2v kgt`)t`ɗ_^:B^:8^:{A/^:s{:CvO_H@=Ӂ n@n @n@ǁ@ǁ@n잾L=XP&Z,q(vOrlt`ChA:`4H_(th; ҁ~A:ÿ' ҁޒ -  - ҁޒ y/A:[9obt`  WuL2H:4H@ǁ ek3 ҁ ҁ  XPm. 
4 ҁ%-e2htw4H8iD/A:ġL# X t`CFA:A2ht` ҁn XPL6HBt@t[4H_X6H:4Hr@ǁ@ǁ@ǁ@ǁ@ithth XPsm,q(ù6H8  ҁ%e8@wh,q(ù6HFt`CεA:4/,ù6H8\8 =lv'ɰ}]2/m/aaZOϏaj:l_Lgǰqa::l?6Oaza'ư15O0׼abk^ 0׼ayaygsk^a:25rk^+0׼ erk^0׼ 0-\Z8sa:25C+\:25+_Fqkay0< /ayi>LϛX4Hۿm?mۿlaA_555 \]zڥчi.zڥw]z>p&a.=lvg,]z>0ڥY@}a>05/]15/0׼v0׼҇.=5/0׼\K&o35/0׼_/ۿm'=J=!a0fR&+}c>1l8ԇŰ}i~bRۇ:VE x @V,#fPG @ֿ܁2Z XXu2Vs2 VsCLJX 5w`܁%Rs_(5w`܁%-Rs:)dR@|-Ow [`zBI;_<,d/R u,*-`K;m`KK$d[@6 lP@ld@Pln 6~rdw 6~r ew 6~_߁%2~_(w`߁%2~`w`߁%2~8}߁%2~8X 2~8X w 6~Hw@w 6~:4~:4~Kdw@2~Udw@w@2~:4~:LKocXrz/HWxtz/HWx[#t_(Wx[#t5BWx}t_(Wx[#tGWx[#:|XOH<]H< @x @x @@x I:fE%:X⠣;%e"@ǁ"@ǁ"@Y$8P$/,c:r "@ǁ"@Y$E⁎E!L}H2ix`*Ex`CȴH<,KXER$8P$8P$nEL/H폓adkay-0׼Vay 0}k^v}5Zz sk!2:5/a}  :5/a}l9l9ly%La?ۿs jka](La](L߅a@aC ۧ!c(c㳀a|P>lg,0}>1l暗0}n9暗0}k^adk^$L暗0}k^ay 0}~ak^ay ӇG#aaȰ}0 Ӈ70}>2lߌ$L g?$00#A  saǗY @,} `0=oaz `a:A XXO|22 LJX az`%_(az`%-[~ّefzdTmfz VOFz gVOFRfz X.UlX Vfz`Ά8lX j:F hccI{ i;aI{ 6rfI{9 KKe= Km= K/ԁ@+,i,qmq=AIK$id’IK$i,qBIK$i,q=A %i,q=A@n-i?@ǁ@-it(it(idgʒ@ǁ %id{ʒ@ǁ@ǁ %it(it(ithp, {74H{+@oNipt ept@jhpt%~ 㯎J5B{`> >WG%=Л6?_A{7m4A{[à== /== à=]X/A@0hpt4 ,q(6HI{+@Y芧=G_XF=-itS7K]rȒ@w(itS~a=T|=,JK%fQXP@-it72 jI::EIe&Ԓ@ǁ@ǁ %it(it(it(iP@ǁ@ǁ@o)iPI}=A %it7I/=,JKp%fQXP&-it72mI{Y8YmK/=ġk[X I::ҤBIJܥ=qBIܥ=q=q2oI{@I{uP~aշ=ġL[XP-idے@-iP@-idڒ@v,iLZ>%lZ~ %큜)=]K8BIܵ=m,it(iZ8P%큎%큎%큎O;n2g{2g{ 6w ??^b@,,?凗@S b@n-dgb@~w>[i/>,Vb@?qR>2+o+Tr_f|[*/RB)R>2+/RB)/R>2+/R>2+*/R2l|`CPR>ġL([)6%eBb %[*~K>Л6?P M޴Q av>Л6i>Л6i>i@_>q>q>q>q>Я_(1|@1|_)t(Pb@6S 8P 8P %8H ]$%e^b@"),q(zI1|`C״BKʼ%8yMK$,q(8H  %Qk1|*:zO1Uk1|@1|*::::H1|@1|@1eb2l1|`Cm>Ab@(,q(F1|`Cm>}0KlQ am>ġ6[ XPf+}+/?ɰ5l_Ja1l6 Wa{0-Ұ}7a#iD:L}y۟FѰ}i>L+OwhƚR~k^K45xk^say2/uxk^a:`45%xk^ 0׼wk^k0׼0\Zusk=L 0׼ s 0׼}ay*凱橔rCXT?6ta듺tan~MtI0cn~9t5tS'n~>0l att0MoF}0MoF0fT)|}b>E1ln϶;>'Ua|NPE? 
ayay臹楢Z楢?L*ay臹楢暗0g9楢暗~~Ia &0TgU*[Sa1aʰd0>_?LlZ#6|~>eVꭰd/ @VJaȆ,++/@f @ }6%A>Q>pM dP )K,qB)K,q>=~+|b㟌 %?6lY~ȶcS~6cS~Y~ȯ:g:V6lY~Xcu%:V֟2ڌ6ڌfl1ٌͣ@Vmf@Ml.f@NlƿPN>?ANK~[d,q?Af@vRl,q?Af/?AfKdƿPfKdfithf@ǁf@ǁf@6lthƿPf@vlththƿPf@ǁf@ǁf@@] 4zK3~[@4_(3~[@4D3~+f et qY@oitьΊzN3~`>=~@Bz_F:/_~@)t~ t~@)t~ u^*m ݵ>m ?_8ԁx9de~@W<ó>O} gtc~@W<)TKŗATݓ>ġLo%e$~@,q(sH}@}@}{R_XTtt跿oބo%zN}`o%-_(}{R8ip4@%e&~2n}`%e6~Kʀ%H}@}@}[_(}@}[:_(}[::_X&t /,8#8SOmKglK} {ƶ'eKe֖@RI2[>Ml[٢>Ml[/>-Z[Ķ>PlLR8R--O>hى21g} [aeb"@v",dc"@~u'(S~ ~>=?~@=ߓ~@ndg~@~w>[i/>Џ7>q>q>H} e>2/>п-ܗo}o}ܗo}ܗo6ܗoa3>ġoXPd~@ǁ +,QaTzGG}wtT_Xƒ @訰 @訰N @訰? @ttttt1Ja8Pa7c**/>oT:T:T_(}`%RzH}`Cɴ>E*KH(RaXPF2P 2i}`%e$ )KH%RoB}`CqZa SaaqZa8Pa n#Ra8Pa8Paa_>ġ/[aXPƗ,q>Э.*K~[t 2l}[]T8e+2l}`C_>ġ/[aVe|-N_ mm mwؾ?l{=l[u==ˆ+öaköööE}ؾ?S{{kaa}%k~_=\\=l?}0x?5/sm\0.ضdW쇹a}~k~_K\B0~ae?l{c}k!:l?[0<۾>l{O}k!kba9mocBkö۾@<̵}G}6U0~a.6QmaXG bm7a 61l6maTömsb91ö91aöw16[\a]`0.sbw1]\a]]~0wöF]]mS.ۆ\s?3l?lpy6ö;3aٝm^N<M\& Y߿L_ʗ>p߿ M_ 2߿)T _(H~mL%%%>p߿a߿a߿=cg@W/t%_iҕ3 yˌBW/dPfJlʌB~lʌ{a~ʌ?-9-(3 Ks~/t~qO@|_ }!Rsc@|_^<^-.x\,Bnu l/dN o],m Mbr^Ⱦ.x!wX=b ~/,q/xao~ K^X_,Bvt K^X_,bb8 aX%/,q/xa~ Mq \,BTt \,B D],B  E],B q/tp \,B  a;` xظ;n^x;h;c O,\,B0p  ^XJs?B0~!;.H_X qnEC({;. 
HH_$/_BBF!禤rJ2%+U ]PSRu U 9}%U ꂪ[̬T/t z[ U`%YU KT/t8)ZBB:P]8T/tU`@B:PqT/]]BokaîU/88)]_XⰫ_XPԥ,#RġLK2.U KvU KʴT/,qU/,q(#RaWT/tP:PMO_8@_'T/tU`9 U _oP RġJ%\T/d[X* hplJB*JB$!] _殄/dsW]BK%!$,qplJB:?eJO6& #B6d!{Ʋ_ޛl/fl/dM6- l8ي>:͆s=@-8Ձl628+]f28 e/tp \fB6t \fBqee/tp \fBq !8~ @\Byu W5/W  \Byu 55/W {^]Byu 5/d H {^]Xu KTAxaC5/d H qM/7[itBoq ?mq M/V79)tBoq M/V7[itB"q Mq M/tp tB7qM/k3n:o:x〛^ftB77Я͸n:x〛^Xtz^Xt87iq(s%e.U7a%e.U7a%e.U7a%e.U7anঃ8n:x*n:x u tB7Up tB7qM/tp cM/tp tXu Kn:xaCM/,qo:xY%e[7,t2^nn:xaCM/t7 7ot2^XPu M/?t0l1폶aBe`;l;۟j5~;l=L 6l_އ]w0?>l_؇ذ16l_Ӛ>S?y0׼akk^+0׼as0\:5,t050=5{k^7 ay\`ؾd{k^0׼VayՇiB=5CZ\:e`:5ϛye0ƚMϮayt05w:H2ljz 2vy{sa:?2̵J\:9rn: gvuhdk7at`3t0o:YsaŰPL}b``>Y1 t0 t0lT s릃a[fk^7 s릃aRkk^7u0׼n:M馃a?At05n:n: M)a00t0lpϷ O M 37CSAaa|aT˗@Vo:d/7} MtM o:Pt}%AMo:ܧZtpn:,qM% A7\KtA`n:dsNBaGUxGBkw$xG…#!kw$o;- !ޑH,5;.@}GB ?@}G…:}Xӗ%?-:}ȆH,q@`S `]!?}/C{.Խ ?,M8^@ǁ2\{8^@ǁ2\{KP_'^tW/ɫ n uj8ޮpN  v uj8ޮXO p P}\X q}ބց@7x@ g|@{ou Ou2[]}u |u [nցR}:wra_nց@oxeշ8I{u ġ ց@0x@`Chց@ǁ:u = :pamotxeշ[:8ց@ǁ:u Л6:pn,qЭ޴ց:pn8ց?[Kt@`Cqq:XP&}@`C%u ġLց:XP}@`n8:u qnց u@[oP[:8ց ˹:8ց@wyto,q(|@`C9#[Tv_(| []ve| v_Xf'dծ@Ov:vb| }vrieɮ@Ӯ ˴]b5Ȏ]l@5-]l@5wj/k>A@S5X rڮK?@ǁ@ǁ@n1ttt蚿P@ǁ@ǁ@ǁ tt)#ڮNǮ@UNǮ _@t_ NǮ@tNǮ@t߮@t욿Lz5XP&=,q(vro|@|ߌ`u9b| C1u-S'2u(R'a N>лeQN>лeQN>qN>qN>qN>qN>o_(|@|ߌtP:@S'8P'8P't%8H'"u%8H'X 8H'X |`tJ'XP,qN>ġ Z'X |`CN>A:@M,q(Y7N>qN>}/,Y7N>qN>qN>qN>qN>mD/,c8^KXu%aE|`CN> +KXunXQ'XPztÊ: Xu%e:2k|Vq:a9l /N~;l;۟j5~;l=L 6l_އ]w0?>l_؇ذ16l_Ӛ>S?橓b>lx sk%:>lnU|k^GsE0׼t*gk^0׼ay0׼VayKaZ^skZ}ay-\:}357_Fuk:ay/020uXTaͨϔaͨߌaa&OX ,Aat0>'Ua|NP5? 
3aynay懹楚[楚?Lay懹楚暗j0暗j˾> 3lw Waa̰aRI}f>3l?-2l&}e>2l?(2l 3ä J j~>eVꭚdӪ@VUJ5fU,/q@f@}%Aj>Qj>px dPK,qjBK,qj>w} %dҒ@W%lDZRiGI} oI}@$-,'I:Ƥ%eI} _c%:%%:XmJKKtl3 ߓ@ߓ@=龿P@=ߓ@>?=泭luؚʚ~ٚ@v=ld@v>lͿPg5?[5B dYKdͿPYKdd YKd,q5BYKd,q5?A e,q5?A@l?@ǁ@lththd@ǁ ed+@ǁ@ǁ ethththͿPYKā@chߓ@w'iͿOi~>Zݎ5Bt3@cht3 u8:ZKpt1_сt8: 0>E 0>SL_XfR-t@Y P PaP0>} _02ja|`NzNa|`CW0>w K̪R8P8P;U   /,#zNa|@a|@a|@a|@a|+_(a|`ޯPX azBa|`x)K)j+KAj+!=X=b*Kj+/=X=TR~RJ8P쁎J쁎쁎yy+*25o{`C=ġ[6May 6May Gl<|<]Kَ< K/,1r:R@n,5LXjm Xj&Yj&|KBIK$5[%_(y R-5,ql_؇ذ16l_Ӛ>S?-b>lx sk%:>lnU|k^'DEo0׼t8dk^0׼ayyˇ0׼VayyˇKaZ^skZ}ay-\:25# _Foukayzˇ/0lfzoXt?ۿ?)ay9͇Jc\?LNa}b`k^Xt9a4ia4ߌNaaX +ap0>'4~?:͇9AarG*\r\rsi>lV si~\rsi>5/ar-\rca[|>00l-|k·3aYaO0·C3a1ag/&2l׺ ayY?jy +߾ ;dԾ@} }T 3`y S`y>) w(wO2 _(y`|%8w~|%8w;P@6/;tw}灟/]}iy {;d/ӾR}_lhwOWi:XlwX%:}%:y|lr96hJdϦ@WM遮/@WM遮)=Toy0|`б֋l8ڱ~رȮѱ.O]ر~viz LGلcB dҎ9KXP9KXdKҎ9KX,qcB9KX,qc=A X,qc=A@X?@ǁ@XtXtXd؎@ǁ Xd؎@ǁ@ǁ XtXtXtXP9KXth- ZTBtK@7Uh-tK u4:MZңl ۏ ۝2K>l O u2|0yɇ~a|K>5o/y ^@N%dK~lK<B d%d %ܧh{{)@^ %,qL_ҿ~aյ=[Lݘ=Ӌ1Gz;.Ber@o_tDž@:_tDž@w\_&2kz78i^ =ġڿ~ 끎끎ބӿ~a=q=q2kz7::::zA8ȿ]%e. h/$z`T]!zetF @ݦ = ݦ B =m / =m /,sv:tMAe.2nz`Cq?.td eQd#@6rd@=Xȱ=]Xk=م=Xr@YȽX͌9tc9{ 73_Xn,gfr@-gdr@nf,gdr@nf,gPrn9{`JWuK$g,q@=q=q=qB/=q=q=ꖳ_Xf,g,q?ȝzYȭ{Rȭ{R~zY'~U=[/kى=[/k/,tֺF{`C2Ng{`C=mn|Է=mnG}fLs{ ?ȏ6zHs壾os{74(88887c/=q=o4:4_(s{ߌinthnthnPKdnFKdn,qBKdn,q s{`%28"X s{748X s{7488 큎8888 큎큎RK%ev=ġ %e@hn,q(6Dse2@js{`C =m'8?ۇ?6l_迌a[8l a0-ڰ}ywal,;L+}a۟cذ}Q?LkOw#lƚ}k^55|k^uVayg\Zay-\Zen\Zen/هiz=5zk^kaZPs0׼Ny|7ayۇinX4ۿSinƚ}:5t&sa2Nin 0׼oɀayP8LaT}(`k^in)a=inD a=inߓaamSAa}b!43aX0>Chnga@0Rp`k^ayۇGen?Layۇen暗0ۇs\2sLel+}@a>3l?ql ~>3l濟l?l =K̢ 0疆eƚ >Uo| mP6@6emdl̀mLmdN `| `|>4l%8X 8X | S|Cg󿒕/tVh />]޵>B} {6ˆ @8@k}`k: Xð@Xð%:  %Zdd @Gy>,tQl>]}_v>]}k}b@~>!lt%zFZL>-d[] f}+@lPM>5[k-e[T>AY/>AY_>AYKdPYKd,qBYKd,q>=K[>q>K[ZZٲ>qBYٷ>q>qBYZZZ/>AYKdtgiܥi?пMnXѴi? 
+ܥiBtÊ@7witÊ u(?[ݰi?[i@/,6zK~4rԦ@ht @hڿLZ۴?MnѴMnѴMӴXPmƟ2}m~748li?qi?qi?/,6:4:4_X&mƟ@ǁ@ǁ@ǁ@ǁ@thڿP2In}`_[m,ke2Sn}er[>m Z^ZݦBY^Zݦ>qBYݦ2o}K@ hL@XJ @Ǵ^>- /> >Oఁ>OఁB> 8zn-dkb ,J[lֈ;XlYw>[i?_r`} d} 8HlȽJlX } -,qB6n}`%H}@}@} :::_(}@}@}@::rsjeb9=ġXܗYJ?6+] T_(%~ VށJ@dJ@)ĿY#lSXXPfthv[/,mv[@6zzI}w_X>zzI} ?z]$~mBYZL}@@ǁ@ǁ e,q>A@"i,q>AY/>AYKdPYKd. e,q>лHZKd,q>лHZK @"ithtS @ǁ@7UhththththtS@ǁ@ǁ 0%e2i}`1zXP9mtg2i};K8aN[Y2i}`C>ġ sz%VߋasoeT_a#o0l "ϨAay\Zׇ/\Z҇i9b>5|k^ aZLJ浊s͗5Jay*XTcS?l=L0cS?5O%ukJäc]S{?l9?֋N* \?ۿs]aۿ?s]t05oi~Ojs؇i~Oj~>Eq8}z>|=lO&0>CN 3a|P{?lM sK{?l?|05/0׼s\&0׼\sK{\sK{eqd~d.~dO6 |L.G|>w.0=aQaIa|Qt\sK?5o]~ ޺@jd[tlZ.?.B@@F}'i.BK,q.?A ,q.?A ethth /?]ݵ?@#~ [6Ælڈ8ЈNk#~`Nk#:EXSlڈXS%:E母%ZddF@ǁ@vl]ok]~K@&u_K@&ulZҤ.?'Uh?5D~ [w(d΢@6-t Q&E!ȳhB dڢ@v,dۢ u/mlK$,qhBK$,qh?mK$,qh?A %,qh?A/h?Ah@h?qh?qh?~/h?/h?qh?qh?qhBK$,qh?A@7=),qmq-qBm?5m??e]C4_(S| w6h~@mt ݲMa1H)>[i<)>6HS| ?akS| ?O6@~ʵ)>{^))צ@~ʵ)>O)צ@?ithththth 7M8n/)>/4:4:4_(S|`L%2zIS|`L%28L%28XAKd e,q)>O/=E*K(R躥=E*/,RW+]ToB{J@-[*=}***/,#|V8>+K%RH{`C=- *KnQXPFtt J %eJ2g{[@T8#|P?l{}~{p]=0?5sJm\0tضۗa})k~_ȇ\*0|YátkJmcC0<dcCCö؇tkJmcC0uYr8ETv?XDF䝬 IAͿۭ?l;~+ƚM5?lj?zح?l60a_[6maþa?lۭ?'?l[[?OX?OXض&1l0m+mtض a~İ +v#>!0~ ö۶">5[\w+3|k~[k~0na ~na sV߭/z.?|ö/zm{v |{mö޼ r|z-?l[0n!?lmö}} aw sƚzY?ް,dB>p bY?,w Y?HBmA AR pyj|n!`n!`n!`n![?X[?X[ZrBA򁻅$BA, `! [ʲY?h9BA6e! a.B"a0B>p`0Ae!`0"al6B"anW0"aaAW<dYL;?6tÙAW<džL+,g]pS9?9™Ad#Q}.gݙAvL {r5gS3!8?Ⱦdž.g\3l˙>pl rawawܝ,r؝,r؝d]9993E3E3E3}L`L`L rg-8?NLg9C3wg9H3L;?h9AZpܝ,r؝,r؝,r؝t/99-Կ˿t3A7ax{00@?8 w A7ax$<8 t39. axw?l͂mf>˶Aw"`[YW/?nlt7 ʚlt'9]y?Nl,r( -?h9Aw"`[X6e[-y?Nl`[-x?f\,?\,_VoWpT|ُK@?f\t%A߮y?_ȋ,,_򁻅(BA^e! KS( y(B>p}0Y?ȋ,d!/2w$?_A?;VP(""y(|`q<|Aޑd> w\d> 2ȡ|"|"|Aޑd>`n>?,r,r|AZ0w$?h9|AZ0,`2w$?h9|AZ0r@|>|"|A^d>Xd> /P2ȡ|>?_d"`x/]0 _naxoW0?躅>A- dL&l3}WJÙA\L grЧq8?șUr g2s3ʙA_NL grЗS8?9?h9AZpЧq8ZpЧq8?h9>pwЧq8?h9A3E3E3™"ݙ"ݙ"ݙ>pwawawaw*S8?X;?X;U?;| | yU_e%%2nwA XX`CAEeM9e|t'94?N|,r(;h]Н4?XPv`CADw"v.l>l? `8l?"0ۏm O#۰va`~.ֆߝL0l? 
ۯ-oa8pN~ۿ}X4e1`7530׼No?5 aݰnk^i|k^?\*5_ays˔}c~뇹K?5ayays~k^+?0l?ayۇm0l?cӔ}ksa_ kGа}>OCô>Za}>=lOV~ ۇadza ۇCaL0gM3aa2sϚfY}ؾp܇}zؾ[=lg}_h>l ܇f~e>5/3aۂays}`k^f}k^f\2s暗a2s%暗0׼ ۟?x25Hd?l2l25Haa2?kA}`ؾ3lrؗkwo7pe 7aayoA0׼ ⇹e?5o@V ٧A| + >jmH BR6 lR6lR 6PE2,rA|` /A|` 芧|+>O Ƶ=]d_=9|+c>rzf=>6 m{_EzȮ=>6Ac۶|`^Ezh9B t>Mi{<ٙ|+xz_7xz&=]PhA| 6dK >J/o@ ٧A| U B= dlX >mz_y]`2,rA|` /A|` dȦ dX "_("9 >A >AE2d >rA| ;6hh9 >3Z4P$ >rA|@ eh9 >rA|@ eX "9 >нT9 >AWF ?r|{PZ>=FZNk@Wc|괖P61Z>]uZHk es;c|`l|{@E@_>{|`t,=>m F@>{|tt񁾗=>ȡ=>29~Zth9=>m _XVh9=>r{exZB Z-}_z\,e"@WhXʤlr@ZN@-}_.4iȫ-dir=([*m@^lȫ-Y\ʖ끼Jr=([_(@^m -)[_(@^"}@@^flӛmy{ y65ۼi@l~aYc{ Ooye6\c{ Ooy,r(k,y,r{`ly{`l/{ Oy,r{`l{@@ˁ6=r{@@ˁ6/ۼno@ˁ6m-ڼZy6/ۼ9=6ۼ_XVlKmʗm/{ []y,r&}v?X4dx_(@)l芧A|4dxMa  ]4dke@_JɁ[VJoY)9J￿oY)9e-B?e[C @Ζm-}el @Fh?-9[~/䪅-ja @ˁ-}ӡh--Z_( @thh9B?r,r~`,}~`,dX eX "Y9BBYZ4N'M})}`YgmJM-{Z4t=M)}4iJaq)}`CYm)}`CYm)}`L)}`CYm)}}4,r(m6tEeͦ~Д²fS"fS"fS@w?hJXPV۪){X?l?* kd9l?'0:OIYۏҰt aa~h_E1l5Oao~ƚc0<'ay:cӱ~~ƚ+0ꁮQ0J?z+}(>l\ata]a 0gMWa&arϚYӕ~>>LY˕~>OCa 0w]a~+0~Е~o֠\釹J?l7暗+0׼\//J\釹J?5/Way&Wa0׼ sѰ+0 ]+ϰ0 ]J?l35HWa0ؾ2ls\釱-o ۗZJ\ -}aKaywY+0׼\釹J?5oW@V]ً+} +߮ʕ> iJHؕBRv JoRvJoR vPEr,r+}`\/+}`\J芧+}+ؕ>OW J]]tdڮ_J.]9+}+Uە>r+zȆ]7Mk_缁Ezε]7Ayٿ+}`^(W@6JX缁o=n d~ ζ}B? gȞZ+PZ+d{~ 9}`7^(?w>G@i@Wzhο} 6Pf>AfE2PfE2,r} g6,rjr} 6,r}`dvX evX "7kivh9> ZZ4hvф-_(@'lvh9>r2hvh9>r2,r}`dv-dvX evX_߿e=~;пPC?Qzz@@=~gпP>C?3zD s=~`|~;h~m^M~kt{6Wt6n&?7:F@hm^M~ot,r( lmʋ|Ks v}9z{4GVh-9zϷ4Gu -]`8灼@<[[e eq-yy <<([|k eq -yy ` eq-ʶ-yks@mlks@lt_X)lȻy9e<=GXr<=ȡlS=AE2GE2GP<=AE2G?聖-Fgs@ˁ聖-_Xl-Z4Ghh9=6_XlX s@^l~a9z /m6G,r(C6GPlp=AE2GPE_EUdö m/z UVh'mభzB[@l~avzB[@clh_e'†aЭ=5D@à[{knʭ== [v"~-;D!>S;_X6p&n>QyD@78hM |4p& |/VM]4&>r |ot4hL} |@@ˁ&>A&E2t&E2,r |`L/ |`Ld m_.H[ =m-$ڪ.H[@ mzD[@Fhm-$ڪ_X,Smh!V==ڪ9U(۪G[" e[ em*mz`CYzUBV=ȡBV=ȡBV=ڪ9cU۪qjǞɰ_cP2la7\1l?ۏP1l)'a|^1t&aws~8La_oayzs1lak^gawa:9 skz=9lי0mOs050׼<ׇgay>l?&S0׼\:! 
s|p\: sh05/Fa)ykX\0<=ׇaxkX\>5Oaxkw_>l gCaz10Z}>lVV0ۇ0kEQ+c?>5؇kɏ}?kgM?a0g-?ay>gS>KcO~B?apysˏ}ؾ?5/?ayW,~؇>5/?ay&?a^0׼J s԰}я}k~yưҰ0 ؇>l45H?a\}ؾH2lt؇/k[~ao&?anɰ}d,ik^~>5/?aycۏ=Uo?@ʷcd~,RcP~Ԁ %) %T/{`X ? X ? e袦zr=EM e޴-]Դ\dږ끿_i&-9zlUr=rz$~-H8=k[_GEz$Ƶ-H8Aپz` _(@mXG-zB@vmhrBYmm@k -/ԓ@k -zB@N|lX'r=^/Y{fY 4kfʬ=]Qr`@-Y{fڬB=_ ,u狁lڬ=A/Y{ 6k,rY{`/Y{`dAdidX "9ȬB9Ȭ=Aflڬ@Z4kdf큖-rna@ˁfʬ= Z4kh~&Z4khh9ЬB9Ȭ=AfE2ktfE2k,rY2k,rY{@'@OF+/{trtN=G'@7Z/YAtr0N,Bz@'@7ZX F+-dVp ݘY{s4ktcf6ktfnѬ=7:1G@_h m/,KIX<*Ig<')ڂbA[ ˲m]`Qݶ<<%ۂn[ e ÒmyT-x ̶<,l? &ayڟt0<χi>l?c|k0<χi>z81l71l?`Wì#Yԋa֑K&kar>f01}}ч5}vч50~ִF50ֲFa8x> >LEka$x_h>/F3ayY\F暗5}bk^}k^\F暗5aF? s0׼ W~ط޷ W WKKeeekaz2l2KKa|e0Yc~R}ؾ@2laT sR}k^d>5/KayYcR=UoK@mʷTdܖ,[RTPԀ-[ #-)[ #T-/z`,dX K eX K e袦zR=EMK e~-]ԴTdۖꁿ_iƷ-9zlR=rz-8}p[_GEzf-8Az`_(K@mX K@6Ǐ:;6c,zLhь==rZe4cPflی=*{UF3 2*{UF3@mhь='912cdCflڌ=uK3@`m/{ 6cf쁮[k3@-ّzXVٖ{`/{ 6c,r{`/{`dфdXdX 3"9ȌB9Ȍ=Aflڌ@Z4cdf쁖-ra3@ˁfʌ=cZ4ch~,Z4chh9ЌB9Ȍ=AfE2ctfE2c,r2c,r{`eo@w~q=x8= z/==Πxt=Э.z_(w"#K"#H@w~h9=r8tMt'5=W/H{@Mho~aYԵy&7Qho }sy&7Qe6{(@@D<'c{a@}~yy O>d<{dl@"}~){a@<{9P{a""j"%j"(j@}XPE,r"&2z`?,r(kv$>#kz`w@aY ;| vrdmw@rdmw"&9>A;|`/;| /Pv,r;|`;|@w@ˁ>r;|@w@ˁ&rnw@ˁ-Ztd?&9>_X~B/;| 'v,r;|`/;|`^;ȁdξV}7| v+ȉ}/,W%K % H_@}l|k!@_@}/| d+ھn6W²a_@+K,iW²q{oˮy?>.>+MO_X&t ?@D@N.ȅ K ȅ K@wD}~@@ˁ??AEt E,r~@/@"e} ^E>k/@"eŃ^n>F/@7Ue }K/"bHk/"yB/"Kk/@7UeXPVeCť^E,r(V>ȡ>A^6/-o?lްx~atzU7ly'a0G_Ȱ,2l?km{!cȰ;lۏ 2W w};5Oay=_\:w s1l=Lgayۿa_\:m&ay5u暗 1530׼\tbk^gay,bk^NJay*?l?P ck.arƚ }8l?Uck.0<]?l?Kckw_ ۟ ۏ߇0kEN}:Zәt?lۧìF~>80~t?Lny~?OGч~?O)}=lA&7a}z?z:LoQr?xo 09۷C?5/abar暗C0׼ȪC~ vd!B99n@);_(@jT@ @!?AEr,rCr,rv.j:я]tȿP!?EM@l!? ;ָ-:_Ё!?A/CzX䠇Ёl!?A:r;9!rdE_Mrdl!B=( Ȟ2:_(@6h!?*CzPh!?*C~ {vٔB9I|p/@@vmh9x?brFb eȖ9(~@@6mh9x?c_A~`/~ [6,r~`/~`d)dXQdX "9xB9x?Ao@Z4d;-rb@ˁx?Z4htZ4hh9xB,r~`d2dX eXEzi?Et+в?ЭhZ_(@WhV4-=_e@@i -/e[ѴѲBY9Ȑ!ЭhZ9Ȑ@ZM@ˁ-4B>D!²k@"35}ŤC~t>D!?gj:_Xl:7?;7C`X|"3\|"M9sO=y=_ { >ߖ) 0e!) qqB LYP) qqB LYt+) *e!,͔@&S.TB[ R82` П-2` 7W \X}0+a2` }M À@7r0 9_ 9(` Ѝ 9_ À"뀁@w\0XP0 .,,rP@`CqvE E 8C 8p#᰽.c48l/a(8l? 
2lnTap^tHQXLOdدwxk\: ۿu(בd|$\Jۿ}u9L\: s25adk^ay O150׼\:vS0׼\:q s+}1}`~ƚg0<Ô>05as~ƚg0<ay c3}`~ƚg0<gviti3piudaLw>faJ&sa֑^+ L0lS20~L&8Ly2`?O& g)`?O% 1}(>lda^b2}8!L&& gI0~0`>W20׼ #aRJ&W20׼ Lp cay=ףa1o=y.zv޻ 0l+è?&!n}>c°}} ۷OQLBO'œm$aöay=kW°M0׼^ s+ aؾHq^ s+ ak^IXNBd; !=z'!p٨wB I.TB 5$@IF $@JING $ X$"%!9( B%!\@vp ] ;a[\@veA ;u˰@vɓ5ΰ@vvh90B=d;aEz.C"=d_aEzX䠇; A/TA [;,rC"/Cр.k@KI8 !d…JBdCI -A&!\׶ -A&!$@KI9 !Am/TB NB8IٙvB$@vș.TB [NB`I٨vB$@6pZLBdIEJBPIl; !AIEJBPIEJB,rPB 6NB,rPB`X$"%!9( B%!9M{`޴$h90 !d'!ZLBA-&!\$@NSh90 !r`…zh90 !r`B$ =Ao=AIn(3 !AoBi,rЛ"i|޴x< PP p3C!Гf(  3fB'3P73=a…Pt T2C!-=f(\ "e( "e( @wh90C!r`B/[8W2}!/@wy }e…J_ 2}!/ }N`B홾ck@pk@sB 흾ck H98}!N_paFuB 흾XPVRpaYJuB`CKuB 흾XPSïʆyuB`CYSu…J_,r(N_,rPB`CWuB`÷{,:"kq- Ar q#9"+q\ApD W%WAr q\ApD`CrD`CrD` 8"Ar8"A9(@AZAZh90²8@jh90"r`D8@ˁq8²8"AW8 ˂ ٫qD`CYPsą X8 X8"A9|59("[Z uVpa^sCP@kOpÅeŹ ;!6܆@Ɲv s]Cmt 1!6prݎanC mt; 6Zmt; EeŹ n0!ȡ8!I696;܆"܆@w'X܆"69(!A Em,rPnCg'¥]gWq )] ̧t.pL`>?[JB }}%o?WaE@:}ݰ`Cqյ`|}C`C1׵o,r( >Ef>bk"9]_XwXPLwX h9?r[?߹ öۊö&p!ööoۆo?ld>la)ö!mUawaömama~۾?l05Z>l0~b0~^y~Z0awm_)ssGs!O>5N>5|v4L>5K>5J>5IGssǑs!!öȇay|v(|k!M>l;k!ƚGmGc#Xay|k!)5W|3:C>l ~60h4aB>l ~6X3?3?36<?öNmsۦ QF61m~aK?l}6:0~OöAчwsF͈>5ۍ?5k~70n?l7mNa߇}H/IװEׇ?0 ?l~`af ?zGmaF#Xöamߟm}k~և[߃>lk~a=Xö ևgZi}k^, x`=XA6, _R ܃, U`U) @AjK yWƒ tyr⯼=Aw?W {+xl԰0ǭa}.a˰uNZ~ە4l]ƚg0׼Xayu0׼"&$ayS0׼T\:P s+bb150׼"&t)jk^gayjk^ǧay111l?8 c3bbk3a:2 c3bbKƚg0<#&ayFL c3bb~gƚg0<#&ׅ115Լ^ km7a֊av~f]0j1a}0H G)Fb?3H gaL0H gac>#1/caca~aİ}7lhc$ayH s+Fb>WaW0׼b$0=u놹qk^ao=ϓ0~ϓy20=SOcasCMq^ '0~0b>?Laa4Ű}?lsA0׼)ayW4Ű}?5gp\ay=ayGSMy)YP8"h@ *"p4E Uh}%Bp4E h1h h MXh"ES9(BL0cD gWt1`BL@W&wD>L@β0/cL@ˁt ,r@6<0qN9t ;,r"=dEz8}&wD`N9z8} &9pD 1`"dD >0q&9[pDɀ@jɀ@38`"dD r,r 0޹&-Oֿ>ֻ'-$O@>q'4>ɓ'-$O@ ;}BOZHLd EJP#;}"AEJPEJ,rPD RN,rPD`'>q'>X"O\Ez|Xl3;}@OZLd'-Or@ˁ*}"OZL>qZL>h90}B=,r"=,rPDL,r"=P>A=|aąz|XSL NZ'C-gEL t*"C&ZP@jh3"m~ZzP j6?C-j&C-.TE`B-gE`B-dE jh90"KC--󒎥 Ͼ$w2H#4iP 4}(dFH tˀA>2H#-iP @ߑr4}@cFh A?A<9H#i ȃ4y_qF KA<8H#iA`iRAEeGA<8H#ȡ, ;H²* "- @\XPVȃ4ް4ywF`CYvƅ ,r( ,rPF`C"vF`4ywƁ  WY%vG G#89wG 7U+#8 ytG 
7Uqa$sG 7UMGp"I"Ep9M2Gp9(#&,rPG oˎ,rPG`"8dG@ˁ7qG@ˁ#8.,{d#8-FpZ#8.,{,rPG Gะ1:#&,r({P:#AEPE,rPG`"8.AE,r.rSqarxG ';݀dxGdxG;. ︰9#w@ ;ُwxG;. ︰9#r`xG;. ,r(CtDžEe0#ȡ9# Hw9!w"?@7 X"w9(#An@2#AE ,rPxǁ ~<;-w\@h90#xw\@! c?O`G͌j~؏@qa1vG/Pt{@1#~؏"L؏"~؏"bP؏@XP\~c?Uc?O`DžŮڱE,r(ՎV;#ȡW;#A2#r`G؏@ˁk~؏C c?aĆ?8cǰ}|خaoͰ}9lӱk33l?t \]S_ta?.a:i UnWҰwek\:b RujkrV\ Hu:L\:R s@55؏a0׼R\9jk^ayu:LayuxWcǰ45؏ay~ L45؏aOS0~0c>ć!؏ao> ~ '\Wǰ}15؏15؏ay~ seaz9l sU0׼dۇ|?Xz9lǘacazI8~?ƌ cFaaabaz;8lSY)W%ay=o( \pk^1#݄ayWȰ}-ak^32lH8L!z PJ=Z`@IS ( J=Z`@IS (P%E ( ,rP@I{ (P%E ( h%E (9%-0$r`@I_,MZ ( Pr"ypcES~|LL u)n21%[&\Ĕ@7-S`bʅJL )~01%[&]r&S}dbJORLLNL YӉ) ӱk33l?t \]S_ta?.a:i UnWҰwekA,\:b RujkrV\ b Hu:LA,\:R s@55 a0׼R\ b9jk^ayu:LayuxWc˰45 ay L45 aW0׼Xe *ek^A,\ bW0׼|a0׼{s}25 a o>ƚsk[;skk[O;^35a0=ƚgx0M˄4*&LOit0&#r0|@k2&r$| ,>9Y| ΧP,>-d9Y|`rx|"=P48&A,@F#h9qͅzh}2&GZP48&dM 09dM@eAh}2&3NG9a򅊠 4#h@4r8&*cM  4*&_Gr@4y8&*cM {P4E d#4EP4;&A4EP4E ,rPM ! ,rPM`"hAs"hAX "E\("y 9!bG@ˁ4Y` Ah90&GZP48&r`M h90&r`M X "y 9(&=cF9!A!AE@4!Ac| x5l^ϖ5;0&| ܁5*&)z@^z@Ok^sk^X@7Z^sk^k^s k=w`xM@ˁ5X ^h90BZ?r8o%el_y{;7z2_y{Bz 1'Н߯r@Of]JԠ@Й-S}%ajЅJ tA05( +]Ԡ@_IS}%ajPoeܑBsG H@-#yuP  ֑BJ;R²pH@o)ȫ#.TP Ϸ UڑBAwЅeܑB#yuP`CYBwP Ϸ ,r(;R(ȡ;R([G 9tG |H"H@o)XP)t"ʆ#)XP)XH@^)t #-F H@WU/3# eaD8(k,# VÈpQ FtaٺtQ # VÈ.,[# VÈywQ # ,r([# ,rPQ`ˆFÈFtˆy}wQ`ˆFt È-Z # È-Z # FtauQ Y# Fh90(r`Q0@vFtauQ`ˆrхeaD\rQ`Cuх # dsaDE # ,rPх # ,rPQ`ˆFtˆ0"]0@6wO;K?Vr@ cheQk1Fn2('cheQk1F,1(s]@wch90(Н%]XVch90(Н%9>"@wcXPVcc cheQ`CYsQ1 ,rPQ`bcX@7ZcX"9(@Z1 ܁1Fc.TQ1 cc.TQ@ˁ1F?}k@c6cfQe1 t׎1F1(m k ǾcݵcQ1 ,r(V1 t1FEo1F*(m 9}M"b@)ctawQ`bc.,.1 ,r(F1 ,rPс1 ch90(r`хœ1F00a]ba1hؾ0>lWװf>/o ۟Ncװgf~{vM ayk~ 4lqIkدw+X1k250׼NW_tWѰdC25Sa1בjk^ay >uWѰ uQ\:E s 55a:@ s450׼b~cay c3h~f:LGay y1FX1ƚgѰ05ay X1ƚgѰpbc45a?kNQEyr1Zka St0dtа}0layؾ3l( ki}|QӌOֆsa0jAڰ}O;LA\t45ay=h}2B>h}2(ZL @Ez)|ҁ9kp:Ё i!1( ]@ @W8(D]@oI-$rr@ @:B9((\9(B"9(B9((A@i8(A@E ,rPЅ ,r{@`tX @|t -@ˁ@9pP ȱ-Z P-Z PEr,r{@`eP`pX @BtAx6!@Ϯ+trݢePgW ?s.TP' ?s=`Ѕ 승BX0W(г+ BX0W(AB*W(ABE tB*W(ABX0W(AB2W( Z +\@ˁBs.TP\@zbq/4ϻ$?n3)cH tIfDRߌH 4I!ˁO Inp0?)o'F 
7}c~R}OPI1?)>'F@78Ƹ;y)@_ژK}`tR /mN^ Ka8y²@@yq҅eyKypR`CwR oN^ ,r(N^;y)ȡl;y)7'/9~'/"@XPtʂXPX@^t -&/B@ˁK*y)ⓂdfS oWl ʙM]9)vl dgəMܵsfӅe֙Ml68)g6]Xva];g6̦@6XPvaX̦"e69()-g69(Be6e̦"e69(@f6Zl ȍ9g6Zl h90²̦@vМh90)r`fȘ@ˁM9²̦"e6̦ &3AsfS`CvfӅl dCٙMEl ,rPfӅl ,rPfS`2t2XAM*) -:@@N,taYtS t t1)[:rb@@7-]` t tÊNg8B:a@@ˁNnX1²@@ˁNnX1)ȡd:) +:9L:a@"@@7XPV2-JݿeS`X@"M tPNE t ,rPS`dS@@3h90B:z@@ˁNg0B:Z t taIpS[z t Na0)n:@@K`S[z t 8)H\٢SzY>K7hXU-$f-= à)ġH,t tBb@ à)ġ,t tB"!)= ," K$t ,qš,t tKB$)-=    ,v qBa 8_FӰńa{Ϛa0uj}ga:v 3Cװ5l`؞aa:o ۏ[ q?\G5O=I0 暗a6 sK45ay>L+!0׼A :le6 cS4l GcX6=LKX6 cS45OiӰ}><5Oi0L;\~}ay-wsK4lbk^{\6 ߂J0?0ssksk^X6-m Ҧ@VMJȁM,KK))))p(A)Q)p_ld,m:PҦIHR +WFRe+oRe+r`RW&R +reR?hW 0v@}mW:P停خX⠍@vHmW:P%(dvm8h<ەKQ~JخX⠍m?_Z wKR V/V /KnXRtKpXRRt~^^9аz)A@^ ,qR/rb`ҁ?C Kߤ)) KAJ9)FK)[K9)) KAJɵ@IK$m d;ҦI)=]KK$m ,q@IK$m ,q)M%68HX iӁ68/8/8HtB@`B@`B@ [6=Ҧ@ǁҦ@}-m t(m t(m Ҧ@ǁҦ%m Ҧ@ǁҦ@ǁҦJJJ_,q_,q_,q)Н_JKK_,q_,q_ ҦK=)A!_//t@(:P@w) @ EQJ EQ.=|(@=/(*=/(*ПE8HX QԁE8HX QTE(QT`DQPX Q) @ǁ@ǁ?,M(*q(*q(*q g1_RG nҔ*?e@ՁZ,;Zz@UKZ@ Z@&Vh;}!*!:==*b:*:}!*R:*b:.*WyvOY}+q+b }!+7 B y!+7 y! a!V o:b?d!ցbc!V Cbhg!ցe1B@B@t, ,q(bc!V`CY&a!V`Cٔ+7 KʺXXXPv&, MB8a!ցb8 KBX%bo!) t( B@ǁB% mYٖKmYٖu`yٶ@l[V kea[V {eH۲ð-+OJi[V {ea[V`CyٶٲKd ,q-+۲Kd:P@6l ,q-+AҖ8Ж8ЖWm th th th:l!ևm/&|ؖ۞5K?l{{öeaö̇m\7a[>l{ [>l{eö8}p5l?i}؟u[>l{|kBsG)WOW=MgsB۞$=H>5BsGssBR߅Xs裡sgs'a\\\0>l;8}kBcCaۙi~d0l~6S >u aԇmƩ\ak>l}6Ic]Cah.핂.>5 ¨s¨s[w&D%?5oK~6": ¨cCaxöЇ!/I~k¨cCay>l~k¨cCattؾay>5aԇ!/D~k~_0 a]5l߄ma} \}\䇹^ 0a]a?5 >l{\ㇹ)5;?5/;~k~_u0ּQ% .dK .@A¨ YF]H .d$)0e  .d$p[i0*pF]8 u![rB] P'ԅl u+pB]&Pi_yP.'ԅ u?p9.SN}oB6儺aߛMB9 K )}o}oB儺aߛܝPQ.'ԅ%ÿ u[8.dk_N}BN ߗBN u!rB] u 'Tz 'ԅ/Pr%'ԅ%Bι䄺a_k ܝPr( 'T'ԅo u#'ԅF ;.tᄺ 9.MN u![rB]aP:pB]ᄜP:pB]fP u!;rB]X;w'ԅl uaawBN Kv'ԅ%BN-䄺awB]X;.,q؝P%%%* ]9.,q/,q/(CN 8.dkWN 8.tC 9.t ܝPr!'ԅP:pB{:pB]8 u'Taþaþa]8.,q/,q= K= K= =*p߃a߃г8.,q)qx+/tA5uPM]ITSjB7Г6.tA5.L ]}PM]TSjBOڠ3. 
Kvԅ%j]5.,qUS8쪩 Ćj*aWM]XⰫ.L .L .tK .t US:PM]8@5uTசq =|6 ]Wg+׿A+pc]X.,AXz1օ%He7Zb =_B b =_B!ƺA+I?Mi).t?J ^RB_Ժ(wօ@u Pj]n3ZRBԺf(.t?J ^RBwOoBgOoB7~|['օl6ȷu!{m]f|[eD yݓoB }['օɷu!Ϸm.dۺ=.,q(%m]|[8˖|[8E.uO Kʶ|['օ%eD yݓow"Vۺġȷua?ɷXPۺam]|[ۺqoBv?ۺqo+pm]|[:*ou!o2u][L])SW`y\ yB6Ud꺐M.dSE ҪL]"SׅWU.dSE KvSׅ%s.,qM]8즮 KvSׅauan M]!Sׅ%5 .t`꺐/uSׅL]:0u.dKO .t`qB ғ+4uan꺐-=JL]'Sׅ%eAu!;2u]XⰛ.,qM]uan꺰a7u KvSׅ%. BPd )}0.t[ ]`0u]0L]r"Sׅu0u]L]Sׅ.t`qB` ,o(uSׅuaCyCY L]Sׅ% e.t K2u]0L]87ema.,q(o(.,qM]8즮 uan꺰a7u B Bu$0u]8u$0u ʥ 4^ׅAu{x]74^ׅnAu/mx]4^W`qLHu[zx]4^8Մ4^ׅ%7!W= h.,q( i.t KzB À+'ax]XPx8 KB Kv@h.tqB⤐Bw\{kð=bΰ}Q5la؞af>LZ0l_x]̰5l?s _*gjy[ְ}a{aQa:i SnOҰ25O0׼XG0׼W\:] ۟&jk^ad שax sH550׼4^0׼R\x R9jk^ayuzP\:> s45/_Fװ45O0<5^3tdƚkKx cS55Oװ05O0<5^;0<5^Xx ۯ kk^ayiz4^\%/kv&װ}";lfqϟa0aR| cS5l< #a XT| Mk.0׼_\R|=Lay)浨955͇`4l0 skEsk^ _F0<_Ұ}45Oô95O0<_XT| kkay*Uay*ƚa0׼/z/caym^skaZ0׼V. skrk^ak\Z暗k0׼,) @瓢@\-:Pw+A% dբK$:PK$ K$ ,q@8h >Ak%u㶬E]%Z,q| u:uAkQW@QW@QW Gu:u(QW u:u:u5@ǁ@ǁ@ǁX58H58h @8h >AkXPuKE]%Z?P@O,?2 =w+EMWz GׁqNWp:P@ tQ9_XZ@ϑ ,q+AK:PK tZ ,q+s$=G+q+q+K@ǁ@ǁ t t t:P@ǁ@ǁ@/,qoioW[aIX~|zE Z`4-|H%hӤl[%h4-Ѓ-J=-qen ZhH}V>Wz^Fځr#JZetDځrz^FZ[@tz@@ځ?KZ;tz@@V tGn@ǁn@~ʮl))>SV~le %lΒlΒl[y -V[-JV@V@:le;8Kl%e#V@^me ,q(kAXle ,q(An+[`CY-p[KʖlXPVle;PV.d+[`l%?-eiV@"me the the dOV@ǁVee dOV@OY`e[ Vke[ Vke[ Vf+, Vde[ [@VXl Vde[`m%e1ʶ)Kl ,q-+Kl;Aʶ@Jle[`mlX8P8PWl tl tl tl;lXNmmmmmZv`|-Aʶ@v l;lXNm%eʶl dʶ)Kl;Pʶ)Kl ,q@)Kl ,q.?ll ይlme t[V@ln ይl0l>lvlllv`yV@ǁV@he ,q(>l%ut[4趲8>l%ut[-ġn+[´8me;PVYKde t[VYKde ,q클:-L+[@+ہzJB+[@+[$(+[@Z^AZEځEbZ^AځEbZ^1GZ;AZs87k%AZ`CQXNk%bZ;8YkDPv`XX Z`CXv`XXP!8H@,Txٕa{ 8_Fڰa{ϚaHt}Ea:v 3Cװ5l5`؞aa:o ۏ[q?\G5O=I0LrakՆa8ur05jѰ}:l`raa _)ƺ\m>1暗\mk^ray[skaZωi95=a/\mkraxh>ƚ\aZƚ\mkrayՆ#ayՆ)WOGayՆ)Wƚ\aZ"0׼(%W{v(ay-Psk}aڞ0׼6' skmrk^ra\ژ暗\m"0׼%%WRړ暗\mؾ$95oZ r@,W d[vj(X\-\@a@rҟi џfi џȆiWrhZ rcZ?0O do@N!O;PkʁlP۟X5@6O;Pkʁ%ZSd)8hM9jK|ilZ۟X5@ǁ@?-mviKZ {:(Z ::-@@@ΛO ,q?-3'KEzil? 
ɟo@C:rj`ځ:rt`Z b(Z {r2fZIZ :jځ8ȟ֪i%(Z 8ȟX ځ8ȟX Z 8ȟX Z`z`KX ځZX,qiqz`9s?-q?-=X9}?-q?@9?-q?-q?@-:::-8ha=A@%O ,qz`z`Kija=A JПX⠅O PZXth_f=m f=mvmnmmmm]vmm]X g[`m%r8vm%r8f=mX g[gWt:tzvEg[@g[@g[&mmm888vmmmmL8-.Ӡ[ M"u$;P@O) ,5$q]`!.Ԑu$ ,5$q]`HizHq]?R) D@ǁ@ǁ˖uuj vnj>5vbjvj)&vnj vNvSj v.}jZZݘ{qe;> -,+ZSS t,jهN}xlp؇N}x-/s/ ye^ `^ 8%./ġlrهw`/ġsه}x%e>@68 ,q(]a^`C@Kʆ}x%85/K d>҇8Ї~}x}xʇ^}x}xßeށeE&@^m %&@^m %&@^m [6XVTl dO&@l;ؤȷm dO&KʊMz%28ȤX ^ 168ȤwOLz|&َI/qI/qI//ۤ8Ф8Ф8Фw`فI/IٝIe^`LzNڤw`فI/IKMzʤfMzWl;og[{5tuig[/tuIg[ G vWCgl hζ@jl tζ@l tζ@ǁζl tl tl ζ+v:tWCg[`Cyζ@jl ,q(^ m%x;ݫ-ġog[{5t8Wl tζ mXP^-Aζl ,qiq-ЭK:::ݺ-q@9ɧ-q-Н|:ԲO-[ԲI-[ԲX4!ֲZK-[ԲXd!ֲZK-[<ԲZK-[ԲC-[`C1XX -[<Բ8lnO t;RaZ1X?-m؟v`؟X@O ,u+O tw@ӟ8Пv`؟i ۟ i_Fڰ}>lOװ0l t>?ah>gj(xFsѰ}?χɭ65Oڰ}3l{_ƚ[m>Oƚ[aZƚ[mknayՆayՆVO1ayՆVƚ[aZR0׼VV{Cay- ska0׼ay s˭6lsknk^na~k^w\r g0OyՆ wXvV n@Vjʭaj,[-[-)[-pi%A[-Q[-p_? dO @O d/@O d??+9z>-Mi9>-П ԧ5m}Z ֧@O ,qnj u֧ig}Z`vSKvi%M=P@O ,qnj/@SV ?n@V;P{N6jۭdӭvjlۭdӭdӭv llKWX Z Qv8h@٠[-|Cr=n@Ct:trj`ځr:trt`Z fv(Z {vrjfZIZ v:tjځr8ȭ֪j%r(Z v8ȭX ځr8ȭX Z v8ȭX Z`by`KXX ځZ,,qiqby`9s[-q[-=XҤ[-q[@9[-q[-q[@-:t:t:t-8h s45/_Fܰ45O0<r3tdƚCnKt c!75Oܰ05O0<r;0<rXt ۯ Cnk^ay9䆹{r\r s!75OOܰ}hPWaa c]7l nj05=qʰ}8lc]7lW|:ƚ'nkaa/ƚ'nkayzŲay sklk^il>4:0׼]aym skl>*0׼P؁%Z,q:v`loj;A؁%?-Z@ǁ@vRm{ th{ th{ @ǁe{ @ǁ@ǁ888vֱKXu=Z-A؁%Z>P؁%Z,q:vG ucz@[`ֱ-УKXuc88h>qtzB7]tI7]'tI7]/tߧtM tM7.ГSK ,n ,q.гHK ,q.An.An=@K ,nH7]gt:t:taiM8M8MwttttM8M8M8Mwt%r8Mt%r8My6tMX 7]t8MwtgMX 7]`tMX 7]`t%r8?`7]`Cv=n@ϳ t t;1=!.qxߤ.FZP.Z=}. eZ.wZP.FZ. eZ=n.4Z=_.q. 
eZ.q P~VXH^`!DU)k3l&ڇڇȮ}xl;ه.}xEv/]u/m'y϶h^ `^ N8Ev/ġ.ڇw`^/ġ,0ڇȶ}x%e>@ ,q(d^`Cg@KN}x%8FK d>҇8Їȶ}x}xʇȶ}x}x}x:MaY^ MaY^ X, d²@v, d²@v, eyY}%P, dβK~ey%8HX Y^ ;K8Hwdy,YX Y^`dy,/q,/q,/{:::Xv- dղ@ǁ@ǁ@ǁ@ǁ@6Z-;(ZX Y]6%, d@( d@v-;lJX\u[\u[\6uQ\NunSP\8P\wuuuQ\w`ٔ.q.m KʦunSP\XP6%, t)aq]8M ݦ.ġlJX\u%eS% ,q(8H\w`ٔ.ġlJX\X q) t( t׎@ǁ% t׎@ǁ@7 n+Bv@7]oWtvE7]{otvE7] tvE7]oWt@7݁c7] t@7]`CQMft=q)O\`}=q)O\`}ɉ=q͉=qYL'7yHO\G㍞@7;HO ݞ7aza{Ćퟘa_FOܰ}9lOװr~tO3χ0l퇆an؞aa:0 Qݰ=N°0)_'i\ƚ'nk^ga#ek^ayOp05/OܰI2l s0y↹暗'nk^ack^gayy55S0׼P\:A=Layux暗'/'n~pƚ'nkaa:2 c7%z↱ƚ'n~qƚ'nkaakayzׅ75/O0׼&0.]pYŰ}r7lcc7lS ۇv0.]p Ov'7l[\ڼôv5lI skjka4/ nk.a$b>ƚ aZƚ nk.ayӸayݪayƚ aZJ0׼{sayR skaڣ0׼vay-P s7l<skwjk^.ayk^kS\r way-L s`]jk.@V]pʷ @94 .o\ `܁ږ d d ߻d d{ mZ$2d(i[YvmRXQȦmYvٹ-3:Kɲ@-8h.,KKTX⠥@6r-m ,qR݁ki[`Hi[ [tBi[)9miہZ t)m dҶ@'Ҷ%m dҶ@'Ҷ@'ҶdSdSX i[`mXX iہ(i[`nwӾ[IHi[ :Ioiہ:Soi[ L(i[ [rdi[Ki[ :&iہ8HNm%(i[ ۙ8HX iہ8HX i[ 8HXU"U"8h9AҶXU"oi[@i[ [::oi[@iہoi[@i[@iہZEt(m t(m t(m;Pȁ%ZE,q*r`mPXU"U"8h93J*r`V=-AJm%ZE,q*r`VK~Z~HZE~88GȁCZEt t Hλ@ǁλ@ǁλ@O t λ@ǁλ λ9K;Pλ9K λ ,q.У\:K ,q.Aλ9.Aλ9=ʥ@9K (λ9H]Gt:t:taiy8y8ywwwwwy8y8y8yww%r8y'w%r8y;Pλ9:w%r(]_.Aλ9.Aλ9K ,q(λ) t:t:tX0wwy8}GNN>mylіh;P@h kO[ށɧ-/s^-/Н|=-/ЭKҤ-@ g]_YvD }X~eSԆ@ʲ }Sd?eaԆ@Pl d̆@m;,& }w } } } w`"/q/M2K2 }lXP6Jm;XPJm d̆[jC_ d68S$/ġlw }%eԆKʺ }%2IfCi th d̆@ǁe d̆@ǁ@ǁΩ }ßsj_ 1vc_ 1vc߁e n@c dwn@c dwn@.+~WrYn@v' ,q/ġ!X _`~%rf_`~>~%?-rf_@_@_  t t t;ȶ~~~~~l w`Ye_QlYK}lY^}ң/ң/à/-=9Z/q/q@}i3c}_@}_{8à/ġX}%eg@0 ,q(;3A}_`Cٙ/= KΌ}JXPvf ,q3c}_`Cٙ/ġX8P}}J}%|Qw`yg@w( 5@_( t@_( tW@_( tW@w(;v, tW@w( ,q(K, ϶\, ,?[ ϶\,;\, tDžBQS.=B@w\( G xPw`X2x ۇ ?Baq؞aװ1>lu G3|5l!װ=S?a{asazlSnOҰ=5O!0׼G0׼\zT۟&Ӄzk^Bad 暗a sK75/!0׼x0׼x\ R9jk^ayuzP\:> s45/!_F!ް45O!0އaj>ea{}k^+G\?huX {0<uӞ0<uX cSP7l7 cSP75OAݰ uX cSP75OAôS45ay sKP0- '\$ôF45%ay \暗n>Z0׼u0׼% =LC\0ּuz ٿ.oA݁oA] ߂@, XPXP2=@> kf.OJ dԚ@ tV d+Ԛ@;P]Z3ɗ5sY3wK&5s% ,q~W ۣ8h@i#f.qI.-$X[k܁Z txi dk&@&e d&@&@&~$$X \`Lrq$X ܁2?)]`wAX?I] {v:to݁r:to] G;v(] vrc]cO] v:tOi݁r8Aft%r(] ;v8AX ݁r8AX ] v8AX}}8h6AOX}i t dW@ǁ@ǁ@ t;P@6 t t;Ptttj6A%ڧ ,q.m~:KOX}OX}O݁ڧ ,q>mt8h@99.A%ڧ 
,q>m`tzA@::=.q@9=.q.q@9::=.q.Ѓ-::.Ѓ-:K ,q@9K ,q.C`:.A@y ,q.A9K;P9K  ,q.s^:K~J @ǁ@ǁ?,M:::.q.q.q@9:::.A9=9.A9"tAX ]_$.A t8AX ݁r8AX ]`t%?AXPt.v:::, @ǁ@ t t( ԋ@) t@O( ;OA݁ 槠.C` .m~ =.}M   槠@  槠.q gF] TM]`S݊)kV_Y4/.{.h,+Vf] '3V(] `] `] VXO d djj] {oV8]T,ۨV8T{.ġlZmޛv%e5j@ެ ,q(V(]`CQ.Ajj]`vYm@{.q@{.q.qj]@] {otU6*m dƼ@6l dƼ@6l;lTژ摍y}Ҙ摍yl٘ȥ,K1/OJ.e٘XP6*m;PƼQic^`y%28ȘƜy%2(c^ s688ȘƜyyy\V1/q1/q1/q1kc^ 6:4:4:4:4OmcށeeR@w?( d3R@_( d3R@o-;,Xf xgXf xgXxnRqxxxJ8P8Px%KJ/ġ,Xx%eR@w?( ,q(K<4x%eRc)^`CY/ġ,Xwx%eRI,K<8%KK~J( t( t3R@ǁR% t3򞔽w{w`yO޻@ e޻@_f t޻@_f ݞ޻@_f ݞ޻@;xt ݞ޻@ ,q(:{,OxWl ,?^"_.F1^@.A{݁Eb{]yX/=OB_3~wV{ݗ%~t}/0c/=EIe?ys~?~g~??l{'5}8}~~?~'~?y55=u#\#\~?M>l{~k~}'ɗ~|k~}f2f2f2fߏ/so/so/}ee ea\\\?^eN_ƚ_u_ƚ_u_aۑXx^Zu_ƚ_u_ƚ_u_e^e^e _ƚ_u_ƚ_u_a\\\6{ݗ7{ݗ7{ݗ7{]j/= ߣ/cM/s/=2jM,t_{ eBe_{a۞З~^nuϗ~)m B_em;\nЗnp/cͯ/=ߣ/cͯV5Z辌5Z辌5ZXX~v>l2j2j2jm\ϗ՟/so~h\ϗ淕6~5|k~/so>_,t_{em\~15-|k~}ۖ{5|k~[2n{AVn{Aw w ٌ݅-t/-t/ dv 2 [>puɽMd. 7 i_{A$w ^^%A. ͧdwrwɽ PK٭]rnO/{Alv 8lO/Xm> KKͧ 7 c^qX]r/N{AKw- {Km1%l.txW܅Km]r/.txW܅b :K%%KK6 r఻^as]^S6] ʦ{O)U캸tW] a.tq/dd|Ž g).Ml#@eŽ^ ]ê{A6w]܅.K6] ^a]^]%.K6]܅.K6] 8ldg}Ž`æ{mKM M,q6M_a4}Mwi%ۦ 8l/㮋Uê{Aw] :.UlﺸtV]܅.}uq/8tV]܅ۦ :.Uêp4}mKM,qtq/~{mKM M,q6M_a4}AW]܅ۦ 8l/~{mMM,q6M_a4}mK6] z<>pŽ^UêpŽS.UêpŽ^qXuq/i{AaŽgW.Uwᦋ{AϮV] 8l,qtqn,qtq/X^Uwᦋ{M宺,qtq/X^aŽ`æpŽ`æ{M宺 7] 8l(wŽ`Oê{ArW] :.UX.UêpŽ^qXuq/8 7] :.UêpŽ`æ{M,qtq/X^ 7] 8l.K6]܅.{M%.M%.K6] 8l,q(paŽ`C .W] z^qXuq/8 _uq/8}Ž^qxݮ.Hluɽ'īK=X]r/{A0^3%w{A0^3%. 7 z^%nz.tV :K=X]rn.tV z^qX]rn. Yk_8n{Av ^}|waw/ȶn{Aw݅es7߽ Wv 8|wf{^a3߽`f{|l#,qwndq7߽`f{w :|ܻw/8tV :²`^]|j{Aa5߽㰚^qXw/ȮnUn~rd3p۽{ v/Ȇ.rt3p۽ ʻ \v/.{Aw?V ^]n*{Aa]^qXv/8ruo]XVv :Xv/XPVv ^ġrtc۽`CY9v/*{r^ݏUn%eh۽K.p۽`CY9v/X.,+GK.{r^qXv/f*{Aa}?ŭ_d׽$ݰZu/+꯻^W_ Bg׽o諿}C_u/{AW 8l,q(>_%E^ye׽` Wv ^|`_a{A0V݅EƲ^7W>r BϠa{/n>-k3lۇGA$ ۟}l4l԰0=Oahaw}ۓ4lc_7525G0׼暗n$?H{u\ s_75/ݰ15/0׼ut)jk^gayXV:3n~Ny)Xgt θa3:ㆱևV:a1}?[ƚ3ar s75/g0׼q3nk^θay9ㆹ~OwK۰1)a\d>% ab j{f>̰65O۰}2l c0m c65O0)i dSӒC|RΦ%m'%mJ%m'%m'%mJ|R|RX I[`$m[X Iہ? 
[`!HX~һn-@[ {ֻ([?w dcz@=w;Pz@v w z@ǁz@ȭw tw dzw ,q-CKw;Pz@vw ,q-Azw ,q-Az@6ϭw ,q-A%ڵLast>i?CG?$ ۟X s9l s95i0=4%?Iay&0׼dt\ sKF7l| sKF75/ݰ02چa=P6چ1>چNY0(kچaYv?{(k{dms;= gemmk^ayچ%k{dm\ sK65/Y_Fyڰ9?lU Ϛa{'a ۧTzV:a{#~އOma{/>A@ERdȮEf EfJd֟Ef Ef EfJdRdX Y`Df%mY`DfB-@ ,?>)ˏO Tr`Z?M@ 719F-П&TmZ V(Z }V:T:TmZ@Z ;V(Z`hlYX ځRhZ`h%R(Z`h%RnZ`KX=X=8HvKX=@m ds@W__ڿ] @W_ڿK%hE@@vm #_X8hE-=cڿԊZ`V3+A+j_%ZQ ,qЊZ`VKd 8 __ ___88"__8u_X W`_X W`_v ,q+AO8u_%8X ׁ8X W矴(W`_X i @ǁ@ǁ?,Mڿڿڿ+q+q+q@ٿڿڿڿ+Aٿ=+Aٿ=@ٿKd hٿ+УyڿKd ,q@ٿKd ,q+AnW`C끴zLW@W@ׁe@ǁ@ϖi th th *T(5XLKֶ-*eש;&{R- 0eg,%`4AB_21 'LZ``4XB߹2 vPiW Lse,Y*`4X90 jiJ` =* jis`,;4X9ry)űp^cZY ,/8vpy=űp򆪋cZYABnű.wpq򲪋c!Z] 98r.c!O\;X98űpc!Z] 9,o8vpyűp*c!Z] 9,ﳺ8űpRc!Z] 9,o8vPűpvc2e+.T y؇,c!Z] =8űs`q,X;X98űs`q,?hUtҭ˫nd,* =B^JUzl[eV-kB^JUzl[eVY9U_;PnVY3F.@UUѭ"* }VYxBU![e!* }VY3FBρs`,*;VY9UzlVE0BρЇ*l* }VYay̭Ї*l* }VYay̭Ї*l* }VYay̭j* 9Uvpy̭p"[e2E0Bρgl}|%A%a,WB%as:BCg,WB%a,[eU:[eU:[eUvp魸U.sX+n[e# (G.OQ,\Yȟ|9G9pYrd2G9f,O#;tL# 9?1G62{b/1G6-{kg}Q>2ޟc̱~g콩g{Tvs{N?&?ӧ؟{/i9g91?x{g㘟y}2?M>☟yޟ$c1?ʑ}٘y+G6g^9ǘy+G63gʑsdc1s`l#{1s`l̑sdcwsdc?̎,;0Grdc?ǎ;0G60Grdc~#3٘y>Slϼrdc~#3t06>L鰱06>{W4Ecqp|}U4)LfOcc%P|}&cLOCcqy>c|3tyއc|3t31g1>Lg:3y1g^][c~L\}1?z3l1g^Z׋cK1?zbϼac1?zbϼac׻c~{3W1g^oU;c| ;|鰐Oa yHtX鰐p:^Bu.G쮃<|q,䑣`!B;uz"٣`!O] y:ARuJEAT<s,\W* yDtXȃ9B鰐sNԏC鰐'tN'a yLtX}2zLT:,> 9(.sP:,\tXȓjeJo{XȳPwﭺbV]p{XUWCvB^+_5슅&B+_5슅XAԅN]A;u2S.s;u2uꝺpީ =@֥oY zA+[^ y6RA_5,ue+\¥p^ 9p^ 9ƂY =BXz,uT+K]RW9uPs`+X }RW9򅥮s`J]/_X 9.sPJ]2e*u&ZApJ]?.sPJ]2e*uT:RWApJ]"Y:RWAw,uTw,uK]RW&K]RW9uPs`+X =z,uK]RAe*uT }RWApJ]oY:RWA7,uT:RWpe*uT:RWApJ]2e˻.uף]з,uK]R=iBρз,uK]RWAeB0N7xT+3^o }AeB3ށWxT+3^8 }ʌW90zxw`2^W7MfBρxw`+ =f*zx"r:X\d:,aO\ ytX诘Lo%  Lp:, a2'.L>d:, =Bρ鰃JatX诘L\Ls:, }tXay3'.L7Ӝ }tXay3'.L7Ӝ }tXay3'.L7Ӝ;tXay3pa7Ӝ 9,o9.sXLs:, }t؇LJtX$LJt}NtX$LJtX+B%a:,7tBCg:,7tBCg:Fq:,\Q 9,*. A `  }*!`  OBXK9_(bl}+8{c#cl}!8{cϾ].3}?{cO-{Sc7{_Ќ4{}Ϻ?_^n<`c~!73O1?|{|O1? ?I$c~L1? Wlϼ`c1? 
W3ƺgg3ƺyXOß)5O05ϝcc])53cc]cc])5g^1?uW3ź+5g^ogk_{}Xf_{Ϥ?c n}={|W%gocAZf} =>1ƺgc zugkugk}އ3Xyƺ3^3 %1?u}7 .c~Øy}37뽇5˘y0g^ ˘y0g^U1?u^s31>u| yXW'߱ud8+䱼Y%pV~g ټY%pVshB;uPᬐsgYpV#iBρᬐGtgT8+\pVs:e gT8+aY2e gT8+\pVAᬐggFYAoFAQ.se2Q.sP8CBe*ZB9:EzLQ/ST!O:~B)ǏNQ/ST yU2EuP/OA/>d^ 9腟NQAB;E.s ?2.s ?2B3E!STUxBρ)JQ>g* =*EzLQST =B_g0EzLQT*uST2eJQT*\UA) 9(E.sP*|eJQT*\UA)pRT 9(E.sP*ST 9(Ev)pRT2Ev)s`* d* =*EzLQSTABρ)s`*:UA)pRTᘢ 9(E.sP*4ST 9(EV)pRT }+UA)pRT 9(E.sP*\UayK)pST2E)s`*:U90E)s`* #wW }ȈUFB߯0bX_a*;#Vo,:UwFBX0bX`*\栈U#ZFBρs`*#V =FBX0bzXT*#VU90buPs`* 9mmskk{ޞ*y>g+\/梁l%prK^S] -ol%t+俇\ y6Wȫ"./r.yK^U 9B%.y\\u+\ 񣹼VB.sXbuK^2UVB.sXgu+ٸK^2VB.sXluJ^2Ve*yW\] 9l%Y =Bz,yT+ٸK^W9upy%s`+ٸK^W9z + y\gBE;(ڙG|qaCBǻą%,yK^W9uP%s`+X %ˋ}.yK^O\X 9,/ą%pbK^O\X 9,/ą%pbK^?~4\:Weˋ}.yT+\氼WA%ˋ}.y\ 9,/z,y>dCB`+B`HB g+B`+K^uK8K^K8K^up.sX:'.yԉK^U A*y Bb%Ч,y>U%Ч,y'K^!1%+W/,y%{cɏO%{c﵏?1c7{썽?1cM?>'o}3na>,{Ic1>,yׇ3nϼ>mi>̫5${W3̫5g^%1?*y?>̫5g1}ak_X{~1}akuj]cwk]c?\l9j]c?WX95j]c~U3Zטyպ>Skϼj]c|Y{}wW#czk}R{_Oއ^c#>30>{W!c߱5>{߂O}އc|YL/$g=kgT=+!Y!׳BY %'U.YoCe*qKV!i]:̇py 'A }n`˓V< j`˓V< j`}V' bd+|>䉷XV}co7Bρ kn`XVȃe7Bρ mn`T+\V67ej`T+䑛X25ej`T+\VA 'n`.VAb.A.sлX2.sPC6Be+Z6Bzl`/X!:WeB n`/X y|VuPʄLAʄԫ2LAʄd_,bOy}w>a_,'bX9/zT_, =/vpyi}s`_,)b2IB/.sX^t_,)b2IB/.sX^t_,)b2IB/.sX^t_b2Ie\^t_,\氼4Xayi}s`_,'b.W }W;._ }W }X+XX\,4c2XT_,\o/>2`_,\&؇싅>2`_, C.!BQd_W싍o"޳{zc싍/!ދ{z6{?~϶Gm6{oj>ޟic hcϳq6gK{g_lϼ>cc~!6g^acO3dA2g^}3ؘy̫/63w5Lscߩ?{!3ƸM{Yod3{1odkd36{nϵ^c|{_7>{B?cg} :{ |g>cб:`}9>s}0>{|=ǞE1>llgck<1>llgck}~?g1>llgc3ϼ~?g^?3gϼ~?g^?L3_oc~1?jlOc~{yϼ~?g^?Oc~s1>nl| yV'ߍjl\69>Ge~ yWAѯp_2Ee~}W90z~_W |90z~e+X =ֹBuuPus`+]\:AչBuzs\U =ֹBρup\2չBu.sP\:X 9m?9m >eh,\氼DX]e˛>eh,\氼NXayءpRCc:X984ásph~Cc!X984vPsph, yecBww yXayء˫ yXay١w yXay١  9(4.sXJvh,\XȻ>dh, yX904vPwCcABρs`h,;X904z  ke!:( yDYȻ'Bu8QÉNp,( yDYȻ'Bu8QÉs`,\DYAJ( 9(Q ʼnJ( yDYAJBq,\DYA( =&Bρ7NeDY90Qvpo68QFщs`,( =&BρNBDYR'.o9Q|ǥNe,q)e!o( }>DهL=p,eK( =&Bρs`eDY90Q 1eJ( =&B1Q.sX^+u,e2R'B1Q.sX^+u,e2R'B1Q.sX^+u,e2R'*Q.sX^+u,\DR'ekNJ( =&B2Q!e2Q e0Q e_:Q;e0Q eO( }_ߙ( _ߙ( KʼnpRwq,\^ Rth,eH }X/ 
Cc!;X{Bc8_{}RCh^寽fk_{ݸq k_{}>^w-ԯ>Md5˯k_{}*?~׽kO_3د̿?~^&cد~Ik_3Ccg5?د~k|5Dh^sc_whد8د^ןDhlן~Ǽ_65nwh^5nq_{Z:8׹ɯN޿בL~udk_{4}S_{^G:H᯽ !kS_{^G:1ׁgq_3կ;W\#p5Wc__{^Ŀ!pk_{lx_3\\̿Sk~߁_{̿Nk~߁_{xvS_3!+puOWyvE> \+puu-rWHF^Z$.˷QydTE~W2"ϝ?+{_Ŀ9t:j;]G.Nt!v ߿ m"E?`h;E~T"Pvw2w2w2w2w"Tvv m3Fi M}fBE2.z(4]Ph.z(4]Ph+.z(4] M=w/%Ph]h]h ߅ޅޅeBeB2w)|..sx..sx..sxw2w2wPh ߅ޅC2wiCBEhts@BS.4]Ph9ts@)|.z(4]Ph9 M9 M9 M}BeBeBE_ M9 M}BeBS.4]; M9 M9 Mtqûtqûtqûtq M9,/ϩ4BE(ts@BSg`GBE(ts@BE_tswy,Xm%A/ v[.mCG)|.mCGoIv mCGoIvKCw2w2w颏v9ts@oIv ,s@颏v9ts@oIv9ts@)|.zh;]vvv[..sxwoIvvv mmm%A)\!U颏vayTm>F2mR..sX^(U2R.mj;]v[.rj;j;]v9N9.rj;]䱰NN9.Dmmv[..sX޹U)\޺U2[.Dmov[..sX^U"oIvayWmvayWmmrvv[v9t$j;]v XXmN=.DmN=.zh;EmN9N|..sk_-~ Ħ.F=tŦ.FQl"mkŦ.FQl*\CWlE^(6cSOĦ..sxǦ..sxǦwl2wl2wl"t ߱ޱQl2wl*|Ǧ.NGޱޱAĦ.zM]+NŦ.zM]9 6. Ŧ.SbS=Ħ.zM]ZT":T(Q]JTJT}EDu_Q].C%>Bj%POP9Dus@E%]9Dus@ꢿD.ϪDus@OPay}V%>B2Y.4 %gU,..sX^UOPay}V%>B2Yw2Y..sxY..sX^U2Y.z(Q].JT=6a5E Bꢿau_а ,_аS4.kV5 >$CꢿauѧhX]V}*E pauqQ2|7.7jX]yV!VhX]yV=$4.7jXKC Ȇϟذ{_19ذ{_w9Cj}3gLPci}33}4?޷7c9?ޟJCi׽4D3φ՘y}?hϼ>ޟ&a5g^ 'dϼVa5g^ 1?jXWjcT ;UcϲQ6S51vL1K?c? qT}Nrc?ʍqTqT}NՏ5>{r>ӯco}=>L?{mO}cc؍{O'|cx3}~]>8;Nc砟~T>8}_Ygcw;Nc[Îlu}:>YL?"{q{̫4옟yt|ϼ:Nc~׏'c|q ԻO;Nw)|_*|ȦR?۠J!@T M n*SJˊH! \F:_X\F KH!1a)WB?&,#T)|B<@p)vB~qw)\2RAepH2B~w)\2AB~w)\2RAe*#TF 9e*#TF y2RAepH2e*#}2R9z,#߲H2R-k9z,#d( FQ9otpy}s`( FQ9o}s`( G/B? }AŏB_0~"T(EG? }AŏB_3~ T(\QAgƌGQ+ƏYQ3cƏBρs`(MƏBρs`GQ90~.sP(\Q+ƏeT( G2ŏeT(\QAW(T(\Q+Əe됎Ba(\氼HǏeˋBa? 9ǏB^8~r\ޑt(? 9Ǐ*~rG!ό?:Q98~spG!P? 9,/:~tpyUpG!P? 9,:~ pުG!P? 9,/:~tPpG2ŏe[? yчG!P? =ƏGQ90~ s`(? =Ə*~zG2ŏ?~4? 
9(~.sP690H.Tp)=K!\ y%uB#9 .Grp)=K!\ yR'0.sPp)\Ae .\ yAe .Grp)\AB#9.sPp)\҇ .KRkUBρs`p)\:)\ yR90z .K5DBޯ8.>2`p)䁲K!W\:R2K!\ }RB1z .KR90tPs`p)\ }d}^Bρ'h .y\ }Ray'h .y\ }Ray'h .y\ }Ray .y\ 9(tpyp>K2}^Bρ'h .?^TSBb)(Bb3mB'0 SM@1x_j }TSB'0zL5\N5>SM2%FkL!S֘e+1}S֘Bo5?Zup)H֘~k콿F?k=ޟCc[g{?ޟ@c?g3{i3L;cy֘gy3g^6c~Y3L4c~Uc{?H̫jLc~Uc3Әy՘?4,LeckLcci'TcXc{4g1,7:30>{GO#gscAgz`}1>{ʍϨއc3ci} 7>{ǍOއqcﳸc1}a3|_;1}ѯE r}>9V~=> X+{L%ǒWcwkEc#1?yՊ?o3zWh;1?y{ϼkE!z׊B~w(ZAՊB~wCFsBќ9&9!9B~s4'0 gLGs* C~s4'+GsB~r4'\hNAќp92EsB~r4'\hAEsB~r4'\hNAќ 9(ќ yhNAќp92Ese|hN90z#9hN=b90z#9hAEsB8z\2vB]9ʹ턼rn'ݕs;3sPwW턼rn'ݕs; ywNȻ+vB]9C 9(sPpr;2vB]9sPpr;!ﮜ 9(sPwW 9(!s;N90*׹s`n' =v.U vB^:z턞s;N90spyѹw:>cn'!s;) yN;v*w:>cnCvB8Ԏs`n' =vBρ턞s;Nc v.;z>cn'\氼CNS;ve;>cn'\氼CNS;ve;>cn'\氼CN~4w 9,;.sX!vn'\bve;w =vB1zTn'!?90$_ Ջ_9ܡ' PO^ >c'W/zBD0$' C=nC=2%POa'8sP?u'1C=z>d'1C=POȟ:sp8'C=?bg}C7{~ccg}97{ycϡ^3}?AcO{Sc3{_ 4{|ϝ?_^CgLo&)އvc3 b}Bc񌽏އcúY3pG%cߢ߂m[ާc3ñpc3|}Z813>*{ ?oǸ?pއc31{/+3}gy{ϼ~=gO8!7;w82cO\؄A' r~ۏ36!Mo@T&|M؄MAp262ele؄MA؄2MAp26 9(c.sP&wqgl*c.sP&rgle؄ 9(c.sPCflBρs`&|/ =flBρs`&|/ =fl*cls`&36 9(cp26 y:MAp26 9(c.sP&A362 <\_+ٛ 7bo&7~f귢!fB~wo&3L/̈́v'X =qBρus`^8!\ }R:NȃsqBuHT'98!\ }R:·ㄼGr'I!8:N9zㄞ8U =qBρuG#\xv'X }R:Nayu'7] }R:Nayu'7] }R:Nayu'7]9:Nayup87] 9,o<.sXxv'X }R:N9sPu'ㄞ?//.?w' [>("[>(sp[>M E-_ }ȖO/l>7a' [>O9spI+.sX n[> }n\OA3v' s=\O;spi8 1s=?S){/lyco({kQ3{_N~ϙSf!3{oj >`ޟ/c[.cϖ2gK{ggϼ>S)c~2g^'cOa2g^'dϼr=)3g^1?Wǘ=&yޟWjcgUL{ꍽGc3Ə13>{n/ާ7cAo}7>{}߂3Y3>{ߍO>ӏ3Y3>{܍g3%kއvc31?y3{ϼ~=g^?3dMȧɚ cBEL?"8DŽc1w>& 'rc*OvBNv>&䟔 9(.sP>&\|LAfv>&\|AcBp12c*.sP>&\|LT>&\|L/DŽ 9(.sP>&\|̇DŽ1|L!\90zDŽ…1|L!\90zT>&ـ1|Lc*.sP>&!12c*p12c*.sP>&\|L!AL:\栟I3s`!&cBLDŽ_]9K~LX -ۅЏ 1g|ɅpyL{ɃdC&bB[M.%ed&beAbB9.s%edA 9ՅY =bBz,T!&ѫ 1BL9sPs`!&X } BL9s`! 1!BLAp 1U 9.sP!&||8\BLA9sPp 12be*T!&\BLAls! 
12bBͅp 1>.Ą 1BL& 1BL9sPs`!&X =bz,Ą 1BAbe*ĄT }BLAp 1/Y9BLA,ĄT9BLgbe*ĄT9BLAp 12beˏ] 9,w!CbBߴz,Ą 1\ =bBߴz,Ą 1/Y =bBߴz߻V+ٖ }NͶL eB-:ml˄f[2oؖ }ͶL=e-m6l˄c[22eej˄>f[&ؖ =eB_۰-sPms`[& 2AeB_۰-zl˄2Ֆ =eBρmp22eB_۰-.sP[2mؖ 9-.sP[22eej˄a[22eB_۰-.sXޒr[& 22U)e./K-.sXޗr[& 2?L9-msp[2!mܖ 9eBmsp[&ܖ yNAeBmn˄2Ֆ yNLAmp27 ݖ 9,/-mpF2!mܖ 9,-mpn2Ֆ 9,-.sP[&\氼dLAm6n|ȶL9-ms`[2!mܖ =eBρmj˄2L9-sPms`[&ؖ 9-񣩶LAmp2Ֆ 9-.sP[&䵍2Ֆ 9-mp?͇q&E{6!ݳ9M{AlB gcz6!ݳ g^=لM{AlB g߷لԳ 9gsP=pz62lBѹgsP=pz6!ܳ 9gsP=wtلԳ 9g!{6M9g=s`&س =l.lB^Ygzل{6M9|/c78 yMKnB;v'݄rb7O yMKn>d&Ŗc7. =nBρs`&9M90vz݄>ac7Mmn./o;v蒱pc7. 9,/o;v蒱pc7. 9,/o;v蒱pc7 9,/o;v.sPc72mneێ݄c7. =n*v蒱s`&%c7?Y^swySCe'w^yB1;/<2;/<7% }4OL4OQyBe'< =yBρiKFipRRp'\ 9(ip<2is`'ϴ9 i?G23{Ol콰͏13{k=Gm}>g޻{ʌ?d޷ncM?a>e}6e>Z,{Icϕ1>Lg#eϼ>Pdi>L+3${W3y+3g^i1?JSgcgnlc<)3~nǵcFg}L2>4{O?ӛ!cyp}x>>{3:c±g1b}D1>{׍>އuc|{ԍtc|{э 2k6c㹱pn}67>{}_c|杵 ;k{6!?Y Wgm1!?^ XgmB~:ksP? 'YgmBYYu栲6T$h gLgmB)Yp62emeڄ MgmeT&: 9(k.sP栲62emeڄAemeڄMAYp62eme|ȬM90kzڄ6M[90kzڄ6AemOBρYs`&vgmeڄMAYڄMAYp6 9(k.sP&|~.seAt90k1a&c¬M3ksP 0k[6fmBf&6藑ȐLڄ&6!9_FȐMڄȃڄdJ' =tB^v;zL鄞S:NntBρ)s`J' =tBz2J)ЧLm_ %m_9.O3)}B g'p}BѲK8>Ϥ }&ŶO9zl\: n>2OAmgRlmgRl>!q%O}~ʶz=ޟ7?ƶz콮5c5g{j)3{_75L0cϗ {Ncgghu%?W̳3g^)c1?@3g03dA2g^m3Ϙk{~3}qs>Sg{`gJWc;<3~{?#c\p}J>>{9cA`3r|},0>${'dc1>l،gcc|&ذL Xl}*6>{>ӏbc~հ{w&SMhBi%]IhB%2.JB%r&䟽\9M_.фSK4!MA%pJ42he*фK42hD].фT 9DsP%pJ42hB~pJ42hB~p&\MA%pJ42h>d&X =h/Bρ%s`&|b,X =h/Bρ%s`J4cM9DkK42hB~v&\AhOd/e*фT9MA%pJ4c{p~OzɄ͞LɄA<0?ldB~uO&?ldBeO&Y{2ǞA<0䁏{2ǞLCd2<0ɏ{22fO&ؓ }_ƞL9'z.b3%Aߒ0SsPԄ%a&!35oI }KLM;tfj*SҐwԄ4d25Й }LMAԄ 9(Ss`& }LAejBρHԄ35 }LM90SzT& =fjeԄ }LMAԄGb&\LMAԄ 9(SԄ }LMAԄ׌ 9,o9S.sX^6r&=35?LM98Səsp25! 9gjBsp& ypLAejBԄ35 ypLMAp25 9,/:S.sX5t&\氼nuIr$ٵm(T%r#cvmu[_/G05LMs$gjeg yLMa35 9,>t&\LMa352ejB#9SAfjBρHԄ35 yLM90SzT& =fjBρԄ35LMAǗ252ejeT&\LMAHT&\LMs$gjeT&9352ذ yM7la n؄nٰ9MO7lB_l؄M90pq)7O;pz,`壏ݿG?}W[=Z=6[=6a[=6sp([=a[=6Q4[=6[=zl[=[=2%VOa+.sP'\xxBYOk fs<fs<~ s~{@k=kӊ6k-r<_6 k~6M6k~m:6J6MkӍk}kӉצM}m\!_n!_36kk|MKtyxk~m|mk||qm_|t't#9sm|y$w6@6?ͷGrkw wksrkįM&}<;1dn6=tkӳMC!;76ckӿտ6=OMϭ6=65!;צ'V_9;iצU_U}mzTצU旾6=kCcJZQ!?y  
:/|ʢK8%pC>jQ2-p<u^.sߒ{a~K2-C>mT":/=t^:/=t^¹yy9s@%<C|CGB䡯$NJ@$\^U䡯$N 8yH<*'>GBCpyAV>Z@$'@>Z@O?yOh>xF$'}҆C<'m蟄s?yO.s'?ya<\0O2'=Oz< '?y9+<? Cs@I8Oz<?ya<\0O|I8O|es$'9CCp?y<\0O.s'?ya'9,CQ'9O|E'?yOr<?y9sP!('@Y'?y<\0O.s'?ya<\|Mpԛ'9,|S2o<\0O.sX>es2Sp<\0O.sX> es!W?C<_QI8O|E'=O¹s@Cp<?y9p?yKs<\0O.s'?ya<\0OIp<\0OII8OICq<*R!G 8C*'rC?hE923*rC?hE923*rJ8S.sX>res9%\>regUNyaL)=SA+)=S¹ZQNy9ZQN 唇)pL)=Sz, 23|h|Є3y؈<"0d1G@H<\ldC?Bg|Jɚ~dC"YЏ V)YODyG@H<# $kɚ~dCɚ~"dCɚ5*%k.sXJ<\yaN<\9YPz"YW<5ɚpi(YЏ͌ɚkڼkͷ_c|6ڼk||m~c\sm\#sk7rm\s6?ͷqg%]ox3Ys׼n5_g\5;Ƶ1/W|'6HWcJ\5d5_g|s6[>[59k0gsm=c\6+59_ckSkCkkkkkӲkò)rm~Tvm~Rvm~Pvm~Nvm~j|m~J1~m~FwWCs~%cs~^yq~% W* yr~%WBq_ 9(.sP~%\J;+2W*s.sP~%\J8+2Weʯʯ_ ++2Weʯ_ 9(AWBρs`~%_ =WBρp~*_ =Ws`~%_9J8`z̯+AWeʯ%UAʯ%UAp+_ 9(.sP~%_:oez,tYX JrPoӅfa%ߗׇBeK| J8Mg +6]ȇ!.J".t2M +2MwP?46]AoӅ6]AoӅTX `Ѕz,|:J9rP]X =VBρz.XX =V]z.XX yJ9rPoӅ<~va%\JA*TX 9/TX 9oGTX9JAp +2V.sPa%\J72\X9JAe.sPa,k'X; $[>ej'X; }I-Nvpj'2Nv.sP$\I臻k'INvz>`$X;9INBρs`j'I9v.sP$\INeT$Yk'2NeT$\IAgT$\INe~IApj'U; 9v.sP$Yk'I9võspj'tivrk'!I9vk'U; 9NB>u$\;9IȇT; 9vrPpj'2NBuv.sX>.IȳNeg\; yIak'U; 9,s$\Iak'2NBuvANBρgk'U; yI9vzT$X; =NBρk'IAǗj'2NeT$\IA[T$\Iȃ-NeT$k'2Ne`)gWNJ90-#*};z#*2cq?Ǿ??cd% -n '-Vn Pᖃ-n [*rPpU%+[v}[B?b%--X1rpd-2-X18n =[B?b%n =[.Ls%\| -2{n 9oy~˪KAUp%[V]BρUoYu9|. .yb]7ƪ˵N~m^׵y\[͵t6|6dGM]0 |6&{mӵrm|Lk?7/|_kUkuO6R 嚯yNwfr׼.;ɵFr׼.S嚯yU]g|6?>5^,\㽃k/RSy[͟6u<+$MS\uz'!_!/uz'!!=B$r#"=οӆyGȟH{GȟJ{T9.sP#O'=e{T#?.sP#\栺G82=e{CqPup!oӮ{T9.sP#\栺Y==Bρup~'X==͟s`#X7Bρus`O9z{?U9ί:?2=ί:?2=e{T#\栺GAup~ 9qPyf#_6^K8rPyf#٬{A~8{-W}{k^^K8g!4qPyW8.sk^2=p^ 9qPyWAup~%X==5s`#Xus`#X8׼Bρus`#_(:׼Bρu'{WcH=e{T8GAupcAup~]Au{T9.sP2=e{`/#5{'^ 22BO=6B?hei#Su6A+KOY}F3W6g,m>dbiJ\Y9.sPiJ26e*m~F9z,m>w`iJFs6Bρ*m>w`i#X=6z,mK26e*m>w`i#\A6B;.sPi#\A6e*mT}A6e*m>w`i#\F,mT9qPpJ26B;.mK!\96spi#\96BZ]8F9AK!A6B>hui#\FA*mT9pJ26e*mTi#\FAp(6e\9.sPi#\Fs6Bρs`i#乃KA6B;z,mKU=6Bρs`iJF9.sPi#FApJU9.sPi#䑘KU9H̥pJUy$FApJ G#1g8B9qPGbp<s#8 Gbg8Bρ'pT#yB Gbg8ep9(qPp22e8B(:qPp2!O9(qP'p9(Af8Bρs`#3 G90zp\p3 G90zpG90z xOv;z[I>XG2=B?aqK=?˳. 
~я 9.H  |O o]O9|] ]O C]s`$ ]s`$9|]p >ǵ忛tmu\55_m\|q׼tk^kFr׼S㚯y?g|6?>5^L\㽃kJSy?6kfՌwrfܯ A rĮfU_CNՌcv5#]9hW3B\9.sP5#]9qPՌp~AՌpfT9jAU3efù.sP5#\jFAՌpd5#X=V3us`5#XmBρՌs`5#_ =V3BρՌf6jF9ίT5#\jF8_ίۄT8jF8_ίۄT9qPՌp2U3uz3B_ LjԋW&5B_ Lj&AxJ`R#VU+Ip>Zp~$_ Iz*4 B`dR#4 9ūp^ 7Mez*\栤F8i.sЋW2%5ūp^ 9(oF8xzLjU90zLjWU90zLj[<U90dIs`R^ yFAIp9(.sPR#C 9(.sPR#_j 9(qPIp2%5eJjTR#\栤FAI'NjTR#\栤Fco'5eJj|INjFKIs`R#8w[fR㠒2%5B-3z[\ވr"}zEe-BρyЇi[O}̼E90oqpyKys`".J}s!:U,ՌOY}jFf>db5#ejAU3B2̕ՌG fT5#\jFAՌpU9.sP5#TV3BρՌs`5#U=V3B-zfT5#jF9qPՌs`5#X9.sP5#2U3hՌp2U3.sP5#\jFV3.sP5#/MU3ef~jFAՌpU9.sP5#jF9hՌsp5㠪!\9W3BՌsp5#\,Ռf!rfT5#TW3efT8jFAՌp!\9.sP5#\jAU3efT9qPՌp2U3B-qPՌp!\=V3BρՌG fUyjF9zfT5#X=V3BρՌfjFAՌǗ2U3efT5#\jFAՌ'mfT5#\jFȓ6W3efT5#I2U3ef]Fc8'5B9qPIpNjȤF90zLj<}wR#=&5BρIa8Is`R#=&5BρIǍNj\Ip}Ap}x~N͌GSLgMGa4j|?b#'qp>,ߧ ~rPupT$As*wAB? d$' rp a  :H9a :H9rp 20.sP$qCeJ|ƙ =CB?g:N~jt?Ș6 qmصyaͯ1rmڼkͷkK w>̵yW̵&sm~ڼkc\/߶s6\rg%]+x3r׼)[5_\5ɵn1LW:|'6HW:cJ\5tȯ1rmI\-kA<ȵ)r5 AK!G9qPo.\K!7GUqP%p,\;A]ne#[1BO]F UOYU5YU>j~ͪFV5B_`jTU#?YUjT9.sPU㠪2U5ej~ɪF9zj~ϪAU5BρUЏY=V51?F9qPUs`U#X9.sPU#c~V5ejTU#c~V5ejT8FAUpqPUp.sPU#sMV5ejT8FAUpj!rjTU#c~W5BUspU#\9W5B>tU㠪!F皮jU\Up2U5.sPU#\株FjT9.sPU㠪2U5ejT8FAUp!qPUp!zj!zjTU#c~W5BρUs`U㠪F9zjTU#X=V5ej?4U9.sPU㠪2U5ej<vU㠪2U5Bѹ.sPU㠪!\9.sPU#\株F90tɍ{NnTr#鞓!O=$&7*tɍCbr#a=$&7Bv;ɍCbr#\FAɍJn9(,ɍJnyFAɍJn0XG90zu;.sX>0Aρ%ЏhY͒A</%ϤX8|%,y~D+A Џc t p 8 pdw$6@B_ l~: O' t s`$ =6@B?d$ =6@.t$\|P pAM7@ej~XϒYȝ%5KqpvS90zLLJ90rP)p~)B =P}JB 8}pR(B 8}pR(2P*.sP %\J8tPo~٬kY; I8okY; m٬ zB8/AokvoZA/:2.sP$ 9pj'ZA/T; uBρs`$ =NBρX; =NBρzy-X; =Nyivzey$>GB<]˻Mizy 1g %nPB?f %COPB?f %S(|B }J+)J>a %J3P*.sP %\JA)JB 9('S(J90 )JS(B =P* )s` %B9J90zLB 9( )pR(B }JA)pR(B 9(.sP %S(B 9( )pR(z2.sP %\APeJB }/:rN<p %B9J3PB)sp %B 9PB>t R(!JȇNS(B )pR(2P*.sP %\J/MPeJB 9(rP)pR(2PeJT %\JA)gNT %\J3PBρ)s` %S(APB9zLS(B =PBρ)s` R(J90.sP %JA)pR(B 9(.sP % S(B 9()pR(B yhJA)pR(2PBρ)ǍNJ9z,.sX>J92a%3cYB_&lT%<,/YB?$ciUYB_&l~f6KȿN]2./;1rPɗp| ؉ɗp/w%c'&_B_`L~ | }(K觰L/K90zL~ K90zL\>!Ka/2N| }( Y`>Ls`B_>w` p&H`ί|mZצ|mmCko%_Fi#__~# 
ɵvi_n__^tt6'6&6i2__n;~nZצxͣ5_kӝk|wM7k=kLtCt?sm|V5^h|m\o_uVx@+kk?xmC_h<]mѫCU!mmӯ_6}imί=imm.s_zan<^za~E2mpes/pn<\0m.s6 6ܶyanޓz86pŐ.s_|8'pesek9mp~m2=6=mNI=жy9mO'Ѷy9ms@&_|9ms@>Xg6=mN=ж a=69̯ >\0m¹mpܶyanB&69m>B2msm>B瀶Cm?4Ѷy9ms@&~9ms@瀶Mg6}Cmes2meh<\0m.s6Qy(<' 5WBCPe(<nj|JpyJ>DPg:(@瀶M8m m6=m¹ms@瀶es2mgh<\0m¹mgh<\0m.s6ܶyan<\0m mpn<\0m mC?D2mpܶ es2mghT!砶CmsP&6yCAm69mrjT2mpܶ es2mgj<\0m.s69mpn<\0m.s69mpn<\0m.s6yM8m.s6yCm6yCmpn<䑘6=mzhۄs瀶Cm6ܶy9ms@2mǗܶyan<\0m¹mpܶyan< 6ܶyanB*CU8\y9s@@Y*CU~*eUy<\|]U~*eUy<\|]U~*eUy<\|]Up<\|]U*N|]UpvUqzޮ*eUy9ɜ>A2硯!$soɜykHˇ`ykɜ~ڌdHwyԅN8v.yG]<_v.yG]<\.ͥC?Bl/MvQb;ROPlBy'<y9 s@lC?ElCpb;9,Ul2þ<\0v 忛rm\5Ϟ5_m׼~_5ӏk |>/wk=kӹ6>5^\==k5zW1Z>ȹ|7G 9wzB( ;=0p yp'T'| <'mÝ7ΏyBA;>96O8 z*~B\9O8!O苚z*mÅ5 ?o,T wez*\O8 .sP'\O8A~e*T + ?U 9(~e*T 9.sP, ?AvB_}턾X B_}턾X9/Kh>vB_}KhU Op~&_B w2C0fm'ߺ eT9NApj;AAvB_ 'Wk;vp~^G }%/Wv`m'߿ C`dm^G up^G Nez1\p^G 9_:bAz1\eWN9_k;Ng턞k;Az턞k;*Az~N9sP#~NA#T9NApj;sPpj;.sPm'\NApj;U 9.sPm' k;U 9pj;dm' k;N9dm'X =vEs`m'X =vEs`m'X =v.sPm'\N+veT 9 fNS6sB959 }fE*7sB9OI }fNS6se[Un>4elsg?YS6.srP?[&B?er(C ɡgL>cr(CĘ ɡJ>cr(&B09tPɡpC2%eJTr(\PAɡO~ =&Bρɡ!LTr( }P909tPɡ!LC? =&B09tPɡpCB 9(9tPɡ!L 9(9tPɡpC2%B09tPɡpCB 9(9Sr(\PAɡJ 9(9 ɡ_tr( yP989tPɡ!NC!P989rN|?C!Pȃ'eJ|PAɡpC 9(9.sPr(AC2%eJ:PAɡpC2%*9.sPr(\Pȃ'*9.sPr(ACP909 ɡs`r蠒C! =&BρɡJCP909t2&B :9zLTr(PAɡpC 9(9.sPr(1C 9(9ɡpC yZPAɡpC2%e] yP!*U缮 BU yP!*U B!y] yP!*.sPU(\株AUe T yAUe ȒQC`B?g(KFQ9dz,T(X2 =B?d.KFd.sX:%ЏY2 9,_QKse*~ϒQaB?g(\|uKFd.sX:%*p(\栒\2 9,_QaBρ%ЏY2 =d1?KFQ,\:%s`(c~._Q9dz,p(\|uKF2._Aρ @ }R諏 l @ 0 G@ }8H>6]tH ԎpnH ԎH VOH v`8I[B?c p )I1z $I@R90!6I@R90tpdI2$Ov;.sP )ClBρ5I2d5I|26I1\pm3Ig;6|S6.smUc#\pm\o_W^1 wkE\otwZwkHk^??׏ksc׼3#y}M|{<~!׸7kc}\V{J_~G$S}:tPI%'BI&'>$S8? 
9U'Bޠd gd 9X'S8?u yr)Is`)~AAƜB޲s k9tPq9SțcN,s`)M1)ǜBިs yr)s }b)\栘S8.s+h2Ŝp^A 9pv9s 9(6ǜ*.sP)n9s 9(.sP)\栘s =Ɯ*J`X)߉ }%0J`XJN\+aWJNAJPX)_ w?V 痀p~E4Fe +V 9(}z;2R+az;2R+ap~,_;#C_ +/:X\ +/aX)ߎ az;2_ 9p~% 9pގ ez;2\栰R8.sۑ2pގ 9(ίJR8z +J2z +Jvd90z +ivd90I!JA9J2.sPXގ 9(.sPX)s*.sPX)se +V 9(.sPXJ2e +>GbXJ2B#1.sPX +>GbX)V =^ +JAz +JAz +JAe +V }İRAa2rЧC`(3cFB1rlP>Lc(FB0rlЇi>Lcj#G2Ee~fQAp+42 cu頪KY] Xե.Tu)caVB/եe.>/cu)>Vե?X] 9tPեpK2Ue.Tu)\RAեЏY] =VBρեЧ$.Tu)X] }JR9tPեЧ$.K?X] =VBtPեpKOIX] 9tPեЧ$.T] 9tPեpK2UBtPեpKOIX] 90K2Ue.Tu)\RAեЧ$.K!RSWBե.<%qu)\] 9WBեspu)caWr.|,R9tPե]] 9.sPu頪K2Ue.<%qu)\RAեpKU] 9.sPu)\AUe.T] yJAUe.<%qu)X] =VBz.Tu)䡡KR9tPեs`u)X] =VYRCCWBρե.?4U] 9.sPu頪K2Ue. '*Ip}N2<ϟ0>a> gBJ;A+3P!ᜁ: T@w`*\|с3P 9,_t Tsfe8܁pE@>w`*\|с3P 9,_t Aee8.sPE@/:p*\|с3P TsfBρ@>w`* }  =fB;0upgBρs`*\|с3P2 9(upgBρو }$FTKlD4و t˧݈ }ia=Q/M6B?wy_1SS/FB?d*\hU皌V~hU IѪ5 !-iGB?d* \ѪKѪs`*sMFBρѪs`* =FB?g* =F.w*\|RѪpI}GeV~hU90Zf*[FB]1Zf*Ϯ~%hյy -\ hյk36Ϳ\]\km|Lk{ѵy2.1,ݼkyFOkky6~_5g6̿6ȿk^_1}35_^_chյrm|LkUx`>(\V!WNZ\V yr*䄝 yrLZCv*Ip~~{PIsv*C1'BρIs`*:CyCs*1AUV!omNZT*UțV!ocNZ9izLZ9iί _AIp~/\Wez/_;Wez/\栤U_Ꜵ:UAI9i.sP*\栤UAIpVdT> B}2La S[z> 0U8wPa? ג-p~i57p~K/_Z apT2e S[zd*0U+az_30U+ap~A-_P;5C_ S9Z\ S9FN az_3_P 9}p~#' 9}p ez_3\0U8.s2}p 9(L/T0U8z ST1Lz STf90Lz SKqf90LyT0AyT2.sP 9}pT3LuPapT3L.sP*\0UAapT 9(L.sP*iT 9(L4apTd*iT0U90Ld* =s`* =s`* =*L.sP*\0U e S}a S>`*T3LHa' S\^xr*T&0L a' S>c*Tן 9(L.sP*3ce S }0Um.G( }(V 1Џ }(AŰB?f +cX/0ưB1.bX2uP1gta 9(uP1pbX2ŰeaT +\VA1Џ =ưBρ1'3aT + }2V90uP1'3acXAŰBρ1s` +\VA14 9(uP1'3a 9(uP1pbX2ŰB0uP1pbXOf 9(Q4cX2ŰeaT +\VA1'3acX!VȓǰB1a`c+9zllTc+ =6B?ec nl[3 9,V 6ew68p nl>`c+\|g[3 9,V 6ew6uPp nl:|g[2; 9,V98s`c[3 =6Bgup7Bρll\s`c+ 9,Va7ejl\s`c+ }i/З&[zup[/M6Bdc+CϿ?#ϧ8'X)No'''X %.Bj~ƖAZb%-pXlXl[b %vp)Ǹ%zl~,̖X9%zl[bXSBρ-s`K n/WpK,\|[b2B%zl -gl 磿_??ko\wrmk;c_6O|ø6tmc[\qmW\o_˸6'>ĵ.qm@Y8? =ABp,0Ɛ?Bρs`,(;Cρ2PB y3t^ y?t,сD*P@Yțe@AVN@Y90P ʐ:PίUZeAp~2\*ez2_<*ez2\*CV@Ae Վpe2e KbU wC$%*󻈡Y ϒX8xP%?X 痯]p~57!p~1_ s%pJb2e*}ٌf,^< XXlMp~^< XX8Zoڅ Wբ9xgcai8i.sЋjQ8Z.sЋ2xoڅiAp~.\Oeԋ2x.sP,,ߴ =Bρp~. 
=B?{c,, =s`,, (s`,,7Bρz47ez4\XAx.sЋ2B?f,ba2B?f,,\XXApba2*.sP,,\XX*.sP,, ca2>XXBρs`,,XX90zԻߡXX90zԻߡXX90zT,,\XXA'3^[VB?e+9^UY:^Ws$VBUHz>Ve+9^ϑX:lWAUp^߲.sP+\株W Vez>Eg|>,=З&{` }ii3{`;X쁅>Z`,X6B_쁅˥AB.sP,\Aeꁅ 9vP=pz`2B?mf, =BvP=s`, {`ABz쁅{` =Bρ=pz`2B.sPz`_ 9.sPz`2eꁅ>|az`2B.sP,feꁅ;XA=pz`_EB=/{` yX9r{`!XȧT, =spz`!6.sP,\Aeꁅ yXA=pz`2.sP,\XA=ꁅ 9=ꁅ yX9z쁅RXaH{`! yhX蕱=g^{` yX蕱 =ꁅ^{`! yX蕱.sP,\Aeꁅ yPAeꁅ4df,+%Ό3cAeBρs`f,caf.X903pmΌ>%af,\|ۃ3cOI 9,XSfe˷=83pmΌ>%af,\|ۃ3c 9,XA˷=83.sXpmΌ3cOI =f*3s`f,) 3co{pf, }J =fBρpmΌo{pf,\ =fB-03З&3c }i23Y*3c;3df,3c/MfB?Kef,I/Q]>2voye2gLZ ȝc9,N)`bz\[[{"g`'\.2Qk-\.2Qk-=F.?5Z }'PlOMBrgk-\TsZ }˝s`k--w.BρзZ =Bρs`k-Z }˝s`k-Z;­p e+/Z 9s`k-EZl>Gbk-EZ8#mkkP;6j[kP\'rm6}6{>-6V\?*_˸6N|LOkwkOK6:\55_d60\5kpmL>;͟揃kG|Gxͳvmк=vخqc]?>k;li7A=5Cl!?b 3! (t-B~:A~hPt-*Cl|6b;[90z Cl[9[ȏJB~X:!T-gCl!?b =y֐eCl|7᠞g !p~5&bZ9z,ZvBDϛzl[kZ =BρZ;s`k- [k2mn>Bak-\氼G(l=pB.sXZ#eZ;ZApZk.sX^\Z{l[kZ#Bρj>Bak-Z }n[kPZ;­s`k-Z 9,/pk-\氼­pZkW.sX^\[~ޕ>d-EJ[˄5+m߲vp}{WB_Ԭ>`-EJ[>c-?.oepr;8.sP.\]A5хT 9F.sPjt2eхAeх]A5pjt3X 9,]!BgFzх>`r.X }]9Fzхkt] Bρ5s`~eM\ 9,7q.\]=eM }3UxLՅ♪ 2Uwmum^ѵyD揔?c/۰ϊkI1뚯y뚯y뚯-~xkcƻ6>~L׸iS7Ea8#<Q׎AEB~h;Gz!? z|;2GB~|;η*Ǚz^iꅞz^90wPzꅞzn^OnwB~vݼs7/Gyxv搟s慾 TsoS|#XwO5TsAO5STsAO5Ts|S2=򦈻y2ue慞xlBL/4A ip~Dx`0#xL3l8?1x`0.sP/\4Y.vЗ y/vIЗ y/vp~t}ʅ$AuYp~V. 'Cd79Ipgez:\'Yp>]_գd.'vc?O6Byav<ٰ ɆA= ΏƆ][@lԣavA= v2= .sP9QpvawP pva.sP.\栆]A pvհ 9a.sP.Q6a.sP.v2=SA6Bazl؅v/M6Bρ s`=6Bρ s`=6Bρ s`9pvOذ csel.=FBJ06S5ͅ>`l. }(\3:BJ06P36ͅ>`l.\+L]Aԅ|Vp\~ܙpԅL] =$fB>L]Aԅ 9(S,ҙԅ yL]Aԅ|a.;]BρЇ/ \j]90p[- =BρpV e[- 9(pwpyw2 wd.) wY }ų~b.]¬\~SWk~06|kWo}⭯roݺ65Ҩo~e70k~燀燀צk3Kӽӭ̇ӭp͉HC~P!+ΑHCV!|(waHC~Qtg?#}#Y"}cY"}=Dz={9 sCR!? C~NT=E_8?W=EL8?_{8=p_ Ƕ.s~8=Ώm?\0?pC~uS/~a~l!ݨpC5C{5z=}8=?Q{8=pzxC5qN?Q{٣p\{9 pz(C_ =`^8?Wy}% pz(j8?WygN>`ٻӳwGQNOf?F'.s~8={pdeg1ߋ=[N5>[-u}koxCA.}8=?j{[#ׅc񺇾5x1߇~9̏>B.s}a~21߇xC)D.u9񺇾Sxes29^p{aׅs29^p{;ׅs29^7{a~$"u}񺇞u=DxCp~$xC񺇞uHC񺇞u=G.su}}?< >CߝDo֣OCBNO'P=zzH=z }Y>C߬G2O.OʨOpܧ{Ӆs2O7ѧ{ayH}>eF\&eov.٭&eovp'=$4UM|AMx\{!CB!\{! CB!AP2M&es!" 
&es!"{ans!"{an=\07.s@&Cͫs@&CMpn={9s@/x=4zh=Yxr5.sxy4&^\AMA0!*{[=Ayyn`^8A02 = y9,eP0Of{ay-y}2`es029p{a=h.sy97i(s@0Of{9 >A0`C .oP0`C .oP0`C~9,oP02M =A%y9,oP0W̻`C g=`^C=`C3F0/\`C_= y}#{Epy;y9,oP0P{a=\0.sy{a=\0.sy{a=\01y{a==c.sy9>BA02%}`C s@0P y=y=z={#z= 9,oQ029p.sy}`e`tjz}ţЇKNG5>aMsM%ڼk)ƚ޵յy@\?M߭_>kt$ϑkklͫ>C!/T9]??ӧǵ埛tmk5kqmԸk^|1}^\5]?,͟|czU5_zQ5_zM5_]?>>q]uq]uwq}LL/LL/3VO8]u5^g]|L_Sep\AGOB~q/䧁~U %eSu/!>Ȳ_82`p/!~!?\ =B~Dz,~ARS9z,~x_*8n;C~q/ux_8 :C~w/ _ C~r/\z<\ez<70p/ys`/ -|e|o8? zL_<zL#o8?zL#o8?zL/|2%Bρ e/_+ z;_+ p~5n=gC_ L|pk8?}P p~/ [Y?;gez;\gip=B{2ߓ9ᅾ^8?zᅾ^8?zᅾA=zᅾ^8?zᅾ^A;xez7\^kT/\^k 9(.sP/\Ae }{9 }9px sx63zᅞsx/MBρ9s`x=Bρ9s`x=Bρ9s`x9prxob3j>vob3jQ˳)څͨ]90j&6v!p.MlFBf򤊣vob3jLQ 9,8j0v 9,8j0v2IGBf.\c0𠂁2C1.sP002e T00\``Ap !``c8C ``aq.0``aqC>``!ϳ x`wk =$C!ϳ x``le T00\``AgT00\``ȳHe T00Y2e ~s`00 y4``90z  =Cρs`0𠂁``90,  9(h  y``ay y4``ay! 9,/tp00Ѽ2aM0X<`9&z O͚  kmX 9,opM0 k2  >aM0\氼a56 7L&؆5p C۰&.sPM0\栚`A5pjS&.sPM0\栚`A5&z >aM0X<`cCρ56 \^D`9&؆5ˋH\ =Cρ5p"eˋH\ 9&xpyk2E$ kWX }Pɚ`+5kxCPfMrC_ >_aM0Ϛ`ʬ >ndM0\x./WpM0\氼\5+ T 9&.sPMj2e T<`A5pjo(&xP5pjo(&.sPM0\栚`eK5+ ko(&xpI@&z >_aM0X =Cρ5s`M0 k`9&xpyk2; W&.sPM0q#k2; ~g >4d00E``8} g0=Ok:͟揊kk2>&͟L9]?"͟ĵ埛tmtkkpm`k^ |C1}$\5w]?|cz 5_z 5_z5_ ^?>׮rkӛ׮{׮[׮;>W]5]5׭]5槭gy ߙ>Lxa8 [|~%䧒!9^rT02 CN_/ |C_/ 3r0/ qxa90^z Vxa90^z/X} ƒJp0dž>&ߗ }1A;!59AΏaȯNN!=9AA=.s2={zAOa90AΏ 2AxP p~,8=^&p~,a8?ΏSa8?ΏSa2A.sP0~hyk0^b05xA=Zb05xa8?Kԣ塯! p~6-?xa8?<hyAԣ2=Z.sУ`90$!pTH0 CO!gH0 =ACݞ!z~7 w{dH0 9pBnA }!  
}!pB2e <`A!pBou1$xP!pB2$.sPH ː`90$z ?4 =Cρ!z= =Cρ!z= =Cρ!z=\ge ͐`Y ͐`.Ϭ8$V4C`[ &b!xp]7CMCb1Ѽ&bȣy7C>&b!hMpD 9.sP1YD 9,MpD y&bAMpd1D =6Cͻzl"&A5CρMs`1D<&b9zl"a01\`bAp2*.sP01\`b &L 9(.sP0 &tL =*Ls`018z &>a0Cρs`01\氼ppe &\^`bayኃ2+)>aM1'kb 5s`M1h.opM1XS }2Úb9V4k*YS =.opM1\氼a5'3)TS 9.sPMj2e)TSaM1XS =Cρ5s`M1 kb9xpyk2C).sPM1A%k2C)~5!)>ndM1E͚b87œ5kkP;6jkP\'rm6}6{>-6V\?*_˸6c6J\3]tm6B|L~n^ҵy'õᚯy},\5kgp׼^Hwm<6\5]t|Mt|=t|ͫxmk^k^5kסs|s|s|s{[;5ksZxͳx8b90z)rN1S =Cρ9Ńݬs`N1S =wB~~Đ .&t`11w"C_`,&^bboF.&!둋<{oH.&bb8?~Pϳ<{AϳbA=.s!o0{Od0=z a`  } gC_ `SngC߿ef0[fCg03xpyÙp323.sXpf0Tfe `ayÙ23z 323xPˆ323z 3`903xPs`f0 }[ ~9(3hp2o 33.sPf0\`6fe <`Ap22e*3.sPf0\`A  9(3s`f0 }JAeCρЧ$ 3 }h`903z Tf0 =fe ~9. <`CCfe <`Ap2<`A  }`Ap2 9(3.sPf0 3`983\Йspf2! 9gCspf0 yAeC2!AeC83.sPf0\Aee  y.`Ap22e*3.sPf0\`A  9(3\Й  y.`903z <&wf0<`crgCρs`f2`903z Tf0 =fe ?49nsPf0\Aee  Ԉ3 9(3ge Tf0S# vf vf0&. K!] 0K’U] Co%. K!O] 0K’`we* TI0\栒`A%Ǎ. TI0\栒`Fe* TI0qK2e* ~%s`I0X y`9$z, KU =Cρ%s`IJ`9$%˛\ 9$%˛\ y`ayKU y`ayK!O] 9,o^pI0K2 . <}wI0\栒AC$.sPI0\栒o3!8<` af0\`Ap22e*3.sPf0\`  9(3.sPf 3//MfCρ >af0 }) 3t<6řs`f0 9,Mqf0\aym38 9,Mqf0 32)n>a0'aë [a0Ml6./p0V }a議A&68 6./p0\氼D g:l 9A.sP25ej <aA pobAxP pobA.sP0\a36eKM g:lobAxpJAzl>a0 =6Cρ s`0a9Axpy92mjA.sP0'2Dn>d0 O1( }3PΧ+e%ڼk)@յy@\?M߭_>kt$ϑkklͫ>C!/T9]??ӧǵ埛tmkkqmԸk^|1}^\5W]?,͟|cz5_z5_z5_ ^?>W]czq5_zm5_zi5_zeƺkkk?cy g>p^ηp8>ߎ zaojܰU/ uCv0W6 ?za8s07 $ꅡza9^s`0X/ = Bρs`0X/< Aρ t`0w" C_C ^@aoFs!9P|oH@a8?&Pɇ@aAɇ@A=&.scd0 oy +c0 C p~7=a蕱A:$|蕱A:$|蕱Azel 3/fp~$6e`˄zB `/&eSNu 'eJu_ayɿз. 
9,OQ8A&Bρɿs`/K& s`/|&Bρɿs`/ =&*zLo2wPɿp6%Bg/\_[L 9(`ɿp2%*.sP/\_AɿJ 9(.sP2%eJ>`/ =&BX0wPɿs`/A%B1zL =&Bρɿp2%B1.sP 9(.sP2%eJ>c2%B1.sP/Y2%eJT/\_AɿGL!_ȣ?'BɿJAU[UBߧf/)~!];_SLVB؆UPU6>d/X }lê_9wPUs`/X }U;\ =VB۰.sX_cVe;\ }lê_ayg~mX 9,lp/4U 9.sP/\株_AUT 9>5~2UeT ~_cVBρU>a/X }lê'~mX;Us`/X 9,/?q/\氼Up~.sX^~_ay~d/ ~8Y V^} ~Swpy~ª_cVBoUY }ɪ_議wpy~2U>a/\株_AUp~U 9.sP/\株AUeT }UT }Up~2UB۰.sXL>a/X }UKUs`/ ~_9z~mX =VBρU\ 9,r/\氼Up~O1Y 9,r/)&~PX }Pɪ_+Up>'~/Y]GtmH3V߯6irmnumj1}\sm 6\U]gsm^rm66qm>=,ܼkG5^]5k5_̸k^⚯yaqmk^ӋkKWq׼Ow׼~Jӻכ{[>]5W]5]5~Fxͳw<~r_uηp8>_r r_oc.ܩ}U 徐cu/2>r_8r/3}r_9s`/X =׺Bρs`/X;_ r 3!J`/7B~q/ 5_d gC~q/zf=7B~q/s`b/ H!o"9zHLs{!1s =$&p~7K=$&p~7K=$&Bp{Ճdk q|dk q|w q|dk8?zPz Gp~|(_8?.sЃ2=}Pzw9O΅X wAVAW+z.ȊA=HcE/Y IC_} +z i?Ƭ腾5Š^AзFX;Ie腾5Š^Ap*z2UeTE/\栊^AзFX;^Aw Y 9AVB)dE/X =V^腞+zA=:z腞+zA=:z腞+zA=:.sУӡ][\p. HB_`݅][݅]\"p. HB:w swo@2w.sP.mDeC݅]ayз 9,8wABρs`.mDs`.]uBρs`. =*wz݅swoz2wwPprwoz2w.sPrwoz2w.sP.\]C&e݅;]Aprw2*w.sP.\]A݅ 9(w s`. }ABρg݅sw }$]90wzT. =e݅ }$]A݅>c.\]A݅ 9(w݅ }F]AЇ݅ 9(wwPprw2T 9B9wrT.sw!]98wr݅sw!%;]98wP¹sprw!% 9nsP.䡄sw2e݅Ȣ]9hz,څ<`w.X =BρE*څv]9hwPEs`.X y.څT y.څȢ]s{B̰hE+c. v!];]9hz,څƢE.څvOfX 9,/:p. v2E.څ>a.\氼E'3,څhdEpv2e*څT;]AEpvoEh.sP.\栢]AEhz,څ>a.X;]BρE'3,\^D]9hdEˋH\ =BρEp"eˋH\ 9hwpyv2E$.څh.sX^^Bzel?n 2Bf BB->l^[xp-+c Be  }^A-pZx2.sP /\^A-j 97[x 97[x2ej>a /\氄 }^97[x[x^Bρ-s` / =Bzlᅞ[x.sX^N^ay9[x2B.sX^N^OB-W<[x|__+6gl]_]tmϵc(6OArm66ڼ31W5^g]u|5_g)}m!1L66|L??7]u]l_\|Lkk¸k՟6?^w׼^w׼^w׼^1%y#y!y؟6kk?ku\u?ηk8u=: \o@υܩs՟ 5cu.W!>\8mr."s\9?s`.؟ =ꗰBρs`.؟;_ ٟ o!?J`.7B~q. 5_dܟ GjC~q. z<7B~q.o cL{3N̅ s`l8?~z+L̅`A%Bop~0 Cop~0 Co[ab.\ GMC_`LTb. 
GMC_`L̅;'C_`L̅ GMQӃz:\8?[Ώ gٺp9z:\'?\˄-z39 }el#/B-p~3e\迗Ώ`#?5ق }-p~3\G0ez3\\lԂ 9.sP .\AejԂ }-jԂ }׎-p [pڱzl[p/MBρ-s` =Bρ-s` =Bρ-s` 9s`r[[z,LXt }KE7wYt }KEзXt;˻n!)]t }EwYt }EwXt 9,.kǢ[a9w-];eˁnd-Xt =Bߵc0 =Bf-Xt =BρEs`n[9#nUt 9#n2m*Ȣ[AEpntXt 9.sPn2e*Tt;[AEpn2.sP-\栢[[,n[[,T-Xt }Ţ[9vPE'P,nABρEs`-\栢[AE'P,~9Epn2.sP-\栢[6.sP-In2B-.sP-\栢Ae*Tt }Ƣn![ȓ6BE*Zp-\栢[AE'm.Tt 9.sPn2e*Tt;[AEpn!O\t;[AE'm.n[ȃgBρE*5so2eʽ{ so[cBρʽ>a-{ }l]#νsom{;kĹs`-{ 9,q-\氼kĹpr[ay׈som{ 9,q- [pd .Q[pONق =A^l [pwpy[p'\"BO-ق }r\ wpy[p2n>*b .\\A-pZpՂ 9.sP .\AejԂ }o-jԂ }o-pZp2B.sX=n>*b .؂ }o-K-s` .Q[p\9zl[p؂ =Bρ-k܂ 9,5r .\ay[p؂ 9,5r ZpONYt }ɢ[Ep:<DkWM[4M Pt-k 6kgצoH_ ]?6 kצOMߍ6]_k}>}_~]}ܾ6Sks] צM_>6}_ k_~~m 5_۾k~~{۵m_5?k}ڶk[۾k~~g|ol}mPX)5^(}6~mzkӓ _6S!RpppοGV=#gjo\{{ȯH=kjoQ{{8m CoӃbN?VP{{ȿ7P[8.skoGNO!={8=З&:q܉{8=pzF/MtD'.~K4щ{ȿΏl?N顼3G,?щ{a~d2pz(/~9 Y>*{?O^C牠C AC牠C AC,NY>7Nn,n9Y>\0Nn9Aes29AesD AC߉@2ߋ=9z=t{KAn=߇n=z=z=t .s}9 . =];2A!ot{v=];1ny,C߉@ob#7t{s=\<CߘC2n9,(vAn=n|CAOCAn=zs瀠CA[8.sn}3A~9Aes29t{a=\09pt{a=\09pt{a=\09pt{a=qz=t{{s瀠C!s@-n}TCAnt{9 s@29pt{".sms29pt{.sXUt{a=\0 AC- pt{as29pt{6*sP!Ot{9(Aes-nn9,>[n9,>Ez=䉢nn=t{9 s@!t{9  =& =t{v.snt{a=\0DQAp=\0DQA[8DQAes"n=z=n=z=t CAn=9s@瀠C(*.Res!t az]A_W!t{az]A_W!t{az]A[8]Aes"n=9AOCb* sO1t{C}A=䡾n}lCb"Dmt{C}9s@瀠CߧF-\CA>A2 = n9,oPmt{ay;n}les29pt es29pt es29t{a=\0.snt{9 6=t C s@mt w(s@mt w(s@瀠e˻Ft{ay׈n9.=\氼kDA>A2]# = m}fC+C[4zeh=o4 j=l{ 4zeh=o4pͶ^m5.sX٠fCpl{an=\07¹pl{an=\07¹pl{an=o4¹pl{h=\07.sm}feKGͶ> BfCF-\=j=l{ 4zh=l{9s@Ol{9s@-\\fe˛l{aysm9Ͷ>E2Ej=˛t{#NF|GݮߣC6g ]B]7rmȵc6Yqm6q~]l租rrqpoӏkkٮ?ۮ?ڮ?ٮ?>7]??ծ͟k'f6(6$k^ak^oa^v׼^v׼^v׼^1}횯y{횯yy횯ywf6_505^5#kqkki??ABq-oAp#{Pp-W'B rjo!?ko!CA#ko!Izko[OdBρs`-X{; =Bρs`~{+'koA=koB~Es-4Yܐ\{ M͵p~75B~]s-M?GC~es-6M90ņ}mt =p~4=t b؃ n `8?Ώn![ȿ78z T-\栠t 秐p~F5ɠAp~F5ɠ[KAzd;ɠ[KAu}i23,?t 9pnPA=z̲Sa-ߑe;G%C0H̲󣒡Te w$fQp~T2 CYΞYp~T2)fB2`-\,[[̲e 9(.sP-\,Aeeʲe }ˀYʲe }ˀYpld-4fBρYs`-,[90z̲#,[90z̲#,[90z̲#2=A.'.Z+wX^ }/wX^ }嵃9kз X^ }C7Y^ }prZ;h,BAcy-\<Y^ =BρwX^;Cρ7Y^ =Bρs`y-X^;Z9z,kAe*kZA嵃*kZApkob.sPy-\Ae*T^ 9vPpk2e*Ty-\ZA7Y^ =Bρ7Y^;Z9Ls`yktX^ =Bρ嵃*kZApktX^ 
9v?鰼.sPy-\]eS.>dy.kO ɑʲ%9vmu[_/d2Ҏ05`.sPykOX^ 9.sPyk2e*>Lcy]^ 9Br.Ty-ak!Z9r.k!O\^;Z9嵐spyk?T^ 9.sPyk2e*d\-乽jOfW }PɸZ 2dqT\-W =Bߊf\M}Bρq'3o;dqpM}B0.sXZeW 9(vPqpj2eT\-\核ZAqзW 9(.sP\-\核ڇjOfW =*Kqs`\- jW =Bρqs`\-W }2øZ90vpe/8.sP\!_ q\-\bj2>d\-ij?W 2?nW 2Bf\ ^jOW 2Bf\-(jWƸW 9(7jW 9(.sP\-\核AeW 9(vPqpj2Bf\j2Bf\-\核ZAqЧALjOW =Bf\q\-W }ĸZ90zjZ Bρqs`\D'r\-\8j2B1.sX~qЇ}ȸZ#NB_Ԍ'DծC6c]B]7rmȵ3Z\qm~6T|IoH?Ho?Go?FEO5[WҮ>]_ͯkg ]l]j]_ Chh|Ѯ]5_CL?v׼~ 횯y5_c{Zȿw.p]ηk*].|_Zȿ|.Ty-+Bsy-,>dy-EBBρ嵃*kZȷ.kZ9vP_ =Bρs`y-k2}Cfoe }0 l!2:O|,[βS!::ͣl!z9 l!B:w|>dy-,Bqy-Z8?4ϭY^ fك*Y^ ِ5\^;Zȏ.Z\*Ty-\ڇ,#J^8?dyk^8?dy-A=dy-ZO.ܡ/Mp~5>Z8?.s2zC&Ba"x DZ3LH ap~1x C5a"-g{&MH ٞp~1_&eJT"-\DZApiH 9(.sP"-g{&*.sP"-g{&eJ}DZ[]LiDZ&iDZ90vPꆞiDZ90vPꆞiDZ90vPa Za tf- +VBw+hoX6?+h~WB߿e-,V.'TA }7pr ZYW>d-XA =VBb =VBe-XA =VBρs`*h Z9+hUA 9+h2U+h2Ue ZAp*hUA 9.sP-\ AUeTA 9vPp*h2UBPf-XA =VBPf*h ZVBρ>_a-XA =Vz+h2Ue>_a-\ AUB7.sP-\ [Weˣ>nd+hXA 9vPTA 9vPp*h2UB+h!\A 9Wsp-\A 9WBT-\A y@ Z9vPTA 9vPp*h2UB.sP-\ ZATA 9.sP*h2Uea"-䡾iH =&BρH;|މs`"- i2+N>a"-\|މ6L;؆pi2%eJT"-\DZApiH 9(.sP"-}j&eJH 9(!iDZc&BρJ>a"-H }lDA%Bρs`"-H =&B0zL\~1ĉp!NH;bi2CH 9,DZAЧLDZ1vP[&BзH;iWDZ &BзH }8DZ1vp'eJDA%e9(4pi2%eJH;DZApio3vPpio3.sP"-\DZ &eKƉЧALio3vp8zL> b"-H =&Bρs`"-iiDZ90vp"'e/9.sX~ȉp4pENT"-(iDWH }Dχ@&ҮC6cL]B]7rmȵ3Z\qm~6T|IoH?Ho?Go?FEO5[]wgWG3zڵr|m~5Lkk_k+5}a"2~0횯y\5_kSiҮ]5_Ik^cL]_vg"y&ҮOV}j6]͏t\6]O>o4\6˻skƵ|ǜr r w)pη忧sU[N AB t-iBtC&>Z90vP)s`-r V)s`-r =+h[90zLWBuq-\栯}ȔA=|MSn/Bu-[[inr )p~8[\Bu-[DBu-[]r v)7r }3xp~Rnxip~7g"*cSn!?9.WRnr 9(Pz떝p~0 }ݲvPp~0 }ݲeJ}ݲe-Gzw-;o!?T-; 9p:oL0ÔA=_)fr 3_Hf0Ô[8?f󃙡`0=Sn`0=Sn!o9=Sn2eJr 9(vP)pRn2BgRn2Bg-\栔ۇLŔ[90zLxi2zLSn@q90zLSn@q90zLSn@qAZ[{R'kmI%kmIkm \k }O篮Z[[EZ[V\k }Êp kmoX.sXDpCBρs`- +s`-[Bρs`-Xk =zkmvPpjm.sPjm.sP-\Z[¬Tk 9vPpjm2eT-\Z[ApjmUk 9.sP-maBρs`-maz>%a-Xk;Z[SBρs`jmZ[9.sP-\Z[SeT-!km2e\v-\<Z[CC.O pjmUk }Z[ApjmUk 9.sP-km?Z[9եZ[9vPGbkm!Z[9r<pjm!Z[3B<p-\Z[Ao|.sXZ[Apjm2.sP-\Z[ATk 9H̵Tk yBZ[9z0Z[akm!p-\|sԵ7G]k =B~A˷d y [+A|AWaCB;dA lOfd yn ABρAs`-h.}w-d }2 [alOfd 9,}w- l2>a-\ [AApld 9(.sP-\ Ae d }+Apl2e } [90dAs`lOfd 
=B0vPAs`-d =BρA lp-\l2. [6e~8.sP-A%l2ze T-A+c-o. [14A+c-oB2ze \~?Aplo3vPApl2il2e d 9(vPApl2Bfl2Bf-\ [AAЧA lOd =Bfq-d } [90z l [ BρAs`?r-\8l2ljd 9(4ApD>e-\ }ݲΧ͵k[kS60kwI\Wpm~M6C6AL/\_>k߇kw_kw߆k]e? ? ʮ/?Wkg]\\_ohWe|ʮϓ]5'LMv׼~욯y.5_Uc*xk!zh?oB_C |[S!߆;Cnr|7[|G[}}yȨ[[+!p~(8}3Cr }3OAC!?9vP!r !pJA!pBn=P♀ W<p W<px&1W<px&B޵p}3vp2%B2`.\wpvPlV[3l3A= [mf8?l^6[m?۳l^6[m?۳6[m?۳.sP-\V[ApZmj 9.sP-g{.sP-g{ej}V[[]l[mV[&[mV[9vP[mV[9vP[mV[9vPsl,1ḏ[軓̱[;Ḵ\op-%B:s6slo2F+slo;1.sXvp-m'e#α[ay9c =Bρ9зc; Cρ9зc =Bρ9s`-c;[90ẕ'Aeʱ'[A9ʱ'[A9prlo2.sP-\Aeʱc 9(vP9prl2eʱT-\[A9зhc =Bρ9зhc;[90Ă9s`rlO,c =Bρ9ʱsl[A9prlO,c 9(vP9x̱nsX7vısl2cB1vpy9зc 9(vP9зc 9(.sPrl2eʱ>bc 9Bz9rαT-䩗sl![98rαsl!o;vP9sp-m~B9ʱ[A9prl8.sX [S/eʱc 9(vP9prl2eʱT-\[A9^αT-\[C`Bρ9s`-!slAB;ẕslc =Bρ9s`rl[90.sP-KS9prl2*.sP-\αc 9(e˗Cc Lsl2ˡα\[90 oα\[3tB_̱<4t-#%αc yAB_̱|aq-\ZAŵpkU\ 9.sPq-\Ae*T\ }ŵpk2e*}Z9ŵs`qk_X\ =BvPŵs`q-X\ =Bρŵg,k_pq-\zk2.Zae˯w.sPq-Y˯w8?9pc f-c }9O$8ẕ> b-c }9Ї̱ slHp-\[̱T-\[A9prlc 9(.sP-\Aeʱc }9ʱc }9prl2B1.sX;α> b-c }9K}9s`-isl[90ẕslOc =Bρ9Km9pDα'r-\[ eˏ9p9s`-u[8bc6U6o*6_6?Eqm>&6}_76?Aqm~6 \~5c3}?||qj6mL̽6w\O;g?[89?[$']8[Du-b|uro!ߐ:%ʉoJxp>H ̉pmzLT-x =&BMs-x =&Bρ[90zLooۜx 9o!ߺ}7~Fp.>8\諏q iqp~f:B~dp.q q q;8\wߎÅ8\+qp~~9%g._*g. 
C~up!?:sppp2Bρqzb?8\eÅ;8\8?Ώ( }3wPO쇾 }3Wp~+q7( 9(&p2;'HCo[O:쿅A=Aoi8?AO/o?O/o?ޢo?.sP-\[Apo 9.sP-.sP-e}[`쿅o[&o[9vP7o[9vP7o[9vP7oN2o(x &Bb-ɲo1oo2vpy‰зKx }󈉷˳Nx }󈉷poo1.sXpC&Bρs`-#&Ys`-#&Bρs`-x =&*zLo,1vPpo,1.sPo,1.sP-\[Lx 9(vPpo2%eJT-\[Apox 9(.sP-V&Bρs`-ox =&B;0zLT-o[90vPs`-x 9(.sP-o2%*pm9(vpyىpo}i.@;.sP-1o2%Bg-\[AJx 9(`t-x y[98vP[No![98rNA%Bwx 9'*No2#N[ao2>r-\|ĉ[Nx 9(.sP-\A%eJx yA%eJ_a-\|'+|p*n2UeT-\栊[Ap*nUq 9.sP- eVeTq 9!+n[VBρ>_a-Xq }ŠAUBρs`-Xq =VB7z\~pTq;+n278\q 9,[AǍ}P[ ^Cm^ ^Cmp3vp-B> ^Cmp3+c[j } j 9(.sApBm2e T-\P[Awj;P[Awj 9(.sP-Cm2P[Bρwj;tyj =B0z CmP[90s`-j;4j 9,?1P[a!e >a-\ĐCm?j 9,?1[苚p>Ɗ۵}Եy(\_5~k[kF6`\>]=}WWGO5k?6^my6 ^WmyWTj666]Yj6}_k^%v׼~I욯ygk#b|'ĮKm旺kYj1ڮ%}*6]͏J\_],})6?%qm~66Kmߏ5kk5c|3}|qm>Ѹ6]+n|*n!_\q {z|;俹+nVpW-QWB!u-˗+n!ߔ!+n|+n|O/Xq;[9z| [9z+ns`-Xq =Vo!ҹ.s7Bs-NJA} gWB_}[O3!?L+ntOL[w뮸\[OT-oBBs-EP[8?_}Q3Ώ(#j }Q3d#!?:vPBj 1ԡp^LApBmPA=g-2B_ T- 秐C_ j;C_ j yk¡z(?P[j y¡p̭Z8?yPzHZ8?Ώz󣞡'ZO̭󣞡'Z?^̭AZO̭aZAprk2*.sPn-\ZO̭Tn-\ZO̭[7[ =Bρ/MBρs`nQ=Bρs`nQ=Bρs`nQ=Bó^ j^ jհvpyEjXT }EkYT }Eз.YT }Eз.YT }Ep$j.sXpQ-XT =BρEwyXT;CρEwyXT =BρEs`Q-XT;Z9z,ĢAe*ĢZAE*ĢZAEpj.sPQ-\栢Ae*TT 9vPEpj2e*TQ-\栢ZAEwDYT =BρE,TQ-XT }@Z9vPE,jABρEs`Q-\栢ZAE,TT;Z2e*.7YT 9,)f=j2aeB߬gQ-\栢AB߬gQ-\栢ZAE*TT 9EtQ-\T yvZ9vPEgW.j!Z9r.YABE7]T 9fj2{.|EpmB].sXZȳ+e*TT 9.sPQj2e*Ae*|EpEMB>Zaj/j?й_>sn3B;gn-䡡sk!)qn-Z3t*gn-sk!)qn-\xeʭ[;ZAprk! 
[;ZAέ[;ZCCeʭ[s`n-[ yZ90z̭sk[ =Bρs`nrkZ90йw[ 9( ݹw[ yhZask[ yZask!Н[ 9,ߕvn-sk2eʭ[ 9( ݹprk2>dn-[ =B9.j[JBۻd-b yn[BTzl>a-乽[lb }2[9V4[l}zl>a-\|-'3l}d-poB̰.sP-\[A-jb 9.sPZl2ej[A-pZl2>d-b }2[9vP-'3l[lOfb;[9zl[l[JBρ-t.sX~-pZlp-\@[l2p- [l[l^[l^l^[lovpB-ЧAl^[lop-+cCnb }-jb 9.sPZl2ejb;[A-pZlovP-pZlo.sP-\[ eKU-ЧAl[lovpzl> b-b =Bρ-s`-i[l[9vp0.sX~-pDnb }[6B.sX~-5[lty>k_kN~mz8ۯMo~mȯMצO6{6Z\k0aM6OצzkӾmMiצe4_vkӫܵkӟ_ k-zk6]k|Ͽ4k5_]d|?1k5_ϋCoצ_51~mz K6ytk6= kqׯM]0I_^mkAׯMǾ7צ6p6צ;6p6+6tkӹMwhm:9n|8Q}}‡mn_h=[R}p=\R}|ENSFQ}}‡nw{9s@!2s@瀾C}pC}n=+vR}=HC-Yw{}ܢC~rQC~xQ!?pzH!?aw{O=.ߟ}Qow{r=E7Fp=E9χcW}p=^cn9?pw[8?WBl=zsC!es#pb Cl!pb{r!b{=\<ŬC߬G2Qff=Bl9![8.sCl9!>BBl9J!Blb{ȳ+r =b{9(sP!o+!Bly^!Blb{ț =\|YC!Ye76b{a҆Bl9,P!Ϯb{a=\0.sCl9!p=\0.sClyv[8.sClyC!BlyC!p=QBl=zsC!Blb{9 s@29/9pb{as29pb{'b es!lP2 = =\|R!pFEC+WRC_˷b{+!b{3tG!b{GJb{.sCl9!p=\0.sClyh[8.sClyhes-Clyhes29v!Bl= ]!Bl=zsC!Blb{9 s@! b /5+pb{3tK =䡡Bl9,_jV-Clye˗b{3t.sXԬC+pb{a=\0.sClyes29v!Bl= ]!2n3Zk}CͫЇ/h]Dk!Z{,^Zk}Cͫέ>|Ak!Z{hWZ{9Ї/h=\|[>|Ak2+j= Zk9̭یZ8.s[k9̭Z8.s[k9̭Z8.s[k9̭یesk2pZC>|AkZ8Zk}Z8zh=Z{9s@k"Z{9.eoppZ Pk278Z{a .s[k}E,Zk2uC ÍZZk2^Zk}>DkWZZk9̭ÍZ8.s[k9̭Z8.s[k9̭Z8.s[k9̭ÍZ8.s[k}esk|Z{a樵>h=Z{;hK=GZk}CZk=zh=Zk=zhsk2CpCj=Zk9,?ZrevŴY''Ӯ͗j61ӮoWB6ٿ6 6L?"Oصy5~1^xm~e6/6Ih|h!_]E ;z|;\E _Ua]E J*AUBu-˥h!̺!h|򍢫h|G/XE;*Z9z|*Z9zh}s`-XE =Vn!B.sB#~OA[;%(Z苚[ ȱp~;g"B~*rl-ȱ[ aűd?w8fl-gBsl-EZ8?.}Q3=s[ }Q3zs55ck!?;Sck2O⎭[z?ZO[;Z8?Ώ6[ }3vPO[ }3Nck+K[ 9(ckC8{jS{j,g8?r{j?{j?vPrl/=BgOzjZA=pzjS 9.sPO-p.sPO-pe}ȞZ{R쩅{jZ&{jZ9vP {jZ9vP {jZ9vP Y kY Bc,>Bd,>Bc,Bc,eS5Yays`,> =BagY[# gY90|z T,> =B7ag2B7a,\AB7a,\YAз> 9(|.sPg2e >;YApg2*|.sP,\Yۈ gY T,> }Ws`g3|z g> =Bρpg2B21|.sPg> 9(|.sPdg2fB21|vpyw> 9,O9;|:g2e T,\YAЇL g!YC&B᳃ t,> 9B᳐sp,]u*|rY98|vP᳐w> 9,p,]ue6> 9,p,\|m᳐L> 9(|.sP,\Ae > yAe =B:|z T,䙫gY90|vPs`,> =*|z g2?4> 9(|.sPg2e |᳃ > g2K|"pI._t,䡡g!`RY+᳐gT,Y3tB>RYe >;YApg! 
>;YA᳐>;YCCe >s`,> yY90|z g> =Bρs`gY90|᳃> 9(| ᳃> yhYag> yYag!> 9(|.sP,\Ae > 9(| pg2>d,> =B;|vP᳃ YHB;|> y4YBE2|ze >|a,Ѽg> }YȣyBmfuhBρЇ/ C;|puhB0|.sP,f*|.sP,\YA᳃ > 9(|.sPg2e YApg2>d,> }Y90|vPЇ/ g_>;Y90|z gYHBρ᳃ˏe8|.sX~,pgp,\Xg22> 9(|,s`-&h*Z5h'*Z߬\~Ud-ih'*Z߬>e-YE;"h2UBfh2UeTE;*ZAUph2U.sP-\*Z߬T-\*Z߬TE 94UpRq-ih*Z߬\ 9hOXE =VBρUs`-XE }*Z9zT-\4\E }*Zaq"WB.sX~U>e-\8чq|E_/kc^|1_k_kkۜg3Tڼk>ޮs6\vmڵyj}_6\gvm^ٵyd旜>6\_o/7׸MϮͯ5釿ͯ4|+|vm~k^?~뚯y5_k[_?ٵy>\G\Kw\=6L6-]Z?/g曲#kd3}|?|:q ]~^&Sk-kĵLǾ| #M|t- t-ZWBu-˨hUE .U|*ڇSwh*Z9z|?*Z9zhes`-XE =Vp!_.sЗBt􍆐o%L ɴp~Cix&B~r2-G)'L yɴp~;G*'B~r2-'BρɴޜL;dZO-NB_LT2-%L 'JL GDpA苚ɴޝL ɴpiL 9AɴL;L ɴW b,\栶ABm.sP,\栶een> b f|-1m.sXdv,(f2ej6 9mmt,6 yY9mvPmLnf!Y9mrnABmw6 9mf2nYaNf2knofmmpf2ej6;YAmpf!6;YAmgnfY3WBρmjRY+mgnT,϶Y3tB>RY_m.sP,\栶YAmpf! 6;YAmn6;YCCej6ms`,6 yY9mzlf6 =Bρms`fY9mm76 9m m76 yhYa&f6 yYa&f!6 9m.sP,\栶Aej6 9m mpf2>d,6 =BmvPms`,q#e!Oߝ/ }|ه̗<}w, e/ =$B0_ʗ>_a,eo(3_vpje|YeW/ Ksje2B0_.sP, e*_.sP,\|YAʗ/ 9(_.sPe2eʗ|YApe2>d,/ }|Y90_vP+̗eW/;|Y90_z̗e|YFBρa8_.sX~pep,\e20/ 90_.sX~̗e^̗ ep3_vpBO>̗ ep3_dΗ/ }ʗ/ 9(_.sPe2eʗ/;|YApep3_vPpep3_.sP,\|YeK>̗ep3_vp8_z̗>a,/ =Bρs`,e|Y90_vPpRRr,e2/ }|Ya!*_pCkCr?lٵy -\_~kۤk36(\"]!}WϴkSkX[LS6/umy^G~#um^y}^_>SZum~6R]6Y>6L}6H]5ٵꚯy g=kk`|/g日kY>L?pm>6^]6?pm>6M}_ 6?pm~6߇6J]h?kVѮz'o*g6,o*ڵvg|qm>6i6u\:~UpQUpQCUi]E U/T-;`WBκ]hGW!azh*Z9򽦫h*Z9zBρUs`-XE;х|*ZA_ U/%i!߃:򓗓i|g䠾(ӗidZO`N dZ8?ci!?9O,dZOcN|dڇ-h!?Ѹ^h[VApFCUp~;>*ZUp~4o4nYE iU]E 9vPUpÿ]*A} *ZϗYE;*Z8?\ZYE }ųvP_=}ųg-=WxV¿4Ճph!omf,g8?r6 m6 mz3M>B f,!S,bBsf2ej@̶Aej@̶YAm6 }ms`,6 d,6 =1s`,6 =1s`,6 =1Y( }Y( .BY`,BY`,BY ,BY ,BBO`,\d,BY3tB_,<4t,W\( }ųPvpj exB_,RBYApÿT( 9P.sP,\BYCCP.sP,䡡 e2PЅp e2>d,X( =BPz, eBABρs`,X(;BY9Pz,<4t[e*nd,X(; e2W-\( 9PvpU e˯ZP.sX~…pÿ e2W-\( BYgPEBY P7 ep,Y( }BY P7 e?Y( =A.?BYAwY(;BYAp e2P.sP,\BYA*T( 9P7 eU( 9P7 e2e*>a,\th\( }BY9P7 e eBYBρs`,X( =BPz, eU( 9,#BP.sX~ȅ>,r e|X( 9,?BYOS e1\pm~e1ʮo;6pm~tm~fwyݵysir]_{6ڼk>o}]_yK6ڼkg*]_s/9WkKc旛^kޘJcWk^wׯz]5k^c,]_^~Űkyѵ,QkYѵ3}_],]Lj5[vmzm>k>wbͷ]͇obصg6|pm>66=\>`5p&5pC5T F5/TM,UB&kb&L!E&zkbX9&kbX9&zWBρ5s`M,X;|XA_y 
5!:d!ߠCc;dA}"g3wB! p~;4wB~Ls,I! ?dj,oͅ4 Qȩ7 }i25OTWB_LDAB_L#Du85!\.MB~Luj,MeJTj,\X905vP!}35Scx*5OCӡxB_LwB_L ysƩA+wh 9(5!kb?&]se 5Y y5Y;.CtM,bBbM,m@B fM,@&.sPM,\栚XĬTM,\栚XĬT5X =Bρ5/MBρ5s`M =Bρ5s`M =Bρ5s`M 5X 5Y;|aM,\栚AB&.sPM,\栚de3Ȯ>|aMkb&.sXFvM,qeT;XA5pjbXAB5GEkbU yTX9&rkb!X{㮉TM,\ yo5spM B&.sX7/O&5p B&.sXFXȣ"eT 9&.sPM,QkbU 9&5s`M,X yrX9&vP5'kbABρ5s`M,X;X9&zT TM,\栚XA5T 9&A&.sPM,!&e]5WBa0,\|G( /:pa3vPpa2e T0,\`XApa 9(.sP0,=ce  9(!a`X#Bρ >Ba0, }`ABρs`0, =B(2z \~p ;pa2+ 9,?\`XA' a3 L? =ABf0 at =ABf0,'a'` 9(&6a 9(.sP0,\`Ae  9(vPpa2Bf0젂a2Bf0,\`XAg: ,at =Bf0젂a`X90z T0, =Bρg: a`Ae  9,"e >a0,\Đat 9AG Aîc6oc ]&]wpmE3Eú63ڼkk̵ySI]umgɮk˵yM1]tm~eLQkʵer;b~r;Rjr׼~3<5_qk\|뇹~QkKȏ1um>6?pm~$|sm>L_6?pm~A6߾6\>kf||p }^o^ߚkmk9µ|vmym>C6!|o_|s.T+ _|s.U?_) _!_*:Ww|t+;O>d+BSt+ =Bρs`+{FBρs`+:o_W90uPN p}_oK|!~7|Xȏ{/ccm =^B~sl,'? ϱp~<<\oKoK|X8 }i26cc!o:6dl,o.4 秱i샊4?_IdKtK%KٱJև7R6Bpy4% ز 8H6vdc%xƂ|l,'% 'L렟xƂ~);P~) l,ȁecAO\@+Xo˴9,eZ L+X`Am mV 然(+Xo˴8 2` \ +Xo˴8 AZ th :h th :sAǁV@+Xq@ :Z` mnߖ:p9(`W#+ ^A zxDWot{=<+7turxn .qXNA^Aǁn:Pn@Wot{^Aǁn@ׁr{^Au^%r{NWp^;^%r{84nK:PnK .q@K .q+An .q+AnYt{^Aw:Pn@W]^pn@Wq@^AǁnK zAWp^nK:Pn唱^AO .qXn弱^AO .q+An .q+An>t{v{\d:Pn >v{^An `Wn+8n `ׁ˭\\\r^%h.W$ .qXnI\\+AnK:PnK rcׁr{8n@Wq+^Aǁn riWq+8u^Aǁn@Wq@^Aǁn?~4 .q+An .q+An u^%r{yn妡^A+aih\+8P⯠@WEZeWqj_Aǁ⯠@W v_A.- _%8H\[$ .q+A 7(Wp_An- .q@(Z\ Wp_+8Pt( rnWq+8Pt(:P⯠@Wq+8Pu_Aǁ⯠@WE\Z\ W v\ZF*_J*_A.- .q+A+AK$ rnWp_%}⯠@Wq+_Jt( rn+Xp`A"i r5o+XZE th zB+Xy[, `Am zL+؁˭\[`A/_h .qXn VV`A/_h .q,i3` \ +Xp`%(+Xp`%8 v`%8 GSVYKd .q샴Z^ th;PV/Z^ v`AǁV@+Xq,8 .V@+؁,a),]$`%Kq`A/_h .qX+l zB+Xp.q+K?|4 :Ht{=县^An6t{$SnDׁ^Aϩ:PnK .q@K .q+An .q+Ant{(Wp^Aϩ .q+An6t{8,^t zNMׁr{^Aǁn :t{^Am :t{+AnK^%r{+a+ ^%KŐ^Ao1.Cr{=~_6eM ˦Il/b˦_6}Ee7f/˦\]6Meg/2˦H)Ql ese/>]~ٔ_6MYe'eMAe/>U~s/>R.[~˜˦O_g~le~r_g~e~beGaptiA˦ /0ٴenM_6^M[_6-4e eU_6 ,4ey˦e%_6M4eM_6M4l]zgӚޣ NSqupt=O:IlNCHA~sA~I%]$]OR !zp<ȯ|t=8@I׃$]:t=/t=8@I׃$]&ك$]:t=8@o=/t=aI Jo]ƃg ?=ȯ=/=<|c˸|cTw|IA77 E{o=8Jp|I%oTKK.q%3=G*)؃R7My?8]xЏ&|cNyg؃~4{p:tăӅ{#\0\0Ƃo%oA|A?=ȗ~Xp=8]}p:xXpx؃|7o6<'9oAcN6m>'؃7_ 6T؃7 
g==87}cr#؃|c2=87U[.OrAjIXn?H\?H$,W z\T[r!փ\I 0J wRm=aVmgփKfփKfփuHU[.qU[r'փT[:Pm=՟T[:Pmgփ\IփT[YփT[:PmgփT[:Pm=aVm=GsVm=aVm=aVmgփKfփKfփ܄KU[.qU[r.փKRm=ȃ!Rm=a/(փ~az2ٴ(ٴ(ٴX6æ ei=ȫ8i='6>M+܁MA?i=lZ٦٦٦cٴ\0۴\0۴\0۴\0۴L686lz2ٴ\0۴\0۴.MA6شX6شt`zqM+\ԔMAezqMA6شt`z2ٴEMٴ\0۴X6rQS6/M%EMٴMAezprQS6>M%M%M%M+8۴\0۴\0۴\0۴X68686 azqMA6>M+8۴t`zcٴ\փ޴ ϲi= lZzփlZzփ\<˦mZzփ\<˦ʰi;i=8+i=as*փޯΩlZzփKfփ(æmZ.qmZ.qmZ.qmZ?[fփޯ٦٦٦٦+i=ai=ai=ai]փlZzփlZ٦+i=8+igփlZ:i=8փ^7¦Vpi=u#lZ.qXjdzFش\2Ȧ/i=aeMA/_`zpR փ?|nz)jA .ۥz)jAtz)jAz+Ntʠ .ۥz)jA Ϊ8̪8̪8̪zpìzpìzpì Yv:Pm=aVm=aVm=aVm=aVm=!6T[.qU[.qU[zփKT[zփT[z VpVm=8@փT[YփT[:Pm=T[:Pm=8@U[.qU[.qU[.qX@Rm=aVm=T[.qXuz;\ⰴHWPm]Hֿz.pٜOI9leeWoHj9X͹buٜϘL]6G9Q́ڪ.?].t,]6|>X.trrsDeGTPurs$ee~UM15S]g^TW+e~IQuQuټl^\6og>;͛e(Nyu>eS+_FIпl|/+_6_Jˁ_A[>ԍ ߋ o_%Q\ Wp>B\%%R#%Qth v>ڽ|{+ȹ^A?}{sGO^yp>w~^A?}{烶yp/p(W ۽Kd .q@ٽKd :{A?{9+'ve raWO<^A?{A?{vu"'v q{:aРtt=+ȩ]AO:PATdtt]A rjGWS:9 \ GWS:K ]A :tt:?|4 :tt:!@GWq+8u1:]AǁuttZKGWS:\N=H%Vq+ XAǁJ zB%Vq+ XJt :Tb*XAo :Tb(%VE*K+(R\ⰴ X~JJ+Tb8,-Vb}Jc +QJVb2*^Ptʨ zNM%V[L*NX.ɭ :eTb=@)K .q+AJ .q+AJ)+AJ)SSuX%Rb=+AJ)^P\ⰈO zmC%Vq+95XJt :Tb*+8Pt :Tb+8Pt:PJ)K .qX=Vb8HچJJ6Tb8,e6VbŤ߫9We~e~_6ݿlt%c^]6g9:Sr.s1r\693Sees`.rzuBeeW͟SegeΑWWty5=]g^=OW?Fe?F{e"cVl^_6 ?;ey~fW}L;/ͣIey1]gwytȷ1+ |1+Ƭ _'l:PwJ~h Ƭ01+ |1+迷4f;%%S\;%%S\ cVp>D\;%%2fC%SAJ< +ȗbK-0J+8<η4~( 'σ% +8 'σ-|K@ItX\ )VpXJ\ )Vq@ݵ+цXA?b()V K~) @ݵ+'R {bA?b9^A z@UX{'{uN|9*yWA r4kUz{uWA K z@UpWA .q^=^*8{t Ѥ*8{t:P'WAǁޫ@Ձ:t :^zI^ą W=q* UA^ą'.WBL{Uګ~!* U.'l XU*8^th e :WBL{Uq*8^th :W({Uq*8^2U^\ {Uo˴W8^uUA-^\ {UpUAh .q*Ae .qAGW8^\ {UpU%W({UpU%W=K*8^th zJ{ՁWګ^th:PgWګU^th :W8^\ {UЫګKd:PW W8^\ {ՁW8^\ {UЫګ\[^٫*95U%W8^uU%W8^چ `{Uk۫U^ `{Ձ=Ohj`ԁeOho`T|aT0l roԁ$b&C& '6I8$$&e .qI*ŖMRAǁ&@T-MR$b&@TqI@MRAǁ&@ԁ2IMR%2Ih$\ TpLR$\ TpLRAym:P&$\\I*c6I8,=ʑTAg 7PvPy#+GvP*A oAtV r!kՁ˕#; T%rP(UpT%rP8AuT%8A\ UpT%rP*A reUpT%rP}@Uq*ȅTAǁ@Uq徠TA.d :tP:TAǁ 7PvP*A vP* T%}A;*ȅT%rP8Au-rP8AB9K .q*A vP8A\  :tP:\AuTAǁ vP(i rjT+ܹ$OT+ܹ$uLRAPh rjT3c\n7$OT+Kۍ6IBI*A&e zBTpLRAόi:P&Kd .qI@K,+Kd .qI*A&^$\ TpLR%2I}&@T+MR$ &@T+I*8$th :4I($th:P&74I8,6IQI*aI*- MR%KMRAoIh 
.qXzl MRA$sKT)I*i3MR.m :e4I|I*$&w4I2\g$th zLԁ2I8$\ TpLR$\ TpLR%2I(Tpß-2I8$\ TpLRAOi .qI*A&/4I8,2^$th zLԁ2IMRAǁ&e :4IMRA/_h :4II*A&KMR%2I|I*a)I* MR%KMRA"iE&ql$uMQl~/_6 l19S:.qQqd\69Sees(.3qu@[ee mQ͟ SeeΊlQ Wԭty5+]g^JW?F[e'?FCdhl\6.'wseRc4y/8QtMwpF}Sp>{(}SM|kp>/8 J雂Ko:P雂 MA?449ڰ)臚eh raCS5 MA?44; A?44Pxφu!臚 g|64}u2=KANUY z*@ҁ:h՞,=$g)=K2,9dg@y гAOY:Py г\ Y zHFRqg)8гIRqg)8гtz=KAǁu78гtY :,sg)=K.{{гhӳhG#,JRУz~sg)=K.gY ͕@=Kg)8гtY ͕Y :,JRqg)8гtY :,(Rqg)8г;/=Kʳ\ R,8ȳtH!Rq)8PB%D :"9Rk"Z+(FA?(FAO~5:phQ-FA/B5 zIQГ_jK+(QЋjK5 zKс8Hk\ QpFJk\gYPk\ QpF%8Hk"ZiK5 .qj^Pkt5:PZ!j^PktFAǁZ@Qq(8PkMiFJk^ZiK5:poQprZF%(a>`4!EA梠$4$ \Ґ6%( \X梠4$ޒ\\ sQp:"NpH\~+=@'V$ . tS\H- tS) tNRs:A@'8Pt(9P@Nq@'8P@Nq@@ tK$ .q@'a1X\ Nz tK tS\4X@)йl~/?:O/'ew毦S>\6l~/gc*˹l l~/Cc\g˘Jr.L8/s$ɹlw2?*ƹϼjq.3R̫$?FI$ɹl\6.@SecHw<l^6C/㟏 e,yzټl\6A/Ǡݹe$'8YyGp(IN yGpXV YYg%9J+Kr@$'ȯX|\%2Aǁ|78~( ·|=g% ·!Jr|x@Ir~( Χ%|=8z?P 8H\ I΁8Ht( $'ц%9A?Ԕ(IN Kr~) $@$'臚 {A?Ԕ9@ ş O΁:@_ zFN3T{p~'9=8ʃ?=8O?=8ʃ\ N/8ȃAzpуt :h҃t :SAǁ@Nq@ :zp=8Ti9pYf66=&WLlSf+&m6A)h9pYf+&m6AOh :lm6Abfsl6Aǁ6_1i :lm6Aǁ6e :l~9@lKd 6l&SlKd .q&Y m6%8fsl6%8Yf58f\ Mpl6%(Mpl6%fth :=@lm6A.i :(MУKlm6Aǁ6e :lKd .q&I>m6%(MГ|lKd .q@lKd .q&I>m6f\g6f\ ́=m&A6l&A6ls{Aq&8f6 `́|@ig\N[;\;9v&Z;d rḿykgp[;ڙAigp[;\"(Lnkgv&Aڙ >jg3A.| :(L kg3Aǁڙ :jg3J;t :8H;)Lp3%(Lp3%v@igK riLprKڙ X;th riLqY&ȵ2A=YZ2An1m :4Y&Ȼ6eb,sr-f@Lp2,\ Lp2%2(Lp2%8,\ Lp2Aml .qY@e\,\ Lp2Y&8,th riLqY&8,th9pfL[Le2Aǁf@LqY&ȵ2.7l .qY&-2.7l rmcLpr3fe riLp2%2(Lpß-2ŴY&AfeKd .qY&-2%28,Ae2Aǁf 6(LqY&-2Aǁ  oV&Ƚ0A?`T&Ƚ0AOa9pfMIЃsjJ˯L'Ԕ(MIpIS\~eҔ_4%JS\~Ҕ=8$U.5%JSתԔ=8$AiJK) 5%AǁԔjJ$95%AǁԔ(MIq$8PSt) :ԔG$8PSr4%A) .q$A[֔8,) .qXnY[S\ MI{$jJK[֔G$KgԔ\H$ QSȝDMI3FjJ@QSt)9pjMIA$#5%J\JV^;R\»4[DK}G# ;#Rtxz"J}ǁw;Aǁ'w;J\ }Gp%;R\T$XD"w{yl~/cw\6ol~/.}Lp揃l~/l~/Iqy;.?.cl1\6ye̫ 2?ϼZ@.3y~~< l\6O /gͣBehyPx<'l_6E>H3eyq<l~Lw.g͛la#8 8O#ȏ= 8Oۂ|/G_nA~/=>HGp>=n@Gq#8tAǁn@Gq@]b:t{=C.q% q.1AE AZA|$W [A~i9Pg|$臚V )l9Pg~i VjZA$ȷ [A %:{}^8up^8up ip^8 ~|38~L(U2Lrp>cBǁ#LrЏ hp>2z1#8? 
GF|&@?X\ GpJ\ Gq#臚 "PSqANF,#臚%#臚 (G5#i Ah 6tNCG,:; ?ۿԆ_iG)Qʠ_i8P:~}#A4@CGq#G@CGq@ :4t: @dq#8th8P". z;'P<S< wH}G;~Q#y.K{;~Q#Aǁ@}Gq#E;#8P"Aǁ@}Gq#8PqAǁ@}GЯw(}GpAbR\ }ǁwI}Gp%w=q#A;#A?ą;K.q#A.q#A\A;Aǁw(}Gq#$AǁzI}Gq#8PqAǁ@}Gp%w=#AzO}Gp%w(}Gp%w=@;K%w=#A;#A; `}Gq#͌Ar3c[FOm.Gm-#imA._l-#imA>e([Ff2/[Ff2-#HQDq("1EAǁ%zfLQDq("1EJt(:E"EAP(:E(QD+"K$.q(@"K˾E=("akQDca"K˾E-}-A(" E."-:HE=P(" Q+E$"\EZt(8PꅠT/8H}ꅠ'hT/Pp Aǁꅠ@B4z!8Pꅠ@B4 Aǁ^.qz!AwQ\\oz!  %vQ>W^6Ules.?1j.%]69͟ ߐ. }Lq揊h\6'9͹Z.?$.CqٜH\6@|LZ˘j.?.celd.?.selPlLϼ.3~̫2?vǨW SeTyviv/Xʽg ;8|^"B߭^^ A~ؽ,>HBq{!8нt^8P@Bq{!ȯUv/ Aǁu=8нt^:t/SA~Ͳ{!Aރe:~`^m ^{@ j|q{@ {!ȷ~^8P\úAǁ|.8OeӠ磨ʠKdP8PKw A?+9^!'zWr2bBO< A?+(BO< A?+9^@~iPz&EB?iP% g6(NB3)ڠK8 ANCmP8P_iPg à_iP8P~ A!AҠL@BqA!G@BqA@ :4( `qA!8РthP8P' .t РTӠT4(HBS~QA! .u~QA!! Aǁ@BqA!EA!8Р" Aǁ@BqA!8Рp Aǁ@Bo4((Bp AEҠ\ 2(IBp %2(=TA!AA!AKlqA!AKdP8PKdPA!8РthPzH2(1ҠthP8Pg4( ʠthP:4(8Ƞ\ B#wKdP8PG4(8Ƞ\ 2(8Ƞ\ B#wA!A ڠ\ BУhKdP.qA@KdP.qA!8_w26(ؠdlP8P 73 %! g r9RnB3c{\؃\[yۃ\[yۃp<幕!=Aǁ@B+{=ʃ @Bq@y=Aǁ@ z=% hʃ\ Bp<ʃ\ Bp<AnA8Py(ڃ\܁!ȍ=A?uKܒXpr]ĒDIBKCK% AnI,Id% A"$$J% A. -I:H$4$!AIK$I.q$@IK$I.q$!-% J\ IB[KK$I8P $$8,,IrKbIBq$!8PВ@IBq$!8Pprʒ $J% Aǁ@IB[K\]Y\ IBKCK\]YĒޕ% JВIK$I8PIK$I.qAIK$I.q$!ȥ% %$8HAJ% Aǁ $(IBq$!ȥ% AǁlIhrf@Y A.lz K2 LY ),A/h8Pf4 [Y ),A/%h.qY Af,thz K@qY@,thz K2 Aǁf@@K ,RfKd8PfARi.qX.,,f?`%XthzD}AAOai:H4 =vY Af6 9.R9cWo?P]A?`cWЃv0v=aWc~v=aWq`Wq`W.qPWpK2rw8?AKzî"l]2"_6}3eSP~ٔ_6}j"_6})eSF~_6}` /\6Meg/˦h)l es/>%~ٔ_6Me'esY/˘2>(eGes/>~M lPeges/3?W 2?s/3BB_6 k6˦_6~4eӜMlJi(˦///Qi ˦y/lze0MliMq 䓦BOp.*p<8}3B[ w$?O BEIcC˒ /D! tPB\B?ȯN*q@! tPOX?8A:('W)?a>a N?8H|P仄ۄtP>W U?8AV?8)}q@|PsӬAU:p5#.q>a>RHK*3t:(p:4|п2?8C<8{|p:e(΅N=>_ !=Bc/ =>8{|p: D.q \0B8̅.q /D{s~ѹ;CF~ѹ;CPsAԹ;/D9rBr9T9~!F9AN%U_Q '*]^QC(p:&_Q|.q/D9~!F9tPM?8AۃtPr|A:(q@9~p>***ಥV~F"UU z^=YB,B?8A A :q@ttFAѡ%s}pc ?app>8?ap?[8.q\08.q/Dt}:>?8>8?yt?app.q\08.q%s<80?m\0?apRd /qP}p.UT 0?Qk?:CE._oUT *py9UQA3TT *q@Q\gAEA3TT〢?8AEAE:(p\T͹%sQK\T%sQ>8?a.=?aSQ\%*.(TTSQ:CEܸtPT\QA E:H(=? 
AnTT%sQK8E%sQKPQ}%sQ%sQ\g%˭"?utPT〢SQtPT〢rHEr?8AE:( [E*p\T {*.TT *p\TAnTT%sQ}p.p\T%8E.q\0?a.=?a.p\T!tPT〢SQ}p.q@QAEB?ȭEz)RQ¢>8?Ez녢\T(K`?),RE.q\0_?),tPTE:(SX?8AE:(K ?8>8?\0?a.E.q\0?a8Ez8,RUTW (p EtPT(ABQ;A/%PT\71pyL`?q c\7?uz\7wW?a=8u:q@|pp\7n%s|pVǠn%KsǠn%˽e?q Úu/.rٜOu_-.3r*>x\69ѸlNes0>ͩ͟les&.?!>@\󬛿lh_|2\60\g^u͟  ͟ e~uocT)8X<9PA~R>8Oh&gΕVT) WJU)!WJ R:::?PAǁAǁA~=r|q`|q`|q`: tX)tX)tX)Buɕ%: k+YU)䫂+|YpY)+|cp|q`:k+僎+|up:tX)+僎+T|p'VVyҁ:\ß%:\ß%Χ݂+< WJܷ(AX)*|@1_+sBp>x*ŠR>8P U)ĕ%.qP.qP|p*sY)ÕA?Ԭ?PA?\)CJjVJjVPR>ٟ+?\A=A߃=7Wv9Bt7Wv=a{p>y+T{ЃvuA=8=8=GAǁAǁXq`{q`{q`:?ttt~߃߃~e{~.+cw1eqy ߃=(v=b{# vAǁAǁA@@uvttT{p߃~b{pT{v8=AA\uu8=A'~.qP{p߃K\k~:~:~z0t`Aǁ=߃߃T{q`{q`{p߃K%~?PAϩ\uu8=95T{p߃<[.fv8=A=A%~߃߃< ~r ANa~r ANaR 1)܃\J=<&jpr  wtnpr) np܃\J=8@5h=8=8@56665h=A %jp?P %jp.qP{;:75s{pr At\%np?p wtnp:Hlpr) i= ւ܃܃܃ѹ= =܃K\58@58=A A.%~-jp.qP{;:78=ȥ܃KKX{~|\6"c=l֥|08 Χkº|@եӀA5a]zp> OӀ.=҃KT\⠺U\⠺եƎ.=ȹ҃~Y~҃~.=臚uA?ԬK?PuA?ԬKf]z?ץuA.=҃~d]z֥9>t]zKrU'҃|,O֥1Ku%K ҃~d]zq`]zq`]z&҃҃auX+8.=8.=8.@ե֥ɺ?Y'\ɮKuAcd]RuAcX ҃ǰ.=(֥=a]z֥uAǁuAǁuA@.@ե֥źUtXtXT]zp҃~b]zpT]z֥8.=AuAX\⠺եե8.=Au⠺'K.qP]zp҃KT\⠺kK:K:Kz0ǺUtX`uAǁu.=҃҃T]zq`]zq`]zp҃KTu%K?PuAϩY\⠺եե8.=95T]zp҃<ե=mf]zp҃KT~҃KT\⠺ j.t]zqp]z]d\~rٽ.Gkk^ ǥ5.O9.u5r^ 5r{^ <{̓{Ty^^^^ktktk\^M8N6N܈~rAGA܈tوAv#zd#7A6A6s#zpу\ù=A%jD.qP#zpT#zpуKԈ=5s#zprgƍA܈\ܙq#وt؈t؈΍AǁAǁAǁ.wf܈΍AǁAǁAǁAǁA܈~rgƍ%jDr F;3nDrF58=5уKԈ\FՈ\F58[Ԉ΍%jD?PA܈\F5AǁAǁA܈~уу\ù=8@5AϤ؈~уX= уѲ@5b#z#Z6b#-уX=ȅT#z 6u#zp?FFGlD:lD?PAhوt؈􈖍=8=8=8=8=уT#z 68=A=A%jD.qX.=AAo؈\\u#z6_:H,=z'K,=z{t.=zĦ)=\=ƒKc7\VuSxl q-M͟dAle?Ʀ\6GcqYqٜh\6'9-OT\69͟Ses .c~y_64|L/?.36Oτe~u_2>llY|Lߗ7qeyv15~_6]6O*.cyHqn/?y;O7~)Bp";_rw F;7~Uʍdwq`wq`wq`:tt+7~6~6~6~#AǁAǁAǁHn_\#A`@=P$^ čAA;ȷ7~$6~ӋA; ;7~ӋAA; @5~Vq`wq`wpaӋdMvpɚ|5=8 GĂ#Ț|D,8 GĂ kUAd#bXp>"vj+&;8 .qPMvpjKT\⠚A]h@]M]h@]h:Gedeu/Pr_XYP%]>߮g~bB}AgW;YEg}A:a~auY=l`gtgtgTuq`uo:::::>P}Aǁ}Aǁ}A]@Y8:+Y88OV*OɊRqpG#8.0TqhAO Yq V=a8+ǰ88@U=aqq`qq`8:8:8.qPqp*<U㠧8.qPqp*Tqp*KTqPE%**.qPQq[TT~E%Q_qQqprEdQqq`Qqq`QqFwQqF=pQQ88eTQqp\8AE8eKTT\⠢UT\⠢8@82E%**>PEA.\T\⠢EAǁEAǁEA.\T|\88@LsQq#Z`Qq{$=eQ**zĢG,*zĢUTEAU]TEAo,XTZE%** 
ɢUTEAǁEAhYT|X88@`QqpKTT|KTT\⠢JKTTE%˕eIn$YduuMrdpPq ?p!.@97z >.en8}> eo0' e7? T7? e? e7\tJ }ˀoay=׼ ~ 0{mõy ?HGy\__?HcZGm_?cz5ɿ6͟ko63H5~6Lo^_g37Q??kK?1xT+kȵr|8um>zL!kǵJ|!~m>6_w<"| ~m>6_r\/ߏqkaԵh|cOw[y߃zZ0C~ kpko8_{w 8ѧyߐ߽ y߃o_G}ys`7 =}As`7 ys`7 =}As`7 =}A֐_ќ 9A֐&ԃЃ˟ip yM0p 2 aWF `#COaHӂ'0pȳCρa X90 z ?Ȃo8T7 g٩p~v*VX g٩p~v*VX gN T8?;NT7䁄 T8?;.sP 2|e*sA|C4Y=o|C4Y d ?,>o+vxC|JVkC׆ !;8^z׆e6 kCu6\x탌׆㵡h =k*^s`6- =kCρs`6 }ǂs`ⵡX0^.sP6\xAke׆ 9,9^.sP6ke{t׆{xm9O{py7}ПxiCO>,Pd5 U| }ks_C#zP&, B׃*>Fd5"MX|=k E_e˻^.Pd5\w~~2{m^ʵy(柵?'y$\^^>yi:Q\wsm͵y5kkd͋61bk6T5N5Akkok/2=/gyP;bW){m>6߿6ݿ6_^'A){mm6_^S*|{m6\_/wW){m>6^t 8_uA=Η!gl8_ep8_TG6/'wd28/C>BȆl_STG677wdCrG6\栎Ȇ;l9#{Pϔ;lkȆ;l9#{Pϔ;l9#{PϔfqG6\gJC^#{__ 9.z6/\VgB^9zeLņ.r*6䕑SWTA=ȩ+c*6%Sl^1ls`*R|zLņk|vP5p~"F 燑as` 6մk0R9#T 6F =`ap~)F:l3`ap~)\A`eT d!t c5M׃j>2kkYC3ځeЗ,P y ՃJP y p2%TeJP=jA p!o8zP p!o8.sXvB5121m'TC;zL!9zL1m'TC/sB5P =&TCρ s`B51P 9( p2%TCrB5\栄A%TCrB5\栄jA ՃJP 9(.sPB2%TensPB!z9.sPB5\栄LjȻ^NTB5P y s`B!z=90zP Ր7|P y bBo0 7|P=j[&TCtB5-og0oP} s`B5-A%TC2zL>.eBj90zLog0zLTB5 &TeJP=jA p27'TeJ{jay wP =PCnB }LTZ4Ӣ }ƴAEC'0-oiPLTZ4Ӣ/ }AEC_@1-Ϳ5yJ^e\c\a>/?6Fjkqcz?kIڵcGo]ѮGk\ka1E{m>IxL)kڵ|tm>D6]OS|tm>?6\ӮWK)E{m>:6\ѮJckcJ^Юk==z.!a{PU9D=a|հ - p>sr6Ϟ!ܰ } ۃj؆Dp6"7lej>Ȇm9azl؆Po9azl؆0p6ذ =6lCρ ۃz7ذ =6lCρ ۃz7ű2= ۃz82\oopdLGv6ذ ymKp7lC ۃz82uWƆmkq7l+c6źA5lC^az,T6K!/] f6EK!/] W*ц<r6 ymC(hC9D %ڐGQ.T6\mA%p.<ȔjsTCeJ5S!OKR )Ր'&NTJ5WXTCp0+,SR }Eǔj诰LcJATCdJ5R =T?h2zLSR =TCρ)s`JRj90zLTJ5\栔jY 嗝_~Y }-f;Y =Ճ-YwVC_ k3;Y ׂ쬆dgAvVCρs`g5fvVz쬆j9z쬆;AuVCρs`gD< }-i|< }-i|<}G#lPyhӃjPyhG#lT4i9yzPs`4< 9y.sP4n<=i'4< 9y.sP25Oej|@Ӄj< 25OCy.sP4\65Oej< ?i9yi=7OCӃS]n!5OCǸy.< yA5OCֻy8ӐniI7OCNyzPӐnσIw$U _UX }*AUa忐OY }KU؃†>f6bWaCp ! 
UY =VaCp zT6 7*leVa z†*l9 *AUaCa6\*lAU؃†T 9 .sPY 9, lywUs`ARC̱ҋ'˿j/XK=Zj9RC_z$ԃb-5Ik/XK=rZj8ZMKi(69ZM~mɯMVίM76m5tm4_\k6 צ4_Vsm~Oצ[6Mצ4_\k6צZq^_Ưys-צ_6ƱMI}^g<צ$=kӹͯMxw~mk*6צ_Mv6tjkqtkM׿=;t86]tm.tkӵͯM66]tkө׵kUͯM6]tk͵kMW6ut1kӥMWrM\6tUk/N/ ?O_&Iuf_uf_7JufQ|sgQp̾̾8l8lȟ̾GKQgE^W3"I30wf_\0wf_:/z̾93싞: :/z̾K*uf_Й}s@gEp~E=tf_Й _䑃:/.s~G ̆n˟gOLp~ZEi(B'틼SEl(B'm8?"7}DEq(BO "B"2}s@6#/@Cڋ̆sgE@3" ԙ}џ[tf_1:/ Byl8wf_ :/NC8uf_1:/Dy7A6#/>7AE B}n틞"/zІsE=Dh_\0Gh_\0Gh_s&оaІsE>lm8Gh_\0Gh_\0Gh_VІsesE>p>{CesensG}q}q}sPEA"/rІsJ6S/PE)9%"U /.+s27/$y?[}dd!d>G2"/zH|$s_ d>G2E}dn8's_}s@2E=$s_-$s_ ddddn8's_\0's_\0's_\0'sMC%s_:/.sX4T2?-o**>DE_bO\P}KLTqù Q}їTqù/1Q}'.KLTqM&Uq_UkRC6߷1VqͿn#6o||1Uq6߭zL6ob$[wb|^6~{m>$6n6jyLk|rym>{m>Ŀ6_3b5~V?|HJk%ѵ|>pm>-6=b|4pm>6]/Si|*pm>6]'Tڽ6]ϊ͇XA=Η!?A=#!pU7P zF8R|G)! yFonAp! =6zCρs`7=Cρs`7şFo9zlFo9zl!@ 9 nT7之gpyFo эސg0n pȃ7zCOސ'n3¡'Fo7zCρރjFo9zlFA5zCρ =6zCρs`7=FArC_. rC_. rd/7{?&>`/{!oЧ>`/7 =rz冞{2re|ZĽpz Ȉ{2reT/7\^nAܐOL{Ppz!r/7\^n4re=P.sP/7\^n9r|ʽܐsp/z ?SpT/7ш{ yp^nȣrCr<)t/7I{ yR^nȓBrC޽܃˃冞{!O =rCρs`/7\^nMre=< ^nayֽܐ\u/7{2YXrI1ݐv?4c!O== };d48 FC}2|Pз3 ySз3 }w? 
}; =FC`48 >hp~4 =FC=`4hp90z1zT48=Fe >hpAp2E./M:2IG?-/M:zL f7ӿK }AAC2CLT71ӿχ }A)C~~ |m+6߂1vͿQ 6||/1u۹6x6߆~L6ڼk+=]tm^ӵVcj_tm^ҵyH׸6ͿT[_)׼5ͿOͿNͿM~yT;cO1͇_]׸1Os6Һƍ'|m>y6º6zL=k|x{m>ƍ'|m>~6^g=k5~^>/_C|m>6_e]/Gkc_ͧ Æk۵1u sk۵|pm>cxLk۵|pywqp>Av8O!Oy[nqȿ1C~wOC~w8\>AC~cw8OD(O-?h}J>>q6C^Ѻ}zlǡq9}|PoǡqȋXCρs`8>> Bρs`8>> B}.s!s>>H{?>ez;ȡ0rK`C49z #!OFOC9|P{'!F=* !Y>yq!Z>yq}1nyqG}.sP8\q8?yP~>Ȃq,b8䑑 ơk`$*ɂqCd ơ/X0ՎЗ^,?Ȃqv, ơqMCρs` ơq9`z,T8X0=Cρ*T09`JyF>͘˃nPd8"ǡ}Blq ECg8">=p2d8>=Cgǡqlǡq9}z,hc8\D\0}Ƃqe/ϊ` ơ0X0͂q3 .` ơ0X0}q3 Cρs` ơq9`.sP8\栂qL\09`|P`.sP8\栂Ae*T0 U09`+e*>Ac8\栂qA*nsP8\栂q9`r.|6spG#.˿ !F\0>qȧ,]0y4qȓBC'.<)t !O ]0yRqٻ`|py$s`8I ơq9`z,T0qAp 'Z]09,O`.<8w8\HqS)9(R|Pp"2EeT8\HqAp"!oO9R.sP8\H#šHqST8)y{ʑs`"!oO9Rz=Hq9wy/i#\w9\iFy3ap!o9f2^*f2^CLcAC䃪&f59_6ɡo|P٬&jrlV !$p59-VCQd59>VCBa5AVCρзPXM=V ɡjr&T59XM=VCρs`59EVCρ䃪&jrApUM9.sP59\jAUe&jrayQwXM=C;}lT8),ǡ>}AC_g}؉l\^r8),??>{;{?µyA]o]oq_%6.]a]_=`|(|m>e6 _>6^gk̳}|y3c̳|m6_K]OS|vm6_F]GlE6_A] õkc"_' skӵ|#k?"C|F.r8A8C!?O4K!sϖ+!D\qr9Ո+{|TqyIs|AUC^{'T|Bg+4Uq9*烪8nsP9䵹+Ρs9z8+:F9z8w9Xq=VCρz#Xq=VCρz#q+2Xʡp3{< s9\&gC9z 5^w9䩘C͡'PA=h̡d9qC{ 2ʡs`B2dǹC8|ǹ'<-Tn9!s!y[yAC8ӹg-Tn91s2q҃zAC)dn95sˡ [G-Tn9BC0K!s[}r/h-FbnACAcn9[=?h2z-s[=Cρs`nrˡr90z-Tn9\rA+-?zo s/?s;?d9'g2}soR?\p9'ϡ/,9‚p2d9=C_X|Ps`9ϡs9z?ϡ"QC_E.2??sCCeP˃3?|Ї*?>Ta9 ϡs9|Ps`99.sP94?>sȇj9.sP2e?|?`2C.sP9\Ae9sg+ACt9>A%CN:.*J<<]s90t҉s`9x=&eJ<\wu9\sAGN<x9(2%*2%eJ<T9\sApx9(.sP9\s;PN<x9( ϡs90x=&Cށr9x>s;PN<!@9zL<>_hyY蜕>tRV:=:g!9+CYJGǬtPV:=:fdV:~s`sӡ? 
lN$9՜I`s:_Us:'jNtȇܜ}Oз8ٜt؜~s`s:=6CρjNt99 ՜=6Cρs`s:؜}s`sӡoq9.sPs:\A5ejNԜ99}Ppӡo79.sXVus:F6d:-С>=8t+tơCX1qWC>b::}~8ơm.ȵ1}m~m^еy@ہݏ)}mεNcZܵyp]U6J6o~L'^\V66ϻn>6]?'#6X_^66>]SpK;1GNk5~rxL1k۵|}um>m6]S|ium>h63\Oݮ͗U)F}m>c61\ܮ kcQ_ڮ'棅k;F}P A϶c!:F·@OA8AOӛup><y:vϺ@K1ױC~]y9:A..sP;52.pPuXcT9GSup!/\>:vAu[cT~us`;X=ֱMs`;Xy:v9zc&K9zc&K:ױez%ٛgup3y:vLP :v)ױC;$u샪c<|r;q!d]>:v3YױecST샬cR:vk$ֱCK!ء X>:v/c>2`;Bֱu_X}:c:v9zcd;X=ֱzcء:AձCρus`;X>:vAupءWX=|ѓa:ڡ:gU}ʮv+WvCcW;+ڡ\}I®v+WvC_.sPWڡ/I9栮v9zjڡ/I>v9]s`W;=v*p\q:;%&qa:;'١O\><tvȇ}tvC0zLg=Cρp2C>tvAJg|p2*.sP:;\tv'>tvA쐏9.sP:;y2eJgT:;\tv6Csp:;sNg>tvȣK_Eg5Ng|g5Ng< tv[e˳NgT:ACLg\8t:;yHtCCOgNg !ɜ١'tCCOp9(p2eJg>tvAp!o8}Pp,!o8.sXu:;-e˳Ng?tv90zLgtv90zLggayGs`:;=CρGNgT:;\tv#weJgypyp2*.sP:;\tvAJg9(.sP:;(e9(s`:;yJg!@9zLgT:;(Cρw=Cρw\ˍ./&8.+ˋ ubs\wuߊr! :sCJu/\wR;=C&wZޡ?&ly&[1a;M&CL>wLlyD-7}ϕ-?5}-=Cdb;>wLly[ޡo}P-s`;=Cρ-\=+[2ejyT;\栖wA-pZ9'[2ZCdAC(Ȁ-y>2`; [ޡ}dw4C}pyc-[Ck=kc6*^?'Cë6_W^U^ϭV6m2}m>66|Lkaյ|r{135~6^gk3}m>52~mr6c<@P|vmh6a\y)~m^6^\/Gyk6\\-ͧxSkŵ1¯xW ?3xA0Χ!?fxx8V !?iE8V\!7xx+;Cza"!xxK<ꅉp!9腉hs<<\xxA?yxAes<<\xxx90~Pxxs Cρs`<<?wyBρs`<<?wyB:.sлYyAMz,$M9ldFd<\&yAMj$9&y9Irn|Msp!s<\U$'Mj˿!$y"&yeIDM'nT<䉨!OD$l7.غIzl<u<$=6CρMp򈭛2[7ej\u<\?CX?R|[,ŇgHзX}'?RY}돥Y=Cc)>X?R|[,ŇKPs`)>X=CρwY=N8K2e*T)>\R|ApJU94K2EjCߕf)ACǰ~pyνן콇>d=a={c{}:{O.oùp>Y-{kkc3|/|+|cĵc_um'|m~B<Ǵkncz1Pŵy^gxmz ^'xm^5nḱ.N5oEk/kc|Hum>6^?'{{6^W^U^'Ss{6m~m>66|LkyԵ8|8{{1ޯ35~{6^gk̳~m>Kjwl_|RZckuٵ|Hqm>6=|>qm>6]S |4qm>6]%SԂϼZk̫cnԋ*|s|onyA|8GyDrns >܂?UB^r ns ^T 9-p^T9| :ejԂ|A-vnT >\|;e9zl[ACzl[!8܂=Cρ-s` ޡ =Cρ-s` ޡ y>|AP\Ԃy|Gez% [|ȓCCzl[,!#݂OCPo[!O܂=d='!\ty{C,AېX.C?GB_0 W \q(>1C/ Ň f(>C2C_@1.sP(>C2e90z ŇC/?P|90s`(>=...e˳Kne >\ʖ܂-G@l+c >['܂[𡏀؂}|# Cρ-s` Z|9.sP >\|܂9P-O.sP >\AejԂН[Ղ9;ej>d >\|A-jԂ9A<-sp >xn[Ղ sO{rO{!C:rν<r=qs{y\{RC׹Sν{{RCρs`={9,O:.sXu=\)ZeSν{)ZC:.sXuAC߲~pyE+c=i]t2Cze,2>2e*T=\栢{AEpUt9.sP=\栢{H.Tt9{9>Ut=CGr=Xt?{H.!#z,!#~pyEp򢉋2ECGb=\氼h{H,M\ty[Eˋ&.Tty[Ep!o.sP=\栢Ae*Tt}[EYt=C>e{90~P0C{}돹ʽ1s?`̽T=?{[̽{蟷̽̽so1z̽T=?Cρ{?{90z̽s{;̽s{}'pr2*.sP=\{Aʽ{}˚psoY3 sj{?{SC]2$g5̽>d=)swڜ{}{賚wڞGkßp'.kM׆[ʯ X6k^&6<[caq ƥk 
+}mkF_=gc a |mk܂ۜ[ ^Vko׆_Rמ79w_^N^΁_܉m8Nkk/׆߱ ^ ] ؉mw;w_N^~6fNk)k!kÑk/;3?w_g~Ŀ6܉p?/V^{^FtpvpkÅkñI6__.__NL^_=pypXpVprpi׆s׆c׆C׆C׆33?_G^g~lϿ<8Qpp!7../o|eKpppp!ׁ. ]tq|C^K!.E7j>\0v?(.sߨűK!/.ѥpإpإ?cecC^fKcecC^kK2]=?cC&؇ßK`zsCat?.ܥs8 0w?.ܥMyV.7~q|w%>Y'8n2uy'HcO?/|ȳO?<;Dzm><@DzCaNχszC}y&<CzC!=h y<Cz'?\y2/Oa?0@Dp8?9=/fۜsz6?/}13?79=/f|8?79=0?ќzszCaN☞szszCaN☞szszCaN☞pØpØszecze\X?˅zsC_XE}a1?\}a1?\sPsC_gEEt.h2}]6?\Yl.蓥h3W(ȧ7P'KsC,E}4?\űh0?ƢƢ| E9E_Y&?\0?\0q,a,a,GPűh2h!CecCkE9E9E_.s.s>/h!瀢|E9q,GpWsL 1w!ϤGe#w!ϤG☻G#Z?䲑\? ?-rzsCa9̹9,"w2\?\0qy89,"w!X wù#Z?\<}8?휻!G?`cCsN?o79w0?9w0q[sCa{tsÜsszsCa;sCa☻w/999_s.sspf7r>s8sr9wj>str$O6I6W1?wѮ7ѮͷX\v?yug+?͋6FcZ5k_|mcJ_{my׸#qL_]¿6661fS&|dvm>16_3Ӳǔɿ6_9_7_ʮdSǔɿ6ZM3m>!6J6F~Lkصp|T|me|yf3L5~ɿ6_gk3m><33泚k53m06<|Qzm&6\+Ǖ)m6\f'Kǔ׿6\e͇Ck5׿ϼk+y p> C~w^?j p^?j&:C^:r#bA8 !pA? ϤX}&ł~9z,TA?X=e*T 2q%e*T?~Ap !sA 2C>~A',T9Pp 2C>r.|JspA tA 򯢂~,_EY.#HAECGr$?yɑs`$?yɑ˛/7_9,o8>#2GCGb$?\氼H~۪\|q$?\H~۪y[Ցp"2E*.sP$?\H~۪?H~90)GC,s`A ?}, UcA??CX?~TO oN8  z,臾ǂ~9PзX=CcA ~9z,臞 z,TA?pe*T?~Ap 2.sPA?le;.臾H~F.9┑G>g$?Y #.}qH6GC3y1mwҮkk]kMk-kϏ)m^ݵnيkOk̓{Lkcikk۾6߳~Lkͻ67ƀ5nkkޘn_nm?53m>j6]ϝߝqk)c_ͿԯͿӯlkic__׿6]_?T|vm>bϼ?Ƹ5~׿yLkQZ|){m>ع6s^O9Sx|{m>ӹ6\8W)m>ι6\6g9棜k+͟yͧ8Wx?3(Ag?!(+ G^ G'~B.Qq?!(A2Q#r?䥶RPAQێ?(ȋ`Ge(AQÎT?\(kbGe?GQs(9ћnGQz-=FC 9z(Az(A,Qpt;wH?5 y`bAk.ԻF!Om]OCd5 yb} z(Xyb9Dg?!y~ȣ1w㐧@<vw?䩰qȃawC Ώ[/,}~oRfwI:ߤ?~ vCqaw? ~9Gs`w??~9z=vCρs`w2ue~Ap?,,3Y?bkC_;ځz1X}b'C_JR_~Y})bAO7f?\< bkAe*d?\bAY=Cρׂ,T?X}-b9z,G\.C_.eˣ].?bȧ\Wb,.xCf4ЧY,bACρs`?\bA8.sP!sr?\bA*T91?.sP?~.T}bApU9.sP?.!bG\9>C9pT?!s臭C2C;r<$s?䙱syf3cC9˓3cCρ9s`?9,O:.sXu?\栜]e˓]C:.sXu?\o?`azؾ+cq؞$o?SQaa{O`؞a{ƌQ0la% s$e Nwtzva aoa/a{57lo=a c)wr_I0luܰ5//`htv e] =hPv@Oh,Ov@Ohv@?itvithtv@ǁv etv@ǁv@hPv@ǁv@ǁv@ǁv@ǁv@ithPv@i,q] Av e,q] Av.] Av4v.,m<@@ u n')P@1@ \XNY 貞@1\XN ]`oنC_F>l 'ly}Ű}G=&}Ű=lazlo3c-zb ۇ݇<0l\ ۟0a0=(9A0OW21a=`^f ۫a{=3 a2 ;aGa'a{7l/a2 ?\ sƂa{7l`1l\qo ;a^22 qX0kƂa{=kƂa\4 ۋa\4 ??}e}^d ۇ s0ְƟMa{}uL.w>lW7Ww^&5}^Z ;a{;lE?La^2 `U `׼Lj׼Lüe:7T'[d Bm]sM -{I@F@ 2> \sMl lAdlAd[` …:X B +[KdAP@ۂX B`,?^ 8Ȃȷv[.!A@ۂX ? :? 
:?:8ȂpN: : !q!q!qB thAthAthAP'YgقXRqw#@ض \R%:.@G@vC E!B dl B#H B Q[.q@ǁ@ H B k4[Y؂pHK!--lAP{Y؂"@Vɶ \=ҁlmAdl B)B>@P -jH_iA%Z*M … -~!hAP@;Ђ. ݁iAZZZ4iAthAthAP@ǁ@ǁ@ǁ eAthAthAthAPYKdAxZKdA,q!ПiAPUޑH; ʟw$DB'\(BߑO; ~G?BR ; ~G?!Е f'8-g'8heۙ %e O,q(O 1 %e ~!?!A@ǁ@ǁ@ǁ@ӟp  ~!?!q?!q?!q?!q?! )B ϳ옳?@i@џ辏@ǁ@ǁF@nb  . ;l$,+AFd$PFd$,+AF@n䳑B ` l$,F ݥHX #A`HX #A`g#/ q l$dl$PF@l$dl$d+,! YYWXC^ mu|`XWzͺ@\(]A f 6[Wumrb]A f    Kʶ^ Kʶ^ K+l뵮 ġl뵮  .,z+d#j]4:4\X1H8H F >F  وH8H F@l$th$c q AF Kd$d#j#A`%28HX #28HX #A Q .,[lm$,q  K[ 9 ġl@   9 q q q²F@l$th$th$th$th$dl#28H%28H%2\(#A +68HX #28HX #A`HX #A`%2rh#A`%2H#A@#A@#A L6\(#A@#A L6:4\(#A L6:4U q q C& .,ml$,q(ml$,q(ml$FrF@h$,q(ml$F i Kd$F 9s AF . AF =s@  =s q q܂`]A&u^zG]ziRWu^\(]A{)]A{zM]AzG]:zG]A@]zG]A@]A{\(]A@]A@]A@]A@]A:\(]A8HWX ]8HWX ]A`tJWX ]A8szM]AK SW讆@W\(]A ]]RWS .,+tO]A .,+ u}7lH2 am>~[3ä+u>?YG0 2Oa{L)=Lttz SOa0 폒ad lRe0l0>C2l$a:F:٤`gT [a{'8loȇ{Fl1l1lMఽ?L*aa|NPe0l:9` a{>k^*_F0y qSe0l/qSe0k*a{8k*a\Dtf&=°+Wl>Ɵ-I0l̆ 0lG0wlkäGlޑ 0yK0k^za{76k^za^# ka^# aވ2eB^]R̅ {]ĕ L l@dBl@PGY>؀Ș؀ uT*A@6 8Ȁp |ŷ!A4e@,q!/6 \(B` |!Ad@t~d@t~tr0qB th@th@deB@B@B@…:98Ѐ8Ѐ8ЀpN!AdzߩYܧXlm@PdzKu<+  l@dlB#H…:@G@66 \Y Um@8@6-\Ձ,1dbA 4{ .@v.E ==jgu d{ Y&cѪ@AVS UjHy* KURS U~祪 Пj*P@ȧ П0*|  П0*t*t*Ҥ q qB    . q q qB K*,q *,q A@পB ?e%~ ?JDA_(9O %9k%~ ЯA\~ ЯA²͒͒mlH ġI Э%%ec%~[ ġN o˔8Hr8Pr8Pr8PreJ. q o˔::::q <6:K~PrX~e/%RrtIA@J.,,96K]Rr2@ ;wHr/,!]XC?|? 
^Ƚj?<0tI^k?<0t?A8@n]З2?| /,} /2ﭴ ,[iYuY/䏴HȪڀ@6\(m@ .kZGJڀ@ǁڀ@ǁڀ ^XkZ8Ppa km@`C km@`8rbme/@/j^ay _ay B_EY@?Ћ_X@?A8]ȦЗ8]XKt?A8@6²ԗ8@?ġ.,}`C]/tx s_8@ǁ:²ԗr^8@ǁ:?s_.,q;]X9w%B]ȹ/,q%B]XKt`.P8]X9%?A^8@|?q2@ǁ_9d/t^8@|刊/,q(GT|`C9=d%刊/8#*?C&^.,q].՗8].,q%?3W^ /txg?q/Pzi@/M^/Pzi@x&/PzF =~R@x?q@ǁ_=/t_^8@ǁ:?#k^8 uGּ?A8 u`.,q%B]X=%d/?.yv_]~@?rcx Tmvutm_ǶǶeccpǶaǶ۞?ccۮmmAؖa6d|l{blǶl>ۏ\~a1;G/?=bOm9ޏmk?kcoُ1 |?#>.:ȏm3lc[8l |l+?Ɵd۪ǏmǶ~.)c|.A6s calc[nVk~ \Ǹ!㚇lc\ \Ƕ^c[K >u*mǶIǸ& )K >u{ۚqM@RV %:m%Ƕc[Vo%ꋏyyac^c^c[1]R1]R𱭾.).)}k^d\$0xp*փ̌n߃[ s#L;AGփ,P$0xA(,qI]` dM!%]`.0xa(A py!0xПj 0x TA.0x!0xП0 0x!0:<å : t 0xq p<8@`]``.0x!0xalazؾoc`?Q'}>o?Ӈ)0?·{>>LdaAϨad!?ϧaisK tM'0Va{Kے@6ږ,\,-YdmB ǟ_B ;/ TCC?w^@p TCCyi`.!i`' !i`' 4i`th`th`P@ǁ@ǁ@ǁ e`th`th`th`PKd`nKd`,q!i`P:V5.,,vK~i²eb@Q5DC~i!kb@Qpgb@QX};8;8he%e0(v,q( -vtAC`C iC[KʆH!q!q!Э8P8P8Pp]J2Zj!]6p hCش>²և@ı!Ѝ(nDn>ݥ.@`B$$]@lv7 p fwC` fw!q!{ndnq!q!qBY@Ȓև e},?2YYև@h}dIfCd>\և@vƶ>rbC ;c[']>3!q²ևևY.,hm},q(hm}8z@33!kz@d3z QVz@v3\XYȒz^;.,{gdgl=C38HX =C K2Kg,q!Az.!AzYYpaj=C` 쌭g,q(@gdgl=C`Cj=Á3:3:33!q!q!q² z@Pgtgtgtgtgdn=Å38HVzKgdn=C` JVzKgPzKg,qBKg,q!Az@g,q!Azgtgtgz Jgt7N=C@=C@=C GE3:3!q!q!".,Pg,q(Pg,q(Pgzr z@g,q(Pgz g,q!У"KgPz@NNg,q!Az g,q!Az@ONg8z@ǁz@ONgtgtgtn!@nP@[=/! 
nnn@n,V@n8@ǁ@ntnP@ntn ntntntntn@ǁ nKnPKn,qr>@n,q(n8z@3!u n'gPz@13!3\XX貞z@13\XXU31 o}z7le2OaW?l Mar7  }?ǰ>l ۟8}+ǰq>lO=a:{0נG0 z wð5lx ۟]Ø[?9r7 3a/t^qsӊ_Ü[Tet7 oSøWa?^t@q')İ7_øć?1l 㚧b^a0'y?1l0y]w>8Lފa{5l﹆}v7ފ0Oz+ְ=Lފa^V ۋa{;k^ފwZü5/oaV 󚗷b׼bw׼üwü5/oŰ5/o0y;-7iȮN?P5;-8p7;-8;-ٵiiqNp"ѳ"iX E 7;-K䴸PN@VWvZ8iX E%rZIJB9-KdeE`i8i8i8iqᏋ\;- t~tB ttt贸Pl:-:-:-.9@vZ\VKt^1űb`I+=")"mX[Q[q+rxbmESFmE dk+.y@ǁڊ@6?˅:MȒNw4A ;-YiqN"lP YDiȪN@iv@#цȂ@6?g?v`^_jW_ߩ49B=G_ߩ49N=DžseiJhs\(=G`Ys~_@9Iz@ǁz@ǁz.M999.#q#q#qB9999.#Az9bA=G`%słz ,q#/s8Oy#T~ziw^Z?4i;/~#K֏ e;/^~֏ ?T~֏m옵#ġl#A/,gm,q({gmtDG`C@kG+ Z?K.Z[?ݛXPq Mh쩵#q#qBY?Z?Z?Z?.#q$J@7hTPvZ r pj%HW .l;$5" .t-L=Gka9Qz #,Zz@srϞeQKqܸg=G`YsZ8Xts2s\Xvk[8X8Xq, d{^B=[b@k{m#?l=YY6Jzb@{gDž{m#>==.,[-dlG@Džrw_v XVvwzSqagFl%ܒ ?K6æd#%aSȗpK6YZ%ld#Џ7J6K$,qd#%%l8HX F`$JX F`$,ٸlZd#A@Vl8Mld#ġlZd@J6J6J6YZ8P8P8PqaٴjF 'l:l:l:l:ldBI6K$dOmF`$%ld#A %dOmF`$%l\(F`$%l8Hq$%l8HX F 6l8HX Ɓl:l:l%8P膛@ǁ@ǁ@|,t(tMF@F@F >l\XXXPXXPX%%X%PXPX%JX F>l8Hq$ZX F`$JX F`$Rq %%R8P8P8PuKF-%]Qq$^lzvEF-%J%eJ%RX֭$lQq %%R8Pq$R8PQ.%J8P8P8P8P9/%%J9/%%l8Hq$%l8HXPNY[X F'l8S֖ld#e=%c(t;Iƅld#$%~drƒ@ldrƒ@Ϯ(߇;%o}z7le2J6OaW?l Mal  }?ǰ>l ۟}+ǰq>lO=a:w:N m>=܆듀cd?؆ϵa4az iÜi 8yFǰa::8̙a/aaδ 2 8ӇqS1katXpWøad o d aae 60@ae "{cۧ:I2 d^aaΟh&Ȱ5ü%e 㚧 dlic(9L'؆E},4y|d ;$5/0yGyK>r#o׼#ü%9La^ |d^x|d׼#{׼#ü%ü%5׼$XL~bsP CI=pEIFIun1I i1Ʌ:jȒb@b@\(1I`$l+-& ,qBIYYLX 1I`$?^8HLrb %& ,q$ $%H1I@1I@1I@1Ʌ:8hq$-$$$$lr@1I@1I@1Ʌ:b u3g`IxPB ,)@:eٳ$ݺ$N$g J:eضB t(&Pg˿_<ڿuB djSJ`HH [#R٣ڔr:)%R9B)B dkjSJ 69=V0c%Я{tcB9Vm tA;˅7v@J%/mà%ПRig tA;˅S*,eQO\(;K`YԲ$,~i@Yqv@ǁv@ǁv.MYYY.%q%q%qBYYYY.%AvYvE;K`,%v eg ,q%oW8r?Oَ{@#La.,{rm t@#La.,;sm t@#L@#L@#LaaN#̅:X #L_i ,q naK.naKtF²FF@wo486͢&ġl&0%eo088~F &oaaa.,[m fSaaaaJ(m tSH#L`Y e˱0]6z%F ca]z^@z?.IK 7BX..u vH{]ҔBy].u2%q²^@^@^ u du du dui˅eWÁÁ. dg]̅5$]L >bY[5D]LS.&k dg]LЏt1|Zst1 #@ǁ fkTJF²2 ˆCPY[OP.,-C tx)C dcJd2@ ²2@:8HX J PK$C ,q %A2P. 
%A2PYXra(kJ`d(l'-C ,q(e-C d;iJ`C(kʁ:: %q %q %q ²Q2@:::: BPK$C dEkJ`d(% %A2 %C dEkJ`d(%\(J`d(%8Hrd(%8HX J '8  %q %q %[-CP2@ǁ2@NI,C t(CP2@NI,C t(C t-LJ@J@J $\XNXXPNXXPNX) e(%e(PXPNX) e(JX J$8Hrd(PX J`d(JX J`d(Rr e(e(R8P8P8Bǁ@/jRiJ ؆ eJ )%cRiJP@O1iJ ,Z@O1iJ eJ 슦iJ thJ @ǁ eJ @ǁ@O1iJP@ǁ@ǁ@ǁ@ǁxiʔ8ДrL)ӔX SJ`L)ʔX SJ`L)%ijM)%2zLNSJ`C9lSJKRӔ讆@W4\(SJR]]ҔSR.,PlJ tOSJR.,PlJ `@w5Ýag>O5*G[3l  aҨ ۟}'Ȱ?l> }ȰY?l4 ۟ a:9Ngӳo>=Fe?a45*'az ۟wÜiiTa|Qr sun[K3va/a\Ԩ 㚧Fe05Oʰ5O0y*V)0;X7|IdIʰ#?LahIʰYaR s6X^|CI2lćy0y)V~+øX5Oʰ}04ka\T a\T 㚧beޜpqa{ne a{?l/)a&u/&sgUI2lo1%氽1Խm65u/rl ۧz}ǰ~۟}ǰ>l> p}ǰq>l4 ۟ a:8pN ׮0z*.ʰ8_ØwUO0? 9r [0װ0]~*øV5Oʰsa:5kna{=l0kna\kޕa]Aޕa{i8lЇ`1 ]ϒ1 ۛa{q>l#&0Vޕa{g>l̇9:yw+|׼N wѻ2kޕa\ gFø]5OʰY5O0yzWa:@v Kaa/:6lo/e/ ˤa{:l4 P0> yð}lv65/0yy^_fK 5V#%1%pa#@F*@*@ֽV2V\(K ;_\J\ZrT.U.,r ,qB\قZX K`T.?^R8H>* r ,q%U.%RHK@K@K@˅:8xpIr tr tr trP\\\.9@ r ,Y%\9J%dEb9N%Y%դU.*XPrNr`KBK y\.ԩ@/ , dQkK`&r =/lky ž@6[.y@6Ų!&|tB deCL 96rcC̅:׆@tl dm+2l +2l얹Pn@T[&E2,xƟiPUJV@<s<4 4JJ\(+M`Ɋ4N+M`4J::piJ8J8Js4444J8J8J8Js4%8J7WZiKd ,q&o\(+M`4~s&AV e +4%Z8){)M8:i IN +p.,m taEN +p.,[m taENcONcONp{pЄsN8Ȅ讆&  'ġ 'A'., l ,q('!l tKN`C9aNKpKʙp]фXPF؄s M8M8M8.h¹ 'q 'q rN&@74:4:4:4\XvۄXL8oi ,ˤlʷ @p/&@/p=Π ²;>@hBlL Ul2&]*}6jM`Y&\X+g}6zgȹ}6zgȹ}6}6 22\(M `M `M-0l., d h@@v5\(M`Y}&Me7^}&Ы % duiMW3- [3 IL _- d'eL :, d'eL +Zg.,- d'eL hgnL`3| x&AgK$ ,qxBgK$ ,qx&/\(L`CbkL ;)gK[gIY<XPZ:Laܰcqpܰp箃qn^X2|NϰwͰ}ps 㳎&a{K=l 㚧gP㚧gr05O0y|qsL>üe5/0y|g^Fg׼L>3k^&a^2 '2üe5/ϰ}3k^&a^2 0üe5/ϰ}s2[~/'pkO #jO>/ E-? -?m dXmP@v2GBY~[[~YX υU'AY~4e ,q'-?X O Kf[~Kd9@ǁ@ǁ@ǁ e t~t>q&-?-?-?-?}@O@O@υ:tN8HًE>XXSƁX8P"@a, t(P9'q'|.)@/ d} k7^7^}[XHyP p˃9<(%A_ g?9B ,x dg'Q`؅:*RN@$ N uT,}D~INN@gI^N@gInN $ D$ dEv_,js`@Y}6@f66@fftlF~_(L6@ftlF%e}6ٌ(+::pif8f8ftlFmFmFmFf8f8f8ftlF%8fwڌKd3 ,q(Я](Q`lF~}(A6 e3 ;mF%8)mٸͥ8[NR4({KRB.,3lP : Jn iPϰA)M! 
J~РG J.hP ,qA)Ѕ Jc` J.hP ,q YKqKtrdrj@4(8;6(FA)ġ߱A)5" J% JҠ8Р8Р i.,gzlP 8@ǁ@ǁ@ǁ@ǁz!]ESXP9 bҁ+PJ^C+z2C҅H%In~)IP@)I `K$)[-I ,?r Ē 9K=$)c8J9$)c8K9$)q$r*Ē@@ %I d,I d,I dEkI҅刈%I%IXXX IR QK$)%IXGIR QK(IP@6V(V]Xv]Zu UG|[(_ɲҪ@-[uȪ@XuȪ@vVm٪ : ,q(oV8HuX Q`TG%R](Q`TG%Rm٪ : ,q(: dydQ`CلlQ #K&d(q(q(UGUGUGUGMV<(q(q(q(q(=UGJuX Q {0K: ,q(=UG%R](Q TK: ,qBK: ,q(A : ,q('%: ,q('%: d-lQ`C9)aQ Baߪ I Ru8Pu8Puܪ@ǁ@7T:T:T8r~Ūr~Ūr~Ū@T8+V8(ġ_(9UGJuX QsK:P@T8HuX хR8HuX QHTHQ@QHT:T:T:T](Q5UG^TzDхRzQSu(UG^T](QSTE-QSTz=?}>>M?6ضcccc~c.mmmύm#aa?=3>}j~_u1c;\G۞GǶ'ǜu1)!9<#:c煏m>理.cGqu1y>}J<\Gm>5Ǹ:c\ }߳݃1v==H*̏m==Hsacdc|NVlj =Hsb }lk?sb?7l }l+?5A qÃ1yx>M>5ǸAkqÃ6_~aaq]W綺c[ym 5l?1Ƕc,c^amǶǼ}ɏmme<}mǶc\< O}kq1yx>㚇c\<}lk>5Ǹy<5{>5{>>5{>5{>M>5{>5{> >5{>5{>͟gg/AKyzp=*pG= ,Q5]`ஏz/PG=XB_zq>A4zq>AJzq>AG=8@}ԃQ:GKv}ԃ%>A G=X,qQO>*pG=Xt}ԃ%>*pG=ȝG=X,qQ>[mqk-,9*I??[zA{`A>t V`9 $փM zЫr&H]#Bփ%`]`.z5"[Kvփ!za?XXI`C9#&փ%rNLKQ1 tփ%弘[`rhLB`C99&@t zq`Al#dl=8@V_%l=![:l=8@փ/t`z,rH]þ`Y`[azG5dz =}+pl=rF]bC cKXNH dK`y7l8l=9[z փ#I l=9[2l8l=8H V_.%zq`AA9ӑ`AAYbK V`9#փ,ez,ݾ 4ٷ|E4az[zi¾۷d,փC ߖ%zI!zol=Hz փ,$z][`AGl=.U,V.z][mYKvփ%`]`. 
[8삭Kvփ|[ Aʄ`YeLX noKLXz„-LX A[ A[*qd#փLX:0a=8փd MX8&IɄ`nza7a=NJ&KvVnzLX8&KvVnza7a=XⰛ,qMX堉LX rD&Yʄ`C9h"փdzq A&y0A&rD&]փLX A&]փLX +ӑ rNG&K9t փ%圎LX rNG&pÄ,qMX ݄t փ% ݄,qMX8&= k LX:0a=LX:0a=8V?8&Axh&At 5YzCGd=MVzs$h,+~d=94YzMփe=G&k 4Y:d='4Y:dyNGhXrrThX]MփBMVzcUh,q5Y8욬]`îzad=XP5K`îznh[ˡ8V9+ZׄCAph="k¡\ ,[z5?ZȁC+lCAOzġlC+P[ta2a7`8[7tn$6aS`8L߆6Oa`؞a0{Sܞtiv=k׳ova?a30=Oaλ\Xݰi7lVuta suх5l?k.a\ta Løw_ø5Oְw0?:tQ]#v}ܣ+xAڮRqڮ@mWq+Kڮ@mWq:@mWq+]Aǁڮ@mWq+8P]#v8HtڮiG Am׃v8Htڮi+Gv8H< m׃v8H< mW8+h#9k_F԰0a{crV cY5l[D0 :zP> #mUY8g|VAv q*xA>?.MG > =OA** {V9y+cSuKӃj** [V9= ;~<)x5d۞T_ak/Ȏ=O-y mS=<AMycSo{=Oh/8GS}A [VV*l ZYG4dʪM= VV8h/xAʪ5<⠩)>k lOU}:>gd> lOUЭ.lgf(nuf(uJ8>x|d}&+5YA KMVб&Ai>RJSt :daiRt :d=(MVq&+8PtzP@MVq&+8P4Y#d8HtgiG DMփd8Htgi&+d8H< Mփd8H< MV4Y#dh&+xARt@MVq&c˚@MVн7j5YAǁ[djQt :d=(MV4Y#d݊&A -8Ht+ -?xZ `+袦`+[A5MXom K +éMXAjl q؄d&e WcVK<ڄLXA.m pjVLX#2a8Ȅ< փRy*x|oR}VR*ȃTAJG TAmzPV?jh o+U5R=(+Uq*8JdVe q*fT#R8JdVY*>T#R8JT#R8+R8+R{*x_*ޛT#TAl :RZl J1b+U- ZTAǁV ;RZnJth :Rc~Vc~Vc~V;R8+R*x_*(TJ< +UQZGdzPV;R8J< +ՃR8J< +UR}V@+URZTAǁnnn;tK=(TuKT[tK]tK=(TwtK[*;[*xԭRA-ARA_l-tzPn/ :tK}%Fԃ\RAǁnzPn@TWbtK[*ȉG q[AGYG[*x[*{$>HTGLn@-t?n;.tK݀[A-tntK=x<Ӷ[*~&԰?5/0׼R[jk^nayQ\rK s-5YrK s-1P\rK s-5l& [*ߤ-<MZpX2vKbr9[*$-/ri[*;[*H-ER#rK=(T7vK8-< TR#rKyw`S[z䍌=OAooK^܇?,t5<y9c!S2ya!S(0Mya!SF!SF )AMyb!SF!S}v (\eBS 6lG[HSi)UNj.8Pd*@ӃR9)8Ptr lSq)NUNj.xApAr q(܃ 'gSFGrzPp# qT9}T9)Rdk*r (MSН%YF,QޢS*[T9=)c-UNA r FSZ=UNJ*{QTN#R9})xA*R!*@Sq)K*@SqAUNAǁ*@ӃR9UNAǁ*r q)xA*{RT98H< S=))xA*{RT98HTNA>K)xA*)xA*Gr q)-UN#R9}*{oT9UNCYV9Qtr :T9݊)8Pt*@SqAGr q)V4UNjF8xA*[T98hFc\*cb*?xLY

< !SMl GKdB t,d )d m!\zxglRZ:[t; H ۯa{la{_ L59=UH0\]*ˀ4lH](ϓaUc0 ?JX$ϑa)2y>C4;1l:yĿa/ayi@&i@pBi@ƚiskaoHi~# OvaɎ4lo8i~#}Lva>E;Ұ>loۯ>&;0fɎ4l\HV0׼fleGю45O;0SUtϼTAUуR}楪(x|`HUt* R}B(xARU @UQq(K@UQqAUEAǁ@UуRUEAǁ* q(xAVT8HU< UQ +(xAVT8HUTEA(xA(xAG* q(-UE#R}vTUESVQUt* :TĦ(8PUt׎@UQqAG4< UQMlnTEA7* q\UE#hUE#}(xᘰ(&6UE#ǘUEA7* q8f* MUQ1pgU* :TnbSU1ygUQq(8PU1gUQw:TUEAǁ@UQw:T=W*{(zGiCV}1 4i# MQ/m#z𘭱(W׶}EQw}EQw}Eѿ6 \m# N6 ol#z(8Fdl#zP6 `Qq(&mD7ۈӱ(x±(;ۈGIۈGFdc6 ml# i# OmmPg DgmP kڠ 6AilpXdڠ Ot=(mP>kBAj uIPЇS:<^()t'N{5t =xPS(xxlPBN{5t 3la{L)a۰ԇ>lSsw{L++3l0 ={a{?p۳=lݰ=\c1z;n3a/aλ&et  c35Oϰ4i05Oϰ:l65O0<?fXt c3lE>j{暐7c sM45!o0ׄA㰽>5!o0ׄA7hkBޠa7huA}Lޠa7hp`0׼A^0׼fZ 45Oo0ڠDgN-};slۙtљdΜ egN};s.:s0f=~;sٲ3'Ȟ9AW9Ar igN ^3'xTG%h*>9 9A6; ogN O;sUq3'Φ9AǁΜ cgNq3'8ЙdΜ@gNM;sUؙ<5xdΜ ^8șG4x< g DGg cgN'::sؙ9A AgN;stم3')v)vKQ*;x| (MOKtg*T9DσR} 'x|I_T<J< OxG UHOq'8Pto*9xUk _d A<(MM2k<_dWk<>B<|^ZhطhAfE3A>طh&8P4D3A>طh&xAf<- q8- q8- qh&ݬh&'E3Aw') YOLfNR4D3Ac( ;ILOfɌaSo؞a{ 7Lư0} 0}0}2 3>l=驪=l=tdۃ=la/a{9zR?̙փM3nX4 c43l1=ƚifTNƚifkaayfi&ƚf{zc sB3" 0׋,4㰽>" 0׋,4fEafuA}Lafp0׼,4^0׼&6eB35O 0<-4 ayZhi7iL3if2 c43lifka{r-}L;\7\fv}a-0׼uMa{_F0< LL3A44< LL3XTٕ*%U)AvJ >ƪ ;/Vٟ*%[R`MjR*AM}*%J*%U)j:(xTlY<*AA4d3Ϊ [V| ;rVه*%ȮU)j:(8PdkΪ@UʃR٫*%8PtJ KgUJq*%ȮU)j:(xAAA.J qtЃ aUJGJyPA# q*%gCa3| }d%%+JaهdA=lOJ 8KV>?7㧢7"KGLXS%#& ,"KX ,tDž!%KX%xA;.48AX'th` :4aith` :4<(Kq%8th`yP@Kq%8 ,#28t{XGd` F˃28t{X%68< ˃28< K ,#2m%xAt@KqAX ,Aǁ/_h`y%# ,AǁiXnth` q%xA#2rAM8t˝M>x- :ԖyұAiK'%At- :Ԗ<(mI'kK%A-yPڒiK<#Y[< mI%A- qAiK<[[< mI% ֖8 ֖8 ֖8 ֖8 ֖2$xL$C%Aǁڒ@mI}0kK<- NmIq$8P[dڒ@mIЇpjK%Aǁڒ {R֖d$ I0$ I>H3Iq$ I$L$Aǁf _L1|f3Iq$ȇ6HH-wG>ER>t[T|$##J>tS{}|#A) BȃSGG8OO- BHpÛDd>\ǶK?}@VlOcwcwcw2?emc[?-n-ll~-il r6l~-d_wc6!co?ƚ 䏱!c/a?ƚ m?cy@k2?zcy@k2?fcp1oz\׻䏹wiɰ]Z\׻䏹wis]Ғ?}c]Z\׻d.-c]Z\׻m=?ߥ%5KKnc}ᏹwi:511.-y Ғ?ƚ䏱!-cucCZX?l #lk菱v!c[ma|䏱v!c[[mM? GZkwhc[mw̵ gY֚cEss,lC+#5Zlk1<#?ƚ|䏱!cyGֆcyGk?u5cC>ǶcC>2lTc}N叹)?#v:5Gk~ #5Gk~鏹ws?81.c]>Ƕ˦?#5Gv4l<&_*nb?#?ȈJ>v1|TG~p#AfUdZ%;db%m;.AV~.#|y!q#?xa?,MG~]pG~!G~CG~? 
;( ᒏ o$ s?Ȧ#?ȫ E~2E~?lE~2E?}1_>{AAvy&e,i,HpA~dM~t ~d;B~tA~d#N~tA6& cBxܟ>Q }J؅'?xT.u<&$<#'?;O~1 '“Kx]x2“IG“t!gi&?8il;iG$#Жm:Ж][|.mqص%?xaז=iK~1'mq8f-#dƅ_OO%a{|~u4̵a{gp'Q$25OȰ)8locS$1M cS$25O0 ay skck^"I$2lsK$25/$暗Hdk^"aE0׼D"\ %暗Hd%暗Hdk^"I$/H$E"*E"AF"5)SD E"6ȬZ$dZ- 6Z$'Ԃ[DZD"#yaHD"AǁRzٽ#{K=fXdR ok,xPmA6d5G?29l?YQǏL(A?2MQDYdjRd;j VsنAM9gYtxPj Vs9A:TsyԷAM8h%#FyP(Aq4Jj%xA(#RsTsپ#Ax@5GGz9#Ado5ǃzij [ZVs}<#2AGoc䠏n9xԭ#cQ}HA=Fu+HZDGJ$HHg@b9DAp[ -Ah1:Z <-bnݸ-#ǻq[ >-b-bnY >-b< AЧeZ Gd1qAY Gd1q 2- 8bt@Aq  v -A-A-Ach1h1xP ȷݝ [Z NbAZ -Aw'i1:<(AXZ |gDŽ-Aǁ b1cAq 8b< Aoam1q8l1--#-Ahi1q8l1 EO,*;IQAЧH NRTtw*ݝA nPTtwO&Q0׼D\ {\ sKT0locsKT05/Q=5'\(*ƚ`ka505O0<X|Lȯ|k^ˇ%v0׼\|Lay % sK805/fk^ay 75\ sK80l2 Z8oZ8dD-Uk1p ȠZ8/X̪A~/XL}&_>(@p 8H8< @8H8t(_K@]8 op ȮA- dm@xjlY?7^Av x6 ȾAvC A6 %jm@MQkGq6 ȮAǁڀ nm@q6 8Pdڀ@m@vkG?<[<5ڀ8H?G4< m@+k>H3@ AH@} Aǁ?,M Aǁ%: J8t(: p xA n#R8< @A)xP |m@J83n G$qpA G$qp xA S8< )K@qp 8P8Aǁ@@-xP@@w nR8t(:<Me8H8]j*3xA:(qTx#DŽ#|rp xtp  Gi7 p xyp  G7 >H@qp 8P8]p cp}G@qp 8P8t(:}GP#}G}G@cN )1a@+t }H@t }H@t }BN ~N xS ;)d2v PScN `@+v 8v yb@1aik3@ f [@6<(3@g^l 6<(3@- < _56wAkmxxx;feixx ȿmXk3@Z rc~68hcژ<⠍#ژY?ȍAǁnpc~qj+ pZmt%p}]AI>W^mt`jd\mAnq}qj>&WW?xx}qj _]zLjW:\m {{o{tk{q{tk{q}gAxaA>?xㅕ8/>8/P=l/a{=oV>b򰽔mJ 7MAǁԦMAǁ҃|MjSzq:)=8pSz]nJ:ܔt)A8h/xA҃5ySz1MAupSz1MAupSz1MAupSz1 MMAǁ҃(oJ:ܔt)c6ʛ҃MAǁ҃7nJꏛ<&)=xA҃z/Ӹ^<.,׋Ń[z^/׋}A^Jxxw^/䥄׋yG^qs˵A78v㹎n8uv;xxA78v;xx=~r'氽܇>l/aEǴ{^Ḃ"j|^ {^{Q?l/a{e {Ze=lƚNayLφ湓{ެcs'0 ϼ^WywYv.0etۉ>qum']Aw?:wY?]Aw?:]`:xx]#q8xuƻ~puƻ.aa{۫}^i҇>la{?E1ć>l1~^0׵u˸z?5]Xe=lxs cs7ldƚ.aycs0xApyt2/~PF>pyt/At<:G.~PF>pyt8hyz4蠯<:c.ށˣ^GyA^yy;x/ˣ|jytq A6Zps18hZ ^_?U?l/a* cs0<*??wNX\81őP`F#c1őo8ؾ 7oI'ctStK#c~&6%Ƒ`8q`8[fff/l2)[2Ag0c|Q?e qP8 feS;88c,㠟],q8Xơ㠏31q(q8 fez18`g2c80c80cq3{1z1z1X~qЧ{====}njge1tRY*jKAT|\Cw86>f8ȣ<v8ȣG3y|?>+>qͣٶ< qͣ< qͣa< qͣ?Qk~~3;|? yϼggg^{3;|? y϶ϼg^{m3^lۤk~/~5|;|g۶gqg7BmKmG{l/~#|?۶>6?s ϶}϶϶ӴggIgAg\Bm qͣٶUٶq< ceϸQkϸQlۮkϸQl۩G~5g\(~5~13Zg^ϼ~#3^ yϼgg^{3v2^k~/~|5|? T}?  
(|rU}v`CN ӇH|r$U}>Lr,U}v` 8߇ۯq 8A|X߇Q!߃U ߇Q!_U}a|rS߇R!7TD!T} |r?H~߇pP^}70|zP}q@!T}q@!T q/? 8ۃ|+Sar2{7_nXa^}M |zPD!ߚT}4|I^}4|}߇~B7_||r@߇~_AWP}|}~WP}Xa^ <q/x>,|}^}Xa/>; 8߇e2{a 8߇eC 8߇~}Ga;CS߇|D߇|{߇|zP ߇|zP}q@7x^}Xa/>T|q 8߇op/>,|~ 8^}K*>,|q ^}Xa/>,|q ygU߇e C =(C =(>V|{߇>@߇޿E߇|Oq^} |Oq }2Oق;*>,P~2Oق7<*>,P~ƣCgC- }2=*>q 8_;C =(>q |zP}q@7X~䣂C。C =(>8Ч{(q(QOPDO}+>9/"}HC"Ч^T!y}Ez0M`wHC(p އEzr}REz"9>"8>"8 ˕DzDއ|6Q!}gEzDއ|sU!M އ|sU!M}GuEz媢:C>C>ӛ:rUQއF!} txn:2Cgw}ȗuxV`2媢:8>#,:8>#,:8>#,:rUQއeC?¢}GXtxq;8}Xa>,wxAtxzP}>t5݇~@MP 5݇~@M_Q}C> 1jiC;C/Ĩ>8kBCjyaB5`lCjyH5`lCj=>,P.[Cl2{M7X.[ClCW`C {5-sz܇~B0j}sz'܇ޫA07sz'܇,Pn(Ce`2喁8[ >N8[ >N]ۯ2>&c1tGdl}@} c\qͳ;5*1D=1yBW .c^yͫ{LU1yUqǼUۏYƼUWwl?aWwk^Uܱpek^U1ݏz8Y~O>/* :*O>FgqGgqg,n3,np? 'AΥ󢲸28(8|˷AҸ|;˷U r _\] r 7)] r 7m\=m6.sm{7.^uǁ 7\ zXmoY.=,=,mm[.^u`]'`]'Ay|,2*^u`]'qP6ȍoo|䛑˷A?|ۑ˷U ۠_IX rcۋ+ ˷A|+ ˷A|+ ˷A|{Q1~%a6XA1eT˘2,mЯ$,^T6XA۠Y qP6XA`oeTmʷ2*EoeT `o| ۠ǁ۠ǁ.Mooo/|8|8|8|{Q۠ǁ۠ǁ۠ǁۋ}w`oeTmʷ2*yۋ*8|]RoeTmJ] qP6XAۋ*8|,mʷ2*y`o|-˷A˷A˷U zX zX ˷U zX 8۠ǁ۠oY zX zXߤ87i2*}E&-XA۠3X qo.ڸ|,P~km~v`o>`6Xơ۠3X q(?q6 oe/p\=mmm,^,?q6q`6q`bA˷A|8|8|8|8|˷s\ qP6=o>=`6k.}mЇi,}mЧ,˷A|˷A|{-w6okEooo.ubhmm.8.yzmCtrۃ,lm&.m&.lmo.lEo|su6go|Tw A>f9Q A>f9Q{)t6/W&j|r6f6KTNDm_Lyʉ : YӉڋN2N2NA?k2Q,P:Q&2NA?k2Q{)t6XAڠ5 qP6gM&je qP2J8(Q& mC7 mO7 EUh~g6SVh m/ m/d6 Vh~9e6q`*AB8BWh/Ϯ=yڋ+A+A+2+2+2^,] q(] zW  A︰_ A︰_{p6k~/c6kqa6m kqaAoS_A_{p6Xơp6Xơp6Xơp6Xơp6m ke]kަ`kY8cs2qLڱ}F'dlblc}8ۿ)clcKbl}&'bl~5ko1ykǸٯۿKwhEuheԡ ?qP6q`M6=d|tMjAn&։kAn&䖡kAn&kY r5 Q\ r'5ًfkAn &8&{Q5 ߕ\ zX zX r5٠ǁ5 ߚ\2䫓k2}Q7|rM6XA7eT2,㠚l=dM6d~bM6wd/&kAX&ku2 d|wM6 d~`M6 d~`M.Ubl.U8&{Q*et2XA5٠_,XljAg&,㠚lj28&{Q5`deT =5ً8&{6k2dM6Ȼk==pi&8&8&{Q5٠ǁ5٠ǁ5٠ǁ5ً===^TM6XodeTlj2y5ً8&NdeTl;] qPM6XA5ً8&,㠚lj2y5`d&-kAkAkU zX zX kU zX 85٠ǁ5٠oY zX zX_8b2}EB,XA5٠3X q/.&,P~l~!v5`d>`M6Xơ 5٠3X q(?qM6 dea\=Țlll^,?qM6q`M6q`MbykA&8&8&8&8&kˏe\ qPM6=d>=`M6˱}˚lЇi}˚lЧkA&kA&{wM6d1Eddd.uMb.ll8[yzlC:ry5كll&l&llolEd|suM6gd/*{  O`lOR^,  zx 򞔃AX8'Aޓr06 c|t0b6`lC6`lC6`lЏ 8k8`lC6`lЏ ^, qP06Ice q`ce`l2 8`Af_/*'A?63'} נ"} ҍAE23[$} z} 
-נ"}ko̾=̾y׋冲AA^qbkkkCk6冲A^q5XơPv5K7'cg_a5f_a5f_/C}g_a5+f_a5f_l`5SLg_/*f2Pנ7} q(ξrkC9w5Xơ;f2Pנ7Z>?Րř>ac5S/vl}'kljl}}ilvۧil}Qg/vs^<{cWrLbƸً/1ybǸًۿGƸًg/vl g/vkر㘮y-k^ر}[nk^cŎyͫ;5^׼zcؾ5=5^׼zԋW/vk^ر}Czk^1yb1ybǼՋ5[ 17:2oO8췎c1[8췎[c~4ƹeulUϒ1-c:5~ؾ{6%qͳzLƸogukֱ}yk1y[c75~`5Xơĉ֠O,h q(?Aq AAAX0zʼn֠ǁ֠ǁ֋)N}DkDkDkDkDkxL^,Oq5Xo9ch5 }} ryۡՠZ @ Z @֡ Z H̡Ջ*CA8VV/*CACAnz:zvh5qph53VenV<ph5XơvhbAV|ph5' V|vh5' V|ph5OV|phBA:[R -ՠ@lr-ՠ"R Β[A'[AYrK57TdK5;KnjOn^,WR q'FTe>T~bdK5Xơ\sK5'FTe>T~bdKbjZA?1,㠖jOl8,㠖ETeR qPK A= Ӡa=iЏFiX= ӠӠ==iiotFEEO~c4q`4s{GO/;=yӋίAA2ί2ί2^,w~= q(w~==r"j$.^,g."j{,"j{,"j{,^,g."j$.yh"j[,83tQ2`5Xơ2 E`r"j[,83tQ2`5stOel5O>`c|6TDۇkl}ۿg:ۧjl+fl}:yۿ]iۇil}ƸYD:5"2-1yQdkE1yQ1yQǸYDۿBƸYDgul8'c^o25"׼TDWuk^E1yQ}yk^E1yQ:5"׼c׼c^*;c^*yͫk,co`ul߂w:qnYD7c*qnYDcThs"ؾ<Bc[Q_cukEԱ}kmlhgэqͳ:5"`4XơaӠO,6 q($q æAæAæAX0lzaӠǁaӠǁaӋ%}ǰiiiiix ^,?3q4Xo9c4n}viЧ^l}vilۥA]6ۥAz]z\v4m~KvEKw4qp4}MK/n9nviCvin8n^,' 9y ' y ' 9y "' 9yҋʓwc؍_Bc̍#7O3>ncӰ_=c6>gc\숎C65ώ<;c1]gGtlqۿiƸgGtlgGtkѱek1yvDc15c^ꈎyͫ#zL1yuDǼWGtla7ǼWG:c^ꈎyͫ#:o+yͫ#:5ؾ<5׼:,쁎{y8쁎c1@8쁎;c~3ctltOv1{c~:G:cؾ<51]Mgtk=1y@ 1y@Ǹ6<{c\쁎qͳzLƼu mk^WƼ=gcʘ׼zc^Stk^=1y@1y@ǼRƼWtl?FWNPB/*ձ`*O@ EBSB} hXhXhp? r^ rb '4ȩu,47XA`B}e =h/X^T4}@Yp4Ƚ@Qs4@ap {An26{An4zQkp47|@@/+{A{A{A8ˏ{u9XA|r4XA/vsA8vszu9XAe r=Рǁ= D =РhO5h/^5Ǡ m=Р !=Р !=ЋC>{2,E]s q5`@~gz2hz28,E@e qP4w^@/,h8z= oo88åhhE@@@@/888zQ=`@eqChz2y=Ћ8F@eh qP4XA=Ћ8,hz2y=`@-{A{A{ z z {˫ǁ= /A8-{A{A{˫`*XA=РO,_^8{2w}bhC{AoѲ,P~h[^,q4XA Ă=`c@>`4Xơ"=Ѓ==}b@@@/{A{A{A{A{Az\=`@}S@>se4S/@>se4m~@= \ r= OX.]6{A^BuzAn8侦{ l@@w4Xơ\v4m~@eqPbh hh h ^T4ȷH'?|p3o$'?|p3ȇ'?|p3gj'?/*- '?|v3o$&?gN~r)`r)`r)ϠИ q( `r)ϠИX.9,ghL~8(2J~8(yQ`%?23 ͠_I A0g~3W72g~3W7~%a~A0+ A-;yKu~3q`~3ȫo^,R z z q(R q(R qP~bfCfof,ofЯofЯofЯo^,oJfoyfoo8C`7~Cg~3Xơ;:!2`rfoo8C`7~Cg~3q@~^D~sluۧm>۾g} g}gۗgЍ36rm6om6mc}}|gۤ} gۜ}5gې}5g\o~}݌w,>G~m3y7?G~K3y7?G~3y7?G~elO~3=yϼg^{~mm3=yϼgg^{~3=vk~o^C~3Ylۃmg.ql~l;oqlyl;jl;igۦg)g!g?7?!vvٶٶ(*p!Q͇H%6n9Jl>T*p;|r>d*p2{ba=p2{bs|2r@̇P(!|ȝ2r/@A2r;@̇P(! 
wrW@̇|W(!w ̇|9Q(̇2\̇25E~'aNC(~'8 ~2q?,㰇2e;8̇|_W(-̇|/P ?|w0-̇~G !ߊ wi-̇|5V |q0i-̇~G 39|q0q>,㰷0?i-̇e;8-̇e2{ _N|Xaoa>,㰷0q[8-|Xaoa>,㰷0-|Xaoa>)Z8-A0*Z=ha>8&Z=ha>8[=ha>8fpoa>8CZ|Xaoa>B0q[8-̇cfpoa>,㰷0Z8-|ۆja>,㰷0q[|Xaoa>,㰷0q[yP-̇e Z CZ=haCZ=ha>eD0Ϡz|ja>8лha>8J˺۟n+=,vòn+=,\Bʇeݖ_L(ZJ E+u[~8hC ZC}zh2'V>8Q(Z9hC=V> E+z|q@2X~VhC!ZhC=V>8 ZЇiVˏ,|q@!/bKGR> }ĉ.CoKyQ]ʇï.WgT|ȇ_%{uɇ:C>.YKM2%q(7T|Xơ$S]P|Xơ$S]P|Xơ$S]P dK>,%1 uɇeC?f.^|XaKCdpH>COP@|EC; 9C/j$ɇ~w@@2$ɇ~w@@!d ˜ H>8  ˜ H>8  228˜ H>,$?meN$zN){R[I_垔yChGb*Z>+C{h܋+C hл(ZʮChGb*Z>8b-zWEˇe +ehC9!VarBC hC9!VwP|q`slc1UϢsl:>cP3951>c4Mc,8O1 4>cy>c\]qͳv91y.ݸ[hk1y./1y.ǸYۿ{ƸYgrl9c^15׼jTWrk^1y.}yk^1y.v95׼jc׼jc^]/c^]qͳL9onSrl?㬰L9kg5T㬰L9oi4c)geʱ}7{l?h8+,Sٿ;˔c~R9ڍ{c\,S]1y)ǸYgrl߹grkeʱ}qͳL952<˔tkk^Ƽukk^ec?8lƼUWʔc^*SyͫL9ԌyͫL952~H352׼ʔc׼˔ʔAy\ r ] ǧʔA˔4qt2ȁt29.S9.SS~dL,2eʔAA+8!䛵 rC _  ɃLHdoNHE] U _ u ɋJHdddNH=LH>E] q _* qߋ?mt7i'$et7XAet7XA _ z  ɠ ! ɠ __[A?63!;A?63!c3A?63!c3u0f&$et0XA ɋu,[2JHɄE%$e - `%$e qPB2XA ɋJH8(!,㠄doLH^TB2XA ɠ" qPB AOrB2q`B2q`B2K ɠǁ ɠǁ ɋ80!80!80!yQ ɠǁ ɠǁ ɠǁ ɋJH8(!,㠄dW qPB2XA /9!yQ `%$d 2JH8(!yQ `%$e qPB2XA o9!,㠄A&$ޅeB2q`B2q`BAAAsB~w80!5?'$&$ޅeB~,kH? 5R1|e 8N>sos z9`YC9uɜ#U? z'9`g1XơP9ǃ9=9=9Ϝ/ 8sAsAsAsAsAl1x9ǠǁƠOYlXA(A(Axu1nbEv1ȫ.6bre w]l q(7w]l r`r}ƃ.bN+^TZ1o^|/sZ1o^2A~:xܓr 1q@b$y1ā 'H c /8A>8xܓr 1Xơܓr 1Xơܓr 1e=)~a 1Xơܓr 1/{R$8(S 2 $T@b2 $^T A?~xQ Ġ@ i @ i Ġ-A?x Ġ-A?8@ i Ġ@ D ċ6AAxFt1q`1q`1XơFt1XơFt1XA ċ6AÿA[!A?+'bc'.6bc^.6bcЛ ,6XlbcЛ ,6Xl zƋƠXl r`ަ`1Xơ62Ơ)XlX]l q(.68C`ަ`1Xơ6A,6yL741Y4c`_STlۧrlʱ}&ۿW/EAA^s1q`1T[ _?v|r9`.O lrb|bП}'S|rS`/g8g>1Xơ\w>1seʭ}280809}/K'}|b|b|b|b|bH'^,WO<bp yĠA zC_t1c8GPv1G/ bAbʎ y wAXn:88䆲#22#An(;,Pn:x \+ ε y]+<Ȭ` `+ y?Y ` 1Y? 
~A>f9+sug|rV0Xơ\qVb`C`ugeugeuge 9Y`e~N`V0XAYǁq8`O8`缎8`ЫqR<8`ЫqR )q8`Oy8r)qǁq yX.98080,P.9,P.9,8r)qr=Y ˁA{AATr  rA 7$ A 7$ A 7$ rA &$ z?A &An;Hx:H2XA‹  aC9Vu0  eʱAop0Hx:H,PU$ q(Ǫ aC9Vu0  $<kcӅ{llϩۿÎV8>c:{zL:>c>cczLS:g3:>c^k95Zab AF>/c/+G}^_;#A9ί#A8w3u/ȝ_GwEEu/wrԑ w~ q(wO rב`r5 \;Ȧ^Cn1^nyn^O5n]TS/s{7|qS/!6 W=c}L︱?b xc8<>c7>4cc<C<>4c>cykxǸ'wka1yc"050ؾu8}q3750550?42?%ue>,ͱ/f/Lߙǔn?"Obl|-m06 c0. 0G cG˜׼>Ƹn? Ƹngml^<1yDZ+qk1yo1yƸnۿǸngml<1yƼnWmc^Jy+65tؾ>oy+65t1ƼnWml?eWmk^鶱ik^_cml& 3cJq}26?c'lcQ~al?dml?%5&#бtl?!K@ci1]>1y&Ƹ`ۏƸ`gml?8[?c\Lq3651]e1y]Wlcؘ׼lc^JSmk^ 1y%c1y%Ƽ`OƼ`Wml? 5'؂PNE%؂S ~ϼ/*;1ϼs'؂t'؂i[[l2?m~pF,[lt?9W?oAnNAnO{An͟vAQC۹;tߩ]]oE]U _ݡ uF]T=]]F]`]`]A%C,Aet9:XAeԡ r]Џd]o]mР١ ١ ɘ١ ɘ 1;tA?C,E qP.X᧍:t28C";t28C,]:tա qP.XA_١]:tA(C,Av肼N]]]&;tA;tA;tu3hl<3sc<3sc\̍t>5ؾO9q3375;5<3sc<3sc\̍߹ǔWfnk^1ye)375ܘOYSfnlߗ۷Ǽ教Wf2sc^̍y+37My+375~,55ܯ137َ'c1eƸ>/Ch듙bl0q}237Y3sc~n:Wc'cA<3sc\̍gc\̍q337OӍ1yfƸ晙gfyϘ׼2sc^Sfnl?Wfnk'9ǔWfnk^Lnk^1ye1yeƼ教Ϲx|ߌul+ȿqu7@] ~tQ ? G7p.Vpu2 8(P܏ze qP?Aw^䛰uA^䮧uA^wQ > _] Nd.Wd..v;z]]T.-r.g.-|rnj8v]8E`qPnj8vuA=83:3sA?23䛯3su55[f|uf.[f~ef.[f~ef|\ϷEee qPf.=f.*3,\Я{8(3,\2s2]Tf.XA`e~cf2s2kA~03803u"gff4 z zA3sA3sA3su=q`f.q`f.q`f2s28(3 ;ge qPf.m'f.*3,\N8(3wQ qPf.XA8(3,\2s2 \C\л==]Tf.q`f.q`f. y]of悼E&X`i}cqAo2,}cqAf1F4a,.]Ӥ\dw,.]Ӥfw,.]eʕv28080.cqv.}X\X\X\X\X\g}ª[nA侦nAnv\t-}MW݂EU݂t-ȻnrU U 5]u q(W=]u r_ ?n + ?n +ᶋ yp[_= y "n :muۂt-ȯ+ۂeuۂeuۂenX8,P8,p[mA]1vYX o*ZC,AւBga-/tւBga kA- kAnEv\cqa-q`a-ȓ.k,.=,=,8k,.8k,.dD-2F.*ÿFԂ'cD-G.eGԂ'cD-GԂւ.~ kA kAvQǁ \X q(A.nZC9 ra-w{ւei kA۳v,PN\X q(A.nZC9 ra-w{ււ86(}϶O϶k(}=|-϶ckl?llGF}}϶϶O϶ϸ^P_l?>>>zkmRmQmQc'gggg^g\}}2}5g\}}G'ԟqͣٶkyϼ^_k~}5>kmm{yϼ^_k~}5>>ky϶ϼ5T>ێqg\g}P||g~ugg]϶ϸPQl;g)g!gۑgۉgۅ϶̱ggagYg\󨨍϶s?_$9d[S*w+9ŋ 7a3g<}6=ͿgxQQgӚ||3?W>+j3>+jϦn|g#|g>sESQ?k9;&%_NSb8'.|sb!/J=?C^9X{8=~8=~ȫkyPb!J=\aNeeAel0'z||7Ɋ=:TqQV!C=RTq]V8r38C3HOA!o_{rZqC_{-kP8M\ГC.'_{5kyZqe\?kqp]pm=\aeZqe\?\a=BG>_YCE_~_+?\}/=_^_{_{_{{A׊s2s_{_{_{_{_+88D8.0N? 
~k==_{q@?<=8ZqCk==8V#Ck=eeCN88,V_{kqkqkYB2s2s8.0.0.0.0zъe_*ЋVz_{q@8z_{q@!Zq~]k?ok=\W׊O_{ kB]d~桗=\&_{.kqX~ԮZz_{.k=8Zqkz_{q@ k^]"vQ|C.X{Mk7J=R k9K%pJ=FP%rX{եkqX~Z|C~ЩO1UQ{:UԊsE!bt=UQ{:U*j塾*jAC~"p塾*jqXꫢpV\ꫢp塾*jq+jq+jPC,#PC,#pGCiPZXF(?J{eҊs(?J{o=X{zJ{G( ߛ(PCBiqX~oPeߛЊs ;B{ ZhW zhkLVg)j|:;Yg^ϼZg1ybf=6/c>j|:;Yg^ϼZgRg^ϼZgRg^_c,6?9Y猭\#6f 6?̌ui 13bӌ_c,6?ŒO0c #6f&6?<_gFl~gZgEg<[g1H&3Yg<[g똘ϼZg1yb>jS,6?̫uWZg1?˙W,6?͏b>j|:Ob>j|:ϬcpͬȕkfEn]365"Wz[5.fVJ5"Y; ..?tͬȟfVJ525$+&Y$+&E5Ɋ|^&Yx7Ɋ6Ɋ||&Yx7Ɋ||&dE^$+dE>>v6Ɋ8,$+M2c7Ɋ`좚d+'`+}?+}q>XWq>E\V71vq}`E{cȕ`'V8V8V\a}`E`En~+XLWaͯ`N`En~+A`Y*rWWWߺUƕ"W|q]+_EjV.Uƕ5+_Ej\W3\*ruWqeWѷ|qX6|}[WqeWѷ|]\6|qX6|qX6|}[WqeWѷ|=|qt^loWln]l/`36c_?6ciߝ*_d+6_}1AUb/6_bez;@l|uŋ寱v1yVb<|czbg+6bG~g<+_>3WgAg+3Wl?Wg^ϼ*_1yUW+3Wg^&6?̫W*_1yUb>|51yUb>|G51yb<,r' wl~`+6?D͏cS\wAl~tӯ>b 6?4͏bSc*r1yb<,rgC<\1yb<,r/*r|U̫uLE,3"Wg^Ec*r|,grUb>*r|U͏c>*r|UOc>̟rw=;_EW2w<|yWr*ο(οW@ws\ˠ;_e*.Wqu.U\A2|e*z*rAwʫ-S^Ev򺨔W.nwʫ}S^Eny*)"^NydʫES^E0U2)-v h8U)Jyy WeEw2YV|&ˊdY_,+K>eࢿ3YV|&ˊe좒eEg{^&ˊe좒eeldYq%.*YV\Aɲ2JqP좒ee,+.dYL]TeWE,+z,+ZN=L=Lx4,+z,+z,W&ˊ&ˊ&.ՄǁɲǁɲǁɲJqPeE,+.dYq%ˊ^u1YvQɲ2Uee,+.dYѫ.&ˊ8(YV\AɲJqPee,+.dYћ_&ˊ8,?-w/eEeEe,+z,+z,+zdEV80YV&ˊ3YV\_;YVΘɲɲdL4ɲ L3f\_;YVΘɲrMX~dY;c&ˊ8,w eEeEeE,dYdYd5'ˊ~dYdYdY*/JVU"vvTWɊڹJVUsJVUˏU]%+{s bE^+bE^X+bX'm"g-Vm"g-vqyXM+ybE>v[Ŋ8,ϳ+m2qXg-v? aEtVG aEtv?:X+?:XBXnŠ&BX+=..?p!oR,v!sŠŠŠ8,?p!q`!E aEUX+𿓅*,hu!΅WQ!E aE>wp! 
aEF\+r5BXRŠ\VЅ"W#.q!CBEŠ\Vf!Ո aU+ aEn ]+.,]+攅2܅oNY+.,]+攅܅2܅2܅oNY+.,]+攅ǁ<,ıů[c/±El^qLcш'#6|<Nj;Cl>1 bccl:c|e< cl.|>U|M͗c"z03zXlxY̳|U;zXg^ϼaLg^ϼaaLga)Cl~>x&Xb.3zXl~kGt ]l~+gsl.6?QbaaBg5qEnМ+r|\+..Vv>ȧ^dk#pE>rk#p+#pE^+2WwG9WwG..O+#pE>uȧ}g/qX;vQA/ }1f1ld+b [cي| Eي:Vwي\X9vqy [Md+r alElEle= Ft lw2V_A"׈]\29V\* pd+rZ7nyZ>7׊ws=kEV䍿kE[6.V䍿kE[6׊wskE\+rZqe-ZlqXn}/Zqe-Zl]\nqXnqXn}/Zqe-Zl=lqds-6|%_ ~͵!6|bWcjOFl>2yLbw|(b<l \͗|u'kxL|]|//tEb03ag^~͵|%̳gs-6'8Gc1y6bn86̳gs-6̳gs-6 ̳gs-6Ss-3Zg^͵ϼk\̫Ws-3Zl~|,g^͵䘚k1y5b>j 1y5b>jg1ybp66?ovZl~[1mN͏~d+6?؊͏bP~il%槻N{j|N̻Vw;=iE|.V2"iEiN+rN+r N+?x(rN+rN+Ώ֋.V\A2jqP;ieN+.vZ񯏦iN;lyCX!7Ŋ!vSb7Ŋ>l]TSb7Ŋ>lyCE5Ŋ)V~M2˺M"CnqbE)V\aY)VbubeubeubE)V\aY)VbEby~?8g/6_c_cS,6.6_c☮[El>6C/)6&Xl.qL|W|7tь̘ϮzcryLW˘ϼ1y])c>zkdg<{c1=N̳7WñBg<{c[Bg<{c Bg<{c1b>|̫7vLϼzc1yb>G!IHg^ϼzc̫7Wo,6?̫7Wo,6?̳7$礱W[bC^l~ߖbs 6?:ͿϴWol~j_懻 b#"YYx../8FV\SogcdE#+.4AȊȊȊ3Fvqy{1ǁ1ǁ1;$x1ǁ1_t5ݸaE.\ +rjXqWÊ\Vnհ"] ;ȂWSE|WSWO|WSW:\<v|W]*џ ^E>s0f0f0f0 YE>r0=YE>%q0뢂YE*)YE;uqyh`Vt fs0ǁǁ2CC2U:* f]T0N0U:˲/R fq0 XEM*MM"*Mjby_&Venb}nĺ&Venb}n*MjbFt5XeXE)*.l*M2Moĺl*.l*.l*M2Mo*zĺߐl:^MϦאlt>M_>|?ϦKgϦt>g56?lrYMϦ9tUl~6]S?G.M|\gӵRt%|g>E35>.#G.kCle}3?>sYϹ؜g~e}3?>sYM>;|3?>3>r͹|\g>s.ag>s.393yd> }ƿWM>^lz$F^}6=gӠϦAMO>|6=lzs6=l6?q6=٫Ϧgg<^}6mcs3yd>G3ydbs39{g~^g|g>s*6g>󙟳W٫Ϧ\]g~^}3?g>󙟳WMz?󙟳W٫ϦǼ9{g~^}3CzerGC|es!^=W!@٫✽zP!@٫rzQPd!A٫✽zÜzÜzÜ*٫8٫8٫ܝ+{UWAds!^=:C٫\\+{+ erC*٫){;lerUq^="[٫\q({=es!=([CD!+*[=NQe\zwrC V\T!׋*[=䍼VGcT!Uz=VGyKeeUq.[A{zcC*{٫>&^Cd {w^C_5zaerC {p9{p9{UWqßez=4&Cl4vMC8oz=4z&Vqnb=M~4s2s롷hb=\6s롷hb=\anb=\anb=\anb=\anb=Mzz8@4zzu X=hb=8GM4z*ί<8&CXŹ&CX=hb&e&e&Co'zzz$XŹpI4.g8M8M8MNppppz4.㰼`&C/zq@Mzq@M^֣U|q@롟]U\^QX=hb@zD7hbAzvCozRXG2˛!jbzq@M䣉U\^QM4ˋ"jb=-4rz-UT!7܊W=G rP9zC !ONzC>P<9U!A=#1ɩrPHLݦ>6=Fݦ\]mmz]MW8wrWnCnmzțSu˃-unCjm*.mzq@ݦ8,m +unSq6='MWA!W6M}B!WJ3=Ly{4Sq}W!ofzs!ofzh"TLy{4CMnYi Q顿!p4SqN3=ݲLqXGJ3= LqXGJ3= Ley4eLqXGJ3= LqXGJ3= L=L3^ f+6_lc׀_c)6 '+6_gc7 
6?A;kll/DSlf>6Ո,1/kƱ1ybrW)6_Kc46_IBe43hg^_c)6_@cJ3|fb#ϼL1y?34Sg^iؼ+W)6g)6?o4Slw4bFL3E1wb?6cJ=czP~l1bf?6/ccJ3|f:4Sg^iϼLIVgr.g^iL734Sg^i8734Sg^i$L1yb>J3xf*;Ti"OLf*rQ4Si"iJ39N39N349N39 N3ENLf*.4Sq8(tQi2J3qPݹLf:6]TȍME:m*rnSwTnݦ6sME>mnSw\T8Tݦ6]T6zqȅMm*ݦ"6=6ytݣMEtQ>.rnS6t~}\Mem*.nA\8TI`_> L3}f4S'iOLE ;tQi/L3y;4SO`Lef4Sq8Y~76}m*^ݦy{'ݦm*^ݦy{6}/nSq9 6]TMEFm*z5nEu8Tjݦ2qPMem*.nSqu8Tjݦ6qP8Pݦǁݦ16=6=6x4m*zm*zmlvvv.T8T8T8tQݦ26qP$Mem*.nSIv.T\Aݦ6qPmǸMem*z;nSqu8T\Aݦ26gME/m*zm*zmnSnSnSzv.Ϣǁݦ]T\]^p})c}KEOKEY_Rq2@W>2e{/g}LKY_*zX_*zX_*z-ח֗֗..T)֗~ϟ+y_\9ϕ\9ȯYy;vTӱFǎ{:vTͱ"29vt"ŽyP{0G|@P7v"@E;TZ"tPkaG|JP{0GFwq`4wWaOG0tQ;#@EU*"1yOvϝ"SQw~.SQw~"OѧEԝr)̝K").㰄(OeqXy_OqeMO)).㰬)).㰬))rMOqeMO?tq`㏝|b66;?s>6|Kؼ O\/ Gl>Q@_5b;?g'3Ol*qL<;?yEx̳@x̳;x̳6S'3Og^ϼ:?̫W'3Ol^wg^ϼ:?̫W'6XW'3Ol~"3^1zbb>6p?6n?kzbNoXb>>6c+cWWb&>6/c{+1zb>6cJl~̳g'3^1zb>|̫sL)3^Og^c|̫:|]μz=1yzb>'1yzb>懮̫W'3^O޽"w}y빨^O ?z<9\Tp)p8ApQp8?.r빨^Oqz8S\AqPz=E9sʹ{=E,)r;^O{ zֹSڽsE{=E0빨^OjzcS8sQ"=L,x);Sq"O8)rQ8Nq"q99&99 EN8).8Nqq8(sQq2qPݹ8)r8EqZ;S>q"7׎pM8Eǹ8N })NY}adVagu\|:S:e).㠬Eeu8(S\AYoչwsd9~^I`9{Asd99E S 9Es.Sj圢W#,a99EFX)z5rNqs8]V#,qP99eT).rNѫs.S\AS9^9rNsss?<,=,=,\k6E9E9E9U)zX)zX)zXιrNqs8Sv2*qP9$9U).rNIs8sQ圢,q teTιrNqs8S\A2*g99E/Y)zX)zXιrNrNrNzs.Ϣǁ圢I8E)8E8E).8N+wq -o8Sʝq2H;d'P=rg28E8E8W:YrN:.9+.rN:.9+.\T9MrŦݸ+6EbSFyMQWlEUl܈bSWl\bSF˒-G-"oܢ)_-G-"nyeIV/l}Jw>3M)6"S7o.S7oMEoM_ؿ)fK"ofܿ).DܿM73qXݿ)f2˽7E̸sqwroMq"ݿ).ۻSܿ)zؿ|տoTb>|տ9Mg^ϼ7eϼ71yob{1yo~ؼя?vͿuߚؼ56nb[ؼȏ{Gl~7uؼGl~7y{QVl~#6cS&3Mg<7Ը̫qW&315nbsϼ71y5nqW&3Ml~xW&3Ml~nW&3Ml~dW&3Ml~yLϼ71y5nb ly'E5n0qS7E.>ݸ)2jqP7eԸ).M7l\T㦸冘 {'ٸ)^j}/Ml}ظMѫ6n^qSjja7eԸ).Mq8qS\A2jqP7ո).M6n8lqaq`q`ãMMES8qS8qS8qsQǁǁǁjqP㦸7Eo\ظ).Mq5n^@qsQ2jd㦸7ո)zMq8qS2jqP㦸7eԸ)zMq 7ndzqS8qS8qsQǁǁl\˝E7E㙱)zV)zΌMϮ)zV)zΌMq glު3cS\fey YY^p 36E?bƦq`ƦL\\pƦq`Ʀq`f?Y=~qS qS䇎7E7E~qS䇎7E7ո)CǍ""<vƦZgl\z:csQ"ok)rM㝱MKOgl|M[gl\z:csqك9cS1cS䝎36E 1cS2cS䍿36E8csqك9csrg;S5 \\).tNLt"1:S7Fs.*S7FstN#tEstN)霋K"o).ιtN7PNqX NytNqetN7PN\\ NqX NqX NOp:òOp:_9S80e|b665sb|bu66^t'۱Sil>}Lɝ%NlX.ǔ܉W|ɝ|>ktlDJ|9Ng^ɝϼ;1y%wbk}ϼ;1y%w)Wr'3/Gw#b>J|܉ͯE|܉3{z5;ï1uAb3Jl~䘒;!6?mͯ7O)4 
Il~$6?s;Ngɝ<;1y&w)Wr'3Ng^ɝcJgl1y%wb>JSr'3Ng^ɝ93Ng^ɝ93Ng^ɝx93Ng^ɝ;1y%wb>Jx);Sɝ"O;)rNɝ"ɝJ9N9Nӹtr碒;E;EN;).Nq%w8(sQɝ2JqPrȽpaN 'w\9Sɝ"Nvrȥ;E)rE%w9Sɝ"N\TrU;E.9)zܹN'w&w8`qTx\-SSz="pzeH]n1))ܒ8ES,hcR4E~rȯYN=L\TMld&6_cWcJ+ulPtsMlH5:6_c#hb931]c>4|uak7+S4g&3MlzrL;J|敢/|敢+E%+Eg&6`=6^=khb*ט?U/n5~l6bKǔͯl&6bucJ$_W5bEscJx晢3EgR41yhb>J|敢9Ml5~g^)ϼR4ǔ+EW&683Mg^)63Mg^)63Mg^)R41yhb>Jx杢);ES)"OS4)rM)")J9N9N-9N9 N9 N\TR4e).Eh8(ES\A)"NE2)JvȅS4E)rMCL\TS4E!h~0EsQ)L`S4e).Mq8(ES\A)2J`R4e)zMqt)*L=L=Lx4)z)zMMMEhhhh.*ES\A)2JTaR4e)zEh8(ES)2zOR4E).8P q{e`qh8(ESʝ)2Nrgq`q`S4ES4Eܙ)z)z)zMѳM[uh~<MѳM[uhˬ,X8ESV)2+ˋNUgS4)S4ES4E?dbڅS4ES4ES4x1,;ܩ)̝9YNMe\T2wj,st:5E~SS|SSǕ2)A"daVͩ1E،)۸1Ej܌)fL7n}c3ryA_ة)ӝ/R}b;5OwjˉW;5σ>m)ߖm)ߖ"9bS!Fl0bsQ1bS"W)~2KyMq"W).ܾ;bS#6wGl8,qXn)2#6E(Igl~6}sgӗϦtlF\lޟ+gIϦgϦStH?g;6Gl>|gKl|6]?.ԟM8G|6]c5h|6]?󙟯α|ig~03?oU!btQgxlz#bٴJlf<"6#b3yDl>Gg<|3gWg~|3?Gl>#69bg~|3?Gl>glz=39b|g>s%|g>s|g>G3{"TB5M?lz!gӁϦ6=6j>^lgӋ M|6C5MXlg+ Mo|6clzxgg~|3?j>C59T[|Pg>s&6j>C5ϡϦ$3?j>C5MFg~|3?j>3?j>C5Mycs39Tg^< Յ)]DR!S]?vb*B!ay!dX+2,P/Ȱ<2,}@$&< { < W\.Y.}5A)C~S*H<_闇 _;/ʜ~y+/9_y~y/yKKqI($J<\a(R/yKe/yKe;t_re;t_.ܡ+wJ<\aCW!C/(L/ 1`l* Ӣܱu;6_cW|Jc!g46_)x>~/J/Ա:1/t͗H|}|Lϼ.1y]c>Z/g%3KlzrL J|~o|[μ/1y_b>J|~01[3 k߱:b/ccocO؏) 50m2Gl~㘲0gDZG=Gl~#60GgY<0ǔ+ W&3,1eabc>|故9,Lg^Yϼ0w1yeab>|故+ MW&3,Ll~|LYϼ01yeab<yꝅ)r,L'YDt,L,Eeagaga/gaga/ga.* S\AY2qP梲0e).,L{ugaS"Ws?{;ga9 sY"o)r,L lga.* S>Y"p梲0E)r,L,Eeat8 S80 S\AYd;tga: S0 SY"7y,Eea\: S 0Ew)1.EAw)ߖq"o w)ߖqoE]܈8Rq"wKї]Mtܥȅ.E3fܥ.ewKq]8(R".w).㠸K7qP8(2R.E!]bܥ+]>C\Tܥe.E!]^60rQqrw)zK]2R\Aq2\Tܥ.ew)zE]8(Rq2z .xw)zw)zw)h2R80R80rQqǁqǁqǁq===\Tܥ.ew)zKq]8(RJqzۯ+zǸKqwQqWzq~eEW\Aoq~e_q]p3R\ayq7܌==\\ypܥq`ܥ 7.E.E.Eow)z͸Ktw)3Rqrw)zO͸Kq9 fܥ]2R3]> V]..*8R80'O$]DrܥtܥO$]O]DrܥO$].*R'.E~"9R|:rQq"?w)r>w)qq/ĸK_aw)rK ]sܥ .;ȖIl;dӝ #[Z_|"0pˍ0E(: SuYC\T0E_):,FYe}br,L9,Lޜ),Lfa0 S_Y"9 S\*}b梲0EYb0E8 sq 8 S͌0eƀ0)fYKiY"of).; S佽0{{ga:,uX흅)Y˽0E,L,'90yb66!1fab嘲0J/Ա:1 /t͗HY|}yuu>s3Ks3 s3#c&6_cyfabyfabדc:jq晅笱I3,L3,LlR3 3 88"ǔy故y故y故9,L3,L3,L3,Ll~e!6 + + 
sLYg^Yg^YB3,L3,Ll~E!W&W&63Q1_bS|rl~<96?p?KLw=bMcJ懒ck-Gl~#6?|LGl~#6Eq~9K3K3K31_b1ϼ/1ϼ/ǔ~y~y~O2`c#\.*R1R.E0RŘ uP.ep).렀KG \T.ED0R\A7d\^\^\8   \Tu`u`u`.E.E.E.p).렀KqY\>`.e^^s \{ye^^s u{yp)`z/˻:Ap:(R95.ep)ׁׁ  }ǸKKK̰ 3,E-3,E63R\vyOa).s Kѧ̰]^ p 3,E Ks KO:rqy7"gX\q/ K+ΰ\TtgX\q2,E~8R3,E3,a)s"ʰd/ΰd!fX" Kΰ}`2gX`0rqQ6?zLg^g^g^cj'cy5Zby5Z̫̫,yhyhVgiJ+EOK+߼HR?K+E_YZ)Ɵ*\TiJ+eTZ)Ɵ*uPiV:,}JJJGׁׁ*,,,\Tiu`iu`iu`iJ+eTZ).J ,u te]k,\ te]k,u tUZ)xz^:z^+.Jѧ,uX-pifV^V^V..R:Ri3K+EK+EK+E6R0K+E-K+E(R\vydߥYZ).s)~V~TK+UZ)sť"ӥ"?W\ZJJ+.EV\qiVOV.RK+EK+eTZ)za)[3,a)":R3,E1RM3,E_Ma\,}adWY?p)ȀKF\t #.E_p)Ȁ #.E_p).렀Kp)˝.E~ctx\.*R7F\?.E_pK_p):ĀK7P\\^w(\:,o;rQ"op).밼K7PuXp)<y.e\:, yKqY<"uu`gp/;ߏ1Ɋu/6ۋ_p.yc5=6_c72`^+.>Ac^+.>Ac^K'h u teE@W\A/uqY@W\AOp).밼[K   \\1pu`e\^\^\..OջR-K+']Z).7>ei K+EbJq){VR/,K+E($JWj'QR;R"9RIJHNyv+(D)J$JWj'Q\$'Q.*RI"IJLIL$JD)TID)6I&L\\Nr/JqGXS)ʘJqGXS)ʘJїTTtLK*c*E_RS)ʘ^1rMPLK*c*{AT|1ׁˍ{)E~s/[R.R*o^JїR.R7R԰Rm{)wK)6Ƚ"oK).^JAuX^w/{$R:,y*^T˩{)eSR<p/r*^J/eR:l^8`? ,͗XZ_cL+tl. ot zLؼy 6毝ycɏ͟TZ_b桏 1Vb?6_c?ci%6_/1J+Lg^g^g^G?Jl8,8,/=TZqYZo;1WM%WMj*1ϼj*1ϼj*g^5g^5p3J3Jl~d8WM%̼j*TSyUSyUSq]S)r]S)5"'5yJJ ETTT"E.k*Ek*y"5uPMj*eTSJqYT:Rk*E g9Ǧ)=ƙG1UGwcoSuT=^\ݪzQ6Nj"/ojs˛/6NjQ/rx79^Mmx79^Mm9js6Nj"GRe6Nj^l;^7);w[;^ypNjEEX(/5wl;^}A_j[Rr샾g:g:wG<`}e:̭}AkelV^'/Z+/.g?yZyqY>[ZyqY>[u[+/.gskelV^\an/hQL)Ŕ)/[S^R)/k3)/zyQLy7*7ULy7*ES^"R1E//)/ +EQ^\&~y]Q(hDQ_FEyqhDQy4(ʋ/G#ȷ Ey(ʋxDQ^仐k?3!6_c79-6Al/m;[lvLGǔSKw>dbgLl~-6[l޶rL906oZl^gSN%6/Y;ƜJ;ƜJl9Oa)?iby}L3g^11ϼgTbK3ϜJ3ϜJld9g9}l33^8̩8̩oV1Jl~%6?:_$SN%6Wb[(%cʩbyTbyT)++ySySH<ʩ<ʩgcyTbyTb .3J3Jl~p!ƙwNȩwNȳ>T|T.*R䁟s*Es*E.s*S)rS)r S)-Ω ΩT.*R\A9ʩuPNr*eS).렜JgΩyJΩyJgΩyJΩy7JnΩyET<|sN#gT %l'?{w'?w'?|Sɏ"s1E5q_XHlfKflFc3yb26ebWc:by}8E]rC"bybAcȡ޲).렷l.-z˦޲).렷l:(P Cer(!ׁ!ׁ!9 9}̐CCCCCCCCCCCEjjj.*PP;PP;PP;pQ9"1+s Exq S-E9Qwj#yވ͟Q1ϼ>*S3g^c{#608lo8loϤcHqވgg,ƙg{#ƙg{#6'qވqވ_ǎ̫̫̫qL퍘g^퍘g^퍘g^{l:Fl/6?ͿIcy5by5b1ϼ1ϼӘg^}g^}15by5b4ƙw_ȩw_3B5|5.QAEE.(r(r (?k 
kL5.Q\A}kuP_e(.렾Fgky*FkyJFgkyjFkyFuks_C;5bye;~َ33?#)ƙg##f;byf;byf;bgَgَ,ƙg#ƙg#6;lG3lG3lG3l1e;bye;bye;bye;b9a++qLَg^َg^َ@|3lG3lGl~>W#W#6?++cv8v8vcyf;byf;bs1<1<H3Fݙ߈?)w7b{l~^la/6?:FlQ/6Ϳ߷cǔ߈?n`,3 B[B>`,e,b!Ea0R\V0 XHq[Sc!erAB>`,u`,u`,XHXHXES:0R:0R:0rQ/\=erOqYB>`,^).렗{> d,^).렗{> d,^XHчu=erES\A/u=erOqYB> d,e )0ׁׁky6GE%?>'?>'?>'?.*Q9Q9Q9qQɏ"ɏ"ɏ"q㢒E^('?^Fh="Sr="F);{ ErQ="SRI{ E~pz ERI{ EbR䉋{ E~v=#@s92p{ E,Rߓ=/U)H\\ )HyH\\ ).RT@:,G-=ˑ{ EKerdE@:,GuPrdHqY@:,GuP6@:]ց="q+@|="{se { E>HsHrqy="_Cj6kø6L{l6\_ ׆ke_jca]_6@^>c^>b^6aQ_6@^>\^vaE_>Y^>Xjca=_C5ymx"6@^_=׆Ϩ<'Tmz3?~<?^̏=ym`z3?@^=׆Ϥg~ϣg~ƙ{ _^=8s;kg~6|=] ]yjǃcC! z<8v=>:]yLJ^ z|u.0v=.0v=>\az|pY!OpYGz|ÿ:ÿ>w8-ހ"4gG|ȻPF>B#y+ȇ])B#yȇ5Eheȇ6>F>\a |yp |pY14oNȃcheȇ9C#.0FC#t|pY144.0F>)ΡshCߜΡsh14j~o|uC#znȇ>TC#Pe|蹝C#Pe|蹝C#}2F>\v |C94p}3F>)Ρ}3F>:̡sh_94a|uC#o }uC#zȇ^94Ї^94a|uC#o }Ї:}3F>\a|keƷ>yp|keƷ>pYȇ>C#.05[Co }Ї:o }Ї:}:9F>\ay}:9F>:̡sh} F>:̡ Z"rypl|CKCZ"rypl|CKCZ"rypl|CKeƖȇ$%z~ɇ^_!:Y1Y8EpY}I(ƜEc2gQ>?ٜEc2gQ>79r,ʇjY c΢|;:dQ>79a΢|u(.Ȣ|r,ʇ=gQ>\a9@Cឳ(.p pYuX8Eyp̢|r,ʇ:YuX8EpY1(.0fQ>EpYY(Fq΢|礐EI!v΢,ʇ|lY=sC,ʃ;Ȣ|ȷEYόyǔE͟Q1ϼ>*S3g^Yc%608̢8̢ϤcHqEYgYgY,ƙg%ƙg%6'qEqE_ǎ)+++rLYg^Yg^Yg^YlND<ʢ<ʢS%W%W%6?++yEyEO<ʢ<ʢ(1<(1<(9gYgY3,J3,Jl~z?63c3o߁Y_l/6?S%6?_rb}cʢ߾cwl;6}LYkfl13W%W嘲(1ϼ(1ϼ(1μ(EN(E9R;rQY"E)rE)rE,J;,J[,Jq EpUp8Y68rQYʢuP(E).,JqYeQM>a).ӧw>aܤBM2nRq &E2nR:0nRdܤu`ܤu`)z7)z7)z7w^M^M^M.ݟz&E0nR\Auл?EN2nrQuл?EN2nR\A\TܤIM:ݟz).w:ݟz&EN2nR\ayq/&E&E&"7)z7AM0M.*nR9nR9nR9nrQq"q"q"q9`SܤoM.*nRqK6~I7y4~I/9~IG#m"F/~I/)h"~_rQ"O /)z/)z/)z/)۸%Eù r9pܤrIqq/ qgq/=7)z7)&M2nR7 Msܤo\\87).p8nRnM:,q&EfܤrEM:,uPܤrIqYM:,uPܤOM:,&E(2nRsR&E-&7)AM[M|q˛䎛&?RMbqcKl l~lS$6dlc ϑ116/clrL06al0+706pĸG v)`G ϡg^B!#(P3ɏ1`?|byLbyLb1}8 ÀI3πI3πIl3`3`w8 8 \0y0y0y09I3I3I3Il~ ?6byLbyL)`+`+`y0y0< < gcyLbyL~gg}3πI3πIl~>ƙg$ƙg$6?g:q0q09I3πI3cOȱc,32xLߌScl16)L?2c/סcl})Bl=6hͿS0%6B ͿcyS)++;R;R9)EN)L)0"" E\S\SϢEn)L).`JqYS:(rQ uP0^6QTajejR+E0R\VUZ7MygL=L}JgL}JGׁׁzu`ju`ju`jD*zZ)zZ)zZ7:MJ}JqYT\Ao"}ET\Ao"}JqYtQ=Z).7:MzD*.7:MJ}JqY V>dju`ju`jR+E(EN(EN(EN(EN(D)rD)rD)rD&Jq5Q0R(D)&ш)E8rq9q0ȣSN`Jy4`JߩL)h 
"FL)K)S^SN`ES^SN`J`uX8Z).ӷp8RL֩/=Z)zZ) S+V3RV^VJJéS+EgjrJZ).p8RxV:,N\TjrJqYV:,NuPjrJqYVejrJ_9R'S+E>~J_9R2r"rjejȷœZJo9sdj%6{lbyV~33?wc'ƙgj%VbyVbyV)Oِ~ʆS6䘲!1 lH?eCbcu9k!cʆ1^ gcWcʆ<ʆcyeCbyeCb1ϼ!1ϼ!?lH3lH3lHl~<ƙg6$ƙg6$6?r336^8̆8̆S6$ƙg6$ƙg6$6?b~Lِg^ِjl~F0F>aj>F>fjHLjLL\ԛ8ES#ES#ES#&NHHES\AouPjF:MzsMF.MzGSouЛ8)\zĹ7q:Mz).HLuXMpjsMF^F^F.*5R:85R$85R$85R$85R$85rQ"'"'"'JIPj E9QT;<pS;xA;<powy*xE;xGwxTׁ":qQׁ":Q:0qQy=h. .{\\(E~ct٣d٣l=M=u͟C yb&1:b bBGlJby}<*t C$ƙg#ƙg#6~G3BG,t8,tSi#濃J1Tڈ16b;A*mǧcرl6bTڈccL毰TڈyUڈ͏K<*m<*m'cy6by6~gg|t3F3Fl~4:ƙgi#ƙgi#6?qYڈqY8F3F3Fl~J1ϼJt316by6b^f2c#6j4Ϳ8K,{S#6^.qXͿ 7qX?cOTͿJ%c}lA1ϼ1ϼ1μENEQqQe"\(r](r\G;G[Gq)Ep٣Up*eTٸFq!U6 QA+(ӭ"O(ӭ"(ǭ"O=ʸVFneyVFne\T+O2MF[\THBE0Q\fH!FҸ_ ŗhsk i=C i}ÐFq~ҿbHB~IO(ʐER:0Q:0Q:0qQ/ i i i\ԋ/eKqY4>aH^|)._>dH^|)._>dH^|Fч iuЋ/eER\A/uЋ/eKqY4>dH*CEz2Q:0Q:0.^\T?E\(OE/xQŋ"d.^\T+ LQ":(Eބ;jQME~ct Eބ;jQ7FG- w⢢E~ctԢțpG-trEEEE(zEObT9˩seS0P/a9"9QP3Q1af/a\\N(F_"(z(KsEsEsS0:,a9˩sEfr*Fߩ(.발 8qQ9˩se(.발 8Q\A9˩se(9˩Z02|ɭo(&2|ɭle@[E5[E>Veg2|ʭ+oؼ(yOb;3#6dO?/qLa?+bV楊;?'?%b>uZyb&Og12bc3VFl<'1ϼ>Lby}< y12bH3VF3VFl8gcjPO AqL Ag#14nrlNA+>El~NA)O毆Ԡyՠ|!sM/zQ|BaAU( @]+TB P 6q Yx!r["˶Eֺ!`.BEֺ!`.BE !`.BE|s‚e["@ǁE|s‚e["@ǁEKTgΠ wKB wnI,+lݒg0$zų%!hnI[ْ5Ė@x$9- M[ - ~N`KB@nIܒ8%!q`K‚e wKBْXP6nIc3[K&- ~lfKB`Cل%aA$8[KԒXPnI,qPKB`Cپ%!A- E%!ġlݒZr[dKB _.rKB _.rKB5[nȖ@\䖄@/j$"$,X4W$RrK`Kf/l6d9&- Ϳ36el6lq35!LMͿ6l6'g98͹l]p35!l6&lfsd6 l66ٜq] aԛ ay׵6gl6k^06ׯͼb3y5!16!l6،kMqͳ aw+fj4O5l濧 nF͘M6lcl4،df{ͦff& 6_ 66c6hf˶͏q7SfΦ 6߳k^yͫ`ͼh׼  6gf\l4l~v3y6l5Fwj7gf\l4l~v͸h< nF͸h< 6ߡ 6WfAf볛yͫfj4k^yͫ`3y5Lyͫ`3y5l6/<67gԠ||<67os͸٠|ĸ||~35(l6/n6.n6o6o5Aa$}3y5(l6*n5@z7(r@|7(,@ܠ@ A!p'A`w ;I~@ΝɓT?@ O`Ar~@t9T?@ `A8 A%,qP?@ w,qP?v%,qP?,qP?@`v?%,STy@ 7.䣾uy*uy@ .XP< ] < ;^8< A< A%*XP%*,qPy@w,XP%*>%*XP < AE`,Y%*7dy@@oY8<`AzC@ǁe8< q`y@C,t޲,}+J}8_8_8n @U u@V, ?B4 @O Y\ + @ǁ >tX8 q`Kd8 SV8>,q}@Oh/˿+k+<;g=@SY<Yr0gԚa9W5 Zs 5Rk@^nk Jkḱ|+֚,֚j́bym9q8Pk,qǬ8y,q(; ]XPvve9v:ρ?\`t:ρMy8ys@9gv:ρˮs`Cuy#,GX:ρ%rK<8y,q󼠜9ρ%rylyA9ρ%ryl9ġlN<U ;ρ%msJt->m@ja[:?Y7ҁ|¶t _-W-lKU ҁm@ja[zٖ䃟Sæ=l æ 
-!a/MzdYl~Ka"ش6-M_bæMa~]~Yl~ش6-M_ufa7ü>gaæoM_󚟿5??k~;a\5yY4~Xsӯfa DMæwOƬ@4~&aӳØ^l6==YhͼM/=lzl̢æw5?EM=k~5?ƋA4~) rKPڗ|tx  ʁf|`Κo;krk6G&q72G&|I| G>2G&$>āI| 2G&y$M9ʑI|`lX08&$>a6g@>$>a6,qM$>a6,qMݕI8:Ϗr䃼o{ 8r+@n~ Gz9r,@n A>`9^r ͰKfA>p,qKf9pv,qKf@?AKf@nu X0;ȁ| r,qKf%Y8avpt ]$8ȁ?%pt 8A>9:pt 8zDe ӛNo88}S`/ȷd/ {@`/:g#88H=%2Ra7z ]Ih=_5Z+ MBк!&@~Hh=OoZU#@z $ȯ MB끎:Zo^ :Zg@8 8B+ *@z@z D%8@h= B끎Bk`yz@>Jh=qz 8%8@h=qXD%X0 SBk,X0 8B%8 8B%z`,B%z`,#N z`,#N 8My)IhBlxx݇/}}݇ⅻ/^0Az/} ԋn&/ⅻz݇/L/}|݇y ⅻKxø^a7ŋl7_z"ۇ/a]a/L~ŋl6{G/}<ҋn&E㚧_z"ۇ/]]v3/}׼ҋ>k^~ŋl6ju"ۇy/xbK/^d0y/}ŋl7_z"ۇV]a\K/^d>k~ŋl6Qu"ۇq/x㸋n&E_z"ۇy/xü^a^K/^dҋ>k^~ŋl67u"^a^K7_z߲˚yf\Y/^|֋>O/}|(x^pa&}֋>l>xfY/^$ⅻ/^ⅻ/^0f> wUo5s+\Yڷ8-(}4ٰ>8Χ! J d@~~^8 _/Dtl-rc4OO9ӱxx?}X< hi 7O9ޱxȡ%rc4M%OK$rk4AOxX 4AOK$8H< >O=@>J Vj wRLn+5;f[ J 䃹@nmrl+uAY;J Vj m.(+5AVj`Y~TX +5AVꂲRKd8J ^VꂲRKdrj+5AVꂲRkX !A^C`Y޹J!@ǁVj7RZ 8J th:R9±8J thzC+5qA@ǁV=yk,q 85D+5/J ~OYCR@!Z|Vj@!ZJ]P2@Rio?:R@U\ސd RmN26'js2O6'79' @>a؜\q9' |°96'as2q98М thN@ǁ d`oO ɹ ; ʹ ,As+v.Kc@>fٹ c@e&@eҹ\c@qAgSt t(::8Pt t(:zD1qE@ǁ=qX_%7%+.(]1?[ꊁ^ +.+z%PW 썺 M~*{??=}?_އOQVtCyˋEj1AȯT|° Yt!E@>aXt d,:+բa1Og!%Oa1q8Pt t(:E@ǁ âc`D'K$:7E%: It iE$74|@ Qt Ҥ Qt +,h|@8Pt џE@ǁc@qA%K$:Ϣc`D%X 1Ac`D%:8Ht ,qȣ? Jt ,qȣ?%K$:$ܢc`C1,:$ܢc@1yLn192Pt!Eyd.W2˕r%\.W2˕r\u.WUr\u.WUr\u.WUr\u.WUr\u.WUr\u.WUr\u.WUr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.WՃr\=.Wn5/Ep3/pw],|+ w],|+ w],|+ w],|+ w],|+ w],|+ w],cV rc0+jR =Ul{0p/({0+\??Rb _9$-%%rRb)1|iu@l/錁 Yg :c9X~Au@`3rkqA錁Yg $:c gK3r.`1A:QX 1A:K38Hg :K3rXp{ܩY "d wk! E@ni,BrfrAX "d !ȑE@-,BrcrA%!K$B8H N"d`D J ,qX 2[} J ,qE J "d`4i %K$BzO!P t(B.(2#E@ǁ"E@(B:!=8P\PB<!K$B8.4 Jg O:c?Oꌁ<3.yRg} ˔}˟:c=/u@?pSg :-"d R-B2!j2O!A7' JȯT J E@>aX @>aX E@ǁ"d@2qH! 
' %DBXR(KP e`A)7BܭPX#HrA)?\T(A*<ʵB`y(B8P Q@ǁ e P:T(* J ,qBX 2ǍV(KP8H ,qB)%R(KP.(2A e`@7Z\P e`@7Z ,qBX 2V(Kf2V(Z<}%`1lIraK2[$>fGI1lml6k9]ẙfs6 7iLm6Gj9Q́lfsn4m6nl<}͸m69f\6㚧w3~qیkf7﷙׼|惌7%6w3~yk^f^n&o3y~yl~5b3y~yl>k^m5/o3y~y|s͸n;])f9f1n;]Ӓʛl>)l>(l>l>܌y x3͇ͧ1t7G]@z<8 .2mγer=U~B  '恜g 'T.(0S "\Pa`,@ n88? a &rh0HF? &@O.(0#? K8? ,q%X 0Aa X 03%A%Kd8? :=8?\Pae:? tzD0q@O8? ,q@>dǬOzIoA} F_ai-(/O}~'{N6-: כ]@~ כ]@ ȯ77 vf0_ov ȯ7mo0p6/.`  t:tmo0q `mo0A.rS.``\@]\P.``\@] , xC|.`,?:t EM0 ȣ\  t.`@pA<4 8 t.(0A.``\@ ,q X 0A.rK8 ,q \%ryhhpA%ryhh0A.``\@ ,q(]@ t-g͟fYLf{l7?OqEI=l7?py?[g7zǨm6W6g+7he3~T6*B|Jߌk f\T67_ٌk f\Tn&l3y*dqS!l>ͼ楐m6Ͽ77Bv3)dyK!k^ f^Rn&l3y)dyK!l>k^ f^R6GݛyK!ͼ楐m5/l3y)d7B|ܳl>ڌn&lkk3~P/l1eW %rEY v J 䪲$vCJb:X gG J G~X e@~Z[P2\ o-2\2\ ?,:pe@>~X t(-X?,8|FXKxt݂Kxt]` 4R 5 tx-(.K]K.F8P 5@ǁ݂y8j.qF8P[P]`4it<FX .A]`48H ,qFX nAit%KziJ[P]`4@Z ,qFX . X ,qFwCjtX -}˥iriڷ\-}˥iriڷ\-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-}˥brط\*-m5/#l[.#Ⱦ2o[.#Ⱦ2o[.#Ⱦ2o[.#Ⱦ2o[.#Ⱦ2o[.#Ⱦ2o[.#cS|r] 'L ڰ8Onsqbk~6my6üga^󳍶l=l:xl=k~6,SayT{4}4Wl6Ɵ'LMSڇMCڇ SmT{4}4}thal=l:Vyt0~T{4]LځӑJ{e5#ivs4 5p8nf7q;p8MNcE8MNc9vW)ށ@8nNS)^8MxNS)ށ/pv,q硑َ;WqN)v܁^}lȑَ 9wW`v܁ev܁2w G8,q9Ƒw`lv܁ʎ;a,qَ;a,q9֑8q8v܁Ȏ;[]^݆V_^݁|PWw _^݁ܢʫ;~yuWw _^݁wɫ 9Ww ^t^݁%Ww`X0{uz ٫;ag٫;aF^]X0{ur~#٫ 9đWw`%8̯X0{ur)nCxu:L ^݁٫;3)xu:t^iʫ;qWw ǚWtxuz0٫;ag٫;a6=q) K <(Xz @`y,<Xz @`'<{Xz 0( @oaXP x ߖ!~oȽ yސ yv !?!~oȇ_e`y~y "_ȇ!B|~y "_ȇ闁~y "_ȇ!Be`y~y _gj闁Z偎8gj8g۰ a:,? 
a:,?<':g0[_V;a:7y Oeu8il;Vgluå @V|FVglu8< ls^a / |lm^a^R>/^@ϋ>k^ 5O㚧y‡ͯA\p3)/ |<ϋ>k 6/^@0y*/ |/^@0y)/ |׼ϋn&y‡o7\p3)/ |׼ϋ>k^ 6_/^@0y)/ L 5/!6 ^kK 6O.^jy~RÇs*zRÇ͓ߋ>̟Tы>l^a1K 6|/^j⥆wUы.f3y8uȏg< :70Qnlӿy8;qǂA_<#˄6育A_< 2 8O߂A'_< oA٠%A_px4d4Ћi ʖLdE}I2 Ȓi%tAIY2 dEMtAI^ԔLˢdȡ%IY2 ,qd$@-8H2 ,qd$I%L9Hd$I&Y2 ,qdzCJ-X2 pi -r`4#KZ2 邒LõdY%@N,.(4KY2 t(.(4Ai`$I+%LK$rc4Ai`$@},.(4Ai '?LK$.(4K%z!>A/8I~Z2!%@ǁi`LJ J2 i@4qd^t(:L=d8P2 i@tAy%I%LdX 4Aik`ÿ%+5 l)zF5Л6޴Ql bkg[=àغ@0(zA5 Jl bk+[K[[_XP^}غ@o돷ʕ`%vA)|Tȕ`%6+JRbr%X u'?^ 7Ji-M¦m lIʦm lrca6OR6m$evA|i')|i`Xش ¦m 76m, M@ǁm`CXش ,q(zSn` I ,?!i J ,?!i'$ 7ZX]Pn`W-/ i7x1pg0_p8P t#k J t% k5% k5@ǁpK8H KpK8H ,q4i%pK.( 7An`4@a wAi%p5i%p9ﳆX p9ﳆ8P ݮYo6O06͓i1o6/6gCn6-6ifn6Or7sN6gwͣln6Op7vyk^fn3yycw7㚧<7L6㚧 GlG7p3yzßLQbfe3c9q8b,q([́%e3sO(Vs O?ȗQ?/(9|(W(g3_58t?e t?ρ?ρ@ǁK?8+%K?8,qρ%K?/(9As`@RbyAρ%9FX 9As LjK?ߐs Ljρ#VAɟ{D '7W߲k^Bf^7i͸)do6Lf\oyʽ|p3ڛ3 JB|4Ы*w EV8Gh L*w`T܁|4A*w`Ty>Xiq8|J|yxɜ%:@,:%rFg<бxS<_PycO<бxX {'.=qϾ|@O\zB=( gą>{7PK۱XP^Ͼ|@2-e@.0 JvIJ{ e@.0 Jvf=[\`%!$7ݚeweM>8iM>&Ik|&4@>NZ5@>NZ_쮬qҚ| wW,+k5@ǁ|`C]Y,q(+k kʿHS}`T$OE˧">|*N%/) |EȂ}``C |EȂ}  @ǁ} _`@ǁ} _`8P_P} _`8Pt(/(>A}`@:c>A}` J,q`X >AK$8H3`X >&>A}`@5-8H!@5-:9״`@V@۟th:?y8^%暟r\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)Wr\)WgMGMru\)Wru\)Wru\)Wru\)Wru\)WY<3 Gȁ 3yf@~V!A<3 Gȁ4ỹ\$p|-&'Ap<'_+p|-&@/j6 ΓyE&$8OW5W!E;|2vÂw䓱d~@>aQtC 'c;,X6wt8!ġlXP6w,qϯ|;#S"uJgN)XV:%TD`Y ,+A|;Ɲ S"|1S"/S"|1Sbt)8S"q`DN@N)8S"%S"q`ĂkIt)8SbAuJ8S"A|-ɝ%,qPD`:%TD`:%K)XN)XNuJ$wJ,NuJrNuJ8S"]wJ8Spם;%9uĂt)8S"q`ĂtZ ߫֍'_hؼlm6O67Ӝy9Ӻafyy|3u\l66tfs7gt7Sfzy^|̲IJ@χXvNTI`Cye'p$ġ@8XvXPDwI,; ,q(ߐqJ8C%qpJ P dܡ;Tw2PYP*;TwrG@*ѹC%w,@n;T@>ǻC%P *|wJ ݡ:T@>ǻC%PYlkݡxw9* m;Tv:P ,q(Zw8m;TKԡ`ްġlkoXP7,V/%[5x%%[5uƗX֭_˺UK _r˂j| ,V/|ˍ/k\/7dK _rKƗ@ǁ/_6/7,Ɨ@ǁ/|ˍ/_TK _rKƗ@ǁ/ %A/%j| TK`_KXƗXƗ58eA58%A/|ˍ/ %A/p%A/%j| ۍ/%j|!_9vKƗ@_6:l| t_6:l| tZ 9Ɩf#76 ,66M7{d6 +6*6M6 6aӜrGaaa æææyfsüMy=2{d65?l:xt#0~yt8al##(aI=2g;,!,i4x4)?bFN,@~Z*9p8&l.9p87D́с,$pn9iR~4)?p 
8F48FN4M2NdFNI@M2rj&K&K&I%ś%śӤIIiR87X07X07X07M28M28M2:h9C558I@M2śdt$s gj dtI@$spM2ś.I@᚛d,qd&K&K&K&K&9lV́%sL$s oj9an9an d,qd,qdYM2ś%ś@II@S53r( Q3s 眪9NYjfS53r&9TL\3s c9cOajf_f@?U3s Gd9a9a9a9a9p2.,qkf,qkf53853853rVff@U3s`\388853853ś%w`\3s LT3!jftP3s9q@L\3s9q@́jfg@53:9p9_%9q@́Us *9T)΁'p.9OX[:_Q΁ܯN@WTs|9t:_Q΁ܯN@WTX9t_ޯN@u::9ġlUs`CپNN'lUs`Cپߐݰġls`s j9ġls .UKρ%sKρ|N-=8-=8-=u:-=8-=u:X0l:Zzts〖8@N@-=u:8'U‖vZzts j9an9an9ө'pn9an9an9an [z,q[z,q[ztj [z,q[zZz,q[z,q[zZz,q[z6DKρ<PKρZzS-=sKρZzts〖@-=:h9q@Kρ<-=7tOll6O`7#7fOfy<{l>l>}6箛cC3毇|||:{l>z}6Wff^k^>'yͫg3y1l5~͸|ΰ<}6gff\ٌk>͇mgmqͳg3yL>qͳg3yl6L>yͫgxhpm3yL>yͫg3yl5~g3yl5~97Wf^l> k^>7Sf^k^>yͫfl> k^>yͫg g3yl6l6n5~fqf?[l6dl6dlϖ>7Sf!ffl?ۻÇ ~TOA@% (A@% ,qP5Ђ ,qP5P`Vp(q`5PjU 8(q`5P ѮZP@9v5Pj_%K@åj@N] XjU8(A@S(A@ (;W8(A@ (A@% @ (A@^(A@e5P  @(cRWrPj@\ aT5P gj @<'p5Ђ/O\ ȹ9]s5P`KT XjUH`KT %KT Xj@] KT YKT g{$A@%ZP@%r KT I>nj@ǁ@(q`5Ђ $@V-(0q`5PjYUj@ǁ@<|t5P`@Y X 0A@ - ,q[Xj@lhA% K🼓?郭N='䆟o'/~)#z~@؏=O`?R G l>l>?ln6p7O4nͼ |׼6WfAf^jk^ R RqͳAj3y6Hm6_l5͸ |r<6gfff\lڌk R7{n3y6Hm5惻Aj3y5Hm6m6m5~3y5Hm5ͼ u35Hm5ͼ f^jk^ RDyͫAfjk^ RyͫAj3y5HL RGyͫAj3y5Hm616Hm5#͸ l>6gcSٲAfjl>l>̟]5$ _*d<~y 8$TT ?LKy 8/8O`d8TT`ʧKT>XU>XO:, (S˧O-@ǁS˧9vԂ* tX>q˧O-*qʧKS%*ZPS%* ,qPT`ʧKT>˧KT>ʧ9sT`ʧKT>ʧKT>X@]>ʧKT>ȹ˧KT>8@*?x6Srf@^L᫛9~u3U n TºjA5SrVf@NbL7S-f@cL8*q`3Ղj ,qP3U`KL8++%KL'KLXf@LKLKLge%AT%jZPT%KL36SݐT}f@ǁT *gl tL8jA 6Sʂ6SzXf@ǁT Jh T%KL %j Xf@]*ABcOMɾ ?UY ʾ ip dUY>w'&'憬 ą\7d=W'. ą\kAs^Ozz@O\Xϵ=qa=W \ +Hszz@oYϵKXz@XXP szzX+s0s8\.?[^$vW7ml ?_޴}7 [(~|! 5d\C! 5d ,_! 5dJu ق! 5dJu Y R]Cj@~,A5d%! 
kKTCj=Ր:2;ʸi!7m7 gT g7 z K9TqiZB,M 2aiZ *.M Pťi 4-CrҴ@U\`14-CrҴˌѥi'K8KKʌѥi%*M[]XPf.M ,q(3F84-98K9siZ`J9siZ`JKT]JKT]XҴ4-sj:,M tX84-q`iZ .M[PiKBK-Ҵ@Ҵ@ǁiKTiZ`C9qiZ .M[۸4-ġ۸4-ġ۸4-ġ۸4-Ai%*M ,qPiZ`Jy<ҴUXҴ@O4-Ai%*M KKTvCxʥiK9wiڂ*M tX84-q`iڂ*M tX84-q`iZ \84-q ǥcfOvl>ٌ;n棑擑gp< 省|O]l3pSv[< -N< )؂b ;3pΧ`7dQ[,j yڟtY8aR<[]PEm j<[ tYԶj<[ tY8V*j ,qPQ[ G.j ,qPQ[`TQ[`TQ[|X⠢G -AEm%*j ,qPQۂ*j ,qPQ[`rB袶@ǁEmTQ[@ǁEmmA:,j EmTQ[ G.j t.8mA8-AEm%*j ,qPQ[ '.j ,qPQۂ뎒8mNRZce:+V|Y] cgzI*ѢڂK$j;PڂK$j rnQہ8H:ݢڂEmA.s-j reQ)j rkQ[;]ڂZ^ע 7|۵@ڂ\Yע k --EmAǁ@Qہ8H\ Q[pDmAoD)j .q5]c;P؂K$j .q-ͷEmJ\ Q[oڂK$j;P|M&A-Aڂ.(j EmAǁ ڂEmJtA@Q[q-8Pv.QڂEḿDt(j zYOQ[q@] zYOQ[p.Q8Hv.Q8epDmAeK%]Q\KAO GW+HAo~i gKs\Ы.タt/Hs\Ы..Bh QCs\Лt)"]Aw'UvA廊 UvA廊 z'E];).UvA廊 z'E];)^Re* zEK]q.UvJe\h.M!Uv%˵Re\m.M!Uv%Ret[N9h 'DC^Z!/ yAkG4σϭljx@<- r- x@x<- x@\Z/`ށ X\ ^p{A>, .qx@|Y\ ^pKae߁Rw+< ree߁KA{/?-}~Ai rf_7/ݛ}~A r?d߁*> woem_/8\Ⱜ .qXV8wಊ/aYE\Ⱜ .q/u}~%*> ץ8>K r]j߁8qK }~A}~Aǁ>@_}~t =/8w|~A/8t;p) .qX4, .qX4, .qX4, .qX4/A>;P> .q/A>o|~%}> ;:}~A(_q/8t;P>@_q/8dGg_q/8l>>1G7OZRl.>cS/?]e*Xl>&Key~)]6WJ͏ Ԡ^6ImeieI,ye ln.xg^2ϼ,ye h 3OKeyZ/k8^ƙ%13OKeyZ/2<-qi ,qi 3OKes13 uesvg^A>s yu 2ϼ΀\晗%c^晗%2ϼ,7%.xg^f2ϼ,%2ϼ,ye 3/Kd lng/xg^b%2<--es'{gxټll^.ȏx?wZ/[2~~L岹o~Cpo4oλ[ ӷC0[ UC3``pޭr9veRpޭ!87o!Iyܼ Q0% ھ`07'=PI`p^eo`@ 78/{'yJ0\ `C K$ .q`08/{K$ .q`0e% 8H0x% 8H0\  8H0\ `q`0Aǁ@  A-?]AJ t%+1 ].G?=ay+++LAJdʬ` rL` rL`

@i\YF k>>1.A'Ǡ@cpRX\>8HxRX\>8,%>m1a)} rlcpAn} .q1A >(cpA/>8HAj,_} :>jAǁ k18Pdbcq1.Ǡ@c勵%Kic勵.]%KicpEZ\t>8,]J\ cpAv>(cpAv>8H\ c]%>~ HkA/>(cq18Pt}N28P'tj>K%ANANANA& % 2ٖ_9}_9}_(egep>/Eg偒_/K$ .q2ȇ%_(ec/K$ .q@/K&f̠gfe r#j3f3D3fQ1\21f,]͘A+4cܵ3͘ʌ2f 6c 3͘A.m8yu2ϼ\晗c:ysg^n.ˌy|2ϼ̘yeƼl.ˌ11/ˌyg^f<2c~Lf^2ϼ̘yeƼlt01/ӌy_]6qi/<+B粹q&hͼlr.8f~L?[]`p^omop.?sq rr rv \~9?vjmop(fps[yˏ(fp^o8ȩ\ znp3?-J}Kfp^e\pM(fp3eA% λ༛:ntpM(fpA,- .qp3A༛.qp3A༛.qp3A\(fp%n8Hy%n8Ht( rifqp38PyAǁ͠@f } 7p38PV͠@nڷp38+߷p3A 7K$ .qp3M%n(fO 7K$ .qp@ 7K$ .qp3ͿJ\ f 7K$ :n|p3Up3 A- r mf{h 7:Z.% rhf i 7a,

\ h+Z@KO64aeh+Z@KP64aI>j; vjР?C^РRt`;49ݡA[3ibhskhskhskhO[G[[Gu4ȧAέAέ:::/W[Gu4AYGKd .qu4Ǹ%(hq[GKd .qu@YGK;rf_iG_P4飯4ȅ}.u}AO}A.+ [4o 7@J܈Wnܾ 7TElJ܈WzRWt+ *4a+ .qX"JK+=p+ .qX"JKȾJ,+ .qX"J,+ .q4ȂJK+ @JK+ 4AWd_f_iq48Wt+ :ٗWz|Aǁ 2J}.}AeJ+ .qXcJ+=p+ .qXcJKؾ>4a+=P>4AJK+ ʵ4AJr+ .qYWt+ @J}AǁҠ@_J}Aǁ \J}Aǁ+ .q48WæM>6 ?l*~T_6?lT(OaSæWæ.MU6j?lzFlԩRaq65?l*~T]6Jg~JaW<g}~g~Ͼ晟}?l~g~^6Jg~0+aWz+aS?ȉ?AΤN2AΥNsp>8uNӺ%?%?iٟٟhBUjp>|+\A'r"=8Ut!W}CUt!W NEN6NEzp>a>.qYWച~pYW2` κu.6J\l֕g]ˀͺuz+} MJAn5+}qAϗ48Jt+}K}Jt+ κٗAYWYWu.qu.qu.qu.qur/]Kf]ip֕>ͧt.qu.quYWYWYW ҕg]Kf]KWYWKrG'T2}{:LN.An2};jLfQ.48LzQ.AV4rg郎\:p>8e].q/>a\ep>a\rap\rK˅r.ipv>av> G.8.2}k~L\0L\0L\0L\0L. 2}qA2}q48L.Lt2 Tt2}qA2}qA2}q4{|AA^p>aK .q]48_P}p2}U\:7@t7> =&> 7DzL : 7=&>Dھҍ"Dzn!:}p|SAo!:}p|S-8N87tH} H(R"B(R"A/ H Ί"A/ H} H(R"AH Ί(RtH}Ы.(R]+)R\0+R8,WH}Л_(R\ܻ"Ao~H}prJ"% e 1> ?Ao'O .W)O}SOSv?Ac>)2>ȉyA>d^ 92>ȇ̫re^ ˼ R2>ȉyAΖyAN̫re^}e^ 8888 @8l^}d^}pl^}pl^ 8,d^}>Wͫr-ꃞ>W[RʼyA+0>oyAne^ yAne^};)WK%N6̫rE+jp)d^}ɆyA[2>ؒy%K%\ʼؒyAhe^}pRlɼ  W\[2>6W\0W\0Wy%y%yA62gKfl,d^}pl^Yɼꃎ̫:0>8 JL8,}̫y52>ae^}pg˼ϖy%K-jp6>ae^}pl^}pl^}pl^} ̫.qͫ.qͫ!y%yBWdC,ꃎ̫y58Wt`^}qyAl^}qyAWdC,ꃎ̫:0oqͫ.qy͇ͫ.?Ul..|L.?BᲹOln.?ߏɼz%\6W esvl>Sl.O\67es1W/˼z\]晗y2ϼ̫-eyW/˼Ѽzg84^6wgqi^3Oesqqg84^67fͅeyW/Ӽ1W/Ӽzgcס<:2t\]yey3Bye^ ])<2^63/eyW/;<2~L<2^晗y2ϼ̫y׽3/eyW/+?84^6WmeyW?&eAozVesIt\~LV8g^6Ceϟ:8ANyokl έfp^upqssoAyo rjl {༷>Pj8\ gkpyQ\ gkpѤ5o֠:[5|+rl:[185d֠Mg끿+༂:t|p^(gkpAnfH ."@)R)EjpO)R)EjЏ7*Rrՠ@ꁒ\58Pt(W=PwPAǁrՠU^bSt(W=rA .q\@A .qU(W=PwPK$W ӡ\58kl[F-kF-k[ujY5Z֠Բ=`Բ{Y#RtKB-kF-끺}KCjY^#R\kl%VZ]c;PZ֠Բ8[p}#.Q%ᯔ 6E+UA/Z zJ zJlЋV`^R{TA/Z zJlЋV`*ؠ{`UAǁ*ؠ@lЋ9`aelЋ9`K+`V 6afR 6a fl{j`K+aVQ*KVQ*ؠ?2`^Q.*ؠ`T\ 6j*ؠLlл`~6ȔY?dʬ 2e2g|0Z?dʬ=SflFgLA= Sfl)~6A .q~6AgK %(lt .q~6A8,;{i?V =Դ(mA5AҶ=Դ;|=ԴjV۠Zm6 AV(ҳQ6djVFV۠MmA'V 7ܶ8,Anm .qXE[mpj\4Yj\4yd .q6AVe .q6AV [m6AV [mKd AZmAǁV۠@m.l :jth 68jth=j\68,l 8%q[m<`mprVe .qXj\ mp%Yvj\ mpAݶ8jAZm8Zmll=PV۠@mq68j{AǁV۠@mq6ȲV۠@mq@YmKd q%^6kl/?ⲹln> ͟esOq\S\6v͟dl(.l~]6!l>s\L\6-d3/es%qg^V<^6sye3/FeyZm/j{\]ƙ2P9o/A9o/A9o6ˣ%}RI!}Rt) ༽?PBܠcO!nJ q=Jy{t) ༽?PBim˧"mpT=PZHk\>im˧"OEZ W6գS 
!EmGkmst r imq68PkZ۠@68Pk\ 큿8Hk\ mxkm6AZ W8Hk{A( .q6AZ .q6AZ W(mpA魵 .q6AZRkZ 6An rmmBkmE[k{A r!mm5Jkt :jmAZ|(AK zHmp.8ၺ@\ %@dCc8HkʝZim6;%8Hk\ mpAܩ AǁZ۠Wjm6;AǁZ۠@큺t :r68Pk*Z۠@큺*Z]B .q@]B .q%imn=PPK ZZu-66:[Fm[ujm0jm==`F6kJWF6A؂Kt-:%vAoթ .q=c;P>rE:г{}8|SV VԴ@YS VԴ8چV@+ZA5i>pP V2+LJdʬ 2eV8H|%R8H\ eppAұ28A 򗎕%R8H|rt\'>'>O8艧O8}}.zO8{9AOp)m"@qM.%MA?h"8gMA68,%MA68,%MA68,%MAvt68D|L_0Kd".q@Kd".q8Ȏ&e".q8Ȏ&?HqMAǁ&@qq88Ddem MA66lNm":4\NpD\Ⱌఉ868,'8l"M% ϣఉ8a9aqpr&K68D\ 28DdnqpL8Ȃ&@qMDth":488Dth":4YDth":4(qp-:fW'->,.y.—esY1l/f䲹'l?&Kes+r\\6W eCU\6e.䲹 3/KeyY/[<_晗3/KeyY0Z/R|g2<-ŗqi)lC.R|g&䲹3OKeyZ?Wqi)3OKes13{yl./t2ϼZ]A<L.eyY/X]<_6]ye),ŗye)3/KeyY?&Kes{g^<_6׽-ŗqi)l.8L8\~L84_6/˥c2_W~/(8WGANyW7Mjp^]ypsutAyWoow: ./qpޕ8o\ qp^8o\ qp^8o\ y47 A}o|A{48wot7f7:(qps't7Ν@pk8>Pk8.?!k8<0ranp \ 5[t Ek88&A?0rhpqk885ݮ@r Ik885\ r 8 p;uk8A v 85|\An.qk8A.qk8A v (pp\An.qk8A52ͮ Wv Pkt Sk8M]Arlp<(5|\A.rlpe]5t:t 15\˅5]. .qu0A Kt07]rap\A.qk@^5\ pp\%r 855tzMpqk@^5t:t kAǁ@po]A:t kA.q55]3 .qk86u4A[?=}klAOEAOEA)">P؂>^SDQD|r8飈8#E_W8#E%\klM]c .q5%"zNqp8ہtœdyoΘ~781A@ߝܩM.?>i^S\~|&69;AܩMzNm&r69;AܩMiBmrq698P|Aǁ@mr+Zj\nY\ mrЍ%˥>kn,M.qXnYtcAmrpr jK;~[.ةMr6A/ TæS9$AT>pcr H:TzBrx:~ѩNK?v*HS9Ȕ٩dT2ev*2;|کdT>PN v*2;LʩdT2ev*8ȩ|%r*8ȩ\ rpAұS9ANT򗎝%r*8ȩ|rDN刐Au+$,j/9 Qd_fUsA9ªV5$ˬj:HT5$ ,_,\[p@ ,_,\p9m.MA'  \p N6Av.l ,_,\.qXp ,_,\.qXp ,_,\.qXp "-\.qp@ <`.8H\ .8H\ r]J\ r]%. H Aǁ@rqp9j*. r-\:.YZt(\:.Tp9a9br'U,\.qXNXI K*.y夊J\ⰜTp9A夊%.8H|%.Y[\ )\p98Pdi. Aǁ%\:. AV. .,\.q8Sp<䲹If\6ͽesmr\"^6p2lnL.!xܖ|L1.hesQrܓ\晗p2ϼ˗ey /K|\^晗p2ϼ˗q)\3OesExg8._6q)\3Oesq ^ƙp2<ӱ8._ƙpd]q>ﲹλ3XI<:ug^.K13Xy%\l/ey /<.L<._晗p2ϼp3/ey /?8._6w[Mey ?&ey ?ڧ.2'e̗ͫ鲹d̗q>)c,\%\Qp6 <8WGJ λ\@ f P@rqp98P|8]PWK$\zMrpJ K$\.qp9A.@ro J@rqp@]3 :. 
^St(\zKrqp@]3 zKrp-f\k%f\ rmipA![GrнAOAOA)\>P؂> ^SQ|rp9LA)\ھ5A)\.q5]c zNrp(r+w Kt-A5$GFs4ژiczL1aB6ʴ1aH?!I˧"sk~J˧"spT$y %A)yzOsk~J95?%A)yzOsq98Pt(y>P@sq90%.7-y.q9薄"%A$<8, -y%9aRhs- %%˽:4@\5:I:( .lzhz#JtГ@tQ.hH#hzDtpr 郌g|[,cbX:Z,dx-2KX:Z,}A>-2K^X:Z,\ tpJ,\ tp%K8H,2K$>Pb Y,\ tp.G,.qXBY,:*弉AV(VRA*:FJ#H%u.M:TR(ZIt:/M::Ծ U$:Wdwe_A=}AVU{.qa9H`_AKU8, .q:*׾<`U8W\ _U8W\ _uU}W\ _uU}%U \}Aǁ@_uq:ȓ U:8Wdm_uq:Ȳ۾@_uq堏}%AP堏}%U8,}.q:A.q:SU8WAP\HB*;8os!u, +༁΅T!;؂7A& ` vpB*A༁.q;Ar .q;Ar .q;Aehʂth@ v?Z;- vp޻@Y~&Ђ젟 `( vp!s 3΍ʁrYoE.eܨ(upI\A.:tY\:87*A'. 7vYF%de.@uq:]Aǁ.rnuq:A..q:vY(up\A..q@Be\ up\e\ up\A.>P.e\ up\:E]Arfrfu[f3e. uvYm@\e. :8et:tY%[\{(u[;Kt0Aw - .q tY%[.:%6]%rY8e\ up\A/ ]Aǁ.렗tY:%6]Aǁ.@t:tYĦ:8e.@.[~oq@]A .qt>PWPK uAuAU=`U=`UU@b zzN]uF]V2ꪃ^#RW}2ꪃ^#RW\[l%Vb;P꠷U8[pn[lA_X zzFuГ@u_l>P.'.tYP@^(WFP.+]C=O铳;LAtv铳;:2}rv铳tv=}tv=}tv]g}Atv]gt:tv:t:tv%rv\\;6FAAtv8,* ;a[ig>pnegwГ@gwЛ_:\.]$'7tv]+Xvv &A>1ngwpdx2vv^;|dx>P vv^; dx.q;A.q;A9Kw%rv(gw.q;A#_vv8,Gw;8>@S}~rǞ ){ƒ==d[kOxб'<2͞<{zƒlk :{z?HOx=A(Ox=AǞ [{\it 4'吆=AǞ ;t{ƒ==A8,4 4'P@vq{;8н}8^p-wA.q};PKt_/A6ӽ}8^fK>Pto8Ƚ\ vp%rom{to6ӽt>PtoZgq{;8н@vЋVZgЋVKt3] .qе._>P:AA/zzzYO Qzgzu+9AwWk)^;kJwWk)^;AKt+9%uA/Ω.q]:PKtHFkwFkwл7Z0Z^E}AAizMkv;U4A?o!Uw7d%҈F@iăKF.`uRLa9Ai[N}p:qNBæ MCæaӆM}sC?l6-TuaӨ^6TsaӘiJ4?lj. iB4?9ml0,waS rƙMUƙY8_f*ƹM6͗ƹ ST=8kreVr\{.+STg+S>}pjʵYddpjddaMxplplplplpZ>a?a?8O\0[\0[ჳ_ +! g+! CN;J%8[3VJRyXჳR ,~pQSSgKfzKSJ8@K曇.qo>-87\0 87\0<| e߃87B8,~ e\0\0\0\0B !~qA/!~q=8Bt ~W[t ~qA/!~qAR!~q=8_m}лT\0_m}p-/ .q>ajp?1Avcn|A z7݃A7Pp?e ھ zK;n.q=8.qgލۃK;nۃK;n n=`p?zvf݃? =`p?m3z {pv?m3z ~z Kf6؃._vp|k,8{ .q.q|=8tv :p?8zY{p)KfvprGSn]墦%mMtwK+r_rKAz ~n A7? 
A7?҃A>?G*z {p&pk9ׯ NlA>$$?xsHlA>$$!想lA.lA>$!想lA>$p,p,β8̲8̲8̲8̲kY8̲,%p,p,.$prJOkY !qlAϒ?8h=C/G>:t~*/G$3lk%3~p9&.3mTA^TA?R!h.qXIh.qXIg%TAG?B ;tI#Itx!GJ$TARyDR8v.KG?]R8,'$.qY/,p,p,R8R8RyDR,p,GJ$p,տRyDRHt qTARyFRrPRHIt `$qTARrPR8,%`$prPRyFR8,%`$prPR,prPR8R8,%p,p,R8RyFR8R !`$qTA6g:?8@:?8@<#:[Gʛx4T%HItiI?a9AJ?^lesN.ۓ.Y]6G䲹8l/Kx|L" Ales2.q~L.SqL\6>ͽes >&ey/+<_晗-3/ey0/Sg"2&es~g㲹ln?&fp.sl[D ΅^npn0swD+`B/I?Y?87AEAE.q?A"K$+K$+K$?P"/DAǁ"C@!P\~upq?gE 5N@?sӄЁr bۮDW>strgWp.]A:t\vt?P 7vK?PK.q{\%r8\ WAK.q@K.q?ȅ]\ W[qK.qt?U]A. ]AreW;_̮ 7v(W_\daWt:tԅ]h .qЅ8Bcp.48]h .qЅƠwt %Θ?1]%r8\ Wp\A ]Aǁwt?1]Aǁ@W1t:t3?8@W1ݘ .q ݘ .qЍ٠=&t=&tsA ]A ]Ao?PWލ?蒉tm_AW^\⠫s%:n];P_M] .qչ];PWKtu.8{;Aiz4i?Ps==5-A&-Aiz4izOM{jZ~R,%8]c ?A5kZKt-Ath:],AǁkZ-Aǁ@q@Y-Aǁܴ?A[/ZK z\?֋*-A^8,9-zL(?pecBo\nYP"DŽ"QS";a]R"jEA A܍/\A5?-Z \mAk 9}GOR-~VA̧+!ȿ}A{ו+!_W~Cu==!_W~C ]}%zCpp\=8}%zCp@!ȿ}%zCppr{.qXNz}A{ t /}Aǁ{:/'=?8r{/Aǁ:|uEq+_]"8.'=ꊠ1WW8,'= %IO"ȳ<~uEprӯ,_]\Ⱌ+A1lesAt\^6'9X{-.9T͙les-1les.t]6Qkqg^ﵸlF/y^V2ϼkqg^^8|eyⲹ 3Z\ƙ{-.{8|eyⲹl.@/q^\ey2Z\{-.y^ ey2ϼkq\a|eyⲹln/{-.{-.{-.8|esq\^6W]9~wŁzwEpns΍ށzwEp.68W r F#wWֻ+s\wW8%zwEp]w%zwEp]w%N\⠻V_M] _OS}A|=Eб)sUzcS&8W5AǞ8Po8Po=Nm_g@9Po.Y(\A Agor%7P' A\"@t7P(@q@no:|Ep@q@\7P8 %zEp["Aorw7P8 %zŁzEp@\7P((K ~Ep@\7P|or=7P(\Q (R A%ە#ےeEcƗ JT}M,05 Agmr[eE;Z("5 A.jmx.18@th:4Pw%z\w%2Pꢁ"AK.@K .qлK zKŁz\wAo~i.q@(@\ Ep %2P8@@thzKEq@(@th:4PAǁ@EЛ_( AoDi:4PAoDi.q(8kp~''-Id"Id"8%[Г@DoJ&$H2qd""J&Q2%z=r .q#ߔL8ہLd"A܂K@=r .q#& A- A- A/hznizOMŁ2P=4P"=5 @4P8 @\ Ep"AOՂ>"AOSmh.qS(>HEq"8@ @th؆@Eq"8@th8P@Eq"5? 
.o,m.q"S/(K6P}EEpOh.qX]@ %K((\^j@@Z6P=&4P"1"7 A@ߚ4P޲"Q(@\<ⲁ"w\$^"l-\;zqrJ NVb#w+1\;Y绕A A~[\;}k'4L#,42%42 L#y-442%442 L@424K$8P24K$.qL#A2 L#A2%Q4K$.qLҪe%˥U4A2Rt(Q4eAǁ2@FqL@4eAǁ2 aXqri2@Fei8,V-2ҪeA^KL#ajFג,.qX.Z$4KKiy-2җq|Aގ/#-zMA_F/#|җI_Fk 2'}.zMA_F/#5}%lqX ڗ\ _Ɓe8ȗ֎2<`e8ȗ\ _Ɓe8ȗ\ _F[;2/#Ave8ȗA2^bӗt:e2}A/8pJl_Fq/#%6}Aǁe2}.W.qXۗ42K\%/#aJl_Fprؾ*}ʗ\\%/#A*}%e8ȗq|%ey;2>H_Fw:eyƾ:e2}ʗt:e2-<].!ۗ532/2%d2>Ť/#Lr پҗ'kz3.^ߜF/ |L8/Al>l>les>&ceeqes.Su泟es.uٜcC0}Lƌ(]晗12ϼye̸l>3/cey323.Әqgƌ84f\ƙ1p2<qi̸l>׹l>弌3Ocey3>{ؗqi̸3Ocet2ϼ__6k^6k^epyg^׮/˘1ݹ3חye̸l>ֿ3֗ye̸l>13/cd̸3/cey3.˘13./˘qg^ƌƌ84f\6^6_ƙ1c2f\ƙ1c@1+4m\6|L˘6.N.24y_p> rm8P6|p4Gp>;P6 6|X4Y4GckFp rl#A64Kd.qM#8otKd.qM#8otK+Aop}ŒPJ 3N6&8(aFɦ0#8糚MEp> 1Aڋ༜1J{txrlEOE{5k/YMOE5k/YMOE6k/Ý^j/p4:^(E fk/%^(Ep%^8H{\i/K.q"%^8H{q%^8H{ڋ.q"%%^8H{Aj/1Y{fڋ L^|ڋ WM^4"A.YrudEVk/"]An[ڋ08P{t:^%zA\%^"A/K@ .q  zEKŁzA\Ah.q@i/^R{\ Ep%^8H{ڋR{tzEKEq@i/^R{t:^Aǁڋ@E+Zj/A5:^A5.qi/8ā<)ϓ,?O+"=5YWĦ"(+ԋؔW}lCyE[;+_A/΂Q^\g%zq8;P򊠗ؔW8Yp^g%zqt(zFyEsKyEsKyE5+[+6S^qA-8 Wm@+6S^@+Kn,AƂ6S^\wcAP^\wcXЇ/W8Xp"8P^t(%:W}ByEq"8P^t(:W(yEq"8P^%.qX"ayhyEgWW8,-䧠iV^=`^<""7.zzMEFEoj/> "o8pyeEp ڋ^IJ"lGA~lf Alfer'eE6[4[moE;)-I}3nr48PN #ȿe;1#ȿe;1gN Y#ȟe;1#ȟe;1gNe;1K.q@91K.q#AN91)N91#ȟR.q#ANvb8,7ZT;1>H'Fq#Kvb:1AǁN@'Ɓrb:1A^ҰFAǁN ?.qXnډG%ˍV;1d'FprN ,ى\h#;Kvb8,7ZΒAǁŒ%FAR.-:f0¡AŒf^ 3~P5 3/.-.qg%f(aFpA/).q0#&aFp%f(aFp%f 0@ 3K$zHaFp0#8AǁŒ@aFq0#8PnŒf 3ލSt(z7NaFq0#8Pqr}ŒA{0#alaF,.qX/[7 3Kf(aFpr}Œ 3Kf8H\ aƁf8H7 3K$ A{0#8PE 30#8Pt(:f(aFq0#8wA-A-A^P"r}ڋ*.s\_"Jj/.ח AS{nڋŒߢl>"l>0lNes(?&aeeYes /x惡es/xٜ3~LŒ^晗02ϼy%̸l>3/aey 32 3.SqgŒ8f\ƙ02<q)̸l>l>3Oaey 3>ܗq)̸3Oaetw2ϼnn_6z^6y^׭ey]پ3 ۗy%nk_]<f\6_5<f\6^晗0cf\晗02ϼy%gy%̸3/ae_Faey 3.O//S1 3.Sqg{X2Hq|"1I8.O/c(l>,0LXqD08 6Hp>'

8ʥtx8'ҥOjIMK#8oÍriO4\1ۥӥ.|ӥ.|ӥ.|t\Aǁ. vi4K#ݳ]Aǁ.4K#A.4K.qK#U]%ri(F94K.qK@4K.qK#մ]ʥ\ Fi4K.qKti~K#ȥ]A ]AnrcFW4ܿڥ. wvi(FkX4\ڥ]aqK#8Хt8P/K0A/KzDFp?[zDFp^8a[X4< z KFp\ʥ.4K.qK#A.ti|.@F[X4]ʥ.@FqK@=m :ti4ҥtzIFqK@=m zIFp8ȥq8ikqK#?O4K@=F ϓ.<zOMƁz']A/.-%6]AХ֎.W.qcvti81ZpĦK#AK#%6]%z\h1Zpi+Gwom=m=m{h#衦h#U4EJPS*Wm(FЫh6^ESmj%z\⠗jA).qKOf(.qKR-6KR-ARt(:m}2CƁm6>h#8Pt(:m6h#8Pt(zOƁm8,%-`嵤EAlQ\P$+"P$l EB E8}R?c${[tE(9ͱ"&ͧ͑lNes /L>{iles/OK./1)E.cxٜc 3 yl>K3/Ǥ3/ey)E.K)1)E.O/K)rg^J俌J8T\6]6"_ƙRcR\ƙR2_ƙ^[IS1iJ./>iJ.p1iJ./c)l>ˬ) ΧrT.ȜYSr4%T.86kJ1Dp>;P 'Қ| O傜JkJKkJ1DpMiJ) %Ҕ8HS\ MIp4%"AK) %z\yA^Gk) ·&Tp{KMIp>4 ·&Tᥦ$8C% ·}A= (H)l$8oÃa_sKH8FPsKH9FPsKHk9FЁF\H[6t(9P ҖFK$9PFK$ .ql$A Ԗ8H6rd#An, .ql$A% .ql$A ֖(Hpd#An- .ql$AR6bʲ ױl䃔l$ e#Ae- r3kH{FZ6rd#A.h- rEkHzAǁ@Hql@ .qk .ql$e#%lql$e#%z\zAR)9PKZ/]*e#%(HлTFK$ .ql$AFޥR6AFe#AR) :(HлTFe#Aǁ48P6t( zJHql$$e#Aǁ4$e#%z\ ȁzth 7V´O+Hп}C+ȁzi\~*m3 A r^Zm$SZA^ J zV .qKʹ8Ձm$A/K@ .qK@+HK2ZA P r A/h zi zgL+ȁ=Դ3$Wzi zgL+H;cZA;V,A͂K,1 %zn %znv}B+Hp8 AZA AǁVPh9P͂ A th :ZA  th :r@YAKd zN+HpVW8,m9PVOh .qX>   A+[A0ZA0ZA\^Z  VV=`Ħ$$%6 A ߷$aye+HKlZA\a ~~[6ʐ ?߭ ʐ Vh reeHV(eH,+C2$m!2$+CfYt 9Pʐ@eHq2$+C!A~Xt 9poeHt\@MIYSr4%A~XXS?t֔s5%JS?tԔs5%AgMI?YS䇅5%AgMɁҔaaMI?YS\ MɁҔs5%%lqg4%JS\ MIp4%%Ҕ8HSg5%%Ҕ(MIA֔8HS\ MɁgkJK֔dM) :Ԕyǚ@MIq$8PSt)9P@MIq$Ȼ<֔\|$8PSV5%A^m$alMIW۬) .qX.>[S6kJK֔y͚5%A$8P67OȁRF e#A(9p]jHl$=e#AG`yѲ#HȁRFK$ zFHpd#%l$Ad#%8H6rd#%8H6% .ql$0e#%|”Fe#Aǁ@HkaF\[6t( z-LHql$SFe#Aǁ+8,W-  e#AP6\\l$SFK+(Hpr޲FK+8H6\ +HЧ{䃔p(FЇLTi@Fп Tiy{*8*mJ#C&4iFJTi}JFJ#8PqryUi|[n߿)^6\6\+l$Ӹl_6u]69͑l^LI/x4^6/O.14.xٜí^62ϼdZy%Ӹ3/_Fey4.Sq|ug28i\6o/Sqg2eey4.S1]3Oey4.>ݗyu2ϼuL/Ney3/t2ϼ.s_晗L2ϼq_晗L2ϼdL2ϼdy%Ӹ3/$Ӹl>3/ey4.g24.Sq|.t|Jzg2Iqg28i\ƙ<3w)yecw\6]O;.?8Og͗;2 :IVp> 2t(AGp> r,λ༺'YJYW$+ɳ#ٳ#8IVp> #8K$.q#A :>A߂K$Ϋ= .q4i(gNMyAÁzFthd@4IVqI#8/ÃI#8Ф$+蹥I#MMl(蹥I#uMl(蹥I#ȝMl@4MAnm:4i(F+i4M%2i(FpL%2i8Ȥ\ Fj4K F+;4Kd.qI@4Kd.qI#ȍMʤ\ Fkk4Kd.qI4iI#]MAnRl MA.lrbF *4\ڤR& W6i(FY4ڤ䩄MAYqI#8Фth8PʂK,AʂKdzCFp8lyh.qЃ=( zIƁzP\eA/=i.qI@4^zҤ\ FpL%2i8Ȥғ&ҤthzIFqI@4^zҤth:4iAǁ&@FKO4MAo'i:4iAo'i.q[joAhi7Vh V'hzKE!Z'=Ao~i 
VW]Zl'Z"AOzK'/%zsAo~i.qГ=9POzK'8jŠVZ=ԴZ(E +Z-jZ-jqA5AoizKŁZ"-AOŁz\⠗_%zV 8V:P/>Π"A/Kd AǁV@EZ_AǁV3h:ZZ-AǁVe:ZZ-^jq%Z"ayxhEoZ-KׇZ(EЧAZ8,om4V]'APyQyʋʋʋT^=}T^m"*/\GYyʋ"zLEpJʋT^+B@+B"/t+\b[\"ȅA~[\qA., VW| -raeqEq@+Aǁ -:W1cqEqƹA~[\t(c??p-q.QqhO4I#ȏ4& I@4q& I#'mI4I#'m8P& ?flI4Kd8P& I#A&'i.qI#A&4Kd&4I#ȯ+4Kd.qIM%g4I4i4cFqI#8Фth:4i(FqI#8Ф64\.>ۤthr`Fpr& ٤\\|I#ȫm6i8,mjM%g4fFpr& ozڤ\\|f# j6f#5A瓚IF[;j6'5Ao:l\=f#5A瓚vl8H\ Fp?[.qf#5%lx.qf#A.qf#Avl(Fp4A/.qflĦf#8Pt:lj6^bSqr'ߚ@FKlj65A/:lj6\[\ɷf#3j6K;l}CFpr'ߚt.qX[q4%˝|k6K.qX[td o]Z?PS?53'>~"'ffā%d'>~"[O}2CDOy @ā%d'^EC?q2@p٬uicˏMcӬ4?6m/S4?6MScӂǦFYcӀ4?64?6 M}٬uǦ1u1<?ƙ8:tcyh~3ÏMkCcyh~l tcyh~3e5Ccyh~l:ElcM'(?6g~||s<Ϸ323?k~l:11u1ul$==wpO<)pO{Aoaxp%{%{%AJ=׃KfąpO<8=ă>{"8?zq{AJ=ă:pO<8=:pO<8=pOgăKfă^b==z ăKfăKfă>Ӂ{%CI'B/K<^A]zxka%,%A/K<^AxЧ$KӁ{%˝|'.qX=.qX==qh OI`xw'exУ ă,&ă>nAA / \ OI`xw'exЧ$0H¸lՏqƗyuTP2ϼnL/ey]03/t2ϼ_晗 2ϼLyel>)3/d3/ey .1 ..qg^&󷿌&84A\6 \6]ƙ c2A\ƙ 23Oe3= x_Ǚ2<2ϼye3/+dl>3/+eyY)>&+eyY)2[)yRd+Ep(+Ep>O rl@IJdJ<)鲕""8/ЃyRp>^ /V[)=AVY)Kd.q"8/ЃKd.AV|\'s%z2w}(gyOIԁz]t(8Pn|-@%[%v=86=}tK["86=}tK["86=}tKĴ["86(Dq["Aǁnǣ)Dq["An.q["An%KroDp-n%K8Pn%KroārK8-n%K:tKdz["?O%k-6n wvKѳ["=AnrgārKڳ["A-qX%Aǁn*AOK*An1tK8UpX'V%zb\'VAo'8POK*$%rK(DI%K.q["An%N-A%Ao':tK(DI%Aǁn408-tz;IDq["#Aǁ*+!$UA*T9g#A*ץT9@= z]JCk~QJ/rz?DCp8HRKl@^R\g#%R9(CpTAǁ*HCq!8PUUq!3UAǁ*@Cq@UAǁ*wT9(CpTAr.q@S\ CpTAP\ⰼt!#>HC[X00\VYQ*UV9=`T9!!-,UAJP߷T9m A~thk{ŶKdke D[%A~@Y?@%JϷ@%A|k D?Z%A|k ā@O"ȟo-.q@I I K$8-nq"AI K$sI "=K K$.q@I Kʖ@8,7- %Aǁ /Xt(:@J %Jt(:@yʖ@J ~P\P";h@8,7-%% eK i DprC dZ\P"+@8,7?84D)!"tizSHCD!"M! AGezSHCD9m:4D(CD9m.q!"Ae.q!"M! 
%2Dxd.q!"Ae.q!"A74D(CDp Ao i.q!4D8!"8th:4D"^qr߆@CDЋs" A/i:4D"\\\!"s$"K6D}DCDpr߆ϑh.qXq %U"Kd A& AL"M"fCDG4D}BCDwl8plCDG(4Dy%ӆPhgiCD;c" Aǁ|= 'C{<#{ټ.M^6e^6OeF|yT?&Ce^6ee^6e~LS)Ce_ qi3OCeYey".qټ3OCey".Wqi3OCt2< qil>0_#<1]3˗yuo2ϼ ӥ<|g^#<2D\晗!2ϼ !2ϼ ye3/Cdl>3/Cey".2".q|*p|Fvgqg84D\ƙ!d2< qil> qi3OCey">&Cey".qg^q|vg^<2D|L8=9=߁=c Gâ {|,tDAEy˘HX(8EyK$z{K$z{ .q!8 8]p^}&u Aǁ|,@#Cp>h zhd|x A ywidrUh#Cp> zhdr_h#Cp> ·;1ȥ p@ Amd:42(#C;s %22(#Cp %228\ #C+tKdd8PF w628\ #Á228\ #C+u!AF 628\ #Cq!Ƚ AyF g62K! Amdrf#C{4!U A.ldF)8thd:42N%z\N%227!AK@w .q{w zH#Áz\NAid.q{edzH#Cp %228\ #CkD>H#Cq!5" AǁFedzH#Cq!8p Aid:42FW!8Ыp[FW!8ЫV^"8Ығ^*A/=UzNB[z_: AoyU.qc=zIBp[(BKOzK"A^U.qW!8,OBЋJkRSp A/r(]z)]zI.=.פt!& J^҅.8IӁz\'M%z^҅=i zIBp4'MA)].qГI>HBqt!8P҅)8P҅@Bqt!8Pt(]8P҅@Bqt!0 J\ BkaJK$]8P҅”.8H\ BЧ$.8,-]^w*=C*=C*嶰AOAuPA%AuPATRA >!8Pt(@8p-lB] y.Gy1 .y.yz|p&e1e]9/ݿ"8\6\ƙ2#l>"3e<*|g^/Kd13;—y%2l>3/ey .y%2Dy%23/ey >&eey .Kdp|Qdpg"eAey >&ey .Sdpg"C8\ƙc\ƙ2&eyey .Kd1 .[dTXdwtJdOp :8 ΁9 up> rH,2.c"Ap^Y|ߏup> .q 8K$2.q 8K-A"|\⠷o%zA x <8Ce·AOy}q Iq egyq ȝ4%1q Ş4%8糼 {6Ӕe:4׶q 88pA.m:488p%288\ @pAm.qq@\8\ @p8\ @pAm8PƁ\8\ @pAǁƁ \6I@6q ȏ~\u8Ɓ ]6q@y8Ɓ 7l8P/AǁƁ@z \⠗@%z \ @ K(A/K .qK}4@%z Ɓ:PƁ}488\ @p%2q48t3o{Ե7=A/=3d|Ы.d=A/=3={_({g|p8zЫ.8zŞ8g@8g>8g@ΞYz3z|g|ϞY?P=AO<{fg>m{T|,~zŞ .qˑY.qˑY.qˑr$.{Kr$A=d|q`|q`|]#Aǁ=A/w3t3t3t3t3z惎{惎{^g@8g>e {K3z^g>A=%z7Ξm{ލg> {惞!=C?py-g=Ao3f=A{ޠg>t=Ao3 {^(g>ayC* ?]%䧴T|ҮSUA^s|&UAn\%wT|%W* 7K v|q`|q`::;UAǁUA~XJ>8J@Ua***U%\*U*U%t37{=g>o${=g>ȟo3Ϸ[{Fr|?ߺg@2p|?ߺg>A=g>ȟo3\⠞8A=%.qP|{.qPs=%.qP.qXng>a]g>8g>C::::?P=Aǁ=Aǁ=ArRy=%Ry=%Ry=%Ry=%Ry=%Ry=%RmAǁmAǁmA҃bQzw ]$(=UTQzpbQzpGU҃(=U҃(=AEAX~҃KTE%*J ҃^u(=8(=8(=8(=8(=/\n(=8(=/҃҃(=8(=8(ƹ҃K.J E%ˍs}¢ƹ҃>aQzprE(=8w.?ۯ =}<Ѓ.]AYrz'mBz. 
, FЃdz =+|Bz....Bz.fe]6ecBl汻l5ey>.恻l}e]6e}L]ͫAs.t2v_6//|.̳ y}g]qم~ټ̻3.8Bl]6o/̳ 2z .qPyp*̓KTiZK+A%AoɃ[z| ŃF68zTX/7샬rMz|@6Ńܕ^<8P|@Ջ֋*vxq`r_zzՋzՋ8^PmA/m%zp>8Cp>AR\⠷%j ۼۼۼޥ@}:lz66666ttm;AmA\6m%j.qPwЫhy8,AvbawcDŽ.O\; \6;1aaw;)v}bwR,zEda3vםA~@urN W@z;N W@kٝ;;Zv'w+ wrA~-;8;8@urvrvrNN ?t}:湻l橻l6]6^/Gcꍾlyl^^6Oe]6}ټql838썾l>3z/̳72<{/̳7cꍾ3<ꍾ3菩7`2ϼz/̫7/s7t>PT$?EwC5pp^S 䟥88?OuC88C *s7tp^S|H\nuC8:88AoT7tp> .q[|oKz48*Xף _J8rsp>-ΧP)ŕKiAp>-·JAn\O Tsq`s{[88 .q:,q.qP*q.qPspJKT\ w.q.qP*qr88@889A%Av]|JKT%%*q.qPsp89fs_nxr x An7 An{|\9ȕ\@:lx:lx:lx>P/Kb A/Kʀ %z1\@p^ 8@46<%z1 %z1pޠ9A %jx.qPsp?B|@+zB7h,TzBu8  ^P9  ԽW@,TzB=`WJ/Y7/ Kt8A^P9ATr+ *8P9AP9Ad*T˕A/rXPP@*B堇A/rX| j*arЋ*B9,TzB= 8PK( A^P9A^P9Aԣ,T.qУ*AǁAǁA5Y|*drq`rq`rq`rq`*T:,T:,TzBU\BW],T.qP*TzB*8P9/ Kg;.T ^P91arcB.TzLX .7]P9 > `rQ*erkP;ugrL>PA~ֺ39-;6y39-; vgLr ?ݙǝdgrL:L:L>PAǁAǁr4ٙtؙ;;Tgr_L:L:L>P%L.qPgrL>P%L؅A~P9]| rr?ߺP9ȟo]| օA|B ur?ߺP9 * Tr[* KT| srp KT| KT\?\| KT KT| srp KT| K.T.qXGP,T:,T     Trq`rq`r\|r=҅AǁAsrpr=҅Asrpr=҅Asrpr=҅A^trpr=҅A^trpr=҅A^trpr=҅/*ݙ-;^3@u&^v&agr{Mv&pٙ^A39&;9L>PA^tgrЋv&83LzL>pL:LzL.qX639_rٝ%fw&8,wݙ\mvgrpr?]N~ރP91.TzXZA^sr3B oǸP9"*frc\|rw҅AYe1*frq`rq`rq`I*c*_6e]6>B湻l橻l6]6^/Gc*Tlyl^^6Oe]6T|ټql83B8,Tl>3 /̳P2< /̳Pc*T3B<*T3B叩P2ϼ /̻@UÍ ]}y7Tq>|7Tq߸88osFp> /Wmnp>g}AU888n8->·%zA㟠nwyuڍbqpU}p> ۍ\ 88>8Erqp^}p> rv༺?PAǁA.Qntn|ڍܤ888A8A%j7.qPqpڍ\8A8MۍKn\Mn7.qPqpڍ\@88mۍKn\v_tq_n7 ۍ\8OtqqFxqqv /n7rv 7n7>P\¸8888@].q].qPq[8~p%\A.$n|8~K28j7zIv88AdGqpxbGqЫ.vbGA/rQoЋvbGq_(~7(˕%\{A/rQ\uaGqp:KQ|:KQA7.!znYC|jd qs?1YC|j[qa q7.!zg J\⠫%Jƅ5%Jƅ5%Jd qp8!:!:!zu>88$kkkkkT qq` qq` q;)b qpjT q;)88A5AhYC\p'DŽ5A.YC&5A.YC5AeA~L8u˄tp ye?Sqp Y2U&:eA~ֺL8u˄?2 ?k]&tX&tX&|ʄ˄˄upq`p?ǻL88L@ s˄˄˄TppʄKT&g˄Tppʄup3.e 2 ?f\&O.T 2 Jup?L8ȟR]&O.ceA2U&䷽˄)e%*>PeA~L8Ae%*>Pe%*.qPppʄKT&G˄KT&|ʄhsppʄKT&|ʄKۈ..qXn#L,:,˄˄˄˄˄Tpq`pq`pׯ\&|reAǁeA^rppreA^rppreA^rppreAFtppreAFtppreAFtppreL88/8] T_ppR_pлd_p}A.t>|^]/8ȋ zWþJa_pp?Ⱦw5 : : : : zuɾ{ : zuɾW : : >pg垱/8ag7 .qX/8M>K{ >P}Ao~}A|r}A/8/\.9/8b_pЛ_}ľ7  }A/ : : : >pW] >C_McشUl i~lǦҏMF/Ǧi~lڬ4m?6 ۏMvcRǦA1c_/ǦuM ?ƙG_Ṃ/8 
imcyg}?6m~lڟg}?ƙG_eṃ/8 ikz|<-6?6-L3?߀lcyˏ?晟/3?g~ iCc/8nmƃFp ~ߨ/itt Q_6pAͣ8}.q7\0yp0ypjB?88?8/A.Q〾 7 ~q@_K/%s_K8}r8}/An/%zݤ8}.qU}/%s_ܶ/%s_K8}.q ?ʄgʄ/DƨLA~LAndT& *~[ ?Ƚʄs\ͨLA.gT& *Fe:(~q@ppK.q˄e.q?a?aVL88_p|}AoP&~p.~4 ?a.~p\&|!}N žz'bBoppwR(}>}qAIAb?ۯžžNn8nž.qw\7.(}p\78>a.ž[žz5b=( ž[žznQQ}ܢAFPW#( žz5bAkža^78yp|AFP{p7(}p|%s(}q@>(َ[9Z',`|/GxlQDVe〰Co  ;=>a߇}z}q@ap>8 〰C eC} C}}}u)¾qXV¾}M!ڽ}}ڽ}}蓀vCF}蓀vCo }M!ڽru]އЋs{*o>REyއ|T!*>7y>REyއ|9U7TQ!_N}ȥ}ȗSyz}q@7=>8 />8 O>8 yއ|Wyއy9p9p9/s2s!_N?8| a߇|Qf#a߇|Q!a}GX}9 >#¾C>*/3 >#¾}}GX}.0}9ot >\a>\as2s2s2s2s!} C)p9p9a߇8,7}r3Paߋ>8 w?}q@a߇}z C¾=>U(}f¾=>U(}. T!B)pf¾y3Pa߇8,7}ț >\aC T2@}f¾qXn*a߇8,7 vCD9h>^އh{M{z vC5}Ȼj>"pn>I{zvC5vC/r}q@އ{z}&ڽrXއ{zvCڽDއ{*ڽqXk~{.\%Vh>\aJvC}rXpn>rއލ,{?vC!{ڽއ^7U {j>rއvC/w}q@އ{ڽb={g|͋6k6kڼA66ky6/Oͧ|خg3{{kAsvݿvyczm~pgk<۽x6o̳{gkRڼ"3v5y{?mkj~v5y{̫{m^^w9j^Wϼڽ{g^k>j^W3{̫{g^k{gkڼ3vgj^g<۽x63v5y{_>S<۽x3vgj^Wϼڽ|LkWk>j5WvCqe7w U Ǖp޻gp qPݐ2{p _Bn8]3D8 g+*2gppq!B[ln0W d+7w*-raVn8yUΟ[Bnm Uy8sKՍ[*?Vn8} q+78z t+78.VAre qP+7\AprC.2 qP+Z!7;n8.Vt+7\Aܐ;re qP+7vӭpre qP+7\At+7I`+7{[!n~d+7ĭܐ{?rCnA q+Z!W!n\rVA݈q+78z=2.2j~g+7\A7et#nć8F|nć{P7et#>p݈?Vn=[[znm-<e[vkCo?ح a.~[znm=vkKOvkC[ΗMi.~[.nAukCo?ح qP6\AڃֆfC }=l(fC0fC@10{PfC=l} ̆ك.㠋2zl.f'00.㠋u1;zp]qP`CfCǁ5fbvq``6zlq``6800z=lq``6800zAfe Ta`6\Aك ̆^00.lw ̆8,u 46dCf!zell6dCf!zƆll6dCoؐ AcCr 46dC/ِnȆ|sC6 ِotnȆ\p!V?l!?ݐ=l!?ݐ pCC6dC!zؐ =lTC68!zؐ lq`C6ǁ كjȆ|vC68!zؐ=l2jȆ|tC2jȆ|tCC6dC!g!Wܐ lGX7dC>º!{P ِnȆ|uC6#!aݐ lGX7d!.㠆lGX7deԐ=l7:7deԐ qPC2jȆ8!.㠆l!ܐ qPC!ܐ qPC6\A كjȆ8!.ܘsC68!zؐ yE lq`C68!zؐ=lq`C68!snȆ6dC^rC6\a1l+BnȆ8,7ܐ yc pƜ!o̹!.ܘsC697desnȆ1lrc كjȆ8,7>800{PB ̆00zl- ̆Of*0zlK̆00{Pِ a`6ғ a`6800z = ̆fC/==u`6800zlq``6ғlq``rׁpί00.u`67fe;̆lr ˇ^3M{Pi;ciCܙ $cri_ 3friЧi;ciCb6ΘiПP qPCiCӴǁi4ӴdLu|0|,ͻϔ6k6kڼ6ϔ6kY6k'|3ekͫk!3llyI{gk<^gt\^׍k>oyg^ϔ3l5yek{k>^Wڼr3lg^Wϼ|敭Lkgk>^W1[{gk׆kdžk<~l5yfk3[{gkk<^g3ek3[{gk<~l5yek+[{g^ϔ6˺3l_s6gkypqPڐ} mn8 ' /ֆ67?n󷾐F8[7*[.lm8q qP6?n|2m_ ˴ MAi嗢2m8ySi>d6NerޔwiySΛpNr;2m8oLΟ-Ch\ =,ӆiC.'] =,ӆ8L{PepieT qP6\AeڐJieT=2m˴2*ӆ8L{PepqP6epieT t6\AepieT qPCi}2mW=iC.8\ 2m%˴!W{.ӆ\tLr2AiCn;\ 
p6feڃgr2mq`68L{Pet>\AeT n2mه8Aݳq=pݳtه8}U˴2gPeY Ta%6R؃1zJlM+*ԍЛVbCY JlOzJlW/VbA cd%6&pUbzJl*2T%68Y }X=JlVbC0VbC̰{PĆ~g%*Y nJl?oY=;2n;ҡY=;2#ݞpݑ>;ҡW]Ć8t*zX =Ć^u{PwC+W]ĆVbC+ǁJAUbC+ǁ Vb.Jl+2T%6􂃕pUbeT c%6\a Ćf7ЯĆf%6o6+YĆf%66,Vb.7] b%6r׭אenT5{[!n krMkȷH^rMkȷH^C)zאon\Szz=kq`58-ҭkf^C[z kq`58zPp^ez A^ez l|p5O^^Cuíאn|uZ!Dz kQ^C>íאnT5\Aאn8zPאen8.A^ez qP5\Ap^C.A^C.kZz qP5\akq`58ҍ[ǁkq`58zPkq`5^.wz =ltkrͭאwz qX[24^CAs5\ak;hn8,wz yͭp[z qP58ЫK[bk%փ*^]¡w5,T5C[CjXl dC[CjXl =,[CǁЫK[zXl d58zpbk%ǁЫK[e.^]zpbkrp2[[e.^zPЛ_[C/Yl brҧ֐M\l Л_[C^rrЛ_[eۈ.~bkq`58zXl=Ft5ֿCb`^X^7Nzm>#ym>eӵyzm>zm>xm޺^O ^g*^CxgPkkymަ^|u33ۏ|uϼzg^k>*^|Ul3rUl3bg*^Wϼ|UlLkǃk>*^W௱zgk7k'k<,~b5y[̳zgkwk<,^g3[̳zgk<,~b5y[̫zg^Tl6˺3b_sw5wἓ O] tw5w"?QTw5|;pD_Bἓ O.䤻Ο(_B272ꮆ'] Kaw5|>jR] _8kˇ쮆ܬm%zpw;ܱꮆǐwWCǁՐ+FwWC2Tw5\ApuWe] qPw5puWrj2Tw5\mAՐHwW.j52ꮆ8.jze }Le 6(ke p5Q֐ GYCn3e=(kȅ!W\^;zPC5e =FYB|.ć8B|_e qЅp]? 2. !w|ԅp]c580zo´j Ӫu3ބiUCMV= L^#2iioWL΃zȴjeӪ2JTZ5ipUeV=jq` 51a :H }LH @ARCRC3%ԃ ~ g 5K8d $~ g 5\AWC3.+ɡ_H qЕ䃺%p]IqP CRCǁ;)RJrq` 5N@jq` 580zH=@jq` 580z@AReH l` 5\Aԃ ^60.@jwo 8,1пe̜1sпe̜1s!fNC9=\u4~ӿXiJC8V߱Ґ+ >JC;Vα҃|w4c!_+Ґot|w480VzPXiq`4cǁҐJ*VֱXiq`b28(Vα҃8(Vα+ Xiύ|p4c!'+ 8XAJC>N:VqұҐ|t4c!'+=Xib'+ qPb!?7:V.Xib+ qP4\ApJe+ vXib+ vXib2T4\ApncǁXi(JCcǁXAJCcǁҐQ+=r480V2c2,JCr4\aXiȻY8,w+ y7˱pnc!f9V.r4,JeYT4\Ap?8d͕%{MLdãɒi&KG%Ӄ*kd~K9,T4>LC/rX2 dCLC/rX2 =,LCKǁ%{MLdzX2 d48dzpi&Kǁ%{MLe.kdzpir%p庫K2uWLe.ުdzP%kaLCoY2 crm%X2 frm%Ч%kaLCdb4Z%_IX2 qPCLCKǁ%ڠK`,L|.ڼL%k6k6o{kqL%ka6kJ||?Sڼ6k>*^@k,^xY23ϒ5yL̳dzm7]g*^W3L_̫dzg^%kGƒ5yL̳dJxY23ϒ5yLծ̳dzg%kT23ϒ5yL̳dJ|U235yL?S-ϼJ%pɆN6?QT4%pɆ'pDqP%Ӑi8dE8 9.N6?Qi88i8 9.8/8d!cm+ ^8*VTBM+ v8PҐJy+prXi8yTBQ+ AJb]cǁXi-cǁpJ*V.Xib28(VrXib+ eq4\ApJ*V.Xib!T4\AҐFJe+ qP4\ApJ?d48G-,80s1sUϙӐ gNC1sri՞3!Μ\u8szPӐgNC;9 v҇\z8sz9 =̜Up]qUpeNCo9 qUp]?2J.㠫!ΜUp]$c4jЫJfc ^0VzP7C/+ )d4c??M!c߮+ ۛu{3Je+=Xiec28(VzPA%GC;097LTr4o6 *A%GCJ39U19zPwCJ39>&GCJ39.㠻á_ qჺ;UpqPrC&GCǁЛ%&Gpq`r4fhq`r4809z=hq`r4809şуJ8(9şp%G*9şp%Ge cr4\ah2&GCj [h2&GCj [h] w5L\:9rWh7tWEC* Uѐwy\ !UEC* Uу|CwU4K!]Uѐ/m|CwU48*zPUhq`U4KǁUѐOVE*)Uhq`U28*Uу8*UY hȯ{|wU4C!] 
AUEC>1*UѐO|btU4C!]=h!*.㠪AUEC~sU4\AUpUE*.㠪h28*UpUE*UpUEeT=h2U(WECǁUѐw?\ =VECǁUуVEC!~*zp hq`U4U(WEeP hrUѐW\ qXB**2U(WEC^rU4\a hȫP8*.㠪h2~6h$۠ c4vmЃ6AACo' ymA :۠1lN !۠1lAC۠ǁm6h$۠ =lN z=.u4vm6h$۠2RACo'=.u4\a]6hrmpv۠2ү=؟fꬊTU4ZU[uVECX= tU4G&VEyX= tU4eU4ZUX fU4\ah0ǁUhq`Ur3U{0VEuVE|.ڼLUk6k6o{kqLUka6kJ||?SUڼ6k>^@eUڼ3Ϫ5yVE̳*zgUk^|ULUk>^WUϼ*zmpg^Uk>^?*5VE̳*zmpmpgUT3Ϫ5yVE̳*zmvgUk<^}5yVE̳*zgUT35yUE̫*g|Ukn'jwu4w"?QT4䔺 ;pD_B۠ἓ O.伺 Ο( _B۠"\AC?C 1˶p=T3\p8λpf!!9w$g8yLB; A?bȥǁguǁp?*.g28(rg q3\Ap?*.g!T3\Aϐ+A?e qP3\Ap??d380z?[u!>۠ rM6h6AC* s4mА A rg6hȭ۠O۠u!>mАKjAC۠u!>\Aet!>\Am[Aet!>\AB|.ć8B|5۠u!>\AC i?C/ `.h^60zg+?@?C]14fe 󟡗 8(yP?e qPǁσ~w`3o6̐OonfT33ӛ! ffȧ773C>̃jf8򫗛2jfT33W/73e qP3󠚙2jf8.ff!_f qP3󠚙_f qP33\Ãjf8.\r338z y'ffq`338zm^( kʬm8,\ Pfm38zX =m\.⹶z97~6}6ڟMggӑٴ686ןMgiٴٴtT%ΟMg9ٴtJ6ҟMg\ٴt@986a} %Ο̣3y86mv3xQٴsg>sg>s\\\ٴ\\ٴ˙K?K?K?KϦ?K?K?>UCg<(qlL+xQ683xQg%Ο̣+ݵ3y83xQ683?83?83?8%ΟM_~3?8|8kN/N/¹i_p|pi_p|pSi_=͇׼=4N/N/."?|8mN pNf>><&Ḃ"P2@~8-NLr'dipZ ?>8Η.0_|Y2sOi>\a,p|8_|Y2e\ƩΗz|5—|_>ˇ^S |η,zeCo|w$/!|H_>nY-ˇ!|+/.0߲ C || C—|7/7ˇ~@2×C |/_s_|b/>/C#|!|/_>\ao/_>\aomˇ8m.0//"|C—B2>8 |!/z|q@ˇ/9|C—e8/.0/%ˇ8p_>K8—q×q×Bʇ/|CBW>!+zM|C) k*T>Ӹ rBC^)Q?"_NU|+%*T> rBCP/*T^D!WT|ȗS*Tp.T>8PBCPBC>PBe8*Mʇ*zP BeBeBCP΅ʇ8̅ʇ|_Q!aU|H*T># BC>ªP*Ts!T|4*Mʇ|@S!aU|4*ùPpPߑT|\ BC~GR2s2s2 q q q q BeBegPUU|\|Ϫ*T>\a.T>\a.T>-*/PBC2Pʇ*zP|q@2 =(T>8P T K7*T>8PnT|rFʇtBeK7*T>*.\Q!/ݨPpҍ yFʇ8,nT|ȿBea.Ts2s2s" D2 C(T> PY6.(T>ʇ$*zBC^S7.(T>ʋ(T>ʇ*zP|q@ʇ^@P΅ʇ*zBC rwRʇ^@PBC/ Q|rwRʇǣP.w'U|rwRʇ8,w'U|rwRʇ8,w'UgN|Cא| W>8 _f+z|Cא tW>)+zی|exџ|m3yHpN|etW>\aW^Dʇ+z tW>zʿ#~+k||۱ϔ6kQ6kbڼ&6ϔ6k6o'|3-k泫ƴ5y-3mym^ _g<Ӗ5y-3mym|]x晶3ϴgxgik0^WϼҖ|敶Lik>J[^Wڼ35y-Kk>J[~g9J[^?w\WϼҖ)mymqg^ik>J[^q5-3mymqmqgiϔ3ϴ5y-3mymwgikWW^̱^bHW ̃z1ze2yP++eT qPmT пelP-c2s<ؠ eZ6(A9 ЯlP ʃ4 A p] Ze.ԥЯlP8lAzؠ =lPAyPfC1:ǃJ>|bt1䓔!| $c'F'C>I9xPp%C~q1\AǃJ>.c2J>T1\Ap%e| qP1䋅2J>! :.cN>8(.cN>~cq`1gr'Cǁcq`񠒏ǁcȿ ֈǁǐF| qXn8ֈ2˭'Cq1\a5c[#N>8,F| ykp%e| qP1\Aǃ7| qP1\ϲ)d񠪎?_~ys|m>|vՃ6A؃35y kA^7|ՃL=k>A^Wϼzym rg^=k>A^?5 ̳ymqmqg=ԃ35y ̳ymxg=k 9sAj0!Opވ%6p^| Rp1 /-! 
>8O_ZB.| A{I ǁcݚǁp*.c2 >8(rc| {p1\Ap*.c!Wo>!W>8(xPpe| qPCCǁǐ8/vC| =_:qbׇLE?Ab*2۞Su?bϩȐk"C.:IuIʭÃ'1Cn: $a')C>Iu íÐOn|r0䓔[!7nT0䋅[!?lu.AC~r0\Apu.aZ2j8uÖ[2jT0-e: qPZ2j8u;[uz: ׭aq`08uz: C}$ C/83 Cst1gz3 c0\aNa7st8,3 qX9g.\s0\aNهY}}7+lCW g_+Wb}kf+žb7kf+8,W qXz;br}oCǁ}嚙}+=vZ͵y>Sڼ6o̓pmk\|xmk\'ڼ6ﻯ3u͇|ͫkɿ6kL]k|̫xg]k<2^gڼ۾3.5yv{k<2^gڼ»6/̳xg]e3.5yvktϼ^W 5yu?Sϼ|e3.g2^Wϼ5yu̫xmV_W3uwk>2^W3u_l̫xg^]kǚ.5yvji̳xe3.5yv+̳xg]kwe3.5yv̳|e3.5yu?S]ϼ|eIϼ{!F!]s [yCi1u17(UyC~P=p~r`l Id %[8*_B.l s8MƐ'y;prdc8yΟ`Bl A%L僓ǁdcȵǁp%*.dc2J68(2J6T1Zp%el:wP_ȫ|p/C+~!"\ AUB>D!;cWB>D!;cW>] qP*~!?.㠊_*~U qP/\ApUeT pU+~28wPpUeT EY =_q`/8zX =T/8zX \;|`w/8+~2vWB~`w/\a_8,] pUeT qP*~28.㠊AUel㠊_*~U =^60z^ȿ.^es{ ySŹBޘBޘ }n dn/*л7*z {cn/80wp^s{ǁл7e[^E3wp^rq˹ps{2-e[Tnk-~e/B g/ p3zPABo ykġ+=. yn. q8ørʡ%*eKT8(!C}ǁP_q`rʡ+=uStm6Kk]<> g ]g<:ɹ6/ͫk|ߵyh3smނ_'<0yLkϼ|k ]g<5y3wm^]g<޵y}gk< }5y3wmp5ykrϼ)wg^k> ]W3+wg^k6ϼ|6/+|35y>S1ϼ|6k ]g <)wgk< ]g<x6Lk< ]g<)wg^k> ]W3͟,+wg^3˙_yB:ypȇp^rH |8 !'ypTCFT8/y惊7Gyߠ>d/p79A\8p7y }AEK#}ǁH_ՙ#}ǁpE*.H_"}28(rH_"} Zp/\ApE*.H_"}!7kT/\7 8(wPpEe qPCFB#}ǁ6GꯟFB =T/‘H_qYۇ诟诟}>]lByOwPCfO}K'BֲO%}п٬epEpΗA]V \8_V ae.+lW#̅~2fe./+eЫVB(2z5AUB(2.\*sU qtW ]&B?1A} JЅ~@c. tbnO~_a.RLЅ~@c. tИ qɃ=} pݞ qPC&Btǁ +L]&Btǁ ]q`tǁ ]w&*A.]7:&e;]7:&e[NT.;t! U 1q.仃t!;]w'B>:A  ]w'B;8AwP NЅ|v.Stǁ ? =LT.e'Btǁ JЅ8(A.]ȧT'*A!+s!29 ̅|Npe.s+sU \WBn~] \VBn~];\fWB2+s2̅82.AUeT qPe.\A̅8ApU2zX| 7\| nABq-=oW],}[UoW],>C,^uz[Uo7,^uzX|;\r-ŷ[Uo2%*Bo~Y|;\r-\aD[rŷpo2%*zou~-bHBr Ʌoː\ȏ\B9$zkǐ\ȏɅ!KlBpH.֎!U(BdbH.!U(ePɅ8,W fH.\a \%6Crǁ!\q`Hr![;uok}ݵyCxmޗ_ ]Wϼt)@wg^k> ]|3ݵy}g^35?˙Wϼt<|3_cd5#sO8/8xLk<׌]fd5#s6xsܵ g]fd5#s|Lk>]Wdϼ"s)2wmbyg^k>}5yE/>2]?5'B:1ΟB:1rD OE,?S'A%>db.?z-WoἉ>\8 pb.7C&B Mt8oLŀsἉMt8 pb.7 3!WN̅&Bs!cN̅&e;\s2J̅8(1.\]s2JTb.p%e;\s2J̅ܝ91wPp%B.М qPb.\Ap%e\q`b.>͉gǁK5'Bs pb.801z _sb.801r!lG,80IłIpMηrAݦ b$Y8ߦ ˥$ IJ3IIIpMwPBJ3IłIЯLT,d2J8(IvPIp%>du,lV]𬎅XGXV:g6caY cu.#,cbu,lVB?²:g6c2jwPWBfu,\AWeTձXq`u,#,cu.8:ձXq`u,8:zX;Xq`u,8:ձ8:ձpU.@] FXrձotvXȇ|B>vPn|w;,CaYn|w;a! 
vXgMBa!z;vXnBa qP;,\AϚn}Wˎ|Wˎ|AEBXv+eGB^r+eGBXf+ՑˢՑO|W"_!9.W"_ qP+\ApEe y̑pE*#_2qP+]HGe qP+HG>d+80Z#_ǁWq`+80uPWq`+`G*z y=ؑAEBv+\ApEB޿u+\Ao:W"_2|8(uPpEe qP"_2|8A|8wvBguPW- ͎W;^zĎW;^ ;^?Nx²zĎW-,;^NxvzĎWq`+8upyWr[XveGx².ui36\eEE̓ay mkL^GڼrLk<^J]m5y>S:ϼa|3tgJ]W:ϼa5yî+vmޜ_W:3î+vg^k>J}tص5y\x^LIkJr]/^Wϼ\)ug^Ik>Jr]?_g)B+@_S^!NyWp()ׇ r׫p^v+w W!_ o8Aor7w)$붃\ o8 W_ r󧐐/rB\!WNre: W\2 r8(. W \2 rT++\2 r8(uPApe r렂\2 r\K9. W\2 r8(!\ǁA W-\B\!WUr*r Wq`+80rk Wq`+&A{b2{bp3w Aݙ ZW8ߙ +X I-1p3wPwB-oˬuT+[k]2u8uPpպ>d+tֺ[uZW[ֺ:k]oY k]u--k]b+tֺB?߲:k]2qͷ?Y qͷpպ>d+8zX |ZA| =u~e+8zX =uֺzX =u~cj]2u~c+\axZW=ֺeۗuT+k]!f!ߵuT+Ck]!] Zׇu|t+Ck]!]:ZWȇ|׺BYӵZWuֺk]ǁZAպeT qP+䳦[][!o򇫛[!򇫛[!>[!+[!wnn\vnn|ts+W7e yͭp5e:V[2jn8.V^nn8uPͭ qPs+\/;.V[!sg>sb]?Ggg<{Zip/Ǫj=\zq@U!Bj=8U 〪eV8W.0W.0W.0W.0WrUeV8WYU8U8Upj=\aj=\aj=HUpj=\aj=HU8wK$U.0WzPzq@UU\'U)〪V8WrCZ=j=zIUTrՠV8M8! Jn=+ [tt|_Iz8ݧ{8]/}g0$~[Fr롟z8ݧ tC pOpOɭ~%Ar2sr2sr+[q[Qzj=[T:Z|V8W:Z|CQV8__{[TT:Z|C@GU28:Zq=8〪CZ|V8__{q@U롟oQzq@UUTzP CZ=j=Z\z\z=T.ܐTU_PzrMRUpj=Z9jc=ӸX46V_?߮zȧqi\m|bT!zȧq¹Ojc=˩XĨ66C>16CTm¹ppOjcFTmd6CT!D?"zjc=+GX#m܈.QQmn6e6C^R2s2s+XqXqXqXqXy+Km8mpnc=,.0.wzzzțjc]Dm6CX=hc=86V8zzq@!o認mX=hcs!ꪍpp7vzzjcs2s2s2s+XqXqXqXzzzjc=\anc=8pп(\=k- WyAp[pKOz˃C/=Qz- WA* WAF WqXnpKO.Qꡗ(\=\aC/=Qzr#Gp.\=\a$Cz蓀U8zC WrH!_z?z7WyaB!.zrH8,zrH8,z7WqX)ozzq@p?C=k6?ӝk _\3eͻkf<96yY6y¯k><ޟ)umkl_W|ͺ3lֵyg^٬k>f5f3ug٬kS6f]|L٬k>f]W6ϼYϔfk}`Ef?SeaYUQ3eff>S-ϼ2V|敱Lk>X]WAϼ2V|敱6םy΋p^q;79 AeOȷ=gy_|>d*+P f8C9 ΋p^l?˟P<,Tq`*Y,AeB> =B8( uPYpee qP*\AYge:,TwCge qP꠲P2B8( r5,Aee q*\8( rI,TPǁY,Tq`*YKPǁY;gBP ,Tq`*80 r},Tq`*䫻PeB塳P_I Kh|'.~%a*/`Bl| -/~c*/%pYpΗAeB0 .,TP qPCfB@g*[fB@g*-P YϷBYY0 :Po Yp]:k]3 .k]2B},Tq`*80 Yz |,Tq`*80 z =BT*80 z ,Aee ,TrYЯ{B8,:,T=fB?C~>tm*CkS!]:_?6*CkS!]M|wm*CkSU Tw^צB>6֦B>6zX ׵Tq`mjS2M86!Q!~秛Q!~uPͨ??݌ ͨw܌ ͨ??ٌ .u3.u3*㤛Q!ץnF|t3*'7eԌ qP3ꠚQ2jF8.fTQ!@.fA5Bރr3*\Aͨp57݌ qP3*\AͨfTq`3*]@7BQǁͨfTq`3ꠚQǁͨfTnFT3*8ͨfA5B^u3*\Aͨp5B^u3*\AͨWd݌:fTQ2jF8uPͨp5?aif Z}{i $8GbuDj !n]} Jf2_\:rPgAuFΨ[qwFΨ,W] Z *B}Ze ~󊾎a-T2B}Zo'Y u ka-AB}Zk^:q-TpYˋB};Z9 vPe9,/r\ $krX^jrX^*5, 2LsT7, zP fTsRAf.:*, .A V`\dUПl 
zl`=gUЛ_6X V,/Q?` VAڳ*,#;wdAڳ*,# .A V*ޑ` VA/6X`r`Uˁ V`r`Uˁ VAo~`uP Ve9,O `Y Ve9, `Y Ve9, `Y VA.-ꃌd[1dzFAF 2\uV- Xdꎯ A 䎯 䎯 A 䎯V_A1_1_A +ȭ;߶? z9+Ƚ;^ z9:rPWpYz_A0_A0uP_A0_A>;sW; w|`W:6+ ;mvWnw| _e9+,u|TWpY .A_e9+,u|*_e9:|掯\:+,?rPWpY z9+ g+;^ z9:^ z9+4;+ z9:>J+,u|堎 _,+,u|j_\:rPWpY:\:rPAu|堎}w|g[ _A?e ;aW1 [Ǝ?v|-cW1 v_A_ǰ+v|TW1 z9+˛%w|尼YrWз .ay䎯o'\f_AN+,͒;+,͒;^Oo6 ,,`A_, * `AO0ͻp`9 y`A_,ȗ*. `[. .ay[. .ay[. `e9,\9 ^, z9,-}So=3ևgX>6X>0c=?$S9X=YX1>c1 cGX0Lz0,|h_{>%_~!ysW_TWˁ%_, z9JpWˁ%_e9JrPWpY* .A%_e9+ȫ|堒* l蒯T\J+,|堒 F\uP%_e9+|堒%_e9+K^, %_kWA/|yg⒯K+ |Xr`W'. z9+ȫ|ԯ]_ %_A/x#`ODM\Gx`"=^o۟oM\{0xzoރ+`wP=^AI\ .A=^A/|iϒ%_Aڳ+/K+O{|eW?XuPoł+C, Ӟ%_Ao~Y=Kr[z+=Kr[T K^, z9+/KXˁ%_Ao~Yr`Wˁ%_A/|XuP%_A/|X%_U\4%_AY\>%_AY\H%_AYrxZ=`AW;ĵ 2u=X<,`A2}֭cAn ֱ mn ֱ mn{cA:ͭcA:vPcA:ͭcA:Iau,ȳ[ǂ^l rֱ[ǂ^l;ֱ:r`XqXTXqX\,bAjX, .7. rb o],&bA>sXpY* .AbU,\łrPXpY* .AbAsXpY*;b ʹX,,b* .϶lAbA:X, ł^, bł^, z9X,b* z9X,b,;bł>X,b* bT,\ł|AbT,+jTXpY* .Abe9X젊łrPXpY* .AbU,\łrPXwR, .Abe9XAփ}zMX xփ?yEߛ,g=X& `Aߛ,{փT=X& ބ`A/փ\B,,)Fd=XpYS(׃}zP `e9,O\vP`e9,O\\o?ApwXгaAF aA>vTwXЗ z;,a` 2aA& 1ÂgwX`e .\ae9,\ae9,8ÂrXރ;,qvr`wXˁa` 2a_ߎz[:ևuXԱ>MXѱ>cO'?\SXX1X2+cN1u)cGX2֯Sz09α1yu\as^cc˜eΫslsccgX8969α~i:9α1yvұqγslscslscccWX q:Sؘ:<969α969α1yu|`s^cc:Sؘ:8+6s1~^kg;{z8zz48^ Xp;{z"81w=c= 1y=NLo<+69^yΫWls^bc4<+69^cxWNUV7~_5.+ ؃*+ 8,cʂ6YVYee~7Hy`tYY lA]Vr`YYˁeeAޗ,*+ eeA/堲*+ .Aeee9,,堲 P\V\,CʂrPYYpY*+;TV\ʂSqYA堲 /V\V\ʂrPYYpY堲ʂ^,+ z9,{ԯrXVeʂ^,+; o\Vr`YYˁeeA޻, \VvP\~+KXVO^?X=zAIXV/j{0m zAXV/^ zƲ`/A}$aYYpYz\,,GvPς[>#;>?G}dAaA? z>԰,teY# z>z:Ȃr-? 
z9,>Ȃ߲ޟG}dA/Gr`Yˁ}dGr`Yˁ}dAoGvpy4>Gq}de9,oGq}de9,Gq}dA.)كddP& 27\b7,ȐMfAddP& b7,ȸA@ 21vZgAwytZgAwytڃ@  ς@ ,@ , ς@ OjhAW܁r`Z4w؁r`AuYsf5gAn\sf5gUsf5gAn\s<לYrYЛ%֜y7ݸk΂<;,ȻqלyvpY\s\j΂rPA՜堚Ts\j΂rPY\s\j,# k΂rPYpY9;Ts\ßm9,k΂>,k΂^9 z9j΂^9 z9,:k,O9 z9j΂>,,՜堚O9 .A5gA>vA՜堚Ts\j,,՜堚TsvP5ge9,,՜}aŚTs\ß ؁5";Ђ7aZ&@ 7;ЂFdZ&@;ف ;Ђ7aAu}oفr`兕;ЂrX^X-kDv尼rZ׈@ .ayaف\hՁ\he9-& ҂\] 7 ҂,H;oYdAZO\vpy悴YIOMTAZз,H E ҂FdA做 ҂rX -,做 ҂rX -,做 ҂UgAZpYc}XAc9:1+ٱP,cX2ocSAX2(c"vc*HWAXO$g젪̂^2 *,s6̂rPYpY2 *TemWTYpY2 .AUfe9젪̂rPYpY2 .AUfUe\̂rPYN2 .AUfe9,Vʂ`[Y϶7ʂ|ᶲ/8VvPmeA, }j+ meA,>\B,,)ʂc[YpYS(}ǶPn+ mee9,OVvPmee9,OV\ʂrXB='5{΂Us%6{΂9;/sfY svpy枳s+=s+=gA1=ge9,os\=ge9,os\=gA_,, {΂rgYˁ=gA/svpy枳s59{Lϙq:GuXiX10.l׎b`?W~Uzϔ#eD뷴c=W٘zz\191ybs^=gcc\2XǸ6eޡ-ȓӂ^O rkӂ^O;҂!-!-҂!-|nH r?䆴ClH io7yg!-jH ͆Ԑ\҂>g!-,5 iՐ\҂rPCZpYjH;Ԑ\҂rPCA5堆Ԑ҂rPCZpYjH .ϲX ӂ`}ZO 7ӂ|R/8XvPiA>- ֧}O iA>->֧\^X>-,兕ӂc}ZpY +ק}VO ie9,/\vPie9,/\\ӂrX^X>A֧=Yd /Yf}Aէ}'5ӂ|5ק} '>-I '>-:O ikO .aykO .aykO >ie9,\éӂ^O z9>ziA_@g~7TZkr|?oǵݸV"V_N]+߉kq߇jZk%\+?Gʏk7Vk׮Eʏk'ѵr{!X+^,׬/Œ+ TXY?X+^,<ӡ"rbI"EPbf}\XRyCEڋrbI^i/z9zEڋ@Eڋ^"탵"Ep"E/^jEkEڋri/.ˡV,ZjEڋ@Eڋrii/P+^\CH`H{qY"e9Ԋy֊P+^-*^\CH{qY"e9ԊgY?jEڋ^?jEڋ^?jEڋAEkEڋ^"Eތ"E/^Z"GPCH{ˡWȋAT+^*>XGe9{H{ˡWXWH{ѧ^bybyY"o۟,^,^6WX}>{<{۬^by"탵"E:zEڋ>P}>{qY"e9Ԋz ]i/ӾoO`O{џ?Eo~{ڋw@`}7?Ei/ӾoO{џ?Eo~{ i^\C},ˡ^ri/z>X_^^ri/z9zkڋ^?E/ޟ7??,ڟς?e9,џς?e9,Oџς?E.?Y>$(W{y^dr\E~(W{^dr.y7^dry\Kr{\E&z9yrEkڋXrP^\C-W`-W{qY\e9r}j^C/W{^C/W{ˡ^rjj/z9rzڋ>grrzڋ>grzkڋ>grP^\C-W{^jڋ>grrP^\C-W{qY\탵\e9rP^\C-W`-W{qY\e9r}^\C-W{qY\e9r`/W r}^F/W{ߕ^7P/W{QP0zkڋj/;^Z0zڋj/J}pyr尼Bڋj/.ayr}^\6 j/J,m>X^\6 j/.ˡ,mՂ\EO^"Z{^'u/W`-W{Խ\EO^"/\6 ^"/\EO^"/\EAڋj\\e9,OPm(W{qY6,i^}|/W{qY6^C/W{ˡ>#\|s^jA/Xr`做 ԂdW=ߒFjCc;wc31=߉AT6/z$2 GcX}LjcgXQ4߱yΫ@ms^jc=Wژ z191y} 8Y69zqγ@msjcwsjc,Pwc=g< S< 8Y6T69~;֣1y=1yyΫ@ms^j@ms^jc,|aXc/,|aczD=c1 lG`c={L8_X>6ӯ~k?#Y>6փ{ks^/Ӄ1y=cwyΫ|ls^cc69 c*WXS缋ɂ>o6砊ɂr3o>ȋA}')f?b S.& ` T.& ` z9,T1YˁdA^),b*& ^dA/b*& .Ade9,,b \L\,sɂrP1YpY*&;bTL\ɂvp1Ab \L\ɂrP1YpY*& .AdP1Yᇊɂ^?TLU,b #\Lr`1Aybɂ^,& :dA/ybͬYA޳Av˸ s ``EI{,me\ z'`wP/e\;)v}fX; 2^=\ße9}-c1YП,& zb*& ӞdAo~YLd+/ɂ b?YLdAڳ,/WП,& 
.AツrP1ك,& z9,b7,&;w_A/e1YˁdA/XLr`1AXLr`1YgA\ݹ,,}d1YpY;}d1YpY<}d1Yc* xWL,hҕfA4 2t%wY+͂ )]i֕fA4 2wY+͂]i\Òһ,,%wAy vYby vك; t徳 ; @쾳; @쾳 ; t徳O~}gAwr`YIwr`AwN}gwN}gAwErY;)y˭΂;,΂^; z9΂^; z9,4,; z9΂>J,,堾; .A}gAwvP}ge9,,堾; .A}ge9,,TYpY; .A}gA,,,堾[˂l`kYЗ l- zlZSeA>FqkYЗ l-;ֲZe[˂l`kA}ֲZ[.oZ\6˭eA߽,,m[˂{ckYpY,}ֲYn-;ֲYn- .Aee9,o=vPcAƪ ZcAOMvTX zj{,W;AsXЗ 򕁻ǂ 򕁻ǂ|t/=vpy7h .ay7h .ay=\cAߍ{,ǂ^;AsXЗ{<-Ϸ?~__?o/z[oۿk5#??ǿW_?O|*ӯ_ۿoK?0?O׿ӿg8__o/xgd7$?<<[п?XEsR-w?ewvsZ-aR-c{˘G 1c_k/sa1ݫ-w=9sg ϏԘɖfјI _9~k1<)_aX<֘~+G;/귿~O뷿KӃDůϖ9Z۷ |o絣-I}{oosm} 6ކo}نo}ga}o{َ{޸qo{:n{ /%ZKI#\fw\G:[:~[i7?/?}Mp3/N9N r&ȿXͩF}Wqᅲ{ޛq~Jy:]ߩ?rk_7BLۧѺm~z?~;_w=|?r{9ϯq <w8|_>=-y\ǿ}mA{oQ_><~'\6|{~+l8|m}swߧ6e~y2|}p}p?>1x_>|m߼2|?{OǏe>=3%r.GMM- < _ {󺜃oc4~'e_: ?u~Я4ʛb?\+~W*gOe~N6| mنo}_Gee~o_f fvf{:n y.}\z|KHi>S4Ϟ)']St˱~fLLLLL1ǖye1|LO}͒ƌcsGc1Ec1Ec1E_/%R`?rTG΃CyPyh9>AРyh<4c1=<4 2|C) .ç<4 6fЃc\KQ"`83õ<* .YET]_<3 #Ϡg3c1 r2] .; r/2ݼ˃ .) .ç2 ˠe C.<9 Lt,:<)3 = 3Ê# +<) zy=|<* r 3-# <: r "2|<) .ç3c1<3m# ͎<6; "`O) .ç/ ҾgЃ6<n6?HTAH=<4`C* @yh<4 2|J* .ç<4 2|C>AЃczp}Cnym1=>eS\Oyhp>6|ykGӱ6}Lzv,ڡcH?S͏XO5?1$=|HJ/z^!)CRzÇ4ؓҋ2|=) _OJ/.דҋtIvnIip}HJCRzqRzRzqRzRz_ Ћ Ћ-v 04Ћ_[aŖ]l7C/`C/z^! Cz[8C= ͿЋ+ Ћ<( C`C/.Ћ02|= CzÇ04ЋĨ=|Q=F=bԋS+F=bԋ-F bK/.ċ2|=ADz{jVz{U04Ћ- 0b^lqE^li^Q=|C/z^\QqTaezzq^! Cf6ه04>m! _C/.Ћ02|= ؇aX?>jLS:֏) ұ~LaX??0tc0tc0tc0tc0Sc0tS:1L5cBԱ>u)D!c Qz:/S:֯S:/AA~=4ϢY`?TPgѠAiǠ4cP1( z=| J*( .ç4 2| J)( .çA~?>Ǡ6T\iO,4o* 'w{^wPqgu~5 `93g1 zw=|;d.Nܹd{qAƝKw.yPdeKS.1 zKT.ƹdgAjKyOo 6; `*p . 
;) .ç3c1<3] w;ܥ: r3]΃;) .ç3 ΠqgǸ ͎;6; r3΃;=# .ç0 22|;dqg{jǝ=:,4ӨB=:,4Ө{VYh,4c1 .ç4* ҨB) .ç,4 РYh2S4ahyp& -nӄeӄeZSh\Oe rn0Z9jhyYc5VNy?VCk5Y -yjhycVCkZZ9ǖyC2zhrZ -yjhycX 4@3Vk%Ќ@Z 4c5мVkz!Vkr!Vkja kb9m~/kb9m~/kb9mhhh~/.W᫁@ej2|5 @rX6zm@?P,=|ck.XұpŒX.B=\|\|/zz/zz,Eȭ?G"^E_>XE"^IAߋ< {_nCȍ("KҼX2֐Œq=<"ՈejDG/zzD}"CD"}_!"`_\F/.W#kD"͈_.6#"|D%W{q _^\F?{C\b L>XK`׽Xָ{\b L^E_^E_^\0/.Wsָej\2|5{q׽JO>}Wz:+={c؋%`M^\c/zT c)gc9oS<6ȏ)))))ߊ1ccX2=xlS<6c3ccy_zb=Xmj)Vcz6֏)V}΂vPY*: SAEg~ ;,OaAY,ct1: zTt\OYp>EgeSt\Oك΂{p}nm1{aT)F{aԃ =:0*è =:0*c1zyAMAn 7T^ySiMA7=|̛>M7EtyS[kMAn7~k+MߓTq렂`a*H栂`a*H& b2|) .ç+c1:6M7ܦ 4a\NMA57S\OySǼ)ctPyS{MMA7tAPySp>Me7=|?z`?)o*o ʛAM=|̛>MA󦠇ySp>%elT\OySp>Me7=|=9 <#RGJ){tPRGJ)=|=?}zlv`qc;cX>c=6ǬFccX>Ncεc}cHc-RZ4#HiGJkX>"X>c=R:H)#v Hb;h{t=RZ=|.z)]!RCtÇH颇RGJeztq)]\GJ "f"6)هxbw\lN;[{s;\! \En{s{^;Us^;UsÇx'Cܒ)ȍB*ͬB/og]_LEn\lD_lD_lD_\;\\;ezsÇx碇Np HpL@f˗)ȭ碇 E`Op.rk"vJp.rkbKp=2|=wor$8ܽ)ݛ=س=س=س}^\!CsÇ碇ezqqA{sq\\g?E6w.zl\lN;[{sx'?#CcplhS3֏)yyy|#G<g}S3O)SLΘL쟯wz❱4Ac1Mǘ&cL19& b2|i) .ç& ba6fcc `Ob* $栒`Ob* A%1~A%12 r$栒 Nbܼ: r$& IGG&AnytdrPI'2faK!-TiA]{ZpPW{p>-S\OaKp>-AÖam&0l9-%|) [ܠ9l r% –aKǰ– 7h[ܠ9l r%Ö [?= [*l z~~=ȭc vi99&/* ib`(1M Ac1Mǘ&cL\OQBp>E SL\O1Mp>4Ac2 zl{LsP1M4{LsP1M÷4`~|LX?@>cS1c4e?Y1c.1c~}LX?Žy̔y̔y~732f#c=yLXFS62ֳǔl1e#c=okX~ґ`?T:ǟJGsPH* Ot$c:1 z=|LG>#S:\OHp>#eS: ӑ`?=fӑc:rp}8*Ae~>#qA8Orq<#c1܁:r#]c 1*or!͚ܯ9o`0?!`0?[`0?[`0?[2|*o.ç! 2|> A# ?)H!ȏeG )qƑBp>E A#AE Aj)qƑBG ){\Oƒ?y `O=È #܃9r0"=È #:ˆ`>0"د*Fmz0"c1zFӅyp>]T\OaDp>eF=| #nc0"eaAFTaAAÈo XO>֎kcH>NGc=X Z0cXO>c|ccO ǐ|c=XKz:tc-؟c\zLv. ~ҋ\^Oz~v< b;{p=r؎GrE">D=|.z9\!r2|=r _..#2|=rDp΃Cf"6K%`#.pY8,yvsGb;]_t\lI$[@{@r$\6]I#U1EnJs\vH1GRq!=}|_l~}qs{qqs\\1Eb>$$#}?d\fIIEnd\fIIezqÇ$㢇IF'YRq%%YRq_D<~5܃)=܃)=<")n=n=nG\vRyD=|#.zG\! _3 _3<2|= _#..>mlG\"`#.<"-<⢇yEߖG<~F A#H"2|@) .ç$ "2|@dcrp}@nȃL9}P)G*A~#p08#UA=r7dHG {rPHG { /.dqAAn\GqqqDp>]_SqPqDp>eGS18-cpp1qro!Ƚ &N)qz8=|L*qro!Ƚonnܛ?Ƚoܛ? 
n?}onnf?mo?etC\O7u\Oet\OAᅱÿb>}W]anATnAAsA÷ 3ҏߗTR*|ݐTc-UZ0STc*|cSyzp XO>nzp Zr0֓k@r1~ H>֒?S*|/?Nǔ*\l`O.e 26؃xv< b;]lA2دn/z\!8CppÇ ؃2|=8 _..׃`v8nAp}Cp0߶_l;ۅ `#.3ۙ?!V{Xq/av b;"ɸvd\.]IŖd{q%d\lIA+.r;"Ê)Š) _ _+= _+..Ê⢇aEp nQq[Qq[=|#.zG ~}t#Dp Pp; Ppq*\\ *\R@Wp{p! RER> =|H../.﯃=U _O..STᢇEz~o) fsA;ӮCzfғ2|== _OO..ӓ>'=|HO.z{zrX y."f+AAe+eS\OJp>e+eLA<<'Ƚsܛ8 < w5s) .ç< ws* r&M# 7l<bd̑M& .̓"2|l) .ç&cd19>F66Af#cd\OMp>E6eSd1 z=|l* &ȓ<ȓs9k_>(>؆ zc-D!Z4Cc-D!B"}_Bk@"cD7CiB4!vb; {tc)C4!vNÃ.p__l`ƿCtÇ颇!RH!eztq"]\H! B-D n!Rp}C4Or~%㧋b b bDN9vJ b;%^lwȉ.CvD!;s>D=|ȉ.z{Nt='DNtD)s9vz b;=TN4"wvʉ='ȭr)'ȭrD2|=' 2|=' _ω..s>DAPtEA]MQEޞ@[=H{.r+'Ӟ (ȭҞ- 2|= _O{=\O^+ɫp%د/rp"7. W.rp2|z2|z;Õp2|=\ _W.z\!\ nJp}WCfهp2|=\ _W..Õp墇E•>+\@E\@E\j Wۧe)zc^z򘢗1E/c=zyLnjX-S2cˌX12cˌc,󘢗~HzLX?a9z AE/~<%9vfvhzPBMhzPBM*XA e,S\Oƒ =X8`!؃cpp} ۄ'6! 7mo/ m6o* ۄe6! r2|edrPK!*z 3d`<%؏K%crPK!*z 3epPL'2~:\&^|srPK[;G/Ans\O @p>E/S\OKp>E/Aӕ2] r t%-ӕ%l(%- <P)@ .ç).D8 r$Mc2|.ç胊A) .ç$ b1H6fcc rp} l1Hp> eS \O1H$c 19$ݸc wA܍; b 3A* .3l>[Wr XP>|(c=@X P>|(c=@9c-D!Z0CBd$ׇcc83CQB(!v b; {HrcX "/s9v bA=r`A..sd9Ŗ{r \A@]o/.\l!I$[Hr "$n =$n =$n =$n =$n ..Cd!I$ 3Cyo "$\l`I.z\!$CH!v b;a"$\l`I.\b;{HrP9Enޔ{r7 {Sr7 {Srq{rq\\ 9 6|:.r"w:u\'UQE~*ȍܸ(길 _:X\'`2t"?X\+`,..2|=C`qÇ">m!nEp},2|= _,..>=|,.zX{`q;c3V`q;c[`E,X{`qqX\\r1ֿ/c񘒎t2#A%~ X `?Tb'J,>&AJ,yPE1^`?uT짎3qPqFھЉE'X{sb͉E7'AޜXSbqPEp>%eX=| %n#P"ȍC 7%;+wr ?"; $A~;% )A$A~;% r?$SuYn 8r+#ȭ :2|:).ç#c18>fǬ6uf# 2|:dzpPA@L*=r# ȍӃ7L==8 }ӃJ)=.Ӄ_'9҃҃|c==XKzz|c==XK!!X] XO>cHzB1~ H>Ư XOzBvJX;$SBp=!I\l`O.r\=NPニ{|p\\ ⃋->bbg..\l`.v\D|p=>؎\l`.ۆ`.ۆࢇ v0 b;]lH.sm'۹6\ E>$\l`O.sm'H.E'./r ,B"7K͒./r ` _ _ĝ{pܹ_Mw{ӝ\ItmEtʿW]UE~*"&ʿȽq:ԋUE~f*"?uٺʿ _L ?h9kGMEcSȃz{enĬ؍nL[I?(?}$n>۷O(?}$\QOAm%r ?{(9@>ȁ m|.@~p@ Y-4o$%m$cqI1K8L6NY)1K8L6}ַԴ,o/<Uhi}/BK|ZZyZyZ_wA35Whi}BK냜ZZs2>KjZK '煖.>LKϴ>}A>|Դ>qA -E\hi}BK(YZi *A܃|Z2 -=̽2 _P-?yPO΃xrœ..uq<ϒBK΃,94/ާA]<??=ɹ>:M΃,9?xpy8t 5p ǃ$ -!Yh xB -4.;ȷbA[u#8;~nA>?T<xPWǃx<ԕ<Z<\gAM xP!Oy~Ϗ?:</xy=>y=c2> -.<2>K>>M>>M o&۷Oۧ xpi\g A Qz䔶B r5;!wAMx=.;B2> 4󰖐M 5 8FFh(oLk,q~6)U&5}_)x螹FLt9k0 ]A r<ӳ< PA)Rn|_j%\^-9{92>%. 
Aθq.-g܃ti9K; ]ڃt6J=Hi3A:OqR{ g܃t$n0zn уt9r0`xN ÍϞ{ÃY0>I}|c7J=:c`1 FA}Z=O^ )M r8Hije|9\a2>q=0z'atpIܾ}Fo'atpI=㓼yPB˛)0 rΛy|F6AO1͛9fys_1,onm|q|4onm|LO4S>fr͔YgI_%y䫤2 u)+L9G]eAgʅ)*L9)m2>͔ -SL9V3 gʅ)-L9go3僚)*L9Weʅ)2 ą)&.L97qoŃ)V,L9ȷbeAΔ -S]hrO8WA -6>nEA]~< nP|ߊ[A7O{p7O{p"܃q)mPWZJԧGpoƞZgA}ygA}y[hnǧnǧnP+ZDzGA},{ԧGA} zԧGA -D rYh!ZphY\gnpEAFAF۷O#ۧnpi[}4 4 jn3BpAZ # G5"2>p, gDhe_&5q8޽FNǻix]7ixLkq{Υ6wѱT&5yq{ΒktY;Hn#A:Qw G:=pҙwB G6J|wߊ仃V$\ 练t09yFw r7 mAp r;Hgo#A:{Vlwn Gt+97 EApnv t`9m60c)1?|uA] ,ԕAw/$ uOwP uOwP %Ǖ廃|7>-m 9G%ÃdxP #ҒA$AN}| ,dxߊ$ÍRmP#"A  RmphApq;#e|$Iܾ}oD'npIܾ}$ rJ;H1cSAjh) ANi56)2>NiqJ;{5Mix=~W6yr4o6>-7 n%A - eVh n/BKp:=Op - !Th ncBKp{[ƧnŻA}jnBw|/=nOBKp - }Uh nBKp|Z47AMpZ '9-氅J*氅Au36A<-6K iup&4} 'ANp < *!mPWi |~nA}"y''A}"y[h nP|&Oރt7O~ - ճߠ>\= ճߠOߠOB~>>~>>~jH䐶b>?= C 9e,,bB i, .㳐6BڠOCڠOCۧ!mӐpiH[}4-ܾ}}|rYh n#BKpAZܠ5N2>Kp, ~3Ck4o|>ھm\wo⚾Ik9M.4>m&_kt3q|.6Nѽxgqٿgf%FbPlLkt ]̒AJ4Ry '̓t^9iYy '̓9i{7IsɃV$L"a tr-ԧ؃;.QC2A}XFzPٖQ32A}f[F=㓌z'u3Adԃ>>ɨ%:)>-geԃ9%5)2>N Q.zpgԃ8IF=㓌:}$n>ɨ۷O2퓌:}$AJ``jrif`jzi`jive|`.{y`m.YݦA6߻؏iƗ1 =fv,nm|,~Lc`z6?Ӑϧ6:!uA -Yh!uB := -QVh!uoB |ZH}PC꠿ V4[ѐ:yA>σ|:GYA> -M[h9toBˡ|=9tsBˡCZr5Z仫Т ]p8ϒ_'>qr=Iz%AFzP7 -R5R:zPԃxHAԃ9R/H=|O"7jԕ<u࠮u>'A]<*I}%A&A},{RDz'u>['Aj<44/>Ӥ>Ӥ&AN -+ I}PWOꃺxR䨹в2>J -.㳤>ϒ2>K>>M>>M o&۷OۧI}ӤpiR\=Eb5?AMq=j1~e|Y= m<9=? bۏa?o>|sb\0o cps0;1a?3{: b` 6ƠhnE.b7"tx E^I7p pE_nE/b7"/E7E_? 
p]ĄE8.b"1nዘp 9 EL_!"k1EL_{/½".{Gu"/\ʊu8RA]>Ω@^\^F/./u'Eʤȋyrfҁ˙t /z&ȋЁ4J"t /Br;u|ҁ?"v/R(ɋJ{>].b򢏏{}|ܻZkJRT+/)ʋkJra2>V^\Ƈʋ>>V^qrqqrqqrqqrqqrqq2>V^qܻ\EͥwyQs]^\z؁@^\ v /[;6xp:6~Ǭi7:6@Ys1@lo.WY1U W ]hJPJ*A W ]hJOZZuIpyAV/ V*$Gi A>J Q A.@ rrPk FVc6*#ȷQA mj /<P%N0yuRhIP^uk${W'Aݜ>εާ}Z1\ާAu/FO.uuF&72AndF&ȍLP7bod:>od:g!}>ۇABk*z-Ջ+@uc(@A -P[2>kԊ*^u*n^u(*O+O+B>>>>m --PPWuo{ U[Z \g-Pp@e|}|}|n>m o@۷O[ۧ-P(ZZgjEThQPkZ,xEkٻ3 ܺt=>/{IW@jͭKk46.̤3LJktq t6.ё%5:۸F5@|f\]~gk>_<ί̸*e5:۸FgyUeRH]B A*\H ҩBjN RtY!Bj RtkQj Jo˵ R =rh CtstF) r9( ]EA* r3HWQA U4Hk}AZ݄sy&Iw+\H fԠ.oVH] AF} AHԝ2>[q2e|\ Ik'=נs Fh= 5msZ5H=נ.s Y5㳞kp_|\ Nm٠V NmYwҭ 6nZAƭ[ԵٺA][ԵٺAq6Fւܭ 6nڠn֭[r(ڠ3֭ .nm'ڠO wk>>}|_$ڠA֭ dڠA֭ dڠA֭[\2>q6nm'ZpIܾ}ҭotk'ZpI6Atk?­[#ܺAmk[ԾƺA=> r}6迭g'|>ו9O[:-ގY1+xǬxky⭍}̊6oiƧ1+8w8f[Ǭxkom8̬xkY3gf[̊6oiI1+8'8fcZqoA. x rHPh[CB+ނ|ZyVh[oB+ނ|ZvP۪Bk|Z[ڪ xAj[uW #V(*wFUA3 QhmUǧmUUA^E@[jOE٢z}+j .㳢&ӆ,ӆOKmZnp`z ,`A`A`A`e|V볠n^u3,gA݌[;Z[ [ ZPnւܭnvkۧZpuke|֭}|ZZ‹^ZЊe^YiiVh[ǧ[nrۇ 3+$WvA -]PoA ..*2>>>>> )]+ۧ]ʮpie\g]pߏZBDxԘV.rA0k+׶:H5vmaƮ?cصgq6$Ycצ1m?f] Ǭk5vmƮ65vm>3k|fص̬kYc31m88f]i7oAn y rPh[BkނZ ȗl5oAd y %[hAȂ|ZE+j 젶`A^  ^h-XWBkZ i Vh-Xwσڂy<-XK?m  :y ߛ.޼u-K7oּY;\gPpCA6oA6oּ}|ڼnp6oly{Л WGA]Py y y y .-Ϛm|߾S~|lhQPn/p{ԅ .^B/迭zVuB/{zA. OE SB/ 2>+qP۾Bkz#y۾B+\WZ䶯ھ2>k>>m>>m ./ Bp<?Q BTzzԳ ^ZT 2>+ +B/zA. 
oz۷O ۧ^pze|V<Lm =}AF j4m_c57ʤ\krͦ˵k46.׮\\FQktRIv6.ڸ@FaFh(h|.\]pvg5h|f\]qV&5J'ڸ@FDdt$=% A.rI6H ]A.: rI|%W>HU AڭjjV V4ȅՠO AVA.ij|f ݪQڬ/Ȥ r5YA|G~dBփ\^`X6k`XlPփ SzA}[6OrYn=X{e| .fpw5>>}|҃ n6+MHڬ6!i :fPEku_6kPgm֠ڬe|f .6+m֠.,e[ulPwjulPw|n/FiI>OiЂ) Zp}|R ^ r>OZp<^ nkҠ m ڠ֠ m ZA)AԘe|ܠ AImPc kA n>iЂ۷Omp7hAkm_4hjA r65hX6ȡKm8i < 1kx6ǬAkM ZGMǬAkyj6hǬjP5ZmIFchqs6rY3FgfchqBqǼ Xh=gk #߈y(?e(| 8ȋGKA^< ^ iiiiiiThRZ/ZK/ȴ^*z)Wxzj{ .ֺj{ :SPxThSP=ySP=SP=Z\gJp5+e|֬}|=}|=ZThR6 !hKAGy률률2>(z)=PK?[~vOA={*"SuA{ r\V [*oE[훠UM֪p&hkU}*ܾ ZYk\gUpVASA.2 rbЊ SVd*b*TSASAS_ bЊ򂬘*>O- S)GwOA=½{*)wOA0{ jSǧSǧSuOA0{ rThSۧS)Ϻ2>jWS_vO=5T)wOA UTWk\ ItKݷqtM߻K(Nj~L2ڸ'F5y]襍{kqOtb6̸'3㞨Lzk5qOTOC] UAe = U Air4H{@A}|R I4*h'UРO WA4J*hF#*(UР> 'UA,(=V'\ D[hP z DhP zA}ZO4O]뉂 .hpW$"IO4㓞(UPp{R)UPUAe vfUРgV Y4e|+ Z&U 2$=r% +An\\=2m|owSA}|M n>馂۷On*} n>q75e|M .nQ wSe R7.c wMn*ԠOAtSA7\\ .rq>l)ۇ-Up!H75uSA5TnjPC5TnjPC}|MPźAꦂM R7njPS5n*2>W+ԠZ馂M jcԠ9M9wǴbjq*6~Ǭbj,ULmDbcZ#q濧HmscV#q*sj6dY3188f5cbO!gy),Qeq1^z ZA.f QhVwB뵂CZz AZAZAZAZAZAZky8VZA!jҏL{B뵂^+_﵂^zV .ozV 6VP {.ku^+KZA]*^+z2>{ *ފVW[Aܽu#*;WWA .*ϪB\uZuVWViu\^A^ <rRhA2> .*ϪOOۧUpiuU}*ܾ}Z]Yu\gUpUWe|V]}|kZ#ȽVUA rUhUGPs﵂>>>> ./Ȫ򂬺*>O SPV@5*PAM? jTP/ jT B+\@Z8 1^@ZiS_vL15T)HH((6E?m\#]্kk>ʤFF5=FFAHH(i m\#]kk>3ʤFFA٩9HG{AZ?# 1HGphzj STO Rzj6 Sa # mAiI=5zj'ԠOAS>>\O5Jj6FiS* Ԡ>'5P)=G\^HNX4 HAo=Ҡn# `=Ҡn# `=R{e|# .ep/>>}|R R^TEV R1[UEYU4HUQAUE*\BA\ R(UQAAR ./A:R~e|\ .hpWE*IU4㓪(}* n>۷O퓪(}*\U2>qU4AAz[`ڠ =ڠAM? I6hpyA ./۠yJ>Oi)K˗A|ʗA| r2HA˗A*_\ ReP+_5%ˠOʗAAR %ˠ&V r(mY?J6=YƩ1+Q8h1-QmJ6NY1+Q88f%JǬDiY(m|L?,wcZGz5,A~ZGza #/A_ ~ r Vy(%;F/A1 ~ iiiiiiRhAmXFԆ%׆>Խa cBk;#}o;ڎ>ێ>ێ>(#ڎ2>?(^ BA/4uB#;A.4 .B#B+4Z)i\^EAN -Rr"_hr2>+4.B# O O ۧFBpiQ}(ܾ}ZhY\gFpe|Vh}|ZhZB 5lB#aA (B# +4 Bp<(>O-4 y58迭VhZB#ײVh9/B#ȅFA.4 rQhFP/4 8EvgZgQhEP,H5(ڸzF!DW(h.z:5:۸^F'w nzό2i[ktd}yN̲g)$ϲAM\ *"dV ! 
*(E e|Ғ RKdZAܒ Rdb $%%- -.>>iI}|Ғ %IK2㓖d'-I!4J2"$EȠV aEHX)RbPVJ JA}[)1y+%1oDKe|\J .`pw>>)8n?SzBu!Ƚà.g; RaPg2>qNar S7Jw_ w '9|pw\ǽ2>}|; wn>۷Oz!}wn>q0{e|; .a'à;9zAwzAwrjap\- G&BpȤZns(~[ZڃFi An=r{0HAۃAj lAtAt|{0MYG1K8?fI~',o㫽4o_gI~ߪ,oK%m|$c,L6>m|6 o ]h @؅'Amh- ȏփZ(z @BoZ=| ۅVy3)z țIǧ@ǧ@ǧ@ǧ@ǧ@ǧ @7t|Z.'u]B>=C>=CnZ\g!pe|?;AZ<9~A]<r_h9~P9~pe|Zܷr 5/$5/Hs,I r]hIjBKRDY\g9~pe|}|}|n> o۷Osۧ9~?r2>,.?Ө>Q}A==Q}Ppz{T_hQ}EgZTL#Ө_-.EA-ƒ(PxP 9\ǡ2>No}|`&`osA],7ȹre|.0ȹ A}?m_侃$2 AN) r6o<%\ǹ2>}q;w'opIܾ}o'opI;se|.wp羃>>}) r8徃zZ;徃zZz;JNo}K@vPe hZQA KRXtskz6CYy1C8=fyh_\m|1> `>> `>> `>> `jzP3X -c ^Ii%A}{Rԇ'A} {R'A}{RZhIip%e|w}|?x%cԠubԠHu|u|Y\g[ŨAjRIipU9+*Q_eUA - .>2MJ,) .㳤4ϒҠOҠOۧIiӤpiRZ}4)-ܾ}YR\gIip%e|}|Z&+Р^04W'A=3H4;f\,keErmJZr"> yy+)j?X Vryҟ4Z(KRhd(KRhd(KY.yPs ? - sɃ=9;+1YEA -z rvVhcB=}|=}|=}|=H">= C#>= #>=+H."2>΂>gj^WL - y]P - y]Py]Py]pue|Z^5Lϴ$uA]< jy]pi^\gy]pue|}|}|n> ou۷Oۧy]Ӽ.2>, .㳼.H.GrA=< AM݂zxvY4< 5|; ]? JI5J8Fᅪk82IѪ|ONўI5ڒ8F;r'}hό2IѢZ{5ZS8FKjhEm-hiI)?HKRAZҒp>iIjqA <s3 =7<s3 Aq( =<RQ g9[p, eA)[ r8H⠏OAd>>o>-[ǾelqP-߲ g8\9_ԅĂA]I,xԥĂA]K,xĂ :> u|<,x\2>N<6~L gJ8KZ8KZ8o$x\2>q8q'cpIܾ}<o'cpI8e|<.qp>>-dH?* : %YB8Ad?p94xfe-,lY`31 ,x}L6^oY`1 ,x=fe/m$Wcf>afPW3xZū /^fy p9:>A9-4ȏBA/4ȏBA, .ƠAA - r Zh1AA - rWh1hsBAZ b ǠAƠAƠA7ᑃ?&%oAƃHxfAݦ< >YpP7*ςSyԭʳUZ\gkpED@zPUЃ.u8 Ņu|u|u|YP\gA͂z\hi\PwjςS{ԝڳԞ񭟊ee|Z\&dqo'qo'qoӸpi[}4-ܾ`n_0{, .㳸72>{>gDz.[hlPDe!z.]sBeyd^_ǩ5zhm^_LRk}~>q;o]R6Nf~;sJ|6N8%3όS2Iv[&)5m8%Fmhmmh}j-Om~qSA}5.&ty ˃9]YO GσRy Gσ=9z92>q( EA)zr^=zR փ9:ȡ A)кQBAփ>0 9HzAƒR,|Aƒ.ORA6-E}RA8-EԝRA:-EԽR ҃8\ǹtZ~',} Ir&$}p$uB9ݒA]-rr?~Pgϒe|$ԝ gzX8?'zX8?'8r8|_j%(nÖ<}[aKP>e|.|p卒9 PֲAe- `ֲA- r>e|1>m?cq̂64x|L6>YPƻO,`om,`oY3gf{6iKc z6^@Y1 xlm?MAyc-=kA -D5/>sA -rp_}*9?}ǧsAN -rp_h}B 9/>ς2e|uT?KA]s=~PW]Oz_hupeA߳pǧǧǧ6> i_hPOz4xԳ#B:>:>:>,=~P|O - ~Po$Oz#y\gAop5~o{h^}*nۧ{h\g{pe|}|˒5=aA{=~j{e|q{2߯o5D8~FGӸFbhMl-m_k q~k>3߯8~3όk>3߯8~/L"kq~6دѺ5ڕ>5Z8b?"A} G샴>9bY}Ƴ>_+ 89\zb 9X?ȱ Aq?F)r?Hqwyj%Xߧ>%{cARXbH?H~9DQ AJ\ RB` TB`p\DžYuY*aPuua*aPWvui*aPvuq*!Y2>qx~mOEATOEATn4S"~*PԛArC1㳆bPg Š zZ (%ĠAVB9+JA"+Je|w.QJAmnS"}*RBOEJH 1Ke|\B .Q W 
[[0U ][0U Aq0o~󞡍1Y1j-1x3|f3̬ghY31Ӟc3vz6yYoǬgh}u yջ o%y ,.![`u A^ׂoZ}YW~l?S[3S#;A( r䊢* WVQ("EUe|VQYtP+ GVQ9/9ch1zˍBу\n}BZn}BZn}BZnZtʍ ۧFhr㠖A.7 Jt6A rt_hGBk>|ZiQhGp5e||}|J"A=ȼ)Z$"A0 HP Ezgx-Rh|pe|}|W}ڶj\^-Am mKakR}ڶn-ֶ%-ֶu|޶u|sB%*AP)>*A=P P .>Z$7"Ay HkmZnZ$j2>E>>m> jGPto>y͛Bk>.#|Oa1?1b|G5b7cxŰ+,]1l>`a1 ט 16c>3l>|f|̰aG7cDŰ 41l>`*,V1l>ڤxQWi>^u"6/ºva]Nj+El>^"Nj3xq|kEl>|\E_"6/BqNj|\e||s 7/B!׿E_"f/BO}B|E7xۧSEh>|El>^}ފ E^\?ȵȋ_ZE/b-"yjXZ""/.Ze|X2gփE */'E,T^ԳE pBE=]PyQ)T^E 3a2>_bwq󢏏}|e|𼸌۷۷۷۷Fix^[㓆? Ṉ ϋzJ޼\E yQ#ix^q2>l!^qsq8qsq8qsq8/.ENJ&RżپT1/'ŰIsŰyu1[?cV{cZt6ށY1[x9fuKo?m>3[|fV̬niY3gfucZw*6^Y 1Txk]U*mJ1Tz* GVy?*J%QVc{Aڷi\^-A^ o vP˘B+c&)2&eL1A.c rSheLp1e|V52&UC1A rPh@B}|}|}|}|n65ۧMMhS65Anj >Ak A>jj V('5N8A8VY\g5Np8]KxzZz;Bxz+yk;Kb'7w<Y\g){pA߯Aӎ'ӎ'ӎ'ӎ':2>x ov<۷O;ۧOӎAxz{ShOPOώz{;ۡ|vꍠ}|}|Yi< ^5O(P6!-|J_s#A|P/F<^tyoȞ3Hzԃ >{(=Ok~ %oAZyg=;uή: wvYg}Azk븗FCo^e]͵{kq/w6ƽ5X̸3^ό{k>3̸3^L6ޮ*5Z۸{Fk5۸{k~A0ܯ ҆~m6 ka s kϲL ]p{Aү ~m k_}Q*AB r6HZ+AЂ\ R m* Whj mV Wht1!e ] A.kbI6 m'ڠOZAjɂ% nlZFi% rK6H-Y[AX_hnFv r6HQA*ܯ _I~mpk_\Z㳸IW5sAnfnPKk dܠK dܠL d\e|܍ .ndpw#>>i}|I37fn'2>n ?+jA"+jA"+j >>y}|R \%F5}JYާ5}JYG_?vPf5OkA:ԫjA=ڬԣj <>>y}|uOup_?>~/Ϗ+u"lPq+uA?L/ZAƭ_mAƭ_ r6HZe|ܯ}Vhm3|fV̬BkY3 1ڸ&;f5YdǬ&k㚬7cVMOЂZ* oVy{/* *Zn/H+ Ђ|Z+* _)% rKVh-Y[BkɂܒZK䖬Z \֒YsP[ _ ֒b(К (&'DgZgZǧZa+ƧEXZ+",EXaAseTh-YdAy|XԖ,ȇedA>, % aYh-Yǧ-Yǧ-Yde|֒YK\g-g ߫?~]yVh[PR/߂zz˷B+ % Q-YP"oɂzyKVh-YPIoɂzQzK\g-Yǧ-Yǧ-YdAdAdAdA)ZޒP,dA=%+,Ӗ,Ӗ,Ӗ,ZvC=>?Nw]uкu<+וw]A u]Wu]Av]Av]Av]e|uYUh]WP_% --YdAƽ% 6-YPqoɂz|]ƧZPwjЂS{ԝ+BЂzxԓ+/x>Ǭ k&VamG*cVy̪6 Yw̪6Uam>3|fV̬ kәikc^yM-:(kjAA^S  ZhKPug7uP ./ AV*:(uA\Z: AV&):(7IAe|VY:(KuAA^ *:(wA޸ jjiThuP6> iAA>X  rThuPǧuPǧ]QuEA.;|^Ԯ(UuEA> + yUh]Qǧ]Qǧ]QuEe|YW=0f0+(ȇYuEAk + ]QP/7/u NPW|/u eNPo#/u )R'R'R'RNP=/uzysK{^RJOKOKOKOP/o|z|?1@?ڼH*")oEVH*")IA=,H ]ERP2/zyThERǧERǧERǧERpIe|V$ZԻ6˼:TPO TP|{+HOu~*{SA㽟*~*gSA=<yI?u55ZˤF+qSh!n~mO][ktI\q?ug5S|fO]q?U7ʶ_ǯ:~2irw5HxAƃ] 쬻Yw( uWIw5  rw5HUA r=5HTAt\O ҥzj. St Ҫ( AiUr=zj +_=~g+ԠVA4PmB@ I5HN4PtK' ԠOAsEz* tz5J=5HWA:\O zj'ԠO S\ԠܟԛŊ [t. 
A.b+AA]ԓɺ wWz5Yw5wuWIw5*ՠAtW:SO1uWzYw5uWzYw5uWzYwj'ՠOAtW>> jP  :W)+ȭנAZA^>izԓĊAԋΊA r5[>>)}|Rl .bkp[A.ܳbkP=+Գ֊ [zX5[zX5ו[z]Y5{[[5g[A. bkP /jbcZl|̊6ޖYƻ1+8fVm>3+|fVl̬jY3bʷ*zJ6xK Vzy/+k|^A^A^ rU - +ȥW^A. ruP{ Zk)^+ȷLZAe -MZA ^hVZA^ _hMV{V{O  -sP 2V])*UUWAVWAZktuP{ _^k*^+ȗWZAZAZkY\ggQփ{,kZ䃮z tk.^+v*SA{*v*SA=~ TǧTǧTSA߯SASAs;ZA EVP/:﵂zyԋ{^tkZiiiuP^:^]*WW UЇUЇ}O=At }O=At }Oǧ}Op=e|ZH3ț К.u(7EAW) ePAA]~ ePAA `eПW>m\\ \%L kqs6.|ƅ5۸F5>|f\\qsgƅ5>e?qt6.{v2+i r4HoˠAA:;+uz R~='} =A{ib& mA.fiSr13H ԍR JbfV 3A.fir'1Hv;Aڷ}R l nS>{QA+ܽ ]e w/>>^}|ҽ{e|nF^ r2HLAeܽ {Iepw/>>W\ `JA:\ ҉zQzA]lsz zX27 zX2d'=H{A >_Au|6N s_~YzeA^A^A^ +>>W}|R ^iõCA=f{cƺA=f{ 1(̠OHA4(zX2'5(AnP$eP/:kPep7(A r25(zX2+zX2+AWuzePwjW$zePOW$9%ˠֽ aaKA=,{»//^Yh1^x=fK/Ǭ{iu/m|/6u/m>3^|fֽ̬{iә~wGV6^Y/{1exkn1e4Z/Թy/y/y/y/^p)^&ȽL2Ae 95H A${q Aދ )$KsA^ j jQZ-32#+~A^ _heFǧeFǧ}E oqPh}EB+|Z_OB+8Z+ R}E;bAPY ;FAPwZzwiwiwPhAǧAk:xfBB+\,Zb!K A]XBP|/{%ߋB+>>->>->> ;r2^hAP/zxwPhAjTԵ٣Bꃺ6{Tԫã^YT\gQ}EA=Q}Pgӫ㠿:WZԫË^^,Z׋^,vb! AX8B AXB A. XrP i|_o:qpsV2iƭ5Z۸uFFh|f:\qpgƭ5eqolƭmmH\kpq#qkmY#1SFbPfĠNA5:=+o+ A.tr0HCKFiOo` W?H r^a6 9 / ^zjW{QzA+ ҆^a6 >>}|+ r;Hm tW Ƚ A6rR?^!Ƚ Yt-^aPfum^QAH:ԝԪ WZu0UZu0`'AAT>> ?JWr0HB{A+ R0 [0 [0 [^a' OzF\ Ru` WzXu`lFu3 nV aaV aa2>\ nV ,>gAӅ}H !H#1CFbPk$}|HԵA:ԫA:ԫFi$H `ĠHF"ȍ 5An$y|_AHFƛcH~6Y#'1k$9fD/m>3k$|fH̬hY#_x=fmEǬhEm6"S"s"""pmr[QhmEۊBk+VZ[qPۊ iPhmEOBk+ ("GEA>* -Qk[QhDsP /VHy/B" ~AAvA^_j: o9y/!Av9Zp{AZ+ZkZ+uZ! V+uZ!ۥ A/VVV(Z!Z!К 79( A5An6{spP|]<.;wEA -:wEAj5ˣBz-գn}uK;ϢBzԥУ.}u)|֏- z%A&A&A&A&A&ꅖuD=;'Aݩ=QNzPwjOjhCnZhмB |BIƉ55ZB$QF+h'hmDmm_ݽk_q~gƉ5'tf׍6NԯQ6Ɖ5J8QFp'(nD-tf,QԹY>D}PggNBA )4rh>HyCA %n{N G߃t9r=HwCA&0賕t;L Vi{ ؃O9}: AQB( AΨirF=Ht3Aڧ $rFZ ۫;`7J=[؃Y{PW9 u{P9 }|`$r=;1Q9: eAΨ)rF=H.߾|~sPW&*a c}R{ G߃}9{ G߃}|?jQBA,4rh>惺[h>˯84\ǡyCA]D-4EBA]D-4EB ǖ[h|g+VBA>> }|94%BA]-4%BA]-4%B%ԅr ⃔9\<ȹ+k.~ ͏Yhƃm1 x=fy ,4oᘅm|/м6m>3 |f̞3^? 
h9fy,,4o嘅m|<%J+m|[fkEñefZ4[fEñefZ46 A A A A A7A> 8~B|Z_hywό _i5FAް -F]h1z7BуaZR-&A^ -)]hIyB˞DZiR^hIg⵴p{+noEcBу|z^l}ub.wu= A~>e]=ywPGϻ>zw9 /0A' -Fr^h1zcBуZ=1zA -F.ۛ5j|A]a= |B ,.u>+A]aO˯󅖏vt^^^OO云-ΨEI'y}<Ҍ{dzD>U:¯A^,:¯A^,:¯: ==ˌ؃=??x_kwڍ_kkv۸_kwkvӸ_k״<g_kkqWn{۵xZ]ׯ;۵xZ]ׯyf,"k5{D|zD|ז9 "\ g0ȩ)" rn" g0\D l`O)"q]x.~~~q"888_8_xyp㑞y@ş`?߂z?l~>σ|_`{?l+`{Wg,Y>؟۳2,??.}/AH`ATwkޑyfzG=g3[czߏo-[>?zl1_839597999;9ȃ|+As~_ws~9?O=؏GՃzwAdQ^G~>~>~>ԣ|C?ԣ|ȳ<=؏(ɏA(ɏAc(~W |?ԣ|?ԣ|?ԣ|??GGzWzWzWzGzG2>=Q>Oe|zz|(~m'C !B@T O*))Td! ߣ*{_n|ߓ !z 2pP !菌 !菌 !O !1!=>&JA˜9!y/sB^ Bpֺ/ys_u}!د{_f_xL}M/c_od/\c z_xL}Z^yf <3kY߆=Mv%cMZg?7k޼;czg.q3S?15kY<|~l-5ǖϏcxLZuƙY957 f,Esx"o,6xPAf%$ B*.!^{P/~=`?!Aƅ`?m~ƅۏ¸pPq!ȓAŅ` C6*A\<wA' 6Anu2>u2>u2>5N~Yoۏ K /A_KpKpA o/P%*[/_旃/A\_:ysu~ KpKc 6mm&۲Lݖ{h3zyͿfo3f^kwkͼnzyukͼ̵f^kWĩe|=N .q*`+[85JGǩV:.?{\~^[@5HiП Ӡ?e|? z|OSA^՟y W%\ipA^՟? .ipoPyU䫀 _T޿kw|LU?U/cպ폩j]TBֵ^hSպ֟;yfZ<3Uk5LUgq3S}YTyLZRyLZPyLZNyLZL晩\Tayf0<3Uk=caƙ=vNks=|,l-- cgGea{LZƙ95 \؂ [sa;/2T~ A`|?Ӄ*O~:=*O~,ls~N*lAm]=ȈvP-ȃ可ixjyG_y{ wA DnsA<yp .S{o]pwP.ȿ]9{:e|_k_ *{ .S, .S, .S, .S, z|Aba皃^*{:Xba׮2>2>2>u2>uX *=_?Gug Όdf N83=>fƠ\Ƨ\ƧxP1CPf uP1؃Ae`Z23T .BOw ΌA*̌A*̌e|ʌA113Tf NvP1ȗg _"|pf %™2cp2cp2cp?p<1P*@䣊e*6#P~k=P߯c7k=Pb۵(_kZk=P_^z|35Ϭ<(_z8z3keZwk=P^@Z{SkE5ϬG<^zz35l!Pƙ!P^59(_"P^[>[kgkg@yڳk @9ȹ)Prr  @rJ]Ƈzr都^;}I"eWv 7^۝a.`ksmHmHmH6y*V 9e|=m^ =A(yS :ȳˏVӊE( (:ȣ2E(zOGO~珞= /ן?~A(Q=;ep& oop& .wp_z\LJ;!{l-`[ ;eV/;e|^.rC=z|m(m(m(Q[lQyTP=>e| .e8`k^[ 2<d / {\~[=[exП ?e| z|(Ãp6 Gex<*Ã|Q+pe| .exp_/ÃЃ~??JՏ)U_돨83 TܜSusrzN Ru>Ȏ}P;_ c Au`x*m)^9O>OM9OM9OM9=>A>E /.A^]({Saz|,A?T|P?CP|PA`UL=8^l냞-{}гez|Az}^S{}O]{}cz|_ȧ.B>ue|_ȧنs'S FCp9䋨7|FCWkgroB_oB?g߄8MZտ7!^kUZ߄x^7k}"g7!^&kY߄x3yfPB} 7k}{oB_M뵾 Z{{}35Ϭ<^_z-3[g؄x3&ĵök`aaak 5L97m; rrv0i!ط to; /'{&$MI ]5.b"W۽)W۽i];}b];/?݌`l7`l76xfc7#w3ynFf ./yh ZU~1s/y bZ H<i"/9>_ r|Z~1_ .GBpacp__\W>e|}cǠLJ`_l=W>[t+mg!W>21/ .rLJA+1acGpT>|O+1v`cǠLJA+2+m-!W>ZB| `_`m-a1acVǠLJA[1!mu `:A>i#ط:e|}cp_1acVGou L[|$2/AƨeA1jY$ؗEee|}YdòȠLJe>k|L  Ǵr?ZR5~Zj2.\ -Z[>#.mjiZ 5 ^ 5 ^ 5Z .Wpָupq5۷k\o׸\Ƨ52>qi+q+qָ}ָ}ָ}ָ}ָ})Ok\e|Z 
.Pp׸׸j+q+>2x>2x>2x>2x= uP;^A;^A;^A;^e| .ӎAxAxAxFAxS+!pS+!pS+ AZ 77jS+G2ojHM ɼ#7|$AmjiS+OZe| z| z| z|:M ɼ#7|cVo ћZ\ƧM2>mj=>nj=>nj߯1_kmCcO൶!_k[\|s^k^mx}^{[^-_zmx}چ~g6>晵 yfmcYg6o>晵͛X/⯵ Z߯᯵ Z/چض/m{2>ض/m{2 ~lC= ~lC= ~lCo =?7`X? o`X? o7lm,}e|mc.kKp_X7\6^~m߾_?Eo-{|ÿoG ~lPG ~lPp_[_]_]lPߐ7̾ yPߐ̾ yPߐ52bu]o[Iu] yNߐ yNߐ yNmr|}r|}r|}.kNp_[yܷ`[EEoJ_EoJ_E Eo-B}e|mz|uz|ujpEWo_]ܾ}ujpEo-B}e|m.kPp_[_]_]lPU ^ElPM lPMom|e|m.k[5p_۪_]_]lP㫋PۧRwO: nJu>bu|lNmz|uz|uz|u.kNp_ulN5 ^lN/֍o瘾 ԍon,}ÿ77{cź =46!_7kVX|KߐY}c5o, om,}e|mc.kKKKKmc5o,}C>o 7!`XlKIpP[JP]JP]J}B_W /71-\yL /lҵ>ǴtscZJzLKIi)˸t/<|F\JzlǴt?d_̴t3R5LKI<3-%]̴,r3Ӳȗq)Z~LKIcZJ֟Rҵh'kyfZJi)gk=R5ΌKI_   `=`=`=`=`=` z| z| z|?~=AjQjQjQjQsAi(OEe|Z- .jQpVjQjQjA-wu8_j(_j(_j(_j(OEAEAEZ]ЫEAZ}ЫEAмZͫEZ\ƧբZ«EA-Z«EZa٫EAZ٫EAZ٫EZZZZ\Ƨբ2><բZݘZ2բ /3^- 2բ /3^-:բ2>i(OEAEAEoWn>ܾ}\-:}Ztpq(OEe|Z- .jQpVWWj(ؗ;j(wcj(wcj(wcj(wc\Ƨ%2>-i%q(qVWGƽGƽGƽGƽwKj(q(q(q(O{Ge|;:`_9`_9ZtPEAiZEZtPEq(WW|jQY^- 5˫EAfy(,jQpVZ\ƧբբբբZ- 5˫EA>z(@բ tP;@A( GP(?bQ_%\%\;@\;@y.}ZyͿf9=ʵsϵsϵZ{W|^kʯyf}5Ϭ=4؞M}}h=`{6 l2^}}h:2>4 .CE z|X~\ka(W?ey5`{kţG!ţAG}hP- ţA^Bx4+yiQ/ .Gx4ȫy ;Z< ţA޵x4y Z<)^Gx4ih ǧţe|}hp__ţ`_l316g}c2- ^ţA˴xG>CРAC;Ppa}(}>ܾCX\ׇe|}}hp__>4a}(ׇG `_l/>4_}}h .(.22 z|X>Cև}}(}dX nևGa}"և`_>4a}h2>4`[ sK@=4} h-`[%4ȗ%- eIK@|Y _4ȗ%- eIK@4/ .K@РLJ%AK@} h/KZÜ0%A>i hs{>bgi=k|L%gK<=k}2i]k?R<^cˬ2k<]k1\<3\̴s3.5L<<3X\̴ce֟f.ϵ0vygǴs?>]kIg]kvyyfig|ϸs3JNzJNzAAAAA]JNJNJN͖:Aϖ:Aϖ:fe`8e`8e`8e`7Z 7Z .2Opy\Ƨe~o7}j"opP>pP>pP>pP>e| z| z|9M M "M 9M2>mmiAnԖFoԖF/ԖF/ԖF=oyAJNZJJJJJJNpVr\Ƨ2>=>=>JN/EJNJNJNJNi#Oe|.~GJNJA=>n>n>n>n>n<ȭ usP[7A[7A[7A[7e|ں .AmAm̃\9Śj&kj&kbMΖ5XCk|bM9^ !Nj5A>xkX\ƧŚ2>-=>.=>.=>.bM9^ ̋5Ay&w0/Ś\ OŚ`~Nj3iZBǵ?kL3sZ_xl ][EǴrT>EkZ_tyL.<3-\̴r3Ӣ5L <3- |]ǴrK>EkU1-\oiZigEk]yfZt-3.\̼/uP*uP*uP*uP*uP*~OZ~SKП ]T ]AnAmAmAmBLLp`\Ƨ-2>m<----sP{~} Am`P[0~?-2>m=>n=>nLwIoyLIoyLwJoLK!/yK!/ԢKg{/yK{/yKg|/ԢKKKKp] wY}}};};j$_K'A^1trpTtrpTtrpTtrpTtrpTt\Ƨ2>-i$OK'AK'AK't]K'5qPK'5qPK'5qPK'5\Ƨ2>7i!O AK'AK'ttrpTtrpTtrpTtrpTt N{䠖NNNNt\ƧZ: 퐃Z: trPK'I/I/<ȥ`_: t仉N|7I&^: K'Ax䠖Nt\Ƨ2>-=>.=>.=>.I&^: K'A>;y$g'/ȃ\ 
H=u|5d5x~k}=5-Nk\k}]Z[׸5^kkY_x3yf=~ k}]vk֮uڵZ{W{3yf}]5Ϭku<{u83k 1(5cPk Ǡ`_lA1=>k z|X1a]#}d\~Ͼ1 Ȱq"5}]c]ǃ}]c]ǃ}]c]kFk ׌`_\5ue|}]cp__׸uA5=>k z|>خەqݸ}cݸ}cݸ}cݸ}cp__1a#=ۍ;=y/Ԣ oZPj#==>,z EA^1k=}c-z ^EA 1Ȼ=y;ТG/z r|Z1icp__1v9}k`r`tqK(j"Km|XnEp.}b-]2t1/] .K.=>,] /mZl[t1ض}bm `\2?AK.}"*W6xUc"*y^EU z|ثW1abp_߫\*}b-@^`[ .bbп'.߳/] XlK| 0t17 -] CK| 0tK2t1/] z|Xt1a"ؗ.A>ibO@Z.}".{bb?;}}չ]` A2 A2 A2i!O e|ex A A A A ~;7`.C_dj!/كe..C.C.A2Ev"{pq.A2y.A2=>2yK.C$2yS.A2y.CG2y.C2y.A29>29>29>2i!q]z{+Tou~O6P?ӸA5 oWne|je|jA?T2Gj{.Sj.Sj.Sj.Sjz|lAA5ng? 6x6bTz|lA?TP?{A5`lSo[Ձpu :䓁W8dՁ :䓁W|2Aiu Oe|Zz|\z|\z|\8Ձ :䋋W|q@/.^xA=yGkkEZZkk_VZgkkYyfC\~rɵ^_ko'zZgkkYyfֿ-383A~y{loۛG1؞."z|LJ ?ߏg2>0 .E 'dt=^.}`\n`{݂e|}`p_-\w .b`nLJ݂A߯_S/66.Wl`؃J{]Ibzءe}b}bp_ߡ\w(=>P z|ءvn>Pov(۷;C1ȫv(}⟟jbwWP A_C;ib2P JA^ʴC1kv(}b ǧAO;ŠLJM`Jl!hC`m AW!06}C`m %`_wp;i{l"`k({V@Y}eA^y=PVJY=سLJ>!z|zV\׳zgՃ=Y}`ꃭE_DVl-:؛N4Ah9s{sS}WL5A^1yTsS=؛2>7A}Cs܃> oj ?A~__Xc Z)_A1kY1k5LgPe u1k2~LAZ? _ïyf <3k5LAg|̽9 do*~qP9 7A|P9O9O9O9O9OA`.9}؛do;A`To;i_7zs_7777?؛{scoz|W`T;L_d+AU`Te +AUƠظqqqqq=>6}pqܾ}lo}P;۬A5xwE oA*yq2>2oJXWY2yse<Е1C+cBW A]2=>VƠXT {C;V Za7 {j=CbC`H z <;$ywH 9!1sCA2>2>2>ĠǐCA AA^9$y rH ɐl?zH|]sZB_k!Z_zH|35Ϭck͵_kk=$֮4zH|]hZμgCkYyf=$-3c ]DHl%,S`+a[ 8JXA!1C28e|=$.!"B` ۇx!q];=$kgvl`!qڃ=$.!qp_zH8!$z|BLJxy2 NWvlË~g z|*LJqp_ze\+2^=>TA1}P۷1}P۷1+ ySTH]Q!qEA=$8 B @ )$!qSH9>e|=$.!12>`]-$^DHl!q>`<*$P8CB  `G[>%l`/aQP 6ȳJ Ϛ*ae2>e2>eɂ3Yc& z|dA2دFЂjtP -دF~| -oU1fyP#=>6džvP -O -O -O -O -=>6۷ cC;}n>6jhA^^ԁ-+[P l؂U9yr` fʁ-ەA ؂[p[pA` A`lybtC  -Zn7jhЂ O*<{x:A`OA<9<y@sx )A2>2>2>A ϷOAo<:<yux |l?0Z;^vz}k=%AOjp_OVz d5ؒUm|ENj]-v]Dl+cWǮADyUATkQŮA.g{l=+{`Y޳[Ϻ5z vY<کg h5ȣz vY<کg{\{2޳5g z|YzVAճy2VX=k'cAճ{g Þzֵ~}L=Z?^=1kZzֵ~)xL=gu3Sg晩g}{ֵ~}L=Z?>udSϺ֏g]kz5L=gu3SϺ-߳Y`*h{R" VJ ^d*h{9A2>2>2>2>2>=hܾ} Z2hMV*hM.or"|PA+/d z| ZAA+1h=>6`T A5`A6`ZT5AUfجA52>52>52>52>5fجn>6۷c:}جYyKvzf{V7W ZA^:hyu:A++V2 eZA^f*h9> *h{:Am|_>6oudD vP"Z_G hA~т<:=up';Au`dɂT' N ;Yw dA'ɂ`oaz \[26haoZTlւ= ZVze7C`{(\C2P6eB٠LJP6! 
z|ha'[` Ev} 1خ^#=>A-lC 6e| .-lp_oaZؠLJܾ}haۇܾ}haۇ-l'w-lWWA^^y}U V-lp_oay#S L-l2AyU 6 r|jaZ2-l`oa{ l-,[`kaq_}ݳq_}ݳ! ]Dlq.\ǹAyV"`Kl[b 6['." 6S OJl<* 6S OJl6'e|= z|Hl۠LJm|%AyWb!_m|%`OlSbOۏ)]gk۵~OyLZ}n>&JlA^ƻ[Z oAl߂ۺyu .S;u- [W6 /moA^u9> ߂߂{@:[od ߂[gA[f oqP-ȳ['Jl*yTwb vP-S[T' ONlAR؂<:Tb .Sb .Sb .Sb z|LlA-1Tb !߉-C[|' Nl؂3ڵ~h~LZ?2_ǔѮche1ek25Lgeh!1ek2ڵ~@~LZ??vyfh<3ek25VT+ ZqP,kA`{d+ sP"=>V=Vc+;}~f9,CA `>4/UbzP51{A`9U=&&&)j)jm>ĠǚXkb{iۧxpT=5>?AÃJ6~ pP0*[A2>à` *[A`K8`o oA /?~-51{kA A^]&&&TM 51+kb} oUkbsM r|e|e|U=TM vP51sA`&{M z|A51&=>کUĠǚX&ypM  51kb7ă?@ϵ*^!_kGEZkʵ^!_kkB֮(z|35ϬWc^!_kGk?g/ksw3|Y;L^[~/kGke ymT(PFr5`/ vr%`oD LJB9Pz|(^(B9e|P.rp_/Q([ n>۷ [g Z3.J[oz\32iPvv]=Pv`ϴ-{lqp_ϴz\32 8o!z|ȴ2LJL;>24 w5܋h-`{t Fp N7 w{l/8p N7A wC ;^p`o'`{j ;'5܋h{ Gp@;7e|{ _ p[ ;"A5A{{p9>5AO wp_oz ;ȗ#5A wEȋhz|hLJ;.O@z|ȴd`ϴ<+L;C2 ʴQb%:kzYGe,AZ<{_RS֯(^Tpc*<3/c֏֏~cp֏1Uk~t{LZ?}A`oZUTiTQ IA`OմjcQ z|,AEjpjpjpjpjp,n>Ճ۷EcQ=I0OE5OE5OE5OE5OE5OMA`oZUTiTQ uPE5؛A`Oe|*e|*e|se|se|se|۷E5=>ՠǢ~Zk&< mڃ7M` P Cm?T z| AAj BmT 7 Cmc=P䫀CmT 7 sP6' +n7}W܃A>D#+np*np*AU J\q=CT yp~+n0Uq+ns r|e|e|Uq|r -7,W`ϐ{ z|A7=>V2>U2/R\T჊AR<:yJuz|Aqp'c OƎA<;y2v>8\Ƨ8\Ƨ8\Ƨ8pc>8q8ȋp /k?>c迯Z￯_kכk.7z}]mZ\vyˏz=_mvkPZ;\mv$k@xqZtAxC7nAwCP=/l4}=Fosp8osp8z|(΃LJ<8z|QQP0y= {qlCQDq(*@Q9أ _]<ȗEe|=*.Q9أ `[ <EA>(*GQ9أ ǧ<)*r|ʃzT\׳^GA>)*LQy`ʃ^DTlUtCT=>DAQyp_ʃorgAD=* <ȃ ʃLJQyU`ʃ<**<ï 2>2>2>2>=T vP7A`o=)))))m>ߠoc z|o{p1ܾ}ooc z|A71=>ߠǾo4wn<Ⱦŏ}nAfo4wd 5}n#AB\Ƨ\Ƨ{P}7ȇ`w|dq }7ȇ݃A}79>2>2>A>UXsnV(nc z|A}7O}7ow߿jsA\2|Pe8[p o-y~׶ kmz} Z^_BCkrsZ͵_kךڱkP~Ok}ڑZïksqZï_kGk= xf[ h<تgGV=[h<تgwAxC4=>D`ƃz4\ףE'ۧ>`-Ouw`z\2^wvuw`{l}-`k^w[_\26e| .۾}LJ;z|nponponpz|LJ;z|?i86_`w_?'Ey{Qlo;QDQC(!BEy={Q<ȗA({Q䓋 ]T좢<e|({Q닊 fT:<AШ({QT9>AOEyp_/ʃz <7.Ar(KEy/]*ʃ-z|(ʃLJ<(.EypCQp'FAybTP=B  Ճz=>`ՃԢ=}_S֧eԏS_Wԩ cu1uktZ?sCΆ~L Z??}3cj 15kx~8}L Z?>enհ=T{P ;#lGٰ=T- z|lA ;a=>6je|je|j2SL}p<3Ae`*S{>L\ƧL\ƧL\ƧL\ƧP 3uƒ *S{(2>2>2>2>2L3ucz|Ac>}n>f۷cz|A:1S=>fL*SAe`96>5ٰv?<Ȇ!aT ; 7 0ܰ|ܰ|Up] v1nA>ȸa=>62>52>5jA̸a6v3nAиaTr|nA ;aa"T ;W.7 ߹ܰ|rذvcz|le|je|۷ ;e|?ϟA>X~Ӈ_ .A~]܃<Tq\Ƨ\ƧX܃A 
.A\܃;ywpq~P=O=O=O==>~P=ȫ{W/YK9kǶZ/cerZb k2/ϬXc5~\bƏ_r+זkƏK˵?R?V3Ze&VKXj+rR?VΑZ!/Ś?X|=s5/rzA_ XlA> XsނA jn2_|^An X?'|fAnr|?!p_\W5USrXA>W!V/Az|=jn2s_-|vB PC!*~gjTLAQ_p_\W+=^?bWTJA^IP_j2Z?Vz|_/֊AP?Z{S*[2kL-ZSo_w⿌-1kfZ~/|L-Z> ;͗_7/cL-Z<tS9_뷜kZ~|L-Z??SO珩_g/s?1Z|j~SN[A[|c?\ƧX{~PE=؋A`/UԃTQ.SQ.S|E=؛A`oUԃiTQyPE=؛fpfpfpfpfpfpcQz|,AE==>Ǣ~pܾ},oǢ~p=>ǢXԃzcQz|,Uԃ堊zTQTQYԃ8*A?-;yw4?hÈyO#AȣC}Gu C}c?PMǡ>țC}7 o:At*)))=>P*yQtPlCm!Կ~k=Կ~k=Կ.zk=C8Zo^1vۼckZ+Cݔ_?_v#kuGڽZ[k{GڡZ#_kk=n z^=B`1>,`LJ?!z|?!L~e`-{l>3`Pz~`-{l43` [(\C2JP:e|=.۾}2LJL?!z|2}p!o2}p!o2}p!z|2LJL?!z|2}g~{l/E4`oi"/N ? &8? e`+L~e`|4QL?g`|P[2 _;ޡL?e`2 ǧL?).~p_fL?W#eA)H~oG=>dA~C\2ۇL?gAyT Gjx?#5A=GX`﯃? o^?e|z|popop.p_z\LJ?z|^yTSWѰ_>/cT1kz^S2k"~}LZ>_a+ؗ_TMkJ~{LZ=_cY1k$~ yLZ<_Aqf?rj^ AVzדדZ A=>=>=>j@pVA`UTP?A`_dzP?A`UT\Ƨ\Ƨ\Ƨ\Ƨ\Ƨ\Ʒ}Xcz|A?ܾ}opܾ}A?=>XA`}9/U<C`*lUQyw? ~OnA ?O ?O ~nAXf䫅~n~sr|ne|je||p ?7 |r?~c.S.۾}le|jA s+~yw9 ?s~x7je|je|je|jA ?ܾ}lodžpܾ}le|je|je|je|jA ?=>6jA"E ~0_Z!ZSǔL2S2fk2~S}LZ>LR[֗1_wǔ鿌Mfk2~g{LZ=L׮cq1ek2~xLZg6džP ?O A*{l?b{`ϝ2{<b{΃;*{ .S .S .S .S .S .|k%IN3?1n7CeKp#$E~},ۃe{l:>ADz=X~},n>_ۯeDz=Xct|,ۃe{l?=$U`?TgYA_ eAA\yEp ;|l`/l=XTTTe{wA^\y{peAA=\΃*ۃDl= 8.ۃl?=Xc\S \~},ۃK|*ۃeɲ=s 1.ۃfl=xA~ǻl;e{.ۃw~Pe{pOe{pOe{pOe{l:>_ۯeDzc~pl.l.l.l.l:>ADz=XT+e{H_ekl%lZ~Z/ۏlUR;Z/_k+Z/ۏl(_kklٮb{|kl},_ekqwƵ^^13LMe sS?>lo`oe].lo`o%؛ALJ&~t|h⃽\MEާ>=֧{>h^D>`[}`k4Olf郭\_o4z9%h.m>郎}C>Ч:>郎}zpOn>ׇ>=Ч_ALJ>}Ot|ALJ>}O>}]BOl`%$v >}]B.OЧ{>3A T{i9ȷ*AU{e>*`yPe>P.|pW^v|U惼2A õ2d|*AƧ|pWK| |U惼'2%F o1yQe+ALJ|2\UCe>+ALJ<=Qn|wU<;2IxW+>ȯqW+>ȯqފ.V|pK|t|hZ`oQ+>njZA>fԊ1V|ފ.V|pK|t|hZALJV<[AՊ-`e~Wzce~Se~Se~wVze1{1k1[1KZI}zLcykϩj_{jzj_zjycK֬S~d?ZDzct|჎=A%>j`j=Y{yPU{U{yPU{U{\S}\@h:,:,u>:U^`?X~},n>A:X^cyt|,A`?XTy*` O~ d}P%`VWA\]ATAUA>]A+**胪|SU :w+ _֮|[> :\A+ s\S\S xPtlWA]AvtW WUAUA :XAT_+*cU-AA^2yTqQ-A|A~6e-snlv|P-spO-spO-spO-se:>2p-sA:2$q|P-spO-spO-spO-se:>AǖZ _tnY2ٴ~]PA*ZCk;:>m`;{k;Em|ƪbv9|ϩ bv^!bvOaK|\l|1Q _*fnT1;ȗ`/fAƧbv\_Ƃ+R _*fT1;AU{1;P:t|(fz9ķP.bvWl|@ O_w߼ ߼_G߼]l/yU#{7xȃWәS:kw͙gԳ>3b~؅_?#vψ]53b~؅?3bψ]c¯c¯c¯5f.<܅ sdv?Z`E'q?dOjуI|P-z?E:>AZZY{Q~PEy h 
`/(K_^:{ꥃ>^:{`/VA~Q8^:/ KEz`({c/t|쥃t:>KEz?^:/ KEAߓAA>}]=tAU^=TsgW%>U%>UU=@ss4WA>\=Ps|Psz2>WA9ħ9ħ m|z9G z:>VACsz. .m>V%>U%K||(z9w'|b^z`fK^:xA~ǻ;޽AA>KKK{c/}Pt A>K r/3Ƚto$KKKK{c/t|^:~սt { {j|JZiZ JZziZ Z/_kC1Z{\_ki}ӯ^L֞կg1ӯG^L֞z1Z{^k9xӯkά9^z̖~(_k~PZ~(_??ז~(-PZ_kz֯G^Z֞13փLM sSi=TZ2;փL F;L4ڃ`{i{=`oALJF;ALJz]lvQz^Zv".{<`o[<`o] vvy]lw`o]`]t|hALJvy]t|h]lw`o] vvy.]vyh]U _qjS`k]X jz<`oR<ȷA.=5[忶|u u|uuC _j]PC:к:u\C:[%޺^D:njZA>fԺ-󿲇TPJv(U^J6+VEU:[U j]A `A^T k̃1<3k` At b0?`T1/*P`b0OU s1`oU {1xP`FA>] T T T1 `o A2\ ~%] tUe./ctA~ %]_. :>tAǒ.~ . .m>t%>t%>t%_뽃wcރ?^{+[Ãj |u5 Z`o 5 :> _ / __;,xfA~ǻ0 ;ޅY.̂K`avPY.̂o\Y0 `+~?Lfm1f̯쵖^Cu_̵^|/zZ{/P|֞.zZ{\kr_rZ/^kלY/G^sfy͙r5gˑל;C 3C 3C 3C 3C 3C 3C wmP][~g(-3pz Z{T 25pM% S 7T 2=p^ gb_.gLli=`قgl=[pGlK|gt|bl`ktg=l`{c/ g=c/V{3؞^ gb=x/U\D/3s24W/`eF _5ez/3{`eѩ^fee^zA~zA~z`^ALJez\C2׫%^ .e}ok}vjtwA~6gl~_?$[V{4Z`o[t-`k[-ҠC4" :>S=0`'`yV=0cV{=0`?Sk=T\gk1t~?o ?3Wc:_tֿ93_93_93_93_9w35f35f35f35f35f35f3cgǖ-3[~gA&3dz>ԙ?uÃ%?OO%A<u8A΃p~P`?<~8?yp:z I}p.~> wpt.}P | |p) wp) wp) A];x.\~}pt.\Ӆ;x:>^n>䷦A~k䷦A~k~>oMp߃:~8`?Aypt|?x6dm<;oA^j}Aƃ6~<}8߯`ZZ?B_k^RwX NI}:_k_߯c8>kZ?>ky_k9~|͙Skά:_sf3[~g8pR3I5fk 'טNזNזNזNזNI-3I}>tRdr:2;N~Rl`?Hq0l`?q8ƃ`v0l`?q8ƃ`??Ƀ5yd&5y]<خ~M䷬ɃK|<ׯ~M䧆Ƀ5y&A`&߂&߂&߂&߂&߂&=oܡ߯_K|\ć ``{`{V]yl{V]*ZcY+3Z*

|AAtP`:A;uA}P`?|;~>w:|~~o?ȯbw}K|_%>/uKA]%>ħK|pOK|_? ?V|l?c{=y}l߼>o^t|<Ac{pOc{pO%>ۃA~1~l?c{=؏~l?c?3}ߙLw:~g~g`?A3}Lt|<tKA]l%>˰/AކuDckYZ~ւ꯵خk-kZ;_ט'꯵/kZ}A_kAv꯵C~P^sf3לY?t5gpP3A5fk ט13ԯ-3ԯ-3ԯ-3ԯ-3ԯki3A}>tPdr:2;~Plϣ`?Q/~vP`;A}ԃ>~PlvP`;A}ԃ>~PlA~?o<?Oc/~~gԝ>Nwv;Ý~p>ҝ~U_uHGA~ GA^GA^GA^GA^GA^G`?:>G%~\~}8.#_?.#~}_zpA~?OX]ۅ?/ALJ _ۡ9\gn`l`q"ALJn ػALJn`t|z70{o}C@0ZA* "#}DpO}DpO}AA~UɭB_OnUSBnp䧀[ ?*]7*[c\S\~}lK|jK|jK|jBU8*Z A~ߺU֭B *{ [cpPB~-Թsqp\\**+~PeE_:>ADzʊcYt|,+eEpOeEpOeDz_#Wk _9|u1d\oZ1_1_1{1k1לڊU3Z|mŵnyLmŵ~yLmŵ~a~LmŵJzLmŵFtatatata̖ۊǩmc?w6 w`p6 -,$%>$Aǒ$XTIt|,I%I$ .$ .$9X~},In-!{tZ^kϮkx=N;{Z$^kkx͙N:KZ$^kc$^kkxׯNN;+Z$^koלYMm5goӯ9~~͙N5g-? gApmnµk??t zZ{Z- 25u M S0- 2=u - GRw Et - n!ػ-{0غ`[`[`[lBw [nau - 'T/xar*)as*x7ALJa]΃xl(m!؋A~xlC_/-BC$S0ȏ2^< C 0T< C 0T< CC~ALJax\W%ׇapK|x\àC0P<_}>헀>cp>cp>c3[}ƠC1g{1.g r}`({1.ʃK|c>ct|3%g .>#g__ۯ _gA|~5#OMS5sLcfgcfGcfcf93U3לߢut2kfxL̵^3jZG>jZE^sf:_sf:_sf:_sf:_sff9w1/?c?#/-X<3bψLT3\kL-؟;Ls砚`T3 L{3qPD,T3 LA5L{3qPD7L{3qPD7L{3qPD7A+<wA~ȝF_A4ѝF}]/Ux `?#/A~b#ӅG:>%>\.A#ħ3wpo...:>Acq' ېτmHp{ .O!A>܆ԆېcrPmHߣ {A!~>6$Ն=:ħ{tpO%>ݣmH 96$؆ېc\S\Srp 9؆~}lCna '@,Yd n%ȻZ<ȻɃ<;}Q&=??>{8{ܮ^0{UZu^sfy͙^zړZu^kk k;sZ|^kky=~͙>5gלY^sfOx͙5f^Zu^gu-\[9׹sP2ދ^ `{G{2Q^ wůlb!0>łC`} @wl_?_>__5 o`7SwM)ػ]ӠC]`뚂kl]SwMk iuM5 i"jl*ɦ"jm*٦"*؋A~t|JA~l5KTfjT`VK5OfT-ՠCK5[OKUX^a R Ta OVWXS5>UXD kQ +KALJ kt|z71ķPa . 
kpWXK|t|*C~})C3~}hz36כ%ތ .flt|hƂN`&X j V3_&z71׻ALJfl flt|h%ތ .f,Ќ_C3~}h.=Em O@yRm6jATA 'Ӝ۵~xL۵~֏pOpp֯7pT]sf*>ZvUU{WuP]UwUU{WuP]UwUU{Wt|dW䷎ vU*/wUUAQA~ >`SduTtt|uTpOu^rWǗ ?U꠺`o  ࠺`o  AvUA~1: UcW\S[\~}쪂K|ꪂK|ꪂK|ꪂ]U:U~}n>vU_ۯ]UpO]UpO]UpO]UpO]U :>vUUsWk]UewUAUZvWuPmApOmApOmApOmA :>vUUcWt|쪂K|ꪂK|n>vU_ۯ]Ǯۯaɖ+[ Gn< x+[jok,}~C6VZXmj6V.Z0b+GXƜYƜY l\,b+XʵZb+7XE!V+rO l\J͘3ؘ3ؘ3ؘ33C7ϑkusZ}cyVGX>G| Xo,_s/绡˿/Ko,{Ntb-|7XKot|tzNXJktzNXJktzNXJot|tFooook#nK!|_//_0-^W}c~_H uՋF~&F~(F~*F~,zUX k]0xUX k]0KaUz]Uz]K|0%o\u7.պj]Uj]uՋۯU/n^Wz]u7.պj]K|%ZW}u7:^WXou7򉉺BG]|FQWX o\⫅7.jaUz]b_j]K|zq__^~} O| wY/.yBIFޤe}#oR貾7)tY/._ﲾԟ_.c*ɵ~2yLE׵~0yLE׵~mzLE׵~kzLE5g3S1]1]1]c,Su_#Su"Su_"Su!1]לkLE5g3Su*C栊`9+?d `>+ħ+2 e2d <,yP-XqT _Ղ[j9,[j;,[j :>`AX,WdA~" AUdA~" Y_>ȂqEσȂ" Y" :>VdANJ,?y? Y__ςr ܟT Ag`T Ag`<,ct|ςY? ..m>g%>g%>g%>gA,ԟ~}n>g_ۯ,ħ,ħ,ħ,ħ,؟ς|? ,ȇ ς|?;#ħ#ħ#ħ#؟ςY? :>g%>g%>g_ۯc }rZ.kA\yrZ.kU˵ʵ[|?;O^kPsr3kJsr͵ʽ\z+3kάr9C+Z;\krrʵ ݵؽzcZ~\kq7vknt7v9ؽzc3kά7v1k_*]u` :o}W{7ؾW}yK|\uwU7xw} {7 ] ot|leUw} ou} /ػ :>t}]ߠC] ? sH] ?{wM~KהA~OVV]D7غALJot|AƧ/ػA~wA~䷛A~ o- %ػֶ{7ږ[2 :>t}]ߠC7ۖ%ׇopw}K|\]ߠC7_Wۯ]_p n>t}ׇopw}K|\]_ALJ/ػA>k YoWu} U@]_-K|m\m_o[ALJ/ػALJot|z7׻C~}ۯ]_p뻈oy^IT"pw0LE `*t|(AwASxSxc~1Q1לkL5g~cccsc?~1111ZU=~3Sx͙_/^sf9`;1ؿ{ #;` WAu%>u%>u%{A ύj-sZ`82Z[˃j-v;2ؿ Z`o- 2Z[ˠ?_J* \iJ3o[_hn-FskW[ Z{ [ˠckyPe :>A2֒_<4 Cѕf4Xt+J4T:J3K4T:J3KYi{t|4f :>V%>J%J3ħJ3ħJ3ħJ3Xi+̓ۯKJcypVAJ3ħJ3ħJcypVAJ3W VnJx9ξz;31յξWz;Z;[]1z;Z;]kxtѵξG۵ξz;3kά9ξz;{Ǐs"=` v}{u;ؾgl߳K|\_nzu;_n_kk-6ؾ.l_k`oZ7ƒC#`B Fx}!{#<ؾl_Fxt|hA7QPu ? U_ۼoMKA~QK7A~UKt|`ALJ.yKd|.ya.yq.ypy%6/ػ{<ڼ`[w]`kALJ.yKt|z7ķ%..ypwɃK|Kt|C~}_Kn>tׇ.9%..ypwɃK|K\]C<%{<绺A>U{;3J`ggyP;P:>^:>C;K%^_JC~}(}ۯE wpOww0L `*}ySMC;P:>n!|,Ńf.5Ku Yy \y |돋T4_T4_^4?Z/S|͏hT4_sf*93c|ߵS|_S|ߴS1͏hoKh/KhJh֯JM1לkLE5g3S|͙hsf.d UBA}}P%t_T %>%>%>%>%>%:ؿX/փ`b=:ؿXu:>A::?ۯķ>K|ۯżķiǷ8 Ӏ ?= x9\j7w| x7  wAAn 2>n WwAS@oK|jnj7A=@7A :>n . 
.m>K| .i7t| w_w_ߗv_w_w_w%>K| .i7t| wA5 iA寢r6㑧A< :>NӀAi@q\4 ħiiiii{t| Zj7һ  yn wA 8<:>7F *xLk}TF1 93 93 93 >Q~zLkF?\c\ǥ4OKi8ppZ?+]G444444טGrT_5*/A Fe\Ө ħQApOF%> K|<K|?>6>irZ97`%?ȕC%!ؿjt|\9WAǕAV }P+`>CB!?+!r:>+V%>K|Z9r.OA>9BA>,!x|xs _@#T! ლ#{!|Ps`/j#{!t|#A9Bq\S!\~}#4G.i\!8G:>n>^_a.ZTATt|lAAF\T%AUO5C 9.??X c\S\Sp?X~},n>&>~g.3Ta9AysaAuA?ߟ-!A'[Bln ?? ook)1 ֧Zj+ _֎=0 xziksO^kgk}Z;\Ӏډ5g֧9> x͙ikάO^sf}3[~g̞k2l"f{?g{?g{?g{?g{pl`pl`pl"f쳁\ ~-=O2?x S`* `* ISISISALJ TaaSSSSS?0Ut|* :>L SALJBOTapOTapOv clMrЎ!w |i0؊`1 .àÎaa;ALJàÎaa0cC;A>:cSM;A>ִcsM;%$a5clMr[;$_Ďa5Ƀ;ALJàÎaa0׫%ׇ_1 ._1 :>v )\}S06)l` MAo ۦ"6` nMAo yЦ [O[A^y)GALJMæ 7ALJMæ`po  )n>l ۯæ"6t|~?>8\@}p샃A48yMA48yMA48:}ap008t|\` ap %ApK`  O@=_^#<5µެ?5µެ_kǴFip333c\#\gǴ82Z?<~yLk 91-c5g5g5g5g5g5gθ8̸8̼Fxk`)!_ FApPk`)F.i\!ħ5BpOkrZ#C:5BkZ`~]t|hAm'쯓Am'urPۉv":>n'j;䷷t9D?]j;ON%:>n'ۉv":>n'j;\v"ħDpOۉA\,-WVA>8E=.}6pPVAEquqP":>.ÊV"וAI=UaER+jXA +>aEW5jA+:>+Ê"8.iX\~}V4.iX\Ӱ"88A- }qP `_@"ă\@"[U3!GU3y#9GAǑCq\!ħǑǑǑǑÃ9y{F5r8CpipP# `9y!;GAx͙kά aq_\kZ;8vXʵ>xUqkr#^kלYG85g9>x͙-3#^cfG0x=Oe 'Ss.bU1؞&lO`_U I*$Wi2WK|}U1WK|}U1WK|}Uq U`{b}U1Rl%\SR{ =`{Cc!C"!{ =Ơca1=`{Cc=}1؞`lO`c 'ؠca1t|c :>1\{%\{%t|c\c5kO{A=`c }1{ALJ=F1{ALJ= #|i1|k1ȇ| k{ֈc5lx1[#~{6(t|c :>1{%\~}c .=_c .=Ơ#Wl`_U UE*۪"V8K|qVGjoŃAGqqP` #5A?!yP`HAGqt|AA?K|4.i\#8:>nC?mH]oC|zrPې`76$  .it|܆! 
:>nCې6$<GOV?|zS # #7^?G#8:>?K|_%>?K|4:>?j Zxx `qyj O@qy#GA<:>8jt|qGAGpO#F_G_G_G_Gr#q.Z-<j N^xyv#ȳA8Bqt|qGA-8n!pqP# qy#G5.`ǵ>x kmZq\#ڈZqSG9>x͙kά8}õ>x]vy0x1kq<^k7k}Z`\5g9>x͙kά^sf ט13 <^cfx0x^z-sõɵɵvh8!`{F_e}2ށ>Dl` w`Q;ppQepQepQ"(C!`{Fe=L}2K|}2ׇ(K|}N2$n31a `{ELXc8',1ALJ ˠÄea `{Fe=}2؞>alALJ ˠÄea20at| :>LX}2',K|}2',K|}20at| -|k2ȷ-|k6}_߷ .}ˠþeaALJ}ˠþea2o{]Aصo]AڵoC`߷ ɯ} ڷ ٯ} ڷ Ơþea2ot|ط .}þep[ep[V*Rl+`_ ElMy$A^y5G[A^y5G[ALJIoM&[ALJ_ߚ .Ipak~}ؚ_&ׇElMy$$ط&dep!JQy<e# Qy<e# Qy<%7C`ߚ ɠؚ'&IoMydg mMy$ط&K|ߟ_xLCk}TqQZDyLCkLCkLCkLC}~[K|4ny`xԸ%5n wA[{P`4n .i\$ħIpOC| K%_| A-_˗%|9KJj˗`%| G}qt|\/AKqt|\%ħKpO˗/%>-_˗堖/A| K'/_jӍZ| .it|\/| :>._˗%=n qK[}t|-AqKq\Ӹ%ķ8n .i\Ӹ%ħqKq.s &y7\$Ȼ"AM< :>Ej.t|"AǹHpOs"_"_"_"_"r.s"AyVA MK$[&Aj<4 VIMxhrP䠆&A=94 ɡɡɡA M xhUC $ħI9`hrM^kk}hZMֆ&64և&ɵ>4y͙kάM^sf}hr C[Z^vx ahZ; \CYZ֎vև&kά^sf}3לY̖&13 M^cf04yah35fɵwɵwɵwɵwɵc KE TS7* `{@e=u@epT@epT@epT.b2؞>PlO` KAT#9*<*K|}2 K|}2 nZW۟`{ze]D}2^t|X :>W}2؞^SWv)`ze] +ALJʠzea2^ __ .__ .ʠzea2ȳ*<Pl ` ET@ea20P ʠ@ea20P\4P@A4P@A 4P O X  ` O@ea20Pt| .@epT@epT*h2Ȼ*h2Ȼ*h20P ʠ@ea20P\%>P n> Tۯ@%0P &@ea2~!ALJi_ .iKO[yѴeM[yѴeM[yѴ%'AU^_Wz%+ h2ȫ+ h2ȫ+^\?\4Pir-c\4P*ir͙ir͙ir͙i1Tc\/4pc\Á4Pir@Z? 
<ʵ~4444̖*ט*ט*ט*ט*ט*ט*-3T[~g<8Pylq*cAT{P`@%؟5P A T\@%ħJpO*%> TK|<ȁJ?ujO݃S*=J?uK|4P .i\#ħG=&^ WJz%_^ :>W땠z%^9J?uj엂Z+rqPÍ`1p#o AFstn4.it|nA>=AFOr7j˃nࠆ%> 7Íp㠆AFqt|n4.Kۯ|oy,rPc bX$7"5 HwE|{,c X$8 :>Ec"%DZHpOc"%>EK|܃y$3 A) L=Hg Ajt|܃ A=HpO{ _ _ _ _ r{ڃ A=Hq\$ħ=AArqprqP |yO#{'A<8Gpo||}_yd`["}#X\KڟZ_?K_\KڒZ_֖$5g֗$9$y͙%ɱaeZ;,\+YZo_kGMV&µ2ykpL^kk}eZ$z3kά9̖V&13L^cfX̰2yae35fɵwɵwɵwɵwɵ2yY^cf?~|U0?S4v6e6.b2zl`_ }^ۃ>W/A?W/K|}2W/K|}2W/K|}r`{e=}2zl%z\뫗%>\S%>\~}X Dp`*X E^i'W/쫗ALJˠea쫗`;eK.bd2'F&{bd2'F&߳LQ(G&#ALJ _ = _{?V^DLdad202 ɠdad202\#%>2 n>Lۯ|oiA>5OS_A>5O y ůy կy Syʠ OyJpbA^"4O%BA^"4O%B`\-bK|Z .i\"&ħMƒ\ 1~C8ELj7Z !ħELpOf$%>HK|_G/?-G/?-G/~x`?%F5z :>^F/~9K`j'9] OJZNW˟Vӕ`?t%Fr )AƽN =uJ/rSjA`W> ħuJqt|\:%N :>S)%>Sn>Sn>SnAMW+A<] AMW|z9ӕ yt%ȣ+5] :>NWӕt%ħJpoq\t%ħJpOӕt%>A>Eo/|{{qPۋ _^r"ȗA\Am/ۋ".i{\⠶A>)EOio/|J{{Sۋ۟ۋ^⠶A>)EOio/|J{{Sۋ?6^{{UxLۋkOg1m/c^\ۋǴ̴̴̴r{qZo?ŵ~L־֟Zh?ŵ~Lۋk5[1m/5gN3S'~͙ԉ_sfۋǩۋǩۋǩۋǩۋǩۋǩۋǖ-3n/[~g^<θxLۋk{y{dj?g s/ L~L ?1HAj rPcAA}pPc`?j$؏5 AA\$ħ1HpOc %>AK|$؏5 AAqPc`?j$؏%>AK|4 .i^\Ӽ"ķ89i99i9 ̃ ~:1Hq t| 5 AA&tPc` {.r rPc$/X5 փ<ȽA=x{j܃N:#ħIGpoq\Ӥ#ħIGpO#8888IGwO:|{ݓt#'A?=IGOO:jt|t'AIGpO&5AIGbO:|{ؓ Ğtt#?-'5AIGbO:|{ؓ ^m#lw` :>Hf$>#lw` ;TH*g$KAl`_\#h1޸|`,{>{ALJG={ALJ_{ .Gpa~}{_ק1 k5BcA-4 1  1  1 {?0t| :>Adpoa 2 K|} 2 cALJ1Hp}w<1y7eo{ Peo{ P}2ǩ(|j2ǩ(|j2ǩ(>@t| :> P(K|}2(>@SZA>5@SZA>5@SvGdD!T*I59з%*̠%r2? O+(A>@Pu϶AݳePl;@F9@iA=_A>`x@=2i1;2i#6>29fG&m|dr̎Lt̎L|fvd3#:~Ix?fW6C6Ï!Io쐤cvHƫ1;$iEgfgfgfgfg>=^) * * * :A]#_|Zsb=' ܃/>>>.>>>#Ӌ2>.㳋ӋӋӋ{P9HP~\vԯ~Rh A]$us~UA -}|z9Hǧ e|v\Ʒ=}z\g e|v\g A}|zR=}zR=}ϺAW&ve ݯL>~B;A '(ANPb JPWL?A '(A]1%+9H= ._A $˩u9s.~A9H9HZ= O vUAJ9HPWi? * A>9 $~SBAѿAA95:iskwjsk>3>3s29iMOEƧ"hovk—ɩ5ZTm|*r6>Fkt\%ό5'k>3N|f̖LNETLNETLNETLNEږLNEږLNEږLNEږLNETtfv*2SSA TdPgg":=; ]$$G&ɑ } dL|d2LI 7  7  7  7  7e||d2L2>>2\G&87ʑ } } } } }\G&dp .㛎e||1o{$;d55 }  }  } ɠOH}|rG;A;A;A;AOL ';2 r+ gAbG&uŎL}|rd2#  d'G&>>92\G&$=}rdܞ>92 nOOL ?EQ @.P/P]eP7t@ .PuC AeP7t@ O.P}|r2 e||2o{ep_ . 
e||2 A\O.P?iSӈ2 .+2>2 .+2>.۞>=$ G q;$AVh$AVh$AUh$AUh$A!Ia!Ia!Ia!Ia!Ia}|?|ƏW x%_x|}Qh}|zJ}|zJǧ+e|v\g+ӧ+ӧ+ӧ+ӧ+~<}~Rh-Ai%v)~Rh-A*-A*-A*-A*-A*-v[eKǧ-A^eKpeKp]eKp]}|zeKeK}eAl l)˖~O˖~O˖~O˖~O˖~O+~erP/bE]_u닠~}]Я/ EX@z`, "˩Xu9.~`,MG'",1"GcG1<әEly"-Xb ֱ1XSunr`NN,^Ɂŋ:=9/28",_O3^_O3^\~G="I""Esx򢏏A^9ȋ>>>yqA.nO\ܞ>>=}|rq{QA^)u|g!")>EܧS^\Ƈ>E")/GOyQ?}ʋQES^ԏ*rr/^}ʋ>>Oy)/.mOߧS^\Ƈ)/.}||򢏏S.nOߧ\fmp#Í򓏠~򓏠~򓏠~򓏠~aBAY(OS~DO>yO>>>=(OO>>>=#N>p{p{p{p{A?'?(O>:>?#N>,GP?GP?GP?GP?GP?GEOO>>>=#o{Ep"?QGS( ;' ?(#~#~#~#~#vPhA]~ uBA~ u+z.WA*_&W׶6F[W_\Ѽ M c=mm|=pv6FF_\5.q\\w6Fom|=pދz5z'3{gUό5Wk>9k:33k:33k:33끶9끶9끶9끶96F/tfv=0SA] z`Pgg:= 7FJm-\ǧ`p4i A>-2ȧWON |Z0Hm|p .ӂe||Z0O i2>NçA>-6ӂAjA>-6ӂAj`p .ӂe||Z0O mO\4 ]z`>az`>az`>az`>a ' _ ' _ ' _ ' _ ' _ O}|r=0~>`P?ri~ӂAeN w w+;-/WvZ0߮`'A>-iON }|rZ0O iAp{ =}rZܞ>9-nOi~ӂ  `Pg:>;-\ǧ ȧN [ ;-o5vZ0j )z'>>9-IIIo>F,9no}PwSwuw= A}(QjO̓\@-m}̊rlƯǬ(LYQn3ϗX?g%_JYnWcV6~=fgf1-m*uJtHƯQǬDK1+m 3B3B3B3B3[3-m:3-m:3-m:3-m:3-m:3-m:3-ǖLK9}lyδD[3-ǬD fKtP%:syDuz^ D\h/ȵpU46W\h9ȱbsp2>e|jlFUh9U \h9ȵ4bsp2>e|,6Y;9ȵbski BA\K,6Yl.\g9bspiOrO.oB֓Mzr ZO7 *oB֓D ''ZOrO.iO'}|ړ>>ͅcsPyl7csP?Kyl)A49<69\h9il46Yl.\=} Ocsil.ܞ>zl'ͅ:>A栎csp2>ͅ9csP?xlA9c<6Zl46}|>O?@0_">p{ Gؠp{ Gؠp{-7F zg R<6n][5zlz-M[5Բ/ϔz3^6m;5zkzڸ]q*z"ڸ^w6 ;5zhz̸]q3uglyΤ^әIg3zMg&L:5tk:3ms&myΤ-ϙtֶ9Ɲ^әYgԩYgԹYgYgYgYg rgm5H0*YW:H5ȝu:k;2>.(uBa; wA :Hppwe|Yqg\ǝupwe|\R( rgP:H0ȝuBa; e|Yqg\ǝupwe|YmOg wA:H}Y } rg>A Y)Rg rg::::~>D GApfvP?Yԏgat )묃i:~:*A>>鬃>>鬃>>鬃.ܞ>ܞ>ZhuP?Xg rgYgYgYg\ǝupw wAcuP?Xg1YsuAc5ȝu'u'u} ~8>mYaO@dlY>lY>lY>l-ϙf6f6f6f6f6f6fcsi>e|,SLZh:ȝ2u;ki.L\g:2upe2>e|,LZh:ȝ2u;ke wBAe|,SY.L\g:o{4SL]h:_v -SNe )L/;ASh:_v -SZrg-LL]h:ș:Li4S}| ?d9wP?y'95pZb '-A;> e|,pnO]=} OwwP;\g;w~9!wP?x܅>> Al:!A4;F~Z%A}ԗ|A}]h;;H^~}|Z>>߅VjjBin7c}6~>f'sw?}ww/,p1 m>3 m>3 in㗰c6~;f_Yn㗯66666inәinәinәinәinәinәi>ftf:5ANwPg;}PvU oO ;ְiZr.\g ;vp5ڰa a9Zr-ܞ>m؅ְaaY.㳆\g ;vpUăڰa a9Zr-[h ;6vp52>ke|ְamO6샚 2u?Z'BAO0Y -SyZr,LL]h:ș:Li4S}| ?nl ]~ZAV;_~{IwP?jyg-A;> e|,pnO]=} OwwP;\g;w~9!wP?x܅>> As'~ZGAwPIy;^ 
~~Z;wǧ;]h6^A;kFOqFom\dڸ~_p.?6.],\Ư{pkeq3bygŲL5z{k2~ڸ_76.轭5zk3bygό5k>92~Mg&eL5k:3)tfRƯ̤-ϙ92޶ߍ9~R rzܞ>A߃Toq\{pe|q\Q AA߃To9~R\{pe|q\{pIn=H_` L } r/0&{zh AA߃)~$~$~$~$~g6> 2n 5AhM}PZSo>ì0kA̚~>Ŭ.>zp{Ozp{Oz5AcM=M}PgM}PgM}PgM}p7e|ԃ5AdM}P?YSO@5 7A4A4AF !ǂ~ȱ>r,9wϿl:#} @'/$F.u A] `PO\>>@/`P;] r~@ 6~?fmm:3r sˁN/:;8 KQ /Ev䥨Ў|PhA) 8 ώ8nq@vnnOq@vpq@pq@pY=Aۅv]hAۅv]hAe|v\ge|v\ge|v\Ʒ=}zpP )/%_ Zr-v[h?AMA&OH'?~0}L\&=`z\ge|v\gA^ˡ_ۡ___Z 01'1G1(ˁO/>>r .Dh/MzOYP?\g?%~o5PxO>>MA&Or8 r8 r8 r8ЎrzP+?+(.A]N?JӃ~>;.u9=\gv[䳂Ÿm|vVPOO6FPw^?'vFg'"迧V B%ym k5rB8m|!q&|.|!Q&h#h6Fm|=qگk5_O\qվ3]&'_O\W6F/m|=q^̸j_qվ3}gUlyzL'zL'zL'zmyzmyzmyzmyz'NyMgf:5ԹĠNή'uvv=(7A|1H_o iu A\7Qn )b"~o )b"~o ).e||1o  2>\Qn ;Io ;Io ;Io ;Io ;2>\7bp@ .e|'gA>sO>A>sO>A>sO!)$gT|0H:gT|0HQ|0迊# "˯t0O# .{e||0[%à~KAg .;]29Q)dԏSv0aP?P%C/}|r0KA\2 .drUdrUPhW® |U0{`P?UϮ U2>*U~ðAaW î ]  *UO }|rU0㓫e| rr~Z99?- g';9N_|ӱ{At!(= .v0# .{e||{AjAGu{ # rj=B}r(?3v+i@/4׆cvK1; hӀ6<m}g`1;7hcvnƛI |fvn3s6,,?ms6 ٹAܠ7cvn@2p2p2p2plyܠMgm:3=7hәAL tfznЦ3scsǖL -ϙ[3=78fml Ss :9?7h [B;VhGA^~ h o y-2>;8GAޅv4]hGAޅv4]hGAe|v4\gGe|v4\gGe|v4\g cF cF cF cF cFp @p @p mO'4EA]M]M]M?EA;r]#EA;r]Ի@zH . wA{]@z\gwe|v\gw.y . ?+;K]@ǧwA}| }3|e~ !)3|pe2>ჺ{n>ge3|eO3|ǧ> \ga;o{4nԀ_ ?=.ğvB|t􋂃zQ5/ [_u􋂠n~Q\ge|vQPhAޅvQ/ ~O ussB;7. T{(h㋂krE5Z8m|Qp|f|QP&弍/ ѫy_\6(F[M_\ό/ ό/ όK52(F+N_\/ zhi㋂k\q3{gƥόK5E5\\әE5\\әE5\\әEAEAEAEAEA_\. ujvQ0sA] . wt什Aک|0H;ui] ԃ.`v)l.`  什A]2> \w.`p .2(wI"wI"wI"wI"wIbp .㻀e||0]2铻A ו  ו  וA什AA 什Aȍ)pwrv?HdO&pe|qo?,8K-~?{,K~??_h~P?+X ӂ5A`~P?/X\Ǖ4ȍ~Pek-[mn[5 7A4A4A4e|ܮmO6Ʒ=}h ^h ^ 4>K~ _ *mJ/_z- ^v0"aPW/En}|r0o-Bo)an|0;"4ʹ.vn0 ܠQ. 
|Q0uy~~x9fmk6qٵAo8ڠ'z̮ |fvm^>p̮ x8fmJk6^ٵAQm>36hٵA̮ |fvm3 3 ^v̮ xU;fmk6^ӎٵA/im>3m>3m>3m>3m>9k6^ڠMgm:36hәAL -ϙ^[368 e|:jh]h=v ۅڃoZh\g=B{p2> e|ڃOC{ǧ= -B WwB Aڃ\m -ԖXmA҃ -=me|҃YK4Zh'(A=<CB A=?~ԯ -}|ڃ:>ޅz;[G -zuiԥͣwP6A]x=zu]h;i4zY{ .۞>e|~mO߳=7 ?폭pckh/ڃjK-=۲njK/[zP+o҃]yKr-=Ӗ\g-=Zz BkAn҃zK?({A A]=u!~P[z v/-2mү"-1mү-M[52i蕿[5zo~6nhWj~6k>3n|fү̸_qK3ygƍLZ5۸_ [5ڸ_[5̸q^q3ygƍlyΤ_әIK3~Mg&-LZ5k:3ims&-myΤ-ϙ9-mtfujunurֵkR r0=Hf Aڃj.=2>ڃk.=btA}ܵi r7]{v wA}q\]{pwe|ܵq\Ʒ=}ҵ}|ҵir5>]{AZܵkRAmt=9]R{ r:胗t=胗t=2>N׃8]$]9]K׃~t=},]9]gK׃a~Zt=,]9]$]ZԵ.u5=ˡ At,@czP7 Ѓ%Z=tOOO2>nO2>Ѓ8@.={l=HI7=}Ră'E<=}Ră'E<=}R}|Ru϶">{Aݳ}>\bkAn郺YKԍZz[ntu!>㓖>[2>nAn҃*m-QZz[.u (-=-}~W{u:{o]Ǭu:{o\ǬYgoYgL;{'Ǭrw:{vǬbw:{um>3m>3m>3m>3m>3m>3럏igoy:{/Ǭ{:{/Ǭ33333[3m:3m:3m:3m:3m:3m:3ǖL;9~lyδ[3Ǭۦ3ԩygܼyZr-eКwBkA^&LZ.\g;Ϛwp52>yyG-КwwBkAQ yyG .\g;Ϛwp52>ke|ӧ;uКwBkA^nZrz-zPwp'y9Zr-hkyY.\g;]h;__y7~o0޼ yu%ߛw>ڼ{.i~ЛwPW4oA]ҼyuME͛w5n޼ux;7l޼ :}|ڼ>>mA62>e|ӧ;Ϛwp52>kA6Owi.ܞ>mޅӧͻp{ynO6J{*Y;gB~|ԍΛwP7:oAyZF;i.\gͻКww5 7BkA]O%ܛw5.޼{.sפ6N?w'ktqF+W'kpq3}gɻL5ZO8y_ݮ58y_58y_q3}gόS52I(EqF{n'kqF;n'k^q3yg)όS5I3}Mg&L5$k:3ItfۖLwIn[3Ims&ɻ5Z`,y,y7Jp=Hb A׃.Һp=H:zpe|q\Q -A׃h9\Ңp=Hf -8\.p=2>׃8\.۞> ׃>> ׃39\p=H; AA׍Ҧ)MR\ r6=Hqug+mzg+mzpe|ܦqI~/![AbQ s nVu<[Aݫ000ZDrDԵ".>u<u<EA݁,"d9o'y'y'ypge|'ypGe|qDIDIDnODIDnODIDnODFȃZDԵ"򠮵~|s<A]0e9ȅyPW/+̃>>)̃>>)̃0.rs}Ǭ>ws|Ǭ>33333*3*6Ǭ>|sǬ>r|sm>3m>3m>3m>3m>9ܦ3ܦ3ܦ3ܦ3ܦ3ܦ3|lyδ>[3ǖL9|som:35Z&BXh8 ae /A^ -9Z&.L\g82qp-Z&Xh8be o-Y&.L\g82qpe2Li&V\h8[qe oAN2 -LYh8ȝ2q;ge wΠV3qg82qpe2>AB+A.%8_&uG-{nPT/}Z ɶВmPwOA<u_d[h6+'۠. 
l6x 6'BfA&۠Omǧ6ϢYpi .d\g6ϒmǧ6d[=}l Omi-ܞ>Mӧ&۠lbz ɶВo{nPw$AݑuG[h=7Ӟi Y .㳞[h=7=zn{n܃KAԝKpPw^/55#z rH|ޗpBp4,b!6^Yn똅666!wcxaf!WcxnYkYkYkYk-ϙ666666cs!i>ͲӧYp{4nOf,{PlP7:ϲA<u,[hYfB˲A]I<u%,ԕijlPWϲe>>ͲAf٠Olpe2>˲ee -ԸZhq5ՠ.W>Zyy R=o\ˮUm\eU5m\eQm*{gUό5W2hi*{Ŷ9*Um\eѦxgUό5Wk>3|f\ˮ̸IF5ں۸^5ڸ۸^}όk5ײk>3e|f\ˮ̖L5Tk:3tfRe̤^әI3*۶.۞>*Rmire6JԍҪVmuj;{U WB AnavP_- A}0;of= F }|f}|f}|f}|fq\Ʒ=}fۓ tH$ nOD$H$ nOD$H$ nODe|߾I{Խe^u/nfu0;[A:,aa6av'av'av'avpe|ff֡C:략A]g5oǞq}Pȃ}|yPנϟA'@k:(uzPA.y2ƿ1+m[r濃cV۸\r3r3r3r3r6Ǭ\|lyδ\r1+m-̬\̬\̬\̬\̬(̬(>庍1+mY☕6(qu8f庍?H̬(̬(̬(̬\̖LuLuLuLuLuLuoٝ dw\h8Ȼs Aޝj/\h8FɅuZL䭻brcr Be|,&Y;19ȋu /օXZLb]h19ȋup2>e|,&YL.۞>5&ZLׅBA Ph19ȟbr?ZL>(UуJsSi B+ANAVKsp2>+e|V>>-Af蠮 z:KgBz. 7qA} ԷqЅQ -CZr|, xP3tcǧ: i4CY| .*Iph.* &Iph.$\kABA]u^|PpPwOA< uw$pPwO…>>MA&᠏Opp%ߖv -!unPW/ۿӪoZ}y-dZ} ?VOpk1^f8{qῳH/nw~6h^|m܋Q/n^|gƽό{5k>3eҋ.-ms&{5Zl۸_ό{5k>3|f܋̸]q+^|:^k{5zƽ};h^|\q/n}/}̬:Hg{ -x6JO3=u'=Q -ANxd:H4ti AN8.h(t 'AZ>>>>>>>5JJ nf+m3Vfp:[nm.9WϯhqP_3 uA}8/:c;㠏O:㠏O:㠏O:cAANOuX-ԭjnV )1)q IYJ nOI))1=Wϟ)2rPCEAMQ!9F=!YjE6-Yl"1m!YlYlYlYlY|L#dc!x#<͂}>xfAf2>fe|͂,}|͂>>f5߳YP_=߳YP_==,o͂.,o -Z6 r)lSh,f OYǧ,li6 4~u 5WP _A]<~u-Wů2>_e|>>-cVƂNe,^ƂNe,^ƂNeXǧe,2}A+)A.cVRux e,[Ÿl\~ZfAi5uGlV5nlf[Y~P_OSzЂ[PfnA/_o$c[07a cܘ~b 0ƍ0ƍ0ƍ0ƍ0Ƶq[3q97;f clc>3qc>3qc>3qc>3qc>3qc>3qm 0ƍAaO1qc 1Ya5EX.b{X^="ָaz ׸}|\^q{""!]"^vۋ0[ۋ]"g1E n/v{ۋݾۋ0ۋ0[aEX&/bp{ɋ^e"a|ɋ^e"a rp{q'1kE n/zۋP^sp{e|^\Ƈe|^qp{=KuT.^ԕ@R݋HkTK{Q%սEhM1ս"5]T"^T"}|^q{ǩE}|^IR݋#I{Qw$Iu/$E]1%ս+^\Ƈe|^qE}TK{Q%սo^eFRELu/8սTw͋H{QWq/J"5E]I]~Lu1ս*^_S݋]I= .nOۓ$p= ^Z#E5^b#En6 v6-Y_iu6 Yk:1xmR3333v6nRǖL;ޱ9ӎw:^/Ǭ3w1cmX{LXcڸ>{1cmu>cռY_ -ռY7BgAlxi< 4}|ԲVhe-i(eZZ B+kA.kVւYY;e-T oSVւZY 2zY);{X //Z Rhi(KBKCA^^ - yy)g?GVsTZQ姵UrT{Jؚ,GY .\g9*i*ܞ>Qӧ*k. 
ުz*/^ 8׋SSI 'B+NAN&VL 89iq 8}|Z>>-NA.^Xxq b)e^yq*\g)ϊSǧũЊSP_8%ߋSP_8%ߋSSǧ)iq:m"9"DP QA]3Q|f8GIF9my$G-ϙ6Qhlu66N@eQjtP'kqFkp'2K@4HAN@4HF j4JIIIj3Hb*O+ U WA>4H}Q AAF<#Y*ϠHVyuG3;y_p\~N@H'APp{$'APp{$'APРpIԗ_C j >4-y%~?^761ƿ1kGmv濻c֎|f֎|f֎|f֎|f֎|f֎vi;:>>5A֚OkMǧ&ZSh&jMp՚2>5e|Vk>>5A֚ZS=}Zk A]>>~O;Ou{ ך{ r rn(ZPh&ȹjMsC՚ 熃Zk\k>>5A֚OkMǧ&BAM9ۄ4nҔSMHSN6!M9,Y .㳔\g)_Z 'ȝ:O;Ou wyy>>3N9|fr8\qʹ3S&)9Ӷ<8 .33ϠO2ϠO2Op{$O2Op{$A]M, ꋽeB+9^l%gPߌ Rr3H)"%gRDA*9>>)9>>)9>>)9>>)9S4J n&$%'MHJNp:!.9 .3끕A] z`%gJNK A*9Rr bUeP*XU}儓2\UʠnWVU-U%?Tǖ2VU֪JʠOˠZrԷTK.je?Ÿ-}&om>*AfB*A~s-B*A~/B*A~4}|U>>*AfOJǧYвJpe2>*e|U,}|U>>*ӧYp{4nOfӬRw__2<Z 껯ǑB#A_hq$_ -~ő ?q$q$8i 4}|Gǧpm|ZN i9)Ƨp2>+'e|VN>/<uwRhY%YвJJe8;x#'B\h#8Ok#'n:8Q5qnlM[G%kz껦'BKAVBA})ԗBA~)+F4m>-AO Gǧ B+A.V8\8 pyK)-%[gېZh#/V8- Z{O Gǧ#Qh#/V8_h#V8_h#AO Gǧ#ipZ.\g# GpO Gǧp{pnOQ=}Z8 pu9W~| pWq'NIZ8 B 'API}|N>> 'AOIp}.۞> 'A:<upԭIP6'A]<Z8 .pr?A]>GWz}~렮^偨QpB DADj)AݑcMŚY &Kǚ.mk>>5ScMXSh&k&k -kǚz ǚ 9gV `krدfد8\qȹ3!όC5k>39|fr8IȹF!my$-ϙ62(ȴqFA5IFA7k>Nebކ\m8h AZ&\ireb'h'h'h'Q !A.@ 4HkH !y7ȑg^xywF><>><>><>><>><>><>><>><ܞ><ܞ>.@{ԵA謮 FgumP7:k[]ԅZ2>kFwnt), Bl]nPbr[\]npw wArނjus6/V'׵Aݖ lumPek>>kO&u-umPkX]}ڠ+Vׂ\q]k6oV ~y]{LZoǬzNq̪Gյ61km:3kǬ̬̬̬̬̬=uׯcsu1dm\ɎY%kJv*Yzm\ɎY%kJv*YWcV|f=u-oÅVׂ6\hu-Dյ /VׂLZ] 2i] }|Zׂ>> hA -y_(}ZB hAj@ ;m Z@ ;mאЂZ@ JPh-Ӏi@ 4Z@ ;m ЂZ@ JPh-+Aǧ-Ӏi@ 4}|Ђ>> hЂ,Y@ .㳀\g-Ӏi@+ܞ> hӧp{4nOB hA?<Z@ .㳀\g-Zǧ-ӀVh-pgy@ r(Th-Z P Ђy@+i@ 4}|Ђ,mOOZP7:kA輮uԅZPbkVׂ>?gP=kA]Z] u-۲׵n^ׂY]+vBDP6`z ,;g>>`ux ,[gnux+ \g, vP3XPo=?_ymh(|vv6hh|vM56g|fϮ8]q>3|vgL5ZږL5=mȮQ"kDvk8]$k8]q"+D6HANdJD6HA mANd9 F0D6D6DE)l^?ȉl^?ȉQBWC 9t [ F ]5H/A]>> ]>> ]>> ]A]5HoA]5H/A]?555555C2>]8t .55ܞ> ]ܞ> ]5 e|q\ǡk'k'+ȡkPw- ]սBWC A*5A]Tj4JBW{ˠOBנOBנOB2>-OB2e|%tK n/kPZ ]>}s=?]W .hmI)o) nm/mmpe|\% hq-qmPlkg[\=ڠ}|ׂuӱ6ŵAt, cqmP7kAk8 kA~mmxYyLY*ǬkrY)ǬY?kәi?;fgfgfgfgfgf1gmF{cڸm혵6nkm֎Y[kvZc|ftf݂]h- JBKkAA -y)$Ҵ iZ 4ԴVhi-oւV_hi-oւVPQ Z8 m Q -Q_h -Q_h>> GAOQǧ(pi8*p\g(Qp2> GAOQi8*ܞ> Gӧp{4Z8 +B Ge|,Y8 4}| -uppTh( ThU) *9ZU r),rPRPwJV>>JAVORpō2Ӫ\gUVmZ kU*UpVnt,nZU U)kWA ;Gߩ=*A%AzTdڣ'}Y .\g=ޣ GGZ =*ޣ>>Q֣@y =* 
.Pޣ@y*\g=*(Q֣6|]umh*u66Qhiu6Qhmܣ̤Gq3ug=ό{5k>3QeңѪ}2-mܱњ=L55mܸQiu5j\mܸQju)ό5?I\(/_w,G# JE&LbnX>F_#fhW m ~ C`kP5ƚA8T 0U -nQ~l/`E }ߢۋ>oQq4 `{uh:۳)؏FGl |߅2Ba9ptB :!܅BwALJP߅]hp߅]hp߅Bw](}pPg`? .?m? I3 : Y3̠&0|[ 3׵0|_ sPK`;e 2NqiCT` :>\Z.-%\뗖` .KES1%ecJpǔ-)qk\'Aul`?I$O"7qHD9@$2 'ALJ_? :>\=9@Wv`zc?ut|zcのtx ]=9W%~\W8l ?>l\\*>Ƶ>S<Ƶ>Q<Ƶ>O<Ƶ>M<5fc:l\sf:l\sf:l\sf:l\sf:l\sf:l|k}zb:z\cc:z\Cc:z\\x֗ r/AZ?\t" rDA3A3A3$؇`??N>|o'yA]5Q~PW`U#pqP`8"9AmjNO )vP`Ԧ6_u':> wt'.N\ӝ ħ;ANt|ܾ}\V ^B?ǤA>':>.j _^vB?ףo=mjAn|IyP?:>nK|H?m\BZy{/Foރ}{Pڼ= ޼=G7Adܼ7%>mރ 'ojڼ6AP޼9@yt|ܼ=7A>y{noރ|p{~P6%>mރ?׶?מǰy=ko_kk}Z{_ט65g79y͙kάo_sf}~ k}~o_k3ҵ]MHv6v?s}Z_ڦ]ַ믵]mׯkάo_sf}36 5E,2;ؗك eg4>ku7~_`߮aopn}9?5`{"r} >El`_:>} >K|} >K|} >t|Xfee fZfue gZfe2;ؗك|i=ȇك|i=ǚكm e`2{o-62;ؗكALJe2{pq2"z8Ãmk %$|jKA>~tX:x,ywa;ȇlK`_%o/yגwx-ysRKA>'sRKA>'sRK`_.%__.k^^kY1.yGcZ^OǴďi{?Ӓ31-y93-y93-y93-y93-y93-y?%cZ>%>;<%>9<%>7|Kk}a1-y嵾}LKk}1-y93-y93-y13/yj@/y7ؗ wA-y3}+,yS67 pO OB?/?l A-AzP`zAEuP`Qz8_TAA\z8ħppO%>ZԒ7ȷAμ }%o/4/y|y{PK i^P7ȧA>ּ -A-y}KyPK`RԒ7ȷAM=%oqt|\%>-y`=gB/?j ^~󠖟%>-? K^~3ؗ A-?|kz[Ϡ󠖟A> g%/?|,ycσZ~ .i\[:=>>5t|C>ȇϓ}џ{}џ}6-Wި`a_ZZWBկH}k-k}_3þZW5gկ9~͙}kά𢡊a_}﫯}km կ!ZWFk}z ^Ze[ybZ˾v.˾wٯ]~͙]kl.سn`}=ئJXt{4: Ew/ۢ;݃m=ئ`_tEw/ۢ; ="lcK+Xts/9݃zE`{=#݃K|}=݃K|}=݃`:- _|ڂͧ- _}ڂݧ-<ط|i >矶|j >'gS|ڂ|q} >ָE[A>\-|a >\[ALJ-`[^|q|/Fm<طm [%t|؂5 ڂ-xo|Im|a [AKJ[AKJ[A-_߂.-_߂:>l[`߂!- ڂ!- W׶?SWziE~'VzޏiE~VטWiE~͙iE~͙iE~͙iE~͙iE~͙iE1ӊ1ȯ1ȯ 1ȯ1X?}L+k}VZ_?"L+k}EVלVלiEzEAȃ}T A]A}}P`evP`e.;ħ]vf?(V>Ԋ_Ԣ;ħEwpOAEA-|y͋ _n^tv;כAEA-|y#΋ q^t;K!x ~P[`_<׸m-xooj t|܂A-xpO[M\t6ǒ>Ew/jZt:>.}SS ^t(;GA> :>.jKʋ _R^t;ȗA>EwpO%>-;>EwQ/|zCԋ ^t}oNn1m1m1m1m13n33333c܂_#cڐ? >\= >Z= >X= Ǹ!ַiC~o{ӆ/}?!Lk}C61mȯ93mȯ9ӆc?3Aσ}:y>?\<>Aσ}}Aσ}}~P%7! };|`젶>=AփM~P`n=>n=Gڭ[.i\n=Y!p[л _ޭn=wA [:>jϻ ޭn='w[w[Aփ|xŻ .ޭn=[:>փ<r} K^>?yj}\<>oA޼>y7σ|y}t|\<ǒA>>y%σ|,y}~P%>σK|ZAyq}~P ߚ^<ȷA5>򭹮o}kl[v믵OZ߭>k}Zksw13֯kά_sf}3לY߭nv믵Z߭_pu_kյ[ Vnv믵=[퉯kmO_}حvn[w9[͙m_߭{vmlX/6{//b9`}-,x__. .[L6.lb/۸x`_ "ؗme` 6p/p/g,LA>t K ߮ K ߯ :>\.~ SUA>Vu sUA>Xu l%ϷO!7 `!  `! a0pCt|!\ę`/L0܃|L0~&lg`? 
3A `L0w~&OgAt&OgAt&t|8`, ҙ`, ҙ`, L0K|L0gALJ3Ù |L0ȇ|L0ȇ|L\µ\?xCƟ7k|L7kc|L7kC|L7ǩ7tCtCtCtCtCtConpepepdi1}c!\tC 1c!\7tCtC̶O7wX[~ I4}>F~8ƃ7L?;I/N?؇郺$%>IK|t' .]%:_Lfm&g~P`6 (uD ꠎ(>_xzPG`OAQK|:tDy-[Q+G:x%经(A>}D :>QG:V%׺(A}D #JQK|: KB_X| ԅ%/,ua :>^XO>Sn 7~yMnp쇛:7%>ngګbU砮:A} N/F_uWU砮:A| N>_u|Wt .\U'x :>^uW2U'ȗ:A} eN/c_u\>o:|^k?kG=kO>OZ?>kZk 'k3'לY?5gO>9~9kmֹ|py 6B^'y Z;_\'Z?ϵ~y?}8N>5g%98\[~|-ڠ}o=6iHMtGl+vG ;?g< _/`ak]f5.3~ll`akpakpߑa"[׏%_۷%_۷Ǵ6cEj$؏im5ǴW_f9`z =د^m|`߃5ׯ^/tFIltĂ$6F'A7: .ؠIl$'A9: rIlNbhtl'%~\'%<=3؃u#_A>ulϋ:>?9 @ANrst _._:>?`r sct ?9ħ翿}kGRg'טy͙y͙y͙y͙y͙1?{>=>=>=xoyyZ^tvx?33[>1h>~>~>~>>_~σL s56| 2;_cL؃>'!<ԑ7x :!yk p7 .~ A]yP.%>]K|:~t~8x:_,py87GuAy}GqPG`Qԑ7wu .xw܃9k;Aq| rL7ħ;nt|7yw 2q| r27%>qK|t .t|u :>qwܠ7| r7Iw 'qt|=;nA;? '?{y΃t~I<«sп uuSy}K|:,tX:>aAe>,9\rsAe>,a9ķ,W򋌕Z:"Vcy|bX:1~uի3W1gVcά^ǜY:9zu֯ce-߳)2V/cs"=VfXH3V/Ez\Wc"=Vz+rՋXHv_}"=v̙-wkul:Z걲+KkVZEk_dvVZ|? b=e_/ȋŲyqWO_\sKe!B_moŲO S/֓zb9XO_,7ŲTz쿸WO_\'/.ՓAK|d'/:~'/:~kW=e'bsmXV&/{ee߶q #E>8/#93EN8q_G/r|q"GAaG)K%zB=e!S)S_?eS_?eEPá|PEq}jwϟ ǠG>/_?8r4}X/O=tp9~^_P/_䠈qȇ(_,q8b=8rbnX_S/Tq|8r_=qW_\/:~/:~~y/r^qWp"yqK|8%WZ-_c_c]c_ߎt9ֿr|r.ל.ל.ל.ל.ל.Z[g?|xUSWk} |LWk}|LWk}/1]c8_k1]]c^sf^sfg.-ؒ я-ؒ я}S13_L s5:|2;_LO c砮9+m/pn\;ħ;v-[^n~P` rA<ʃ:~ǃK|:t.< Ax<t|<A7j: x{^.z:9<Aǃ}r4y<ħx<t|<y< 'Zǃi}.5:I gi_GƃANƒMxġAN??c9 A]|r<ħ xt|<؏urF<5%>΃|vt|oAyv o~;?yv󃺝A΃v߃};y|v\<ħypOv0yÌoA39v0A΃K|s[~c~kZkq_kߍkv3Z5go9~;͙kάΏvZ2-3ί-3ǯkmj㯵ZfcG㯵[~ݯZfkZ;kά߁_sfgW\.Aܾ}=U۞*omO [`S 5E9o?ȉS`rP-nut|:>?ș]wANߺr]} ?]9RANͺTrn֥zQO7Auk\7Au _q5_q:>ܸn~lG`qr$э{#n܃ItH$::[-Ag~>7G|Stv:`;|/t>N|t>7N|St>7N|St>7N~:t|8F1z?~k\Auk\Auk\`?F.cr~yR}Gp'1^1]1]wZ+v}etPWߎ}!rP'`?2ԉ;؏u#A}:2t.\Ӊ;ħwpO'y:>';x:>n߾僿ak.;yor[R-|y><9> 3l#/A(9zP ';_Z|i r5ɃK|=ߟ-?I_E$(lE9g/A>}Q>rf_|6\E9xQ:>^Wσ(9xr/A(E9'ߢkyS-:O_ޞ2gߢ|~> ??qO;2;ȗA}ew/cߢńx>[t1ߢ<q̷ cEy -n%>ݢK|EՋCk-Zۏ1,a0~mp|~8_i5f Sa|0>tdv:2~5lUg*ﳃm nj>ׯK|j>ׯngGzppRnN~Rl;3 _?^1~m?6B~l`?ZBmpC1~p1~poALJc1~?pn>۷ml 3`[V-+?ؖ~l`?}R?ș\ANкrys`#t3 ^9?t\g%~3 (9@[|yIzRu`T@W9;:0AՁ%^t| :>T:0wT U9:Ձ%^\_9pAU * <'~p.[A>Hl[| 
?Iߟ^Wt|8?<?á<?<g%~\gAǷt|`^f;zc;Z1]mZ1S;3c;19393939393>vxl=c;1-cw|>cj\cj\ yLk}.1]93]9c3,cSfcf:36SS3cS3cS{ƦcMǖ -36SSZ_cfn* ABɹdvn*T ؗ^oK:^렎:u/no\S ħApO}78}d>2 n7E[/o\A }uP}`_@T P7A }\7ħopO}%> K|۷}c 7:> }cc`_>){`?TS!OTA5pPM`o*TS!WAǦBk=K@ۯzfS&A.:\rE.\MtqpkYKA.]T"ȱ٥ g.ϥJ%>.t,EKA΂.]9 eA^k9zA5\rr]#ħFt|kT]#1u H5"]rt]#)ukT.t|?ɢA=|SE*z9G\S#ħA=t\rq#؋Urr#E*zߟKqEOG`=E .zy v㠊%>=K|*zAǢGyqms=qgzzڏqלללללcZ<|Xxl1=RXgT'k}61į]c:_뛐tTTטטטטט-3/[g,^<|Xxlx3s"\27/Lŋ s}A'} uP`_B->ؗPu%A'} \S}"ħDpOn'ϓ݊nd⠺ Au+K|SYA/}!tPŋ`_T"BUPpON%>/K|*^T.m>/ŋc"X:>/ŋ۷ŋ۷ŋ۷uPHuA9}셄s{!^H8>G9}s{ ":>9OlV?جwkOY>VSTXM SyS-K-A.u\j rRKZ\bAa]9:L a\gd|T .sPu 'ta] rBw& u :)E pi] rw栊4A.T .Ht|,4U ru&ȕ4A \ re"M㻋4U .H\S&>2lܲ r.s&ȹ-j9ֺet|l[6%>lK|jT&ȱ- Zlkݲ ru&ȱ-jTeز9MwllK|j[6AǖMGϵԏeZZo?k-ke3CZoټz5g[69޲y͙kάleZ[\[gh\[gh\-*ZoټZ613lkmQZ[\ڒZo?V,zᵶ`9޲y͙1l^cfhټвye3C5f͵{͵{͵{͵{͵޲ym^cfj 25lZ6LN-`/\Df-x1ؖP^lK`/^ %Tijpizfpi.H3:D&}(E`/ `ۊ{&|W.H3v>^l;`/ Oi'؋4m3׫ K|0׫ K|H3׋4K|۷EALJ"͠CfH3Pt|(o4ۇ"Mp=r}>6Aʠ߽33쵟[ `{gL3 &^lALJϠCg3Pt|!Z}۾}lm>ԛzipכMCi6ulSi6upSi+7DQ1jkHQ1*؋Q?5 QOŨ%^\Ũ`/F rb **F rb R*F r%bTwRjM{kjqQkj55ȍZSK|55[SALJTRkj!Rkj!04SAΟ* rTa( CUt|{ah04 CK|0셡A* r Vah CU@P.읠_L` ,N  0N +j{gp 7{zg!3͉#͞k۟͞k=ԸLk=~L͞ǩ͞={fc͞ s9|k}Z&1*=cjU\[Ԫws͙1w13w13w13w13w13w[gl<|yl=c{1wU5fNdnn<&A5/}1qP͋`_LT"դ DuI\S&X e#cY,spX9LW}tPem?4RA6i}rPM`_T&ؗ!դ eA5i}\S!ħ.CpO] %>5iK|۷Mc&ؤ :>6iMccccc`_at|=A[՞c'X9jO+劃{⠪=^8jOol՞c'X :>V{՞c砪=%>U{K|T .t|=oo\ rKRP{:ԹΥ u:g녏FA|5 rQkwk] s(5 .k\S蠺FFA5 ryQ3w\ktP]"w50r)U+LA\a: S.WK|0Ta :>V0s)ȭ+LAn"7Kn)TK)- g{R:RZJAǖRpO-ZJR rwK)- g{R rwKAXD:"RHA\D RARP?KAAV.\ ůRk۟k_c(2įk-kZKZ/P KA9^ z͙RеsGIJ:򹣤smQҹK:Z/לY/CZ/:ֆkZukmн 51$8Bk ט 513j^cf(ԼP 5=C=C=CZ/Լ13j 5M6{b ޱlnb۰;v^l`\DeSA%}*hO͗`o `[A{%_b.3ئ` ;k1mZ`3ئ%^L\ń%^L\ń%^LJW{2A kJpܾ& +kJpʠCe2_t|{eA72_l ` AW[ +m7_t| :>W+ALJJWzepWzepW+CQ%-n2-p2-`/ :g>wTж n 6SA6 mmAԶd|j 2>mzfpmm36m3͚6ܭm36޶t|h :>Tq^Sg5Uq^Sg5Uq38K|3׫8UALJ*NWqFTgkDUq.>2$8\S358\S358^t| :>Tq^\U%^ *  rM*  rM*1Hm`o Mm9m3ڀ6,[P3P3AV_>>]>>[>˵>Y^sXp֧TpXpXpXpXpyLk}|l=c{Ƃc*\#5fK k*UA >TM%`s/dM%˃|ypTXS9}*TM%؋%>TK|b .!\BP%˃l{P`xA_}=K%>K|5Tk. 
(D d%菌M?26Qn(MG&#S堚(^:>6QMc%D :jou]j{ࠚ(8&JwKc%D :>6QMc%D9&JpOM(%>5QK|j(A&&ǚJ[T55 MqrM%XS9Jt|_K|\ŷ}>K5An\ rRM[(jCTsP s&\ 2>jK|*T9RM;)j:jT>ϥ 7z.T&X :8A] r:NkDq\#sPu ׈T .t|8U r:NRqd8Aj\ rW:NqոsPuc'X :ߪT .sPu w5q']8Aj\y߿ rPtW'!sP] Ewul$d]oɮNwu?y'} ]|w=^k_]ZzW絖yswu^sfsqZ-8ז}ks kZnkm;0A6FCZƟswrN^kֵ;y [זkgɵ;yM13N.m6}{$lF1ۀ}VEn }VE\}%?uV\M%\M%丈&`1f`or Y)؛mV &`z-`pz-`1tc :x1[cn}6{c }ALJ>ƠCc}A_ă1.l`c x1ۢ#m1t|c :>1}ALJ>F1zcp1zcp1}C#}>hVEBMAn)BMAn*lUhr :>491t|hr :>49zcpﷷ/ Cpܾ}o: NH` 2>u@: O%\` r j r wk rHw@: A1z"%~TKۯjIpUzdUKZ2ت%^-\Ւ%^-t| :>TKZ2ե%\]ZrՒ` raj N rj N{dZ2P-t| .j_{dk'UKvRdk'UKvRdk'N.w2ȁX_L` r Vdz'$=2? j F9ȵcl\cj&\cj&\y?ȵcj\cfl<5gcj|k}==16=SZSZacl^\ccj^\Ccj^\#cj^\cKl^<ŵ>~eYf'AI{Pe`T!دUfk|pOe %>K|*3Tf. :?2C*3 :.\M=ħ$T%!XI:>Vnppk|$T%!؇$pt|$+ AJBt|$+ UIAU(|P`? T%!g$ t|$+ AJBt|$+ UI.\S%!ħJBpOc%!XI8}XI8}XIx?] bWr2v_!}c_!W8Bt|+ %>K|+ܾ}+ܾ}+ܾ}+ܾ}+|} 7)+T_!W2>} %>.} Q+8HN}+ AǾB\ӑ?ķ}~۾}H;ؑ8ǎDw$#Hԑ.#t|H;Ցr#DQw$;F(@ d.P$s"% U:>(c"ħEpO*P$s"% A.\rIEKߟ^=)mpFǃltOp#?-rFG7: d#Ȏ20.?l` p6 :>ALJC`%?\`/ DQ`п'k@p(ܾ&丨AW ۑwȃ;OAx yy*` :>T *%^1\C }P1n>T ۷  S`b0T1d| ._?{`!U R`!U R`*ALJC`po*K|.0-[[l?l` Az`p` r \D`{0S`K2$S`K20?t| :>z`p0%\0%\0%?t| :?xI{ ~rYRb%U)qQbG"ثܸJ1ȍܸ-qmAՖ mk۟ؖ?t?t~Lmk=ԖؖxLmǩmk}ؒ5u?_juA]<}P`?Ou?=~pO%>]K|t`Wu?ߕuwA]]yP`Wt.v A'~ pKkBu?؟uA]t|A~t|uA]zP`"CA]}:>^u?x:>^t >*1=Y n9On~% #A]I+|z>؏u% :>胎7oA}F\Ӎ>ħ}#sFd|oA}pO7u+A|P7`?2ԍ>!FG>x:>胎7%}pO7?/y?-xP`??ħ~pO?x?~{Mk^?>Ƚ{An r^@5\zͽ{A^@\S/ ħ^A\zͽ k^s/ {uL:>뵧4 ڃNu[B?c\T8BK2\ߓE**rQ!m An~>%ǧÃl1s!.>S Z m}^bx}ZeY]-Zzᵖbxp6ch1eז| S`{)37+툋(] upk{"Ku쥋`k $KK|t1KK|t1KK|tqh`{{b=ڃt1^l%^)\땂(] :!.JNJ5^`{{b5^lALJŠCbt1Pt|(] :>.t1f`/] @.@쥋6{b DALJŠCbt1Pt|(]\Das:4q䬡  uW':ثx=IFՉA2N z:1PĠCub:1P\Չ%^n>T'۷ՉCu"}P8`2>U'OՉAƧ_N .uW':ثxՉv`_DubՉALJĠCubp_oK|:1z>*ۧ^EpTЫFTAnDի\뽊%ޫt|U :>*W1եz\]W1եz\]o.t|hd{#cI52{S#c752{S##ALJFƠC#cp72z##ܽ1ݛܽ1ݛܽ}ALJFƠC#c1Pt|kPn}a/z r1 s*z r1/&G=Sc+==S"1}?׶??q _pqqטcZ_}ga A5=as̓lz\sP`kt|~ vDn?;" I:">)pP:"%>uDK| :>9Pe`T#U~KUUq頪 c#:>6=M[|lz\S㠚?Gc#:>6=Mc#8GK蠚>T#'jzDt|lzAǦGt|lz_}ۯ¾Wa_ArO.YA9]rP`T$AVn=]]t:>K풃j%AvI]\S$ħvvvvv. zD]d|n%AvIpO. 
AKP`GT$].9vI]t|l%%>K|۷%%>Kd$5%A. r vI[XK]%%>K풠c%A.=. rvIKOK\z]풃? dpOՓYZ9IVѥ ׈.FtiJ+AJt|,TZ .rP ׈.Fti%5K+A]Z rA.ci%XZ .эe--lh9elyQWlbF 7u&X9MkDWl\#b ?Mo?7׶??sZsVsRs7ט7s͙-{;coZ_~`X:/f`A5~}X YA6~}vP`\S'ħRO}sP`\}Ao}p #>=ȂAlNb&X9%ĂAl}69A5a}Nd&ߓMO6a};&L`Մ :>6a A5a}p9&L.Մ 堚0>0A&L t|l0ADzKOw]|e .T%7.%<<pP`8T&+U CC 6AA,Tq.t|, 6U :>lc&ħMpO۷۷۷۷` s&\ 2>l 6%>U`*{ 69M7Dd&"AǂM`t|, 6%>U9K|۷ 6%>ld&ȭ 6An\ rMKOl` 6%>lc 6A. ] rM Hl\@`*T .&t|t5'9A. ] rjAUs\Ѻt|9%>UsK|T5'Ƚ9A5] rjN{MWsksPc5'X :>VsK|c>kZ8:kø@:k uk3dg%ל}͙J1T쵥6{mi^[l#`/4&`- :>ԖjKQ> |4؆6{h ^>lkz"E/lW6} 53mxGGALJѠC(ŇQtEA( /؋@m "`4؆6{hM%^lSITE6{hM%EALJ"РCh4P:Op7Q f{gIy374Iͤ%L\ͤC3)}L n>4۷ͤj& j S3i4Ld|j& .f_{3iuC4غ!Ll՚`o& jE4[f4Lt|h& :>4zfpo4כIK|tͤA.LbNͤA5L^Sͤ`o& rf_o& .fҠC3iͤAn'LvRͤAn'LrWͤA.wL f_o ._o :>tTi{MuTi{Mu4ȵ:MALJN_4 .NSwUi+ZuUi+ZuU){7ALJNӠCipn OrWA.wUxrWA.wUxrWAUx _/< ._/< :> O`/< rO *< rO *<{i/? O׶??t ttt Oט Ot͙t͙t͙t͙t͙1A}z=RSO>?RSUj :>cozK>h{P`mA}}դ&uPդ`CTϠխ _Peo)l[ :[Ζ[-=ȂA}:Q_U0 F>DpP``SA} 8QOU0 ) X0 :>c(X0z f:s(;D! YQ[wK|;DAAuqP`oqT([! Au\S#ħGpO-ZAQCtPc(! :>vK|!:}!:}!:}!:}!;D^ 2>w s(! .C\S:D:D8Q`C%! LCt|;DAQpOmc(ħQpOY:Ϛ-xքn&5 7 T .&t| kBU rP@ׄ\&.5 w TM(ħMpOś7A&b @An :&PV7Mc(ħ&PpOMjKu(]@AR r&PT7t|l@A&PpO )&_˚SaM&tpTX:}* >քK| T .&\SM(X :>ք5 vM(UkBA] r?K|T(ؖi ׶?>c>kZNk;=w蝞Z־mz5g;=99n޵?u3uL]` 5ػ@mD .`Qۄ]6{h 4t|{hͯm~ .`_ 4`]D'>mR `3&6h{%e Q ϠEg٢3lQtl ^`3؆` e=3؞l` gwr۳;{9=t| :>rz9`o v _Jj{fLkTfpk5`o ~Ck[!5ov`7 .~_7 .~_7 :>k5ޮt|h :>k5K|]35ۇvMp]ܾ}ho5ծlAƧv Sf]3Ԯ\%ov`7{f]3!ޮlh zȠCf]3Юt|h .Cfpk 4-[h[(lQzMAT\%^t|( :>h@3 4ܠ@3 ^RA. U _/ ._ :_Ӣ]~Zk.]3Ŝ5ޮ^SALJv͠Cfpkz&5AnDծFTAnDծFT`q :>k5%\=h E&}*hO]>kۧv_o .v_o .v͠Cf]3Ю v Wj rv Wj rvEh3sVG1hS1aga1a/c*\_Tֿs͙s͙װc*\빏eصxl9RN7L|P`3A5v}&ƒlJ;ANt|?~-sPu`}>:OOU iA6vA5v}:NO_>T$؇?$<U :xyug'YytP`gA~P`T&ջ AnA~P`{7AMwt|5A\ jAUk|Z՚ 5AjAUkpP՚`/7T& U rAUk\S!ħrCpO AjMZsP՚c&X :>VkK|T9}X9}X9}X9}X5AîT&\ 2>Wk՚5%>Z冃u{7䠪5 y՚`5AjMZt|T.m>VkK|q&-{7Anyܻ rIAn\w\S&ħMwt|T&U{7Aܻ AnwнԻ .\S%ػ :>nܠwͽ 7hT&{7A. 
ݻ :>nK|Ի9MGwőu&ȍ{7AnDݻ9Gwt|{7%>5rۥ .v)'%K9,~RGrrb1,RXjj)g|1b3j)g̙Rε^+X-匕^߶W[ XĖceq E/!Zb_E/!Zb¿Xv^baX>_t|E׋>_t|bm} _,2ۿX[@_,}˟kEeP|VXkEePb_/9;+B_\d}|}|{ eh{X/ b^-/ b^-/ bz 苎z 苎z |E>z72Z@_+-/_o}ЋxXz/KbaX[@_,=/.jK|%-/:zz 苎z K|%zqЋ۷^ܾ}-h}s4Z@/Z@_d|h}%_aX[@_&-/ƋRcyXj,b|_o}_o}qQķ}z K|E/r˃AE.Py}%>_\E/.բ_/|ϋEnP"Y(|4U^E/r DZ_-|qVgW3_t|E׻%:׶??Q]1Urc%1Ur/c\c\c\c\cf\뻴T7iv`*9.Z_F<<(}Pu`Au}Mă&:AǺNt|<'σۿ.Auy} <>TAu} (}Pu`d]'Iuug]'Y :xntcPt*;J7?MtWt|,K7AM:ՙuu&׬3Ag] EL:sPՙ`//Tu& U AUgpPՙ`//T^.\Sy!ħB:t|Tu&X :>Vgՙ3%>Ugn>Vgn>Vgn>Vgn>Vg>L3U 2>Wgՙ su&ħLpO元1pu&ػU q{AVgt|3AL:\S#ķ}X .:^ǸW:ƽ 1q&m{5ի .W\S&ħ^MWt|T&͒{5^ 9^M9j\̹Wbν{K$ˑ YAK1olv?#I"Hj&X>> >팥p5B jw5A&]Mp]MC &ȇ!vWÐB jtw5A..8>> &ӻ2>. B9w5Ac&5&~WDj(]M]Mp]Mǧw5A}|zWShw5A&Q5F~W(j>j.w5׶>2δ5v}m|;s>6Fo\ә 5Υ\Qpu6sFAٝ As6w.eVAJe ߹ e'w.>>s5 AZP& Ao|x3H A>\LJ7rx3HboiY -A>eqv  Ү(7?ɇ7>x9͠^o|fAz] |f^׃|>3HA> G̠Og}|r>3A}/ 묝 + K |&3tA:` 0|f|>3H ap0 .e||0A|f'3>>92>>\3Lp{|&=}r>Sh3FLgu|v;;K ߹ jb`w.t;A: j4ʝ j e'w.>>sɝ2>>\Ʒ=}r2\% ^\% ^\eP,s\w.ep߹ e'w.As \5;AePs0s\|2\2>>+ɝˠO\$w.r2ٛݹeP4s\5A;e||2\|2HAsC ߹ ҡF\PQ\5c; _T e'w.>>s\"w.r2ܹ jtiw.]ڝˠFv2ѥݹ jkw.As\w.ep߹ e'w.>>s ˠfv2ݹ jflw.۝ˠf}|zzӶs>.m١KХ?cvƟ1;ti`q1;Xiqzy-c ﯅vЎYy?,A^ #cOY>>=f >>6[)f72lFpڍL2> &Ȼdy,3 v]2ȫxy?.vKg.A^c 1K %vwB;f ;y>=f %{JPJ^%~7SW 껩߫}|zRh*A({ _ ڽJ/ ^%v䋁2>.㳋2>.㳋OU>>W){OU>>W ^%U^p{^p{^p{^p{^AW *v;Rh#nGHPw Cݎtz;㇠OoG>> v$ۑ2>;a.۞> ^vpގnۑmz;R ^oGHpݎHpݎ}|z;Hݎ5ۑC~;|oGHP!)ۑ2> .ۑ2>;v$ۑe|z?V/@,H\. "vp䋁B;@\b\5B>>="Ӄ2>/O. OE. OE. OE. OE. 
턡Nd' A!q0?SB;aj' A!105Fƈv:{\)\?rp9RFd_)\чW #l+2DhKkg\/:Ɨh/ W ) Ҫ3AZ|0HRiUMQ}|r03A) LaKOҒ(7 d Cp0 Coi% AD<ȗԖaAZ |0H+a\"ag+>[D=A ={AzA= `^}|rO0{A `''A>WD;ԗD;D;E;k; A>6'Td`J Ri;Ke|\.vp>>9@O}|r20㓓A .㓁e||2ܞ>9nO ON''v20Au~Pw^r}?Hs?I}?~'2>nmO>>5~PS5~PSu>2>q}?A>>\j4bF#Vj4bF#Vj4b}e|\.~p>>}|RI[>HuoA+ܖR| T6J[>HuOA7>>i}|Җ.|pAn}|ҖOEH[>i˃\NjxdfoVN~r9=ӃXY9=ӃXY9=ϛm 옕Ǭnm1r?cVNG?8x_?ftom.rcVN)y PhuBk%Zs-К o A^jsisisisis}l叵ZA~{.> OEkSZAH rGZhs;B럃yU*9sᷬ-ll>-rd ElA~-9o"M6ȋAm>>m>ok>>m>>m E껜A}9s"[䠾?{\h-r{BkZZBkƂdXp rxP{ A< 穽lǧlǧlpe|Z/^p{-ܞ>e O{ jA]ڼ] r=Xhjڮ?vJOՠOՠO2>+]-v5{ ]kz ZkPZ^zӫנ^Z\gkpUe|V}|Z}|ZZݫנ^u}5WAz .5Ϫ2>^>>^>>^ z z rwXhk˯B^ZЪ wz rwi}[ii\gkpUV}|ڮ5Tv5WAM? jdP$~x%+ɠ^I>oZIڦVǬz ֫y(^5+@A^ W iiiiiZۧj?UjWS^pTW=hA. rZhh /,V-VV)* wbV)+J1ȕbߛji}[i=ob&zPB}cP_ռo ˚A~>}cP_o,1TA}ЊBf`P[ ꛫV }Z }|Z }|Z Y1\g`A.  r1X=}Z nOz1=ƻn*/A\^ua/+wAvAve|2YWh_>2- L#bpȴ,>2- L2>+ .b0ϊOOB+z1Ӌ^ *b0Vpe|V Y1\g`ǧ`ǧ`Akm|jL}cAn| o raVh}c B\Ծ1ȅYǧ}cmWǧ}cǧ}cpe|7Z\OKF^52.tA w̤F53FNxm\]? kqw>6.~0h=m\] kqwkhn-hjOm\^ݳ\+PAZG\R:r֑ u$% #>>))}|RRII9㓒2%rI9蟊RRkzpT nA!?= A!(= A! r9HEZ{A*҂CRrzAznr'=䠏{A>>!}|Tk0MA}WsPVuQA4An0 =lU㠾)[8V5j I8KU>>q8 WT5j1=}R5OBu61m aAnk6qPk\M >gm⠏OA\mbF) uGpPw$+ uGpPw$+ upP+ \.pp0Ia80ȅV.h 5^A.  ra8H2>. 
qa8e|\0Iap'᠏s%%epȤ nكaJd}) wA> r9H]tT I9s'2>>qspw`jd` F`jd6 x7ol1789ffsVaq,t*6Yϗlm=zcVoqp6NYƙCGiƁcZ}̪6^x;fgǬlE؏ y-?k ߅d ]heX /zVy+*6ȋ^UA^ WhUlOؠOؠOؠOؠO#Ӟ6#ӞAގ L{#*6kLUA^c rͳЪ oV*Ъ wVK,*6]bUAzPؠOؠOؠ۪ؠOؠg'}_ 7A}7eޠOޠOBkxCxmKܠ^u#JܠAKܠR^}|ZY\g%nAݩ r[=}ZnOz]KܠA^u 7kV7[Ae|VY[h%A-q]yKܠnW^u7At-7J2>+q -7A] r Yh%n[B+qBY#\g`p5e|}|Z}|ZZi}[=}>k=o{.ܞ> O>хGZF GP->:ȅA\}|G}|G}|G}|GY\g}tAʅ*5V91ʅ*Six2iQR7Tm8_O2iQPƍ5IڸqF!I7("i$h/#m>;mb_죍[k|\u[kq}6nˬ?ZAbRqAtbҦ{6 ؃M7- m>>i}|bI=;}db.'؃I(- #;}dRT&E Ճ0EuA.i rQ=HMkAjZ\TR:HRTIQ=z'E?}A껱߃= ߃VX=߃I=;n4nbn'~bwA߃Y={p߃r=߃OI]hnA] 7Z=߃>>q: ߍ^fe^fHdH\2>q={'wA]1Aj R{e|@.rp7I=;OA߃>>q=Iܞ>鿃'wp{n{P!+ǃ\R=r| vQAjw}|RI9>r|pr9>rjۃJ}{P_o}{AFo.㳾=OOBۃxР4^u2>{V}BZZk\Ok̓iyA]5yP8o̓ykU[_RzPw$ԃ#y^hmPVԃbzi\gmpU5vyP+o̓]yk5[Aޚu ּZ2>k̓5.<<ּZnޚu<ȽeA- 5ro\gepe|[Yoikik^hyǧyǧyǧype|֚nOӧyik^=}ښ w| >>}|I??r?蟊t>r?H mhAi r(~A꧃\R?z O(OA>>}|\{r?.v50k] b`W.v5䫁e||50O}|r5䫁A]j ȇAn|uUÀA]0 ȇAu ~P)u~PW*u~P*^ϾA.u~P+zi%V\]qT탺X>ˌU탺X>[U탺uX>[UAq>e|\jI}P6ui} WTvjsp\e2>.;}|Rjr>㓪}'UOe|\.==}ROIܞ>j Ǫ WA r>H]qAꊃ\RW(U uŃ>>}|RjI>e|\jo6=m֦nmzAM?^gk_ki_189fU1ڏYUmLcVg!,c[>#YUm\jcVq^u̪68yL6EYl:Mο$iӯSO 8:fm{S6N)ALO )Ag=씠cvJ9ϱ_ڙAPJ?3rThgAڙAB;3r6RhgAF H9 3O >>=3̠ kgATࠞ93(>=3(4{ЃzPh A^zi A z೿MೢMk'.s 9E "vN{ "sO)>>="sB;.Ʒ=}φ۫_iu+n~ԝү4UFPJ(+2>.+2>J#+BWAݕЎ GPb?(CB;2A]G# bu%C.~QhAbu|gO8 #puQBA8 ?i vɯBP^2 v\g e|v\g A^/}|zPh Az!_/?. 
Džvӯ ?.82>돃>>^zЮ>>^z!2>^.녃z ǯ zЮBB^r^h ABP3)^(녠O>>^z!ϮzЮB]/u녠~Ph AMz5ZїZ& mfm|p>6lhˆke5Z=mm|pM??l(ˆk-c}bqͿN_'_D\/"Q(iKk>34(kNE5&ڸm(jAmˬZ:L:L:䪣:UGUGUG=FY]|cP\  o _| R䋏Aʓ|1HyR/>)O I>>ǠO.>}|r仍Am lnQ6)- n}`ť}2 _| R1HK{0#A|H2H!!I ($?pi A2?W&t&+A: I R(W&>>2ɕɠOL}|re2+ _ .+e|?>mB Lp{`&=`r009 n`fP ; 2>>\3`f'3>>9 ̠)v03Qe|rM䋏ASfPUnbuA]&fPzԕnb|3㳛A `&3g̠v2] .h- eˠ{v2]ep_xep xe'$+A>^ԅ؎W| +Jʠv2|`p .e||>0Axe'+>>9^2>>^\+Jp{x%=}r(+JW|2A EW"+t}(+JW}|r2A .e||F9^ ʠv2+xeP7x ok^x̮+61BiŮPk+6~lJ?4mK?B9|~z߃]`*m=؅J*B#cv53Bcvrkrcv91hp2kPiYJL/TtfzҦ3 6^B]ʱ9 cs*ǖL/T_څJPP ._]99 r.Th*A΅ B%ȱZ]94* zRhG Aj $_|9* G\AhGCm|o  dKrPh"A Z$!Cz-䐡CB e|'[!JCBP r\g*AlA^- ovͼЮ %vBm uAݶ8n[|zmKǧ-A޶}|zmKǧ-v\g-e|v\g-e|vKǧ/ӧ/ӧ/ӧ/ӧ/vԄ_ %_%_>>=| _KP?|)×B;jJ/vRh+A|%u~Rh+Au|~񽞝W~m)ۖOo[J]u9 .~/TJ|ԛzQh7A]# |u!.~ԅo> u.=Z2>k݃>>n>>>#ӛ2>.㳛ӛӛӛӛz8o> n>GPB|h|5YB#ӛOo>#n>~r7vo>]G|u9.#~kv\?x7 hk5j]&hn[>#9h[>hn]>G{磏k5 m|pgƇerq66.?e5۸F[y(h[f-GtrqMg&GtfrqMg&GtfrqMg&Gms&Gms&Gms&Gms&Gev䣏Arcpc>)j %UA>*Gr cA~)hcAھ|{AZ|0H vi Qi -Aw;`ae||09bvAZYbV C |\9 A>";gTdb Q R=3AQ b'g>>9E(2>>\gb'g>>9nOQO('g3 Q jVagA>\gbpQ b'gA>(53AM="ȷإĠv)1 ]J l$ۥD/%5[KA]J RQuݳÄA:La .v04r{ۃA?`PS=nu9ۃA]N 2>nq;A ȷ>>=On2>=nOOn'ۃF=hn|{ۃAM"`P=<ȷT7v{ۃA `'`p4$v{0+ JbA=ԍnuۃ?8fwm;6ٝ@VN?6;6ٝ@Nc|fZx ڸ+o}A |fv_36um>3cz̺6XY1vx?SotzCЦ36Mg7m:3!8>> 냠O Ϯ Ϯ 냠O O O O O _Ap]Ap]}|z}A]5냠&"~}Ld:>=M(^u냠~}گ^A]5냠ϯ:>>Ю|}PhA]1 APS?0( Wv`ԝ z}^hfPbσ{}^hygygyǧype|VZ}ԅ ^5O3+|fV̬oY 3ҶMgev@Ǭdl蘕mc\n1-tfZf̴nәiݦ32Mge92i}lyδ>ky>>my>>my -oPW~oy{{PBczؠ.^ZԽؠؠؠOBc =h WV+j4ה 5eՔA> 5ePw^) .㳚jʠ.m^Su2;הAy,&27A] MdPEo"(zYhMdǧMdǧMdǧMdp5Ao^SZMW;)j5eP_,2o^S-5A Ql}5ma^;kAqy>k^&5z=oqy^۸üF}\wר3ό;k>30̸ü3㮮Lzkqt66^5 ڶ{kQIOyMg&=5tfS^әIOyMg&=eIOٶ)qO9㓞r'=ep{ nO铞2=}S5z \=2>)}|S rO9K[O9SzA]z W[O9㳞rPg=堏OFim1m pAn\(6J8ەA.[BqPS+u9BqpP r8ەZ8˩Z-ZpP2u/ZpP2u/Z0ȵࠏOjAԂ>>q-8{~kueᠾfYg8YkuA qg8;A mm>ͿNC>f}bm"m}6~>f}bB>c'q3>gf}b6m:3Y_oǬic״[1xi 1tf ̴lәi/ئ3^Mg1۸<j%vA^ Th_B$ZiWh_ۗBܾZк /} +/Ϻ2> .//p{+ܞ> OкZ\g_pue|}|}|Zܻu {A ^P< z^Pd:>>>jz/^{AjI5KB+d%]KB+邺EzI-K2>+ %]PH/邺EzIwPA^u -kWmA]j+-[WmAVmAVme|V|~+ϩ{B=\P_y +pA}z2> 
WJIw6.ѷ%5&ڸFO@tFoeR]6.nt]KkT8qIwJk>3.̸3όK2)ۮ{q 譸˶kNe5Z(>Q&e5&ڸl3Lʶk:3)ۮ̤lkmms&e[IVfe۠V .'mTlmʶ m:̂\ ҲlAZv2e'ȕ -;Ai re6HY+Aqe6+Fi}&ȭ 3Ani r+6HL[AZr+6HD[A%܊ R/Vlz bK I+6Vl'ؠOZAbAnq+6[e|܊ .Vl'ؠOZI+ܞ>ił3$VAT r5 em o֠OAt[AuonkP7WuwjP&uojP7'\_ iՠV_ IC5H 5TAnJ PTB*%ԠeVB ^f%2>.\B ba%ԠeVB ^f=Ӡ.3 ba=Ӡ.3 ba=S{AˬgI4㓞ipLLϟmBgt[AǺA}nkP_yWǺ w[\֠O9fVǬjou[mn?6~}L6~=fVnuY1ڸjY3ngfV̺Ǵ:fB_YE/i1tf_̴jәiզ3Wm_ۤcsc&_[Ar ._[A. rrʭ [**ȫPWZQy**ȫPUTA^ rEUhU+2>:-TBkZ mZ o;ByY *Th-TWBkZ Z B **Z2>k .**p{*ܞ>y]ThuQpj. uQPf9{]i]ThuQPWA z]uBkx#]Bky#%ڱ; w,ֱc)%Ku,A],c .㳎:{ːB+C~eHPo !u=2$2$2$Ӿp׬S|?MI5%AmMIP{Sޔ7%AmMI5%e|֔YSqS<ͯm1O7,c06,cǰao> <91lX|.ذq21lX1 ưa% 4,c>3lX|fذ̰aa-JưE(c>/mܢ+x [1xa23eLg-ʘΌ[(cP\İE",gr>,g.nO3˙7(E,g޸|XμQn)gި+3oԥ[ʙ7ur捺GJ9卺HF]Tbu卺IFEyfIF}N--E-E-ar[7[o7F;ިoRwQI[7 oqنum\w\܆uG7O;ި?xS7O;ި?uae|Xwǧum<6c_j61IxlǬ&icV<&yj6~Y;f5I#cVqMrj6I|fV̬&iYM31BYǬ i6~Y~L6~U>fUH(*MgUHL6V!Ǭ iU!m\/*˅cRJPCP=z+ W(Vuz^ZrK o~+A _ .X|d:x-_%{Y/A._ | rRhK˗B+_\Y}nys=MBmEZm%j A? YhMBmZmj A6A6A6A6A6A6VYm\gMp6e|V}|?V}|?VnO6uUfКY3ߛnuf&fК.uu~AWxRhJPFW6zrP[BkIf-IPߌ% rKRh-I[BkI6-I$ ;!A}"$`^Z":zlu]G}jqPF#o$:ɻF#(#Ϻ2>:>>:>N#]55Otmu\qq]5ZwkS:Ow姍k۷qq6:Qqg]5w|fu\qQ&]Gwݵkz^\ˤFmu\6:̤븦3L6:Q]5ڸF-@=F@23O̬?ԩY2sdP'gɠA'AO?~?z˕A*W\ireV +B\QA*ܟ Rdz '+?d '2|<e'$ˠOAZۄ rg3H o;A =H{t;Aڣ fR w6|'͠O:At6>>l}| rg3;e| .fpw6>>l}|; 2ɵwr-3q2HX-32>>e\ mL'Z2'Rt [jEǠZ1HEGF)\: KaCA*;>#Ui$w5F)筕JAykàaPZ0?otr0Ke|\: tҡyyN cVHPY!#=fDm4BgfcZH/cVHq~ 6׏Y!ƅ1+$ڸhY!3BgfD ǴX8fBҵcZ,1+ڸX8fB ǬXhәiЦ3b mbcV,q<~̊6ǏYx̋N͋͋N΋΋NϋB+~<:Z!ȭAmZ}Z sX(b!ȱy A Xrl^hBc  ? i i i i iPhBB kJ* ÅVI9M(J"ȋtB$HZ%P* …VI9.J"ȡpǧDǧDǧDǧDǧDǧDUe|VIY%\gDpUAVAVcD:?Zf:5u!AMu(!_~l_O>ZP3L==S/89/%3ؼp>,6?xP&x2ԟ g'AxP&x2ԟ Z2\gxp%A$㯳K2~sSim Sk46NͯѰ85FOj|fIj~~qqj~6NͯQƩ5J85F5|f_qj~gƩym_e_ԼSkqj~R6Nͯ$53ԼSkqj~6NͯQƩ5J~85F5:5Kun,5Yj>ӳԼQA*R0>Hx9( $7 žA) r0>HodAξ}}$!H= g߃95O g߃9mx g߃ 91 Aξ)} r=H㠏OAd߃>>ɾ}|}$I{pg߃8\2>ξ}|}$nOdAA ,xA ,Ԉ ۅ}H_? 
eAξ-f וAoɃIܾ_ zxW{=OzP&XH=f[H=f[H=f[H=f[H=f[Hzpԃ8iHR|xf!uǭ,nmcRqڦ3:S:s:::A~/:!uCR9.:!uA -7nԅR9-:l5.:ߧA>59ts oCy3-:9tAΡjr Aޣ -]h9tBˡZ$r 'AAAAAAЅCY\g9tpe|C}|C}|CnOЅCrC5:AM<~?9tsBˡCZQϡ"3F B _ z]h9tPyԟ`V4;?<O3L;?<.;2>K>>MeW6O1M}ߧv}nq6(Y1K)>fv4nmtcnqR{6N|fn,nY3tgfc`? v',n%m`c`qi1K8=f v',n%m36'A'A'A'A'A'?~d(x;[EA*ox;vA -r]hve|o9-xԅR9.:_A -rH䐺B ԅR9.:!u2> -%B /مRy.:KvA -rFiHiHiHiHiHiH]h!upe|RYH\g!uǧ!uǧ!uiH]hQsPq{ԅܣO`o&Z4䠾y>OiAMBK ?߲&/_ :?<)ϡCs5Zԟ 5gGAQsP&xԘΣB,j.㳨9ۢ4gϓ ͮLr6Ρ'9tj}NmC_OskI}g9t(SmemC_Lskq}rk>3Ρ83όsk>3Ρ8.em5_k5q|kq|R6ΚQfY5JL8kFyig(-3yPfYͲAe̓:;˚uz57Jyv g̓s R(Y eAΚ)krΚ% r;ajT Gq5Y۶L?11YӘŬmKŬm ?c{b6ŬiƑ1Y82d=/6gA^> - YhmwB oyw?mǧmǧmǧmǧmǧme|Yx\gmpAAlI`Zԗ%`lP_?l(c7킖C}BC}BCnpЅۄ~,.:?M<Oϡ -.:r2>ˡ>>M5zԴ捞&)q<;׶~oIi5$Mn4m&_Okq|>6N8M.4%m&_dkq|6NQ2zgi5|f&_q|gi5e&_4ktr\&5JQƉ5>81Fg'(lm_A%ƃ:7Kur,1Yb( 9AiTQBA  R(PxB ƒ 9Pxp>}_(Y eAΚ)krI$rJ=?,ԟCR9\)2>NqJ=㓔zP3LK5ŴzPsLK9|~~kJݶs1Mc6'u6YƟ1K|f6?is',m>f1K8l6,mmgfo4mmc|f|g3ԸgfcDqzl3->fIt',n<%m$6'A'A'AAɅ_4O.<9yrу'9-<9yrAΓ -Or\hyr2>˓,O.FMDZ$В 'хD9.$:ItBKfZ5В مDy.$:kvǧItǧIt5B 'Z5;kA fZ5栏O栏O栏OB ,l.㳰92> >> :> -2؄<2rd|Pݠl7?5= OMv - OMvSӳݠ{AAl5-fo6[ ~ - ~3B ~, .7ςߠOߠƉ5P7A %5$ܽFn(mp}m^ϢkIq{gn(8&(lmm^kqzgak(lmm^kqzk>3F83`LBk7-_m!m(m(-vPf!m3 gOR|r 瓃O9|2 哃O9\2>'}|GS I6) r9HfAJ6lRdsAZ2lҒds 'G9=2 푃>>I6}|l$}=2RA#ilds 'G9=2栏OrBЂ R(Iؠ>& e e9, , =Onl/1٠^b6xɂ%& n,1٠+1٠x,12>qL6cA$C2Am,C2S9hLƱcwqu61xl,j18iVm[تcc[qlub6Yl3189fUG0,jVm[تc6EEm>3|f,*zL6n-߃]ǖAc`q18j1 -**/?,] Rt)HǃAflR @)Z}jVh)ZS hY\g)Zǧ)AMтZR h9E+-)ZBKтZR y,-kfǧ)Zǧ)Zǧ)Zǧ)Z)kfǧAY7B ʂI|^㼧~k'ZA< GVP?2O -X VPxO>gi\XkqW?hU}*wn]ۧqW]Ak - r\gqWp]e|wYiUhV%ZAf[DAol(Lk3\hkʹq4s~q4s~ q4s~q4sgLD3(fhh m\kʹq4sk>358ָ3Xόck>9ئm$i[?mږOb6mQlSfKӗAJ_?>I rd1HL#Af _O % r3HEp$>m|9g '>8\lj2_N¿? 
;)+jh gE9+(Y eEAΊ}|9+/Y hAΊiG rV4H;ZA}| $+IV4㓬h'YѠOϋ!uу~ ]A,+ rV4o {A}SiP?lKöiP?lKg +2 +2 ]u;9 r5A G&ԠdAL"I>2A Rj"e|A .jpGP8I}6 u'_:,i,i,i,ilnY:mQ\3cqFqr6(YƹNr6m>3=|f{̖1f>ǖ]3cϱ{e>yShNsoJ1K[A]S< VPWxu堎4_t_tgix - _ - Oģr/|Bp5+>l [CB xe|Y\g!^pxA=qs/k{kKk+k k붍3k>3$F9Wg(jl\m ^ks]q6xg5c|f]-ϙdtf-sܰmm\$7,]A RQA Q Rpj l%8Jp8g6>/.2>/,4J@:H _p{$ nO铀4 퍃8 \2>Hq@:t6FIOirz:HiAJORzt'iAZ tV 9=x'頏OA>>IO}|$= rz:H+p̍JB+.g I(;ۡZ(;#Z(;['Z(Mu~0y\cYoAmYoAz~ɂA7ۂA7ۂ gтAhA` xpg˄8|r$|LI0 &sp{$|nσ8|\2>q3u|fsywLc\4>Ec\?$;ȁwCB ZPQypc- r[haeSѨ<蟊Fe|YT\gQypEe|Ԩʃ,*Y 8ȩAуZZԢr 腖9G/=r o󅖣y/=|A OsOsOsOsOsOsOsBуZ~PKŃ<R`?~P`{U؃.u`? =[갿 ;ۛBk;ۛxPhMCPg{ٯ緃!y_!qPzԟn筷>וAuFP]y\gspυV}Y\gGpUAVAVӧGiQ=}Z}nOVӧGpUe|V}Y\gGǧGǧGǧGUA>^+A9Y&Ǭ4i6~9fI+ҤYiǬ4iY301-Mڸ8fIǬ4i&m\Ҥ 6&m>3+|fV̬h-ϙ&m:|fZ[fʱefZ<7q_ -r` V9/|gOw1.&eBA. LdY\g]Lpu1e|YsP ӧ]LiS=}Z`"Ϻ2>b ..&.&ȑFs9).&ȹNu1Ab r"Th]Lǧ]Lu1A rPh]LBb/nOv1Av1Av1Av1Av1Av1Av19^(.9A( '9A %Ars(jtPk JA V jTRPxkF^+ZA۪A۪A]hOϥ/K^Z ԗ%/%XP5+,/K^eK,߼>> > AmAς+B+0YP0> ge|V`YQhYpge|VY}i}i}V=}ZnOgӧYi}V=}ZY}\gYpge|V}|Z}|Z}|ZZ}고g=tkmܭ]k5zkn.qv۸[3ό;2֮QO5ڸ[F=Qwkר'jnD|fܭ]qOtg=5wk|fs&5tkmܭ-3nm~^ij rTZn: r'ȵ\rA.FZnz #+LjAȤ? r6|ܟ .'gYgTDHςܟ R1e|ܟ .lpg>xcF))/ r6HyQ˵Aʋ\ \Irm‡ k>\! 
rm'ڠOʵAk>>)}|R \ r6HC˵\)r-7HDkAJ&\ R21賕Zng+2e|?XyoXiھA /ھA 0ھA 1 r7㓶oЇ mߠAھA}eC_ 'ԗB zA})pPߩ' rO8}{g=aXqPa ~5` c;A}ͲqP_a,k5e|a\ 2>nq8㓆q' cp{a nO4铆1=}0Oe|0.qp7aI8㓆q' cAj0Ryц5mǬalc0+1k5m>3k~|f< ceǬal5mܖ۲c0q[3gfmYڲ65m>9ӆMg co6ǖYk3i[ .UA& r5Yh=W \OJU?6N9O-@TI4,7P e{ZQ16 =7o8s#baMτ k}>[X$م[.A Po)w907P /dŐw90.́qg k`bxs|507W#xs|507W#Ǘšx@ s`BAдF/-ˁˁqa _{XX#50~aݣ/F/F/F/F/F/F/ƅ50~A6DЯ=, k kCǗ"xs|!7W"»irg\}C\WJĞQjs(C 敖Cs(W[zzKϡ`^q9ta-` V V V V !5q ke3 f2d++ŢWօ~{e3 fš`_&X03uO k3> f}va-do20Sj`gLς7W g}9g}}}vݣ/مwg=r]x}vݣ/YjoYjoY/Y/Y/مς.}Qu51gYfwm{|fxm|gٶ>3͖>͂lk}vmcnqm5n3p[o|g>Ybݖg6gnϟk=h-AhAh@ wڃ6Ђ>=m~A`b=xAgփ7Ϭmf=h;`̬jx3+x3+x3+3Ar|f=xs|>9>YϬbf}:h4ڕYڕ&gփv ]}f=3>YAY%gփv ]B}f=hG_̬bf=3>Yz/fփ}|1>ƒzЮ/*;AJpO(ԃv%zbf/fVgփM,n" A{|=>vAvEvev慚v楚vŚvA.>Mb=>}wٮ?q2h-3b|0C~-3b|0C~-#b b|0s|-3b|0s|-3?W(CxL!͘7}D< &j> &7#" }|>>G_Lݣ/&l6x {ń}|>xs|>a9>Ob>>Af 7X\Beؖ1=ZǼ\VjNpYc}fJ>-1=Ke~{Yc^V/c}fȎ";gVXYc}f7\r3{vߴ\ƟZA_ z}^X7ZA_A_ ؼo慵~ _Y jg]xG{a ܠܠ/ św»ρ{n/ހ7W7xs|5p7W7xs|5p}|9p/_0nЯ -kB k>> 5p~aa ܠ_`XX7=rrrrrrrtAnЯ!,k kxsB5p7W7xs|5p}|ߏLls^xw~/3 k*Z9Z>\>\ E^L7jzRM`^{aͷ`_`_`_`_`_ /'>޾^ Q=;o/ y,my¼d+Zl:tzf̢+Z,3Yfzf2sa36g`_ 0Ђ,P}@`?`a___`_ї,{ }y»G_ї7W7W7W7W}|yǗ}|yº?`ݿիXX\0OW >:q1V=pLw^ǤUaz1TtIccz1Y1{,=pLc}fZ3ӻc}f73{X=p,w[#;⎄zG F#ryaޑBQHx\^xܑQHxQHxQH/PبF`WB`WB2~_ rƻ 6Wz_ r oO+x}/9>7ǧ+ب b ZP%m ^(ג6}/kI}/ 6}/ ʅz_ Frabݣ+xaW>>}|~_ }BHQox\$ب# "F_( ^(X6 /k,>>` oب7Pez 2F7ǧ7 /6 /,q JK¼70 /-qF]_{w~w l/^gwl9qwF;Yf}^=xa ?Y^ُg/J[3g/ 6]/^wA0{Yeq ]/9> b¬^q z]/wA0W¬^qF }|~ .]/9> 7ǧwAlԻ ^+.C~ ]/̺wA0^lqF 7ǧwA.>> }|~ƻGwAl{]}~ƻG›ӻ ^xs|z |ߨ ,0il/rxlWz9}\/%;ey1?:g~jZ|1_/%WjwϬޱ>Zj3%qKXY.cyf]6~og}f7`,,*qw,;@ºsK /=~aa9u֝ k sBAPls_X?Jjojo/'~Л>J.̉ % kBšAP&~/,ˉaM5޺z^X?{ݣ/'~/'~/'~/'~/xЫAo k//z_X[j*@Pz0 T`,P=ԃYzo5ԃYz@PfC=5ԃ}|9ԃ}|9ԃ}|9ԃ7WC=xs|5/C9/s;իv0k`6מ__`_`_ knEfs;%v0Kx kno 9> T knfs;vЎbsܾ۬И۬ 6+?~|nf3l}͠c>otm6n3t[|ngs>Ybnߖgs<۷ܾ-,myf1/]^}5?h@_Wvy]^}5?h@_XA_샶` v0v]A}>hW@_v}|>xs|`9>_߯^Ayvya0vyy]^}>h@}|1o>o }Z.Akw}Z}=hi5NЧɃ8Z}=h{/f_lc6f_g߃k}=h{|=>}A}fc`v}fk`}fsw0f_g߃}|1`_̾j=h.^U^U.^Ue^x>}f`}f`}f 
{0E;X{0E;X{0E;Ͼb=>}Ͼog_g߃j=ԃbQCĐzC5̊YC>RARf/!`Rf/!`Rf!!uoVz0K[mVz0K[mVz0K[m乕^V[j+pYmc^. k+ØoXO˭t7j+ J|ú1߰.t7>Jj+3tϬұ>YncyfV:g[XYncyfVC z'_Xs(|a͡wr;šCA k_'za-/Ov zy^X'ya-煵x^ zyoOjoOjoO/P{#œCA kv/9ڽPkšC>C zW]Xs(]ua͡w s\X$nagw5@p k/i9&>\Aob /Dž>z}\X#Evaم>7W#Ǘ#ǗZ>ق{}f.90.ʁʁʁqa `,3=0Yfz`F0L`Af L=0.̐#!F0C~`㫁qa `F=0{`3fTqa `_`_`__ k`fy|_n kCw"ؿe!!^""Ǘ"7D0 To`쟽!?{C\XŢF0E k`f/^#Ǘ#h.h86+c>0n06+ c>lʰ͆e10ngm6͆136~m}f>0n3q[,myf10n3q[Y b`ܖg<Նxj7Aݠov xj7`̄>\1 Lx*03Aτ> ||&9> τogƒ7c`l_66ă֬AZ}Ce&uКAkb/Ȃ9>_ޝP}}wJ%cA+}oǾ}|1㋱`f#`vf+`c;؇{>!Ğ&6ف>`vfw`v@fwMvOvm};&fb`f`fddw0qMv3dw0qMv3dw0qMvOvb;>Ovo';'Y,j;Ţ&Xo(VUrrmUr*w0Lr*w0Lr *w0`rrWJ3&;'Yj};&ٮj;&}|1{?}tYyc.9o[e5ygf1Mc><-1Y.9ogj岚|f1Yj39oϬ漱>ng97gsXYycyf97gsz[X{a-vn[Z@o(z^X҅9ʁ^r(z]X%war܅5ʁ^rQ9Q9Q9Z.|ss|ﵞ-f.v`7sX/b;ЋZ>\bz05š@S k8N^ִz]XmtaMk`_gYPz=[Xnagzzz30_g`^Y{=zvan``QrKZ@_֒f% LKd{}ZXKKZd]K1dR{3fJ% ̔Kxs|d-% K,0c/Y`&^Ld-% % % % 9Z%ka-Y`^@֒uaU kcc@.̱ zc*0AU`փlW=VٮzZXcՅĘQ,0z/Y`F^֒f% ̦KǗKǗKIZƼ\V+טj^pY\c .a͘O˕kׇjZ|}V1_.kׇ>Zj3kϬV>Y\cyfr5g+XY\cyfrui~i=^zY/ܸ@ k}}07.ڸ@ k. 6./xs|q7Wxs|q7Wxs|q7W{Ӆq^{M 7^{1 n(л1\jzYX ^V#kZ@ k5r5 ,2ЫZ@R k5K-.V#/W#/WYz5PjfRիW'HHFG5Ҁz3HFG5Ҁ>,fa4`ўZ|̰S a0RO-`Z㫩eaM-`Z K=zj3feaM-`_N-`_N-`_N-_M- kj}XXS šZ@=.7k0}000q&`&LL={0X`fda &`3e,P,; wÀzov/w/wЎz}>l-o\<6+clJ16+cl:1ηelkclkclkclkclk>3hF7myfl?_){ʶŞ|lD)c7SnO&֪֔Z| rJ,kA[S!Ěr.kAk)Ⴞ r5r|M9xs|9>_S) r.kAk)Ʋq Z9heЗ@_6xxq0t>^xq >/xxq0{Fixq0Fmxq0F k8hA c8a&'6A>A  ` cH}H89jH89jH89ЇjH89jH89v%`_ bH8C> oχЇ6$>$!!Vpж Vp0fm3kVp0fm3kVp0zm3VVp;?I*#2dT  `Fؚ f `Fؚ @ Oo'}|1A 3<<_1Oŗt1Ⲛ.ƼP\VŘ׉j3bϬt1早/b/S_VŘ_1L}YMc~zϬ.Se>.rff0ֿ/g˙Მ.˙ᲛyWZ kG}GXX;;/􂻰v @/ kG. 
_Վ_Վ_Վpa܅#^p֎z]X;wz9YX;rv@v kG];#,v[@o kGrGrGXX;w0c`#{Gx{G 3=|t0gG 3f\=@_=@_=거VWz5{0Wzjz3fTӫWzjz3fTӫ>\=>\=>\=>\=9Z=거VWzz,6.aa᷇ 0o`6=lzXXxs|5l\6=l{3Sfaca հ_ `_`_Ƙ,o>68c>ll367c>ll3ac[bf|f|fEj̇mV|f%j[_gƷe1^x-{1^l˿/Q(Ƌ/nb;b76Akm}8hAl?A+z q .A+oW7q|8xs|z q .A+z 1Ak.qК A+~?7 5>$rAO"b9 5P 5P 5,|yt`FKf` 3G֘r0])mL1堍)mL}L9hc cAS@SfI`FDfI`FDfI`FD@DfI`FDfI`FD5}|1I`_L"b9xs|>9>D@D$$r&'6>Id0&6d\L5d\L5bQ,5>9>D^^r0w%3p^r0w%3p^^r|/9xs|㋽`_%?gߞOُ߿ޟ?/o׿ ۟,??_?s_w/w>su]O}^/p:g?//?9cψ~}W>Wz\iz}XڶLx[]aW p{>|<+|uwk}^?ra_3h6ܮl?۟~_Of+WQȷJxjץe Ʌke(vCೊ~ףnɿy +]dz*;eWXեk*\{zA |> ]4(k\ K]8ZuS kϟ TەDϟ|P?>A}}Mӏ"S>KE?>?HXt蟕K`?ƨz=??Hx\QGq;>G?|]$~_yƅk \WtUCuM'g {ןs[?=,Hۿg?]CxL+VYχuT,p3^74_wUp [{Gs[#y?'1O"y?ky=+=ǼEy1-bY5ЛM0MzS)@o 7ЛMa0Mz F ^@/(e$2e2z ^@/`aF^#@k5b0j5zF^#@׈k5zF F ^?: T2z ^@/e20e2z ^@/`du/w1c<_y|,1c<_}5E.\>|s1*Ars9\z.=A僑AȠgd3`dd32 zF0 z=̂fAfAY,R>o~UF=#AȠgd3`dd32 zF=#AȃAȠgd32 zF zFַ AЃAРh4 z=@F =A`c01<*́@s9a0z= F^=@k5`5y 6|b7rz$=@dG2#`D2# Hz$=@d@dG2#xϖk{>y/rc<ˍy[ymڲdc=#@G#|z>/AALz&=@dwoE^^eE89psȁcE89p̳޲zg=гYo0q8z=΁#΁@s7€?:J @ | z=_F=BF=B!AGH#`DH#$"`DH#$ z=B!A!AGHHDS")87q8z=΁@s9qn0q8z=΁@sǹs9qsnq7>_1c|4$b1 c1 c1 {5m԰QF!oސzܨIp&7j= n$QF {56jۨao@lG6jdۨmF瓏#>@O'`ħOV@O+> ~<4nϟ|hV=@A9sΠQoq 4 |X'`'`$$ zB=!@OH'HH'$ i0r9z=瀞s@9ׯ9gY1xL?7b&ѱUcu,rܘeՖUV=@jg5гYm0Y zV8z=#@c@O\/v/v/v~FUj*kl`ۨc'9|ɎL z&=I>ٛVlFhuAG]У%s`s09 z z=F`=XAV+ z` z`=XAJc*=uAG]У.Q$׿9xY1c<y^9xs1c<y<9xYeuU=낞uAϺg]г`G#q0`lAD"qп,8zzs6muP z(P z(=JA҅J}w 0,q8 ꧚uAϺg]г.Y;Y z=낞uAϺuAϺ~1Y z=낞uAϺg]г.Yw0.Y z= 00A z=΂g8_z>F]Yw̳g1ϺcuE\:ts1ϥ".x z=^/6GmgQ[EA}F/AK%rxOeȞ~7Fn~AH zH z=҂iAGZ#-H;` z0=.` z0=J?`ܪ z=wAϻwAϻ].y g̀Aàa0a0<aoax0.y z=wAϻȻ].y z z=wAOZuY$1OcL<.9s瘧1O"}.˅_.˅0AL&9 z=`0#`0#`0AL&s0& z=`0AL`L&􀹰& z=`_.x+5h z4=."J l; z=xAO'H'^/ z~u8 z˜H'^/ z z=xAO~4 z=\OёM?_ֱQ?zb=1@O @O '׾|fuovFg гYc0[?}=뽟y޾>뫟{ޞO `$#Гdz2=OPq|wngo4z=-4z=@<0i 4z=@\\.W@0Я E/zFx46RޘeAG#z|DD\q#G1c<"yD,bٲe2cl0 zpz=\#\@W+`+ 4 ෾#5 z=y@O^'3j2cl0b pz=\@W@Wuqz\z\=W@+$qMߴ7nԯ 3?O5~7Zn{{zN='AωW,r1ωc<'yN89qٳs؏>Uy\V| z=_F=_/Aϗ!AGGH#$9zJzJ=%Q'=%FJ=%AODSiDޗS")zJ z=AAЃºi@GH#$r0"$ z|IEHk9!' 
z=|>A>AO'gJ'H'SГ)t0)d z2=\?LE2d:t̓'1OcLŰƾ15_}D-\՘'*L Di4Qa0X1}Ƙ ) ӔiJ4a0MiKG=x4a0Mp&8LX$6jۨ QC! zڨij@܍6jڨijΟ6j^=5lسQcyoLQ3FD QF L50mQ?l~>FVsFa簍6jۨ9l氍6jf7jHۨ!m6jHۨ! QCF i5mo#=mԐCuny[nܘ1pcV܍%W+W+@@^:@/Kc0Jzt^:@/K`PnQ u2uz]^W@+וh$7 Fz#Fz#H@o$7W|/_Q-z q,D 4-h[И7etƼy3Mg̛Θ71o:ctE3Mg̛Θ71o:ctƼ,J%zI$ߚ_Mo4oh-[ T껸6껸6껸@F*0: 7oԏXO `:{^z Fu^@nW7ЫmaU7Ы 6ڨzl6z#Ao7B!Fz#Fz#Ao7B!F8F(lz].^.^AE"uq0!Fz#Ao7B!Fz#Fwfmݨoܨo- z F ^iy,䘷1oc&ǼMy69mrhc&ǼMy69mr䘷eU A/BK!R8Rz);6j|&5 5AwMл&] z z^DA/QЋ(Et0(E" z^D^DA/QЋ(E"(E" z" z^DAI`I$u: zN^'AQ'AI$u: zN^'N^'AI$u:9u: zN^'AAo7F#q0#zc1Ao7h7F#z F^-6=l{ؘ1acƼ-6=l{ؘ1acƼy[VU zj^j^@ZW-ЫUk0`T-ЫU zj^j^@Zq%= z@a@a0{= 6= z@a@a0{`T-ЫU 6z6zMަ@oS)mj0m 6zMަ@oS)`)  za0 Fa0^@/L& `t";N4Nz'މ@Dw";`t";Nz}=EGj3f̫͘W16c^mEj3f̫͘W16c^mƼ,&zII87'ozA FA4 F{^@o/ e0 >sr>+,7g\,u :zN^@SשS)u :5 zc1ޘ@oL7՘@oL7&`h7 ЛMb0M&zI$@o7 Л`4 ЛM&zI FI$@o7 ЛoY:zzA@ w;d0:zA\Wy2d;Șw1 ˢy2d;Șw1 cA1zA^P@/( e0 e0 zA^P@/(QP@/(] zA FA^P@/( zAzA^P@/(} ^@o/  z{˸ߠHؠ'l6 = :X z,X z,=KARc)t0b)X z,=K#^dD1c=zب}`6jب}`@l>QF5|Deyz=F=A}>yzz=A}>y?yz?$q;>MzSzS)))@o 7ЛMzS),zS)@o 7ЛMa0MzS)*Fֆ^@@/  d0 z^@@/ (  11E1c;Ʋc1ƼcyXccǘ1c?Ƽy,{`z Fb^1@WW +zb^1@Q1@W +zb^1b^1@W +NQ1@Q1@Whh[-zE"@o E"@o[-zzE"@oA@Ս1Hc6 ?ُg|̟Q?Q?z05z05z05d z=YAO֠'kГJ֠'kГ5d z=Y#YAO֠'kГ5o#vAݠݠnc7 z=vA݃Aݠnc7 z=vF=vA.{Y1">y|,1c<>y|#"vy={c1c=A +AG`#0 z z=AG`#`\SFQпj[W>=AǠc1|<| z>=AǠc`c1Vk,ok>z,B1"ty={C1cE={C1c58z=@c@c>c㻽o9ЃA z z=ȁ@r9P}/|]xno_k/(χ@$1Oc,* ADmAOG`G#0ކѣ%h z=Z-A-AGKУ%h z=Z-#Z{;FϝNs' z z=w;AϝNs'9 z=w;AϝNs.SRC)P:xJ]Iש?oE1Ocf<͎y4,옧1O"͎y4;iv옧1Ocfŗ<uAG]У.Q Gލ#$AɠdC2! 
zH ]%h4 =!ZAOР'h4 z04  g V=AAOР'h4 =  z=AAOР'h4 z04  z=AAOР'HР'h4  z=AAOЃAOР'h4  z=AF=AAOР'HM7D}]c<^yx=zc~貈c0d㻾7zF^#@Q#@kªk5zF^#F^#@k5zF FF^#@k515zS)@o 7Л`4ЛMzS)@o oD>BWzc1c1GumԏڨյQ?z4G3gKϘ1/=c^zŇyYgKϘ1/=c^zƼyҳ,JϘ nY(R)ЋE "zH^HU 6m/`ۨ_Fفف@ov7;Лfzfzف@ov7;ЛOف@ov7;Лf7fz-fzف@ov7;Лn0fzف@ov7Jw%л`K RzRzԀ^j@/5K R3RzԀ^j@/5Qj@/5~pF/5Qj@/5?{gE5gϘ71o`4>z F@o|7>oa5>z@o|7>zcK@0 jz5^@fWfW3Ы jz5^}޾}޾@o_(XQޯ]1/Qc^ƼDyZ/qc^Ƽ`yZk ֘1/Xc^Ƽ`y, ֘1/Xc^Ƽ`y'Yr$dyWO[6r^Q9`*tLEXfX3]+ =Bkue; B++ B+ B᱾B+ B+ B+ BwB>!ߤ+ B++ B+ 깦YizfYiz izfYizfYi6Qycxs0 =Cspxл-n Bлm5LOkԅucԅuG]Q7??7<߯y7yͼf^|?_umy)μg^3/ŏKq8Ry)μg^3/řǎ8Ry)μg^3/řKqk)^bz)J32 =#C3232 =#C32 =#C1#C32 =#C32 =#RK1R C/K1R C/š{gccޘ?ژ7fzcޘccޘ7fzcޘ7fTXC4: NC4:4: NCo3 1 1 1Cosx ,0 =̆ BoᱽBokx̫jx̫~_y5y^v̫jy5y^s8-a'̉?sϜ3'̉?sHğ9gNğ9?v$̉?sϜ3'̉?sϜ3'Ǯ?t]+>tŇ?<*>tŇCW|]ãCW|]+>tŇGŇCW|]+~xT|]!|4B觫8=B1B 8 8=B 8=B8=B 8=B`x`7)?8&o}4Wx Ѓ$ =Hys̼9f3oc1ys̼9f3o7̛cys̼9f3o7̛ccs̼9f3o7̛c1ys|7G1`xIHЏ*TE=Bϧ)| =Bϧix̧1|̧i4|1fO3ϧi4|y><>v̧i4|y> A2<IAz$IAz$c$IAz$IAz A2<IAz$cs7Gzs7G1<6Gzscs77qzAqzAqz!rsx(> =H =HBЃ$ =HB?1<IA2<W g>gAmqd3o 6ȸ 2n 6ȸ 2nAmqdAmqd3o 6ȸ 2n 6ȸ 2n;!;!;!;!;!;!;!;!;=t))))))):22CCCvy|^!WCvCvCvzNNN}CFCFCF|ȈȈȈȈȈȈ#><"h+8s3́?s;9g9gcG3́?s3́#g9gu<}{?,Bbɋ@z@z@zc C ::B :B1B1B`x ^ =B ==B =B  =B1B`x  ==B =@0<?~?`0y<f;`0y<f3ǎ0y|1:fĞِG.ХtCntCn ]K7t.Х;DXGp^Cp!3!:G>:Cc| 1t>| 1u]Ϲ}*ЅCbBCbBCbB ] qpc|)7vdǎ9fNSn攛騏]:Bg^ y#Bg(%BsȼЙ:GμЙ:Bg^ y3oxd^ y3/tμЙ:BgȼЙ:Bg^ 4S觙B,O^.\~Q:JCGi(Q:JCGi( }(XCkbH#ǯ??Ry^G}VCl svxl s6tbG~(֙ubXg.֙ubXg.֏:E3?Ztg.ݙKwҝtg.ݙU[6t7[n-eGuCa: ]ãCa: ]0tuGuCa: ]pxa: ]O^}>xa: ]pxa: ]0t~b}xct qO>99<3tt9

cGt3G9st3G9st~ΙsA:s3A:s;^Oе:<2tVgoT}EߨoTQ}!MߨhFE7*Q􍊦oT4=d}Eߨ.z.Fu7Q]pmhFE7*Q􍊦oT4}!3!!ߨ Fe7*C2CQʐoT|2!!ߨ Fe7*CQʐo6xok?no6ǟ/G4}숦ih9fih9f4s4M3Gǎh9fih9>s3]Tjȭй:BV [uFR7~n|x$^-BX b[l(QQ~+.tŅ7<*.tŅ¿xMUq+.t ]q+nxZP:BZP:BZP jCmxZP jC-tС:ԆGС:j?غ}Qq;*n抛f+n抛f+cGh3̅7s}( o›f. c|3L|J>k%3rs3tnG!F=oz/^|ïO#xsr]Ρ9t9.|7F7h-ECh ݢãECh ݢ[4t:ud7@ć<6VoqH]C(&ﯴsǎ 9g| 9g| 3L'>s3>s3̱>sc}X9g]:CzX#CzXc=tz?' |xl Bo Boqy3PߨgQ@=Ts#=7Bύs#=7=7Bx6>ͼfzȽ{+zȽ{! ro=zȽ{!Va!Q!Cn\0`B/\0`rzR=zI*zI!'CNT9rRT9oDy,CXvl[m6Vyͼ>2>3C2kx7x7^z zƅqgo= =Bϸzxco[Vzo[[V5<&UIzRT'UI5yCc, =Bc,c, =Bc, =Bc =Bc, =Bclxc, =Bclx{+ B{+{+ B{+ yCK-R B/B/K-R B/K-RK-R B/K-RK-yC`Ǹ?: CԆRñ>v,kl56y}X\،/ͮz^jc^jZ6v̱qy OBdx$> a''ce^WF1{QCo[1V Co[1V[1V Co[qxл-n >xЋ/* Bo* BoᱡBo* Bo* dž Bo* Bo** BoᱡBo* dž ~~A롔q(eJC)P~@K2+8fJRơq(<2C)P82CivC9r =r{=r{v쀙w;`0y̼>v̭?s;Z֟gn[֟?v<C =B[x~[?tn#C|s>t·9?t·9:C|s~x|s>t·9:G·9?<=t.^CQϡ9tc5tб:VCjX:VCjX:VCjXy.%dQ?J)?1c_9gNytؑ3suC?t:y ixpxWN<+'bo^`Gt:CGwݡ{xDwݡ;tt:CGuCq:uG~;v'?Jfcj昚9f15sLS315sLS3111;bj昚9fcj昚9f]1:BT1:B1:GNI:B'Q$ B?X6<(tNI:GNI:B'Q$ D(t $ ]='t|z(t3< $t _|]C.!_a:lBM 6&t_=cG/32s/;BcИ94fCcdg?73? 
#Pfe@9Pf]:PBJ@ AGn k~~4GڄNi:mBM 6&̀{i:mBHi:mBM 6&tڄN^:3BgI_Ё2<%Q!߄i:mBMȗGf#3~y+#&fccG0 30s0 ;a`9fagf3j;>v11sL|7~W=+hM B7A& tnu S&M0dNLLm2?~fH˙r洜9-gN˙cGZΜ3i9sZΜ3i#-gN˙cWZNi:-Cei:-C7` ݀ãC7` ݀0tnuCЁ:Cb@ 1t 1t ġz@ 1t 9% NyCw>tb ]5t.:O{9:gC{xnCGD3G#g#z戞9g#z3>Bwhg]:Cwߡ{xwסzxuס:t_}=<:t_ס:t_}Cu}CH :C't!ס:\Cy2eCy2]#CwԏC!:nP|;C;"#B;"#B!_е]Boex̓$< =OBϓ$< =OBϓ1OBϓ$< =OBϓ$<$yCT<y̼kf53f]3y̼kf53wǎ]3y̼k>v͓$< =O=2B12B##=2B##{!cd zdGFzd#Cg{l}xwߡ;t|ߡ;t|=<;t|:C1(Q> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 312 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-037.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 323 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 324 0 R/F2 325 0 R/F3 326 0 R>> /ExtGState << >>/ColorSpace << /sRGB 327 0 R >>>> /Length 4100 /Filter /FlateDecode >> stream x[I]Gݿ_qa"I,,c_"l:zk%_`;Ƿz:5t?|y_>?^{9w?߽oȿ)#~z'O=^Do2]Jnb>LEDZ/|_k>?sFTڎ,+Qdi.3ͣp|OUp#71y.#J/!QK: ogG.yٳ^ԁ?o~G6(!b=a=Y`&ỉ8KI>"8WO>7xjDvK/,4_*٤}a"ܯ~Dw !X)V/w/ԉy~Ql7=4):Q/ .SYCcON ?4t~e[ yV.hG'jٌ{e#e,!`q8GBUа#DZn#_L,0{VgLs{П 1?yAV|/~^c(g·3i ƞ.qWl5Her0]7H>6RGh7S?|~b`3)~Y9.WGxv[\қ(wlr-ދAT1X""}TN%,<ƶTc HhʛۣbK~ &`poOTyBJ&pqf2{Q{pO Fs9$0bA'/cvP [ j{N^y ~p$Êsl7ObeS`>76>|2k8qD?|te8J'#Qzc]';^>W 0\(.Ίgr嗀4x8?7%Zq[dq-A8t)A>WX#rpꍃ1)[L9.]m.;v7x:pa:n=\ww2L6e^w'!K7#͠ytW!U9)WA.AcUs MNtVv 㝭 p<%$v @N;/!";;?cһte8~3^.^@&wwn49ޏᐌ݌ŤQ )Lp, t/HG6|j]S&$u<0 0,=dex@n< T+R0a+}P@b<{-ʬğuCDShE%ayOֳQu\=Ρ:ʝt gP1ʣQcCR#1f/F 
f*frG*|*"dTv"o6߀dk9ԩd}b+9V&QI fYy[y= Vb*`ڿ_uZ].: eV^{`}mF~'n,\Ÿ%YEM guiQl„XNY; [yɮUtyePWҺ -=!s6X_Q+)UK*6,iγX7ba+?9b(b6V^-KBgbCVQI<{}"u 掋HuCb*'V P``No.R{x^rBz<oC+mXH,E~[B{ \Cd7G^ b+w{$pλ`+n!>+Zju2A5"s<%;&zzE*oaŁvܧ̰E^MbB$N6"w0_m{܁v3{X"=.D]V^w21CV; #Ρj}djT"m {Qy }nӮ =.Վ27ȫe01ew ٴV `CYgnpF/}vDjG6%ONEO/^Y4FOO `8K|{!9ML*:z/hud4*`++dkȕ{uFӎ#'kN&ȫF7%iQg;dW]H"QG#Gdj`8{Vdxx9fxA^s |e fO"W1IQ/򺊠W<-e8I`Cm0WRVlֲ8OF fm4O}@1yIuђf|_AALO/JTzx{Z>iDB7"o5rڪ;V+TWaV)`x?L<[olMFk|nLJx|䵈EBfF &m2%J+V1E~`F#Ui^+DUll姷; yj;-10["vGB& ZaZgfUq* #7 "9 .dSQesZ{^H5KfE^KFpҝV~\ hˆlۈ|e#"_Yk5)qϕ Ndc+o%3l_62}ѴL7ޡMUXŪQUll5Cc1FS `kOM7u(y6wx֯;x[ӏ{8 pZdA`+5۵v3w.V^o"g{2Wb}( z0}l/fԸ=yadԝ #qВy!F/B VwZyydc Q3baXFc[#bpR;MʫW+[xʎ_c`}?ǩ/FݬIܤy4届4㱊8NoqPlO"boSb`y|5$DbNMe+"?aa+UG4r\uGʰhՌy]-l_5y&YSnlu|(qQS՚ VDk'_?sV5SbQ_m^s. , OP_7b++GP"7m E'=qH~05AugW;9>&fA"h@`+?f՞{jE^砿Y )&K< p1川%*Pmc+Tw foʹV~vKh3N7 ^~D'޲ZV^Bsāk~joc-oK'G|਒olrYᣋgs𚇆oE[>_uG*ׯ+:^\-$Kqa:=%03_kERE$9ϣB)U3YÝV=; S]SȣbQ jJކ1Bx麟dg: ~ozsw!,{zo L endstream endobj 174 0 obj << /Type /ObjStm /N 100 /First 892 /Length 1360 /Filter /FlateDecode >> stream xZMoFW9˝MC Т4E;le }C2cE@a`q5;_;yIR2)^%\p'e}TcJ*+h V 24H!嬈V9 ~D22_ܰ\BY O G& X\2tV9.䜑  }zRK%<tBgE+z娼=f,_Hy{^Q`坑|d@:0DH%H諮VjX2*8|zN!:!J߁h Ai"Y,@KMFX[T$:$KJDe7_ A%f1/?jR/8%G#6^L'hjO]O_<%uf:YX:8 }N-Y[Lgg:)?CzhUEU =VOmk}>/>T(]z^..ΦGI/r鼂LHFnެ&ՙ#.X /f|q1-nΛY. ^eu ;0f–0b,h R`Kc#؏ax}WU^qKQʷU7/ѯcN>o闔nSve?@째)s*mcSȯWyW*q+%S$nnw׊Ij-nd5Hvy?uNy4am;x|0BSFto5]t-:0aa!Ƃ1'UyȼVyI5[HoI\{:~%sOI@2hG+U~m6nkr'{U~> ~l'ȯ1 1fv퐚?M| e%yMQ~N e}Ҕ}VUݷ D#.1G_F! 
endstream endobj 330 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 335 0 obj << /Length 633 /Filter /FlateDecode >> stream xڥSn@+3,@PU%EaNuv=s *93sssNT2l⽗(n8O;Ć]?~N0@]W%ZJU:YyM.%&Ke:>|m^eq3*pj!_ek"bf–6]CUfuzFCY(: 1CXibQҗ93atU\{xaMkZUgtɥ=9ҙJ&c04q[|Ľ\2džZcKuG5`8e&0{p|^Jh+hb5t`-4G뇣dBw &0Q1^F?+r bij WRXV梬&eeh긛2LrN>NtL>CƆ)0Ba7 /_HDDU䜧}WCiC;𘂢+|8֚# ]P3џXdK~ti!d+}x_1<)ƺxPT 6o _ĺJL9Lmz _Ac&R *v6E_%^fIqIfaB%DQDF! endstream endobj 313 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-038.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 336 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 337 0 R/F2 338 0 R/F3 339 0 R>> /ExtGState << >>/ColorSpace << /sRGB 340 0 R >>>> /Length 4415 /Filter /FlateDecode >> stream xZK%G.ۛK[di4 #lu0sNDT5 ,ܟϭ̌׉̏/~~g?oRzͧB=c~x_4ioݟxx׿w_=ɱ0YӒjP~|⫟XW5~xˏ/oYϷ۳G9[*!Cu B->z||ZVSu0d9?w~ԙ+Q{`YuWEeobmY4Y|go03LxB$aX#+ jf-?ƃLm?S lP4V׳TIXy%ہMV֞jK;3rk!Q:7ӱ$JfVKZOnm*{<6FmikJ\%ǘ!|mc&wwpyscÒ>-wjnVzU^  F 6v{3ЋC/Q%2±I@14e _A$S$)7|Gz酦uٺK[Uo[/p_)-,sznĠ. 
l}qmܟ_ ͳw)bNbs(V2pA(wBY]N8s9tԨ":,6E<0&s{6P_llC!1çr3O&QMbi/E6xfV) $$ Ė_DAy̢]7lKC(Ns+}G!\9,رjQ4tY2L'OqzcXplhn oތkf ϽI8T.OԺA{>h8M&H_##SWh`d%6r$:iLN<21mNV [Y]Um룳')p$X)K4:A'K[MbʘK O̕c"Y/BD .{_h0GC{p㕾&Xo6Y`苁T4M@2Fa76Qܹsx̅ UJ$Bqr85S5@QplKց4c } x&udtdbYMm$[~!/7eohXCJ`G|2 e1r ئ%賉Aǘc؜\13Y/hPO D J5KuR~Q?u؂ja7UyKl=wLfϽ&_9͞;, 1UDc@cMb1*=i LOt1i٨j[dñ$`!iC ùF PFaC J^RPw}4Emdm4Ņ/FA^DXz&:6 DRk N;pOk+=0>Y9ޠX;6ڀGylȴG{2>ٖHc#'W}圴1b+a0r6FQd>1 cݳ;;2&&ѨRC6.KN%E:ca2)kC.cbsD_RҎm! ì>=>bG`hVa72А\k>kCcb8vUǎU>*Vp$ò$DEuh}RW*6zQT9*wE\(*6qljePb_1 {c\k1;Zk" ŢREl]$~kot/zA=>Ƿ^FU$U%c茎dF*?c:caR1[HZ0$T+d_ m%ͣs-Gb<`\Ls#}6bYgc u#"?mC6P0,({tp:Yh;X#>☵ BJNjDzc3/ {W+(N0l"ym Ĩ"+[qnyNtW(9ewv| ꝳ34b =ށMbqkra&4 \a%߹+s XѤ3kȦ*sF)־6ܯEԑW`wDE.:eCD`py>WEvG@0hzrWtHQ,a\&هP0рmzs~8€EN( GvUJ`XTb8FZH?@WQuNa؜@azyd[4tjz<;-‹Ȕ0 bw/1x.~$7~U99ͻu|ѼU@\|4}1V$V -=$`y0"J୓`x-[u;Ƭkx_ ~(q\K^%akwW9~=XT25~`(#rcJ B-`$dzG-j`fCrQ9ǑQkg`WͻaU-y ,W($Ff5MzMuyBhI1)I/KV>Qp19202?83v zb7lCqr6!C\/HgA%2l]|+-$,uIM9۱I8#:wr|WP؟yD8XAѤs$p^9޸0x0U<}a|k m N&BsZ!ۮsZ{C#b@W2q^1,i'aA , !=$ae{`_a%ڎ0"wdu{6 \Gm=\]F]CJ6K/ ݼzyagޏ:c]G5Y#lu6:km)/ҰvF !OؾZӁ_5PzNWp2as fTTrZE[ħmvi*|ʛU/NvdsR`w]Oye0"nk۶󆧰`w2HT؟0]r/cxIe;pS%|Kq7T[{Kg.\$@8Nkٮb_n {CL3l;*GHh$yCagcߐN%qC"vK0W rH봼VΧ{IL㞗D*⚟d1u}┰_nq݆dѮڤDG^%zGzøQ3\HU_%D#O0]xDﮝ??/a}xtU`^'܏Xul(VaJ,;$j곚fށHQ1!ao\b'Uvbr^*r$%cU۹mIQ-t^$[޸*(vܯV!a%O^EW??&?l-ת ?R5?6x ]7Fy SF/am/̮/-'bl(tecnF:ZM\?8ڒ.fLtS)`_'݊FBvb> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 331 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName 
(/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-039.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 343 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 344 0 R/F3 345 0 R>> /ExtGState << >>/ColorSpace << /sRGB 346 0 R >>>> /Length 646 /Filter /FlateDecode >> stream xWMk1 ϯб8%]Btz(=t ]+xPB3ޙ7d隘vk<]>Cb :sM-0r@oҼ&}iCRhcَq'f=}FOLM,4JLJp q1뛕94mf87+DԞ^FK(:ף ]CGl+5q%[[Q@ݛt6vmz- \B|^b[E,iMQfMP4D/Akg1FBʍ9AQ1G#l\縳WKDe~Fj=U٪$TKҊq`Tp."Z17.$[)%D+Fڭ)voPաsK+wnD {"ye\Ղy$05{Q9%`ƍm"vh ĚpX cmͱ}ɸ.ViMjuX?zZ(b"guTtO/kZJ1G7/(Q.| 6)8ZA9- +r.u'8GkC"7Y endstream endobj 348 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 353 0 obj << /Length 336 /Filter /FlateDecode >> stream xڕRMO0 W%q,8 @RoZi[NnqcVZV`5XB{_?z9`v+i1ňjyU5:hE>Mnœ,:odYOlI6tԐ9mfHR7Us?r@f"h}$s͉=[65?Cd? 
\ iFCR6ǵ endstream endobj 332 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-040.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 355 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 356 0 R/F2 357 0 R/F3 358 0 R>> /ExtGState << >>/ColorSpace << /sRGB 359 0 R >>>> /Length 45998 /Filter /FlateDecode >> stream xK56_(&,l x`x 54b+ ّL&wϿ߿_~?<.!u|o2g?O6}FG?|?u|Ͽ|Q1?ڳZԃ1wKS_߹;?Zxy}׿/.ug}l?ms=~7ǧ//.K7??߻j??w~^Y6}of~:?.mZ߽}rjʪk`5{wy÷Sڽoޞs7;nzy|^~jYuSe3W0v)=Dg~3uRz+vq.rͻ{w}hhE{|3{p"ίoX};eQ/_Iw/ߥzY{~/.nPXl+S\s/.Heׯ潷3>_TBWn7\o.o*dmfB_+2nׯ-bwL^|.:@(/,2Djpfr!o$-BͲ*4~=yudu릈U^ZCd͚m|%VU o6kYXŚNT5v3E39+V3bW>pw]o1SRǠDuolj<7.޽u)AR7jȨ_Ư>վP}]׺+f@]Vwٱ/_|>8g+>]\^%,4<wf|tn=\iW3_h|X6o Uwo6,e$`1C XoŢ4|՘oiJQYb+|({}ʚEo6,{ ~!^uũhQu2\g}wQ7Vy^=rbM(}/-PYW:hU8_s.F6.ï+~+B*aZTxy+R-ΈovwEr1ȷfQ3^xa%>GD0vW]C|B~YJP jܛ{5ef31^8D4Ľz%Rx] &bJ:{} pՔ\tRyD,W71"MYPx tVi%`"Ꮎ{W*V*/o^;D>T&b~"XX _fbwSGv}`qt;ļw|.va&{H C ӽ؞CALlWϞ^`A.ALj'{b/{+wa&{O|z qݮS +8stʷCfrb5W*w@?3+WڽKOYwL|’)`Ο8ۊߐHkx!{tbD/zNfA+6I 9’h{1M<8@8ӧ][m{6Lϯx!"\\a~jƙL8j\`4NRWƙ>`sZe-gzr Wܝ8s1b 8xch=gz~Mqd+3y`,3}nbIh1p G'"ZKXJb*%m]` 2ܳ2&՜!95s/Z:`N gzMQ1Kp9&2[~s+Dx ɣ*x;P~}z Y151[~s&dPk=-v8ӧSq"c8w| 6 dùYKϋ53䓲 ]Mx3=G#"j^qyT(`Cgz+LD=XPpQ0A!LϬ.,T/qf%<_?!#B/| y3ELor5WUW\gƙ޾[`ԈIgz;:x8eVD[L`;ELdaDhˢ@Bwƙ>`O>YP/Y3YͯpXc;~nU=*F8l`MS r`!9v/z;/j8D(W˪]qOjIQUc樫& 0,grO$_IXk^~>ҟ-Q9ZELϷ(iIl{b<yw΋]mx^8~'\=1ρUu~n=P#"p7{V+<ŗEϜB9?r>Y0Q[X0ꕛq3ɂA,qOZ03d|f{ONgVL{}ҹy0!8UIJ38׬x*}}OaIEgxdܹU=?5*/z@pu3TS;M(&5T!H8'9Qt9AX!G%zR>Y3A7g"Bkr,޾&jR>yUBzA=O3鏧LϠ gļ2b=`;!޸=#>0ވgT]Ͽz3}:k?1Zslyă%L6XR2 Wƙ# L*fFpG%\:GUu+I;$,(.$Mk~ZZQ "lq7f WB|)z6E4cZIqk"*pONI+a[[8ӛ>eE1.IӦ3J/z6ÕطI*7=Ly`8h(-@q'3Xl lgS>s{)铝bna3f-#Lo?w[T~:ArHgt c9qb!q7 3HD'X{O)6ݙ|f/<󱓗3 sKn']O='y m4f,l/5Q*dfgC֓bJ^/t"}W2ɛpOVw>"%cV3=G7S[M$qY./,Zz_ɗ[3WWd}WUAv2]+ᾐ$4}1 p޾N1 ],?&W/d 5I[PZb J8ӓWa Z8@_I獽I7"u/˺/S|\RϤJA>s*>I=qtnӹE(_*G*V9*pY$n XK/z``/"x R(ꓱ:Ҋ z]9M-@Q3}V)bVm1L,9hɉOKN™>'I ؽzsvNђgF:#Ini gdlhXmX1I48~g8FTGTGdD3*?֞q7Kdc 
zw/'Lo`Y∝(3_FxFaoFp¥(;]1eC2=GhΪ^9VЪW 4p<`LԂwgz'gZ~EOfS63"˙ ;;e9ǙoQ덢nqmtP48~spw :͡8sF=jqں\9gzLͽ?Ō|`Y3#b?e},6cMϙ|LarO(WsN/hroc`,*3h=0n_ # jQ^vU;zEgzl:LTn /u񐻯D¨gzrLpy']1%%Ȫݒl+\ `vJLp5Ǚޞ;mpe[]|Y_Q>=G~# k>3W#,2O=$9aO,K?(X2a38[f8s*O(=${%B_Md{D/Y[kqgɣd'Ε!{{ {jƙ>sZ8i[QE9mLoV(6r6QzXQ3=ߢSwMS7KWdMoTKG@.vS#FlgMY˙߸=Acb4o,ޓ8/{Lϙ\r1HH)*q ޾&t+>S~>J Gn[ Z Lo2;jF)3,U"Q/WY)R{/zIa2*Fq7yRט5}& ytfOQ~ȅ8s%{Sc2%жG g,O5 'ш 'gzba{,9T W#+j6/D9vC~MF`~ѧY4n/_1y_#VwCȍ;~ѓ?! AP-Fƙ>q{'="T,qCbM,'̓1gz  "dDǁ m+GBi ӯ'W5="~+_3_A'bzQy,k!Nƙ>t.R,zy%LHJ ֖jUk>>Ɉ4#2Ʉ3=Ghy~bE\P7+_ 4-oV4}3} ( gzYR+z${\CENZzyILS/ޞ_y&;y^07M/ez`roM_IBNrTmfp7)h0 +bQ 4(%Z}PL8\Ix:. _a{CDqoqh1>[#3DFW8Ia8:o08L/˝ᮘv;WKBy,΋CLj J-AvnUT\2*L05KOt'(`"NRL9S!{N0mEV|p-eSߛLr7)jPԇLr/4)}h `LļZ X\O q yʣU9 Rsase8K2U]xC™OYA.}]Yp\Ǚ/aHd\Eo|XS^%ӳ:R;3Lg';(*0Ab>J8s odM,s8:B|_C[<9Ueכr$$JT] .!g;L/#TOtk*e q]l0j1G2Le9 Ĩ'G bfؾx\'~i8I\SF*pM0Mn hG#q[&b~waF!/NM p 7>^Vnԓl|C9Q%ISY{kOLQ^S.2Cu΢Nrf ص3=ߢ2ÿ Cc\, Egѭ>"WɶJzef|LokA=`1@3kFDSLٱmQn8'Nz%N%%N8sq3+S4A|m^K +5 gzaKp$4f̾-i̮lG*ZcF@v\(dU;ҫ~ѧW΂EJtg~΂&ΚCqEHz3pԂV^38-ٶ./y]GJ™#,5jTF@Ǚ>Il'Ey+ LJhgG픏{’EoN1q,)ʫB~*}sėܙˀë_i= |Xc=$_ooaEon*#&X++ i_ax6-{`sRgӚ5Oϴ&[t g@T0~s2"fIT(O/<g<$mb͏Uo]yRgzn*s g,yZ>˼**{~ѧ3ɽ3ǽEoR8MoNDUm]bͨ QHrӱ|M݈z s᠔Nr}h FLJI[WjgF+&*#֬k< dI3,2L=kL5ӄ3=G$&I`z^Tq[8~'B4gz o>uQ}CJAǙb0[ΒJg1h/&kؾ`_ISe{sN޲" %2pgzp,Y[u·aǙ>K4vfL4u ;˘Fѯ3=6vXgr5AI`,4g6|hIqt gz>3WOpE- e)鞝5g$ J inLFQ 7Y88i_q7;&֊B+U!=uhfB\e;~bk澻}ͭw$C|X5Kdoj=၌#YD_2aۭST[+5S#Ϋj6V1gt^'D ϋ2b"3)@AJ|elėp#t@g#t8g#i_xk^ZE™MAEb_a4%yg2 T'DSC}rjfGt$9q|"]k"$p3HsRz^EmRTT$ʚ iI <HؚDHx$-LO.+o-&ܥ[]kLlekHYUR%a&NQ|w{;ĉL-dj$&-JžtUUS,W1xTmJq~swX^bK^K=ca&m!l%a&6YKPnÈ0|˾&bIGjni LļZr蒪hMM@N}? 
dF%0]`&A.13[s1J0j10gͻ䊖$W4>G1Bc9Ѝ$Ko|e::y6]v_\w[o錖$UGSt(Cm|.= "95N™=ƴ gN qjyG[QQKbAb+R;Lo ^WO _ʕqQ]V6W#'L]% U*s#z Vxgz \2Zg1 g5?}Sk:~s_{; hJQ2A}ċt,ztE/zl{-VfB-zɈ3=w`C &\:[p0MԘ7 m&u\c\_Ә*3=A\KK5QŤw81Y gz̏ffL;"f8QU5AM!jbfgzrZeܣ> ԠEBhAԐpϻ{hm$R~N8sxvP@Z8a /zEv|)k ;i3=G4b(;(- Y'͚#vQ𴴈gƙZbI,hUgz[ÞC86"k$8\ef_L~0gixT NhErڄ3qڣ2QILI*GyVuMS ݌(/$ YA04pO+#*JzM^[S9m}ĤĤ]U_3=}Ꞿ c#8~ѧsSFcg9u/ދq퐣NEoXM1Yм?xIozJ5_8~<`/? O2f֌3=W^+ :V|}_I'dZd;Keڄ3qZЪ:0GvT8AK _Q`/dkDjl:DjmH8s MU$O5"pOVCPÑlg"nO81pRS䐒Ω=GuG ʪ> ՛eosH8-Xzh.:o/SNvL]\ :#fk=fo_2B)#5 =Vo>:~U gUU8s9R}M6vYL™#HJI>:|o8sMh6\=38) r*VLISbaL/ۃUCXuqņ~sEiL[6fS} 3=WútZu Xjkj1طr=GhB2^/c2b%/z[ y X1RmK>ږj9 u99+Z$,g>gzZ0vj+P{5pU=Z4dV/:|zzMYK5'gzY!O)!4cln9+6ͨגHA3 +6JE2+y3c3{o|LX"Aga\E~OOMϽYXIG^W`H%/z 2vLUpEgzMWcKwς8ӓG7`̇-gTMB8PEaEǙ &? ${Rzƙ#!1c;cK[P Ro#-?Hs/>n gz$;wB S{b 8ӓð/uD u@xz/z}]Ŋ6Eoo!M {qb=_|Ǚ>ϤU6JF63=G8Мޑ3 әERJ,ęEgX@񔨅y#&jP JGeԄQ$-L}1MNRNgƙoQK{a࿰iz%L{LwĚ{cK3qޢ>8t/zLޤհ{WTkƙ xz^H$ ́?8~uXE#a_h/z';,Nzm<^Ǚ JxP$ϖZ girZﰮ6L[$M[D5aZU~ĩf1s؛9p>YXsmpOҠ64* &3nKOՐīg$I4Y Y Te3(:oZX&Wc^g|Y*;.z_쮍 ;ˢ+Jc×7{hK}™>i^LEtڢ7=w0@JbgS~:0WkN&=3} x\$lg5v͕>݈`<+xv' 5TP*~۩'QtV(oJ[kCFA厥5qMoyN٣mfEM{/DUKi=DDCDp=$MGvZɻee$I%_&ĥO >&/ FˇZU T+T™>=})㗢>™+wDDyp&TL 9dZg ڋZcQ#p qZ<txIU_&w ^i䎻yKӜ6[ 铞>'RʝS)]D1QK>|;ќ绉$9%hhN,3\ݥҤL8[=x Ȱ W"'BOfJ=_H$w0`M# 9f7X*ڡo!ugq-_o8+_aVn8w'2QWO^/z;ְBbohEo3>`y$p LϋUZZ /zΤK`M3=Gz<ڤ~@kycjMqg$Qc=CX a!3$9mnmnm~g\ec~ri9{pU'N~sw32bp5#;<AV[fZ|t*k5*u"Y )̄_\QZڠ׊:q9k~t&'WEo2)5:* &ۋ\;#b脄5 }agM&|ԔamEN֊8Q#=c9Ғ9Lon;,?验M({+39~'y2fdɄ3=ߢ3"[#vWM-vfBEo?<#O Ԏ?|j M8-W<%xvjo"J&p{}7W{5v%#~LsG6dk>?$anz T~Zd*?$4X82`}q":ND^PI8s1 q589Q=s9Fy8')Ts)|RP™ /l38ɦx.Q=}U1>Yccfc5#jYk+q 2pO'W+G鉓py~&#c')LEgjzL@9_ɒ1D ,-9_?xw]C}=Je{ϩR>ޤ QI R(Y $1>V"U"8ӛD*#HUBrMsqO>I&kP_lou/zG^ˌ޽؋RgqZYU.5Tv 䙃*yJA_IG/V&_hf=b#HBgD8OjS gz[X3is3}^QVVU"E,Ɉ34.[^Eo# b_H$Ȇ_S+f_j/d+Nj+λj+N8'[1j؊[mA[q™t DyRY8$OL>GO>/{j祥{™#@AqRApw E@բyjJ8s)ݏ%Y];~'taih OƎ6Eo\NjjXڙ붓qOVV'%STA5L96㴁3}Y3ŷ[ȓp7O1,rF͘S?=L׺imkO[(V>E8yeE*N8SHzk^LϙwJfbyL<`3IelC*IMߡ\ 8ӛ-hfܷ ΋8ӛ4R{7H8gXb Tw7^!7Փ"fN'A/#01eP212&8c>:-ja=Q=[3pؠEk-k5V™#L] ʹp៟#cP7!C 
~%-]ڊG`|!S,!Z_O0)~\/eH9~j%9y>q &'Ɔ(-Β=KR(7)M~YrdZMWwQh3S/ΖL_ R]++Oޟ| Q~vGZix!?ؙ(Ԋvr͸#)0GQ1=Y#x_4;GWţW>SgX*3DaR7H0 H Ԩ/sǀѤsB~?ڣzvP sݕi(<4d~ԫB}r!L˪RpKy.m4)XC m7  )LΘddŷc LxfglbR0rvh\[ƸDݙ$ҹT&&L)ƎĿ3=RXc\wv~ sdߏ"Pl KH{s1wQcx-1){['1/K$hMЉbS7*\5ݨgd{UIV-awb9r#/p:6ז@#[ik#m Ip(BۦXL[7V\&2$@*SppoccE~l!֤uÐ m\kzoVqaxkDʄ08 Ì愈wB.bY,'eqa_%ʁ`R *b qRE}f6J=YhULػ xu3 #LX/*+ .lzfMza]CNݪb1YdXI؛b3'kv=QYQVkB:o]X pa!DU LD*?̮LȂ'4򲔘3)r2e%˔ufdv#.;DT.#4Ӥa~Bh0U#o"LiUU=]?ab.oJȃrUbY֒UD?K! i9@VvmhvaP@4j8D͡?sT^Kߥ*ԺpvDwE:RYA`RIQ0):߳a |؊_4<$B̰ W@EB+ 'K偽h#QxHE, vTKLŊI!, k%m:/!z1uq(jLWXͼɺ|N !6lu4\PuJl҆l:y.p\*'1F`<Q4C|V FsG'B.*,-(ܮ.f.qiѨOpDLyIsg̝\q:?W⒚NGU;Y@\X+j` Iĥ!茛t`Х!~}/]_k+LKѕ;WLb(nMR҅ڥ!Kl>8ċ}X$LOJVI!+`.PD< Kq(lW`c[5)uhTxʏ?6~4pԱ:#cJB.EXo/I3!ޥmkҍAnY8hu"|=3GВp6gC e3OOvJ}-gPr1=֤pwS;+hX8?e&s _){փ*80Jy(NCVL %;sP5Ae CqzM@i^ s2k,q|LNm Fm8zm6nSOVj( oP7/MOlCOah fzz \CTlzJxx%.󰘪xɆbЬ~(Xo 35FiǨc cu,Ҵ.o(fL&1̰oc,vMW^xsq߲l֏arA.xv;kǻL_RUT qzOp$?b.7 a{nʙGҶ:[1O^B܁ؤ3]I&>cn$$=喊.!e97棎`xX83=M󱶝Q_w]n+i<_kZF;d4P&RޔjFmg0gQtւ1Ńҳ,;sbUl͸XwO vcY[;  nɶd-̀p +Nn1-owд9IۂfH1{{8͐rZxVa۔ָYnR:Xs6VUSOؚ<!P@hqO{L g6/oi$8wJp$в 3v;@LQ(Q\"QxcO:ט #mbI1$ճOzY n^~Ihʠ gD=ޅXA&QbnS+\Ge\EjzILn` Iq*fsaS|CôJmmq+R?>ōn9uŦW-,7#7-c&N!0ax\X4|Y,sbR&Iv=Ꞟ҅tkRn]C32 H^u X1{ ף&UoWgXZaX鷊~{>Yx@VQ 1u9N(3s: K؜:9mmBܠ98>gKX11.RAudǡ#Ee\]y(3]r$kŹb9Y rFz_~eAݑ\'֠< #9N:ўk}i+ }6bsBt=Nqa>,\Tr:;9bxN-Nra41)*AOs95e uvpŒǸqB!i(Kc;{_{6{8}i?-[Zhmr)6`m1(""ʨrf&k"#CY6YGSǵgUֆ`Z,zr."` 7N~a/yƐTZVXeIae!6>&5fXi'>Vc݌oU xsov`&~?=p/mGp:Es Ɩ w<;NO܅mr,8RU{_g%(xSo(w/Ee:wÖHB%X}bH@B^&K-)(@I}Bqg}b錕ff]Jq/ǖ;1i }R8Ae^i̯8yG(%jV|V@qIil2G6ᬣ91~' dtI_YQĽۗSA >!;(NCaYpئ7螓{Nqs(xV9KT^/Z7P(xaa)ԧ(/^,$g=Iq6D3 g=N%'Ź<+fФhhCL L'4|Zpm?rPe31Ϋx?0GhT麘HFlj2\(2vV{89=#URlaYmZ&۴%^h $Y!Ć26ڑ0n-̫Fé$qf6E"mNHhv^mvbR};'' FV&pYjHBY~аŕQX5Qpqkr ˱ǖ$DMP[ާ4tWZASum麬#"]WE+tLLg!xG v-Η4.p HᓻHsC\ω=,@ϝc9IRR,y3"0Uհmm>/i1dexk ;lv6?pw>l.8,Iƃ*twH !l.%|:YSImjSx|b\۬ʴv:iL1[5"L1[m9'LϏSrc1`4Y9!qIГ=kni 뗤UFx&h!h~=/o)Hz|;Z}4mV+/3KM&Vٷ=~CyNVr?*ݚ.\7 Ku~".\p9V, sX._@ S#RJ`$9~&<*VC!ORiQc<`Y+-%" -S{VEa*ƽSN_9Mxșr GN(OP%nB=fKM6)x2ssH9actKuo!0#Fp[4r8! 
T '~v={.`t/$\$%)N-G9mym1Mz Ɯ*s*:|`mdUpvz<yl%_I{zp:̺;۵Q, YWu[qKcd*;<(;(Ngg9It!w&ջN$VY OMId)|q~:Xn͟c\`y59&|>&|Li|(Nkӑ$9  &x3P[1(?l Oثa` $ziˆ8iĨx/1Wa#8s PCePӤn܍Wwz\`U|&! ,8yߖm<_o|lcۣ o-HTGp܇+>f>HK˩8AX_ ~<&P4+3lc4)טhR1PƠ-v͋WQ/ZPG6#C1ờ N.֚,Ơ,AWYAuu;벙{Mq'oUoKb1-T8PjVőҽ\׮P}h8(_FIJf': *áXyBbݟAlw ;sH9y}0hzfV¤M̦᤿]TS%9 ?}NgcA鐊=1r߮c}t I$R+=:wWZޱW?*;9B(91u֤1uxcG{~Ӹ#0x,ׁfL6`%m2{m]k5lBԣ> be%ksƑP͔7d>zN[&  I pp|s㜋1d71kKc؎bcg{c÷D)HŤ8oz mрʌm!UL .lf-:*yI=ƞZnޱ[6g;B2"oKxޖB5T˦~| BBZT|\_|vR0 G:GIN> |TɆ]NE XwDF s>ϡm,ӳ­ ۬KasJ\#RFfBg.0Kar k0\X #X)R=EC W=a9$>"փN9$Gy֎~Ul<j![@D $y'k8(OveΘuXðCOgyv[JAAb{'^;Fǖ4j#•e+`!j"YVl]g1hz?Uz¨? >-5+>W(f:B/v"`*6dY*Me*حb>,2,w<8Idhƨɛ*&Eg V32\=' Y#=Geȟ8?9Y *r2+8Yr3N 8R:N*y R ܼqQ# 6XUl;~3ڄ+Pq{GKoF}<[xM[@w0zőmqcD[(NzXF*)DO(1}Ȫ==rX[#g^Iuzt*. k3xzUG7a}ֆA3UʱuYamxmt='U4(jhXj0fԋ_Y횢BE=jx]wTVծ5Dt? ~^  .47̃#2FJ&xq) zA<'g~` #K`ݿSыHF/{^3Wű5 6uk>iY> G>2{`LS8K?So<~i#p a aA0/ld>i z_'en;24sKadnz2ip:c߃bzll}Ȟ9%Z%т߷o0Z5ޟ0tU< Ɵ0?XW3a=:?T[a =^+oc.j]Htѩ`M]Ȃ#jh: 1Zx_Z+`[2?hl߭5pg+zweE/m!npF/0!'7"lsTA!\3o2TBlf*aViHA~t %)h=Ndqi^=[受ݳ, pX<WIOХʼ?a~vߠ0ͽ&ȲN7AV K8Gh5}n$BvRJA)v=i1%&ٟ$p$-:(cRgDeAYOQq&i8= Gw}->&V+҉pj<Ӊ z:ݠ6=# l1fнXAy*w0;% 1acI5g#q@7]S_bQ)N J*;b-/ _i'Ol K`U@TlD1;Cqr{04Ɛ4Sb<'\9_ !j8ZU0Us'WpIȂ\; .jgf\K,˵ܒ}ķޥdSJ|D[ {k+T*E*7{Q,Zέ) Gkޅ(R6UT# K;"tyk3f˜+{1.1acػtr 3=M6UܦCb28c,n*xuh'-ax>!a*"q |M粍Pvw'y߭=t~#C[?Iߥv&tj=POZJ0O؍ܾ0PiJ҅e2ZtxʫQ/AQ<5V1zvR/ASSԖu--]G,Ef(JqRXcE]{g~?ڄ[&+y8 $sD :FQ,=E[[Shr)V3 W!x)[2>SsNVe"v7V d'IA7 %[nKPTw!µ3U\&u)OVnzcwh9-xONu&X`1[F ^a_ufyF[?VȍV֛C{{@EpXO4H'9ϣGZ]Αzx\KW ^ Y4ׯnKAnF|Iv/WߧV߽]7\Ll~rqG4b SGĩ۩ǩ@%Y:ZbsMč`B3ե ^!G.O+1Zmbi-(E@^PfUD8VBB)x)Ĥ,Q̒Cb[ayhnM(<aKƤ?("QzV~D)/NW.R4 *!GRfaI+l{MmX1R %(xGA@<KP clb-fXxF`a#ՂWBcފP"VRTK;P ^!OʂVKG빧9{(]<#ZӖ0/$;{Ԕ{w'ΜX97߇vߟ(H)Fok/޴;9tyF$ =^ixA%ݢX?$? 
2A;a/Ȇi"mII.k!ҭ,J) L+Z'xpXJc/XDbmǪUuJų6Z;L9 )y`JNQGҪ%BFF,0~,6› ^0Kä wIͯY,al3 %(xE^B@9b*4) VZaRKP pHLJq%Wجd *U0|MA\b8vKt>]"ƸyV%OBB!爭QBBm${$qmJa }e{%Q܇- jpSܝ*&*_5|Yј6<HTTK<cOc̢,=EC.VOjf TK<Ϟ[Yn[jޅcQD: SBB5(n_jz ֹaSK0"h#}/+AY60ah!K_UX_ye{18l*K1*Dh: 8z{1T}F6}N:"=uXD#WaI#Sέ>uX61f}C"+D{O]&\m6 TCFop/w.{B)/h`ets׹D;xO]o.iC.?X}z׹r|Mk+`eƏ5x\2=u1vᵘOYNth~O]6Fo,1F<"'S y.*li*}4-\ԬxO]6v>~c^gek]gݍe] 3%#>z$:3U};NPrڰ8>(Uf+:t97nT8Vuiq0};ǬUчʚ21gE}Y$**LecLT/oG*N[CM|@~]ii m)}h&qng8oAjPW֋~DsK}Q_<46}EąXxiVB`5v!|v}z1r^<,^5R. x`!mV S0_Corh|T=JF}8 ˰شX7qhHa<^Ldp37vx<9d#!x[nZ ~7ꇾhgFrhT`h"ߐQ-Q`>-YvT*Z :\η3Q2>[d) Gt/omL'[@}xd8[6Tgd$o/}7(-"P fiWnvoY7>[DDjxhEŏ>[Dyhy4=NP ǡ%] ..)FסƣGx{#|?Ӽ6;ky-n0NyCӦV\iԲW\u?nyXq|7-֪󞌆6'v7γ`mZ)cr՗Xk7Jq'k!K;'v~< gPeps<½nedyNl2= sg_evN*1? 1S?-T75~xƮyb "_o @7>2SxoFɻ{hOe^ԇqd4o'm|=Vbb|ђpenC=LoQ_SoͨI / 'i&1(M`O?EXVZF1z,w{ߞ@(Pf3T7XW}̖1|zf,8393g RЇn9s(nS;&Ww*LHϠp9|Nx913^l/Cߑ t8F*qnx{ϴ(>ƞ)OCXly^ؙYv07ej%99boy5͙JhZ% Gs*G,X57}ҡsf( U(]Q(.,x =l2\{,=Y??pB0ܦ3)nS{˗տZ&A^?Ͼ6;ɜҎ@>C)ɼ8nGF(L#Pg<ܚKcpPS{9(8&sH(ƐFT&!C:^5ڊ`hUĂb}p+--NU⿛թ+ī6k }&l;oY?Pu3BjUSAn-Y7Y&%!7=YZݲ@Bcg:-CYWSŠ?C[xmb/-WͦqGo,ø@$'̺l10Y*ovmr/]g]o6)7gO2e#؜=YzuTS|Ti_EUIŹ.{pY޼.b+[ 凕n։B=uHmH 1Nm7Ȥ J~VWb!b%(SM[zIhn EJLuq:j>Zdj W|;xŽ57SiJ*"SQp>K0?"^˒ƊhQbo⡏; >̙աM;>zg_ߝwv>41J2q= :w|oU|+I(~|C-NFA5 pv Q^<}~u՟jnSӅEz6! B,붧6=Y hvU+*3 d7uKrsrNdža/*ehuEC }C|bD Ϸ&dMXY hMgOK*˅#ݼl5JsoYl]4o.Ҡ´MNc(ubu+&r\%qT-FđM*QLqt3 Äle*uv^Ϲ %@UHǧW҇DA疪Au9.*i7螺[t -3|-KpKY9/WV(K. ,:>YE@Xg:;t9 KY UypD8: -VYj-(ZꬒҭPM0X3sVKliQ uC` 14ȍuG έ(\Dj>lb[z]}v˺⫆ q7*f()HM }ۆ샬'CEva(mC"L+n{t4L؂`s~gh\QmNmoPn>Y~s>DXzY Zv'841גR*.8 ؝>g(ZN!!Vn Hp@拢B?u>>}=~񛏡EOv q9.S-M${Icxo>Edư+72G-[l%R_]xF' 'B@GK-Mo6h+/p( P _SǡE;JЁ`-#/;@U `/}_W fp![{ r \gs5:X1P[wo͡C?-! 
&cJ_?34~2-TZR@ܩ{e-n v `o=`[n>{Y^  {aZ׵Ȍ%j[q/KܡHLϙJ̸ϙCmnmS".K=i,1S_VCߍ1+yb^Dj7d8fcn6EGb1oX-MH y[BbAmX .b*vw~Wxg\V,kψ/Jw/C-d?4S[o״CdfݪCxx֝,̷dȇ e1೔Cߟq, ZBI(ua#7*Ck8<.Ɗ5ڞN}B%[U_PPs[ 5tY)>ԅXX;b_ ~aFخ"l(~x"]8$ٺl 9bn: NNk'S-27d\3(eh8C-Bͮ'EL+,# xxs<<i;BB"op,*C?PG S4zF(~4 )K~yn3סlT^Vd fKAb/Ť|h9tU4aU)&D+W)pDͿ>GKa3ZIMxB~FO}˃2BY['&6_,|{g?h`ruS gJ.5Ҙ\,zˡn>dik8lٓ\+l,V~ޕ4,Eݳ.<1лP ?Bp'ެ~VZgg/bʉ>COxOuGµ=YJ]lbXqa*}at"f5t/ jU]\ecc>Z3j}Fug֡dBMtT[^KEҊ}y袱XV<!/.QSlJ4p_J#n{Ku_ʡ.%7 !_/Rj6y?t#X*qqzob" ~|@@PU:D^m-8 gX_Ί:-^\y"{!`\g}+{1vJ7F =PGha4Ka% Qv"~'Mm,zSe[  8а1tYx;g %Aثb$oᇉvv?ٮ"*2YŏlSvyG|C[Ýb۝b \r8)>׭ɚL@0x{VzۺuwYPZ}Ň֭96ߺ-[o};balW{&˙l6:٬WaX 7?٢9;W^}Rh+[ a^@ 7v[-G0qFQܔggk*)Uyv>[\*Cޑk6hf#{[3[uC3U .WU֢d[;֪#o|S(HEd,p4 bz&8p-AhGwu)XR>p{rtS[4'"6>ktp"|kvT -"4SY*,Jgcb<#FwC= Bs*+r8Z.fC>FϤxv=uiogss<bwIm~=>[DXyo4η?[Uo}w=@*0FT %GPjRo;:7_@ێOZ֜|k\͉ 6Fkdh 2 N}k g}ix W$׬OKEuF0;k/Y& [h8t,! >oG9֪ϸXƟfٚŏg<;nR:Su9_*#|ށ28c\>{QRfg.}@7CGLjo܁g ~3>^ion`aG|G|FVao*s3a. >uN2ulJ|psos<V[t\ |նϾFT;Upf:9ΦVJ-v<#n ?M>o;cwx"z]^^ \I<`i<xg,{bmÌ+.ra2Ê7Uʬ;҉k+Kؾݜ }߹8Lx`ծl Np*YŶsmdSo6U$z[ݡR]{D_]^xd~]bkUAxZ].*w+NA Pj%78 B"aAVJ$$8N}5s"#9~TE)g\:GN.R׃T.cqI#ш:u$l(TGB|#qs$b/?$\O{ihKi^P-Bdu{f̖j;yj*#jX#i5oYyր+ogx^hţ4sq>YWē]Qn:ܧ~kGnZͺ|ad-n#TZ\1O]77-{JL]ht!kkK:t9>p{G!x5ql* AgCVMq_v]h9r ;'WʹvdhQx wcpvqq,w+gęgq(~Z,^`o[8hC7³oKs?,ŏgT6V "*xsơg*륍iFzgH*\}5D^:,*l*gTQUo"t(5$=>Od1C'z-4 1俛0g'$OJ΅f+.Z:6|XKrA|>9 zvEo wo݈b :|"KgWS#LƲ&jXw8>#_䉔^VieD.~l _WCoYQeӾSwM@Q|ys3l`+ξÎ|!GB-~>{HLe8V2ߺ"tpև XKzTMF40U0!ps(aF76Pcu>>OBUu% 'E>tD39^T͉j΄E=[w![uA[w}8 6Qm-ptqFg_;QrOJe"y/YGh9*9&ϷFT+Ӈ'qh/?O2q5yVԽP=~VpZe?-}P-҈ZH5il7~~[^k˦'7+VXlu=" ~`[3ͭ HwΡy;8URdy4to:֢vDs*YߜQHwu2 C3 hE}Y]m)U 5&2 Cі{ayʥ%2 CJyS+ECJG =,˧գ B6pdoȬr(8n&:sazSU6VEO>佅lAw?ܵD7MoyRs馳/.`"}'HbW|nZǏynEfͪ"[4,G@g7ۖg *nߍES!Guu[VQ,(sVe2ndnSz oƕZ^uNZcoD8':ˣ#dL(wDKb>in~sӖs3Y>M-'iD٢QQinđOg(nvٖ~QX0 P:nm{yony>P~(V4,?C>8*; q}v7ZbFYn0u[򼟃jcADV~iӇ-r/PuӇ{8!QD-b]0wiYzӵn]Z݃Ku#=iOy1#Ee^+Qs5>9lUU>ӂ͜qv%}ۇ0T-C7}-P Х &xnTKt`S8/Lay΋ 2bNTGItgo,beG fc26 _8>´hd,d8"#]7}ȳ7LduRn1Љ>,s*jg ԦyLJԼxu,N#ч8vvю<)߂%#[xY8?,` 
8Agy(lL|ΐh\ݻk-Ukmt"};z\@D,{ͻ`mlLgyEM]ཬbT93~!T/(/\YܽFEO-'l;G^Nt,]O}6,Qׇ'uDiP˜gtbv,؅|yo څeēT:$oH˭[ۭ->[#dʄM̪S5ꂁϱu.,ϑ&vUziK0֠]LYwgiezt/jʓ/,vzaNo(o5G -l::UFv E7}\#ydIv,~<[Yr v!n],o;P4y!32'zV8aAl!+;rܟ:xӁDgy#xoYjlKc/ǤM7L?c٘ Cx٠Wa$ _ j\. ~8ɬL>>vsCsgYw qVҾ3?>qI $]3&GHbrD&a*׶3q9s'2 5$UP]+k>CIؿgE WkCYf)}M&a7qZήdvz}UE b{N *LfS70>>Z*Ygd6ߟ>!vok%+ҹ6 pٴx[pdzaf&0/F|Lh%rmo&痃iq,)׎[ϓ,ֵ{3"7d:P6C%MXoq&o ]3mZIsac9zhXnr۵n2 0xD1bdA'YXU3֬)M&a~w)udk,߆'n<c GmoS_bMDn z oLԾu>+bQiӇ<׼ƈ`l<) +#:{4EҋUDgGtؒSk<[`>׺%N2 DElz& %߷J1c#Jt3`P!`QEDgy~Ɏ[t7AA6S_m"n> ֫*"Іܥ"$G MG`E8FΌӇ|ڇȌ2}6LMgy>|x0.bAz49kbns>3 fX+go}GDgyR&Of0aq!9yyQ2bӇ<մ>&|ՄZ>C,Fg;:1|1Bൠ ´nsOVR5tw@Y8.t3®)Ä$T}-p7] %7Agy8|j  YG/|8P/C-0KͽaڽfU1>96pc8YAO=`0oC@󗼊`%%L}ɠy@hܼ!lhY- ra16Ү~U*=g}pk Z dJ`oZ>}N)Z2Vn3L=ƚd9>9XxY႟IN>}<%+hUVT0yzyenm%զy&¨܍# M\p?߀jHtdi`?vݴ̃| gq8&lD]?FS21Z"\kT<kO&ADa/;$oOsRwBDgyάU!mTVkxMlp^e5aX2Ӯܺծgfᮗ,|(6jqbN%P1G*ДD\ZbcKK,Y>Ҩl/MiDPqDY:ⱊ⦳<&Ro졇Vݽ,usLƳ}:rug^qW ckӇ|yo2b ߼2Уo2ƻ U'!g@J4D;b-<w;&VtO~T0 /r?n2 U*WJ|\O|-Rpy< Zӧ<߀ydIo؇^2هؼ;%vLb1tO' O6crd,k' /( 9 +Bzq> K'8P,s٩\OQC,,@›L>>c9tn$ M&aE\]زykIXSEcaM&aC3l TW09y'kX*cit(d9h bm!vEĦ8}$UNܦFS뎽&0i1ېڼhţ]{^npE>+, g`h! 3,߮p) @?ׯ1C4$ mC@ 7z^/f>YM@X_Y>Z+">A,Xwѣ .00;ͷ,*W eҞZN>+Q OaY-"";BJs6d%:˳8-ƃ½L<[`%P^rWg`{Mgyɦlh†]b6P 7Kg|Y-T\ur"1߼C-YY6>g7Otg Ht㏅Ftg (uiDsd:˳ʤoMU9X .Y-cѽFtx Mgy.`lq3/>,gGkoG-Dg|d٩9eɩMgyj.6%: j DC@|PVsU}Ӈ<+5ZmNB:Ktg (U 7؊3Lgy@R^}7++_>62Ӈ`?;m3C|rD6}K1@ڽ&quXӇ<,l& $_k'^ӇS}+B&:˻`wL-MzF=PVvUtgm݋čyAgNou~gm b Ve;ǝ>pyU`Ha9`|l /Mܚ<+K1V8yɃ>ƒ<ƫ)Ռ: Jm\83DJ8lwR3ч<(MưfOmMg>XOnexL{vodY W9ZbRYb>.K40r@刌띒>ӎ^}8[M R8>ZѲ1,CW9C.DzRoT쇇Rotw8zJYfO,1$ґxB\NЇ[oHmPw׽#X0'E4|_sL2|Ul,ȜҲZZ#rCku<=R]5N,:;tFM{7^m:˧w&:˧, Vs`=66?פX?CZj\T0T\i0zrS-TV<o Bn"+o:s}X\{}}R40S 8Xn Dgy X1Rzli\#Yo1O:ePg;GeTUYπ]x*atg t)3,.ozBQ8ė[{Tf<[@P(Fؗ}3C# QY@l<Dgy kVU(XMgy` kk`Lgyﰊ; dFʮa\Tg (hX[5/b:o:K[Ҩa^*}ze:s<A.ZfWEԲq49E<[@[2[ѽBDgy 2t Ktg @]MVe.;V,tO3+@4C3+Y-`VE{h12A|&0xLUYٓC>ɡ7"7&z\t#f;Pmە,{Mw9, w2NV[.wMT;҃>>C7. 0.k,T+ *$:l^x7oxn 'n0k+)Қ7ʅy()*wu83DgyWƎ˙5q8l73 z$y_a[cTݪO",]>t&߄v9htg (̀JG l:˳Fxieg36s<  d<Ԡmrdb V#ZZt$ʃ>i@o>xj"hZZtw[`YwIL!,ss! 
[.dN#zv~р<޲sY8,Ñl2 `R0Xe2Ln?T8(ۺfM'q>lڎ_t̰V-ŧ(@ddSK7r~d=c?D*XWڈTt u VZ$:'hh;Y|> XpjxBWD&tzY_FlЂM&{}œAfah3!l)ʪa#Jt>]kihxl!;YO`g"4,q*pj=4-&QXi\r~}S8+P,Ϸ`[T ;sb⦳<[Y7v|K/PkӇ<Qa{6d5qDsGpha<3Ʊe\ljy0̅),G,JS1`{ #7}Njgw$KS(C_e&Fg:JzKTЇ<{^<9N|VV}.wcuMgy(=t5u¸Co1Y Iqb+U}MgyQ _ ϙi92Y0A|E*C.djoӇ zgXLmSXɿ~sL"#U위pRtϣ(zGu`jT};xx܀<͞ ~;$3q0Qhk,HY6K:y_l~N[,/2|4@6ih\%ч<$BckL^Ag:c^J=YZPMV4]h@d6×\V 0!7p67Mۮ@;o6YO-"Jy?n-I8/>ҩu3"5KHHhF>_B4$hǹ_Ncd :Mͦy{j\cv> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 349 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-041.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 362 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 363 0 R/F3 364 0 R>> /ExtGState << >>/ColorSpace << /sRGB 365 0 R >>>> /Length 15505 /Filter /FlateDecode >> stream xݝKIqW|CDަnKH$0`6e߱"ݔZ6;N曗ȸXQ_?zׯ^߿ۿoR~o߯j?Xo^g?￾՗u [G^G'߲r}|4)]{i>G5'_/{?gc?2W?/?髼~~KaKث17_ze #/Ws\?sit-S_좕z}ikݕ8qRƭ=]رըxӚv>/Zq_:v?QAg~K&љ/c3ӛxCW|K=vpҹϏCUIfkc}8C=ݬ[o~h]ی ŒqGgŒwW*Z#D{'+l#M=MʨRdZX.^{]}Gօ TV(a/˼F>ˮs]ݦ !ܦܗqLD6îkP|z,2.ѕ3,+taʻY؈1?-FGqq#zwl-2t[g-h״|_ԪSWO(rr/u(xMm㖸FNݴQs侻ߎ4trDq.[274[~e,^XϮxOG'md/EN:3na}8rVy2V>\>^]'&&nWE$>[F(kơow[epq`OA'.btD7>Rm?urƐ]μ2\Ժ}Dhva9dDם\:yh3zƣ.SAV]g)f,x]}}Jōk#=E+*qзoVۦ"=geFΒEG}aO$>a G8`^'=Rěeaƒ/3V菪T8 ڜ[$xC+X. 
2FbL)FwXb)e D3taWSkzt|8xpF&:uDCc;Oh^tx =ЏxEY:6E't(pU#B)$߆ 5EMQ(3Q;4Jd+\#hRœoYp|iys GW-CArbUH zhF74#D\tҹ;q ˳qYƊ]GJ =*!/ejPjЍS ^TxmǣW7sq\:m"o'y"}H3062O uѷg1Nl:u9zزҾwÊZHKIэ48^XF'26VojC~B c# -'*ƩBV5 h(iu7^O}Au1QQp2VQnbLx~fxH_ywY5t6ʟ!LTe`@ZGe_7#>vMk#[p(5N`5R|c YZ0}qj(rO]_t7|zƥ p.MDNۆڕ%0v]o,mַl,ѷL6?S=6cv?^ҧ΂+0 tXv Fo'b6q|X:CtE%h0T};Øel,(5r?^-W,$=BQ®7-XytX*s"^ !^"c~Z#`X2X\o=ߙ7Ԇ+iThsFNJYʥmע;:S"7[ruG1+QCV7|T1b4]u}0HeI UM"`r$yU`8B}\eEC2̻Qjdvݹ4Sg]XQTWV+P¾T)Yofd(PބQ8:t~&IUTy Aߍ$OEgQʻ w¶%ʖhDP䂅9th/뮮P%1`(8a OcBG=RQƢ$ϣ<$!ѱM-U7&i4DL/.6'H-ert fNEoDB |*]Np $ ##. DN,xsS'&d0Da# 6fHcr!m` .Q*ldϋ{-X\,Y33ox7IaY=6 Y F;Ui5(C rA5A5"DR݇3^FRJ2X*5n$^'q]`}P̈GLnD'yRg%s){eqc?,(}o" I\)4 C Mi!(iZPɛ&M=Z >hGYG7 |#`u3_/]Mow`]tHMWfe LeuWulXɬ-s5X^}^F_YFg{AYG!/0\'y-hHn(#qq}t`8zPսqp@a в̈|M aU +Јwh~FG0aH麁VH 8IbEw+HY+9hЅwЕ=iL@+SWAZu^STªfc<>c(M8]a#+$̏a:c Mƌ#q@ϭ'FtY0^HPċ5{Ҹȇc [hug؇>tL~vւ [!̇& sˮ}ăRx蔒ꓠ:Zk~2>ZBʤx1؆L  8c"EYD #=w3Oqϊ9ҝk7F=]ՄV~G\Hmމ4..(/O%xt㺅8rog.P#$ꦒ̠So'?RV"IqH8֮%90P=2^Hx)$,lƍ.A{ʉቕ3ȸr4Tz qU]7Ӳ)NjMФlĖ]OIZ̆@7[Ɏl|Ⱥ5nQrX:cf#ԔM₨S7Nڸq:: sh^a%VI? 2ʈZ"kL|Y-Ɓ~kBӐIҸc'cA,V刔`Z(f5F#!4qt](F=Malt!$14 mOc#b*Ygx ~7t<2@dY{=AK׳a 5u6܆Lg~X: ɷ\y)mh;?q EJ.+: 2G7+0SSo9PjԤo\7 K8=j`V%pnbNFd@!n񛾓UI ߕ 1YY.{u}Ҙ.)hQemD83;> [`): Rw2kPOĽ/RHӦuAuqyR5{awf۝<+!eE24r LBD%ta<Ava)tD6};;6ik8Z|!Nn Y"FZV 狫f F)D0td%S辆Oڱ٩*W~6]%5uN'`W``Q[,c2u)oWC@q®p*Q{4 >.2#z^fJd̚lZ2R3GÚq#'z~*P (+4O^rR3(>̇UȢv7Bfx'xq6[nV=q/G Lb(3r'.0*޿tO̓q^G _oO= $cDC $vmZQ?Bqu3üת<ԑyby]yCE *`2Yc➑MRee7IH 3/p`k ŭV}0G,:t+>WѾ# 5$2Y"j7q֦D3YWMDHʔݐ;i\ۈQmmq5N4y<37ء = D^8}&({B*g `>0wZԸ^f,쾯bu5e_ +Noz:Q〹)^y]ZU7hhĀX͟B@e)%$ׅ] ox_4-lvy#Zy^$3};*.0PEyX7&GDp*A+~*R `jX,U%TTZ=ۈ7L\0a^6dqlutidb>RBk!=SQӪLuc)liQ pAVn,p++(`=몿i/O|7~A oq L7G^߲r=Nm%_g0f@(jO7?_D?_o߾䧾,?R_?~ƿ_Fv[ݏU_g{6,h@S 1&XC"İXr!fD!-5Լ{F:={8z!iV>6ktVdL)* ҐK(a5汮{F1,1,$.T"$ >7n"ƣ.ցKgXAE;%tZt,MF? 
d/ XIʾ19{BE1 {80q-N.틳 ?χ+i?nzg6w,9:fit流wUXݸ1sM}#\aOmן}`v]]Ph3t2`M> 4 > 7<֍stXȇ]3%ש#)Ko@d} JL,T%=;#JYE1մ;ZʂN٢BO$:w>㢿#U¾za ȫ>g>[HY ÔUj}i26N2L(T>F7TZ7 )H.A K[*f%btlF2"Xc@(,E\-űQ JmQv`ކ>fh!.8'F4:Ҵl$t*.AGj6{E!= r};z3Fl\=O9FNJg|_8muG_I=KF|\P֬BLP"j&1!qHitX]|{ 4r߸V'&1}(L#,g%ǶiFFh.2#'"%ª9ZDG.pdn7n'yth)p788& ~Φ>;Y"EOLe뚼 kǾ*N}W8v=%Df==X' !|IR!d [ ' ecZr"Kl$<{FZ6=Pjء]qrǏ䨤qRtF"):'_:R.q?uxxx6QF,2t?5xr-;ByQafx!ՅSLljBsp$?'%|1fhd1'\MřOvK;E^ۆy%̨-AQ=Z&k?[KK uh/:S4ϕa]t#2?>pfY [ib<9i7+)=&<6Z !q9q>pQ0 xvJH1߉`2g>LUZ62X77MU3iXGAi1pч; U/('`R|&a>m%3 dF,+DQrCR'Rk8N~t1;Ϯ.D8f&4:I"τ@Vf0[m%QOq|߸V^vi$'>-΄^7Bhl6Q*qs9r-jqQ!P'IqqA֡~WY !Zg7 ENf.X= مWFFWyu(!ؙw>0þ]v^}C/M{"\.N*)00:ڸJbgJn"%^8zRɜoݸGFVo_5iU}Gp3A"Z` 4QF6:vP۴1Ƭ ɆX<݂6NBK˼#S]ܴXQJ+&o+b1rk {GG9tLY= zaK'4@EN,ާ/##vҪڀ]:f^P8#3ki̚ff 1~eqʸJab$LiZ¶1kJgL71| 3iq'm{z@x3B1SGUҸRAB.*i'm{?ܥe2-yQ6Ue>3\i<8>ޔsin" eGu4'6w™ŭzfE@h*%^%gVs* @#>Eѳ1ԋ`6iD;>z'Xѵg?yGϣ!~^ȴǀ\Pd–3NfݸB3B]3)mAEf„DJLמNnetӂ煅:cHl3GG-iҔuN|&+%Y,aؽS(쾍=(}3-C@͗N~*ϥuNʾbefKBX]!=6x3i4-͆Q|5m?bzIrA&̶c|KF"9̺8a%e#GEJ<,\H'@[&ㄖ+ghѺcZn#E-8.žݲĠ=ƽHϛ]omrWN*p$~f0d퐇ڴPz&1I(qQ8 2^M$|k3M@%h Ǖ+o2SU*tF(wth͈'ikߙIu> !] 
rэٶ}{ǯۿܻwGl-(9~u|4@e\HwxϠY?U"oϯ jS(J]YS{ E];j]aIRAk.6$]Z\wd|ydZ )%Zu6B%5=Dt+EHTɆfJUn(0Y])ᱻ%{FqFEl&~~%L x[9-~>T\DzI_ `]OÑ)9wI On+bjguGJn#>l:i!t[G=iuqu6UA9bt")hQuk8q-!j$VPulB8ukjeDU8wj+L*rkҡ+]辩bĵTvw[om2DaI f586)9S7 Td |Sj嫅8m 3.g9(&ZWH"qR~m}u n =TEWUA"4YD23`CGFgOfۭZz;aUPUwiy 1ѱMxD"Bts<&oԐJÙY, oh'LdDDAp8ln@Ck=u{k1Iɪ!XFOjcpkxs[IߖTIMlۭhA9@h6NTheU'Dkmf\<ψă$e15&xFVwwT ߪ3}.2L[olՌBTD%59u4/3\Cj{o<+4mB&}ehLgoPso8xB䑺=8.Ĉ^L(" ~h.qlqY[ qt{&Ou,J!MMZ/|ؑ }q4yu32x@4;6%pt4 MPl m2|B&d!wﻘ SiiDzGHQW^آT,̓rjtq Uy G:#oJ7ne/PU(Ntb(Ja}e\i%ILOgu<)EXi@11PڴƲtwfٕ!ZԤc0v,‚Ke,qmz]3'\ڱ:h|!i2辭h7<"r*\#B4 E v,1;YRVm.:\iiQlAz7.VxG6s`:q<.Jѡ K<[!1#ɮO5mZV@" fDIn1 'cVL+ I\4AH(.'xzcF$c3FtQ%Dqcwڄ:aY:5!WƲ^ _Ⱦƭe]fJxvT8J8FEs- GZAjƸ]YK1Թ@vָim2i` N C v.M?E;үVp7_}XSeT+QF&q(8c(9;~{1j'[r~#OO*^mR7t #F=&p wA;6, "GWUGntMls2k:fa=ИZq{x،f1&;H';tX2G1Ȭ^lǥ+r; 6 QE å}ΐPTȗUsKM9WFۻhX!rfBiulι텑KU^9fs{4+d1A}Y[dӑAiQ~QN|/1<=+OCH39F8[9Ui 1gS 7.ϼZ|($0g~|!W w<F$rM$6Vj B:j5+NWɆҎ!2%feN:7Q^w[r]J6cHQl*l"z-##K+vC1-tL>cYDBYG+mPqgNy5 يj|f^"7}0s)pǂ<:]z[X!VVZ0VI(6YsH!dL˨ Td;,Wl kp+lWn>ҚlPzCR/YL7Xf9i۴n9TTzK< =A y%"?vG5u#t{zS eP0*iwJ)-V%z6”t3Sy7Wdg-htZw.,Q7VwGFN9QB]EztCs7Y+ҩymlԅنۉOV6^-*R=I8+K^'CQ'Cdvy 'e.S&Xѡ͎]@*lJqRxU=-}U 9T2j+mO!5qoٰ3Ku iUQIƵ,fAA93F_؈( JiE:ȳjaڨeM쥬T%ӣqJ ìFyai!@67nDzttlPDNlO `ٖ|&|5'00~>YF5aaT'Kmf fM6%;5 xS&/"uɫwDiR6쬖kS'rk̯&`lw"S Zz2Ah\t ی 6"m{!boyGaO|7љ#hm&Y,]g ϕуFm1(l-봩'" tČw!+ ZRQrZjP@yE썫-fYH!9%0) D{V JWUp;:w5Zwȍb(F҆aP4 ,,fA_R##  c ~j Cf{`7oJ"`P&rmZ 6pUkFqCX[rš퉪+z56iD^eB薬F;,Ȋָ9KIf`M4u撮Qg"nAkU!?Y>|n53sVm-,Kږ !9թ\3JS"Rd!9>p_iԢ$ ח+'8=f,/%ҝjX$8 5RM@ "/n,OYD؉f@k([c^n0ֱiaT #A@F֨}W/@j(pm';5,դ}]u*C,I>_tZ:YtI̱5Gqi"? 
endstream endobj 367 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 372 0 obj << /Length 923 /Filter /FlateDecode >> stream xڝV[o0~ϯ@}2R@:mR^I}ؖi(!$9LJKRZU`\Oz& 1I3XOM8N3ڐ|N;ڽĞi4lUIf7{ڗFh`ٷT8T ѱS?02W(X~3% E-K39PQh9 >&9wpkci[R{Lw~RaĮ]ws;ZT[De@!pA (m7.鐗|#[:[s aG[T< z(шrӔrY M> /ExtGState << >>/ColorSpace << /sRGB 378 0 R >>>> /Length 8168 /Filter /FlateDecode >> stream x]ˎ$qWޔ~l` `K5; S݂Ob#L&lj㷏_~g~W7?=%z_Gï>_?___"/_ҌVi>s~|Zyξ./KHz~owv簨V3gxVΤO92;jy6eG+g@?>V3ǰ!o .mvu^S̻] w~=׷>uޛOO1U؞'ϩf˵xfmsj /,鯶"c O3aWٞ9W}[[hْ}7qy븅wQ%7)%o>FRS^e|Hٟg^[ 3=SێޞTvQzlNxaRXy{R[=>4=qi諹նްD?~j^ҚO椪q4g^uFU{ꌥ.(Ɲl`\0u jR)oj/ث`e3njj8?XvoA*sbj bF[y1ԦjO/ߎmq(SójǩjT[|Hk&Um@)vP6sMO|QeXzb"t`oeoFe Z|Pȥg75 rV\GZi ?]d(]ϙv~=׷kxuo5rh2lҵgT6n[5xgYШ(3([Te+r/ D oDJe/371*`8J[=lc~7pL#o*h.TAM{ol1Un dG'bϦߛbbrWOLyScx}cZ.C~w۫mQqP PRH%1Rh0>$wFe0+Oݏ0${ĺ̶K#h;p/sʕ!T4M5i0uR:M\FeۈXU5')0*P'03b8x0e6Wl*!JKw FdTEP3dzFeKrTQV٦̠)_ @(4ˆ\&pabHou3;n#M :6`T6IhbvIT!ےVҰ XkidfUXSG<|E])Ϧ}Mbh.I4CEcs¥e`q0UY'[Im MÜM-.BgfP]zJ&8;k^Įa:N'dRmCEc$Yȩ و'#|h)NQ>4sh(PDIEo -2"6"Jj"5Nb. 
$>`iF2J#T+eE ZBFA:PW ʦqʩ0&f% Ae=UT;:1w{ Q("꠺` ^c6Q |{)3)oa-*XsPJJ~\G(fh-I1ىJm}iS{ dQI6i927 #0>nsҬdn`jYDZj`kwVޕ t$U-h/jnũbܖ"jT ٗ"K*b<4jaxnL pm plR@,TvK~RLjy>S '@ZWX1dBY_5"6z*;h.} ~ReRT $_@8M , GTM(fNU2Uw]' TmA| (~=lu}hf=52Mja0CD*^4<{谖(մέ iT:>ccm*fقUTfףkϹj5pIRbsE$#5e#S :R=,r=(2uN[7lĩ'Eq|Pcؐ*A쒄;-:;FA* ,v*'4\fROT6nӵ6OzˍtUb/ 3"H׏qMlOml|Bl妉q &0E vjatFtKb`֙j!ઌlg`$;^D LQnSu[|QN2@ڱPF4[{1p53p"gǤpi'lQo1-wm=ƫ,mϤtt/RLҹ#Kؚ1姫&yPg֭{Hf; :m=ʋ^:$Hph Ӌ p(Vk*` {p!LWqF"; Cg?caS5ҵu}6)EY!:ُ˻8iSقYlZ !6izzޏO55aK'o=ԚDC-h7GT"Fxmn:Z2]U # eeSe\t@/]Өfn;tgx}EoJ8Ձ]5E1> ="2HI sy$,Ϋ;,)yLyq1!{D'p*O]˚K8J CrE8r/)$9I7#vykO]}s=/Dbi шkߡNI\,02[<c-'H#8nd}!+=Na!{v7/dїַ2Ri56L>d&|FBV]у0wjK%KG#U35m\ &QKd#UK5h[D9VHTO;Bs9},l6"QM>طD&Đf oǨj&4B$ i6t>bJ (mrOBl^A [s .Un̢wt#HVEHT=Y=ѥkQrKSadBA4+jAArFq@ oKhW o{! nіG;w) *d@g! YA QFu{biΚK5ȋw$X!ܚGBþ$`V:pbǷEV"F>tt=cҞ?u|'=BnOL+L"frfsث"zZl zǼlYČez~(ɮlcQ]%W@eݍa ½|łktLZ-莚W'7ܽpMwsSl`{u2"H0>pZ|;L]Wsp.>:jhW: jCT ԋwXujkdVw|#{& ԗ +/#Wk_';mnW^qyn^6lS*o[5Xz҈<3{o -ѐ{2P: rZ=("h彵CG}/ׁVPK/?uJA62"xWZ^߯qqc5=(VO܏D_x:xj^pݶ_}_Zm/^RX!fFd`i0V~?^y1⑮= q+Ocrv[S>}{׸V˸O(t Se [XDY(yq"AEbޮYtu"6MfwY)>AgR xO\AȜ@8lEk)ѳ )L@M,ӱnIQPk "0?7cL]M;a-j] \5S*pU4=/DN=R,F}ϝFKwܛ3/AiU=i´wnS7 g^ Tɴ6>` L uGB>iƂIlHS#0E6gZ@LV}$jgaYjSGo" 굉Pz"UQ7q\q#0DTn!\ 4 pElx~OL}'~ D]yEn{?J=qP> =YD7ЗLb@. JFWcn|,{8t*` l:% Yrx ' Ҽ10RĽ7'r$0T{B m[$'; 0M.oCȸQs/LX؛~(SD`V" `{p\ 9o ລk#kʕٓgk|k,eӀ+;`PI b: 2Xm!ܨt 2~j(Ax_#O*к{BVmb{F -*;'cl:׫kUa(4\gnW^h/v4O{^v!/)qP7 L<Ӣ] Z%{5% #!+ow.<ddNwdI"lJ,@r/=QܡUz$B4%13EA$rXF U0PxDa&/{,^ݟXd㖍4KGU'c\Жͽs<<{Q uСGcEDx_ 0iKu*w)Pi(IboDX8OUo5Zr;;mTF Z̦t.mSE 2.oRfPX;1A ]MT 7˅u$3S/8ɵ".gJSC5u_IjjEޫUkpxO;);@P>$?W{D0R8yRzm]#$߈S)>-x:C*gμXaV9#(cV9FHq/ք@#龯!̾sj-;1Ӡ9jWUUq{+~na @Ear1V`cgN1b?ZpF;)fq'p'M̠['۫ >B5c,w\6[dj5I:&-CơHm+FPwjط?)}݋G{늿{׸V˸ߏH Y} G&J=*/<5bߛ6ӌT4PeU,xdx/D[o0n%O>uE҃(^<XZ=zjj^Y]š~DK-J"b!OrIJ @s]0mnV $ΰQ88 g !5|J nh*En_[t-J pQrM(9"5uȺi%t~ @H=ӥj\ 0f }m(+pM{;^&{DB,[LdAffmy8Mţơ}=2;,Gu'E$zj?'dNމP%Vu@]e"> X-@ts(Zln3T6Ӽf7j;闣oBN]1#w%;@A(MnH5.:2~l7 yяE"cjݞDk8Mˑ~/'&j᝛' H"C? 
vEȳEOs*j N]9g1!t./=UD^ o"Y ,Ǔnx Ju=)˗=:[qʔ g;0UB#ÆB%"H'Qi8Q?fRv?){tұ&Ieg (zir NL*wpN-os e, i|?%05MG~hMePJN-ƘޮVW'\B/{m;ϩ!۩\liEi7Dt/ zp,M㝂[''m!!嘘 Fw-c=ZNJ<zG=ު"Ω'sdKpֿ(psp1E(NڗSr/) i R( gJʅs|mF1QZ]K~K j*h*5P&*򁾙1 E,S D#?g׭[o vYRTSK/kCI'IpID&N]:$Ë:% iV;)*WP&BVxX#`u7&&ñ H Y|=r 4^zm{7tgdq|H_k+O=]HL@3E,~klSZrk6rهdž5Uʒ8A3n hOLo<> =KPveݟ p"v_y CvJkW8~mP\\wA}AFn)шt+oƿG)q+OZOo[9w=Oセ_Zm/~?M.3SY]>u4dUUiA]؅t~6@D hQK,P{^#CwzTar"twk ͭmB:I5qO׹o iGX/#ZC;4UeeѲbK! Y@\=M ke"bgŭTO'ߍzJSjzfRw>p%y_t~'5Z}&+uqKZ+< OUt]ٌ\Afj76bra~z}QT[?+q@* S=<{_/Oo|d[#{xA_Ln_S](ɿDDaݗݞ; endstream endobj 380 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 368 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-043.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 381 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 382 0 R/F2 383 0 R/F3 384 0 R>> /ExtGState << >>/ColorSpace << /sRGB 385 0 R >>>> /Length 21138 /Filter /FlateDecode >> stream xɮ-Kv8_0w%T>@BBKAU23YD yfM'}?o7/o?ӏyPO_"y~i}>?x}_O_PqG}YW|!C~wyuOy>G_[jW)?F__}lHbB&?sѾZ?`&L{%Z5_~xWʟ5wk+ZUwFλWߙ5{w&}>i؟O^)};v_۾Zߙg~_;?{OkJy&9ʊVz6̵}{}ܿi{;#oc߻n4gﮓ.}o<Ϩ_~{+S9Df5+֟>[wO얔 (7>|b}|g3q{}P}0m/vMҟ(u ;Mk s×ŐhyXol ܾAرe.cas4A۟XLۣnΖ,uS֒Xjtڷo|c^=͐7^^zƖ|Pwc@Z V-o,y! !cN$oje<_e@ha,N }{ۃ-UnҶw޼֐#AMR*s!lV:$ P} (w$Y٬ K{-~|&-=TzPK\2}So}r $joy|Oe-u'ͱ~s-b eboݰiy o;ƤRl5-= #ya s_~wӿ%=vM7jiAXSL! 
KsUO~(и}A"Ce0g[!|-,pӟsǓqyJ #FN &ߚ qAE`ED,,/ l=ca& 6֜ c|+d'?`$'*8k׺㈂{[4o+ ₀iUly,1m֙ľw'tMs7Vf=)K%œ | . :~o ̽F51y|~s`V|bR'䎭HiU6+> V`U oI&*̮ *H犭9*Ƽ"E{xqz8{VKx)g%7RK F ߌX!aqJp~#@]OVjWl!_P.G.gߓ~EAc+/{%(]'F_KG֕N͇oI!iJI?$c^WKZ0ye{kyvМ8gЯ3_πnÖ(ljJGp(o"]3:e?g2gy`M{ { 6۴AĺvPƇ-c6 Wtj-b W#g rxBY4`LC^:x>csL o( /f, K 0`%5结o)M2&>0[KTaXg6 &(nƦ5EdCz*ȳG Ne朆Fse %V|C$ϋpCBdR={Ni.X+OɈY$3r`Dx@^Ai&0LK HAih21ߒ(.isX¯s)$XӘBI $ 3)ȡ{v24ih) O0 8snQ.9s3X W؃%A[˒O;yoNyY|5l \.o Y{)d8oY |Лp>,'_9y 6Gvuca\@u%ucǼSx.OAk)xr\ m1r&aKҐYV0wJ9Q=-+ y61a# *Oac|$}㷃â2\ )]asDx*M-д&_Qm TI(8AsJq`~G2`FU#ZJ3:-g<3 .)"m3GGG/܊i͍@ VEb* [5ɦO [B9**%<@Y-}rƈ&` ),-a7 )8n}'ª l^7Gߟ@?>#(R[8T`C(Ș1L~%t&#g~.P2@MOذOxTc^ANxWQv+ǶIk=& S9=ܡlL5N9!q{l]:2T°uitqlqy6^lR:au<21G[a8l}⽇g ;c'X8oxX0ҽ2-6KJ~C?_cTm'(a%2-Ɛx˰v3=1")Gx?( AǕX;yI!V$c?cG-AeIzѶBW"3xe(Gz¼_zyN+{l&̯ cvk=?s/a%RVq-y£-1馸Tr>Z%8iK<^ \CYϊn=#Aa J;ӕPfxtAVka1H$<8 2֞ *dy5cֳ IJck`⼉DDaNgEQtMa/> `hI U#9Q.!. t 懘W0#Pcs >i [JL[(Q)¶L@(:(]'~!l<ɒ5>APG^p-fHMvMׄyEMJ!_T<'+" 4ZYԼ i?ch F_p/bK_ /_W#[>Z#0G6-+Wj Ү?j!둤&˘&hQB0mR6oh5-؇Z4񆱥-FQsS}V)|GeKl9Fz"2$Ed!¶]l(H07r6%>e}eS"g'Χ2nlrd\xۉ"i|=;ȫcN[{*Dûuwa#Уy<Ox/¼"LHn絶aJ0MɅr6̏ Me8h|;8D,F_r2-72;O&)$Ⱦn<^R>[A([yϭI^'~Wxۭĕ%[nRز*q=GC^-_vK}8y2DayE(ni)x;{Iq;>!G1"c :T"~ 2 EĢ>W\k! _VpF[gfB~C-TѥKS] l񩮈-Y 7JАr8 ؐ]WN; en=cDA*#m' 385fcLH4јq,|d0+i񌒍ɐsD3%RoiѷvlFlW~([70ag(ζ#GD%x8NUq\#q>l-1/Im)a^~|}CZUSr4z\}L磋b)+m1uϑxIOې61 CCWg,Ϗ `./VuTEsȢ"^Cl%#B6LI L=x0'i{/яA&ctg *|*E]z,݌E9k]QɪèQ"YmEtplӘo)aAnN~e+ZvXw0CU5EHEY|)QƩزzӤG`p)pʶD6EӟVIjHӫW WQ*Thmt@2,ٙ1~G~~]<|UruU_ NaOczaK|ɢID0jh2jSh@^H<^$3 {TH_TۖbRNt"0Le ;t},[U(r)c-"h/`~5g!JI 6[! 
iDBOvnT(\}mPk-6!ݎ}Sr4ˆFf֚aU.U i;&nGJ&A_!Qv1a/*{R9ne o-|/ S\ o1;Fyrlˍ6I&wkKt(uMFc|_*XC2B#=)ensOi.%NJշ "멞oz]WXp߲ߋ2$ЭMaIhƤc8ZbRk]zpQs@}*rS$LeOȖ&|OʶFË#ꔠkw]X,-B :W"cD*Jٌ\2jt6 F~)pwGiy0im|O4Q1,.<d?d$e A?2A0 ϘsL{X9MDޓt} VU\/D[^>QFv_"6M{yETAn]:L+;̩lK[WSsʝU,1 3aٙ pO+ 2=nOc,7N%)zʳxpdVr#z.L~ _~ `0RΠwP17U 6zD rؒ2ET _%yq=Enc0GֆV8Bb5Bʝ \-Uʸk'`c>ptT7\#s\FP}x.YgepϮSRNXUo OviVH [҇ܳpMN'_Ĉ OQI߅9Jd Erv>CJ|R^StJ{QZڏ"jnRpQͷ<\];UlYMؼy.kℙySv/ewOĕǸ^ BiE_ip{iD(ѕs06YMYT*1:|HT h1 EVMޑlvLUVIfyQ,d& P.؂D6o {T19]U<0k/u\I;D]9.ʵ'WvWRg#[ۃSsƖT@O%+/i':Y*VLbS<5۪`k-#7+>A2kI¼b2˼8KvmO&G>+C9Aad}W;$ CLcwnoE|j|S>ӌlj91[j8,8"+" -6:J @VDŰ./ k+Խ5E4u '/rAu-"RFl^#Oкl̉)E{dP7nW7voiw)G~$Œ$X G9:Y#[vd4Ruɧ= tUᘥX|V,Abk1G"DQy@MيW7ݔ dT8_T>{9Y;˵}ddԗ\L`u;ӐH7Lɶ~zWF֊ s ~Ѹ }RĻ._L Y3-"<sd9b0DöȴVItb(%J>^y>Fz}IŒY9ْtx2#N13(DO(l02Giu('l<\(|jA)ƽ^Uyã $|E!H˘W0,y$??Ng) p@BZSP1zEFmaLb^e"/C8'51fjbĭfV2rTSͬr_THYOW0&)p:jQ14WyZc qUVkc檭ͩ2޾FU>U-)Bݯ,LqLB6OZ>2TaWh-WBF'=)D=uBޣuL%Ixi!w LA}t  %JeICD>tł^h<{UIтh# i̧bE 5 v(]La!~ߤdIǴ2W,ht |9>) Bkb z|5ŚWd1`?B<,y /xi@#X)a^*hG, "ljMla!? >PLS09‚kj1{t[09ùkk۾>EzTDz-)ee3>ft_JRT<LWe 8TZo)07U2ꇐ63~ Oq22_8!X9L4HG"+8>tbcH.{8=L}f!o*+mEo~ioFGJ2@1{zBm+UK{3Ԥlzey,ҽ]R^qAeIWvEC/J2#CBB1N]mK33Dp=6U4)RH R.p=֢ⱅR/ 9B)JPZv,0)<ڍS"lYH#=*3O$-DJ'AR|B o!ٮ="x e G.ODVR ׵fqt~dNĄ9ɺ>f\NxrRᨺi]Dge)xHL`e zZi(Yעg,-!1+BʨQV ǪPkNcGb9.)f1HBo{"6!fPPyjTfq|vf#`FHe.a^AD%0>E. 7Dԃ웬(ca^l|` )D3N Nafj׎"+J7S'9F{蓣0YM 赃*?qje_t#W!g5Uz$wEͪC`5]iW]g\O`E1a`-c,v`|)"kha=&ncgD-Ϻ8tI쏜4Y8Jf.|ꎊۃIKYU*SY !s) X' ;h0,̞)yaϔc #x'AٿwϢL矧'ЭHGg v̫Anl#ruh♎3{& F Μ*=Tx0D{r;7K{$'D/Gԭ8 [E/`pSY- ,WMK3݇\̊`\Wn6WMRla;K R(< ԔSqnjA#rb<Ƽ:?XxZбR Qo)g9RVv>g"zFM㣇8;9;5gnfyd\hp yd\;KbGJ1*Kiw4? B>OF<-+8_Y*GO&{bLr5^(=7sINk[xm(Q7 3tA;$+'k0,7=f*JIiy3Cc5wafQR;)k:4&]ʤ+a(kuw'Tt7 [kttf^a r+ ADvg" veKX֕`ە-G(LQ[kGdeN:YX!)Jd3yTkΩbj$zȼtştBy VYF*<]"KX%(n.<]uXxQggeW ~ 8>Ui82#}ˎ[QٴRS~v+={^Zc5>gf_DkRDe7UkqCh~b[6 =~K3o| vezGe3G@5ʷ>4)hD*Zǣ09Pl&CVĜdS#Em ƥ$Uy1Pff&1>YXKCzd@D1N'4-ڗI_@-l%YsorINedI] NFakWB!3:M5zJøWX?wFz!&)! 
h9n8q7:+pNjP[X' 1O;USm TLёNYt7W7綟.By]h_i8Țg]⩅]dLw7@=!w9WrPn>&ڭ4Xu{ʃ$@[WlaS^R-w1HYWW\EZ%g--[F6"5˵lNf?nLi& +g8+7=v]V~tg u[P)y_5`cZ(Bߴ2[;>MkQw.{^ꀯn r7+sjySGU%Q]͖YLLHHpxe=ٰ)E V?2ۡ2.!E$y=8㑂Aںٮ1̮;e\ƵF|2i+ë /k3@S4ь--W/k6X}^z穼祳cayn74泯^}(pVq;gЉ%.6cs~'l+CN\YUZ)J71[Uc/]bm>&M<5Yy`'ՁEx#Vz 0 Ř kt"IA2$N*dx:bNC+_"?*lagx&#FŲk9-\gWg 'iٳztC2nY˻W3"0>R_Ή`˕cq0[VGCq:=roZ8 ;82¨uyCaZ.nxhr -yjay &#> ѣjē!WsN+ָc/xb> )̂JXr tb>#HSeC|WDrdAryET){gU5!9 [v@yy7Dam#U祲v̥\qd֙꽮o<DʍȪd|:K2N,kԎA:ƿ]=bKB>GŽy”Àl]R֒z?uꏈE3eEYb{x^zc-Ms3}n_{dSa-lI(4W֭vzӧYv8/ue׎cFyK["OC c !S8(-d1LKغi#Fvp']ƅ=#g9<I!8P܇oa(.FzG^r@kz{>rEb0DQZ?"'v"+BY'7hC sTyu.-~2ؿrJ +DZEaB$LUQotejO霒(bm-ċtas 쩱 qtB^SޘO=,Uo!bGS!jU ^|}сב:/"]G- b>CMly 0gA?i!ryili?ra/?Jl%3-`՘W,7kϷd`CQ {$93v Ƽb1R _}gf©KwNMPm3㜝x%OYZ8`Qds]D8P$w8lϸ0V G"?F: !V-f2N'B:H5|]Yv-L3Hnki!H؊La^1Y+~XMsB7f:c7]j*͒=(7՟=3:2S3<٣[Gwjc'f(>EpH aƊ&[6fyDx~nY3L1~"xtE/["* HņnFSN/zM'dюE(F B=Jo6E-`7/-opiiC҆'L$,%8YO ! $lӟ]B'7x>ل@^ny!ż plΪ8]'=+}UW'0j\iM}צ,c^1"}b) 5ZP6f-/o'.1/{ (*SKg nKteXPǡbFp8` V1tVB88&16_ =LA$45Hd]7[`\S G# @a#=dua;2T}U@|<)Q'RekP;)BpO]%R}c37!NҁA3^i, IP;vA> ڐ R9F -l@q+8N,*3yȽ!fEQo@g.rw%h=$,l.q( o=*7djb{D0[fh0#'<̄u&-K;H봇@4( ؠYP| _f=qAg'-8N bC]]_.x~$M00<b%ittA^c}adxHnM7p(T/amlֿ*VoiݸK[xft-DHy_0r/`b,g!' ׵pDcU [fAWapr|f% (DLKD/0rn,'{۪19]TՉZF >%4$쟬9^w݌\ *$ijKU[p^h5uTG?+}ڴp]R\(-IC`8ݧ-8,Rx, ksc)J$Ne\M'xВp;N v):MЗJl3B{7ډ٨e@h`|%*#F2U1uyD,x(#[=ԓpf+(̊ 9SUso9[:fw#iMwt^n+''vz|ՙm-ѷXkv|}x_*oZG_uWH?i1W_$=E84eW2_]L2wwy2w_)jFN4j [Q#A,vԉ343 deZ_rV-k6@.) H9,wl4xD f1Xeqa vp#Wiy)#y)ʹGQizrdDs,>QsȊ2.vnUAXCUUA8_ڌ@.tF\0i)+̸"Zu󲦱zbWS<#G:p6)E6ggT7 gMƑ#1]Hq<2e^83Wv^4郙`q`>[j¹ئY9-ae9a^.xH0d@^9V~uGEu5_z  @=E_l-^؃ 3dž3dž =*9im>kLGSZS.V9:q' /7ԑqљ][gϻ Ġ䕑V:&)])N–0" 1愿pmb|63*Mpnf~} o|pz=CwK̻`+sAx˱lT 1XF3(  3WNI̎n霗Xd8)L#bV7ZИUpěW Q2 7C_@s1|x}oI[1E8oQ)s-kvvx'<#F (NBg;J 9JlS|.a3,c=m'OشL'yld:_tB-]`׍>T*u$7PK%~ۑu^=/(òe^O_%8%,GrLӮk#X{lŽ\_352-9[59疸='#ØW#m&qB)/E0gRB1K$Ŷ(W~t-ҕ"Y\,-GWds:sBy#I]Y#PӃ%i!ʇmhJ57efvqqDL4sDwSCcLDaNW7FRd s*H3G % 4N¯XIq9sE|v\79~ho_K'2|+UakQѠѠtcjDsGe q-ḥK7CxDe#)%eR`(8=xScƯ=È*4xm{ap$q? 
Fiu㣭UkQ %G@0`>l`|&DH9[ fj8L+ yXh9>PKW)ܭ~GĦdeD㫱Z.EUhvvgYO) Y8ga.3D+APǿ*Do??~<ߐ>_ZA_K"#y ehs}CVO(Ͽw`5O|)*#}a?R?[ۂ1%a{W9?_m endstream endobj 387 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 392 0 obj << /Length 572 /Filter /FlateDecode >> stream xڵUQo0~ϯ*xN$&'!knZӔ5sn´v4}Jl|Ep6IS&D&Y1g&I,Cb?wQlHKDh?[=bA"6dBB,fFXM;;o$*1B|}ҝ^ޣyJX<ѯ3,^:M^]Ջ.iXq8vkþBW9eXؖ0$Ђ ѥ ExA;vQ c{ʫ묝F ;‰pNȃ4C1fl:G5$~brk\۳ݺ"PsvT#JJw4nũl]q]ZwPuzC(FNЍ$SFSjI vKΡn}]!ۄG|A\oQ=QJf\~~2?Dtw׃KԺ_2>~H3ЏS/xz endstream endobj 369 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-044.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 393 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 394 0 R>> /ExtGState << >>/ColorSpace << /sRGB 395 0 R >>>> /Length 113605 /Filter /FlateDecode >> stream xlI&M [ dZ6«630-nUe291?G_~/o_?G?u]~s?ǿQ~o}/sE/-?6x^zZao?UpƏ8g<\?>Ӯ~>ߒxY~.|6sW?Kq}plKxCσǃϲgן_~>OGL \p.…x~?;Im='qGO?K X[)%|0x'ۿC۞~*\ T-BhQ)w-x+w>1 \_/)K~=w?r!~YpK`X {"\^|8•7_s4(]8O`Z}z*|ߖyp{s?ݚg}fz}Cl?ӚܽskwU^!n^Lz3|tgޑ ׳Vg[~l-?ӄs=j\/}h}Ot|m۬o_?)h)f?o\h=>8#Ƈqi?8#ƛs~c<|z߯ۗy̿gzOY6xyqg5lO!+ňbn g=baoiH[Gh;Ly3_]׃!wW?{wʛ)'G=8fs}+G빡gcy P$fߡpʯ,.~_E<'ᶷw}vDž@;W~~9?3u^%JN{}?#3?Mw'DN|b/OÀq{<'!?b~΂w?azV_~?ny3^W~C#x+?K. 
\GMɻ;%o`nn>x=̇ԟq<P|p?)f'|?NcNO:}~HR{=Y\?p6s ]bC?xlsڣx{XXk} o81q~;;y>a9XnO7P> 8)ۈ=@}y,[vfؒmgyqF`~yc!ϯlfNy`G`mOSQi<&zG}!Xn?멯xc}З|Iũ@?Qr|ӃS_:6Էεt`WG3>Q' Ĝ_>mI}1~)Du؞aVڟ>J} 4?nO>k{QگN ĺN{H|HO~-9G]pzl/{^`uXF϶=o&_Nn:;;wuJ7Wc>B)'ր3Q`yp}q|XhOp7`)¹>kq{a}hAh~:wG]s.h؏˖'Xk8%>/[/w8c\=UlOIg% 'hkP>xY=IM_Ly`SPg5[۝/|?fo}~c,7߷=ka+F{h:ު||}z~}?Q>0q밧w7p///nox~Z{`c /(ܿcA/‡aAF7p8wozzoK'7kWȻb{Qί?+ʯoI;sXꃋ\<* M X  z?ZxW]A [dž7=v_md$|ר'hԿ71%^<܁5*DьI=v}K6=A<y!/[0b`Xd,U'Ԯ_㦡X8<)'0Ombz2T`tG?Poyx玺Ops}O?71G'~(?N\)_v)?҂ڍ0#p֐Ty{v㨚߳㙴SI iM,w ` mGP=-XϣS>i艧Ny!1Jo ۃ:cc#H<0,Y sw.szR4Sb|?1sRI'ԣ\Uߟio-f4?-Ԑ[ 1ݾŻScO ,S>bq#uύ5ޅi>o#uby$8?-y=0QxIJ'l^GbQ -qq1|?`&V{CG|x. #zz<8~Hzpq#c|[/-hAM\(߸#'p5nx&Vۀo OGI(Ꟊ#'- %a- G/=?=rg[ `o5!_h_׹0Pԭ?-ܯV~;XZW[-Ge\9cxÈ#ǃ?oyolo֧s?G33nJO'ibm`8΅EgE$wqmI0EsgUne1ѹv 6x'y`,G6?xy}nB2ԅF=N uQ5 [훜1M'B0N8![s|DRy"9D"DYG+hp>?|F߄qp h \]t }"`#쓜<|ߍ)OPoj;Vw( : i|1cd;m}qLk7Plq.ؑpP=4 |aB9~9OT>ZucĨ <^'-w`bҢEaZlZde4"N~^ߚzjgtLhiꡄa'{%tی18PU2Wțp%V }c>ڀiӞfΰE;-M1WF |⵿w{m寮.mqszq}*>[LA Op)i_pe m)?Fs;>61i'FqkO{'S#F;v2fKgϥ3,|3u;ʊ'ؔ0̚V.F)mF n N|;jiWGQBmE9JXG+<\"3>j3*`J@bMcڸw#l{yh̃ތٟ2wPp 1L<>&-=BOUV_y%Fb~} V6SiX{N2{NQϜۓR疜sSV>]:h#D,9sU%q7HMbd3W8t$Fګw X;gSOל{80"8SoFZpxy84ٟ ߟkr I}#{p?_b_L_3T>ih'EB^ot>97weZ~&-SI]hw$^73=e@t{jA[+`;M?~nnF1Tܒ)a;MKOǖFO,Gvs[^;crSdm یSDjǖ[YD_T8rܳ>Wu;{v)FO۪߈}Iq[Ӷ6]Ww`XWYgu=eC;ɋ7ƷNt qu|HxwYTd:n#1R;>;R8>U޿{[21/*!V [J]d: -(\d\bj߅7ã0b"V"C#^< =~]I;J򛒕]b`7=JIIQ=szctSr:c06t$yCDY&ڷ,a/Y_7=\Cc>o-.}0qӢ1+"nHFQ14m}zQfY?ԗ̍Lqʶu$7mJ6(ǭ+v,I<἟"i`<]3цN K8:8:ꣶ<͙7Ҷ"9#sZy ޯv h~L/svgTfuGqץ"?H0Frw3e3c~y=+#q FOgAa~[8V)C k%_)q.)'G}+ QP_[QŰ~W"y7OFڿSYw3iq$OΟ};?*#Sa;㝶(SљZݑZ._h_Μ! :D~H㸒iY3/5?G鶊Wey=Sֿ8gap2C}=˶_'O=Ow}22)5ց 4<&;FnXG`.su!'W>\܃1|8#u\awJf L]oXrlplL /f7kWc'`fT[revtdߜ-io}:cZ.לMmk.Z;ޘq0;5v2Ф_:ܴeϜMv5P_X3ޓ:G8XWVl=f\5o,~wcz(=}l=8٭p6k׳!`gԏe0M^HX c]Slo]#ǧ_OW mf`W:x"kGK%Ǐ3@b ĒRF4{ҒlY>/_.=Eu;9ݒ͏k+RZhȗ3Сؾl'?CD -vɌx@0xFC(w90X֐x  ~b?4'>yu i޾>jpzp<"vS6v=|1{yG݃o^|xvmaxg&Vl.!s3[[s#1#q#,>ٹYvpPO܍>O׫+2iiO?(˙Izֻ[h|;Cp~D)|xt(%? 
9Jϝ pE^/ӳxpXg`kbA7ޭW^8,[o(94ytK~FYo|y2%^;Tb/xÁ>:7.x:'x cq\b%\|h5b?i_Χ@~R\G$_p+0}(1?͘X!V7?}n.xvo0a'=nx3B9?v\_;^p}GuFiN1vȇed{ϛ |:{TG &BDm!q)jꞨy|ddFU>X\HOyX`yQ,x3{Ge{beg[8Z~H4Y-'cgCb4ѾhwE>Lݝ;?] FvGW?y.K("|? n6b|mSxS8ډ1*`L|d]y8sdi&#班)_~_>OE?#>8cTKj0G?LyvσW[](0y$Ѿ׏?ʫ/no>?,ʟ~[u^O׾T`ʏ{H}[•j7_0^0,E#"!?;)|kQ^X8Ǽ2\ayv>J\w¡!UC,?$IƅQ'nW^eyFK]WAS,Ok\ [`Mׯ_8s_}㍿@{L._y~o?AS[ַkgѣv~+ xr\G~n>|5|zurƧ"/n~xg@&΢,ܿǦ>Oj{F(vt>8#_oomľ>p>3|Gްqw޶_Q^n3.(.>8Y*WRpЈ# W$&ˍc]FILJ}BaYQAm -PznX엯|p/QmG(X j'/ROs| E7Z~$H5#M'/2&_8b {C+f䫛Ff{㨇eP1ߌK*bƦbbcvS}}!JU Ƥ A2Y u*.}b_|78Qe` Ff3w~/18{Q.T  gIT?F=`pmPq`p`{C0E@s/bByت`OuKn7U5qhU0ȩBe,[$:ҧfy0}ʪf,2תFo\J˰ɈocXh fć,3zFbof`٭,)ilW <fo >|?0?-J3֛ʃ(ZU"c?g#漈 ['dzd &bAT1+*4X98ۊɧIa#!X'0z^]ocayAQ9~` ޏFbq8=Sx{?t;X8&2")\K9x}1]:0/y˫>,ry~s>?Ta{+9o0fMy۞Uf dܮq>0&מȸ9._MV*-f-#s[}>Yd\_}^{&6286~__FF3mO~߃q'kWVַ3f:۷0B6ٲBI5c5G>" shcAۏ3BNۗAD=6W~l¼_~31 x }Οp :_|jN׆ga9|@EwIؿ1q=? Cqxɾ' /&.>H+$O}u|*yc1nWGF?__?뿀YPX7O"}1wB?z~c}Xs/ _OO}/d| C&~^Q_~i~r}{1?F2w\oL0GbbE#_Hi=f0YocB9/JulM@s)r>7-4~|[;RE.?1?]~ u+8+W17k𼤜Ɗh@9/yOtW~U*'GɼPfg)#<^~䯛x0 7LF0J:iCZ(YoyAn yG>ugB폱|??X"dx!}DĺN{yzWUJztU(Q^'b>{xTuXagoUŧİo*y8/[vj)PA|*"v/W V1r7_-d[\Vw"-h#yTn;Ƴzl'Y%!y5y7+b{r1'wTO+L;Si=Ve/O?(lo7Ea7[IG뵟w:aOZ0I؂dn)Gצ&{[㵭a3VOyެL&ON?7VOs]YCi BGb쏽|?ߙ<5uYê#+诃*hfXĜ__1('V}wNV;~ߜoɅ|3UeL82~8i1>l/ Yhワ/زt3>a;߀a33õ>@˟@9ok=8.}Y·aEJtagxG, f|r4!'b.d<졝t1⅜/XAM\>o]yx#WA&_A:0r9d_aX? 
1w(]o#qc|?߻<}kb|.YQ>χ9{|?Ǎ(כ홞 :^B`G'Luٖ'_oq JΏ||䉿gQ~l|g\yB޻>3ߗ66r3y/>ẎYB1}̿]a1zyAN5O|9K#g| 33tc?GS7/L4|>;Rr3~w:t3a;_3èO*ㅗ=s/S_o~gerfEw~)1?f}Qߓ!%}ꉤ|j 3^-K[|V<T/6,?kϷAϓ|U 8-C||Д׭99S_y^||Cig#n78O Kgj̧>?u1Gy' }3?;3ncjEqy;?sz|Go{oS)?IlgZ\_e3o̷I $X8χ)7|OVTt)M'ͧozd?`;wYj?z= ֛]}n}lȃ7??q|g{||Ƌo؏˓kH mK^uw ?*Ձ?-N\[WΟ+/XoW>ɟzd $ɧ!ô{;wݟ ϯDJ'GbG50bx=B _Q%syอs8J~7O~y}!u9$3-2qXԽ#|@|&G!:?V1O|ՇIa~*pq}x9^ C|.o's=[ X'?Nw%_z^oO&WXo6:yZxg=+G6:LG'Gu =_`Tڕ:_Ց{cҳípfP`1["y=a&IIG nyEG`jME*x $c&MNT$',4#gY0ug*R߫h2^8r{2rfIϷ$U123u+c2]u#S:Auªv"8f1Way)3Qj7&+WG2Wו׳ S _~%Ozq.Q=r6NbNpr4[k6ձ𕱈/87;W*\F_̥R9ڜVm.(չ'QŹsM\M\a8ePuesTd,P5u*E)Auo.N \u9(W`fI]obs ֪XQB G:[$Fcq>8!yg+C\Isk])#+\UuY&WƾZq,fM۾m0|4F$+@Z*(={ysFWfUW]Xn3a&ތ.Z_+QzՌpm9BÚu.`xz=T>TY_d VkH֯s0[2$7[gm 6gjoAoɺJZ t@u xOb69b({Ug3%nu>23?We^T]淒¡G}<0dUw,&t>/9Dao>S"iw3GX+Qv.A՜2q2{}pκRH; ^f`iܷ=3u"rNl,U8'x0&MMpb9B9-7A1VO<|d{ڹ-ތc:F:cfaVqYE( EuƝd;GQuY`ٔ`~U^)Mb\Go¼=*_q߬:YG=8Žv?8Hĕ- y{f\Zy뎋 8~ޝPʃn6x+5=w5n\;GxS\頮g;Njb:\9n:i(8UHoK l&,_KW7]_G7Xr qXqH|pzc\au|^U/!Z+ŝr8Μ=xN#e}OT̛3SPu<+|5)A.5scV9!h4BbΐafF,Xᚤ-EsLkPkW9Wq+X/&uycWo՜S.񉕻%޽o\C>83nrjoݓX%e+[xG&+\S76qv|~P~_[8~RsOVv.-,:7370" oDm))8uR/ƛ3̪E3]HSkF솉*zNgFͮ15(KtC[#z•kM:^Y52WP.ÝW2B8"F`F_v^wqV.rnыx}kr^x n-1~9qyC?s;b\jQ. s>˒%^Nh91girƧaj1@}9` Lj쒔(<[jU +W`T{zXan8I8 [Ө2XaǚnkSZF&e!)\Qӎ_v/ .zJ(\9Ǎ;_ҍW?U8$|裘r k<-5ЄxW']&\pU7l#v1ڏvQ&]~ol YuĈ|ל&s 2ktue$}F>Yb1f$F搪 ]!agǁ+ Gy1SIi7SOFfڍ4SJj̩98_Hxz²|_Hϣ]BU25k$K!#_ Ҋ}SK^IP2=@̨ܠ~xsx #r+:3NP f?GQefFXm‹A<ѣ[q3|w2T #+]43!hv:03ޠ3#wdp\[8C302DuL\I{F('B*W.}Axf4b_ƒ3vy|*qGyuW^V8,m]Xz [g *J۟ShKw־8 DjA &ē+^>oboM((8,w WI}P9[Q,#YG<^z>2J6畩[Cʬ5|#=of-bVfU-Y·j,t%(}M3.a|<ǁO͓nE]-B~wjxW~_βzb+3/r F8^ն55hxC/F^Q[;'QdP7.DI>:޵qqgm(N#ӝc;5Νm+:9lTSv̅Ht;l3oln&"ڨvxoްbD6ʠƖ.o x6նtD{8&}6%͖#Hxr^iܖ춢it_}?Pzg5썌& &ȝ;+5p:O-/_OñZñ^}o}Y|}%|Ĺ[9*wG TC4׃Pv^ss|6| ݦ# >y#X._uc98w^WM^O${{2;RzJ;ctg1X|Ph?4jeQGC]xZ1|(aG :>% ̇mNǠ>8?1w0S|Da 8O7|UvJbͷ~]o@b%m7y>sq*`UzޕǗ j㉥ke=mlZw 1wy z(Zi?6zp! 
Y\ߎDcYj7lx{ wU 04~9j?ʘ&~8?ȗt1sxz7nRHu֓~_x{ 77gzw>5eszg>|fIy בzS!Zw~]VUMW9I\'y둁>]/G~r;Q -"|sz:MXW9KgZ~V^~ߴnyY?Db\^op-i>s=]\UZv}sm-gk_9fT ,G=X^lT}iǒt֟U{y6Q+뻖fਾ4O~^o=\zɕ^-z%$c>Sؾd#5v´f5AoAb%;_|A(D@~q;O;/Go{W=!R4ԭ_!|d$HLBL郾މo^,4S86?[n~|ܾZ9jzA|a?9ބ\ς~#`! z0H_iǒqn-2'_[%p=x>~R~b:ۆG=ּ狂y5B)2{?'IU"tuZ)oVf3O2kȓBU0H`7S^52gN'5&Fʫ-j5 Ґm>+q ?ߊu[W$V9Zyw ־Y ݨ5}^9^7˶.70#Fx-NgmNZui2g9@ c|9YFiX't"EP:QgTTj ~y(ь ~!eǡ]Q!&c}xpaW]7֩;*CWGl Ge} v%xu4z YHoA^ ʶȊLi>7Ty*ywbz*^:ҫŨ,{O07R^9]8*B+l{5پh4+-{Ta[MLf[޸^t6UP~JE+bbƹ[1,Y,tnZ$^EPK|H]BY˺EYyNYRwxxV\[rC7Qֲ ^͂ז=Co'7=.Kv_4r9[!EWTŻ9cVi11sYJ)X،~ަc&9֊d/|vZ4O {{-6eg0~Z4nj!NY,1'׸+P2/3мh)i/cPۛΜ 2OJWf>܌l`nGP"L3²,IhPBLU, Nl}!ǽ^?dp*Vay ',&m~WsSz,EE|ϴxug%r7ceQ?UQ{+]18A8S`RY>a5kTs11J;eлunj.@0P0 7pU2.J\E\l:Ye,UU>SW4,տ/|7C $/ƲZ2/hsM|(5#6 횊QEtEUUh{ RI~̋Y:cnt_j<\Kac4XC<#d@iT#1nJ74t앋&#/'IlSLZCqSl/,ͮ|Ȕ?c1ޞ1Et}X;Z§-ڗVY>ӓƻwj ֺx6zq.Q~[Tr4iaF3x0vNmMՂ}w>q|[-\˕{rjNˡ齿yUWbzU3b:7l'FrMH_f3D^:ɳw(cr#1EsNc;5Boy2Gs$ ϋqg=Fs74-3& R;,g *Q 1nK`̦u;1wOV'f}XbOLZ^_:AA6va 2wMZ{wMaHC dK'g]:1 nb;~@FՐ.7 4nnfrAr,@2ԀUpM3n.$o P\?07N F$c\n O'L (Q\5yrxf5s.w}ڃ1G>~kX2Ck(\c瀫C1B:_bmzARbY'~ujPq|85ʪ5%?s$*ujpL9-[C^F u_Bfs<=d%x1c0)'X-1{K7!U8&=TuSHN U"R ,q틞VuJsjǾy'Y~P[~S ozr)] rj[-<wYaZٝwa2%Nٵuf4ts+uޥgufvsu><+w˧w?_z#=fɈk!B _=~vKgf^؃7Tg^Qeu;?xQ/^_O=rz]d\b|H.oQ\WndDZi%iZn86]qzgEWh,wn֣<" b]ӨG12"~Z=k0 ?^ܿO+std,s0w]Ks$0v}ps@:r!eVu[9cVM5>=<܏*?#P8=W񯪭8| צ &\۞0+ oT'2r[͜F{7q,RLM݌-̫Ę^,ۃsrGy(hN{^#̼+SYꈾV\y.Kw~}a죏%\}a-fOA~obqڦ8hMٶ)6J뭼qz;O;Ȍl8)i_\Ivs̋m5ETn9L\m*Hޗf_Z|_Եr~-\W1so )qJ[ԧ|Juv|iͻ/qaNC7s?<~Z@{W37t'Fsj_iFf.jTMsolډpz|>`ps>ioM7sftKukfGS^c`^?ؾawVvIpFeWO^C1m;Rۮ|"錳0ו4_p}nHAʨ8pcs}׉0A&6'.>zMՇ>V>~1WRWK4G>vPz,`$Y `N@$܅sVqny&s5$1:!TQʴ#s>/w֓?o1Ν$ybdn0<<1BW(q \AR!W77g\h3_Egh(נ@5@\g{]g^3 (̊JĜh wb _xጬ,'Lq k8]OtqY@k{(h6aE^nUD@GcOTq>(2.<|=/S'f-iG6 5'@M{m6(O?s6TO5h"[cDEn/m ,AM>yik6x<W`ūӪ=]aMד/GƔ9~Ws=1Vq C~dX׽*;OoWAU3\6)&z!cZ,ۜSG{2_2+ Ͽqw!yS5?KI-Y".g B֡(b/,q \Hp#z`e-j@o6o~Vaװ{#sͮXNVe.~^hQFUo?/kOA$QSupTwUiLW4i)WSXT>{U x=0*d:S^f'h5ٿd`U hg+@&uz׻K]8yQxǎp~|ӪȌ_܂[]\lK5oT Ѫ0$vOc34T)_5i!FVi 
|}rZ~Cmّd~?]~^(ƻ#|3Q!FV|&>|[;r1Gb8?f1fFrnbqw3U[box<_ȖL#o]$>Թh׏r 򓘿svZRb̾g4-Kt5 ̙ J`o=3U ^?1G)}*9b[W>箰7buJGݥ8gw6y:smݧ&ώ6UwlE9G.`YBFMc-S ֶWe68XTPƍO󙶥N~EĭP)+'<ívZR2})Zm'@#UWI36Pn/ĪMȩbIPc̵~{w[@.zm5łJS?|JU^!W?ӵ@zaٿ`z itZ)%'u8kzl?ezd!$˅8ŷY0(mx.O:eǡD|ﶃɒww9T́IמV~Ч(|uG:|}:V}V9뵺_.u,;FZzxo׮;ia;w  La1dcyſoKhot@EHY[f7P.H(\s7e'=K"l)RqHc_ɇ:: V|!>[d>VVt_匷}yuR>_&'Ø_8 ;?-㩎_Cx0sgm+jQwOJk])v?Y{l=~G<<'J#\ǧhng*x@P6 aoy@݁2N%bL9}p< (َKP]ʯCPc;cZs9p {U| g5vc0"VͤT3P@[J3#!B營|} 怡jGf4<iLR)38~uk#зTݏuox7g pF -ŰTo_S!'Oؗ.(G_ aB0AVD< ַ>_q_#R/G/}! ՋгW"<'ߗ&MF#º:[zq Œ`A>oH~m1[EU f|Zq]/B5CH+9a{#>JGD?d>^uX/k+faNA9l~ToQ F51eؗL*C{SUIF`_s~2N|#,tX?Mm>CQ%'|Z4YZ-b!@Aqflʝ߾Lj3g k޸B.ї7YpErV6g,q]ECN(Qt”j?uD鞃68Ѐr=Iu@/wH[19EעOY>^פ~[<YYxM2Q{=Lɓ_ZFU`{\*HfibDP~M {JiYT]=4Έn9 eyc%NYث,XnKE8hNc}PghVby'2XVo8C:`3.P\;YVDL9q-/qMԢ~P)vu)=4,Ð2fVwqfxqm&.Edh;^YQq,dղ6  ~:g3y+u\Ȋc `t=#(KNe\Q )p{T72R^zVuʛr#=E] s|W@ƳٞI\qs%pA*K vb؎֋j7}ܣ9++Յ|*ubJ}wAtemO~_q b5OmWFν:;; ‘)n'=螯)QNةs9խ ?U>ƒh)ri0Ew'D@KXv!O' pmWVT5BB]'RrT-3MQ _,—|#lg4 s11Iv*stQ{9!kf֜oowLZgl\`G? פFؗ@}K91?lpz6"I)+)=~//>-W{ XWzx>Q?zPvi紞j:od߅Ͽ%WOx{=?ӳrw[3KSr7ɍ~r(*̟_&phJ9yK,>{LlT(o7 >w+>Sཨ.Ƿa`80"# Ȳ|Y;i~M#F}Te9>7xѐB=U/4[p<_Gvv2y8_mGäHSC~nAdp22ȪRM4reTuP_AS|>_&z|)+k"דx]糩0>)'g.?yL0K#滏;3 J9h/i#T)?.\TY' ~&\`bōギ[p`)낸jI4e$@4 OԐ|Lł IG ]A'vj{" ,QM\HE܂ F:D,Cc"'DNA]ɔ 'Q5.;C/jhn~nz\&)<Z``S;@XIy|p|FsrL!ĭYߕ51c9}2 ^fla*egAeKAE۾(@CiR"ns>i}t< K\ ~c~c$ sxw̧a{.}A&hR(Lޖ"XŸW ' )Ӟ(߆sdʷuXB gyKj΍"fȾ-Fi:Bx1i<},Gik,',G ɦ&)~`|gg` ^b6(_{;?/2) .RA=@)_"b27]o Vr6>fc8&8珛Ivh_) "LH!qDN'V) Lߡ?`0o]/%FBDrBK,O'DXGlJ[L p.LHM˵N5䅐^<oX&-#Ք~e͟܏5c{=Zw/CnӞ9ܜ `~"iҜF;*5c<ۭ>ZM>1ݤ>aQ"L)1Ož~hIr̪rxI {'ա 堬I{wxڣ$ )`ڀ2|dSfI|7 xTr\{lmϥ}}_ ۟i?SRBP Gt>%w W6xAVܖo|ZޯۿBUjz$Rf< @!AANXJ['*`:oPiu_+̀DTb~6peo_݈7,oŽ`e<#?}14qO_?4ˈm_ 6)'_70(QJq^ ݲ}x?%7GPhH^=B?)oJd#> YXy4~h7O-OL:;0CDLrUNfҁ<`xt֪a3Z$g0n%D5z:\ ?.ck6U=aeޥHv(A4;6F XUۚw0Y)7_y=9#67Ls V\r|l/&/d)"HSY\I%w| Cig3α02)&@)}15]RnLd|sg3g$Gʛϰ| O^d4RN 15QI|  0  F^'@lΜ1'SlM{`< 8Y47f[bAt&﷓⚰: V+(Iy{.Ê`5#xv!W?M{dpaw$OoT,}{s9d&_G6^p~ok3|&_{r|mFrִ>F!M|dmovw> &eǙl >5X9V 2ǨM~UE0+nRi;uLK `ӔQϭ.v)>{d 
jܑ99yWd~Ep]B06\)=VݮEqUh? /꒖aÔS~P98^ED_rQ)>V2u"vp{'<1m2#puʝ+^o S%&xb;51se2.=zlE:+O=frOI9>iשׂ\N~tG/:V{_'أ{>\v4 3?`2<)\>pn1+& RIs ׌7?0+0Pxk<<q (UfA ˣF{R{Ap?P]`vv^[ V?u %&?߇p[n{SxuWn[&S+#ܮ0]cC)nG )3yQ]*xTic zi>\s3L\La}sd^wSr(rE=m'ܟL\}[(;Fрύdoz/Ml#98@{p|KaQ] zO~YN&=O}^7#Ƥr'P6o?LV^މ2”!xZ-ꄃ0C:fNI\ ^|ǎ{E8SшN0z2Wpy!N#O< j2|>b.,5"\}=,},=m8bV7CZ|b49;is#L CIN Ϝ>cBIsN8GB'+Z#-A}#hSݶ 2F] #c:>kK0iEu섕dt9 ӻ#D уMGuVSf*(MwY,5CC*0gp!Ʉ6.IfNߨNO @r7MtYX)ʕ~V1#LmBU `W6_vom@G% 'c;i$ox{-{ߧ] 4;)X&@X wXǖ /ɂ}Åac)oG@#j]Η$XZzuw_Yg~ HNĖ l}, Ls5.ӣ+s߈U[puo;3%q}ui j`8 !ȢTrӜ) cu*_,Yb%0ϻ v3d)lp5c{7^7 yy< w?wfL}Xлk C>#Fh?uy>Ŷeޘ C?FK~qZBHΝ w'EkuU?t˝EV7\c R' rګs= XP2Paf*VMοnܵyjϪDU"<S[+#hd b9J#b6 ߛM.x)vTDu8yJZ'B6ᆳ?pKcDw8xwgT/4a 77e5oO6] ՚6t?nRNp Z Bh<(]|\,`:>3A8ӳ\ ,*ax#9Rj_ܥ*ϙ }Cx%U}y >`Q20\qc^D!d/$OU_P-CsusnŠ"`U|ϸ{پ8v~29-@Hb! 3?Bk{$[&I0-J|wtޟ0bhaGؔ2M>o8"a}6#iX1R:VZ%Ȱ Qi 3 d#nOŸ{#p TO/e?FLl8e73rmh lamzN;LACرv&4\؆b~kx Nn1G^b  TuW]]+ )(u1D>W-* $]rO`Br~.i8M_@Cwsq;sݣ{Oޏ{Qw}bߏѮ398"c-@0 ($ؼZ˽[jd/)o>1=Oal:_S4CL`DEy/BxR9$t4+H]%_I:3޷\q 9nȔ-(㈄9 w 4S9euoYk.8R;0nfz*,fLWQg#hexi.?wαyW7m#wWucA{&ukx#As/@c`WmmutXc.[z)s7̃8e^^=Umtџp@ȯxm`]@ތ|C]Yei)gU孧\C~eFГro!*'"OZ؜~Cy@LVEʠP4_uGVwʍ(+ xC%OO zGD!Pv "!@eW;f1Xw,{<-auvs e8;;t< Źjb%YZn1l&?=|$ir77Y@UEKm߈l`0[Pdm 棣bFH?ΪA95s]葮, 8;oިx]foYDݳ#Yu k ]דϯYVQj'_sd=1D4 0T0)^[6e%3_z_is9"Ͻ=QBɸ ޗs\UI=m06?9!ΔUIY%pQL^_$G OAx&wZn$c R^xgfۤQ]kn^8xܙ6r]'. \:ʅ?Ǎ 3*eMjWp! ZU:\8C'j{4p]8qdLSǯ~A\>ewBȢ'r&]~j|W_[R34dȪ-_mE7P/8_K{[򧚬@6@0U*`p fP¦2C=:pe! %o9-H5֥X".->+3M@ZV+x|mRIsPKsDٰ4!rBē |]W-&}hV7_ kPmfS%RPC=hju4.4gvS?]<Gts6}!q;` yӊC/ʬ4D Gx1 rW~/8DK&+ 5Lt 6'!CmдGvA}9ݤHCBμ&h F(KA=4wwX};žL=;+NӾFݼ&ٞHQ,ps Dz}) rNFR1NPtp $gluph]?QMa9 ʜ6,!fEXAC[?wg0%|t~N=Lb ӵN n_Ɛb 3/39>s<ü<1E?; ``LQNqcHbyD ;yfbǧ/fO_,F6:YݛՃ};ktl,0csa9LpՅ^uAGS3Stu?kszM`j3/y=Tt/IGj)Q7*) a;_'O;-r=5RGXaQGbX[lm5´6|7bjwAt'@B{Suf@֎\99ֹwCV}(fg4kpR#=; R\xbi] <F50fem ^6}ŊI!L9XfmpV f:ȧKy9qd/7i;mYC106fv`}f:W{vy/X/w%.0퍊*^t4ݷCN@h(M*5 k^܌7`64Zb6Mks@Of#\fۅi _rzWdmQ$R߳jq@T^GD}RӭdsPy. 
?~[tWdN;(pXy;& znEvZ f@ߧʧ*8Ba47wc9=B[#mńZ7VHv> AN9zDrǡg a}M Z_Fc dNo)NY_W 4Mm8RCF )>imwv-`F ng^ڻ+^̀%h3jxb&AGK( h&6z+bxmX_jTDm`Ɨ*08'NAV{W2haOnw@TV.T=$f[Y "񓰡M|*is+1T%Nd.ۡ("n_{oV<Ӱ1p{{]l@;L̙oMu#4U ?;q;Dn=YvxRr3]q:D@Ķ䊥e (h] 9wvZNxEg{*hS(4|uAb?ND&3g"tT]㣦(EH 5nHb;ߚ3ʼnt} Z+ө:`%I]^i$ݳ"/'|K`CҢQ!1Rl0๘|ng@;5y7[i#w[Cy KKmmfUo4O^0m-b@fF\89_wRFq,n uܪ^p&8p&#u20s'=TyHxPuR?@4ԣNGqtJ̍yGxF,1´[|y߉K^<9/`AF$=ܶnet?o4`T@&#:(9rv_<ꎔr90hlsp)1 ˿k&L"%5wlԊI᧼|Ȏeߊ|GZV6w*mE:ܔF{|ɔa$Ʌ2UwY<>8#LaR@P{ʿ=:@{< cq>_z-ȫ|~_[KY [ 7>|`rOw}j-;q)c>[cy;715g\IΒ9>^Oޘ?cLd1?x(ǯ=IE /==,|OuJm|*dٶTjBYĔBN`bth.]J9'V*] *Sz[(i,|9l^G2Jd=)lD χ y_!<*,ϟlG]V4SiݪRC[Y* y}q0~_$"tP]zÿ#",/O1g5OMpGrAx|~ߏN$gYwA 4,~&ʿݚ 'R.'y|SnsHR>_|@o5#!z!x{]NM^?k>cx՝lniIsCs%r?VL{wg!Xa-w>ךXAis^@I;L}57[h{=2ĊSr<}R}Ơ˹ch=o922X_^f?VYNʩvC#0t95_KL&0㣯3<(BG߇OAv06s DqS#ӿ@A Wg<`gl(W9w E0I9@KrΟjS?sw{0`?ݒs=TQ#D?W4Rr>Qdyʥg~st0F7W P@Wtɿٺ" r9_@@:f9Wb^eN{G 2[|mg|# _!!sM>R[ NE$wP qm(eU&ٗB؍9(6hxZX)+ިr _T`] WKRlx) ߔπjS<)ωT"TKCb@f7x-3T]DyZ5Qxk׿0ެ{Yoڿy*ŒW[c|R*|[u?'e}n|6Q.>?W5{&{}ظ} |-&'^+#4DX`w% ”=X]O+GDN3p)As/&#>i!.Q¤iB2#`ϓq-Y~>[ Og3b5#އ97TLZ}2C6eD=OZ@+*#re2'BFRw>2§ F0#GE[Θ)fpmE|ȓ>=_a;h t݌~O>/a4L[o?g4"|n@HʕiA6x.yjPKy~y+_؅>g;ǼŽrQKŸsQsxiF w?~# ~ƍa3$Ƀnz\Z>2q@_|S;߶q0hЃr\fd}Ph獧H遢;3h/oƽq(k.LAȌr}0CG({ `fY}4?c2`?>\S]3b]q >e~DC|xg9߶"3؛О=Dm<~o3"оeeʳ^f"'rƻ0´L_ #D[۞Ej؞j۷mGӺB)|\Б -#ctl"TrEj! QQ Tcm`ɇ=Ff[9cc fh ̜Oq>5PALB[焔*ٙ396(:Wi9Ј!M3`1%ӨjŔޜ\)(`< "GH[79_ Ѓc\$qqGtL/9ib -U2vhc:~\iQN1Rx&cq(=S5悔 Y3`6vdL#G6H8PE%ド;syZsZ0LM9_y9FM2%j ޜzjgtT)ϛ`j춉A8֏0]R""80c`~1ctуXGr@9qs}h=5Zn0w=料.c|qz7H 9EL#~ _O~SnӾ1 F1yюO<0fC1! 904#|i߾My}tcbp 1.4izW=%l{('0^>?>(,3>18T ۶_Kf??z5G8Cz^+ x1KЕ^YJضkqeȋu[3|srOj!=9'N+߀yid q!і\s)Kߌ-9 Ƽ'kFl@]9]b,0IA]2<N.tDi{>׃vO"5̾H(wey2̸}P}}a]Obe  ]\orpʽ `:{zG!́?l?5f~>glsn[f~ }1g;MW;̻{9+1.?7f>1\[$5RK+\P7\D(}}? 
]Xbyy"y5+f-H_E6&yqVּ;+|inκNtHcN;*j3x}uAycD5_W|D2 |63J;^}"*Ky:S슓B(BL۶*#ƞ<,/F/އߜ{55㻲 yմ'PN5|;5KfmӾ7,GMz3c^M ^gqNxO2DYڂ|,sèrU4=/ހm=U@QʎC9#3^uC Ipj#"?"J2)A wYMW*],&a[mJM@iLmH(Db;t#gT9뾻&X׍/F;"u _h!w rnZ =^)I~yjNrwq {zb8JXtRWfTDz!ƕ) qhz˸ yچqAes67\WV~f`I=dExo}E>%Ȱ}ɞ^x7?Y9ƧRf=~8gz'uJ_{W~;?%pA㝻ҕZn$ۻx5`s[飒吲*Aʃc\L~[9Z+I5?K{tJԲ?ơ0OԎ6lA*]}<۶ Z~}$Ҽ]\W_re&=z40o\] qrH=f@GVGfZhN:3l[ubZ\=lFӲG}~j[-1ч0e!ɾqH{Ϝfq}gu~\]\Q{и$:rW7z[lPg:DڿC{qy (/SHgq'9*o`|by!ZwLb&ߧ]mf!}6B¥ߏ3 ie(ѯ:93H`LVZ/c=/v|z3[||mgWݷ!z>\cw_6 N̈́}™.:KWg/8$kq6z+*F+vfTuǚ|[է0_]4*^}`ϻxg.OPYiޢ^|HVezsn2C7HA'6ĔHvUv^g۔g*~JQb(y*cJ }zbƇ\!?LMwcdyhnأknxPad۶ &3NJxv47:DRn%,bFь;D3A5o}ӹtP:mu*HӜn?i.C6E9RNvRڔ󥼬v[|_yAި=q~n4i2}v3E˹ kw\du|ƩF})R:Q+`\Lٺ-=E?7n}/ףσ:6:)`<Ejgot1>aoq^4[A:(` /z0vӜ^-Ǽ!QR~9sxo> 2Mٷ߇=uAwېӽ k, :N܏U񽮼!Wx+ r`yk+ϱ> x|{0&r= )7X韴F?R 7< ~.u+Vgr~>aBQ\}@Owq|H2HѾǏ_C͐E|'sOv%B]se1R/6r/\?]-{w]{^խhJ w9Yq,|HVO>x̻y>{tS/׋o9tI?>CN.Y8\oS&+k 9H{[ӹ^ēH_vAYQޡnD*Ԛ׫7xþ 6F{;~E}@U`֭z|!SB'9тzdpG9Gj%gxp2o. 7w}&e5R_& oaljA>A WE#c|W5(Rn;G*d0DWF*Aƣn~NƢ`l5,2IjEsF,)/2H_F Φ#E06.-J?0Fq7qM8Z`U,XUXeX%fv+*ӌXdw.a9jbF:xRdP|Dm~%Wߏ nϯbkftxJ3*/^U31U2EȟO*QZCG*Q1,DƣC|D{d/+a 8p-O%cyW/ȸ͈QO.ۂQ9j03Vwmf #@N+3`V32z8/(ȵ?ĸ0WYyS? o9uw6GG9JPY)Su#ye5uT"bь>fnŌƬp9zhqN~qqu|3>W>VGVCZ$e8xRZ_y}??uپ'U] cH.yKؓϕ9Ey~/+jw^fȽ+p}ae7|q|PǕ~^ub\ My;(U3(c?<9!f.ʋ.2%Jwؿ42x[ Ht~){1O׸_}|]/Iگ4ׇacyT2v?Ot}V֋7j s>oGpo>QNLY8]vl 3xp~kYX1H'b' iqد]\X7񻉑Yg]HTb9;r0q|>|u<:OK1{w\*d;LV:޸zPp|뿙ϒW?H{׹?^g o70pG9?_޿YΏPo=b=,/8^]7|>u__(_?/Toχqa܏O_Ⱦi/3o'x?0US431ϧ߀9^߅Л"M;^Ot+j {}A`wdU7pa6Ώ#_cs\ߨ.ϏApaY=_7zB=̴UC Āz>b|_i9?FL~Ύ|v&Y'Q!o&13dV{1fdǷE3q7U _G>&Ք+#ĭ\K"|igrCc3 ؿl{Q_5[#qUjv; !l}x:U]*Lѐ~a7w"f]}5ߔ=m©mg_Z׫7L"R:iRZ2Qb4GC_oKnmRuKM v 1;|T m5ls?^,8k/2`hz|M߃/o?̠ź9Aso~B,Bs?bX\/R˘߆e/7/Er} W 'y57dr:Ts}SVgiS?;;˙R}k/4_aY=ui~6&r}C5X_;x]p~w|0^)5_ 2҇Ud{}@/_/~`z;C)T %Z#{g<8Ա+ԯձ6&`w:@R?uJF/[vW|Ggk'tdclU]fNwϿSQ^6B < +0K_WG?f=?A֊bq4WcBr^cG(;3'I/8єJ_Mn?~^rI?UqI{%4U=o`_{} ~}8(?>>|; ^ygo>!ƋNT8&? 
KQNt4=jm{ 󵚿9e|fX8S&O({am=]qZ?`ޕ 4>Tx+Tˮ>=/_6O&ώmDxnVH** `|/$g|x?ڨZy,xLʜ_l_1Q\'!Q5;#❝ĵwZ鏿:oɅ|3o%i&_8GZćea|?6kݗ-K'ma4x'lP~x*iǹp Aݜ8w;h!97e'82S#g ׺}YAaXx. 1C|IJ~}3f{#H?3[ob~} dGק|'ٷ^/4>,^Տ`Gy~5 qHo?g7+9?'ORnoWlo'E[ߟs yRN/lzN|/>l_mìp _R_?&xx99|9KE'T?ݖChx?%q{W/L4|>;xa)ߝ7g0tī/O} ^|U3;OA"dwĝ|7=2=_?ۧJY9 -Q7!Yx(0%۰{6ϷAϓ~WV5zo1ߦH>/~ϻĢl処n'xf!)OC298Aq>5? lM{'BTK'w=:?j7~k_8d ̯:qy;{ۧ]M"螯iĭgZw ѺG%6o)K_Ae&gw~|M{6n>:3#ćէ+1G_yבhpb{ntr_ǯ3/XF\O|0t|ԘO}~ﭖ?>iS?#\0C|S͟?g37'?z]o)7~9YAlחٛ<~;3\j<I7){ ƛBVdmC.|u~{d =έg*^-L|ߵ6ׯ>x\)7Og`<.?[O(b}RWAضUgzW\oq=썔ύ 9G:ߗ]M{ri|`׍]~~{8mh~wOx~$Bj >z?)ya_zMmTɟq|Aϱ>fwfzgiF~Hj} )w3r>1h'i}WGri1m%L4+'EädO*#ڕxB'\{|.o's=[X'?Nw)_z'x^NYo6:y|-q<9~>՜}|G~N N~\oM~"狪}uo4_pč_ogR[׫{wOjc4뫑۩ GeC])7u~; /\_ƧAj\S)o*# _O8οatfq`x$q~-pzUf5<)c?~>ى1Se1Hh=Yb]s:9 [CZ׍ a|n Yrbz8Lcy}"NhsA˷=cr^VQuNvaz/$e<5T 2˔DWׇ#j^5elģN6]ma! ݦ9Y'Wf$x=u8;z*[W.K̯L*zݬhg,1EVpv\.bq=3)A I/gvxix:z|'b{7(ߓuquBö']?)SGG&擲nXOgj}tl; 3?"+muxKxZRCr~jg~!%:{iʷ:~򧜏j7a5$wS0bu%e 9g X%wf9JUe!wӸ{g:絗_-1gp8Jy3'j\u0VUs06[7C+?ٞv9n{(?d.ڦfat,ͺT YJU(.qgR 8kM=CzYY&2JcJMrF: opZÁO 罸W/3ǿ<.n/n)OYqR[uqymj䉏ΚW+s]϶6RHy;9~bŋ'+uO~u+W;8k-P SZiyƵub*s 4c69l4#$*W} }}=1j&ykWgMfKg6“>avXie@x'h9 cD]&^Bz+:be euCNXxI>(ּxo jϸsx*FxhM j'~uO#N#ayϬZO.5jE~ϔJ{iѮ17*Ka?5:+#6s^R2t_Ay{!> 3~?&H^Ȝ[21<Ŝ&\9īW &1sqSޖ+9;m<O= N99(F^r9g[^ٚOt/ǜ<|$4_9:/ в+E[fgN*l=ںך _kw[M]zF_q%u9ModaL|!x]b>(bmˋZtxS =)oZ5V~^aM>ʕƚjo]U>wAB&d#߷[ GP字a;%K!n=igSZZwW2#ZF#tʓ@ϯ|rTf䪂Akʸ ?SP<54;>i=Gvd+8c>}1 ;9dx~dYQGxގN|^v޷xN^K~ Cg gD[U9H}m}PCT&P*F=OvZe#$QaB|j%C#/OXX j!foȠ|<'nUФ>qO*C](FTܘ72 ?Ee5-_GʅԇsXLoBC܏~1Fdod+2++D?? 
dG]%OyEw$2؋-!c{*eRs#?ž aNʚH5XUck6}7l}AјӾSi5Wvt73~=180A>{8.q3B?ZS0߻oVn!{4ȢpTeoUUY l%qO~^5{3=LY/skQiySIȫղö2RӪ|m/#@IK>SoYY;rt4i%)р?Yg㠜{\soER{m3{I[+:vǽgW-ݚLFۺLyc"=6m#[|Wb!fmM͵?m=BT}Tiѩ^̽n'{7n'i_}U}UՇ{6?vgVaN#SoM|Sa,a,w%QX;_._;Gc@y*c|Fph`^!pmFؖNj~mkE$s끨^jYGa>}>!_ǭ?>id]#1x ̋c}[:gL|缝'kDKxsE0_9vz/> K֍ Z^YORAOw b5zP1n/M$rXz}wt61|81$bV4T[}t,sbA^9_$ND>OS+|eJkxOLbWoZQެTAk\F Ӊ띴`3/Si@2n!?_9)LVhOxoOá_DkTYX?# #ێ}n~>hP^o;ʆsLO[9/j?QxL_&~@/ʬFGʓ׻F~4z][/rD{ +_t]w8>׻C>Y䳛~NkTؿFNW֛ lUz7U>]%o05U_q3@TO֣_w}<;_Fr}A~}T߆fcv>9i^N/PLiMo~>ONHl~A>oOΧN>q[?lSō?=]$}gs3TxZ~^WYᨿ~s^v0S]yv, W?F:ys$_:oSge1Y}$7wkO5rHuW7}'\?V&3۾@|t4H| Lك)޼r39`:>m~>U}|ܾZ}9kso?ʷm:z>iOTC.#'Ng7PO(pޚ|Qp>>f}_?TSʃ|sp;?DI:3U2{-Bf=ٔqIay2^:÷gQ/{_EM#_Y{l?|%@zl51^ oѢ\ٞj ,~{E',ȷ8) B;^%ھYÌT;Y֫;Fim?([y R36gzu$6NZۿ:q#z̘|"ؗzq?r+"pQ% Y䠽\8(FR(ՠ.~!Q׿Fy(F̙ j=Ƙ$s#*GL]S^7DQaIa{ʘcz?adZFYx}ƍZj i|E!<;U2Qq*o氎x!+&QbB!r-&yG=ywqv.Fqv(,۷8J$w2^t27SZ/Rqʔ}a +bz2 SY^$^gɦmYmԪ5ҖZ۸ P{g[{\Ƥy$Ytmeږ9,.COKpO/ z_fKc7w-A^|cusʹJR4`ۯ^{yen̏Br ME,Ғ7/?fd-$?-ǿ71 {uYO78)sPX)b0'_2V#(~tD&g+5tw=+B/k|Ƨۏ>qK&'#'5j:*k5yb.XmMO4RiX 7z1 TYQUUqJ#(|#m ^ݔ yz`3HxP,Z7+ܧS>ԲaK_V Va,9H=-pE܇o ֽ1=W^u?'&& 缛kK">DXG>ٚ5V2$5lAVRzu5d ?I=0?22gpSXl/tFtɝ"OG.M:Ddݑ7IK 5/c!mYp`U0r3~ڰ؀&n>U1yJ9 wZ&沪6z<}8"++nV1`,cjRҌߣGb;2vb׊nmN:<|nOWkg\,o|ecKwܜcӶE@tfZ M2{(S^T/"ܫ>M£BNJ¤L?Eќޕ>.<Mc]҄_x4pIxLѳo@MV|=ъ A1+^ P!#L:!y>>S~GN5+֍]D|&2W&=l~/T bD27Rz`{wQCh)gi: ΢nbTQ~Hu k]!XE\qa3VsW^%2M f*%g>\H{ O⒢|Ts/uX5T5k 0GS|`iϻ'ˑ?Xe0h)udugƈʌUZg13B0T:_;7zS%?x,)qPg󃪎#(]pCk9B$LUm՞a?=UCQFa~yw 9 PODru`7p L~j؃{Zʍs6|?нהVݡ!\cS]~+1ha3yVCk(uPHLr$>ֶ\"xgޅth*~<]7TFiu =}9R~X79^z Q\vULY% r6"`uАBVvNnΌsh] glFbU2b1d{[sZ]N\˴(jX 346?׉uȀɸy"W[5b3۲+؊쯴x4zRٮ!Q=5r͵;9Chp.v HLlxi3{s7{~;މUS]]3cz|k<sӁbT{negoЉ 5 vdJ,x`6Mvעd0v@d-˽|<[X0\/N\_}xpOO~Gv43kuצdC( 5~xpmLnWfyaݞJv3>][ob뚽ړYL{> k^#C'jRDGuΣeu86ߊ,vRWkz=WiȨU}nd!O9-Z?.'YRXCތ@rՓSƎHFhs߷%5讫+cxV~V)jŖܾɓYݴ+3 4uxA&5R{\:NwoN7T@w)>.UŞ= |A#VךF=~'O.zju9ѩPyư"8Y}~bX.IQC"w>MYȇL'&t3Osc*nIobNEGiy B`8Ϸ1"@J04-D7FA^V\}[+WfuEƸ-_LW)1 =4:UᓨcҎ}d]L<>FM}I!S_Bx#`kiLHy}:qet7"iM3u{yQޛf̼6$oPs,eTz/|ͧExsYF*܃vQ0׫\ _3ֹOoʚHz1ڋ+LT?_^r4ŧf. 
WMLYkF :J_w%^6;O}ٻp,fETC3ww${N|5sw4Wu_lZrM;ۧQx;}Vw'F?zcnd̍O{-?\f6>b61X\!9N{ ߎyZGq yd?8syǤ㶍Vmmy~\v:!uT̥55j_t 14٣"x""y7%*9@,>q=1Z9TmkUӉ=(ZpY3֫B4c3h$7yft$J9:C&c-kLk/iGrP}n\ߛ]rZ$IgP8WSF+eE }!J+ŗ7xwע]$ݪ\f> q?/uB3 3PKj_RH3pF;c׀e5A˰GB}Ъk1?lBâ8$fs)nuM>&^y':9>"LZ4ZGZ>H)),o+&\,)|pdry4djf 2Դ"d ;ʟ?w{WSéJT^d/d\z w>fs}H.̈,i}c&V!|λ~[͖ZxWn}WO䴂gS-YjU\tԯDTBZĖYȳU22/SDzrc-_?1^Pm5WK.?v]'t &c!h1% îD]1kB/obqF{3MZ]w2n]gKQCO t:cܸ=)C~@q:s]qgvGgf[HOqSm?}$9U-,[rz8)@]q098%zW|] qH]c.dIu6oSƻ"9fs ~w69\_9'IAFJn:VgZmse7~Ut±rrg{IN}'+mG @[B*UP5x[j}SNNj_!p^WY=U(e^U{CH+buEۻߑibQl~VdrW$r{Ȫ܊5Śra$U;OSj0׺*n2tb}3uӃVkdDmZ/^׿LVq;^$:jr$R+8YSmx.O=Sy=rwJww_u!Īg1{3Ssn\wcWL=I'RnʅhSIAEIqۆum}tOXgORX=#W-{O __U?o- ߿nH U]V V ՎeqIڥ٣? ,;4]iyP%SՇq8Ѯic"1Er:mVωۇuEB)|lC(tNKI'e\wܐej80!OyQC>jgl!;b9}a>`Ж<;)q^'g˜ywq] cv Hƈ es?`03"zȼȲߌB5:˴(?Bxc.L8 sj0+9qAa'YR( |Z%/_+;w^lj>eZ>kFC*2yK9^`Io' 7z WPzIʇʔb1KE(yxV pt0Ru],ׁ+%ey,ȯPJ=:<ǰ Xа}t/0(/{ =#$UU(nĥ0FW|. a{-˳a02&Y?t$P-xTY# mBI#T ;7 LNceyV G /O "ұﺎt5#'8u@li ~uhC88ԘA]53WQ\CQ3MmyLʹOln_2ć늊6DUꍰ=oR0s*#0tzWr!xvUD住yb7G8 o8dugj*YS!gS^dC _&B.MQt)jbdhl_qE#t+UGhU,Wף^q_^7ɩߢأO+0=Z$y=uwz]~|>!g5#{iG|hq,ӣ-5yB'o}b|޿FBwX^ }7bA1`V-*D$0By|W^|D= Qqx '*w7?l><Y/1䫂r1(;# g0߫~#}DsY]&IA)._}*ܘrP'z-' eoވVUä4uUX2y+naNAoa?(!Sfhéh2KaA~`=TJ:A^9?HG_{/yHlx(ʌřqb,=XX 3gh)qerl=oY>LjShk3e1aP/_oDz,mNc6x.?c(atƔM3A=G7mXqڡ3,UByC:ge#P ) #/kV-͌r iixғRk{ֱagCL{([Qna|ta~~(Z{x=Oe{)=3[NDYTp[Y#ܨ<32PqSPQЙ2fp=Z:;yqMBqnf.a27)NMl2C۬S]gyǔDNI3NuRN+87}3;8wlRkr*Y3^K(Ƣnfʔ-ߧ(jtHS{X=͂!I4^J=zY3/+&Oyzo$YҹKы9%Hױ%:Yk$9B^ OyS~AE*$dI:˙B5ɓ+P{ =MB#vɃי[}.±tnsۋHV}z"1xIxXʉIO,Jd,USrʗ?PX  ߾)_qo b5؟R۞9- ǟkvV*GS%@US\^z5SgSW ]tKu~~aQ4R`Չ8=%ܼoWGPNtqnEQa/$'\5dqM[_y LS,t_hqEY~6:쪘$IK\H~p@<3N7ol rld:RV#BBQ.AUQ6-RWޔ{TMd5Y=,eɐy?k\QF|.ZOJSy?vܠ, ! N|wrLK%oF$FY/58^ Ŕ$ 0]ReySqi&$M3߂ ѻś(|^zZb_='Me%ե_&|)+k$#gS%`~a}^SHlQٿ3!0q>8_wȸhmr'_F(UpLgmG7OEkH3Rz ;+^E\u&nKO ?&Bl*ihΟ%k-!}]-I.$Kiމ#t}nT;Xm1 _<)T%jHB6&x3N%b!g'vbR>X,e%p!=Zs ƣ^=> 󯻀aD}Q LO8Rd'8~;< к9/y~\/~ Ǹ%\HC))|Ox` L쥌;Q2%i bq FL{p>8cЗ)u =/I;7! 
: P xlIʐIDxt0bXٍiϊg`bX|ps.;, C˄LKJDH9gAĊLmuؗS}?IXJd;kc:K ,L*@ ILa֤}{mLRi9=[ ęJtqE%Sn y$U-s7y>Km*.Y&M*~?Ћ! ٗwJykdo`S&Dp>$߄4H'`c0ߏqLI8C}c@i|_J}^Bkמm<lsi߽B{>8_ ۟i?RB}4; VP#KHlb쯸%+u/7kHS#02-<( K)w˓I8UxoxP_b0w||J35Q;b_yooX>mGy}G<=2C/~itFL?/DzwU ]B/:B_HݲJ?%7?we0BD\𿚴vw. ijLt0P?S ~ \w?`^;abhq GB7Rm36cgNc=D!Bij3L0-La!Kv7ddç+`ypU5ؒ" ˹v 13]b g8+3|"~AF1/XH( d:)16l1{XiەmσA*.,ng- !ɞ<(lpNV׎n^T[oܷј xE fL!7 V_O3!Y q~Z7r7g_wEhFCnxRs3KFPZئz#;dyYܼDZ=t39kyȰ%~rXJ/ik!4D8!?"(KaFŵ 6YLJzgsrm[Ln 1-;6ſ_5,놷n( /˵i}2u b?[dpW>WNy3} ?jFʟhR| "M(#V&~~>5dL䎻;ы2$?} AU*Wcoby4^*M޶f_BW%FV**SO_nE_|#{VP><+;u_l1,Π}+Oauuegk@GIO+XcAn&Ժ#ySgM+[Ė5MD5Mp8~'~w9{?ȰC{DrO&Ă&Kmٱ%F KrϪ}8[[kө.rzl"p}㋩.Brcl~j=~Flu{>ЗЯ; |*#\1dI1L0APXPVab4DJ_ u$6eL7w1]>cp^oDZ5|iYtl~;VK /_ʃ#s'\ٿӾ |ow~0v~|cشGM&f{G쵿ٽ~ёA5!<}Of~V}{f}mo]O5c9rGY nA_;ga`Ro-U㇥K'rl }t:i[Ρ^"9a'ܡc3^ݤ ?JmM*:÷p=ZŽSNqctIgYIo4kAqSCy!h `Զ9jAOo/b*ja*+AF``&Ұ~XU|X< ?m$oNybcH9lm5ge,H;c_/剽4 I 苅MŶkcke %V{oVƮkflOM?iשׂ[\~t#rArRV{_'VyR.SWN04ʉ.1K;ǧ;V1?bn4D^/>Pxgu%*ow>bCTW66b}}_ی]h?!y7SlWLo5h_bFHn>䝼cKՅpزjط%`TϏ2]`͟7pѽt={mc܌MCPv"$ #uXO+8QQO8 t٭fN}܎gaN@MYT0+lˇ h;, ٣/'d!e@%ef:gf!) '= Ǟ Ж{(g#iS>߶2@~E=c&;f~Hq9Yv-eXU>"J{C2$z-Wi0{~|4Y=^{JsClT w!BeW<_lm?Oqƪ}QFad@ZA$:x~5ٌ$q<:Rn7ycОl9U@Yi MgDO:(PwG1"F?({?yfXW]7zga;KU`|>?[#|MHE&G| ?O#38dg!ZBxY{ዂ@܌x=9A'6[Ƌ8^ʝ`~"סz*eT! 
y!/i[ M ohi{h^h sUVlOu{{}C`1d&Nҝ+DBHa$^ m)f nRm 'c]'ug9șj\MϠL't(jCwS 0 (RV.aµ`09C-xk87qGn qE*Vٌ~)f`pAѧ~GcVлZgClm{ x[9\hl,} ?Emߵ@K'-rub\7t!6N鿼0z5'K,T?Exvun ,#ع$Gz0^Klxt?7w˹Swc1Im97|7}|͹Q`mv M>>2w_/>uݥڇs?8 gdnEG{Oq^;Aۙy#v ܂=t̥i=n(C,dq\n%V>K.7r"xM&c TI:RBw/G'1J{iiT‘ǣNjwIxTZ\~ԨZDvpQSхtR/lȰfؑ:?T>IUپSOx~IX/9 ^_+~2`w?n"yީ9$L_Y&b'g{KBX}o2)6G&=5|& Ԕ",⭤W&lHM:u)vQ5 $fr'g7.u.KҬ;`6vqW%C*R5`8>K'cԏe?-9σx w'[+^O6dBխ!4\{S..Ӑ=Si{o1T/js.|/[1^l@Ga_t:=IzwEї+ ۗa} qn֛YlσHQ5pjC]7<7{4a {SRտ)o$uk,u$o b]A `'&{W,K h"28 [%f6q`˞vpJ.:3/I䆑Jkt w 2LBƋTat0 lX= JH}ĭ;BN̼քjf/젩Ꝑ?WB)iQY ljNzz,TMVw*11Ӫ=0orjD,SDӴtgMϴMijZ^rikb2"X๬@*1˺D"-:P2Y7p.f{~@emWtu!xi?DGi=`ZZ0QNLG^֠b՗gf3!mso.:qK2m.DвNO^uU@#IVźp`o;D4]b4蓱* Yqr a$Q*@=lBqkr`M ٓ^/$lǤV?9OvNGIrnOB6+0;FlHVm E,Xoi6dP;f9 gYD\6z>tħx~),FWrvܵÊ}ÕRWJ.Q4HԀR>exb*d'S0~^ޒyp:j?D|m^cOfd"aVj~sǥjWv%a\f8>yf~kAfІRuBԿd%{X^1Im-: >zXbqr]:4nߙݣW) m=+B5kP In)٠N[~)B()gmem 1 %&kY&BvfYCL5mC-?[/-,m".<25$@ we ! hyÔ$HE$jzvcxrER7)MD~UN$.kd7JU/+2r2|s.9SGu;E$ ),[OX?Dw٥QnT U y*W^N$[X!,OFraR#KkxzNE:1eU;kkBZ6}E3DeJODY?W4BZ[-ᅍCSUjMZҙÈw#H^ҎB9]K N &n4ǂ4`_:>4ZFZ&ʆ6ylz}Ukj=M}kBig`{-T0YMjV9c@*ϱ!q,e;X'vaZߓ 1jBegbѤP\ŠwٹՍԔ\9u5SmIh+<7*k+*Xv>؅?꨹x09Yxlqf! % ֲTd-s$MV*/Z c)x$WX: jXLx/ụ?HYf1?)䶚{v]2s[&„}D5-r"q[MmK#sk G+fgG>Qcx}%Cy^Bʄ[ 1qh-I#px =B99F`z'.,$Ϙ^#;rVQj FU8՛h+xvG#+-/J_όcXh:|cz&ul_+WABN?6%&겝2g(pՕt|򡣍Ëp\KilM(s\jE1JuJ , ,xXIM%+oؘ2:{R}sXqNl5Y㪭_D#0 hkVcN7+'۵x1++Дyu'[(s_Nk±"+wcy姕8A8ıs4/ܣ|~+(^l~DW+(~,tL-OܽTsl|l51(̫~[ 5zvơ>jv0慅p=KP[jZ=._9i-ؖӜ;0X6VWݹ5kll-eυF5E'^jpokǰkTe$Rl$Ѕ' {eۡ7k%r ~.>乑ʹu|I7z8IݐRC޿o}u IhIxc21Y)M/x}>E;˿ow\~}h+;jmS_{ӖD (y_04j8i|l|%V6zc ߓN1`OLIߋ Vo= X JYC ?_䩟\*ov[*Rߑ.(aM$\ɿ`a*]$m3a>/@GLcq ҾG]t.~H5f\JHiU)P6aRn!kW=%m49`6p̩H1rAIEMO=o9r)CJ2o)t_j&G4g͝F:XPUsŖ?b0KeJ#k^}еLeO>CIXLZ!_,TPuca"NXi6/*[ DZ$6iޖ$^tD,38nb ΁MR@šҸ;q/n+=Z`fd!7ȝ,`2=KK%BԂR ;,[6д| i~0yYOr&-$h(oJz`oA*c,eCHm#VqJHa-A79(X`497 ƈ)=yO׳m&(&ǰ{A$}}(#GjhV=KM{S/mi} Ff~e3gHŽ̘p_lPѦQoh>|wW!#VGGjiLξh6t4mrR. 
Xh>+FK>[{vuF.\Nӵ\d_@P;p7AtWQ򦡤>smZ85g\(?kw5lV~җ{Mxgn=Ţ'[YCFkIs^F1Ϊ+ov׹ED/y~1~ZVZN`*4a):/hkuVɱH%lB2ٖ\%mYfl};Ce緝7+מ;v%6g 6(S1I$蘞m._>:єjh :?c$t <_˹nt+ޯ GmLO7GBM%I)P q";vN`ex,|%3U F_`&9pL0!_yFt|Z 8lrkYi>щhf2Az4.^,9Mz7+ Jk9:jwUQ䛒nH-}ӱՒg0Oޣ!"mΏ{fj$1hr,jzaC!U>l)4-u.-W'Qi pm* T& JZѻܿlmBtJs4ZNfTtl%=fNG=׬ÖcaV6Yn9:mvAZqUc4'Ư\ip _LCu33Z~c49 Qx_+m@d4R3qHXb:cZirhaN6\x2Y0Eirh;gk{ml})fA@@!Jwm }fzd FhTfz`9hrg˷, mu+Yi-zf+yੵm쓮4 zBХ631}u.Ghʩ??t֗3W63nkEJ*m#tn?ohM[g{+ݖ_*TŵDi uT᷀>n?j 1/}LCFmr]!#4P8h{j0a "Ǐ~d {=ɫ;[FιISRsnҔtduB}@UyF4uy3X!?.?+fNƋD6 מqLj3L/ty349p_iL3g(}:v֦JYpPFL<^!a.Op sGi>6}H/ۼ496/hbaZ[7T "]ipƯ=<_f_e>Rd|s 0=|7+܈J6Ւlj9Z8 B&{>XE`)<1̈́~3 ny15i+}^ߙKirL~-1zåX  IѦIK)a"D“ KݰJ/:ZjSJEڬ>vϞ4iuu%7\͠._FLփ])OgCis59Fir]֥<=fC|:ʛn71jɁh1y3>N[ ju J۷u ;0Aܢ;}\w_Jcl†_Atcм<j&xz(}i YcѪ c*k)5,_84m~:v{~0OѶ:f}i{U\:Uݒنߣr5{ʏkz!+x>Ct491ڞ!:mnC9GiqѦ=oJorfõUCǰ%#$tJoRuopIxvފ܅rmpgF49n-M5cr..]m/Z6')J{` U 6D^gPg0}zaF^.G4ݥשh5 Ȟ#m3'=g*ٮ tm.LjY];56k[2)!3Ew3:Y;w%Lu748 >|iFLa4\:a4V31ڞQ3>ٙ=7 Y17R: S)ڞ+MT}]m)Ϥ2mi2毀MID|!'}] A%I)m0yNum؋ˣJ3@t/r`kDJ3D*ݫ0u`o =ե!i 5ՂA_ .MPtYZw/8,[myVi:kqyJ.Qr/W$ ׽ZzWaIyPfezկmhQW%k[p(z[0ڞQf.5 s:1gt< 1}}#&1W~. 2qPB>kIX+;ևc&k\H9SJN'9W:xį:gT΃)V\7T`&`yF_BbduMGLJܮFU(-4wlX7&(MZtgyupw&R<սOZv5d78h{|Aqc>+ ۲498<̅4\Jiҍoݾ#&딵meLq* VNadtfc}Nj1)O_Q9ZKVJ[t<1Uintek/ItTk4:9^'#M0ھv4|/NAR {理#xM,%(z:@)aᯜsRj7WY)Ngt]~ \;x*g4JڸwD$cV49p N5XZ=490|ҵ|cul}tfCNg Zip| ,FWgt-JճBrpj؍%Bu킥֥Gt̉Tc?7!tonW}r4(݋K)1?ZRѪJi?ϐ S<ɏsߢt>*?{5Ш4pYtௐ-!;7Z&k*m!];+o۷H>4IW-_Kˎ3o=ⶔg盛hF3ɯ~V=j%k=Cvj. B.4PN6+oF!=f1:AibM҉̑jZғ%TLNdfMl9Jo طh߷oZEoY+kgy }@D?kƉ yj7+8Dn66HeKy،ҕSfN}L'}dP49`5zҏWcpMQe&Oٶ45Y3f}hg}g!"z!CB-c7ApۚзY<<-݁J;ТfjkFiӵ+O>J/@g4=o>lE۩(zޚ`<6=Y*i~;aPp v}}I E31Ϩ* 7\X5AMe!iV1%MiS%NcO9lթ48*z݊Ti>seH8.8.=$kAjbvɁL=E>QbX;!\L}~ lhr @ZdbVPE}{ըpyN7g.X!ۣt/:9fDɛ8%})M\pļ@`+1bFLBi?er3c|3*[ ]("%Ԫ"qiw+|xFT݊&*IRϲJmǞ!W=Mhj DAa49P HpW7=EZe״9-JN h9VNMzE0U&ǥ'S17RX@Cd0fKo|FKyPD|E?g_TމөrSBnrFf5׺h$7㪷z"iEi-eMMg7n#OZgP''fa=T-Ywc"?VRy|QƆ{N(35@N9$=#cCȄ|H|/INeP2_CJ5*#)B)QZdCt;JCt ߂^an]~~4UҾ-M7²ouW+y_D8+܈}\zz<EԻ<@~KbȽio1s|v|/>_/K9Sde.MJ8᨟=pE8Gx! 
Oَk;S^=k +|u L <; O XcЊ;̽v/"όy_$@٤oh6Cu8#m-#!}PHjG&{ r鼈wl&v;Jz^̸NOH9FGX_bh bE!3{M8>DLx n,L@8sb39 #$!Zn ;znwtvO@prIIiiĮ;" n/›-os--!델v>L+w֚k ! =sB| .csUE(pn!N?l@ !OD%":BP&wڳzbrC >nɈb9)&^)O1yD/S "(B7 װVaZ\54@7.j`1 +S)#!o '/z6#8m2yfȽh)r`;o$;N_{iBh/I,!ؔG 5]s-/B{8I'DqQ鰸*]w>7فp*mpbxFKFqi#5Cڣ^[M_šPs*-7,<|(4yVIoF:Sp&-5!NW==!<+ge < &'!XEFh%M%8ObvDZuj`aWpX<:!b)vW_bzPBxQ~ ~ 0i;w^&\C@5pz_8+2sVUr %ف(!8 V{0FՈؓ㐶J Q߳#E>#{<"!3@!zЊ(gC;c ˁ=.!.1?ȑye^c*ųb^:,i9;,䄨clgB1)4G!mʄc38Q&={},pr _zB>ԇ"@K/hMg)s_+g=;!ǡ>y/B_}r/B_;#zґ+n+(Va盛Ɩ>#A}ȫz%#}|>}&Dg_i;wާ={ߑ_嗓Pc *"jD~p*58rso_Tq' 8o^嵎Ϟ6!<&^@xFMVw6!jON=Y穢"8Ѧ%^CW/1R*#kpe6!+1Cw^c5YQ5'݅&1b/BƿL`9Lbp›g_̈( ѱŝ5$j?y/U%^k0W;2W#me3wkw#G +/[7q$#^re$_+n JX6ɟrl @>VJgdFA 6,4!xYu|1MHȇ;E5|ѨڎJ94$nB>d*Wp{7PszӭQrb!fx.*[D B aD\' ȇ݁| "b;@>豫1ו+#!dT(MȇNj|ذy%C}_C,ܥb.!;ȇX8.nw&Cqlވ{r_\{ :J^XJX6b/B]qKBF>Ah'IP69KSfzݡ<[<Q ÓB+ !Ǧy/B RH$ eу 1/B<5=rLFW|w͈>ß{hvYὈ3BG~iU'\ q\۷n-荘rt~3bl'Suu.!F1M1;P *-3uMXFH9W [ 9Š"8ve jwU#&n拳y/$V#| KZ:OK Cyz/B˿X׎;Լz׀}r }(ޛղpD7/wG *aON94W_x'rL~wxrd19ѻi\_cewVx:/B=hcBȁ7((fěJޔȯ7qR%V+e^EqY߫6ϪjW9HrL̤i {Heoi&s학_߂3E>xfmBqUFG@>viF&Cjv{M9Ē5B:$c5g+(!xv2"ߟCny+1y !Ǡ}c/yuB渚Ǫ=ٵBTI}lJ/aE1CtȄPª>KbBsA?eYq,?1-Ǭ͂IYώ|>=~E><'cm9^xƋ|^G`D_#4StVy9<1߁kN/BsӹΜđ!G?כP#TG zf{UAvCvab !vxkBB}=/򹕥L"[i{DS&D ?nMX&yn=/6*AaBnv |S*/9e+|kпSe'D j'DsEh"CAl/^sB4|bE= lUBѿĩnT8|+>#ϺbsNܾ9S%@1ŋCl*u܌|VQXF짛gCK;#~Nۭ2vv #:$Dcs Ѿ[ZZH838Q+ e{^S5BCrT%sB"6!+n6v !8pj]Ɉ ګj̈Ů;#=TU!N/B*BNDW@!_e,'u׾u`$Z~=3(Vocr>Ưl֤;bC|-oo\o@1vfDv[px[$ӄCwʈ>cZ$C$u0/OB>%\nŝt!Gy_fu4E!jݢ;jZ+/G {t+f!I>Kܥ^E18OV*!* , B<\B><,5!jwV4!䀴XUEc^#!LjV*!*t$CecEڙrhe򡓀wl&Î(d18~A궾pp7/Bb&!ԃ7@~ՔyV:^Ehە PYq kB><[ 1lbm%'c1 /򡎳gB>t.U?%F>TzZG}?6}=!jA,0DBqWc UknWAgqBUBZont/R#k\ǣ rެ rz ЛPGE߰99>`&Q 8!p)ۑt%~rYx^f"أcNB>D3BCOGvȇ8MjǜDZġ4ƶ昐߬ S7ƜF;fN VY٦K%" tGB}_yrYßmn#'rkQ(!{0/j]#ebR`E"0ʷYEkzݑI"'w;$g&~`Y-^ iW^v"zS4!䘴2|ag"uv93OfKo9^@ Xt Qcp!>_DOŽOSV|;ZCRpLz +~B!֟ڽ4K n{q~%E3X9\F_!p8n|rR@Q=*!}Uꦁ(Ga? 
!z5Q!VxF_DW@~c{gEȡkAևNj|'9^DUÇ1_kc6 =B*oXk'7 8N2+elJ1+yOЄo=x0gֈ/*<]wDD($_!Nzs8QL@~{-$I"@& "Q)Z7׭к@Bh(\QH-#h?!LN AWT C ; Jg'w1ύשޚZ z[&i uVzms5:s=]^D9:IX>G 99WwExJKHJEzۯv> ~8 LMxվJTG~4'Ћ#Qesj+ P'Or񸜄|.&k\'Cu)r\Iȇ7RdX^O"ŢXx9d<񈙄g&qD5byĮX;"9]'OKȇh[9ZtLȇHA{HYɚE>ưNh(auCtiԢBKMaB KM ،8-Vm9bޅEQH/B=n?xP/B-ቷtc'ZJD1Be7Phzmn+nHǎ[)9b(^:#䐯/+Hg_W9kNƱ_]J[iFu3T9]Eȡ6k,r&rU9BI{״] =9B?,N!ǡJ+呲/B8'k]c}fDxab@Tg>µZJ;U_0Rֲ}BI=^CxMkA96c< E>čk9!a}rlmN%:]3DHoo!!ϻRG%C]IsB#/5cS&y!ǡh Xzu} _Ɏg!P_:BܚF鋐C78OE>d>6ޚl}$c%NMBo蹑/y^c}4͍:/BOM9"+{܄CK9_#E}2½_1cBIMN„P:@(~oH9Bh+&D94B3~%r 1$]Bq/B9Vhހ O Ir0 Qi1#r&)mW '<9ћ ɿuDe(ط(( 1إOV| |ط RTJH@>d6T|m& IB>f"*d Uª ~2&OȇCM+-*OL?M9"f_VT !nˣ*]6&@lv𶔰%DQIܒ-/[#(C}vY.s}>ۄ{ԋCn %"X 6k,$L~@h ;DaO~BؓQV9b u5E> Z}4_Dm{+XQ[յ;v{v kSƖj҃+ !XJ =EkOj=gS8MQ]kS"c!ǖo@ȱ6f3Q}lv'҉؋3:O`N$*OvD;a {&J㹀8$ !G{;9ȵg(s޾g=uXFx6ΓϽ9V<G{fY_s|I9g:%<#/B.^)#皗Y/Bz$`!M9ēV%X[#z5hig_v8>\s@curSDfD9gpq9!&50#yه@Ƿp |;G _3ixr Zb!G[@Qg~bX zW9=7#^v+9QiQf!w+>qDB@4gFhQL=;!zo蘅K9B9NxY)CJXę9 _{B58Q1%_ٮKP mKkv5E)!հ*`B(t=Qa9!*_ABTJqGJ9B)&ϬRPT;|j[CjyQ/ٰ/#!<7#au lBWj='be$!!>r@pB__&#jwBi^BmEzB!2zM]$1rk&" nBo%r7!ǏפUp@,g׈O~9"O"^;!}"^A7#ERr`GdhB>5.F$CF$C&V! 
^C y'/!N,@>T(<՜Eȱ)ċ|3A"U"ldݐr5sʈhpEfʋ0vj[ҋcQ:ɪ!qJ?0aB>( |/?"}/ZFp~qz|=Qg.|(ݹgJ ;4l"tl!IpTh9,EAI;Ixb&!voӊ" b'C.14__8n#Bm<,k(!fu%o5_F8brfI3#G\O@8sN Ɋ 'C^a6 JdפB9"(DO9DcQ>!ܕ|!2Bcň凌CL~B?cxlEQHʠ_Q7/wqZB(D'E8u0DBȱ/fFfJ \cb:r,1r.7_8!ފ5r\f/LwI`PET~4E'#ݱiB(Ƅ<莄<-IByZ)W^baQE3!հRg8l{aI=sݣB5[tB-?!ѱM?MqqHʷЗԑg!s9k_D8RJ!ܪnjN/"lbBۃ2"(7"eQ$ChWo" l2AEȁ~WO^to1stuli[g3Cƴע PQ_Q)!!]!5"A7 }sl|>{[i|F Gtn7G ,|ǻE(m7(xx@M'f{BA=إ?E!֎1Py[GjU45f2BF%(8G qnm&D9 f$]Br<(z)׼f(ǦLrthc"1U[XB!+jc !tlB z3@AsZ#oԜb<!G( !ǡɪj$7r:%UG Ą|3TFD7b_/ЋNDU7p87=Q@p_ c ߋC#bHsB[@߰B[ y NjC8#mzltƐ1Wb1+$I1Eȱ8>V=!rQ 8!XWpB>v3ڷJ% !j"\F"r'Wk9/ݹO@ 5G_c/UypΆSGb=1|½/k")գ^{U^{_T ޗ>_Dm^@\5p QĄ]Eh5"6!Ȋ Eh뻴dYw}WEhCt ԋς.{6q.4f|Z Q+܀~+#jAcE'>f|r.=%EmT&7ƿ)*@ y8/BV!_=OE]V,Yq 6JsB1xyFF"j9 M[7|ۜzm87zgo>"Qݣ:3B }KXY "yk!ǭ3ʈHpFrlZnܶ"aړrLjv&DmJncEvT3O-֍: Q=(~F!֟^NjQr ?=_DO1jJ9w(PPC*9DE9#y%CvMȇ n9ko>#--,E(-A@k8Flq Xg]WTqr'bzaʓr%{2WRɢDoUb8ԬoQ%nBg<J-q|rն>~SiXw8؅:$/y_9E>/L=xb(NH^r`͕M X@ԟ}@4ۚy7>>Fi5Q+\_ zRjDv$@Kqsekbe{FFѸ^̪0:fΨwV|HKoQ+# u3B^-#䨔+3Wo;rȯxB!V"OsBtTG n9g !G@ckQpuB-U}F#="&!3vDXZcy=Qn<#}:Q/< X.KxrtZGDBBǯwBZw<#c6.3ց3.N_䋘'Lc^q}`< G~Kơhe` p@>֖^cuJD&C\Nk<;_ҋOڟ"ؔރExwҞ" kΣ !8G m%N@<#<YGx^DO,2V~ Ƈ"YrB>.#//BiHv@>-L;=2BJ 9p"MgIGs@#K.VW9>AnpM:!u iQ"!䐳xdWF1m~rWjrj-~%auB%#jUz(OzG* Qe:+\ѻТYv-tXn3xra9u;g̈YMl*5"_C$Kg9+͢2BͿ~R/ c)zr_r,Mn6#""rLj'Cm4 Q#nI(15j)UgEdM'Ijߒry#c:G s/i6ȱz OЏ$ZyF†R<3#j!xs]9*-'/eg9$Ʀ;!hvkL-$/BA\^sɡ IFȁrGFTFW[^4 gm++mF1({J / 8G ~]ZhxrH|ؖs˛WX!Ǧw*%ֈxp#ϊEhcۤq9py*9*fn|/BE!6DŽs[I9=9J>#j1f?!Soyڃ!H9MIA;Zʋ9~ȈZ&s"(|Y%|rr ZJ#[?bBΊڜ#r p@> r-#X|#qsEō/M%&!Gܪ)'QSqB" $_u/BxG RtzM#c.giF6"fYu,Oȇ8K_^C5 =FDB>T3!z"> ֌^Znʈp,Ör\Jq8oT\ײ9 IB>Ԅb!hʼnx@3#Ř|$xl}E!Q@׷ y$<!}#>bgLyQË(J@1) 8@=&d $7%v38 Կ%Ƙh"Ц&'l"/b! 
!G#ry2>!X.rȺ !8 Ts"STj̤NjCx+r\Gȁ7nJ95IKp,xu&"OEY^@ȡ'A|aN9.=%/Bdw9VpPI:oZByѤ _㋐#voqQ@ir994/1ˆb %5h{!ǛȇL&ޕ)#!+(.zp82o|/BӤG hB&[G_ƚE77+_(yV#8wD9.溅=oxeBn rfE4C jhk?.@y(9diz3@(bo"jC9XQ r r4Bh,cX઴P6w %bq=у*!2E=Yf#`i [0VaFTcs/8v++-7%nB)o gpUZndBc6=9!䀴<Gj)J9*%yB񢟸E' 92=Ι,~L9TGBh}rL2.rlNeZ_B *mn_aU9ضB{a˰/BI/)}5{)ɱCxv~F!;xDDFɒgO >f*iK!C}@ր#]$ctoifqHh3U$ظ7ʆj}IP;JP{/k4b %lhXĤkN뜭5 fK&o4f`zй:Iڇ_? "P}AQŵy>*FIav:Qhx<[b##K]̪PiS@A+g8?޾Ǽy9~<,f!)Ǝ_~;*㮑ixrx#0VUN0W7$1Z$4a+$㽃s%lƨ5I >dl(e88HxZF"ɴ[{PѢnz$e5`h$"^;6ڏ"aqY҅ߒ57O6k|`ND䬑>ڏf|ޏG7sB\n3>y:c޸|5i޲*|b/㸍@ּRI^Ɲw.lfETEIhl~3HNbhجڜ4HmuF>UhgV$ Eшu0T?m`/VfQ] :[C}oGTؼ.d6 q3H`}eB#ո-J`?F\3D[8rtЮ+h[:F>Yy(@V}^AvYdYehD̜F?{kIdm~_do9-c_KjUg]g,+AۅO}U5Aۆ̕l@̦4HQV[kyl/yl$R1l9%A;b5ؔ@#4Hl·ZP#( Hſ !43T8䃰 WV#%NwN1+r݃1;H yy#Ȗۯ=4Nf]I0V^k#241* xa_eI/x<>Nc8)FjA fW_:m$_;ƈTsS%3w/N y[7`ir 3>j3O3F2+ݙy(=~]J"^;hjJ yA=O:ͮ锄w>9+4]?u:e'0\>@~#{ЇSA_g^_#0y9oP]sb#а> γķط8z'ƻq$6+6xw]$x4W-h?lAu6$ĸi6-Jq5@$~`KVm*I? ڳnNbߚ֏cJ_\K˾elsͯsHhoH]:5 [:HD6QŞF1hxyD@Hh]s{sh/CGD҈ixQ>k_#FaٻXub3>Ow4څ4vRk>UvG' }P we25 mOtՉi6 1@ãP[;ƽh0BxE͋1u4\U!&[͢~5ׇVUV׏DЊ >fL4jɍDl[ϪHI\$AC8ۯdkkgf(pFa*L?¼⫲:q ZE.Vj-'7dOb5Fatp>#0kkG ڇŹ/jֲ/f6m 1A_C 4^GFL'v>n#K#:״\WNaW9݉i/h|ӈiBҜi5b'jܙ0KwF]I>q]+#Jh?>UF~1-eq&ae-Nr:d3qE2=>3Ύ۽r~1KB)]εF?x>r#3J—:c#A϶yY̝qoC Z׈1;aX';oݍN 'R ӟއ{,+* ?C* @â?o!je.[,ƉQv(Y_FZ}S kSd֎5_ {ֵ}6x$q`^J$a3QkXW%8ӫx襛<"0?"D+j1Hm[%и $vݟ ~z\*)k)l$~$]#g}D4:ۧ8ʾH<4ib]L@6bک4cyx2m#аyT~P#xVmL 4S#s`f16 Xq%W 4n\WOpgTǨE/s;C^xsD$eY-y4 n=wג@5ccs5Z @<뭪Wv_go8ڮ<,;( U3.FaVk;:Gubyfl'а6Z6Tgp+~J< 1V+ƵFzlJ0'|CҒs1%ޯg7?nQ9_ieܿ ]u?~KAW`rP|޿gӸ`4bG6z@ZvO#)rzk#ОsEز R#6t⻝@âu1(1џl5bѯķQ 1f@cFL)1=%0Of֥lGSY~bJr5phkUY\ 7 Cz`\ (F@M24ϛt-)Mt #q"A l, 3= $Јnqqt _3hەMX xF޿moA;'~}PLhw6tFbFl6{^b!y ҃A:9z7>$A1/?z __P ;e1oJ^~~|@כ@g'NO99JqzSHB9IlˋYs L/RTBe/d],y*Iq݄ T V@y̞ӈi|j/MY#?8FGc?h<s4lxc FG-FP >t͏Z,F<܏aD+(p(\x#1)=q^ a-<nUD ka9WlY{@58JLL#p>4_@!( hvo k60Wʌp0y:}lĉU~xd[g[)/zÞ bD@Wlx yӜxLFD6h mAj0@ ONϔ@sSӰkT.\V텫{e޿ZV͇y_UcVz/"`kν7Kw|=Qg[7r9\Ć#'>*vY{@n87U/7@& Ž ȕ,ή} (|<_k.@qwWW0}{ݮ<_/qWKXmB̭r~@^w&=^?<_Ƒk<_SqPm ]fi/=NR `[yFl] l |" Ė. 
]~km\pAY0uZFZk.03ܾ@DZӾ@X`?~zsܹ6:k*M5sv b7̜U`+.?Lz&8 ThNfdDhWO)ۇ㋹&hug8}zݛs !5Zs~J^`1Zv2YLΞQ0QpE0`8 I~R^`fY=Klؗ r3Oe*(٥qR`!(='`rl7Nɾ~}ݬ _c&c͝gm ~p FR'ɛ\k7;(/Aд ,Z!3`z.zMxJ( ̬ZX\eSpq63'8 …rs#.Pc]:{ֺXlG @eQ7B~= Xb xXIA^ ;[ ䷿.0 aNO1 6 z! !Rn%5@|cC?;BZߘ)k_b}u_ xYs @ _|(&?-7?ϓUN?a?O@w DrOzj˃cˢ _l}R_>@w,X\ wG.YKo-40}qsw$g!Ujȗ7 a k ߇˴ Bå@py ﷭]#Z;h Z"~j AA\1 +h+^={@S0=Gh9<ȭr+?P߯`ҙ3LI8If_߷nR.a|9> ݕK\ 2~`zrQ0=.>._Ӿ_D.?·Ҿ.w 6ᖆ1>vyp,sD?io˜ܴ _)Hy(ڤ39T^|_xL"`z0WGLTP˜)~L_;S`2R!l]*Vf%`2w&tMƏ5~z0^ERbU6_*/?K_7+bcݽ |T`ߖ;HK"'mΨ < 7s5j~p@_NY(*m+[8W ¾o>r ')uVoiYw= |9 @uy iޛ*]Ƴ_mp^D4T9 Mrt `^Wwi zU N]fDp]F0~,swq^~Y`|Z b1vߖ!XUK@~Y`|]-b|2 R|u./O__u~\?Wz77*|MnlV!\>3w],ң`z|nRϕPz6B>/| S|tdӻ3M/ _@i =UY7K)k!TP|p1/Ct\?syJ^ ݤQk8* =ǀ(}?2.Y 3k^Ri`|0sm 0>9fU;0>u@nbK?/'_Əg5:ҽf_G?ts(ܷWa}xh_ m|o^{>9PO-/+~/eO_*?ݽ^k}G7(Wm ݲ? ܤ}P㗻  m?=y/$J@q\.?&c[ ȁ bzۥ|MϋbpXsȗp>:.0s|X|",@ok; ܛ3 f\kkz1VK:*OAoN3/Wͥ(r/91? @!l8>9?+_6>>:r¦ȗ;dWG8.=h`~YQ!1/ j|$eΏ8?~,>/G5?@o?_ c쟀hGQ͑L|k:q_6R^r[ F䟛j?KAH-_,5x5 endstream endobj 397 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 388 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-045.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 398 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 399 0 R>> /ExtGState << >>/ColorSpace << /sRGB 400 0 R >>>> /Length 46639 /Filter /FlateDecode >> stream xɮ-͚$4O09}3  &)9q3s_g;l9?WHmX|}?ď??ǿ?Ǐ 
?B˿?y_#CvIG%oW-3OK=rp]?~?yO^?/G??%1P?g5k5q7cB3,!z1G`v+n5"1tjne=cEum^clеG+q7Sf)Ѭ?*2%Ns9xT0tcH6ֵ/*e5[LۏׯU"ig>c3lyG;|<sY/>FQn1םջYќU,]{돜|*}uðsgËf u7GkRSd˼U[v4u\ϽIfj=Iay5Ja5qae}h=1S?,sƸm>}T{blO5vnl>V_z=_?lܰ^]WMZ7Qk1s׸Ǽl q~n q~Z7(j5V{}g6y׷jG^"߷&Zh״k<mp<[cϖ1 c_3Z9'I|߳sy5krv8>=@_[{|W^8{*^,}׃~xZuHi~FϽ5Y;_{<SW|X_ˆkzvd o]VN{v{mx^:x~܌<9qkXM7 /ZxՎjwupu.%1߯cg-rk|n)p0^wZN1GH]LޏWׁ?=m΍ vzxr77Rky}J˼&ZhDHsuǹzqyU,dW|&=Sl_? {ޚ&>5Y ؿ벼/om}￵=n9-{i(>~|߶#5w/ֲx`_Χ}z||[ORN|~11uT?u}Jj&m|nk?.I^651I?A{g u0b}gy  m+hkdOcձ׺?v/DHn{ zA{|nkHk}Xc\yΓæ|}mu056~kzX?|y_Ώٯ["w-c?=O}ۙ{_MO{*|mm'QϷkڟNj~k~gz뱴^g^vx,fmL9Z-Mװζ׋8m|O:}'Mb٪:2kfٹ-3ٳ鬇/h]z|_χma~Y7xv?iD}:T;m?筃t?=τkgm|ٷښkZt}[:߼e/zy~Ozxcv)w‰v?wm,0Q3 Xy`ve]|osL{9?9s?eD/2g~U,vگ0o[CyQ/;~;|*k#/(s~3@3p1_]6?6q~4_x yC W:97Wx9Sp|J伖? یa8ϖY)lg-6ٮ<~;e?p>x/<=a8U܏rz!'FSS!>b{]aWrfia ᯣwO_?RC=ڈwFC>~xU+yMG^}~xjmƳis#8v;/q}7Kyp!/˶.{)M m<)p޴ծk 6Kfܨw6ȟaiJ`y`x^/ aWax!([׬G_xr{l}ڏqɕKLP̴ڈ#zD{}g=cG5* m)g8v=z c?&zPd`|q}{^`ԝӶ5VCV+&>41sɁ?V 5ߑCEAdIC5iOǹ4U`&c23sϤ,<>7;Yf (!ͭ2r&'2u'`VD4R}QM}\;>P2]^pgyfNU ?/ |5mc b}fٲ[m۶ *Ph7j 6۱Fb߲m[>mݏF=gSm:g2W,׷o/eO. 7-8 ~F;(n)v-ys?'7eKA TOˉ#v1Old<Ƥ&ɉO}Mn~Ϭ-QF?=^yv^=l]*w}8\)رSIO{>LnVu^3{A[=y*ŚaS47Mc4Pt_,ђG7&/v}f,jz?Έ7xXx_iGw#d/jl.GDO x}V<#h+h6]}w-v0DspН/N1~}agY83\h6mw{gy{H>[dg(ROvf D0uGg~hF9h껍Ol=OUѝ*~.]:ўr~~~vX|?ze0?{>Ī; {vٮ!;;vU4dCѵtCѰ>*Z~;$g-Ri H.O6߫O3ވOj|etD=-ՎgwG?6N6hk%EOy_ic":{ޯiߋ+y~[lWE_cGaFtd`MKPpm^Vc4/;s-=$|4 ks{^UAZ MYɯ\ݿmBN~[_}i~kpkZ`ʆfRcjO;1XZZ\>16׵l?"uBdk.|<OgV4a0ڏ9 972[Yh{ c|~4K|>q/C:Vxn]?N3~zFNp_M&c2U>OE֘@[! 
7lE a3jӹUg)w '['+5V'v>fՌ|5N9146&L-q&ڹ}nL[Fp-m'+׃I'SOW/|5-k+=pT9U<<~m 9М\Vu>49F"kWC5j=q*=xm93Ьu/BcR _| ]tWv~%hW>jv6nxgs)XCmY!AIa30$vLgfj&ܞmu[&g%@@l M;s+Cw}}J.ÈlM Y}P)U7T9獪"YZvC2'ؾs8DqXߕKMhNݡ ~pB/8i&6PwȤ\+l (Dٔhd kW U'URb{N8WXOl+'7}Me"A[E8S%Ǯf'B&پ3 iՓC<ʆdafpY Q8lMiMk9ß4c#Ms"~kE,Cٿq!K#n$AN_$i>\ !y m G/׿?RKȂfS!ɳS$K'0#  ڙr\)1~l87HQ?G:RG?_eT 1_; 3?o Ud|<IOc"v~,*D"[T*O1PQ\'@s4%_1Hmӥ4s*lN$<|ld12ޯZq4Ŀak"pXzv^vΘDWKkJbt.`v 17o_B},]C&3H[grPY`ovA~y #1W{ D F*Xz%X*zt*h<>m 3)3GϙN um%d>B=m s&XA12; s]f $u't3Iu1E51~u٧lf*첍 [k򹂊J}8畝{:Rϙ+:{igBRvTlN/|.LąNazm1aɒi?S,iS)n (ά%&*yB$1-<7K݈Z*eE~tjubr/죓 H gLɌnӋܝ,D~Ql3"pV^Q y.Lnn}1@0>76,AďNZ42ʱU:v\fWvd=Rk5Cهoqr;;6 [WlW6)jVb*Gc!TB {@`r4-6a Rv<޷)3ܿdkŝHB/oZK4> v iǕ0v>cn ֈ &߼N&pq9<>s*9Px \Wv idf}29| L" ]`bWCv1 LYw`HdX^ᚘ;<;;Lⱉ6UAx&p$clK1 L {bf1[*4[0sMĽ[Ok,qqN!J5`; /ionI!4\h=A=Ya@S@k'k)?-F+<;v,ɧ,v v%h\rA0XfBc!x &])M! H2Ln'="ZWW)&J [,G6N=&5䩈N[_D!&I3u#3Qy:m UƑf}M=},B)&膔 gk^oez<&B!B4ɣS;8=hO{(iPqΏB06̠fF>}n2rk/}Y.F,t~"L篒Ҁ0cVHDDpZ&2⊍-bvYi|*q@\y a>kg=\&FPV,hpBz+Pޝ3%l̵b-Q "MfK`MrE!c˪.?oK*fQ6P&Aщδs+I6!BcZ]h'D`cVd0 $'AǛ#܄ge v3:Ce]I*ˡ| <{>h\Y6mY 3/<s4"JJA`61d;<2D&eKI2 7~ljH"ÿ*t`3EA4݌f M m]4PaC)34>% ]hm"kF{#h ]äO 2`crMI{t5I->?Ǧ%DDVfUKV:9{]usʣF0ЬMJu0bLy?Tfэl(|%옜ӓxmխi}l0ܕM1LDJj*'_Q\PBfrt9mdaq$]1z_;QL Yd@ 0ǞAs>;e'w󞼲 &1DُMzLi$2|~%5EƢCf[G";}8ªDd0q՝80?m3 Ǜ-o~+H$2fG)Ks,, ]i'Bkɞ7I6x&FId6LaM.-gVbfl}W3OtFi^F $'Zs9OhI}"%t[B*AmDCPZq'ـHxf2uJ֬2kK\߼7Hx2xBڷ>6B5)YD xyc9plCJK2 w*¯=#[|l|dU  ݢR[%/crr*PD+lu*J\L9?"9o"rE&|G%0M= gaHLTnDU\#,C{dó:G"9"TnЛl#)X3yNPͩHqWZգ%(UEL:եE v.qErr C5;l4ΚCޢ':0\[z5BH_:ׂ&&s[x&n"l')[y<229F́rvdiɯsCa*.DRī&g7Sft in?֧@WbD̲`p-OS!g+M[]6Uɳ1f 2HY85byw1GuUU{t# 𙪤4j! [$N9gg If DW_Z1,ԈB!se4FPHQ$c}RwؼP CgH:Ifsj^"6`AœMGUx `yE%J3O βKU#fKepHFlMbЩ.ՙ0 2M*r<&Kb,ˠztfT_ e@D.`2o(*d7BCl+L:[)יÔq DQlR0vP}UQ*u@ز_; iL*z:wem31k^c *}FZiXƕ\ͥ:t +Czݥ5c7(E@ 241 ^k:(K٨s'`8;D^b4łےԄZ7;m]Ƕ P_׀ t29 X# ?Nmn5d0) f!Qc[]:yLz :n? 
Yx.LQt~'u,fF?Kdӱ3¶B…)A`Drx)V)H3UT-M~iOz[R%Ss$B1>tGB]'P]:⥌Ru22Du:Gio-IЇ2YfD`BUݝ+CU&g]o+d oLX~Q6: 6?_"cǞ٬Uv<|ξ+X_z~fp:.vu^d'B3 3nR՟sFN>c2<KaHk5 +*VeL&WVGIBSCb_:Ә&IcQӋjzEFQԐGE U(P$g2-Z 0g %i?n;FfeXeJ]6LEl+xʇ{,\lN*SbH͵/ [ :@%C0Ap;F^a  MfMX +}F7p7BN3 (3J`9,٦H84çi Sr̓ɲ_$Ju&%?Kg9?ÄXGOL.i/X9-_;Kݰp*c7&هM%$$4Y}SG!6Sp X4sݴ/uށKCw63|Z,{-&rt-¾&UTbupH: < \eXgQމ,U˜*.)H}rݯtgVxS2qK,k,܇|*B5m "EO| $ww%WV8ft>C.|x[DISdL-POKP`.7Ln%*o|@9883}+oH򇅤7΄8al߯,jө}3$Bp(u^Q4ֆ 7194A-?@Y3s1zzOI l%> g=Nm,)Dn"K`{1zzUC27=l7>u*$q2@y%4.S4~μLJ}`" #:o&$6*ƭ#ô8DD"d [*默qSu$Ə4=P9򒿊0n$D%mZ ,#dU4k}FL"S:,wB3 =G$LB%U._MV2t [ WtJf_ SH\8[ t=Nj$5سPŹW#ҖbBm4NbGW=r h(l2 E8oq[Į C+U+5-R!Г2oܕ$i,ځZWF[^1B=8UA&mC5GѸL]iHJ.2D~^ATeI1T>zr巪dbp9D1/>T(Su\n8cPc*K9Ů"hu> XXZ-gHmoNntT[6>ɼ wCTNi[6]eD38JNݥ.06M'o9Q_0KDxaY*|]V<dU2䑙}gWa/rX]86:iOib+90+l;x9 8SS0L%׵: u[YhWxNbZWl+!<3f OX2;X @g'سA@YeE(0Q5޳33Ws;}U4 ^'=[{NdO6l}-U" aĊD)LR>tKAR ܙ3W AI⯝*Ĥߒ ojb$C}pp2ÇYB0٨v&bݪ6Vg4De0Qtcwq8}@%5Jd0;0tP)U"!TYa%愜ʞ5P`M]%ia'BJo<^{\ҢEƀG1_J-pGX4PFUƄ 0o}u*OFJyt׌8")N%J6XT@Nʌ0|n 6l\Ӧ*Fi&hR֫gD7"V<պ-.댄i\n.1kzv^nB1, Hj,8u.~SOb;k`ldInjU/:lTvi ȴVPp 3ʋzVN(,}5ôԞgjh@0ڹwUL|KlӋ @Nҏent\0OBxlkpVk6pԙ\g"k<ت,q{E(  ՙ&o_Q6"U|_Q䓘+68R&4* DjD}q/b y `4!9X1)0i2 Ȯ'7m E2ԸpB²pM-,*awUhv2r[w$K%/%'+ȷN*ZoYʧS9CHWœs2R-[A\$;ٜ&5hc4F'H?c_* }'9%Æ!OdQZ_+ja '%5U"YY8~MHr Vf;:&(K.הY:؝.*j$+Qp'awe~}gQ_cu:䦋0 D6e 'V7qw-Ol#9G3$=?dq?K|\]PlSLm_ο"of t.1L 4*P=*')YBchS ͇-̜q0mJ=-Xy>0j#Ʈ5;TG2X۫n: Pݕ aƿc+),[J'668*2Dte?HPY*<$_l`m: ({f  :C"gmӅ〡$)˧Yh8Aqd TvJeޅnm$l)bjo,L-YJ)Pۊ'wRxUư̚lr|& d捖ml$3)-$n{AډZIZNߙ'e ]1&!~ccMK@fMݲV4#[TW-K[E<-MUAf*lrW yE08?|.@Gep"Iejβ0nshDhLۢze|/tiPMKցm)[VoGྫྷ"щ޻mNY8mG2M5cZu[:y%`GnN.CukȌlGSV8##KԶZ9k[xv2MId 摒Ž5sb#ڝU J 'qC tK3'wqgr߅`FJ/*/h(2d]FYL)k隔}tv٥Ma[fUNZK圠 %7d7Em5:Z4.yt =}2K }p`d.} 4'k< |T quY7H<V}Ҕ2N.4Kx"FuXĴeq"G֗lAL0f R53\2U9m~,_yϼ1;F!SU?->H;'AJ.Aiy@xu[W9D+ʼn):?j$-z#X}my;yhc[їWfL#2; hWM|>mVի`PK80 "^M xX&6T^XS})HM@*f_h9:?+|1ɀiʴl![ ; ;^g:o|dǶ  G);;ꝑh^rqi$n5Y ik ]mԫ~t-$9,=1\qGű!FSUDR"j]˦HtTeǶh!(UkJboA9NuzIzY-~ldpc~{7C#)T$]X) sQ\\1Bي0 T}^Y7Ζ̓4IsZٺ_ڎ>) 3(SO)rW 
>&ҠؒFfLIbΠSȸYuK}p&mz>^zuT4d߯;t\/8,ߴ1>뫺:JL(}M̋H˚n{R@)/}^(apJ0WIL-0A_EAȮ2#PS<qՔP%9B}-DB߰uae6L *sٜx0;'!(/S% ȴD ![S;ȼLѥu:$-uDruO :^-ْ#ύj01٩bvIU3W\ }k#dv-j%|$=  W +6GU> w (O/j|W5C|RЬ$K= q5] mU "`>]5E/F.&1IL774YK13Ł집D6w]*S %b ~l^E?eUVÕF omxQM FaOeʣeFhс)I u6W}a*]S Wm$,NfIUAZd^zH%`tkRe2TF"Um ]!-9ٲJX&Hx fѻ ƢH]A zRz3RbhaP>M̜ر y&:%HjՐ7@Ou팮ͰWLl)-zOCȳ$6R<6SC%aevuL`2aL[w:l: rPoWVh&x t 3< x=PY(Ac[G9\ŦkK]gD+bf56ƗjԈ@ 80ݕV&٢0 'BB6X"m˩ O=`c Vq$@rN8U]0BWxS5`nL):~=mׅp#[MHYry\"r`d}]!xl]ـ g#ŇvA}azzsݠߚ[eJOLuO[z"_l]\::wu96[c 6ːNEP6`9y;{Ugqwnk D;elW N]yv<ⶈh%}Vn4h)Y1&=mZTnMU~LjvUЕ>9U/y_Zu<+Gz4%~NYSDB`HؠbC5^V 7G_k~ .5D^."x$Lem8; S,X[^ϳ&xwC)1 \iVA*ڦ$%e@9 8gqYAF'*D- 73XxB4)ceo uk9A~$#kF H 6#A0Ne3G[݋]Q)_Z2YW5Oѕt1S0-Uydzt]bliQ$Z4$ h]T'vP Ҡ9;1;抂qdDNь=^۩nd>{Xf$IlZqb;nd7DPLgR!y[eQ.\ p[ӆ~yHndUhR YA.mF B(MuÄ(^v37ADlD9!k]xί|-تõVx @ f gd)&d aRZ7ݵ2EJ<`]:08Hg5rc RV=C@ƺXE+T20&ds(Lvdaucwd ER[3&&XlFry 4{*~i1I2 wSQcw8]iXQft,MaU+1LM`9H_~@gdH#̼^jGZ>Zo^EZeZݢR|x4ʬgQq3!@bfa_w@Û""#qFWA" N̚J[;ͬ8%T e휃TJǛ_JAz+$QgDw:9vNwBfTH8X] K YO8VblBZt+hN1o^q+LcQۺ8wi7+79ȃ#@# c[L2nMl1(CU3]N4LK#$'K" Rլ0M0U&B&-:nw2ꅭl]Ѽ]P ! cEΡWoPdN`: u,n#"ǁb٢+q"A؂Z;^cɉ|UÛ7|]þ5n[QVbЇ Ia}~X`#CXJf& ]8Ӽ }+o"EqVp\ƇXPՊR;o77zvWQ1KZZӪs]sw4q30LS* Ǿ:ԏ{Q4vb6 {J@V]`wP?:Н}ۮG`duiFf Xsq.~.cK$t"i>k51Y-dp!9FeaJ\d=:$l*~Q5A8Lawy2FHQI,dA4$]ƍ Ezїb-:&L 0F:I`ڋ<;\8ڋ2@aam6t+o}uPzin-Ylq8ĨgyuZ'NwWW)xꊕ\=W 1N =}_i=| $oȯ\ך ]2 N *o,1\YKZxH/ʄOV*U!sVDyέ;:)) eZSk[$x$ҽ q)cF;02[f,)0iJºAg{u\ojIV8+N$>X4:{qOށn B^SWϹ?`9Eo{P-ʯ'oHbEUpo#"]9,՝؇[$MVM]&NXv$T7-ߺLXCx7 Cx[ &!f ژ7ޟEܖ"ܹ~eum L-A/FVYjԤt1\mV dJ.`cQpycҜAѠ]f9RE+̙K:hUSӊN̲.`j rYvBqIB 0toRA!uh[m5W=t\Y߽*UmW*\h)9)@DS}:]awm3V&/S> %BLTPqP8\YTYN"WiZOcM gR(]/a﬒).ZVJ/Mz.͓7H sh@^xij%5SQj%U8 J&! 
Xa7v9&TrwX49tS"EdvaH2LMLp`UMټz at1( vś}\y))NJY~%ܜvZoMTnN}yX!z~&swj1B/3\\|26cS_; eWqo GѪܹhyc !X Y@M=u$-7 ֡ާvb}XUr*gT\ʮtl;(`d޳qp,.O9&mʕ%&L*T5.ʏ:k 3ԢU+_.K)LVѕ2"AڔNԹЄoڊАC~$zX6Γ `&KBi(tDIx6y1jMTJ22df9ӗf H/*K,~˷Y @e/% 9MZs0W ዒ:v} T31h,cM0@`u_PBE5gz$3M h{7n =7!DE+ֵSggKcQ1`冊B9|U%زؾ a!wZwD{j2kZR{ 3UV lle RKN +0}-ۜ%+⿘3-*1"بB'P"H5dDhv LNJ}VL5Q8γ%b|\ r!I^+H9Au2kǵXt"6AǂVC,fv_EӈT]W ;y{* +7T&Bb <ӯR5)|R`ed &(*w(Kg3s7)-o~%QT9e@QfEoN f+(Jhbܖ}j$d]E<;/=)RHP?#T/i='*E_zZP@Z'ܘdH/ђ4I cZ"ͻi$H#mS.9 㪙Ӱ uX[joTzM]3rd(35USn{J* w}T]('M7 wk/&c\nKH`ͥ0te8y5~o?Ԝ7=ڐE%"q]w\ wou6+HÖVYBNKK‘kVQ  m@y2@/q_eZM$ \ΊlL6BVQN1@QQJw*~fݪQ~s%KwϓV)y=k*0>Id&8j7qTC5N,-u)ܰ98&5n=KyvnxrioK IDv[E`HT5SV 8pst|2!粯p^;c*o+f{%D2& &0h+Jsy\igcnN;|d1x[21cӈZ7(q)}'`sj6;9- Aɸsϔ)80N2 \H6}]b.+R:Bb%e9d5>nAM*~ dQWRcNRA'J~Yp&1ѽ=o45M,/n̜.[Wu\EWa #g%8.2@EŠx:Of/\耒pA0 yQE Ul; pe^|7BYƿO>Ρ%iKF]Q)GW ;;.yE=v)NA86kos'?/]޶D  r/>n%H L8*BB\xsOHE+'%f,"2mFL/r~ .Jsp%f^u'\vǛA6S~.saaId繾:_,KsԮWB·bbRY0Do۾Ⱥ,n}',vC%LA-mh õd`Ր̧5hVp ш}Y-fma V6mup& Wfd"|+]o <: Frd xMjb=T3p *F#pNN{ GG):)n[E6B__ۭK7 V.kv*vsu,GVr*C*c+SWJMV0я/FNK+LNy $YRݥmMᝦNI#+$; ]*(пS:!mi1"~SZ9$Rk/VbC X>"Vΐl6k}XTD&κifﯼ2 j5k}!۞XLȠ{[6ct>_%8+eS"jեɱ[j͕gư*hou5:v[d4q);61v77ElRT',(i-cX*-G4ИͷA &2E*Dg )aZ{[1B#x!xdt^@lAw%_كT"7l-IpӼ ⷸە22Է;y L粧t#|}ZU,C䕍k:b(J`21+`Ә'E"dUIѷ(%R8BM9&tN|U$;vԃc)gMzy3 6/AA{_h&ypPv8Up{qU@\'GOv`G2fԥXFiB1ˢM5P;KLVuM8+4f*>tP؁IwYSQ= pw%@s::#"xޕ8`L jCpt[]A^\ϔ,(tBi¾`/kiTVp5aF|̮ ^tLSh:jD#@m7.W^c( 5iwcBzpLDv٧U-eЪEښIgY;[p4ykq'Q ** @~GQ묲-p ʄ$`jQ ,;bdibYB /m玖3++:Qh|.h!0+vZEVU3:WqgFxSAf#T*EW^[7ؚeIT32[ӲnKNC=Kia ;B΄729sQ-WlA*ͣ(~7 %؊df]S-%gnR+ vc|^E۫:ՖydEL^ (h-4T*E46Gfx  HA|ly#6VV30ۮ5hP.0iӐLI3gCMI ) \Vx5]SW I@YdºL[W)$WO~F0OŸ_u GJpJv4K!F! a{ȖEM4{:QR%&>}#%ql7/%b IZ8I-]4,*UU&QݮhXG5% U*.Soh [& .^^*3h% aXRҶRӕUQj~׌l*bpU

?YWLϰޔOʆ\rX-.i64*p3$f{K9m㳍i8%Wg&l[0!҅ OHzNNr:AU^ʻPQZ8UHF6P}KGJm:󟨒Jʡ|Vg&NOIȸLBgN^*ߩkf7=*e}Y`N,t]; wSׂ^m>i O#m$!%tn7!"4L +In8%E W=Ƅ QJL46yOl,˲SZy:5>9O3;؍OH}mƖ2w]N֜TO4Yt*b EvB&Bh0-m=Y A,#IxNͫVTOt<'ޭGWؚ+WCvᲭz*w4#>6;č)\ْd=lYE"?7VMՖ:}[fQ(:윚Rn]ޜ0uK*X5S"Dh''Ï%%LJI]k e2xF1I:% $noXVM*c-H㦈$EӪ G?9z1I}:׃ \4ȹ]Јb}is2e h!!bGD5v~tͤMh`}[ Nu_:pNjj7]Zow^}fuj|ܖo Ѩ(c]Tr.5V/T\A.8$ԐzboٷXtR%LWՠ부 P-=df7Qu>Ầ]T,3]X[j`P!:=%h*(&L?SJ[53>o/I N.51Ќmٗ wo5$'YhUWlBdt2eF} tCDNdX 23a).V(K=A@FR1zJߐoU'A,UkgqMs3~[+"CN:v<\>$xG} $3ڱk[X; ;>\sOA A4Rsm,/T9zbkzcFq&O'nZ}b+Ď?<~e(a آMp[@.oXp |ɥVgR iA j#䄨Ap@Ū]>)2&@NRl@Q@K r$`W %ȶ0@=Zݵ i)E}UDK @*myLiYSFb1AD#?GpeمƺBLJ2/FXCɓ釛=P W?<Ƣ]XDYHb1+V)MԅG'&̡nIUa: !IoΔ9y4UBodu&0r?_H]7׵yrk$9:Ra;>$^_'R퍈 <%W I z"o4a<xUf`$r=V!}qTK#nM!4C3A R,mw JՈ\w ]yJE:ckB5\dcw/Ӣ\zեb҉smnp2AxJdkb4; 7Hml &܊4Ssr!433u/`;< ͵|W%"A@..l5:,2Ț4Umz ` NXމ[h̘yLHcQ 4ط$ۙO+]q ckeqO#]%y Zf iMp 2+Fh`<>I/f͛e-)𫦦{ `݁tl^ wK e roVaQ$XcPY85uIV;Lr׿q36\Mo?J/[EALfM3;(vQ],c`<)tW EhR߈N&%M |pz7W%%dޜ4rtDݫ&6 BYNQ4GO[XuFqưY-^r#emNS&6Lfx7ptbU9]ĉVUu%: MOLyfXMwF/rX4eƀq{&ʃQB9 ]nϤ.°NkӺ+x w.٣hVwdʼWPhmEOvie3A*Tff7|fKG2*]"y:TN@lWx&N`*_;űVu1`Q3L+e #H&TC%vT:7qi/hsB=@1 ȋYTxs$=kbôۥ&omzIT֩|YD ;9ݐ(sCnO ad诓Q j\ AW7-{22.ٳAQ?x~_7MWeN` n;1q-N 8, -K2׋E'iv>N ItyQ(N>{P R۶H*ܖ^"`&e!w}Q$jZiF3yvݖR%uGH.pk1Gၰ` F~3LQnQ{/7FRʥ8sZx cs~S9)8P̏ZJm( ׭%]cEgDPnRkObG)uojU([MҰ+Q\X_FتU9E7.x%݈Ajw6!rʌ7h 3!ܔ |mW}QB0ܤC+woErxH}&%E}cɗ2`c6W`2c A(d܂QGgw&գk:4h"*Wmy qnŢe~9i K8}waLN*P+gFH R8 1fptǟ|/7W=a ILgZ*^Gnխ1yҡ -ѽЕkɁZ6xFeul;'Ҡ.(p9rZ `IHU]iozʅ--(GaݶƠ?iwo48({VA1PY]4Y[$8.ԨDWt[ ƧqSQSkDqdyt 2( +([&}Hۚܞk#$>aotLEbcEǑxBzХyyp% !+d`q"'L?p`w0˾_x&=Uu((,QZ/`g1iDOnżl)m,jV~makA+?nɝ >KT6޿7ƑjJv'Շ8jx/kZt(0\6P6x= kPo/BŎ½Ex Pb<^ v1? xy {toI>NJW+3> )od!3fp2|sg."!P .ziVY`Gލvq]?mA2<HNcGIӟ? 
J1Yw J#&!ƛ?c WCg!L}8XBd2S0Ow֑R+Z[Kox,zG1Hɥ>~XS_4r²IX .{h)PģC] k?;V'l 0y֍o> 15a )Q΂eQG˾6*)N2, 3tYQ4&0m2_$72J؛gM``tDasASm mN0g JfT04׹#lDM&A5ف\G.H '#駳 Ҍ ~ֺ7ZBwfHŕ>ԣH- X$~SʗUm3:k Ǧ9ǣBH"R{u5 O/=~1%1|CCW.\Ód^i1%O?LZcVh ̴X>iE=}mnZH8o-I۸jsN<50րicM-_c8LkeSMNe+4Gǎ[#:}-v}|\W{m&29I iFZu"/S̮$Wȍ.x * D*9QG񷭣/d(BIťģ{Cx[',^W=fu؆NRzIf~{nL 6E6̔ȩѩ:-=L}øTj, ~]Uϗ[}(-=YR]QReO7Isk!\FYt[4+#&l> B+JmH|R*Q]߄UM^6Ѵ%_r?x?{R=WTD\`ovֆv_9cVs!yM.dnP ! ʴP7k *KSeU+ kG#TLljUOxMw&Y:b[O37n #UcVQ`Mm(d:wB/x,:'F店8ts/KQ A~߉z? P& GA~f H ~]^iWosA%O[͉,尙CN:鼌Q4DA 428:EVsVBq5~]_̃ݩ93>ȜVWf< *CQ DSnY(*`+ A@QE(_蕵Zţbq!z meuAxֆf_v%'$Jvi!i<ʦvO+)*g10aa($2bq#n= nz^ijr$o_ÈbnCϦ #jHPFu%0?G~D܆2SV߉=\$vK| Ee߿k`V-FmR"JЭ0+eIWGօG#qs1T+Q&hY B]󅬯1h~˺HTUm4,GgwPӂ(2$5B'|8'D$kJ($C?K(x>fS7%30}G3o/Ov}Ȟ}fA?<Чhh%HJtDnѝ^'Q/& |p zSZ`$3?5LN"MT鹀>ڃ3V)AWUmYM{*Kt2ߟ^Bsԥ<^%l;WNu<:ibz6G[$'=o؊^'Aw> WvsL)T;u1zM w;s%= _6rVwIS9Z4mSl7ZK-@p)rWՁ?U]RleQ]eʠ?di$L|#"!(ky[!0A &9,,?s&2k`XaK`poY5g)\1꡾ypHr2(  3ekg{2e==<@`] :iǀ:4ejX2~,^\ =A~wÂG6HB^fe:ոiUL١|Ԁyf'f ҄O5[99;3r'9ޭ§!KWofX;T3O@ry+(ejUD"j5pM5sֺ7J|9LT>r*gÂ~b6a^!G[ï2O}P܍2c /.eR;FѴqyZ= ֝6u S,;y66+X,j]\uxEޘWv-><>ȲʨLYy;T=J07*XΏY%-yt*晓,,lSpaTkT*Zi{ >cTP&Q{BTlB|ѯݘ JFPz8~#¶[lBUDqٯZa ۠ɓT$0"\´dxPX6΍e`ឋi>B2uwAxΨ74f酅.<+:1%QzuLh*~ڛ"Ӱ[6~@ $|J_dбM 9L 7$::GIwHMWgQcɏ2h4#[!$)"6rNg-;U:W1&e-PԼ' Bh$T/7a!R澀J:]*P?{+%"s͔@Al*`f1:=V ;q"q0HZѾ<Vi%jP}ogh\I}3P8 r *l!ֹG)D[6BPH{<6c(h4sUB16{Q]y[ '/w|!V*aS68+>V2~X&!4GXpcVŒԀEEWhW}DIŝ s+ eMAʷ79Ej>Ap}~jEsv^< 3ëс>LH*Q^ 9K%5[22dE(4D {\eJ'A@.Bn6SdMC4jÖjnQcM}2tQ[>i:> ۑzE~Wk':b 7x:<<7j@ ٚB uf_#u0#F l(V/Zu'$Kqv1]RŦ*6hs앜JG|{: TOÒ }BpQ{&h7xpMݧѢU~3(w#f~&O~wzBx[%SUn5Ԍe;~t6ǟAGF  FPbx(ԆH#gIXGSAYO%Qe$ +¶A $塝X{g0oQQR im7U'7[jQnj#% ROYb4?_ o @ 5 P~@AE#; PU8@^9B5Sn78;pF qws+e0Rxpd_#WV$[BhcT`c|6;:/*"{< jN!zƲAIOE/ж-.}3tw%@b@lV"!Wqή3. +Q]ә2 ݑҞT,~$_؃j֣׉\oߜY U+ꃃBb;b7Õ6F277/ dI8.A u#nM#ŪgZ*=BW&  "j[P/'I"&SQ[~٧"^8,C27ڝ&T&o*RHs :plb\a J AT] :C@>Z2Jc WA滭/ }]ߌ7bq_WT*_P.WҤ3byhDؘ'spbPc_;WdWs07BKWEۅuU䨝eZ+pCۧPIXɮ/-J-|Zۮ˯+ebַrt$_V/뻚Qa04? 
~G ]Q,Lcx9CP IqI4z m&oEq]\LGe  6`yPiTᠲץ,TQDfBR:)TWl>quGזl*(Ì1-9/£̠.­K]zND~Ŏ6~hd{:-v kU6znզލ0T.Emt-1~y%jţHJيśQ2W@we]Ib_$ƹjg$7b:I4GtE&"4< `cca[xLONWxl%!;55WJ7:&U̙ΏQK>?~Ph>X9vn?AixIJ)z= *BA`JXT’ifs,, kz]|nZ R/͓4$jPn@^Ƥ([P.am !}N(Hڄfx~VAA١~o7Z#e NHCd^tI՗S+lIU*+kYoUR3_3}@ȵܟW,ZYEҦ =͕PSq U!rGۛ@׽RZ{$}Gi;%"v:`AdL. ^mz(u%}|VEiKAJBK T6үYbUthP)̆ QgT-j;j=$ͮdٴ `R# T/o\&Q@#rk5?ۨǁtV 1]<ސp]u\i10' NnŷW@,V{v>y=_Y}Ͷ}7^Gal68w6(I` }l$C`aRY}W咐deA|F>#d*j^!~3$19_=mIR(踚W#[oC '`b:U 5z2DUԎXwRLNBr*R%gAZ8WJՄB3whЯlt)r˷HuGQF `>WiPAٻw=~ [zl4!#}^G`#`~G'CtMR?&'n:'^]R~R H(ٙn^B  z)]Hq'F6n*iG=~7n rQAJXo)3&ɵo;e 9Tn9݈Vig%zMM{+m IyWt <l QS\:anj:NF#Sl&aRNA"n:mxҍ~ֱ|EmcbZGi GqZ0%NX!; d$hF7Vv V; k4gĕSo\jW @'* ((ax_^%QTVJ2"1*Tl,?i`~ = whhx0o}T@?BgqO77My𱱬k ~7Ek'T6޼+rl=#dnuPa5LΠn}2z3H\N"@"!L_T|fV][ ))/T\ꐽ.+5ۍ"lk?{.E:TҢ:3DP"$~9H("a2[[#>W`{@Str^AUְqR8^_!G1UV&+~|{eCl:C7Zܯ4lGɉ_;oAܵ:ÑpЫ)6 sV/λP+Hm]N8ͷp^G j"N7C/l* 7EWe8 UO; /: p[7V˻[ĨߝSŠTkg;(aaHxA6+V؀途 MP8XϠאfv:bCHwH թm&Yxlomzʙ|2[-C ٍR-1o!CZEJ;NǺȩ,{ܢ[ 3Wj\6alCo2ɪ|T%'DZD;fo9vYѯsw? f۬oD ý?c\ķH6"lGzU2)V `IJ0΋fYZ9f(XŖ$JS]_? HGꌳT6-(B wʖ+Qj\Xl(g3Pv/p0μMAeErWn9K(WASpٗ&q!,3V!2@K`So=NaZLQS Ղz}-`0*Ur&pJAsHcD: vޛj; o#NYyho4|H-j'0Q9VOW6r^ ە5l A!?Wx!8ӜR= L"4ԕYJ/cQUtbOQ9DB8qUH{. #^Րwq0 AdfX r˽JO"XyI?u 5X}[b@K{])*+ %襳m6tC)~Zh(sŭmo.O+|o gQ{YE ޅR-;{.?l2f*OdAw^aΑAQqmFhG"3'[zOtrs&0E. 
/Lrt4oU_ 5;^(ewS^TfO'Se',gڣ4n7!_F%.Pp@PD÷6Z- T6 6T ,El>x PAa \saWM^۝cǼAILb IT@{ +her,!lrBev]Z`L~OtO8\j j28%tܦIEcN"QPhW+(_~k蜦9M]"вi/FKI A`(坶 ]rq{>K)ל9ßb#9'x YJLs VtTJV,磗oORg$"XR}.yU>>4mpMڅ Zu"ȼyv7| OFͬESF&_-8LVaDV،=s=7Udr 1 | l*Ti,}(N~M`u%ePxpMWMSqt42Cg09(W357֟rVhTJ VNS">19'> W^S.$+<5I۲9ÉxWo֧5rPAE{fܶGF[:.dY?kjA:uEֈ %xR0~WvJJE?&cAౡ^Me]oV:VBj72RKްm ?A>65AA(kbd(.@r@$4Rdc:d,oA%| 9,fZ$XeP(ҠwЛǍM7%\&bv[* R vAͰP;J+]/vϪ~Cm},w[M܂; oJt¢v8h1VSnM ]^@Ԓ5cὉ+j5&LzQ4JJd%jgPQڙ0|5Y!fJ"UR VI$.2IՕR_J[, ^5YRju=JD0&*85χwBkT/1"JFA] 2N *hk )- &]Yꍾ0ՙU-kUjxn0J,]|4p"a7PtP:xX(=oN4*QOuoi#i4iZ׌43R7\/KGd@B=U \/?i@__;xjB亩|3)g/CEz.6֯qztG˕R$l0qpi7Jܹ3$kO >Xv٪S'~[mE JI[5!:sX.,ÕZOB}8Cn(g(=~F JLΞʶ&+iEͅ9 9*~ou.*E=jHmQg!}'y9ʠ!69~oTay4RCs.kڑtSthAd.7R Z`1b/wh&>gYGTjŷ3S]Pe"< 7tk ]:p`HYyQ!? ?~`:ZxR>t{{* alJ~8V+XlAI:1~%%;+qJttR(ewׂiQ.$fNwKed(ٝVG;i_W&4Cc F E\e݂]-W=T,$M&:nPpm}[&f#@ܺۓXʆ4< T< N4$Iy%P5XjBh?Z&ߟ%sqh >UU@Y~SX1Q5~eЭ}Qh/V,9+tTe'g_etj,VabTk3cqi\Dۧ )䟭WKt+э!4grc'oF$-:Z}j6Ur Q8K?4ߪ$]w]EtZoFR!7 \@m_IT/ۀMϾ[[DD8xblI5ϴLZdQpg~ }+OtJ 0Ic<[?t _G6L_x\i }HZ}}4$G ӚZ&1N$- A H[V;"nG=@Z\IdY\ l6p}Hm?jAyeH'6 g>^pq=Ze'J@Wj`Ë 9s+QP AI?;FN*uw1H!3?\xh]}!&ic$Q,7~ "_SKr%URP.y~KTݨ3v]T|Hh/dOKz5*V{vm oX0ۄ1Sx`3vP+=w:DN$[S%#1J$"W2q/]VF=(S"zRK[njTւOb|A VZ]fJ, d+T2U&n5I|k ä+݉`A/ExhUZOa"FS;HrM2 P>U # P`IoEnjok!kKH?I7Jx9xTeUvzR3MZ(V MmDEэ+\pMSP?>fetv(c;)Rlj%˖+AM^ J,ɍ~d#u[QXD4_2(@ yPNaW2wnh&NcՅaqyyVOTaUgwY; "QaYap3910^Ko?k'R ,j&y `ރva5b;L |Ŀ &g("LƒyQy<:,[qF迬S+|O < 9ȯ~&ʝۙݭ T^W>E29?^+j6I8~_*i sAYlj#g!_٠x>SU^Dޣsm!-pCl|]A}UѢ7芔5ۻZu`|)zOHۼl= ~FMS$#jO3`U:ŨK5zq0 Ky\+o_c~$(PSSpFրbR=sb^m`y]wR])/xWSD;ˤstQ֑SZlLuzO=XOd[M1U&>°a3ƖYkc&֣5٦ƭϾIU$EjW ]>sko2nA HՊ PX_;f$&t tӳ=ۼ17kS/}zz{ yά/>]M_@IoZn5tD\|Quۊx.o,A,kKHՄ @ ABY1a^oB'W#xaZx=LNKVF_1AtqFWt2J2AV^`4~aK'PjN G7$Nb L*-S^2 8r!КnE Pw6ID;<$$MB"=yl]/\}w >n2$ˆǤ,& J U+@h/Dj P&\=i T4ԉ4#W5EڰiRJwKۡLBbwJ%OF ,sQF݂TEƿ.MgAڧPʑSڒwHʈÉl)Q&g0 ]3O6`n<*A,{pѳ4Nor:_YMH AEr@l( cf[ۏ{C9eC}쿚p@6M=/a3 &N&jm!%X)&,* aEL3m?=3 endstream endobj 402 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH 
H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 407 0 obj << /Length 643 /Filter /FlateDecode >> stream xڝUo@ ~_Uc!gYⅮEޡ့>ePI\J̻N!x-RR7[`L>\8✉$Jy{vH;Zo!4hxx|Cf[4,(X \AH{0Yؗr.[Y%T"gYk:%Kz8͌4.n\)e,SmʭSY8?suh;gK/GNl)p\+3 mn&Ά]+BDpG~߰- endstream endobj 389 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-046.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 408 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F3 409 0 R>> /ExtGState << >>/ColorSpace << /sRGB 410 0 R >>>> /Length 105977 /Filter /FlateDecode >> stream xɮ%v:ϯ8Cr^@IQ$ ۾#bqTUDxJK}:_~# !|ӏ?W,???[*q돿|ş?_/J?㥬/~Uv9r^YB^_[F9WgnV8GWP)^_W?߾ӏhi柹~\~ w\ j\g?˫)*qADz>  ~dNi嵕`U^[^]}Uo?g3<7H?Kg']q =?3s *qGމo"}3bֿ C+h_"(/qU|^>Ӌ҉cppZ p$ y5d\?k"]o¯Xjoi \Ss {-W3e~-4֞%=jz.m_^酙"a_xQ-{[/Q^Szjx+3&ã@,\O=axDc\υKϼ?cu˜o+}*o28?^xG/~gjf{_>V}+&f3\qDzj߰w=1=?[/9F'~ ?_?oma`88#ƫdLiUd&<_L8yUͷ셛˜OWg~_~,J_MMpk~ް~UQC5g V@`U/Fp c4^ 7]틚Ok>.܈&塿-^'(k~0"zAzi? _^OR5˳)~0} T x1UϵEì_VA:?^rGl5տyM+_'pycq~_;u?~œk>}>7t`}p~_>/\Dy> [;ܱ76tN#rj?6//g=j<_mv]x78iu{jO_lz{]|_4?Ϳ^t?_xhbx^k˜ȟYyU{|O۱¹=kjF(\? 
U] 9ǽ h_ W^ߪ_GoZΫ\%}IяQ=;gzOa7[ 7h]T:ې?k~s'o_ga?#}0~C~|>__jMa(4=l~7u^P^'}o^j#֞L/!%Cc]6MX&]~8k+}奵-Q^X,R%za|iL՞|JGWu~-j/}Ty60׈1~quM\UA~?v%l_igv% -˟ȟ'k''> c\-Wl_Tџk{q_>o_}AxR}p?J{rD@TɇGzNyAp'MC8dž3ʋ_+k 3!z g/tŕč&?͐',nӌc_;߄|2WgyO?UܧNHIC /=ۛu=u_'蟶>1#7&IFǙu \ߌmeV{^o'0\Q,qIg}k>p[ݿJ/oݯ 2?n]W6ٜ_8=IXqzpuS!QLI)g7O޸$1ah$ȼ-W\7")$\0뗯I_8n'sFz}y+k{T߆C+^i/%ܰF"5qo<_@K"VjOQ?ΟxHP?a|o[ZukY5A/$+ek P􂺮/8zcŭ=m]ՂBpe4/<onǛfĩªכf;ހ6cy(~;v=Cޤ%x_:n{{F&bɴ&u&Lwj[ul_?'2' C:8N2c<,R ile<*kyA~OgߘC;&eqYdw\YffP˟7ãrh:oT}'?˵+|#LN/kg֟xwPsq>ul_?t 2=_>]e2>'eezEv|ê_Rc{5RKhC}^/zgg\!B eKQ=_JtqU} 뻥^xrm_Qj=RfF,s2 Jy$إ'eqwOP{ O8}ST~W=(x->m0O4SEG =y~E9B{ wЁwtKLs>|9Čjz[ v()vГQ?aݙiHfcǖ SICl5HC5pVJ,^Jr83X.x/x ˳猆#z7ڵ>S_#f`RUB>{,pV>WZԞUշjZ^C>uF&ZRMTZ__i3~Uu'MSw8Glc|4۳(Nyݞ -(xp~nOBz"һA'^@}$X,<.rn<>c^cJ {-qU했wy:qVYG3pW˦sr9n{k|8>˰vq&udpY6o53mW>>#3df݋Nl0w߲-;͜ݤ3: iۮgn VC_`s%&US:Ҩkqð9ݛ;:4|5}6NͿAl܇=Eh7C['@P[Lcx5w>yMK%(Ykjf;UНtNA ^ߜp K.l~nANF>D椻p^A+]?V%ͻf-c(Ç_Vȳf{Hy;~Oxw춼:;,b[  {M8l~愠a/Ǔta>^Cr}v FܩaN'8xBr6 s˙C ݙ ܌kpv)F Y^n7CHX+y2 #¹IL8݇h1{y8fpd5Yo~ko=YyoƓ̹>&F/C]6e{gn٩bWCp֥0rfLsf|!q*C1yo'=ؿ6g=9qh ]3D舝o&NWq%؝Dz"醳M^~WLmST9|ߚm3{ v mzMprfMͪN:j|8̰E: Uxo؈мԀ]Z f2gnjҁR3Ia <W2?\,vO Yj$ᬲ'I K Nb˻6tƖ})S+ҳHvqڙTi pU?gsOr"hXof~[4-"JG[!ÚY8, ދΉ`GV(@ ے+8߉Wl}~@dNz/(Ѳ95>#oeNt5P&q 8>D^OjoTځHw\iw_?`8im4 UQniL0aYCvY ˜-5(ꀳƨy!vs-yVJ\e+5gA~c gp`]\FEiOnP:7e]L RܻIFq3 PiN"'2xa Qރ8 TpmTb]f,`"uw%h+Г%@ՋmFt?BBּ|L,ޠf%)5?Ozk%TNO""_[ΉmTϒGB 2Fɟ$R>Ja1<b7ƫkM%E @|%‚~xɇ%Yɞud$N-^o S]Eec]D<>ܷ=_WIL}f9e Tw1R#lB}NK^׾Vނ0 m&'cN>1]CYwdi }&<L.-$k͵2 rU޳7fT|q =wgȉVWX ͐0LaTL.r"0y \sE%fIf!UE&4Y2fokui*&$5Gnz>INTx#iA_ЊsRk1%Osڃ`֐9y_ #2)qY=q7l nU*B7߿axV@PV:ٷ`H:Ww$ oo~ EwzI.hke[;GD;WvFţWR^y*kFjwTO9Z Dt٠Uh]OpL2av_RJ|MXwB.cl=ѱ/.Sl)DKsd5 Z7o'(;r^W1V>oZ4|w\u^SMg3!z{u<τH&|`ݓ|ˏGqy+aޑAupK] {&Xp׏֏\x7\Gs:1͂9)hh+AtD-h5ŝ&[C6e6GKR躨1UA\ ތMgH_]ݒa~wLӾ(εYɗwUG8_U-ru~xux} GdCkS8:HŽ,amWxmiO#e^Ӂl|ɝ׬i#Py"Ln9a'ᙣ) zL0ǡFgvq`jC'|ԴY!Zdn_????گ?WOd H_|}q} Nsw3z?﯁':j瘪?8eoǏ׽ h$/&qD#n$(DDg@| #?#~ao"-t<z0`VGɍ$G=9XD>+is`Z' ‰IW #E|<2B Y"\ԞaYĆ"ZĻYĐHC1^8M7?jD{4 9r# !8*ѝFYAHrg3HQqu|eD<`HE:x"j'."tJ ?ƛ^1ςp K| ,"j0) ؙ''\a. 
bF&՟4N%xe\XFx2f/! Ve0kH-Ca.^o FaaS0T4QLdza{ m}gMY8t7޳0f>?T}w{~՝15~Q]+-nCY }@_NSӥ?2h_0}Y; w ?vbJnJ!69dfB!^'®s\v:SO9V?>+.\ )a(ʀ OxUQ(eWcgYjOCcq!@kD%\o|ozmp5k>f9k~T/,t ˑ 塿CD:|+G̮|~\eUxi?@^_)\'e%ZNPUQluK>771&5wWy $WU"̨X򦖊đ.c`jI#ZQU|?C~\~YԮ# gh{/~h] T_O"&:/, |_H?c3j/b'HS@p}cm cݎt{c~dv}0]ɉޘ B\ "+*>AIm0uM |z7}Ej?6/ F> 0颍GC?k^ %\Lo8qFque1bXtRzBC%NY_9I;? ?ޠ9~ߐ?kKMѵ^"?u_Xf?@Ҵ=ka~?![^^#jЀh0s|@қ+_>hObR9?(:T{ jCߟHoIZ*p" H~!Lj_}>RK~~OQaFHI "IL$R>6EIҦa e L -kG||F=nj3ojDɗuG!0#|iӒv{X3q e8$c!pS4p$N?5D>0ݨ{U wv?4 ȝ Xi$.fNo߯fu>ҲU2Nθm]}$qns¼?0?(h}B!q<QYA(kՇze s L 6}Q%sO ~&saqoDypj}IRG/WSQ:Oy[Lg:&KM !/>p 85|> РtwyU4Iu7w&8_LDfax F{:ä {_P*}syt5# c81=f#XK:pa{&q!M~IJP~ex7<=>8 ξA6kAːb[q,˓NG$2髬aŝ՞ HYg?߁g砟^{C޾CxhX:*I/Z߃uCn?U4OYV1jӄs;c-}> SUy]1{w>#_eͺ 3lվ׉4w7vosz~twkeNL2UZ>)HpO_,ˡ f,c<⾯Ao DL)jx~7.7ATobH$ȼ-o~qt #[L(I劣q[t4u$B•[sz-Ԭhd}m"Z^۷z g>xwƨW`bnFYUM.zw}kUhi]1/X W<ԟ9ʟ|U(gJ7UVۋWdc}lU߾vq~Xyª\~fy?|^^;O%MW׬^quwߗ|mJb.ޖWl{<@S~ٿ\%z7wnth3b@N?V -N8;$$ {e##pB:iCIN-@ >N0˔{3(8 9I.7= G`e0Wx:O\C>ctN5=hEx_r$Ёw:^a {ci *C}Byė1q^ v)vГQ?aݙi8v #>nj[w5#xk7S_#d 6 JV,>{!O+Z~ocCgS{ WWG<\x4P!V35Q`ppBy]Hȁmo&i=;4{vhRӐn I}sb9_>d9'DO2,y8[ : :+^XFprv0Ml'zM-O6dj." r8oebYҋdZD8Q[}SB񼽹􃦄^<狨vj[GG"vV1dt]vIʊ^7b<9 `ȳ=MpOh˹,|s<ߏ< WHjAxB5ΡiFt}/8~ qeGn<:=1Bw ȮN γǻN,I?.o`;exNן\+)u7l0\VYܔ˅|Ppv xo;4۸Cc< -.RWppޟ5zw9x$^llXw~` C눌\Z3;z\vXP7w8IUWVy{o@^vSx4|wroϠ>n^aS_-@/p.w:<0MZKJ?dHտ9/*ANXʳBF"}%n%&5`~{oc:gڰ7!GWDpip@we)Mܷv99aMwF3` Ȯ,:u8t7^dTz,t"$7)Z\nPL)ص7x#& u<4hM4Q pZoz VU`=Ɲdkh4ppP5{HD]Fܳl#)=ҝ?&8q{YݫG'$ @T+pF ]91KG=̡6t 0|]~ 0Lz>JDA=\ED#wgdS#JeMN쮉bL*{R;lYav"8]g!OJI\ e:fx+ӄ Yg{=gc+84+ƕD]U=ЌYesvF*n[+3BgxL =sBeL%(A WPr$:*I={ :r:W=UUO͆H?#y>W,b5WC|K-{ܽ/hgaVt6F 8q V>T=ӶoLo}! 
)^]-zW'Tx"WPxDc#ҕ+lΪDc)6nV9o8wųOeW _\mm߬1Ԅ>O?xGdEՄ3^Yl"5bC#(.®{=I~{D fW= = }$ Yݙ }8b5+(3؝QFӾS=qYs_:}wY ^\_2R*l,#3WFFUY,# g"UWFi)#2y(/n ȏm(?J:l2IyT7zTG-FY4|p  _2qS}:zGZgG']1"<)V \o 6HweP{FeG@b1(HOF SBUXF\) 1JK 04xwUPU}`tDuˈp^A%E982RU,#E|XcetD6+S#BJbY,u*rdqB7"N }b _ JS2B֗22?YFFWT622mYF"h(w0࿱YFb9XFp7Ș0S6ȁ>Įc/*6ܶ؟:YFv9XFVo֏2Yw{} nUd0 Ybr22.|1ỵYFfK; bϠ>UP,#?UP,#QK7Lb9AD 22aYEgedfQQc,=c?Mb86+f$sXFf+"b̮?"Lxo 22AYF&{7DT2rH߬_f2ed}PYF&(%dW,#WuLD=gr` 3vzLeQ7J6MedBYF&X6YBAe9?zI$i°˳eZ22YF&o 22yN2B22h~iE"=h50tef~YFoeDXFn'f~YF| 7Ȁfa2BYEl}>id 2Bg2B22 m֑&xm4 lAVp ,!Y8F XF]LV%M7C`kPed4dedOL. -ki.]fY#}s}22|4nY{ʱ=d j/ r`?X3V!H!XFn\Y_Dd£KJ#~;XF -n'ߊeQ,#V?hJ9$V;(OedWBwkvx,#6k,eЛk G{ 0oedlwͱuj /"g_ˈ>b`,#226l?22}p=m7622XEG5`YAUR 2{8 ؇oed fyN,#c`amE7:XQ22XEsFtSW0b]R~~/Ͽa>bK~~$z2,MP5VdŽsDG,#,]GQFWIdDDO3o$'Op`IQY\x^ņ>eΊ%w:u]$DdY׸DҫbRcbD,ˈObYVgea,Ŵ%Ȓp,ҩP,#&MHi/혽 b)6?b2}mTŒL^Vec00[PDX̯8;m%YFJ}T,# xj7m,#1 ?p][,#tz_d)@tH' _%%3^2ȏ4)n3hB"fYE'Y2R*"xev-wxcE,tد*gL'T^J>51>2b)YFJRq-t:*Yu^'KzjXF lR\۸6xԟd!I_bSO8R_ZUxUԋeda0,>zRS;xT< *sy}T[a܇2b_UpF8?H,# 7XHb)YF#Ci`!ˈMXFNԾy(.y&A2bfQe#YIt,<߁F3-=:i;;1fa Avv=C^W\uڞ(IBѭzP`(bzw'Y5Ƈ7OM4T҇Y@^d1 ԸX@ s~X,!|$kȞ^O,O@,| s} u|.{|Ao3- IN㕎_2b+,2rXd!(z'T J΍z LтwrFѢic-@ymŐep,#.9I+|o?#}X5# JyMϽc3掝{D5}Sv 2I+jkE8~_+UD#kzPg'h6?{=v:\xHd%'z'kO>>/cN5uNt`X-ˈq$,#&ˈI*bx\1Y;4,# BД>2b ;U9V7Ͽ77͚`2bx,#z.O ,#yeNW<VS\\e f1\/o,)=i,2bHIfpLˈg=XF4beD/YtXFlˈ'^6nj=O?{Z2XFtŠeDzH/VQ?NcN@W"#:=M++ps*\*2Z#8>6<޽s;,{Ȳqp%wY`s:M|^T2QN/y!P@AEDrO.I;S,+$CpvӗN?/\ƙ2ږZDƭIN9?&v3dVZHZu{(n [FUt8ئl"ly!ѓ!!>;@}sP{yN\!Ǽ\fz agi /vsG{ެ?F5SF:C1#K.s}̹ &̶alr3^}G'z\{!sȅ>^nh7evM¬=M&ȯ]O /ٶv͛ N+0 [ U6C nNn,Bd#;Ƃt~S74)P!{?qkX9+kvbDܱ>$re {$S=% Nwc&6mgːC_e\DRw%C[Mć$jN=!X]mc⺱.pt6ZEht')v agog(W'k&V{(9vE}a&0GX,ۂ;&\apCi;`eNur:dj%5GSd~A܂M5^M^dBS UjAU'E91brX}nEǵ8NrYA^1F|T \𣷍6dMjC(h c儽nX2%2`>Nߚ2X.H!Tz)3diC+lP51; ~N8:hҝ愎 ,FDlqmYMj@\;ذJ jG.}+r.KR;Ĉ}"CJvIabeѥ3\i~r2hi'oQ܃S}ӽ~B m5 00oVmб1ʌm7])=nM[˄-8&|ܻ bvw*B@q <ŌqgRd-,CÛӂG5y \DW$RGGD g1\=1BX*t ds7A`\]Qꨠwo+DyԊeXz&BTwێhxn4xp?&2\UfuGXct?*GȯT lHX:3vo2w"ŕ,%X?[D(wy}wkĚQqNV 
ɇ팷9e_l׹%5yb"WH!Kc=beøތqit'ضv.-C#v&1 jrF6o} wNe^\^>7婶ltiI {tPVJz<3zjϻlL͹["b|m5`2NWD;b RqΖb\sz7 -bIs&7׮滜{QiȌ #%C|5*Ɛs)pMo:BRqkR|^XQ}cf{"g?5dX8Aц3 Y)j`OF#+:bNfXjizC/.VKx4y+EQ,?#B"MVMJ#̧ R,ܡ-(V+ XQJpt@2R17KU?>`"fXxnQW ^R0X*S+BlVP&pڬg܈Y~Vw*6BLQP:MyL>yȢ¨. AWs$!+6uj` O` 7tWDg]^QE /'V kR}%\Lo81Y)*Bbf6+} /Yft{ʐRT0oV 'Rh=B BXy{2W\ܘ,RVw^QF9?+3 ^X)}$VOҏ\v#˜R%V +,aijqڟJaQ c ^asyIKFt<z^WFS5|F~'##w~A;SĹJ"TV0qs|6vD?#i>tmx`>5;Rw[>OIJQaS#8.̈I<_bb6D'Rj, RT1JayRp p.X)`XԎTsU銩p#V~Eox(/}cg؇B?-WT?ɺCVQ ObXxX)c3&kޓŒesm" /hM+>JbI+ gHubf8PO++_:=chU^=ӊ+ݾ0D,_K0_*6R,\aՅX-JXpfXQ;iGhˏ Yd1d˛ST@}M]_"ׁ6bX{', uP JatO d`S=m_yqo~QX),ڬ?RY\V{"CLߠ߁u⌕Mt'O|?6A&ԧ 7Ӹ\i?=Qw:>̺TJpS[d6S}k FP:Q呅>>YZ>a_V쯇5< yX)oϨ_p7蛛8믃٘6dW,,IXJqck/SՒ |2FQM<V zMUVJ&Rp.gi:pzRʵ>qׯnٯ}m 7k{sr}lL/Š/V XJa: FY f3Qb_N=t}1lV,*M֏7ڞ!h?VOo8RS+nY)Gf3{W\ut|6XTu> o oN ub͐=c(Pғdv, C2O2 |MX$Nh~ WRSm$t̒,[ET(pLgY)s (#ڱw}Ύ2%ET}+q#+|gsacq ~08v|8<0mqZ^zebߘG Cu+K=_B{\/Y/Zo}E'|mb{ȟ0{bk0y1l1~g;` RN_; ; G b޵6"c{dw7vܰhQT1~wNǶbJax|mVnZrR >Fl9s?aΠTy;fޮHևբi蟲Oo&+D >a57~hdPH?e'TSk z#b,/\3Fy O|ϗǎarxmP{[|1qǨ`0V>X)*JavPqCf4X)4bB/VYr@l#Px-/}R6+=_2߰Ok`*]+7lփp9X)?BR~Re|^V:JX)$+$b:Jqdu To~Kt.$^ST$yhUf=67>5e9A|Wk2%vK3O<'b$pU OQs؃f9Nuޝaxw]S}_#)sv#مl)E{` >Nbk kǽ mS>&g`_Kx:}K}LD*Y *vZq+1mi߯!Nܚ6=*!1:a7-^wOvKZ$3{52lOM["}{i3Avܣa,Xcjɉ MR찷ź :-y#z(a&cT<fmYZ^R/tR@rY4mev7͉̽MmsY@\?z@foj>&6t[7^n\!YC9xKm8‰%11gjMzCB(Mc =l"-KN hk.;B0W6{WXh]\MS='>6并`7^,LLugd=mA%F iUC]=MHXN0 w<+,*ɒtb=B6p8k?5O#O% hV҈3Dr ~^=ȉ w ;nM;?cDy6`hus: &9o9槦\yaJǾs|=Xݾ=="u' 79Wٞ\g}Ӕߚu q L^~n}1ыMbDEZ=,\xs0l^`otiTětz8 ǕcZQ !T ;N=hs #)(Uj];[ .j;#DGWUY\kCP!&+HOnQݖ}i_a}F p [7B8uŎ%L!뻇bmR6.xF1ojqx5ZPk!o8^DvsAdF=qV Ș5bУLiҬBhn㯭`ωB>t.-n !Ą-y*4s9q&mK3QV @LbTAIYo"޻ ]Vշޘ}hUZ*ߟ+H&MbHtO0*Ai`RK`z ܔ?NHT',P5EԦ!`T cS;[%j;,UYk+.sW FgK8d{~>U[n~n 0Or@pRXI)_kv,~KTN7N5V7pTp!UcR {hIgSU_E|]vACR R>~YҲ3 8lr:wl^_ߗsuenk W:MEr|W̷ѷL<ŠY'*_:>[\ChM7ƯqS4V_ҔꧽDRX W`qqMaN09#^@+zJ2``g}$ ٷjfbe`_n8и48췮 ش:u撣$xUjFFD5M@/ls$_k,12[ jsGGxR#bLBG3JwD#9r/ 1 6!~UzeJ|˦j>rc~g {_^sdg>:ߐ{f >D:sɃ寍7 _LohD{*>1{Y&ҟtԑ ﴖSbHbm;[L|z{̇8>|x_xQU\z~ϑ[sQFo8F~]fo#_Y8 
?|s~3-@:W~6ɛkGӊد<2+_!QX/)dz0yr=y#?2G񚜟l~{od>Cy꘏t GdGnd?}=e~q"7̯ #milqlHeO}}˦T/3}Crh޿e`~ҳg?;W+~n/]Fy>_,V>a2+d_l7sL0.O\@V3[<ٽlXpG쯳>CyUѾYɏ'dv}ϳ|9v6O{VfZ<&+;Yo+]f[1[;#_뭛ׇW;+<_XZg^|w믗ﱿ.'>\~s~]v]Kp3yas;_Ny;_jܯ;3%^s޿wy">*Ͽv˯~1~vv|[rد3Z{IgwD6#7'Vs~~ߧwҴV~}_>-c?~]to}o5Ot|2S2IsNnw29ɘ_\ﯩ=fxS^RA{+ﲩ˚'nog6թ~krǣ/_3%?}70yC eәl3[Grf-17uA?]v^[CSQh#ևT r\,/ly`sP/Wp_r>sq9HLE0yl|%vQ\8߭ ]c-.Ez<3ۇUNmr)}48od_}E65gGG>>f{v0YMHv -O>(}OAh+Yvv*7^nՑ}~<}9Mc?orb{7aOW ޷wn3gUߴ,X g{GrA;G9m.Wd'ymo ّ֠z3zx sor /|n0aW~gj?3Ooٞ`1X:k}OR68y%/?y8C3[oA?!أѭny>ʋba:rǿ7/+/y꽼 Vٰ{dk?CK7؞>8p{8>yqLa8 O;oO[}gvw{Fl<T_~tշ}Yo/W7WQ).^IrVފ^ O_5/nlۛ ՞Dw5=I7vXq]5MFu5>q#MhƵ1θaojό\j vnogZx{\pq" ]^QZo2Mpk@"0E oHxˢ7Ԙ/]F#6楑KѾA#=,d_->?'__)]o@Aq W/oOAzh 6~lqӲi6 P';\!/(?F3?¦楏N"_6n: թCBLJC\ :ӣ} ]:D6} *|i_)7|?~H_/׮1ZO oBGX2:;9s~r^̗*S ھ>dW}?y׷ Ook}cM7txƊr~/wm~}c2]Sчѵ܂OPa{ݙy 9+_uGeKwxkH/y 5/[,rͽn鵾5?{m%lh￷[p-qKֱ3ɡ_ mֿr d&ֽ֜=WwuxMgr<Ǻ)yo+.ʠbgB_ Wzf.s~|߼,T3wyoz}}o27}Dzzo\^='Q.r2th|iYʅmoa6̇<_;e֯r}orc1_r/_1~J5ڷy ;x;ګ*,nHo2?䬘\MpM[X@cS7frBnA'~ߣ<`g`D${6wϾڱ~=~#;OCqdc}`|剰~+wk=2|GXRkNH14jmov6^*h)t? 
&i⒵fO5bQ.UZ\ќ⛖@}`d dUqX38=HYn7kԶUm~{('s?N8dY`7;ǙڗXB쉸z <L8pFɁ-,iܢAxsn O2\\",ru}(O[M`"0R3=Emic.!$ZRMb&;:yKDmQ!V%DJbB|bS֯fky Ŧr|+bc%DD,7?X bYHijUO" &-;^}R'ۙpN|iEJ,ކ-U_.N*'Ԑz ĮN87}MūkObdž|eC,y#mFS_Û:sK~[H-Q)7RogPܣv9",, &wqehz/zW=:YuDgK{VPe~}=OO5MTdQۅ(TMj /׈S/WALl @m; B[:L!wM{J/u3Ôﲯˡ39d` >qʞo@Y*r~RTzpN0\e6z:mT+"Sڻ yks+]r&4%,h}Ԉ„G(q~fDv+/ʣԈ3Y[Cx) l=Q3Vl/hz'N264|f/`MU&.KIm{áf¿=qHdy(d;;(?kE9"Z}}>3..2>&f,A9֝ĤҚ_-WzM"Uf[ Z"~ڇo# *pn{^?l< [FA vwop+ s$-ޠpRgDeiYY-O\є?0 ?j z"w)ms(j9u|m`(۽O8[\Pt V2AL>5y 1~5O9$H>herCXRmĘv>YK+od2Q fV}>fAfdbZKe2 pG14$4r@_S7n.a+|WRHzB0#o ~k_dk!w-8* @?ޕ;Ď 5owG ң֕=t *=kߩx)Ðwy7وWv^hqѷhoLb n`]̵ 굱SƆѕs= m= O*!UsG ?,λbb;qt}kQBfMIDPJl?POvɖ] Z@v"1#V](SBȅ*/J!ZlKԳojt5QrѰ-ۇ LbZNjg^I֞| =؅(j, u,-%R[O)dL~%q=}2+(C&pIoK : nH";~!xUvZfX^}k#>KփrKS{X߿\ih=}W) LoEu;^л~11*4@ZWۗ~o0朋xfI|PE2mъuB]^=nlhZB6㶅M绲vK+Zv^.$uRan)?*rlsw@GsPzצ^G[:oNfʨ_} |.6> Ʒ Bo.o'ͱH`eV|) ;{BՄ~U3WXR%+4}H2Q [|0&  vJGY5hcԗ ǝT1+I'HXb%i2ՈSqP]s~tkfwn/\< S_u 0I6嗍ѩ`6!+j#5.ܯ$Nc_{?lrk}WG:&t5*ۯ_a zxqA} |Kt~raBY?tgb支 */;;VnaV6QQ`!#~682q`*?r ٷ{)a w.PUeJ_Nyl8ٔ׬+sl]eC407c=J+WmSytRߖWG QTph:Wx@f{?ygҦSiQ}qUf'7Sb%DBꪾD˯Uh~t.*U6oJD`J"2`= R{7WEҜKDrK"MgXT .|CsdJ& sr!ֹգ?JCѥ,˳NTCΔi>?Ȥ [`mn5Emn탯?o^prTz#fB"Z[_E6:}-^S/N.XgU$S[0:pHqW9&kiɪdu.N. ,hrqҌs5bv%Br8@^EKa hhص1iHF9&!: /?h`h%09^N (h7؍eon9vfo;h7h XfsN yfH1Я54G>ߓϵ4Gv &#ts 澻9>;aFOnNsK4G'h-x_] YG^Jh 9t[fF㨟[YO[} b8  'BOJl #;Mt<=?tg'-.oA4U:?AO9Aa.O7<(Hs|䅷YCn/Vt3;ȽC{avwG#w\1_=.4CIs|d#oƊ8극^7sx#;.&սW e~~o1Hs|+.Ye}9rkx7rj+ )3\cY8qMyB-Ѽ[}s#hgZh 뗐~A6Ÿ.gOow7Nk64ʱ_kKs$ͱoxfnsbϓ4IslO?As|d,;<Y~EmWa}dgB~-\֘kB̺ZS<t-josh [by xnЖT~Ł@6_fW7^MT=8>V3imA9徿˛Nw7j߁RX9^]Թ^2bx6gW%C8i3Li+ y9@y?}/iQV9,im=cلtp'w$JFZV ݿiҝrW+`-EI؅(?O? 
r<ޣ`G1~N}<5߻Sxy]~Mc~?CF iAX?!d5ߎ}X/)G6P spd-<ٍ4ړ$!Rݲx/vw"i h/hwfT/(\<#hw"ͯ6IVP>%eLrp_Hs\:-qwMW%quh7}ᄜ?4OAApuBabǾj: 5)]q.RH2fvј#8U^xh>J#w'(tqAͱN2R65Xq$ͱ8/V Bu\O?{Kw#Jnb|_D?i*`Hkl-r}W99xHka1} З}o, beD9ӡt0gX\Iߓ'\.}HauɍER( 1/y[Hsl2iW9E{llFiO ̛M7}x Isl2B4G^пiNs|d)L_?f臡dx!~y{~X0IϨo3\^џ 93Tן߃kƛ4ǖʯ?As| `~44Q[a9n?Asl #q˄=CGڿ9i_ё4G^p{n|"MM祷'<-|Ʌe⼛p"qnr#g2oyo'w6N+| ;q}xEOrb$ao^~tws=^o7o{5~\}>a {ON95ؓ;?^j=;`Mle?? a hq%ͱiDU'L&µpZ:9d`77WE7oǍ54{|16`Zo2MpkCEa:џۿh[|"@u;d l hQ~oqn93/:IFƿ78 BnbŠFcXWJL)D:XDF0_o+_3d6#1CKD4:}渹!iG;g.9l8h@7-ˉ;VlP';\!9~eh#1} <K7Ӹt-E)Lrᅬ(AE~ %߇h~Cب5ڄf? :Y&~ =lxIf}ˬv$=ztί^X&3?]1_ګo1wh_̏4O'xԷ,ook}c ǖǛ6V[~oߧ% s{Tl uo#3hAy%NP9]? |[}`e@2oNC\B74wyWYq˯_rC} c3ɬۼ_ YAcq/R$^"ͱОޞƽWZE~Ho-={9>AƧ3}7i;_Qo27|/A;^c {NZWM.}H/5=O2iV̧us鵽)?i_VR6#O߿4Ƕ"iLMwn 969ͱ}]Ho2wZAVyQH!96-NMf}ϊ!ͱ'hW(9Ѿ>sZc)33:>h;Ou&+lOA2MFy1?㽹W ^Sˈ$ͱɘ3|cŎ+j!mrEGȿǎ71cGrZcGc^ŗn47 `ǘ'hM~4Ylf/j2.Bg5ˠEA͘jIhQՈGFUIZcUiP~t(ͱh_D8`N=/5&G koc/X.׼G[~.&,+ N9J^=c|)[,w,4Q޼4QF8iӛ)9>Mrz}rDV9y4'hq #NkZHaAT :~G<.Httiw34++桫)bNABUj9L=u1k$~8=؂'ңʗލ>`[y*S'\ξ 휠 YU >p9v ʀ%'L+ g ]~&Map-^EAL,b-4qïm)xz:"Zdb^&p̌Ӟ+lx gSU<6"d=&h#Rcwh*s݈cYj?*˹~Lo 3^eF_G][OW()Kv#kSBbAXtN\63{dyWּ^ lq_b_mk_C_G%zSQsUfMp]8_CgaW- {d P1gF^čI=Ԥ̏)@,v"Y|K}Z]Ì (56hl o)hg~]q.Xc2؉Վ1N  t P;$ڇ Ȋ1ҋb%Ex.w0K"Bn?wJj+ p'p:wGS+$ζimƖ%p!wf+Jdm}PpQ!|rtC;!E"֧' Ǎ `jڻzg/{kPΦ+r 9K7o |:Y*[SSxH|:^}^.]]IP|(GRo@H[SpDֽqOx]pIi6,H~R\R[N7xΤl߇D7.gMfRY顢RM_;k~ebRkwjr)եB}8ENXur@#:Rl-Y1 K+ofOᦊ En;@(AUv"F,d_QS@alLP%H7ڔ t5pל Uq8D xA]K/'bU0Px-9*#CDc$-@l@, aC60$-iF 47񘙉6PhGl 8?ŨV9cɝI @i% #B|lɂūuB<3N܌K%Bu7FB+lV NZVE%}(N(rZ~$`Ja|*\ _&awAm˟MY13)3 Vv_&ŊU9"/҇ _2k4յԍ@ Zi[lp&H &EYfBIRRrʜ҄R%.40!m-^)򧩕ݿN~|*s"_.DӛdLru3tH͢Km=_>AHq}p{0-/_"QGvȩ\I) є 07HI5qU ;LP 3܇oEd~8If9HwͯUDWF׭k -i92N.52i\>yȾ&:~82e@ qSi_ P{F}pv/3`?Xʤ'qPpnHG)><\܃X;+џ`@3) 3n< /I@߅Wm0Ю2{W*o06yh7y"4DIs{pa%}*{GsO3A9KH!/ԟ`źq"БۼF {_uy{[0-7ޟvS7K!d2'~L_Gw[yAC~?5ؓ;g۳}ƃ6q<E!(XdZ7djx'n\[8'R7L vo2n9~2( )cGmL桋{t{db}2g}h94~ìs/oE'Cn.`xdR G5dϟu><3/3-IF?W_P',wnVz _LZ_Xq8ÂymfCΤA^wycyh!l2LhM`+!vZ03=.q{:lS0 fyY9tn|>%fPul *A&3Q)p4Dg^amȗt >D 
YΜ_,?~>_uP%lRE!:f&ׁE|~{{or|?z?2QG.o:lxd.b}|S铌\پ]^9;PיF̗*S uOCghU/^#F~^#$g~_#Wy/޾O5K5s}^\gY;m;Ա+2Ľ.Z4-yga=fSt0dL@#PQ0_e-?ȿo7otiMNQ[wF7ᕓWxV^3 u/2+lOkJ!C߷r}Oq&Av&d/3?g dh_i},+|WyyA& /v|Og \QU~Ď{#;3O;:_q2!y0 }|'b|&O;F?OM|Gb>Cqch" #5dRrC~qb]z*hD 8V.z= TI|O%W=a(, Z/QL_pz$N*2dOWk>_]=CwϗN=5Etf*-E^9OS!oot, <-R(j p46`フ YŦXV"RhB562շPn6+[q`oj.S-ŰdB q >כc&k'J"˗ N؁v]q*P} |m棞%6$e[%:µe@n:QRsD/NPۗ߳&ODx^Kzma)hW+9._p^F;ImKqE"%с~LXR.1V,] $~.9*6J jSˮvžToA.ŽW01= oV2#P k/0ž&dm-$"bBwEKZ~9%P|P)P&]W_ qDbvHYZk 2H؇=y*<"_]i? o h6_*5p `_lJyo=J}ƟD>;y!OxSdl\}\C ύgЁj,RM_AyB,}GÕ$kNgD5aD !+JS['}xI>Cj7م0RܚkSPá嵀\[p"qH$'L‰cr7iR_)klAC`CW@,Ej~+_GKj>`d_l }MJBOTB(ƈGb"9>\dA=7ucDT%Hg([U܌WrTk~M\l 1OM K:q)~E ^=k=Ni$OR]މQUS !Az}q7(4/ w7 |O{j2cSZUq8Z"8vرvv"i}—D`!uL5Kv.]1 shF׬tP-g<"Pֈ:~(q)ĕ!n"پu“.f?)*x C1w uԪǩb@chP& e9t3db9t̟稌LyqQ)Ae q7Ʀ H*p/e&(/BY MsHDcy84-nj4vEi &T]1Ƿ _2.OQfW'Kl߿ UR:l5qN@ W>@-PkũbX h*2q3O[?֕Af![ |^- hi"T]r&hb Be*#NKՔ8d |$㗒y φ1HVp)T ,j<2;L(Eso%Lp"Qd]Z7iYFGd-0Eupb}:x@_Qͧ;!5kι.poļeVJ@ܤk'oe<2B\]}al> bt8$ff9Tծ(ItuLSGGw2^ם?Hj\:0TZ;I4ծȅZwŜ 4B]+ؓMz Xz+L>|5ۑ@1oW%婿NP!D4D23 ⬴ 8 4V*Ư2œقc.W^C]d*\ +w }vAԥ??d+q[L‡^ BVHҵpPĘjJAY R|]}zo^yLY$7Z}S}9Y҇tSf <{I2 YikUD??n&2\N.ȅ8Ba ڱ`/2LV+;q"xǐ 40 OnTx8 * cv?wg:?v>qU$cw6N? \%?vh݆'P}؝&^C`!㑓뒧~@Cw>e?;p cw+Ow/c~tfޑ?N`6*/;l@!Z꿒'^5 ;{yP.@r <{d>Yˣ yC^!WyAް/:Rᑛc(9]vA9rq?CSf9[?vc-O?y 1xd$L_ ǧl?N฀2旳?vzB9z}Q: =xGG_l hrE&-3'36 ?;{sS?hr?(?7ys?X.zAPY?cszP8AK]{h`4/ֿ`}g6;~r]_O7AMpG3?&`~OGKǂ<:o iUMf'硍~IG-˔~?MG/ds<;d<Oȸ28Gǎ8`{Tu9cGc?9=&Ku;AFzs=`uUCEpG]>=C?h97;h)?vt#ocw16%|vvw?[}>~ O oddzBO. 
>~Ky:1\f7s@d<M6xpX_P Vr5CQX.]ޜ/@:'Z'mQ&cױ[Pf62 8؟ = GKMNyo?]IG[xk?hGaEg슅QB։JG=Fj/@˾y/?Zl?o,#'`k4O߼.q%h~c{~sѮ^ۅDn_cx\_QAgҿw:v]2+:߯`%Ͻ>-~?.d~}>L]tRisb~FS%qeG?DSg@rLc)w"鯜keR X!N~xHGBa(7mr?}ۧpL-|xE:|L(eo8eo5`ڃH I9~7~?ݞnώ$O77<wr 7@D&S;pZ@;ZhxHj ˈYtxag{49K_|Anﰸyҽ>w7kZ Hk/jK@=a~oo2옻ۿh\|c$ۼMv>7_C_c|/,ny엃rAz?M2S]n|A%j<||B//r~3쏴 a9k]t&B]6Y1owyo32d3h-fZ£oo@##m0d0eʀu7MrGĈM<@!ul *glZ{6lʹQ']'% /dSo: !'=G6HuP6)h^oԙN1tP +QF8:v,x}_# #Yr@y^b|}N&#}_^g1ҫL[_gT?lOCί%ٮYc<6m<~o7oԩv/kb=GO/>cqRg;m;z2G K?=Ѵnಊ[\^or13gc,w-M.?wk~[2oF֞. d)/m?x1oo5~l{Ƌ B{&Sk{eg#SfM6[:ǧ߿unl&W9;cgW79ZcKF|ޘ_嘏7cQ!/Ο2s$/Nzhoa}.9_7lI?i1?O/apvQnEG?_;;hrL?h\>;lxM`k;o lwE"}xwiɬY1d4n'#d)Ѿ>GWئS({f9Cv{[7=쑅;˛_+^#]^>w'Mίr#vT" ;И']FXoC>1gҎyc 9Ì퍷[{'r&ae]0129"5dcF^lq&rRZCh.o/].:=oL_ho/5?@x.dd o2~%GL [X˫9Cǎȯ[ !#?xil< @gt"'l^g2rAOm/xqoK˳ џXaye{d^-K ǻ2Arw 9"5О{ ۫<GhW [ZD'n8c`O?ROG%ۉOϨvQk /5=#5OBAeg8 /'E 6|G3񌏴n6g98&$>;wP-N;nNϛA/"q7&tB #/X#j/ (!`9 ` /$P㍨Q͎u›۫vqHH$CYBj0{?:a9澲Qi[UڱD`3/[ͶE$-n_> hv z|R$\FqR9aFƚ8I YSxPDy6?Q3{!vL%:1LWG&؄ 1l4E'&<-H!DSzg֟v"LLC\Ob뾆/5Bi,NWq1vL#47Fh8'fbZyXp߇ėOȖI/jj1}ffє BTٽ +YO} 'b3?!x~yWu{d2qO|\e3yV'r4ǩ|jpv=Wzola|#A1R IQk{BḅL'CպBIT }MtFXŋ~bY} |>kr ~bAi}I/T#*ó PVNd)fFY2ۮH Bkb{[Psj7e+ I9lƏ hg6KM ٔPvjJ#CK%%icO V D")x_OR iƦI=jg/ǒzx%o[0ϕ'6!4Uf J!CL)jۀt΋]pKsJU&y՜Sjwl>;EeήC}b<,vjwܗf `oMtmU!wΗƴj0Ly@aV`DеkР Ċ2LFr_qtBT}֧fWwjGlsK.<œd=d T3A/AePԈr!3^ߗ"JzdL0Z*V)_Ò {\?ɅljMաW32ir&@$َ]HWf[ja8 |JQ(QA1/7ڲXUI$ҪxɎcn7Ke~w-4-@xE!t4nmՀEFC-4ǒ) uƔ7TǢ~yLBWn#!+=$:R-C8#h52*HİdQ;=&E33X/δGKBg>31`h}\1_†B?b IX׶sh+a2'J0#//~0dz`OaD* `XĖѣJ/r:b?,ȡ'-䓹;:PЦҿTtc1O*{Þ[\,a8v `?%6MܮVDIx"l$DwOr"t踚Ěa$W O Ďp1rwp}5E:WCC㊨[=;68gໄz(cy GᆖG6\>IHt/gU5(5 qEK~ߒ2v7E4@Ĥyht#h9=%0F D`^mD`ν,,l5ބap.UQCK$hY}@x;H" c="ӥxB e;5!z8o{jSXܑCvK6C3#0@Y)̾0g &Xb33-G Λ`޳ȕKOx,K &S<,8-vwfa n t܍x2Ni$Z+gŸvQWhU~dѤQղ ?<7g]d};.yBNO`.VF#Gy#\ȑ)-x_Q ʡ"4£_D#YE #M>ߪks#ˑq6|"7H3w[,‘G\پ~ QU>?ÕɍGhONWF`"GI#/׾gMD2yrE}PSu tc`ۭlpLG>[o'd׈E Yw}ߓ^eBI{WS7M뤹hes'mPM˝;a|LӢ\\fkȑ'd;i"j:kj6IP3:akF r ԧG9J;ij6ddԷc1 mHQC;%'s:Ϻ?yǂ|žt+w$#=f~7u?2BiF?1_3rlam27cj}c?d4E|LK}GM^m|wfOxA#XO}[]误ׅeW6yh= 
((*ޟ.~fo'+m&?/M3pN=byt<;<~7I`_3fye&>'iM^< n0@憼Qߏd{ͦ0Is^o7g>Cdl0%ΦhTO)3Ö?rgv}O\s>0|ls{c?K>4YϨ{9u޳/; # ZL҇ d/?~wK>qUw _Sg\<ꐵ^z;iFECWK?~ZM'k_kRc~v=eg-0K=\{|ifC=>K߫rߟN:MUߙ_[_'r7yg|:IQ 8n2t](чv|@ٿ߂c?>'ۧ|_| Ik)//̂kw;~kY ΰڟ5-o&֑}d؇l&|~|\|@&|J΃d50â76cVɘ_n9.OlW?U̿`U}]eOܘߤ}3U7yuM[-`f/l*G}7조/m^~M[y|ίM {2vᇭr[o[)u~oA>K9I.{_?MFܮؿM8_8?~%$ Q~_~ e5'o;aϜe; Ss.o9iaqh [ۧ|{쏫r~O?ͧP_|oXBe6G|>/ lwLg?wCc|c>Z)/ wCCr~?Hn?Q{_'˹#i~ =7_aJؿȇnr}s~`b 7]_Ii12M=`\>(o.ɢ=zj o>Z4 {3ePnW<(כV_Rax 7`_*7v'=}K{ ۟rRlߤ|Ha'UٟKgkEW 5>PrV}d/OPym'g1~EV?0e(h72G[K-ka>z{$@)(޷Џ|4_6?i<]-7?K?G>-ؗ!wY;>{܁lXܱg3џ-؄d}(G38ncP|W}fȈ!l4,R G }H,W}7r} 6|_)7<ކ1~Y6W(壿>OK)C?GŽ6){>_Ƨi|>?c:^O_a=֓t7 i3,/b}>_OK>Sl}}}lu?]1F}v􂥔)Z0Qw˩5R&%ק[F[(CLji79-`lϼM[.}~ )Zgz5[xiHUoi~cs~Q_ ~{ͧQ?-ϯm~b׿dg5ٿ훜?^>~9>^,#zG>S[3~&/}[([ݿPT0w~`9l//r׏/n;#b|ɼ6}k׎f_];Eˮt&%ku1dgwzU/ _w 9kGYAo-K{>3 @nG\{wٿ!Wq&w_'\e}{o?!О'L|'Z1y>,oZѻ3쨑}-r XK՚l_CLܿ]Zƴz;j/yqfi1Y#OH˗寠-{ւ\,{6{^,ceɈjDoHcYݭ~x*oI/e5f3투O*hІ^0HEZ+of}xڎp>߿x& _{i|_H%Px}~z1e;qtJli's wG v_oش#5鄜Malߪh=->ɲ<}oc,l/rk9_Θ/ڑVz|~3|_xR[NȁnX-7ҧlFNkqfsQt>qWQYO[x|R;܊k :<bx' v`HH'ygA`ƪ( G# zdbBZ)h9"2B,'>SOlZF3 i{v%Þzl!|bYw.<̙D7ĕB|G(qv⎞VX-艐rFVE?7;p,D׌tŝ# od)EzBVg40Vĉb)HNAݤ?6g ӪZ~-,b8a#{i^IX 3yaa${֬@Mav{Dc'ɞFK9WL]pV\R~3eu}0XJ\#\e'rypс`P~""E4C a2 1*]9#yܒ']@^#Eg-ZJ@r做嶬~.K9P893I"I[iY\*P]>Ӓsk"A'E"xǎ/i TZ=a̝:{߭0f_Kxt"fh<xBx6 r?ͥ]>CH f!; Ҍz#mnQŻ #U`װEFlDr*ӈg? x$ lx2hAwo;5fdAh@Gz7'U=ڤNw)Ƒ6K$fddԴwu*4~r؀v@6&Q9l9DFq*#4k~8Ep;Y}8QƓm["*!*#/X ۢr˝r+I٠>r8ETT}ϭɤ )?癨8rh9{ʑ'*#wL=Epd7=qohnPuʡ|+*S8̀؇EpƯ}kK&6 w|>~1@%@ۗ\6^Ce`]9irhLnCjݍuR9X{/|+/w1Q$?mPbb82Ϳo`6.v6?Q9yzf! 
ʳ/8'Q9wMO@yI_H?>6}`|& [08IHNNj_BFo{\a7*m/FmPlQ9οE=-*Ɲi/٤v NAؖʁ N尋;oʁGN少ɶo<Tg|+rX;CөaKu}l,c:qQ9[y77[_TG6{mP.`r82||eFf=}8S}:y *_S9y{}E gVX*sEp%7گ+eRc?HÑUϓ3&L=0T%rT~<_%WGPgF$*3A+*s>'FT&2?|GdwHX}Lj%}#پ%zGջsBÑwwڡIa_A-uo=khP}Lj['6k<*SVr}d[ϯN濞K)8AxsYX`yY\E`Qh|-а,q+_,#Pe9b y\YNj)xBE6TN(?=}~z^;j-z(_[g=q(z5H0MP_ӍQ9Q{dv l [u#ڪo@c_3d&E_8~q~~}~;b>7"I-k}a |r0$.Y-1BY6T|?errٌWl >]4rGO*}0B:rG||/Y#5pg~32>ll&eQ=GGCT>$s/D"=e0lkKVdW>kKy!؄#G*Hhest>>>;Q;hjVgW>{Ȭ_+{!\){I-Q}OG{LNj\4~I> l{޿gzֻی縷7]|y'[zϔ6[_o|޿mUo#} WעS9~ݴTw٭xoI Mxߒjz2y8SJ`Dm :dhv0y8g^VGR+&CmjQ5r=Zv+wy~iS ݿ*ܳN`5> r_Ƴ+s,}NQd'QA7SKFy֭QYSEdoz%s~sw|V?c|JR}OnAw,_MRv(oעoݿ7峾V?>e=_^5AKN"O ryϭ?nq*}Q9nWv{_ !ڗDTfw@ywǩ~?O|vY;qcDK"7dAggU2joBy}?Gj ?aHprk}: ˟{ $?aޟ]kE8Q9gyӊ^>@G܎xW*oblga},Hk׎{DܿM6;mUOI夂pFfB*l!Ag"4#)L?N`-eOzAv:ؖeP[oҶYʥȤӿ$*J3Q9T9uC$[Dys8R9'VM?y~<"R_$o'I y|K2vh=M>ɲ^ϬzԣP/׉a/ )~y_k'UT1(O'l, 7- lG:z Z"2e eo O/~J)8i;pBUendRb+ac*і#!n "o軐WpJOd@ծ-6F\ci6\(4dcszK][Vb9[m@1]4B( ۋhi!5 'S0"QV{?D"][ц Y}r!#E0_!hD'"+G>DNC0ov[`Oó,UbFX9,'cHU*gָ3jdO &B Z Ethܸ7A|,YF ͪ 0a$*u2+@晏~=$B~ fm!yhˑ2|Ѿ.X`-N$`&Yb| 1b yXF8esx#3~I^bVYo$x";fpٸa|OJ UÕucvmXDЙLo`se >%LXd+AXpeLz-  >&Q!&Y gfoG ett I&IV\vJD[*OBٍ`*AB\t-DDhe[DZM"> 06@Oflaho2BtsePAx ) ?@4i!݀` >5O#r)숇 w[2#H3s9y u|;`k ѦoDD| ]lyx#vՀ\J`CD9CD޲N}2"'0SdCid:Q@E #ߞnR7WG@q0o7"<ѓy\Y [%e㟛e5y)|cZ}G2,#8hNXrF<#  6SAtq #\14OyM2~rI:&XLm}-J[;&9m ]~~FTLKt yЌAn[V6c|&=bTMbK_Dz5B'3{z\hՠj3Q­fA"mbcPNoz_N BKĂ{P/dzO%`}v`h}.]HgZDQ{JVd8ѹB<{tj? cOQDZʳf_K9o֏5D#dQؿC-xDDVlD =K"h.^6Pl}d'21a8+56tM]Y\`~?  
PQ''\<~{#U=xNkx,B 0ν hc\?:@f'O sMVS1oZFdƨ󅼑+Z ^iOD L،qAU S+c͐b.2K?g:8 <BF;oP/1%'  ;lʡA>GRS?5`GAB"gäkxG~(&BS8ܤLn $.L?,ƽЅRQ#'r(&G6fhȷNCM'\70qM#DžE)ƈK9"{B'k/@pGkp.s-O_ ¹Fw G 7K8%B(U2aV:'\8r\%c IabF`n9X`}e!7 pHs0O2W71JN D&aڸL΅#+@ u.mm>~4sP67q.4Ź`20N=D΅0Eq.4bs]>@l1ŽP򇅁p`q,rNFN6q-}  |&Y0zm\h4\ mG޸*q.pGq.U$OGSF8l9|fBBs.C ٔι0֋sa!W9hr֝sa QRι@ kK/v΅Ć\{zI4Masa#39 Ź &y\Xs΅S9s`?XKW4>r\Xdr΅iO/s.㴶Ź& Lk<&΅"\ιsaWqFt=!Sx#Z968gs(l~q.!Õs.l$qgw/`>mrBsaé s.096|+ι8†g9s`p^ w\~hq.L͙$ e~K?nx1\Ȗp΅0M8ű}\`s.[ d78sa96?\jsa\_r~ŹpHs@s.l`X;Fs.|SDUč\X9q&&`c!9sA ι9?:&s.%Άtœ ι@L_\_s~8‚g9֧s.,:sa#_UG88 F}S e=b/jۋsa s&~q'@ @s΅5 \?ι.΅#W8p/%2:ͰIzo8ր緌|\XHq΅"ޅIN}J G‘q0B G6`3@3,\h#SU? 8lmk(^ʰs~a}\f\Xxye`\8_g%Ӂ Vq.+Fr9MZA`e-w ޜs^ TιHrOQ8z$΅5@r.΅L\`s.,sa 8{8xp'i#q.|cq΅Կ:1g??toW\X@q y!9sa-`` }\X@sqOιwQ?ߓSϿ?c{75uw|0O54> :H"6/sO7|;xLFq?wq.~8߮;cVz rN YAFҕ}~~+΅cϘKr{Ӟ}}'N‘9.DNڗP&~#d±_"Gٳxۥ1aH >f\hRsȆCq.}oK`:鈳DGʓa\s,\r ,"BxNNr.}T GnOn+ιpA[{_7`?X0>.OĹm|hŹZSX+QN΅#3@Hz}ȠBs.FT9N{s?zIı`M_=9CьM8Zg*$9lntB>zS9_yA!y~r>̈@q.A[3ևdy"ɴ\9 79)8‘ M.u8LI,rŹ`?ι`ϧ\P}\8lp8(s إ5"}$‘cvr,DFgbyq$4gsD~?syXO^tEs=@d/vR&g0Fq.y~]<9>KK#ɰC9?ι`-*oxJ.xD3qs;sG{/l;ԟE!BD!: cwsdoĹа0ss7"΅#Ob O\MϋCB%t&0_FqIgN\ιpn=+>]XdcpLv`sAYUggۧeV|,dqwg)"A>"O>pmf!_>~i,]6K^_'ޟYK۠\y!f؄7|s7}Mͩ}8dj9~r64WK=d֯w Ο/%DR>OSbJ11+<re/?ƻU~?f<ǽ=Ĺn[f}>_OX_2Wfiێ> ?s,2~_E>"aV=s.{9[o}baƼH.s-¸őB$^]?ιGK%sXo2&-ZlLB췒)ߓca"݂ПnޟKVPꗬ,/\?ˡӌ*ly}}CfKFy`l7՘uɜEg5š4p+v_r\Dj!c!s"4}X[[ޭd>s?(KE׏|˾[_^WŊ<5sSƊ$&{pqqO eWҊ'9}\{tqL|AĹ`vwQ>.fl~{Բ s7Whi__LvxoYvΠ3*dᩚ3i?r`"CaN?ι;x h{);~=Ϗk 8n}EOogy#Za~ ^ΌuX ?ӀE ޾v;߫rohjs.cr. :5kzfFeٛ?ι`2ݲ'Ymϓg}tiY{r&a{~gC9ssz=rb d<0֧HBLjs2]Əs.xKx\r̙ ^'߾\Li|8炕c|wz GsGeWv="75~<rEyDΆ>}qq'r(%/}Q}q, ̪2>ۓQ~Yߧq Y1@Nt|Ծd\pA}:aeX-oNtXg;bKŊ-p So_c4Ï7B7di%7z>|יNA6B,+DY0Vy=#SE0\Joz&q7&Dg)*{bω`BP-9|w^"* E~퀎z8 ,NeEl (Ԝ|)s O"0P'k#Gl%4A: Ua$83a "_wFp^9]p?AUdO٤e~֠Z:j yK"+5:*SxCFT2t!%㶅'! gF(F|or+=WYb|+E #"+FZXA}GzW@_Lp&0ᣖ}hq{jf,'-Zn lU]RKDS)>orXZ܆l'vk،>'b&S3 +*!!Q#5 [Cdث& k<G8NGFIvh4g? 
HKW  بg/6C3+\ٓ:95 gqH':W@jkv@>3a9,m3鄢L@j )NteGHE^!*cDd oyk sDm5z23)mfL6ѹ)MYqIJR@UVR_# ӳ)j a-#n%cŋ+IF-+5$بYԄU.;bE'#A4x{O@1U7h ojV6B-$]XO5oU'["HFjIk':RZo 0f;P#Dg6¹ `=;vG!jRNp*쀭&K33Mw!M֏[FxzCٗyw6lm()0Gn5סZeм犸 pUYQ޷~bf*P'hBmrC!c#\ƣI%dP͈fG9J9eM@e l5Mi؇$ޘ,<Ƌɽ -uqD-̒3"lri24cFUCRePxt͐L]meO.,hU PAঝ+0mΝQ>[A<B3C[Y\fNOգs2zpɼ [:³,!#;l%b{n<|5~ve)"*HĂwQ}FgnhWf߂}OO"rqF32K/؈}W{0^9Dk|f/d)܌Mݡ+oz.HGW}1hzrc1#T¶- F'OY'uC+b5)``{<3#[9oB /7:8*Y[kb!"/A [H;{bMN?l-ȌIx#[eYD 6y8R+J}EBV>)kyqYoGn;E2oe wt1"~RpGʹ#y1~HGq1}|ԩ )dd{2HGfqG}R6c32la2c8efzB 7NC"v|n  T,#Fh!TH'><,%}KlHD~T |IϪw)">-O0 9}"_G偛n7|^*#O )8Rp'G;I|~oHD ˕qf,:yAI%MHX09^h{; )ȝwZ6H&TeNnrjۏ[!KHGaHDN\! $k< )s>'nwTHGސǑ;'(C|aIT"|}ǯ~) WW4#Edc"w@8RpĠ# H B >r]2#b8u-!G 1C1\HG)L{~?@?|6p>Lr5MN6KhX"l78Rpg; Io)oi?ج~]<єԞȿk=6z<&e_q`QW+}Om)D|߯e>1R!/`;?lr )DžI$N$:!_d*Mok^Hv )בl!ġ2]HG>;c|)y!.`;ׅyR)'0c|UR˸Lz+~Ϣ$Èa)ȆlL#)r![B )r!GRB 1/N !v+}JHLB )6>[6>} ) DHGnv)!y )kRqB6ye0G)賓W*5a<,cg )3~)\HG6@"vt ) g:s,BdJRͿt!unl !j>Mo^&NǐmG N0!kS w뇼SHFam'!!`8Ʈ(' ^+_f.BR58`ڇu )~U[k_:O>tJX~~"k' R{>~W뗌 ߏH@>kP+:OII%o(#? zZHoO*B :rȑeE;W?q+೿zypde/,Uۑ;}~&dlw̨RpgN;1C@/-T#OwSgHa GM-Lў~E\.ٻp0~a!wJ)x~>h(G^C!9 s$5 H?'Dx s1!/5Q#woT)^&#gؿZOXK*K2 &ʼ_W=/N3pe IHN(zY')|OH?<4>!Y& )bS:L`^q`k[_H AY_EBߗH#wDdɖ6?<{/VB=@~}OH|o!~3UG^iz~=8ZW2B&;'}?" 
uDeӡ4ڛl1 BW s| eB G )؞G5^>OfPL+ !c .!w@8R9u^`&RPG QB 6r77s+W!ND>pR~!cZHHBDߐ\?؀)?)G(^";?q_ɑ;e"7I+)XrH_&weBNoz[|J`ӿ@3~)ޗlD]$O\O orv40Pʢڀ#ʗ,ƥyYMXC^.Cc\q/G貐M8>1??{о>>g{Ų[rQ?Kj/5dG$or^d!%k~{{i&Y}ܢR0=;2<2oLzߵp )X>V!Ϣma)z@4ЅԂ;?e )b[|JgOW<8*``S.a[kw SԿtBrpǺ|6Ο)wʚQB2ܿoMB#{SnA|S.@`߸_0}~~19=ݿe}66_5C_5cgӾɶ'H?; jlq{y~\;Kx =?1z,o" 9?}yǗHվutAyo_v KP!z^us/B1R;8f)`dAE,{Bޕgv*S=Dz~\#lMo:nI<)H͟]OM?)`}ݑ=ӅHR4.sDž%gΤG}<Ṽ3W# UeϬ\3M#RHH&ۃE+tNeN{TY}噷WW@.3 :8R Vd_<I󫢽Yg{cS&22|wW?}Su"z_|s+`?'"_=-ۉ[ PcWR *ݟBZLy'iӜNh=sm3x4- bAF (' xhqu=D\=ё1<'4iPED½>A.Ipr?X +.Hމ2#xN9(1oY x!vܯ&H O9m WY;#l/\[} n|!?4`O2e,^Hс(xՠ"6s0 jρP2WlBz:8yg[k&W~mӤX 3Bʱ .g!3™^, 3 k4;90`@dpttP_/嗹A2e65>R^HmlQXRB3׊ qJi./3 7drlV>,ȓgzk?dPׇXť#oPAu(N= 'HHtL|;\R0sᶱ 9fq>~wexv|)@0g\#C3"%Q:bd'6%J!!ggq>6wN̡@)M B*<4a =2ķlI:gjo&pDf{'K/EkRY: 4X}@)2L$Q.$I ϥiQONbD_e (2I:" K \vn#^ғhjILh T= HdGhQ>,|`F ̦ᐹBd(dT.]ʉN&T46춣憫l'nm*U`P. / LJbr?gҲ1B4X"ݟNLb;Hь|#~PX•'5AtKlpPGG?[?L8dCh~0=c%Ga <@]kn3 Q-jPAs@A"?JDoIOoȤloKVHG:Ӥ׀Ȳ|)̞商#ԑCkqg˿im ^/σf'a ,hu8sV|p3bu:@HGu43FFZPUA7\wfovDETo>:d"[@N]0*PʒZaKt .5C OATV@NގxC?"^@H)u/ sJI}^IY+R+t`fFߗݢx$RnR؃@e?Co1hN"jx0M*.3Q*;8c\׎6 GgZ}%1a.3dg朒䴱0&}S=Q|_r1z'}Wqk-L  /H_ 4H"ڶFhAd'N4#:؟IY9VKZwʎacb0`i`E3' a*;2SZ+dYq5Lʊ,-ޢF^x5ώ%qR?FAu0g"ތP4'ĩ##%deav?=-(OW XkYc U!!NȂ*5N0v;LgZRc; ;t6Hѳ}DVeN#>`tߑkĪ"JP4ŊfѠԎEIL}vk):鑎89).RMYGIM^S@zD@pjG]&0 6xJGi _aGcgMT3~۬*6\G&|'ʐ*b$J=k!hx)'sOr8qسyJeMi6Ip3879b0٦$U55c1Vٶ{'l9[ߵ :w#j+?ؤY7qgu߯@h#a1iϮ b&aKm?U7v`ǘ4ciߨC,㬪3lsaN͈mb?bAp>ʄ yNΜlI#d$RBQs$vXg{w NtK*%|+:Q)1.LQg$I15.EWp/ّ9ؐyc(*RbA%vkR$1e>TE+TFU?uCjx -n' HflD5⢑:Iy}!fC=ψL2fW_A?Gz3Y|JYUhn{b/ԇdEs"unǥ "+3@PkǨ~eH b(JV կ,vuFVLfYwլ( p :Fq]=5 MbjZ۔U"o|LDY!B$W-n?O{J4v䅟yff\4TRP~b1mٍ,$NGyӣg@m *MG;JTs} 7_ug]3WGÙ %Ź=Ԍ;q#_ZDBۏ-P8vnoǹ5́s[/κh+N?91嬘c}8`zfHSǔ{.257r9CsJXLXg]}k;lG~(gL?κHK5we~xX>LtgEX #j8L;lݘcϘr nt+g #Q8rĚ#jlxftǘ1fUB?v _:䟥 U`etAXc "stEPcPa ^G|NXC{\P}uŢ+K _¿%bc; {GӥHսams9^x]"J V|K-5\A+.b?'|cαޭk[w)mrjMD?*L1q&IeogKFd,.&՜:W|)v-K۟׆ ~O{im|6膡6C-?xj?okιU۠UzP}JTjjRl e4mTsGWx:R8iҶ=N),kF%̟f*G 01YSs%5WSz=av\FsQdKaV\G1-,_44\fo/s+7=UwSNWscEKl;?0w: /-aEIb 
eꉉg`U7g֍;vB(]͟j6rcx% lE ]Ԁ!1^È^(n|ؗPs`X$ӕ`C0eNןz2Nin/o.q+g4t,Dp=:]4 g,)\ xuy斠?V]= H\̎^X<af\f1XYx:ܷgAW׷cMXX612~J 3ӿW%y(#`rhĶw`p;uR nV d4c.;aKbM3}# IpJbC\tn<$w6(ɍ̸KLL}V^z3_l&_P̡Ś|n[VUvtFnF-xXR2~~ǎY}gaGYC9!xɣ8)vϕ2SgJ6Fi*g:wjLW}u!FӕX'2~T?`)M\jƔMzL$K4{8Z/sץ3:x}!<#sb.1ScxYS'[oo3.5Q'>D[+l&9fy;ML?+sa^!(/ m m_xA_| O+V{/8ǣN`ѵzcې ϣcb1XFM9! ޏFtx9,iVh( ګ(Q7M_(\GM:H~OF@&|:5Vf\KX=%[c8wh ^Ǎ^SsAHJ`H6~,Ϭ-&R㍺4 #~;! sWn_3ި4F\˴M~P\jrixžc/,5hFy`~QXAQv茣{1T_$~ܟCn} _?Fs~Y_8co}1AANcGU~hW<0ifx~%ԛypWCo@M;3zϷ^I~cR{.%'YƉ?̌gC`feJSMaF θ|ΚK-{Jnщ>*j׏GS ƹ;򧡏$ ;M5|H܈ptƍ*(I(Pcg1Nx}(P;| ٣tt/wo3-(gv?'/C"#[1cr;} b zH81֍}]Z7Tg?өard`en-NM24DFj!yY." "cPL;y!{_Jfo4rS4q/k~1hp3yVD ǾϢӲ1buF%ω]YR'md{lmI>5F/c,h=YLtfp/ub{Ԙ%y}ix2`bۻXR,>C$W!I'bɳO7Drʛ7UN~ņ< 3 oׄk6D~kÓ X,?538Fi%}콙+T`5B`OMad"zm}ynV_,g'E{iigʁN:~yζFZ.q~}F:9RYKl{k=2;~%@"r/t7FIH>GE<3s_pšπC. rw Z'gf:izCR o{@BHO*[ %&Y_pbi >ɒ ;.%QH0-.@' ߚ}~&7fӥ8"nYNV2wO4$*m%xx5DT91 L |كoZLjuN2gXA xH }~?<O?2?|[A*.i{䞙LW'7;ld̜LxdX{Vu)ۻN&)ZN~Xpx~ 1z!}d%bmnj%R;[PcԗT3hqvQt^g/]zzs/" LEo!D!y:,;`4._'ʡoEi6Ay$Ĭ-*#BpcK3]X!rurdm +1KDدW*!|)v׬-XBf2W6OﯳHKdW*v Yw:wKC`8J~c0/Кu)k{Á~߅f剺}lC1up,/IP[_n;͝r?_utcH&2<ۧ^#Ajc\'ӿ<NdwЀμhc_'1%}W|KVM=LC;z+Bc b!I[$Ck.%PL' f .'&Xfۦ dngeRh .+e$^MP}> \FL`̼d5"JYs 52PjF  (.xnA>J.PjM2̛WCXJ9Aӎ"'ԻA"]Ͷe)u՜Br7e[V: k Seƺ\~'[N!GY*cl A±`x#J8D) rNVsLYx, HIC|%Jـ?D)$J^rQK:(Hd'M 򁿗(e (`JyXr@E%4Jy^(Pjcf#Jy8+(ak@)D)9;Q8RJhe>RVcM0rvav<oԉRct܃(aZNf!JyخFg\6QxE%gFMn~QjgaQjE(H "D)L vrbH.D) > ҈R(Q (eD)@)D@)9QjFRRV/g01 J9GCF|\(x#ϼQ6,(e-+{;)P(8w;+'sJY9R9RىRV/l(eWlQ'Q"& s?RFsD),s[!JYM;Jf'JWD)QjA2FIqM2 JyX _f=D)cld'{/4k!JWD){(e avOC-G) 2jan(e8cG(e(e5w%JYMD)RaTa6qn#JWD)[tJWD)9*ɉ9Q4RV Q0Q.D)Y|Ы|Ҷ$E(aV ^aVQ8bd!%)c.J`D JVD)(ea|0lCD)(_)Q+K岓51!J|(sR>R6+Q=l ԅR>R679Q0D) QcY}.(aӢQ6Q^'ϳ Qʸ Jy-JCÌJ@)Jy5Pk(e4B04Q}Hc\JRw2=Cݏ DJMR20qjF#2հa oJ[(e4EKٮ.lSw*p[h}0#?@)AJkmƍR<r%J<(e?la.G!D)h'J7RVm,B2E|ThΗ99K[RSץIKKGZh~ih47Lb7*s폲PjnArrak<ոץ5Qhe(e_l&_p7T(e5GUQ5'ɿ+پqzgtC>d#J9~T+QʏKu>_Eǝ:Pᬧ4J׷]}Vb\4m S[|1 +I2M,El5w}]jqR K$+(e4Eb'&Jf!J Q0;Q3QX"og(eQʱ\qEswa6mD)*Eg!JaBr3؏/ ѐ`'G_0!Jx+L&x(e)dcA/&sfsZK[/>.Bk 
y>&rJeg40"c:i1VKFB+jmylrކ®\]ʂbSZyyNǮK ;WtatӱMÓc6~[xuwĻ=@4ӵ j 1 7j@_`E t*ȹZ;0dkB Vjkoy..Y=[w/k̽md _Xi-%oGӥܳrx#Yk '~c= Bv+AHNY)&M3,.$(_ 8NH@/@#a~~C{mJzw/#f8#)4Y?:_b7ɜl InC1R9A{b58 !;6M!̥z3FeXdrv ,@5wgI -c CޑdYx|Hg3I(_[ 'k=WҗdBv/-2uV';#%t,߾`82OD wJ?yw!(I2nl<䣼-Ek'(R3٩hsfh=3аye$n?&~}ا,6g8yLZyKe0q͉dw\f'quL*vRh6g ??r0bKY_ϦGugqkd&h .Ğby鄩C+-v 8m'֙FGVȸMf9&q)đb:1,mO tl`S1xBv鳬ۆfaK?5g;$!7iokG_mc7R*9϶I7Q~iN"%9)*5oOɔۤ& a1{e=w^&t;`b̤1lV3\B-e^r9`s`%uw8!uL!_7A$wUrޞϚL7k~`W_$r2|^`/Xpo~V,@A-O)UMqثKڃ9vXBY4^PM& XK5 v0:WR_l,ukNok\ӹͬH yGobeܰd\xđW`AMٙY.vKpM(wv|6$ar;C6!b!;~<'2Rw+t]M)J͜˲F6XgA+Ui:.ǁFV*;S}?7yi}VM>:}Ѳok'1rLWn w1gT7WINעC]%Q2w8UFHժ Y6mcK7Q w~}#uD zTD$w)q0mlnb3s" ҾmtYyf|ɽ|F4at I,,alae .z?AP2nsb7Œoǒt;s~Ӟ2Ye q+'Nd|*c3WXܔǀ"%ݼF8/fAJ,yk (Bz:*i:?_t+@&J68`ݬ_}C$"i*icQ‡Ԡʔԉ'ǒ)`snL3zR5K7<<<09o.e8h]JJ#@ 䦼?$M/;)[/2l̀_'Y;|ts 9H+̣yNl<Mc%g>:gx>&[զIA|JJsln̔P0q5Mi9<<Ӆ R0172=/ K ),%٦VL K ,),%XX-Fa)A+,`Rb`iL;]$SXp42 KgRr<{cvhA(sN]a)FJR"⣹ K xF"_*~H=nC+,u@a-:톰-SXG),uEa)ǦR]Ѱr K;QXJ2f K K؆:b(,e':Hɠa(,e} K"(,uFa)QXpc %.T1]a>, ,:Pf K?a+,eLVMRLa)YAEa(]ǹu^aaG KtQcgPx-6v<+,em Kp(,u/ww+,u,.h:),e;u2 KVER\Na)JRpQXʺ/ KYFa)r),e37 a)AcWRX:d KNLK 2b),e 2JR_+,eT2 Kah+l-+Z.JcДrS KyKڞRyLR^Da)' K ~{a)ȢSXjX _ K a)JR[Perlx5(,6RCXJШy+ԔMa)RYCIcbR:E׷RB8Al4%*ɰ6o: P )PXJ - K Ps PBaaB!,zծNa)R4O{a} K8+,[ Rp(,u+,%XQXJ Ha)Na)r-iPX&xR\IŹU ~R KY{_L),%ۯ_J KYOHa)wR{.n(,%"{T09$>U;Ea)ٮ$5@ WXz KvlksFDuy uR=]y+,žoJGr5Ìx/X!8j+,In?9bk!,xre8Y gՌVf__u|#=Ǥ!L;Ɗ@JHt"\VU62CXj5v[ 7a,CXjCK B#Hgd"6d-/Fu?P{RZ<gj{.,5&BXjLd0RmK ކ0G2'D٧iSsY\RjCaa>r3&,R0 pJa0ch2mKY}Y),5l'7ע-ׅ旚\jr 'Fax[= 儰RXJMәbiM%(,RsM K `n KǕR0 Ia)}+lR0>0/I4= aa~74BXJMzOԸ0FBX  Rӄ|>H]RjڦU#f먿B qE(,u7GPXJ3(,50lP(,渚MjJa) HĻTÄc߽#c k85Ĺ9\ʋnn X3!!3RBI*370GUAOx?&=RZ}K,RB(CX (,57?GL΢kr7R4q(,e1 K0RH*|+ 7-aSb"8Z?~-BM/&R<~BRPXyx?OWx=k׮'}?+}GK sN(,in*a)c_a)|K %\a)9&R{h0/7<,7Rux{?n?خmRXRLy6,ki(,%iqU25s^Xc\a)+z]M˴.x? 
K!^we%lӀR 56˜Uҋ_\a)mĞWXJĕ͒&;m[m۾mVPXJ@X^qK "I9_戚cΒrDBg*~lNnKGZWhjr_BK'I+SBX]t!ćyzRxHBq]^WXJlFۖﮯg\a):R"oUw͚3GOL^+,1 K 4),ll_9CX~TsrNy(poQ֫eNwPu*A:2S(,: g72Sq3lR ">uyJJus(,5(,2avZM KBa)Rd]X fTܨEor>>zD49$RX fTum4ngBa. KhTf K4 CB5/ij&?U SI*1M+jB%ePX*>h]u.[IG!L)B,Ea{DiTZfy>N0iK<Kۑ ݩ$O59Nv K\l KʫѕFa ϣR[fLPa)9RpTH4b/ K!dSXJMYqT:`N Aɛy9XfR0R1|\*a IVpבf{7jPd;R(!,sw/e)V7Mㅀ+ptRI*1knRXʋ= sKEvEM¤{(,u/7 RbCXʫhK=̨R),BRsL K(,* f𫇰԰SX PX ]~T0WtBs>Zl1 a)CXǺrrKk:SF_YIƗn)%Teq;Y|.ƻȩy|$cF^ ΙZГِ oXPLRdTFz)\)3bߔa'y#VcA++yGME~~S4 h# a,k`1}WE7C+0#17iQJ/~ ; ߏkSL*Ra6g RHSLCSaIl'K[D7UYT+M%iߴf!p<)$Ts ^ӣGaKNRW~W8ϓodЕ>53vw.#-k0ďڎ~Kg I|wKIۃin.<䳎N7]}jwgϷEk^)vh~A#  ˩@0eqJ1g}OuXmۂSf/&X-E[`"RfR2)wEz<ׯ͊YC͘CX&clcYR"4+/N;i1c TbIj)34p-R?cVvN 곈 x&$%'᝸c"ee!Fh|ab+'tj]1XY'_&bcHVhrс&cg203htr;LؗCF0DȚY_0_SI6EXp=o˵ $'hXrl`|68bdM ؝w%.QS^\IVyPk%<=X&Ĝnc!rk9i#- 'ڎRE 䁌?g܈i8M#2oC_: RsY)eйa:'@H:@dCr*PD_d{9&*ůRx aw)#>Uq⎌ |8[t$A rzV+ [M4={LNYg '꩸xF6K\|r ^'}:3]9'zF:8}lcX:fBykw~d*W:ҏxA"O9Ɋ&}rڙӉZܲz=SN._M$z' $ƑKua9K$ef^ >L{Nr"$Ῡj<ʶH' \-Wqɕ"Rgy(6V:>XS v 2a_-f/DZՓrkdrh.fZ!Hj1fPRJI%tL0*:08+"7/YT rZqsBH>rg|@שveK2GplIٝ@TWgA/5s!pnPtۮ-US“2;`Cv/tRbDrQ9] k$X~v̔[y3UGkh=5紼*SuEB~'oK䙤mt V? \ms tLEVؿD&OvR@|N+lKH&("ık"dv K`S#asZti>٘`:-2`j8<ӢBfd]5G$lw d;-`>`jZ_jfu9ǜq]9aVvf s2SyN\~MDh*ajIخ> ÀNKI.Ʉ9kqstva.vQ "}2`SۤZN!lwi `jJ']c"/s!H0`jV7Z6\Xlwڒ.`ӳxeVlw.W+~ݹQ] )tݏ ad;-f`9lwjZ#vb8`s 4rخw𥚧;7a0'ajMܦ1?^kZ+ ƪi\W/ 4Nc+lqۍ_v՜]]o!]w<\NZ7p-`ӔPaj֟݀a\7۝8[t q45-5U|2 ۝[=۝ خ;Յu۝/ 4ǀaӰ۝"pvBjHPs a<U6vqJôAJUsvx#!0Wͽ U<=wc?`"lfo,>]ǹhQ:Y۝)\i؀N۹خ{™g]]\f5UsvTisր⾝]ܨ sNff+uSa}l}]!l7:z]fi¬z+EY'a su3躉!1Jnd\!S%lf'lWͱ ۝VfYK5zAxXv1hnva ab$+v)Dž]5&]71$lWͶU va6vu?iz#۝=خZV6m*`]t)lwl!`0 a€ s=f_egM\NM! ]5m28smvp lWyՂBخ `(ؑ]ԫB.r۽io?LNخB.c]o8}=݄ND9nah]]zخ7D= ah] s;ݡ]t}]nXݰ2sDh1/$l+W^ز %JxN8P-i{5zť]ܨחngO.Kk wS*aa .u3{ nļ0!KAy+`3e_.za{a8Lvy*o`]i~x.S Z^vq),rgdNQ.x6?Bخ]71׼+ ǀAvmSlw۽G1Y_ܯ^_j &d݉ C'lara9۝&|]pخoVlW2۝W]lw~ QsLi}"l!K wpk2=vF9ixìIQVJJw!d^%i9a0uL˃cBخo\3z!,!-*;:j uRۍb옕F.>"l7>پ<rG#>܄ ٰ+SgJ6FipʪQt;lf%]73aXdۅilj$]73Vv4R`k<:&9 ۅىyHM3ۅy۝ .B~. 
S^XFQE$lf#laƥڥ^S )qQ_x}xAcF_xQ_1axB.6vqC/f!]71;_&sY'k]u,aHÀDžt\os {|#ʹluFnbJ+A.ʹݺ1a =l3`0 AB]hBnapEG .VXGDĂ*躉9a0X놳W۽73E}aӈAMLm<$]7‡)^k%]71 Իo8B.6aA{A?va.vvah E.A.Mn\ގȼTW%]7invc`s×Lva^n n+gnv7([w4wgY.os N'lwXcvY~pm;y.Aӳ:uas0<^S"crNlf{Ns//e& 15#Fi<02 ;d;mon]rndWW&l^o4Xa]}Q7!lf%lf;f]4I.AnuqnB.aVf J)`>l{9`nbIPI؆a3nø7}l+"' ^o!pҳDs|SQsOf:9 N $2NJSfnKjǻ/@-OАp~$5 y3K ߊb!4:[֖Ws0z8CUݩ}_ҡNHHp/RA<@ɳ~I=T2Ci Cö3}N p'9wӔ> .=fQOIz^拡s̟Sv&)#hyyuc2-r&lC7⺿|i__p_55I .=ғ; óm\HJa8%Ipڦ eKLIeo6!9*%,xmbrgp<9kp1_jrC9ӔSXK~?L4e%c@EI6Vnmߐn62kxf:/@ʰ:M s629+Jo;[&{[50oɻ۾doI8Vst½^è֟cy= 9߳ymtcj֔ӄJjw8Ϻ9ySdGv"q8dȦίϏuMTXC+oTFiemsI===X(kGe~6~ţ"wkd>W6Y3gW_4Br\9cq&H;@/^̳L ̖La;rj_08|=/$Y~":9c|W39_{ǜ6EkZ}`qRcݬ??d{ endstream endobj 412 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 403 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-047.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 413 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 414 0 R/F2 415 0 R/F3 416 0 R>> /ExtGState << >>/ColorSpace << /sRGB 417 0 R >>>> /Length 9797 /Filter /FlateDecode >> stream x]ˎ%qW%i` `B¦([28YUMfwg9"Ϭxo//>~O?}!璘On<.i~xv#+]PKRןo>ݧo)>~7!|/_YFן*;+RhbVhϾ3_k}onRCnZ=#Z~- `t8&{!.C,j·P=C:fk$+3*g6¸{dG֣q P"` Dm1Fa=恡@P}> [waH p$X,|Qa#mՁFV< fSksao `i qБ![ n*V,ʶ ^$''[~0mX>IhK(5牭hY؅0VJ0 B,!ۜ1SKixhsT'kf7 .pڊ8b[0lc{h|niSb}ىpӲh#r%ΉXKaN 3 
<0#.)k^sĖΘh#m'ш;Wn?桕xMAX@8&{,`g),m"b*jݱX@0WzBG~._Ld2|~";uw `#6Xy^KZ_Ɵ_mпy?/d{01WV}/Ƴg͟ nVL$we7as6Tb"6ǣp22f~;!>]8q z-#8좷ݏf$b΅ Iawؖ7c1? ~Å=xqthW8ak8}?vdyhmx<cnS%s,vװmc=XgaolNǡy o)||_]@FOv>5/Rn}$;)z_OJ(;ƫyYmcxa2&i귍 Rd}8z7B\ON=86l#9A{5oyJ%:~ mdDŽ6Xg}CoI^ >YE1<8ɿm{>6>8lMDs?jƃF},oۂwW;?>,;cb?9,\M8-өjUd#C>֑S`8c7_6^9Ƶ EZ}=zqj Va%^ll=fd;"ɳx?;s7M?<q^=9s.&|OxqR0E? Ts~*OXq~"qyZv?pnľ+ǧTɳhO}˙XI'Y< s^2-I/6P[<O?hA/{ru牰@E(Տ_Zg_~ce\1Kx^|/a8Lǘ\oW]9 02ecUM_◛˧%aEgEs_>볤(B̍y󉊍q9tDtRItLACLɏO0#F1;}hNo[;SHoMy~C>lǸ]|BP<EWE ۜ*!gnY*oJ5hW/ { Yϑ'/8 FLכL `JI9N GG} f~Of/R.p*Ie|X>]GWiGU/O6k=]0ˤ\+]>oҧ aE2@z<)~gWWQfH:z#)ׂؗ#,( yWLC5c̚g`=9̅۳tןAɟ/1Uwc=8 G?F?o E j1žP܆ϔjo]γ @&9|+sh"|>R-8qp$:(S`15o5mfMʪEE 7dI{)UÇ^hElݷPltȊt,^l Y2T]}|wĚgrhFC{ 9ЋXV)5zZX܁,_} B |fKWhEPDܣKn߱8lYK>)fQ:LX=p:B4~?kD$u,7kHV\6*\Q-6H7s9ȷvRbo?|E1QDZM;NgIJv9WvH6얓zyO3D,;-ʓr$9eWڋAr"];ܔ&ؼKoOIu; j*+uѾbWݝ sX =g,;7N,=3I/|z+N˯UjFj[ vwK/EiPSfǾ^|=Ko5}|y{P~_5ҮLУoϹ i+QI˂4H W]ӳՉdMal 2k,OG-k|S"G;qV"i<^,k- >pH~h <"E󈜈|&zgsY5!K,j ,k>_q>&U.oniky<ĬyEP` wY~c7=4%,$㱿 ךj7 mT^alph봮;Mf5MeECͮd_mD@yEPin ()P.$ֶՌ1]w@ږtNuzZ%kQ7dwFa.hlmdT`ЛAc Q6R]7"W;\.-[W9eձq:<Е5v~ե|V@((^Q=*O9t}ڵH2vu5R5.zD3w0 VPU_[Q9-/ 6s dha]vBbNE{T5t`-}s]*f]!&`/͡mzh\b3O+)_4pmr`㖔Yxx+u؎Y6pM58Z\VI`2a1{E=%U:5PonXXKW7_띳WUO+V3ˋ-up%46let47&\ e@ ]i@0Vn(@/@8{ O$ˎސ r#eːí5*oˆT BM]w@5.5l&PP_w!:q9qdtXC_UQLaUE+ErɪTn!\HH^Pp+Xܔ):38V%Srߧe[ LJO[j ϰ BD lˊYyb%aNG! "m);`ZOmSQi^Cɱ{UMh+HIU\f/ 1+yd.r5.A$F˔yƐyѴFw/z2yVNuӒ΁e`_g:eYY9zQ>ŒK$UJ*1brChWD}-BzSЪG![%ar5cYo҈a\hOufsT(p,a<1p1/u|1-/b@<7{yM2gUݛSt L~W``פxn1,z27F0Dn02ݾE=šٰv~\ښ3 q&֥~xP UK邯})#ĵyuc%#Ֆ,K1-YfNډ7V>؜7O BJ!yERn;PwI|*AN|P%wlKo/yf (!ntSp7r(fH#34.Gf0):z4b%;0\bK#_߱#e(=Q:ۋ5tJ'q_Td+sFoo6Q2 ܒML`@5$mJ"_A( n#i0+D CbYIS}=DI2af(4B*qGևN821o\.䥝JOe(+R,(K"˚!|g 胱9N->JfnS ?g i+3Ʊ@Wl$m&ay-i#ԡ'6J+S\n ic+̽8E$)x(ʄ` @Ɯ#nCy2uHGzd2|D}a劯<(^vfܮJ"+ht3"N|cspVZТlrv5z+t:~9)܊}Fqc=PmOslsn'gQɛ"Va+R7zf'}gH f(ܯg)I3yg` ]-3o,.k*{c=`gE q[ ^)Jd# 8Σ{J2"O߱) c?X0^klW>@AG ~B. 
iBlHJezryi7ϵ% |Y8uIn̎VhyWњ e((]r ߕ+jlH=4wtn{!6~KB`tΨix7fQ``Ɣ1W6w\d4v&|!Ȼ");%.4+9ɮKAp8gokTP</m=͕Zp}z[鎜Lx{VZm_o' W@[$ZQV]JAVCAUщ'~BYf*lxV+>#0WFJm2逐"lE_.*+yҞ4([>*1(M4EY~&P.j7kyf#V3؝"2b늹Y ?N^_X. DND{+#Ok}fEB -^](˅B zx?=7fL!TXP6|W{}*ۛCSѸLF. r`}9EgCfH%sЫK*8H" rR;X A 4r~&ީYܭ o[)˅".hIFfBl #[O9!V^xF%S | ҿ#AEIJwu FZՊ},ʷI4?GS|6bgg]~$gHs޴h۪Hωü!n/*EX8Bfr_*1fR.dH8 mXޤw2ײe\fꖕɢɟw}흂LN- ׸4m8qJ3ob‹ P‰}pn灹)R300CYs-XSbZ!(mv@h\<%yM2$eEZ]UƯ1G\v:qz[+]%t]ĝ "BgwȢE"أg"?I40ѯAd|72 pSLԅ0;3w&5z;_|ƻ5ф#$FNs?r'9ou"2]7vm%uN㋾44blBݷlaHbɼ$r˪ґ$/ndL9E_iu㗓B%$OeVf1vh&gc?(K~thϏ fźln͘QFx0EvNFN|@xt y1 ӻ^@e`^H-X3OzF쨞˄Gh}+(/ -F ޭ\I;JC_)Ir2*Ee0ҟR藓B%Hє9EP軄Q|B_N PKyR)\м ~9)T֗;'@רB*NQܯõt6ƒxPu"ωQg+N ~9)Tb_Wl:4"*-}W[9aDzLͭ򝱥qq(cKNlѽyk2jmg(a~cx; nF*|HL̘9#/]tG>Ǽ,N^|R]@'(˅"I6SDcE#E~_$@hxC[_E~/'J76VTM!y/qɢVzn>egt9Wo.gHmFU"yu]_.]y2J~1YL(ȠP^!u'kN<%A؈zf~@IÓ Ê5 `r r`}96lw["]̑×(wo3@mݱr$^^]ŘYo7#7V8ឱ![=q+i ~BiǏPۑ]= ]O6:8p¹eQtD{Vwo @%"iÿtZ+[\44DM=ty%>RR5PNjne%>. rR:fp cu%pU)??2?dC?2u*h(1f=}[ ̺tT6?Fx\ӎ_.,|v4U*{8;M?GT&*=' IQ&9(>sM]L}!DWזikX9~9)TxСӚ+,Spt8C|aN鄎/v}w+tAe{MHO3ZƠ}!E߹k>PV)#ׁпvM^a)IA1owkė*gDA5Mw +J%/n3:7$]u#$9s ?eMA~AkBg9lU Ő aլ ,3t!~@fg?II_~{Wc p>Ϭӯ~OO^ސ K>&=tSǿ7Ex>7*_k>_nO7d5jE[z7r} endstream endobj 419 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 424 0 obj << /Length 1127 /Filter /FlateDecode >> stream xڕVo6_!1")QR eZ෶,'l+{_dOiW4Ex; 2\i,TA[Pi /"h9$RiH'"].o4Љ*RIq*/`>]F>-)TVMt&3oɦb6|SGXaT KTj,bcX_hZuEQ9 #Oi=vmQlkSq&O RnIAŏRP'۰la/H8jX[\ 
nzD08$!IHkjZ}w(m[LC9/o}8]qrs$I2nޯi3AŽf0 e y\"|M`=Ce2W Ks֋A+ mE1k%I(`Q-΃n_'PJ9H)O7OH5s5}^.zrx̅U/V !.|䄁JO4"I|LD%04 'IN$2FCj)$"EIZy5*us?SI'礞!B42-~[PVӤJq6D$yV9SjUjۨH1  p 1OFW:~ xǞn~|E0#W Thxb/5@+!`Lh1Na'u[$y1'J!A- '*qPR #w"#Ϸc8T[[IQo&lii+9I=2K &Qo+ endstream endobj 404 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-048.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 426 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 427 0 R/F3 428 0 R>> /ExtGState << >>/ColorSpace << /sRGB 429 0 R >>>> /Length 864 /Filter /FlateDecode >> stream xXMo0W.]] $8 NPUDįgI@9$~~*/ER.op==-On߾x*orr~zy鳨z[^FRיzy$XN˳=vzj*hn.\oW Y>-*/q].z7W&Rf2/KmݫMG{a|Y7t#0p`P|\4Wa*n~OnyF[*a,\s,%eFr]5͉DK^F̲|"|Ekm1T]y*ȳV%- :DąԔӽDrG[|Ό^=i1pkyV>*ehc|1C# ,'ҭIϞlٶKK`fEdc"Ίt,$!ztVd sOBXg*DMkm582v8f08JVG%Af/PJdZkAu[,;Ft ;4Bj=T ȶ eըQ=jPZ0!S!E9{Ps^:13FlXFgmAG,>q> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 420 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-049.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 432 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 433 0 R/F2 434 0 R>> /ExtGState << >>/ColorSpace << /sRGB 435 0 R >>>> /Length 2874 /Filter /FlateDecode >> stream x[ˎ, W2rQ3Kq x, gqa#Rp3!UԃZ6>lܾ~َ86~/>㯟oQٗ{hߚE?營??n/nyڇڇf!oo}vl~>ܤV;T[ M}ñ}s7IaWícشB<5ߜ٣͜/M^~`eGk#ʞC9a˯v6 Ө{IFr4{oT/]lG/;s>шc|o#ջ[bS|zqM]؋ŝhHuXԕrT 7c{4-rΜ`iX^=׶,V0Xu_ 
}Չ}uB$okROV~ CrZM%&FT!cGiJbt%@=iB U`gaWg{zrNU"{gJ>/Ȇ7F7V;zo4q2LQNu4cFZt_ X5c#W;gEPD- E$ 4>q)p@Ū> +ymj,sOLȀ]p`FCǨcxٲ{%`!1z]I "O0"/qi(ZShv,B#PB6 @5ƆiDaGZ'&4il kQ;|% sA4GN>cP1KCd^EshJN%=&g$ 1 r >bg@}kb"d,qY$ٳe-tAsd{]JL$"aؒgN8?#/K|J$ȓ8aEh$AʕC[ivwF#B9v0AsTrN(kfF>Ԥ8[a|RNYC6=JcgBd-' HDc&c) H .1+)η:P4 IV b^)d>"L)}$0:&`-B^S+ג F)0A8#ŭJdB:!uR?PdT<7cX9XQҬR 4ru ˱.'AT$zrD{cSH%Z멝£Nj\6ztgΦZĥ[Gr^L@;-a ܂!sIXXagqz‰KGn_=8ϝ|%=w*("9ظ#JiJbt%0CڮrANrx``g]ezrKh 7=c/Ӑcqf4`!suBoypSz =ȟW3n^%ՐE*r^|ł1bT/=r0b7eDCOxa_ɝZJ&$_˩cۗTN"Uw#OV.{탹5;}h].Ǩcx2Z;L.%^WC9i@KrF+zBf-W̥gHh4~Lx&ZENL< |c|Z/}۠y̗<<AeS3a% bDUjJ UCڇKpP O"4huL8[\'ƁPv.%^WC%iA$撩VƪCqۓ؜X, :C#aaN6"|Ahv0Ct1 dőG. Ȉ]ҢdqyNGFJwvG4cQ1*f.A))+plDT"z;vCa,p Ś a ܥαk.X|eq}b?S0S0[K9SKe߹a+_"S5f~›kLQW}Բ-'c1I+7C_0(/ ݧܱA8eb;wV߉v𘴏1~]SȬ{E Z}`녴B/KV|(<(ɛkO,'M#J"-&o|ǩ'<DP~d{3TM4ͷ#k7KfqSCt)u"63JÌk/h8Ϝ> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 442 0 obj << /Length 818 /Filter /FlateDecode >> stream xڥVn@}WXTޛT *PDހq$NsAЯgn^;mRje{w93 ?4N&p.˃r{֦7:P$k"A:ףōJA2Ph4rpWa k k c&3{@ Hf .SI8P܄F6 \2H'SF\#e. 12ƄISM6C[Vl]J@eR76#kM7~u9qr4ma i.G'w'"8gD%"D$_Famߐž%..B2#_tRM:ӱK*Đ_:`,W1 ̹zo1}N^Z2ux.9.rxȽDFĜVnzJPa,QB] GӦ*58vfW*km6%҄/*J2 X8aFJt!@(~W6gWN )9X T5%fP|JIo. 
w])Z&OP|PL>eGq)ea=(qa{%6WN,ȭX=tE2V}EGE88tkbNBNtm\ ɍrJrmKhC endstream endobj 421 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-051.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 443 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 444 0 R/F2 445 0 R/F3 446 0 R>> /ExtGState << >>/ColorSpace << /sRGB 447 0 R >>>> /Length 19392 /Filter /FlateDecode >> stream xKMv6?(M~A^x 2GQS{viAs_?ɟ?~o?ǿϿ?~PO'/_}>߽_f?{~?Qgִ%?g__?~˟;߲UZ?~u~M'ט_b5_]M_%I3:+ԕXyzV›ƊO~W:W]\R:Vw*Sj?yOk3~0W/뫔,]_}o_5nN]_sdW=W?y_{VƵVYOmo_ݖ.}qnٿCO6n6lN~pe\?8.Ogpu6Ο rq(y:Onu<}?+zSW N9Y?Xz?B]qH?u [mbWx=g{6?x#מ'40BJ~Gc;vώXψd=7}[~B'k<Jfw,:~>sEϞ埼N\9Ħi<,as?5xm+;z:rWjo!<}9 9w#Z;ۉ:P}$ av+xկ>󴢏fܒn蘱ñIDct=Q6-qַ{ FV _Yvrjvw `qXsH A,$ZzmB.[L'MXuR_يvavMa_ނx Nsk:'-8>4KbZڐlWLx}m}kjwgba [uWlpV s918| q_;\}9faaj7S}f'.~egaqZ\7_Znb[T-ݘ-[~7v>좤$~ƯۻJp7ÇLl-xlv+o ؘ8&24W= !|VG+(IX㰻\ksbj8-Kbo}L\.[Pf8ZL_S&IncC[/0?vH0vl)ζ$|E5u ca,P[ĝ;è0Z 0jH qiϿ;b zC q/U>8Ci+~m֚8XӃl1=aΙ^ c_$X ͍6&9Fb8e;Rj}^mPvQ`hNa7W$t/8}b>0Z,]|!]\gZ#~u}iqu_&JbJ3J}t'h3e>$Q=ZN8 )n%9/. X5޶>nrq {=>| ~sYz\:t̅͟ٲDzWnlyV:mgM(Yhbm6Owvl sƀH8T'cоq}묟>_uC?g8zi4fm!W`.g\U{Aq^kR6Q9ǪbڑGkk1a{q #E8G {ƑD}!>VQtFR ԏķ8>{pJF?fѢ})6IkAmh9_nEd8Z/Q);SF>T4IXy>Zj#|%)9G)- Os%(ʦ {#MB ;TU_tRfbF 4$q`i~m(bc2F~_8xGn AЏעiͮsF 8. b/X"T/Bbҝq5/pŴ y7X>]JW1%^14vp5@[0Z4c^Izuqu]%{ Ѣ.kh\\k>)&r uKF^ ; D0hm;1Zl;*DZ&i{:nt[|FzNhOCr8- .hႋl8W7nǚ' 5F\onses\r> Ų~X-kmZSX_%CL$8Y4i8'-ɑގ; D?3ENnjU_Mނ/"9Ԋ6aA'P؇jBbWX+yR~܁> _Dk䓡J$F =͗ |9:~69GkS-&XyG g;!#ohji-q!ߞ^ueu$gd VU Y<NqMkXhu>fWx>H]ӽlwyC+9Q7 K=. 
9ZM+i0ZȨc^O7Zl6 LZZ{Lǚ6xKR81R;W'Zl-0<\SXLK}K _; Z[k MsQX#ݮu{TW.# аF-\`4ZӇQF-ՇQǹ6FYκXlcM)sҚ@\Ia^blPb#7χ rLl5~6V2x8}l/ގ{%\-mү2Q0ZUT+?s|]=7_h1-V m:c&bogpsƚ+6L߹^̸8Llk,*u[3,~ ' J\i#l(6m.j}\E8.@dNJ5X_tX[+˙Ǫ/6W]uV}g}Tcǵ$cvs3xs[a^ ~;h8"V%܇]tE_}ksoah!vpo~+ɰL{ΉѢ+XsCzX_1ytXyc0_3AӜLh/Y5ib9h_Z1KPhv ] v?8.`~KhnanJ/]J-gqb9l csCpu 1H OyioG>QOOÎra%bw9vy_eHs a}ھLaiw]p,L57vl9?^11VIޛhEHĥ\) lZEKzup5]kt7Rn 9[+OlـMl{k"UeHU}TM>!hLnX-[Z4{؝`qh\Gp#XԜ8۽vNa0}c>+>䩢>ҵ%3]?si]?c]_U%Kc̅hLFZ!4aQ(g;CA}\A}wP@Z'X$\NLs[oæyqXzMxo-^w^wc= wg5~#oϚsL8ª1bK&\8[2aD炳M'o݃ڐv"/uR]/h ±bͣۻvvbslpXvn \lh2b[dJ "Yt9c{O le$ cFؒnm`K">Ñ-)lnl.h1,Oɶ+\F 뻮Bqsn>NQdmEXk}x;RH gy&0idM)0ș 7̹,rlqxcMKp.G8֚|ВZhXvFҜsQewq&Y`qٌ-[yLĪCY-@|7n)Ч /ևKqGVuC?ه=pт/΢51\^j}EV@[ʖdi.Bbem.h1p>&wxx=?e?qMKkOS!)Ȏ\-,zsg[V' -1ZLx"0"\I`-g:yvX$aE "j>H (mhG}= #@zc. baS+PPU0_ePPSExW[ta *pA`d+f9٤+qxf47x$gl`{ƌq,hik7_d=ŷ5,q]Jʹv9Leo6.l :bt ce׃xX_և|o|@t~C;iHm.T12k3fi6Ҭ$XB2\GP:8Gؿc8)A5!~S\_}F +ZΘqϘơ3q3f<~ִY]87kJ|?i%%kadQ&\ l9 .,){FF2IтQR@\Sh%UV6[t(prfl#34@H{ ;)R98]0ZgcZEƍԍWhE$B7~ΥG7CDL'S൉k4GKjQG>H}Ydx rόVbw5>sD}HFF  4> 4h!m+tkbh)P7'9Y^ N: PD tݧ xhh'#]8BBF gl_:|k_ؗQтֈFhgъ'.EOkF\0[X%l }k|GߴΑ*NR3ɷ{>Q>r#W;RZ?1y~b'( Ϻyyg=pJY F]L!FgBq9z DDze/ /43 AbplΝ!l>sW,"ύ>b_t͜J!KAI q|7&h|$ ߘD%MC3VaRBwaեwHZsA޿c&C!.鮘әt?a9S6!V #S3NqXjz61oQ޽eN-?v}Q3*-|xzg-FJQC4oRDB$FZع4[> '֫.lݗ rGvCe{_,MFY&boQ\R"i-kfs0h؛Ѣ^p aºQ`K"6;(L-2/w猗w֧;q〬!~^_~ۛ<~ō)bAsIύH|V@w~szy }߹"[c%aa5;#l]q9fH-#G`6DaWhk\# LCHݠCvjS̖ؔ\ Cj;|FLF)D(A_iZntĚ"ށ-:"Bat4ӻ4QVe"Cdh)D qG !Zk`+> vPStE0Z@۵+o6pMs6,'[+(~gȮtW-#Yb}(,8iq[3Z X_1zah v#*mV'Xs8Is8~-k \'$>_?fFn_A49V(sjX#bG@#Gd+2 >8#x} JNy?bNbXS_vAsd%>c3Ld2#:u#:q:LO.O(.{i~#}sH𬇫'ax]gmfʄ{[$W-h#3:۳h|{y$Q˚EC c_aTp![Y[>&|Oȁ$D}J|= c Or ]s ekH6-ێ҆Ib@a1p'l'/Oze~r0 +y9[bcdbjOW}seƊux2g/Zx_e.n\c:$O{Gh:<{KhKXViR9 GZ:x[![9_J\ hr0e1ľ6`;KtF _po F"JUR-I(i`6SClG =tc>*4&0Q^scBRFYJ>cGe !>LDcE$l-4\k@Jh`ٯREk8%Y@FPb+]9Hݷ51[It>?2hl\}ϰGىIƶَƶ^5]͍|1u } ;Tg>jdL373Oo@z'!z*Ms;PV7!Bxwypl:o\y''y O܁J'X˰JZMIo[tbpWBnJ2P+9 n<~n*ܦd1K{2Nf[u6f7K"aއ}EVꁕ]Xq\*\8Ϲpěy0'~Fч l[ԪZyG8KZzas3>k^1[^EVԄ£IܼQk82 
Jy;+k2bj{4:g۽k%ܓha.1Z8{gwlkw]7Z4$r Pgfs/I&&F gf #6J%z'bl|^1Zk/mVՇ\Gxj=Ѯƿ`c)x[ݞlQ y-㰙y-RKݥzȓZ a]i!C?2óKOE/=̽SKO["Kr1-@͆q%x1p͍z\z\ߘ Yg]&FPxfyv cwZ1Uy uv+TzV ʱ%M%q-g`!JbPzVmXk5"n@[;ȴXH{d}*òN!2 F.5B@15՛}VoV30esG.d@+,֡qNt,Rh9paZH"[U@~}M=mW{V9@k!M3{C/k?I}c[&OSvy-WiL\%4UW} vUPꪑBVk8xTi8[O@ /%[sibREآh@Z-W!{oGjG< j?"gیE M ,+l0j۾f6"h0*Xox33@,2USq3}?3AK>5+VKy 5Tc0p6)J)Wp>GP<@n$vwǐF|39gPk':Ηy/",R'Yxǘ:w$eJpdZm9ݦdx/wOF.}?$>,ՕpGő~韵͜-OGv}hjViFW%$tTVfd{({J!蠒ܩHe5h]P}=pϠ@ 9bO]JH:]Mz 8Ml,D~w["xݐȔRFq$BXs160%$ wvP A/CB~v/ Io4~~?BDDqK!28' [ia^ CWZ/u]fz;%h[5"sYHcG𹈬3S#/.du< F廕l6`yA]\{o6j3;`ҘWfNn c0Z |/*9<`Ƚ2^y^߈zAVd /QՁ⼿Qjݣාȃ `VE$]CO]tsow.%TI93X$[ZȎ35W r9-s d5HK#F GQ%&y>r9!5eJҷ5wMRv{+Ǩ"%gNkq88}PYF*s=z8#8ꃚ#/ڬ,"CQsf sFWj:*U$&)bh=#S!4%Q*D.B.ms3ܕ}n.sC\oBFYQ0յ0Xv)tIl>XRo;j:(j:re[[=62~Df~V>W3 ֚>nG5\\%ަߪeH131gXgXxpCXEN“Tx]3?ӍhFL@Q⅘u1XyonuSHXPAηnd_(Qdy]+lYþzp}}`,/#/Ә5]}j!#DC(trWqbp 8X8cyXC6bpBd%!~yE\Ѣ x+Uy-Cy=u G5?llI,ۇo!F qǭ:H6f ΠIX(Ku2jj'zL ZǴ`XdNwc.xٝC/b$TF 11m]EzS0$\̳N&daKhA&$S}} ad;8\P&F P-yKmgßg-~(p]*ȓqKAM]-2>;8mA:1<HrbZBc]HKhv ~v-\"]$3c}η>>>h(fpo*xpi_lH!D- WJj 'AFmG[r7z3)-av FNJK(|v25"Xs`@l.J6cpN-? 茕9%7hvOhsD(HK3xD{lQ܌M-n#!p곱/q^ `\CV^BG{ ʖJlrư /OwRpuJ4[ݟ6GW`QnwuZwl@Z$mTHU-* i+ ĕ8cu_#~:D T$I Yr$Qd.0I L_ic}1յ y WEqC Yj,]F?n27 vp=Z9( }`Xh= (8G#qA/`—w!e447i)jHħ:ng.\2S [jEͶ;·yaX!Eg6,^ =x8l%/՛#1!Hc+Jp^)-:N2psXKO Xf43 xo˂+ *m7y^87/ $73pz?#!) ET&F џUYxඐ%7@ڗ$BA|CC 竦ݒ.I*;GH)Jr}UWT:MAᅯJ'ˡ w0TJM.s js2z k F ׀m c:h0qe&`aъ+p%A.D 5}Oe+>qte:8t"c7_}j>ф&:xFVSkXP qVE;E銳rٿ(>/vD>ȽHP;,o7ȘC"\24Yf,R>/P1SpoeO:AĘeIZya ':C7 /1(;bG!S(d#[5e-.3%: Z7'ܒQ]!( 3{73*{Hg]C3w >)gx[ Eo Mp( P4Q )2MG7Ky UΩkod9Yym̂C\"w/ISj'k@;{K} ]; W5w=F .!t}0wsU~o݊T Fufdܷ: oo,;n#- 6Y7'+/,kҤ 亪~WSw6q=rT2"2L)^/EMТ<L/nǍ-+xs߇>Ŝ;C%r{WGM!dbw~# M/#j'в}v{1e( 􀚿`9;.ޠƬi|u4ZͦEᰀF.EedDJ6F;~Y˝zlǞi!ii Bbh_l'w?bҠ3*݈cTQ Z_wL8Ẑ-O?{ҥ[18mF r0m>\!g|.O;u [vgvʑ jM g{6?ۓ\=g-ڀ>. 
w gw&5z*'BoE F>yҲt"UGdKn"id"{Ԟ9y 5y );zN:2-wB|A7f_GX;oW=N:yП%%EzmBuPNxDUƋL_&5=pБNQm8]e?,X_ }gqQZ$mm!3·EC.{$fyQ&SPzeX{7 &; 7?3LAneB.).f3z-4N&YO*G_W ͤ894/~o|~Ycc.oBͯ΄Q'694@tBE).YqfF7II /Bͱ?J;>ɏ8# $? }z y+Vz@7 5\{ْEpp:<6<P3d\a yp`"WLo睕iL+'u'?}n|>R/L@+95O;_([*O}}mFXr z{e7}T&/J1(_ FC[Px~f>F_BT63%^JV}=*Gזe{겋;5VQdȧqQo.qƛ{rhagb.(ܟsFa.){PZI@mr"6|wӽŻ <7; Md~r@8 'C%yݦA6 #+a.Nw'hˆ]#_f&=kG1l?jLd´܄ )|uKe;yN&΅ME[Qs 5=(:i$\Y Oi`V9vaT='?c_kP<#[e%|O7FvC[zҏ湙&Qpnr"-;HUn\we! Xt̅zVFF crk43rѨ'.mzWKW;#JW1`-[痀}O(E eij7p7C[0,\ݻ ሺV7Zυ{S[, ?̨m@p-8t6רog zjEE  &*s*ݳZ|j{TJn`{4H`G]J$Kf. %7?bCPG`^ 6/ "-,1YgUw1zHmQQXI΢Da} A|@0yo\资il汫]͟iBsC 芥M=yC#?5W&#\H<.&R'BJ\_\Z3R 6ı=AOܢ[0u9洛-8Dr`ԓB /מj}-#N 󾌠qeEg_;- ^ᢪ`I(=< Z,Y~:l~ 8 F(VG{FIei)Bw_V\B KP#c.^nCˏ!(cOg)ֶ"8%WdDI%ȪC\ҍzt,{#q ~@9"+e0?8 ki1/ Otc6ĉr5'55hU}Ȣ+Fl=)tu} -j?$% On*0axMt K<o??\MdQ|_?ћ?}w?fO?/|!'X?y>_Ф_?}G?k%q=T_٠}Ee endstream endobj 449 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 438 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-052.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 450 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 451 0 R/F2 452 0 R/F3 453 0 R>> /ExtGState << >>/ColorSpace << /sRGB 454 0 R >>>> /Length 1198 /Filter /FlateDecode >> stream xYn]7W⣌$ptv AgW2 v)K:{Hgggy\)7k.WxQ=Zm~0\N]g).QY)xsoh[ZOw?ܟmoo*ofiQq7ˏrY+n'A=Yѕ]fOoOWGNr/V[;}сubmº/ͤ@&/oë.}5h}(>FZ,f F,潊^ > ]::'W!ڸ 3 
q{mR1t.V8@8H2ZB5CSC贷*smkL[ ::)ccS2r4(7usp+NAgY~./%" _p XgC}e=W\&9]j#FO@'T|H5|Ȃk`rтJL^ :qs:E 4q3]J$/(k2a[TX&@^DJ,|X1]*e0Q Am dfu[ioiiwfb2{TZ6Y^FG0¼ey2E*͉5)O#RN59s/Ka,KjP~e'0ȱ!pt=nNH8 OTDON`g$LyN6CM9} pmw7bptb/T$l-  k.:5z駮e!{t ]Ƶ~st7$u3)!Cꘔq>`LhpP)L=wLag7(@`c &<2ws&hAX%VG}<|oХTN95 t s7nhMHk}*Au2PZx@E3-Qh &tqiD"'k ejT4(hjZ@Bw@v_[%}r7V5myˤ ֔j; gQc=g{"<_~<{ܧ{/׭ނ)ЀGgjH<>il-{X}rgOR_ endstream endobj 456 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 461 0 obj << /Length 995 /Filter /FlateDecode >> stream xڭVK6WЃ D\)hm 5pOm-o ؒkI;&B" 2 ʜ>9'\(iJX5͏=PiQ{w.]O?W$bfuLIeTf?^f]g~Yف|io~{J!m]k<rh`؃@$ Ƽz]8%hFTadjwЖ&P4@sΫfTΨuf"7Q(oDE-+=KE Z:=Ztohw؆iG N!vïUbfm~!1+4b]XGCC?׵.DhZ}@0k?qdz)U D}PF8jF-77=K>YჀ/e|ʡFSJE>UG^yNU O`/Sx¥ڼ Ζ?5˜1-hp S!;4\e=|^[ <ʏ fTC#۔ਦAķ՛pNw\PWkzDު)~3c(ӹؤ <0}3s0yqR_L{^ endstream endobj 439 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-053.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 463 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 464 0 R/F2 465 0 R/F3 466 0 R>> /ExtGState << >>/ColorSpace << /sRGB 467 0 R >>>> /Length 11523 /Filter /FlateDecode >> stream xK-qW1` `2/@>,҆azڷ8;]rwUZۗ/^SziFӯ<^Gyӿ?|ۗ͟_6/]C#~WKb_~^~O߽??/ZK.՗U_sһ?zڛg//?_no_U9o=$~G_?/8/1/_/k_?W=cw??eSwJ/?饽v[k>? 
4njZuzUx3+xY_8u wy-5y{ş.5.`쳱YkzdQG~-`hd59[[uzp2]_L`~>k)culr齏u#kA<36>lzLwXh:E mpxv9d]G+O_kaM<c73M߂+}{hO9х/)B-^؍Ϻ&}>p+:N; z?w lN;ǼF'A>{egZ :+D2/yE9< n|XQ t Kp d{̼96E{6~ {mA㻟bB{'^*aZu`<ɖ9GncNdu &9.A=}TiL Yu8^a G@%uFGj>ύNF_nA'mg:kDWW'PyQʷI n6C|hBtMr\Ys[v0p{˽{\FgqK{Yd]( ]s*w'E-' )QpҜ 1NjSǜG5vIQv!Kt,?<to:$(} nOBH6lsByi;R"]81'ٓM~qHOF.` 0uZ}8 C9^oݎ*9Άm2z`}ч N$Sk1:2ass~D.ę8byټjQ׉wq+B|n;C;qzaVg5ݻ|x16}]_ëT\)"=dͧ.(\q~x(I L+ی,(<VOlfڹPA#O o&|8Cq&ҩ&#K+ Pr5Cn҇0D]=!| _d303[ *̈f-㽉.8{F[hthAI;(9g#srW!Qv1PSjtfb3ţ:SV]V]R62myQ;/D/U': uiGN05yV*@Sw\hev#:n% Nժ[ _vī}]<0vp%x\ȫWl3Rm!pDŚ!Ȏ 0Cf!W,1@Kj4CȕqZ6 (-XT}ABN_<_9v>] 7p9̘fqf?ΓcEn!`WfQ{g+9YZZ) 'Y3]NnXYq ͵ NHё[. !1'^b<~ǘTm zfHtktd^\`En)-{c;ER)E+ ;Q Vw6z$<pp ٝQ(t{*xd.0 [lgb+Cϋb+Cne~62Tj~VٹJ0 ?iǸjC*.;FiVHM&]OHpx?;afFޏ*s>* B cVgh]^QP&1uBṠvF'd8+=mZ6E'a<<gj]y-cgw[>yYQZ"faO[f/\L>Eyͧ:OS:O߻igTT]=9*F5v%ޗSh}n eP.-"DOW&STn.{"EПvH[nk^ep̉՚{t|v5]洔 VniJgmeW&vw[+=90XD E5JrEwXtiAބxL@pfy\KgPF훖% [r>Zj>n[rh݂2p7ЊN@>o] V}:&J3T:/ !. h_mZdt+HA9 P"`X͡+3>%)`#ݖq-A&{O&0cQU G]YZRˑתWZr:~ފώ<ի*2rֲGG}do4 ^ ~%h=mvцGgөs~h𻌎 <~SMMOgwOM=5^=1nC=5o7V9}?P3+׫Ɛr_6b(nv4R ؛U= $]+sd6dy%GaR1I˙M݁"yx7~^6([qA3ݣRoәTXs|'7l‰foz+>3D{+B[ɜѯ^^x~0ns vɘJZN0]> D+e/戹{TDA2jt],n y=|f:1n3rj%ab:=Bm @(8QˮJE'7] _6Wa*jݣ%o,TSߩ1ڰso3Y[&$P: ]EVM,9Tk(W9+)Xjq*Y"―QbgN;Vn ,iǩgPh ']VmO-лZpqpjWlnv'wkw?\"ȁB`.B:"P; R3G'X A-jW"کiVw%\8E`+wOAc(2e^0LU-reĮYNLmi]T冠#%Vß^Lω{sJlyŧw7%GE řIފƚA$~u?* ^s*o& c/~5J^G틚>VWٛo3`ɲ 0BS hG5W}+LGlF%@xwy㎣jWpՅ`+f_hV8E{P吴پT| $8$ g ZI %:;0>0%|/7ho+.o7,5YO"0~a*;7~%|4RE )3n]ɕ\9F` 7EftLӂH(.0J(T؈}%7T@SCPC 2Y$u_si!bT"2LW{5(Zܮ&\Ycu޵ȤсyȚT%_RA/MWz˚ ;ӇJ CHObTXb-c#DˬL~Kr ]'1ЪNg.ʚeB>UqraAd=t"{LMuߚs$MQ)gP&CHc"b'̓4mM=9%Z;EJ5$#yu!Bz'4!4Yiѓ7}*HPNw8һ${Βrz=BK~wwtz۽pINZ@"|ttOGOHJHr+ hGmV樝(q SN*LFB]͗[dNW .;uݦ?Ρ;^77JDm2#THOVz@$:bƎK-4zI)AF6kDZ%Z2kZ|NxtJd !ɟ3Yot\8/R4]v\aMJ{+ߢ巢\ԋ+ S5u8HfW[wVh #z-cBux S6A'm Taw$ oݳCR]30T4p˜|cK9v}N59@:$^tZSWd~wC[*G2]38.9j{UXgRl!:K\K&єSlƂbAx"Ѫ #2ڰF3+a-(JXPn 5,|&&zLb;Ř|K#v1EF/ʕ F.fL2E|W3t#W:SYբGcƢ ݣZ=VG"si,'5ʔ$wa+Ls5+;Xr|Eߐ8%D" 4ኚFhk-Ѥ "L iVԝ ou Yic÷u$-AZ-&s\bz%VciN ]%n׶Ug *.U' *_TTxz 
$كtCSs u 60& d(FqWASZuޝK*Y!9=vʢdaWg"c|nMWs%b5;jkha7:D7TCtVs=EE8B˦E1Wa3}!Y 5U{ntڴ,2AhB'h}3ٜ$<0)i˸#29J;R`dg=PSkCQ̓>}w G2њ74qis:j]ra@ۗlK0ɼa&x+ ^uTIGG_؈2G`*Cr+C|PW,Z"VvvLg7&`Z~g)?.<iرLUҚBA == 5ՙ BjHUCY*ae+z1kbkϏ}Q[3:.CDo@^ 4ZLik|uսQ& xC@R2 xݿQv;[Ku#$r ɒ@%MG$\q=.sOdN4M(QA ?|òa+]IRcJUDDCVcW&$|QnFʻF8S2$RvW=U#ͷ!r ꬫ믗{=r{}?b+#{,l{wuwXՈ_('kClQ%i붭X&!7Ycg&Xp/fkVYW2u1% TLc֜2w)M[W|[&ٺc-,Pxr$3ԒzhȚyYnB1{]Bp.cgdπbq*@3 5eKZckZ<}Cnq:7)2@fnXPTt=)~ ԝXB\h3w!>nVi')>R&0ȿKO>n;4prK,M?%r?jY1=^"G4tK4U/?Hs0D];ɣQUClHF9xwqM,u[K88V25BQrg k "E>jk.,(`yRPBivTrL%JS R50R6 QPVA-TϜJhLqP"=F'q=t lwh 4{N/2WؒmaS8&꯻xĈj!z916>{ү҃ O@n $~3=yg҃ZWO݆,DU9̪TmN|HE&F?֎ a:BJqbktP9k 0l Ӓ?/:$!oMĝUX:_/a'F֗.;Mꪨj==wd}ɰߡ^*~8nyI&PPS-PdWG_Q2ou 1qǭgS'aӪs4G: ?S.|3IlG3D]љ}!ދ6ö%*2Jt9sIN |vHIJNzJ1:霪5Վs1O91j-YI 2vg)g^j'V >?hؽ)[R&~]fp3E-Qi ['3 Lmt?ۼt/ooɼ[E.ow?mJh&RzG8)t'OtITl-NJs}Ƚ!0ۛ d?/w qs GR“͹DD)oqIkNyV&6Lk!CCK >oм;BWo#͆?sY: 9O}UD|2zwA`UϺy6[5\I1C@;] =>:NjjT{ZYF aAlfY" V Rx=L|.w!)݀)g3FĐĠ'TI3*@W)yx3֬FNU)==R }>&#Fɨm Ꝿ#QU(&!:LjNEHXuA%`ug9z2ONܟ=L&)};MuLPb. ͮ%r4 b T͞xg w>xKѰ§ 5smOIr=%ǚBV֛_*3J6SIθf,- 7C-T3-Q^{i婜FETJvt|Ǵ7QNWȬ%Je9}dRL\7stNj̃H JFLJET-pj3'mF+>V%b~Ͻxgy)Y0xoy+S\-\qۡ;~!9$좭9[^$w&ϳfOS4|R8N"HjV jQqKƶ+"JUs|ey"k^j-5Ri] c7 Tv;~6i0ԌN-Deq{.5jm S̑Rm(BNد:=e 0-ON2CwpO d-tyH3:C m(D=زnlaֆKVc )Y g;HK-. 
ڑ?e 낪&Q+g*ky[aV ×K endstream endobj 328 0 obj << /Type /ObjStm /N 100 /First 893 /Length 1362 /Filter /FlateDecode >> stream xZKoFW1>}FnI |V] oDV*qЊbڙo?t֓&g%K$ L0PsF1(gNW5m96 8ɞ {s 1#099j*Y_G֊N"d!&FK|#32;.QШ1LZl4ATq8KۙB2B퀈s8L  0* މN"XI*bSA)!'Ϻb`,Fb!:0XQ Dz=/3;ي1yYQNc 8<.}0ڊod)ZE()R)vRSnJ\_˘ԓzQDԏemSWYMհ^5?K7Od7驘LdX-n/٢>k//˼̚7(ngj\v}jӲXj^+n}QM;oח^tRu̻n2դEEPdzK5GlՔI;֫oz^uWݬZ-nꦜP h0uV Bp?b 2կ/~/ͭkTSX`P|v*:cW/]zq*{:/RS$i.?G,n9 ~l'{z#A8sDфׁs:/uv/[P Tc[۶Ye쏆MM˨;CNn}n:no.ڋ_|//'6YNw~חCZN"nD!*;5V*Tz| :#nk;)__xzǝ-+~Srމ_רG~ݙ_㞎qq0_c)ci+e8n%,%[gyMo'V8D}~D>d)oPWi'bIB:b$!yf|xx5ySښO0Gn|7fuv;q nv<`;w۞66  ~V`#zk.yX)ƩUIq5{c=gccw~1yÇ+ʖ/v;AٷKHٛ)쉲VC;VY endstream endobj 469 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 457 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-054.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 470 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 471 0 R/F3 472 0 R>> /ExtGState << >>/ColorSpace << /sRGB 473 0 R >>>> /Length 1052 /Filter /FlateDecode >> stream xYnG+:$RW*6@d.@P$JAI0]5K 87uWt8ݧyӒ)^^>}[ӗFݛtbNƃZK*t_~9Ag_?\=~N\Տ$9]}~w1nq-kB=_?ҷiϻ ܾJ|r[8«R/bT~]*wm>o-K֨CAWH%ΰ*_<L(bR]BQu' ܄p)^ <"pR23CSnb eKSb w&IjfGHĕ*m6z7d|4\LIP^.!˰RYq<r3Cq?l~(?XY iSF$Cc´t4psSH;pem՜i3{pX; 7R\sǒnf (}pXvW׸W{uwnfWRnq ZZG˥[0cN>|hϳK첖Ay zWˉs"7c}F!RFeG p/ýwT>/G ~ʗoܥ#hu^Gd Gmw)hT-5gAFǼvb;ը#ug nB ۖs^n(\Yhmgxd-uךyY" Ф/N#~> stream xwTSϽ7PkhRH 
H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 481 0 obj << /Length 900 /Filter /FlateDecode >> stream xڝUo0_!x $1`h!kӭ"mFӉyK Tys|?|2𓑱idet9>Z[.їvi= Vcң< Όe4wz.d]u,Y 5U=}v_OΏ\'*sm)ةIlհ7"hD*`%)0!d6=W+ϖ*CaeYcկr` Kr :xnU ( ؏F~qdDžl/oL!RFQlZk,BÖ@(hCj6sӋ+z%8 _ #S a`+v`BT-'Fi @1t\r6;+tcS x׏y&t󾨽6.ƲQlfAil׼iܷq( 4PqVk;E6 ֢CU\Ȍ#SnCy; 5gF2 [RD^D1 j/P[ s_ }J8h^:#[XΆΧbM/@n^L %n;%zCc QKNhN71T)ybXO2,> /ExtGState << >>/ColorSpace << /sRGB 486 0 R >>>> /Length 1844 /Filter /FlateDecode >> stream x[nTGW{t=%Q)x, d P>!VBg|8ugI'<9$>J./_Ok$/"e]:K<9._ui{BǛWNG穕tvJ7WIj:|2]e:>`m^a5^,nɧɋ?⯽χ/SII J8ltC[.K/o^X"Z&7./u,4Y4z ſ7Z1 ;#. 
V۲w(IݭѴ[-ng,FKw5܆[R2Usͳ1vG$MᦔWUmv+bf_[ ˅Y#[f 0ڦ :ǎ{(c)m~i[*;T?)MWBQfQ1 vevw#CF֥QL;b<|8čUIԫ))kIf9Í<9աAŊ[ܽҌWTճCd!TgV@ceAht .<*.V% <ԙ6xo0nKdø#D+aJY|ޝ  q1 3,Zػ*xph$vCO%)=Z(7="Nj/LȄ͘h YRkB;hP&bgNn[Yvf&0qZEjyJ96Ȑ"aO{~GtB1( e{|-Hk{7__ON~9]}_2O endstream endobj 488 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 477 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-056.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 489 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 490 0 R/F3 491 0 R>> /ExtGState << >>/ColorSpace << /sRGB 492 0 R >>>> /Length 7203 /Filter /FlateDecode >> stream xKeq+\``2B0! wF9 E#-Nͩs*+232>~/}㯏6۳GK?)z/o7?/o_Y\1Z}=?|9}3ƳO%~bshs(SO}Eݿ;Gy[}>[_Oomg~6~=zO?~uzgysI/Wz+~< GxM>c{wg=dyߡT2i}|{EE糓mVR,e<"p]7Gdq1h'@O?/tx/9bĹmPQ#nsL,q`c'zz^+2pI'mst<2]lwi\({AOd*IEˏV`3`_>.v;4|mVj;A:q|:p}Qm. ֓KJOAMK> YVGOO];7Ab+JЬei'u=.)ٺëuؗח{m|q&&d#¢k@Lkzn .!o"FBw ]6đ05ɗt:qaet ]Wq=|<=иᾆ}y׵܁825A||=fQw*;}۬B6m2' wtѢGqrB  $xmtp A_?ΐUxi., _zedP`abk2Wec8;he6 13V9 :̌E@br ~)[FWim b8e_b@TJL>Ll߆~zgWhl]6l'O 5d*j q?(ȼJakᄬGIXb"5ח_dٴ| ߰dbOl>]2c[&>AXX'}'c!Gfen"@ut0p|X@qeGkӖp]lu5waWp ye^XAYy3JU|cu2yljFN a}~anQ 48qY׎ xO*ȓAC Ҍ4cׯo99?|*E?|"ן_3U<0ӫGg֪\f? YdQHWOw8ƪLduöZiEGmy83 <ಚ}&Q1ՅK,XI9`'ӏ7qBy'yP"y#mK4!$^;2\$52*!fy8&J'%Hh,P|cV 5*6F{iwj 9t]S Um(1£qd'^+,$C2z{I4~,1SU;BZ.X#.n'GP N]~%OH5['3d C. 
uOUk$K89GhcK _H-q큋J=c8zvJ5Mެ^hgĞ'ę5eHڸ;kC+YH0+\,24L2(/>)I9C}g4H1WCY\kn0y.Yxd7"qd]s&q%E;.zgȦ :ޔ wJģo ͰKB=MUS):5B)0nZ$$L+T1׻*QVnB~!#x=QB@$|573>Jo*cOFoa`kqYbٶf忂RLC*:Yb]LaXFe~nc˄;~U T/eZPdb$C@xǧS%*:oӑFv.u1Qn`5=P6 iW#I*蹶eL|x#Dst@$Sn\@>6t2CW?3*LnuMƎG$\I80I*>eR,>CS}) Ĥc3 jgc|c7&ǁp@r cȂnHIM  [ħv(4'W󦭺NVU)(m^dmMqCݞ|a>]trKHU%]!uPTY%$XH7+lr#t,Q f۾X6wE}RUĢ|,ݰ8X冿a{YRWˡ@1P+@!OAX][u˩W&;xfT.'^";%E!ZY_0p ɢ+>%o}AmX`j8|ʒ{gǞȾ[p=&WSꘃEa̅mUSM]=3c]^ǁ=V@J .'ǍJ&nQQӡZ8.fbd-;5m 3"k}ʋJUhdYQd$uT۶Qg);(АVNGN/`1/xƜ}13,[\ C-dü1E${#ȡ WcTPKFkK u}0Ó(W"Ϭǁ.ib*HC-𷜮8(1NA!{ӮYxIz ZjqY[Bu۴V*$8g@ŸcY*LQP|ޛ5V$Q-23kn;cϪkUX-CV`PjΨ aڥjB -1 ǽ&ےúX PaMX$=.S(W('8ޒ{NX6Sh˪ywC>~tadWf|+k1d>02$'wNۂeY ՝vS߷q( Rn^IϟB~" ,~ wIj׏ ??Zdu[8|dz-fb>3* ;s^,1E4;N1Qq㝡6t!BMޚ,Lj~s{*Ї(q4t؛+D+@`Pդ.[=ȻZ: HV &IRd9?pًI+8&HxVHb$I}׊^I.'kG2ꗝR*HR.JRrY.lwBb|!-LU7Tx+xֶí\CJYL*YOY R\)ݥ:>; D现rb!}vqfpF(YS4ުoJF[yE; EO(B[Hml+ؐlY'hw@FKa]D8lu`7Bҍ|,+,T~ +""/W^TW;kgV$V|mdLNh2(2=ڻ3' vT }W 0 X]u%[f1M,JNd_eB FVv̺/_Ri80sPZ٭ E˭|yX<:|.`LIGj fZܼ[E\!.bk`wCHvTO[nrXgg=JU}Zb9Cbj!]n0^0Z9M&"|I0<)ڽ-J䣇e]LOْZL+.db܋F}T kq'py-SdXo.̀]*n'=&}w+h0MBf(iR$Mb?PW gצܺj\ K#CcAt}E?ixɥV/D%$Sgqr-$#Մ<+ ڋ^e*{ݳd}lP(&*TH5jŏ5>W69v+N ! iRK*RlDFMEduf Ŕq)pJ vz pn}qBb>m䬯İޣ/MZ"u,oѫnٓ IEV_ :94ǚzɞ,O>G{g7E'Gvb(*nS;3V9T¯T(H}$6tY# BsF4;)9$ʉkV]u3ejĺ`^NPgw9{j}۵eϖ.,Zvxw&һ qbe$ɑeC,lubȨ,ȎIq )LMl:d{Eo6A:r!tԙ6vMƊE^]@/O桒I5V v-tD5?Fv[V,'33[Va^nYń~ĜֽLz&;a$E) \'uxt08TE<I5iݘ&YFdA^Sua*)G YԤڨ6i9!ܝn'k=$7S+EB =sصSCn1WNZx,#\K` qn 7UW-{ȽrwG@hB7SFE86k)oE^8É$𽙌s5ۅ)u~BqG6/Uk^#w8Lw n7GZvR0ppҤiHJITIkֺ\|T0y@Hݎj5.oKUD-RjMGf[^,eaQ'b)aDeܚm ȩ 6DY[)jRZFD1MYtYxIvJ(wǴT#M\ ԬDP,DAI:t&iٿ>$Ymad9j3~'Vg;{9X]'`&ܨ@!`-&UsHgP8;v^EiTl?DK,rӝA6-|^LTC[!z:,mkf*4fTbaq\Ң1le&eEvXg johŗUdZ}4Y2Hs ؔD`L/!4$%|u^BQHdag f#WɵYFLs,mX>Y&IGIξ0 xMs묢p@e(AJ$Wvet T0(HTrm;@-;}[\_;IJV"mI.[oݚ:rHPOvQbanOul=J0lӓ}쀁&:*)"]?twP8"JBNa.{Ltc\vX fg|ѺQ6=Q܍*]YtB*#U_Ny[J(_-{. CB%{BcfHA*`ݩ9YA13N>vA3qt;-?z Έq쀍25+V/R@;÷Lu%?l Ԃ)kyQdd"8|. 
endstream endobj 494 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 499 0 obj << /Length 523 /Filter /FlateDecode >> stream xڭM@ L擏JU/Um6,)@VC2{y\:L yh׮sDIXk]}V?b KZo*tT/uM*Ee݋! *1~(]&['MC9J'^+m8áqbscp/q @ݼ@B? zXZlcӁXڥp=ead>jVF*OGx9'&`-wa~K촣Yj[yn᫻r|T&3ҧ%՛Y ߖqVN/ 9r5PSRiwC%W<܊s ng+0J"ĺlсW;nC%Ejq_C =aEG.9+6!qrư+(N.-ᲩOoDUwSÜ1~`83~ԋo6`?Tk)%%$Q,LM_?q13L endstream endobj 478 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-057.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 500 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F2 501 0 R/F3 502 0 R>> /ExtGState << >>/ColorSpace << /sRGB 503 0 R >>>> /Length 26837 /Filter /FlateDecode >> stream x-I7ϧ8CrcJB"@@n"!͆z{>lque?~u:篭_OKk?Z>x/Gѿ:|]G/Fh{Z|׽=$opp~m~vW}|Fo*׎xCok>o)rӶg7<Ћ}+P M 2~yU֯ n5|Tj_> ~u M9^^gׂ4' ߳$?۶]q,D@mkYeZ۶_d_~'^YU|ͅS' |Y*<3gF9L_޼mEh@ҮJ _}=yl0WAJ>=7Svfc|J&>]23oJ3Bjo{p[;?47}|6cmoFhkPuqc=MVTgW1\/~RY*t\=3Y&W0B#t@ov|lW>TE?. `=>G;^F\x\ 443`v ,1yElWY+{Avf>KV{͘jMcǶ cDxv;O$3`1^[Ӥљ-P{|`{vg cxn0=m|" wCS]N$}V/~vh_{jK~LG;XgSlزbN>r=#~gcw.:y}@@ǁ{| 3ԁ^7=swxf׫7OZeM.LX:?g~kY gV'{v~Z-{ҡx7GC`I,<~+\ |N.;m456P+}4UmlK<i!zIя6 s 3ے9YP-lo㜂_+0 ^6/{5zشW۾ zv巕] jRYzQiCCuf _ӹ Lj:<*>O8Mϕ({:HT/X/ܩ rY'&ijt&Ɲ(V֥Ah _vi71W!3hG']'\~O?K|f}aa]WZd[xY1 =ϱ߻*'Lr{AbĐr[=|xjl'̬ٚ&5+xxC>m3~g&(6޲靚il5yO=ֶ}OP/` y`>3F(i瓁igjBϞ[J p&Nvщ$ppv<㶏66``p sWǾś|Bںwӂ_vzSQ`u|l ЖʥU\ge(~ 6[3R! 
P&1/$iOplrplܩ?k%kejn\ ?Wҹb rk+@[ԔAk }@=g *FDl ӂTl6y&x'AE@TRbW IJ<ߓ_6 ( 2to9Nt_ŎxCzZ=9&"PfB0!y0 < HRX+ldaJ\S؜02 2+fZv9kD?lxvM`d6d:$#ك#뭛Qd{rVJ3WV{č[:\n&ݩ/޲^3Q*@%`Zy ^b-um6\r˗#N -v|RR(˜%hjqBaIJI,NqOh~'$)&=j& x:v'$;=-vX/NuKB\Iꨀx0kxH3|6g"g  h$UV#DvOUT! <,3 ?C@8m'sZ1Qq{ ؑ|iJ&f$e@HYڪO0- :_6hdښE SkekxF4} q#e:9X}:5`:Np}Ԫ62ֿ( v Cp M;:PHs'r}y ۉUZx`f=XiIdA75_u^ W{:՘2U6TL :覉: WΕJ@Jp_L ? 0M4/RKfr\d'LO횐 i/Ge}+EVF !Uh9Kw!gfpNu> ـBʋ7.*Û1dIJH&3A_Ū4qѷ`Ql,Չl|m:n5iO94 AS0}gYRfYbTp? ].NWdO[LNGvMZe'&ЄuL[*6DU4d2 +=K¥xK,㊛7ZM4֓VkI_E󞥐!:B7+q0 um4= u!| (OJsfZt Gr` [f_E*~.}c"' Y\!)Czof"c옓΄srG7냂)Fp8^ehƅSJq4X*m-^=J<{й2Ǡ sg +e` Hg055)4Nq+|R9W(R8:$9FiR[\;ӌ_?pJ'Zâ_jff0h #Ѭk2VA~v}+x,ItSq_ ERxc~^ﺒ^tֹO|K!-̞abNG20S=HpX5c[RV[3;6'0[X TU-\1>e@>dй24 q`)`N=)AK-^&ݾ@Ceۻ XƂiND}(dp9 )2C2Yǧ)/ GkP._Aϋ )m2![ijr4Rj>nf xjq{j  "CD+i9߳pCd)lt\Vdh 8j)` ;ԡjMFbR-5ۇ(྿ &GJ'FF[3.rGNpWFL`Sz&|o j$ f౎.3{] H%~3Ɋ28C%Ox'%х[ RtWY=쌻=ߟ7As=MPOI7ȒŲ ּ7<ж,xXlPO}U`}]SbWX}Ͼ"\>X\ǙqI:&+4`g:O$Rʎi"crv",qR}GjcELpa#W~vn Ag)D{4Zۓ/7ACQn.CF&Oȍ)^?tta Lsb50izD{ f|s9xd_d7< t,Cѩ׼DHKAv*.FE]]MѢ'L%XG+ ]J^dB@p:4% Đx5.s?k[Qa SP(PTg:C&,L*ڀ+2`<傎EOU-s`΢) zܕ rj#TO$$]/}իL d,j\c! VyooCs~sY{adp澷l%5O$dp0rb8Vذkr'i+aRʦi6)0 c55}(;ek0Sbb[FN;5f*XTkLZE6ѼMD„jZ(%MXc✼>=>1x:%E%X-В  b{rx˒j\);&X>k{6FKt" rXQ${{YwvjqIP~3j,Q`/.cƭK@.7<_nZG ˹Jb{rKHދqjx F2YXXlP\l[ V<5ro4@濴m]w*_!+PNT01XszaxLTȯX'@vKnS17q P.Ҥ⺭@--alZ[JZAu_p6 m T$u 8wd G QyVq ]u[iS0mQh c R8x'ҟC. 
PIVsIͶQ`cWԆͪǟ,;{Õk3Q{h;B^-zPV 3'sPIiPɔ)/k78*uSD}#QE+a3',}D^*[m&P0|Uӕ#DܷQ!~{N 3@hٙ Z{|wZF{g:kuJ;D '_,"`n6,38lC8fSXBP;T{tmpJŧf*=WN3Huh-C y齟 Vi)+q[mB,F><=&sZyh*VW_rb%w|dXD辨V~>a>R#Wn]WkJG/Pd\;jn5/[)/n߄gQ1z„3riRc#xn$0 g _ϙ+Ac 6izP<β 0ו TjpTr"psRn]S/tw{{0a+,jCF.+ǰ`;7GFԛ ~87vX=ԟU1j@(P\OPVtrR0kA/!L TzU.k*@ceRіEMo:}&5c33tosaPmpRј8^$I_2J:EH!ZEGˤεJ:a+úAmJ, <._HTYV`I쓑ok~QAEq+aw`E^hPEg^¨}y:{( JD`SL 7a- *iaW܉N[;`&yAA/KN{9x0ftoSR1D۷kYV:h5ɱ+˥%26`V6H˜:)-iJOCR}Bů+ڏ5>!8pCCےa@'&^Vvr̊6UM'b@ wK4l)EӶ\O8;/XtXT%P :iO{&VdĈLZ30{i?01d2W(T"FL$E)\_:ٮav hE c's L]ܤįqre TPe_:+bvaG:rN0F?]`ʹT)*+L˃\Y3)TC9]x(W03A쾳f:Mj!PWP/Da@&9t!k7Y>&y#*qiuLEظB;3Ƿ6>i_eq=5VvTGZ2L 0kUf3㾢0ȅStK-1È$5FHr<ܼR0l&#ۨ!(&C0*F'+UlԁyMELyp9#ao  E802(oݔ7%ʡ6֢ l[7PijZczDP؋C#gwGjIi+ic)bP \y@/II{miYњ>"WtIEM=_ՀԴ1*S2&H}w#HixH{*7v 2}|:@T0@# >HoB M[{^G=i+ece- +u` U -/~g`=*y2D-< 03:(驝\AM`Iiym:]h3֋{ lRB}ϒf)>{A;:jڶ$9|;kmKP 3փ$woћ{26"FFú/&P@ijizOI"ZvQ}@8g\CX Ev-Vɺq ҵB?Lݣ=R+P1Ls/bX"906+^2c'kVQmEh(ª+ejD}{2Ǵ)d<++tHsoPkvp#{ĪḢ:! =46aܳUU:c W^HKRgw%cMS-{n%1y;fL;iuf0ۢ^>xfrW&^)HϴEjhK '߰͜! 
NrLi!WRv!!@9w7Qq-3xō%3WX %U-7bZjaM/h[%!> *y U5-b62/E5>4f( D!F|eM" )3͢ (H"izMNXq?)dw`>9%PD 4$Ȼօy2ʞ6ZvK$JN28 {N"bҐ=,(bEV)[.WX,ue^n DRrIT`ЂZj&&L TYmu-E4ܒtfT'5i̪Fի88e,G͑lB;B-1LBXHoWB>.墴LR{ ~r}J|"RP0< !PMͩ =A.PAfa0xV5 ?v0ၜgWyW]H.-v֑iY>u+14mmoG 7Emf oOR߂R7v Z+Pf:˭&h;U {~Rvxυ̎@A"ՎY&PHjTXlP )f}:%jFcnIL=5$"xp8WtbZrA"7Z2Q8=O;0G_`OZ,QOfiz(SiLfB T.Z ۅ;(-M_K'^]+E|RF:Eʌ˦uk7ݻE5Gf&ʪّ+KJ56O!M󴌼l0k ( ػS0d`'y+W-%3Yr`W,^ 3Lרs#{K%Y7ҳoEvBU~ç_.mkk0K,՗Ө[G6=ة`itu kBVG_>V.ڴw/|Q+yzR$c'ޔv63NVu*6BA:B,P=|N˙M 4 U~%Ls.^6j}6phf7gEpPY ,s3i$<0MHpxȴ@1.Z'pUQ1l#@,$HޘNA| U jEeaLO~l(8#pNSU),$DfSQ@ vOM 0 RUi=I`҉NZB|AUYbhr!~2dZdn̤p >glkB fDl m-,%$PxDUx/NiJ+e /Qa$bO~Nπ4v" l_6f&&d6I+uVc:ͯѹtW-S`sЄa\F3M.x;- FK@y-mxY7n:V>S tT,H,S;Mhi:/[t:( Թ:del&uQ h޶_4-N:ð_I1RN]I3X.lVk*͚ܪ<`$"ʒ:klel%PQ_̽^x q)IߣS_dr5ߵ6T+[I]]B0g%ps@Q+q܏'g+N¯Rڷx?t۪`(t,$[8,w9`pfͿAqw7UYګ+ms0ʙVh`{!7 I3Q )D^ڟOa#HZp3|~XU—`4$w1.\9@V78=+W-OタԾ|!&bUvq(JP0ō]eeG S0Xz[W;a,*f3iA@ԩ,ĎJEl&bF8mj' iD`:I[s 8g/2#ÈVizHF`tTLxE ise5W&O҆ze ^H,مG5zˌ42Q~1#aiX )Yʤ#9Qq[>WI!x?vG| e+|ˊ\ UfV4d&Ynpql7p+ʫ` *R i7xϑ*R> Vίb97_UB>xȹq&0~$JǦu98OǶ'Vi䒪+U5h;ᗞIG a1 L؃s_ZEȓ#m$5.Slt)1.gF''Ir~c6bu~1H-O{eo=5)HFDa7P޸I[rҴY%=/ߞvB{BҢՋE%d cn7%bqO8`͜>΃"Lq OCA(M OjAM& fC']>/7֡C߳D.: 9h7d`6ugquܑ+J-kdoCJH7kŒٮȒyΥ$%o|%WBnt)0;rI4nngJ_sڿX?Ajw@yWw}g}F VG>`#`ɤ~;F&;/Pbm{N5n0}Tuՠ+ه t>:΃&e 6g+Vz%(pE}Fz3!8ɕ6X\&}<&|[7:,) s5ؿ~xO`_E~9%-*(|vAVQW^o`t3v81C>  h;<5n1i;j;x=;qƒὓHt7X[<=[Y#'Xq[qՍBf _ӤXLT-U6n7ZŻ 8țZ$l/,'G&H8^gULMdXө9ב"Nݰ3x۝7'ೋO t)5594xl}^ /< dgYxMm7ܤ/Li0_ @}q+`F$`u6c@Bz2\9 t$sj{|ڙ+Il;hpX؝Jʾg_٧}2bo'hS,*pG: $hߎ_T&eڵzxFZ @H8di~$e]Bۢv@\{%vlΝDI%F9 &jNx 6$i8n'qշDb  R@~tt#1^wep|#=7`a:oog3XA;+x FU5cڋO;j ½:m[ -H ҝ ~+ 0Bbb<l>!p4IEŃw t+d"M#p@G5'|bFC. 
S{%6xx0B:%I&cg A * OWTP3ySVoYR)t'iE Nj ε-'gN Sa9tc*{ޞD+XǗ2mZ7TᅵwzbaXl&owDLz=aGzNH IEc_0RVxmE I 5ߙ8H[>1.eʕ9DZGfXQRJ 8M犈Y\R#109+p8Ӻf%[s;&:ʆ]ʕ %SE#bD5;l, Nmki%U2t+4ݽi Cm16bhc{!+X 32=үT$cFDHs0s(pL|2w77VF$lB,0q%~ ~tTFA`K}vPICX-bT+Y&=ae4Mڧ(Yo5g,OfCTj0Aw=@wUVSq׬ٌ7N5=58%7&=>QZmǗOکi5@|09K>F'?AXc'HnUޞv߾ߠI}/Za8B [Z <2^GsJ(FyAu9L^Z ['}D6CWb5f|N%V Ѐ#:27K{҅ڹs VTB20&uhNb~@dMrc#ޅ^ ?1F<`Xwb rt+ JY24Tk49t zX#C:!}#_ f*i~:xH x, ~f0n ^W j+p+2WJ7 <*,7fb 1^fu5؝L& ,Pg5hJSN4Ss+i(s%VQ s|J$UGruRof ."+WZ,8x}.[o;e(sd`l2CaxKTR{f߳|]fz{OV uFh ᑍ/=AX\{k~\{ll$P4+B]@ʚ\T'-gCwEcJi;X?OqSS# ka!ǦJ3WUdBcbRCPr% (p9WO7iwWvi}J{Z)1^X$'p F#~k"B-WB`Ţ6>Utj%h_I4>DG Sk~_cLNS7e®?Ao$8V#J-?t:{_e!W yˑ:=UoL P} am!T ؽ{$sDDWJ@+zO_ǏuT䈰Ba"_y<@o^ۆ|74jC+dK4Il|xDB^xhʧ(LfMbe9͛4TwxyMN퍂o"Ȧ08!SgyZ,HI`ɿy-G<"بQ=lQ$;}4|^@<:΋Bs>,tl\ ڙ^zu oKMrMMye#[D,!7S_~&$i;i !cV\[W\Fw;C ~Fj \i8Jǜ@Ir8:a4OF s~!!3g2hN'z.B* ,[N4\"˨ *0"Ws/7:f`WIcM Y0Lq{x$O3 <4`dNh%P3gY6pOEBJtb[vf }NWB[D>#gy~SKp?ľVH>7ӽ6kTV0ww1`N>B8s&~_1v1-/Kޢ^6Ns?ibhG'zӨuBSvQCŠ]C"iV"T 3đMq&uz~$,PMhOPD'ɝHQ'_$ ΣcHFnq4,Sv5’H滻:CV3v$ [(kI%^1w`ImIyp'ϡbׇ'8:c#G9tu"UpX鄥 ,zo؎ &  Ux;UBQUQ(S?> 5Ɗ-!S7 zJl8\7Wϙhd.TnbQފ0BİpN1lK`~)F94 EX{&B%vQ`8%d#gH CiX1NisQ>+ <0@Ĺo@Y^W!0sb!AXT}i";|F#ݲg7kEm#U$@hBqj:F,dhqIF65"&-ǧ'HI93 Lۨ,\%, DUo>mOjp*|'fj%K#c7|tg~8DQrvLT\` ,Lw'g*pVXrRc(@F{<~Dh>^wSwԁ}DcAdS0>zW6&ZN]d\"&pLDgR}MԾS_\ UߟO9r~lf5F mjhYJj< ,Vc#y֭c*!D +,| >= DrMZR*j7z鋚IG6V? %Il9&^[؃7xs=thX({#dCQӴ${ysӴYxp9&C;j@7 v_\gHȖ-jC8(7f{Ɨ&$i0'ݶ\HkN @ ^bs~hlpjM ttNϺQxg2 Ғ/?~믃Vns°ws'L`3Cyoukч1,٢ xc-6?:,kqGhb)#G%_8ǷNY*JC9 آ\P 9griU[t^?ʊ5jd@KO/ڄR)K=aK^/ڐZ1&yi|7[M[Pn5T-N.вim~v#}9vZ#>}ZE M ghNSs-"Vbc)HаGU3Nc0*e[wd$'=m͆XQHIV),RM3|j F^\cHoL+6j= 6Z5[j%9#pPՕ=0nϻBLuD5<*HVlQ}Fzmܭ|'zkjrጰ,a980sOBI~#E0ͮ.f=)rhh{ hnUGW֟(: Y,т\JZBWC|pm8`GRa NU>P@-kǏ갣b̰J4-Q)aȖEkBꭤOZҜf-:KB/FH*; 2%-M;Iqc/Mip J<Ɗ^"c  C >z#:+֯/ 5nk[KJg*ɡ)[IWвqft&9 rJI7V뗙TJ.ÖP g"xAd(r9'p)#\\n/֡ %0ǠaLaFT?GU&ۼe; .,'Ĩ0fEoZ~(ZFNK<ꫴ1aN;aG[}< A-`C- ~;,;PӢ./j^bcc! 
qo0 }z:9I=Ѽ.է &AAA P+~*8'u U9wbӷqH$0oR.I8r&)9 ִ(PCm[O&f7I  AG`cP¡,)jwTRhI#PD#6) #9 L4F W6d b´.R;:,fZM4&m;^Sa?|x4 #OE^訂5 (PFͰ ؉%xN#kQ4G"˼W엙 t9,+s㢾rH` `"!(+P6ͻT?]<@|l\$)9ojhZN N˼/2BI#̈́>S@70kDK zn+=u' Qzސ莪l+d H D2:QI tC/G-ļy;T=6p5:n=$J%=~K+y'Ӹ4Wn(/1H.*CѧwlŻVɫƄb=2I@4xWrtl92DF;Hxū)iL? U_V.'|/JSn |Ƌ/k-b/" Eg9NO-P^}DoAq;w>i0I|Cz;;-R9>,xQȾL&7)gdszyLB NVf!HRYƹn_~yn{>ϕ#PNT!xUOYEuby j?Z( ٲM̘@j 3CpvlOs=QiRR6FFojOYT%#{QUI3$IpS`KY0}Ǒ`G[|XeaBc@}eM.u }}Q~&|=#Cz6\/ Ձ[ F_%Rލm5yTBK0EjM?>='`Nt>XN:.#+bWC14;k~Mb7|\;DnՐ oݤu =w¾WI *.pJI{&t4,0-_8+))^eJ:qD PB`7vaaR{<$ =TkazXKRzCLU"A@ɷsʳD1͡0M&lR!ibm({}~E6IiQl+YC.L?M7ID.9li)Mh;-}=uafW k yl==ǹ)7V%5ؿqlbA."L#{DhQ »Uh_5w{T T)BФ*ף!yfKݤ#e.rp)gBsD\a{P]:]̡ӯs,>Q_k 尹1{JSRtcf2U0[.F DK>0ZZ#nt28Kq-b %#T4[vC ͝D׆CEUF0pK !D:QsI ZN:>J9Z PoiW@4j[cܜzڅsSN!O#`Fa؊U{w9 tvێ . Eç]pYũ.am ;0 kEx @PBΧJՊ--֏G`uHROk#V,۞>Ωlk c` 3ީ8g _ꤊajx* P" +x d<`2_\B_~7`W;jNYmSX:20$Uy /VLiȓr;;BK $DlV\:0R> O`AFm -lbY`6܄WtL{؃\S\e{܋b)´-i.(_tgT4R4XI"5O 0hd֗,7 <;x}ERF(V~{ 9 ݫhC-V S-1{|DU Sͭ.< X1G\fϋC0 4^=-4 XJ3Ί=;KMnR%g grôJzMaP8mA18'_q tR Zr2>Kٚ(Fx[VtHy5E'@BP .]r3@5|g8T5.J ^4]Q&4桁O'<`VnXHۺwpJ"Eax\JX@ iQ[ParSP0)OM&sw4}thx;%9U{㕚'P UpHJ3n)mv[/\Gj q[ijX" o&5}WdP Vny-嚟_/Q#9`QT}v2 g~nG]dvp8a/:r{Vo[%I\N:zڕ1, 'Gzx9ge57 ϕoӱvXchru+&xF|=컆 ܶM hf^ _aT`EPi8뷑%P&aˮ$T-.B Tj.0S(V;%PE~O[ANYtjo+ExQ?x~d(ʦ43V< - W0ŜiE6Qx T" rbJRl0BӾ"]v6N3-`J('PEN^KicR\ Y`)ld>]68i먾v`ĂD[nhU ucT,hxr: >փ]sl.b[^ =Ȗʎ8:+swirXja/ŕ=QaٯEr_װKx@UUj=#517e)u@ >3aZ*gD])nK%JV"ӛ˽Adݔ0-&`oYTM%7F(UפXi96<niV&na8K~%ǹP0\ZKYeKȵ,g@AdDs߃}7˭@W[]05z1Mw[eļ\r@ӾMFvM; 8 O!p_KвFGgҤ6(ѢVC"4`﹣:uS#mBj cM1]o֝޽5yߩ yZ$.,LC)&F)HTM7ܶE(vYPShbg4:8荿}Mka}IFR& }*dž]ecuJ:K18I 'hw0^JT~86EHB5nœ@NJ˖[Th]ʖFMAl+aPoYw豆ۥu{HaNe6I#͂v1ݤ7Ttz\f;݇"I/'MPX%'(5eƦe{ڞ t &dJ`]vI.Ί ;R#@ii );܍,Yc=PZK7[y(R?Ab*Y9ȱ1M̑=dDx{[6ҖވL wɰW ^ #3Sji+SQ=vz F5+Ci?ď7rv5*`}fQP[ ҕfwr~tUuvu=grA{,/%֯U '&)cEԒjs8mגfJYC;F#pEC5pe8NX7FAsL[% Çۑ+'A gO3w+A;~4 wq͕Á Q\m!5IeԬE0/{}'^RXL(rK.p= ,ےBm2wDJq({pP{!ܦٺH}ݰ Y_5sO7C(b=¯gٶ.j龌RmdoLB+?]a64jw%h|Q]'@z$:M{"ƿ\_bZJ/bu}Vʩ$f=a=RZB1y ^sI۴7S"ɲ&g9oӘ)ǎ_/W??"?W~GVg-oۏ_c 
g???-?~om?(?__NiG_<򷋛Y,U(ϟϟ?᱓;<'Jt2| _-RH>~O_H|NMg>\?3ώj~wu㴄>'owW>c?G ?엟- ?矝7kDŴ?쬻>J,{w;+yYr{$c;8]>p 6+|_{<~{?>__Eij?Un V^䫝Op/O)f./-sy9rm'*νD? ??oUs板^V$fgLןX5߽w>dzܿAy~˯/#mw=cTPnjAO獎=Vz`y9\Tg^nywn(?o@\;?պ*G4TS힨h V(뎣М;.8uGѡgi_y*._\gtaAKG7g /@ endstream endobj 505 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 495 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-058.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 506 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 507 0 R/F3 508 0 R>> /ExtGState << >>/ColorSpace << /sRGB 509 0 R >>>> /Length 1770 /Filter /FlateDecode >> stream xZM]_1x3AdR RUs "(WYt9]]ݽ*+>~wOo?ߕ7ʗ|||O?r[}9ӇR)O?W?g{iWy)^lcUǼ߾~__Gӧ2k,lܮ֧yTӿ-ߗی9߽xyQ݊Èk|[l8.}D#j7a{jx̺bY.WM0!`:sq}ݤ͝9.=lnC^'-`r#p1XgebVJjf5Mvlxh,Z[.nہ1ŨyD#6GֹyNCm/ ?t >3;JHM7Bq9:CqvrDrQ0àQ`&vVw"$5Č@R3&ɐ|Gnt7Rtǽ)NӝzT(!k Ff݊0sqp]"%גStk \!_-Nו( @i}:~)n bӕ`G%3cI$jUo_Rg !dBϨS=趚Aj֕x-lU [é6QyGgӭ;8KgW) v(#7\8g촸x6ay9F> ܺC1vCѶ"sI 3>;88Πx_#%Zڀxuj%]չQCa)j ƷHN`F{$<“1)ĂQ~5Hp_]"쐽~~"FѮPN_Pm M D;uwTtS+>C3],O*`JÓJr:P z ft d\Z:x cx,8U1Ky;d9gǟZٓt DYQeI_Z/"@F<'\80Y) Zp !`)݃ 88hO"pVҭdqpTX>iA?<bMQ2ĭ-jg _O@3+.P<8i ٕjrlqޡ~*56BJlcpm2&A7j/Y-Ev 44Hj[jP qה"W߮n>$kq8 8KOr2D)NR#>C`}'ߩjt;.L}\0{S8-+c1td' ;%mm$8Rk!gNS)~n Jm29ijWTV,:aB:7q{4ŕhdvNsxT_kDzA vq]"+L bm$bt_i@D* $r`N)ˣץI$Xz_>>@WO#~ endstream endobj 511 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter 
/FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 516 0 obj << /Length 648 /Filter /FlateDecode >> stream xڭTߏ@~_A|D>Ƙ> x \ο)l=ꝉ!ۥ3|37"@,ݨ@ktfY\$I%7tDFMbAޮ֗: gv?ŕ)LlwpDx nW5D߶חSɌ"g?Y>Xbə6$0"! F U8R6LmEpmXvܴ#8%Rd>{ġ !}Ei[\s,S. wNnziQowK@MF7$$gd K9>UiHD>N`k jhk땲$#UHduTFIʔ.[yVyJ0̩l{o2M8"Yԩ02[}6%/ʖOtCHҵH.NB%Z6&,D:/f6rrgi1WcMbtz'ow0(/)A$P%o$u%6\\aT~XGۚUnԓнCRMIֲ5;]hP#v\nVCS̖/:fxP[Z;r OJT*͠IMM*Q= endstream endobj 496 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-059.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 517 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 518 0 R/F3 519 0 R>> /ExtGState << >>/ColorSpace << /sRGB 520 0 R >>>> /Length 1607 /Filter /FlateDecode >> stream xYM6_%lL؂JHDRPP-k'ό}ĥx9snfbuqz{ee?9X?eӯo<7/^&;M_-noޚɼ92xs`~}03Ms처Ou;>#3Pǥ_zqoƃ@=u\#=`=u ,_u|k_0m:.M2~2TG~[PE6@1zh2N@wnk90U Рȁ@TGcȉ@D6@) PA] # s$zA0KCA0Kq%SaHV#Lj]()A0 JJfxJ.k F闦QrOe0KQr. DХ(9Ar DҥY! Af%0 \gU$ R*ʀ.FI\T!]:$N9h F*%IBI B*sCOe0%54rN)1ti5rN؅\V49'L\VCtW֨A0DsY BvT$dK9 B!]Z$N9 B!]:$N9h F!]Z$tT!€\()s)1%'\C%'B. B]$G. 
B!]:5h" dVʘۮj$![AeRХ(I딃 2K)He@V$ ]()AHe`.Ts)1FTr|aHV@rYe" h H\VCt@ 4hp2+A0#Ii%IȖrPC%IprPCt9IprC%IBI BPR2SrYc0R䛝ReSrYc0RХ(9ArYeRإ(9Ar 2K B*4%yp5^©IPνGf+}1wpy ʻ/F>Pw;eq1<3PVѯudFjl^۠BdǻY3kf ?Y)F_3 -dVDɬ0gɬ0Mͬ?Jfѯ%^E׻勮w_n~+׏vybSsx~OYo<ߞ̓oezWv߽Ll?]{r0]ȧt'^T/qY~{Uw|q.a|?Cb~<̧[zLZXۚszcO hyecky|Xy7X>szqFӹi.]xy#r|_r"=-:~0^>{{g)u endstream endobj 522 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 512 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-060.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 523 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 524 0 R/F2 525 0 R/F3 526 0 R>> /ExtGState << >>/ColorSpace << /sRGB 527 0 R >>>> /Length 5461 /Filter /FlateDecode >> stream x\K^7n]:3_3A[.Yd\E/IP:;ucY1R<Ļ(Ƿ/,>KBw?oG,APq(q^?޾><<7oIG1|_yx/?~oANŰ֟#KJ߯iO}b6_{}~ǿǷߏ/,KbJGJAxM=+J,IjJ"ܯ$-b5#pEz2kH2dY 2I֮9-*e9_?upUYu_+媂_6y\U#ׂ#;6"zM5^W"~HW7ȇ: p5! 
-,?1~jW|Yv6 WkIoʰ8N !nJ6 JL3`_>;ᯈ``g~E6F/q{}JX{ng}A#9wL,%?p'C1ѯѿ{F?s |/mG׼.b1e?{8W qo+(r~fݿ5g}bnk~Qln0ǘ4 f'=޹aS*PydGDK%H%[%O#-mHYjGej9JQB5:إRE%zk\f: )f+ף&ƭ|ڊ=vMy[mWN-3ZVnwFZ&t䲰{޻ޣe.w7lXZnZn\n^oXVZoZVoܮ{k/Qx@C`*,{z>=&).7=:3|^k=e"Sq(8pEOg"H6}B~]"w8]Ën\q Ў$7xtuYyQٸQъGe-d${APEs+ϯĆ+\;w>Vpy10r2_BE ǯX6-.JKb^<Q(%rb%p-_}/dH\č1pa*1ѐcÿ1EMm %fE%M+ٸk\55I8[]W`ts8,kM H3o K>ݣ%xug߅yskt8ŵtj8 R$6軲7уty0JX3xl]'Wy:j Sib]< g@F\^|M-5;y|# J)5W`+M+JDmcvuysySo |0OyU̶OBĉc*h?3q29cxes`a׆c8KLW28&qu~e`9ںp0%3N'ciU$ =G -&p4HLfWѩ)9'!=_P6ms,+ZG2_'љK| W.SGg_49C7szRX *m9W`_PͱO<,Q0u(RMu㢯VI]~ ?t ny,Xs Թ*'jiqs]e޷fP<ڔzDvy=\HݵiFǾk KL^LLԬ&ś #B'r_}uIspm`_edgEb7,2Uw]> Tpy% 8u|<SõESp^Qq WSe^rI视*i"+.GVއl'79PCǃOMmI*pEu*@%NP7(oaQ %)ّPhIBkI#0>WQ-gۙ-R88B, y%ja *ĺLf5B ?:{ÎrEOȩN~'ViE^(4v)>ĕ;'`RPxH ĸ7-wIXa\ASJAB_yimEG#$(b^y ҽ{o _ lu%:j\lV[XJC:=_Bj`O y(^,k/؊$:t.CayB\3,l,YuPM- a=ư) [|fK%W˽Z9ы̽^]za`O t_MxH@S8Jj`)H:j΁BaF`V~+ց#it_yγv72b~W ?x"`ͮ߁-Rh3h;ЃѴZz= 4&PG?sL=欜Ձ-Rxqz8\Cs;fa&/;CX1K1l_)Tn k6/ 3;>5rņJݭW$:HRg`O ˙=x>#quC`_d< 7N ?+Ĉ(T;xD ~ڨU7Y3{#3WK;ULQݩRr 282&j%ͪ\Fw`{;]<J7O֊[ԺzHZnxƩZLWnDG& l„qX#*xMZ:d>V`A(taxۨ]5ϡ}6Hy*vE덫5[|Y)e饼j-dcpZ0|94xޔݼqL{O[י[YhBj7M36j*W5/:mTO~7z&-[~Ʒ \`MÁ-M,f+]yOgဎg(gd,[mZL1u%}ދF`ڋ~W VO^HX~=y`) ڽ×I; yTC7Q ɕ O#+ka#/߰W`-5^ j/ЬH;So gqhyEb1r=W䅐kX, Y, dj;SCgiv'|8fpf:Rhwq%]eiR ~Ă5E <] n _0Ϧas>,lFy֠#iE^[gA %P.Diw$Hs=[= Qߔ<-aہ-ΚPh5[|]FF ŊaP˸.s@]Է0+J>`[kް;~7P:_Af$O:oyMy}p6iO*I{b.\qlAj(@fӕJa lA3-ISJ^/}|<-_=Nyʗ#n~~_oA w!Ĵ|ݻM+2J# fZ=z$mw\3! 
);߿y 7e_ :k endstream endobj 529 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 534 0 obj << /Length 637 /Filter /FlateDecode >> stream xڥTn@+KdS T*BP԰n4Q8̓п羜HE(d3gΜk$:$ƨcQ&qCG`H&Ww Lx0!F&a_ ɸNMj!87XoARQ4<͢5PS a2`K&P62ʜѱcfPj+l%Qɼ۩Ց&4΢_LtNq5wiWa8݊֏e7٢ƥ5rI(S )7Zt..ȽW]M M4rU*`*q+yw.Av ^t؝vY@N]d`!Iٰ K~ 6F׹393Q΅6jYy;^JySWj-Pq-D-1sS bvVu #iIʹ6yA;VēLx贫u'ݎ`yn31*fN}v&@.ʭ|:iZ*'~o ݪgJW4q{]ܩG g: Q} endstream endobj 513 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-061.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 535 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 536 0 R/F2 537 0 R/F3 538 0 R>> /ExtGState << >>/ColorSpace << /sRGB 539 0 R >>>> /Length 1765 /Filter /FlateDecode >> stream xZM5ϯ.ʟW"@B)d%S yM{θw.׫WH.HWOW?~Ux|K1_q{Oq`{ٞ(Zx\f<~?ߗ՜n%*>??v ^ ޾m%s¯7 M6),|ǜ4ֆo7e9<} = iIQwjO>}k@ZB1gL9}4em1u=v7jC \=t/9hS>K@aR7X KK@10pyn<d:-x+qiIUہT $cR]  ̑ YcX8RtcvusHhY . 
[oRJl.돨G:fua5yMk4 h| 4ט\><8OSf*ۥc@!N9 3#iu$(^pӊrCy?<wL[P2~ FNOfuB3XmB|K;c` iK12Dч޸,' ?a:>KDŽCuǴͩ0u%m;j?B\_ 6VzIwg6]:RМ81c)VզVNT2j,BMY?./q̹I:6' 9avp4ߒH嫶<'dL^ ._\HsuN̼NJihQI8 Bu%$Tg|a6f9&/zO2 [7)̒fco._}W;.~!m m)]4R|VӡSLYdO[gh 1OzuJ?B?x>\gM.9/vYX3W3laҡ4GBs|fy JrˉGZʘUKG4 \g?]t 5\(yJc(b;Gc$<>f{@axGq1xW_-vƘG8G5m γޛH1fjhdzlQf]dሚd7X193v>{!=9Ǩ4{ sU4[/h> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 530 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-062.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 542 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 543 0 R/F3 544 0 R>> /ExtGState << >>/ColorSpace << /sRGB 545 0 R >>>> /Length 727 /Filter /FlateDecode >> stream xXMoA ﯘ#\x>H%+q@ Z$}w7B|!Lo??ۑp$܅o^tv|<\SJx8V?RzY%(Z‹^nz5/3Z{i> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 552 0 obj << /Length 710 /Filter /FlateDecode >> stream xڝUKo@WX[7V*`8K8l(ݱI6Ef흙ofv Ox:fDyZk+#E4Rkz~,$Ё|Ɨ:g>^v1K˦Ww ߳wDFR4ƨ?B-߂v]MTݯI 
NQ9a𭲒@Km<B^CrJt[@{y_P&(3nP[ܕ_@yf6 <KH~9G77Ps|-xjZ"q_c"ݼ1bs2+)3Fjj !79lK%b:wv鼃6pb:AjPPP89"7I=:4SeszDZZQ#} $yF5Wipt}}I![]d=AyROF2㣌w 3^*Τ[ } ,Qbo]wxS`6`*{ N;B⭁ELW. !}:[>7e -bm5c]26w'Di~!H 0 cP*wZ:r!C{;T†VEE>1F9X'fOFۇ'#Y$y.I:SI>ה`}ŠVѼԔ>ä SSwM endstream endobj 531 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-063.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 553 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 554 0 R/F2 555 0 R/F3 556 0 R>> /ExtGState << >>/ColorSpace << /sRGB 557 0 R >>>> /Length 14521 /Filter /FlateDecode >> stream x}InKr3q Y’]%1@ L-W "sꕰjp8'2wG_o___~?wJ+㏿?r>ne}?׿?weB봿S_vacjKUwA~߿?o ~Wc?+}?1뫷Ҿ~#}_.{+v_s}8k*jX{_Æ1w5\{p.ߥ.{wO|~ =7*[*c\okQ~m .ൾ矀m:h?Z ~L5lz4o}o N c۷,#l?*x#;lvA./z{Z&yӰ'6Az΅_OJs:پ>j᪌ユe= ||URs0a&]CknTr7oM>6m~'w{Jq;-Si>^zs;n hral*fm^% )^8+ÖV)fis/CuF'v7=89glR*ܸ {N]"Ր6.ƁYPY#e[.48=ұK+qּYarnsF$5hy!4/|"w7,4o޸MSh^;/e~0Vx WUcNq렴Ml2B߸b%sbۨ,vtay)FC],5$چ])v߇2w}7P2*N]XI^u6-*W Ful\J0jrjg6.B.FΧN:I<լHW)sQijTbGqV"S*ve!Wpl8A|3WwZe)5Z׏ES2&-ѥnlU_7 mХkͤLS!VeW<#KHe'|G$ZgwƚHec@(}jLxa%HylqHgnB(Ȕ CI3jۄy&uVRQ{1:$jSf2C:)GY& b@/^u\4 .ii@3ռ _z}2fH:LY gפ5)Wʫk/SlH>fWK&-ҜA XW(:BAUK sHmvx_b:Z{m&R(ޕ{ ~]0w|]U 8LS! Yn,4IJ^7Ne!gOu҄œDx—!ҩX:WJB6mr!iKU6Zѝlo#ϣ9F uh8ju8!K.+!.]ɛ/Jg"?.,~CJz׀$5aU@X̆&7* aNmp#69ܦH:T7V&-],3>O%iFޒC~7E[L6 aDj|VuUj}M͗DSת*w]L~^Mu_+ΣJ{,Y{^KRO˓B]Uè[&/IZ:A Y!ܸX:NI֠ar.{bDnvWV]1 ӆ:νW핱}Gſ}_.MH \'Yb _7RY 93[953DG67Qqa#:@#հy/9.#ZYBf)ss{TqKzާ'Tw#zRLBX!ZGXB&$u)Qm+1ѼB:Ej|]1߼JС6yרAZrL]Y iELm=T^9vtgUOޓ)_*z^Hm HjМx]sÍC,( VITj&%fB=8!G4LFr;#Xξ-YoO] `U>TI!?oB t9N#VCR|pe(UW21IJ \ID k44ay \9~㲑 %9OC &F/vSmh>Umn}`$ne*To .3~e,$YFc0xIٿ.ΩSu$I7.KI*nu9HKcN=8 =Ɛv4֨CߓC.c&Q {\KsB6I rc]1*S̚vӈBS1]ظ-i&.E,]$]#ԛ+b% [vovdq`3I3Nz+ۧ#f L$*?.tSᶻd׻gh߸!wg|鉴C1v$ Fht~eEto!Ȫ:1;*+4a%yv^v^R\|A(y=Q1Ꭻě! {9FJwj@+(-bBg XjZ!V蚥 [Q~KԔکC!HqGdAOl..ΩC ? 
b2<.Y4>ਥMD]S}Z ;Nr6J\e.{a LޑzɩבMy" #Tu/,sm<ͫ$udö'ͻC^x^<]xD讃͊J^3u;ǢW*VRQJo}~%VbP$H| b$2nX#Z |襺ΐoy9%'C:BJ1gOU8<VV VG8:Xe+U6&|=k6^ASǩNZn^lW&#@Bl}䯭dL/QJ&@:q#|cx%J^Y ,|Գ%# E-oR<`𪢴L gM+1+-.yiu#tn nڱ\rw D7iS2i%I!FYR$S/_Z\FP:)tM|ogR J[7YEqJ ZڱAMP\ʫ ;xw_nd/F%Ղ{ޞHIq*_`\5 p6P)Uۇ}Gsx&Jbpmjw҅Sgt"/.r)}g0I .:7ft񺑩Zb X$vW/3>uL2sȟмE}/t̗ۮ*%E<&AiU(8tvD+dzTIaMF*b۴'x3iɌ:Y vbZWv{u.;wֱV"mp "nn1l\xcs[.,JV>YY=iNzұ!}bp|XB2~O+S<i*Z)tPˬfH+7uU+xebɦ:Em$3,}vlDZ7]\%PX:*s.vҧ.tuж=$+y*O QVkA*X\}#?;ڔҧGiHv6"_7NU%H d)\sokas a}.&-\A!M?+۰>/p^^tHW,{j9t,_"i|B )4҉ѐ8욚ϗ.#DKa7K_V;n\F,%5 o}$zaN3ƑTMތ'^-PוhW}!@Nͤi0 HgϢ.llZAjQtoumg@ܥ\u͎M6Ҿ)~.C3Zk#y{jx&&ٌZ^5rQO46OAD4GA]E Gb)EfeIDh+|[TG0L4(F' +vL^ 4!ċ0bD+fIP_Ur䯑c+(T|[԰(CDw׳c]+ǡ WSo@uܩ301 >PK|TWd?Kt*Ansm|J1^eM(:Ʒ2j,n,q|%C/i%$TⳒQLq=kV %<*c( [?iIwd$ːBD:udaN_$(eqZ,ҦéC4HYu$_{i6*ZO֜z٘EuPmB8*IEZTUqZ[aYS<#t2L݌Y}zsA}51;64Y"MtEY $ ^ (6SKզ6QBT!ߴ4(#|}xJ`4Yv4{`LZ9`4{kbR4T_5c]>rq i2¿ax4fJnPm:iV3cJ ^h *s%fY;+9ئ_܋V +t'bN_WMϞzM8vzIצMWYMm&E;FUY2HJ_WiF)bWa8#$(Cm\rU}SqRը3'JXԼ2NrK0VЧ9hFǚ`ȽaהfN)[]siI.)0zb^:.]jz4^$ΚJH֬4#Ak~NG[+v"I;ϛTKe+fi{UOYfjL[Ig? 
RTjf<>Ut8v!,\egaJ.Κa*&r/8*v;vMWQR"2證:`4T5@.ȑ>Ǧn*#yjK@mz [-y*c팊"!qr}Dk^d+41m/b4 7%Jŝ*+ gd18qZޱMV%Z$Iܨ(QϼᐥJ~RE7;,-2=-K[51i$vɶ8{VD҆y4j4gU[74h<y۷5QYsX72mv.WbpbmE(JWc28(7)% quǠ?m@|/ Žr`HŽ\Qv G_~ :F٬N R8Ђ/,dtzt ?Yd^G|`{O&5[VZ,ց)clߓ- G|i˨q\IȪQb) pGV,,[G @D<>{*9!]ZU (c ] CX}zc1\;f-*q8{,^OicozUY L-Ja8X;=8yvzgPU_8 36 `wf p*+V W/6.z`]tt pĿc@-JmO@(c@rAb$|ޕ}/2Q '/{L]6~IX`ŃG|)6s7=Ee1V p3(&BkoԼ`G/n sy cԉ^8vp~rX#So.p/ovYڥ{m/e#>zh4bSbM?ZW0&FҠ!# 66u1Ҿ?D1L$M8QA'} 7WȬ 0pL|8u@NM'IT:[L{ 1 o3"Ɣv#>z>lŶmȵ0xG|S-J2/w ,*Mf,ȉS[ݽ=GCTX^W%Ӝce0wonb@=&a{.S4j8u5}pcȋ!bg ÂQEB{nnp,뾘z(#v7+ TMv/D5~dJ̬Zŕ 9⊨';y&,:8X?,ݻ٪{1 }p-#]X盠/V!5JЇ 3q8J?<+IeY q(SY8t13?} ̕lO_k)w/p1xŸB"?ՠykiC%Ì:|7+]ݫ,oﳘC&U\, gpY0+EICQNYeE:$zz)kg/wpX> p)|ԕ1{`e4̳,nX{Ԋ|@ zid}?4jGKa ;|JXk\pĿ+ >\o^q 8JIy{7BDK G{K4nhPy#"fvPCcEW<Ԣ4 oskكIbQ2&uҤS-s1Pe-nB*+8{\pctzH5pV_8,臌7{oQ7u(y=(C_D[5->ʢWռzƣjd:W;KpY0"4W)> ߰>-x'(;6/.ҙ^~'|?}~ef-Qn8aU~0?xd==3{ yr ^0|t7+0gA By'm_u{?:@}W2_^8{O%T֨^5H;rJs&;ߤSi¿/<4㬃=̐@*GGM(wU/{0@lT/OEܥ6#%7D}%}5y\d% pgGo+_YME8{a֞<6wiqnGNsvv>2D;ߋԠb-`}=P6q ȱZVx= vRȎ֦tpĿ+={@Kɘ?h {Bco/&O} Eal@nO.޽wŊ}~O5꡸nO#=tObE\:#,R;EGB=x$ř|f0?oP K'g%Ftk_8 ! 
/ pE9ؙ8; bS!Ls|I0x87%SQȵSܤiT.Ju\B9+'0:<݃сZO/ozi\{qu'yc^8s6id7z" 1W=xKl+XxڦprN'1J†Bg>~4{HEGvsG|Cu*?yNb]pW,^ƪ8ST8{,Zr^>}zO|_*R] =o/R#yH0 JIO fA)#ݖ\ynKNV#b87gqja8{:fSфwAA4uyֽ>^JfX{Jf渚N5|<0o/y܂8|u{e`qUWvlGs*Ÿ€?8TĚ.U.>p~lf!bё@D7+8_ e\ +%Frwg^˅?jG:'iBH7pNT͉:h+< a?rL֟PD^8;CĜu$wyG|d];k { B~1P77~QfE81` F| vc]|Ͻ(tzս?^u=OTe58`e"]:{tjL8$F#ςb?- ݭ2ip{ܑZXU/o=П*Úw ()>GaG|VWQOБ:^8uYrw"\A p 졒)ݫ6cV8w8Q' G|2Q8-PCYK/#y2oaM~8{ӽ6wsfk=pĿ+~}+ׁ|[!dN/yqy{ ~YrY  oVĿ{¼pH̳ܽ+?z/ʑ@kzdG{`iw3G|MV<ŋ1JuaX)G#~U2c0h G{&r8\<$w7q >zw;m~vo^O5נf *t Upď=}3SCB{=Tʑ؝K⚊TCHA8+yigٮ 0o0/-ZRq|w8\IjT =υ?V Qn“ #YIl򸾒p5g:('˲d>fv=~$4y.,pĿ/,+ sH(yxp1nFɨ)G|_8t&|0"{[h=k/pNr\Tg ;T0;[S㙡knsZ`@f -F9KmIk#>zWBH&n}chLU|FWa p rv!i;fG|;o¿jܵE :'"<޷0O| ƒN %Kr#eA+5Ҽց#&v Q紒G||:ĘG|%h+ Gy0ܖTyifG3$aD`1/0\ZJZ *MR#x@t=i]3$NVu{Eߋɥ>-qJ>oYΛv#ឧX{ Y(/_8{~0}8_8{ ryt&ѫmNDa)WbSZe~{I`։NwF DFzߵ G>3 Nf~,0 ux04({ŵ?fgpgx|ۀUs#LJR/X&|I^8xNK>)t$PTv#0S,wO-f G|LBj!{NXZ~Fw a~r8XM3F /Wj#|^8{~V=m[Gόo|o2]X)^JpĿEbu14OTh^mw)X>?|R%DG\N ^OG3j%E$\Kc_0b_`/18tNZ9ѝ'sgN#I\ěcPT.c' oQ qΛ#yF9ܛ1\:B[;UD%%% :G|0芃!QQj6])%e?} NoT^PuG|C}FoVxM؇^8=wܱt.]h3gwJ"V7ecԅ| Gu{:pܪQ9SvyxtZuǎpNSʲ-1>ub 0}cc@c(p#g /} 0RS^DPa(P9ܞ>ӏܝ w5#7q"0r/w7N`O(ّln ;Ĵs9lCw{R ҫוÕ FK9fv.k嫎_$ۍMiA /]m#>{sŜYc%"~=P; O'ŹjD8,9M<< V1#ك$/z~@<ܮ Eum0 h,p/00?(Ϣ, ]4 a/rnGw{q >{=6gA%I+e#=Q>k(UU=Ѷ; p kg\ZSu/z|mi[Q,!\f2di' i89V*ȏg?~N z zugi0 rz+ˣjPD*@hyPĈ;$O_~sW=pᇧ ~;M3w)?;Yp1xՁoj^/^8x~rG?Ezߧ<gW+d6UWGQ`0 o?,꧞>Lr/uЁHu*"0 uap )#BzNXP~8E^R|niǏ#czF Hb?ﳠB*r_dR#drp)!kO)ރzNZ1ﳠ#QFi`o{p1P|}fu:CaN0 cipteU|['XIPr5 >rxː%8 ޚ&z G `@چ>~s6rs `@fk4mv~8ZԽ`@F||ZRu-x̦~F!8=(*Ku"gv?H~r@i+~)3O5.Uo?)D: endstream endobj 559 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j 
MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 548 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-064.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 560 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 561 0 R/F2 562 0 R/F3 563 0 R>> /ExtGState << >>/ColorSpace << /sRGB 564 0 R >>>> /Length 6423 /Filter /FlateDecode >> stream x]Kq_H HEx!x1e$3S4M #٬:ux|u/_~w珿?q~~f?bcqx8ޏ?_ǗLJ^\[߾:qq_K㋏x?߿+WG9Sӷ_O/X/>m7F[Ooo|//KK77Kk<ƙڀç#nB/G^o_ڄlo.$eu?j 9V~tBX#^?/ LMۑS>m=HWV3rgGVs'꒕k)dSn ;󜑫)6a/kS9VۍHc:SYRNi0-4 v?嬕)Mv޼J)|7!ۉ+pK{h(>4QT]elK^/m"I3(,eh(Ḛ`8z]߄\ U4W2&d;pU{LȆ)NԶœ.7!z&C39sQhnF̍ ٌhT4'`7Cܛi i.8d*ҋě=-{c`$D H;?LHcr @Ɍel#+O|ƳW7r*89nޱu)ɏ _pSff %Lͬ8@&v"YLTS;a&d/@Ey\;oPX]ee w\ ƻL5)fjG6 ˍWpfd<BgGr)8*!VEh;u`7,41zw>[k7j;B◔kfh AW5!HfOCNW(wO~. gQq6`l.gAԶY3; C-$.xYTTnGȤsTΛXN'i}֛GG]( $)0Wxq67E0bDU)£;}c9͎ͼ]vI9vHadwe-f4 ˚ۣeSfJ]cPrNLKmyrMKm;MY)Sяlv*ᗝ[F|#m]txׁhI9 F:ͣ9 pT To#7RT(-P⫵\Gw6ۺ$}b*qyL0[q%O-w@ȩ]Ӽ|- ~g^_7/`HYBh!^̈́<&E*3YHZK =UwsUdfW>HvkߘAQ4w7ρM;bZNQ(s(=yw鑦 ):'ը6õo&L i|j<8tz-'/|ݻ;UwH\VxgE3wcB/]7-sB)"=MZfJ23^@4[%Šu&B[ v%["k]Jo$u^ւ=Q|fSꢹ13Df.ώ)l&7ԫզ"FI\>Ä3v -V%GUy4 08<W gDOOPB6ģrd:*Y+,ǣ;^C!τY#4(EfȐѓ"Yrك-NZQD)u]\IY-RȽ4ۛגj"iJyTx"*NsӬ,s`5kH3{OQpupgShozP@ ON^EDRyøs -S& J?"YnI@L;4hA$%1kES(yPmj7ǖ%$yQgamIs*[\ɘ:GLJLG6x /}38D\`sQ(K[Y~FE4@iP /۾U!˥ J>"j0swqV_ʸѺؾ`T Zg̈́W^x|PLJ7\k9G$'`MuTDF>+v֦v&5{ #ew@VU"w6DV;/ ȅ6Wtvm Y |ZsuhImԩ^Pa:]"zZ^Z[*kk#uM*3=82p%X}J$LesJe1ⴉV95n9",5c*c6x M XTѨYʳleV*BMq BRsVET[2>l-z@]6ZjQrRJUj+I=,)}Ղ~LÊ],n)Ԟڨ3vi:k/h.lVR#{GṁZBρf_<ˆ.{OCEw;`-(t2;؄8ܴp뢖N<ص$pBpu4I5Ij^6,FCթLK$a;RK9A9%(*00Szjdy>L"q̞իmA ӥpp1l7"g'v5zSL&? 
R]uvqMώXMm}`e1 un OI%˅գ2$ K]7 9ѓ;Ku5u`5I$0luy%l9M㧹P334PuZDO{ɺFTJVR䭆%_8¾9cH|,P_(4QrwVP>*& ־1Wշ.։*+Fq|P+h.$TG4soc`hGF9K|d6QX, 5;iYHafeZiبMƽkgl)QSh*cyn3&#ou!2^.#+ ՜*v0_Ohb冽@CËVhh)UmtqV]/}}BAyXFUy{W!-u{Aloh^ocBCK5Du }Ʋy#BI_3VbUgu;\!-3Z E8sn)f3* fH@ CuXUGj&ZPM'>%9x/i%bbR'dlQ08 ]M/m{_8_S/*]B3+c>6]k[aeԛvCRq!h(W M%kHk( O:^nj=K6Z_7'ײ٫(IiUXq`OTdra~jq8I }p нYY ]-Z,Eg۽{XLs-5̏J>L#QxSj20_mäDgP? V]#CA ߺi3i`61E:Fń‚w@sy{>eRvȯZwoOzk׮@wG)7֪ ٗ3?Mw>B,#⇞b-rQ'Kbak1uN6ssQq8]{ $o([C0-I"0m nS+7p\+FUq AVsK{ rB`+[g#$\c`2vwh>ィӱI#,hk~jIkۑebJFIR$pv3lT8Z1Dq:WK;HZlPoO2XE$~1xݒHj?IJ0*G>7?]#]H(oçBD_> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 570 0 obj << /Length 1364 /Filter /FlateDecode >> stream xڽXKo6W=@ĈC ȲcĶ\KI6;/ʔWI,lɢ8fWtßlZe"wf*MOwe)w`sER$EPO(0F/譻vaPJr6[<{]9~`QȔ(@ 4GMhb.V'3]p}v ]5k4)%g f{aK)cаANFnb3xIͼH%+%=ݐ,H.Yި0tFBExyLqp%ʒ".rkXg#m\+OS6G+%Q.U!~IG1lBV`EvdQ"L&cX6S Pyf:I`xkuc*2YzysZ> /ExtGState << >>/ColorSpace << /sRGB 575 0 R >>>> /Length 1231 /Filter /FlateDecode >> stream xWn\7 ߯2("6A[@  +jAbD<|}>᰽uK1._n?(Gߘ_#Q4bkp B 퇃 cnܟvJ ^z ǧ׻/pr]Jd#~_ݠ1/Xm%}RcpF[-֌IN[+C2qk"`?( ¾4]j8t=0(U^-9 zԕ3ʯi:7*ˠذߓ^IlU]82ˠy;^:$q0š|VAɗc:f7M<<!'a2^a@ +c7D]~i7+Tq6ˢ;?ؑM( ˢ}K(IvP?S^VI[HY`GlD?vvd.Bԕ6Z.ԧ jgWBY#[Y@a mJb-Rg=1_KnC)P!vfW+8qW+KTO\6B 'WpV NȔ瀯'Vpl_zjyħ_FLj endstream endobj 577 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ 
emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 580 0 obj << /Length 112 /Filter /FlateDecode >> stream x3T0BCS3=3KcSSS=Ss \B.=3#<0j {Z(s!LNj.N!\n z !i FFz0CR54cC\C endstream endobj 567 0 obj << /Type /XObject /Subtype /Form /FormType 1 /PTEX.FileName (/tmp/RtmpIYluob/Rbuild3e8026075f82/spatstat/vignettes/datasets-067.pdf) /PTEX.PageNumber 1 /PTEX.InfoDict 581 0 R /BBox [0 0 864 432] /Resources << /ProcSet [ /PDF /Text ] /Font << /F1 582 0 R/F2 583 0 R/F3 584 0 R>> /ExtGState << >>/ColorSpace << /sRGB 585 0 R >>>> /Length 1261 /Filter /FlateDecode >> stream xYMkGﯘtx{>I@@9@%ǒ?ճ!d-}ս΃a-|)K/Śe#,/ˋ~-~S ?,/~R+KmiO&+kpWgЩ!f}771.Yٯo+uH"W_Q$4]eDqh1 כ 7_m?pr(5K[yۇXƜDkܖ?,hF,Ek*/g̬f%ɗ~tѨbjѭGĻbJ{Et>",Jݏ5VPO=ƈp5If]5V 4]J iY =(k%88{AEe^KLƌ->"u֨IHo%qFAn&%_ ^&NE|lVR3!"UGlcB"fܘ?d? Egk4]h֡>Dk7Ģ6-qL]tSfxK ST.5#aZr{,L~:bij:5Nl=v8'r w*S1)8$K%Bɚ)e3l> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 588 0 obj << /Length 117 /Filter /FlateDecode >> stream x31ӳP0P02U06P06W05WH1*24͡Rɹ\N\ \@q.}O_T.}gB4PS,{h 0T0pzrr۵& endstream endobj 592 0 obj << /Length 155 /Filter /FlateDecode >> stream x313R0P0U0S01CB.cI$r9yr\`W4K)YKE!P E ?0? 
J!DH" @ l%r38 H.WO@.E endstream endobj 474 0 obj << /Type /ObjStm /N 100 /First 893 /Length 2070 /Filter /FlateDecode >> stream xZ[o~篘/E@7mdآvʒ+Qf(c l Ϝs;R 6(tQ FhptH]N'akhDL G+KN(i)RcH$L&)RLTmU6>*I8I)I'SB@I "BAÁA'NWKS)E`x8p* ֮0CFcVpP@+pi4jju tD-{ Q#n5'i)Ҕ.e+g% cY39jzzh dS{88WgsVxT0{pعAz9n:=~ $(EӽA$A\O",B 9[Ԍp "mEB mAET!t1=C|BE\' l,”rH2X. uȏde K2 23Q&)AHu$*(#ɂPz߬ڦoo̤^ a⫪~}ڏ]?Ǖ⏫s7]7ǥl3mWj&~_ d._.}US2UU5Pgͺ^wOMfOoϖYUa1]Π3)>RENɽ(Zw럺njWbڮX}-6kDfٷzuW_ۺnvj~n>kfZ.O2LgLu=Y,Y{U`n>ovϛưM_/m3m/hf^hI(+4GŧLcz?n\ Jy+@pjnH?4-akl?fEL٨3=Vۜ- OPE&1]Z ]z/~r_q0F+q9e#w [ک~cj;xn11}O{[e8#_8wgY9zHDt2{&'HN=IϤr] czS~ow3鯒%s$/)Yn{wr,Sϩ~_>}GSNUBLpOꯘ̮ZtCyTuAu_WNwϓck0Oֲ*]Z/ղ|в}M~8^ʝW:&>7=>po?#, @qp/ҷ; PH_u,̯ .Y.NpWQѿAan|nV蟛Oݬλr3# LQj_Nd~8!wR1iLMiqO#7bVH)GP-V{1V!V2N`n0R6v.Y}. o$2&'fJ+S'z^wJM!f(;fjK=-Jj,e-1[kf!fcE)jn r$|H# |D-]cx*~.JKlZh8^b.6p1f|`U2f/guŒ#(n­@X]ǂE&t>tPd,P`Ȋ9U\pS26AHY.b ˬ3tY#yߧ3*)%kS]6sdb`eSƯØq|OeeYSa1mJ 7su۬pBK5s)239?v,zjznwh+?% Kb6հ:sD nfmps5d-;8,"MMU&> stream xڌP]Ҁbݝwwdp!$xp}$U[T-ݣe1'%)( sfdeb)jXXؙXX))-mSj,Av|s9č @vY+;C#@)@q1r3 b t41(9[m#@&@gqA#`lddr4eY:[TN@GW)௒FO PtB df l,MvNK\L5y=_2`{sLu9{ `fi(I39;3L24q7r512~7;u# dhidiWyf ;S1- -&õy,L*ŞY(#ow9:&PdK^=`^ pvtxS 04q-xheyo?V_}0S󿏘YFNINK%W)* rx1rr8Y\ld@dr~_x/`rD8"E\f? `C<f7 YY0r=zޣ+!^=?zEsQCK>uлbs1/qL@6 _[?:gf{{f/,~^ { ?,ep^?=z_+p~13s/{rp=a{vN6/#}wsςr\ߓpξWlxgL\_;_MWA&AVuA5"n(hV<"$Vg}r-<ҋ'As+J&-Q0^u~y{pH!f5d+x,e rƽ[{@xa5s,cF^@)K_;Jf I=el0)(לq7k؅w~*R@.# #kcIMu*)+E^ j1$AYѥS`ɟX$GGv;Z&-ݹ($HV5,ečn ̆ ARITk5Xuz~3Av\[F>LKL[ D'Sidq*5v 1t)Hva$Uݘqx:g3d8FSh48Vhcnlvc;ޑo3?a*K>=r 0I%a4Uy+߳7]K (VYǘ ِsUCz=ܢ=l7V^TIE]j OsJdP 1Fk/ 1o~%"O\-Q Jg4HSuL;ջzt;%T\Al]io2?o2 C$[Ӡ$ZW#;mp"P'{ wXB+ e7/ &*2Oâ^Wzդd"BP:= c#pt:Vg'(=G"?odN/t(]Dg5Q1Q fVIg߻&sg齳 r^9^![ 1cXu28b'XK|).1 eb"݊%$)nG!%@\o뻵<]9Cpa+DP2S0 `H?luN~>1AkG"C,0)ޔ] J҇g] *^1Fru6&{%K@!ϫ#9+/o>']u(…ߣ+{~<=񎳦?2=a&@W@C|TTޟQt~)'ba] JkE u~N5H(Zαtg%>2ZɁy8Р1?]~ؤ)ĖRieaW]ƫP<. 
Ta#:Cx ~ n-aWIf̠fYC~+ `#{ FwZcNH7فSһ譫S+ T|jQeAg96\*pn1lx1pCSe!s\q.U{CFާrU )fS-=/2lp&|=ǏS}t4J«z5y}M9<ӭ3H e5/[lE9!o^g .f2Tqe3(wOz_TYwq7 Z"A5]AYي5ٟuE%C;7)m5:1Dt5$v v9m&EnnOKp?kdt] UucP}|y{(yhsH"}f>v{ܺ[UXPiN;0i'<&0HvJv*7wfD+bL(hco1|Vt;cYG~X!#+N~^rEUzYo@8:ʳO.΋~)kPa8-{dZ΢z"~@\wK`;-u-`5npI! R6LG䂠ddjkJJ+>DcU6e?$yocBjĘ$H lx|(ъk0p+d`I{rǐ 'vQ໫:𹃁Ŋ,~eR:I'ZSg*g^0xPb#wW%ũ俌{.2T%nskP[C#~l]'{&48A&~S&{c@͢º˕Cf0]Nm24ɯ# mӳrL3Y%{`N6Ksii^!& #;>9EwƱJ jHim!6V[ of|=5oP45E4DkETj7B2yDURbl UPt ~J\{T&U,ɇNoYت$^7G[x/'U[vdp^n O8ޞA[>y=;=x!g`%Z)gZq&EEގj3l13ڭHn(:.;^Gȷ{jg[GXh д(1FT(qs}战葐-;sCv M7 \W LO?*>%ϬMIZjՔ۶ݗ)n9X[ -I}Tz~< I<ɵKc 8ע#!HeWA]r;:]O$0ɠ#lHHZ5F ÀG5ɃΜ9鼿tT (`k5d*W7][{B{maTdo^+zloTEv6OZe}WAl~,MuI|p5޺\уiӳA)+ɩ&D.KT{xc2_ ӴnkwPdGCE(yҔk Mi7'bB6~Zk(q3?X~#[mȵGӶ`}'_z,Oni1q y' f0X^_iӼRnz8aFu md5p",(!d r VǃJ^47`֍Q:%7:Ú5l ?@hi*@Mb22dfF), Wpxiy OSkNS)X"څ!~cKpD)Ԩ½g1wlaTږZVy\-܂~׷TXW:憛Uy 4%okRBBF}(;kޚͿng|a:ޙ0/"kz_`gJ~$>Y\V,jZ:ShfhHRi`pkTnl4˔P pBG&>LU|`$ +p'CpYAEM0޳ʥw_38Q Oui #jqi\ղ2ep! %x$rSؚ%"P5FYv-_g\l{6S¬Qѧ3_>:48folݞ< +붻}Y`@;K?7+xbeGONex4zl{::LsRBw@&$mg4qN ]vě'~EfuD_ OM(bXI.,0_ Uu?z)ɉi8$X%Oa Y *]|"T%ҒA"!S}Qھ> 6w8X#<}^x~A}CQ}H'pWQlo+ް}c7Ry .Ӌ@6yLF< 4qDihTg^3#> eġ\|=\%˥͎z&p5qr(񀻏rErћsN!I*U#*uô/p/`̀7S,frb+$=sp916o,E"lf6^*]:H˜ϰ?b!w1C#٧96|Qi@g‬CׄOl$lj*NVʝ>mt!~J6ƗRPȼ7aʩᓧoVN&Pi,j8vGv;9y?%  C ,GRwzNP0!WWO/ycp1d)#돃9>^e&jfkߚʅOl5r Y`bS#_Lolm\^@_wKp3Se7x~R&OInv!0FE[smxfCF~$e@{ō#Tqv)sĕJ"Y@q,h˄ Mq}#{Lin ?fa{Jxɍ/(z(-4x(Qy!|Y>eEܞ/TPoP͜AЊ!ڽyMM";Ǩy&dnݚwdʱmlɶ|!ݷc9RJȿzMC{A-KHTznz Qh. z SKZ#"kT՞G7v~ k|q՟j6.Dĉ$͘04K`ĩTOshe2gQ}k59)m#Jr:ugtܠ"x]YQEP+Zar]erd6rsFDgQlا'q/=@L5q=za_ 9$J Tn(!COv䞃,sp< QS:0f(TS PԨ Ⳓaoa"vSRA.]vjГ4&?  
H=&`XL(QS,8{rFrW*4?zL\Nd #]#!V ֜r>ðR^hE*]hQQ}uj;TDS@[(qZ&b3ACw.XZ !ZRJ!Tk6ZH14/2L5 Mj6"[ ^o9:]A3Db yAl%d`lfy89CPѮfd9 !ya)ΗLԆQC35V4Iq!3&nL.38ʈ 7!$r6-Kq'Yvz?~K3i.<}*aS;Kj7"lZ]f"wrF[gHu$.nLEHX)r~jPG 68#R22$,e:2=eq"[y~hhD VΗco|ܯ Sp|+[8DEU*[Z .R޾SWft RԼ~]9l>aքMVx}}n[_x%NjV!o&}n 4ͽݙvW2I"fI\jj+Aj[0j XɎڂ4)i)Fhljљ^zzȒ+a2b"oOpжKkI#0R9: /gQ"~DGYa:h#{S?ʋ%ٕp5_/;IJ¾?xpVsOAjkK>G4e[#w:V=X42/ƶ&4/rݛ}:7ٌ4r`%/򠙂E +MK}x*f04נlT/>V l8;-D86hao"m`٨yr$'o!w؂SJ/MpFph#T%@xʗ,(-Nh9rW3@.N?P(+@@ oD++Lv}[' _e$n:60gx(l33jx;hXyE6gi:%H TEQ'sW(][/ڄadaT {zp@{i=m[Łv5#[2 NڝFDxt< 5䫀t0aFD ψ-i?=7ミGr"NՔ/،R@XLCg*ml6vQۆ BOݱ^&b$YC%.?)6(*t:BTp)xx Xo۷kEܓbD .6ĈGvIϣ(4,hQe*j9|q:~,& BFύimS FaZ}9sعHȋTœl(h6*SJ1Gzb:DAUQ}Oe ;矦ֆU>6_]#%7';]xFac{]ŽŲʱU(6nk^^7U"YR͜G._-tX Q E%_Eؕpa ֻG$KzHs`Ƅlۃlku^EvakMCR̫I8^ i;lqL./*%\\pT^ߪvx0DE0VtGciESr.*\l^O`u?ײ# &rgjf,svU0!1:axCwYⶵ5ۢ- C@Et`q4:{xO/N՟V!zaPˍye׻$kbV"fU0tJ0vy|uRy|iC,8ɤ.w5놔;ssVg3qrM3ԗu});ս#({6YBݝRm~aR[ 3ƕ$L|=__e˿#$Ah=1 ?6P 0G$!$Y/?m:@H9u"r(USCa:U }mXL27 x30we'HOD;|it}5Rsi93?Gg;Ţ뫗W͋[>n4QwAFOD ҶwI] t~ %\NL<OلYU?>]00ec`ly yx^blf B{+yV>iis]/~ۢD`uqUbk s\ "lJ)rrv̅=~Y wNǣY8y&eC$T[Ls] K_]pՆnJ?ep/u؟ɹ1Ar79ldkk$lSoLa[kϺb m܃&V\:}g`\ =oHq=-S^ V_K-VA.=w]nN{H+4 Zi`( jzA}!B {iTᤄ|J arty Y5V1'Fv,a{62{"ͰN1:B۾xB⑑9z|V/j+(7>>Rlߞ@c0z1pJ+c_ݒ (?H ?P gR]fmcډA/j#LAX!4슭id`h^zvD/1iHQ+XMO.8߯@ʈyl Dg4vJޔ]&à j?œjZ<&1zEu)PXJ>1R?qX|k l| i˃lMȲH]ӊddVmYخ4I9%A<\IP5[⾬3p۵\q]* &)N:]~<=a>O +"e6q=yAA>SXC65@lygگ Q:N t!ʟCCGf(KJ9z}OAXCa5'.^L͔j[ma*5Bx:`m ܰ@\;!(8j/ TݥiADǒEZ>Yxҡ_^ &B]Z[w`پKB%΍_[Y!^f 呟NX[|4 XGWfơnřrHS5kXJSpa `fR%9B~/7 iy IP4&_'m'`w^RIf(b] Ȓ `?R1vjE TO1+$Q/?8l N|%-^X<5==`|?t1 AԌ3}X@\#!iP˺Ph  흵aqy;Sbҥ=N NkXp. 8ojc}oC0x\ y L2WHf?5m@a|0IޚD0!۹ R~6!<(*u."SS[mUr2cidA4=Cff0'Є R= b(O/Opjq7? b9?/D<>\im&TSx73*V1^auiYiB/v\W1R_wZ(w,g+UXœ&Lk8PcflSފY:`ӈ%J=$ƒ{&QNqdJuB !$J^)k$H6C]r;3×҅Wђ롐UsxlLtˈŒwȟ?6J}nbeuxN4jTXFk0LwxbnpIl\jT ~}%uB E0FhU0&uye/i%]<*k.VqʮWP;,IءҬ buG3ZV9..y kRLr8W<#j9ƅ!N[X4q58eBGҋQ8󇙠N|v^h :ہiw:`G(+5\D8Q :W뽹dq\bbGOͧ l R8(2@;$sf3zT}2pwCP*cط 2jw&Fj~?aoQ쒯ueXY? 
ZٶX/{\qy 5iu[{7*&Uh RF X>%\lzߥOza2&I[W)G~b~SFL~lK"x_}R:y77it7Z$( 8esѓnC`0f=s3"Å4kZ"\W)OƱsԎSڄH[En$Ga `~ :Np-Q!1@]L N.g6Ԡ[>GXxT,b7մ-qN%xxA.hR+(0OF󩭞I}laUMs6<IHOMF { W0e\oIYKU{=V9}k4lI]ʾ\1-%Rl0X9Z9\[1W-&n}XQ2B(o,ӅGٮj2?btXKfD #2m#-ͷV){$Isc~l ;b)g^b (3?_*T# qK-[un[X ǎBr|<+,sq=fOhǤG㔟 ,-o5-AmSsh%1(rVOīKo+Ò>]9,e5^ecvp8z(*f#SeOm6njnMUwn;AfR|f??:0-i0:&d7pBGXӛ{q.B^y*=%Mo,о: FteQ{2'tM yâA>qU\hB5w8voa^ڠ)CS65ȁ4dj=۠qNHߗ?r6  BE%v1^ᥝCQٱv}@IVvb=eOĩM_iXފ03QU_n!ޯ/X$LfW+X"4H) ϢuEy{5M/Ƨn'/@9xEgt106Uo'Nj۠ 0©ũz RN~,rv Y zx^4 }ުMx .Ɨ8^'~׻ ȃKB87qg),] 5Xn'c JEQH}ڱIJGUyZu]NdU3܊Ud#%瞼쒧iX¾7{WB8_iZ fGTgr]o7 ia=Fhܤ'fЙ)#\ڗP>Du8'9؜I'ސ|}U|ޏ[#s#_^&2dfud/whLEruƢT|}P\vi  7{5sMsS_u$Y*!Xa&{_~_&|),z|>G %Ny~G O6-I%T.?vcJ)h(J ( /BX-+ W:05KZO/ FoyssyvHtQd5W)K$8u>_Hm!WToX滑€Z}Ö'=iSqUC{eʕ7Hq oB-=ɢw,ӵ$8IոNǷM"e\нJÔu&{Ο6AM)CQ3I ,|)YC86'߿xLFJM[!e+Y_xGP*)KI5oAyB~!p+_'3y35 >)dsVX9rJ~vd{0WA¢vk5ܬǠyXh$7AȎ?'O\ )kq-fB0-2NX~a?:X~~ջ,.ZCIU.@45wwsVN5ԩvLx6e h&[6(taRY>/uϔF@}`灟Xӯn˓B}G=xWD/Ss_gR?oϝQw03"«'XIo7J⬀&-bbpp~9Sld:5IhZSЬ&ENwvOa@@3HJQ˜]QRl< ,-­Bfb=)BNiyM}E('M; 2 >W[d( mޑ/KcY6_KJfS?5tksZ@\7~?, {{ou<-\j 2z3LyE[iIaj{Zk DbGbm*稈Cz!ɄM{KE&?\=Itp,=@sAOSg4;-o?vwEd63Y?7;¤#:3R ]Q7{,MtQk%j ?K3jY5߶'[֕ lyNPbjՋ+GwT\)+nutLiWΕ3|U 1);T(va(@b' *E˘;.#kY7ޫENw!v' uo;%ťM^N]K3rlcK&W>(-"_=x'ظ&fKO 2pb PaPTJ-ݠF,/QO!j{f 2{Y0Τ9?b;h! m \~m %LKjGa7;4C?!.p ZQ ل{UE9ۛQ_f }(>~ts9?bD.PbѾh+دpU} .Cԕn}nXTnev*tW *iA< yڋ/5ǀP0^_9 {nX8E\zBݏCd'fNY+Hy1_$&x\2YK$帧-[XIxL.\@xsX^.k.9O( M#`>!/Lst}l(3_Tr$~q#b 7m $#ΜUm7> kbO%%+KR,9<_P^[nq,xl{4~5[&a#dRbf6s1QE Y.koLz0]V@ ~',yD(v;O k֔{"8!-W2*h&TŒ ,˲'Xޯ^퍭)_|KubjTHWD7[9ĺu*t_#2Xk}h-+OlOܾӾ!-i#丌:b;r&fNB%jGS,$8zbڬhi:uTAYwALCLҭ\A'zu;0pekㄠE@oŠ*^ 1!ꄔ_nn endstream endobj 612 0 obj << /Length1 1713 /Length2 9763 /Length3 0 /Length 10857 /Filter /FlateDecode >> stream xڍT6 (!%)CK5t 00 0t7H(HItJwHI#!Hwxι{[<{?/'h UpDrpqdd yx|\ܼLL0W8?vl&}(C D5G@ rsxECtD0@ 숀`3::y!a6ů#""G8E¬p_ 8Z^♸(vprDHr<`m ~ P;@ k sӡhFB8 pqC@H}u*@ 'xxNWD0`++G'0 XP*+&.`w0 ':'k>+$Յ=#wcG@dWPsuG5 = 9Aܛ@]B"3ie ]@ ~?'G'P?5"ݠ~>v7@`VK O{3O|H'^~<ߟ+ ∀{C㊁*:*SF)~G b֎?۽? 
Υx\(?B7ය!**H Ovb+~ wT蟫+O| c(<MrӮ{0Tipps~{M/GX9B~o D+ _G\Gp~kG$~@B|?HT#anPD@?>o{F _ ym@ؿ}f }fĿ}fǿ!=!?Qx# G O9^.+7$c? ZaL:Z} m>Qyp I3$r [.1Y/"OA V埝Hl7TcF4&h5]^]kž! yDͩ+{dM)M_3㫢gUg5rA1Wz1&ALY1\9i=Of *f/h7|.K;#9̓Qf$e)8C }2Nú2ke.D.@5R!_gRFiDbBF~Mٷa)}(1j8[Wۗgq'uL̪$9Քff뺝OjlW.1  @97։!_@fWO-l߉c}DB`/SjNۻ&DS3 #0++52ո#U{.J[n2 ,,EYKjna)|,<-D^O&JM\Oha+_P?x_fV܃'YpYb'y(D;xTP: 2qQpf<TNx4`#"yϺϓ1PoA&ų Лq e0**LOf7&=.KO Pᔤv:MWm6-_ ?8)"l|]l8[^ek cZ^'jjWPhPٗ׋Ɣ@ȑoCh4 X+'5 _㴍m3e<[h5ADo=l-_Bk:>K&9g~`,C[hpQSP\ }&6j}\aVŕ>)F}F󫅹h;h'=;BXd 2 Z|dC^N喷84C99GFBۈOq_%=w V\7(hJe;:,HlGUWf7qV$lOA;$B 0nl z[K, deJ%*i$LQ_\J)]HLs)w6^7U! ˀAkyG7JR8.;~nIjӁEBTZqm4Dt`}K./kf=Q18gQJ3/Sjؼ˗t9qȄϺxVż^0]{əU?y>[:X1D@ "auƝ8p5+\3jDCJ0s xn>h@45<먄^KŇuFb%@"H ݚ5JsIrBKkQ-9epBlSyJiؿman5vwD:bx,he L}Z/zׂ Ȉ9t.nѵJ6,}z3 2ʭݍ^ 9_޻J%"\WX}:l=F RRM`|@Չ=lE(zO/Wb%u6+c&79ɒəV0p!pNCMC7\E9_#UPBk1o>2uűثL2!18$3#(>\?8P_;$CP7(Ģ\v;mԊGSi>~ 5j׫dQYͳm[ViRmA?Z.vu$ (wf Z_B$Q+RK#QA??۶k+TdN?fNg\ ?m<;Sk,Fz7Rqǽ.CBGti[T5P<`E$%&^ř1]mqZ6Rde=kD= ́uhVR\LF?Naap5|H0zSxMKz;C&C7jH-_T7~Yѡ"_rHz %2P݁鎢$^2]&E[f۪5L M,,W1"%ZYyg"iD2z[3Be\2J\0=>*3dD_1#WύR(4K~zly1@R>0-<^D>ݛ];6~% \[U̘%I+ ^r^ޜiUlӏn:V~Ҝ5׺;*nѰ){b*yM{;UkƲFpS+Ea+9tK5Sↄ~Jl d2Ò^R潨EW+-72MCyj/,9B#T!`_w f%ol|J]0*M[R&[}"̸=sa'2qIХ,}ZC<@60U*Q\f.D3gM/ːﱵxetH.@T死$؎ )V'iK)oS`dú.X)5Qp?e@fD7 %ř .iq--{ AWi@/ĆsN,-L);,1/C_73h#o2a?zV*!(줙vۀ}ĉוil]k(4 {VhOX-x@ղݭ$M8OM&eaӪYS^8^E !G>Z~ŐvSUdOM7'M1֚9u;mjf,>Ӄ{ԭ*oQo?',T{8LPlYEuU罨>|}5)ɥ&7z\0&3I/ +fwX!譬-Tǂӟ ^LcٸKm V =?}MMw{T~|aOSBZcZf2YiW/#f GΩ?v3MJťeeC~T7cwS=e.A`,8Ѡ ?ܖP"ExԘ>̓ NʉO/ \;,F;L6S%th .Y{Qlގd?;\iUGo@Bd`Մ!I#Yj(WJD&!+~D5O\5?yNK`TMbqJX?=3}B3 ǿӷo  1M7lyj#YJ=X$vXySoW. 
9}+M!ߦ1rv $'=ԣd]s^?sl^Eu/nxzq9հ*t\ܮ?YKr OM %bGjkq|{γz-K^BRյ\Z䂄d ד[F-@ZMr->AV\A]|1I="h 將Rϐv`F҄o,y<*b596T|sebsG?#yecqJ)#}ӳ-b_C|`(R~',lj@dk ` B(WSM\=_Qg"]0",_#m/x ZN77;lZJ!3PS络wر [E6=vI6_+)zhzaJ-NWKb:fm>=x1»  yqߔı 09jt|S>1ݾ@)YE1uutq&.A4*O`2JD/KKW◉PM Tg$zIځ@VւhN iB&{M^ U|vZ撄al+{\RαKgjtEQRR`SMϪ!El{iVr-cL8*s+n^`u-+2cy^[H=5ζVkuu$HZX[3c^WU|AOLOU^s":HȱA >y`Zc(~b+4IJ@7JR>!H\%GS?l7"ayԼMBwR3),)6_ڷ}v33t:JKyd^XDE\U_Z#Z"+Wjx_@<͗]8&4eu;͔*/)Z !cVUKxP1>pMև9_ 0{=.@z2?tgoku];AE5=NNSɽx<'/7%;([9fBz,鲑NAg|<[ھ:}Gih:b2ԃǼ)r$L3wFhsѹYLIj&T7lUڱan֍AJQ" r#G5X?88z|)f 蛐ֈQ:VNu@k/xYnɦ4;u%<ط[aʔ`-\§s?(b}.;Sk>cb&r;?7mxԒh0S{qV?-\Yb1< D.{ڸ@kA" j ӹ$·nw^U9^E-txq]I'veMB o6)-M꘍&X9}^JIAl~&ɗsӛ1Z6%3Az$v~iwДmV*"|&]tˌ%B {PW]yOws?Š?\ 7ayia@2t&Ij[Kٰ9Lg/2 p^4en{fxjp.]^G@eJ)~byv\0کw 9Mm._cpZ])%Fn JǸlӒcrqaCżKgҀ'%CKMa՚[P]ڻp +ٍh:ɺ}9T y:56 }rz;c҃vpjl[)esF3\)4t60̀OH5f$$logFصvR*ݙE ^mN Qd/JMlx‘ɢ1 eGKo{F$JjSoH0S݅c_Uc$B$gS4+O]jT  [ɈH9h. ̵YT>D!+WXtHU,7#T[QN*zTXXCAlƖ&/I 0'1@WN } gU uy"nMc7#eI3'+nXZsw<$0bʕK|KftH$K%ƜFSRi)拚Ѱx%ů5^ U,,^7A3Zm}/lnі3)ّRҲ3v[3e!/ ,OEbj2W8DtuZ&6o NaSc#h]P'[mRS7ud6]+MErѕ)]3yϤwq(XJ'c‘:K ;\Sm?`QjD$`i=1%%jR&#vPYG*W-vh(m4sw5#'sbh2Y;. 9QWSv8J'_ <'W˖ :7i^l`< M\th}?*8IAA&WHpb:ZП*8ݑ64VFi? 
ӬK?ZP&]' C]V'oiEߜ * 9`蚲92 jyP[B+Rexɤ1çԟ} t wʜIr*6!w^?%q1>N 75`kR8hj=8#/ 2MGM?Cmye|2PM ] 3r(YᛖYA+ nTr\-zڇ#:̲.@ i_Glp^3(hAbAb tüJ*y024eWA}c3گZ:ι1R(;G.hTTxWe `OZiȧtY0FRN>&T2w t)X}MD=L>OݰBeJ_2zpdOnG+>=`8|e\#w ;@S"r[Z륉8w="nk)tD{,f|i%ӓvu>I &P!X^H@Q@!L4/5v1\rSzL󩿢URa^ *l>^\ diOO6l=i˥^"zqB+i{ xCpj eg T1;[mM-xRi5j9!O{a)mЌ7뙖cXCٸq, {T?ow2#Qw禥F3Q2-ꐒ(ȉO[R2rv1<6^d Pr&R,Ǽ7"<7ؽ./1L,Duf*d^}f%ʝ~Co|_!YzbHHiZg[0tTvnrs\/hߖf!k)M;4~Wvj1j|&0 EM(i8mX E63rŠhBF$I1ٖ9 BZZ]FVbqqgڒÊ:s',ev˝Fp%= ֻWo23pcYtޛޞ 2aݙfˏnR(Ou:i0IYTUcdi=dp9t>M}n4}>MA9xӔl@=j-m=P[1} S+bm %@$uũޡ/7=\ؚ} >ZjJ 4ZlUl= Rt8TտsAB=<ǀy:)M2֣"gFgá0 =A(hlɡz1ï!SvG @4D%/~y9o=Z.իo~tqlPh!PLՈ,y9MKE>ă<!ZFdeAC-s<"Εu% i;M1__f^{`dGԺȺGIi/ JT\˒3a7ib~aжN|$"B`[.G-vtv?R 73^.@-B z53(9P|#Rʉ ?"(+ ~d8ALEye4cx&7]xI^;8SlWmp]GmW}hH+|+IJq_ '`kmw{ǍB(pb3Ԛ%zo86NRnۅgmR)w?5bAJR[UD'eeA/9l/_je"նְI #/ے;|ݠ,BNpE 9%CP,Y&KƪdF M/G7ӓoq~,9Ovl|v6?b&85gūtJOO;N i uF7Tmp xXnE^R.9bGM endstream endobj 614 0 obj << /Length1 1513 /Length2 7678 /Length3 0 /Length 8679 /Filter /FlateDecode >> stream xڍtT]6JHHwwww C H%ݥt HYs}}>ڇ^׀K (a.>n^q/Wpc2A0P\!@ҦD Zp@ 'EP0@A.ޮP;{rVOLLw8@ a- tAP_)X%qOOOn7N E!nWi6WoxLC{-  NP憌q!5M Y'q_@yCav[ɍBp0/" zN@$w@l@PW< Y V;;C`7_)B]! {u0'oh m5vw1AC$!MxA!^QA^~ ䷓م `l |݀Oǿ !6;( ?ّf)W@>+Kpo@Y㯞ý|.~!^@w] :~U/O9$l'ӆ# GB ,!?KRvwrM`P'?)H#G쿩&?X ;W Dﳄ)C `](df7'( wzb\|C! 膜8[!șw J0k@WW7H)䔂!^ ]-- x~~#QH x"@!?0+Dv@ ^4$PtQ.Gɵ1,yta5ѥ4n üF\'Smgi3ǾtUނg\tvt6oNS3L֡d$ӊvDʀ X*:f,%WaǹQ}aս]dgJ QBW?߲S~YGz>W WQ[&x#n*ȊO^; ;|.k/Wɼ_ Rw~,m Rwr,=nߍVfS~qPn^U<#dEoS'csMu*HAWP3qG +^݄OJt8!3sv4-KJ'Vu& )aG)cfJ|ŭFhܩ*U<EGwHqޤEh/Y}'!Y^sl{TʾQ$ "29GV2_osrԓg/---PM"D1fk&zv **E_ػj _$6 uPA"f'4*Gw)*Gu |5%< 3˔|6yc }\LY&؞z_tBmD U.S+uA-ّn_.$,iy`bZ_ה.OcMX ,_/QXM6^XWmОz$ޠÁQgMwB.#(_ޗʛe7=#  OG3bU_ʦ^| +$ Ӊom`v'-|J½2j>$[RO%-?+ z6y:6Xf`dnaj7yUKÐfy5nȒL]SD#kh5^Ns˅d؂{*gVjiWzÏa9Y2^gGx-O/?|B"#z3.o#Na'ūnܙAb^Vt[@AԏQIY'x qh!ATI08UoUJUQ E,'.Y~Д0p]4#9[.ypjf+e+l3*xIӕ}XE-<ꣂ.KZ r+$OditO{5U $qɼ ILuy=VȪ89>cuidYXJrh_D$Orx+ `+Q~6. 
'r60 WuK9>:CiZ?ˎHH!&, ,20%xTI6#L)<ȓǸC LJP#VΔ"~JSd_ -RsXM85tY 0PZI-odH}'/sБh-whI!`@5woDbFPҫkKbzUfGAړvtsɱp@'}RWLˆGw'}ьۏo* s(ktǤjӏf !~#l4Ev3 |Em;9,r? C˷uGK<'gj/~=x8}TmGS lkJ5(?Íamf6A!i*(-}.aT J= ?la{RX ߛ*:CיHL+GhE=%"ґ~a`j2I!k4!z蒌d1LF{|ˣ3Wh gjuKН/}~.xYSpU:^ÊR Q[DȻ`|N4+0>s"BV|x63Mx6FIRy=^zDי- O&ٯ_#DŽe'hh5ocvbb$eE?:|~Q,sڪ]FzS^{@))Ed0*wN܇oCՙXa(lsnVfb:P0Rt{4r:xj i!K[q!;T7;OYǗ<_hd/ ړ*<k3SƪvQ viV9}FH:I\Dxrsi@͛Jl.3{|W^Z=?R([%*ϘfIίWR2] ehaU9l+\6Gft'Sp ozf%o{d$޾<(ʢ{pj-NAw6(<]ܚL@x_I[%JAפ2pc zT , |Ô/>M{a}&, ;[{aиvTlLk]i$ǩ~yK૤IZ^J idNNbLK1 6v]Mv"[;nd){ǒ{6sNav*cYiW5o& q҃qy:".F1m%$,"I⌢U617H^ fQL#qtyx6B'ԕ d ^S-$T56}O?,~O_|QԒۻkKIGrI ܐFKʌgt80M0a =^l06Qod +e794WQM;V*kS{[N +UűMtO $_$tݕ R%N::;(ӛffJiMZ7 έۦ#7ǯP' 1585HCϕ}3I8xE>mwiU~tyl4'X?3|j$~4M뵹k_yۋZq V͜Np:Yz9HCNwW? }$͸ lݾWy՛Uf;D1i7`bKgB :/ad;|R+MTsIW - 6N< pZ#1Jl}-,붣S<ܣ?SI=έYJ?݊]f+=X*<$4Aͥgv(NHcL֌y"r[D'h8MnBwLqseDdcW&ydҹטgU 4>;#Y+47|#`) 0PPBc=ܪX˻:^#YFyM(<ʯ ӟA m|VdsQ^5eŃu1eYQ̱ST]c _=BwTN[Fy@Wl-řzpq}Cxy^WA+w4#QNK6mXf;q} ϞZL|XtrNd|XAcHkodV+ERaLcDvSR0+bꀗpq^Oc#9Ȧ3&:+g }+h!WUqlo5 Lt"ˬ`ID~@X:=y|܈e}(L <$̔Y7lSz"E-F4ÖJ34)7Xt*y=tHVT~ U&"+49<{7ͮV˴ g3+T<82zC=Vr<NI`^8.Q] Ļw]JU -)ϋ<&?֩KQvRifArI7o; (8S ^B\:#u O]7LW_Tzm:RYia?LImqԴ=kbGet?=)gop*rؐhCY,%Z:q͵l6<|BZ!6g^lA! ٙ" Wo5_~zQ7_v#E1\L!;9gϢ+`9^ma"I{n}Uc짳OTO)I!ĸCR blCviww4Rx^Si=RC?'L5/2ex}N.u`?}eҾfx;_a7{eiizdpZJDX8Y1 U}ie9jOxz7P;wf$hggUtk7f1vӏ?\ݻo madzbpS.tí; K [p]8k ݀H@;"3B ÝDR<,a뜓a8@% l"wVD}) uV 5@YδAYCJtQz hfjZ01k)3ԟky>"/flcǦvqӛrdL1٢roH Ye󗋖(Ln\Q p0> stream xڍT 8T{4nʱ%۬ftɾ3)1slfa$Kn!JqS7KJۂtvI3hgynw(>&6tNa Lph==_XzTLJ9l7; =(@6*dx"7'-8@H#`:\9lֳpxppiXK3$\:`Âx0 d b!i `H]C.Ā,> 20"aACs| fĊ qX,-<|9/&fEB.vB. ą  P8h!Xq (.4ċ1\` @10B|0qKQY~Zd̽r}^DP T^@۵ԥgoNk][BN-z.|G>H).Sەa$Peh[lc Lnm|@> ַ*A)lPs@+6~fC}MF:z{rZG^wh/}Ooںm+pӔwt1fMì)OSfmCiWօvK;N&_Q[>cYHjMu֐SnRigpQ}[bOU_:U{+W58>zJ$qQI_:^U좕\Bk7a(&gӉF7nrYaVM[3:2lЧk.[0#w'jEi=Ueg VVIJIM,ϖg. 
߀;kZuRs ELU:Ӆ?kMܣyc?ό^C>0ܙxmKC(ۘ)SUަ^lyfjD5ۀf79ʴP*ڙ ;]䆞xӦڻ70QƓ|O;rz4 i}!>#yIxOoշz*s?6bRۥucs%55TχLp[PW\u@:thtxG:ʶ(lN %摺0:N,Ev;C- e뻇=2l+JlZHiMʴ)^x92]U*eW9 ⟳rIٳkQ]P[>JzNǭ|0f U/8$֦Gv>Us"@OTˌo鏑7JgӶWP(pUbڨ}L/ նWW~o^htq\+SyJ&iάS]am:r#\.8>l~k*MC,5n]땠|A\EA6D>" 6wdlc)l$wܙYDr}[ >YͬRw{ku6inC%~(Dmr=숆Z6vPv /Nx.7 endstream endobj 618 0 obj << /Length1 2605 /Length2 22750 /Length3 0 /Length 24222 /Filter /FlateDecode >> stream xڌtk-6۶mٱ ۰il6hlsӞǸwd=3zzMA bh ttpc`ad)0Y((ԭ(4.֎M@6q7P@`eef23O /@utQ9:yX[Zj3 _{@ h:hf t5/'+ = tx (ne]̀ ws t8@MFt;Xz?0KO/"kML,v@<=W#(r T?幚X;2Z* b@7W_ĭ]f{3} ks_E;1i8X;e ~,nfff.62bE `*gmu5\܁~:XXfnSovh7]@4{,_~3IANKSD:z|ؙ _C_ed+`[-Mg9hRtM-@{9@,ϣW߄b $ng/mbomOhh@ Z{i+fZKh*i4Wv3{Zk2;k t5Z-3[У4fV`b dv@)Py~ G_7`eqD#.o `xL".fo`XLҿI7b0F -rHoҢ(F -J"nE7iQ@Z~#E7/|&"62{'ЂzF f"4cagek2@{, `W?@[ ? /hl3Y~~;q(]+o'+ 3l5Z?%;fq;w;zm:-~7Th_s= rgwG7;KŁ|. z6_?P@/¬_Mup]'ΘŎV2 KtMEzˍH`"ac 8wOF1;pX}y"{ a ԅw=; o|.K΍~#dinS(HilӌR(7BZ3/)Wb:8(|_u3>+ꬮ为8h#sE}^ęI (i֪U# m,Y `*>z #R%̊ft*6[ ~JˍfM"Ϗ﯀iDCN>ۏIMwC[~ZBB,VzKiI%no;2( O&!x,y̺ܐoFN8ޝ+=Sl[FD3z T|5mO:;õb0NH&MW *a:[/b/&?'gޙ+ e<,h‘B}0_0 scy훨!OG2P\amJ)R"o nfaMɎE=H룑F; ojeWb K3iʩ<^ٷkǼFBA^!St$×s '~V%y0 W<>C M'=ԐIV8"B%jp9ݬJSI2v?J:pwx0r#s>neGpS+Ǧd}Mm-@B6>V&ԓ+eF3<~fVF)TyJг5n<\Q_!-olQfv\8_ՋxۙDGmMQ᪫cNlG{~ALύ Vǒ5@ѳ0~$6o- |9L7m`*P(GށC“p⫔]U6s̓Cͬ)\lҺrTG^kcN^)B2$q!tOvZuw]+k6/<. zWܵ ;rxjymHIXRd`n_yXulm#"/9*N%iĴ"zo+r˪2l5qJ #r͔&yN_MDe07!իn_3) ͮi`9b^S4ZKoO IJ5.}ij*VV~kdT?xIr~<ĤDޘf(*vJVGP=p>e/UCC;Hc:;_<[#a lN6<5eI\QLQ>o,D%Qh tN|WJF`Tذ55[FCʕ[ӷps% :b)/.L* 'ymӦَβ3cGY I~L̰ v{7Mg‘|@q4$NܽQJ\sJ<"xe_(D-78|T9TS,"31ʽ;10ʼn ʎ9 .f*5wXӯx]bp=>"3lo/7WXDJ2DsYM=ny'@<+^踜\ slX`Nv DEb?OZXDziz_.NfI*tD r3["FР#V&Vb.Lv[ 6ai:BM_nt]5|ex|Ү7e(Ok-} [%|i%dckU9ҍrxeQW1>hu?X"!ݐPk%~|Us%H5wT }hTV`ػ$R 6t0^_% _dZL099$gcP' Ue O•*I֮-ڦv9:65Y2~y*ܴwŒL^5Aw 6Yv1 T7{DL8 Qn).3Sr99äMtEAFբ>C2?^ yL@=lԭ$w&.q 4׎$̷)ev?K`3mHN}ӃcIK96P)qdm}ue? *TGr+rm%V%1Ġ }. 
iuU!ߣ*jH*jb~(kN'Cbmd 4ssr-G&ޅlUCqJ-YFr(=73 :\Q`۔D!# V B,1bk2&8.z \&i߷kWq-MǺ (>h5\*km7* o߃$ƚ0T~.Uj<3{Kvc/ 9fG~vĄ]A2K{"[5Qxo C vS0DMA&{'E97Zw4K&҉e뺍SQ~={WEPID{720:;ESrf GDi #2jP,˩&T+=V%: ;\^M8 :R3|D<Qœ:`X!+kxpw1A6rP\1]0v'f+*mz(+S+/_h,&%}Ҍrdդ)_![bO `[U%l&e"$-[St_Hr M^f6, HQ,\'4njtzj (lFIN#_aG?E)T`LG]D%f%L|<\y.WjRZπ",O;G75 P.`dKs:6Klz7Qe=X"٩yʾR:ρH>s27:T7Ψ?Ir{@(v^&wH\Lp3s~Ob0qA_-fךW"M)gPRy K}A7t'kH>EƱ|ϖ!h7fCNP=l[-֜gUôg*~{F4Q.;'b8Ye?k *9ҠjX ݘ"_֣F6êXBv-J,y z*h -|*Rf!j^ߵ}:yzM' otԢ9j> N ᐡ =fII`8t+\,n0j\6`\R[|\oZwVD1>]_,; {QzD9_gȉ{,`1\b,Gۈ.ON#"|l] y`l4?ʽA^ȻZu);\?'3;""U \F$ E6q;1OR> k(sNV]K< hc1gU6M-BZV:F-jL^ZXl^#5ߍPR:+6A"ќT gycuz{ .#i/_6puSh#}k[> :HP-qT2xA1MJ+ƅԱCTCyjHHcAd_nh{c3 "ub9GjN|+XɞP8mIlsM)Yz=,WRuCbn7VȥD>nl@Դ}yaA~-+mP׹cMqٙwIMM~b.}"8osc IZnbԳ$oo R`;{ݧ9_Uz'2TL>]ؘ/Q4@3 e+htzT6Blfve܃s8eOH+RQ[W)t\HO^Mw Kb)Ciw0W4[kB[4J[E/1WKc9@L 9!^>JX5}sCnEg8[QWgTwTڱ5fQ8eSox5 0aMcœi,oK[,\+ H| F|O)?Dt]7!oX vOoquDdh"-{2n@]H_nLzx>EɌ}KaicDu~BZXg[kZrMB!f u@4l3 _qw.ϑdN`Ď0<.q>~jF[Xau'|EU[%Ǽg-%F2 ِf&]5NDl!혴(3{{x %D)I?$[0;z߈K8loӏ YaQ"fYg{yq[ #cg + ,2:Im 'L$'99;AZJďPic0STj'O~v5oOL1yrEUKJX Ӳ$[&,AߞE3 iD*^u:sV]aJ' >Jlo(@gcno֩|?3(5>iucЩؿFka&/^2ԉKtI;5µϒy/7)2o+KjQћ>}v/I獖tx9E ;^#6KB1~䛨XTC8^^3L2`iLMo^ >e64r_ʽ^A=)%2h/mئ`?mCrU=FhL~?S޷(L^T'7~>}.Vą?91׾xbOH#5;^*&˱]Gl6s`iVPAOTX\:-_ M⪻aNP($xv6l)Jomܰ87 'o'2Yq*K7q*is/? KU>$O4]K Fd鲴Y7WbB;v '"I3F|SJqަVfay!9R7^elذ.ޛRG6 :O4O!?dưf?WxR4(q\Sb[/-_/lNu%$UU@"XZf[%r\Ei-lY 뚎|{|iVUiTX wPq INߴ[5ߌt!1_LmjF{Y Clc|'&vt=U⽽quJ\&z:t]s$YW{R{$ӎFyYmc5S3H,/W0~!d!8Xc RuTҽ ᡧO(E=u3;n"!؉!7;"bbտ]҄m }9a2Blcf4e#rhB5 \TbF04'b~˷Ιs\.BPQ20J+\# ѮO6,Z3$ FTgO&=?<?ڛ[bᒿi:,ZlƄXldVSnh7]Q!d6_(?׬d\/_ [QoQQpFDak"0= zq$fZ)q&ۤ{"q|[a_uR9[iȏ0vH ^}M׆@('A8܅D&TKȦ3BskpJObyEO)taUd2FM &J}AߞH^d&djH%+XE/wlg,y[7N/vqve#:'`\-4ncX윥Mg_9i%Ao9M>mA;8R".ch$)*}w/4 @CR9IN߿WV7̒! 
j*|K~CA匪‹hxFZʉ'bGdHtEj }{S&"ί& Jg2CN|Bs #~>ATD )xdSFy3~H-!R$VW.)ގX˓0VmC}Mz>kr[Q& *(ja."YmIluS HƪyMREZaڔ}8<+óE_U ls!džX adZ79z #o=7:/|S>%Y`ߓ b&72[p~6q@#t2ފ_dA7R κs@c %z+@FTǎa1U4xv۴{"}!E8-Hn;ʯCɬ u$_ 6O6Դ0զ\2 mOX~*Eڻ-vjgonuܵ?v2b|$|2ɉ?bG|8RμUL 쇿t܂9TS11sh$Vq?4!&XW5^$OP:ש&tJg?o\bB@ c"m\7PzPvȺ hT{;ri$eoV)!6;"p!.Z#=݌LGZ/~WkTPp8Y\Y|h&1l@xr伄S{e9O14xg Q)e$̀ 1CfœhS>uV|ohwg 9nZMw1]7Tc⾻\9Im %N]2 \CB  zAUhACe!"в%ojɈ^\|2? Ov>ѳ.WfR` R@*.?e%s?s8y|7GB/r͆{-.@Ki˕?GpqXDgCZ筡 ;{&j,%7L$, fD4`hyE2!fMB|t9/b;\xA%n~OŚKth}$Q{}lF_ |Ú kNM/ܴX2nX}nk l$ 6Nغ-7pl:8oo_52 &;y҈-#Uϩ6Q!"R_R*ziu~ն&*lb|op2"nHMDWBx!8uq㽓8ת/zuک{z H6pUj_E[52p^⚬(UFĞv\t14`ұp,!~=2sYJHf_KV5@my9; $Y['pv+Bw/Yl&C'I0L="W@`KѠ.$D4F R~|ʏ%z)]F1QM T3'!R K(Z$Ca]D']q^Ϫ@J|4}욉x$B0SL,H5tԇĚ=|=KHhِ:*89/Q:( cFBJ2bekPK:K*?(,~+pF$hԣRJ?@"OG9Eq"qʚwCW#La~SMGi #.sz|a;0#4\Y} tT?./];^Z戫1YDy=҈df[r- ܃b#9 af{+jHɽ}- EѸc WZ3TLI3M&գ`vbT8caƠXߗ0A.oA Y!AqcXh欂#) gޏ 9\{`I2j99_"8cIA)JA+Z ϻ%#_ҩ3:g̱klP_3P v@B" Ǘ#m" ٧Sd%{b+q=guqL8@+=##i&VG޼FE^A_܇ןN8xitlK̈́ʪ.KVWhwp_]T `WnFQ&G6zʢD ܨm*1 d<-e Q^yS$|OD:cni&*'㥮A=(מY,b[sbsGSkxTnx/M4'垕|69)zͷ QS,V+ݣTU:ᢘ> WɋWtgR]-TBւk3gFzwj5!QkNh.jU\Q@|/}a&^ۨHꄜG9U05xXpm97.<8\DܰK[9g5W^/u#{[WUuy]#&J|j UF\;r\Y5gRVdO-;,ב咲-Sj` )>Sx?٠ c!M3(zT1 ĞYRɴY;k( wnR1ZQm4HΆAC-J}#߮[HŦO{;Og'+BhĠl* 6fg'g!1qC3 R\9\~6L`f2HObr儈BbA{2qvSdf2#i|t-)GxvcMzRMD(tP7CIʩI89?'a*=-M'iXFC{V}zθF9䮲BA(a$R$ʾN'v[Z%uMG \TLˁXG g&fMurоtQ3T* R Hcz25J b:c{HѱO韦fґXQZP#o ӫE{ۗ<ٲM>!߱/>m!v}ʮ.μ 3(gUDsﺕT2ѧ)J A&ov\w߻4 .APr]HP[ ӓ,1M`}ָ jv:n6FZXAGݤTFSu@=۠D`Vf4c[y6C }]5UE_.̧%)Jf[-)'xTѣE2Ɩ3> 5ȝa6&7q$;րx׉1Sz)UQHm P9Fh'# Kw9J{^$3DTgW26_^C~TmQ*?E}NQoAP|~0" Fgatc Ij)3Kj~*UON|Ҽ:7OOt1Brˏm)(Ն/!"uzG m*%Ko,u: :KRmϘ7޷Dfo_;OCT,ૂײa$L;bMr!%ס̆8ܣ~MZЏd}Xذ2YKI,$٨o_5xU5Լwmڒކ6b!u\7͸}D=ryƤOj#.% tVPSn)3-1<ݠ 9Y75FJ|r'xdZJduFmcq<4+랒]xΞjS܏,D(r&pЕ@jVؼ@7HLj;…69O<oƓ k+{ ÕÕ/\fܓD2W=a .\$GTT( !a$f8|RG9w3.L;B6b\d83K1%η>0(񥣀({wa-e<^l5"0_f4Š._s1v8 ]rpB֮eb0 j{y=df^178 ƭl?i&'y k6T}쪜\'68o?+eMNHNM t 6BCLqdฯz\Y֟\p8/RQ2*<'n`W`%Ҕ6@Tj; @{%gp\ӆcocZ4D{p_XuB")"YQBu3R9ts]ǰRZF\曨I߀Itgԓs&H 7qŸO7QSQ&8<ƿG-47OBp`i?ʋ#%"rԨ iIlS춘vm~b:Abt)sIg A# thF)04kQ)9 
*.m-l遧/ Y 'ޥ?橷/y_i+$K⨧="7wjo5j $|m𚽞3\夾XL} +kf!`^'|@)ҢBsAd@P*zfi!u*Bm"_Ը@ }UW4X~hrֻw D4/ȸ7gXY3ުXϊ} A,CWlCD 1S W8_R,a H _!3D) 5Z!U͊37)x$u;p4i( ݎ kO+t GvT/`a$ ·^mr8_sGd[)b j%* 7b?f6p<|pYWo?O̢!nAl1W/߁0˹jV6uH #Ы~ H[;v_m(z19ڴEy#}W87ܟ/bG/#/|$$[gf%N⾜s:p[L.bizg #og1ZQƞ꧊@̇^#!kɖ|JHq FEQXb})mQ_2g!BC~XjkB*XuO||ƺPwV|:8>L<Ҧ 0x:u)xk#zQ5hO15ڹ/Ɓp5$j>HWE! *r~! 9qMڌ:U0b gEQ^N"c3ҡ OL5r֍a$VqV`vϩd$؈%`P"]qV2s]m$b{"Y$0*J۝6jY@kL\TQ%s*V;̥#vf<.Bm_{5/})HL1:_<㻥BZjwlcrL8`(B@݀HA!8)@21?8U|Q^ p`;?SE0Q͓o FX6VLgSKleU6M!stf#l&bgQpP|8")Sv( 8X,޹ˎ:]ݓ -= OFGOٶ-%}X><7DtF#M<^KFy7B,N?\`mUJ=׬DG a0\q5 /P7} J37ffksL,_\ \ BV7iM|SjGK|) bZd}#c[vZz s9G`C.R' E] 6nG39ӈ:ކG3},KHAW*Z4~k2e7B̞#ml_6hd`7Y/yM;6!/K$¯GF;RAV5/g8лv~:'7ŵ694Vg1ӵId$oG&ͬ>]<2 +wO"E%Cz.]wWFI/'ײ__kaniqmy| †SE,굔Ki? ,Ϗse*k E~BVn!pԉr͖ #D7Bqx739U\B0qو3XDUB n`{lyY%Zs"Bqzc}y,)[O\}bklDmav\[dX|^7lY t"0+`Kp|ډ*-4U{^@mE;b#q,DB'8\ɞ1M3Γ+q"j9ۇU;4 jD:Q]- -D lכ]L+:Y0TF8W>rS%~S}F mDڟ$y`LJx'e!8zg{HvĖ %s!{uCδ ?F-gy.b&ws`+CYT:5qw"yZ_LPG,;[>r < YF).FF/Sۼ* ϨV&~"8,#Mٺ/ tdބ]~Izayҽ+0 .n_1)6TyR~dS-Ɍ|[~G >rδۘEnb1f2{B9kp03iO"oӶ/(\" C.jC]put*၅ O{\IiNh~l}K%فCU{0}HH+&tZRf~@:MV?MMg#:L+ 8xa}ƈ 5Z5{ Mu,,]PMKFQ8p+A!p2CO%dFۏȹ[57`x1;=mƐ]3_M0^3.;}U}QU:mj C>>5JQ"h,m gcӢ~nAΌlFws!ky T}8t#*䝥2@1/8ʼrF6X\uMV=`st^yu$kv: W<[x%(x8a#4#z8 3ž)p|c_!Գ1#\\syevqꛖ1"Go_6E6m3Ayv\i25jiAdGs%Op|BW&R&I3d%\6"1n踹S1a) A};DX A4j OqJcEס<b$?zwNAU⋦, :ъlIR6$]~(͍ Hn@+LU)濣g 2\U[834ISG1#HZ(L9uoG .thb~5 Ы|C"b4$Z# 2Б8l0lw@=l+ڪjSiG8|I*Wݍe D g#Ty^$pw[/Q. U Ф.yy9 H*AЏǏ!K^Ͷd^kvgG6]HĚ[6л{=5YT)Hf'CcHi@Z-͐wwa7<}Σ*k`fF~$MDzy:>6F5}r rD]?jթ\">?4zeY cclI+d$fB #˩KІf^*oFW`3pPh'_o%#sӟ!q:*E*Xh󄊋:x>G3 /ETpe)@Y ;(Ln ql`Pol((`no$I&DzoeV;g^E&m0iYK!P}3R>6bXbH)z7>Dw -~r!9|{"/F ؊LأCғ7~n\'kfmJ/v%0WE o/ 6KwS_cfV<'N*1 qRkzae/*Yȁґ8-d >C!y_il6ru9ۗ2s| KqjetA|Y?%ذI`vK5$-Vz %6}<-MAsx oLnui(gA? cV" |rY\`FCBlF/ԲgNz9L%&¹u{Ṙ8qUtWMZ4'^C#Kb|L;r2 / g>G=,yQ}1Pk"\+opQʆ()uU58cl,:nr9O֘dn#қdX:huB7qf"aJ #X?H";L33Y^?6֦QŮ:lD-3TɃ3ܭUAT(핟hZqH(%gUrZA{)bUd@/? 
ULˏ d#XP0Ui>#+8?)]ָV"uQˆ}زy(J.DyF5p`R] 2CX M۾$=_}{q ʫ#r(  ȁ py+W.uzmfCtVІf[Q>`̂yk\r@D6Bp&SNȃa¶66Ea|Qw3kR+ s`H5kSA-ݟj%p *yFF=% j-v,uu*)KZw~Hlt*c>ܜ6sc3ZZiR\̢%o5:x*5zFC#aY >dFQ$50 -Ď2} kPA1τ])T f3P"KC[ښCt>\< Z">-qMwNijQ&.Ps]k.3Olx1P9~SSBDcSxgQEO E3юY~(Va8?Gϝpޞ5IC_(7<>xQF`KMoK OLNljŸt"d.8ås{;[/CL0^׶l'CTNpdJn2C=ϟ˛)[5?KLHUp2"7|QSmzn6:BszV]{*Qg<@_ŷU6DQZ_3ޠq0'BAEVܐoQsƭjYLҊ͓Vjm& "*P^M vU+zgj䋆@ֳhPW-u-b$X;0-4qq~FG _dn5'O-O9I6iܭz*)'3 /RqP$ƦbOFO);YYF=dW?6fM3naRڧ^2ɀ|d/Vulֻ͇ĞӥW 6$ gTrbsݲgullc_P&ܒGaLn}zsC;Fh p endstream endobj 620 0 obj << /Length1 1688 /Length2 9894 /Length3 0 /Length 10979 /Filter /FlateDecode >> stream xڍP\.%h[%h 4Ґ-w ' !ᑙ3sWWF]M,uad T49@ 7;ȅNG q%FÜ!PH '4N Ptpr88@(CG@`gt:)G'O)>LNAA~?`\lO-@-G B0ظ8 qpa֢Lw @ -vƎNж8)rrq'= u~pZa-ew@pYX8:85 b*x@P߆ {g'b22r@VBzj-`'gvg9~y:e>i ltN XAVtuЁB޸2y#x@ ^ _o'G'S`_ r\``_+sr,!.s5O'1O4|`|'^P{/˟txqظx9~^ UUjاSOn͟`w,U'҂p xf.-H5.OWq|Y% .=Z}gYRba'Y^2{ }8=m?T2P G` OtxyޜOh v˓ =_# DxE">?H!78dF\Co$#'˧k9,9DV!OR9A9 >U|/_)?)c =]O `¬pm}pm;ΗWSt;ziLl N{ldkkO[2W?[Q5&'tYqC᜿ܙU-f!s൏HTiC@ i/ر|&Y5XY!lj6lߴ~_pM310}5`ٞ"qkw |2]SSڶi w YׂdѴ3r[1Mw12kom?JT +DGkFs%^P WbʼnqPZKS>uZT^ /`c//6fd޽0Yֵ?t vMR=S51Dt@c ?|=~҂@,X7G[ ;(TܽZy8p܇XE L:C  z;~|1$­5IbU 7KLUyȖ;gs[lr}Z' 2giKTf ]nS}ƟVSVtyoHڵw|v%11!VzV,=լ N.j4JŸQ*jUnFWN%̞tTObię<}RE8h1m+v#Ldܱ|\1D~LbERrSt M62ˮ+-U/( |2{u -ƪK6d!q-5MQQ^wӹشPclv8mȢEB߫$TuN 6,OHG[ ̏kILU!C JJ%"x(|L#Ø@ ;`, S-d1}zwxrxҔB&br[yΜa`DXL;3-јj){Qlo&huQs}"U"akD"v5<QfwtXe)|Ƙ *#A;NؤAZ4Uq;/|fnD;j7\Cj Uͮ_p4(/席) w`X,ۏ8rLٸrL}GGo}Šy5Hf.qR+4QHWǨٍxTT0OKLc":v$ѡZy"4x?k F Uan>"|r .oCD7cHj/ݒGv}9+$j𞥤'dܯ_]0qt❝Tٌ+J g4t Ly22(8LU YҞ$)O4=r{iB,i:5123rQwsI- {_ WUa}U Ejͨ| E7C3уCUվ.zFԴ36(65詿YJc\5,>8n-6QZ sP: Wo=VNPi+#wZaHx^ c0PcYx~ʽypT(0>)(bޚ~x \ZbkI0ɬ&Ums|ޭKܔ nK'̌CJ\QEԜF],fr71}  ᜊLd5ISFb2N117xF5w>,P~U2ȫai5X}7y=:9S%PnC$y1zC;ũ{d~ H?_*\-@ADyvb=Nc?7\)iL9S}zK̄-į,;#pZxH@#.( VrZ#|Neڊq|Zz_g-N$$@1!E|x%/@ɗV bӶ. 
BQ ry9(p#M|HҰDX`׀_7մ vx*|A*˩v:ID8 [SE) EpU.V:SmMpz &mţv' }%Ş%ĔZJLg3\xTSⶆOCs_j>}>kZxa;vp1r!&qڑ4(ɑ {Kd !b*C%N𥥴׳w_{m!>j{2Zm^qC4M\֧|Kc4x=2Nl?kP3&vh|eD+AkWz&c}2~T=.$9 1#I i]E]4q/}?vw/hp 9hBDЊ?A~*̈́}HzD޺p(KX}%MzX;9 m(m[5% %Y&dԴb1-l('oSzڞ&Ox&F;P:gE5b0ȿ|CM>볋/H{ Ə_- saIAj详 .!yݑ(򂻯U\jDX=42;f ,!,vk'@SNG'AmDR)N ޭhpٳє;,j붏:nE?#sX^hy{Y*Lȧ)z=Ҹ<氺@Bwu۞и򕺔oò)+Bap\rBmT'Pê--l0֦WLplȟ[J6.]r Ļ%MQ)5{PKeIF~z"3GhᾔǀcWD5_I\U<-0ƪo{.kd?JGO99SXsq]P>_YN/*mj%оii}0@®cꮺNS.nǓ<* gC Zx?bE=C-1:H]+]:5w)P:pZ~Ls^TOh㖮VZA0'"QEcnl;*8w)Ř0@EIYmyF \rfc澅t2e>DkGPI}^Uhv4g!}f KFg".s  =k +YN8H$~_˘jʔ ӛ-:YJ ba j;5 @, ΅;5Wp}HPh/tj&c4y[4tq=(H"9z5g6/?StUa8o8!+'g_p:zee3. rFwPn\#v/Ij <Í Ir׺T"ȑrGP3$z&F*s K%[WxWo b7jvIpꭌb\b[wH:$P7EgsB6}?7o="]2!}͈Gr8kb^&tbFa,u0hl0܈4rTBlɛ38@tsЦ^ZmqR0 >a,~\h%Ŵ++'uŋl^\ oJP7 3Q7[iy_z^n|yfE^Y&̷rӟkZnXgر2`Mn/s3tiؠoO+JM-JeΥډ`ݮA4=Z%SeZFDy(2S8?ێ?h r'j>,\kMӽT"^%"^@WȂee~fݏ(͚k07گwSXFh@_ſȂv9%FCYH|(L-zuhа(z}rBzVv]5J|}O}'|KܨiA #ur/V17~^5n!w&Ņ AIeyOBT)p)Sn1F#r@~k0s[VJETC}!@DAA攷TbXZ,.˺6كًy@ڀ]{;.Lz ^"Rnm%V<%-EjLưN8c(1Tqh# }Bx2*Oiɽq/Ţdyb NM*'kU@irʭO{6DQ>J{_߱2*mM{Έ"gLH|74nHBd3{JH?)*a)UYijpUE3ұ_ct' g_7B9wl8P%fֺ?-apUB('w5Wf[l;8ȬG݂JOoj 򿜟d^he tFMwg䫮9-zb;cWS2јNR0 J# \WpYzMTR:.ȷ #_d4 jPJ'q޹zɒPA((1^zU1fq ݖw/"6=r+-ǙtG~T:"-/h'"=t/vp;KkƏ0@g28~~vfLu5ue+g?(}7l,QBDEsdVQW :x;`P9%'FdKǽ# OЙ// ph0B(3=NCrG4\P볆jq̐8e29 ]+}hIш'ꆝϜC^[YOC|T hMd/x5>l6 RkI.wNIg/4=;0^LscHm+%PDqX.ܢcĢYM,9eSw D&<]'KFmMޫ o|3o/{C=pERԌtLfza;3ʛwÇ˄:yĥl:Yr_*{O} ɪң 1Ӆ6[i Jrڣl6p}Y|LZo/ޥ7JbR7=Dcd$[E|J :#XE8On+wS0H< U }sg'd$qezspO, Ul,W!{LŨe^J8Wc rٛ XfAwFÔ8yXk8$Wlk7]"PS/lK,s{ ̾]#YĴXe(Jת}W'e[y6p*D(GƦYiS1]t8 o%SK2ba%X_saq2~byn'EMDSg䗲uTL__Ḙ'-Yg[d#qK?>xFRZBKԭׁSn74^@aK;={Qe3ȆϣSiQ[ >I;Xp AUmh:xjߟK$0̘HV ikRE7%']t(Zt3lന%75|'B˂ޥՖ\9 `?b<σw+œlBy*qׄ '6S|΀gS"pG->XJ_N!jՄ˿Zu.W,W82bZLhӥ,J tcO ִIIr C<rjƟmbL$SS$UfFIQT``c|5 hήe#jL̬lX00|>84FXsGVh$N_[մ̏a Fwlާ)֌O4aN!D~m $v8i-Jb[:dTlVolpJ}$<17Uxӫ>zW6Şؑ5ad%kB퉺,}nXnJ07jb#V*(Hr B$D|)K\<] Laɟ@V\jCAo)2Viw-(\'_΋{-ݳ#&$̲ dj_u4g58f{@cpQv4{]44 [Ν4K{tMGӧ6V+{X&4#9Y+=;z`Pg;.p/kC޽( 725 }#M_RQi~7T͋bP0>)d:}pO ̕ЕJL,|0; 
A1%V<īM;bzW&r'{en2pʍ~h#ѱz3hh̋ؠTi(LlbP;8M`l֊Kf?Wb "xHk%zYxx]:n3Z;Y,#gF\ɓZN"]!aqTɇBcKeRj$gؕ{|bʯ(y~Խ P}?8FfH3+|MϵuEK/'m[qtB1}>0# fu1E5n##7SڸphQt"ј0hBoɈk1/`pa~z{] 2i@ 2-L^d5a ;g`cF/euзS=臉q\k;M{Aet%m-GeWK2eT1`w+NGo{3zPE%/eEs*}Vfҳ!psJ=H7Sox_*_ JKPgXy'Ii\׉J,H'#_ *l9'd#rzEwh#=`ˀe;9 W4u7Cx&`+CsN7O"&=GTì {+yp Se8qc>r6[K/Dm?Ys`Y?3)`w.߬,Hl3v}{hh=H(FG>+}4[I ָN,iI \\g v|>d^}{R+A1oh1gHeujjQO\@J̐ṵ1iC{K<&א$5M0V&ChW?i*i~ue ȓ̩OoWۯ틆Dwp":y1M`δ_˜|,j6V?{= endstream endobj 622 0 obj << /Length1 1526 /Length2 7823 /Length3 0 /Length 8834 /Filter /FlateDecode >> stream xڍP-C`mp A !. !CGrνUT|{uWFC]j B`\iU-.''''7=یAruC! H,`O6 O (;x\\œnNN 5@0襡ޮ`[;2z0Y1$@`+ @frzZ ` , zzzrX8q@]mŘ`@ rY~7 Pp=@]jp `+)b r<-VT; U"׿;' lCl6`G@]NcX@-ݠO`G 'Ÿ-rn sp;n;.BNN  w}2`WӶ{:Y7Cm~7a ԅ]A2SLق`>NNN!>N m~ yj=`Yx0Ww?0`+d `'d~:|WI{\ߟ?< qU uTY>))  ;*8*iUl0;I 4YB;M[7mvYwؓUOS_>诙UYݝ׫xI㿷&YkaVv/!sC@P7[?ɲrx9ܞz^Rb=a| WW o '!q|FG  { <b>Q~AP-~A? |8Dv]b@?Sn4/n d8 m$dOcfYtmwAMf ^sL܃%t!D}S=jD;_wf ZS;_&&~JSSH|w r@ߩD.Q@x'U_u4|~G[2]4{n쫠Y|9RZ;%  A#R+~{u77sWuݺȌH)/F|vSH|J>5sp+eGa|Qa2 WFٿ\f| CA;~-&]M?Pޫ[I z@i8l=ٱ)3T" &a=ҷ~l^_;m d\{ q4fē j tdvK31C;mk%#q ~y;/kB}Ƌ)o#$Xj+?[l}l {OuV QJÖo%צtoDg3pcC=]{E*;Jn\o#`~Gl N *ǰ 9->-KP.wWKZ^;f3Pl>/1C%S␵??9]ɶ-kpKV;>=˯'5MOl=Y 86q:MFe'qQ-٬y1Jb-s+Eсwz:8fF%+ t˶EϖX9EeKY5R mEYWn=۲ 5gYÀL8n/LD_ڡv+-YoםH4.VK"sJcKx'TS&@0>uY14IwK9v.v\Gf-.i+@9P#'TKQR;d3޲Eku{Jxoƭ*O @;:Xz(U i응Y:9Z^t<vɕG,& 5:Gtӆ]mD_jd'5x377`EwvOCu XNJvZZ0m\[U,Nh7/ѣ:z64 E_вG!4;Rs fgT5c<wø,=R 7AFN_f.E`Qhl#W eԛLonYzsQ׹1 7r# |81$2hsP-k V$wX/ 7E tl4^/YP-#"`Z2Rf'GE jJÊs,7 /A)!35hN'Hk _);벙i>ϰRlEB)2J'\dB \|Vr 1ke&="[iZ lcv-Yzpy+/j5\]wlr'އ/egn_ s]f0ԑ$OwZ/)$5{`y1MTR$uwȼe߁r7`DO }[Wh%Nih<}c#2~ l"OL}iӂ ni;NsЭ0j F$'p$rnmjrnX.O5LZ)4,EpXARq}tߒxkP]-t" ~j) \VdV[']eb `Z^}6Kq1/vS-dEJ`;`F#ULa{-kIIVH(hb}sZ B2å[UjZ. 
."#+) M|.ko]Vg-YbgS8U1uH-_ ۶B%y uEQtzjMӲSIJlg3n+@61(j" H(GLNt}Ť*BeC<ׂE"?2rz?n ;ơ؛Px4b^9u#?/D=|bf->~q%-v>?waNV%"\gSܛ@F}9g UxLsf76v$ ZVxçu5 &Yʘk/T6v/|0t܇m,KFck4s$ ^UK;& h~bsTՋÈ"cZhO%r;Ycmvh"[/|䈗@^N@ p'IۀS\:Cq}JQژ{WYŝLKv'Y}:]:xJ-BJQR'.CYӼ mȆXs\h4\5ZO0/Vl3Q?gY!c'tȘv-^/ug2C UgW#L}rpJȑF%enɐP79+Vu[r%Gviwǵ>A&IcBC*e/8C,Z h;9KSӨUi]HOӲ$^L}Ix0 {A$Y3`Xkk >.S~CZdv^+x] șs:,z$Qݶ@~)=c (Whh89+wɈoG׃ ʥ'A8}o&/VDMk4x\G Do(}_S꿤^OFWMe!!O}>vDK}"=e&g+" GfR~+4~"ZORaVS(3Ҙ+5Wg`XrFI:z6?_nr 7?J`DCAr}T6?M!hb,;;z':;LMjz鐅g71g7^n> XxfڗNE$,w'1:Ś 덅5-CieFq]*ھbQ!i<9\'P&a1ęBiμ7EvkueLp.~)[^f 짝'L 0Q58CFgtDL r\)hVmQ=-m5Rܹ^OA* ~a {KޞJ]:EzsS'ECC޲% h k!}33JbVoq!'FXg^*MNJ'(J#g %93,rԦh_-rĒv|J g"|v9,ײ1>ѕX#VyO+4孵֯_X^3tb <0&%zi 7I\s/X$)<V>VZopGOM+i)w̲ak=6yP1ά-3Vg6Qqgh[562 l5AeOXE*Ge‰"!10ק.D㗳?_A?7<,&uVo&{z‡Vn H󄌉"|hE?GlrPUlP@ ryF^Uxa)P+xR}E\#;S"lRKTT?AMߛZmΔJİTSRq"`U)y~AQ]T> stream xڌstmv&۞ضhv6n[mipY\{_P)29%]ؘYbrjjlVVfVVv**5+W[ /3$7vQn67??++Dg~@ `tAsprtE_)_;=@hhjl Pu0z w,,v.t+WK 4.`l8f*˿U@H`ke wٛU9#diw_Ʀv^Vs+[ @QRӕ`lohl7v7561@RD` ?:[90XP%.r:;xVfK1ssdQrrʈ!Y]\:,y9d-02~|\݁Wg7ߊEll3+SW who g+O.+h?͙僶 ?Eu098l<n>n:R2O"؛;/Q?@=/]{e5쿽vV^FTM࿷Xhff2Ơ63'V.V@3%+WSɿb0hLm@׉ h:VJ؛:9v.n+hع>l4zk, B?3qXD `X$ 6`@> O?T,*ȧ `Q@5Ax(?AA\A3Ai8m#co |Ǧ .PlS[hW[bg'3']Pf1@Y81 L@7cJ/nQ,x-~?S@[1K/GK_ //:- 92uуZinFBnc?ɀ_jP [Gc?=BڳϡphWr ?ꌓ+3`Ij9`Vwa/ "s̅K!6\jw?Yv}e,Χ9a,}XL|̣%5峸8{ǝL}(R5GIyw.Oh 4ELw#=DD_BLC۴Ƈև2ď@051¼#@3՜JZ7R$ P[Ƹٗ)hX=h8ڽ6qFF}*i3>*ٜ˰eFww' I]p&Mo L7Nr NG$$B@RH..ϑ.Y ̻[Ơ8Ϛ/?~ļihlRc c? #%U2v|RBc~xrz=}5fK ;=Lt\v!IA5 9L 50CpYID]Q$SPü(z8Q'tXRH]}j_ ԵN -Aa)> =űn,G >PVڗROlJN:ifozy,COKZ ?04'It{/*6e 6h,HV^ʶ-e%9O4NҕsD[?-$&yռ '/Zd!nfs|[,eA+ǓWk_P.H7_}m"WSQ^L%IY~eI>:|eà22n(V,F.q5b~- >u3 XG9 nA)X'Yiyp|`I7sen tC2fz.vW1*NCb>J3%NNYwYƭ >C 8GSg*Khr~dJw GD-5}ļZV]z3Fs:@<|LJT| E;kW<;׬zV렋i>]Qgb憶/؆%Bq~JwIgĶR5]33rvCSc^uwοCh&Z0{4\Qll;pdc^n?mў d½N}hW3/ T;n M/GZ(tXR gNڗ!gd_u4B/vli_PUkJVwv^ }xwvN yʆC>u% ^J+"$yd.R i,.룖%ZP дH ף!(rb5?qCʡ'PT*z`&9h nl^_m|h1"h4fѽPp zzvF4$W~uLoZڌ#ԘY-0\~4|hֽq}G'wY o~;G?[ha[͒"fDwC.|Ќyw'<}!IN;pW={9}u(u^Hl7g|p1s{RUP~"^ŝO1!؅t0$IrWvA70Z.yJZ9јTKܮ~. 
+E\[iR~,pSזI\=΂gPdP4e*)hAɑj:lae(gTw ̙P,uF~j<P Q_di#n:u ȇnVt"SY[5̻нA&шH !a~0|<>]#JV}cEݼ9|:%;U0 ܙGWݛ LD,yُp0"[z8S:o!ޓfI&H>!Ja (sfXF?sv'Cr,u 1]1Aos@|cmW<h+j*ؤ nd]" '?,4}\ҳi͝j2X{gWe.cW_G>=τ3tDE&2HPٌ;BjUOc'oK>w5G, R=_/uQ >@!de iVh$—iBꚸl%›<90Q&#mX[XP>ڮF),ê%Q$oҫ+W\T˨ D4Cvx䷲Gm.VB/F 2Gb ;a!z>d9G-V\o_'EժPOjX] w"ADtCC)U?չevBO%6p530GH.iҺ\Pr=7hƕdŁ QTr}4*Œk%a"[vrMsE8U=R!&9(xp_;/P։g{EQ#| %4(4o[}bACbx ~ua-뻀8+/W7EI7ԭnV+X<6*繣[88D]$ID%m?R& M2V2鱛m]nzdlq"%0=`{.4v$°/x 2%m(3ku^)O_#MPgS{ǝk}+? ePWez5J/&pL7hk᯸E݁쐮,#ݬ(3GZHďmħ +^9,tB-8&m@Q{-k]9D(1 b{S8,e/ycվ/iB3; ?7R `5;56;+``K|ԃVrN:\t97,ƤS{r@(!NF-b#9ىV^l>juf>I&^A !)*jbzh~1$I|izcVQLu 3 3ƄwF®:[ѭ=ڱG rtMFgC5b( N+9pNR:جRZ]|z`[P^EmrB CƷ Ss$#ݺ]v4n{3/6q[n2R?૕~C~;}pec#л%ez"8 X2^\)=gӋR*(v"9.4W4(C ltI|˒ʊ{ ]p=A;Jڴ/`3i|ݚ=3Ȼ\ʛ1?K7lkCZ~2NSׄڱByK*]>&A+YbY漺! |}(ay]VX\c:^v7HDeme3A>|o&Qp)a ?b\>Sr~Ok ӗ fS'}^lmGumHYf43#ªE'O9X`R5<_鉎>IS0fJ2Ʊ<qU(z؆r%o})u:g;RLkzVn,4C75NM~ƒW[˷P\`!,}'Ӗߥf6 #EyI$Ea n!JV=6X_=R% h̔sv Xlą*lՒnCs4 [ҭн׼'__p)]~ ^(f>+BV=Ȓ9JͳwMʧq 󨑶O9ORV(;wC&NpT:XO-Ni(K[UP{U{ 4j30sz{{#hۓ2Flg 6Jٝ}_ru7ըҊmSmt͖Ȝi5 C1)q0wɍ:S?6ɫ 4N12?uAxdJ®W cm'qBǻ/?\͑PQmA7Z>Fa-l!Öa&.PI3e4tmbOYV@%J]@KL1>Ruڜc* :+[TBĔ1 v/28 cWHKT_ ,((bEҴLwEiM0 l^WJkkƎzĹ4vH >:I2 ]'m tf jzX˥y-JvƢ>2|{4)b ] ?5^g݌ #zQ؜!4P u{4PVs(+Azlj?oeCY4P)ܼ?J?Z__H\zXWD*_"S<%pmxhy7zWHQE퐆llxp1{u9juD.ϪUxHKxp"6=0ȳ<5TS5Csˌ2h{Hp3=+}I+GJFWfg􎷢hZbr?٠B? T8a;w&ٶ]%,{ K];Yɘ`zTQVOLUzsw*j޲b~՚&%c`{A!lb1U6i$E^4fF0xU NO Iu kݕB$j $^[bv%id&0}Yyb}o '}jteEg Ɩ 0T$! 
\u֩oUj܍xiХop5łX~ "Ds}wJ'"/C+GvN$1d<-y㰝\iyf͔p( Ȅ7|{r 7љ#SkiZ7KOowtM1nVK$Ma*]ȣ"gpF"c4?`5)}㊽ IEY#pn3ݖZոw9{~e \_ĬU=]!RGsSr+e %wqN(f~vCb.NXTun) bL:b Yrt{WLpjt Kʪao>":S.Z6K(ZEQ*KڢU&8dTa j|O,P;N)ǫmV{3FZ4}'9jjnOE-Tۛ֬+oRh1U 7م {s  <Yp,b8fDd,ѩ ]r=CIM9ƫJPIK^?fE׹dff2J7cV·S3 #^c3`ݖn[Q>S@gjLIĔ|_HRۊl7YԻš0$~N vGV8_thD<(_ѺbŭTi77̾`%=<-I߼rJO+u2=W0`^}nVZ"$}0CT>G47oiòApÞCrmwa(ӷ Dx \qX% IdKQ/QȄƆ䡱/onl8j31, OSi(\a;~1=+GX;'ޭ(i2Bzn tv.Ii6~,"- XcYyZI^Aݖ/˪nZ!jP|(Yr)c6g#чKVJ|muՒQa"/<[T(M=I OT8n4Hݽzʺ'yp`'C֧`Ah/8}5L* 9x`O;t50Yʜm[ Cc)U 1Ӑܝ#Y!&tIqFk\`KAoOK˻Ў\k8W;F4M}鶍ךMQd~``+ 8rRPց$Kϴn2}Ey{_;R-$v/;*VA039&iIf}ZXxz'aH$)G:-)w xnnvݤsaq~I}901zoߩS*1yrj}ҕݶ@3l]S,?>ʎv 1uZ-k Цhyu7?Aͅi( ^9!N҆mwH37G"hnĶv},pHu.wФЅ]{`S?r1T{RKoewR-G}N*cG Ӹ܍it ~ge+ڮt@%(N Y6TpcxPT` _>lg͵'Rr/^|(7 )<)X :\B"Ь|XY|ӳ[$h~RyE?X)v9}te y< m2r>;JcH:Y?t>bdD 77}aQd>5ɣ;M'sg#O[u~䶈M=?ڹw C>iu9ҵ% ٳ:8kTG/QĊ&ïu6s-m}2u'TZS: eb%&9˞hxa6 >֕m&UEǴ@Fz^p.` !que>s;1{_<&jS9vȃ!@"+ߣdqogFoϜuq;P79MXN?̾--_1z#SFR? on͢7LvEFNN2GIgpOu8ui.Nx~x:VtY?^~>d{H]Yxj)a:] }`Pr{}|%qe9cMZwEo݌<%c C;b'r:aR($Ͽ,ҝ-|O:}ӌ y .ph/wړ6B/$Pp~שU%s% JHj^AGE+BWN"42zxYSQ%sKr{{if'}AuOPwCU T_=_i?c^j &p(/xŠxO\L"9xv9+IrQؼy)(Ai|qF}BHzUӧs؂bHz&A5(zv 3~mjTo!D_8K>n\'3M^\}nf{{(WeEQdEIk^.DŽj,LSb6W= S T`k41:bNM,G c Sh[ AjΡ5A4Ɛb?e`LJ?'mZKwn ~wW:tv?/#L O~SNU WrtV>h/W"s`[@ZUC JlI35DCj XH'P"X$~A͟: ͗j+Z4ٷџ{%}GbG|bv>dKs@%Rw5Q~idZLէ,wG2g! ' "%ȱۢ(xsFyYk,zygyTs|#yM4dz)*ߎ8 .85>@>eSmR1Ha#^^;@m;IDfDdV>1NWijY[k %z,x(~nt_QQRAHt!BC[]= eıa$x6B YhvRB}[ _X9? 
ٜ9 oȞ~{k˭yZ.yFUg+?VY2H4|x77'0S/"?Ag|4~}*|hIsDb$=x6l=ܽMfcW<Œ.͝xQ*_nАjԙՂw ǔBKEϒ]L}] nrBbP!6sz[zO28[>>+:lΏkjVV?IEY ҇iN<r!{}o$.1:i&JI!noPmPn *e~q080)'bAbI؀'r:CDs"={j\r 4; 󅕓km+^ xK&B(1^ɦJ 8 N: Ȭ5!tS:ZַŷW^bPI=tl:;Өh:JI >9_`-eaRgCi%b Ia( Cq`[e~-pMy.(V`؎sMyx +Bt)q=&e ۹?[3q4`S6a.'A?JٸC 7Ax]zU0ˋ)bt!zG 2FibeJo^j,jY[; _L"ZJle9he 3EĚ KpF7 "ς+dbkrƔbOKN'tzJڛ"#* ۲1 ~J;_xDy-o=0D)u-;N>V>>ƑH1CN^lIk4//ûJiLZb~(@ R@hQ0ntnfWJ^9vb22FuTn Cw}KEU"SX7$tl\4*ތ@tE<1Z„!C-1e޽HpN$$ '?/5 ` fEv]Py(G nL:Śvq1D?^__ٍMVPȱpVl0[tHBY[iYKyk=_`Ud+1`>6SnOe!<:^޻p-=u"с^G 7_{Bu'UVۤi=}ָ9n#ՙJSM v˶H kpH|!N[;U-7ugv#?ƿ'%6`c1|dPJ,?vQ?qnbI|oEMƎ-)m\9\;H wrpUj\57)yU%KK [ ݢEٝ%}и5dBۇs*4{>2םh 3I 1?Jer'w'CM_F}& o2Gf4Kө/5 eX{5k$wyGKhͅG?V{km/ Hn٤*Yu/wWM%1dd霉$H}[j RU?yh^N1DPG` nhb!=6*V&5;vD `Ơ=7Y#.c|G%:[m-YV,K9AT*!]2%;Dh^`'r2}}먪bupH6ONY~ zΈ0S+NЁ 2myG_6y?o_ڌk0Ґg&L.>aY-`'TSeI=p$ A[6sa3k&3h@ZFP ,i?--\k:V4gGZ (>BmE* &4j+K"oHJsh<\ 8Ewٝ/fZqZe(s]򬹅rhB8a9LȪ_6KmC1mA%Isf(QֺG.&sH>JvB9:PF&:k$^GI& {l:-P5x mRx1OVUf5vYg+W%eWdĜvKCY/"X4Z&E|$YW<#q{>wΰ(ﮰ L{`𫍲7s4kV(t7t،Z]79/g >-^)yҘ9XjdW%}q3Lu2+Mmu"Lq8%e'g)mbAuomTSI%yXbgG2 pf'ݩ ΅ Ш /j|Mȃa5 9rS^1'?XeyuzLo[UK'-Yi#爛.n~OP5^wy\H4-CB|/`9)E<4⌖͋ `.YϣeN2*YDrLb4E\ <0 4aAm&Q]8oo<90 endstream endobj 626 0 obj << /Length1 1608 /Length2 7875 /Length3 0 /Length 8911 /Filter /FlateDecode >> stream xڍTk6 C7HJt 03 ] !H7H+4( ݍHw7~9=}Ykڽ}{a搲YaP7'P @ GŠ vC`P?,d\ ĽL7TAJn^1dAn+*'@ ñd`N.[}>̖,naaA)G ` 6FxWf1[Iݝ焹H![vq[~ P9nc ІY#A.`b ]\V`}v @ X/vpsr'޿AA0G'XCuyNZ29a 7dqot@^J.'q#ׯ0cZPW}=>\{(@a qv+ms/WfF@v=,m~%tVrsX߷X_Xppqzo X"`b_] #=_Lf:xk^K077 |;w*BaῪ?M߱`1hy.$Hᷞ/q➸%PݯMm* ZE~6$.[i@/_5`pȯ ~,=))堖0_k/<\xs7\Ppߞ/Lx\ZD /pY Y\?}`?}d=_Ȱ?}d? 
xp!B.?}^hr&/.0l5= RҝcrsDAW8?q2Vagis2d Jֽ㶌$l\׊Gnq Fa3(BIgnj7q"λ^ _78 i(RYȑ4ˈ1.F):/o#!1ҟ/=WbRdtw%U( %"'R{q\RQL$w)+fljffJK}mڄ*QB"0ywq ;<`[jqtc$~Q)jfb BWQc< hy@PTU~r\k> -HTFVI\ {硓we+Sxӌ+ޗQ<$5yVxaڄ,N,q-}*M,}}:2ekie,p ` j" Sgw*bf8eN{܈6lۛC-KO6vXo=, 7u_:I%0E1>7YΩ .QcXleTةOdYf%-s8!M4ىGSJO9YL.ۚ|FR6!kvpc^h|yh*euY;Aiqfe*.ĉv0e ))'y9B z9,VWQmqTq^/q,gR@7_1$1zZNk|ZQ-ԝLH؊6FGߩug{ wL5*l-ߌ-, >?e.cAij9I+kD8Pq 'HְnMխ r ƪt7&%a[vє+&|DS$1)%u'! z,IGM$ yW%`VDI^Gզ滋 X1k_Df'9dhR G#(?sݦY;Rm 35rvb9^Lpx,I~JmK8[ZP %4m!Kɲj!_-ݧd6~gD,K|ݽ"8| 0z-.:aڄӱU r`r~%R0@Ӝ"~N U'ą-2̽ԈNLN.C:׉jQ7;vZ!,pF9^:dAV^0؝DfNH' 5)9;>_ҕ6RE0`ɥ"|<&)V9sAzv=+ QO#PzėNg$FMRsgBț.1Һ]$ N(u_ y >y?џiz~2HK/GfxVȑUkoi#(uʘvIhX8GգR}]MENR!ķ3v$U^%2/o64 gOgŴْ~5:" ? 9FfHNј6.>DJ}vGu֮R3sErL9|3dc?S?*#5t6}K&ƲK^xz(Rst>ΫInQEafA#4 KNVoAy6oӈ-O+cNz7S@ x1MenM)O0tTb?x.]SPH?Јv@†ػG$n_6/{7_L߮M6VHo968dEo-L7ѯN\ s}27%10f _|<,I%Kn2@1>Բuzb^tU Չ˿h>Ym(In%Aˍ{s=\X{+A}9>tNj}Xk8SJtS8RELs4Pb$AiKEb]UCe=Kv~J Ywa)x`AEU׮a`hz {Hf@,$Nx @wK%l/w5pĊ)Gԑsn:p)Cb7!K`&N`FErd|9^ 6~ʇϛދ% A>˴ƐL@R#9N2EJ9h>ퟄK<g~UTBFdQL+r x_ߡ䚤Athw ,bJ>q&0̛L0׏#\`5WNBZvDw[8=gAŦ*,ѲC(snHhG=|{͛[޸Ќ5*qYh .a_O:V1"kޯT.3=(.*f[dnz$Dx3 Zj];q1qH\ (ju c\ЭP&?P`Q1Bs)#b酞~˫kY& LE(:.VISQ;CU bg"SnwI 5Q.gZ/JVv :Q9!w Ԍn.?<̽{ޖwǀLMcCS55k|%jW2ci,*K658AǨWA:1_FLI;a^&M|]{Q#ß΂mZ&?ҝ5fYHSjfouKPn,ND#žZO=tq&bq)74LnG$/WBk8ʇoʒ oS[)3,sJQ/Q J&b;59>"[N9ִZjs8;MيRYy{ 7kkSlM˂+rmGs>xM B:@5rdLN :"[j!3HQ75y\XfE*FC&ښ2Lǘk,׳KS%ܠF Yb5BG0Uc$(a. !R` v L*JX$i< >\C?^@/Ѕf<Wdmp ;율e!ҸPeTEjyL9,\ |X+k'/? 
|*[ 'ǓC>Ox}ZTW\)ϺQe?l<7Cuc=k?WAC:9nAPK:u òO_:fu`sPQ\廃WCL{~S ~ԩjphN Q۪U:ͣ쎑~dƎ"/j` ;`b|CG³Q :7r4 )Zw,دKYE뵆Oi/+l/ƾj0M36ӧkZe8_g4R|k,cWbԜb 1KEDjc4\>]"p;c|Kwʁ&=˞rZSE*B,r9Z;hk'M m<:>N[,SԔlɝddz<"V5NDKE&8ُ]f]&'JnvXKpl>I/aZ ‘q'Μ|WrAx6׏6258Kam"IMYZx'i3pZ%U#k)zGQ%e/(N*xF^8{ݢ s{Jziq-Ɂ [U 0+WB [uL-\dV/=x%R/ɞ|EyۣP / nPLv({aجhK5Ky:כ8}_նCe~]fOL*Mm\)2>< ;~LoAGhWw7[D(ŧQlUUev!4?%03<.%Bz(~FQMX>d 5({|?&4™['k@HбcV]cዉר__gJVOu~ۆ-F?76{Ҹם\WFةy+$_Zp+$Oo0i+K뗈B-7w·Rj9LJ2AçSR9tw_޸?Ѵ]_[S^la)É?CdpVzAf"m?j g6L~BiD'kj?Ƿ1LפP`[飼Yܐe(;^vUK耞,,U 2-"uDf:v[ ZGl|MT4 \:y åoO) r\ Rppy>gKK!s~ʂ\TsSVDيdBzݼkiWd}]I5ޜj/b >Xu04Ιye1 оL`">{4)w Kze6TJB1 2-q'81h3Gr :'ζΟMH3/}G 3Җl(R\2"? 9#d:1ʔ;Gt=|A\PX6Ssf6YL1@]Ҝkc( 굸ֺZzn0Rׄ$1_L[y2$oڎ[%iUbqSy$׳d..Y[a];|^Vn"描Pm,o?.04"%g  MNj>iе?M= v_ ։(8ȕ2s<81 S3Sf-cUU C7ή| y2ň|;; "8M<|qC؅f~,k#UdLccr+ EjEzf^gЦGƯ_pRpVG,XT!"9VxFn6+sS>F2&*0z$3gVћpOtR+qWrZa*m׀Ї,Z[.á}NKpUP-z父H?CɸVDG-xBVs5K|,:_ګwHU\q,`_ַuPFeTc&WO w=L'æs54%>6;4J.ݸRU HLWaZw6W?M=0Tցv?Gٔo{) ??JHvD^Yg)V(WOAs]OG=or yb0;Ìݮ.5M|rѱz7diԑRv޾Ta9\rz.3`АNu_9EVrT Xqr Y.1,[lD8.]oM,hj6^HCXC-Oi2\j*EBUQxV.%s+ ,Ԩ=㫯[BK{o"Mm>MM֦ a] +^ǑU_WkR$^.54 潼Z1Dя! +hM [ʧ{UE_moun hd^E׃!|WJN TA6ud07㇮$^H"z\RSB0Xi-u.UUn;Ӷ(vCW6頑;ƽ5dw\7Ud+9N惫_g8>ۑTq5݀#M+rh`غ%TBeID]ӀY}TIBIs$oHπmn*P~gGB#}Sѷlט +R13(D]YT+tLsKt'wbC9Co LUBmũ\q\|£=T.ETTmu endstream endobj 628 0 obj << /Length1 1413 /Length2 6039 /Length3 0 /Length 7006 /Filter /FlateDecode >> stream xڍxT}? @] "1! )]Jt+)] % Syy߳s\}}vad*A:B50PDLoj Issprbn ' D'Ah>%@)Y@\LLoE,@ Ew([ svAc22B*PO.PwLD00EaP?\ɻ>>>" wY_CL(7U24bn 0E:}@PF z0:zC(⏲!_Er/G0oct!`g jꉠ}BKG!1 o r(NT10U @DP0EYQCChaP0~ A}r! 
Nʀxy#`:`D9CI1))I  ` cj @z0e@`NPq =A D  00u!#:9c ؈az0kX U@UU/ @X\ ,#%RiiI@?`:'$@O>_kAe0 7I1o3XJH Qp; ^h#1oUKՇB`^AmPA8c- -"v҄B!F04k!Q_7 JL0̒0 CC#!M\R cf9I@} *@1&LA''bn2Q0  D=0SBB0 ׿{yzb$$CP0<,"NG8jwl@$=F Q_.gj4UrALΓҏc)RYlpmx,"{%o>FmݓJ)82 *{؏9V#)>U2y:YP3qSpztVeŞ8n}9}:d MMiё<Ci!!}O*H 6),5֒,0řd!A^dFLo +ˀZ6R%dj' a wB.MP7O_i.XTNC]dX^նLr`{ '~:k:*RiM ^:uo0%~_oƖvӷscj-!ΈxUsdڇAXsA^Y+gSWͅP&#TT 25!<2$1_P"X.Np%wt{6\Z(iy|ng;) ӼHGQ+C~*ȞÇ{[coFR['͍[i]ۋݬi@jg}giwy{:;, YMr4OJےfϾg5֣6q>g,p\Z3$ 'U,Fx;!&.J,``MJ 7JD/f_|bbB=k)39kKV%Z`z/_@48ݧw|q ͠S-U{k8Y,O Uk @6<^n/OUmHgKϫW}(1^V}ksFZB n 1#٬Phy]|KE!g137o:bn;Q1b$9,:~4iS}U۰Hy/[;<*J~و7ѥ`9AFN2}=4io KF@Tкy3TIcXO wڣL~ƥmbҳ3dDZ$Ͼ?>=u Twy&Q1;bJ܉Woy~se[0۝zp1W ~T)ϛRÅ>!5-oa &|c[}dG[O'wI-6Wkkv9fC@ثՇS t]ErIW^vX*'R KވlušN j[QqࠀaLG,90`Єbxiq\@%AE֑ 7kq3O6&{@9gJsB$'"xZ TO^2|dSѫd6OWr% 2{7dH(隴S<ԇ߮\yS&rԔ8J|83ǥFKL6[dvJԸ],LZKy91(z9~==hd+b=;/MHE\=7N <(|()̽ALm? @ Fe{7Y?u^6-~FQPoEKnf`d@sA~[5!sǭa`ܤ7{DV<=9z6vښ"|wTGk4߼XI{+ZsJ^Ĺ҆D-5xnqjI6T_Ƨ nmY}^*K:1[/{(| ୼=e(a˨:b}м?w/"-VmabqLL FW4^e\hUHo{ՅwOh_`&Y#b@INX>n.҄I0΄0.1օU}J*Xӭ;=%~f9 }MCW90-lϹ!,Fm9X=e + *pX&~srNQҵ@׵]HRX73IN.\]DE0yy+?eݵ^J hHz `VGߙc*F)k5Ѓɝ]c y@;j0޽z=؇g^]A4"ύ[`,L/FPؒ~"VI8,3?o`CnUUcq bZYWĶ$*,w#fh[zT+~*;{+v&3p[oR=%m|J ftP zZ%.I6&e$( EȎf93pۨXq&$o=[JCf1qrx7٨\WxWe&Xl͘vG!ױFq;]#[%@v~^=ElO|}*ծ/&GW^.On"c~t6O? YQ;)T6qs,dxv:>'%L vѮJ|-X;|ʋrI]}<: >J>|'Tƃþ̸ݗ R\ƣU,E2񗮶TS +ŻEJ! 7 9l?w+D]bj&]> !bN݃< H){ȸM)Ek„;wrs㲽WK{ vj-VKѡk|2z]";ܸIfv,j9鍦',fi5SLAQ!Y쮛/'?S~nk{WU Zy.DPBܝ`t鬭 cuzJf2NciP[$aլwcfBl4*x$(_SQdqK#݉(n4X_9! ̜(F ZG89ˡ֦l'ڬV!;F2!'\o2O,ȵ0d4\Haq'XsLdbM?4wI7lXC@R.8~p>xOvTtN}/6ns4nފ-rj@G7Wl/r* ,k<;L'.Ջs)Nm_@JJѮHLzKw" U=(.ԥ\߈^?p)vOj!]hyTU]ޖ>@JXKC>X 8eةQNO_wT1~+릅:S ]#2yρj#5+u\G^-SZ Gԧ.ߝ٦ΊOSZ||>vFj.>HS ls% }ge[R^{@ͯ;lv*DCk3,7I}2k]6*y \9.\(zmN1De߁o R9 YR8fC[J햟6oRӖd:JW9x|ڏ+e Cp6vccorf_zr(.I|kL|j7<.[80p8Iyo#=B3U穔k뎂\η\s,o^zx铴(+By^>ȂĢM/6YCE=L._Vr27ygi@S%/PeNxiʾd}r1~tv\g *t8$32x3bQ,=o@Su~AK TЧGRY||&;d(>ݪ8 y:زIE*k I {T ٍ痯wT$#:+qyJWcq٫ H Lf%3+jO<+O! 
||.B4~YWw-,<XTu(/~>= o8u~?\R1!Y>Pۚ{I6nl;U4*vqrULgљx\oq{5&w3K{0xam\> stream xڍPM-w  ][p` 2kp '{NpOOv{kh4&9HcbHh)pq8999ttZ`_vt:3G3 l6=8@.v.n'')@g!+PttR`ks-L.AA~? gb?w0h:XA0*spssc7;8[20 rvY P5Met9;}NqX ʀ _>;׿G!0d3 {G3b ہoea0V@3;s9ϭd%f p; P9(|2K){{Ec`g{p}7׿bi KGm w̳ ?6k ) w ?hy8tra~z[нf O#t..%0Y!lYx,?.fOW̡c.7;%%^lܼ6 /' ^w53G v_[v[k:<+` 9y9-?\r3OT w$bglwijr]`S< 5* Kz`f ~V4;'_v0TT,lR_v? 9@0Y{2 W,?]2 ? 0sv6@g zJKbpC`)g>+g?.!OP784F AſsW?~g s+[>'8^ϊ>7r|\L?\O_'n9/[,g,^WKrc9 ;8Gݒ!uVzssƝ9'1+f XdGik)D<_ߊde=/rUvRjo70XZ}v.>L/W6G( 1F"QH1InSSޤy7JY)D?t)趺dHb[>0C5h3OO4!&#JPId3l 3KO*e(*+#ECT˞\30gM̘J)`J#hT<5a6 fFrF;G '>MN>7ᰜ 8(y;~>\!im,Y_P\X,zWxf2m[qIvce]܋>d\"4\={17zh$ 5z {Fy_dqL]3lQ3el,^V7{*^[SzAPw=}=72Kg^=VlnywZVW_<4j_W b3/Igw ͈H19M2,SM%>LS+Q_*M)1g/0# !r0Ҏ蹒1~FxT=j QO"xxnak$^/LsGl>lSQYx#MlҷmJ5^h\VxTYޑPwx1~aWզ"t!TKhS8R!8]+e5K'25 -# 7{aܻ2"9rA(}c??*ȏs,TϸȕTrL@fi3/Ďdm``_ZU63}MC1~nmS]K.HLS/|B_ښa(| /A.vq2Q&cM+-LlFËf93:ht%.?#bRwxtk=xzJϻ-LqF,~nS1z!*r# ^Ӵ^<-c>-{ZXŗז[x9] #dԔw˥q%bU2t 5/9EAnN}!G w[!Ty>vN q vcvA2@U>M-2滖WCJ\L4wҰӻ]-% Qd!yo*FV-zvVylsn})Iz_"N5QId8yRG^gO.[D5M9MQx Kw lpQ%6{kfm-2v=K+WH>ӊ `gil7ʜן͍|4%Ri\7egmXKˡ.BȠ4 f얈h}?g*}>Y>&-ʯ(/In|Y`}Iv JQZꐌ:%4IiICiay[KQ*}0ZD˺Jx.G%]R@%r "@7.RuޫKjpiٮę7˿qzh)3Q{/ . 
U[q)4M!"( ,5m:6, l`uqDZ]F9Y~pv0@ tytK!YP +-HR`ecXSlؓ _it:G('\uN~A]Fm8tu4JLآ;o# QZi O= :5:dPGD[6j1.AnYmqAzXC]:vb!DpK2W|fFUk]`-Tk0sG@f}CMgx{ʯ*:.|]jG{@3W}5T(#qFqIØ0$rD 5ӎc-YwG=Ztu$muM*WqS$]Do^{x25 kMc:Y#mٍ{ZG:%xG V?=R<n6fԝ+S&l-F/3м4 <(b3"t١aؗpǮ.9;XdrBj fs-T|e,A`{7::QzzOrjl[o`m#qr; +eo>ӚndeSik֫C#+<:hKg6zҋTظҹSHTx1;^)zz`9gQ+U\+0Xح d4밺{T؞'h q6BijlFtƔe%O-Xq: UIab~{ԅټD^uc1~g&u }6Dd;A@?=KA&-7AQlH˚Ev*?J JXQ?j5UxdEVh~F⧭kMT+g\, g!/jKܷG#Y7y9X\y$2 {DP?Jj=~S 1WcV*ʹ\(TiȶW AMjL8 PL1gycXe'Z=(_?~.nrtF$ͱZPϸm`%Fq\yG{F3@]ke ޡ38z?ZSF@o;$nQk<>ށՕ[N%p9F8qmݿ$v}ӓ Xvum<=Z&^|Z8Y 5ڦEtRxQ zݚs@Z꽽շAtۭ@;?¤I)’L|_rߘ@[WS].d89nOȚTU'bHaִK(גMཥ`@$h=rbO?N>5"-:CX=yfGEBi /ẁ4ш.079@ h/AmpΏ lіkVșɃ*v2 5'#!4[{f)2wxInSKpf"?mg|BVƓҾ&[f ˺м1Kj$T=[tCŌm~LKt;\ۢ*{ Be^G0[_^qkqEޭD./?9^ c߇W `_Vb;%I t/!H7Jvlzp=˛%PD'u MTMs,-[O&$&Iwwu1`Qu7w%X&MHjK{w)J5(%J,QD;~L, i᪫lMn\0(1AҋKeIJd:Fq7SJkʉZ;^= Dqo+ QL#w4oUR٬/[%guU VMcP2 B"0Z2OϘgdp&45G`oJ SQD8'z/z;(#/aebRujV/.SЖV;/PɇӐeJ|?:R}:MZIзe ޸VaYh"y?1\M> [tW1w I˧66pK겏cCY!@b<'-?'fѳ*M$u2܅Oo5&Ԇ!U H%$(ؼJs˜ξ6n!rx3oh4 HMj#I*u{en$mtO:< ݖj^&n}iб  c2Ǐ("/7ޫb^o8R<{x­C;Gll`+C&/Ӗ-+"ev/2Q3aDGG'Q{‹:K׍ǒt$S̀! U}7=-88£5Kxj-4'T2\k$ @GUI}|3<ߘY'KPB%1|[Ji ZeKD(*Ps26Z0U1˼ZOMS$9?r5&EoynIpռb!/ $9/>4:¼y諫Bz`^WlsAQ!"sV^|5J+h]ڂI$b+\Em];8.>@ŜJ1ѕ>;\4W^ )E&QþLUM$5`1aZR!?=78 E ;<<\p=K?26nf *KfzOY,;M jyˬM-uMR7MGs/=}OLV? gkx#>3Jᒷ]El^0: |'zm7_dy7]#SuܫWo8&iډPO3ѳ;/N:2^$!Hw4UN7OiĕQYrgߵ|תv№ _b(CkIk,:#D$GH&# Z:DY>2{o] i;eL635x$v3,e}i"sTw`5~VጏzZU鎒x2y)t`6O_!'ɇ{lJEѶk3 bOms 3Or# Iq7Ĺai_ ZZ_~OS˜?~;w3aD-r-p 5B^Y|FwZZ,,l[t]U͸&s^ĽFj-^UQAD<&{t@ uuՂ"\sZtV:Ьi*s~^WNN˹l''i\&ǥSz{ &ak4jwvChUW4wCitPDqI{QƔ~:m{zIarw+ nYuQJ#g@l7&-Stm>,JdŏZL{D^^k o|['0 ۽|^n}Z=+bU*.V{A{7b졉[[i9'VBYJSŠOO*'"~97g&`˦b\d#hG1ÚɞǸ\k^]%e s#5wJ8a4OE2Q f-zVp/wm .2E)Dx{"L#,:%988 OA[7;0[|wb~`}EJZS5L'po:y_B=xذRxLGk 'KވZhJFM2P;/Joק,^Dk/^W;4R =|]{?9}]&ҾNخO;U2i!pS+|F풼Ql"Ƹ|^^d^ G`;2 E0c0%hոz@7#{VS'?tN aJI`|Xkq:Xozŀ69JM{drp 6}W4.@GF :ClƣFQ=š1k}WhHmdEoJg8jMc[O~zԚ͙qDe`;vR,:o\&:i̕Y.IZWsћ  v1 c1ü7gw7%N l}T׻ -K}1T1U&$~/8*\e+EYSvIW_h^z j2 -#MR{euZG7 GBix$k|e 21 w^yҐڪzwU^G7o*h$c B7͑opB/:y! 
r: !1weSgky&\)caf<&S@#0G,32 XdH_e qwDg;?oΏ|]v0PTxPoMn=tr2ؖA% Q3YIۨ;/p~~Y  ji%:k"Y\4J"ި/ 1uxom\h8ly$Zn=,'6.Tټ:#(' 7A':܊ȻXDl>!P[PHr)Zz x/ѹ}rApl`y/Ҟ$OƘlݓctQml2u^kW0ԌQOPmUaI0@.ʊP>0G,L$RPē[d( x?LTo/J*Fgp-6%0W8U=97>w>/DF4n?xBH8X`.B [`lD,+q¸$se.O2 =Ln]dYPI }sN{"`L xa(c ߱bNʍTqs{2>3" qv@'r$o`GVBG"Nvhq]A@h\X Rdo'2yQʑۡXYT,DѱBH^vYQ7nkj+{xJ}* 2HdЀpA-$+d!/r]0Dc6r kZd;5U >0'\ǎiOfmuN} Tkå$]v5|mu*w °˅|`n汱UmrkEȡxUeF~Q`Γg Z](ex|EZLd}Xh.]X q $ytnQr9%i(I5ּ(^! zJBCnSP5BĥGC?Lytn^Te"CzE1DCe?9_bV6LKlNr-{(t& 8N 8Wq{_mtA0^7lD ^Աkg֕K@ׇߠYAim VAxk)jO:;15'ڗ(8醰&t~)n >K*$SrԾs|z$O tgq8z:B%Jy}Q憹N;|ae+97NJSaj'ha6a,`,N^J/Qw2{ΏYM#QTQo-bUK~ipt"WNz~#<+?UfXB_l$P *>hQbҠTI`!*Gwl@퓮owu#q8sPbk_1X}ND/ >:0j%x2w7Bq:ow&RѾg1ۮΑQc09XJ݂֕N?`I}+əQ ݄zg\v*fb3JVB?^ua^5}O'6j0pМ@JU:NցZIWľ3qkkۏ4W#B endstream endobj 632 0 obj << /Length1 1947 /Length2 12678 /Length3 0 /Length 13887 /Filter /FlateDecode >> stream xڍP c 6Kp\ww [;!8r9jYݫmJ25FQ3{ `aagbaaCTm#RjAv|b;o6 cQ b`errXXxCwHL9{;3"V??4V^^n?@'@l }hjlP7F vcfvssc2ufweU3hcd-ј) j`7c' `29ؙoj e_d tGdg``DcgxcWc֍Ro= cF?Ҽ-G '۹{0}vnv^A ;3?0sq`ְ9e%漙YNv6t7dO'|oc}@/D/gcW "++ d -@vd3or豼ɏ翿ffog+fR{:^F6vv';7 YTAwHY;s{_;v[4-s)ٿGX8YL!4GWߎ\llEۂl'1^lUl=7T^bJ7sXh 6&IǻNd1XlVy'(|:vՌQܤ 썄!܅;ћ))yxm6Lyo# n㬵KsV%.,귚>qwsWX7< G,f}iM"Ta#0TtP6K(9Y7>E#qxw3yk^u%UTj|y~dڼJWbm Ui+c# aZTz TͻD!*>>] 7KVLbː6ō+ӹ~YөIŞ:YCarŪw0b=?ba/U1Z'meskF>EZpVjy%qvSj7Qx)y_Ybn\un5'eH&h1zPv}OC}ՠK%Bb_K#y;c8XL!e9.\NPt:?gf}̍}Vݔ%ly$knl(NwYN[1{>T~mP%\G_0K+Տ̊$򚊻5AK9> 1^cmF.5,98׺^ 2[J_ e*1;%7 )n'$/ {H:nƞGDL$uG*8åLض;aLXc'Mreò"{=4ND1_/_;J=,y*38v]*P{-6U3y$^/g ٦z|HcN `zStKraW_SCQ!f"2F9׳TlEF4< 3MqI QsU\U%ߩfVrp-zE/<,vL6L{$WJE"6(]L8A cΆ.ۧB$tX|,7:˜50"hIM$FFߞGȒTQbSMK>du8J;ϖīػ<#ۥ4uuC* LkmeDFlU;uDJ#"`|A>&ФޘWYesJiv*Bj?Zœi <? +uRzH94 ږS&|/d)ѧN%TybyULk 2e,_IXkp5هHG6y-z1N,пV^Vx6R҈%;z5`'%z/H˽z2fPwRGB(Nm OB pox*8Ӥ:[c >IW/zj}up}啬]ZrI&idO5X{9N-;˜tJGeD\DM)$ݞXLZ л+>1]mq뇵nCJV(0/Jh3S%Z?*_V_5אӔ\`y_qpcDyHllDW{m+&{Wyw=l? O@􄜟e-EX5`j:&sCna2z7o'2Tz13GBp#T3|c+vr J(NYɒ* $:)YV̬bohj1EJC D0RDծc$26.KaKZ f1ctIdx,`VE(hmHN . 
V- V]67o.z d]S#`\au*_}qdx͎$ SRwm,Z$'ܨda';U0;G/pU!_RReG3nwLrFy׾KOX?.h7kQN=J&̷yWm."8R癑 mXҦ>J C[1>(sxŋ!D "ܛL֍cR9ʞb}k3€@aU;R3{w CqbD[W}3;.;C^SFJF"$G& w2)z*ZׯJӝᛗA⤩S6}MkhsMȏ*Le@2v>ǗVp{'/A* jM]*c#̹7SRӴ#)SwoKpE`VI oI=·;m%T.āOeCPD:I&Lg ̴s=cG뜏I2xTν1gyd^'1#W=pBk6ONvJrG/QW󤪭]u)[uvG|8\E[qW&\ܥx&OIkoXr XM#X䃙&{QλM[gs:k,l|cS8ujv/E‹ 8e7npۣ&5],~%ΏZ:CIIy6Th&Ȝf7`4swG1' }ۘ]^kR.ߡ}OEvl\T.5V!hFFaTd/lLpo,Q+*8አv/Ε\Q`$pyۊWIӻ!u4OI?HLpzj-ypFG*kK.RvB ~lnLD>$LvsIZfk䧾)ydP&0MsﱗS)ȃnoD]z!t-``MVNCd.Xt,\h҃YhEP6wL*-ё(ę`YcCuBlv B3l}nt1.@EP)rLE=Θoi%q*(碒I]uzԀK/JEMA@So!MG$/$26|0?gQoӫo|sy`ǒ).iɠxKD^%0qbo` ty򧜿wK/; YP2"iak=hԖǭ܊XVi!+6eV^7p ?N[F۰d5g0y֮?b,Y T6'kghnpڛ8 vhULv(LGoDCnQ \OPF| PMM e$8<Ѳ7 )F>G%Yxc´k~ʽ%D#6k9ʴ eC2ۼ(&<ɢ+PAJ ^jk<E@;(b;O@ڵ)`+a+TDNzXr8U%ۂ1*&z {v.%~s5&6 CL(GL->K󽦳ϵ^?+2]O־lNx.] <"MB\~)\p@enIK8X<k}[[*\a#U)Klբ#ʌT9R G8fi<_4kZiU Jc_]eͤ"+^:*;I9F]&)׸^ oRj7>]+C-F)s̶"*P ٖz!8TI*=_d13_@4hy|z> G%Y}Ygy=@0FOYKbϵj<'^ :Ur8mNa{ܴTQ ZPvh-n[B%L&3|}5KfFDS &zo\AL]Ӎ=@(8Y sGpln>ʗk+4؊=K53.Q;{OzYwS ʗ:U"FK;MZUQ+%IjP1lPq0TB'ZؼeL6֕^kNFk6wiִ[lr$1i8Q˃I|%,}.txyRQb 'ǻÔHGHsf}}?XYvr<t}l_DPÏvLb/CyfO![^L  ;'.j3MOK \ua5S Ɨp=x(z`S=qFNaR)~:ӯ4S pmc Mok%/hkҀuz1H4q9FUF MM23vw' 컶/j fd &+4vZ.6@i$ɉ? {I3nZ@-3CŦ-++=)?vLwzrkm11.|n$N~|j^?zkpAD.?cB A{f+vA.F^}DĖ \ow [OMA(j^fLz?LR6):0\mfvXo8E@eK O*f'b;DfXz\Zktle'UMQvnv9d('EOwYB2^l);3YOBew>&{4EfP*:)3׌U4&àzh>;?of.5{xpiJ9;;D7f=/}!b"6^CcV$6>5aWՓ&ϱ}Lpwxsf*<ǒn6zaGCݛym$"h#gZf' - -~'[0Aj&"JCz+>h_RuC \-Yh,h᭐}qSd=eA=JpgmH&-.lIJ:yi{ڋM´hlyI~I׳7wQeT=#?5 5# aג 4Q[ s?VVI٬{xb':U[xN:,_.eU ϓsVj8.}E<5'v$;qWwHe¶|\QITʪ[y4 R!K<%˔_~$ r>:`a{T=0XKr>RTslpb^[9K<xWAKk`s<:^c>15aJ{-c|)caA>}9/|lm\hb#o '>k$J98[RWhK>kF'# /_b/Ug]ފHM$7!'ы; yR 3^zO /G joZF-_Se5S ޅrT۪[UNEfXiXuSPMwx-Nw9:?,rHH+XT7T |M-$z,껆Riʘ{tlO;n 0`mc>ȽO]Y(Ml6B**|'р~䤝Q<@W#gWɔYX:}^ gVB1;Z)a@e^e!M K\HcȼV*=iV]JO>.οB?e#]d3 Ew}m=S+b/fٮU!}2Jw. 
g(we{߽ L0vىa,GB%頋Rit( rq2^H V+tQ^9#00TS pK5; ^'2~1Tvu>5I:#AEJa3}|$]%m/) l<ήslf iu ŲH -_(U-=6,Zcc:K,=Ey:ʌꗐS #+ xvJn=ix(mS)]7Q .ċ2ǕP 4|oz`J;E>3 R4p "4LXx)7D-(ߓSZ破lVy.bPTVK&GmH|оnzMTw `] y@u¸eݫ$h8e6gŋRDtڼʨ+$j~\Dž#^|jĶcN\9 ;3:BζOZH$80u6%JEӱ?_>ZŶ aXzI5Y' e\J,6#5exyAC$a7^& |t2t1<E&O <m0-|{߼?#kVj$X+LRa̳?Slexi零sPymVCd쇐Zx!&;1ˋ}ۉ}M[cJs88~KbN| ;n7ym;[%a-l9xyY6D48[}]˕wr7)B`D 6ENyP,hﻸd,&k s7AREw 3&jpG:_[h֗bc!"}"xR5@Fs9CFOaQk±^eFϥl<{'yD6WظNj̊,% _^\]}xwbF &ѐbK qp\) ,s).泟38B$Il8[ƚV5cΏwr<*;_d$}ѽ?f؈U^ވ=~F&p:1GIbLkS_-õbkTlF|jzA_ |>UgU|/@wŷuU=7Iﺧs0$Ѿϵ9B(Y9a~?s0`d0ICR`@+`z_ʠ\ˮ}n7[uPɯc狦^bXC\ ¶UM}nP$j]B[~j621ia\µ cT,GR{=@u[p[v'O? ZC/0Q[Gݲ:6D-C+ NrK\Gl)c}vEMլw #t><<𬮫&fVm[b>\qBsss`,zV67$<⊢?T؉M_=@ G>4WR;8E_*[זp-E]}6CGJg:$0wдMY̞lOE7EAhYd/%,+3g[P}{;ciHTvpvr_X١q(8x)!pg^ʺeYyimMvvwn|uتS5z)0TU^\.Aj"AMƔ^xS0dmb'+oW ubCn60nt3b" PD}nPl1G^ 1,;|*/DW೚_A숖Ċ3hLn#*KG~[8qSYpP¶%%,Md[}V菪U]tE<6x\ ͩ `r٢xR/ bB|| )Cv&UPVlDyLf $vkm DH$mO8+Q'%WewQ$u+[83=FZ(DoJmWD],EI赃i,d\()ϽGRO1ʉhREƽI %K 4I: 6 ⹥rHil?|Oe^#jnwn6v~j s3/$L}ĖUHΗat X+`cL! * S; ,iVS/ iTvmBR 8(=.&'+,B'zFg14'J 2HdAeFqFI.3$NEc{CWP)}Ez,Fy:1*go*/N*zsi2iZđ%3t)mc^cI.b:P?IKdNoKk U>Zocҵ!lR]* 3=iΨtBe #< ;qO5qq{q%$ƝQF)>""{ WlG!h } e4fWJnKIǂbPUu\?ޜ^ ?Ư䢬qI$K/$P\]PX+nƃUtE^Io_mZ!8s*V:kazbIo]rnq(TIz?M̭MT˿yq儙Uj6Ѱoр-^k~YÄ ͛zmY3BY@i W{;Qh{[&رAb+4ȑB_$:T\I怬1Z+;Ysڤ |'j̰>.pjB~N2Y,42icDt`6 G.yN1@ gwGLz`[bF7vgV6ҙYR?*?Uq k>~}2KP\(AwM+W&n++F( D$q6[J$F{Z*6x2ލcUk:1u2u'Rҟ`vH-&cC4_#;\\m09eÊ&lɘ?-  endstream endobj 634 0 obj << /Length1 1517 /Length2 2610 /Length3 0 /Length 3577 /Filter /FlateDecode >> stream xڍT 8GAuyc͖%5E2f1 3XB$d)Y,9RҢDYC:_R]]s]3sޞ{TfɊ XL|$^=4:"C4yy RDڛG֫؅ Y>٢ ExE+A!.658 ?X&!X0.- " !Wu 3( `??FӨѿ+WqpZ]gҒ` ߫)02id:`}X~ <4kaނ1"}%r6J]#G QW0o#Xh߅ GYX xZ0ABL[( $A,"e:B4Ѝ΄_@+~=0%WB ;ЈtҲp D#00p@,V# Z!1F,8tbJ 4aٵbhJtO6:W{0zsWNa cauWE] ,:^y]@0 $"^<MRBR|n˲uƺqzt.Ϩt[$+Ӆ݊>|Ǔ7bp݄zoQ#h}$u)7޾3PF 4q͵C;I;SZz(!C륶lK/Zgو}r^ɱpDvrvNYm^hHvjJ8̑[4n{Xs7؜0E3/w %ɒ0ŸㆼbC&Qep݃*sLRXO꛻=~Ѝ&񔨐UXް.-sطw,̢|=XpYrǽҥAuA; 1ilWe蝚 O^]o*%e-^tYR5CIЕ5siz඿7X-tWi\ ۝?qi\ ѵ{-5\j[ŘWGkγ:eLв㵷vޮmG i0x-mzhbpqzSbwT?oQS>5I#JǠ,pMš 
JA c֠WqĺN1f {NL+419~buk ל9_"84K=PbsN^4:TGJ<<=5'}Q{nsV څ&Q.rݨRdBkz3piŸcG dw]+LV{m_x{UBVS7T <_:.mL*CDLK:N0m骯K|q2ZdRU|umr{>yJMW7cGE>Oǭ9Gn ^v^ܟ1(p|`mT:OSkDM}=HMheR=QA ;r1Gޟy論Zz}!Njm'ce%jQ .ztdlȜv䄣 $;Tm5:+55UY%Nz`ܣr{lQ7=v?}]oLqP?ؼ98j{x$f{ 3;6'I80M§$a?୧ K-̑Sk ۾8YD0)ڰfHuk6OW_nvי/a~UP̿Ōr-u^Z,Qv,\+c3s W k'OP, 킆_$mhFcda]TTՕAJ٣ %gfI.ِmL"5zC3bRgPVC~wSGܻ% f~x@fv5뇫T8g42ǫT Sr51Hmu INLL$7tXҒҌԯ7^b:b1Ý%ʁХmx){jj|]E )ii"X\\L%q.7QYՄSm;&_A= |sfҫZL̎|ӫp$fy[/P̝vqJފ*HO V_dL}iH;r_/̶o* yܬ2;" ##3^,}J=B$$"՗n1HiJlp>jhXkOUa\Cb3ݯS5NJo/"+ \.XJUB|{39Sy:M%חg>ڜ6U?.)ކG5 >s? n,_pm2EBYIʛkmݚw4 ]v-=8y?K NnmF*&9H]g04G-N@cK5B&Wy ϧ;>|׿<*WKilE| PEނP}Kx߮}v;6h2#sZ9V!ս]n{wg?H CÕSd=kꏶ:) U gWNRj  endstream endobj 638 0 obj << /Producer (pdfTeX-1.40.18) /Creator (TeX) /CreationDate (D:20200222161339+08'00') /ModDate (D:20200222161339+08'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) kpathsea version 6.2.3) >> endobj 603 0 obj << /Type /ObjStm /N 41 /First 349 /Length 2250 /Filter /FlateDecode >> stream xZ[O~s=}V+  p( &IS5{`L^VZ)׺56,c6L oÌezx;&ÎgY!2@G1,v SR)5L bZ $R:` 'QX(7lVϘIT i$b9`Efe*2@: +͜Q#m-/sþL>B!QW11 َ Glla\N>ojKI *(ӌӪ݀fZP <̂@&}uwC *RJK2t53VΒmH'XAN%4MD3fYhu1YiP /$WjHO ܛlJjv d-;"+ +Lu7PGIVIS; 0>dohq`xIlieq8tt.ދ!bQ({I ѣ%mn4zNw#;U[VEdt=s"DZ.ZScآ_ԏ-?\#CZRz VX(W T w( !B!"byG 8e$nONjѰA$x~:Cu3ɞ,GldDx6@IA]~k<=owjƝ8>ae{ݳ_67E v4}g֠ ^b 'Wp VÇ{Co;ښi>_oiq `<p 4qu7;6|?G ?_+~5̀;~pW y?/hZ9 _aӤO#ʿ_V$aoʦ{dS`S6` Da,4q/vU CF 0dtM2o#T5lc~#`WBa9W55 LW"U'IYЮnׇ}rҤsmgùgU zU5m4=;]?_g3Ge/ŪzU>,߿K!l'`YbxO_?Dk(q3zWs!-(9FRpPLG R&N ,חa6=qOOb/pAލ"bKb<۷Q݀MJ6ͳFWGdm ܘh)Ad+T\G]=h( txAqRų*$[PkUV{p5BӚJl_ހMթL]<}O$(IW1OC~= "Urn`G HAK #6s(F"> endstream endobj 639 0 obj << /Type /XRef /Index [0 640] /Size 640 /W [1 3 1] /Root 637 0 R /Info 638 0 R /ID [ ] /Length 1725 /Filter /FlateDecode >> stream x%yl\{Ɖ=y%xKf̛8YŎ찔Ҥc mEEZ ]@PUZvoJEiUU[AJZA#={;7c!a܈?iIӼǓl<-=OzV==pͳ1I&ed ڠ %R6|_۵[mfHrl=:B)A9l *Aq:1CczӽW[uȊ!|w}]l&͠"fϷڠ:`3tY(x׈ |F3-_i[`+BYS[` -gߧzD:_ז0(һ CrC]}~GMC"Hه?7Mvꉽ0"ӿr˜Ѿq 
&aJ~|saN>6Dp!=m9GD=7Qǵ4I_S0 (ы_Җ`Y?gI.Hj8ǘl %϶TW[Rl %ʖ([l+aobRfgcR-%ޖx[mk|-+[&Q*=h$9v-mMRQْg"e BHz&fҜ.HuZD1e 쒤MzvNs^+,TmyVǪav?e)AaTHTTB >=#a kI+j:)k6B=4@BҽMzY#4&hWg.}Ѿ~heCloyQ/[$#Ǎ )od뻴9 iVO  CV>O#ى}ǵe BzZE.MvQɞ=GK' &aJz^n#;o%Lc0mћ 8 KnFNy`X\0袼n[a &)ED-wu{oW׬#ʎ(;:%}KQvDeG]'x>w65/SZQvDeA ϼ-DenZ#a gjIv Ŏ;RF$|B(0nHW@¤Հ:`GMCpUi :b눭|MߨgGbu'%\ p@0tLG0iHq5)XTiKW J&]K{uA.B uU)?J *&_^U 7U\ 7^U֮mz`Wg, endstream endobj startxref 3833154 %%EOF spatstat/inst/doc/datasets.R0000644000176200001440000004126513624161264015607 0ustar liggesusers### R code from vignette source 'datasets.Rnw' ################################################### ### code chunk number 1: datasets.Rnw:5-6 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: datasets.Rnw:27-34 ################################################### library(spatstat) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") spatstat.options(transparent=FALSE) options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: datasets.Rnw:213-231 ################################################### opa <- par() ## How to set all margins to zero and eliminate all outer spaces zeromargins <- function() { par( mar=rep(0,4), omd=c(0,1,0,1), xaxs="i", yaxs="i" ) invisible(NULL) } ## Set 'mar' setmargins <- function(...) { x <- c(...) 
x <- rep(x, 4)[1:4] par(mar=x) invisible(NULL) } ################################################### ### code chunk number 4: datasets.Rnw:240-241 (eval = FALSE) ################################################### ## plot(amacrine) ################################################### ### code chunk number 5: datasets.Rnw:243-245 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,1,2,0) plot(amacrine) ################################################### ### code chunk number 6: datasets.Rnw:254-255 (eval = FALSE) ################################################### ## plot(anemones, markscale=1) ################################################### ### code chunk number 7: datasets.Rnw:257-259 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,0,2,0) plot(anemones, markscale=1) ################################################### ### code chunk number 8: datasets.Rnw:272-273 (eval = FALSE) ################################################### ## ants.extra$plotit() ################################################### ### code chunk number 9: datasets.Rnw:275-277 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,0,1,0) ants.extra$plotit() ################################################### ### code chunk number 10: datasets.Rnw:285-286 ################################################### getOption("SweaveHooks")[["fig"]]() plot(austates) ################################################### ### code chunk number 11: datasets.Rnw:296-298 (eval = FALSE) ################################################### ## plot(bdspots, equal.scales=TRUE, pch="+", ## panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) ################################################### ### code chunk number 12: datasets.Rnw:300-304 ################################################### getOption("SweaveHooks")[["fig"]]() zeromargins() plot(bdspots, 
equal.scales=TRUE, pch="+", main="", mar.panel=0, hsep=1, panel.args=function(i)list(cex=c(0.15, 0.2, 0.7)[i])) ################################################### ### code chunk number 13: datasets.Rnw:314-316 (eval = FALSE) ################################################### ## plot(bei.extra$elev, main="Beilschmiedia") ## plot(bei, add=TRUE, pch=16, cex=0.3) ################################################### ### code chunk number 14: datasets.Rnw:318-321 ################################################### getOption("SweaveHooks")[["fig"]]() setmargins(0,0,2,0) plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) ################################################### ### code chunk number 15: datasets.Rnw:324-330 ################################################### getOption("SweaveHooks")[["fig"]]() M <- persp(bei.extra$elev, theta=-45, phi=18, expand=7, border=NA, apron=TRUE, shade=0.3, box=FALSE, visible=TRUE, main="") perspPoints(bei, Z=bei.extra$elev, M=M, pch=16, cex=0.3) ################################################### ### code chunk number 16: datasets.Rnw:339-340 ################################################### getOption("SweaveHooks")[["fig"]]() plot(betacells) ################################################### ### code chunk number 17: datasets.Rnw:345-346 ################################################### getOption("SweaveHooks")[["fig"]]() plot(bramblecanes, cols=1:3) ################################################### ### code chunk number 18: datasets.Rnw:349-350 ################################################### getOption("SweaveHooks")[["fig"]]() plot(split(bramblecanes)) ################################################### ### code chunk number 19: datasets.Rnw:360-361 ################################################### getOption("SweaveHooks")[["fig"]]() plot(bronzefilter,markscale=2) ################################################### ### code chunk number 20: datasets.Rnw:370-371 
################################################### getOption("SweaveHooks")[["fig"]]() plot(cells) ################################################### ### code chunk number 21: datasets.Rnw:379-380 ################################################### getOption("SweaveHooks")[["fig"]]() plot(cetaceans.extra$patterns, main="Cetaceans data", cols=1:5, hsep=1) ################################################### ### code chunk number 22: datasets.Rnw:389-392 ################################################### getOption("SweaveHooks")[["fig"]]() plot(chicago, main="Chicago Crimes", col="grey", cols=c("red", "blue", "black", "blue", "red", "blue", "blue"), chars=c(16,2,22,17,24,15,6), leg.side="left", show.window=FALSE) ################################################### ### code chunk number 23: datasets.Rnw:402-403 ################################################### getOption("SweaveHooks")[["fig"]]() chorley.extra$plotit() ################################################### ### code chunk number 24: datasets.Rnw:419-421 ################################################### getOption("SweaveHooks")[["fig"]]() plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") ################################################### ### code chunk number 25: datasets.Rnw:431-432 ################################################### getOption("SweaveHooks")[["fig"]]() plot(clmfires.extra$clmcov200, main="Covariates for forest fires") ################################################### ### code chunk number 26: datasets.Rnw:443-445 ################################################### getOption("SweaveHooks")[["fig"]]() plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) ################################################### ### code chunk number 27: datasets.Rnw:452-454 ################################################### getOption("SweaveHooks")[["fig"]]() plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), 
parargs=list(mar=rep(1,4))) ################################################### ### code chunk number 28: datasets.Rnw:461-462 ################################################### getOption("SweaveHooks")[["fig"]]() plot(demopat) ################################################### ### code chunk number 29: datasets.Rnw:476-477 ################################################### getOption("SweaveHooks")[["fig"]]() plot(dendrite, leg.side="bottom", main="", cex=0.75, cols=2:4) ################################################### ### code chunk number 30: datasets.Rnw:485-486 ################################################### getOption("SweaveHooks")[["fig"]]() plot(finpines, main="Finnish pines") ################################################### ### code chunk number 31: datasets.Rnw:499-503 ################################################### getOption("SweaveHooks")[["fig"]]() wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) ################################################### ### code chunk number 32: datasets.Rnw:511-512 ################################################### getOption("SweaveHooks")[["fig"]]() plot(gordon, main="People in Gordon Square", pch=16) ################################################### ### code chunk number 33: datasets.Rnw:527-528 ################################################### getOption("SweaveHooks")[["fig"]]() plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") ################################################### ### code chunk number 34: datasets.Rnw:532-533 (eval = FALSE) ################################################### ## system.file("rawdata/gorillas/vegetation.asc", package="spatstat") ################################################### ### code chunk number 35: datasets.Rnw:542-543 ################################################### getOption("SweaveHooks")[["fig"]]() 
plot(hamster, cols=c(2,4)) ################################################### ### code chunk number 36: datasets.Rnw:553-554 ################################################### getOption("SweaveHooks")[["fig"]]() plot(heather) ################################################### ### code chunk number 37: datasets.Rnw:564-565 ################################################### getOption("SweaveHooks")[["fig"]]() plot(humberside) ################################################### ### code chunk number 38: datasets.Rnw:577-578 ################################################### getOption("SweaveHooks")[["fig"]]() plot(hyytiala, cols=2:5) ################################################### ### code chunk number 39: datasets.Rnw:587-588 ################################################### getOption("SweaveHooks")[["fig"]]() plot(japanesepines) ################################################### ### code chunk number 40: datasets.Rnw:597-598 ################################################### getOption("SweaveHooks")[["fig"]]() plot(lansing) ################################################### ### code chunk number 41: datasets.Rnw:601-602 ################################################### getOption("SweaveHooks")[["fig"]]() plot(split(lansing)) ################################################### ### code chunk number 42: datasets.Rnw:609-610 ################################################### getOption("SweaveHooks")[["fig"]]() plot(longleaf) ################################################### ### code chunk number 43: datasets.Rnw:619-621 ################################################### getOption("SweaveHooks")[["fig"]]() plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) ################################################### ### code chunk number 44: datasets.Rnw:635-638 ################################################### getOption("SweaveHooks")[["fig"]]() plot(murchison$greenstone, main="Murchison data", col="lightgreen") 
plot(murchison$gold, add=TRUE, pch=3, col="blue") plot(murchison$faults, add=TRUE, col="red") ################################################### ### code chunk number 45: datasets.Rnw:646-647 ################################################### getOption("SweaveHooks")[["fig"]]() plot(nbfires, use.marks=FALSE, pch=".") ################################################### ### code chunk number 46: datasets.Rnw:650-651 ################################################### getOption("SweaveHooks")[["fig"]]() plot(split(nbfires), use.marks=FALSE, chars=".") ################################################### ### code chunk number 47: datasets.Rnw:654-659 ################################################### getOption("SweaveHooks")[["fig"]]() par(mar=c(0,0,2,0)) plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("blue", "green", "red", "cyan"), leg.side="left") ################################################### ### code chunk number 48: datasets.Rnw:667-669 ################################################### getOption("SweaveHooks")[["fig"]]() plot(nztrees) plot(trim.rectangle(as.owin(nztrees), c(0,5), 0), add=TRUE, lty=3) ################################################### ### code chunk number 49: datasets.Rnw:682-683 ################################################### getOption("SweaveHooks")[["fig"]]() plot(osteo[1:10,], main.panel="", pch=21, bg='white') ################################################### ### code chunk number 50: datasets.Rnw:689-690 (eval = FALSE) ################################################### ## system.file("rawdata/osteo/osteo36.txt", package="spatstat") ################################################### ### code chunk number 51: datasets.Rnw:699-700 ################################################### getOption("SweaveHooks")[["fig"]]() plot(paracou, cols=2:3, chars=c(16,3)) ################################################### ### code chunk number 52: datasets.Rnw:708-709 
################################################### getOption("SweaveHooks")[["fig"]]() ponderosa.extra$plotit() ################################################### ### code chunk number 53: datasets.Rnw:720-723 ################################################### getOption("SweaveHooks")[["fig"]]() pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") ################################################### ### code chunk number 54: datasets.Rnw:743-745 ################################################### getOption("SweaveHooks")[["fig"]]() plot(redwood) plot(redwood3, add=TRUE, pch=20) ################################################### ### code chunk number 55: datasets.Rnw:748-749 ################################################### getOption("SweaveHooks")[["fig"]]() redwoodfull.extra$plotit() ################################################### ### code chunk number 56: datasets.Rnw:763-765 ################################################### getOption("SweaveHooks")[["fig"]]() plot(as.solist(residualspaper[c("Fig1", "Fig4a", "Fig4b", "Fig4c")]), main="") ################################################### ### code chunk number 57: datasets.Rnw:773-774 ################################################### getOption("SweaveHooks")[["fig"]]() shapley.extra$plotit(main="Shapley") ################################################### ### code chunk number 58: datasets.Rnw:781-782 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simdat) ################################################### ### code chunk number 59: datasets.Rnw:790-791 ################################################### getOption("SweaveHooks")[["fig"]]() plot(spiders, pch=16, show.window=FALSE) ################################################### ### code chunk number 60: datasets.Rnw:798-801 ################################################### getOption("SweaveHooks")[["fig"]]() 
plot(sporophores, chars=c(16,1,2), cex=0.6) points(0,0,pch=16, cex=2) text(15,8,"Tree", cex=0.75) ################################################### ### code chunk number 61: datasets.Rnw:810-811 ################################################### getOption("SweaveHooks")[["fig"]]() plot(spruces, maxsize=min(nndist(spruces))) ################################################### ### code chunk number 62: datasets.Rnw:820-821 ################################################### getOption("SweaveHooks")[["fig"]]() plot(swedishpines) ################################################### ### code chunk number 63: datasets.Rnw:830-831 ################################################### getOption("SweaveHooks")[["fig"]]() plot(urkiola, cex=0.5, cols=2:3) ################################################### ### code chunk number 64: datasets.Rnw:838-840 ################################################### getOption("SweaveHooks")[["fig"]]() par(mar=c(0,0,2,0)) plot(waka, markscale=0.04, main=c("Waka national park", "tree diameters")) ################################################### ### code chunk number 65: datasets.Rnw:847-851 ################################################### getOption("SweaveHooks")[["fig"]]() v <- rotate(vesicles, pi/2) ve <- lapply(vesicles.extra, rotate, pi/2) plot(v, main="Vesicles") plot(ve$activezone, add=TRUE, lwd=3) ################################################### ### code chunk number 66: datasets.Rnw:876-877 (eval = FALSE) ################################################### ## system.file("rawdata/vesicles/mitochondria.txt", package="spatstat") ################################################### ### code chunk number 67: datasets.Rnw:885-886 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders) spatstat/inst/ratfor/0000755000176200001440000000000013065443316014374 5ustar liggesusersspatstat/inst/ratfor/inxypOld.r0000755000176200001440000000216313115273007016364 0ustar 
liggesuserssubroutine inxyp(x,y,xp,yp,npts,nedges,score,onbndry) implicit double precision(a-h,o-z) dimension x(npts), y(npts), xp(nedges), yp(nedges), score(npts) logical first, onbndry(npts) zero = 0.0d0 half = 0.5d0 one = 1.0d0 do i = 1,nedges { x0 = xp(i) y0 = yp(i) if(i == nedges) { x1 = xp(1) y1 = yp(1) } else { x1 = xp(i+1) y1 = yp(i+1) } dx = x1 - x0 dy = y1 - y0 do j = 1,npts { xcrit = (x(j) - x0)*(x(j) - x1) if(xcrit <= zero) { if(xcrit == zero) { contrib = half } else { contrib = one } ycrit = y(j)*dx - x(j)*dy + x0*dy - y0*dx if(dx < 0) { if(ycrit >= zero) { score(j) = score(j) + contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else if(dx > zero) { if(ycrit < zero) { score(j) = score(j) - contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else { if(x(j) == x0) { ycrit = (y(j) - y0)*(y(j) - y1) } onbndry(j) = onbndry(j) | (ycrit <= zero) } } } } return end spatstat/inst/ratfor/dppll.r0000755000176200001440000000203313115273007015665 0ustar liggesuserssubroutine dppll(x,y,l1,l2,l3,l4,np,nl,eps,mint,rslt,xmin,jmin) implicit double precision(a-h,o-z) dimension x(np), y(np), rslt(np,nl), xmin(np), jmin(np) double precision l1(nl), l2(nl), l3(nl), l4(nl) one = 1.d0 zero = 0.d0 do j = 1,nl { dx = l3(j) - l1(j) dy = l4(j) - l2(j) alen = sqrt(dx**2 + dy**2) if(alen .gt. eps) { co = dx/alen si = dy/alen } else { co = 0.5 si = 0.5 } do i = 1, np { xpx1 = x(i) - l1(j) ypy1 = y(i) - l2(j) xpx2 = x(i) - l3(j) ypy2 = y(i) - l4(j) d1 = xpx1**2 + ypy1**2 d2 = xpx2**2 + ypy2**2 dd = min(d1,d2) if(alen .gt. eps) { xpr = xpx1*co + ypy1*si if(xpr .lt. zero .or. xpr .gt. alen) { d3 = -one } else { ypr = - xpx1*si + ypy1*co d3 = ypr**2 } } else { d3 = -one } if(d3 .ge. zero) { dd = min(dd,d3) } sd =sqrt(dd) rslt(i,j) = sd if(mint.gt.0) { if(sd .lt. 
xmin(i)) { xmin(i) = sd if(mint.gt.1) { jmin(i) = j } } } } } return end spatstat/inst/ratfor/Makefile0000755000176200001440000000244413624161312016035 0ustar liggesusers RATFOR = /home/adrian/bin/ratfor77 #RATFOR = /usr/local/bin/ratfor CPP = /usr/bin/cpp ########################################################## # Sources actually written by humans: RAT_SRC = dppll.r inxypOld.r C_DOMINIC = dinfty.c dwpure.c C_MISC = raster.h areadiff.c closepair.c connect.c corrections.c \ discarea.c distances.c distmapbin.c distseg.c \ exactdist.c exactPdist.c \ massdisthack.c poly2im.c trigraf.c utils.c xyseg.c C_MH = methas.h dist2.h areaint.c badgey.c dgs.c \ diggra.c dist2.c fexitc.c getcif.c geyer.c \ lookup.c methas.c stfcr.c \ straush.c straushm.c strauss.c straussm.c C_KEST = Kloop.h Kborder.c C_SRC = $(C_DOMINIC) $(C_MISC) $(C_MH) $(C_KEST) CC_SRC = PerfectStrauss.cc HUMAN = $(RAT_SRC) $(C_SRC) $(CC_SRC) Makefile ########################################################## # Source to be generated automatically: RAT_FOR = dppll.f inxypOld.f GENERATED = $(RAT_FOR) ###################################################### ########### TARGETS ################################ target: $(GENERATED) @echo -- Done ------- tar: tar cvf src.tar $(HUMAN) clean: rm $(GENERATED) -rm src.tar ####################################################### ######### RULES ################################## .r.f: $(RATFOR) -o $@ $? 
spatstat/inst/CITATION0000755000176200001440000000500313115273007014227 0ustar liggesuserscitHeader("To cite spatstat in publications use:") citEntry(entry = "Book", title = "Spatial Point Patterns: Methodology and Applications with {R}", author = personList(as.person("Adrian Baddeley"), as.person("Ege Rubak"), as.person("Rolf Turner")), year = "2015", publisher = "Chapman and Hall/CRC Press", address = "London", url="http://www.crcpress.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/", textVersion = paste("Adrian Baddeley, Ege Rubak, Rolf Turner (2015).", "Spatial Point Patterns: Methodology and Applications with R.", "London: Chapman and Hall/CRC Press, 2015.", "URL http://www.crcpress.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/") ) citEntry(entry = "Article", title = "Hybrids of Gibbs Point Process Models and Their Implementation", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner"), as.person("Jorge Mateu"), as.person("Andrew Bevan")), journal = "Journal of Statistical Software", year = "2013", volume = "55", number = "11", pages = "1--43", url = "http://www.jstatsoft.org/v55/i11/", textVersion = paste("Adrian Baddeley, Rolf Turner, Jorge Mateu, Andrew Bevan (2013).", "Hybrids of Gibbs Point Process Models and Their Implementation.", "Journal of Statistical Software, 55(11), 1-43.", "URL http://www.jstatsoft.org/v55/i11/."), header = "If you use hybrid models, please also cite:" ) citEntry(entry = "Article", title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner")), journal = "Journal of Statistical Software", year = "2005", volume = "12", number = "6", pages = "1--42", url = "http://www.jstatsoft.org/v12/i06/", textVersion = paste("Adrian Baddeley, Rolf Turner (2005).", "spatstat: An R Package for Analyzing Spatial Point Patterns.", 
"Journal of Statistical Software 12(6), 1-42.", "URL http://www.jstatsoft.org/v12/i06/."), header = "In survey articles, please cite the original paper on spatstat:" )